blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M โ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55968e8443fcb461836ed5f78744c1e4c108bce1 | a7b0fccb92d6ccf24669ae23ce9275a05b99b527 | /main.py | 8d821260df99e82719e618bbd95d0b4d79a2887a | [] | no_license | tdameros/minesweeper | 18d2aedaf01e84f1ee9dc05645524bdf5c95321a | cf03ccd135b8fec718b3909217159db35e5fce48 | refs/heads/main | 2023-08-11T13:15:21.616502 | 2021-09-13T18:45:03 | 2021-09-13T18:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | import sys
from PySide6 import QtWidgets
from minesweeper import Minesweeper
class MyWidget(QtWidgets.QWidget, Minesweeper):
    """Qt front-end for the Minesweeper engine.

    Mixes the QWidget GUI base with the game-logic class `Minesweeper`
    and renders the 14x18 board as a grid of styled QPushButtons.
    Cell value 9 is the "covered / mine" sentinel used throughout.
    """

    def __init__(self):
        self.mines = 30  # total number of mines on the board
        QtWidgets.QWidget.__init__(self)
        Minesweeper.__init__(self, height=14, width=18, mines=self.mines)
        self.setMaximumSize(450, 350)
        self.setWindowTitle("Minesweeper")
        # NOTE(review): these assignments shadow QWidget.width()/height()
        # methods; the rest of the class relies on them being plain ints.
        self.width = 18
        self.height = 14
        self.setup_ui()
        self.reproduce(first=True)  # initial draw of the covered board

    def setup_ui(self):
        """Build the UI in the conventional five-step order."""
        self.create_layouts()
        self.create_widgets()
        self.modify_widgets()
        self.add_widgets_to_layouts()
        self.setup_connections()

    def coord(self, i):
        # Debug helper: echo a coordinate to stdout.
        print("Coord :", i)

    def create_widgets(self):
        # One button per board cell (14 rows x 18 columns).
        for i in range(14):
            for y in range(18):
                self.create_button(i, y)

    def modify_widgets(self):
        pass

    def create_layouts(self):
        # Borderless, gap-free grid so buttons tile like board cells.
        self.main_layout = QtWidgets.QGridLayout(self)
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.setSpacing(0)

    def add_widgets_to_layouts(self):
        pass

    def setup_connections(self):
        pass

    def create_button(self, i, y, value=9):
        """Create (or refresh) the button for grid row ``i``, column ``y``.

        ``value`` is the cell state: 9 = covered (or a mine once the game
        is lost), 0 = revealed empty, 1..8 = revealed neighbour count.
        Colours alternate in a checkerboard pattern.
        """
        font_color = "black"
        if value == 9 and self.lose:
            # Game over: covered cells are shown as mines ("B") in red.
            case = QtWidgets.QPushButton("B")
            color = "#c84c4c"
            font_color = "black"
        elif value == 9:
            # Still covered: blank button, green checkerboard.
            case = QtWidgets.QPushButton("")
            if y % 2 == 0 and (self.height - 1 - i) % 2 == 0:
                color = "#a2d149"
            elif y % 2 == 1 and (self.height - 1 - i) % 2 == 1:
                color = "#a2d149"
            else:
                color = "#d3f590"
        else:
            # Revealed: show neighbour count (blank for 0), tan checkerboard.
            if value == 0:
                case = QtWidgets.QPushButton("")
            else:
                case = QtWidgets.QPushButton(str(value))
            if y % 2 == 0 and (self.height - 1 - i) % 2 == 0:
                color = "#d7b899"
            elif y % 2 == 1 and (self.height - 1 - i) % 2 == 1:
                color = "#d7b899"
            else:
                color = "#e5c29f"
            # Classic minesweeper per-count colours (counts >= 5 stay black).
            if value == 1:
                font_color = "blue"
            elif value == 2:
                font_color = "green"
            elif value == 3:
                font_color = "red"
            elif value == 4:
                font_color = "yellow"
        case.setStyleSheet(f"""
            background-color: {color};
            color: {font_color};
            border: None;
            max-width: 25px;
            max-height: 25px;
            min-width: 25px;
            min-height: 25px;
            margin: 0px;
        """)
        # y/i are create_button parameters, so each lambda captures its own
        # cell coordinates (no late-binding issue). The row index is flipped
        # because the engine's y axis grows upward.
        case.clicked.connect(lambda: self.playing(y, self.height - 1 - i))
        self.main_layout.addWidget(case, i, y, 1, 1)

    def playing(self, x, y):
        """Forward a click at engine coordinates (x, y) and redraw."""
        self.player_put((x, y))
        self.reproduce()

    def reproduce(self, first=False):
        """Redraw the board; on win/lose reveal the secret grid and exit.

        ``first=True`` skips the end-of-game checks for the initial draw.
        NOTE(review): relies on the module-level ``app`` created in the
        __main__ guard; the variable is named ``lose`` even for the win
        dialog.
        """
        if not first:
            if self.win():
                lose = QtWidgets.QMessageBox(text="Gagnรฉ !")
                for y, listy in enumerate(self.secret_grid):
                    for x, value in enumerate(listy):
                        self.create_button(y, x, value=value)
                lose.exec()
                sys.exit(app.exec())
            if self.lose:
                lose = QtWidgets.QMessageBox(text="Perdu!")
                for y, listy in enumerate(self.secret_grid):
                    for x, value in enumerate(listy):
                        self.create_button(y, x, value=value)
                lose.exec()
                sys.exit(app.exec())
        for y, listy in enumerate(self.player_grid):
            for x, value in enumerate(listy):
                self.create_button(y, x, value=value)

    def win(self):
        """Return True when only the mined cells remain covered.

        Counts cells still holding the covered sentinel (9) in the
        player grid; the game is won when that equals the mine count.
        """
        compteur = 0
        for y, listy in enumerate(self.player_grid):
            for x, value in enumerate(listy):
                if value == 9:
                    compteur += 1
        if compteur == self.mines:
            return True
        else:
            return False
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the board,
    # then hand control to the event loop until the window closes.
    app = QtWidgets.QApplication([])
    widget = MyWidget()
    widget.resize(450, 350)
    widget.show()
    sys.exit(app.exec())
| [
"tomdamerose@gmail.com"
] | tomdamerose@gmail.com |
4b6c1a8e10bab33aaa6629088bb2f48ab5184699 | d2bb13cec7faf28e3d268312298f03c99806bd8b | /calc_tdc_offset/corelli_calc_tdc_offset_func_loop.py | f73d0e5a8641d0c738264885957499cec67aac99 | [] | no_license | rosswhitfield/corelli | 06a91c26556ea788f20f973a1018a56e82a8c09a | d9e47107e3272c4457aa0d2e0732fc0446f54279 | refs/heads/master | 2021-08-07T14:04:24.426151 | 2021-08-03T19:19:05 | 2021-08-03T19:19:05 | 51,771,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from corelli_calc_tdc_offset_func import *
# Batch driver: compute the TDC (time-to-digital converter) offset for a
# range of CORELLI instrument runs and print each result.
# NOTE: Python 2 syntax (print statement) — this script will not run on
# Python 3 without modification.
for i in range(637,640):
#for i in range(2100,2110):
    filename='CORELLI_'+str(i)  # run identifier, e.g. 'CORELLI_637'
    results=calc_tdc_offset(filename)
    print results
| [
"whitfieldre@ornl.gov"
] | whitfieldre@ornl.gov |
45c9d5ee3a7cc8b934eac813c9bce43d8dc8d910 | 13d4a9fda8c393f6c588964b1cca360935491488 | /study/Dictionary.py | 9f1b61960186c17a786182233b34db7ed9bf8efc | [] | no_license | BecomingBigdataAnalyst/DataAnalysis-Python | 66ba0f93cc455adc04d496950f5ce6a2e7cb3cec | a19e181b8bd94e153c79539e8fdbd8a9562a831f | refs/heads/master | 2021-04-07T02:01:21.350824 | 2018-03-19T06:22:17 | 2018-03-19T06:22:17 | 125,472,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | #๋์
๋๋ฆฌ : ๋งคํ ์๋ฃ๊ตฌ์กฐ
#ํค์ ๊ฐ์ ์ฐ๊ฒฐ์ํค๋ ๋ฐฉ์์ผ๋ก ๋ฐ์ดํฐ๋ฅผ ๋ค๋ฃจ๋ ๋ฐฉ๋ฒ ์ ๊ณต
#ํค๋ ์ ์ฅ๋ ๋ฐ์ดํฐ๋ฅผ ์๋ณํ๊ธฐ ์ํ ๋ฒํธ๋ ์ด๋ฆ
#๊ฐ์ ๊ฐ ํค์ ์ฐ๊ฒฐ๋์ด ์ ์ฅ๋ ๋ฐ์ดํฐ
#๋ฐ๋ผ์, ํค๋ง ์๋ฉด ๋ฐ์ดํฐ๋ฅผ ๋ฐ๋ก ์ฐพ์ ์ ์์
#๋์
๋๋ฆฌ๋ { } ์ ํค:๊ฐ ํํ๋ก ์ด์ฉ
#ํค:๊ฐ์ด ์ฌ๋ฌ ๊ฐ ์กด์ฌ ํ ๊ฒฝ์ฐ , ๋ก ๊ตฌ๋ถ
menu = {'1': 'newSungJuk', 2:'showSungJuk', 'abc':'modifySungJuk'} #ํค๋ ๋ค์ํ ์๋ฃํ์ผ๋ก ์ฌ์ฉ
book = {
'bookid': '1',
'bookname' : '์ถ๊ตฌ์์ญ์ฌ' ,
'publicher' : '๊ตฟ์คํฌ์ธ ',
'price' : '7000',
'orderdate' : '2014-07-01'
}
order = {
'orderid' : '1',
'custid' : '1',
'bookid' : '1',
'saleprice' : '6000',
'orderdate' : '2014-07-01'
}
customer={
'custid' : '1',
'bookid' : '1',
'price' : '7000',
'orderdate' : '2014-07-01'
}
print(book)
books_list = []
books_list.append( book )
books_list.append( book )
books_list.append( book )
print(books_list)
#๋์
๋๋ฆฌ ์ฒ๋ฆฌ ๋ฉ์๋
print('1' in book) #๋์
๋๋ฆฌ์์ in ์ฐ์ฐ์๋ key๋ฅผ ๊ฒ์
print('bookid' in book)
print(book['bookid']) #๋์
๋๋ฆฌ์์ ํค๋ก ๊ฒ์
print(book['bookname'])
print(book['price'])
#print(book['orderid']) #์กด์ฌํ์ง ์๋ ํค ๊ฒ์์ ์ค๋ฅ!
print(book.get('bookname'))
print(book.get('orderid')) #์กด์ฌํ์ง ์๋ ํค ๊ฒ์์ None ์ถ๋ ฅ
bkname = book['bookname'] #ํค๋ก ๊ฒ์ํ ๊ฐ ์ถ๋ ฅ
print(bkname)
print(book.get('bookid'))
book['bookid'] = 99 #ํค๋ก ๊ฐ ์์
print(book.get('bookid'))
print(book)
book.update({'ํํ' : '3x4'}) #์๋ก์ด ํค : ๊ฐ ์ถ๊ฐ/ ์์
print(book)
book.update({":"})
print(book)
book.update({'ํํ' : '6 x 10'}) #์๋ก์ด ํค : ๊ฐ ์์
print(book)
del book['ํํ'] #๊ธฐ์กด ํค ์ญ์
print(book)
#book.clear() #๋ชจ๋ ํค ์ญ์
print(book.keys()) #๋ชจ๋ ํค๋ฅผ ์ถ๋ ฅ
print(book.values()) #๋ชจ๋ ๊ฐ์ ์ถ๋ ฅ
print(book.items()) #๋ชจ๋ ํค: ๊ฐ์ ํํ๋ก ์ถ๋ ฅ
print(list(book.items())) #๋ชจ๋ ํค : ๊ฐ์ ํํ-๋ฆฌ์คํธ๋ก ์ถ๋ ฅ
items = book.items() #๋ชจ๋ ํค: ๊ฐ์ ํํ - ๋ฆฌ์คํธ๋ก ์ถ๋ ฅ
print(list(items))
abc=[1,2,3]
print(abc.reverse())
def myRange(start, end, hop=1) :
retVal = start
while retVal <= end:
yield retVal
retVal += hop
hap = 0
for i in myRange(1,5,2): #์ข
๋ฃ๊ฐ์ ํฌํจ์ํจ range ํจ์ ์์ฑ
#๊ฒฐ๊ตญ, ๋ฆฌ์คํธ ํํ์ ๊ฐ์ด ์ถ๋ ฅ
#for i in range(1,5,2) : #i : 1, 3
#for i in [1,3,5] : # i : 1, 3, 5
hap += i
print(hap)
def myRange2(start, end, hop=1) :
retVal = start
while retVal <= end:
#return retVal ?? #์ค๊ฐ ๊ณ์ฐ๊ฒฐ๊ณผ๋ฅผ ์ถ๋ ฅ ๋๋ ์ฒ๋ฆฌ
yield retVal #์คํ์ค์ ๊ณ์ผ๋ ๊ฐ์
retVal += hop #generator ํ์
์ ์ ์ฅํด ๋
myRange2(1,5,2) #yield๋ก ๋๊ธด ๋ฐ์ดํฐ๋ ์ํํ์์
a = myRange(1,5,2) #generator ํ์
์์ฑ
print(a)
print(next(a)) #generator ํ์
์ ์ ์ฅ๋ ๊ฐ์
#iteatorํ์์ผ๋ก ๋ค๋ฃฐ ์ ์์
#iteator๋ ๋ฆฌ์คํธ์ ์ ์ฅ๋ ๊ฐ์ฒด๋ฅผ
#์ํํ๋ฉฐ ํ๋์ฉ ๊บผ๋ด ์ฌ์ฉํ๋ ์๋ฃ๊ตฌ์กฐ
print(next(a))
print(next(a))
for i in a: #generator ํ์
์ ์ ์ฅ๋ ๊ฐ์
print(i) #for๋ฌธ์ผ๋ก๋ ์ถ๋ ฅ ๊ฐ๋ฅ | [
"jjh9523@naver.com"
] | jjh9523@naver.com |
29a9af3be2a85ff3e255f7f20682d60548aa28d3 | 809b5e7c80e72e890cdc2d94ee446d492fea47ba | /adminApp/migrations/0009_instructorfeedback_has_response.py | d2277bc9d3408d265b7d6981e97ecb71dc00e848 | [] | no_license | JoelMekonnen/My-Lingua | fdf0034242bb7fe793ebfa8df8d4751e3775e051 | 41b7cb4f780c68e8bf2ac30923e3d7d4fa424778 | refs/heads/master | 2023-05-25T10:59:23.680281 | 2023-05-14T18:28:20 | 2023-05-14T18:28:20 | 343,859,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # Generated by Django 3.1.7 on 2021-03-09 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``has_response`` flag to InstructorFeedback.

    Marks whether an instructor feedback entry has received a response;
    existing rows are backfilled with False.
    """

    dependencies = [
        ('adminApp', '0008_adminfeedback_instructorfeedback'),
    ]

    operations = [
        migrations.AddField(
            model_name='instructorfeedback',
            name='has_response',
            # Bug fix: the default must be the boolean False, not the
            # string 'False' — any non-empty string is truthy, so Django
            # would have coerced the default to True when saving.
            field=models.BooleanField(default=False),
        ),
    ]
| [
"joelmek.gmail.com"
] | joelmek.gmail.com |
47ede7a086f2a0feeeddf75aef608e80939a862e | f0d85de6e413e360223dad170e782974943e11db | /check_wins.py | b1fdcf841827aa4dd224a9cbf9d2e920a24ef2d3 | [] | no_license | AndrewSaltz/ThePub | b9f88a6ac125bf83a50d59cf07b1f4c950d2d2c8 | fa9117075e2d8b077496d3262036141ba078b70a | refs/heads/master | 2021-01-11T09:40:36.141742 | 2017-08-26T19:11:23 | 2017-08-26T19:11:23 | 77,493,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thepub.settings")
django.setup()
#Get models
from teamsports.models import Teams, Schedule
#Get F
from django.db.models import F
#Make all wins, losses, ties at zero
# Reset every team's record before recomputing it from the schedule.
Teams.objects.all().update(loss=0, win=0, tie=0)
# Tally a win/loss (or tie) for every undisputed game that has both
# scores recorded; disputed games are skipped entirely.
for games in Schedule.objects.all():
    if games.is_disputed is False:
        if games.home_score is not None and games.away_score is not None:
            if games.home_score > games.away_score:
                winner = Teams.objects.get(pk=games.home.team)
                loser = Teams.objects.get(pk=games.away.team)
                # F() expressions make the increment happen in the
                # database, avoiding read-modify-write races.
                winner.win = F('win') + 1
                loser.loss = F('loss') +1
                winner.save()
                loser.save()
                print (winner)
            elif games.home_score < games.away_score:
                winner = Teams.objects.get(pk=games.away.team)
                loser = Teams.objects.get(pk=games.home.team)
                winner.win = F('win') + 1
                loser.loss = F('loss') +1
                winner.save()
                loser.save()
                print (winner)
            elif games.home_score == games.away_score:
                # NOTE(review): the home_tie/away_tie names are swapped
                # relative to the teams they fetch — harmless here since
                # both sides receive the same tie increment.
                home_tie = Teams.objects.get(pk=games.away.team)
                away_tie = Teams.objects.get(pk=games.home.team)
                home_tie.tie = F('tie') + 1
                away_tie.tie = F('tie') +1
                home_tie.save()
                away_tie.save()
                print ("tie")
            else:
                pass
    else:
        continue
"amsaltz@gmail.com"
] | amsaltz@gmail.com |
43882d5e19e6886e6c2e341b84dd6b2d6a68f830 | 4943adf7e95a8fb4b56f34f71477d942de22886c | /ecom/api/product/models.py | f714be11292e40d28434f45bb6a71b1db346845d | [] | no_license | Harsha196/Full-Stack-React_and_Django-EcommerceSite | 0960b0157429756fca1a0351b36828cd7c9d88b1 | eb8f931a8c05ea0748446bb390081a17c2103b39 | refs/heads/master | 2023-01-31T07:25:53.881230 | 2020-12-13T11:31:18 | 2020-12-13T11:31:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | from django.db import models
from api.category.models import Category
# Create your models here.
class Product(models.Model):
    """Catalog product belonging to an optional :class:`Category`.

    NOTE(review): ``price`` and ``stock`` are stored as free-form strings;
    numeric fields (DecimalField / IntegerField) would likely be safer —
    confirm before changing, since that requires a schema migration.
    """
    name=models.CharField(max_length=50)
    description=models.CharField(max_length=250)
    price=models.CharField(max_length=50)
    stock=models.CharField(max_length=50)
    # Soft-delete style flag: inactive products stay in the database.
    is_active=models.BooleanField(default=True,blank=True)
    image=models.ImageField(upload_to='images/',blank=True,null=True)
    # SET_NULL keeps the product if its category is deleted.
    category=models.ForeignKey(Category,on_delete=models.SET_NULL,blank=True,null=True)
    created_at=models.DateTimeField(auto_now_add=True)
    updated_at=models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
| [
"jayam.ganapathi12@gmail.com"
] | jayam.ganapathi12@gmail.com |
5e075deff90a093e627ea1cd97c6ebe0f39f8755 | bb903d05fa6136f785001c562ee55ee02ca04526 | /networktest.py | a211bd13b912e5d955bdcbbdf824b9b3eaca7390 | [] | no_license | NRiess/Prediction-of-Muscle-Activation | 85f618bea71d3e3f59823129e9015eabacd5476a | d6b8ab20793d9a97b795004e3d5f285ba65c5db8 | refs/heads/main | 2023-05-14T09:58:19.622849 | 2021-06-05T18:55:55 | 2021-06-05T18:55:55 | 373,056,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,155 | py | # testsetting for evaluating multiple networks
import numpy as np
import tensorflow as tf
from operator import itemgetter
from numpy import savetxt
import matplotlib.pyplot as plt
import os
from functions import *
from mpl_toolkits.mplot3d import Axes3D
import time
import math
from tensorflow.keras.utils import plot_model
import string
def plot_mae_for_angle_range(val_x, mae, angle_range, LSTM):
    """Plot the per-sample MAE restricted to one shoulder-angle range.

    ``angle_range`` selects which validation samples are kept, based on
    the normalised angle stored in column 0 of ``val_x``:
    0 -> angles <= 0.25, 1 -> 0.25 < angle < 0.75, 2 -> angles >= 0.75.
    ``LSTM`` picks column 1 of ``mae`` (LSTM errors), otherwise column 0
    (dense errors). The kept errors are plotted with a descriptive label
    and their mean is returned. Raises ValueError for any other range.
    """
    errors = mae[:, 1] if LSTM else mae[:, 0]
    network = "LSTM" if LSTM else "Dense"
    if angle_range == 0:
        kept_rows = [row for row in range(len(val_x))
                     if not val_x[row][0] > 0.25]
        label = network + ", angle_range: 0-0.25"
    elif angle_range == 1:
        kept_rows = [row for row in range(len(val_x))
                     if not (val_x[row][0] <= 0.25 or val_x[row][0] >= 0.75)]
        label = network + ", angle_range: 0.25-0.75"
    elif angle_range == 2:
        kept_rows = [row for row in range(len(val_x))
                     if not val_x[row][0] < 0.75]
        label = network + ", angle_range: 0.75-1"
    else:
        raise ValueError("The angle range hast to be \n" +
                         " 0 for validation samples with an angle between 0 and 0.25 \n" +
                         " 1 for validation samples with an angle between 0.25 and 0.75 \n" +
                         " 2 for validation samples with an angle between 0.75 and 1")
    kept_errors = errors[kept_rows]
    plt.plot(kept_errors, label=label)
    return np.mean(kept_errors)
def print_average_mae_for_each_angle_range(angle_range, mae_angle_range, LSTM):
    """Print the mean MAE for one network type and angle range.

    ``angle_range`` 0/1/2 maps to the ranges 0-0.25 / 0.25-0.75 / 0.75-1;
    any other value prints nothing. ``LSTM`` selects the network label
    ("LSTM" vs "Dense"); ``mae_angle_range`` is averaged with np.mean.
    """
    range_text = {0: '0 to 0.25', 1: '0.25 to 0.75', 2: '0.75 to 1'}
    if angle_range in range_text:
        network = 'LSTM' if LSTM else 'Dense'
        print('Average MAE of ' + network + ' NN in angle range ' +
              range_text[angle_range] + ': ' + str(np.mean(mae_angle_range)))
def plot_outputs(model, val_x, LSTM, angle_range, muscle):
    """Plot one muscle's predicted activation over an angle range.

    Runs model[0].predict, drops validation rows outside the requested
    angle range (selected on val_x column 0), and plots the prediction
    column for ``muscle``. NOTE(review): when LSTM is True the prediction
    uses the module-level ``valn_x`` (the n-timestep reshaped data from
    __main__), not the ``val_x`` parameter — the parameter is still used
    for the angle filtering. Confirm this coupling is intentional.
    """
    fontsize = 10
    if LSTM:
        eval = model[0].predict(valn_x)
    else:
        eval = model[0].predict(val_x)
    if angle_range == 0:
        # Keep only samples with angle <= 0.25 (delete from the end so
        # earlier indices stay valid).
        for row in range(len(val_x)-1, -1, -1):
            if val_x[row][0]>0.25:
                eval = np.delete(eval, row, 0)
        if LSTM:
            plt.plot(eval[:, muscle])
            plt.title("LSTM, angle_range: 0-0.25, muscle: "+str(muscle), fontsize=fontsize)
        else:
            plt.plot(eval[:, muscle])
            plt.title("Dense, angle_range: 0-0.25, muscle: "+str(muscle), fontsize=fontsize)
    elif angle_range == 1:
        # Keep only samples with 0.25 < angle < 0.75.
        for row in range(len(val_x)-1, -1, -1):
            if val_x[row][0]<=0.25 or val_x[row][0]>=0.75:
                eval = np.delete(eval, row, 0)
        if LSTM:
            plt.plot(eval[:, muscle])
            plt.title("LSTM, angle_range: 0.25-0.75, muscle: "+str(muscle), fontsize=fontsize)
        else:
            plt.plot(eval[:, muscle])
            plt.title("Dense, angle_range: 0.25-0.75, muscle: "+str(muscle), fontsize=fontsize)
    elif angle_range == 2:
        # Keep only samples with angle >= 0.75.
        for row in range(len(val_x)-1, -1, -1):
            if val_x[row][0]<0.75:
                eval = np.delete(eval, row, 0)
        if LSTM:
            plt.plot(eval[:, muscle])
            plt.title("LSTM, angle_range: 0.75-1, muscle: "+str(muscle), fontsize=fontsize)
        else:
            plt.plot(eval[:, muscle])
            plt.title("Dense, angle_range: 0.75-1, muscle: "+str(muscle), fontsize=fontsize)
    else:
        raise ValueError("The angle range hast to be \n" +
                         " 0 for validation samples with an angle between 0 and 0.25 \n" +
                         " 1 for validation samples with an angle between 0.25 and 0.75 \n" +
                         " 2 for validation samples with an angle between 0.75 and 1")
if __name__ == "__main__":
    # Build (or rebuild) the shuffled/separated training set from the raw
    # .npy file, then reload it through the cached loader.
    trainingset_name = 'tr_data_shuffled_and_separated'
    raw_data_path = 'tr_data.npy'
    tr_data, val_data, tr_x, tr_y, val_x, val_y = shuffle_and_separate_tr_data(raw_data_path, trainingset_name)
    # load data
    tr_data, val_data, tr_x, tr_y, val_x, val_y = get_tr_data('tr_data_shuffled_and_separated')  # ('low_data')
    # sort val data by weight, angle and speed
    val_data = sort_data(val_data, [2, 3, 0, 1], False)
    val_x = val_data[:, :4]   # inputs: first 4 columns
    val_y = val_data[:, 4:]   # targets: remaining columns (muscle activations)
    print('val_shape: ' + str(val_x.shape))
    print('tr_shape: ' + str(tr_x.shape))
    # reshape for LSTM (not needed if using next block of code)
    # tr_x = np.reshape(tr_x, [tr_x.shape[0], 1, tr_x.shape[1]])
    # val_x = np.reshape(val_x, [val_x.shape[0], 1, val_x.shape[1]])
    # data for n timesteps LSTM (use these 2 lines to create LSTM-ready data)
    n = 1
    # data gets sorted by weight, angle, speed in 'create_samples_ntimesteps'
    trn_x, trn_y, valn_x, valn_y = create_samples_ntimesteps(tr_data, val_data, n)
    # Candidate layer-size sweeps (hourglass-shaped nets, 50 sizes each,
    # output layer fixed at 5 muscles). Only the hand-picked `layers`
    # arrays below are actually trained in this run.
    # 8 layers parameters for 50 nets
    layers_arr8 = np.zeros([50, 8])
    for i in range(1, 51):
        setup8 = [i, math.ceil(1.5 * i), 2 * i, 3 * i, 2 * i, i, math.ceil(i * 0.5), 5]
        layers_arr8[i - 1] = setup8
    # 7 layers parameters for 50 nets
    layers_arr7 = np.zeros([50, 7])
    for i in range(1, 51):
        setup7 = [i, math.ceil(1.5 * i), 2 * i, 2 * i, i, math.ceil(0.5 * i), 5]
        layers_arr7[i - 1] = setup7
    # 6 layers parameters for 50 nets
    layers_arr6 = np.zeros([50, 6])
    for i in range(1, 51):
        setup6 = [i, math.ceil(1.5 * i), 2 * i, i, math.ceil(0.5 * i), 5]
        layers_arr6[i - 1] = setup6
    # 5 layers parameters for 50 nets
    layers_arr5 = np.zeros([50, 5])
    for i in range(1, 51):
        setup5 = [i, math.ceil(1.5 * i), 2 * i, i, 5]
        layers_arr5[i - 1] = setup5
    # 4 layers parameters for 50 nets
    layers_arr4 = np.zeros([50, 4])
    for i in range(1, 51):
        setup4 = [i, 2 * i, math.ceil(1.5 * i), 5]
        layers_arr4[i - 1] = setup4
    # 3 layers parameters for 50 nets
    layers_arr3 = np.zeros([50, 3])
    for i in range(1, 51):
        setup3 = [math.ceil(0.5 * i), math.ceil(0.75 * i), 5]
        layers_arr3[i - 1] = setup3
    # testing networks
    # create networks with create_networksLSTM or create_networksDENSE
    # eval_models( ) saves and returns results as array with [mae, avg inference time] for each model and an array of the mae for all steps
    #
    layers = np.array([[10, 20, 40, 20, 10, 5]])  # 5, 20, 5
    models_LSTM = create_networksLSTM(trn_x, trn_y, layers)
    layers = np.array([[10, 20, 5]])  # 5, 20, 5
    # choose between: create_networksDENSE() or create_networksLSTM()
    models_DENSE = create_networksDENSE(tr_x, tr_y, layers)  # create_networksDENSE create_networksLSTM
    resultsD_DENSE, mae_DENSE = eval_models(val_x, val_y, models_DENSE, '3', 'D')
    resultsD_LSTM, mae_LSTM = eval_models(valn_x, valn_y, models_LSTM, '3', 'D')
    # Column 0 = dense MAE per sample, column 1 = LSTM MAE per sample.
    mae = np.concatenate((mae_DENSE[0, :].reshape(mae_DENSE.shape[1], 1), mae_LSTM[0, :].reshape(mae_LSTM.shape[1], 1)), axis=1)
    plot_model(models_DENSE[0], to_file='model_DENSE.png', show_shapes=True, show_layer_names=True)
    plot_model(models_LSTM[0], to_file='model_LSTM.png', show_shapes=True, show_layer_names=True)
    # MAE comparison figure: one curve per network type and angle range.
    fig_mae, axs_mae = plt.subplots(1, 1)
    plt.title('MAE')
    mae_angle_range = np.zeros((2, 3))
    for LSTM in range(0, 2):
        for angle_range in range(0, 3):
            average_mae = plot_mae_for_angle_range(val_x, mae, angle_range, LSTM)
            mae_angle_range[LSTM, angle_range] = average_mae
    print('Average MAE of dense network: ' + str(np.mean(mae_angle_range[0, :].flatten())))
    print('Average MAE of LSTM network: ' + str(np.mean(mae_angle_range[1, :].flatten())))
    plt.legend(['Dense, angle: 0-0.25', 'Dense, angle: 0.25-0.75', 'Dense, angle: 0.75-1',
                'LSTM, angle: 0-0.25', 'LSTM, angle: 0.25-0.75', 'LSTM, angle: 0.75-1'])
    plt.get_current_fig_manager().window.showMaximized()
    for LSTM in range(0, 2):
        for angle_range in range(0, 3):
            print_average_mae_for_each_angle_range(angle_range, mae_angle_range[LSTM, angle_range], LSTM)
            # NOTE(review): this second call passes only two of the three
            # required arguments and will raise TypeError at runtime —
            # it looks like a leftover duplicate that should be removed.
            print_average_mae_for_each_angle_range(mae_angle_range[LSTM, angle_range], LSTM)
    # 3x5 grid of predicted activations: rows = angle ranges, cols = muscles.
    fig_output, axs_output = plt.subplots(3, 5)
    fig_output.suptitle('Muscle activation')
    counter = 1
    for angle_range in range(0, 3):
        for muscle in range(0, 5):
            plt.subplot(3, 5, counter)
            plot_outputs(models_DENSE, val_x, False, angle_range, muscle)
            plot_outputs(models_LSTM, val_x, True, angle_range, muscle)
            plt.legend(['Dense', 'LSTM'])
            plt.ylim(-0.2, 1.2)
            counter += 1
    plt.get_current_fig_manager().window.showMaximized()
    plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
4bbac5316f308b5d3e670762dc88786d18c9bee7 | 64497b25c73cfae34b0aa37b3f7d5f042db54f15 | /object_detection/detect_objects.py | ed6c2bd1e72fce4be7c48d40a961f5d8eb8b240e | [
"MIT"
] | permissive | guidocalvano/ObjectDetection | 904107ecb94cd6af92f258b97b68def0a9646780 | cfa75084ea65f49542ac5a2a6210e565373530cf | refs/heads/master | 2020-11-24T08:28:55.717114 | 2019-01-07T22:24:04 | 2019-01-07T22:24:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,136 | py | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
import time
import glob
from io import StringIO
from PIL import Image
import matplotlib.pyplot as plt
from object_detection.utils import label_map_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format
from multiprocessing.dummy import Pool as ThreadPool
import os
import config
# Inference tuning knobs: MINIMUM_CONFIDENCE is the score threshold below
# which detections are discarded. MAX_NUMBER_OF_BOXES is unused in this
# file — presumably a cap on boxes per image; confirm before removing.
MAX_NUMBER_OF_BOXES = 10
MINIMUM_CONFIDENCE = 0.9
# Parse the training pipeline config to recover the label-map path that
# was used during training.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(config.PIPELINE_CONFIG_PATH, 'r') as f:
    text_format.Merge(f.read(), pipeline_config)
# PATH_TO_LABELS = os.path.join(config.ANNOTATION_PATH, 'label_map.pbtxt')
# Build the class-id -> category-metadata lookup used to name detections.
label_map = label_map_util.load_labelmap(pipeline_config.train_input_reader.label_map_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=sys.maxsize, use_display_name=True)
CATEGORY_INDEX = label_map_util.create_category_index(categories)
class PredictionServer:
    """Object-detection inference wrapper around a frozen TF1 graph.

    Loads the exported frozen inference graph (hot-reloading it when the
    file on disk changes) and converts the detections for a single image
    into a Pascal-VOC-style XML annotation string.
    """

    def __init__(self):
        self.detection_graph = None  # tf.Graph holding the frozen model
        self.last_load = None        # ctime of the graph file at last load
        self.load_graph()

    def load_graph(self):
        """(Re)load the frozen graph if the file on disk has changed."""
        print('load graph')
        graph_file_path = os.path.join(config.OUTPUT_INFERENCE_GRAPH_PATH, 'frozen_inference_graph.pb')
        # Skip the reload when the file's creation time is unchanged
        # since the last load.
        if self.last_load is not None and os.path.isfile(graph_file_path) and self.last_load == os.path.getctime(graph_file_path):
            print('graph is already up to date')
            return
        print('update required, reloading graph')
        self.last_load = os.path.getctime(graph_file_path)
        # Load model into memory
        print('Loading model...')
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_file_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

    def load_image_into_numpy_array(self, image):
        """Convert a PIL RGB image to an (H, W, 3) uint8 numpy array."""
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)

    def detection_xml_string(self, boxes, class_names, image_width, image_height):
        """Render detections as a Pascal-VOC-style XML annotation.

        ``boxes`` holds normalised [ymin, xmin, ymax, xmax] rows which are
        scaled back to pixel coordinates; ``class_names`` are category
        dicts with a 'name' key, one per box.
        NOTE(review): XML is built by string concatenation — class names
        are not escaped; fine for trusted label maps only.
        """
        result = """
    <annotation>
        <folder>less_selected</folder>
        <filename>undefined</filename>
        <size>
            <width>""" + str(image_width) + """</width>
            <height>""" + str(image_height) + """</height>
        </size>
        <segmented>0</segmented>
        """
        for i in range(len(class_names)):
            ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
            print(json.dumps(class_names[i]))
            print(json.dumps(ymin))
            print(json.dumps(ymax))
            result += """<object>
            <name>""" + class_names[i]["name"] + """</name>
            <pose>Unspecified</pose>
            <truncated>0</truncated>
            <difficult>0</difficult>
            <bndbox>
                <xmin>""" + str(xmin * image_width) + """</xmin>
                <ymin>""" + str(ymin * image_height) + """</ymin>
                <xmax>""" + str(xmax * image_width) + """</xmax>
                <ymax>""" + str(ymax * image_height) + """</ymax>
            </bndbox>
        </object>"""
        result += "</annotation>"
        return result

    def detect_objects(self, image_path):
        """Run detection on one image file and return the annotation XML.

        NOTE(review): a new tf.Session is created per call — correct but
        slow; a cached session would avoid the per-request setup cost.
        """
        self.load_graph()
        print('detecting...')
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph) as sess:
                # Fetch the standard TF Object Detection API tensors.
                image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
                detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
                detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
                image = Image.open(image_path)
                image_np = self.load_image_into_numpy_array(image)
                # Model expects a batch dimension: (1, H, W, 3).
                image_np_expanded = np.expand_dims(image_np, axis=0)
                (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded})
                # Keep only detections above the confidence threshold.
                box_filter = scores > MINIMUM_CONFIDENCE
                boxes = boxes[box_filter]
                classes = classes[box_filter]
                scores = scores[box_filter]
                class_names = []
                for i in range(classes.shape[0]):
                    class_names.append(CATEGORY_INDEX[classes[i]])
                return self.detection_xml_string(boxes, class_names, image.size[0], image.size[1])
| [
"garbagedetectionamsterdam@gmail.com"
] | garbagedetectionamsterdam@gmail.com |
1bee663d7c4ec53d0aae190aa76827e89a0ec34e | b65032c8b76dd2115fd37ae45669a44537ad9df4 | /Code/dictionary_words.py | a1ae64f3596492ec99008c0aa807de8a02d24fd2 | [] | no_license | reikamoon/CS-1.2-Intro-Data-Structures | a795dc8ca9e52f02cafb9d0782a80632bcc7b206 | 40b19ad8d93631bbdbd589fa95b0b3a7ec40b53a | refs/heads/master | 2022-12-22T00:22:05.667638 | 2019-12-11T20:45:11 | 2019-12-11T20:45:11 | 220,103,212 | 0 | 0 | null | 2022-12-08T06:16:43 | 2019-11-06T22:35:08 | Python | UTF-8 | Python | false | false | 642 | py | from random import randint
from os import sys
def get_words():
    """Read the system dictionary and return its words as a list.

    Splits /usr/share/dict/words on newlines; the trailing newline (if
    any) yields an empty final entry, matching the original behaviour.
    """
    with open('/usr/share/dict/words', 'r') as dictionary_file:
        return dictionary_file.read().split('\n')
def random_words(integer_input, word_list):
    """Build a space-separated string of random words from ``word_list``.

    Loops ``integer_input`` times; the final iteration prints the
    "My Random Sentence:" header instead of appending a word, so the
    returned sentence contains ``integer_input - 1`` words, each followed
    by a space (legacy behaviour, preserved). Returns '' for inputs <= 1.

    Bug fix: the word count previously read the module-level ``words``
    variable instead of the ``word_list`` parameter, breaking any caller
    that passed its own list.
    """
    sentence = str()
    while integer_input > 0:
        index = randint(0, len(word_list) - 1)
        if integer_input == 1:
            print("My Random Sentence:")
        else:
            sentence += word_list[index] + ' '
        integer_input -= 1
    return sentence
if __name__ == '__main__':
    # CLI entry point: first argument is the number of loop iterations
    # passed to random_words (the printed sentence holds one fewer word).
    words = get_words()
    integer_input = int(sys.argv[1])
    print(random_words(integer_input, words))
| [
"ambrosio.anjelica@gmail.com"
] | ambrosio.anjelica@gmail.com |
6dfbfef776daceb15fe420c71a7effaf85379b71 | 2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5 | /tests/layer_tests/pytorch_tests/test_convnd.py | 8b46b2992d2c072c48f4b6aaa35fbb0cdf2c3517 | [
"Apache-2.0"
] | permissive | openvinotoolkit/openvino | 38ea745a247887a4e14580dbc9fc68005e2149f9 | e4bed7a31c9f00d8afbfcabee3f64f55496ae56a | refs/heads/master | 2023-08-18T03:47:44.572979 | 2023-08-17T21:24:59 | 2023-08-17T21:24:59 | 153,097,643 | 3,953 | 1,492 | Apache-2.0 | 2023-09-14T21:42:24 | 2018-10-15T10:54:40 | C++ | UTF-8 | Python | false | false | 10,460 | py | # Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from openvino.frontend import FrontEndManager
from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder
from pytorch_layer_test_class import PytorchLayerTest
class TestConv2D(PytorchLayerTest):
    """Layer test: aten::conv2d lowering through the PyTorch frontend."""

    def _prepare_input(self):
        # Single random NCHW input: batch 2, 3 channels, 25x25 spatial.
        import numpy as np
        return (np.random.randn(2, 3, 25, 25).astype(np.float32),)

    def create_model(self, weights_shape, strides, pads, dilations, groups, bias):
        """Build a minimal module wrapping F.conv2d with fixed weights."""
        import torch
        import torch.nn.functional as F

        class aten_conv2d(torch.nn.Module):
            def __init__(self):
                super(aten_conv2d, self).__init__()
                self.weight = torch.randn(weights_shape)
                self.bias = None
                if bias:
                    self.bias = torch.randn(weights_shape[0])
                self.strides = strides
                self.pads = pads
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv2d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups)

        ref_net = None
        # Third element names the op the conversion is expected to handle.
        return aten_conv2d(), ref_net, "aten::conv2d"

    # Sweep strides / (possibly asymmetric or string) padding / dilation /
    # grouped convolution; the last case is a depthwise conv (groups=3).
    @pytest.mark.parametrize("params",
                             [{'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 2, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': [0, 1], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': [1, 0], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [3, 1, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 3},
                              ])
    @pytest.mark.parametrize("bias", [True, False])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_conv2d(self, params, bias, ie_device, precision, ir_version):
        self._test(*self.create_model(**params, bias=bias),
                   ie_device, precision, ir_version)
class TestConv1D(PytorchLayerTest):
    """Layer test: aten::conv1d lowering through the PyTorch frontend."""

    def _prepare_input(self):
        # Single random NCL input: batch 2, 3 channels, length 25.
        import numpy as np
        return (np.random.randn(2, 3, 25).astype(np.float32),)

    def create_model(self, weights_shape, strides, pads, dilations, groups, bias):
        """Build a minimal module wrapping F.conv1d with fixed weights."""
        import torch
        import torch.nn.functional as F

        class aten_conv1d(torch.nn.Module):
            def __init__(self):
                super(aten_conv1d, self).__init__()
                self.weight = torch.randn(weights_shape)
                self.bias = None
                if bias:
                    self.bias = torch.randn(weights_shape[0])
                self.strides = strides
                self.pads = pads
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv1d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups)

        ref_net = None
        return aten_conv1d(), ref_net, "aten::conv1d"

    # Sweep strides / padding (including string modes) / dilation /
    # grouped (depthwise) convolution.
    @pytest.mark.parametrize("params",
                             [{'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [3, 3, 3], 'strides': 2, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1},
                              {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1, 'groups': 1},
                              {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1, 'groups': 1},
                              {'weights_shape': [3, 1, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 3},
                              ])
    @pytest.mark.parametrize("bias", [True, False])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_conv1d(self, params, bias, ie_device, precision, ir_version):
        self._test(*self.create_model(**params, bias=bias),
                   ie_device, precision, ir_version)
class TestConv3D(PytorchLayerTest):
    """Layer test: aten::conv3d lowering through the PyTorch frontend."""

    def _prepare_input(self):
        # Single random NCDHW input: batch 2, 3 channels, 25x25x25 volume.
        import numpy as np
        return (np.random.randn(2, 3, 25, 25, 25).astype(np.float32),)

    def create_model(self, weights_shape, strides, pads, dilations, groups, bias):
        """Build a minimal module wrapping F.conv3d with fixed weights."""
        import torch
        import torch.nn.functional as F

        class aten_conv3d(torch.nn.Module):
            def __init__(self):
                super(aten_conv3d, self).__init__()
                self.weight = torch.randn(weights_shape)
                self.bias = None
                if bias:
                    self.bias = torch.randn(weights_shape[0])
                self.strides = strides
                self.pads = pads
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv3d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups)

        ref_net = None
        return aten_conv3d(), ref_net, "aten::conv3d"

    # Sweep strides / all per-axis asymmetric paddings / string padding
    # modes / dilation / grouped (depthwise) convolution.
    @pytest.mark.parametrize("params",
                             [{'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 2, 'pads': 0, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 1, 0], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 0, 0], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 0, 1], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 1, 0], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 1, 1], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 0, 1], 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1,
                               'groups': 1},
                              {'weights_shape': [3, 1, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 3},
                              ])
    @pytest.mark.parametrize("bias", [True, False])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_conv3d(self, params, bias, ie_device, precision, ir_version):
        self._test(*self.create_model(**params, bias=bias),
                   ie_device, precision, ir_version)
class TestConv2DInSubgraph(PytorchLayerTest):
    """Conversion test for conv2d used both in the main graph and inside an
    input-dependent ``if`` branch."""
    def _prepare_input(self):
        import numpy as np
        # Image batch plus an int flag that drives the `if y:` branch in forward().
        return (np.random.randn(2, 3, 25, 25).astype(np.float32), np.array([1], dtype=np.int32))
    def convert_directly_via_frontend(self, model, example_input, trace_model, dynamic_shapes, ov_inputs, freeze_model):
        # Overload function to allow reproduction of issue caused by additional freeze.
        # NOTE(review): the extra torch.jit.freeze() below is the delta from the
        # base helper — presumably the trigger for the issue; confirm against
        # the base class implementation.
        import torch
        fe_manager = FrontEndManager()
        fe = fe_manager.load_by_framework('pytorch')
        model.eval()
        with torch.no_grad():
            if trace_model:
                model = torch.jit.trace(model, example_input)
            else:
                model = torch.jit.script(model)
            model = torch.jit.freeze(model)
        print(model.inlined_graph)
        decoder = TorchScriptPythonDecoder(model)
        im = fe.load(decoder)
        om = fe.convert(im)
        self._resolve_input_shape_dtype(om, ov_inputs, dynamic_shapes)
        return model, om
    def create_model(self):
        import torch
        from torchvision.ops import Conv2dNormActivation
        class aten_conv2d(torch.nn.Module):
            def __init__(self):
                super().__init__()
                convs = []
                conv_depth=2
                for _ in range(conv_depth):
                    convs.append(Conv2dNormActivation(3, 3, 3, norm_layer=None))
                self.convs = torch.nn.Sequential(*convs)
                # Re-initialize: normal weights, zero biases.
                for layer in self.modules():
                    if isinstance(layer, torch.nn.Conv2d):
                        torch.nn.init.normal_(layer.weight)  # type: ignore[arg-type]
                        torch.nn.init.constant_(layer.bias, 0)  # type: ignore[arg-type]
            def forward(self, x, y):
                acc = self.convs(x)
                # Same submodule invoked again from inside the conditional branch.
                if y:
                    acc += self.convs(x)
                return acc
        ref_net = None
        return aten_conv2d(), ref_net, "aten::conv2d"
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_conv2d(self, ie_device, precision, ir_version):
        self._test(*self.create_model(),
                   ie_device, precision, ir_version, freeze_model=True, dynamic_shapes=False)
| [
"noreply@github.com"
] | noreply@github.com |
3e61b344581e41fde3a12da57f13f08a18175541 | 93659df3bffee1874112e44fc081de71a35da866 | /code/zhuanqu_test.py | ddc57b840f7f03640bf3a176da73ecaca2e9d8f0 | [] | no_license | schemmy/hupudata | 03c9fe92e69241ea253d5d0d9f462b1d7bcf7c0b | a22be24e9fe38215ad217436a7198c4c9bf4e62b | refs/heads/master | 2021-05-09T00:18:26.675710 | 2018-02-14T00:22:31 | 2018-02-14T00:22:31 | 119,740,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | <<<<<<< HEAD
##################
# @Author: Chenxin Ma
# @Email: machx9@gmail.com
# @Date: 2018-02-02 17:58:59
# @Last Modified by: schemmy
# @Last Modified time: 2018-02-13 19:22:28
##################
from os import listdir
from os.path import isdir, join
import pandas as pd
import csv,codecs
def get_score(row):
    """Score a post row: rows whose 'poster' column is 'Y' count 5, all others 1."""
    return 5 if row['poster'] == 'Y' else 1
def combine_teams():
    """Merge every per-team CSV under ../data/<folder>/ into ../data/zhuanqu.csv.

    For each team file: score every row via get_score ('Y' rows count 5,
    others 1), sum the scores per user id, keep users whose total is >= 3,
    tag the rows with the team name (the file stem), and append them to one
    combined frame.  The result is written out, re-read, sorted by user id
    and written again.
    """
    path = '../data/'
    folders = [f for f in listdir(path) if isdir(join(path, f))]
    files = [path+i+'/'+f for i in folders for f in listdir(path+i) if not f.startswith('.')]
    count = 0
    for f in files:
        o = pd.read_csv(f, sep=',',
                        names=['poster', 'ids', 'name', 'url'])
        o['sc'] = o.apply(lambda row: get_score(row), axis=1)
        o1 = o.groupby(['ids']).sum()
        o2 = o1.sort_values(['sc'], ascending=[0])
        # Team name is the file stem, e.g. '.../lakers.csv' -> 'lakers'.
        o2['team'] = f.split('/')[-1].split('.')[0]
        # Keep only users with an aggregate score of at least 3.
        o3 = o2[o2['sc'] >= 3]
        count += 1
        if count == 1:
            o_entire = o3
        else:
            o_entire = pd.concat([o_entire, o3])
        # print (o3)
        # print (len(o), len(o3), f)
    o_entire.to_csv('../data/zhuanqu.csv', header=False)
    #61062 50564
    o = pd.read_csv('../data/zhuanqu.csv', sep=',', names=['ids', 'sc', 'team'])
    o = o.sort_values(['ids'])
    print (len(o), len(set(o['ids'])))
    o.to_csv('../data/zhuanqu.csv', header=False, index=False)
def build_network():
    """Build a team<->user adjacency list from zhuanqu.csv into network.txt.

    Node ids: team files get 0..n_teams-1 in listing order; every user id in
    zhuanqu.csv gets the next free integer.  Each output line has the form
    "<node>,<neighbour>,<neighbour>,...".
    """
    path = '../data/'
    folders = [f for f in listdir(path) if isdir(join(path, f))]
    files = [f.split('.')[0] for i in folders for f in listdir(path+i) if not f.startswith('.')]
    dic = {}
    for i in range(len(files)):
        dic[files[i]] = i
    print (dic)
    count = len(dic)
    o = pd.read_csv('../data/zhuanqu.csv', sep=',', names=['ids', 'sc', 'team'])
    # Assign node ids to any user not seen yet.
    for index, row in o.iterrows():
        if row['ids'] not in dic:
            dic[row['ids']] = count
            count += 1
    network = {}
    # Record each team-user edge on both endpoints (undirected bipartite graph).
    for index, row in o.iterrows():
        network[dic[row['team']]] = network.get(dic[row['team']], []) + [str(dic[row['ids']])]
        network[dic[row['ids']]] = network.get(dic[row['ids']], []) + [str(dic[row['team']])]
    txt = open('../data/network.txt', 'w')
    for i in range(len(network)):
        txt.write(str(i) + ',')
        txt.write(','.join(network[i]))
        txt.write('\n')
    txt.close()
def node_txt():
    """Write the Gephi node table ../data/node.csv from ../data/network.txt.

    Node i's label is the i-th team file stem (listing order, matching
    build_network); `counts` is the node's degree, i.e. the number of
    neighbours on its line in network.txt.  Only the team nodes (the first
    len(files) lines) are emitted.
    """
    path = '../data/'
    folders = [f for f in listdir(path) if isdir(join(path, f))]
    files = [f.split('.')[0] for i in folders for f in listdir(path + i) if not f.startswith('.')]
    # BUG FIX: the original opened network.txt and never closed it; both
    # handles are now managed by `with`.
    with codecs.open('../data/node.csv', 'w') as out, \
            open('../data/network.txt', 'r') as txt:
        writer = csv.writer(out)
        writer.writerow(['Id', 'Label', 'Discipline', 'counts'])
        for i, line in enumerate(txt):
            if i == len(files):
                break
            # First 19 sub-forums are football teams, the rest are NBA teams
            # (matches the two source folders) -- confirm if folders change.
            cat = 'football' if i < 19 else 'nba'
            writer.writerow([str(i), files[i], cat, str(len(line.split(',')) - 1)])
def edge_txt():
    """Convert ../data/edge.txt into a Gephi edge table ../data/edge.csv.

    Each input line is appended with a 'Directed' type column; the columns
    are Source, Target, Weight, Type.
    """
    # BUG FIX: the original opened edge.txt and never closed it; both handles
    # are now managed by `with`.
    with open('../data/edge.txt', 'r') as txt, \
            codecs.open('../data/edge.csv', 'w') as out:
        writer = csv.writer(out)
        writer.writerow(['Source', 'Target', 'Weight', 'Type'])
        for line in txt:
            writer.writerow(line.split(',') + ['Directed'])
# combine_teams()
# build_network()
node_txt()
# edge_txt() | [
"chm514@lehigh.edu"
] | chm514@lehigh.edu |
9a5313aa163c400517d5d6dbb6c1015e050657bd | 9567d8d9572a0f6dbf45139a7d2dde8ba8554c58 | /CNN/รrnek_รงalฤฑลmalar/El_yazisi_siniflandirma/mnist.py | 0d3fcef2329a7ff0019af068222d4815fbc5a680 | [] | no_license | kilicmustafa/Deep_Learning | 4b0ee82aed11ade6fe9487b16438387021388126 | 16a69f71c5d0f459b508a21eae942ac9575179d5 | refs/heads/master | 2022-07-07T11:01:33.313185 | 2020-05-11T19:15:08 | 2020-05-11T19:15:08 | 261,508,705 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,679 | py | #import list
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#%% load dataset
train = pd.read_csv("mnist_train.csv")
print(train.shape)
print(train.head(5))
test = pd.read_csv("mnist_test.csv")
print(test.shape)
print(train.head(5))
#%% X- Y separation
y_train = train["label"]
x_train = train.drop(labels = ["label"] ,axis = 1)
y_test = test["label"]
x_test = test.drop(labels = ["label"] ,axis = 1)
print(y_train.head(3))
print(x_train.head(3))
print(y_test.head(3))
print(x_test.head(3))
plt.figure()
sns.countplot(y_train , palette = "icefire")
plt.title("train y_head class variable counts")
print(y_train.value_counts())
plt.show()
plt.figure()
sns.countplot(y_test)
plt.title("test y_head class variable counts")
print(y_test.value_counts())
plt.show()
plt.figure()
img = np.array(x_train.iloc[9])
img = img.reshape((28 ,28))
plt.imshow(img , cmap= "gray")
plt.axis("off")
plt.show()
#%% Normalization , Reshape and Label Encoding
#Normalize
x_train = x_train / 255.0
x_test = x_test / 255.0
print("x_train shape : ",x_train.shape)
print("x_test shape : " , x_test.shape)
#Reshape
x_train = x_train.values.reshape( -1 ,28,28 ,1 )
x_test = x_test.values.reshape( -1 ,28,28 ,1 )
print("x_train shape : " , x_train.shape)
print("x_test shape : ",x_test.shape)
#%% Train - Validation split
from sklearn.model_selection import train_test_split
x_train ,x_val ,y_train , y_val = train_test_split(x_train ,y_train , random_state = 3 ,test_size = 0.1 )
print("x_train shape : " , x_train.shape)
print("y_train shape : " ,y_train.shape)
print("x_val shape : " ,x_val.shape)
print("y_val shape : " ,y_val.shape)
#%% Label Encoding
# One-hot encode the 0-9 digit labels with Keras.
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train, num_classes = 10)
y_val = to_categorical(y_val, num_classes = 10)
# BUG FIX: y_test was previously encoded from y_val, silently replacing the
# test labels with the (differently sized) validation labels.
y_test = to_categorical(y_test, num_classes = 10)
#%% Create Model
from keras.models import Sequential
from keras.layers import Conv2D , MaxPooling2D ,Activation ,Dropout ,Flatten ,Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
model = Sequential()
model.add(Conv2D(filters = 16 , kernel_size = (3 ,3) ,input_shape = (28 , 28 ,1)))
model.add(Activation("relu"))
model.add(MaxPooling2D())
model.add(Conv2D(filters = 32 , kernel_size = (3,3) ))
model.add(Activation("relu"))
model.add(MaxPooling2D())
model.add(Conv2D(filters = 64 , kernel_size = (3,3) ))
model.add(Activation("relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(10 )) # deฤisken sayฤฑsฤฑ
model.add(Activation("softmax")) #kategori sayฤฑsฤฑ fazla olduฤu iรงin
optimizer = Adam(lr = 0.001 ,beta_1 = 0.9 ,beta_2 =0.999 )
model.compile(optimizer = optimizer,
loss = "categorical_crossentropy",
metrics = ["accuracy"])
batch_size = 32
epochs = 10 #
#%% Data generation Train-Test
# Data augmentation pipeline for the training images.
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # dimension reduction
        rotation_range=0.5,  # randomly rotate images in the range 5 degrees
        zoom_range = 0.5, # Randomly zoom image 5%
        width_shift_range=0.5,  # randomly shift images horizontally 5%
        height_shift_range=0.5,  # randomly shift images vertically 5%
        horizontal_flip=False,  # disabled: mirroring can turn e.g. a 6 into a 9
        vertical_flip=False)  # disabled: vertical flips change the digit
datagen.fit(x_train)
hist= model.fit_generator(datagen.flow(x_train ,y_train , batch_size = batch_size),
validation_data = (x_val ,y_val),
epochs =epochs,
steps_per_epoch = 1600 // batch_size)
#%% Model Save
model.save_weights("save_model_1.h5")
#%% Save History
import pandas as pd
import json
hist_df = pd.DataFrame(hist.history)
with open("hist_save.json" ,"w") as f:
hist_df.to_json(f)
#%% Load History
with open("hist_save.json") as json_file:
h = json.load(json_file)
df = pd.DataFrame(h)
print(df)
plt.plot(df["loss"], label = "Train Loss")
plt.plot(df["val_loss"], label = "Validation Loss")
plt.legend()
plt.show()
plt.plot(df["accuracy"], label = "Train Loss")
plt.plot(df["val_accuracy"], label = "Validation Loss")
plt.legend()
plt.show()
| [
"51002612+kilicmustafa@users.noreply.github.com"
] | 51002612+kilicmustafa@users.noreply.github.com |
13f0a300bed6f6a9e0f1e0a34c142e75fbc7d12e | 7a43b71484b8ef010c0d4cece58ffdd379f15ac6 | /Python_for_data_analysis/Chapter_03/cprof_example.py | 2c670d022ec60962bca637c65142fba204dfee5c | [
"MIT"
] | permissive | ALEXKIRNAS/DataScience | 7ea6155bd786920b216ed4363e03c79b5540a6cf | 14119565b8fdde042f6ea3070bc0f30db26620c0 | refs/heads/master | 2021-01-24T06:16:08.621483 | 2017-12-28T22:53:03 | 2017-12-28T22:53:03 | 93,309,479 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import numpy as np
from numpy.linalg import eigvals
def run_experiment(niter=100):
    """Draw `niter` random 100x100 Gaussian matrices and collect, for each,
    the largest absolute eigenvalue."""
    K = 100
    return [np.abs(eigvals(np.random.randn(K, K))).max()
            for _ in range(niter)]
some_results = run_experiment()
print(np.max(some_results))
| [
"alexkirnas@ukr.net"
] | alexkirnas@ukr.net |
0ea962df3cf9877904f1ea8f5f1b0f67b3f881f2 | a3d65aa2be7872db1e6bd8da94764b71abdebd99 | /week09/labUseFib.py | 44fa4176083f798f99ee7111d33117a3ba1277b1 | [] | no_license | ssteffens/myWork | 2a2854b2aeed758a44969276bde36a79a6554558 | 354b413a97c0e1f5adf26b26ca09c2e069af4dff | refs/heads/main | 2023-04-07T06:20:37.737877 | 2021-04-18T15:26:58 | 2021-04-18T15:26:58 | 331,762,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | # This program prompts the user for a number and prints out the fibonacci sequence of that many numbers
# Author: Stefanie Steffens
import labMyFunctions
# Ask how many Fibonacci numbers to generate, then delegate to the helper module.
nTimes = int(input("how many: "))
print(labMyFunctions.fibonacci(nTimes))
| [
"77699063+ssteffens@users.noreply.github.com"
] | 77699063+ssteffens@users.noreply.github.com |
082ae04a5c36262e14182602b53ff46f5aa16fcf | 1f08436bab6cd03bcfb257e8e49405cbc265195a | /8_function/Sample/functions_ex3.py | 0b362e6fc10e31311f529f7db4e12747dd2833cc | [] | no_license | kuchunbk/PythonBasic | e3ba6322f256d577e37deff09c814c3a374b93b2 | a87135d7a98be8830d30acd750d84bcbf777280b | refs/heads/master | 2020-03-10T04:28:42.947308 | 2018-04-17T04:25:51 | 2018-04-17T04:25:51 | 129,192,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | '''Question:
Write a Python function to multiply all the numbers in a list.
'''
# Python code:
def multiply(numbers):
    """Return the product of all values in `numbers` (1 for an empty iterable)."""
    # math.prod replaces the hand-rolled accumulation loop (Python 3.8+).
    from math import prod
    return prod(numbers)
print(multiply((8, 2, 3, -1, 7)))
'''Output sample:
-336
''' | [
"kuchunbk@gmail.com"
] | kuchunbk@gmail.com |
8848ab074e6ffc479ca17e76bc03b10bd7d34f11 | 34e1988211e7b8a8210ffe8592fbd79a43d8d997 | /maddpg/main.py | a6e8f1be527c47635edaa11fe366ccc250c97ed9 | [] | no_license | SachinKonan/MARL | fb3d39041e52968917480d80492b96bf8fb32bde | 1c16e8c695b3175dd4c64d1c87589064db6f4fed | refs/heads/master | 2023-02-08T07:21:16.813947 | 2020-12-02T23:32:53 | 2020-12-02T23:32:53 | 317,743,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | from maddpg.MADDPG import MADDPG
import numpy as np
import torch as th
from maddpg.params import scale_reward
import gym
import ma_gym
# do not render the scene
env_name = 'PredatorPrey5x5-v0'
#random_seed = 543
#torch.manual_seed(random_seed)
env = gym.make(env_name)
reward_record = []
np.random.seed(1234)
th.manual_seed(1234)
n_agents = env.n_agents
n_actions = env.action_space[0].n
n_states = env.observation_space[0].shape[0]
capacity = 1000000
batch_size = 1000
n_episode = 2000
max_steps = 100
episodes_before_train = 100
win = None
param = None
maddpg = MADDPG(n_agents, n_states, n_actions, batch_size, capacity,
episodes_before_train)
FloatTensor = th.cuda.FloatTensor if maddpg.use_cuda else th.FloatTensor
for i_episode in range(n_episode):
obs = env.reset()
obs = np.stack(obs)
if isinstance(obs, np.ndarray):
obs = th.from_numpy(obs).float()
total_reward = 0.0
rr = np.zeros((n_agents,))
for t in range(max_steps):
# render every 100 episodes to speed up training
obs = obs.type(FloatTensor)
action = maddpg.select_action(obs).data.cpu()
obs_, reward, done, _ = env.step(action.numpy())
reward = th.FloatTensor(reward).type(FloatTensor)
obs_ = np.stack(obs_)
obs_ = th.from_numpy(obs_).float()
if t != max_steps - 1:
next_obs = obs_
else:
next_obs = None
total_reward += reward.sum()
rr += reward.cpu().numpy()
maddpg.memory.push(obs.data, action, next_obs, reward)
obs = next_obs
c_loss, a_loss = maddpg.update_policy()
maddpg.episode_done += 1
print('Episode: %d, reward = %f' % (i_episode, total_reward))
reward_record.append(total_reward)
np.save('rewards_predator', reward_record) | [
"sachinkonan480@gmail.com"
] | sachinkonan480@gmail.com |
e564aeb74503389e96f90e993d5b23fe405ed52a | bf8178bcf3aa09655fc827f5bc5a9e587f907fb7 | /utils/permissions.py | a304cd625a735603793c918db6f56f8c10d69033 | [] | no_license | Tinaz0996/django-twitter | fd2ebf4c718ce1107b2eaf5d38d01ac7a4884b96 | 2d64b37a04fa7656da2d886aa1418aaf42b10c56 | refs/heads/main | 2023-08-01T01:36:57.327371 | 2021-09-11T20:18:42 | 2021-09-11T20:18:42 | 364,114,579 | 0 | 0 | null | 2021-09-11T20:10:41 | 2021-05-04T02:07:56 | Python | UTF-8 | Python | false | false | 846 | py | from rest_framework.permissions import BasePermission
class IsObjectOwner(BasePermission):
    """Object-level permission: only the object's owner (``obj.user``) may
    access it.

    The class is generic; if other modules start using it, move it to a
    shared location.  DRF evaluates permissions one by one:
    - ``detail=False`` actions check only ``has_permission``;
    - ``detail=True`` actions check both ``has_permission`` and
      ``has_object_permission``.
    When denied, ``IsObjectOwner.message`` is shown to the client.
    """
    message = "You do not have permission to access this object"
    def has_object_permission(self, request, view, obj):
        # Grant access only when the requester owns the object.
        return obj.user == request.user
    def has_permission(self, request, view):
        # View-level check always passes; ownership is enforced per object.
        return True
"xiaoxuz0996@gmail.com"
] | xiaoxuz0996@gmail.com |
574a1e88961aa1174faddd9bd0c3fa7486d04a66 | 093111846764d93579255f3d0f19d6893a9317c1 | /mvs/mvs_server.py | a0c1b8a79c7641dd7a0ba1c0159f829adcf4c3a0 | [] | no_license | lightscalar/mvs | a3485aab8d76dbc03e6bdd768ec9552ea7208852 | 6e5c1d772064bea5c3e66a9fac10a7c89350dcc6 | refs/heads/master | 2021-01-16T18:08:53.816743 | 2017-08-15T16:14:25 | 2017-08-15T16:14:25 | 100,040,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | from mvs.unit import *
from pymongo import MongoClient
from master_controller import *
from pyro.basics import *
# Define currentl available units.
AVAILABLE_UNITS = [('Unit-01', 0)]
DATABASE_NAME = 'mvs_database'
client = MongoClient()
database = client[DATABASE_NAME]
# Instantiate available models.
units = {}
for _, unit_id in AVAILABLE_UNITS:
    units[unit_id] = Unit(unit_id)
# Construct a MasterController for each module.
controllers = {}
for unit_id, unit in units.items():
    controllers[unit_id] = MasterController(unit, database)
    controllers[unit_id].start()
# Attach the database.
Pyro.attach_db(database)
# Pyro-backed resource models served by the application.
# NOTE(review): this `Unit` class shadows the hardware `Unit` imported from
# mvs.unit above; the hardware units are instantiated before this point, but
# the shadowing is easy to trip over.
class Experiment(Pyro):
    pass
class UnitCommand(Pyro):
    pass
class Unit(Pyro):
    pass
class Target(Pyro):
    pass
class Image(Pyro):
    pass
# Define relationships.
Experiment.has_many(Target)
Target.has_many(Image)
# Register Modules,
Unit.delete_all()
for name, unit_id in AVAILABLE_UNITS:
    unit = {}
    unit['name'] = name
    unit['unit_id'] = unit_id
    unit['position'] = {'x': 0, 'y': 0,'z': 0}
    unit['integer_position'] = {'x': 0, 'y': 0,'z': 0}
    unit['camera_status'] = 'active'
    unit['motor_status'] = 'active'
    unit['is_translating'] = False
    unit['last_calibration'] = 0
    # Per-unit stream URL: port 1494, 1495, ... derived from the unit id.
    unit['image_url'] = 'http://localhost:{}'.format(unit['unit_id'] + 1493)
    Unit.create(unit)
# Launch the server.
app = Application(Pyro)
app.run()
app.run()
| [
"lightscalar@gmail.com"
] | lightscalar@gmail.com |
4eb1fd0550f77f77711b51db989afcf7ef39a245 | 8c3c2e1fe91592219f83d43c96ff66a5851fd361 | /ipythonlogs/final_exam.py | eaa2338361834d26a7326ace2ca39cb5861b585d | [] | no_license | tylerc-atx/u_st101 | 410def9331e8fc3b3845c87101cb76ff3b319eb0 | facb0b36e6b8e399a56adbcc9b0b5f3d1ab186c4 | refs/heads/master | 2021-06-09T20:13:43.005483 | 2017-02-02T02:04:06 | 2017-02-02T02:04:06 | 79,617,800 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | # coding: utf-8
from math import *
get_ipython().magic(u'ls ')
0.7 * 0.5
.3*.5
.7*.5
.3*.5
.35+.15
1
.35+.15
.5*(1/6)
.5*(1/8)
.083333333333333+.0625
.0833333333/.14583333333
(0.2**2)*(.8**5)
.4 / .14
.2*(.8**6)
0.05243*7
0.05243*2
21*(0.2**2)*(0.8**5)
7*.2*(.8**6)
.27525+0.36700
1-.64255
1-.367002
.8**7
1-.209715-.367002
(130-100)/15
.5**2
sqrt(1.25)
70*2.54
(2.54**2)
6.4516*25
4950/10000
1.96*sqrt((.495*.505)/10000)
1.645*sqrt((.495*.505)/10000)
1.282*sqrt((.495*.505)/10000)
1.96*sqrt((.495*.505)/10000)
0.495-.00822459
0.495+0.00822459
.79+.7+.73+.66+.65+.70+.34+.81+.71+.70
.79+.7+.73+.66+.65+.70+.74+.81+.71+.70
7.1899999/10
1.96*sqrt((0.719*0.281)/10)
0.719-0.279
0.719+0.279
1-.719
list = [.79, .70, .73, .66, .65, .70, .74, .81, .71, .70]
list
xiu
xiu = []
for d in list:
xiu.append((d-.719)**2)
xiu
total = 0
for n in xiu:
total += n
total
total / 10
1.96*sqrt(0.00233/10)
.719-.299
.719+.299
.72+.03
.72+.3
sqrt(.299)
.5468*1.96
sqrt(.00233)
.0483*1.96
.719-0.085
.719+0.085
list
var = 0
for x in list:
var += (x - 0.719)**2
var
sqrt(0.002329)
.719-.04826
.719+.04826
0.04826/sqrt(10)
0.01526*1.96
.72-.03
.72+.03
0+1+2
3/3
4/3
| [
"tylerc-atx@github.com"
] | tylerc-atx@github.com |
90e9c5289c22dc01e847cec0f98fd1c852758c18 | 364149ef8a4809c142bcabb0291fd441be10ff24 | /example_api/urls.py | d040cf4ba5798b63e07ce4a74d6e1b906c0fa94e | [] | no_license | awaistkd/django-rest-api-example | 8ea33822e9324fcf4cf441ba8968fbe404f1cc78 | e4738f416e6080f9f5dcf2954d5116dd8e47d630 | refs/heads/master | 2022-05-04T11:25:05.334252 | 2020-05-24T18:23:18 | 2020-05-24T18:23:18 | 193,535,019 | 0 | 0 | null | 2022-04-22T21:37:40 | 2019-06-24T15:48:44 | Python | UTF-8 | Python | false | false | 891 | py | """example_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from app import views
urlpatterns = [
    path('admin/', admin.site.urls),
    # Employee list endpoint, handled by app.views.EmployeeList.
    path('employees/', views.EmployeeList.as_view()),
]
| [
"aawais.nu@gmail.com"
] | aawais.nu@gmail.com |
9453d95d3efaed6a8de50c3edffaa2d636244d88 | af4abf0a22db1cebae466c56b45da2f36f02f323 | /parser/team29/analizer/statement/instructions/alter/alter_data_base.py | 4d67266a5977732607c3de3c6160d88dfad9e3a9 | [
"MIT"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 3,288 | py | from analizer.abstract import instruction
from analizer.typechecker.Metadata import Struct
from analizer.reports import Nodo
from storage.storageManager import jsonMode
# carga de datos
Struct.load()
class AlterDataBase(instruction.Instruction):
    """AST node for ALTER DATABASE ... RENAME TO / OWNER TO statements."""
    def __init__(self, option, name, newname, row, column):
        instruction.Instruction.__init__(self, row, column)
        self.option = option  # either "RENAME" or "OWNER"
        self.name = name  # name of the database being altered
        self.newname = newname  # new database name (RENAME) or new owner (OWNER)
    def execute(self, environment):
        """Apply the statement against the storage layer; returns a status string.

        Semantic/syntax errors are also appended to the module-level error
        lists on `instruction`.
        """
        Struct.load()
        try:
            if self.option == "RENAME":
                # Return codes from jsonMode.alterDatabase, as handled below:
                # 0 ok, 1 internal error, 2 source db missing, 3 target name
                # taken — presumably; confirm against the storage manager docs.
                valor = jsonMode.alterDatabase(self.name, self.newname)
                if valor == 2:
                    instruction.semanticErrors.append(
                        ["La base de datos " + str(self.name) + " no existe", self.row]
                    )
                    instruction.syntaxPostgreSQL.append(
                        "Error: 42000: La base de datos "
                        + str(self.name)
                        + " no existe"
                    )
                    return "La base de datos no existe: '" + self.name + "'."
                if valor == 3:
                    instruction.semanticErrors.append(
                        [
                            "La base de datos " + str(self.newname) + " ya existe",
                            self.row,
                        ]
                    )
                    instruction.syntaxPostgreSQL.append(
                        "Error: 42P04: La base de datos "
                        + str(self.newname)
                        + " ya existe"
                    )
                    return "El nuevo nombre para la base de datos existe"
                if valor == 1:
                    instruction.syntaxPostgreSQL.append("Error: XX000: Error interno")
                    return "Hubo un problema en la ejecucion de la sentencia"
                if valor == 0:
                    # Keep the type-checker metadata in sync with the storage rename.
                    Struct.alterDatabaseRename(self.name, self.newname)
                    return (
                        "Base de datos renombrada: " + self.name + " - " + self.newname
                    )
                return "Error ALTER DATABASE RENAME: " + self.newname
            elif self.option == "OWNER":
                valor = Struct.alterDatabaseOwner(self.name, self.newname)
                if valor == 0:
                    return "Instruccion ejecutada con exito ALTER DATABASE OWNER"
                instruction.syntaxPostgreSQL.append("Error: XX000: Error interno")
                return "Error ALTER DATABASE OWNER"
            instruction.syntaxPostgreSQL.append("Error: XX000: Error interno")
            return "Fatal Error ALTER DATABASE: " + self.newname
        except:
            # Any unexpected failure is reported as a generic statement error.
            instruction.syntaxPostgreSQL.append(
                "Error: P0001: Error en la instruccion ALTER DATABASE"
            )
    def dot(self):
        """Build the graphviz report node for this statement."""
        new = Nodo.Nodo("ALTER_DATABASE")
        iddb = Nodo.Nodo(self.name)
        new.addNode(iddb)
        optionNode = Nodo.Nodo(self.option)
        new.addNode(optionNode)
        valOption = Nodo.Nodo(self.newname)
        optionNode.addNode(valOption)
        return new
| [
"39706929+Yosoyfr@users.noreply.github.com"
] | 39706929+Yosoyfr@users.noreply.github.com |
4a6b78de21ffdffea8c1583ad2df047b3419aa55 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_117/ch73_2019_04_04_18_01_16_761758.py | 2a91c18fcec24852640d02b74224cf472d03ccae | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | def remove_vogais(letras):
i=0
while i<len(letras):
if letras[i]== 'a' or letras[i] == 'e' or letras[i] == 'i' or letras[i] == 'o' or letras[i] == 'u':
del letras[i]
else:
i+=1
return remove_vogais(letras)
| [
"you@example.com"
] | you@example.com |
4db31dc3dacc7d2647cce109b1b57cc1ddb0c41e | 95a2c37f40ed28c6052d3fed0c0f1b80846bffac | /datasources/views/__init__.py | a9638065b7f3d3f840e2ba8905802847088e2825 | [
"MIT"
] | permissive | PEDASI/PEDASI | eecbab837877f965aef4258e908baec9a389083a | 25a111ac7cf4b23fee50ad8eac6ea21564954859 | refs/heads/master | 2022-12-10T02:36:42.416249 | 2021-03-24T16:32:14 | 2021-03-24T16:32:14 | 144,545,470 | 0 | 1 | MIT | 2022-12-08T02:48:34 | 2018-08-13T07:39:15 | Python | UTF-8 | Python | false | false | 55 | py | from . import datasource, licence, user_permission_link | [
"J.Graham@software.ac.uk"
] | J.Graham@software.ac.uk |
66f9b0dba88e8ebaea2e7d87dfecb174463ccf41 | 0194a3c0a6055ec07320d1589815620729a1a85b | /univ_analysis/prob_dist_and_dens.py | 6073a4aae4c099ab8e8a0cfe13e1bb686baa11b8 | [] | no_license | williamherring/Thinkful-Data-Science-Course | b3e7b15b9866956f2e2627640b36c99e34e61d06 | 67a410f3a7ad31e87a35e29f32c8dade180567cf | refs/heads/master | 2021-01-17T16:04:40.570281 | 2016-06-30T20:49:39 | 2016-06-30T20:49:39 | 61,718,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
plt.figure()
test_data = np.random.normal(size=1000)
graph1 = stats.probplot(test_data, dist="norm", plot=plt)
plt.show()
plt.figure()
test_data2 = np.random.uniform(size=1000)
graph2 = stats.probplot(test_data2, dist="norm", plot=plt)
plt.show() | [
"william.herring.jr@gmail.com"
] | william.herring.jr@gmail.com |
39543716ce468bf3259746dcfca8cb0814b014c3 | 966245c3a47798f20648dd2819e1ee7839b9bdbb | /backend/Yiqi/Yiqi/apps/userOperation/migrations/0006_activityuserinfo_type.py | abdc9c6ea1591cbebd9bbcc64b1f27d44d80a2e5 | [] | no_license | wxSmallProgram/deerlet | b63e71884c99b2653a6c2bb064c5fbcb9347afcb | e6a29df17ec14c8db2b4ebe3523d44d4c94dfe32 | refs/heads/master | 2022-12-11T13:15:46.312823 | 2019-04-11T02:55:54 | 2019-04-11T02:55:54 | 180,702,704 | 0 | 1 | null | 2022-12-08T01:03:02 | 2019-04-11T02:54:39 | Python | UTF-8 | Python | false | false | 569 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-07 03:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userOperation', '0005_browseusermodel'),
]
operations = [
migrations.AddField(
model_name='activityuserinfo',
name='type',
field=models.CharField(choices=[('1', 'ๆดปๅจๅๅ ไบบ'), ('0', 'ๆดปๅจๅ่ตทไบบ')], default='1', max_length=1, verbose_name='ๆฅๅ็จๆท็ฑปๅ'),
),
]
| [
"zz634682577@163.com"
] | zz634682577@163.com |
92e1311a7a23fa68bd32337ca78affafc101e26a | 468a5d80429d301973a4938f106ed61d20d7913b | /BookMyDoc/doctor/forms.py | f0f20a02720360d8e9c0239b5517a95f42df6785 | [] | no_license | nayanbharada/bookmydoc | 1a41c8142c88997f2df3a15f93b543e9d41ac9ec | f00303f16ca675922f7519755516505694eeeea4 | refs/heads/master | 2023-02-20T19:33:54.761302 | 2021-01-25T16:56:45 | 2021-01-25T16:56:45 | 332,816,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | from django import forms
from django.forms import TimeField
from .models import DoctorProfile, Qualification, DocTimeSlot,DoctorDegree, DoctorUniversity
from django.contrib.auth import get_user_model
import datetime
User = get_user_model()
class DoctorProfileForm(forms.ModelForm):
    """ModelForm for DoctorProfile with `experience` forced to be required."""
    # Override the model field so the form always requires a value.
    experience = forms.CharField(required=True)
    class Meta:
        model = DoctorProfile
        fields = '__all__'
        # Excluded from the form; presumably assigned by the view — confirm.
        exclude = ('user', 'hospital')
class DoctorUserForm(forms.ModelForm):
    """Form for the doctor's account fields on the custom User model."""
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name',
                  'email', 'mobile_number', 'gender',
                  'profile_image']
        help_texts = {
            # Suppress Django's default username help text.
            'username': None
        }
def year_choices():
    """Return (year, year) choice tuples for 1987 through the current year."""
    this_year = datetime.date.today().year
    return [(year, year) for year in range(1987, this_year + 1)]
def current_year():
    """Return the current calendar year as an int."""
    today = datetime.date.today()
    return today.year
class QualificationForm(forms.ModelForm):
    """Form for a doctor's qualification (degree, university, completion year)."""
    degree = forms.ModelChoiceField(queryset=DoctorDegree.objects.all())
    university = forms.ModelChoiceField(queryset=DoctorUniversity.objects.all())
    # Dropdown limited to 1987..current year via year_choices/current_year.
    year_completion = forms.TypedChoiceField(choices=year_choices, initial=current_year)
    class Meta:
        model = Qualification
        fields = '__all__'
        # Excluded from the form; presumably set by the view — confirm.
        exclude = ('doctor_profile',)
class DocTimeSlotForm(forms.ModelForm):
    """Form for a doctor's appointment time slot.

    `from_time`/`to_time` are declared on the form class itself so the
    `class="time"` picker widgets actually take effect.
    """
    # BUG FIX: these two fields were previously declared inside `Meta`.
    # Django's ModelForm machinery only reads its documented Meta options
    # (model/fields/exclude/widgets/...), so fields declared there are
    # silently ignored and the time-picker widgets never applied.
    from_time = forms.TimeField(
        widget=forms.TimeInput(attrs={'class': 'time'}))
    to_time = forms.TimeField(widget=forms.TimeInput(attrs={'class': 'time'}))
    class Meta:
        model = DocTimeSlot
        fields = '__all__'
        # Excluded from the form; presumably set by the view — confirm.
        exclude = ('doc', 'hospital',)
| [
"47547578+nayanbharada@users.noreply.github.com"
] | 47547578+nayanbharada@users.noreply.github.com |
3fa693040c1097b4016610cbc5628712ffeda8a3 | b4eda202d51e2677f6f1584ed8371ff36f12b421 | /medium/happy_birthday.py | 1ba5d339611cd928440aa633b069bec330c1765b | [] | no_license | OM-Ra/interview_python | af3d66049aa985ae2fc2f1deb30988f7245a7a8c | 70d4464d9b54a5fce1a51765fa86688af3d1a8de | refs/heads/master | 2023-08-31T14:59:20.894025 | 2021-10-21T18:52:28 | 2021-10-21T18:52:28 | 386,342,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | # -*- coding: utf-8 -*-
'''
ะะปะตะณั ะฝะต ั
ะพัะตััั ััะฐะฝะพะฒะธัััั ััะฐััะต, ะฟะพััะพะผั ะพะฝ ัะตัะธะป ะฟัะฐะทะดะฝะพะฒะฐัั
ัะพะปัะบะพ ัะฒะพะต 20-ะปะตัะธะต (ะฝั ะธ 21 ะณะพะด ัะพะถะต, ะปะฐะดะฝะพ ัะถ). ะญัะพ ะฒะพะทะผะพะถะฝะพ,
ะตัะปะธ ะฟัะธะผะตะฝะธัั ะฝะตะบะพัะพััะต ะผะฐัะตะผะฐัะธัะตัะบะธะต ะฝะฐะฒัะบะธ. ะัะถะฝะพ ะฟัะพััะพ
ะฟะพะดะพะฑัะฐัั ะฟะพะดั
ะพะดััะตะต ะพัะฝะพะฒะฐะฝะธะต ัะธัะปะฐ!
ะะฐะฟัะธะผะตั, ะตัะปะธ ัะตะนัะฐั ะะปะตะณั 22 ะณะพะดะฐ, ััะพ 20 ั ะพัะฝะพะฒะฐะฝะธะตะผ 11.
ะะฝะฐะปะพะณะธัะฝะพ 65 ะปะตั โ ััะพ ัะพะฒะฝะพ 21 ะณะพะด ั ะพัะฝะพะฒะฐะฝะธะตะผ 32.
ะ ัะฐะบ ะดะฐะปะตะต.
ะกะพะทะดะฐะนัะต ััะฝะบัะธั, ะบะพัะพัะฐั ะฑัะดะตั ะฟัะธะฝะธะผะฐัั ัะตะบััะธะน ะฒะพะทัะฐัั age
ะธ ะฒะพะทะฒัะฐัะฐัั ยซะฝัะถะฝัะนยป ะฒะพะทัะฐัั (20 ะปะตั ะธะปะธ 21 ะณะพะด), ะฐ ัะฐะบะถะต
ะพัะฝะพะฒะฐะฝะธะต ัะธัะปะฐ ะฒ ัะพะผ ะถะต ัะพัะผะฐัะต, ััะพ ะฒ ะฟัะธะผะตัะฐั
.
ะัะธะผะตัั:
happy_birthday(22) "Oleg is just 20, in base 11!"
happy_birthday(65) "Oleg is just 21, in base 32!"
happy_birthday(83) "Oleg is just 21, in base 41!"
ะัะธะผะตัะฐะฝะธะต: ะฟะตัะตะดะฒะฐะตะผัะน ะฒ ััะฝะบัะธั ะฒะพะทัะฐัั ะฒัะตะณะดะฐ ะฑัะดะตั ะฑะพะปััะต 21.
'''
# ะัะตัะฐัะพั ะฑะตะท ะพะณัะฐะฝะธัะตะฝะธั.
from itertools import count
def happy_birthday(age: int) -> str:
'''
ะััะธัะปัะตั ะพัะฝะพะฒะฐะฝะธะต ะดะปั ัะธัะปะฐ 20 ะธะปะธ 21 ัะฐะบ, ััะพะฑั ะฟะพะปััะธะปะพัั
ัะธัะปะพ age. ะ ะฒะพะทะฒัะฐัะฐะตั ัััะพะบั ะฒ ัะพัะผะฐัะธัะพะฒะฐะฝะฝะพะผ ะฒะธะดะต ั
ัะตะทัะปััะฐัะพะผ.
'''
# ะะฐัะธะฝะฐั ั ะพัะฝะพะฒะฐะฝะธั 11 ะฑัะดะตั ะฟะพะดะฑะธัะฐัั ะฝะตะพะฑั
ะพะดะธะผะพะต ะพัะฝะพะฒะฐะฝะธะต.
for base in count(11):
# ะะตัะตะฑะพั ะฟะพ ะธัะบะพะผะพะผั ัะธัะปั ะฟะพะด ะพัะฝะพะฒะฐะฝะธะต base.
for nbr in (20, 21):
# ะัะพะฒะตััะตั ัะพะพัะฒะตัััะฒะธะต ัะธัะปะฐ ะธ ะพัะฝะพะฒะฐะฝะธั ั ะธัะบะพะผัะผ age.
if sum(digit * base ** index
for index, digit in enumerate(map(int, str(nbr)[::-1]))) == age:
# ะะพะณะดะฐ ัะธัะปะพ ะธ ะพัะฝะพะฒะฐะฝะธะต ะฟะพะดะพะฑัะฐะฝั ะฟัะฐะฒะธะปัะฝะพ ัะตะทัะปััะฐั
# ัะพัะผะฐัะธััะตััั ะฒ ะฝัะถะฝัั ัััะพะบั.
return f'Oleg is just {nbr}, in base {base}!'
tests = ((22, "Oleg is just 20, in base 11!"),
(65, "Oleg is just 21, in base 32!"),
(83, "Oleg is just 21, in base 41!"))
for age, check in tests:
print(happy_birthday(age=age) == check)
| [
"syndeft@gmail.com"
] | syndeft@gmail.com |
d8a5f27c8d1dd586ce84191a204d5d0378424983 | 875c5f4ef5892be5a31590d5b1ad4a2c6adecec9 | /dictionaries.py | efbc2c01f404e7063ca9eaf641f1df164477e2b9 | [] | no_license | hmhuan/advanced-python | 1304d4dad301a85af19855a691069efc2c5511ff | 17d86bb7c6d616f4d8d595c4358db311a2d7d884 | refs/heads/master | 2023-07-12T12:36:51.598489 | 2021-08-16T09:18:44 | 2021-08-16T09:18:44 | 387,825,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | my_dict = {"name": "Ming", "age": 21, "city": "New York"}
print(my_dict)
name = my_dict["name"]
print(name)
my_dict["email"] = "abc@gmail.com"
print(my_dict)
# delete key - value
del my_dict["email"]
print(my_dict)
city = my_dict.pop("city")
print(city)
print(my_dict)
print(my_dict.popitem())
# loop in dictionary
my_dict = {"name": "Ming", "age": 21, "city": "New York"}
for key in my_dict:
print(my_dict[key])
pass
for value in my_dict.values():
print(value)
pass
for key, value in my_dict.items():
print(key, value)
pass
# copy a dictionary
new_my_dict = my_dict.copy()
new_my_dict["name"] = "Tokyo"
print(new_my_dict)
print(my_dict)
# merge dictionaries using update()
# existing keys are overwritten, new keys are added
my_dict.update(new_my_dict)
print(my_dict)
# use tuple (immutable) as a key - list cannot because mutable
my_dict = {(1, 2): 1, (2, 3): 0}
print(my_dict)
# nested dictionaries -> should you copy not using pass reference
my_dict_1 = {"name": "Ming", "age": 21, "city": "New York"}
my_dict_2 = {"name": "Tokyo", "age": 22, "city": "Las Vegas"}
nested_dict = {"dict1": my_dict_1, "dict2": my_dict_2}
my_dict_1["name"] = "kanojo"
print(my_dict_1)
nested_dict["dict1"]["name"] = "naruto"
print(my_dict_1)
print(nested_dict)
| [
"huan.huynh@linecorp.com"
] | huan.huynh@linecorp.com |
350c55ba9706c064dfe760bd360f7497b79236ac | 8391636811f5a768e3948454363c2694a1347c92 | /examples/perfetto/aggregate_scripts/aggregate_subject.py | dcca482a7f5e04461069e7ebd0b0ca92d5ea7fe2 | [] | no_license | S2-group/android-runner | a0256e4d78fc2e16fa22ea7c8d13519fe1582236 | f0fe5f815064416ed14aadcad90f89b2674947db | refs/heads/master | 2023-06-25T11:16:58.252947 | 2023-06-19T14:12:21 | 2023-06-19T14:12:21 | 143,003,319 | 29 | 106 | null | 2023-09-11T17:00:55 | 2018-07-31T11:09:08 | Python | UTF-8 | Python | false | false | 359 | py | from AndroidRunner.Plugins.perfetto.trace_wrapper import PerfettoTrace
import os
def main(dummy, path):
for perfetto_trace_file in os.listdir(path):
trace = PerfettoTrace(perfetto_trace_file, trace_processor_path="/home/pi/android-runner/AndroidRunner/Plugins/perfetto/trace_processor")
data = trace.query("SELECT * FROM TABLE")
| [
"omar.website@gmail.com"
] | omar.website@gmail.com |
d4661de7781d69bf47240b7d4a8effe187d22ad9 | dea3e6876afe2fdae5b5b4a3f429cfce81b7a0a1 | /tests/test_frameSetUtils.py | 963a1cbd09e97306839efc9adabd9dc07e8a72a9 | [] | no_license | frossie-shadow/afw | 741f09cd202a5a9cc3b3943696a389b94a4ee404 | a1c44404738dcd73ff400e3bcd176ffe4dd51aab | refs/heads/master | 2021-01-19T17:49:51.003432 | 2017-08-19T03:11:56 | 2017-08-19T03:11:56 | 35,149,129 | 0 | 0 | null | 2015-05-06T08:54:49 | 2015-05-06T08:54:49 | null | UTF-8 | Python | false | false | 3,063 | py | from __future__ import absolute_import, division, print_function
import unittest
from lsst.afw.coord import IcrsCoord
from lsst.afw.geom import arcseconds, degrees, makeCdMatrix, Point2D
from lsst.afw.geom.detail import makeTanWcsMetadata, readFitsWcs, readLsstSkyWcs
import lsst.utils.tests
PrintStrippedNames = False
class FrameSetUtilsTestCase(lsst.utils.tests.TestCase):
"""This is sparse because SkyWcs unit tests test much of this package
"""
def setUp(self):
# arbitrary values
self.crpix = Point2D(100, 100)
self.crval = IcrsCoord(30 * degrees, 45 * degrees)
self.scale = 1.0 * arcseconds
def makeMetadata(self):
"""Return a WCS that is typical for an image
It will contain 32 cards:
- 14 standard WCS cards
- 15 standard cards:
- SIMPLE, BITPIX, NAXIS, NAXIS1, NAXIS2, BZERO, BSCALE
- DATE-OBS, MJD-OBS, TIMESYS
- EXPTIME
- 2 COMMENT cards
- INHERIT
- EXTEND
- LTV1 and LTV2, an IRAF convention LSST uses for image XY0
- 1 nonstandard card
"""
# arbitrary values
orientation = 0 * degrees
flipX = False
metadata = makeTanWcsMetadata(
crpix = self.crpix,
crval = self.crval,
cdMatrix = makeCdMatrix(scale=self.scale, orientation=orientation, flipX=flipX),
)
self.assertEqual(metadata.nameCount(), 14)
metadata.add("SIMPLE", True)
metadata.add("BITPIX", 16)
metadata.add("NAXIS", 2)
metadata.add("NAXIS1", 500)
metadata.add("NAXIS2", 200)
metadata.add("BZERO", 32768)
metadata.add("BSCALE", 1)
metadata.add("TIMESYS", "UTC")
metadata.add("UTC-OBS", "12:04:45.73")
metadata.add("DATE-OBS", "2006-05-20")
metadata.add("EXPTIME", 5.0)
metadata.add("COMMENT", "a comment")
metadata.add("COMMENT", "another comment")
metadata.add("EXTEND", True)
metadata.add("INHERIT", False)
metadata.add("LTV1", 5)
metadata.add("LTV2", -10)
metadata.add("ZOTHER", "non-standard")
return metadata
def testReadFitsWcsStripMetadata(self):
metadata = self.makeMetadata()
self.assertEqual(len(metadata.toList()), 32)
readFitsWcs(metadata, strip=False)
self.assertEqual(len(metadata.toList()), 32)
readFitsWcs(metadata, strip=True)
self.assertEqual(len(metadata.toList()), 18)
def testReadLsstSkyWcsStripMetadata(self):
metadata = self.makeMetadata()
self.assertEqual(len(metadata.toList()), 32)
readLsstSkyWcs(metadata, strip=False)
self.assertEqual(len(metadata.toList()), 32)
readLsstSkyWcs(metadata, strip=True)
self.assertEqual(len(metadata.toList()), 18)
class TestMemory(lsst.utils.tests.MemoryTestCase):
pass
def setup_module(module):
lsst.utils.tests.init()
if __name__ == "__main__":
lsst.utils.tests.init()
unittest.main()
| [
"rowen@uw.edu"
] | rowen@uw.edu |
4c59bf2329fd1567caddbca76105185740dad7e5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02987/s680269618.py | 10f62cd0a31d38e548bfb5cbca9157ed13e880b2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | S = input()
if S[0] == S[1] and S[2] == S[3] and len(set(S)) == 2:
print('Yes')
elif S[0] == S[2] and S[1] == S[3] and len(set(S)) == 2:
print('Yes')
elif S[0] == S[3] and S[1] == S[2] and len(set(S)) == 2:
print('Yes')
else:
print('No')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8978c7239669262c75ee2bb775b37c2f64e6e9c2 | 55e29dec1872a50336b0ab94c38d3fbb4e50104d | /swot.py | 0f5ec7709e2458153f8219ecb9939b591dd89358 | [] | no_license | tiltedwrld/swot | 35ae82bde27b0b3d5e4f4cb4f7975c5f77249783 | 81c2f286c08fdec7c59cedd5465def14c76b0266 | refs/heads/main | 2023-03-28T01:07:21.397240 | 2021-03-26T19:54:40 | 2021-03-26T19:54:40 | 351,893,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,681 | py | # -*- coding: utf-8 -*-
"""swot.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/14Aq7oPiSKFWJoK129_LaO8GiWCzp-3b7
"""
print('ะััะฟะฟะฐ: 20ะะ-3')
print('ะคะะ: ะะตัะฑะธััะบะธะน ะะฐัะฒะตะน ะะฐะบัะธะผะพะฒะธั')
!ln -fs /usr/share/zoneinfo/Europe/Moscow /etc/localtime
!date
from google.colab import drive
drive.mount('/content/drive/')
import os
print(os.getcwd())
print(os.listdir('./'))
print(os.listdir('/content/drive'))
print(os.listdir('/content/drive/MyDrive'))
# Commented out IPython magic to ensure Python compatibility.
import os
import time
from google.colab import auth
auth.authenticate_user()
!pip install --upgrade gspread
import gspread
from oauth2client.client import GoogleCredentials
print('\nะ ะะะะขะ ะก ะขะะะะะฆะะ')
# %ll -lAF /content/drive/MyDrive/
gs = gspread.authorize(GoogleCredentials.get_application_default())
os.stat('/content/drive/MyDrive/restik.gsheet')
table = gs.open_by_key('1xfLXwP0XdgeExdyYHMYrpFK--zujy8xTm-nyxRKIsu8')
worksheet = table.worksheet('Strengths')
rows = worksheet.get_all_values()
name = list()
actions = list()
importance = list()
probability = list()
power = list()
power_sh = list()
power_raw = list()
i=0
for row in rows:
if(i>0):
print (i, row)
name.append(row[0])
actions.append(row[1])
importance.append(int(row[2]))
probability.append(float(row[3]))
power.append(int(row[2])*float(row[3]))
power_raw = list()
power_raw.append(int(row[2])*float(row[3])) #ััะธัะฐะตััั ัะธะปะฐ
power_sh.append(power_raw)
i+=1
worksheet.update('E2:E8', power_sh)
print('\n',name, actions, importance, probability, power, sep='\n', end='\n\n')
strengths_sum = sum(power)
print('ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะธะปัะฝัะน ััะพัะพะฝ:', strengths_sum)
worksheet.update('F2', strengths_sum)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
i=0
labels = list()
cols = worksheet.col_values(1)
for col in cols: #ะดะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน name
if(i>0):
labels.append(i)
i+=1
width = 0.4 #ัะธัะธะฝะฐ ะบะพะปะพะฝะพะบ
fig, ax = plt.subplots()
x = np.arange(len(labels))
rects = ax.bar(x - width/2, power, width)
ax.set_ylabel('ะะพัะฝะพััั ะฒะพะทะดะตะนััะฒะธั')
ax.set_title('ะกะธะปัะฝัะต ััะพัะพะฝั')
ax.set_xticks(x)
ax.set_xticklabels(labels)
def autolabel(rects):
#ะะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน ะฝะฐะด ะดะธะณัะฐะผะผะฐะผะธ
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3),
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects) #ะทะฝะฐัะตะฝะธั ะฝะฐะด ะบะพะปะพะฝะบะฐะผะธ
fig.tight_layout()
plt.show()
j=0
for col in cols: #ะฟะตัะฐัั ะพะฑะพะทะฝะฐัะตะฝะธะน ะฟะพะด ะดะธะฐะณัะฐะผะผะพะน
if(j>0):
print(j, '-', col)
j+=1
# Commented out IPython magic to ensure Python compatibility.
import os
import time
from google.colab import auth
auth.authenticate_user()
!pip install --upgrade gspread
import gspread
from oauth2client.client import GoogleCredentials
print('\nะ ะะะะขะ ะก ะขะะะะะฆะะ')
# %ll -lAF /content/drive/MyDrive/
gs = gspread.authorize(GoogleCredentials.get_application_default())
os.stat('/content/drive/MyDrive/restik.gsheet')
table = gs.open_by_key('1xfLXwP0XdgeExdyYHMYrpFK--zujy8xTm-nyxRKIsu8')
worksheet = table.worksheet('Weaknesses')
rows = worksheet.get_all_values()
name = list()
actions = list()
importance = list()
probability = list()
power = list()
power_sh = list()
power_raw = list()
i=0
for row in rows:
if(i>0):
print (i, row)
name.append(row[0])
actions.append(row[1])
importance.append(int(row[2]))
probability.append(float(row[3]))
power.append(int(row[2])*float(row[3]))
power_raw = list()
power_raw.append(int(row[2])*float(row[3])) #ััะธัะฐะตััั ัะธะปะฐ
power_sh.append(power_raw)
i+=1
worksheet.update('E2:E8', power_sh)
print('\n',name, actions, importance, probability, power, sep='\n', end='\n\n')
weaknesses_sum = sum(power)
print('ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะธะปัะฝัะน ััะพัะพะฝ:', weaknesses_sum)
worksheet.update('F2', weaknesses_sum)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
i=0
labels = list()
cols = worksheet.col_values(1)
for col in cols: #ะดะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน name
if(i>0):
labels.append(i)
i+=1
width = 0.4 #ัะธัะธะฝะฐ ะบะพะปะพะฝะพะบ
fig, ax = plt.subplots()
x = np.arange(len(labels))
rects = ax.bar(x - width/2, power, width)
ax.set_ylabel('ะะพัะฝะพััั ะฒะพะทะดะตะนััะฒะธั')
ax.set_title('ะกะปะฐะฑัะต ััะพัะพะฝั')
ax.set_xticks(x)
ax.set_xticklabels(labels)
def autolabel(rects):
#ะะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน ะฝะฐะด ะดะธะณัะฐะผะผะฐะผะธ
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3),
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects) #ะทะฝะฐัะตะฝะธั ะฝะฐะด ะบะพะปะพะฝะบะฐะผะธ
fig.tight_layout()
plt.show()
j=0
for col in cols: #ะฟะตัะฐัั ะพะฑะพะทะฝะฐัะตะฝะธะน ะฟะพะด ะดะธะฐะณัะฐะผะผะพะน
if(j>0):
print(j, '-', col)
j+=1
# Commented out IPython magic to ensure Python compatibility.
import os
import time
from google.colab import auth
auth.authenticate_user()
!pip install --upgrade gspread
import gspread
from oauth2client.client import GoogleCredentials
print('\nะ ะะะะขะ ะก ะขะะะะะฆะะ')
# %ll -lAF /content/drive/MyDrive/
gs = gspread.authorize(GoogleCredentials.get_application_default())
os.stat('/content/drive/MyDrive/restik.gsheet')
table = gs.open_by_key('1xfLXwP0XdgeExdyYHMYrpFK--zujy8xTm-nyxRKIsu8')
worksheet = table.worksheet('Opportunities')
rows = worksheet.get_all_values()
name = list()
actions = list()
importance = list()
probability = list()
power = list()
power_sh = list()
power_raw = list()
i=0
for row in rows:
if(i>0):
print (i, row)
name.append(row[0])
actions.append(row[1])
importance.append(int(row[2]))
probability.append(float(row[3]))
power.append(int(row[2])*float(row[3]))
power_raw = list()
power_raw.append(int(row[2])*float(row[3])) #ััะธัะฐะตััั ัะธะปะฐ
power_sh.append(power_raw)
i+=1
worksheet.update('E2:E8', power_sh)
print('\n',name, actions, importance, probability, power, sep='\n', end='\n\n')
opportunities_sum = sum(power)
print('ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะธะปัะฝัะน ััะพัะพะฝ:', opportunities_sum)
worksheet.update('F2', opportunities_sum)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
i=0
labels = list()
cols = worksheet.col_values(1)
for col in cols: #ะดะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน name
if(i>0):
labels.append(i)
i+=1
width = 0.4 #ัะธัะธะฝะฐ ะบะพะปะพะฝะพะบ
fig, ax = plt.subplots()
x = np.arange(len(labels))
rects = ax.bar(x - width/2, power, width)
ax.set_ylabel('ะะพัะฝะพััั ะฒะพะทะดะตะนััะฒะธั')
ax.set_title('ะะพะทะผะพะถะฝะพััะธ')
ax.set_xticks(x)
ax.set_xticklabels(labels)
def autolabel(rects):
#ะะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน ะฝะฐะด ะดะธะณัะฐะผะผะฐะผะธ
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3),
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects) #ะทะฝะฐัะตะฝะธั ะฝะฐะด ะบะพะปะพะฝะบะฐะผะธ
fig.tight_layout()
plt.show()
j=0
for col in cols: #ะฟะตัะฐัั ะพะฑะพะทะฝะฐัะตะฝะธะน ะฟะพะด ะดะธะฐะณัะฐะผะผะพะน
if(j>0):
print(j, '-', col)
j+=1
# Commented out IPython magic to ensure Python compatibility.
import os
import time
from google.colab import auth
auth.authenticate_user()
!pip install --upgrade gspread
import gspread
from oauth2client.client import GoogleCredentials
print('\nะ ะะะะขะ ะก ะขะะะะะฆะะ')
# %ll -lAF /content/drive/MyDrive/
gs = gspread.authorize(GoogleCredentials.get_application_default())
os.stat('/content/drive/MyDrive/restik.gsheet')
table = gs.open_by_key('1xfLXwP0XdgeExdyYHMYrpFK--zujy8xTm-nyxRKIsu8')
worksheet = table.worksheet('Threats')
rows = worksheet.get_all_values()
name = list()
actions = list()
importance = list()
probability = list()
power = list()
power_sh = list()
power_raw = list()
i=0
for row in rows:
if(i>0):
print (i, row)
name.append(row[0])
actions.append(row[1])
importance.append(int(row[2]))
probability.append(float(row[3]))
power.append(int(row[2])*float(row[3]))
power_raw = list()
power_raw.append(int(row[2])*float(row[3])) #ััะธัะฐะตััั ัะธะปะฐ
power_sh.append(power_raw)
i+=1
worksheet.update('E2:E8', power_sh)
print('\n',name, actions, importance, probability, power, sep='\n', end='\n\n')
threats_sum = sum(power)
print('ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะธะปัะฝัะน ััะพัะพะฝ:', threats_sum)
worksheet.update('F2', threats_sum)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
i=0
labels = list()
cols = worksheet.col_values(1)
for col in cols: #ะดะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน name
if(i>0):
labels.append(i)
i+=1
width = 0.4 #ัะธัะธะฝะฐ ะบะพะปะพะฝะพะบ
fig, ax = plt.subplots()
x = np.arange(len(labels))
rects = ax.bar(x - width/2, power, width)
ax.set_ylabel('ะะพัะฝะพััั ะฒะพะทะดะตะนััะฒะธั')
ax.set_title('ะฃะณัะพะทั')
ax.set_xticks(x)
ax.set_xticklabels(labels)
def autolabel(rects):
#ะะพะฑะฐะฒะปะตะฝะธะต ะทะฝะฐัะตะฝะธะน ะฝะฐะด ะดะธะณัะฐะผะผะฐะผะธ
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3),
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects) #ะทะฝะฐัะตะฝะธั ะฝะฐะด ะบะพะปะพะฝะบะฐะผะธ
fig.tight_layout()
plt.show()
j=0
for col in cols: #ะฟะตัะฐัั ะพะฑะพะทะฝะฐัะตะฝะธะน ะฟะพะด ะดะธะฐะณัะฐะผะผะพะน
if(j>0):
print(j, '-', col)
j+=1
#ะบะพะฟะธัะพะฒะฐะฝะธะต ะทะฝะฐัะตะฝะธะน ัะธะปั ะธะท ะปะธััะพะฒ
worksheet = table.worksheet('Strengths')
strengths_power = worksheet.acell('F2').value
worksheet = table.worksheet('Weaknesses')
weaknesses_power = worksheet.acell('F2').value
worksheet = table.worksheet('Opportunities')
opportunities_power = worksheet.acell('F2').value
worksheet = table.worksheet('Threats')
threats_power = worksheet.acell('F2').value
#ะดะพะฑะฐะฒะปะตะฝะธะต ะฒ ัะฐะฑะปะธัั result
worksheet = table.worksheet('Result')
worksheet.update('A1', 'ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะธะปัะฝัั
ััะพัะพะฝ')
worksheet.update('A2', strengths_power)
worksheet.update('B1', 'ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะปะฐะฑัั
ััะพัะพะฝ')
worksheet.update('B2', weaknesses_power)
worksheet.update('A4', 'ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ะฒะพะทะผะพะถะฝะพััะตะน')
worksheet.update('A5', opportunities_power)
worksheet.update('B4', 'ะกัะผะผะฐัะฝะฐั ัะธะปะฐ ัะณัะพะท')
worksheet.update('B5', threats_power)
#ะฟะพะดััะตั ัะตะทัะปััะฐัะฐ ะฐะฝะฐะปะธะทะฐ ะธ ะดะพะฑะฐะฒะปะตะฝะธะต ะตะณะพ ะฒ ัะฐะฑะปะธัั
result = float(strengths_power) - 1*float(weaknesses_power) + float(opportunities_power) - 1*float(threats_power)
print('ะ ะตะทัะปััะฐั:', result)
worksheet = table.worksheet('Result')
worksheet.update('A7', 'ะ ะตะทัะปััะฐั')
worksheet.update('A8', result)
#ัะพะทะดะฐะฝะธะต ะณัะฐัะธะบะฐ
def matplot(element):
import matplotlib.pyplot as plt
x=list()
x_float=list()
title=list()
y_float=list()
x_float = [1, 2, 3, 4, 5]
y_float = [float(strengths_power), -1*float(weaknesses_power), float(opportunities_power), -1*float(threats_power), result]
title = [ "ะกะธะปัะฝัะต ััะพัะพะฝั", "ะกะปะฐะฑัะต ััะพัะพะฝั", "ะะพะทะผะพะถะฝะพััะธ", "ะฃะณะพัะพะทั", "ะ ะตะทัะปััะฐั"]
x_pos=list()
i=0
for i in range(x_float.__len__()):
x_pos.append(i)
fig=plt.figure(figsize=(8,6), dpi=72)
plt.bar(x_pos, y_float, width=0.75, align='edge', alpha=0.4)
plt.xticks(x_pos, x_float, fontsize=14)
plt.xlabel('ะะฑะพะทะฝะฐัะตะฝะธั', fontsize=14)
plt.ylabel('ะะพัะฝะพััั ะฒะพะทะดะตะนััะฒะธั', fontsize=14)
plt.title('SWOT', fontsize=14)
plt.grid(True, color='r', linestyle='-', linewidth=2)
plt.show()
#ะฟะตัะฐัั ะทะฐะณะพะปะพะฒะบะพะฒ
for i in range(title.__len__()):
print(i+1, " - ", title[i])
i += 1 | [
"noreply@github.com"
] | noreply@github.com |
404d4ab1329f9f242c77fd139657e528fb917c08 | 0f9609807405a3dd9de89e2eff84e399b7a4924c | /lagagogn-django/domar/migrations/0007_auto_20181128_1027.py | c9727e842de481f3101ad8943c856921df400867 | [] | no_license | ZJONSSON/domar | d2098a331f14dfb96da3a14ed32ad53a5b9942a2 | f51dc7e19c2ceb29d3fbd3fe70fb4711f648dcde | refs/heads/master | 2020-04-13T09:29:19.855432 | 2018-12-19T09:14:52 | 2018-12-19T09:14:52 | 163,112,930 | 0 | 0 | null | 2018-12-25T21:39:15 | 2018-12-25T21:39:15 | null | UTF-8 | Python | false | false | 504 | py | # Generated by Django 2.1.3 on 2018-11-28 10:27
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('domar', '0006_domur_judge'),
]
operations = [
migrations.AlterField(
model_name='domur',
name='tags',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, default=list, size=None),
),
]
| [
"pallih@gogn.in"
] | pallih@gogn.in |
645255515c3308d2c803eae9a5f02d2301026483 | b02c7897dbe819ef96694a09ec4995fd4ee7ebfc | /{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/__init__.py | 69f3fb102e7c7ed42424f0b1bbef3fcf946b9a9b | [
"BSD-3-Clause"
] | permissive | mlf4aiur/cookiecutter-daemon | fd2c73ba9ba8b2d7927788c5c2a1e14c9c6c78c9 | 510b39c38d11f9b556185b24c9a1eca79665db5c | refs/heads/master | 2020-05-18T15:17:12.654210 | 2014-12-03T07:00:32 | 2014-12-03T07:00:32 | 27,474,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | #!/usr/bin/env python
"""Package for {{cookiecutter.project_name}}."""
__project__ = '{{cookiecutter.project_name}}'
__version__ = '{{cookiecutter.version}}'
VERSION = __project__ + '-' + __version__
| [
"mlf4aiur@gmail.com"
] | mlf4aiur@gmail.com |
ba7639ad6a9c59bd8170920acdd5a7a269c096e7 | e5270423abf42482d956548333d4105d684cca31 | /trails/feeds/malc0de.py | 09d204f3da28e20de8dc18f4ac03427f7557e5e3 | [
"MIT"
] | permissive | ana2s007/maltrail | 2f5f556d222b6f1ba78affedce97400da125232a | 80979e76c33dca58313141a0e4a2626b609c3ebf | refs/heads/master | 2021-01-16T22:49:25.319116 | 2016-01-28T13:04:57 | 2016-01-28T13:04:57 | 50,610,789 | 1 | 0 | null | 2016-01-28T20:18:20 | 2016-01-28T20:18:20 | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/malc0de.ipset"
__check__ = "malc0de"
__info__ = "malware distribution"
__reference__ = "malc0de.com"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
| [
"miroslav.stampar@gmail.com"
] | miroslav.stampar@gmail.com |
761115aa3bdc406dc4f4c52ccd593a7e80e5d5c2 | c1ad248b8172c63f7756f14cb50f96cf726f90d0 | /tensorflow_examples/lite/model_maker/core/utils/ondevice_scann_builder.py | 9031bc02d9da8875c3b62beb2465f38818ce479a | [
"Apache-2.0"
] | permissive | slmsshk/examples | 846ec816c0c6d095cf49e4054df85a80375f4b7f | cd89a54b9e9577bebd22a9f083526ca8cb2b58b5 | refs/heads/master | 2022-08-16T19:59:03.695027 | 2022-08-07T07:30:14 | 2022-08-07T07:30:14 | 256,999,865 | 1 | 0 | Apache-2.0 | 2020-04-19T12:59:03 | 2020-04-19T12:59:01 | null | UTF-8 | Python | false | false | 1,856 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ScannBuilder class for on-device applications."""
from google.protobuf import text_format
from scann.proto import scann_pb2
from scann.scann_ops.py import scann_builder
from scann.scann_ops.py import scann_ops_pybind
def builder(db, num_neighbors, distance_measure):
"""pybind analogue of builder() in scann_ops.py for the on-device use case."""
def builder_lambda(db, config, training_threads, **kwargs):
return scann_ops_pybind.create_searcher(db, config, training_threads,
**kwargs)
return OndeviceScannBuilder(
db, num_neighbors, distance_measure).set_builder_lambda(builder_lambda)
class OndeviceScannBuilder(scann_builder.ScannBuilder):
"""ScannBuilder for on-device applications."""
def create_config(self):
"""Creates the config."""
config = super().create_config()
config_proto = scann_pb2.ScannConfig()
text_format.Parse(config, config_proto)
# We don't support residual quantization on device so we need to disable
# use_residual_quantization.
if config_proto.hash.asymmetric_hash.use_residual_quantization:
config_proto.hash.asymmetric_hash.use_residual_quantization = False
return text_format.MessageToString(config_proto)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
86ddec28dee78756b57aa131bc70d9140872cc04 | 08c5ee41d40f9f14a3c6c3cb48515ed8467845e3 | /python/kfs_lib.py | 6c10b5ce828b790d815030153018533c82f3b5b2 | [
"Apache-2.0"
] | permissive | fdgonthier/kas | 3f971bda691b8c6db7a6343ea419088d1ac10386 | c82a3723085cdd9fec25efca1209e62db09edd72 | refs/heads/master | 2021-01-17T21:38:07.362287 | 2013-08-14T20:54:08 | 2013-08-14T20:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,451 | py | import os, ConfigParser, hashlib, stat, struct, logging
# from kpython
import kbase
from kpg import *
from StringIO import StringIO
# local
import kanp
import kcd_client
from kcdpg import KCD_KWS_LOGIN_TYPE_KWMO
# KFS Constants.
KFS_CHUNK_SIZE = 256 * 1024
KFS_FILE = 1
KFS_DIR = 2
KFS_NODE_TYPES = [KFS_FILE, KFS_DIR]
KFS_STATUS_PENDING = 0
KFS_STATUS_OK = 1
KFS_STATUS_DELETED = 2
KFS_STATUSES = [KFS_STATUS_PENDING, KFS_STATUS_OK, KFS_STATUS_DELETED]
KFS_ROOT_INODE_ID = 0
KFS_ROOT_COMMIT_ID = 0
# Put after imports so log is not overwridden by an imported module.
log = logging.getLogger(__name__)
# Replace bad characters in a skurl email subject for directory creation.
def get_kfs_skurl_escaped_subject(s, replacement_char='_'):
allowed_chars = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
new_s = ''
for c in s:
if allowed_chars[ord(c)] == 1:
new_s += c
else:
new_s += replacement_char
return new_s
# Convert a skurl email subject into a valid KFS directory.
def get_kfs_skurl_subject(date, subject):
d = time.strftime('%Y-%m-%d %Hh%Mm%S', time.gmtime(date))
if subject == '':
s = 'No subject'
else:
s = get_kfs_skurl_escaped_subject(subject)
s = s.strip()
return d + ' ' + s;
# This checks path and replace characters when needed so that the result is valid.
def kfs_convert_path_name(path_name):
invalid_words = [
"", "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5",
"COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4",
"LPT5", "LPT6", "LPT7", "LPT8", "LPT9"
]
allowed_chars = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
new_str = ""
# Replace "#".
path_name = path_name.replace("#", "#%03i" % ( ord("#") ) )
# Replace bad words. Return immediately the converted string if a bad word is found.
for invalid_word in invalid_words:
if path_name == invalid_word:
for char in path_name:
new_str += "#%03i" % ( ord(char) )
return new_str
# Replace bad characters.
for char in path_name:
if allowed_chars[ord(char)]:
new_str += char
else:
new_str += "#%03i" % ( ord(char) )
# Replace bad leading characters.
char = new_str[0:1]
if char == " ":
new_str = new_str[1:] + "#%03i" % ( ord(char) )
# Replace bad trailing characters.
char = new_str[-1:]
if char == ".":
new_str = new_str[:-1] + "#%03i" % ( ord(char) )
return new_str
# This class represents a Web KFS node.
class WebKFSNode(kbase.PropStore):
def __init__(self, workspace_id=None, share_id=None, inode_id=None):
self.workspace_id = workspace_id
self.share_id = share_id
self.inode_id = inode_id
def from_dict(self, d):
self.workspace_id = d['workspace_id']
self.share_id = d['share_id']
self.inode_id = d['inode_id']
return self
def __str__(self):
return "<%s ws_id=%s share_id=%s inode_id=%s>" % \
( self.__class__.__name__, str(self.workspace_id), str(self.share_id), str(self.inode_id) )
# This class represents a Web KFS directory.
class WebKFSDirectory(WebKFSNode):
pass
# This class represents a Web KFS file.
class WebKFSFile(WebKFSNode):
pass
# Represent a directory to delete (new style)
class KFSOpDirDelete(object):
# Accessible attributes
__slots__ = ['kfs_op', 'inode_id', 'commit_id', 'kfs_error']
def __init__(self, inode_id, commit_id):
self.kfs_op = kanp.KANP_KFS_OP_DELETE_DIR
self.inode_id = inode_id
self.commit_id = commit_id
self.kfs_error = None
# Represent a file to delete (new style).
class KFSOpFileDelete(object):
# Accessible attributes
__slots__ = ['kfs_op', 'inode_id', 'commit_id', 'kfs_error']
def __init__(self, inode_id, commit_id):
self.kfs_op = kanp.KANP_KFS_OP_DELETE_FILE
self.inode_id = inode_id
self.commit_id = commit_id
self.kfs_error = None
# NOT USED #
if 0:
# This class represents a KFS directory.
class KFSDirectory(kbase.PropStore):
def __init__(self):
self.workspace_id = 0
self.share_id = 0
self.inode = 0
self.parent_inode_id = 0
self.commit_id = 0
self.user_id = 0
self.date = 0
self.name = ''
self.kfs_error = None
# This class represents a KFS file.
class KFSFile(kbase.PropStore):
def __init__(self):
self.workspace_id = 0
self.share_id = 0
self.inode = 0
self.parent_inode_id = 0
self.commit_id = 0
self.user_id = 0
self.date = 0
self.size = 0
self.hash = None
self.name = ''
# NOT USED #
if 0:
# This class handles writing to a file.
class KFSFileWriter(object):
def __init__(self, file_path):
self._fd = None
self.file_path = file_path
log.debug("%s: instantiated with file path '%s'." % ( self.__class__.__name__, self.file_path ))
def open(self):
self._fd = os.open(self.file_path, os.O_RDWR|os.O_CREAT)
log.debug("%s: opened file '%s'." % ( self.__class__.__name__, self.file_path ))
def write(self, data):
os.write(self._fd, data)
# Do not uncomment!
#log.debug("%s: writing file %i bytes." % ( self.__class__.__name__, len(data) ))
def close(self):
os.close(self._fd)
log.debug("%s: closed file '%s'." % ( self.__class__.__name__, self.file_path ))
# This class represents a KFS uploaded file.
class KFSUploadFile(KFSFile):
def __init__(self):
KFSFile.__init__(self)
# KFS operation code (create/update); set by the caller.
self.kfs_op = None
# Open file descriptor of the local file being uploaded.
self.fd = None
# List of KFSChunk objects covering the whole file.
self.chunks = []
# Error string from the phase-one server reply, if any.
self.kfs_error = None
# This method sets some attributes based on an open file descriptor.
def set_from_fd(self, fd, size=None):
self.chunks = []
# Get hash of file.
# NOTE(review): the real hash computation is commented out and replaced by
# a 16-byte placeholder — presumably the server recomputes it; confirm.
self.hash = "X"*16 #kfs_compute_hash(fd)
# Set fd and size.
self.fd = fd
self.size = size
# If no size was given, take it from fstat.
if not size: self.size = os.fstat(fd)[stat.ST_SIZE]
# Virtually split the file in chunks.
# KFS_CHUNK_SIZE is a module-level constant defined elsewhere in this file.
offset=0
while offset < self.size:
remaining_bytes = self.size - offset
size = min(remaining_bytes, KFS_CHUNK_SIZE)
self.chunks += [KFSChunk(self.fd, offset, size)]
offset += size
# NOT USED #
# NOTE(review): dead code under "if 0" — never defined at runtime.
if 0:
# This class represents a KFS downloaded file.
class KFSDownloadFile(KFSFile):
def __init__(self):
KFSFile.__init__(self)
self.hash = None
# Communication/transport object used for the download.
self.comm = None
self.kfs_error = None
# This class represents a KFS chunk.
# A chunk is a (fd, offset, size) window into a local file; read() seeks to
# the offset and returns exactly `size` bytes.
class KFSChunk(object):
def __init__(self, fd, offset, size):
self.fd = fd
self.offset = offset
self.size = size
def read(self):
os.lseek(self.fd, self.offset, os.SEEK_SET)
# NOTE(review): Python 2 era code — accumulating os.read() results into a
# str ('') breaks under Python 3 where os.read returns bytes.
s = ''
cur = 0
# Loop because os.read may return fewer bytes than requested.
while cur < self.size:
remaining_bytes = self.size - cur
d = os.read(self.fd, remaining_bytes)
cur += len(d)
s += d
return s
def __repr__(self):
return "<%s fd=%i offset=%i size=%i>" % ( self.__class__.__name__, self.fd, self.offset, self.size )
class PhaseTwoCommitSubMessage(object):
    """Phase-two sub-message that commits an uploaded file."""

    def __init__(self):
        # Total wire size of this sub-message, in bytes.
        self.size = 0
        # Underlying ANP message; built by the caller.
        self.anpm = None
class PhaseTwoChunkSubMessage(object):
    """Phase-two sub-message carrying one file chunk."""

    def __init__(self):
        # Total wire size of this sub-message, in bytes.
        self.size = 0
        # Underlying ANP message; built by the caller.
        self.anpm = None
        # The KFSChunk whose data this sub-message transports.
        self.chunk = None
class PhaseTwoMessage(object):
    """Container for one ANPT phase-two message and its sub-messages."""

    def __init__(self):
        # Accumulated wire size (base message + all sub-messages).
        self.size = 0
        # Sub-messages (chunk/commit) in send order.
        self.sub_messages = []
        # Base ANP message; filled in when the message is finalized.
        self.anpm = None
# This class handles KFS operations like creating and updating files in KCD.
class KFSOperations(object):
def __init__(self, kfs_entries, reader, writer):
# KFS operation objects (KFSOp*/KFSUploadFile) to process.
self.kfs_entries = kfs_entries
# Transport reader/writer used for the ANP protocol exchange.
self.reader = reader
self.writer = writer
# Prepared phase-two ANPT messages (filled by prepare_phase_two()).
self.phase_two_messages = []
# Allows creating and updating files (need phase 2) or creating directories.
# Serializes every kfs_entry into one KANP_CMD_KFS_PHASE_1 command, sends it,
# then records any per-operation error string on the matching entry.
def phase_one(self, email_id, ticket):
# Prepare phase one ANP message.
m = kanp.ANP_msg()
m.add_bin(ticket)
m.add_u64(email_id)
m.add_u32(len(self.kfs_entries))
for kfs_entry in self.kfs_entries:
if kfs_entry.kfs_op == kanp.KANP_KFS_OP_CREATE_FILE:
m.add_u32(5) # nb of elements
m.add_u32(kfs_entry.kfs_op)
m.add_u64(kfs_entry.parent_inode_id)
m.add_u64(kfs_entry.parent_commit_id)
m.add_str(kfs_entry.name)
elif kfs_entry.kfs_op == kanp.KANP_KFS_OP_UPDATE_FILE:
m.add_u32(4) # nb of elements
m.add_u32(kfs_entry.kfs_op)
m.add_u64(kfs_entry.inode)
m.add_u64(kfs_entry.commit_id)
elif kfs_entry.kfs_op == kanp.KANP_KFS_OP_CREATE_DIR:
m.add_u32(5) # nb of elements
m.add_u32(kfs_entry.kfs_op)
m.add_u64(kfs_entry.parent_inode_id)
m.add_u64(kfs_entry.parent_commit_id)
m.add_str(kfs_entry.name)
elif kfs_entry.kfs_op == kanp.KANP_KFS_OP_DELETE_DIR:
m.add_u32(4) # nb of elements
m.add_u32(kfs_entry.kfs_op)
m.add_u64(kfs_entry.inode_id)
m.add_u64(kfs_entry.commit_id)
elif kfs_entry.kfs_op == kanp.KANP_KFS_OP_DELETE_FILE:
m.add_u32(4) # nb of elements
m.add_u32(kfs_entry.kfs_op)
m.add_u64(kfs_entry.inode_id)
m.add_u64(kfs_entry.commit_id)
else:
raise Exception("Unexpected KFS operation: '%s'." % ( str(kfs_entry.kfs_op) ) )
# Send phase one ANP message to KCD.
payload = m.get_payload()
self.writer.send_command_header(kanp.KANP_CMD_KFS_PHASE_1, len(payload))
self.writer.write(payload)
log.debug("Phase 1 data sent.")
# Get phase one result.
# NOTE: this rebinds m from the request message to the reply message.
h, m = kanp.get_anpt_all(self.reader)
if h.type != kanp.KANP_RES_KFS_PHASE_1:
assert h.type == kanp.KANP_RES_FAIL
raise kanp.KANPFailure(m.get_u32(), m.get_str())
log.debug("Got phase 1 reply.")
# Handle phase one reply.
# NOTE(review): phase_two_needed is assigned but never read or returned.
phase_two_needed = False
commit_id = m.get_u64()
nb_op = m.get_u32()
assert nb_op == len(self.kfs_entries)
for i in range(0, nb_op):
errno = m.get_u32()
error = m.get_str()
if error:
log.debug(
"Phase 1: KFS operation %i error: errno=%i, error='%s'" % \
( i, errno, error ))
self.kfs_entries[i].kfs_error = error
# This function prepares anp messages and sub-messages for phase_two().
# Knowing in advance the size of the files is needed for this function. See other methods for asynchronous uploads.
# NOTE: No longer used, might not be fully working.
# Implementation: a flag-driven state machine. switch_file / switch_chunk /
# switch_message / commit_file select the next action on each pass of the
# while loop; "exit" (shadows the builtin) terminates after flushing the
# last partially-filled ANPT message.
def prepare_phase_two(self):
message = None
files_iter = iter(self.kfs_entries)
switch_file = True
switch_message = True
commit_file = False
switch_chunk = True
exit = False
while 1:
if exit or switch_message:
switch_message = False
if message and len(message.sub_messages) > 0:
# Finish ANPT message preparation.
message.anpm = kanp.ANP_msg()
message.anpm.add_u32(len(message.sub_messages))
message.size += message.anpm.get_payload_size()
# Append ANPT message to list.
self.phase_two_messages.append(message)
# Init new ANPT message.
message = PhaseTwoMessage()
if exit:
break
if commit_file:
commit_file = False
# Prepare a file commit sub-message.
log.debug("Committing file.")
# Prepare a partial anp message (missing an ANP bin field for the MD5 signature of the file).
subm = PhaseTwoCommitSubMessage()
subm.anpm = kanp.ANP_msg()
subm.anpm.add_u32(3)
subm.anpm.add_u32(kanp.KANP_KFS_SUBMESSAGE_COMMIT)
#hash = kfs_compute_hash(kfs_entry.fd)
#subm.anpm.add_bin(kfs_entry.hash)
# Calculate total sub-message size.
subm.size = subm.anpm.get_payload_size() + 5 + 16 # partial anp mesg + anp bin header + md5 sign.
log.debug("Commit sub-message has %i bytes in total." % ( subm.size ))
# Append sub-message to current ANPT message.
log.debug("Appending commit sub-message to ANPT message.")
message.sub_messages.append(subm)
message.size += subm.size
# Switch to next file.
switch_file = True
continue
if not message:
# Init new message.
log.debug("Initiating a new message.")
message = PhaseTwoMessage()
if switch_file:
switch_file = False
try:
# Get next file.
# NOTE(review): iterator.next() is Python 2 API (next(it) in Python 3).
kfs_entry = files_iter.next()
log.debug("Got new file: '%s'." % ( kfs_entry.name ))
# Start again with file chunk.
chunks_iter = iter(kfs_entry.chunks)
switch_chunk = True
continue
except StopIteration:
# No more file in list.
log.debug("No more file.")
exit = True
continue
if kfs_entry.kfs_op != kanp.KANP_KFS_OP_CREATE_FILE and kfs_entry.kfs_op != kanp.KANP_KFS_OP_UPDATE_FILE:
# That operation does not need any phase 2 messsage.
log.debug("No phase two needed for that operation.")
switch_file = True
continue
if kfs_entry.kfs_error:
# This file cannot be uploaded. Pass to next file.
log.debug("Skipping file '%s' because it had an error in phase 1: '%s'." % \
(kfs_entry.name, kfs_entry.kfs_error ))
switch_file = True
continue
if switch_chunk:
switch_chunk = False
try:
# Get next KFS file chunk.
chunk = chunks_iter.next()
log.debug("Got a new chunk of %i bytes." % ( chunk.size ))
except StopIteration:
# No more chunks. Commit file.
commit_file = True
continue
# Add chunk to current ANPT message.
# Prepare a partial anp message (missing an ANP bin field for the chunk data).
subm = PhaseTwoChunkSubMessage()
subm.anpm = kanp.ANP_msg()
subm.anpm.add_u32(3)
subm.anpm.add_u32(kanp.KANP_KFS_SUBMESSAGE_CHUNK)
#subm.anpm.add_bin(chunk.read())
# Set sub-message chunk.
subm.chunk = chunk
# Calculate total sub-message size.
subm.size = subm.anpm.get_payload_size() + 5 + chunk.size # partial anp mesg + anp bin header + chunk data
log.debug("Chunk sub-message has %i bytes in total." % ( subm.size ))
# 100000 is a safety margin below the transport's max message size.
if (message.size + subm.size + 100000) > kanp.ANPT_MSG_MAX_SIZE:
# Current ANPT message cannot accept chunk.
# Switch ANPT message.
switch_message = True
# Do not switch chunk (implicit).
#switch_chunk = False
continue
# Append sub-message to this message.
log.debug("Appending chunk sub-message to ANPT message.")
message.sub_messages.append(subm)
message.size += subm.size
switch_chunk = True
# This function handles the phase two communications, after messages are prepared in prepare_phase_two().
# NOTE: No longer used, might not be fully working.
# Sends every prepared ANPT message; chunk data is read lazily here and an
# MD5 accumulator is folded across a file's chunks, then emitted in that
# file's commit sub-message.
# NOTE(review): the local names "hash" and "bytes" shadow builtins.
def phase_two(self):
hash = None
i = -1
for message in self.phase_two_messages:
i += 1
# Sent ANP transport header
log.debug("Phase 2: sending ANPT header %i, size %i." % ( i, message.size ))
self.writer.send_command_header(kanp.KANP_CMD_KFS_PHASE_2, message.size)
log.debug("Phase 2: sent ANPT header %i, size %i." % ( i, message.size ))
# Send base message anp message.
kanp.send_anpt_msg(self.writer, message.anpm)
if not hash:
hash = hashlib.md5()
j = -1
for subm in message.sub_messages:
j += 1
if isinstance(subm, PhaseTwoChunkSubMessage):
# send chunk
log.debug("Phase 2: preparing file %i chunk %i anp message." % ( i, j ))
bytes = subm.chunk.read()
hash.update(bytes)
subm.anpm.add_bin(bytes)
log.debug("Phase 2: sending file %i chunk %i anp message." % ( i, j ))
kanp.send_anpt_msg(self.writer, subm.anpm)
log.debug("Phase 2: sent file %i chunk %i anp message." % ( i, j ))
else:
assert isinstance(subm, PhaseTwoCommitSubMessage)
# send commit
log.debug("Phase 2: preparing file %i commit anp message." % ( i ))
bytes = hash.digest()
subm.anpm.add_bin(bytes)
# Reset the accumulator for the next file.
hash = hashlib.md5()
log.debug("Phase 2: sending file %i commit anp message." % ( i ))
kanp.send_anpt_msg(self.writer, subm.anpm)
log.debug("Phase 2: sent file %i commit anp message." % ( i ))
# get response
log.debug("Phase 2: getting %i reply." % ( i ))
h, m = kanp.get_anpt_all(self.reader)
log.debug("Phase 2: got %i reply." % ( i ))
if h.type == kanp.KANP_RES_FAIL:
raise kanp.KANPFailure(m.get_u32(), m.get_str())
assert h.type == kanp.KANP_RES_OK
# get response
h, m = kanp.get_anpt_all(self.reader)
log.debug("Phase 2: got final reply.")
if h.type == kanp.KANP_RES_FAIL:
raise kanp.KANPFailure(m.get_u32(), m.get_str())
assert h.type == kanp.KANP_RES_OK
log.debug("File upload finished.")
#return kfs_entries
# Create a phase 2 chunk sub-message.
# Unlike prepare_phase_two(), the chunk data is embedded immediately.
def phase_2_create_chunk_submessage(self, data):
# Prepare anp message
subm = PhaseTwoChunkSubMessage()
subm.anpm = kanp.ANP_msg()
subm.anpm.add_u32(3)
subm.anpm.add_u32(kanp.KANP_KFS_SUBMESSAGE_CHUNK)
subm.anpm.add_bin(data)
return subm
# Create a phase 2 commit sub-message.
# hash: the file's binary MD5 digest (shadows the builtin name).
def phase_2_create_commit_submessage(self, hash):
subm = PhaseTwoCommitSubMessage()
subm.anpm = kanp.ANP_msg()
subm.anpm.add_u32(3)
subm.anpm.add_u32(kanp.KANP_KFS_SUBMESSAGE_COMMIT)
subm.anpm.add_bin(hash)
return subm
# Send a phase 2 message with only 1 submessage
# (for asynchronous uploads when file(s) size(s) is/are not yet known...).
# Wraps the sub-message in a minimal ANPT message, sends both, and raises
# kanp.KANPFailure if the server replies with KANP_RES_FAIL.
def phase_2_send_message_with_one_submessage(self, subm):
# Prepare ANP message.
message = PhaseTwoMessage()
message.anpm = kanp.ANP_msg()
message.anpm.add_u32(1) # Send only one sub-message
# Calculate base messasge size.
message.size = message.anpm.get_payload_size()
#log.debug("Base message size: %i bytes." % ( message.size ))
# Calculate total sub-message size.
subm.size = subm.anpm.get_payload_size()
log.debug("Chunk sub-message size: %i bytes." % ( subm.size ))
total_size = message.size + subm.size
# Sent ANP transport header
#log.debug("Phase 2: sending ANPT header with data size %i." % ( total_size ))
self.writer.send_command_header(kanp.KANP_CMD_KFS_PHASE_2, total_size)
#log.debug("Phase 2: sent ANPT header, size %i." % ( total_size ))
# Send base message.
kanp.send_anpt_msg(self.writer, message.anpm)
# Send sub-message.
kanp.send_anpt_msg(self.writer, subm.anpm)
# get response
#log.debug("Phase 2: getting reply.")
h, m = kanp.get_anpt_all(self.reader)
#log.debug("ANP RESPONSE DUMP: %s" % (str(m.dump())))
#log.debug("Phase 2: got reply.")
if h.type == kanp.KANP_RES_FAIL:
raise kanp.KANPFailure(m.get_u32(), m.get_str())
assert h.type == kanp.KANP_RES_OK
def kfs_compute_hash(fd):
    """Return the binary MD5 digest of the whole file behind descriptor *fd*.

    Rewinds the descriptor to offset 0 first and reads in 1 MiB slices so
    arbitrarily large files can be hashed with bounded memory.
    """
    os.lseek(fd, 0, 0)
    digest = hashlib.md5()
    chunk = os.read(fd, 1024 * 1024)
    while chunk:
        digest.update(chunk)
        chunk = os.read(fd, 1024 * 1024)
    return digest.digest()
| [
"karim.yaghmour@opersys.com"
] | karim.yaghmour@opersys.com |
09720d321f63f021019918984dce887b72d21bb5 | b05be5a98e43c4c403c1bf91105bd98d52a33439 | /disque/disque.py | d5dcc2d9c56565fd0d63d933318024848e3473e6 | [] | no_license | aallamaa/disquepy | 8704c28106135f5cf449597748efaef2214262d5 | 0d70d2f484d59324b986a7aeac77a5f2b47dfbf6 | refs/heads/master | 2020-06-03T21:49:42.124147 | 2015-04-28T01:38:32 | 2015-04-28T01:38:32 | 34,691,338 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,862 | py | #
# Copyright (c) 2015, Abdelkader ALLAM <abdelkader.allam at gmail dot com>
# All rights reserved.
#
# This source also contains source code from Disque
# developped by Salvatore Sanfilippo <antirez at gmail dot com>
# available at http://github.com/antirez/disque
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Disque nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import socket
import os
import pickle
import time
import urllib2
import threading
import random
import json
from pkg_resources import resource_string
import __builtin__
# Global command table (parsed commands.json); methods are generated from it
# by the MetaDisque metaclass below.
redisCommands=None
# Fetch and parse a commands.json from *url*, replacing the global table.
def reloadCommands(url):
global redisCommands
try:
u=urllib2.urlopen(url)
redisCommands=json.load(u)
except urllib2.HTTPError:
# NOTE(review): "commmands" typo in the runtime message (left as-is here).
raise(Exception("Error unable to load commmands json file"))
# Allow overriding the command table via __builtin__.urlCommands (Python 2 hook).
if "urlCommands" in dir(__builtin__):
reloadCommands(__builtin__.urlCommands)
# Fall back to the commands.json bundled with the package.
if not redisCommands:
try:
redisCommands=json.loads(resource_string(__name__,"commands.json"))
except IOError:
raise(Exception("Error unable to load commmands json file"))
class DisqueError(Exception):
    """Raised when the Disque server returns an error ("-...") reply."""
class NodeError(Exception):
    """Raised on connection/socket failures while talking to a node."""
# Map command names that collide with Python keywords to safe method names.
cmdmap={"del":"delete","exec":"execute"}
# Metaclass that generates one method per entry of the global redisCommands
# table on the Disque class: each generated method forwards to runcmd() with
# its command name, and carries the command's summary as its docstring.
# NOTE(review): Python 2 code — dict.has_key() does not exist in Python 3.
class MetaDisque(type):
def __new__(metacls, name, bases, dct):
# Build a forwarding method for one command.
def _wrapper(name,redisCommand,methoddct):
runcmd="runcmd"
def _rediscmd(self, *args):
return methoddct[runcmd](self, name, *args)
# Method name: lowercase, keywords remapped, spaces -> underscores.
_rediscmd.__name__= cmdmap.get(name.lower(),str(name.lower().replace(" ","_")))
_rediscmd.__redisname__= name
_rediscmd._json = redisCommand
if redisCommand.has_key("summary"):
_doc = redisCommand["summary"]
if redisCommand.has_key("arguments"):
_doc+="\nParameters:\n"
for d in redisCommand["arguments"]:
_doc+="Name: %s,\tType: %s,\tMultiple parameter:%s\n" % (d["name"],d["type"],d.get("multiple","False"))
_rediscmd.__doc__ = _doc
_rediscmd.__dict__.update(methoddct[runcmd].__dict__)
return _rediscmd
# Only the Disque class itself gets the generated methods.
if name != "Disque":
return type.__new__(metacls, name, bases, dct)
newDct = {}
for k in redisCommands.keys():
newDct[cmdmap.get(k.lower(),str(k.lower().replace(" ","_")))]= _wrapper(k,redisCommands[k],dct)
# Explicit class members win over generated ones.
newDct.update(dct)
return type.__new__(metacls, name, bases, newDct)
# Thread-local client: each thread gets its own connection state.
class Disque(threading.local):
"""
class providing a client interface to Disque
"""
# Python 2 metaclass hook (ignored by Python 3).
__metaclass__ = MetaDisque
def __init__(self,host="localhost",port=7711,password=None,timeout=None,safe=False):
self.host=host
self.port=port
self.timeout=timeout
self.password=password
# safe=True retries a failed command once after safewait seconds.
self.safe=safe
self.safewait=0.1
# Single node for now; cluster support planned (see runcmd comment).
self.Nodes=[Node(host,port,password,timeout)]
# True between MULTI/WATCH and DISCARD/EXEC.
self.transaction=False
# True while a (P)SUBSCRIBE is active.
self.subscribed=False
# Generator yielding pub/sub messages until the last unsubscribe arrives.
# With todict=True, messages are normalized to dicts with
# type/pattern/channel/data keys.
def listen(self,todict=False):
while self.subscribed:
r = self.Nodes[0].parse_resp()
# A final unsubscribe reply (0 remaining channels) ends the stream.
if r[0] == 'unsubscribe' and r[2] == 0:
self.subscribed = False
if todict:
if r[0]=="pmessage":
r=dict(type=r[0],pattern=r[1],channel=r[2],data=r[3])
else:
r=dict(type=r[0],pattern=None,channel=r[1],data=r[2])
yield r
# Run one command on the first node, tracking transaction and pub/sub state.
# In safe mode a NodeError triggers a short sleep and one retry below.
def runcmd(self,cmdname,*args):
#cluster implementation to come soon after antirez publish the first cluster implementation
if cmdname in ["MULTI","WATCH"]:
self.transaction=True
# Safe fast-path: try once; on NodeError fall through to the retry below.
if self.safe and not self.transaction and not self.subscribed:
try:
return self.Nodes[0].runcmd(cmdname,*args)
except NodeError:
time.sleep(self.safewait)
if cmdname in ["DISCARD","EXEC"]:
self.transaction=False
try:
# Pub/sub commands only send and read one reply; state toggles below.
if cmdname in ["SUBSCRIBE","PSUBSCRIBE","UNSUBSCRIBE","PUNSUBSCRIBE"]:
self.Nodes[0].sendcmd(cmdname,*args)
rsp = self.Nodes[0].parse_resp()
else:
rsp = self.Nodes[0].runcmd(cmdname,*args)
if cmdname in ["SUBSCRIBE","PSUBSCRIBE"]:
self.subscribed = True
return rsp
except NodeError as e:
# Connection lost: reset session state before propagating.
self.transaction=False
self.subscribed=False
raise NodeError(e)
def runcmdon(self, node, cmdname, *args):
    """Run *cmdname* with *args* on the given *node* and return its reply.

    BUG FIX: the body previously read ``self.node.runcmd(...)`` — Disque has
    no ``node`` attribute, so every call raised AttributeError instead of
    using the *node* parameter.
    """
    return node.runcmd(cmdname, *args)
class Node(object):
"""
Manage TCP connections to a redis node
"""
def __init__(self,host="localhost",port=6379,password=None,timeout=None):
self.host=host
self.port=port
self.timeout=timeout
self.password=password
# Lazily-opened socket and its buffered file object (see connect()).
self._sock=None
self._fp=None
# Open the TCP connection if not already open, preferring IPv4 addresses,
# then authenticate when a password is configured.
def connect(self):
if self._sock:
return
addrinfo = socket.getaddrinfo(self.host, self.port)
# Sort IPv4 results first.
addrinfo.sort(key=lambda x: 0 if x[0] == socket.AF_INET else 1)
family, _, _, _, _ = addrinfo[0]
sock = socket.socket(family, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
# Disable Nagle: commands are small and latency-sensitive.
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(self.timeout)
self._sock = sock
self._fp = sock.makefile('r')
except socket.error as msg:
if len(msg.args)==1:
raise NodeError("Error connecting %s:%s. %s." % (self.host,self.port,msg.args[0]))
else:
raise NodeError("Error %s connecting %s:%s. %s." % (msg.args[0],self.host,self.port,msg.args[1]))
# NOTE(review): auth runs in "finally", so it is attempted even when the
# connect above failed and NodeError is about to propagate — confirm this
# is intentional.
finally:
if self.password:
if not self.runcmd("auth",self.password):
raise DisqueError("Authentication error: Invalid password")
def disconnect(self):
if self._sock:
try:
self._sock.close()
except socket.error:
pass
finally:
self._sock=None
self._fp=None
def read(self,length):
try:
return self._fp.read(length)
except socket.error as msg:
self.disconnet()
if len(msg.args)==1:
raise NodeError("Error connecting %s:%s. %s." % (self.host,self.port,msg.args[0]))
else:
raise NodeError("Error %s connecting %s:%s. %s." % (msg.args[0],self.host,self.port,msg.args[1]))
def readline(self):
try:
return self._fp.readline()
except socket.error as msg:
self.disconnect()
if len(msg.args)==1:
raise NodeError("Error connecting %s:%s. %s." % (self.host,self.port,msg.args[0]))
else:
raise NodeError("Error %s connecting %s:%s. %s." % (msg.args[0],self.host,self.port,msg.args[1]))
# Send one CRLF-terminated line, connecting lazily first.
# NOTE(review): socket.send() may transmit fewer bytes than given; sendall()
# would be the safe choice — confirm before relying on large payloads.
def sendline(self,message):
self.connect()
try:
self._sock.send(message+"\r\n")
except socket.error as msg:
self.disconnect()
if len(msg.args)==1:
raise NodeError("Error connecting %s:%s. %s." % (self.host,self.port,msg.args[0]))
else:
raise NodeError("Error %s connecting %s:%s. %s." % (msg.args[0],self.host,self.port,msg.args[1]))
def sendcmd(self,*args):
args2=args[0].split()
args2.extend(args[1:])
cmd=""
cmd+="*%d" % (len(args2))
for arg in args2:
cmd+="\r\n"
cmd+="$%d\r\n" % (len(str(arg)))
cmd+=str(arg)
self.sendline(cmd)
# Parse one RESP reply from the wire:
#   "+" simple string, "-" error (raises DisqueError), ":" integer,
#   "$" bulk string, "*" array (parsed recursively).
# "$-1"/"*-1" are the RESP null bulk/array and yield None.
def parse_resp(self):
resp = self.readline()
if not resp:
# resp empty what is happening ? to be investigated
return None
if resp[:-2] in ["$-1","*-1"]:
return None
# Split the type byte from the payload.
fb,resp=resp[0],resp[1:]
if fb=="+":
# Strip the trailing CRLF.
return resp[:-2]
if fb=="-":
raise DisqueError(resp)
if fb==":":
return int(resp)
if fb=="$":
# Bulk string: read exactly <len> bytes, then discard the CRLF.
resp=self.read(int(resp))
self.read(2)
return resp
if fb=="*":
# Array: recurse once per element.
return [self.parse_resp() for i in range(int(resp))]
def runcmd(self,cmdname,*args):
self.sendcmd(cmdname,*args)
return self.parse_resp()
| [
"abdelkader.allam@gmail.com"
] | abdelkader.allam@gmail.com |
4209baa817c451e1b89a686b10aa7ddc8096c027 | af4253fe18c2cbd860b631a0934e7389f0b6d547 | /TestObjects/Objects.py | a8de5e0996c843d71056ac81af635b4e3db702e2 | [] | no_license | jainikbhatt/testing-assignments | 1b795b3deadb4816bc0aef320bbff49043f0f6a7 | e2ca52f56d6c3ae08b856c724b041970036ae839 | refs/heads/main | 2023-07-02T21:19:03.029319 | 2021-07-29T14:25:23 | 2021-07-29T14:25:23 | 390,717,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py |
class Object:
    """Selenium page object for the site's navigation bar and contact form.

    Centralizes the locators and the click/fill interactions so test cases
    never embed raw XPath/id strings.
    """

    # --- Navigation locators ---
    logo_by_xpath = "//*[@id='nav-bar']/div[1]/a/img"
    process_by_css_selector = "input[class='menu-item'][href='/#process']"
    process_by_xpath = "//*[@id='nav-bar']/div[2]/ul/li[1]/a"
    work_by_xpath = "//*[@id='nav-bar']/div[2]/ul/li[2]/a"
    careers_by_xpath = "//*[@id='nav-bar']/div[2]/ul/li[3]/a"
    contactUs_by_xpath = "//*[@id='nav-bar']/div[2]/ul/li[4]"
    partner_btn_by_xpath = "/html/body/section[1]/div/div/a"

    # --- Contact form locators ---
    first_name_by_id = "contact-form-first-name"
    last_name_by_id = "contact-form-last-name"
    email_by_id = "contact-form-email"
    subject_by_id = "contact-form-subject"
    message_by_id = "contact-form-message"
    send_by_xpath = "//*[@id='contactus']/div/div/form/button"

    def __init__(self, driver):
        # Selenium WebDriver shared by all interactions.
        self.driver = driver

    def _fill_by_id(self, element_id, text):
        """Clear the field located by *element_id*, then type *text* into it."""
        field = self.driver.find_element_by_id(element_id)
        field.clear()
        field.send_keys(text)

    def setLogo(self):
        """Click the nav-bar logo."""
        self.driver.find_element_by_xpath(self.logo_by_xpath).click()

    def setProcess(self):
        """Click the 'Process' menu entry."""
        self.driver.find_element_by_xpath(self.process_by_xpath).click()

    def setWork(self):
        """Click the 'Work' menu entry."""
        self.driver.find_element_by_xpath(self.work_by_xpath).click()

    def setCareers(self):
        """Click the 'Careers' menu entry."""
        self.driver.find_element_by_xpath(self.careers_by_xpath).click()

    def setContactUs(self):
        """Click the 'Contact Us' menu entry."""
        self.driver.find_element_by_xpath(self.contactUs_by_xpath).click()

    def setPartnerBtn(self):
        """Click the partner call-to-action button."""
        self.driver.find_element_by_xpath(self.partner_btn_by_xpath).click()

    def setFirstName(self, firstName):
        """Fill the first-name field."""
        self._fill_by_id(self.first_name_by_id, firstName)

    def setLastName(self, lastName):
        """Fill the last-name field."""
        self._fill_by_id(self.last_name_by_id, lastName)

    def setEmail(self, email):
        """Fill the email field."""
        self._fill_by_id(self.email_by_id, email)

    def setSubject(self, subject):
        """Fill the subject field."""
        self._fill_by_id(self.subject_by_id, subject)

    def setMessage(self, message):
        """Fill the message field.

        BUG FIX: send_keys() was previously called without the *message*
        argument, so the message field was always left empty.
        """
        self._fill_by_id(self.message_by_id, message)

    def setSendBtn(self):
        """Click the form's send button."""
        self.driver.find_element_by_xpath(self.send_by_xpath).click()
| [
"jainik@kaptaine.com"
] | jainik@kaptaine.com |
3a6969dffb80f9d7a7115a07bdc66312f33ad0db | b1390f9fc51adc01992df9cc3d236d06d44545b6 | /threeNN.py | 95918cc4aac4408b7c5b271ce7d953d0be5b85e4 | [] | no_license | BlingBling921/DL | d7dd71d0a39889c8d301d8e51a8de3ecba4030e5 | 6b4571f3189ccd32606c67e59c8151f1a2ec2291 | refs/heads/master | 2022-11-15T07:27:11.023004 | 2020-06-21T08:04:54 | 2020-06-21T08:04:54 | 272,327,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,614 | py | import numpy
import scipy.special
import matplotlib.pyplot
class neuralNetwork:
    """A minimal three-layer (input / hidden / output) feed-forward network.

    Weights are trained by plain stochastic gradient descent with a sigmoid
    activation on both the hidden and output layers.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # Layer sizes.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Learning rate (name kept for compatibility with existing callers).
        self.Ir = learningrate
        # Link weights sampled from N(0, n**-0.5): wih maps input->hidden,
        # who maps hidden->output; shape is (target layer, source layer).
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Sigmoid activation function.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """Run one gradient-descent update for a single training sample."""
        # Column vectors: (n, 1).
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass through both layers.
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        # Output error, and its share backpropagated to the hidden layer.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient steps for both weight matrices.
        self.who += self.Ir * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.Ir * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward-propagate *inputs_list* and return the output activations."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        return final_outputs
# Number of input, hidden and output layer nodes (28x28 pixels -> 10 digits).
input_nodes = 784
hidden_node = 100
output_nodes = 10
# Learning rate.
learning_rate = 0.3
# Build the network.
n = neuralNetwork(input_nodes, hidden_node, output_nodes, learning_rate)
# Load the MNIST training CSV: open() returns a handle; everything below
# goes through that handle.
training_data_file = open("E:/ไธไธ/mnist_dataset/mnist_train_100.csv", 'r')
# training_data_list[i] is the i-th record (label, then 784 pixel values).
training_data_list = training_data_file.readlines()
training_data_file.close()
for record in training_data_list:
all_values = record.split(',')
# Rescale pixel values from [0, 255] into [0.01, 1.0] (x/255*0.99 + 0.01)
# so no input is exactly zero, which would kill the weight updates.
inputs = (numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
# Target vector: 0.01 everywhere, 0.99 at the correct label's index.
targets = numpy.zeros(output_nodes) + 0.01
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
# Training done — now test the network.
# Load the MNIST test CSV.
test_data_file = open("E:/ไธไธ/mnist_dataset/mnist_test_10.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# Run only the first test record through the network.
all_values = test_data_list[0].split(',')
data = n.query(numpy.asfarray(all_values[1:])/255.0*0.99+0.01)
# Pick the index of the strongest output as the predicted digit.
# NOTE(review): numpy.argmax(data) would replace this manual scan.
max_num = 0
for i in range(len(data)):
if data[i] > max_num:
max_num = data[i]
num = i
print("ๅพ็ไธ็ๆฐๅญๆฏ:", num)
# Display the test image: asfarray converts the pixel strings to floats,
# [1:] skips the label, reshape((28, 28)) restores the image grid.
image_array = numpy.asfarray(all_values[1:]).reshape((28, 28))
# imshow prepares the image for display; cmap='Greys' renders grayscale.
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
# show() actually renders the prepared figure.
matplotlib.pyplot.show()
"2650400028@qq.com"
] | 2650400028@qq.com |
cafd330140fcfb6368723d583251829672ceb42d | a86599993fcca8fbe67ee02106281b5145f8db5e | /Laboratory 04/wdp_ftopt_l04z04pr.py | 37e25e77b5d7c40c7a9717f6d5240df8b50d219e | [] | no_license | pauliwu/Introduction-to-programming-in-python | 2747572c73a5559c0636523f7b75ae6c4e79d51e | cc4be2030d1a0798054ec2c6b30425fd77d3e117 | refs/heads/master | 2022-03-31T09:15:33.191768 | 2020-01-30T22:05:53 | 2020-01-30T22:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | '''
Napisz program, ktรณry poprosi uลผytkownika o podanie promienia koลa, a nastฤpnie wyลwietli
informacjฤ o jego polu i obwodzie.
'''
def kolo(promien):
    """Return the (area, circumference) pair of a circle with radius *promien*.

    Uses the coarse approximation pi = 3.14 on purpose (exercise spec).
    """
    pi = 3.14
    area = pi * promien ** 2
    circumference = 2 * pi * promien
    return area, circumference
def main():
    """Prompt for a radius in cm, then print the circle's area and circumference."""
    r = float(input("Wprowadz promien kola w cm: "))
    pole, obwod = kolo(r)
    print("Obwod = ", format(obwod, ".1f"), "cm")
    print("Pole = ", format(pole, ".1f"), "cm^2")


# IMPROVEMENT: guard the entry point so importing this module (e.g. from
# tests) no longer triggers the interactive prompt; running it as a script
# behaves exactly as before.
if __name__ == "__main__":
    main()
| [
"58003896+majsylw@users.noreply.github.com"
] | 58003896+majsylw@users.noreply.github.com |
ad7c6f3e45d6ce78eacd4972b09e736c5a69aa25 | 3907f4591cf26309b1e6981fc3467da871b4f860 | /mymoney/apps/banktransactionschedulers/views.py | afcea194d45e587af7a5a67e63f4164182a121a6 | [] | no_license | chenchampion/djangosample | e19b0fe457d3e96217c85bf4ce160eb744b4f598 | f4e56daa75a821424d8759bcf1a35674a98e5c7d | refs/heads/master | 2020-12-30T14:19:29.252939 | 2017-05-28T08:04:07 | 2017-05-28T08:04:07 | 91,305,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,206 | py | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from mymoney.apps.banktransactions.mixins import (
BankTransactionAccessMixin, BankTransactionSaveViewMixin,
)
from mymoney.apps.banktransactions.models import BankTransaction
from mymoney.mymoneycore.utils.dates import GRANULARITY_MONTH, GRANULARITY_WEEK
from .forms import (
BankTransactionSchedulerCreateForm, BankTransactionSchedulerUpdateForm,
)
from .models import BankTransactionScheduler
# Paginated overview of a bank account's schedulers plus a per-period
# (weekly/monthly) credit/debit/used/remaining summary.
class BankTransactionSchedulerListView(BankTransactionAccessMixin,
generic.ListView):
model = BankTransactionScheduler
template_name = 'banktransactionschedulers/overview/index.html'
paginate_by = 50
def get_queryset(self):
# Schedulers of the current account, most recently executed first.
return (
BankTransactionScheduler.objects
.filter(bankaccount=self.bankaccount)
.order_by('-last_action')
)
def get_context_data(self, **kwargs):
context = super(BankTransactionSchedulerListView, self).get_context_data(**kwargs)
context['bankaccount'] = self.bankaccount
totals, summary = {}, {}
manager = BankTransactionScheduler.objects
# Grand total across all scheduler types.
total = 0
# Totals keyed by scheduler type (weekly/monthly).
totals['debit'] = manager.get_total_debit(self.bankaccount)
totals['credit'] = manager.get_total_credit(self.bankaccount)
for bts_type in BankTransactionScheduler.TYPES:
# TYPES entries are (key, human label) pairs.
key = bts_type[0]
# Only summarize types that actually have scheduled amounts.
if key in totals['debit'] or key in totals['credit']:
if key == BankTransactionScheduler.TYPE_WEEKLY:
granularity = GRANULARITY_WEEK
else:
granularity = GRANULARITY_MONTH
total_credit = totals['credit'].get(key, 0)
total_debit = totals['debit'].get(key, 0)
# Amount already spent outside schedulers over the current period.
used = BankTransaction.objects.get_total_unscheduled_period(
self.bankaccount, granularity) or 0
summary[key] = {
'type': bts_type[1],
'credit': total_credit,
'debit': total_debit,
'used': used,
'remaining': total_credit + total_debit + used,
}
summary[key]['total'] = total_credit + total_debit
total += summary[key]['total']
context['summary'] = summary
context['total'] = total
return context
# Create a scheduler; optionally clones it immediately ("start_now") and can
# redirect back to itself for quick successive creation.
class BankTransactionSchedulerCreateView(PermissionRequiredMixin,
BankTransactionAccessMixin,
BankTransactionSaveViewMixin,
SuccessMessageMixin,
generic.CreateView):
model = BankTransactionScheduler
form_class = BankTransactionSchedulerCreateForm
permission_required = ('banktransactionschedulers.add_banktransactionscheduler',)
success_message = _(
"Bank transaction scheduler %(label)s was created successfully."
)
def get_initial(self):
initial = super(BankTransactionSchedulerCreateView, self).get_initial()
# Keep the "redirect back to this form" checkbox ticked across redirects.
if self.request.GET.get('self-redirect', False):
initial['redirect'] = True
return initial
def form_valid(self, form):
response = (
super(BankTransactionSchedulerCreateView, self).form_valid(form)
)
# "Start now": immediately materialize the first bank transaction.
if form.cleaned_data['start_now']:
self.object.clone()
# Re-open the creation form instead of going to the success URL.
if form.cleaned_data['redirect']:
url_redirect = reverse('banktransactionschedulers:create', kwargs={
'bankaccount_pk': self.object.bankaccount.pk,
}) + '?self-redirect=1'
return HttpResponseRedirect(url_redirect)
return response
class BankTransactionSchedulerUpdateView(PermissionRequiredMixin,
                                         BankTransactionAccessMixin,
                                         BankTransactionSaveViewMixin,
                                         SuccessMessageMixin,
                                         generic.UpdateView):
    """Edit an existing bank transaction scheduler.

    Purely declarative: all behaviour comes from the mixins and
    ``generic.UpdateView``.
    """
    model = BankTransactionScheduler
    form_class = BankTransactionSchedulerUpdateForm
    permission_required = ('banktransactionschedulers.change_banktransactionscheduler',)
    success_message = _(
        "Bank transaction scheduler %(label)s was updated successfully."
    )
class BankTransactionSchedulerDeleteView(PermissionRequiredMixin,
                                         BankTransactionAccessMixin,
                                         generic.DeleteView):
    """Delete a bank transaction scheduler, then return to the account's
    scheduler list."""

    model = BankTransactionScheduler
    permission_required = ('banktransactionschedulers.delete_banktransactionscheduler',)

    def get_success_url(self):
        # Send the user back to the scheduler list of the account the
        # deleted object belonged to.
        bankaccount_pk = self.object.bankaccount.pk
        self.success_url = reverse(
            'banktransactionschedulers:list',
            kwargs={'bankaccount_pk': bankaccount_pk},
        )
        return super(BankTransactionSchedulerDeleteView, self).get_success_url()
| [
"championonline@gmail.com"
] | championonline@gmail.com |
52fb799a9719d0f4a6dd2ee54ce68e2687c19b3a | 4060819693d6ae501e10cfa965f5747214422339 | /model/__init__.py | 1717d8979a45e296f8a35d44f195c49401ca9ee8 | [] | no_license | baicaisir/AutoFunction | 25f7424796e10ebffc52ea68d2ad24c2685ed2b7 | 0cfb121cfb4e2a203504535234a7cadde86197b8 | refs/heads/master | 2023-07-07T10:35:55.701976 | 2021-05-21T03:06:36 | 2021-05-21T03:06:36 | 357,548,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from .Firebase import Firebase
# Define the shared instance once here, constant-style, to avoid duplicate
# definitions (which caused exceptions) and to ease maintenance.
# (Reconstructed from a mojibake-garbled Chinese comment.)
Firebase = Firebase()
| [
"junhua.future@gmail.com"
] | junhua.future@gmail.com |
6b62ab21c2268296db1080921db84219e376a6f9 | 1a2d843df90bbde9d958c9539b418a6c765cfef5 | /Hard/GridWalk.py | 936bde5ceaf13c6395275e8586c019164bb80ce9 | [] | no_license | tramxme/CodeEval | 4a0255bc795558f512d1aa918ecd53bd555fd506 | 1914daa981d0f4bac0f6ec1227f09cf530fb0431 | refs/heads/master | 2021-01-20T15:12:41.770646 | 2017-06-08T19:13:41 | 2017-06-08T19:13:41 | 90,733,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import sys, re, math
def isValid(num1, num2):
    """Return True when the digit sums of both coordinates total <= 19.

    Uses abs() so negative grid coordinates are accepted too; the original
    fed the '-' sign into int() and raised ValueError on negatives.
    """
    total = 0
    for coordinate in (num1, num2):
        for digit_char in str(abs(coordinate)):
            total += int(digit_char)
    return (total <= 19)
def main():
    """Count the lattice points reachable from the origin.

    Flood-fill from (0, 0), stepping one unit in the four cardinal
    directions and standing only on points accepted by isValid().
    Prints the number of reachable points.  (The original body was
    truncated to two empty infinite loops and could never terminate.)
    """
    origin = (0, 0)
    seen = {origin}
    stack = [origin]
    res = 0
    while stack:
        x, y = stack.pop()
        res += 1
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            # abs() keeps isValid() safe on negative coordinates; the
            # digit sum of -n equals that of n.
            if nxt not in seen and isValid(abs(nxt[0]), abs(nxt[1])):
                seen.add(nxt)
                stack.append(nxt)
    print(res)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"tramlaisf@gmail.com"
] | tramlaisf@gmail.com |
6e6194a66c8390f74429e25e8931b1bca6a81b96 | b09b95b6f1d28b48bb58a44b52dceb6dfbbbc79d | /sessions/001_011/stringreverse.py | 94bfcb579a445633ee523dea90e9a58a457b2f82 | [] | no_license | cookjw/Project-Euler | 05e3852865decaa8167ed6c66c3b9b926516487e | ffea7c1864f9e9a24b7f2792accf58cd511c1e24 | refs/heads/master | 2018-12-28T15:38:35.548334 | 2014-02-07T18:43:33 | 2014-02-07T18:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | def switchindex(n):
return -n - 1
def string_reverse(string):
    """Return *string* reversed.

    Walks the input from its last character to its first.  Unlike the
    original implementation this returns "" for an empty input instead of
    raising IndexError, and the unreachable "Sorry, didn't work." branch
    is gone.
    """
    reversed_chars = []
    for offset in range(len(string)):
        # string[-1], string[-2], ... string[-len]
        reversed_chars.append(string[-offset - 1])
    return "".join(reversed_chars)
| [
"cookjw@gmail.com"
] | cookjw@gmail.com |
d3412ea7bd72697ab7388bbb6fe54280a2dc9514 | e4666ca87a6708f9338bbb0780023c37187665b0 | /Comida.py | 8bf609722b7e8c9b108ca34b0695c4a80a476a85 | [] | no_license | Gergash/cinema | d4b3f3686796b6dbdfc5990ae3539af0fb41ffb5 | 2daaa612f5782c8b2ca7d6e9d9908c25e095802a | refs/heads/master | 2023-08-25T19:32:47.049078 | 2021-10-27T21:25:56 | 2021-10-27T21:25:56 | 402,144,900 | 0 | 1 | null | 2021-09-11T05:36:39 | 2021-09-01T17:19:51 | null | UTF-8 | Python | false | false | 152 | py | from enums.TipoComida import TipoComida
class Comida:
    """A food item sold at the cinema (plain data holder).

    NOTE: these are bare class-level annotations -- they only declare
    types; no attributes exist until instance code assigns them.
    """
    idComida: str  # unique identifier of the food item
    precioComida: int  # price (currency/units not specified here -- confirm)
    nombreComida: str  # display name
    tipoComida: TipoComida  # category, see enums.TipoComida
| [
"noreply@github.com"
] | noreply@github.com |
871eb6e8ee0778f806cecd0362c54b91bff6028c | d6e90e0326248389768fc9b6aece86b70e16f3e5 | /code_examples/gnuradio/module_fmcw/gr-radar/python/qa_FMCW_separate_IQ_cc.py | 7933b4c9829cbf1f1334c20a93dcfcf5f7cdd61a | [] | no_license | stwunsch/gsoc-proposal | 22d1d8f23b2f6008e59f80c4a51aab50a04b3e85 | 75d37e8a1e6d16ad0798bf3e7b4ab067d24f9a18 | refs/heads/master | 2021-01-19T16:57:41.145819 | 2014-04-14T16:15:08 | 2014-04-14T16:15:08 | 17,761,313 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest,blocks
import radar_swig as radar
class qa_FMCW_separate_IQ_cc (gr_unittest.TestCase):
    """QA for FMCW_separate_IQ_cc: a complex sample stream must be split
    into vectors delivered round-robin on the block's three outputs."""

    def setUp (self):
        # Fresh flowgraph for every test case.
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_001_t (self):
        # set up fg: 6 samples in, vector length 2, so each of the three
        # outputs should receive exactly one 2-sample vector.
        data = ( complex(1,1),complex(2,2),complex(3,3),complex(4,4),complex(5,5),complex(6,6) )
        src = blocks.vector_source_c( data )
        test = radar.FMCW_separate_IQ_cc(2)
        snk1 = blocks.vector_sink_c(2)
        snk2 = blocks.vector_sink_c(2)
        snk3 = blocks.vector_sink_c(2)
        self.tb.connect(src,test)
        self.tb.connect((test,0),snk1)
        self.tb.connect((test,1),snk2)
        self.tb.connect((test,2),snk3)
        self.tb.run ()
        # check data: consecutive sample pairs land on outputs 0, 1, 2.
        data1 = ( complex(1,1),complex(2,2) )
        data2 = ( complex(3,3),complex(4,4) )
        data3 = ( complex(5,5),complex(6,6) )
        self.assertTupleEqual(data1,snk1.data())
        self.assertTupleEqual(data2,snk2.data())
        self.assertTupleEqual(data3,snk3.data())
if __name__ == '__main__':
    # Run this QA class through GNU Radio's unittest wrapper (XML report).
    gr_unittest.run(qa_FMCW_separate_IQ_cc, "qa_FMCW_separate_IQ_cc.xml")
| [
"stefan.wunsch@student.kit.edu"
] | stefan.wunsch@student.kit.edu |
48dfce6bf10d4a7f9141eab7bdb034ead039a3da | 69f8272788d6474c15b47d9d0e95445570b04d89 | /qq_login.py | 7daae349086c8beaf1e9c0a7a105e43b5e31af42 | [] | no_license | songting77/d3 | d3167dfef18867264a3419256b7c1f190a57b3fc | ba98a7ed29a67009061b51cd9dccdca05fb4c17d | refs/heads/master | 2020-03-29T01:47:14.715668 | 2018-09-19T06:58:06 | 2018-09-19T06:58:06 | 149,406,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | def login_by_qq(self,user,password):
pass | [
"1820440070@qq.com"
] | 1820440070@qq.com |
2e612ae69448a10737689a3bfeb630b822833fd5 | bc008babe45f3703ed4d77b767a5dd7335bf7136 | /duckql/functions/tests/test_sum.py | 90243c8cdfdee22cc41d98f05c4bf5f573ff1554 | [
"MIT"
] | permissive | Sibyx/duckql-python | 4da8c7052f43d4d41d3867ac83d8e88972f82124 | ec82b683e929760f4f725f09c0603c68df17ba3b | refs/heads/master | 2023-08-09T18:21:33.488797 | 2022-06-09T09:29:03 | 2022-06-09T09:29:03 | 254,049,497 | 5 | 1 | MIT | 2023-07-20T11:35:28 | 2020-04-08T09:56:01 | Python | UTF-8 | Python | false | false | 294 | py | from duckql.functions.sum import Sum
from duckql.properties.property import Property
def test_simple():
    """Rendering a Sum with an alias yields "SUM(column) AS alias"."""
    total_amount = Sum(
        alias='total_amount',
        property=Property(name='transactions.amount'),
    )
    rendered = str(total_amount)
    assert rendered == 'SUM(transactions.amount) AS total_amount'
| [
"jakub.dubec@gmail.com"
] | jakub.dubec@gmail.com |
cd6e2ee6dd4b2426d88995adb36e5d9a9495f85c | 005c8f3b475375c6ee3fd11b47a5363c43b27f43 | /maccorcyclingdata/testdata.py | 585d3a2ed217fc71fb28ae8028289b30ead940a0 | [
"MIT"
] | permissive | gillboy1989/maccorcyclingdata | e81150d3ad957d04aee9a803b70310772f65dab0 | c06f88cf28a2f58b3a731acae064aa696f18bada | refs/heads/master | 2023-07-05T07:43:05.057446 | 2021-04-01T21:49:12 | 2021-04-01T21:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,268 | py | import pandas as pd
import numpy as np
import os
def import_maccor_data(file_path , file_name, header=0):
    """Read a single Maccor test-data CSV and return it cleaned.

    Parameters
    ----------
    file_path : str
        Directory containing the file (must end with a path separator,
        matching the original call convention, e.g. 'example_data/').
    file_name : str
        Name of the CSV file.
    header : int, optional
        Row number to use as the CSV header (default 0).

    Returns
    -------
    pandas.DataFrame
        The cleaned test data (see ``clean_maccor_df``).

    Raises
    ------
    TypeError
        If any argument has the wrong type.
    NotADirectoryError
        If the combined path does not exist.
    """
    if not isinstance(file_path, str):
        raise TypeError('file path must be a string')
    if not isinstance(file_name, str):
        raise TypeError('file name must be a string')
    if not isinstance(header, int):
        raise TypeError('header must be an integer')
    full_path = file_path + file_name
    if not os.path.exists(full_path):
        raise NotADirectoryError("The path " + str(full_path) + " not found")
    raw = pd.read_csv(full_path, header=int(header))
    return clean_maccor_df(raw)
def import_multiple_csv_data(file_path):
    """Import and concatenate every CSV file found under *file_path*.

    Files are appended in sorted filename order (per directory visited by
    ``os.walk``) and the combined frame is cleaned with ``clean_maccor_df``.

    Parameters
    ----------
    file_path : str
        Directory containing the CSV files (must end with a path
        separator, matching the original call convention).

    Returns
    -------
    pandas.DataFrame
        All CSV files concatenated and cleaned.

    Raises
    ------
    TypeError
        If *file_path* is not a string.
    NotADirectoryError
        If *file_path* does not exist.
    """
    if not isinstance(file_path, str):
        raise TypeError('file path must be a string')
    if not os.path.exists(file_path):
        raise NotADirectoryError("The path " + str(file_path) + " not found")
    frames = []
    for root, dirs, files in os.walk(file_path):
        # NOTE(review): files found in subdirectories are still opened as
        # file_path + name, so nested files only load correctly when they
        # also exist directly under file_path -- confirm intended layout.
        csv_files = sorted(f for f in files if f.endswith('.csv'))
        for name in csv_files:
            frames.append(pd.read_csv(file_path + name, header=0))
    # A single pd.concat replaces the original DataFrame.append-in-a-loop
    # pattern, which is quadratic and removed in pandas >= 2.0.
    df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    return clean_maccor_df(df)
def clean_maccor_df(df):
    """Normalize a raw Maccor dataframe.

    Drops the unused optional columns, strips thousands-separator commas,
    renames the remaining 10 columns to the canonical headers, and coerces
    the numeric columns.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw test data (at most 13 columns).

    Returns
    -------
    pandas.DataFrame
        Dataframe with the canonical 10 columns and numeric dtypes.

    Raises
    ------
    TypeError
        If *df* is not a pandas dataframe.
    IndexError
        If *df* has 14 or more columns.
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError('input must be a pandas dataframe')
    if not len(df.columns) < 14:
        raise IndexError("Pandas dataframe can have 14 columns max")
    # Discard optional columns the analysis never uses.
    for unused in ('Watt-hr', 'ACR', 'DCIR', 'Unnamed: 13'):
        if unused in df.columns:
            df = df.drop(columns=[unused])
    # Embedded commas would break the numeric conversion below.
    df.replace(',', '', regex=True, inplace=True)
    df.columns = ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah',
                  'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c',
                  'ev_temp']
    numeric_cols = ["cyc", "step", "test_time_s", "capacity_mah", "current_ma",
                    "voltage_v", "thermocouple_temp_c", "ev_temp"]
    df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)
    return df
def delete_cycle_steps(df, steps_to_delete, decrement=False):
    """Remove every row of *df* whose cycle step is in *steps_to_delete*.

    Parameters
    ----------
    df : pandas.DataFrame
        The testdata dataframe (canonical 10 columns).
    steps_to_delete : list
        Step numbers to delete.  The caller's list is left untouched
        (the original sorted it in place).
    decrement : bool
        If True, shift the remaining step numbers down to close the gaps
        left by the deleted steps.

    Returns
    -------
    pandas.DataFrame
        Dataframe with the rows removed and the index reset.

    Raises
    ------
    TypeError
        If any argument has the wrong type.
    IndexError
        If *df* does not have exactly the 10 canonical columns.
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df input must be a pandas dataframe')
    if not isinstance(steps_to_delete, list):
        raise TypeError('steps_to_delete input must be a list')
    if not isinstance(decrement, bool):
        raise TypeError('decrement input must be a boolean')
    if not len(df.columns) == 10:
        raise IndexError("Pandas dataframe must have 10 columns")
    if (df.columns.tolist() != ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']):
        raise IndexError("Pandas dataframe must have these columns: ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']")
    for step in steps_to_delete:
        df = df.drop(df.index[df['step'] == step])
    if decrement:
        # Iterate a sorted copy: the original called steps_to_delete.sort()
        # and mutated the caller's list (bug fix).
        for step in sorted(steps_to_delete, reverse=True):
            to_be_shifted = df.index[df['step'] > step]
            if len(to_be_shifted) == 0:
                # The deleted step was the largest remaining one; nothing to
                # renumber (the original crashed on min() of an empty set).
                continue
            gap = min(df['step'][to_be_shifted].values) - step
            df.loc[to_be_shifted, 'step'] = df['step'][to_be_shifted].values - gap
    df = df.reset_index(drop=True)
    return df
def get_index_range(df, cyc_range, cycle_step_idx = []):
    """Return the positional row indices for the requested cycle range.

    Parameters
    ----------
    df : pandas.DataFrame
        The testdata dataframe (canonical 10 columns).
    cyc_range : list
        Either [cycle] or [first_cycle, last_cycle] (inclusive).
    cycle_step_idx : list, optional
        When non-empty, only rows of the FIRST listed step are returned
        (further entries are ignored, matching the original behaviour).

    Returns
    -------
    numpy.ndarray
        Positional indices of the matching rows.

    Raises
    ------
    TypeError / IndexError
        On bad argument types or a dataframe without the 10 canonical
        columns.
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df input must be a pandas dataframe')
    if not isinstance(cyc_range, list):
        raise TypeError('cyc_range input must be a list')
    if not isinstance(cycle_step_idx, list):
        raise TypeError('cycle_step_index input must be a list')
    if not len(df.columns) == 10:
        raise IndexError("Pandas dataframe must have 10 columns")
    if (df.columns.tolist() != ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']):
        raise IndexError("Pandas dataframe must have these columns: ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']")
    if len(cycle_step_idx) > 0:
        # Restrict to one step: collect matches cycle by cycle.
        step = cycle_step_idx[0]
        index_range = []
        if len(cyc_range) > 1:
            # '+ 1' keeps the upper cycle inclusive.
            for cyc in range(cyc_range[0], cyc_range[1] + 1):
                matches = np.where((df['cyc'] == cyc) & (df["step"] == step))[0][:]
                index_range = np.append(index_range, matches)
        else:
            matches = np.where((df['cyc'] == cyc_range[0]) & (df["step"] == step))[0][:]
            index_range = np.append(index_range, matches)
    else:
        # Whole cycles: one vectorised range test.
        lo = cyc_range[0]
        hi = cyc_range[1] if len(cyc_range) > 1 else cyc_range[0]
        index_range = np.where(np.logical_and(df['cyc'] >= lo, df['cyc'] <= hi))[0][:]
    return index_range
def get_cycle_data(df, Headings , cyc_range, cycle_step_idx=[]):
    """Collect the columns named in *Headings* for the requested cycles.

    Parameters
    ----------
    df : pandas.DataFrame
        The testdata dataframe (canonical 10 columns).
    Headings : list
        Column names to extract (returned as float64).
    cyc_range : list
        Either [cycle] or [first_cycle, last_cycle] (inclusive).
    cycle_step_idx : list, optional
        When non-empty, restrict to that cycle step (see
        ``get_index_range``).

    Returns
    -------
    pandas.DataFrame
        Columns 'cyc', 'step' plus one float column per requested heading.
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df input must be a pandas dataframe')
    if not isinstance(Headings, list):
        raise TypeError('Headings input must be a list')
    if not isinstance(cyc_range, list):
        raise TypeError('cycle_range input must be a list')
    if not isinstance(cycle_step_idx, list):
        raise TypeError('cycle_step_index input must be a list')
    if not len(df.columns) == 10:
        raise IndexError("Pandas dataframe must have 10 columns")
    if (df.columns.tolist() != ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']):
        raise IndexError("Pandas dataframe must have these columns: ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']")
    # Positional indices of the requested cycles / step.
    index_range = get_index_range(df, cyc_range, cycle_step_idx)
    np.set_printoptions(suppress=True)
    data_df = pd.DataFrame()
    data_df['cyc'] = df['cyc'][index_range].values
    data_df['step'] = df['step'][index_range].values
    # Stage each requested column through a float buffer, mirroring the
    # original np.zeros array (so the output columns are float64).
    buffer = np.zeros([len(index_range), len(Headings)])
    for col, heading in enumerate(Headings):
        buffer[:, col] = df[heading][index_range]
        data_df[heading] = buffer[:, col]
    return data_df
def get_num_cycles(df):
    """Return the number of cycles in *df*.

    Assumes cycle numbering starts at 0, so the count is max(cyc) + 1.

    Parameters
    ----------
    df : pandas.DataFrame
        The testdata dataframe (canonical 10 columns).

    Returns
    -------
    int
        Number of cycles.

    Raises
    ------
    TypeError / IndexError
        On a non-dataframe input or a dataframe without the 10 canonical
        columns.
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df input must be a pandas dataframe')
    if not len(df.columns) == 10:
        raise IndexError("Pandas dataframe must have 10 columns")
    if (df.columns.tolist() != ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']):
        raise IndexError("Pandas dataframe must have these columns: ['cyc', 'step', 'test_time_s', 'step_time_s', 'capacity_mah', 'current_ma', 'voltage_v', 'dpt_time', 'thermocouple_temp_c', 'ev_temp']")
    return int(max(df['cyc'])) + 1
| [
"shriyachallam10@gmail.com"
] | shriyachallam10@gmail.com |
d01146525edde6003f1dd431e050dd76dea16973 | 148fd2231722077091b4595d406a6b5ffc87d1af | /clase7/urls.py | 390ace00cdc816d0cd62a4f47b40afac4b6b354e | [] | no_license | lvaldivia/django-integracion | 507f4e3d842a43d82abfb9e694ae7ffee38e9229 | 47c42e197f6dae88ee41e824019079b28dd9f053 | refs/heads/master | 2016-09-14T14:22:04.223655 | 2016-05-02T20:34:53 | 2016-05-02T20:34:53 | 57,919,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | """djangoexample URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Every other URL is delegated to the app_facebook app's URLconf.
    url(r'^', include('app_facebook.urls')),
] | [
"valdivialuis1989@gmail.com"
] | valdivialuis1989@gmail.com |
a2c28ec47fbd74a3159ca3a127c49e89addf2c7d | 7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f | /proj1/tests/other-tests/MINZ_tests/correct/dictionary.py | 3594fcbfb30b0ee6df0cd44083dd7c263c58907c | [] | no_license | czchen1/cs164-projects | 0d330efef85421e611a436b165428ba0ddfb3512 | a04cafbcaafd32e518227dacf89a6d7837bf9f57 | refs/heads/master | 2020-03-27T04:03:31.727524 | 2018-08-23T21:43:46 | 2018-08-23T21:43:46 | 145,909,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | dict = {'value' : 1, 'abc' : "aba", "ab" : "ababa", "abc" : 2}
print(dict["ab"]) #Expect 'ababa'
| [
"czchen@mit.edu"
] | czchen@mit.edu |
8fffb2612b763989d4dd187c65f7f73516c2751d | 8c169510f047c7b3997e381e888adafce5fa91ac | /Integralregning.py | 52fcf8d520475c529f463645fa2ee0e6c47d1a82 | [] | no_license | casper1357/GRAFER | 7fafe90fd5c11a1573f6fdd3c63468888395a097 | 69f9580dfee12017f27cea93872c2316691357e2 | refs/heads/main | 2023-01-22T20:40:58.229044 | 2020-11-24T20:01:40 | 2020-11-24T20:01:40 | 315,739,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import sympy
class Integralregning():
    """Numerical integration (Danish: "integralregning") of a SymPy
    expression over [a, b] using *columns* rectangular strips."""

    def __init__(self, a, b, columns):
        self.a = a  # lower integration bound
        self.b = b  # upper integration bound
        self.columns = columns  # number of rectangles in the Riemann sum

    def func(self, x, forskrift):
        """Evaluate the expression string *forskrift* at the value *x*."""
        y = sympy.sympify(forskrift).subs(dict(x=x))
        # Side effect: remembers the last expression evaluated.
        self.forskrift = forskrift
        return y

    def area(self, forskrift):
        """Right-endpoint Riemann sum of *forskrift* over [a, b].

        NOTE(review): strips with a negative value are skipped, so only
        area above the x-axis is accumulated -- confirm this is the
        intended definition of "area" here.
        """
        self.areaoffunc = 0
        DeltaX = (self.b - self.a) / int(self.columns)
        n = 0
        x = self.a
        while True:
            # Advance first, so each strip is evaluated at its right edge.
            x += DeltaX
            y = self.func(x, forskrift)
            columnareal = y * DeltaX
            if columnareal > 0:
                self.areaoffunc += float(columnareal)
            else:
                pass
            n += 1
            if n >= self.columns:
                print("Arealet for integralkurven er:", self.areaoffunc)
                return self.areaoffunc
| [
"noreply@github.com"
] | noreply@github.com |
e0935743f7688c9951a2d83812994aded07c6dba | ce378bf28153d4d30cd53ec8684e8017abd0ac59 | /pythonProject/leetcode/Rotate Array.py | abac0295ceee22ace5ca239c758306f05baeca4e | [] | no_license | zzong2006/coding-problems-study | 5f006b39264cbe43d11db489ce8b716272329b6e | 9b3affbeb2ddfa673c1d879fb865408e34955c5c | refs/heads/master | 2023-04-07T12:47:41.646054 | 2021-04-08T05:02:33 | 2021-04-08T05:02:33 | 286,918,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: None Do not return anything, modify nums in-place instead.
"""
print(nums)
n = len(nums)
k %= n
for i in range(n // 2):
nums[i], nums[n - i - 1] = nums[n - i - 1], nums[i]
print(nums)
for i in range(k // 2):
nums[i], nums[k - i - 1] = nums[k - i - 1], nums[i]
print(nums)
for i in range(k, (n + k - 1) // 2 + 1):
nums[i], nums[n - i + k - 1] = nums[n - i + k - 1], nums[i]
print(nums)
# Ad-hoc smoke test: rotate 1..9 right by three positions.
a = Solution()
a.rotate([1, 2, 3, 4, 5, 6, 7, 8, 9], k=3)
| [
"zzong2006@gmail.com"
] | zzong2006@gmail.com |
e609fe4e192768341a6bcb783b7f365c59f21f10 | f0ddff43f4a4eec7226e5a0b0fd47dec72d7a9ab | /ImageViewer.py | 5185d2e5636caa63dc8e3c47ad8b162d3c88693a | [] | no_license | LinhIThust/Image-Processing | 3638d0b0ba6d1c0ae332af7c9d17cd6a5c25f84d | 96bbe51bd4b81d844d717b6117e7c998b90afd2b | refs/heads/master | 2022-11-16T16:17:09.922758 | 2020-06-30T17:27:41 | 2020-06-30T17:27:41 | 277,751,258 | 1 | 0 | null | 2020-07-07T07:46:00 | 2020-07-07T07:45:59 | null | UTF-8 | Python | false | false | 1,019 | py | from PyQt5 import QtGui, QtCore, QtWidgets
class ImageViewer(QtWidgets.QMainWindow):
    """Minimal Qt image viewer: a QLabel inside a scroll area, with every
    loaded image scaled to a fixed 800x1200 pixmap."""

    def __init__(self):
        super(ImageViewer, self).__init__()
        # Fixed size every loaded image is scaled to (width x height).
        self.STANDARD_WIDTH = 800
        self.STANDARD_HEIGHT = 1200
        self.imageLabel = QtWidgets.QLabel()
        self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
        self.imageLabel.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
        self.imageLabel.setScaledContents(True)
        self.scrollArea = QtWidgets.QScrollArea()
        self.scrollArea.setBackgroundRole(QtGui.QPalette.Dark)
        self.setCentralWidget(self.scrollArea)
        self.setWindowTitle("Image Viewer")
        # Path of the currently displayed image ('' until set_image()).
        self.image = ''

    def set_image(self, file_path):
        """Load *file_path*, scale it to the standard size, and show it."""
        self.image = file_path
        pixmap = QtGui.QPixmap(file_path).scaled(self.STANDARD_WIDTH, self.STANDARD_HEIGHT)
        self.imageLabel.setPixmap(pixmap)
        self.scrollArea.setWidget(self.imageLabel)
        # Resize the window to match the scaled pixmap.
        self.resize(pixmap.width(), pixmap.height())
| [
"hieu.dm161505@sis.hust.edu.vn"
] | hieu.dm161505@sis.hust.edu.vn |
1a9fa3e8dcf8c60490f47495a2566b6a1f32a92a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_009/ch90_2019_10_02_18_22_03_037134.py | fcae0603fe15fc773b6d8deebd33737ee6754ef6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | def segundos_entre(x,y):
t1 = datetime.strptime(x, "%H:%M:%S")
t2 = datetime.strptime(y, "%H:%M:%S")
t2 - t1
a = (t2 - t1).seconds
return f'A diferenรงa entre os horรกrios {x} e {y} รฉ: {a} segundos'
| [
"you@example.com"
] | you@example.com |
da4842483d6af9d811d81589b759060682fadc9f | f3cbbf2bdefc2fe55fd32157e7c4eef7721b0ab5 | /demo/venv/Scripts/easy_install-3.7-script.py | 868a706a8033bec2c0dd67dd8800bac3e47b6580 | [] | no_license | 2474942479/BigData | bcca2aef8b8a258470a87351f5ceba800174f50b | 6255fdf90c34a7a56e121cacdd8bd008aeab7185 | refs/heads/master | 2022-12-05T01:12:20.017030 | 2020-07-30T02:33:32 | 2020-07-30T02:33:32 | 283,649,482 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | #!E:\Pycharm\projects\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.py"/".exe" wrapper suffix so
    # setuptools resolves the console-script entry point by its bare name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"2474942479@qq.com"
] | 2474942479@qq.com |
0ac97ca2e94f840dc49c2017c053199926019612 | b522898c49f858cf7367c85effa862504a18c9d2 | /chapter3/section5/click_edit.py | bf6481aeed47eed2e8f3f440a4c5b7247156ec04 | [] | no_license | kingleoric2010/python_for_linux_system_administration | 42b9a48fc843e09f1db691f7a362e8cb0aa5df36 | 66fc0c198b9c2372cb651883a56abf8164325734 | refs/heads/master | 2021-06-26T06:07:01.454146 | 2017-09-14T12:39:29 | 2017-09-14T12:39:29 | 103,648,255 | 1 | 0 | null | 2017-09-15T11:10:36 | 2017-09-15T11:10:36 | null | UTF-8 | Python | false | false | 98 | py | from __future__ import print_function
import click
# Open the user's editor via click.edit() and echo the result verbatim,
# without print() appending a trailing newline.
# NOTE(review): click.edit() may return None when the editor is closed
# without saving -- confirm that printing "None" is acceptable here.
message = click.edit()
print(message, end="")
| [
"me@mingxinglai.com"
] | me@mingxinglai.com |
0880800ff1d16a0cdd536c87b0f5d15ae629547d | aac761dd8c8497daf03abc05bc745273fca77625 | /presence/migrations/0001_initial.py | aa2464b5234fc0d3db6848b21c7a75e77b550815 | [] | no_license | zeke1806/gestion_presence | 414a37e96e41ee516b8d353ccd87ea1c15cb7200 | d6d8af545040245c59e897abd40f93101c35674f | refs/heads/master | 2022-12-04T18:40:53.895297 | 2020-02-06T23:24:20 | 2020-02-06T23:24:20 | 227,303,390 | 0 | 0 | null | 2022-11-22T04:53:45 | 2019-12-11T07:29:37 | Python | UTF-8 | Python | false | false | 4,209 | py | # Generated by Django 3.0 on 2019-12-11 09:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `presence` app (auto-generated by Django).

    Creates the Appartenir, Categorie, Etudiant, Individu, Matiere,
    Responsable, GroupeParticipant and Evenement tables, then adds the
    foreign keys that could not be declared inline.
    """
    # First migration of the app: no prior state to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Appartenir',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Categorie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom_categorie', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Etudiant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_matricule', models.IntegerField()),
                ('niveau', models.CharField(max_length=255)),
                ('parcours', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Individu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(max_length=255)),
                ('prenom', models.CharField(max_length=255)),
                ('cin', models.CharField(blank=True, max_length=255)),
                ('faceId', models.ImageField(upload_to='photos/')),
            ],
        ),
        migrations.CreateModel(
            name='Matiere',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom_matiere', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Responsable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code_responsable', models.CharField(max_length=255)),
                ('individu', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='presence.Individu')),
            ],
        ),
        migrations.CreateModel(
            name='GroupeParticipant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom_groupe_participant', models.CharField(max_length=255)),
                ('membres', models.ManyToManyField(related_name='_groupeparticipant_membres_+', through='presence.Appartenir', to='presence.Etudiant')),
            ],
        ),
        migrations.CreateModel(
            name='Evenement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_debut', models.DateTimeField()),
                ('date_fin', models.DateTimeField()),
                ('categorie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.Categorie')),
                ('matiere', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='presence.Matiere')),
                ('presences', models.ManyToManyField(related_name='evenements', to='presence.Etudiant')),
            ],
        ),
        # Foreign keys added after all tables exist (forward references).
        migrations.AddField(
            model_name='etudiant',
            name='individu',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='presence.Individu'),
        ),
        migrations.AddField(
            model_name='appartenir',
            name='etudiant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.Etudiant'),
        ),
        migrations.AddField(
            model_name='appartenir',
            name='groupe',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.GroupeParticipant'),
        ),
    ]
| [
"joeloliviersidy@gmail.com"
] | joeloliviersidy@gmail.com |
a05c37f79374dcbb5c055e9eea2fd7f52863f0d4 | 51f3af9642cc13150bbdde47d255eb8daf51fe71 | /lesson3/task2.py | 86b815b6c6ba1ebe8f227c6cc3c4c5fdaec87144 | [] | no_license | Lazemir/Polyanskiy_Artyom_920 | 703479d048ccb1a0171fe64ea5c92c90f2f12a33 | 65878afcae46640ddcd1603b53bc1995cb608eb6 | refs/heads/master | 2020-07-30T10:09:19.147036 | 2019-11-06T05:54:05 | 2019-11-06T05:54:05 | 210,186,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | from graph import *
import random
import math
width, height = windowSize()  # canvas dimensions reported by the graph library's window
def half_ellipse(xc, yc, rx, ry, fi: int):
    """Draw one half of an ellipse centred at (xc, yc), rotated by fi degrees.

    rx and ry are the semi-axes in pixels.  The arc is sampled at one-degree
    steps over 0..180 degrees, each sample is rotated about the centre, and
    the resulting outline is handed to the graph library's polygon().
    """
    theta = fi * math.pi / 180
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    points = []
    for deg in range(181):
        rad = deg * math.pi / 180
        ex = round(rx * math.cos(rad))
        ey = round(ry * math.sin(rad))
        # rotate the sampled point by theta around the centre
        points.append((xc + ex * cos_t - ey * sin_t,
                       yc + ex * sin_t + ey * cos_t))
    polygon(points)
# Backdrop: four horizontal colour bands, lightest sky at the top.
for colour, top, bottom in (
    ('#fed5a2', 0, height // 6),
    ('#fed5c4', height // 6, height // 3),
    ('#fed594', height // 3, height // 2),
    ('#b38694', height // 2, height),
):
    penColor(colour)
    brushColor(colour)
    rectangle(0, top, width, bottom)

# Sun, centred horizontally within the top band.
penColor('#fcee21')
brushColor('#fcee21')
circle(width // 2, height // 6, 50)

# Two jagged mountain ridges, each built from N random-height segments.
N = 20
penColor('#fc9831')
brushColor('#fc9831')
far_ridge = [(0, height // 3)]
for seg in range(N + 1):
    # baseline slopes from height/3 down to height/4, minus random jitter
    far_ridge.append((
        seg * width // N,
        height // 3 - seg * (height // 3 - height // 4) // N - random.randint(0, 100),
    ))
far_ridge.append((width, height // 4))
polygon(far_ridge)

penColor('#ac4334')
brushColor('#ac4334')
near_ridge = [(0, height // 2)]
for seg in range(N + 1):
    near_ridge.append((seg * width // N, height // 2 - random.randint(0, 100)))
near_ridge.append((width, height // 2))
polygon(near_ridge)

# One to three arch openings along the front ridge's base line.
for _ in range(random.randint(1, 3)):
    slots = random.randint(2, 7)
    slot = random.randint(1, slots - 1)
    half_ellipse(slot * width // slots, height // 2, 50, 100, 180)

run()
| [
"lazemir@yandex.ru"
] | lazemir@yandex.ru |
5d61d4cc761935504ca40d2a917658d404c405d7 | cee6549aea7069adea1518666228392a92e704b2 | /accounts/models.py | f80e6854f221d04ce0ca716661b80234daf527cd | [] | no_license | lamyar96/grad | 9119e122d409f7942f2c6c773973b0f9dc073190 | 4a8ad0afedfd9a13588c55ef9d22e16d134cbea2 | refs/heads/master | 2021-01-12T14:33:38.620012 | 2016-12-12T17:11:31 | 2016-12-12T17:11:31 | 72,015,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from userena.models import UserenaBaseProfile
class Profile(UserenaBaseProfile):
    """Extended user profile, attached one-to-one to Django's auth User."""

    # on_delete=CASCADE matches Django's historical default and is required
    # syntax from Django 2.0 on; unique=True was dropped because it is a
    # no-op on OneToOneField (system check fields.W342).
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    first_name = models.CharField(max_length=50)
    middle_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    #email = models.EmailField(max_length=70, unique=True) already in django
    # max_length is invalid on IntegerField (system check fields.W122) and was
    # silently ignored, so it is removed.  NOTE(review): a CharField would suit
    # phone numbers better (leading zeros, '+'), but that needs a migration.
    mobile = models.IntegerField()
    major = models.CharField(max_length=50)
    interests = models.TextField()
    cv = models.FileField(upload_to='CVs')
| [
"Lamya@lamyas-MacBook-Pro.local"
] | Lamya@lamyas-MacBook-Pro.local |
6796233cc8e0d68532199b60208872e887b79dbe | 8af6f0195e94908482ca7236bcd2eae382605fa7 | /python3code/chapter03/fibs.py | 82488642ecd1ea7d7ff1edce7bf88be46820530f | [] | no_license | MeatStack/StarterLearningPython | 4a1e0fc94c4615022ba9ff41455c4e67bd16a5bd | 98f0a9028f40db189cf2636a5e0c3abbcd86f71d | refs/heads/master | 2020-03-23T16:21:02.884442 | 2018-07-21T11:24:11 | 2018-07-21T11:24:11 | 141,805,470 | 1 | 0 | null | 2018-07-21T11:15:42 | 2018-07-21T11:15:42 | null | UTF-8 | Python | false | false | 191 | py | # coding=utf-8
'''
filename: fibs.py
'''
def fibs(n):
    """Return a list of the first n Fibonacci numbers: 0, 1, 1, 2, 3, ...

    Args:
        n: number of terms to produce; n <= 0 yields an empty list.

    The original implementation always seeded the result with [0, 1], so
    fibs(0) and fibs(1) wrongly returned two terms; the seed is now sliced
    so exactly n terms come back.
    """
    result = [0, 1][:max(n, 0)]
    for _ in range(n - 2):
        result.append(result[-2] + result[-1])
    return result
# Demo: show the first ten Fibonacci numbers.
first_ten = fibs(10)
print(first_ten)
| [
"qiwsir@gmail.com"
] | qiwsir@gmail.com |
e2141bfbe1940d48e60d545306ad35b1aa55f3e8 | 60f3c767c9f1a700c9e67dac606b8ee3bc46450d | /example.py | bb8e0450c336caa9837456280eb09470e3379615 | [] | no_license | codesharedot/Quadratic-Line-Chart-Sandra | 57b999e12d7ae20b3f907697b2f739c64a45db11 | 9e4eae6d10fc4001464a80de7c7cf5c4e2d6b115 | refs/heads/master | 2020-07-26T12:24:34.892400 | 2019-09-15T19:04:04 | 2019-09-15T19:04:04 | 208,642,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import matplotlib.pyplot as plt
import numpy as np
# Plot y = 9x^2 over [-1, 1] as a thick cyan curve and write it to chart.png.
xs = np.linspace(-1, 1, 50)
ys = 9 * xs * xs
plt.plot(xs, ys, 'c-', linewidth=10)
plt.savefig('chart.png')
"codeto@sent.com"
] | codeto@sent.com |
5844507275c3bd7504ede191ad80d897ac4386c6 | 5cd042c36162b3a5230c95de21d0976f0b47e6c4 | /quizapp/quiz/migrations/0001_initial.py | f5ecaed19b7e4bcc42bd1e206149444083505591 | [] | no_license | pankajsp25/quiz | ff6af257943b0b38d2b80f4ab129718f1b3a0b1f | 59c04c2d2410bb400f2db0a1cbc85889b3c8098f | refs/heads/master | 2022-12-16T00:41:46.678501 | 2020-09-17T13:10:53 | 2020-09-17T13:10:53 | 296,296,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # Generated by Django 3.1.1 on 2020-09-17 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the quiz app (Option and Question).

    Generated by Django 3.1.1; normally not edited by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # A single answer choice; referenced both by a question's option set
        # and by its correct-answer pointer.
        migrations.CreateModel(
            name='Option',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=500)),
            ],
        ),
        # A quiz question: its text, a FK to the correct Option (reverse
        # accessor 'questions'), and an M2M to all candidate Options.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=500)),
                ('correct_ans', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='quiz.option')),
                ('options', models.ManyToManyField(to='quiz.Option')),
            ],
        ),
    ]
| [
"pankaj.gupta@ingrammicro.com"
] | pankaj.gupta@ingrammicro.com |
20fddd6c18c901b9d7e1b3372851a7f4531f018d | c741f04141784a2571d2d27d95e0d994e4584ab1 | /learning/ask/1/post.py | ab38ef683317efbfc0792dcf7434195069199bd4 | [] | no_license | haodonghui/python | bbdece136620bc6f787b4942d6e1760ed808afd4 | 365062ba54297c81093b7f378742e76d438658b7 | refs/heads/master | 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | """
Sending form data.

Typically, you want to send some data encoded as a form - much like an
HTML form. To do this, simply pass a dictionary to the `data` argument;
the dictionary is automatically form-encoded when the request is made.
"""
import requests
# ๆต่ฏๅๅ
# domain_name='https://test-api.yestae.com/api'
# ๅผๅๅๅ
# domain_name='http://hdh.tae-tea.net/yestae-community-api'
domain_name = 'http://localhost/yestae-community-api'
# ่ทๅๆดปๅจ่ฏฆๆ
url = domain_name + '/api/TP0001'
# payload = {'activityId': '5ce384680b76d7812af1bab4', 'sign': '955e2772300ca1f8d614e13d8438538d',
# 'location': '{"lon":116.353408,"lat":40.083555}', 'uid': '', 'sid': '', }
payload = {'activityId': '5ce3b39222ec00c3f8b779c2', 'sign': '5482ca4ab93ae7299d684ac8abe8aec0',
'location': '{"lon":116.353408,"lat":40.083555}', 'uid': '', 'sid': '', }
r = requests.post(url, data=payload)
print(r.status_code)
print(r.json())
# ๆดปๅจๆฅๅ
# url = domain_name+'/api/TP0002'
#
# payload = {'uid': '1123058830953328642', 'activityId': '5ce384680b76d7812af1bab4', 'num': 1,
# 'sign': 'ca436df0f9bbb4a11fda587c88ee1c66', }
# r = requests.post(url, data=payload)
# print(r.status_code)
# print(r.json())
#
# # ่ทๅๆดปๅจๅ่กจ
# url = url = domain_name+'/api/TP0003'
#
# payload = {'uid': '1123058830953328642', 'sign': '83ff323b2efa5b042b876bc524f6175f', }
# r = requests.post(url, data=payload)
# print(r.status_code)
# print(r.json())
#
# # ่ทๅwxๆ้้ช่ฏ้
็ฝฎๅฑๆง
# url = url = domain_name+'/api/TP0004'
#
# payload = {'jsurl': 'http://localhost:8080', 'sign': '9fe22d86c2cb4b60a9ad5fcc35b04ca9', 'key3': None}
# r = requests.post(url, data=payload)
# print(r.status_code)
# print(r.json())
| [
"h_donghui@sian.cn"
] | h_donghui@sian.cn |
dcb8c24edec90330787d2c6983a98b1414b258e9 | efad409db922d4542c98aaef2d583fa683c3bd7c | /parsetab.py | 24cc938b407362aaace2e3543cc3ff6f9b1ec358 | [] | no_license | quano2/MATLAB-to-Python-Translator | a071445ccb6e00a76c107a12e334460ef84a5fbd | 6dec102bfd7c96b6116e75ddaad6db4845ccb993 | refs/heads/master | 2021-01-22T14:16:06.209450 | 2015-04-20T04:41:08 | 2015-04-20T04:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,646 | py |
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = b']u\xb9\xc3,\xbf\xe8\xa0?8K;\xae\xceW\x9d'
_lr_action_items = {'$end':([0,2,6,7,8,9,10,12,13,14,25,30,31,36,37,39,45,46,47,60,96,104,105,106,108,109,113,115,162,163,164,176,177,189,190,196,197,204,205,209,212,215,218,229,231,232,238,],[-1,-14,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,-2,0,-42,-62,-34,-55,-33,-32,-52,-85,-41,-27,-30,-3,-6,-53,-8,-7,-4,-6,-40,-31,-35,-63,-5,-44,-66,-67,-71,-73,-45,-72,]),'DOTDIV':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,64,-83,-80,-84,-75,64,64,-89,64,-101,-102,-88,64,-91,64,-95,64,-113,64,64,-111,64,-114,64,64,64,64,64,64,64,-104,64,64,-106,-112,-117,-115,64,64,64,-123,-109,-75,-92,-93,-98,-100,-96,64,-103,-105,64,64,-94,-99,-97,64,64,64,]),'STRING':([0,2,3,6,7,8,9,10,11,12,13,14,15,21,24,25,27,30,31,33,34,36,37,39,40,41,46,47,48,49,50,51,52,53,54,55,60,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,96,104,105,106,108,109,110,112,113,115,116,117,118,119,120,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,196,197,199,200,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[23,-14,23,-54,-43,-12,-19,-20,54,-10,-9,-15,23,23,23,-13,23,-17,-11,23,23,-16,-18,23,23,23,-42,-62,23,23,-22,-24,-25,-23,-21,54,-34,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,-55,-33,-32,23,-85,-41,23,-26,-27,-30,23,23,23,-64,-65,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,-40,-31,23,23,23,-35,-63,23,23,-44,-66,-67,23,23,23,23,23,-71,23,-73,-45,23,-72,]),'TRY':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[3,-14,3,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,3
,-42,-62,3,-34,-55,-33,-32,3,-85,-41,3,-27,-30,3,3,-64,-65,3,3,3,3,3,3,-40,-31,3,3,-35,-63,3,3,-44,-66,-67,3,3,3,3,3,-71,3,-73,-45,3,-72,]),'DIV':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,67,-83,-80,-84,-75,67,67,-89,67,-101,-102,-88,67,-91,67,-95,67,-113,67,67,-111,67,-114,67,67,67,67,67,67,67,-104,67,67,-106,-112,-117,-115,67,67,67,-123,-109,-75,-92,-93,-98,-100,-96,67,-103,-105,67,67,-94,-99,-97,67,67,67,]),'END':([2,5,6,7,8,9,10,12,13,14,15,16,17,18,22,23,25,26,28,30,31,32,36,37,38,42,46,47,48,56,57,60,62,63,76,80,91,96,98,103,104,105,106,108,109,113,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,153,155,157,159,161,164,168,170,171,172,175,176,177,178,179,180,186,187,188,196,197,198,199,201,202,204,205,212,215,216,218,219,220,221,223,226,227,228,229,231,232,233,234,235,236,237,238,],[-14,-74,-54,-43,-12,-19,-20,-10,-9,-15,-79,-82,-76,-81,-77,-86,-13,-78,-87,-17,-11,-83,-16,-18,-80,-84,-42,-62,108,-75,-90,-34,-89,-6,-101,-102,-88,-55,-91,-95,-33,-32,108,-85,-41,-27,-30,-52,-52,-64,-65,-36,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-92,-93,-98,-100,-96,108,108,-68,-53,-68,108,-8,-7,-103,-105,108,-94,-99,-97,-40,-31,108,-52,108,-39,-35,-63,-44,-66,-69,-67,-52,-52,108,108,-52,-36,-36,-71,-73,-45,-68,-37,-38,108,-70,-72,]),'NUMBER':([0,2,3,6,7,8,9,10,11,12,13,14,15,21,24,25,27,30,31,33,34,36,37,39,40,41,46,47,48,49,50,51,52,53,54,55,60,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,96,104,105,106,108,109,110,112,113,115,116,117,118,119,120,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,196,197,199,200,202,204,205,2
06,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[5,-14,5,-54,-43,-12,-19,-20,50,-10,-9,-15,5,5,5,-13,5,-17,-11,5,5,-16,-18,5,5,5,-42,-62,5,5,-22,-24,-25,-23,-21,50,-34,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,-55,-33,-32,5,-85,-41,5,-26,-27,-30,5,5,5,-64,-65,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,-40,-31,5,5,5,-35,-63,5,5,-44,-66,-67,5,5,5,5,5,-71,5,-73,-45,5,-72,]),'DOTEXP':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,69,-83,-80,-84,-75,69,69,69,69,-101,-102,69,69,-91,69,-95,69,69,69,69,69,69,69,69,69,69,69,69,69,69,-104,69,69,-106,-112,69,69,69,69,69,69,69,-75,-92,-93,-98,-100,-96,69,-103,-105,69,69,-94,-99,-97,69,69,69,]),'FUNCTION':([0,2,6,7,8,9,10,12,13,14,25,30,31,36,37,39,45,46,47,60,96,104,105,106,108,109,113,115,162,163,164,176,177,189,190,196,197,204,205,209,212,215,218,229,231,232,238,],[-1,-14,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,-2,107,-42,-62,-34,-55,-33,-32,-52,-85,-41,-27,-30,-3,-6,-53,-8,-7,-4,-6,-40,-31,-35,-63,-5,-44,-66,-67,-71,-73,-45,-72,]),'MINUS':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,104,105,106,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[33,-14,33,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,33,-82,-76,-81,33,-77,-86,33,-13,-78,33,-87,70,-17,-11,
-83,33,33,-16,-18,-80,33,33,33,-84,-42,-62,33,33,-75,70,-34,70,-89,70,33,33,33,33,33,33,33,33,33,33,33,33,-101,33,33,33,-102,33,33,33,33,33,33,33,33,33,33,-88,70,33,-55,-91,70,-95,-33,-32,33,-85,-41,33,70,-27,-30,33,33,33,-64,-65,-113,-127,70,-111,70,-114,-122,70,70,70,70,70,70,-104,70,70,-106,-112,-117,-115,70,70,70,-123,-109,33,33,-75,33,-92,33,-93,33,-98,33,-100,33,-96,33,33,33,70,33,33,33,-103,-105,33,70,33,70,-94,-99,-97,33,-40,-31,33,33,33,70,-35,-63,33,70,33,-44,-66,70,-67,33,33,33,33,33,-71,33,-73,-45,33,-72,]),'COMMA':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,25,26,28,29,30,31,32,36,37,38,39,42,46,47,48,56,57,60,61,62,63,76,80,91,96,98,101,103,104,105,106,108,109,110,111,113,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,151,153,155,157,159,161,163,164,168,171,173,176,177,178,179,180,186,187,188,189,190,196,197,199,202,203,204,205,206,208,209,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[7,-14,7,49,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,-79,-82,-76,-81,-77,-86,-13,-78,-87,-60,-17,-11,-83,-16,-18,-80,7,-84,-42,-62,7,-75,-90,-34,119,-89,-6,-101,-102,-88,-55,-91,-60,-95,-33,-32,7,-85,-41,7,-61,-27,-30,7,7,-64,-65,176,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,7,-75,-92,-93,-98,-100,-96,-6,7,7,7,7,-8,-7,-103,-105,7,-94,-99,-97,176,-6,-40,-31,7,7,119,-35,-63,7,7,176,-44,-66,119,-67,7,7,7,7,7,-71,7,-73,-45,7,-72,]),'OREQUALS':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,71,-83,-80,-84,-75,-90,71,-89,71,-101,-102,-88,71,-91,71,-95,71,-113,-127,71,-111,-110,-114,-122,71,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-1
20,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,71,-103,-105,71,71,-94,-99,-97,71,71,71,]),'LESSTHAN':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,72,-83,-80,-84,-75,-90,72,-89,72,-101,-102,-88,72,-91,72,-95,72,-113,-127,72,-111,72,-114,-122,72,-121,-125,-107,72,-118,-104,-124,72,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,72,-103,-105,72,72,-94,-99,-97,72,72,72,]),'OR':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,73,-83,-80,-84,-75,73,73,-89,73,-101,-102,-88,73,-91,73,-95,73,-113,-127,73,-111,73,-114,-122,73,73,-125,-107,73,73,-104,73,73,-106,-112,-117,-115,73,73,73,-123,-109,-75,-92,-93,-98,-100,-96,73,-103,-105,73,73,-94,-99,-97,73,73,73,]),'CASE':([2,5,6,7,8,9,10,12,13,14,15,16,17,18,22,23,25,26,28,30,31,32,36,37,38,42,46,47,56,57,60,62,63,76,80,91,96,98,103,104,105,108,109,113,115,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,153,155,157,159,161,171,176,177,178,179,186,187,188,196,197,204,205,215,218,219,220,227,228,229,231,238,],[-14,-74,-54,-43,-12,-19,-20,-10,-9,-15,-79,-82,-76,-81,-77,-86,-13,-78,-87,-17,-11,-83,-16,-18,-80,-84,-42,-62,-75,-90,-34,-89,-6,-101,-102,-88,-55,-91,-95,-33,-32,-85,-41,-27,-30,-64,-65,174,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-92,-93,-98,-100,-96,-53,-8,-7,-103,-105,-94,-99,-97,-40,-31,-35,-63,-66,-67,-52,-52,174,174,-71,-73,-72,]),'AND':([5,11,15,16,17,18,22,23,
26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,74,-83,-80,-84,-75,74,74,-89,74,-101,-102,-88,74,-91,74,-95,74,-113,-127,74,-111,74,-114,-122,74,74,-125,-107,74,74,-104,74,74,-106,-112,-117,-115,74,74,74,-123,-109,-75,-92,-93,-98,-100,-96,74,-103,-105,74,74,-94,-99,-97,74,74,74,]),'error':([5,15,16,17,18,22,23,26,28,32,38,42,56,57,61,62,76,80,91,98,103,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,153,155,157,159,161,178,179,186,187,188,203,],[-74,-79,-82,-76,-81,-77,-86,-78,-87,-83,-80,-84,-75,-90,118,-89,-101,-102,-88,-91,-95,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-92,-93,-98,-100,-96,-103,-105,-94,-99,-97,220,]),'SWITCH':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[27,-14,27,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,27,-42,-62,27,-34,-55,-33,-32,27,-85,-41,27,-27,-30,27,27,-64,-65,27,27,27,27,27,27,-40,-31,27,27,-35,-63,27,27,-44,-66,-67,27,27,27,27,27,-71,27,-73,-45,27,-72,]),'ELSE':([2,6,7,8,9,10,12,13,14,25,30,31,36,37,46,47,60,96,104,105,108,109,113,115,117,118,119,120,170,171,172,196,197,204,205,215,218,226,229,231,233,238,],[-14,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,-42,-62,-34,-55,-33,-32,-85,-41,-27,-30,-52,-52,-64,-65,199,-53,199,-40,-31,-35,-63,-66,-67,-52,-71,-73,199,-72,]),'GREATEREQUAL':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,1
78,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,77,-83,-80,-84,-75,-90,77,-89,77,-101,-102,-88,77,-91,77,-95,77,-113,-127,77,-111,77,-114,-122,77,-121,-125,-107,77,-118,-104,-124,77,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,77,-103,-105,77,77,-94,-99,-97,77,77,77,]),'IDENTIFIER':([0,2,3,6,7,8,9,10,11,12,13,14,15,19,21,24,25,27,30,31,33,34,35,36,37,39,40,41,46,47,48,49,50,51,52,53,54,55,58,59,60,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,96,104,105,106,107,108,109,110,112,113,114,115,116,117,118,119,120,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,195,196,197,199,200,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[11,-14,11,-54,-43,-12,-19,-20,53,-10,-9,-15,56,59,56,56,-13,56,-17,-11,56,56,93,-16,-18,11,56,56,-42,-62,11,56,-22,-24,-25,-23,-21,53,114,-28,-34,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,151,-55,-33,-32,11,165,-85,-41,11,-26,-27,-29,-30,56,11,11,-64,-65,11,56,56,56,56,56,56,11,56,11,11,11,56,11,56,56,214,-40,-31,11,56,11,-35,-63,11,11,-44,-66,-67,11,11,11,11,11,-71,11,-73,-45,11,-72,]),'EQUALEQUAL':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,88,-83,-80,-84,-75,-90,88,-89,88,-101,-102,-88,88,-91,88,-95,88,-113,-127,88,-111,88,-114,-122,88,-121,-125,-107,88,-118,-104,-124,88,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,88,-103,-105,88,88,-94,-99,-97,88,88,88,]),'NOTEQUAL':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,
104,105,106,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[15,-14,15,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,-79,-82,-76,-81,15,-77,-86,15,-13,-78,15,-87,79,-17,-11,-83,15,15,-16,-18,-80,15,15,15,-84,-42,-62,15,15,-75,-90,-34,79,-89,79,15,15,15,15,15,15,15,15,15,15,15,15,-101,15,15,15,-102,15,15,15,15,15,15,15,15,15,15,-88,79,15,-55,-91,79,-95,-33,-32,15,-85,-41,15,79,-27,-30,15,15,15,-64,-65,-113,-127,79,-111,79,-114,-122,79,-121,-125,-107,79,-118,-104,-124,79,-106,-112,-117,-115,-120,-119,-116,-123,-109,15,15,-75,15,-92,15,-93,15,-98,15,-100,15,-96,15,15,15,79,15,15,15,-103,-105,15,79,15,79,-94,-99,-97,15,-40,-31,15,15,15,79,-35,-63,15,79,15,-44,-66,79,-67,15,15,15,15,15,-71,15,-73,-45,15,-72,]),'DOT':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,83,-83,-80,-84,-75,83,83,83,83,-101,-102,83,83,-91,83,-95,83,83,83,83,83,83,83,83,83,83,83,83,83,83,-104,83,83,-106,-112,83,83,83,83,83,83,83,-75,-92,-93,-98,-100,-96,83,-103,-105,83,83,-94,-99,-97,83,83,83,]),'EQUALS':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,59,61,62,63,76,80,91,92,93,95,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,165,167,169,178,179,181,183,186,187,188,194,203,207,213,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,66,-83,-80,-84,-75,-90,116,66,-89,66,-101,-102,-88,66,150,152,-91,66,-95,66,-113,-127,66,-111,-110,-114,-122,66,-121,-125,-107,-
108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,-49,195,66,-103,-105,66,66,-94,-99,-97,-50,66,66,-51,66,]),'GLOBAL':([0,2,3,6,7,8,9,10,11,12,13,14,25,30,31,36,37,39,46,47,48,50,51,52,53,54,55,60,96,104,105,106,108,109,110,112,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[19,-14,19,-54,-43,-12,-19,-20,51,-10,-9,-15,-13,-17,-11,-16,-18,19,-42,-62,19,-22,-24,-25,-23,-21,51,-34,-55,-33,-32,19,-85,-41,19,-26,-27,-30,19,19,-64,-65,19,19,19,19,19,19,-40,-31,19,19,-35,-63,19,19,-44,-66,-67,19,19,19,19,19,-71,19,-73,-45,19,-72,]),'BREAK':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[20,-14,20,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,20,-42,-62,20,-34,-55,-33,-32,20,-85,-41,20,-27,-30,20,20,-64,-65,20,20,20,20,20,20,-40,-31,20,20,-35,-63,20,20,-44,-66,-67,20,20,20,20,20,-71,20,-73,-45,20,-72,]),'IF':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[21,-14,21,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,21,-42,-62,21,-34,-55,-33,-32,21,-85,-41,21,-27,-30,21,21,-64,-65,21,21,21,21,21,21,-40,-31,21,21,-35,-63,21,21,-44,-66,-67,21,21,21,21,21,-71,21,-73,-45,21,-72,]),'TIMES':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,89,-83,-80,-84,-75,89,89,-89,89,-101,-102,-88,89,-91,89,-95,89,-113,89,89,-111,89,-114,89,89,89,89,89,89,89,-104,89,89,
-106,-112,-117,-115,89,89,89,-123,-109,-75,-92,-93,-98,-100,-96,89,-103,-105,89,89,-94,-99,-97,89,89,89,]),'NOT':([0,2,3,6,7,8,9,10,12,13,14,15,21,24,25,27,30,31,33,34,36,37,39,40,41,46,47,48,49,60,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,96,104,105,106,108,109,110,113,115,116,117,118,119,120,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,196,197,199,200,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[38,-14,38,-54,-43,-12,-19,-20,-10,-9,-15,38,38,38,-13,38,-17,-11,38,38,-16,-18,38,38,38,-42,-62,38,38,-34,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,-55,-33,-32,38,-85,-41,38,-27,-30,38,38,38,-64,-65,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,-40,-31,38,38,38,-35,-63,38,38,-44,-66,-67,38,38,38,38,38,-71,38,-73,-45,38,-72,]),'PLUS':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,104,105,106,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[24,-14,24,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,24,-82,-76,-81,24,-77,-86,24,-13,-78,24,-87,65,-17,-11,-83,24,24,-16,-18,-80,24,24,24,-84,-42,-62,24,24,-75,65,-34,65,-89,65,24,24,24,24,24,24,24,24,24,24,24,24,-101,24,24,24,-102,24,24,24,24,24,24,24,24,24,24,-88,65,24,-55,-91,65,-95,-33,-32,24,-85,-41,24,65,-27,-30,24,24,24,-64,-65,-113,-127,65,-111,65,-114,-122,65,65,65,65,65,65,-104,65,65,-106,-112,-117,-115,65,65,65,-123,-109,24,24,-75,24,-92,24,-93,24,-98,24,-100,24,-96,24,24,24,65,24,24,24,-103,-105,24
,65,24,65,-94,-99,-97,24,-40,-31,24,24,24,65,-35,-63,24,65,24,-44,-66,65,-67,24,24,24,24,24,-71,24,-73,-45,24,-72,]),'EXP':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,84,-83,-80,-84,-75,84,84,84,84,-101,-102,84,84,-91,84,-95,84,84,84,84,84,84,84,84,84,84,84,84,84,84,-104,84,84,-106,-112,84,84,84,84,84,84,84,-75,-92,-93,-98,-100,-96,84,-103,-105,84,84,-94,-99,-97,84,84,84,]),'COLON':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,104,105,106,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[28,-14,28,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,-79,-82,-76,-81,28,-77,-86,28,-13,-78,28,-87,68,-17,-11,-83,28,28,-16,-18,-80,28,28,28,-84,-42,-62,28,28,-75,-90,-34,68,-89,68,28,28,28,28,28,28,28,28,28,28,28,28,-101,28,28,28,-102,28,28,28,28,28,28,28,28,28,28,-88,68,28,-55,-91,68,-95,-33,-32,28,-85,-41,28,68,-27,-30,28,28,28,-64,-65,-113,-127,68,-111,-110,-114,-122,68,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,28,28,-75,28,-92,28,-93,28,-98,28,-100,28,-96,28,28,28,68,28,28,28,-103,-105,28,68,28,68,-94,-99,-97,28,-40,-31,28,28,28,68,-35,-63,28,68,28,-44,-66,68,-67,28,28,28,28,28,-71,28,-73,-45,28,-72,]),'RBRACE':([4,5,15,16,17,18,22,23,26,28,29,32,38,40,42,49,56,57,62,76,78,80,91,97,98,9
9,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,140,141,142,143,144,145,146,147,148,153,155,156,157,159,161,178,179,184,185,186,187,188,],[-58,-74,-79,-82,-76,-81,-77,-86,-78,-87,-60,-83,-80,98,-84,-59,-75,-90,-89,-101,136,-102,-88,153,-91,155,-95,-61,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,178,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-92,-93,186,-98,-100,-96,-103,-105,-56,-57,-94,-99,-97,]),'ELSEIF':([2,6,7,8,9,10,12,13,14,25,30,31,36,37,46,47,60,96,104,105,108,109,113,115,117,118,119,120,170,171,172,196,197,204,205,215,218,226,229,231,233,238,],[-14,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,-42,-62,-34,-55,-33,-32,-85,-41,-27,-30,-52,-52,-64,-65,200,-53,200,-40,-31,-35,-63,-66,-67,-52,-71,-73,200,-72,]),'WHILE':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[34,-14,34,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,34,-42,-62,34,-34,-55,-33,-32,34,-85,-41,34,-27,-30,34,34,-64,-65,34,34,34,34,34,34,-40,-31,34,34,-35,-63,34,34,-44,-66,-67,34,34,34,34,34,-71,34,-73,-45,34,-72,]),'OTHERWISE':([2,5,6,7,8,9,10,12,13,14,15,16,17,18,22,23,25,26,28,30,31,32,36,37,38,42,46,47,56,57,60,62,63,76,80,91,96,98,103,104,105,108,109,113,115,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,153,155,157,159,161,171,176,177,178,179,186,187,188,196,197,204,205,215,218,219,220,227,228,229,231,238,],[-14,-74,-54,-43,-12,-19,-20,-10,-9,-15,-79,-82,-76,-81,-77,-86,-13,-78,-87,-17,-11,-83,-16,-18,-80,-84,-42,-62,-75,-90,-34,-89,-6,-101,-102,-88,-55,-91,-95,-33,-32,-85,-41,-27,-30,-64,-65,173,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-92,-93,-98,-100,-96,-53,-8,-7,-103,-105,-94,-99,-97,-40,-3
1,-35,-63,-66,-67,-52,-52,173,173,-71,-73,-72,]),'DOTMUL':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,85,-83,-80,-84,-75,85,85,-89,85,-101,-102,-88,85,-91,85,-95,85,-113,85,85,-111,85,-114,85,85,85,85,85,85,85,-104,85,85,-106,-112,-117,-115,85,85,85,-123,-109,-75,-92,-93,-98,-100,-96,85,-103,-105,85,85,-94,-99,-97,85,85,85,]),'RETURN':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[44,-14,44,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,44,-42,-62,44,-34,-55,-33,-32,44,-85,-41,44,-27,-30,44,44,-64,-65,44,44,44,44,44,44,-40,-31,44,44,-35,-63,44,44,-44,-66,-67,44,44,44,44,44,-71,44,-73,-45,44,-72,]),'ANDAND':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,75,-83,-80,-84,-75,-90,75,-89,75,-101,-102,-88,75,-91,75,-95,75,-113,-127,75,-111,75,-114,-122,75,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,75,-103,-105,75,75,-94,-99,-97,75,75,75,]),'FIELD':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,76,-83,-80,-84,-75,76,76,76,76,-101,-102,76,76,-91,76,-95,76,76,76,76,76,76,76,76,76,76,76,76,76,76,-1
04,76,76,-106,-112,76,76,76,76,76,76,76,-75,-92,-93,-98,-100,-96,76,-103,-105,76,76,-94,-99,-97,76,76,76,]),'TRANSPOSE':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,80,-83,-80,-84,-75,80,80,80,80,-101,-102,80,80,-91,80,-95,80,80,80,80,80,80,-114,80,80,80,80,80,80,80,-104,80,80,-106,-112,-117,80,80,80,80,80,80,-75,-92,-93,-98,-100,-96,80,-103,-105,80,80,-94,-99,-97,80,80,80,]),'OROR':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,81,-83,-80,-84,-75,-90,81,-89,81,-101,-102,-88,81,-91,81,-95,81,-113,-127,81,-111,81,-114,-122,81,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,81,-103,-105,81,81,-94,-99,-97,81,81,81,]),'LBRACE':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,104,105,106,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[40,-14,40,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,40,-82,-76,-81,40,-77,-86,40,-13,-78,40,-87,78,-17,-11,-83,40,40,-16,-18,-80,40,40,40,-84,-42,-62,40,40,-75,78,-34,78,78,78,40,40,40,
40,40,40,40,40,40,40,40,40,-101,40,40,40,-102,40,40,40,40,40,40,40,40,40,40,78,78,40,-55,-91,78,-95,-33,-32,40,-85,-41,40,78,-27,-30,40,40,40,-64,-65,78,78,78,78,78,78,78,78,78,78,78,78,78,-104,78,78,-106,-112,78,78,78,78,78,78,78,40,40,-75,40,-92,40,-93,40,-98,40,-100,40,-96,40,40,40,78,40,40,40,-103,-105,40,78,40,78,-94,-99,-97,40,-40,-31,40,40,40,78,-35,-63,40,78,40,-44,-66,78,-67,40,40,40,40,40,-71,40,-73,-45,40,-72,]),'LBRACKET':([0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,46,47,48,49,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,96,98,101,103,104,105,106,107,108,109,110,111,113,115,116,117,118,119,120,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,164,165,166,168,169,171,173,174,178,179,180,181,182,183,186,187,188,191,196,197,199,200,202,203,204,205,206,207,208,212,214,215,217,218,219,220,221,223,226,229,230,231,232,236,238,],[41,-14,41,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,-79,-82,-76,-81,41,-77,-86,41,-13,-78,41,-87,82,-17,-11,-83,41,41,94,-16,-18,-80,41,41,41,-84,-42,-62,41,41,-75,-90,-34,82,-89,82,41,41,41,41,41,41,41,41,41,41,41,41,-101,41,41,41,-102,41,41,41,41,41,41,41,41,41,41,-88,82,41,-55,-91,82,-95,-33,-32,41,166,-85,-41,41,82,-27,-30,41,41,41,-64,-65,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,41,41,-75,41,-92,41,-93,41,-98,41,-100,41,-96,41,191,41,41,82,41,41,41,-103,-105,41,82,41,82,-94,-99,-97,41,-40,-31,41,41,41,82,-35,-63,41,82,41,-44,191,-66,82,-67,41,41,41,41,41,-71,41,-73,-45,41,-72,]),'CATCH':([2,6,7,8,9,10,12,13,14,25,30,31,36,37,46,47,48,60,96,104,105,108,109,113,115,196,197,204,205,215,218,229,231,238,],[-14,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,-42,-62,110,-34,-55,-33,-32,-85,-41,-27,-30,-40,-31,-35,-63,-66,-67,-7
1,-73,-72,]),'CONTINUE':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[43,-14,43,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,43,-42,-62,43,-34,-55,-33,-32,43,-85,-41,43,-27,-30,43,43,-64,-65,43,43,43,43,43,43,-40,-31,43,43,-35,-63,43,43,-44,-66,-67,43,43,43,43,43,-71,43,-73,-45,43,-72,]),'LESSEQUAL':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,86,-83,-80,-84,-75,-90,86,-89,86,-101,-102,-88,86,-91,86,-95,86,-113,-127,86,-111,86,-114,-122,86,-121,-125,-107,86,-118,-104,-124,86,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,86,-103,-105,86,86,-94,-99,-97,86,86,86,]),'GREATERTHAN':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,87,-83,-80,-84,-75,-90,87,-89,87,-101,-102,-88,87,-91,87,-95,87,-113,-127,87,-111,87,-114,-122,87,-121,-125,-107,87,-118,-104,-124,87,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,-100,-96,87,-103,-105,87,87,-94,-99,-97,87,87,87,]),'FOR':([0,2,3,6,7,8,9,10,12,13,14,25,30,31,36,37,39,46,47,48,60,96,104,105,106,108,109,110,113,115,117,118,119,120,149,164,168,171,173,180,196,197,199,202,204,205,206,208,212,215,218,219,220,221,223,226,229,230,231,232,236,238,],[35,-14,35,-54,-43,-12,-19,-20,-10,-9,-15,-13,-17,-11,-16,-18,35,-42,-62,35,-34,-55,-33,-32,35,-85,-41,35,-27,-30,35,35,-64,-65,35,35,35,35,35,35,-40,-31,35,35,-35,-63,35,35,-44,-66,-67,
35,35,35,35,35,-71,35,-73,-45,35,-72,]),'RBRACKET':([4,5,15,16,17,18,22,23,26,28,29,32,38,41,42,49,56,57,62,76,80,82,91,94,98,100,101,102,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,139,140,141,142,143,144,145,146,147,148,151,153,155,157,158,159,160,161,166,178,179,184,185,186,187,188,191,193,207,210,],[-58,-74,-79,-82,-76,-81,-77,-86,-78,-87,-60,-83,-80,103,-84,-59,-75,-90,-89,-101,-102,140,-88,103,-91,157,159,161,-95,-61,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,179,-106,-112,-117,-115,-120,-119,-116,-123,-109,-75,-92,-93,-98,187,-100,188,-96,194,-103,-105,-56,-57,-94,-99,-97,211,213,222,224,]),'LDIV':([5,11,15,16,17,18,22,23,26,28,29,32,38,42,56,57,61,62,63,76,80,91,92,98,101,103,111,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,151,153,155,157,159,161,169,178,179,181,183,186,187,188,203,207,217,],[-74,-75,-79,-82,-76,-81,-77,-86,-78,-87,90,-83,-80,-84,-75,90,90,-89,90,-101,-102,-88,90,-91,90,-95,90,-113,90,90,-111,90,-114,90,90,90,90,90,90,90,-104,90,90,-106,-112,-117,-115,90,90,90,-123,-109,-75,-92,-93,-98,-100,-96,90,-103,-105,90,90,-94,-99,-97,90,90,90,]),'SEMI':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,23,25,26,28,29,30,31,32,36,37,38,39,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,76,80,91,92,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,112,113,114,115,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,136,137,138,140,141,142,143,144,145,146,147,148,149,151,153,155,157,159,161,163,164,165,168,169,171,173,176,177,178,179,180,181,183,184,185,186,187,188,189,190,192,196,197,199,202,203,204,205,206,208,209,211,212,214,215,217,218,219,220,221,222,223,224,225,226,229,230,231,232,236,238,],[46,47,-14,46,-58,-74,-54,-43,-12,-19,-20,-75,-10,-9,-15,-79,-82,-76,-81,60,-77,-86,-13,-78,-87,-60,-17,-11,-83,-16,-18,-80,46,-84,104,105,-42,-62,46,-59,-22,-24,-25,-23,-21,113,-75,-90,115,-28,-34,
120,-89,-6,-101,-102,-88,149,-55,154,-91,156,158,-60,160,-95,-33,-32,46,-85,-41,46,-61,-26,-27,-29,-30,46,46,-64,-65,177,-113,-127,-128,-111,-110,-114,-122,-129,-121,-125,-107,-108,-118,-104,-124,-126,-106,-112,-117,-115,-120,-119,-116,-123,-109,46,-75,-92,-93,-98,-100,-96,-6,46,-46,46,197,46,46,-8,-7,-103,-105,46,206,208,-56,-57,-94,-99,-97,177,-6,212,-40,-31,46,46,120,-35,-63,46,46,177,-47,-44,-46,-66,120,-67,46,46,46,230,46,-48,232,46,-71,46,-73,-45,46,-72,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expr_list':([0,3,39,40,41,48,78,82,94,106,110,117,118,149,154,156,158,160,164,166,168,171,173,180,191,199,202,206,208,219,220,221,223,226,230,236,],[1,1,1,97,100,1,135,139,100,1,1,1,1,1,184,185,184,185,1,193,1,1,1,1,210,1,1,1,1,1,1,1,1,1,1,1,]),'for_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,]),'command':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,]),'colon':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,]),'end':([48,106,164,168,175,180,198,201,221,223,236,],[109,163,190,196,204,205,215,218,229,231,238,]),'expr':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[29,29,57,61,62,63,91,92,29,29,101,29,111,122,123,124,125,126,127,128,129,130,131,132,133,134,29,137,138,29,141,142,143,144,145,146,147,148,29,29,29,169,29,29,29,181,183,29,29,29,29,29,29,29,29,29,203,29,207,29,29,217,29,29,29,29,29,29,29,29,29,29,]),'global_list':([19,],[58,]),'stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[6,6,96,96,6,6,6,6,6,96,96,96,6,96,6,96,6,6,6,6,96,96,6,6,96,]),'return_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,22
6,230,236,],[30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,]),'global_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,]),'sep':([61,203,217,],[117,219,226,]),'expr_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,]),'semi_opt':([63,163,190,],[121,189,209,]),'expr2':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,]),'stmt_list_opt':([106,117,118,199,219,220,226,],[162,170,172,216,227,228,233,]),'concat_list':([40,41,94,],[99,102,102,]),'exprs':([0,3,39,40,41,48,78,82,94,106,110,117,118,149,154,156,158,160,164,166,168,171,173,180,191,199,202,206,208,219,220,221,223,226,230,236,],[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,]),'stmt_list':([0,3,106,110,117,118,149,173,199,206,208,219,220,226,230,],[39,48,164,168,171,171,180,202,171,221,223,171,171,171,236,]),'break_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'try_catch':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,]),'null_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,
36,36,36,36,36,36,36,]),'args_opt':([165,214,],[192,225,]),'ret':([107,],[167,]),'continue_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,]),'if_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'elseif_stmt':([170,172,233,],[198,201,237,]),'func_dec':([45,],[106,]),'switch_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,]),'case_list':([121,227,228,],[175,234,235,]),'expr1':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,]),'number':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,]),'arg1':([11,55,],[52,112,]),'matrix':([0,3,15,21,24,27,33,34,35,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,
223,226,230,236,],[18,18,18,18,18,18,18,18,95,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,]),'cellarray':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,]),'while_stmt':([0,3,39,48,106,110,117,118,149,164,168,171,173,180,199,202,206,208,219,220,221,223,226,230,236,],[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,]),'args':([11,],[55,]),'top':([0,],[45,]),'string':([0,3,15,21,24,27,33,34,39,40,41,48,49,64,65,66,67,68,69,70,71,72,73,74,75,77,78,79,81,82,83,84,85,86,87,88,89,90,94,106,110,116,117,118,149,150,152,154,156,158,160,164,166,168,171,173,174,180,182,191,199,200,202,206,208,219,220,221,223,226,230,236,],[22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> top","S'",1,None,None,None),
('top -> <empty>','top',0,'p_top','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',49),
('top -> stmt_list','top',1,'p_top','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',50),
('top -> top func_dec stmt_list_opt','top',3,'p_top','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',51),
('top -> top func_dec end semi_opt','top',4,'p_top','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',52),
('top -> top func_dec stmt_list end semi_opt','top',5,'p_top','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',53),
('semi_opt -> <empty>','semi_opt',0,'p_semi_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',72),
('semi_opt -> semi_opt SEMI','semi_opt',2,'p_semi_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',73),
('semi_opt -> semi_opt COMMA','semi_opt',2,'p_semi_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',74),
('stmt -> continue_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',81),
('stmt -> break_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',82),
('stmt -> expr_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',83),
('stmt -> global_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',84),
('stmt -> command','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',85),
('stmt -> for_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',86),
('stmt -> if_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',87),
('stmt -> null_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',88),
('stmt -> return_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',89),
('stmt -> switch_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',90),
('stmt -> try_catch','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',91),
('stmt -> while_stmt','stmt',1,'p_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',92),
('arg1 -> STRING','arg1',1,'p_arg1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',101),
('arg1 -> NUMBER','arg1',1,'p_arg1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',102),
('arg1 -> IDENTIFIER','arg1',1,'p_arg1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',103),
('arg1 -> GLOBAL','arg1',1,'p_arg1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',104),
('args -> arg1','args',1,'p_args','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',113),
('args -> args arg1','args',2,'p_args','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',114),
('command -> IDENTIFIER args SEMI','command',3,'p_command','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',125),
('global_list -> IDENTIFIER','global_list',1,'p_global_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',132),
('global_list -> global_list IDENTIFIER','global_list',2,'p_global_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',133),
('global_stmt -> GLOBAL global_list SEMI','global_stmt',3,'p_global_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',143),
('global_stmt -> GLOBAL IDENTIFIER EQUALS expr SEMI','global_stmt',5,'p_global_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',144),
('return_stmt -> RETURN SEMI','return_stmt',2,'p_return_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',154),
('continue_stmt -> CONTINUE SEMI','continue_stmt',2,'p_continue_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',161),
('break_stmt -> BREAK SEMI','break_stmt',2,'p_break_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',168),
('switch_stmt -> SWITCH expr semi_opt case_list end','switch_stmt',5,'p_switch_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',175),
('case_list -> <empty>','case_list',0,'p_case_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',184),
('case_list -> CASE expr sep stmt_list_opt case_list','case_list',5,'p_case_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',185),
('case_list -> CASE expr error stmt_list_opt case_list','case_list',5,'p_case_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',186),
('case_list -> OTHERWISE stmt_list','case_list',2,'p_case_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',187),
('try_catch -> TRY stmt_list CATCH stmt_list end','try_catch',5,'p_try_catch','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',202),
('try_catch -> TRY stmt_list end','try_catch',3,'p_try_catch','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',203),
('null_stmt -> SEMI','null_stmt',1,'p_null_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',216),
('null_stmt -> COMMA','null_stmt',1,'p_null_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',217),
('func_dec -> FUNCTION IDENTIFIER args_opt SEMI','func_dec',4,'p_func_dec','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',223),
('func_dec -> FUNCTION ret EQUALS IDENTIFIER args_opt SEMI','func_dec',6,'p_func_dec','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',224),
('args_opt -> <empty>','args_opt',0,'p_args_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',234),
('args_opt -> LBRACKET RBRACKET','args_opt',2,'p_args_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',235),
('args_opt -> LBRACKET expr_list RBRACKET','args_opt',3,'p_args_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',236),
('ret -> IDENTIFIER','ret',1,'p_ret','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',256),
('ret -> LBRACKET RBRACKET','ret',2,'p_ret','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',257),
('ret -> LBRACKET expr_list RBRACKET','ret',3,'p_ret','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',258),
('stmt_list_opt -> <empty>','stmt_list_opt',0,'p_stmt_list_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',270),
('stmt_list_opt -> stmt_list','stmt_list_opt',1,'p_stmt_list_opt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',271),
('stmt_list -> stmt','stmt_list',1,'p_stmt_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',282),
('stmt_list -> stmt_list stmt','stmt_list',2,'p_stmt_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',283),
('concat_list -> expr_list SEMI expr_list','concat_list',3,'p_concat_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',298),
('concat_list -> concat_list SEMI expr_list','concat_list',3,'p_concat_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',299),
('expr_list -> exprs','expr_list',1,'p_expr_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',306),
('expr_list -> exprs COMMA','expr_list',2,'p_expr_list','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',307),
('exprs -> expr','exprs',1,'p_exprs','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',317),
('exprs -> exprs COMMA expr','exprs',3,'p_exprs','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',318),
('expr_stmt -> expr_list SEMI','expr_stmt',2,'p_expr_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',328),
('while_stmt -> WHILE expr SEMI stmt_list end','while_stmt',5,'p_while_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',335),
('sep -> COMMA','sep',1,'p_separator','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',343),
('sep -> SEMI','sep',1,'p_separator','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',344),
('if_stmt -> IF expr sep stmt_list_opt elseif_stmt end','if_stmt',6,'p_if_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',351),
('if_stmt -> IF expr error stmt_list_opt elseif_stmt end','if_stmt',6,'p_if_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',352),
('elseif_stmt -> <empty>','elseif_stmt',0,'p_elseif_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',360),
('elseif_stmt -> ELSE stmt_list_opt','elseif_stmt',2,'p_elseif_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',361),
('elseif_stmt -> ELSEIF expr sep stmt_list_opt elseif_stmt','elseif_stmt',5,'p_elseif_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',362),
('for_stmt -> FOR IDENTIFIER EQUALS expr SEMI stmt_list end','for_stmt',7,'p_for_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',376),
('for_stmt -> FOR LBRACKET IDENTIFIER EQUALS expr RBRACKET SEMI stmt_list end','for_stmt',9,'p_for_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',377),
('for_stmt -> FOR matrix EQUALS expr SEMI stmt_list end','for_stmt',7,'p_for_stmt','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',378),
('number -> NUMBER','number',1,'p_expr_number','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',393),
('expr -> IDENTIFIER','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',398),
('expr -> number','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',399),
('expr -> string','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',400),
('expr -> colon','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',401),
('expr -> NOTEQUAL','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',402),
('expr -> NOT','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',403),
('expr -> matrix','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',404),
('expr -> cellarray','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',405),
('expr -> expr2','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',406),
('expr -> expr1','expr',1,'p_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',407),
('end -> END','end',1,'p_expr_end','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',417),
('string -> STRING','string',1,'p_expr_string','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',429),
('colon -> COLON','colon',1,'p_expr_colon','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',436),
('expr1 -> MINUS expr','expr1',2,'p_expr1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',442),
('expr1 -> PLUS expr','expr1',2,'p_expr1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',443),
('expr1 -> NOTEQUAL expr','expr1',2,'p_expr1','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',444),
('cellarray -> LBRACE RBRACE','cellarray',2,'p_cellarray','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',451),
('cellarray -> LBRACE expr_list RBRACE','cellarray',3,'p_cellarray','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',452),
('cellarray -> LBRACE concat_list RBRACE','cellarray',3,'p_cellarray','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',453),
('cellarray -> LBRACE concat_list SEMI RBRACE','cellarray',4,'p_cellarray','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',454),
('matrix -> LBRACKET RBRACKET','matrix',2,'p_matrix','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',461),
('matrix -> LBRACKET concat_list RBRACKET','matrix',3,'p_matrix','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',462),
('matrix -> LBRACKET concat_list SEMI RBRACKET','matrix',4,'p_matrix','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',463),
('matrix -> LBRACKET expr_list RBRACKET','matrix',3,'p_matrix','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',464),
('matrix -> LBRACKET expr_list SEMI RBRACKET','matrix',4,'p_matrix','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',465),
('expr -> LBRACKET expr RBRACKET','expr',3,'p_paren_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',473),
('expr -> expr FIELD','expr',2,'p_field_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',480),
('expr -> expr TRANSPOSE','expr',2,'p_transpose_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',488),
('expr -> expr LBRACE expr_list RBRACE','expr',4,'p_cellarrayref','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',494),
('expr -> expr LBRACE RBRACE','expr',3,'p_cellarrayref','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',495),
('expr -> expr LBRACKET expr_list RBRACKET','expr',4,'p_funcall_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',502),
('expr -> expr LBRACKET RBRACKET','expr',3,'p_funcall_expr','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',503),
('expr2 -> expr AND expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',512),
('expr2 -> expr ANDAND expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',513),
('expr2 -> expr LDIV expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',514),
('expr2 -> expr COLON expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',515),
('expr2 -> expr DIV expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',516),
('expr2 -> expr DOT expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',517),
('expr2 -> expr DOTDIV expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',518),
('expr2 -> expr DOTEXP expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',519),
('expr2 -> expr DOTMUL expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',520),
('expr2 -> expr EQUALEQUAL expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',521),
('expr2 -> expr EXP expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',522),
('expr2 -> expr GREATEREQUAL expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',523),
('expr2 -> expr GREATERTHAN expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',524),
('expr2 -> expr LESSEQUAL expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',525),
('expr2 -> expr LESSTHAN expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',526),
('expr2 -> expr MINUS expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',527),
('expr2 -> expr TIMES expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',528),
('expr2 -> expr NOTEQUAL expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',529),
('expr2 -> expr OR expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',530),
('expr2 -> expr OROR expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',531),
('expr2 -> expr PLUS expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',532),
('expr2 -> expr EQUALS expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',533),
('expr2 -> expr OREQUALS expr','expr2',3,'p_expr2','F:\\Users\\Joe\\PycharmProjects\\MATLAB-to-Python\\Parse.py',534),
]
| [
"quano2@live.com"
] | quano2@live.com |
641eb5e4ce8f4443864024b99da2a1c4b80e0d83 | 167face5e34f69ba36b8a8d93306387dcaa50d24 | /15formatando_strings.py | 1061eb1748036704fe55492e86c058ee0f7e4ae9 | [] | no_license | william-cirico/python-study | 4fbe20936c46af6115f0d88ad861c71e6273db71 | 5923268fea4c78707fe82f1f609535a69859d0df | refs/heads/main | 2023-04-19T03:49:23.237829 | 2021-05-03T01:24:56 | 2021-05-03T01:24:56 | 309,492,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # ร possรญvel formatar strings das seguintes formas:
nome = "William Cรญrico"
idade = 20
peso = 70.31287418293472
print("Nome: ", nome, "Idade: ", idade, "Peso: ", peso)
print("Nome: {0} Idade: {1} Peso: {2}".format(nome, idade, peso))
print("Nome: {n} Idade: {i} Peso: {p}".format(n=nome, i=idade, p=peso))
print(f"Nome: {nome} Idade: {idade} Peso: {peso:.2f}") | [
"contato.williamc@gmail.com"
] | contato.williamc@gmail.com |
9e3558f3d288087f8b9b9ca49c4c0643f113bda5 | 8982eacbdc0dcc63ca95693e944bc8d1e749bafe | /udemy_python/sec-10/video_code/tests/acceptance/steps/interactions.py | 65ff54dbf34ce75d6ba568e348f8d83c86bd3f69 | [] | no_license | ltsuda/training | 8c98f2b7edf6943baaee737755ac1a0066244522 | 42e42cef95977c18567002f7f70960af5dbcaad4 | refs/heads/master | 2021-09-08T11:32:42.235710 | 2019-05-22T13:12:52 | 2019-05-22T13:12:52 | 125,256,857 | 0 | 0 | null | 2021-08-31T16:05:35 | 2018-03-14T18:29:22 | JavaScript | UTF-8 | Python | false | false | 786 | py | from behave import *
from tests.acceptance.page_model.base_page import BasePage
from tests.acceptance.page_model.new_post_page import NewPostPage
use_step_matcher('re')
@when('I click on the "(.*)" link')
def step_impl(context, link_text):
page = BasePage(context.driver)
links = page.navigation
matching_links = [l for l in links if l.text == link_text]
if len(matching_links) > 0:
matching_links[0].click()
else:
raise RuntimeError()
@when('I enter "(.*)" in the "(.*)" field')
def step_impl(context, content, field_name):
page = NewPostPage(context.driver)
page.form_field(field_name).send_keys(content)
@when('I press the submit button')
def step_impl(context):
page = NewPostPage(context.driver)
page.submit_button.click()
| [
"ltsuda@daitan.com"
] | ltsuda@daitan.com |
81e9000ab5421eed06c53cc2b93b30270fd1bc06 | 6d7894b522ed60dff8e85afc6150321aa6d3f980 | /buffer_overflow/jumptoaddress.py | a97e006a136c4e654df4dff3539fe081e08b38b7 | [] | no_license | demon-i386/material_palestra | 3e21cbbfc7b3b4a383f4c9c76ac786a3a7874bb7 | a2f92becbf1097897c451a7786c21e570e5d499f | refs/heads/master | 2023-06-13T07:54:58.561017 | 2021-07-04T06:14:27 | 2021-07-04T06:14:27 | 382,103,855 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import struct
junk = "A"*20
address = struct.pack("<Q",0x4004f7); # simbolo roubarpremio, nรฃo รฉ afetado pelo ASLR
print(junk + address)
# ASLR (Address Space Layout Randomization): proteรงรฃo, posiciona objetos como endereรงo
# base do executal, stack, heap e posiรงรฃo das libs em um espaรงo aleatรณrio que muda
# ao fim da execuรงรฃo do processo
# NX (No Execute): marca a stack / heap como nรฃo executรกvel
# Canary: adiciona um "cookie" na stack, caso esse cookie seja sobreescrito com nosso
# junk รฉ entรฃo detectado um buffer overflow, causando a chamada de um exit(), que nรฃo
# possui um ret, sem ret = sem exploit
# RELRO (Relocation Read-Only): basicamente previne escrita na tabela GOT
# sem GOT hijack :C
# PIE (Position Independent Executable): mini ASLR :P
| [
"75624951+demon-i386@users.noreply.github.com"
] | 75624951+demon-i386@users.noreply.github.com |
6c050c340a55dc158eb445212dc89309085f6de1 | a64f1280b8dedc21c85e8c4234072da3d6a43916 | /mnist_perceptron.py | 7e05809d05073d916ab9a9ba237d0942a1037443 | [] | no_license | pseudo-sm/cv-dl-basic | 1c84d5e36422d7f5230cfba7c423cba78cf9fe6d | f84f53365ec44bd59742fe1e04bbf82977c67169 | refs/heads/master | 2022-08-27T16:08:31.650049 | 2022-07-26T07:54:39 | 2022-07-26T07:54:39 | 154,711,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
input_size = 784
no_classes = 10
batch_size = 100
total_batches = 200
x_input = tf.placeholder(tf.float32, shape=[None, input_size])
y_input = tf.placeholder(tf.float32, shape=[None, no_classes])
weights = tf.Variable(tf.random_normal([input_size, no_classes]))
bias = tf.Variable(tf.random_normal([no_classes]))
logits = tf.matmul(x_input, weights) + bias
softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_input, logits=logits)
loss_operation = tf.reduce_mean(softmax_cross_entropy)
optimiser = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss_operation)
session = tf.Session()
session.run(tf.global_variables_initializer())
for batch_no in range(total_batches):
mnist_batch = mnist_data.train.next_batch(batch_size)
train_images, train_labels = mnist_batch[0], mnist_batch[1]
_, loss_value = session.run([optimiser, loss_operation], feed_dict={x_input: train_images,
y_input: train_labels})
print(loss_value)
predictions = tf.argmax(logits, 1)
correct_predictions = tf.equal(predictions, tf.argmax(y_input, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
test_images, test_labels = mnist_data.test.images, mnist_data.test.labels
accuracy_value = session.run(accuracy_operation, feed_dict={x_input: test_images,
y_input: test_labels})
print('Accuracy : ', accuracy_value)
session.close()
| [
"saswathcommand@gmail.com"
] | saswathcommand@gmail.com |
17ebc93a0e4a5b9f3bdb7c23942b97a73909d91d | 0bc4391986b15c706a77e5df314ec83e84375c54 | /articles/migrations/0002_article_image_thumbnail.py | dd12130bb4ff92b2ae300134423a7f1d034fcd9b | [] | no_license | ssshhh0402/django-crud | a6d1a0872942c6215b1130a44ae335182c42937d | da292c07c9f77526bee8cbbec07d37ea8464d6af | refs/heads/master | 2022-05-02T12:07:26.518798 | 2019-09-23T06:26:43 | 2019-09-23T06:26:43 | 203,089,241 | 0 | 0 | null | 2022-04-22T22:11:46 | 2019-08-19T03:07:54 | HTML | UTF-8 | Python | false | false | 443 | py | # Generated by Django 2.2.4 on 2019-09-23 06:07
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='image_thumbnail',
field=imagekit.models.fields.ProcessedImageField(blank=True, upload_to=''),
),
]
| [
"ssshhh0402@naver.com"
] | ssshhh0402@naver.com |
df4e2b89e5e838494485cf479d6d0589536e3838 | fa76cf45d7bf4ed533e5a776ecd52cea15da8c90 | /robotframework-ls/src/robotframework_debug_adapter/vendored/force_pydevd.py | 93bcca4fb794844f5a72a146f94071d71202e7a7 | [
"Apache-2.0"
] | permissive | martinRenou/robotframework-lsp | 8a5d63b7cc7d320c9fed2372a79c8c6772d6481e | 5f23b7374139e83d0aa1ebd30675e762d7a0db86 | refs/heads/master | 2023-08-18T22:26:01.386975 | 2021-10-25T13:46:11 | 2021-10-25T13:46:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import contextlib
from importlib import import_module
import os
import sys
VENDORED_ROOT = os.path.dirname(os.path.abspath(__file__))
def project_root(project):
"""Return the path to the root dir of the vendored project.
If "project" is an empty string then the path prefix for vendored
projects (e.g. "robotframework_debug_adapter/_vendored/") will be returned.
"""
if not project:
project = ""
return os.path.join(VENDORED_ROOT, project)
@contextlib.contextmanager
def vendored(project, root=None):
"""A context manager under which the vendored project will be imported."""
if root is None:
root = project_root(project)
# Add the vendored project directory, so that it gets tried first.
sys.path.insert(0, root)
try:
yield root
finally:
sys.path.remove(root)
def preimport(project, modules, **kwargs):
"""Import each of the named modules out of the vendored project."""
with vendored(project, **kwargs):
for name in modules:
import_module(name)
try:
import pydevd # noqa
except ImportError:
pydevd_available = False
else:
pydevd_available = True
if not pydevd_available:
# Constants must be set before importing any other pydevd module
# # due to heavy use of "from" in them.
with vendored("vendored_pydevd"):
try:
pydevd_constants = import_module("_pydevd_bundle.pydevd_constants")
except ImportError as e:
contents = os.listdir(VENDORED_ROOT)
for c in contents[:]:
if os.path.isdir(c):
contents.append(f"{c}/{os.listdir(c)}")
else:
contents.append(c)
s = "\n".join(contents)
msg = f"Vendored root: {VENDORED_ROOT} -- contents:\n{s}"
raise ImportError(msg) from e
# Now make sure all the top-level modules and packages in pydevd are
# loaded. Any pydevd modules that aren't loaded at this point, will
# be loaded using their parent package's __path__ (i.e. one of the
# following).
preimport(
"vendored_pydevd",
[
"_pydev_bundle",
"_pydev_imps",
"_pydev_runfiles",
"_pydevd_bundle",
"_pydevd_frame_eval",
"pydev_ipython",
"pydevd_concurrency_analyser",
"pydevd_plugins",
"pydevd",
],
)
import pydevd # noqa
# Ensure that pydevd uses JSON protocol by default.
from _pydevd_bundle import pydevd_constants
from _pydevd_bundle import pydevd_defaults
pydevd_defaults.PydevdCustomization.DEFAULT_PROTOCOL = (
pydevd_constants.HTTP_JSON_PROTOCOL
)
from robocorp_ls_core.debug_adapter_core.dap.dap_base_schema import (
BaseSchema as RobotSchema,
)
from _pydevd_bundle._debug_adapter.pydevd_base_schema import BaseSchema as PyDevdSchema
PyDevdSchema._obj_id_to_dap_id = RobotSchema._obj_id_to_dap_id
PyDevdSchema._dap_id_to_obj_id = RobotSchema._dap_id_to_obj_id
PyDevdSchema._next_dap_id = RobotSchema._next_dap_id
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
b2fa1c2267c4363c4044bbd0a1256ecebf629f01 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DRAFT-MSDP-MIB.py | af1baa3acc14379cc42129394496b65eb61a6067 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 31,396 | py | #
# PySNMP MIB module DRAFT-MSDP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DRAFT-MSDP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:54:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, TimeTicks, Counter32, Bits, ModuleIdentity, Counter64, NotificationType, Gauge32, iso, experimental, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ObjectIdentity, IpAddress, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "Counter32", "Bits", "ModuleIdentity", "Counter64", "NotificationType", "Gauge32", "iso", "experimental", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ObjectIdentity", "IpAddress", "Unsigned32")
TextualConvention, RowStatus, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "TruthValue", "DisplayString")
msdpMIB = ModuleIdentity((1, 3, 6, 1, 3, 92))
if mibBuilder.loadTexts: msdpMIB.setLastUpdated('9912160000Z')
if mibBuilder.loadTexts: msdpMIB.setOrganization('IETF MSDP Working Group')
if mibBuilder.loadTexts: msdpMIB.setContactInfo(' Bill Fenner 75 Willow Road Menlo Park, CA 94025 Phone: +1 650 867 6073 E-mail: fenner@research.att.com Dave Thaler One Microsoft Way Redmond, WA 98052 Phone: +1 425 703 8835 Email: dthaler@microsoft.com')
if mibBuilder.loadTexts: msdpMIB.setDescription('An experimental MIB module for MSDP Management.')
msdpMIBobjects = MibIdentifier((1, 3, 6, 1, 3, 92, 1))
msdp = MibIdentifier((1, 3, 6, 1, 3, 92, 1, 1))
msdpEnabled = MibScalar((1, 3, 6, 1, 3, 92, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: msdpEnabled.setStatus('current')
if mibBuilder.loadTexts: msdpEnabled.setDescription('The state of MSDP on this MSDP speaker - globally enabled or disabled.')
msdpCacheLifetime = MibScalar((1, 3, 6, 1, 3, 92, 1, 1, 2), TimeTicks()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: msdpCacheLifetime.setStatus('current')
if mibBuilder.loadTexts: msdpCacheLifetime.setDescription('The lifetime given to SA cache entries when created or refreshed. A value of 0 means no SA caching is done by this MSDP speaker.')
msdpNumSACacheEntries = MibScalar((1, 3, 6, 1, 3, 92, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpNumSACacheEntries.setStatus('current')
if mibBuilder.loadTexts: msdpNumSACacheEntries.setDescription('The total number of entries in the SA Cache table.')
msdpSAHoldDownPeriod = MibScalar((1, 3, 6, 1, 3, 92, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)).clone(90)).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSAHoldDownPeriod.setStatus('current')
if mibBuilder.loadTexts: msdpSAHoldDownPeriod.setDescription('The number of seconds in the MSDP SA Hold-down period')
msdpRequestsTable = MibTable((1, 3, 6, 1, 3, 92, 1, 1, 4), )
if mibBuilder.loadTexts: msdpRequestsTable.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsTable.setDescription('The (conceptual) table listing group ranges and MSDP peers used when deciding where to send an SA Request message when required. If SA Caching is enabled, this table may be empty.')
msdpRequestsEntry = MibTableRow((1, 3, 6, 1, 3, 92, 1, 1, 4, 1), ).setIndexNames((0, "DRAFT-MSDP-MIB", "msdpRequestsGroupAddress"), (0, "DRAFT-MSDP-MIB", "msdpRequestsGroupMask"))
if mibBuilder.loadTexts: msdpRequestsEntry.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsEntry.setDescription('An entry (conceptual row) representing a group range used when deciding where to send an SA Request message.')
msdpRequestsGroupAddress = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: msdpRequestsGroupAddress.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsGroupAddress.setDescription('The group address that, when combined with the mask in this entry, represents the group range for which this peer will service MSDP SA Requests.')
msdpRequestsGroupMask = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: msdpRequestsGroupMask.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsGroupMask.setDescription('The mask that, when combined with the group address in this entry, represents the group range for which this peer will service MSDP SA Requests.')
msdpRequestsPeer = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 4, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpRequestsPeer.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsPeer.setDescription("The peer to which MSDP SA Requests for groups matching this entry's group range will be sent. Must match the INDEX of a row in the msdpPeerTable.")
msdpRequestsStatus = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpRequestsStatus.setStatus('current')
if mibBuilder.loadTexts: msdpRequestsStatus.setDescription('The status of this row, by which new rows may be added to the table.')
msdpPeerTable = MibTable((1, 3, 6, 1, 3, 92, 1, 1, 5), )
if mibBuilder.loadTexts: msdpPeerTable.setStatus('current')
if mibBuilder.loadTexts: msdpPeerTable.setDescription("The (conceptual) table listing the MSDP speaker's peers.")
msdpPeerEntry = MibTableRow((1, 3, 6, 1, 3, 92, 1, 1, 5, 1), ).setIndexNames((0, "DRAFT-MSDP-MIB", "msdpPeerRemoteAddress"))
if mibBuilder.loadTexts: msdpPeerEntry.setStatus('current')
if mibBuilder.loadTexts: msdpPeerEntry.setDescription('An entry (conceptual row) representing an MSDP peer.')
msdpPeerRemoteAddress = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: msdpPeerRemoteAddress.setStatus('current')
if mibBuilder.loadTexts: msdpPeerRemoteAddress.setDescription('The address of the remote MSDP peer.')
msdpPeerState = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("inactive", 1), ("listen", 2), ("connecting", 3), ("established", 4), ("disabled", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerState.setStatus('current')
if mibBuilder.loadTexts: msdpPeerState.setDescription('The state of the MSDP TCP connection with this peer.')
msdpPeerRPFFailures = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerRPFFailures.setStatus('current')
if mibBuilder.loadTexts: msdpPeerRPFFailures.setDescription('The number of RPF failures on SA messages received from this peer.')
msdpPeerInSAs = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInSAs.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInSAs.setDescription('The number of MSDP SA messages received on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerOutSAs = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutSAs.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutSAs.setDescription('The number of MSDP SA messages transmitted on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerInSARequests = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInSARequests.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInSARequests.setDescription('The number of MSDP SA-Request messages received on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerOutSARequests = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutSARequests.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutSARequests.setDescription('The number of MSDP SA-Request messages transmitted on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerInSAResponses = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInSAResponses.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInSAResponses.setDescription('The number of MSDP SA-Response messages received on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerOutSAResponses = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutSAResponses.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutSAResponses.setDescription('The number of MSDP SA Response messages transmitted on this TCP connection. This object should be initialized to zero when the connection is established.')
msdpPeerInControlMessages = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInControlMessages.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInControlMessages.setDescription('The total number of MSDP messages received on this TCP connection. This object should be initialized to zero when the connection is established.')
msdpPeerOutControlMessages = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutControlMessages.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutControlMessages.setDescription('The total number of MSDP messages transmitted on this TCP connection. This object should be initialized to zero when the connection is established.')
msdpPeerInDataPackets = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInDataPackets.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInDataPackets.setDescription('The total number of encapsulated data packets received from this peer. This object should be initialized to zero when the connection is established.')
msdpPeerOutDataPackets = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutDataPackets.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutDataPackets.setDescription('The total number of encapsulated data packets sent to this peer. This object should be initialized to zero when the connection is established.')
msdpPeerFsmEstablishedTransitions = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerFsmEstablishedTransitions.setStatus('current')
if mibBuilder.loadTexts: msdpPeerFsmEstablishedTransitions.setDescription('The total number of times the MSDP FSM transitioned into the established state.')
msdpPeerFsmEstablishedTime = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 16), Gauge32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerFsmEstablishedTime.setStatus('current')
if mibBuilder.loadTexts: msdpPeerFsmEstablishedTime.setDescription('This timer indicates how long (in seconds) this peer has been in the Established state or how long since this peer was last in the Established state. It is set to zero when a new peer is configured or the MSDP speaker is booted.')
msdpPeerInMessageElapsedTime = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 17), Gauge32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInMessageElapsedTime.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInMessageElapsedTime.setDescription('Elapsed time in seconds since the last MSDP message was received from the peer. Each time msdpPeerInControlMessages is incremented, the value of this object is set to zero (0).')
msdpPeerLocalAddress = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 18), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerLocalAddress.setStatus('current')
if mibBuilder.loadTexts: msdpPeerLocalAddress.setDescription("The local IP address of this entry's MSDP connection.")
msdpPeerSAAdvPeriod = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)).clone(60)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerSAAdvPeriod.setStatus('current')
if mibBuilder.loadTexts: msdpPeerSAAdvPeriod.setDescription('Time interval in seconds for the MinSAAdvertisementInterval MSDP timer.')
msdpPeerConnectRetryInterval = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(120)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerConnectRetryInterval.setStatus('current')
if mibBuilder.loadTexts: msdpPeerConnectRetryInterval.setDescription('Time interval in seconds for the ConnectRetry timer.')
msdpPeerHoldTimeConfigured = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(3, 65535), )).clone(90)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerHoldTimeConfigured.setStatus('current')
if mibBuilder.loadTexts: msdpPeerHoldTimeConfigured.setDescription('Time interval in seconds for the Hold Timer configured for this MSDP speaker with this peer.')
msdpPeerKeepAliveConfigured = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 21845), )).clone(30)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerKeepAliveConfigured.setStatus('current')
if mibBuilder.loadTexts: msdpPeerKeepAliveConfigured.setDescription('Time interval in seconds for the KeepAlive timer configured for this MSDP speaker with this peer. A reasonable maximum value for this timer would be configured to be one third of that of msdpPeerHoldTimeConfigured. If the value of this object is zero (0), no periodic KEEPALIVE messages are sent to the peer after the MSDP connection has been established.')
msdpPeerDataTtl = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerDataTtl.setStatus('current')
if mibBuilder.loadTexts: msdpPeerDataTtl.setDescription('The minimum TTL a packet is required to have before it may be forwarded using SA encapsulation to this peer.')
msdpPeerProcessRequestsFrom = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 24), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerProcessRequestsFrom.setStatus('current')
if mibBuilder.loadTexts: msdpPeerProcessRequestsFrom.setDescription('This object indicates whether or not to process MSDP SA Request messages from this peer. If True(1), MSDP SA Request messages from this peer are processed and replied to (if appropriate) with SA Response messages. If False(2), MSDP SA Request messages from this peer are silently ignored. It defaults to False when msdpCacheLifetime is 0 and True when msdpCacheLifetime is non-0.')
msdpPeerStatus = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 25), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: msdpPeerStatus.setStatus('current')
if mibBuilder.loadTexts: msdpPeerStatus.setDescription("The RowStatus object by which peers can be added and deleted. A transition to 'active' will cause the MSDP Start Event to be generated. A transition out of the 'active' state will cause the MSDP Stop Event to be generated. Care should be used in providing write access to this object without adequate authentication.")
msdpPeerRemotePort = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerRemotePort.setStatus('current')
if mibBuilder.loadTexts: msdpPeerRemotePort.setDescription('The remote port for the TCP connection between the MSDP peers.')
msdpPeerLocalPort = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerLocalPort.setStatus('current')
if mibBuilder.loadTexts: msdpPeerLocalPort.setDescription('The local port for the TCP connection between the MSDP peers.')
msdpPeerEncapsulationState = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("default", 1), ("received", 2), ("advertising", 3), ("sent", 4), ("agreed", 5), ("failed", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerEncapsulationState.setStatus('current')
if mibBuilder.loadTexts: msdpPeerEncapsulationState.setDescription('The status of the encapsulation negotiation state machine.')
msdpPeerEncapsulationType = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("tcp", 1), ("udp", 2), ("gre", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerEncapsulationType.setStatus('current')
if mibBuilder.loadTexts: msdpPeerEncapsulationType.setDescription('The encapsulation in use when encapsulating data in SA messages to this peer.')
msdpPeerConnectionAttempts = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerConnectionAttempts.setStatus('current')
if mibBuilder.loadTexts: msdpPeerConnectionAttempts.setDescription('The number of times the state machine has transitioned from inactive to connecting.')
msdpPeerInNotifications = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerInNotifications.setStatus('current')
if mibBuilder.loadTexts: msdpPeerInNotifications.setDescription('The number of MSDP Notification messages received on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerOutNotifications = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerOutNotifications.setStatus('current')
if mibBuilder.loadTexts: msdpPeerOutNotifications.setDescription('The number of MSDP Notification messages transmitted on this connection. This object should be initialized to zero when the connection is established.')
msdpPeerLastError = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 5, 1, 33), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2).clone(hexValue="0000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpPeerLastError.setStatus('current')
if mibBuilder.loadTexts: msdpPeerLastError.setDescription('The last error code and subcode seen by this peer on this connection. If no error has occurred, this field is zero. Otherwise, the first byte of this two byte OCTET STRING contains the error code, and the second byte contains the subcode.')
msdpSACacheTable = MibTable((1, 3, 6, 1, 3, 92, 1, 1, 6), )
if mibBuilder.loadTexts: msdpSACacheTable.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheTable.setDescription("The (conceptual) table listing the MSDP SA advertisements currently in the MSDP speaker's cache.")
msdpSACacheEntry = MibTableRow((1, 3, 6, 1, 3, 92, 1, 1, 6, 1), ).setIndexNames((0, "DRAFT-MSDP-MIB", "msdpSACacheGroupAddr"), (0, "DRAFT-MSDP-MIB", "msdpSACacheSourceAddr"), (0, "DRAFT-MSDP-MIB", "msdpSACacheOriginRP"))
if mibBuilder.loadTexts: msdpSACacheEntry.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheEntry.setDescription('An entry (conceptual row) representing an MSDP SA advert.')
msdpSACacheGroupAddr = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 1), IpAddress())
if mibBuilder.loadTexts: msdpSACacheGroupAddr.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheGroupAddr.setDescription('The group address of the SA Cache entry.')
msdpSACacheSourceAddr = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 2), IpAddress())
if mibBuilder.loadTexts: msdpSACacheSourceAddr.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheSourceAddr.setDescription('The source address of the SA Cache entry.')
msdpSACacheOriginRP = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 3), IpAddress())
if mibBuilder.loadTexts: msdpSACacheOriginRP.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheOriginRP.setDescription('The address of the RP which originated the last SA message accepted for this entry.')
msdpSACachePeerLearnedFrom = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACachePeerLearnedFrom.setStatus('current')
if mibBuilder.loadTexts: msdpSACachePeerLearnedFrom.setDescription('The peer from which this SA Cache entry was last accepted. This address must correspond to the msdpPeerRemoteAddress value for a row in the MSDP Peer Table.')
msdpSACacheRPFPeer = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACacheRPFPeer.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheRPFPeer.setDescription('The peer from which an SA message corresponding to this cache entry would be accepted (i.e. the RPF peer for msdpSACacheOriginRP). This may be different than msdpSACachePeerLearnedFrom if this entry was created by an MSDP SA-Response. This address must correspond to the msdpPeerRemoteAddress value for a row in the MSDP Peer Table.')
msdpSACacheInSAs = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACacheInSAs.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheInSAs.setDescription('The number of MSDP SA messages received relevant to this cache entry. This object must be initialized to zero when creating a cache entry.')
msdpSACacheInDataPackets = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACacheInDataPackets.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheInDataPackets.setDescription('The number of MSDP encapsulated data packets received relevant to this cache entry. This object must be initialized to zero when creating a cache entry.')
msdpSACacheUpTime = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACacheUpTime.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheUpTime.setDescription('The time since this entry was placed in the SA cache.')
msdpSACacheExpiryTime = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: msdpSACacheExpiryTime.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheExpiryTime.setDescription('The time remaining before this entry will expire from the SA cache.')
msdpSACacheStatus = MibTableColumn((1, 3, 6, 1, 3, 92, 1, 1, 6, 1, 10), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: msdpSACacheStatus.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheStatus.setDescription("The status of this row in the table. The only allowable actions are to retreive the status, which will be `active', or to set the status to `destroy' in order to remove this entry from the cache.")
msdpTraps = MibIdentifier((1, 3, 6, 1, 3, 92, 1, 1, 7))
msdpEstablished = NotificationType((1, 3, 6, 1, 3, 92, 1, 1, 7, 1)).setObjects(("DRAFT-MSDP-MIB", "msdpPeerFsmEstablishedTransitions"))
if mibBuilder.loadTexts: msdpEstablished.setStatus('current')
if mibBuilder.loadTexts: msdpEstablished.setDescription('The MSDP Established event is generated when the MSDP FSM enters the ESTABLISHED state.')
msdpBackwardTransition = NotificationType((1, 3, 6, 1, 3, 92, 1, 1, 7, 2)).setObjects(("DRAFT-MSDP-MIB", "msdpPeerState"))
if mibBuilder.loadTexts: msdpBackwardTransition.setStatus('current')
if mibBuilder.loadTexts: msdpBackwardTransition.setDescription('The MSDPBackwardTransition Event is generated when the MSDP FSM moves from a higher numbered state to a lower numbered state.')
msdpMIBConformance = MibIdentifier((1, 3, 6, 1, 3, 92, 1, 1, 8))
msdpMIBCompliances = MibIdentifier((1, 3, 6, 1, 3, 92, 1, 1, 8, 1))
msdpMIBGroups = MibIdentifier((1, 3, 6, 1, 3, 92, 1, 1, 8, 2))
msdpMIBCompliance = ModuleCompliance((1, 3, 6, 1, 3, 92, 1, 1, 8, 1, 1)).setObjects(("DRAFT-MSDP-MIB", "msdpMIBGlobalsGroup"), ("DRAFT-MSDP-MIB", "msdpSACacheGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
msdpMIBCompliance = msdpMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: msdpMIBCompliance.setDescription('The compliance statement for entities which implement the MSDP MIB.')
msdpMIBGlobalsGroup = ObjectGroup((1, 3, 6, 1, 3, 92, 1, 1, 8, 2, 1)).setObjects(("DRAFT-MSDP-MIB", "msdpEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
msdpMIBGlobalsGroup = msdpMIBGlobalsGroup.setStatus('current')
if mibBuilder.loadTexts: msdpMIBGlobalsGroup.setDescription('A collection of objects providing information on global MSDP state.')
msdpMIBPeerGroup = ObjectGroup((1, 3, 6, 1, 3, 92, 1, 1, 8, 2, 2)).setObjects(("DRAFT-MSDP-MIB", "msdpPeerRPFFailures"), ("DRAFT-MSDP-MIB", "msdpPeerState"), ("DRAFT-MSDP-MIB", "msdpPeerInSAs"), ("DRAFT-MSDP-MIB", "msdpPeerOutSAs"), ("DRAFT-MSDP-MIB", "msdpPeerInSARequests"), ("DRAFT-MSDP-MIB", "msdpPeerOutSARequests"), ("DRAFT-MSDP-MIB", "msdpPeerInSAResponses"), ("DRAFT-MSDP-MIB", "msdpPeerOutSAResponses"), ("DRAFT-MSDP-MIB", "msdpPeerInNotifications"), ("DRAFT-MSDP-MIB", "msdpPeerOutNotifications"), ("DRAFT-MSDP-MIB", "msdpPeerInControlMessages"), ("DRAFT-MSDP-MIB", "msdpPeerOutControlMessages"), ("DRAFT-MSDP-MIB", "msdpPeerInDataPackets"), ("DRAFT-MSDP-MIB", "msdpPeerOutDataPackets"), ("DRAFT-MSDP-MIB", "msdpPeerFsmEstablishedTransitions"), ("DRAFT-MSDP-MIB", "msdpPeerFsmEstablishedTime"), ("DRAFT-MSDP-MIB", "msdpPeerLocalAddress"), ("DRAFT-MSDP-MIB", "msdpPeerRemotePort"), ("DRAFT-MSDP-MIB", "msdpPeerLocalPort"), ("DRAFT-MSDP-MIB", "msdpPeerSAAdvPeriod"), ("DRAFT-MSDP-MIB", "msdpPeerConnectRetryInterval"), ("DRAFT-MSDP-MIB", "msdpPeerHoldTimeConfigured"), ("DRAFT-MSDP-MIB", "msdpPeerKeepAliveConfigured"), ("DRAFT-MSDP-MIB", "msdpPeerInMessageElapsedTime"), ("DRAFT-MSDP-MIB", "msdpPeerDataTtl"), ("DRAFT-MSDP-MIB", "msdpPeerProcessRequestsFrom"), ("DRAFT-MSDP-MIB", "msdpPeerEncapsulationState"), ("DRAFT-MSDP-MIB", "msdpPeerEncapsulationType"), ("DRAFT-MSDP-MIB", "msdpPeerConnectionAttempts"), ("DRAFT-MSDP-MIB", "msdpPeerLastError"), ("DRAFT-MSDP-MIB", "msdpPeerStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
msdpMIBPeerGroup = msdpMIBPeerGroup.setStatus('current')
if mibBuilder.loadTexts: msdpMIBPeerGroup.setDescription('A collection of objects for managing MSDP peers.')
msdpSACacheGroup = ObjectGroup((1, 3, 6, 1, 3, 92, 1, 1, 8, 2, 3)).setObjects(("DRAFT-MSDP-MIB", "msdpCacheLifetime"), ("DRAFT-MSDP-MIB", "msdpNumSACacheEntries"), ("DRAFT-MSDP-MIB", "msdpSAHoldDownPeriod"), ("DRAFT-MSDP-MIB", "msdpSACachePeerLearnedFrom"), ("DRAFT-MSDP-MIB", "msdpSACacheRPFPeer"), ("DRAFT-MSDP-MIB", "msdpSACacheInSAs"), ("DRAFT-MSDP-MIB", "msdpSACacheInDataPackets"), ("DRAFT-MSDP-MIB", "msdpSACacheUpTime"), ("DRAFT-MSDP-MIB", "msdpSACacheExpiryTime"), ("DRAFT-MSDP-MIB", "msdpSACacheStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
msdpSACacheGroup = msdpSACacheGroup.setStatus('current')
if mibBuilder.loadTexts: msdpSACacheGroup.setDescription('A collection of objects for managing MSDP SA cache entries.')
mibBuilder.exportSymbols("DRAFT-MSDP-MIB", msdpPeerRPFFailures=msdpPeerRPFFailures, msdpRequestsGroupAddress=msdpRequestsGroupAddress, msdpPeerInSAs=msdpPeerInSAs, msdpMIB=msdpMIB, msdpPeerOutDataPackets=msdpPeerOutDataPackets, msdpSACacheRPFPeer=msdpSACacheRPFPeer, msdpSACacheInDataPackets=msdpSACacheInDataPackets, msdpMIBCompliances=msdpMIBCompliances, msdpNumSACacheEntries=msdpNumSACacheEntries, msdpPeerDataTtl=msdpPeerDataTtl, msdpPeerEntry=msdpPeerEntry, msdpMIBPeerGroup=msdpMIBPeerGroup, msdpSAHoldDownPeriod=msdpSAHoldDownPeriod, msdpRequestsTable=msdpRequestsTable, msdpPeerStatus=msdpPeerStatus, msdpPeerInMessageElapsedTime=msdpPeerInMessageElapsedTime, msdpPeerTable=msdpPeerTable, msdpPeerFsmEstablishedTime=msdpPeerFsmEstablishedTime, msdpPeerKeepAliveConfigured=msdpPeerKeepAliveConfigured, msdpSACacheInSAs=msdpSACacheInSAs, msdpMIBGlobalsGroup=msdpMIBGlobalsGroup, msdpPeerOutControlMessages=msdpPeerOutControlMessages, msdpSACacheUpTime=msdpSACacheUpTime, msdpSACacheGroup=msdpSACacheGroup, msdpPeerInSARequests=msdpPeerInSARequests, msdpPeerSAAdvPeriod=msdpPeerSAAdvPeriod, msdpPeerLocalPort=msdpPeerLocalPort, msdpBackwardTransition=msdpBackwardTransition, msdpPeerOutNotifications=msdpPeerOutNotifications, msdpPeerEncapsulationState=msdpPeerEncapsulationState, msdpMIBCompliance=msdpMIBCompliance, msdpPeerProcessRequestsFrom=msdpPeerProcessRequestsFrom, msdpSACacheStatus=msdpSACacheStatus, msdpPeerRemoteAddress=msdpPeerRemoteAddress, msdpSACacheGroupAddr=msdpSACacheGroupAddr, msdpMIBConformance=msdpMIBConformance, msdp=msdp, msdpSACacheEntry=msdpSACacheEntry, msdpPeerEncapsulationType=msdpPeerEncapsulationType, msdpPeerOutSAs=msdpPeerOutSAs, msdpPeerConnectRetryInterval=msdpPeerConnectRetryInterval, msdpSACacheSourceAddr=msdpSACacheSourceAddr, msdpSACacheOriginRP=msdpSACacheOriginRP, msdpSACacheExpiryTime=msdpSACacheExpiryTime, msdpRequestsGroupMask=msdpRequestsGroupMask, msdpPeerOutSAResponses=msdpPeerOutSAResponses, msdpPeerRemotePort=msdpPeerRemotePort, 
msdpRequestsPeer=msdpRequestsPeer, msdpSACachePeerLearnedFrom=msdpSACachePeerLearnedFrom, msdpPeerState=msdpPeerState, msdpPeerOutSARequests=msdpPeerOutSARequests, msdpPeerInNotifications=msdpPeerInNotifications, PYSNMP_MODULE_ID=msdpMIB, msdpPeerInSAResponses=msdpPeerInSAResponses, msdpTraps=msdpTraps, msdpMIBobjects=msdpMIBobjects, msdpPeerHoldTimeConfigured=msdpPeerHoldTimeConfigured, msdpRequestsStatus=msdpRequestsStatus, msdpRequestsEntry=msdpRequestsEntry, msdpPeerConnectionAttempts=msdpPeerConnectionAttempts, msdpPeerInControlMessages=msdpPeerInControlMessages, msdpMIBGroups=msdpMIBGroups, msdpPeerLastError=msdpPeerLastError, msdpCacheLifetime=msdpCacheLifetime, msdpPeerLocalAddress=msdpPeerLocalAddress, msdpEnabled=msdpEnabled, msdpPeerInDataPackets=msdpPeerInDataPackets, msdpEstablished=msdpEstablished, msdpPeerFsmEstablishedTransitions=msdpPeerFsmEstablishedTransitions, msdpSACacheTable=msdpSACacheTable)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
3b68e6ca40f8dadcb29028961550c661836d8e9a | 89f1282ae71fe0d838bd406766df817c02fed007 | /notes/migrations/0002_auto_20200711_2211.py | ed67cc7353d83df3366c7d575cf5a1d19ef00114 | [] | no_license | joaopaulozorek/notes-django-bulma | ad7a7be593f52ab62084f60940104421d7e4f107 | ba8430729ed1dcda92276f365b405f0c38a3fbb6 | refs/heads/master | 2022-11-18T13:00:40.184979 | 2020-07-13T15:56:30 | 2020-07-13T15:56:30 | 278,969,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # Generated by Django 3.0.8 on 2020-07-12 01:11
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Portuguese field names of ``notes.Note`` to English."""

    dependencies = [
        ('notes', '0001_initial'),
    ]

    # One RenameField per (old, new) pair — equivalent to the four
    # explicit operations the autogenerated migration contained.
    operations = [
        migrations.RenameField(model_name='note', old_name=old, new_name=new)
        for old, new in (
            ('autor', 'author'),
            ('data', 'date'),
            ('texto', 'text'),
            ('titulo', 'title'),
        )
    ]
| [
"joaopaulozorek@gmail.com"
] | joaopaulozorek@gmail.com |
e32ab4c247177bf2ca41ccaf29e0268e813b00d8 | ae8c9fab9d57dd7b633f7b4973af8720c98c7f57 | /tests/test_utils.py | 09f9aec9fd59159209a680d3d8bcf23ad74db19e | [
"MIT"
] | permissive | jungtaekkim/bayeso-benchmarks | ec40ad2198e305bb60d041c3acf11a53ef31628e | 5eaf53d103dcdbe9c646faf743adfa865bf100a5 | refs/heads/main | 2023-08-22T23:58:09.550624 | 2023-01-27T21:37:45 | 2023-01-27T21:37:45 | 228,564,765 | 26 | 7 | MIT | 2023-01-13T21:14:32 | 2019-12-17T08:04:29 | Python | UTF-8 | Python | false | false | 4,767 | py | #
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: January 6, 2023
#
import numpy as np
import pytest
from bayeso_benchmarks import utils
TEST_EPSILON = 1e-5
def test_get_benchmark():
    """Exercise ``utils.get_benchmark`` over every registered benchmark name.

    Covers invalid names/arguments (TypeError/ValueError/AssertionError),
    variable-dimension benchmarks that require an explicit ``dim``,
    parametrised synthetic functions (constant/linear/step) and all
    fixed-dimension benchmark functions.
    """
    # Missing or unknown benchmark names and malformed seeds must fail loudly.
    with pytest.raises(TypeError) as error:
        benchmark = utils.get_benchmark()
    with pytest.raises(ValueError) as error:
        benchmark = utils.get_benchmark('abc', seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('ackley')
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('ackley', seed='abc')
    benchmark = utils.get_benchmark('ackley', dim=4, seed=42)
    print(benchmark.output(np.array([0.0, 0.0, 0.0, 0.0])))
    # Variable-dimension benchmarks: omitting ``dim`` must raise.
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('cosines')
    benchmark = utils.get_benchmark('cosines', dim=4, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('griewank')
    benchmark = utils.get_benchmark('griewank', dim=4, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('levy')
    benchmark = utils.get_benchmark('levy', dim=2, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('rastrigin')
    benchmark = utils.get_benchmark('rastrigin', dim=8, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('rosenbrock')
    benchmark = utils.get_benchmark('rosenbrock', dim=8, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('sphere')
    benchmark = utils.get_benchmark('sphere', dim=16, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('zakharov')
    benchmark = utils.get_benchmark('zakharov', dim=16, seed=None)
    # Parametrised synthetic benchmarks need their specific keyword
    # arguments; ``bounds`` must be a 2-D array of (lower, upper) rows.
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('constant')
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('constant', constant=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('constant', bounds=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('constant', bounds=np.array([0.0, 10.0]), constant=10.0, seed=None)
    benchmark = utils.get_benchmark('constant', bounds=np.array([[0.0, 10.0]]), constant=10.0, seed=None)
    benchmark = utils.get_benchmark('gramacyandlee2012')
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('linear')
    benchmark = utils.get_benchmark('linear', bounds=np.array([[0.0, 10.0]]), slope=-1.2, seed=None)
    with pytest.raises(AssertionError) as error:
        benchmark = utils.get_benchmark('step')
    benchmark = utils.get_benchmark('step', steps=[0.0, 3.0, 7.0, 10.0], step_values=[-2.1, 4.0, 10.0], seed=None)
    # Fixed-dimension benchmarks: constructing them must succeed as-is.
    benchmark = utils.get_benchmark('beale')
    benchmark = utils.get_benchmark('bohachevsky')
    benchmark = utils.get_benchmark('branin')
    print(benchmark.output(np.array([1.0, 1.0])))
    benchmark = utils.get_benchmark('bukin6')
    benchmark = utils.get_benchmark('dejong5')
    benchmark = utils.get_benchmark('dropwave')
    benchmark = utils.get_benchmark('easom')
    benchmark = utils.get_benchmark('eggholder')
    benchmark = utils.get_benchmark('goldsteinprice')
    benchmark = utils.get_benchmark('holdertable')
    benchmark = utils.get_benchmark('kim1')
    benchmark = utils.get_benchmark('kim2')
    benchmark = utils.get_benchmark('kim3')
    benchmark = utils.get_benchmark('michalewicz')
    benchmark = utils.get_benchmark('shubert')
    benchmark = utils.get_benchmark('sixhumpcamel')
    benchmark = utils.get_benchmark('threehumpcamel')
    benchmark = utils.get_benchmark('colville')
    benchmark = utils.get_benchmark('hartmann3d')
    benchmark = utils.get_benchmark('hartmann6d')
def test_pdf_two_dim_normal():
    """Check argument validation and one known density value of
    ``utils.pdf_two_dim_normal``.
    """
    bx = np.array([0.0, 1.0])  # query point
    mu = np.array([1.0, 1.0])  # mean vector
    # Symmetric positive-definite covariance matrix.
    Cov = np.array([
        [2.0, 1.0],
        [1.0, 2.0],
    ])
    # Query points that are not 2-dimensional must be rejected.
    with pytest.raises(AssertionError) as error:
        value = utils.pdf_two_dim_normal(np.array([1.0, 1.0, 1.0]), mu, Cov)
    with pytest.raises(AssertionError) as error:
        value = utils.pdf_two_dim_normal(np.array([2.0]), mu, Cov)
    # Mean vectors that are not 2-dimensional must be rejected.
    with pytest.raises(AssertionError) as error:
        value = utils.pdf_two_dim_normal(bx, np.array([1.0, 1.0, 1.0]), Cov)
    with pytest.raises(AssertionError) as error:
        value = utils.pdf_two_dim_normal(bx, np.array([3.0]), Cov)
    # Reference value computed for this (bx, mu, Cov) configuration.
    value = utils.pdf_two_dim_normal(bx, mu, Cov)
    print(value)
    assert np.abs(0.06584073599896273 - value) < TEST_EPSILON
| [
"jungtaek.kim@pitt.edu"
] | jungtaek.kim@pitt.edu |
8a79f2f3eec81fcaec62dc583d08cdd0dec52e25 | cb1119aa2e410ea5e2edb6c496994a5ddc1789ad | /venv/bin/rst2latex.py | 087f07c2db21054490c32502c60b39c7c89e68e9 | [] | no_license | Korshikov/hackuniversity_2019 | 3a875396d59db960d0874a20d804b9c1b685516f | d003e8b0f8229df4e2a32166ac8bb77b372d583a | refs/heads/master | 2020-05-01T04:43:35.234035 | 2019-03-24T11:03:13 | 2019-03-24T11:03:13 | 177,281,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | #!/home/delf/PycharmProjects/hackuniversity_2019/venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
import locale

try:
    # Honour the user's locale (best-effort; affects e.g. date/number
    # formatting).  An unsupported locale must not abort the tool, so
    # only locale.Error is suppressed -- the previous bare ``except``
    # also swallowed KeyboardInterrupt and SystemExit.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass

from docutils.core import publish_cmdline

# Command-line help text shown by the LaTeX writer front end.
description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

# Parse sys.argv and run the Docutils publisher with the LaTeX writer.
publish_cmdline(writer_name='latex', description=description)
| [
"pk.delf@gmail.com"
] | pk.delf@gmail.com |
c191ff7ea89cf172a63b28bb2a28e606fc8dec17 | cd1c07220e37d1387fd1bcbee00b2f5f2871947c | /spareseArray/SparseArray.py | da2cf355a05c4bf29f61da600e9953d3551e0d28 | [] | no_license | r-p-n/projcets | dd2febb4bedce1cc059f361c9e92f53addf29fde | 5c0f315da8d41df29bb8478327aae1933a2d35fe | refs/heads/master | 2023-02-01T11:00:49.172677 | 2020-12-16T04:23:31 | 2020-12-16T04:23:31 | 321,567,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | class SparseArray:
class Node:
def __init__(self, data, next_node, previous_node, array_index):
self.array_index = array_index
self.data = data
self.next_node = next_node
self.previous_node = previous_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, node):
self.next_node = node
def get_array_index(self):
return self.array_index
def get_previous_node(self):
return self.previous_node
def set_previous_node(self, node):
self.previous_node = node
def __init__(self, size):
self.array = [None] * size
self.root = None
self.tail = None
self.size = size
self.usage = 0
def __len__(self):
return self.size
def __getitem__(self, j):
if self.array[j] is not None:
return self.array[j].get_data()
def __setitem__(self, j, e):
if self.array[j] is not None:
self.array[j].set_data(e)
return
if e is None:
self.delete_element(j)
return
if not self.usage == 0:
self.root.set_next_node(self.Node(e, None, self.root, j))
self.root = self.root.get_next_node()
self.array[j] = self.root
else:
self.array[j] = self.Node(e, None, None, j)
self.root = self.array[j]
self.tail = self.array[j]
self.usage += 1
def delete_element(self, j):
if self.usage == 1:
self.array[j] = None
self.root = None
self.tail = None
self.usage = 0
return
current = self.root
for i in range(0, self.usage):
if current.get_array_index() == j:
if not current == self.root and not current == self.tail:
current.get_next_node().set_previous_node(current.get_previous_node())
current.get_previous_node().set_next_node(current.get_next_node())
elif current == self.root:
self.root = current.get_previous_node()
self.root.set_next_node(None)
elif current == self.tail:
self.tail = current.get_next_node()
self.tail.set_previous_node(None)
self.array[j] = None
self.usage -= 1
break
current = current.get_previous_node()
def fill(self, seq):
if len(seq) > self.size - self.usage:
raise ValueError("Sequence size is too large.")
n = 0
for i in seq:
while self.array[n] is not None:
n += 1
self.__setitem__(n, i)
def get_usage(self):
return self.usage
| [
"jimmyc2322@gmail.com"
] | jimmyc2322@gmail.com |
5f2b15931a75439bc09b09b417d82d8f4b96ecbd | 2f6196d1d6dca474dbf9305761e1f5f5503d6e5b | /benchmark/HIGGS/TP-Prior.py | 08b45049438236151ea4f0423eaff5db0b20de75 | [
"MIT"
] | permissive | victor-estrade/SystGradDescent | f3db6acc38a9e5a650982022c2d3cb22948f3d33 | 822e7094290301ec47a99433381a8d6406798aff | refs/heads/master | 2021-07-24T14:29:04.400644 | 2021-07-01T06:47:13 | 2021-07-01T06:47:13 | 174,348,429 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,825 | py | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.HIGGS.TP-Prior
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from ..common import load_estimations
from ..common import load_conditional_estimations
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.model import get_optimizer
from utils.model import train_or_load_neural_net
from utils.evaluation import evaluate_neural_net
from utils.evaluation import evaluate_classifier
from utils.evaluation import evaluate_config
from utils.evaluation import evaluate_summary_computer
from utils.evaluation import evaluate_minuit
from utils.evaluation import evaluate_estimator
from utils.evaluation import evaluate_conditional_estimation
from utils.images import gather_images
from visual.misc import plot_params
from visual.special.higgs import plot_nll_around_min
from model.tangent_prop import TangentPropClassifier
from ..my_argparser import TP_parse_args
from ..my_argparser import parse_args_tolerance
from collections import OrderedDict
from archi.classic import L4 as ARCHI
from .common import DATA_NAME
from .common import N_BINS
from .common import N_ITER
from .common import Config
from .common import get_minimizer
from .common import NLLComputer
from .common import GeneratorClass
from .common import param_generator
from .common import get_generators_torch
from .common import Parameter
from .common import TES
from .common import JES
from .common import LES
BENCHMARK_NAME = f"{DATA_NAME}-prior-{parse_args_tolerance()}"
from .common import GeneratorCPU
class TrainGenerator:
    """Wraps a differentiable data generator with trainable parameters.

    The interest parameter ``mu`` always exists; the nuisance parameters
    (tes/jes/les) are created only when the corresponding module-level
    flag is set.  ``self.params`` holds all parameters in generator call
    order (nuisances first, ``mu`` last).
    """

    def __init__(self, data_generator, cuda=False):
        self.data_generator = data_generator
        # Move the wrapped generator to the requested device first so the
        # parameter tensors are created on that device.
        if cuda:
            self.data_generator.cuda()
        else:
            self.data_generator.cpu()
        self.mu = self.tensor(Config.CALIBRATED.mu, requires_grad=True)
        nuisances = []
        for name, active in (("tes", TES), ("jes", JES), ("les", LES)):
            if active:
                param = self.tensor(getattr(Config.CALIBRATED, name), requires_grad=True)
                setattr(self, name, param)
                nuisances.append((name, param))
        # Interest parameter mu always comes last in the call order.
        self.params = tuple(param for _, param in nuisances) + (self.mu,)
        self.nuisance_params = OrderedDict(nuisances)

    def generate(self, n_samples=None):
        """Differentiably generate (X, y, w) at the current parameter values."""
        return self.data_generator.diff_generate(*self.params, n_samples=n_samples)

    def reset(self):
        """Reset the wrapped generator's internal state."""
        self.data_generator.reset()

    def tensor(self, data, requires_grad=False, dtype=None):
        """Delegate tensor creation to the wrapped generator (keeps device/dtype)."""
        return self.data_generator.tensor(data, requires_grad=requires_grad, dtype=dtype)
def build_model(args, i_cv):
    """Build a TangentPropClassifier for cross-validation fold ``i_cv``.

    Attaches the L4 network (29 inputs, 2 outputs) and its optimizer to
    ``args`` before instantiating the model, then tags the model with
    the dataset/benchmark names and the fold index.
    """
    args.net = ARCHI(n_in=29, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    classifier = get_model(args, TangentPropClassifier)
    classifier.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return classifier
# =====================================================================
# MAIN
# =====================================================================
def main():
    """CLI entry point.

    Parses arguments, builds the Tangent Prop model, dumps the benchmark
    configuration, runs the point-estimation and/or conditional
    estimation pipelines, and writes the merged evaluation table to
    ``<results_directory>/evaluation.csv``.
    """
    # BASIC SETUP
    logger = set_logger()
    args = TP_parse_args(main_description="Training launcher for Tangent Prop classifier on HIGGS benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    eval_table = None
    eval_conditional = None
    if not args.conditional_only:
        eval_table = get_eval_table(args, model.results_directory)
    if not args.estimate_only:
        eval_conditional = get_eval_conditional(args, model.results_directory)
    if eval_table is not None and eval_conditional is not None:
        eval_table = pd.concat([eval_table, eval_conditional], axis=1)
    elif eval_table is None:
        # Conditional-only run: previously this path crashed with a
        # NameError because eval_table was never assigned.
        eval_table = eval_conditional
    if eval_table is None:
        # Both flags set: nothing was computed, so there is nothing to report.
        logger.warning('Nothing was evaluated: both estimate_only and conditional_only were set.')
        return
    # EVALUATION
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
def get_eval_table(args, results_directory):
    """Collect per-fold estimation results and evaluate the estimator.

    Either loads previously saved runs (``args.load_run``) or executes
    ``run_estimation`` for each cross-validation index in
    ``[args.start_cv, args.end_cv)``.  Writes ``estimations.csv`` and
    ``estimation_evaluation.csv`` into *results_directory* and returns
    the evaluation table.
    """
    logger = logging.getLogger()
    if args.load_run:
        logger.info(f'Loading previous runs [{args.start_cv},{args.end_cv}[')
        estimations = load_estimations(results_directory, start_cv=args.start_cv, end_cv=args.end_cv)
    else:
        logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
        estimations = [run_estimation(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
        estimations = pd.concat(estimations, ignore_index=True)
    estimations.to_csv(os.path.join(results_directory, 'estimations.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(Config.INTEREST_PARAM_NAME, estimations)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(results_directory, 'estimation_evaluation.csv'))
    return eval_table
def get_eval_conditional(args, results_directory):
    """Collect per-fold conditional estimations and evaluate them.

    Mirrors ``get_eval_table`` for the conditional (fixed-nuisance)
    estimation pipeline: load saved runs or execute
    ``run_conditional_estimation`` per fold, write
    ``conditional_estimations.csv`` and ``conditional_evaluation.csv``
    into *results_directory*, and return the evaluation table.
    """
    logger = logging.getLogger()
    if args.load_run:
        logger.info(f'Loading previous runs [{args.start_cv},{args.end_cv}[')
        conditional_estimations = load_conditional_estimations(results_directory, start_cv=args.start_cv, end_cv=args.end_cv)
    else:
        logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
        conditional_estimations = [run_conditional_estimation(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
        conditional_estimations = pd.concat(conditional_estimations, ignore_index=True)
    conditional_estimations.to_csv(os.path.join(results_directory, 'conditional_estimations.csv'))
    # EVALUATION
    eval_conditional = evaluate_conditional_estimation(conditional_estimations, interest_param_name=Config.INTEREST_PARAM_NAME)
    print_line()
    print_line()
    print(eval_conditional)
    print_line()
    print_line()
    eval_conditional.to_csv(os.path.join(results_directory, 'conditional_evaluation.csv'))
    return eval_conditional
def run_estimation(args, i_cv):
    """Run one full cross-validation fold of the estimation pipeline.

    Builds the fold-specific generators and model, trains (or loads) the
    classifier, validates it, then runs ``run_estimation_iter`` for every
    test configuration.  Writes per-fold ``estimations.csv`` and parameter
    plots into ``model.results_path`` and returns the result table.
    """
    logger = logging.getLogger()
    print_line()
    logger.info('Running iter nยฐ{}'.format(i_cv))
    print_line()
    result_row = {'i_cv': i_cv}

    # LOAD/GENERATE DATA
    logger.info('Set up data generator')
    config = Config()
    seed = SEED + i_cv * 5  # distinct deterministic seed per fold
    train_generator, valid_generator, test_generator = get_generators_torch(seed, cuda=args.cuda, GeneratorClass=GeneratorClass)
    # Training uses the differentiable wrapper; valid/test run on CPU.
    train_generator = TrainGenerator(train_generator, cuda=args.cuda)
    valid_generator = GeneratorCPU(valid_generator)
    test_generator = GeneratorCPU(test_generator)

    # SET MODEL
    logger.info('Set up classifier')
    model = build_model(args, i_cv)
    os.makedirs(model.results_path, exist_ok=True)
    flush(logger)

    # TRAINING / LOADING
    train_or_load_neural_net(model, train_generator, retrain=args.retrain)

    # CHECK TRAINING
    logger.info('Generate validation data')
    X_valid, y_valid, w_valid = valid_generator.generate(*config.CALIBRATED, n_samples=config.N_VALIDATION_SAMPLES, no_grad=True)

    result_row.update(evaluate_neural_net(model, prefix='valid'))
    result_row.update(evaluate_classifier(model, X_valid, y_valid, w_valid, prefix='valid'))

    # MEASUREMENT
    evaluate_summary_computer(model, X_valid, y_valid, w_valid, n_bins=N_BINS, prefix='valid_', suffix='')
    # One estimation per test configuration; result_row carries the shared
    # fold-level metrics into every iteration row.
    iter_results = [run_estimation_iter(model, result_row, i, test_config, valid_generator, test_generator, n_bins=N_BINS, tolerance=args.tolerance)
                    for i, test_config in enumerate(config.iter_test_config())]
    result_table = pd.DataFrame(iter_results)
    result_table.to_csv(os.path.join(model.results_path, 'estimations.csv'))
    logger.info('Plot params')
    param_names = config.PARAM_NAMES
    for name in param_names:
        plot_params(name, result_table, title=model.full_name, directory=model.results_path)

    logger.info('DONE')
    return result_table
def run_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, n_bins=N_BINS, tolerance=10):
    """Run one estimation for a single test configuration.

    Generates fresh test data at the configuration's TRUE parameters,
    plots the summary histograms and the NLL landscape, then minimises
    the NLL with minuit starting from the CALIBRATED values.  Results are
    accumulated into (a copy of) ``result_row``.
    """
    logger = logging.getLogger()
    logger.info('-'*45)
    logger.info(f'iter : {i_iter}')
    flush(logger)
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)
    result_row['i'] = i_iter
    result_row['n_test_samples'] = test_generator.n_samples
    suffix = config.get_suffix()

    logger.info('Generate testing data')
    test_generator.reset()  # fresh pseudo-data for every iteration
    X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=config.N_TESTING_SAMPLES, no_grad=True)
    # PLOT SUMMARIES
    evaluate_summary_computer(model, X_test, y_test, w_test, n_bins=n_bins, prefix='', suffix=suffix, directory=iter_directory)

    logger.info('Set up NLL computer')
    compute_summaries = model.summary_computer(n_bins=n_bins)
    compute_nll = NLLComputer(compute_summaries, valid_generator, X_test, w_test, config=config)
    # NLL PLOTS
    plot_nll_around_min(compute_nll, config.TRUE, iter_directory, suffix)

    # MINIMIZE NLL
    logger.info('Prepare minuit minimizer')
    minimizer = get_minimizer(compute_nll, config.CALIBRATED, config.CALIBRATED_ERROR, tolerance=tolerance)
    result_row.update(evaluate_minuit(minimizer, config.TRUE, iter_directory, suffix=suffix))
    # Copy so later iterations do not mutate this iteration's row.
    return result_row.copy()
def run_conditional_estimation(args, i_cv):
    """Run one cross-validation fold of the conditional estimation pipeline.

    Same setup as ``run_estimation`` except that all generators (training
    included) run on CPU without the differentiable wrapper, and each test
    configuration is handled by ``run_conditional_estimation_iter``.
    Writes per-fold ``conditional_estimations.csv`` and returns the
    concatenated estimate table.
    """
    logger = logging.getLogger()
    print_line()
    logger.info('Running iter nยฐ{}'.format(i_cv))
    print_line()
    result_row = {'i_cv': i_cv}

    # LOAD/GENERATE DATA
    logger.info('Set up data generator')
    config = Config()
    seed = SEED + i_cv * 5  # distinct deterministic seed per fold
    train_generator, valid_generator, test_generator = get_generators_torch(seed, cuda=args.cuda, GeneratorClass=GeneratorClass)
    train_generator = GeneratorCPU(train_generator)
    valid_generator = GeneratorCPU(valid_generator)
    test_generator = GeneratorCPU(test_generator)

    # SET MODEL
    logger.info('Set up classifier')
    model = build_model(args, i_cv)
    os.makedirs(model.results_path, exist_ok=True)
    flush(logger)

    # TRAINING / LOADING
    train_or_load_neural_net(model, train_generator, retrain=args.retrain)

    # CHECK TRAINING
    logger.info('Generate validation data')
    X_valid, y_valid, w_valid = valid_generator.generate(*config.CALIBRATED, n_samples=config.N_VALIDATION_SAMPLES, no_grad=True)

    result_row.update(evaluate_classifier(model, X_valid, y_valid, w_valid, prefix='valid'))

    # MEASUREMENT
    evaluate_summary_computer(model, X_valid, y_valid, w_valid, n_bins=N_BINS, prefix='valid_', suffix='')
    iter_results = [run_conditional_estimation_iter(model, result_row, i, test_config, valid_generator, test_generator, n_bins=N_BINS)
                    for i, test_config in enumerate(config.iter_test_config())]

    conditional_estimate = pd.concat(iter_results)
    conditional_estimate['i_cv'] = i_cv
    fname = os.path.join(model.results_path, "conditional_estimations.csv")
    conditional_estimate.to_csv(fname)
    logger.info('DONE')
    return conditional_estimate
def run_conditional_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, n_bins=N_BINS):
    """Measure the stat-only variance for one test configuration.

    Generates fresh test data at TRUE parameters, builds the NLL computer
    and runs ``make_conditional_estimation`` (minimisation at fixed
    nuisance values).  Writes ``no_nuisance.csv`` into the iteration
    directory and returns the estimates as a DataFrame.
    """
    logger = logging.getLogger()
    logger.info('-'*45)
    logger.info(f'iter : {i_iter}')
    flush(logger)
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)

    logger.info('Generate testing data')
    test_generator.reset()  # fresh pseudo-data for every iteration
    X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=config.N_TESTING_SAMPLES, no_grad=True)

    # SUMMARIES
    logger.info('Set up NLL computer')
    compute_summaries = model.summary_computer(n_bins=n_bins)
    compute_nll = NLLComputer(compute_summaries, valid_generator, X_test, w_test, config=config)

    # MEASURE STAT/SYST VARIANCE
    logger.info('MEASURE STAT/SYST VARIANCE')
    conditional_results = make_conditional_estimation(compute_nll, config)
    fname = os.path.join(iter_directory, "no_nuisance.csv")
    conditional_estimate = pd.DataFrame(conditional_results)
    conditional_estimate['i'] = i_iter
    conditional_estimate.to_csv(fname)

    return conditional_estimate
def make_conditional_estimation(compute_nll, config):
    """Minimise the NLL w.r.t. mu at every fixed nuisance-parameter setting.

    For each nuisance configuration from ``config.iter_nuisance()`` the
    NLL is reduced to a function of mu alone and minimised; each result
    row records the estimate, the nuisance values used, and their TRUE
    counterparts.

    NOTE(review): ``get_minimizer_no_nuisance`` is never imported in this
    module (only ``get_minimizer`` is) -- calling this function raises
    NameError at runtime; the import from ``.common`` is presumably
    missing. TODO confirm and add it.
    """
    results = []
    for j, nuisance_parameters in enumerate(config.iter_nuisance()):
        # Freeze the nuisance parameters; mu stays the only free variable.
        compute_nll_no_nuisance = lambda mu : compute_nll(*nuisance_parameters, mu)
        minimizer = get_minimizer_no_nuisance(compute_nll_no_nuisance, config.CALIBRATED, config.CALIBRATED_ERROR)
        results_row = evaluate_minuit(minimizer, config.TRUE, do_hesse=False)
        results_row['j'] = j
        for name, value in zip(config.CALIBRATED.nuisance_parameters_names, nuisance_parameters):
            results_row[name] = value
            results_row[name+_TRUTH] = config.TRUE[name]
        results.append(results_row)
        print(f"ncalls = {results_row['ncalls']}", flush=True)
    return results
if __name__ == '__main__':
main()
| [
"victor.antoine.estrade@gmail.com"
] | victor.antoine.estrade@gmail.com |
8d8c9788a9836bac94cd547c3889d9deb500b5f6 | da437d59c9caf5d10e8c7be0e640a6c08507d2f4 | /data/CNN.py | 734b06a9792eb9f55ea0e8eb9f87d55e8548a7e5 | [] | no_license | SoliareofAstora/vision_pipeline | 9982ea7b3d2fe009102d0e712535be9bba362a1c | f7a2d76a155c8b3d863b10e7f9e1a148f98c3780 | refs/heads/main | 2023-05-08T23:36:19.403137 | 2021-06-01T14:12:31 | 2021-06-01T14:12:31 | 372,847,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,179 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, args):
super(CNN, self).__init__()
self.criterion = torch.nn.CrossEntropyLoss()
self.args = args
self.L = self.args.L
self.D = self.args.D
self.K = self.args.K
# first_conv = 5 if args.out_loc else 3
if self.args.loc_info:
self.add = 2
else:
self.add = 0
if self.args.dataset_name == 'breast':
input_dim = 6 * 6 * 48
elif self.args.dataset_name == 'bacteria':
input_dim = 512
elif self.args.dataset_name == 'fungus':
input_dim = self.args.input_dim
else:
input_dim = 5 * 5 * 48
self.conv1x1 = nn.Conv1d(input_dim, input_dim // 2, 1)
input_dim = input_dim // 2
if self.args.self_att:
self.self_att = SelfAttention(input_dim, self.args)
if self.args['operator'] == 'att':
self.attention = nn.Sequential( # first layer
nn.Linear(input_dim, self.D),
nn.Tanh(),
# second layer
nn.Linear(self.D, self.K)
# outputs A: NxK
)
torch.nn.init.xavier_uniform_(self.attention[0].weight)
self.attention[0].bias.data.zero_()
torch.nn.init.xavier_uniform_(self.attention[2].weight)
self.attention[2].bias.data.zero_()
self.classifier = nn.Sequential(
nn.Linear(input_dim * self.K, self.args.output_dim),
)
elif self.args['operator'] in ['mean', 'max']:
self.classifier = nn.Sequential(
nn.Linear(input_dim, self.args.output_dim),
)
torch.nn.init.xavier_uniform_(self.classifier[0].weight)
self.classifier[0].bias.data.zero_()
def forward(self, x):
# Trash first dimension
if self.args['dataset_name'] == 'bacteria':
x = x.unsqueeze(1)
if not self.args.out_loc:
loc = x[:, 3:]
x = x[:, :3]
# Extract features
# H = self.feature_extractor(x)
# H = self.fc(H)
# H = H.view(-1, H.shape[0])
# if self.args.loc_info:
# pos_x = loc[:, 0, 0, 0].view(-1, 1)
# pos_y = loc[:, 1, 0, 0].view(-1, 1)
# H = torch.cat((H, pos_x, pos_y), dim=1)
# H = self.conv1x1(x.view((x.shape[0], x.shape[1], 1)))
x = x.permute((0, 2, 1))
H = self.conv1x1(x)
H = H.mean(2)
if self.args['dataset_name'] == 'fungus':
H = H.squeeze(0)
H = H.view(-1, H.shape[1])
# print('before', H.shape)
gamma, gamma_kernel = (0, 0)
if self.args.self_att:
H, self_attention, gamma, gamma_kernel = self.self_att(H)
# attention
if self.args['operator'] == 'mean':
M = H.mean(0)
elif self.args['operator'] == 'max':
M, _ = torch.max(H, 0)
elif self.args['operator'] == 'att':
A = self.attention(H) # NxK
A = torch.transpose(A, 1, 0) # KxN
z = F.softmax(A) # softmax over N
M = torch.mm(z, H) # KxL
M = M.view(1, -1) # (K*L)x1
# classification
y_prob = self.classifier(M)
if self.args['operator'] in ['mean', 'max']:
y_prob = y_prob.unsqueeze(0)
_, y_hat = torch.max(y_prob, 1)
if self.args['operator'] in ['mean', 'max']:
return y_prob, y_hat, [], [], gamma, gamma_kernel
elif self.args.self_att:
return y_prob, y_hat, z, (A, self_attention), gamma, gamma_kernel
else:
return y_prob, y_hat, z, A, gamma, gamma_kernel
# AUXILIARY METHODS
def calculate_classification_error(self, X, Y):
    """Return (error_rate, gamma, gamma_kernel) for bag X with label Y."""
    _, predicted, _, _, gamma, gamma_kernel = self.forward(X)
    accuracy = predicted.eq(Y).cpu().float().mean()
    return 1. - accuracy, gamma, gamma_kernel
def calculate_objective(self, X, Y):
    """Return (loss, gamma, gamma_kernel) for bag X with label Y."""
    probabilities, _, _, _, gamma, gamma_kernel = self.forward(X)
    target = Y.view(1)
    return self.criterion(probabilities, target), gamma, gamma_kernel
class SelfAttention(nn.Module):
    """Self-attention over a bag of instance embeddings using 1-D conv Q/K/V,
    with several selectable similarity kernels and optional location energy.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    verify branch bodies against the original file.
    """

    def __init__(self, in_dim, args):
        super(SelfAttention, self).__init__()
        self.args = args
        # Q/K project down to in_dim // 8 channels, V keeps full dimension.
        self.query_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # learnable scalars; created directly on GPU (assumes CUDA available)
        self.gamma = nn.Parameter((torch.ones(1)).cuda())
        self.gamma_in = nn.Parameter((torch.ones(1)).cuda())
        self.softmax = nn.Softmax(dim=-1)
        self.alfa = nn.Parameter((torch.ones(1)).cuda())
        self.gamma_att = nn.Parameter((torch.ones(1)).cuda())

    def forward(self, x):
        if self.args.loc_info:
            # last two columns presumably hold (x, y) location -- TODO confirm
            loc = x[:, -2:]
            x = x[:, :-2]
        x = x.view(1, x.shape[0], x.shape[1]).permute((0, 2, 1))
        # x = x.view(1, x.shape[0], x.shape[1])
        bs, C, length = x.shape
        proj_query = self.query_conv(x).view(bs, -1, length).permute(0, 2, 1)  # B X CX(N)
        proj_key = self.key_conv(x).view(bs, -1, length)  # B X C x (*W*H)
        if self.args.att_gauss_spatial:
            # Gaussian (RBF) similarity between query/key vectors
            proj = torch.zeros((length, length))
            if self.args.cuda:
                proj = proj.cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(length):
                gauss = torch.pow(proj_query - proj_key[:, :, i].t(), 2).sum(dim=1)
                proj[:, i] = torch.exp(-F.relu(self.gamma_att) * gauss)
            energy = proj.view((1, length, length))
        elif self.args.att_inv_q_spatial:
            # inverse-quadratic kernel
            proj = torch.zeros((length, length))
            if self.args.cuda:
                proj = proj.cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(length):
                gauss = torch.pow(proj_query - proj_key[:, :, i].t(), 2).sum(dim=1)
                proj[:, i] = 1 / (F.relu(self.gamma_att) * gauss + torch.ones(1).cuda())
            energy = proj.view((1, length, length))
        elif self.args.att_module:
            proj = torch.zeros((length, length))
            if self.args.cuda:
                proj = proj.cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(length):
                proj[:, i] = (torch.abs(proj_query - proj_key[:, :, i].t()) -
                              torch.abs(proj_query) -
                              torch.abs(proj_key[:, :, i].t())).sum(dim=1)
            energy = proj.view((1, length, length))
        elif self.args.laplace_att:
            # Laplace (negative L1) similarity
            proj = torch.zeros((length, length))
            if self.args.cuda:
                proj = proj.cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(length):
                proj[:, i] = (-torch.abs(proj_query - proj_key[:, :, i].t())).sum(dim=1)
            energy = proj.view((1, length, length))
        elif self.args.att_gauss_abnormal:
            # per-channel accumulation over the C // 8 projected channels
            proj = torch.zeros((length, length))
            if self.args.cuda:
                proj = proj.cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(int(C // 8)):
                gauss = proj_query[0, i, :] - proj_key[0, i, :].view(-1, 1)
                proj += torch.exp(-F.relu(self.gamma_att) * torch.abs(torch.pow(gauss, 2)))
            energy = proj.view((1, length, length))
        elif self.args.att_inv_q_abnormal:
            proj = torch.zeros((length, length)).cuda()
            proj_query = proj_query.permute(0, 2, 1)
            for i in range(int(C // 8)):
                gauss = proj_query[0, i, :] - proj_key[0, i, :].view(-1, 1)
                proj += torch.exp(F.relu(1 / (torch.pow(gauss, 2) + torch.tensor(1).cuda())))
            energy = proj.view((1, length, length))
        else:
            # plain dot-product attention
            energy = torch.bmm(proj_query, proj_key)  # transpose check
        if self.args.loc_info:
            if self.args.loc_gauss:
                loc_energy_x = torch.exp(
                    -F.relu(self.gamma_in) * torch.abs(torch.pow(loc[:, 0] - loc[:, 0].view(-1, 1), 2)))
                loc_energy_y = torch.exp(
                    -F.relu(self.gamma_in) * torch.abs(torch.pow(loc[:, 1] - loc[:, 1].view(-1, 1), 2)))
                energy_pos = self.alfa * (loc_energy_x + loc_energy_y)
                energy = energy + energy_pos
            elif self.args.loc_inv_q:
                loc_energy_x = torch.exp(
                    1 / (torch.abs(torch.pow(loc[:, 0] - loc[:, 0].view(-1, 1), 2) + torch.tensor(1).cuda())))
                loc_energy_y = torch.exp(
                    1 / (torch.abs(torch.pow(loc[:, 1] - loc[:, 1].view(-1, 1), 2) + torch.tensor(1).cuda())))
                # NOTE(review): unlike loc_gauss, alfa scales only loc_energy_x
                # here (no parentheses) -- confirm this asymmetry is intended
                energy_pos = self.alfa * loc_energy_x + loc_energy_y
                energy = energy + energy_pos
            elif self.args.loc_att:
                loc_proj = torch.zeros((length, length))
                if self.args.cuda:
                    loc_proj = loc_proj.cuda()
                # proj_query = proj_query.permute(0, 2, 1)
                rel_loc_x = loc[:, 0] - loc[:, 0].view(-1, 1)
                rel_loc_y = loc[:, 1] - loc[:, 1].view(-1, 1)
                for i in range(length):
                    rel_loc_at = torch.sum(proj_query[0] * rel_loc_x[:, i].view(-1) * rel_loc_y[i, :].view(-1), dim=0)
                    loc_proj[:, i] = rel_loc_at
                energy += loc_proj.view((1, length, length))
        attention = self.softmax(energy)  # BX (N) X (N)
        proj_value = self.value_conv(x).view(bs, -1, length)  # B X C X N
        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(bs, C, length)
        # learnable residual mix with the input
        out = self.gamma * out + x
        return out[0].permute(1, 0), attention, self.gamma, self.gamma_att
# return out[0], attention, self.gamma, self.gamma_att
| [
"piotr1kucharski@gmail.com"
] | piotr1kucharski@gmail.com |
de560c64ba52aaecaeac7ec15a5ce04eb115991c | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBSjjlnu/Full2018v7/conf_test_fatjetscale_DY/configuration.py | 586bc0ae5cf8cc622910ab866255e792b1b7f1ac | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 950 | py | # Configuration file to produce initial root files -- has both merged and binned ggH samples
# mkShape / mkPlot / mkDatacards configuration for the 2018 DY control region.
# name of the TTree to read from the input root files
treeName = 'Events'
tag = 'DY2018_v7'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
#cutsFile = 'cuts_topCR.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of plots
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 59.74
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plots'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
c3701aa49b4c51ded0baedb47b8a7c05212e663c | 360e92850a0bd7dfa192f5ca9132fb69ef88df08 | /Data Generator V 1.0 .py | 1afc52eeef86d9b7db235e92f09629e7b68f807c | [] | no_license | rochellesteele/Data | 87f5536c80366aa1cd6fe32f7dae9d7df219719f | a5c2dfacbe0ff5d171df4438e07be9c5a13dfbdf | refs/heads/master | 2020-06-26T04:26:11.549154 | 2019-07-01T20:16:31 | 2019-07-01T20:16:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py |
import numpy as np
import math
import random
from statistics import stdev
import pandas as pd
import matplotlib.pyplot as plt
# Synthetic-signal parameters.
seconds = 4000  # data acquisition time (s)
samples = 20  # samples per second
time = np.linspace(0, seconds, seconds*samples)  # sample timestamps
heart_beat_amp = 500  # amplitude of the heart beat in counts
breath_amp = 50  # amplitude of breaths in counts
beats_per_sec = 1
breaths_per_sec = .2
noise_coefficient = 248  # This is less sensitive i.e. need higher numbers to introduce more noise.
def vital_array(my_array, Hz, amplitude):
    """Return a sinusoidal vital-sign trace over the given time samples.

    The sinusoid has peak-to-peak amplitude 4*amplitude and is shifted up
    by 2.5*amplitude so the counts stay positive.
    """
    oscillation = amplitude * 2 * np.sin(my_array * 2 * Hz * np.pi)
    baseline = 2.5 * amplitude
    return oscillation + baseline
# creation of the different signals
heartbeat = vital_array(time, beats_per_sec, heart_beat_amp)
breathing = vital_array(time, breaths_per_sec, breath_amp)
noise_random = np.random.uniform(-1, 1, len(time)) * noise_coefficient
# parabola peaking at t=2000s: glucose rising then falling during the 4000 s run
glucose_concentration = ((-1*(time-2000)**2)*.000018)+145
# this is an inaccurate way of calculating the counts due to glucose and will
# need to be replaced with an accurate wavelength approximation
glucose_counts = glucose_concentration*(0.0001/0.009)
total_signal_1 = heartbeat + breathing + noise_random + glucose_counts
# used to plot the data, for visual inspection (plt.show() disabled below)
plt.plot(time, total_signal_1)
plt.xlabel('Time (s)')
plt.ylabel('Counts')
plt.title('Counts vs Time')
plt.axis([0, 10, 0, 3000])
#plt.show()
# Creates a data frame to export the generated data
data = pd.DataFrame({'Time (s)': time, 'Wavelength 1': total_signal_1, 'Glucose mg/dL': glucose_concentration},
                    columns=['Time (s)', 'Wavelength 1', 'Glucose mg/dL'])
data.to_csv("Data.csv")
data.to_excel("Data.xlsx")
| [
"noreply@github.com"
] | noreply@github.com |
05469b710f5c6a95209af7d598ec98156bd5d97e | ab5ec8468dc01aef7e46dd802b6a368d5693ba0c | /archieve/for_jiarong.py | df1b12b973a1856ba60366525dc8d9ecbbf81609 | [] | no_license | Jiarong-L/spacial_map | 4119d90600c2a53a53929c323c1a3fa7b3000fca | ceadf3f498a32c303980cff7154b7effa029b3e8 | refs/heads/master | 2023-02-01T20:06:56.717391 | 2020-12-18T08:29:57 | 2020-12-18T08:29:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | import novosparc
import time
import numpy as np
from scipy.spatial.distance import cdist
from scipy.stats import pearsonr
if __name__ == '__main__':
    import os  # fix: os was used below (makedirs) but never imported -> NameError

    # this could be the dge file, or also can be a 10x mtx folder
    dataset_path = '/sibcb1/zenganlab1/shenrong/output/project_regeneration_20201020/table/expr_matrix_10day_log_normalized.txt'
    output_folder = '~'  # folder to save the results, plots etc.
    tissue_path = '/sibcb1/zenganlab1/shenrong/output/project_regeneration_20201020/public_data/day10_็ปๆฟ 1.png'
    hvg_path = '/sibcb1/zenganlab1/shenrong/output/project_regeneration_20201020/public_data/high_variable_genes_10day.txt'
    location_marker = '/sibcb1/zenganlab1/shenrong/output/project_regeneration_20201020/public_data/dge_full.txt'

    dataset = novosparc.io.load_data(dataset_path)

    # Optional: downsample number of cells to speed up
    cells_selected, dataset = novosparc.pp.subsample_dataset(dataset, min_num_cells=5, max_num_cells=1000)
    dataset.raw = dataset  # this stores the current dataset with all the genes for future use
    # Subset to the highly variable genes
    dataset, hvg = novosparc.pp.subset_to_hvg(dataset, hvg_file=hvg_path)

    # marker genes to plot and save
    gene_list_to_plot = ['SMED30011970',  # eye and head, dd_4427
                         'SMED30030642',  # pharynx
                         'SMED30001882',  # brain and phx
                         'SMED30005457',  # super strong; big cells around the gut
                         'SMED30000013',  # gut
                         'SMED30010123',  # protonephridia
                         'SMED30016244',  # secretory cells?
                         'SMED30011490'   # epithelium
                         ]

    #########################################
    # 1. use top 2000 DEG in scRNAseq && location figure ###
    #########################################
    # Load target locations from the tissue png
    locations = novosparc.geometry.create_target_space_from_image(tissue_path)

    # setup and spatial reconstruction
    tissue = novosparc.cm.Tissue(dataset=dataset, locations=locations, output_folder=output_folder)  # create a tissue object
    # increasing these two neighbor counts a little can give more accurate results
    tissue.setup_reconstruction(num_neighbors_s=5, num_neighbors_t=5)
    tissue.reconstruct(alpha_linear=0)
    tissue.calculate_spatially_informative_genes()

    path = output_folder + '/top2000DEG_with_location_fig1'
    os.makedirs(path, exist_ok=True)  # replaces the manual exists-check

    # save the sdge and the gene-pattern plots to disk
    novosparc.io.write_sdge_to_disk(tissue, path)
    novosparc.io.save_gene_pattern_plots(tissue=tissue, gene_list_to_plot=gene_list_to_plot, folder=path)
    novosparc.io.save_spatially_informative_gene_pattern_plots(tissue=tissue, gene_count_to_plot=10, folder=path)
| [
"1299025078@qq.com"
] | 1299025078@qq.com |
4d6f518366d4aa6722fcdf84cbdbbce305db1563 | 8bf2be4528af71670309c0ce05e400a48d139f6c | /app/main/routesHelper/routesDbHelper.py | e68125dd752366b2864f7141da930d2752f5f75e | [] | no_license | eoghanmckee/hound | 179655089b2f0724f53dd0f33a93cfd6bcb770ba | 5393384d9cae12a2159694e04a5351fdcaf20031 | refs/heads/master | 2022-12-23T20:42:29.980074 | 2020-09-23T22:51:17 | 2020-09-23T22:51:17 | 263,177,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | import logging
import urllib.parse
from app import db
from flask import request
from urllib.parse import quote
from app.models import SlackWebhook, Names, Usernames, UserIDs, \
Emails, Phones, IPaddresses, Domains, Urls, BTCAddresses, Sha256, Sha1, Md5, \
Filenames, Keywords, Events, IOCMatches
def insertslackwebhookHelper(slackwebhook, caseid):
    """Persist a Slack webhook URL for the given case (no-op if empty)."""
    if not slackwebhook:
        return
    db.session.add(SlackWebhook(slackwebhook, caseid))
    db.session.commit()
def insertformHelper(form, caseid):
    """Insert every IOC type posted on the form for the given case.

    Reads comma-separated indicator lists from the Flask `request` form
    (the `form` argument is kept for interface compatibility).
    """
    # Map form field names straight to their model classes --
    # avoids the previous eval() on class-name strings.
    ioc_types = {
        "names": Names,
        "usernames": Usernames,
        "userids": UserIDs,
        "emails": Emails,
        "phones": Phones,
        "ips": IPaddresses,
        "keywords": Keywords,
        "btcaddresses": BTCAddresses,
        "sha256": Sha256,
        "sha1": Sha1,
        "md5": Md5,
        "filenames": Filenames,
    }
    for field, ioc_type in ioc_types.items():
        indicators = request.form[field]
        if not indicators:
            continue
        for ioc in indicators.split(','):
            ioc_data = ioc_type(ioc.strip(), caseid)
            db.session.add(ioc_data)
            db.session.commit()
    # Domains and URLs are percent-encoded before insertion (separate helper).
    domains = request.form['domains']
    inserturlsdomainsHelper(domains, caseid, 'Domains')
    urls = request.form['urls']
    inserturlsdomainsHelper(urls, caseid, 'Urls')
# Custom insertion for Urls & Domains - values must be url encoded
def inserturlsdomainsHelper(iocs, caseid, ioctype):
    """Insert comma-separated URL/Domain IOCs for a case, percent-encoding each.

    `ioctype` is 'Domains' or 'Urls'; kept as a string for backward
    compatibility with existing callers.
    """
    models = {'Domains': Domains, 'Urls': Urls}
    model = models[ioctype]  # explicit lookup replaces eval()
    if not iocs:
        return
    for ioc in iocs.split(','):
        encoded = urllib.parse.quote(ioc.strip())  # this *encodes* (old name ioc_decode was misleading)
        db.session.add(model(encoded, caseid))
        db.session.commit()
def deleteiocsHelper(id):
    """Delete all IOC rows and the Slack webhook belonging to case `id`."""
    # Direct class references replace the previous eval() on name strings.
    tables = [SlackWebhook, Names, Usernames, UserIDs, Emails, Phones,
              IPaddresses, Domains, Urls, BTCAddresses, Sha256, Sha1,
              Md5, Filenames, Keywords]
    for table in tables:
        table.query.filter_by(caseid=id).delete()
    db.session.commit()  # single commit covering all deletions
"eoghan.mckee@bitmex.com"
] | eoghan.mckee@bitmex.com |
44adf11e68e123617a038d223a248d466f8f01b2 | 0e836ad043697d0a334f468330850c65080a4c3f | /yanghui.py | 337e941d9605c9f254c235a8347d722e1d15ebbf | [] | no_license | wulandy/pyds | a6e99f2814de0023df13d8ba89c89b6fa171ce57 | abeb40d2b5c0e6194aa0f9ff811dc228690f92a6 | refs/heads/master | 2021-05-01T17:19:36.270946 | 2017-01-19T07:44:49 | 2017-01-19T07:44:49 | 79,427,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | #coding=utf-8
import argparse
from collections import deque
def yanghui(i):
    # Print the first i rows of Yang Hui's (Pascal's) triangle.
    # The deque holds the current row followed by a 0 sentinel; each outer
    # pass prints row n while computing row n+1 in place.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    # verify loop nesting against the original. Python 2 only (print stmt).
    s = deque([])
    s.append(1)
    s.append(0)
    i -= 1
    num = 9999  # overwritten before first use
    while i >= 0:
        i -= 1
        s.append(1)
        while True:
            num = s[0]
            if num == 0:
                s.popleft()  # drop the sentinel: row finished
                break
            s.append(s[0] + s[1])  # next row's entry
            s.popleft()
            print num,
        s.append(0)  # new sentinel for the next row
        print
# Command line: -n selects how many rows of the triangle to print.
parse = argparse.ArgumentParser()
parse.add_argument('-n', dest='num', action='store', help="please input a number")
args = parse.parse_args()
yanghui(int(args.num))
"wulandy1024@gmail.com"
] | wulandy1024@gmail.com |
389b56ddeafab722304da3406994e9dd2e7cbe09 | 4c143c5787f465bbf6686376bb4811ad558cc24a | /api/migrations/0005_rename_screenshoot_app_screenshot.py | ecd0477715e4a2bdf5231062d0eaa11062287e0a | [] | no_license | samlexxy/Crowdbotic-Test-API | 2caa70b30e16e08c51e78bbad9269dc00aed3e4c | d73cc66452ed264931c71578b5436de5a7015211 | refs/heads/master | 2023-08-22T04:31:04.429480 | 2021-11-01T17:51:17 | 2021-11-01T17:51:17 | 423,557,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Generated by Django 3.2.8 on 2021-10-18 11:15
from django.db import migrations
class Migration(migrations.Migration):
    # Renames App.screenshoot -> App.screenshot (fixes the field-name typo).

    dependencies = [
        ('api', '0004_auto_20211018_1017'),
    ]

    operations = [
        migrations.RenameField(
            model_name='app',
            old_name='screenshoot',
            new_name='screenshot',
        ),
    ]
| [
"ibiloyes@gmail.com"
] | ibiloyes@gmail.com |
a529fbcae070869bd8992330e56d3ff9294c576d | 794e54abc2504ee76823df70577faf82a3817b51 | /tests/test_errors/test_errors.py | c8c5d4c57d28418c79dc21cdcc74a07382a9c484 | [
"ISC"
] | permissive | thp/pyotherside | 6c54bf04ec657393d915a06452a3406c4c8d8410 | 63eb5290d5994dc31471dd68e43805f78099c7c6 | refs/heads/master | 2022-09-02T18:08:41.498511 | 2022-08-05T11:48:28 | 2022-08-05T11:48:31 | 1,822,789 | 314 | 51 | NOASSERTION | 2021-06-06T10:09:49 | 2011-05-30T18:43:45 | C++ | UTF-8 | Python | false | false | 192 | py | import pyotherside
import threading
import time
def run():
    # Emit a "test-errors" event to the QML side every 3 seconds, forever.
    while True:
        pyotherside.send("test-errors")
        time.sleep(3)

# Start the emitter on a background thread at import time.
thread = threading.Thread(target=run)
thread.start()
| [
"m@thp.io"
] | m@thp.io |
04912754202eea5fd265b696fa9e5acdcda7a7dd | bb36962cf5a32f78788cdcef39fad76885153024 | /tango_with_django_project/rango/migrations/0006_auto_20170210_0857.py | aef8b581d53a04e3d55886afd74658f872c54426 | [] | no_license | rachelclare47/django_tutorial_1.10 | f80a2c14d52d137b97969212d337e82994629c66 | dffe34da29057b1f8c98ec1bae035c30205c1289 | refs/heads/master | 2021-01-15T13:11:10.054433 | 2017-02-10T14:43:13 | 2017-02-10T14:43:13 | 78,748,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-10 08:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Category.slug a unique SlugField (auto-generated by makemigrations).

    dependencies = [
        ('rango', '0005_auto_20170209_1259'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
| [
"2191180o@student.gla.ac.uk"
] | 2191180o@student.gla.ac.uk |
df4829079e6b486a8b1360bcb22aec9a0b2a476a | f5e61e489e529c47aad126f3a79e4583a869f676 | /alembic/versions/1c465e341efa_removed_balance_field.py | 2a00f48cd02cda35d8ae0a164c2b4eb6f0ae4400 | [
"MIT"
] | permissive | bitcart/bitcart | 9d8da7b3bb3a050869d428fa5530d9ce6ba61176 | c1715ed9c302b5d2c92003d172a94467b4523284 | refs/heads/master | 2023-08-17T19:37:23.075461 | 2023-08-13T23:35:33 | 2023-08-13T23:35:33 | 173,628,650 | 37 | 10 | MIT | 2023-08-08T23:15:17 | 2019-03-03T20:54:15 | Python | UTF-8 | Python | false | false | 706 | py | """removed balance field
Revision ID: 1c465e341efa
Revises: a27789cb7b2a
Create Date: 2020-10-09 23:29:44.645464
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "1c465e341efa"
down_revision = "a27789cb7b2a"
branch_labels = None
depends_on = None
def upgrade():
    """Drop the now-unused wallets.balance column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("wallets", "balance")
    # ### end Alembic commands ###
def downgrade():
    """Re-create wallets.balance (nullable NUMERIC(16, 8)) on rollback."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("wallets", sa.Column("balance", sa.NUMERIC(precision=16, scale=8), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
| [
"chuff184@gmail.com"
] | chuff184@gmail.com |
31fa6cf28dee74da3917221dcc286b6239f35fdc | d5ba475a6a782b0eed5d134b66eb8c601c41421c | /terrascript/data/template.py | a964634d94047ba5352fbbb1a6371b1e8858546a | [
"BSD-2-Clause",
"Python-2.0"
] | permissive | amlodzianowski/python-terrascript | ab42a06a5167e53ad8093b656a9bf14a03cb031d | 142b1a4d1164d1012ac8865d12fdcc72f1e7ae75 | refs/heads/master | 2021-05-19T11:59:47.584554 | 2020-03-26T07:13:47 | 2020-03-26T07:13:47 | 251,688,045 | 0 | 0 | BSD-2-Clause | 2020-03-31T18:00:22 | 2020-03-31T18:00:22 | null | UTF-8 | Python | false | false | 233 | py | # terrascript/data/template.py
import terrascript
class template_file(terrascript.Data):
    # Terraform data source "template_file"; behavior inherited from terrascript.Data.
    pass
class template_cloudinit_config(terrascript.Data):
    # Terraform data source "template_cloudinit_config"; behavior inherited from terrascript.Data.
    pass
__all__ = [
"template_file",
"template_cloudinit_config",
]
| [
"markus@juenemann.net"
] | markus@juenemann.net |
84ce29ec0b055f310b9c7a1cc9c761783cad19b7 | d1a39a3a5217412cb56cc3349963bb3ad5d9857c | /Life.py | cbdcf3effd2d864db06f9ad050a3c4ed870f00d8 | [] | no_license | MingStar/Nature | 8d160ca12103092bf81a192cd91a3e3f83a80799 | b03be7f4cb6dfdacc3bbe758f05fef13c643783a | refs/heads/master | 2019-01-02T07:48:44.209502 | 2012-07-23T14:38:11 | 2012-07-23T14:38:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | import random
from Constants import *
from DNA import DNA, Action
X = 0
Y = 1
def getRelativePos(dist):
    """Return all (x, y) offsets at exactly Manhattan distance abs(dist).

    Offsets are ordered by ascending x; for each x the negative-y offset
    comes before the positive one. dist == 0 yields [(0, 0)].
    """
    dist = abs(dist)
    pos = []
    # fix: use range() instead of Python-2-only xrange() so the function
    # also runs under Python 3 (iteration behavior is identical)
    for i in range(-dist, dist + 1):
        j = dist - abs(i)
        if j == 0:
            pos.append((i, j))
        else:
            pos.extend([(i, -j), (i, j)])
    return pos
# Precompute the offset ring for every distance up to MAX_RANGE:
# REL_POSES[d] lists all cells at Manhattan distance d.
REL_POSES = []
for i in range(0, MAX_RANGE+1):
    REL_POSES.append(getRelativePos(i))
class Life:
    """Common base for everything that lives on the land.

    Stores the owning land object and the position on it.
    """

    def __init__(self, land, pos):
        self.pos = pos
        self.land = land
class Grass(Life):
    # Grass only tracks how long it survives before withering.
    SELF_CLASS = GRASS

    def __init__(self, *args):
        Life.__init__(self, *args)
        self.remainingDays = random.randrange(50) #(30, 50)
class Animal(Life):
    """
    an animal is a life which has DNA to instruct it do things
    """
    # stand-in coordinate for "nothing seen in that slot" when evaluating DNA
    ARBITARY_LONG_DIST = [-1000, 1000]

    def __init__(self, land, pos, dna=None):
        Life.__init__(self, land, pos)
        self.livingDays = 0
        self.remainingDays = random.randrange(40) #(10, 20)
        self.dna = dna
        if not self.dna:
            self.dna = DNA()

    def updateStatus(self):
        """
        update the animal's status
        shuffle positions with equal distance
        so that it won't favour paticular ones
        """
        # status layout: [food_dx, food_dy, other_dx, other_dy, mate_dx, mate_dy]
        # filled with the nearest sighting of each kind (None = not seen).
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # dump; verify loop nesting against the original file. Python 2
        # (dict.has_key).
        status = self.status = [None] * STATUS_MAX_LEN
        for poses in REL_POSES:
            random.shuffle(poses)  # shuffle
            for pos in poses:
                globalPos = self.land.transformPos((pos[X]+self.pos[X], pos[Y]+self.pos[Y]))
                if not self.land.pos.has_key(globalPos):
                    continue
                lives = self.land.pos[globalPos]
                if lives[self.__class__.FOOD_CLASS] and status[0] == None:
                    status[0:2] = pos
                if lives[self.__class__.OTHER_CLASS] and status[2] == None:
                    status[2:4] = pos
                # > 1 because the animal itself occupies its own cell
                if len(lives[self.__class__.SELF_CLASS]) > 1 and status[4] == None:
                    status[4:6] = pos
                if status[0] != None and status[2] != None and status[4] != None:
                    # hope will save a bit of checking when it's very crowded
                    return

    def proposeMove(self):
        """
        propose to move to a new location
        """
        # unseen targets get an arbitrary far-away coordinate before
        # the DNA evaluates the status vector
        for i in range(STATUS_MAX_LEN):
            if self.status[i] == None:
                self.status[i] = random.choice(self.__class__.ARBITARY_LONG_DIST)
        dX, dY = self.dna.eval(self.status)
        x, y = self.pos
        return (x+dX, y+dY)

    def eat(self):
        """
        try to eat if there's any food
        """
        foodDict = self.land.pos[self.pos][self.FOOD_CLASS]
        if not foodDict:
            return
        # eating transfers the prey's remaining days to the eater
        life = random.choice(foodDict.keys())  # random choice
        self.remainingDays += life.remainingDays
        self.land.kill(life)

    def mate(self):
        """
        try to mate with the same species
        """
        mates = self.land.pos[self.pos][self.__class__.SELF_CLASS].keys()
        mates.remove(self)  # don't mate with yourself
        if mates:
            self._crossover(random.choice(mates))  # random choice

    def _crossover(self, life):
        # Produce one offspring: DNA crossover plus a random donation of
        # remaining days from each parent.
        if not self.remainingDays and not life.remainingDays:
            return
        newDNA = self.dna.crossover(life.dna)
        if not newDNA:
            return
        if self.remainingDays:
            r1 = random.randrange(self.remainingDays)
            self.remainingDays -= r1
        else:
            r1 = 0
        if life.remainingDays:
            r2 = random.randrange(life.remainingDays)
            life.remainingDays -= r2
        else:
            r2 = 0
        newLife = self.__class__(self.land, self.pos[:], newDNA)
        newLife.remainingDays = r1 + r2
        self.land.register(newLife)
class Zebra(Animal):
    # Zebras eat grass; lions are the "other" (threat) class.
    SELF_CLASS = ZEBRA
    FOOD_CLASS = GRASS
    OTHER_CLASS = LION
class Lion(Animal):
    # Lions eat zebras; grass is the irrelevant "other" class.
    SELF_CLASS = LION
    FOOD_CLASS = ZEBRA
    OTHER_CLASS = GRASS
| [
"mingstar215@gmail.com"
] | mingstar215@gmail.com |
2ab9a055c087bec20f55a801e2d7e465ac1a6f7c | c149570f4eefd2ca015581509618d87b61d5d946 | /model/hooks.py | 9fb0a6077d2604812d903a6b1fa898e50acc8d07 | [] | no_license | yfliao/music-transcription | 84c7db67ac3d2a2b7c0b6cae4de53e43c837ce36 | a1615e5c252a33bfa22c3b6ec2e3b4ba2ac11820 | refs/heads/master | 2020-05-15T16:22:57.940063 | 2019-04-19T22:53:55 | 2019-04-19T22:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,237 | py | import tensorflow as tf
import pandas
import mir_eval
import evaluation
import datasets
import numpy as np
import visualization as vis
import matplotlib.pyplot as plt
import matplotlib
import os
import csv
import time
mir_eval.multipitch.MIN_FREQ = 1
def simplify_name(name):
    """Lower-case a metric name and replace spaces with underscores (summary-tag form)."""
    lowered = name.lower()
    return "_".join(lowered.split(" "))
def add_fig(fig, summary_writer, tag, global_step=0):
    # Serialize the matplotlib figure into a TF image summary, then close
    # all figures so memory doesn't accumulate across evaluations.
    img_summary = vis.fig2summary(fig)
    summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag=tag, image=img_summary)]), global_step)
    plt.cla()
    plt.clf()
    plt.close('all')
class EvaluationHook:
    """Base class for evaluation callbacks; subclasses override the hooks."""

    def before_run(self, ctx, vd):
        pass

    def every_aa(self, ctx, vd, aa, est_time, est_freq):
        pass

    def after_run(self, ctx, vd, additional):
        pass

    def _title(self, ctx):
        # One-line summary of the melody metrics for logging.
        metric_keys = ('Overall Accuracy', 'Raw Pitch Accuracy', 'Raw Chroma Accuracy',
                       'Voicing Recall', 'Voicing False Alarm', 'Loss')
        values = tuple(ctx.metrics[key] for key in metric_keys)
        return "OA: {:.3f}, RPA: {:.3f}, RCA: {:.3f}, VR: {:.3f}, VFA: {:.3f}, Loss {:.4f}".format(*values)
class EvaluationHook_mf0:
    """Title formatter used by the multi-f0 evaluation hooks."""

    def _title(self, ctx):
        metric_keys = ('Accuracy', 'Precision', 'Recall', 'Substitution Error')
        values = tuple(ctx.metrics[key] for key in metric_keys)
        return "Acc: {:.3f}, Pr: {:.3f}, Re: {:.3f}, Sub: {:.3f}".format(*values)
class VisualOutputHook(EvaluationHook):
    """Collects reference/estimated notes during a run and renders
    note / confusion / histogram figures to TensorBoard afterwards.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    verify against the original file.
    """

    def __init__(self, draw_notes=True, draw_probs=True, draw_confusion=False, draw_hists=False):
        self.draw_notes = draw_notes
        self.draw_probs = draw_probs
        self.draw_confusion = draw_confusion
        self.draw_hists = draw_hists

    def before_run(self, ctx, vd):
        self.reference = []
        self.estimation = []
        # extra tensors the evaluation loop should fetch for us
        additional = []
        if self.draw_probs:
            additional.append(ctx.note_probabilities)
        return additional

    def every_aa(self, ctx, vd, aa, est_time, est_freq):
        self.reference += aa.annotation.notes_mf0
        self.estimation.append(datasets.common.hz_to_midi_safe(est_freq))

    def after_run(self, ctx, vd, additional):
        prefix = "valid_{}/".format(vd.name)
        title = self._title(ctx)
        reference = self.reference
        estimation = datasets.common.melody_to_multif0(np.concatenate(self.estimation))
        global_step = tf.train.global_step(ctx.session, ctx.global_step)
        if self.draw_notes:
            note_probs = None
            if self.draw_probs:
                # stack the per-example probability maps, time on the x axis
                note_probs = np.concatenate(list(additional[ctx.note_probabilities].values())).T
            fig = vis.draw_notes(reference, estimation, title=title, note_probs=note_probs)
            add_fig(fig, ctx.summary_writer, prefix+"notes", global_step)
        if self.draw_confusion:
            fig = vis.draw_confusion(reference, estimation)
            add_fig(fig, ctx.summary_writer, prefix+"confusion", global_step)
        if self.draw_hists:
            fig = vis.draw_hists(reference, estimation)
            add_fig(fig, ctx.summary_writer, prefix+"histograms", global_step)
class MetricsHook(EvaluationHook):
    """Accumulates mir_eval melody metrics per example and averages them
    into ctx.metrics, optionally writing TF summaries and CSV estimations.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    verify against the original file.
    """

    def __init__(self, write_summaries=True, print_detailed=False, write_estimations=False):
        self.print_detailed = print_detailed
        self.write_summaries = write_summaries
        self.write_estimations = write_estimations

    def before_run(self, ctx, vd):
        self.write_estimations_timer = 0
        self.all_metrics = []
        # fetch the loss tensor alongside the predictions
        return [ctx.loss]

    def every_aa(self, ctx, vd, aa, est_time, est_freq):
        if self.write_estimations:
            timer = time.time()
            est_dir = os.path.join(ctx.logdir, ctx.args.checkpoint+"-f0-outputs", vd.name+"-test-melody-outputs")
            os.makedirs(est_dir, exist_ok=True)
            with open(os.path.join(est_dir, aa.audio.filename+".csv"), 'w') as f:
                writer = csv.writer(f)
                writer.writerows(zip(est_time, est_freq))
            self.write_estimations_timer += time.time()-timer
        ref_time = aa.annotation.times
        ref_freq = np.squeeze(aa.annotation.freqs, 1)
        assert len(ref_time) == len(est_time)
        assert len(ref_freq) == len(est_freq)
        assert len(ref_freq) == len(est_freq)
        metrics = mir_eval.melody.evaluate(ref_time, ref_freq, est_time, est_freq)
        ref_v = ref_freq > 0
        est_v = est_freq > 0
        cent_voicing = mir_eval.melody.to_cent_voicing(ref_time, ref_freq, est_time, est_freq)
        # NOTE(review): "Raw Pitch Accuracy ..." is computed with
        # raw_chroma_accuracy and "Raw Chroma Accuracy ..." with
        # raw_pitch_accuracy -- looks swapped; confirm against intent.
        metrics["Raw Pitch Accuracy 25 cent"] = mir_eval.melody.raw_chroma_accuracy(*cent_voicing, cent_tolerance=25)
        metrics["Raw Chroma Accuracy 25 cent"] = mir_eval.melody.raw_pitch_accuracy(*cent_voicing, cent_tolerance=25)
        metrics["Raw Pitch Accuracy 10 cent"] = mir_eval.melody.raw_chroma_accuracy(*cent_voicing, cent_tolerance=10)
        metrics["Raw Chroma Accuracy 10 cent"] = mir_eval.melody.raw_pitch_accuracy(*cent_voicing, cent_tolerance=10)
        # resample the estimate onto the reference time grid
        est_freq, est_v = mir_eval.melody.resample_melody_series(est_time, est_freq, est_v, ref_time, "linear")
        metrics["Raw 2 Harmonic Accuracy"] = evaluation.melody.raw_harmonic_accuracy(ref_v, ref_freq, est_v, est_freq, harmonics=2)
        metrics["Raw 3 Harmonic Accuracy"] = evaluation.melody.raw_harmonic_accuracy(ref_v, ref_freq, est_v, est_freq, harmonics=3)
        metrics["Raw 4 Harmonic Accuracy"] = evaluation.melody.raw_harmonic_accuracy(ref_v, ref_freq, est_v, est_freq, harmonics=4)
        timefreq_series = mir_eval.melody.to_cent_voicing(ref_time, ref_freq, ref_time, est_freq)
        metrics["Overall Chroma Accuracy"] = evaluation.melody.overall_chroma_accuracy(*timefreq_series)
        metrics["Voicing Accuracy"] = evaluation.melody.voicing_accuracy(ref_v, est_v)
        metrics["Voiced Frames Proportion"] = est_v.sum() / len(est_v) if len(est_v) > 0 else 0
        self.all_metrics.append(metrics)

    def _save_metrics(self, ctx, vd, additional):
        # average the per-example metrics and append the mean loss
        ctx.metrics = pandas.DataFrame(self.all_metrics).mean()
        ctx.metrics["Loss"] = np.mean(additional[ctx.loss])
        if self.print_detailed:
            print(ctx.metrics)
        if vd.name is not None and self.write_summaries:
            prefix = "valid_{}/".format(vd.name)
            global_step = tf.train.global_step(ctx.session, ctx.global_step)
            for name, metric in ctx.metrics.items():
                ctx.summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag=prefix+simplify_name(name), simple_value=metric)]), global_step)

    def after_run(self, ctx, vd, additional):
        self._save_metrics(ctx, vd, additional)
        if self.write_estimations:
            print("csv outputs written in {:.2f}s".format(self.write_estimations_timer))
        print("{}: {}".format(vd.name, self._title(ctx)))
class MetricsHook_mf0(EvaluationHook_mf0, MetricsHook):
    # Multi-f0 variant: evaluates with mir_eval.multipitch instead of melody.
    def every_aa(self, ctx, vd, aa, est_time, est_freq):
        est_freqs = datasets.common.melody_to_multif0(est_freq)
        ref_time = aa.annotation.times
        ref_freqs = aa.annotation.freqs_mf0
        metrics = mir_eval.multipitch.evaluate(ref_time, ref_freqs, est_time, est_freqs)
        self.all_metrics.append(metrics)
class VisualOutputHook_mf0(EvaluationHook_mf0, VisualOutputHook):
    # VisualOutputHook plotting combined with the multi-f0 title format.
    pass
class SaveBestModelHook(EvaluationHook):
    # Saves a checkpoint (and a metrics CSV) whenever the watched metric
    # improves on its best value so far.
    def __init__(self, logdir):
        self.best_value = -1
        self.logdir = logdir
        self.watch_metric = "Raw Pitch Accuracy"

    def after_run(self, ctx, vd, additional):
        self.model_name = "model-best-{}".format(vd.name)
        best_metrics_csv = os.path.join(self.logdir, self.model_name+".csv")
        # lazily restore the best value from disk after a restart
        if self.best_value == -1 and os.path.isfile(best_metrics_csv):
            self.best_value = pandas.read_csv(best_metrics_csv, header=None, index_col=0, squeeze=True)[self.watch_metric]
        value = ctx.metrics[self.watch_metric]
        if value > self.best_value:
            self.best_value = value
            print("Saving best model, best value = {:.2f}".format(value))
            ctx.save(self.model_name, ctx.saver_best)
            ctx.metrics.to_csv(best_metrics_csv)
| [
"balhar.j@gmail.com"
] | balhar.j@gmail.com |
eece6af6b08c1d59567df069fc76ac636d14c164 | 4e529788eff965b1c591150457914941f1ed5932 | /7 kyu/The Office IV - Find a Meeting Room.py | ef5bae031489c476178680cf62a209766278089b | [] | no_license | Margarita-Sergienko/codewars-python | a8d70c1be8bb83e83b8319604fc68b9a7d4c656b | 1dde0137873ebd596f931eb30c797c5a88e729d1 | refs/heads/main | 2023-03-12T00:02:52.969107 | 2021-02-19T04:05:59 | 2021-02-19T04:05:59 | 305,252,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | # 7 kyu
# The Office IV - Find a Meeting Room
# https://www.codewars.com/kata/57f604a21bd4fe771b00009c
# Your job at E-Corp is both boring and difficult. It isn't made any easier by the fact that everyone constantly wants to have a meeting with you, and that the meeting rooms are always taken!
# In this kata, you will be given an array. Each value represents a meeting room. Your job? Find the first empty one and return its index (N.B. There may be more than one empty room in some test cases).
# 'X' --> busy 'O' --> empty
# If all rooms are busy, return 'None available!'.
def meeting(rooms):
    """Return the index of the first empty room ('O'), or 'None available!'."""
    try:
        return rooms.index("O")
    except ValueError:
        return 'None available!'
"noreply@github.com"
] | noreply@github.com |
04ce13404cbea01047eb7cdedbdb9aea7f8b1fc8 | b41b95d716b9b5b2e883f4d1dece78df9566ab07 | /opencv_blur_img.py | 477e3e41588c81e6caca8cf8c4dd958b0e30cc7c | [] | no_license | ervishuu/OpenCv2 | 8e71e581d9d2453bd91e2d0d3dc04917c7fc708a | 3e0691c2666f77ebd01ea1cd6e591a70912883e6 | refs/heads/master | 2022-12-02T21:06:13.288330 | 2020-08-18T04:33:23 | 2020-08-18T04:33:23 | 288,349,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | import cv2
import numpy as np

# Load the test image and display it untouched first.
img = cv2.imread("F:/Images/f1.jpg")
cv2.imshow("original img", img)
cv2.waitKey(0)

# Mean (box) filtering with two kernel sizes. Each kernel is uniform and
# normalised so its coefficients sum to one, so filter2D averages the
# neighbourhood; the larger the kernel, the stronger the blur.
for size, title in ((3, "3x3 kernal img"), (7, "7x7 kernal img")):
    kernel = np.ones((size, size), np.float32) / (size * size)
    smoothed = cv2.filter2D(img, -1, kernel)
    cv2.imshow(title, smoothed)
    cv2.waitKey(0)

cv2.destroyAllWindows()
"vishvanathmetkari2000@gmail.com"
] | vishvanathmetkari2000@gmail.com |
3ae766f684e591c9c4ef4688fc51c7a7723f0ede | 9e5424a09128bd414e0d45bb3edd4fe208fa8312 | /fibonaccisequencerecursion.py | 6f013fffd06bc35ff1f1ed7a4106dee436903589 | [] | no_license | Prabesh-Shrestha/Fibonacci-Sequence-in-Python | 23d3bcc643fd5dbf3654dfbe6aa9a4d708aad0c4 | 9655a70854f4101582fbf377315e42e3ca968df0 | refs/heads/main | 2023-02-05T22:41:20.990817 | 2020-12-30T03:20:28 | 2020-12-30T03:20:28 | 325,443,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | userval = int(input("How much do you wana print: "))
def fib(n):
    """Return the (n+1)-th Fibonacci number, with fib(0) == fib(1) == 1.

    The original double recursion recomputed the same subproblems and ran
    in exponential time (and never terminated for negative n); this
    iterative version runs in O(n) and returns 1 for n <= 0.
    """
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Print the first `userval` Fibonacci numbers (sequence 1, 1, 2, 3, ...).
for i in range(userval):
    print(fib(i))
| [
"noreply@github.com"
] | noreply@github.com |
96fa37e9f41607403ec3f0e59669aaa85bb9ed98 | bb3fb268b2a1a586377ca15c4d79187d51ae7273 | /noise_reduction.py | fce63e6fab6d37c595ef7519e806cfcd8d88b624 | [] | no_license | romero8688/SmartSheetMusic | f24e7463aa4d289d17187d5a75fb9631fcb657fb | d8895e94cbe44fe8e1bea529bc269ee5f06eab11 | refs/heads/master | 2023-06-30T12:53:34.916635 | 2019-01-18T12:00:05 | 2019-01-18T12:00:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,835 | py | import numpy as np
import os
os.environ['LIBROSA_CACHE_DIR'] = '/tmp/librosa_cache' # Enable librosa cache
import librosa as lb
import utils_audio_transcript as utils
# We set the window type to Hamming to avoid numerical
# issues while running the algorithm online
# (the Hamming window does not go to zero at the edges)
WINDOW_TYPE = 'hamming'
class NoiseReducer():
    """Online (streaming) single-channel noise reduction.

    Feed successive, non-overlapping chunks of raw audio to ``main``; the
    denoised signal accumulates in ``audio_data_denoised``. The noise power
    is estimated from the running minimum of the smoothed power spectrum
    ("minimum statistics"), and the spectral gain follows the Ephraim-Malah
    rule (Wolfe's approximation).

    NumPy compatibility: the deprecated aliases ``np.float`` (removed in
    NumPy 1.24) and ``np.NaN``/``np.Inf`` (removed in NumPy 2.0) have been
    replaced by ``float``/``np.nan``/``np.inf``.
    """

    def __init__(self,
                 alpha_power_spectrum=0.99,
                 noise_bias_correction=1.5,
                 alpha_snr=0.98):
        """
        Args:
            alpha_power_spectrum: EWMA smoothing factor for the total power
                spectrum used in the noise estimation.
            noise_bias_correction: multiplicative correction applied to the
                minimum-statistics noise estimate (the min is biased low).
            alpha_snr: smoothing of the prior SNR in the decision-directed
                Ephraim-Malah update.
        """
        # The smoothing for the power spectrum (in the estimation
        # of the noise
        self.alpha_power_spectrum = alpha_power_spectrum
        # The correction applied in the estimation of the noise
        # (the estimation is biased because we take the min)
        self.noise_bias_correction = noise_bias_correction
        # The smoothing of the prior SNR for the Ephraim-Malah procedure
        self.alpha_snr = alpha_snr
        # Initialise the audio data buffers (input and output)
        self.audio_data = []
        self.audio_data_denoised = []
        self.n_fft = 1024
        self.hop_length = 512
        self.n_coef_fft = self.n_fft//2 + 1
        # Pre-allocate the all the arrays to receive max 30 mins of data.
        # We could drop the back of the data should memory become a problem.
        self.max_frames = int(30*60*utils.SR/self.hop_length)
        self.stft = np.zeros([self.max_frames, self.n_coef_fft], dtype=np.complex64) + np.nan
        # Store the magnitude and phase (redundant with STFT, could be removed)
        self.stft_mag = np.zeros(self.stft.shape) + np.nan
        self.stft_phase = np.zeros(self.stft.shape, dtype=np.complex64) + np.nan
        # The (total) power spectrum smoothed (in the time dimension) and the running min
        self.smooth_power_spectrum = np.zeros(self.stft.shape) + np.nan
        self.min_smooth_power_spectrum = np.zeros(self.stft.shape)
        # The estimate for the noise power spectrum
        self.noise_estimate = np.zeros(self.stft.shape)
        # The index for the previous estimation of noise
        self.idx_prev_noise_estimate = -np.inf
        # The gain (i.e. the frequency filter that we apply to the raw signal)
        self.gain = np.zeros(self.stft.shape) + np.nan
        # Keep the post-cleaning STFT (only for reporting)
        self.stft_denoised = np.zeros(self.stft.shape, dtype=np.complex64) + np.nan
        # After iterating the main loop, we have processed up to
        # (and including) self.idx_curr
        self.idx_curr = -1
        self.idx_prev = np.nan
        # Store the SNR (posterior and prior) for the Ephraim-Malah algorithm
        self.snr_prior = np.zeros(self.stft.shape) + np.nan
        self.snr_post = np.zeros(self.stft.shape) + np.nan

    def calc_online_stft(self, audio_data_new_length):
        '''
        Calculate the STFT online.
        i.e. find how much of the previous audio data we need to take, append the new audio data
        such that the windowing is valid and compute the FFT.
        '''
        # We need to get (n_fft - hop_length) samples from the previous audio data
        start_idx = max(len(self.audio_data) - audio_data_new_length - self.n_fft + self.hop_length, 0)
        # Update the indices
        n_new_frames = (len(self.audio_data[start_idx:]) - self.n_fft) // self.hop_length + 1
        self.idx_prev = self.idx_curr
        self.idx_curr = self.idx_prev + n_new_frames
        # Break if we have reached the maximum buffer size
        if self.idx_curr > self.max_frames-1:
            raise IndexError("Reached max size for the noise reduction audio buffer")
        # Calculate the STFT for the new frames
        self.stft[self.idx_prev+1:self.idx_curr+1] = lb.spectrum.stft(np.array(self.audio_data[start_idx:]), self.n_fft,
                                                                      self.hop_length, window=WINDOW_TYPE, center=False).T
        # Also calculate the magnitude and phase spectra
        [stft_mag, stft_phase] = lb.core.magphase(self.stft[self.idx_prev+1:self.idx_curr+1].T)
        self.stft_mag[self.idx_prev+1:self.idx_curr+1] = stft_mag.T
        self.stft_phase[self.idx_prev+1:self.idx_curr+1] = stft_phase.T

    def calc_smooth_power_spectrum(self):
        '''
        Calculate the smoothed (EWMA over time) total power spectrum.
        '''
        # For the first frame, we only have the raw power spectrogram
        idx_prev_adj = self.idx_prev
        if self.idx_prev < 0:
            self.smooth_power_spectrum[0,:] = self.stft_mag[0,:]**2
            idx_prev_adj = idx_prev_adj + 1
        # After the first frame, we can smooth with EWMA update
        for k in np.arange(idx_prev_adj + 1, self.idx_curr + 1):
            update = (1-self.alpha_power_spectrum) * self.stft_mag[k,:]**2
            self.smooth_power_spectrum[k, :] = self.alpha_power_spectrum * self.smooth_power_spectrum[k-1, :] + update

    def calc_noise_estimate(self):
        '''
        Calculate the noise estimate based on the running minimum of
        the smoothed power spectrum.
        '''
        min_n_frames_noise = 50
        n_frames_noise_lookback = 50
        n_frames_noise_update = 20
        # Only update the rolling minimum every n_frames_noise_update
        # (for computational speed reasons)
        for k in np.arange(self.idx_prev+1, self.idx_curr+1):
            # Until we have enough data for estimation, assume no noise
            if k >= min_n_frames_noise-1:
                # Calculate the minimum of the smoothed total power
                if self.idx_prev_noise_estimate + n_frames_noise_update <= k:
                    min_smooth_power_spectrum_new = np.min(self.smooth_power_spectrum[max(k - n_frames_noise_lookback, 0):k+1, :], axis=0)
                    min_smooth_power_spectrum_new = min_smooth_power_spectrum_new * self.noise_bias_correction # Apply correction for bias
                    self.idx_prev_noise_estimate = k
                # Otherwise, carry forward the previous estimate
                else:
                    min_smooth_power_spectrum_new = self.min_smooth_power_spectrum[k-1,:]
                # Either way, cap the noise power to the total power
                noise_estimate_new = np.minimum(min_smooth_power_spectrum_new, self.stft_mag[k ,:]**2)
                # Assign the new row
                self.noise_estimate[k,:] = noise_estimate_new
                self.min_smooth_power_spectrum[k,:] = min_smooth_power_spectrum_new

    def calc_gain_wiener(self):
        '''
        Calculate the Wiener filter
        Can be issued as an alternative to the Ephraim Malah gain calculation.

        NOTE(review): unlike calc_gain_ephraim_malah (which assigns into the
        pre-allocated self.gain slice), this vstacks onto self.gain, growing
        it beyond max_frames -- confirm before using this code path.
        '''
        power_total = self.stft_mag[self.idx_prev + 1:self.idx_curr + 1, :]**2
        power_noise_estimate = self.noise_estimate[self.idx_prev + 1:self.idx_curr + 1, :]
        gain = np.maximum(1 - power_noise_estimate/power_total, 0)
        self.gain = np.vstack((self.gain, gain))

    def calc_gain_ephraim_malah(self):
        '''
        Refs:
        [1] Efficient Alternatives to the Ephraim and Malah Suppression
        Rule for Audio Signal Enhancement, Wolfe P., Godsill S., 2003.
        [2] Single Channel Noise Reduction for Hands Free Operation
        in Automotive Environments, Schmitt S., Sandrock M. and Cronemeyer, J., 2002.
        '''
        # Place-holders for SNRs and gain
        snr_prior = np.zeros([self.idx_curr - self.idx_prev, self.n_coef_fft]) + np.nan
        snr_post = np.zeros([self.idx_curr - self.idx_prev, self.n_coef_fft]) + np.nan
        gain = np.zeros([self.idx_curr - self.idx_prev, self.n_coef_fft]) + np.nan
        for n in range(self.idx_prev + 1, self.idx_curr + 1):
            k = n - self.idx_prev - 1
            # Floor the noise_estimate to 0+tol, as we need to divide Inf by Inf
            # (float: the np.float alias was removed from modern NumPy)
            noise_estimate = np.maximum(self.noise_estimate[n,:], np.finfo(float).eps)
            snr_post[k,:] = self.stft_mag[n,:]**2 / noise_estimate # -1 needed??
            snr_post_floored = np.maximum(snr_post[k,:], 0.0) # Flooring needed?
            # Calculate the SNR prior in a "decision-directed" approach (see [2])
            if n == 0:
                snr_prior_raw = snr_post_floored
            else:
                noise_estimate_prev = np.maximum(self.noise_estimate[n-1,:], np.finfo(float).eps)
                gain_prev = self.gain[n-1, :] if k == 0 else gain[k-1, :]
                snr_prior_raw = (gain_prev * self.stft_mag[n-1,:])**2 / noise_estimate_prev
            snr_prior[k,:] = self.alpha_snr*snr_prior_raw + (1-self.alpha_snr)*np.maximum(snr_post[k,:]-1, 0.0)
            # Ephraim-Malah approximation by Wolfe
            # (Minimum mean square error spectral power estimator in [1])
            p = snr_prior[k,:]/(1+snr_prior[k,:])
            gain[k,:] = np.sqrt(p * (1/snr_post[k,:] + p))
        # Append the gain and SNRs for the new block
        self.snr_prior[self.idx_prev+1:self.idx_curr+1] = snr_prior
        self.snr_post[self.idx_prev+1:self.idx_curr+1] = snr_post
        self.gain[self.idx_prev+1:self.idx_curr+1] = gain

    def reconstruct_audio_data(self):
        '''
        Apply gain.
        Inverse FFT to reconstruct the denoised audio data.
        '''
        # Only apply on the newly processed chunks
        idxs = np.arange(self.idx_prev + 1, self.idx_curr + 1)
        # denoised = gain X magnitude total X phase total
        self.stft_denoised[idxs, :] = self.gain[idxs, :] * self.stft_mag[idxs, :] * self.stft_phase[idxs, :]
        # Reconstruct the signal in the time space
        audio_data_denoised = lb.spectrum.istft(self.stft_denoised[idxs, :].T, self.hop_length, window=WINDOW_TYPE, center=False).tolist()
        # We need to ditch the beginning of the series (as it has already
        # been processed in the previous iterations).
        # This does not apply to the first frame
        if self.idx_prev < 0:
            self.audio_data_denoised.extend(audio_data_denoised)
        else:
            self.audio_data_denoised.extend(audio_data_denoised[self.n_fft - self.hop_length:])

    def main(self, audio_data_new):
        '''
        Entry-point for the noise reduction algorithm.
        Usage: Make successive calls to main() with new (non-overlapping) chunks of raw audio.
        The algorithm:
        - append the data to the audio buffer
        - calculate the spectrum
        - estimate the noise
        - calculate the gain
        - apply the gain and reconstruct the denoised data
        '''
        # Check that the input audio is as expected
        if len(audio_data_new) < self.n_fft or len(audio_data_new) % self.hop_length != 0:
            raise IndexError("Bad size for the new chunk of audio")
        self.audio_data.extend(audio_data_new)
        self.calc_online_stft(len(audio_data_new))
        self.calc_smooth_power_spectrum()
        self.calc_noise_estimate()
        self.calc_gain_ephraim_malah()
        self.reconstruct_audio_data()
def test_noise_reduction():
    '''
    Denoise a sample track online, then plot the raw and the de-noised
    signals on the same figure for visual comparison.
    '''
    import matplotlib.pyplot as plt
    sample_dir = utils.WD + "Samples\SaarlandMusicData\SaarlandMusicDataRecorded//"
    filename_wav = sample_dir + "Ravel_JeuxDEau_008_20110315-SMD.wav"  # "Chopin_Op066_006_20100611-SMD.wav"
    target_dtype = utils.AUDIO_FORMAT_MAP[utils.AUDIO_FORMAT_DEFAULT][0]
    audio_data = lb.core.load(filename_wav, sr=utils.SR, dtype=target_dtype)[0].astype(np.float64)
    reducer = NoiseReducer()
    # Feed the track to the reducer in successive 1024-sample chunks.
    chunk_size = 1024
    for chunk_idx in range(3000):
        start = chunk_idx * chunk_size
        reducer.main(audio_data[start:start + chunk_size])
    utils.figure()
    plt.plot(reducer.audio_data)
    plt.plot(reducer.audio_data_denoised)
| [
"noreply@github.com"
] | noreply@github.com |
6300090e5a1167be972d853d145e04125121895d | ccbcaca6df1c3984a19f039351e29cfa81e73314 | /timetable/schedule.py | a3265c9ffcaa2c76a8c6866709dc7413cf0e18ea | [
"BSD-3-Clause"
] | permissive | pgromano/timetable | b96c6eb2da8ede8abfa211f6d54748a4a5a9c9c7 | 8fa83fa82bb2afc56f6da1b7f8e3836f2b127164 | refs/heads/master | 2021-01-21T00:22:17.376372 | 2016-08-17T14:57:25 | 2016-08-17T14:57:25 | 61,254,584 | 0 | 0 | null | 2016-06-16T02:07:07 | 2016-06-16T02:07:07 | null | UTF-8 | Python | false | false | 182 | py |
class Schedule(object):
"""Student schedule object.
"""
def __init__(self):
def add(self, course):
"""Add course to schedule"""
def courses
| [
"zachsailer@gmail.com"
] | zachsailer@gmail.com |
3b0848f202ecd9d4c0ae3efc96929599353adf99 | 9dfc9d9bbf8cb415e8fe9cc618047f46a3fd8278 | /cut-video-with-text/Text_classification/text_classify/train.py | b28b0ddbf0601d1e78f46edf9d4f20a3a45ef21a | [] | no_license | kaiyu-tang/Little-niu | e50397ec1cd98d35e8b65472effbd7649b88a096 | 2ee215277a08f66c6a6932865a6467cb14722b2d | refs/heads/master | 2021-06-30T04:56:06.597051 | 2018-12-24T13:46:55 | 2018-12-24T13:46:55 | 132,573,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,736 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/29 ไธๅ3:06
# @Author : Kaiyu
# @Site :
# @File : train.py
import json
import os
import time
import torch
import torch.utils.data.dataloader
import torch.nn.functional as F
from Model import TextCNN, TextRNN, TextVDCNN
from Config import Config
from gensim.models import Word2Vec, FastText
from gensim.models.word2vec import LineSentence
from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler
import pandas as pd
from data.data_loader import MyDataset
import numpy as np
def train_word_vectors(text_path, args):
    """Train Word2Vec and FastText embeddings on the corpus at *text_path*.

    *text_path* must be a plain-text file with one sentence per line
    (consumed via gensim's LineSentence). All hyper-parameters come from
    *args* (the project Config). The Word2Vec model is re-trained for
    ``args.word_vec_train_epoch`` outer epochs and checkpointed every 20
    of them; both final models are saved under ``args.dir_model``.
    """
    sentences = LineSentence(text_path)
    print("loading word2vec")
    # labels = [text['label'] for text in data]
    model_word2vec = Word2Vec(sentences=sentences, size=args.word_embed_dim, window=args.word2vec_window,
                              min_count=args.word2vec_min_count, workers=args.word2vec_worker, sg=args.word2vec_sg,
                              negative=args.word2vec_negative, iter=args.word2vec_iter, )
    print('loading fast text')
    model_fasttext = FastText(sentences=sentences, sg=args.fast_sg, size=args.word_embed_dim, window=args.fast_window,
                              min_count=args.fast_min_count, workers=args.fast_worker, iter=args.fast_iter, )
    # print('loading word rank')
    # model_wordrank = Wordrank.train(wr_path=args.dir_model, size=args.word_embed_dim, corpus_file=text_path,
    #                                 window=args.wordrank_window, out_name=args.wordrank_out_name,
    #                                 symmetric=args.wordrank_symmetric, min_count=args.wordrank_min_count,
    #                                 iter=args.wordrank_iter,
    #                                 np=args.wordrank_worker)
    # model_word2vec.build_vocab(sentences=sentences)
    # model_fasttext.build_vocab(sentences=sentences)
    print("start training")
    for epoch in range(args.word_vec_train_epoch):
        # random.shuffle(sentences)
        # NOTE(review): only Word2Vec is re-trained in this loop; FastText
        # keeps whatever its constructor trained above.
        model_word2vec.train(sentences=sentences, epochs=model_word2vec.iter,
                             total_examples=model_word2vec.corpus_count)
        # model_fasttext.train(sentences=sentences, epochs=model_fasttext.iter,
        #                      total_examples=model_fasttext.corpus_count)
        print(epoch)
        if epoch % 20 == 0:
            # Periodic checkpoint, prefixed with the epoch number.
            model_word2vec.save(os.path.join(args.dir_model, str(epoch) + "-" + args.word2vec_model_name))
            # model_fasttext.save(os.path.join(args.dir_model, str(epoch) + "-" + args.fast_model_name))
    model_word2vec.save(os.path.join(args.dir_model, args.word2vec_model_name))
    model_fasttext.save(os.path.join(args.dir_model, args.fast_model_name))
    # model_wordrank.save(os.path.join(args.dir_model, str(epoch) + "-" + args.wordrank_model_name))
    print('finished training')
def eval_model(model, data_iter, args):
    """Evaluate *model* on *data_iter* and print sklearn classification metrics.

    Runs the model in eval mode over every batch, collects the argmax
    predictions, prints a classification report plus per-class precision,
    and returns the mean per-class precision (``average_accuracy``).

    NOTE(review): *args* is unused in this body; y_true is truncated to
    len(y_pred) when the two differ -- confirm this matches the loader's
    last-batch behaviour.
    """
    model.eval()
    y_true = []
    y_pred = []
    hidden_state = None
    from sklearn import metrics
    for data_ in data_iter:
        feature, target = data_[0], data_[1]
        y_true = np.concatenate([y_true, target])
        # Each architecture has a different forward signature.
        if model.name == "TextCNN":
            logit = model(feature)
        elif model.name == "TextRNN":
            logit, hidden_state = model(feature, hidden_state)
        elif model.name == "TextVDCNN":
            logit = model(torch.transpose(feature, 1, 2))
        # Predicted class = argmax over the logits.
        y_pred = np.concatenate([y_pred, torch.max(logit, 1)[1].view(target.size()).data])
    if len(y_pred) != len(y_true):
        print("{} {}".format(len(y_pred), len(y_true)))
    #####
    # then get the ground truth and the predict label named y_true and y_pred
    if len(y_pred) < len(y_true):
        print("changed")
        y_true = y_true[:len(y_pred)]
    classify_report = metrics.classification_report(y_true, y_pred)
    # confusion_matrix = metrics.confusion_matrix(y_true, y_pred)
    overall_accuracy = metrics.accuracy_score(y_true, y_pred)
    acc_for_each_class = metrics.precision_score(y_true, y_pred, average=None)
    average_accuracy = np.mean(acc_for_each_class)
    score = metrics.accuracy_score(y_true, y_pred)
    print('classify_report : \n', classify_report)
    # print('confusion_matrix : \n', confusion_matrix)
    print('acc_for_each_class : \n', acc_for_each_class)
    print('average_accuracy: {0:f}'.format(average_accuracy))
    print('overall_accuracy: {0:f}'.format(overall_accuracy))
    print('score: {0:f}'.format(score))
    return average_accuracy
def save(model, save_dir, save_prefix, steps):
    """Persist *model* under ``save_dir`` as ``<prefix>_steps_<steps>``.

    Writes two files: the state dict (``.pt``, portable across code
    changes) and the fully pickled module (``.pkl``, convenient to load).
    The directory is created if it does not exist.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.isdir(...)` guard.
    os.makedirs(save_dir, exist_ok=True)
    save_prefix = os.path.join(save_dir, save_prefix)
    save_path = '{}_steps_{}'.format(save_prefix, steps)
    torch.save(model.state_dict(), save_path + ".pt")
    torch.save(model, save_path + ".pkl")
    # Typo fixed: message previously read "Save Sucessful".
    print('Save successful, path: {}'.format(save_path))
def train(model, train_iter, dev_iter, args, weights, best_acc=0):
    """Train *model* with Adam and class-weighted cross entropy.

    Every ``test_interval`` steps the model is evaluated on *dev_iter*
    and, when ``save_best`` is set, the best-scoring checkpoint is saved
    to ``args.dir_model``. Returns the best dev accuracy reached so
    callers can chain folds (see the __main__ cross-validation loop).
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=args.options[model.name]["lr"])
    #optimizer = torch.optim.SGD(model.parameters(), lr=args.options[model.name]["lr"])
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    # word2vec_model = Word2Vec.load(os.path.join(Config.dir_model, Config.word2vec_model_name))
    steps = 0
    last_step = 0
    model.train()
    # Per-architecture hyper-parameters (lr, epoch, test_interval, ...).
    option = args.options[model.name]
    print('start training {}'.format(model.name))
    #print(-1)
    torch.backends.cudnn.benchmark = True
    # weights = weights.cuda()
    for epoch in range(option["epoch"]):
        cur_time = time.time()
        for data_ in train_iter:
            feature, target = data_[0].long(), data_[1]
            # NOTE(review): no-op branch -- dead code?
            if model.name == "TextRNN":
                pass
            if args.cuda:
                feature = feature.float().cuda()
                target = target.cuda()
            # Hidden state is reset for every batch.
            hidden_state = None
            optimizer.zero_grad()
            # Each architecture has a different forward signature.
            if model.name == "TextCNN":
                logit = model(feature)
            elif model.name == "TextRNN":
                logit, hidden_state = model(feature, hidden_state)
            elif model.name == "TextVDCNN":
                feature = torch.transpose(feature, 1, 2)
                logit = model(feature)
            loss = F.cross_entropy(logit, target, weight=weights)
            loss.backward()
            optimizer.step()
            end_time = time.time()
            steps += 1
            print("step: {} time: {} loss: {}".format(steps, end_time - cur_time, loss))
            if steps % option["test_interval"] == 0:
                dev_acc = eval_model(model, dev_iter, args)
                # eval_model switches to eval mode; switch back for training.
                model.train()
                if dev_acc > best_acc:
                    best_acc = dev_acc
                    last_step = steps
                    if option["save_best"]:
                        save(model, args.dir_model, 'best_acc{}'.format(best_acc), steps)
                else:
                    # if steps - last_step >= args.early_stop:
                    #     print('early stop by {} steps.'.format(args.early_stop))
                    pass
                # elif steps % option["save_interval"] == 0:
                #     # save(model, args.dir_model, 'snapshot', steps)
                #     pass
    return best_acc
def prepare_sen_lab(test=True):
    """Load the okoo and zhibo7m json corpora into (sentences, labels).

    Returns two numpy arrays: each sentence is a list of whitespace-split
    tokens. zhibo7m entries missing the 't_label' key fall back to label 0
    (the message is printed and the miss counted).

    NOTE(review): the *test* parameter is unused in this body.
    """
    # data pre-process
    data_path = './data/okoo-merged-3-label.json'
    data = json.load(open(data_path, encoding='utf-8'))
    sentences = []
    labels = []
    for item in data:
        sentences.append(item['text'].split())
        labels.append(item['merged_label'])
    # Second corpus: labels live under 't_label' and may be absent.
    data_path = './data/zhibo7m.json'
    data = json.load(open(data_path, encoding="utf-8"))
    al = len(data)
    count = 0
    for item_ in data:
        sentences.append(item_["msg"].split())
        try:
            labels.append(item_["t_label"])
        except KeyError as e:
            count += 1
            labels.append(0)
            print(item_["msg"])
    print("all: {} error: {}".format(al, count))
    return np.asarray(sentences), np.asarray(labels)
if __name__ == '__main__':
    # Cross-validated training driver: 10 stratified splits; note that
    # three models are instantiated below but only textvdcnn is trained.
    data = MyDataset()
    # train word2vec
    # text_path = 'data' + os.sep + 'okoo-merged-clean-cut-data.txt'
    # train_word_vectors(text_path, Config)
    # train text-Cnn
    print(data.voca_size)
    print('loading text model')
    textcnn = TextCNN()
    textrnn = TextRNN()
    textvdcnn = TextVDCNN(voca_size=data.voca_size)
    print('finished loading txt model')
    print('Cuda: {}'.format(Config.cuda))
    print("loading data")
    # data = pd.read_csv("./data/full-cut-clean.csv")
    # sentences, labels = data["sentence"].values.astype(np.float32), data["label"].values
    from sklearn.model_selection import StratifiedShuffleSplit
    sss = StratifiedShuffleSplit(n_splits=10, test_size=0.06)
    iters = 0
    best_acc = 0
    print("loaded data")
    # Class weights for the weighted cross-entropy loss.
    weights = torch.from_numpy(data.get_weight())
    if torch.cuda.is_available():
        weights = weights.cuda()
        textcnn.cuda()
        textrnn.cuda()
        textvdcnn.cuda()
    for train_index, test_index in sss.split(data.X, data.Y):
        start_time = time.time()
        train_sampler = SubsetRandomSampler(train_index)
        dev_sampler = SubsetRandomSampler(test_index)
        train_iters = DataLoader(data, batch_size=64, num_workers=8, sampler=train_sampler,
                                 )
        dev_iters = DataLoader(data, batch_size=2, num_workers=8, sampler=dev_sampler,
                               )
        print("start")
        end_time = time.time()
        iters += 1
        # start train
        # best_acc is threaded through the folds so only an overall best
        # checkpoint is kept.
        best_acc = train(textvdcnn, train_iters, dev_iters, Config, best_acc=best_acc, weights=weights)
        print("Iter: {} time: {} Loading data successful".format(iters, end_time - start_time))
| [
"tangkaiyuvip@gmail.com"
] | tangkaiyuvip@gmail.com |
d934c066420370ae2be79b824a2058c1b52e6568 | 210befd04f2ba70a0843df8b59d309eab1ff3316 | /temp.py | 9578fef96fb82f47e2552d63e0ef4cd614b8fa6d | [] | no_license | ahilh/projet-2i013 | d159b023d7d7f51833057f350eab04891487f208 | 8f008a0d2d7e6c8178603f77aa46b8cc2d6ec27b | refs/heads/master | 2020-04-19T05:34:48.002251 | 2019-02-25T18:01:47 | 2019-02-25T18:01:47 | 167,991,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from salsa import Attaquant, Defense
from soccersimulator import Player, SoccerTeam, Simulation, show_simu

# Team A: one forward and one defender.
joueura1 = Player("Attaquant A", Attaquant())
joueura2 = Player("Defenseur A", Defense())
team1 = SoccerTeam("Equipe A", [joueura1, joueura2])
# Team B mirrors team A.
joueurb1 = Player("Attaquant B", Attaquant())
joueurb2 = Player("Defenseur B", Defense())
team2 = SoccerTeam("Equipe B", [joueurb1, joueurb2])
# Create a match between the two teams lasting 1000 simulation steps.
match = Simulation(team1, team2, 1000)
# Play the match (without visualising it).
match.start()
# Watch the match replayed in the viewer.
show_simu(match)
# Careful: once the match has been played, start() replays it
# but does not re-run the simulation.
# Use this to watch the replay of a match.
| [
"ahil.hassanaly@yahoo.fr"
] | ahil.hassanaly@yahoo.fr |
3e14d69378a30d8887db254aeede0f54138ce747 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/matrix/4d38ab06972046a988250a3005464d09.py | 03b161fe26511da6e0ce058e59c662bf8f099254 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 488 | py | class Matrix(object):
def __init__(self, init):
split_at_newline = lambda m: map(lambda s: s.split(), m.split('\n'))
convert_to_int = lambda m: map(lambda s: int(s), m)
column_range = lambda m: range(len(m))
column_member = lambda x, m: map(lambda s: s[x], m)
self.rows = [convert_to_int(row) for row in split_at_newline(init)]
self.columns = [column_member(col, self.rows) for col in column_range(self.rows[0])]
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
507318a00b41ce38db963c43532b962a36ca4c43 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /classes/_print32.py | fed133646d96b60d6083b2f83a8360c33eb35250 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from xcp2k.inputsection import InputSection
from _program_run_info23 import _program_run_info23
from _restart10 import _restart10
from _restart_history4 import _restart_history4
from _current1 import _current1
class _print32(InputSection):
    """Auto-generated wrapper for a ``PRINT`` input section (xcp2k),
    grouping the PROGRAM_RUN_INFO, RESTART, RESTART_HISTORY and CURRENT
    subsections."""
    def __init__(self):
        InputSection.__init__(self)
        # One attribute per subsection keyword, each its own section object.
        self.PROGRAM_RUN_INFO = _program_run_info23()
        self.RESTART = _restart10()
        self.RESTART_HISTORY = _restart_history4()
        self.CURRENT = _current1()
        # Keyword under which this section appears in the generated input.
        self._name = "PRINT"
        # Maps subsection keywords to the attribute names set above.
        self._subsections = {'CURRENT': 'CURRENT', 'RESTART_HISTORY': 'RESTART_HISTORY', 'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO', 'RESTART': 'RESTART'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.