blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c923e6312165159a17cfce68ebb90fc23b507106 | 1429cfd7e58858b3557c89cbb174716ff6959f29 | /TIC TAC TOE.py | 62eff864c12b9b67e5b5e38d1a39674f1e3f012c | [] | no_license | mrshahalam/New-Python-programme | f90ae6046da383a5df4dc36a47dfbb2ba51fb840 | 009d4f0fb759168f8927ceb6c713c9b29b584692 | refs/heads/master | 2020-06-28T22:46:01.631996 | 2020-01-30T16:06:45 | 2020-01-30T16:06:45 | 200,361,968 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,577 | py | """
A Two-Player Tic Tac Toe Game.
1. Displaying Board
2. Play Game
3. Handle Turn
4. Check Win
- Check Rows
- Check Columns
- Check Diagonals
5. Check Tie
6. Flip Player from X to O or O to X
"""
from sys import *
# Initializing the Board
# The 3x3 grid is stored as a flat list of 9 cells, indexed 0..8 row by row.
# "-" marks an empty cell.
board = ["-", "-", "-",
         "-", "-", "-",
         "-", "-", "-"]
# True while a game is in progress; set to False on a win or a tie.
game_going = True
# Holds "X" or "O" once someone wins, otherwise None.
winner = None
# The player whose turn it is; "X" always moves first.
current_player = "X"
# Function: Displaying the Board
def view_board():
    """Print the current board with a reference grid of position numbers."""
    rows = (
        (0, 1, 2, " 1 | 2 | 3"),
        (3, 4, 5, " 4 | 5 | 6"),
        (6, 7, 8, " 7 | 8 | 9"),
    )
    print()
    for row_index, (a, b, c, legend) in enumerate(rows):
        print(board[a] + " | " + board[b] + " | " + board[c] + legend)
        # A separator goes between rows, not after the last one.
        if row_index < 2:
            print("----------")
    print()
# Function: Handling the Turn
def handle_turn(player):
    """Ask *player* for a free board position (1-9) and place their mark.

    Re-prompts until the input is a digit 1..9 that refers to an empty
    cell, then writes the player's mark onto the board and redraws it.
    (The original version relied on an int accidentally failing a string
    membership test to re-prompt, and had typos in the error message.)
    """
    print(player, "'s turn.")
    valid = False
    while not valid:
        position = input("Enter the position where you want to place your player (1 to 9): ")
        # Reject anything that is not a single digit between 1 and 9.
        if position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
            continue
        index = int(position) - 1
        if board[index] == "-":
            valid = True
        else:
            # Cell already taken: tell the player and ask again.
            print("You can't select that position. Please choose a position other than", index + 1)
    board[index] = player
    view_board()
# Function: Check Rows
def checkRows():
    """Return the winner's mark if any row is complete, otherwise None."""
    # Setting up Global Variable
    global game_going
    # Each row starts at index 0, 3 or 6; rows are checked top to bottom,
    # so an earlier winning row takes priority, exactly as before.
    for start in (0, 3, 6):
        mark = board[start]
        if mark != "-" and mark == board[start + 1] == board[start + 2]:
            # A filled, matching row ends the game.
            game_going = False
            return mark
    # No winning row found.
    return None
# Function: Check Columns
def checkColumns():
    """Return the winner's mark if any column is complete, otherwise None."""
    # Setting up Global Variable
    global game_going
    # Columns start at the top-row indices 0, 1, 2 and step by 3.
    # Checking left to right preserves the original priority order.
    for top in (0, 1, 2):
        mark = board[top]
        if mark != "-" and mark == board[top + 3] == board[top + 6]:
            # A filled, matching column ends the game.
            game_going = False
            return mark
    # No winning column found.
    return None
# Function: Check Diagonals
def checkDiagonals():
    """Return the winner's mark if either diagonal is complete, otherwise None."""
    # Setting up Global Variable
    global game_going
    # Main diagonal first, then the anti-diagonal (original priority order).
    for a, b, c in ((0, 4, 8), (2, 4, 6)):
        if board[a] != "-" and board[a] == board[b] == board[c]:
            # A filled, matching diagonal ends the game.
            game_going = False
            return board[a]
    # No winning diagonal found.
    return None
# Function: Check Win
def checkWin():
    """Record the winning mark (if any) in the global `winner`."""
    # Setting Up Global Variable
    global winner
    # Run all three checks (each may also flip game_going to False).
    row_winner = checkRows()
    column_winner = checkColumns()
    diagonal_winner = checkDiagonals()
    # Rows take priority over columns, columns over diagonals; each value
    # is either a player's mark or None, so an or-chain picks the first hit.
    winner = row_winner or column_winner or diagonal_winner
    return
# Function: Check Tie
def checkTie():
    """End the game when no empty cell remains on the board."""
    # Setting up the Global Variable
    global game_going
    # A full board has no "-" cells left anywhere.
    if board.count("-") == 0:
        game_going = False
    return
# Function: Flipping the Player from X to O or O to X
def flip_player():
    """Swap the current player between 'X' and 'O'."""
    # Setting up Global Variable
    global current_player
    # Map each mark to its opposite; anything else is left untouched,
    # matching the original if/elif behavior.
    swap = {'X': 'O', 'O': 'X'}
    if current_player in swap:
        current_player = swap[current_player]
    return
# Function: Check whether the game is over or not
def check_gameOver():
    # Update the global `winner` (checkWin) and stop the game on a full
    # board (checkTie); a win also stops the game inside the check* helpers.
    checkWin()
    checkTie()
# Function: Play The Game
def play_game():
    """Run one complete game of tic-tac-toe on a fresh board.

    Resets all global game state first: without this, choosing
    "Play A New Game" from the menu a second time would reuse the old,
    already-finished board (game_going is False) and end immediately.
    """
    global board, game_going, winner, current_player
    # Start every game from a clean slate.
    board = ["-"] * 9
    game_going = True
    winner = None
    current_player = "X"
    # Displaying the initial board
    view_board()
    # While the game is still going
    while game_going:
        # Handle a turn
        handle_turn(current_player)
        # Check if the game has ended
        check_gameOver()
        # Flip to the other player
        flip_player()
    # Game has ended
    if winner == 'X' or winner == 'O':
        print(winner + " won.")
    elif winner is None:
        print("Game Tied.")
# Function: Main Program
def main():
    """Show the menu loop until the user chooses to quit."""
    while True:
        print("-------------------------")
        print("TIC TAC TOE 2-PLAYER GAME")
        print("-------------------------")
        choice = input("1. Play A New Game\n2. Exit\nEnter Your Choice: ")
        # Only the int() conversion can raise ValueError here.
        try:
            int_choice = int(choice)
        except ValueError:
            print("Sorry, Wrong Input. Please enter numbers only.")
            continue
        if int_choice == 1:
            play_game()
        elif int_choice == 2:
            print("Thank you for playing TIC TAC TOE Game App")
            exit()
        else:
            print("Invalid Choice. Please enter either 1 or 2.")
main()
| [
"noreply@github.com"
] | noreply@github.com |
5c11d0ef4c5a83f6c0d971af6f4730a9a6fe1a67 | c1e488789b41a714cdd37525d7e71815753c21d9 | /atcoder/beginners/chap1/PASTFILES/ABC088A_1.py | 041c7c7cbb41d815d7d2848a46a3bce2ad8a670a | [] | no_license | happyhappyhappyhappy/pythoncode | 638a0cbeb94ec04829c1c4e216fb200863cd7a4e | 247b8346a503cab272043c20e6210ee03cfdd8c4 | refs/heads/master | 2023-08-31T20:54:06.144750 | 2023-08-30T08:33:15 | 2023-08-30T08:33:15 | 223,697,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Problem https://atcoder.jp/contests/abc088/tasks/abc088_a
# Python 1st Try
if __name__ == "__main__":
yes = "Yes"
no = "No"
answer = ""
N = int(input().strip())
A = int(input().strip())
chargeCoin = N % 500
if chargeCoin <= A:
answer = yes
else:
answer = no
print(answer)
exit
| [
"ymnkkj@gmail.com"
] | ymnkkj@gmail.com |
f79103b6166bbcddf98f63d0c258951fb19b31eb | 28280d1c7ca06f89906e811f3b7311a5e8a0046b | /ecoz2py/__init__.py | 5bffc1a049e5d658a7b607a7b4e2c48e1360e361 | [] | no_license | mbari-org/ecoz2py | e5e96ba127a397c7d319a15ca13889f724943ba5 | 00d17b1696debc3aff7da37f0e4be316de70c3a7 | refs/heads/master | 2022-09-03T20:59:18.927539 | 2020-05-03T02:06:51 | 2020-05-03T02:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | import os
from _ecoz2_extension import ffi
from _ecoz2_extension.lib import ecoz2_hmm_learn
from _ecoz2_extension.lib import ecoz2_prd_show_file
from _ecoz2_extension.lib import ecoz2_set_random_seed
from _ecoz2_extension.lib import ecoz2_version
from _ecoz2_extension.lib import ecoz2_vq_learn
def get_version():
    """Return the version string reported by the native ecoz2 extension."""
    return ffi.string(ecoz2_version())
def prd_show_file(filename,
                  show_reflections=False,
                  from_=-1,
                  to=-1,
                  ):
    """Show the contents of a predictor (.prd) file via the native extension.

    # NOTE(review): -1 for from_/to presumably means "no range restriction";
    # confirm against the ecoz2 C implementation.
    """
    ecoz2_prd_show_file(filename, show_reflections, from_, to)
def set_random_seed(seed):
    """Seed the native ecoz2 random number generator (for reproducible runs)."""
    ecoz2_set_random_seed(seed)
def hmm_learn(N,
              sequence_filenames,
              model_type=3,
              hmm_epsilon=1.e-5,
              val_auto=0.3,
              max_iterations=-1,
              hmm_learn_callback=None
              ):
    """Train an HMM with N states on the given sequence files via the native extension.

    hmm_learn_callback, if given, is invoked as callback(variable, value)
    with progress values reported by the C code.
    """
    # Keep the individual char[] buffers referenced so they stay alive
    # while the char*[] array pointing at them is in use.
    c_sequence_filenames_keepalive = [ffi.new("char[]", _to_bytes(s)) for s in sequence_filenames]
    c_sequence_filenames = ffi.new("char *[]", c_sequence_filenames_keepalive)

    # for (i, c_sequence_filename) in enumerate(c_sequence_filenames):
    #     print('SEQ {} => {}'.format(i, ffi.string(c_sequence_filename)))

    @ffi.callback("void(char*, double)")
    def callback(c_variable, c_value):
        # Translate the C arguments to Python types before forwarding.
        if hmm_learn_callback:
            variable = _to_str(ffi.string(c_variable))
            value = float(c_value)
            hmm_learn_callback(variable, value)

    ecoz2_hmm_learn(N,
                    model_type,
                    c_sequence_filenames,
                    len(c_sequence_filenames),
                    hmm_epsilon,
                    val_auto,
                    max_iterations,
                    callback
                    )
def vq_learn(prediction_order,
             predictor_filenames,
             codebook_class_name='_',
             epsilon=0.05,
             vq_learn_callback=None
             ):
    """Run VQ codebook training on the given predictor files via the native extension.

    vq_learn_callback, if given, is invoked as callback(m, avg_distortion,
    sigma, inertia) with progress values reported by the C code.
    Returns whatever ecoz2_vq_learn returns.
    """
    c_codebook_class_name = ffi.new("char []", _to_bytes(codebook_class_name))
    # Keep the individual char[] buffers referenced so they stay alive
    # while the char*[] array pointing at them is in use.
    c_predictor_filenames_keepalive = [ffi.new("char[]", _to_bytes(s)) for s in predictor_filenames]
    c_predictor_filenames = ffi.new("char *[]", c_predictor_filenames_keepalive)

    @ffi.callback("void(int, double, double, double)")
    def callback(m, avg_distortion, sigma, inertia):
        if vq_learn_callback:
            vq_learn_callback(m, avg_distortion, sigma, inertia)

    return ecoz2_vq_learn(prediction_order,
                          epsilon,
                          c_codebook_class_name,
                          c_predictor_filenames,
                          len(c_predictor_filenames),
                          callback
                          )
def get_actual_filenames(filenames, file_ext):
    """
    Return the given paths as a flat list of files, expanding any
    directories recursively and keeping only files with the extension.
    """
    result = []
    for entry in filenames:
        if os.path.isdir(entry):
            # Directories contribute all matching files underneath them.
            result.extend(list_files(entry, file_ext))
        elif os.path.isfile(entry) and entry.endswith(file_ext):
            result.append(entry)
    return result
def list_files(directory, file_ext):
    """
    List all files under the given directory (recursively) that have
    the given extension.
    """
    collected = []
    for entry in os.listdir(directory):
        full = "{}/{}".format(directory, entry)
        # print(full)
        if os.path.isdir(full):
            # Recurse into subdirectories.
            collected.extend(list_files(full, file_ext))
        elif os.path.isfile(full) and full.endswith(file_ext):
            collected.append(full)
    return collected
# ---------
def _to_bytes(s):
return s if isinstance(s, bytes) else str(s).encode("utf-8")
def _to_str(s):
return s if isinstance(s, str) else bytes(s).decode("utf-8")
| [
"carueda@mbari.org"
] | carueda@mbari.org |
4f1cf1347b78f2c9ecb4170992e0d6cc1810de58 | 16dbe8b1be0cd360ac1062072430f1f2b7d95bd6 | /FlightPlanner/BasicGNSS/ParameterDlgs/DlgCaculateWaypoint.py | a6bc525d80735a610431231967d854ce93aaae7a | [] | no_license | developer124320/FlightPlanner | 4a0d9a450ddddede95512ad76437db2906154536 | f1e4c762c360c0a00022ae6fa028fc1aee2a467d | refs/heads/master | 2022-08-25T14:00:57.495037 | 2020-05-27T01:26:27 | 2020-05-27T01:26:27 | 267,186,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,721 | py | # -*- coding: utf-8 -*-
from PyQt4.QtGui import QWidget, QFrame, QVBoxLayout, QGroupBox, QSizePolicy, QHBoxLayout, \
QLabel, QFont, QLineEdit, QToolButton, QIcon, QPixmap, QDialog, QDialogButtonBox, QMessageBox
from PyQt4.QtCore import QSize, QSizeF, Qt, SIGNAL, QObject
from FlightPlanner.captureCoordinateTool import CaptureCoordinateTool
import define
from qgis.gui import QgsMapToolPan
from qgis.core import QgsPoint
from FlightPlanner.types import RnavCommonWaypoint, DistanceUnits, AngleUnits, AircraftSpeedCategory
from FlightPlanner.QgisHelper import Point3D
from FlightPlanner.helpers import Unit, MathHelper, Distance
from FlightPlanner.BasicGNSS.rnavWaypoints import RnavWaypoints
from FlightPlanner.messages import Messages
from FlightPlanner.MeasureTool import MeasureTool
from FlightPlanner.CaptureBearingTool import CaptureBearingTool
from FlightPlanner.validations import Validations
import math
class CalcDlg(QDialog):
def __init__(self, parent, rnavType, category, position_0, position_1, position_List, flagStr = None):
    """Build the waypoint-calculation dialog.

    The layout (runway THR/END coordinate groups, bearing and distance rows,
    OK/Cancel buttons) is constructed entirely by hand below; the second half
    wires up map tools and initializes bearing/distance state depending on
    the RNAV waypoint type.
    # NOTE(review): position_0/position_1 appear to be reference positions and
    # position_List pre-captured points — confirm against callers.
    """
    QDialog.__init__(self, parent)
    self.flagStrName = flagStr
    # self.resize(326, 310)
    # --- outer layout and container group box ---
    self.verticalLayout = QVBoxLayout(self)
    self.verticalLayout.setSpacing(6)
    self.verticalLayout.setMargin(3)
    self.verticalLayout.setObjectName(("verticalLayout"))
    self.groupBox = QGroupBox(self)
    self.groupBox.setTitle((""))
    self.groupBox.setObjectName(("groupBox"))
    self.verticalLayout_2 = QVBoxLayout(self.groupBox)
    self.verticalLayout_2.setSpacing(0)
    self.verticalLayout_2.setMargin(3)
    self.verticalLayout_2.setObjectName(("verticalLayout_2"))
    # --- "Runway THR" group: X/Y line edits + capture button ---
    self.groupBox_5 = QGroupBox(self.groupBox)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.groupBox_5.sizePolicy().hasHeightForWidth())
    self.groupBox_5.setSizePolicy(sizePolicy)
    font = QFont()
    font.setFamily(("Arial"))
    self.groupBox_5.setFont(font)
    self.groupBox_5.setObjectName(("groupBox_5"))
    self.horizontalLayout_19 = QHBoxLayout(self.groupBox_5)
    self.horizontalLayout_19.setSpacing(0)
    self.horizontalLayout_19.setMargin(0)
    self.horizontalLayout_19.setObjectName(("horizontalLayout_19"))
    self.frame_18 = QFrame(self.groupBox_5)
    self.frame_18.setFrameShape(QFrame.StyledPanel)
    self.frame_18.setFrameShadow(QFrame.Raised)
    self.frame_18.setObjectName(("frame_18"))
    self.verticalLayout_13 = QVBoxLayout(self.frame_18)
    self.verticalLayout_13.setSpacing(0)
    self.verticalLayout_13.setContentsMargins(-1, -1, 0, -1)
    self.verticalLayout_13.setObjectName(("verticalLayout_13"))
    # THR X row
    self.frame_19 = QFrame(self.frame_18)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_19.sizePolicy().hasHeightForWidth())
    self.frame_19.setSizePolicy(sizePolicy)
    self.frame_19.setFrameShape(QFrame.StyledPanel)
    self.frame_19.setFrameShadow(QFrame.Raised)
    self.frame_19.setObjectName(("frame_19"))
    self.horizontalLayout_20 = QHBoxLayout(self.frame_19)
    self.horizontalLayout_20.setSpacing(0)
    self.horizontalLayout_20.setMargin(0)
    self.horizontalLayout_20.setObjectName(("horizontalLayout_20"))
    self.label_9 = QLabel(self.frame_19)
    self.label_9.setMaximumSize(QSize(60, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_9.setFont(font)
    self.label_9.setObjectName(("label_9"))
    self.horizontalLayout_20.addWidget(self.label_9)
    self.txtTHR_X = QLineEdit(self.frame_19)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtTHR_X.sizePolicy().hasHeightForWidth())
    self.txtTHR_X.setSizePolicy(sizePolicy)
    self.txtTHR_X.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtTHR_X.setFont(font)
    self.txtTHR_X.setObjectName(("txtTHR_X"))
    self.horizontalLayout_20.addWidget(self.txtTHR_X)
    self.verticalLayout_13.addWidget(self.frame_19)
    # THR Y row
    self.frame_20 = QFrame(self.frame_18)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_20.sizePolicy().hasHeightForWidth())
    self.frame_20.setSizePolicy(sizePolicy)
    self.frame_20.setFrameShape(QFrame.StyledPanel)
    self.frame_20.setFrameShadow(QFrame.Raised)
    self.frame_20.setObjectName(("frame_20"))
    self.horizontalLayout_21 = QHBoxLayout(self.frame_20)
    self.horizontalLayout_21.setSpacing(0)
    self.horizontalLayout_21.setMargin(0)
    self.horizontalLayout_21.setObjectName(("horizontalLayout_21"))
    self.label_10 = QLabel(self.frame_20)
    self.label_10.setMaximumSize(QSize(60, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_10.setFont(font)
    self.label_10.setObjectName(("label_10"))
    self.horizontalLayout_21.addWidget(self.label_10)
    self.txtTHR_Y = QLineEdit(self.frame_20)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtTHR_Y.sizePolicy().hasHeightForWidth())
    self.txtTHR_Y.setSizePolicy(sizePolicy)
    self.txtTHR_Y.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtTHR_Y.setFont(font)
    self.txtTHR_Y.setObjectName(("txtTHR_Y"))
    self.horizontalLayout_21.addWidget(self.txtTHR_Y)
    self.verticalLayout_13.addWidget(self.frame_20)
    self.horizontalLayout_19.addWidget(self.frame_18)
    # THR capture-button column
    self.frame_21 = QFrame(self.groupBox_5)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_21.sizePolicy().hasHeightForWidth())
    self.frame_21.setSizePolicy(sizePolicy)
    self.frame_21.setMaximumSize(QSize(30, 70))
    self.frame_21.setFrameShape(QFrame.StyledPanel)
    self.frame_21.setFrameShadow(QFrame.Raised)
    self.frame_21.setObjectName(("frame_21"))
    self.verticalLayout_14 = QVBoxLayout(self.frame_21)
    self.verticalLayout_14.setSpacing(0)
    self.verticalLayout_14.setMargin(0)
    self.verticalLayout_14.setObjectName(("verticalLayout_14"))
    self.btnCaptureRunwayTHR = QToolButton(self.frame_21)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.btnCaptureRunwayTHR.sizePolicy().hasHeightForWidth())
    self.btnCaptureRunwayTHR.setSizePolicy(sizePolicy)
    self.btnCaptureRunwayTHR.setMaximumSize(QSize(16777215, 47))
    icon = QIcon()
    icon.addPixmap(QPixmap(("Resource/coordinate_capture.png")), QIcon.Normal, QIcon.Off)
    self.btnCaptureRunwayTHR.setIcon(icon)
    self.btnCaptureRunwayTHR.setObjectName(("btnCaptureRunwayTHR"))
    self.verticalLayout_14.addWidget(self.btnCaptureRunwayTHR)
    # self.btnToolTHR = QToolButton(self.frame_21)
    # sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    # sizePolicy.setHorizontalStretch(0)
    # sizePolicy.setVerticalStretch(0)
    # sizePolicy.setHeightForWidth(self.btnToolTHR.sizePolicy().hasHeightForWidth())
    # self.btnToolTHR.setSizePolicy(sizePolicy)
    # self.btnToolTHR.setMaximumSize(QSize(16777215, 20))
    # icon1 = QIcon()
    # icon1.addPixmap(QPixmap(("Resource/sort2.png")), QIcon.Normal, QIcon.Off)
    # self.btnToolTHR.setIcon(icon1)
    # self.btnToolTHR.setObjectName(("btnToolTHR"))
    # self.verticalLayout_14.addWidget(self.btnToolTHR)
    self.horizontalLayout_19.addWidget(self.frame_21)
    self.verticalLayout_2.addWidget(self.groupBox_5)
    # --- "Runway END" group: X/Y line edits + capture button ---
    self.groupBox_4 = QGroupBox(self.groupBox)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.groupBox_4.sizePolicy().hasHeightForWidth())
    self.groupBox_4.setSizePolicy(sizePolicy)
    font = QFont()
    font.setFamily(("Arial"))
    self.groupBox_4.setFont(font)
    self.groupBox_4.setObjectName(("groupBox_4"))
    self.horizontalLayout_16 = QHBoxLayout(self.groupBox_4)
    self.horizontalLayout_16.setSpacing(0)
    self.horizontalLayout_16.setMargin(0)
    self.horizontalLayout_16.setObjectName(("horizontalLayout_16"))
    self.frame_14 = QFrame(self.groupBox_4)
    self.frame_14.setFrameShape(QFrame.StyledPanel)
    self.frame_14.setFrameShadow(QFrame.Raised)
    self.frame_14.setObjectName(("frame_14"))
    self.verticalLayout_11 = QVBoxLayout(self.frame_14)
    self.verticalLayout_11.setSpacing(0)
    self.verticalLayout_11.setContentsMargins(-1, -1, 0, -1)
    self.verticalLayout_11.setObjectName(("verticalLayout_11"))
    # END X row
    self.frame_15 = QFrame(self.frame_14)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_15.sizePolicy().hasHeightForWidth())
    self.frame_15.setSizePolicy(sizePolicy)
    self.frame_15.setFrameShape(QFrame.StyledPanel)
    self.frame_15.setFrameShadow(QFrame.Raised)
    self.frame_15.setObjectName(("frame_15"))
    self.horizontalLayout_17 = QHBoxLayout(self.frame_15)
    self.horizontalLayout_17.setSpacing(0)
    self.horizontalLayout_17.setMargin(0)
    self.horizontalLayout_17.setObjectName(("horizontalLayout_17"))
    self.label_7 = QLabel(self.frame_15)
    self.label_7.setMaximumSize(QSize(60, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_7.setFont(font)
    self.label_7.setObjectName(("label_7"))
    self.horizontalLayout_17.addWidget(self.label_7)
    self.txtEND_X = QLineEdit(self.frame_15)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtEND_X.sizePolicy().hasHeightForWidth())
    self.txtEND_X.setSizePolicy(sizePolicy)
    self.txtEND_X.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtEND_X.setFont(font)
    self.txtEND_X.setObjectName(("txtEND_X"))
    self.horizontalLayout_17.addWidget(self.txtEND_X)
    self.verticalLayout_11.addWidget(self.frame_15)
    # END Y row
    self.frame_16 = QFrame(self.frame_14)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_16.sizePolicy().hasHeightForWidth())
    self.frame_16.setSizePolicy(sizePolicy)
    self.frame_16.setFrameShape(QFrame.StyledPanel)
    self.frame_16.setFrameShadow(QFrame.Raised)
    self.frame_16.setObjectName(("frame_16"))
    self.horizontalLayout_18 = QHBoxLayout(self.frame_16)
    self.horizontalLayout_18.setSpacing(0)
    self.horizontalLayout_18.setMargin(0)
    self.horizontalLayout_18.setObjectName(("horizontalLayout_18"))
    self.label_8 = QLabel(self.frame_16)
    self.label_8.setMaximumSize(QSize(60, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_8.setFont(font)
    self.label_8.setObjectName(("label_8"))
    self.horizontalLayout_18.addWidget(self.label_8)
    self.txtEND_Y = QLineEdit(self.frame_16)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtEND_Y.sizePolicy().hasHeightForWidth())
    self.txtEND_Y.setSizePolicy(sizePolicy)
    self.txtEND_Y.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtEND_Y.setFont(font)
    self.txtEND_Y.setObjectName(("txtEND_Y"))
    self.horizontalLayout_18.addWidget(self.txtEND_Y)
    self.verticalLayout_11.addWidget(self.frame_16)
    self.horizontalLayout_16.addWidget(self.frame_14)
    # END capture-button column
    self.frame_17 = QFrame(self.groupBox_4)
    self.frame_17.setMaximumSize(QSize(30, 16777215))
    self.frame_17.setFrameShape(QFrame.StyledPanel)
    self.frame_17.setFrameShadow(QFrame.Raised)
    self.frame_17.setObjectName(("frame_17"))
    self.verticalLayout_12 = QVBoxLayout(self.frame_17)
    self.verticalLayout_12.setSpacing(0)
    self.verticalLayout_12.setMargin(0)
    self.verticalLayout_12.setObjectName(("verticalLayout_12"))
    self.btnCaptureRunwayEND = QToolButton(self.frame_17)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.btnCaptureRunwayEND.sizePolicy().hasHeightForWidth())
    self.btnCaptureRunwayEND.setSizePolicy(sizePolicy)
    self.btnCaptureRunwayEND.setMaximumSize(QSize(16777215, 47))
    self.btnCaptureRunwayEND.setIcon(icon)
    self.btnCaptureRunwayEND.setObjectName(("btnCaptureRunwayEND"))
    self.verticalLayout_12.addWidget(self.btnCaptureRunwayEND)
    # self.btnToolEND = QToolButton(self.frame_17)
    # sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    # sizePolicy.setHorizontalStretch(0)
    # sizePolicy.setVerticalStretch(0)
    # sizePolicy.setHeightForWidth(self.btnToolEND.sizePolicy().hasHeightForWidth())
    # self.btnToolEND.setSizePolicy(sizePolicy)
    # self.btnToolEND.setMaximumSize(QSize(16777215, 20))
    # self.btnToolEND.setIcon(icon1)
    # self.btnToolEND.setObjectName(("btnToolEND"))
    # self.verticalLayout_12.addWidget(self.btnToolEND)
    self.horizontalLayout_16.addWidget(self.frame_17)
    self.verticalLayout_2.addWidget(self.groupBox_4)
    # --- informational labels (filled elsewhere) ---
    self.lbl1 = QLabel(self.groupBox)
    font = QFont()
    font.setFamily(("Arial"))
    self.lbl1.setFont(font)
    self.lbl1.setText((""))
    self.lbl1.setAlignment(Qt.AlignCenter)
    self.lbl1.setWordWrap(False)
    self.lbl1.setMargin(0)
    self.lbl1.setObjectName(("lbl1"))
    self.verticalLayout_2.addWidget(self.lbl1)
    self.lbl2 = QLabel(self.groupBox)
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.lbl2.setFont(font)
    self.lbl2.setText((""))
    self.lbl2.setAlignment(Qt.AlignCenter)
    self.lbl2.setObjectName(("lbl2"))
    self.verticalLayout_2.addWidget(self.lbl2)
    # --- "From" row (read-only waypoint name) ---
    self.frame_22 = QFrame(self.groupBox)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_22.sizePolicy().hasHeightForWidth())
    self.frame_22.setSizePolicy(sizePolicy)
    self.frame_22.setFrameShape(QFrame.StyledPanel)
    self.frame_22.setFrameShadow(QFrame.Raised)
    self.frame_22.setObjectName(("frame_22"))
    self.horizontalLayout_22 = QHBoxLayout(self.frame_22)
    self.horizontalLayout_22.setSpacing(0)
    self.horizontalLayout_22.setMargin(0)
    self.horizontalLayout_22.setObjectName(("horizontalLayout_22"))
    self.label_11 = QLabel(self.frame_22)
    self.label_11.setMinimumSize(QSize(170, 0))
    self.label_11.setMaximumSize(QSize(180, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_11.setFont(font)
    self.label_11.setObjectName(("label_11"))
    self.horizontalLayout_22.addWidget(self.label_11)
    self.txtForm = QLineEdit(self.frame_22)
    self.txtForm.setEnabled(False)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtForm.sizePolicy().hasHeightForWidth())
    self.txtForm.setSizePolicy(sizePolicy)
    self.txtForm.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtForm.setFont(font)
    self.txtForm.setObjectName(("txtForm"))
    self.horizontalLayout_22.addWidget(self.txtForm)
    self.verticalLayout_2.addWidget(self.frame_22)
    # --- bearing row (editable + capture-from-map button) ---
    self.frame_23 = QFrame(self.groupBox)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_23.sizePolicy().hasHeightForWidth())
    self.frame_23.setSizePolicy(sizePolicy)
    self.frame_23.setFrameShape(QFrame.StyledPanel)
    self.frame_23.setFrameShadow(QFrame.Raised)
    self.frame_23.setObjectName(("frame_23"))
    self.horizontalLayout_23 = QHBoxLayout(self.frame_23)
    self.horizontalLayout_23.setSpacing(0)
    self.horizontalLayout_23.setMargin(0)
    self.horizontalLayout_23.setObjectName(("horizontalLayout_23"))
    self.label_12 = QLabel(self.frame_23)
    self.label_12.setMinimumSize(QSize(170, 0))
    self.label_12.setMaximumSize(QSize(180, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.label_12.setFont(font)
    self.label_12.setObjectName(("label_12"))
    self.horizontalLayout_23.addWidget(self.label_12)
    self.txtBearing = QLineEdit(self.frame_23)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtBearing.sizePolicy().hasHeightForWidth())
    self.txtBearing.setSizePolicy(sizePolicy)
    self.txtBearing.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtBearing.setFont(font)
    self.txtBearing.setObjectName(("txtBearing"))
    self.horizontalLayout_23.addWidget(self.txtBearing)
    self.btnCaptureBearing = QToolButton(self.frame_23)
    sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.btnCaptureBearing.sizePolicy().hasHeightForWidth())
    self.btnCaptureBearing.setSizePolicy(sizePolicy)
    self.btnCaptureBearing.setMaximumSize(QSize(16777215, 25))
    self.btnCaptureBearing.setStyleSheet((""))
    self.btnCaptureBearing.setIcon(icon)
    self.btnCaptureBearing.setObjectName(("btnCaptureBearing"))
    self.horizontalLayout_23.addWidget(self.btnCaptureBearing)
    self.verticalLayout_2.addWidget(self.frame_23)
    # --- distance row (read-only + measure-from-map button) ---
    self.frame_24 = QFrame(self.groupBox)
    sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.frame_24.sizePolicy().hasHeightForWidth())
    self.frame_24.setSizePolicy(sizePolicy)
    self.frame_24.setFrameShape(QFrame.StyledPanel)
    self.frame_24.setFrameShadow(QFrame.Raised)
    self.frame_24.setObjectName(("frame_24"))
    self.horizontalLayout_24 = QHBoxLayout(self.frame_24)
    self.horizontalLayout_24.setSpacing(0)
    self.horizontalLayout_24.setMargin(0)
    self.horizontalLayout_24.setObjectName(("horizontalLayout_24"))
    self.lblDistance = QLabel(self.frame_24)
    self.lblDistance.setMinimumSize(QSize(170, 0))
    self.lblDistance.setMaximumSize(QSize(180, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    font.setBold(False)
    font.setWeight(50)
    self.lblDistance.setFont(font)
    self.lblDistance.setObjectName(("lblDistance"))
    self.horizontalLayout_24.addWidget(self.lblDistance)
    self.txtDistance = QLineEdit(self.frame_24)
    self.txtDistance.setEnabled(False)
    sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.txtDistance.sizePolicy().hasHeightForWidth())
    self.txtDistance.setSizePolicy(sizePolicy)
    self.txtDistance.setMaximumSize(QSize(16777215, 16777215))
    font = QFont()
    font.setFamily(("Arial"))
    self.txtDistance.setFont(font)
    self.txtDistance.setObjectName(("txtDistance"))
    self.horizontalLayout_24.addWidget(self.txtDistance)
    self.btnCaptureDistance = QToolButton(self.frame_24)
    sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.btnCaptureDistance.sizePolicy().hasHeightForWidth())
    self.btnCaptureDistance.setSizePolicy(sizePolicy)
    self.btnCaptureDistance.setMaximumSize(QSize(16777215, 23))
    self.btnCaptureDistance.setStyleSheet((""))
    self.btnCaptureDistance.setIcon(icon)
    self.btnCaptureDistance.setObjectName(("btnCaptureDistance"))
    self.horizontalLayout_24.addWidget(self.btnCaptureDistance)
    self.verticalLayout_2.addWidget(self.frame_24)
    self.verticalLayout.addWidget(self.groupBox)
    # --- OK/Cancel buttons ---
    self.buttonBox = QDialogButtonBox(self)
    self.buttonBox.setOrientation(Qt.Horizontal)
    self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
    self.buttonBox.setObjectName(("buttonBox"))
    self.verticalLayout.addWidget(self.buttonBox)
    # --- signal wiring for the capture buttons and coordinate edits ---
    self.btnCaptureDistance.clicked.connect(self.method_9)
    self.btnCaptureBearing.clicked.connect(self.method_8)
    self.txtEND_X.textChanged.connect(self.method_4)
    self.txtEND_Y.textChanged.connect(self.method_4)
    self.txtTHR_X.textChanged.connect(self.method_4)
    self.txtTHR_Y.textChanged.connect(self.method_4)
    # --- non-widget state and map tools ---
    self.type = rnavType
    self.category = category
    self.resultPosionList = position_List
    self.MinBearing2 = 0
    self.MaxBearing2 = 0
    self.waypoint = None
    self.distanceMeasureTool = MeasureTool(define._canvas, self.txtDistance, DistanceUnits.NM)
    self.bearingTool = CaptureBearingTool(define._canvas, self.txtBearing)
    self.CaptureTHRCoordTool = CaptureCoordinateTool(define._canvas, self.txtTHR_X, self.txtTHR_Y)
    self.CaptureTHRCoordTool.rubberBandClick.setColor(Qt.green)
    self.CaptureENDCoordTool = CaptureCoordinateTool(define._canvas, self.txtEND_X, self.txtEND_Y)
    self.CaptureENDCoordTool.rubberBandClick.setColor(Qt.blue)
    # --- waypoint-type-specific initialization ---
    if rnavType == RnavCommonWaypoint.FAWP or rnavType == RnavCommonWaypoint.MAWP:
        # FAWP/MAWP: prefill THR/END from the supplied position list and
        # mark those positions on the canvas.
        self.from1 = position_0
        self.resize(326, 310)
        if position_List[0] != None:
            self.setThrPosition(position_List[0].x(), position_List[0].y())
            self.CaptureTHRCoordTool.rubberBandClick.addPoint(QgsPoint(position_List[0].x(), position_List[0].y()))
        # self.CaptureTHRCoordTool.rubberBandClick.show()
        if position_List[1] != None:
            self.setEndPosition(position_List[1].x(), position_List[1].y())
            self.CaptureENDCoordTool.rubberBandClick.addPoint(QgsPoint(position_List[1].x(), position_List[1].y()))
        # self.setWaypoint(position_List[2])
    else:
        # Other waypoint types: derive bearing limits from the two reference
        # positions and pick the initial bearing per waypoint type / bar side.
        self.from1 = position_1
        num = RnavWaypoints.smethod_0(position_0, position_1)
        self.MinBearing = RnavWaypoints.smethod_7(rnavType, category, num)
        self.MaxBearing = RnavWaypoints.smethod_8(rnavType, category, num)
        self.MinDistance = RnavWaypoints.smethod_4(rnavType, category)
        if flagStr == "Y-Bar":
            if (rnavType == RnavCommonWaypoint.IAWP1):
                self.setBearing(self.MaxBearing)
            elif (rnavType != RnavCommonWaypoint.IAWP3):
                self.setBearing(num)
            else:
                self.setBearing(self.MinBearing)
        else:
            if (rnavType == RnavCommonWaypoint.IAWP1):
                self.setBearing(self.MinBearing)
            elif (rnavType != RnavCommonWaypoint.IAWP3):
                self.setBearing(num)
            else:
                self.setBearing(self.MaxBearing)
    # if self.txtDistance.isEnabled():
    #     self.setDistance(RnavWaypoints.smethod_6(rnavType, category).NauticalMiles)
    # self.setWaypoint(position_List.pop(0))
    self.method_4()
    self.retranslateUi()
    QObject.connect(self.buttonBox, SIGNAL(("accepted()")), self.btnCalculate_Click)
    QObject.connect(self.buttonBox, SIGNAL(("rejected()")), self.reject)
    # QMetaObject.connectSlotsByName(Dialog)
    # self.btnToolEND.clicked.connect(self.removeEnd)
    # self.btnToolTHR.clicked.connect(self.removeThr)
    self.btnCaptureRunwayTHR.clicked.connect(self.captureTHR)
    self.btnCaptureRunwayEND.clicked.connect(self.captureEND)
def retranslateUi(self):
    """Apply the (single-language) captions to every widget on the dialog.

    NOTE(review): "CaculaterDlg" looks like a typo for "CalculatorDlg" --
    kept byte-for-byte in case anything matches on the window title.
    """
    self.setWindowTitle("CaculaterDlg")
    # (setter, caption) pairs, applied in the original order.
    captions = (
        (self.groupBox_5.setTitle, "Runway THR"),
        (self.label_9.setText, "X:"),
        (self.label_10.setText, "Y:"),
        (self.btnCaptureRunwayTHR.setText, "..."),
        (self.groupBox_4.setTitle, "Runway END"),
        (self.label_7.setText, "X:"),
        (self.label_8.setText, "Y:"),
        (self.btnCaptureRunwayEND.setText, "..."),
        (self.label_11.setText, "From:"),
        (self.txtForm.setText, "FAWP"),
        (self.label_12.setText, unicode("Back Azimuth (°) :", "utf-8")),
        (self.btnCaptureBearing.setText, "..."),
        (self.lblDistance.setText, "Distance From RWY THR (nm):"),
        (self.txtDistance.setText, "5"),
        (self.btnCaptureDistance.setText, "..."),
    )
    for setter, text in captions:
        setter(text)
def captureTHR(self):
    """Activate the map tool that fills the runway THR X/Y fields on click."""
    define._canvas.setMapTool(self.CaptureTHRCoordTool)
def captureEND(self):
    """Activate the map tool that fills the runway END X/Y fields on click."""
    define._canvas.setMapTool(self.CaptureENDCoordTool)
def close(self):
    """Strip every rubber band this dialog placed on the canvas and restore
    the pan tool, so no leftover graphics survive after the dialog goes away."""
    scene = define._canvas.scene()
    leftover_items = (
        self.CaptureTHRCoordTool.rubberBandClick,
        self.CaptureENDCoordTool.rubberBandClick,
        self.bearingTool.rubberBand,
        self.distanceMeasureTool.rubberBand,
    )
    for item in leftover_items:
        scene.removeItem(item)
    define._canvas.setMapTool(QgsMapToolPan(define._canvas))
def reject(self):
    """Clean the canvas (see close()) before running the normal QDialog rejection."""
    self.close()
    QDialog.reject(self)
def getThrPoint3D(self):
    """Return the runway THR position as a Point3D (z = 0).

    Returns None when either coordinate field is empty; text that cannot be
    parsed as a float falls back to 0.
    """
    x_text = self.txtTHR_X.text()
    y_text = self.txtTHR_Y.text()
    if x_text == "" or y_text == "":
        return None
    try:
        x = float(x_text)
    except ValueError:
        x = 0
    try:
        y = float(y_text)
    except ValueError:
        y = 0
    return Point3D(x, y, 0)
def getEndPoint3D(self):
    """Return the runway END position as a Point3D (z = 0).

    Returns None when either coordinate field is empty; text that cannot be
    parsed as a float falls back to 0.
    """
    x_text = self.txtEND_X.text()
    y_text = self.txtEND_Y.text()
    if x_text == "" or y_text == "":
        return None
    try:
        x = float(x_text)
    except ValueError:
        x = 0
    try:
        y = float(y_text)
    except ValueError:
        y = 0
    return Point3D(x, y, 0)
def setEndPosition(self, x, y):
    """Fill the runway END coordinate fields from a map position."""
    self.txtEND_X.setText(str(x))
    self.txtEND_Y.setText(str(y))
def setThrPosition(self, x, y):
    """Fill the runway THR coordinate fields from a map position."""
    self.txtTHR_X.setText(str(x))
    self.txtTHR_Y.setText(str(y))
def getWaypoint(self):
    """Compute the waypoint position from the current bearing/distance fields.

    FAWP: projects from the point 1400 m inside the THR along the entered
    back-azimuth, correcting the along-track distance for the angular offset
    between the entered bearing and the runway direction.
    MAWP: intersects the line from ``self.from1`` along the entered bearing
    with a splay line from the runway THR, then projects the THR by the
    resulting distance. Raises UserWarning when no intersection exists.
    Any other type: plain projection from ``self.from1`` by the entered
    bearing and distance.
    """
    if (self.type == RnavCommonWaypoint.FAWP):
        nauticalMiles = float(self.txtDistance.text())
        value = float(self.txtBearing.text())
        # Angular offset between the entered bearing and the runway
        # direction, normalised into 0..180 degrees.
        num1 = math.fabs(self.rethr - value)
        if (num1 > 180):
            num1 = 360 - num1
        # NOTE(review): 0.7559395 vs 0.755939525 below differ slightly --
        # presumably the same constant (1400 m expressed in NM); confirm.
        num2 = math.sin(Unit.smethod_0(num1)) * 0.7559395
        num3 = Unit.smethod_1(math.asin(num2 / nauticalMiles))
        num4 = math.cos(Unit.smethod_0(num1)) * 0.755939525
        num5 = math.cos(Unit.smethod_0(num3)) * nauticalMiles
        return RnavWaypoints.smethod_3(self.pos1400m, float(self.txtBearing.text()), Distance(math.fabs(num5 - num4), DistanceUnits.NM))
    if (self.type != RnavCommonWaypoint.MAWP):
        # Non-FAWP/MAWP types: direct projection from the reference point.
        return RnavWaypoints.smethod_3(self.from1, float(self.txtBearing.text()), Distance(float(self.txtDistance.text()), DistanceUnits.NM))
    # MAWP: build a splay line from the THR at 90 deg (70 deg for Y-Bar) to
    # the runway axis, on the side the entered bearing falls on.
    angle = 90
    if (float(self.txtBearing.text()) > self.thrre or float(self.txtBearing.text()) - self.thrre >= 90):
        if self.flagStrName == "Y-Bar":
            angle = 70
        num = self.rethr - angle
        if (num < 0):
            num = num + 360
    else:
        num = self.rethr + angle
        if (num > 360):
            num = num - 360
    point3d1 = self.from1
    point3d2 = self.getThrPoint3D()
    # Intersect the bearing line from ``from1`` with the splay line from THR.
    point3d = MathHelper.getIntersectionPoint(point3d1, RnavWaypoints.smethod_3(self.from1, float(self.txtBearing.text()), Distance(1000)), point3d2, RnavWaypoints.smethod_3(self.getThrPoint3D(), num, Distance(1000)))
    if point3d == None:
        raise UserWarning, Messages.ERR_FAILED_TO_CALCULATE_INTERSECTION_POINT
    return RnavWaypoints.smethod_3(self.getThrPoint3D(), num, Distance(MathHelper.calcDistance(point3d2, point3d)))
def setWaypoint(self, value):
    """Remember *value* as the current waypoint and, when both endpoints are
    known, refresh the distance field with the from1 -> waypoint distance (NM)."""
    self.waypoint = value
    should_update = self.from1 != None and self.waypoint != None
    if should_update:
        distance_nm = RnavWaypoints.smethod_2(self.from1, value).NauticalMiles
        self.setDistance(distance_nm)
def setDistance(self, value):
    """Display *value* (nautical miles) in the distance field, rounded to the
    nearest whole number."""
    rounded = int(round(value))
    self.txtDistance.setText(str(rounded))
def setBearing(self, value):
    """Display *value* (degrees) in the bearing field."""
    self.txtBearing.setText(str(value))
def btnCalculate_Click(self):
    """Validate the inputs, compute the waypoint and push it to the parent.

    Raises UserWarning when the bearing is outside the acceptable range(s)
    or the entered distance is below the minimum for this waypoint type.
    On success the result is written to the matching group box / map
    annotation of the parent dialog and the dialog is accepted.
    """
    # try:
    if self.type == RnavCommonWaypoint.FAWP or self.type == RnavCommonWaypoint.MAWP:
        # FAWP/MAWP need both runway ends before anything can be computed.
        if self.getThrPoint3D() == None or self.getEndPoint3D() == None:
            return
    # Bearing must fall inside the primary range; for MAWP a secondary
    # range (MinBearing2/MaxBearing2) may also be acceptable.
    if not MathHelper.smethod_112(float(self.txtBearing.text()), self.MinBearing, self.MaxBearing, AngleUnits.Degrees):
        if self.type != RnavCommonWaypoint.MAWP or MathHelper.smethod_96(self.MinBearing2) or MathHelper.smethod_96(self.MaxBearing2):
            raise UserWarning, Messages.VALUE_NOT_WITHIN_ACCEPTABLE_RANGE
        elif not MathHelper.smethod_106(float(self.txtBearing.text()), self.MinBearing2, self.MaxBearing2):
            raise UserWarning, Messages.VALUE_NOT_WITHIN_ACCEPTABLE_RANGE
    # 100 m tolerance below the minimum distance before rejecting.
    if self.txtDistance.isEnabled() and Distance(float(self.txtDistance.text()),DistanceUnits.NM).Metres < (self.MinDistance.Metres - 100):
        raise UserWarning, Validations.VALUE_CANNOT_BE_SMALLER_THAN%self.MinDistance.NauticalMiles
    wayPoint = self.getWaypoint()
    # Store the result on the parent: position into the type's group box,
    # (bearing, distance) into the slot of parameterCalcList reserved for
    # this waypoint type, and the annotation moved to the new position.
    if self.type == RnavCommonWaypoint.FAWP or self.type == RnavCommonWaypoint.MAWP:
        if self.type == RnavCommonWaypoint.FAWP:
            self.parent().gbFAWP.setPosition( wayPoint.x(), wayPoint.y())
            if len(self.parent().parameterCalcList) > 0:
                self.parent().parameterCalcList.pop(0)
            self.parent().parameterCalcList.insert(0,(self.txtBearing.text(), self.txtDistance.text()))
            self.parent().annotationFAWP.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
        else:
            self.parent().gbMAWP.setPosition( wayPoint.x(), wayPoint.y())
            if len(self.parent().parameterCalcList) > 1:
                self.parent().parameterCalcList.pop(1)
            self.parent().parameterCalcList.insert(1,(self.txtBearing.text(), self.txtDistance.text()))
            self.parent().annotationMAWP.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
        # FAWP/MAWP also refresh the runway ends kept on the parent.
        self.parent().RwyTHR = self.getThrPoint3D()
        self.parent().RwyEND = self.getEndPoint3D()
    elif self.type == RnavCommonWaypoint.MAHWP:
        self.parent().gbMAHWP.setPosition( wayPoint.x(), wayPoint.y())
        if len(self.parent().parameterCalcList) > 2:
            self.parent().parameterCalcList.pop(2)
        self.parent().parameterCalcList.insert(2,(self.txtBearing.text(), self.txtDistance.text()))
        self.parent().annotationMAHWP.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
    elif self.type == RnavCommonWaypoint.IWP:
        self.parent().gbIWP.setPosition( wayPoint.x(), wayPoint.y())
        if len(self.parent().parameterCalcList) > 3:
            self.parent().parameterCalcList.pop(3)
        self.parent().parameterCalcList.insert(3,(self.txtBearing.text(), self.txtDistance.text()))
        self.parent().annotationIWP.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
    elif self.type == RnavCommonWaypoint.IAWP1:
        self.parent().gbIAWP1.setPosition( wayPoint.x(), wayPoint.y())
        if len(self.parent().parameterCalcList) > 4:
            self.parent().parameterCalcList.pop(4)
        self.parent().parameterCalcList.insert(4,(self.txtBearing.text(), self.txtDistance.text()))
        self.parent().annotationIAWP1.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
    elif self.type == RnavCommonWaypoint.IAWP2:
        self.parent().gbIAWP2.setPosition( wayPoint.x(), wayPoint.y())
        if len(self.parent().parameterCalcList) > 5:
            self.parent().parameterCalcList.pop(5)
        self.parent().parameterCalcList.insert(5,(self.txtBearing.text(), self.txtDistance.text()))
        self.parent().annotationIAWP2.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
    elif self.type == RnavCommonWaypoint.IAWP3:
        self.parent().gbIAWP3.setPosition( wayPoint.x(), wayPoint.y())
        if len(self.parent().parameterCalcList) > 6:
            self.parent().parameterCalcList.pop(6)
        self.parent().parameterCalcList.insert(6,(self.txtBearing.text(), self.txtDistance.text()))
        self.parent().annotationIAWP3.setMapPosition(QgsPoint(wayPoint.x(), wayPoint.y()))
    self.close()
    QDialog.accept(self)
    # except UserWarning as e:
    #     pass
    #     # QMessageBox.warning(self, "warning", e.message)
def method_9(self):
    """Activate the distance-measuring map tool (writes into txtDistance, NM)."""
    # self.distanceMeasureTool = MeasureTool(define._canvas, self.txtDistance, DistanceUnits.NM)
    define._canvas.setMapTool(self.distanceMeasureTool)
def method_8(self):
    """Activate the bearing-capture map tool (writes into txtBearing)."""
    # self.bearingTool = CaptureBearingTool(define._canvas, self.txtBearing)
    define._canvas.setMapTool(self.bearingTool)
def method_4(self):
    """Derive the acceptable bearing range(s) (and minimum distance) for the
    current waypoint type and show them in the two hint labels.

    FAWP: the range is centred on the runway direction END->THR.
    MAWP: up to two candidate ranges are built from splay lines around the
    runway axis; the secondary range may be merged into the primary one.
    Other types: the limits were already set by the caller; only the labels
    and the default bearing are refreshed by the tail of this method.
    NOTE(review): block structure was reconstructed from a whitespace-mangled
    (decompiled) source; the tail is assumed to run for every type, which
    the ``self.type != RnavCommonWaypoint.MAWP`` guard at the end supports.
    """
    # Scratch bearings used while building the MAWP candidate ranges.
    num = None
    num1 = None
    num2 = None
    num3 = None
    num4 = None
    num5 = None
    num6 = None
    num7 = None
    num8 = None
    num9 = None
    # Default hint until valid runway positions are available.
    self.lbl1.setText(Validations.PLEASE_ENTER_VALID_RUNWAY_POSITIONS)
    self.lbl2.setText(" ")
    if (self.type == RnavCommonWaypoint.FAWP):
        position = self.getThrPoint3D()
        position1 = self.getEndPoint3D()
        if position is None or position1 is None:
            # Cannot derive anything without both runway ends.
            self.txtBearing.setText("")
            # self.txtDistance.setText("")
            return
        # Runway bearings: THR->END and the reciprocal END->THR.
        self.thrre = RnavWaypoints.smethod_0(position, position1)
        self.rethr = RnavWaypoints.smethod_0(position1, position)
        # Reference point 1400 m from the THR along the extended centreline.
        self.pos1400m = RnavWaypoints.smethod_3(position, self.rethr, Distance(1400))
        self.MinDistance = RnavWaypoints.smethod_4(self.type, self.category)
        # if self.txtDistance.isEnabled():
        #     self.setDistance(RnavWaypoints.smethod_6(self.type, self.category).NauticalMiles)
        self.setBearing(self.rethr)
        self.MinBearing = RnavWaypoints.smethod_7(self.type, self.category, self.rethr)
        self.MaxBearing = RnavWaypoints.smethod_8(self.type, self.category, self.rethr)
        # if self.waypoint is not None:
        #     self.setBearing(round(RnavWaypoints.smethod_0(self.pos1400m, self.waypoint), 2))
        #     # if self.txtDistance.isEnabled():
        #     self.setDistance(RnavWaypoints.smethod_2(position, self.waypoint).NauticalMiles)
    elif (self.type == RnavCommonWaypoint.MAWP):
        position2 = self.getThrPoint3D()
        position3 = self.getEndPoint3D()
        if position2 is None or position3 is None:
            self.txtBearing.setText("")
            return
        self.thrre = RnavWaypoints.smethod_0(position2, position3)
        self.rethr = RnavWaypoints.smethod_0(position3, position2)
        self.pos1400m = RnavWaypoints.smethod_3(position2, self.rethr, Distance(1400))
        # Bearings between the 1400 m point and the reference fix, both ways.
        num10 = RnavWaypoints.smethod_1(self.pos1400m, self.from1)
        num = RnavWaypoints.smethod_1(self.from1, self.pos1400m)
        # Splay half-angle: 15 deg, widened to 30 deg for slow categories.
        num11 = 15
        position4 = None
        if (self.category == AircraftSpeedCategory.A or self.category == AircraftSpeedCategory.B or self.category == AircraftSpeedCategory.H):
            num11 = 30
        # Primary candidate range, opened toward the side the fix lies on.
        if (num10 > self.rethr or self.rethr - num10 >= 90):
            num1 = self.thrre + num11
            num2 = num
            if (num2 > 360):
                num2 = num2 - 360
        else:
            num1 = num
            num2 = self.thrre - num11
            if (num2 < 0):
                num2 = num2 + 360
        # Order the pair as (min, max) unless the range straddles north.
        if (max(num1, num2) <= 270 or min(num1, num2) >= 90):
            num3 =min(num1, num2)
            num4 = max(num1, num2)
        else:
            num3 = max(num1, num2)
            num4 = min(num1, num2)
        # Point 466 m past the THR along the runway heading.
        position4 = RnavWaypoints.smethod_3(position2, self.thrre, Distance(466))
        num12 = RnavWaypoints.smethod_0(position4, self.from1)
        # Offset of the fix from the reverse runway axis, folded to 0..180.
        num13 = math.fabs(num12 - self.rethr)
        if (num13 > 180):
            num13 = 360 - num13
        if (num13 > 5):
            # Fix is far off-axis: no secondary range.
            num5 = 0
            num6 = 0
            num7 = 0
            num8 = 0
        else:
            # Near-axis: build a secondary range toward a point 150 m
            # abeam the 1400 m point, on the side the fix lies on.
            if (num12 > self.rethr or self.rethr - num12 >= 90):
                num9 = self.rethr + 90
                if (num9 > 360):
                    num9 = num9 - 360
            else:
                num9 = self.rethr - 90
                if (num9 < 0):
                    num9 = num9 + 360
            position5 = RnavWaypoints.smethod_3(self.pos1400m, num9, Distance(150))
            num5 = RnavWaypoints.smethod_0(self.from1, position5)
            num6 = RnavWaypoints.smethod_0(self.from1, self.pos1400m)
            if (max(num5, num6) <= 270 or min(num5, num6) >= 90):
                num7 = min(num5, num6)
                num8 = max(num5, num6)
            else:
                num7 = max(num5, num6)
                num8 = min(num5, num6)
        # Fix sits on the runway axis (within 1 deg): symmetric range
        # between the two points 150 m either side of the 1400 m point.
        if (MathHelper.smethod_99(num, self.thrre, 1)):
            position6 = RnavWaypoints.smethod_3(self.pos1400m, self.rethr - 90, Distance(150))
            position7 = RnavWaypoints.smethod_3(self.pos1400m, self.rethr + 90, Distance(150))
            num1 = RnavWaypoints.smethod_0(self.from1, position6)
            num2 = RnavWaypoints.smethod_0(self.from1, position7)
            num7 = 0
            num8 = 0
            if (max(num1, num2) <= 270 or min(num1, num2) >= 90):
                num3 = min(num1, num2)
                num4 = max(num1, num2)
            else:
                num3 = max(num1, num2)
                num4 = min(num1, num2)
        # Publish the range(s): secondary absent (NaN/zero check via
        # smethod_96), or merge the two when their edges nearly touch.
        if (MathHelper.smethod_96(num7) or MathHelper.smethod_96(num8)):
            self.MinBearing = MathHelper.smethod_3(num3)
            self.MaxBearing = MathHelper.smethod_3(num4)
            self.MinBearing2 = MathHelper.smethod_3(num7)
            self.MaxBearing2 = MathHelper.smethod_3(num8)
        elif (min(num3, num4) >= min(num7, num8)):
            if (MathHelper.smethod_99(num8, num3, 0.3)):
                num8 = num4
                num3 = 0
                num4 = 0
            self.MinBearing = MathHelper.smethod_3(num7)
            self.MaxBearing = MathHelper.smethod_3(num8)
            self.MinBearing2 = MathHelper.smethod_3(num3)
            self.MaxBearing2 = MathHelper.smethod_3(num4)
        else:
            if (MathHelper.smethod_99(num4, num7, 0.3)):
                num4 = num8
                num7 = 0
                num8 = 0
            self.MinBearing = MathHelper.smethod_3(num3)
            self.MaxBearing = MathHelper.smethod_3(num4)
            self.MinBearing2 = MathHelper.smethod_3(num7)
            self.MaxBearing2 = MathHelper.smethod_3(num8)
    # Tail: runs for every waypoint type that did not return early above.
    self.MinDistance = RnavWaypoints.smethod_4(self.type, self.category)
    # if self.txtDistance.isEnabled():
    #     self.setDistance(RnavWaypoints.smethod_6(self.type, self.category).NauticalMiles)
    # Default bearing: midpoint of the range, handling wrap through north.
    if (self.MinBearing <= self.MaxBearing):
        self.setBearing((self.MinBearing + self.MaxBearing) / 2)
    else:
        self.setBearing(MathHelper.smethod_3(self.MinBearing + (360 - self.MinBearing + self.MaxBearing)))
    # if (self.waypoint is not None):
    #     self.setBearing(RnavWaypoints.smethod_0(self.from1, self.waypoint))
    if (MathHelper.smethod_96(self.MinBearing2) or MathHelper.smethod_96(self.MaxBearing2)):
        self.lbl1.setText(unicode("Acceptable bearings are %.1f° - %.1f°", "utf-8")%(self.MinBearing, self.MaxBearing))
    else:
        self.lbl1.setText(Validations.ACCEPTABLE_BEARINGS_ARE_X_Y_AND_X_Y%( self.MinBearing, self.MaxBearing, self.MinBearing2, self.MaxBearing2))
    if self.MinDistance != None and self.type != RnavCommonWaypoint.MAWP:
        self.lbl2.setText(Validations.ACCEPTABLE_MINIMUM_DISTANCE_IS_X%(self.MinDistance.NauticalMiles))
# def removeEnd(self):
# self.txtEND_X.setText("")
# self.txtEND_Y.setText("")
# def removeThr(self):
# self.txtTHR_X.setText("")
# self.txtTHR_Y.setText("")
# @staticmethod
# def smethod_0( parent, rnavCommonWaypoint_0, aircraftSpeedCategory_0, position_0, position_1, position_List):
# flag = None
# using (DlgCalculateWaypoint dlgCalculateWaypoint = new DlgCalculateWaypoint())
# {
# dlgCalculateWaypoint.Text = string.Format("{0} {1}", Captions.CALCULATE, EnumHelper.smethod_0(rnavCommonWaypoint_0))
# dlgCalculateWaypoint.Type = rnavCommonWaypoint_0
# dlgCalculateWaypoint.Category = aircraftSpeedCategory_0
# dlgCalculateWaypoint.From = position_1
# double num = RnavWaypoints.smethod_0(position_0, position_1)
# dlgCalculateWaypoint.MinBearing = RnavWaypoints.smethod_7(rnavCommonWaypoint_0, aircraftSpeedCategory_0, num)
# dlgCalculateWaypoint.MaxBearing = RnavWaypoints.smethod_8(rnavCommonWaypoint_0, aircraftSpeedCategory_0, num)
# dlgCalculateWaypoint.MinDistance = RnavWaypoints.smethod_4(rnavCommonWaypoint_0, aircraftSpeedCategory_0)
# if (rnavCommonWaypoint_0 == RnavCommonWaypoint.IAWP1)
# {
# dlgCalculateWaypoint.Bearing = dlgCalculateWaypoint.MinBearing
# }
# else if (rnavCommonWaypoint_0 != RnavCommonWaypoint.IAWP3)
# {
# dlgCalculateWaypoint.Bearing = num
# }
# else
# {
# dlgCalculateWaypoint.Bearing = dlgCalculateWaypoint.MaxBearing
# }
# dlgCalculateWaypoint.Distance = RnavWaypoints.smethod_6(rnavCommonWaypoint_0, aircraftSpeedCategory_0)
# dlgCalculateWaypoint.Waypoint = position_2
# if (dlgCalculateWaypoint.method_2(iwin32Window_0) != System.Windows.Forms.DialogResult.OK)
# {
# flag = false
# }
# else
# {
# position_2 = dlgCalculateWaypoint.Waypoint
# flag = true
# }
# }
# return flag
# }
#
# public static bool smethod_1(IWin32Window iwin32Window_0, RnavCommonWaypoint rnavCommonWaypoint_0, AircraftSpeedCategory aircraftSpeedCategory_0, Position position_0, ref Position position_1, ref Position position_2, ref Position position_3)
# {
# bool flag
# using (DlgCalculateWaypoint dlgCalculateWaypoint = new DlgCalculateWaypoint())
# {
# dlgCalculateWaypoint.Text = string.Format("{0} {1}", Captions.CALCULATE, EnumHelper.smethod_0(rnavCommonWaypoint_0))
# dlgCalculateWaypoint.Type = rnavCommonWaypoint_0
# dlgCalculateWaypoint.Category = aircraftSpeedCategory_0
# dlgCalculateWaypoint.From = position_0
# dlgCalculateWaypoint.RwyThr = position_1
# dlgCalculateWaypoint.RwyEnd = position_2
# dlgCalculateWaypoint.Waypoint = position_3
# bool flag1 = dlgCalculateWaypoint.method_2(iwin32Window_0) == System.Windows.Forms.DialogResult.OK
# position_1 = dlgCalculateWaypoint.RwyThr
# position_2 = dlgCalculateWaypoint.RwyEnd
# if (flag1)
# {
# position_3 = dlgCalculateWaypoint.Waypoint
# }
# flag = flag1
# }
# return flag
# }
# } | [
"yongjin.818@gmail.com"
] | yongjin.818@gmail.com |
f29f4d3f9eb00ed98d6c9da648caeb5da3c9d380 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /f3jX2BwzAuR8DXsy4_22.py | e469acc301be3f4807256f980cb528fa19e2fb93 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | """
Create a function that takes an integer `n` and returns the **factorial of
factorials**. See below examples for a better understanding:
### Examples
fact_of_fact(4) ➞ 288
# 4! * 3! * 2! * 1! = 288
fact_of_fact(5) ➞ 34560
fact_of_fact(6) ➞ 24883200
### Notes
N/A
"""
import math
from functools import reduce
def fact_of_fact(n):
    """Return the "factorial of factorials": 1! * 2! * ... * n!.

    For example fact_of_fact(4) == 4! * 3! * 2! * 1! == 288.
    For n <= 0 the empty product 1 is returned; the original
    ``reduce``-based version raised TypeError on an empty sequence.
    """
    result = 1
    for i in range(1, n + 1):
        result *= math.factorial(i)
    return result
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fe29981fe14f0c7e9d65c8cd88d54641a0a4e314 | ca715ecc910c61ebf126be96c2e38224ff774fd7 | /bot.py | 6c56f8abe8888d77c6aa8415d7441978cdfc6084 | [] | no_license | jeraldseow/cryptobot | dd5bda9318ea6d4b94f90ead40f7c263008fb55b | 6ceccdbd26a835b6c8e3a74db67a40137b886f70 | refs/heads/master | 2023-04-27T16:47:29.765535 | 2021-05-17T11:09:39 | 2021-05-17T11:09:39 | 368,135,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | import websocket, json, pprint, talib, numpy, sender
ETH_USDT = "ethusdt"
BTC_USDT = "btcusdt"
SOCKET = f"wss://stream.binance.com:9443/ws/{BTC_USDT}@kline_1m"
RSI_PERIOD = 14
RSI_OVERBOUGHT = 70
RSI_OVERSOLD = 30
TRADE_SYMBOL = "ETHUSD"
closes = []
def on_open(ws):
    """Websocket callback: fired once the connection is established."""
    print("Opened Connection")
def on_close(ws):
    """Websocket callback: fired when the connection is closed."""
    print("Closed Connection")
def on_message(ws, message):
    """Websocket callback: record the latest close and alert on RSI extremes.

    NOTE(review): the stream is BTC/USDT (see SOCKET) but the alert texts
    mention Etherium -- confirm which symbol is actually intended.
    """
    global closes
    json_message = json.loads(message)
    #pprint.pprint(json_message)
    candle = json_message['k']
    is_candle_closed = candle['x']  # True once the 1-minute candle has closed
    close = candle['c']  # latest close price (string)
    # Deliberate override so every tick is processed while testing; restore
    # ``if is_candle_closed:`` for per-candle behaviour.
    if True: #change to if is_candle_closed, now true for testing purposes
        print("candle closed at {}".format(close))
        closes.append(float(close))
        #sender.send_message(f"Current Etherium price: {close}")
        if len(closes) > RSI_PERIOD:
            # talib wants a numpy array; the last RSI value is the current one.
            np_closes = numpy.array(closes)
            rsi = talib.RSI(np_closes, RSI_PERIOD)
            last_rsi = rsi[-1]
            print("the latest RSI is {}".format(last_rsi))
            # Keep the window bounded by dropping the oldest close.
            closes.pop(0)
            print("Closes:")
            print(closes)
            if last_rsi > RSI_OVERBOUGHT:
                print("SELL - BEING OVERBOUGHT")
                sender.send_message(f"Etherium being overbought, current price: {close}. Can consider selling!")
            if last_rsi < RSI_OVERSOLD:
                print("BUY - BEING OVERSOLD")
                sender.send_message(f"Etherium being oversold, current price: {close}. Can consider buying!")
# Wire the callbacks and block forever; reconnection is not handled here.
ws = websocket.WebSocketApp(SOCKET, on_open=on_open, on_close=on_close, on_message=on_message)
ws.run_forever()
"jeraldseowwenyuan@gmail.com"
] | jeraldseowwenyuan@gmail.com |
96093e0e0b317846982be7163fd0fb65f35e64a6 | 90fa5489f9849494da93d64d54557491bf85d52f | /PythonNLP/C05/C0502.py | 01b42fee0d9479ef0e45dd5c28809e341fbbdf0f | [] | no_license | liping2084/NLTK-Python-CN | f0a072e9632661e95b4fb6419bb125dcdb8d68cd | f5b7520811f727e0111336efd536b28bbb01edf1 | refs/heads/master | 2023-05-09T05:42:34.004242 | 2020-12-15T04:39:37 | 2020-12-15T04:39:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,587 | py | # -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : NLTK-Python-CN
@File : C0502.py
@Version : v0.1
@Time : 2020-11-19 10:34
@License : (C)Copyright 2018-2020, zYx.Tom
@Reference :
@Desc :
@理解:
"""
import nltk
from nltk.corpus import brown
from tools import show_subtitle
# Sec 5.2 标注语料库
# 5.2.1 表示已经标注的标识符
# str2tuple() 将已经标注的字符串转换成元组
# Ch5 分类和标注词汇
# 1. 什么是词汇分类,在自然语言处理中它们如何使用?
# 2. 对于存储词汇和它们的分类来说什么是好的 Python 数据结构?
# 3. 如何自动标注文本中每个词汇的词类?
# - 词性标注(parts-of-speech tagging,POS tagging):简称标注。将词汇按照它们的词性(parts-of-speech,POS)进行分类并对它们进行标注
# - 词性:也称为词类或者词汇范畴。
# - 标记集:用于特定任务标记的集合。
taggen_token = nltk.tag.str2tuple('fly/NN')
print("taggen_token= ", taggen_token)
print("taggen_token[0]= ", taggen_token[0])
print("taggen_token[1]= ", taggen_token[1])
# 拆分已经标注的语料文本为元组
sent = '''
The/AT grand/JJ jury/NN commented/VBD on/IN a/AT number/NN of/IN
other/AP topics/NNS ,/, AMONG/IN them/PPO the/AT Atlanta/NP and/CC
Fulton/NP-tl County/NN-tl purchasing/VBG departments/NNS which/WDT it/PPS
said/VBD ``/`` ARE/BER well/QL operated/VBN and/CC follow/VB generally/RB
accepted/VBN practices/NNS which/WDT inure/VB to/IN the/AT best/JJT
interest/NN of/IN both/ABX governments/NNS ''/'' ./.
'''
print("sent.split()= ", sent.split())
split_sent_to_tuple = [nltk.tag.str2tuple(t) for t in sent.split()]
print("split_sent_to_tuple= ", split_sent_to_tuple)
# 5.2.2 读取已经标注的语料库
# 打开brown语料库的ca01文件,可以看到下面的内容:
# The/at Fulton/np-tl County/nn-tl Grand/jj-tl Jury/nn-tl said/vbd Friday/nr an/at
# investigation/nn of/in Atlanta's/np$ recent/jj primary/nn election/nn produced/vbd
# ``/`` no/at evidence/nn ''/'' that/cs any/dti irregularities/nns took/vbd place/nn ./.
# 这个是已经标注好的语料库,可以使用函数tagged_words()直接读取
# tagset='universal' 使用通用标注集进行词类标注
# simplify_tags 不再支持
show_subtitle("brown.tagged_words()")
print(brown.tagged_words())
show_subtitle("brown.tagged_words(tagset='universal')")
print(brown.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.treebank.tagged_words()")
print(nltk.corpus.treebank.tagged_words())
show_subtitle("nltk.corpus.treebank.tagged_words(tagset='universal')")
print(nltk.corpus.treebank.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.nps_chat.tagged_words()")
print(nltk.corpus.nps_chat.tagged_words())
show_subtitle("nltk.corpus.nps_chat.tagged_words(tagset='universal')")
print(nltk.corpus.nps_chat.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.conll2000.tagged_words()")
print(nltk.corpus.conll2000.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.conll2000.tagged_words()")
print(nltk.corpus.conll2000.tagged_words(tagset='universal'))
# Note:以下的都无法正常转换为通用标注集
# 因为每个语言的标注集都是根据自己的情况定义的,不与通用标注集完全相同
show_subtitle("nltk.corpus.sinica_treebank.tagged_words()")
print(nltk.corpus.sinica_treebank.tagged_words())
show_subtitle("nltk.corpus.sinica_treebank.tagged_words(tagset='universal')")
# print(nltk.corpus.sinica_treebank.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.indian.tagged_words()")
print(nltk.corpus.indian.tagged_words())
show_subtitle("nltk.corpus.indian.tagged_words(tagset='universal')")
# print(nltk.corpus.indian.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.mac_morpho.tagged_words()")
print(nltk.corpus.mac_morpho.tagged_words())
show_subtitle("nltk.corpus.mac_morpho.tagged_words(tagset='universal')")
# print(nltk.corpus.mac_morpho.tagged_words(tagset='universal'))
show_subtitle("nltk.corpus.cess_cat.tagged_words()")
print(nltk.corpus.cess_cat.tagged_words())
show_subtitle("nltk.corpus.cess_cat.tagged_words(tagset='universal')")
# print(nltk.corpus.cess_cat.tagged_words(tagset='universal'))
# 使用tagged_sents()可以直接把语料库分割成句子,而不是将所有的词表示成一个链表,句子中的词同样进行了词类标注。
# 因为开发的自动标注器需要在句子链表上进行训练和测试,而不是在词链表上。
show_subtitle("brown.tagged_sents()[0]")
print(brown.tagged_sents()[0])
show_subtitle("brown.tagged_sents(tagset='universal')[0]")
print(brown.tagged_sents(tagset='universal')[0])
# 5.2.3 A Universal Part-of-Speech Tagset, 一个通用的(简化的)标注集
# http://www.nltk.org/book/ch05.html Table2.1 (比书P200 表5-1还要简单)
# Tag Meaning English Examples
# ADJ adjective new, good, high, special, big, local
# ADP adposition on, of, at, with, by, into, under
# ADV adverb really, already, still, early, now
# CONJ conjunction and, or, but, if, while, although
# DET determiner, article the, a, some, most, every, no, which
# NOUN noun year, home, costs, time, Africa
# NUM numeral twenty-four, fourth, 1991, 14:24
# PRT particle at, on, out, over per, that, up, with
# PRON pronoun he, their, her, its, my, I, us
# VERB verb is, say, told, given, playing, would
# . punctuation marks . , ; !
# X other ersatz, esprit, dunno, gr8, univeristy
# 布朗语料库的新闻类中哪些标记最常见
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
tag_fd = nltk.FreqDist(tag for (word, tag) in brown_news_tagged)
print("list(tag_fd)= ", list(tag_fd))
print("tag_fd.keys()= ", tag_fd.keys())
print("tag_fd.most_common()= ", tag_fd.most_common())
show_subtitle("tag_fd.tabulate()")
tag_fd.tabulate()
tag_fd.plot()
# 图形化的POS一致性工具,可以用来寻找任一词和POS标记的组合
# 例如:"VERB VERB" 或者 "was missing" 或者 "had VERB" 或者 "DET money" 等等
nltk.app.concordance()
# 5.2.4 名词
# 名词:一般指人、地点、事情和概念。可能出现在限定词和形容词之后,可以是动词的主语或者宾语。
# 表5-2 名词的句法模式
# 统计构成二元模型(W1,W2)中W2=‘NOUN’的W1的词性的比例
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
word_tag_pairs = nltk.bigrams(brown_news_tagged) # 构建双词链表
noun_precedes = [
a[1]
for (a, b) in word_tag_pairs
if b[1] == 'NOUN'
]
fdist = nltk.FreqDist(noun_precedes)
print("fdist.most_common()= ", fdist.most_common())
tag_list = [
tag
for (tag, _) in fdist.most_common()
]
print("tag_list= ", tag_list)
fdist.plot()
# 结论:名词最容易出现在名词后面
# 其次是出现在限定词和形容词之后,包括数字形容词(即数词,标注为NUM)
# 5.2.5 动词
# 动词:描述事件和行动的词。在句子中,动词通常表示涉及一个或多个名词短语所指示物的关系。
# 表5-3 动词的句法模式
# 找出新闻文本中最常见的动词(频率分布中计算的项目是词——标记对)
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
word_tag_fd = nltk.FreqDist(wsj)
show_subtitle("word_tag_fd.most_common(20)")
print(word_tag_fd.most_common(20))
word_tag_list = [
wt[0]
for (wt, _) in word_tag_fd.most_common()
if wt[1] == 'VERB'
]
show_subtitle("word_tag_list[:20]")
print(word_tag_list[:20])
fdist = nltk.FreqDist(word_tag_fd)
show_subtitle("fdist.most_common(20)")
print(fdist.most_common(20))
# fdist.plot() # 不能执行,会死机,因为动词单词数目太多
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
word_tag_pairs = nltk.bigrams(wsj)
verb_precedes = [
a[1]
for (a, b) in word_tag_pairs
if b[1] == 'VERB'
]
fdist = nltk.FreqDist(verb_precedes)
print("fdist.most_common()= ", fdist.most_common())
show_subtitle("fdist.tabulate()")
fdist.tabulate()
# fdist.plot()
# 结论:动词出现在名词、动词、名字和副词后面。
# 因为词汇和标记是成对的
# 所以把词汇作为条件,把标记作为事件,使用条件——事件对的链表初始化条件频率分布。
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
cfd1 = nltk.ConditionalFreqDist(wsj)
print("cfd1['yield'].most_common(20)= ", cfd1['yield'].most_common(20))
print("cfd1['cut'].most_common(20)= ", cfd1['cut'].most_common(20))
# 动词太多,执行时间过长,显示效果不好
# cfd1.tabulate()
# cfd1.plot()
# 也可以颠倒配对,把标记作为条件,词汇作为事件,生成条件频率分布,就可以直接查找标记对应哪些词了。
wsj = nltk.corpus.treebank.tagged_words()
cfd2 = nltk.ConditionalFreqDist((tag, word) for (word, tag) in wsj)
print("cfd2= ", cfd2)
print("cfd2['VBN']= ", cfd2['VBN'])
show_subtitle("cfd2['VBN'].most_common(20)")
print(cfd2['VBN'].most_common(20))
show_subtitle("list(cfd2['VBN'].keys())[:20]")
print(list(cfd2['VBN'].keys())[:20])
show_subtitle("cfd2['VBN'].most_common(20)")
print(cfd2['VBN'].most_common(20))
show_subtitle("in")
print("'been' in cfd2['VBN'].keys()= ", 'been' in cfd2['VBN'].keys())
# 尝试分辨VD(过去式)和VN(过去分词)之间的区别
# 先找出同是VD和VN的词汇,然后分析它们的上下文区别
wsj = nltk.corpus.treebank.tagged_words()
cfd3 = nltk.ConditionalFreqDist(wsj)
# cfd.conditions() 返回所有的条件构成的链表,等价于list(cfd1.keys())返回所有的关键字。
show_subtitle("cfd3.conditions()[:20]")
print(cfd3.conditions()[:20])
show_subtitle("list(cfd3.keys())[:20]")
print(list(cfd3.keys())[:20])
# 寻找既可以作为 VBD 还可以作为 VBN 的单词
word_list = [
w
for w in cfd3.conditions()
if 'VBD' in cfd3[w] and 'VBN' in cfd3[w]
]
show_subtitle("word_list[:20]")
print(word_list[:20])
# kicked 作为 VBD 的示例句子
idx1 = wsj.index(('kicked', 'VBD'))
show_subtitle("idx1")
print(' '.join(word for word, tag in wsj[idx1 - 10:idx1 + 10]))
# kicked 作为 VBN 的示例句子
idx2 = wsj.index(('kicked', 'VBN'))
show_subtitle("idx2")
print(' '.join(word for word, tag in wsj[idx2 - 10:idx2 + 10]))
# 5.2.6 其他词类
# (形容词、副词、介词、冠词(限定词)、情态动词、人称代词)
# 形容词:修饰名词,可以作为修饰符 或 谓语。
# 副词:修饰动词,指定时间、方式、地点或动词描述的事件发展方向;修饰形容词。
# 5.2.7 未简化的标记(P204)
# Ex5-1 找出最频繁的名词标记的程序
def find_tags(tag_prefix, tagged_text):
cfd = nltk.ConditionalFreqDist(
(tag, word)
for (word, tag) in tagged_text
if tag.startswith(tag_prefix))
return dict(
(tag, cfd[tag].most_common(5))
for tag in cfd.conditions()
)
brown_tagged_words = brown.tagged_words(categories='news')
tag_dict = find_tags('NN', brown_tagged_words)
for tag in sorted(tag_dict):
print(tag, tag_dict[tag])
# 5.2.8 探索已经标注的语料库
# 观察 often 后面的词汇
brown_learned_text = brown.tagged_words(categories='learned')
print(sorted(set(
b
for (a, b) in nltk.bigrams(brown_learned_text)
if a == 'often'
)))
brown_learned_tagged = brown.tagged_words(categories='learned', tagset='universal')
brown_learned_bigrams = nltk.bigrams(brown_learned_tagged)
print("brown_learned_bigrams= ", brown_learned_bigrams)
a_b_list = [
(a, b)
for (a, b) in brown_learned_bigrams
]
show_subtitle("a_b_list")
print(a_b_list[:20])
# 观察 often 后面的词汇
tags = [
b[1]
for (a, b) in nltk.bigrams(brown_learned_tagged)
if a[0] == 'often'
]
fd = nltk.FreqDist(tags)
fd.tabulate()
fd.plot()
# P205 Ex5-2 使用POS标记寻找三词短语(<Verb>to<Verb>)
def process(sentence):
for (w1, t1), (w2, t2), (w3, t3) in nltk.trigrams(sentence):
if t1.startswith('V') and t2 == 'TO' and t3.startswith('V'):
print(w1, w2, w3)
for i, tagged_sent in enumerate(brown.tagged_sents()):
if i < 100 and len(tagged_sent) >= 3:
process(tagged_sent)
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
data = nltk.ConditionalFreqDist(
(word.lower(), tag)
for (word, tag) in brown_news_tagged
)
for word in sorted(data.conditions()):
if len(data[word]) > 3:
tags = [
tag
for (tag, _) in data[word].most_common()
]
print(word, ' '.join(tags))
print("data['works']= ", data['works'])
print("data['$1']= ", data['$1'])
print("data['$222']= ", data['$222'])
# data.tabulate() # 执行结果过多,时间过长
show_subtitle("data.conditions()")
print(data.conditions()[:20])
show_subtitle("data.values()")
# print(data.values()) # 执行结果过多,时间过长
for i, (cfd_key, cfd_value) in enumerate(zip(data.keys(), data.values())):
if i < 20:
show_subtitle(cfd_key)
for j, fd_value in enumerate(cfd_value.values()):
print(fd_value, end=',')
nltk.app.concordance()
| [
"zhuyuanxiang@gmail.com"
] | zhuyuanxiang@gmail.com |
328697067ce65e17a1fc75506e3125b383a93687 | fcc307a37e476afe6974feba52a9c81e9feede55 | /Importable Files/Circuits.py | 48fdedd0b66f4d437639cdc787445f0566355065 | [
"Apache-2.0"
] | permissive | thealexrk/Optimizing-Gate-Synthesis | ab8be5cccbc91b8b300cc56fd4e15972feafab8a | 3bc36f5d591d746d7b0d142797b53fb52382edec | refs/heads/main | 2023-06-24T08:36:30.743415 | 2021-07-18T02:35:08 | 2021-07-18T02:35:08 | 383,876,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | import numpy as np
import tensorflow as tf
import strawberryfields as sf
from strawberryfields.ops import *
def one_mode_variational_quantum_circuit(cutoff, input_state=None, batch_size=None,
depth=25, active_sd=0.0001, passive_sd=0.1, **kwargs):
with tf.name_scope('variables'):
d_r = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
d_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
r1 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
sq_r = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
sq_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
r2 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
kappa = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
parameters = [d_r, d_phi, r1, sq_r, sq_phi, r2, kappa]
def layer(i, q, m):
with tf.name_scope('layer_{}'.format(i)):
Dgate(d_phi[i]) | q[m]
Rgate(r1[i]) | q[m]
Sgate(sq_r[i], sq_phi[i]) | q[m]
Rgate(r2[i]) | q[m]
Kgate(kappa[i]) | q[m]
return q
sf.hbar = 0.5
prog = sf.Program(1)
with prog.context as q:
if input_state is not None:
Ket(input_state) | q
for k in range(depth):
q = layer(k, q, 0)
if batch_size is not None:
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff, "batch_size": batch_size})
else:
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff})
state = eng.run(prog, run_options={"eval": False}).state
ket = state.ket()
return ket, parameters
def two_mode_variational_quantum_circuit(cutoff, input_state=None, batch_size=None,
depth=25, active_sd=0.0001, passive_sd=0.1, **kwargs):
with tf.name_scope('variables'):
theta1 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
phi1 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
r1 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
sq_r = tf.Variable(tf.random_normal(shape=[2, depth], stddev=active_sd))
sq_phi = tf.Variable(tf.random_normal(shape=[2, depth], stddev=passive_sd))
theta2 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
phi2 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
r2 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
d_r = tf.Variable(tf.random_normal(shape=[2, depth], stddev=active_sd))
d_phi = tf.Variable(tf.random_normal(shape=[2, depth], stddev=passive_sd))
kappa = tf.Variable(tf.random_normal(shape=[2, depth], stddev=active_sd))
parameters = [theta1, phi1, r1, sq_r, sq_phi, theta2, phi2, r2, d_r, d_phi, kappa]
def layer(i, q):
with tf.name_scope('layer_{}'.format(i)):
BSgate(theta1[k], phi1[k]) | (q[0], q[1])
Rgate(r1[i]) | q[0]
for m in range(2):
Sgate(sq_r[m, i], sq_phi[m, i]) | q[m]
BSgate(theta2[k], phi2[k]) | (q[0], q[1])
Rgate(r2[i]) | q[0]
for m in range(2):
Dgate(d_r[m, i], d_phi[m, i]) | q[m]
Kgate(kappa[m, i]) | q[m]
return q
sf.hbar = 2
prog = sf.Program(2)
with eng:
if input_state is not None:
Ket(input_state) | q
for k in range(depth):
q = layer(k, q)
if batch_size is not None:
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff, "batch_size": batch_size})
else:
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff})
state = eng.run(prog, run_options={"eval": False}).state
ket = state.ket()
return ket, parameters
def variational_quantum_circuit(*, modes, cutoff, input_state=None, batch_size=None,
depth=25, active_sd=0.0001, passive_sd=0.1, **kwargs):
if modes == 2:
return two_mode_variational_quantum_circuit(cutoff, input_state, batch_size, depth, active_sd, passive_sd, **kwargs)
return one_mode_variational_quantum_circuit(cutoff, input_state, batch_size, depth, active_sd, passive_sd, **kwargs) | [
"noreply@github.com"
] | noreply@github.com |
2174b795b603fa21096a6785c9d48f544f644a5f | f30163c5c3c2051a699062a2baa4a632e2d47ad6 | /openspeech/models/openspeech_encoder_decoder_model.py | de45acc8ed693c7d77c9502a55c9ffc3282f54ff | [
"MIT",
"LicenseRef-scancode-secret-labs-2011",
"Unlicense",
"HPND",
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | dnfcallan/openspeech | 6740672df1e9c43e898ff9eaa5fafdc20bf9593a | 55e50cb9b3cc3e7a6dfddcd33e6e698cca3dae3b | refs/heads/main | 2023-06-20T19:40:28.953644 | 2021-07-16T10:16:05 | 2021-07-16T10:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,191 | py | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from torch import Tensor
from collections import OrderedDict
from typing import Dict
from omegaconf import DictConfig
from openspeech.models import OpenspeechModel
from openspeech.utils import get_class_name
from openspeech.vocabs.vocab import Vocabulary
class OpenspeechEncoderDecoderModel(OpenspeechModel):
r"""
Base class for OpenSpeech's encoder-decoder models.
Args:
configs (DictConfig): configuration set.
vocab (Vocabulary): the class of vocabulary
Inputs:
- **inputs** (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be
a padded `FloatTensor` of size ``(batch, seq_length, dimension)``.
- **input_lengths** (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
- **y_hats** (torch.FloatTensor): Result of model predictions.
"""
def __init__(self, configs: DictConfig, vocab: Vocabulary, ) -> None:
super(OpenspeechEncoderDecoderModel, self).__init__(configs, vocab)
self.teacher_forcing_ratio = configs.model.teacher_forcing_ratio
self.encoder = None
self.decoder = None
self.criterion = self.configure_criterion(self.configs.criterion.criterion_name)
def set_beam_decoder(self, beam_size: int = 3):
raise NotImplementedError
def collect_outputs(
self,
stage: str,
logits: Tensor,
encoder_logits: Tensor,
encoder_output_lengths: Tensor,
targets: Tensor,
target_lengths: Tensor,
) -> OrderedDict:
cross_entropy_loss, ctc_loss = None, None
if get_class_name(self.criterion) == "JointCTCCrossEntropyLoss":
loss, ctc_loss, cross_entropy_loss = self.criterion(
encoder_logits=encoder_logits.transpose(0, 1),
logits=logits,
output_lengths=encoder_output_lengths,
targets=targets[:, 1:],
target_lengths=target_lengths,
)
elif get_class_name(self.criterion) == "LabelSmoothedCrossEntropyLoss" \
or get_class_name(self.criterion) == "CrossEntropyLoss":
loss = self.criterion(logits, targets[:, 1:])
else:
raise ValueError(f"Unsupported criterion: {self.criterion}")
predictions = logits.max(-1)[1]
wer = self.wer_metric(targets[:, 1:], predictions)
cer = self.cer_metric(targets[:, 1:], predictions)
self.info({
f"{stage}_loss": loss,
f"{stage}_cross_entropy_loss": cross_entropy_loss,
f"{stage}_ctc_loss": ctc_loss,
f"{stage}_wer": wer,
f"{stage}_cer": cer,
})
return OrderedDict({
"loss": loss,
"cross_entropy_loss": cross_entropy_loss,
"ctc_loss": ctc_loss,
"predictions": predictions,
"targets": targets,
"logits": logits,
"learning_rate": self.get_lr(),
})
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Dict[str, Tensor]:
r"""
Forward propagate a `inputs` and `targets` pair for inference.
Inputs:
inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* dict (dict): Result of model predictions that contains `predictions`, `logits`, `encoder_outputs`,
`encoder_logits`, `encoder_output_lengths`.
"""
logits = None
encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths)
if get_class_name(self.decoder) in ("BeamSearchLSTM", "BeamSearchTransformer"):
predictions = self.decoder(encoder_outputs, encoder_output_lengths)
else:
logits = self.decoder(
encoder_outputs=encoder_outputs,
encoder_output_lengths=encoder_output_lengths,
teacher_forcing_ratio=0.0,
)
predictions = logits.max(-1)[1]
return {
"predictions": predictions,
"logits": logits,
"encoder_outputs": encoder_outputs,
"encoder_logits": encoder_logits,
"encoder_output_lengths": encoder_output_lengths,
}
def training_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
r"""
Forward propagate a `inputs` and `targets` pair for training.
Inputs:
train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths`
batch_idx (int): The index of batch
Returns:
loss (torch.Tensor): loss for training
"""
inputs, targets, input_lengths, target_lengths = batch
encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths)
if get_class_name(self.decoder) == "TransformerDecoder":
logits = self.decoder(
encoder_outputs=encoder_outputs,
targets=targets,
encoder_output_lengths=encoder_output_lengths,
target_lengths=target_lengths,
teacher_forcing_ratio=self.teacher_forcing_ratio,
)
else:
logits = self.decoder(
encoder_outputs=encoder_outputs,
targets=targets,
encoder_output_lengths=encoder_output_lengths,
teacher_forcing_ratio=self.teacher_forcing_ratio,
)
return self.collect_outputs(
stage='train',
logits=logits,
encoder_logits=encoder_logits,
encoder_output_lengths=encoder_output_lengths,
targets=targets,
target_lengths=target_lengths,
)
def validation_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
r"""
Forward propagate a `inputs` and `targets` pair for validation.
Inputs:
train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths`
batch_idx (int): The index of batch
Returns:
loss (torch.Tensor): loss for training
"""
inputs, targets, input_lengths, target_lengths = batch
encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths)
logits = self.decoder(
encoder_outputs,
encoder_output_lengths=encoder_output_lengths,
teacher_forcing_ratio=0.0,
)
return self.collect_outputs(
stage='val',
logits=logits,
encoder_logits=encoder_logits,
encoder_output_lengths=encoder_output_lengths,
targets=targets,
target_lengths=target_lengths,
)
def test_step(self, batch: tuple, batch_idx: int) -> OrderedDict:
r"""
Forward propagate a `inputs` and `targets` pair for test.
Inputs:
train_batch (tuple): A train batch contains `inputs`, `targets`, `input_lengths`, `target_lengths`
batch_idx (int): The index of batch
Returns:
loss (torch.Tensor): loss for training
"""
inputs, targets, input_lengths, target_lengths = batch
encoder_outputs, encoder_logits, encoder_output_lengths = self.encoder(inputs, input_lengths)
logits = self.decoder(
encoder_outputs,
encoder_output_lengths=encoder_output_lengths,
teacher_forcing_ratio=0.0,
)
return self.collect_outputs(
stage='test',
logits=logits,
encoder_logits=encoder_logits,
encoder_output_lengths=encoder_output_lengths,
targets=targets,
target_lengths=target_lengths,
)
| [
"sooftware@Soohwanui-MacBookPro.local"
] | sooftware@Soohwanui-MacBookPro.local |
8c6e4b91e7b6f79b3630064c37797349bea0cd72 | ae926cc3cf35b56db4788792c0f6efeee6fadfe6 | /backend/backend/settings.py | ae37e1bf39f04cc72e335d49b78c23616e139976 | [
"MIT"
] | permissive | dgrechka/RAAHAnomalies | c3dcfab601a55be2a592f030350927bcbedd1334 | db045f77a2cfade903c4206958641ee0a0e94566 | refs/heads/master | 2021-01-13T17:09:52.856709 | 2016-09-30T05:49:12 | 2016-09-30T05:49:12 | 69,476,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't!i3o(%j0ve43=34av9-2l_hylmdtogag_p^zgp=)_5&*ox9*y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"dmitry@grechka.family"
] | dmitry@grechka.family |
0383b2b2b507b402219ade21d8941ecaa8968754 | 035b1bf87df0c27222da1ce0495b9e6f65ac3ce1 | /user/urls.py | a05cae585fd00a00a1256983e0bc058d8202552d | [] | no_license | klevamane/Airtech | 445760eb779ba65c7f878f0cbd72f5583702b241 | c04cf002c26f774428cddc8572886f933a14ab10 | refs/heads/develop | 2022-12-11T18:55:10.251077 | 2021-06-11T03:04:16 | 2021-06-11T03:04:16 | 201,683,952 | 0 | 0 | null | 2022-12-08T08:29:34 | 2019-08-10T21:18:28 | Python | UTF-8 | Python | false | false | 418 | py | from django.urls import path, include
from user.views import ListCreateUsers, UpdateUser, RetrieveUser, UserPassport
urlpatterns = [
path('', ListCreateUsers.as_view(), name='list_user'),
path('update/<int:pk>/', UpdateUser.as_view(), name='update_user'),
path('<int:pk>/', RetrieveUser.as_view(), name='retrieve_user'),
path('delete/<int:pk>/', UserPassport.as_view(), name='delete_user_passport')
]
| [
"klevamane@gmail.com"
] | klevamane@gmail.com |
e42aacb10289b7c12bad3e11cd8976449bf40bab | a52aa2887c455cbf5356f2daf18b227eaa9b2bea | /PageObject/Sites.py | 2ae3b6b908c926ad228a773b3bca34dbea19accc | [] | no_license | soneygeorge20/Test | 70cd0deeeb362b8eed1615925caa80f6f947936b | 3c8aa2836dd5e91bea4816a9fcba5495c10a44b5 | refs/heads/master | 2022-08-28T20:43:01.608571 | 2020-05-27T05:45:50 | 2020-05-27T05:45:50 | 267,230,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
import time
from Tests.test_Login import login
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.action_chains import ActionChains
class EstateManagement():
def __init__(self, driver):
self.driver = driver
self.Intellegence = "//div[@class='ant-menu-submenu-title']"
# 3.Sites Details:
self.Sites = "//*[@id='root']/section/section/section/main/div/div[1]/div/div/div/div/div[1]/div[3]"
self.AddSites = "//button[@class = 'ant-btn permit-primary-btn add-to-collection-btn']"
# self.SelectManCoName = "/html/body/div[7]/div/div[2]/div/div/div[2]/form/div/div[1]/div[1]/div[2]/div[1]/div[2]/div/span/div/div/span/i/svg"
# self.SelectClientName = "/html/body/div[7]/div/div[2]/div/div/div[2]/form/div/div[1]/div[1]/div[2]/div[2]/div[2]/div/span/div/div/span/i/svg"
self.SiteName = "//Input[@name = 'siteName']"
self.SiteId = "//Input[@name = 'siteId']"
self.Status = "//*[contains(text(), 'Select Status')]"
# Address
self.BuildingName = "//Input[@name = 'buildingName']"
self.AddressLine1 = "//Input[@name = 'address1']"
self.AddressLine2 = "//Input[@name = 'address2']"
self.Town = "//Input[@name = 'town']"
self.County = "//Input[@name = 'county']"
self.PostCode = "//Input[@name = 'postCode']"
self.Country = "//div[contains(text(),'Country')]"
# Contact
self.Title = "//div[contains(text(),'Mr')]"
self.FirstName = "//Input[@name = 'contacts[0].firstName']"
self.LastName = "//Input[@name = 'contacts[0].lastName']"
self.Email = "//Input[@name = 'contacts[0].email']"
self.CountryCode = "//Input[@name = 'countryCode']"
self.MobileNumber = "//Input[@name = 'contacts[0].mobileNumber']"
self.LandLine = "//Input[@name = 'contacts[0].landLine']"
self.AddSitesSubmit = "//button[@class = 'ant-btn permit-primary-btn']"
self.ClickCarPark = "[role='tabpanel']:nth-of-type(3) .ant-row-flex-middle:nth-of-type(1) div:nth-of-type(1) .link-display"
def Click_Intellegence(self):
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.element_to_be_clickable((By.XPATH, self.Intellegence)))
element.click()
def Click_Sites_and_AddSites(self):
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.element_to_be_clickable((By.XPATH, self.Sites)))
element.click()
# wait = WebDriverWait(self.driver, 10)
# element = wait.until(ec.element_to_be_clickable((By.XPATH, self.AddSites)))
# element.click()
# wait = WebDriverWait(self.driver, 10)
# element = wait.until(ec.element_to_be_clickable((By.XPATH, self.Sites)))
# element.click()
time.sleep(5)
#Hover_Sites:
element7 = self.driver.find_element_by_xpath("/html//div[@id='root']/section/section/section[@class='ant-layout']/main[@class='ant-layout-content']//div[@class='ant-tabs-content ant-tabs-content-animated ant-tabs-top-content']/div[3]//div[@class='ant-table-body']/table[@class='ant-table-fixed']/tbody/tr[3]//span[.='BMWsite']")
actions = ActionChains(self.driver)
actions.move_to_element(element7)
actions.perform()
wait = WebDriverWait(self.driver, 15)
element8 = wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, self.ClickCarPark)))
actions.move_to_element(element8)
actions.perform()
element8.click()
time.sleep(5)
''' def Click_SitesDetails(self):
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.SiteName)))
element.send_keys("ABABA")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.SiteId)))
element.send_keys("CDCDCD")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.Status)))
element.click()
element = self.driver.find_element_by_xpath("//li[contains(text(),'Active')]")
element.click()
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.BuildingName)))
element.send_keys("RRRR")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.AddressLine1)))
element.send_keys("19th Street")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.AddressLine2)))
element.send_keys("DownCourt")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.Town)))
element.send_keys("QWERTY")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.County)))
element.send_keys("RedField")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.PostCode)))
element.send_keys("QWERTY09999")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.Country)))
element.click()
element = self.driver.find_element_by_xpath("//li[contains(text(),'UK')]")
element.click()
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.Title)))
element.click()
element = self.driver.find_element_by_xpath("(//li[contains(text(),'Mr')])[1]")
element.click()
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.FirstName)))
element.send_keys("Jhames")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.LastName)))
element.send_keys("Robert")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.Email)))
element.send_keys("jhames.robert777@gmail.com")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.MobileNumber)))
element.send_keys("9090909090")
wait = WebDriverWait(self.driver, 10)
element = wait.until(ec.presence_of_element_located((By.XPATH, self.LandLine)))
element.send_keys("0101909090")
time.sleep(10)
# element = wait.until(ec.presence_of_element_located((By.XPATH, self.AddClientsSubmit)))
# element.click()
''' | [
"ramanan.renjith@hashedin.com"
] | ramanan.renjith@hashedin.com |
d213dcb78a7385c863ee504ef0b48d0dd068f1cc | 248f4ca913ccee282611ef2300f31a7bfd0d7add | /0x00-python-hello_world/code/0-main.py | b4e714ac1ef649522cb646efdbf3b0a45e056f7b | [
"MIT"
] | permissive | Chivylif/alx-higher_level_programming-1 | c17b2e390e19c208ce6328faf7281f71bccea026 | 0555feb195ee7373d1d6f9eb300d30ca10a967d8 | refs/heads/main | 2023-09-04T16:39:43.168964 | 2021-11-15T12:45:56 | 2021-11-15T12:45:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | #!/usr/bin/python3
print('Best School')
| [
"paannang@st.ug.edu.gh"
] | paannang@st.ug.edu.gh |
4a5dfdff1d4b9a470669aab31e774e0f0efe5b61 | 7eb3ca539f3419056181ec6dcaf81ce43772aed3 | /Covid19/settings.py | 58dbd7c7ab25d0d96ec4da83959849155a841c50 | [] | no_license | XSarthakJain/Covid19 | b3e57f0abf40b0986e8eee99dc52f8488034a081 | f2b8a8d8639a4b534522ff6d229faf6863bacd45 | refs/heads/master | 2021-04-24T00:17:18.395687 | 2020-12-18T18:22:45 | 2020-12-18T18:22:45 | 250,041,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | """
Django settings for Covid19 project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7d1z1cjj705vemd$=m9leaw08lu4hyf$f7kgnht8nye%s3f#*+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Covid'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Covid19.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Covid19.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
MEDIA_ROOT='Covid\\media\\'
MEDIA_URL='/media/'
STATIC_URL = '/static/'
#MEDIA_ROOT=os.path.join(BASE_DIR,'Covid19/media'
| [
"sarthakjain31797@gmail.com"
] | sarthakjain31797@gmail.com |
e5ddd7aba69f9a89bd05e9fe964134631de6076e | c5a60e12ee8ccfe3d54af7002ea2415dfe1d465f | /Utils/OfTheSample.py | 731b34287f72a7b6a07c94ef64cdf67d2155b949 | [] | no_license | ptax/WebCrawler_FR | 3e2e34ca4cde20dd3590a4e9d554e744a62d663f | 3fa69996862b2d921a594a3160f800486d8c2ab8 | refs/heads/master | 2020-12-03T03:52:09.027456 | 2017-09-18T07:23:05 | 2017-09-18T07:23:05 | 95,782,184 | 1 | 1 | null | 2017-08-14T06:48:22 | 2017-06-29T13:46:30 | Python | UTF-8 | Python | false | false | 6,181 | py | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
sys.setdefaultencoding('utf-8')
import os
from bs4 import BeautifulSoup
import urllib2
import codecs
import re
import urllib
import collections
import Utils.SaveAndLoadDictFile
import Utils.GetListInFile
import Utils.convert_to_latin
def UrlWikiConverName(Url):
Url = urllib.unquote(Url)
Url = Url.replace('/wiki/', '')
Url = re.sub('_\(.*', '', Url)
return Url
def NameWinkiConvertUrl(Name):
Name = Name.replace(' ', '_')
return Name
def get_moderation_canton():
Up_Commune = r'../WorkBaseFile/Up_Commune'
Up_Commune_list = Utils.GetListInFile.Run(Up_Commune)
print len(Up_Commune_list)
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/27_07_17_Up_Moreration_commune_3')
Dict_Up_Commune = {}
for up_key in Up_Commune_list:
Data = load_base[up_key]
Dict_Up_Commune[up_key] = Data
name_dict = '../WorkBaseFile/28_07_17_Moderation_Commun_Onli'
Utils.SaveAndLoadDictFile.SaveDict(Dict_Up_Commune, name_dict)
def inseeName_not_egual_wiki_url():
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/02_08_17_release_3')
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
try:
if UrlWikiConverName(Data['Wiki_Url']) == NameWinkiConvertUrl(Data['InseeXls_NameCommune']):
F_Compare_InseeXls_NameCommune_Wiki_Url = True
else:
F_Compare_InseeXls_NameCommune_Wiki_Url = False
except:
F_Compare_InseeXls_NameCommune_Wiki_Url = 'None'
if F_Compare_InseeXls_NameCommune_Wiki_Url == False:
Dict_not_egual[Keys] = Data
else:
pass
name_dict = '../WorkBaseFile/02_08_17_inseeName_not_egual_wiki_url'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
def inseeName_not_egual_google_name():
load_base = Utils.SaveAndLoadDictFile.LoadDict(
'../WorkBaseFile/01_08_17_inseeName_not_egual_google_name_UP_coodinates')
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
try:
F_ComunName_Comprasions = Utils.convert_to_latin.comun_name_wiki_google_comparisons(
str(Data['G_Locality_long_name']).decode('utf-8'), str(Data['InseeXls_NameCommune']).decode('utf-8'))
NameWiki = F_ComunName_Comprasions['Wiki_NameSnipet_lower'].replace(' ', '')
NameGoogle = F_ComunName_Comprasions['G_Locality_short_name_lower'].replace(' ', '')
if NameWiki == NameGoogle:
F_Compare_InseeXls_NameCommune_G_Locality_long_name = True
else:
F_Compare_InseeXls_NameCommune_G_Locality_long_name = False
except:
F_Compare_InseeXls_NameCommune_G_Locality_long_name = 'None'
if F_Compare_InseeXls_NameCommune_G_Locality_long_name == False:
print NameWiki, NameGoogle
Dict_not_egual[Keys] = Data
else:
pass
name_dict = '../WorkBaseFile/01_08_17_inseeName_not_egual_google_name_UP_coodinates_test'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
def g_type_nonel():
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/01_08_17_Up_Moderation')
print len(load_base)
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
print Keys, Data['G_Types']
try:
G_Types = Data['G_Types']
except:
G_Types = 'None'
if G_Types in 'None':
Dict_not_egual[Keys] = Data
else:
pass
name_dict = '../WorkBaseFile/01_08_17_G_Type_none'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
def southwest_and_northeast_none():
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/02_08_17_release_2')
print len(load_base)
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
try:
G_Coordinates_northeast_Lat_1 = Data['G_Coordinates_northeast_Lat_1']
except:
G_Coordinates_northeast_Lat_1 = 'None'
if G_Coordinates_northeast_Lat_1 == 'None':
Dict_not_egual[Keys] = Data
else:
pass
name_dict = '../WorkBaseFile/02_08_17_G_Coordinates_northeast'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
def post_code_not_int():
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/02_08_17_release_2')
print len(load_base)
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
try:
W_CodePostal = Data['W_CodePostal'].replace(',', '').strip()
except:
W_CodePostal = 'None'
try:
W_CodePostal = int(W_CodePostal)
except:
W_CodePostal = False
if W_CodePostal == False:
Dict_not_egual[Keys] = Data
# print Keys,Data['W_CodePostal']
else:
pass
name_dict = '../WorkBaseFile/02_08_17_wiki_post_code_not_int'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
def G_type_not_locality():
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/02_08_17_release_2')
print len(load_base)
Dict_not_egual = {}
for Data, Keys in zip(load_base.values(), load_base.keys()):
try:
G_type = Data['G_Types']
except:
G_type = 'None'
if 'locality' in str(G_type):
pass
else:
Dict_not_egual[Keys] = Data
name_dict = '../WorkBaseFile/08_09_17_not_in_locality'
Utils.SaveAndLoadDictFile.SaveDict(Dict_not_egual, name_dict)
if __name__ == '__main__':
# post_code_not_int()
#G_type_not_locality()
load_base = Utils.SaveAndLoadDictFile.LoadDict('../WorkBaseFile/08_09_17_not_in_locality')
print len(load_base)
#print load_base[39177]['G_Types']
#print load_base['70369']['Wiki_Url']
for Data, Keys in zip(load_base.values(), load_base.keys()):
print Data['G_Types']
| [
"truhanski@gmail.com"
] | truhanski@gmail.com |
ea7bf0f7096a8349299ef936f938975ab7b73adb | 01816e373df80cd141d99bb206d354f36606b73b | /collegecircles/profiles/forms.py | 28af29223fa8744b1b389e8b82286322c2429b7e | [] | no_license | enliven-/task-it | f9304e01190e28ec9f1c461b34d7da61929926ae | 8af0b0bf878f4f1a5419c5075fbceaa07fe1b68f | refs/heads/master | 2021-01-10T21:01:48.574874 | 2012-10-16T06:48:37 | 2012-10-16T06:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | from django import forms
from django.utils.translation import ugettext_lazy as _
from userena.forms import SignupForm
class SignupFormExtra(SignupForm):
"""
A form to demonstrate how to add extra fields to the signup form, in this
case adding the first and last name.
"""
first_name = forms.CharField(label=_(u'First name'),
max_length=30,
required=False)
last_name = forms.CharField(label=_(u'Last name'),
max_length=30,
required=False)
def __init__(self, *args, **kw):
"""
A bit of hackery to get the first name and last name at the top of the
form instead at the end.
"""
super(SignupFormExtra, self).__init__(*args, **kw)
# Put the first and last name at the top
new_order = self.fields.keyOrder[:-2]
new_order.insert(0, 'first_name')
new_order.insert(1, 'last_name')
self.fields.keyOrder = new_order
def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user
| [
"enliven.viksit@gmail.com"
] | enliven.viksit@gmail.com |
5937a083574b20b77de3073d1b7317e4f94be9ec | c9cf4e7acd3ff09412610965dc83988b3f501e5e | /utils/readWrite/read.py | 2fe030fe3668a47d797bc8bc787023f8779bee51 | [] | no_license | Noba1anc3/General-Doc-SemSeg | 31df6cc0c747c5586fbbeb9dace6170d3fbef4bd | 27d9761fd45b2d5d52cfe3ed50413f902912b238 | refs/heads/master | 2021-05-19T04:15:42.604378 | 2020-03-31T06:59:45 | 2020-03-31T06:59:45 | 251,524,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,247 | py | import os
import sys
from configparser import ConfigParser
from utils.logging.syslog import Logger
class Configuration():
def __init__(self):
self.logging = Logger(__name__)
Logger.get_log(self.logging).info('Start processing ConfigFile')
self.config()
Logger.get_log(self.logging).info('ConfigFile Processed\n')
def config(self):
cp = ConfigParser()
cp.read('conf.cfg')
self.folder = cp.get('configuration', 'folder')
self.filename = cp.get('configuration', 'filename')
self.tit_choice = cp.getint('configuration', 'tit_choice')
self.text_level = cp.getint('configuration', 'text_level')
self.table_level = cp.getint('configuration', 'table_level')
self.save_text = cp.getboolean('configuration', 'save_text')
self.save_image = cp.getboolean('configuration', 'save_image')
self.configCheck()
self.output_folder = 'output/'
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
if self.save_text or self.save_image:
self.prediction_folder = self.output_folder + 'prediction/'
if not os.path.exists(self.prediction_folder):
os.mkdir(self.prediction_folder)
if self.save_text == True:
self.json_folder = self.prediction_folder + 'json/'
if not os.path.exists(self.json_folder):
os.mkdir(self.json_folder)
if self.save_image == True:
self.img_folder = self.prediction_folder + 'image/'
if not os.path.exists(self.img_folder):
os.mkdir(self.img_folder)
if self.filename == 'all':
self.fileList = sorted(os.listdir(self.folder))
else:
self.fileList = [self.filename]
def configCheck(self):
if not self.folder[-1] == '/':
Logger.get_log(self.logging).critical('Configuration - Folder Format Error')
print("Configuration - Folder may loss '/' to the end of the path")
y_n = input("Do you want system add '/' to the end of path ? (Y/N)\n")
if y_n.lower() == 'y' or y_n.lower() == 'yes':
self.folder += '/'
else:
sys.exit()
if not self.filename == 'all' and not self.filename[-4:] == '.pdf':
Logger.get_log(self.logging).critical('Configuration - FileName Not End With .pdf ')
print('Configuration - FileName Not End With \'.pdf\'')
y_n = input("Do you want system add '.pdf' to the end of filename ? (Y/N)\n")
if y_n.lower() == 'y' or y_n.lower() == 'yes':
self.filename += '.pdf'
else:
sys.exit()
if not (self.tit_choice == 0 or self.tit_choice == 1 or self.tit_choice == 2 or self.tit_choice == 3):
Logger.get_log(self.logging).critical('Configuration - tit_choice Format Error ')
while True:
print('Configuration - tit_choice Format Error')
tit_choice = input("Please press 0/1/2/3 to specify a tit_choice \n")
if tit_choice == '0' or tit_choice == '1' or tit_choice == '2' or tit_choice == '3':
self.tit_choice = tit_choice
break
if not (self.text_level == 1 or self.text_level == 2):
Logger.get_log(self.logging).critical('Configuration - text_level Format Error ')
while True:
print('Configuration - text_level Format Error ')
text_level = input("Please press 1/2 to specify a text_level \n")
if text_level == '1' or text_level == '2':
self.text_level = text_level
break
if not (self.table_level == 1 or self.table_level == 2):
Logger.get_log(self.logging).critical('Configuration - table_level Format Error ')
while True:
print('Configuration - table_level Format Error ')
table_level = input("Please press 1/2 to specify a table_level \n")
if table_level == '1' or table_level == '2':
self.text_level = table_level
break | [
"zxryhjp@yahoo.co.jp"
] | zxryhjp@yahoo.co.jp |
f136d9db8b94f0e53efbbc70c47580ef221ffdba | b37c3880d70bcddc4d5bbf6a9eda1448d6479ccf | /models/actions/collect_stats_from_model.py | f55d8effb6b0ab678dc23a2b1fcf3ef6a2aaabb1 | [
"Apache-2.0"
] | permissive | marco-foscato/Lib-INVENT | 1e04efafa831f37520d713edefc65c44295a0937 | fe6a65ab7165abd87b25752a6b4208c8703d11f7 | refs/heads/main | 2023-08-20T00:59:47.398695 | 2021-10-12T09:26:56 | 2021-10-12T09:26:56 | 414,649,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,471 | py | import random
import scipy.stats as sps
import numpy as np
import torch
from rdkit import Chem
from reinvent_chemistry.library_design import BondMaker, AttachmentPoints
from models.actions.calculate_nlls_from_model import CalculateNLLsFromModel
from models.actions.sample_model import SampleModel
class CollectStatsFromModel:
"""Collects stats from an existing RNN model."""
def __init__(self, model, epoch, training_set, validation_set, sample_size,
decoration_type="all", with_weights=False, other_values=None):
"""
Creates an instance of CollectStatsFromModel.
: param model: A model instance initialized as sampling_mode.
: param epoch: Epoch number to be sampled(informative purposes).
: param training_set: Iterator with the training set.
: param validation_set: Iterator with the validation set.
: param writer: Writer object(Tensorboard writer).
: param other_values: Other values to save for the epoch.
: param sample_size: Number of molecules to sample from the training / validation / sample set.
: param decoration_type: Kind of decorations (single or all).
: param with_weights: To calculate or not the weights.
: return:
"""
self.model = model
self.epoch = epoch
self.sample_size = sample_size
self.training_set = training_set
self.validation_set = validation_set
self.other_values = other_values
self.decoration_type = decoration_type
self.with_weights = with_weights
self.sample_size = max(sample_size, 1)
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._calc_nlls_action = CalculateNLLsFromModel(self.model, 128)
self._sample_model_action = SampleModel(self.model, 128)
@torch.no_grad()
def run(self):
"""
Collects stats for a specific model object, epoch, validation set, training set and writer object.
: return: A dictionary with all the data saved for that given epoch.
"""
data = {}
sliced_training_set = list(random.sample(self.training_set, self.sample_size))
sliced_validation_set = list(random.sample(self.validation_set, self.sample_size))
sampled_training_mols, sampled_training_nlls = self._sample_decorations(next(zip(*sliced_training_set)))
sampled_validation_mols, sampled_validation_nlls = self._sample_decorations(next(zip(*sliced_validation_set)))
training_nlls = np.array(list(self._calc_nlls_action.run(sliced_training_set)))
validation_nlls = np.array(list(self._calc_nlls_action.run(sliced_validation_set)))
data.update({"sampled_training_mols": sampled_training_mols, "sampled_validation_mols": sampled_validation_mols,
"training_nlls": training_nlls, "validation_nlls": validation_nlls,
"binned_jsd": self.jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls], binned=True),
"unbinned_jsd": self.jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls], binned=False)
})
return data
def _sample_decorations(self, scaffold_list):
mols = []
nlls = []
for scaff, decoration, nll in self._sample_model_action.run(scaffold_list):
labeled_scaffold = self._attachment_points.add_attachment_point_numbers(scaff, canonicalize=False)
molecule = self._bond_maker.join_scaffolds_and_decorations(labeled_scaffold, decoration)
if molecule:
mols.append(Chem.MolToSmiles(molecule))
nlls.append(nll)
return mols, np.array(nlls)
def bin_dist(self, dist, bins=1000, dist_range=(0, 100)):
bins = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]
bins[bins == 0] = 1
return bins / bins.sum()
def jsd(self, dists, binned=False):
min_size = min(len(dist) for dist in dists)
dists = [dist[:min_size] for dist in dists]
if binned:
dists = [self.bin_dist(dist) for dist in dists]
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return sum((sps.entropy(dist, avg_dist) for dist in dists)) / num_dists
| [
"vendy.fialkova@gmail.com"
] | vendy.fialkova@gmail.com |
43213fd8ed7802e09246a9ad9f3a8db7e018b0c0 | 4e0edae5e6d44efb29ac10983565b5898be66cc7 | /Backend/JobDecision/universities/schema.py | 2bdbe7415fdc4b3d90f3ea2881dcc199abe7a8eb | [] | no_license | deaddesert/Job-Decision-Blog | 3fbf8a3258bf0a50654db7d97d1b99be121e6bd9 | 8bdeadfe76b3d8366fa8469f28098f6ce71aa7d2 | refs/heads/master | 2023-04-22T02:40:10.729697 | 2021-05-13T06:24:27 | 2021-05-13T06:24:27 | 366,952,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | import graphene
from .models import Universities
from graphene_django import DjangoObjectType
class UniversityType(DjangoObjectType):
class Meta:
model = Universities
class Query(graphene.ObjectType):
all_universities = graphene.List(UniversityType)
def resolve_all_universities(self, info, **kwargs):
return Universities.objects.all() | [
"daipham1302@gmail.com"
] | daipham1302@gmail.com |
d5daa8b5cd6d5f6b59bd29392c68fe322c4c8036 | 0938650b1b4c8bedfc1c1c9a20ed7d5f128d4336 | /money/dbs.py | c7a0cc32d547643931c9d0b80c5b3826e2772be4 | [] | no_license | hussachai/money | 21a5d3234aa0ac1dc5d259c42a9195e46bbc994e | 689a88528dad3e146115ecdfec3a45d49d22da2b | refs/heads/master | 2021-05-04T22:50:37.161863 | 2018-02-03T07:12:26 | 2018-02-03T07:12:26 | 120,067,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | from django.db import connection
from django.db.models.functions import Coalesce
from money.models import *
def get_saving_goal(year):
return AnnualGoal.objects.filter(year=year).values_list('target_saving', flat=True).get(pk=1)
def get_record_years():
# return StmtBalance.objects.order_by('-closing_year')\
# .distinct().values_list('closing_year', flat=True)
query = "SELECT DISTINCT closing_year FROM money_stmt_balances ORDER by closing_year DESC"
with connection.cursor() as cursor:
cursor.execute(query)
rows = cursor.fetchall()
return [row[0] for row in rows]
def mode_to_field_proj(mode):
if mode == "billed":
return "sb.closing_date"
elif mode == "due":
return "sb.due_date"
else:
return "sd.tx_date"
def get_statement_summary(year, month=None, mode="purchased"):
extra_cond = ""
if month is not None:
extra_cond = "AND s.c_month = %s"
prj_field = mode_to_field_proj(mode)
query = f"""
SELECT s.c_year, s.c_month, s.category_name, s.category_color, SUM(s.sum_amount) AS total FROM
(
(
SELECT s.c_year, s.c_month, c.id AS category_id, c.name AS category_name,
c.color AS category_color, SUM(s.amount) * -1 AS sum_amount FROM
(SELECT YEAR({prj_field}) AS c_year, MONTH({prj_field}) AS c_month, sd.category_id, sd.amount
FROM money_stmt_details sd JOIN money_stmt_balances sb ON sd.balance_id = sb.id
WHERE sb.closing_year = %s
) s
JOIN money_categories c ON s.category_id = c.id
WHERE c.cat_type <> 0
GROUP BY c_year, c_month, s.category_id
)
UNION
(
SELECT YEAR(stmt.tx_date) AS c_year, MONTH(stmt.tx_date) AS c_month,
c.id AS category_id, c.name AS category_name, c.color AS category_color, SUM(stmt.amount) AS sum_amount
FROM money_income_stmts stmt JOIN money_categories c ON stmt.category_id = c.id
WHERE c.cat_type <> 0 AND stmt.tx_year = %s
GROUP BY c_year, c_month, stmt.category_id
)
) s
WHERE c_year = %s {extra_cond}
GROUP BY s.c_year, s.c_month, s.category_name, s.category_color
ORDER BY s.c_month DESC, total DESC
"""
with connection.cursor() as cursor:
if month is not None:
cursor.execute(query, [year, year, year, month])
else:
cursor.execute(query, [year, year, year])
row = fetchall_as_dict(cursor)
return row
def get_statement_details(year, month, mode):
prj_field = mode_to_field_proj(mode)
query = f"""
SELECT * FROM
(
(
SELECT 'C' AS tx_type, sd.id AS tx_id, ca.name AS account_name, sd.tx_date, sb.closing_date, sb.due_date, c.name AS category, sd.amount
FROM money_stmt_details sd
JOIN money_stmt_balances sb ON sd.balance_id = sb.id
JOIN money_credit_accounts ca ON sb.account_id = ca.id
JOIN money_categories c ON sd.category_id = c.id
WHERE {prj_field} >= '%s-01-01' AND {prj_field} < '%s-01-01' AND month({prj_field}) = %s AND c.cat_type <> 0
)
UNION
(
SELECT 'D' AS tx_type, mis.id AS tx_id, ba.name AS account_name, mis.tx_date, mis.tx_date AS closing_date,
mis.tx_date AS due_date, c.name AS category, mis.amount
FROM money_income_stmts mis
JOIN money_bank_accounts ba ON mis.account_id = ba.id
JOIN money_categories c ON mis.category_id = c.id
WHERE mis.tx_date >= '%s-01-01' AND mis.tx_date < '%s-01-01' AND month(mis.tx_date) = %s AND c.cat_type <> 0
)
) acc
ORDER BY acc.tx_date;
"""
with connection.cursor() as cursor:
next_year = int(year) + 1
params = [int(year), next_year, month] * 2
print(params)
cursor.execute(query, params)
row = fetchall_as_dict(cursor)
return row
def fetchall_as_dict(cursor):
"""Return all rows from a cursor as a dict"""
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
| [
"hussachai.puripunpinyo@workday.com"
] | hussachai.puripunpinyo@workday.com |
81f661054b560fbd42aecda12a314ae90688b328 | baf950e39fd81ab0006de7fe51e957f85e7f8c44 | /pytorch9.py | 8ccc616fdf816de6631da01da0a01f7b9bb6f308 | [] | no_license | LuisStruggle/pytorch | ef8a569d5410c7f25785a4aa5d1374a0688bff1a | 4b64054502d6e91ee387f8437a7bf7fd3fa15e56 | refs/heads/master | 2020-03-27T18:58:46.195290 | 2018-09-01T01:26:58 | 2018-09-01T01:26:58 | 146,957,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# @Time : 2018/8/30 15:46
# @Author : ly
# @File : pytorch9.py
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10 # rnn time step / image height
INPUT_SIZE = 1 # rnn input size / image width
LR = 0.02 # learning rate
DOWNLOAD_MNIST = False # set to True if haven't download the data
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.rnn = nn.RNN( # 这回一个普通的 RNN 就能胜任
input_size=1,
hidden_size=32, # rnn hidden unit
num_layers=1, # 有几层 RNN layers
batch_first=True, # input & output 会是以 batch size 为第一维度的特征集 e.g. (batch, time_step, input_size)
)
self.out = nn.Linear(32, 1)
def forward(self, x, h_state): # 因为 hidden state 是连续的, 所以我们要一直传递这一个 state
# x (batch, time_step, input_size)
# h_state (n_layers, batch, hidden_size)
# r_out (batch, time_step, output_size)
r_out, h_state = self.rnn(x, h_state) # h_state 也要作为 RNN 的一个输入
outs = [] # 保存所有时间点的预测值
for time_step in range(r_out.size(1)): # 对每一个时间点计算 output
outs.append(self.out(r_out[:, time_step, :]))
return torch.stack(outs, dim=1), h_state
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all rnn parameters
loss_func = nn.MSELoss()
h_state = None # 要使用初始 hidden state, 可以设成 None
for step in range(61):
start, end = step * np.pi, (step + 1) * np.pi # time steps
# sin 预测 cos
steps = np.linspace(start, end, 10, dtype=np.float32)
x_np = np.sin(steps) # float32 for converting torch FloatTensor
y_np = np.cos(steps)
x = Variable(torch.from_numpy(x_np[np.newaxis, :, np.newaxis])) # shape (batch, time_step, input_size)
y = Variable(torch.from_numpy(y_np[np.newaxis, :, np.newaxis]))
if step == 60:
pass
print("预测", rnn(x, h_state)[0])
print("真实", y)
else:
prediction, h_state = rnn(x, h_state) # rnn 对于每个 step 的 prediction, 还有最后一个 step 的 h_state
# !! 下一步十分重要 !!
h_state = Variable(h_state.data) # 要把 h_state 重新包装一下才能放入下一个 iteration, 不然会报错
loss = loss_func(prediction, y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
| [
"18300767078@163.com"
] | 18300767078@163.com |
b5d584c0987fe45dac2d1daf4fcd072b9b34ef63 | 82a7ae58802527ef4728b7842eaad654c65333fe | /Python/Segurança/Redes/UDP/01_Unidirecional/client.py | a971765e0c5df61342cc83b0c68be9cfb3409fd2 | [] | no_license | Alhenri/Codigos | 77929651091d2af63fe689e2a90758126f8ffb5c | 681ef7abeee61e843e6a6717ca5f0be8d9ce0eb5 | refs/heads/master | 2022-12-19T23:39:02.773176 | 2020-10-07T19:30:22 | 2020-10-07T19:30:22 | 259,753,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import socket
HOST = '10.0.0.103' # Endereco IP do Servidor
PORT = 3000 # Porta que o Servidor esta
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
dest = (HOST, PORT)
print('Para sair use CTRL+X')
msg = input("Mensagem: ").encode()
while msg != '\x18':
udp.sendto (msg, dest)
msg = input("Mensagem: ").encode()
udp.close() | [
"ahssf@cin.ufpe.br"
] | ahssf@cin.ufpe.br |
6ffe89477a6ba30eede26110722d8e5903edaec3 | 676b857c58d12732107b777292dfd30a38628f6e | /Web_App/migrations/0005_auto_20191216_2017.py | bc13754bea62b30741b5300d0df787c17de8576c | [] | no_license | Vadi3994/ProjectCDEMCA | 889a8a229e618a7d7ed179162e4038d756f9aa96 | d6875107507b98b3937e81efa8b31dc9d62fe36a | refs/heads/master | 2022-12-11T08:41:03.123339 | 2020-01-07T10:34:39 | 2020-01-07T10:34:39 | 220,671,890 | 0 | 0 | null | 2022-12-08T03:22:02 | 2019-11-09T16:41:02 | CSS | UTF-8 | Python | false | false | 3,223 | py | # Generated by Django 2.2.5 on 2019-12-16 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Web_App', '0004_auto_20191216_1957'),
]
operations = [
migrations.RemoveField(
model_name='dish',
name='restaurant',
),
migrations.RemoveField(
model_name='dish',
name='user',
),
migrations.DeleteModel(
name='LocEvent',
),
migrations.RemoveField(
model_name='restaurant',
name='user',
),
migrations.AlterField(
model_name='propertydetails',
name='Address',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='AgentName',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Area',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Availability',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Bath',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Bed',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='DimView',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Email',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Furnished',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Location',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='PhoneNumber',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Pool',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='Price',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='PropertyName',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='propertydetails',
name='PropertyType',
field=models.CharField(max_length=100),
),
migrations.DeleteModel(
name='Dish',
),
migrations.DeleteModel(
name='Restaurant',
),
]
| [
"57568956+Vadi3994@users.noreply.github.com"
] | 57568956+Vadi3994@users.noreply.github.com |
3bdd6251f6f7f7c4622a1699b8f4ec4bec29f69c | 7d48435ff8a3d68fe2f4dd13e45416018e08f068 | /migrations/versions/54d450ac3d88_.py | 174708466aef2534c76fd288ec795e8158d6858d | [] | no_license | blessingodede/Fyyur-1 | ba930bb5e78ed57923ec58f325f6fb1fada2b3e6 | ba878826aff28c8a2313d0447218c4fccafbdc65 | refs/heads/master | 2023-08-15T17:03:18.382190 | 2020-06-26T23:13:08 | 2020-06-26T23:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | """empty message
Revision ID: 54d450ac3d88
Revises:
Create Date: 2020-05-02 11:16:19.255130
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '54d450ac3d88'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('Artist',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('genres', sa.String(length=120), nullable=True),
sa.Column('image_link', sa.String(length=500), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('Venue',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('image_link', sa.String(length=500), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('Venue')
op.drop_table('Artist')
# ### end Alembic commands ###
| [
"szalai.balazs.david@gmail.com"
] | szalai.balazs.david@gmail.com |
bd37d6634f405523c79a877228689da80f242c6a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_89/46.py | 2b2b33f6462a2c18f37ba5fc20391f0621f9a50f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Waiters en LCM
"""
import sys, time, copy
from pdb import set_trace as DEBUG
def p(*s):
print >> sys.stderr, s
def gcd(a, b):
while b:
a, b = b, a % b
return a
def lcm(a, b):
return a * b // gcd(a, b)
def lcmm(*args):
return reduce(lcm, args)
def factors(n):
fact={1:1}
check=2
while check<=n:
if n%check==0:
n/=check
t = fact.get(check, 0)
fact[check] = t+1
else:
check+=1
return fact
#problem specific functions
def parseInput(f):
return int(f.readline())
def main(N):
if N ==1: return 0
l = lcmm(*range(1,N+1))
f = factors(l)
facts = {1:1}
maxturns = 0
for i in range(1,N+1):
fact = factors(i)
contribute = 0
for k,v in fact.items():
if k not in facts:
contribute+=1
if facts.get(k,0)<v:
facts[k] = v
maxturns+=contribute
return sum(f.values()) - maxturns
#for i in range(N, 0, -1):
#fact = factors(i)
#for k,v in fact.items():
#fk = facts.get(k,0)
#if fk>v:
#facts[k]-=v
#elif fk==v:
#del(facts[k])
#else:
#continue
#pass
#maxturns = i
#return maxturns
if __name__ == "__main__":
if len(sys.argv)==1:
filename = 'test.in'
else:
filename = sys.argv[1]
f = open('primes.txt')
primes = f.read().split()
primes = map(int, primes)
f.close()
#print primes
f = open(filename)
cases = int(f.readline())
for case in range(cases):
#p("Case #%i" % (case+1))
args = parseInput(f)
print "Case #%i: %s" % (case+1, main(args))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
78834feda7d793f9bb22256459dfe7e441573300 | 9042ed3100891c2fbe063d56b59e850a8e7f97cf | /src/example2/receive.py | 706ea40da000be5e486aed3c3e71e017fccb7e38 | [] | no_license | gonzalo123/rabbit_examples | 6b94fa0ec9673cec41c7ac398bbd8407a0392ddb | a3922344f6f76826c3596f37bb16f8e8b9f8cb87 | refs/heads/master | 2020-04-14T06:24:18.601886 | 2019-01-01T19:38:52 | 2019-01-01T19:38:52 | 163,685,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import pika
import logging
from dotenv import load_dotenv
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
load_dotenv(dotenv_path="{}/../.env".format(current_dir))
queue_name = 'example2_queue'
exchange_name = 'example2_exchange'
logging.basicConfig(level=logging.WARNING)
def callback(ch, method, properties, body):
logging.warning("body {}".format(body))
# emit ack manually
# we need to specify the delivery_tag
# delivery_tag is an autoincrement number
# depends on the channel. If I stop the scrip and start again
# delivery_tag will start again
ch.basic_ack(delivery_tag=method.delivery_tag)
# Connect to Rabbit using credentials
broker_connection = pika.BlockingConnection(pika.URLParameters(os.getenv('AMQP_URI')))
# create a new channel
channel = broker_connection.channel()
# create the queue if doesn't exits
channel.queue_declare(queue=queue_name)
# create the exchange if it doesn't exists
channel.exchange_declare(exchange=exchange_name, exchange_type='fanout', durable=True)
# bind exchange to queue
channel.queue_bind(exchange=exchange_name, queue=queue_name)
# register callback to the queue
# no_ack=False means that I need to send ack manually
channel.basic_consume(consumer_callback=callback, queue=queue_name, no_ack=False)
channel.basic_qos(prefetch_count=1)
channel.start_consuming()
| [
"gonzalo123@gmail.com"
] | gonzalo123@gmail.com |
ad0e5761a1f31f49b7f39cfb2155fa2919f90598 | 4ed2353da08d7acb2c70ee2f4df4f31491cbddc8 | /Python/kakao-crane.py | 04750bece328d708293dbea921b81418b37af890 | [] | no_license | hayoungishere/Algorithm | 95f0b6a76002a00c677dcfb1d276858c79824a40 | 46bcc06705fb9e1c0d102e415381b3d88dc29600 | refs/heads/main | 2023-03-04T14:25:13.185809 | 2021-02-17T08:31:44 | 2021-02-17T08:31:44 | 315,022,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | '''
문제 : 크레인 인형뽑기 게임
접근 : 인형의 위치별로 Queue를 생성해서 먼저들어간 인형을 뽑기 완료 통에 옮긴후
뽑기 완료 통의 마지막 두개의 인형을 비교해서
같으면 완료통에서 제거 및 제거된 인형의 수를 2개 증가시킨다.
'''
def solution(board, moves):
answer = 0
q=[]
for i in range(len(board[0])):
q.append([])
for b in board:
for idx in range(len(b)):
if b[idx] != 0:
q[idx].append(b[idx])
out = []
tail = 0
for i in moves:
if len(q[i-1]) > 0:
out.append(q[i-1][0])
del q[i-1][0]
size = len(out)
if size > 1 :
if out[size-1] == out[size-2]:
del out[size-1]
del out[size-2]
answer+=2
return answer
if __name__=="__main__":
b=[[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]
m=[1,5,3,5,1,2,1,4]
print(solution(b,m)) | [
"hayoung.is.here@gmail.com"
] | hayoung.is.here@gmail.com |
095caea73aa96ae1296741d2bdaebcccd396fe96 | 7361ee0129351b8bb594b7a7bb47c143de84dd36 | /1009.py | 85da83df649bda6d6593278319f2d1310a44abef | [] | no_license | antonioroddev/Uri | 5d22c3ba3e01977674874cc4d1ad38ecb8be1ee9 | 12e7c447e79db71ff13587de0079d0a5bcdac218 | refs/heads/master | 2022-05-22T12:03:33.717084 | 2022-04-14T01:52:26 | 2022-04-14T01:52:26 | 216,880,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | if __name__ == '__main__':
lista = [input() for i in range (0,3)]
valor = float(lista[1]) + (float(lista[2]) * 0.15)
print('TOTAL = R$ {:.2f}'.format(valor)) | [
"juninhor-51@hotmail.com"
] | juninhor-51@hotmail.com |
c0e7b618b41f6bf572931b8cb5856b7a0df3f2a6 | c72dae37d94d8f9e80db232a9838244e2bb33fb2 | /src/manage.py | f540c9bbd28fadb0d88b4cc35e2e4f64a9735e3f | [] | no_license | Asifrahman96/DjangoTeachzy | 956ae3c2e9b917ec1bb181fb06babe32f7f48083 | 89e3a07b0beb12e328747a0bc369d731d63b10ec | refs/heads/master | 2023-02-24T03:08:17.274863 | 2021-01-28T20:18:41 | 2021-01-28T20:18:41 | 333,869,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teachzy.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"asifrahmananvar@gmail.com"
] | asifrahmananvar@gmail.com |
666af051b388d54ac1c5626df13b9623c27559f9 | 031689bc515f36124f717163eedc3e26104ac9c2 | /numbers/numbers.py | def438419584f744602cfceeacac6fac3a7142eb | [] | no_license | maduoma/Python | 8bbe7fb04e3e8546a4f09bdfd06888ed9289adef | 54ba02d3e5b84f9d18a0fbf95cdf4b716785b6a7 | refs/heads/master | 2023-01-25T00:43:27.170455 | 2020-12-09T17:07:17 | 2020-12-09T17:07:17 | 212,003,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | print('Adding numbers')
x = 34 + 4
print(x)
print('Substracting numbers')
j = x - 5
print(j)
print('Dividing numbers') #USE this comment to debug or detect where errors occur in your code easily
y = x / 2
print(y)
print('Multiplying numbers')
z = y * 2
print(z)
print('Exponential')
k = z ** 2 # z is raised to the power 2
print(k)
print('The strings bellow must be converted to numbers before they are used in calculations')
x = '5'
y = '6'
#Converting strings to int numbers before printing
print(int(x) + int(y))
| [
"aachilefu@yahoo.com"
] | aachilefu@yahoo.com |
df83d5a7dd4519c161af3d3ee44ec13728526be2 | df0131275eee8ca8094bf976d21c06fb15beb050 | /mysite/polls/migrations/0002_auto_20170627_0547.py | 7aec92622c08c269175c232389aad17a584f3e8d | [] | no_license | tallestross/django-blog | adecbbd589301f542a781b474e173b350079481e | d2bbd1fde3fda601d95a6190ac17fe62af998f32 | refs/heads/master | 2020-12-03T03:44:23.426402 | 2017-06-29T10:42:09 | 2017-06-29T10:42:09 | 95,767,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-27 05:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='choice',
name='choice_text',
),
migrations.RemoveField(
model_name='choice',
name='question',
),
migrations.RemoveField(
model_name='choice',
name='votes',
),
migrations.RemoveField(
model_name='question',
name='pub_date',
),
migrations.RemoveField(
model_name='question',
name='question_text',
),
]
| [
"rossmaude@daedalus.local"
] | rossmaude@daedalus.local |
8cde3781272c47dc93995ad7a393be282fd619be | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/wuz.py | 25bf44e8b6d42681dc820d4df1c4e481b029d13a | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'wUZ':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
72c850969dfe5e6528309e706ffd673c82f7a44c | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/VanderPlas17Python/E_Chapter4/E_VisualizingErrors/index.py | c2c770e9b709e97993efbbfb79962c767157f91e | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,314 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_BasicErrorbars.index import BasicErrorbars as A_BasicErrorbars
from .B_ContinuousErrors.index import ContinuousErrors as B_ContinuousErrors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Figure 4-26. Using point properties to encode features of the Iris data
#
# We can see that this scatter plot has given us the ability to simultaneously explore
# four different dimensions of the data: the (x, y) location of each point corresponds to
# the sepal length and width, the size of the point is related to the petal width, and the
# color is related to the particular species of flower. Multicolor and multifeature scatter
# plots like this can be useful for both exploration and presentation of data.
#
# plot Versus scatter: A Note on Efficiency
# Aside from the different features available in plt.plot and plt.scatter, why might
# you choose to use one over the other? While it doesn’t matter as much for small
# amounts of data, as datasets get larger than a few thousand points, plt.plot can be
# noticeably more efficient than plt.scatter. The reason is that plt.scatter has the
# capability to render a different size and/or color for each point, so the renderer must
# do the extra work of constructing each point individually. In plt.plot, on the other
# hand, the points are always essentially clones of each other, so the work of determin‐
# ing the appearance of the points is done only once for the entire set of data. For large
# datasets, the difference between these two can lead to vastly different performance,
# and for this reason, plt.plot should be preferred over plt.scatter for large
# datasets.
#
# Visualizing Errors
# For any scientific measurement, accurate accounting for errors is nearly as important,
# if not more important, than accurate reporting of the number itself. For example,
# imagine that I am using some astrophysical observations to estimate the Hubble Con‐
# stant, the local measurement of the expansion rate of the universe. I know that the
# current literature suggests a value of around 71 (km/s)/Mpc, and I measure a value of
# 74 (km/s)/Mpc with my method. Are the values consistent? The only correct answer,
# given this information, is this: there is no way to know.
#
#
# Visualizing Errors | 237
#
# Suppose I augment this information with reported uncertainties: the current litera‐
# ture suggests a value of around 71 ± 2.5 (km/s)/Mpc, and my method has measured a
# value of 74 ± 5 (km/s)/Mpc. Now are the values consistent? That is a question that
# can be quantitatively answered.
# In visualization of data and results, showing these errors effectively can make a plot
# convey much more complete information.
#
# Basic Errorbars
# A basic errorbar can be created with a single Matplotlib function call (Figure 4-27):
# In[1]: %matplotlib inline
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-whitegrid')
# import numpy as np
# In[2]: x = np.linspace(0, 10, 50)
# dy = 0.8
# y = np.sin(x) + dy * np.random.randn(50)
#
# plt.errorbar(x, y, yerr=dy, fmt='.k');
#
#
#
#
# Figure 4-27. An errorbar example
#
# Here the fmt is a format code controlling the appearance of lines and points, and has
# the same syntax as the shorthand used in plt.plot, outlined in “Simple Line Plots”
# on page 224 and “Simple Scatter Plots” on page 233.
# In addition to these basic options, the errorbar function has many options to fine-
# tune the outputs. Using these additional options you can easily customize the aesthet‐
# ics of your errorbar plot. I often find it helpful, especially in crowded plots, to make
# the errorbars lighter than the points themselves (Figure 4-28):
# In[3]: plt.errorbar(x, y, yerr=dy, fmt='o', color='black',
# ecolor='lightgray', elinewidth=3, capsize=0);
#
#
#
#
# 238 | Chapter 4: Visualization with Matplotlib
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node holding the content blocks of the 'Visualizing Errors' section."""
    def __init__(self):
        super().__init__(
            "Visualizing Errors",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Register every block. A plain for-loop replaces the original list
        # comprehension, which was used purely for its side effect (an
        # anti-pattern that also built a throwaway list).
        for block in blocks:
            self.add(block)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class VisualizingErrors(HierNode):
    """Hierarchy node for the 'Visualizing Errors' chapter section.

    Aggregates the section's own content plus its two subsections
    (basic errorbars and continuous errors).
    """
    def __init__(self):
        super().__init__("Visualizing Errors")
        self.add(Content())
        self.add(A_BasicErrorbars())
        self.add(B_ContinuousErrors())
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
c3eb45757b31826b76c4a5bccf76f83a9879853c | 547bcde193880e1eb4f106bba86b6f834bff67f1 | /Python training/random&statistic.py | ba3fcf5955a2661d4c93a5be7007c6beaf957cae | [] | no_license | samwang1228/python_university | e68865dcfac1013cba1a3902f5ad2b977878d907 | 699c413fc8602e9e4f2e6c79a1e9a13717c8711e | refs/heads/main | 2023-07-11T16:56:49.809966 | 2021-08-25T06:29:43 | 2021-08-25T06:29:43 | 382,614,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # import random
# data = random.choice([3, 5, 7, 8, 9])#隨機選取數
# data2 = random.sample([56, 7, 8, 99, 55, 789], 3) #隨機選取n數
# print(data)
# print(data2)
# data3 = [6, 7, 8, 9, 56]
# random.shuffle(data3)#隨機對調
# print(data3)
# data4 = random.random() #隨機得到0-1的亂數
# data5 = random.uniform(60, 700) #隨機得到自訂範圍的亂數
# print(data4, '\n', data5)
# data6 = random.normalvariate(100, 10) #得到平均為100 標準差為10的亂數
# print(data6)
import statistics as s
# Demo of the statistics module on a fixed sample; each value printed below.
num_list = [99,678,777]
# To read the three values from user input instead:
# for i in range(3):
#     num_list += [int(input())]
average = s.mean(num_list)# arithmetic mean
print(average)
med = s.median(num_list) # median value
print(med)
stand = s.stdev(num_list) # sample standard deviation
print(stand) | [
"samwang19991228@gmail.com"
] | samwang19991228@gmail.com |
ddf18c6a060e40517c149887c561b4e03e87793a | 8079653aa4e6c4b9a0cb8e0443b7733a59f97d4c | /conduit/apps/authentication/migrations/0001_initial.py | 722bfc8c740fa3cc87fbbcdbf6f6b3775ac54741 | [] | no_license | awesomeo184/conduit-django | 0d1b7fedc1708bc45c0b94b47f31e54394922cd4 | e78a92a70716db39d084405f84864e5a837a8af9 | refs/heads/master | 2023-04-24T07:21:21.888778 | 2021-05-15T09:58:57 | 2021-05-15T09:58:57 | 365,478,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | # Generated by Django 2.1.7 on 2021-05-08 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the authentication app.

    Creates the custom User table: standard Django auth columns (password,
    last_login, is_superuser, groups, user_permissions) plus app-specific
    username/email (both indexed and unique) and created/updated timestamps.
    """
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(db_index=True, max_length=255, unique=True)),
                ('email', models.EmailField(db_index=True, max_length=254, unique=True)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"63030569+awesomeo184@users.noreply.github.com"
] | 63030569+awesomeo184@users.noreply.github.com |
244432cad8114ac63b3727be89be73ba4dde304e | 00c7e4998c4fc598a94659bb26aff134a07f233d | /book_list_processor/admin.py | 737794e456ad7d84edd3d4361901d1a67ef447b3 | [] | no_license | majajj/bibliotecos | 169ae84f3bcddd6193d39c48e82eb7e04c828326 | 9352a8dddb77078a23f5ba0967e626236812c9b2 | refs/heads/main | 2023-09-03T13:14:18.805642 | 2021-10-22T19:14:10 | 2021-10-22T19:14:10 | 419,840,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from django.contrib import admin
# Register your models here.
from .models import Books, Languages, Authors
# Expose the catalog models on the Django admin site.
admin.site.register(Books)
admin.site.register(Languages)
admin.site.register(Authors) | [
"maja_jj@interia.pl"
] | maja_jj@interia.pl |
09de20572af603359e9a5141a7b3b235bf3c8d92 | 734a4a8515a60d35129199864832d074db22b490 | /GWC_Unit_2/U2L4/Project_DictionaryAttack.py | d0cedf207a19a2f6dd92baa1314cc305dcedba5a | [] | no_license | Ale-ZJ/GWC-SIP-2019 | 751bf55ff6f977e22c9e9ed9473a9c901d3c71f4 | a956ac46ac06defc3f494cf09a59f431b3ed4a5f | refs/heads/master | 2020-10-01T11:51:21.091875 | 2019-12-12T06:06:07 | 2019-12-12T06:06:07 | 227,532,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | #Opens a file. You can now look at each line in the file individually with a statement like "for line in f:
# NOTE(review): module-level read handle shared by test_password; the file is
# never closed, and the iterator is exhausted after one full scan, so only the
# first password checked in a process actually gets compared.
f = open("dictionary.txt","r")
def main():
    """Prompt for a trial password and test it against the dictionary file."""
    print("Can your password survive a dictionary attack?")
    # Read the candidate from the keyboard; normalize by stripping surrounding
    # whitespace and lowercasing before comparison.
    password = input("Type in a trial password: ").strip().lower() #strip the whitespaces in the password
    test_password(password)
def test_password(password):
    """Scan the global dictionary file for words resembling *password*.

    The password is declared weak when, for some same-length dictionary
    line, more than len(password) // 1.2 characters match
    position-for-position.

    Bug fix: the match counter is now reset for every dictionary word; the
    original accumulated matches across ALL words, so the threshold was
    eventually exceeded by unrelated partial matches and almost any password
    was flagged weak.
    """
    # NOTE(review): lines read from f keep their trailing newline, so the
    # length comparison is off by one versus the stripped password — confirm
    # whether dictionary entries should be .strip()-ed first.
    survived = True
    for line in f:
        if len(line) == len(password):
            x = 0  # positional matches for THIS word only
            for i in range(len(line)):
                if line[i] == password[i]:
                    x += 1
            if x > len(password) // 1.2:
                print("Your password is weak!")
                survived = False
                break
    if survived:
        print("Wow, your password survived!")
# Run interactively only when executed as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
708ec5e9efed78704e36ae71c7532a84aa87f723 | 8e2962ac3aeb1c584bdddbc993a2873037139030 | /play_turf_book/main.py | 4e0655d1c42db6d658aeb827dadf34404673b177 | [] | no_license | shakhawat009/terf-booking-system | ff1a1b913aade135b24af5ace8ddd7fc50d71bfe | d394d7e003fadec3d95444ee16b328c79df0f897 | refs/heads/main | 2023-04-10T01:00:27.651409 | 2021-04-17T16:54:35 | 2021-04-17T16:54:35 | 358,913,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | from flask import Flask, render_template, request, redirect, session
import mysql.connector
import os
# Flask app plus a module-level MySQL connection shared by all request handlers.
app = Flask(__name__)
app.secret_key = os.urandom(24)  # random per-process key for session signing
# NOTE(review): credentials are hard-coded (root, empty password) — move to
# configuration; a single shared cursor is also not safe under concurrency.
conn = mysql.connector.connect(host="localhost", user="root", password="", database="turf")
cursor = conn.cursor()
@app.route('/')
def login():
    """Serve the login page."""
    return render_template('login.html')
@app.route('/register')
def about():
    """Serve the registration page. (Function name 'about' is historical.)"""
    return render_template('register.html')
@app.route('/home')
def home():
    """Render the dashboard for a logged-in user; bounce others to login."""
    # Guard clause: anyone without a user_id in the session goes back to '/'.
    if 'user_id' not in session:
        return redirect('/')
    return render_template('home.html')
@app.route('/login_validation', methods=['post'])
def login_validation():
    """Check submitted credentials and start a session on success.

    Security fix: user input is now passed as query parameters instead of
    being interpolated into the SQL string with str.format, which allowed
    SQL injection (e.g. a crafted email could bypass the password check).
    """
    email = request.form.get('email')
    password = request.form.get('password')
    cursor.execute(
        """SELECT * FROM `users` WHERE `email` LIKE %s AND `password` LIKE %s""",
        (email, password))
    users = cursor.fetchall()
    if len(users) > 0:
        # First column of the first matching row is the user id.
        session['user_id'] = users[0][0]
        return redirect('/home')
    else:
        return redirect('/')
@app.route('/add_user', methods=['POST'])
def add_user():
    """Register a new user, log them in, and redirect to the home page.

    Fixes: the route decorator was malformed ('/add_user,methods=[POST]' put
    the methods list inside the URL string), the INSERT was misspelled
    (INSET) and missed its closing parenthesis, the follow-up SELECT queried
    the wrong table (`user` instead of `users`), and all values are now
    passed as parameters to prevent SQL injection.
    """
    name = request.form.get('uname')
    email = request.form.get('uemail')
    password = request.form.get('upassword')
    cursor.execute(
        """INSERT INTO `users`(`user_id`,`name`,`email`,`password`) VALUES (NULL, %s, %s, %s)""",
        (name, email, password))
    conn.commit()
    # Fetch the freshly created row to obtain its auto-generated id.
    cursor.execute("""SELECT * FROM `users` WHERE `email` LIKE %s""", (email,))
    myuser = cursor.fetchall()
    session['user_id'] = myuser[0][0]
    return redirect('/home')
@app.route('/logout')
def logout():
    """Drop the session's user_id and return to the login page."""
    session.pop('user_id')
    return redirect('/')
# Start the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
| [
"anol182006@gmail.com"
] | anol182006@gmail.com |
0fbaab7562dfc9e920f442142b34da9865161986 | 7fdff3ab45f5fef05cc76f97ee44e44779f87120 | /peerloan/migrations/0018_auto_20160912_1536.py | e45c005b5ff05f11df1b1b9a437414fdb3067bda | [] | no_license | Calvin66der/project_peerloan | 4a132c7464b21e75a80f091d44c389cbd10c2cc5 | 99a02843addbfcffec5c7d7a964f0b3347a03962 | refs/heads/master | 2021-01-12T07:45:00.811952 | 2016-12-20T08:44:42 | 2016-12-20T08:44:42 | 77,006,043 | 0 | 0 | null | 2016-12-21T01:47:48 | 2016-12-21T01:47:48 | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give LoanSchedule.received_amount a
    FloatField with default 0."""
    dependencies = [
        ('peerloan', '0017_borrowrequest_overpay_amount'),
    ]
    operations = [
        migrations.AlterField(
            model_name='loanschedule',
            name='received_amount',
            field=models.FloatField(default=0),
        ),
    ]
| [
"15113029g@connect.polyu.hk"
] | 15113029g@connect.polyu.hk |
74dd618d18c4dd6fd9ec1f4f37e5133181f704b5 | 07aa4ceb23e978f56c0b0d0252cb956fcbd0f406 | /stock/forms.py | 70de2cb433f70135940e48b446314e6daba8deb6 | [] | no_license | roneluis/atv_1 | 59d051ea65fb91ce3f60d9b441e61ce43501f958 | a0b0e862f51ef337bd373076a1b59ca88d224bc7 | refs/heads/master | 2020-07-31T19:13:49.988000 | 2019-10-31T22:59:15 | 2019-10-31T22:59:15 | 210,724,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.urls import path
from . models import Product
# NOTE(review): ProductForm does not inherit from django.forms.ModelForm, so
# the inner Meta (model/fields) has no effect; the `path` import above is
# also unused. Confirm the intended base class before relying on this form.
class ProductForm():
    class Meta:
        model = Product
fields= '__all__' | [
"aluno@ifpi.edu.br"
] | aluno@ifpi.edu.br |
b9a172d7ba31bd020ab4da9502ea8144497f4b18 | d58e4c4d955700979a093013488cc154cf1416f9 | /launcher.py | bf45f5f57e9a63b6230c63f784e5f413dfdbdf98 | [] | no_license | sgh304/YouTube-to-Podcast | 8ed5f3a363eb1bb29c9994ac3405335ea29df0fb | 864e3cc89d807b4e6e9a4c4af52a17f62974b9d8 | refs/heads/master | 2021-04-09T16:17:58.409523 | 2018-03-21T01:44:27 | 2018-03-21T01:44:27 | 125,786,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | # Running this script will launch YouTube to Podcast
from gui.app import YTPApp
YTPApp().start() | [
"sgh304@nyu.edu"
] | sgh304@nyu.edu |
db61be2c3b26ca80b961f9b324f981d7de1be14a | 99361c45166c3e39bdc1e5e7ff796b60e5edc20e | /setup.py | 59352d3cc65d0277c567478e0470ebd9187c11c0 | [] | no_license | wkcn/WorldCup | 2b358b73aab5496b3f7e209dc615c97c0181abff | 1acef2d2cadf5e8cbb911b05a8ecfd98aa43920d | refs/heads/master | 2020-03-08T10:38:08.558059 | 2018-04-04T15:03:07 | 2018-04-04T15:03:07 | 128,077,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
from distutils.core import setup
import py2exe
import sys
# Simulate `python setup.py py2exe` so running this script always builds.
sys.argv.append('py2exe')
setup(
    # GUI (no console) target; embed logo.ico as the executable's icon.
    windows=[
        {"script":"run.py","icon_resources":[(1,"logo.ico"),]}],
    options={
        # bundle_files=3 keeps dependencies as separate files;
        # optimize=2 compiles bytecode as with `python -OO`.
        "py2exe":{"includes":["sip"],"dll_excludes":["MSVCP90.dll"],\
            "bundle_files": 3,"optimize": 2,
        }},
    # Ship the icon alongside the executable under an "image" directory.
    data_files=[
        ("image", ["./logo.ico",])]
) | [
"wkcn@live.cn"
] | wkcn@live.cn |
3037cc9f0d5675cef844ea03c08be30f015cdeb3 | fe7996f7110211e8c2df7cd7a4d81cc572204a70 | /synthetic-enumeration/sprint-12/03-collect-experimental-data-from-Lauren-assignments.py | afb9cf0e066c49396bcbc2bd77a5215fad858d7a | [
"MIT"
] | permissive | FoldingAtHome/covid-moonshot | 78c2bc7e6d00f371d626fcb0a4381cf528413eef | 814189c239f8f0189c6cc48afcbca1f96c87dd09 | refs/heads/master | 2023-02-23T04:23:00.064389 | 2023-02-19T23:18:10 | 2023-02-19T23:18:10 | 249,626,873 | 62 | 11 | MIT | 2022-03-01T20:43:56 | 2020-03-24T06:07:39 | Python | UTF-8 | Python | false | false | 5,603 | py | #!/bin/env python
"""
Collect experimental data from Lauren's reassignments via CSV file
"""
import numpy as np
import json
import math
import itertools
import datetime
from rich.progress import track
from openeye import oechem
# Run metadata recorded alongside the generated dataset.
xchem_project = 'Mpro'
creator = 'John Chodera <john.chodera@choderalab.org>'
creation_date = datetime.datetime.now()
prefix = 'sprint-12'  # used to name the output JSON file
description = 'COVID Moonshot Sprint 12 for optimizing 5-spiro compounds'
# CSV export of fluorescence assay results with reassigned isomer structures.
csv_filename = 'experimental-data/Fl_agg_data_all_data_11_01_2022_11_13_20-cleaned-reassigned_isomers.csv'
#
# Now pull in all submitted designs
#
def smiles_is_racemic(suspected_smiles):
    """
    Return True if compound is racemic.

    Examples:
      "CNC(=O)CN1Cc2ccc(Cl)cc2[C@@]2(CCN(c3cncc4c3CCCC4)C2=O)C1 |o1:14|" : enantiopure, but stereochemistry is uncertain
      "CNC(=O)CN1Cc2ccc(Cl)cc2[C@@]2(CCN(c3cncc4c3CCCC4)C2=O)C1" : enantiopure, stereochemistry certain
      "CNC(=O)CN1Cc2ccc(Cl)cc2[C]2(CCN(c3cncc4c3CCCC4)C2=O)C1" : racemic
    """
    # Keep only the bare SMILES: anything after the first whitespace is an
    # annotation suffix (e.g. a CXSMILES '|o1:14|' block).
    bare_smiles = suspected_smiles.split()[0]
    return stereochemistry_is_uncertain(bare_smiles)
def stereochemistry_is_uncertain(suspected_smiles):
    """
    Return True if there is uncertainty in the enantiopure compound or mixture is racemic.

    True exactly when RDKit enumerates more than one distinct stereoisomer
    for the given SMILES.
    """
    from rdkit import Chem
    from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions

    rdmol = Chem.MolFromSmiles(suspected_smiles)
    opts = StereoEnumerationOptions(unique=True)
    isomers = tuple(EnumerateStereoisomers(rdmol, options=opts))
    # unique=True already de-duplicates the isomers, so their count alone
    # decides the answer; the original also canonicalized and sorted SMILES
    # strings, which was dead work (the list length always equaled
    # len(isomers)). The unused AllChem import was dropped as well.
    return len(isomers) > 1
# Read all submitted designs
print('Reading CSV export...')
compounds_with_experimental_data = list()
# Drop columns that cause trouble for OpenEye
import pandas as pd
df = pd.read_csv(csv_filename, dtype=str)
# Drop columns
#drop_columns = []
#df.drop(columns=drop_columns, inplace=True)
# Replace suspected_SMILES with SMILES
#df['suspected_SMILES'].fillna(df['SMILES'], inplace=True)
# Exchange columns so suspected_SMILES is first
#title_column_index = df.columns.get_loc("Canonical PostEra ID")
#smiles_column_index = df.columns.get_loc("suspected_SMILES")
#cols = df.columns.tolist()
#cols = cols[smiles_column_index:(smiles_column_index+1)] + cols[title_column_index:(title_column_index+1)] + cols[:]
#df = df[cols]
# Replace < and > with limits
#df.applymap(lambda x: str(x))
#df.applymap(lambda x: 0.050 if "<" in str(x) else x)
#df.applymap(lambda x: 99.0 if ">" in str(x) else x)
# Eliminate stuff after spaces
#df = df.applymap(lambda x: str(x).split()[0])
# NOTE(review): this counter is reported below but never incremented —
# compounds with uncertain stereochemistry are no longer dropped here.
ncompounds_dropped_due_to_uncertain_stereochemistry = 0
ncompounds_racemic = 0
# Iterate over molecules
# Fields: compound_name,compound_structure,measurement,qualifier,reassigned_structure
# Format: PostEra ID,SMILES,pIC50,comparator,reassigned_structure
delta_pIC50 = 0.2 # 95% CI is this many units in either direction
from fah_xchem.schema import ExperimentalCompoundData, ExperimentalCompoundDataUpdate
for index, row in df.iterrows():
    row = row.to_dict()
    suspected_smiles = row['compound_structure']
    compound_id = row['compound_name']
    is_racemic = smiles_is_racemic(suspected_smiles)
    # Skip inequalities
    if row['qualifier'] != '=':
        continue
    # Symmetric 95% confidence interval of +/- delta_pIC50 around the point estimate.
    pIC50 = float(row['measurement'])
    pIC50_lower = pIC50 - delta_pIC50
    pIC50_upper = pIC50 + delta_pIC50
    # Canonicalize with OpenEye SMILES
    suspected_smiles = suspected_smiles.split()[0] # truncate stuff after whitespace
    oemol = oechem.OEGraphMol()
    oechem.OESmilesToMol(oemol, suspected_smiles)
    suspected_smiles = oechem.OEMolToSmiles(oemol)
    experimental_data = dict()
    experimental_data['pIC50'] = pIC50
    experimental_data['pIC50_lower'] = pIC50_lower
    experimental_data['pIC50_upper'] = pIC50_upper
    if is_racemic:
        ncompounds_racemic += 1
    # Store compound experimental data
    experimental_compound_data = ExperimentalCompoundData(
        compound_id=compound_id,
        smiles=suspected_smiles,
        is_racemic=is_racemic,
        experimental_data=experimental_data,
    )
    compounds_with_experimental_data.append(experimental_compound_data)
print(f'{len(compounds_with_experimental_data)} measurements read and retained')
print(f'{ncompounds_dropped_due_to_uncertain_stereochemistry} enantiopure compounds with uncertain stereochemistry dropped.')
print(f'{ncompounds_racemic} compounds assayed as racemates')
dataset = ExperimentalCompoundDataUpdate(compounds=compounds_with_experimental_data)
print(f'There are {len(compounds_with_experimental_data)} compounds in this sprint with in-range IC50 measurements')
# Write JSON
def write_json(compound_series, json_filename):
    """Serialize compound_series (via its .json() method) to json_filename.

    Filenames containing '.bz2' or '.gz' are written through the matching
    compression codec in text mode; anything else is written as plain text.
    """
    print(f'Writing JSON to {json_filename}')
    payload = compound_series.json()
    # Pick the opener once, then share a single write path.
    if '.bz2' in json_filename:
        import bz2
        opener = bz2.open
    elif '.gz' in json_filename:
        import gzip
        opener = gzip.open
    else:
        opener = open
    with opener(json_filename, "wt") as f:
        f.write(payload)
import os
# Ensure the output directory exists, then write the dataset built above.
os.makedirs('json', exist_ok=True)
print(f'Generating experimental data JSON for {prefix}...')
json_filename = f'json/{prefix}-experimental-data.json' # output filename
write_json(dataset, json_filename)
| [
"john.chodera@choderalab.org"
] | john.chodera@choderalab.org |
dafdc684d69ccb7e4b1bf6da6b8f289643e6dcdc | 056e68b3d1f69a1fe39ed309e02021510fb42c86 | /flaskblog/models.py | 891616024bf296701924735920ffa2c5e53e3ba1 | [] | no_license | thecatfather/ccusingflask | 478e9b8ead59e2337a17a5b792298478f109546f | f39cd7d280bb16a34bb9ca2ceb046ac959033221 | refs/heads/master | 2020-04-20T16:57:32.481600 | 2019-02-13T08:19:02 | 2019-02-13T08:19:02 | 168,974,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | from datetime import datetime
from flaskblog import db, login_manager
from flask_login import UserMixin
from flaskblog import ma
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the User matching the session's user id."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Account record; doubles as the Flask-Login user object via UserMixin."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): username/email/password are nullable=True and email is
    # unique=False — unusual for an auth model; confirm these are intended.
    username = db.Column(db.String(20), unique=True, nullable=True)
    email = db.Column(db.String(120), unique=False, nullable=True)
    #image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = db.Column(db.String(60), nullable=True)
    # One-to-many: a user's posts, reachable from a Post as `post.author`.
    posts = db.relationship('Post', backref='author', lazy=True)
    #def __repr__(self):
    #    return f"User('{self.username}', '{self.email}', '{self.image_file}')"
    def __init__(self, username, email, password):
        # Explicit constructor replaces SQLAlchemy's default keyword
        # constructor; the password is stored as given (hash it upstream).
        self.username = username
        self.email = email
        self.password = password
class UserSchema(ma.Schema):
    """Marshmallow serialization schema for User."""
    class Meta:
        # NOTE(review): exposing 'password' in serialized output is risky.
        fields = ('id', 'username', 'email', 'password')
class Category(db.Model):
    """Post category, keyed by its name (the primary key)."""
    #id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True, nullable=False,primary_key=True)
    # num_acts: count of activities/posts in this category; not maintained here.
    num_acts = db.Column(db.Integer, nullable=True)
    # One-to-many: posts in this category, reachable from a Post as `post.cat`.
    posts = db.relationship('Post', backref='cat', lazy=True)
    def __init__(self, name):
        self.name = name
class CategorySchema(ma.Schema):
    """Marshmallow serialization schema for Category (name only)."""
    class Meta:
        fields = ('name',)
    #def __repr__(self):
    #    return f"Category('{self.name}', '{self.num_acts}')"
class Post(db.Model):
    """Blog post linked to an author (User) and a category (by name)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    # NOTE(review): declared Integer but references category.name, which is a
    # String(120) primary key — the column type looks wrong; confirm.
    cat_name = db.Column(db.Integer, db.ForeignKey('category.name'), nullable=False)
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    upvotes = db.Column(db.Integer)
    def __repr__(self):
        return f"Post('{self.title}', '{self.date_posted}','{self.content}','{self.image_file}')"
| [
"rbongole@gmail.com"
] | rbongole@gmail.com |
eca69742d6ec30ac047d2b79b46fa7b0ddc3cf56 | 237cc38de0cf7a6e3661ed552ae771bd972d7438 | /base/obj2_demo.py | ce08920ba539aeb6829dc7a411f369bec63a4e60 | [] | no_license | chydream/python | af5ad8a98c78de71e255f7b776f936c4b89c616e | e5bfef53a7770d4f323bd2877f93c8166c563695 | refs/heads/master | 2020-05-07T17:00:33.558178 | 2020-05-05T13:45:19 | 2020-05-05T13:45:19 | 180,708,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | class Point(object):
# 自定义Point类的构造(初始化)方法
def __init__(self, x, y):
self.x = x
self.y = y
# 自定义Point类对象的格式化输出函数(string())
def string(self):
print(print("{{X:{0},Y:{1}}}".format(self.x, self.y)))
class Circle(Point):
    """A circle: a center point (inherited from Point) plus a radius."""

    def __init__(self, x, y, radius):
        # Let the Point base class set up the center coordinates.
        super().__init__(x, y)
        self.radius = radius

    def string(self):
        """Print the circle's anchor point and radius."""
        message = "该图形初始化点为:{{X:{0},Y:{1}}};{{半径为:{2}}}".format(self.x, self.y, self.radius)
        print(message)
class Size(object):
    """A width/height pair."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def string(self):
        """Print this size as {Width:w,Height:h}."""
        text = "{{Width:{0},Height:{1}}}".format(self.width, self.height)
        print(text)
class Rectangle(Point, Size):
    """An axis-aligned rectangle: an anchor point plus width/height."""

    def __init__(self, x, y, width, height):
        # Call each base initializer explicitly; their __init__ signatures
        # differ, so cooperative super() is not used here.
        Point.__init__(self, x, y)
        Size.__init__(self, width, height)

    def string(self):
        """Print the rectangle's anchor point and dimensions."""
        message = "该图形初始化点为:{{X:{0},Y:{1}}};长宽分别为:{{Width:{2}, Height:{3}}}".format(self.x, self.y, self.width, self.height)
        print(message)
if __name__ == "__main__":
    # Circle centered at (5, 5) with radius 8.
    c = Circle(5, 5, 8)
    c.string()
    # Rectangle anchored at (15, 15), width 15 and height 15.
    r1 = Rectangle(15, 15, 15, 15)
    r1.string()
    # Rectangle anchored at (40, 30), width 11 and height 14.
    r2 = Rectangle(40, 30, 11, 14)
r2.string() | [
"yong.chen@doone.com.cn"
] | yong.chen@doone.com.cn |
c35ab579b433069684ee0bf476370e083fcfb453 | aeb80fbbdf88c34db80622f5217a4d8063ac9e61 | /d05/d05.py | 085ba6f0d0884ff06260de3f699f88cddc9bc2c3 | [
"MIT"
] | permissive | panaC/aoc2020 | f8b0344832ec1c6ac3f5479a039824226d5ea511 | f56557dace7ffb127dd5cb65aaaa9545be3aa7fb | refs/heads/main | 2023-02-25T01:00:23.667314 | 2021-01-28T11:21:46 | 2021-01-28T11:21:46 | 317,681,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #! /usr/local/bin/python3
import sys
# Advent of Code 2020 day 5: each boarding pass is 7 F/B row bits followed by
# 3 L/R column bits; interpret both as binary and track the highest seat ID.
hight = 0  # (sic) running maximum seat ID
for line in sys.stdin:
    code = line[:-1]  # strip the trailing newline
    row = code[:-3].replace('F', '0').replace('B', '1')
    col = code[len(code)-3:].replace('L', '0').replace('R', '1')
    rowNb = int(row, 2)
    colNb = int(col, 2)
    result = rowNb * 8 + colNb  # seat ID = row * 8 + column
    hight = result if result > hight else hight
print(hight)
| [
"pierre@leroux.tech"
] | pierre@leroux.tech |
61b29a161fb3a5b414f6c50b219b64635692cc93 | 5d8d03c6a76d3135b197bf384196da1e09cc05ed | /pca_refit.py | 48bcbc4f05b5290fe89ef0c5e5f865956d75375c | [] | no_license | Rizo-R/halo-emulator | c68da732bf9f6a00850268c129141062f8ec7d03 | 50c383008a038859ac1e7877791ba0aca2ad055b | refs/heads/master | 2022-12-17T10:24:00.556646 | 2020-09-09T04:57:12 | 2020-09-09T04:57:12 | 294,006,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,470 | py | import glob
# import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy.integrate as integrate
import scipy.linalg
import scipy.optimize as optimize
import scipy.stats as stats
import sys
import time
from emulator import *
from HMF_piecewise import *
from likelihood import *
from pca import *
def sort_data(X, Y):
    """Sort (X, Y) jointly by the first column of X (mu_n), ascending,
    then restore ascending mass order (column 4) inside each 20-row
    cosmology chunk, which the global sort scrambles.
    """
    # Work on the concatenated [X | Y] matrix so rows stay paired.
    joined = np.concatenate((X, Y), axis=1)
    ordered = joined[joined[:, 0].argsort()]
    # Re-sort every 20-row cosmology by mass (column 4).
    n_chunks = ordered.shape[0] // 20
    for c in range(n_chunks):
        lo, hi = 20 * c, 20 * (c + 1)
        piece = ordered[lo:hi]
        ordered[lo:hi] = piece[piece[:, 4].argsort()]
    # Split back into features (first five columns) and target (sixth).
    return ordered[:, :5], np.expand_dims(ordered[:, 5], axis=1)
# Load the emulator inputs, select the z=0 slice, and sort it.
a = HaloEmulator()
b = RedshiftTester(M_low=12, M_high=16)
print("Input shape: ", b.X.shape)
redshift = 0.
redshift_ind = np.where(b.X[:, 3] == redshift)
X = b.X[redshift_ind]
Y = b.Y[redshift_ind]
X, Y = sort_data(X, Y)
# Load the previously fitted HMF parameter files, one per cosmology.
filelist = glob.glob('./hmf_params/*.npy')
filelist.sort()
filelist
# Params is a numpy array of shape (27, 23).
params = np.ones((len(filelist), 23))
for i in range(params.shape[0]):
    params[i] = np.load(filelist[i])
# Clamp positive parameter values (beyond the first column) to zero.
ind_h, ind_v = np.where(params[:, 1:] > 0)
params[ind_h, ind_v+1] = 0
Mpiv = 1e14
# Evaluate each cosmology's piecewise HMF on the 1001-point mass grid.
Y_fit = np.ones((1001, params.shape[0]))
for i in range(params.shape[0]):
    Y_fit[:, i], _ = get_HMF_piecewise(
        params[i][1:], reg_bins=19, offset=0, Mpiv=1e14)
# 1. Plot all HMF fits.
HMF_mean = np.average(Y_fit, axis=1).reshape(-1, 1)
# for i in range(Y_fit.shape[1]):
# input_HMFs, = plt.semilogx(np.logspace(
# 13, 15.5, 1001), Y_fit[:, i], color='dimgrey')
# blue_line, = plt.semilogx(np.logspace(
# 13, 15.5, 1001), HMF_mean, color='blue')
# input_HMFs.set_label("Input HMFs $z = 0$")
# blue_line.set_label("Mean HMF")
# plt.legend(handles=[input_HMFs, blue_line], loc='lower left')
# plt.xlabel("Mass [$M_\odot / h$]")
# plt.ylabel("ln(HMF)")
# plt.show()
# 2. Plot the residuals around the mean HMF.
Y_res = Y_fit - np.broadcast_to(HMF_mean, (HMF_mean.size, len(filelist)))
# for i in range(Y_res.shape[1]):
# residual_lines, = plt.plot(np.logspace(
# 13, 15.5, 1001), Y_res[:, i], color='dimgrey')
# residual_lines.set_label("Residual around mean")
# plt.legend(handles=[residual_lines], loc='upper left', prop={'size': 13})
# plt.xlabel("Mass [$M_\odot / h$]")
# plt.ylabel("ln(HMF)")
# plt.show()
# 3. Conduct PCA and the first four plot eigenvectors.
pca = Pca.calculate(Y_res)
# pca.basis_vectors[:, :4]
# handles = []
# for i in range(4):
# globals()["evector" + str(i+1)], = plt.semilogx(np.logspace(13,
# 15, 1001), pca.basis_vectors[:, i])
# globals()["evector" + str(i+1)].set_label("EV #" + str(i+1))
# handles.append(globals()["evector" + str(i+1)])
# plt.legend(handles=handles, loc='upper left')
# plt.title("The first four eigenvectors (over $99.9995 \%$ explained variance)")
# plt.xlabel("Mass [$M_\odot / h$]")
# plt.ylabel("ln(HMF)")
# plt.show()
# Report how much variance the first four principal components capture.
var_sum = 0
for i in range(4):
    var_sum += pca.explained_variance[i]
explained_percentage = 100 * var_sum / np.sum(pca.explained_variance)
print("The first %d basis vectors explain %f %% of the variance." %
      (i+1, explained_percentage))
def fit_weights(weights, evectors):
    '''Return the HMF reconstructed as the weighted sum of the first
    len(weights) eigenvectors (columns of evectors).

    Generalized: the output length now follows evectors.shape[0] instead of
    the previously hard-coded 1001 mass bins, so any grid size works.

    Prerequisites: evectors.shape[1] >= len(weights).'''
    res = np.zeros(evectors.shape[0])
    for i, w in enumerate(weights):
        res += w * evectors[:, i]
    return res
def integrate_dn_dlnM(HMF, n):
    '''Integrates the given HMF for a given cosmology. The cosmology is
    provided by the number in the (sorted) list of cosmologies, starting at
    [0., 0.3, 2.1].'''
    # NOTE(review): reads the module-level HMF_mean (HMF holds residuals
    # around that mean), and `n` is only used by the commented-out plotting
    # below — confirm before removing the parameter.
    # 20 bins.
    M_logspace = np.logspace(13, 15.5, 1001)
    vals = np.zeros((20, 1))
    for i in range(20):
        # Each bin spans 50 grid points, inclusive of the right edge.
        ind_lo = 50*i
        ind_hi = 50*(i+1)+1
        x = M_logspace[ind_lo:ind_hi]
        # Reconstruct dn/dlnM = exp(mean + residual) on this bin.
        dn_dlnM = np.exp(HMF_mean.flatten()
                         [ind_lo:ind_hi] + HMF[ind_lo:ind_hi])
        # 1e9 volume factor; dividing by x converts dn/dlnM to dn/dM.
        y = 1e9 * dn_dlnM / x
        vals[i] = integrate.trapz(y, x)
    # plt.loglog(np.logspace(13.0625, 15.4375, 20), vals)
    # plt.loglog(np.logspace(13.0625, 15.4375, 20), Y[20*n:20*(n+1)])
    # plt.show()
    return vals
def load_cov_matrices(path):
    """Unpickle and return the covariance matrices stored at *path*.

    Fix: the file handle is now closed via a context manager (the original
    opened the file and never closed it). The latin1 encoding is kept for
    compatibility with Python-2-era pickles.
    """
    with open(path, "rb") as pic_in:
        return pickle.load(pic_in, encoding="latin1")
def remove_zeros(C):
    """Drop every index whose entire row OR entire column of C is zero,
    deleting it along both axes. Returns the reduced matrix and the indices
    that were removed.
    """
    zero_cols = np.all(C == 0, axis=0)
    zero_rows = np.all(C == 0, axis=1)
    idx = np.argwhere(zero_cols | zero_rows)
    # Walk the indices back-to-front so earlier deletions don't shift the
    # positions of the ones still to be removed.
    for i in idx[::-1]:
        C = np.delete(C, i, axis=0)
        C = np.delete(C, i, axis=1)
    print("Covariance matrix shape: ", C.shape)
    return C, idx
def flip_coin(r):
    """Metropolis acceptance test: return True with probability min(r, 1).

    r > 1 always accepts; otherwise accept when r exceeds a uniform draw
    from [0, 1). Simplified to return the boolean expression directly
    instead of the original `if ...: return True else: return False`.
    """
    u = np.random.uniform(0, 1)
    return r > 1 or r > u
def log_P(weights, pca, n):
    # NOTE(review): this function appears broken/dead code — it references
    # `kwargs`, which is not defined anywhere in scope (NameError on call),
    # rebinds the `n` parameter, and assigns `N` without using it.
    # log_prior / log_likelihood_mcmc presumably come from the star import
    # of `likelihood` at the top of the file — confirm before relying on it.
    HMF = fit_weights(weights, pca.u)
    N = integrate_dn_dlnM
    y = kwargs['y']
    n = kwargs['n']
    lp = log_prior(weights)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood_mcmc(weights, y, n)
def mcmc_step(N_hops, pca, std_dev=0.5, n=0):
'''My attempt to run MCMC using the Metropolis-Hastings algorithm. Returns
the last 1000 states in the chain.'''
# Load the covariance matrix.
filelist_mat = glob.glob("./covmat/covmat_M200c_*.pkl")
filelist_mat.sort()
filelist_mat
cov_matrices = load_cov_matrices(filelist_mat[n])[-1]
states = np.zeros((N_hops, 4))
acc = 0
tot = 0
cur = []
C, idx = remove_zeros(cov_matrices)
# Create a random initial state.
for i in range(4):
cur.append(np.random.uniform(0, 400))
for i in range(N_hops):
tot += 1
states[i] = cur
# Initialize the next state, based on the current state.
next = []
for j in range(4):
next.append(np.random.normal(cur[j], std_dev))
# if i % 100 == 0:
# print(next)
# Calculate and compare the two likelihood function values for the
# current and the next states.
HMF_cur = fit_weights(cur, pca.u[:, :4])
HMF_next = fit_weights(next, pca.u[:, :4])
N_cur = integrate_dn_dlnM(HMF_cur, 0)
N_next = integrate_dn_dlnM(HMF_next, 0)
log_likelihood_cur = -likelihood_sim(N_cur, Y[20*n:20*(n+1)], C, idx)
log_likelihood_next = -likelihood_sim(N_next, Y[20*n:20*(n+1)], C, idx)
if i % 100 == 0:
print(log_likelihood_cur)
print(log_likelihood_next)
# The (log) ratio of the likelihoods determines the probability of
# switching to the new state.
lnR = log_likelihood_next - log_likelihood_cur
if flip_coin(np.exp(lnR)):
cur = next
acc += 1
print("Acceptance rate: %f" % (acc/tot))
return states[-1000:]
tic = time.perf_counter()
states = mcmc_step(N_hops=100000, pca=pca, std_dev=0.5, n=0)
toc = time.perf_counter()
print(states[-100:])
print("Time: %f seconds." % (toc-tic))
# -likelihood_sim(integrate_dn_dlnM(fit_weights(states[-1], pca.u[:, :4]), 0), Y[20*n:20*(n+1)], C, idx)
| [
"rr555@cornell.edu"
] | rr555@cornell.edu |
bcecf19a76654b9735e8f6b836de6dcc2ee25033 | 1496c7886cf599cc2168aba4948b4928018c3639 | /0x0A-python-inheritance/100-my_int.py | 9d35dc64e881227b3ea25e363b7895b572123aac | [] | no_license | RhysMurage/alx-higher_level_programming | 5365e3617bdba54d8093de7f0869f89bf89ccebd | 08c50efb3602ce7d4ff1782351fac8103fd38e12 | refs/heads/main | 2023-03-16T02:01:40.105333 | 2022-09-24T13:51:40 | 2022-09-24T13:51:40 | 497,946,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/usr/bin/python3
"""Module: 100-my_int.
Defines a class that inherits from int.
"""
class MyInt(int):
"""MyInt inheriting from int
and alters the behaviour of __eq__ and
__ne__ methods
"""
def __eq__(self, other):
"""Equal to returns not equal to."""
return super().__ne__(other)
def __ne__(self, other):
"""Not equal to returns equal to."""
return super().__eq__(other)
| [
"rhysmwangi96@gmail.com"
] | rhysmwangi96@gmail.com |
807e44571462068ec800b4c50f9971c9cb9ca44e | 99921fdadad851381cb3181389bbaec3afa216f4 | /src/process_data.py | 4f1fc60c9d4c951b2a896e3e2f2870f96dafe789 | [] | no_license | 0ncorhynchus/brcn | 3352e6cf7da502baba86fe8393687d6d05053e53 | c58379dd2e1d4863fc2ecc9c8817cae491f54012 | refs/heads/master | 2020-04-08T14:23:39.829433 | 2018-12-07T00:07:22 | 2018-12-07T00:07:22 | 159,435,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | import cv2
import numpy as np
import os
from yuv import *
import tensorflow as tf
from example import encode
video_size = (176, 144)
spatial_size, spatial_stride = (44, 14)
temporal_size, temporal_stride = (22, 8)
blur_size = 2
scale = 2
def generate_low_resolution(image):
(height, width) = image.shape
blur = cv2.GaussianBlur(image, (5, 5), blur_size)
small = cv2.resize(blur, (width//scale, height//scale), interpolation = cv2.INTER_CUBIC)
return cv2.resize(small, (width, height), interpolation = cv2.INTER_CUBIC)
def extract(movie, n, i, j):
return movie[n:n+temporal_size, i:i+spatial_size, j:j+spatial_size]
def main(filename):
reader = YV12Reader(video_size, filename)
print('Loading frames from "{}".'.format(filename))
high_reso_frames = reader.read_raw_frames()['Y']
low_reso_frames = np.array(list(map(generate_low_resolution, high_reso_frames)))
num_frames = len(high_reso_frames)
basename = os.path.basename(filename)
outfile = '{}.tfrecord'.format(os.path.splitext(basename)[0])
print('Writing training data to "{}".'.format(outfile))
(height, width) = video_size
with tf.python_io.TFRecordWriter(outfile) as writer:
for seq in range(0, num_frames-temporal_size+1, temporal_stride):
for i in range(0, width-spatial_size+1, spatial_stride):
for j in range(0, height-spatial_size+1, spatial_stride):
hr_sample = extract(high_reso_frames, seq, i, j)
lr_sample = extract(low_reso_frames, seq, i, j)
writer.write(encode(lr_sample, hr_sample).SerializeToString());
if __name__ == '__main__':
import sys
main(sys.argv[1])
| [
"salmon.bioinformatics@gmail.com"
] | salmon.bioinformatics@gmail.com |
9933283ab2b775642801fc8a449775d294696780 | d94b4a5bc2102c44fc94febdd20f9f4f68539f54 | /Assignment-7/PoC/Decrypt-Exec-Hello.py | fa6a02e0f4862a0eb67e89100d72e4b458acbcd0 | [] | no_license | andrea-perfetti/SLAE32-Exam | 6bf997f1a9b7d6a6eeea553f908d2e866549251f | 01062ec7ae8b31bf1f27368ab42592bae60c1517 | refs/heads/main | 2023-03-22T10:12:57.258489 | 2021-03-15T21:58:53 | 2021-03-15T21:58:53 | 344,601,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | ########################################
# Filename: Encoder.py
# Author : Andrea Perfetti
# SLAE ID : SLAE - 1547
#:::::::::::::::::::::::::::::::::::::::
# USAGE
# Add your shellcode in the 'shellcode' variable following the example
# then run the script to get the encoded version.
# Copy it into Decrypt-Exec.py and follow related instructions.
########################################
import string
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from ctypes import *
# Update here the shellcode to be decrypted
encrypted_payload = b"K3J4Q8JHHPJIFS0VgAAAAABgS8XMbASpd5TeZEn8lhdF8eqC1DtTWJHJqDSgPUipFTN8H0yil4Csly27wyPsLADNI-KWLOlvL5U1NjoNM9muzpHmrOUsJxwKebHf9xsqcH__cAhYUZqhn1L6xY3ZuVZ4dd-7"
salt = encrypted_payload[:16]
cipher_text = encrypted_payload[16:]
plain_pwd = input("Enter the password: ")
kdf = PBKDF2HMAC(
algorithm = hashes.SHA256(),
length = 32,
salt = salt,
iterations = 1000,
)
key = base64.urlsafe_b64encode(kdf.derive(plain_pwd.encode()))
cipher_suite = Fernet(key)
shellcode_data = cipher_suite.decrypt(cipher_text)
shellcode=create_string_buffer(shellcode_data)
invoke_shellcode = cast(shellcode, CFUNCTYPE(None))
libc = CDLL('libc.so.6')
pagesize = libc.getpagesize()
address = cast(invoke_shellcode, c_void_p).value
address_page = (address // pagesize) * pagesize
for page_start in range(address_page, address+len(shellcode_data), pagesize):
assert libc.mprotect(page_start, pagesize, 0x7) == 0
invoke_shellcode() | [
"dont-delete-me.ko@protonmail.com"
] | dont-delete-me.ko@protonmail.com |
328b84e77f3572ea1cbccf24d81a1b205bb7d20e | e27124070b4603400111e6904b5ccf89a02fc3b6 | /manage.py | b16005ea12184873baf0c6cf5c8e53d76270ec9c | [] | no_license | Kamigami55/eason-web | 1e4ee7a24add696a16c92069caa94419ab3a0d64 | fcbea248b4a661a0be07d6652edb6226a96ecc05 | refs/heads/master | 2021-04-27T06:43:29.080862 | 2018-02-26T19:57:47 | 2018-02-26T19:57:47 | 122,618,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "easonweb.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"samkami@icloud.com"
] | samkami@icloud.com |
de7cace9fcdd435663d1c5929d8486dd6f76c4f8 | 1cfc389ea896917b9e37942807ff28775b254cbb | /Mask_RCNN/mrcnn/config.py | d8d72ae842d249e25b29aea6623bd90518d1f5e4 | [
"MIT"
] | permissive | lykhahaha/Lyft_Perception_challenge | c047fd3dc6dc609e9abe2637c24da24d7976677c | e34e32beb2195d80bd27a6990cecfb030f65c686 | refs/heads/master | 2020-03-19T08:02:12.908056 | 2018-06-03T17:49:56 | 2018-06-03T17:49:56 | 136,170,019 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,402 | py | """
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import math
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. For CPU training, use 1
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101
# BACKBONE = "resnet101"
BACKBONE = "resnet50"
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more propsals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after non-maximum supression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and inferencing
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
# Howver, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimzer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when inferencing
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
| [
"ameya555@gmail.com"
] | ameya555@gmail.com |
a4c898b3529cb20f808c845d565a55d25d400d66 | 20378aeeab98be5d6076c055127c4af90c96f5fe | /catastrophe_clock/serializers.py | 258ae131af9830a23b5de346a9677a1957181828 | [
"MIT"
] | permissive | MCGallaspy/catastrophe-clock | 03fb9c3a6c33f92c67de012eaced355cbc20f8d7 | c7cd7def54c76fa7893b502433b211edbb9b97aa | refs/heads/master | 2021-01-10T17:00:56.190533 | 2015-12-28T05:38:27 | 2015-12-28T05:38:27 | 48,357,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Catastrophe
class CatastropheSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Catastrophe
fields = ('name', 'description', 'arrival_date', 'url', 'more_info', )
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = get_user_model()
fields = ('username', 'is_staff', )
| [
"gallaspy.michael@gmail.com"
] | gallaspy.michael@gmail.com |
2ae8c6ff55e061d11b944f6c13ce94d8714d9c5e | a815e987868355a80a7117a1a326132863d2e4ec | /dingdian/spiders/dingdian.py | d361abb54e2230581914c2adf1523266a8b6f83f | [] | no_license | CvnYv/Scrapy-dingdian | aec42eac2b0fc8ca84391be3e265b41ee06a8f77 | f99111b849f465f9a2d8f102619c7ac4d6ac2ac3 | refs/heads/master | 2021-01-20T08:07:25.066300 | 2017-05-04T04:11:21 | 2017-05-04T04:11:21 | 90,099,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,695 | py | #! usr/bin/env python3
# -*- coding:utf-8 -*-
import scrapy
import re
from dingdian.items import DingdianItem
from scrapy.http import Request
from bs4 import BeautifulSoup
class DingdianSpider(scrapy.Spider):
name = 'dingdian' # 项目名称,必须是唯一的
allowed_domains = ['23us.com'] # 域名
start_urls = [] # 构建各个类型首页url的列表
for i in range(1, 11):
url = 'http://www.23us.com/class/%d_1.html' % i
start_urls.append(url)
def parse(self, response):
'''
解析每一个类型的首页url并返回这个类型的所有页面url
:param response:
:return:
'''
pattern = '>1/(\d+)<'
html = response.text
max_num = re.findall(pattern, html)[0] # 构建re获取各个类型的最大页面数
prefix_url = str(response.url)[0:28]
for num in range(1, int(max_num)+1):
url = prefix_url + str(num) + '.html' # 构建每一页的完整url
yield Request(url, callback=self.get_url)
# 将页面的response交给get_url()函数处理
def get_url(self, response):
'''
根据每个页面的url找到这个页面中所有书籍的简介url
:param response:
:return:
'''
# pattern1 = 'title="(.*?)简介"' # name的正则表达式(偷懒用re)
pattern2 = 'a href="(.*?)" title=' # 构造简介的url的正则表达式
html = response.text
# names = re.findall(pattern1, html)
urls = re.findall(pattern2, html)
for u in urls:
yield Request(u, callback=self.get_all) # 将简介的url交给get_all处理
def get_all(self, response):
'''
处理页面,匹配各项内容并返回item字典
:param response:
:return:
'''
item = DingdianItem()
html = response.text
name = BeautifulSoup(html, 'lxml').find('h1').get_text().split()[0]
novelurl = BeautifulSoup(html, 'lxml').find('a', class_='read')['href']
bs_table = BeautifulSoup(html, 'lxml').find('table')
author = bs_table.find_all('td')[1].get_text().split()[0]
status = bs_table.find_all('td')[2].get_text().split()[0]
number = bs_table.find_all('td')[4].get_text().split()[0][:-1]
category = bs_table.find_all('td')[0].get_text().split()[0]
name_id = re.findall('down/(\d+)', html)[0]
item['name'] = name
item['author'] = author
item['novelurl'] = novelurl
item['status'] = status
item['number'] = number
item['category'] = category
item['name_id'] = name_id
return item | [
"cvnyv21@163.com"
] | cvnyv21@163.com |
f31a50aaf5650420eddc7d4b4b4b0b17edbae209 | 3fd7adb56bf78d2a5c71a216d0ac8bc53485b034 | /experiments/cem_exp/benchmarks_goalimage/hor15_easygoal/mod_hyper.py | 1060f0f55147f0e67cf53d1bef3020b1c04858e0 | [] | no_license | anair13/lsdc | 6d1675e493f183f467cab0bfe9b79a4f70231e4e | 7760636bea24ca0231b4f99e3b5e8290c89b9ff5 | refs/heads/master | 2021-01-19T08:02:15.613362 | 2017-05-12T17:13:54 | 2017-05-12T17:13:54 | 87,596,344 | 0 | 0 | null | 2017-04-08T00:18:55 | 2017-04-08T00:18:55 | null | UTF-8 | Python | false | false | 640 | py |
current_dir = '/'.join(str.split(__file__, '/')[:-1])
bench_dir = '/'.join(str.split(__file__, '/')[:-2])
from lsdc.algorithm.policy.cem_controller_goalimage import CEM_controller
policy = {
'type' : CEM_controller,
'use_goalimage':"",
'low_level_ctrl': None,
'usenet': True,
'nactions': 5,
'repeat': 3,
'initial_std': 7,
'netconf': current_dir + '/conf.py',
'use_first_plan': False, # execute MPC instead using firs plan
'iterations': 5,
'load_goal_image':'make_easy_goal',
}
agent = {
'T': 25,
'use_goalimage':"",
'start_confs': bench_dir + '/make_easy_goal/configs_easy_goal'
} | [
"frederik.ebert@mytum.de"
] | frederik.ebert@mytum.de |
7ec6fad1a11bda30f343039f927bce7454d86242 | 59c83b78a52f8a04b4b0a36164281b94d8690e51 | /hparams/data_hparams.py | 195dc65ae16a862ce6b4bc664d843bf5c7c17182 | [] | no_license | Interfish/deep_hunter | e387d2bd1da376f92347109ddad2286eb9146901 | cb313a0375bf84c1da2d59e0025b90b944b2b1be | refs/heads/master | 2021-06-18T22:32:03.482534 | 2019-06-15T11:53:33 | 2019-06-15T11:53:33 | 191,005,769 | 0 | 1 | null | 2021-03-25T22:41:00 | 2019-06-09T12:49:26 | Python | UTF-8 | Python | false | false | 91 | py | def params():
return {
# database path
'db_path' : 'data.sqlite3'
} | [
"330508653@qq.com"
] | 330508653@qq.com |
61a2273792e89593374119c156d6c173f0dd59e9 | 8aa7defd57ab4dbf29b06bef3baef2736e1d453c | /analysis_2020/Snakefile | 61895366288a9d406b248022329dbbb88e64f912 | [] | no_license | paraslonic/g7c_analysis | 0d0ac5d3eddd7b72a490b5d87c07b4280fd0d5fc | 4e05b80b106ae12ae38d1b5cd24b4679c0755572 | refs/heads/master | 2021-01-08T02:19:50.777917 | 2020-02-26T09:26:21 | 2020-02-26T09:26:21 | 241,883,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | configfile: 'config.yml'
GENOMES, = glob_wildcards("fna/{genome}.fna")
#localrules: all, make_path, calc_complexity
def aggregate_input(wildcards):
return expand("Results/ortho/coreogs_aligned/{og}.fasta.iqtree",
og=glob_wildcards(os.path.join("Results/ortho/coreogs/", "{og}.fasta")).og )
rule all:
input: aggregate_input
rule tree_for_core:
input:
"Results/ortho/coreogs_aligned/{og}.fasta"
output:
"Results/ortho/coreogs_aligned/{og}.fasta.iqtree"
shell:
"{config[iqtree.bin]} -s {input}"
rule align_core:
input:
"Results/ortho/coreogs/{og}.fasta"
output:
"Results/ortho/coreogs_aligned/{og}.fasta"
shell:
"scripts/./muscle -in {input} -out {output} -quiet"
checkpoint makeCoreOGfastas:
input:
og="Results/ortho/Orthogroups.txt",
coreOg="Results/ortho/SingleCopyOrthogroups.txt",
ffns=expand("ffn/{qu}.fasta", qu=GENOMES)
output:
coreog=directory("Results/ortho/coreogs/")
shell:
"""
cat ffn/*.fasta > tmp/all_genes.fasta
mkdir -p Results/ortho/coreogs
perl scripts/splitToOg.pl Results/ortho/Orthogroups.txt tmp/all_genes.fasta Results/ortho/coreogs Results/ortho/SingleCopyOrthogroups.txt
"""
rule orthofinder:
input:
expand("faa/{qu}.fasta", qu=GENOMES)
output:
"Results/ortho/Orthogroups.txt"
threads: 4
conda: "envs/ortho.yaml"
log: "log_of.txt"
shell:
"bash scripts/run_orthofinder.sh {threads} > {log}"
rule prokka:
input:
ancient("fna/{qu}.fna")
output:
directory("prokka/{qu}")
threads: 4
conda: "envs/prokka.yaml"
shell:
"""
name=$(basename {input} .fna);
prokka --cpus {threads} --outdir {output} --force --prefix $name --locustag $name {input}
"""
rule make_faa:
input:
ancient("prokka/{qu}")
output:
"faa/{qu}.fasta"
conda: "envs/scripts.yaml"
shell:
"""
name=$(basename {input})
python3 scripts/GBfaa.py -gb {input}/$name.gbk > {output}
"""
| [
"paraslonic@gmail.com"
] | paraslonic@gmail.com | |
844a927a21cc0f2bb904c04382157d0ca5130018 | 94e0f438eb3c23851d9a9ef4a0def0197647fafd | /pombru/pushsafer.py | 7d3b75446f928b8fb7b020439ec8fcafc0922b74 | [] | no_license | miklospocsaji/pombru | 7f95f8bcd9fece2805e7c69f9ad2910354e71010 | 97d21c829383465526b4d7ed0dc49170ada9fbf2 | refs/heads/master | 2021-06-03T07:32:13.408103 | 2021-05-22T08:58:19 | 2021-05-22T08:58:19 | 97,268,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | # pushsafer 0.2
#
# Copyright (C) 2016 Kevin Siml <info@appzer.de>
# forked from https://github.com/Thibauth/python-pushover
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import requests
__all__ = ["init", "Client", "MessageRequest",
"InitError"]
MESSAGE_URL = "https://www.pushsafer.com/api"
PRIVATEKEY = None
def init(privatekey):
global PRIVATEKEY
PRIVATEKEY = privatekey
class InitError(Exception):
def __str__(self):
return ("No privatekey provided. Init the pushsafer module by "
"calling the init function")
class Request:
def __init__(self, request_type, url, payload):
if not PRIVATEKEY:
raise InitError
payload["k"] = PRIVATEKEY
request = getattr(requests, request_type)(url, verify=False, params=payload)
try:
self.answer = request.json()
except ValueError as ve:
self.answer = str(ve)
def __str__(self):
return str(self.answer)
class MessageRequest(Request):
def __init__(self, payload):
Request.__init__(self, "post", MESSAGE_URL, payload)
class Client:
def __init__(self, device=None, privatekey=None):
self.devices = []
def send_message(self, message, title, device, icon, sound, vibration, url, urltitle, time2live, picture1, picture2, picture3):
payload = {"m": message}
if device:
payload["d"] = device
if icon:
payload["i"] = icon
if sound:
payload["s"] = sound
if vibration:
payload["v"] = vibration
if title:
payload["t"] = title
if url:
payload["u"] = url
if urltitle:
payload["ut"] = urltitle
if time2live:
payload["l"] = time2live
if picture1:
payload["p"] = picture1
if picture2:
payload["p2"] = picture2
if picture3:
payload["p3"] = picture3
return MessageRequest(payload)
| [
"pocsaji.miklos@alerant.hu"
] | pocsaji.miklos@alerant.hu |
bf155f5ea309916b064684b3ae20bdd51831ef73 | 142da5dac7715414517f37ad2fce1731f8a28dcb | /lib/python3.6/site-packages/pyxb/bundles/wssplat/raw/soap12.py | 00e916a98a3d291f3154d78f99efe8e205da6288 | [] | no_license | t6fore/network-automation | fcd0b015922b654c69e38184383bae7bec42011c | a5124053b86334f714f2372ab44e214e81d78ce4 | refs/heads/master | 2022-12-10T18:08:36.482371 | 2022-12-04T14:37:40 | 2022-12-04T14:37:40 | 229,970,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,222 | py | # ./pyxb/bundles/wssplat/raw/soap12.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:d8b77ba08b421cd2387db6ece722305a4cdf2cdc
# Generated 2017-09-03 06:16:45.433027 by PyXB version 1.2.6 using Python 2.7.12.final.0
# Namespace http://www.w3.org/2003/05/soap-envelope
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5edd6a50-9099-11e7-b77e-3497f68b2e96')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.6'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# A holder for module-level binding classes so we can access them from
# inside class definitions where property names may conflict.
_module_typeBindings = pyxb.utils.utility.Object()
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import pyxb.binding.xml_
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.w3.org/2003/05/soap-envelope', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser. You might pass the URI from which
    the document was obtained.
    """
    # When the runtime is not configured for SAX-style parsing, fall back
    # to the DOM-based path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        document = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(document.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    parser = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    content_handler = parser.getContentHandler()
    # The SAX parser consumes bytes; encode text input first.
    encoded = xml_text
    if isinstance(encoded, _six.text_type):
        encoded = encoded.encode(pyxb._InputEncoding)
    parser.parse(io.BytesIO(encoded))
    return content_handler.rootObject()
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    # Substitute this module's namespace when the caller did not supply one.
    ns = Namespace.fallbackNamespace() if default_namespace is None else default_namespace
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, ns)
# Atomic simple type: {http://www.w3.org/2003/05/soap-envelope}faultcodeEnum
class faultcodeEnum (pyxb.binding.datatypes.QName, pyxb.binding.basis.enumeration_mixin):
    """An atomic simple type.

    QName-valued enumeration of the SOAP 1.2 fault codes
    (DataEncodingUnknown, MustUnderstand, Receiver, Sender,
    VersionMismatch), per the enumeration facet registered below."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'faultcodeEnum')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 112, 2)
    _Documentation = None
# Register the permitted QName values as an enumeration facet on the type.
faultcodeEnum._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=faultcodeEnum, enum_prefix=None)
faultcodeEnum._CF_enumeration.addEnumeration(value=pyxb.namespace.ExpandedName(Namespace, 'DataEncodingUnknown'), tag=None)
faultcodeEnum._CF_enumeration.addEnumeration(value=pyxb.namespace.ExpandedName(Namespace, 'MustUnderstand'), tag=None)
faultcodeEnum._CF_enumeration.addEnumeration(value=pyxb.namespace.ExpandedName(Namespace, 'Receiver'), tag=None)
faultcodeEnum._CF_enumeration.addEnumeration(value=pyxb.namespace.ExpandedName(Namespace, 'Sender'), tag=None)
faultcodeEnum._CF_enumeration.addEnumeration(value=pyxb.namespace.ExpandedName(Namespace, 'VersionMismatch'), tag=None)
faultcodeEnum._InitializeFacetMap(faultcodeEnum._CF_enumeration)
# Publish the type in the namespace's typeBinding category and on the
# module-level holder so inner class scopes can reach it.
Namespace.addCategoryObject('typeBinding', 'faultcodeEnum', faultcodeEnum)
_module_typeBindings.faultcodeEnum = faultcodeEnum
# Complex type {http://www.w3.org/2003/05/soap-envelope}Envelope with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}Envelope with content type ELEMENT_ONLY
class Envelope_ (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the SOAP 1.2 Envelope complex type: an optional Header
    followed by a mandatory Body (content model built by _BuildAutomaton)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Envelope')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 28, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}Header uses Python identifier Header
    __Header = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Header'), 'Header', '__httpwww_w3_org200305soap_envelope_Envelope__httpwww_w3_org200305soap_envelopeHeader', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 36, 2), )

    Header = property(__Header.value, __Header.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Body uses Python identifier Body
    __Body = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Body'), 'Body', '__httpwww_w3_org200305soap_envelope_Envelope__httpwww_w3_org200305soap_envelopeBody', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 49, 2), )

    Body = property(__Body.value, __Body.set, None, None)

    # Any attribute not in this namespace is accepted with lax processing.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://www.w3.org/2003/05/soap-envelope'))
    _ElementMap.update({
        __Header.name() : __Header,
        __Body.name() : __Body
    })
    _AttributeMap.update({

    })
_module_typeBindings.Envelope_ = Envelope_
Namespace.addCategoryObject('typeBinding', 'Envelope', Envelope_)
# Complex type {http://www.w3.org/2003/05/soap-envelope}Header with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}Header with content type ELEMENT_ONLY
class Header_ (pyxb.binding.basis.complexTypeDefinition):
    """
    Elements replacing the wildcard MUST be namespace qualified, but can be in the targetNamespace
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Header')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 37, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Foreign-namespace attributes are accepted laxly; element content is an
    # open wildcard (see _HasWildcardElement and the automaton built later).
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://www.w3.org/2003/05/soap-envelope'))
    _HasWildcardElement = True
    _ElementMap.update({

    })
    _AttributeMap.update({

    })
_module_typeBindings.Header_ = Header_
Namespace.addCategoryObject('typeBinding', 'Header', Header_)
# Complex type {http://www.w3.org/2003/05/soap-envelope}Body with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}Body with content type ELEMENT_ONLY
class Body_ (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the SOAP 1.2 Body complex type: open element content
    (wildcard) with laxly-processed foreign attributes."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Body')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 50, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://www.w3.org/2003/05/soap-envelope'))
    _HasWildcardElement = True
    _ElementMap.update({

    })
    _AttributeMap.update({

    })
_module_typeBindings.Body_ = Body_
Namespace.addCategoryObject('typeBinding', 'Body', Body_)
# Complex type {http://www.w3.org/2003/05/soap-envelope}Fault with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}Fault with content type ELEMENT_ONLY
class Fault_ (pyxb.binding.basis.complexTypeDefinition):
    """
    Fault reporting structure
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Fault')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 72, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}Code uses Python identifier Code
    __Code = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Code'), 'Code', '__httpwww_w3_org200305soap_envelope_Fault__httpwww_w3_org200305soap_envelopeCode', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 79, 6), )

    Code = property(__Code.value, __Code.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Reason uses Python identifier Reason
    __Reason = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reason'), 'Reason', '__httpwww_w3_org200305soap_envelope_Fault__httpwww_w3_org200305soap_envelopeReason', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 80, 6), )

    Reason = property(__Reason.value, __Reason.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Node uses Python identifier Node
    __Node = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Node'), 'Node', '__httpwww_w3_org200305soap_envelope_Fault__httpwww_w3_org200305soap_envelopeNode', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 81, 6), )

    Node = property(__Node.value, __Node.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Role uses Python identifier Role
    __Role = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Role'), 'Role', '__httpwww_w3_org200305soap_envelope_Fault__httpwww_w3_org200305soap_envelopeRole', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 82, 3), )

    Role = property(__Role.value, __Role.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Detail uses Python identifier Detail
    __Detail = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Detail'), 'Detail', '__httpwww_w3_org200305soap_envelope_Fault__httpwww_w3_org200305soap_envelopeDetail', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 83, 6), )

    Detail = property(__Detail.value, __Detail.set, None, None)

    _ElementMap.update({
        __Code.name() : __Code,
        __Reason.name() : __Reason,
        __Node.name() : __Node,
        __Role.name() : __Role,
        __Detail.name() : __Detail
    })
    _AttributeMap.update({

    })
_module_typeBindings.Fault_ = Fault_
Namespace.addCategoryObject('typeBinding', 'Fault', Fault_)
# Complex type {http://www.w3.org/2003/05/soap-envelope}faultreason with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}faultreason with content type ELEMENT_ONLY
class faultreason (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the fault Reason container: one or more Text elements
    (the element is declared repeatable; see the ElementDeclaration flag)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'faultreason')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 87, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}Text uses Python identifier Text
    # (fourth argument True marks the element as plural: Text is a collection)
    __Text = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Text'), 'Text', '__httpwww_w3_org200305soap_envelope_faultreason_httpwww_w3_org200305soap_envelopeText', True, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 89, 3), )

    Text = property(__Text.value, __Text.set, None, None)

    _ElementMap.update({
        __Text.name() : __Text
    })
    _AttributeMap.update({

    })
_module_typeBindings.faultreason = faultreason
Namespace.addCategoryObject('typeBinding', 'faultreason', faultreason)
# Complex type {http://www.w3.org/2003/05/soap-envelope}reasontext with content type SIMPLE
# Complex type {http://www.w3.org/2003/05/soap-envelope}reasontext with content type SIMPLE
class reasontext (pyxb.binding.basis.complexTypeDefinition):
    """Binding for a human-readable fault reason: string content with a
    required xml:lang attribute."""
    _TypeDefinition = pyxb.binding.datatypes.string
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'reasontext')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 94, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.string

    # Attribute {http://www.w3.org/XML/1998/namespace}lang uses Python identifier lang
    __lang = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(pyxb.namespace.XML, 'lang'), 'lang', '__httpwww_w3_org200305soap_envelope_reasontext_httpwww_w3_orgXML1998namespacelang', pyxb.binding.xml_.STD_ANON_lang, required=True)
    __lang._DeclarationLocation = None
    __lang._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 97, 5)

    lang = property(__lang.value, __lang.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __lang.name() : __lang
    })
_module_typeBindings.reasontext = reasontext
Namespace.addCategoryObject('typeBinding', 'reasontext', reasontext)
# Complex type {http://www.w3.org/2003/05/soap-envelope}faultcode with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}faultcode with content type ELEMENT_ONLY
class faultcode (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the fault Code container: a Value (faultcodeEnum) and an
    optional nested Subcode."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'faultcode')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 102, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}Value uses Python identifier Value
    __Value = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Value'), 'Value', '__httpwww_w3_org200305soap_envelope_faultcode_httpwww_w3_org200305soap_envelopeValue', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 104, 6), )

    Value = property(__Value.value, __Value.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Subcode uses Python identifier Subcode
    __Subcode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Subcode'), 'Subcode', '__httpwww_w3_org200305soap_envelope_faultcode_httpwww_w3_org200305soap_envelopeSubcode', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 106, 6), )

    Subcode = property(__Subcode.value, __Subcode.set, None, None)

    _ElementMap.update({
        __Value.name() : __Value,
        __Subcode.name() : __Subcode
    })
    _AttributeMap.update({

    })
_module_typeBindings.faultcode = faultcode
Namespace.addCategoryObject('typeBinding', 'faultcode', faultcode)
# Complex type {http://www.w3.org/2003/05/soap-envelope}subcode with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}subcode with content type ELEMENT_ONLY
class subcode (pyxb.binding.basis.complexTypeDefinition):
    """Binding for a fault Subcode: a QName Value plus an optional further
    nested Subcode (the type is recursive)."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subcode')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 122, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}Value uses Python identifier Value
    __Value = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Value'), 'Value', '__httpwww_w3_org200305soap_envelope_subcode_httpwww_w3_org200305soap_envelopeValue', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 124, 6), )

    Value = property(__Value.value, __Value.set, None, None)

    # Element {http://www.w3.org/2003/05/soap-envelope}Subcode uses Python identifier Subcode
    __Subcode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Subcode'), 'Subcode', '__httpwww_w3_org200305soap_envelope_subcode_httpwww_w3_org200305soap_envelopeSubcode', False, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 126, 6), )

    Subcode = property(__Subcode.value, __Subcode.set, None, None)

    _ElementMap.update({
        __Value.name() : __Value,
        __Subcode.name() : __Subcode
    })
    _AttributeMap.update({

    })
_module_typeBindings.subcode = subcode
Namespace.addCategoryObject('typeBinding', 'subcode', subcode)
# Complex type {http://www.w3.org/2003/05/soap-envelope}detail with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}detail with content type ELEMENT_ONLY
class detail (pyxb.binding.basis.complexTypeDefinition):
    """Binding for fault Detail: open element content (wildcard) carrying
    application-specific fault information."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'detail')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 132, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://www.w3.org/2003/05/soap-envelope'))
    _HasWildcardElement = True
    _ElementMap.update({

    })
    _AttributeMap.update({

    })
_module_typeBindings.detail = detail
Namespace.addCategoryObject('typeBinding', 'detail', detail)
# Complex type {http://www.w3.org/2003/05/soap-envelope}NotUnderstoodType with content type EMPTY
# Complex type {http://www.w3.org/2003/05/soap-envelope}NotUnderstoodType with content type EMPTY
class NotUnderstoodType (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the NotUnderstood header block type: empty content with a
    required 'qname' attribute naming the header that was not understood."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'NotUnderstoodType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 141, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute qname uses Python identifier qname
    __qname = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'qname'), 'qname', '__httpwww_w3_org200305soap_envelope_NotUnderstoodType_qname', pyxb.binding.datatypes.QName, required=True)
    __qname._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 142, 4)
    __qname._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 142, 4)

    qname = property(__qname.value, __qname.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __qname.name() : __qname
    })
_module_typeBindings.NotUnderstoodType = NotUnderstoodType
Namespace.addCategoryObject('typeBinding', 'NotUnderstoodType', NotUnderstoodType)
# Complex type {http://www.w3.org/2003/05/soap-envelope}SupportedEnvType with content type EMPTY
# Complex type {http://www.w3.org/2003/05/soap-envelope}SupportedEnvType with content type EMPTY
class SupportedEnvType (pyxb.binding.basis.complexTypeDefinition):
    """Binding for a supported-envelope entry in an Upgrade block: empty
    content with a required 'qname' attribute naming the supported envelope."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'SupportedEnvType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 146, 154)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute qname uses Python identifier qname
    __qname = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'qname'), 'qname', '__httpwww_w3_org200305soap_envelope_SupportedEnvType_qname', pyxb.binding.datatypes.QName, required=True)
    __qname._DeclarationLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 147, 4)
    __qname._UseLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 147, 4)

    qname = property(__qname.value, __qname.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __qname.name() : __qname
    })
_module_typeBindings.SupportedEnvType = SupportedEnvType
Namespace.addCategoryObject('typeBinding', 'SupportedEnvType', SupportedEnvType)
# Complex type {http://www.w3.org/2003/05/soap-envelope}UpgradeType with content type ELEMENT_ONLY
# Complex type {http://www.w3.org/2003/05/soap-envelope}UpgradeType with content type ELEMENT_ONLY
class UpgradeType (pyxb.binding.basis.complexTypeDefinition):
    """Binding for the Upgrade fault detail: a list of SupportedEnvelope
    entries advertising the envelope versions the node accepts."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'UpgradeType')
    _XSDLocation = pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 151, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.w3.org/2003/05/soap-envelope}SupportedEnvelope uses Python identifier SupportedEnvelope
    # (fourth argument True marks the element as plural: a collection)
    __SupportedEnvelope = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SupportedEnvelope'), 'SupportedEnvelope', '__httpwww_w3_org200305soap_envelope_UpgradeType_httpwww_w3_org200305soap_envelopeSupportedEnvelope', True, pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 153, 3), )

    SupportedEnvelope = property(__SupportedEnvelope.value, __SupportedEnvelope.set, None, None)

    _ElementMap.update({
        __SupportedEnvelope.name() : __SupportedEnvelope
    })
    _AttributeMap.update({

    })
_module_typeBindings.UpgradeType = UpgradeType
Namespace.addCategoryObject('typeBinding', 'UpgradeType', UpgradeType)
# Module-level element bindings: each global schema element gets an element
# object tied to its type binding and is registered so document parsing can
# resolve the root tag to a binding class.
Envelope = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Envelope'), Envelope_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 27, 2))
Namespace.addCategoryObject('elementBinding', Envelope.name().localName(), Envelope)

Header = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Header'), Header_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 36, 2))
Namespace.addCategoryObject('elementBinding', Header.name().localName(), Header)

Body = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Body'), Body_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 49, 2))
Namespace.addCategoryObject('elementBinding', Body.name().localName(), Body)

Fault = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Fault'), Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 71, 2))
Namespace.addCategoryObject('elementBinding', Fault.name().localName(), Fault)

NotUnderstood = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'NotUnderstood'), NotUnderstoodType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 140, 2))
Namespace.addCategoryObject('elementBinding', NotUnderstood.name().localName(), NotUnderstood)

Upgrade = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Upgrade'), UpgradeType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 150, 2))
Namespace.addCategoryObject('elementBinding', Upgrade.name().localName(), Upgrade)

# Bind the Header/Body child elements into the Envelope_ scope so its content
# model can resolve them by tag.
Envelope_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Header'), Header_, scope=Envelope_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 36, 2)))
Envelope_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Body'), Body_, scope=Envelope_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 49, 2)))
def _BuildAutomaton ():
    """Build the finite-automaton content model for Envelope_:
    optional Header (counter 0..1) followed by a required Body."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    counters = set()
    # Header occurs at most once.
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 30, 6))
    counters.add(cc_0)
    states = []
    # st_0: Header (not accepting on its own; Body must still follow).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(Envelope_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Header')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 30, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: Body (accepting state).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(Envelope_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Body')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 31, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
Envelope_._Automaton = _BuildAutomaton()
def _BuildAutomaton_ ():
    """Build the content-model automaton for Header_: zero or more wildcard
    elements (any namespace, lax processing)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    counters = set()
    # Unbounded repetition (max=None).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 44, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 44, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # Third argument True: the empty sequence is accepted.
    return fac.Automaton(states, counters, True, containing_state=None)
Header_._Automaton = _BuildAutomaton_()
def _BuildAutomaton_2 ():
    """Build the content-model automaton for Body_: zero or more wildcard
    elements (any namespace, lax processing)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac

    counters = set()
    # Unbounded repetition (max=None).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 52, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 52, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # Third argument True: the empty sequence is accepted.
    return fac.Automaton(states, counters, True, containing_state=None)
Body_._Automaton = _BuildAutomaton_2()
# Bind the Fault child elements (Code, Reason, Node, Role, Detail) into the
# Fault_ scope so its content-model automaton can resolve them by tag.
Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Code'), faultcode, scope=Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 79, 6)))

Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reason'), faultreason, scope=Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 80, 6)))

Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Node'), pyxb.binding.datatypes.anyURI, scope=Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 81, 6)))

Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Role'), pyxb.binding.datatypes.anyURI, scope=Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 82, 3)))

Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Detail'), detail, scope=Fault_, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 83, 6)))
def _BuildAutomaton_3 ():
    """Build the content-model automaton for Fault_:
    required Code, required Reason, then optional Node, Role, Detail in
    order (each optional occurrence tracked by a 0..1 counter)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac

    counters = set()
    # cc_0: Node 0..1; cc_1: Role 0..1; cc_2: Detail 0..1.
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 81, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 82, 3))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 83, 6))
    counters.add(cc_2)
    states = []
    # st_0: Code (not accepting; Reason must follow).
    final_update = None
    symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Code')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 79, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: Reason (accepting; trailing elements are all optional).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reason')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 80, 6))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # st_2: Node (accepting).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Node')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 81, 6))
    st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    # st_3: Role (accepting).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Role')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 82, 3))
    st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    # st_4: Detail (accepting).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Detail')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 83, 6))
    st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    # Transitions: Code -> Reason -> {Node | Role | Detail} with each optional
    # element able to be skipped forward but never revisited.
    transitions = []
    transitions.append(fac.Transition(st_1, [
         ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
         ]))
    transitions.append(fac.Transition(st_3, [
         ]))
    transitions.append(fac.Transition(st_4, [
         ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_4._set_transitionSet(transitions)
    # Third argument False: the empty sequence is NOT accepted (Code/Reason required).
    return fac.Automaton(states, counters, False, containing_state=None)
Fault_._Automaton = _BuildAutomaton_3()
# Bind the Text child element into the faultreason scope.
faultreason._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Text'), reasontext, scope=faultreason, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 89, 3)))

def _BuildAutomaton_4 ():
    """Build the content-model automaton for faultreason: one or more Text
    elements (self-loop on the single accepting state; empty not accepted)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac

    counters = set()
    states = []
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(faultreason._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Text')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 89, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
         ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
faultreason._Automaton = _BuildAutomaton_4()
# Bind the Value and Subcode child elements into the faultcode scope.
faultcode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Value'), faultcodeEnum, scope=faultcode, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 104, 6)))

faultcode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Subcode'), subcode, scope=faultcode, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 106, 6)))

def _BuildAutomaton_5 ():
    """Build the content-model automaton for faultcode: required Value
    followed by an optional Subcode (0..1 counter)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac

    counters = set()
    # Subcode occurs at most once.
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 106, 6))
    counters.add(cc_0)
    states = []
    # st_0: Value (accepting; Subcode is optional).
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(faultcode._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Value')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 104, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # st_1: Subcode (accepting).
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(faultcode._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Subcode')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 106, 6))
    st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_1, [
         ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
faultcode._Automaton = _BuildAutomaton_5()
subcode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Value'), pyxb.binding.datatypes.QName, scope=subcode, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 124, 6)))
subcode._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Subcode'), subcode, scope=subcode, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 126, 6)))
def _BuildAutomaton_6 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_6
del _BuildAutomaton_6
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 126, 6))
counters.add(cc_0)
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(subcode._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Value')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 124, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(subcode._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Subcode')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 126, 6))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
subcode._Automaton = _BuildAutomaton_6()
def _BuildAutomaton_7 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_7
del _BuildAutomaton_7
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 134, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 134, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
detail._Automaton = _BuildAutomaton_7()
UpgradeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SupportedEnvelope'), SupportedEnvType, scope=UpgradeType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 153, 3)))
def _BuildAutomaton_8 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_8
del _BuildAutomaton_8
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(UpgradeType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SupportedEnvelope')), pyxb.utils.utility.Location('/tmp/pyxbdist.WUiBAra/PyXB-1.2.6/pyxb/bundles/wssplat/schemas/soap12.xsd', 153, 3))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
UpgradeType._Automaton = _BuildAutomaton_8()
| [
"t6fore@gmail.com"
] | t6fore@gmail.com |
b3ca4e3a59fb16b2b7566c20b240f0eb9bcf93bf | 81d3f5dab24825549a77b53ab6ea01a04e9275dc | /merge_sort.py | 40e049cbd7bb400961526be078b3821dbe1efac5 | [] | no_license | pavel-kostyanov/sorting-algorithms | 7eaa5b7d0fe1c0da69b1b879171610149efb8201 | d1882374006719a343c7ecf24f9834b93fd2e698 | refs/heads/master | 2020-04-03T19:27:23.271531 | 2018-11-04T07:20:10 | 2018-11-04T07:20:10 | 155,524,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | def merge_sort(arr):
if len(arr) <= 1:
return arr
middle_index = len(arr) // 2
left_side = arr[: middle_index]
right_side = arr[middle_index :]
left_sorted = merge_sort(left_side)
right_sorted = merge_sort(right_side)
return merge(left_sorted, right_sorted)
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Uses index cursors instead of `list.pop(0)`, which is O(n) per call and
    also destructively consumed the caller's lists. Ties take the element
    from `right` first, matching the original comparison (`<`).
    """
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One of the two slices is empty; extend with whatever remains.
    result.extend(left[i:])
    result.extend(right[j:])
    return result
# Demo: sort a sample list with merge_sort and print the result.
unsorted_list = [830, 921, 163, 373, 961, 559, 89, 199]
sorted_list = merge_sort(unsorted_list)
print(sorted_list)
| [
"kspavel777@gmail.com"
] | kspavel777@gmail.com |
b178fbf34786963f7e9c098bb41308a7714d2352 | ebf3160dbe77e900ec9381209e2fee7d9bce1eff | /backend/pavement.py | 2470b3ec47bd94d8dcb1987ac543f9d8db34f4cd | [] | no_license | kyleddude007/pasaHero | 928c17619232d0790e41afd40309224d280820a8 | b459de19f5592ad5fafe5c6090bb9750f7cebadf | refs/heads/master | 2021-01-23T02:29:09.057230 | 2013-09-15T16:32:06 | 2013-09-15T16:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | import rethinkdb as r
from optparse import make_option
from paver.easy import *
from datetime import datetime
import os
import json
# Paver option defaults shared by all db* tasks: RethinkDB host/port and
# the application database name.
options (
    dbhost = 'localhost',
    dbport = 28015,
    dbname = 'pasahero'
)
@task
def dbcreate(options):
    """Create pasahero database and tables"""
    conn = r.connect(options.dbhost, options.dbport)
    existing = r.db_list().run(conn)
    if options.dbname not in existing:
        r.db_create(options.dbname).run(conn)
        conn.use(options.dbname)
        # 'migrations' is keyed by file name so applied migrations can be
        # looked up directly.
        for table_name, table_kwargs in (('users', {}), ('migrations', {'primary_key': 'name'})):
            r.table_create(table_name, **table_kwargs).run(conn)
def seed_table(conn, table, dirname, relname='seed'):
    """Upsert the JSON seed data for *table* from <dirname>/<relname>/<table>.json."""
    seed_path = os.path.join(dirname, relname, table + '.json')
    # Close the file deterministically instead of leaking the handle from
    # json.load(open(...)).
    with open(seed_path) as seed_file:
        data = json.load(seed_file)
    r.table(table).insert(data, upsert=True).run(conn)
@task
def dbseed(options):
    """Populate pasahero database with seed data"""
    conn = r.connect(options.dbhost, options.dbport)
    conn.use(options.dbname)
    dirname = os.path.dirname(os.path.abspath(__file__))
    table_dumps = sorted(os.listdir(os.path.join(dirname, 'seed')))
    for dump in table_dumps:
        if dump.endswith('.json'):
            table = dump[:-5]  # strip the '.json' suffix to get the table name
            # The original used two Python-2-only `print` statements, which is
            # a syntax error under Python 3 and split the message over two
            # lines; emit one line with the call form used elsewhere in this
            # file.
            print('Seeding {}'.format(table))
            seed_table(conn, table, dirname)
@task
def dbdrop(options):
    """Drop pasahero database"""
    conn = r.connect(options.dbhost, options.dbport)
    # Only attempt the drop when the database actually exists.
    if options.dbname in r.db_list().run(conn):
        r.db_drop(options.dbname).run(conn)
@task
def dbmigrate(options):
    """Run every not-yet-applied migration script found under migrations/."""
    conn = r.connect(options.dbhost, options.dbport)
    conn.use(options.dbname)
    # File names start with a timestamp, so lexicographic order is
    # chronological order.
    migrations = sorted(os.listdir('migrations'))
    for migration in migrations:
        if migration.endswith('.py'):
            # The 'migrations' table (primary key = file name) records which
            # scripts have already run.
            if not r.table('migrations').get(migration).run(conn):
                print('Running migration {}...'.format(migration))
                with open('migrations/{}'.format(migration), 'r') as f:
                    # Migration scripts expect dbhost/dbport/dbname as globals
                    # (see the scaffold written by generate_migration below).
                    exec(f.read(), dict(dbhost=options.dbhost, dbport=options.dbport, dbname=options.dbname), {})
                r.table('migrations').insert(dict(name=migration)).run(conn)
@task
@cmdopts([
('name=', 'n', 'Name of migration')
])
def generate_migration(options):
"""
Generates a scaffold for a migration
"""
date_str = datetime.utcnow().strftime('%Y-%m-%d.%H-%M-%S')
with open('migrations/{}.{}.py'.format(date_str, options.dbname), 'w') as f:
f.write('''
import rethinkdb as r
conn = r.connect(dbhost, dbport)
conn.use(dbname)
''') | [
"ceassi@yahoo.com"
] | ceassi@yahoo.com |
f2ff21626ec0dde7d1acfc600d26160ee6a50201 | 38fbb33abba27094be67e514dbb4c5a84d27b7cc | /pathogen/cipsite/views.py | c0a04f72e4932fa9c4e1647d6e9adea9e0a14e63 | [] | no_license | MFlores2021/PNIArep | 14a75f501b278fe26cdec7b90e31048cde9d6ef9 | 32feb5dd896db7bb765f5ecca4fea89809a20d57 | refs/heads/master | 2021-06-06T11:24:01.240995 | 2020-05-14T15:40:44 | 2020-05-14T15:40:44 | 102,232,053 | 0 | 0 | null | 2017-09-11T16:02:38 | 2017-09-03T00:50:29 | HTML | UTF-8 | Python | false | false | 507 | py | from django.shortcuts import render
# Static page views: each one simply renders its template with no context.
def index(request):
    return render(request, "index.html")
# def map(request):
#     return render(request, "map.html")
def tables(request):
    return render(request, "tables.html")
def participant(request):
    return render(request, "participant.html")
def publication(request):
    return render(request, "publication.html")
def contact(request):
    return render(request, "contact.html")
def downloadr(request):
    return render(request, "downloadr.html")
"mire_flor3@hotmail.com"
] | mire_flor3@hotmail.com |
58cc862e69dd431adf40230e3329687e7cbee05d | 435218e08c3dc62fbe52a2f59a1ee1e3651ddebc | /word_dict/base_dict.py | 12e04f662e793d42bdcacda35066c5ac5a014e63 | [] | no_license | Wingsdh/WordTranslator | 0041fa34500eee55d53d154daa8489ff3066d28d | a729e58ebd2c7109e61f3e74f46e1d7163994497 | refs/heads/master | 2021-05-17T11:52:11.751005 | 2020-03-31T13:09:04 | 2020-03-31T13:09:04 | 250,759,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | # -*- encoding: utf-8 -*-
"""
-------------------------------------------------
File Name: base_dict.py
Description :
Author : Wings DH
Time: 2020/3/28 5:50 下午
-------------------------------------------------
Change Activity:
2020/3/28: Create
-------------------------------------------------
"""
from abc import ABC, abstractmethod
class WordEntry(ABC):
    """
    A single dictionary entry for a word.
    """

    @property
    def word(self):
        """
        :return: str, the word itself
        """
        return self._word

    @property
    def cn_paraphrase(self):
        """
        :return: str, the Chinese paraphrase
        """
        return self._cn_paraphrase

    @property
    def en_paraphrase(self):
        """
        :return: str, the English paraphrase
        """
        return self._en_paraphrase

    def __init__(self, word, cn_paraphrase, en_paraphrase):
        """
        Args:
            :param word: str, the word
            :param cn_paraphrase: str, Chinese paraphrase
            :param en_paraphrase: str, English paraphrase
        """
        self._word = word
        self._cn_paraphrase = cn_paraphrase
        self._en_paraphrase = en_paraphrase
class WordRecord(ABC):
    """
    A word's record in a dictionary, holding any number of entries.
    """

    @property
    def word(self):
        """
        :return: str, the word this record describes
        """
        return self._word

    @classmethod
    def create(cls, word):
        # Alternate constructor kept for symmetry with BaseDict.create.
        return cls(word)

    def __init__(self, word):
        self._items = []
        self._word = word

    def add_entry(self, word_entry):
        """
        Append a new entry to this record.
        :param word_entry: WordEntry, the entry to add
        :return: None
        """
        if isinstance(word_entry, WordEntry):
            self._items.append(word_entry)
        else:
            raise ValueError('Only inst of WordEntry can be added but {}'.format(type(word_entry)))
class BaseDict(ABC):
    """Abstract interface for a word dictionary backend."""

    @classmethod
    @abstractmethod
    def create(cls, *args, **kwargs):
        """
        Factory constructor.
        :return: BaseDict, a BaseDict instance
        """
        pass

    @abstractmethod
    def refer_to_word(self, word):
        """
        :param word: str, the word to look up
        :return: WordRecord, the record for *word*
        """
        pass
| [
"wingsdh@gmail.com"
] | wingsdh@gmail.com |
e17f92d3d343d5272ea4fbcebd7c5a86df5c6a2d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2811/60768/235290.py | 44942420c792f233946644b79e4acce40a08ea76 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | pAndn = input().split(' ')
# pAndn (parsed above) holds the table size and the number of inserts.
# 'slots' renames the original 'map', which shadowed the builtin of the
# same name.
slots = int(pAndn[0]) * ['']
num = int(pAndn[1])
conflict = False
for i in range(num):
    index = int(input())
    pos = index % len(slots)
    if slots[pos] == '':
        slots[pos] = index
    else:
        # 1-based index of the first insert that collides; stop reading input.
        print(i + 1)
        conflict = True
        break
if not conflict:
    print(-1)
"1069583789@qq.com"
] | 1069583789@qq.com |
dd44247bd638d2aa03261edee5c4c1520992ec10 | 4012189144a34d4e12ac5607d30ddd1c29fbb324 | /ex5-ListOverlap.py | 4aaa0a958e1f64e383c9e25dfe59043048430206 | [
"MIT"
] | permissive | Philthy-Phil/practice-python | abbf41a8ca4af1402b386f3dcee9ac80b9e84377 | c2a7eed4754aa401c04e9d060a7dec5223b12f45 | refs/heads/main | 2023-04-14T01:35:34.199717 | 2021-04-24T07:05:50 | 2021-04-24T07:05:50 | 356,037,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | __author__ = 'phil.ezb'
import random
# hard coded lists
# list_a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# list_b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# random num list
list_a = random.sample(range(0, 100), 20)
list_b = random.sample(range(0, 100), 20)
# set() to prevent duplicates
list_total = set(list_a + list_b)
list_common = []
# loop through total and compare
for num in list_total:
if (num in list_a) and (num in list_b):
list_common.append(num)
# output
print(list_common)
| [
"phil.ziegelbauer@gmx.at"
] | phil.ziegelbauer@gmx.at |
9c1b8ad47ddcb7428095e0a9e4d6082a4f7a15eb | 54ebd7f82e10707de7e3f7729e56025a7d4a211b | /pygen/pygen_src/riscv_instr_stream.py | c02fac36afc05679b20442a9ecb007ddad757973 | [
"Apache-2.0"
] | permissive | bugfreee/riscv-dv | 04ef058ba97570816560e8a6a2489832ff8edd68 | 17eec2d00c6f1d162016902806c73bdbeec6f566 | refs/heads/master | 2022-11-17T14:59:52.444436 | 2020-07-09T13:55:15 | 2020-07-09T13:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,613 | py | """
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import random
from pygen_src.riscv_instr_pkg import riscv_instr_name_t, riscv_instr_format_t,\
riscv_instr_category_t, riscv_reg_t
from pygen_src.isa.riscv_instr import riscv_instr, riscv_instr_ins, cfg
import logging
import sys
class riscv_instr_stream:
    '''
    Base class for RISC-V instruction stream

    An instruction stream here is a queue of RISC-V basic instructions.
    This class also provides some functions to manipulate the instruction stream, like insert a new
    instruction, mix two instruction streams etc.
    '''

    def __init__(self):
        self.instr_list = []
        self.instr_cnt = 0
        self.label = ""
        # User can specify a small group of available registers to generate various hazard condition
        self.avail_regs = []
        # Some additional reserved registers that should not be used as rd register
        # by this instruction stream
        self.reserved_rd = []
        self.hart = 0

    def initialize_instr_list(self, instr_cnt):
        """Reset the stream and pre-allocate *instr_cnt* instruction slots."""
        self.instr_list.clear()
        self.instr_cnt = instr_cnt
        self.create_instr_instance()

    def create_instr_instance(self):
        # Subclasses may override this to choose how slots are populated.
        for i in range(self.instr_cnt):
            instr = riscv_instr()
            self.instr_list.append(instr)

    def insert_instr(self, instr, idx=-1):
        """
        Insert an instruction to the existing instruction stream at the given index.
        When index is -1, the instruction is injected at a random location,
        skipping over atomic instructions (appending at the end if no slot is left).
        """
        current_instr_cnt = len(self.instr_list)
        if idx == -1:
            idx = random.randint(0, current_instr_cnt - 1)
            while self.instr_list[idx].atomic:
                idx += 1
                # Fixed off-by-one: the original bailed out at cnt-1, so the
                # last slot was never considered and could be skipped even
                # when free.
                if idx == current_instr_cnt:
                    self.instr_list.append(instr)
                    return
        elif idx > current_instr_cnt or idx < 0:
            logging.error("Cannot insert instr:%0s at idx %0d", instr.convert2asm(), idx)
        self.instr_list.insert(idx, instr)

    def insert_instr_stream(self, new_instr, idx=-1, replace=0):
        """
        Insert an instruction stream (list of instructions) into the existing stream.
        When idx is -1, a random non-atomic position is picked.
        When replace is 1, the original instruction at the insertion point is
        replaced and its label is carried over to the head of the new stream.
        """
        current_instr_cnt = len(self.instr_list)
        if current_instr_cnt == 0:
            self.instr_list = new_instr
            return
        if idx == -1:
            idx = random.randint(0, current_instr_cnt - 1)
            # Retry a few times while the chosen slot is atomic (the original
            # condition was inverted and broke out when it *found* an atomic
            # instruction).
            for i in range(10):
                if not self.instr_list[idx].atomic:
                    break
                idx = random.randint(0, current_instr_cnt - 1)
            if self.instr_list[idx].atomic:
                # Fall back to a linear scan for any non-atomic slot.
                for i in range(len(self.instr_list)):
                    if not self.instr_list[i].atomic:
                        idx = i
                        break
                if self.instr_list[idx].atomic:
                    logging.critical("Cannot inject the instruction")
                    sys.exit(1)
        elif idx > current_instr_cnt or idx < 0:
            logging.error("Cannot insert instr stream at idx %0d", idx)
            sys.exit(1)
        # Fixed slicing: the original used [0:idx-1] / [idx+1:cnt-1], which
        # silently dropped the element before the insertion point and the
        # last element of the stream.
        if replace:
            # The label of the replaced instruction moves to the head of the
            # inserted stream.
            new_instr[0].label = self.instr_list[idx].label
            new_instr[0].has_label = self.instr_list[idx].has_label
            self.instr_list = self.instr_list[0:idx] + new_instr + self.instr_list[idx + 1:]
        else:
            self.instr_list = self.instr_list[0:idx] + new_instr + self.instr_list[idx:]

    def mix_instr_stream(self, new_instr, contained=0):
        """
        Mix the input instruction stream with the original instruction, the instruction order is
        preserved. When 'contained' is set, the original instruction stream will be inside the
        new instruction stream with the first and last instruction from the input instruction
        stream.
        new_instr is a list of riscv_instr
        """
        current_instr_cnt = len(self.instr_list)
        new_instr_cnt = len(new_instr)
        insert_instr_position = [0] * new_instr_cnt
        for i in range(new_instr_cnt):
            # Fixed: the original called random.rangeint (AttributeError) and
            # pointlessly sorted the all-zero list before filling it.
            insert_instr_position[i] = random.randint(0, current_instr_cnt - 1)
        insert_instr_position.sort()
        if contained and new_instr_cnt > 0:
            insert_instr_position[0] = 0
            if new_instr_cnt > 1:
                insert_instr_position[new_instr_cnt - 1] = current_instr_cnt - 1
        for i in range(len(new_instr)):
            # +i compensates for the elements already inserted before this one.
            self.insert_instr(new_instr[i], insert_instr_position[i] + i)

    def convert2string(self):
        """Return the whole stream as assembly text, one instruction per line."""
        return "".join(instr.convert2asm() + "\n" for instr in self.instr_list)
class riscv_rand_instr_stream(riscv_instr_stream):
    """
    Generate a random instruction stream based on the configuration

    There are two ways to use this class to generate instruction stream
    1. For short instruction stream, you can call randomize() directly.
    2. For long instruction stream (>1K), randomize() all instructions together might take a
       long time for the constraint solver. In this case, you can call gen_instr to generate
       instructions one by one. The time only grows linearly with the instruction count
    """

    def __init__(self):
        # Fixed: super().__init__(self) passed self twice and raised TypeError.
        super().__init__()
        self.kernel_mode = 0
        self.allowed_instr = []
        self.category_dist = []

    def create_instr_instance(self):
        # Slots are filled lazily by gen_instr/randomize_instr.
        for i in range(self.instr_cnt):
            self.instr_list.append(None)

    def setup_allowed_instr(self, no_branch=0, no_load_store=1):
        """Build the flat list of instruction names this stream may use."""
        # Copy so the shared class-level basic_instr list is never mutated,
        # and extend (not append) so allowed_instr stays a flat name list
        # rather than gaining nested category lists.
        self.allowed_instr = list(riscv_instr_ins.basic_instr)
        if no_branch == 0:
            self.allowed_instr.extend(
                riscv_instr_ins.instr_category[riscv_instr_category_t.BRANCH.name])
        if no_load_store == 0:
            self.allowed_instr.extend(
                riscv_instr_ins.instr_category[riscv_instr_category_t.LOAD.name])
            self.allowed_instr.extend(
                riscv_instr_ins.instr_category[riscv_instr_category_t.STORE.name])
        self.setup_instruction_dist(no_branch, no_load_store)

    # TODO
    def randomize_avail_regs(self):
        pass

    def setup_instruction_dist(self, no_branch=0, no_load_store=1):
        if cfg.dist_control_mode:
            self.category_dist = cfg.category_dist
            if no_branch:
                self.category_dist[riscv_instr_category_t.BRANCH.name] = 0
            if no_load_store:
                self.category_dist[riscv_instr_category_t.LOAD.name] = 0
                self.category_dist[riscv_instr_category_t.STORE.name] = 0
            # Fixed: the original referenced an undefined name and called
            # .size() (not a dict method); use len() on the attribute.
            logging.info("setup_instruction_dist: %0d", len(self.category_dist))

    def gen_instr(self, no_branch=0, no_load_store=1, is_debug_program=0):
        self.setup_allowed_instr(no_branch, no_load_store)
        for i in range(len(self.instr_list)):
            self.instr_list[i] = self.randomize_instr(self.instr_list[i], is_debug_program)
        # Drop *all* trailing branch instructions (the original's inner
        # "if len(...): break" stopped after removing just one and was meant
        # to guard against popping from an empty list).
        while self.instr_list and self.instr_list[-1].category == riscv_instr_category_t.BRANCH:
            self.instr_list.pop()

    def randomize_instr(self, instr, is_in_debug=0, disable_dist=0):
        """Pick a random allowed instruction and randomize its GPR operands."""
        exclude_instr = []
        is_SP_in_reserved_rd = riscv_reg_t.SP in self.reserved_rd
        is_SP_in_reserved_regs = riscv_reg_t.SP in cfg.reserved_regs
        is_SP_in_avail_regs = riscv_reg_t.SP in self.avail_regs
        # SP-relative compressed instructions are only usable when SP is not
        # reserved and (if an explicit register pool is given) SP is in it.
        if (is_SP_in_reserved_rd or is_SP_in_reserved_regs or
                (len(self.avail_regs) > 0 and not is_SP_in_avail_regs)):
            exclude_instr.append(riscv_instr_name_t.C_ADDI4SPN)
            exclude_instr.append(riscv_instr_name_t.C_ADDI16SP)
            exclude_instr.append(riscv_instr_name_t.C_LWSP)
            exclude_instr.append(riscv_instr_name_t.C_LDSP)
        if is_in_debug and (not cfg.enable_ebreak_in_debug_rom):
            exclude_instr.append(riscv_instr_name_t.EBREAK)
            exclude_instr.append(riscv_instr_name_t.C_EBREAK)
        instr = riscv_instr_ins.get_rand_instr(
            include_instr=self.allowed_instr, exclude_instr=exclude_instr)
        instr = self.randomize_gpr(instr)
        return instr

    def randomize_gpr(self, instr):
        # Fixed: the original read/wrote has_rs1/format/rs1/... on *self*
        # (the stream) instead of on the instruction being randomized.
        avail_regs_set = set(self.avail_regs)
        reserved_rd_set = set(self.reserved_rd)
        reserved_regs_set = set(cfg.reserved_regs)
        excluded_avail_regs = list(avail_regs_set - reserved_rd_set - reserved_regs_set)
        if len(self.avail_regs) > 0:
            if instr.has_rs1:
                if instr.format == riscv_instr_format_t.CB_FORMAT:
                    # CB-format rs1 doubles as rd, so reserved registers must
                    # be excluded.
                    instr.rs1 = random.choice(excluded_avail_regs)
                else:
                    instr.rs1 = random.choice(self.avail_regs)
            if instr.has_rs2:
                instr.rs2 = random.choice(self.avail_regs)
            if instr.has_rd:
                instr.rd = random.choice(excluded_avail_regs)
        return instr
| [
"b150023ec@nitsikkim.ac.in"
] | b150023ec@nitsikkim.ac.in |
ee6b02d18f5a98597a3cc9da3c4ccc30900a6b45 | f5ad49b17b9dcc924790adf5cae94cb315e952e7 | /tests/contracts/test_package_version.py | 861d22380feb7d45205cf05dc20357e1d695ec84 | [
"MIT"
] | permissive | devopshq/crosspm2 | de0eed8024a05335ad1acbdb83882bc388c229a0 | b97539430b1d163fbd960c57e780022ddeb90a2b | refs/heads/master | 2023-06-15T18:10:20.053225 | 2021-07-15T07:15:41 | 2021-07-15T07:15:41 | 281,886,426 | 3 | 0 | MIT | 2021-07-15T07:15:41 | 2020-07-23T07:47:36 | Python | UTF-8 | Python | false | false | 1,243 | py | from crosspm.contracts.package_version import PackageVersion
def test_package_version():
    # Ordering, equality, and local-label handling of PackageVersion:
    # numeric fields compare numerically, labels compare case-insensitively.
    assert PackageVersion("1.2.3") < PackageVersion("10.2.3")
    assert PackageVersion("1.3") == PackageVersion("1.3.0")
    assert PackageVersion("1.3") < PackageVersion("1.3.1")
    assert PackageVersion("1.2.3") < PackageVersion("1.2.3-feature1-super-puper")
    assert PackageVersion("1.2.3") != PackageVersion("1.2.3-feature1-super-puper")
    assert PackageVersion("1.3-SS7AD") != PackageVersion("1.3")
    assert PackageVersion("1.3-SS7AD") != PackageVersion("1.3-TEST")
    assert PackageVersion("1.3-SS7AD") == PackageVersion("1.3-SS7AD")
    assert PackageVersion("1.3-123") < PackageVersion("1.3-124")
    assert PackageVersion("1.3-124") > PackageVersion("1.3-123")
    assert PackageVersion("1.3-ABC") == PackageVersion("1.3-abc")
    assert PackageVersion("1.3-a") < PackageVersion("1.3-aa")
    assert PackageVersion("1.3-a") < PackageVersion("1.3-ba")
def test_package_version_properties():
    # major/minor/micro come from the numeric part; the label is normalized
    # into the dot-separated 'local' segment.
    package_version = PackageVersion("1.2.3-feature1-super-puper")
    assert 1 == package_version.major
    assert 2 == package_version.minor
    assert 3 == package_version.micro
    assert 'feature1.super.puper' == package_version.local
| [
"apazdnikov@ptsecurity.com"
] | apazdnikov@ptsecurity.com |
ecfb08069558a9bb83d38c1599cd117f26077fa6 | 51edacc7eb7c90a9c3dbb8218ce70533646c031a | /pythonProject2/FIRSTPROJECT/FIRSTPROJECT/settings.py | d1cd0b75707d44740e0f214b77456c142dd7839b | [] | no_license | Neelima1234534-tech/Django | 013be682c8d07845e25b811d75742495335ee128 | 1b56ef48925238517204391f44f209992276d337 | refs/heads/master | 2023-02-25T02:06:01.502335 | 2021-01-22T21:26:32 | 2021-01-22T21:26:32 | 332,064,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | """
Django settings for FIRSTPROJECT project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path

# Fixed import: `from pathlib import Path, os` only worked because pathlib
# happens to import os internally — `os` is not part of pathlib's public API.

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Project-level template and static asset directories.
template_dir = os.path.join(BASE_DIR, "TEMPLATE-PATH")
static_dir = os.path.join(BASE_DIR, "static")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it into an
# environment variable before any real deployment.
SECRET_KEY = 'eg29hj-uqi(rpaaaw+^1mk_*k_%w@%8d^7bb0hajdtf*e)j+6h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production needs real host names.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'FIRSTAPPLICATION',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FIRSTPROJECT.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory plus per-app template dirs.
        'DIRS': [template_dir,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'FIRSTPROJECT.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    static_dir
]
"76076133+Neelima1234534-tech@users.noreply.github.com"
] | 76076133+Neelima1234534-tech@users.noreply.github.com |
6f30f0587a27efa73334f093ee5bbe39f6015019 | 0825774028251f020352ac9ff3cbf2e34cb3af02 | /watcher-folder/file-copy/config/vars.py | ca503c9382d8b94cdf913f018fb15d93760ec385 | [] | no_license | wsalles/watcher-folder-simple-project-with-docker | 1b73b2a38567f84517975281fd733677055d0e01 | 25c4681bf1e92f162b53a7907e10d2060a335c68 | refs/heads/master | 2023-03-11T01:57:20.991868 | 2019-06-26T23:13:22 | 2019-06-26T23:13:22 | 339,831,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | import os
# Watcher configuration read from environment variables at import time;
# a missing variable raises KeyError immediately, surfacing misconfiguration
# at startup rather than mid-run.
data = {
    'WATCHDIR': os.environ['WATCHDIR'],
    'WATCH_FOLDER': os.environ['WATCH_FOLDER'],
    'DESTINATION': os.environ['DESTINATION'],
    'TIMEOUT': int(os.environ['TIMEOUT']),  # presumably seconds — TODO confirm with consumers
    'EXT': ['*.mxf', '*.MXF'],  # glob patterns of files to process
    'REMOVE_META': False
}
| [
"wallace.salles@tvglobo.com.br"
] | wallace.salles@tvglobo.com.br |
fe93f57251e701508ed2152775bb40788f39915f | 881e0569d00139ff18b25f010093d8e85e8a0de5 | /deployments/docker/academy/services/models.py | fd977b0ecd1f4177802969ce31d0871de98650be | [] | no_license | Gulmira83/academy | fb72c46e9dfb6646ec55ab45f001eea80bc367c1 | f2330b69197af0d362ac27077a4a075167c1ae18 | refs/heads/master | 2022-11-14T16:29:48.410836 | 2020-07-02T01:58:13 | 2020-07-02T01:58:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,131 | py | from django.db import models
from kubernetes.client.apis import core_v1_api
from kubernetes import client, config
from django.contrib.auth.models import User
from django.conf import settings
from kubernetes.client.rest import ApiException
import yaml
import random
import logging
import os
def get_kube_config():
    ## Load the in-cluster config when running inside a Kubernetes pod
    ## (detected via the mounted service-account token), otherwise fall
    ## back to the local kubeconfig.
    token_file = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    if os.path.isfile(token_file):
        return config.load_incluster_config()
    return config.load_kube_config()
class UserService(models.Model):
    ## One record per user service instance published for a student
    ## (k8s resource name, ingress port/path, owning user).
    name = models.CharField(max_length=200)
    port = models.CharField(max_length=5)  # service port, stored as text
    username = models.ForeignKey(User, on_delete=models.CASCADE)
    password = models.CharField(max_length=200)  # NOTE(review): stored in clear text — confirm acceptable
    service = models.CharField(max_length=10)  # presumably the service kind (e.g. pynote) — confirm
    path = models.CharField(max_length=50)  # URL path the service is exposed on
    def __str__(self):
        return self.name
    class Meta:
        verbose_name_plural = "userService"
class Pynote:
    ## Model to create PyNote
    ## NOTE: this class body runs at import time — it loads the kube config
    ## and builds API clients as class-level attributes shared by all users.
    name = 'Pynote'
    get_kube_config()
    kube = client.ExtensionsV1beta1Api()
    api = core_v1_api.CoreV1Api()
    ## Environment is used for namespace
    environment = getattr(settings, 'ENVIRONMENT', None)
    namespace = f'{environment}-students'
def available_port(self):
## Function will find available port
while True:
random_port = random.choice(list(range(7000, 7100)))
if not UserService.objects.filter(port=random_port).first():
return random_port
    def generate_templates(self, username, password, environment, service_path=None):
        ## Function generates all templates <pod>, <service>, <ingress>
        ## Returns a dict with keys: 'pynotelink', 'path', 'pod', 'service',
        ## 'ingress'. The YAML files under kubernetes/ are used as skeletons
        ## and customized per user.
        ## NOTE(review): the `environment` parameter is never read — the
        ## method uses self.environment instead; confirm whether callers
        ## expect the argument to take effect.
        templates = {}
        template_port = self.available_port()
        host = f'{self.environment}.academy.fuchicorp.com'
        ingress_name = f'{self.environment}-pynote-ingress'
        namespace = f'{self.environment}-students'
        # Default URL path is /services/pynotes/<username> unless overridden.
        if service_path is None:
            templates['pynotelink'] = f'/services/pynotes/{username}'
            templates['path'] = {'path': f'/services/pynotes/{username}', 'backend': {'serviceName': username, 'servicePort': template_port}}
        else:
            templates['pynotelink'] = service_path
            templates['path'] = {'path': f'{service_path}', 'backend': {'serviceName': username, 'servicePort': template_port}}
        # Pod: named after the user, credentials passed as container args,
        # URL path exported via the URL_PATH environment variable.
        with open('kubernetes/pynote-pod.yaml' ) as file:
            pod = yaml.load(file, Loader=yaml.FullLoader)
            pod['metadata']['name'] = username
            pod['metadata']['labels']['run'] = username
            pod['spec']['containers'][0]['name'] = username
            pod['spec']['containers'][0]['args'] = [ f"--username={username}", f"--password={password}"]
            if service_path is None:
                pod['spec']['containers'][0]['env'].append({"name": "URL_PATH", "value": f'/services/pynotes/{username}'})
            else:
                pod['spec']['containers'][0]['env'].append({"name": "URL_PATH", "value": f'{service_path}'})
            templates['pod'] = pod
        # Service: selects the user's pod via the 'run' label on the chosen port.
        with open('kubernetes/pynote-service.yaml') as file:
            service = yaml.load(file, Loader=yaml.FullLoader)
            service['metadata']['labels']['run'] = username
            service['spec']['ports'][0]['port'] = template_port
            service['spec']['selector']['run'] = username
            service['metadata']['name'] = username
            templates['service'] = service
        # Ingress: one shared per-environment ingress; the user's path rule
        # is appended to its rule list.
        with open('kubernetes/pynote-ingress.yaml') as file:
            ingress = yaml.load(file, Loader=yaml.FullLoader)
            ingress['spec']['rules'][0]['host'] = host
            ingress['spec']['rules'][0]['http']['paths'].append(templates['path'])
            ingress['metadata']['name'] = ingress_name
            ingress['metadata']['namespace'] = namespace
            templates['ingress'] = ingress
        return templates
def existing_ingess(self, ingerssname, namespace):
list_ingress = self.kube.list_namespaced_ingress(namespace).items
for item in list_ingress:
if item.metadata.name == ingerssname:
return item
else:
return False
def create_service(self, username, password, service_path=None):
## Function to create service pynote
pynote_name = username.lower()
pynote_pass = password
ingress_name = f'{self.environment}-pynote-ingress'
namespace = f'{self.environment}-students'
if service_path is None:
deployment = self.generate_templates(pynote_name, pynote_pass, self.environment)
else:
deployment = self.generate_templates(pynote_name, pynote_pass, self.environment, service_path)
if not self.is_pod_exist(pynote_name):
self.api.create_namespaced_pod(body=deployment['pod'], namespace=namespace)
if not self.is_service_exist(pynote_name):
self.api.create_namespaced_service(body=deployment['service'], namespace=namespace)
exist_ingress = self.existing_ingess(ingress_name, namespace)
if exist_ingress:
exist_ingress.spec.rules[0].http.paths.append(deployment['path'])
self.kube.replace_namespaced_ingress(exist_ingress.metadata.name, namespace, body=exist_ingress)
else:
self.kube.create_namespaced_ingress(namespace, body=deployment['ingress'])
return deployment
def is_pod_exist(self, username):
try:
self.api.read_namespaced_pod(username, self.namespace)
return True
except ApiException:
return False
def is_service_exist(self, username):
try:
self.api.read_namespaced_service(username, self.namespace)
return True
except ApiException:
return False
def is_ingress_exist(self):
try:
self.kube.read_namespaced_ingress(f"{self.environment}-pynote-ingress", self.namespace)
return True
except ApiException:
return False
def delete_service(self, username):
## Function to delete the service
pynote_name = username.lower()
ingress_name = f'{self.environment}-pynote-ingress'
namespace = f'{self.environment}-students'
exist_ingress = self.existing_ingess(ingress_name, namespace)
try:
self.api.delete_namespaced_pod(pynote_name, namespace)
logging.warning(f'Deleted a pod {pynote_name}')
self.api.delete_namespaced_service(pynote_name, namespace)
logging.warning(f'Deleted a service {pynote_name}')
except:
logging.warning('Trying to delete service and pod was not successed.')
if exist_ingress:
if 1 < len(exist_ingress.spec.rules[0].http.paths):
for i in exist_ingress.spec.rules[0].http.paths:
if username in i.path:
exist_ingress.spec.rules[0].http.paths.remove(i)
exist_ingress.metadata.resource_version = ''
self.kube.patch_namespaced_ingress(exist_ingress.metadata.name, namespace, body=exist_ingress)
else:
self.kube.delete_namespaced_ingress(ingress_name, namespace)
try:
user = User.objects.get(username=username)
service = UserService.objects.get(username=user).first()
service.delete()
logging.warning(f"Pynote has been delete for {username}")
except Exception as e:
logging.error(f"Error: {e}")
class Jenkins:
    # Placeholder handler for a per-student Jenkins service (not implemented yet).
    name = 'Jenkins'
    pass


class WebShell:
    # Placeholder handler for a per-student WebShell service (not implemented yet).
    name = 'WebShell'
    pass


class GoNote:
    # Placeholder handler for a per-student GoNote service (not implemented yet).
    name = 'GoNote'
    pass
def get_service(name):
    """Return a service handler instance matching ``name`` (case-insensitive).

    Bug fixes: the Jenkins/WebShell branches previously instantiated the
    handler without returning it (callers always got None), and the last
    branch tested 'webshell' a second time instead of 'gonote'.
    Returns None when no known service name matches.
    """
    lowered = name.lower()
    if 'pynote' in lowered:
        return Pynote()
    elif 'jenkins' in lowered:
        return Jenkins()
    elif 'webshell' in lowered:
        return WebShell()
    elif 'gonote' in lowered:
        return GoNote()
    return None
"sadykovfarkhod@gmail.com"
] | sadykovfarkhod@gmail.com |
9c05c4731a38f745c6bdbd8a3c089c350368dbc4 | 53e82d26572532c2a7ab94a21a43ee0b4632a378 | /06_testing/06_01_test_math_module.py | 997bbeee465abafffabd9b6166bd9b30138181f5 | [] | no_license | KacperCiepielewski/python301-labs | bae02bfa69b9ad87ffe4b2dc4d5cf163032e6bee | 326fc6cce44f3c262a4a8a020fc892f1948e42ee | refs/heads/master | 2023-05-25T07:14:12.840070 | 2021-06-12T22:09:45 | 2021-06-12T22:09:45 | 376,387,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # Write a unittest test suite with at least two methods that test
# the functionality of the built-in `math` module.
import unittest
import math
class TestMath(unittest.TestCase):
    """Two sanity checks exercising the built-in math module."""

    def test_floor_rounds_down(self):
        # floor must drop the fractional part of a positive float
        result = math.floor(3.4)
        self.assertEqual(result, 3)

    def test_isnan_recognizes_numbers(self):
        # an ordinary integer is not NaN
        self.assertFalse(math.isnan(4))


if __name__ == "__main__":
    unittest.main()
| [
"KacperCiepielewski@gmail.com"
] | KacperCiepielewski@gmail.com |
4f9c184eba07a1dd1be714a247a3ae4a73641987 | 6b2d85ca45485973ff918a0269ba5dca194777b3 | /scatterPlot.py | 111e651f37872fd1e2d6bcdf98068d728528e39f | [] | no_license | BoriaK/ComunicationSysModel | 2e1a2ab46db4bcafcfd8ab1e08778a9ac7a8fb7b | d918071bfbdba35da10414f9598a9e8a4f94bff5 | refs/heads/master | 2023-04-06T18:06:03.413158 | 2021-04-23T12:36:51 | 2021-04-23T12:36:51 | 306,825,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | import numpy as np
from matplotlib import pyplot as plt
# SymNum = int(10 * 1e6)
#
# ModNum = 16
#
# Ix = 2 * np.random.random_integers(low=1, high=np.sqrt(ModNum), size=SymNum) - 1 - np.sqrt(ModNum)
# Qx = 2 * np.random.random_integers(low=1, high=np.sqrt(ModNum), size=SymNum) - 1 - np.sqrt(ModNum)
# X = Ix + 1j * Qx
#
# ni = np.random.normal(loc=0, scale=0.5, size=SymNum)
# nq = np.random.normal(loc=0, scale=0.5, size=SymNum)
# n = ni + 1j * nq
#
# R = X + n
# NOB = 30 * ModNum # Number of Bins in histogram
def scatter(InputSig, ModNum, SNR_bit):
    """Plot a 2-D density histogram ("constellation") of a complex signal.

    InputSig -- complex array of received symbols (real part = I, imag = Q)
    ModNum   -- QAM order (e.g. 16); sets the bin count and appears in the title
    SNR_bit  -- per-bit SNR in dB, shown in the title only
    Blocks until the matplotlib window is closed (plt.show()).
    """
    NOB = 30 * ModNum  # Number of Bins in histogram
    plt.figure()
    # 2-D histogram of (I, Q) pairs renders the constellation as a density map
    plt.hist2d(np.real(InputSig), np.imag(InputSig), bins=NOB, cmap='jet')
    plt.xlabel('Infase')
    plt.ylabel('Quadrature')
    plt.title('Constellation of Rx OFDM signal ' + str(ModNum) + '-QAM ' + 'with SNR = ' + str(SNR_bit) + 'dB')
    plt.grid()
    plt.show()
| [
"boris.kupcha@intel.com"
] | boris.kupcha@intel.com |
c85727fb88c601c6ac65efb6caf7501ada9b8752 | c6a4a06774c184d2659e0b04bbdf73c9c0bcf7dc | /Face_Dectection_DeMO/FaceCaptureOnPhotoPremier.py | bcb1547357de86c16798b7ff7a1011f0f5dc3604 | [] | no_license | DanferWang/Python_Project | 601427d12519e61b88676b46c13b82da88e1cb4d | 0af2dbc7152ba826644341843c653afab04b21ad | refs/heads/master | 2020-09-13T02:18:53.757643 | 2019-11-19T07:20:08 | 2019-11-19T07:20:08 | 222,631,584 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #导入库
import cv2
# Load the photo to analyse (filename typed by the user)
img = input("copy your photo to the aim directory, then input the filename of the photo which you wanna recognize:")
image = cv2.imread(img)
# Load the pre-trained Haar cascade frontal-face model
face = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
# Convert to grayscale: improves detection performance
# NOTE(review): cv2.imread returns BGR, so COLOR_BGR2GRAY would be the exact
# conversion; COLOR_RGB2GRAY still produces a usable grayscale image.
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Detect faces
faces = face.detectMultiScale(gray)
# Mark each detected face with a green rectangle
for (x,y,w,h) in faces:
    cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
# Show the annotated image
cv2.imshow('Danfer', image)
# Wait for a key press
cv2.waitKey(0)
# Close all windows
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
1ab6b871a38d865a54fcdc6e51f9368d137e61d8 | 0d3ed3ba6bd188128c25e1eac17c62d01dd816f2 | /env/lib/python2.7/site-packages/uszipcode/packages/fuzzywuzzy/utils.py | 18657028eee34081b841d04c4240102abc04f2b2 | [] | no_license | shawnaness/wheather | 2706b2292d6ce00e5b6835e5edc2a7e1eaf2eef8 | c2ff4d8f190255b1564cb77897a0c0bab7fe6cf4 | refs/heads/master | 2021-05-06T22:27:22.516985 | 2017-12-06T03:13:37 | 2017-12-06T03:13:37 | 112,817,531 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | from __future__ import unicode_literals
import sys
from .string_processing import StringProcessor
PY3 = sys.version_info[0] == 3
def validate_string(s):
    """Return True when *s* has a length greater than zero.

    Objects without a length (e.g. None, numbers) are treated as invalid
    and yield False instead of raising.
    """
    try:
        length = len(s)
    except TypeError:
        return False
    return length > 0
# All high-bit (non-ASCII) characters; used to strip strings down to ASCII.
bad_chars = str("").join([chr(i) for i in range(128, 256)])  # ascii dammit!
if PY3:
    # Python 3 str.translate needs a {codepoint: None} map to delete chars
    translation_table = dict((ord(c), None) for c in bad_chars)
    # alias so the Python 2 name keeps working in the helpers below
    unicode = str
def asciionly(s):
    """Strip all non-ASCII (high-bit) characters from *s*."""
    if PY3:
        return s.translate(translation_table)
    else:
        # Python 2 str.translate signature is (table, deletechars)
        return s.translate(None, bad_chars)
def asciidammit(s):
    """Coerce any object to an ASCII-only string, dropping other characters."""
    if type(s) is str:
        return asciionly(s)
    elif type(s) is unicode:
        # On Python 3 `unicode` aliases str, so this branch only runs on
        # Python 2: encode drops non-ASCII, then strip any leftovers.
        return asciionly(s.encode('ascii', 'ignore'))
    else:
        # Fall back to the object's string representation and retry.
        return asciidammit(unicode(s))
def make_type_consistent(s1, s2):
    """If both objects aren't either both string or unicode instances force them to unicode"""
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2
    elif isinstance(s1, unicode) and isinstance(s2, unicode):
        return s1, s2
    else:
        # mixed types: coerce both to unicode (str on Python 3)
        return unicode(s1), unicode(s2)
def full_process(s, force_ascii=False):
    """Process string by
    -- removing all but letters and numbers
    -- trim whitespace
    -- force to lower case
    if force_ascii == True, force convert to ascii"""
    # None is normalised to the empty string so callers never see an error.
    if s is None:
        return ""
    if force_ascii:
        s = asciidammit(s)
    # Keep only Letters and Numbers (see Unicode docs).
    string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
    # Force into lowercase.
    string_out = StringProcessor.to_lower_case(string_out)
    # Remove leading and trailing whitespaces.
    string_out = StringProcessor.strip(string_out)
    return string_out
def intr(n):
    '''Returns a correctly rounded integer'''
    rounded = round(n)
    return int(rounded)
| [
"moziyan@yahoo.com"
] | moziyan@yahoo.com |
465f00c2c7cbdc3ba8b8108fe475a748598a12d7 | bade2e15e7c3ff165958c1a35a383a0221a897f3 | /details.py | 3eebf7bedb25a181f261c7d403de8b5e207bbe5a | [] | no_license | paulram2810/Python-Programs | 25daaaea3e057ef26ebca28ce901a8820c60656e | 436ae4f9287b02ab35991646bfafc72b3fba9df5 | refs/heads/master | 2020-05-02T18:44:45.332474 | 2019-03-28T06:18:56 | 2019-03-28T06:18:56 | 178,138,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | print("Enter Details : ")
# Minimal console form: prompt for contact details, then echo a summary.
print("First Name : ",end="")
fn=input()
print("Last Name : ",end="")
ln=input()
print("Contact : ",end="")
num=input()
address=input("Address : ")
print("\n")
# Echo everything back to the user
print("Full Name : ",fn," ",ln)
print("Contact : ",num)
print("Address : ",address)
| [
"noreply@github.com"
] | noreply@github.com |
a7dc38662a958b0dc522f5d78a49129d092ed16d | 6bc9aacb414f62bdac4375305b8e7f401ab5af50 | /chapter2.数据抓取/2.2.6_scrape_callback2.py | 38a0d807aeb6261088e930dcc5c72e9bb421bf9f | [] | no_license | zero1248/web-scraping | d1d4ba82fb450fddd839d49e7faaba75a51d25bf | 8d4aaa2a5a50690adede16707044bcc4c9c9e214 | refs/heads/master | 2020-05-05T02:03:47.254575 | 2019-04-06T11:42:48 | 2019-04-06T11:42:48 | 179,623,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | import csv
import re
import urllib.parse
import lxml.html
from cpt2public import link_crawler
class ScrapeCallback:
    """Callback for link_crawler: scrapes country attributes from every
    /view/ page and appends them as one row to countries.csv."""

    def __init__(self):
        # NOTE(review): the CSV file handle stays open for the crawler's
        # lifetime and is never explicitly closed; on Python 3, opening with
        # newline='' would avoid blank rows on Windows.
        self.writer = csv.writer(open('countries.csv', 'w'))
        # Column order written to the header and to every scraped row.
        self.fields = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code',
                       'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        # Only country detail pages ("/view/") are scraped; user pages skipped.
        if re.search('/view/', url):
            if re.search('/user/', url):
                return
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                # Each attribute sits in a <td class="w2p_fw"> inside the row
                # whose id is places_<field>__row.
                row.append(tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content())
            self.writer.writerow(row)
            print(url, row)


if __name__ == '__main__':
    # Crawl the whole demo site, scraping every index/view page.
    link_crawler('http://example.webscraping.com/', '/(index|view)', scrape_callback = ScrapeCallback())
    # link_crawler('http://example.webscraping.com/places/default/view/Brazil-32', '/(index|view)', scrape_callback = ScrapeCallback())
"gaoyu10010@126.com"
] | gaoyu10010@126.com |
ef921343d682ac3fa05976f5c29c34274f6a83f6 | 997cededcb811cd7da0db4283356a065b248a9f5 | /web/app/models.py | 6637eba54478f96a243af8b5eb843ada422fd6f8 | [] | no_license | mfcardenas/tfm-server | 1f275f9574224da85f4b657e03efc39844f798a5 | 530e7f29a344649d9814131c1a943fc91dc50e4a | refs/heads/master | 2020-12-10T08:13:18.217992 | 2020-01-12T21:38:11 | 2020-01-12T21:38:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | from datetime import datetime
from hashlib import md5
from app import db, login
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
class User(UserMixin, db.Model):
    """Application user with a hashed password.

    UserMixin supplies the flask-login session interface
    (is_authenticated, is_active, ...).
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    surname1 = db.Column(db.String(64), index=True)
    surname2 = db.Column(db.String(64), index=True)
    email = db.Column(db.String(120), index=True, unique=True)
    # Only the salted hash is stored, never the plain password.
    password_hash = db.Column(db.String(128))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_surname1(self, surname1):
        self.surname1 = surname1

    def get_surname1(self):
        return self.surname1

    def set_surname2(self, surname2):
        self.surname2 = surname2

    def get_surname2(self):
        return self.surname2

    def set_password(self, password):
        # Store a salted werkzeug hash instead of the plain password.
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    def get_id(self):
        # Overrides UserMixin.get_id (which returns str(self.id)).
        return self.id

    # Commented-out Gravatar avatar helper kept for reference:
    '''
    def avatar(self, size):
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)
    '''
@login.user_loader
def load_user(id):
    """flask-login callback: reload the User for the id stored in the session."""
    return User.query.get(int(id))
| [
"assddo@gmail.com"
] | assddo@gmail.com |
d2eb504a02f64da5c9e1772011810882693711af | 55b5ea0b5f06324c94182af4becb2961e5454a63 | /polls/tests.py | cfa86da56b211ca5ef1b51017f19ebadc2667304 | [] | no_license | serhii73/first_django_app | c58b1861cd7ed76aef88fbeeeea25bc10eaa10c9 | f93a7287880b2244c3d1e0c1ff44c455fb09cc3d | refs/heads/master | 2020-06-05T04:39:03.684831 | 2019-07-26T15:42:47 | 2019-07-26T15:42:47 | 192,316,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,848 | py | import datetime
from django.urls import reverse
from django.test import TestCase
from django.utils import timezone
from .models import Question
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertIs(future_question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is older than 1 day.
        """
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        old_question = Question(pub_date=time)
        self.assertIs(old_question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() returns True for questions whose pub_date
        is within the last day.
        """
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_question = Question(pub_date=time)
        self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Create a question with the given `question_text` and published the
    given number of `days` offset to now (negative for questions published
    in the past, positive for questions that have yet to be published).
    """
    time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
    """Integration tests for the polls index view (latest_question_list)."""

    def test_no_questions(self):
        """
        If no questions exist, an appropriate message is displayed.
        """
        response = self.client.get(reverse("polls:index"))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context["latest_question_list"], [])

    def test_past_question(self):
        """
        Questions with a pub_date in the past are displayed on the
        index page.
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse("polls:index"))
        self.assertQuerysetEqual(
            response.context["latest_question_list"], ["<Question: Past question.>"]
        )

    def test_future_question(self):
        """
        Questions with a pub_date in the future aren't displayed on
        the index page.
        """
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse("polls:index"))
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context["latest_question_list"], [])

    def test_future_question_and_past_question(self):
        """
        Even if both past and future questions exist, only past questions
        are displayed.
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse("polls:index"))
        self.assertQuerysetEqual(
            response.context["latest_question_list"], ["<Question: Past question.>"]
        )

    def test_two_past_questions(self):
        """
        The questions index page may display multiple questions.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse("polls:index"))
        self.assertQuerysetEqual(
            response.context["latest_question_list"],
            ["<Question: Past question 2.>", "<Question: Past question 1.>"],
        )
class QuestionDetailViewTests(TestCase):
    """Integration tests for the polls detail view."""

    def test_future_question(self):
        """
        The detail view of a question with a pub_date in the future
        returns a 404 not found.
        """
        future_question = create_question(question_text="Future question.", days=5)
        url = reverse("polls:detail", args=(future_question.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_past_question(self):
        """
        The detail view of a question with a pub_date in the past
        displays the question's text.
        """
        past_question = create_question(question_text="Past Question.", days=-5)
        url = reverse("polls:detail", args=(past_question.id,))
        response = self.client.get(url)
        self.assertContains(response, past_question.question_text)
| [
"aserhii@protonmail.com"
] | aserhii@protonmail.com |
076ebf05d5c2ab02b5f048e8ab31d338adc8e45a | 6102a4107e45e02f4fc9f692da54161d06d0446c | /Screen pet.py | 5848f403502b145f85551462f89dea31de5bfecc | [] | no_license | KavyaSai-T/ScreenPet | 671ee59c3d586b1a32a2a97a649afa35da5d6776 | c1120e238f03370e7988a2822c5e49e68aa46d59 | refs/heads/master | 2022-11-22T06:00:29.716283 | 2020-07-23T12:03:27 | 2020-07-23T12:03:27 | 281,939,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | from tkinter import Tk , HIDDEN , NORMAL , Canvas
def toggle_eyes():
    # Swap the eye fill between white (open) and the body colour (closed),
    # hiding/showing the pupils to match.  Called in pairs by blink().
    current_color = c.itemcget(eye_left,'fill')
    new_color = c.body_color if current_color == 'white' else 'white'
    current_state = c.itemcget(pupil_left,'state')
    new_state = NORMAL if current_state == HIDDEN else HIDDEN
    c.itemconfigure(pupil_left,state=new_state)
    c.itemconfigure(pupil_right,state=new_state)
    c.itemconfigure(eye_left,fill=new_color)
    c.itemconfigure(eye_right,fill=new_color)
def blink():
    # Close the eyes now, reopen after 250 ms, and reschedule the next
    # blink in 3 s (this function re-arms itself forever).
    toggle_eyes()
    win.after(250,toggle_eyes)
    win.after(3000,blink)
def toggle_pupils():
    # Cross / uncross the eyes by nudging each pupil; c.crossed_eyes tracks
    # the current state so the move can be undone exactly.
    if not c.crossed_eyes:
        c.move(pupil_left , 10,-5)
        c.move(pupil_right , -10,-5)
        c.crossed_eyes = True
    else:
        c.move(pupil_left , -10,5)
        c.move(pupil_right , 10,5)
        c.crossed_eyes = False
def toggle_tongue():
    # Show or hide the tongue (two canvas items); c.tongue_out tracks state.
    if not c.tongue_out:
        c.itemconfigure(tongue_tip , state= NORMAL)
        c.itemconfigure(tongue_main , state= NORMAL)
        c.tongue_out = True
    else:
        c.itemconfigure(tongue_tip , state= HIDDEN)
        c.itemconfigure(tongue_main , state= HIDDEN)
        c.tongue_out = False
def cheeky(event):
    # Double-click handler: stick the tongue out and cross the eyes for one
    # second, dropping the happy face in the meantime.
    toggle_tongue()
    toggle_pupils()
    hide_happy(event)
    win.after(1000,toggle_tongue)
    win.after(1000,toggle_pupils)
    return
def show_happy(event):
    # Mouse-motion handler: while the pointer is over the pet's body area
    # (the 20..350 square), show blushing cheeks and the big smile, and
    # refill the happiness meter consumed by sad().
    if (20 <= event.x and event.x <= 350) and (20 <= event.y and event.y <= 350):
        c.itemconfigure(cheek_left,state=NORMAL)
        c.itemconfigure(cheek_right,state=NORMAL)
        c.itemconfigure(mouth_happy,state=NORMAL)
        c.itemconfigure(mouth_normal,state=HIDDEN)
        c.itemconfigure(mouth_sad,state=HIDDEN)
        c.happy_level = 10
    return
def hide_happy(event):
    # Revert to the neutral face: hide cheeks and the happy/sad mouths.
    c.itemconfigure(cheek_left,state=HIDDEN)
    c.itemconfigure(cheek_right,state=HIDDEN)
    c.itemconfigure(mouth_happy,state=HIDDEN)
    c.itemconfigure(mouth_normal,state=NORMAL)
    c.itemconfigure(mouth_sad,state=HIDDEN)
    return
def sad():
    # Happiness decay: every 500 ms lower happy_level; once it hits zero,
    # switch the mouth to the sad shape.  show_happy() refills the level.
    if c.happy_level == 0:
        c.itemconfigure(mouth_happy,state=HIDDEN)
        c.itemconfigure(mouth_normal,state=HIDDEN)
        c.itemconfigure(mouth_sad,state=NORMAL)
    else:
        c.happy_level -= 1
    win.after(500,sad)
# --- Window and canvas ---------------------------------------------------
win = Tk()
c = Canvas(win , width = 400 , height = 400)
c.config(bg = 'dark blue' , highlightthickness = 0)

# Single "skin" colour, stored on the canvas so the handlers above can read it.
c.body_color = 'SkyBlue1'

# --- Body shapes (item ids are module globals used by the handlers) ------
body = c.create_oval(35,20,365,350,outline=c.body_color, fill=c.body_color)
foot_left = c.create_oval(65,320,145,360,outline=c.body_color, fill=c.body_color)
foot_right = c.create_oval(250,320,330,360,outline=c.body_color, fill=c.body_color)
ear_left = c.create_polygon(75,80,75,10,165,70,outline=c.body_color, fill=c.body_color)
ear_right = c.create_polygon(255,45,325,10,320,70,outline=c.body_color, fill=c.body_color)
eye_left = c.create_oval(130,110,160,170,outline='black', fill='white')
pupil_left = c.create_oval(140,145,150,155,outline='black', fill='black')
eye_right = c.create_oval(230,110,260,170,outline='black', fill='white')
pupil_right = c.create_oval(240,145,250,155,outline='black', fill='black')
# Three overlapping mouths; exactly one is visible at a time.
mouth_normal = c.create_line(170,250,200,272,230,250,smooth=1,width=2,state=NORMAL)
mouth_happy = c.create_line(170,250,200,282,230,250,smooth=1,width=2,state= HIDDEN)
mouth_sad = c.create_line(170,250,200,232,230,250,smooth=1,width=2,state= HIDDEN)
tongue_main = c.create_rectangle(170,250,230,290,outline='red', fill='red',state= HIDDEN)
tongue_tip = c.create_oval(170,285,230,300,outline='red', fill='red',state= HIDDEN)
cheek_left = c.create_oval(70,180,120,230,outline='pink', fill='pink',state= HIDDEN)
cheek_right = c.create_oval(280,180,330,230,outline='pink', fill='pink',state= HIDDEN)
c.pack()

# --- Event wiring and animation state ------------------------------------
c.bind('<Motion>',show_happy)
c.bind('<Leave>',hide_happy)
c.bind('<Double-1>',cheeky)
c.crossed_eyes = False
c.tongue_out = False
c.happy_level = 10
win.after(1000,blink)  # start blinking after 1 s
win.after(5000,sad)    # happiness starts decaying after 5 s
win.mainloop()
"noreply@github.com"
] | noreply@github.com |
376f82bf1be280037aaad21374b43a1e4dce82eb | 69889d51e933b4e8a1d4c8397a317aa1d1365a5a | /Stack/17299.py | 3de2e8eff8d86d4a1485e3e058e23e566d2857dc | [] | no_license | ddraa/Algorithm | a35c87631420ceccec6f7094da6f2b22ddb66c8c | a97c6628d5389f7f93603a2e95ac3b569057f556 | refs/heads/master | 2023-06-25T17:12:39.925821 | 2021-07-18T05:53:28 | 2021-07-18T05:53:28 | 279,240,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | import sys
# BOJ 17299 (per the filename): for each element, find the nearest element
# to its right whose frequency in the whole sequence is strictly greater
# (-1 if none).  Frequency table + monotonic stack gives O(N).
input = sys.stdin.readline

N = int(input())
F, stack = {}, []
arr = list(map(int, input().split()))
res = [-1 for _ in range(N)]
# F maps each value to its number of occurrences in arr.
for n in arr:
    if n in F:
        F[n] += 1
    else:
        F[n] = 1
# Scan right-to-left; the stack holds (frequency, value) pairs with
# strictly decreasing frequencies from bottom to top.
for i in range(N - 1, -1, -1):
    # Drop entries whose frequency is not greater than the current one.
    while stack and stack[-1][0] <= F[arr[i]]:
        stack.pop()
    if stack:
        res[i] = stack[-1][1]
    stack.append((F[arr[i]], arr[i]))
print(*res)
| [
"ruuddyd@gmail.com"
] | ruuddyd@gmail.com |
bfb31bbaa48485e6c87d4b9683dbf6fc1c4d2f7b | 91a9f5a7afb398f4238527708cbc155dc972cbfa | /older/Grapher_app0/Names_Module.py | 1ff52a69c3e05a1e89a15ebd6b1cc78a4dd3597e | [] | no_license | bddmodelcar/kzpy3.2 | cd6f9bf6b7b8b920c79b4ee36c2592b992ae4332 | b044b26649b19b240bd580feca20424a237374b1 | refs/heads/master | 2021-01-19T21:01:58.687712 | 2017-08-23T22:39:56 | 2017-08-23T22:39:56 | 101,243,308 | 0 | 1 | null | 2017-08-24T02:04:50 | 2017-08-24T02:04:50 | null | UTF-8 | Python | false | false | 1,681 | py | from Paths_Module import *
# Side effect from Paths_Module: identify this file in the run log
# (identify_file_str is presumably defined there — confirm).
exec(identify_file_str)

# For every string below, create a module-level variable of the same name
# whose value is that string (e.g. purpose = 'purpose').  d2n() joins its
# arguments into the statement "name = 'name'", which is then exec'd.
# NOTE(review): several names appear more than once (e.g. 'name', 'left',
# 'loss', 'metadata', 'purpose') — harmless, the exec just reassigns.
for _name in [
    'pts_plot','img','purpose','name','xyz_sizes','data_type','x','y',
    'xmin','ymin','xmax','ymax','xscale','yscale','floats_to_pixels',
    'pixels_to_floats','ysize','xsize','lines_plot','color',
    'reject_run',
    'left',
    'out1_in2',
    'dic',
    'name',
    'test',
    'dic_type',
    'purpose',
    'batch_size',
    'net',
    'camera_data',
    'metadata',
    'target_data',
    'names',
    'states',
    'loss_dic',
    'train',
    'val',
    'ctr',
    'all_steer',
    'epoch_counter',
    'get_data',
    'next',
    'run_code',
    'seg_num',
    'offset',
    'all_data_moment_id_codes',
    'left',
    'right',
    'fill',
    'clear',
    'forward',
    'backward',
    'display',
    'GPU',
    'BATCH_SIZE',
    'DISPLAY',
    'VERBOSE',
    'LOAD_ARUCO',
    'BAIR_CAR_DATA_PATH',
    'RESUME',
    'IGNORE',
    'REQUIRE_ONE',
    'USE_STATES',
    'N_FRAMES',
    'N_STEPS',
    'STRIDE',
    'save_net_timer',
    'print_timer',
    'epoch_timer',
    'WEIGHTS_FILE_PATH',
    'SAVE_FILE_NAME',
    'mode',
    'criterion',
    'optimizer',
    'data_ids',
    'data_moment',
    'racing',
    'caffe',
    'follow',
    'direct',
    'play',
    'furtive',
    'labels',
    'LCR',
    'data_moment_loss_record',
    'loss',
    'outputs',
    'print_now',
    'network',
    'metadata',
    'steer',
    'motor',
    'data',
    'NETWORK_OUTPUT_FOLDER',
    'code','data_moment_loss_records','loss_history','weights',
    'save_net',
    'CODE_PATH',
    'rate_ctr',
    'rate_timer',
    'step',
    'rate_counter',
    'loss_record',
    'add','loss',
    'TRAIN_TIME',
    'VAL_TIME','INITIAL_WEIGHTS_FOLDER',
    'activiations',
    'moment_index', 'imgs', 'view','camera_input','final_output',
    'pre_metadata_features','pre_metadata_features_metadata','post_metadata_features','scales','delay'
]:exec(d2n(_name,'=',"'",_name,"'"))
#
#EOF | [
"karlzipser@berkeley.edu"
] | karlzipser@berkeley.edu |
9159c331cb841b16ee912799461db8d6c1c669c8 | decf37b7209aee037eb445862ff5746791d978bd | /config.example.py | b8b27f3e5ff446fa844915cc2fb90f959e80d626 | [] | no_license | mbirth/wipy-theta | 9bcbd428eb32fc725224a2a1f83c386bba25c093 | 8c2f99951febce6996a183ec7e505f26d838098b | refs/heads/master | 2021-01-10T07:32:07.428076 | 2015-11-03T15:01:40 | 2015-11-03T15:01:40 | 44,282,060 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # modify and rename to config.py
# Credentials for the home network (presumably the Wi-Fi the WiPy joins
# on boot — confirm against the code that imports config.py).
HOME_SSID = 'MyHomeNetwork'
HOME_PASSWORD = 'ultrasecret!'
| [
"mbirth@gmail.com"
] | mbirth@gmail.com |
37dcddbb5760b82cc718a99321054fb899fc11bf | ba3231b25c60b73ca504cd788efa40d92cf9c037 | /nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/cmp/cmppolicy.py | 27844dcdfaf065883b411a91b98dd7aa313f7a18 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhuweigh/vpx13 | f6d559ae85341e56472e3592cbc67062dac34b93 | b36caa3729d3ca5515fa725f2d91aeaabdb2daa9 | refs/heads/master | 2020-07-04T22:15:16.595728 | 2019-09-20T00:19:56 | 2019-09-20T00:19:56 | 202,435,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,184 | py | #
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cmppolicy(base_resource) :
""" Configuration for compression policy resource. """
    def __init__(self) :
        # Writable policy attributes (NITRO resource fields set by the user).
        self._name = None
        self._rule = None
        self._resaction = None
        self._feature = None
        self._newname = None
        # Read-only attributes populated from the NITRO API response.
        self._expressiontype = None
        self._reqaction = None
        self._hits = None
        self._txbytes = None
        self._rxbytes = None
        self._clientttlb = None
        self._clienttransactions = None
        self._serverttlb = None
        self._servertransactions = None
        self._description = None
        self._isdefault = None
        # Record count returned by count queries.
        self.___count = None
@property
def name(self) :
r"""Name of the HTTP compression policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
Can be changed after the policy is created.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policy" or 'my cmp policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the HTTP compression policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
Can be changed after the policy is created.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policy" or 'my cmp policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
r"""Expression that determines which HTTP requests or responses match the compression policy.
The following requirements apply only to the Citrix ADC CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
r"""Expression that determines which HTTP requests or responses match the compression policy.
The following requirements apply only to the Citrix ADC CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def resaction(self) :
r"""The built-in or user-defined compression action to apply to the response when the policy matches a request or response.<br/>Minimum length = 1.
"""
try :
return self._resaction
except Exception as e:
raise e
@resaction.setter
def resaction(self, resaction) :
r"""The built-in or user-defined compression action to apply to the response when the policy matches a request or response.<br/>Minimum length = 1
"""
try :
self._resaction = resaction
except Exception as e:
raise e
@property
def feature(self) :
r"""The feature to be checked while applying this config.
"""
try :
return self._feature
except Exception as e:
raise e
@feature.setter
def feature(self, feature) :
r"""The feature to be checked while applying this config.
"""
try :
self._feature = feature
except Exception as e:
raise e
	# newname: target name used by the rename() operation.
	@property
	def newname(self) :
		r"""New name for the compression policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
		Choose a name that reflects the function that the policy performs.
		The following requirement applies only to the Citrix ADC CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policy" or 'my cmp policy').<br/>Minimum length = 1.
		"""
		try :
			return self._newname
		except Exception as e:
			raise e
	@newname.setter
	def newname(self, newname) :
		r"""New name for the compression policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
		Choose a name that reflects the function that the policy performs.
		The following requirement applies only to the Citrix ADC CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policy" or 'my cmp policy').<br/>Minimum length = 1
		"""
		try :
			self._newname = newname
		except Exception as e:
			raise e
	# --- Read-only attributes populated from NITRO responses (no setters) ---
	@property
	def expressiontype(self) :
		r"""Type of policy (Classic/Advanced) .<br/>Possible values = Classic Policy, Advanced Policy.
		"""
		try :
			return self._expressiontype
		except Exception as e:
			raise e
	@property
	def reqaction(self) :
		r"""The compression action to be performed on requests.
		"""
		try :
			return self._reqaction
		except Exception as e:
			raise e
	@property
	def hits(self) :
		r"""Number of hits.
		"""
		try :
			return self._hits
		except Exception as e:
			raise e
	@property
	def txbytes(self) :
		r"""Number of bytes transferred.
		"""
		try :
			return self._txbytes
		except Exception as e:
			raise e
	@property
	def rxbytes(self) :
		r"""Number of bytes received.
		"""
		try :
			return self._rxbytes
		except Exception as e:
			raise e
	@property
	def clientttlb(self) :
		r"""Total client TTLB value.
		"""
		try :
			return self._clientttlb
		except Exception as e:
			raise e
	@property
	def clienttransactions(self) :
		r"""Number of client transactions.
		"""
		try :
			return self._clienttransactions
		except Exception as e:
			raise e
	@property
	def serverttlb(self) :
		r"""Total server TTLB value.
		"""
		try :
			return self._serverttlb
		except Exception as e:
			raise e
	@property
	def servertransactions(self) :
		r"""Number of server transactions.
		"""
		try :
			return self._servertransactions
		except Exception as e:
			raise e
	@property
	def description(self) :
		r"""Description of the policy.
		"""
		try :
			return self._description
		except Exception as e:
			raise e
	@property
	def isdefault(self) :
		r"""A value of true is returned if it is a default policy.
		"""
		try :
			return self._isdefault
		except Exception as e:
			raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cmppolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cmppolicy
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
	@classmethod
	def add(cls, client, resource) :
		r""" Use this API to add cmppolicy.
		Accepts either a single cmppolicy resource or a list of them.
		"""
		try :
			if type(resource) is not list :
				# Single resource: copy the writable fields onto a fresh
				# object and issue one add request.
				addresource = cmppolicy()
				addresource.name = resource.name
				addresource.rule = resource.rule
				addresource.resaction = resource.resaction
				addresource.feature = resource.feature
				return addresource.add_resource(client)
			else :
				if (resource and len(resource) > 0) :
					addresources = [ cmppolicy() for _ in range(len(resource))]
					for i in range(len(resource)) :
						addresources[i].name = resource[i].name
						addresources[i].rule = resource[i].rule
						addresources[i].resaction = resource[i].resaction
						addresources[i].feature = resource[i].feature
				# NOTE(review): an empty list reaches add_bulk_request with
				# `addresources` unbound (UnboundLocalError) -- this mirrors
				# the upstream generated code; confirm before changing.
				result = cls.add_bulk_request(client, addresources)
				return result
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		r""" Use this API to delete cmppolicy.
		`resource` may be a policy name (str), a cmppolicy object, or a list
		of either.
		"""
		try :
			if type(resource) is not list :
				deleteresource = cmppolicy()
				# A bare name was passed rather than a cmppolicy object.
				if type(resource) != type(deleteresource):
					deleteresource.name = resource
				else :
					deleteresource.name = resource.name
				return deleteresource.delete_resource(client)
			else :
				if type(resource[0]) != cls :
					# List of names.
					if (resource and len(resource) > 0) :
						deleteresources = [ cmppolicy() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i]
				else :
					# List of cmppolicy objects.
					if (resource and len(resource) > 0) :
						deleteresources = [ cmppolicy() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i].name
				# NOTE(review): an empty list leaves `deleteresources` unbound
				# here (UnboundLocalError), matching the generated upstream code.
				result = cls.delete_bulk_request(client, deleteresources)
				return result
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		r""" Use this API to update cmppolicy.
		Only name, rule and resaction are sent on update.
		"""
		try :
			if type(resource) is not list :
				updateresource = cmppolicy()
				updateresource.name = resource.name
				updateresource.rule = resource.rule
				updateresource.resaction = resource.resaction
				return updateresource.update_resource(client)
			else :
				if (resource and len(resource) > 0) :
					updateresources = [ cmppolicy() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].rule = resource[i].rule
						updateresources[i].resaction = resource[i].resaction
				# NOTE(review): an empty list leaves `updateresources` unbound
				# here (UnboundLocalError), matching the generated upstream code.
				result = cls.update_bulk_request(client, updateresources)
				return result
		except Exception as e :
			raise e
@classmethod
def rename(cls, client, resource, new_name) :
r""" Use this API to rename a cmppolicy resource.
"""
try :
renameresource = cmppolicy()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		r""" Use this API to fetch all the cmppolicy resources that are configured on netscaler.
		With no `name`, fetches every policy; with a single name, fetches one;
		with a list of names, fetches each and returns a list of responses.
		"""
		try :
			if not name :
				# No name (or empty list): fetch everything.
				obj = cmppolicy()
				response = obj.get_resources(client, option_)
			else :
				if type(name) is not list :
					# Passing a cmppolicy object where a name is expected is
					# a caller error.
					if type(name) == cls :
						raise Exception('Invalid parameter name:{0}'.format(type(name)))
					obj = cmppolicy()
					obj.name = name
					response = obj.get_resource(client, option_)
				else :
					if name and len(name) > 0 :
						if type(name[0]) == cls :
							raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
						# One GET per requested name.
						response = [cmppolicy() for _ in range(len(name))]
						obj = [cmppolicy() for _ in range(len(name))]
						for i in range(len(name)) :
							obj[i] = cmppolicy()
							obj[i].name = name[i]
							response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of cmppolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cmppolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the cmppolicy resources configured on NetScaler.
"""
try :
obj = cmppolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of cmppolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cmppolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
	class Expressiontype:
		# Accepted values for the read-only `expressiontype` attribute.
		Classic_Policy = "Classic Policy"
		Advanced_Policy = "Advanced Policy"
	class Feature:
		# Accepted values for the `feature` attribute. Both the short code
		# and the long form of each feature name are valid.
		WL = "WL"
		WebLogging = "WebLogging"
		SP = "SP"
		SurgeProtection = "SurgeProtection"
		LB = "LB"
		LoadBalancing = "LoadBalancing"
		CS = "CS"
		ContentSwitching = "ContentSwitching"
		CR = "CR"
		CacheRedirection = "CacheRedirection"
		SC = "SC"
		SureConnect = "SureConnect"
		CMP = "CMP"
		CMPcntl = "CMPcntl"
		CompressionControl = "CompressionControl"
		PQ = "PQ"
		PriorityQueuing = "PriorityQueuing"
		HDOSP = "HDOSP"
		HttpDoSProtection = "HttpDoSProtection"
		SSLVPN = "SSLVPN"
		AAA = "AAA"
		GSLB = "GSLB"
		GlobalServerLoadBalancing = "GlobalServerLoadBalancing"
		SSL = "SSL"
		SSLOffload = "SSLOffload"
		SSLOffloading = "SSLOffloading"
		CF = "CF"
		ContentFiltering = "ContentFiltering"
		IC = "IC"
		IntegratedCaching = "IntegratedCaching"
		OSPF = "OSPF"
		OSPFRouting = "OSPFRouting"
		RIP = "RIP"
		RIPRouting = "RIPRouting"
		BGP = "BGP"
		BGPRouting = "BGPRouting"
		REWRITE = "REWRITE"
		IPv6PT = "IPv6PT"
		IPv6protocoltranslation = "IPv6protocoltranslation"
		AppFw = "AppFw"
		ApplicationFirewall = "ApplicationFirewall"
		RESPONDER = "RESPONDER"
		HTMLInjection = "HTMLInjection"
		push = "push"
		NSPush = "NSPush"
		NetScalerPush = "NetScalerPush"
		AppFlow = "AppFlow"
		CloudBridge = "CloudBridge"
		ISIS = "ISIS"
		ISISRouting = "ISISRouting"
		CH = "CH"
		CallHome = "CallHome"
		AppQoE = "AppQoE"
		ContentAccelerator = "ContentAccelerator"
		SYSTEM = "SYSTEM"
		RISE = "RISE"
		FEO = "FEO"
		LSN = "LSN"
		LargeScaleNAT = "LargeScaleNAT"
		RDPProxy = "RDPProxy"
		Rep = "Rep"
		Reputation = "Reputation"
		URLFiltering = "URLFiltering"
		VideoOptimization = "VideoOptimization"
		ForwardProxy = "ForwardProxy"
		SSLInterception = "SSLInterception"
		AdaptiveTCP = "AdaptiveTCP"
		CQA = "CQA"
		CI = "CI"
		ContentInspection = "ContentInspection"
class cmppolicy_response(base_response) :
	""" NITRO response wrapper holding `length` empty cmppolicy objects for
	the payload formatter to populate.
	"""
	def __init__(self, length=1) :
		# (Removed a dead `self.cmppolicy = []` that was immediately
		# overwritten by the list comprehension below.)
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		self.cmppolicy = [cmppolicy() for _ in range(length)]
| [
"zhuwei@xsky.com"
] | zhuwei@xsky.com |
53ae3685e3c2944ececf857d97562c509471aa2a | ce5052348295d08d3221ff6f474ad36d7fe6445a | /05-OOP_Exceptions/oop_2/classes/homework_result.py | 685fd3aba4fa65e2af9c4097cad4c75da000e70f | [] | no_license | AleksejSamarin/EpamPython2019 | bc045488d0b720379a4a21d86f389f69f182514a | b71943e4a544f602ce952b8a2af29ecf006ca5ef | refs/heads/master | 2020-05-24T12:49:11.443343 | 2019-07-04T11:00:57 | 2019-07-04T11:00:57 | 187,275,830 | 0 | 0 | null | 2019-05-17T20:11:30 | 2019-05-17T20:11:30 | null | UTF-8 | Python | false | false | 395 | py | from datetime import datetime
from .homework import Homework
class HomeworkResult:
    """A student's solution for a given Homework, stamped with its creation time.

    Raises TypeError when `homework` is not a Homework instance.
    """

    def __init__(self, author, homework: Homework, solution: str):
        if not isinstance(homework, Homework):
            raise TypeError('You gave a not Homework object')
        # Attributes are independent simple assignments.
        self.author = author
        self.homework = homework
        self.solution = solution
        self.created = datetime.now()
| [
"samarin113@gmail.com"
] | samarin113@gmail.com |
de85cd4ef55a89d3753d09221a1c3e2b4e6ce5d0 | 4adcb0d77047fc04f4f02b9828708eff47ca40ef | /user/views.py | 19c734e3ae1b696604a62449d33020b6d0cd48f6 | [] | no_license | gmjjatin/datapeace-assignment | 5cb042be0823cb8940c5f4d4672167075169c617 | 07f731dc35ea7c30df6caf8822577c2118d0288e | refs/heads/master | 2023-02-21T21:21:23.048948 | 2022-10-09T10:08:24 | 2022-10-09T10:08:24 | 184,530,280 | 0 | 2 | null | 2023-02-07T22:03:12 | 2019-05-02T06:12:50 | JavaScript | UTF-8 | Python | false | false | 2,367 | py | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from user.models import *
# api views
from rest_framework import generics
# from django_filters import rest_framework as filters
from .serializers import UserSerializer
class UsersView(generics.ListCreateAPIView):
    """List existing users or create a new one.

    Supports ?search= and ?ordering= via DRF backends, plus exact-match
    filtering on any field in _FILTER_FIELDS (e.g. ?first_name=John&age=30).
    """
    serializer_class = UserSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    ordering_fields = '__all__'
    search_fields = ('id', 'first_name', 'last_name', 'company_name', 'city', 'state', 'zip', 'email', 'web', 'age',)

    # Query parameters accepted for exact-match filtering; replaces ten
    # copy-pasted `if param: queryset = queryset.filter(...)` stanzas.
    _FILTER_FIELDS = ('id', 'first_name', 'last_name', 'company_name',
                      'city', 'state', 'zip', 'email', 'web', 'age')

    def get_queryset(self):
        """Return all users, narrowed by any exact-match query parameters."""
        queryset = User.objects.all()
        for field in self._FILTER_FIELDS:
            value = self.request.query_params.get(field, None)
            # Falsy values (missing param or empty string) are skipped,
            # matching the original per-field truthiness checks.
            if value:
                queryset = queryset.filter(**{field: value})
        return queryset

    def perform_create(self, serializer):
        """Save the post data when creating a new user."""
        serializer.save()
class UserDetailsView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single user, looked up by primary key."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
| [
"jatin.irsc@gmail.com"
] | jatin.irsc@gmail.com |
7540b3442e53b36dbb55bce5a3c058d967207818 | 296d4fec38b2a5ec2f4eb402d1b2145980dd184b | /aliens.py | ac6d5f3a8d3bb0c5203dcb6a7cf851111dbd07b3 | [] | no_license | RayGutt/python | 9464ae7c63850240df58ff78c6050bc6e1d35b3e | a9b68d43923f13b58e7d59fdabf649820d48bd52 | refs/heads/master | 2020-11-27T01:17:57.136062 | 2020-01-22T14:36:25 | 2020-01-22T14:36:25 | 229,254,199 | 0 | 0 | null | 2020-01-05T19:47:27 | 2019-12-20T11:39:53 | HTML | UTF-8 | Python | false | false | 844 | py | alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'yellow', 'points': 15}

# Show the three hand-built aliens first.
for alien in [alien_0, alien_1, alien_2]:
    print(alien)
print("_________")

# Build a fleet of 30 identical green aliens (each dict is distinct).
aliens = [{'color': 'green', 'points': 5, 'speed': 'slow'} for _ in range(30)]

# Upgrade the first three aliens one tier (green -> yellow, yellow -> red).
for alien in aliens[:3]:
    if alien['color'] == 'green':
        alien['color'] = 'yellow'
        alien['speed'] = 'medium'
        alien['points'] = 10
    elif alien['color'] == 'yellow':
        alien['color'] = 'red'
        alien['speed'] = 'fast'
        alien['points'] = 15

# Show the first 5 aliens.
for alien in aliens[:5]:
    print(alien)
print("...")

# Show how many aliens have been created.
print(f"Total number of aliens: {len(aliens)}")
"le.caribou@gmail.com"
] | le.caribou@gmail.com |
2604150d960f651c0c307ed9da2d7154ec888a78 | de1045bcefaa44a4d8a7b1d9eb20a4b1dcce18b1 | /python/interview_questions/remove_nth_node_from_linked_list.py | 719e752b9a39dd0362ef4c73f7a54ad04ad881a4 | [
"MIT"
] | permissive | rcanepa/cs-fundamentals | d7c46383d759a841ce0ddb83ab56e86659dbf2c9 | b362fc206417501e53a5739df1edf7568901eef8 | refs/heads/master | 2022-11-05T21:30:56.546540 | 2020-06-07T01:10:56 | 2020-06-07T01:10:56 | 101,583,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | """Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
class ListNode(object):
    """A node of a singly linked list."""

    def __init__(self, x):
        self.val = x
        self.next = None


def remove_nth_from_end(head, n):
    """Remove the nth node from the end of the list in one pass.

    Keeps a lead runner `n` nodes ahead of a lag runner; when the lead
    runner reaches the tail, the lag runner sits on the node to delete
    and `prev` sits just before it.
    """
    prev = head   # node just before `target`
    target = head  # candidate node to remove
    lead = head   # runner that walks to the tail
    steps = 0
    while lead.next:
        lead = lead.next
        steps += 1
        # Only start advancing the pair once the gap reaches n.
        if steps >= n:
            prev = target
            target = target.next
    prev.next = target.next
    # If the pair never advanced, the head itself is the removed node.
    if prev is target:
        head = head.next
    return head
if __name__ == "__main__":
n = 2
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
source = []
ptr = head
while ptr:
source.append(ptr.val)
ptr = ptr.next
print(source)
head = remove_nth_from_end(head, n)
ptr = head
result = []
while ptr:
result.append(ptr.val)
ptr = ptr.next
print(result)
| [
"rcanepag@gmail.com"
] | rcanepag@gmail.com |
81e48fe24428936962f09d70e653c1c9e72b5e24 | 383b7a62d18a3efa358b526124fc52c157e9c5e8 | /parties/utils.py | cae18d54989a7a4547d9569ca84c2a6ade1d5845 | [] | no_license | josiahkhoo/barter-django | acb245a44376b56aae145fb363efc80f94aa2c2f | 4b933463f83579ec60b488bf4dfdd2f49599039a | refs/heads/master | 2022-11-26T10:55:01.476993 | 2020-07-27T13:46:52 | 2020-07-27T13:46:52 | 269,992,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from enum import IntEnum
class PartyState(IntEnum):
    """Lifecycle state of a party."""
    ONGOING = 0
    COMPLETED = 1

    @classmethod
    def choices(cls):
        """Return (value, name) pairs, e.g. for a Django field's choices."""
        return [(member.value, member.name) for member in cls]
class PartyEventType(IntEnum):
    """Kind of UI event emitted for a party."""
    UI_PARTY_UPDATE = 0
    UI_PARTY_COMPLETE = 1

    @classmethod
    def choices(cls):
        """Return (value, name) pairs, e.g. for a Django field's choices."""
        return [(member.value, member.name) for member in cls]
| [
"josiahkhooshaoqi@gmail.com"
] | josiahkhooshaoqi@gmail.com |
19b7c412203b5be443f23f850a39b509ecdb5a04 | 2ad6c1940b777a51669f6a8c61f15d04ced443c6 | /Algorithms/test/task_1_9.py | 4eccfca0ec919c6ae6781dc154e6698ba1f69138 | [] | no_license | RomanLopatin/Python | df06e521fd0a4fa468e51b89df835fadc9aac888 | cef8df6f43564189bdd29043b6f8e23468ea438b | refs/heads/main | 2023-04-30T10:47:51.970254 | 2021-05-02T22:08:28 | 2021-05-02T22:08:28 | 317,842,764 | 1 | 0 | null | 2021-05-08T11:50:29 | 2020-12-02T11:37:39 | Python | UTF-8 | Python | false | false | 809 | py | """"coding=utf-8
1.9
Вводятся три разных числа. Найти, какое из них является средним (больше одного, но меньше другого).
"""
print('Введите 3 числа: a,b,c')
a = float(input('Введите a:'))
b = float(input('Введите b:'))
c = float(input('Введите c:'))
# The middle value of three numbers is the median: sort and take the center.
middle = sorted((a, b, c))[1]
print(f"Среднее число - {middle}")
| [
"romanlopatin@gmail.com"
] | romanlopatin@gmail.com |
a232ab5e7b7b3938334e7d69911f01ae956a17eb | 4fdaa61e2fb2d320a0903e17024598c6a67ab0fb | /python/Vaav/kitchen.py | 9ffefc461390bce99d81d0b9e5536c9669c10b11 | [] | no_license | khans/ProgrammingAndDataStructures | 10d5cd5f30f703298ba132be4dfba828f3a0e9e1 | 58c1d822fa5eab17485369bc40dd1376db389f44 | refs/heads/master | 2021-01-25T14:03:40.616633 | 2018-06-19T23:02:44 | 2018-06-19T23:02:44 | 123,643,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py |
from collections import deque,defaultdict
class Table:
capacity = 0
availability = True
occupancy = 0
def __init__(self,number):
self.number = number
def addOccupant(self):
self.occupancy += 1
self.availability = False
def setCapacity(self,capacity):
self.capacity = capacity
def getTableNumber(self):
return self.number
class Order:
    """A collection of menu items with the quantity ordered for each."""

    def __init__(self):
        self.orderList = {}

    def addOrder(self, item, count):
        """Record `count` units of `item` (overwrites any previous count)."""
        self.orderList[item] = count
class Kitchen:
    """FIFO queue of orders being prepared."""

    def __init__(self):
        # `queue` was a class-level deque, so every Kitchen instance shared
        # one queue; each kitchen now owns its own.
        self.queue = deque()
        self.free = False

    def make(self, order):
        """Enqueue an order for preparation."""
        self.queue.append(order)

    def isReady(self, order):
        """Return True once `order` is no longer waiting in the queue."""
        return order not in self.queue

    def getFood(self):
        """Hand out (discard) the oldest queued order."""
        self.queue.popleft()

    def getQueue(self):
        """Expose the pending-order queue."""
        return self.queue

    def doneDish(self):
        """Mark the oldest queued order as finished."""
        self.queue.popleft()
| [
"isafakhan@gmail.com"
] | isafakhan@gmail.com |
836672426f628f5769afff768523aea5adfa1c6b | 5aa51d9db050c74929ac2296fd2b2bea1c1d6897 | /train.py | 093e9615527811aa86d219b0f00fca1346ca7e82 | [
"MIT"
] | permissive | monkeysforever/Sentiment-Analysis-using-Pretrained-Language-Models | 85cecde4470983f3ca534b1e7dbd0a8b80a2eef1 | 22086fb3f8b2cee9192eaf54bae3acc2ca53f9b1 | refs/heads/master | 2022-11-16T01:10:52.720970 | 2020-07-14T02:23:53 | 2020-07-14T02:23:53 | 279,455,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,317 | py | import argparse
from transformers import BertTokenizer, BertModel
from transformers import AdamW
import torch
import logging
import os
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from data import dataset, transforms
from models.lm_classifier import PretrainedClassifier
import time
from utils.misc import AverageMeter, get_accuracy, set_seed
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
import numpy as np
logger = logging.getLogger(__name__)
# Best validation accuracy seen so far; updated in main() via `global best_acc`.
best_acc = 0
def get_args(parser):
    """Attach all command-line options to `parser` and return the parsed args.

    Note: mutates the parser object passed in by the caller (see main()).
    """
    parser.add_argument('--gpu-id', default='0', type=int,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--model', default='bert',
                        choices=['bert'],
                        help='type of language model')
    parser.add_argument('--tar', default='model.pth.tar', type=str,
                        help='path for saving the model')
    parser.add_argument('--num-workers', type=int, default=4,
                        help='number of workers')
    parser.add_argument('--dataset', default='imdb', type=str,
                        choices=['imdb'],
                        help='dataset name')
    parser.add_argument('--epochs', default=100, type=int,
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int,
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--batch-size', default=64, type=int,
                        help='train batch size')
    parser.add_argument('--test-batch-size', default=40, type=int,
                        help='test batch size')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                        help='initial learning rate')
    parser.add_argument('--wdecay', default=0.0, type=float,
                        help='weight decay')
    parser.add_argument('--out', default='results',
                        help='directory to output the result')
    parser.add_argument('--resume', default='', type=str,
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--seed', type=int, default=-1,
                        help="random seed (-1: don't use random seed)")
    parser.add_argument('--no-progress', action='store_true',
                        help="don't use progress bar")
    parser.add_argument('--train', type=str, default='data/IMDB_train.csv',
                        help='The path of train dataset')
    parser.add_argument('--eval', type=str, default='data/IMDB_test.csv',
                        help='The path of test dataset')
    return parser.parse_args()
def train(train_loader, model, optimizer, criterion, args, epoch):
    """Run one training epoch over `train_loader` and return the average loss.

    Expects `args` to carry: device, iteration, epochs, no_progress.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end = time.time()
    p_bar = None
    if not args.no_progress:
        p_bar = tqdm(range(args.iteration))
    model.train()
    for batch_idx, (texts, labels) in enumerate(train_loader):
        texts = texts.to(args.device)
        labels = labels.to(args.device)
        # Attention mask: 1 for real tokens, 0 for padding (token id 0).
        masks = []
        for text in texts:
            masks.append([int(token != 0) for token in text])
        masks = torch.FloatTensor(masks).to(args.device)
        data_time.update(time.time() - end)
        logits = model(texts, masks)
        loss = criterion(logits, labels)
        loss.backward()
        losses.update(loss.item())
        optimizer.step()
        model.zero_grad()
        batch_time.update(time.time() - end)
        end = time.time()
        if not args.no_progress:
            # NOTE(review): `(epoch + 1) % 1 == 0` is always true -- this
            # looks like a leftover throttle; confirm intended interval.
            if (epoch + 1) % 1 == 0:
                # `data`/`bt` kwargs are not referenced by the format string;
                # presumably kept for an earlier description template.
                p_bar.set_description(
                    "Train Epoch: {epoch}/{epochs:4}. Iter: {batch:4}/{iter:4}. "
                    "Loss: {loss:.4f}. ".format(
                        epoch=epoch + 1,
                        batch=batch_idx + 1,
                        epochs=args.epochs,
                        iter=args.iteration,
                        data=data_time.avg,
                        bt=batch_time.avg,
                        loss=losses.avg))
                p_bar.update()
    if not args.no_progress:
        p_bar.close()
    return losses.avg
def test(test_loader, model, criterion, args):
    """Evaluate `model` on `test_loader`; return (average loss, top-1 accuracy).

    Expects `args` to carry: device, classes, topk.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    test_loader = tqdm(test_loader)
    # CPU-side accumulators of predictions/labels/logits. Currently only
    # accumulated, never consumed -- retained for e.g. a confusion matrix.
    predlist = torch.zeros(0, dtype=torch.long, device='cpu')
    lbllist = torch.zeros(0, dtype=torch.long, device='cpu')
    outlist = torch.zeros(0, dtype=torch.float, device='cpu')
    model.eval()
    with torch.no_grad():
        for batch_idx, (texts, labels) in enumerate(test_loader):
            texts = texts.to(args.device)
            labels = labels.to(args.device)
            # Attention mask: 1 for real tokens, 0 for padding (token id 0).
            masks = []
            for text in texts:
                masks.append([int(token != 0) for token in text])
            masks = torch.FloatTensor(masks).to(args.device)
            data_time.update(time.time() - end)
            logits = model(texts, masks)
            _, predicted = torch.max(logits.data, 1)
            loss = criterion(logits, labels)
            predlist = torch.cat([predlist, predicted.view(-1).cpu()])
            lbllist = torch.cat([lbllist, labels.view(-1).cpu()])
            outlist = torch.cat([outlist, logits.view(-1).cpu()])
            # (Removed a dead `topk = (1, 5)` local assignment: accuracy
            # always uses args.topk, so the value was never read.)
            prec = get_accuracy(logits, labels, args.topk)
            losses.update(loss.item(), texts.shape[0])
            top1.update(prec[0].item(), texts.shape[0])
            if len(prec) > 1:
                top5.update(prec[1].item(), texts.shape[0])
            batch_time.update(time.time() - end)
            end = time.time()
            test_loader.set_description("Test Iter: {batch:4}/{iter:4}. Loss: {loss:.4f}. top1: {top1:.2f}. ".format(
                batch=batch_idx + 1,
                iter=len(test_loader),
                loss=losses.avg,
                top1=top1.avg,
            ))
        test_loader.close()
        logger.info("top-1 acc: {:.2f}".format(top1.avg))
    return losses.avg, top1.avg
def main():
    """Entry point: build tokenizer/model/data, then run the train/eval loop."""
    parser = argparse.ArgumentParser(description='PyTorch MultiCon Text Training')
    args = get_args(parser)
    global best_acc
    text_transforms = None
    writer = None
    args.device = torch.device('cuda', args.gpu_id)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO)
    if args.seed != -1:
        set_seed(args)
    if not args.no_progress:
        os.makedirs(args.out, exist_ok=True)
        writer = SummaryWriter(args.out)
    # --- language model / tokenizer / text pipeline setup ---
    if args.model == 'bert':
        args.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        args.max_length = 512
        args.pad_token = args.tokenizer.pad_token
        print(args.pad_token)
        args.unk_token = args.tokenizer.unk_token
        args.init_token = args.tokenizer.cls_token
        args.end_token = args.tokenizer.sep_token
        args.lm = BertModel.from_pretrained('bert-base-uncased')
        args.embedding_size = 768
        # Tokenize -> add [CLS]/[SEP] -> pad/truncate -> ids -> tensor.
        text_transforms = transforms.Compose([
            transforms.Tokenize(tokenizer=args.tokenizer.tokenize),
            transforms.InsertTokens(init_token=args.init_token, eos_token=args.end_token),
            transforms.Pad(max_length=args.max_length, pad_token=args.pad_token, eos_token=args.end_token),
            transforms.Numericalize(tokenizer=args.tokenizer),
            transforms.ToTensor()
        ])
    if args.dataset == 'imdb':
        args.classes = ['neg', 'pos']
        args.topk = (1,)
    train_dataset = dataset.TextCSVDataset(args.train, text_transforms=text_transforms)
    test_dataset = dataset.TextCSVDataset(args.eval, text_transforms=text_transforms)
    # --- classifier head on top of the (frozen) language model ---
    model = PretrainedClassifier(language_model=args.lm, embedding_size=args.embedding_size,
                                 num_classes=len(args.classes))
    logger.info("Total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1e6))
    model.freeze_lm()
    model.to(args.device)
    optimizer = AdamW(params=model.parameters(), lr = args.lr, weight_decay = args.wdecay)
    criterion = torch.nn.CrossEntropyLoss()
    train_sampler = RandomSampler
    train_loader = DataLoader(
        train_dataset,
        sampler=train_sampler(train_dataset),
        batch_size=args.batch_size,
        drop_last=True)
    test_loader = DataLoader(
        test_dataset,
        sampler=SequentialSampler(test_dataset),
        batch_size=args.test_batch_size,
        drop_last=True)
    args.iteration = len(train_dataset) // args.batch_size
    args.total_steps = args.epochs * args.iteration
    scheduler = ReduceLROnPlateau(optimizer, 'min')
    start_epoch = 0
    # --- optional checkpoint resume ---
    if args.resume:
        logger.info("==> Resuming from checkpoint..")
        assert os.path.isfile(args.resume), "Error: no checkpoint directory found!"
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    logger.info("***** Running training *****")
    logger.info(f"  Task = {args.dataset}@{len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.epochs}")
    logger.info(f"  Total train batch size = {args.batch_size}")
    logger.info(f"  Total optimization steps = {args.total_steps}")
    test_accs = []
    test_loss = None
    model.zero_grad()
    # --- epoch loop: train, evaluate, checkpoint the best model ---
    for epoch in range(start_epoch, args.epochs):
        train_loss = train(train_loader, model, optimizer, criterion, args, epoch)
        writer.add_scalar('Loss/train', train_loss, args.epochs - start_epoch)
        # NOTE(review): `(epoch + 1) % 1 == 0` is always true; looks like a
        # leftover eval/log interval -- confirm intended frequency.
        if (epoch + 1) % 1 == 0:
            logger.info("Epoch {}. train_loss: {:.4f}."
                        .format(epoch + 1, train_loss))
        test_model = model
        if (epoch + 1) % 1 == 0:
            test_loss, test_acc = test(test_loader, test_model, criterion, args)
            writer.add_scalar('Loss/test', test_loss)
            writer.add_scalar('Acc/test', test_acc)
            is_best = test_acc > best_acc
            best_acc = max(test_acc, best_acc)
            if is_best:
                # Unwrap DataParallel-style wrappers before saving.
                model_to_save = test_model.module if hasattr(test_model, "module") else test_model
                torch.save({
                    'epoch': epoch + 1,
                    'arch': 'BERT',
                    'state_dict': model_to_save.state_dict(),
                    'best_acc1': best_acc,
                    'optimizer': optimizer.state_dict(),
                }, args.tar)
            test_accs.append(test_acc)
            logger.info('Best top-1 acc: {:.2f}'.format(best_acc))
            logger.info('Mean top-1 acc: {:.2f}\n'.format(np.mean(test_accs[-20:])))
        # Plateau scheduler steps on the latest validation loss.
        scheduler.step(test_loss)
    writer.close()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"ahlawat.randeep@gmail.com"
] | ahlawat.randeep@gmail.com |
597f1b6c57b44b3cb9610921015f1578d30d2124 | 434ced86acc31e03577fb41a1d959b88e8acccc3 | /ML-exercise1-4/ex4/nnCostFunction.py | d6910f2332632d1df963df228af7987f85a26f86 | [] | no_license | wangmx116/wdd | ab1837dc14bd547ecd3105f487a300cd13f2e99d | 1b190786ba6e1d35e101ef1c8bc22c066087a2f4 | refs/heads/master | 2022-11-26T21:30:23.232233 | 2020-07-20T05:40:02 | 2020-07-20T05:40:02 | 281,017,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,045 | py | import numpy as np
from numpy import log
from sigmoid import sigmoid
from sigmoidGradient import sigmoidGradient
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, Lambda):
    """Compute the regularized cost and gradient of a 2-layer neural network.

    Args:
        nn_params: Flattened (Fortran-order) concatenation of Theta1 and Theta2.
        input_layer_size, hidden_layer_size, num_labels: network dimensions.
        X: (m, input_layer_size) input matrix.
        y: length-m label vector with values in 1..num_labels.
        Lambda: regularization strength.

    Returns:
        (J, grad): the cost (shape-(1,) array, as before) and the unrolled
        gradient vector in the same layout as `nn_params`.
    """
    # Reshape nn_params back into the weight matrices Theta1 and Theta2.
    Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],
                        (hidden_layer_size, input_layer_size + 1), order='F').copy()  # (hidden, input+1)
    Theta2 = np.reshape(nn_params[hidden_layer_size * (input_layer_size + 1):],
                        (num_labels, (hidden_layer_size + 1)), order='F').copy()  # (labels, hidden+1)

    m, _ = X.shape

    # --- forward pass ---
    X = np.column_stack((np.ones((m, 1)), X))            # a1 with bias column
    a2 = sigmoid(X.dot(Theta1.T))                        # (m, hidden)
    a2 = np.column_stack((np.ones((a2.shape[0], 1)), a2))  # add bias -> (m, hidden+1)
    a3 = sigmoid(a2.dot(Theta2.T))                       # output activations (m, labels)

    # --- cost and output-layer error, one class at a time ---
    J = 0.
    delta3 = np.zeros((m, num_labels))
    for k in range(num_labels):
        # One-vs-all target vector for class k (labels are 1-based).
        y_k = np.array([1 if label == k + 1 else 0 for label in y]).reshape(1, -1)
        J = J - y_k.dot(log(a3[:, k])) - (1. - y_k).dot(log(1. - a3[:, k]))
        delta3[:, k] = a3[:, k] - y_k

    # Regularization excludes the bias columns of both weight matrices.
    reg = (np.sum(Theta1[:, 1:] ** 2.) + np.sum(Theta2[:, 1:] ** 2.)) * Lambda / 2. / m
    J = J / m + reg

    # --- backpropagation ---
    delta2 = (delta3.dot(Theta2[:, 1:])) * sigmoidGradient(X.dot(Theta1.T))  # (m, hidden)
    # BUG FIX: the gradients were previously the raw accumulated deltas.
    # They must be averaged over m and regularized (bias column excluded)
    # to match the analytic gradient verified by checkNNGradients.
    Theta1_grad = delta2.T.dot(X) / m                    # (hidden, input+1)
    Theta2_grad = delta3.T.dot(a2) / m                   # (labels, hidden+1)
    Theta1_grad[:, 1:] += (Lambda / m) * Theta1[:, 1:]
    Theta2_grad[:, 1:] += (Lambda / m) * Theta2[:, 1:]

    # Unroll the gradient in the same Fortran-order layout as the input.
    grad = np.hstack((Theta1_grad.T.ravel(), Theta2_grad.T.ravel()))
    return J, grad
"2429860229@qq.com"
] | 2429860229@qq.com |
ddc841703ae4178b72cba0ce52b7bf5ea4be2825 | b9db137d7e61f0d8bf15a5f03cf72e6d42558d6b | /helper.py | dd4d77c773313d7aa45c3b690202562665ab28e6 | [
"MIT"
] | permissive | zliu63/ctc_tf | 044ec42970b1a7970b5b1273a0dec266c94d3844 | 210de74c17bd64b5b2fefed69653e71626f6980a | refs/heads/master | 2021-09-13T12:11:12.688421 | 2018-04-29T21:17:57 | 2018-04-29T21:17:57 | 131,530,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,695 | py | import numpy as np
import os
import shutil
def split_train_val(X, y, train_size):
"""Split dataset for training and validation.
Args:
X: A 1-D numpy array containing pathes of images.
y: A 1-D numpy array containing labels.
train_size: Size of training data to split.
Returns:
1-D numpy array having the same definition with X and y.
"""
total_size = len(X)
# shuffle data
shuffle_indices = np.random.permutation(np.arange(total_size))
X = X[shuffle_indices]
y = y[shuffle_indices]
# split training data
train_indices = np.random.choice(total_size, train_size, replace=False)
X_train = X[train_indices]
y_train = y[train_indices]
# split validation data
val_indices = [i for i in range(total_size) if i not in train_indices]
X_val = X[val_indices]
y_val = y[val_indices]
return X_train, y_train, X_val, y_val
def write_to_file(data, file_to_output):
"""Write X_train/y_train/X_val/y_val/X_infer to file for further
processing (e.g. make input queue of tensorflow).
Args:
data: A 1-D numpy array, e.g, X_train/y_train/X_val/y_val/X_infer.
file_to_output: A file to store data.
"""
# with open('X_train.csv','a') as f_handle:
# np.savetxt(f_handle, X_train, fmt='%s', delimiter=",")
with open(file_to_output, 'w') as f:
for item in data.tolist():
f.write(item + '\n')
def load_labels(file):
labels = list(open(file).readlines())
labels = [s.strip() for s in labels]
labels = [s.split() for s in labels]
labels_dict = dict(labels)
labels = np.asarray(labels, dtype=str)
labels = labels[:, 0]
return labels, labels_dict
def load_img_path(images_path):
tmp = os.listdir(images_path)
#tmp.sort(key=lambda x: int(x.split('.')[0]))
tmp1 = []
for i in tmp:
n = i.split('.')[0]
if n != '':
tmp1.append(int(n))
tmp1.sort()
tmp = [str(i) for i in tmp1]
file_names = [images_path+s for s in tmp]
file_names = np.asarray(file_names)
return file_names
def load_data(file_to_read):
"""Load X_train/y_train/X_val/y_val/X_infer for further
processing (e.g. make input queue of tensorflow).
Args:
file_to_read:
Returns:
X_train/y_train/X_val/y_val/X_infer.
"""
data = np.recfromtxt(file_to_read)
data = np.asarray(data)
return data
def cp_file(imgs_list_para, labels_list_para, dst_para):
for i in range(imgs_list_para.shape[0]):
file_path = imgs_list_para[i]
filename = os.path.basename(file_path)
#l_split = filename.split('.')
#print(filename)
#if len(l_split) <= 1:
# continue
#fn = filename.split('.')[0]
#ext = filename.split('.')[1]
fn = filename
ext = 'png'
dest_filename = dst_para + fn + '_' + labels_list_para[i] + '.' + ext
print(dest_filename)
shutil.copyfile(file_path+'.png', dest_filename)
if __name__ == '__main__':
labels_path = './imgs/labels.txt'
labels, labels_dict = load_labels(labels_path)
# print(labels)
images_path = './imgs/image_contest_level_1/'
image_path_list = load_img_path(images_path)
# print(image_path_list[:10])
X_train, y_train, X_val, y_val = split_train_val(image_path_list, labels, 80000)
write_to_file(X_train, "./imgs/X_train.txt")
write_to_file(y_train, "./imgs/y_train.txt")
write_to_file(X_val, "./imgs/X_val.txt")
write_to_file(y_val, "./imgs/y_val.txt")
cp_file(X_train, y_train, './imgs/train/')
cp_file(X_val, y_val, './imgs/val/')
| [
"zliu63@illinois.edu"
] | zliu63@illinois.edu |
7fbc8d5ca1d93c1ff42c22beefc7772cb15d39ca | 2f8f8171b3b996b0c866ede72367ec26f64eae39 | /sampleproject/book/BeginningPython3_O_REILLY/chapter10/10-8.py | 659dc821caed89f2f69b939227a7fca816939de1 | [] | no_license | kabaksh0507/exercise_python_it-1 | da46edce09301b03a5351ee1885fb01eb69d8240 | 2b6c80a79494c9981e51bd03696c3aa19d6625ec | refs/heads/main | 2023-03-04T03:12:44.188468 | 2021-02-08T08:55:36 | 2021-02-08T08:55:36 | 337,014,697 | 0 | 0 | null | 2021-02-08T08:57:30 | 2021-02-08T08:57:30 | null | UTF-8 | Python | false | false | 169 | py | from datetime import date
birth_day = date(1987, 8, 9)
print(birth_day)
fmt = 'year = %Y , month = %B , day = %d , day of the week = %A'
print(birth_day.strftime(fmt)) | [
"kazkitou9080@gmail.com"
] | kazkitou9080@gmail.com |
fb152061e112dac669111ad1d1e9b0d3d1ce69be | fb7f9d890a5db02846b6a4a20e4a9cbaefa7eda4 | /python_snippets/assert_test.py | e5a46e57ee4c80cb897c0e19e6fe2e0ac3ab121a | [] | no_license | aftab82/practice | 94d86d0f3a403865d53ff4457e83b9b16bd048de | b9e0ba2f5e6c7dbe968dec18069b51144b3227e3 | refs/heads/master | 2020-08-05T15:55:05.431999 | 2020-06-08T06:06:14 | 2020-06-08T06:06:14 | 212,604,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import unittest
class Test(unittest.TestCase):
def test_empty(self):
self.assertNotEqual(mark1, [])
def avg(marks):
assert len(marks) != 0
return sum(marks)/len(marks)
mark1 = [1]
if __name__ == '__main__':
unittest.main()
| [
"sali@gogoair.com"
] | sali@gogoair.com |
90a954e345f531880f8bfee7f4c958164933934e | aeac5b3cc7a34e3eeaef5d41e8d8ef7f4b3b38dc | /testlib/test_transforms.py | cdc9ecac6b175b53f53b6ebf7dd8e8589d1dffe3 | [
"MIT"
] | permissive | venkatakrishnareddymallu/gramex | 7377f68d7207248b98f846e54a9c458f4300d30a | 725f7564e607f22fc43d06d639aeaf785500f284 | refs/heads/master | 2023-07-28T14:03:20.452158 | 2021-09-07T11:12:23 | 2021-09-07T11:12:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,608 | py | import io
import os
import yaml
import inspect
import unittest
from dis import dis
from types import GeneratorType
from tornado.gen import coroutine, Task
from orderedattrdict import AttrDict
from orderedattrdict.yamlutils import AttrDictYAMLLoader
from gramex.transforms import build_transform, flattener, badgerfish, template, once
from gramex.cache import reload_module
from nose.tools import eq_, assert_raises
folder = os.path.dirname(os.path.abspath(__file__))
def yaml_parse(text):
return yaml.load(text, Loader=AttrDictYAMLLoader)
def remove(path):
if os.path.exists(path):
os.unlink(path)
@coroutine
def gen_str(val):
'''Sample coroutine method'''
yield Task(str, val)
def eqfn(actual, expected):
'''Checks if two functions are the same'''
# msg = parent function's name
msg = inspect.stack()[1][3]
a_code, e_code = actual.__code__, expected.__code__
actual, expected = a_code.co_code, e_code.co_code
if actual != expected:
# Print the disassembled code to make debugging easier
print('\nActual') # noqa
dis(actual)
print(a_code.co_names) # noqa
print('Expected') # noqa
dis(expected)
print(e_code.co_names) # noqa
eq_(actual, expected, '%s: code mismatch' % msg)
src, tgt = a_code.co_argcount, e_code.co_argcount
eq_(src, tgt, '%s: argcount %d != %d' % (msg, src, tgt))
src, tgt = a_code.co_nlocals, e_code.co_nlocals
eq_(src, tgt, '%s: nlocals %d != %d' % (msg, src, tgt))
class BuildTransform(unittest.TestCase):
'''Test build_transform CODE output'''
dummy = os.path.join(folder, 'dummy.py')
files = set([dummy])
def check_transform(self, transform, yaml_code, vars=None, cache=True, iter=True, doc=None):
fn = build_transform(yaml_parse(yaml_code), vars=vars, cache=cache, iter=iter)
eqfn(fn, transform)
if doc is not None:
eq_(fn.__doc__, doc)
return fn
def test_invalid_function_raises_error(self):
with assert_raises(KeyError):
build_transform({})
with assert_raises(KeyError):
build_transform({'function': ''})
with assert_raises(ValueError):
build_transform({'function': 'x = 1'})
with assert_raises(ValueError):
build_transform({'function': 'x(); y()'})
with assert_raises(ValueError):
build_transform({'function': 'import json'})
def test_expr(self):
def transform(x=0):
result = x + 1
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: x + 1', vars={'x': 0}, doc='x + 1')
def transform(x=0):
result = x + 1
return result
self.check_transform(transform, 'function: x + 1', vars={'x': 0}, iter=False, doc='x + 1')
def transform():
result = "abc"
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''function: '"abc"' ''', vars={}, doc='"abc"')
def transform():
import gramex.cache
import pandas
result = gramex.cache.open('x', pandas.read_csv).to_html()
return result if isinstance(result, GeneratorType) else [result, ]
# This is a complex function. It's not clear whether we should pick up the docs from
# to_html() or gramex.cache.open(). Let the user specify the docs
fn = 'function: gramex.cache.open("x", pandas.read_csv).to_html()'
self.check_transform(transform, fn, vars={}, doc=None)
def transform(s=None):
result = 1 if "windows" in s.lower() else 2 if "linux" in s.lower() else 0
return result if isinstance(result, GeneratorType) else [result, ]
fn = 'function: 1 if "windows" in s.lower() else 2 if "linux" in s.lower() else 0'
self.check_transform(transform, fn, vars={'s': None})
def transform(_val):
result = condition(1, 0, -1) # noqa: this is in gramex.transforms
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: condition(1, 0, -1)')
def transform(_val):
result = str.upper(_val)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: str.upper')
self.check_transform(transform, 'function: str.upper(_val)', doc=str.upper.__doc__)
def test_fn(self):
def transform(_val):
result = len(_val)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: len
''')
def test_fn_no_args(self):
def transform():
result = max(1, 2)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: max
args: [1, 2]
''', vars={})
self.check_transform(transform, 'function: max(1, 2)', vars={})
def test_fn_args(self):
def transform(_val):
result = max(1, 2)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: max
args: [1, 2]
''')
self.check_transform(transform, 'function: max(1, 2)')
def transform(_val):
result = len('abc')
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: len
args: abc
''')
self.check_transform(transform, 'function: len("abc")')
def transform(_val):
result = range(10)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: range
args: 10
''')
self.check_transform(transform, 'function: range(10)')
def test_fn_args_var(self):
def transform(x=1, y=2):
result = max(x, y, 3)
return result if isinstance(result, GeneratorType) else [result, ]
vars = AttrDict([('x', 1), ('y', 2)])
self.check_transform(transform, '''
function: max
args:
- =x
- =y
- 3
''', vars=vars)
self.check_transform(transform, 'function: max(x, y, 3)', vars=vars)
def transform(x=1, y=2):
result = x
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: x', vars=vars)
def transform(x=1, y=2):
result = x.real
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: x.real', vars=vars)
def transform(x=1, y=2):
result = x.conjugate()
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: x.conjugate()', vars=vars)
def transform(x=1, y=2):
result = x.to_bytes(2, 'big')
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, 'function: x.to_bytes(2, "big")', vars=vars)
def test_fn_kwargs(self):
def transform(_val):
result = dict(_val, a=1, b=2)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: dict
kwargs: {a: 1, b: 2}
''')
self.check_transform(transform, 'function: dict(_val, a=1, b=2)')
def test_fn_kwargs_complex(self):
def transform(_val):
result = dict(_val, a=[1, 2], b=AttrDict([('b1', 'x'), ('b2', 'y')]))
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: dict
kwargs:
a: [1, 2]
b:
b1: x
b2: y
''')
self.check_transform(transform, '''
function: 'dict(_val, a=[1, 2], b=AttrDict([("b1", "x"), ("b2", "y")]))'
''')
def test_fn_kwargs_var(self):
def transform(x=1, y=2):
result = dict(x, y, a=x, b=y, c=3, d='=4')
return result if isinstance(result, GeneratorType) else [result, ]
vars = AttrDict([('x', 1), ('y', 2)])
self.check_transform(transform, '''
function: dict
kwargs: {a: =x, b: =y, c: 3, d: ==4}
''', vars=vars)
self.check_transform(transform, 'function: dict(x, y, a=x, b=y, c=3, d="=4")', vars=vars)
def test_fn_args_kwargs(self):
def transform(_val):
result = format(1, 2, a=3, b=4, c=5, d='=6')
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: format
args: [1, 2]
kwargs: {a: 3, b: 4, c: 5, d: ==6}
''')
self.check_transform(transform, 'function: format(1, 2, a=3, b=4, c=5, d="=6")')
def test_fn_args_kwargs_var(self):
def transform(x=1, y=2):
result = format(x, y, a=x, b=y, c=3)
return result if isinstance(result, GeneratorType) else [result, ]
vars = AttrDict([('x', 1), ('y', 2)])
self.check_transform(transform, '''
function: format
args: [=x, =y]
kwargs: {a: =x, b: =y, c: =3}
''', vars=vars)
self.check_transform(transform, 'function: format(x, y, a=x, b=y, c=3)', vars=vars)
def test_coroutine(self):
def transform(_val):
import testlib.test_transforms
result = testlib.test_transforms.gen_str(_val)
return result if isinstance(result, GeneratorType) else [result, ]
self.check_transform(transform, '''
function: testlib.test_transforms.gen_str
''')
self.check_transform(transform, 'function: testlib.test_transforms.gen_str(_val)')
def test_cache_change(self):
remove(self.dummy.replace('.py', '.pyc'))
with io.open(self.dummy, 'w', encoding='utf-8') as handle:
handle.write('def value():\n\treturn 1\n')
def transform(_val):
import testlib.dummy
reload_module(testlib.dummy)
result = testlib.dummy.value()
return result if isinstance(result, GeneratorType) else [result, ]
fn = self.check_transform(transform, '''
function: testlib.dummy.value
args: []
''', cache=False)
eq_(fn(), [1])
fn = self.check_transform(transform, 'function: testlib.dummy.value()', cache=False)
eq_(fn(), [1])
remove(self.dummy.replace('.py', '.pyc'))
with io.open(self.dummy, 'w', encoding='utf-8') as handle:
handle.write('def value():\n\treturn 100\n')
eq_(fn(), [100])
fn = self.check_transform(transform, 'function: testlib.dummy.value()', cache=False)
eq_(fn(), [100])
def test_invalid_change(self):
fn = build_transform(yaml_parse('function: testlib.dummy.invalid\nargs: []'))
remove(self.dummy.replace('.py', '.pyc'))
with io.open(self.dummy, 'w', encoding='utf-8') as handle:
handle.write('def invalid():\n\tsyntax error\n')
with assert_raises(SyntaxError):
fn()
remove(self.dummy.replace('.py', '.pyc'))
with io.open(self.dummy, 'w', encoding='utf-8') as handle:
handle.write('1/0\ndef invalid():\n\treturn 100\n')
with assert_raises(ZeroDivisionError):
fn()
remove(self.dummy.replace('.py', '.pyc'))
with io.open(self.dummy, 'w', encoding='utf-8') as handle:
handle.write('def invalid():\n\treturn 100\n')
eq_(fn(), [100])
def test_import_levels(self):
def transform(_val):
result = str(_val)
return result if isinstance(result, GeneratorType) else [result, ]
fn = self.check_transform(transform, 'function: str')
eq_(fn(b'abc'), [str(b'abc')])
def transform(content):
result = str.__add__(content, '123')
return result if isinstance(result, GeneratorType) else [result, ]
fn = self.check_transform(transform, '''
function: str.__add__
args: [=content, '123']
''', vars=AttrDict(content=None))
eq_(fn('abc'), ['abc123'])
def transform(handler):
result = str.endswith(handler.current_user.user, 'ta')
return result if isinstance(result, GeneratorType) else [result, ]
fn = self.check_transform(transform, '''
function: str.endswith
args: [=handler.current_user.user, 'ta']
''', vars=AttrDict(handler=None))
@classmethod
def tearDownClass(cls):
# Remove temporary files
for path in cls.files:
if os.path.exists(path):
os.unlink(path)
class Badgerfish(unittest.TestCase):
'Test gramex.transforms.badgerfish'
def test_transform(self):
result = yield badgerfish('''
html:
"@lang": en
p: text
div:
p: text
''')
eq_(
result,
'<!DOCTYPE html>\n<html lang="en"><p>text</p><div><p>text</p></div></html>')
def test_mapping(self):
result = yield badgerfish('''
html:
json:
x: 1
y: 2
''', mapping={
'json': {
'function': 'json.dumps',
'kwargs': {'separators': [',', ':']},
}
})
eq_(
result,
'<!DOCTYPE html>\n<html><json>{"x":1,"y":2}</json></html>')
class Template(unittest.TestCase):
'Test gramex.transforms.template'
def check(self, content, expected, **kwargs):
result = yield template(content, **kwargs)
eq_(result, expected)
def test_template(self):
self.check('{{ 1 }}', '1')
self.check('{{ 1 + 2 }}', '3')
self.check('{{ x + y }}', '3', x=1, y=2)
class Flattener(unittest.TestCase):
def test_dict(self):
fieldmap = {
'all1': '',
'all2': True,
'x': 'x',
'y.z': 'y.z',
'z.1': 'z.1',
}
flat = flattener(fieldmap)
src = {'x': 'X', 'y': {'z': 'Y.Z'}, 'z': ['Z.0', 'Z.1']}
out = flat(src)
eq_(out.keys(), fieldmap.keys())
eq_(out['all1'], src)
eq_(out['all2'], src)
eq_(out['x'], src['x'])
eq_(out['y.z'], src['y']['z'])
eq_(out['z.1'], src['z'][1])
def test_list(self):
# Integer values must be interpreted as array indices
fieldmap = {
'0': 0,
'1': '1',
'2.0': '2.0',
}
flat = flattener(fieldmap)
src = [0, 1, [2]]
out = flat(src)
eq_(out.keys(), fieldmap.keys())
eq_(out['0'], src[0])
eq_(out['1'], src[1])
eq_(out['2.0'], src[2][0])
def test_invalid(self):
# None of these fields are valid. Don't raise an error, just ignore
fieldmap = {
0: 'int-invalid',
('a', 'b'): 'tuple-invalid',
'false-invalid': False,
'none-invalid': None,
'float-invalid': 1.0,
'dict-invalid': {},
'tuple-invalid': tuple(),
'set-invalid': set(),
'list-invalid': [],
}
out = flattener(fieldmap)({})
eq_(len(out.keys()), 0)
fieldmap = {
0.0: 'float-invalid',
}
out = flattener(fieldmap)({})
eq_(len(out.keys()), 0)
def test_default(self):
fieldmap = {'x': 'x', 'y.a': 'y.a', 'y.1': 'y.1', 'z.a': 'z.a', '1': 1}
default = 1
flat = flattener(fieldmap, default=default)
out = flat({'z': {}, 'y': []})
eq_(out, {key: default for key in fieldmap})
class TestOnce(unittest.TestCase):
def test_once(self):
for key in ['►', 'λ', '►', 'λ']:
eq_(once(key, _clear=True), None)
eq_(once(key), True)
eq_(once(key), False)
eq_(once(key), False)
| [
"root.node@gmail.com"
] | root.node@gmail.com |
be382d5197d7824ea29737950dfb67257cf93aaa | a0d06802a47fa67f78af9028e645138f2f2591ee | /app.py | e0ed314319af835434f9222c470ca6e13d40f451 | [] | no_license | andidietz/session-exercise | e7624d72715a4c1a0e4ee38e47f9930cbb0ad2a5 | cdafeb12be0ddc00e415d31c3aa80c39daa0e06b | refs/heads/main | 2023-06-13T00:06:54.637003 | 2021-07-09T21:53:11 | 2021-07-09T21:53:11 | 384,557,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | from flask import Flask, request, render_template, redirect, flash, session
from surveys import satisfaction_survey as survey
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret-goes-here'
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
debug = DebugToolbarExtension(app)
@app.route('/')
def show_home():
return render_template('/home.html', survey=survey)
@app.route('/start', methods=['POST'])
def start_survey():
session['responses'] = []
return redirect('/question/0')
@app.route('/question/<int:num>')
def display_question(num):
question = survey.questions[num]
responses = session.get('responses')
if len(responses) == len(survey.questions):
return redirect('/thank_you')
elif num != len(responses):
flash(f'Invalid access to question {num}. Please complete form in the provide order.')
return redirect(f'/question/{len(responses)}', )
else:
return render_template('questions.html', question=question, num=num)
@app.route('/answer', methods=['POST'])
def handle_answer():
answer = request.form['answer']
responses = session['responses']
responses.append('answer')
session['responses'] = responses
if len(responses) == len(survey.questions):
return redirect('/thank_you')
else:
return redirect(f'/question/{len(responses)}')
@app.route('/thank_you')
def thank():
return render_template('thank_you.html') | [
"alexandradietz3@gmail.com"
] | alexandradietz3@gmail.com |
7af6ceaeace0d6efa4faa6104264ab3f50ca1b3f | 25ac3c7d2cbcb9e2634ca0cd0b4ffccb321516ac | /BubbleTracking/AncienTests/TrackPy/trackpytest.py | f1e70de24e68dec0cb9e8113e004f8a1afd16192 | [] | no_license | fabouzz/Marble_processing | 0c5250653fd87192e7b98908fc4b9c994996dbc1 | 1dcbc42d807e500c0c091d4f3ab55d5221347f30 | refs/heads/master | 2020-05-23T01:11:36.203610 | 2019-05-24T14:06:18 | 2019-05-24T14:06:18 | 186,584,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | """
Testing the trackpy library
"""
import os
import cv2
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pims
import trackpy as tp
from scipy import ndimage
from skimage import filters, morphology, util
from pims import pipeline
@pipeline
def as_grey(frame):
red = frame[:, :, 0]
green = frame[:, :, 1]
blue = frame[:, :, 2]
return 0.2125 * red + 0.7154 * green + 0.0721 * blue
def crop(img):
"""
Crop the image to select the region of interest
"""
x_min = 300
x_max = 600
y_min = 440
y_max = 672
return img[y_min:y_max,x_min:x_max]
def preprocess_foam(img):
"""
Apply image processing functions to return a binary image
"""
# Crop the pictures as for raw images.
img = crop(img)
# Apply thresholds
block_size = 5
img = filters.threshold_local(img, block_size)
threshold = 0.30
idx = img > img.max() * threshold
idx2 = img < img.max() * threshold
img[idx] = 0
img[idx2] = 255
# Dilatate to get a continous network
# of liquid films
img = ndimage.binary_dilation(img)
img = ndimage.binary_dilation(img)
return util.img_as_int(img)
IMAGE_PATH = "../images/screen_bulles.png"
ORIG_IMAGE = cv2.imread(IMAGE_PATH, cv2.IMREAD_GRAYSCALE)
# Pour éviter d'avoir 3 channels RGB identiques
IMAGE = preprocess_foam(ORIG_IMAGE)
ORIG_IMAGE = crop(ORIG_IMAGE)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(ORIG_IMAGE, cmap='Greys_r')
ax[1].imshow(IMAGE, cmap='Greys')
plt.show()
# ==========================================================
# fig, ax = plt.subplots()
# id_example = 200
# filepath = "/media/mathieu/EHDD/videos_bille/mes_haut5_bille2_2.avi"
# frames = pims.Video(filepath)
# frame_n = frames[id_example]
# frame = as_grey(frame_n)
# print(frame)
# ax.imshow(frame, cmap='Greys_r')
# plt.show() | [
"oupsmajdsl@gmail.com"
] | oupsmajdsl@gmail.com |
bf6f30ccfa37f9d4acc212e1f4ec33d7b4457052 | 09fd456a6552f42c124c148978289fae1af2d5c3 | /Greedy/1282.py | 0aeb767815ec62b1439482c75e3f15c26f9a4fc9 | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | # 1282. Group the People Given the Group Size They Belong To
# There are n people whose IDs go from 0 to n - 1 and each person belongs exactly to one group. Given the array groupSizes of length n telling the group size each person belongs to, return the groups there are and the people's IDs each group includes.
# You can return any solution in any order and the same applies for IDs. Also, it is guaranteed that there exists at least one solution.
# Example 1:
# Input: groupSizes = [3,3,3,3,3,1,3]
# Output: [[5],[0,1,2],[3,4,6]]
# Explanation:
# Other possible solutions are [[2,1,6],[5],[0,4,3]] and [[5],[0,6,2],[4,3,1]].
# Example 2:
# Input: groupSizes = [2,1,3,3,3,2]
# Output: [[1],[0,5],[2,3,4]]
# Constraints:
# groupSizes.length == n
# 1 <= n <= 500
# 1 <= groupSizes[i] <= n
import collections
class Solution(object):
def groupThePeople(self, groupSizes):
dic = collections.defaultdict(list)
for i in range(len(groupSizes)):
dic[groupSizes[i]].append(i)
rs = []
for key in dic.keys():
count = 0
subArr = []
for i in range(len(dic[key])):
subArr.append(dic[key][i])
count += 1
if count == key:
rs.append(subArr)
subArr = []
count = 0
return rs
| [
"hoang2109@gmail.com"
] | hoang2109@gmail.com |
8a63e8b8bbb49a2eeb5b40c9b896777878496c3c | 00e20eedf383eae6404cde2194b01f3c14ccb44f | /1.3.c.py | 4d0b524d41e1b338517bd7f35ee9cac263c67597 | [] | no_license | Nutamy/Brainskills-week2 | 13b4341595ff28c068d9137358565163e1e0b14b | 7ea3a62c5fe126a09901dbfcd49d166ef2ad0b38 | refs/heads/main | 2023-02-24T03:00:42.906694 | 2021-02-01T04:14:00 | 2021-02-01T04:14:00 | 331,017,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # 1.3.c В первый день спортсмен пробежал Х км, а затем каждый день он увеличивал пробег
# на 10% от предыдущего значения. По данному числу У определите номер дня, на который пробег спортсмена за все дни
# составит не менее У км.
# Программа получает на вход действительные числа Х и У и должна
# вывести одно натуральное число.
firstDayDistance = int(input("Enter how many kilometers have you run today? "))
daysInProcess = firstDayDistance
yourGoal = int(input("Enter your goal distance and I'll tell you how long it takes: "))
daysHavePassed = 1
sumDistance = firstDayDistance
while True:
daysInProcess = daysInProcess + daysInProcess*0.1
sumDistance += daysInProcess
daysHavePassed += 1
if sumDistance > yourGoal:
print('To achive the goal {}km. You should keep rising distance by 10{} every day for {} days.'.format(yourGoal,"%",daysHavePassed))
break
print(yourGoal)
| [
"natasha.pechenka@gmail.com"
] | natasha.pechenka@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.