seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
31621055131 | from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request, Header
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from google.cloud import datastore
from typing import List
from fastapi.middleware.cors import CORSMiddleware
from functools import wraps
import uuid
import uvicorn
import requests
# Datastore client authenticated from a local service-account key file.
# NOTE(review): the key filename is hard-coded; confirm it is deployed next to
# this module and kept out of version control.
datastore_client = datastore.Client.from_service_account_json('jobsmaster-761921139a35.json')

app = FastAPI(
    title="JobsMaster",
    version="0.0.1"
)

# CORS is wide open (any origin, any method/header). Acceptable for local
# development; should be restricted before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Registry of live chat websockets; supports fan-out of messages to all of them.
class SocketManager:
    """Tracks every open websocket connection for the chat room."""

    def __init__(self):
        # Sockets that completed the handshake and have not yet disconnected.
        self.active_connections: List[WebSocket] = []

    async def connect(self, websocket: WebSocket):
        """Accept the handshake and start tracking the new socket."""
        await websocket.accept()
        self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        """Stop tracking a socket that has gone away."""
        self.active_connections.remove(websocket)

    async def broadcast(self, data):
        """Send *data* as JSON to every tracked connection."""
        for client in self.active_connections:
            await client.send_json(data)

# Single shared manager used by all endpoints in this module.
manager = SocketManager()
# Pydantic request/response model for a single chat message.
class Chat(BaseModel):
    text: str        # message body
    date: str        # timestamp of the message, stored as a string
    user_email: str  # sender's email address
def require_api_key(f):
    """Decorator that rejects requests whose X-API-Key header is wrong/missing.

    Raises Exception401 (converted to a 401 JSON response by the app-level
    exception handler) when the key does not match.
    """
    @wraps(f)
    async def decorated_function(*args, request: Request, **kwargs):
        # SECURITY(review): the API key is hard-coded; load it from config or
        # an environment variable instead of the source.
        if request.headers.get('X-API-Key') != 'abcdef123456':
            raise Exception401(message='Invalid API Key')
        # BUG FIX: `request` is keyword-only in this signature (it follows
        # *args), so it must be forwarded by keyword; the old positional
        # forwarding raised TypeError on every authorized call.
        return await f(*args, request=request, **kwargs)
    return decorated_function
# API to get all the previous chat messages
@app.get('/chat/getchats')
async def get_chats(request : Request, X_API_Key: str = Header(default=None)):
    """Return every stored chat message, ordered oldest-first by date.

    NOTE(review): X_API_Key is declared but never checked here, and the
    require_api_key decorator is not applied — the endpoint is effectively
    unauthenticated. Confirm whether that is intended.
    """
    query = datastore_client.query(kind='Messages')
    query.order = ['date']
    return list(query.fetch())
# NOTE(review): this list is never used; sockets are tracked by
# manager.active_connections — candidate for removal.
websockets = []

# Websocket endpoint for the chat room. Each incoming message is moderated by
# a cloud function; censored text is echoed back only to the sender, clean
# messages are persisted to Datastore and broadcast to every client.
@app.websocket("/chat/ws")
async def websocket_endpoint(websocket: WebSocket):
    print("entered ws")
    await manager.connect(websocket)
    response = {"response":"got connected"}
    await manager.broadcast(response)
    try:
        while True:
            # Wait for the next JSON message from this client.
            data = await websocket.receive_json()
            # Moderation check.
            # NOTE(review): requests.post is a blocking call inside an async
            # handler — it stalls the event loop for every message; an async
            # HTTP client would avoid that.
            r = requests.post('https://us-central1-jobsmaster.cloudfunctions.net/chatCheck', json=data)
            print(r)
            if r.text == 'Censored':
                # Replace the body and echo only to the sender.
                data['text'] = 'Censored'
                print(data['text'])
                await websocket.send_json(data)
            else:
                print("Here")
                # Persist the clean message under a random UUID key.
                id = str(uuid.uuid4())
                key = datastore_client.key('Messages', id)
                entity = datastore.Entity(key)
                entity['text'] = data['text']
                entity['date'] = data['date']
                print(entity)
                datastore_client.put(entity)
                # Fan the message out to all connected clients.
                await manager.broadcast(data)
    except WebSocketDisconnect:
        # Client went away: deregister it and notify the remaining clients.
        manager.disconnect(websocket)
        response = {"response":"left"}
        await manager.broadcast(response)
class Exception401(Exception):
    """Raised when a request presents a missing or invalid API key."""

    def __init__(self, message: str):
        # BUG FIX: forward the message to Exception so str(exc) and exc.args
        # carry it (previously str(exc) was an empty string).
        super().__init__(message)
        self.message = message
# Convert Exception401 into an HTTP 401 JSON response.
# NOTE(review): the function is named "404Handler" but handles 401 —
# misleading; consider renaming in a follow-up.
@app.exception_handler(Exception401)
async def Exception404Handler(request: Request, exception : Exception401):
    return JSONResponse(status_code=401, content={"message": exception.message})
# Start a development server when this module is executed directly.
if __name__ == "__main__":
    uvicorn.run(app)
| kshithijareddy/jobsmaster | Backend/chat/main.py | main.py | py | 3,344 | python | en | code | 0 | github-code | 13 |
30607414625 | # Import required libraries
# general imports
import numpy as np
from numpy import vstack
from numpy import argmax
# import for calculating the accuracy and confusion matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
# import pytorch for neural network
import torch
import torch.nn as nn
from torch import Tensor
# imports for optimizers
from torch.optim import SGD
from torch.optim import Adam
# imports for initializing the weights
from torch.nn.init import kaiming_uniform_
from torch.nn.init import xavier_uniform_
# import for plotting the metrics
import matplotlib.pyplot as plt
##################### Convolutional Neural Network #####################
# Inspired by https://tinyurl.com/dlmodel
# Adapted for current classification problem

# CNN classifier: two conv->ReLU->max-pool blocks followed by a two-layer
# fully connected head with a Softmax output.
class CustomCNN(nn.Module):

    def __init__(self, conv_input_dim, conv_nfilters, conv_kernel, pool_kernel, fc_hidden_dim, fc_output_dim):
        """Build the layers.

        conv_input_dim  -- number of input channels (e.g. 1 for grayscale)
        conv_nfilters   -- filters used by both convolutional layers
        conv_kernel     -- square kernel size of both convolutions
        pool_kernel     -- kernel size of both max-pooling layers (stride 2)
        fc_hidden_dim   -- width of the hidden fully connected layer
        fc_output_dim   -- number of output classes
        """
        super(CustomCNN, self).__init__()
        # First convolution block. 2D layers because inputs are images.
        self.conv_1 = nn.Conv2d(
            in_channels=conv_input_dim, out_channels=conv_nfilters, kernel_size=conv_kernel)
        # "He initialization" suits ReLU activations:
        # https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.kaiming_uniform_
        kaiming_uniform_(self.conv_1.weight, nonlinearity='relu')
        self.relu_1 = nn.ReLU()
        self.pool_1 = nn.MaxPool2d(kernel_size=pool_kernel, stride=2)
        # Second convolution block, same filter count and kernel.
        self.conv_2 = nn.Conv2d(
            in_channels=conv_nfilters, out_channels=conv_nfilters, kernel_size=conv_kernel)
        kaiming_uniform_(self.conv_2.weight, nonlinearity='relu')
        self.relu_2 = nn.ReLU()
        self.pool_2 = nn.MaxPool2d(kernel_size=pool_kernel, stride=2)
        # Fully connected head.
        # NOTE(review): the in_features assume the final feature map is 5x5
        # (conv_nfilters * 25); this ties the model to one input resolution.
        self.hidden_1 = nn.Linear(conv_nfilters*25, fc_hidden_dim)
        kaiming_uniform_(self.hidden_1.weight, nonlinearity='relu')
        self.relu_3 = nn.ReLU()
        self.hidden_2 = nn.Linear(fc_hidden_dim, fc_output_dim)
        # "Glorot initialization" for the output layer:
        # https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.xavier_uniform_
        xavier_uniform_(self.hidden_2.weight)
        # Softmax output for the multiclass prediction.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Run a batch of images through the network; returns class probabilities."""
        # Convolution block 1: conv -> ReLU -> pool.
        x = self.pool_1(self.relu_1(self.conv_1(x)))
        # Convolution block 2: conv -> ReLU -> pool.
        x = self.pool_2(self.relu_2(self.conv_2(x)))
        # Flatten the pooled feature maps into one vector per sample.
        x = x.reshape(x.size(0), -1)
        # Fully connected head with ReLU, then the output layer.
        x = self.relu_3(self.hidden_1(x))
        x = self.hidden_2(x)
        # Normalize to class probabilities.
        return self.softmax(x)
# Function to train the model on the training set and evaluate on the validation set
# Inputs: The training dataloader, the validation dataloader, the model to train,
# the criterion (loss) calculator, the optimizer, learning rate, number of epochs,
# counter and required lists for evaluation
# Outputs: Loss and accuracy on the validation set (appended into the supplied
# lists; two plots are shown at the end)
def train_val_CNN(train_dataloader, val_dataloader, model, criterion, optimizer, learning_rate, n_epochs, count, loss_lst, iteration_lst, accuracy_lst):
    """Train `model`, evaluating on the validation set every 50 batches.

    The caller's `loss_lst`, `iteration_lst` and `accuracy_lst` are mutated
    in place so metrics survive after the call.
    NOTE(review): `model(...)` already applies Softmax in forward(); if
    `criterion` is CrossEntropyLoss this double-applies softmax — confirm
    the criterion used by callers.
    """
    # Select optimizer based on input ("SGD" string -> SGD with momentum,
    # anything else -> Adam).
    if optimizer == "SGD":
        # set term of momentum
        momentum = 0.9
        # select Stohastic Gradient Descent
        optimizer = SGD(model.parameters(),
                        lr=learning_rate, momentum=momentum)
    else:
        # select Adam
        optimizer = Adam(model.parameters(), lr=learning_rate)
    # Enumerate epochs
    for epoch in range(n_epochs):
        model.train()
        # Enumerate batches
        for i, (images, labels) in enumerate(train_dataloader):
            # Clear the gradients
            optimizer.zero_grad()
            # Compute the output
            output = model(images)
            # Compute the loss for this batch
            loss = criterion(output, labels)
            # Calculate the gradients
            loss.backward()
            # Update the weights
            optimizer.step()
            # Global batch counter (carried across epochs via `count`)
            count += 1
            # Evaluate on the validation set every 50 batches
            if count % 50 == 0:
                # Accuracy calculation
                correct = 0
                total = 0
                model.eval()
                # Make predictions on the validation set
                for images_val, labels_val in val_dataloader:
                    # Compute the output
                    output_val = model(images_val)
                    # Get predictions from the maximum value
                    predicted_val = torch.max(output_val.data, 1)[1]
                    # Get the total number of labels
                    total += len(labels_val)
                    # Get the total correct predictions
                    correct += (predicted_val == labels_val).sum()
                # Calculate Accuracy
                accuracy = 100 * correct / float(total)
                # Store the loss and iteration number
                loss_lst.append(loss.data)
                iteration_lst.append(count)
                accuracy_lst.append(accuracy)
                # Print progress every 500 batches (500 is a multiple of 50,
                # so `accuracy` is always defined here)
                if count % 500 == 0:
                    print('Iteration: {} Loss: {} Accuracy: {} %'.format(
                        count, loss.data, accuracy))
    # Visualize the loss and accuracy in the end (plt.show() blocks until the
    # window is closed)
    # Loss
    plt.plot(iteration_lst, loss_lst)
    plt.xlabel("Number of iteration")
    plt.ylabel("Loss")
    plt.title("CNN: Loss vs Number of iteration")
    plt.show()
    # Accuracy
    plt.plot(iteration_lst, accuracy_lst, color="red")
    plt.xlabel("Number of iteration")
    plt.ylabel("Accuracy")
    plt.title("CNN: Accuracy vs Number of iteration")
    plt.show()
# Function to train the model on the whole training set
# Inputs: The training dataloader, the model to train,
# the criterion (loss) calculator, the optimizer, learning rate, number of epochs,
def train_final_CNN(ftrain_dataloader, model, criterion, optimizer, learning_rate, n_epochs):
    """Train `model` on the full training set; no validation or logging."""
    # Build the optimizer from its name: "SGD" uses momentum 0.9, any other
    # value falls back to Adam.
    if optimizer == "SGD":
        optimizer = SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    else:
        optimizer = Adam(model.parameters(), lr=learning_rate)
    # Standard training loop: forward, loss, backward, step for every batch.
    for _epoch in range(n_epochs):
        model.train()
        for batch_images, batch_labels in ftrain_dataloader:
            optimizer.zero_grad()
            predictions = model(batch_images)
            batch_loss = criterion(predictions, batch_labels)
            batch_loss.backward()
            optimizer.step()
# Function to test the model on the final test set
# Inputs: The final test dataloader, the optimized model
# Output: The accuracy of the optimized model on the final test set
# (printed; a confusion-matrix plot is also shown)
def test_CNN(test_dataloader, model):
    """Evaluate `model` on the test set: print accuracy and display the
    confusion matrix. Nothing is returned."""
    # Create prediction list
    pred_lst = list()
    # Create list with labels
    labels_lst = list()
    model.eval()
    # Enumerate batches
    for i, (images, labels) in enumerate(test_dataloader):
        # Evaluate the model on the test batch
        output = model(images)
        # Get the numpy arrays
        output = output.detach().numpy()
        actual = labels.numpy()
        # Use argmax to convert class probabilities to labels
        output = argmax(output, axis=1)
        # Reshape to column vectors so vstack produces (N, 1) arrays
        output = output.reshape((len(output), 1))
        actual = actual.reshape((len(actual), 1))
        # Append to lists
        pred_lst.append(output)
        labels_lst.append(actual)
    # Vertical stack all batches into single arrays
    pred_lst = vstack(pred_lst)
    labels_lst = vstack(labels_lst)
    # Calculate Accuracy (rounded to one decimal place, expressed as int %)
    accuracy = accuracy_score(labels_lst, pred_lst)
    accuracy = (np.round(accuracy, decimals=3) * 100).astype(int)
    # Calculate Confusion Matrix
    conmat = confusion_matrix(labels_lst, pred_lst)
    # Create confusion matrix display object
    disp = ConfusionMatrixDisplay(confusion_matrix=conmat)
    # Print Accuracy
    print('Accuracy: {}%'.format(accuracy))
    # Show the confusion matrix (blocks until the window is closed)
    disp.plot()
    plt.title('CNN - Confusion Matrix')
    plt.show()
# Function to predict the digit of a given image
# Inputs: A tensor/array image of a number, the optimized model
# Output: The label of the predicted number
def predict_digit_CNN(image, model):
    """Predict the digit shown in *image* with *model* and return the label.

    BUG FIX: the function documented an output but never returned it; the
    predicted label is now returned (callers that ignored the return value
    are unaffected).
    """
    # convert the row to a list
    image = image.tolist()
    # wrap in a batch dimension and convert to tensor data
    image = Tensor([image])
    # Make the prediction
    output = model(image)
    # Get the numpy array
    output = output.detach().numpy()
    # Get the label of the number (index of the highest probability)
    output = argmax(output)
    # Print the number
    print('Predicted the number {}'.format(output))
    return output
| dmegkos/Noisy-handwritten-digits-classification-using-MLP-and-CNN | customCNN.py | customCNN.py | py | 10,292 | python | en | code | 0 | github-code | 13 |
21909732615 | """
Wizard to help user select parameters for various operations. WIP.
"""
import numpy as np
import sys
from PyQt5 import QtWidgets, QtCore, Qt, QtGui
class HelpWindow(QtWidgets.QMainWindow):
    """Main window of the pepe parameter-selection wizard (work in progress)."""

    def __init__(self, app):
        super(HelpWindow, self).__init__()
        #######################
        # Basic setup
        #######################
        # Size the window to half the primary screen.
        screen = app.primaryScreen()
        defaultWindowSize = screen.size() * .5
        self.resize(defaultWindowSize)
        #self.setWindowTitle('pepe Parameter Help')
        # NOTE(review): 'float' looks like a leftover debug title; the
        # commented-out line above is probably the intended one — confirm.
        self.setWindowTitle('float')
        # Central widget with a vertical layout holding everything.
        self.cWidget = QtWidgets.QWidget()
        self.vLayout = QtWidgets.QVBoxLayout()
        self.cWidget.setLayout(self.vLayout)
        # Greeting label at the top.
        self.titleLbl = QtWidgets.QLabel()
        self.titleLbl.setText('Howdy!\nWelcome to the pepe parameter selection wizard!')
        self.titleLbl.setFont(QtGui.QFont('Sans', 20))
        self.titleLbl.setAlignment(Qt.Qt.AlignCenter)
        self.vLayout.addWidget(self.titleLbl)
        self.setCentralWidget(self.cWidget)
        # Navigation buttons along the bottom row: Cancel / Restart / Next.
        self.nextBtn = QtWidgets.QPushButton('Next')
        self.nextBtn.setMaximumWidth(75)
        self.cancelBtn = QtWidgets.QPushButton('Cancel')
        self.cancelBtn.setMaximumWidth(75)
        self.restartBtn = QtWidgets.QPushButton('Restart')
        self.restartBtn.setMaximumWidth(75)
        self.bottomHLayout = QtWidgets.QHBoxLayout()
        self.bottomHLayout.addWidget(self.cancelBtn)
        self.bottomHLayout.addWidget(self.restartBtn)
        self.bottomHLayout.addWidget(self.nextBtn)
        self.vLayout.addLayout(self.bottomHLayout)
        #######################
        # Parameter Selection
        #######################
        # Here we decide which features we want to select parameters
        # for
        # NOTE(review): these dicts are built but never used yet — WIP.
        bundledParameters = {'Circle tracking': 'ctrack',
                             'Rotation tracking': 'rtrack',
                             'Masking': 'mask',
                             'Optimization': 'opt'}
        availableParameters = {}
# Launch the wizard when this module is run directly.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = HelpWindow(app)
    window.show()
    # exec() blocks until the application quits; propagate its exit code.
    sys.exit(app.exec())
| Jfeatherstone/pepe | pepe/auto/ParameterSelect.py | ParameterSelect.py | py | 2,190 | python | en | code | 1 | github-code | 13 |
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# Load the track/road polygons from the JSON export.
# NOTE(review): the file handle is never closed; a `with open(...)` would be
# safer. `numpy` is imported but unused here — confirm before removing.
data = open("polygon.json").read()
data = json.loads(data)

# One figure per plot area: filled road polygons plus red rectangles for the
# "Area" and "Finish*" bounding boxes.
for plot_area in data:
    coords = data[plot_area]["Road"]
    plt.figure(figsize=(8, 8))
    plt.axis('equal')
    for coord in coords:
        coords_x = []
        coords_z = []
        # Each triple is (x, z, ...); x is mirrored so the plot matches the
        # in-game orientation.
        for triple in coord:
            coords_x.append(-triple[0])
            coords_z.append(triple[1])
        plt.fill(coords_x, coords_z)
    for area in data[plot_area]:
        # Only the overall "Area" box and "Finish..." boxes are drawn.
        if not area.startswith("Finish") and area != "Area":
            continue
        bounding_box = data[plot_area][area]
        # Mirror x here as well; width is negated to grow in the mirrored direction.
        rect = patches.Rectangle((-bounding_box["position"][0], bounding_box["position"][1]), -bounding_box["size"][0], bounding_box["size"][1], facecolor="none", edgecolor="r", linewidth=1)
        plt.gca().add_patch(rect)
    # Blocks until the window is closed, then moves on to the next plot area.
    plt.show()
| CarParty/CarParty | resources/json_examples/polygon.py | polygon.py | py | 900 | python | en | code | 3 | github-code | 13 |
9336145567 | from bs4 import BeautifulSoup
from pprint import pprint
from scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage
from scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result
from scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS
from scraper import db
from datetime import datetime
import urllib
import json
import time
def scrape_sas():
    """Top-level scrape pipeline.

    1. Discover MTB events for the configured years.
    2. Populate categories and stages for each stored event.
    3. Download results for every stage/category that has none stored yet.
    """
    pprint("Scraping Events")
    get_mtb_events()
    pprint("Getting categories and stages")
    for event in db.session.query(SASEvent):
        pprint(event.event_id)
        get_categories_and_stages(event.event_reference, event.event_id)
        #time.sleep(2)
    # Event-level stages: fetch results only if none stored yet.
    for event_stage in db.session.query(SASEventStage):
        pprint("Getting event stage results")
        base_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()
        if (base_event_stage.results):
            pprint("Event has results")
        else:
            write_stage_results(event_stage.stage_reference, event_stage.event_stage_id, "event")
    # Category-level stages, same skip-if-present logic.
    for category_stage in db.session.query(SASCategoryStage):
        pprint("Getting category stage results")
        base_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()
        if (base_category_stage.results):
            pprint("Category stage has results")
        else:
            write_stage_results(category_stage.stage_reference, category_stage.category_stage_id, "category")
    # Categories without their own stages get results attached directly.
    for category in db.session.query(SASCategory):
        pprint("Getting category results")
        base_category = db.session.query(Category).filter(Category.id==category.category_id).first()
        if (base_category.results):
            pprint("Category has results")
        else:
            if (not base_category.category_stages):
                # NOTE(review): this passes the SASCategory row id
                # (category.id), not category.category_id — verify which id
                # Result.category_id is meant to reference.
                write_category_results(category.stage_reference, category.id)
            else:
                pprint("No results but has category stages")
    pprint("Scrape Complete")
def get_mtb_events():
    """Fetch the MTB event listing for each configured year and store new events.

    For every event not already present (matched by title) an Event row plus a
    linked SASEvent row (holding the site's event reference URL) is created.
    """
    for year in YEARS:
        url = ("%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d" %
               (DESTINATION_URL, MTB_EVENT_TYPE, year))
        try:
            page = urllib.request.urlopen(url)
            content = page.read().decode("utf-8")
            json_content = json.loads(content)
            # The listing HTML arrives embedded in a JSON envelope.
            soup = BeautifulSoup(json_content['HTML'], "html.parser")
            anchors = soup.find_all('a')
        # BUG FIX: ConnectionResetError is a builtin exception, not an
        # attribute of urllib.error (the old tuple raised AttributeError when
        # triggered), and falling through with `pass` left `anchors` unbound
        # for the loop below. Skip the failed year instead.
        except (urllib.error.HTTPError, urllib.error.URLError, ConnectionResetError):
            continue
        for anchor in anchors:
            event_reference = anchor["href"]
            divs = anchor.find_all('div')
            for div in divs:
                if ("event-date" in div["class"]):
                    event_date = (div.find(text=True))
                elif ("event-title" in div["class"]):
                    event_name = (div.find(text=True))
            db_date = datetime.strptime(event_date, '%d %b %Y')
            db_event = Event(event_name, db_date)
            # Skip events whose title is already stored.
            db_check = db.session.query(Event.title).filter(Event.title==event_name)
            if not (db.session.query(db_check.exists()).scalar()):
                db.session.add(db_event)
                db.session.flush()
                sas_event = SASEvent(db_event.id, event_reference)
                db.session.add(sas_event)
                db.session.commit()
def get_categories_and_stages(event_reference, event_id):
    """Fetch an event's page and populate its categories/stages.

    Events that already have categories or stages stored are skipped; fetch
    failures are silently ignored (the event stays unpopulated).
    """
    event = db.session.query(Event).filter(Event.id==event_id).first()
    if (event.categories or event.event_stages):
        pprint("Event Exists")
    else:
        url = (DESTINATION_URL + event_reference)
        try:
            page = urllib.request.urlopen(url)
        except (urllib.error.HTTPError, urllib.error.URLError):
            return
        soup = BeautifulSoup(page, "html.parser")
        # NOTE(review): get_categories returns None, so this local serves no
        # purpose — candidate for removal.
        check_stages = get_categories(soup, event_id)
def get_categories(soup, event_id):
    """Parse the category buttons from an event page and store them.

    Falls back to event-level stages (get_event_stages) when the page has no
    category container. Categories flagged with multiple stages also get
    their per-category stages stored.
    """
    category_div = soup.find('div', attrs={"id" : "category_container"})
    #Check to see if event has categories first
    if category_div:
        divs = category_div.find_all('div')
        for div in divs:
            if div.has_attr("data-event-category-id"):
                #Event has categories
                category_reference = div["data-event-category-id"]
                category_name = div["data-loading-text"]
                category_own_stage_reference = div["data-event-stage-id"]
                db_category = Category(category_name, event_id)
                #Check both name and event id to allow duplicate names
                db_category_check = db.session.query(Category.name).filter(
                    (Category.name==category_name) &
                    (Category.event_id==event_id))
                #Check SAS category for duplicates as well
                db_sas_category_check = db.session.query(SASCategory).filter(
                    (SASCategory.category_reference==category_reference) &
                    (SASCategory.stage_reference==category_own_stage_reference))
                if not (db.session.query(db_category_check.exists()).scalar()):
                    db.session.add(db_category)
                    db.session.flush()
                if not (db.session.query(db_sas_category_check.exists()).scalar()):
                    db_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)
                    db.session.add(db_sas_category)
                    db.session.flush()
                    db.session.commit()
                if (div["data-multiple-event-stages"] == "1"):
                    #Event has stages with their own categories
                    get_category_stages(soup, db_category.id, category_reference)
    else:
        #Event does not have categories
        get_event_stages(soup, event_id)
def get_category_stages(soup, category_id, category_reference):
    """Store the stages listed under one category's expansion panel.

    The panel is located by the DOM id "ec_<category_reference>"; each stage
    div carries its site-side stage id and display name.
    """
    stage_group_div = soup.find('div', attrs={"id" : ("ec_" + category_reference)})
    stage_divs = stage_group_div.find_all('div')
    for stage_div in stage_divs:
        if stage_div.has_attr("data-stage-id"):
            category_stage_reference = stage_div["data-stage-id"]
            category_stage_name = stage_div["data-loading-text"]
            db_category_stage = CategoryStage(category_stage_name, category_id)
            #Check both name and category id to allow duplicate names
            db_category_stage_check = db.session.query(CategoryStage.name).filter(
                (CategoryStage.name==category_stage_name) &
                (CategoryStage.category_id==category_id))
            if not (db.session.query(db_category_stage_check.exists()).scalar()):
                db.session.add(db_category_stage)
                db.session.flush()
                db_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)
                db.session.add(db_sas_category_stage)
                db.session.flush()
                db.session.commit()
def get_event_stages(soup, event_id):
    """Store event-level stages for an event without categories.

    Events with no stage list at all get a single synthetic "Overall Results"
    stage created from the page's result-row stage reference (if present).
    """
    all_event_stage_divs = soup.find('div', class_ = "row categories_stages event-sub-types")
    #Check if event has stages
    if all_event_stage_divs:
        event_stage_divs = all_event_stage_divs.find_all ('div')
        for event_stage_div in event_stage_divs:
            if event_stage_div.has_attr("data-stage-id"):
                #Event has stages and no categories
                event_stage_reference = event_stage_div["data-stage-id"]
                event_stage_name = event_stage_div["data-loading-text"]
                db_event_stage = EventStage(event_stage_name, event_id)
                #Check if it exists by name and ID and add if it doesn't
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name==event_stage_name) &
                    (EventStage.event_id==event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.flush()
                    db.session.commit()
    else:
        #Event has no stages or categories
        #create new stage for just the overall results, unless event has no results
        event_stage_reference_div = soup.find('div', class_ = "result-row load-results")
        if event_stage_reference_div:
            if event_stage_reference_div.has_attr("data-stage"):
                event_stage_reference = event_stage_reference_div["data-stage"]
                sas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name=="Overall Results") &
                    (EventStage.event_id==sas_event.event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db_event_stage = EventStage("Overall Results", sas_event.event_id)
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.commit()
def get_results(event_reference):
    """Download all result rows for a stage/category reference.

    Returns the parsed list of rows, or None when the HTTP request fails —
    callers must handle a None return.
    """
    url = ("%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999" %
           (DESTINATION_URL, event_reference))
    pprint(url)
    try:
        page = urllib.request.urlopen(url)
    # BUG FIX: ConnectionResetError is a builtin exception; referencing it as
    # urllib.error.ConnectionResetError raised AttributeError at handling time.
    except (urllib.error.HTTPError, urllib.error.URLError, ConnectionResetError):
        return None
    content = page.read().decode("utf-8")
    json_content = json.loads(content)
    json_results = json_content['rows']
    return json_results
def write_stage_results(stage_reference, stage_id, stage_type):
    """Download and persist results for one stage.

    stage_type selects which foreign key the new Result rows point at:
    "event" -> EventStage, "category" -> CategoryStage. Duplicate results
    (same position/gender position/time/stage) are skipped. A failed download
    (results falsy/None) is silently ignored.
    """
    results = get_results(stage_reference)
    category_stage_id = None
    event_stage_id = None
    if (stage_type=="event"):
        event_stage_id = stage_id
    elif (stage_type=="category"):
        category_stage_id = stage_id
    if results:
        for result in results:
            # Resolve (or create) the participant for this row.
            participant_id = get_participant(result)
            db_result_check = db.session.query(Result).filter(
                (Result.position==result['overall_pos']) &
                (Result.gender_position==result['gender_pos']) &
                (Result.time==result['time_taken_seconds']) &
                (Result.event_stage_id==event_stage_id) &
                (Result.category_stage_id==category_stage_id))
            if not (db.session.query(db_result_check.exists()).scalar()):
                # NOTE(review): Result's positional args appear to be
                # (position, participant, gender_pos, time, event_stage_id,
                # category_stage_id, category_id) — confirm against the model.
                if (stage_type=="category"):
                    db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                        result['time_taken_seconds'], None, category_stage_id, None)
                elif (stage_type=="event"):
                    db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                        result['time_taken_seconds'], event_stage_id, None, None)
                db.session.add(db_result)
                db.session.commit()
def write_category_results(category_reference, category_id):
    """Download and persist results for a category that has no stages.

    Duplicate results (same position/gender position/time/category) are
    skipped.
    """
    results = get_results(category_reference)
    # BUG FIX: get_results returns None when the request fails; iterating it
    # raised TypeError. Guard the same way write_stage_results does.
    if not results:
        return
    for result in results:
        # Resolve (or create) the participant for this row.
        participant_id = get_participant(result)
        db_result_check = db.session.query(Result).filter(
            (Result.position==result['overall_pos']) &
            (Result.gender_position==result['gender_pos']) &
            (Result.time==result['time_taken_seconds']) &
            (Result.category_id==category_id)).first()
        if not db_result_check:
            db_category_result = Result(result['overall_pos'], participant_id,
                result['gender_pos'], result['time_taken_seconds'], None, None, category_id)
            db.session.add(db_category_result)
            db.session.commit()
def get_participant(result):
    """Return the DB id of the participant named in *result*.

    Matches on first name, last name, sex and birth date; creates the
    Participant row if no match exists.
    """
    if result['date_of_birth']:
        birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()
    else:
        birth_date = None
    db_participant_check = db.session.query(Participant).filter(
        (Participant.first_name==result['first_name']) &
        (Participant.last_name==result['last_name']) &
        (Participant.sex==result['person_sex']) &
        (Participant.birth_date==birth_date))
    if not (db.session.query(db_participant_check.exists()).scalar()):
        db_participant = Participant(result['first_name'], result['last_name'],
            result['person_sex'], birth_date)
        db.session.add(db_participant)
        db.session.commit()
        return db_participant.id
    else:
        # Re-runs the filter query to fetch the matching row's id.
        return db_participant_check.first().id
| coertzec/scraper | scraper/sas/scrape.py | scrape.py | py | 11,067 | python | en | code | 0 | github-code | 13 |
# Triangular Letter Pattern: prints A / BB / CCC / ... for `num` rows.
num = int(input("Enter Number: "))
Num = 65  # ASCII code of 'A'
for i in range(1,num+1):
    # NOTE(review): this inner loop always runs exactly once; the print call
    # alone would be sufficient.
    for j in range(i,i+1):
        print(i * chr(Num),end= '')
    Num +=1
    print("")
# Option-2: the same pattern computed directly from the row index.
for i in range(0,num):
    print(chr(65 + i) * (i+1))
35060658520 | """
Brightness in an image can be changed simply by adding or subtracting a constant
from each RGB value in the image. This is that implementation.
"""
from typing import List, Tuple
import numpy as np
from PIL import Image
import click
def brighten(img_arr: np.ndarray, brightness_factor: float) -> np.ndarray:
    """
    Operates on array representation of image to return a brightness-adjusted array.

    Each channel value is shifted by 255 * brightness_factor and clamped to
    the valid [0, 255] range. The input array is not modified.

    Args:
        img_arr (np.ndarray): 3-d array representation of image
        brightness_factor (float): factor to change brightness by (in [-1, 1])

    Returns:
        np.ndarray: Array representation of the brightened image, same dtype
        as the input
    """
    # PERFORMANCE: one vectorized expression replaces the per-pixel Python
    # loops (which were O(H*W) interpreted iterations). Casting back to the
    # input dtype truncates toward zero, matching the element-wise assignment
    # the loop version performed.
    shifted = img_arr + 255 * brightness_factor
    return np.clip(shifted, 0, 255).astype(img_arr.dtype)
# click commands
@click.command(name="brightness")
@click.option('-f', '--filename', type=click.Path(exists=True))
@click.option('-b', '--brightness-factor', type=float, default=0.5)
def change_brightness(filename: str, brightness_factor: float) -> None:
    """
    CLI entry point: load *filename*, brighten it by *brightness_factor*
    (must be in [-1, 1]), and display the result in the default viewer.
    """
    if not -1 <= brightness_factor <= 1:
        raise ValueError("brightness factor must be between -1 and 1")
    with Image.open(filename) as img:
        img_arr = np.array(img)
        new_img_arr = brighten(img_arr, brightness_factor)
        new_img = Image.fromarray(new_img_arr)
        new_img.show()
change_brightness() | kathirmeyyappan/edge-detector-algorithms | src/other_algorithms/brightness.py | brightness.py | py | 1,871 | python | en | code | 3 | github-code | 13 |
34040839162 | import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import math
def nearest_idx(array, value):
    """Return the index of the element of *array* closest to *value*.

    Ties resolve to the first (lowest-index) closest element.
    """
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
# Columns of the *_avg.txt files, per the author's note:
# (B, B_1, earnings, avg_l_price, avg_off_price, B+B_1-sum(cost[1:]), B-cost[0], i-cost[0], cost[0], i)
# Revenue for a row is: earnings + 100*col6 + 100*int(col5).
runs_opt = np.loadtxt("opt_avg.txt", usecols=range(0,10), dtype=np.float64)
runs_new = np.loadtxt("new_avg.txt", usecols=range(0,10), dtype=np.float64)
prob_opt = np.loadtxt("opt_prob_avg.txt", usecols=range(0,20), dtype=np.float64)
prob_new = np.loadtxt("new_prob_avg.txt", usecols=range(0,20), dtype=np.float64)
# NOTE(review): final_opt/final_new are allocated but never filled below.
final_opt = np.zeros(shape=(64,5))
final_new = np.zeros(shape=(64,5))
a = 1 #alpha
pho_s = 100  # short price  (NOTE(review): "pho" is presumably "rho" — confirm)
pho_l = 120  # long price
x,y,z =[],[],[]
x_ticks = []
# Collect (B, revenue, earnings) for the rows whose Ns is exactly 20000.
for i in range(64):
    Ns = runs_opt[i][0]
    B = runs_opt[i][1]
    N_opt = runs_opt[i][-1]
    N_new = runs_new[i][-1]
    rev_opt = runs_opt[i][2]+100*runs_opt[i][6] + 100*(int(runs_opt[i][5]))
    rev_new = runs_new[i][2]+100*runs_new[i][6] + 100*(int(runs_new[i][5]))
    # NOTE(review): cus_opt/cus_new are computed but unused in this version.
    cus_opt = runs_opt[i][-1] + runs_opt[i][6] + int(runs_opt[i][5])
    cus_new = runs_new[i][-1] + runs_new[i][6] + int(runs_new[i][5])
    if(Ns == 20000):
        x.append(B)
        x_ticks.append(math.log(B))
        y.append(rev_opt)
        z.append(runs_opt[i][2])
    # Earlier comparison logic kept for reference:
    # C = max(N_new, N_opt)
    # if(C == N_new):
    #     K_opt = (a*C - N_opt)/(C - N_opt)
    #     p_opt = pho_s + (nearest_idx(prob_opt[i],K_opt)/19)*(pho_l - pho_s)
    #     rev_opt += p_opt*(a*C - N_opt)
    # else:
    #     K_new = (a*C - N_new)/(C - N_new)
    #     p_new = pho_s + (nearest_idx(prob_new[i],K_new)/19)*(pho_l - pho_s)
    #     rev_new += p_new*(a*C - N_new)
    # pc_diff = (rev_new - rev_opt)/rev_opt
    # # # if(N_opt<C and N_new < C):
    # print(B, Ns, rev_opt, rev_new, rev_new>rev_opt, 100*pc_diff, sep='\t' )
    # else:
    # print(B, Ns, sep='\t' )
print(y)
# print(z)
# Plot revenue (red) and raw earnings (blue) against B on log-log axes.
fig1, ax1 = plt.subplots()
# fig1.figure(1)
plt.title('Ns = 20000', fontsize='22')
plt.ylabel('Revenue', fontsize='20')
plt.xlabel('\u0394Profit', fontsize='20')
ax1.plot(x,z, 'b')
ax1.plot(x,y, 'r')
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.legend(['Stopping time', 'Exhausting Residuals'])
# ax1.set_xticks(x_ticks)
# ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.show()
# Alternative non-log plot kept for reference:
# plt.figure(1)
# plt.title('Ns = 500', fontsize=22)
# plt.ylabel('Revenue')
# plt.xlabel('log(\u0394Profit)')
# plt.plot(x,y, 'b')
# plt.plot(x,z, 'r')
# plt.show()
| ydidwania/Coffee-Conundrum | final/extend.py | extend.py | py | 2,593 | python | en | code | 0 | github-code | 13 |
23028221159 | import os
import sys
import traceback
import types
import gc
import torch
import torch.nn as nn
import torch.optim as optim
from diora.net.diora import DioraTreeLSTM
from diora.net.diora import DioraMLP
from diora.net.diora import DioraMLPShared
from diora.logging.configuration import get_logger
def override_inside_hook(var):
    """Monkey-patch *var*'s ``inside_hook`` so that inside-pass scores are
    recorded into ``var.saved_scalars``.

    The replacement subtracts the per-position max from the scores (for
    numerical stability, as in log-sum-exp tricks) before saving each
    position's score slice.
    """
    def func(self, level, h, c, s):
        # Expected score shape: (batch, positions-at-this-level, level, 1).
        length = self.length
        B = self.batch_size
        L = length - level
        assert s.shape[0] == B
        assert s.shape[1] == L
        assert s.shape[2] == level
        assert s.shape[3] == 1
        assert len(s.shape) == 4
        # Shift scores so the per-position maximum is 0.
        smax = s.max(2, keepdim=True)[0]
        s = s - smax
        # TODO try with p
        for pos in range(L):
            # print("Saving {} {}".format(level,pos))
            # Record the (batch, level, 1) score slice for this position.
            self.saved_scalars[level][pos] = s[:, pos, :]
        # print(len(self.saved_scalars))
    # Bind the replacement as a method on the given instance.
    var.inside_hook = types.MethodType(func, var)
class ReconstructionLoss(nn.Module):
    """Margin-based reconstruction loss for DIORA.

    Scores each position's outside vector against the true token embedding
    (positive) and k sampled negative embeddings, trained with a
    multi-margin ranking objective.
    """
    name = 'reconstruct_loss'

    def __init__(self, embeddings, input_size, size, margin=1, k_neg=3, cuda=False):
        super(ReconstructionLoss, self).__init__()
        self.k_neg = k_neg
        self.margin = margin
        self.embeddings = embeddings
        # Projection from the model's hidden size down to the embedding size.
        self.mat = nn.Parameter(torch.FloatTensor(size, input_size))
        self._cuda = cuda
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all trainable parameters from a standard normal."""
        params = [p for p in self.parameters() if p.requires_grad]
        for i, param in enumerate(params):
            param.data.normal_()

    def loss_hook(self, sentences, neg_samples, inputs):
        # Extension point for subclasses/instrumentation; no-op by default.
        pass

    def forward(self, sentences, neg_samples, diora, info):
        """Compute the loss.

        Returns (loss, dict) where the dict holds the named scalar.
        NOTE(review): neg_samples here is indexed per-batch-element
        (einsum 'ec,...'), i.e. a flat list of k negatives shared across the
        batch — confirm against the sampler.
        """
        batch_size, length = sentences.shape
        input_size = self.embeddings.weight.shape[1]
        size = diora.outside_h.shape[-1]
        k = self.k_neg
        emb_pos = self.embeddings(sentences)
        emb_neg = self.embeddings(neg_samples)
        # Calculate scores.
        ## The predicted vector (outside hidden state per position).
        cell = diora.outside_h[:, :length].view(batch_size, length, 1, -1)
        ## The projected samples.
        proj_pos = torch.matmul(emb_pos, torch.t(self.mat))
        proj_neg = torch.matmul(emb_neg, torch.t(self.mat))
        ## The score: dot products of predictions with positive/negative projections.
        xp = torch.einsum('abc,abxc->abx', proj_pos, cell)
        xn = torch.einsum('ec,abxc->abe', proj_neg, cell)
        score = torch.cat([xp, xn], 2)
        # Calculate loss: the positive (index 0) must outrank all negatives
        # by the margin.
        lossfn = nn.MultiMarginLoss(margin=self.margin)
        inputs = score.view(batch_size * length, k + 1)
        device = torch.cuda.current_device() if self._cuda else None
        outputs = torch.full((inputs.shape[0],), 0, dtype=torch.int64, device=device)
        self.loss_hook(sentences, neg_samples, inputs)
        loss = lossfn(inputs, outputs)
        ret = dict(reconstruction_loss=loss)
        return loss, ret
class ReconstructionSoftmaxLoss(nn.Module):
    """Softmax (cross-entropy) reconstruction loss.

    Scores each terminal position's outside vector against the embedding of
    the true token (positive, class 0) and ``k_neg`` negative samples shared
    across the batch, then applies cross-entropy over the (k + 1) classes.
    """

    name = 'reconstruct_softmax_loss'

    def __init__(self, embeddings, input_size, size, margin=1, k_neg=3, cuda=False):
        super(ReconstructionSoftmaxLoss, self).__init__()
        self.k_neg = k_neg
        # `margin` is not used by the softmax loss; kept for interface parity
        # with ReconstructionLoss.
        self.margin = margin
        self.input_size = input_size
        self.embeddings = embeddings
        # Projects token embeddings (input_size) into the model space (size).
        self.mat = nn.Parameter(torch.FloatTensor(size, input_size))
        self._cuda = cuda
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize every trainable parameter from a standard normal."""
        for param in self.parameters():
            if param.requires_grad:
                param.data.normal_()

    def loss_hook(self, sentences, neg_samples, inputs):
        """Hook for subclasses to inspect the scores before the loss; no-op here."""
        pass

    def forward(self, sentences, neg_samples, diora, info):
        """Return ``(loss, dict)`` for ``sentences`` of shape (batch, length)."""
        batch_size, length = sentences.shape
        k = self.k_neg

        emb_pos = self.embeddings(sentences)
        # Negative samples are shared across the batch (hence the unsqueeze).
        emb_neg = self.embeddings(neg_samples.unsqueeze(0))

        # The predicted vector for each position: (batch, length, 1, size).
        cell = diora.outside_h[:, :length].view(batch_size, length, 1, -1)

        # Project positive / negative samples into the model space.
        proj_pos = torch.matmul(emb_pos, torch.t(self.mat))
        proj_neg = torch.matmul(emb_neg, torch.t(self.mat))

        # Dot-product scores; positives come first so the target class is 0.
        xp = torch.einsum('abc,abxc->abx', proj_pos, cell)
        xn = torch.einsum('zec,abxc->abe', proj_neg, cell)
        score = torch.cat([xp, xn], 2)

        # Cross-entropy over (k + 1) classes with target class 0.
        lossfn = nn.CrossEntropyLoss()
        inputs = score.view(batch_size * length, k + 1)
        device = torch.cuda.current_device() if self._cuda else None
        outputs = torch.full((inputs.shape[0],), 0, dtype=torch.int64, device=device)
        self.loss_hook(sentences, neg_samples, inputs)
        loss = lossfn(inputs, outputs)

        ret = dict(reconstruction_softmax_loss=loss)
        return loss, ret
# Our rule based loss. The parameter ce_loss controls whether ranking loss or cross entropy loss is used.
class RuleBasedLoss(nn.Module):
    """Auxiliary loss that nudges DIORA's split scores toward rule labels.

    Reads the per-cell composition scores recorded in ``diora.saved_scalars``
    (one entry per (level, pos)), compares them with learned per-rule weights
    gathered from ``rule_labels``, and accumulates either a cross-entropy
    style loss (``ce_loss=True``) or a pairwise ranking loss (default).

    ``rule_labels`` is a dict with keys ``'rules'`` (rule ids), ``'mask'``
    (which entries are active) and ``'indices'`` (split positions to
    scatter each rule weight into), each a list indexed per (level, pos).
    """

    name = 'rule_based_loss'

    def __init__(self, embeddings, input_size, size, margin=1, k_neg=3, cuda=False, ce_loss=False):
        super(RuleBasedLoss, self).__init__()
        # `embeddings`, `margin` and `k_neg` are unused by this loss; kept for
        # interface parity with the reconstruction losses.
        self.k_neg = k_neg
        self.margin = margin
        self.input_size = input_size
        # Learned per-rule weights; index 2500 is the padding slot.
        self.mat = nn.Embedding(2502, 1, padding_idx=2500)  # TODO put number of rules
        # These will eventually become rule weights
        self._cuda = cuda
        self.ce_loss = ce_loss
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize every trainable parameter from a standard normal."""
        for param in self.parameters():
            if param.requires_grad:
                param.data.normal_()

    def loss_hook(self, sentences, neg_samples, inputs):
        """Hook for subclasses to inspect intermediate values; no-op here."""
        pass

    def forward(self, sentences, rule_labels, diora, info):
        """Return ``(loss, dict)`` accumulated over every chart cell."""
        _, length = sentences.shape

        loss = 0.
        rule_lab_idx = 0
        rules = rule_labels['rules']
        mask = rule_labels['mask']
        rule_indices = rule_labels['indices']
        for level in range(1, length):
            L = length - level
            for pos in range(L):
                # Assumes that the bottom-left most leaf is in the first constituent.
                # Softmax over the `level` candidate split scores at this cell.
                spbatch = torch.softmax(diora.saved_scalars[level][pos], 1).squeeze(dim=-1)  # Is this necessary?
                rule_lab = torch.zeros_like(spbatch)
                diff_mask = torch.zeros_like(spbatch)
                if torch.max(rules[rule_lab_idx]).item() > 2500:
                    # Rule id out of range (beyond the padding slot) -- debug aid.
                    print(rules[rule_lab_idx])
                masked_rules = self.mat(rules[rule_lab_idx]).squeeze(dim=-1) * mask[rule_lab_idx]
                # Accumulate rule weights per split position.
                rule_lab = torch.abs(1 * rule_lab.scatter_add(1, rule_indices[rule_lab_idx], masked_rules))  # TODO vectorize somehow using scatter add
                rule_lab = torch.exp(rule_lab) / (torch.exp(rule_lab).sum(dim=-1).view(-1, 1) + 1e-10)
                if self.ce_loss:
                    # Cross-entropy against the softmaxed rule weights plus an L1 penalty.
                    loss = loss - 1.5 * (torch.abs(rule_lab) * torch.log(spbatch + 1e-10)).sum() + 1e-2 * torch.abs(rule_lab).sum()
                else:
                    # Get the signs of pairwise difference between r_i
                    rule_lab_diff = torch.triu(torch.tanh(50 * (rule_lab.unsqueeze(1) - rule_lab.unsqueeze(2))))
                    # Get the signs of pairwise difference between e_i
                    actual_diff = torch.triu(torch.tanh(50 * (spbatch.unsqueeze(1) - spbatch.unsqueeze(2))))
                    # Construct mask for positions which are not triggering any rule
                    diff_mask = diff_mask.scatter_add(1, rule_indices[rule_lab_idx], mask[rule_lab_idx])
                    diff_mask = torch.triu(torch.sign(diff_mask.unsqueeze(1) * diff_mask.unsqueeze(2)))
                    loss = loss + 1e-2 * (((rule_lab_diff - actual_diff) * diff_mask) ** 2).sum()
                rule_lab_idx += 1

        ret = dict(rule_based_loss=loss)
        return loss, ret
def get_loss_funcs(options, batch_iterator=None, embedding_layer=None):
    """Build the list of loss modules selected by ``options``.

    Args:
        options: Config object; reads ``hidden_dim``, ``k_neg``, ``margin``,
            ``cuda``, ``reconstruct_mode`` and ``rule_based``.
        batch_iterator: Unused; kept for interface compatibility.
        embedding_layer: ``nn.Embedding`` shared by the loss modules.

    Returns:
        List of loss modules: the reconstruction loss first, then optionally
        the rule-based loss.

    Raises:
        ValueError: If ``options.reconstruct_mode`` is not recognized.
    """
    input_dim = embedding_layer.weight.shape[1]
    size = options.hidden_dim
    k_neg = options.k_neg
    margin = options.margin
    cuda = options.cuda

    loss_funcs = []

    # Reconstruction loss (always present).
    if options.reconstruct_mode == 'margin':
        reconstruction_loss_fn = ReconstructionLoss(embedding_layer,
            margin=margin, k_neg=k_neg, input_size=input_dim, size=size, cuda=cuda)
    elif options.reconstruct_mode == 'softmax':
        reconstruction_loss_fn = ReconstructionSoftmaxLoss(embedding_layer,
            margin=margin, k_neg=k_neg, input_size=input_dim, size=size, cuda=cuda)
    else:
        # Previously an unknown mode fell through and crashed later with a
        # NameError; fail fast with a clear message instead.
        raise ValueError('Unknown reconstruct_mode: {}'.format(options.reconstruct_mode))
    loss_funcs.append(reconstruction_loss_fn)

    # Optional rule-based auxiliary loss.
    if options.rule_based:
        loss_funcs.append(RuleBasedLoss(embedding_layer,
            margin=margin, k_neg=k_neg, input_size=input_dim, size=size, cuda=cuda))

    return loss_funcs
class Embed(nn.Module):
    """Look up (pretrained) token embeddings and project them to the model size."""

    def __init__(self, embeddings, input_size, size):
        super(Embed, self).__init__()
        self.input_size = input_size
        self.size = size
        self.embeddings = embeddings
        # Linear projection from embedding space (input_size) to model space (size).
        self.mat = nn.Parameter(torch.FloatTensor(size, input_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Draw every trainable parameter from a standard normal."""
        trainable = [p for p in self.parameters() if p.requires_grad]
        for param in trainable:
            param.data.normal_()

    def forward(self, x):
        """Embed token ids ``x`` of shape (batch, length) -> (batch, length, size)."""
        batch_size, length = x.shape
        flat = self.embeddings(x.view(-1))
        projected = torch.mm(flat, self.mat.t())
        return projected.view(batch_size, length, -1)
class Net(nn.Module):
    """Top-level model: embedding lookup, the DIORA encoder, and loss heads."""

    def __init__(self, embed, diora, loss_funcs=None, epoch_curriculum=1000):
        super(Net, self).__init__()
        # Use None as the default to avoid a shared mutable default argument.
        loss_funcs = loss_funcs if loss_funcs is not None else []
        self.embed = embed
        self.diora = diora
        self.loss_func_names = [m.name for m in loss_funcs]
        # NOTE(review): currently unused -- curriculum-based switching of the
        # active loss functions is disabled.
        self.epoch_curriculum = epoch_curriculum

        # Register each loss module under its own name so getattr works later.
        for m in loss_funcs:
            setattr(self, m.name, m)

        self.reset_parameters()

    def reset_parameters(self):
        """Draw every trainable parameter (including submodules) from a standard normal."""
        for param in self.parameters():
            if param.requires_grad:
                param.data.normal_()

    def compute_loss(self, batch, neg_samples, rules, info, epochs):
        """Run every configured loss head and concatenate the weighted sublosses."""
        ret, loss = {}, []

        diora = self.diora.get_chart_wrapper()
        for func_name in self.loss_func_names:
            func = getattr(self, func_name)
            if func_name == 'rule_based_loss':
                # The rule loss consumes rule labels and is annealed over epochs.
                samples = rules
                lamda = epochs * 0.1
            else:
                samples = neg_samples
                lamda = 1
            subloss, desc = func(batch, samples, diora, info)
            loss.append(lamda * subloss.view(1, 1))
            for k, v in desc.items():
                ret[k] = v
        loss = torch.cat(loss, 1)

        return ret, loss

    def forward(self, batch, neg_samples=None, rules=None, compute_loss=True, info=None, epochs=-1):
        """Embed ``batch``, run DIORA, and return a dict containing 'total_loss'."""
        # Embed
        embed = self.embed(batch)

        # Run DIORA
        self.diora(embed)

        # Compute loss (or a constant placeholder when loss is not requested).
        if compute_loss:
            ret, loss = self.compute_loss(batch, neg_samples, rules, info=info, epochs=epochs)
        else:
            ret, loss = {}, torch.full((1, 1), 1, dtype=torch.float32,
                                       device=embed.device)

        # Results
        ret['total_loss'] = loss
        return ret
class Trainer(object):
    """Wraps a ``Net`` with an optimizer plus checkpoint save/load and
    train/eval stepping (including a multi-gpu failure cleanup path)."""

    def __init__(self, net, k_neg=None, ngpus=None, cuda=None, curriculum=False):
        super(Trainer, self).__init__()
        self.net = net
        # The optimizer is created lazily via init_optimizer().
        self.optimizer = None
        self.optimizer_cls = None
        self.optimizer_kwargs = None
        self.cuda = cuda
        self.ngpus = ngpus
        self.curriculum = curriculum
        self.parallel_model = None
        print("Trainer initialized with {} gpus.".format(ngpus))

    def freeze_diora(self):
        """Stop gradient updates for all DIORA encoder parameters."""
        for p in self.net.diora.parameters():
            p.requires_grad = False

    def parameter_norm(self, requires_grad=True, diora=False):
        """Return the sum of parameter norms for the whole net (or just DIORA)."""
        net = self.net.diora if diora else self.net
        total_norm = 0
        for p in net.parameters():
            if requires_grad and not p.requires_grad:
                continue
            total_norm += p.norm().item()
        return total_norm

    def init_optimizer(self, optimizer_cls, optimizer_kwargs):
        """Build the optimizer over trainable parameters, falling back to
        previously stored class/kwargs when the arguments are None."""
        if optimizer_cls is None:
            optimizer_cls = self.optimizer_cls
        if optimizer_kwargs is None:
            optimizer_kwargs = self.optimizer_kwargs
        params = [p for p in self.net.parameters() if p.requires_grad]
        self.optimizer = optimizer_cls(params, **optimizer_kwargs)

    @staticmethod
    def get_single_net(net):
        """Unwrap a DistributedDataParallel wrapper, if present."""
        if isinstance(net, torch.nn.parallel.DistributedDataParallel):
            return net.module
        return net

    def save_model(self, model_file):
        """Save the model state dict, dropping embedding weights (they are
        pretrained/frozen and restored from the live net on load)."""
        state_dict = self.net.state_dict()

        todelete = []

        for k in state_dict.keys():
            if 'embeddings' in k:
                todelete.append(k)

        for k in todelete:
            del state_dict[k]

        torch.save({
            'state_dict': state_dict,
        }, model_file)

    @staticmethod
    def load_model(net, model_file):
        """Load a checkpoint into ``net``, tolerating key mismatches
        (DataParallel prefixes, stripped embeddings, missing rule-loss keys)."""
        save_dict = torch.load(model_file, map_location=lambda storage, loc: storage)
        state_dict_toload = save_dict['state_dict']
        state_dict_net = Trainer.get_single_net(net).state_dict()

        # Bug related to multi-gpu: strip the DataParallel 'module.' prefix.
        keys = list(state_dict_toload.keys())
        prefix = 'module.'
        for k in keys:
            if k.startswith(prefix):
                newk = k[len(prefix):]
                state_dict_toload[newk] = state_dict_toload[k]
                del state_dict_toload[k]

        # Remove extra keys.
        keys = list(state_dict_toload.keys())
        for k in keys:
            if k not in state_dict_net:
                print('deleting {}'.format(k))
                del state_dict_toload[k]

        # Hack to support embeddings: reuse the current net's (frozen)
        # embedding weights, since save_model strips them from checkpoints.
        for k in state_dict_net.keys():
            if 'embeddings' in k:
                state_dict_toload[k] = state_dict_net[k]
            if 'rule_based_loss' in k:
                # NOTE(review): save_dict's only key is 'state_dict', so this
                # condition is always true; it likely should check
                # state_dict_toload instead -- confirm before relying on it.
                if k not in save_dict.keys():
                    state_dict_toload[k] = state_dict_net[k]
                # else:
                #     state_dict_toload[k] =

        Trainer.get_single_net(net).load_state_dict(state_dict_toload)

    def run_net(self, batch_map, compute_loss=True, multigpu=False, epochs=-1):
        """Run one forward pass of the network on ``batch_map``.

        NOTE(review): `multigpu` is accepted but not used here -- confirm
        whether it is still needed by callers.
        """
        batch = batch_map['sentences']
        neg_samples = batch_map.get('neg_samples', None)
        info = self.prepare_info(batch_map)
        rules_and_mask = {"rules":batch_map['rules'], "mask": batch_map['rules_mask'], "indices": batch_map['rule_indices']}
        out = self.net(batch, neg_samples=neg_samples, rules=rules_and_mask,compute_loss=compute_loss, info=info, epochs=epochs)
        return out

    def gradient_update(self, loss):
        """Backprop ``loss`` and take one gradient-clipped optimizer step."""
        self.optimizer.zero_grad()
        loss.backward()
        params = [p for p in self.net.parameters() if p.requires_grad]
        torch.nn.utils.clip_grad_norm_(params, 5.0)
        self.optimizer.step()

    def prepare_result(self, batch_map, model_output):
        """Summarize loss scalars plus batch metadata for logging."""
        result = {}
        result['batch_size'] = batch_map['batch_size']
        result['length'] = batch_map['length']
        for k, v in model_output.items():
            if 'loss' in k:
                result[k] = v.mean(dim=0).sum().item()
        return result

    def prepare_info(self, batch_map):
        """Extra info passed to the loss functions; none needed by default."""
        return {}

    def step(self, *args, **kwargs):
        """Run one train/eval step; on failure in multi-gpu mode, kill the
        sibling worker processes for this experiment and exit."""
        try:
            return self._step(*args, **kwargs)
        except Exception as err:
            batch_map = args[0]
            print('Failed with shape: {}'.format(batch_map['sentences'].shape))
            if self.ngpus > 1:
                print(traceback.format_exc())
                print('The step failed. Running multigpu cleanup.')
                # HACK: kill the other python workers of this experiment so a
                # failed step doesn't leave the multi-gpu job hanging.
                os.system("ps -elf | grep [p]ython | grep adrozdov | grep " + self.experiment_name + " | tr -s ' ' | cut -f 4 -d ' ' | xargs -I {} kill -9 {}")
                sys.exit()
            else:
                raise err

    def _step(self, batch_map, train=True, compute_loss=True, epochs=-1):
        """Forward (and, when ``train``, backward) pass for one batch."""
        if train:
            self.net.train()
        else:
            self.net.eval()
        multigpu = self.ngpus > 1 and train
        if not self.curriculum:
            # Without a curriculum, pin the epoch value used for loss weighting.
            epochs = 10
        with torch.set_grad_enabled(train):
            model_output = self.run_net(batch_map, compute_loss=compute_loss, multigpu=multigpu , epochs=epochs)

        # Calculate average loss for multi-gpu and sum for backprop.
        total_loss = model_output['total_loss'].mean(dim=0).sum()

        if train:
            self.gradient_update(total_loss)

        result = self.prepare_result(batch_map, model_output)

        return result
        # return {"batch_size":1,"length":1}

    def get_top_rules(self,model_file):
        """Return indices of the 200 largest-magnitude rule weights stored in
        a checkpoint's 'rule_based_loss.mat.weight' tensor."""
        # NOTE(review): rule_ten is unused below.
        rule_ten = torch.arange(0,3346).view(1,-1).int()
        # print(self.loss_func_names)
        # func = getattr(self.net, "rule_based_loss")
        save_dict = torch.load(model_file, map_location=lambda storage, loc: storage)
        state_dict_toload = save_dict['state_dict']
        rule_mat = state_dict_toload['rule_based_loss.mat.weight']
        # mat = nn.Embedding(3346,1,padding_idx=3345)
        # mat.weight.data.copy_(rule_mat.long())
        # mat.weight.data.copy
        # emb = torch.abs(mat(rule_ten.long()))
        return (torch.argsort(-1.*torch.abs(rule_mat).squeeze())[:200].detach().cpu().numpy())
def build_net(options, embeddings=None, batch_iterator=None, random_seed=None):
    """Construct the full model (embed + DIORA + losses) wrapped in a Trainer.

    Args:
        options: Config object (architecture, dims, cuda/multigpu flags, etc.).
        embeddings: Numpy array of pretrained embeddings (vocab, input_dim).
        batch_iterator: Forwarded to ``get_loss_funcs``.
        random_seed: Unused; kept for interface compatibility.

    Returns:
        A ``Trainer`` with the optimizer initialized.

    Raises:
        ValueError: If ``options.arch`` is not recognized.
    """
    logger = get_logger()

    lr = options.lr
    size = options.hidden_dim
    k_neg = options.k_neg
    margin = options.margin
    normalize = options.normalize
    input_dim = embeddings.shape[1]
    cuda = options.cuda
    rank = options.local_rank
    curriculum = options.curriculum
    ngpus = 1

    if cuda and options.multigpu:
        ngpus = torch.cuda.device_count()
        os.environ['MASTER_ADDR'] = options.master_addr
        os.environ['MASTER_PORT'] = options.master_port
        torch.distributed.init_process_group(backend='nccl', init_method='env://')

    # Embed (pretrained vectors are frozen; only the projection is trained).
    embedding_layer = nn.Embedding.from_pretrained(torch.from_numpy(embeddings), freeze=True)
    embed = Embed(embedding_layer, input_size=input_dim, size=size)

    # Diora
    if options.arch == 'treelstm':
        diora = DioraTreeLSTM(size, outside=True, normalize=normalize, compress=False)
    elif options.arch == 'mlp':
        diora = DioraMLP(size, outside=True, normalize=normalize, compress=False)
    elif options.arch == 'mlp-shared':
        diora = DioraMLPShared(size, outside=True, normalize=normalize, compress=False)
    else:
        # Previously an unknown arch fell through and crashed later with a
        # NameError; fail fast with a clear message instead.
        raise ValueError('Unknown arch: {}'.format(options.arch))

    # Loss
    loss_funcs = get_loss_funcs(options, batch_iterator, embedding_layer)

    # The rule-based loss needs the inside-pass scalars, so hook them.
    if options.rule_based:
        print("Overriding")
        override_inside_hook(diora)

    # Net
    net = Net(embed, diora, loss_funcs=loss_funcs)

    # Load model.
    if options.load_model_path is not None:
        logger.info('Loading model: {}'.format(options.load_model_path))
        Trainer.load_model(net, options.load_model_path)

    # CUDA-support
    if cuda:
        if options.multigpu:
            torch.cuda.set_device(options.local_rank)
        net.cuda()
        diora.cuda()

    if cuda and options.multigpu:
        net = torch.nn.parallel.DistributedDataParallel(
            net, device_ids=[rank], output_device=rank)

    # Trainer
    trainer = Trainer(net, k_neg=k_neg, ngpus=ngpus, cuda=cuda, curriculum=curriculum)
    trainer.rank = rank
    trainer.experiment_name = options.experiment_name  # for multigpu cleanup
    trainer.init_optimizer(optim.Adam, dict(lr=lr, betas=(0.9, 0.999), eps=1e-8))

    return trainer
| anshuln/Diora_with_rules | pytorch/diora/net/trainer.py | trainer.py | py | 20,666 | python | en | code | 4 | github-code | 13 |
1280044607 | # https://leetcode.com/problems/reverse-string/
def reverseString(s):
left,right = 0,len(s)-1
while(left < right):
# swap s[left] and s[right]
s[left],s[right] = s[right],s[left]
left+=1
right-=1
return s
print(reverseString(["h","e","l","l","o"]))
print(reverseString(["H","a","n","n","a","h"]))
| Rajjada001/LeetCode-Topic_Wise-Problems | Strings/1.reverseString.py | 1.reverseString.py | py | 346 | python | en | code | 0 | github-code | 13 |
9407734339 | import numpy as np
from itertools import groupby
from find_modes_mean_shift import findModesMeanShift
def edgeOrientations(img_angle, img_weight):
# init v1 and v2
v1 = [0, 0]
v2 = [0, 0]
# number of bins (histogram parameters)
bin_num = 32
# convert images to vectors
vec_angle = img_angle.T.reshape(-1)
vec_weight = img_weight.T.reshape(-1)
# convert angles from normals to directions
vec_angle = vec_angle + np.pi / 2
vec_angle[vec_angle > np.pi] -= np.pi
# create histogram
angle_hist = np.zeros(bin_num)
bin0 = np.floor(vec_angle / (np.pi / bin_num)).astype(int)
bin0[bin0 < 0] = 0
bin0[bin0 >= bin_num] = bin_num - 1
d = list(zip(bin0, vec_weight))
for k, g in groupby(sorted(d, key=lambda x: x[0]), lambda x: x[0]):
l = list(g)
angle_hist[k] = sum([x[1] for x in l])
# # create histogram
# angle_hist = np.zeros(bin_num)
# for i in range(0, len(vec_angle)):
# bin = int(max(min(np.floor(vec_angle[i] / (np.pi / bin_num)), bin_num - 1), 0))
# angle_hist[bin] = angle_hist[bin] + vec_weight[i]
modes, angle_hist_smoothed = findModesMeanShift(angle_hist, 1)
# if only one or no mode => return invalid corner
if len(modes) <= 1:
return v1, v2
# compute orientation at modes
new = modes[:, 0] * np.pi / bin_num
new = np.reshape(new, (-1, 1))
modes = np.hstack((modes, new))
# extract 2 strongest modes and sort by angle
modes = modes[:2]
modes = modes[np.argsort(modes[:, 2])]
# compute angle between modes
delta_angle = min(modes[1, 2] - modes[0, 2], modes[0, 2] + np.pi - modes[1, 2])
# if angle too small => return invalid corner
if delta_angle <= 0.3:
return v1, v2
# set statistics: orientations
v1 = [np.cos(modes[0, 2]), np.sin(modes[0, 2])]
v2 = [np.cos(modes[1, 2]), np.sin(modes[1, 2])]
return v1, v2
| postBG/libcbdetect | code/edge_orientation.py | edge_orientation.py | py | 2,000 | python | en | code | 1 | github-code | 13 |
42168561182 | """ As the seeds get bigger we break new records in lenght or in height.
Usage: import records
Usage: from records import heightrecords
Usage: from records import lengthtrecords
>>> lengthrecords(20)
[(1, 1), (2, 2), (3, 8), (6, 9), (7, 17), (9, 20), (18, 21)]
>>> heightrecords(250)
[(1, 1), (2, 2), (3, 16), (7, 52), (15, 160), (27, 9232)]
"""
import height
import length
def lengthrecords(maxseed):
""" As the seed goes from 1 to maxseed the hail stones break new length records.
Return a list of pairs (seed, length) where the length is a new record.
For example:
>>> lengthrecords(20)
[(1, 1), (2, 2), (3, 8), (6, 9), (7, 17), (9, 20), (18, 21)]
"""
# Replace the pass below with your own code.
maxlength = 0
mylist = []
for n in range(1,maxseed + 1):
if length.measure(n) > maxlength:
maxlength = length.measure(n)
mylist.append((n, maxlength))
return mylist
def heightrecords(maxseed):
"""As the seed goes from 1 to maxseed the hail stones break new height records.
Return a list of pairs (seed, height) where the height is a new record.
For example:
>>> heightrecords(250)
[(1, 1), (2, 2), (3, 16), (7, 52), (15, 160), (27, 9232)]
"""
# Replace the pass below with your own code.
maxheight = 0
mylist =[]
for n in range(1, maxseed+1):
if height.measure(n) > maxheight:
maxheight = height.measure(n)
mylist.append((n, maxheight))
return mylist
if __name__ == '__main__':
import doctest
doctest.testmod()
| lzhengem/python-projects | LAB04/hailstoneslab/records.py | records.py | py | 1,625 | python | en | code | 0 | github-code | 13 |
43082964712 | #
# @lc app=leetcode.cn id=1154 lang=python3
#
# [1154] 一年中的第几天
#
# @lc code=start
class Solution:
def dayOfYear(self, date: str) -> int:
# 拆出年月日
year, month, day = [int(x) for x in date.split("-")]
# 模拟每一年的每月天数
amount = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# 闰年二月加一天
if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
amount[1] += 1
# 加总前几个月并加上本月的天数
return sum(amount[:month - 1]) + day
# @lc code=end
| Guo-xuejian/leetcode-practice | 1154.一年中的第几天.py | 1154.一年中的第几天.py | py | 592 | python | en | code | 1 | github-code | 13 |
30847785045 | # use torch to build a rbfnet to fit 2-d points.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import copy
class RBFNet(nn.Module):
def __init__(self, n = 5):
super(RBFNet, self).__init__()
# the number of neurons in hidden layer.
# we have only one hidden layer here.
# of couse we could build a deeper network
self.N = n
self.linear0 = nn.Linear(1, self.N)
self.linear1 = nn.Linear(self.N, 1)
self.c = 1 / np.sqrt(2 * np.pi)
def forward(self, x):
x = self.linear0(x)
x = torch.mul(torch.pow(x, 2), -0.5)
x = torch.mul(torch.exp(x), self.c)
x = self.linear1(x)
return x
net = RBFNet(10)
max_epoch = 500
optimizer = optim.SGD(net.parameters(), lr = 0.01)
criterion = nn.MSELoss()
def train_model_one_epoch(points):
optimizer.zero_grad()
sum_loss = 0
for p in points:
_input = torch.Tensor([p[0]])
_target = torch.Tensor([p[1]])
output = net(_input)
loss = criterion(output, _target)
loss.backward()
sum_loss += loss.item()
optimizer.step()
return sum_loss
def train_model():
# show the dynamic process of convergence
for epoch in range(max_epoch):
loss = train_model_one_epoch(points)
if epoch % (int(max_epoch/10)) == 0:
print("epoch:", epoch, "loss:", loss)
def onclick_put_points(event):
ix, iy = event.xdata, event.ydata
points.append([ix, iy])
plt.scatter(ix, iy, color="r")
plt.draw()
def draw_model(net, label, color="blue"):
x_coor = np.linspace(-5, 5, 100)
y_coor = [ net(torch.Tensor([tmp_x])).item() for tmp_x in x_coor]
return plt.plot(x_coor, y_coor, label=label, c=color)
if __name__ == "__main__":
# interactively put points
fig = plt.figure()
points = []
fig.canvas.mpl_connect('button_press_event', onclick_put_points)
plt.xlim((-5, 5))
plt.ylim((-2, 2))
plt.show()
# open interactive mode
plt.ion()
# draw trained points
np_points = np.array(points)
x = np_points[:,0]
y = np_points[:,1]
init_net = copy.deepcopy(net)
for epoch in range(max_epoch):
plt.cla()
plt.xlim((-5, 5))
plt.ylim((-2, 2))
loss = train_model_one_epoch(points)
if epoch % (int(max_epoch/10)) == 0:
print("epoch:", epoch, "loss:", loss)
if epoch % (int(max_epoch/100)) == 0:
plt.scatter(x, y, color='red')
draw_model(net, "training", "orange")
plt.pause(0.1)
plt.ioff()
plt.scatter(x, y, color='red')
draw_model(init_net, color="cyan", label="before training")
draw_model(net, color="orange", label="after training")
plt.legend()
plt.show()
| wlsdzyzl/GAMES102 | hw2/rbfnet.py | rbfnet.py | py | 2,845 | python | en | code | 14 | github-code | 13 |
70713148498 | import hid
import time
product_id = 0x2107
vendor_id = 0x413D
usage_page = 0xFF00
# 初始化HID设备
def init_usb(vendor_id, usage_page):
global h
h = hid.device()
hid_enumerate = hid.enumerate()
device_path = 0
for i in range(len(hid_enumerate)):
print(hid_enumerate[i])
# if (hid_enumerate[i]['usage_page'] == usage_page and hid_enumerate[i]['vendor_id'] == vendor_id):
if (hid_enumerate[i]['usage_page'] == usage_page and hid_enumerate[i]['vendor_id'] == vendor_id and
hid_enumerate[i]['product_id'] == product_id):
device_path = hid_enumerate[i]['path']
if (device_path == 0): return "Device not found"
h.open_path(device_path)
h.set_nonblocking(1) # enable non-blocking mode
return 0
# 读写HID设备
def hid_report(buffer=[], r_mode=False, report=0):
buffer = buffer[-1:] + buffer[:-1]
buffer[0] = 0
print("<", buffer)
try:
h.write(buffer)
except (OSError, ValueError):
print("写入设备错误")
return 1
except NameError:
print("未初始化设备")
return 4
if r_mode: # 读取回复
time_start = time.time()
while 1:
try:
d = h.read(64)
except (OSError, ValueError):
print("读取数据错误")
return 2
if d:
print(">", d)
break
if time.time() - time_start > 2:
print("超时未回应")
d = 3
break
else:
d = 0
return d
def hid_close():
if 'h' in locals():
h.close()
| Jackadminx/KVM-Card-Mini | Client/module/hid_def.py | hid_def.py | py | 1,667 | python | en | code | 53 | github-code | 13 |
29530333138 | from __future__ import annotations
import numpy as np
from ConfigSpace import (
Categorical,
Configuration,
ConfigurationSpace,
Float,
Integer,
)
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
# from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.neural_network import MLPClassifier
from inner_loop.exec_model import ExecModel
from utils.input import ConfDict
class SklearnModel(ExecModel):
def build_model(self, config, budget):
numeric_transformer = Pipeline(
steps=[
("impute", SimpleImputer()),
("scaler", MinMaxScaler()),
]
)
categorical_transformer = Pipeline(
[
("impute", SimpleImputer(strategy="most_frequent")),
("ohe", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocessor = ColumnTransformer(
transformers=[
(
"num",
numeric_transformer,
[
idx
for idx, elem in enumerate(ConfDict()["categorical_indicator"])
if not elem
],
),
(
"cat",
categorical_transformer,
[
idx
for idx, elem in enumerate(ConfDict()["categorical_indicator"])
if elem
],
),
]
)
classifier = MLPClassifier(
hidden_layer_sizes=[config["n_neurons"]] * config["n_layer"],
solver=config["solver"],
activation=config["activation"],
learning_rate_init=config["learning_rate_init"],
alpha=config["alpha"],
max_iter=int(np.ceil(config["n_epochs"])),
random_state=ConfDict()["seed"],
)
self.model = Pipeline(
steps=[
("preprocessor", preprocessor),
("classifier", classifier),
]
)
def train_model(self, X_train, y_train):
self.model = self.model.fit(X_train, y_train)
def evaluate_model(self, X_test):
return self.model.predict(X_test)
| josephgiovanelli/mo-importance | src/inner_loop/sklearn_model.py | sklearn_model.py | py | 2,499 | python | en | code | 0 | github-code | 13 |
7953458575 | import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
LENGTH = 2
ANGLE = np.pi/2
stack = []
dragonX = 'X+YF+'
dragonY = '-FX-Y'
hilbert_curveX = '-YF+XFX+FY-'
hilbert_curveY = '+XF-YFY-FX+'
def draw_fractal(actual_position, rule, angle, number_of_iterations, number_of_rewrites):
actual_angle = angle
for char in rule:
new_position = [0, 0]
if char == '+':
actual_angle += ANGLE
if char == '-':
actual_angle -= ANGLE
if char == '[':
stack.append([actual_position, actual_angle])
if char == ']':
actual_position, actual_angle = stack.pop()
if char == 'X' and number_of_rewrites > 0:
actual_position, actual_angle = draw_fractal(actual_position, hilbert_curveX, actual_angle, number_of_iterations, number_of_rewrites - 1)
if char == 'Y' and number_of_rewrites > 0:
actual_position, actual_angle = draw_fractal(actual_position, hilbert_curveY, actual_angle, number_of_iterations, number_of_rewrites - 1)
if char == 'F':
if number_of_iterations > 1:
actual_position = draw_fractal(actual_position, rule, actual_angle, number_of_iterations - 1, number_of_rewrites)
else:
new_position[0] = actual_position[0] + LENGTH * np.cos(actual_angle)
new_position[1] = actual_position[1] + LENGTH * np.sin(actual_angle)
plt.plot([actual_position[0], new_position[0]], [actual_position[1], new_position[1]])
actual_position = new_position
return actual_position, actual_angle
first_angle = 0
square = 'F+F-F-FF+F+F-F'
snowflake = 'F+F--F+F'
tree1 = 'F[+F]F[-F]F'
tree2 = 'FF+[+F-F-F]-[-F+F+F]'
dragon = 'FX'
hilbert_curve = 'X'
# plt.figure(dpi=200)
first_position = [0, 0]
draw_fractal(first_position, hilbert_curve, first_angle, 1, 5)
plt.axis("equal")
plt.show()
| maras49/NAVY | CV_6/__init__.py | __init__.py | py | 2,120 | python | en | code | 0 | github-code | 13 |
31087824503 | #!/usr/bin/python3
"""
Flask implementation for the HTTP Server
Overall this is more noisy and doesnt just die when you tell it too
"""
import logging
from os import environ
from threading import Thread
from datetime import datetime
from flask import Flask, request
try:
from .logger import logger
except:
from logger import logger
"""
Get the interface to run flask on
"""
def getHost():
try:
ip = environ['HTTP_IP']
return str(ip)
except KeyError:
return "127.0.0.1"
"""
Get the port to run flask on
"""
def getPort():
try:
ip = environ['HTTP_IP']
return str(ip)
except KeyError:
return 5000
app = Flask(__name__)
# Disable normal flask logs
app.logger.disabled = True
log = logging.getLogger('werkzeug')
log.disabled = True
#log.setLevel(logging.ERROR)
@app.before_request
def httplogger():
req = {}
req['ip'] = request.remote_addr
if request.headers.getlist('X-Forwarded-For'):
req['ip'] = str(request.headers.getlist('X-Forwarded-For')[0]).strip()
req['host'] = str(request.host.split(':')[0]).strip()
req['path'] = str(request.path).strip()
req['method'] = str(request.method).strip()
req['headers'] = request.headers
req['args'] = str(request.query_string, 'utf-8').strip()
req['data'] = str(request.data, 'utf-8').strip()
req['url'] = str(request.url).strip()
log = logger(req)
log.start()
@app.route("/", methods=["GET", "POST", "HEAD", "OPTIONS"])
def index():
return ""
@app.route("/<path:path>", methods=["GET", "POST", "HEAD", "OPTIONS"])
def all(path):
return ""
class HTTPServer(Thread):
def __init__(self):
Thread.__init__(self)
global app
self.HOST = getHost()
self.PORT = getPort()
app.host = self.HOST
app.port = self.PORT
self.app = Thread(target=app.run)
def run(self):
self.app.start()
print(f"[*][HTTP] Started Listener at: {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')}")
| notxesh/sebknary | sebknary/http/flask-app.py | flask-app.py | py | 1,982 | python | en | code | 3 | github-code | 13 |
18484684734 | import numpy as np
import json
from dataclasses import dataclass
import math
import time
import arcade
import algorithms
import isometric
import constants as c
import interaction
from vision import VisionCalculator
from map_tile import Tile
# GATES and POI_LIGHTS are the highlights used to show the player points of interest and gates. each index represents a
# direction in order: south, east, north, west
GATES = {index: data for index, data in enumerate(isometric.generate_iso_data_other("gate_highlight"))}
POI_LIGHTS = {index: data for index, data in enumerate(isometric.generate_iso_data_other("poi_highlight"))}
class Map:
"""
Map holds the tiles and other data for a single tmx map.
"""
def __init__(self, game_view, data, location="tutorial"):
self.game_view = game_view
# The str location of the tmx data and the json data.
self.location = location
self.tmx_map = arcade.read_tmx(f"tiled/tilemaps/{self.location}.tmx")
self.item_data = data[location]
# the maps unique vision handler.
self.lit = location == "tutorial"
self.vision_handler = VisionCalculator(game_view.window, game_view.player, self.lit)
# The size of the map.
self.map_size = self.tmx_map.map_size
self.map_width, self.map_height = self.map_size
# The bots
self.bots = []
# If anything on the map has changed
self.changed = True
# The data of each layer, rooms, and tiles.
self.toggle_sprites = {}
self.layers = {}
self.rooms = {}
self.tile_map = np.empty(self.map_size, Tile)
# sprites with animations.
self.animated_sprites = []
def load_map(self):
"""
The load map scrip runs through the provided map and creates an
IsoLayer object for each layer which stores many
different values, these include the raw tile values as a 2D array
and the tile sprites in a 2D numpy array.
These IsoLayers are then stored by their name in a dictionary.
"""
self.game_view.reset_bots()
self.bots = []
self.vision_handler.setup(tuple(self.map_size))
self.animated_sprites = []
c.set_map_size(self.map_size)
@dataclass()
class BotData:
x: int = 0
y: int = 0
bot_type: str = "basic"
shown: bool = False
ai_type: str = "avoid"
self.toggle_sprites = {}
for layer_num, layer_data in enumerate(self.tmx_map.layers):
location = layer_data.name
if layer_data.properties is not None:
shown = layer_data.properties.get('shown', True)
else:
shown = True
# Create the IsoList for the tiles, renames the layer's raw tile 2D array for better readability,
# and create the 2D numpy array
tile_list = []
map_data = layer_data.layer_data
tile_map = np.empty(self.map_size, list)
def generate_poi(data):
poi_data = self.item_data['interact'].get(str(data), None)
if poi_data is not None:
data = poi_data['tile']
current_tiles = isometric.find_poi_sprites(data,
interaction.load_conversation(poi_data['interaction']),
(e_x, e_y))
tile_directions = set()
tile_list.extend(current_tiles)
tile_map[e_x, e_y] = list(current_tiles)
for tile in current_tiles:
if len(tile.animations):
self.animated_sprites.append(tile)
pos = (tile.e_x, tile.e_y)
if pos not in tile_directions:
tile_directions.add(pos)
if self.tile_map[pos] is None:
self.tile_map[pos] = Tile(pos, self)
self.tile_map[pos].add(tile)
if len(current_tiles):
for tile in current_tiles:
for i in range(4):
direction = (i % 2 * ((math.floor(i / 2) * -2) + 1),
(1 - i % 2) * ((math.floor(i / 2) * -2) + 1))
if (tile.e_x + direction[0], tile.e_y + direction[1]) not in tile_directions:
highlight = isometric.IsoSprite(tile.e_x, tile.e_y, POI_LIGHTS[i])
tile_list.append(highlight)
tile_map[e_x, e_y].append(highlight)
self.tile_map[tile.e_x, tile.e_y].add(highlight)
def generate_door(data):
door_data = self.item_data['door'].get(str(data))
if door_data is not None:
tile_data = door_data['tiles']
target_id = data - 16
current_tile = isometric.find_toggle_sprites(tile_data, target_id, (e_x, e_y))
tile_list.append(current_tile)
tile_map[e_x, e_y] = current_tile
if self.tile_map[e_x, e_y] is None:
self.tile_map[e_x, e_y] = Tile((e_x, e_y), self)
self.tile_map[e_x, e_y].add(current_tile)
if target_id not in self.toggle_sprites:
self.toggle_sprites[target_id] = []
self.toggle_sprites[target_id].append(current_tile)
def generate_gate(data):
gate_data = self.item_data['gates'][str(data)]
if self.tile_map[e_x, e_y] is None:
current_tile = Tile((e_x, e_y), self)
self.tile_map[e_x, e_y] = current_tile
else:
current_tile = self.tile_map[e_x, e_y]
rel_pos = e_x - gate_data['start'][0], e_y - gate_data['start'][1]
next_pos = gate_data['position'][0] + rel_pos[0], gate_data['position'][1] + rel_pos[1]
rel_gate_data = {"target": gate_data["target"], "land_pos": next_pos}
current_tiles = []
gate_tile = isometric.IsoGateSprite(e_x, e_y, GATES[4], rel_gate_data)
tile_list.append(gate_tile)
current_tiles.append(gate_tile)
current_tile.light_add(gate_tile)
for i in range(4):
direction = (i % 2 * ((math.floor(i/2)*-2) + 1), (1 - i % 2) * ((math.floor(i/2)*-2) + 1))
if (e_y+direction[1] > self.map_size[1] or e_x+direction[0] > self.map_size[0] or
(e_y+direction[1] < self.map_size[1] and e_x+direction[0] < self.map_size[0] and
map_data[e_y+direction[1]][e_x+direction[0]] != data)):
tile = isometric.IsoGateSprite(e_x, e_y, GATES[i], rel_gate_data)
current_tile.light_add(tile)
current_tiles.append(tile)
tile_list.append(tile)
tile_map[e_x, e_y] = current_tiles
def generate_decoration(data):
# same as generate layer but takes a str not an int.
generate_layer(str(data))
def generate_layer(data):
# find the pieces(individual sprites) that make up the IsoSprite.
current_tiles = isometric.find_iso_sprites(data, (e_x, e_y))
tile_list.extend(current_tiles)
tile_map[e_x, e_y] = current_tiles
for tile in current_tiles:
if len(tile.animations):
self.animated_sprites.append(tile)
if self.tile_map[tile.e_x, tile.e_y] is None:
self.tile_map[tile.e_x, tile.e_y] = Tile((e_x, e_y), self)
self.tile_map[tile.e_x, tile.e_y].add(tile)
def generate_isoactor(data):
isoactor_data = self.item_data['character'].get(str(data))
if isoactor_data is not None:
if isoactor_data['type'] == "player":
self.game_view.player.new_pos(e_x, e_y)
elif isoactor_data['type'] == "dummy":
iso_data = isometric.generate_iso_data_other(isoactor_data['type'])
dummy = isometric.IsoSprite(e_x, e_y, *iso_data,
{'hit': isometric.IsoAnimation(
"assets/characters/iso_dummy.png",
(160, 320), (160, 0), 4, 1/12)})
tile_list.append(dummy)
tile_map[e_x, e_y] = dummy
self.animated_sprites.append(dummy)
if self.tile_map[e_x, e_y] is None:
self.tile_map[e_x, e_y] = Tile((e_x, e_y), self)
self.tile_map[e_x, e_y].add(dummy)
else:
new_bot = BotData(e_x, e_y, isoactor_data['type'], isoactor_data['start_active'])
self.bots.append(new_bot)
generation_functions = {'floor': generate_layer, 'wall': generate_layer, 'gate': generate_gate,
'poi': generate_poi, 'door': generate_door, 'char': generate_isoactor,
'decoration': generate_decoration}
# Loop through the tile data.
for e_y, row in enumerate(map_data):
for e_x, tile_value in enumerate(row):
# If there is a tile in found in the data create the appropriate tile.
if tile_value:
generation_functions.get(location, generate_layer)(tile_value)
self.layers[location] = isometric.IsoLayer(layer_data, map_data, tile_list, tile_map, shown)
c.set_floor(self.layers['floor'].tiles)
algorithms.find_neighbours(self.tile_map)
for bot in self.bots:
if bot.shown:
self.game_view.new_bot(bot)
def strip_map(self):
    """
    Completely remove every sprite belonging to this map from the iso list
    and the game view, so that a new map can be loaded in its place.
    """
    self.game_view.reset_bots()
    player = self.game_view.player
    occupied = (t for row in self.tile_map for t in row if t is not None)
    for current in occupied:
        if player in current.actors:
            current.light_remove(player)
        c.iso_strip(current.pieces)
        c.iso_strip(current.actors)
def set_map(self):
    """Show all the items from this map again after it becomes active."""
    c.set_map_size(self.map_size)
    c.set_floor(self.layers['floor'].tiles)
    for row in self.tile_map:
        for occupied in row:
            if occupied is None:
                continue
            c.iso_extend(occupied.pieces)
            c.iso_extend(occupied.actors)
    # Ask the vision handler for a fresh visibility pass.
    self.vision_handler.regenerate = 2
def draw(self):
    """Draw the vision overlay and refresh wall visibility when flagged."""
    handler = self.vision_handler
    handler.draw()
    if handler.recalculate == 1:
        self.hide_walls()
        handler.recalculate = 0
def hide_walls(self):
    """
    Based on the data generated by the vision handler, decide what to show
    and what to hide.  Visible tiles are marked as seen and lit according
    to distance; tiles the player has seen before but cannot currently see
    are dimmed and tinted; never-seen tiles stay fully transparent.
    """
    # Hoist the loop-invariant lookup out of the per-tile loops
    # (removed the unused `s = time.time()`, `remove`, `show` leftovers).
    vision_image = self.vision_handler.vision_image
    # Bots are simply shown or hidden depending on their tile's visibility.
    for bot in self.game_view.current_ai:
        if vision_image.getpixel((bot.e_x, bot.e_y))[0]:
            c.iso_append(bot)
        else:
            c.iso_remove(bot)
    checked = set()
    for x in self.tile_map:
        for y in x:
            if y is not None:
                if not vision_image.getpixel(y.location)[0]:
                    # Hidden tile: dim it if previously seen, else keep it
                    # fully transparent.  Tiles already handled as lit
                    # neighbours this pass are skipped.
                    if y not in checked:
                        if y.seen:
                            for piece in y.pieces:
                                piece.alpha = 150
                                piece.color = (95, 205, 228)
                        else:
                            for piece in y.pieces:
                                piece.alpha = 0
                else:
                    # Visible tile: remember it and light every piece.
                    y.seen = True
                    if self.lit:
                        color = 255
                    else:
                        # the distance is the value normalised and the map size
                        # relative to 15: dist/255 * map/15 == (map*dist)/3825
                        distance = (vision_image.getpixel(y.location)[1] *
                                    self.map_size[0] / 3825)
                        color = max(int(255 - 255 * distance), 0)
                    for piece in y.pieces:
                        piece.alpha = 255
                        piece.color = (color, color, color)
                    # Also light hidden neighbours that share a wall with this
                    # visible tile so room edges stay readable.
                    for index, tile in enumerate(y.neighbours):
                        if (tile is not None and tile not in checked and
                                not vision_image.getpixel(tile.location)[0] and
                                y.vision[index] and not tile.vision[(index + 2) % 4]):
                            checked.add(tile)
                            tile.seen = True
                            # NOTE(review): distance is sampled at y.location, not
                            # tile.location — possibly intentional (light the wall
                            # like its visible neighbour), but worth confirming.
                            if self.lit:
                                color = 255
                            else:
                                distance = (vision_image.getpixel(y.location)[1] *
                                            self.map_size[0] / 3825)
                                color = max(int(255 - 255 * distance), 0)
                            for piece in tile.pieces:
                                piece.alpha = 255
                                piece.color = (color, color, color)
def check_seen(self, location):
    """Return True when the vision image marks *location* as visible."""
    pixel = self.vision_handler.vision_image.getpixel(location)
    return bool(pixel[0])
class MapHandler:
    """Owns every loaded Map, switches the active map when the player uses a
    gate, and proxies common map attributes (layers, size, tiles) through
    properties so the rest of the game only talks to the active map."""

    def __init__(self, game_view):
        # Read the map. This will later be a list of maps depending on the area.
        self.game_view = game_view
        with open("data/map_data.json") as map_data:
            self.map_data = json.load(map_data)
        # Cache of loaded Map objects keyed by map name; the game starts on
        # the tutorial map.
        self.maps = {}
        self.map = Map(game_view, self.map_data, 'tutorial')
        self.maps['tutorial'] = self.map

    def use_gate(self, gate_data):
        """Travel through a gate: either end the game or swap to the target
        map, dropping the player at the gate's landing position."""
        if gate_data['target'] == "GameFinish":
            self.game_view.window.show_end()
        else:
            self.map.strip_map()
            next_map = self.maps.get(gate_data['target'])
            if next_map is None:
                # First visit: build the target map, cache it, full load.
                next_map = Map(self.game_view, self.map_data, gate_data['target'])
                self.maps[gate_data['target']] = next_map
                self.map = next_map
                self.load_map()
            else:
                # Already loaded before: just re-show its sprites.
                self.map = next_map
                self.map.set_map()
            self.game_view.player.set_grid(self.map.tile_map)
            self.game_view.player.new_map_pos(*gate_data['land_pos'])
            self.game_view.selected_tile.new_pos(self.game_view.player.e_x, self.game_view.player.e_y)
            # Centre the camera on the player.
            self.game_view.set_view(self.game_view.player.center_x-c.SCREEN_WIDTH//2,
                                    self.game_view.player.center_y-c.SCREEN_HEIGHT//2)
            # Cancel any queued movement carried over from the previous map.
            self.game_view.pending_motion = []
            self.game_view.current_motion = None
            self.game_view.motion = False
            c.iso_append(self.game_view.player)

    def load_map(self):
        """
        The load map script runs through the provided map and creates an IsoLayer object for each layer which stores many
        different values, these include the raw tile values as a 2D array and the tile sprites in a 2D numpy array.
        These IsoLayers are then stored by their name in a dictionary.
        """
        self.map.load_map()
        self.initial_show()

    def input_show(self, second_args=('wall', 'poi', 'door')):
        """Push the tiles of the named layers into the iso draw list."""
        shown_tiles = []
        for locator_args in second_args:
            layer = self.layers[locator_args]
            shown_tiles.extend(layer.tiles)
        c.iso_extend(shown_tiles)

    def initial_show(self):
        """Show every non-floor layer flagged as shown, then set the floor."""
        shown_layers = []
        for key, layer in self.layers.items():
            if layer.shown and key != 'floor':
                shown_layers.append(key)
        self.input_show(second_args=shown_layers)
        c.set_floor(self.layers['floor'].tiles)

    def toggle_target_sprites(self, target_id):
        """Flip the state of every toggle sprite (door) bound to target_id."""
        if target_id in self.toggle_sprites:
            for door in self.toggle_sprites[target_id]:
                door.toggle_states()

    def debug_draw(self, a_star=False, display_draw=False):
        """Debug overlays for path-finding.

        display_draw is currently unused — presumably reserved for a future
        overlay; confirm before removing.
        """
        if a_star:
            # A debugging draw that creates 4 points for each tile. one for each direction N, E, S, W.
            # The point is red if it is not a connection, white if it is.
            if self.full_map is not None:
                dirs = ((0, 0.25), (0.25, 0), (0, -0.25), (-0.25, 0))
                for x_dex, point_row in enumerate(self.full_map):
                    for y_dex, value in enumerate(point_row):
                        if value is not None:
                            for dir_dex, direction in enumerate(dirs):
                                t_x = x_dex + direction[0]
                                t_y = y_dex + direction[1]
                                iso_x, iso_y, iso_z = isometric.cast_to_iso(t_x, t_y)
                                if value.neighbours[dir_dex] is None:
                                    arcade.draw_point(iso_x, iso_y - 60, arcade.color.RADICAL_RED, 5)
                                elif not value.directions[dir_dex]:
                                    arcade.draw_point(iso_x, iso_y - 60, arcade.color.GREEN, 5)
                                else:
                                    arcade.draw_point(iso_x, iso_y - 60, arcade.color.WHITE, 5)
            # draws a line from a tile to the tile it came from. they all lead back to the player.
            if self.game_view.player.path_finding_data is not None:
                for tile_node in self.game_view.player.path_finding_data[0]:
                    came_from = self.game_view.player.path_finding_data[0][tile_node]
                    if came_from is not None:
                        start_x, start_y, z = isometric.cast_to_iso(*tile_node.location)
                        end_x, end_y, z = isometric.cast_to_iso(*came_from.location)
                        arcade.draw_line(start_x, start_y-60, end_x, end_y-60, arcade.color.RADICAL_RED)

    # The properties below forward to the currently active map so callers
    # never need to know which Map instance is live.
    @property
    def layers(self):
        return self.map.layers

    @property
    def map_size(self):
        return self.map.map_size

    @property
    def map_width(self):
        return self.map.map_width

    @property
    def map_height(self):
        return self.map.map_height

    @property
    def full_map(self):
        return self.map.tile_map

    @property
    def rooms(self):
        return self.map.rooms

    @property
    def toggle_sprites(self):
        return self.map.toggle_sprites

    @property
    def map_bots(self):
        return self.map.bots

    def draw(self):
        """Draw the active map."""
        self.map.draw()
| DragonMoffon/Temporum | mapdata.py | mapdata.py | py | 20,017 | python | en | code | 2 | github-code | 13 |
25718805079 | import requests
from bs4 import BeautifulSoup
import time
def soupify(url):
    """Fetch *url* and parse the response with BeautifulSoup.

    Returns:
        A ``(response, soup)`` tuple on success, or ``None`` after a short
        pause when the request or parse fails.
    """
    try:
        r = requests.get(url)
        soup = BeautifulSoup(r.content, 'html.parser', from_encoding='utf-8')
        # NOTE: the original called soup.prettify() and discarded the result;
        # prettify() only returns a string, so the call was a no-op.
        return r, soup
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit.  Pause briefly (retry pacing for callers looping over
        # many URLs) and signal failure explicitly.
        time.sleep(3)
        return None
def rev_address(r, soup):
    """Collect the href of every review-count anchor on the page."""
    return [anchor['href'] for anchor in soup.find_all('a', 'review_count')]
def format_url(r_links):
    """Prefix every relative link with the TripAdvisor host, in place."""
    for index, tail in enumerate(r_links):
        r_links[index] = 'http://www.tripadvisor.com{}'.format(tail)
    return r_links
def list_reviews(r, soup):
    """Return the text of every partial review entry found on the page."""
    return [entry.get_text() for entry in soup.find_all('p', 'partial_entry')]
def pgs_reviews_url(link_old, page):
    """Insert the page marker *page* immediately after 'Reviews-' in the URL."""
    marker_end = link_old.find('Reviews-') + len('Reviews-')
    return link_old[:marker_end] + page + link_old[marker_end:]
def pg_urltag(endpg):
    """Return the URL tag for every results page up to (not including) endpg.

    The first page has no tag (''); page i uses the 'or{i}0-' offset form.
    """
    return [''] + ['or{}0-'.format(page) for page in range(1, endpg)]
def last_pg(soup):
    """Return the last page number shown in the pager, or 0 when absent."""
    end = soup.find('span', 'pageNum last taLnk ')
    return 0 if end == None else int(end.string)
def format_text(x):
    # NOTE(review): the parsed soup is built but never returned or used, so
    # this function currently returns None for every input — it looks
    # unfinished/truncated; confirm against the original repository.
    soup = BeautifulSoup(str(x), 'html.parser', from_encoding='utf-8')
| iglee/outrunJulesVerne | src/parse_funcs.py | parse_funcs.py | py | 1,376 | python | en | code | 7 | github-code | 13 |
4974027926 | '''
https://www.acmicpc.net/problem/18222
문제
0과 1로 이루어진 길이가 무한한 문자열 X가 있다. 이 문자열은 다음과 같은 과정으로 만들어진다.
X는 맨 처음에 "0"으로 시작한다.
X에서 0을 1로, 1을 0으로 뒤바꾼 문자열 X'을 만든다.
X의 뒤에 X'를 붙인 문자열을 X로 다시 정의한다.
2~3의 과정을 무한히 반복한다.
즉, X는 처음에 "0"으로 시작하여 "01"이 되고, "0110"이 되고, "01101001"이 되고, ⋯ 의 과정을 거쳐 다음과 같이 나타내어진다.
"011010011001011010010110011010011001011001101001⋯⋯"
자연수 k가 주어졌을 때 X의 k번째에는 무슨 문자가 오는지 구하여라.
입력
첫 번째 줄에 자연수 k (1 ≤ k ≤ 1018) 가 주어진다.
출력
첫 번째 줄에 k번째에 오는 문자를 출력하라.
'''
def solution(place):
    """Return the Thue-Morse digit at zero-based index *place*.

    The recurrence t(0)=0, t(2k)=t(k), t(2k+1)=1-t(k) means the digit is
    simply the parity of the number of set bits in *place*, so the original
    recursion collapses to a single popcount.
    """
    return bin(place).count('1') % 2
if __name__ == '__main__':
    # k in the problem statement is 1-indexed; the sequence is 0-indexed.
    print(solution(int(input())-1))
| yeos60490/algorithm | 백준/18222-투에모스문자열.py | 18222-투에모스문자열.py | py | 1,122 | python | ko | code | 0 | github-code | 13 |
def homework_6(nodes):  # (original note: rename the file to your student id, e.g. 1104813.py)
    """Return the total weight of a minimum spanning tree over *nodes*.

    Edge weights are Manhattan distances between every pair of points; the
    tree is grown with Prim's algorithm starting from nodes[0].

    Args:
        nodes: list of [x, y] coordinate pairs (at least one point).

    Returns:
        The summed Manhattan length of the MST edges (0 for a single node).
    """
    count = len(nodes)
    # Pairwise Manhattan distances; the diagonal stays infinite so a vertex
    # never "connects" to itself.
    dist = [[float("inf")] * count for _ in range(count)]
    for i in range(count):
        for j in range(count):
            if i != j:
                dist[i][j] = (abs(nodes[i][0] - nodes[j][0]) +
                              abs(nodes[i][1] - nodes[j][1]))
    in_tree = [False] * count
    in_tree[0] = True
    total = 0  # renamed from `sum`, which shadowed the builtin
    for _ in range(count - 1):
        # Find the cheapest edge leaving the current tree.  Using infinity
        # instead of the old magic 99999 keeps this correct for large
        # coordinates, and dropping the truthiness test on dist[i][j] lets
        # zero-length edges (duplicate points) be selected.
        best = float("inf")
        best_vertex = 0
        for i in range(count):
            if not in_tree[i]:
                continue
            for j in range(count):
                if not in_tree[j] and dist[i][j] < best:
                    best = dist[i][j]
                    best_vertex = j
        total += best
        in_tree[best_vertex] = True
    return total
if __name__ == '__main__':
    # Sample from the assignment; the expected MST weight is 22.
    nodes = [[0,0],[2,6],[3,9],[6,4],[7,1]]
    print(homework_6(nodes))
    # 22
| daniel880423/Member_System | file/hw6/1090338/hw6_s1090338_0.py | hw6_s1090338_0.py | py | 1,346 | python | en | code | 0 | github-code | 13 |
import spglib
from ase.io import read
from ase.neighborlist import neighbor_list
import matplotlib.pyplot as plt
import argparse

parser = argparse.ArgumentParser(description='Calculate and plot bond lengths for given input file and cutoff distance.')
parser.add_argument('input_file', type=str, help='path to input file in POSCAR format')
parser.add_argument('cutoff', type=float, help='maximum distance to consider a bond')
args = parser.parse_args()

# Read in POSCAR file
poscar = read(args.input_file, format="vasp")

# Run the space group analysis.
# NOTE(review): `result` is never used afterwards — kept for parity, but the
# spglib call could be dropped if the symmetry data is truly unneeded.
data = (poscar.get_cell(), poscar.get_positions(), poscar.get_atomic_numbers())
result = spglib.get_symmetry_dataset(data)

# Generate list of nearest neighbors and calculate bond lengths.
cutoff = args.cutoff
nb_o_lengths = []
b_o_lengths = []
k_o_lengths = []
# Bind the result to its own name: the original rebound the imported
# `neighbor_list` function with its return value, shadowing the import.
pair_data = neighbor_list("ijdD", poscar, cutoff, self_interaction=False)
for i, atom in enumerate(poscar):
    symbol = atom.symbol
    # pair_data[0]/pair_data[1] hold the first/second atom index of each
    # neighbour pair; this selects partners recorded against atom i.
    neighbors = pair_data[0][pair_data[1] == i]
    for neighbor in neighbors:
        neighbor_symbol = poscar[neighbor].symbol
        distance = poscar.get_distance(i, neighbor)
        if distance < cutoff:
            if (symbol, neighbor_symbol) == ("Nb", "O"):
                nb_o_lengths.append(distance)
            elif (symbol, neighbor_symbol) == ("B", "O"):
                b_o_lengths.append(distance)
            elif (symbol, neighbor_symbol) == ("K", "O"):
                k_o_lengths.append(distance)

# Average bond lengths.
# NOTE(review): these divisions raise ZeroDivisionError when a bond type is
# absent within the cutoff — consider guarding if that can happen.
nb_o_avg = sum(nb_o_lengths)/len(nb_o_lengths)
b_o_avg = sum(b_o_lengths)/len(b_o_lengths)
k_o_avg = sum(k_o_lengths)/len(k_o_lengths)
print("Average Nb-O bond length: {:.3f} Å".format(nb_o_avg))
print("Average B-O bond length: {:.3f} Å".format(b_o_avg))
print("Average K-O bond length: {:.3f} Å".format(k_o_avg))

# Plot bond lengths histograms
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].hist(nb_o_lengths, bins=30)
axs[0].set_xlabel("Nb-O bond length (Å)")
axs[0].set_ylabel("Count")
axs[1].hist(b_o_lengths, bins=30)
axs[1].set_xlabel("B-O bond length (Å)")
axs[1].set_ylabel("Count")
axs[2].hist(k_o_lengths, bins=30)
axs[2].set_xlabel("K-O bond length (Å)")
axs[2].set_ylabel("Count")
plt.tight_layout()
plt.show()
| mzkhalid039/Bond-lengths | bond_lengths.py | bond_lengths.py | py | 2,239 | python | en | code | 3 | github-code | 13 |
25906669302 | from .scraper import NewsScraper
from .console import Console
from .constants import constants
# Entry point for the CLI: build the scraper and console, then run the
# interactive menu loop until the handler signals termination.
if __name__ == "__main__":
    run_program = True
    console = Console()
    news_scrapper = NewsScraper()
    news_scrapper.default()
    console.start_program()
    while run_program:
        menu_execution = 0
        # Re-prompt until the handler reports a completed menu action
        # (menu_execution > 0).  The body always runs at least once, so
        # program_termination is guaranteed to be bound after the loop.
        while menu_execution <= 0:
            console.display_options_menu()
            user_input = input(constants.PROMPT_INPUT)
            menu_execution, program_termination = console.handler_menu(user_input, news_scrapper)
        run_program = True if program_termination is None else False  # Exit program
console.end_program() | meobilivang/newsreader-cli | newsreadercli/__main__.py | __main__.py | py | 667 | python | en | code | 6 | github-code | 13 |
42809800591 | """initial migration.
Revision ID: a65f918ff3a0
Revises:
Create Date: 2022-01-24 10:37:29.136316
"""
from alembic import op
from sqlalchemy.dialects import postgresql
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a65f918ff3a0'
# First migration in the chain, hence no down_revision.
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``users`` table and an index on ``id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('first_name', sa.String(length=64), nullable=True),
    sa.Column('last_name', sa.String(length=64), nullable=True),
    sa.Column('username', sa.String(length=64), nullable=False),
    sa.Column('email', sa.String(length=256), nullable=False),
    sa.Column('password', sa.String(length=256), nullable=True),
    sa.Column('phone_number', sa.String(length=64), nullable=False),
    sa.Column('is_activated', sa.Boolean(), nullable=True),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``users`` index and table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_users_id'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
| BorodaUA/practice_api_server | db/migrations/versions/a65f918ff3a0_initial_migration.py | a65f918ff3a0_initial_migration.py | py | 1,400 | python | en | code | 0 | github-code | 13 |
2957049975 | # -*- coding: UTF-8 -*-
# 开发团队: xx科技
# 开发人员: lee
# 创建时间: 3/8/20 12:06 AM
# 文件名称: 322-coinChange.py
from typing import List
class Solution:
    """LeetCode 322 (Coin Change) attempts: a greedy draft, the correct
    bottom-up DP, and an unfinished third variant."""

    def coinChange2(self, coins: List[int], amount: int) -> int:
        # Greedy attempt: repeatedly spend the largest coin that still fits.
        # NOTE(review): greedy is NOT correct for general coin systems
        # (coins=[1, 3, 4], amount=6 gives 3 coins instead of the optimal 2),
        # and the -1 check below also fires when the last coin reduces the
        # amount to exactly 0 (coins=[2], amount=2 wrongly returns -1).
        # Assumes `coins` is sorted ascending — confirm at the call site.
        coins_sort = list(reversed(coins))
        nums, max = 0, len(coins_sort) - 1  # `max` shadows the builtin here
        results = []
        while amount > 0:
            if amount >= coins_sort[nums]:
                amount -= coins_sort[nums]
                results.append(coins_sort[nums])
            else:
                nums = (nums + 1) if nums < max else max
            if nums == max and amount < coins_sort[nums]:
                return -1
        print(results)
        return len(results)

    def coinChange(self, coins: List[int], amount: int) -> int:
        # Bottom-up DP: dp[i] = fewest coins summing to i, with amount+1
        # acting as the "unreachable" sentinel.
        dp = [amount + 1] * (amount + 1)
        dp[0] = 0
        for i in range(1, amount + 1):
            for coin in coins:
                if i >= coin:
                    dp[i] = min(dp[i], dp[i - coin] + 1)
        return -1 if dp[-1] == amount + 1 else dp[-1]

    def coinChange3(self, coins: List[int], amount: int) -> int:
        # NOTE(review): unfinished stub — it computes a sentinel and falls
        # through, implicitly returning None despite the declared int return.
        max = amount + 1
# Quick manual check of the DP solution: 3 cannot be formed from twos -> -1.
solution = Solution()
# result = solution.coinChange(coins=[1, 2, 5], amount=11)
result = solution.coinChange(coins=[2], amount=3)
print(result)
| yanhuilee/hello_leetcode | src/main/python3.6/322-coinChange.py | 322-coinChange.py | py | 1,302 | python | en | code | 1 | github-code | 13 |
# Load every line of the review corpus, reporting progress every 10k lines.
data = []
count = 0
with open('reviews.txt', 'r') as f:
    for line in f:
        data.append(line)
        count += 1
        if count % 10000 == 0:
            print(len(data))
print('讀取完成,共有', len(data), '筆資料')

# Average line length in characters.
# NOTE(review): raises ZeroDivisionError when reviews.txt is empty.
sum_length = 0
for d in data:
    sum_length = sum_length + len(d)
print('每一筆的平均長度為', sum_length/len(data), '個字')

# Whitespace-token frequency count over the whole corpus.
wc = {}
for d in data:
    words = d.split()
    for word in words:
        if word in wc:
            wc[word] += 1
        else:
            wc[word] = 1

# Report high-frequency tokens and the vocabulary size.
for word in wc:
    if wc[word] > 1000:
        print(word, wc[word])
print(len(wc))
# NOTE(review): raises KeyError if 'Allen' never appears in the corpus.
print(wc['Allen'])

# Interactive lookup loop; entering 'q' quits.
while True:
    word = input('請問你查什麼字?')
    if word == 'q':
        break
    if word in wc:
        print(word, '出現的次數:', wc[word])
    else:
        print('沒有出現過喔!')
print('感謝查詢')
| iwels/read_count | read.py | read.py | py | 771 | python | en | code | 0 | github-code | 13 |
from DUBtils import *

# Target database for all of the statements below.
database_name = "三国集团"

# Seed the information table.  All statements use parameterised queries
# (%s placeholders), so values are never interpolated into the SQL text.
sql = "insert into information values (%s,%s,%s,%s,%s,%s,%s)"
param = ['曹操', 56, '男', 106, 'IBM', 500, 50]
updata(sql, param, database_name)
sql1 = "insert into information values (%s,%s,%s,%s,%s,%s,%s)"
param1 = ['大桥', 19, '女', 230, '微软', 501, 60]
updata(sql1, param1, database_name)
sql2 = "insert into information values (%s,%s,%s,%s,%s,%s,%s)"
param2 = ['小桥', 19, '女', 210, 'Oracle', 600, 60]
updata(sql2, param2, database_name)
sql3 = "insert into information values (%s,%s,%s,%s,%s,%s,%s)"
param3 = ['许褚', 45, '男', 230, 'Tencent', 700, 10]
updata(sql3, param3, database_name)

# 1. Average salary across everyone.
# The accumulator was originally named `sum`, shadowing the builtin and
# making the score section below crash with "'int' object is not callable";
# renamed to `total` here and throughout.
sql4 = "SELECT pay FROM information where age > %s"
param4 = ["0"]
mode = "all"
total = 0
data = select(sql4, param4, mode, database_name)
for i in range(len(data)):
    total = data[i][0] + total
adverge = total / len(data)
print("平均薪资为:", adverge)

# 2. Average age across everyone.
sql5 = "SELECT age FROM information where age > %s"
param5 = ["0"]
mode = "all"
total = 0
data = select(sql5, param5, mode, database_name)
for i in range(len(data)):
    total = data[i][0] + total
adverge1 = total / len(data)
print("平均年龄为:", adverge1)

# 3. Add an employee.
sql6 = "insert into information values (%s,%s,%s,%s,%s,%s,%s)"
param6 = ['刘备', 45, '男', 220, 'alibaba', 500, 30]
updata(sql6, param6, database_name)

# 4. Count men and women.
sql7 = "SELECT sex FROM information where age>%s"
param7 = [0]
mode = "all"
num = 0
num1 = 0
data = select(sql7, param7, mode, database_name)
for i in range(len(data)):
    if data[i][0] == "男":
        num = num + 1
    elif data[i][0] == "女":
        num1 = num1 + 1
print("男生人数为:", num)
print("女生人数为:", num1)

# 5. Head-count per department (counts duplicates, prints each dept once).
sql8 = "SELECT department_num FROM information where age> %s"
param9 = ["0"]
mode = "all"
data = select(sql8, param9, mode, database_name)
seen_departments = []  # renamed from `list`, which shadowed the builtin
for i in range(len(data)):
    a = data[i][0]
    c = 0
    for n in range(len(data)):
        b = data[n][0]
        if a == b:
            c = c + 1
    if a in seen_departments:
        continue
    else:
        seen_departments.append(a)
        print("部门编号:", a, "人数:", c)

# Total exam score per student; `scores` replaces the original `list` name.
scores = {"罗恩": [23, 35, 44],
          "哈利": [60, 77, 68, 88, 90],
          "赫敏": [97, 99, 89, 91, 95, 90],
          "马尔福": [100, 85, 90],
          }
print("罗恩总成绩:", sum(scores["罗恩"]))
print("哈利总成绩:", sum(scores["哈利"]))
print("赫敏总成绩:", sum(scores["赫敏"]))
print("马尔福总成绩:", sum(scores["马尔福"]))

# Input for the BubbleSort call that follows.
a = [5, 2, 4, 7, 9, 1, 3, 5, 4, 0, 6, 1, 3]
BubbleSort(a) | zhongyusheng/store | 练习3.py | 练习3.py | py | 2,560 | python | en | code | 0 | github-code | 13 |
39233588354 | # -*- encoding:utf-8 -*-
"""
Author:wangqing
Date: 20190707
Version:1.3
实现模型的建立,模型具体细节:
1. Encoder部分
由于各个段落的长度不一致,因此分别将各个段落送入BertModel中
经过BertModel得到embedding
在这个过程中,注意到有一个函数,model.train()或者model.eval()
这个两个参数仅对模型中有dropout时有影响。
Encoder出来后,得到的output为;encoder_layer,pooled_output,
encoder_layer中的结果为我们所需的hidden_state
2. Decoder
input_size = hidden_size =encoder_layer.hidden_size
采用一层GRU
encoder中的hidden_state即为input,decoder中的hidden_state=None
它不需要hidden_state
由于output的shape为(seq_len,batch_size,hidden_size)
"""
from pytorch_pretrained_bert import BertModel,BertConfig
import torch.nn as nn
import torch
import torch.nn.functional as F
class Summarizer(nn.Module):
def __init__(self, args, w=None, b=None):
# 初始化模型,建立encoder和decoder
super(Summarizer, self).__init__()
if args.mode == "train":
self.encoder = BertModel.from_pretrained('bert-base-cased', cache_dir="./temp")
self.w = w
self.b = b
elif args.mode == "test":
config = BertConfig.from_json_file(args.predict_config)
self.encoder = BertModel(config)
self.args = args
# we choose same hiedden_size with bert embedding
self.decoder = nn.GRU(input_size=768, hidden_size=768, num_layers=1)
# make all of them to gpu
# self.to(device)
def forward(self, para_dict):
"""
构建模型,只针对一个para来
:param input: 假设只传入一个段落,对这一个段落进行
:return:
"""
# Create Encoder,计算一个
#self.encoder.eval()
para_tokens_tensor = torch.tensor([para_dict['src']])
para_segments_tensor = torch.tensor([para_dict['segs']])
print(para_tokens_tensor)
print(para_segments_tensor)
self.encoded_output, _ = self.encoder(para_tokens_tensor, para_segments_tensor, output_all_encoded_layers=False)
# send encoded_output into decoder
self.encoded_output = torch.transpose(self.encoded_output, 0, 1)
self.decoded_output, _ = self.decoder(self.encoded_output)
return self.decoded_output
| CatherineWong1/hierarchy_model | hierarchy_model.py | hierarchy_model.py | py | 2,390 | python | zh | code | 0 | github-code | 13 |
18578522800 | ## extra file to test loading txt file into a list. function was also added in project1.py
def load_txt(filename):
# opening the file in read mode
my_file = open(filename, "r")
# reading the file
data = my_file.read()
# replacing end of line('/n') with ' ' and
# splitting the text it further when '.' is seen.
data_into_list = data.replace('\n', ' ').split()
# make sure to close file
my_file.close()
return data_into_list
print( load_txt("stopwords.txt") ) | Brunozml/ml1_p1_linear_reg | loadtxt.py | loadtxt.py | py | 504 | python | en | code | 0 | github-code | 13 |
16132559683 | import collections
import multiprocessing as mp
Msg = collections.namedtuple("Msg", ["event", "args"])
class BaseProcess(mp.Process):
"""A process backed by an internal queue for simple one-way message passing."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = mp.Queue()
def send(self, event, *args):
"""Puts the event and args as a `Msg` on the queue"""
msg = Msg(event, args)
self.queue.put(msg)
def dispatch(self, msg):
event, args = msg
handler = getattr(self, "do_%s" % event, None)
if not handler:
raise NotImplementedError("Process has no handler for [%s]" % event)
handler(*args)
def run(self):
while True:
msg = self.queue.get()
self.dispatch(msg)
# usage
class MyProcess(BaseProcess):
def do_helloworld(self, arg1, arg2):
print(arg1, arg2)
if __name__ == "__main__":
process = MyProcess()
process.start()
process.send("helloworld", "hello", "world")
| udhayprakash/PythonMaterial | python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/example2.py | example2.py | py | 1,073 | python | en | code | 7 | github-code | 13 |
39447991925 | import torch
import torch.nn as nn
class TextEncoder(torch.nn.Module):
def __init__(self, hidden_size, input_dim, n_layers=1, dropout=0):
super(TextEncoder, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_dim, self.hidden_size)
self.gru = nn.GRU(self.hidden_size, self.hidden_size, n_layers,
dropout=(0 if n_layers == 1 else dropout), bidirectional=True)
def forward(self, input_seq, input_lengths, hidden=None):
# Convert word indexes to embeddings
embedded = self.embedding(input_seq)
# Pack padded batch of sequences for RNN module
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths.cpu())
# Forward pass through GRU
outputs, hidden = self.gru(packed, hidden)
# Unpack padding
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
# Sum bidirectional GRU outputs
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
# We will use the sum of the final hidden state of the backward and forward pass.
hidden = torch.sum(hidden, dim = 0)
return outputs, hidden
| talk2car/Talk2Car | baseline/models/nlp_models.py | nlp_models.py | py | 1,317 | python | en | code | 50 | github-code | 13 |
28105948739 | #!/usr/bin/env python3
import sys
class Day04:
def __init__(self, file):
self.data = [line.strip() for line in open(file).readlines()]
@staticmethod
def parse_line(line):
def parse_section(section):
first, second = section.split('-')
return set(range(int(first), int(second)+1))
section1, section2 = line.split(',')
return parse_section(section1), parse_section(section2)
def run(self):
answer1 = answer2 = 0
for line in self.data:
set1, set2 = Day04.parse_line(line)
ilen = len(set1.intersection(set2))
if ilen in [len(set1),len(set2)]:
answer1 += 1
if ilen:
answer2 += 1
return answer1, answer2
def test1():
answer1, answer2 = Day04('./day04-test.input').run()
assert answer1 == 2
assert answer2 == 4
def test2():
answer1, answer2 = Day04('./day04.input').run()
assert answer1 == 305
assert answer2 == 811
if __name__ == '__main__':
print("advent of code: day04")
file = './day04.input'
if len(sys.argv) > 1:
file = sys.argv[1]
answer1, answer2 = Day04(file).run()
print(f"part 1: {answer1}")
print(f"part 2: {answer2}")
| danschaffer/aoc | 2022/day04.py | day04.py | py | 1,260 | python | en | code | 0 | github-code | 13 |
8868623369 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 6 01:36:40 2020
@author: apmle
"""
'''This is a program to convert Temperature in degrees Celsius to degrees Fahrenheit'''
def convert(Celsius):
F= (Celsius*9/5)+32
print(f"{Celsius} degrees in Celsius is equivalent to {F} degrees Fahrenheit")
while True:
C=float(input("What is the temperature in degrees Celsius?.\n"))
convert(C)
cont=input("Do you wish to make another conversion? Type 1 for yes or 0 for no.\n")
if cont == '0':
break
| FabioRochaPoeta/Python-v1-Ana | Celsius to Fahrenheit.py | Celsius to Fahrenheit.py | py | 545 | python | en | code | 1 | github-code | 13 |
27389303153 | import pickle
import numpy as np
from utils import get_sentence_vector, get_glove_matrix
# 导入预处理后的问题列表
preprocessed_question_list = pickle.load(open('model/preprocessed_question_list.pkl', 'rb'))
# # 建立GloVe矩阵
# glove_words, embeddings = get_glove_matrix()
#
# # 保存GloVe中的词头到文件
# with open('model/glove_words.pkl', 'wb') as fw:
# pickle.dump(glove_words, fw)
#
# # 保存矩阵化后的GloVe到文件
# with open('model/glove_embeddings.pkl', 'wb') as fw:
# pickle.dump(embeddings, fw)
# 导入GloVe中的词头
glove_words = pickle.load(open('model/glove_words.pkl', 'rb'))
# 导入GloVe矩阵
embeddings = pickle.load(open('model/glove_embeddings.pkl', 'rb'))
# 初始化向量化后的问题列表
vectorized_question_list = []
for preprocessed_sentence in preprocessed_question_list:
vectorized_sentence = get_sentence_vector(preprocessed_sentence, glove_words, embeddings, 100)
vectorized_question_list.append(vectorized_sentence)
vectorized_question_list = np.asarray(vectorized_question_list)
# 保存向量化后的问题列表结果到文件
with open('model/vectorized_question_list_glove.pkl', 'wb') as fw:
pickle.dump(vectorized_question_list, fw)
| huangjunxin/SimpleQuestionAnsweringSystem | vectorize_corpus_glove.py | vectorize_corpus_glove.py | py | 1,234 | python | en | code | 4 | github-code | 13 |
39709992731 | from manimlib.imports import *
class Equations(Scene):
def construct(self):
#Making equations
first_eq = TextMobject("$$S = \\int_{a}^{b} 2\\pi f(x) \\sqrt{1+[\\frac{dy}{dx}]^2} dx$$")
second_eq = ["$S$", "=", "$\\int_{a}^{b}$", "$2\\pi f(x)$", "$\\sqrt{1+[f'(x)]^2} dx$",]
second_mob = TextMobject(*second_eq)
for i,item in enumerate(second_mob):
if(i != 0):
item.next_to(second_mob[i-1],RIGHT)
eq2 = VGroup(*second_mob)
des1 = TextMobject("The Surface area of f(x) where $$ a\le x \le b$$ around the x-axis is")
des2 = TextMobject("Or this...")
#Coloring equations
second_mob.set_color_by_gradient("#33ccff","#ff00ff")
#Positioning equations
des1.shift(2*UP)
des2.shift(2*UP)
#Animating equations
self.play(Write(des1))
first_eq.shift(DOWN)
self.play(Write(first_eq))
self.play(ReplacementTransform(des1, des2), Transform(first_eq, eq2))
self.wait(1)
for i, item in enumerate(eq2):
if (i<2):
eq2[i].set_color(color=PURPLE)
else:
eq2[i].set_color(color="#00FFFF")
self.add(eq2)
self.wait(1)
| advayk/Manim-CalcII-Project | trial_equations.py | trial_equations.py | py | 1,267 | python | en | code | 0 | github-code | 13 |
21792333050 | import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from alephnull.finance import trading
import pandas as pd
import risk
from . risk import (
alpha,
check_entry,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Period')

# Pre-bind the treasury-duration selection strategy so the period code can
# call choose_treasury(treasury_curves, start_date, end_date) directly.
choose_treasury = functools.partial(risk.choose_treasury,
                                    risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns,
             benchmark_returns=None):
    """Compute risk metrics for the window [start_date, end_date].

    Args:
        start_date, end_date: boundaries of the analysed period.
        returns: the algorithm's date-indexed returns series.
        benchmark_returns: optional benchmark series; when None the
            environment benchmark is sliced to the returns' date range.
    """
    treasury_curves = trading.environment.treasury_curves
    if treasury_curves.index[-1] >= start_date:
        # Restrict the treasury curves to the analysed window.
        mask = ((treasury_curves.index >= start_date) &
                (treasury_curves.index <= end_date))
        self.treasury_curves = treasury_curves[mask]
    else:
        # our test is beyond the treasury curve history
        # so we'll use the last available treasury curve
        self.treasury_curves = treasury_curves[-1:]
    self.start_date = start_date
    self.end_date = end_date
    if benchmark_returns is None:
        br = trading.environment.benchmark_returns
        benchmark_returns = br[(br.index >= returns.index[0]) &
                               (br.index <= returns.index[-1])]
    # mask_returns_to_period is defined elsewhere on this class; it
    # presumably clips each series to [start_date, end_date] — confirm there.
    self.algorithm_returns = self.mask_returns_to_period(returns)
    self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
    self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date
)
self.sharpe = self.calculate_sharpe()
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in rval.iteritems()}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = trading.environment.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self, mar=None):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
if mar is None:
mar = self.treasury_period_return
return sortino_ratio(self.algorithm_returns,
self.algorithm_period_returns,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return none.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100%
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
# BUG? Shouldn't this be set to log(1.0 + 0) ?
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
| CarterBain/AlephNull | alephnull/finance/risk/period.py | period.py | py | 9,040 | python | en | code | 259 | github-code | 13 |
18859958667 | import json
from time import sleep
import requests
from requests.models import Response
from serial import Serial
# Bridge: read JSON lines from the serial port and POST them to the
# Supervision API, forever.
ser = Serial('COM2', 9600)  # port name, baud rate
print('Serial port is open: ' + str(ser.is_open))

while True:
    print('Waiting for data...')
    # BUG FIX: the original did str(ser.readline())[2:-5], slicing the
    # *repr* of the bytes ("b'...\\r\\n'").  That silently corrupts any
    # line that does not end in exactly "\r\n" or that contains escaped
    # characters.  Decode and strip the terminator instead.
    data = ser.readline().decode('utf-8', errors='replace').strip()
    print('Data received: ' + data)
    if data == '':
        print('No data')
        continue
    # A malformed line should not kill the bridge loop.
    try:
        payload = json.loads(data)
    except ValueError:
        print('Invalid JSON, skipping')
        continue
    response: Response = requests.post(
        'http://localhost:80/Supervision/api/SensorTag/create.php', json=payload)
    print(response.status_code)
    print(response.text)
    sleep(0.5)
| BrahR/Supervision | Arduino/serial_COM2.py | serial_COM2.py | py | 605 | python | en | code | 0 | github-code | 13 |
71252112017 | ##########################################################################################
# CityXen 16 Relay Board Serial Bridge Program
# by Deadline
#
# NOTE: This is subject to heavy modification, especially the way it converts the signals
# so don't presume that the state it is in now is the way it will stay
#
# Raspberry Pi (Any model with 40 GPIO should work)
# https://amzn.to/34X5Xnj
#
# The Serial connector board is a MAX3232 based mini board with a 9 pin dsub
# https://amzn.to/32G9Viv
#
# Null modem required to work with Amiga serial port
# https://amzn.to/32BrHDC
#
# 8 Channel Relay Board
# https://amzn.to/2Xwerh4
#
# Prototype Wiring
# https://amzn.to/2LHDNX9
#
# GPIO Pins used for the serial device
#
# Pin 6 Ground
# Pin 8 TXD
# Pin 10 RXD
# Pin 1 3 volt
#
# GPIO Pins used for the Relay Boards
#
# Relay Board 1
# Pin 2 5 volt VCC Relay Board 1
# Pin 9 Ground Relay Board 1
# Pin 12 Relay 1
# Pin 7 Relay 2
# Pin 11 Relay 3
# Pin 13 Relay 4
# Pin 15 Relay 5
# Pin 19 Relay 6
# Pin 21 Relay 7
# Pin 23 Relay 8
#
# Relay Board 2
# Pin 4 5 volt VCC Relay Board 2
# Pin 39 Ground Relay Board 2
# Pin 16 Relay 1
# Pin 18 Relay 2
# Pin 22 Relay 3
# Pin 40 Relay 4
# Pin 38 Relay 5
# Pin 36 Relay 6
# Pin 32 Relay 7
# Pin 37 Relay 8
#
##########################################################################################
import RPi.GPIO as GPIO
import time
import serial
import argparse
# Set up some variables (defaults; may be overridden by CLI arguments below)
sb_version = "1.0"
serial_device = "/dev/ttyAMA0"   # default serial port on the Pi
serial_baud = "19200"            # kept as a string; pyserial accepts it
encoding = "DEFAULT"             # protocol: DEFAULT (key toggles) or 16B (bit strings)
init_test = False                # pulse all relays once at startup when True
counter = 0                      # loop counter for the periodic "BURmP" keepalive
print("CityXen Serial Bridge version %s" % (sb_version))
print("pass -h for help")
# Parse arguments
ap=argparse.ArgumentParser()
ap.add_argument("-s","--serial_device",required=False,help="Serial Device")
ap.add_argument("-e","--encoding",required=False,help="Encoding Method (Methods: DEFAULT or 16B)\nDEFAULT: 1-8 and q-i operate relays\n16B: 16 bit binary strings should be passed")
ap.add_argument("-b","--serial_baud",required=False,help="Serial Baud Rate")
ap.add_argument("-t","--init_test",required=False,help="Test all relays on startup")
args=vars(ap.parse_args())
if(args["serial_device"]):
    serial_device=args["serial_device"]
if(args["serial_baud"]):
    serial_baud = args["serial_baud"]
if(args["encoding"]):
    encoding = args["encoding"]
if(args["init_test"]):
    # only the literal string "1" enables the startup test
    init_test = True if (args["init_test"]=="1") else False
print("Using "+serial_device+" at "+serial_baud+" baud and "+encoding+" encoding")
# Set up serial device (8N1, no flow control; timeout=None blocks until
# a full line arrives)
ser = serial.Serial(
    serial_device,
    serial_baud,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    xonxoff=0,
    timeout=None,
    rtscts=0
)
# Set up a dictionary for GPIO pins used for the relay up/down states.
# Keys are BOARD pin numbers (see the wiring table in the file header);
# order matters: it is the relay order used by the 16B encoding.
# NOTE(review): the boards appear to be active-low (elsewhere in this
# file False is written for "relay on") -- confirm with the board docs.
gp = {12:False, 7:False,11:False,13:False,15:False,19:False,21:False,23:False,
      16:False,18:False,22:False,40:False,38:False,36:False,32:False,37:False}
# Set up GPIO device
GPIO.setwarnings(False) # Ignore some warnings
GPIO.setmode(GPIO.BOARD)
for i in gp:
    GPIO.setup(i, GPIO.OUT) # Set pins to out
# Define some functions
def set_gpio():
    """Drive every relay pin to the state currently recorded in gp."""
    for pin, state in gp.items():
        GPIO.output(pin, state)
def all_on():
    """Mark every relay as on in gp ("on" is recorded as False here)."""
    gp.update(dict.fromkeys(gp, False))
def all_off():
    """Mark every relay as off in gp ("off" is recorded as True here)."""
    gp.update(dict.fromkeys(gp, True))
def test_sequence(): # Pulse every relay once: on for 0.1 s, then back off
    """Walk through each relay in gp, energizing it briefly.

    Writes the pin states after each change via set_gpio(); False is the
    "on" state for these relays.
    """
    global gp
    for i in gp:
        gp[i]=False
        set_gpio()
        time.sleep(.1)
        gp[i]=True
        set_gpio()
# Do or do not, there is no try...
if(init_test):
    print("Initialization Test")
    test_sequence() # Do a quick system test
# Start with every relay released.
all_off()
set_gpio()
# Print out a ready message
ser.write(b'CityXen Serial Bridge now active\n\r')
print("CityXen Serial Bridge now active")
# Relay order shared by both encodings: bit position N of a 16B string
# and the Nth toggle key below address the same physical relay.
PIN_ORDER = (12, 7, 11, 13, 15, 19, 21, 23, 16, 18, 22, 40, 38, 36, 32, 37)
# DEFAULT encoding: keys 1-8 toggle relays 1-8, q-i toggle relays 9-16.
TOGGLE_KEYS = {key: pin for key, pin in zip("12345678qwertyui", PIN_ORDER)}
# Main program, check for encoding method then loop
if(encoding=="16B"):
    while True:
        x=ser.readline().lstrip('\x00').rstrip("\x00\n\r")
        if(len(x)):
            print("IN STRLEN:"+str(len(x))+":"+x)
        if(len(x)>15):
            # "1" energizes a relay; the boards are driven LOW (False)
            # for "on", so invert the bit into the pin state.
            for bit, pin in zip(x, PIN_ORDER):
                gp[pin] = False if bit == "1" else True
            set_gpio()
        counter=counter+1
        if counter > 1000:
            # periodic keepalive so the peer knows the bridge is alive
            ser.write(b'16B BURmP\n\r')
            print("16B BURmP")
            counter=0
# Default Encoding method (1-8 and q-i)
if(encoding=="DEFAULT"):
    while True:
        # BUG FIX: readline() returns the payload together with its
        # terminator (and NUL padding), so the original exact
        # comparisons such as x == '1' could never match.  Strip the
        # framing first, exactly as the 16B branch already does.
        x=ser.readline().lstrip('\x00').rstrip("\x00\n\r")
        # Table-driven replacement for the original 80-line if-chain:
        # each recognized key flips the state of its relay.
        pin = TOGGLE_KEYS.get(x)
        if pin is not None:
            gp[pin] = not gp[pin]
        set_gpio()
        counter=counter+1
        if counter > 1000:
            ser.write(b'DEFAULT BURmP\n\r')
            print("DEFAULT BURmP")
            counter=0
# NOTE: only reached if neither encoding branch is entered (the loops
# above never exit on their own).
GPIO.cleanup()
| cityxen/HACKME | Click-A-Tron/Click-A-Tron.py | Click-A-Tron.py | py | 7,527 | python | en | code | 0 | github-code | 13 |
14505760577 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup
from time import sleep
import random
from project_solo_app.models import Background_Task, Job, Li_Job, Li_Company, Li_Poster, Web_Scrape_Error
def linkedin_search(URL, limit=0):
    """Scrape a LinkedIn jobs-search results page and persist the results.

    Drives Chrome via Selenium: loads URL, repeatedly scrolls and clicks
    "see more jobs" to grow the result list, then visits each job card and
    stores job / company / poster rows via the Django models.  Progress is
    reported through a Background_Task row, which is deleted on completion.

    URL   -- must start with https://www.linkedin.com/jobs/search/
    limit -- cap on jobs processed; 0 means uncapped (loop_limit still
             bounds the number of page extensions).
    Returns 'done' on completion, or an explanatory message when the
    scrape cannot start (bad URL / another scrape already running).
    """
    if not URL.startswith('https://www.linkedin.com/jobs/search/'):
        return f"Can't scrape now. Incorrect URL: {URL}"
    # Refuse to run two scrapes concurrently.
    current_scrape = Background_Task.objects.filter(activity='web_scrape', status=1)
    if len(current_scrape) !=0:
        return f"Can't scrape now. Existing scrape may be occurring."
    curr_scrape_object = Background_Task.objects.create(
        activity = 'web_scrape',
        status = 1,
        url = URL,
        current = 0,
        total = 0
    )
    options = Options()
    options.headless = False
    options.add_argument("--window-size=1920,1080")
    search_driver = webdriver.Chrome('/usr/local/bin/chromedriver', options=options)
    #search_driver = webdriver.Chrome('/usr/local/bin/chromedriver')
    search_driver.get(URL)
    search_driver.add_cookie({'name' : 'lang' , 'value' : 'v=2&lang=en-us'})
    sleep(3)
    action = ActionChains(search_driver)
    search_page = search_driver.page_source
    search_soup = BeautifulSoup(search_page, "html.parser")
    # The job cards live in the section after <main>'s first section.
    job_results = search_soup.body.main.section.next_sibling
    total_job_line = job_results.find('span', class_='results-context-header__job-count')
    total_job_count = 0
    if total_job_line:
        total_job_count = int(total_job_line.text.replace(',','').replace('+',''))
    job_list = job_results.find_all('li')
    curr_job_count = len(job_list)
    print(f'Total job count for this search is: {total_job_count}')
    print(f'Initial job count in DOM is: {curr_job_count}')
    curr_scrape_object.total = curr_job_count
    curr_scrape_object.save()
    # ---- previous job count to compare to, as one of the means to know when to break the loop.
    prev_job_count = 0
    # ---- used to break the loop should prev_job_count and curr_job_count stay the same.
    retry_count_max = 3
    # ---- a limiter used during development, as another one of the means to break the loop.
    if limit == 0:
        loop_limit = 50
    else:
        loop_limit = int(limit/20)
    # ---- initial loop value, since the first page load already has a certain amount of jobs.
    loop_count = 2
    retry_count = 0
    scroll_loop=0
    see_more_jobs_click = 0
    # Grow the DOM job list: scroll to the bottom and click "see more
    # jobs" until enough jobs are loaded or progress stalls.
    while loop_count <= (total_job_count/25) and loop_count <= loop_limit:
        if scroll_loop == 0 and (limit == 0 or (limit > 0 and curr_job_count < limit)):
            try:
                for scroll_loop in range(20):
                    search_driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
                    print(f'scroll count: {scroll_loop}')
                    sleep(.5)
            except:
                pass
        sleep(random.randint(2, 5))
        try:
            see_more_jobs = search_driver.find_element_by_xpath('/html/body/main/div/section/button')
            if see_more_jobs:
                see_more_jobs.click()
                see_more_jobs_click += 1
                print(f'see more jobs clicked: {see_more_jobs_click}')
        except:
            pass
        search_page = search_driver.page_source
        search_soup = BeautifulSoup(search_page, "html.parser")
        job_results = search_soup.body.main.section.next_sibling
        job_list = job_results.find_all('li')
        curr_job_count = len(job_list)
        print(f'Current job count in DOM is: {curr_job_count}')
        if limit > 0 and curr_job_count > limit:
            print(f'job count is adjusted to limit: {limit}')
            curr_scrape_object.total = limit
            curr_scrape_object.save()
            break
        else:
            curr_scrape_object.total = curr_job_count
            curr_scrape_object.save()
        if prev_job_count == curr_job_count:
            print('No additional jobs loaded this time...')
            retry_count += 1
            if retry_count == retry_count_max:
                print('Giving up now.')
                break
            else:
                print(f'Trying again... ({retry_count} retries attempted.)')
        else:
            prev_job_count = curr_job_count
            retry_count = 0
        loop_count += 1
        if loop_count >= loop_limit:
            print(f'Breaking loop at reaching the loop limit value of {loop_limit}')
            break
    print('Loop is completed.')
    # ------ beginning of the job pulling section
    job_count = 1
    for job_loop in range(len(job_list)):
        curr_scrape_object.current = job_count
        curr_scrape_object.save()
        company_name = "<MISSING>"
        company_link = ""
        company_location = ""
        job_link = "<MISSING>"
        job_linkedin_data_id = "<MISSING>"
        job_linkedin_data_search_id = ""
        job_listdate = ""
        poster_title = "<MISSING>"
        poster_subtitle = ""
        poster_link = ""
        job_details = {}
        job_criteria = {}
        ajob = job_list[job_loop]
        #For each job listing, we'll try to get the link at a minimum
        try:
            job_link = ajob.find('a')['href']
        except:
            pass
        #for each job listing, we will try to collect all the information.
        try:
            job_link = ajob.find('a')['href']
            company_line = ajob.find('a', class_='result-card__subtitle-link job-result-card__subtitle-link')
            company_location_line = ajob.find('span', class_='job-result-card__location')
            job_listdate_line = ajob.find('time', class_='job-result-card__listdate--new')
            # Open the job card to render its detail pane.
            search_driver.find_element_by_xpath(f'/html/body/main/div/section[2]/ul/li[{job_loop+1}]/a').click()
            sleep(random.randint(2, 4))
            search_page = search_driver.page_source
            job_soup = BeautifulSoup(search_page, "html.parser")
            job_title_line = job_soup.find('h2', class_='topcard__title')
            poster_title_line = job_soup.find('h3', class_='base-main-card__title')
            poster_subtitle_line = job_soup.find('h4', class_='base-main-card__subtitle')
            poster_link_line = job_soup.find('a', class_='message-the-recruiter__cta')
            print(f'Job #{job_count}')
            print('--------------------')
            if ajob:
                job_linkedin_data_id = ajob['data-id']
                job_linkedin_data_search_id = ajob['data-search-id']
                print(f'linkedin data ID: {job_linkedin_data_id}')
                print(f'linkedin data search ID: {job_linkedin_data_search_id}')
            if poster_title_line:
                poster_title = poster_title_line.text
                print(f'Poster title: {poster_title}')
            if poster_subtitle_line:
                poster_subtitle = poster_subtitle_line.text
                print(f'Poster subtitle: {poster_subtitle}')
            if poster_link_line:
                poster_link = poster_link_line["href"]
                print(f'Poster link: {poster_link}')
            if job_title_line:
                job_title = job_title_line.text
                print(f'job title: {job_title}')
            if job_listdate_line:
                job_listdate = job_listdate_line['datetime']
                print(f'job list date: {job_listdate}')
            print(f'job link: {job_link}')
            if company_line:
                company_name = company_line.text
                company_link = company_line["href"]
                print(f'company name: {company_name}')
                print(f'company link: {company_link}')
            if company_location_line:
                company_location = company_location_line.text
                print(f'company location: {company_name}')
            # Expand the truncated description, if the button is present.
            try:
                show_more = search_driver.find_element_by_class_name('show-more-less-html__button')
                show_more.click()
                sleep(.5)
            except:
                pass
            print('attempting details...')
            sleep(3)
            job_details_soup = job_soup.find('div', class_='description__text description__text--rich')
            job_details_lines = job_details_soup.find_all(['div', 'li', 'p'])
            for tag_loop in range(len(job_details_lines)):
                if len(job_details_lines[tag_loop].text) > 2:
                    job_details[tag_loop] = (job_details_lines[tag_loop].text)
            print('attempting job criteria...')
            job_criteria_soup = job_soup.find('ul', class_='job-criteria__list')
            job_criteria_headers = job_criteria_soup.find_all('h3', class_='job-criteria__subheader')
            job_criteria_values = job_criteria_soup.find_all('span', class_='job-criteria__text job-criteria__text--criteria')
            for crit_loop in range(len(job_criteria_headers)):
                job_criteria[job_criteria_headers[crit_loop].text] = job_criteria_values[crit_loop].text
            print('--------------------')
            # Database insertions (get-or-create each related row).
            li_poster_object = Li_Poster.objects.filter(title=poster_title)
            if len(li_poster_object)==0:
                li_poster_object = Li_Poster.objects.create(
                    title = poster_title,
                    subtitle = poster_subtitle,
                    url = poster_link
                )
            else:
                li_poster_object = li_poster_object[0]
            li_company_object = Li_Company.objects.filter(name=company_name)
            if len(li_company_object)==0:
                li_company_object = Li_Company.objects.create(
                    name = company_name,
                    location = company_location,
                    url = company_link
                )
            else:
                li_company_object = li_company_object[0]
            job_object = Job.objects.filter(title=f'{job_title} - (Li)')
            if len(job_object)==0:
                job_object = Job.objects.create(
                    title = f'{job_title} - (Li)',
                    status = 1
                )
            else:
                job_object = job_object[0]
            job_details_text = ""
            for j_line in job_details:
                job_details_text = f'{job_details_text}{job_details[j_line]}\n'
            li_job_object = Li_Job.objects.filter(data_id=job_linkedin_data_id)
            if len(li_job_object)==0:
                li_job_object = Li_Job.objects.create(
                    title = job_title,
                    data_id = job_linkedin_data_id,
                    data_search_id = job_linkedin_data_search_id,
                    url = job_link,
                    details = job_details_text,
                    criteria = job_criteria,
                    post_date = job_listdate,
                    status = 1,
                    company = li_company_object,
                    poster = li_poster_object,
                    job = job_object
                )
            else:
                # BUG FIX: extract the row *before* reading .details --
                # the filter() result is a QuerySet, which has no
                # .details attribute, so the original comparison raised
                # AttributeError (swallowed by the bare except below and
                # logged as a scrape error) for every job that already
                # existed in the database.
                li_job_object = li_job_object[0]
                if li_job_object.details != job_details_text:
                    li_job_object.details = job_details_text
                    li_job_object.save()
        except:
            # Best-effort: record the failing job URL and keep scraping.
            error_object = Web_Scrape_Error.objects.filter(url=job_link)
            if len(error_object)==0:
                error_object = Web_Scrape_Error.objects.create(
                    url = job_link,
                    count = 1,
                    status = 1
                )
            else:
                error_object = error_object[0]
                error_object.count += 1
                error_object.save()
        if limit > 0 and job_count == limit:
            print(f'Job has reached limit of: {limit}')
            break
        job_count += 1
    print('End of processing.')
    curr_scrape_object.delete()
    search_driver.quit()
    return 'done'
9730566725 | # coding: UTF-8
from burp import ITab
from burp import IBurpExtender
from burp import IProxyListener
from burp import IBurpExtenderCallbacks
from burp import IContextMenuFactory
from burp import IContextMenuInvocation
from javax.swing import JPanel
from javax.swing import JButton
from javax.swing import JLabel
from javax.swing import JFileChooser
from javax.swing import JOptionPane
from javax.swing import JTextArea
from javax.swing import JScrollPane
from javax.swing import JMenuItem
from javax.swing.filechooser import FileNameExtensionFilter
from java.io import File
from java.io import PrintWriter
from java.awt import Insets
from java.awt import Font
from java.awt.event import ActionListener
import json
import re
class BurpExtender(IBurpExtender, IProxyListener, ITab, IContextMenuFactory, IContextMenuInvocation, ActionListener):
    """Burp Suite extension (Jython/Python 2): per-URL match-and-replace.

    Rules are edited as JSON in a custom Swing tab and applied to proxied
    requests/responses whose exact scheme://host/path matches a key in
    self.replace_targets.
    """
    # Display names used when registering with Burp.
    EXTENSION_NAME = "M&R Rules"
    TAB_NAME = "M&R Rules"
    MENU_NAME = "Add rule"
    # Every rule dict must carry exactly these keys to be applied.
    TARGETS_KEYS = ("Enable", "Method", "Comment", "Pattern", "Replace", "Type")
    def __init__(self):
        # Rules keyed by exact URL; each value is a list of rule dicts
        # (see TARGETS_KEYS).  Seeded with one disabled-by-example rule.
        self.replace_targets = {
            "https://example.com/": [
                {
                    "Comment": "Sample Math and Replace",
                    "Enable": True,
                    "Method": "GET",
                    "Type": "Request header",
                    "Pattern": "^Referer.*$",
                    "Replace": "Referer: https://example.com/"
                }
            ]
        }
        init_json = json.dumps(self.replace_targets, sort_keys=True, indent=4)
        # GUI
        self._main_panel = JPanel()
        self._main_panel.setLayout(None)
        config_panel = JPanel()
        config_panel.setBounds(240, 50, 500, 50)
        title = JLabel("Math and Replace Rules")
        self._save_btn = JButton("Save")
        self._import_btn = JButton("Import")
        self._export_btn = JButton("Export")
        self._json_chooser = JFileChooser()
        self._json_chooser.setFileSelectionMode(JFileChooser.FILES_ONLY)
        self._json_chooser.setAcceptAllFileFilterUsed(False)
        extFilter = FileNameExtensionFilter("JSON files (*.json)", ["json"])
        self._json_chooser.addChoosableFileFilter(extFilter)
        self._save_btn.addActionListener(self)
        self._import_btn.addActionListener(self)
        self._export_btn.addActionListener(self)
        config_panel.add(title)
        config_panel.add(self._save_btn)
        config_panel.add(self._import_btn)
        config_panel.add(self._export_btn)
        self._json_area = JTextArea(init_json)
        self._json_area.setWrapStyleWord(True) # wrap lines at word boundaries
        self._json_area.setCaretPosition(len(init_json))
        self._json_area.setTabSize(2)
        self._json_area.setMargin(Insets(5, 5, 5, 5))
        self._json_area.setFont(Font(Font.DIALOG_INPUT, Font.PLAIN, 16))
        scroll_pane = JScrollPane(self._json_area)
        scroll_pane.setBounds(300, 130, 1000, 800)
        self._main_panel.add(config_panel)
        self._main_panel.add(scroll_pane)
    def registerExtenderCallbacks(self, callbacks):
        """Burp entry point: register the tab, context menu and proxy hook."""
        self.callbacks = callbacks
        self.helpers = callbacks.getHelpers()
        self._stdout = PrintWriter(callbacks.getStdout(), True) # use self._stdout.println(...) for debug output
        callbacks.setExtensionName(self.EXTENSION_NAME)
        callbacks.addSuiteTab(self)
        callbacks.registerContextMenuFactory(self)
        callbacks.registerProxyListener(self)
    def getTabCaption(self):
        # ITab: caption shown on the Burp tab.
        return self.TAB_NAME
    def getUiComponent(self):
        # ITab: root Swing component of the tab.
        return self._main_panel
    def createMenuItems(self, invocation):
        """IContextMenuFactory: offer an "Add rule" item on messages."""
        menu = []
        menu.append(JMenuItem(self.MENU_NAME, actionPerformed=lambda x, inv=invocation: self.menuActionAdd(inv)))
        return menu
    def menuActionAdd(self, inv):
        """Add an empty rule entry for each selected message's URL and
        refresh the JSON editor."""
        messages = inv.getSelectedMessages()
        for messageInfo in messages:
            requestInfo = self.helpers.analyzeRequest(messageInfo.getHttpService(), messageInfo.getRequest())
            url = requestInfo.getUrl()
            add_target_url = "{}://{}{}".format(url.getProtocol(), url.getHost(), url.getPath())
            self.replace_targets.setdefault(add_target_url, [{}])
        if len(messages) > 0:
            self._json_area.setText(json.dumps(self.replace_targets, sort_keys=True, indent=4))
    def actionPerformed(self, event):
        """Handle Save / Import / Export button clicks on the tab."""
        # Clicked Save Button: parse the editor JSON into the live rules.
        if event.getSource() is self._save_btn:
            try:
                self.replace_targets = json.loads(self._json_area.getText())
            except ValueError:
                self.callbacks.printError("Parse error")
                return
        # Clicked Import Button: load rules from a chosen JSON file.
        elif event.getSource() is self._import_btn:
            dialog_ans = self._json_chooser.showOpenDialog(event.getSource())
            if dialog_ans == JFileChooser.CANCEL_OPTION:
                return
            import_file_path = self._json_chooser.getSelectedFile().getAbsolutePath()
            with open(import_file_path, 'r') as f:
                try:
                    import_data = json.loads(f.read())
                except:
                    self.callbacks.printError("Parse error")
                    return
            self._json_area.setText(json.dumps(import_data, sort_keys=True, indent=4))
            self.replace_targets = import_data
        # Clicked Export Button: write the editor contents to a .json file.
        elif event.getSource() is self._export_btn:
            dialog_ans = self._json_chooser.showSaveDialog(event.getSource())
            if dialog_ans == JFileChooser.CANCEL_OPTION:
                return
            export_file_path = self._json_chooser.getSelectedFile().getAbsolutePath()
            file_ext = self._json_chooser.getSelectedFile().getName().split(".")[-1]
            if file_ext.lower() != "json":
                export_file_path = '{}.json'.format(export_file_path)
                self._json_chooser.setSelectedFile(File(export_file_path))
            # confirm before overwriting an existing file
            if self._json_chooser.getSelectedFile().exists():
                message = "{} already exists.\nDo you want to replace it?".format(export_file_path)
                ans = JOptionPane.showConfirmDialog(None, message, "Save As", JOptionPane.YES_NO_OPTION)
                if (ans == JOptionPane.NO_OPTION):
                    return
            export_data = self._json_area.getText()
            with open(export_file_path, 'w') as f:
                f.write(export_data)
    def processProxyMessage(self, messageIsRequest, message):
        """IProxyListener hook: apply matching rules to the message.

        Rules are looked up by the request's exact scheme://host/path and
        filtered by Enable flag and HTTP method before being applied.
        """
        messageInfo = message.getMessageInfo()
        request = messageInfo.getRequest()
        response = messageInfo.getResponse()
        requestInfo = self.helpers.analyzeRequest(messageInfo.getHttpService(), request)
        url = requestInfo.getUrl()
        method = requestInfo.getMethod()
        replace_rules = []
        try:
            replace_rules = self.replace_targets["{}://{}{}".format(url.getProtocol(), url.getHost(), url.getPath())]
        except KeyError:
            return
        # Match and replace on the request
        if messageIsRequest:
            request_body_offset = self.helpers.analyzeRequest(messageInfo).getBodyOffset()
            request_headers = self.helpers.analyzeRequest(messageInfo).getHeaders()
            request_body = self.helpers.bytesToString(request[request_body_offset:])
            for rule in replace_rules:
                # skip malformed rules (must carry exactly TARGETS_KEYS)
                if set(self.TARGETS_KEYS) != set(rule.keys()):
                    continue
                if not rule["Enable"] or rule["Method"] != method:
                    continue
                if rule["Type"] == "Request header":
                    request_headers = self.replaceRequestHeader(list(request_headers), rule["Pattern"], rule["Replace"])
                elif rule["Type"] == "Request body":
                    request_body = self.replaceRequestBody(request_body, rule["Pattern"], rule["Replace"])
            replaced_request = self.helpers.buildHttpMessage(request_headers, request_body)
            messageInfo.setRequest(replaced_request)
        # Match and replace on the response
        elif response != None:
            response_body_offset = self.helpers.analyzeResponse(response).getBodyOffset()
            response_headers = self.helpers.analyzeResponse(response).getHeaders()
            response_body = self.helpers.bytesToString(response[response_body_offset:])
            for rule in replace_rules:
                if set(self.TARGETS_KEYS) != set(rule.keys()):
                    continue
                if not rule["Enable"] or rule["Method"] != method:
                    continue
                if rule["Type"] == "Response header":
                    response_headers = self.replaceResponseHeader(list(response_headers), rule["Pattern"], rule["Replace"])
                elif rule["Type"] == "Response body":
                    response_body = self.replaceResponseBody(response_body, rule["Pattern"], rule["Replace"])
            replaced_response = self.helpers.buildHttpMessage(response_headers, response_body)
            messageInfo.setResponse(replaced_response)
    def replaceRequestHeader(self, request_headers, replace_pattern, replace_str):
        """Empty pattern: append replace_str as a new header.
        Empty replacement: delete the first matching header.
        Otherwise: replace the first matching header line in place."""
        if replace_pattern == "":
            request_headers.append(replace_str)
            return request_headers
        regex = re.compile(replace_pattern)
        for idx, header in enumerate(request_headers):
            if regex.match(header):
                if replace_str == "":
                    request_headers.remove(request_headers[idx])
                    break
                request_headers[idx] = replace_str
                break
        return request_headers
    def replaceRequestBody(self, request_body, replace_pattern, replace_str):
        """Empty pattern: append replace_str to the body; otherwise regex-
        substitute every match."""
        if replace_pattern == "":
            return "{}{}".format(request_body, replace_str)
        return re.sub(replace_pattern, replace_str, request_body)
    def replaceResponseHeader(self, response_headers, replace_pattern, replace_str):
        """Same contract as replaceRequestHeader, for response headers."""
        if replace_pattern == "":
            response_headers.append(replace_str)
            return response_headers
        regex = re.compile(replace_pattern)
        for idx, header in enumerate(response_headers):
            if regex.match(header):
                if replace_str == "":
                    response_headers.remove(response_headers[idx])
                    break
                response_headers[idx] = replace_str
                break
        return response_headers
    def replaceResponseBody(self, response_body, replace_pattern, replace_str):
        """Same contract as replaceRequestBody, for the response body."""
        if replace_pattern == "":
            return "{}{}".format(response_body, replace_str)
        return re.sub(replace_pattern, replace_str, response_body)
22454449734 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Long description and license text are read from the repository files.
with open('README.rst') as f:
    README = f.read()
with open('LICENSE') as f:
    LICENSE = f.read()
setup(
    name='python-sample',
    version='0.1.0',
    description='Sample Python Project',
    long_description=README,
    author='Ben Schmitt',
    author_email='bens.schmitt@gmail.com',
    url='https://gitlab.com/foxdb/python-sample',
    license=LICENSE,
    # ship every package except the test and docs trees
    packages=find_packages(exclude=('tests', 'docs'))
)
| foxdb/python-sample | setup.py | setup.py | py | 493 | python | en | code | 0 | github-code | 13 |
17153516901 | import argparse
import numpy as np
from torch import nn
from erfnet_cp import customized_erfnet, erfnet
from gpu_energy_eval import GPUEnergyEvaluator
import time
import torch
import random
class CustomizedAlexnet(nn.Module):
    """AlexNet with configurable per-layer channel counts.

    ``width`` lists, in order: input channels, the five conv widths, the
    two hidden fully-connected widths, and the output class count.  The
    default reproduces the standard AlexNet topology (1000 classes).
    Expects 224x224 inputs so the conv stack yields a 6x6 feature map.
    """

    def __init__(self, width=None):
        super(CustomizedAlexnet, self).__init__()
        w = [3, 64, 192, 384, 256, 256, 4096, 4096, 1000] if width is None else width
        feature_layers = [
            nn.Conv2d(w[0], w[1], kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(w[1], w[2], kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(w[2], w[3], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(w[3], w[4], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(w[4], w[5], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)
        classifier_layers = [
            nn.Dropout(),
            nn.Linear(w[5] * 6 * 6, w[6]),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(w[6], w[7]),
            nn.ReLU(inplace=True),
            nn.Linear(w[7], w[8]),
        ]
        self.classifier = nn.Sequential(*classifier_layers)

    def forward(self, x):
        """Full forward pass: conv features, flatten, classifier head."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def conv(self, x):
        """Flattened output of the convolutional stack only."""
        feats = self.features(x)
        return feats.view(feats.size(0), -1)
class CustomizedMobileNet(nn.Module):
    """MobileNet-v1 with configurable channel widths.

    *width* has 16 entries: input channels, the fourteen conv-stage widths,
    and the classifier output size. ``None`` selects the stock configuration.
    """

    def __init__(self, width):
        super(CustomizedMobileNet, self).__init__()
        if width is None:
            width = [3, 32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024, 1001]
        assert len(width) == 16
        # per-stage strides; the five stride-2 stages give 32x downsampling
        strides = [2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        assert len(strides) == len(width) - 2

        def standard_block(cin, cout, stride):
            """Plain 3x3 conv + BN + ReLU (used only for the stem)."""
            return nn.Sequential(
                nn.Conv2d(cin, cout, 3, stride, 1, bias=False),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True)
            )

        def depthwise_block(cin, cout, stride):
            """Depthwise 3x3 conv followed by a 1x1 pointwise conv."""
            return nn.Sequential(
                nn.Conv2d(cin, cin, 3, stride, 1, groups=cin, bias=False),
                nn.BatchNorm2d(cin),
                nn.ReLU(inplace=True),
                nn.Conv2d(cin, cout, 1, 1, 0, bias=False),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            )

        stages = [
            (standard_block if i == 0 else depthwise_block)(width[i], width[i + 1], strides[i])
            for i in range(len(strides))
        ]
        self.model = nn.Sequential(*stages, nn.AvgPool2d(7))
        self.fc = nn.Linear(width[-2], width[-1])

    def forward(self, x):
        """Features -> 7x7 average pool -> flatten -> linear classifier."""
        out = self.model(x)
        out = out.view(out.size(0), -1)
        return self.fc(out)
if __name__ == '__main__':
    # CLI: sample random per-layer widths for the chosen architecture and
    # record the measured GPU energy / wall time per forward pass.
    parser = argparse.ArgumentParser(description='Generate Energy Cost Data')
    parser.add_argument('--net', default='alexnet', help='network architecture')
    parser.add_argument('--num', type=int, default=10000, help='number of samples to generate')
    parser.add_argument('--gpuid', type=int, default=0, help='gpuid')
    parser.add_argument('--num_classes', type=int, default=1000, help='number of classes')
    parser.add_argument('--test_num', type=int, default=1000, help='number of repeated trails')
    parser.add_argument('--conv', action='store_true', help='only use conv layers')
    parser.add_argument('--cpu', action='store_true', help='use cpu')
    parser.add_argument('--outfile', default='./output.npy', help='the output file of generated data')
    args = parser.parse_args()
    print(args.__dict__)
    # pick the network class, its per-layer width upper bounds, and input size
    if args.net == 'alexnet':
        net_class = CustomizedAlexnet
        width_ub = [64, 192, 384, 256, 256, 4096, 4096]
        h, w = 224, 224
    elif args.net == 'mobilenet':
        net_class = CustomizedMobileNet
        width_ub = [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]
        h, w = 224, 224
    elif args.net == 'erfnet':
        net_class = customized_erfnet
        width_ub = erfnet().get_cpwub()
        h, w = 512, 1024
    else:
        raise NotImplementedError('not supported network architecture')
    test_num = args.test_num
    nclasses = args.num_classes
    # a single random image reused for every trial
    fake_img = torch.randn([1, 3, h, w], dtype=torch.float32)
    random.seed(1)
    use_cuda = not args.cpu
    if use_cuda:
        fake_img = fake_img.cuda(device=args.gpuid)
    # test the upper-bound and lower-bound
    # baseline: minimal-width ("empty") model, all layer widths set to 1
    model = net_class(width=[3] + [1] * len(width_ub) + [nclasses])
    model.eval()
    if use_cuda:
        model.cuda(device=args.gpuid)
    with torch.no_grad():
        evaluator = GPUEnergyEvaluator(gpuid=args.gpuid)
        start_time = time.time()
        evaluator.start()
        for _ in range(test_num):
            output = model.forward(fake_img)
        torch.cuda.synchronize()
        energy_used = evaluator.end()
        time_used = time.time() - start_time
    print('Empty model: energy used {:.4e} J, time used {:.4e} s'.format(energy_used / float(test_num), time_used))
    # upper bound: full-width model
    model = net_class(width=[3] + width_ub + [nclasses])
    model.eval()
    if use_cuda:
        model.cuda(device=args.gpuid)
    with torch.no_grad():
        evaluator = GPUEnergyEvaluator(gpuid=args.gpuid)
        start_time = time.time()
        evaluator.start()
        for _ in range(test_num):
            output = model.forward(fake_img)
        torch.cuda.synchronize()
        energy_used = evaluator.end()
        time_used = time.time() - start_time
    print('Full model: energy used {:.4e} J, time used {:.4e} s'.format(energy_used / float(test_num), time_used))
    # collecting energy, time info data
    result = []
    # save the data as [width, energy, time]
    item = np.zeros(2 + len(width_ub) + 2, dtype='float32')
    width = [0] * len(width_ub)
    data_num = args.num
    # wlb/wub: per-layer lower and upper bounds for the sampled widths
    wlb = [1 for w in width_ub]
    wub = [w for w in width_ub]
    print(wlb)
    print(wub)
    print('===========================')
    for i in range(data_num):
        # draw a random width per layer, then measure energy/time per forward
        width = [random.randint(w1, w2) for w1, w2 in zip(wlb, wub)]
        width = [3] + width + [nclasses]
        model = net_class(width=width)
        model.eval()
        if use_cuda:
            model.cuda(device=args.gpuid)
        with torch.no_grad():
            if args.conv:
                forward = model.conv
            else:
                forward = model.forward
            evaluator = GPUEnergyEvaluator(gpuid=args.gpuid)
            start_time = time.time()
            evaluator.start()
            for _ in range(test_num):
                output = forward(fake_img)
            torch.cuda.synchronize()
            energy_used = evaluator.end()
            time_used = time.time() - start_time
        # row layout: [full width vector..., mean energy (J), mean time (s)]
        item[:-2] = width
        item[-2] = energy_used / float(test_num)
        item[-1] = time_used / float(test_num)
        result.append(item.copy())
        if i % 100 == 99:
            # periodic checkpoint of everything collected so far
            print(item)
            print('saved {} items to {}'.format(i+1, args.outfile))
            np.save(args.outfile, np.stack(result))
    # final save of the complete dataset
    np.save(args.outfile, np.stack(result))
| hyang1990/energy_constrained_compression | energy_tr_gen.py | energy_tr_gen.py | py | 7,618 | python | en | code | 21 | github-code | 13 |
6985844056 | import numpy as np
import copy
def retrieve_index_and_tfidf_from_txt(doc_index):
    """Read term indexes and unit tf-idf weights for one document.

    Expects ``./result/doc<doc_index>.txt`` whose first two lines are headers
    and whose remaining lines are "<term_index> <unit_tfidf>" pairs.

    Returns a pair of numpy arrays: (term indexes, unit tf-idf weights).
    """
    path = './result/doc' + str(doc_index) + '.txt'
    term_indexes = []
    weights = []
    with open(path) as handle:
        for line_no, line in enumerate(handle.readlines(), start=1):
            if line_no <= 2:
                # skip the two header lines
                continue
            fields = line.split()
            term_indexes.append(int(fields[0]))
            weights.append(float(fields[1]))
    return np.array(term_indexes), np.array(weights)
# 14299 is the length of the bag-of-words model obtained from PA2
def generate_tfidf_matrix(num_doc, len_BOW = 14299):
    """Assemble the (num_doc, len_BOW) unit tf-idf matrix from per-doc files.

    Document ids and term indexes in the files are 1-based, so both are
    shifted down by one when written into the matrix.
    """
    matrix = np.zeros((num_doc, len_BOW))
    for doc_index in range(1, num_doc + 1):
        indexes, unit_tfidf = retrieve_index_and_tfidf_from_txt(doc_index)
        for pos, term_index in enumerate(indexes):
            matrix[doc_index - 1][term_index - 1] = unit_tfidf[pos]
    return matrix
def compute_pairwise_cosine_similarity(array_1, array_2):
    """Dot product of the two vectors; equals cosine similarity when both
    are unit-normalized (as the tf-idf rows here are expected to be)."""
    return np.dot(array_1, array_2)
def get_cosine_sim_matrix(num_doc, tfidf_matrix):
    """Build the symmetric num_doc x num_doc cosine-similarity matrix.

    Only the lower triangle is computed; each value is mirrored. The
    diagonal is left at 0 so a document is never merged with itself.
    """
    sim = np.zeros((num_doc, num_doc))
    for i in range(num_doc):
        for j in range(i):
            value = compute_pairwise_cosine_similarity(tfidf_matrix[i], tfidf_matrix[j])
            sim[i][j] = value
            sim[j][i] = value
    return sim
#clustering
def get_available_cluster_list(num_doc):
    """Return a flag list with one 1 per document (1 = cluster still active).

    Uses list repetition instead of the original append loop; the elements
    are immutable ints, so sharing is safe.
    """
    return [1] * num_doc
def HAC(K, num_doc, sim_matrix, tfidf_matrix, available_cluster):
    """Hierarchical agglomerative clustering down to K clusters.

    Repeatedly merges the most similar pair of active clusters using the
    group-average (GAAC) similarity from sim_gacc.
    NOTE: sim_matrix and available_cluster are mutated in place; pass copies
    if the caller needs them afterwards.
    Returns a list where merged-away positions hold 0 and active positions
    hold the (1-based) document ids of that cluster.
    """
    result = []
    for i in range(num_doc):
        result.append([i+1])
    while(sum(available_cluster) > K):
        # coordinates of the current global maximum similarity
        index = np.unravel_index(np.argmax(sim_matrix, axis=None), sim_matrix.shape)
        row_index = index[0]
        column_index = index[1]
        # merge the column cluster into the row cluster, deactivate the column
        result[row_index].extend(result[column_index])
        result[column_index] = 0
        available_cluster[column_index] = 0
        #update similarity with row_index
        for j in range(num_doc):
            if available_cluster[j] == 0:
                new_sim = 0
            elif (j == row_index):
                new_sim = 0
            else:
                new_sim = sim_gacc(j, row_index, tfidf_matrix, result)
            sim_matrix[j][row_index] = new_sim
            sim_matrix[row_index][j] = new_sim
            #change similarity that contains "column_index" to zero
            sim_matrix[j][column_index] = 0
            sim_matrix[column_index][j] = 0
    return result
def sim_gacc(clus_iter, clus_survive, tfidf_matrix, result):
    """Group-average (GAAC) similarity between two clusters.

    Sums the tf-idf vectors of both clusters' documents; the squared norm of
    that sum, minus the self-similarity terms, averaged over all ordered
    cross-pairs, gives the group-average similarity.
    """
    size_a = len(result[clus_iter])
    size_b = len(result[clus_survive])
    # result holds 1-based document ids; shift to matrix rows
    rows_a = np.array(result[clus_iter]) - 1
    rows_b = np.array(result[clus_survive]) - 1
    combined = np.sum(tfidf_matrix[rows_a], axis=0) + np.sum(tfidf_matrix[rows_b], axis=0)
    total = size_a + size_b
    return (np.dot(combined, combined) - size_a - size_b) / (total * (total - 1))
def save_result(K, result):
    """Write clustering output to ./<K>.txt.

    Each active cluster is written as its sorted document ids, one per line,
    with a blank line after every cluster; merged-away entries (0) are
    skipped.
    """
    location = "./{}.txt".format(K)
    with open(location, 'w') as out:
        for cluster in result:
            if cluster == 0:
                continue
            for doc in sorted(cluster):
                out.write("{}".format(doc))
                out.write("\n")
            out.write("\n")
##########################################################################################
#compute cosine similarity for each pair of document
num_doc = 1095
tfidf_matrix = generate_tfidf_matrix(num_doc)
similarity_matrix = get_cosine_sim_matrix(num_doc, tfidf_matrix)
#clustering: run HAC once per target cluster count
K_list = [8, 13, 20]
for K in K_list:
    available_cluster = get_available_cluster_list(num_doc)
    # HAC mutates its similarity matrix, so give it a fresh copy each run
    similarity_matrix_2 = copy.copy(similarity_matrix)
    result = HAC(K, num_doc, similarity_matrix_2, tfidf_matrix, available_cluster)
    save_result(K, result)
17058371604 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PromoPageResult(object):
    """Pagination envelope for promo queries (page number/size and totals).

    Mirrors the other auto-generated alipay domain models: private backing
    fields, plain properties, and dict (de)serialization helpers.
    """

    # serializable attribute names, in declaration order
    _FIELDS = ('page_num', 'page_size', 'total_count', 'total_pages')

    def __init__(self):
        self._page_num = None
        self._page_size = None
        self._total_count = None
        self._total_pages = None

    @property
    def page_num(self):
        return self._page_num

    @page_num.setter
    def page_num(self, value):
        self._page_num = value

    @property
    def page_size(self):
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        self._page_size = value

    @property
    def total_count(self):
        return self._total_count

    @total_count.setter
    def total_count(self, value):
        self._total_count = value

    @property
    def total_pages(self):
        return self._total_pages

    @total_pages.setter
    def total_pages(self, value):
        self._total_pages = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping falsy values and recursing
        into values that themselves expose to_alipay_dict()."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PromoPageResult from a dict; returns None for empty input."""
        if not d:
            return None
        o = PromoPageResult()
        for name in PromoPageResult._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PromoPageResult.py | PromoPageResult.py | py | 2,369 | python | en | code | 241 | github-code | 13 |
1064691272 | import re
from py2neo import Graph, Node, Relationship
# Connect to the local Neo4j instance (credentials are hard-coded here).
test_graph = Graph("http://localhost:7474", username="neo4j", password="08166517416reny")
# The natural-language query to be parsed by the dispatch chain below.
input_message = input('input your message:')
def kgquery_entity(target):
    """Look up *target* in the graph, inferring its node label
    (teacher/student/project) from the name's suffix characters."""
    label = re.search(r".*[老师/学生/项目]", target).group(0)
    return test_graph.find_one(label, property_key="name", property_value=target)
def kgquery_rel(start, end):
    """Return one relationship between *start* and *end*, in either direction."""
    return test_graph.match_one(start_node=start, end_node=end, bidirectional=True)
# Which teachers/students take part in a given project? (e.g. "参与项目1的老师有")
if re.match(r'^参与...的..有', input_message):
    m1 = input_message.find('与')
    m2 = input_message.find('的')
    m3 = input_message.find('有')
    name3 = input_message[m1 + 1:m2]
    kind1 = input_message[m2 + 1:m3]
    label1 = re.search(r".*[老师/学生/项目]", name3).group(0)
    progra1 = test_graph.find_one(label1, property_key="name", property_value=name3)
    # print members and leaders whose label matches the requested kind
    for rel in test_graph.match(end_node=progra1, rel_type="成员", bidirectional=True):
        if re.search(r".*[老师/学生/项目]", rel.start_node()["name"]).group(0) == kind1:
            print(rel.start_node()["name"])
    for rel in test_graph.match(end_node=progra1, rel_type="负责人", bidirectional=True):
        if re.search(r".*[老师/学生/项目]", rel.start_node()["name"]).group(0) == kind1:
            print(rel.start_node()["name"])
# Which projects does a given teacher/student take part in?
elif re.match(r'^...参与的项目有', input_message):
    m1 = input_message.find('参')
    name3 = input_message[:m1]
    label1 = re.search(r".*[老师/学生/项目]", name3).group(0)
    progra1 = test_graph.find_one(label1, property_key="name", property_value=name3)
    for rel in test_graph.match(start_node=progra1, rel_type="成员", bidirectional=True):
        print(rel.end_node()["name"])
    for rel in test_graph.match(start_node=progra1, rel_type="负责人", bidirectional=True):
        print(rel.end_node()["name"])
# Which students does a given teacher supervise?
elif re.match(r'^...指导的学生有', input_message):
    m1 = input_message.find('指')
    name3 = input_message[:m1]
    label1 = re.search(r".*[老师/学生/项目]", name3).group(0)
    progra1 = test_graph.find_one(label1, property_key="name", property_value=name3)
    for rel in test_graph.match(start_node=progra1, rel_type="师生", bidirectional=True):
        print(rel.end_node()["name"])
# Which projects is a given teacher in charge of?
elif re.match(r'^...负责的项目有', input_message):
    m1 = input_message.find('负')
    name3 = input_message[:m1]
    label1 = re.search(r".*[老师/学生/项目]", name3).group(0)
    progra1 = test_graph.find_one(label1, property_key="name", property_value=name3)
    for rel in test_graph.match(start_node=progra1, rel_type="负责人", bidirectional=True):
        print(rel.end_node()["name"])
# Attribute lookup: short inputs are treated as a bare entity name
elif len(input_message) < 4:
    print(kgquery_entity(input_message))
# Relationship lookup between two entities ("X和Y的关系是")
elif re.match(r'^...和...的关系是', input_message):
    a = input_message.find('和')
    b = input_message.find('的')
    name1 = input_message[:a]
    name2 = input_message[a + 1:b]
    # Extract each entity's label; in find_one the label is derived from the input
    label1 = re.search(r".*[老师/学生/项目]", name1).group(0)
    label2 = re.search(r".*[老师/学生/项目]", name2).group(0)
    # In find_one, both the label and the property value come from the input
    n1 = test_graph.find_one(label1, property_key="name", property_value=name1)
    n2 = test_graph.find_one(label2, property_key="name", property_value=name2)
    ans = kgquery_rel(n1, n2)
    ans = str(ans)
    print(name1 + '和' + name2 + '的关系是:' + ans)
14624965323 | from gtts import gTTS
from base.models import RecipeInstruction
from pydub import AudioSegment
import os
def getRecipeAudio(recipe_instructions, user_id = 0, language = 'en'):
    """Synthesize one narrated MP3 for a recipe from its instruction rows.

    Each instruction is rendered to speech with gTTS as a temporary clip,
    the clips are joined with silent gaps derived from each row's
    ``time_stamp`` ("HH:MM:SS"), and the result is exported as
    media/recipe_<recipe_id>.mp3. Returns that file name (relative to media/).

    NOTE(review): assumes recipe_instructions is non-empty, already ordered,
    and that seq_no numbering starts at 1 — confirm with callers.
    """
    # one temporary clip per instruction: media/<user>_<recipe>_<seq>.mp3
    for i in recipe_instructions:
        myobj = gTTS(text=i.instruction, lang=language, slow=False)
        myobj.save('media/%d_%d_%d.mp3' % (user_id, i.r_id_id, i.seq_no))
    # `i` intentionally(?) leaks from the loop above; all rows share r_id_id
    recipe_audio = AudioSegment.from_file('media/%d_%d_1.mp3' % (user_id, i.r_id_id), 'mp3')
    try:
        os.remove('media/%d_%d_1.mp3' % (user_id, i.r_id_id))
    except:
        print('media/%d_%d_1.mp3 not deleted.' % (user_id, i.r_id_id))
    # append the remaining clips, padding with silence up to each time stamp
    recipe_instructions = recipe_instructions[1:]
    for i in recipe_instructions:
        instruction = AudioSegment.from_file('media/%d_%d_%d.mp3' % (user_id, i.r_id_id, i.seq_no), 'mp3')
        hours, minutes, seconds = [int(x) for x in i.time_stamp.split(':')]
        milliseconds = (seconds + minutes*60 + hours*3600) * 1000
        silence = AudioSegment.silent(duration=milliseconds)
        recipe_audio += silence + instruction
        try:
            os.remove('media/%d_%d_%d.mp3' % (user_id, i.r_id_id, i.seq_no))
        except:
            print('media/%d_%d_%d.mp3' % (user_id, i.r_id_id, i.seq_no))
    recipe_audio.export("media/recipe_%d.mp3"%(i.r_id_id))
    return "recipe_%d.mp3"%(i.r_id_id)
| jincy-p-janardhanan/recipettsdjango | services/tts_service.py | tts_service.py | py | 1,330 | python | en | code | 0 | github-code | 13 |
15289497457 | import datetime
import re
from django import forms
from django.forms.widgets import DateInput
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields import ArrayField
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.utils import timezone
from django.db import models
from django.shortcuts import get_object_or_404, render
from django.db.models import Avg
from .validators import validate_is_pdf
from ckeditor.fields import RichTextField
class CustomUserManager(BaseUserManager):
    """Manager for CustomUser: email (not username) is the identifier."""
    def create_user(self, email, password=None, **extra_fields):
        """Create and persist a user; *email* is required and normalized."""
        if not email:
            raise ValueError('The Email field must be set')
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password=None, **extra_fields):
        """Create a user with is_staff/is_superuser forced on."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self.create_user(email, password, **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """Email-based user model; `username` is a display name, not the login."""
    email = models.EmailField(unique=True, max_length=255)
    username= models.CharField(max_length=30, blank=True)
    first_name=models.CharField(max_length=30, blank=True)
    last_name=models.CharField(max_length=30, blank=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    date_joined = models.DateTimeField(auto_now_add=True)
    objects = CustomUserManager()
    USERNAME_FIELD = 'email'
    EMAIL_FIELD = 'email'
    REQUIRED_FIELDS = []
    def __str__(self):
        return self.email
    def get_short_name(self):
        return self.first_name
    def get_rating(self):
        """Average star rating received (via Rating.recipient), rounded to
        2 decimals, or None if the user has no ratings."""
        avg_rating = self.recipient_ratings.aggregate(Avg('rating'))['rating__avg']
        if avg_rating is not None:
            return round(avg_rating, 2)
        else:
            return None
    def save(self, *args, **kwargs):
        """Default username to "<first> <last>" when it is empty."""
        if not self.username:
            self.username = self.first_name +" "+ self.last_name
        super().save(*args, **kwargs)
class Company(models.Model):
    """Company profile attached one-to-one to a CustomUser."""
    user = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    field = models.CharField(max_length=30, blank=True)
    description = RichTextField(blank=True, null=True)
    logo = models.ImageField(default='default_picture.png', upload_to='company_profile_images', blank=True)
    skills = ArrayField(models.CharField(null=True, blank=True, max_length=30), null=True, blank=True)
    instagram_link = models.URLField(max_length=200, blank=True)
    linkedin_link = models.URLField(max_length=200, blank=True)
    facebook_link = models.URLField(max_length=200, blank=True)
    website_link = models.URLField(max_length=200, blank=True)
    created_at = models.DateTimeField(default=timezone.now)
    def get_num_ratings(self):
        """Number of ratings this company's user has received."""
        return Rating.objects.filter(recipient=self.user).count()
    def get_rating(self):
        """Average star rating of the linked user, or None."""
        return self.user.get_rating()
    def get_num_jobs_posted(self):
        """Total jobs this company has created."""
        return Job.objects.filter(company=self).count()
    def get_member_since(self):
        """Year of registration as a string (e.g. "2023")."""
        return self.created_at.strftime("%Y")
    def save(self, *args, **kwargs):
        # NOTE(review): name/field fall back to the user's first/last name —
        # confirm this mapping is intentional.
        if not self.name:
            self.name = self.user.first_name
        if not self.field:
            self.field = self.user.last_name
        super().save(*args, **kwargs)
# class Skill(models.Model):
# name = models.CharField(default='m', max_length=50)
# def __str__(self):
# return self.name
# class Freelancer(models.Model):
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
# first_name = models.CharField(default='', max_length=20)
# last_name = models.CharField(default='', max_length=20)
# occupation = models.CharField(default='', max_length=20)
# level = models.CharField(default='', max_length=15)
# bio = RichTextField(blank=True, null=True)
# photo = models.ImageField(default='default_picture.png', upload_to='freelancer_profile_images', blank=True)
# skills = models.ManyToManyField(Skill)
# education_university = models.CharField(default='', max_length=30)
# education_specialization = models.CharField(default='', max_length=30)
# education_year_of_study = models.CharField(default='', max_length=20)
# experience_position = models.CharField(default='', max_length=30)
# experience_company_name = models.CharField(default='', max_length=30)
# experience_work_duration = models.CharField(default='', max_length=20)
# experience_description = models.TextField(default='')
# created_at = models.DateTimeField(auto_now_add=True)
# portfolio_link = models.URLField(blank=True)
# def get_rating(self):
# return self.user.get_rating()
class Skill(models.Model):
    """A named skill tag, unique by name; shared by freelancers and jobs."""
    name = models.CharField(max_length=50, unique=True)
    def __str__(self):
        return self.name
# class Position(models.Model):
# name = models.CharField(default='', null=True, max_length=25)
# def __str__(self):
# return self.name
class Freelancer(models.Model):
    """Freelancer profile attached one-to-one to a CustomUser."""
    user = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
    first_name = models.CharField(default='', max_length=20)
    last_name = models.CharField(default='', max_length=20)
    occupation = models.CharField(default='', max_length=20)
    level = models.CharField(default='', max_length=15)
    bio = RichTextField(blank=True, null=True)
    photo = models.ImageField(default='default_picture.png', upload_to='freelancer_profile_images', blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    portfolio_link = models.URLField(blank=True)
    skills = models.ManyToManyField(Skill, blank=True)
    def get_num_ratings(self):
        """Number of ratings this freelancer's user has received."""
        return Rating.objects.filter(recipient=self.user).count()
    def get_rating(self):
        """Average star rating of the linked user, or None."""
        return self.user.get_rating()
    def save(self, *args, **kwargs):
        """Backfill first/last name from the linked user before saving."""
        if not self.first_name:
            self.first_name = self.user.first_name
        if not self.last_name:
            self.last_name = self.user.last_name
        super().save(*args, **kwargs)
class Project(models.Model):
    """A portfolio item shown on a freelancer's profile."""
    title = models.CharField(max_length=100)
    description = models.TextField()
    freelancer = models.ForeignKey(Freelancer, on_delete=models.CASCADE)
    link = models.URLField()
    platform = models.CharField(max_length=50)
class Education(models.Model):
    """One education entry on a freelancer's profile."""
    freelancer = models.ForeignKey(Freelancer, on_delete=models.CASCADE, related_name='educations')
    university = models.CharField(max_length=30)
    specialization = models.CharField(max_length=30)
    year_of_study = models.CharField(max_length=20)
class Experience(models.Model):
    """One work-experience entry on a freelancer's profile."""
    freelancer = models.ForeignKey(Freelancer, on_delete=models.CASCADE, related_name='experiences')
    position = models.CharField(max_length=30)
    company_name = models.CharField(max_length=30)
    work_duration = models.CharField(max_length=20)
    description = models.TextField()
class Category(models.Model):
    """A job category (used as Job.category)."""
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
class DateField(forms.DateField):
    """Form field accepting dates typed as dd.mm.yyyy."""
    input_formats = ['%d.%m.%Y']
    widget = forms.DateInput(format='%d.%m.%Y')
    def validate(self, value):
        # Only string values are regex-checked; parsed date objects pass
        # straight through to the base validation.
        super().validate(value)
        if value is not None and isinstance(value, (str, bytes)) and not re.match(r"^\d{1,2}\.\d{2}\.\d{4}$", value):
            raise ValidationError("Invalid date format. Use dd.mm.yyyy.")
class Job(models.Model):
    """A job posting created by a company; optionally assigned to a freelancer."""
    title = models.CharField(max_length=255)
    description = RichTextField(blank=True, null=True)
    file = models.FileField(upload_to='company_files/', validators=(validate_is_pdf,), blank=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    skills = models.ManyToManyField(Skill, blank=True)
    position = models.CharField(default='', max_length=20)
    created_at = models.DateTimeField(auto_now_add=True)
    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    is_active = models.BooleanField(default=True)
    salary = models.PositiveIntegerField(null=True, blank=True)
    timeline = models.DateField()
    freelancer = models.ForeignKey(Freelancer, on_delete=models.SET_NULL, blank=True, null=True)
    status = models.CharField(max_length=255, default='pending')
    def get_timeline_display(self):
        """Return the deadline formatted as dd.mm.yyyy."""
        return self.timeline.strftime("%d.%m.%Y")
    def freelancer_has_applied(self, freelancer):
        """True if *freelancer* already has an application for this job."""
        return JobApplication.objects.filter(job=self, freelancer=freelancer).exists()
class JobApplication(models.Model):
    """A freelancer's application to a job; status starts as 'pending'."""
    job = models.ForeignKey(Job, on_delete=models.CASCADE)
    freelancer = models.ForeignKey(Freelancer, on_delete=models.CASCADE)
    status=models.CharField(max_length=20,default='pending')
class Submission(models.Model):
    """Work delivered by a freelancer for a job (a link and/or a file)."""
    job = models.ForeignKey(Job, on_delete=models.CASCADE)
    freelancer = models.ForeignKey(Freelancer, on_delete=models.CASCADE)
    link = models.URLField(max_length=255)
    file = models.FileField(upload_to='freelancer_files/', blank=True, null=True)
    submitted_at = models.DateTimeField(auto_now_add=True)
    order = models.IntegerField(default=0)
    STATUS_CHOICES = [
        ('pending', 'Pending'),
        ('accepted', 'Accepted'),
        ('rejected', 'Rejected'),
    ]
    status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='pending')
    def __str__(self):
        return f'Submission for "{self.job.title}" by {self.freelancer.user.username}'
class Rating(models.Model):
    """A 1-5 star review left by one user about another for a given job."""
    RATING_CHOICES = (
        (1, '1 star'),
        (2, '2 stars'),
        (3, '3 stars'),
        (4, '4 stars'),
        (5, '5 stars'),
    )
    job = models.ForeignKey(Job, on_delete=models.CASCADE)
    reviewer = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='reviewer_ratings')
    recipient = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='recipient_ratings')
    rating = models.IntegerField(choices=RATING_CHOICES)
    comment = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # at most one rating per (job, reviewer, recipient) triple
        unique_together = ('job', 'reviewer', 'recipient')
class Chat(models.Model):
    """A direct message between two users, ordered newest first.

    The final line of this class was corrupted with fused dataset metadata;
    restored to the evident ``ordering`` declaration.
    """
    sender = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='sender')
    receiver = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='receiver')
    message = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)
    class Meta:
        ordering = ['-timestamp']
22648490107 | #!/usr/bin/env python
import boto3
import json
import logging
import pystac
import requests
import landsat
from boto3 import Session
from boto3utils import s3
from cirruslib import Catalog
from cirruslib.errors import InvalidInput
from dateutil.parser import parse
from os import getenv, environ, path as op
from shutil import rmtree
from tempfile import mkdtemp
from traceback import format_exc
from urllib.parse import urlparse
# configure logger - CRITICAL, ERROR, WARNING, INFO, DEBUG
logger = logging.getLogger(__name__)
logger.setLevel(getenv('CIRRUS_LOG_LEVEL', 'DEBUG'))
# NOTE(review): DATA_BUCKET appears unused in this module — confirm before removing
DATA_BUCKET = getenv('CIRRUS_DATA_BUCKET')
def fetch_url_as_text(url):
    """GET *url* and return the response body as text.

    Raises InvalidInput (after logging) on any non-200 status.
    """
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        msg = f"landsat-to-stac: failed fetching {url}: {response.text}"
        logger.error(msg)
        logger.error(format_exc())
        raise InvalidInput(msg)
    return response.text
def handler(payload, context=None):
    """Convert a single Landsat Collection-1 MTL scene into a STAC Item.

    Parameters:
        payload: Cirrus process payload containing exactly one feature whose
            ``assets['txt']['href']`` points at the scene's ``*_MTL.txt`` on S3.
        context: Lambda context object (unused; default changed from a
            mutable ``{}`` to ``None``).

    Returns:
        The catalog dict with ``features`` replaced by the generated STAC Item.

    Raises:
        InvalidInput: if metadata cannot be fetched or the scene crosses the
            antimeridian.
        Exception: if STAC Item creation fails for any other reason.
    """
    logger.debug('Payload: %s' % json.dumps(payload))

    catalog = Catalog.from_payload(payload)
    # TODO - make this more general for more items/collections
    assert(len(catalog['features']) == 1)

    items = []
    # get metadata URL; strip the literal "_MTL.txt" suffix by slicing —
    # the original str.rstrip('_MTL.txt') treats its argument as a character
    # set and can eat trailing characters of the product ID
    url = s3().s3_to_https(catalog['features'][0]['assets']['txt']['href'].rstrip())
    base_url = url[:-len('_MTL.txt')]

    # get metadata and convert to JSON
    metadata = landsat.mtl_to_json(fetch_url_as_text(url))
    # get ANG metadata, used for geometry
    ang_text = fetch_url_as_text(base_url + '_ANG.txt')

    bbox = landsat.get_bbox(metadata)
    try:
        item = pystac.Item(
            id = metadata['LANDSAT_PRODUCT_ID'],
            datetime = landsat.get_datetime(metadata),
            bbox = bbox,
            geometry = landsat.get_geometry(ang_text, bbox),
            properties={}
        )

        # add common metadata
        item.common_metadata.gsd = 30.0
        item.common_metadata.platform = metadata['SPACECRAFT_ID']
        item.common_metadata.instruments = metadata['SENSOR_ID'].split('_')

        # add EO extension
        item.ext.enable('eo')
        item.ext.eo.cloud_cover = float(metadata['CLOUD_COVER'])

        # add proj extension
        item.ext.enable('projection')
        item.ext.projection.epsg = landsat.get_epsg(metadata, item.bbox[1], item.bbox[3])

        # add view extension (sun and off-nadir angles)
        item.ext.enable('view')
        view_info = landsat.get_view_info(metadata)
        item.ext.view.sun_azimuth = view_info['sun_azimuth']
        item.ext.view.sun_elevation = view_info['sun_elevation']
        item.ext.view.off_nadir = abs(view_info['off_nadir'])

        # add landsat extension
        item.ext.enable('landsat')
        item.ext.landsat.apply(**landsat.get_landsat_info(metadata))

        landsat.add_assets(item, base_url)
        items.append(item.to_dict())
    except Exception as err:
        msg = f"landsat-to-stac: failed creating STAC for {catalog['id']} ({err})"
        logger.error(msg)
        logger.error(format_exc())
        raise Exception(msg)

    # discard if crossing antimeridian
    logger.debug(f"bbox = {item.bbox}")
    if item.bbox[2] - item.bbox[0] > 300:
        # pystac.Item is not subscriptable — the original item['id'] raised
        # TypeError instead of reporting the scene id
        msg = f"{item.id} crosses antimeridian, discarding"
        logger.error(msg)
        raise InvalidInput(msg)

    # update STAC catalog
    catalog['features'] = items
    logger.debug(f"STAC Output: {json.dumps(catalog)}")
    logger.debug(f"Items: {json.dumps(items)}")

    return catalog
if __name__ == "__main__":
    # sample payload for ad-hoc local testing of the handler
    payload = {
        'id': 'landsat-8-l1-c1-aws/workflow-publish-landsat/LC08_L1TP_202033_20131129_20170428_01_T1_MTL',
        'type': 'FeatureCollection',
        'features': [{
            'type': 'Feature',
            'id': 'LC08_L1TP_202033_20131129_20170428_01_T1_MTL',
            'collection': 'landsat-8-l1-c1',
            'properties': {},
            'assets': {
                'txt': {
                    'href': 's3://landsat-pds/c1/L8/202/033/LC08_L1TP_202033_20131129_20170428_01_T1/LC08_L1TP_202033_20131129_20170428_01_T1_MTL.txt'
                }
            }
        }],
        'process': {
            "description": "Convert Landsat MTL metadata to STAC and publish",
            "input_collections": ["landsat-8-l1-c1-aws"],
            "workflow": "publish-landsat",
            "output_options": {
                "path_template": "/${collection}/${landsat:wrs_path}/${landsat:wrs_row}/${year}/${month}/${id}",
                "collections": {
                    "landsat-8-l1-c1": ".*"
                }
            },
            "tasks": {
                "publish": {
                    "public": True
                }
            }
        }
    }
    handler(payload)
"""
Given a linked list, swap every two adjacent nodes and return the head of the
resulting list.

Note: you must actually exchange the nodes, not merely swap the values stored
inside them.
"""
class ListNode:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val, self.next = x, None
class Solution:
    """Iterative pairwise swap of adjacent linked-list nodes.

    The original recursive helper re-linked ``start.next`` back to ``start``,
    creating a cycle and never actually swapping anything (and the method
    returned None). Rewritten as a standard iterative swap that returns the
    new head.
    """

    def swapPairs(self, head: 'ListNode') -> 'ListNode':
        """Swap every two adjacent nodes and return the new head."""
        if head is None or head.next is None:
            return head
        new_head = head.next
        prev = None
        first = head
        while first is not None and first.next is not None:
            second = first.next
            # detach the pair and relink it in reversed order
            first.next = second.next
            second.next = first
            if prev is not None:
                prev.next = second
            prev = first
            first = first.next
        return new_head
class Solution2:
    """Recursive pairwise swap of adjacent linked-list nodes."""

    def swapPairs(self, head: 'ListNode') -> 'ListNode':
        """Swap each adjacent pair and return the resulting head."""
        if head is None or head.next is None:
            return head
        partner = head.next
        remainder = partner.next
        partner.next = head
        head.next = self.swapPairs(remainder)
        return partner
if __name__ == '__main__':
    # build the list 1 -> 2 -> 3 -> 4
    node1 = ListNode(1)
    node2 = ListNode(2)
    node3 = ListNode(3)
    node4 = ListNode(4)
    node1.next = node2
    node2.next = node3
    node3.next = node4
    head = node1
    s = Solution2()
    head = s.swapPairs(head)
    # print the swapped order, one value per line
    while head:
        print(head.val)
        head = head.next
| ayingxp/LeetCode | recursion/common/swap_pairs.py | swap_pairs.py | py | 1,290 | python | en | code | 0 | github-code | 13 |
38275394215 | from machine import Pin
import time
def toggle(p):
    """Invert the output level of pin *p* (on -> off, off -> on)."""
    current = p.value()
    p.value(not current)
def callback(p):
    """IRQ handler: report a level change on pin *p*."""
    print('pin change', p)
# GPIO2 drives the on-board LED; fire callback() on both signal edges.
led = Pin(2, Pin.OUT)
led.irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=callback)
led.on()
# blink forever
while True:
    toggle(led)
    time.sleep_ms(500)
17814527825 | import visitAllNodes
input = open("input/9", "r").read().strip()
lines = input.splitlines()
distances = {}
for journey in lines:
route, distance = journey.split(" = ")
distances[frozenset(route.split(" to "))] = int(distance)
print(visitAllNodes.getWeight(distances)) | QuarkNerd/adventOfCode | 2015/9.py | 9.py | py | 279 | python | en | code | 1 | github-code | 13 |
10205779525 | import selectors
import socket
import types
# Where to connect to (the sat2rf1 TCP server under test).
HOST = 'localhost'
PORT = 65432
# Selector for handling multiple connections.
sel = selectors.DefaultSelector()
# The byte messages that will be sent: 250 bytes of 'a' and 250 bytes of 'b'.
# Built with sequence repetition instead of 250 manual concatenations.
m1 = b'a' * 250
m2 = b'b' * 250
MESSAGES = [m1, m2]
def start_connections(host, port, num_conns):
"""
Establishes connections to the sat2rf1_tcpserver.
"""
server_addr = (host, port)
for i in range(0, num_conns):
connid = i + 1
print('Starting connection %s to %s:%s' % (connid, server_addr[0], server_addr[1]))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.connect_ex(server_addr)
events = selectors.EVENT_READ | selectors.EVENT_WRITE
data = types.SimpleNamespace(connid=connid,
msg_total=sum(len(m) for m in MESSAGES),
recv_total=0,
messages=list(MESSAGES),
outb=b'')
sel.register(sock, events, data=data)
def service_connection(key, mask):
"""
Services established connections. (Either sends or receives the byte strings.)
"""
sock = key.fileobj
data = key.data
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024) # Should be ready to read
if recv_data:
print('Received "%s" from connection %s' % (repr(recv_data), data.connid))
data.recv_total += len(recv_data)
if not recv_data or data.recv_total == data.msg_total:
print('Closing connection %s' % data.connid)
sel.unregister(sock)
sock.close()
if mask & selectors.EVENT_WRITE:
if not data.outb and data.messages:
data.outb = data.messages.pop(0)
if data.outb:
print('Sending %s to connection %s' % (repr(data.outb), data.connid))
sent = sock.send(data.outb) # Should be ready to write
data.outb = data.outb[sent:]
# Asks for the amount of connections to initiate before starting them.
start_connections(HOST, PORT, int(input('Input number of connections: ')))
# The test loop.
try:
while True:
events = sel.select(timeout=1)
if events:
for key, mask in events:
service_connection(key, mask)
# Check for a socket being monitored to continue.
if not sel.get_map():
break
except KeyboardInterrupt:
print('Caught keyboard interrupt. Exiting...')
finally:
sel.close()
| jlangvand/Sat2RF1-TCP | tests/connection_tester.py | connection_tester.py | py | 2,629 | python | en | code | 1 | github-code | 13 |
20694503697 | from rss_fetch import CheckFeeds
from load_config import load_section
#LOAD INI CONFIG TO GET ALL THE RSS URLs
THE_FEEDS = load_section()
print("THE FEEDS: ", THE_FEEDS)
test_feed_url = 'http://ourbigdumbmouth.libsyn.com/rss'
def run_get_feed(the_feed_url):
"""
do stuff
:return:
"""
print("TEST MAIN |")
test = CheckFeeds(feed_url=the_feed_url)
test.rss_parse_most_recent()
print("SEE DICT: ", test.podcast_obj)
test.create_dir_name()
test.local_make_dir()
print("FULL PATH TO dir ", test.podcast_ab_path_dir)
test.local_episodes_list()
print("Local Episodes: ", test.podcast_dir_content)
test.clean_mp3_url()
test.create_file_name()
print(" ATTRIBUTE : ", test.episode_file_name, test.episode_full_path)
#Write MP3 to local dir
test.rss_download_most_recent_ep()
test.local_episodes_list()
for i in test.podcast_dir_content:
print(i)
if __name__ == "__main__":
run_config() | maxnotmin/podcast_transfer | main.py | main.py | py | 976 | python | en | code | 0 | github-code | 13 |
37320269273 | from projects.qm_brain.utils.utils import *
import scipy.stats as ss
import numpy as np
import pandas as pd
cond10_taken_avg_tpm = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/tpm_us_taken.csv')
cond12_taken_avg_tpm = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/tpm_s_taken.csv')
cond10_byd_avg_tpm = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/tpm_us_byd.csv')
cond12_byd_avg_tpm = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/tpm_s_byd.csv')
avg_tpm_rest = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/tpm_rest.csv')
cond10_taken_avg_hist = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/hist_us_taken.csv')
cond12_taken_avg_hist = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/hist_s_taken.csv')
cond10_byd_avg_hist = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/hist_us_byd.csv')
cond12_byd_avg_hist = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/hist_s_byd.csv')
avg_hist_rest = load_matrix('/home/user/Desktop/QMBrain/tpm/8_regions/hist_rest.csv')
tpm_list = [cond10_taken_avg_tpm,cond12_taken_avg_tpm,cond10_byd_avg_tpm,cond12_byd_avg_tpm,avg_tpm_rest]
hist_list = [cond10_taken_avg_hist,cond12_taken_avg_hist,cond10_byd_avg_hist,cond12_byd_avg_hist,avg_hist_rest]
types = ['Taken US','Taken S','BYD US','BYD S','Rest']
comparison_inds_1, comparison_inds_2 = np.triu_indices(5,k=1)
x = ss.ttest_ind(cond10_taken_avg_hist,avg_hist_rest)
for ind1,ind2 in zip(comparison_inds_1,comparison_inds_2):
statIndTPM, pvalIndTPM = ss.ttest_ind(tpm_list[ind1].flatten(), tpm_list[ind2].flatten())
statRelTPM, pvalRelTPM = ss.ttest_rel(tpm_list[ind1].flatten(), tpm_list[ind2].flatten())
statMWTPM, pvalMWTPM = ss.mannwhitneyu(tpm_list[ind1].flatten(), tpm_list[ind2].flatten())
print('The t-test TPM (independent) result is: ', statIndTPM, 'with a p-value of: ', pvalIndTPM,
'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('The t-test TPM(related) result is: ', statRelTPM, 'with a p-value of: ', pvalRelTPM,
'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('The Mann-Whitney TPM result is: ', statMWTPM, 'with a p-value of: ', pvalMWTPM,
'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('\n ')
statInd, pvalInd = ss.ttest_ind(hist_list[ind1], hist_list[ind2])
statRel, pvalRel = ss.ttest_rel(hist_list[ind1], hist_list[ind2])
statMW, pvalMW = ss.mannwhitneyu(hist_list[ind1], hist_list[ind2])
print('The t-test (independent) result is: ', statInd, 'with a p-value of: ', pvalInd,
'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('The t-test (related) result is: ', statRel, 'with a p-value of: ', pvalRel, 'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('The Mann-Whitney result is: ', statMW, 'with a p-value of: ', pvalMW, 'for the comparison: ' + types[ind1] + ' vs. ' + types[ind2])
print('\n ')
| jrudascas/brain_lab | projects/qm_brain/TPM/avg_tpm_stat.py | avg_tpm_stat.py | py | 2,991 | python | en | code | 2 | github-code | 13 |
37086500206 | from typing import Union
import torch
from e3nn import o3
from e3nn.util.jit import compile_mode
from nequip.data import AtomicDataDict
from nequip.nn import GraphModuleMixin
@compile_mode("script")
class EdgeSymmetricEmbedding(GraphModuleMixin, torch.nn.Module):
"""Construct edge attrs as a concatenation of a sum and difference of node-level representations.
Args:
out_field (str, default: AtomicDataDict.EDGE_ATTRS_KEY: data/irreps field
"""
out_field: str
def __init__(
self,
num_types,
irreps_in=None,
out_field: str = AtomicDataDict.EDGE_ATTRS_KEY,
):
super().__init__()
self.num_types = num_types
self.out_field = out_field
out_irrep = o3.Irreps([(2*num_types, (0, 1))])
self._init_irreps(
irreps_in=irreps_in,
irreps_out={out_field: out_irrep},
)
def forward(self, data: AtomicDataDict.Type) -> AtomicDataDict.Type:
one_hot = data[AtomicDataDict.NODE_ATTRS_KEY]
edge_index = data[AtomicDataDict.EDGE_INDEX_KEY]
ti = one_hot[edge_index[0]]
tj = one_hot[edge_index[1]]
plus = ti + tj
minus = ti - tj
data[self.out_field] = torch.cat([plus, minus], axis=-1)
return data
| klarh/gala-nequip-plugin | gala_nequip_plugin/nn/embedding/EdgeSymmetricEmbedding.py | EdgeSymmetricEmbedding.py | py | 1,293 | python | en | code | 1 | github-code | 13 |
40366557176 | import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
from absl import app
from absl import flags
import os
flags.DEFINE_integer(
'step', 10000, """num_steps""")
flags.DEFINE_integer(
'batch', 8, """batch size""")
flags.DEFINE_integer(
'num_class', 3, """Red, Blue, and Yellow ball are the defaults(3 classes)""")
FLAGS = flags.FLAGS
def main(argv):
pipeline = pipeline_pb2.TrainEvalPipelineConfig()
config_path = '/src/models/research/object_detection/samples/configs/ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config'
with tf.gfile.GFile(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline)
pipeline.train_input_reader.tf_record_input_reader.input_path[:] = ['/src/models/research/train_data/*.tfrecord']
pipeline.train_input_reader.label_map_path = '/src/pretrained_model/pbtxt/tf_label_map.pbtxt'
pipeline.eval_input_reader[0].tf_record_input_reader.input_path[:] = ['/src/models/research/val_data/*.tfrecord']
pipeline.eval_input_reader[0].label_map_path = '/src/pretrained_model/pbtxt/tf_label_map.pbtxt'
pipeline.train_config.fine_tune_checkpoint = '/src/pretrained_model/ssdlite_mobiledet_edgetpu_320x320_coco_2020_05_19/fp32/model.ckpt'
pipeline.train_config.batch_size = FLAGS.batch
pipeline.train_config.num_steps = FLAGS.step
pipeline.model.ssd.num_classes = FLAGS.num_class
# Enable ssdlite, this should already be enabled in the config we downloaded, but this is just to make sure.
pipeline.model.ssd.box_predictor.convolutional_box_predictor.kernel_size = 3
pipeline.model.ssd.box_predictor.convolutional_box_predictor.use_depthwise = True
pipeline.model.ssd.feature_extractor.use_depthwise = True
# Quantization Aware Training
pipeline.graph_rewriter.quantization.delay = 0
pipeline.graph_rewriter.quantization.weight_bits = 8
pipeline.graph_rewriter.quantization.activation_bits = 8
config_text = text_format.MessageToString(pipeline)
with tf.gfile.Open(config_path, "wb") as f:
f.write(config_text)
if __name__ == '__main__':
app.run(main)
| Dansato1203/TFLite_ObjectDetector | src/fix_pipeline.py | fix_pipeline.py | py | 2,091 | python | en | code | 1 | github-code | 13 |
69837490579 | import math
import sys
import numpy as np
from hexgames.hexGrid import HexGrid
from simWorld import SimWorld
from actor_and_critic import *
import random
import time
import matplotlib.pyplot as plt
class Agent:
def __init__(self, env, actor, critic, epsilon = 0.5):
self.epsilon = epsilon
self.simWorld = env
self.critic = critic
self.actor = actor
def choose_action(self):
state = self.simWorld.createSimpleState()
actions = self.simWorld.getValidActions()
choice = self.actor.predict(state, actions, self.epsilon)
return choice
def doAction(self, action):
#Gets in a number action which maps to the action we wanna take
actions = self.simWorld.getValidActions()
act = actions[action]
if not (self.simWorld.createSimpleState(), action) in self.actor.policy:
self.actor.policy[self.simWorld.createSimpleState(), action] = random.random()/10
reward = self.simWorld.step(act, self.actor.policy[self.simWorld.createSimpleState(), action])
start = time.time()
self.simWorld.render()
end = time.time()
# if end-start > 0.001:
# print(end-start)
return reward
| Brakahaugen/Peg-solitaire-RL | agent.py | agent.py | py | 1,262 | python | en | code | 0 | github-code | 13 |
70148065617 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# from ..utils import *
from daakg.sampling import typed_sampling
class Decoder(nn.Module):
def __init__(self, name, params):
super(Decoder, self).__init__()
self.print_name = name
if name.startswith("[") and name.endswith("]"):
self.name = name[1:-1]
else:
self.name = name
p = 1 if params["train_dist"] == "manhattan" else 2
transe_sp = True if params["train_dist"] == "normalize_manhattan" else False
self.feat_drop = params["feat_drop"]
self.k = params["k"]
self.alpha = params["alpha"]
self.margin = params["margin"]
self.boot = params["boot"]
if self.name == "mtranse_align":
self.func = Alignment(p=p, dim=params["dim"])
elif self.name == "transe":
self.func = TransE(p=p, feat_drop=self.feat_drop, transe_sp=transe_sp)
elif self.name == "rotate":
self.func = RotatE(p=p, feat_drop=self.feat_drop, dim=params["dim"], params=self.margin)
else:
raise NotImplementedError("bad decoder name: " + self.name)
if params["sampling"] == "T":
# self.sampling_method = multi_typed_sampling
self.sampling_method = typed_sampling
elif params["sampling"] == "N":
self.sampling_method = nearest_neighbor_sampling
elif params["sampling"] == "R":
self.sampling_method = random_sampling
elif params["sampling"] == ".":
self.sampling_method = None
# elif params["sampling"] == "SLEF-DESIGN":
# self.sampling_method = SLEF-DESIGN_sampling
else:
raise NotImplementedError("bad sampling method: " + self.sampling_method)
if hasattr(self.func, "loss"):
self.loss = self.func.loss
else:
self.loss = nn.MarginRankingLoss(margin=self.margin)
if hasattr(self.func, "mapping"):
self.mapping = self.func.mapping
def forward(self, ins_emb, rel_emb, sample):
if type(ins_emb) == tuple:
ins_emb, weight = ins_emb
rel_emb_ = torch.matmul(rel_emb, weight)
else:
rel_emb_ = rel_emb
func = self.func if self.sampling_method else self.func.only_pos_loss
if self.name in ["align", "mtranse_align"]:
return func(ins_emb[sample[:, 0]], ins_emb[sample[:, 1]])
elif self.name == "n_r_align":
nei_emb, ins_emb = ins_emb, rel_emb
return func(ins_emb[sample[:, 0]], ins_emb[sample[:, 1]], nei_emb[sample[:, 0]], nei_emb[sample[:, 1]])
# elif self.name == "SLEF-DESIGN":
# '''SLEF-DESIGN: special decoder forward'''
else:
return func(ins_emb[sample[:, 0]], rel_emb_[sample[:, 1]], ins_emb[sample[:, 2]])
def __repr__(self):
return '{}(name={}): {}'.format(self.__class__.__name__, self.print_name, self.func.__repr__())
class Alignment(nn.Module):
def __init__(self, p, dim, orth=False):
super(Alignment, self).__init__()
self.p = p
self.weight = nn.Parameter(torch.Tensor(dim, dim))
self.orth = orth
if self.orth:
nn.init.orthogonal_(self.weight)
self.I = nn.Parameter(torch.eye(dim), requires_grad=False)
def forward(self, e1, e2):
return - torch.norm(torch.matmul(e1, self.weight) - e2, p=self.p, dim=1)
def mapping(self, emb):
return torch.matmul(emb, self.weight)
def only_pos_loss(self, e1, e2):
if self.p == 1:
map_loss = torch.sum(torch.abs(torch.matmul(e1, self.weight) - e2), dim=1).sum()
else:
map_loss = torch.sum(torch.pow(torch.matmul(e1, self.weight) - e2, 2), dim=1).sum()
orthogonal_loss = torch.pow(torch.matmul(self.weight, self.weight.t()) - self.I, 2).sum(dim=1).sum(dim=0)
return map_loss + orthogonal_loss
def __repr__(self):
return '{}(mode={})'.format(self.__class__.__name__, self.mode)
class TransE(nn.Module):
def __init__(self, p, feat_drop, transe_sp=False):
super(TransE, self).__init__()
self.p = p
self.feat_drop = feat_drop
self.transe_sp = transe_sp
def forward(self, e1, r, e2):
e1 = F.dropout(e1, p=self.feat_drop, training=self.training)
if self.transe_sp:
pred = - F.normalize(e1 + r - e2, p=2, dim=1).sum(dim=1)
else:
pred = - torch.norm(e1 + r - e2, p=self.p, dim=1)
return pred
def only_pos_loss(self, e1, r, e2):
e1 = F.dropout(e1, p=self.feat_drop, training=self.training)
if self.p == 1:
return torch.sum(torch.abs(e1 + r - e2), dim=1).sum()
else:
return torch.sum(torch.pow(e1 + r - e2, 2), dim=1).sum()
def uncertainty(self, e1, r, e2):
return 0
def projection(self, e1, r, e2):
return r
class RotatE(nn.Module):
def __init__(self, p, feat_drop, dim, params=None):
super(RotatE, self).__init__()
# self.p = p
self.feat_drop = feat_drop
self.margin = params
self.rel_range = (self.margin + 2.0) / (dim / 2)
self.pi = 3.14159265358979323846
def forward(self, e1, r, e2):
e1 = F.dropout(e1, p=self.feat_drop, training=self.training)
re_head, im_head = torch.chunk(e1, 2, dim=1)
re_tail, im_tail = torch.chunk(e2, 2, dim=1)
r = r / (self.rel_range / self.pi)
re_relation = torch.cos(r)
im_relation = torch.sin(r)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
pred = score.norm(dim=0).sum(dim=-1)
return pred
def loss(self, pos_score, neg_score, target):
return - (F.logsigmoid(self.margin - pos_score) + F.logsigmoid(neg_score - self.margin)).mean()
def uncertainty(self, e1, r, e2):
return 0
def projection(self, e1, r, e2):
re_head, im_head = torch.chunk(e1, 2, dim=1)
re_relation = torch.cos(r)
im_relation = torch.sin(r)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_head
im_score = im_score - im_head
score = torch.stack([re_score, im_score], dim=0)
return score
class MLP(nn.Module):
def __init__(self, act=torch.relu, hiddens=[], l2_norm=False):
super(MLP,self).__init__()
self.hiddens = hiddens
self.fc_layers = nn.ModuleList()
self.num_layers = len(self.hiddens) - 1
self.activation = act
self.l2_norm = l2_norm
for i in range(self.num_layers):
self.fc_layers.append(nn.Linear(self.hiddens[i], self.hiddens[i+1]))
def forward(self, e):
for i, fc in enumerate(self.fc_layers):
if self.l2_norm:
e = F.normalize(e, p=2, dim=1)
e = fc(e)
if i != self.num_layers-1:
e = self.activation(e)
return e | nju-websoft/DAAKG | daakg/model/decoder.py | decoder.py | py | 7,346 | python | en | code | 3 | github-code | 13 |
44407873101 | # -*- coding: utf-8 -*-
"""Tricks for defining numeric types
Routine Listings
----------------
The following return mixin classes for defining numeric operators / functions:
convert_mixin
Methods for conversion to `complex`, `float`, `int`.
ordered_mixin
Comparison operators
mathops_mixin
Arithmetic operators
rounder_mixin
Rounding and modular arithmetic
number_mixin
Union of all the above.
bitwise_mixin
Bit-wise operators
integr_mixin
Union of the two above.
imaths_mixin
Inplace arithmetic operators
iround_mixin
Inplace rounding and modular arithmetic
inumbr_mixin
Union of the two above with `number_like_mixin`.
ibitws_mixin
Inplace bit-wise operators
iintgr_mixin
Union of the two above with `int_like_mixin`.
The following return functions to wrap methods and functions with type
conversion:
in_method_wrapper
Decorator to wrap a method whose outputs do not require conversion
one_method_wrapper
Decorator to wrap a method whose outputs do require conversion
inr_methods_wrapper
    Decorator to turn one function into two magic methods - forward, reverse -
    whose outputs do not require conversion
opr_methods_wrapper
    Decorator to turn one function into two magic methods - forward, reverse -
    whose outputs do require conversion
iop_method_wrapper
    Decorator to wrap an inplace magic method
function_decorators
    Two decorators to wrap functions whose outputs do/do not require conversion
set_objclasses
Finalises mehod wrappers after class definition.
Notes
-----
You should call `set_objclasses(class)` after defining `class`,
especially if you used any of `one_method_wrapper`, `opr_methods_wrapper`.
The `__objclass__` attribute is needed to convert the outputs back.
To define all the abstracts for `numbers.Complex`:
subclass `mathops_mixin(...)`,
define `__complex__` or subclass `convert_mixin(...)`,
define `conjugate()` and properties `real` and `imag`,
To define all the abstracts for `numbers.Real`:
subclass `mathops_mixin(...)`, `ordered_mixin(...)`, `rounder_mixin(...)`,
define `__float__` or subclass `convert_mixin(...)`.
To define all the abstracts for `numbers.Integral`:
subclass `bitwise_mixin(...)` and everything listed for `numbers.Real`,
define `__int__` or subclass `convert_mixin(...)`.
"""
from __future__ import annotations
import builtins
import math
import operator
import typing
from functools import wraps
from numbers import Integral, Number, Real
from types import new_class
from typing import Any, Callable, Optional, Tuple, Type
from .arg_tricks import default, eval_or_default
from .containers import InstanceOrSeq, Val, Var, tuplify, unseqify
from .numeric_protocols import (
Arithmetic, BitWise, Convertible, InplaceArithmetic, InplaceBitWise,
InplaceRoundable, Ordered, Roundable)
# =============================================================================
# Wrapper helpers
# =============================================================================
def _multi_conv(out_type: Type[Obj], result: InstOrTup[Number],
                types: TypeArg = None) -> InstOrTup[Obj]:
    """Convert one output, or a tuple of outputs, back to the wrapping class.

    Parameters
    ----------
    out_type : Type
        Constructor used to convert a single output.
    result : Number or Iterable[Number]
        A single output, or an iterable of them. `NotImplemented` is passed
        through untouched.
    types : Type or Tuple[Type] or None
        Only outputs of these types are converted; anything else is passed
        through unchanged. By default `None` -> `numbers.Number`.
    """
    if result is NotImplemented:
        return result
    convertible = default(types, Number)

    def _convert_one(value):
        # wrap recognised outputs, leave everything else as is
        return out_type(value) if isinstance(value, convertible) else value

    return unseqify(tuple(_convert_one(value) for value in tuplify(result)))
def _implement_in(func: Func, args: Tuple[Other, ...], conv: Conv) -> Number:
"""Implement method for class, converting inputs, leaving outputs as is.
Parameters
----------
func : Callable
The function being wrapped.
args : Tuple[types]
The inputs to the function being wrapped.
conv : Callable
Function used to convert a tuple of inputs.
"""
try:
return func(*conv(args))
except TypeError:
return NotImplemented
# -----------------------------------------------------------------------------
# Method wrapper helpers
# -----------------------------------------------------------------------------
def _implement_op(func: Func, args: Tuple[Other, ...], conv: Conv,
                  method: Operator, types: TypeArg = None) -> InstOrTup[Obj]:
    """Implement an operator: convert the inputs, call, convert the outputs.

    Parameters
    ----------
    func : Callable
        The function being wrapped.
    args : Tuple[types]
        The inputs to the function being wrapped.
    conv : Callable
        Function used to convert a tuple of inputs.
    method
        The resulting method; its `__objclass__` attribute names the class
        that the outputs are converted back to.
    types : Type or Tuple[Type] or None
        The types of output that should be converted.
    """
    raw = _implement_in(func, args, conv)
    return _multi_conv(method.__objclass__, raw, types)
def _impl_mutable_iop(func: Func, args: Tuple[Other, ...], conv: Conv) -> Obj:
    """Implement an in-place operator for a class with mutable data.

    The wrapped function is expected to update `args[0]` in place, so the
    first operand itself is returned on success.

    Parameters
    ----------
    func : Callable
        The function being wrapped.
    args : Tuple[types]
        The inputs to the function being wrapped.
    conv : Callable
        Function used to convert a tuple of inputs.
    """
    outcome = _implement_in(func, args, conv)
    if outcome is NotImplemented:
        return outcome
    return args[0]
def _implement_iop(func: Func, args: Tuple[Other, ...], conv: Conv,
                   attr: str = '') -> Obj:
    """Implement an in-place operator for a class.

    Parameters
    ----------
    func : Callable[Number,... -> Number,...]
        The function being wrapped.
    args : Tuple[types]
        The inputs to the function being wrapped.
    conv : Callable[Obj->Number]
        Function used to convert a tuple of inputs.
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data; empty when the data is mutable.
    """
    if not attr:
        # mutable data: the wrapped function updates args[0] itself
        return _impl_mutable_iop(func, args, conv)
    outcome = _implement_in(func, args, conv)
    if outcome is NotImplemented:
        return outcome
    # attr holds immutable data (like numbers): store the fresh value on
    # the first operand instead of mutating in place
    setattr(args[0], attr, outcome)
    return args[0]
def _magic_name(func: Func, prefix: Optional[str] = None) -> str:
    """Build the dunder name for `func`: 'add' -> '__add__', or '__radd__'
    when `prefix` is 'r'. Existing underscores are stripped first."""
    pre = default(prefix, '')
    return '__{}{}__'.format(pre, func.__name__.strip('_'))
# =============================================================================
# Setting method __objclass__
# =============================================================================
def _add_set_objclass(*meth: typing.Union[Wrapped, Operator]) -> None:
"""label method wrapper to set __objclass__ later.
"""
for mth in meth:
mth.__objclass__ = type(None)
def set_objclasses(objclass: type) -> None:
    """Fill in the `__objclass__` attribute of tagged wrapper methods.

    Must be called immediately after the class definition: the wrapped
    methods use `__objclass__` to convert their outputs back to the class.

    Parameters
    ----------
    objclass
        The class whose attributes are scanned. Every method previously
        tagged by `_add_set_objclass` (placeholder `type(None)`) has its
        `__objclass__` replaced by `objclass`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_methods_wrapper`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    for name in dir(objclass):
        candidate = getattr(objclass, name)
        # only overwrite the sentinel left by _add_set_objclass
        if getattr(candidate, '__objclass__', None) is type(None):
            candidate.__objclass__ = objclass
def dummy_method(name: str) -> Callable:
    """Return a stub operator that always returns `NotImplemented`.

    Assigning such a stub effectively removes an unwanted operator from a
    mixin while still allowing Python's fallbacks, whereas setting the slot
    to `None` would forbid the operation outright.
    """
    def _stub(*_args, **_kwds):
        """Placeholder operator that opts out of the operation."""
        return NotImplemented

    _stub.__name__ = name
    return _stub
# =============================================================================
# Method wrappers
# =============================================================================
# Attribute names copied from the wrapped function by `functools.wraps`.
# The `_N` variant omits '__name__' because the magic-method wrappers below
# assign their own dunder names (via `_magic_name`) after wrapping.
WRAPPER_ASSIGNMENTS_N = ('__doc__', '__annotations__', '__text_signature__')
WRAPPER_ASSIGNMENTS = ('__name__',) + WRAPPER_ASSIGNMENTS_N
def in_method_wrapper(conv: Conv) -> Wrapper:
    """Make a method decorator that converts inputs, passing outputs through.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.

    Returns
    -------
    wrap_input : Callable
        A decorator for a method: the method's inputs are pre-converted
        and its outputs are returned as is.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_methods_wrapper`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    def wrap_input(func: Func) -> Wrapped:
        """Decorate a method: inputs are pre-converted, outputs untouched."""
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def wrapped(*args) -> Number:
            return _implement_in(func, args, conv)

        _add_set_objclass(wrapped)
        wrapped.__name__ = _magic_name(func)
        return wrapped
    return wrap_input
def inr_methods_wrapper(conv: Conv) -> Wrappers:
    """Make a decorator producing a forward/reverse operator pair that
    converts inputs, passing outputs through.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.

    Returns
    -------
    wrap_operators : Callable
        A decorator for a method, returning two methods - the forward and
        reverse operators - whose inputs are pre-converted and whose
        outputs are returned as is.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_methods_wrapper`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    def wrap_operators(func: Func) -> Tuple[Wrapped, Wrapped]:
        """Turn one function into a (forward, reverse) operator pair with
        pre-converted inputs and untouched outputs.
        """
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def fwd(*args) -> Number:
            return _implement_in(func, args, conv)

        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def rev(*args) -> Number:
            # reverse operator: operands arrive swapped
            return _implement_in(func, reversed(args), conv)

        fwd.__name__ = _magic_name(func)
        rev.__name__ = _magic_name(func, 'r')
        _add_set_objclass(fwd, rev)
        return fwd, rev
    return wrap_operators
def one_method_wrapper(conv: Conv, types: TypeArg = None) -> OpWrapper:
    """Make a method decorator converting both inputs and outputs.

    Conversion back to the class uses the method's `__objclass__`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None
        The types of output that should be converted.

    Returns
    -------
    wrapper : Callable
        A decorator for a method: the method's inputs are pre-converted
        and its outputs are post-converted.

    Notes
    -----
    You must call `set_objclasses(class)` after defining `class` if you used
    any of these wrappers. The `__objclass__` attribute is needed to convert
    the outputs back.
    """
    def wrap_in_out(func: Func) -> Operator:
        """Decorate a method: inputs pre-converted, outputs post-converted."""
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def wrapped(*args) -> Obj:
            # wrapped refers to itself so _implement_op can read __objclass__
            return _implement_op(func, args, conv, wrapped, types)

        wrapped.__name__ = _magic_name(func)
        _add_set_objclass(wrapped)
        return wrapped
    return wrap_in_out
def opr_methods_wrapper(conv: Conv, types: TypeArg = None) -> OpsWrapper:
    """Make a decorator producing a forward/reverse operator pair that
    converts both inputs and outputs.

    Conversion back to the class uses the methods' `__objclass__`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None
        The types of output that should be converted.

    Returns
    -------
    wrap_operators : Callable
        A decorator for a method, returning two methods - the forward and
        reverse operators - with pre-converted inputs and post-converted
        outputs.

    Notes
    -----
    You must call `set_objclasses(class)` after defining `class` if you used
    any of these wrappers. The `__objclass__` attribute is needed to convert
    the outputs back.
    """
    def wrap_operators(func: Func) -> Tuple[Operator, Operator]:
        """Turn one function into a (forward, reverse) operator pair with
        pre-converted inputs and post-converted outputs.
        """
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def fwd(*args) -> InstOrTup[Obj]:
            return _implement_op(func, args, conv, fwd, types)

        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def rev(*args) -> InstOrTup[Obj]:
            # reverse operator: operands arrive swapped
            return _implement_op(func, reversed(args), conv, rev, types)

        fwd.__name__ = _magic_name(func)
        rev.__name__ = _magic_name(func, 'r')
        _add_set_objclass(fwd, rev)
        return fwd, rev
    return wrap_operators
def iop_method_wrapper(conv: Conv, attr: str = '') -> OpWrapper:
    """Make a decorator for an in-place operator.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    attr : str, optional
        Name of the attribute updated by in-place operations on immutable
        data, or `''` (the default) when the data is mutable and the
        wrapped function updates it itself.

    Returns
    -------
    wrap_inplace : Callable
        A decorator for a method, returning one in-place operator whose
        inputs are pre-converted and whose output is written back to `attr`
        when necessary.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_methods_wrapper`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    # with an immutable attribute the dunder name gets the 'i' prefix
    prefix = eval_or_default(attr, lambda x: 'i', '')

    def wrap_inplace(func: Func) -> Operator:
        """Decorate a method as an in-place operator: inputs are
        pre-converted and the result is stored on `attr` if needed.
        """
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS_N)
        def inplace(*args) -> Obj:
            return _implement_iop(func, args, conv, attr)

        inplace.__name__ = _magic_name(func, prefix)
        _add_set_objclass(inplace)
        return inplace
    return wrap_inplace
# =============================================================================
# Function wrappers
# =============================================================================
def function_decorators(conv: Conv, class_out: Type[Obj], types: TypeArg = None
                        ) -> Tuple[Wrapper, OpWrapper]:
    """Make decorators converting function inputs/outputs from/to a class.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    class_out : type
        The class that outputs should be converted to.
    types : Type or Tuple[Type] or None
        The types of output that should be converted.

    Returns
    -------
    wrap_input : Callable
        A decorator for a function: inputs are pre-converted, outputs are
        returned as is.
    wrap_in_out : Callable
        A decorator for a function: inputs are pre-converted and, when any
        argument is a `class_out` instance, outputs are post-converted.
    """
    def wrap_input(func: Func) -> Wrapped:
        """Wrap a function that returns another type."""
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS)
        def fun_input(*args) -> Number:
            return _implement_in(func, args, conv)
        return fun_input

    def wrap_in_out(func: Func) -> Operator:
        """Wrap a function to return `class_out` or Number."""
        @wraps(func, assigned=WRAPPER_ASSIGNMENTS)
        def fun_out(*args) -> InstOrTup[Obj]:
            raw = _implement_in(func, args, conv)
            # only convert back when the caller actually passed our class in
            if any(isinstance(arg, class_out) for arg in args):
                return _multi_conv(class_out, raw, types)
            return raw
        return fun_out

    return wrap_input, wrap_in_out
# =============================================================================
# Mixins
# =============================================================================
def convert_mixin(conv: Conv, names: Any = None) -> Type[Convertible]:
    """Mixin class for conversion to number types.

    Defines `__complex__`, `__float__`, `__int__` and `__index__`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    names : Any, optional
        Namespace with function attributes {'complex', `float`, `int`}.
        By default `None -> builtins`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_method_wrappers`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, builtins)
    wrap = in_method_wrapper(conv)

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the conversion dunder methods."""
        # __index__ deliberately reuses the `int` converter.
        for dunder, func in (('__complex__', space.complex),
                             ('__float__', space.float),
                             ('__int__', space.int),
                             ('__index__', space.int)):
            nsp[dunder] = wrap(func)

    return new_class('ConvertibleMixin', exec_body=exec_body)
def ordered_mixin(conv: Conv, names: Any = None) -> Type[Ordered]:
    """Mixin class for arithmetic comparisons.

    Defines all comparison operators: `==`, `!=`, `<`, `<=`, `>`, `>=`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    names : Any, optional
        Namespace with function attributes {'eq', `ne`, `lt`, 'le', `gt`, `ge`}.
        By default `None -> operator`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_method_wrappers`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    wrap = in_method_wrapper(conv)
    # functools.total_ordering is deliberately not used: it is not nan friendly.

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the six comparison dunders."""
        for opname in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
            nsp['__' + opname + '__'] = wrap(getattr(space, opname))

    return new_class('OrderedMixin', exec_body=exec_body)
def mathops_mixin(conv: Conv, types: TypeArg = None, names: Any = None
                  ) -> Type[Arithmetic]:
    """Mixin class to mimic arithmetic number types.

    Defines `+`, `-`, `*`, `/`, `**`, `==`, `!=` and the functions
    `pow`, `abs`.  Operators `//`, `%` live in `rounder_mixin`; `<`, `<=`,
    `>`, `>=` in `ordered_mixin`; `<<`, `>>`, `&`, `^`, `|`, `~` in
    `bit_twiddle_mixin`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that will be converted. By default `None -> Number`
    names : Any, optional
        Namespace with function attributes {'eq', `ne`, `add`, `sub`, `mul`,
        `truediv`, `pow`, `neg`, `pos`, `abs`}. By default `None -> operator`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    cmp_wrap = in_method_wrapper(conv)
    one_wrap = one_method_wrapper(conv, types)
    pair_wrap = opr_methods_wrapper(conv, types)

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the arithmetic dunder methods."""
        nsp['__eq__'] = cmp_wrap(space.eq)
        nsp['__ne__'] = cmp_wrap(space.ne)
        # Binary operators come with a reflected partner (__r<op>__).
        for opname in ('add', 'sub', 'mul', 'truediv', 'pow'):
            fwd, rev = pair_wrap(getattr(space, opname))
            nsp['__' + opname + '__'] = fwd
            nsp['__r' + opname + '__'] = rev
        for opname in ('neg', 'pos', 'abs'):
            nsp['__' + opname + '__'] = one_wrap(getattr(space, opname))

    return new_class('ArithmeticMixin', exec_body=exec_body)
def rounder_mixin(conv: Conv, types: TypeArg = None, names: Any = None
                  ) -> Type[Roundable]:
    """Mixin class for rounding/modular routines.

    Defines `%`, `//` and the functions `divmod`, `round`,
    `math.floor,ceil,trunc`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that are converted. By default `None -> Number`.
    names : Any or Tuple[Any, ...], optional
        Name spaces `(opspace, defspace, mathspace)`. A single namespace is
        expanded to 3. By default `None -> (operator, builtins, math)`.
        opspace needs {`floordiv`, `mod`}; defspace needs {`divmod`, `round`};
        mathspace needs {`trunc`, `floor`, `ceil`}.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    opspace, defspace, mathspace = tuplify(
        default(names, (operator, builtins, math)), 3)
    one_wrap = one_method_wrapper(conv, types)
    pair_wrap = opr_methods_wrapper(conv, types)

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the rounding/modular dunders."""
        nsp['__floordiv__'], nsp['__rfloordiv__'] = pair_wrap(opspace.floordiv)
        nsp['__mod__'], nsp['__rmod__'] = pair_wrap(opspace.mod)
        nsp['__divmod__'], nsp['__rdivmod__'] = pair_wrap(defspace.divmod)
        nsp['__round__'] = one_wrap(defspace.round)
        for fname in ('trunc', 'floor', 'ceil'):
            nsp['__' + fname + '__'] = one_wrap(getattr(mathspace, fname))

    return new_class('RoundableMixin', exec_body=exec_body)
def bitwise_mixin(conv: Conv, types: TypeArg = None, names: Any = None
                  ) -> Type[BitWise]:
    """Mixin class to mimic bit-string types.

    Defines all bit-wise operators: `<<`, `>>`, `&`, `^`, `|`, `~`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that will be converted. By default `None -> Number`
    names : Any, optional
        Namespace with function attributes {'lshift', `rshift`, `and_`,
        `xor`, `or_`, `invert`}. By default `None -> operator`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    one_wrap = one_method_wrapper(conv, types)
    pair_wrap = opr_methods_wrapper(conv, types)

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the bit-wise dunder methods."""
        # (namespace attribute, dunder stem): `and_`/`or_` carry a trailing
        # underscore in `operator` because they are Python keywords.
        for attr_name, stem in (('lshift', 'lshift'), ('rshift', 'rshift'),
                                ('and_', 'and'), ('xor', 'xor'), ('or_', 'or')):
            fwd, rev = pair_wrap(getattr(space, attr_name))
            nsp['__' + stem + '__'] = fwd
            nsp['__r' + stem + '__'] = rev
        nsp['__invert__'] = one_wrap(space.invert)

    return new_class('BitTwiddleMixin', exec_body=exec_body)
# -----------------------------------------------------------------------------
# Inplace Mixins
# -----------------------------------------------------------------------------
def imaths_mixin(conv: Conv, attr: str = '', names: Any = None
                 ) -> Type[InplaceArithmetic]:
    """Mixin class for in-place updaters to mimic arithmetic number types.

    Defines `+=`, `-=`, `*=`, `/=`, `**=`.  Operators `//=`, `%=` are in
    `iround_mixin`; `<<=`, `>>=`, `&=`, `^=`, `|=` are in `ibitws_mixin`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data, or `None` for mutable data. By default `''`.
    names : Any, optional
        Namespace with function attributes:
        immutable data - {`add`, `sub`, `mul`, `truediv`, `pow`};
        mutable data - {`iadd`, `isub`, `imul`, `itruediv`, `ipow`}.
        By default `None -> operator`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_method_wrappers`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    wrap = iop_method_wrapper(conv, attr)
    # Mutable data (attr is None) uses operator's in-place variants, e.g. iadd.
    stem = 'i' if attr is None else ''

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the in-place arithmetic dunders."""
        for opname in ('add', 'sub', 'mul', 'truediv', 'pow'):
            nsp['__i' + opname + '__'] = wrap(getattr(space, stem + opname))

    return new_class('IArithmeticMixin', exec_body=exec_body)
def iround_mixin(conv: Conv, attr: str = '', names: Any = None
                 ) -> Type[InplaceRoundable]:
    """Mixin class for in-place updaters with rounding/modular routines.

    Defines `%=` and `//=`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data, or `None` for mutable data. By default `''`.
    names : Any, optional
        Namespace with function attributes:
        immutable data - {'floordiv', `mod`};
        mutable data - {'ifloordiv', `imod`}.
        By default `None -> operator`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_method_wrappers`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    wrap = iop_method_wrapper(conv, attr)
    # Mutable data (attr is None) uses operator's in-place variants.
    stem = 'i' if attr is None else ''

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the in-place rounding dunders."""
        for opname in ('floordiv', 'mod'):
            nsp['__i' + opname + '__'] = wrap(getattr(space, stem + opname))

    return new_class('IRoundableMixin', exec_body=exec_body)
def ibitws_mixin(conv: Conv, attr: str = '', names: Any = None
                 ) -> Type[InplaceBitWise]:
    """Mixin class for in-place updaters to mimic bit-string types.

    Defines the bit-wise updaters: `<<=`, `>>=`, `&=`, `^=`, `|=`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data, or `None` for mutable data. By default `''`.
    names : Any, optional
        Namespace with function attributes:
        immutable data - {'lshift', `rshift`, `and_`, `xor`, `or_`};
        mutable data - {'ilshift', `irshift`, `iand`, `ixor`, `ior`}.
        By default `None -> operator`.

    Notes
    -----
    You should call `set_objclasses(class)` after defining `class`,
    especially if you used any of `one_method_wrapper`, `opr_method_wrappers`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    space = default(names, operator)
    wrap = iop_method_wrapper(conv, attr)
    mutable = attr is None
    # Mutable data uses `ilshift`/`iand`/... ; immutable uses `lshift`/`and_`/...
    stem = 'i' if mutable else ''
    tail = '' if mutable else '_'

    def exec_body(nsp: dict) -> None:
        """Populate the namespace with the in-place bit-wise dunders."""
        nsp['__ilshift__'] = wrap(getattr(space, stem + 'lshift'))
        nsp['__irshift__'] = wrap(getattr(space, stem + 'rshift'))
        nsp['__iand__'] = wrap(getattr(space, stem + 'and' + tail))
        nsp['__ixor__'] = wrap(getattr(space, stem + 'xor'))
        nsp['__ior__'] = wrap(getattr(space, stem + 'or' + tail))

    return new_class('IBitTwiddleMixin', exec_body=exec_body)
# -----------------------------------------------------------------------------
# Combined Mixins
# -----------------------------------------------------------------------------
def number_mixin(conv: Conv, types: TypeArg = None, names: Any = None
                 ) -> Type[Real]:
    """Mixin class to mimic number types.

    Defines all operators and the functions `complex`, `float`, `int`,
    `pow`, `abs`, `divmod`, 'round', `math.floor,ceil,trunc`, *except for*
    the bit-wise and the in-place operators.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that will be converted. By default `None -> Number`
    names : Any or Tuple[Any, ...], optional
        For `convert_mixin, ordered_mixin, mathops_mixin, rounder_mixin`.
        A single namespace is expanded to 4. By default `None`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    convspace, ordspace, mathspace, rndspace = tuplify(names, 4)
    parents = (
        convert_mixin(conv, convspace),
        ordered_mixin(conv, ordspace),
        mathops_mixin(conv, types, mathspace),
        rounder_mixin(conv, types, rndspace),
    )
    return new_class('NumberLikeMixin', bases=parents)
def integr_mixin(conv: Conv, types: TypeArg = None, names: Any = None
                 ) -> Type[Integral]:
    """Mixin class to mimic integer types.

    Defines all operators and the functions `complex`, `float`, `int`,
    `pow`, `abs`, `divmod`, 'round', `math.floor,ceil,trunc`, *except for*
    the in-place operators.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that will be converted. By default `None -> Number`
    names : Any or Tuple[Any, ...], optional
        For `number_mixin, bitwise_mixin`. A single namespace is expanded
        to 2. By default `None`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    numspace, bitspace = tuplify(names, 2)
    parents = (
        number_mixin(conv, types, numspace),
        bitwise_mixin(conv, types, bitspace),
    )
    return new_class('IntegerLikeMixin', bases=parents)
def inumbr_mixin(conv: Conv, types: TypeArg = None, attr: str = '',
                 names: Any = None) -> Type[Real]:
    """Mixin class to mimic number types, with in-place operators.

    Defines all operators and the functions `complex`, `float`, `int`,
    `pow`, `abs`, `divmod`, 'round', `math.floor,ceil,trunc`, *except for*
    the bit-wise operators.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None, optional
        The types of output that will be converted. By default `None -> Number`
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data, or `None` for mutable data. By default `''`.
    names : Any or Tuple[Any, ...], optional
        For `number_mixin, imaths_mixin, iround_mixin`. A single namespace
        is expanded to 3. By default `None`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    numspace, imspace, irndspace = tuplify(names, 3)
    parents = (
        number_mixin(conv, types, numspace),
        imaths_mixin(conv, attr, imspace),
        iround_mixin(conv, attr, irndspace),
    )
    return new_class('NumberLikeMixin', bases=parents)
def iintgr_mixin(conv: Conv, types: TypeArg = None, attr: str = '',
                 names: Any = None) -> Type[Integral]:
    """Mixin class to mimic integer types, with in-place operators.

    Defines all operators and the functions `complex`, `float`, `int`,
    `pow`, `abs`, `divmod`, 'round', `math.floor,ceil,trunc`.

    Parameters
    ----------
    conv : Callable
        Function used to convert a tuple of inputs.
    types : Type or Tuple[Type] or None
        The types of output that should be converted.
    attr : str, optional
        Name of the attribute updated for in-place operations on immutable
        data, or `None` for mutable data. By default `''`.
    names : Tuple[Any, ...] or None, optional
        For `inumbr_mixin, bitwise_mixin, ibitws_mixin`. A single namespace
        is expanded to 3. By default `None`.

    Notes
    -----
    You must call `set_objclasses(class)` after defining the `class`.
    The `__objclass__` attribute is needed to convert the outputs back.
    """
    inumspace, bitspace, ibitspace = tuplify(names, 3)
    parents = (
        inumbr_mixin(conv, types, attr, inumspace),
        bitwise_mixin(conv, types, bitspace),
        ibitws_mixin(conv, attr, ibitspace),
    )
    return new_class('IntegerLikeMixin', bases=parents)
# =============================================================================
# Typing aliases
# =============================================================================
# Type variable for the number-like class being wrapped.
Obj = typing.TypeVar("Obj")
# An operand may be a plain number or an instance of the wrapped class.
Other = typing.Union[Number, Obj]
# A single instance or a tuple of instances (e.g. divmod returns a pair).
InstOrTup = typing.Union[Var, Tuple[Var, ...]]
# A callable of one or two arguments (unary or binary operator).
OfOneOrTwo = typing.Union[Callable[[Var], Val], Callable[[Var, Var], Val]]
# Converter: turns a sequence of wrapped objects into plain numbers.
Conv = Callable[[typing.Sequence[Obj]], typing.Sequence[Number]]
# Raw numeric function to be wrapped (operates on plain numbers).
Func = OfOneOrTwo[Number, InstOrTup[Number]]
# Wrapped function: accepts numbers or wrapped objects, returns a number.
Wrapped = OfOneOrTwo[Other, Number]
# Wrapped operator method: returns wrapped object(s).
Operator = Callable[[Obj, Other], InstOrTup[Obj]]
# Decorator kinds: single wrapped function / operator, or pairs thereof.
Wrapper = Callable[[Func], Wrapped]
OpWrapper = Callable[[Func], Operator]
Wrappers = Callable[[Func], Tuple[Wrapped, Wrapped]]
OpsWrapper = Callable[[Func], Tuple[Operator, Operator]]
# Optional specification of output types eligible for conversion back.
TypeArg = Optional[InstanceOrSeq[Type[Number]]]
# source: subhylahiri/sl_py_tools — number_like.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
import numpy as np
import torch_util
from tqdm import tqdm
from model.baseModel import model_eval
import util.save_tool as save_tool
import os
from datetime import datetime
import util.data_loader as data_loader
import config
import fire
class StackBiLSTMMaxout(nn.Module):
    """Three-layer stacked BiLSTM sentence encoder with last-layer max-pooling.

    Each BiLSTM layer receives the word embeddings concatenated with the
    outputs of every previous layer (shortcut connections).  The final
    layer's outputs are max-pooled over time per sentence; the two sentence
    vectors are combined (concat, abs-difference, element-wise product) and
    fed to a dropout MLP that predicts one of 3 NLI classes.
    """

    def __init__(self, h_size=(512, 1024, 2048), v_size=10, d=300, mlp_d=1600, dropout_r=0.1, max_l=60):
        """
        Args:
            h_size: hidden size (per direction) of each of the 3 BiLSTM layers.
                Default changed from a list to a tuple so the default is not a
                shared mutable object; any 3-element sequence is accepted.
            v_size: vocabulary size of the embedding table.
            d: word-embedding dimension.
            mlp_d: hidden size of the classifier MLP.
            dropout_r: dropout rate inside the classifier.
            max_l: maximum sentence length; longer inputs are truncated.
        """
        super(StackBiLSTMMaxout, self).__init__()
        self.Embd = nn.Embedding(v_size, d)
        self.lstm = nn.LSTM(input_size=d, hidden_size=h_size[0],
                            num_layers=1, bidirectional=True)
        # Layers 2/3 consume the embeddings plus all previous bidirectional
        # outputs (hence the `* 2` for the two directions).
        self.lstm_1 = nn.LSTM(input_size=(d + h_size[0] * 2), hidden_size=h_size[1],
                              num_layers=1, bidirectional=True)
        self.lstm_2 = nn.LSTM(input_size=(d + (h_size[0] + h_size[1]) * 2), hidden_size=h_size[2],
                              num_layers=1, bidirectional=True)
        self.max_l = max_l
        self.h_size = h_size
        # Classifier input is [s1; s2; |s1-s2|; s1*s2], each h_size[2]*2 wide.
        self.mlp_1 = nn.Linear(h_size[2] * 2 * 4, mlp_d)
        self.mlp_2 = nn.Linear(mlp_d, mlp_d)
        self.sm = nn.Linear(mlp_d, 3)
        self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
                                          self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
                                          self.sm])

    def display(self):
        """Print the size of every parameter tensor (debugging aid)."""
        for param in self.parameters():
            print(param.data.size())

    def forward(self, s1, l1, s2, l2):
        """Classify a batch of premise/hypothesis pairs.

        Args:
            s1, s2: token-id tensors, time-major [T, B] (premise, hypothesis).
            l1, l2: per-example sentence lengths.

        Returns:
            Logits of shape [B, 3].
        """
        # Truncate overly long sentences to the configured cap.
        if self.max_l:
            l1 = l1.clamp(max=self.max_l)
            l2 = l2.clamp(max=self.max_l)
            if s1.size(0) > self.max_l:
                s1 = s1[:self.max_l, :]
            if s2.size(0) > self.max_l:
                s2 = s2[:self.max_l, :]
        p_s1 = self.Embd(s1)
        p_s2 = self.Embd(s2)
        s1_layer1_out = torch_util.auto_rnn_bilstm(self.lstm, p_s1, l1)
        s2_layer1_out = torch_util.auto_rnn_bilstm(self.lstm, p_s2, l2)
        # Length truncate: align embeddings with the (possibly shorter) output.
        len1 = s1_layer1_out.size(0)
        len2 = s2_layer1_out.size(0)
        p_s1 = p_s1[:len1, :, :]  # [T, B, D]
        p_s2 = p_s2[:len2, :, :]  # [T, B, D]
        # Shortcut connections: each layer sees the embeddings plus all
        # previous layers' outputs.
        s1_layer2_in = torch.cat([p_s1, s1_layer1_out], dim=2)
        s2_layer2_in = torch.cat([p_s2, s2_layer1_out], dim=2)
        s1_layer2_out = torch_util.auto_rnn_bilstm(self.lstm_1, s1_layer2_in, l1)
        s2_layer2_out = torch_util.auto_rnn_bilstm(self.lstm_1, s2_layer2_in, l2)
        s1_layer3_in = torch.cat([p_s1, s1_layer1_out, s1_layer2_out], dim=2)
        s2_layer3_in = torch.cat([p_s2, s2_layer1_out, s2_layer2_out], dim=2)
        s1_layer3_out = torch_util.auto_rnn_bilstm(self.lstm_2, s1_layer3_in, l1)
        s2_layer3_out = torch_util.auto_rnn_bilstm(self.lstm_2, s2_layer3_in, l2)
        s1_layer3_maxout = torch_util.max_along_time(s1_layer3_out, l1)
        s2_layer3_maxout = torch_util.max_along_time(s2_layer3_out, l2)
        # Only use the last layer for the matching features.
        features = torch.cat([s1_layer3_maxout, s2_layer3_maxout,
                              torch.abs(s1_layer3_maxout - s2_layer3_maxout),
                              s1_layer3_maxout * s2_layer3_maxout],
                             dim=1)
        out = self.classifier(features)
        return out
def train(combined_set=False):
    """Train the stacked-BiLSTM model on SNLI (optionally mixed with MultiNLI).

    Runs 6 epochs, halving the learning rate every 2 epochs.  Periodically
    evaluates on the SNLI and MultiNLI dev sets and snapshots the parameters
    whenever either MultiNLI dev score improves.  Also writes an end-of-epoch
    checkpoint that the next epoch resumes from.

    Args:
        combined_set: if True, train on MultiNLI mixed with a 15% sample of
            SNLI; otherwise train on SNLI alone.
    """
    torch.manual_seed(6)
    snli_d, mnli_d, embd = data_loader.load_data_sm(
        config.DATA_ROOT, config.EMBD_FILE, reseversed=False, batch_sizes=(32, 200, 200, 30, 30), device=0)
    s_train, s_dev, s_test = snli_d
    m_train, m_dev_m, m_dev_um, m_test_m, m_test_um = mnli_d
    # Finite iterators: one pass per epoch.
    s_train.repeat = False
    m_train.repeat = False
    model = StackBiLSTMMaxout()
    # Load the pretrained word embeddings into the embedding table.
    model.Embd.weight.data = embd
    model.display()
    if torch.cuda.is_available():
        # NOTE(review): `Tensor.cuda()` is not in-place and its result is
        # discarded here; `model.cuda()` moves the embedding weight anyway
        # since `embd` was assigned to it above — confirm intent.
        embd.cuda()
        model.cuda()
    start_lr = 2e-4
    optimizer = optim.Adam(model.parameters(), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    # Experiment bookkeeping: run directory named after the model and timestamp.
    date_now = datetime.now().strftime("%m-%d-%H:%M:%S")
    name = '[512,1024,2048]-3stack-bilstm-last_maxout'
    file_path = save_tool.gen_prefix(name, date_now)
    message = " "
    save_tool.logging2file(file_path, 'code', None, __file__)
    save_tool.logging2file(file_path, 'message', message, __file__)
    iterations = 0
    best_m_dev = -1
    best_um_dev = -1
    param_file_prefix = "{}/{}".format(file_path, "saved_params")
    if not os.path.exists(os.path.join(config.ROOT_DIR, param_file_prefix)):
        os.mkdir(os.path.join(config.ROOT_DIR, param_file_prefix))
    for i in range(6):
        s_train.init_epoch()
        m_train.init_epoch()
        if not combined_set:
            train_iter, dev_iter, test_iter = s_train, s_dev, s_test
            train_iter.repeat = False
            print(len(train_iter))
        else:
            # Mix 15% of SNLI into MultiNLI; reseed per epoch for variety.
            train_iter = data_loader.combine_two_set(s_train, m_train, rate=[0.15, 1], seed=i)
            dev_iter, test_iter = s_dev, s_test
        start_perf = model_eval(model, dev_iter, criterion)
        # Halve the learning rate every two epochs.
        i_decay = i // 2
        lr = start_lr / (2 ** i_decay)
        epoch_start_info = "epoch:{}, learning_rate:{}, start_performance:{}/{}\n".format(i, lr, *start_perf)
        print(epoch_start_info)
        save_tool.logging2file(file_path, 'log', epoch_start_info)
        if i != 0:
            # Resume from the snapshot written at the end of the previous epoch.
            SAVE_PATH = os.path.join(config.ROOT_DIR, file_path, 'm_{}'.format(i - 1))
            model.load_state_dict(torch.load(SAVE_PATH))
        for batch_idx, batch in tqdm(enumerate(train_iter)):
            iterations += 1
            model.train()
            s1, s1_l = batch.premise
            s2, s2_l = batch.hypothesis
            # Labels are stored 1-based; CrossEntropyLoss expects 0-based.
            y = batch.label - 1
            # Lengths reduced by one — presumably to drop an EOS/pad token;
            # TODO confirm against data_loader.
            out = model(s1, (s1_l - 1), s2, (s2_l - 1))
            loss = criterion(out, y)
            optimizer.zero_grad()
            # Apply the per-epoch learning rate to every parameter group.
            for pg in optimizer.param_groups:
                pg['lr'] = lr
            loss.backward()
            optimizer.step()
            # Evaluate rarely in the first two epochs, then frequently.
            if i == 0 or i == 1:
                mod = 9000
            else:
                mod = 100
            if (1 + batch_idx) % mod == 0:
                dev_score, dev_loss = model_eval(model, dev_iter, criterion)
                print('SNLI:{}/{}'.format(dev_score, dev_loss), end=' ')
                # MultiNLI sentences are longer; temporarily raise the cap.
                model.max_l = 150
                mdm_score, mdm_loss = model_eval(model, m_dev_m, criterion)
                mdum_score, mdum_loss = model_eval(model, m_dev_um, criterion)
                print(' MNLI_M:{}/{}'.format(mdm_score, mdm_loss), end=' ')
                print(' MNLI_UM:{}/{}'.format(mdum_score, mdum_loss))
                model.max_l = 60
                now = datetime.now().strftime("%m-%d-%H:%M:%S")
                log_info_mnli = "dev_m:{}/{} um:{}/{}\n".format(mdm_score, mdm_loss, mdum_score, mdum_loss)
                save_tool.logging2file(file_path, "log", log_info_mnli)
                # Snapshot whenever either MultiNLI dev score improves;
                # `saved` avoids writing the same file twice.
                saved = False
                if best_m_dev < mdm_score:
                    best_m_dev = mdm_score
                    save_path = os.path.join(config.ROOT_DIR, param_file_prefix,
                                             'e({})_m_m({})_um({})'.format(i, mdm_score, mdum_score))
                    torch.save(model.state_dict(), save_path)
                    saved = True
                if best_um_dev < mdum_score:
                    best_um_dev = mdum_score
                    save_path = os.path.join(config.ROOT_DIR, param_file_prefix,
                                             'e({})_m_m({})_um({})'.format(i, mdm_score, mdum_score))
                    if not saved:
                        torch.save(model.state_dict(), save_path)
        # End-of-epoch checkpoint used to resume at the next epoch.
        SAVE_PATH = os.path.join(config.ROOT_DIR, file_path, 'm_{}'.format(i))
        torch.save(model.state_dict(), SAVE_PATH)
def build_kaggle_submission_file(model_path):
    """Write Kaggle submission CSVs for the MultiNLI matched/mismatched tests.

    Loads the trained model from `model_path`, predicts labels for both test
    sets in their original order, and writes `sub_m.csv` / `sub_um.csv` in the
    `pairID,gold_label` format.

    Args:
        model_path: path to a saved `state_dict` of `StackBiLSTMMaxout`.
    """
    torch.manual_seed(6)
    snli_d, mnli_d, embd = data_loader.load_data_sm(
        config.DATA_ROOT, config.EMBD_FILE, reseversed=False, batch_sizes=(32, 32, 32, 32, 32), device=0)
    m_train, m_dev_m, m_dev_um, m_test_m, m_test_um = mnli_d
    # Keep example order fixed so predictions line up with pair ids.
    m_test_um.shuffle = False
    m_test_m.shuffle = False
    m_test_um.sort = False
    m_test_m.sort = False
    model = StackBiLSTMMaxout()
    model.Embd.weight.data = embd
    # model.display()
    if torch.cuda.is_available():
        embd.cuda()
        model.cuda()
    criterion = nn.CrossEntropyLoss()
    model.load_state_dict(torch.load(model_path))
    # Bug fix: raise the sentence-length cap *before* predicting (mirrors
    # `eval_model`); previously it was assigned after both eval calls, where
    # it had no effect on the predictions.
    model.max_l = 150
    m_pred = model_eval(model, m_test_m, criterion, pred=True)
    um_pred = model_eval(model, m_test_um, criterion, pred=True)
    print(um_pred)
    print(m_pred)
    with open('./sub_um.csv', 'w+') as f:
        index = ['entailment', 'contradiction', 'neutral']
        f.write("pairID,gold_label\n")
        for i, k in enumerate(um_pred):
            f.write(str(i) + "," + index[k] + "\n")
    with open('./sub_m.csv', 'w+') as f:
        index = ['entailment', 'contradiction', 'neutral']
        f.write("pairID,gold_label\n")
        for j, k in enumerate(m_pred):
            # NOTE(review): matched-set pair ids appear to start at 9847,
            # continuing after the mismatched set — confirm offset.
            f.write(str(j + 9847) + "," + index[k] + "\n")
def eval_model(model_path, mode='dev'):
    """Evaluate a saved model on the MultiNLI dev sets and print acc/loss.

    Args:
        model_path: path to a saved `state_dict` of `StackBiLSTMMaxout`.
        mode: kept for interface compatibility (currently unused).
    """
    torch.manual_seed(6)
    snli_d, mnli_d, embd = data_loader.load_data_sm(
        config.DATA_ROOT, config.EMBD_FILE, reseversed=False, batch_sizes=(32, 32, 32, 32, 32), device=0)
    m_train, m_dev_m, m_dev_um, m_test_m, m_test_um = mnli_d
    # Keep example order fixed so runs are reproducible.
    for it in (m_dev_um, m_dev_m, m_test_um, m_test_m):
        it.shuffle = False
        it.sort = False
    model = StackBiLSTMMaxout()
    model.Embd.weight.data = embd
    if torch.cuda.is_available():
        embd.cuda()
        model.cuda()
    criterion = nn.CrossEntropyLoss()
    model.load_state_dict(torch.load(model_path))
    # MultiNLI sentences are longer than SNLI's; raise the length cap.
    model.max_l = 150
    m_pred = model_eval(model, m_dev_m, criterion)
    um_pred = model_eval(model, m_dev_um, criterion)
    print("dev_mismatched_score (acc, loss):", um_pred)
    print("dev_matched_score (acc, loss):", m_pred)
if __name__ == '__main__':
    # Expose train / eval_model / build_kaggle_submission_file as CLI commands.
    fire.Fire()
# source: easonnie/multiNLI_encoder — model/tested_model/stack_3bilstm_last_encoder.py
import random
def roll_dice(num_dice):
    """Roll `num_dice` six-sided dice, print each result, and return them.

    Parameters:
        num_dice (int): number of dice to roll; must be non-negative.

    Returns:
        list[int]: the face values rolled, each in 1..6.  (Returning the
        rolls is a backward-compatible addition; callers that ignore the
        return value behave as before.)

    Raises:
        ValueError: if `num_dice` is negative.
    """
    if num_dice < 0:
        raise ValueError("num_dice must be non-negative")
    print(f"\nRolling {num_dice} dice...")
    rolls = [random.randint(1, 6) for _ in range(num_dice)]
    for dice_value in rolls:
        print(f"Dice: {dice_value}")
    return rolls
def main():
    """Interactive menu loop: roll dice or quit.

    Fix: a non-numeric dice count previously raised an uncaught ValueError
    from `int(input(...))` and crashed the app; it is now reported and the
    menu is shown again.
    """
    while True:
        print("\nWelcome to the Dice Rolling App!")
        print("1. Roll the dice")
        print("2. Quit")
        choice = input("Enter your choice (1-2): ")
        if choice == "1":
            try:
                num_dice = int(input("Enter the number of dice to roll: "))
            except ValueError:
                print("Please enter a whole number!")
                continue
            roll_dice(num_dice)
        elif choice == "2":
            break
        else:
            print("Invalid choice!")
if __name__ == "__main__":
    # Run the interactive menu only when executed as a script.
    main()
# source: akash-inft1905/aidTec_Diceroller — diceroller.py
import datetime
from .base import ConnectionType
from copaco.utils import getFile
from copaco.constants.mappings import PRICELISTITEM_MAPPINGS, STOCK_MAPPINGS
from copaco.constants.constants import PRICELISTITEM_STATUS
from copaco.models.pricelist import PriceListItem, PriceList
class PriceListType(ConnectionType):
    """Connection type that assembles the Copaco price list from FTP exports.

    Fixes: the original final `return` line had dataset metadata fused onto
    it (a syntax error); the long `get` method is also decomposed into one
    private helper per processing step.
    """

    def __init__(self, connection):
        super().__init__(connection)

    def get(self):
        """Build the full price list for the connected customer.

        Combines three CSV exports from the Copaco FTP server: the
        customer-specific price list, the stock/ATP quantities, and the
        human-readable ATP status qualifications.

        Returns:
            The `PriceList` contents as returned by `PriceList.toList()`.
        """
        priceList = self._load_customer_pricelist()
        self._merge_stock_details(priceList)
        self._apply_atp_labels(priceList)
        return priceList.toList()

    def _read_csv(self, remotePath):
        """Download `remotePath` from the FTP server and parse it as a dataframe."""
        internalPath = self.connection.ftpHandler.retrFile(remotePath)
        return getFile(internalPath)

    def _load_customer_pricelist(self):
        """Step 1: read the customer price list into a `PriceList`.

        Columns are mapped onto `PriceListItem` attributes via
        PRICELISTITEM_MAPPINGS; items with a negative status code are dropped.
        """
        path = '{login}/Out/CopacoBE_prijslijst_{loginNoBE}.csv'.format(
            login=self.connection.login, loginNoBE=self.connection.login.replace('BE', ''))
        pdFile = self._read_csv(path)
        priceList = PriceList()
        for index, row in pdFile.iterrows():
            item = PriceListItem()
            for mAttr, mValues in PRICELISTITEM_MAPPINGS.items():
                for value in mValues:
                    if value in row: setattr(item, mAttr, row[value])
            if item.statusCode >= 0:
                if item.statusCode in PRICELISTITEM_STATUS:
                    item.status = PRICELISTITEM_STATUS[item.statusCode]
                priceList.add(item)
        return priceList

    def _merge_stock_details(self, priceList):
        """Step 2: merge stock/ATP columns onto the matching items."""
        pdFile = self._read_csv('CopacoBE/6010_ATP.CSV')
        for index, row in pdFile.iterrows():
            articleNumber = None
            for aKey in PRICELISTITEM_MAPPINGS['article']:
                if aKey in row: articleNumber = row[aKey]
            if not articleNumber:
                continue
            item = priceList.getItemByNumber(articleNumber)
            if not item:
                continue
            for mAttr, mValues in STOCK_MAPPINGS.items():
                for value in mValues:
                    if value in row: setattr(item, mAttr, row[value])
            # NOTE(review): assumes every matched stock row leaves a parseable
            # month-day-year string in `nextDelivery` — confirm against feed.
            item.nextDelivery = datetime.datetime.strptime(item.nextDelivery, '%m-%d-%Y').date()

    def _apply_atp_labels(self, priceList):
        """Step 3: translate ATP status codes into human-readable labels."""
        pdFile = self._read_csv('CopacoBE/6010_ATP_KWALIFICATIES.CSV')
        codeToLabel = {}
        for index, row in pdFile.iterrows():
            codeToLabel[row[0]] = row[1]
        for key, item in priceList.items.items():
            if item.inventoryStatusCode: item.inventoryStatus = codeToLabel[item.inventoryStatusCode]
# -*- coding: utf-8 -*-
"""
Created on 13.04.23
"""
import numpy as np
def cutout_image(annos, image, cutout_ratio, scale=1):
    """
    Cut out the image according to the annotations, adding 20% margin to all
    sides, then widening or heightening the box (clipped at the image
    borders) until it matches the desired width:height ratio.

    Fix: the original final `return` line had dataset metadata fused onto it,
    which was a syntax error.

    :param annos: num_keypoints x 2 or 3 array of keypoint coordinates
    :param image: image to crop
    :param cutout_ratio: desired (width, height) ratio as a 2-tuple
    :param scale: factor mapping annotation coordinates to image pixels
    :return: the cropped image
    """
    # Tight bounding box of all keypoints, in annotation coordinates.
    x_max = int(round(np.max(annos[:, 0])))
    x_min = int(round(np.min(annos[:, 0])))
    y_max = int(round(np.max(annos[:, 1])))
    y_min = int(round(np.min(annos[:, 1])))
    w = x_max - x_min
    h = y_max - y_min
    # Add a 20% margin on each side, clipped to the image extent.
    x_max = min(x_max + int(0.2 * w), image.shape[1] * 1 / scale)
    x_min = max(x_min - int(0.2 * w), 0)
    y_max = min(y_max + int(0.2 * h), image.shape[0] * 1 / scale)
    y_min = max(y_min - int(0.2 * h), 0)
    w = x_max - x_min
    h = y_max - y_min
    ratio_w, ratio_h = cutout_ratio
    if w / ratio_w > h / ratio_h:
        # Box too wide for the target ratio: grow the height symmetrically,
        # shifting any expansion clipped at the top down to the bottom.
        h_final = int(w / ratio_w * ratio_h)
        offset = (h_final - h) // 2
        if y_min - offset < 0:
            offset = 2 * offset - y_min
            y_min = 0
        else:
            y_min = y_min - offset
        y_max = min(y_max + offset, image.shape[0] * 1 / scale)
    elif w / ratio_w < h / ratio_h:
        # Box too tall: grow the width instead, same clipping logic.
        w_final = int(h / ratio_h * ratio_w)
        offset = (w_final - w) // 2
        if x_min - offset < 0:
            offset = 2 * offset - x_min
            x_min = 0
        else:
            x_min = x_min - offset
        x_max = min(x_max + offset, image.shape[1] * 1 / scale)
    # Convert to pixel indices; +1 makes the crop inclusive of the max edge.
    y_min = int(round(y_min * scale))
    y_max = int(round(y_max * scale)) + 1
    x_min = int(round(x_min * scale))
    x_max = int(round(x_max * scale)) + 1
    image = image[y_min: y_max, x_min: x_max]
    return image
import math
import numpy as np
from poptransformer import ops
from poptransformer.utils import shard, repeat, shard_fused_qkv
from poptransformer.layers import BaseLayer
from poptransformer.layers import Linear
class BaseAttention(BaseLayer):
softmax_fn_map = {
'aionnx': ops.softmax,
'ce': ops.softmax_ce,
}
    def __init__(self, context, name, input_size, num_head, cache_max_length, fp8_cache=False):
        """Base self-attention layer with rotary embeddings and a KV cache.

        Args:
            context: shared build context passed to `BaseLayer`.
            name: layer name/scope.
            input_size: model hidden size; must be divisible by `num_head`.
            num_head: number of attention heads.
            cache_max_length: maximum sequence length held in the KV cache.
            fp8_cache: whether the KV cache is stored in fp8.
        """
        super().__init__(context, name)
        self.input_size = input_size
        self.num_head = num_head
        self.head_size = self.input_size // self.num_head
        self.cache_max_length = cache_max_length
        self.fp8_cache = fp8_cache
        # NOTE(review): `scale` duplicates `head_size`; the visible attention
        # code scales by 1/sqrt(head_size) directly, so this looks unused
        # here — confirm whether subclasses rely on it.
        self.scale = input_size // num_head
        self.collect_bind_layer_weights()
def fixed_pos_embedding(self, graph, step, head_dim):
inv_freq_value = np.array(
[1.0 / (10000 ** (i / head_dim)) for i in range(0, head_dim, 2)]).astype(self.np_float_type)
inv_freq = ops.constant(graph, inv_freq_value, 'inv_freq')
inv_freq = ops.reshape(graph, inv_freq, [1, -1])
ind = ops.reshape(graph, step, [-1, 1])
ind = ops.cast(graph, ind, self.popart_float_type)
sinusoid_inp = ops.matmul(graph, ind, inv_freq)
return (graph.aiOnnx.sin([sinusoid_inp]), graph.aiOnnx.cos([sinusoid_inp]))
def rotate_half(self, graph, x, n_head, head_dim, batch_size=1):
x1, x2 = ops.split(graph, x, 2, 3, [head_dim//2, head_dim//2], "split_rotate_every_two")
x2 = ops.mul(graph, x2, ops.constant(graph, np.array([-1]).astype(self.np_float_type)))
x = ops.concat(graph, x2, x1, 3)
return ops.reshape(graph, x, [batch_size, n_head, 1, head_dim])
def apply_rotary_pos_emb(self, graph, q, k, sincos, n_head, head_dim,batch_size=1):
sin = ops.concat(graph, sincos[0], sincos[0],1)
sin = ops.reshape(graph, sin, [1, 1, 1, -1])
cos = ops.concat(graph, sincos[1], sincos[1],1)
cos = ops.reshape(graph, cos, [1, 1, 1, -1])
q_rotate_every_two = self.rotate_half(graph, q, n_head, head_dim, batch_size)
q = ops.add(graph, ops.mul(graph, q, cos), ops.mul(graph, q_rotate_every_two, sin))
k_rotate_every_two = self.rotate_half(graph, k, n_head, head_dim, batch_size)
k = ops.add(graph, ops.mul(graph, k, cos), ops.mul(graph, k_rotate_every_two, sin))
return q, k
def collect_bind_layer_weights(self):
self.q_proj = Linear(self.context, 'q_proj', self.input_size, self.input_size, use_bias=False)
self.k_proj = Linear(self.context, 'k_proj', self.input_size, self.input_size, use_bias=False)
self.v_proj = Linear(self.context, 'v_proj', self.input_size, self.input_size, use_bias=False)
self.o_proj = Linear(self.context, 'o_proj', self.input_size, self.input_size, use_bias=False)
def forward_qkv(self, graph, x, step):
q = self.q_proj(graph, x)
k = self.k_proj(graph, x)
v = self.v_proj(graph, x)
q = ops.reshape(graph, q, [self.batch_size, self.sequence_length, self.num_head, self.head_size])
k = ops.reshape(graph, k, [self.batch_size, self.sequence_length, self.num_head, self.head_size])
v = ops.reshape(graph, v, [self.batch_size, self.sequence_length, self.num_head, self.head_size])
q = ops.transpose(graph, q, [0, 2, 1, 3]) # q: [B, N, L, H]
k = ops.transpose(graph, k, [0, 2, 1, 3]) # k: [B, N, L, H]
v = ops.transpose(graph, v, [0, 2, 1, 3]) # v: [B, N, L, H]
sincos = self.fixed_pos_embedding(graph, step, self.head_size)
q,k = self.apply_rotary_pos_emb(graph, q, k, sincos, self.num_head, self.head_size, self.batch_size)
kv = ops.concat(graph, k, v, 0) #kv: [2, B, N, L, H]
kv = ops.reshape( graph, kv, [2, self.batch_size, self.num_head, self.sequence_length, self.head_size])
# layer_past: [2, B, N, L, h]
with graph.nameScope('attn_past_update'):
layer_past = ops.kv_cache(graph, step, kv, self.cache_max_length, 3, self.sequence_length)
layer_past_key, layer_past_value = ops.split(
graph, layer_past, 2, axis=0, splits=[1, 1], name='split_past'
)
layer_past_key = ops.squeeze(graph, layer_past_key, [0])
layer_past_value = ops.squeeze(graph, layer_past_value, [0])
layer_past_key_temp = ops.transpose(
graph, layer_past_key, [0, 1, 3, 2])
return q, layer_past_key_temp, layer_past_value
def forward_attention(self, graph, q, k, attention_mask, softmax_type):
w = ops.matmul(graph, q, k)
w = ops.mul(graph, w, ops.constant(graph, np.array([1/math.sqrt(self.head_size)]).astype(self.np_float_type)))
w = ops.add(graph, w, attention_mask)
w = ops.cast(graph, w, 'FLOAT')
softmax_fn = self.softmax_fn_map.get(softmax_type, None)
if not softmax_fn:
raise ValueError(f"Invalid softmax_fn {softmax_type}, options: {self.softmax_fn_map.keys()}")
w = softmax_fn(graph, w, -1, stable_mode=self.sequence_length != 1)
w = ops.cast(graph, w, self.popart_float_type)
return w
def forward_output(self, graph, score, v):
a = ops.matmul(graph, score, v)
a = ops.transpose(graph, a, [0, 2, 1, 3])
a = ops.reshape(graph, a, [self.batch_size, self.sequence_length, -1])
return self.o_proj(graph, a)
def __call__(self, graph, x, step, attention_mask, sequence_length, softmax_type='ce'):
with graph.nameScope(self.context):
self.sequence_length = sequence_length
q, k, v = self.forward_qkv(graph, x, step)
score = self.forward_attention(graph, q, k, attention_mask, softmax_type)
output = self.forward_output(graph, score, v)
return output
class TPAttention(BaseAttention):
    """Tensor-parallel attention: heads are sharded evenly across replicas.

    QKV projections use the 'start' sharding strategy (split output features),
    the output projection uses 'end' (split input features / all-gather).
    """
    def collect_bind_layer_weights(self):
        # self.num_replicas is presumably provided by BaseLayer/context —
        # TODO(review): confirm where it is set.
        self.num_head_beforeTP = self.num_head
        self.num_head = math.ceil(self.num_head / self.num_replicas)
        # Heads must divide evenly across replicas.
        assert self.num_head_beforeTP == self.num_head * self.num_replicas
        qkv_tp_settings = {
            'strategy_name': 'start',
        }
        proj_tp_setting = {
            'strategy_name': 'end',
        }
        self.q_proj = Linear(
            self.context, 'q_proj', self.input_size, self.input_size, use_bias=False, **qkv_tp_settings)
        self.k_proj = Linear(
            self.context, 'k_proj', self.input_size, self.input_size, use_bias=False, **qkv_tp_settings)
        self.v_proj = Linear(
            self.context, 'v_proj', self.input_size, self.input_size, use_bias=False, **qkv_tp_settings)
        self.o_proj = Linear(
            self.context, 'o_proj', self.input_size, self.input_size, use_bias=False, **proj_tp_setting)
class Attention(TPAttention, BaseAttention):
    """Facade that dispatches to TPAttention or BaseAttention at runtime.

    The concrete implementation is chosen from `self.model_type`
    (presumably supplied by BaseLayer/context — confirm), and every
    overridable hook is forwarded explicitly to the chosen class so the
    MRO of this multiple-inheritance shell is bypassed.
    """
    layer_class_map = {
        'tp': TPAttention,
        'shard': BaseAttention}
    def __init__(self, context, name, input_size, num_head, cache_max_length, fp8_cache=False):
        # Select the implementation class before running the shared __init__,
        # because __init__ ends up calling collect_bind_layer_weights().
        model_type = self.model_type
        self.layer_class = self.layer_class_map.get(model_type, None)
        if not self.layer_class:
            raise ValueError(f"Invalid model_type {model_type}, options: {self.layer_class_map.keys()}")
        self.logger.debug(f'initializing model type: {self.layer_class.__name__}')
        super().__init__(context, name, input_size, num_head, cache_max_length, fp8_cache)
    def __call__(self, graph, x, step, attention_mask, sequence_length, softmax_type='ce'):
        # Explicit dispatch to the selected class (not via MRO).
        return self.layer_class.__call__(self, graph, x, step, attention_mask, sequence_length, softmax_type)
    def collect_bind_layer_weights(self):
        return self.layer_class.collect_bind_layer_weights(self)
    def forward_attention(self, graph, q, k, attention_mask, softmax_type):
        return self.layer_class.forward_attention(self, graph, q, k, attention_mask, softmax_type)
    def forward_qkv(self, graph, x, step):
        return self.layer_class.forward_qkv(self, graph, x, step)
    def forward_output(self, graph, score, v):
        return self.layer_class.forward_output(self, graph, score, v)
| graphcore/PopTransformer | poptransformer/models/llama2/attention.py | attention.py | py | 8,185 | python | en | code | 6 | github-code | 13 |
13418984523 | # -*- coding:utf-8 -*-
# 作者:IT小学生蔡坨坨
# 时间:2020/12/4 15:09
from django.conf.urls import url
from web.views import account
from web.views import home
urlpatterns = [
    url(r'^send/sms/$', account.send_sms, name='send_sms'),  # Send SMS verification code
    url(r'^register/$', account.register, name='register'),  # Sign up
    url(r'^login/sms/$', account.login_sms, name='login_sms'),  # Log in with SMS code
    url(r'^login/$', account.login, name='login'),  # Log in with username and password
    url(r'^image/code/$', account.image_code, name='image_code'),  # Image captcha (original comment was copy-pasted from the login route)
    url(r'^index/$', home.index, name='index'),  # Home page
    url(r'^logout/$', account.logout, name='logout'),  # Log out
]
| y297374507/saas | web/urls.py | urls.py | py | 725 | python | en | code | 0 | github-code | 13 |
73251601618 | import matplotlib.pyplot as plt
import numpy as np
from utils import TFuncs
import os
def plot_gp(ax, X, m, C, no_last_data,training_points=None):
    """Plot a GP fit with a 95% confidence interval on *ax*.

    Args:
        ax: matplotlib axes to draw on.
        X: (N, 1) grid of input locations.
        m: (N,) predictive mean.
        C: (N, N) predictive covariance; only its diagonal is used.
        no_last_data: when False, suppress the training-point errorbars.
        training_points: optional (X_, Y_, varY_) arrays to overlay.

    Returns:
        List of Line2D objects currently on *ax* (callers use this to
        assemble a combined legend).
    """
    # Shade the 95% band: mean +/- 1.96 * std.
    ax.fill_between(X[:, 0],
                    m - 1.96*np.sqrt(np.diag(C)),
                    m + 1.96*np.sqrt(np.diag(C)),
                    alpha=0.5)
    # Plot GP mean and initial training points
    ax.plot(X, m, "-", label="Predicted GP mean")
    # plt.show()
    # Overlay training data with +/- 1 std errorbars (if enabled).
    if no_last_data != False:
        if training_points is not None:
            X_, Y_, varY_ = training_points
            l, _, _ = ax.errorbar(X_[:, 0], Y_[:, 0], yerr=np.sqrt(varY_[:, 0]),
                                  ls="",
                                  marker="o",
                                  markersize=5,
                                  color="red")
            l.set_label("Training points")
    return ax.get_lines()
def plot_AL_iteration(ax, X_grid, mean, Cov, alpha_full, X_samples, Y_samples, Y_var, next_sample, last,sample=True,no_last_data=True):
    """Draw one active-learning iteration: GP fit, info box and next-sample marker.

    Returns the list of artists (`lns`) used by the caller to build a legend.
    `alpha_full` is the acquisition function (variance) on X_grid; `next_sample`
    is the index of the proposed next query point (ignored when `last`).
    """
    l1 = plot_gp(ax, X_grid, mean, Cov, no_last_data,training_points=(X_samples, Y_samples, Y_var))
    # ax2 = ax.twinx()
    ax.set_ylabel(r"pressure $p$")
    ax.set_xlabel(r"density $\rho$")
    total_data = np.shape(X_samples)[0]
    # ax2.set_ylabel(r"var($p$)", color='r')
    # ax2.tick_params(axis='y', labelcolor='r')
    # Annotate with the acquisition maximum (scientific notation) and sample count.
    edited_alpha = '{:.2e}'.format(np.max(alpha_full))
    if sample==True:
        ax.text(0.33, 0.8,f'Maximum variance = {edited_alpha} \n Total data points = {total_data}', ha='center', va='center', transform=ax.transAxes, fontsize=7, bbox=dict(facecolor='red', alpha=0.5))
    # l2 = ax2.plot(X_grid, alpha_full, 'r', label="Aquisition function")
    if not last:
        # Vertical dashed line at the proposed next sample location.
        l3 = ax.plot([X_grid[next_sample], X_grid[next_sample]], ax.get_ylim(), 'g--', label="Next sample")
    else:
        # Final iteration: empty line keeps the legend entry consistent.
        l3 = ax.plot([], [], 'g--', label="Next sample")
    lns = l1 + l3
    # lns = l1 + l2 + l3
    return lns
def plot_summary(path,N_init,N, X_grid, X_samples, Y_samples, index_list, Mean, Cov,fig1, ax1,start_data=0,x_yes=True, no_last_data=True):
    """Plot the final active-learning state for the pressure/density model.

    When `x_yes` is True, only samples in [start_data, N_init+N) are shown and
    the next-sample marker uses index_list[N_init+N]; otherwise all samples are
    drawn without a next-sample marker.  Overlays the target function read via
    `TFuncs.target_function(X_grid, path)` as a dashed black line.
    """
    last = True
    global_error = []
    mean = Mean
    cov = Cov
    # axis = ax1.flat[N-1]
    # Acquisition function = pointwise predictive variance.
    alpha_full = np.diag(cov)
    global_error.append(np.linalg.norm(alpha_full))
    if (x_yes==False):
        if(no_last_data == False):
            Y_var = np.zeros_like(Y_samples)
            lns = plot_AL_iteration(ax1, X_grid, mean, cov, alpha_full,
                                    X_samples, Y_samples, Y_var, index_list, last=last, sample=False, no_last_data=False)
        else:
            Y_var = np.zeros_like(Y_samples)
            lns = plot_AL_iteration(ax1, X_grid, mean, cov, alpha_full,
                                    X_samples, Y_samples, Y_var, index_list, last=last, sample=False, no_last_data=True)
    else:
        Y_var = np.zeros_like(Y_samples[start_data:N_init+N])
        lns = plot_AL_iteration(ax1, X_grid, mean, cov, alpha_full,
                                X_samples[start_data:N_init+N], Y_samples[start_data:N_init+N], Y_var, index_list[N_init+N], last=last)
    # Reference/ground-truth curve for visual comparison.
    ax1.plot(X_grid, TFuncs.target_function(X_grid, path), '--', color='0.0')
def plot_gp_visc(ax, X, m, C, scale, training_points=None):
    """Plot a GP fit with 95% confidence interval (viscosity variant).

    Same as plot_gp but with a `scale` switch ('log' puts both axes on a
    log scale) and black 'x' markers for the training points.
    Returns the Line2D objects on *ax*.
    """
    # Plot 95% confidence interval
    if(scale == 'log'):
        ax.set_xscale('log')
        ax.set_yscale('log')
    ax.fill_between(X[:, 0],
                    m - 1.96*np.sqrt(np.diag(C)),
                    m + 1.96*np.sqrt(np.diag(C)),
                    alpha=0.5)
    # Plot GP mean and initial training points
    ax.plot(X, m, "-", label="Predicted GP mean")
    # Plot training points if included
    if training_points is not None:
        X_, Y_, varY_ = training_points
        l, _, _ = ax.errorbar(X_[:, 0], Y_[:, 0], yerr=np.sqrt(varY_[:, 0]),
                              ls="",
                              marker="x",
                              markersize=6,
                              color="0.0")
        l.set_label("Training points")
    return ax.get_lines()
def plot_AL_iteration_visc(ax,fig1, X_grid, mean, Cov, alpha_full, X_samples, Y_samples, Y_var, next_sample, last, scale, x_label, y_label):
    """Draw one viscosity AL iteration with a twinned variance axis.

    Plots the GP fit on *ax*, the acquisition function (variance) in red on a
    twin y-axis, and a next-sample marker.  Saves *fig1* on every call.
    Returns the combined artist list for the legend.
    """
    l1 = plot_gp_visc(ax, X_grid, mean, Cov, training_points=(X_samples, Y_samples, Y_var), scale = scale)
    ax2 = ax.twinx()
    if(scale == 'log'):
        ax2.set_xscale('log')
        ax2.set_yscale('log')
        ax.set_xscale('log')
        ax.set_yscale('log')
    ax.set_ylabel(y_label)
    # ax.set_ylabel(r"Shear viscosity η ")
    ax.set_xlabel(x_label)
    ax2.set_ylabel(r"variance ", color='r')
    ax2.tick_params(axis='y', labelcolor='r')
    l2 = ax2.plot(X_grid, alpha_full, 'r', label="Aquisition function")
    if not last:
        l3 = ax.plot([X_grid[next_sample], X_grid[next_sample]], ax.get_ylim(), 'g--', label="Next sample")
    else:
        l3 = ax.plot([], [], 'g--', label="Next sample")
    lns = l1 + l2 + l3
    # NOTE(review): hard-coded Windows path separator ('data\\results') —
    # breaks on POSIX; also the figure is re-saved on every subplot call.
    path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data\\results'))
    save_path = os.path.join(path, f'al_rbf_lin_{scale}.png')
    fig1.savefig(save_path, pad_inches=1)
    return lns
def plot_summary_visc(N, X_grid, X_samples, Y_samples, index_list, Mean, Cov, target_function,N_init,num_list,scale, x_label, y_label, ar=1.61, zoom=3.5):
    """Plot a grid of AL iterations (one subplot per entry of num_list).

    Mean/Cov are indexed by iteration along their last axis; `ar` and `zoom`
    control the subplot aspect ratio and size.  A second figure (fig2/ax2)
    is prepared for the global-error curve, though its plot call is
    currently commented out.
    """
    # create subplot grid
    Nx, Ny = (len(num_list) // 2 + len(num_list) % 2, 2)
    fig1, ax1 = plt.subplots(Nx, Ny, figsize=(Ny*ar*zoom, Nx*zoom), sharex=True)
    fig2, ax2 = plt.subplots(1, figsize=(ar*zoom, zoom))
    last = False
    global_error = []
    for i in range(len(num_list)):
        if i == N - 1:
            last = True
        axis = ax1.flat[i]
        # Slice out the posterior for this iteration.
        mean = Mean[:, num_list[i]]
        cov = Cov[:, :, num_list[i]]
        alpha_full = np.diag(cov)
        global_error.append(np.linalg.norm(alpha_full))
        Y_var = np.zeros_like(Y_samples[:N_init+num_list[i]])
        lns = plot_AL_iteration_visc(axis, fig1, X_grid, mean, cov, alpha_full,
                                     X_samples[:N_init+num_list[i]], Y_samples[:N_init+num_list[i]], Y_var, index_list[N_init+num_list[i]], last=last, scale = scale, x_label = x_label, y_label = y_label)
        axis.plot(X_grid, target_function, '--', color='0.0')
    # Shared legend assembled from the artists of the first subplot.
    labs = [line.get_label() for line in lns]
    ax1.flat[0].legend(lns, labs, loc="lower center", bbox_to_anchor=(1., 1.), ncol=5, frameon=False)
    # ax2.plot(np.arange(N), global_error, '-')
    ax2.set_xlabel("iteration")
    ax2.set_ylabel(r"$||\mathsf{var}(f)||$")
36940080710 | # Extended Rauch-Tung-Striebel smoother or Extended Kalman Smoother (EKS)
import jax
import chex
import jax.numpy as jnp
from .base import NLDS
from functools import partial
from typing import Dict, List, Tuple, Callable
from jsl.nlds import extended_kalman_filter as ekf
def smooth_step(state: Tuple[chex.Array, chex.Array, int],
                xs: Tuple[chex.Array, chex.Array],
                params: NLDS,
                Dfz: Callable,
                eps: float,
                return_params: Dict
                ) -> Tuple[Tuple[chex.Array, chex.Array, int], Dict]:
    """One backward step of the extended Rauch-Tung-Striebel smoother.

    Designed as the body of a reverse `jax.lax.scan`: `state` carries the
    smoothed (mean, cov, t) of the *next* time step, `xs` supplies the
    filtered (mean, cov) of the current step.  `eps` regularises the
    predicted covariance before the linear solve.
    """
    mean_next, cov_next, t = state
    mean_kf, cov_kf = xs
    # One-step prediction through the (linearised) dynamics.
    mean_next_hat = params.fz(mean_kf)
    cov_next_hat = Dfz(mean_kf) @ cov_kf @ Dfz(mean_kf).T + params.Qz(mean_kf, t)
    cov_next_hat_eps = cov_next_hat + eps * jnp.eye(mean_next_hat.shape[0])
    # Smoother gain.  NOTE(review): the textbook gain is J = P F^T P_pred^{-1};
    # this computes P_pred^{-1} F^T P (= J^T when the product is symmetric) —
    # confirm the intended orientation.
    kalman_gain = jnp.linalg.solve(cov_next_hat_eps, Dfz(mean_kf).T) @ cov_kf
    # RTS update: correct the filtered estimate with the smoothed future.
    mean_prev = mean_kf + kalman_gain @ (mean_next - mean_next_hat)
    cov_prev = cov_kf + kalman_gain @ (cov_next - cov_next_hat) @ kalman_gain.T
    prev_state = (mean_prev, cov_prev, t-1)
    # Only the requested quantities are accumulated in the scan history.
    carry = {"mean": mean_prev, "cov": cov_prev}
    carry = {key: val for key, val in carry.items() if key in return_params}
    return prev_state, carry
def smooth(params: NLDS,
           init_state: chex.Array,
           observations: chex.Array,
           covariates: chex.Array = None,
           Vinit: chex.Array = None,
           return_params: List = None,
           eps: float = 0.001,
           return_filter_history: bool = False,
           ) -> Dict[str, Dict[str, chex.Array]]:
    """Run the extended Kalman smoother (forward EKF + backward RTS pass).

    Returns a dict with key "smooth" holding the smoothed history (keys
    restricted to `return_params`) and key "filter" holding the EKF history
    (or None unless `return_filter_history` is set).
    """
    kf_params = ["mean", "cov"]
    # Jacobian of the transition function, used to linearise the dynamics.
    Dfz = jax.jacrev(params.fz)
    # Forward pass: extended Kalman filter over all observations.
    _, hist_filter = ekf.filter(params, init_state, observations, covariates, Vinit,
                                return_params=kf_params, eps=eps, return_history=True)
    kf_hist_mean, kf_hist_cov = hist_filter["mean"], hist_filter["cov"]
    # The last filtered estimate seeds the backward recursion; the rest are
    # consumed step by step by the reverse scan.
    kf_last_mean, kf_hist_mean = kf_hist_mean[-1], kf_hist_mean[:-1]
    kf_last_cov, kf_hist_cov = kf_hist_cov[-1], kf_hist_cov[:-1]
    smooth_step_partial = partial(smooth_step, params=params, Dfz=Dfz,
                                  eps=eps, return_params=return_params)
    init_state = (kf_last_mean, kf_last_cov, len(kf_hist_mean) - 1)
    xs = (kf_hist_mean, kf_hist_cov)
    # Backward pass: reverse scan implements the RTS recursion.
    _, hist_smooth = jax.lax.scan(smooth_step_partial, init_state, xs, reverse=True)
    hist = {
        "smooth": hist_smooth,
        "filter": hist_filter if return_filter_history else None
    }
    return hist
| gileshd/JSL | jsl/nlds/extended_kalman_smoother.py | extended_kalman_smoother.py | py | 2,520 | python | en | code | null | github-code | 13 |
10191740225 |
def lis_length(arr):
    """Return the length of the longest strictly increasing subsequence.

    Classic O(n^2) dynamic programming: dy[i] is the LIS length ending at
    index i.  Returns 0 for an empty sequence.
    """
    if not arr:
        return 0
    dy = [1] * len(arr)
    for i in range(1, len(arr)):
        for j in range(i):
            # Extend the best increasing run that can precede arr[i].
            if arr[j] < arr[i] and dy[j] + 1 > dy[i]:
                dy[i] = dy[j] + 1
    return max(dy)


if __name__ == "__main__":
    # First input line is the element count (kept for input-format
    # compatibility; the original script also read but never used it).
    n = int(input())
    arr = list(map(int, input().split()))
    print(lis_length(arr))
| Jinnie-J/Algorithm-study | python/동적계획법/최대_선_연결하기.py | 최대_선_연결하기.py | py | 254 | python | en | code | 0 | github-code | 13 |
71347980818 | from cryptozen.Euclid import GCD
import random
class Transpose:
    """Columnar transposition cipher keyed by an integer column count.

    NOTE(review): ``encrypt`` may mutate ``self.key`` (modulo reduction, or a
    random co-prime re-key when the key equals the message length), so the
    same instance/adjusted key must be used for decryption.
    """

    def __init__(self, key):
        self.key = key        # column count (may be adjusted by encrypt())
        self.encoded = ""     # last ciphertext produced
        self.decoded = ""     # last plaintext recovered

    def encrypt(self, message=None):
        """Encrypt *message* (prompted interactively when None); return ciphertext."""
        if message is None:
            message = input("Enter message to encrypt: ")
        if message == "":
            raise Exception("Please Enter a message to encode")
        if self.key == len(message):
            # A key equal to the message length would leave the text
            # unchanged; draw a random key co-prime with the length instead.
            while True:
                x = random.randint(3, len(message))
                if GCD(len(message), x).gcd() == 1:
                    self.key = x
                    break
        else:
            # NOTE(review): a key that is a multiple of len(message) reduces
            # to 0 here and yields an empty ciphertext.
            self.key = self.key % len(message)
        # Bug fix: rebuild the buffer from scratch so repeated encrypt()
        # calls no longer append to the previous ciphertext.
        self.encoded = "".join(message[i::self.key] for i in range(self.key))
        return self.encoded

    def decrypt(self, coded):
        """Recover and return the plaintext of *coded* using self.key."""
        if coded == "":
            raise Exception("Enter a value to be decoded")
        if len(coded) % self.key == 0:
            times = len(coded) // self.key
        else:
            times = (len(coded) // self.key) + 1
        # Number of unused cells in the final, partial grid row.
        kill = times * self.key - len(coded)
        plain = [""] * times
        col = 0
        row = 0
        for sym in coded:
            plain[col] += sym
            col += 1
            # Wrap after a full column, or one cell early once we reach the
            # short columns at the end of the grid.
            if (col == times) or (col == times - 1 and row >= self.key - kill):
                col = 0
                row += 1
        self.decoded = "".join(plain)
        return self.decoded

    def check_if_transposition(self):
        # TODO: heuristic to detect whether a ciphertext looks like a
        # transposition cipher; not implemented yet.
        pass
18697945245 | import clip
import torch
from PIL import Image
class ObjectDetector:
    """Zero-shot image classifier built on OpenAI CLIP (ViT-B/32)."""

    def __init__(self):
        # Run on GPU when available; clip.load returns the model plus the
        # matching image-preprocessing transform.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model, self.preprocess = clip.load('ViT-B/32', self.device)

    def runDetect(self, filename, options=["smoke detector", "storm shutter", "roof", "heating", "plumbing", "electrical", "security camera", "deadbolt lock", "sprinkler system", "nothing"]):
        """Return the label in *options* CLIP scores highest for the image
        at *filename*, or "nothing" when the top probability is <= 0.7.
        """
        image = self.preprocess(Image.open(filename)).unsqueeze(0).to(self.device)
        text = clip.tokenize(options).to(self.device)
        with torch.no_grad():
            logits_per_image, logits_per_text = self.model(image, text)
        # logits_per_image has shape (1, len(options)); take the single row.
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()[0]
        # Bug fix: the old code ran max() over range(len(probs)) where probs
        # was still the (1, n) array, so len(probs) == 1 and the index was
        # always 0 — the first option was returned whenever confidence was
        # high, regardless of the actual scores.
        index_max = int(probs.argmax())
        if probs[index_max] > 0.7:
            return options[index_max]
        return "nothing"
2871922038 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import functools
import inspect
import threading
import numpy as np
from scipy import stats
import six
from . import log_probs as _log_probs
def make_log_joint_fn(model):
  """Takes PPLHam probabilistic program and returns its log joint function.

  Args:
    model: Python callable which executes the generative process of a
      computable probability distribution using PPLham random variables.

  Returns:
    A log-joint probability function. Its inputs are `model`'s original inputs
    and random variables which appear during the program execution. Its output
    is a scalar `np.ndarray`.

  #### Examples

  Below we define Bayesian logistic regression as an PPLHam program, which
  represents the model's generative process. We apply `make_log_joint_fn` in
  order to alternatively represent the model in terms of its joint probability
  function.

  ```python
  import pplham as ph

  def model(X):
    beta = ph.norm.rvs(loc=0., scale=0.1, size=X.shape[1])
    loc = np.einsum('ij,j->i', X, beta)
    y = ph.norm.rvs(loc=loc, scale=1.)
    return y

  log_joint = ph.make_log_joint_fn(model)

  X = np.random.normal(size=[3, 2])
  beta = np.random.normal(size=[2])
  y = np.random.normal(size=[3])
  out = log_joint(X, beta, y)
  ```

  One can use kwargs in `log_joint` if `rvs` are given `name` kwargs.

  ```python
  def model(X):
    beta = ph.norm.rvs(loc=0., scale=0.1, size=X.shape[1], name="beta")
    loc = np.einsum('ij,j->i', X, beta)
    y = ph.norm.rvs(loc=loc, scale=1., name="y")
    return y

  log_joint = ph.make_log_joint_fn(model)
  out = log_joint(X, y=y, beta=beta)
  ```

  #### Notes

  For implementation, we make several requirements:

  1. The `log_probs` module has a supported `log_prob` function for each
     random variable choice.
  2. A random variable's `rvs` method has the same kwargs as scipy.stats'
     `logpmf`/`logpdf` up to `size` and `random_state`.
  3. The event outcome is the first argument of the `log_prob` function in the
     `log_probs` module.
  4. User must use explicit kwargs (no positional arguments) when specifying
     `size` and `random_state` in the `rvs` method.
     TODO(trandustin): Relax this requirement.
  """
  def log_joint_fn(*args, **kwargs):
    """Log-probability of inputs according to a joint probability distribution.

    Args:
      *args: Positional arguments. They are the model's original inputs and can
        alternatively be specified as part of `kwargs`.
      **kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
        `v` is passed as a `value` to the random variable(s) whose keyword
        argument `name` during construction is equal to `k`.

    Returns:
      Scalar `np.ndarray`, which represents the model's log-probability summed
      over all PPLHam random variables and their dimensions.

    Raises:
      TypeError: If a random variable in the model has no specified value in
        `**kwargs`.
    """
    log_probs = []
    # Counts how many leftover positional args have been consumed by
    # intercepted random variables (in execution order).
    args_counter = []

    def interceptor(rv_call, *rv_args, **rv_kwargs):
      """Overrides a random variable's `value` and accumulates its log-prob."""
      if len(args) - len(args_counter) > 0:
        # Consume the next unused positional argument as this rv's value.
        value = args[len(args_counter)]
        args_counter.append(0)
      else:
        # Set value to keyword argument indexed by `name` (an input tensor).
        rv_name = rv_kwargs.get("name")
        if rv_name is None:
          # NOTE(review): `im_class` exists only on Python 2 bound methods;
          # this code (like the `six` usage elsewhere) targets Python 2.
          raise KeyError("Random variable call {} has no name in its arguments."
                         .format(rv_call.im_class.__name__))
        value = kwargs.get(rv_name)
        if value is None:
          raise LookupError("Keyword argument specifying value for {} is "
                            "missing.".format(rv_name))
      # Dispatch to the matching `<Distribution>_log_prob` in the log_probs
      # module; strip kwargs that logpmf/logpdf do not accept.
      log_prob_fn = getattr(_log_probs, rv_call.im_class.__name__ + "_log_prob")
      rv_kwargs.pop("size", None)
      rv_kwargs.pop("random_state", None)
      rv_kwargs.pop("name", None)
      log_prob = log_prob_fn(value, *rv_args, **rv_kwargs)
      log_probs.append(log_prob)
      return value

    # Split inputs into the model's own arguments and leftover rv values.
    args, model_args, model_kwargs = _get_function_inputs(
        model, *args, **kwargs)
    # Re-run the generative program with every rvs() call intercepted.
    with interception(interceptor):
      model(*model_args, **model_kwargs)
    log_prob = sum(log_probs)
    return log_prob
  return log_joint_fn
def _get_function_inputs(f, *args, **kwargs):
"""Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
*args: Keyword arguments to filter according to `f`.
**kwargs: Keyword arguments to filter according to `f`.
Returns:
New original args, args of f, kwargs of f.
"""
if hasattr(f, "_func"): # functions returned by tf.make_template
argspec = inspect.getargspec(f._func) # pylint: disable=protected-access
else:
argspec = inspect.getargspec(f)
fkwargs = {}
for k, v in six.iteritems(kwargs):
if k in argspec.args:
fkwargs[k] = v
kwargs.pop(k)
num_args = len(argspec.args) - len(fkwargs)
fargs = args[:num_args]
new_args = args[num_args:]
return new_args, fargs, fkwargs
class _InterceptorStack(threading.local):
"""A thread-local stack of interceptors."""
def __init__(self):
super(_InterceptorStack, self).__init__()
self.stack = [lambda f, *args, **kwargs: f(*args, **kwargs)]
_interceptor_stack = _InterceptorStack()
@contextmanager
def interception(interceptor):
  """Python context manager for interception.

  Upon entry, an interception context manager pushes an interceptor onto a
  thread-local stack. Upon exiting, it pops the interceptor from the stack.

  Args:
    interceptor: Function which takes a callable `f` and inputs `*args`,
      `**kwargs`.

  Yields:
    None.
  """
  try:
    _interceptor_stack.stack.append(interceptor)
    yield
  finally:
    # Always unwind, even if the body raised, so nested interception
    # contexts stay balanced.
    _interceptor_stack.stack.pop()
def get_interceptor():
  """Returns the top-most (last) interceptor on the thread's stack.

  The bottom-most (first) interceptor in the stack is a function which takes
  `f, *args, **kwargs` as input and returns `f(*args, **kwargs)`. It is the
  default if no `interception` contexts have been entered.
  """
  return _interceptor_stack.stack[-1]
def interceptable(func):
  """Decorator that wraps `func` so that its execution is intercepted.

  The wrapper passes `func` to the interceptor for the current thread.

  Args:
    func: Function to wrap.

  Returns:
    The decorated function.
  """
  @functools.wraps(func)
  def func_wrapped(*args, **kwargs):
    # Resolve the interceptor at call time so the active interception
    # context (not the decoration-time one) is used.
    return get_interceptor()(func, *args, **kwargs)
  return func_wrapped
# Automatically generate random variables from scipy.stats. We wrap all
# distributions by registering their `rvs` method as `interceptable`.
#
# A vanilla Edward 2.0-like PPL in SciPy would introduce a RandomVariable
# abstraction: it wraps SciPy frozen distributions and calls `rvs` to associate
# the RandomVariable with a sampled value. SciPy distributions already enable
# parameters as input to `rvs`. Therefore instead of introducing a new
# abstraction, we just wrap `rvs`. This enables the same manipulations.
#
# NOTE: this mutates the scipy.stats classes in place (module-level side
# effect) and re-exports each distribution from this module's namespace.
_globals = globals()
for _name in sorted(dir(stats)):
  _candidate = getattr(stats, _name)
  if isinstance(_candidate, (stats._multivariate.multi_rv_generic,  # pylint: disable=protected-access
                             stats.rv_continuous,
                             stats.rv_discrete,
                             stats.rv_histogram)):
    _candidate.rvs = interceptable(_candidate.rvs)
    _globals[_name] = _candidate
  del _candidate
class categorical_gen(stats._multivariate.multi_rv_generic):  # pylint: disable=invalid-name,protected-access
  """Categorical distribution.

  Implementation follows `scipy.stats.multinomial_gen`. We build this manually
  as scipy.stats does not support a categorical distribution.
  """

  def __init__(self, seed=None):
    super(categorical_gen, self).__init__(seed)

  def __call__(self, p, seed=None):
    """Return a frozen categorical with fixed probability vector `p`."""
    return categorical_frozen(p, seed)

  def _process_parameters(self, p):
    # Copy and renormalise so the final category absorbs rounding error.
    p = np.array(p, dtype=np.float64, copy=True)
    p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
    return p

  def rvs(self, p, size=None, random_state=None):
    """Draw one categorical sample per batch row of `p`.

    Only `size=1` is supported; any other value — including the default
    `size=None` — raises NotImplementedError.
    """
    if size != 1:
      raise NotImplementedError()
    p = self._process_parameters(p)
    random_state = self._get_random_state(random_state)
    # Inverse-CDF sampling: first index where the uniform draw falls below
    # the cumulative probability.
    scores = (random_state.uniform(size=p.shape[:-1] + (1,)) -
              np.cumsum(p, axis=-1))
    scores[scores < 0] = 0
    return np.argmin(scores, axis=-1)


categorical = categorical_gen()
categorical.rvs = interceptable(categorical.rvs)  # register `rvs` for PPLHam
class categorical_frozen(stats._multivariate.multi_rv_frozen):  # pylint: disable=invalid-name,protected-access
  """Categorical distribution with a fixed probability vector."""

  def __init__(self, p, seed=None):
    self._dist = categorical_gen(seed)
    self.p = self._dist._process_parameters(p)  # pylint: disable=protected-access
    # Monkey-patch the private parameter hook so rvs() reuses the already
    # processed probabilities instead of re-normalising on every draw.
    self._dist._process_parameters = lambda p: self.p  # pylint: disable=protected-access

  def rvs(self, size=1, random_state=None):
    return self._dist.rvs(self.p, size, random_state)
| google-research/autoconj | autoconj/pplham.py | pplham.py | py | 9,231 | python | en | code | 36 | github-code | 13 |
39580130106 | # -*- coding: utf-8 -*-
"""
Global configuration file for TG2-specific settings in turbogag.
This file complements development/deployment.ini.
Please note that **all the argument values are strings**. If you want to
convert them into boolean, for example, you should use the
:func:`paste.deploy.converters.asbool` function, as in::
from paste.deploy.converters import asbool
setting = asbool(global_conf.get('the_setting'))
"""
from tg.configuration import AppConfig
import turbogag
from turbogag import model
from turbogag.lib import app_globals, helpers
# TurboGears application configuration: renderers, ORM and authentication.
base_config = AppConfig()
base_config.renderers = []
base_config.prefer_toscawidgets2 = True
base_config.package = turbogag
#Enable json in expose
base_config.renderers.append('json')
#Enable genshi in expose to have a lingua franca for extensions and pluggable apps
#you can remove this if you don't plan to use it.
base_config.renderers.append('genshi')
#Set the default renderer (jinja2 templates)
base_config.default_renderer = 'jinja'
base_config.renderers.append('jinja')
base_config.jinja_extensions = ['jinja2.ext.with_']
#Configure the base SQLAlchemy setup
base_config.use_sqlalchemy = True
base_config.model = turbogag.model
base_config.DBSession = turbogag.model.DBSession
# Configure the authentication backend
# YOU MUST CHANGE THIS VALUE IN PRODUCTION TO SECURE YOUR APP
base_config.sa_auth.cookie_secret = "ChangeME"
base_config.auth_backend = 'sqlalchemy'
# what is the class you want to use to search for users in the database
base_config.sa_auth.user_class = model.User
from tg.configuration.auth import TGAuthMetadata
#This tells to TurboGears how to retrieve the data for your user
class ApplicationAuthMetadata(TGAuthMetadata):
    """Tells TurboGears how to load user, group and permission data."""
    def __init__(self, sa_auth):
        self.sa_auth = sa_auth
    def get_user(self, identity, userid):
        """Look up the User row matching the authenticated user name."""
        return self.sa_auth.dbsession.query(self.sa_auth.user_class).filter_by(user_name=userid).first()
    def get_groups(self, identity, userid):
        """Return the group names of the already-resolved identity user."""
        return [g.group_name for g in identity['user'].groups]
    def get_permissions(self, identity, userid):
        """Return the permission names of the already-resolved identity user."""
        return [p.permission_name for p in identity['user'].permissions]
# Wire the auth metadata provider into the repoze.who configuration.
base_config.sa_auth.dbsession = model.DBSession
base_config.sa_auth.authmetadata = ApplicationAuthMetadata(base_config.sa_auth)
# You can use a different repoze.who Authenticator if you want to
# change the way users can login
#base_config.sa_auth.authenticators = [('myauth', SomeAuthenticator()]
# You can add more repoze.who metadata providers to fetch
# user metadata.
# Remember to set base_config.sa_auth.authmetadata to None
# to disable authmetadata and use only your own metadata providers
#base_config.sa_auth.mdproviders = [('myprovider', SomeMDProvider()]
# override this if you would like to provide a different who plugin for
# managing login and logout of your application
base_config.sa_auth.form_plugin = None
# override this if you are using a different charset for the login form
base_config.sa_auth.charset = 'utf-8'
# You may optionally define a page where you want users to be redirected to
# on login:
base_config.sa_auth.post_login_url = '/post_login'
# You may optionally define a page where you want users to be redirected to
# on logout:
base_config.sa_auth.post_logout_url = '/post_logout'
| mengu/turbogag | turbogag/config/app_cfg.py | app_cfg.py | py | 3,285 | python | en | code | 8 | github-code | 13 |
23693546930 | from googlesearch import search,get_random_user_agent
import re,sys,time,os
# ANSI SGR escape codes used for coloured terminal output.
r = '\033[031m'  # red
g = '\033[032m'  # green
b = '\033[036m'  # cyan
y = '\033[033m'  # yellow
n = '\033[00m'   # reset
class ghd(object):
    """Interactive Google-dorking shell.

    Commands: `set <option> <value>`, `show info|filters`, `run`, `help`,
    `exit`.  Results matching the active regex mode are echoed and appended
    to a timestamped output file; the rest are kept in `filters`.

    NOTE(review): settings/filters/result are *class* attributes, so all
    instances share state (preserved for compatibility).
    """

    # Runtime configuration, mutated via the `set` command.
    settings = {
        'count':20,
        'sleep':2,
        'mode':"all",
        'dork': '',
        'target': '',
        'output' : "{dork}_%H%M%d%m.txt"
    }
    # Texts shown by the `help` command.
    helper = {
        'set': 'Set new value to existing value example.',
        'show': 'Show the settings configuration.',
        'run': 'Run the dork',
        'help': 'Show this message'
    }
    filters = []          # URLs rejected by the active regex mode
    # Result-filter modes: files ('fi'), folders ('fo'), or everything.
    regex = {
        'fi': r"(\w*\.[A-Za-z0-9]{,12}(\?|#).*$|\w*\.[A-Za-z0-9]*$|\/\w*(\?|#).*$)",
        'fo': r"(\/\w*\/|\/[^ \.]*$|\/(\?|#).*$)$",
        'all': r"^(http|https)://.*"
    }
    result = []           # raw results of the most recent search

    def ps(self):
        """Prompt until the user enters a non-empty query of at most 3 words.

        Bug fix: the old implementation recursed on invalid input but
        discarded the recursive result, returning the invalid query anyway
        (and looped forever on empty input).  Re-prompt in a loop instead.
        """
        while True:
            query = input(f"[{y}GHD{n}]> ")
            if not query:
                continue
            if len(query.split(" ")) > 3:
                print(f"[{r}-{n}] Invalid input")
                continue
            return query.lower()

    def dorker(self, dork):
        """Run a Google search for *dork* and store the raw results."""
        if self.settings['target'] is not None:
            domain = [self.settings['target']]
        else:
            domain = None
        self.result = search(dork, tld="com", lang='en', num=int(self.settings['count']),
                             stop=int(self.settings['count']), pause=float(self.settings['sleep']),
                             domains=domain, user_agent=get_random_user_agent())

    def filter(self, dork):
        """Split results by the active regex mode; log matches to the output file."""
        matched = 0
        skipped = 0
        # Filename is derived once from the output template + current time.
        out_path = time.strftime(self.settings['output'].format(dork=dork))
        for link in self.result:
            if re.search(self.regex[self.settings['mode']], link):
                print(f"[ {g}FOUND{n} ] {b}{link}{n}")
                # Bug fix: use a context manager so the handle is closed
                # (the old code leaked an open file per match).
                with open(out_path, 'a+') as fh:
                    fh.write(link + '\n')
                matched += 1
            else:
                self.filters.append(link)
                skipped += 1
        print()
        print(f"[ {g}FINISHED{n} ] Total result {y}{matched}{n}")
        print(f"[ {y}FILTER{n} ] Total filter {y}{skipped}{n}")

    def _run_single(self, dork):
        """Search and filter a single dork string."""
        if not self.settings['target']:
            self.settings['target'] = None
        print("-"*10 + f"{g}{dork.strip()}{n}" + "-"*10)
        self.dorker(dork)
        self.filter(dork)

    def start(self):
        """Main command loop; exits on `exit`, Ctrl-C, or a blocked IP."""
        try:
            while True:
                query = self.ps()
                command = query.split(" ")
                if command[0] == 'help':
                    for h in self.helper:
                        print(f"{h.upper()} {self.helper[h]}")
                elif command[0] == 'set':
                    # Guard against bare `set` (used to raise IndexError).
                    if len(command) != 3:
                        print(f"[{r}-{n}] Usage: set <option> <value>")
                    elif command[1] in self.settings:
                        self.settings[command[1]] = command[2]
                        print(f"[{g}+{n}] {command[1]} -> {self.settings[command[1]]}")
                    else:
                        print(f"[{r}-{n}]Settings not found!")
                elif command[0] == 'show':
                    if len(command) < 2:
                        print(f"[{r}-{n}] No action found!")
                    elif command[1] == "info":
                        for si, s in enumerate(self.settings, start=1):
                            print(f"{si}) {s} -> {self.settings[s]}")
                    elif command[1] == 'filters':
                        for fil in self.filters:
                            print(f"[ {y}FILTER{n} ] {fil}")
                    else:
                        print(f"[{r}-{n}] No action found!")
                elif command[0] == 'run':
                    if self.settings['mode'] not in self.regex:
                        print(f"[{r}-{n}] Filter unsupported mode! @ {self.settings['mode']} Reset to default... ", end='')
                        self.settings['mode'] = "all"
                        print(f"{g}OK{n}")
                    print(f"Using the '{self.settings['mode']}' mode @ {self.settings['dork']}")
                    print("Start Dorking... ", end='')
                    print(f"{g}DONE{n}")
                    print()
                    print(f"[{g}+{n}] Checking the file... ", end='')
                    if os.path.isfile(self.settings['dork']):
                        print(f"{g}OK{n}")
                        if input(f"[{b}*{n}] Dork file found! Do u want to process as mass dorker?(Y/n) ").lower() == 'y':
                            # Bug fix: close the dork file (context manager).
                            with open(self.settings['dork'], 'r') as f:
                                dork_lines = f.readlines()
                            for dk in dork_lines:
                                self._run_single(dk)
                                time.sleep(2)
                                print()
                        else:
                            print(f"[{g}+{n}] Searching as a dork instead of mass dorker.")
                            # Bug fix: this branch formatted `dk`, which is
                            # undefined here (NameError); use the configured
                            # dork instead.
                            self._run_single(self.settings['dork'])
                    else:
                        print(f"{r}ERROR{n}")
                        print(f"[{g}+{n}] Searching as a dork instead of mass dorker.")
                        self._run_single(self.settings['dork'])
                elif command[0] == 'exit':
                    break
                else:
                    print(f"[{r}-{n}] Command not found!")
        except KeyboardInterrupt:
            print("[-] User interrupted!")
            sys.exit(0)
        except OSError:
            # googlesearch raises on HTTP errors, typically a 429 from Google.
            print(f"[{r}-{n}]IP Blocked by Google. Try again later!")
            sys.exit(0)
if __name__ == "__main__":
banner = r"""{y} __ {r} _ {n}
{y}/__|_ _ __|_ {g}|_| .__|_o._ _ {r}| \ _ ._| _ ._ {n}
{y}\_|| |(_)_> |_ {g}| ||_|| ||_|| |(_| {r}|_/(_)| |<(/_| {n}
{g} _| {n}""".format(y=y,g=g,r=r,n=n)
print(banner)
ghd().start() | HanZawNyine/Special_Created | h4k3rTools/tool collect/ghd.py | ghd.py | py | 6,267 | python | en | code | 1 | github-code | 13 |
17047278044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserPeerpayprodAgreementModifyModel(object):
    """Request model for modifying an Alipay peer-pay agreement.

    Holds four optional request fields and converts between this object
    form and the plain-dict form used on the wire.
    """

    # Field names shared by both dict converters, in declaration order.
    _FIELD_NAMES = ('alipay_related_uid', 'alipay_user_id', 'quota', 'request_from')

    def __init__(self):
        self._alipay_related_uid = None
        self._alipay_user_id = None
        self._quota = None
        self._request_from = None

    @property
    def alipay_related_uid(self):
        return self._alipay_related_uid

    @alipay_related_uid.setter
    def alipay_related_uid(self, value):
        self._alipay_related_uid = value

    @property
    def alipay_user_id(self):
        return self._alipay_user_id

    @alipay_user_id.setter
    def alipay_user_id(self, value):
        self._alipay_user_id = value

    @property
    def quota(self):
        return self._quota

    @quota.setter
    def quota(self, value):
        self._quota = value

    @property
    def request_from(self):
        return self._request_from

    @request_from.setter
    def request_from(self, value):
        self._request_from = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue  # unset / falsy fields are omitted, as upstream does
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None when *d* is empty."""
        if not d:
            return None
        instance = AlipayUserPeerpayprodAgreementModifyModel()
        for name in AlipayUserPeerpayprodAgreementModifyModel._FIELD_NAMES:
            if name in d:
                setattr(instance, name, d[name])
        return instance
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayUserPeerpayprodAgreementModifyModel.py | AlipayUserPeerpayprodAgreementModifyModel.py | py | 2,571 | python | en | code | 241 | github-code | 13 |
43110440559 | #! /usr/bin/env python
import os
from math import pi
import pandas
from bokeh.io import curdoc
from bokeh import plotting as plt
from bokeh.layouts import column
from bokeh.models.tools import PanTool, WheelZoomTool, BoxZoomTool, CrosshairTool, HoverTool, ResetTool, SaveTool
column_names = [
'timestamp', 'date', 'symbol', 'open', 'high', 'low', 'close', 'volume'
]
columns_to_use = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
original_dataframe = pandas.read_csv("./bitcoin-daily.csv",
header=0,
names=column_names,
index_col='timestamp',
usecols=columns_to_use)
original_dataframe.index = pandas.to_datetime(original_dataframe.index, unit='ms')
dataframe = original_dataframe.sort_values(by='timestamp')
inc = dataframe.close > dataframe.open
dec = dataframe.open > dataframe.close
w = 12 * 60 * 60 * 1000 # half day in ms
output_file_path = "./bokeh.html"
curdoc().theme = "dark_minimal"
wheel_zoom_tool = WheelZoomTool()
candlesticks = plt.figure(
x_axis_type="datetime",
tools=[
PanTool(),
wheel_zoom_tool,
BoxZoomTool(),
CrosshairTool(line_alpha=0.2, line_color="#FFFFFF"),
HoverTool(),
ResetTool(),
SaveTool(),
],
title="Bitcoin Candlestick",
sizing_mode="stretch_both",
# x_axis_label="Date",
y_axis_label="Price",
output_backend="webgl",
)
candlesticks.min_border = 0
candlesticks.xaxis.visible = False
# candlesticks.xaxis.major_label_orientation = pi / 4
candlesticks.grid.grid_line_alpha = 0.3
candlesticks.toolbar.active_scroll = wheel_zoom_tool
candlesticks.toolbar.logo = None
candlesticks.segment(dataframe.index[inc], dataframe.high[inc], dataframe.index[inc], dataframe.low[inc], color="#42AF51")
candlesticks.segment(dataframe.index[dec], dataframe.high[dec], dataframe.index[dec], dataframe.low[dec], color="#E21E2A")
candlesticks.vbar(dataframe.index[inc], w, dataframe.open[inc], dataframe.close[inc], fill_color="#42AF51", line_color="#42AF51")
candlesticks.vbar(dataframe.index[dec], w, dataframe.open[dec], dataframe.close[dec], fill_color="#E21E2A", line_color="#E21E2A")
volume = plt.figure(
x_axis_type="datetime",
sizing_mode="stretch_both",
x_axis_label="Date",
y_axis_label="Price",
output_backend="webgl",
x_range=candlesticks.x_range,
)
volume.xaxis.major_label_orientation = pi / 4
volume.grid.grid_line_alpha = 0.3
volume.vbar(dataframe.index[inc], w, 0, dataframe.volume[inc], fill_color="#42AF51", line_color="#42AF51")
volume.vbar(dataframe.index[dec], w, 0, dataframe.volume[dec], fill_color="#E21E2A", line_color="#E21E2A")
volume.min_border = 0
layout = column(children=[candlesticks, volume], sizing_mode="stretch_both")
layout.margin = 0
plt.save(obj=layout, filename=output_file_path)
os.system(f"wslview {output_file_path}")
| Elindorath/finance | test-bokeh.py | test-bokeh.py | py | 2,968 | python | en | code | 0 | github-code | 13 |
36603704384 | # 드래곤커브
N = int(input())  # number of dragon curves to read
grid = [[0 for _ in range(101)] for _ in range(101)]  # 101x101 board, addressed grid[y][x]
dx, dy = [1,0,-1,0], [0,-1,0,1]  # direction 0: x+1, 1: y-1, 2: x-1, 3: y+1
endPoint = [0,0]  # x, y of the current rotation pivot (curve end point)
def turn(sPoint, bPoint):  # sPoint: rotation pivot, bPoint: point to rotate
    """Rotate bPoint 90 degrees about sPoint: (vx, vy) -> (-vy, vx).

    Every branch of the original case analysis (axis-aligned and diagonal)
    evaluates to the same linear map, so the rotation is computed directly:
        x' = sx + (sy - by),  y' = sy - (sx - bx)
    This also handles the degenerate bPoint == sPoint case (returns the
    pivot itself) instead of falling through and returning None.
    """
    sx, sy = sPoint[0], sPoint[1]
    bx, by = bPoint[0], bPoint[1]
    return [sx + (sy - by), sy - (sx - bx)]
for _ in range(N):
    x, y, d, g = map(int,input().split())  # start point, direction, generation
    dragon = [[x,y]]
    endPoint = [x+dx[d],y+dy[d]]  # end point of generation 0
    dragon.append(endPoint)
    while(g>0):
        temp = []
        for point in dragon:
            if point != endPoint:
                temp.append(turn(endPoint,point))  # add the point obtained by rotation
        dragon += temp
        # new end point = the start point rotated about the old end point
        endPoint = turn(endPoint,[x,y])
        g -= 1
    # mark the dragon curve on the board (note: x, y are rebound by this loop)
    for x,y in dragon:
        if x >= 0 and x <= 100 and y >=0 and y <= 100:
            grid[y][x] = 1
square = 0
# count unit squares whose four corners all lie on some curve
for i in range(100):
    for j in range(100):
        if grid[i][j] !=0 and grid[i+1][j] != 0 and grid[i][j+1]!=0 and grid[i+1][j+1]!=0:
            square += 1
print(square) | majung2/CTpractice | python/2020summer_study/boj_15685.py | boj_15685.py | py | 1,828 | python | en | code | 0 | github-code | 13 |
41017219670 | import sys, os
sys.path.append(os.getcwd())
from tornado import gen, ioloop, web
from lib.conf import KBConfig
from lib.log import KBLogger
from lib.store import Store
from lib.stm import STM
from lib.buffer import Buffer
from ui_handlers.admin.routing import adminRouting
from ui_handlers.front.routing import frontRouting
from ui_handlers.user.routing import userRouting
from ui_handlers.node.routing import nodeRouting
from ct_handlers.routing import clientRouting
conf = None
log = None
db = None
"""
Init configuration and Log system
"""
try:
print("rplexus server initializing...")
conf = KBConfig("config","./")
log = KBLogger(conf.LOG.log_file, "rplexus")
log.level = conf.LOG.log_level
stage1 = log.job("Stage1")
stage1.i("Configuration loaded",
"log_level:"+conf.LOG.log_level,
"maintenance:"+conf.SERVER.maintenance)
except Exception as inst:
print("Initializing failed")
print(type(inst))
print(inst.args)
print(inst)
sys.exit(-1)
"""
Init Db connection
"""
try:
stage1.i("DB Connecting",
str(conf.SERVER.db_ip),
str(conf.SERVER.db_port)
)
db = Store()
def initStatus(f):
if(not f.exception()):
stage1.d("DB connected")
else:
stage1.e("DB connection error", str(f.exception()))
db.connect(
conf.SERVER.db_ip,
conf.SERVER.db_username,
conf.SERVER.db_password,
conf.SERVER.db_port,
initStatus)
except Exception as inst:
stage1.e_tb("DB connection error", inst)
sys.exit(-1)
@gen.coroutine
def reload():
    """Periodic callback: arm the reloader timer on the first call, then
    re-read the configuration on every subsequent tick.

    `reloader` is the PeriodicCallback attached to the IOLoop in __main__;
    `conf`, `log` and `stage1` are module-level globals.
    """
    if not ioloop.IOLoop.current().reloader.is_running():
        # First invocation (scheduled via add_callback): start the timer.
        ioloop.IOLoop.current().reloader.start()
    else:
        try:
            conf.reload()
            log.level = conf.LOG.log_level
            stage1.d("Conf updated")
        except Exception as inst:
            # Keep the loop alive on a bad config; just log with traceback.
            stage1.e_tb("Conf updating failed", inst)
# Combined URL routing table: front + admin + user + node + client handlers.
routing = frontRouting + adminRouting + userRouting + nodeRouting + clientRouting

# Extra keyword arguments become application settings available to handlers.
application = web.Application(
    routing,
    debug = True,  # NOTE(review): debug mode is enabled unconditionally
    conf = conf,
    log = log,
    db = db,
    cookie_secret = conf.SERVER.cookie_key,
    xsrf_cookies = True,
    template_path = conf.SERVER.template_path,
    login_url = "/user/login",
    alive = STM(),  # shared state machines and command buffer
    touch = STM(),
    cmd = Buffer()
)

if __name__ == "__main__":
    application.listen(8000)
    stage1.i("Server listening...")
    mainloop = ioloop.IOLoop.instance()
    # Re-run `reload` every 600000 ms (10 minutes); the first call arms it.
    mainloop.reloader = ioloop.PeriodicCallback(reload, 600000)
    mainloop.add_callback(reload)
    mainloop.start()
| alierkanimrek/rpui | src/server.py | server.py | py | 2,648 | python | en | code | 0 | github-code | 13 |
24698089880 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
from logging.config import dictConfig
from dynaconf import LazySettings
# Configure Dynaconf.
# The {{ cookiecutter.app_name }} placeholders are rendered when the
# cookiecutter template is instantiated.
settings = LazySettings(
    ENVVAR_PREFIX_FOR_DYNACONF="APP_{{ cookiecutter.app_name | upper }}",
    ENVVAR_FOR_DYNACONF="APP_SETTINGS",
)
# Logging is configured from the LOGGING section of the loaded settings.
dictConfig(settings.LOGGING)
logger = logging.getLogger(__package__)

# Show loaded config
config = settings.as_dict()
if settings.get("CONFIG_PRETTYPRINT", False):
    # Optional pretty-printed JSON dump of the settings.
    config = json.dumps(settings.as_dict(), indent=2)
logger.info(
    f"Microservice initialized for ({settings.current_env} env) with: " f"{config}"
)
| Lowess/cookiecutter-python-app | {{cookiecutter.app_name}}/app/__init__.py | __init__.py | py | 632 | python | en | code | 0 | github-code | 13 |
18127409792 | import numpy as np
from facerec.distance import AbstractDistance
from facerec.util import asColumnMatrix
class OSS(AbstractDistance):
    """One-Shot Similarity (OSS) metric with an LDA-like underlying classifier.

    OSS was originally described in:
        Lior Wolf, Tal Hassner and Yaniv Taigman, "The One-Shot Similarity
        Kernel," IEEE International Conference on Computer Vision (ICCV),
        Sept. 2009.
        http://www.openu.ac.il/home/hassner/projects/Ossk/WolfHassnerTaigman_ICCV09.pdf

    Based on the MATLAB implementation available at:
        http://www.openu.ac.il/home/hassner/projects/Ossk/
    Copyright 2009, Lior Wolf, Tal Hassner, and Yaniv Taigman

    Input:
        XSN [list] A list of samples (the negative/background set)
    """

    def __init__(self, XSN):
        # NOTE(review): AbstractDistance.__init__ is not called here, as in
        # the original — confirm the base class tolerates that.
        if XSN is None:
            raise ValueError("XSN cannot be None")
        # Reshape into a column matrix:
        XSN = asColumnMatrix(XSN)
        self.meanXSN = np.mean(XSN, axis=1)
        Sw = np.cov(XSN.T)
        w, v = np.linalg.eigh(Sw)
        # eigh returns ascending eigenvalues; sort descending.
        idx = np.argsort(-w)
        w = w[idx]
        # Take the largest eigenvalue and regularise Sw before inversion:
        maxeig = w[0]
        Sw = Sw + 0.1 * np.eye(Sw.shape[0]) * maxeig
        # FIX: np.inv does not exist; matrix inverse lives in np.linalg.
        self.iSw = np.linalg.inv(Sw)
        self.sizeXSN = XSN.shape[1]

    def __call__(self, x1, x2):
        """Return the (one-sided) OSS score between samples x1 and x2.

        FIX: the original signature was (self, i, j) while the body used
        the undefined names x1/x2, so every call raised NameError; the
        parameters are now named to match the body.
        """
        mm = x1 - self.meanXSN
        v = np.dot(self.iSw, mm)
        # FIX: np.norm does not exist; use np.linalg.norm.
        v = v / np.linalg.norm(v)
        v0 = np.dot(v.T, (x1 + self.meanXSN)) / 2.
        score = np.dot(v.T, x2) - v0
        return score
2338298671 |
# Q program accepts n numbers from user add into list and return minimum and minimum number in list:
def minimum(List):
    """Return the smallest element of List.

    Raises IndexError when List is empty (same as the original behaviour).
    """
    smallest = List[0]  # avoid shadowing the builtin min() internally
    for value in List[1:]:  # skip the redundant self-comparison at index 0
        if value < smallest:
            smallest = value
    return smallest
def maximum(List):
    """Return the largest element of List.

    Raises IndexError when List is empty (same as the original behaviour).
    """
    largest = List[0]  # avoid shadowing the builtin max() internally
    for value in List[1:]:  # skip the redundant self-comparison at index 0
        if value > largest:
            largest = value
    return largest
def main():
    """Read n values from stdin into a list and report its minimum and maximum."""
    print("Enter the number of element you want to add into list:")
    size = int(input())
    print("Enter the numbers in list")
    List = []
    for i in range(0, size):
        # SECURITY NOTE: eval() executes arbitrary user input; int()/float()
        # would be safer. Kept to preserve the script's accepted input forms.
        values = eval(input())
        List.append(values)
    print("Data in list:", List)
    min = minimum(List)
    max = maximum(List)
    # FIX: the labels were swapped — the minimum was printed as "maximum
    # number" and vice versa; the format arguments are now in label order.
    print("maximum number is {} and minimum number is {}".format(max, min))
if __name__ == "__main__":
main()
| mayurgore2023/Python | list5.py | list5.py | py | 839 | python | en | code | 0 | github-code | 13 |
4265040764 | import os
from invoke import task
from app.product.model import Product as ProductModel
from app.db import get_db, Base, engine
from wsgi import app
from app.config import source_path
from app.utils.reader import Reader
@task
def init_db(ctx):
print("Creating all resources.")
Base.metadata.create_all()
engine.execute("insert into widget values (1, 'hey', 'there');")
print(engine.execute("select * from widget;"))
@task
def drop_all(ctx):
if input("Are you sure you want to drop all tables? (y/N)\n").lower() == "y":
print("Dropping tables...")
Base.metadata.drop_all()
def seed_things():
classes = [ProductModel]
for klass in classes:
seed_thing(klass)
def seed_thing(cls):
ingest = Reader(source_path)
data_list = ingest.get_file_items()
session = next(get_db())
session.bulk_insert_mappings(cls, data_list)
session.commit()
@task
def seed_db(ctx):
if (
input("Are you sure you want to drop all tables and recreate? (y/N)\n").lower()
== "y"
):
print("Dropping tables...")
Base.metadata.drop_all()
Base.metadata.create_all()
seed_things()
print("DB successfully seeded.")
| linikerdev/sapo-products | tasks.py | tasks.py | py | 1,228 | python | en | code | 0 | github-code | 13 |
22188309012 | import pandas as pd
def get_course_title(course_code):
    """Return the title for *course_code* from the module-level `courses`
    dict, or the sentinel string when the code is unknown.

    Uses an explicit lookup instead of the original bare ``except:``, which
    silently swallowed every exception type.
    """
    course = courses.get(course_code)
    if course is None:
        print(course_code +
              ' doesn\'t exist. Please verify the entry or contact the developer.')
        return 'TITLE NOT FOUND ERROR'
    return course.get('title')
def get_course_credit(course_code):
    """Return the credit count for *course_code* from the module-level
    `courses` dict, or the sentinel 9999 when the code is unknown.

    Uses an explicit lookup instead of the original bare ``except:``, which
    silently swallowed every exception type.
    """
    course = courses.get(course_code)
    if course is None:
        print(course_code +
              ' doesn\'t exist. Please verify the entry or contact the developer.')
        return 9999
    return course.get('credit')
def is_course_acquired(grade):
    """Return 0 for a pass/fail 'P' grade (no credits counted), 1 otherwise.

    The original wrapped this in try/except, but an equality test against a
    string cannot raise, so the handler was dead code and is removed.
    """
    return 0 if grade == 'P' else 1
# Course catalogue: one "CODE CREDIT TITLE..." line per course.
# NOTE(review): the file handles opened below are never closed; a
# `with open(...)` block would be the safe pattern.
courses = {}
for course in list(open("data/courses_credit.txt", "r")):
    course_arr = course.split(" ", 2)  # split at most twice: title may contain spaces
    courses[course_arr[0]] = {
        'title': course_arr[2].rstrip(),
        'credit': int(course_arr[1].rstrip())
    }

# Grade -> numeric value mapping: one "GRADE VALUE" line per grade.
grade_values = {}
for grade in list(open("data/grade_values.txt", "r")):
    grade_arr = grade.split(" ")
    grade_values[grade_arr[0]] = float(grade_arr[1].rstrip())
df = pd.read_csv('grade_report.csv')
df['Title'] = df['Code'].apply(get_course_title)
df['Credit'] = df['Code'].apply(get_course_credit)
df['Credit Acquired'] = df['Credit'] * df['Grade'].apply(is_course_acquired)
df['Value'] = df['Grade'].map(grade_values)
df['Grade Points'] = df['Credit'] * df['Value']
df = df[['Trimester', 'Code', 'Title', 'Credit',
'Credit Acquired', 'Grade', 'Value', 'Grade Points']]
print(df.sort_values(by=['Grade Points'], ascending=False))
sum_credit = df['Credit'].sum()
sum_credit_acquired = df['Credit Acquired'].sum()
sum_grade_points = df['Grade Points'].sum()
gpa = round(sum_grade_points / sum_credit_acquired, 2)
summary = {'Credits': [sum_credit], 'Credits Acquired': [
sum_credit_acquired], 'Grade Points': [sum_grade_points], 'GPA': [gpa]}
print(pd.DataFrame(data=summary))
retake = df[['Code', 'Credit', 'Grade']].copy()
grades = pd.DataFrame({'Expected': list(grade_values.keys())})
retake['key'] = 0
grades['key'] = 0
retake = retake.merge(grades, how='left', on='key')
retake.drop('key', 1, inplace=True)
retake['Difference GPA'] = round(retake['Credit'] * (retake['Expected'].map(
grade_values) - retake['Grade'].map(grade_values)) / sum_credit_acquired, 2)
retake['New GPA'] = gpa + retake['Difference GPA']
excluded_courses = ['INF1995', 'INF3005', 'INF3005A',
'INF3005I', 'INF3995', 'LOG2990', 'INF3995']
retake = retake[(retake['Difference GPA'] > 0) & ~retake['Code'].isin(
excluded_courses) & ~retake['Expected'].isin(['A*'])]
print(retake.sort_values(by=['New GPA'], ascending=False).head(15))
retake_one = retake.copy()
retake_one.drop('New GPA', 1, inplace=True)
retake_one = retake_one.rename(columns={'Code': 'Code 1', 'Credit': 'Credit 1',
'Grade': 'Grade 1', 'Expected': 'Expected 1', 'Difference GPA': 'Difference GPA 1'})
retake_two = retake.copy()
retake_two.drop('New GPA', 1, inplace=True)
retake_two = retake_two.rename(columns={'Code': 'Code 2', 'Credit': 'Credit 2',
'Grade': 'Grade 2', 'Expected': 'Expected 2', 'Difference GPA': 'Difference GPA 2'})
retake_one['key'] = 0
retake_two['key'] = 0
retake_new = retake_one.merge(retake_two, how='left', on='key')
retake_new = retake_new[retake_new['Code 1'] < retake_new['Code 2']]
retake_new['New GPA'] = gpa + retake_new['Difference GPA 1'] + \
retake_new['Difference GPA 2']
print(retake_new.sort_values(by=['New GPA'], ascending=False).head(15))
for grade in grade_values:
if grade not in ('P'):
expected_gpa = round((sum_grade_points + (120 - sum_credit) *
grade_values[grade]) / (120 - (sum_credit - sum_credit_acquired)), 2)
gpa_comparison = "+" if expected_gpa > gpa else "-"
print("GPA if only " + grade + " : " +
str(expected_gpa) + " (" + gpa_comparison + ")")
| jeandecian/polymtl-gpa-calculator | main.py | main.py | py | 4,038 | python | en | code | 0 | github-code | 13 |
23745230091 | class ExtratorArgumentosUrl:
url: str = None
    def __init__(self, url):
        """Store the URL lower-cased; raise LookupError for a falsy URL."""
        if self.url_eh_valida(url):
            self.url = url.lower()
        else:
            raise LookupError("Url invalida!")
    def __len__(self):
        # Length of the extractor == length of the stored URL string.
        return len(self.url)
    def __str__(self):
        """Human-readable dump of the value and the two currencies.

        NOTE(review): retorna_moedas() may mutate self.url (it injects a
        default origin currency), so str() is not strictly side-effect free.
        """
        moeda_origem, moeda_destino = self.retorna_moedas()
        valor = self.retorna_valor()
        representacao_string = "Valor: {}\nMoeda Origem: {}\nMoeda Destino: {}".format(valor, moeda_origem, moeda_destino)
        return representacao_string
    def __eq__(self, objeto):
        # NOTE(review): assumes `objeto` also has a `url` attribute; comparing
        # against an unrelated type raises AttributeError instead of returning
        # NotImplemented.
        return self.url == objeto.url
@staticmethod
def url_eh_valida(url):
if url:
return True
else:
return False
    def retorna_moedas(self):
        """Extract (origin, destination) currencies from the stored URL.

        Expects a lower-cased query such as
        ``...?moedaorigem=X&moedadestino=Y&valor=Z``. When the slice for the
        origin lands on the literal "moedadestino" (origin parameter absent),
        a default origin is injected via verifica_moeda_origem, which
        mutates self.url.
        """
        busca_moeda_origem = "moedaorigem"
        busca_moeda_destino = "moedadestino"
        inicio_substring_moeda_origem = self.encontra_indice_inicio_substring(busca_moeda_origem)
        final_substring_moeda_origem = self.url.find("&")
        moeda_origem = self.url[inicio_substring_moeda_origem:final_substring_moeda_origem]
        if moeda_origem == "moedadestino":
            # No explicit origin in the URL: fall back to the default.
            moeda_origem = self.verifica_moeda_origem(busca_moeda_origem)
        inicio_substring_moeda_destino = self.encontra_indice_inicio_substring(busca_moeda_destino)
        final_substring_moeda_destino = self.url.find("&valor")
        moeda_destino = self.url[inicio_substring_moeda_destino:final_substring_moeda_destino]
        return moeda_origem, moeda_destino
    def encontra_indice_inicio_substring(self, moeda_ou_valor):
        # Index just past "<param>=" (find() + len(param) + 1 for the '=').
        # NOTE(review): find() returning -1 (parameter absent) is not handled.
        return self.url.find(moeda_ou_valor) + len(moeda_ou_valor) + 1
    def verifica_moeda_origem(self, busca_moeda_origem):
        """Inject the default origin currency ("real") and re-extract it.

        Mutates self.url: the first occurrence of "moedadestino" is replaced
        by "real", then the origin value is sliced again from the fixed URL.
        NOTE(review): the re-extraction still searches for "moedaorigem",
        which may be absent — verify against the expected URL shapes.
        """
        self.url = self.url.replace("moedadestino", "real", 1)
        inicio_substring_moeda_origem = self.encontra_indice_inicio_substring(busca_moeda_origem)
        final_substring_moeda_origem = self.url.find("&")
        return self.url[inicio_substring_moeda_origem:final_substring_moeda_origem]
def retorna_valor(self):
busca_valor = "Valor".lower()
inicio_substring_valor = self.encontra_indice_inicio_substring(busca_valor)
valor = self.url[inicio_substring_valor:]
return valor
| DAT-Alura/Formation_Python | String_manipulation/content/extrator_argumentos_url.py | extrator_argumentos_url.py | py | 2,253 | python | pt | code | 0 | github-code | 13 |
31078500963 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advent of Code 2020 day 13 module."""
import math
def _earliest_time(bus, arrival):
return math.ceil(arrival / bus) * bus
def earliest(buses, arrival):
    """Return (bus_id, departure_time) for the first bus leaving at or after
    *arrival*; (None, None) when no real bus is present in *buses*."""
    departures = (
        (bus, _earliest_time(bus, arrival))
        for bus, _ in buses
        if bus is not None
    )
    # min keeps the first of equal departure times, matching the strict `<`
    # comparison of the original loop.
    return min(departures, key=lambda pair: pair[1], default=(None, None))
def crt(nums):
    """Chinese remainder theorem: smallest non-negative x with
    x ≡ b (mod a) for every (a, b) pair in *nums*.

    The moduli must be pairwise coprime so that pow(..., -1, a) — the
    modular inverse (Python 3.8+) — exists for each partial product.
    """
    modulus = math.prod(a for a, _ in nums)
    total = 0
    for a, b in nums:
        partial = modulus // a
        total += b * partial * pow(partial, -1, a)
    return total % modulus
def process(puzzle_input, verbose=False):
    """Solve both parts of AoC 2020 day 13.

    puzzle_input[0] is the earliest departure timestamp; puzzle_input[1] is
    the comma-separated bus list with 'x' placeholders. Part one multiplies
    the best bus id by its wait time; part two finds (via CRT) the timestamp
    t where every bus departs at t + its list offset.
    `verbose` is accepted for CLI symmetry but unused here.
    """
    arrival = int(puzzle_input[0])
    # Keep (bus_id, offset-in-list) for every real bus.
    buses = [(int(bus), i) for i, bus in enumerate(puzzle_input[1].split(",")) if bus != "x"]
    bus, time = earliest(buses, arrival)
    p1 = bus * (time - arrival)
    # t ≡ -offset ≡ bus - offset (mod bus) for each bus.
    p2 = crt([(bus, bus - offset) for bus, offset in buses])
    return p1, p2
def main():
    """CLI entry point: parse args, read the input file, print both parts."""
    import argparse
    import fileinput
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', help='input file to read ("-" for stdin)')
    parser.add_argument('-v', '--verbose', '-d', '--debug',
                        action='store_true', dest='verbose', help='verbose output')
    args = parser.parse_args()
    try:
        # Keep only non-blank lines.
        puzzle_input = [line.strip() for line in fileinput.input(args.infile) if line.strip()]
        p1, p2 = process(puzzle_input, verbose=args.verbose)
        print(f'Part one: {p1}')
        print(f'Part two: {p2}')
    except KeyboardInterrupt:
        pass


if __name__ == '__main__':
    main()
| pmrowla/aoc2020 | day13.py | day13.py | py | 1,776 | python | en | code | 0 | github-code | 13 |
4593672785 | class Solution:
"""
@param nums: A list of integers
@param k: An integer denote to find k non-overlapping subarrays
@return: An integer denote the sum of max k non-overlapping subarrays
"""
def maxSubArray(self, nums, k):
o = -sys.maxint
matrixLocal = [[o] * (k + 1) for i in range(len(nums) + 1)]
matrixGlobal = [[o] * (k + 1) for i in range(len(nums) + 1)]
matrixLocal[0][0] = 0
matrixGlobal[0][0] = 0
for i in range(1, len(nums) + 1):
matrixLocal[i][0] = 0
matrixGlobal[i][0] = 0
for j in range(1, k + 1):
matrixLocal[i][j] = max(matrixLocal[i - 1][j] + nums[i - 1],
matrixGlobal[i - 1][j - 1] + nums[i - 1])
# continuous or begin new
matrixGlobal[i][j] = max(matrixGlobal[i - 1][j],
matrixLocal[i][j])
return matrixGlobal[-1][-1]
def maxSubArray1(self, nums, k):
# write your code here
oo = 2 ** 32
n = len(nums)
f = [[-oo] * (k + 1), [-oo] * (k + 1)]
g = [[-oo] * (k + 1), [-oo] * (k + 1)]
f[0][0] = 0
g[0][0] = 0
for i in range(1, n + 1):
f[i % 2][0] = 0
g[i % 2][0] = 0
for j in range(1, k + 1):
f[i % 2][j] = max(f[(i - 1) % 2][j] + nums[i - 1],
g[(i - 1) % 2][j - 1] + nums[i - 1])
g[i % 2][j] = max(g[(i - 1) % 2][j], f[i % 2][j])
return g[n % 2][k] | ultimate010/codes_and_notes | 43_maximum-subarray-iii/maximum-subarray-iii.py | maximum-subarray-iii.py | py | 1,606 | python | en | code | 0 | github-code | 13 |
32873653543 | import torch
import torchvision.transforms.functional as TF
import kornia.geometry.transform as K
import cv2
from torch.utils.data import Dataset
from pathlib import Path
import numpy as np
from superpoint.settings import DATA_PATH
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
class HPatches(Dataset):
    def __init__(self, data_config, device="cpu") -> None:
        """data_config: dict-like with keys "name", "alteration" and
        "preprocessing" (see _init_dataset / ratio_preserving_resize);
        device: torch device string for all tensors produced."""
        super(HPatches, self).__init__()
        self.config = data_config
        self.device = device
        # Index all (image, warped image, homography) triplets on disk once.
        self.samples = self._init_dataset()
def _init_dataset(self):
data_dir = Path(DATA_PATH, self.config["name"])
folder_dirs = [x for x in data_dir.iterdir() if x.is_dir()]
image_paths = []
warped_image_paths = []
homographies = []
names = []
for folder_dir in folder_dirs:
if self.config["alteration"] == 'i' != folder_dir.stem[0] != 'i':
continue
if self.config["alteration"] == 'v' != folder_dir.stem[0] != 'v':
continue
num_images = 1 if self.config['name'] == 'COCO' else 5
file_ext = '.ppm' if self.config['name'] == 'HPatches' else '.jpg'
for i in range(2, 2 + num_images):
image_paths.append(str(Path(folder_dir, "1" + file_ext)))
warped_image_paths.append(str(Path(folder_dir, str(i) + file_ext)))
homographies.append(np.loadtxt(str(Path(folder_dir, "H_1_" + str(i)))))
names.append(f"{folder_dir.stem}_{1}_{i}")
files = {'image_paths': image_paths,
'warped_image_paths': warped_image_paths,
'homography': homographies,
'names': names}
return files
    def __len__(self):
        # One sample per (reference image, warped image) pair.
        return len(self.samples['image_paths'])
    def read_image(self, image):
        """Load *image* (a path string) as grayscale and return a float32
        tensor on self.device.

        NOTE(review): cv2.imread returns None for unreadable paths, which
        would make as_tensor fail — paths are assumed valid from
        _init_dataset.
        """
        image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
        return torch.as_tensor(image, dtype=torch.float32, device=self.device)
    def ratio_preserving_resize(self, image):
        """
        Resize image while preserving the aspect ratio.

        Scales by the larger of the per-axis target/source ratios (so the
        image covers the target), then center-crops to the exact target size
        from config["preprocessing"]["resize"].
        """
        target_size = torch.as_tensor(self.config["preprocessing"]["resize"], dtype=torch.int32)
        # Per-axis ratios between target and current (H, W) shape.
        scales = torch.divide(target_size, torch.as_tensor(image.shape, dtype=torch.float32))
        new_size = (torch.as_tensor(image.shape[:2], dtype=torch.float32) * torch.max(scales)).to(torch.int32)
        image = K.resize(image, size=[new_size[0], new_size[1]], interpolation='bilinear', align_corners=False)
        image = TF.center_crop(image, output_size=[target_size[0].item(), target_size[1].item()])
        return image
    def adapt_homography_to_resize(self, homographies):
        """Rescale a ground-truth homography so it maps between the resized
        images produced by ratio_preserving_resize.

        Composes warped_translation @ down_scale @ H @ up_scale @ translation:
        undo the reference-image resize/crop, apply H, then apply the
        warped-image resize/crop.
        """
        source_size = homographies["image_shape"]
        source_warped_size = homographies["warped_image_shape"]
        target_size = torch.as_tensor(self.config["preprocessing"]["resize"], dtype=torch.float32, device=self.device)
        # Ratio-preserving scale factor used for the reference image.
        s = torch.max(torch.divide(target_size, source_size))
        up_scale = torch.diag(torch.stack([1. / s, 1. / s, torch.tensor(1.)]))
        # Scale factor used for the warped image.
        warped_s = torch.max(torch.divide(target_size, source_warped_size))
        down_scale = torch.diag(torch.stack([warped_s, warped_s, torch.tensor(1.)]))
        # Center-crop offsets for the reference image.
        pad_y = ((source_size[0] * s - target_size[0]) / torch.tensor(2.)).to(torch.int32)
        pad_x = ((source_size[1] * s - target_size[1]) / torch.tensor(2.)).to(torch.int32)
        translation = torch.diag(torch.stack([torch.tensor(1.), torch.tensor(1.), torch.tensor(1.)]))
        translation[0, -1] = pad_x
        translation[1, -1] = pad_y
        translation = translation.to(torch.float32)
        # Center-crop offsets for the warped image (applied with opposite sign).
        pad_y = ((source_warped_size[0] * warped_s - target_size[0]) / torch.tensor(2.)).to(torch.int32)
        pad_x = ((source_warped_size[1] * warped_s - target_size[1]) / torch.tensor(2.)).to(torch.int32)
        warped_translation = torch.diag(torch.stack([torch.tensor(1.), torch.tensor(1.), torch.tensor(1.)]))
        warped_translation[0, -1] = -pad_x
        warped_translation[1, -1] = -pad_y
        warped_translation = warped_translation.to(torch.float32)
        H = warped_translation @ down_scale @ homographies["homography"] @ up_scale @ translation
        return H
    def __getitem__(self, index):
        """Return one sample: both images (optionally resized, scaled to
        [0, 1]) plus the homography adapted to the resized geometry and the
        pair name."""
        image = self.read_image(self.samples['image_paths'][index])
        warped_image = self.read_image(self.samples['warped_image_paths'][index])
        homography = torch.as_tensor(self.samples['homography'][index], dtype=torch.float32, device=self.device)
        name = self.samples['names'][index]
        if self.config["preprocessing"]["resize"]:
            image_shape = torch._shape_as_tensor(image)
            warped_image_shape = torch._shape_as_tensor(warped_image)
            homographies = {"homography": homography,
                            "image_shape": image_shape,
                            "warped_image_shape": warped_image_shape}
            # Adapt H first (it needs the original shapes), then resize.
            homography = self.adapt_homography_to_resize(homographies)
            image = self.ratio_preserving_resize(image)
            warped_image = self.ratio_preserving_resize(warped_image)
        # Normalise 8-bit grayscale intensities to [0, 1].
        image /= 255.
        warped_image /= 255.
        data = {"image": image,
                "warped_image": warped_image,
                "homography": homography,
                "name": name}
        return data
def batch_collator(self,batch):
images = torch.stack([item["image"].unsqueeze(0) for item in batch])
warped_images = torch.stack([item["warped_image"].unsqueeze(0) for item in batch])
homographies = torch.stack([item["homography"] for item in batch])
names = [item["name"] for item in batch]
output = {"image": images,
"warped_image": warped_images,
"homography": homographies,
"name": names}
return output | AliYoussef97/SuperPoint-NeRF-Pytorch | superpoint/superpoint/data/HPatches.py | HPatches.py | py | 5,995 | python | en | code | 5 | github-code | 13 |
26702363144 | import argparse, boto3, json, logging, os, requests, time, yaml, zlib
from datetime import datetime
from multiprocessing.pool import ThreadPool
from config import *
def pull_subreddit(packed_item):
    """Ingest every item of one (subreddit, item_type) pair from Pushshift.

    Walks the Pushshift API backwards in time from "now", uploading each
    retrieved item to S3 through a per-subreddit thread pool. Retries with
    exponential backoff (bounded by pushshift_retries) on request/JSON/data
    errors, and stops early when the --update window is exceeded.
    """
    subreddit = packed_item[0]
    item_type = packed_item[1]
    item_count = 0
    retry_count = 0
    additional_backoff = 1
    start_epoch = int(datetime.utcnow().timestamp())
    previous_epoch = start_epoch  # cursor: fetch items created before this
    item_count = 0  # (harmless duplicate of the initialisation above)
    s3_pool = ThreadPool(processes=pool_s3_threads_per_subreddit)
    logger.info(f"{subreddit}: Ingesting {item_type}s")
    while True:
        new_url = pushshift_query_url.format(item_type, subreddit) + str(previous_epoch)
        try:
            fetched_data = requests.get(
                new_url, headers=pushshift_query_headers, timeout=pushshift_timeout
            )
        except Exception as e:
            # Network-level failure: double the backoff and retry (bounded).
            additional_backoff = additional_backoff * 2
            logger.info(f"{subreddit}: Backing off due to api error: {e}")
            retry_count = retry_count + 1
            time.sleep(additional_backoff)
            if retry_count >= pushshift_retries:
                break
            continue
        try:
            json_data = fetched_data.json()
        except Exception as e:
            # Response body was not valid JSON: same backoff-and-retry path.
            additional_backoff = additional_backoff * 2
            logger.info(f"{subreddit}: Backing off due to json error: {e}")
            retry_count = retry_count + 1
            time.sleep(additional_backoff)
            if retry_count >= pushshift_retries:
                break
            continue
        if "data" not in json_data:
            # Well-formed JSON but no payload: treat as a transient error too.
            additional_backoff = additional_backoff * 2
            logger.info(f"{subreddit}: Backing off due to data error: no data")
            retry_count = retry_count + 1
            time.sleep(additional_backoff)
            if retry_count >= pushshift_retries:
                break
            continue
        items = json_data["data"]
        retry_count = 0  # successful fetch: reset the backoff state
        additional_backoff = 1
        if len(items) == 0:
            logger.info(
                f"{subreddit}: Pushshift API returned no more {item_type}s for {subreddit}"
            )
            break
        clean_items = []
        for item in items:
            if not "id" in item.keys():
                logger.critical(f"{subreddit}: No 'id' in result, cannot create key")
            else:
                if not "created_utc" in item.keys():
                    logger.warning(
                        f"{subreddit}: No 'created_utc' in result {item['id']}, may cause loop"
                    )
                else:
                    # Step the cursor to just before the oldest item seen.
                    previous_epoch = item["created_utc"] - 1
                item_count += 1
                clean_items.append([subreddit, item_type, item])
        tempstamp = datetime.fromtimestamp(previous_epoch).strftime("%Y-%m-%d")
        logger.info(
            f"{subreddit}: Retrieved {item_count} {item_type}s through {tempstamp}"
        )
        # Upload this page in parallel before fetching the next one.
        s3_pool.map(s3_upload, clean_items)
        logger.info(
            f"{subreddit}: Archived {item_count} {item_type}s through {tempstamp}"
        )
        if args.update:
            # --update N: only fetch the most recent N days of items.
            update_limit_in_seconds = args.update * 60 * 60 * 24
            if start_epoch - previous_epoch > update_limit_in_seconds:
                logger.info(
                    f"{subreddit}: Stopping pull for {subreddit} due to update flag"
                )
                break
def s3_upload(packed_item):
    """Compress one item with zlib (level 9) and PUT it to S3.

    Key layout: ``<subreddit>/<item_type>/<id>.zz``. Uses the module-level
    boto3 `client` and the `s3_bucket_name` setting from config.
    """
    subreddit = packed_item[0]
    item_type = packed_item[1]
    item = packed_item[2]
    key = f"{subreddit}/{item_type}/{item['id']}.zz"
    body = zlib.compress(str.encode(json.dumps(item)), level=9)
    logger.debug(f"Attempting to save {key}")
    client.put_object(Bucket=s3_bucket_name, Key=key, Body=body)
    logger.debug(f"Saved {key} successfully")
parser = argparse.ArgumentParser(
description=(
"Consumes Reddit data from Pushshift, compresses, then stores it in S3 in parallel. "
"Optionally, pulls only the most recent data, or pulls from a YAML file of subreddits. "
)
)
parser.add_argument(
"-u",
"--update",
type=int,
help="How many days in the past we should fetch data for",
)
parser.add_argument(
"-t",
"--type",
help="Changes type of data to fetch (default: submission) (can use 'both')",
choices={"comment", "submission", "both"},
default="submission",
)
parser.add_argument(
"-s",
"--subreddits",
type=str,
nargs="+",
help="List of subreddits to fetch data for; can also be a YAML file",
)
parser.add_argument(
"-l",
"--log",
type=str,
help="File to put logs out to",
)
parser.add_argument(
"-d", "--debug", help="Output a metric shitton of runtime data", action="store_true"
)
parser.add_argument(
"-v",
"--verbose",
help="Output a reasonable amount of runtime data",
action="store_true",
)
args = parser.parse_args()
if args.debug:
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
if args.log:
logging.basicConfig(
filename=args.log,
format="%(asctime)s %(levelname)-8s %(message)s",
level=log_level,
)
else:
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s", level=log_level
)
logger = logging.getLogger()
session = boto3.session.Session()
client = session.client(
"s3",
region_name=s3_region_name,
endpoint_url=s3_endpoint_url,
aws_access_key_id=s3_access_key_id,
aws_secret_access_key=s3_secret_key,
)
subreddits = []
if os.path.isfile(args.subreddits[0]):
with open(args.subreddits[0], "r") as config_file:
yaml_config = yaml.safe_load(config_file)
for classification, subreddits_from_classification in yaml_config.items():
for subreddit_from_classification in subreddits_from_classification:
subreddits.append(subreddit_from_classification)
else:
subreddits = args.subreddits
types_to_fetch = []
if args.type == "both":
types_to_fetch.append("submission")
types_to_fetch.append("comment")
else:
types_to_fetch.append(args.type)
packed_items = []
for type_to_fetch in types_to_fetch:
for subreddit in subreddits:
packed_items.append([subreddit, type_to_fetch])
subreddit_pool = ThreadPool(processes=pool_subreddits)
subreddit_pool.map(pull_subreddit, packed_items) | r-cybersecurity/pushshift-to-s3 | main.py | main.py | py | 6,414 | python | en | code | 1 | github-code | 13 |
21308681889 | '''
This module contains helper functions for plotting rampedpyrox data.
'''
from __future__ import(
division,
print_function,
)
__docformat__ = 'restructuredtext en'
__all__ = ['_bd_plot_bge',
'_plot_dicts',
'_plot_dicts_iso',
'_rem_dup_leg',
]
import numpy as np
#define function to plot carbon flux overlaid by BGE
def _bd_plot_bge(
    t_elapsed,
    bge,
    bge_err = None,
    ax = None,
    ymin = 0.0,
    ymax = 1.0):
    '''
    Function to plot the carbon flux (in ugC min-1 L-1) overlaid by bacterial
    growth efficiency for each time bin.

    Parameters
    ----------
    t_elapsed : pd.Series
        Series containing the time elapsed (in minutes), with pd.DatetimeIndex
        as index.

    bge : pd.Series
        Series containing calculated BGE values, reported at the final
        timepoint for a given value.

    bge_err : None or pd.Series
        Series containing uncertainties for BGE values, reported at the final
        timepoint for a given value. If `None`, no uncertainty is plotted.
        Defaults to `None`.

    ax : None or matplotlib.axis
        Axis to plot BGE data on. If `None`, automatically creates a
        ``matplotlip.axis`` instance to return. Defaults to `None`.

    ymin : float
        Minimum y value for BGE axis. Defaults to `0.0`.

    ymax : float
        Maximum y value for BGE axis. Defaults to `1.0`.

    Returns
    -------
    ax : matplotlib.axis
        Axis containing BGE data
    '''
    #create axis if necessary and label
    if ax is None:
        # NOTE(review): `plt` is not among this module's visible imports
        # (only numpy is imported here) — confirm matplotlib.pyplot is
        # imported elsewhere at file top, otherwise this raises NameError.
        _, ax = plt.subplots(1, 1)

    #find t_elapsed values for each entry in bge
    bge_inds = bge.index
    # NOTE(review): bge_times is computed but never used below.
    bge_times = t_elapsed[bge_inds]

    #loop through each time range and plot BGE
    # each BGE value spans [previous timepoint, its own timepoint], so the
    # first index is only used as the left edge of the first bin
    for i, ind in enumerate(bge_inds[1:]):

        #find bounding time points
        t0 = t_elapsed[bge_inds[i]]
        tf = t_elapsed[ind]
        # NOTE(review): integer [] indexing on a pd.Series relies on the
        # (deprecated) positional fallback; presumably intended as
        # bge.iloc[i+1] — verify against pandas version in use.
        b = bge[i+1]

        #plot results as a horizontal segment across the time bin
        ax.plot(
            [t0, tf],
            [b, b],
            c = 'k',
            linewidth = 2
            )

        #include uncertainty as a shaded box
        if bge_err is not None:

            # NOTE(review): same positional-indexing caveat as bge[i+1]
            berr = bge_err[i+1]

            ax.fill_between(
                [t0, tf],
                b - berr,
                b + berr,
                alpha = 0.5,
                color = 'k',
                linewidth = 0
                )

    #set limits and label
    ax.set_ylim([ymin, ymax])
    ax.set_ylabel('Bacterial Growth Efficiency (BGE)')

    return ax
#define function to pull plotting dicts
def _plot_dicts(case, td):
'''
Function to access different plotting dicts.
Parameters
----------
case : str
The case that defines the dict to pull.
Acceptable strings:
'bd_labs', \n
'bd_md', \n
'bd_rd', \n
'rpo_labs', \n
'rpo_md', \n
'rpo_rd'
td : TimeData or subclass
``rp.TimeData`` instance containing the data to plot.
Returns
-------
pl_dict : dict
The resulting dictionary containing plotting info.
'''
if case == 'bd_labs':
#create a dict to keep track of axis labels
pl_dict = {'secs':
{'fraction' : ('time (s)', 'g (unitless)'),
'rate' : ('time (s)', r'fraction/time $(s^{-1})$')
},
'mins' :
{'fraction' : ('time (min)', 'g (unitless)'),
'rate' : ('time (min)', r'fraction/temp $(K^{-1})$')
},
'hours' :
{'fraction' : ('time (hr)', 'g (unitless)'),
'rate' : ('time (hr)', r'fraction/temp $(K^{-1})$')
},
'days' :
{'fraction' : ('time (d)', 'g (unitless)'),
'rate' : ('time (d)', r'fraction/temp $(K^{-1})$')}
}
elif case == 'bd_md':
#create a dict to keep track of cases of modeled data
pl_dict = {'secs':
{'fraction' : (td.t, td.ghat),
'rate' : (td.t, -td.dghatdt)
},
'mins':
{'fraction' : (td.t / 60, td.ghat),
'rate' : (td.t / 60, -td.dghatdT)
},
'hours':
{'fraction' : (td.t / (60*60), td.ghat),
'rate' : (td.t / (60*60), -td.dghatdT)
},
'days':
{'fraction' : (td.t / (60*60*24), td.ghat),
'rate' : (td.t / (60*60*24), -td.dghatdT)}
}
elif case == 'bd_rd':
#create a dict to keep track of cases for real data
pl_dict = {'secs':
{'fraction' : (td.t, td.g),
'rate' : (td.t, -td.dgdt)
},
'mins':
{'fraction' : (td.t / 60, td.g),
'rate' : (td.t / 60, -td.dgdT)
},
'hours':
{'fraction' : (td.t / (60*60), td.g),
'rate' : (td.t / (60*60), -td.dgdT)
},
'days':
{'fraction' : (td.t / (60*60*24), td.g),
'rate' : (td.t / (60*60*24), -td.dgdT)}
}
elif case == 'rpo_labs':
#create a nested dict to keep track of axis labels
pl_dict = {'time':
{'fraction' : ('time (s)', 'g (unitless)'),
'rate' : ('time (s)', r'fraction/time $(s^{-1})$')
},
'temp' :
{'fraction' : ('temp (K)', 'g (unitless)'),
'rate' : ('temp (K)', r'fraction/temp $(K^{-1})$')}
}
elif case == 'rpo_md':
#create a nested dict to keep track of cases of modeled data
pl_dict = {'time':
{'fraction' : (td.t, td.ghat),
'rate' : (td.t, -td.dghatdt)
},
'temp':
{'fraction' : (td.T, td.ghat),
'rate' : (td.T, -td.dghatdT)}
}
elif case == 'rpo_rd':
#create a nested dict to keep track of cases for real data
pl_dict = {'time':
{'fraction' : (td.t, td.g),
'rate' : (td.t, -td.dgdt)
},
'temp':
{'fraction' : (td.T, td.g),
'rate' : (td.T, -td.dgdT)}
}
return pl_dict
#define function to pull plotting dicts
def _plot_dicts_iso(case, ri):
'''
Function to access different plotting dicts.
Parameters
----------
case : str
The case that defines the dict to pull.
Acceptable strings:
'rpo_rd', \n
'rpo_labs', \n
'rpo_md'
ri : Results or subclass
``rp.Results`` instance containing the data to plot.
Returns
-------
pl_dict : dict
The resulting dictionary containing plotting info.
'''
if case == 'rpo_iso_labs':
#create a nested dict to keep track of isotope result axis labels
pl_dict = {'E':
{'p0E' : (r'E (kJ $mol^{-1}$)',
r'p(0,E)'),
'Fm' : (r'E (kJ $mol^{-1}$)', r'Fm'),
'd13C' : (r'E (kJ $mol^{-1}$)',
r'$\delta^{13}C$ (VPDB)')}
}
elif case == 'iso_corr':
#create a nested dict to keep track of cases of scatter
pl_dict = {'E':
{'Fm_corr' : (ri.E_frac, ri.Fm_corr,
ri.E_frac_std, ri.Fm_corr_std),
'd13C_corr' : (ri.E_frac, ri.d13C_corr,
ri.E_frac_std, ri.d13C_corr_std)}
}
elif case == 'iso_raw':
#create a nested dict to keep track of cases of scatter
pl_dict = {'E':
{'Fm_raw' : (ri.E_frac, ri.Fm_raw,
ri.E_frac_std, ri.Fm_raw_std),
'd13C_raw' : (ri.E_frac, ri.d13C_raw,
ri.E_frac_std, ri.d13C_raw_std)}
}
return pl_dict
#define function to remove duplicate legend entries
def _rem_dup_leg(ax):
'''
Removes duplicate legend entries.
Parameters
----------
ax : plt.axishandle
Axis handle containing entries to remove.
Returns
-------
han_list : list
List of axis handles.
lab_list : list
List of axis handle labels.
'''
han, lab = ax.get_legend_handles_labels()
han_list, lab_list = [], []
for h, l in zip(han, lab):
if l not in lab_list:
han_list.append(h)
lab_list.append(l)
return han_list, lab_list
| FluvialSeds/rampedpyrox | build/lib/rampedpyrox/plotting_helper.py | plotting_helper.py | py | 7,021 | python | en | code | 4 | github-code | 13 |
3128938195 | from independent_set import MIS
from utilities import *
from vertex_covers import MVC
def experiment4():
    """Plot average MVC and MIS sizes (and their sum) as edge count grows.

    Generates ``graph_count`` random graphs on ``node_count`` nodes for each
    edge count, averages the minimum-vertex-cover and maximum-independent-set
    sizes, and hands the three series to ``create_plot``.
    """
    node_count = 8
    edge_counts = range(1, 31, 4)
    graph_count = 1000
    # a simple graph on n nodes can hold at most n*(n-1)/2 edges
    max_possible_edges = (node_count ** 2 - node_count) // 2

    x_vals = []
    avg_cover, avg_indep, avg_combined = [], [], []

    for edge_count in edge_counts:
        cover_sum = 0
        indep_sum = 0
        x_vals.append(min(edge_count, max_possible_edges))
        for _ in range(graph_count):
            graph = create_random_graph(node_count, edge_count)
            cover = MVC(graph)
            indep = MIS(graph)
            cover_sum += len(cover)
            indep_sum += len(indep)
        avg_cover.append(cover_sum / graph_count)
        avg_indep.append(indep_sum / graph_count)
        avg_combined.append((cover_sum + indep_sum) / graph_count)

    create_plot(x_vals,
                [avg_cover, avg_indep, avg_combined],
                legend_labels=["Average MVC Size", "Average MIS Size", "Sum of Average MIS and MVC Sizes"],
                title="Average MVC and MIS Sizes",
                description=f"Repeated for {graph_count} graphs, nodes = {node_count}, edges ranging from 1-28",
                x_label="Number of Edges",
                y_label="Size",
                scale=1)
def main():
    """Script entry point: run experiment 4."""
    experiment4()

if __name__ == '__main__':
    main()
| MahboobMMonza/3XB3-Lab2 | experiment4.py | experiment4.py | py | 1,311 | python | en | code | 0 | github-code | 13 |
25523551348 | # run in the medimg conda environment
from pathlib import Path
import pickle
import SimpleITK as sitk
import numpy as np
import pandas as pd
import h5py
class GetPatches():
    """Extract per-breast image patches from MRI volumes and write them to HDF5.

    Workflow (see ``to_disk``): for every case listed in the merged
    bbox/path table, each requested modality is read with SimpleITK,
    optionally resampled / bias-corrected / normalized / rescaled, cropped
    to the tissue bounding box, split into left/right image halves, and
    stored under ``<patient>/<normal|anomaly>/<modality>`` in the output
    HDF5 file.
    """
    def __init__(
        self,
        bbox_file,
        path_file,
        side_file,
        output_file,
        spacing,
        min_size,
        crop='before',
        bias_correction=[],
        rescale=False,
        normalize=[],
        bbox_cols=[
            'L_min',
            'L_max',
            'P_min',
            'P_max',
            'S_min',
            'S_max',
        ],
        modalities=[
            't1_non_FS',
            't1_FS',
            'segmentation',
        ]
    ):
        """Configure the patch extraction.

        Args:
            bbox_file: CSV with per-case tissue bounding boxes (LPS mm).
            path_file: CSV mapping ``identifier`` to image paths per modality.
            side_file: Excel sheet with the pathology side ('Position' column).
            output_file: path of the HDF5 file to create.
            spacing: target voxel spacing; entries equal to False keep the
                original spacing along that axis (see ``resample_img``).
            min_size: minimum bbox extent per axis (mm); falsy disables it.
            crop: 'before' or 'after' — crop relative to intensity edits.
            bias_correction: modalities to run N4 bias correction on.
            rescale: linear rescaling spec for ``rescale_linear`` or False.
            normalize: modalities to run ``normalize_image`` on.
            bbox_cols: column names holding the bbox in ``bbox_file``.
            modalities: modalities to extract per case.

        NOTE(review): the mutable default arguments ([] lists) are shared
        across calls — safe only as long as callers never mutate them.
        """
        # merge tissue bbox file with path file
        # in the case_df data frame
        # -------------------------------------------------
        tissue_bbox_df = pd.read_csv(bbox_file)
        self.bbox_cols = bbox_cols
        path_df = pd.read_csv(path_file)
        case_df = tissue_bbox_df.merge(
            path_df,
            on='identifier',
            how='inner',
        )
        self.case_df = case_df.set_index('identifier')
        # -------------------------------------------------
        # read file holding info about the side
        # of the respective pathology
        # -------------------------------------------------
        side_df = pd.read_excel(
            side_file,
            skiprows=1
        ).iloc[1:]
        # first character of 'Position' encodes the side ('L'/'R')
        side_df['Side'] = side_df['Position'].str[:1]
        self.side_df = side_df.set_index('Patient ID')
        # -------------------------------------------------
        self.output_file = Path(output_file)
        self.spacing = np.array(spacing)
        self.min_size = min_size
        # crop patch before or after intensity value manipulations
        assert crop == 'before' or crop == 'after'
        self.crop = crop
        self.bias_correction = bias_correction
        self.rescale = rescale
        self.normalize = normalize
        self.modalities = modalities
        # sanity checks: every intensity operation must target a known modality
        for bc in bias_correction:
            if bc not in modalities:
                raise ValueError(f'bias correction {bc} not in modalities: {modalities}')
        for norm in normalize:
            if norm not in modalities:
                raise ValueError(f'normalize {norm} not in modalities: {modalities}')
        if not rescale and not normalize:
            print('no rescaling and no normalization used!')
        if rescale and normalize:
            print('!!!! both and normalization used !!!!')

    ######################## IO stuff ##########################

    def get_itk_from_path(self, path, orientation='LPS'):
        """Read a DICOM folder or NIfTI file as a SimpleITK image.

        Raises ValueError for any other file format.
        """
        path = Path(path)
        if path.is_dir():
            return self.read_dcm_folder(path, orientation)
        if (path.suffix == '.gz') or (path.suffix == '.nii'):
            return self.read_nii_file(path, orientation)
        raise ValueError('unknown file format')

    @staticmethod
    def read_dcm_folder(path, orientation):
        """Read a DICOM series folder and reorient it to `orientation`."""
        reader = sitk.ImageSeriesReader()
        dicom_names = reader.GetGDCMSeriesFileNames(str(path))
        reader.SetFileNames(dicom_names)
        data = reader.Execute()
        data = sitk.DICOMOrient(data, orientation)
        return data

    @staticmethod
    def read_nii_file(path, orientation='LPS'):
        """Read a NIfTI file and reorient it to `orientation`."""
        data = sitk.ReadImage(str(path))
        data = sitk.DICOMOrient(data, orientation)
        return data

    @staticmethod
    def get_array_from_itk(image):
        """Return the image as a numpy array with the slice axis moved last."""
        data = sitk.GetArrayFromImage(image)
        return np.moveaxis(data, 0, -1)

    #################### read bbox #############################

    def get_bbox_LPS(self, pid):
        """Return the (possibly min-size-padded) LPS bbox for patient `pid`."""
        row = self.case_df.loc[pid]
        bbox = row[self.bbox_cols].values
        if self.min_size:
            bbox = self.fit_bbox(bbox)
        return bbox

    def fit_bbox(self, bbox):
        """Symmetrically grow each bbox axis up to `self.min_size` if smaller.

        `bbox` is interleaved as [min0, max0, min1, max1, min2, max2].
        """
        for jj in range(0, 3):
            diff = bbox[1::2][jj] - bbox[::2][jj]
            margin = self.min_size[jj] - diff
            # never shrink: only pad when the bbox is below the minimum
            margin = np.clip(margin, a_min=0, a_max=None)
            bbox[::2][jj] -= margin / 2
            bbox[1::2][jj] += margin / 2
        return bbox

    def get_side_split(self, pid, pathology):
        """Return a function mapping an image width to the [lo, hi] column
        range holding the requested half ('normal' or 'anomaly').

        Returns False when the side entry matches neither 'L' nor 'R'.
        NOTE(review): the half-to-side mapping assumes LPS orientation
        (left body side on the right of the image) — verify against
        ``to_disk``, which encodes the same convention.
        """
        assert pathology in ['normal', 'anomaly']
        side = self.side_df.loc[pid]['Side']
        if pathology == 'normal':
            if side == 'R':
                return lambda x: [x//2, x]
            if side == 'L':
                return lambda x: [0, x//2]
        if pathology == 'anomaly':
            if side == 'L':
                return lambda x: [x//2, x]
            if side == 'R':
                return lambda x: [0, x//2]
        return False

    ######################### crop ##############################

    def resample_img(self, itk_image, is_label=False):
        """Resample `itk_image` to `self.spacing`.

        Axes whose configured spacing is False keep their original spacing.
        Labels use nearest-neighbour interpolation; images use B-spline.
        """
        original_spacing = itk_image.GetSpacing()
        original_size = itk_image.GetSize()
        spacing = np.zeros(3)
        for ii in range(0, 3):
            # False entry means: keep the image's native spacing on this axis
            if self.spacing[ii] == False:
                spacing[ii] = original_spacing[ii]
            else:
                spacing[ii] = self.spacing[ii]
        # new voxel count preserves the physical extent of the volume
        out_size = [
            int(np.round(original_size[0] * (original_spacing[0] / spacing[0]))),
            int(np.round(original_size[1] * (original_spacing[1] / spacing[1]))),
            int(np.round(original_size[2] * (original_spacing[2] / spacing[2])))]
        resample = sitk.ResampleImageFilter()
        resample.SetOutputSpacing(spacing)
        resample.SetSize(out_size)
        resample.SetOutputDirection(itk_image.GetDirection())
        resample.SetOutputOrigin(itk_image.GetOrigin())
        resample.SetTransform(sitk.Transform())
        if is_label:
            resample.SetDefaultPixelValue(0)
            resample.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
            resample.SetInterpolator(sitk.sitkBSpline)
        return resample.Execute(itk_image)

    def zero_pad(self, itk_img, bbox, const=0):
        """Constant-pad `itk_img` so that the physical `bbox` fits inside it.

        Padding amounts are computed per axis in voxels; if the bbox already
        fits, the image is returned unchanged.
        """
        size = np.array(itk_img.GetSize())
        spacing = np.array(itk_img.GetSpacing())
        img_lower = itk_img.GetOrigin()
        img_upper = img_lower + size * spacing
        bb_lower = bbox[::2]
        bb_upper = bbox[1::2]
        lower_pad = np.zeros(3)
        upper_pad = np.zeros(3)
        for ii in range(0, 3):
            lower_diff = img_lower[ii] - bb_lower[ii]
            if lower_diff > 0:
                lower_pad[ii] = np.ceil(lower_diff / spacing[ii]).astype(int)
            upper_diff = bb_upper[ii] - img_upper[ii]
            if upper_diff > 0:
                upper_pad[ii] = (np.ceil(upper_diff / spacing[ii])).astype(int)
        if not lower_pad.any() and not upper_pad.any():
            return itk_img
        print('zero padding')
        # convert to list due to sitk bug
        lower_pad = lower_pad.astype('int').tolist()
        upper_pad = upper_pad.astype('int').tolist()
        return sitk.ConstantPad(itk_img, lower_pad, upper_pad, constant=const)

    @staticmethod
    def bias_field_correction(image):
        """Run N4 bias field correction with an Otsu foreground mask."""
        mask = sitk.OtsuThreshold(image, 0, 1, 200)
        mask = sitk.Cast(mask, sitk.sitkUInt8)
        corrector = sitk.N4BiasFieldCorrectionImageFilter()
        # NOTE(review): numberFittingLevels is assigned but never passed to
        # the corrector — the filter's default fitting levels are used.
        numberFittingLevels = 4
        corrected_image = corrector.Execute(
            sitk.Cast(image, sitk.sitkFloat32),
            mask,
        )
        return corrected_image

    @staticmethod
    def rescale_linear(data, scaling):
        """Linearly map [in_low, in_high] -> [out_low, out_high], clipping
        the result to the output range.

        `scaling` is ([in_low, in_high], [out_low, out_high]).
        """
        [in_low, in_high], [out_low, out_high] = scaling
        m = (out_high - out_low) / (in_high - in_low)
        b = out_low - m * in_low
        data = m * data + b
        data = np.clip(data, out_low, out_high)
        return data

    @staticmethod
    def normalize_image(img):
        """Placeholder; expected to be replaced by the caller (see __main__)."""
        raise NotImplementedError('normalization function not implemented')

    @staticmethod
    def crop_patch(data, bbox):
        """Crop `data` to the integer index bbox [x0, x1, y0, y1, z0, z1]."""
        xii, xff, yii, yff, zii, zff = bbox.astype(int)
        return data[xii:xff, yii:yff, zii:zff]

    def get_bbox_RCS(self, ds, bbox_LPS):
        """Convert a physical (LPS, mm) bbox into voxel indices of image `ds`."""
        bbox_RCS = np.zeros(6)
        bbox_RCS[::2] = ds.TransformPhysicalPointToIndex(
            bbox_LPS[::2]
        )
        bbox_RCS[1::2] = ds.TransformPhysicalPointToIndex(
            bbox_LPS[1::2]
        )
        return bbox_RCS.astype(int)

    def get_case(self, pid, modality, rtn_bbox=False):
        """Load, preprocess and crop one modality of one patient.

        Returns the patch as a numpy array (slice axis last); with
        ``rtn_bbox=True`` also returns the LPS bbox used for cropping.
        Any failure is re-raised as ValueError tagged with the patient id.
        """
        bbox_LPS = self.get_bbox_LPS(pid)
        path = self.case_df.loc[pid][modality]
        try:
            # read and reample image
            ds = self.get_itk_from_path(path)
            if self.spacing.any():
                ds = self.resample_img(ds, is_label=False)
            ds = self.zero_pad(ds, bbox_LPS)
            # get bbox
            bbox_RCS = self.get_bbox_RCS(ds, bbox_LPS)
            if self.crop == 'before':
                ds = self.crop_patch(ds, bbox_RCS)
            # intensity modifications
            if modality in self.bias_correction:
                print('bias correction')
                ds = self.bias_field_correction(ds)
            if modality in self.normalize:
                print('normalization')
                ds = self.normalize_image(ds)
            if self.crop == 'after':
                ds = self.crop_patch(ds, bbox_RCS)
            img = self.get_array_from_itk(ds)
            if self.rescale:
                img = self.rescale_linear(img, self.rescale)
        except (Exception, ArithmeticError) as e:
            # NOTE(review): ArithmeticError is already a subclass of
            # Exception, so listing it here is redundant.
            raise ValueError(f'{pid} failed: {e}')
        if rtn_bbox:
            return img, bbox_LPS
        return img

    def pathology_of_side(self, pid, side):
        """Return 'anomaly' if `side` ('L'/'R') is the pathology side of
        patient `pid`, else 'normal'."""
        assert side in ['L', 'R']
        anomaly_side = self.side_df.loc[pid]['Side']
        if side == anomaly_side:
            return 'anomaly'
        return 'normal'

    def to_disk(self):
        """Extract every case/modality, split patches into left/right halves
        and write them to the HDF5 output file.

        Also writes a CSV (same stem as the HDF5 file) listing the bbox used
        for each successfully processed (identifier, modality) pair.
        Failing cases are logged and skipped.
        """
        if self.output_file.exists():
            raise ValueError('output file exists')
        if not self.output_file.parent.exists():
            self.output_file.parent.mkdir(parents=True)
        disk_df = pd.DataFrame(
            columns=['identifier', 'modality']+self.bbox_cols
        )
        disk_idx = 0
        with h5py.File(self.output_file, 'w') as ff:
            for pid in self.case_df.index:
                for mod in self.modalities:
                    print(f'{pid}-{mod}')
                    try:
                        data, bbox = self.get_case(pid, mod, rtn_bbox=True)
                        width = data.shape[1]
                        # left body side is on the right of the image for LPS!!
                        # ris: right image side
                        # lis: left image side
                        ris = self.pathology_of_side(pid, 'L')
                        ff.create_dataset(
                            f'{pid}/{ris}/{mod}',
                            data=data[:, width//2:, :]
                        )
                        ff[f'{pid}'].attrs['ris'] = ris
                        lis = self.pathology_of_side(pid, 'R')
                        ff.create_dataset(
                            f'{pid}/{lis}/{mod}',
                            data=data[:, :width//2, :],
                        )
                        ff[f'{pid}'].attrs['lis'] = lis
                    except (Exception, ArithmeticError) as e:
                        # best-effort: skip failing cases, keep processing
                        print(f'failed: {e}')
                        continue
                    disk_df.loc[disk_idx] = [pid, mod, *bbox]
                    disk_idx += 1
        disk_df.to_csv(
            self.output_file.parent / (self.output_file.stem + '.csv'),
            index=False
        )
if __name__ == '__main__':
    # --- extraction configuration -------------------------------------
    spacing = (0.75, 0.75, 1.0)
    # False entries keep the native spacing on that axis (see resample_img)
    min_size_mm = [False]*3
    rescale = False
    crop = 'before'
    modalities=['t1_non_FS', 't1_FS', 'segmentation']
    normalize=['t1_non_FS', 't1_FS']
    bias_correction = []
    # target statistics for the normalization patched in below
    std = 0.25
    mean = 0.5
    # encode the configuration into the output folder name ('.' stripped)
    mean_str = str(mean).replace('.', '')
    std_str = str(std).replace('.', '')
    space_str = 'x'.join([str(ii) for ii in spacing]).replace('.', '')
    size_str = '200x200x50' #'x'.join([str(int(ii)) for ii in min_size_mm]).replace('.', '')
    bbox_file = f'./../../data/bboxes/adjusted_size_200-350x200-350x50-100/breast_tissue_complete_from_sgmt.csv'
    path_file = './../../data/file_paths.csv'
    side_file = './../../data/TCIA/Clinical_and_Other_Features.xlsx'
    output_file = f'./../../data/patch_data/complete_clc_mean{mean_str}_'\
        f'std{std_str}_space{space_str}_size{size_str}/patches.h5'
    patch_gen = GetPatches(
        bbox_file=bbox_file,
        path_file=path_file,
        side_file=side_file,
        output_file=output_file,
        crop=crop,
        bias_correction=bias_correction,
        rescale=rescale,
        normalize=normalize,
        spacing=spacing,
        min_size=min_size_mm,
        modalities=modalities,
    )
    def normalize_image(data, std, mean):
        """Whiten `data` with sitk, then shift/scale to (mean, std)."""
        f = sitk.NormalizeImageFilter()
        data = f.Execute(data)
        return (std*data)+mean
    # replace the NotImplementedError placeholder on the instance
    patch_gen.normalize_image = lambda img: normalize_image(img, std=std, mean=mean)
    # testing
    # patch_gen.case_df = patch_gen.case_df.iloc[:2]
    patch_gen.to_disk()
    # archive this script next to the produced HDF5 for reproducibility
    output_file = Path(output_file)
    script = output_file.parent / (output_file.stem + '.py')
    with open(script, 'w') as ff:
        ff.write(Path(__file__).read_text())
    # dump the run configuration alongside the output
    to_dump = {
        'spacing': spacing,
        'min_size_mm': min_size_mm,
        'rescale': rescale,
        'normalize': normalize,
        'std_value': std,
        'mean_value': mean,
        'bbox_file': bbox_file,
    }
    dump_file = output_file.parent / (output_file.stem + '.pkl')
    with open(dump_file, 'wb') as ff:
        pickle.dump(to_dump, ff)
| LangDaniel/MAEMI | utils/generate_patches/image_patches.py | image_patches.py | py | 13,829 | python | en | code | 4 | github-code | 13 |
28133642295 | # 주차 요금 계산 https://programmers.co.kr/learn/courses/30/lessons/92341
from math import ceil
def solution(fees, records):
    """Compute the parking fee for every car (programmers problem 92341).

    Args:
        fees: [base_time(min), base_fee, unit_time(min), unit_fee].
        records: chronological "HH:MM NNNN IN|OUT" strings.

    Returns:
        List of fees ordered by ascending car number. Cars still parked at
        the end of the day are charged until 23:59.
    """
    # FIX: removed the leftover debug print(dic) that ran on every record.
    END_OF_DAY = 23 * 60 + 59
    base_time, base_fee, unit_time, unit_fee = fees

    # car number -> [accumulated minutes] (parked out) or
    #               [accumulated minutes, entry time] (currently parked)
    cars = {}
    for rec in records:
        time = int(rec[0:2]) * 60 + int(rec[3:5])
        carnum = int(rec[6:10])
        if carnum not in cars:
            # first time this car enters
            cars[carnum] = [0, time]
        elif len(cars[carnum]) == 2:
            # car is currently parked, so this record is its exit
            cars[carnum][0] += time - cars[carnum][1]
            cars[carnum].pop()
        else:
            # car re-enters the lot
            cars[carnum].append(time)

    # cars without an OUT record are charged until 23:59
    for value in cars.values():
        if len(value) == 2:
            value[0] += END_OF_DAY - value[1]
            value.pop()

    result = []
    for carnum in sorted(cars):
        minutes = cars[carnum][0]
        if minutes <= base_time:
            fee = base_fee
        else:
            # extra time is billed per started unit_time block
            fee = base_fee + ceil((minutes - base_time) / unit_time) * unit_fee
        result.append(fee)
    return result
# Manual test scaffolding: the sample input from the problem statement.
fees = [180, 5000, 10, 600]
records = ["05:34 5961 IN", "06:00 0000 IN", "06:34 0000 OUT", "07:59 5961 OUT", "07:59 0148 IN", "18:59 0000 IN", "19:09 0148 OUT", "22:59 5961 IN", "23:00 5961 OUT"]
# solution(fees, records)
# Unrelated scratch code: demonstrates in-place mutation of a dict value.
dit = {1: [1,3], 2:[2,3]}
dit[1].pop()
print(dit[1]) | yypark21/my_coding_test | level2_python/7.py | 7.py | py | 1,285 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def convert(self, s, numRows):
        """Read `s` in a zigzag pattern over `numRows` rows and return the
        rows concatenated (LeetCode 6).

        :type s: str
        :type numRows: int
        :rtype: str
        """
        if numRows == 1:
            return s
        # one full zigzag period: down numRows, then back up (numRows - 2)
        cycle = (numRows - 1) * 2
        rows = [[] for _ in range(numRows)]
        for i, ch in enumerate(s):
            pos = i % cycle
            # FIX: compute the target row directly instead of scanning all
            # numRows rows per character (was O(n * numRows), now O(n)).
            row = pos if pos < numRows else cycle - pos
            rows[row].append(ch)
        return ''.join(''.join(r) for r in rows)
13092848920 | from typing import Optional, Any
import attr
from .initialize_from_attr import InitializeFromAttr
from .transform_common import (
IsTransformed,
IsPrototype,
GetDecoratedClass,
GetTransformedInstanceVars,
GetMemberType)
from .signal import ModelSignal, InterfaceSignal
def Initialize(
        series: str,
        instance: Any,
        prototype: Any,
        namePrefix: Optional[str] = None) -> None:
    """Populate `instance` from `prototype` for the given series.

    For every transformed instance variable: prototype members wrap into
    their decorated class, ModelSignal members become InterfaceSignals, and
    everything else goes through ``instance.TransformMember``. Any extra
    attrs-declared members are initialized afterwards.
    """
    if IsTransformed(type(instance)):
        for name in GetTransformedInstanceVars(type(instance)):
            memberType = GetMemberType(prototype, name)

            if IsPrototype(memberType, series):
                # This member was itself used as a prototype of a
                # transformed class; wrap it as a nested transformation.
                decoratedClass = GetDecoratedClass(memberType, series)
                memberName = (
                    name if namePrefix is None
                    else "{}.{}".format(namePrefix, name))
                setattr(
                    instance,
                    name,
                    decoratedClass(getattr(prototype, name), memberName))
                continue

            if issubclass(memberType, ModelSignal):
                # A ModelSignal is always transformed to an InterfaceSignal.
                setattr(
                    instance,
                    name,
                    InterfaceSignal.Create(getattr(prototype, name)))
                continue

            setattr(
                instance,
                name,
                instance.TransformMember(name, prototype, namePrefix))

    if attr.has(type(instance)):
        # Initialize additional attrs members.
        InitializeFromAttr(instance, prototype, namePrefix)
def InitializeModel(
        instance: Any,
        prototype: Any,
        namePrefix: Optional[str] = None) -> None:
    """Initialize `instance` from `prototype` for the "model" series."""
    Initialize("model", instance, prototype, namePrefix)
def InitializeInterface(
        instance: Any,
        prototype: Any,
        namePrefix: Optional[str] = None) -> None:
    """Initialize `instance` from `prototype` for the "interface" series."""
    Initialize("interface", instance, prototype, namePrefix)
| JiveHelix/pex | python/pex/initializers.py | initializers.py | py | 2,133 | python | en | code | 0 | github-code | 13 |
10438364906 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
import os
import re
import logging
import shutil
from six.moves import map
log = logging.getLogger("main")
from ..master_task import ModelTesterTask
from ..master_job import Job
from ..errors import TaskError
from ..utils import basename, PhyloTree, GLOBALS, pjoin
__all__ = ["Prottest"]
class Prottest(ModelTesterTask):
def __init__(self, nodeid, alg_fasta_file, alg_phylip_file,
constrain_tree, seqtype, conf, confname):
GLOBALS["citator"].add('phyml')
self.alg_phylip_file = alg_phylip_file
self.alg_fasta_file = alg_fasta_file
self.confname = confname
self.conf = conf
self.lk_mode = conf[confname]["_lk_mode"]
if self.lk_mode == "raxml":
phyml_optimization = "n"
elif self.lk_mode == "phyml":
phyml_optimization = "lr"
else:
raise ValueError("Choose a valid lk_mode value (raxml or phyml)")
base_args = {
"--datatype": "aa",
"--input": self.alg_phylip_file,
"--bootstrap": "0",
"-o": phyml_optimization,
"--model": None, # I will iterate over this value when
# creating jobs
"--quiet": ""
}
self.models = conf[confname]["_models"]
task_name = "Prottest-[%s]" %','.join(self.models)
ModelTesterTask.__init__(self, nodeid, "mchooser", task_name,
base_args, conf[confname])
if seqtype == "nt":
log.error('Prottest can only be used with amino-acid alignments!')
raise TaskError(self, 'Prottest can only be used with amino-acid alignments!')
self.best_model = None
self.seqtype = "aa"
self.init()
def load_jobs(self):
conf = self.conf
for m in self.models:
args = self.args.copy()
args["--model"] = m
bionj_job = Job(conf["app"]["phyml"], args,
parent_ids=[self.nodeid])
bionj_job.jobname += "-bionj-" + m
bionj_job.jobcat = "bionj"
bionj_job.add_input_file(self.alg_phylip_file, bionj_job.jobdir)
self.jobs.append(bionj_job)
if self.lk_mode == "raxml":
raxml_args = {
"-f": "e",
"-s": pjoin(bionj_job.jobdir, self.alg_phylip_file),
"-m": "PROTGAMMA%s" % m,
"-n": self.alg_phylip_file+"."+m,
"-t": pjoin(bionj_job.jobdir,
self.alg_phylip_file+"_phyml_tree.txt")
}
raxml_job = Job(conf["app"]["raxml"], raxml_args,
parent_ids=[bionj_job.jobid])
raxml_job.jobname += "-lk-optimize"
raxml_job.dependencies.add(bionj_job)
raxml_job.model = m
raxml_job.jobcat = "raxml"
self.jobs.append(raxml_job)
def finish(self):
lks = []
if self.lk_mode == "phyml":
for job in self.jobs:
if job.jobcat != "bionj": continue
phyml_job = job
tree_file = pjoin(phyml_job.jobdir,
self.alg_phylip_file+"_phyml_tree.txt")
stats_file = pjoin(phyml_job.jobdir,
self.alg_phylip_file+"_phyml_stats.txt")
tree = PhyloTree(tree_file)
m = re.search('Log-likelihood:\s+(-?\d+\.\d+)',
open(stats_file).read())
lk = float(m.groups()[0])
tree.add_feature("lk", lk)
tree.add_feature("model", phyml_job.args["--model"])
lks.append([float(tree.lk), tree.model, tree])
elif self.lk_mode == "raxml":
for job in self.jobs:
if job.jobcat != "raxml": continue
raxml_job = job
lk = open(pjoin(raxml_job.jobdir, "RAxML_log.%s"
%raxml_job.args["-n"])).readline().split()[1]
tree = PhyloTree(raxml_job.args["-t"])
tree.add_feature("lk", lk)
tree.add_feature("model", raxml_job.model)
lks.append([float(tree.lk), tree.model, tree])
# sort lks in ASC order
lks.sort()
# choose the model with higher likelihood, the lastone in the list
best_model = lks[-1][1]
best_tree = lks[-1][2]
log.log(22, "%s model selected from the following lk values:\n%s" %(best_model, '\n'.join(map(str, lks))))
ModelTesterTask.store_data(self, best_model, lks)
| dongzhang0725/PhyloSuite | PhyloSuite/ete3/tools/ete_build_lib/task/prottest.py | prottest.py | py | 6,215 | python | en | code | 118 | github-code | 13 |
12812260216 | # This program performs sentimental analysis on movie reviews
# Importing libraries
import nltk
import nltk.classify.util
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.corpus import movie_reviews
from nltk.corpus.reader import wordlist
# download the data
nltk.download("movie_reviews")
# define the function call to extract features
def extract_features(wordlist):
    """Map each word to True — the feature-dict format that NLTK's
    NaiveBayesClassifier expects for a document."""
    # dict comprehension instead of dict([...]) — same result, clearer
    return {word: True for word in wordlist}
if __name__ == '__main__':
    # Collect the file ids of positive and negative review documents.
    positive_fileids = movie_reviews.fileids("pos")
    negative_fileids = movie_reviews.fileids("neg")
    features_positive = [(extract_features(movie_reviews.words(fileids=[f])), "positive") for f in positive_fileids]
    features_negative = [(extract_features(movie_reviews.words(fileids=[f])), "negative") for f in negative_fileids]
    # Split each class 80/20 into training and test portions.
    threshold_factor = 0.8
    threshold_positive = int(threshold_factor * len(features_positive))
    threshold_negative = int(threshold_factor * len(features_negative))
    features_train = features_positive[:threshold_positive] + features_negative[:threshold_negative]
    features_test = features_positive[threshold_positive:] + features_negative[threshold_negative:]
    print("\n Number of Training datapoints: ", len(features_train))
    print("\n Number of Test datapoints: ", len(features_test))
    # Train the Naive Bayes classifier on the training split.
    classifier = NaiveBayesClassifier.train(features_train)
    # BUG FIX: accuracy must be measured on the held-out test set; the
    # original evaluated on features_train, which overstates accuracy.
    print("\n Accuracy of the classifier: ", nltk.classify.util.accuracy(classifier, features_test))
    print("\nTop 10 informative words: ")
    for item in classifier.most_informative_features()[:10]:
        print(item[0])
    # Reviews to classify.
    input_reviews = [
        "This movie is fun",
        "The movie is very bad",
        "The movie is well constructed",
        "The movie is too poor",
        "Please dont watch",
        "It is a waste of time watching this movie"
    ]
    print("\n predictions: ")
    for review in input_reviews:
        print("\nReview: ", review)
        probdist = classifier.prob_classify(extract_features(review.split()))
        pred_sentiment = probdist.max()
        print("Predicted Sentiment: ", pred_sentiment)
print ("Probability: ", round(probdist.prob(pred_sentiment), 2)) | blacdev/Machine-learning-Nltk-Library- | nlp_demo.py | nlp_demo.py | py | 2,382 | python | en | code | 0 | github-code | 13 |
3022438493 | #!/usr/bin/python3
"""Defines a class Square"""
from models.rectangle import Rectangle
class Square(Rectangle):
    """A square: a Rectangle whose width and height are always equal."""

    def __init__(self, size, x=0, y=0, id=None):
        """Create a square of side `size` at position (x, y).

        Args:
            size (int): length of each side
            x (int): horizontal offset
            y (int): vertical offset
            id (int): identifier; auto-assigned when None
        """
        super().__init__(size, size, x, y, id)

    @property
    def size(self):
        """int: side length (aliases the rectangle's width)."""
        return self.width

    @size.setter
    def size(self, value):
        """Resize the square, keeping width and height in sync."""
        self.width = value
        self.height = value

    def __str__(self):
        """Return the '[Square] (<id>) <x>/<y> - <size>' display form."""
        return ("[Square] ({}) {}/{} - {}".format(self.id, self.x, self.y, self.width))

    def update(self, *args, **kwargs):
        """Update attributes.

        Positional args are applied in the order id, size, x, y (extras are
        ignored); otherwise keyword args are applied as attribute names.
        """
        if args:
            # zip truncates at min(len(args), 4), matching the old break
            for attr_name, value in zip(("id", "size", "x", "y"), args):
                setattr(self, attr_name, value)
        elif kwargs:
            for attr_name, value in kwargs.items():
                setattr(self, attr_name, value)

    def to_dictionary(self):
        """Return the dictionary representation of this square."""
        return {
            'id': self.id,
            'size': self.width,
            'x': self.x,
            'y': self.y,
        }
| JozeSIMAO/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/square.py | square.py | py | 1,740 | python | en | code | 0 | github-code | 13 |
18875461221 | import torch
import torch.nn as nn
import torch.nn.init
import torchvision.models as models
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.backends.cudnn as cudnn
from torch.nn.utils.clip_grad import clip_grad_norm
import torch.nn.functional as F
import numpy as np
import bottleneck as bn
def rnn_mask(context_lens, max_step):
    """
    Creates a float mask for variable length sequences.

    Args:
        context_lens: per-batch valid lengths (iterable of ints).
        max_step: number of time steps (mask width).

    Returns:
        Variable of shape (len(context_lens), max_step) with 1.0 at
        positions < length and 0.0 elsewhere; moved to GPU when available.
    """
    # vectorized: position < length comparison via broadcasting, instead of
    # filling one row at a time in Python
    lengths = torch.as_tensor(list(context_lens), dtype=torch.long)
    positions = torch.arange(max_step, dtype=torch.long)
    mask = (positions.unsqueeze(0) < lengths.unsqueeze(1)).float()
    if torch.cuda.is_available():
        mask = mask.cuda()
    # Variable() kept for interface compatibility (no-op on modern torch)
    mask = Variable(mask)
    return mask
def top_n_indexes(arr, n):
    """Return (row, col) index pairs of the n largest values in a 2-D array.

    The pairs come back in arbitrary order: argpartition only guarantees the
    top-n set, not its internal ordering (same contract as the bottleneck
    version this replaces).
    """
    # numpy provides the same argpartition API as bottleneck, dropping the
    # extra third-party dependency
    idx = np.argpartition(arr, arr.size - n, axis=None)[-n:]
    width = arr.shape[1]
    return [divmod(i, width) for i in idx]
class Seq2seqAttention(nn.Module):
    """Video captioning model: a frame encoder feeding an attention decoder."""

    def __init__(self, args):
        super(Seq2seqAttention, self).__init__()
        # keep the raw config plus the individual knobs used elsewhere
        self.args = args
        self.enable_cuda = args.cuda
        self.vid_dim = args.vid_dim
        self.embed_size = args.embed
        self.hidden_dim = args.hid
        self.vocab_size = args.max_vocab_size
        self.num_layers = args.num_layers
        self.birnn = args.birnn
        self.encoder = EncoderFrames(self.args)
        self.decoder = DecoderRNN(self.args)

    def forward(self, frames, flengths, captions, lengths):
        """Teacher-forced pass: encode the frames, decode against captions."""
        encoded = self.encoder(frames, flengths)
        return self.decoder(encoded, flengths, captions, lengths)

    def sample(self, frames, flengths):
        """Greedy decoding of a caption for the given frames."""
        encoded = self.encoder.forward(frames, flengths)
        return self.decoder.sample(encoded, flengths)

    def sample_rl(self, frames, flengths, sampling='multinomial'):
        """Sample a caption for RL training; returns (tokens, outputs)."""
        encoded = self.encoder.forward(frames, flengths)
        return self.decoder.rl_sample(encoded, flengths, sampling=sampling)

    def beam_search(self, frames, flengths, beam_size=5):
        """Beam-search decoding with the given beam width."""
        encoded = self.encoder.forward(frames, flengths)
        return self.decoder.beam_search(encoded, flengths, beam_size=beam_size)
# Based on tutorials/08 - Language Model
# RNN Based Language Model
class EncoderFrames(nn.Module):
    """LSTM video-frame encoder.

    Projects raw frame features (vid_dim) to embed_size, runs an
    (optionally bidirectional) LSTM over the frame sequence, and returns
    the per-step hidden states padded back to the batch's max length and
    restored to the caller's original batch order.
    """
    def __init__(self, args):
        super(EncoderFrames, self).__init__()
        # self.use_abs = use_abs
        self.vid_dim = args.vid_dim      # raw frame feature dimensionality
        self.embed_size = args.embed     # projected frame embedding size
        self.hidden_dim = args.hid       # LSTM hidden size (per direction)
        self.enable_cuda = args.cuda
        self.num_layers = args.num_layers
        self.args = args
        # Direction multiplier: 2 when bidirectional, else 1.
        if args.birnn:
            self.birnn = 2
        else:
            self.birnn = 1
        # projection layer: vid_dim -> embed_size (no bias)
        self.linear = nn.Linear(self.vid_dim, self.embed_size, bias=False)
        # video embedding LSTM over the projected frame sequence
        self.rnn = nn.LSTM(self.embed_size, self.hidden_dim, self.num_layers, batch_first=True, bidirectional=self.args.birnn, dropout=args.dropout)
        self.dropout = nn.Dropout(args.dropout)
        self.init_weights()
    def init_weights(self):
        """Uniform(-0.08, 0.08) init for layer-0 LSTM weights and the projection.

        NOTE(review): only layer 0's parameters are re-initialized; deeper
        layers (num_layers > 1) keep PyTorch defaults — confirm intended.
        """
        self.rnn.weight_hh_l0.data.uniform_(-0.08, 0.08)
        self.rnn.weight_ih_l0.data.uniform_(-0.08, 0.08)
        self.rnn.bias_ih_l0.data.fill_(0)
        self.rnn.bias_hh_l0.data.fill_(0)
        self.linear.weight.data.uniform_(-0.08, 0.08)
        #self.linear.bias.data.fill_(0)
    def init_hidden(self, batch_size):
        """Return a zero (h0, c0) pair shaped (directions*layers, batch, hidden).

        NOTE(review): self.birnn is always 1 or 2 (truthy), so the branch is
        always taken; if it were falsy this would implicitly return None.
        """
        if self.birnn:
            return (Variable(torch.zeros(self.birnn*self.num_layers, batch_size, self.hidden_dim)),
                    Variable(torch.zeros(self.birnn*self.num_layers, batch_size, self.hidden_dim)))
    def forward(self, frames, flengths):
        """Encode a padded batch of frame features.

        :param frames: (batch, max_steps, vid_dim) padded frame features
        :param flengths: per-sample frame counts (numpy array — .shape is used)
        :return: (batch, max_steps, birnn*hidden_dim) per-step LSTM outputs,
                 un-sorted back to the caller's original batch order
        """
        batch_size = flengths.shape[0]
        #frames = self.linear(frames)
        #frames = self.dropout(frames) # adding dropout layer
        self.init_rnn = self.init_hidden(batch_size)
        if self.enable_cuda:
            self.init_rnn = self.init_rnn[0].cuda(), self.init_rnn[1].cuda()
        if batch_size > 1:
            # Sort by length (keep idx) — pack_padded_sequence requires
            # sequences in descending length order.
            flengths, idx_sort = np.sort(flengths)[::-1], np.argsort(-flengths)
            if self.enable_cuda:
                frames = frames.index_select(0, Variable(torch.cuda.LongTensor(idx_sort)))
            else:
                frames = frames.index_select(0, Variable(torch.LongTensor(idx_sort)))
        frames = self.linear(frames)
        frame_packed = nn.utils.rnn.pack_padded_sequence(frames, flengths, batch_first=True)
        outputs, (ht, ct) = self.rnn(frame_packed, self.init_rnn)
        outputs,_ = pad_packed_sequence(outputs,batch_first=True)
        if batch_size > 1:
            # Un-sort by length: restore the original batch ordering.
            idx_unsort = np.argsort(idx_sort)
            if self.enable_cuda:
                outputs = outputs.index_select(0, Variable(torch.cuda.LongTensor(idx_unsort)))
            else:
                outputs = outputs.index_select(0, Variable(torch.LongTensor(idx_unsort)))
        # print 'Encoder Outputs:',outputs.size()
        return outputs
# Based on tutorials/03 - Image Captioning
class DecoderRNN(nn.Module):
    """Attention LSTM caption decoder.

    At each step the previous word embedding is concatenated with an
    attention context over the encoder outputs and fed to an LSTM; hidden
    states are projected to vocabulary logits.  Supports teacher forcing
    (forward), greedy decoding (sample), multinomial/argmax sampling for
    RL (rl_sample) and batched beam search (beam_search).

    Token conventions used throughout: id 1 is the <start> token fed at
    step 0; id 2 marks end-of-sequence in beam search.
    """
    def __init__(self, args):
        """Set the hyper-parameters and build the layers."""
        super(DecoderRNN, self).__init__()
        self.enable_cuda = args.cuda
        self.embed_size = args.embed
        self.hidden_size = args.hid
        self.vocab_size = args.max_vocab_size
        # Direction multiplier of the encoder whose states we attend over.
        if args.birnn:
            self.birnn = 2
        else:
            self.birnn = 1
        self.num_layers = args.num_layers
        self.args = args
        # Optional projection of [embedding; context] back to embed_size;
        # currently unused (its call sites below are commented out).
        self.input_proj = nn.Linear(self.birnn*self.hidden_size+self.embed_size, self.embed_size)
        self.embed = nn.Embedding(self.vocab_size, self.embed_size)
        self.atten = Attention(args, self.birnn*self.hidden_size, self.hidden_size)
        self.lstm = nn.LSTM(self.embed_size+self.birnn*self.hidden_size, self.hidden_size, self.num_layers, batch_first=True, dropout=args.dropout)
        self.linear = nn.Linear(self.hidden_size, self.vocab_size)
        self.init_weights()
    def init_weights(self):
        """Initialize weights uniformly in (-0.08, 0.08); zero the biases."""
        #self.lstm.weight_hh_l0.data.uniform_(-0.08, 0.08)
        self.lstm.weight_hh_l0.data.uniform_(-0.08, 0.08)
        self.lstm.weight_ih_l0.data.uniform_(-0.08, 0.08)
        self.lstm.bias_ih_l0.data.fill_(0)
        self.lstm.bias_hh_l0.data.fill_(0)
        self.embed.weight.data.uniform_(-0.08, 0.08)
        self.input_proj.weight.data.uniform_(-0.08, 0.08)
        self.input_proj.bias.data.fill_(0)
        self.linear.weight.data.uniform_(-0.08, 0.08)
        self.linear.bias.data.fill_(0)
    def forward(self, video_features, flengths, captions, lengths):
        """Decode image feature vectors and generates captions (teacher forcing)."""
        """
        :param video_features:
            video encoder output hidden states of size batch_size x max_enc_steps x hidden_dim
        :param flengths:
            video frames length of size batch_size
        :param captions:
            input target captions of size batch_size x max_dec_steps
        :param lengths:
            input captions lengths of size batch_size
        """
        # print features.size(), captions.size(), self.embed_size
        # print 'Input features, captions, lengths', features.size(), captions.size(), lengths, np.sum(lengths)
        # appending <start> token (id 1) to the input captions
        batch_size,step_size = captions.shape
        max_enc_steps = video_features.shape[1]
        context_mask = rnn_mask(flengths,max_enc_steps)
        captions = torch.cat((Variable(torch.LongTensor(np.ones([batch_size,1]))).cuda(),captions), 1)
        embeddings = self.embed(captions)
        # Decoder hidden state used to query attention; starts at zeros.
        hidden_output = Variable(torch.FloatTensor(batch_size,self.hidden_size).zero_()).cuda()
        state = None
        outputs = []
        # One attention + LSTM step per target position (teacher forcing:
        # the gold token embedding at position i is the input for step i).
        for i in range(step_size):
            c_t, alpha = self.atten(hidden_output, video_features, context_mask)
            inp = torch.cat((embeddings[:,i,:], c_t), 1).unsqueeze(1)
            #inp = self.input_proj(inp)
            hidden_output,state = self.lstm(inp,state)
            hidden_output = hidden_output.squeeze(1)
            outputs.append(hidden_output)
        outputs = torch.transpose(torch.stack(outputs), 0, 1) # converting from step_size x batch_size x hidden_size to batch_size x step_size x hidden_size
        # NOTE(review): pack_padded_sequence requires `lengths` sorted in
        # descending order — presumably the caller guarantees this; confirm.
        outputs = pack_padded_sequence(outputs, lengths, batch_first=True)[0]
        outputs = self.linear(outputs)
        return outputs
    def sample(self, video_features, flengths, max_len=30, state=None):
        """Samples captions for given image features (Greedy search).

        Returns max_len + 1 token ids per sample; the `state` parameter is
        ignored (immediately overwritten with None below).
        """
        sampled_ids = []
        state = None
        batch_size, _, _ = video_features.shape
        max_enc_steps = video_features.shape[1]
        context_mask = rnn_mask(flengths, max_enc_steps)
        hidden_output = Variable(torch.FloatTensor(batch_size,self.hidden_size).zero_()).cuda()
        # Start every sequence with the <start> token (id 1).
        inputs = self.embed(Variable(torch.LongTensor(np.ones([batch_size,1]))).cuda()).squeeze(1)
        for i in range(max_len + 1): # maximum sampling length
            c_t, alpha = self.atten(hidden_output, video_features, context_mask)
            inp = torch.cat((inputs, c_t), 1).unsqueeze(1)
            #inp = self.input_proj(inp)
            hidden_output,state = self.lstm(inp,state)
            hidden_output = hidden_output.squeeze(1)
            output = self.linear(hidden_output) # (batch_size, vocab_size)
            # Greedy: take the argmax token and feed it back as next input.
            predicted = output.max(1)[1]
            sampled_ids.append(predicted.unsqueeze(1))
            inputs = self.embed(predicted)
        sampled_ids = torch.cat(sampled_ids, 1) # (batch_size, 20)
        return sampled_ids.squeeze()
    def beam_search(self, video_features, flengths, max_len=20, beam_size=5):
        """ Beam search Implementation during Inference.

        Keeps `beam_size` hypotheses per sample; a hypothesis is finalized
        when it emits token id 2 (EOS).  Finished hypotheses are re-ranked
        by length-normalized log probability and the best one per sample
        is returned as a (batch, max_len) int32 array.

        NOTE(review): the per-beam LSTM state is rebuilt each step via
        tmp_state[k,0,...]/tmp_state[k,1,...].unsqueeze(0), which assumes a
        single-layer LSTM (num_layers == 1) — confirm for deeper decoders.
        """
        prev_state = None
        outputs = []
        batch_size, max_enc_steps, _ = video_features.shape
        context_mask = rnn_mask(flengths, max_enc_steps)
        hidden_output = Variable(torch.FloatTensor(batch_size,self.hidden_size).zero_()).cuda()
        inputs = self.embed(Variable(torch.LongTensor(np.ones([batch_size,1]))).cuda()).squeeze(1)
        # handle the zero step case seperately: one forward step from the
        # <start> token seeds all beams with the top-k first words.
        c_t, alpha = self.atten(hidden_output, video_features, context_mask)
        inp = torch.cat((inputs,c_t),1).unsqueeze(1)
        next_hidden, next_state = self.lstm(inp, prev_state)
        next_hidden = next_hidden.squeeze(1)
        output = self.linear(next_hidden)
        output = F.softmax(output,1)
        next_probs, next_words = torch.topk(output,beam_size)
        prev_words = torch.t(next_words)
        prev_state = []
        prev_hidden = []
        #print next_state
        # All beams share the identical step-0 state/hidden.
        for i in range(beam_size):
            prev_state.append(next_state)
            prev_hidden.append(next_hidden)
        #print prev_state
        all_probs = next_probs.cpu().data.numpy()
        generated_sequence = np.zeros((batch_size,beam_size,max_len),dtype=np.int32)
        generated_sequence[:,:,0] = next_words.cpu().data.numpy()
        # variables for final results storing
        final_results = np.zeros((batch_size,beam_size,max_len), dtype=np.int32)
        final_all_probs = np.zeros((batch_size,beam_size))
        final_results_counter = np.zeros((batch_size),dtype=np.int32) # to check the overflow of beam in fina results
        for i in range(1,max_len):
            probs = []
            state = []
            hidden = []
            words = []
            # Advance each beam one step; collect its top-k expansions.
            for j in range(beam_size):
                inputs = self.embed(prev_words[j])
                #print inputs
                c_t, alpha = self.atten(prev_hidden[j], video_features, context_mask)
                inp = torch.cat((inputs,c_t),1).unsqueeze(1)
                next_hidden, next_state = self.lstm(inp, prev_state[j])
                next_hidden = next_hidden.squeeze(1)
                output = self.linear(next_hidden)
                output = F.softmax(output,1)
                next_probs, next_words = torch.topk(output, beam_size)
                probs.append(next_probs)
                words.append(next_words)
                state.append(next_state)
                hidden.append(next_hidden)
            # Reorder collected tensors from beam-major to batch-major.
            probs = np.transpose(np.array(torch.stack(probs).cpu().data.numpy()),(1,0,2))
            #state = np.transpose(np.array(state.cpu().data.numpy()),(1,0,2))
            hidden = np.transpose(np.array(torch.stack(hidden).cpu().data.numpy()),(1,0,2))
            words = np.transpose(np.array(torch.stack(words).cpu().data.numpy()),(1,0,2))
            state = [torch.cat(s,0) for s in state]
            state = torch.stack(state)
            #print state
            prev_state = []
            prev_words = []
            prev_hidden = []
            for k in range(batch_size):
                probs[k] = np.transpose(np.transpose(probs[k])*all_probs[k]) # multiply each beam words with each beam probs so far
                top_indices = top_n_indexes(probs[k],beam_size)
                beam_idx,top_choice_idx = zip(*top_indices)
                all_probs[k] = (probs[k])[beam_idx,top_choice_idx]
                prev_state.append([state[idx,:,k,:] for idx in beam_idx])
                prev_hidden.append([hidden[k,idx,:] for idx in beam_idx])
                prev_words.append([words[k,idx,idy] for idx,idy in top_indices])
                generated_sequence[k] = generated_sequence[k,beam_idx,:]
                generated_sequence[k,:,i] = [words[k,idx,idy] for idx,idy in top_indices]
                # code to extract complete summaries ending with [EOS] or [STOP] or [END]
                # NOTE(review): the loop variable below shadows the beam_idx
                # tuple produced by zip(*top_indices) above.
                for beam_idx in range(beam_size):
                    if generated_sequence[k,beam_idx,i] == 2 and final_results_counter[k]<beam_size: # [EOS] or [STOP] or [END] word / check overflow
                        # print generated_sequence[k,beam_idx]
                        final_results[k,final_results_counter[k],:] = generated_sequence[k,beam_idx,:]
                        final_all_probs[k,final_results_counter[k]] = all_probs[k,beam_idx]
                        final_results_counter[k] += 1
                        all_probs[k,beam_idx] = 0.0 # supress this sentence to flow further through the beam
            if np.sum(final_results_counter) == batch_size*beam_size: # when suffiecient hypothsis are obtained i.e. beam size hypotheis, break the process
                # print "Encounter a case"
                break
            # transpose batch to usual
            #print prev_state
            prev_state = [torch.stack(s,0) for s in prev_state]
            prev_state = torch.stack(prev_state,0)
            prev_state = torch.transpose(prev_state,0,1)
            tmp_state = torch.transpose(prev_state,1,2)
            prev_state = []
            # Rebuild per-beam (h, c) tuples for the next LSTM step.
            for k in range(beam_size):
                prev_state.append(tuple((tmp_state[k,0,:,:].unsqueeze(0).contiguous(),tmp_state[k,1,:,:].unsqueeze(0).contiguous())))
            #print prev_state
            prev_words = np.transpose(np.array(prev_words),(1,0)) # set order [beam_size, batch_size]
            prev_words = Variable(torch.LongTensor(prev_words)).cuda()
            prev_hidden = np.transpose(np.array(prev_hidden),(1,0,2))
            prev_hidden = Variable(torch.FloatTensor(prev_hidden)).cuda()
        #print prev_hidden[0]
        #print prev_state[0]
        #print generated_sequence
        # Re-rank finished hypotheses by average log probability per token
        # and keep the best sequence for each sample.
        sampled_ids = []
        for k in range(batch_size):
            avg_log_probs = []
            for j in range(beam_size):
                try:
                    num_tokens = final_results[k,j,:].tolist().index(2)+1 #find the stop word and get the lenth of the sequence based on that
                except:
                    num_tokens = 1 # this case is when the number of hypotheis are not equal to beam size, i.e., durining the process sufficinet hypotheisis are not obtained
                probs = np.log(final_all_probs[k][j])/num_tokens
                avg_log_probs.append(probs)
            avg_log_probs = np.array(avg_log_probs)
            sort_order = np.argsort(avg_log_probs)
            sort_order[:] = sort_order[::-1]
            sort_generated_sequence = final_results[k,sort_order,:]
            sampled_ids.append(sort_generated_sequence[0])
            #print sort_generated_sequence
        return np.asarray(sampled_ids)
    def rl_sample(self, video_features, flengths, max_len=20, sampling='multinomial'):
        """Sample a caption per batch element for RL training.

        :param sampling: 'multinomial' draws from the softmax distribution;
            'argmax' is greedy.  Any other value raises NameError on the
            first step (predicted would be undefined).
        :return: (sampled token ids, per-step vocabulary logits)
        """
        sampled_ids = []
        state = None
        outputs = []
        batch_size, max_enc_steps, _ = video_features.shape
        context_mask = rnn_mask(flengths,max_enc_steps)
        hidden_output = Variable(torch.FloatTensor(batch_size,self.hidden_size).zero_()).cuda()
        inputs = self.embed(Variable(torch.LongTensor(np.ones([batch_size,1]))).cuda()).squeeze(1)
        for i in range(max_len): # maximum sampling length
            c_t, alpha = self.atten(hidden_output, video_features, context_mask)
            inp = torch.cat((inputs, c_t), 1).unsqueeze(1)
            # inp = self.input_proj(inp)
            hidden_output,state = self.lstm(inp,state)
            hidden_output = hidden_output.squeeze(1)
            output = self.linear(hidden_output) # (batch_size, vocab_size)
            outputs.append(output)
            prob = F.softmax(output, 1)
            if sampling == 'multinomial':
                predicted = torch.multinomial(prob, 1)
                predicted = predicted.squeeze(1)
            elif sampling == 'argmax':
                predicted = prob.max(1)[1]
            sampled_ids.append(predicted.unsqueeze(1))
            inputs = self.embed(predicted)
        sampled_ids = torch.cat(sampled_ids, 1) # (batch_size, 20)
        outputs = torch.transpose(torch.stack(outputs), 0, 1)
        return sampled_ids.squeeze(), outputs
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder states.

    Scores each encoder step against the current decoder state with
    v^T tanh(W_e h_enc + W_d h_dec), masks padded steps, renormalizes the
    weights to sum to 1, and returns the context vector plus weights.
    """
    def __init__(self, args, enc_dim, dec_dim, attn_dim=None):
        """
        :param args: config namespace (only args.birnn is read here)
        :param enc_dim: encoder hidden state size per step
        :param dec_dim: decoder hidden state size
        :param attn_dim: attention space size; defaults to dec_dim
        """
        super(Attention, self).__init__()
        self.args = args
        self.enc_dim = enc_dim
        self.dec_dim = dec_dim
        self.attn_dim = self.dec_dim if attn_dim is None else attn_dim
        if self.args.birnn:
            self.birnn = 2
        else:
            self.birnn = 1
        self.encoder_in = nn.Linear(self.enc_dim, self.attn_dim, bias=True)
        self.decoder_in = nn.Linear(self.dec_dim, self.attn_dim, bias=False)
        self.attn_linear = nn.Linear(self.attn_dim, 1, bias=False)
        self.init_weights()
    def init_weights(self):
        """Uniform(-0.08, 0.08) init for all attention projections."""
        self.encoder_in.weight.data.uniform_(-0.08, 0.08)
        self.encoder_in.bias.data.fill_(0)
        self.decoder_in.weight.data.uniform_(-0.08, 0.08)
        self.attn_linear.weight.data.uniform_(-0.08, 0.08)
    def forward(self, dec_state, enc_states, mask, dag=None):
        """
        :param dec_state:
            decoder hidden state of size batch_size x dec_dim
        :param enc_states:
            all encoder hidden states of size batch_size x max_enc_steps x enc_dim
        :param mask:
            binary mask of size batch_size x max_enc_steps (1.0 = real step)
        :param dag: unused; kept for interface compatibility
        :return: (context_vector, alpha) — context_vector is
            batch_size x enc_dim, alpha is batch_size x max_enc_steps
        """
        dec_contrib = self.decoder_in(dec_state)
        batch_size, max_enc_steps, _ = enc_states.size()
        enc_contrib = self.encoder_in(enc_states.contiguous().view(-1, self.enc_dim)).contiguous().view(batch_size, max_enc_steps, self.attn_dim)
        # torch.tanh replaces the deprecated torch.nn.functional.tanh.
        pre_attn = torch.tanh(enc_contrib + dec_contrib.unsqueeze(1).expand_as(enc_contrib))
        energy = self.attn_linear(pre_attn.view(-1, self.attn_dim)).view(batch_size, max_enc_steps)
        alpha = F.softmax(energy, 1)
        # mask out padded encoder steps and renormalize so alpha sums to 1
        alpha = alpha * mask
        alpha = torch.div(alpha, alpha.sum(1).unsqueeze(1).expand_as(alpha))
        context_vector = torch.bmm(alpha.unsqueeze(1), enc_states).squeeze(1) # (batch_size, enc_dim)
        return context_vector, alpha
| ramakanth-pasunuru/video_captioning_rl | models/seq2seq_atten.py | seq2seq_atten.py | py | 20,298 | python | en | code | 43 | github-code | 13 |
278579122 | from django.shortcuts import render
from django.http import HttpResponse
from .models import Line
from django.template import loader, Context
def add(request):
    """Render the line-creation form on GET; create a Line from POST data.

    POST expects 'name', 'device_id', 'degree' and 'length' fields; the
    new record is saved without validation and a plain-text success
    response is returned.
    """
    if request.method == 'GET':
        # NOTE(review): this message says GET submission is not allowed,
        # yet GET is the branch that renders the form template — confirm.
        context = {'状态': 'GET方法不允许提交'}
        return render(request, 'line_add.html', context=context)
    name = request.POST.get('name')
    device_id = request.POST.get('device_id')
    degree = request.POST.get('degree')
    length = request.POST.get('length')
    line = Line(name=name, device_id=device_id, degree=degree, length=length)
    line.save()
    # Dead code removed: an unused `context` dict and a commented-out
    # render() call followed the response here.
    return HttpResponse('添加成功')
def index(request):
    """List all lines (GET); search by name or delete by id (POST).

    A POST with an empty 'id' field is treated as a name search; a
    non-empty 'id' deletes that record and re-renders the full list.
    """
    if request.method == 'GET':
        context = {'line': Line.objects.filter(name__contains='')}
        return render(request, 'line_index.html', context=context)
    id = request.POST.get('id')
    print(id)
    if id == '':
        # Empty id: the search form was submitted — filter by name.
        matches = Line.objects.filter(name__contains=request.POST.get('name'))
        return render(request, 'line_index.html', context={'line': matches})
    # Non-empty id: delete that record, then show the remaining lines.
    Line.objects.get(pk=id).delete()
    remaining = Line.objects.filter(name__contains='')
    context = {'state': '删除成功',
               'line': remaining}
    return render(request, 'line_index.html', context=context)
def csv_download(request):
    """Export every Line record as a GB2312-encoded CSV attachment.

    Renders the 'csv_download.txt' template with a header row followed by
    one 1-based numbered row per record.
    """
    response = HttpResponse(content_type='text/csv;charset=gb2312')
    response['Content-Disposition'] = "attachment;filename=result.csv"
    lines = Line.objects.filter(name__contains='')
    # Header row, then one numbered row per record (enumerate replaces the
    # original manually incremented counter).
    rows = [['序号', '线路名称', '设备id', '电压等级', '线路长度']]
    rows.extend([seq, line.name, line.device_id, line.degree, line.length]
                for seq, line in enumerate(lines, start=1))
    context = {
        'rows': rows
    }
    template = loader.get_template('csv_download.txt')
    response.content = template.render(context)
    return response
| ApostleMelody/Django | Test1/DB_manager/views.py | views.py | py | 2,264 | python | en | code | 0 | github-code | 13 |
#coding: utf-8
__autor__ = 'Cleber Augusto Dias Da Silva'
#Números Primos


def eh_primo(n):
    """Return True when n is prime (trial division up to sqrt(n))."""
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True


if __name__ == '__main__':
    print('Descubra se um número é primo!!')
    n = int(input('Digte o número que deseja saber:\n'))
    # Bug fix: the original only tested divisibility by [2,3,5,7,9,11],
    # which misclassified primes in that list (e.g. 7 -> "not prime") and
    # missed larger factors (e.g. 169 = 13*13 -> "prime").
    if eh_primo(n):
        print('é primo')
    else:
        print('Não é primo')
| CleberSilva93/Study-Exercicios-Python | Exercicio_PythonBrasil/EstruturadeRepetição/ex21.py | ex21.py | py | 350 | python | pt | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.