seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
class Solution(object):
    def isPalindrome(self, s):
        """
        Given a string, determine if it is a palindrome, considering only
        alphanumeric characters and ignoring cases.
        Note: For the purpose of this problem, we define empty string as valid palindrome.
        Example 1:
        Input: "A man, a plan, a canal: Panama"
        Output: true
        Example 2:
        Input: "race a car"
        Output: false
        :type s: str
        :rtype: bool
        """
        # Empty (and single-character) strings are trivially palindromes; the
        # two-pointer loop below already handles len(s) == 1, so the previous
        # separate check for it was redundant and has been removed.
        if not s:
            return True
        s = s.lower()
        left, right = 0, len(s) - 1
        while left < right:
            # Skip non-alphanumeric characters from both ends.
            while left < right and not s[left].isalnum():
                left += 1
            while left < right and not s[right].isalnum():
                right -= 1
            if s[left] != s[right]:
                return False
            left += 1
            right -= 1
        return True
| ljia2/leetcode.py | solutions/two.pointers/125.Valid.Palindrome.py | 125.Valid.Palindrome.py | py | 963 | python | en | code | 0 | github-code | 50 |
30180483161 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 19 18:14:00 2016
@author: Tavo
"""
'''This script will take a polygon shapefile and project it to UTM 23S using ogr/gdal
by tawonque 19/12/2016'''
#%% Import modules
import os
import sys
from osgeo import ogr
from osgeo import osr
#%% If necessary, change directory
# NOTE(review): hard-coded relative path — chdir fails if run from elsewhere.
working_directory = './Data_science/Neuralimage'
print('Initial directory --> ', os.getcwd())
os.chdir(working_directory)
print('Final working directory --> ', os.getcwd())
#%% define input path and other embedded params
infile = './polygon_feature.shp'
outfile = './polygon_feature-EPSG32723.shp'
# Open the input shapefile read-only (flag 0) with the ESRI Shapefile driver.
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(infile, 0)
layer = dataSource.GetLayer()
daLayer = dataSource.GetLayer(0)
layerDefinition = daLayer.GetLayerDefn()
#Show fields in the layer
for i in range(layerDefinition.GetFieldCount()):
    print(layerDefinition.GetFieldDefn(i).GetName())
#%% Check the spatial reference, if none, define one and its transform to UTM
spatialRef = layer.GetSpatialRef()
# FIX: compare against None with 'is' — '== None' is unidiomatic (PEP 8) and
# can be unreliable on SWIG proxy objects such as OGR spatial references.
if spatialRef is None:
    print('Undefined spatial reference')
# get path and filename separately
(outfilepath, outfilename) = os.path.split(outfile)
# get file name without extension
(outfileshortname, extension) = os.path.splitext(outfilename)
#%%
# Spatial Reference of the input file
# Access the Spatial Reference and assign the input projection
Wkt4326 = ('''GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]''')
inSpatialRef = osr.SpatialReference()
#inSpatialRef.ImportFromProj4(4326) # unprojected WGS84
inSpatialRef.ImportFromWkt(Wkt4326)
#%%
# Spatial Reference of the output file
# Access the Spatial Reference and assign the output projection
# UTM 33N which we use for all Spitsbergen does not work since area too far
# outside UTM 33
Wkt32723 = str('''PROJCS["WGS 84 / UTM zone 23S",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",-45],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",10000000],
AUTHORITY["EPSG","32723"],
AXIS["Easting",EAST],
AXIS["Northing",NORTH]]''')
outSpatialRef = osr.SpatialReference()
#outSpatialRef.ImportFromEPSG(32722) #(20823) #UTM zone 23S !!!Issues with the projection libraries
outSpatialRef.ImportFromWkt(Wkt32723)
# create Coordinate Transformation
coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
#%% Open the input shapefile and get the layer
# (aliases to the handles opened earlier)
indataset = dataSource
if indataset is None:
    print('Could not open file')
    sys.exit(1)
inlayer = layer
#%% Create the output shapefile but check first if file exists
if os.path.exists(outfile):
    driver.DeleteDataSource(outfile)
outdataset = driver.CreateDataSource(outfile)
# BUG FIX: this previously tested 'outfile' (a constant string, never None),
# so a failed CreateDataSource was silently ignored until a later crash.
if outdataset is None:
    print('Could not create file')
    sys.exit(1)
outlayer = outdataset.CreateLayer(outfileshortname, geom_type=ogr.wkbPolygon)
#%% Get the FieldDefn for attributes and add to output shapefile
# (which i know are in my file)
# Copy the 'rid' field definition from the first input feature so the
# output layer carries the same attribute.
feature = inlayer.GetFeature(0)
fieldDefn1 = feature.GetFieldDefnRef('rid')
outlayer.CreateField(fieldDefn1)
# get the FeatureDefn for the output shapefile
featureDefn = outlayer.GetLayerDefn()
#%% Loop through input features and write to output file
infeature = inlayer.GetNextFeature()
while infeature:
    #get the input geometry
    geometry = infeature.GetGeometryRef()
    #reproject the geometry, each one has to be projected separately
    geometry.Transform(coordTransform)
    #create a new output feature
    outfeature = ogr.Feature(featureDefn)
    #set the geometry and attribute
    outfeature.SetGeometry(geometry)
    outfeature.SetField('rid', infeature.GetField('rid'))
    #add the feature to the output shapefile
    outlayer.CreateFeature(outfeature)
    #destroy the features and get the next input features
    # BUG FIX: 'outfeature.Destroy' / 'infeature.Destroy' were bare attribute
    # accesses (no-ops); the methods were never actually called.
    outfeature.Destroy()
    infeature.Destroy()
    infeature = inlayer.GetNextFeature()
#%% Close the shapefiles
indataset = None
outdataset = None
#%% Create the prj projection file
outSpatialRef.MorphToESRI()
# FIX: build the .prj path portably — the previous hard-coded '\\' separator
# only worked on Windows — and use a context manager (the old code also
# shadowed the builtin name 'file').
prj_path = os.path.join(outfilepath, outfileshortname + '.prj')
with open(prj_path, 'w') as prj_file:
    prj_file.write(outSpatialRef.ExportToWkt())
| tawonque/Neuralimage | shapergis/Neuralimage_project_shp.py | Neuralimage_project_shp.py | py | 5,117 | python | en | code | 0 | github-code | 50 |
101198986 | import cv2
import numpy as np
def calibrate(image, post_it_size_m=0.076) -> float:
    """Estimate the physical scale (metres per pixel side) from a post-it note.

    Assumes *image* is an H x W x 3 colour array containing a post-it of known
    physical size near the centre; returns sqrt(size_m^2 / pixel_area), i.e.
    the side length of one pixel in metres.
    """
    # 1. take the green only,
    # 2. blur and mask,
    # 3. count those pixels...
    total_i, total_j, _ = image.shape
    def percent_of_idx(idx, percent):
        # Slice trimming `percent` of the extent from BOTH ends (central crop).
        return slice(int(idx * percent), -int(idx * percent))
    # Central 50% crop, single channel. NOTE(review): channel 0 is BLUE in
    # OpenCV's BGR ordering, although the comment above says green — confirm
    # which channel is intended for the post-it colour.
    reduced_image = image[
        percent_of_idx(total_i, 0.25), percent_of_idx(total_j, 0.25), 0
    ]
    blurred = cv2.GaussianBlur(reduced_image, (3, 3), 0)
    # Threshold at min + 40% of the intensity range.
    threshold = blurred.min() + (2 * (blurred.max() - blurred.min()) / 5)
    masked = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)
    no_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        masked[1].astype("uint8"), 4, cv2.CV_32S
    )
    # stats[:, -1] holds each component's pixel area; the largest component is
    # assumed to be the background, so the post-it is the 2nd largest.
    post_it_pixels = np.sort(stats[:, -1])[-2]  # 2nd largest area.
    return np.sqrt((post_it_size_m**2) / post_it_pixels)
| RAYemelyanova/lunchbox | src/lunchbox/analysis/calibrate.py | calibrate.py | py | 876 | python | en | code | 0 | github-code | 50 |
25195111578 | from flask import Flask, render_template
from flask import Flask, request, jsonify, Response
import json
import mysql.connector
from flask_cors import CORS, cross_origin
app = Flask(__name__)

@app.route('/')
def hello_world():
    # Landing page served at the root URL.
    return render_template('index.html')
@app.route("/")
def hello():
    # NOTE(review): '/' is already routed to hello_world above; Flask serves the
    # first matching rule, so this view appears to be unreachable dead code —
    # confirm and remove or move it to a distinct URL.
    return "Flask inside Docker!!"
def getMysqlConnection():
    """Open a new connection to the 'Netflix' MySQL database on host 'mysql2'.

    NOTE(review): credentials are hard-coded in source; prefer environment
    variables or a secrets store.
    """
    #return mysql.connector.connect(user='testing', host='0.0.0.0', port='3306', password='testing', database='test')
    return mysql.connector.connect(user='root', host='mysql2', port='3306', password='database', database='Netflix')
@app.route('/api/getTest', methods=['GET'])
@cross_origin() # allow all origins all methods.
def get_test():
    """Return all rows of 'shows3' directed by Brad Anderson as JSON.

    On a SQL error an empty result list is returned instead of crashing.
    """
    db = getMysqlConnection()
    print(db)
    # BUG FIX: output_json was previously unbound when the query raised,
    # making the final return fail with UnboundLocalError.
    output_json = []
    try:
        sqlstr = "SELECT * FROM shows3 WHERE director = 'Brad Anderson';"
        print(sqlstr)
        cur = db.cursor()
        cur.execute(sqlstr)
        output_json = cur.fetchall()
    except Exception as e:
        print("Error in SQL:\n", e)
    finally:
        # Always release the connection, even on error.
        db.close()
    return jsonify(results=output_json)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) and host='0.0.0.0' binds all interfaces —
    # never run this combination in production.
    app.run(debug=True,host='0.0.0.0')
| Pimpwhippa/recommender | flask/app_hello_world.py | app_hello_world.py | py | 1,153 | python | en | code | 0 | github-code | 50 |
20194624735 | import boto3
import uuid
class TokenRepository:
    """Read-only accessor for token items stored in a DynamoDB table."""

    def __init__(self, tokenTable):
        # tokenTable: name of the DynamoDB table, keyed by attribute 'keyid'.
        self.table = boto3.resource('dynamodb').Table(tokenTable)

    def getToken(self, keyId):
        """Return the item stored under *keyId*, or None if absent / on AWS error."""
        # BUG FIX: ClientError was referenced but never imported, so any AWS
        # failure raised NameError instead of being handled. botocore ships
        # with boto3; the fallback keeps the method importable without it.
        try:
            from botocore.exceptions import ClientError
        except ImportError:  # pragma: no cover - defensive fallback
            ClientError = Exception
        try:
            response = self.table.get_item(
                Key={
                    'keyid': keyId
                }
            )
        except ClientError as e:
            print(e.response['Error']['Message'])
            return None
        else:
            # BUG FIX: get_item omits 'Item' when the key does not exist;
            # use .get() so a missing item returns None instead of KeyError.
            return response.get('Item')
| bryantrobbins/baseball | shared/btr3baseball/TokenRepository.py | TokenRepository.py | py | 519 | python | en | code | 22 | github-code | 50 |
16878201823 | import logging
import vk_api
from django.conf import settings
from herald_bot.handlers.core.trigger import BaseTrigger
from herald_bot.handlers.core.trigger import BaseTrigger
from herald_bot.handlers.utils.helpers import make_keyboard_vk
from herald_bot.models import User
from vk_api.utils import get_random_id
logger = logging.getLogger(__name__)
class VKTrigger(BaseTrigger):
    """
    VK trigger for the State Machine.
    """
    def send_keyboard(self, message, buttons, whom=None):
        """
        Send a keyboard to the user.
        :param message: text to send
        :param buttons: buttons to send, as a list
        :param whom: chat id to send to (currently unused — self.user_id is used)
        :return: None
        """
        self.client.get_api().messages.send(
            user_id = self.user_id,
            message = message,
            random_id=get_random_id(),
            keyboard=make_keyboard_vk(buttons)
        )

    def send_message(self, message, whom=None):
        # Send a plain text message to self.user_id (`whom` is unused).
        self.client.get_api().messages.send(
            user_id = self.user_id,
            message = message,
            random_id=get_random_id())

    def get_user(self, whom=None):
        """
        Fetch the user from the database.
        :param whom: user id (unused — lookup is done by self.user_id)
        :return: User object on success, False when the lookup fails
        """
        try:
            return User.objects.get(user_id=self.user_id)
        except Exception as e:
            logger.error("Error on get user: {}".format(e))
            return False

    def create_user(self):
        """
        Create the user record in the database.
        :return: None
        """
        try:
            new_user = User.objects.create(user_id=self.user_id, messenger=self.messenger)
            new_user.save()
        except Exception as e:
            logger.error("Error on crete user: {}".format(e))

    def send_photo(self, image_path):
        """
        Upload and send a photo to the user.
        :param image_path: path to the image on the server itself
        :return:
        """
        upload = vk_api.VkUpload(self.client)
        photo = upload.photo_messages(image_path)[0]
        attachment = f"photo{photo['owner_id']}_{photo['id']}"
        self.client.get_api().messages.send(
            user_id = self.user_id,
            random_id=get_random_id(),
            attachment=attachment)
| mr8bit/herald | herald_bot/handlers/vk/trigger.py | trigger.py | py | 2,530 | python | en | code | 1 | github-code | 50 |
42226074507 | import numpy as np
import sys
import copy
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim as optim
import numpy as np
import math
import pdb
import pandas as pd
from torch.nn.utils import clip_grad_norm_
import matplotlib.pyplot as plt
import os
import re
# This is an implementation of a game of Connect Four with a board as a 2D np array
# CONSTANTS
ROW_COUNT = 6          # board rows
COLUMN_COUNT = 7       # board columns
CONNECT_TOTAL = 4      # pieces in a row needed to win
C_PUCT = 4             # PUCT exploration constant used by MCTS
P1BOARD = np.ones((ROW_COUNT,COLUMN_COUNT))      # constant plane marking "player 1 to move"
P2BOARD = np.ones((ROW_COUNT,COLUMN_COUNT))*2    # constant plane marking "player 2 to move"
ITERATIONS = 25        # search iterations per move
EPS = 1e-8
NUMEPISODES = 50
NUMROUNDS = 1
def next_player(curr):
    """Return the opponent of player *curr* (players are 1 and 2)."""
    return 2 if curr == 1 else 1
class Connect4:
    """Connect Four game state on a 6x7 numpy board.

    Cell values: 0 = empty, 1 = player 1, 2 = player 2. Row 0 is the top of
    the board. `self.w` caches the winner (0 = none yet; the tiny float
    0.0000001 is used as a draw sentinel by game_over()).
    """
    def __init__(self):
        self.board = np.zeros((ROW_COUNT,COLUMN_COUNT))
        self.turn = 1                 # player to move next (1 or 2)
        self.w = 0                    # cached winner / draw sentinel
        self.most_recent = [0,0]      # [row, col] of the last piece dropped
    # check if there is room to drop a piece
    def is_valid_location(self, col):
        if col < 0 or col >= COLUMN_COUNT:
            return 0
        # Top row empty means the column is not full.
        return self.board[0][col] == 0
    # return legal moves
    def legal_moves(self):
        #return np.where((self.board[ROW_COUNT-1] == 0) == True)[0]
        valid_locations = []
        for col in range(COLUMN_COUNT):
            if self.is_valid_location(col):
                valid_locations.append(col)
        return valid_locations
    def valid_moves(self):
        """Return a length-7 0/1 numpy mask of playable columns."""
        # valid = (self.board[ROW_COUNT-1,] == 0)
        # return valid.astype(int) #*list(range(7))
        valid_locations = []
        for col in range(COLUMN_COUNT):
            if self.is_valid_location(col):
                valid_locations.append(1)
            else:
                valid_locations.append(0)
        return np.array(valid_locations)
    # get row to put next piece for a certain column
    def get_next_open_row(self, col):
        # Scan bottom-up for the first empty cell (gravity).
        for r in range(ROW_COUNT-1, -1, -1):
            if self.board[r][col] == 0:
                return r
    def print_board(self):
        #return np.flip(self.board, 0)
        return(self.board)
    def result(self, col):
        """Drop the current player's piece in *col* and return self (mutates state)."""
        # NOTE(review): assert(1==2) is a cryptic guard for illegal moves and
        # vanishes under 'python -O' — consider raising ValueError instead.
        if (self.is_valid_location(col) == 0): assert(1==2)
        row = self.get_next_open_row(col)
        piece = self.turn
        self.board[row][col] = piece
        self.most_recent = [row,col]
        # Side effect only: updates self.w if this move wins.
        self.winning_move(piece) #print("player", piece, "wins!")
        self.turn = next_player(piece)
        return self
    def winning_move(self, piece):
        """Return True if *piece* has 4-in-a-row through most_recent; caches winner in self.w."""
        r = self.most_recent[0]
        c = self.most_recent[1]
        # Clamp the scan window around the last move.
        left = max(c - CONNECT_TOTAL, 0)
        right = min(c + CONNECT_TOTAL, COLUMN_COUNT)
        top = max(r - CONNECT_TOTAL, 0)
        bottom = min(r + CONNECT_TOTAL, ROW_COUNT)
        #check left to right
        consecutive = 0
        for i in range(left,right):
            if self.board[r][i] == piece:
                consecutive +=1
            else:
                consecutive = 0
            if consecutive == CONNECT_TOTAL:
                if self.w == 0: self.w = piece
                return True
        consecutive = 0
        #check top to bottom
        for i in range(top,bottom):
            if self.board[i][c] == piece:
                consecutive +=1
            else:
                consecutive = 0
            if consecutive == CONNECT_TOTAL:
                if self.w == 0: self.w = piece
                return True
        #check positive-slope diagnonal
        consecutive = 0
        for i in range(-4,5):
            if (c+i >= 0) and (r-i < ROW_COUNT):
                if (c+i < COLUMN_COUNT) and (r-i >= 0):
                    if self.board[r-i][c+i] == piece:
                        consecutive +=1
                    else:
                        consecutive = 0
                    if consecutive == CONNECT_TOTAL:
                        if self.w == 0: self.w = piece
                        return True
        r = self.most_recent[0]
        c = self.most_recent[1]
        #check negative-slope diagnonal
        consecutive = 0
        for i in range(-4,5):
            if (c+i >= 0) and (r+i < ROW_COUNT):
                if (c+i < COLUMN_COUNT) and (r+i >= 0):
                    if self.board[r+i][c+i] == piece:
                        consecutive +=1
                    else:
                        consecutive = 0
                    if consecutive == CONNECT_TOTAL:
                        if self.w == 0: self.w = piece
                        return True
    def game_over(self):
        """True when either player has won or the board is full (draw)."""
        if self.winning_move(1):
            return True
        if self.winning_move(2):
            return True
        if len(self.legal_moves()) == 0:
            # Draw sentinel: non-zero so winner() is truthy, but not 1 or 2.
            self.w = 0.0000001
            return True
    def win_move(self):
        # 1 if the most recent move produced a win for either player, else 0.
        if self.winning_move(1) == 1 or self.winning_move(2) == 1:
            return 1
        return 0
    def winner(self):
        return self.w
    def next_player(self):
        # Opponent of the player to move (shadows the module-level next_player()).
        if self.turn == 1: return 2
        else: return 1
    def stringRepresentation(self):
        # Hashable-ish key for transposition tables: board text + side to move.
        return (str(self.board) + str(self.turn))
# Encodes the board into a 3D tensor
def encode(game):
    """Encode *game* as a 3-plane tensor: player-1 stones, player-2 stones,
    and a constant plane identifying the side to move."""
    board = game.print_board()
    plane_p1 = np.where(board == 2, 0, board)   # zero out opponent stones
    plane_p2 = np.where(board == 1, 0, board)
    turn_plane = P1BOARD if game.turn == 1 else P2BOARD
    return torch.tensor([plane_p1, plane_p2, turn_plane])
# Flips an encoded board
def encode_reverse(board):
    """Return a new tensor with the column axis (axis 2) mirrored."""
    mirrored = np.flip(np.asarray(board), axis=2).copy()
    return torch.from_numpy(mirrored)
# Random choice strategy
def random_choice(position):
    """Pick a uniformly random legal move for *position*."""
    return random.choice(position.legal_moves())
return random.choice(moves) | Hunterryan725/AlphaZero-Connect-Four | connect4.py | connect4.py | py | 5,767 | python | en | code | 0 | github-code | 50 |
17009498065 | # -*- coding: utf-8 -*-
import argparse
from datetime import datetime, timedelta
from dataclasses import asdict
from typing import List
import uuid
import const
from logging import Logger
import logger
from mq import MQ, MQMsgData
import rapi
def _send_msg(send_data: MQMsgData,
              queue_name: str,
              routing_key: str,
              log: Logger):
    """Serialise *send_data* and publish it to the given MQ queue.

    Any failure is logged and re-raised so the caller can abort the task.
    """
    try:
        with MQ(**const.MQ_CONNECT,
                queue=queue_name,
                routing_key=routing_key) as mq_queue:
            payload = asdict(send_data)
            mq_queue.send_message(message=payload)
            log.info('Send message queue=%(queue)s, data=%(data)s', {'queue': queue_name, 'data': payload})
    except Exception:
        log.exception('Failed to send mq message error')
        raise
def _get_order_item_id_list(log: Logger) -> List[str]:
    """Fetch recent Rakuten orders and return the item manage-numbers of
    orders whose status is relevant for stock synchronisation."""
    end_time = datetime.now()
    start_time = end_time - timedelta(days=const.ORDER_LIST_GET_LAST_DAYS)
    with rapi.RakutenAPI(log=log) as api:
        log.info('Request to search Order')
        orders = api.order.search(start_datetime=start_time, end_datetime=end_time)
        order_data_list = []
        if orders:
            log.info('Request to get Order order=%s', orders)
            order_data_list = api.order.get(order_number_list=orders)
    item_ids = []
    for order_data in order_data_list:
        order_progress = order_data.order_progress
        # Order statuses tracked for stock synchronisation:
        # 100: awaiting order confirmation
        # 200: Rakuten processing
        # 300: awaiting shipment
        # 400: awaiting change confirmation
        # 500: shipped
        # 600: payment in progress
        # 700: payment completed
        if order_progress not in [100, 200, 300, 400, 500, 600, 700]:
            # Statuses excluded from stock synchronisation:
            # 800: awaiting cancellation confirmation
            # 900: cancellation confirmed
            continue
        for order_item in order_data.order_items:
            item_ids.append(order_item.manage_number)
    log.info('Get order list: order_list=%s', item_ids)
    return item_ids
def _producer(log: Logger):
    """Collect recent order item ids and fan them out to the downstream
    marketplace queues (no-op when there are no relevant orders)."""
    item_ids = _get_order_item_id_list(log=log)
    if not item_ids:
        return
    send_data = MQMsgData(id=str(uuid.uuid4()),
                          item_ids=item_ids,
                          msg_send_time=datetime.now().isoformat())
    log.info('Send MQ')
    # Yahoo! Shopping
    _send_msg(send_data=send_data,
              queue_name=const.MQ_YSHOP_QUEUE,
              routing_key=const.MQ_YSHOP_ROUTING_KEY,
              log=log)
    # au PAY Market
    _send_msg(send_data=send_data,
              queue_name=const.MQ_AU_QUEUE,
              routing_key=const.MQ_AU_ROUTING_KEY,
              log=log)
def main():
    """CLI entry point: parse --task_no, build the task logger, run the producer."""
    parser = argparse.ArgumentParser(description='stockout_rakuten_producer')
    parser.add_argument('--task_no',
                        required=True,
                        type=int,
                        help='input process No type integer')
    parsed_args = parser.parse_args()
    log = logger.get_logger(task_name='stockout-rakuten-producer',
                            sub_name='main',
                            name_datetime=datetime.now(),
                            task_no=parsed_args.task_no,
                            **const.LOG_SETTING)
    log.info('Start task')
    log.info('Input args task_no=%s', parsed_args.task_no)
    _producer(log=log)
    log.info('End task')


if __name__ == '__main__':
    main()
| pro-top-star/python-stock-out | app/stockout_rakuten_producer.py | stockout_rakuten_producer.py | py | 3,588 | python | en | code | 2 | github-code | 50 |
42390489997 | #!/usr/bin/env python3
import operator
from functools import reduce
# Width and height of the grid. Used in some calculations later.
WIDTH = 100
HEIGHT = 100
# These are the coordinate transformations to use to check the state of the neighbours
TESTS = [
(-1, -1),
(0, -1),
(1, -1),
(-1, 0),
(1, 0),
(-1, 1),
(0, 1),
(1, 1)
]
def evolve(state):
    """
    This function takes a state, and applies the transformations, returning a new state
    :param state: The current state (flat list of '#'/'.' cells, row-major)
    :return: The evolved state
    """
    # Copy so the neighbour counts are all taken from the previous generation.
    new_state = state[:]
    for this_x, this_y in [(x, y) for x in range(WIDTH) for y in range(HEIGHT)]:
        # How many neighbours are on.
        count = 0
        for test_x, test_y in [(this_x + x, this_y + y) for x, y in TESTS]:
            # BUG FIX: the bounds check previously compared BOTH coordinates
            # against WIDTH, which only worked because WIDTH == HEIGHT; each
            # axis is now checked against its own dimension.
            if not (0 <= test_x < WIDTH and 0 <= test_y < HEIGHT):
                continue
            if state[test_y * WIDTH + test_x] == '#':
                count += 1
        # Conway rules: a dead cell with exactly 3 live neighbours turns on;
        # a live cell with 2 or 3 live neighbours stays on; everything else off.
        cell = state[this_y * WIDTH + this_x]
        if cell == '.' and count == 3:
            new_state[this_y * WIDTH + this_x] = '#'
        elif cell == '#' and count in (2, 3):
            new_state[this_y * WIDTH + this_x] = '#'
        else:
            new_state[this_y * WIDTH + this_x] = '.'
    return new_state
def setcorners(state):
    """
    For part 1, turn the corner lights on.
    :param state: the state on which to turn on all corner lights.
    :return: None
    """
    corners = (0,
               WIDTH - 1,
               (HEIGHT - 1) * WIDTH,
               (HEIGHT - 1) * WIDTH + WIDTH - 1)
    for idx in corners:
        state[idx] = '#'
with open('day18input.txt', 'r') as f:
    # I'm old school and where you have an array with fixed dimensions, I like to just use a flat list and offsets
    # for each row. This also means we don't have to deep copy when setting up a new state, etc
    part1_state = reduce(operator.add, [list(x.strip()) for x in f.readlines()])

# Take a copy for part 2, and set the corner lights.
part2_state = part1_state[:]
setcorners(part2_state)
# Both part 1 and 2 are 100 iterations. Evolve both states and make sure the corners are on for the part 2 state
# (part 2's corner lights are stuck on, so they are re-lit after every step).
for i in range(100):
    part1_state = evolve(part1_state)
    part2_state = evolve(part2_state)
    setcorners(part2_state)
# Count the number of lights on in each part
print(len([x for x in part1_state if x == '#']))
print(len([x for x in part2_state if x == '#']))
| zandeez/adventofcode | 2015/day18.py | day18.py | py | 3,363 | python | en | code | 0 | github-code | 50 |
72053296156 | """
* Write a python program that asks the user a minimum of 3 riddles.
* You can look at riddles.com if you don't already know any riddles.
* Collect the response of each riddle from the user and compare their
answers to the correct answer.
* Use a variable to keep track of the correctly answered riddles
* After all the riddles have been asked, tell the user how many they got
correct
"""
from tkinter import messagebox, simpledialog, Tk
if __name__ == "__main__":
    window = Tk()
    window.withdraw()
    # FIX: the assignment (see the module docstring) requires asking at least
    # three riddles, counting the correctly answered ones, and reporting the
    # total at the end. The old code asked only one user-chosen riddle and
    # never kept score; answers are now also compared case-insensitively.
    # Each entry is (dialog title, riddle prompt, expected answer in caps).
    riddles = [
        ('mommy', 'what do you call an ogre that is the size of a tree but is from a movie?', 'SHREK'),
        ('boo', ' what is as green as a grape but as sus as a frog', 'KERMIT THE FROG'),
        (':)', 'I have 2 legs but no arms. What am I?', 'AMOGUS'),
    ]
    correct = 0
    for title, prompt, answer in riddles:
        reply = simpledialog.askstring(title=title, prompt=prompt)
        # askstring returns None when the dialog is cancelled.
        if reply is not None and reply.strip().upper() == answer:
            correct += 1
            messagebox.showinfo(title='Correct!', message='U WIN!!!!!!')
        else:
            messagebox.showinfo(title='Wrong!', message='U FAILURE!!!')
    messagebox.showinfo(title='Results',
                        message='You got {} out of {} riddles correct!'.format(correct, len(riddles)))
| Maddox6647/level-0-module-1 | _04_int/_1_riddler/riddler.py | riddler.py | py | 1,744 | python | en | code | 0 | github-code | 50 |
3313890304 | import struct
import argparse
from PIL import Image
parser = argparse.ArgumentParser(description='Converts an image back to Quake conchars.')
parser.add_argument('input_img', type=str,
                    help='Input image.')
parser.add_argument('output_chrs', type=str,
                    help='Output conchars.')
args = parser.parse_args()

im = Image.open(args.input_img)
pixels = im.load()

with open(args.output_chrs, "wb") as fp:
    # BUG FIX: the loop bounds were swapped — the outer loop ranged over
    # im.size[0] (width) while the inner index was used as the x coordinate
    # in pixels[x, y]. That only worked for square images; non-square inputs
    # crashed or produced transposed output. im.size is (width, height) and
    # PIL pixel access is pixels[x, y].
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            fp.write(struct.pack("B", pixels[x, y]))
| Imakesoftware2/quake-conchar-tools | img_to_conchars.py | img_to_conchars.py | py | 587 | python | en | code | 0 | github-code | 50 |
37990465074 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config, create_engine
from sqlalchemy.sql import text
from sqlalchemy.ext.automap import automap_base
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from sqlalchemy.exc import DBAPIError
from .. import process_request
from sqlalchemy import or_
from .. import models
import logging
import traceback
import sys
from webapp import automapper
import os
# Resolve development.ini relative to this file so the automapper can read
# its settings regardless of the current working directory.
# NOTE(review): splitting on the literal "webapp/views" is fragile on Windows
# path separators — confirm deployment targets.
head_path = os.path.dirname(__file__).split("webapp/views")[0]
config_path = os.path.join(head_path, 'development.ini')
am = automapper.Automapper(config_path)
# Reflected model classes for tables under the "db2." prefix.
base_automap = am.generate_base("db2.")
@view_config(route_name="primerquery", renderer="../templates/primer_query.jinja2")
def primer_view(request):
    # Render the static primer query form; the template needs no context data.
    return {}
@view_config(
    route_name="primerquery_results",
    renderer="../templates/primer_query_results.jinja2",
)
def primer_results_view(request):
    """Return primer rows (restricted to entries with a PMID) matching the
    primer classes named by the 'selection' route segment."""
    # BUG FIX: db_err_msg was referenced below but never defined anywhere in
    # this module, so a DBAPIError previously raised NameError instead of
    # producing the intended 500 response.
    db_err_msg = 'Database error: the primer query could not be completed.'
    primers = getattr(base_automap, "primer")
    is_query = text("primers.name like 'IS%'")  # NOTE(review): unused — superseded by query_dict["is"]?
    # Name-pattern filter per primer class.
    query_dict = {
        "mlva": primers.name.like("ms__\_%"),
        "mst": primers.name.like("mst%"),
        "is": text("primer.name like 'is%' and primer.pmid is not null"),
        "snp": primers.name.like("ada%"),
        "plasmid": primers.name.like("Q%"),
    }
    condition_list = []
    # NOTE(review): matchdict values are strings; len(wanted_seq) == 1 and the
    # per-character iteration below only make sense if 'selection' is a list —
    # confirm the route configuration.
    wanted_seq = request.matchdict["selection"]
    if len(wanted_seq) == 1:
        try:
            query = (
                request.db2_session.query(primers)
                .filter(query_dict.get(wanted_seq[0]))
                .filter(primers.pmid > 0)
            )
        except DBAPIError:
            return Response(db_err_msg, content_type="text/plain", status=500)
        return {"count": query.count(), "results": query.all()}
    else:
        for items in wanted_seq:
            condition_list.append(query_dict.get(items, None))
        try:
            query = (
                request.db2_session.query(primers)
                .filter(or_(*condition_list))
                .filter(primers.pmid > 0)
            )
        except DBAPIError:
            return Response(db_err_msg, content_type="text/plain", status=500)
        return {"count": query.count(), "results": query.all()}
| foerstner-lab/CoxBase-Webapp | webapp/views/primer_query.py | primer_query.py | py | 2,325 | python | en | code | 0 | github-code | 50 |
1180120399 |
# https://stackoverflow.com/questions/474528/what-is-the-best-way-to-repeatedly-execute-a-function-every-x-seconds
import time, traceback
def every(delay, task, retry=False):
    """Run *task* every *delay* seconds on a wall-clock-anchored schedule.

    Slow iterations cause missed slots to be skipped (not queued up). If the
    task raises, the traceback is printed to stderr; execution stops unless
    *retry* is true.
    """
    scheduled = time.time() + delay
    keep_running = True
    while keep_running:
        pause = scheduled - time.time()
        if pause > 0:
            time.sleep(pause)
        try:
            task()
        except Exception:
            traceback.print_exc()  # prints to stderr
            keep_running = retry
        if keep_running:
            # Advance past any slots we are already behind on.
            missed = (time.time() - scheduled) // delay
            scheduled += (missed + 1) * delay
sample_rate = 200  # target ticks per second

# Two clocks are kept so monotonic (perf_counter) elapsed time can be
# compared against wall-clock (time.time) elapsed time on the status line.
begin = time.perf_counter()
begin_t = time.time()
i = 0  # tick counter within the current "second"
s = 0  # completed "seconds" (blocks of ticks)

def next_sample():
    # Advance the tick counter and print a live, carriage-return status line.
    global begin, begin_t, i, s
    i += 1
    if i > sample_rate:
        # NOTE(review): the reset fires on tick sample_rate + 1, so each 's'
        # spans 201 ticks at sample_rate=200 — confirm whether that off-by-one
        # is intended.
        i = 0
        s += 1
    elapsed = time.perf_counter() - begin
    elapsed_t = time.time() - begin_t
    print("\r[Timer] Seconds: {:4}, Ticks: {:3}, Elapsed: {:4.2f}, Elapsed: {:4.2f}".format(s, i, elapsed, elapsed_t), end='')

if __name__ == "__main__":
    every(1/sample_rate, next_sample)
29340460724 | import random
import sys
import pygame
from pygame.locals import *
# globel variables
FPS =30
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 511
SCREEN = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
GROUNDY = SCREEN_HEIGHT*0.8
GAME_SPRITIES = {}
GAME_SOUNDS = {}
PLAYER = 'gallery/sprites/bird.png'
BACKGROUND = 'gallery/sprites/back.png'
PIPE = 'gallery/sprites/pipe.png'
def WelcomeScreen():
    """Show the title screen until the player presses SPACE/UP/RETURN
    (returns to start the game) or quits (ESC / window close)."""
    playerx = int(SCREEN_WIDTH/5)
    playery = int((SCREEN_HEIGHT - GAME_SPRITIES['player'].get_height())/2)
    messagex = int((SCREEN_WIDTH - GAME_SPRITIES['message'].get_width())/2)
    messagey = int(SCREEN_HEIGHT * 0.13)
    basex = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP or event.key == K_RETURN):
                # Hand control over to the game loop.
                return
            else:
                # NOTE(review): the screen is only redrawn when some event is
                # processed; with an empty event queue nothing is blitted.
                SCREEN.blit(GAME_SPRITIES['background'],(0,0))
                SCREEN.blit(GAME_SPRITIES['player'],(playerx,playery))
                SCREEN.blit(GAME_SPRITIES['message'],(messagex,messagey))
                SCREEN.blit(GAME_SPRITIES['base'],(basex,GROUNDY))
                pygame.display.update()
                FPSCLOCK.tick(FPS)
def mainGame():
    """Run one round of the game; returns when the bird crashes."""
    score = 0
    playerx = int(SCREEN_WIDTH/5)
    playery = int(SCREEN_WIDTH/2)
    basex = 0
    # Seed the level with two pipe pairs just off the right edge.
    newpipe = getRandomPipe()
    newpipe2 = getRandomPipe()
    upperPipe = [
        {'x': SCREEN_WIDTH+200,'y':newpipe[0]['y']},
        {'x': SCREEN_WIDTH+200+(SCREEN_WIDTH/2),'y':newpipe2[0]['y']},
    ]
    lowerPipe = [
        {'x': SCREEN_WIDTH+200,'y':newpipe[1]['y']},
        {'x': SCREEN_WIDTH+200+(SCREEN_WIDTH/2),'y':newpipe2[1]['y']},
    ]
    # Physics constants: pipes scroll left; gravity pulls the bird down;
    # a flap gives an instant upward velocity.
    pipeVelX = -4
    playerVelY = -9
    playerMaxVelY = 10      # terminal fall speed
    playerMinVelY = -8      # (unused) max rise speed
    playerAccY = 1          # gravity per frame
    playerFlapAccv = -8     # velocity applied on flap
    playerFlapped = False
    # main gameloop
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN and (event.key == K_UP or event.key == K_RETURN or event.key == K_SPACE):
                if playery > 0:
                    playerVelY = playerFlapAccv
                    playerFlapped = True
                    GAME_SOUNDS['wing'].play()
        crashTest = isColide(playerx,playery,upperPipe,lowerPipe)
        if crashTest:
            # Round over; the caller shows the welcome screen again.
            return
        # score updation: award a point the frame the bird's centre passes a
        # pipe's centre (the +4 window matches one frame of pipe movement).
        playerMidPos = playerx + GAME_SPRITIES['player'].get_width()/2
        for pipe in upperPipe:
            pipeMidPos = pipe['x'] + GAME_SPRITIES['pipe'][0].get_width()/2
            if pipeMidPos<= playerMidPos < pipeMidPos +4:
                score +=1
                GAME_SOUNDS['point'].play()
        # Apply gravity unless the bird just flapped this frame.
        if playerVelY <playerMaxVelY and not playerFlapped:
            playerVelY += playerAccY
        if playerFlapped:
            playerFlapped = False
        playerHeight = GAME_SPRITIES['player'].get_height()
        # Fall at most to the ground line.
        playery = playery + min(playerVelY, GROUNDY - playery - playerHeight)
        # move pipes to the left
        for upperPipes , lowerPipes in zip(upperPipe, lowerPipe):
            upperPipes['x'] += pipeVelX
            lowerPipes['x'] += pipeVelX
        # Add a new pipe when the first is about to cross the leftmost part of the screen
        if 0<upperPipe[0]['x']<5:
            newpipe = getRandomPipe()
            upperPipe.append(newpipe[0])
            lowerPipe.append(newpipe[1])
        # removing pipe once it is fully off-screen
        if upperPipe[0]['x'] < -GAME_SPRITIES['pipe'][0].get_width():
            upperPipe.pop(0)
            lowerPipe.pop(0)
        # Lets blit our sprites now
        SCREEN.blit(GAME_SPRITIES['background'], (0, 0))
        for upperPipes, lowerPipes in zip(upperPipe, lowerPipe):
            SCREEN.blit(GAME_SPRITIES['pipe'][0], (upperPipes['x'], upperPipes['y']))
            SCREEN.blit(GAME_SPRITIES['pipe'][1], (lowerPipes['x'], lowerPipes['y']))
        SCREEN.blit(GAME_SPRITIES['base'], (basex, GROUNDY))
        SCREEN.blit(GAME_SPRITIES['player'], (playerx, playery))
        # Draw the score digit-by-digit, centred horizontally.
        myDigits = [int(x) for x in list(str(score))]
        width = 0
        for digit in myDigits:
            width += GAME_SPRITIES['numbers'][digit].get_width()
        Xoffset = (SCREEN_WIDTH - width)/2
        for digit in myDigits:
            SCREEN.blit(GAME_SPRITIES['numbers'][digit], (Xoffset, SCREEN_HEIGHT*0.12))
            Xoffset += GAME_SPRITIES['numbers'][digit].get_width()
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def isColide(playerx, playery, upperPipe, lowerPipe):
    """Return True (and play the hit sound) when the bird touches the ground,
    the ceiling, or any pipe in *upperPipe*/*lowerPipe*."""
    if playery > GROUNDY - 25 or playery < 0:
        GAME_SOUNDS['hit'].play()
        return True
    pipe_width = GAME_SPRITIES['pipe'][0].get_width()
    pipe_height = GAME_SPRITIES['pipe'][0].get_height()
    for pipe in upperPipe:
        if playery < pipe_height + pipe['y'] and abs(playerx - pipe['x']) < pipe_width:
            GAME_SOUNDS['hit'].play()
            return True
    player_height = GAME_SPRITIES['player'].get_height()
    for pipe in lowerPipe:
        if playery + player_height > pipe['y'] and abs(playerx - pipe['x']) < pipe_width:
            GAME_SOUNDS['hit'].play()
            return True
    return False
def getRandomPipe():
    """Return a freshly randomised [upper, lower] pipe pair just off-screen right."""
    pipeHeight = GAME_SPRITIES['pipe'][0].get_height()
    offset = SCREEN_HEIGHT/3
    # Random vertical position for the lower pipe's top edge.
    y2 = offset + random.randrange(0, int(SCREEN_HEIGHT - GAME_SPRITIES['base'].get_height() - 1.2 *offset))
    y1 = pipeHeight - y2 + offset
    pipeX = SCREEN_WIDTH + 10
    return [
        {'x': pipeX, 'y': -y1},  # upper pipe (drawn hanging down from above)
        {'x': pipeX, 'y': y2},   # lower pipe
    ]
if __name__ == "__main__":
    # Initialise pygame, load every sprite/sound once, then alternate between
    # the welcome screen and the game loop forever.
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    pygame.display.set_caption("Flappy Bird++")
    # Digit sprites indexed 0-9 for drawing the score.
    GAME_SPRITIES['numbers'] = (
        pygame.image.load('gallery/sprites/0.png').convert_alpha(),
        pygame.image.load('gallery/sprites/1.png').convert_alpha(),
        pygame.image.load('gallery/sprites/2.png').convert_alpha(),
        pygame.image.load('gallery/sprites/3.png').convert_alpha(),
        pygame.image.load('gallery/sprites/4.png').convert_alpha(),
        pygame.image.load('gallery/sprites/5.png').convert_alpha(),
        pygame.image.load('gallery/sprites/6.png').convert_alpha(),
        pygame.image.load('gallery/sprites/7.png').convert_alpha(),
        pygame.image.load('gallery/sprites/8.png').convert_alpha(),
        pygame.image.load('gallery/sprites/9.png').convert_alpha(),
    )
    GAME_SPRITIES['message'] = pygame.image.load('gallery/sprites/message.png').convert_alpha()
    GAME_SPRITIES['base'] = pygame.image.load('gallery/sprites/base.png').convert_alpha()
    # Index 0 is the upper (rotated 180°) pipe, index 1 the lower pipe.
    GAME_SPRITIES['pipe'] =(
        pygame.transform.rotate(pygame.image.load(PIPE).convert_alpha(), 180),
        pygame.image.load(PIPE).convert_alpha()
    )
    GAME_SOUNDS['die'] = pygame.mixer.Sound('gallery/audio/die.wav')
    GAME_SOUNDS['hit'] = pygame.mixer.Sound('gallery/audio/hit.wav')
    GAME_SOUNDS['point'] = pygame.mixer.Sound('gallery/audio/point.wav')
    GAME_SOUNDS['swoosh'] = pygame.mixer.Sound('gallery/audio/swoosh.wav')
    GAME_SOUNDS['wing'] = pygame.mixer.Sound('gallery/audio/wing.wav')
    GAME_SPRITIES['background'] = pygame.image.load(BACKGROUND).convert()
    GAME_SPRITIES['player'] = pygame.image.load(PLAYER).convert_alpha()
    while True:
        WelcomeScreen()
        mainGame()
| SaqibShoaib/Snake-Game | flappy_bird++.py | flappy_bird++.py | py | 7,757 | python | en | code | 0 | github-code | 50 |
41432739744 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 4 21:41:07 2022
@author: Zaha
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import turtle
import random
class L_system:  # L-system grammar: a base axiom plus character-replacement rules.
    def __init__(self):
        """Create an L-system with an empty base axiom and no rules."""
        self.base = ''
        self.rules = []

    def Set_Base(self, new_base):  ## Setting a base axiom for L-System
        """Set the base axiom (generation-0 string).

        Args:
            new_base (str): the starting string.
        """
        self.base = new_base

    def Add_Rule(self, new_rule):  ## Adding a new rule to Rules of L-System
        """Append a replacement rule.

        Args:
            new_rule (list): two-item list; item 0 is the single character on
                the left side of the rule, item 1 is the replacement string.
        """
        self.rules.append(new_rule)

    def Generate(self, initial_string):  ## Creating next generation of string
        """Apply one round of rule replacements to initial_string.

        The first rule whose left side matches a character wins; characters
        with no matching rule are copied through unchanged.

        Returns:
            str: new string after replacements of rules.
        """
        new_string = ''
        for character in initial_string:
            found_rule = False
            for rule in self.rules:
                if rule[0] == character:
                    new_string = new_string + rule[1]
                    # Bug fix: the original set an unrelated name ('found'),
                    # so 'found_rule' stayed False and matched characters were
                    # ALSO copied through below, duplicating them.
                    found_rule = True
                    break
            if found_rule == False:
                new_string = new_string + character
        return new_string

    def Build_String(self, n_iteration):  ## Generating string after given iterations
        """Run Generate() n_iteration times starting from the base axiom.

        Returns:
            str: generated string of L-system after specified iterations.
        """
        n_string = self.base
        for i in range(n_iteration):
            n_string = self.Generate(n_string)
        return n_string
"-----------------------------------------------------------------------"
class Turtle: # Thin wrapper around the module-level `turtle` screen used to draw L-system strings.
    def __init__(self, dx = 800, dy = 800):
        """Open the turtle window.

        Args:
            dx: width of screen in pixels.
            dy: height of screen in pixels.
        """
        turtle.setup(width = dx, height = dy )
    def Draw_String(self, d_string, distance, angle): ## Interpret an L-system string as turtle commands.
        """Draw the given L-system string.

        Command characters: F=forward, +/-=turn left/right, [ ]=push/pop
        position+heading, < >=push/pop pen colour, g/y/r=set preset colours.
        Any other character is ignored by the elif chain.

        Args:
            d_string (str): string to draw.
            distance : forward step length.
            angle : rotation angle in degrees.
        """
        stack = []        # saved (position, heading) pairs for '[' / ']'
        color_stack = []  # saved pen colours for '<' / '>'
        for character in d_string:
            if character =='F':
                turtle.forward(distance)
            elif character == '-':
                turtle.right(angle)
            elif character=='+':
                turtle.left(angle)
            elif character=='[':
                stack.append([turtle.position(), turtle.heading()])
            elif character==']':
                # Jump back (pen up, so no line is drawn) to the saved state.
                turtle.penup()
                pop_item = stack.pop()
                turtle.setheading(pop_item[1])
                turtle.goto(pop_item[0])
                turtle.pendown()
            elif character == '<':
                color_stack.append(turtle.color()[0])
            elif character == '>':
                col = color_stack.pop()
                turtle.color(col)
            # NOTE(review): turtle.pitch()/turtle.roll() are not part of
            # CPython's standard `turtle` module — these branches look like
            # they target a 3D turtle variant; confirm the runtime environment.
            elif character =='&':
                turtle.pitch(angle)
            elif character =='^':
                turtle.pitch(-angle)
            elif character=='\\':
                turtle.roll(angle)
            elif character=='/':
                turtle.roll(-angle)
            elif character =='g':
                turtle.color((0, 0.5, 0), (0, 0.5, 0))
            elif character=='y':
                turtle.color((0.5,0.25,0),(0.5,0.25,0))
            elif character=='r':
                turtle.color('red')
        turtle.update()
    def Hold(self): ## Hold the screen open until the user clicks or types 'q'.
        turtle.listen()
        turtle.ht()
        turtle.update()
        turtle.onkey( turtle.bye, 'q' )
        turtle.onscreenclick( lambda x,y: turtle.bye() )
        turtle.mainloop()
        # exit() ends the interpreter once the turtle window is closed.
        exit()
    def Go_to(self, xpos, ypos): ## Move to a given point without drawing.
        turtle.penup()
        turtle.goto(xpos,ypos)
        turtle.pendown()
    def color(self, c): ## Set the pen colour of the turtle.
        turtle.color(c)
    def Place(self, xpos, ypos, angle=None): ## Move to a point, optionally set heading, without drawing.
        turtle.penup()
        turtle.goto(xpos,ypos)
        if angle!=None:
            turtle.setheading(angle)
        turtle.pendown()
"---------------------------------"
def main():
    """
    Test the ability of the Turtle to draw trees.

    Specifies five L-systems (base axiom plus replacement rules), then draws
    5 trees across the screen, using 3-4 iterations of the rules to generate
    each drawing string. Blocks at the end until the user clicks the window
    or presses 'q'.
    """
    tree1 = L_system()
    tree2 = L_system()
    tree3 = L_system()
    tree4 = L_system()
    tree5 = L_system()
    tree1.Set_Base('X')
    tree1.Add_Rule(['X', 'Z+[X+P]--[--<yL>]I[++<yL>]-[XP]++XP'])
    tree1.Add_Rule(['Z', 'FI[+<yL>][-<yL>]FI'])
    tree1.Add_Rule(['I', 'IFI'])
    tree1.Add_Rule(['P', '<r[++F][+F][F][-F][--F]>'])
    "----------------------------------------------------------------------------"
    tree2.Set_Base('F')
    tree2.Add_Rule(['F', ' <yF<g[+F]><yF[-F]>'])
    "---------------------------------------------------------------------------"
    tree3.Set_Base('X')
    tree3.Add_Rule(['X', 'F<g[+X][-X]>FX'])
    tree3.Add_Rule(['F', '<y>F'])
    "---------------------------------------------------------------------------"
    tree4.Set_Base('X')
    tree4.Add_Rule(['X', 'F-<g[[X]+X]+F[+FX]-X>'])
    tree4.Add_Rule(['F', 'FF'])
    "---------------------------------------------------------------------------"
    tree5.Set_Base('F')
    tree5.Add_Rule(['F', ' FF+[<y>+F-F-F]-[<g-F+F+F]>'])
    "----------------------------------------------------------------------------"
    terp = Turtle(800, 850)
    tree = [tree1, tree2, tree3, tree4, tree5]
    # Horizontal nudges so the five trees are spread across the window.
    z = [10, 40, 80, 150, 220]
    for i in range(5):
        x0 = -800/3 + i*0.75*800/(6) + z[i]
        y0 = -300
        # 3-4 iterations keeps the string size (and draw time) manageable.
        tstring = tree[i].Build_String( random.randint( 3, 4 ) )
        terp.color( (0.5, 0.4, 0.3 ) )
        # Place the turtle at the tree base, pointing roughly straight up.
        terp.Place( x0, y0, random.randint( 89, 95 ) )
        terp.Draw_String(tstring, random.randint( 5, 7 ), random.randint( 18, 30 ) * random.choice( [1, -1] ) )
    # Bug fix: the class method is Hold(), not hold(); the original call
    # raised AttributeError as soon as drawing finished.
    terp.Hold()
| zaha2020/Bio_Inspired_Computing | L-System_Grammer/Codes/L_SYSYEM.py | L_SYSYEM.py | py | 7,049 | python | en | code | 1 | github-code | 50 |
24135211114 | import pyb
import machine
import struct
import utime
# Import the necessary modules
print(" Importing gateway/relay node function library...")
import uac_network.main.gw_functions as gwf
from sensor_payload.main.sensor_payload import SensorPayload
from uac_modem.main.unm3driver import MessagePacket, Nm3
print(" Imports within sensor_node.py completed")
### Define the network protocol class
class NetProtocol:
    """Dual-hop TDA-MAC protocol logic for a USMART sensor/relay node.

    Holds the protocol state (addresses, transmit delay, frame timing) and
    reacts to packets received via an NM3 acoustic modem: network discovery,
    transmit-delay instructions (TDI), broadcast/unicast data requests (REQ)
    and diagnostic queries.
    """

    # Constructor method including the initialization code
    def __init__(self):
        # Initialize the TDA-MAC parameters
        self.nm = None # reference to the modem object
        self.sensor = None # sensor payload interface
        self.debug_flag = False # set to True for more console output
        self.thisNode = -1 # node address string to be queried from the nanomomodem
        self.masterNode = -1 # address of the master node for this node
        self.txDelay = 0 # transmit delay for this node [ms]
        self.timeTillNextFrame = 0 # time till next frame [ms]
        self.ttnfTimestamp = 0 # time at which the time till next frame was saved
        self.subframeLength = 0 # subframe length for a relay node [ms]
        self.childNodes = list() # list of child nodes for dual-hop TDA-MAC
        self.isRelay = False # flag indicating whether this node is a relay
        self.frameStartTime = 0 # start time of the current frame
        self.guardInt = 200 # guard interval for timed Tx/Rx [ms]
        self.location = (0.0, 0.0) # tuple to store the Lat-Long location
        self.wdt = None # WDT passed from mainloop to be fed during longer loops.
        print('Initializing the TDA-MAC parameters...')

    # Method to initialize the interfaces with the sensor payload and modem
    def init_interfaces(self, modem, sensor_payload, wdt=None):
        """Store modem/sensor/watchdog references, then query and print the
        modem's address and battery voltage (up to 3 attempts each)."""
        # Save the references to these objects
        self.nm = modem
        self.sensor = sensor_payload
        self.wdt = wdt
        # Feed the watchdog
        if self.wdt:
            self.wdt.feed()
        # Query this node's address and display it
        retry_count = 0
        addr = -1
        while addr == -1 and retry_count < 3:
            addr = modem.get_address()
            retry_count = retry_count + 1
            utime.sleep_ms(100)
        print("  This node's address: " + '%03d' % addr)
        self.thisNode = addr # save the node's address
        # Feed the watchdog
        if self.wdt:
            self.wdt.feed()
        # Query this node's battery voltage and display it
        retry_count = 0
        voltage = -1
        while voltage == -1 and retry_count < 3:
            voltage = modem.get_battery_voltage()
            retry_count = retry_count + 1
            utime.sleep_ms(100)
        print("  Voltage supplied to the modem: " + '%0.2f' % voltage + "V")
        print(" ")

    ### Setter method for the estimated location
    def set_location(self, location):
        """Store the (latitude, longitude) tuple reported in 'L' REQs."""
        self.location = location

    ### General packet handling function that performs a particular action given the received packet
    # Returns (can_go_to_sleep, time_till_next_req, pkt_ignored)
    def handle_packet(self, packet):
        """Dispatch a received packet to the matching handler.

        Returns:
            tuple: (canGoToSleep, time_till_next_frame_ms, pktIgnored)
        """
        # Initialize the flag indicating whether the TDA-MAC frame is over after handling this packet
        canGoToSleep = False
        pktIgnored = False
        # Parse the received packet
        payload = bytes(packet.packet_payload)
        srcId = packet.source_address
        pktType = packet.packet_type
        # If in debug mode, display a message in terminal
        if self.debug_flag:
            print(' Rx from N' + str(packet.source_address) + ': ' + str(payload))
        # Feed the watchdog
        if self.wdt:
            self.wdt.feed()
        # Network discovery test message
        if (pktType == 'U') and (len(payload) > 7) and (payload[0:6] == b'UNNDTX'):
            self.dealWithTestTx(payload)
        # Network discovery request packet
        elif (pktType == 'U') and (len(payload) > 4) and (payload[0:4] == b'UNN?'):
            self.dealWithNetDiscReq(payload)
        # TDI packet unicast to this node
        elif (pktType == 'U') and (len(payload) > 4) and (payload[0:4] == b'UNI!'):
            self.dealWithTDIPacket(payload)
        # Broadcast REQ packet from the master node
        elif (pktType == 'B') and (srcId == self.masterNode) and (len(payload) > 5) and (payload[0:3] == b'UNR'):
            canGoToSleep = self.dealWithBroadcastREQ(srcId, payload, None)
        # Unicast REQ packet
        elif (pktType == 'U') and (len(payload) > 4) and (payload[0:3] == b'UNR'):
            canGoToSleep = self.dealWithUnicastREQ(payload)
        # General "Are you alive" packet
        elif (pktType == 'U') and (len(payload) == 11) and (payload[0:8] == b'UNalive?'):
            self.respondToDiagnosticPacket(payload)
        else:
            pktIgnored = True
        # Return the sleep flag and the time to next frame
        ttnf = self.timeTillNextFrame - utime.ticks_diff(utime.ticks_ms(), self.ttnfTimestamp)
        return (canGoToSleep, ttnf, pktIgnored)

    #####################################################################
    ### Function to deal with the network discovery test transmission ###
    #####################################################################
    def dealWithTestTx(self, payload):
        """Answer a network-discovery test message with a test response."""
        # Parse the source address
        srcNode = struct.unpack('B', payload[6:7])[0]
        print('Test message received from Node ' + str(srcNode))
        # Wait a short guard interval and send a response
        pyb.delay(self.guardInt)
        response = b'UNNDRX' + struct.pack('B', int(self.thisNode)) + b'USMART_Test_Transmission_Response'
        print("  Sending test message response...")
        self.nm.send_unicast_message(srcNode, response)

    ##################################################################
    ### Function to deal with the network discovery request packet ###
    ##################################################################
    def dealWithNetDiscReq(self, payload):
        """Run network discovery on the requested node list and report back."""
        # Parse the source address
        srcNode = struct.unpack('B', payload[4:5])[0]
        # Parse the list of nodes that need to be discovered
        nodeList = list()
        for n in range(len(payload)-5):
            # Parse the address
            addr = struct.unpack('B', payload[5+n:6+n])[0]
            # Append the address to the list
            nodeList.append(addr)
        # Print message
        print('Node discovery request received from Node ' + str(srcNode))
        # Perform network discovery
        (propDelays, linkQuality) = gwf.doNetDiscovery(self.nm, self.thisNode, nodeList, self.wdt)
        # Send the node discovery results packet to the requesting node
        self.sendNodeDiscPacket(srcNode, propDelays, linkQuality)

    ##########################################
    ### Function to deal with a TDI packet ###
    ##########################################
    def dealWithTDIPacket(self, payload):
        """Apply a transmit-delay instruction addressed to this node, or
        forward it to the destination node (making this node a relay)."""
        # Parse the source address
        srcNode = struct.unpack('B', payload[4:5])[0]
        # Parse the destination address
        destNode = struct.unpack('B', payload[5:6])[0]
        # Parse the Tx delay and subframe interval
        txd = struct.unpack('I', payload[6:10])[0]
        sfLength = struct.unpack('I', payload[10:14])[0]
        # Print message
        print('TDI received from Node ' + str(srcNode) + " for Node " + str(destNode) + ": " + str(txd) + ' ms')
        # If this TDI is addressed to me, update the Tx delay and the master node address
        tdiDelivered = False
        if destNode == self.thisNode:
            self.txDelay = txd # [ms]
            self.masterNode = srcNode
            self.childNodes = list() # reset the child node list (new schedule and topology)
            self.isRelay = False
            tdiDelivered = True
        # Otherwise this TDI needs to be forwarded to the destination node
        else:
            # First note the subframe length, because I will be the master node for this node
            self.subframeLength = sfLength # [ms]
            self.isRelay = True
            # Try sending a TDI and receiving an ACK
            pyb.delay(100) # first, sleep long enough to transmit the Auto-ACK
            tdiDelivered = gwf.sendTDIPackets(self.nm, self.thisNode, [destNode], [txd], 0, [True], self.wdt)[0]
            # Save the destination node as the child node
            if not (destNode in self.childNodes):
                self.childNodes.append(destNode)
        # Transmit TDI ACK/NACK packet
        if tdiDelivered:
            txMessage = b'UNIA' + struct.pack('B', self.thisNode)
            print('Sending TDI ACK...')
        else:
            txMessage = b'UNIN' + struct.pack('B', self.thisNode)
            print('Sending TDI NACK...')
        self.nm.send_unicast_message(srcNode, txMessage)

    ####################################################
    ### Function to deal with a Broadcast REQ packet ###
    ####################################################
    def dealWithBroadcastREQ(self, srcId, payload, dataPacket):
        """Respond to a broadcast REQ with location or sensor data, then wait
        for a possible retransmission request.

        Args:
            srcId: address of the requesting (master) node.
            payload: raw REQ payload bytes.
            dataPacket: previously-built data packet to resend, or None to
                build a fresh one from the sensor.

        Returns:
            bool: True if this node may go to sleep after the frame.
        """
        # Start reading the sensor if the data packet is not given (new transmission)
        # mainloop is responsible for initiating sensor data acquisition and processing now.
        # if not dataPacket:
        #     self.sensor.start_acquisition()
        # Note the time of receiving this packet
        reqTime = utime.ticks_ms()
        # If this is the first broadcast REQ of the frame, note the frame start time
        reqIndex = struct.unpack('B', payload[3:4])[0]
        # Update the start of frame time
        self.timeTillNextFrame = struct.unpack('I', payload[4:8])[0]
        self.ttnfTimestamp = reqTime # note the time stamp for this TTNF
        # Check if I need to go back to always-on state after this
        sleepFlag = struct.unpack('B', payload[8:9])[0]
        # Decode the addresses of nodes expected to respond
        destAddr = list()
        for n in range(10, len(payload)):
            addr = struct.unpack('B', payload[n:n+1])[0]
            destAddr.append(int(addr))
        # Respond only if I am in the list
        if self.thisNode in destAddr:
            # Print message for debugging
            print("REQ received from Node " + str(srcId))
            print("  Time till next frame: " + str(self.timeTillNextFrame) + " msec")
            # If this is a request for location, put it into the payload
            if payload[9:10] == b'L':
                # Create the data payload
                dataPayload = b'L' + struct.pack('f', self.location[0]) + b'L' + struct.pack('f', self.location[1])
                packetPayload = b'UND' + struct.pack('B', self.thisNode) + dataPayload
            # Otherwise, this is a request for sensor readings
            else:
                # Read the sensor and create the data payload
                try:
                    if not dataPacket:
                        # mainloop is responsible for initiating sensor data acquisition and processing now.
                        # self.sensor.process_acquisition()
                        dataPayload = self.sensor.get_latest_data_as_bytes()
                        packetPayload = b'UND' + struct.pack('B', self.thisNode) + dataPayload
                    else:
                        packetPayload = dataPacket
                except Exception as e:
                    # If an Exception was caught, print the error
                    print("Error reading the sensor: " + str(e))
                    packetPayload = b'UND' + struct.pack('B', self.thisNode) + b'sensor_error'
            # Sleep for the remaining part of the transmit delay
            timeElapsed = utime.ticks_diff(utime.ticks_ms(), reqTime)
            if (self.txDelay > timeElapsed):
                pyb.delay(self.txDelay - timeElapsed)
            # Transmit the payload packet to the master node
            self.nm.send_unicast_message(srcId, packetPayload)
            # Print this transmission
            if payload[9:10] == b'L':
                print("Location data sent: Lat=" + '%.5f' % self.location[0] + ", Long=" + '%.5f' % self.location[1])
            else:
                print("Sensor readings sent")
            # If I have any child nodes, do not go to sleep after this REQ
            if self.isRelay:
                sleepFlag = 0
            # Wait for a retransmission request, if one arrives (10 sec timeout)
            reTxTimeout = 10000
            timerStart = utime.ticks_ms()
            while utime.ticks_diff(utime.ticks_ms(), utime.ticks_add(timerStart, reTxTimeout)) < 0:
                # Feed the watchdog
                if self.wdt:
                    self.wdt.feed()
                # Check if a packet has been received
                self.nm.poll_receiver()
                self.nm.process_incoming_buffer()
                if self.nm.has_received_packet():
                    # Read the incoming packet and see if it is a REQ
                    packet = self.nm.get_received_packet()
                    payload = bytes(packet.packet_payload)
                    srcId = packet.source_address
                    pktType = packet.packet_type
                    # If it is a REQ, process it by calling this function again
                    if (pktType == 'B') and (len(payload) > 5) and (payload[0:3] == b'UNR'):
                        # But if this is a broadcast REQ not from my master node, ignore it
                        if (srcId == self.masterNode):
                            canGoToSleep = self.dealWithBroadcastREQ(srcId, payload, packetPayload)
                            sleepFlag = 1 if (canGoToSleep) else 0 # Convert the sleep flag (due to recursion here!)
                            break # finish waiting, the protocol has moved on
                    # If it is a unicast REQ, data transmission was successful, move on to relaying
                    elif (pktType == 'U') and (len(payload) > 4) and (payload[0:3] == b'UNR'):
                        canGoToSleep = self.dealWithUnicastREQ(payload)
                        sleepFlag = 1 if (canGoToSleep) else 0 # Convert the sleep flag (due to recursion here!)
                        break # finish waiting, the protocol has moved on
                    # Otherwise, pass it up to the main packet handling function
                    else:
                        # Bug fix: handle_packet() returns a 3-tuple; the original
                        # indexed it with [0] and then tried to unpack a bool,
                        # raising TypeError whenever a non-REQ packet arrived here.
                        (canGoToSleep, _, pktIgnored) = self.handle_packet(packet)
                        if not pktIgnored:
                            sleepFlag = 1 if (canGoToSleep) else 0 # Convert the sleep flag (due to recursion here!)
                            break
        # Return the flag indicating if I can go to sleep or should stay awake
        return (sleepFlag == 1)

    ##################################################
    ### Function to deal with a Unicast REQ packet ###
    ##################################################
    def dealWithUnicastREQ(self, payload):
        """Act as a relay: gather data packets from child nodes and forward
        them to the requesting gateway node.

        Returns:
            bool: True if this node may go to sleep after the frame.
        """
        # Note the time of receiving this packet
        reqTime = utime.ticks_ms()
        # Parse the source address
        srcNode = struct.unpack('B', payload[3:4])[0]
        # If this is the first REQ or a repeated REQ (retransmissions)
        reqIndex = struct.unpack('B', payload[4:5])[0]
        # Check if I need to go back to always-on state after this
        sleepFlag = struct.unpack('B', payload[5:6])[0]
        # Decode the addresses of nodes expected to respond
        destAddr = list()
        for n in range(7, len(payload)):
            addr = struct.unpack('B', payload[n:n+1])[0]
            destAddr.append(int(addr))
        # Print message for debugging
        print("Unicast REQ received from Node " + str(srcNode))
        # If this is a blank REQ (e.g. go-to-sleep instruction)
        if not destAddr:
            # Transmit a blank broadcast REQ to my child nodes three times
            numREQCopies = 3
            interval = 2000 # 2 second intervals between REQ copies
            for n in range(numREQCopies):
                # Feed the watchdog
                if self.wdt:
                    self.wdt.feed()
                # Transmit blank broadcast REQ
                ttnf = max(0, self.timeTillNextFrame - utime.ticks_diff(utime.ticks_ms(), self.ttnfTimestamp))
                print("Sending Blank Broadcast REQ...")
                gwf.sendBroadcastREQ(self.nm, "S", n+1, ttnf, (sleepFlag == 1), [])
                # Wait for a set interval before transmitting it again
                pyb.delay(interval)
        # If this is not a blank REQ gather the data from my child nodes
        else:
            # Try gathering the data from all child nodes
            packetBuffer = list()
            nodesToRespond = destAddr.copy()
            numRetries = 3
            for n in range(1, numRetries+1):
                # Feed the watchdog
                if self.wdt:
                    self.wdt.feed()
                # Transmit a broadcast REQ packet
                ttnf = max(0, self.timeTillNextFrame - utime.ticks_diff(utime.ticks_ms(), self.ttnfTimestamp))
                print("Sending Broadcast REQ...")
                gwf.sendBroadcastREQ(self.nm, payload[6:7].decode(), n, ttnf, (sleepFlag==1), nodesToRespond)
                # Go into a loop listening for payload packets from child nodes
                sfStartTime = utime.ticks_ms()
                timeout = self.subframeLength + 2*self.guardInt
                while utime.ticks_diff(utime.ticks_ms(), utime.ticks_add(sfStartTime, timeout)) < 0:
                    # Feed the watchdog
                    if self.wdt:
                        self.wdt.feed()
                    # Check if a packet has been received
                    self.nm.poll_receiver()
                    self.nm.process_incoming_buffer()
                    if self.nm.has_received_packet():
                        # Decode the packet
                        packet = self.nm.get_received_packet()
                        payload = bytes(packet.packet_payload)
                        # If the source address is one of the child nodes
                        srcAddr = struct.unpack('B', payload[3:4])[0]
                        if (payload[0:3] == b'UND') and (srcAddr in self.childNodes):
                            # Store the packet in the forwarding buffer and take out the child node out of the list
                            print("  Data packet received from N" + "%03d" % srcAddr)
                            packetBuffer.append(packet)
                            nodesToRespond.remove(srcAddr)
                    # Add a delay before checking the serial port again
                    pyb.delay(25)
                # If there are no more child nodes to gather data from, do not transmit a REQ again
                if not nodesToRespond:
                    break
            # Transmit a START DATA TRANSFER handshake packet to the gateway and wait for an ACK
            numTries = 5
            timeout = 5.0
            gwReady = False
            for n in range(numTries):
                # Feed the watchdog
                if self.wdt:
                    self.wdt.feed()
                # Transmit a short handshake packet "UNSDT", if ACK is received, proceed with data transfer
                print("Contacting GW to initiate data transfer...")
                delay = self.nm.send_unicast_message_with_ack(srcNode, b'UNSDT', timeout)
                if delay > 0:
                    print("  GW is ready to receive")
                    gwReady = True
                    break
            # Forward all payload packets in the buffer to the node that requested it
            # Wait for a Repeated REQ in case retransmissions are required
            frameIsOver = False
            while gwReady and (not frameIsOver):
                # Forward the packets
                for fwPacket in packetBuffer:
                    # Feed the watchdog
                    if self.wdt:
                        self.wdt.feed()
                    # Send the packet
                    payload = bytes(fwPacket.packet_payload)
                    srcAddr = struct.unpack('B', payload[3:4])[0]
                    # Transmit the payload packet if this node was specified in the REQ
                    if (srcAddr in destAddr):
                        print("Forwarding data packet from Node " + str(srcAddr) + " to Node " + str(srcNode) + "...")
                        self.nm.send_unicast_message(srcNode, payload)
                        pyb.delay(gwf.dataPktDur + self.guardInt) # delay while we are transmitting the packet
                # If there are any child nodes who did not respond with a data packet
                for unrNode in nodesToRespond:
                    if unrNode in destAddr:
                        # Feed the watchdog
                        if self.wdt:
                            self.wdt.feed()
                        # Send blank packets to the Gateway
                        payload = b'UND' + struct.pack('B', unrNode) + b'no_packet'
                        print("Sending blank data packet from Node " + str(unrNode) + " to Node " + str(srcNode) + "...")
                        self.nm.send_unicast_message(srcNode, payload)
                        pyb.delay(gwf.dataPktDur + self.guardInt) # delay while we are transmitting the packet
                # Wait for a repeated REQ asking for retransmissions (10 sec timeout)
                txEndTime = utime.ticks_ms()
                timeout = 10000
                anotherREQReceived = False
                # NOTE(review): this loop always waits the full timeout even after a
                # repeated REQ arrives (no break) — confirm that is intentional.
                while utime.ticks_diff(utime.ticks_ms(), utime.ticks_add(txEndTime, timeout)) < 0:
                    # Feed the watchdog
                    if self.wdt:
                        self.wdt.feed()
                    # Check if a packet has been received
                    self.nm.poll_receiver()
                    self.nm.process_incoming_buffer()
                    if self.nm.has_received_packet():
                        # Read the incoming packet and see if it is a REQ
                        packet = self.nm.get_received_packet()
                        payload = bytes(packet.packet_payload)
                        srcId = packet.source_address
                        pktType = packet.packet_type
                        # If it is a REQ, resend some of the packets again
                        if (pktType == 'U') and (len(payload) > 4) and (payload[0:3] == b'UNR'):
                            # Check if I need to go back to always-on state after this
                            anotherREQReceived = True
                            sleepFlag = struct.unpack('B', payload[5:6])[0]
                            # Decode the addresses of nodes expected to respond
                            destAddr = list()
                            for n in range(7, len(payload)):
                                addr = struct.unpack('B', payload[n:n+1])[0]
                                destAddr.append(int(addr))
                            # Transmit the missing packets again (by staying in this loop)
                        # Otherwise, pass it up to the main packet handling function
                        else:
                            # Bug fix: handle_packet() returns a 3-tuple; the original
                            # indexed it with [0] before unpacking, raising TypeError.
                            (canGoToSleep, _, pktIgnored) = self.handle_packet(packet)
                            if not pktIgnored:
                                sleepFlag = 1 if (canGoToSleep) else 0 # Convert the sleep flag (due to recursion here!)
                # Check if the frame is over
                frameIsOver = not anotherREQReceived
        # Return the sleep flag
        return (sleepFlag == 1)

    ##################################################################
    ### Function to respond to the general diagnostic packet       ###
    ##################################################################
    def respondToDiagnosticPacket(self, payload):
        """Answer an "Are you alive?" query with this node's address."""
        # Read the source address formatted as three characters
        srcAddr = int(bytes(payload[8:11]).decode())
        # Print message for debugging
        print("Diagnostic packet received from Node " + str(srcAddr))
        # Convert own address to a three character string
        addrString = "%03d" % self.thisNode
        # Transmit a response
        txBytes = b'Yes!' + addrString.encode()
        self.nm.send_unicast_message(srcAddr, txBytes)

    ##############################################################################################
    ### Function to deliver the network discovery results back to the node that requested them ###
    ##############################################################################################
    def sendNodeDiscPacket(self, reqNode, propDelays, linkQuality):
        """Send measured propagation delays and link qualities to reqNode,
        retrying up to 5 times until an ACK is received.

        Returns:
            bool: True if the packet was acknowledged.
        """
        # By default assume it is unsuccessful
        success = False
        # Create the payload packet containing all measured propagation delays and link qualities
        packet = b'UNNR'
        for n in range(len(propDelays)):
            packet += struct.pack('I', propDelays[n]) + struct.pack('B', linkQuality[n])
        # Try sending the packet multiple times if needed
        maxTries = 5 # maximum attempts at sending it
        timeout = 5 # 5 second ACK timeout
        for k in range(maxTries):
            # Feed the watchdog
            if self.wdt:
                self.wdt.feed()
            # Send the packet
            print("Sending node discovery results to Node " + str(reqNode))
            # Transmit the packet requiring an ACK
            delay = self.nm.send_unicast_message_with_ack(reqNode, packet, timeout)
            # If the ACK was received
            if delay > 0:
                # Print message
                print("  ACK received")
                # Success, we are done here
                success = True
                break
            # Otherwise, try sending the packet again
            else:
                print("  No ACK")
        # Return the success flag
        return success
| bensherlock/micropython-usmart-network | main/sensor_node.py | sensor_node.py | py | 27,292 | python | en | code | 0 | github-code | 50 |
39675481106 | import re
def match_any_expression(value='', expressions=[]):
    """Return True if `value` matches any of the given regex patterns.

    Uses re.match, so each pattern is anchored at the start of `value`.

    Args:
        value (str, optional):
            [string to be tested against the expressions]. Defaults to ''.
        expressions (list, optional):
            [list of patterns (as strings) to test the value]. Defaults to [].

    Returns:
        bool: True on the first matching pattern, False otherwise.
    """
    # any() short-circuits on the first match, exactly like the original loop.
    return any(re.match(expression, value) for expression in expressions)
def merge_distinct_tags(tag_list_a=[], tag_list_b=[]):
    """Returns a merge from both inputs, but choosing tag_list_b's tag values
    in case of equal keys.

    Args:
        tag_list_a (list, optional): [List of {'Key': ..., 'Value': ...} tags]. Defaults to [].
        tag_list_b (list, optional): [List of tags whose values win on duplicate keys]. Defaults to [].

    Returns:
        list: all tags from tag_list_b plus the tags from tag_list_a whose
        keys do not appear in tag_list_b. Neither input list is mutated.
    """
    # Bug fix: the original used a one-shot map() iterator here; it was
    # (partly) consumed by the first `not in` test, so later tags from
    # tag_list_a could be appended even when tag_list_b already had their
    # key. A set supports repeated O(1) membership tests.
    tag_list_b_keys = {tag['Key'] for tag in tag_list_b}
    merge = list(tag_list_b)
    for tag in tag_list_a:
        if tag['Key'] not in tag_list_b_keys:
            merge.append(tag)
    return merge
| gelouko/useful-scripts | aws/find_and_tag/utils.py | utils.py | py | 1,029 | python | en | code | 1 | github-code | 50 |
def mergeOverlappingIntervals(intervals):
    """Merge all overlapping intervals in the given list.

    Repeatedly pops the first interval, absorbs every interval that overlaps
    it (growing it as it scans), and re-queues the grown interval until it no
    longer overlaps anything. Intervals are assumed well-formed (lo <= hi).
    The input list is consumed (emptied) in the process, as before.

    Args:
        intervals (list): list of [lo, hi] pairs.

    Returns:
        list: merged, non-overlapping intervals (same ordering behaviour as
        the original implementation: untouched intervals come out in scan
        order, merged ones after their final merge pass).
    """
    retval = []
    while intervals:
        range1 = intervals.pop(0)
        merged_ranges = []
        for idx in range(len(intervals)):
            range2 = intervals[idx]
            # Two well-formed intervals overlap iff one's start lies inside
            # the other. (The original also tested two reversed-interval
            # conditions that are unreachable for lo <= hi inputs.)
            if (
                (range1[0] >= range2[0] and range1[0] <= range2[1]) or
                (range2[0] >= range1[0] and range2[0] <= range1[1])
            ):
                merged_ranges.append(idx)
                range1 = [
                    min(range1[0], range2[0]),
                    max(range1[1], range2[1])
                ]
        if merged_ranges:
            # Remove absorbed intervals (highest index first so positions
            # stay valid) and re-queue the grown interval for another pass.
            for idx in reversed(merged_ranges):
                intervals.pop(idx)
            intervals.append(range1)
        else:
            retval.append(range1)
    return retval
from AlgoHelper import testing,tree,debug
# Script driver: run mergeOverlappingIntervals against the test cases loaded
# by the project's AlgoHelper harness and print each result.
script_name = "mergeIntervals"
tests = testing.load_tests(script_name)
debug.script_header("MERGE INTERVALS")
idx=0
for case in tests:
    idx+=1
    debug.test_header("Test %s" % idx)
    # Copy the input so the in-place merge does not clobber the test case.
    intervals = case["intervals"][:]
    # NOTE(review): `vars` shadows the built-in vars() for the rest of the script.
    vars={"Intervals":case["intervals"],"Return":mergeOverlappingIntervals(intervals)}
debug.print_variables(vars,row_size=1) | younelan/Code-Fun | algo/mergeIntervals.py | mergeIntervals.py | py | 1,169 | python | en | code | 0 | github-code | 50 |
32186397331 | import os
import sys
import arcpy
import pandas as pd
from arcgis.features import FeatureLayer, GeoAccessor
from arcgis.gis import GIS
from dotenv import load_dotenv
arcpy.env.overwriteOutput = True
def auto_download_data(data_url, outGDB, outname, from_date, to_date):
    """Download a hosted feature layer to a local feature class.

    You must be signed in to ArcGIS Pro on this machine with an account that
    can access the data (credentials are picked up via GIS('pro')). Only
    features whose EditDate falls between from_date and to_date are exported.
    Dates use the YYYY-MM-DD format with no timestamp component.

    Args:
        data_url: URL of the hosted feature service (layer 0 appended if missing).
        outGDB: output geodatabase path.
        outname: name of the output feature class.
        from_date: start of the EditDate window (YYYY-MM-DD).
        to_date: end of the EditDate window (YYYY-MM-DD).

    Returns:
        str: full path of the feature class written to outGDB.
    """
    # Verify credentials by opening the active ArcGIS Pro portal session.
    portal = GIS('pro')
    print('Logged in as: ' + str(portal.properties.user.username))
    # Make sure the URL points at layer 0 of the service.
    if os.path.basename(data_url) != '0':
        data_url = os.path.join(data_url, '0')
    where = "EditDate BETWEEN TIMESTAMP '{}' AND TIMESTAMP '{}'".format(from_date, to_date)
    arcpy.FeatureClassToFeatureClass_conversion(data_url, outGDB, outname, where_clause= where)
    return os.path.join(outGDB, outname)
def rename_the_fields(NGD_data):
    """Rename the long join-prefixed NGD field names back to their short
    canonical names, in place, using arcpy.AlterField_management.

    Fields not present in the mapping are left untouched.

    NOTE(review): two source fields ('...PLACE_ID_1' and '...PLACE_ID1') both
    map to 'PLACE_ID_R'; if both exist in the data the second rename would
    collide — confirm which one is correct.
    """
    # Mapping: joined/truncated field name -> canonical NGD field name.
    ngdal_col_map = {'WC2021NGD_AL_20200313_NGD_UID': 'NGD_UID',
                     'WC2021NGD_AL_20200313_SGMNT_TYP': 'SGMNT_TYP_CDE',
                     'WC2021NGD_AL_20200313_SGMNT_SRC': 'SGMNT_SRC',
                     'WC2021NGD_AL_20200313_STR_CLS_C': 'STR_CLS_CDE',
                     'WC2021NGD_AL_20200313_STR_RNK_C': 'STR_RNK_CDE',
                     'WC2021NGD_AL_20200313_AFL_VAL': 'AFL_VAL',
                     'WC2021NGD_AL_20200313_AFL_SFX': 'AFL_SFX',
                     'WC2021NGD_AL_20200313_AFL_SRC': 'AFL_SRC',
                     'WC2021NGD_AL_20200313_ATL_VAL': 'ATL_VAL',
                     'WC2021NGD_AL_20200313_ATL_SFX': 'ATL_SFX',
                     'WC2021NGD_AL_20200313_ATL_SRC': 'ATL_SRC',
                     'WC2021NGD_AL_20200313_AFR_VAL': 'AFR_VAL',
                     'WC2021NGD_AL_20200313_AFR_SFX': 'AFR_SFX',
                     'WC2021NGD_AL_20200313_AFR_SRC': 'AFR_SRC',
                     'WC2021NGD_AL_20200313_ATR_VAL': 'ATR_VAL',
                     'WC2021NGD_AL_20200313_ATR_SFX': 'ATR_SFX',
                     'WC2021NGD_AL_20200313_ATR_SRC': 'ATR_SRC',
                     'WC2021NGD_AL_20200313_ADDR_TYP_': 'ADDR_TYP_L',
                     'WC2021NGD_AL_20200313_ADDR_TYP1': 'ADDR_TYP_R',
                     'WC2021NGD_AL_20200313_ADDR_PRTY': 'ADDR_PRTY_L',
                     'WC2021NGD_AL_20200313_ADDR_PR_1': 'ADDR_PRTY_R',
                     'WC2021NGD_AL_20200313_NGD_STR_2': 'NGD_STR_UID_DTE_L',
                     'WC2021NGD_AL_20200313_NGD_STR_3' : 'NGD_STR_UID_DTE_R',
                     'WC2021NGD_AL_20200313_CSD_UID_L' : 'CSD_UID_L',
                     'WC2021NGD_AL_20200313_CSD_UID_R' : 'CSD_UID_R',
                     'WC2021NGD_AL_20200313_PLACE_ID_' : 'PLACE_ID_L',
                     'WC2021NGD_AL_20200313_PLACE_ID_1' : 'PLACE_ID_R',
                     'WC2021NGD_AL_20200313_PLACE_I_1' : 'PLACE_ID_L_PREV',
                     'WC2021NGD_AL_20200313_PLACE_I_2' : 'PLACE_ID_R_PREC',
                     'WC2021NGD_AL_20200313_NAME_SRC_' : 'NAME_SRC_L',
                     'WC2021NGD_AL_20200313_NAME_SRC1' : 'NAME_SRC_R',
                     'WC2021NGD_AL_20200313_FED_NUM_L' : 'FED_NUM_L',
                     'WC2021NGD_AL_20200313_FED_NUM_R' : 'FED_NUM_R',
                     'WC2021NGD_STREET_202003_STR_N_1' : 'STR_NME',
                     'WC2021NGD_STREET_202003_STR_TYP' : 'STR_TYP',
                     'WC2021NGD_STREET_202003_STR_DIR' : 'STR_DIR',
                     'WC2021NGD_STREET_202003_NAME_SR' : 'NAME_SRC',
                     'WC2021NGD_AL_20200313_BB_UID_L' : 'BB_UID_L',
                     'WC2021NGD_AL_20200313_BB_UID_R' : 'BB_UID_R',
                     'WC2021NGD_AL_20200313_BF_UID_L' : 'BF_UID_L',
                     'WC2021NGD_AL_20200313_BF_UID_R' : 'BF_UID_R',
                     'WC2021NGD_AL_20200313_NGD_STR_U' : 'NGD_STR_UID_L',
                     'WC2021NGD_AL_20200313_NGD_STR_1' : 'NGD_STR_UID_R',
                     'WC2021NGD_AL_20200313_PLACE_ID1' : 'PLACE_ID_R',
                     'WC2021NGD_AL_20200313_ALIAS1_ST' : 'ALIAS1_STR_UID_L',
                     'WC2021NGD_AL_20200313_ALIAS1__1' : 'ALIAS1_STR_UID_R',
                     'WC2021NGD_AL_20200313_ALIAS2_ST' : 'ALIAS2_STR_UID_L',
                     'WC2021NGD_AL_20200313_ALIAS2__1' : 'ALIAS2_STR_UID_R',
                     'WC2021NGD_AL_20200313_SRC_SGMNT' : 'SRC_SGMNT_ID',
                     'WC2021NGD_AL_20200313_SGMNT_DTE' : 'SGMNT_DTE',
                     'WC2021NGD_AL_20200313_ATTRBT_DT' : 'ATTRBT_DTE',
                     'WC2021NGD_AL_20200313_GEOM_ACC_' : 'GEOM_ACC_CDE',
                     'WC2021NGD_AL_20200313_AFL_DTE' : 'AFL_DTE',
                     'WC2021NGD_AL_20200313_ATL_DTE' : 'ATL_DTE',
                     'WC2021NGD_AL_20200313_AFR_DTE' : 'AFR_DTE',
                     'WC2021NGD_AL_20200313_ATR_DTE' : 'ATR_DTE',
                     'WC2021NGD_AL_20200313_EC_STR_ID' : 'EC_STR_ID_L',
                     'WC2021NGD_AL_20200313_EC_STR__1' : 'EC_STR_ID_R',
                     'WC2021NGD_AL_20200313_EC_STR__2' : 'EC_STR_ID_DTE_L',
                     'WC2021NGD_AL_20200313_EC_STR__3' : 'EC_STR_ID_DTE_R',
                     'WC2021NGD_AL_20200313_LAYER_SRC' : 'LAYER_SRC_CDE',
                     'WC2021NGD_AL_20200313_LAYER_S_1' : 'LAYER_SRC_ID',
                     'WC2021NGD_AL_20200313_REVIEW_FL' : 'REVIEW_FLG',
                     'WC2021NGD_AL_20200313_MUST_HLD_' : 'MUST_HLD_TYP',
                     'WC2021NGD_AL_20200313_TRAFFIC_D' : 'TRAFFIC_DIR_CDE',
                     'WC2021NGD_AL_20200313_UPDT_SGMN' : 'UPDT_SGMNT_FLG',
                     'WC2021NGD_AL_20200313_AOI_JOB_U' : 'AOI_JOB_UID',
                     'WC2021NGD_STREET_202003_NGD_STR' : 'NGD_STR_UID',
                     'WC2021NGD_STREET_202003_CSD_UID' : 'CSD_UID',
                     'WC2021NGD_STREET_202003_STR_NME' : 'STR_NME_PRFX',
                     'WC2021NGD_STREET_202003_STR_PAR' : 'STR_PARSD_NME',
                     'WC2021NGD_STREET_202003_STR_LAB' : 'STR_LABEL_NME',
                     'WC2021NGD_STREET_202003_STR_STA' : 'STR_STAT_CDE',
                     'WC2021NGD_STREET_202003_UPDT_DT' : 'UPDT_DTE'}
    print('Renaming NGD fields')
    # Rename only the fields that actually appear in the mapping.
    for f in arcpy.ListFields(NGD_data):
        fieldName = f.name
        if fieldName in ngdal_col_map:
            arcpy.AlterField_management(NGD_data, fieldName, ngdal_col_map[fieldName])
def unique_values(fc, field):
    """Return a sorted list of the unique non-NULL values of *field* in *fc*.

    BUG FIX: the original built a feature layer filtered on
    'NGD_UID IS NOT NULL' but then read the unfiltered feature class *fc*,
    so the layer was dead code and NULLs could leak into the result set —
    sorted() raises TypeError when None is mixed with other types in
    Python 3.  The NULL filter is now applied directly to the cursor,
    generalized to whichever field is requested.
    """
    with arcpy.da.SearchCursor(fc, field_names=[field],
                               where_clause='{} IS NOT NULL'.format(field)) as cursor:
        return sorted({row[0] for row in cursor})
def filter_data_remove_duplicates(Redline_data, outGDB, outName):
    """Keep only the most recently edited record for every NGD_UID.

    Reads *Redline_data* into a spatial dataframe, keeps the row with the
    newest EditDate per NGD_UID, and exports those rows to
    <outGDB>/<outName>_uniques.  Returns the exported path, or None when the
    input has no rows.
    """
    # Read rows into a pandas dataframe
    df = pd.DataFrame.spatial.from_featureclass(Redline_data, sr= '3347')
    if len(df) == 0:
        print('Length of rows is 0 continuing')
        return None
    KeepRows = []
    for uid in unique_values(Redline_data, 'NGD_UID'):
        uid_rows = df.loc[df['NGD_UID'] == uid ]
        maxDateRow = uid_rows.loc[uid_rows['EditDate'] == uid_rows.EditDate.max()]
        KeepRows.append(maxDateRow.iloc[0]['OBJECTID'])
    print('Keeping ' + str(len(KeepRows)) + ' rows from Redline data')
    # BUG FIX: str(tuple([x])) renders as "(x,)", which is not valid SQL,
    # so a single surviving OBJECTID needs an equality clause instead.
    if len(KeepRows) == 1:
        where_clause = 'OBJECTID = ' + str(KeepRows[0])
    else:
        where_clause = 'OBJECTID IN ' + str(tuple(KeepRows))
    arcpy.FeatureClassToFeatureClass_conversion(Redline_data,
                                                outGDB,
                                                outName + '_uniques',
                                                where_clause=where_clause)
    return os.path.join(outGDB, outName + '_uniques')
def address_field_check(redline_data, out_gdb, out_base_name, w_NGD_UID= True):
    """Split *redline_data* into rows with consistent address fields and rows
    that need manual QC.

    Rule per side (L/R): when either AFx_VAL or ATx_VAL is populated, the
    matching opposite value, ADDR_TYP_x and ADDR_PRTY_x must all be
    populated too.  Violating rows go to <out_base_name>_badAddr, the rest
    to <out_base_name>_goodAddr.

    Returns [good_path, bad_path]; an entry is None when no rows fall in
    that category (or [None, None] for empty input).
    """
    # BUG FIX: the original compared the GetCount Result object itself to 0,
    # which is never equal, so the empty-input guard never fired.
    if int(arcpy.GetCount_management(redline_data).getOutput(0)) == 0:
        print('Redline data has no records returning None')
        return [None, None]
    print('Running address field changes check')
    # check redline fields against the NGD_AL fields. If fields change from valid to invalid flag those rows
    uid_field = 'NGD_UID'
    if w_NGD_UID == False:
        uid_field = 'OBJECTID'
    fields_to_qc = [uid_field, 'AFL_VAL','AFL_SRC', 'AFR_VAL', 'AFR_SRC', 'ATL_VAL', 'ATL_SRC', 'ATR_VAL', 'ATR_SRC', 'ADDR_TYP_L',
                    'ADDR_TYP_R', 'ADDR_PRTY_L', 'ADDR_PRTY_R']
    fail_rows = []
    good_rows = []
    with arcpy.da.SearchCursor(redline_data, field_names= fields_to_qc ) as cursor:
        for row in cursor:
            # BUG FIX: 'a and b or c or d' groups as '(a and b) or c or d',
            # which also flagged rows whose address fields were ALL NULL.
            # The intended rule is 'a and (b or c or d)'.
            # if AFL_VAL IS NOT NULL then ATL_VAL, ADDR_TYP_L, ADDR_PRTY_L NOT NULL
            if row[1] is not None and (row[5] is None or row[9] is None or row[11] is None):
                fail_rows.append(row[0])
                continue
            # if ATL_VAL IS NOT NULL then AFL_VAL, ADDR_TYP_L, ADDR_PRTY_L NOT NULL
            if row[5] is not None and (row[1] is None or row[9] is None or row[11] is None):
                fail_rows.append(row[0])
                continue
            # if AFR_VAL not NULL, then ATR_VAL, ADDR_TYP_R, ADDR_PRTY_R not NULL
            if row[3] is not None and (row[7] is None or row[10] is None or row[12] is None):
                fail_rows.append(row[0])
                continue
            # if ATR_VAL not NULL, then AFR_VAL, ADDR_TYP_R, ADDR_PRTY_R not NULL
            if row[7] is not None and (row[3] is None or row[10] is None or row[12] is None):
                fail_rows.append(row[0])
                continue
            good_rows.append(row[0])

    def _export(row_ids, suffix):
        # Export the given uid values to <out_base_name><suffix>.  A single
        # id cannot go through str(tuple(...)) — "(x,)" is not valid SQL.
        if len(row_ids) == 1:
            wc = uid_field + ' = ' + str(row_ids[0])
        else:
            wc = uid_field + ' IN ' + str(tuple(row_ids))
        arcpy.FeatureClassToFeatureClass_conversion(redline_data, out_gdb,
                                                    out_base_name + suffix,
                                                    where_clause=wc)
        return os.path.join(out_gdb, out_base_name + suffix)

    print('Exporting sorted rows')
    outlist = [None, None]
    print('Good rows: ' + str(len(good_rows)) + ' Rows to QC: ' + str(len(fail_rows)))
    if len(good_rows) > 0:
        outlist[0] = _export(good_rows, '_goodAddr')
    if len(fail_rows) > 0:
        outlist[1] = _export(fail_rows, '_badAddr')
    return outlist
def fix_address_field_errors(bad_redline_rows, o_gdb, o_name):
    """Backfill one-sided address ranges: whenever exactly one of the
    AFx_VAL / ATx_VAL pair is populated, copy it to the other side, then
    export the repaired rows to <o_gdb>/<o_name>_bad_addr.

    Returns the path of the exported feature class.

    NOTE(review): CalculateField_management operates on the layer's current
    selection — confirm the expected behaviour when a selection query
    matches zero rows.
    """
    layer = arcpy.MakeFeatureLayer_management(bad_redline_rows, 'fl')
    for side in ('L', 'R'):
        missing_at = 'AT{0}_VAL IS NULL AND AF{0}_VAL IS NOT NULL'.format(side)
        missing_af = 'AT{0}_VAL IS NOT NULL AND AF{0}_VAL IS NULL'.format(side)
        # Fill in AF and AT holes
        arcpy.SelectLayerByAttribute_management(layer, where_clause=missing_at)
        arcpy.CalculateField_management(layer, 'AT' + side + '_VAL', '!AF' + side + '_VAL!')
        arcpy.SelectLayerByAttribute_management(layer, 'NEW_SELECTION', missing_af)
        arcpy.CalculateField_management(layer, 'AF' + side + '_VAL', '!AT' + side + '_VAL!')
    # Export changes and overwrite prior redline file
    arcpy.SelectLayerByAttribute_management(layer, 'CLEAR_SELECTION')
    arcpy.FeatureClassToFeatureClass_conversion(layer, o_gdb, o_name + '_bad_addr')
    return os.path.join(o_gdb, o_name + '_bad_addr')
def qc_PRTY_vals(redline_rows, o_gdb, o_name):
    """Fill NULL ADDR_PRTY_L/R values from the parity of the AF/AT address
    range ('E' both even, 'O' both odd, 'M' mixed), then export to
    <o_gdb>/<o_name> and return that path.

    This needs to check every row so put after merge.
    """
    fl = arcpy.MakeFeatureLayer_management(redline_rows, 'fl')
    # BUG FIX: the code block is exec'd verbatim by CalculateField, so the
    # 'def' must start at column 0 — the original uniformly-indented block
    # raises IndentationError.
    # NOTE(review): AF_VAL/AT_VAL are assumed numeric strings here; a NULL
    # value would make int() raise — confirm upstream backfill guarantees.
    code = """
def ADDR_PRTY_FIXER(AF_VAL, AT_VAL):
    AF_int = int(AF_VAL)
    AT_int = int(AT_VAL)
    if (AF_int % 2) == 0 and (AT_int % 2) == 0:
        return 'E'
    if (AF_int % 2) != 0 and (AT_int % 2) != 0:
        return 'O'
    return 'M'
"""
    for d in ['L', 'R']:
        arcpy.SelectLayerByAttribute_management(fl, 'NEW_SELECTION', where_clause= 'ADDR_PRTY_' + d + ' IS NULL')
        arcpy.CalculateField_management(fl, 'ADDR_PRTY_' + d, 'ADDR_PRTY_FIXER(!AF' + d + '_VAL!, !AT' + d + '_VAL!)',
                                        'PYTHON3', code_block= code)
    arcpy.FeatureClassToFeatureClass_conversion(fl, o_gdb, o_name)
    return os.path.join(o_gdb, o_name)
def fix_null_src_vals(bad_redline_rows, o_gdb, o_name):
    """Mirror the AFx_SRC / ATx_SRC source codes when only one of the pair is
    populated, export the corrected rows to <o_gdb>/<o_name>_bad_addr and
    return that path.
    """
    fl = arcpy.MakeFeatureLayer_management(bad_redline_rows, 'fl')
    # Select all rows in which the F_SRC val is not null and the T_SRC val is null and make the same
    for d in ['L', 'R']:
        query1 = 'AT{}_SRC IS NULL AND AF{}_SRC IS NOT NULL'.format(d, d)
        # BUG FIX: query2 previously duplicated query1's condition
        # ('AF ... IS NOT NULL AND AT ... IS NULL'), so AF_SRC was never
        # backfilled from AT_SRC.  It must select the opposite case.
        query2 = 'AF{}_SRC IS NULL AND AT{}_SRC IS NOT NULL'.format(d, d)
        arcpy.SelectLayerByAttribute_management(fl, 'NEW_SELECTION', where_clause= query1)
        arcpy.CalculateField_management(fl, 'AT' + d +'_SRC', '!AF' + d + '_SRC!')
        arcpy.SelectLayerByAttribute_management(fl, 'NEW_SELECTION', where_clause= query2)
        arcpy.CalculateField_management(fl, 'AF' + d + '_SRC', '!AT' + d + '_SRC!')
    # Export Corrected data
    arcpy.SelectLayerByAttribute_management(fl, 'CLEAR_SELECTION')
    arcpy.FeatureClassToFeatureClass_conversion(fl, o_gdb, o_name + '_bad_addr')
    return os.path.join(o_gdb, o_name + '_bad_addr')
#------------------------------------------------------------------------------------------------------------
# inputs: environment-driven configuration for the download run
load_dotenv(os.path.join(os.getcwd(), 'environments.env'))
directory = os.getcwd() # Will return the directory that this file is currently in.
url = r'https://services7.arcgis.com/bRi0AN5rG57dCDE4/arcgis/rest/services/NGD_STREET_Redline_V2_61/FeatureServer/0' # URL for AGOL NGD_Redline data
#url = r'https://services7.arcgis.com/bRi0AN5rG57dCDE4/arcgis/rest/services/NGD_STREET_Redline_V2_6/FeatureServer/0' # URL for Test AGOL redline Layer
gdb_name = 'NGD_Redline.gdb'
o_gdb = os.path.join(directory, gdb_name)  # working file geodatabase (created below if absent)
o_name = 'NGD_STREET_Redline' # Name for final output file
#Create fgdb for the downloaded data and intermediate files
if not arcpy.Exists(o_gdb):
    print('Creating GDB')
    arcpy.CreateFileGDB_management(directory, gdb_name)
# Date window for the AGOL pull, read from environments.env (may be None if unset)
from_date = os.getenv('FROM_DATE_TIME')
to_date = os.getenv('TO_DATE_TIME')
print('Settings: From Date- {}, To Date- {}'.format(from_date, to_date))
#--------------------------------------------------------------------------------------------------------------
# Calls: download the redline edits, QC them, and filter the NGD_AL data.
print('Running script')
results = auto_download_data(url, o_gdb, o_name, from_date, to_date)
rename_the_fields(results)
if int(arcpy.GetCount_management(results).getOutput(0)) == 0:
    print('No records for given date range. Exiting script')
    sys.exit()
print('Total number of imported records: ' + str(int(arcpy.GetCount_management(results).getOutput(0))))
print('Splitting records into NGD_UIDs and Null NGD_UIDs')
w_NGD_UIDs = arcpy.FeatureClassToFeatureClass_conversion(results, o_gdb, o_name + '_w_NGD_UID', 'NGD_UID IS NOT NULL')
no_NGD_UIDs = arcpy.FeatureClassToFeatureClass_conversion(results, o_gdb, o_name + '_w_no_NGD_UID', 'NGD_UID IS NULL')
print('Records with NGD_UIDs: {} Records with NULL NGD_UIDs: {}'.format(arcpy.GetCount_management(w_NGD_UIDs),
                                                                        arcpy.GetCount_management(no_NGD_UIDs)))
print('Filtering to remove records that contain duplicate NGD_UIDs')
filtered = filter_data_remove_duplicates(w_NGD_UIDs, o_gdb, o_name)
print('Running address fields QC checks')
checked_w_NGD_UID = address_field_check(filtered, o_gdb, o_name + '_ch_w_uid', True)
checked_no_NGD_UID = address_field_check(no_NGD_UIDs, o_gdb, o_name + '_ch_no_uid', w_NGD_UID= False)
# BUG FIX: the original tested 'type(fc) != None', which is always true
# (type() never returns None), so None placeholders from
# address_field_check could reach Merge_management.
files = [fc for fc in checked_no_NGD_UID + checked_w_NGD_UID if fc is not None]
print('Merging ' + str(len(files)) + ' files')
merged = arcpy.Merge_management(files, os.path.join(o_gdb, o_name + '_merged'))
#Get only NGD_UIDs in redline data for NGD_AL filtering
uids = unique_values(filtered, 'NGD_UID')
outFC_nme = os.path.join(o_gdb, o_name)
print('Performing final address QC')
fix_address_field_errors(merged, o_gdb, o_name)
fix_null_src_vals(merged, o_gdb, o_name)
qc_PRTY_vals(merged, o_gdb, o_name)
print('Merging all records and exporting to final feature class')
arcpy.Merge_management(merged, os.path.join(o_gdb, o_name ))
# Historical one-off from the 2020-12-12 pull, kept for reference:
# fl = arcpy.MakeFeatureLayer_management(NGD_STREET_REDLINE)
# wc = "PRCODE <> '35'"
# csd_filtered = arcpy.FeatureClassToFeatureClass_conversion(os.path.join(directory, 'CSD_202009.gdb', 'WC2021CSD_202009'),
#                                                             o_gdb,
#                                                             'CSD_FILTERED',
#                                                             where_clause= wc)
# arcpy.SelectLayerByLocation_management(fl, 'INTERSECT', csd_filtered , invert_spatial_relationship= True)
# arcpy.FeatureClassToFeatureClass_conversion(fl, o_gdb, o_name)
print('Deleting non essential feature classes')
arcpy.env.workspace = o_gdb
for fc in arcpy.ListFeatureClasses():
    if fc != o_name:
        arcpy.Delete_management(fc)
print('Filtering NGD_AL data')
NGD_AL_path = os.path.join(directory, 'Final_Export_2020-09-28_2.gdb', 'NGD_AL')
# BUG FIX: str(tuple([x])) renders "(x,)", invalid SQL for a single uid.
if len(uids) == 1:
    ngd_al_clause = 'NGD_UID = ' + str(uids[0])
else:
    ngd_al_clause = 'NGD_UID IN ' + str(tuple(uids))
arcpy.FeatureClassToFeatureClass_conversion(NGD_AL_path,
                                            os.path.join(directory, o_gdb),
                                            'NGD_AL_filtered',
                                            ngd_al_clause)
if not os.path.exists(os.getenv('NDG_EC_LINKS')):
    print('Creating NGD_STR_UID and EC_STR_ID linkage table')
    ngd_ec_str_id = pd.DataFrame.spatial.from_featureclass(NGD_AL_path, fields= ['NGD_STR_UID_L',
                                                                                 'NGD_STR_UID_R',
                                                                                 'EC_STR_ID_L',
                                                                                 'EC_STR_ID_R'])
    ngd_ec_str_id.to_csv(os.getenv('NDG_EC_LINKS'))
print('DONE!')
| WenkChr/NGD_AGOL_Download | automate_download.py | automate_download.py | py | 19,015 | python | en | code | 1 | github-code | 50 |
759183855 | import streamlit as st
import pandas as pd
import datetime as dt
import altair as alt
import requests
import folium
import plotly.graph_objects as go
from folium.plugins import MousePosition
from st_aggrid import (
AgGrid,
ColumnsAutoSizeMode,
GridOptionsBuilder,
GridUpdateMode,
JsCode,
)
def load_lottieurl(url) -> dict:
    """Fetch a Lottie animation JSON from *url*.

    Returns the decoded JSON dict, or None when the server does not answer
    with HTTP 200.
    """
    # A timeout keeps the Streamlit app from hanging forever if the host stalls.
    r = requests.get(url, timeout=10)
    if r.status_code != 200:
        return None
    return r.json()
def set_dataset_size(df: pd.DataFrame, size: int) -> pd.DataFrame:
    """
    Limit the dataset to the last *size* days (30, 7 or 1) counted back from
    the newest timestamp in the 'time (UTC)' column.
    Returns the filtered dataframe.

    The docstring/annotation previously claimed *size* was a str; callers
    pass an integer number of days.
    """
    # Parse timestamps and drop any timezone info so naive/aware values
    # compare cleanly.  (infer_datetime_format was removed: it is a
    # deprecated no-op in pandas >= 2.0.)
    df["time (UTC)"] = pd.to_datetime(df["time (UTC)"]).dt.tz_localize(None)
    max_time = df["time (UTC)"].max()
    min_time = max_time - dt.timedelta(days=size)
    df = df.loc[(df["time (UTC)"] >= min_time) & (df["time (UTC)"] <= max_time)]
    return df
def data_filter(df: pd.DataFrame) -> pd.DataFrame:
    """
    Updates ag-Grid table and map according to selected filters.

    Renders the dataset-size / source controls, then (when the user opts
    in) one widget per selected column: a date range for 'time (UTC)',
    sliders for latitude/longitude/depth/mag, and multiselects for the
    categorical columns.  Each selection narrows *df* in turn.
    Returns filtered dataframe.
    """
    filter_container = st.container()
    with filter_container:
        filter_col1, filter_col2 = st.columns((1, 1))
        with filter_col1:
            # Window length in days, keyed by the label shown to the user.
            periodicals = {"Past 30 Days": 30, "Past 7 Days": 7, "Last 24 Hours": 1}
            option = st.selectbox(
                label="Dataset size",
                options=periodicals.keys(),
            )
            df = set_dataset_size(df, periodicals[option])
        with filter_col2:
            # source panel only displays data source and has no other use right now
            source = st.selectbox(
                label="Dataset source",
                options=["United States Geological Survey"],
                disabled=True,
            )
    # If not selected, function will return dataframe before applying additional filters
    add_filter = st.checkbox("Add more filters")
    if not add_filter:
        return df
    # FILTERING DATAFRAME ('place' is excluded: free text, not filterable)
    selected_columns = st.multiselect(
        "Choose an option to filter dataframe",
        df.loc[:, df.columns != "place"].columns,
    )
    for column in selected_columns:
        left, right = st.columns((1, 20))
        if column == "time (UTC)":
            if len(df) > 0:
                date_col1, date_col2, date_col3, date_col4 = st.columns((1, 4.4, 4.4, 10))
                start_dt = date_col2.date_input(
                    "➤ Start date", value=df["time (UTC)"].min()
                )
                end_dt = date_col3.date_input(
                    "➤ End date", value=df["time (UTC)"].max()
                )
                if start_dt <= end_dt:
                    # End bound is end_dt + 1 day so the whole end day is
                    # included (date_input returns midnight dates).
                    df = df.loc[
                        (
                            df["time (UTC)"]
                            >= dt.datetime(
                                start_dt.year, start_dt.month, start_dt.day
                            )
                        )
                        & (
                            df["time (UTC)"]
                            <= dt.datetime(end_dt.year, end_dt.month, end_dt.day)
                            + dt.timedelta(days=1)
                        )
                    ]
                else:
                    date_col2.warning("Please check your date range")
            else:
                right.error(f"Not enough {column} values to filter")
        elif column in ["latitude", "longitude"]:
            # Fixed geographic bounds rather than data-derived ones.
            if column == "latitude":
                min_coordinate = -90
                max_coordinate = 90
            elif column == "longitude":
                min_coordinate = -180
                max_coordinate = 180
            coordinate_input = right.slider(
                f"➤ Select your {column} range",
                min_value=min_coordinate,
                max_value=max_coordinate,
                value=(min_coordinate, max_coordinate),
            )
            df = df.loc[df[column].between(*coordinate_input)]
        elif column in ["depth", "mag"]:
            # Numeric range sliders derived from the data itself; needs at
            # least two rows for a meaningful range.
            if len(df) > 1:
                min_value = float(df[column].min())
                max_value = float(df[column].max())
                if min_value == max_value:
                    right.warning(f"{column.capitalize()} value is same for all rows")
                else:
                    value_range = right.slider(
                        f"➤ Select your {column} range",
                        min_value=min_value,
                        max_value=max_value,
                        value=(min_value, max_value),
                        help=f"You can limit minimum and maximum {column} values via sliders. This will update your table.",
                    )
                    df = df.loc[df[column].between(*value_range)]
            else:
                right.error(f"Not enough {column} values to filter")
        elif column in ["magType", "type", "locationSource", "magSource", "status"]:
            # Categorical columns: multiselect seeded with every value.
            if len(df) > 0:
                category_input = right.multiselect(
                    f"➤ Values for {column}",
                    df[column].unique(),
                    default=list(df[column].unique()),
                )
                df = df.loc[df[column].isin(category_input)]
            else:
                right.error(f"Not enough {column} values to filter")
    return df
def convert_to_csv(df):
    """Serialize the filtered dataframe to a UTF-8 CSV string without the index."""
    csv_text = df.to_csv(index=False, encoding="utf-8")
    return csv_text
def create_data_grid(df: pd.DataFrame) -> list:
    """Creates ag-Grid data grid with filtered data and returns list of selected rows.

    The grid is paginated, rows are selectable via checkboxes, and the 'mag'
    column is colour-coded client-side by a JS cell-style callback.
    """
    grid = GridOptionsBuilder.from_dataframe(df)
    # Separating grid into discrete pages instead of scrolling
    grid.configure_pagination(enabled=True)
    # Selection checkboxes on the grid
    grid.configure_selection(selection_mode="multiple", header_checkbox=True, use_checkbox=True)
    # Changing cell style for magnitude values over or equal to 5
    # (red >= 7, orange >= 6, green >= 5; below 5 keeps the default style)
    cellsytle_jscode = JsCode(
        """
    function(params) {
        if (params.value >= 7) {
            return {
                'color': 'white',
                'backgroundColor': 'firebrick'
            };
        } else if (params.value >= 6) {
            return {
                'color': 'white',
                'backgroundColor': 'chocolate'
            };
        } else if (params.value >= 5) {
            return {
                'color': 'white',
                'backgroundColor': 'olivedrab'
            };
        }
    };
    """
    )
    grid.configure_column("mag", cellStyle=cellsytle_jscode)
    grid.configure_grid_options(domLayout="normal")
    gridOptions = grid.build()
    # allow_unsafe_jscode is required for the JsCode cell style to run.
    grid_response = AgGrid(
        df,
        gridOptions=gridOptions,
        height=500,
        width="100%",
        theme="balham",
        update_mode=GridUpdateMode.SELECTION_CHANGED,
        columns_auto_size_mode=ColumnsAutoSizeMode.FIT_CONTENTS,
        allow_unsafe_jscode=True,
    )
    return grid_response["selected_rows"]
def draw_world_map(tiles: str):
    """Build an empty folium world map (centred at 0,0) rendered with the
    given *tiles* template, with a mouse-position readout bottom-left."""
    world = folium.Map(
        min_zoom=1.5,
        min_lon=-250,
        max_lon=250,
        min_lat=-85,
        max_lat=85,
        max_bounds=True,
        tiles=tiles,
        attr="Latest Earthquakes",
    )
    coordinate_formatter = "function(num) {return L.Util.formatNum(num, 3) + ' º ';};"
    MousePosition(
        position="bottomleft",
        separator=" | ",
        empty_string="NaN",
        lng_first=False,
        num_digits=20,
        prefix="Coordinates(lat | lon):",
        lat_formatter=coordinate_formatter,
        lng_formatter=coordinate_formatter,
    ).add_to(world)
    return world
def magnitude_colorcode(mag: float) -> str:
    """Map an earthquake magnitude to its marker colour bucket."""
    for threshold, colour in ((7, "red"), (6, "orange"), (5, "green")):
        if mag >= threshold:
            return colour
    return "blue"
def add_map_marker(map, lat: float, lon: float, mag: float, depth: float, place: str):
    """Drop a colour-coded marker with an info popup for one earthquake onto *map*."""
    colour = magnitude_colorcode(mag=mag)
    info = f"""<center> <b> {place} </b> </center>
            <br> <center> <b> Mag: {float(round(mag, 1))} </b> <center>
            <center> Depth: {depth} km </center>
            <br> <center> Latitude/Longitude: {lat}/{lon} </center>
            """
    folium.Marker(
        location=(lat, lon),
        popup=folium.Popup(info),
        icon=folium.Icon(icon="info-sign", color=colour),
    ).add_to(map)
def map_layer_panel() -> str:
    """Render the layer selectbox and return the tile-server URL template
    for the chosen layer."""
    tile_urls = {
        "Base Map": "https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}",
        "World Imagery": "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
        "Street Map": "https://tile.openstreetmap.org/{z}/{x}/{y}.png",
    }
    choice = st.selectbox("Select map layer", list(tile_urls))
    return tile_urls[choice]
def circle_search_panel(map, df: pd.DataFrame) -> tuple:
    """Render the circular-search form and return the entered
    (latitude, longitude, radius_km) tuple.

    The *map* and *df* parameters are kept for interface compatibility with
    the callers; they are not read here.
    """
    with st.form(key="Area Parameters", clear_on_submit=False):
        latitude = st.number_input(
            label="Latitude",
            value=39.57,
            min_value=-90.0,
            max_value=90.0,
            format="%.15f",
        )
        longitude = st.number_input(
            label="Longitude",
            value=32.53,
            min_value=-180.0,
            max_value=180.0,
            format="%.15f",
        )
        radius_km = st.slider(
            label="Radius (km)", min_value=0, max_value=1000, step=5
        )
        st.form_submit_button("Apply")
        return latitude, longitude, radius_km
def create_hourly_distribution_bar_chart(df: pd.DataFrame, x_axis: str, y_axis: str):
    """Render an hourly-distribution bar chart; the tallest bar is painted
    orange and every bar carries its count as a label."""
    counts = df.loc[:, y_axis].tolist()
    peak = max(counts)
    y_scale = alt.Scale(domain=[0, peak + peak / 10])
    bars = (
        alt.Chart(data=df)
        .mark_bar(cornerRadiusTopLeft=4, cornerRadiusTopRight=4)
        .encode(
            x=alt.X(x_axis),
            y=alt.Y(y_axis, scale=y_scale),
            color=alt.condition(
                alt.datum[y_axis] == peak,
                alt.value("orange"),
                alt.value("steelblue"),
            ),
        )
    )
    # dy=-10 lifts the count labels above the bar tops.
    labels = bars.mark_text(align="center", baseline="middle", dy=-10).encode(
        text=f"{y_axis}:O"
    )
    st.altair_chart(bars + labels, use_container_width=True)
def create_magnitude_bar_chart(df: pd.DataFrame, x_axis: str, y_axis: str):
    """Render an interactive magnitude bar chart with tooltips; the tallest
    bar is painted orange."""
    counts = df.loc[:, y_axis].tolist()
    peak = max(counts)
    y_scale = alt.Scale(domain=[0, peak + peak / 10])
    bars = (
        alt.Chart(data=df)
        .mark_bar(cornerRadiusTopLeft=2, cornerRadiusTopRight=2)
        .encode(
            x=alt.X(x_axis),
            y=alt.Y(y_axis, scale=y_scale),
            color=alt.condition(
                alt.datum[y_axis] == peak,
                alt.value("orange"),
                alt.value("steelblue"),
            ),
            tooltip=[x_axis, y_axis],
        )
        .interactive()
    )
    st.altair_chart(bars, use_container_width=True)
def create_worldwide_earthquakes_bar_chart(df: pd.DataFrame, x_axis: str, y_axis: str):
    """Render the annual worldwide-earthquake bar chart with count labels,
    an orange highlight on the tallest bar, and a red mean rule."""
    counts = df.loc[:, y_axis].tolist()
    peak = max(counts)
    y_scale = alt.Scale(domain=[0, peak + peak / 10])
    bars = (
        alt.Chart(data=df)
        .mark_bar(
            cornerRadiusTopLeft=4,
            cornerRadiusTopRight=4,
        )
        .encode(
            x=alt.X(x_axis),
            y=alt.Y(y_axis, scale=y_scale),
            color=alt.condition(
                alt.datum[y_axis] == peak,
                alt.value("orange"),
                alt.value("steelblue"),
            ),
        )
    )
    # dy=-10 lifts the count labels above the bar tops.
    labels = bars.mark_text(align="center", baseline="middle", dy=-10).encode(
        text=f"{y_axis}:O"
    )
    mean_rule = alt.Chart(df).mark_rule(color="red").encode(y=f"mean({y_axis}):Q")
    st.altair_chart(bars + mean_rule + labels, use_container_width=True)
def create_scattergeo_map(lat: list, lon: list, hovertext: list):
    """Plot earthquake points on an orthographic (globe-style) Plotly map."""
    globe = go.Figure()
    globe.add_trace(
        go.Scattergeo(
            name="",
            lat=lat,
            lon=lon,
            hovertext=hovertext,
            mode="markers",
            marker=dict(size=15, opacity=0.6, color="firebrick", symbol="circle"),
        )
    )
    globe.update_geos(projection_type="orthographic")
    globe.update_layout(width=750, height=750)
    st.plotly_chart(globe, use_container_width=True)
| gorkemuna1/Latest-Earthquakes | utility.py | utility.py | py | 13,643 | python | en | code | 2 | github-code | 50 |
36540049389 | import pyaudio
import wave
import pygame
import threading
import time
# Base 'Sound' class that all inherit from
class Sound:
    """Common interface for playable audio assets."""

    def __init__(self):
        # Concrete subclasses assign the file path; volume ranges 0.0-1.0.
        self.path = None
        self.volume = 1

    def set_volume(self, val):
        """Remember the requested playback volume."""
        self.volume = val

    def play(self):
        """Overridden by subclasses; the base sound is silent."""
        pass
# For sound effects, only supports WAV. Use this when you don't want any delay when playing the sound
class Effect(Sound):
    """A low-latency WAV sound effect streamed through PyAudio.

    Use this when playback must start without the mixer spin-up delay;
    only WAV files are supported.
    """

    def __init__(self, path, chunk=1024):
        Sound.__init__(self)
        self.path = path
        self.chunk = chunk  # frames per readframes()/write() cycle

    def _play(self):
        """Stream the WAV file to the default output device (blocking)."""
        wav = wave.open(self.path, "rb")
        audio = pyaudio.PyAudio()
        try:
            stream = audio.open(
                format=audio.get_format_from_width(wav.getsampwidth()),
                channels=wav.getnchannels(),
                rate=wav.getframerate(),
                output=True
            )
            try:
                data = wav.readframes(self.chunk)
                while data:
                    stream.write(data)
                    data = wav.readframes(self.chunk)
            finally:
                stream.stop_stream()
                stream.close()
        finally:
            # BUG FIX: the original leaked the PyAudio session and the wave
            # file handle on every playback (and on any mid-stream error).
            audio.terminate()
            wav.close()

    def play(self):
        """Start playback on a background thread and return immediately."""
        thread = threading.Thread(target=self._play)
        thread.start()
# For music, can only play one at a time, MP3 supported. Will have some slight lag starting up, so not for sound effects
class Music(Sound):
    """Background music track played through pygame's mixer.

    Supports MP3, but only one track can play at a time and startup has a
    slight delay -- use Effect for sound effects.
    """

    def __init__(self, path):
        Sound.__init__(self)
        self.path = path

    def set_volume(self, val):
        """Store the volume and push it straight to the pygame mixer."""
        self.volume = val
        pygame.mixer.music.set_volume(self.volume)

    def _play(self):
        # Runs on the worker thread: bring up the mixer, then load and start.
        pygame.mixer.init()
        pygame.mixer.music.load(self.path)
        pygame.mixer.music.play()

    def play(self):
        """Start playback on a background thread and return immediately."""
        worker = threading.Thread(target=self._play)
        worker.start()
28201165059 | # Create your views here.
from django.http import *
from django.contrib.auth import logout
from django.shortcuts import render_to_response, get_object_or_404
from django.core.paginator import Paginator
from skatemaps.maps.models import *
def index(request):
    # Front page of the site.
    # NOTE(review): 'spots' here resolves to the *view function* defined
    # below, not a queryset — the template receives a callable.  Confirm
    # whether this should be Spot.objects.all() (or be dropped entirely).
    return render_to_response('index.html', {'spots': spots, 'user': request.user} )
def spots(request, page):
    """Paginated list of skate spots, three per page.

    *page* arrives as a string from the URLconf; an out-of-range value will
    raise from Paginator.page().
    """
    paginator = Paginator(Spot.objects.all(), 3)
    p = paginator.page(page)
    spots = p.object_list
    # BUG FIX: previous_page_number()/next_page_number() raise InvalidPage
    # on the boundary pages in newer Django versions, so guard them; the
    # template already checks has_prev/has_next before using them.
    prev_page = p.previous_page_number() if p.has_previous() else None
    next_page = p.next_page_number() if p.has_next() else None
    return render_to_response('spots.html', {
        'show_paginator': paginator.num_pages > 1,
        'has_prev': p.has_previous(),
        'has_next': p.has_next(),
        'page': int(page),
        'pages': paginator.page_range,
        'prev': prev_page,
        'next': next_page,
        'spots': spots,
        'user': request.user,
    })
def spotdetail(request, spot_id):
    """Detail page for one spot; responds 404 when the id is unknown."""
    spot = get_object_or_404(Spot, pk=spot_id)
    context = {'spot': spot, 'user': request.user}
    return render_to_response('spotdetail.html', context)
def spotmap(request, spot_id):
    """Map page for one spot; responds 404 when the id is unknown."""
    spot = get_object_or_404(Spot, pk=spot_id)
    context = {'spot': spot, 'user': request.user}
    return render_to_response('spotmap.html', context)
def logout_page(request):
    """Log the current user out and send them back to the front page."""
    logout(request)
    return HttpResponseRedirect('/')
71189193436 | """ Bot file that controls that mail bot """
from automation import Automation
import pandas as pd
from datetime import datetime, timedelta
from utils import filehandler as fh
from utils import regextools as rt
class MailBot(Automation):
    """Automation that builds a partner/client lookup from a CCH export and
    resolves incoming mail folders to the responsible partner's e-mail."""

    def __init__(self):
        super(MailBot, self).__init__()
        self.original_df_name = "12.xlsx"  # raw CCH export
        self.created_df_name = "Partner_Client_Information.xlsx"  # cleaned output
        self.admin_email = "admin@bakertillywm.com"  # fallback recipient

    # Date
    def create_partner_data(self):
        """Build the partner/client spreadsheet from the raw CCH export."""
        df = self.read_cch_data(self.original_df_name)
        df = self.select_relevant_info(df)
        df = self.update_partner_row(df)
        df = self.remove_data(df)
        df = self.clean_data(df)
        fh.write_excel(self.created_df_name, df)

    def read_cch_data(self, path: str):
        """Read the CCH export, skipping 5 header rows and normalising column
        names (':' stripped, spaces -> underscores)."""
        df = pd.read_excel(path, skiprows=5)
        df.columns = df.columns.str.replace(':', '')
        df.columns = df.columns.str.replace(' ', '_')
        df = df.rename(columns={"Unnamed_0": "Partner"})
        return df

    def select_relevant_info(self, df):
        """Keep the 'Primary Partner: ...' header rows and the client ('C')
        rows, in their original order."""
        df1 = df[df['Partner'].str.contains('Primary Partner: ', na=False)]
        df2 = df[df['Partner'].isin(['C'])]
        df = pd.concat([df1, df2]).sort_index()
        df = df.reset_index()
        return df

    def update_partner_row(self, df):
        """Forward-fill the partner name onto each client ('C') row that
        follows its partner header."""
        for i, row in df.iterrows():
            val = row['Partner']
            if val == "C":
                val = df.at[i - 1, 'Partner']
            df.at[i, 'Partner'] = val
        return df

    def remove_data(self, df):
        """Drop partner header rows (no Client_Name) and keep only the
        Partner/Client_Name columns."""
        df = df.dropna(subset=['Client_Name'])
        df = df[['Partner', 'Client_Name']]
        return df

    def clean_data(self, df):
        """Normalise partner names and derive first.last@bakertillywm.com
        e-mail addresses."""
        # Remove extra details - prefix, bracketed notes, commas
        df["Partner"] = df["Partner"].str.replace('Primary Partner: ', '')
        # BUG FIX: this is a regex pattern; pandas >= 2.0 defaults
        # str.replace to literal matching, so regex=True must be explicit
        # for the bracketed notes to be stripped.
        df['Partner'] = df['Partner'].str.replace(r"\(.*\)", "", regex=True)
        df["Partner"] = df["Partner"].str.replace(',', '')
        df["Partner"] = df["Partner"].str.strip()
        # Remove the non-partner names
        df = df[~df['Partner'].isin(['No Selection', 'zAdministrator'])]
        # Format the Columns
        df["Partner"] = df["Partner"].str.strip()
        df[['Last_Name', 'First_Name']] = df['Partner'].str.split(' ', 1, expand=True)
        df["Last_Name"] = df["Last_Name"].str.replace(' ', '').str.lower()
        df["First_Name"] = df["First_Name"].str.replace(' ', '').str.lower()
        df["Email"] = df["First_Name"] + "." + df["Last_Name"] + "@bakertillywm.com"
        df = df[['Partner', "Email", 'Client_Name']]
        return df

    def get_mail_date(self, text: str):
        """Extract the '(YYYY-MM-DD' date stamp from a mail line and return
        it as 'YYYY-MM-DD'."""
        # Raw string avoids the invalid '\(' escape warning on Python 3.12+.
        mail_date = rt.find_all(r'\(\d{4}-\d{2}-\d{2} ', text)[0]
        return mail_date.strip().replace("(", "")

    def get_date(self, today_minus=0):
        """Return today minus *today_minus* days as 'YYYY-MM-DD'."""
        return (datetime.now() - timedelta(today_minus)).strftime('%Y-%m-%d')

    def get_company_info(self, info: list, break_folder="_INBOX"):
        """Given the backslash-split folder path, return (company, folder)
        where company is the element just before *break_folder* and folder
        is the backslash path up to and including *break_folder*."""
        company = info[info.index(break_folder) - 1]
        folder = "\\" + '\\'.join(info[1:info.index(break_folder) + 1])
        return company, folder

    def get_textfile_info(self, path: str):
        """Collect {'Company', 'Folder'} entries for mail received yesterday.

        NOTE(review): mail_line[0].split assumes fh.read_text_file yields
        row sequences whose first element is the folder path — confirm.
        """
        mail = fh.read_text_file(path)
        today_date = self.get_date(1)  # yesterday atm
        info = []
        for mail_line in mail:
            if self.get_mail_date(mail_line) == today_date:
                mail_info = mail_line[0].split('\\')
                company, folder = self.get_company_info(mail_info)
                info.append({"Company": company,
                             "Folder": folder})
        return info

    def get_company_partner(self, company, partner_info):
        """Return the e-mail of the partner responsible for *company*, or
        False when no client row matches."""
        for i, row in partner_info.iterrows():
            # BUG FIX: the comparison was hard-coded to the debug value
            # 'Vancouver Cycling Without Age' instead of the argument.
            if row['Client_Name'] == company:
                return row['Email']
        return False

    def match_text_file(self, text_file_path: str):
        """Resolve each of yesterday's mail folders to a partner e-mail,
        falling back to the admin address when no partner matches.

        NOTE(review): the resolved address is currently unused — this
        function appears unfinished (no return/send step).
        """
        relevant_info = self.get_textfile_info(text_file_path)
        partner_info = pd.read_excel(self.created_df_name)
        for company_info in relevant_info:
            partner_email = self.get_company_partner(company_info["Company"], partner_info)
            if not partner_email:
                partner_email = self.admin_email
| djwatson-coder/automation_programs | mailbot/bot.py | bot.py | py | 4,394 | python | en | code | 0 | github-code | 50 |
39273560017 | class Solution:
def countSubstrings(self, s: str) -> int:
if len(s) == 1:
return 1
result = 0
size = len(s)
for i in range(size):
for left, right in (i, i), (i, i + 1):
while left > -1 and right < size and s[left] == s[right]:
left -= 1
right += 1
result += 1
return result
| Tammon23/LeetCodePractice | Medium/PalindromicSubstrings/PalindromicSubstrings.py | PalindromicSubstrings.py | py | 421 | python | en | code | 0 | github-code | 50 |
28640447322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Linjian Zhang
Email: linjian93@foxmail.com
Create Time: 2018-01-14 14:31:15
Program:
Description:
"""
import math
import torch
import warnings
from torch.nn import Module, Parameter
import torch.nn.functional as F
from torch.autograd import Variable
def clip_grad(v, min, max):
    """Clamp the gradient flowing back through *v* into [min, max]; returns
    *v* unchanged so the call can be chained inline."""
    v.register_hook(lambda grad: grad.clamp(min, max))
    return v
class RNNCellBase(Module):
    """Shared __repr__ for the recurrent-cell implementations below."""

    def __repr__(self):
        # 'bias'/'nonlinearity' live in __dict__ only when stored as plain
        # attributes; Parameters are kept in _parameters instead, so a
        # Parameter-valued bias is intentionally not shown.
        template = '{name}({input_size}, {hidden_size}'
        if 'bias' in self.__dict__ and self.bias is not True:
            template += ', bias={bias}'
        if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
            template += ', nonlinearity={nonlinearity}'
        template += ')'
        return template.format(name=type(self).__name__, **self.__dict__)
class RNNCell(RNNCellBase):
    """Vanilla RNN cell with a ReLU nonlinearity and optional gradient
    clipping on the pre-activation."""

    def __init__(self, input_size, hidden_size, bias=True, grad_clip=None):
        super(RNNCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.grad_clip = grad_clip
        self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size))
        if bias:
            self.bias = Parameter(torch.Tensor(hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)]."""
        bound = 1.0 / math.sqrt(self.hidden_size)
        for param in self.parameters():
            param.data.uniform_(-bound, bound)

    def forward(self, input, h):
        """One step: relu(W_ih @ x + b + W_hh @ h)."""
        pre = F.linear(input, self.weight_ih, self.bias) + F.linear(h, self.weight_hh)
        if self.grad_clip:
            pre = clip_grad(pre, -self.grad_clip, self.grad_clip)  # avoid explosive gradient
        return F.relu(pre)
class GRUCell(RNNCellBase):
    """GRU cell whose candidate activation is ReLU instead of tanh, with
    optional gradient clipping on the gate pre-activations."""

    def __init__(self, input_size, hidden_size, bias=True, grad_clip=None):
        super(GRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.grad_clip = grad_clip
        # Input projection covers all three gates; the hidden projection is
        # split so the candidate can use the reset-gated state.
        self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
        self.weight_hh_rz = Parameter(torch.Tensor(2 * hidden_size, hidden_size))
        self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size))
        if bias:
            self.bias = Parameter(torch.Tensor(3 * hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)]."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, input, h):
        """One step; returns the new hidden state."""
        ih = F.linear(input, self.weight_ih, self.bias)
        hh_rz = F.linear(h, self.weight_hh_rz)
        if self.grad_clip:
            ih = clip_grad(ih, -self.grad_clip, self.grad_clip)
            hh_rz = clip_grad(hh_rz, -self.grad_clip, self.grad_clip)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        r = torch.sigmoid(ih[:, :self.hidden_size] + hh_rz[:, :self.hidden_size])
        i = torch.sigmoid(ih[:, self.hidden_size: self.hidden_size * 2] + hh_rz[:, self.hidden_size:])
        hhr = F.linear(h * r, self.weight_hh)
        if self.grad_clip:
            hhr = clip_grad(hhr, -self.grad_clip, self.grad_clip)
        n = F.relu(ih[:, self.hidden_size * 2:] + hhr)
        h = (1 - i) * n + i * h
        return h
class LSTMCell(RNNCellBase):
    """LSTM cell that deliberately uses ReLU where the standard LSTM uses
    tanh (cell-candidate and output activations), per the original author's
    'change to relu' notes.  Optional gradient clipping on the gate
    pre-activations."""

    def __init__(self, input_size, hidden_size, bias=True, grad_clip=None):
        super(LSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.grad_clip = grad_clip
        # One fused projection for the i, f, g, o gates.
        self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
        if bias:
            self.bias = Parameter(torch.Tensor(4 * hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)]."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, input, hx):
        """One step; *hx* is the (h, c) pair, returns the new (h, c)."""
        h, c = hx
        pre = F.linear(input, self.weight_ih, self.bias) + F.linear(h, self.weight_hh)
        if self.grad_clip:
            pre = clip_grad(pre, -self.grad_clip, self.grad_clip)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
        i = torch.sigmoid(pre[:, :self.hidden_size])
        f = torch.sigmoid(pre[:, self.hidden_size: self.hidden_size * 2])
        g = F.relu(pre[:, self.hidden_size * 2: self.hidden_size * 3])  # relu instead of tanh (intentional)
        o = torch.sigmoid(pre[:, self.hidden_size * 3:])
        c = f * c + i * g
        h = o * F.relu(c)  # relu instead of tanh (intentional)
        return h, c
class LSTMPCell(RNNCellBase):
    """LSTM with a recurrent projection layer (LSTMP): the hidden state is
    projected from *hidden_size* down to *recurrent_size* after each step.
    Optional gradient clipping on the gate pre-activations."""

    def __init__(self, input_size, hidden_size, recurrent_size, bias=True, grad_clip=None):
        super(LSTMPCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.recurrent_size = recurrent_size
        self.grad_clip = grad_clip
        # Fused i/f/g/o projections; the recurrent input is the projected h.
        self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
        self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, recurrent_size))
        self.weight_rec = Parameter(torch.Tensor(recurrent_size, hidden_size))
        if bias:
            self.bias = Parameter(torch.Tensor(4 * hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)]."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def forward(self, input, hx):
        """One step; *hx* is (projected h, c), returns the new pair."""
        h, c = hx
        pre = F.linear(input, self.weight_ih, self.bias) \
            + F.linear(h, self.weight_hh)
        if self.grad_clip:
            pre = clip_grad(pre, -self.grad_clip, self.grad_clip)
        # torch.sigmoid/torch.tanh replace the deprecated F.* versions
        # (identical math).
        i = torch.sigmoid(pre[:, :self.hidden_size])
        f = torch.sigmoid(pre[:, self.hidden_size: self.hidden_size * 2])
        g = torch.tanh(pre[:, self.hidden_size * 2: self.hidden_size * 3])
        o = torch.sigmoid(pre[:, self.hidden_size * 3:])
        c = f * c + i * g
        h = o * torch.tanh(c)
        h = F.linear(h, self.weight_rec)  # project hidden -> recurrent size
        return h, c
class MGRUCell(RNNCellBase):
'''Minimal GRU
Reference:
Ravanelli et al. [Improving speech recognition by revising gated recurrent units](https://arxiv.org/abs/1710.00641).
'''
def __init__(self, input_size, hidden_size, bias=True, grad_clip=None):
super(MGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.grad_clip = grad_clip
self.weight_ih = Parameter(torch.Tensor(2 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(2 * hidden_size, hidden_size))
if bias:
self.bias = Parameter(torch.Tensor(2 * hidden_size))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input, h):
ih = F.linear(input, self.weight_ih, self.bias)
hh = F.linear(h, self.weight_hh)
if self.grad_clip:
ih = clip_grad(ih, -self.grad_clip, self.grad_clip)
hh = clip_grad(hh, -self.grad_clip, self.grad_clip)
z = F.sigmoid(ih[:, :self.hidden_size] + hh[:, :self.hidden_size])
n = F.relu(ih[:, self.hidden_size:] + hh[:, self.hidden_size:])
h = (1 - z) * n + z * h
return h
class RNNBase(Module):
def __init__(self, mode, input_size, hidden_size, recurrent_size=None, num_layers=1, bias=True,
return_sequences=True, grad_clip=None):
super(RNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.recurrent_size = recurrent_size
self.num_layers = num_layers
self.bias = bias
self.return_sequences = return_sequences
self.grad_clip = grad_clip
self.flatten_parameters()
mode2cell = {'RNN': RNNCell,
'GRU': GRUCell,
'MGRU': GRUCell,
'LSTM': LSTMCell,
'LSTMP': LSTMPCell}
Cell = mode2cell[mode]
kwargs = {'input_size': input_size,
'hidden_size': hidden_size,
'bias': bias,
'grad_clip': grad_clip}
if self.mode == 'LSTMP':
kwargs['recurrent_size'] = recurrent_size
self.cell0 = Cell(**kwargs)
for i in range(1, num_layers):
kwargs['input_size'] = recurrent_size if self.mode == 'LSTMP' else hidden_size
cell = Cell(**kwargs)
setattr(self, 'cell{}'.format(i), cell)
def flatten_parameters(self):
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this works only if the module is on the GPU and cuDNN is enabled.
Otherwise, it's a no-op.
"""
any_param = next(self.parameters()).data
if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
self._data_ptrs = []
return
# If any parameters alias, we fall back to the slower, copying code path. This is
# a sufficient check, because overlapping parameter buffers that don't completely
# alias would break the assumptions of the uniqueness check in
# Module.named_parameters().
unique_data_ptrs = set(p.data_ptr() for l in self.all_weights for p in l)
if len(unique_data_ptrs) != sum(len(l) for l in self.all_weights):
self._data_ptrs = []
return
with torch.cuda.device_of(any_param):
# This is quite ugly, but it allows us to reuse the cuDNN code without larger
# modifications. It's really a low-level API that doesn't belong in here, but
# let's make this exception.
from torch.backends.cudnn import rnn
from torch.backends import cudnn
from torch.nn._functions.rnn import CudnnRNN
handle = cudnn.get_handle()
with warnings.catch_warnings(record=True):
fn = CudnnRNN(
self.mode,
self.input_size,
self.hidden_size,
num_layers=self.num_layers,
batch_first=self.batch_first,
dropout=self.dropout,
train=self.training,
bidirectional=self.bidirectional,
dropout_state=self.dropout_state,
)
# Initialize descriptors
fn.datatype = cudnn._typemap[any_param.type()]
fn.x_descs = cudnn.descriptor(any_param.new(1, self.input_size), 1)
fn.rnn_desc = rnn.init_rnn_descriptor(fn, handle)
# Allocate buffer to hold the weights
self._param_buf_size = rnn.get_num_weights(handle, fn.rnn_desc, fn.x_descs[0], fn.datatype)
fn.weight_buf = any_param.new(self._param_buf_size).zero_()
fn.w_desc = rnn.init_weight_descriptor(fn, fn.weight_buf)
# Slice off views into weight_buf
all_weights = [[p.data for p in l] for l in self.all_weights]
params = rnn.get_parameters(fn, handle, fn.weight_buf)
# Copy weights and update their storage
rnn._copyParams(all_weights, params)
for orig_layer_param, new_layer_param in zip(all_weights, params):
for orig_param, new_param in zip(orig_layer_param, new_layer_param):
orig_param.set_(new_param.view_as(orig_param))
self._data_ptrs = list(p.data.data_ptr() for p in self.parameters())
def forward(self, input, initial_states=None):
if initial_states is None:
zeros = Variable(torch.zeros(input.size(0), self.hidden_size))
if self.mode == 'LSTM':
initial_states = [(zeros, zeros), ] * self.num_layers
elif self.mode == 'LSTMP':
zeros_h = Variable(torch.zeros(input.size(0), self.recurrent_size))
initial_states = [(zeros_h, zeros), ] * self.num_layers
else:
initial_states = [zeros] * self.num_layers
assert len(initial_states) == self.num_layers
states = initial_states
outputs = []
time_steps = input.size(1)
for t in range(time_steps):
x = input[:, t, :]
for l in range(self.num_layers):
hx = getattr(self, 'cell{}'.format(l))(x, states[l])
states[l] = hx
if self.mode.startswith('LSTM'):
x = hx[0]
else:
x = hx
outputs.append(hx)
if self.return_sequences:
if self.mode.startswith('LSTM'):
hs, cs = zip(*outputs)
h = torch.stack(hs).transpose(0, 1)
c = torch.stack(cs).transpose(0, 1)
output = (h, c)
else:
output = torch.stack(outputs).transpose(0, 1)
else:
output = outputs[-1]
return output
class RNN(RNNBase):
def __init__(self, *args, **kwargs):
super(RNN, self).__init__('RNN', *args, **kwargs)
class GRU(RNNBase):
def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs)
class MGRU(RNNBase):
def __init__(self, *args, **kwargs):
super(MGRU, self).__init__('MGRU', *args, **kwargs)
class LSTM(RNNBase):
def __init__(self, *args, **kwargs):
super(LSTM, self).__init__('LSTM', *args, **kwargs)
class LSTMP(RNNBase):
def __init__(self, *args, **kwargs):
super(LSTMP, self).__init__('LSTMP', *args, **kwargs)
| linjianz/pytorch-deepvo | rnn/modules.py | modules.py | py | 14,203 | python | en | code | 102 | github-code | 50 |
7816156641 | import pandas as pd
url_1='https://fbref.com/en/matches/109ad6ba/Argentina-Saudi-Arabia-November-22-2022-World-Cup#shots_all'
urls=[url_1]
def cambio_min(match):
suma=[]
for x in match.Minute:
min=x.split('+')
i = 2
if i == len(min):
nuevo_min=int(min[0])+int(min[1])
suma.append(nuevo_min)
else:
suma.append(int(min[0]))
match.Minute=suma
def buscar(urls):
for url in urls:
df=pd.read_html(url)
match=df[17].dropna(axis=0,how='all')
team1=df[18].dropna(axis=0,how='all')
team2=df[19].dropna(axis=0,how='all')
match.columns=['Minute' ,'Player' ,'Squad','xG' ,'PSxG' ,'Outcome' ,'Distance' ,'Body Part' ,'Notes' ,'sca_1_Player' ,'sca_1_Event' ,'sca_2_Player' ,'sca_2_Event']
cambio_min(match)
return match, df
match, df=buscar(urls)
| toledojm/wc2022_stats | data.py | data.py | py | 878 | python | en | code | 0 | github-code | 50 |
5631903542 | import os
import json
from collections import defaultdict
import numpy as np
import cv2
import masking
class COCODoomDataset:
BACKGROUND_CLASS = 0
def __init__(self,
data,
image_root,
version="standard",
batch_size=16,
ignored_class_ids=None):
self.root = image_root
self.batch_size = batch_size
self.ignored_class_ids = ignored_class_ids or set()
version_str = {"standard": "", "full": "-full"}[version]
if not isinstance(data, dict):
data = json.load(open(data))
self.index = defaultdict(list)
for anno in data["annotations"]:
self.index[anno["image_id"]].append(anno)
self.image_meta = {meta["id"]: meta for meta in data["images"]}
self.classes = {}
self.num_classes = 1
for category in data["categories"]:
ID = category["id"]
if ID in self.ignored_class_ids:
self.classes[ID] = 0
else:
self.classes[ID] = self.num_classes
self.num_classes += 1
print(f"Num images :", len(data["images"]))
print(f"Num annos :", len(data["annotations"]))
print(f"Num classes:", self.num_classes)
@property
def steps_per_epoch(self):
return len(self.index) // self.batch_size
def _mask_sparse(self, image_shape, image_id):
mask = np.zeros(image_shape[:2] + (self.num_classes+1,))
for anno in self.index[image_id]:
instance_mask = masking.get_mask(anno, image_shape[:2])
category = self.classes[anno["category_id"]]
if category == 0:
continue
mask[..., category][instance_mask] = 1
overlaps = mask.sum(axis=2)[..., None]
overlaps[overlaps == 0] = 1
mask /= overlaps
mask[..., 0] = 1 - mask[..., 1:].sum(axis=2)
return mask
def _mask_dense(self, image_shape, image_id):
mask = np.zeros(image_shape[:2])
for anno in self.index[image_id]:
instance_mask = masking.get_mask(anno, image_shape[:2])
category = self.classes[anno["category_id"]]
if category == 0:
continue
mask[instance_mask] = category
return mask[..., None]
def make_sample(self, image_id, sparse_y=True):
meta = self.image_meta[image_id]
image_path = os.path.join(self.root, meta["file_name"])
image = cv2.imread(image_path)
if image is None:
raise RuntimeError(f"No image found @ {image_path}")
if sparse_y:
mask = self._mask_sparse(image.shape, image_id)
else:
mask = self._mask_dense(image.shape, image_id)
return image, mask
def stream(self, shuffle=True, sparse_y=True, run=None, level=None):
meta_iterator = self.image_meta.values()
if run is not None:
criterion = "run{}".format(run)
meta_iterator = filter(lambda meta: criterion in meta["file_name"], meta_iterator)
if level is not None:
criterion = "map{:0>2}".format(level)
meta_iterator = filter(lambda meta: criterion in meta["file_name"], meta_iterator)
ids = sorted(meta["id"] for meta in meta_iterator)
N = len(ids)
while 1:
if shuffle:
np.random.shuffle(ids)
for batch in (ids[start:start+self.batch_size] for start in range(0, N, self.batch_size)):
X, Y = [], []
for ID in batch:
x, y = self.make_sample(ID, sparse_y)
X.append(x)
Y.append(y)
yield np.array(X) / 255, np.array(Y)
| csxeba/COCODoomSeg | data.py | data.py | py | 3,801 | python | en | code | 0 | github-code | 50 |
5990359491 | import os
import sys
import cv2
import numpy as np
import tensorflow as tf
from lib.utils.timer import Timer
from lib.fast_rcnn.config import cfg
from lib.fast_rcnn.test import test_ctpn
from lib.networks.factory import get_network
from lib.text_connector.detectors import TextDetector
from lib.text_connector.text_connect_cfg import Config as TextLineCfg
def resize_im(im, scale, max_scale=None):
f = float(scale) / min(im.shape[0], im.shape[1])
if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale:
f = float(max_scale) / max(im.shape[0], im.shape[1])
return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f
def load_tf_model():
# load config file
cfg.TEST.checkpoints_path = './ctpn/checkpoints'
# init session
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
sess = tf.Session(config=config)
# load network
net = get_network("VGGnet_test")
# load model
print('Loading network {:s}... '.format("VGGnet_test"))
saver = tf.train.Saver()
try:
ckpt = tf.train.get_checkpoint_state(cfg.TEST.checkpoints_path)
print('Restoring from {}...'.format(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
print('done')
except:
raise 'Check your pretrained {:s}'.format(ckpt.model_checkpoint_path)
return sess, net
sess, net = load_tf_model()
def ctpn(img):
timer = Timer()
timer.tic()
img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
scores, boxes = test_ctpn(sess, net, img)
textdetector = TextDetector()
boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
timer.toc()
print("\n----------------------------------------------")
print(('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0]))
return scores, boxes, img, scale
def draw_boxes(img, boxes, scale):
box_id = 0
img = img.copy()
text_recs = np.zeros((len(boxes), 8), np.int)
for box in boxes:
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
continue
if box[8] >= 0.8:
color = (255, 0, 0) # red
else:
color = (0, 255, 0) # green
cv2.line(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)
cv2.line(img, (int(box[0]), int(box[1])), (int(box[4]), int(box[5])), color, 2)
cv2.line(img, (int(box[6]), int(box[7])), (int(box[2]), int(box[3])), color, 2)
cv2.line(img, (int(box[4]), int(box[5])), (int(box[6]), int(box[7])), color, 2)
for i in range(8):
text_recs[box_id, i] = box[i]
box_id += 1
img = cv2.resize(img, None, None, fx=1.0/scale, fy=1.0/scale, interpolation=cv2.INTER_LINEAR)
return text_recs, img
def text_detect(img):
scores, boxes, img, scale = ctpn(img)
text_recs, img_drawed = draw_boxes(img, boxes, scale)
return text_recs, img_drawed, img
if __name__ == '__main__':
from PIL import Image
from lib.fast_rcnn.config import cfg_from_file
cfg_from_file('./ctpn/ctpn/text.yml')
im = Image.open('./test_images/1.jpg')
img = np.array(im.convert('RGB'))
text_recs, img_drawed, img = text_detect(img)
Image.fromarray(img_drawed).save('result.jpg')
| EssayKillerBrain/WriteGPT | RecognizaitonNetwork/ctpn/text_detect.py | text_detect.py | py | 3,485 | python | en | code | 5,257 | github-code | 50 |
18490923987 | import torch
import cv2
from einops import rearrange
import numpy as np
import json
from PIL import Image
from torchvision.models import EfficientNet_V2_M_Weights
from core.settings import model_config
def load_pretrained(model: object, pretrained: str, device: str):
checkpoints = torch.load(pretrained, map_location=device)
pretrained_model = checkpoints['model']
step_all = checkpoints['step_all']
epo = torch.tensor([checkpoints['epoch']]).to(device)
lr = checkpoints['lr'][0]
model.load_state_dict(pretrained_model)
return model, step_all, epo, lr
def img_preprocess_inference_old(img_path : str):
img = cv2.imread(img_path) / 255
img = cv2.resize(img,(256,256))
img = rearrange(img, 'h w c -> c h w')
img = torch.Tensor(img)
img = img.unsqueeze(dim=0)
return img
def img_preprocess_inference(img_path : str):
weights= EfficientNet_V2_M_Weights.DEFAULT
preprocess = weights.transforms()
img = Image.open(img_path).convert('RGB')
img = preprocess(img).unsqueeze(dim=0)
return img
def noraml_weight(file_path : str):
with open(file_path) as file:
data_file_in = file.readlines()
category_dict = {}
for data in data_file_in:
data_patch = data.split("|")[1:]
for patch in data_patch:
id = int(patch.split(",")[0])
if 0 < id <= 90:
if id in category_dict:
category_dict[id] += 1
else:
category_dict[id] = 1
category_dict_sort = dict(sorted(category_dict.items(),key=lambda x:x[0]))
weights = []
category_dict_sum = sum(category_dict_sort.values())
for counter in range(1,model_config.class_num+1):
if counter in category_dict_sort:
weights.append(category_dict_sum / category_dict_sort[counter])
else:
weights.append(0)
weights = np.array(weights) / model_config.class_num
weights_bound = np.minimum(10, np.maximum(0.1, weights))
weights_bound = torch.Tensor(weights_bound)
return weights_bound
def calculate_iou(box_a, box_b):
#intersection over union
cx_a, cy_a, w_a, h_a = box_a[0], box_a[1], box_a[2], box_a[3]
cx_b, cy_b, w_b, h_b = box_b[0], box_b[1], box_b[2], box_b[3]
a1_x, a1_y, a2_x, a2_y = (cx_a - w_a/2), (cy_a - h_a/2), (cx_a + w_a/2), (cy_a + h_a/2)
b1_x, b1_y, b2_x, b2_y = (cx_b - w_b/2), (cy_b - h_b/2), (cx_b + w_b/2), (cy_b + h_b/2)
overlap = max(0, min(a2_x,b2_x) - max(a1_x,b1_x)) * max(0, min(a2_y,b2_y) - max(a1_y,b1_y))
mean_area = w_a*h_a + w_b*h_b - overlap
if mean_area < 0.005:
iou = 1
else:
iou = overlap / mean_area
return iou
def nms_img(obj_out, class_out, box_out):
#non maxima supression
obj_score_list = []
class_list = []
class_score_list = []
box_list = []
xy_list = []
#making possible bbox
for patch in range(len(obj_out)):
obj_score = obj_out[patch]
class_id = np.argmax(class_out[patch])
class_score = class_out[patch][class_id]
if obj_score > model_config.obj_thresh and class_score > model_config.class_thresh and box_out[patch][2] > 0.03 and box_out[patch][3] > 0.03:
obj_score_list.append(obj_score)
x = patch % 2
y = patch // 2
xy_list.append((x,y))
class_id = np.argmax(class_out[patch])
class_list.append(class_id)
class_score = class_out[patch][class_id]
class_score_list.append(class_score)
box_list.append(box_out[patch])
# print(obj_score_list, class_list, class_score_list, box_list, xy_list)
obj_score_list_final = []
class_list_final = []
class_score_list_final = []
box_list_final = []
xy_list_final = []
while obj_score_list != []:
max_score_index = np.argmax(obj_score_list)
obj_score_list_final.append(obj_score_list[max_score_index])
class_list_final.append(class_list[max_score_index])
class_score_list_final.append(class_score_list[max_score_index])
box_list_final.append(box_list[max_score_index])
xy_list_final.append([xy_list[max_score_index]])
len_box = len(box_list)
ref_box = box_list[max_score_index]
ref_id = class_list[max_score_index]
shift = 0
for count in range(len_box):
possible_box = box_list[count- shift]
iou = calculate_iou(ref_box, possible_box)
if (iou > model_config.iou_thresh and ref_id==class_list[count-shift]) or count==max_score_index:
del obj_score_list[count-shift]
del class_list[count-shift]
del class_score_list[count-shift]
del box_list[count-shift]
del xy_list[count-shift]
shift += 1
return obj_score_list_final, class_list_final, class_score_list_final, box_list_final, xy_list_final
def show_box(img_path, class_list, box_list, out_path):
img = cv2.imread(img_path)
h, w, c = img.shape
color = (255,255,255)
thickness = 1
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
categories = category_name_id(model_config.json_file_path_1, model_config.json_file_path_2)
for box, class_id in zip(box_list, class_list):
class_name = categories[int(class_id)+1]
cv2.rectangle(img, (int((box[0] - box[2]/2)*w), int((box[1] - box[3]/2)*h)), (int((box[0] + box[2]/2)*w), int((box[1] + box[3]/2)*h)), color, thickness)
cv2.putText(img, class_name, (int(box[0]*w),int(box[1]*h)), font, fontScale, color, thickness, cv2.LINE_AA,)
cv2.imwrite(out_path,img)
def category_name_id(file_path_1, file_path_2):
with open(file_path_1) as file:
data_file_in_1 = json.load(file)
categories_1 = data_file_in_1["categories"]
with open(file_path_2) as file:
data_file_in_2 = json.load(file)
categories_2 = data_file_in_2["categories"]
category_dict = {}
for data in categories_1:
category_dict[data["id"]] = data["name"]
for data in categories_2:
category_dict[data["id"]] = data["name"]
return category_dict
| saeed5959/object-detection-transformer | object_detection/utils.py | utils.py | py | 6,259 | python | en | code | 6 | github-code | 50 |
42684283653 | import psycopg2
import os
from politico.config import APP_CONFIG
from flask import current_app
class DB:
"""Database initialization class"""
def tables(self):
users = """
create table if not exists users (
id serial primary key not null,
firstname varchar(50) not null,
lastname varchar(50) not null,
othername varchar(50) not null,
email varchar(100) not null unique,
phone_number bigint not null unique,
passport_url varchar(255) not null unique,
id_no bigint not null unique,
is_admin bool not null,
username varchar(50) not null unique,
password varchar(255) not null
);
"""
party = """
create table if not exists party(
id serial primary key not null,
name varchar(100) not null unique,
hq_address varchar(255) not null,
logo_url varchar(255) not null unique
);
"""
office = """
create table if not exists office(
id serial primary key not null,
name varchar(50) not null unique,
type varchar(50) not null
);
"""
candidates = """
create table if not exists candidates(
id serial not null unique,
office integer references office(id),
party integer references party(id),
candidate integer references users(id),
primary key (office, candidate)
);
"""
vote = """
create table if not exists vote(
id serial not null unique,
created_on date not null default current_date ,
created_by integer references users(id),
office integer references office(id),
candidate integer references candidates(id),
primary key (office, created_by)
);
"""
petition = """
create table if not exists petition(
id serial primary key not null,
created_on date not null default current_date ,
created_by integer references users(id),
office integer references office(id),
body text not null,
evidence text []
);
"""
queries = [users, party, office, candidates, vote, petition]
return queries
def connection(self):
try:
if current_app.config['TESTING']:
URL = os.getenv('TEST_DATABASE_URL')
else:
URL = os.getenv('DATABASE_URL')
conn = psycopg2.connect(URL)
return conn
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def initialize_db(self):
conn = self.connection()
print('Intitializing ...')
try:
cursor = conn.cursor()
queies = self.tables()
for query in queies:
cursor.execute(query)
conn.commit()
self.create_admin()
print('Database Inititialised')
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def create_admin(self):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from users where username = 'pheonix'")
user = cursor.fetchone()
# print(user)
if user is None:
print('Adding admin ...............')
admin = os.getenv('ADMIN')
query_two = """insert into users(firstname, lastname, othername, email, phone_number,
passport_url, id_no, is_admin, username, password) values({}) RETURNING id;""".format(admin)
cursor.execute(query_two)
conn.commit()
admin_added = cursor.fetchone()[0]
print(self.fetch_one('users', 'id', admin_added))
if admin_added:
print('Admin successfully added')
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one(self, tb_name, search_key, value):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = %s" % (tb_name, search_key, value))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one_using_two_values(self, tb_name, search_key, value, search_key_2, value_2):
# value and value_2 are both integers
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = %s and %s = %s" % (tb_name, search_key, value, search_key_2, value_2))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one_using_string(self, tb_name, search_key, value):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = '%s'" % (tb_name, search_key, value))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one_using_strings_with_two_values(self, tb_name, search_key, value, search_key_2, value_2):
# value and value_2 are both strings
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = '%s' and %s = '%s'" % (tb_name, search_key, value, search_key_2, value_2))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one_using_strings_as_first_values(self, tb_name, search_key, value, search_key_2, value_2):
# value 1 str value 2 int
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = '%s' and %s = %s" % (tb_name, search_key, value, search_key_2, value_2))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_one_using_strings_as_second_values(self, tb_name, search_key, value, search_key_2, value_2):
# value 1 int value 2 str
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = %s and %s = %s" % (tb_name, search_key, value, search_key_2, value_2))
result = cursor.fetchone()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_all(self, tb_name):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s" % tb_name)
result = cursor.fetchall()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_all_using_int_key(self, tb_name, search_key, value):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = %s" % (tb_name, search_key, value))
result = cursor.fetchall()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def fetch_all_using_str_key(self, tb_name, search_key, value):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("select * from %s where %s = '%s'" % (tb_name, search_key, value))
result = cursor.fetchall()
return result
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return None
finally:
if conn is not None:
conn.close()
return None
def delete_one(self, tb_name, search_key, value):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("delete from %s where %s = %s" % (tb_name, search_key, value))
conn.commit()
return True
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return False
finally:
if conn is not None:
conn.close()
return False
def delete_all(self, tb_name):
conn = self.connection()
try:
cursor = conn.cursor()
cursor.execute("delete from %s" % tb_name)
conn.commit()
return True
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return False
finally:
if conn is not None:
conn.close()
return False
def tables_to_delete(self):
users = 'Drop table if exists users'
party = 'Drop table if exists party'
office = 'Drop table if exists office'
candidates = 'Drop table if exists candidates'
vote = 'Drop table if exists vote'
petition = 'Drop table if exists petition'
queries = [petition, vote, candidates, office, party, users]
return queries
def tear_down_test_database(self):
conn = self.connection()
print('Intitializing tear down...')
try:
cursor = conn.cursor()
queies = self.tables_to_delete()
for query in queies:
cursor.execute(query)
conn.commit()
print('Database pulled down successfully')
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
| erycoking/Politico_API | politico/api/v2/db/__init__.py | __init__.py | py | 11,512 | python | en | code | 1 | github-code | 50 |
10230090124 | # !/usr/bin/python
# -*- coding: utf-8 -*-
from . import news
from .. import db
from flask import jsonify
__author__ = 'AidChow'
@news.route('/list/<page>', methods=['GET'])
def news_list(page):
page = int(page)
if page == 0:
page = 1
with db.connect().cursor() as cur:
sql = 'SELECT * from news_list limit %s,%s'
end = int(page) * 10
start = (int(page) - 1) * 10
cur.execute(sql, (start, end - start))
result = cur.fetchall()
l = []
if result is ():
return jsonify({'code': 200, 'msg': 'load complete', 'content': None})
for i in result:
data = {'original_url': i[1], 'content_id': i[2],
'news_title': i[3], 'news_push_time': i[4],
'news_preview': i[5]}
l.append(data)
return jsonify({'code': 200, 'msg': 'request success', 'content': l})
@news.route('/content/<news_id>', methods=['GET'])
def news_content(news_id):
with db.connect().cursor() as cur:
sql = 'SELECT * from news_content WHERE news_p_id =%s'
cur.execute(sql, news_id)
result = cur.fetchone()
if result is None:
return jsonify({'code': 404, 'msg': 'page not found', 'content': None}), 404
else:
return jsonify({'code': 200, 'msg': 'request success', 'content': result[2]})
| pengyuanqiuqiu/MinDa_news | app/news/view.py | view.py | py | 1,378 | python | en | code | 0 | github-code | 50 |
45998640719 | from settings import *
class Shield(pygame.sprite.Sprite):
def __init__(self, x, y, p):
pygame.sprite.Sprite.__init__(self)
if p == 1:
self.image = pygame.image.load(os.path.join(img_folder, 'shield1.png')).convert()
self.image.set_colorkey((0,0,0))
elif p == 2:
self.image = pygame.image.load(os.path.join(img_folder, 'shield2.png')).convert()
self.image.set_colorkey((0,0,0))
self.rect = self.image.get_rect()
self.rect.centery = y
self.rect.centerx = x
def update(self, x, y):
self.rect.centery = y
self.rect.centerx = x | ew073168/Star-Wars-PvP | Star Wars PvP/shield.py | shield.py | py | 574 | python | en | code | 0 | github-code | 50 |
24510732417 | import numpy as np
from common.sparsenetworkmodel import EdgeWeightedQBAF
import common.helper as helper
import copy
import time
class PSO:
    """
    This class represents the PSO algorithm with its parameters and functions
    Some of the attributes:
    experiment_id: identifier for the experiment
    velocities: set of particles' velocity matrices
    best_scores: set of particles' personal best scores
    particles: set of particles' models
    positions: set of particles' position matrices
    best_positions: set of particles' personal best positions
    global_best_position: global best position
    w: inertia weight (influence of old velocity)
    c_1: acceleration coefficient (cognitive parameter)
    c_2: acceleration coefficient (social parameter)
    num_init_connections: numbers of initial connections (tuple)
    epsilon: velocity bias parameter
    log: if set True, detailed progress will be printed
    best_model: global best model
    Important notice: in the PSO algorithm we use (m x n) binary connection matrices rather than connectivity matrices
    of the sparselinear library. This makes it easier to work with position and velocity matrices. We use helper
    function for conversions.
    """
    def __init__(self, number_of_particles, search_space_dimensions, dataset, learning_rate, alpha, epochs,
                 num_init_connections, connections_limits, epsilon, log, results_path, experiment_id='1',
                 random_state=42, patience=100, early_stopping_threshold=0.000001):
        """Store hyper-parameters and set up empty swarm state; particles are created later in init_particles()."""
        self.experiment_id = experiment_id
        self.random_state = random_state
        self.number_of_particles = number_of_particles
        self.search_space_dimensions = search_space_dimensions
        self.velocities = []
        self.best_scores = np.zeros(self.number_of_particles)
        self.particles = []
        self.positions = []
        self.best_positions = []
        self.dataset = dataset
        # Global best starts as all-zero binary position matrices for the two layers.
        self.global_best_position = (np.zeros((self.search_space_dimensions[0], self.search_space_dimensions[1])),
                                     np.zeros((self.search_space_dimensions[1], self.search_space_dimensions[2])))
        self.global_best_score = 0
        self.global_best_accuracy = 0
        self.global_best_recall = 0
        self.global_best_precision = 0
        self.global_best_f1score = 0
        self.global_best_connection_num = 0
        # Standard PSO coefficients: inertia w, cognitive c_1, social c_2.
        self.w = 0.9
        self.c_1 = 2
        self.c_2 = 2
        self.num_init_connections = num_init_connections
        self.connections_limits = connections_limits
        self.epsilon = epsilon
        self.learning_rate = learning_rate
        self.alpha = alpha
        self.epochs = epochs
        self.patience = patience
        self.early_stopping_threshold = early_stopping_threshold
        self.log = log
        self.config_log = []
        self.best_model = None
        self.results_path = results_path
        self.start_time = time.time()  # wall-clock start; reported in the final run report
    def init_particles(self):
        """
        initializes particles (sparse MLPs) with position and velocity matrices
        """
        for i in range(self.number_of_particles):
            # partially random position initialization
            connections_1, connections_2 = helper.init_random_connections(self.search_space_dimensions[0],
                                                                          self.search_space_dimensions[1],
                                                                          self.search_space_dimensions[2],
                                                                          self.num_init_connections)
            model = EdgeWeightedQBAF(connections_1=connections_1, connections_2=connections_2,
                                     num_features=self.search_space_dimensions[0],
                                     num_neurons=self.search_space_dimensions[1],
                                     num_targets=self.search_space_dimensions[2],
                                     model_number=i, learning_rate=self.learning_rate, alpha=self.alpha,
                                     epochs=self.epochs, dataset=self.dataset, patience=self.patience,
                                     early_stopping_threshold=self.early_stopping_threshold)
            # Convert the sparselinear connectivity lists into binary (m x n) position matrices.
            binary_matrix_1 = np.zeros((self.search_space_dimensions[0], self.search_space_dimensions[1]))
            binary_matrix_1 = helper.build_binary_matrix_out_of_connectivity_matrix(binary_matrix_1, connections_1)
            binary_matrix_2 = np.zeros((self.search_space_dimensions[1], self.search_space_dimensions[2]))
            binary_matrix_2 = helper.build_binary_matrix_out_of_connectivity_matrix(binary_matrix_2, connections_2)
            self.positions.append((binary_matrix_1, binary_matrix_2))
            self.best_positions.append((binary_matrix_1, binary_matrix_2))
            self.particles.append(model)
            self.velocities.append((np.zeros((self.search_space_dimensions[0], self.search_space_dimensions[1])),
                                    np.zeros((self.search_space_dimensions[1], self.search_space_dimensions[2]))))
    def evaluate_particle(self, particle):
        """
        Evaluate particle by calculating its score based on the objective function.
        """
        particle.train_and_evaluate_model()
        if self.log:
            print('Particle: ', particle.model_number, ' || score: ', particle.score, ', accuracy: ',
                  particle.test_accuracy, ', connections: ', particle.num_connections_tuple)
    def update_score(self, particle):
        """
        Updates the personal best score if it is greater than the current one
        Personal best position is also updated in this case
        """
        if particle.score > self.best_scores[particle.model_number]:
            self.best_scores[particle.model_number] = particle.score
            # Rebuild the binary position from the particle's current connectivity lists.
            binary_matrix_1 = helper.build_binary_matrix_out_of_connectivity_matrix(
                np.zeros((self.search_space_dimensions[0], self.search_space_dimensions[1])), particle.connections_1)
            binary_matrix_2 = helper.build_binary_matrix_out_of_connectivity_matrix(
                np.zeros((self.search_space_dimensions[1], self.search_space_dimensions[2])), particle.connections_2)
            self.best_positions[particle.model_number] = (binary_matrix_1, binary_matrix_2)
    def update_global_best(self):
        """
        Updates the global best score if a greater one is found
        Other global best attributes are updated accordingly
        """
        maximum = max(self.best_scores)
        if maximum > self.global_best_score:
            idx = int(np.argmax(self.best_scores))
            # NOTE(review): this copies positions[idx] rather than best_positions[idx].
            # At the point this is called in optimize() the two coincide for the particle
            # whose personal best just improved -- confirm if the call order ever changes.
            self.global_best_position = copy.deepcopy(self.positions[idx])
            self.global_best_score = maximum
            self.global_best_accuracy = self.particles[idx].test_accuracy
            self.global_best_connection_num = self.particles[idx].num_connections_tuple
    def update_velocity(self, particle):
        """
        Calculates and updates velocity matrices of the two layers
        """
        self.update_vel_layer(particle, 0)
        self.update_vel_layer(particle, 1)
    def update_vel_layer(self, particle, layer_num):
        """
        Calculates and updates velocity values in the velocity matrix that relates to a specific layer
        See literature for more information
        """
        for i in range(self.search_space_dimensions[0 + layer_num]):
            for j in range(self.search_space_dimensions[1 + layer_num]):
                prev_vel = self.velocities[particle.model_number][layer_num][i][j]  # previous velocity value
                # Attraction towards the particle's personal best and the global best.
                local_best_term = self.best_positions[particle.model_number][layer_num][i][j] - \
                                  self.positions[particle.model_number][layer_num][i][j]
                global_best_term = self.global_best_position[layer_num][i][j] - \
                                   self.positions[particle.model_number][layer_num][i][j]
                r_1 = np.random.uniform(0, 1)
                r_2 = np.random.uniform(0, 1)
                # velocity update equation
                self.velocities[particle.model_number][layer_num][i][j] \
                    = self.w * prev_vel + self.c_1 * r_1 * local_best_term + self.c_2 * r_2 * global_best_term
    def update_position(self, particle):
        """
        Updates the position matrices of a particle
        Afterwards, a new model is constructed based on the new position (connections)
        """
        updated_connections_1 = self.update_layer(particle, 0, self.connections_limits[0])
        updated_connections_2 = self.update_layer(particle, 1, self.connections_limits[1])
        updated_model = EdgeWeightedQBAF(connections_1=updated_connections_1, connections_2=updated_connections_2,
                                         num_features=self.search_space_dimensions[0],
                                         num_neurons=self.search_space_dimensions[1],
                                         num_targets=self.search_space_dimensions[2],
                                         model_number=particle.model_number, learning_rate=self.learning_rate,
                                         alpha=self.alpha, epochs=self.epochs, dataset=self.dataset,
                                         patience=self.patience,
                                         early_stopping_threshold=self.early_stopping_threshold)
        self.particles[particle.model_number] = updated_model
    def update_layer(self, particle, layer_num, connections_limit):
        """
        Updates a connections layer (position).
        See literature for more details
        """
        curr_conn_num = particle.num_connections_tuple[layer_num]  # current connection number
        for i in range(self.search_space_dimensions[0 + layer_num]):
            for j in range(self.search_space_dimensions[1 + layer_num]):
                # position bit update equations with the usage of velocity bias (epsilon)
                probability = 1 / (1 + np.exp(- self.velocities[particle.model_number][layer_num][i][j])) \
                              - self.epsilon
                r_id = np.random.uniform(0, 1)
                if r_id < probability:
                    if curr_conn_num < connections_limit[1]:  # check upper boundary conformity
                        if self.positions[particle.model_number][layer_num][i][j] == 0:
                            curr_conn_num = curr_conn_num + 1
                        self.positions[particle.model_number][layer_num][i][j] = 1
                else:
                    if curr_conn_num > connections_limit[0]:  # ensure at least one connection exists (lower boundary)
                        if self.positions[particle.model_number][layer_num][i][j] == 1:
                            curr_conn_num = curr_conn_num - 1
                        self.positions[particle.model_number][layer_num][i][j] = 0
        return helper.build_sparse_connectivity_out_of_binary_matrix(
            self.positions[particle.model_number][layer_num])
    def initialize(self):
        """Create the initial swarm (thin wrapper around init_particles)."""
        self.init_particles()
    def optimize(self, final, iterations):
        """
        This function represents the optimization sub-routine. It runs the optimization for a specific number of
        iterations
        """
        self.log_config(iterations)
        for i in range(iterations):
            print("Iteration: ", i)
            # Evaluate all particles, then update personal and global bests.
            for p in self.particles:
                self.evaluate_particle(p)
                self.update_score(p)
            self.update_global_best()
            print('Best particle: ', self.global_best_score, " || accuracy: ", self.global_best_accuracy,
                  ', connections: ', self.global_best_connection_num)
            # On the last iteration of a final run, produce the report instead of moving particles again.
            if i == iterations - 1 and final:
                self.finalize()
                break
            for p in self.particles:
                self.update_velocity(p)
                self.update_position(p)
    def log_config(self, iterations):
        """Record the hyper-parameter configuration of this optimize() call for the final report."""
        self.config_log.append('iterations: ' + str(iterations) + ', w=' + str(self.w) +
                               ', c_1=' + str(self.c_1) + ', c_2=' + str(self.c_2) +
                               ', epsilon=' + str(self.epsilon))
    def finalize(self):
        """
        Executed as a final step. The global best model is constructed, trained and evaluated.
        A report of the experiment is persisted as a txt file
        Additionally, binary connection matrices are persisted, in order to be able to construct the model again outside
        the PSO algorithm
        """
        conn_1 = helper.build_sparse_connectivity_out_of_binary_matrix(self.global_best_position[0])
        conn_2 = helper.build_sparse_connectivity_out_of_binary_matrix(self.global_best_position[1])
        self.best_model = EdgeWeightedQBAF(connections_1=conn_1, connections_2=conn_2,
                                           num_features=self.search_space_dimensions[0],
                                           num_neurons=self.search_space_dimensions[1],
                                           num_targets=self.search_space_dimensions[2],
                                           model_number=0, learning_rate=self.learning_rate,
                                           alpha=self.alpha, epochs=self.epochs, dataset=self.dataset,
                                           patience=self.patience,
                                           early_stopping_threshold=self.early_stopping_threshold)
        self.best_model.train_and_evaluate_model()
        self.best_model.evaluate_model_final()
        # Input features that still have at least one connection into the hidden layer.
        connected_features = helper.get_connected_features(self.global_best_position[0])
        connected_features_names = [self.dataset.feature_names[i] for i in connected_features]
        report_file = open(self.results_path + '/' + self.experiment_id + ".txt", "w")
        report_file.write("experiment_num: " + self.experiment_id)
        report_file.write("\n")
        report_file.write("\nParameters:")
        report_file.write("\nrandom_state=" + str(self.random_state))
        report_file.write("\npopulation=" + str(self.number_of_particles))
        report_file.write("\nsearch_space_dimensions=" + str(self.search_space_dimensions))
        report_file.write("\nlearning_rate=" + str(self.learning_rate))
        report_file.write("\nalpha=" + str(self.alpha))
        report_file.write("\nepochs=" + str(self.epochs))
        report_file.write("\npatience=" + str(self.patience))
        report_file.write("\nearly_stopping_threshold=" + str(self.early_stopping_threshold))
        report_file.write("\nnum_init_connections=" + str(self.num_init_connections))
        report_file.write("\nconnections_limits=" + str(self.connections_limits))
        report_file.write("\n")
        report_file.write("\n")
        report_file.write("\nConfigurations:")
        for i in self.config_log:
            report_file.write("\n" + i)
        report_file.write("\n")
        report_file.write("\n")
        report_file.write("\nbest particle:")
        report_file.write("\ntrain accuracy: " + str(self.best_model.train_accuracy))
        report_file.write("\ntest accuracy: " + str(self.best_model.test_accuracy))
        report_file.write("\ntest recall: " + str(self.best_model.recall))
        report_file.write("\ntest precision: " + str(self.best_model.precision))
        report_file.write("\ntest f1 score: " + str(self.best_model.f1_score))
        report_file.write("\n\nnumber of connections: " + str(self.best_model.num_connections_tuple))
        report_file.write("\n\nconnected features:")
        for i in connected_features_names:
            report_file.write("\n")
            report_file.write(str(i))
        report_file.write("\n\nfirst connectivity matrix:\n")
        report_file.write(np.array2string(self.global_best_position[0].astype(int)))
        report_file.write("\n")
        report_file.write("\nsecond connectivity matrix:\n")
        report_file.write(np.array2string(self.global_best_position[1].astype(int)))
        report_file.write("\n")
        report_file.write("\nfirst layer weights:\n")
        report_file.write(str(self.best_model.sparse_linear_1.weight))
        report_file.write("\nfirst layer biases:\n")
        report_file.write(str(self.best_model.sparse_linear_1.bias))
        report_file.write("\nsecond layer weights:\n")
        # (the model exposes sparse_linear_1 and sparse_linear2 -- note the inconsistent attribute naming)
        report_file.write(str(self.best_model.sparse_linear2.weight))
        report_file.write("\nsecond layer biases:\n")
        report_file.write(str(self.best_model.sparse_linear2.bias))
        report_file.write("\n\nRuntime: %.2f seconds" % (time.time() - self.start_time))
        report_file.close()
        # Persist the binary position matrices so the model can be rebuilt outside PSO.
        np.savetxt(self.results_path + '/connections_' + self.experiment_id + '_1.txt', self.global_best_position[0])
        np.savetxt(self.results_path + '/connections_' + self.experiment_id + '_2.txt', self.global_best_position[1])
| bazomd/sparse-mlp-structure-learning | python/common/particle_swarm_optim_algorithm.py | particle_swarm_optim_algorithm.py | py | 17,114 | python | en | code | 0 | github-code | 50 |
6959991626 | import numpy as np
import cv2
import msvcrt
import math
from math import pi, sin, cos
from matplotlib import pyplot as plt #显示图像等用,若需要用几个演示函数则启用
from mpl_toolkits.mplot3d import Axes3D #同上
def rendering(dir):
    """Photometric-stereo pipeline: estimate pseudo-normals, relight, reconstruct depth.

    Returns (z, imgs): z is the 168x168 height map, imgs are the 10
    relit images for the test light directions.
    """
    # z shares the x/y scale, matches the test-image size, and maps 1:1 to test-image pixels
    # imgs is the rendering result, same size as the test images, pixel-aligned with them
    z = np.zeros([168, 168])
    img_S = ReadS(dir) # img_S holds the light matrix [3,7]; each column is a unit light-direction vector
    imgx, albedo = ReadX(dir) # imgx[n,7] are the raw images; albedo is the per-pixel mean over the 7 images
    # img_valid is the pixel-validity matrix [n,7]: 1 = valid observation, 0 = invalid
    img_valid = CheckValid(imgx, albedo)
    # img_bg[n]: 1 = pixel is not background (needs enough valid observations);
    # img_validcount[n] stores how many valid observations each pixel has
    img_bg, img_validcount = CheckBG(img_valid)
    img_b = Step1(imgx, img_S, img_valid, img_bg) # img_b[n,3] is the pseudo-normal matrix B
    imgs,testS = render(dir, img_b, img_bg)
    z=inter(img_b)
    z=OptimizeZ(z) # symmetry/scale correction of z
    show3d(z) # display a 3-D model of z
    sh = shadow(z, testS ,img_bg) # shadow estimation
    ShowShadowMap(sh)
    # ShowBGMap(img_bg) # uncomment to inspect which face pixels are classed as background
    # ShowValidMap(img_valid) # uncomment to inspect per-image pixel validity
    # msvcrt.getch()
    return z, imgs
def ReadS(dir):
    """Read the 7 training light directions from <dir>/train.txt.

    Each line holds "index,azimuth,elevation" (degrees).  Returns a
    (3, 7) matrix whose columns are unit light-direction vectors.
    """
    # np.float was removed in NumPy 1.24; float64 is the default dtype anyway.
    tempS = np.zeros([3, 7])
    with open(dir+'/train.txt', 'r') as file:
        for i in range(0, 7):
            lines = file.readline()
            _, a, b = (float(x) for x in lines.split(','))
            # Spherical -> Cartesian (keeps the original 3.1416 approximation of pi).
            tempS[0][i] = math.sin(a*3.1416/180)*(math.cos(b*3.1416/180))
            tempS[1][i] = math.sin(b*3.1416/180)
            tempS[2][i] = math.cos(b*3.1416/180)*(math.cos(a*3.1416/180))
    return tempS
def ReadX(dir):
    """Read the 7 training images from <dir>/train/<k>.bmp.

    Returns:
        imgx:   (168*168, 7) array; column k is training image k,
                transposed then flattened.
        albedo: (168*168,) per-pixel mean intensity over the 7 images,
                used later as a normalisation reference.
    """
    # np.float was removed in NumPy 1.24; float64 is the default dtype anyway.
    train_img_read = np.zeros([168, 168, 7])
    imgx = np.zeros([168*168, 7])
    for i in range(0, 7):
        train_img_read[:, :, i] = cv2.imread(
            dir+'/train/'+str(i+1)+'.bmp', cv2.IMREAD_GRAYSCALE)  # single-channel read
        imgx[:, i] = train_img_read[:, :, i].T.flatten()
    # Vectorised replacement of the original per-pixel Python loop.
    albedo = imgx.mean(axis=1)
    return imgx, albedo
def CheckValid(imgx, albedo):
    """Mark which pixel observations are usable for photometric stereo.

    An observation is invalid when it is very dark relative to the pixel's
    mean intensity (intensity/albedo < 0.1 -- presumably shadowed) or close
    to saturation (intensity > 253).

    Args:
        imgx:   (n, 7) raw intensities.
        albedo: (n,) per-pixel mean intensity.

    Returns:
        (n, 7) boolean array; True = valid observation.
    """
    # np.bool was removed in NumPy 1.24; the result is a plain bool array.
    # Vectorised form of the original double loop.  Pixels with zero albedo
    # (all observations zero) come out invalid, which is the sensible result
    # for signal-free pixels.
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = imgx / albedo[:, np.newaxis]
    return (ratio >= 0.1) & (imgx <= 253)
def ShowValidMap(img_valid):
    """Display each of the 7 per-image validity masks as a grayscale image (debug aid).

    Blocks on a keypress (msvcrt is Windows-only) after each figure.
    """
    img_validmap = img_valid.reshape((168, 168, 7))
    for i in range(0, 7):
        plt.figure("Image")
        # Bug fix: the loop previously always displayed slice 1; show slice i.
        plt.imshow(img_validmap[:, :, i], cmap='gray')
        plt.axis('on')
        plt.title('image')
        plt.show()
        msvcrt.getch()
def ShowShadowMap(sh):
    """Display the 10 shadow masks one at a time (debug aid).

    Blocks on a keypress after each figure; msvcrt is Windows-only.
    """
    # sh = sh.reshape((10,168, 168))
    for i in range(0, 10):
        plt.figure("Image")
        plt.imshow(sh[i,:,:], cmap='gray')
        plt.axis('on')
        plt.title('image')
        plt.show()
        msvcrt.getch()
def CheckBG(img_valid):
    """Classify pixels as foreground/background from their valid-observation counts.

    A pixel is treated as foreground when at least 4 of its 7 observations
    are valid.  (NOTE(review): the caller's comment mentions a threshold of
    3 valid values -- confirm which threshold is intended.)

    Args:
        img_valid: (n, 7) validity flags.

    Returns:
        img_bg:         (n,) uint8; 1 = foreground (not background).
        img_validcount: (n,) uint8 count of valid observations per pixel.

    Bug fix: the original rebound ``img_validcount`` to the last pixel's
    scalar count (``img_validcount = count``) instead of filling the array.
    """
    counts = np.asarray(img_valid, dtype=bool).sum(axis=1)
    img_bg = (counts >= 4).astype(np.uint8)
    img_validcount = counts.astype(np.uint8)
    return img_bg, img_validcount
def ShowBGMap(img_bg):
    """Display the foreground/background mask as a 168x168 grayscale image (debug aid)."""
    temp = img_bg.reshape((168, 168))
    plt.figure("Image")
    plt.imshow(temp, cmap='gray')
    plt.axis('on')
    plt.title('image')
    plt.show()
def Step1(imgx, img_S, img_valid, img_bg):
    """Least-squares estimate of the pseudo-normal matrix B (n x 3).

    For each pixel, invalid observations and their light directions are
    zeroed out, then b_i = x_i * pinv(S) solves x_i = b_i * S in the
    least-squares sense.

    Args:
        imgx:      (n, 7) raw intensities (not modified).
        img_S:     (3, 7) light-direction matrix.
        img_valid: (n, 7) validity flags.
        img_bg:    (n,) foreground mask (currently unused; kept for API compatibility).

    Returns:
        (n, 3) pseudo-normal matrix B (albedo-scaled normals).
    """
    # np.float was removed in NumPy 1.24; float64 is the default dtype anyway.
    img_b = np.zeros([168*168, 3])
    for i in range(0, 168*168):
        # Bug fix: imgx[i, :] is a view, so zeroing it in place silently
        # mutated the caller's imgx -- work on a copy instead.
        xi = imgx[i, :].copy()
        mi = img_valid[i, :]
        S = img_S.copy()
        for j in range(0, 7):
            if mi[j] == 0:  # mask out invalid observations
                xi[j] = 0
                S[:, j] = 0
        img_b[i] = np.dot(xi, np.linalg.pinv(S))
    return img_b
def render(dir, img_b, img_bg):
    """Relight the scene for the 10 test light directions in <dir>/test.txt.

    Each test.txt line holds "index,azimuth,elevation" (degrees).  Image i
    is the per-pixel dot product of B with test light i, reshaped to
    168x168 (transposed back to image layout).

    Args:
        dir:    directory containing test.txt.
        img_b:  (168*168, 3) pseudo-normal matrix B.
        img_bg: (n,) foreground mask (currently unused; kept for API compatibility).

    Returns:
        imgs:  (10, 168, 168) uint8 rendered images.
        testS: (3, 10) test light-direction matrix (columns are unit vectors).
    """
    # np.float was removed in NumPy 1.24; float64 is the default dtype anyway.
    imgs = np.zeros([10, 168, 168])
    testS = np.zeros([3, 10])
    with open(dir+'/test.txt', 'r') as file:
        for i in range(0, 10):
            lines = file.readline()
            _, a, b = (float(x) for x in lines.split(','))
            testS[0][i] = math.sin(a*3.1416/180)*(math.cos(b*3.1416/180))
            testS[1][i] = math.sin(b*3.1416/180)
            testS[2][i] = math.cos(b*3.1416/180)*(math.cos(a*3.1416/180))
    for i in range(0, 10):
        imgs[i, :, :] = (np.dot(img_b, testS[:, i]).reshape((168, 168))).T
    # Bug fix: clamp to the displayable range before the uint8 cast --
    # negative or >255 intensities would otherwise wrap around and produce
    # artifacts (the previously commented-out code shows clamping was intended).
    imgs = np.clip(imgs, 0, 255).astype(np.uint8)
    return imgs, testS
def shadow(Z, s,img_bg):
    """Ray-cast cast-shadow masks of the height map Z for the 10 test lights.

    For each foreground pixel (image coords i, j map to surface coords
    x0 = i, y0 = 167 - j) the ray towards the light (a, b, c) = s[:, im]
    is traced across the grid; if the surface rises above the ray at any
    sampled point, the pixel is flagged as shadowed (sh = 1).  A final
    pass softens shadow-boundary pixels to 0.5.

    NOTE(review): np.float was removed in NumPy 1.24, so the allocation
    below fails on modern NumPy (plain ``float`` is equivalent).
    NOTE(review): loops of the form ``range(x0-1, 0)`` / ``range(y0, 0)``
    use the default +1 step and are empty whenever start > stop, so those
    scan directions never execute -- a ``-1`` step was probably intended;
    confirm before relying on the leftward/downward scans.
    """
    sh = np.zeros([10,168,168]).astype(np.float)
    for im in range(0,10):
        a=s[0,im]
        b=s[1,im]
        c=s[2,im]
        for i in range(0, 168):
            for j in range(0, 168):
                if (img_bg[i*168+j]):
                    x0 = i
                    y0 = 167-j
                    if a > 0 and b < 0: # light arrives from the upper left
                        for m in range(x0-1, 0): # scan x from x0-1 towards 0
                            n = b*(m-x0)/a+y0
                            if(math.ceil(n) <= 167): # ceil(y) still within the upper bound
                                if(Z[m, 167-math.ceil(n)] > c*(m-x0)/a+Z[x0, y0]):
                                    sh[im,i, j] = 1
                                    break; # (x0,y0) is occluded; stop scanning
                                if(Z[m, 167-math.floor(n)] > c*(m-x0)/a+Z[x0, y0]):
                                    sh[im,i, j] = 1
                                    break # (x0,y0) is occluded; stop scanning
                            elif(math.floor(n))<=167: # ceil(y) out of range but floor(y) still in range
                                if(Z[m,167-math.floor(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            else:# y beyond the upper bound: cannot be occluded any more
                                break
                        for n in range(y0+1,167): # scan y from y0+1 up to 167
                            m=a*(n-y0)/b+x0
                            if(math.floor(m)>=0):# floor(x) still within the lower bound
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            elif(math.ceil(n)>=0): # floor(x) out of range but ceil(x) still in range
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            else:# x beyond the lower bound: cannot be occluded any more
                                break
                    elif a>0 and b>0:# light arrives from the lower left
                        for m in range(x0-1,0):
                            n=b*(m-x0)/a+y0
                            if(math.floor(n)>=0): # floor(y) still within the lower bound
                                if(Z[m,167-math.ceil(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                                if(Z[m,167-math.floor(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            elif(math.ceil(n)>=0):# floor(y) out of range but ceil(y) still in range
                                if(Z[m,167-math.ceil(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            else:# y beyond the bound: cannot be occluded any more
                                break
                        for n in range(y0,0):# scan y downwards
                            m=a*(n-y0)/b+x0
                            if(math.floor(m)>=0):# floor(x) within the left boundary
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            elif(math.ceil(n)>=0):# floor(x) out of range but ceil(x) still in range
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            else:# x beyond the bound: cannot be occluded any more
                                break
                    elif a<0 and b<0:# light arrives from the upper right
                        for m in range(x0+1,167):
                            n=b*(m-x0)/a+y0
                            if(math.ceil(n)<=167):# ceil(y) within the upper bound
                                if(Z[m,167-math.ceil(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                                if(Z[m,167-math.floor(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            elif(math.floor(n)<=167):# ceil(y) out of range but floor(y) still in range
                                if(Z[m,167-math.floor(n)]>c*(m-x0)/a+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            else:# y beyond the upper bound: cannot be occluded any more
                                break
                        for n in range(y0+1,167):# scan y upwards
                            m=a*(n-y0)/b+x0
                            if(math.ceil(m)<=167):# ceil(x) within the upper bound
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            elif(math.floor(m)<=167):# ceil(x) out of range but floor(x) still in range
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            else: # x beyond the upper bound: cannot be occluded any more
                                break
                    else:# light arrives from the lower right
                        for m in range(x0+1,167):
                            n=b*(m-x0)/a+y0
                            if n<167 and n>0:
                                if(math.floor(n)>=0):# floor(y) within the lower bound
                                    if(Z[m,167-math.ceil(n)]>c*(m-x0)/a+Z[x0,y0]):
                                        sh[im,i,j]=1
                                        break # (x0,y0) is occluded; stop scanning
                                    if(Z[m,167-math.floor(n)]>c*(m-x0)/a+Z[x0,y0]):
                                        sh[im,i,j]=1
                                        break# (x0,y0) is occluded; stop scanning
                                elif(math.ceil(n)<=167):# floor(y) out of range but ceil(y) still in range
                                    if(Z[m,167-math.ceil(n)]>c*(m-x0)/a+Z[x0,y0]):
                                        sh[im,i,j]=1
                                        break # (x0,y0) is occluded; stop scanning
                                else: # y beyond the bound: cannot be occluded any more
                                    break
                        for n in range(y0,0):# scan y downwards
                            m=a*(n-y0)/(b-0.00001)+x0
                            if m<0:
                                continue
                            if(math.ceil(m)<=167): # ceil(x) within the upper bound
                                if(Z[math.ceil(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break # (x0,y0) is occluded; stop scanning
                            elif(math.floor(n)<=167):# ceil(x) out of range but floor(x) still in range
                                if(Z[math.floor(m),167-n]>c*(n-y0)/b+Z[x0,y0]):
                                    sh[im,i,j]=1
                                    break# (x0,y0) is occluded; stop scanning
                            else:# x beyond the upper bound: cannot be occluded any more
                                break
    for im in range(0,10):
        for i in range(1,167):
            for j in range(1,167):
                if(sh[im,i,j]==1) and (sh[im,i-1,j]==0 or sh[im,i+1,j]==0 or sh[im,i,j-1]==0 or sh[im,i,j+1]==0): # rough check for a shadow-edge pixel
                    sh[im,i,j]=0.5
    return sh
def show3d(z):
    """Render the reconstructed 168x168 height map as a 3-D surface plot (debug aid)."""
    x = np.linspace(0,167,168)
    y = np.linspace(0,167,168)
    X, Y = np.meshgrid(x, y)
    # NOTE(review): figsize is in inches; (200, 200) is enormous and likely unintended -- confirm.
    fig = plt.figure(figsize=(200, 200))
    ax = Axes3D(fig)
    surf = ax.plot_surface(X, Y, z,
                           rstride=1,
                           cstride=1,
                           cmap=plt.get_cmap('rainbow'))
    ax.set_zlim(-50, 300)
    plt.title("3D")
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
def OptimizeZ(Z):
    """Rescale the reconstructed height map to a fixed depth range.

    The map is multiplied by a scale factor so that its peak-to-valley
    span becomes exactly 180 (same units as x/y).  A left/right symmetry
    averaging step that used to live here is intentionally disabled.

    Returns a new, rescaled array; the input is not modified.
    """
    peak = np.max(Z)
    valley = np.min(Z)
    target_depth = 180
    stretch = target_depth / (peak - valley)
    return Z * stretch
def inter(b):
    """Integrate the pseudo-normal field B into a 168x168 height map Z.

    Gradients z_x = -b1/b3 and z_y = -b2/b3 are computed, outlier
    gradients are neighbour-averaged, the fields are mirrored to double
    size for FFT periodicity, and the surface is recovered in the
    Fourier domain (a regularised, Frankot/Chellappa-style integration).
    """
    h = w = 168
    # NOTE(review): j only takes values 0..2, so ``j != 3`` is always true and
    # the first comprehension is a no-op copy; the second one is what actually
    # replaces zeros (in every column) with a tiny epsilon so the divisions
    # below cannot hit zero.
    b = np.array([[b[i, j] if b[i, j] != 0 or j != 3 else 0.00000000000000000001 for j in range(3)] for i in range(w * h)])
    b = np.array([[b[i, j] if b[i, j] != 0 or j == 3 else 0.00000000000000000000000000000000000000000000000000000000000001 for j in range(3)] for i in range(w * h)])
    # Surface gradients from the pseudo-normals, transposed into image layout.
    z_x = np.reshape(-b[:, 0] / b[:, 2] , (h, w))
    z_y = np.reshape(-b[:, 1] / b[:, 2], (h, w))
    z_x=z_x.T
    z_y=z_y.T
    max = 1  # gradient-magnitude threshold (NOTE: shadows the builtin ``max``)
    lam = 1  # weight of the gradient term in the per-frequency denominator
    u = 3    # weight of the squared (curvature) term in the denominator
    # Replace outlier gradients with the average of their 4 neighbours.
    for k in range(1, w - 1):
        for l in range(1, h - 1):
            if abs(z_x[l, k])>max or abs(z_y[l, k]) > max:
                z_x[l, k] = (z_x[l - 1, k] + z_x[l + 1, k] + z_x[l, k + 1] + z_x[l, k - 1]) / 4
                z_y[l, k] = (z_y[l - 1, k] + z_y[l + 1, k] + z_y[l, k + 1] + z_y[l, k - 1]) / 4
    # Mirror the gradient fields to 2h x 2w so the FFT sees a periodic signal.
    zz_x = np.zeros((h*2, w*2))
    zz_x[0 : h, 0 : w] = z_x[:, :]
    zz_x[h : 2 * h, 0 : w] = z_x[h - 1 : : -1]
    zz_x[:, w : w * 2] = zz_x[:, w - 1 : : -1]
    zz_y = np.zeros((h*2,w*2))
    zz_y[0 : h, 0 : w] = z_y[:, :]
    zz_y[h : 2 * w, 0 : w] = z_y[h - 1 : : -1]
    zz_y[:, w : w * 2] = zz_y[:, w - 1 : : -1]
    z_x = zz_x
    z_y = zz_y
    # print(z_y.size)
    h = h * 2
    w = w * 2
    # Second outlier-smoothing pass on the mirrored fields.
    for i in range(1, w - 1):
        for j in range(1, h - 1):
            if abs(z_x[j, i]) > max or abs(z_y[j, i]) > max:
                z_x[j, i] = (z_x[j - 1, i] + z_x[j + 1, i] + z_x[j, i + 1] + z_x[j, i - 1]) / 4
                z_y[j, i] = (z_y[j - 1, i] + z_y[j + 1, i] + z_y[j, i + 1] + z_y[j, i - 1]) / 4
    C_x = np.fft.fft2(z_x)
    C_y = np.fft.fft2(z_y)
    C = np.zeros((h, w)).astype('complex')
    C_xx = np.zeros((h, w)).astype('complex')
    C_yy = np.zeros((h, w)).astype('complex')
    # Solve for the surface spectrum C frequency-by-frequency.  Bins where
    # both sines vanish are left at 0 (the absolute height offset is
    # unobservable from gradients).
    for m in range(w):
        for n in range(h):
            wx = 2 * pi * m / w
            wy = 2 * pi * n/ h
            if sin(wx) == 0 and sin(wy) == 0:
                C[n, m] = 0
            else:
                cons=(1 + lam) * (sin(wx) *sin(wx) + sin(wy) *sin(wy)) + u * (sin(wx) *sin(wx) + sin(wy) *sin(wy)) ** 2
                C[n, m]=(C_x[n, m] * (complex(0, -1) * sin(wx)) + C_y[n, m] * (complex(0, -1) * sin(wy))) / cons
                C_xx[n, m] = complex(0, 1) * sin(wx) * C[n, m]
                C_yy[n, m] = complex(0, 1) * sin(wy) * C[n, m]
    h = h // 2
    w = w // 2
    # Back to the spatial domain; keep only the original (un-mirrored) quadrant.
    Z = np.fft.ifft2(C).real
    Z = Z[0 : h, 0 : w]
    Z_xx = np.fft.ifft2(C_xx).real
    Z_yy = np.fft.ifft2(C_yy).real
    Z_xx = Z_xx[0 : h, 0 : w]
    Z_yy = Z_yy[0 : h, 0 : w]
    # Final smoothing pass where the recovered gradients are still outliers.
    for i in range(1, w - 1):
        for j in range(1, h - 1):
            if abs(Z_xx[j, i]) > max or abs(Z_yy[j, i]) > max:
                Z_xx[j, i] = (Z_xx[j - 1, i] + Z_xx[j + 1, i] + Z_xx[j, i + 1] + Z_xx[j, i - 1]) / 4
                Z_yy[j, i] = (Z_yy[j - 1, i] + Z_yy[j + 1, i] + Z_yy[j, i + 1] + Z_yy[j, i - 1]) / 4
                Z[j, i] = (Z[j - 1, i] + Z[j + 1, i] + Z[j, i + 1] + Z[j, i - 1]) / 4
    # print(Z)
    return Z
def Redefine(B1):
    """Reorder the pseudo-normal components of B into (y, z, |x|) layout.

    Column mapping: out[:, 0] = B1[:, 1], out[:, 1] = B1[:, 2],
    out[:, 2] = abs(B1[:, 0]).  The input is not modified.

    Generalized to any (n, 3) input; the original hard-coded n = 168*168.
    """
    B = B1.copy()
    B2 = np.zeros((B.shape[0], 3))
    B2[:, 0] = B[:, 1]
    B2[:, 1] = B[:, 2]
    B2[:, 2] = np.abs(B[:, 0])
    return B2
| MTCXin/Face-Image-Rendering-and-Reconstruction | rendering.py | rendering.py | py | 21,607 | python | en | code | 0 | github-code | 50 |
11125464700 | from horovod.ray import RayExecutor
import horovod.torch as hvd
import ray
# Start the Ray cluster or attach to an existing Ray cluster
# ('auto' requires a cluster already started, e.g. via `ray start`)
ray.init(address='auto')
num_workers = 4
# Start num_hosts * num_slots actors on the cluster
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(settings, num_workers=num_workers, use_gpu=True)
# Launch the Ray actors on each machine
# This will launch `num_slots` actors on each machine
executor.start()
# Using the stateless `run` method, a function can take in any args or kwargs
def simple_fn():
    """Initialise Horovod on the worker, print its ranks, and return the global rank."""
    hvd.init()
    print("hvd rank", hvd.rank(), "hvd local rank", hvd.local_rank())
    return hvd.rank()
# Execute the function on all workers at once
result = executor.run(simple_fn)
# Check that the rank of all workers is unique
assert len(set(result)) == num_workers
executor.shutdown()
| chongxiaoc/ray-examples | horovod/ray_start.py | ray_start.py | py | 859 | python | en | code | 0 | github-code | 50 |
39283561437 | #!/usr/bin/python3
""" Wavefront obj model loading. Material properties set in
mtl file. Uses the import pi3d method to load *everything*
"""
import demo
import pi3d
from math import sin, cos, radians
# Setup display and initialise pi3d
DISPLAY = pi3d.Display.create()
# Fetch key presses (evdev input; used for ESC and mouse steering in main())
inputs=pi3d.InputEvents()
def main():
    """Spin a bump/reflection-mapped teapot inside an environment cube.

    The camera is steered by mouse movement; the loop runs until the
    display closes or ESC is pressed (checked by the caller's loop guard).
    """
    #Model textures and shaders
    shader = pi3d.Shader("uv_reflect")
    bumptex = pi3d.Texture("textures/floor_nm.jpg")
    shinetex = pi3d.Texture("textures/stars.jpg")
    # load model
    mymodel = pi3d.Model(file_string='models/teapot.obj', z=10)
    mymodel.set_shader(shader)
    mymodel.set_normal_shine(bumptex, 4.0, shinetex, 0.5)
    #Create environment box
    flatsh = pi3d.Shader("uv_flat")
    ectex=pi3d.loadECfiles("textures/ecubes","sbox")
    myecube = pi3d.EnvironmentCube(size=900.0, maptype="FACES",
                                    name="cube")
    myecube.set_draw_details(flatsh, ectex)
    CAMERA = pi3d.Camera.instance()
    rot = 0.0 # rotation of camera
    tilt = 0.0 # tilt of camera
    while DISPLAY.loop_running() and not \
        inputs.key_state("KEY_ESC"):
        #Rotate camera
        inputs.do_input_events()
        # camera steered by mouse
        #Note:Some mice devices will be located on
        #get_mouse_movement(1) instead of get_mouse_movement()
        mx,my,mv,mh,md=inputs.get_mouse_movement()
        #mx,my,mv,mh,md=inputs.get_mouse_movement(1)
        rot -= (mx)*0.2
        tilt -= (my)*0.2
        CAMERA.reset()
        CAMERA.rotate(tilt, rot, 0)
        #Rotate object
        mymodel.rotateIncY(2.0)
        mymodel.rotateIncZ(0.1)
        mymodel.rotateIncX(0.3)
        #Draw objects
        mymodel.draw()
        myecube.draw()
try:
    main()
finally:
    # Always release input devices and the GL display, even on error or ESC.
    inputs.release()
    DISPLAY.destroy()
    print("Closed Everything. END")
#End
| PacktPublishing/Raspberry-Pi-3-Cookbook-for-Python-Programmers-Third-Edition | Chapter07/3dModel.py | 3dModel.py | py | 1,767 | python | en | code | 26 | github-code | 50 |
5698550979 | """Loop through all "hadded" data files and save the total number of coincidences."""
import argparse
from collections import defaultdict
import itertools as it
import json
import multiprocessing
import numpy as np
import common
import delayeds
import adevent
def one_file(run_key, data_file_path, energy_lookup, bin_edges):
import ROOT
run, site, ad = run_key
if run % 10 == 0:
print(run_key)
data_file_name = data_file_path.format(run=run, site=site, ad=ad)
data_file = ROOT.TFile(data_file_name, 'READ')
ad_events = data_file.Get('ad_events')
bin_edges = np.array(bin_edges)
num_bins = len(bin_edges) - 1
low_edge = bin_edges[0]
up_edge = bin_edges[-1]
nominal_hist = ROOT.TH1F("nominal_hist", "nominal_hist", num_bins, low_edge, up_edge)
nominal_hist.GetXaxis().Set(num_bins, bin_edges)
adtime_hist = ROOT.TH1F("adtime_hist", "adtime_hist", num_bins, low_edge, up_edge)
adtime_hist.GetXaxis().Set(num_bins, bin_edges)
delayed_min, delayed_max = energy_lookup['nominal', site, ad]
num_coincidences_nominal = ad_events.Draw('energy[0] >> nominal_hist',
'multiplicity == 2 && '
f'energy[0] < {adevent._EMAX_THU} && '
f'energy[1] > {delayed_min} && energy[1] < {delayed_max} && '
f'{delayeds._NH_THU_DIST_TIME_CUT_STR}',
'goff'
)
delayed_min, delayed_max = energy_lookup['adtime', site, ad]
num_coincidences_adtime = ad_events.Draw('energy[0] >> adtime_hist',
'multiplicity == 2 && '
f'energy[0] < {adevent._EMAX_THU} && '
f'energy[1] > {delayed_min} && energy[1] < {delayed_max} && '
f'dr_to_prompt_AdTime[1] + {delayeds._NH_THU_DIST_TIME_CONST} '
f' * dt_to_prompt[1] < {delayeds._NH_THU_DIST_TIME_MAX}',
'goff'
)
nominal_spec_list = []
adtime_spec_list = []
for i in range(num_bins):
nominal_spec_list.append(nominal_hist.GetBinContent(i + 1))
adtime_spec_list.append(adtime_hist.GetBinContent(i + 1))
return (
run,
ad,
num_coincidences_nominal,
num_coincidences_adtime,
nominal_spec_list,
adtime_spec_list,
)
def main(
main_database,
data_file_path,
binning_id,
save_spectrum,
save_total,
labels,
spectrum_labels,
update_db
):
import ROOT
# Fetch all triplets of RunNo, Hall, DetNo to use to find files
with common.get_db(main_database) as conn:
cursor = conn.cursor()
cursor.execute('''SELECT RunNo, Hall, DetNo
FROM runs NATURAL JOIN hall_dets
WHERE (RunNo, DetNo) IN (SELECT RunNo, DetNo FROM muon_rates)
ORDER BY RunNo, Hall, DetNo''')
run_keys = cursor.fetchall()
cursor.execute('''
SELECT
BinEdgeEnergy_keV
FROM
reco_binnings
WHERE
Id = ?
ORDER BY
BinEdgeIndex
''',
(binning_id,)
)
# returned as list of 1-tuples in keV
bin_edges = [x[0]/1000 for x in cursor.fetchall()]
# Look up delayed energy cuts
with common.get_db(main_database) as conn:
cursor = conn.cursor()
cursor.execute('''SELECT Hall, DetNo, Peak - 3 * Resolution,
Peak + 3 * Resolution
FROM delayed_energy_fits
WHERE Source = ?''', (labels['nominal'],))
nominal_energy_bounds = cursor.fetchall()
cursor.execute('''SELECT Hall, DetNo, Peak - 3 * Resolution,
Peak + 3 * Resolution
FROM delayed_energy_fits
WHERE Source = ?''', (labels['adtime'],))
adtime_energy_bounds = cursor.fetchall()
energy_lookup = {}
for site, ad, low_bound, up_bound in nominal_energy_bounds:
energy_lookup['nominal', site, ad] = (low_bound, up_bound)
for site, ad, low_bound, up_bound in adtime_energy_bounds:
energy_lookup['adtime', site, ad] = (low_bound, up_bound)
def arg_generator():
for run_key in run_keys:
yield (run_key, data_file_path, energy_lookup, bin_edges)
with multiprocessing.Pool() as pool:
results = pool.starmap(one_file, arg_generator())
# Set up dicts to hold combined histograms
# The first time a (site, ad) pair is encountered it will produce 0 (int() == 0)
# and numpy will add the array to 0 to produce the first stored array.
adsimple_hist = defaultdict(int)
adtime_hist = defaultdict(int)
# Combine result histograms
for (run, site, ad), result in zip(run_keys, results):
adsimple_hist[site, ad] += np.array(result[4], dtype=int)
adtime_hist[site, ad] += np.array(result[5], dtype=int)
if update_db:
spectrum_rows = []
for (site, ad), spectrum in adsimple_hist.items():
spectrum_rows.append((
site,
ad,
binning_id,
json.dumps(spectrum.tolist()),
spectrum_labels['adsimple'],
))
for (site, ad), spectrum in adtime_hist.items():
spectrum_rows.append((
site,
ad,
binning_id,
json.dumps(spectrum.tolist()),
spectrum_labels['adtime'],
))
results_nominal = [(*result[:3], labels['nominal']) for result in results]
results_adtime = [(*result[:2], result[3], labels['adtime']) for result in results]
with common.get_db(main_database) as conn:
cursor = conn.cursor()
if save_total:
cursor.executemany('''INSERT OR REPLACE INTO num_coincidences_by_run
VALUES (?, ?, ?, ?)''', results_nominal + results_adtime)
if save_spectrum:
cursor.executemany('''
INSERT OR REPLACE INTO
num_coincidences
VALUES
(?, ?, ?, ?, ?)
''',
spectrum_rows
)
else:
print(results[:10])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('main_database', help='Database for runs & for results')
parser.add_argument('--data-file-path',
help='File path template for run-by-run data files, e.g. '
'/home/skohn/EH{site}/hadded_ad{ad}/out_ad{ad}_{run}.root'
)
parser.add_argument('--binning-id', type=int)
parser.add_argument('--spectrum', action='store_true')
parser.add_argument('--total', action='store_true')
parser.add_argument('--update-db', action='store_true')
parser.add_argument('--label-nominal', required=True)
parser.add_argument('--label-adtime', required=True)
args = parser.parse_args()
if not (args.total or args.spectrum):
raise ValueError("Must specify at least one of --total or --spectrum")
main(args.main_database, args.data_file_path,
args.binning_id, args.spectrum, args.total,
{'nominal': args.label_nominal, 'adtime': args.label_adtime},
args.update_db
)
| samkohn/dyb-event-selection | dyb_analysis/event_selection/compute_num_coincidences.py | compute_num_coincidences.py | py | 7,131 | python | en | code | 0 | github-code | 50 |
1387659283 | import sys
import logging
import numpy as np
class Sent_embedding_model:
    ''' wrapper for sentence embedding models '''

    # Each supported backend lives in its own module; every module exposes the
    # same two entry points: embedder_init and embedder_infer_all.
    _EMBEDDER_MODULES = {
        'bow': 'models.sent_emb._bow',
        'bow_pp': 'models.sent_emb._bow_pp',
        'infersent': 'models.sent_emb._infersent',
        'bert': 'models.sent_emb._bert',
        'bert-whitening': 'models.sent_emb._bert_whitening',
        'bert-flow': 'models.sent_emb._bert_flow',
        'sentence_bert': 'models.sent_emb._sentence_bert',
        'simcse': 'models.sent_emb._simcse',
        'customize': 'models.sent_emb._customize',
    }

    def __init__(self, config) -> None:
        ''' Select the embedding backend named by config.sent_emb_model and initialise it. '''
        assert config.sent_emb_model != None, "Must specific the sentence embedding model"

        self.config = config
        self.sent2id = None     # sentence -> row index into self.sents_embs
        self.sents_embs = None  # cached embedding matrix, one row per sentence

        logging.info("")
        logging.info("*** Model Initialization ***")

        module_path = self._EMBEDDER_MODULES.get(config.sent_emb_model)
        if module_path is None:
            sys.exit("Sentence embedding model NOT SUPPORTED: {}".format(config.sent_emb_model))

        # Import lazily so only the chosen backend's dependencies are loaded.
        backend = __import__(module_path, fromlist=['embedder_init', 'embedder_infer_all'])

        self.embedder_init = backend.embedder_init
        self.embedder_init(self, config)
        self.embedder_infer_all = backend.embedder_infer_all

    def embedder_all(self, sent_list, normalization=True, centralization=False):
        ''' Compute (and cache) embeddings for every sentence in sent_list. '''
        return self.embedder_infer_all(self, sent_list, normalization=normalization, centralization=centralization)

    # general methods = = = = = = = = = = = = = = = = = = = = = = = = = = = =

    def embed(self, sents):
        '''
        Look up previously computed embeddings.

        input:  list of sentences (each must already be in self.sent2id)
        output: stacked embedding matrix, one row per input sentence
        '''
        rows = []
        for sent in sents:
            assert sent in self.sent2id, "Sentence must be pre-emb for retrieving (original): {}".format(sent)
            rows.append(self.sents_embs[self.sent2id[sent]])
        return np.stack(rows)

    def normalizing_sent_vectors(self):
        ''' Scale every cached embedding to unit L2 length (no-op if nothing is cached). '''
        if self.sents_embs is not None:
            norms = np.linalg.norm(self.sents_embs, axis=1)
            self.sents_embs = self.sents_embs / norms[:, np.newaxis]
| BinWang28/EvalRank-Embedding-Evaluation | src/s_models.py | s_models.py | py | 3,359 | python | en | code | 35 | github-code | 50 |
42269348578 | import torch.nn as nn
import os
from os.path import join
import json
import copy
import torch
from PIL import Image
from collections import Counter
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
import time
import torch.optim as optim
from torchvision import transforms
__author__ = 'bxv7657@rit.edu'
class MLBDataset(Dataset):
    """Data loader to train the multi-task (two-digit) network.

    For numbers 0-9 the annotation has a single character, so a '0' is
    prepended as the first (tens) digit label.
    """
    def __init__(self,
                 dirpath,
                 tag,
                 img_w, img_h,
                 transforms,
                 batch_size,
                 max_text_len=2):
        """
        :param: dirpath: Path to the folder with images and annotations.
        :param: tag: 'train' , 'val': Flag to route train/validation data sample return.
        :param: img_w: Return Image width.
        :param: img_h: Return Image height.
        :param: transforms: torchvision transform applied to each image.
        :param: batch_size: Batch size (stored for reference only).
        :param: max_text_len: Number of digit labels per sample, default 2 (range 0-99).
        :return: [transformed image tensor, int (tens digit), int (units digit)]
        """
        self.img_h = img_h
        self.img_w = img_w
        self.tag = tag
        self.transform = transforms
        self.max_text_len = max_text_len
        self.img_dirpath = join(dirpath, 'img')
        self.ann_dirpath = join(dirpath, 'ann')
        self.samples = []
        for filename in os.listdir(self.img_dirpath):
            name, ext = os.path.splitext(filename)
            if ext in ['.png', '.jpg']:
                img_filepath = join(self.img_dirpath, filename)
                json_filepath = join(self.ann_dirpath, name + '.json')
                ann = json.load(open(json_filepath, 'r'))
                description = ann['description']
                if len(description) != max_text_len:
                    # Single-digit label: prepend '0' so the first branch
                    # always has a target (e.g. "7" -> ['0', '7']).
                    new_description = ['0', description]
                    description = new_description
                tags = ann['tags'][0]
                # Only keep samples belonging to this split ('train' or 'val').
                if not (self.tag == tags):
                    continue
                self.samples.append([img_filepath, description])

        self.n = len(self.samples)
        self.indexes = list(range(self.n))
        self.batch_size = batch_size

    def __len__(self):
        # Number of samples in this split.
        return self.n

    def __getitem__(self, idx):
        img_filepath, description = self.samples[idx]
        img = Image.open(img_filepath).convert('L')
        # Bug fix: PIL's resize() expects (width, height); the original passed
        # (img_h, img_w), which only worked because the images are square.
        img = img.resize((self.img_w, self.img_h))
        return self.transform(img), int(description[0]), int(description[1])

    def get_counter(self):
        """
        Provides a summary of the dataset: returns a Counter of digit
        frequencies and prints the maximum label length for this split.
        """
        # path to the directory containing data
        dirname = os.path.basename(self.img_dirpath)
        digits = ''
        lens = []
        # Bug fix: the original looked in <img_dir>/ann, which does not exist;
        # the annotations live in self.ann_dirpath (<dirpath>/ann), the same
        # directory __init__ reads from.
        for filename in os.listdir(self.ann_dirpath):
            json_filepath = join(self.ann_dirpath, filename)
            ann = json.load(open(json_filepath, 'r'))  # a dictionary with metadata for the image
            tags = ann['tags'][0]  # whether this image is part of the validation or train set
            if self.tag == tags:
                description = ann['description']  # the description is the number present in image
                lens.append(len(description))
                digits += description  # accumulate digits for frequency counting
        print('The maximum no. of digits in the dataset is: "%s"' % dirname, max(Counter(lens).keys()))
        return Counter(digits)  # frequency of each digit character in this split
return Counter(digits) # returns the length of digits in the image
class Net(nn.Module):
    """CNN with a shared trunk and two classification heads.

    forward() returns (log_probs_digit1, log_probs_digit2), each (N, 10),
    for the tens and units digit respectively. Expects (N, 1, 60, 60) input.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 40, kernel_size=5)
        # NOTE(review): conv1_bn is created but never used in forward();
        # kept so existing checkpoints (state_dict keys) still load.
        self.conv1_bn = nn.BatchNorm2d(40)
        self.conv2 = nn.Conv2d(40, 80, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.conv3 = nn.Conv2d(80, 160, kernel_size=5)
        self.avg_p = nn.AvgPool2d(kernel_size=4)
        self.fc1 = nn.Linear(640, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)   # head 1: first digit
        self.fc4 = nn.Linear(128, 10)   # head 2: second digit

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))            # 60 -> 28
        x = F.relu(F.max_pool2d((self.conv2_drop(self.conv2(x))), 2))  # 28 -> 12
        x = F.relu(self.conv3(x))                             # 12 -> 8
        x = self.avg_p(x)                                     # 8 -> 2; 160*2*2 = 640
        x = x.view(-1, 640)
        x = F.relu(self.fc1(x))
        # Bug fix: dropout was hard-coded training=True, so it stayed active
        # during evaluation; respect the module's train/eval mode instead.
        x = F.dropout(x, p=0.2, training=self.training)
        out = self.fc2(x)
        out1 = self.fc3(out)
        out2 = self.fc4(out)
        return F.log_softmax(out1, dim=1), F.log_softmax(out2, dim=1)
class JointLoss(nn.Module):
    """
    Cross-entropy loss used for joint training of both digit branches.

    Bug fix: F.cross_entropy already returns the batch mean by default, so
    the original size_average flag had no effect (.mean()/.sum() of a scalar
    is a no-op). Computing per-sample losses with reduction='none' makes the
    flag meaningful while keeping the default (size_average=True) result
    identical.
    """
    def __init__(self):
        super(JointLoss, self).__init__()

    def forward(self, output, labels, size_average=True):
        # Per-sample cross-entropy; shape (N,).
        losses = F.cross_entropy(output, labels, reduction='none')
        return losses.mean() if size_average else losses.sum()
def train_model(model, criterion, optimizer, num_epochs):
    """
    Train the multi-task model, tracking the best validation accuracy.

    Relies on module-level globals `data_lo` (phase -> DataLoader) and
    `dataset_sizes` (phase -> sample count), and assumes a CUDA device.
    Returns (model with best weights loaded, best val accuracy, per-phase
    epoch losses).
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    e_losses = []
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        losses = []  # per-batch losses (collected but not returned)
        for phase in ['train','val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            running_corrects_1 = 0  # first-digit head hits
            running_corrects_2 = 0  # second-digit head hits
            for inputs, labels1, labels2 in data_lo[phase]:
                labels1 = labels1.cuda()
                labels2 = labels2.cuda()
                optimizer.zero_grad()
                # Only build the autograd graph during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    output_1, output_2 = model(inputs.cuda())
                    _, preds_1 = torch.max(output_1, 1)
                    _, preds_2 = torch.max(output_2, 1)
                    loss_1 = criterion(output_1, labels1)
                    loss_2 = criterion(output_2, labels2)
                    # Joint loss: simple sum of the two heads' losses.
                    loss = loss_1 + loss_2
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                losses.append(loss.data.cpu().numpy())
                running_loss += loss.item() * inputs.size(0)
                running_corrects_1 += torch.sum(preds_1 == labels1 )
                running_corrects_2 += torch.sum(preds_2 == labels2 )
            # Average the two heads' hit counts (integer division).
            running_corrects= (running_corrects_1 + running_corrects_2)//2
            epoch_loss = running_loss / (dataset_sizes[phase] )
            epoch_accuracy = running_corrects.double() / (dataset_sizes[phase])
            e_losses.append(epoch_loss)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_accuracy))
            # Keep a copy of the weights whenever validation accuracy improves.
            if phase == 'val' and epoch_accuracy > best_acc:
                best_acc = epoch_accuracy
                best_model_wts = copy.deepcopy(model.state_dict())
        print("\n")
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Restore the best-performing weights before returning.
    model.load_state_dict(best_model_wts)
    return model, best_acc, e_losses
def test(model, test_loader):
    """
    Evaluate the model on a validation loader (requires CUDA).

    A sample counts as correct only when BOTH digit heads are right.
    Returns the mean per-batch joint accuracy as a float tensor.
    """
    accuracy = 0
    model.eval()
    for inputs, labels1, labels2 in test_loader:
        inputs = inputs.cuda()
        labels1 = labels1.cuda()
        labels2 = labels2.cuda()
        output_1, output_2 = model(inputs)
        _, preds_1 = torch.max(output_1, 1)
        _, preds_2 = torch.max(output_2, 1)
        # Bug fix: the original added two bool tensors and floor-divided by 2,
        # which modern PyTorch rejects for Bool dtype; a logical AND expresses
        # the same "both digits correct" condition.
        both_correct = (labels1.data == preds_1) & (labels2.data == preds_2)
        accuracy += both_correct.type(torch.FloatTensor).mean()
    # Bug fix: the original returned accuracy/len(loader)*inputs.size(0),
    # scaling the mean accuracy by the *last* batch size (a no-op only when
    # the batch size is 1, as in this script); return the plain mean instead.
    return accuracy / len(test_loader)
# ---- Training configuration ------------------------------------------------
train_batch_size = 4
val_batch_size = 1
learning_rt = 0.0001
number_epochs = 1000
data_dir = "./screencap_v2017/" #Change to appropriate dataset location

# Grayscale images normalized to roughly [-1, 1].
image_transform = {
    'train': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ]),
    'val': transforms.Compose([ transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))])
}

# Build train/val datasets (60x60 images) and their loaders.
MLB_obj_train = MLBDataset(data_dir,'train', 60, 60, image_transform['train'],train_batch_size)
train_loader = torch.utils.data.DataLoader(MLB_obj_train,batch_size=train_batch_size, shuffle=True)
MLB_obj_test = MLBDataset(data_dir,'val', 60, 60, image_transform['val'],val_batch_size)
test_loader = torch.utils.data.DataLoader(MLB_obj_test,batch_size=val_batch_size, shuffle=True)
print("Number of training examples loaded: {}".format(len(train_loader)*train_batch_size))
print("Number of validation examples loaded: {}".format(len(test_loader)*val_batch_size))

# Globals consumed by train_model().
dataset_sizes={
    'train':len(train_loader)*train_batch_size,
    'val':len(test_loader)*val_batch_size
}
data_lo = {
    'train': train_loader,
    'val': test_loader
}

# Train on GPU with Adam, keep the best-validation weights.
joint_model = Net().cuda()
criterion = JointLoss()
optimizer_ft = optim.Adam(joint_model.parameters(), lr=learning_rt, betas=(0.9, 0.999))
model_ft, best_acc, e_losses = train_model(joint_model, criterion, optimizer_ft, num_epochs=number_epochs)
print('Finished Training')
# NOTE(review): "{:2f}" (no dot) formats the int as e.g. "85.000000" — was
# "{:.2f}" or plain "{}" intended for the checkpoint filename? Confirm.
torch.save(model_ft.state_dict(), ("./multitask_sunday_{:2f}.pth".format(int(best_acc * 100))))
plt.plot(e_losses)
plt.show()
epoch_acc = test(model_ft,test_loader)
print(' Test Acc: {:.4f}'.format(epoch_acc))
| vbhavank/two-head-number-pytorch-classifier | code.py | code.py | py | 9,758 | python | en | code | 0 | github-code | 50 |
39908424305 | #!/usr/bin/env python3
import sys
if len(sys.argv) == 2:
print("Opening: %s" % sys.argv[1])
f = open(sys.argv[1])
lines = f.readlines()
gamma = epsilon = ""
for i in range(len(lines[0])-1):
print("i: %d" % i)
b0 = b1 = 0
for l in lines:
if l[i] == "0":
b0 = b0 + 1
else:
b1 = b1 + 1
if b0 > b1:
gamma = gamma + "0"
epsilon = epsilon + "1"
else:
gamma = gamma + "1"
epsilon = epsilon + "0"
print("Gamma : %s\nEpsilon: %s" % (gamma, epsilon))
print("Gamma : %d\nEpsilon: %d\nResult1: %d" % (int(gamma,2), int(epsilon,2), int(gamma,2)*int(epsilon,2))) | malfaxio/adventofcode | 2021/03/sol03p1.py | sol03p1.py | py | 728 | python | en | code | 0 | github-code | 50 |
28785556759 | from Question_3_predict_9am import predict_temp_9am
import csv
cities = ['Sydney', 'Melbourne', 'Brisbane',
'Perth', 'Canberra', 'Adelaide']
data_9am = []
for city in cities:
mae_ensemble, r2_ensemble, accuracy_ensemble = predict_temp_9am(city)
data_9am.append([mae_ensemble, r2_ensemble, accuracy_ensemble])
csv_file = 'stat_data_9am.csv'
with open(csv_file, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['Cities', 'MAE', 'R2', 'Accuracy'])
for i in range(len(cities)):
writer.writerow([cities[i]] + data_9am[i])
| JiatongGao/cse163-final-project | Question 3/Question 3 9am/Q3_run_9am.py | Q3_run_9am.py | py | 581 | python | en | code | 0 | github-code | 50 |
26272620668 | import os
from typing import List, Dict
from openai_api import OpenAIResponder
from utils import format_message
from chat_store import ChatStore
api_key = os.getenv('OPENAI_API_KEY')
class SafeguardAI:
    """Fact-checks assistant messages in a stored chat via an OpenAI model.

    Wraps an OpenAIResponder and persists per-message evaluations back into
    the ChatStore under role 'eval'.
    """

    def __init__(self, api_key: str, model: str = 'gpt-3.5-turbo-0613', logger=None):
        self._api_key = api_key
        # initialize openai client using provided OpenAI API key
        self._model = model
        self._logger = logger
        self._responder = OpenAIResponder(api_key=api_key, model=model, logger=logger)

    def _get_response(self, messages):
        # Thin passthrough to the responder; returns (response, status, details).
        response, status, details = self._responder.get_response(messages)
        return response, status, details

    def _evaluate_factuality(self, text : str):
        # Ask the model to enumerate and assess the factual statements in `text`.
        # NOTE(review): the stray quote line inside this prompt appears
        # unintentional but is part of the literal sent to the model — confirm.
        prompt = f'''
        List the factual statements present in the following piece of text. Then for each such statement assess it's factuality. 
        At the end write your conclusion, highlighting the inaccurate or misleading facts if there are any. Here is the text: {text}
        '
        '''
        response, status, details = self._get_response([{'role': 'system', 'content': prompt}])
        return response

    def get_evaluation(self, chat_id : str) -> str:
        """Evaluate every assistant message after the last stored 'eval' entry.

        Stores each evaluation back into the chat and returns the concatenated
        HTML of all new evaluations.
        """
        conversation = ChatStore.retrieve_chat(chat_id, eval = True)
        evaluation = ''
        # Find the index of the most recent evaluation so we only process
        # messages that came after it.
        last_eval_idx = -1
        for idx, message in enumerate(conversation):
            if message['role'] == 'eval':
                last_eval_idx = idx
        for idx, message in enumerate(conversation):
            if idx >= last_eval_idx + 1:
                if message['role'] == 'assistant':
                    print('evaluating message: ', message['content'])
                    # Build an HTML fragment: quoted original + bulleted findings.
                    msg_eval = '</br>'
                    msg_eval += '<div class="quote"><i>' + message['content'] + '</i></div>'
                    fact_eval = self._evaluate_factuality(message['content'])
                    msg_eval += '<ul>' + format_message(fact_eval) + '</ul></br>'
                    # we store the evaluation for each message
                    ChatStore.add_message(
                        chat_id,
                        {
                            'role': 'eval',
                            'content': msg_eval,
                            'status' : 'OK'
                        }
                    )
                    evaluation += msg_eval
        return evaluation
def _test_safeguard_ai():
    """Smoke test: send one hard-coded question through SafeguardAI and print the reply."""
    question = "Q: Who starred in the 1996 blockbuster Independence Day?" + "A: "
    guard = SafeguardAI(api_key=api_key, model='gpt-3.5-turbo-0613')
    messages = [{'role': 'system', 'content': question}]
    response, status, details = guard._get_response(messages)
    for label, value in (('response ', response), ('status ', status), ('details ', details)):
        print(label, value)
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test_safeguard_ai()
| kpister/prompt-linter | data/scraping/repos/mihaitruta~examine-ai-2/backend~safeguard.py | backend~safeguard.py | py | 2,884 | python | en | code | 0 | github-code | 50 |
41802337679 | ####PARt1
r1 = range(0, 10)
for range1 in r1:
print(range1)
print("")
r2 = range(5, 30)
for range2 in r2:
print(range2)
print("")
r3 = range(10,20)
for range3 in r3:
print(range3)
print("")
####Part2
#variables
left_boarder = "[!"
right_boarder = "!]"
inner = "_"
name = "Lizveth"
length = range(0,11)
for length in inner:
print(left_boarder+ str(inner)+ name+ str(inner)+ right_boarder)
| code-in-the-schools/Loops-and-range_LizvethM | main.py | main.py | py | 403 | python | en | code | 0 | github-code | 50 |
4897162439 | import sys
from typing import List
from threading import Timer
from grid import Grid
from entity import MovableEntity, Entity, ScoutingEntity
from consts.direction import MovementDirection
from shape_sprite import ShapeSprite
from ui import Menu, Button
from consts.movement_type import MovementType
from consts import Colour
from consts import Layer
from consts import EntityType
from consts import Keys
class Game:
    """
    Top-level game state for the rabbit-herding maze game.

    Owns the player, the rabbit, all items/walls, the tile grid they live on,
    and the pause menu. An external render/input loop is expected to drive it
    via update_game(), on_key_press() and on_key_release().
    """
    def __init__(
            self, width, height, tile_size, width_aspect_ratio: float = 1.0,
            grid_layers: int = 3, flip_x: bool = False, flip_y: bool = False
    ):
        """
        Initialise our game
        :param width: The width in pixels of our game world
        :param height: The height in pixels of our game world
        :param tile_size: Our world is divided into a grid tiles, how large are the tiles in pixels
        :param width_aspect_ratio: Do we need to apply an aspect ratio to our horizontal pixel sizes, assumed to be 1:1
        :param grid_layers: How many layers are there in our grid? First layer is assumed to have the collision tiles
        :param flip_x: Do we want to flip the position of x pixels, default 0 is left increasing towards the right
        :param flip_y: Do we want to flip the position of y pixels, default 0 is the top increasing towards the bottom
        """
        self.is_running: bool = False
        self.exit: bool = False
        self.debug: bool = True
        self.level = 1

        self.width: int = width
        self.height: int = height
        self.tile_size: float = tile_size
        self.grid_layers: int = grid_layers
        self.flip_x: bool = flip_x
        self.flip_y: bool = flip_y

        self.player: MovableEntity = None
        self.rabbit: MovableEntity = None
        self.npcs: List[MovableEntity] = []
        self.items: List[Entity] = []
        self.walls: List[Entity] = []
        # Named threading.Timer instances keyed by purpose (e.g. "change_level").
        self.timers = {}
        self.score: int = 0
        # Carrots picked up by the player and not yet placed.
        self.held_carrots: int = 0
        self.game_message: str = ""
        self.debug_message: str = ""
        self.grid: Grid = None
        # Shared class-level setting for all movable entities.
        MovableEntity.width_aspect_ratio = width_aspect_ratio

        self.menu: Menu = None
        self.setup_menu()

        self.load_level()

    def setup_menu(self):
        # Build the modal help/pause menu shown at start and on ESCAPE.
        # NOTE(review): the line ending `"movement."` below has no trailing
        # comma, so the following "" is implicitly concatenated onto it and
        # one intended blank line is lost — confirm whether a comma is missing.
        self.menu = Menu(
            text_lines = [
                "You must herd the the rabbit to the exit in the maze. ",
                "The rabbit will try to keep it's distance from you, ",
                "so you'll have herd the rabbit where you want it to go!",
                "",
                "Rabbits love carrots and will run towards them.",
                "Pickup a carrot before the rabbit you can place the",
                "carrots to encourage the rabbit through the maze.",
                "Blue sweets will make both yourself or the rabbit ",
                "move faster. The red potions will slow down movement."
                "",
                "",
                " ↑ ↓ ← → to move/select",
                " SPACE to place a carrot",
                " ESCAPE to bring up this menu",
                " ENTER to start",
            ],
            is_modal = True,
            width = self.width - 200,
            height = self.height - 200
        )
        self.menu.add_button("Restart", None, self.menu_reset_game)
        self.menu.add_button("Quit", None, self.quit)

    def reset_game(self):
        """
        Restart the game and reset the game level
        :return:
        """
        self.is_running = False
        self.player = None
        self.rabbit = None
        self.npcs = []
        self.items = []
        self.walls = []
        # Cancel any pending timers so callbacks don't fire on stale state.
        for timer in self.timers:
            self.timers[timer].cancel()
        self.timers = {}
        self.score = 0
        self.held_carrots = 0
        self.game_message = ""
        self.debug_message = ""
        # reset and reassign the grid
        x_max = (self.width // self.tile_size) + 1
        y_max = (self.height // self.tile_size) + 1
        self.grid = Grid(x_max, y_max, self.tile_size, self.grid_layers, self.flip_x, self.flip_y)
        # All entities share this grid via a class attribute.
        Entity.grid = self.grid

    def menu_reset_game(self, button):
        """
        Handle the reset menu button press
        :param button:
        :return:
        """
        self.menu.close_menu(button)
        self.load_level()

    def quit(self, button):
        # Menu "Quit" callback: signal the host loop to exit.
        self.exit = True

    def update_game(self, delta_time):
        """
        Update the game state based on the elapsed time
        :param delta_time: seconds since the last update
        :return: True if the world was updated, False if paused/stopped
        """
        if not self.is_running:
            return False

        # A visible modal menu pauses the simulation.
        if self.menu and self.menu.is_visible and self.menu.is_modal:
            return False

        # Iterate over copies because think() callbacks may remove entities.
        items = self.items.copy()
        for item in items:
            item.think(delta_time)
        del items

        npcs = self.npcs.copy()
        for npc in npcs:
            npc.think(delta_time)
        del npcs

        self.player.think(delta_time)

        return True

    def load_level(self):
        """
        Load the level
        :return:
        """
        self.reset_game()

        # Level layout is an ASCII map: one character per tile.
        with open(f"resources/level/{self.level:02}/layout.txt") as f:
            wall_lines = f.readlines()
            for row_index, row_value in enumerate(wall_lines):
                for col_index, col_value in enumerate(row_value):
                    if col_value == "#":
                        self.add_wall(row_index, col_index)
                    elif col_value == "@":
                        self.add_player(row_index, col_index)
                    elif col_value == "&":
                        self.add_rabbit(row_index, col_index)
                    elif col_value == ".":
                        self.add_speed_down(row_index, col_index)
                    elif col_value == "*":
                        self.add_speed_up(row_index, col_index)
                    elif col_value == "~":
                        self.add_carrot(row_index, col_index)
                    elif col_value == "X":
                        self.add_end(row_index, col_index)

        self.start_rabbit()
        self.is_running = True

    def add_player(self, row, column):
        """
        Add the player to game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        self.player = MovableEntity(
            x, y, int(self.tile_size - 2), int(self.tile_size - 2),
            Colour.GREEN, 0.10,
            is_solid = True, parent_collection = None,
            grid_layer = Layer.PLAYER.value, entity_type_id = EntityType.PLAYER.value
        )
        # The player is keyboard-driven, not AI-driven.
        self.player.movement_type = MovementType.CONTROLLED
        self.player.base_speed = 5
        self.player.max_acceleration = 10
        self.player.acceleration_rate = 0.25
        self.player.load_shape_sprite("player", 3)

    def add_rabbit(self, row, column):
        """
        Add the rabbit to the game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        # The rabbit "chases" the player but keeps target_offset distance,
        # and scouts nearby tiles for carrots.
        self.rabbit = ScoutingEntity(
            x, y, int(self.tile_size - 2), int(self.tile_size - 2),
            Colour.WHITE, 0.10, False, self.npcs,
            target = self.player.id, target_offset = self.tile_size * 2,
            grid_layer = Layer.NPC.value, entity_type_id = EntityType.RABBIT.value,
            movement_type = MovementType.CHASE,
            search_for_entity_types = [EntityType.CARROT.value], search_tile_range = 3
        )
        self.rabbit.base_speed = 4
        self.rabbit.max_acceleration = 8
        self.rabbit.movement_speed = 4
        self.rabbit.acceleration_rate = 0.5
        self.rabbit.load_shape_sprite("rabbit", 3)

    def remove_item(self, item):
        """
        Remove an item from the game map
        :param item:
        :return:
        """
        # Presumably Grid.__sub__ deregisters the entity id from the grid —
        # confirm against the Grid implementation.
        self.grid - item.id
        if item in self.items:
            self.items.remove(item)

    def add_speed_down(self, row, column):
        """
        Add a speed down item to the game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        item = Entity(
            x, y, int(self.tile_size - 2), int(self.tile_size - 2), Colour.RED, 5,
            False, self.items, grid_layer = Layer.ITEMS.value
        )
        item.on_collide = self.apply_speed_down
        item.load_shape_sprite("speed_down", 3)

    def apply_speed_down(self, apply_from, apply_to):
        """
        On an entity `apply_to` colliding with `apply_from` apply a speed down to `apply_to` and remove `apply_from`
        from the game map
        :param apply_from:
        :param apply_to:
        :return:
        """
        # Halve the collider's acceleration rate; walls/items without one
        # fall through to the AttributeError guard.
        try:
            acceleration_modifier = apply_to.acceleration_rate / 2
            apply_to.acceleration_rate -= acceleration_modifier
        except AttributeError:
            print("tried to apply speed down wrong thing?", apply_to, type(apply_to))
            return

        self.remove_item(apply_from)

    def add_speed_up(self, row, column):
        """
        Add a speed up item to the game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        item = Entity(
            x, y, int(self.tile_size - 2), int(self.tile_size - 2), Colour.BLUE_LIGHT, 5,
            False, self.items, grid_layer = Layer.ITEMS.value
        )
        item.on_collide = self.apply_speed_up
        item.load_shape_sprite("speed_up", 2)

    def apply_speed_up(self, apply_from, apply_to):
        """
        On an entity `apply_to` colliding with `apply_from` apply a speed up to `apply_to` and remove `apply_from`
        from the game map
        :param apply_from:
        :param apply_to:
        :return:
        """
        # Boost the collider's acceleration rate by 50%.
        try:
            acceleration_modifier = apply_to.acceleration_rate / 2
            apply_to.acceleration_rate += acceleration_modifier
        except AttributeError:
            print("tried to apply speed up wrong thing?", apply_to, type(apply_to))
            return

        self.remove_item(apply_from)

    def add_carrot(self, row, column):
        """
        Add a carrot to the game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        carrot = self.place_carrot(x, y)
        # Level-placed carrots can be picked up by the player (see eat_carrot).
        carrot.player_placed = False

    def place_carrot(self, x, y):
        """
        Place a carrot at a given x,y position
        :param x:
        :param y:
        :return: the created carrot Entity
        """
        item = Entity(
            x, y, int(self.tile_size - 2), int(self.tile_size - 2), Colour.ORANGE, 5, False, self.items,
            grid_layer = Layer.ITEMS.value, entity_type_id = EntityType.CARROT.value
        )
        item.on_collide = self.eat_carrot
        item.load_shape_sprite("carrot", 3)
        return item

    def player_drop_carrot(self):
        """
        Place a carrot at the players position
        :return:
        """
        if self.held_carrots > 0:
            self.held_carrots -= 1
            x, y = self.player.grid_pixels
            carrot = self.place_carrot(x, y)
            # Player-placed carrots can't be picked back up (see eat_carrot).
            carrot.player_placed = True

    def eat_carrot(self, carrot, eater):
        """
        If `eater` is our rabbit, then remove carrot from the game map and increase the score
        :param carrot:
        :param eater:
        :return:
        """
        if eater.id not in [self.rabbit.id, self.player.id]:
            return

        if eater.id == self.rabbit.id:
            self.score += 1
        elif eater.id == self.player.id:
            # Players pick up level carrots but walk over their own placements.
            if carrot.player_placed:
                return
            self.held_carrots += 1

        self.remove_item(carrot)

    def add_end(self, row, column):
        """
        Add the end/goal to the game map
        :param row:
        :param column:
        :return:
        """
        x, y = self.grid.get_pixel_center(row, column)
        item = Entity(
            x, y, int(self.tile_size), int(self.tile_size), Colour.GREY, 5,
            False, self.items, Layer.WORLD.value
        )
        item.on_collide = self.check_end
        item.load_shape_sprite("exit", 3)

    def check_end(self, goal, other):
        """
        If something collides with the goal check if it's the rabbit
        If it is the rabbit then we've completed the level
        :param goal:
        :param other:
        :return:
        """
        if other.id != self.rabbit.id:
            return

        self.game_message = "Next Level!"
        # Freeze the rabbit while the level-change timer runs.
        self.rabbit.movement_type = MovementType.NONE
        self.rabbit.target = None

        # Guard so repeated collisions don't start multiple timers.
        if "change_level" not in self.timers:
            self.timers["change_level"] = Timer(2.0, self.change_level)
            self.timers["change_level"].start()

    def change_level(self):
        """
        Change to the next level
        :return:
        """
        self.timers["change_level"].cancel()
        self.is_running = False
        self.level += 1
        self.load_level()

    def start_rabbit(self):
        """
        Kick the rabbit into motion by setting its movement speed.
        (The chase target/offset are already configured in add_rabbit.)
        :return:
        """
        self.rabbit.movement_speed = 3

    def add_wall(self, row, column):
        """
        Add a wall to the game world
        :param row:
        :param column:
        :return:
        """
        # add_at_grid_position
        x, y = self.grid.get_pixel_center(row, column)
        # Walls are solid; the Entity registers itself into self.walls.
        Entity(
            x, y, int(self.tile_size), int(self.tile_size), Colour.BROWN, 5,
            True, self.walls, grid_layer = Layer.WORLD.value
        )

    def get_grid_data(self, x, y):
        """
        Get the data in our grid at a given x,y pixel position
        :param x:
        :param y:
        :return:
        """
        return self.grid[x, y]

    def debug_x_y(self, x, y):
        """
        Print out debug information our grid at a given x,y pixel position
        :param x:
        :param y:
        :return:
        """
        print("id:", self.get_grid_data(x, y), "x:", x, "y:", y)
        print("nearby:", self.grid.query(
            x, y, k = 8, distance_upper_bound = self.tile_size * 2
        ))

        self.game_message = str(self.rabbit.destination)

    def reset_level(self):
        """
        Reset the current level
        :return:
        """
        self.load_level()

    def on_key_press(self, key):
        """
        Respond to key press
        :param key:
        :return:
        """
        if not self.is_running:
            return

        player = self.player
        rabbit = self.rabbit

        # Movement keys only apply while the menu is closed.
        if not self.menu.is_visible:
            if key == Keys.LEFT:
                player.set_direction(MovementDirection.WEST)
            elif key == Keys.RIGHT:
                player.set_direction(MovementDirection.EAST)
            elif key == Keys.UP:
                player.set_direction(MovementDirection.NORTH)
            elif key == Keys.DOWN:
                player.set_direction(MovementDirection.SOUTH)
            elif key == Keys.R:
                self.reset_level()

        if self.debug:
            # debug stuffs: tweak tick rate, drive the rabbit manually,
            # or re-enable its AI with X.
            if key == Keys.PERIOD:
                player.tick_rate -= 1
            elif key == Keys.COMMA:
                player.tick_rate += 1
            elif key == Keys.W:
                rabbit.movement_type = MovementType.NONE
                rabbit.target = None
                rabbit.move_up()
            elif key == Keys.A:
                rabbit.movement_type = MovementType.NONE
                rabbit.target = None
                rabbit.move_left()
            elif key == Keys.D:
                rabbit.movement_type = MovementType.NONE
                rabbit.target = None
                rabbit.move_right()
            elif key == Keys.S:
                rabbit.movement_type = MovementType.NONE
                rabbit.target = None
                rabbit.move_down()
            elif key == Keys.X:
                self.start_rabbit()

    def on_key_release(self, key):
        """
        Respond to key release
        :param key:
        :return:
        """
        player: MovableEntity = self.player
        menu: Menu = self.menu

        if not self.menu.is_visible:
            # Only stop the player if the released key matches the current
            # direction, so quick direction changes aren't cancelled.
            if key == Keys.LEFT:
                if player.movement_direction == MovementDirection.WEST:
                    player.set_direction(MovementDirection.NONE)
            elif key == Keys.RIGHT:
                if player.movement_direction == MovementDirection.EAST:
                    player.set_direction(MovementDirection.NONE)
            elif key == Keys.UP:
                if player.movement_direction == MovementDirection.NORTH:
                    player.set_direction(MovementDirection.NONE)
            elif key == Keys.DOWN:
                if player.movement_direction == MovementDirection.SOUTH:
                    player.set_direction(MovementDirection.NONE)
            elif key == Keys.SPACE:
                self.player_drop_carrot()
        else:
            # Menu navigation while the menu is open.
            if key == Keys.UP:
                menu.decrement_selected_button()
            elif key == Keys.DOWN:
                menu.increment_selected_button()
            elif key == Keys.RETURN:
                menu.click_selected_button()

        if key == Keys.ESCAPE:
            # resetting the back button text to override the value set at the start
            self.menu.button_list[0].text = "Back"
            self.menu.is_visible = not self.menu.is_visible
| ryancollingwood/arcade-rabbit-herder | game.py | game.py | py | 17,791 | python | en | code | 10 | github-code | 50 |
26992677057 | from tkinter import *
from tkinter import messagebox
import pyspeedtest # pip install pyspeedtest
def check():
    """Measure the download speed against wikipedia.com and show it in a dialog."""
    tester = pyspeedtest.SpeedTest("www.wikipedia.com")
    reading = str(tester.download()) + " [Bytes per second]"
    messagebox.showinfo("Your download speed is : ", reading)


# Window setup: title, sky-blue background, fixed 700x350 size.
root = Tk()
root.title("Internet Speed Checker")
root.config(bg = "skyblue")
root.geometry("700x350")

heading = Label(root, text="Internet Speed Checker - ", font=("Arial", 30, "bold"), bg="green").pack()
trigger = Button(root, text="Click to check", font=("Arial", 22, "bold"), bg="yellow", height=1, width=11, command=check).pack()
root.mainloop()
73348443036 |
"""
Author : Setu Gupta
Email : setu18190@iiitd.ac.in
Date : 24th Aug 2020
This tool is used to compare and plot the accuracies of the probability model and simple perceptrons.
The tool can be used via the following command
python3 path/to/this/file path/to/the/accuracy_report
Note: accuracy_report is in DoS_noxim_router_meta_merge_xxx
It then calculates and plots the localization probabilty using
1.) Only input ports
2.) Only output ports
3.) Using probability model
"""
import sys # Used to read arguments
import matplotlib.pyplot as plt
import numpy as np
from accuracy_comparator import get_max_improvement, get_accuracy_data
# Path to the accuracy_report file is the first CLI argument.
accuracy_report_path = sys.argv[1]
parsed_data = get_accuracy_data(accuracy_report_path)
# -1 appears to select "all" source/destination pairs — confirm against
# accuracy_comparator.get_max_improvement.
data = get_max_improvement(parsed_data, -1)
# Generate data
# x = [] # Source and destination pairs
# y1 = [] # Input ports
# y2 = [] # Output ports
# y3 = [] # Probability model
# ip_rel = []
# op_rel = []
# ip_avg = 0
# op_avg = 0
# TRIP = 100
# for src, dst, raw, rel in data:
# x.append(str(src) + ' to ' + str(dst))
# y1.append(raw[0])
# y2.append(raw[1])
# y3.append(raw[2])
# TRIP = 200
# Mean and standard deviation of the last raw accuracy value across all
# (src, dst) pairs.
count = 0
val = 0
for src, dst, raw, rel in data:
    # src_id = src[0] + src[1]*8
    # # dst_id = dst[0] + dst[1]*8
    # if(TRIP > 0):
    #   x.append("(" + str(src_id) + ', ' + str(dst_id) + ')')
    #   ip_rel.append(1 - rel[0]/rel[2])
    #   op_rel.append(1 - rel[1]/rel[2])
    #   # TRIP -= 1
    # ip_avg += 1 - rel[0]/rel[2]
    # op_avg += 1 - rel[1]/rel[2]
    # print(src_id, dst_id, 1 - rel[0]/rel[2], 1 - rel[1]/rel[2])
    # count += 1
    val += raw[-1]
    # NOTE(review): count grows by 10.0150536…, not 1, so `val` below is the
    # sum scaled by that constant rather than a true mean — confirm this
    # normalisation factor is intentional.
    count += 10.015053639183458154
val /= count
print(val)
std_dev = 0
for src, dst, raw, rel in data:
    std_dev += (raw[-1] - val)**2
# NOTE(review): the same scaled `count` is used as the divisor here too.
std_dev /= count
std_dev = std_dev**(1/2)
print(std_dev)
# ip_avg /= count
# op_avg /= count
# xticks = np.arange(len(x))
# ax1 = plt.subplot(1,1,1)
# w = 0.3 # Width of bars
# xticks = np.append(xticks, [19+6*w])
# x.append('Average')
# # Set x axis
# plt.xticks(xticks, x, rotation=45)
# # Plot for input
# ip = ax1.bar(xticks[:-1] - w/2, ip_rel, width=w, color='r', align='center')
# ip_avg = ax1.bar(xticks[-1] - w/2, [ip_avg], width=w, color='r', align='center')
# # Create another axis and plot op on it
# # ax2 = ax1.twinx()
# op = ax1.bar(xticks[:-1] + w/2, op_rel, width=w,color='b',align='center')
# op_avg = ax1.bar(xticks[-1] + w/2, [op_avg], width=w, color='b', align='center')
# # Plot fot probability model
# # ax3 = ax1.twinx()
# # prob = ax1.bar(xticks + w*2, y3, width=w,color='g',align='center')
# # Set the Y axis label.
# plt.ylabel('Relative probability of localization degradation')
# # Create legend and show plot
# plt.legend([ip, op],['Input features', 'Output features',])
# plt.show() | Setu-Gupta/noxim_NoC_DoS | bin/tools/plot_gen/accuracy_comparision_plot/bar_plot.py | bar_plot.py | py | 2,715 | python | en | code | 0 | github-code | 50 |
71336543836 | import typing as t
import discord
from discord.ext import commands
import bot.extensions as ext
from bot.clem_bot import ClemBot
from bot.consts import Colors
class InviteCog(commands.Cog):
    """Cog exposing general bot-information commands: the invite link and
    an "about" card with owner, stats, and useful links."""

    def __init__(self, bot: ClemBot) -> None:
        self.bot = bot

    @ext.command()
    @ext.long_help("My invite link so you can invite me to your server!")
    @ext.short_help("Shows my invite link")
    @ext.example("invite")
    async def invite(self, ctx: ext.ClemBotCtx) -> None:
        """Send an embed containing the bot's OAuth invite link."""
        invite_embed = discord.Embed(
            color=Colors.ClemsonOrange,
            title="Here is my invite link! :grin:",
            description="Add me to your server!",
        )
        invite_embed.add_field(
            name="Link",
            value="[Click me!](https://discord.com/api/oauth2/authorize?client_id=710672266245177365&permissions=1409412343&scope=bot)",
        )
        invite_embed.add_field(
            name="Resources",
            value="For information on advanced features\nplease see my wiki\n[Link!](https://docs.clembot.io/)",
        )
        assert self.bot.user is not None
        invite_embed.set_thumbnail(url=self.bot.user.display_avatar.url)
        await ctx.send(embed=invite_embed)

    @ext.command()
    @ext.long_help("Shows information about me and my owner!")
    @ext.short_help("Provides bot info")
    @ext.example("about")
    async def about(self, ctx: ext.ClemBotCtx) -> None:
        """Send an embed describing the bot: stats, owner, and links."""
        owner = self.bot.get_user(t.cast(int, self.bot.owner_id))
        assert owner is not None
        assert self.bot.user is not None
        guild_count = len(self.bot.guilds)
        user_count = sum(g.member_count for g in self.bot.guilds if g.member_count)
        about_embed = discord.Embed(
            color=Colors.ClemsonOrange,
            title=str(self.bot.user),
            description=f"{guild_count} Guilds\n{user_count} Users",
        )
        about_embed.add_field(name="Owner", value=owner.mention, inline=False)
        about_embed.add_field(name="Website", value="[Link!](https://clembot.io)")
        about_embed.add_field(
            name="Repository", value="[Link!](https://github.com/ClemBotProject/ClemBot)"
        )
        about_embed.add_field(name="Wiki", value="[Link!](https://docs.clembot.io/)")
        about_embed.add_field(name="Privacy Policy", value="[Link!](https://clembot.io/privacy)")
        about_embed.set_thumbnail(url=self.bot.user.display_avatar.url)
        await ctx.send(embed=about_embed)
async def setup(bot: ClemBot) -> None:
    """Extension entry point: register the InviteCog with the bot."""
    cog = InviteCog(bot)
    await bot.add_cog(cog)
| ClemBotProject/ClemBot | ClemBot.Bot/bot/cogs/bot_info_cog.py | bot_info_cog.py | py | 2,379 | python | en | code | 79 | github-code | 50 |
37275885434 | # LZW Encoder
'''
The input data is encoded using the encoder.py file,
the dictionary of size 256 is built and initialized,
using the python dictionary data structure
in the dictionary, key are characters and values are the ascii values
the lzw compression algorithm is applied and we get the compressed data,
the program outputs the compressed data and stores it to an
output file named inputFileName.lzw
The compressed data is of 2 bytes.
'''
import sys
from sys import argv
from struct import *
# taking the input file and the number of bits from command line
# defining the maximum table size
# opening the input file
# reading the input file and storing the file data into data variable
input_file, n = argv[1:]
maximum_table_size = pow(2,int(n))
file = open(input_file)
data = file.read()
# Building and initializing the dictionary.
dictionary_size = 256
dictionary = {chr(i): i for i in range(dictionary_size)}
string = "" # String is null.
compressed_data = [] # variable to store the compressed data.
# iterating through the input symbols.
# LZW Compression algorithm
for symbol in data:
string_plus_symbol = string + symbol # get input symbol.
if string_plus_symbol in dictionary:
string = string_plus_symbol
else:
compressed_data.append(dictionary[string])
if(len(dictionary) <= maximum_table_size):
dictionary[string_plus_symbol] = dictionary_size
dictionary_size += 1
string = symbol
if string in dictionary:
compressed_data.append(dictionary[string])
# storing the compressed string into a file (byte-wise).
out = input_file.split(".")[0]
output_file = open(out + ".lzw", "wb")
for data in compressed_data:
output_file.write(pack('>H',int(data)))
output_file.close()
file.close() | himanshikohli19/Design_and_Analysis_of_Algorithms_using_Python | LZWEncoding.py | LZWEncoding.py | py | 1,901 | python | en | code | 0 | github-code | 50 |
28925000982 | # monoalphabetic.py - implements a monoalphabetic (Caesar) cipher.
from rotate import cipher_rotate
# def rotate(char, rotation):
# """
# Function to cipher individual characters of strings. Takes individual characters as input and returns shifted characters
# """
# digit = ord(char)
# cipherdigit = None
# ciphrange = range(32,127)
# if (digit + rotation) not in ciphrange:
# if (digit + rotation) > 126:
# difference = (digit + rotation) - 126
# adjustment = difference - 1
# cipherdigit = 32 + adjustment
# elif (digit + rotation) < 32:
# difference = digit + rotation
# adjustment = (difference - 32) + 1
# cipherdigit = 126 + adjustment
# else:
# cipherdigit = (digit + rotation)
#
# cipherchar = chr(cipherdigit)
# distance = cipherdigit - digit
# print(char, digit, cipherdigit, cipherchar, distance)
# return cipherchar
def caesar(inputtext, rotation):
    """Apply a monoalphabetic (Caesar) substitution to *inputtext*.

    Each character is shifted by *rotation* positions via cipher_rotate
    (negative values shift backwards); any resulting DEL character (0x7f)
    is rendered as a space.

    Example:
        'abc' (rotation 3) -> 'def'
        'def' (rotation -3) -> 'abc'
    """
    shifted = ''.join(cipher_rotate(symbol, rotation) for symbol in inputtext)
    return shifted.replace(u'\x7f', u' ')
if __name__ == "__main__":
    # Manual smoke test: shift the alphabet forward by 20, then a space back by 1.
    print(caesar('abcdefghi', 20))
    print(caesar(' ', -1))
| Michael-Flood/ciphertool | monoalphabetic.py | monoalphabetic.py | py | 1,644 | python | en | code | 0 | github-code | 50 |
70382429916 | """
=========================================
Preparing Data for Analysis
=========================================
Date: April 21, 2023
This script cleans the input data, removes outliers, explores and visualizes the cleaned data, shuffles the data, and outputs it as a .csv
How to run: python3 preparing_data.py -in insurance_dataset.csv
Output figures:
Outliers_KNN_method.png
Output files:
insurance_dataset_clean.csv
=========================================
"""
import numpy as np
import pandas as pd
import argparse
from numpy import min, max
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
# Command line: optional -in/--in_file names the CSV to clean
# (defaults to insurance_dataset.csv).
parser = argparse.ArgumentParser(description='Preparing the data')
parser.add_argument('--in_file', '-in', default="insurance_dataset.csv", required=False, action="store", dest ='in_file')
args = parser.parse_args()
filename = args.in_file
# Row 0 of the CSV holds the column headers.
insurance_premium_df = pd.read_csv(filename, header=0, encoding="utf8")
# region Cleaning, encoding, and shuffling the data
# Obtain the column names of the DataFrame.
column_names = insurance_premium_df.columns.values.tolist() # (How to, 2022).
# print(column_names)
# Check for NAs in the DataFrame.
# NOTE(review): the result of this check is discarded — nothing fails or
# branches when NAs are present; confirm that was intended.
insurance_premium_df.isnull().values.any() # (Check for, n.d.).
# There are no NAs in the DataFrame.
# Convert "bmi" from str to float.
insurance_premium_df['bmi'] = insurance_premium_df['bmi'].astype(float)
# One-hot encode the "region" column.
one_hot = pd.get_dummies(data=insurance_premium_df['region'])
# Combine the one-hot encoded column with the original DataFrame.
insurance_premium_df = pd.concat([insurance_premium_df, one_hot], axis=1)
# Convert all categorical variables to numerical variables.
le_sex = LabelEncoder() # (Denis, 2018).
insurance_premium_df['sex'] = le_sex.fit_transform(insurance_premium_df['sex']) # (Denis, 2018).
le_smoker = LabelEncoder() # (Denis, 2018).
insurance_premium_df['smoker'] = le_smoker.fit_transform(insurance_premium_df['smoker']) # (Denis, 2018).
le_region = LabelEncoder() # (Denis, 2018).
insurance_premium_df['region'] = le_region.fit_transform(insurance_premium_df['region']) # (Denis, 2018).
# Assign 3 bins for "charges" (roughly equal-frequency terciles).
# NOTE(review): the two tercile cut points use different precisions
# (0.3333333333 vs 0.666666) — confirm both were meant as exact thirds.
charges_bins = [0, insurance_premium_df.charges.quantile(0.3333333333), insurance_premium_df.charges.quantile(0.666666), 9999999999] # (Jain, 2020).
# Create classes for "charges."
charges_classes = [1, 2, 3] # (Wisdom, 2019).
# Append the classes for "charges" to the DataFrame.
insurance_premium_df['charge_classes'] = pd.cut(insurance_premium_df.charges, bins=charges_bins, labels=charges_classes) # (Wisdom, 2019).
# Reorder the columns in the DataFrame.
insurance_premium_df = insurance_premium_df.reindex(columns=['age', 'sex', 'bmi', 'children', 'smoker', 'region', 'northeast', 'northwest', 'southeast', 'southwest', 'charges', 'charge_classes'])
# Drop the column "region." (the one-hot columns above replace it)
insurance_premium_df.drop('region', inplace=True, axis=1)
# Shuffle the DataFrame (fixed seed, so the shuffle is reproducible).
insurance_premium_df = insurance_premium_df.sample(frac=1, random_state=7).reset_index(drop=True)
# References
# Check for NaN in Pandas DataFrame (examples included). (2021, September 10). Data to Fish. Retrieved March 24, 2023, from https://datatofish.com/check-nan-pandas-dataframe/
# Denis, B. (2018, May 5). Health Care Cost Analysys/Prediction Python. Kaggle. https://www.kaggle.com/code/flagma/health-care-cost-analysys-prediction-python
# How to Get Column Names in Pandas? (2022, December 1). Board Infinity. Retrieved March 24, 2023, from https://www.boardinfinity.com/blog/how-to-get-column-names-in-pandas/
# Wisdom, G. [Gurukul Wisdom]. (2019, February 22). 27 - Pandas - pandas.cut() Method Explained Clearly [Video]. YouTube. https://www.youtube.com/watch?v=rRTbSH5fOTc&ab_channel=GurukulWisdom
# endregion Cleaning and encoding the data
#############################
# region ######## Outlier Detection (KNN Clustering)
#############################
##################
# set font sizes #
##################
SMALL_SIZE = 10
MEDIUM_SIZE = 14
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# select charges feature to detect outliers
# (after the reindex/drop above, column -2 is 'charges')
charges = insurance_premium_df.iloc[:, -2]
# set figure sizes
plt.figure(figsize=(20,10))
# region ## view data before outlier removal
# plot a scatterplot of charges
# plt subplot as part of matrix of plots
plt.subplot(1,3,1)
plt.scatter(x = charges.index, y = charges.values, s = 20, color = "b")
plt.title("Charges of each data point")
plt.ylabel("Charges ($)")
plt.xlabel("Indices")
# endregion ## view data before outlier removal
# region ## Perform outlier detection with KNN
# create the data array
charges_array = charges.values
# print(charges_array.shape)
# reshape array to use with nearest neighbour (sklearn expects 2-D X)
reshaped_array = charges_array.reshape(-1, 1)
# print(reshaped_array.shape)
# create and fit model to data
k = 3
nbrs = NearestNeighbors(n_neighbors = k)
nbrs.fit(reshaped_array)
# calculate distances and indexes of k-neighbors for each data point
distances, indexes = nbrs.kneighbors(reshaped_array)
# plot mean of k-distances of each observation
plt.subplot(1,3,2)
plt.plot(distances.mean(axis =1))
plt.axhline(y = 750, color = "r", linestyle = "dashed")
plt.title("Mean of k-distances of each observation")
plt.ylabel("Mean k distance")
plt.xlabel("Indices")
plt.legend(["K-distances", "cutoff"])
# visually determine cutoff of 750: points whose mean k-distance exceeds it
# are treated as outliers
outlier_index = np.where(distances.mean(axis = 1) > 750)
# print("Outlier indices: ", outlier_index)
# extract outlier values
outlier_values = charges.iloc[outlier_index]
# remove outlier values
# print("Data size before outlier removal: ", insurance_premium_df.shape)
insurance_premium_no_outliers = np.delete(insurance_premium_df.values, outlier_index, axis = 0)
# change array to dataframe
clean_dataset_df = pd.DataFrame(insurance_premium_no_outliers)
# print("Data size of dataframe after outlier removal: ", clean_dataset_df.shape)
# endregion ## Perform outlier detection with KNN
# region ## View data after outlier removal
# plot data
plt.subplot(1,3,3)
plt.scatter(x = charges.index, y = charges.values, color = "b", s = 20)
# plot outlier values
plt.scatter(x = outlier_index, y = outlier_values, color = "r")
plt.legend(["Inlier", "outlier"])
plt.title("Charges of each data point")
plt.ylabel("Charges ($)")
plt.xlabel("Indices")
plt.tight_layout()
plt.savefig('Outliers_KNN_method.png',dpi=600)
plt.show()
# endregion ## View data after outlier removal
# remove outlier values
# NOTE(review): this repeats the removal above; only this second pass keeps
# the column names, so the block above is effectively redundant.
# print("Data size before outlier removal: ", insurance_premium_df.shape)
insurance_premium_no_outliers = np.delete(insurance_premium_df.values, outlier_index, axis = 0)
# change array to dataframe
column_names2 = insurance_premium_df.columns.values.tolist()
clean_dataset_df = pd.DataFrame(data = insurance_premium_no_outliers, columns= column_names2)
# print(insurance_premium_df)
# print(clean_dataset_df)
# print("Data size array after outlier removal: ", insurance_premium_no_outliers.shape)
# Output the resulting DataFrame to a .csv file.
clean_dataset_df.to_csv("insurance_dataset_clean.csv",index=False)
# endregion ####### Outlier Detection (KNN Clustering)
| odumosuo/insurance_prediction | preparing_data.py | preparing_data.py | py | 7,748 | python | en | code | 0 | github-code | 50 |
16189072160 | import pandas as pd
import numpy as np
import json
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
class DataModels:
    """Wraps the PurpleAir ('purple') and AireNL ('aire') DataFrames and
    exposes helpers that extract model-ready feature/target arrays."""

    # Numeric feature columns taken from the PurpleAir data.
    float_cols = ['PM25_A', 'PM25_B', 'Humedad_Relativa', 'Temperatura', 'Presion']
    # NOTE(review): this flag is never updated anywhere in the class.
    data_splitted = False

    def __init__(self, purple, aire):
        # Copies, so in-place mutations (standardize, create_*_col) never
        # touch the caller's frames.
        self.purple = purple.copy()
        self.aire = aire.copy()

        with open('./data/jsons/locations.json') as json_file:
            self.all_locations = json.load(json_file)
        with open('./data/jsons/purple_ids.json') as json_file:
            self.interest_ids = json.load(json_file)

    def get_data_train_test(self, multiple=True):
        """Return X_train, y_train, X_test, y_test (2/3 train, 1/3 test).

        multiple=True uses all float_cols as features; otherwise only the
        two PM25 channels. The split is also kept on self.X_train etc.
        Note: shuffle=True without a random_state, so the split differs
        between calls.
        """
        X = self.purple[self.float_cols]
        y = self.aire['PM25']
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=1/3, shuffle = True)
        if multiple:
            X_train, X_test = self.X_train, self.X_test
        else:
            X_train, X_test = self.X_train[['PM25_A', 'PM25_B']], self.X_test[['PM25_A', 'PM25_B']]
        return X_train, self.y_train, X_test, self.y_test

    def create_hour_col(self):
        """Add an 'Hour' column derived from the 'Dia' datetime column."""
        self.purple['Hour'] = self.purple['Dia'].dt.hour

    def create_month_col(self):
        """Add a 'Month' column derived from the 'Dia' datetime column."""
        self.purple['Month'] = self.purple['Dia'].dt.month

    def get_data(self, multiple=True):
        """Return (X, y): all float_cols when multiple=True, otherwise the
        averaged PM25 column reshaped to a single-feature 2-D array."""
        if multiple:
            X = self.purple[self.float_cols]
        else:
            X = self.purple['PM25_Promedio'].values.reshape(-1,1)
        y = self.aire['PM25'].values.flatten()
        return X, y

    def standardize(self):
        """Z-score the float feature columns of the purple frame in place."""
        std_scaler = StandardScaler()
        self.purple[self.float_cols] = std_scaler.fit_transform(self.purple[self.float_cols])

    def get_municipio(self, municipios, multiple=True):
        """Return (X, y) restricted to the given municipios.

        all_locations[municipio] is indexed [0] for the purple sensor id and
        [1] for the aire sensor id (see the list comprehensions below).
        """
        purple_ids = [self.all_locations[municipio][0]
                      for municipio in municipios]
        aire_ids = [self.all_locations[municipio][1]
                    for municipio in municipios]

        curr_purple = self.purple[self.purple['Sensor_id'].isin(purple_ids)]
        curr_aire = self.aire[self.aire['Sensor_id'].isin(aire_ids)]

        if multiple:
            return curr_purple[self.float_cols], curr_aire['PM25']
        else:
            return curr_purple['PM25_Promedio'].values.reshape(-1,1), curr_aire['PM25']
| gziz/air-pollution-models | src/data_models.py | data_models.py | py | 2,520 | python | en | code | 0 | github-code | 50 |
2032923031 | from django.conf.urls import include, url
from django.contrib import admin
# Scan installed apps for admin.py modules and register their models.
admin.autodiscover()
import hello.views
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# URL routing table: each pattern maps a path regex to a view in hello.views;
# the topic/answer routes capture a numeric id that is passed to the view.
urlpatterns = [
    url(r'^$', hello.views.index, name='index'),
    url(r'^popular', hello.views.popular, name='popular'),
    url(r'^latest', hello.views.latest, name='latest'),
    url(r'^ask', hello.views.ask, name='ask'),
    url(r'^topic/(\d+)', hello.views.topic, name='topic'),
    url(r'^answer/(\d+)', hello.views.answer, name='answer'),
    url(r'^db', hello.views.db, name='db'),
    url(r'^admin/', include(admin.site.urls)),
]
| inaz2/osiete.herokuapp.com | gettingstarted/urls.py | urls.py | py | 668 | python | en | code | 0 | github-code | 50 |
30919291696 | from src.shelf import data_review as dl
from src.shelf import dicts as alp
import codecs
import matplotlib.pyplot as plt
# Review records; as used below, element [0] is the review text and
# element [1] the star rating.
data = dl.selected_reviews
# Words of interest used to filter the frequency chart at the bottom of the file.
important = alp.words
# Directory where the generated charts are saved.
image_location = "images"
def rating_dist(data):
    """Count how many reviews carry each star rating.

    Returns a 5-element list where index i holds the number of records
    whose rating field (element [1]) equals i + 1.
    """
    counts = [0, 0, 0, 0, 0]
    for record in data:
        stars = int(record[1])
        counts[stars - 1] += 1
    return counts
def star_distribution(stars):
    """Bar-chart the number of ratings per star (1-5), save the figure as
    Star_distribution.pdf under image_location, show it, and return 0."""
    plt.xlabel("Stars")
    plt.ylabel("Number Of Ratings")
    plt.title("Star Distribution")
    plt.bar([1, 2, 3, 4, 5], stars)
    # "\\" is a Windows-style path separator.
    plt.savefig(image_location + "\\" + "Star_distribution.pdf")
    plt.show()
    return 0
def word_distribution(words, file, name):
    """Bar-chart the first 10 entries of *words* and save the figure.

    words: list of (word, count) pairs — callers pass count-sorted lists,
    so the chart shows the most frequent words (assumption: confirm for
    new callers).
    file: output file name placed under image_location.
    name: chart title.
    """
    plt.xlabel("Words")
    plt.ylabel("Number Of Times Used")  # fixed label typo: was "Uesd"
    plt.title(name)
    domain = [x[0] for x in words]
    rang = [x[1] for x in words]
    length = 10
    plt.bar(domain[0:length], rang[0:length])
    # "\\" is a Windows-style path separator.
    plt.savefig(image_location + "\\" + file)
    plt.show()
def star_rates(rate):
    """Bar-chart the fraction of ratings per star (1-5), save the figure as
    Star_rates.pdf under image_location, show it, and return 0."""
    plt.xlabel("Stars")
    plt.ylabel("Chance Of Rating")
    plt.title("Star Distribution")
    plt.bar([1, 2, 3, 4, 5], rate)
    # "\\" is a Windows-style path separator.
    plt.savefig(image_location + "\\" + "Star_rates.pdf")
    plt.show()
    return 0
def rate_of_distribution(stars):
    """Normalize the star-count list into fractions that sum to 1."""
    total = sum(stars)
    return [count / total for count in stars]
def graphing():
    """Plot the star-count distribution and the per-star rating fractions
    for the module-level review data."""
    reviews = rating_dist(data)
    star_distribution(reviews)
    rate = rate_of_distribution(reviews)
    star_rates(rate)
def find_word(review, start):
    """Scan *review* from *start* and return (word, next_position).

    Leading separator characters are skipped, then characters are consumed
    until the next separator (see find_space).
    """
    place = start
    while place < len(review):
        space, length = find_space(review, place)
        if space and place == start:
            # Still on leading separators: move the word start past them.
            # NOTE(review): start jumps by `length` while place only advances
            # by 1 below, so multi-character separators can desync the two
            # cursors and yield an empty word — confirm intended.
            start = start + length
        elif space:
            # Hit the separator that ends the current word.
            break
        place = place + 1
    word = review[start:place]
    return word, place
def find_space(review, location):
    """Return (is_separator, skip_length) for the text at *location*.

    Single-char punctuation skips 1 character; two-char sequences such as
    '--' (or a lone trailing backslash) skip 2; literal escaped sequences
    such as backslash-n / backslash-quote skip 3.
    """
    char = review[location]
    pair = review[location:location + 2]
    single_char_punctuation = [' ', '.', ',', ';', '!', '?', '(', ')', '-', '/']
    med_char_punctuation = ['--', '\\']
    long_char_punctuation = ["\\n", '\\"']
    if char in single_char_punctuation:
        return True, 1
    # Bug fix: the original compared a one-character slice with `is` against
    # the list object itself, so this branch could never match.
    # NOTE: '--' is still unreachable because '-' is caught by the
    # single-char list above; kept for fidelity with the original tables.
    if pair in med_char_punctuation:
        return True, 2
    if pair in long_char_punctuation:
        return True, 3
    return False, 0
# need to convert words to all lower case to see the ammount of words
def convert_to_alpha(word):
    """Map each character of *word* through the alp.alpha table, leaving
    characters without an entry unchanged, and return the joined result."""
    convertor = alp.alpha
    return ''.join(convertor.get(letter, letter) for letter in word)
# then order the words
def add_data(data, name_of_var, typed):
    """Write (file mode *typed*: 'w' or 'a') a `name = value` assignment
    line into the mod/words.py file so computed tables can be imported."""
    # NOTE(review): the literal 'mod\words.py' relies on '\w' not being a
    # recognized string escape — it works, but a raw string or a forward
    # slash would be safer.
    with codecs.open('mod\words.py', typed, 'utf-8') as f:
        f.write(name_of_var + " = " + str(data))
def word_count():
    """Tokenize up to the first 7,999 reviews from the module-level `data`,
    normalizing each token via convert_to_alpha, and return two lists of
    (word, count) pairs: sorted by descending count and alphabetically."""
    next_start = 0
    words = {}
    loop = 0
    for x in data:
        if loop % 1000 == 0:
            # Progress indicator while crunching reviews.
            print(loop)
        loop = loop + 1
        if loop < 8000:
            review = x[0]
            while next_start < len(review):
                word, next_start = find_word(review, next_start)
                word = convert_to_alpha(word)
                if word in words:
                    words[word] = words[word] + 1
                else:
                    words[word] = 1
            next_start = 0
        else:
            # Hard cap: only the first 7,999 reviews are processed.
            break
    # NOTE(review): the lambdas shadow the loop variable name `x`; harmless
    # here but easy to misread.
    sorted_words = sorted(words.items(), reverse=True, key=lambda x: x[1])
    alpha_words = sorted(words.items(), reverse=False, key=lambda x: x[0])
    print(words)
    return sorted_words, alpha_words
# Build the word-frequency tables and plot the overall and the filtered
# (words of interest) distributions.
words, alpha = word_count()
print(alpha)
word_distribution(words, "Word_distribution.pdf", "Word distribution")
# Keep only the words flagged as interesting in alp.words, preserving the
# descending-count order of `words`.
important_words = []
for x in words:
    if x[0] in important:
        important_words.append(x)
word_distribution(important_words, "important_words.pdf", "Important Words Distribution")
# add_data(words, "ordered_words", 'w')
# add_data(alpha, "\nalpha_words", 'a')
| rcmayhew/model | src/bag_of_words.py | bag_of_words.py | py | 4,045 | python | en | code | 0 | github-code | 50 |
17290277462 | handler = open("R&J_WORD_FREQS.txt", 'r')
# Word -> occurrence count, built from the frequency file read below.
word_list = {}
# NOTE(review): pre-seeding 'a' only affects mf_count (a repeat is recorded
# the first time 'a' is seen); presumably left over from testing — confirm.
word_list['a'] = 0
def checkNotPresent(str1):
    """Return True when *str1* is not yet a key of the module-level
    word_list dictionary."""
    # word_list is a dict created at module level, so it is never None in
    # practice; the guard is kept from the original for safety.
    if word_list is None:
        return True
    # Membership test replaces the original's manual O(n) key scan.
    return str1 not in word_list
# Highest occurrence count seen for any purely-alphabetic word.
mf_count = 0
for line in handler:
    data = line.strip().split(" ")
    for i in range(len(data)):
        rdata = data[i].lower()
        if rdata.isalpha() and checkNotPresent(rdata):
            word_list[rdata] = 1
        elif rdata.isalpha():
            word_list[rdata] += 1
            mf_count = max(mf_count, word_list[rdata])
# NOTE(review): mf_count is only updated on repeat sightings, so it stays 0
# when every word occurs exactly once; `handler` is also never closed.
print(mf_count)
| Echomo-Xinyu/H2_Computing | python/python_lesson/3_29/ws3/ws3_5.py | ws3_5.py | py | 618 | python | en | code | 3 | github-code | 50 |
32076053454 | import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# ImageDataGenerator
# Generates batches of image data with real-time augmentation; during
# training it yields batches indefinitely until the requested number of
# epochs has been reached.
# Parameters
# featurewise_center: boolean, center the input dataset (dataset mean = 0)
# samplewise_center: boolean, set each input sample's mean to 0
# feature_std_normalization: boolean, divide inputs by the dataset's
#     standard deviation, feature-wise
# zca_whitening: boolean, apply ZCA whitening to the inputs
# zca_epsilon: epsilon used by ZCA whitening, default 1e-6
# rotation_range: integer, degree range (0-180) for random rotations
# width_shift_range: float, fraction of image width for random horizontal shifts
# height_shift_range: float, fraction of image height for random vertical shifts
# shear_range: float, shear intensity (counter-clockwise shear angle)
# zoom_range: float or [lower, upper] list, random zoom range; a single float
#     is equivalent to [1 - zoom_range, 1 + zoom_range]
# fill_mode: how points that fall outside the boundary are filled after a transform
# cval: float or int, fill value used when fill_mode='constant'
# horizontal_flip: boolean, randomly flip inputs horizontally
# vertical_flip: boolean, randomly flip inputs vertically
# rescale: rescaling factor, default None; if None or 0 no rescaling is
#     applied, otherwise the data is multiplied by this value (before any
#     other transformation)
# Augmentation pipeline: small random rotations/shifts/zooms plus horizontal
# flips; pixel values are rescaled from [0, 255] into [0, 1].
datagen = ImageDataGenerator(
    rescale=1. / 255,  # bug fix: was 1.255, a typo that scaled pixels past 1.0
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')

# Pull 20 augmented batches from the 'keras_aug' directory, writing each
# augmented image back into the same directory with an 'aug' prefix.
i = 0
for batch in datagen.flow_from_directory(
        'keras_aug',
        target_size=(48, 48),
        batch_size=32,
        save_to_dir='keras_aug',
        save_prefix='aug'):
    i += 1
    if i == 20:
        break
# for image_path in os.listdir('keras_aug'):
# img = load_img(os.path.join('keras_aug', image_path))
# img_source = img_to_array(img)
# img_source = img_source.reshape((1, ) + img_source.shape)
# label = np.array(['sad'])
# # datagen.fit(img_source)
# i = 0
# for batch in datagen.flow(
# img_source,
# label,
# batch_size=32,
# save_to_dir='keras_aug',
# save_prefix='aug',
# save_format='png'):
# i += 1
# print(i)
# if(i == 20):
# break
| esdream/smile | training/test_keras_aug.py | test_keras_aug.py | py | 2,613 | python | zh | code | 0 | github-code | 50 |
71629052634 | # PROYECTO
"""
GUESS-THE-NUMBER GAME.
- Ask for the player's name
- Pick a random number between 1 and 100
- Give the player 8 attempts to find it
- Report when the guess is outside 1..100, below the target, or above it
- On a correct guess, congratulate the player and show the attempts left
- When all attempts are used up, say the player lost and reveal the number
"""
from random import randint

# Ask the player for their name.
name = input("Ingresa tu nombre: ")
print(f"Muy bien, {name}. He elegido un numero entre 1 y 100, y tienes 8 intentos para encontrarlo. Vamos.")

# Pick the secret number. (The original debug print that revealed it has
# been removed — it spoiled the game.)
numero_encontrar = randint(1, 100)

numero_intentos = 8
while numero_intentos > 0:
    numero_jugador = int(input("Ingresa un numero: "))
    if numero_jugador < 1 or numero_jugador > 100:
        print("El numero esta fuera del rango de 1 y 100. Pierdes una vida.")
    elif numero_jugador < numero_encontrar:
        print("El numero es menor al numero a encontrar. Pierdes una vida.")
    elif numero_jugador > numero_encontrar:
        print("El numero es mayor al numero al encontrar. Pierdes una vida.")
    else:
        print(f"FELICIDADES HAS GANADO! te quedaron {numero_intentos} intentos.")
        break
    # Every wrong guess costs one attempt.
    numero_intentos = numero_intentos - 1
    print(f"Te quedan: {numero_intentos} intentos")
else:
    # while-else runs only when the loop exhausts without a break, i.e. the
    # player used all attempts. (The original attached this else to the
    # exhaustive if/elif chain, so the losing message was unreachable.)
    print(f"HAS PERDIDO. El numero a encontrar era {numero_encontrar}")
| AdamNoir/MyLearning | Python/Python 16 Días/Día 4/Proyecto.py | Proyecto.py | py | 1,703 | python | es | code | 0 | github-code | 50 |
17019972814 | # --------------------------------------------------------------------------
# Loads and processes files from the site's 'includes' directory.
# --------------------------------------------------------------------------
import os
from . import loader
from . import renderers
from . import site
# Dictionary of rendered files indexed by (normalized) filename.
cache = None
# Return a dictionary of rendered files from the 'includes' directory.
def load():
    """Lazily render every source file under site.inc() and return the
    {normalized_basename: rendered_text} map; repeat calls reuse the cache."""
    # Load and cache the directory's contents.
    global cache
    if cache is None:
        cache = {}
        if os.path.isdir(site.inc()):
            for finfo in loader.srcfiles(site.inc()):
                text, _ = loader.load(finfo.path)
                # Basename lowercased, with spaces and hyphens -> underscores.
                key = finfo.base.lower().replace(' ', '_').replace('-', '_')
                cache[key] = renderers.render(text, finfo.ext)
    return cache
| GregHattJr/malt | malt/includes.py | includes.py | py | 867 | python | en | code | null | github-code | 50 |
1067872119 | # Python - 2020 Summer Course
# Day 8
# Topic: More on SQL and Database (Loading a Database)
# Instructor: Patrick Cunha Silva
# Former Instructors: Ryden Buttler, Erin Rossiter
# Michele Torres, David Carlson, and
# Betul Demirkaya
# This is an addition to the course made by Patrick Cunha Silva
from sqlalchemy import *
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, and_, or_
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
import os
# Set WD
# NOTE(review): hard-coded, machine-specific path — the script only runs
# as-is on the author's machine.
os.chdir('/Users/pcunhasilva/Dropbox/PythonClass/Summer2020/Day7/Lab')
# The simplest usage is to reflect an existing database into a new model.
Base = automap_base()
# Create an engine (echo=True logs every emitted SQL statement)
engine = sqlalchemy.create_engine('sqlite:///geog.db', echo=True)
# reflect the tables
Base.prepare(engine, reflect=True)
# Check the classes in our original db
# NOTE(review): the dir() result is discarded — only useful interactively.
dir(Base.classes)
# mapped classes that we have in our original database
Department = Base.classes.departments
Region = Base.classes.regions
Town = Base.classes.towns
# Create a session to store new things in the db
Session = sessionmaker(bind=engine)
session = Session()
# Create region 6
reg6 = Region(name = 'Region 6')
# Create departments, nested in regions
dept6 = Department(deptname = 'Department 6')
reg6.departments_collection.append(dept6)
# Create towns, nested in departments
t10 = Town(name = 'j', population = 750000)
dept6.towns_collection = [t10]
# Add to our database
session.add_all([reg6])
session.add_all([dept6])
session.add_all([t10])
session.commit()
# Some example querying
for town in session.query(Town).order_by(Town.id):
    print(town.id, town.name, town.population)
# Display the total number of inhabitants
# per department
for depart in session.query(Department.deptname, func.sum(Town.population).label('total')).join(Town).group_by(Department.deptname):
    print('{}. Total Population: {}'.format(depart.deptname, depart.total))
# Close connection
session.close()
| pcunhasilva/python_summer2020 | Day8/Lecture/day08p2.py | day08p2.py | py | 2,132 | python | en | code | 2 | github-code | 50 |
36487965490 |
def print_newspaper(lines, align, width):
    """Greedily word-wrap each word-list in *lines* to *width* columns and
    print the result framed by asterisks.

    lines: list of lists of words (no longer mutated — the original popped
        words off the caller's lists).
    align: one "LEFT"/"RIGHT" entry per input line, applied to every wrapped
        row produced from that line.
    width: interior width in characters; the frame adds one '*' per side.
    """
    def _format_row(words, alignment):
        # Render one wrapped row, padding spaces on the non-aligned side.
        text = " ".join(words)
        padding = " " * (width - len(text))
        if alignment == "LEFT":
            return "*" + text + padding + "*"
        return "*" + padding + text + "*"

    border = "*" * (width + 2)
    final_lines = [border]
    for i, line in enumerate(lines):
        current = []
        for word in line:
            # Greedy wrap: keep taking words while the row stays <= width.
            if len(" ".join(current + [word])) <= width:
                current.append(word)
            else:
                final_lines.append(_format_row(current, align[i]))
                # Starting the new row with the word (instead of re-testing
                # it, as the original did) also terminates when a single
                # word is longer than width — the original looped forever.
                current = [word]
        final_lines.append(_format_row(current, align[i]))
    final_lines.append(border)
    for row in final_lines:
        print(row)
| emilycheera/coding-challenges | newspaper.py | newspaper.py | py | 1,084 | python | en | code | 1 | github-code | 50 |
16103621701 | import socket
import threading
import datetime
import select
class SockThread(threading.Thread):
def __init__(self,socket,addr,cli_struct):
super().__init__()
self.socket = socket
self.addr = addr
self.cli_struct = cli_struct
def disconnection_times(self):
name = threading.current_thread().name
now = datetime.datetime.now()
Time = now.strftime("%Y/%m/%d %H:%M:%S "+ name +" dis_connection \n")
ip, port = self.addr
# 创建日志文件
fname = str(ip) + '_' + str(port) + '.log'
f = open(fname, 'a+')
f.write(Time)
f.close()
def lose_data(self,sum,name):
lose_data = []
packages = self.cli_struct[0].get('package')
sum_server = len(packages)
for i in range(1,sum+1):
if i not in packages:
lose_data.append(i)
print('--服务端接受数据完毕----\n')
print('客户端(%s)总共发送 %s 个数据包\n' % (name,sum))
print('服务端总共接收到 %s 个包\n' % sum_server)
print('丢失的数据包号为:%s\n' % lose_data)
def run(self):
name = threading.current_thread().name
# self.connection_times()
self.cli_struct[0].get('sock').append(name)
while True:
try:
d = self.socket.recv(1024)
string = str(d.decode('utf-8'))
data = eval(string)
print('成功接收'+self.addr+'发来的数据(data):',data)
# sum为包的总数,package为包号,当package=-1时 代表客户端发送完毕
sum = data.get('sum')
package = data.get('no')
self.cli_struct[0].get('package').append(package)
if package == -1:
self.lose_data(sum,name)
except Exception as e:
print(e)
break
print(self.cli_struct)
self.socket.close()
self.disconnection_times()
def __del__(self):
if not self.socket == None:
self.socket.close()
def connection_times(c_addrs):
#获取当前时间
now = datetime.datetime.now()
#格式化时间
Time = now.strftime("%Y/%m/%d %H:%M:%S connection \n")
ip , port = c_addrs
#创建日志文件
fname = str(ip) + '_' + str(port) +'.log'
f = open(fname, 'a+')
f.write(Time)
f.close()
def main():
    """select()-based accept loop: registers new clients, logs connections,
    and hands readable client sockets off to SockThread workers."""
    # Create the listening TCP socket
    s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # Bind to the (hard-coded) ip / port
    s.bind(('192.168.116.1',9999))
    # Sockets watched for readability by select();
    # NOTE(review): `input` shadows the builtin input().
    input = [s,]
    output=[]
    other=[]
    s.listen(5)
    while True:
        # 1-second timeout so the loop never blocks indefinitely
        r_list,w_list,e_list = select.select(input,[],[],1)
        for coon in r_list:
            if coon == s:
                # The listening socket is readable: accept the new client
                c_socket, c_addrs = coon.accept()
                input.append(c_socket)
                connection_times(c_addrs)
            else:
                # NOTE(review): this first recv's payload is discarded before
                # the worker thread starts, and c_addrs may still refer to a
                # previously accepted client — confirm both are acceptable.
                data = coon.recv(1024)
                if data :
                    cli_struct = [{
                        "sock": [],
                        "addr": c_addrs,
                        "package": []
                    }]
                    t1 = SockThread(coon, c_addrs,cli_struct)
                    t1.start()
                else:
                    # Empty read: the peer closed the connection
                    if coon in output:
                        output.remove(coon)
                    input.remove(coon)
if __name__ == '__main__':
    main()
| wd15102/select_server | select.py | select.py | py | 3,526 | python | en | code | 0 | github-code | 50 |
25125623386 | #!/usr/bin/env python3
import torch
import cv2
from ai_old.util.etc import resize_imgs
from ai_old.util.face import custom_align_face
from ai_old.util.inverse import get_outer_quad
# Dilation kernel edge length at the reference resolution of 192 px
# (scaled linearly to other sizes by get_di_k).
DI_K_192 = 16
# The outer boundary band is imsize // 32 pixels wide.
OUTER_BOUNDARY_DIV = 32
def get_di_k(imsize):
    """Scale the reference dilation kernel size (16 @ 192 px) linearly to
    *imsize*, truncating to int."""
    return int(DI_K_192 * (imsize / 192))
def get_dilate_kernel(imsize):
    """Build a square OpenCV structuring element sized for *imsize* (see
    get_di_k), for use with cv2.dilate."""
    di_k = get_di_k(imsize)
    return cv2.getStructuringElement(cv2.MORPH_RECT, (di_k, di_k))
def get_outer_boundary_size(imsize):
    """Width in pixels of the edge band used by get_outer_boundary_mask."""
    return imsize // OUTER_BOUNDARY_DIV
def get_outer_boundary_mask(imsize):
    """Return an (imsize, imsize) float mask that is 1. in a band of
    get_outer_boundary_size(imsize) pixels along all four edges and 0.
    elsewhere."""
    outer_boundary_mask = torch.zeros(imsize, imsize)
    obs = get_outer_boundary_size(imsize)
    if obs > 0:
        # Vectorized fill of the four edge bands (replaces the original
        # O(imsize^2) per-pixel Python loop). The obs > 0 guard keeps the
        # imsize < OUTER_BOUNDARY_DIV case an all-zero mask, as before —
        # a bare [-0:] slice would otherwise fill everything.
        outer_boundary_mask[:obs, :] = 1.
        outer_boundary_mask[-obs:, :] = 1.
        outer_boundary_mask[:, :obs] = 1.
        outer_boundary_mask[:, -obs:] = 1.
    return outer_boundary_mask
def get_inner_mask(inner_imsize, outer_imsize, device='cuda'):
    """Return an (outer_imsize, outer_imsize) float mask that is 1. on the
    centered inner_imsize x inner_imsize square and 0. elsewhere."""
    inner_mask = torch.zeros(outer_imsize, outer_imsize, device=device)
    half_delta = (outer_imsize - inner_imsize) // 2
    # Vectorized fill of the centered square (replaces the original
    # O(inner_imsize^2) per-pixel Python loop).
    inner_mask[half_delta:half_delta + inner_imsize,
               half_delta:half_delta + inner_imsize] = 1.
    return inner_mask
def dilate_mask(mask, dilate_kernel):
    """Dilate a 0./1.-valued float mask with cv2.dilate and return the
    result as a 0./1. float tensor on cuda.

    The *255 / /255 round-trip keeps values in OpenCV's usual range;
    thresholding at 0.5 re-binarizes the result.
    """
    dilated_mask = cv2.dilate(mask.cpu().numpy() * 255., dilate_kernel)
    dilated_mask = torch.tensor(dilated_mask / 255.).to('cuda')
    return (dilated_mask > 0.5).float()
def fhbc_seg_to_facehair(seg):
    """Collapse an FHBC segmentation into a binary mask: 1. where the class
    is 0 (face) or 1 (hair), 0. elsewhere."""
    is_face = seg == 0
    is_hair = seg == 1
    return (is_face | is_hair).float()
def resize_and_pad_inner_img(
    img,
    inner_mask,
    inner_imsize,
    outer_imsize,
    is_seg=False,
):
    """Center *img* inside a zero-padded (outer_imsize x outer_imsize) canvas.

    For is_seg=True, *img* must be a (1, inner, inner) label map and the
    result is a long tensor of shape (1, outer, outer); otherwise *img* is
    resized to inner_imsize and placed into a (1, 3, outer, outer) float
    canvas.  *inner_mask* is validated against the expected centered-square
    layout (AssertionError on mismatch, as before).

    Rewritten with slice copies instead of the original per-pixel Python
    double loop, which was O(outer_imsize^2).
    """
    if is_seg:
        assert img.shape == (1, inner_imsize, inner_imsize)
        ret = torch.zeros(1, outer_imsize, outer_imsize, dtype=torch.long,
            device='cuda')
    else:
        img = resize_imgs(img, inner_imsize)
        ret = torch.zeros(1, 3, outer_imsize, outer_imsize, device='cuda')
    buf = (outer_imsize - inner_imsize) // 2
    lo, hi = buf, buf + inner_imsize
    # validate the mask as whole-tensor checks (the original asserted every
    # pixel individually inside the copy loop)
    assert bool((inner_mask[lo:hi, lo:hi] == 1.).all())
    outside = inner_mask.clone()
    outside[lo:hi, lo:hi] = 0.
    assert bool((outside == 0.).all())
    if is_seg:
        ret[0, lo:hi, lo:hi] = img[0]
    else:
        ret[0, :, lo:hi, lo:hi] = img[0]
    return ret
def outer_align(full_img, inner_quad):
    """Expand the inner alignment quad and align the face crop at 1536px."""
    target_size = 1536  # 1024 + 512
    expanded_quad = get_outer_quad(inner_quad, full=full_img)
    aligned = custom_align_face(full_img, expanded_quad, target_size)
    return aligned, expanded_quad
| calvinpelletier/ai_old | util/outer.py | outer.py | py | 2,776 | python | en | code | 0 | github-code | 50 |
'''Reads city names and populations from a file and prints the most populous.

Each line of ``arq.txt`` is expected as ``name-population``, with ``.`` used
as the thousands separator (e.g. ``Sao Paulo-12.300.000``).
'''
cidade = ''
populacao = 0
with open("arq.txt", 'a+') as arquivo:
    arquivo.seek(0)  # 'a+' opens positioned at EOF; rewind before reading
    for linha in arquivo.read().split('\n'):
        # BUG FIX: skip blank or malformed lines -- the original crashed with
        # IndexError on the empty entry produced by a trailing newline.
        if '-' not in linha:
            continue
        partes = linha.split('-')
        habitantes = int(partes[1].replace('.', ''))
        if habitantes > populacao:
            populacao = habitantes
            cidade = partes[0]
print(f'{cidade} é a cidade mais populosa com {populacao} de habitantes')
| LucasSalu/Curso_Python_Basico_Avan-ado | Arquivos_de_texto/exercicio_text10.py | exercicio_text10.py | py | 515 | python | pt | code | 0 | github-code | 50 |
27769680948 | from .veloxchemlib import Molecule
from .veloxchemlib import MolecularBasis
from .veloxchemlib import AtomBasis
from .veloxchemlib import BasisFunction
from .veloxchemlib import ChemicalElement
from .veloxchemlib import bohr_in_angstroms
from .veloxchemlib import assert_msg_critical
from .veloxchemlib import to_angular_momentum
import re
class InputParser:
    """ Provides functions for parsing VeloxChem input files into a format,
        which passes the needed information to the rest of the program """
    def __init__(self, filename):
        """ Initializes the parser and parses the input file """
        # parsed result: {group_name: {key: value}} (or lists for basis sets)
        self.input_dict = {}
        # flips to False on any read/syntax error; checked by parse()
        self.success_monitor = True
        self.filename = filename
        # set by read_file() when the file starts with an @BASIS_SET header
        self.is_basis_set = False
        self.basis_set_name = ''
        self.parse()
    # defining main functions
    def parse(self):
        """ Calls every function needed for the parsing process depending on
            the success of the parsing in different stages """
        errmsg = ''
        try:
            # reading selected file
            self.read_file()
            # checking for syntax correctness of the input file
            self.incomp_group_check()
            self.empty_group_check()
        except FileNotFoundError:
            errmsg = 'input parser: cannot open file %s' % self.filename
            self.success_monitor = False
        except SyntaxError:
            errmsg = 'input parser: bad syntax in file %s' % self.filename
            errmsg += '\n    You may check for incomplete or empty groups.'
            self.success_monitor = False
        # aborts the program with errmsg if success_monitor is False
        assert_msg_critical(self.success_monitor, errmsg)
        if self.success_monitor:
            # manipulation of input string
            self.clear_interspace()
            # processing the data into lists and dictionaries
            self.groupsplit()
            self.convert_dict()
            if self.is_basis_set:
                return
            # converting angstroms to atomic units, if needed
            need_convert_units = True
            if 'molecule' in self.input_dict.keys():
                if 'units' in self.input_dict['molecule'].keys():
                    units = self.input_dict['molecule']['units']
                    if units in ['au', 'bohr', 'bohrs']:
                        need_convert_units = False
            if need_convert_units:
                self.convert_units()
    def get_dict(self):
        """ Gets the input dictonary """
        return self.input_dict
    # defining molecule and basis set readers
    @staticmethod
    def create_molecule_from_xyz(xyzfile, charge=0, spinmult=1):
        """ Builds a Molecule from a standard .xyz file (coordinates in
            angstroms), applying the given charge and spin multiplicity. """
        atom_labels = []
        x_coords = []
        y_coords = []
        z_coords = []
        with open(xyzfile, 'r') as f_xyz:
            # first line: atom count; second line: comment (skipped)
            natoms = int(f_xyz.readline().split()[0])
            f_xyz.readline()
            for a in range(natoms):
                content = f_xyz.readline().split()
                atom_labels.append(content[0])
                # convert angstroms -> bohr on the fly
                x_coords.append(float(content[1]) / bohr_in_angstroms())
                y_coords.append(float(content[2]) / bohr_in_angstroms())
                z_coords.append(float(content[3]) / bohr_in_angstroms())
        mol = Molecule(atom_labels, x_coords, y_coords, z_coords)
        mol.set_charge(charge)
        mol.set_multiplicity(spinmult)
        mol.check_multiplicity()
        mol.check_proximity(0.1)
        return mol
    def create_molecule(self):
        """ Builds a Molecule from the already-parsed 'molecule' group. """
        mol_dict = self.input_dict
        mol = Molecule(mol_dict['molecule']['atom_labels'],
                       mol_dict['molecule']['x_coords'],
                       mol_dict['molecule']['y_coords'],
                       mol_dict['molecule']['z_coords'])
        if 'charge' in mol_dict['molecule'].keys():
            mol.set_charge(int(mol_dict['molecule']['charge']))
        if 'multiplicity' in mol_dict['molecule'].keys():
            mol.set_multiplicity(int(mol_dict['molecule']['multiplicity']))
        mol.check_multiplicity()
        mol.check_proximity(0.1)
        return mol
    def create_basis_set(self, mol):
        """ Builds a MolecularBasis for *mol* from the parsed per-element
            'atombasis_<element>' groups of a basis-set file. """
        basis_dict = self.input_dict
        mol_basis = MolecularBasis()
        elem_comp = mol.get_elemental_composition()
        for elem_id in elem_comp:
            elem = ChemicalElement()
            err = elem.set_atom_type(elem_id)
            assert_msg_critical(err, "ChemicalElement.set_atom_type")
            basis_key = 'atombasis_%s' % elem.get_name().lower()
            basis_list = [entry for entry in basis_dict[basis_key]]
            atom_basis = AtomBasis()
            while basis_list:
                # shell header: "<angmom-letter> <n-primitives> <n-contracted>"
                shell_title = basis_list.pop(0).split()
                assert_msg_critical(
                    len(shell_title) == 3,
                    "Basis set parser (shell): %s" % ' '.join(shell_title))
                angl = to_angular_momentum(shell_title[0])
                npgto = int(shell_title[1])
                ncgto = int(shell_title[2])
                expons = [0.0] * npgto
                # coeffs is laid out contraction-major: coeffs[k*npgto + i]
                coeffs = [0.0] * npgto * ncgto
                for i in range(npgto):
                    # primitive line: "<exponent> <coeff_1> ... <coeff_ncgto>"
                    prims = basis_list.pop(0).split()
                    assert_msg_critical(
                        len(prims) == ncgto + 1,
                        "Basis set parser (primitive): %s" % ' '.join(prims))
                    expons[i] = float(prims[0])
                    for k in range(ncgto):
                        coeffs[k * npgto + i] = float(prims[k + 1])
                bf = BasisFunction(expons, coeffs, ncgto, angl)
                bf.normalize()
                atom_basis.add_basis_function(bf)
            atom_basis.set_elemental_id(elem_id)
            mol_basis.add_atom_basis(atom_basis)
        basis_label = basis_dict['basis_set_name'].upper()
        mol_basis.set_label(basis_label)
        return mol_basis
    # defining subordinated functions
    def read_file(self):
        """ Storing content of selected file as a string type. Deleting
            comments (marked by '!'). Deleting unnecassary whitespace. """
        self.content = ''
        with open(self.filename, 'r') as f_inp:
            for line in f_inp:
                # remove comment and extra white spaces
                line = line.strip()
                line = re.sub('!.*', '', line)
                line = ' '.join(line.split())
                # skip first line if reading basis set
                if line[:10] == '@BASIS_SET':
                    self.is_basis_set = True
                    self.basis_set_name = line.split()[1]
                    continue
                # take care of end of group
                if line.lower()[:4] == '@end':
                    line = line.lower()
                # add trailing '\n'
                if line:
                    self.content += line + '\n'
    def incomp_group_check(self):
        """ Checking for any incomplete groups. """
        # matches either two group openings without an @end in between, or
        # two consecutive @end tags without a group opening in between
        if re.findall(
                r'@(?!end[\s])[^@]*@(?!end(?![\w]))|@end\s[^@]*@end(?![\w])',
                self.content) != []:
            raise SyntaxError
        # anything after the last @end that still opens a group is dangling
        last_lines = self.content.split('@end')[-1].split('\n')
        for line in last_lines:
            line = line.strip()
            if len(line) > 0 and line[0] == '@':
                raise SyntaxError
    def empty_group_check(self):
        """ Checking for any empty groups. """
        # a group header immediately followed by @end has no content
        if re.findall(r'@[\w ]*\n\s*@end(?![\w])', self.content) != []:
            raise SyntaxError
    def clear_interspace(self):
        """ Deleting content, that's not within a group. """
        self.content = re.sub(r'@end\s[^@]*@', '@end\n@', self.content)
    def groupsplit(self):
        """ Creating a list in which every element is a list itself containing
            every line of a group, while deleting '@' and '@end' tags. """
        self.grouplist = re.findall(r'@(?!end[\s])\s*\w+[^@]*@end(?![\w])',
                                    self.content)
        for i in range(len(self.grouplist)):
            self.grouplist[i] = self.grouplist[i].strip().replace('@', '')
            # drop the trailing 'end' element left by the tag removal
            self.grouplist[i] = self.grouplist[i].split('\n')[:-1]
    def convert_dict(self):
        """ Converting the list of lists into a dictionary with groupnames as
            keys and group content as a dictionary itself. The geometry definition
            of the molecule group is stored in a different dictionary. Converting
            the molecular structure into the required format. """
        for group in self.grouplist:
            local_dict = {}
            local_list = []
            for entry in group[1:]:
                if ':' in entry:
                    # "key: value" entries go into the group's dict
                    key = entry.split(':')[0].strip()
                    key = '_'.join(key.split())
                    val = entry.split(':')[1].strip()
                    if key.lower() != 'xyz':
                        local_dict[key.lower()] = val
                else:
                    # bare lines (e.g. atom coordinates) go into the list
                    local_list.append(entry)
            group_key = group[0]
            group_key = '_'.join(group_key.split())
            if self.is_basis_set:
                self.input_dict[group_key.lower()] = local_list
            else:
                self.input_dict[group_key.lower()] = local_dict
            if group_key.lower() == 'molecule':
                self.atom_list = local_list
        if self.is_basis_set:
            self.input_dict['basis_set_name'] = self.basis_set_name
            return
        # expand the raw atom lines into parallel label/coordinate lists
        self.input_dict['molecule']['atom_labels'] = []
        self.input_dict['molecule']['x_coords'] = []
        self.input_dict['molecule']['y_coords'] = []
        self.input_dict['molecule']['z_coords'] = []
        for atom in self.atom_list:
            axyz = atom.split()
            self.input_dict['molecule']['atom_labels'].append(axyz[0])
            self.input_dict['molecule']['x_coords'].append(float(axyz[1]))
            self.input_dict['molecule']['y_coords'].append(float(axyz[2]))
            self.input_dict['molecule']['z_coords'].append(float(axyz[3]))
    def convert_units(self):
        """ Converting molecule coordinates from angstroms to atomic units. """
        coords = ['x_coords', 'y_coords', 'z_coords']
        angstroms_in_bohr = 1.0 / bohr_in_angstroms()
        for n in coords:
            for p in range(len(self.input_dict['molecule'][n])):
                self.input_dict['molecule'][n][p] *= angstroms_in_bohr
| manuel-br/input_handling | inputparser.py | inputparser.py | py | 10,435 | python | en | code | 0 | github-code | 50 |
33884785238 | # https://www.codewars.com/kata/5412509bd436bd33920011bc
# Your task is to write a function maskify, which changes all but the last four characters into '#'.
def maskify(word):
    """Mask all but the last four characters of *word* with '#'.

    Strings of length <= 4 are returned unchanged; None is passed through.
    BUG FIX: the original checked ``word == None`` *after* calling
    ``len(word)``, so None input raised TypeError before the check could
    ever run.  ``hash`` no longer shadows the builtin.
    """
    if word is None:
        return word
    if len(word) <= 4:
        return word
    masked = "#" * (len(word) - 4)
    return f"{masked}{word[-4:]}"
| modjeskaa/code-wars-solutions | Credit Card Mask.py | Credit Card Mask.py | py | 379 | python | en | code | 0 | github-code | 50 |
22573980841 | #!/usr/bin/env python
import os
from PIL import Image
class ImageExtfindError(Exception):
    """Raised when an image's extension/format cannot be recognized."""

    def __init__(self, value):
        # keep the offending value (typically the file path) for reporting
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
def verify_image(filename, maxwidth=None, maxheight=None):
    """
    Opens an image and checks if it is valid. If the image's
    width or height exceed the specified maximums, the image
    is resized in place (aspect ratio preserved).
    Arguments:
        filename: Path to the image to be checked.
        maxwidth: Maximum allowed width (None or <= 0 disables the check).
        maxheight: Maximum allowed height (None or <= 0 disables the check).
    Returns:
        It returns the image path is the image is valid.
        It raises IOError if the image cannot be opened or resized.
        It raises ImageExtfindError if the image extension/format cannot be recognized.
    """
    im = Image.open(filename)
    imformat = None
    try:
        (imwidth, imheight) = im.size
        im.verify()  # verify() invalidates the Image object; reopened below
        imformat = im.format
        # compute a single scale factor satisfying both limits.
        # BUG FIX: the original compared `maxwidth > 0` directly, which
        # raises TypeError under Python 3 when the defaults (None) are used.
        sf = 1.0
        if maxwidth is not None and maxwidth > 0 and imwidth > maxwidth:
            sf = float(maxwidth) / float(imwidth)
        if maxheight is not None and maxheight > 0 and imheight > maxheight:
            sf = min(sf, float(maxheight) / float(imheight))
        im = Image.open(filename)
        if sf < 1.0:
            newimwidth = int(imwidth * sf)
            newimheight = int(imheight * sf)
            # LANCZOS is the modern name for the removed ANTIALIAS alias
            im = im.resize((newimwidth, newimheight), Image.LANCZOS)
        # re-save in both cases, matching the original behaviour
        im.save(filename, format=imformat)
    except Exception as e:
        print('Exception while verifying image', e)
        raise IOError from e  # keep the original cause in the traceback
    # now see if a valid extension has been specified (required);
    # '?' guards against URL query-string remnants in the filename
    unused_root, fileext = os.path.splitext(filename)
    if (fileext == '') or (fileext.find('?') != -1):
        # if there is no valid extension supplied, then try to extract
        # this using the loaded image...
        if imformat is not None:
            impath = filename + '.' + imformat.lower()
            im.save(impath, imformat)
        else:
            raise ImageExtfindError(filename)
    else:
        impath = filename
    return impath
| ox-vgg/vgg_frontend | siteroot/controllers/utils/imchecktools.py | imchecktools.py | py | 2,340 | python | en | code | 3 | github-code | 50 |
21671800647 | import random
# Coupon-collector style simulation: draw random ints in [0, 100] until the
# values 1..100 have each been matched in increasing order, counting the
# total number of draws over `sample` runs and printing the average.
i = 1
ite = 0
sample = 10000
# BUG FIX: range(1, sample) ran only sample-1 simulations while the final
# average still divided by sample, biasing the result low.
for j in range(sample):
    i = 1
    while True:
        ite = ite + 1
        rand = random.randint(0, 100)
        if i == rand:
            print(i)
            i = i + 1
        if i == 101:
            break
print(ite)
print("avg = ", ite / sample)
8652076835 | import sys
# BOJ 7579: 0/1 knapsack over app memory (weight) vs. deactivation cost.
sys.stdin = open('input.txt')

def read_ints():
    return map(int, sys.stdin.readline().split())

N, M = read_ints()     # M: required memory (the knapsack capacity)
m = list(read_ints())  # per-app memory (weights)
c = list(read_ints())  # per-app activation cost (values)

dp = [[0] * (M + 1) for _ in range(N + 1)]
for i in range(1, N + 1):
    memory, value = m[i - 1], c[i - 1]
    for j in range(memory, M + 1):
        dp[i][j] = max(value + dp[i - 1][j - memory], dp[i - 1][j])
# NOTE(review): this prints the entire last DP row rather than a single
# value such as dp[-1][-1] -- kept as-is to preserve behaviour; confirm.
print(dp[-1])
24360520210 | import sys
from itertools import chain
def check():
    """Count completed lines on the flattened 5x5 bingo board.

    Marked cells hold -1, so a fully marked line of five sums to -5.
    Checks the 5 rows, 5 columns and both diagonals.
    """
    lines = 0
    # rows and columns
    for k in range(5):
        row = bingo[k * 5: (k + 1) * 5]
        col = bingo[k::5]
        if sum(row) == -5:
            lines += 1
        if sum(col) == -5:
            lines += 1
    # main diagonal (0,6,12,18,24) and anti-diagonal (4,8,12,16,20)
    for diagonal in (bingo[0::6], bingo[4:21:4]):
        if sum(diagonal) == -5:
            lines += 1
    return lines
# Read the 5x5 board and then the 25 called numbers, each as one flat list.
bingo = list(chain(*[list(map(int, sys.stdin.readline().split())) for _ in range(5)]))
nums = list(chain(*[list(map(int, sys.stdin.readline().split())) for _ in range(5)]))

for call_idx, called in enumerate(nums):
    bingo[bingo.index(called)] = -1  # mark the called cell
    # three complete lines are impossible before the 12th call, so the
    # board scan is skipped until then
    if call_idx >= 11 and check() >= 3:
        print(call_idx + 1)
        break
4134197620 | import random
import torch
import torch.utils.data
from PIL import Image
from glob import glob
import numpy as np
import torchvision.transforms as transforms
import os
class DataProcess(torch.utils.data.Dataset):
    """Dataset yielding (image, random mask, reference image) triples.

    *img_root*, *input_mask_root* and *ref_root* are directories of image
    files.  Images are resized to 256x256 and normalized to [-1, 1]; masks
    are only resized (they are binary 0/1 maps).
    """

    def __init__(self, img_root, input_mask_root, ref_root, train=True):
        super(DataProcess, self).__init__()
        self.img_transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        # mask should not normalize, is just have 0 or 1
        self.mask_transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor()
        ])
        self.img_root = img_root
        self.input_mask_root = input_mask_root
        self.ref_root = ref_root
        self.Train = False
        # BUG FIX: start with empty lists so N_mask and __len__ are well
        # defined when train=False (the original raised AttributeError on
        # the unconditional `len(self.mask_paths)` below).
        self.img_paths = []
        self.mask_paths = []
        self.ref_paths = []
        if train:
            self.img_paths = os.listdir(img_root)
            self.mask_paths = os.listdir(input_mask_root)
            # NOTE(review): references are listed from img_root, not
            # ref_root -- kept as in the original; confirm intent.
            self.ref_paths = os.listdir(img_root)
            self.Train = True
        self.N_mask = len(self.mask_paths)
        print(self.N_mask)

    def __getitem__(self, index):
        """Return (image, mask, reference) tensors for *index*; the mask is
        drawn uniformly at random from the mask directory."""
        img_ = Image.open(self.img_root + '/' + self.img_paths[index])
        ref_img = Image.open(self.ref_root + '/' + self.ref_paths[index])
        mask_img = Image.open(
            self.input_mask_root + '/' +
            self.mask_paths[random.randint(0, self.N_mask - 1)])
        img = self.img_transform(img_.convert('RGB'))
        ref_img = self.img_transform(ref_img.convert('RGB'))
        mask_img = self.mask_transform(mask_img.convert('RGB'))
        return img, mask_img, ref_img

    def __len__(self):
        return len(self.img_paths)
| Cameltr/TransRef | data/dataprocess.py | dataprocess.py | py | 1,708 | python | en | code | 21 | github-code | 50 |
36879729097 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 03:23:55 2020
@author: mhasan13
"""
import pickle as pkl
import numpy as np
import brian2 as br
class NeuronMeshGrid:
    """Phase-plane lookup data for the neuron circuit, loaded from a pickle.

    The pickle holds a list of eight 2-D grids:
    [u, v, Ipos_feed, Ineg_feed, Iw, Ir, I_leak, axon].
    """

    def __init__(self, pickle_path: str) -> None:
        with open(pickle_path, 'rb') as fp:
            grids = np.array(pkl.load(fp))
        self.u = grids[0, :, :]
        self.v = grids[1, :, :]
        # Cadence exports outgoing PMOS currents with a negative sign,
        # so those contributions are negated here.
        self.iCv = -grids[2, :, :] - grids[3, :, :] - grids[6, :, :]  # Ipos_feed - Ineg_feed - I_leak
        self.iCu = -grids[4, :, :] - grids[5, :, :]  # Iw - Ir
        self.axon = grids[7, :, :]  # axon output
        # index convention: i -> y axis, j -> x axis
        self.vmax = np.max(self.v)
        self.vmin = np.min(self.v)
        self.umax = np.max(self.u)
        self.umin = np.min(self.u)
        self.j_per_x = (self.v.shape[1] - 1) / (self.vmax - self.vmin)
        self.i_per_y = (self.u.shape[0] - 1) / (self.umax - self.umin)
class SynapseMeshGrid:
    """Synapse transfer-surface data loaded from active/inactive pickles.

    Each pickle holds [vfg, vd, Ip, In] grids; the vfg/vd axes and their
    scalings come from the *active* file.  PMOS currents are sign-flipped
    because Cadence exports outgoing currents as negative.
    """

    @staticmethod
    def _load(path):
        # helper: read one pickle and return it as a numpy array of grids
        with open(path, 'rb') as fp:
            return np.array(pkl.load(fp))

    def __init__(self, active_path: str, inactive_path: str) -> None:
        active = self._load(active_path)
        self.vfg = active[0, :, :]
        self.vd = active[1, :, :]
        self.Ip_active = -active[2, :, :]
        self.In_active = active[3, :, :]
        inactive = self._load(inactive_path)
        self.Ip_inactive = -inactive[2, :, :]
        self.In_inactive = inactive[3, :, :]
        # index convention: i -> vfg axis (y), j -> vd axis (x)
        self.vfg_max, self.vfg_min = np.max(self.vfg), np.min(self.vfg)
        self.vd_max, self.vd_min = np.max(self.vd), np.min(self.vd)
        self.i_per_vfg = (self.vfg.shape[0] - 1) / (self.vfg_max - self.vfg_min)
        self.j_per_vd = (self.vd.shape[1] - 1) / (self.vd_max - self.vd_min)
class BundleSynapseMeshGrid:
    """Bundle-synapse injection-current surfaces, from two pickle files.

    *i_bundle_path* holds [v_leak, vd, Ip_a, Ip_b, In_a, In_b];
    *i_inj_path* holds [vm, v_inj, Ip, In].  PMOS currents are sign-flipped
    because Cadence exports outgoing currents as negative.
    """

    def __init__(self, i_bundle_path: str, i_inj_path: str) -> None:
        with open(i_bundle_path, 'rb') as fp:
            bundle = np.array(pkl.load(fp))
        self.v_leak = bundle[0, :, :]
        self.vd = bundle[1, :, :]
        self.Ip_bundle = -(bundle[2, :, :] + bundle[3, :, :])
        self.In_bundle = bundle[4, :, :] + bundle[5, :, :]
        # index convention: i -> y axis, j -> x axis
        self.v_leak_max, self.v_leak_min = np.max(self.v_leak), np.min(self.v_leak)
        self.vd_max, self.vd_min = np.max(self.vd), np.min(self.vd)
        self.i_per_v_leak = (self.v_leak.shape[0] - 1) / (self.v_leak_max - self.v_leak_min)
        self.j_per_vd = (self.vd.shape[1] - 1) / (self.vd_max - self.vd_min)
        with open(i_inj_path, 'rb') as fp:
            inj = np.array(pkl.load(fp))
        self.vm = inj[0, :, :]
        self.v_inj = inj[1, :, :]
        self.Ip_injection = -inj[2, :, :]
        self.In_injection = inj[3, :, :]
        self.v_inj_max, self.v_inj_min = np.max(self.v_inj), np.min(self.v_inj)
        self.vm_max, self.vm_min = np.max(self.vm), np.min(self.vm)
        self.i_per_vm = (self.vm.shape[0] - 1) / (self.vm_max - self.vm_min)
        self.j_per_v_inj = (self.v_inj.shape[1] - 1) / (self.v_inj_max - self.v_inj_min)
class InputGroupBrian:
    '''
    Input spike generation from frequency
    '''
    def __init__(self, n:int) -> None:
        self.dt = br.defaultclock.dt
        # Phase variable x advances at 1/second; a "spike" fires each time x
        # crosses t_period, and x is wound back once pulse_width has elapsed.
        # The +1e-15 below avoids division by zero when frequency is 0.
        self.input_neuron_model='''
                                dx/dt = 1/second : 1
                                s : 1
                                frequency : 1
                                t_period = 1/(frequency+1e-15) : 1
                                pulse_width : 1
                                '''
        self.input_spike_event_action = '''
                                        s += 1
                                        '''
        self.input_reset_event_action = '''
                                        x = pulse_width
                                        s = 0
                                        '''
        # custom event conditions; 'spike' stays high for pulse_width
        self.input_neuron_events={
                'spike':'s==1',
                'spike_event':'x>t_period',
                'resetting':'x>t_period+pulse_width',
                'reset_event':'x<t_period'
                } # threshold='s==1' also works
        self.L = br.NeuronGroup(n,
                                model=self.input_neuron_model,
                                events=self.input_neuron_events,
                                dt=self.dt)
        self.L.run_on_event('spike_event',self.input_spike_event_action)
        self.L.run_on_event('resetting',self.input_reset_event_action)
class NeuronGroupBrian:
    '''
    Pack all the components of brian NeuronGroup
    '''
    def __init__(self, neuron_meshgrid:NeuronMeshGrid, bundle_synapse_meshgrid:BundleSynapseMeshGrid, n:int) -> None:
        self.dt = br.defaultclock.dt
        # Membrane (v) and recovery (u) dynamics are table-driven: Cv_current,
        # Cu_current, ip_bundle, in_bundle and i_injection are lookup
        # functions (built from the meshgrid data) expected to be registered
        # with Brian at run time.  i_per_*/j_per_* convert voltages into
        # table indices.
        self.model = '''
                    i_per_u : 1
                    j_per_v : 1
                    vmax : volt
                    vmin : volt
                    umax : volt
                    umin : volt
                    Cv : farad
                    Cu : farad
                    Cp : farad
                    Cdp_bundle : farad
                    Cdn_bundle : farad
                    vp_leak : volt
                    vn_leak : volt
                    i_per_v_leak : 1
                    j_per_vd : 1
                    i_per_vm : 1
                    j_per_v_inj : 1
                    IpT : amp
                    InT : amp
                    IpB = ip_bundle( int(i_per_v_leak*vp_leak/volt), int(j_per_vd*vp_inj/volt) )*amp : amp (constant over dt)
                    InB = in_bundle( int(i_per_v_leak*vn_leak/volt), int(j_per_vd*vn_inj/volt) )*amp : amp (constant over dt)
                    dvp_inj/dt = (IpB - InT)/Cdn_bundle : volt
                    dvn_inj/dt = (IpT - InB)/Cdp_bundle : volt
                    Isyn = i_injection( int(i_per_vm*v/volt), int(j_per_v_inj*vp_inj/volt), int(j_per_v_inj*vn_inj/volt) )*amp : amp (constant over dt)
                    dv/dt = dvdt : volt
                    dvdt=( Cv_current(int(i_per_u*u/volt),int(j_per_v*v/volt))*amp + Isyn )/(Cv+Cp) : amp/farad (constant over dt)
                    du/dt = dudt : volt
                    dudt=Cu_current(int(i_per_u*u/volt),int(j_per_v*v/volt))*amp/(Cu+Cp) : amp/farad (constant over dt)
                    s : 1
                    '''
        self.spike_event_action = '''
                                s += 1
                                '''
        self.reset_event_action = '''
                                s = 0
                                '''
        # rail events clamp state variables to the supply range; spike/reset
        # events track the 200 mV membrane threshold crossing
        self.neuron_events={
                'vdd_rail':'v>vmax',
                'vss_rail':'v<vmin',
                'udd_rail':'u>umax',
                'uss_rail':'u<umin',
                'vp_inj_rail_up':'vp_inj>vmax',
                'vp_inj_rail_down':'vp_inj<vmin',
                'vn_inj_rail_up':'vn_inj>vmax',
                'vn_inj_rail_down':'vn_inj<vmin',
                't_step':'t>0*second',
                'spike':'s==1',
                'spike_event':'v>200*mV',
                'reset_event':'v<200*mV'
                }
        self.L = br.NeuronGroup(n,
                                model=self.model,
                                events=self.neuron_events,
                                dt=self.dt
                                )
        # voltage range and index scalings taken from the phase-plane data
        self.L.vmax = neuron_meshgrid.vmax*br.volt
        self.L.vmin = neuron_meshgrid.vmin*br.volt
        self.L.umax = neuron_meshgrid.umax*br.volt
        self.L.umin = neuron_meshgrid.umin*br.volt
        self.L.i_per_u = neuron_meshgrid.i_per_y
        self.L.j_per_v = neuron_meshgrid.j_per_x
        # circuit capacitances (membrane, recovery, parasitic, bundle drains)
        self.L.Cv = 50e-15*br.farad
        self.L.Cu = 30e-15*br.farad
        self.L.Cp = 5e-15*br.farad
        self.L.Cdp_bundle = 2e-15*br.farad
        self.L.Cdn_bundle = 2.5e-15*br.farad
        self.L.i_per_v_leak = bundle_synapse_meshgrid.i_per_v_leak
        self.L.j_per_vd = bundle_synapse_meshgrid.j_per_vd
        self.L.i_per_vm = bundle_synapse_meshgrid.i_per_vm
        self.L.j_per_v_inj = bundle_synapse_meshgrid.j_per_v_inj
        self.L.vp_inj = 300*br.mV # set initial value
        self.L.vn_inj = 0*br.mV # set initial value
        self.L.run_on_event('vdd_rail','v=vmax')
        self.L.run_on_event('vss_rail','v=vmin')
        self.L.run_on_event('udd_rail','u=umax')
        self.L.run_on_event('uss_rail','u=umin')
        self.L.run_on_event('vp_inj_rail_up','vp_inj=vmax')
        self.L.run_on_event('vp_inj_rail_down','vp_inj=vmin')
        self.L.run_on_event('vn_inj_rail_up','vn_inj=vmax')
        self.L.run_on_event('vn_inj_rail_down','vn_inj=vmin')
        self.L.run_on_event('spike_event',self.spike_event_action)
        self.L.run_on_event('reset_event',self.reset_event_action)
class SynapseGroupBrian:
    '''
    Pack all the components of synapse
    '''
    def __init__(self, synapse_meshgrid:SynapseMeshGrid, pre_group:NeuronGroupBrian, post_group:NeuronGroupBrian) -> None:
        # Per-synapse currents come from the syn_active_*/syn_inactive_*
        # lookup functions (built from the meshgrid data); they read the
        # postsynaptic bundle node voltages vp_inj/vn_inj.
        self.syn_model = '''
                        i_per_vg_syn : 1
                        j_per_vd_syn : 1
                        vg_p : volt
                        vg_n : volt
                        Isyn_active_p = syn_active_p( int(i_per_vg_syn*vg_p/volt), int(j_per_vd_syn*vn_inj/volt) )*amp : amp (constant over dt)
                        Isyn_active_n = syn_active_n( int(i_per_vg_syn*vg_n/volt), int(j_per_vd_syn*vp_inj/volt) )*amp : amp (constant over dt)
                        Isyn_inactive_p = syn_inactive_p( int(i_per_vg_syn*vg_p/volt), int(j_per_vd_syn*vn_inj/volt) )*amp : amp (constant over dt)
                        Isyn_inactive_n = syn_inactive_n( int(i_per_vg_syn*vg_n/volt), int(j_per_vd_syn*vp_inj/volt) )*amp : amp (constant over dt)
                        Ip_syn_previous_t_step : amp
                        In_syn_previous_t_step : amp
                        '''
# =============================================================================
#         I += Isyn will keep increasing I for the duration of spike. but this is wrong.
#         i need to keep I same as Isyn for the duration of spike.
#         with I_syn_previous_t_step variable previous timestep current can be subtracted
#         from I before adding new timestep current and thus prevents I from increasing
# =============================================================================
        self.syn_active_action = '''
                                IpT -= Ip_syn_previous_t_step
                                InT -= In_syn_previous_t_step
                                IpT += Isyn_active_p
                                InT += Isyn_active_n
                                Ip_syn_previous_t_step = Isyn_active_p
                                In_syn_previous_t_step = Isyn_active_n
                                '''
        self.syn_inactive_action = '''
                                IpT -= Ip_syn_previous_t_step
                                InT -= In_syn_previous_t_step
                                IpT += Isyn_inactive_p
                                InT += Isyn_inactive_n
                                Ip_syn_previous_t_step = Isyn_inactive_p
                                In_syn_previous_t_step = Isyn_inactive_n
                                '''
        # route presynaptic spike/reset events to the matching pathway
        self.on_pre_action={
                'syn_active_path':self.syn_active_action,
                'syn_inactive_path':self.syn_inactive_action,
                }
        self.event_assignment={
                'syn_active_path':'spike_event',
                'syn_inactive_path':'reset_event',
                }
        self.S = br.Synapses(pre_group.L, post_group.L,
                             self.syn_model,
                             on_pre=self.on_pre_action,
                             on_event=self.event_assignment
                             )
        self.S.connect()
        self.S.i_per_vg_syn = synapse_meshgrid.i_per_vfg
        self.S.j_per_vd_syn = synapse_meshgrid.j_per_vd
        # set drain capacitance of the bundle synapse
        # += because the bias synapse is added separately
        post_group.L.Cdp_bundle += 0.5e-15*br.farad*pre_group.L.N
        post_group.L.Cdn_bundle += 1.05e-15*br.farad*pre_group.L.N
class SimpleNeuronGroupBrian:
    '''
    Pack all the components of brian NeuronGroup
    '''
    def __init__(self, neuron_meshgrid:NeuronMeshGrid, n:int) -> None:
        self.dt = br.defaultclock.dt
        # Table-driven v/u dynamics plus a directly injected current I;
        # Cv_current/Cu_current are lookup functions registered at run time.
        self.model = '''
                    i_per_u : 1
                    j_per_v : 1
                    vmax : volt
                    vmin : volt
                    umax : volt
                    umin : volt
                    Cv : farad
                    Cu : farad
                    Cp : farad
                    I : amp
                    dv/dt = dvdt : volt
                    dvdt=( Cv_current(int(i_per_u*u/volt),int(j_per_v*v/volt))*amp + I )/(Cv+Cp) : amp/farad (constant over dt)
                    du/dt = dudt : volt
                    dudt=Cu_current(int(i_per_u*u/volt),int(j_per_v*v/volt))*amp/(Cu+Cp/2) : amp/farad (constant over dt)
                    s : 1
                    '''
        self.spike_event_action = '''
                                s += 1
                                '''
        self.reset_event_action = '''
                                s = 0
                                '''
        # rail events clamp v/u to the supply range; spike/reset track the
        # 200 mV membrane threshold crossing
        self.neuron_events = {
            'vdd_rail': 'v>vmax',
            'vss_rail': 'v<vmin',
            'udd_rail': 'u>umax',
            'uss_rail': 'u<umin',
            't_step': 't>0*second',
            'spike': 's==1',
            'spike_event': 'v>200*mV',
            'reset_event': 'v<200*mV',
        }
        self.L = br.NeuronGroup(
            n, model=self.model, events=self.neuron_events, dt=self.dt)
        # voltage range and index scalings from the phase-plane data
        self.L.vmax = neuron_meshgrid.vmax * br.volt
        self.L.vmin = neuron_meshgrid.vmin * br.volt
        self.L.umax = neuron_meshgrid.umax * br.volt
        self.L.umin = neuron_meshgrid.umin * br.volt
        self.L.i_per_u = neuron_meshgrid.i_per_y
        self.L.j_per_v = neuron_meshgrid.j_per_x
        # circuit capacitances
        self.L.Cv = 50e-15 * br.farad
        self.L.Cu = 30e-15 * br.farad
        self.L.Cp = 5e-15 * br.farad
        clamp_actions = (
            ('vdd_rail', 'v=vmax'),
            ('vss_rail', 'v=vmin'),
            ('udd_rail', 'u=umax'),
            ('uss_rail', 'u=umin'),
        )
        for event_name, clamp in clamp_actions:
            self.L.run_on_event(event_name, clamp)
        self.L.run_on_event('spike_event', self.spike_event_action)
        self.L.run_on_event('reset_event', self.reset_event_action)
43594036384 | import os
import json
import math
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
import numpy
# Collect the "desc" JSON descriptor from every sample folder under ./data.
current_directory = os.getcwd()
folder_path = os.path.join(current_directory, "data")
dicts = []   # one parsed descriptor dict per sample file
names = []   # 4-char sample name per descriptor, aligned with `dicts`
for file_name in os.listdir(folder_path):
    path = os.path.join(folder_path, file_name)
    for sub_file in os.listdir(path):
        if "desc" in sub_file:
            with open(os.path.join(path, sub_file), "r") as file:
                dicts.append(json.load(file))
                # listdir() yields bare names, so the original
                # split("/")[-1] was a no-op
                names.append(sub_file[0:4])

# Project each descriptor onto the plane: entry k contributes its value
# along direction k*pi/8 (a 16-direction radial embedding).
points = []
for obj_dict in dicts:
    for arch_id in obj_dict:
        x = 0
        y = 0
        for i, val in enumerate(obj_dict[arch_id].values()):
            x += val * math.cos(i * math.pi / 8)
            y += val * math.sin(i * math.pi / 8)
        points.append([x, y])
print(dicts)

points_array = numpy.array(points)
plt.scatter(points_array[:, 0], points_array[:, 1])
print(len(points_array))
plt.title(f"Distribución de elementos")
plt.show()

# Cluster the 2-D embedding with DBSCAN.
n_d = 3      # min_samples
e_d = 0.035  # eps (neighborhood radius)
db = DBSCAN(eps=e_d, min_samples=n_d).fit(points_array)
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
unique_labels = set(labels)
# (the original also built an unused `core_samples_mask`; removed as dead code)

# Group (sample name, arch id) pairs by assigned cluster label.
clasification = {}
point_id = 0
for dict_idx in range(len(dicts)):   # renamed: the original reused `obj_dict`
    for arch_id in dicts[dict_idx]:  # for this integer index
        cluster = clasification.setdefault(labels[point_id], {})
        cluster[(names[dict_idx], arch_id)] = (point_id, points[point_id])
        point_id += 1
for key in clasification:
    print(f"Cluster {key}")
    for sub_key in clasification[key]:
        print(sub_key, clasification[key][sub_key])

# Plot each cluster in its own color; noise (-1) in black.
colors = [plt.cm.Spectral(each) for each in numpy.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
    if k == -1:
        col = [0, 0, 0, 1]
    class_member_mask = labels == k
    xy = points_array[class_member_mask]
    plt.plot(
        xy[:, 0],
        xy[:, 1],
        "o",
        markerfacecolor=tuple(col),
        markeredgecolor="k",
        markersize=5,
    )
plt.title(f"Número de clusters: {n_clusters_} n = {n_d}, e = {e_d}")
plt.show()
27873466527 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# --- Site identity ---
AUTHOR = 'JQ'
SITENAME = 'Japan Trip 2018'
SITEURL = 'http://localhost:2018'
# --- Content layout ---
STATIC_PATHS = ['images', 'pdfs']
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
DEFAULT_DATE_FORMAT = '%-m/%-d/%Y'
DEFAULT_CATEGORY = 'general'
DISPLAY_PAGES_ON_MENU = True
# --- Theme (attila) settings ---
THEME = './themes/attila'
HEADER_COVER = 'images/skytree.jpg'
SHOW_FULL_ARTICLE = False
SITESUBTITLE = 'read this stuff plz'
# Author bios rendered by the attila theme, keyed by author slug.
AUTHORS_BIO = {
  "jq": {
        "name": "JQ",
        "cover": "images/arahira.jpg",
        "image": "images/avatar.gif",
        "website": "http://quarl.es",
        "bio": "Inspiring others to use like half their vacation time since 10/2017."
  }
}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# LINKS = (
#     ('Reddit - Japan Travel Advice Wiki', 'https://www.reddit.com/r/JapanTravel/wiki/traveladvice'),
#     ('Reddit - Japan Travel Advice FAQ', 'https://www.reddit.com/r/JapanTravel/wiki/faqs/japantravel'),
# )
# Social icons shown in the sidebar: (font-awesome icon, link).
SOCIAL = (
    ('envelope','mailto:john.w.quarles@gmail.com'),
)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| johnwquarles/japan-trip | pelicanconf.py | pelicanconf.py | py | 1,318 | python | en | code | 0 | github-code | 50 |
def equalize_voltage(signal, init_voltage=0.1, target_voltage=1, f=10000,
                     iterations=2):
    """Iteratively adjust the generator amplitude until the measured output
    matches *target_voltage*.

    Each round drives the generator at frequency *f*, measures the output,
    and rescales the set voltage by target/measured (the system is assumed
    linear in amplitude).  The original performed exactly two hard-coded
    correction rounds with three copy-pasted run/measure/stop blocks;
    *iterations* (default 2) generalizes the round count while preserving
    the original behaviour.

    Returns:
        (final_voltage, set_voltage): the last measured output and the
        generator setting that produced it.
    """
    def _measure(set_voltage):
        # one run / measure / stop cycle at the requested amplitude
        signal.set_voltage(set_voltage)
        signal.set_freq(f)
        signal.run()
        measured = signal.read_voltage(f, 4000, 1)
        signal.stop()
        return measured

    set_voltage = init_voltage
    for _ in range(iterations):
        measured = _measure(set_voltage)
        # linear correction: scale the setting by target / measured
        set_voltage = target_voltage * set_voltage / measured
    final_voltage = _measure(set_voltage)
    return final_voltage, set_voltage
import pandas as pd  # NOTE(review): unused in this script; kept in case external tooling expects it
import glob
import os

# Rename every .png in the images folder to "<fruit>_img_<N>.png", numbering
# the files in sorted (alphabetical) order.
folderPath = "*.png"
print(os.getcwd())
Images_path = "/Users/pavankumar/Documents/Robotics MSc/Dissertation/Data Pre-processing/Dissertation Datasets/mangoes/images"
fruit = "mango"
os.chdir(Images_path)
print(os.getcwd())
filesList = glob.glob(folderPath)
print(len(filesList))
sortedList = sorted(filesList)
# Guard: the original indexed sortedList[0] unconditionally and crashed with
# IndexError when the folder contained no matching files.
if sortedList:
    print(os.path.splitext(sortedList[0])[1])
for count, file in enumerate(sortedList, start=1):
    # Derive the extension from each file itself instead of assuming the
    # first file's extension applies to every file.
    file_ext = os.path.splitext(file)[1]
    newName = fruit + "_img_" + str(count) + file_ext
    os.rename(file, newName)
| PavanproJack/Fruit-Detection-in-Orchards | Data Processing Scripts/sortImages.py | sortImages.py | py | 610 | python | en | code | 12 | github-code | 50 |
71214457114 | # coding:utf-8
from optparse import OptionParser
import os
import sys
import itertools
from common.glob import iglob
def empty_dirs(root_dir):
    """Yield every directory below *root_dir* that contains no files and no subdirectories."""
    for dirpath, subdirs, filenames in os.walk(root_dir):
        if not subdirs and not filenames:
            yield dirpath
def main():
    """Expand the directory arguments (globs allowed) and delete every empty directory beneath them."""
    parser = OptionParser(version="%prog 1.0", usage='Usage: %prog [options] dirs...')
    parser.set_defaults(verbose=False)
    parser.add_option('-v', '--verbose', action='store_true')
    opts, args = parser.parse_args()
    # Default to the current directory when no arguments were given.
    targets = args or ['.']
    expanded = itertools.chain.from_iterable(iglob(pattern) for pattern in targets)
    cleaning_dirs = {path for path in expanded if os.path.isdir(path)}
    for root in cleaning_dirs:
        if opts.verbose:
            print('cleaning:', root)
        for candidate in empty_dirs(root):
            try:
                # removedirs also prunes now-empty parent directories.
                os.removedirs(candidate)
                print("Deleted:", candidate)
            except Exception:
                print("Cannot delete:", candidate)


if __name__ == "__main__":
    main()
| ciel-yu/canal-aux | scripts/cleanempty.py | cleanempty.py | py | 958 | python | en | code | 0 | github-code | 50 |
13180648906 | '''
In this challenge, you will be given 2 integers, n and m. There are n words, which might repeat, in word group A.
There are m words belonging to word group B. For each m words, check whether the word has appeared in group A or not.
Print the indices of each occurrence of m in group A. If it does not appear, print -1.
Example
Group A contains 'a', 'b', 'a' Group B contains 'a', 'c'
For the first word in group B, 'a', it appears at positions 1 and 3 in group A. The second word, 'c', does not
appear in group A, so print -1.
Expected output:
1 3
-1
Input Format
The first line contains integers, n and m separated by a space.
The next n lines contains the words belonging to group A.
The next m lines contains the words belonging to group B.
Constraints
1 <= n <= 10000
1 <= m <= 100
1 <= length of each word in the input <= 100
Output Format
Output m lines.
The i-th line should contain the 1-indexed positions of the occurrences of the i-th word separated by spaces.
'''
from collections import defaultdict
# Read the group sizes; split() with no argument tolerates any amount of
# surrounding whitespace (the original split(" ") broke on double spaces).
n, m = map(int, input().split())
group_a = [input() for _ in range(n)]
group_b = [input() for _ in range(m)]
# Map each word of group A to its (1-indexed) positions of occurrence.
positions = defaultdict(list)
for index, word in enumerate(group_a, start=1):
    positions[word].append(index)
# For each query word print its positions in group A, or -1 if absent.
for word in group_b:
    if word in positions:
        print(*positions[word])
    else:
        print(-1)
| RobertEne1989/python-hackerrank-submissions | defaultdict_tutorial_hackerrank.py | defaultdict_tutorial_hackerrank.py | py | 1,398 | python | en | code | 0 | github-code | 50 |
35546153534 | import json
from os import path
import datetime
import sqlite3
import time
import urllib.request
'''
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("www.blahinc.com",80))
'''
#will need to be changed so we can connect to server
def getUrlforBTC(time):#time should come in form yyyymmdd so feb 7 2014 would be 20140207
    """Stub BTC/USD rate lookup.

    NOTE(review): the real Poloniex request below is commented out, so every
    call returns the hard-coded placeholder rate 712.40 regardless of *time*.
    """
    '''
    time = str(time)
    url = 'https://poloniex.com/public?command=returnChartData&currencyPair=USDT_BTC&start=1391803200&end=1391803200&period=300'
    #url = "https://coinmarketcap.com/currencies/bitcoin/historical-data/?start="+time+"&end="+time
    openUrl = urllib.request.urlopen(url)
    r = openUrl.read()
    openUrl.close()
    d = json.loads(r.decode())
    '''
    return 712.40
#todo
def getUrlforDollars(time):
    # Stub: always returns a hard-coded placeholder rate; *time* is ignored
    # until a real price lookup is implemented (see the #todo above).
    return 2173.40
#todo
def getInfo(altCoin):#takes a string
    """Load and return the cached JSON data for *altCoin* from the local data directory.

    The data root is a hard-coded absolute path, so this only works on the
    original author's machine.
    """
    path = '/Users/ericschneider/Desktop/Algorithmic-Trading/Data/data/' + altCoin + '.json'
    # 'with' guarantees the handle is closed even if json.load raises
    # (the original closed it manually and leaked on parse errors).
    with open(path, "r") as jsonAddress:
        return json.load(jsonAddress)
def convertToBTC(results_file, dollars, time):
    """Convert *dollars* to BTC at the rate reported for *time*, logging the trade to *results_file*.

    Returns the resulting BTC amount.
    """
    time = str(time)
    results_file.write(f"Time: {time}\n")
    rate = getUrlforBTC(time)
    results_file.write(f"Bought BTC at: {rate}\n")
    amount = dollars / rate
    results_file.write(f"Number of BTC: {amount}\n")
    results_file.write("----------------\n")
    return amount
def converToDollars(results_file, btc, time):
    """Sell *btc* for USD at the rate reported for *time*, logging the trade to *results_file*.

    Returns the resulting dollar amount. (Name keeps the original's typo for
    backward compatibility with existing callers.)
    """
    time = str(time)
    results_file.write(f"Time: {time}\n")
    rate = getUrlforDollars(time)
    results_file.write(f"Sold BTC at: {rate}\n")
    proceeds = btc * rate
    results_file.write(f"Number of US Dollars: {proceeds}\n")
    return proceeds
def recordBuy(results_file, coin, time, price, number_of_coin):
    """Append a buy entry for *coin* to *results_file*."""
    entry = (
        f"Time: {time}\n",
        f"Bought at {price}\n",
        f"Current number of {coin} is {number_of_coin}\n",
    )
    results_file.writelines(entry)
def recordSell(results_file, coin, time, price, number_of_btc):
    """Append a delimited sell entry to *results_file*.

    Note: *coin* is accepted for signature symmetry with recordBuy but does
    not appear in the written text (the original behaved the same way).
    """
    separator = "------------------\n"
    entry = (
        separator,
        f"Time: {time}\n",
        f"Sold at {price}\n",
        f"Current number of BTC is {number_of_btc}\n",
        separator,
    )
    results_file.writelines(entry)
def getInfoSQL(fromCurr, toCurr, price, limit):
    '''fromCurr - which currency we're going from
    toCurr - which currency we want to get
    price - what kind of price, like asking price or buy price (a column name)
    limit - max size
    ex: if I want the price from btc to dash at a given time then call:
    getInfoSQL(XXBT, DASH, askPrice, 100)
    IMPORTANT:
    If no LIMIT is wanted, then pass 0 into the limit option
    '''
    # Decide the "no limit" case BEFORE the str() conversion below: the
    # original compared str(limit) == 0, which is always False, so the
    # LIMIT clause was appended unconditionally.
    no_limit = limit == 0
    fromCurr = str(fromCurr)
    toCurr = str(toCurr)
    price = str(price)
    limit = str(limit)
    con = sqlite3.connect("/Volumes/Untitled/snapshot-1505512821.db") #this part is just accessing the database, mine is stored on a flashdrive but yours will be different
    c = con.cursor()
    # NOTE(review): the query is built by string concatenation; acceptable for
    # the internal constants used here, but vulnerable to SQL injection if
    # these arguments ever come from untrusted input.
    if no_limit:
        command = "SELECT "+ price+' FROM ticker WHERE currencyBase = "' +toCurr+ '" AND currencyQuote = "'+fromCurr+'"'
    else:
        command = "SELECT "+ price+' FROM ticker WHERE currencyBase = "' +toCurr+ '" AND currencyQuote = "'+fromCurr+'" LIMIT '+limit
    c.execute(command) #get the dash information from the database
    # The connection must stay open so the caller can iterate the cursor:
    # the original closed it here, which made the returned cursor unusable.
    return c
#THIS FUNCTION NEEDS TESTING
def convert_to_altcoin(dollars, altcoin, time):#time in unix
    """Convert *dollars* to *altcoin* using the asking price stored in the snapshot DB.

    Falls back to a 1:1 rate when no matching row exists. *time* is currently
    unused (the snapshot file itself fixes the point in time).
    """
    con = sqlite3.connect("/Volumes/Untitled/snapshot-1505512821.db") #this part is just accessing the database, mine is stored on a flashdrive but yours will be different
    c = con.cursor()
    altcoin = str(altcoin)
    command = 'SELECT "askPrice" FROM ticker WHERE currencyBase = "' + altcoin+ '" AND currencyQuote = "ZUSD" LIMIT 10'
    print(command)
    conversion_value = 1
    # Execute BEFORE iterating: the original looped over the cursor first,
    # which iterates nothing, so conversion_value always stayed 1.
    c.execute(command) #get the dash information from the database
    for i in c:
        print(i)
        conversion_value = i[0]
    con.close()
    return dollars/conversion_value
| CryptoUmnTheProject/Algorithmic-Trading | Algorithms/helper_functions/helper.py | helper.py | py | 4,207 | python | en | code | 3 | github-code | 50 |
43108338058 | #!/usr/bin/env python
# coding: utf-8
# # THE SPARKS FOUNDATION
#
# ### TASK 1- PREDICTION USING SUPERVISED MACHINE LEARNING
#
#
# ### DESCRIPTION - PREDICT THE PERCENTAGE OF A STUDENT BASED ON THE NUMBER OF STUDY HOURS
#
# ### NAME - SATYAM KUMAR
#
# In[18]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# NOTE: get_ipython() only exists inside IPython/Jupyter; this notebook
# export fails on this line when run as a plain Python script.
get_ipython().run_line_magic('matplotlib', 'inline')
# In[19]:
# Download the hours-vs-scores dataset (requires network access).
dataset_url = 'http://bit.ly/w-data'
s_data = pd.read_csv(dataset_url)
# In[20]:
# Bare expressions like the following only render output in a notebook;
# they are no-ops when the file is executed as a script.
s_data.head()
# #check the shape of student data
#
# In[21]:
s_data.shape
# #Checking data types
# In[22]:
s_data.dtypes
# #getting information from student data
# In[23]:
s_data.info()
# #calculating statistical data
# In[24]:
s_data.describe().transpose()
# #checking if there are any null values in s_data
# In[25]:
s_data.isnull().sum()
# #plot the data points on 2-D graph
# In[26]:
s_data.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# In[27]:
# X = study hours (all columns but the last), y = percentage scores.
X = s_data.iloc[:, :-1].values
y = s_data.iloc[:, 1].values
# In[28]:
#spliting dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# In[29]:
# NOTE: LinearRegression was already imported above; this repeat is harmless.
from sklearn.linear_model import LinearRegression
#creating model
regressor = LinearRegression()
#fit the model
regressor.fit(X_train, y_train)
# In[30]:
y_pred = regressor.predict(X_test)
y_pred
# In[32]:
print(regressor.coef_)
# In[33]:
print(regressor.intercept_)
# In[34]:
# Regression line computed manually from the fitted slope and intercept.
y_new = regressor.intercept_ + X*regressor.coef_
# In[35]:
plt.scatter(X,y,color='r')
plt.plot(X, y_new, color='b')
plt.xlabel('Hours')
plt.ylabel('Scores')
plt.title('Hours vs Scores')
plt.grid()
plt.show()
# In[36]:
# Side-by-side comparison of actual vs predicted test scores.
data = pd.DataFrame({'Actual':y_test, 'Predicted':y_pred})
data.head()
# In[37]:
regressor.score(X_train, y_train)
# In[38]:
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
# In[39]:
from sklearn import metrics
print('Mean Absolute Error: ', metrics.mean_absolute_error(y_test, y_pred))
# ##### finally calculating the task (hours=9.25)
# In[40]:
hours = [[9.25]]
pred_value = regressor.predict(hours)
print('Number of hours = {}'.format(hours))
print('Predicted Score = {}'.format(pred_value[0]))
| satyamiitbhu/The-Spark-Foundation | THE SPARK TASK 1.py | THE SPARK TASK 1.py | py | 2,425 | python | en | code | 0 | github-code | 50 |
42883176870 | from __future__ import absolute_import
from abc import abstractmethod
from six import string_types
from sagemaker.local import file_input
from sagemaker.session import s3_input
class _Job(object):
    """Handle creating, starting and waiting for Amazon SageMaker jobs to finish.
    This class shouldn't be directly instantiated.
    Subclasses must define a way to create, start and wait for an Amazon SageMaker job.
    """
    def __init__(self, sagemaker_session, job_name):
        self.sagemaker_session = sagemaker_session
        self.job_name = job_name

    @abstractmethod
    def start_new(self, estimator, inputs):
        """Create a new Amazon SageMaker job from the estimator.
        Args:
            estimator (sagemaker.estimator.EstimatorBase): Estimator object created by the user.
            inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`.
        Returns:
            sagemaker.job: Constructed object that captures all information about the started job.
        """

    @abstractmethod
    def wait(self):
        """Wait for the Amazon SageMaker job to finish.
        """

    @staticmethod
    def _load_config(inputs, estimator, expand_role=True, validate_uri=True):
        """Assemble the request dictionaries (inputs, role, output, resources,
        stop condition, VPC) needed to create a training job from *estimator*."""
        input_config = _Job._format_inputs_to_input_config(inputs, validate_uri)
        role = estimator.sagemaker_session.expand_role(estimator.role) if expand_role else estimator.role
        output_config = _Job._prepare_output_config(estimator.output_path, estimator.output_kms_key)
        resource_config = _Job._prepare_resource_config(estimator.train_instance_count,
                                                        estimator.train_instance_type,
                                                        estimator.train_volume_size,
                                                        estimator.train_volume_kms_key)
        stop_condition = _Job._prepare_stop_condition(estimator.train_max_run)
        vpc_config = estimator.get_vpc_config()
        model_channel = _Job._prepare_model_channel(input_config, estimator.model_uri, estimator.model_channel_name,
                                                    validate_uri)
        if model_channel:
            input_config = [] if input_config is None else input_config
            input_config.append(model_channel)
        return {'input_config': input_config,
                'role': role,
                'output_config': output_config,
                'resource_config': resource_config,
                'stop_condition': stop_condition,
                'vpc_config': vpc_config}

    @staticmethod
    def _format_inputs_to_input_config(inputs, validate_uri=True):
        """Normalize the many accepted *inputs* shapes (str, s3_input, file_input,
        dict, RecordSet, list of RecordSets) into a list of channel dicts."""
        if inputs is None:
            return None
        # Deferred import due to circular dependency
        from sagemaker.amazon.amazon_estimator import RecordSet
        if isinstance(inputs, RecordSet):
            inputs = inputs.data_channel()
        input_dict = {}
        if isinstance(inputs, string_types):
            input_dict['training'] = _Job._format_string_uri_input(inputs, validate_uri)
        elif isinstance(inputs, s3_input):
            input_dict['training'] = inputs
        elif isinstance(inputs, file_input):
            input_dict['training'] = inputs
        elif isinstance(inputs, dict):
            for k, v in inputs.items():
                input_dict[k] = _Job._format_string_uri_input(v, validate_uri)
        elif isinstance(inputs, list):
            input_dict = _Job._format_record_set_list_input(inputs)
        else:
            raise ValueError(
                'Cannot format input {}. Expecting one of str, dict or s3_input'.format(inputs))
        # 'channel_input' avoids shadowing the builtin 'input' as the original did.
        channels = [_Job._convert_input_to_channel(name, channel_input)
                    for name, channel_input in input_dict.items()]
        return channels

    @staticmethod
    def _convert_input_to_channel(channel_name, channel_s3_input):
        """Return the input's request config extended with its ChannelName."""
        channel_config = channel_s3_input.config.copy()
        channel_config['ChannelName'] = channel_name
        return channel_config

    @staticmethod
    def _format_string_uri_input(uri_input, validate_uri=True):
        """Wrap a training-data URI (s3:// or file://) into the matching input type."""
        if isinstance(uri_input, str) and validate_uri and uri_input.startswith('s3://'):
            return s3_input(uri_input)
        elif isinstance(uri_input, str) and validate_uri and uri_input.startswith('file://'):
            return file_input(uri_input)
        elif isinstance(uri_input, str) and validate_uri:
            raise ValueError('Training input data must be a valid S3 or FILE URI: must start with "s3://" or '
                             '"file://"')
        elif isinstance(uri_input, str):
            return s3_input(uri_input)
        elif isinstance(uri_input, s3_input):
            return uri_input
        elif isinstance(uri_input, file_input):
            return uri_input
        else:
            raise ValueError('Cannot format input {}. Expecting one of str, s3_input, or file_input'.format(uri_input))

    @staticmethod
    def _prepare_model_channel(input_config, model_uri=None, model_channel_name=None, validate_uri=True):
        """Build the channel dict for a pre-trained model URI, or return None when no model was given.

        Raises ValueError when a model URI is given without a channel name, or
        when the channel name collides with an existing input channel.
        """
        if not model_uri:
            return
        elif not model_channel_name:
            raise ValueError('Expected a pre-trained model channel name if a model URL is specified.')
        if input_config:
            for channel in input_config:
                if channel['ChannelName'] == model_channel_name:
                    raise ValueError('Duplicate channels not allowed.')
        model_input = _Job._format_model_uri_input(model_uri, validate_uri)
        model_channel = _Job._convert_input_to_channel(model_channel_name, model_input)
        return model_channel

    @staticmethod
    def _format_model_uri_input(model_uri, validate_uri=True):
        """Wrap a pre-trained model URI into the input type expected by SageMaker."""
        # Fixed: 'string_types)and' was missing a space after the paren.
        if isinstance(model_uri, string_types) and validate_uri and model_uri.startswith('s3://'):
            return s3_input(model_uri, input_mode='File', distribution='FullyReplicated',
                            content_type='application/x-sagemaker-model')
        elif isinstance(model_uri, string_types) and validate_uri and model_uri.startswith('file://'):
            return file_input(model_uri)
        elif isinstance(model_uri, string_types) and validate_uri:
            # Fixed message: the closing quote after file:// was missing
            # (compare the equivalent message in _format_string_uri_input).
            raise ValueError('Model URI must be a valid S3 or FILE URI: must start with "s3://" or '
                             '"file://"')
        elif isinstance(model_uri, string_types):
            return s3_input(model_uri, input_mode='File', distribution='FullyReplicated',
                            content_type='application/x-sagemaker-model')
        else:
            raise ValueError('Cannot format model URI {}. Expecting str'.format(model_uri))

    @staticmethod
    def _format_record_set_list_input(inputs):
        """Convert a list of RecordSets into a channel-name -> s3_input mapping."""
        # Deferred import due to circular dependency
        from sagemaker.amazon.amazon_estimator import RecordSet
        input_dict = {}
        for record in inputs:
            if not isinstance(record, RecordSet):
                raise ValueError('List compatible only with RecordSets.')
            if record.channel in input_dict:
                raise ValueError('Duplicate channels not allowed.')
            input_dict[record.channel] = record.records_s3_input()
        return input_dict

    @staticmethod
    def _prepare_output_config(s3_path, kms_key_id):
        """Return the OutputDataConfig dict; KmsKeyId is only set when provided."""
        config = {'S3OutputPath': s3_path}
        if kms_key_id is not None:
            config['KmsKeyId'] = kms_key_id
        return config

    @staticmethod
    def _prepare_resource_config(instance_count, instance_type, volume_size, train_volume_kms_key):
        """Return the ResourceConfig dict; VolumeKmsKeyId is only set when provided."""
        resource_config = {'InstanceCount': instance_count,
                           'InstanceType': instance_type,
                           'VolumeSizeInGB': volume_size}
        if train_volume_kms_key is not None:
            resource_config['VolumeKmsKeyId'] = train_volume_kms_key
        return resource_config

    @staticmethod
    def _prepare_stop_condition(max_run):
        """Return the StoppingCondition dict for a maximum runtime in seconds."""
        return {'MaxRuntimeInSeconds': max_run}

    @property
    def name(self):
        return self.job_name
| ggiallo28/aws-deepracer-local | source/sagemaker-python-sdk/src/sagemaker/job.py | job.py | py | 8,097 | python | en | code | 2 | github-code | 50 |
70382594396 | from click.testing import CliRunner
from ocxtools.cli import cli
from ocxtools import __version__
def test_cli_version():
    """'version' sub-command exits cleanly and reports the package version."""
    runner = CliRunner()
    result = runner.invoke(cli,['version'])
    assert result.exit_code == 0
    assert __version__ in result.output
def test_cli_validate_info():
    """'validate info' sub-command exits cleanly."""
    runner = CliRunner()
    result = runner.invoke(cli,['validate','info'])
    assert result.exit_code == 0
def test_cli_validate_one_model(shared_datadir):
    """Validating a bundled 3Docx model file succeeds.

    shared_datadir is presumably the pytest-datadir fixture pointing at the
    tests' shared data folder -- confirm in the project's test configuration.
    """
    runner = CliRunner()
    model = shared_datadir / "m1.3Docx"
    result = runner.invoke(cli,['validate','one-model', str(model.resolve())])
    assert result.exit_code == 0
| OCXStandard/ocxtools | tests/test_cli.py | test_cli.py | py | 636 | python | en | code | 0 | github-code | 50 |
39676546346 | from setuptools import setup, find_packages
version = '0.3.dev0'
setup(name='webtest-casperjs',
version=version,
description="Use casperjs with WebTest",
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Paste",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
],
keywords='wsgi test unit tests web casperjs',
author='Gael Pasgrimaud',
author_email='gael@gawel.org',
url='https://github.com/gawel/webtest-casperjs',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'WebTest',
],
entry_points="""
# -*- Entry points: -*-
""",
)
| gawel/webtest-casperjs | setup.py | setup.py | py | 1,268 | python | en | code | 6 | github-code | 50 |
4757653620 |
"""
A simple sample module with command authorization testing
and some useful functions
"""
import sys
import os
import time
#import yaml
import json
from sharedVars import authorizedPath
def isAuth(username):
    """Return the authorization flag stored for *username*, or False when absent."""
    with open(authorizedPath) as handle:
        auths = json.load(handle)
    return auths.get(username, False)
def authorized(username): #still here for legacy support
    """Return True when *username* appears (one name per line) in the legacy op list.

    NOTE(review): the op-list path is hard-coded; confirm it still exists on
    the deployment host.
    """
    # 'with' guarantees the handle is closed -- the original leaked it.
    with open("/home/skype/oplist.txt", "r") as handle:
        return username in [name.strip() for name in handle]
def import_users(path):
    """Read the file at *path*; currently returns None.

    NOTE(review): the YAML parsing below is commented out (the yaml import at
    the top of the file is too), so the file content is read and discarded.
    """
    # 'with' replaces the original's manual open/close, which leaked the
    # handle when readlines() raised.
    with open(path) as handle:
        lines = "".join(handle.readlines())
    #return yaml.safe_load(lines)
| fmorisan/amaurascripts | authorized.py | authorized.py | py | 669 | python | en | code | 0 | github-code | 50 |
16191703063 | import sys
import pdb
class bcolors:
    """ANSI terminal escape codes used to colour the diff output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    BGBLUE = '\33[44m'  # blue background; '\33' is the same octal escape as '\033'
def criarMatriz(nome):
    """Read the file *nome* and return its lines without trailing newlines.

    Prints an error and exits the process with status -1 when the file
    cannot be opened.
    """
    m = []
    try:
        with open(nome, "r") as arq:
            for linha in arq:
                if linha.endswith("\n"):
                    linha = linha[:-1]
                m.append(linha)
        return m
    except OSError:
        # The original bare 'except' also swallowed SystemExit and
        # KeyboardInterrupt; only I/O failures should hit the error path.
        print("nao foi possivel abrir o arquivo %s" % nome)
        sys.exit(-1)
def converter(texto, cor, cor2=""):
    """Wrap *texto* in the ANSI code *cor* and reset afterwards (cor2 is currently unused)."""
    return f"{cor}{texto}{bcolors.ENDC}"
def usage():
    """Print the command-line usage banner."""
    print("Uso: python3 mdiff.py <arq1> <arq2> <arq3> ....")
def inserir(string, posicaoInicial, posFinal, texto1, texto2):
    """Return *string* with *texto1* spliced in at posicaoInicial and *texto2* at posFinal."""
    prefixo = string[:posicaoInicial]
    meio = string[posicaoInicial:posFinal]
    sufixo = string[posFinal:]
    return f"{prefixo}{texto1}{meio}{texto2}{sufixo}"
def linhaIgual(listaLinhas):
    """Return True when every element of *listaLinhas* equals its neighbour (vacuously True for 0 or 1 items)."""
    return all(atual == seguinte for atual, seguinte in zip(listaLinhas, listaLinhas[1:]))
def addInList(lista, caractere):
    """Append *caractere* to every element of *lista*, mutating it in place, and return it."""
    for indice, texto in enumerate(lista):
        lista[indice] = texto + caractere
    return lista
def confLinhas(linhas):
    """Rebuild each line, inserting ANSI colour start/end markers around the
    character columns where the lines differ from one another.

    NOTE(review): only the first min(len(line)) characters are compared AND
    copied to the output -- the tails of longer lines are silently dropped.
    Confirm whether that truncation is intentional.
    """
    INICIO=bcolors.WARNING
    FIM=bcolors.ENDC
    novasLinhas=[""]*len(linhas)
    indiceNovasInicio=0
    colorido=False
    # Compare only up to the shortest line's length.
    tamanho=min([len(k) for k in linhas])
    for caractereIndice in range(tamanho):
        # Entering a differing column: open the colour marker on every line.
        if (not linhaIgual([x[caractereIndice] for x in linhas]) and colorido==False):
            novasLinhas=addInList(novasLinhas,INICIO)
            colorido=True
        # Leaving a differing region: close the colour marker on every line.
        if linhaIgual([x[caractereIndice] for x in linhas]) and colorido==True:
            novasLinhas=addInList(novasLinhas,FIM)
            colorido=False
        for i in range(len(linhas)):
            novasLinhas[i]+=linhas[i][caractereIndice]
    # If the lines still differed at the end, close the pending marker.
    if colorido==True:
        novasLinhas=addInList(novasLinhas,FIM)
    return novasLinhas
def checar(lista):
    """Print a column-aligned, colourised line-by-line diff of the files named in *lista*.

    NOTE(review): the loop iterates up to the LONGEST file's line count but
    indexes every file -- files with fewer lines raise IndexError. Confirm the
    inputs are always equal-length before relying on this.
    """
    planilhas=[]
    inicio="nomes:"
    for i in lista:
        inicio+="%s|"%i
    print(inicio)
    # Load every file into a list of lines.
    for i in lista:
        planilhas.append(criarMatriz(i))
    #planilhas=[["119D8000262000080007300001E88202"],["119D6240702000080007300004EC4101"]]
    # find the file with the most lines
    tamanho=[len(i) for i in planilhas]
    #maiorIndice = tamanho.find(max(tamanho))
    #maior=planilhas[maiorIndice]
    for linhaIndice in range(max(tamanho)):
        linhas=[i[linhaIndice] for i in planilhas]
        igual=linhaIgual(linhas)
        # Zero-padded 3-digit line index prefix.
        strIndice="%03d"%linhaIndice
        print(strIndice,end="")
        if igual:
            y=converter("==|",bcolors.OKGREEN)
            print(y,end="")
            x="|".join(linhas)
            print(x,end="")
        else:
            y=converter("!=|",bcolors.FAIL)
            print(y,end="")
            # Highlight the differing character columns before printing.
            linhas=confLinhas(linhas)
            x="|".join(linhas)
            print(x,end="")
        print("")
    return 0
def main():
    """Entry point: show usage when no file arguments are given, otherwise diff them."""
    argumentos = sys.argv[1:]
    if not argumentos:
        usage()
    else:
        checar(argumentos)


main()
| es0j/mdiff | mdiff.py | mdiff.py | py | 3,073 | python | pt | code | 0 | github-code | 50 |
3155330834 | # 두 개의 정수를 입력받아 작은 수부터 큰 수까지 모든 정수의 합을 구하여 출력하는 프로그램을 작성하시오.
# Read two integers and print the sum of every integer from the smaller up to the larger.
# f = open("io_441.txt", "r")  # the relative path depends on the editor's working directory (VS Code)
# a, b = map(int, f.readline().split())
a, b = map(int, input().split())
if a > b:
    a, b = b, a
# 'total' avoids shadowing the built-in sum(), which the original clobbered.
total = sum(range(a, b + 1))
print(total)
# f.close()
1683989676 | import unittest
import parl
from parl.remote.monitor import ClusterMonitor
import time
import threading
from parl.remote import exceptions
from parl.utils.test_utils import XparlTestCase
import os
import signal
@parl.remote_class
class Actor(object):
    """Minimal remote actor used to exercise the xparl cluster in these tests."""

    def __init__(self, arg1=None, arg2=None):
        self.arg1 = arg1
        self.arg2 = arg2

    def get_arg1(self):
        return self.arg1

    def get_arg2(self):
        return self.arg2

    def set_arg1(self, value):
        self.arg1 = value

    def set_arg2(self, value):
        self.arg2 = value

    def add_one(self, value):
        return value + 1

    def add(self, x, y):
        # Deliberately slow, so callers can observe in-flight remote work.
        time.sleep(3)
        return x + y

    def will_raise_exception_func(self):
        # Deliberately raises ZeroDivisionError for exception-propagation tests.
        x = 1 / 0
class TestClusterMonitor(XparlTestCase):
    """End-to-end check that the ClusterMonitor tracks workers joining and leaving."""
    def remove_ten_workers(self):
        # SIGTERM the process group of the first 10 worker processes only.
        for i, proc in enumerate(self.worker_process):
            if i == 10: break
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
    def test_twenty_worker(self):
        self.add_master()
        cluster_monitor = ClusterMonitor('localhost:{}'.format(self.port))
        for _ in range(20):
            self.add_worker(n_cpu=1)
        # Poll (up to 10 x 10s) until the monitor reports all 20 workers.
        check_flag = False
        for _ in range(10):
            if 20 == len(cluster_monitor.data['workers']):
                check_flag = True
                break
            time.sleep(10)
        self.assertTrue(check_flag)
        self.remove_ten_workers()
        # Poll until the count drops to the remaining 10 workers.
        check_flag = False
        for _ in range(10):
            if 10 == len(cluster_monitor.data['workers']):
                check_flag = True
                break
            time.sleep(10)
        self.assertTrue(check_flag)
        self.remove_all_workers()
        # check if the number of workers drops to 0
        check_flag = False
        for _ in range(10):
            if 0 == len(cluster_monitor.data['workers']):
                check_flag = True
                break
            time.sleep(10)
        self.assertTrue(check_flag)
if __name__ == '__main__':
    # failfast: stop at the first failure, since later polling depends on earlier state.
    unittest.main(failfast=True)
| PaddlePaddle/PARL | parl/remote/tests/cluster_monitor_3_test.py | cluster_monitor_3_test.py | py | 2,074 | python | en | code | 3,097 | github-code | 50 |
32636496240 | from django.conf.urls import url, include
from .views.metadata import resource_metadata_json, geographic_feature_metadata_json, geographic_raster_metadata_json, \
time_series_metadata_json, file_set_metadata_json, multidimensional_metadata_json, \
referenced_time_series_metadata_json, single_file_metadata_json, model_instance_metadata_json, \
model_program_metadata_json
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
app_name = "hsapi2"
# Mounted by the project-level urls module under the /hsapi2/ prefix.
hsapi2_urlpatterns = [
    url('^hsapi2/', include('hs_rest_api2.urls', namespace='hsapi2')),
]
# drf-yasg schema view backing the Swagger / ReDoc API documentation pages.
schema_view_yasg = get_schema_view(
    openapi.Info(
        title="Hydroshare API",
        default_version='v2',
        description="Hydroshare Rest API",
        terms_of_service="https://help.hydroshare.org/about-hydroshare/policies/terms-of-use/",
        contact=openapi.Contact(email="help@cuahsi.org"),
    ),
    validators=[],
    public=True,
    permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
    # Swagger Docs View
    url(r'^(?P<format>\.json|\.yaml)$', schema_view_yasg.without_ui(cache_timeout=None),
        name='schema-json'),
    url(r'^$', schema_view_yasg.with_ui('swagger', cache_timeout=None),
        name='schema-swagger-ui'),
    url(r'^redoc/$', schema_view_yasg.with_ui('redoc', cache_timeout=None), name='schema-redoc'),
    # Resource-level metadata as JSON; pk is the resource's short-id/uuid.
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/$',
        resource_metadata_json,
        name='resource_metadata_json'),
    # Aggregation-level metadata endpoints, one per aggregation type;
    # aggregation_path is the file path of the aggregation inside the resource.
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/GeographicFeature/(?P<aggregation_path>.*)$',
        geographic_feature_metadata_json,
        name='geographic_feature_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/GeographicRaster/(?P<aggregation_path>.*)$',
        geographic_raster_metadata_json,
        name='geographic_raster_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/TimeSeries/(?P<aggregation_path>.*)$',
        time_series_metadata_json,
        name='time_series_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/FileSet/(?P<aggregation_path>.*)$',
        file_set_metadata_json,
        name='file_set_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/Multidimensional/(?P<aggregation_path>.*)$',
        multidimensional_metadata_json,
        name='multidimensional_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/ReferencedTimeSeries/(?P<aggregation_path>.*)$',
        referenced_time_series_metadata_json,
        name='referenced_time_series_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/SingleFile/(?P<aggregation_path>.*)$',
        single_file_metadata_json,
        name='single_file_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/ModelInstance/(?P<aggregation_path>.*)$',
        model_instance_metadata_json,
        name='model_instance_metadata_json'),
    url(r'^resource/(?P<pk>[0-9a-f-]+)/json/ModelProgram/(?P<aggregation_path>.*)$',
        model_program_metadata_json,
        name='model_program_metadata_json'),
]
| hydroshare/hydroshare | hs_rest_api2/urls.py | urls.py | py | 3,060 | python | en | code | 171 | github-code | 50 |
36683165416 | import logging
from telegram import Update
from telegram.error import BadRequest, TelegramError
from telegram.ext import CallbackContext
from bot.conversation import Status
from bot.markups import InlineKeyboard, Keyboard
from database.models import Channel
from utilities import d
logger = logging.getLogger('handler')  # shared module-level logger for this plugin's handlers
@d.restricted
@d.failwithmessage
@d.logconversation()
@d.pass_channel
def channelconfig_on_linkkeep_callbackquery(update, context: CallbackContext, channel: Channel):
    """The user chose to keep the current invite link: confirm and stay in the config state."""
    logger.info('linkkeep inline button')
    query = update.callback_query
    query.edit_message_text('Fine, we will keep the current invite link')
    return Status.WAITING_CHANNEL_CONFIG_ACTION
@d.restricted
@d.failwithmessage
@d.logconversation()
@d.pass_channel
def channelconfig_on_linkrevoke_callbackquery(update, context: CallbackContext, channel: Channel):
    """Revoke the channel's invite link, persist the freshly exported one, and confirm to the user."""
    logger.info('linkrevoke inline button')
    query = update.callback_query
    try:
        new_link = context.bot.export_chat_invite_link(channel.channel_id)
    except (TelegramError, BadRequest) as e:
        logger.error('error while exporting invite link: %s', e.message)
        query.edit_message_text('Error while exporting invite link: {}'.format(e.message))
        return Status.WAITING_CHANNEL_CONFIG_ACTION
    channel.invite_link = new_link
    channel.save()
    confirmation = 'New invite link saved: {}'.format(new_link)
    query.edit_message_text(
        confirmation,
        reply_markup=InlineKeyboard.REMOVE,
        disable_web_page_preview=True
    )
    return Status.WAITING_CHANNEL_CONFIG_ACTION
@d.restricted
@d.failwithmessage
@d.logconversation()
@d.pass_channel
def channelconfig_on_exportlink_command(update: Update, context: CallbackContext, channel: Channel):
    """Handle /exportlink: save the channel's invite link, asking before revoking an existing one.

    If a link is already stored, offer an inline yes/no choice (handled by the
    linkrevoke/linkkeep callbacks above) instead of revoking silently. If the
    bot cannot export a link but the channel is public, fall back to the
    public t.me/<username> URL.
    """
    logger.info('/exportlink command')
    if channel.invite_link:
        # A link already exists: ask the user before revoking it.
        inline_markup = InlineKeyboard.row_from_list([('yes, revoke and generate a new one', 'linkrevoke'), ('no', 'linkkeep')])
        update.message.reply_text('This channel already has an invite link saved: {}'.format(channel.invite_link),
                                  disable_web_page_preview=True, reply_markup=Keyboard.REMOVE)
        update.message.reply_text('Do you want to generate a new one?', reply_markup=inline_markup)
        return Status.WAITING_CHANNEL_CONFIG_ACTION
    try:
        # first: try to revoke the current invite link
        invite_link = context.bot.export_chat_invite_link(channel.channel_id)
    except (TelegramError, BadRequest) as e:
        logger.error('error while exporting invite link: %s', e.message)
        # maybe the channel is public and the bot doesn't have the permission to generate an invite link, so we try to get the chat
        channel_object = context.bot.get_chat(channel.channel_id)
        if channel_object.username:
            invite_link = 'https://t.me/{}'.format(channel_object.username)
        else:
            update.message.reply_text('Error while exporting invite link (the channel is not public): {}'.format(e.message))
            return Status.WAITING_CHANNEL_CONFIG_ACTION
    channel.invite_link = invite_link
    channel.save()
    update.message.reply_text('Invite link saved: {}'.format(invite_link), reply_markup=Keyboard.REMOVE,
                              disable_web_page_preview=True)
    return Status.WAITING_CHANNEL_CONFIG_ACTION
| zeroone2numeral2/reddit-test | bot/plugins/channel_config/channel_configuration/export_invite_link.py | export_invite_link.py | py | 3,325 | python | en | code | 2 | github-code | 50 |
36860839918 | from dataclasses import dataclass
from pathlib import Path
import pytest
from bluemira.base.constants import raw_uc
from bluemira.base.error import ReactorConfigError
from bluemira.base.logs import get_log_level, set_log_level
from bluemira.base.parameter_frame import (
EmptyFrame,
Parameter,
ParameterFrame,
make_parameter_frame,
)
from bluemira.base.reactor_config import ReactorConfig
@dataclass
class TestGlobalParams(ParameterFrame):
    """Global parameter frame used as the top-level frame in the reactor-config tests."""
    # Prevent pytest from collecting this helper despite its Test* name.
    __test__ = False
    only_global: Parameter[int]
    height: Parameter[float]
    age: Parameter[int]
    extra_global: Parameter[int]
@dataclass
class TestCompADesignerParams(ParameterFrame):
    """Parameter frame expected by 'comp A' designer: globals plus local name/location."""
    # Prevent pytest from collecting this helper despite its Test* name.
    __test__ = False
    only_global: Parameter[int]
    height: Parameter[float]
    age: Parameter[int]
    name: Parameter[str]
    location: Parameter[str]
# Fixture config files exercising the various ReactorConfig loading paths.
test_config_path = Path(__file__).parent / "data" / "reactor_config.test.json"
empty_config_path = Path(__file__).parent / "data" / "reactor_config.empty.json"
nested_config_path = Path(__file__).parent / "data" / "reactor_config.nested_config.json"
nested_params_config_path = (
    Path(__file__).parent / "data" / "reactor_config.nested_params.json"
)
nesting_config_path = Path(__file__).parent / "data" / "reactor_config.nesting.json"
class TestReactorConfigClass:
    """
    Tests for the Reactor Config class functionality.
    """
    def setup_method(self):
        # Force DEBUG so the params_for/config_for debug records are emitted
        # and can be counted via caplog; restored in teardown_method.
        self.old_log_level = get_log_level()
        set_log_level("DEBUG")
    def teardown_method(self):
        set_log_level(self.old_log_level)
    def test_file_loading_with_empty_config(self, caplog):
        reactor_config = ReactorConfig(empty_config_path, EmptyFrame)
        # want to know explicitly if it is an EmptyFrame
        assert type(reactor_config.global_params) is EmptyFrame
        # Unknown component lookups should degrade to empty results, not raise.
        p_dne = reactor_config.params_for("dne")
        c_dne = reactor_config.config_for("dne")
        # One DEBUG record per missing-component lookup.
        assert len(caplog.records) == 2
        for record in caplog.records:
            assert record.levelname == "DEBUG"
        assert len(p_dne.local_params) == 0
        assert len(c_dne) == 0
    def test_incorrect_global_config_type_empty_config(self):
        # An empty config cannot satisfy a non-empty global frame.
        with pytest.raises(ValueError):  # noqa: PT011
            ReactorConfig(empty_config_path, TestGlobalParams)
    def test_incorrect_global_config_type_non_empty_config(self):
        # A non-empty config cannot be loaded into EmptyFrame.
        with pytest.raises(ValueError):  # noqa: PT011
            ReactorConfig(test_config_path, EmptyFrame)
    def test_throw_on_too_specific_arg(self):
        reactor_config = ReactorConfig(test_config_path, TestGlobalParams)
        # Drilling past an actual config value must raise, not return it.
        with pytest.raises(ReactorConfigError):
            reactor_config.config_for("comp A", "config_a", "a_value")
def test_set_global_params(self, caplog):
reactor_config = ReactorConfig(test_config_path, TestGlobalParams)
cp = reactor_config.params_for("comp A", "designer")
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "DEBUG"
cpf = make_parameter_frame(cp, TestCompADesignerParams)
# instance checks
assert cpf.only_global is reactor_config.global_params.only_global
assert cpf.height is reactor_config.global_params.height
assert cpf.age is reactor_config.global_params.age
self._compa_designer_param_value_checks(cpf)
cpf.only_global.value = raw_uc(2, "years", "s")
assert cpf.only_global.value == raw_uc(2, "years", "s")
assert reactor_config.global_params.only_global.value == raw_uc(2, "years", "s")
assert cpf.only_global is reactor_config.global_params.only_global
def _compa_designer_param_value_checks(self, cpf):
assert cpf.only_global.value == raw_uc(1, "years", "s")
assert cpf.height.value == 1.8
assert cpf.age.value == raw_uc(30, "years", "s")
assert cpf.name.value == "Comp A"
assert cpf.location.value == "here"
def test_params_for_warnings_make_param_frame_type_value_overrides(self, caplog):
reactor_config = ReactorConfig(
test_config_path,
TestGlobalParams,
)
cp = reactor_config.params_for("comp A", "designer")
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "DEBUG"
cpf = make_parameter_frame(cp, TestCompADesignerParams)
self._compa_designer_param_value_checks(cpf)
# instance checks
assert cpf.only_global is reactor_config.global_params.only_global
assert cpf.height is reactor_config.global_params.height
assert cpf.age is reactor_config.global_params.age
def test_config_for_warnings_value_overrides(self, caplog):
reactor_config = ReactorConfig(
test_config_path,
TestGlobalParams,
)
cf_comp_a = reactor_config.config_for("comp A")
cf_comp_a_des = reactor_config.config_for("comp A", "designer")
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "DEBUG"
assert cf_comp_a["config_a"] == cf_comp_a_des["config_a"]
assert cf_comp_a["config_b"] == cf_comp_a_des["config_b"]
assert cf_comp_a_des["config_c"]["c_value"] == "c_value"
def test_no_params_warning(self, caplog):
reactor_config = ReactorConfig(
{
"comp A": {
"designer": {},
},
},
EmptyFrame,
)
cp = reactor_config.params_for("comp A", "designer")
cp_dne = reactor_config.params_for("comp A", "designer", "dne")
assert len(caplog.records) == 2
for record in caplog.records:
assert record.levelname == "DEBUG"
assert len(cp.local_params) == 0
assert len(cp_dne.local_params) == 0
def test_no_config_warning(self, caplog):
reactor_config = ReactorConfig(
{
"comp A": {
"designer": {},
},
},
EmptyFrame,
)
cf_comp_a = reactor_config.config_for("comp A")
cf_comp_a_des = reactor_config.config_for("comp A", "designer")
cf_comp_a_des_dne = reactor_config.config_for("comp A", "designer", "dne")
assert len(caplog.records) == 2
for record in caplog.records:
assert record.levelname == "DEBUG"
assert len(cf_comp_a) == 1
assert len(cf_comp_a_des) == 0
assert len(cf_comp_a_des_dne) == 0
def test_invalid_rc_initialization(self):
with pytest.raises(ReactorConfigError):
ReactorConfig(
["wrong"],
EmptyFrame,
)
def test_args_arent_str(self):
reactor_config = ReactorConfig(
{
"comp A": {
"designer": {},
},
},
EmptyFrame,
)
with pytest.raises(ReactorConfigError):
reactor_config.config_for("comp A", 1)
def test_file_path_loading_in_json_nested_params(self):
out_dict = {
"height": {"value": 1.8, "unit": "m"},
"age": {"value": 946728000, "unit": "s"},
"only_global": {"value": 31557600, "unit": "s"},
"extra_global": {"value": 1, "unit": "s"},
}
reactor_config = ReactorConfig(nested_params_config_path, EmptyFrame)
pf = make_parameter_frame(reactor_config.params_for("Tester"), TestGlobalParams)
assert pf == TestGlobalParams.from_dict(out_dict)
def test_file_path_loading_in_json_nested_config(self):
reactor_config = ReactorConfig(nested_config_path, EmptyFrame)
pf = make_parameter_frame(
reactor_config.params_for("Tester", "comp A", "designer"),
TestCompADesignerParams,
)
self._compa_designer_param_value_checks(pf)
compa_designer_config = reactor_config.config_for("Tester", "comp A", "designer")
assert compa_designer_config["config_a"] == {"a_value": "overridden_value"}
assert compa_designer_config["config_b"] == {"b_value": "b_value"}
assert compa_designer_config["config_c"] == {"c_value": "c_value"}
def test_deeply_nested_files(self):
reactor_config = ReactorConfig(nesting_config_path, EmptyFrame)
assert reactor_config.config_for("nest_a")["a_val"] == "nest_a"
assert reactor_config.config_for("nest_b")["a_val"] == "nest_b"
assert reactor_config.params_for("nest_a").local_params["a_param"] == "nest_a"
assert reactor_config.params_for("nest_b").local_params["a_param"] == "nest_b"
| Fusion-Power-Plant-Framework/bluemira | tests/base/reactor_config/test_reactor_config.py | test_reactor_config.py | py | 8,715 | python | en | code | 31 | github-code | 50 |
24719142862 | #-*-coding:utf-8 -*-
#代码详解
#classify函数的参数:
#inX:用于分类的输入向量
#dataSet:训练样本集合
#labels:标签向量
#k:K-近邻算法中的k
#shape:是array的属性,描述一个多维数组的维度
#tile(inX, (dataSetSize,1)):把inX二维数组化,dataSetSize表示生成数组后的行数,1表示列的倍数。整个这一行代码表示前一个二维
# 数组矩阵的每一个元素减去后一个数组对应的元素值,这样就实现了矩阵之间的减法,简单方便得不让你佩服不行!
#axis=1:参数等于1的时候,表示矩阵中行之间的数的求和,等于0的时候表示列之间数的求和。
#argsort():对一个数组进行非降序排序
#classCount.get(numOflabel,0) + 1:这一行代码不得不说的确很精美啊。get():该方法是访问字典项的方法,即访问下标键为numOflabel
# 的项,如果没有这一项,那么初始值为0。然后把这一项的值加1。所以Python中实现这样的操作就只需要一行代码,实在是很简洁高效。
from sklearn import datasets #导入内置数据集模块
from sklearn.neighbors import KNeighborsClassifier #导入sklearn.neighbours模块中的KNN类
import numpy as np
import operator
from os import listdir #可以给出给定目录文件名
def createDataSet():
    """Return the toy 2-D training samples and their class labels ('A'/'B')."""
    samples = np.array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    sample_labels = ['A', 'A', 'B', 'B']
    return samples, sample_labels
# In this data set, `group` holds sample coordinates and `labels` their classes.
# `classify0` is a plain k-nearest-neighbour classifier: it predicts the class
# of `inX` by majority vote among the k closest training samples.
def classify0(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest neighbours.

    Parameters
    ----------
    inX : array-like of shape (n_features,) -- the point to classify.
    dataSet : np.ndarray of shape (n_samples, n_features) -- training samples.
    labels : sequence of length n_samples -- class label of each sample.
    k : int -- number of neighbours that vote.

    Returns
    -------
    The label with the most votes among the k nearest samples.
    """
    # (1) Euclidean distance from inX to every training sample.
    dataSetSize = dataSet.shape[0]
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    # BUG FIX: the original skipped this per-row sum, leaving `distances` a
    # 2-D array of per-feature deltas; the subsequent argsort + [i][0]
    # indexing then selected essentially arbitrary labels.
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    # (2) Vote among the k closest samples.
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # (3) Return the most common label.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
if __name__ == '__main__':
    # Demo: build the toy data set and classify the origin point.
    samples, sample_labels = createDataSet()
    print(samples, sample_labels)
    prediction = classify0([0, 0], samples, sample_labels, 3)
    print(prediction)
| scales123/machine-learning | book/ch02/kNNpw.py | kNNpw.py | py | 4,448 | python | zh | code | 1 | github-code | 50 |
16185666697 | import os
import boto3
import csv
def get_boto3_client():
    """Return a boto3 EC2 client (credentials here are placeholders)."""
    #region = self.region
    # Dummy Credentials
    return boto3.client("ec2",
                      aws_access_key_id="Value",
                      aws_secret_access_key="Value",
                      region_name="Value")
# Scratch state shared by the collector functions below.
sgfilter=set()        # ids of security groups owning a faulty rule
sgrfilter=set()       # ids of the faulty rules themselves
sg_filter_list=[]     # NOTE(review): appears unused
sgr_filter_list=[]    # NOTE(review): appears unused
list1=[]              # CSV rows, one dict per faulty rule
list2=[]              # NOTE(review): appears unused
instancefiltered={}   # {security group id: [instance ids using it]}
#function to filter out the instances having faulty group rules attached and appended to a dict
def get_running_instances():
    """Map each flagged security-group id to the instance ids attached to it.

    Populates the module-level `instancefiltered` dict ({sg_id: [instance_id]})
    for every instance whose security group is in `sgfilter`.
    """
    ec2_client = get_boto3_client()
    instances=""  # NOTE(review): unused leftover variable
    paginator = ec2_client.get_paginator('describe_instances')
    page_iterator = paginator.paginate()
    for pg in page_iterator:
        reservationlist=pg["Reservations"]
        for reservation in reservationlist:
            # NOTE(review): only the FIRST instance of each reservation is
            # inspected; reservations with several instances are truncated --
            # confirm this is intended.
            instance=reservation['Instances'][0]
            instanceid=instance["InstanceId"]
            securitygrouplist=instance['SecurityGroups']
            for securityGroup in instance['SecurityGroups']:
                sgnew=securityGroup['GroupId']
                #checking instances sg in filtered sg list having faulty details
                if sgnew in sgfilter:
                    if sgnew not in instancefiltered:
                        instancefiltered[sgnew]=[]
                    instancefiltered[sgnew].append(instanceid)
#function to get the SG Name when passed group id as paramater
def get_sg_name(groupid):
    """Return the GroupName of the security group with the given id.

    Returns None implicitly if the API response contains no groups.
    """
    ec2_client = get_boto3_client()
    response = ec2_client.describe_security_groups(GroupIds=[str(groupid)])
    sg= response["SecurityGroups"]
    for n in sg:
        name=n["GroupName"]
        print("GroupName:{}".format(name))
        return name  # returns on the first (only) group
#function to revoke the sg rule out from sg if having faulty configuration
def revoke_sg_rule(groupid, ruleid):
    """Dry-run revocation of the given ingress rule from the given group.

    NOTE(review): with DryRun=True boto3 does not perform the change -- it
    signals a would-succeed dry run via an exception rather than a normal
    response, so the "Success Removing" branch likely never runs; confirm
    before relying on this.
    """
    ec2_client = get_boto3_client()
    response = ec2_client.revoke_security_group_ingress(DryRun=True,
                              GroupId=str(groupid),
                              SecurityGroupRuleIds=[
                              str(ruleid),
                              ]
                          )
    if response["Return"]==True:
        print("Success Removing")
#revoke_sg_rule("sg-002a0a96d45156279","sgr-0bc1c8625216c588a")
#stores the faulty sg rule detailed information in a csv file
def write_to_csv(path='/Users/priyasharma/Documents/PythonCode/PythonBasic_AWS/ep3/SGRDetails4.csv'):
    """Dump the collected rule details (module-level `list1`) to a CSV file.

    `path` defaults to the original hard-coded location, but can now be
    overridden by callers (backward-compatible generalization).
    """
    fieldnames = ['Groupname ', 'GroupId ', 'RuleId ', 'IP ', 'PortNumber ', 'Instance IDs ']
    # newline='' is the documented way to open CSV files (avoids blank rows on
    # Windows); the with-block also guarantees the handle is closed on error.
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(list1)
#filter out the sg, sg rules having faulty configuration and appending it to set for later use
def get_security_group_rules():
    """Scan all security-group rules and record the 'faulty' ones.

    A rule is flagged when it is open to the world (0.0.0.0/0), inbound, and
    its FromPort is neither 443 nor None.  Flagged rule/group ids are added to
    `sgrfilter`/`sgfilter` and full details appended to `list1` for the CSV.
    """
    client = get_boto3_client()
    paginator = client.get_paginator('describe_security_group_rules')
    page_iterator = paginator.paginate()
    for pg in page_iterator:
        for ri in pg["SecurityGroupRules"]:
            ruleid=ri["SecurityGroupRuleId"]
            groupid=ri["GroupId"]
            if "CidrIpv4" in ri:
                ip=ri["CidrIpv4"]
                fromport=ri["FromPort"]
                outboundcheck=ri["IsEgress"]
                # world-open, non-HTTPS, ingress rules are considered faulty
                if(ip=='0.0.0.0/0' and fromport!=443 and fromport!=None and not(outboundcheck)) :
                    sgrfilter.add(ruleid)
                    sgfilter.add(groupid)
                    print("\n")
                    groupname=get_sg_name(ri["GroupId"])
                    print("GroupId:{}".format(ri["GroupId"]))
                    print("GroupruleId:{}".format(ruleid))
                    print("IP:{}".format(ip))
                    print("PortNumber:{}".format(ri["FromPort"]))
                    portno=ri["FromPort"]
                    #appending the whole details in a list
                    list1.append({'Groupname ': groupname, 'GroupId ': groupid, 'RuleId ':ruleid,'IP ':ip,'PortNumber ': portno, 'Instance IDs ':''})
                    #calling to remove the faulty group rule from the group
                    #revoke_sg_rule(ri["GroupId"], ruleid)
# Collect the faulty rules first, then map the affected instances onto them.
get_security_group_rules()
get_running_instances()
# Joins the filtered instance ids onto each CSV row, keyed by security group.
def addinstances():
    """Fill the 'Instance IDs ' column of every row in the module-level `list1`.

    Instance ids come from `instancefiltered` and are stored as one
    space-separated string per row.
    """
    for l in list1:
        ins=""
        if l['GroupId '] in instancefiltered:
            for i in instancefiltered[l['GroupId ']]:
                ins+=i+" "
        l["Instance IDs "]=ins
# Attach instance ids to each row, then persist the report to CSV.
addinstances()
write_to_csv()
| priya-sharmaa/Tasks | Python/get_faultysg.py | get_faultysg.py | py | 4,401 | python | en | code | 0 | github-code | 50 |
23793199245 | from collections import deque
cases = int(input())
dx = [-2, -1, 1, 2, -2, -1, 1, 2]
dy = [-1, -2, -2, -1, 1, 2, 2, 1]
for i in range(cases):
l = int(input())
a, b = map(int, input().split())
c, d = map(int, input().split())
q = deque()
check = [[-1] * l for i in range(l)]
def bfs(x, y):
q.append((x, y))
check[x][y] = 0
while q:
t, k = q.popleft()
for i in range(8):
nx = t + dx[i]
ny = k + dy[i]
if 0 <= nx < l and 0 <= ny < l:
if check[nx][ny] == -1:
q.append((nx, ny))
check[nx][ny] = check[t][k] + 1
if nx == c and ny == d:
break
if nx == c and ny == d:
break
bfs(a, b)
print(check[c][d])
| innjuun/Algorithm | baekjoon/Graph/7562.py | 7562.py | py | 877 | python | en | code | 2 | github-code | 50 |
5651658902 | from typing import List, Union
import tensorflow as tf
class FeatureSpec:
def __init__(self, layer_name: str, working_stride: int = None, width: int = -1):
self.layer_name = layer_name
self.working_stride = working_stride
self.width = width
@classmethod
def from_last_tensor(cls, model: Union[tf.keras.Model, tf.keras.layers.Layer]):
input_tensor = tf.zeros([1, 960, 960, 3], dtype=tf.float32)
output_tensor = model(input_tensor)
output_shape = tf.keras.backend.int_shape(output_tensor)
working_stride = 960 // output_shape[1]
result = cls(layer_name=model.layers[-1].name,
working_stride=working_stride,
width=output_shape[-1])
print(f" [Verres] - automatically deduced feature layer: {result.layer_name}")
return result
@classmethod
def from_layer_names(cls,
model: Union[tf.keras.Model, tf.keras.layers.Layer],
layer_names: List[str]):
specs: List[FeatureSpec] = []
input_tensor = tf.zeros((1, 960, 960, 3), dtype=tf.float32)
for layer_name in layer_names:
inputs = model.inputs
output = model.get_layer(layer_name).output
fake_model = tf.keras.Model(inputs, output)
output_tensor = fake_model(input_tensor)
output_shape = tf.keras.backend.int_shape(output_tensor)
assert 960 % output_shape[1] == 0
specs.append(
cls(layer_name,
working_stride=960 // output_shape[1],
width=output_shape[-1]))
return specs
class VRSBackbone(tf.keras.layers.Layer):
    """Base class for Verres backbones: a Keras layer exposing FeatureSpec outputs."""
    def __init__(self, feature_specs: List[FeatureSpec]):
        super().__init__()
        # NOTE(review): `feature_specs or None` stores None for an empty list,
        # yet `len(feature_specs)` below would crash on an actual None argument
        # -- callers appear to always pass a non-empty list; confirm.
        self.feature_specs: List[FeatureSpec] = feature_specs or None
        self.single_feature_mode = len(feature_specs) == 1
    def preprocess_input(self, inputs):
        # Subclasses must implement their architecture-specific preprocessing.
        raise NotImplementedError
36213413446 | from art import logo
from replit import clear
def calc(first_num):
    """Ask for an operation and a second number; print and return the result.

    Re-prompts on an unknown operator (the original fell through the if/elif
    chain and crashed with a NameError on `result`).
    """
    # Dispatch table replaces the if/elif chain and doubles as input validation.
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    operator = input("+\n-\n*\n/\nPick an operation: ").lower()
    while operator not in operations:
        operator = input("+\n-\n*\n/\nPick an operation: ").lower()
    second_num = float(input("What's the next number?: "))
    result = operations[operator](first_num, second_num)
    print(f"{first_num} {operator} {second_num} = {result}")
    return result
def main():
    """Run the interactive calculator loop until the user starts over."""
    print(logo)
    starting_number = float(input("What's the first number?: "))
    calc_result = calc(starting_number)
    continue_cal = "y"
    while True:
        continue_cal = input(f"Type 'y' to continue calculating with {calc_result}, or type 'n' to start a new calculation: ").lower()
        if continue_cal == "y":
            calc_result = calc(calc_result)
        else:
            # To start a new calculation, clear the screen and run main again
            # from the top.  NOTE(review): this recursion never unwinds, so a
            # very long session could in principle hit the recursion limit.
            clear()
            main()
main()  # start the interactive session
11264066828 | from requests_html import HTMLSession
session = HTMLSession();
r = session.get('https://www.boannews.com/media/s_list.asp?skind=5')
f = open("security_world_title_content_data.txt", "w", encoding='UTF-8')
print(r.html)
#Get News Title & Content Summary
for line in r.html.find('.news_list'):
print(line.text)
print("")
f.write(line.text)
f.close()
| epicarts/team-crawlcrawl | etc/crawling_test/yeni/security_world_title_content.py | security_world_title_content.py | py | 357 | python | en | code | 7 | github-code | 50 |
19975311840 | import os
import sys
sys.path.append('./utils/midi')
from utils.midi.midi_utils import midiread, midiwrite
from matplotlib import pyplot as plt
import torch
import numpy as np
class PianoGenerationDataset(torch.utils.data.Dataset):
    """Dataset of binarised piano rolls read from a folder of MIDI files.

    Each item is an (input, target, length) triple where the target is the
    input shifted by one time step; both are left-padded to the longest
    sequence length in the corpus.
    """
    def __init__(self, midi_folder_path, longest_sequence_length=1491):
        self.midi_folder_path = midi_folder_path
        midi_filenames = os.listdir(midi_folder_path)
        self.longest_sequence_length = longest_sequence_length
        midi_full_filenames = map(lambda filename: os.path.join(midi_folder_path, filename), midi_filenames)
        self.midi_full_filenames = list(midi_full_filenames)
        # Passing longest_sequence_length=None triggers a (slow) corpus scan.
        if longest_sequence_length is None:
            self.update_the_max_length()
    def midi_filename_to_piano_roll(self, midi_filename):
        """Read one MIDI file and return its 88 x T binary piano roll."""
        midi_data = midiread(midi_filename, dt=0.3)
        piano_roll = midi_data.piano_roll.transpose()
        # Pressed notes are replaced by 1
        piano_roll[piano_roll > 0] = 1
        return piano_roll
    def pad_piano_roll(self, piano_roll, max_length=132333, pad_value=0):
        """Left-pad a roll to max_length columns with pad_value.

        Targets are padded with -100 so padded positions can be ignored by the
        loss (the conventional ignore index).
        """
        original_piano_roll_length = piano_roll.shape[1]
        padded_piano_roll = np.zeros((88, max_length))
        padded_piano_roll[:] = pad_value
        padded_piano_roll[:, -original_piano_roll_length:] = piano_roll
        return padded_piano_roll
    def update_the_max_length(self):
        # Scans every MIDI file to find the longest roll; expensive.
        sequences_lengths = map(lambda filename: self.midi_filename_to_piano_roll(filename).shape[1],self.midi_full_filenames)
        max_length = max(sequences_lengths)
        self.longest_sequence_length = max_length
    def __len__(self):
        return len(self.midi_full_filenames)
    def __getitem__(self, index):
        """Return (input, target, length) tensors for the index-th MIDI file."""
        midi_full_filename = self.midi_full_filenames[index]
        piano_roll = self.midi_filename_to_piano_roll(midi_full_filename)
        # Shifting by one time step
        sequence_length = piano_roll.shape[1] - 1
        # Shifting by one time step
        input_sequence = piano_roll[:, :-1]
        ground_truth_sequence = piano_roll[:, 1:]
        # padding sequence so that all of them have the same length
        input_sequence_padded = self.pad_piano_roll(input_sequence, max_length=self.longest_sequence_length)
        ground_truth_sequence_padded = self.pad_piano_roll(ground_truth_sequence,max_length=self.longest_sequence_length,pad_value=-100)
        input_sequence_padded = input_sequence_padded.transpose()
        ground_truth_sequence_padded = ground_truth_sequence_padded.transpose()
        return (torch.FloatTensor(input_sequence_padded),torch.LongTensor(ground_truth_sequence_padded),torch.LongTensor([sequence_length]) )
23832075008 | from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import matplotlib.patches as patches
from torchvision import transforms
from pycocotools.coco import COCO
import matplotlib.pyplot as plt
import matplotlib.image as im
import torch.utils.data
from PIL import Image
import pandas as pd
import numpy as np
import torchvision
import threading
import socket
import torch
import time
import json
import os
# Run-state flags shared between the listener threads.
imageTransfering = False
noData = False
ready = True
# Local TCP/UDP ports for the four channels.
LISTEN_TO_PC_PORT = 5005
LISTEN_TO_USER_PORT = 5006
ANSWER_TO_PC_PORT = 5007
ANSWER_TO_USER_PORT = 5008
# Factory defaults for the detection filter (restored by "params_default").
default_min_score = 0
default_min_box_width = 10
default_min_box_height = 10
default_max_box_width = 300
default_max_box_height = 300
default_left_border = 0
default_right_border = 0
default_bottom_border = 0
default_top_border = 0
# Current filter thresholds (mutated at runtime, persisted to params.json).
min_score = 0
min_box_width = 10
min_box_height = 10
max_box_width = 300
max_box_height = 300
left_border = 0
right_border = 0
bottom_border = 0
top_border = 0
def UDP_PC_2_PC():
    """Command loop for the capture-side PC: receive images, run the detector,
    filter predictions by the global thresholds, and persist them to last.json.

    NOTE(review): despite the name, this channel speaks TCP, not UDP.
    Commands: "stop" | "reboot" | "ping" | "next" (send an image) | "last".
    """
    global ready
    TCP_IP = "127.0.0.1"
    TCP_PORT = LISTEN_TO_PC_PORT
    import socket
    # Set the server address
    SERVER_ADDRESS = (TCP_IP, TCP_PORT)
    # Set up the listening socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(SERVER_ADDRESS)
    server_socket.listen(1)
    print('server is running, please, press ctrl+c to stop')
    # Serve requests, one connection per command
    while True:
        connection, address = server_socket.accept()
        print("new connection from {address}".format(address=address))
        data = connection.recv(1024)
        # strips the b'...' repr wrapper to recover the command text
        dataParse = str(data)[2:len(str(data)) - 1]
        if dataParse == "stop":
            break
        elif dataParse == "reboot":
            print("Wait a minute")
        elif dataParse == "ping":
            connection.send(bytes('pong', encoding='UTF-8'))
        elif dataParse == "next":
            if ready:
                connection.send(bytes('yes', encoding='UTF-8'))
                ready = False
                # stream the image bytes to disk until the peer stops sending
                dataP = connection.recv(1024)
                with open('image.jpg', 'wb') as f:
                    print('file opened')
                    while dataP:
                        f.write(dataP)
                        dataP = connection.recv(1024)
                    f.close()
                # print("Have an image")
                # run the (global) TorchScript model `m` on the received image
                data_transforms = transforms.Compose([transforms.ToTensor()])
                img = Image.open("image.jpg").convert("RGB")
                imgTensor = data_transforms(img)
                prediction = m([imgTensor.cuda()])
                lent = len(prediction[1][0]['boxes'])
                predListBoxes = prediction[1][0]['boxes'].tolist()
                predListScores = prediction[1][0]['scores'].tolist()
                predListLabels = prediction[1][0]['labels'].tolist()
                # print("Have a prediction")
                outputDict = {}
                # keep only detections passing every threshold/border filter
                for i in range(lent):
                    # print("score: " + str(predListScores[i]))
                    # print("box width: " + str(abs(predListBoxes[i][0] - predListBoxes[i][2])))
                    # print("box height: " + str(abs(predListBoxes[i][1] - predListBoxes[i][3])))
                    # print("left border: " + str(predListBoxes[i][0]))
                    # print("right border: " + str(abs(imgTensor.shape[2] - predListBoxes[i][2])))
                    # print("top border: " + str(predListBoxes[i][1]))
                    # print("bottom border: " + str(abs(imgTensor.shape[1] - predListBoxes[i][3])))
                    if min_score <= predListScores[i]\
                            and min_box_width <= abs(predListBoxes[i][0] - predListBoxes[i][2])\
                            and min_box_height <= abs(predListBoxes[i][1] - predListBoxes[i][3])\
                            and max_box_width >= abs(predListBoxes[i][0] - predListBoxes[i][2])\
                            and max_box_height >= abs(predListBoxes[i][1] - predListBoxes[i][3])\
                            and left_border <= predListBoxes[i][0]\
                            and right_border <= abs(imgTensor.shape[2] - predListBoxes[i][2])\
                            and top_border <= predListBoxes[i][1]\
                            and bottom_border <= abs(imgTensor.shape[1] - predListBoxes[i][3]):
                        outputDict[str(i)] = {
                            "bbox": predListBoxes[i],
                            "score": predListScores[i],
                            "label": predListLabels[i]}
                # l = json.dumps(outputDict).encode()
                # print("Have a file")
                # connection.send(l)
                with open('last.json', 'w') as f:
                    json.dump(outputDict, f)
                ready = True
                # # f = open("last.json", "rb")
                # # l = f.read(1024)
                # # while(l):
                # #     connection.send(l)
                # #     l = f.read(1024)
                # # f.close()
            else:
                connection.send(bytes('no', encoding='UTF-8'))
        elif dataParse == "last":
            # stream the latest results file back; create a stub if missing
            try:
                f = open("last.json", "rb")
                l = f.read(1024)
                while(l):
                    connection.send(l)
                    l = f.read(1024)
                f.close()
            except:
                jsonF = {"test":101}
                with open("last.json", "w") as write_file:
                    json.dump(jsonF, write_file)
                f = open("last.json", "rb")
                l = f.read(1024)
                while(l):
                    connection.send(l)
                    l = f.read(1024)
                f.close()
        connection.close()
def UDP_USER_2_PC():
    """UDP command loop for the operator console.

    Parses text commands, mutates the module-level filter thresholds, persists
    them via saveParams() and acknowledges each command through ansverUSER().
    "stop" hard-kills the whole process with os.abort().
    """
    UDP_PORT = LISTEN_TO_USER_PORT
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    sock.bind(('', UDP_PORT))
    while True:
        data = sock.recv(1024)
        global min_score
        global min_box_width
        global min_box_height
        global max_box_width
        global max_box_height
        global left_border
        global right_border
        global bottom_border
        global top_border
        # strips the b'...' repr wrapper to recover the command text
        dataParse = str(data)[2:len(str(data)) - 1]
        print(dataParse)
        if dataParse == "stop":
            os.abort()
        elif dataParse == "params_list":
            # '*' acts as a line separator for the console client
            stt = "Filtering parametres:* *Minimum score: " + str(min_score)\
                  + "*Minimum box width: " + str(min_box_width)\
                  + "*Minimum box hieght: " + str(min_box_height)\
                  + "*Maximum box width: " + str(max_box_width)\
                  + "*Maximum box hieght: " + str(max_box_height)\
                  + "*Left border: " + str(left_border)\
                  + "*Right border: " + str(right_border)\
                  + "*Bottom border: " + str(bottom_border)\
                  + "*Top border: " + str(top_border)
            ansverUSER(stt)
        elif dataParse == "help":
            ansverUSER('List of commands:*'\
                +'*"close the console" - to close the console*'\
                +'*"reboot" - reboot the system'\
                +'*"params_list" - show the list of filtering parametres'\
                +'*"params_default" - set default parametres'\
                +'*"min_score <value>" - set score threshold'\
                +'*"min_box_height <value>"'\
                +'*"min_box_width <value>"'\
                +'*"max_box_height <value>"'\
                +'*"max_box_width <value>"'\
                +'*"left_border <value>"'\
                +'*"right_border <value>"'\
                +'*"bottom_border <value>"'\
                +'*"top_border <value>"')
        elif dataParse == "turn_off":
            ansverUSER("Bye!")
        elif dataParse == "reboot":
            ansverUSER("See u")
        elif dataParse == "params_default":
            # restore factory defaults and persist them
            min_score = default_min_score
            min_box_width = default_min_box_width
            min_box_height = default_min_box_height
            max_box_width = default_max_box_width
            max_box_height = default_max_box_height
            left_border = default_left_border
            right_border = default_right_border
            bottom_border = default_bottom_border
            top_border = default_top_border
            saveParams();
            ansverUSER("Done")
        elif " " in dataParse:
            # "<param> <value>" form; int/float conversion errors are reported
            try:
                if dataParse.split(" ")[0] == "min_score":
                    min_score = float(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "min_box_width":
                    min_box_width = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "min_box_height":
                    min_box_height = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "max_box_width":
                    max_box_width = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "max_box_height":
                    max_box_height = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "left_border":
                    left_border = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "right_border":
                    right_border = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "bottom_border":
                    bottom_border = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                elif dataParse.split(" ")[0] == "top_border":
                    top_border = int(dataParse.split(" ")[1])
                    ansverUSER("Done")
                else:
                    ansverUSER("Incorrect command!")
                saveParams();
            except:
                ansverUSER("Incorrect value!")
        else:
            ansverUSER("Incorrect command!")
    # elif str(data) == "b'params list'":
def ansverPC(ans):
    """Send a reply string to the PC-side listener over UDP."""
    UDP_IP = "127.0.0.1"
    UDP_PORT = ANSWER_TO_PC_PORT
    # The context manager closes the socket; the original leaked one per call.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.connect((UDP_IP, UDP_PORT))
        sock.send(str.encode(ans))
def ansverUSER(ans):
    """Send a reply string to the operator-console listener over UDP."""
    UDP_IP = "127.0.0.1"
    UDP_PORT = ANSWER_TO_USER_PORT
    # The context manager closes the socket; the original leaked one per call.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.connect((UDP_IP, UDP_PORT))
        sock.send(str.encode(ans))
def saveParams():
    """Persist the current filter thresholds to params.json."""
    # the global declarations are redundant for reads, but kept as-is
    global min_score
    global min_box_width
    global min_box_height
    global max_box_width
    global max_box_height
    global left_border
    global right_border
    global bottom_border
    global top_border
    dictV = {'min_score': min_score,
             'min_box_width': min_box_width,
             'min_box_height': min_box_height,
             'max_box_width': max_box_width,
             'max_box_height': max_box_height,
             'left_border': left_border,
             'right_border': right_border,
             'bottom_border': bottom_border,
             'top_border': top_border}
    with open("params.json", "w") as write_file:
        json.dump(dictV, write_file)
####################### Program #######################
def _load_saved_params():
    """Read the persisted thresholds; raises if the file is missing,
    unreadable, or lacks a key (handled by the caller)."""
    with open("params.json", "r") as read_file:
        data = json.load(read_file)
    return (data["min_score"], data["min_box_width"], data["min_box_height"],
            data["max_box_width"], data["max_box_height"], data["left_border"],
            data["right_border"], data["bottom_border"], data["top_border"])

# Load saved parameters; on any failure (first run / corrupt file) persist the
# defaults and re-read.  The original duplicated this entire startup sequence
# inside a bare except clause.
try:
    _params = _load_saved_params()
except Exception:
    saveParams()
    _params = _load_saved_params()
(min_score, min_box_width, min_box_height,
 max_box_width, max_box_height,
 left_border, right_border, bottom_border, top_border) = _params

# Start both listener threads, then load the TorchScript model used by them.
pc2pcThread = threading.Thread(target=UDP_PC_2_PC, args=())
user2pcThread = threading.Thread(target=UDP_USER_2_PC, args=())
pc2pcThread.start()
user2pcThread.start()
print("")
print("--- System is running ---")
print("")
device = torch.device('cpu')
m = torch.jit.load('model.pt')
m = m.eval()
| Andrey-Nedov/Mask-RCNN-Traffic-Sign-Recognizer | Release/1_main_script/_main_script 2 (with net).py | _main_script 2 (with net).py | py | 10,861 | python | en | code | 0 | github-code | 50 |
36840944715 | import aiohttp
import asyncio
from aiohttp import web
import requests
routes = web.RouteTableDef()
@routes.get("/parseActivities")
async def save_fact(request):
    """Route an activity request to the matching downstream service and relay
    its JSON reply; returns 400 with an error payload on any failure."""
    try:
        json_data = await request.json()
        # charity/recreational activities go to one service, the rest to another
        if json_data.get("type") == "charity" or json_data.get("type") == "recreational":
            link = "http://127.0.0.1:8082/charityAndRecreational"
        else:
            link = "http://127.0.0.1:8082/otherActivities"
        # Use the async aiohttp client instead of the blocking requests.get the
        # original called, which stalled the whole event loop per request.
        async with aiohttp.ClientSession() as session:
            async with session.get(link, json = json_data) as result:
                resultData = await result.json()
        print(resultData)
        return web.json_response(resultData, status = 200)
    except Exception as e:
        return web.json_response({"status": "failed", "message": str(e)}, status = 400)
# Register the routes and serve on port 8081 (blocks until shutdown).
app = web.Application()
app.router.add_routes(routes)
web.run_app(app, port = 8081)
| b0rke-mborina/distsys-zadace | 04-zadaci/zadatak1-part2.py | zadatak1-part2.py | py | 814 | python | en | code | 0 | github-code | 50 |
18705959979 | def heapsort(unsorted):
n= len(unsorted)
for i in range(n//2-1,-1,-1): #n//2-1이 의미하는 바는 힙의 자료구조에서 마지막으로 오는 원소의 부모 라인의 노드 index를 말합니다.
heapify(unsorted, i, len(arr))
#이제 최대 힙을 구성하였고 아래에서 정렬이 시작됩니다.
for i in range(n-1,0,-1):
unsorted[0], unsorted[i]=unsorted[i], unsorted[0] #가장 최대인 unsorted[0]을 배열의 가장 뒤로 보내줍니다.
heapify(unsorted, 0, i) #이후 맨 마지막 원소를 빼고 다시 최대 힙을 구성합니다.
def heapify(unsorted, index, heap_size): #최대 힙을 만들어주는 함수
largest=index #부모와 자식 노드의 index간에 성립하는 식입니다.
left=index*2+1
right=index*2+2
if left<heap_size and unsorted[left]>unsorted[largest]:
largest=left
if right<heap_size and unsorted[right]>unsorted[largest]:
largest=right
if largest!=index:
unsorted[index],unsorted[largest]=unsorted[largest],unsorted[index]
heapify(unsorted, largest, heap_size)
#재귀를 이용해서 구현해줍니다.
#하지만 이렇게 하면 힙의 자료구조에서 한 줄을 따라서만 대소관계를 비교해서 위치를 바꿔주기 때문에
#반복문을 사용하여 heapify함수를 이용해서 완전하게 최대 힙을 만들 수 있습니다.
import sys
# Read N, then N integers (one per line) from stdin, sort and print them.
arr=[]
for i in range(int(input())):
    arr.append(int(sys.stdin.readline()))
# When reading input repeatedly like this, sys.stdin.readline saves running time.
heapsort(arr)
for i in arr:
    print(i)
19628336810 | import adventofcode
import hashlib
from collections import deque
SIZE = 4
VAULT_X = 3
VAULT_Y = 3
DIRECTIONS = [0, 1, 2, 3]
DIR_LETTERS = ['U', 'D', 'L', 'R']
DIR_VALUES = [(0, -1), (0, 1), (-1, 0), (1, 0)]
class path:
def __init__(self, key, path, x, y):
self.key = key
self.path = path
self.x = x
self.y = y
digest = hashlib.md5((key + path).encode('utf-8')).digest()
up = (digest[0] & 0xf0) >> 4
down = digest[0] & 0xf
left = (digest[1] & 0xf0) >> 4
right = digest[1] & 0xf
self.doors = [
y > 0 and up > 10,
y + 1 < SIZE and down > 10,
x > 0 and left > 10,
x + 1 < SIZE and right > 10
]
def is_target(self):
return self.x == VAULT_X and self.y == VAULT_Y
def can_move(self, direction):
return self.doors[direction]
def move(self, direction):
return path(self.key, self.path + DIR_LETTERS[direction],
self.x + DIR_VALUES[direction][0], self.y + DIR_VALUES[direction][1])
class runner(adventofcode.runner):
    """Advent of Code 2016 day 17 driver: shortest and longest vault paths."""
    def __init__(self):
        super().__init__(17)
    def reset(self):
        self.key = None
    def input_line(self, line):
        # The puzzle input is a single line: the passcode.
        self.key = line
    def solve1(self):
        p = self.find_shortest_path()
        return p
    def solve2(self):
        p = self.find_longest_path()
        return str(len(p))
    def find_shortest_path(self):
        """BFS from the start room; the first vault hit is the shortest path."""
        queue = deque()
        queue.append(path(self.key, '', 0, 0))
        while queue:
            p = queue.popleft()
            if p.is_target():
                return p.path
            else:
                for d in DIRECTIONS:
                    if p.can_move(d):
                        queue.append(p.move(d))
    def find_longest_path(self):
        """Exhaustive BFS; paths stop at the vault, so the last recorded wins."""
        longest = ''
        queue = deque()
        queue.append(path(self.key, '', 0, 0))
        while queue:
            p = queue.popleft()
            if p.is_target():
                # It's breadth-first search, so any later path found is >= previous path
                longest = p.path
            else:
                for d in DIRECTIONS:
                    if p.can_move(d):
                        queue.append(p.move(d))
        return longest
# Entry point: read the passcode and print both answers.
r = runner()
r.run()
| bvschaik/advent-of-code | 2016/day17.py | day17.py | py | 2,301 | python | en | code | 0 | github-code | 50 |
42734906839 | from pyspark.sql import SparkSession
import datetime
import pandas as pd
from pyspark.sql.types import *
class CollectHiveData():
    """Pull uid/tuid interaction rows from Hive, join them with a local
    tuid-embedding spreadsheet (./aaaa.xlsx) and persist the merged
    result to ./result.xlsx.
    """

    def __init__(self, days):
        # Hive-enabled Spark session; warehouse dir points at the cluster HDFS.
        self.spark = SparkSession.builder.config("spark.sql.warehouse.dir", "hdfs://ns1/user/hive/warehouse/").appName(
            "collectUidAndTUidData").enableHiveSupport().getOrCreate()
        # Day offset consumed by getBeforeDayDate().
        self.days = days
        # Expected layout of one joined row (user features, opposite-side
        # features, embedding, click label). Not consumed by get_data()
        # today; kept as documentation of the intended row shape.
        self.schema = StructType(
            [
                StructField('uid', StringType()),
                StructField('age', IntegerType()),
                StructField('work_location', IntegerType()),
                StructField('height', IntegerType()),
                StructField('sex', StringType()),
                StructField('subeventid', StringType()),
                StructField('opposite_age', IntegerType()),
                StructField('opposite_work_location', IntegerType()),
                StructField('opposite_height', IntegerType()),
                StructField('opposite_sex', StringType()),
                StructField('tuidEmbed', ArrayType(StringType(), containsNull=True)),
                StructField('label', IntegerType())
            ]
        )

    def getBeforeDayDate(self):
        """Return today's date shifted by self.days days, as 'YYYY-MM-DD'.

        NOTE(review): a positive `days` yields a *future* date despite the
        method name -- confirm callers intend that, or pass a negative
        offset for a past date.
        """
        today = datetime.datetime.now()
        offset = datetime.timedelta(days=self.days)
        beforeDate = (today + offset).strftime('%Y-%m-%d')
        return beforeDate

    # / *+ mapjoin(dataplatform_user_action_record) * /
    # SQL that fetches the rows from the warehouse.
    def getSql(self):
        """Build the Hive query joining user profiles with exposure events.

        Label is 0 for eventid "8.32" (exposure only) and 1 otherwise.

        NOTE(review): the template contains no '{}' placeholder, so the
        .format(...) call below is a no-op and the dt filter stays
        hard-coded at '2020-12-10'. If a dynamic window was intended the
        WHERE clause should read "dt >= '{}'" -- confirm before changing
        the query.
        """
        sql = '''
                select
                    t1.uid,
                    t1.age,
                    t1.work_location,
                    t1.height,
                    t1.sex,
                    t3.subeventid,
                    t3.opposite_age,
                    t3.opposite_work_location,
                    t3.opposite_height,
                    t3.opposite_sex,
                    if(t2.eventid == "8.32", 0, 1) as label
                from
                (
                    select
                        uid,
                        cast(year(CURRENT_DATE) as int) - cast(birth_year as int) as age,
                        work_location,
                        height,
                        sex
                    from
                        algorithm.users_search
                    where
                        gid >= 3
                        and gid <= 10
                        and uid is not null
                        and work_location = '51'
                )t1
                inner join
                (
                    select
                        uid,
                        eventid,
                        subeventid
                    from
                        algorithm.dataplatform_user_action_record
                    where
                        dt >= '2020-12-10'
                        and eventid in ("8.32", "8.33", "8.34")
                        and subeventid is not NULL
                        and subeventid != ""
                )t2
                on cast(t1.uid as string) = t2.uid
                left join
                (
                    select
                        uid as subeventid,
                        cast(year(CURRENT_DATE) as int) - cast(birth_year as int) as opposite_age,
                        work_location as opposite_work_location,
                        height as opposite_height,
                        sex as opposite_sex
                    from
                        algorithm.users_search
                    where
                        gid >= 3
                        and gid <= 10
                        and uid is not null
                        and work_location = '51'
                    group by
                        uid,cast(year(CURRENT_DATE) as int)-cast(birth_year as int),work_location,height,sex
                )t3
                on cast(t2.subeventid as string) = cast(t3.subeventid as string)
        '''.format(self.getBeforeDayDate())
        return sql

    def get_data(self):
        """Run the query, normalize key dtypes, join with the embedding
        sheet and write the merged rows to ./result.xlsx."""
        dataRdd = self.spark.sql(self.getSql())
        # Fix: previous code referenced an undefined name `df` here.
        dataDF = dataRdd.toPandas()
        dataDF.dropna(subset=['subeventid'], inplace=True)
        # Join keys must share a dtype on both sides of the merge.
        dataDF['subeventid'] = dataDF['subeventid'].astype('int64')
        dataDF['uid'] = dataDF['uid'].astype('int64')
        # Fix: pandas.read_excel has no `columns` keyword; `usecols`
        # selects which columns to read.
        tuidDF = pd.read_excel("./aaaa.xlsx", usecols=['subeventid', 'pid', 'tuidEmbed'])
        tuidDF['pid'] = tuidDF['pid'].astype('int64')
        tuidDF['subeventid'] = tuidDF['subeventid'].astype('int64')
        result = pd.merge(dataDF, tuidDF, on=['subeventid'], how='inner')
        # Fix: persist the merged frame; previously the pre-merge dataDF
        # was written and `result` was discarded.
        result.to_excel("./result.xlsx", index=False)
if __name__ == "__main__":
    # Script entry point: collect with a one-day offset.
    collector = CollectHiveData(1)
    collector.get_data()
| yijianfenghou/PyRecommendationSystem | DSSM/AdDSSMNet_torch/DataSet/collectHiveData.py | collectHiveData.py | py | 4,766 | python | en | code | 2 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.