seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37706480646 | import logging
import torch
from torch.optim import SGD
import configs.classification.gradient_alignment as reg_parser
import environment.animal_learning as environments
from experiment.experiment import experiment
from utils import utils
from torch import nn
from model import lstm
from copy import deepcopy
# Online trace-conditioning experiment: an LSTM is trained with truncated
# BPTT on the TracePatterning animal-learning environment using a one-step
# bootstrapped (TD-style) target.  Running errors and sampled predictions
# are buffered and periodically flushed to the experiment's SQL tables.
gamma = 0.9  # discount factor for the bootstrapped target
logger = logging.getLogger('experiment')
p = reg_parser.Parser()
# `seed` is a list on the parser, so its length is the number of seeds per
# hyper-parameter configuration; `run` indexes into the sweep.
total_seeds = len(p.parse_known_args()[0].seed)
rank = p.parse_known_args()[0].run
all_args = vars(p.parse_known_args()[0])
args = utils.get_run(all_args, rank)
my_experiment = experiment(args["name"], args, args["output_dir"], sql=True,
                           run=int(rank / total_seeds),
                           seed=total_seeds)
my_experiment.results["all_args"] = all_args
# Two tables keyed on (run, step): scalar running error, and raw predictions.
my_experiment.make_table("error_table", {"run": 0, "step": 0, "error": 0.0}, ("run", "step"))
my_experiment.make_table("predictions", {"run": 0, "step": 0, "x0":0, "x1":0, "x2":0, "x3":0, "x4":0, "x5":0, "x6":0, "pred":0.0, "target":0.0}, ("run", "step"))
error_table_keys = ["run", "step", "error"]
predictions_table_keys = ["run", "step", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "pred", "target"]
error_list = []        # buffered rows for "error_table"
predictions_list = []  # buffered rows for "predictions"
logger = logging.getLogger('experiment')
gradient_error_list = []      # NOTE(review): never written below — appears unused
gradient_alignment_list = []  # NOTE(review): never written below — appears unused
running_error = 0.05  # exponentially-weighted average of the ground-truth error
hidden_units = args["features"]
utils.set_seed(args["seed"])
env = environments.TracePatterning(seed=args["seed"], ISI_interval=(14, 26), ITI_interval = (80, 120), gamma = 0.9, num_CS = 6, num_activation_patterns = 10, activation_patterns_prob= 0.5, num_distractors = 5, activation_lengths= {"CS": 1, "US": 1, "distractor": 1}, noise= 0)
h = torch.zeros(1, 1, hidden_units).float()  # LSTM hidden state
c = torch.zeros(1, 1, hidden_units).float()  # LSTM cell state
# NOTE: `input` shadows the builtin of the same name throughout this script.
input = torch.tensor(env.reset().observation).view(1, 1, -1).float()
n = lstm.LSTMNet(hidden_units)
opti = SGD(n.parameters(), args["step_size"])
error_grad_mc = 0       # NOTE(review): never used after initialisation
sum_of_error = None     # accumulated squared TD error over the truncation window
for i in range(0, 5000000):
    value_prediction, (h, c) = n(input, (h, c))
    # print(value_prediction)
    gt_target = env.get_real_target()
    # Periodically record raw inputs / prediction / target for inspection.
    if (i % 100000 < 400):
        temp_list = [str(rank), str(i)]
        counter = 0
        # Assumes observation indices 1..6 are the CS features — TODO confirm.
        for t in input.squeeze():
            if counter!= 0 and counter < 7:
                temp_list.append(str(t.item()))
            counter+=1
        temp_list.append(str(input.squeeze()[0].item()))
        temp_list.append(str(value_prediction.item()))
        temp_list.append(str(gt_target))
        predictions_list.append(temp_list)
    input = env.step(0)
    input = torch.tensor(input.observation).view(1, 1, -1).float()
    # Bootstrapped target computed from a frozen copy of the network so the
    # gradient flows only through the prediction, not the target.
    n_copy = deepcopy(n)
    with torch.no_grad():
        next_pred, _ = n_copy(input, (h.detach(), c.detach()))
    target = input[0, 0, 0].detach() + gamma * next_pred.detach().item()
    # print(gt_target, target)
    real_error = (target - value_prediction) ** 2
    gt_error = (gt_target - value_prediction)**2
    if sum_of_error is None:
        sum_of_error = real_error
    else:
        sum_of_error = sum_of_error + real_error
    running_error = running_error * 0.9999 + gt_error.detach().item() * 0.0001
    # Truncated BPTT: update and cut the graph every `truncation` steps.
    if(i%args["truncation"] == 0):
        opti.zero_grad()
        sum_of_error.backward()
        opti.step()
        h = h.detach()
        c = c.detach()
        sum_of_error = None
    if (i % 50000 == 20000):
        error_list.append([str(rank), str(i), str(running_error)])
    # Flush buffered rows to the database every 100k steps.
    if(i % 100000 == 4):
        my_experiment.insert_values("predictions", predictions_table_keys, predictions_list)
        predictions_list = []
        my_experiment.insert_values("error_table", error_table_keys, error_list)
        error_list = []
    if(i%100000 == 0):
        print("Step", i, "Running error = ", running_error)
# | khurramjaved96/columnar_networks | NoisyPatterns.py | NoisyPatterns.py | py | 3,764 | python | en | code | 0 | github-code | 13 |
70278950097 | # Run this only once.
import sqlite3
DATABASE_PATH = "processed_data.sqlite"
def db_connection(path: str):
    """Open a SQLite connection to *path*.

    Returns the connection on success, or None when connecting fails
    (the error is printed, not raised).
    """
    try:
        return sqlite3.connect(path)
    except sqlite3.Error as e:
        print("Error:", e)
        return None
# One-time setup: create the `col` table, then list all user tables/views
# to confirm creation.
connection = db_connection(DATABASE_PATH)
cur = connection.cursor()
cur.execute("CREATE TABLE col (id INTEGER PRIMARY KEY, lat real not null, lon real not null, cost real not null)")
connection.commit()
# sqlite_schema lists every table/view; filter out SQLite's internal tables.
for row in cur.execute("SELECT name FROM sqlite_schema WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%' ORDER BY 1;"):
    print(row)
connection.close()
13129376348 | from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from .models import programa
from .models import categoria
from .models import atajo
# lista inicial de programas o datos de un programa especifico
def index(request):
    """List all programs, or — when a numeric ``id`` query parameter is
    given — return that program's detail with its shortcuts (atajos)."""
    # Bug fix: GET parameters arrive as strings, so the previous `id == 0`
    # comparison was False even for "?id=0" and would 500 on non-numeric ids.
    try:
        prog_id = int(request.GET.get('id', 0))
    except (TypeError, ValueError):
        prog_id = 0
    if prog_id == 0:
        lista = programa.objects.all()
        output = [{'id': item.pk, 'nombre': item.nombre, 'descripcion': item.descripcion.encode('utf-8'), 'logo': item.logo} for item in lista]
    else:
        item = programa.objects.get(pk=prog_id)
        lista = atajo.objects.filter(programa=prog_id)
        output = {'id': item.pk, 'nombre': item.nombre, 'atajos': [{'id': item2.pk, 'categoria': categoria.objects.get(pk=item2.categoria.pk).nombre, 'descripcion': item2.descripcion, 'combinacion': item2.combinacion} for item2 in lista]}
    # safe=False allows a top-level JSON list response.
    return JsonResponse(output, safe=False)
# datos de un atajo
def atajoPorId(request):
    """Return one shortcut (atajo) by its numeric ``id`` query parameter,
    or a hint message when the parameter is missing."""
    # Bug fix: GET parameters are strings; coerce so the missing-id branch
    # is taken consistently and bad input does not raise further down.
    try:
        atajo_id = int(request.GET.get('id', 0))
    except (TypeError, ValueError):
        atajo_id = 0
    if atajo_id == 0:
        output = {'message': 'insert params'}
    else:
        item = atajo.objects.get(pk=atajo_id)
        output = {'id': item.pk, 'categoria': categoria.objects.get(pk=item.categoria.pk).nombre, 'descripcion': item.descripcion, 'combinacion': item.combinacion}
    return JsonResponse(output, safe=False)
| cgalvist/LinuxTricks | back_end/linuxtricks/programas/views.py | views.py | py | 1,317 | python | es | code | 0 | github-code | 13 |
25927064880 | from pydbl.test.management import Manager
import unittest
import sqlite3
class TestListsMgmt(unittest.TestCase):
    """End-to-end test of CLI list management: create a list, attach domains,
    disable, re-enable, and delete it, verifying the sqlite state each time."""

    def test_list_operations(self):
        manager = Manager(verbose=True)
        list_name = "test-list"
        list_url = "http://test.example.com/list"
        list_description = "description"
        # -A: add a new list; exit status 0 means success.
        status = manager.run([
            "-A",
            "--list-name", list_name,
            "--list-url", list_url,
            "--list-description", list_description,
        ])
        self.assertEqual(status, 0)
        # Inspect the manager's database directly.
        db = sqlite3.connect(manager.get_db())
        cur = db.cursor()
        cur.row_factory = sqlite3.Row
        st = cur.execute("SELECT * FROM domain_lists WHERE name = ?", (list_name, ))
        record = st.fetchone()
        self.assertEqual(record["name"], list_name)
        self.assertEqual(record["url"], list_url)
        self.assertEqual(record["description"], list_description)
        self.assertEqual(record["active"], 1, "Created as active")
        # add some domains
        max_domains = 3
        for i in range(0, max_domains):
            cur.execute(
                "INSERT INTO domains(name, list_id) VALUES (?, ?)",
                ("example-%d.com" % i, record["id"])
            )
        db.commit()
        cur.execute(
            "SELECT count(*) AS cnt FROM domains d" +
            " JOIN domain_lists AS dl ON dl.id = d.list_id " +
            " WHERE dl.name = ?",
            (list_name, )
        )
        record = cur.fetchone()
        self.assertEqual(record["cnt"], max_domains)
        # Disabling flips the `active` flag without deleting anything.
        status = manager.run([
            "--disable-list", list_name
        ])
        self.assertEqual(status, 0, "Disable list command ok")
        st = cur.execute(
            "SELECT * FROM domain_lists WHERE name = ?",
            (list_name, )
        )
        record = st.fetchone()
        self.assertEqual(record["active"], 0, "List disabled")
        status = manager.run([
            "--enable-list", list_name
        ])
        self.assertEqual(status, 0, "Enable list command ok")
        st = cur.execute(
            "SELECT * FROM domain_lists WHERE name = ?",
            (list_name, ))
        record = st.fetchone()
        self.assertEqual(record["active"], 1, "List enanled")
        # Deleting the list should cascade to its domains.
        status = manager.run([
            "--delete-list", list_name
        ])
        self.assertEqual(status, 0)
        st = cur.execute(
            "SELECT count(*) AS cnt FROM domain_lists WHERE name = ?",
            (list_name, ))
        record = st.fetchone()
        self.assertEqual(record["cnt"], 0, "List removed")
        cur.execute(
            "SELECT count(*) AS cnt FROM domains d" +
            " JOIN domain_lists dl ON dl.id = d.list_id " +
            " WHERE dl.name = ?",
            (list_name, )
        )
        record = cur.fetchone()
        self.assertEqual(record["cnt"], 0)
| mcptr/dbl-service | test/test_cli/test_lists.py | test_lists.py | py | 2,372 | python | en | code | 0 | github-code | 13 |
70492336978 | #Getting cube of key values in a dictionary.
def Dict(a, b):
    """Print a dict mapping each key i in [a, b] to its cube (returns None)."""
    cubes = {n: n ** 3 for n in range(a, b + 1)}
    print('The Dictionary with cube of keyvalues:', cubes)
def main():
    # Interactive driver: read the upper bound and print cubes for keys 1..b.
    print('To get the cube of keys in dictionary.')
    a=1
    b=int(input('Enter the Range till you want to print dictionary.'))
    Dict(a,b)
if __name__=='__main__':
    main()
| karmveershubham/Python-Codes | Dictionary_cube.py | Dictionary_cube.py | py | 399 | python | en | code | 1 | github-code | 13 |
42517228226 | from datetime import date
from rest_framework import serializers
from reviews.models import Category, Comment, Genre, Review, Title, User
class CustomSlugRelatedField(serializers.SlugRelatedField):
    """SlugRelatedField that renders the related object as
    ``{'name': ..., 'slug': ...}`` instead of the bare slug value."""

    def to_representation(self, obj):
        return {'name': obj.name,
                'slug': obj.slug}
class UserSerializer(serializers.ModelSerializer):
    """Full user serializer, including the writable ``role`` field."""

    class Meta:
        fields = (
            'username', 'email', 'first_name', 'last_name', 'bio', 'role',
        )
        model = User
class UserOwnerProfileSerializer(serializers.ModelSerializer):
    """Serializer for a user editing their own profile; ``role`` is
    read-only so users cannot change their own permissions."""

    class Meta:
        fields = (
            'username', 'email', 'first_name', 'last_name', 'bio', 'role',
        )
        model = User
        read_only_fields = ('role', )
class UserRegistrationSerializer(serializers.ModelSerializer):
    """Sign-up serializer: only username and email."""

    class Meta:
        fields = ('username', 'email', )
        model = User

    def validate_username(self, value: str) -> str:
        """Reject the reserved username 'me' (presumably clashes with a
        /users/me/ endpoint — confirm against the URL config)."""
        if value == 'me':
            raise serializers.ValidationError(
                f'{value} не корректное имя пользователя'
            )
        return value
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for Category: name and slug only."""

    class Meta:
        fields = ('name', 'slug')
        model = Category
class ReviewSerializer(serializers.ModelSerializer):
    """Serializer for Review; ``author`` is rendered as the username and is
    read-only (set from the request user in the view)."""

    author = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )

    class Meta:
        fields = (
            'id', 'text', 'author', 'score', 'pub_date',
        )
        model = Review
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for Comment; ``author`` is rendered as the username and is
    read-only (set from the request user in the view)."""

    author = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )

    class Meta:
        fields = ('id', 'text', 'author', 'pub_date', )
        model = Comment
class GenreSerializer(serializers.ModelSerializer):
    """Serializer for Genre: name and slug only."""

    class Meta:
        fields = ('name', 'slug')
        model = Genre
class TitleSerializer(serializers.ModelSerializer):
    """Serializer for Title: read-only aggregate rating, plus category and
    genre rendered as {'name', 'slug'} dicts via CustomSlugRelatedField."""

    rating = serializers.IntegerField(
        source='average_rating', read_only=True,
    )
    category = CustomSlugRelatedField(
        queryset=Category.objects.all(),
        slug_field='slug',
        read_only=False,
    )
    genre = CustomSlugRelatedField(
        queryset=Genre.objects.all(),
        slug_field='slug',
        read_only=False,
        many=True,
    )

    class Meta:
        fields = (
            'id', 'name', 'year', 'rating', 'description', 'category', 'genre',
        )
        model = Title

    def validate_year(self, value):
        """Reject years that are in the future or non-positive."""
        current_year = date.today().year
        # Bug fix: the original checked ``year <= 0`` (the *current* year,
        # which can never be <= 0) instead of the submitted value.
        if value > current_year or value <= 0:
            raise serializers.ValidationError(
                'Некорректное значение года'
            )
        return value
| ArtemKAF/api_yamdb | api_yamdb/api/serializers.py | serializers.py | py | 2,776 | python | en | code | 1 | github-code | 13 |
38910371406 | import inject
import json
import logging
from typing import Optional
logger = logging.getLogger(__name__)
class StorageService:
    """
    Stores and retrieves JSON-serialisable entities in redis.

    Each entity lives under the key ``<storage_path>-<identity>``; the set
    of known entity identities is tracked as a JSON list stored under
    ``<storage_path>-KEYS``.
    """

    MAIN_PATH = 'STORAGE'

    def __init__(
        self,
        identity: str,
        storage_path: Optional[str] = None
    ):
        self.identity = identity
        self.storage_path = storage_path or self.MAIN_PATH
        # Redis client is resolved through the DI container.
        self.redis_client = inject.instance('redis')

    @property
    def entity_key(self):
        """Full redis key of this instance's entity: '<path>-<identity>'.

        :rtype: str
        """
        return '{}-{}'.format(self.storage_path, self.identity)

    @property
    def all_keys_key(self):
        """Redis key of the JSON list holding all entity identities.

        :rtype: str
        """
        return '{}-KEYS'.format(self.storage_path)

    async def get_all_keys(self):
        """Return all entity identities stored within storage_path.

        :rtype: list of str
        """
        data = await self.redis_client.get(self.all_keys_key)
        return json.loads(data) if data is not None else []

    async def add_new_key(self, key: Optional[str] = None):
        """Add *key* (default: this identity) to the stored identity list.

        :return: the updated list of identities.
        :rtype: list of str
        """
        new_key = key or self.identity
        all_keys = await self.get_all_keys()
        if new_key not in all_keys:
            all_keys.append(new_key)
            await self.redis_client.set(self.all_keys_key, json.dumps(all_keys))
        return all_keys

    async def remove_key(self, key: Optional[str] = None):
        """Remove *key* (default: this identity) from the stored identity list.

        :return: the updated list of identities.
        :rtype: list of str
        """
        removing_key = key or self.identity
        all_keys = await self.get_all_keys()
        if removing_key in all_keys:
            all_keys.remove(removing_key)
            await self.redis_client.set(self.all_keys_key, json.dumps(all_keys))
        return all_keys

    async def get_all_storage_entities(self):
        """Return ``{identity: entity}`` for every identity in the key list.

        :rtype: dict
        """
        result = {}
        for key in await self.get_all_keys():
            result[key] = json.loads(await self.redis_client.get(
                '{}-{}'.format(self.storage_path, key)
            ))
        return result

    async def get_storage_entity(self, key: Optional[str] = None):
        """Fetch and JSON-decode the entity stored under *key*.

        :param key: full redis key; defaults to this instance's entity_key.
        :return: decoded entity, or None when the key is absent.
        :rtype: dict | None
        """
        the_key = key or self.entity_key
        logger.info(
            'Getting value from redis using key: {}'.format(the_key)
        )
        data = await self.redis_client.get(the_key)
        if data is not None:
            # NOTE(review): registers *this instance's* identity even when a
            # custom key was fetched — confirm this is intended.
            await self.add_new_key()
            data = json.loads(data)
            logger.info('... and value is found...')
        return data

    async def store_storage_entity(self, data, key: Optional[str] = None):
        """Store *data* (dict or pre-serialised str) under *key*.

        :param key: full redis key; defaults to this instance's entity_key.
        :return: the stored data, unchanged.
        """
        the_key = key or self.entity_key
        logger.info(
            'Storing value in redis using key: {}'.format(the_key)
        )
        content = json.dumps(data) if isinstance(data, dict) else data
        await self.redis_client.set(the_key, content)
        return data

    async def delete_storage_entity(self, key: Optional[str] = None):
        """Delete the entity stored under *key* and return its last value.

        :param key: full redis key; defaults to this instance's entity_key.
        :return: the deleted entity, or None when it did not exist.
        """
        the_key = key or self.entity_key
        logger.info(
            'Deleting value in redis using key: {}'.format(the_key)
        )
        # Bug fix: the entity was previously fetched via the *default* key,
        # ignoring an explicitly supplied one.
        data = await self.get_storage_entity(key)
        if data is not None:
            await self.remove_key()
        await self.redis_client.delete(the_key)
        logger.info('... it is deleted.')
        return data

    async def update_storage_entity(self, data, key: Optional[str] = None):
        """Merge *data* into the entity stored under *key* and persist it.

        :param data: dict merged over the current entity (new keys win).
        :param key: full redis key; defaults to this instance's entity_key.
        :return: the merged entity.
        :rtype: dict
        """
        the_key = key or self.entity_key
        logger.info(
            'Merging value in redis using key: {}'.format(the_key)
        )
        # Bug fix: previously read through the default key even when an
        # explicit key was supplied, merging into the wrong entity.
        redis_data = await self.get_storage_entity(key)
        if redis_data is not None:
            redis_data.update(data)
        else:
            redis_data = data
        await self.redis_client.set(
            the_key, json.dumps(redis_data)
        )
        return redis_data
| stefan2811/port-16 | port_16/api/common/service/storage.py | storage.py | py | 6,303 | python | en | code | 0 | github-code | 13 |
21708719778 | import requests
import os
import json
from flask import Flask
from flask import request
from flask import make_response
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """API.ai fulfillment endpoint: build a device query from the recognised
    action/parameters, forward it to the push server, and reply with a
    speech payload."""
    req = request.get_json(silent=True, force=True)
    print(">>> Request:")
    print(json.dumps(req, indent=4))
    action = req.get("result").get("action")
    param = req.get("result").get("parameters")
    # Bug fix: `query` and `speech` were unbound when `action` was None,
    # raising NameError below; default them explicitly.
    query = None
    speech = None
    if action is not None:
        query = makeQuery(action, param)
    else:
        print("action is None")
    print("query")
    print(json.dumps(query, indent=4))
    # pass to push server
    if query is not None:
        speech = "Got it"
        pushToServer(query)
    # makeSpeechResponse(None) yields the "cannot understand" apology.
    response = makeSpeechResponse(speech)
    res = json.dumps(response, indent=4)
    print(">>> Response:")
    print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def pushToServer(query):
    """POST *query* as JSON to the push server (fire-and-forget; the
    response is ignored)."""
    URL = "http://52.39.36.22:8000"
    if query is not None:
        requests.post(URL, data=json.dumps(query))
def makeSpeechResponse(speech):
    """Build the API.ai fulfillment payload; fall back to an apology when
    *speech* is None."""
    text = speech if speech is not None else "Sorry, I cannot understand your command"
    return {
        "speech": text,
        "displayText": text,
        "source": "beanbird",
    }
def makeQuery(action, param):
    """Package *action* and *param* into the push payload.

    Returns {} when *param* is missing.  Debug prints (including the
    original "aciton" label) are kept verbatim.
    """
    print("aciton")
    print(action)
    print("param")
    print(json.dumps(param, indent=4))
    if param is None:
        print("param is None")
        return {}
    return {"action": action, "param": param}
if __name__ == '__main__':
    # Default to port 5000 unless PORT is set; listen on all interfaces.
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
| wizehack/beanbird | app.py | app.py | py | 1,535 | python | en | code | 0 | github-code | 13 |
36245907482 | from dataclasses import dataclass
@dataclass
class Point:
    """A 2-D grid point hashable on its (x, y) pair."""
    x: int
    y: int

    def distance(self, other: "Point") -> int:
        """Manhattan distance to *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        return abs(dx) + abs(dy)

    def __hash__(self):
        return hash((self.x, self.y))

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)
Data = list[tuple[Point, Point]]
Return = int
def parse_data(data: str) -> Data:
    """Parse puzzle input into (sensor, closest-beacon) Point pairs.

    Each line looks like:
    "Sensor at x=2, y=18: closest beacon is at x=-2, y=15".
    """
    pairs = []
    for line in data.splitlines():
        tok = line.split(" ")
        # Coordinate tokens look like "x=2," / "y=18:" — strip the trailing
        # punctuation and drop the "x=" / "y=" prefix.
        sensor = Point(int(tok[2].strip(",")[2:]), int(tok[3].strip(":")[2:]))
        beacon = Point(int(tok[8].strip(",")[2:]), int(tok[9].strip(":")[2:]))
        pairs.append((sensor, beacon))
    return pairs
def clamp(x: int, _min: int, _max: int):
    """Restrict *x* to the inclusive range [_min, _max]."""
    if x < _min:
        return _min
    if x > _max:
        return _max
    return x
def part_1(input: str, row=2_000_000) -> Return:
    """Count positions on *row* that cannot contain a beacon.

    Marks every x covered by some sensor's diamond on the target row as
    False, then overrides positions of actual beacons with True; the answer
    is the count of non-beacon covered cells.
    """
    sensors = parse_data(input)
    points = {}
    for sensor, beacon in sensors:
        reach = sensor.distance(beacon)
        # Vertical offset from the sensor to the target row; the diamond's
        # horizontal extent shrinks by this amount.
        dist = abs(sensor.y - row)
        for x in range(sensor.x - reach + dist, sensor.x + reach - dist + 1):
            if x not in points:
                points[x] = False
        # A real beacon on the row is allowed — mark it True.
        if beacon.y == row:
            points[beacon.x] = True
    return len([x for x in points.values() if x == False])
def part_2(input: str, bound=4_000_000) -> Return:
    """Find the single uncovered position within [0, bound]^2 and return its
    tuning frequency (x * 4_000_000 + y).

    For each row, collects the [start, end) interval each sensor covers
    (clamped to the bound), sorts them, and sweeps left to right looking for
    a gap.
    """
    sensors = parse_data(input)
    reaches = [sensor.distance(beacon) for sensor, beacon in sensors]
    for row in range(bound):
        lines = []
        for (sensor, beacon), reach in zip(sensors, reaches):
            dist = abs(sensor.y - row)
            lines.append(
                (
                    clamp(sensor.x - (reach - dist), 0, bound),
                    clamp(sensor.x + (reach - dist), 0, bound) + 1,
                )
            )
        lines.sort()
        # Sweep: `x` is the first column not yet proven covered.
        x = 0
        for start, end in lines:
            if start > x:
                return x * 4_000_000 + row
            x = max(x, end)
if __name__ == "__main__":
    import sys
    # Read the puzzle input from the file given as argv[1], or from stdin.
    fp = open(sys.argv[1]) if len(sys.argv) > 1 else sys.stdin
    data = fp.read()
    print(f"Part 1: {part_1(data)}")
    print(f"Part 2: {part_2(data)}")
| arjandepooter/advent-of-code-2022 | aoc_2022/day15/solution.py | solution.py | py | 2,185 | python | en | code | 0 | github-code | 13 |
35209235373 | #PRACTICA5
# Practice 5: repeat Practice 4, but store the user's data in a dict
# instead of a list.
varNombre = input("Tu nombre: ")
varNaci = int(input("Cuantos años tienes: "))
dicci = {"Edad" : varNaci , "Nombre" : varNombre}
# Iterating a dict yields its keys; index back in for the value.
for elemento in dicci:
    print ("Elemento : " + str(dicci[elemento]))
#Listas:
#CREARLA: lista=[variable,"pepito", 13, ...]
#"pepito": lista[1]
#Posicion "pepito": lista.index("pepito")
#longitud(tamaño) : len(lista)
#Diccionarios:
#CREARLO: diccionario={"calle" : variable,"nombre" : "pepito", "edad" : 13, ...}
#"pepito": diccionario["nombre"]
#keys : datos_basicos.keys())
#values : datos_basicos.values()
#items : datos_basicos.items()
#longitud(tamaño) : len(diccionario)
#for clave, valor in diccionario.iteritems():
#print "El valor de la clave %s es %s" % (clave, valor)
#print("El valor de la clave "+str(clave)+" es "+str(valor))
#La verdad tras un diccionario:
#En los demás lenguajes un diccionario se llama Objecto
#El objeto siempre se hace con llaves: varObject = {}
#El objeto siempre tiene claves: varObject = {
# "nombre": "Juan"
# "apellido": "Retana"
#}
#En otros lenguajes para acceder a "Juan" es con varObject.nombre | brago12/Practicas_Python | practica5.py | practica5.py | py | 1,227 | python | es | code | 0 | github-code | 13 |
29048309458 | import random as python_random
import argparse
import numpy as np
import json
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay, accuracy_score
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers.schedules import PolynomialDecay, CosineDecay
from transformers import TFAutoModelForSequenceClassification, AutoTokenizer, pipeline
from itertools import product
import tensorflow as tf
import matplotlib.pyplot as plt
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Make reproducible as much as possible
np.random.seed(1234)
tf.random.set_seed(1234)
python_random.seed(1234)
def read_corpus(corpus_file):
    """Read the review dataset: one sample per line, the class label is the
    first whitespace token and the document text starts at the fourth.

    Returns (documents, labels) as parallel lists.
    """
    documents, labels = [], []
    with open(corpus_file, encoding='utf-8') as fh:
        for raw in fh:
            fields = raw.strip().split()
            documents.append(" ".join(fields[3:]).strip())
            # 6-class problem: books, camera, dvd, health, music, software
            labels.append(fields[0])
    return documents, labels
def create_arg_parser():
    """CLI arguments: train/dev files (with defaults) and an optional
    test file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--train_file", default='data/train.txt', type=str,
                        help="Input file to learn from (default train.txt)")
    parser.add_argument("-d", "--dev_file", type=str, default='data/dev.txt',
                        help="Separate dev set to read in (default dev.txt)")
    parser.add_argument("-t", "--test_file", type=str,
                        help="If added, use trained model to predict on test set")
    return parser.parse_args()
def train_model(lm, tokens_train, Y_train_bin, num_labels, epochs, batch_size, learning_rate):
    """Fine-tune a pretrained sequence-classification model.

    `learning_rate` may be a float or one of the strings "PolynomialDecay" /
    "CosineDecay", which select a decaying schedule instead.
    """
    print("Loading model....")
    model = TFAutoModelForSequenceClassification.from_pretrained(lm, num_labels=num_labels)
    # Model outputs raw logits, hence from_logits=True.
    loss_function = CategoricalCrossentropy(from_logits=True)
    # NOTE(review): decay steps = samples * epochs, not optimizer steps
    # (batches * epochs) — the schedule decays batch_size times slower than
    # one full run; confirm this is intentional.
    num_decay_steps = len(Y_train_bin) * epochs
    if learning_rate == "PolynomialDecay":
        lr_scheduler = PolynomialDecay(
            initial_learning_rate=5e-5, end_learning_rate=0.0, decay_steps=num_decay_steps
        )
    elif learning_rate == "CosineDecay":
        lr_scheduler = CosineDecay(
            initial_learning_rate=5e-5, decay_steps = num_decay_steps
        )
    else:
        lr_scheduler = learning_rate
    optim = Adam(learning_rate=lr_scheduler)
    print("Training model....")
    model.compile(loss=loss_function, optimizer=optim, metrics=['accuracy'])
    model.fit(tokens_train, Y_train_bin, verbose=1, epochs=epochs,
              batch_size=batch_size)
    print("Done!")
    return model
def evaluate_model(lm, tokens_dev, Y_dev_bin, labels, figpath="confusion_matrix.png"):
    """Evaluate a fine-tuned model on the dev set.

    Prints a classification report, saves the confusion matrix plot to
    *figpath*, and returns the accuracy.
    """
    print("Evaluating model....")
    pred = lm.predict(tokens_dev)["logits"]
    # Convert logits / one-hot labels back to class indices for sklearn.
    Y_pred = np.argmax(pred, axis=1)
    Y_test = np.argmax(Y_dev_bin, axis=1)
    report = classification_report(Y_test, Y_pred, target_names=labels, digits=3)
    print(report)
    cm = confusion_matrix(Y_test, Y_pred)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)
    disp.plot()
    # Bug fix: `figpath` was a free (undefined) global here, so this line
    # raised NameError; it is now a keyword parameter with a default.
    plt.savefig(figpath)
    return accuracy_score(Y_test, Y_pred)
def create_param_grid():
    """Return the Cartesian product of all hyperparameter options as a
    list of {name: value} dicts."""
    options = {
        'epochs': [1, 2, 3, 4, 5],
        'max_seq_len': [50, 100, 150],
        'batches': [16, 32, 64],
        'lr_schedulers': ["PolynomialDecay", "CosineDecay", 5e-5, 4e-5, 3e-5],
    }
    return [dict(zip(options.keys(), combo)) for combo in product(*options.values())]
def main():
    """Grid-search fine-tuning of bert-base-uncased over epochs / sequence
    length / batch size / LR schedule, evaluating each configuration on the
    dev set and dumping results to results.txt as JSON lines."""
    lm = 'bert-base-uncased'
    args = create_arg_parser()
    # Read in the data and embeddings
    print("..........................\n")
    print("Loading data...")
    X_train, Y_train = read_corpus(args.train_file)
    X_dev, Y_dev = read_corpus(args.dev_file)
    encoder = LabelBinarizer()
    Y_train_bin = encoder.fit_transform(Y_train)  # Use encoder.classes_ to find mapping back
    Y_dev_bin = encoder.fit_transform(Y_dev)
    labels = encoder.classes_
    param_grid = create_param_grid()
    performances = []
    max_accuracy = 0  # NOTE(review): never updated or read afterwards
    for parameters in param_grid:
        # Free graph memory from the previous configuration.
        tf.keras.backend.clear_session()
        tokenizer = AutoTokenizer.from_pretrained(lm)
        tokens_train = tokenizer(X_train, padding=True, max_length=parameters['max_seq_len'],
                                 truncation=True, return_tensors="np").data
        tokens_dev = tokenizer(X_dev, padding=True, max_length=parameters['max_seq_len'],
                               truncation=True, return_tensors="np").data
        # BUG(review): `X_test` is never defined (args.test_file is never
        # read), so this line raises NameError; `tokens_test` is also unused.
        tokens_test = tokenizer(X_test, padding=True, max_length=parameters['max_seq_len'],
                                truncation=True, return_tensors="np").data
        model = train_model(lm, tokens_train, Y_train_bin, len(labels),
                            epochs=parameters['epochs'], batch_size=parameters['batches'], learning_rate=parameters['lr_schedulers'])
        acc = evaluate_model(model, tokens_dev, Y_dev_bin, labels)
        performances.append(acc)
    # Writing results to txt file
    output_file = open('results.txt', 'w', encoding='utf-8')
    i = 0
    for parameters in param_grid:
        parameters['accuracy'] = performances[i]
        json.dump(parameters, output_file)
        output_file.write("\n")
        i += 1
if __name__ == "__main__":
    main()
| chadji15/LearningFromData_Assignment3 | src/grid_search.py | grid_search.py | py | 5,813 | python | en | code | 0 | github-code | 13 |
34991762130 | from stateMachine.statesEnum import ENVIAR_MENSAJES, EXPLORAR
from threading import Timer
from utils import convertTupleToString
from properties import POI_POSITIONS, POI_TIMERS
class actualizarMapa():
    """State-machine state that updates the drone's search map after visiting
    a position.  When the visited position is the drone's current POI, its
    surveillance timer is restarted and all POI bookkeeping is cleared, and
    the machine transitions to EXPLORAR; otherwise the buffered next state
    (normally ENVIAR_MENSAJES) is used."""

    def __init__(self, bebop, dataBuffer, previousState, poisVigilar, poiVigilarTimeout, poiVigilarTimeoutDict, poisCriticos, assignedPOIs, logStats, messages):
        self.nextState = ENVIAR_MENSAJES
        self.bebop = bebop
        self.previousState = previousState
        self.messages = messages
        self.dataBuffer = dataBuffer
        self.poisVigilar = poisVigilar                    # POIs pending surveillance
        self.poiVigilarTimeout = poiVigilarTimeout        # callback fired when a POI times out
        self.poiVigilarTimeoutDict = poiVigilarTimeoutDict  # poiKey -> running Timer
        self.poisCriticos = poisCriticos                  # POIs flagged critical
        self.assignedPOIs = assignedPOIs                  # poiKey -> assignee
        self.logStats = logStats
        self.messages = messages

    def getNextState(self):
        return self.nextState

    def execute(self):
        current_position = self.bebop.current_position
        nextState = self.dataBuffer
        # Arrived at the POI we were heading for: reset its timer and clear
        # every structure that tracked it.
        if (self.bebop.poi_position == current_position):
            self.bebop.poi_position = None
            poiKey = convertTupleToString(current_position)
            if (poiKey in self.poiVigilarTimeoutDict):
                executionTimer = self.poiVigilarTimeoutDict[poiKey]
                executionTimer.cancel()
            # Restart the surveillance countdown for this POI.
            executionTimerNew = Timer(POI_TIMERS[POI_POSITIONS.index(current_position)], self.poiVigilarTimeout, (current_position, ))
            executionTimerNew.start()
            self.poiVigilarTimeoutDict[poiKey] = executionTimerNew
            self.logStats.poiExplorado(poiKey)
            if current_position in self.poisVigilar:
                self.poisVigilar.remove(current_position)
            if current_position in self.poisCriticos:
                self.poisCriticos.remove(current_position)
            if poiKey in self.assignedPOIs:
                del(self.assignedPOIs[poiKey])
            nextState = EXPLORAR
        self.bebop.updateSearchMap(self.bebop.current_position)
        return nextState

    def handleMessage(self, message):
        # Messages are buffered for later processing by other states.
        self.messages.append(message)
| gRondan/MultipleUAVExploration | stateMachine/states/actualizarMapa.py | actualizarMapa.py | py | 2,125 | python | en | code | 1 | github-code | 13 |
70922155539 | import random
from django import forms
from django.contrib.auth import get_user_model
from django.forms import formset_factory
from . import models
# CHOICES is the tuple that defines what colors a user can pick for their
# background color in the ProfileForm
CHOICES = [
('random', 'Random'),
('blue', 'Blue'),
('', 'Blue-Green'),
('green', 'Green'),
('orange', 'Orange'),
('red', 'Red'),
('pink', 'Pink'),
('purple', 'Purple'),
]
class NewSkillForm(forms.Form):
"""Form for potential Skills"""
skill = forms.CharField(max_length=35)
def __init__(self, *args, **kwargs):
super(NewSkillForm, self).__init__(*args, **kwargs)
self.fields['skill'].widget.attrs['placeholder'] = 'New skill...'
NewSkillFormSet = formset_factory(NewSkillForm, extra=0)
class SkillForm(forms.ModelForm):
"""Form for the AllSkills model"""
class Meta:
model = models.AllSkills
fields = [
'skills'
]
labels = {
'name': 'Skill'
}
SkillFormSet = formset_factory(SkillForm, extra=0)
class ProjectForm(forms.ModelForm):
"""Form for the Project model"""
def __init__(self, *args, **kwargs):
super(ProjectForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs['placeholder'] = 'Project Title'
self.fields['description'].widget.attrs['placeholder'] = \
'Project description...'
self.fields['time_line'].widget.attrs['placeholder'] = \
'Project time line...'
self.fields['requirements'].widget.attrs['placeholder'] =\
'Project Requirements...'
class Meta:
model = models.Project
fields = [
'title',
'time_line',
'requirements',
'description'
]
widgets = {
'description': forms.Textarea(attrs={'rows': 13, 'cols': 20}),
'requirements': forms.Textarea(attrs={'rows': 7, 'cols': 20})}
class PositionForm(forms.ModelForm):
"""Form for the Position model"""
def __init__(self, *args, **kwargs):
super(PositionForm, self).__init__(*args, **kwargs)
self.fields['information'].widget.attrs['placeholder'] = \
'Position Information...'
class Meta:
model = models.Position
fields = [
'skill',
'time_commitment',
'information',
]
widgets = {'information': forms.Textarea(attrs={'rows': 10})}
class EditPositionForm(PositionForm):
"""Subclass of PositionForm that bypasses a bug when the user submits
the PositionFormSet when deleting a Position and does nothing else
Could potentially allow a user to edit a newly created project
to have no positions though."""
def is_valid(self):
"""Allows the form to count as valid if no information is present"""
valid = super(PositionForm, self).is_valid()
if self['skill'].data == '' and self['information'].data == '':
valid = True
return valid
PositionFormSet = formset_factory(EditPositionForm, extra=0)
class UserForm(forms.ModelForm):
"""This is the form for the user portion of the user model"""
honey_pot = forms.CharField(widget=forms.HiddenInput, required=False)
class Meta:
model = get_user_model()
fields = [
'avatar',
'bio',
'color',
'username'
]
widgets = {'color': forms.Select(choices=CHOICES)}
def clean_color(self):
"""If the color is random, choice a random color"""
color = self.cleaned_data['color']
if color == 'random':
color = random.choice(CHOICES[1:])[0]
return color
def clean_honey_pot(self):
"""This creates a honeypot to get rid of some bots"""
honey_pot = self.cleaned_data['honey_pot']
if honey_pot == '':
return honey_pot
else:
raise forms.ValidationError("Take that bot!") | Zachary-Jackson/Social-Team-Builder | team_builder/profiles/forms.py | forms.py | py | 4,048 | python | en | code | 0 | github-code | 13 |
29008075130 |
from typing import List
from models.metrics import MetricBase
# from models.modeltrainer import ModelTrainerBase
class CallbackBase(object):
    """Abstract training callback; subclasses implement the hook methods."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def on_train_begin(self, *args, **kwargs):
        raise NotImplementedError

    def on_epoch_end(self, *args, **kwargs):
        raise NotImplementedError
class RecordLossHistoryBase(CallbackBase):
    """Callback that appends one line per epoch (loss + metrics, optionally
    train/validation pairs) to a whitespace-separated history file."""

    def __init__(self,
                 loss_filename: str,
                 list_metrics: List[MetricBase] = None,
                 is_hist_validation: bool = True
                 ) -> None:
        self._loss_filename = loss_filename
        self._names_hist_fields = ['loss']
        if list_metrics:
            self._names_hist_fields += [imetric._name_fun_out for imetric in list_metrics]
        if is_hist_validation:
            # Interleave each field with its validation counterpart:
            # ['loss', 'val_loss', <metric>, 'val_<metric>', ...]
            names_hist_fields_new = []
            for iname in self._names_hist_fields:
                names_hist_fields_new += [iname, 'val_%s' % (iname)]
            self._names_hist_fields = names_hist_fields_new

    def on_train_begin(self) -> None:
        # Truncate the file and write the '/field/' header row.
        list_names_header = ['/epoch/'] + ['/%s/' % (elem) for elem in self._names_hist_fields]
        str_header = ' '.join(list_names_header) + '\n'
        with open(self._loss_filename, 'w') as fout:
            fout.write(str_header)

    def on_epoch_end(self, epoch: int, data_output: List[float]) -> None:
        # `data_output` must be ordered like `_names_hist_fields`.
        list_data_line = ['%d' % (epoch + 1)] + ['%0.6f' % (elem) for elem in data_output]
        str_data_line = ' '.join(list_data_line) + '\n'
        with open(self._loss_filename, 'a') as fout:
            fout.write(str_data_line)
class EarlyStoppingBase(CallbackBase):
    """Tracks the best validation loss to support early stopping."""

    def __init__(self,
                 delta: float = 0.005,
                 patience: int = 10
                 ) -> None:
        # A new loss only counts as an improvement when it beats the best
        # one by at least the relative margin 'delta'.
        self._threshold = (1.0 - delta)
        self._patience = patience

    def on_train_begin(self) -> None:
        """Reset the best-loss bookkeeping before a training run."""
        self._best_epoch = 0
        self._best_valid_loss = 1.0e+03
        # NOTE(review): starts at a large negative float, so the counter
        # stays far below patience until the first improvement — confirm
        # that this is the intended behaviour.
        self._waiting = -1.0e+03

    def on_epoch_end(self, epoch: int, valid_loss: float) -> None:
        """Record *valid_loss*; reset or bump the no-improvement counter."""
        improved = valid_loss < self._threshold * self._best_valid_loss
        if improved:
            self._best_epoch = epoch
            self._best_valid_loss = valid_loss
            self._waiting = 0
        else:
            self._waiting += 1
class ModelCheckpointBase(CallbackBase):
    """Callback that periodically saves the trained model to disk."""

    def __init__(self,
                 model_filename: str,
                 model_trainer,
                 freq_save_model: int = 1,
                 type_save_model: str = 'full_model',
                 update_filename_epoch: bool = False
                 ) -> None:
        self._model_filename = model_filename
        self._model_trainer = model_trainer
        self._freq_save_model = freq_save_model
        self._type_save_model = type_save_model
        self._update_filename_epoch = update_filename_epoch
        super(ModelCheckpointBase, self).__init__()

    def on_train_begin(self) -> None:
        """No setup is required before training."""
        pass

    def on_epoch_end(self, epoch: int) -> None:
        """Save the model every `freq_save_model` epochs."""
        if epoch % self._freq_save_model != 0:
            return
        if self._update_filename_epoch:
            # The filename template carries a '%d' slot for the 1-based epoch.
            filename = self._model_filename % (epoch + 1)
        else:
            filename = self._model_filename
        if self._type_save_model == 'only_weights':
            self._model_trainer.save_model_only_weights(filename)
        elif self._type_save_model == 'full_model':
            self._model_trainer.save_model_full(filename)
| antonioguj/bronchinet | src/models/callbacks.py | callbacks.py | py | 3,569 | python | en | code | 42 | github-code | 13 |
15791373340 | import subprocess
import time
import threading
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient as mqttClient
import logging
import os
import signal
import json
_logger = logging.getLogger(__name__)
def startStreaming():
    """Run the streaming shell script for ~60 s, then tear it down.

    Side effects: spawns './startStreaming.sh' in its own session, SIGTERMs
    the whole process group after 60 s, sets the module-global
    `trigger_flag`, and schedules reset_trigger_flag() 30 s later.
    """
    bash_script = './startStreaming.sh'
    global trigger_flag
    # Start the bash script
    # start_new_session=True puts the script in its own process group so
    # os.killpg below can take down any children it spawned.
    process = subprocess.Popen(['/bin/bash',bash_script], start_new_session=True)
    time.sleep(60)
    os.killpg(os.getpgid(process.pid), signal.SIGTERM)
    process.terminate()
    process.wait()
    # Debounce: keep trigger_flag set for 30 s so the camera is not
    # immediately re-triggered by the next MQTT message.
    trigger_flag = True
    threading.Timer(30.0, reset_trigger_flag).start()
    # NOTE(review): the process has already been waited on above, so this
    # communicate/kill pair looks redundant — confirm before removing.
    try:
        process.communicate(timeout=10)
    except subprocess.TimeoutExpired:
        process.kill()
def turn_camera_on(value):
    """Start the camera stream when the reading is low, unless debounced."""
    # Guard clause: ignore high readings and the 30 s debounce window.
    if value >= 10 or trigger_flag:
        return
    startStreaming()
    _logger.critical('camera turned on')
def reset_trigger_flag():
    """Timer target: clear the debounce flag so streaming can re-trigger."""
    global trigger_flag
    trigger_flag = False
def on_message(client, userdata, message):
    """MQTT callback: log the payload and feed its value to the camera gate."""
    payload = message.payload.decode()
    print(f'received message: {payload}')
    _logger.critical(f'received message: {payload}')
    # Payload is JSON; its 'value' field decides whether to start streaming.
    turn_camera_on(json.loads(payload)['value'])
def mqtt_client(host:str, port:int):
    """Build and configure (but do not connect) an AWS IoT MQTT client.

    TLS credentials are loaded from the ./readerKeys/ directory.
    """
    myMqttClient = mqttClient('myClient')
    myMqttClient.configureEndpoint(host, port)
    myMqttClient.configureCredentials("./readerKeys/rootCA.pem", "./readerKeys/private.pem.key", "./readerKeys/certificate.pem.crt")
    myMqttClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
    myMqttClient.configureDrainingFrequency(2) # Draining: 2 Hz
    myMqttClient.configureConnectDisconnectTimeout(10) # 10 sec
    myMqttClient.configureMQTTOperationTimeout(5) # 5 sec
    return myMqttClient
if __name__ == '__main__':
    # Debounce flag shared by the callbacks above.
    trigger_flag = False
    myMqttClient = mqtt_client("a3nrtfu6i3fchr-ats.iot.ap-southeast-2.amazonaws.com", 8883)
    myMqttClient.connect()
    # QoS 1 subscription; on_message drives the camera.
    myMqttClient.subscribe('RasPi/data', 1, on_message)
    try:
        # Idle forever; the MQTT client delivers messages on its own thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print('Disconnecting...')
        myMqttClient.disconnect()
        print('Disconnected')
# Arithmetic-progression exercise, redone (was a while/break loop).
n = int(input('Digite um numero para obter sua PA: '))
r = int(input('Qual é a razão ? '))
# Print the next 10 terms of the progression; as in the original
# exercise, the starting term itself is not printed.
for _ in range(10):
    n += r
    print(n)
17046788464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAgreementSignConfirmModel(object):
    """Request model for confirming a user-agreement signing.

    Carries three optional string fields: apply_token, cert_no, confirm_no.
    """

    def __init__(self):
        self._apply_token = None
        self._cert_no = None
        self._confirm_no = None

    @property
    def apply_token(self):
        """Token identifying the pending sign application."""
        return self._apply_token

    @apply_token.setter
    def apply_token(self, value):
        self._apply_token = value

    @property
    def cert_no(self):
        """Certificate number of the signing user."""
        return self._cert_no

    @cert_no.setter
    def cert_no(self, value):
        self._cert_no = value

    @property
    def confirm_no(self):
        """Confirmation number for the sign flow."""
        return self._confirm_no

    @confirm_no.setter
    def confirm_no(self, value):
        self._confirm_no = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict."""
        params = dict()
        for key in ('apply_token', 'cert_no', 'confirm_no'):
            value = getattr(self, key)
            if not value:
                continue
            # Nested models know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for an empty/missing dict."""
        if not d:
            return None
        o = AlipayUserAgreementSignConfirmModel()
        for key in ('apply_token', 'cert_no', 'confirm_no'):
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayUserAgreementSignConfirmModel.py | AlipayUserAgreementSignConfirmModel.py | py | 1,895 | python | en | code | 241 | github-code | 13 |
# Baekjoon 7568: a person's rank is 1 + the number of people strictly
# bigger in BOTH measurements.
n = int(input())
people = [tuple(map(int, input().split())) for _ in range(n)]
for w, h in people:
    rank = 1 + sum(1 for a, b in people if w < a and h < b)
    print(rank, end=' ')
| mins1031/coding-test | baekjoon/DungChi_7568.py | DungChi_7568.py | py | 308 | python | en | code | 0 | github-code | 13 |
4170213077 | # -*- coding: utf-8 -*-
# @Time : 2020-08-02 11:18
# @Author : zcw
# @Site :
# @File : InputNameForm.py
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import pyqtSignal, Qt
from .InputNameUI import Ui_InputName
class InputNameForm(QDialog):
    """Frameless dialog that asks the user to enter a name.

    The entered text is exposed through the `input_name` attribute after
    the dialog finishes.
    """
    # Holds the text the user typed; read by the caller after the dialog ends.
    input_name = ''
    # Constructor: completes UI initialization (translated from the original
    # Chinese comment, which also mentioned camera-device initialization).
    def __init__(self, parent=None):
        super(InputNameForm, self).__init__(parent, Qt.FramelessWindowHint)
        self.ui = Ui_InputName()
        self.ui.setupUi(self)
        # Make the OK button the default button (activated by Enter).
        self.ui.btnOK.setDefault(True)
        # Wire the OK button's click to the close handler below.
        self.ui.btnOK.clicked.connect(self.close)
    def close(self):
        """Store the typed name and finish the dialog with result code 0.

        NOTE(review): shadows QDialog.close() with different semantics
        (calls done(0) instead of closing the widget) — confirm intended.
        """
        self.input_name = self.ui.edtName.text()
        self.done(0)
| wenwen1205/faceRecognition | capturefaces/InputNameForm.py | InputNameForm.py | py | 778 | python | en | code | 1 | github-code | 13 |
16991882789 | # Python program to demonstrate delete operation
# in binary search tree
# A Binary Tree Node
class Node:
    """A single node of a binary search tree."""

    def __init__(self, data):
        """Create a leaf node holding *data*."""
        self.data = data
        self.left = self.right = None
# A utility function to do inorder traversal of BST
def inorder(root):
    """Print the keys of the tree in sorted (inorder) order, space-separated."""
    if root is None:
        return
    inorder(root.left)
    print(root.data, end=" ")
    inorder(root.right)
# A utility function to insert a
# new node with given data in BST
def insert(node, data):
    """Insert *data* into the BST rooted at *node* and return the root.

    Duplicates (data == node.data) descend into the right subtree.
    """
    if node is None:
        return Node(data)
    if data < node.data:
        node.left = insert(node.left, data)
    else:
        node.right = insert(node.right, data)
    return node
# Given a non-empty binary
# search tree, return the node
# with minimum data value
# found in that tree. Note that the
# entire tree does not need to be searched
def minValueNode(node):
    """Return the leftmost (minimum-key) node reachable from *node*.

    Assumes *node* is not None; only the left spine is walked.
    """
    while node.left is not None:
        node = node.left
    return node
# Given a binary search tree and a data, this function
# delete the data and returns the new root
def deleteNode(root, data):
    """Delete *data* from the BST rooted at *root*; return the new root.

    Missing keys are a no-op. A node with two children is replaced by its
    inorder successor (minimum of the right subtree).
    """
    if root is None:
        return root
    if data < root.data:
        # Key lives in the left subtree.
        root.left = deleteNode(root.left, data)
        return root
    if data > root.data:
        # Key lives in the right subtree.
        root.right = deleteNode(root.right, data)
        return root
    # Found the node to remove.
    if root.left is None:
        return root.right
    if root.right is None:
        return root.left
    # Two children: copy the inorder successor's key here, then delete the
    # successor from the right subtree.
    successor = minValueNode(root.right)
    root.data = successor.data
    root.right = deleteNode(root.right, successor.data)
    return root
def display(node):
    """Pretty-print the tree rooted at *node* to stdout, one row per line."""
    rows = display_aux(node)[0]
    for row in rows:
        print(row)
def display_aux(node):
    """Returns list of strings, width, height, and horizontal coordinate of the root."""
    # Recursive ASCII-art layout: each call renders the subtree and returns
    # geometry the caller uses to splice children together.  The exact
    # space/underscore arithmetic below is position-sensitive; do not
    # reorder or "simplify" without re-verifying the output.
    # No child.
    if node.right is None and node.left is None:
        line = '%s' % node.data
        width = len(line)
        height = 1
        middle = width // 2
        return [line], width, height, middle
    # Only left child.
    if node.right is None:
        lines, n, p, x = display_aux(node.left)
        s = '%s' % node.data
        u = len(s)
        # Root label sits to the right of the child block, joined by '_'.
        first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s
        second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '
        shifted_lines = [line + u * ' ' for line in lines]
        return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2
    # Only right child.
    if node.left is None:
        lines, n, p, x = display_aux(node.right)
        s = '%s' % node.data
        u = len(s)
        first_line = s + x * '_' + (n - x) * ' '
        second_line = (u + x) * ' ' + '\\' + (n - x - 1) * ' '
        shifted_lines = [u * ' ' + line for line in lines]
        return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2
    # Two children.
    left, n, p, x = display_aux(node.left)
    right, m, q, y = display_aux(node.right)
    s = '%s' % node.data
    u = len(s)
    first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '
    second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
    # Pad the shorter child block so both sides have equal row counts.
    if p < q:
        left += [n * ' '] * (q - p)
    elif q < p:
        right += [m * ' '] * (p - q)
    zipped_lines = zip(left, right)
    lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]
    return lines, n + m + u, max(p, q) + 2, n + u // 2
# Driver code
""" Let us create following BST
            50
         /     \
        30      70
       /  \    /  \
      20   40 60   80 """
# Build the example tree, draw it, then delete 20 (a leaf), 30 (one child)
# and 50 (two children), printing the inorder traversal after each step.
root = None
root = insert(root, 50)
root = insert(root, 30)
root = insert(root, 20)
root = insert(root, 40)
root = insert(root, 70)
root = insert(root, 60)
root = insert(root, 80)
display(root)
print("Inorder traversal of the given tree")
inorder(root)
print("\nDelete 20")
root = deleteNode(root, 20)
print("Inorder traversal of the modified tree")
inorder(root)
print("\nDelete 30")
root = deleteNode(root, 30)
print("Inorder traversal of the modified tree")
inorder(root)
print("\nDelete 50")
root = deleteNode(root, 50)
print("Inorder traversal of the modified tree")
inorder(root)
# This code is contributed by Nikhil Kumar Singh(nickzuck_007)
30199274460 | import gym
from tqdm import tqdm
import numpy as np
import random
env = gym.make('CartPole-v0')
max_episodes = 10000
# Multiplicative per-episode epsilon decay (floored at 0.1 in the loop).
decay = 0.999
epsilon = 1
# Tabular Q-values over the discretized 4-D observation x 2 actions; the
# bin counts match the scaling/offsets applied in format() below.
q = np.zeros((49, 200, 41, 200, 2))
alpha = 0.1
gamma = 1
pbar = tqdm(range(max_episodes), ascii=" .oO0", bar_format="{l_bar}{bar}|{postfix}")
def format(st):
    """Discretize a CartPole observation into non-negative integer bins.

    Each component is rounded to one decimal, scaled by 10, and shifted by
    a per-component offset so the result can index the Q-table directly.
    Mutates *st* in place and returns it as an int array.

    NOTE(review): this shadows the built-in ``format``; the name is kept
    because the rest of the script calls it.
    """
    # Offsets for: cart position, cart velocity, pole angle, pole ang. vel.
    # The three in-place steps exactly reproduce the original arithmetic.
    for i, offset in enumerate((24, 100, 20, 100)):
        st[i] = round(st[i], 1)
        st[i] *= 10
        st[i] += offset
    return st.astype(int)
# Main Q-learning loop: epsilon-greedy action selection with tabular updates.
for ep in range(max_episodes):
    pbar.update(1)
    pbar.set_postfix(Epsilon=str(round(epsilon, 2)))
    state = format(env.reset())
    # Decay exploration once per episode, floored at 0.1.
    epsilon *= decay
    if epsilon < 0.1:
        epsilon = 0.1
    while True:
        if random.uniform(0, 1) < epsilon:
            action = env.action_space.sample()
        else:
            action = np.argmax(q[state[0], state[1], state[2], state[3]])
        # Render only every 1000th episode.
        if ep % 1000 == 0:
            env.render()
        else:
            env.close()
        new_state, reward, _, _ = env.step(action)
        new_state = format(new_state)
        # End the episode when the discretized state leaves the Q-table's
        # bounds (the env's own `done` flag is ignored above).
        if new_state[2] < 0 or new_state[2] > 40:
            break
        if new_state[0] < 0 or new_state[0] > 48:
            break
        # NOTE(review): gamma multiplies (max Q - Q) rather than max Q
        # alone; with gamma == 1 (set above) this equals the standard
        # Q-learning update, but for gamma != 1 it would not.
        q[state[0], state[1], state[2], state[3], action] += alpha * (reward + (gamma * (np.max(q[new_state[0], new_state[1], new_state[2], new_state[3]]) - q[state[0], state[1], state[2], state[3], action])))
        state = new_state
41727877312 | import pymysql
import config
def lambda_handler(event, context):
    """AWS Lambda: return all image URLs for the cafe in the request path.

    Response shape: {"success": True, "data": [url, ...]} on success, or
    {"success": False, "message": ...} on DB failure / no images.
    """
    # db setting
    try:
        conn = pymysql.connect(
            host=config.db_hostname,
            user=config.db_username,
            password=config.db_password,
            db=config.db_name
        )
    except pymysql.MySQLError:
        return {
            "success": False,
            "message": "Database Error"
        }
    try:
        cafe_id = event['params']['path']['cafe-id']
        with conn.cursor(pymysql.cursors.DictCursor) as cursor:
            # Parameterized query: cafe_id comes straight from the request
            # path and must never be concatenated into the SQL string
            # (SQL injection).
            cursor.execute(
                'SELECT image_url FROM image WHERE `cafe_id` = %s',
                (cafe_id,)
            )
            image_rows = cursor.fetchall()
    finally:
        # The original leaked the connection on the "no images" path;
        # always close it.
        conn.close()
    if not image_rows:
        return {
            "success": False,
            "message": "이미지 존재하지 않습니다."
        }
    return {
        "success": True,
        "data": [row['image_url'] for row in image_rows]
    }
1797509147 | from pydantic import ValidationError
from pytest import raises
from pathfinder_network.datamodel.string import String
def test_valid_string():
    """Ordinary text round-trips through String unchanged."""
    samples = ("Hello, World!", "1234", "🐍 is the best")
    for raw in samples:
        assert String(__root__=raw) == raw
def test_empty_string():
    # The empty string is accepted and compares equal to "".
    empty_string = String(__root__="")
    assert empty_string == ""
def test_other_types():
    """Non-string inputs are coerced to their string form."""
    for raw, expected in ((123, "123"), (0.0001, "0.0001")):
        assert String(__root__=raw) == expected
def test_none_string():
    # Test None string
    # None is rejected by validation rather than coerced to "None".
    with raises(ValidationError):
        String(__root__=None)
| JohnVonNeumann/pathfinder_network | tests/datamodel/test_string.py | test_string.py | py | 731 | python | en | code | 2 | github-code | 13 |
42515334359 | from pwn import *
from ctypes import *
def main():
    """Exploit driver: predict the target's rand() by seeding with time().

    Loads the local libc, seeds its RNG with the current time (presumably
    matching an srand(time(NULL)) in the challenge binary — confirm), and
    sends the predicted first rand() value to the spawned process.
    """
    # NOTE(review): `binary` is only used to set pwntools' context here.
    binary = context.binary = ELF("./chall_17")
    p = process("./chall_17")
    libc = cdll.LoadLibrary("libc.so.6")
    libc.srand(libc.time(None))
    p.sendline(str(libc.rand()))
    print(p.recv())
main()
29423029149 | from datetime import datetime
import hashlib
import string
from io import BytesIO
from PIL import Image
from werkzeug.datastructures import FileStorage
from __main__ import db
class StoredImage(db.Model):
    """SQLAlchemy model storing image bytes plus metadata derived via PIL.

    NOTE(review): `.decode('base64')` and `string.upper(...)` are
    Python-2-only APIs, so this class appears to target Python 2 —
    confirm before running it under Python 3.
    NOTE(review): the `return None` statements inside __init__ do NOT
    abort object construction; a half-initialized instance is still
    created when the data is not a valid image — confirm callers cope.
    """
    __tablename__ = 'stored_images'
    id = db.Column(db.Integer, primary_key=True)
    # MD5 of `data`; used for de-duplication in save_to_db()/from_database_md5().
    md5_hash = db.Column(db.String(32), unique=True)
    mime_type = db.Column(db.String())
    format = db.Column(db.String())
    width = db.Column(db.Integer)
    height = db.Column(db.Integer)
    data = db.Column(db.LargeBinary)
    datetime_created = db.Column(db.DateTime)
    num_views = db.Column(db.Integer)
    # Self-referential FK set when this row was duplicated from another image.
    original_id = db.Column(db.Integer, db.ForeignKey('stored_images.id'))
    def __init__(self, file=None, datauri=None, original=None):
        """Build an image from exactly one of: a file, a data URI, or an
        existing StoredImage row (duplicate)."""
        if file:
            # Create an image from a file or an instance of Werkzeug's FileStorage class
            if file.__class__ == FileStorage:
                self.data = file.stream.read()
            else:
                self.data = file.getvalue()
            self.original_id = None
        elif datauri:
            # Read the image data from a Base64-encoded stream in the format:
            # 'data:image/png;base64,iVBORw0KGgo...'
            self.data = datauri.split(',')[1].decode('base64')
            self.original_id = None
        elif original:
            # Create a new image by duplicating one from the database
            self.data = original.data
            self.original_id = original.id
        else:
            return None
        # Check that the data represents a valid image
        try:
            tmp = Image.open(BytesIO(self.data))
        except IOError:
            return None
        # The image was opened without errors
        self.format = tmp.format
        self.mime_type = Image.MIME[tmp.format]
        self.width = tmp.width
        self.height = tmp.height
        self.datetime_created = datetime.utcnow()
        self.num_views = 0
        self.update_md5()
    @classmethod
    def from_file(cls, file):
        """Alternate constructor: build from a file / FileStorage."""
        return cls(file=file)
    @classmethod
    def from_datauri(cls, datauri):
        """Alternate constructor: build from a base64 data URI."""
        return cls(datauri=datauri)
    @classmethod
    def from_original(cls, original):
        """Alternate constructor: duplicate an existing StoredImage."""
        return cls(original=original)
    @classmethod
    def from_database_md5(cls, md5_hash):
        """Returns the image in the database with the given MD5 hash, or None if it doesn't exist"""
        return StoredImage.query.filter_by(md5_hash=md5_hash).first()
    def make_square(self):
        """If the image isn't square then crop it and keep only the central square part."""
        # Read image in memory
        original = Image.open(BytesIO(self.data))
        # Do nothing if the image is already square
        # NOTE(review): returns True here but None on the crop path —
        # callers should not rely on the return value.
        if self.width == self.height:
            return True
        # Crop if not square
        if self.width > self.height:
            box = (
                (self.width - self.height)/2,
                0,
                (self.width + self.height)/2,
                self.height
            )
        elif self.height > self.width:
            box = (
                0,
                (self.height - self.width)/2,
                self.width,
                (self.height + self.width)/2
            )
        square_image = original.crop(box)
        # Update the data and size values
        tmp_stream = BytesIO()
        square_image.save(tmp_stream, string.upper(self.format))
        self.data = tmp_stream.getvalue()
        self.width = square_image.width
        self.height = square_image.height
        self.update_md5()
    def fit_within(self, width=1200, height=1200):
        """Make the image fit within (width, height). Replaces the original data."""
        # Read image in memory
        original = Image.open(BytesIO(self.data))
        # If the image doesn't fit within (witdth, height) then resample it
        if original.size[0] > width or original.size[1] > height:
            original.thumbnail((width, height), resample=Image.LANCZOS)
        # Store the resampled data
        resampled_stream = BytesIO()
        original.save(resampled_stream, string.upper(self.format))
        self.data = resampled_stream.getvalue()
        self.width = original.width
        self.height = original.height
        self.update_md5()
    def crop(self, x0=0, y0=0, x1=0, y1=0):
        """Crop the image. Replaces the original data."""
        # Read image in memory
        original = Image.open(BytesIO(self.data))
        # Perform the crop
        cropped = original.crop((x0, y0, x1, y1))
        # Store the resulting image
        tmp_stream = BytesIO()
        cropped.save(tmp_stream, string.upper(self.format))
        self.data = tmp_stream.getvalue()
        self.width = cropped.width
        self.height = cropped.height
        self.update_md5()
    def update_md5(self):
        """Recompute md5_hash from the current data bytes."""
        self.md5_hash = hashlib.md5(self.data).hexdigest()
    def save_to_db(self):
        """Saves an image to the database only if the same image isn't there already. Returns the ID of the newly saved image, or the ID of the existing one."""
        tmp = StoredImage.query.filter_by(md5_hash=self.md5_hash).first()
        if tmp is None:
            db.session.add(self)
            db.session.commit()
            return self.id
        else:
            return tmp.id
| javilm/msx-center | models/StoredImage.py | StoredImage.py | py | 4,565 | python | en | code | 0 | github-code | 13 |
# Open the log to scan; `with` closes it for us when done.
with open('C:\\Users\\u_ex210831.log', 'r') as reader:
    key = '114.119.147.205'  # substring to search for
    matches = 0
    # Walk the file line by line, echoing every matching line.
    for line in reader:
        if key in line:
            matches += 1
            print(matches, ' ', key, ' ---> ', line, end='')
    if matches > 0:
        print(matches, 'occorrenze di', key)
    else:
        print('Nessuna corrispondenza trovata!')
26275357820 | """Database models."""
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from uuid import uuid4
from flask_login import UserMixin
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.schema import PrimaryKeyConstraint
from sync_calendars.extensions import db
# Association (join) table linking users to their calendars; both FKs
# cascade on delete.
user_cal_association = db.Table('user_calendar',
                        db.Column('user_id', UUID(as_uuid=True), db.ForeignKey('users.id', ondelete='CASCADE')),
                        db.Column('calendar_id', UUID(as_uuid=True), db.ForeignKey('calendars.id', ondelete='CASCADE'))
                        )
@dataclass
class User(UserMixin, db.Model):
    """User account model."""
    # Dataclass field annotations (columns are declared below).
    id: str
    email: str
    __tablename__ = 'users'
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4, nullable=False)
    name = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(40), unique=True, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    last_login = db.Column(db.DateTime, default=datetime.utcnow)
    # Calendars linked to this user through the user_calendar join table.
    calendars = db.relationship('Calendar',
                                secondary=user_cal_association,
                                back_populates='users')
    def __repr__(self):
        # Bug fix: the model has no `username` attribute (it defines `name`),
        # so the old `self.username` raised AttributeError on repr().
        return '<User {}>'.format(self.name)
@dataclass
class CalendarEnum(str, Enum):
    """Enum defining what calendars app supports"""
    O365 = 'o365' # pylint: disable=invalid-name
    # Hash by the underlying string value so members hash consistently with
    # their plain-string form (str mixin already makes equality by value).
    def __hash__(self):
        return hash(self.value)
@dataclass
class Calendar(db.Model):
    """Email account model."""
    # Dataclass field annotations (columns are declared below).
    id: str
    email: str
    type: str
    __tablename__ = 'calendars'
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4, nullable=False)
    type = db.Column(db.Enum(CalendarEnum))
    email = db.Column(db.String(40), unique=True, nullable=False)
    # OAuth tokens for the provider; expires_at bounds access_token validity.
    access_token = db.Column(db.Text, nullable=False)
    refresh_token = db.Column(db.Text, nullable=False)
    expires_at = db.Column(db.DateTime, nullable=False)
    last_update_at = db.Column(db.DateTime, default=datetime.utcnow)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    subscription_id = db.Column(db.String(40), unique=True, nullable=True)
    # NOTE(review): column name is misspelled ('subscrition'); renaming it
    # would require a DB migration plus caller updates.
    change_subscrition = db.Column(JSONB)
    users = db.relationship('User',
                            secondary=user_cal_association,
                            back_populates='calendars')
    def __repr__(self):
        return '<Calendar {}>'.format(self.email)
    def to_simple_obj(self):
        """Return a plain JSON-serializable dict view of this calendar."""
        return {
            'id': str(self.id),
            'type': str(self.type.value),
            'email': self.email
        }
@dataclass
class SyncFlow(db.Model):
    """SyncFlow model: a directed source -> destination copy relationship
    between two calendars, owned by a user."""
    id: str
    source: str
    destination: str
    user: str
    __tablename__ = 'sync_flows'
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4, nullable=False)
    source = db.Column(UUID(as_uuid=True), db.ForeignKey(Calendar.id, ondelete='CASCADE'))
    destination = db.Column(UUID(as_uuid=True), db.ForeignKey(Calendar.id, ondelete='CASCADE'))
    user = db.Column(UUID(as_uuid=True), db.ForeignKey(User.id, ondelete='CASCADE'))
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    def __repr__(self):
        return '<SyncFlow {}>'.format(self.id)
class EventMap(db.Model):
    """Model for mapping copied event IDs"""
    __tablename__ = 'event_map'
    source_cal = db.Column(UUID(as_uuid=True), db.ForeignKey(Calendar.id, ondelete='CASCADE'))
    source_event = db.Column(UUID(as_uuid=True), nullable=False)
    dest_cal = db.Column(UUID(as_uuid=True), db.ForeignKey(Calendar.id, ondelete='CASCADE'))
    dest_event = db.Column(UUID(as_uuid=True), nullable=False)
    # Soft-delete marker — presumably set when the source event goes away;
    # confirm against the sync code.
    is_deleted = db.Column(db.Boolean, unique=False, default=False)
    # Composite primary key: one row per (source_event, dest_event) pair.
    __table_args__ = (
        PrimaryKeyConstraint(
            source_event,
            dest_event
        ),
        {}
    )
| smurfpandey/sync_calendars | sync_calendars/models.py | models.py | py | 3,805 | python | en | code | 0 | github-code | 13 |
7328507106 | from turtle import back
import pandas as pd
import numpy as np
import os
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn import linear_model
class LR:
    """Thin wrapper around sklearn LinearRegression: CSV loading, shuffled
    train/test split, fitting, and MSE evaluation."""
    def __init__(self,):
        self.cwd = os.path.dirname(os.getcwd())  # parent of the current working dir (original comment claimed "absolute path of current file" — it is not)
        self.file_dirname = os.path.dirname(os.path.abspath(__file__))
        self.model = linear_model.LinearRegression()
        # ' ' acts as an "unset" sentinel; call load_dataset() before train().
        self.dataset_path = ' '
        self.test_size = ' '
    def train(self, seed=0, data_type='csv'):
        """Shuffle the CSV dataset, split off the test set, fit the model.

        NOTE(review): only data_type == 'csv' is handled; any other value
        silently does nothing and leaves self.test_set undefined.
        """
        np.random.seed(seed)
        if data_type == 'csv':
            dataset = pd.read_csv(self.dataset_path,sep=',',header=None).values
            np.random.shuffle(dataset)
            # Last column is the regression target.
            data, label = dataset[:,:-1],dataset[:,-1]
            train_index = int((1-self.test_size)*len(dataset))
            train_data, train_label = data[:train_index,],label[:train_index]
            self.test_set = {
                'data': data[train_index:,],
                'label': label[train_index:]
            }
            self.model.fit(train_data,train_label)
    def inference(self, mode='cls'):
        """Print the MSE of the fitted model on the held-out test set.

        NOTE(review): the `mode` parameter is unused.
        """
        pred = self.model.predict(self.test_set['data'])
        loss = mean_squared_error(self.test_set['label'],pred)
        print('Loss: {}'.format(loss))
    def load_dataset(self,path,test_size=0.2):
        """Remember the dataset path and test fraction for train()."""
        self.dataset_path = path
        self.test_size = test_size
| OpenXLab-Edu/OpenBaseLab-Edu | BaseML/LR.py | LR.py | py | 1,380 | python | en | code | 5 | github-code | 13 |
10348295531 | import tensorflow as tf
class BaseLSTMClass:
    """TF1-style builder for a stacked recurrent network with state dropout.

    NOTE(review): despite the name, create_lstm_cell() builds GRU cells
    (CudnnCompatibleGRUCell), not LSTM cells.
    """
    def __init__(self, units, num_layers,
                 output_units=None,
                 drop_prob=None):
        # drop_prob is a *keep* probability fed to DropoutWrapper; when not
        # supplied, a placeholder defaulting to 1.0 (no dropout) is created.
        if drop_prob is None:
            self.drop_prob = tf.placeholder_with_default(1., shape=())
        else:
            self.drop_prob = drop_prob
        self.units = units
        self.num_layers = num_layers
        self.output_units = output_units
        return
    def get_initial_state(self, batch_size):
        # initial_state has get_shape (batch_size, latent_size), same as psi_mean in the prev code
        # NOTE(review): [tensor] * num_layers repeats the SAME tensor for
        # every layer (all layers share one random draw) — confirm intended.
        init_state = [tf.random.truncated_normal([batch_size, self.units],
                                                 stddev=0.001)] * self.num_layers
        # curr_out = tf.zeros([batch_size, units])
        return init_state
    def create_lstm_cell(self):
        """Build a MultiRNNCell of num_layers GRU cells with state dropout."""
        cells1 = []
        for _ in range(self.num_layers):
            cell1 = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(self.units)
            cell1 = tf.nn.rnn_cell.DropoutWrapper(cell1,
                                                  state_keep_prob=self.drop_prob)
            cells1.append(cell1)
        cell1 = tf.nn.rnn_cell.MultiRNNCell(cells1)
        return cell1
    def create_projections(self):
        # projection matrices for output
        projection_w = tf.get_variable('projection_w', [self.units, self.output_units])
        projection_b = tf.get_variable('projection_b', [self.output_units])
        return projection_w, projection_b
| rohanmukh/nsg | program_helper/sequence/base_lstm_class.py | base_lstm_class.py | py | 1,519 | python | en | code | 20 | github-code | 13 |
17333187194 | """Module for PublishDiagnostics Provider which handles publishing of diagnostics for AaC Language Server."""
import logging
from pygls.lsp import Diagnostic, DiagnosticSeverity, PublishDiagnosticsParams
from pygls.server import LanguageServer
from pygls.uris import to_fs_path
from typing import Optional
from aac.io.parser import parse
from aac.io.parser._parser_error import ParserError
from aac.plugins.first_party.lsp_server.conversion_helpers import source_location_to_range
from aac.plugins.first_party.lsp_server.providers.lsp_provider import LspProvider
from aac.plugins.validators._validator_finding import ValidatorFinding, FindingSeverity
from aac.validate._validate import _validate_definitions
class PublishDiagnosticsProvider(LspProvider):
    """Handler for Publishing Diagnostics for AaC LSP."""
    def handle_request(self, ls: LanguageServer, params: PublishDiagnosticsParams) -> list[Diagnostic]:
        """Handle publishing validation findings as diagnostics."""
        self.language_server = ls
        diagnostics = self.get_diagnostics(params.uri)
        ls.publish_diagnostics(params.uri, diagnostics)
        return diagnostics
    def get_diagnostics(self, document_uri: str) -> list[Diagnostic]:
        """Return the Diagnostics for the findings on document_uri."""
        findings = self.get_findings_for_document(document_uri)
        return [self.finding_to_diagnostic(finding) for finding in findings]
    def get_findings_for_document(self, document_uri: str) -> list[ValidatorFinding]:
        """
        Return all the ValidatorFindings for the specified document.
        Args:
            self (PublishDiagnosticsProvider): Instance of class.
            document_uri (str): Specified document.
        Returns:
            List of ValidatorFindings for the definitions within the specified document.
        """
        findings = []
        if self.language_server.workspace:
            document = self.language_server.workspace.get_document(document_uri)
            if document:
                try:
                    parsed_definitions = parse(document.source, to_fs_path(document_uri))
                except ParserError as error:
                    # Re-raised without chained context (`from None`) so only
                    # the fresh ParserError surfaces to the caller.
                    raise ParserError(error.source, error.errors) from None
                else:
                    result = _validate_definitions(parsed_definitions, self.language_server.language_context, validate_context=False)
                    findings = result.findings.get_all_findings()
            else:
                logging.debug(f"Can't provide diagnostics, {document_uri} not found in the workspace.")
        else:
            logging.debug("Can't provide diagnostics, the workspace doesn't exist in the LSP.")
        return findings
    def finding_to_diagnostic(self, finding: ValidatorFinding) -> Diagnostic:
        """Convert a ValidatorFinding to an LSP Diagnostic."""
        severity = self.finding_severity_to_diagnostic_severity(finding.severity)
        return Diagnostic(
            range=source_location_to_range(finding.location.location),
            severity=severity.value if severity else None,
            code=finding.location.validation_name,
            source="aac",
            message=finding.message,
        )
    def finding_severity_to_diagnostic_severity(self, finding_severity: FindingSeverity) -> Optional[DiagnosticSeverity]:
        """Return the DiagnosticSeverity whose name contains the given FindingSeverity's name, or None (implicitly) when nothing matches."""
        # e.g. FindingSeverity 'ERROR' -> 'Error' matches DiagnosticSeverity.Error.
        finding_severity_name = finding_severity.name.title()
        for severity_name in DiagnosticSeverity._member_names_:
            if finding_severity_name in severity_name:
                return DiagnosticSeverity[severity_name]
| jondavid-black/AaC | python/src/aac/plugins/first_party/lsp_server/providers/publish_diagnostics_provider.py | publish_diagnostics_provider.py | py | 3,712 | python | en | code | 14 | github-code | 13 |
6143682962 | #-*- coding: utf-8 -*-
import sys

from antlr4.error.ErrorListener import *
class MiniJava_ErrorListener(ErrorListener):
    '''
    An inherited listener class to listen to the syntax errors.
    The error triger is defined in the .g4 file.
    '''
    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        '''
        An overwrite of the original method.
        See https://www.antlr.org/api/Java/org/antlr/v4/runtime/ANTLRErrorListener.html for more details
        recognizer: What parser got the error
        offendingSymbol: The offending token in the input token stream
        '''
        # NOTE: `sys` must be imported explicitly at module level; previously
        # it was only available via the ErrorListener star import.
        print('line ' + str(line) + ':' + str(column) + '\t' + msg, file=sys.stderr)
        self.print_detail(recognizer, offendingSymbol, line, column, msg, e)

    def print_detail(self, recognizer, offendingSymbol, line, column, msg, e):
        '''Print the source line containing the error with a '↑' marker
        under the offending column (tabs are preserved so it lines up).'''
        token = recognizer.getCurrentToken()
        in_stream = token.getInputStream()
        source_line = str(in_stream).split('\n')[line - 1]  # get the error line
        print(source_line)
        # Mirror the line's tabs so the caret lands in the right column, e.g.
        #   int 0number
        #       ↑
        underline = ''.join('\t' if source_line[i] == '\t' else ' '
                            for i in range(column))
        print(underline + '↑')
| vahidmohsseni/bscCompilerFa | MiniJavaError_Presenter.py | MiniJavaError_Presenter.py | py | 1,207 | python | en | code | 2 | github-code | 13 |
20919581466 | import sys
import math
import aocd
from icecream import ic
def fuel(m):
    """Fuel required to launch mass *m*: floor(m / 3) - 2, never negative.

    Integer floor division replaces math.floor(m / 3), avoiding float
    precision issues for very large masses; results for ints are identical.
    (Also fixes PEP 8 E731: don't bind a lambda to a name.)
    """
    return max(m // 3 - 2, 0)
def main():
    """Solve AoC 2019 day 1 and submit both parts (network side effects)."""
    # Read the input
    data = aocd.get_data(year=2019, day=1)
    modules = [int(line) for line in data.splitlines()]
    # Fuel for just the modules
    load = sum(map(lambda m: fuel(m), modules))
    ic('part a:', load)
    aocd.submit(load, year=2019, day=1, part='a')
    # Fuel for modules + fuel for fuel
    # Appending while iterating is deliberate: each mass's fuel is pushed
    # onto the list so its own fuel gets computed on a later iteration.
    for m in modules:
        if fuel(m) > 0:
            modules.append(fuel(m))
    load = sum(map(fuel,modules))
    ic('part b:', load)
    aocd.submit(load, year=2019, day=1, part='b')
if __name__ == "__main__":
    main()
29861870369 |
import kivy
kivy.require('2.1.0')
from kivy.graphics import Color, Rectangle
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.textinput import TextInput
# from kivy.uix.screenmanager import Screen, ScreenManager
from modified_widget import *
class Signup_Widget(FloatLayout):
    """Sign-up screen: a decorative image panel on the left and a form panel
    on the right with ten input fields, REGISTER and LOGIN buttons, plus a
    toggle that switches the sign-up role between Customer and Agent.
    """

    # Shared background tint used by every input field on the form.
    _FIELD_BG = (239 / 255, 243 / 255, 255 / 255, 1)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Vertical layout base: field row k sits at y = offset + 40 * k.
        offset = 60
        # Right-hand form panel background.
        with self.canvas:
            Color(0.3, 0.4, 0.6, 0.5)
            Rectangle(pos=(400, 20), size=(380, 560))
        # Left-hand decorative image panel.
        with self.canvas:
            Color(0.95, 0.96, 1, 0.95)
            Rectangle(pos=(20, 140), size=(380, 380), source="img/signup.png")
        self.login_label = Label(text="Sign Up",
                                 pos=(450, offset + 40 * 11 + 20),
                                 size=(80, 40),
                                 size_hint=(None, None),
                                 font_size='28sp',
                                 color=(41 / 255, 50 / 255, 70 / 255, 1),
                                 font_name="assets/static/Nunito-Bold")
        # The ten input fields differ only in hint text, row position and the
        # password flag, so one helper replaces ten copy-pasted constructor
        # calls (see _make_field below).
        self.user_name = self._make_field('User Name', offset + 40 * 10)
        self.first_name = self._make_field('First Name', offset + 40 * 9)
        self.last_name = self._make_field('Last Name', offset + 40 * 8)
        self.password = self._make_field('Password', offset + 40 * 7,
                                         password=True)
        self.password_confirm = self._make_field('Confirm Password',
                                                 offset + 40 * 6,
                                                 password=True)
        self.gender = self._make_field('Gender', offset + 40 * 5)
        self.date_of_birth = self._make_field('Date of Birth eg.31-08-2022',
                                              offset + 40 * 4)
        self.mobile = self._make_field('Mobile Number', offset + 40 * 3)
        self.email = self._make_field('Email Address', offset + 40 * 2)
        self.register_button = HoverButton((41 / 255, 50 / 255, 70 / 255, 0.7), (41 / 255, 50 / 255, 70 / 255, 0.9),
                                           text="REGISTER",
                                           background_color=(41 / 255, 50 / 255, 70 / 255, 0.9),
                                           pos=(440, offset + 27),
                                           size=(300, 35),
                                           font_name="assets/static/Nunito-Bold",
                                           font_size='15sp',
                                           color=(1, 1, 1, 1),
                                           size_hint=(None, None))
        self.login_button = HoverButton((160 / 255, 198 / 255, 255 / 255, 0.7), (160 / 255, 198 / 255, 255 / 255, 0.9),
                                        text="LOGIN",
                                        background_color=(160 / 255, 198 / 255, 255 / 255, 0.9),
                                        pos=(440, offset - 15),
                                        size=(300, 35),
                                        font_name="assets/static/Nunito-Bold",
                                        font_size='15sp',
                                        color=(1, 1, 1, 1),
                                        size_hint=(None, None))
        self.agent_customer_label = Label(text="Sign Up as:",
                                          pos=(60, offset - 15),
                                          size=(100, 40),
                                          size_hint=(None, None),
                                          font_size='22sp',
                                          color=(41 / 255, 50 / 255, 70 / 255, 1),
                                          font_name="assets/static/Nunito-Bold")
        # Current sign-up role; toggled by the button below.
        self.signup_as = "Customer"
        self.agent_customer_selection = HoverButton((160 / 255, 198 / 255, 255 / 255, 0.7), (160 / 255, 198 / 255, 255 / 255, 0.9),
                                                    text="Customer",
                                                    background_color=(160 / 255, 198 / 255, 255 / 255, 0.9),
                                                    pos=(180, offset - 15),
                                                    size=(150, 35),
                                                    font_name="assets/static/Nunito-Bold",
                                                    font_size='15sp',
                                                    color=(1, 1, 1, 1),
                                                    size_hint=(None, None))
        self.agent_customer_selection.bind(on_release=self.agent_customer_selection_callback)
        # Widget mounting order is preserved exactly from the original code.
        self.add_widget(self.login_label)
        self.add_widget(self.user_name)
        self.add_widget(self.password)
        self.add_widget(self.password_confirm)
        self.add_widget(self.first_name)
        self.add_widget(self.last_name)
        self.add_widget(self.gender)
        self.add_widget(self.date_of_birth)
        self.add_widget(self.mobile)
        self.add_widget(self.email)
        self.add_widget(self.register_button)
        self.add_widget(self.login_button)
        self.add_widget(self.agent_customer_label)
        self.add_widget(self.agent_customer_selection)

    def _make_field(self, hint, y, password=False):
        """Build one 300x30 form input at x=440 with the shared colors/font.

        Passing ``password=False`` explicitly is equivalent to the original
        code omitting the kwarg (Kivy TextInput's default is False).
        """
        return HoverTextInput((1, 1, 1, 1), self._FIELD_BG,
                              text='',
                              password=password,
                              hint_text=hint,
                              background_color=self._FIELD_BG,
                              pos=(440, y),
                              size=(300, 30),
                              font_name="assets/static/Nunito-Regular",
                              font_size=14,
                              multiline=False,
                              size_hint=(None, None))

    def agent_customer_selection_callback(self, i):
        """Toggle the role between "Customer" and "Agent" and update the
        button caption (*i* is the pressed button)."""
        if self.signup_as == "Customer":
            self.signup_as = "Agent"
        else:
            self.signup_as = "Customer"
        i.text = self.signup_as
| Blaiteray/CCMS | UI/signup.py | signup.py | py | 7,768 | python | en | code | 0 | github-code | 13 |
20660483166 | from math import ceil
# Loan calculator script: asks for the principal, then either the number of
# months needed at a given monthly payment ('m') or the monthly payment for a
# given number of months ('p'). `ceil` is imported at the top of this file.
principal = float(input("Enter the loan principal: "))
print("""
What do you want to calculate?
Enter 'm' to know how many months it will take to pay off the loan.
Enter 'p' to know how much you should pay per month.
""")
selection = input()
if selection == "m":
    mthly_pay_amt = int(input("Enter the monthly payment amount: "))
    # BUG FIX: the month count must be rounded UP, not to the nearest
    # integer -- round() under-reported whenever the fractional part was
    # below .5 (e.g. 1000 / 300 -> round gives 3, but a 4th month is needed
    # for the remaining 100).
    mths_to_zero = ceil(principal / mthly_pay_amt)
    if mths_to_zero == 1:
        print("It will take {} month to repay the loan.".format(mths_to_zero))
    else:
        print("It will take {} months to repay the loan.".format(mths_to_zero))
elif selection == "p":
    num_mths = int(input("Enter the number of months: "))
    # Round the regular payment up so the final payment is never larger
    # than the regular ones.
    proposed_mthly_pay_amt = ceil(principal / num_mths)
    last_payment = int(principal - (num_mths - 1) * proposed_mthly_pay_amt)
    if proposed_mthly_pay_amt == last_payment:
        print("Your monthly payment = {}.".format(proposed_mthly_pay_amt))
    else:
        print("Your monthly payment = {} and the last payment = {}.".format(proposed_mthly_pay_amt, last_payment))
73643684497 | # Ejercicio 217: Alternar el segundo carácter entre dos palabras de tres letras.
# Solución:
# las los => los las
def intercambiar_caracteres(palabra1, palabra2):
    """Swap the middle character between two three-letter words.

    Example: ('las', 'los') -> ('los', 'las').

    Raises:
        ValueError: if either word is not exactly 3 characters long.
    """
    if len(palabra1) != 3 or len(palabra2) != 3:
        raise ValueError('Las palabras no son de 3 caracteres.')
    return (
        palabra1[0] + palabra2[1] + palabra1[2],
        palabra2[0] + palabra1[1] + palabra2[2],
    )
print(('las', 'los'))
print(intercambiar_caracteres('las', 'los'))
| Fhernd/PythonEjercicios | Parte001/ex217_alternar_caracteres.py | ex217_alternar_caracteres.py | py | 539 | python | es | code | 126 | github-code | 13 |
28396242593 | import random
import numpy as np
import matplotlib.pyplot as plt
import time
#import json
#import numpy as np
#import random
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import sgd
#import matplotlib.pyplot as plt
# import matplotlib.animation
# import IPython.display
NUMCOUNTRIES = 15
AVGPOP = 10000
AVGPRODCOST = 0.01
AVGPRODBASE = 5
DEMAND = 3
TARIFF = 0.01
# parameters
epsilon = .05 # probability of exploration (choosing a random action instead of the current best one)
#state_space = NUMCOUNTRIES ** 4 + NUMCOUNTRIES ** 2
state_space = 2 * NUMCOUNTRIES ** 2
action_space = 2
max_memory = 500
hidden_size = 2 * state_space
batch_size = 50
class Country():
    """One country in the trade simulation: randomized economy parameters,
    a tariff book keyed by Country object, and helpers to (re)order the
    global state vector from this country's point of view."""
    def __init__(self):
        # Economy parameters drawn around the module-level averages.
        self.population = int(np.random.normal(AVGPOP, AVGPOP / 7))
        self.production_cost = np.random.normal(AVGPRODCOST, AVGPRODCOST / 7)
        # NOTE(review): std dev here uses AVGPRODCOST / 7 while the mean is
        # AVGPRODBASE -- looks like a copy-paste typo (AVGPRODBASE / 7
        # intended?); confirm before changing, it alters the randomization.
        self.production_base = np.random.normal(AVGPRODBASE, AVGPRODCOST / 7)
        self.demand_slope = DEMAND
        self.relative_gains = float()
        # tariffs / new_tariffs: {Country: [tariff_rate, fta_flag]}.
        # NOTE(review): these instance attributes shadow the `tariffs`
        # method and `index` staticmethod defined below, making those
        # unreachable through instances (Country.index(...) still works).
        self.tariffs = {self:[0, False]}
        self.state = None
        self.countries = None
        self.index = None
        self.new_tariffs = {self:[0, False]}
    @staticmethod
    def index(i, j, p):
        # Flatten a (producer i, market j) pair into the p*p state vector.
        return i * p + j
    def tariffs(self):
        # Total tariff rate this country levies (dead code on instances --
        # shadowed by the `self.tariffs` dict assigned in __init__).
        return sum([i[0] for i in list(self.tariffs.values())])
    def initialize(self, countries):
        """Record this country's position and seed random tariff policies."""
        self.index = countries.index(self)
        # self.countries: all other countries first, self last.
        self.countries = countries[:self.index] + countries[self.index + 1:] + [self]
        for country in self.countries[:-1]:
            # Random initial FTA intent (0/1) at the base TARIFF rate.
            self.new_tariffs[country] = [TARIFF, round(random.random())]
        self.tariffs = self.new_tariffs
    def adjust_state(self, country):
        """Return self.state reordered so the given country's production and
        tariff sub-blocks come first (used to build per-opponent model input)."""
        p = NUMCOUNTRIES
        state = list(self.state)
        state = state[p * country: p * (country + 1)] + state[:p * country] + state[p * (country + 1): p**2] + \
                state[p**2 + p * country: p**2 + p * (country + 1)] + state[p**2:p**2 + p * country] + \
                state[p**2 + p * (country + 1):]
        return np.array(state)
    def _evaluatePolicy(self, world_state):
        """Cache the world state reordered so this country's own production
        and tariff sub-blocks appear last (matching self.countries order)."""
        p = NUMCOUNTRIES
        ws = list(world_state)
        self.state = ws[: p * self.index] + ws[p * (self.index + 1): p**2] + ws[p * self.index: p * (self.index + 1)] \
            + ws[p**2: p**2 + p * self.index] + ws[p**2 + p * (self.index + 1):] + \
            ws[p**2 + p * self.index: p**2 + p * (self.index + 1)]
        self.state = np.array(self.state)
    def resolve_policies(self):
        """Commit tariffs: an FTA (zero tariff) only when both sides opted in;
        otherwise the base TARIFF rate applies."""
        for country in self.countries[:-1]:
            if country.new_tariffs[self][1]:
                if self.new_tariffs[country][1]:
                    self.tariffs[country] = [0, True]
                else:
                    self.tariffs[country] = [TARIFF, False]
class Actor(Country):
    """The country driven by the model that is actually training; adds the
    reward computation used by the RL loop."""
    def _get_reward(self):
        """Return producer surplus (normalized by the tariff-aware optimum)
        plus consumer surplus (normalized by its autarky maximum).

        Original author's note: "Normalization is not implemented correctly
        right now" -- treat the normalization terms as approximate.
        """
        p = NUMCOUNTRIES
        # First p**2 entries of state are productions (producer-major).
        productions = self.state[:p**2]
        # Column sums: total quantity sold into each market.
        consumptions = [sum(productions.reshape((p,p))[:,i]) for i in range(p)]
        # Linear inverse demand: price = (population - consumption) / slope.
        prices = [(self.countries[i].population - consumptions[i]) / self.countries[i].demand_slope for i in range(p)]
        sales = 0
        # Revenue from this country's production row, net of each market's tariff.
        for market in range(self.countries.index(self) * p, (self.countries.index(self) + 1) * p):
            sales += productions[market] * prices[market%p] * (1- self.countries[market%p].tariffs[self][0])
        # Quadratic production cost plus linear base cost.
        producer_surplus = sales - self.production_cost * sum(productions[self.countries.index(self) * p:\
            (self.countries.index(self) + 1) * p])**2 / 2 - \
            sum(productions[self.countries.index(self) * p:(self.countries.index(self) + 1) * p]) * self.production_base
        # Triangle under the demand curve for the home market.
        consumer_surplus = (self.population / self.demand_slope - prices[self.countries.index(self)]) * \
                           consumptions[self.countries.index(self)] / 2
        max_consumption_surplus = self.population**2 / (2 * self.demand_slope)
        # Solve the p x p first-order conditions for this country's
        # profit-maximizing production vector under current tariffs.
        y = np.array([ (1 - self.countries[country].tariffs[self][0]) * \
            self.countries[country].population / self.countries[country].demand_slope + \
            self.production_base for country in range(p)])
        X = np.zeros((p,p))
        for market in range(p):
            for production in range(p):
                if production == market:
                    X[market, production] = 2 * (1 - self.countries[production].tariffs[self][0]) \
                        / self.countries[production].demand_slope - self.production_cost
                else:
                    X[market, production] = -1 * self.production_cost
        mx_prd = np.linalg.solve(X,y)
        max_prod_surplus = sum([mx_prd[i] * prices[i] * (1 - self.countries[i].tariffs[self][0]) for i in range(p)])
        max_prod_surplus -= self.production_cost * sum(mx_prd)**2 / 2 + self.production_base * sum(mx_prd)
        # Debug trace of the raw and normalized surplus components.
        print (producer_surplus, max_prod_surplus, producer_surplus/ max_prod_surplus, consumer_surplus / max_consumption_surplus)
        return producer_surplus / max_prod_surplus + consumer_surplus / max_consumption_surplus
class Agent(Country):
    """The object for the agents interacting with the model but not training.

    Picks tariff stances by querying a shared (frozen) Keras policy network.
    """
    def __init__(self, model):
        Country.__init__(self)
        self.model = model  # shared policy network used in set_policies

    def set_policies(self, world_state):
        """Choose a new tariff stance toward every other country.

        For each opponent, feed the state (reordered so that opponent comes
        first) through the model and take the argmax action (0 = tariff,
        1 = offer an FTA).
        """
        self._evaluatePolicy(world_state)
        for country in range(len(self.countries[:-1])):
            t = np.argmax(self.model.predict(np.expand_dims(self.adjust_state(country), axis = 0))[0])
            self.new_tariffs[self.countries[country]] = [TARIFF, t]
        # Sanity check: a country must never set a tariff against itself.
        if self.new_tariffs[self] != [0, False]:
            raise RuntimeError("Reflexive tariff policy is being adjusted")

    def update_model(self, model):
        """Swap in a freshly trained policy network.

        BUG FIX: the original signature was ``def update_model(model)`` --
        missing ``self`` -- so ``agent.update_model(new_model)`` bound the
        agent itself to ``model`` and then raised NameError on ``self``.
        """
        self.model = model
class World():
    """Top-level simulation: NUMCOUNTRIES - 1 model-driven Agents plus one
    training Actor (last slot), a shared state vector laid out as
    [p*p productions, p*p tariff rates], and the step/reward plumbing."""
    def __init__(self, starting_model):
        # starting_model: Keras network handed to every Agent for policy
        # selection until the trainer refreshes it.
        self.countries = None
        self.state = None
        self.reset(starting_model)
    def display(self):
        # Bar chart: number of free-trade agreements (flag element == True)
        # currently held by each country.
        for country in range(len(self.countries)):
            FTAs = sum([i[1] for i in self.countries[country].tariffs.values()])
            plt.bar(country, FTAs)
        plt.show(block = False)
    def reset(self, starting_model):
        """Rebuild the world: fresh Agents sharing starting_model, one Actor,
        then settle initial policies and per-country state views."""
        self.countries = [Agent(starting_model) for country in range(NUMCOUNTRIES - 1)] + [Actor()]
        for i in self.countries:
            i.initialize(self.countries)
        for i in self.countries:
            i.resolve_policies()
        self._evaluatePolicy()
        for i in self.countries:
            i._evaluatePolicy(self.state)
    def _evaluatePolicy(self):
        """Solve the p^2 x p^2 linear system of first-order conditions for
        equilibrium productions under current tariffs, then rebuild
        self.state as [productions, tariff rates]."""
        p = NUMCOUNTRIES
        # Right-hand side: tariff-discounted demand intercept plus base cost
        # for each (producer, market) pair.
        y = np.array([ (1 - self.countries[country%p].tariffs[self.countries[int(country/p)]][0]) * \
            self.countries[country%p].population / self.countries[country%p].demand_slope + \
            self.countries[int(country/p)].production_base for country in range(p**2)])
        X = np.zeros((p**2, p**2))
        for producer in range(p):
            for market in range(p):
                for i in range(p):
                    for j in range(p):
                        if j == market:
                            if i == producer:
                                # Own-quantity term: slope of marginal revenue
                                # minus marginal cost.
                                X[Country.index(producer, market, p), Country.index(i,j,p)] = 2 * \
                                    (1 - self.countries[j].tariffs[self.countries[i]][0]) / self.countries[j].demand_slope \
                                    - self.countries[i].production_cost
                            else:
                                # Competitors' sales into the same market.
                                X[Country.index(producer, market, p), Country.index(i,j,p)] = \
                                    (1 - self.countries[j].tariffs[self.countries[i]][0]) / self.countries[j].demand_slope
                        elif i == producer:
                            # Same producer selling elsewhere: shared quadratic cost.
                            X[Country.index(producer, market, p), Country.index(i,j,p)] = -1 * \
                                self.countries[i].production_cost
        # NOTE(review): negative productions are NOT clamped (the clamped
        # variant was removed); they are only printed below.
        productions = np.linalg.solve(X,y).flatten()
        tariffs = np.array([[i[0] for i in list(country.tariffs.values())] for country in self.countries]).flatten()
        self.state = np.concatenate((productions, tariffs))
        for i in self.state:
            if i<0:
                print (i)
        # (A large block of commented-out legacy code lived here: it
        # sequentially re-solved the system after forcing negative
        # productions to zero. Removed as dead code; see version control
        # history if the clamped solver is ever needed again.)
    def _update_state(self, actions):
        """Apply the Actor's chosen actions and let every Agent re-pick and
        resolve its policies against the refreshed state."""
        self._evaluatePolicy()
        self.countries[-1].new_tariffs = {self.countries[-1].countries[i]:[TARIFF, actions[i]] for i in range(len(actions))}
        self.countries[-1].new_tariffs[self.countries[-1]] = [0, False]
        for country in self.countries[:-1]:
            country.set_policies(self.state)
        for country in self.countries:
            country.resolve_policies()
    def _get_reward(self):
        # Reward is always the training Actor's (last country's) surplus.
        return self.countries[-1]._get_reward()
    def act(self, actions):
        """One environment step: apply *actions* and return the Actor's reward."""
        self._update_state(actions)
        reward = self.countries[-1]._get_reward()
        return reward
class ExperienceReplay(object):
    """Fixed-size FIFO memory of transitions plus batch assembly for training."""
    def __init__(self, max_memory=100):
        # max_memory: oldest transitions are dropped beyond this count.
        self.max_memory = max_memory
        self.memory = list()
    def remember(self, states):
        '''
        Input:
            states: [starting_observation, action_taken, reward_received, new_observation]

        Append the transition to the internal memory list. If the list grows
        longer than self.max_memory, drop the oldest entry.
        '''
        self.memory.append(states)
        if len(self.memory) > self.max_memory:
            del self.memory[0]
    def get_batch(self, model, batch_size=10):
        '''
        Randomly chooses memories (possibly repeating) and builds (inputs,
        targets) for training: targets start from the model's current Q-value
        estimates, with the taken action's entry overwritten by the observed
        reward.

        NOTE(review): `batch_size` is unused -- the batch size is actually
        min(len(memory), state_space). Also note the target is the raw
        reward with no discounted future value (no gamma * max Q(s', a'));
        confirm this is intended.
        '''
        len_memory = len(self.memory)
        action_space = model.output_shape[-1] # the number of possible actions
        env_dim = len(self.memory[0][0]) # the size of the state space
        # NOTE(review): original author questioned why this is state_space
        # rather than env_dim; the two coincide only if observations have
        # length state_space.
        input_size = min(len_memory, state_space)
        inputs = np.zeros((input_size, env_dim))
        targets = np.zeros((input_size, action_space))
        for i, idx in enumerate(np.random.randint(0, len_memory, size=input_size)):
            starting_observation, action_taken, reward_received, new_observation = self.memory[idx]
            # Set the input to the state that was observed before the action.
            inputs[i:i+1] = starting_observation
            # Start from the model's current best guesses for this state
            # (reshaped to a single-row batch for predict()).
            targets[i] = model.predict(starting_observation.reshape((1,state_space)))[0]
            targets[i, action_taken] = reward_received
        return inputs, targets
def build_model():
    '''
    Returns four initialized objects: the trainable model, the (identical
    architecture, independently initialized) agent model, the environment,
    and the experience replay buffer.
    '''
    def make_net():
        # Two hidden ReLU layers sized from the module-level hidden_size,
        # linear output over the action space, SGD + MSE loss.
        net = Sequential()
        net.add(Dense(hidden_size, input_shape=(state_space,), activation='relu'))
        net.add(Dense(hidden_size, activation='relu'))
        net.add(Dense(action_space))
        net.compile(sgd(lr=.04, clipvalue = 3.0), "mse")
        return net

    # The two networks were built with duplicated code before; a shared
    # helper keeps their architectures in lockstep.
    model = make_net()
    agent_model = make_net()
    # Define environment/game. The untrained agent_model effectively acts as
    # a random policy until the trainer syncs weights into it.
    env = World(agent_model)
    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)
    return model, agent_model, env, exp_replay
def train_model(model, agent_model, env, exp_replay, num_episodes):
    '''
    Run the Q-learning training loop.

    Inputs:
        model: network being trained; drives the Actor's (epsilon-greedy)
            action choices.
        agent_model: frozen policy used by the non-training Agents; refreshed
            from `model` every 100 episodes.
        env, exp_replay: objects as returned by build_model.
        num_episodes: integer, the number of episodes rolled out for training.

    Each episode resets the world and runs 30 policy-setting rounds.
    '''
    for episode in range(1, num_episodes + 1):
        if episode%100 == 0:
            # Sync the agents' policy network with the trained weights and
            # drop experiences gathered against the stale policy.
            agent_model.set_weights(model.get_weights())
            exp_replay.memory = list()
        loss = 0.
        env.reset(agent_model)
        for i in range(30):
            # get next action: one 0/1 decision per opponent country
            actions = []
            starting_observations = [env.countries[-1].adjust_state(country) for country in range(NUMCOUNTRIES - 1)]
            for country in range(NUMCOUNTRIES - 1):
                if np.random.rand() <= epsilon:
                    # epsilon of the time, we just choose randomly (explore)
                    actions.append(np.random.randint(2))
                else:
                    # otherwise take the action the model currently rates best
                    q = model.predict(np.expand_dims(starting_observations[country], axis = 0))
                    actions.append(np.argmax(q[0]))
            # apply action, get rewards and new state
            reward = env.act(actions)
            # store one transition per opponent (all share the same reward)
            for country in range(NUMCOUNTRIES - 1):
                exp_replay.remember([starting_observations[country], actions[country], \
                    reward, env.countries[-1].adjust_state(country)])
            # get data updated based on the stored experiences
            inputs, targets = exp_replay.get_batch(model, batch_size=batch_size)
            # train model on the updated data
            loss += model.train_on_batch(inputs, targets)
        # Print update from this episode
        print("Episode {:04d}/{:04d} | Loss {:.4f}".format(episode, num_episodes-1, loss))
model, agent_model, env, exp_replay = build_model()
train_model(model, agent_model, env, exp_replay, num_episodes=1000)
| alpaco42/EconThesisSeminar | EconAgentModel_v3_SingleCountryDecisions.py | EconAgentModel_v3_SingleCountryDecisions.py | py | 17,447 | python | en | code | 0 | github-code | 13 |
74880179538 | import os
import argparse
def delete_simulation_output():
    """
    This function is called by a forward model in ERT, deleting unnecessary
    simulation output files (EGRID/INIT/UNRST/LOG/PRT) next to the DATA file.

    The base name is read from the command line (positional ``ecl_base``).

    Returns:
        Nothing

    """
    parser = argparse.ArgumentParser(prog="Delete simulation output.")
    parser.add_argument(
        "ecl_base", type=str, help="Base name of the simulation DATA file"
    )
    args = parser.parse_args()

    for suffix in ["EGRID", "INIT", "UNRST", "LOG", "PRT"]:
        # EAFP instead of exists()+remove(): avoids the race where the file
        # disappears between the existence check and the removal.
        try:
            os.remove(f"{args.ecl_base}.{suffix}")
        except FileNotFoundError:
            pass
| equinor/flownet | src/flownet/ert/forward_models/_delete_simulation_output.py | _delete_simulation_output.py | py | 590 | python | en | code | 57 | github-code | 13 |
20022522863 | import datetime
import functools
import gc
import hashlib
import sys
import time
import threading
def blah(func):
    """Decorator: print "work it" before delegating every call to *func*.

    Uses functools.wraps so the wrapped callable keeps func's metadata.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("work it")
        return func(*args, **kwargs)
    return wrapper
import wsgo
@wsgo.cron(-2, -1, -1, -1, -1)
@blah
def every_two_minutes():
    # Registered with wsgo's cron scheduler; -2 in the minutes slot
    # presumably means "every 2 minutes" and -1 "any" -- confirm against
    # the wsgo documentation.
    print("hey")
@wsgo.timer(4)
def yep():
    # Periodic wsgo timer callback; presumably fires every 4 seconds --
    # confirm against the wsgo documentation.
    print("sometimes")
def application(env, start_response):
    """WSGI test app: consume the whole request body in 100 KB chunks (MD5
    is computed but unused), then reply 200 with an X-sendfile header and a
    timestamp body. The commented-out snippets are deliberate scratch
    experiments (threads, error stream, GC diagnostics) kept for manual
    testing of the wsgo server."""
    #time.sleep(0.01)
    # def func():
    #     print("Thread starting!")
    #     time.sleep(2)
    #     print("Thread finishing!")
    # threading.Thread(target=func).start()
    h = hashlib.md5()
    n = 0
    while True:
        to_read = 100000
        data = env['wsgi.input'].read(to_read)
        h.update(data)
        #if n==0:
        #    print(data[:1000])
        n += len(data)
        # A short read signals end of the request body.
        if len(data)<to_read:
            break
    #print(n, h.hexdigest())
    #env['wsgi.errors'].write('reporting an error!\n')
    #env['wsgi.errors'].flush()
    #gc.collect()
    #print(sys._debugmallocstats())
    # X-sendfile asks the server to stream the named file as the response.
    start_response('200 OK', [
        #('Content-Type','text/html'),
        ('SomeHeader', 'yeah'),
        ('X-sendfile', 'go.mod'),
    ])
    return [("The time is %s!"%(datetime.datetime.now())).encode('utf-8')]
# Register a cron entry for the minute/hour of one minute ago, to exercise
# the cron-registration path at startup.
run_at = datetime.datetime.now() - datetime.timedelta(seconds=60)
@wsgo.cron(run_at.minute, run_at.hour, -1, -1, 0)
@blah
def cron_test():
    # NOTE(review): the final argument 0 restricts the last cron field
    # (day-of-week?) -- confirm against the wsgo documentation.
    print("cron!")
data = {'hi':'there'}
| jonny5532/wsgo | wsgi.py | wsgi.py | py | 1,461 | python | en | code | 2 | github-code | 13 |
21880685991 | """
pyt_binary_classification.py: binary classification of 2D data
@author: Manish Bhobe
My experiments with Python, Machine Learning & Deep Learning.
This code is meant for education purposes only & is not intended for commercial/production use!
Use at your own risk!! I am not responsible if your CPU or GPU gets fried :D
"""
import warnings
warnings.filterwarnings('ignore')
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils import shuffle
from imblearn.over_sampling import SMOTE
# tweaks for libraries
np.set_printoptions(precision = 6, linewidth = 1024, suppress = True)
plt.style.use('seaborn')
sns.set(style = 'whitegrid', font_scale = 1.1, palette = 'muted')
# Pytorch imports
import torch
print('Using Pytorch version: ', torch.__version__)
import torch.nn as nn
import torchmetrics
from torchmetrics.classification import (
BinaryAccuracy, BinaryF1Score,
BinaryAUROC
)
print(f"Using torchmetrics: {torchmetrics.__version__}")
# My helper functions for training/evaluating etc.
import torch_training_toolkit as t3
SEED = t3.seed_all()
NUM_EPOCHS = 25
BATCH_SIZE = 1024
LR = 0.01
DATA_FILE = os.path.join('.', 'csv_files', 'weatherAUS.csv')
print(f"Data file: {DATA_FILE}")
MODEL_SAVE_PATH = os.path.join('.', 'model_states', 'weather_model.pt')
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ---------------------------------------------------------------------------
# load data, select fields & apply scaling
# ---------------------------------------------------------------------------
def get_data(
    test_split = 0.20, shuffle_it = True, balance = False,
    sampling_strategy = 0.85,
    debug = False
):
    """Load the weatherAUS CSV, binarize RainToday/RainTomorrow, optionally
    SMOTE-rebalance the classes, split train/test and standard-scale features.

    Returns ((X_train, y_train), (X_test, y_test)) with float32 features and
    column-vector labels. Side effect: shows seaborn countplots of the target.
    """
    # Redundant with the module-level import, but harmless (kept as-is).
    from imblearn.over_sampling import SMOTE

    df = pd.read_csv(DATA_FILE)
    if shuffle_it:
        df = shuffle(df)
    cols = ['Rainfall', 'Humidity3pm', 'Pressure9am', 'RainToday',
            'RainTomorrow']
    df = df[cols]
    # convert categorical cols - RainToday & RainTomorrow to numeric
    df['RainToday'].replace({"No": 0, "Yes": 1}, inplace = True)
    df['RainTomorrow'].replace({"No": 0, "Yes": 1}, inplace = True)
    # drop all rows where any cols == Null
    df = df.dropna(how = 'any')
    # display plot of target class balance before any resampling
    sns.countplot(data = df, x = df.RainTomorrow)
    plt.title("RainTomorrow: existing counts")
    plt.show()
    X = df.drop(['RainTomorrow'], axis = 1).values
    y = df['RainTomorrow'].values
    if debug:
        print(
            f"{'Before balancing ' if balance else ''} X.shape = {X.shape}, "
            f"y.shape = {y.shape}, y-count = {np.bincount(y)}"
        )
    if balance:
        # Oversample the minority class with SMOTE up to sampling_strategy
        # times the majority class size.
        ros = SMOTE(sampling_strategy = sampling_strategy, random_state = SEED)
        X, y = ros.fit_resample(X, y)
        if debug:
            print(
                f"Resampled -> X.shape = {X.shape}, y.shape = {y.shape}, "
                f"y-count = {np.bincount(y)}"
            )
        # display plot of target after re-balancing
        df2 = pd.DataFrame(
            X, columns = ['Rainfall', 'Humidity3pm', 'Pressure9am', 'RainToday']
        )
        df2['RainTomorrow'] = y
        sns.countplot(data = df2, x = df2.RainTomorrow)
        plt.title("RainTomorrow: after re-balancing")
        plt.show()
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size = test_split, random_state = SEED)
    if debug:
        print(
            f"Split data -> X_train.shape = {X_train.shape}, y_train.shape = {y_train.shape}, "
            f"X_test.shape = {X_test.shape}, y_test.shape = {y_test.shape}"
        )
    # Fit the scaler on train only, then apply to test (no leakage).
    ss = StandardScaler()
    X_train = ss.fit_transform(X_train)
    X_test = ss.transform(X_test)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # NOTE: BCELoss() expects labels to be floats - why???
    # Labels are reshaped to column vectors (N, 1) to match model output.
    y_train = y_train[:, np.newaxis]
    y_test = y_test[:, np.newaxis]
    return (X_train, y_train), (X_test, y_test)
class WeatherDataset(torch.utils.data.Dataset):
    """Dataset over the weatherAUS CSV: 4 standard-scaled float32 features
    (Rainfall, Humidity3pm, Pressure9am, RainToday) and a (N, 1) float32
    RainTomorrow label, optionally SMOTE-rebalanced."""
    def __init__(
        self, data_file_path, shuffle_it = True, balance = True,
        sampling_strategy = 0.85, seed = SEED
    ):
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would be more robust for input validation.
        assert os.path.exists(data_file_path), \
            f"FATAL: {data_file_path} - file does not exist!"
        df = pd.read_csv(data_file_path)
        if shuffle_it:
            df = shuffle(df)
        cols = ['Rainfall', 'Humidity3pm', 'Pressure9am', 'RainToday',
                'RainTomorrow']
        df = df[cols]
        # convert categorical cols - RainToday & RainTomorrow to numeric
        df['RainToday'].replace({"No": 0, "Yes": 1}, inplace = True)
        df['RainTomorrow'].replace({"No": 0, "Yes": 1}, inplace = True)
        # drop all rows where any cols == Null
        df = df.dropna(how = 'any')
        # assign X & y
        self.X = df.drop(['RainTomorrow'], axis = 1).values
        self.y = df['RainTomorrow'].values
        if balance:
            # Oversample the minority class before scaling.
            ros = SMOTE(
                sampling_strategy = sampling_strategy, random_state = seed
            )
            self.X, self.y = ros.fit_resample(self.X, self.y)
        # NOTE(review): the scaler is fit on the FULL dataset here, before
        # any train/test split happens downstream -- mild leakage; confirm
        # acceptable for this example.
        ss = StandardScaler()
        self.X = ss.fit_transform(self.X)
        self.X = torch.tensor(self.X, dtype = torch.float32)
        self.y = torch.tensor(self.y, dtype = torch.float32).reshape(-1, 1)
    def __len__(self):
        # Number of samples.
        return len(self.X)
    def __getitem__(self, idx):
        # Return (features, label) tensors for one sample.
        features = self.X[idx, :]
        label = self.y[idx, :]
        return features, label
class Net(nn.Module):
    """Binary classifier MLP: num_features -> 16 -> 8 -> 1 with ReLU hidden
    activations and a Sigmoid output (probability of rain tomorrow)."""

    def __init__(self, num_features):
        super().__init__()
        # Layer order (and therefore state_dict keys) matches the original.
        layers = [
            t3.Linear(num_features, 16),
            nn.ReLU(),
            t3.Linear(16, 8),
            nn.ReLU(),
            t3.Linear(8, 1),
            nn.Sigmoid(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Run a (batch, num_features) tensor through the network."""
        return self.net(x)
DO_TRAINING = True
DO_EVAL = True
DO_PREDICTION = True
import pickle, sys
def main():
    """End-to-end driver for the binary rain classifier.

    Loads the dataset, then -- gated by the module-level DO_TRAINING /
    DO_EVAL / DO_PREDICTION flags -- trains and saves the model, evaluates
    it on train and test splits, and reports per-sample predictions.
    """
    dataset = WeatherDataset(DATA_FILE)
    print(f"Loaded {len(dataset)} records", flush = True)
    # set aside 10% as test dataset
    train_dataset, test_dataset = t3.split_dataset(dataset, split_perc = 0.1)
    print(
        f"train_dataset: {len(train_dataset)} recs, test_dataset: {len(test_dataset)} recs"
    )
    # Metrics tracked during fit/evaluate.
    metrics_map = {
        "acc": BinaryAccuracy(),
        "f1": BinaryF1Score(),
        "roc_auc": BinaryAUROC(thresholds = None)
    }
    loss_fn = nn.BCELoss()
    trainer = t3.Trainer(
        loss_fn = loss_fn, device = DEVICE, metrics_map = metrics_map,
        epochs = NUM_EPOCHS, batch_size = BATCH_SIZE
    )
    if DO_TRAINING:
        # build model
        model = Net(4)
        print(model)
        optimizer = torch.optim.Adam(model.parameters(), lr = LR)
        hist = trainer.fit(model, optimizer, train_dataset, validation_split = 0.2)
        # BUG FIX: removed leftover debugging code that pickled `hist` and
        # then called sys.exit(-1), which made everything below (metric
        # plots, model saving, evaluation, prediction) unreachable.
        hist.plot_metrics(title = "Model Performance", fig_size = (16, 8))
        t3.save_model(model, MODEL_SAVE_PATH)
        del model
    if DO_EVAL:
        model = Net(4)
        model = t3.load_model(model, MODEL_SAVE_PATH)
        print(model)
        # evaluate performance
        print('Evaluating performance...')
        print('Training dataset')
        # evaluate training dataset (just re-confirming similar results as during training)
        metrics = trainer.evaluate(model, train_dataset)
        print(f"Training metrics: {metrics}")
        print("Testing dataset")
        # evaluate test dataset (for a good model, these should not be much different from training)
        metrics = trainer.evaluate(model, test_dataset)
        print(f"Testing metrics: {metrics}")
        del model
    if DO_PREDICTION:
        model = Net(4)
        model = t3.load_model(model, MODEL_SAVE_PATH)
        print(model)
        preds, actuals = trainer.predict_dataset(model, test_dataset)
        # Threshold sigmoid outputs at 0.5 via rounding; flatten to 1-D.
        preds = np.round(preds).ravel()
        actuals = actuals.ravel()
        incorrect_counts = (preds != actuals).sum()
        print(f"We got {incorrect_counts} of {len(actuals)} predictions wrong!")
        print(classification_report(actuals, preds))
        t3.plot_confusion_matrix(
            confusion_matrix(actuals, preds),
            class_names = ["No Rain", "Rain"],
            title = "Rain prediction for tomorrow"
        )
        del model
if __name__ == '__main__':
main()
# Results:
# Training (1000 epochs)
# - loss: 0.377 acc: 84.0%
# Training (1000 epochs)
# - loss: 0.377 acc: 84.1%
# Conclusion: No overfitting, but accuracy is low. Possibly due to very imbalanced data
#
# Training (1000 epochs) with re-sampling
# - loss: 0.377 acc: 84.0%
# Training (1000 epochs)
# - loss: 0.377 acc: 84.1%
# Conclusion: No overfitting, but accuracy is low. Possibly due to very imbalanced data
| mjbhobe/dl-pytorch | pyt_binary_classification.py | pyt_binary_classification.py | py | 9,630 | python | en | code | 8 | github-code | 13 |
17078826364 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.JsonOpenApiVO import JsonOpenApiVO
class AlipayBossFncGfcenterBanklogtransferCreateResponse(AlipayResponse):
    """Response wrapper for the alipay.boss.fnc.gfcenter.banklogtransfer.create API call."""

    def __init__(self):
        super(AlipayBossFncGfcenterBanklogtransferCreateResponse, self).__init__()
        # Parsed payload; populated from the raw response by parse_response_content().
        self._result_set = None

    @property
    def result_set(self):
        return self._result_set

    @result_set.setter
    def result_set(self, value):
        # Accept either a ready-made model object or a plain dict from the wire.
        self._result_set = value if isinstance(value, JsonOpenApiVO) else JsonOpenApiVO.from_alipay_dict(value)

    def parse_response_content(self, response_content):
        response = super(AlipayBossFncGfcenterBanklogtransferCreateResponse, self).parse_response_content(response_content)
        if 'result_set' in response:
            self.result_set = response['result_set']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayBossFncGfcenterBanklogtransferCreateResponse.py | AlipayBossFncGfcenterBanklogtransferCreateResponse.py | py | 969 | python | en | code | 241 | github-code | 13 |
72773726099 | import javascript.proxy
from javascript import require
import logging
import bot_functions
import config
Vec3 = require('vec3')
mcData = require('minecraft-data')(config.settings['minecraft_version'])
logger = logging.getLogger('bot_tasks')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename=config.settings['bot_log_path'], encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
def mine_block(bot: javascript.proxy.Proxy, bot_tasks: list, block_name):
    """Find the nearest block of type *block_name*, queue path+dig tasks for it,
    and start walking towards it.

    Chats an error message and returns early when the block name is unknown.
    """
    try:
        # get block type by name
        block_type = bot_functions.get_block_type_by_name(block_name)
    except bot_functions.NoBlockTypeForName:
        bot.chat(f"don't know block_name {block_name}")
        return
    # get the closest location of block by type
    block_location = bot_functions.get_closest_block_location(bot.entity.position, block_type)
    logger.debug(f"mine_block block_location={block_location}")
    # queue the path + dig steps for the task runner
    bot_tasks.extend([{
        "function": bot_functions.go_to_location,
        "arguments": {"location": block_location, "distance_from": 1}
    }, {
        "function": bot_functions.dig_block_by_location,
        "arguments": {"block_location": block_location}
    }])
    # path to block
    # NOTE(review): go_to_location is both queued above and invoked directly
    # here — looks like duplicate pathing; confirm whether this immediate call
    # is intentional (e.g. to start moving before the task queue is drained).
    bot_functions.go_to_location(bot=bot, location=block_location, distance_from=1)
def display_inventory(bot: javascript.proxy.Proxy, bot_tasks: list):
    """Report the bot's current inventory in chat, resolving item ids to names via mcData."""
    item_counts = bot_functions.get_inventory_items(bot=bot)
    named_inventory = {
        f"{mcData.items[item_id].name} {item_id}": count
        for item_id, count in item_counts.items()
        if item_id in mcData.items
    }
    bot.chat(f"{named_inventory}")
def craft_item(bot: javascript.proxy.Proxy, bot_tasks: list, item_name: str):
    """Queue a crafting task for *item_name* if a satisfiable recipe exists.

    A recipe is usable when the bot's inventory covers every ingredient and,
    if the recipe needs a crafting table, a table has been located nearby.
    Chats an error when the item name is unknown or no recipe is usable.
    """
    try:
        item_id = bot_functions.get_item_id_by_name(item_name)
    except bot_functions.NoItemIdForName:
        bot.chat(f"don't know item_name {item_name}")
        return

    inventory_items = bot_functions.get_inventory_items(bot)
    missing_recipe_items = bot_functions.get_recipe_missing_items(item_id=item_id, inventory_items=inventory_items)
    logger.debug(f"missing items for {item_name}: {missing_recipe_items}")

    # locate the nearest crafting table (may be None if none is in range)
    table_type = bot_functions.get_block_type_by_name("crafting_table")
    table_location = bot_functions.get_closest_block_location(
        origin_location=bot.entity.position, block_type=table_type)

    # first recipe with no missing ingredients whose table requirement is met
    valid_recipe_idx = next(
        (idx for idx, recipe_map in enumerate(missing_recipe_items)
         if not recipe_map['missing']
         and (not recipe_map['requires_table'] or table_location is not None)),
        None)

    if valid_recipe_idx is not None:
        bot_tasks.append({
            "function": bot_functions.craft_item_with_recipe,
            "arguments": {
                "item_id": item_id,
                "recipe_idx": valid_recipe_idx,
                "count": 1,
                "table_location": table_location
            }})
    else:
        bot.chat(f"no valid recipie found for {item_name}")
| rkaganda/minecraft_explore_bot | bot_tasks.py | bot_tasks.py | py | 3,345 | python | en | code | 0 | github-code | 13 |
20325435785 | import json
from ibmcloudant.cloudant_v1 import CloudantV1
# 1. Create a client with `CLOUDANT` default service name ============
client = CloudantV1.new_instance()
# 2. Get server information ===========================================
server_information = client.get_server_information(
).get_result()
print(f'Server Version: {server_information["version"]}')
# 3. Get database information for "orders" ==========================
db_name = "dealerships"
db_information = client.get_database_information(
db=db_name
).get_result()
# 4. Show document count in database ==================================
document_count = db_information["doc_count"]
print(f'Document count in \"{db_information["db_name"]}\" '
f'database is {document_count}.')
# 5. Get "example" document out of the database by document id ============
document_example = client.get_document(
db=db_name
).get_result()
print(f'Document retrieved from database:\n'
f'{json.dumps(document_example, indent=2)}') | trls888s/django-IBM-captson | cloudant_test.py | cloudant_test.py | py | 1,004 | python | en | code | 0 | github-code | 13 |
35869353302 | from PIL import Image, ImageEnhance
from cloudio import ImageIO, BucketConfig
import logging, traceback, sys
import json
img_io = ImageIO()
enhancers = {
'sharpness': ImageEnhance.Sharpness,
'contrast': ImageEnhance.Contrast,
'brightness': ImageEnhance.Brightness,
}
def generate_preview_images(img, name):
    """Create downscaled preview variants of *img* for each enhancer/factor pair.

    The image is shrunk to roughly 1/48 scale (with a 48-pixel minimum width,
    preserving aspect ratio), then each enhancer is applied at factors
    0.20, 0.40, ..., 1.80. Every preview is written to the preview bucket and
    described in the returned list of {'name': ..., <enhancer>: <factor>} dicts.
    """
    w, h = img.width // 48, img.height // 48
    if w < 48:
        w = 48
        h = img.height * w // img.width
    img = img.resize((w, h))
    results = []
    for ename, e in enhancers.items():
        # Iterate integer steps instead of repeatedly adding 0.2: floating-point
        # accumulation drifts (0.2*8 != 1.6 exactly) and only reached 1.8 by luck.
        for step in range(1, 10):
            factor = step * 0.2
            str_factor = '%.2f' % factor
            preview_name = f'{ename}_{str_factor}_{name}'
            img_io.write_image(e(img).enhance(factor), preview_name, bucket=BucketConfig.PREVIEW_BUCKET)
            results.append({
                'name': preview_name,
                ename: str_factor
            })
    return results
def log_exception():
    """Log the currently-handled exception with its full traceback."""
    # traceback.format_exc() renders the active exception exactly like the
    # interpreter would, replacing the manual sys.exc_info() juggling.
    logging.error(traceback.format_exc())
def cors_response(s):
    """Wrap *s* in an API Gateway response dict with permissive CORS headers."""
    cors_headers = {
        'Access-Control-Allow-Headers': 'Content-Type,Access-Control-Allow-Origin',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'OPTIONS,POST,GET',
    }
    return {'statusCode': 200, 'headers': cors_headers, 'body': s}
def enhance_image(event, context):
    """Lambda entry point: apply requested enhancements to an image, or return previews.

    A CORS preflight (OPTIONS) is answered immediately. Otherwise the JSON body
    names an image plus optional enhancer factors; if any factor is present the
    enhanced image is persisted, else a list of preview variants is generated.
    """
    if event['requestContext']['http']['method'] == 'OPTIONS':
        return cors_response('')
    try:
        request = json.loads(event['body'])
        image_name = request['name']
        image = img_io.read_image(image_name)
        # Apply each requested enhancer in the enhancers-dict order.
        requested = [key for key in enhancers if key in request]
        for key in requested:
            image = enhancers[key](image).enhance(float(request[key]))
        if requested:
            img_io.write_image(image, image_name, bucket=BucketConfig.ENHANCE_BUCKET)
            return cors_response('done')
        return cors_response(json.dumps(generate_preview_images(image, image_name)))
    except Exception as err:
        log_exception()
        raise err
| WayneGGG/ECE1779-A3 | enhancer/enhancement.py | enhancement.py | py | 2,193 | python | en | code | 0 | github-code | 13 |
71251535697 | from typing import List
from scripts.debugCommands.command import Command
from scripts.debugCommands.utils import add_output_line_to_log
from scripts.game_structure.game_essentials import game
from scripts.cat.cats import Cat
class addCatCommand(Command):
    """Debug command that spawns a new cat and adds it to the clan."""
    name = "add"
    description = "Add a cat"
    aliases = ["a"]

    def callback(self, args: List[str]):
        new_cat = Cat()
        game.clan.add_cat(new_cat)
        add_output_line_to_log(f"Added {new_cat.name} with ID {new_cat.ID}")
class removeCatCommand(Command):
    """Debug command that removes the first cat matching a name or ID."""
    name = "remove"
    description = "Remove a cat"
    aliases = ["r"]
    usage = "<cat name|id>"

    def callback(self, args: List[str]):
        if not args:
            add_output_line_to_log("Please specify a cat name or ID")
            return
        query = args[0].lower()
        for candidate in Cat.all_cats_list:
            # Names match case-insensitively; IDs must match exactly.
            if str(candidate.name).lower() == query or candidate.ID == args[0]:
                game.clan.remove_cat(candidate.ID)
                add_output_line_to_log(f"Removed {candidate.name} with ID {candidate.ID}")
                return
        add_output_line_to_log(f"Could not find cat with name or ID {args[0]}")
class listCatsCommand(Command):
    """Debug command that prints every cat with its ID, status and age."""
    name = "list"
    description = "List all cats"
    aliases = ["l"]

    def callback(self, args: List[str]):
        for entry in Cat.all_cats_list:
            add_output_line_to_log(f"{entry.ID} - {entry.name}, {entry.status}, {entry.moons} moons old")
class ageCatsCommand(Command):
    """Debug command that shows or changes a cat's age in moons.

    With only a cat name/ID the current age is printed; a second argument sets
    the age ("+n"/"-n" adjust it relative to the current value).
    """
    name = "age"
    description = "Age a cat"
    aliases = ["a"]
    usage = "<cat name|id> [number]"

    def callback(self, args: List[str]):
        if len(args) == 0:
            add_output_line_to_log("Please specify a cat name or ID")
            return
        for cat in Cat.all_cats_list:
            if str(cat.name).lower() == args[0].lower() or cat.ID == args[0]:
                if len(args) == 1:
                    add_output_line_to_log(f"{cat.name} is {cat.moons} moons old")
                else:
                    # Validate the number before mutating so a typo can't crash the console.
                    try:
                        if args[1].startswith("+"):
                            cat.moons += int(args[1][1:])
                        elif args[1].startswith("-"):
                            cat.moons -= int(args[1][1:])
                        else:
                            cat.moons = int(args[1])
                    except ValueError:
                        add_output_line_to_log(f"{args[1]} is not a valid number")
                        return
                    add_output_line_to_log(f"{cat.name} is now {cat.moons} moons old")
                # Stop after the first match and report misses, consistent
                # with removeCatCommand (previously the loop kept scanning and
                # unknown cats produced no message at all).
                return
        add_output_line_to_log(f"Could not find cat with name or ID {args[0]}")
class CatsCommand(Command):
name = "cats"
description = "Manage Cats"
aliases = ["cat"]
subCommands = [
addCatCommand(),
removeCatCommand(),
listCatsCommand(),
ageCatsCommand()
]
def callback(self, args: List[str]):
add_output_line_to_log("Please specify a subcommand") | Thlumyn/clangen | scripts/debugCommands/cat.py | cat.py | py | 2,691 | python | en | code | 135 | github-code | 13 |
3746376478 | #!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import re
LANGUAGES_HPP_TEMPLATE = """\
#pragma once
#include <array>
#include <string>
// This file is autogenerated while exporting sounds.csv from the google table.
// It contains the list of languages which can be used by TTS.
// It shall be included to Android(jni) and iOS part to get the languages list.
namespace routing
{{
namespace turns
{{
namespace sound
{{
std::array<std::string, {lang_list_size}> const kLanguageList =
{{{{
{lang_list}
}}}};
}} // namespace sound
}} // namespace turns
}} // namespace routing
"""
def parse_args():
    """Return the two positional CLI arguments: (sound.txt path, languages.hpp path)."""
    parser = OptionParser(
        description="Creates a language.hpp out of the sound.txt file.",
        usage="python %prog <path_to_sound.txt> <path_to_languages.hpp>",
        version="%prog 1.0"
    )
    _options, positional = parser.parse_args()
    if len(positional) != 2:
        parser.error("Wrong number of arguments.")
    return positional
def read_languages(strings_name):
    """Collect the set of language codes appearing as `name =` keys in the strings file."""
    lang_re = re.compile(r'^ *([\w-]+) *=')
    with open(strings_name, "r") as langs_file:
        matches = (lang_re.search(line) for line in langs_file)
        return {m.group(1) for m in matches if m}
def make_languages_hpp(langs, hpp_name):
    """Write the generated languages.hpp file listing *langs* in sorted order."""
    print ("Creating {}".format(hpp_name))
    lang_lines = ["  \"{}\"".format(code) for code in sorted(langs)]
    with open(hpp_name, "w") as hpp_file:
        hpp_file.write(LANGUAGES_HPP_TEMPLATE.format(lang_list_size=len(langs), lang_list=",\n".join(lang_lines)))
def run():
    """Script entry point: read languages from sound.txt and emit languages.hpp."""
    strings_name, langs_hpp_name = parse_args()
    make_languages_hpp(read_languages(strings_name), langs_hpp_name)
if __name__ == "__main__":
run()
| organicmaps/organicmaps | tools/python/tts_languages.py | tts_languages.py | py | 1,793 | python | en | code | 7,565 | github-code | 13 |
14570753668 | """
Created on Fri Mar 7 18:42:21 2014
@author: fritz
"""
import numpy as np
#import matplotlib.pyplot as plt
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
window = 0
width, height = 800, 600
def draw_stuff():
    """Draw two hard-coded triangles as a single GL_TRIANGLES batch."""
    vertices = (
        (0, 0), (90, 0), (0, 90),          # first triangle
        (100, 100), (100, 10), (10, 100),  # second triangle
    )
    glBegin(GL_TRIANGLES)
    for vx, vy in vertices:
        glVertex2f(vx, vy)
    glEnd()
def refresh2d(width, height):
    """Set up a 2D orthographic view mapping world units 1:1 onto window pixels."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Origin at the bottom-left corner; near/far planes at 0..1.
    glOrtho(0.0, width, 0.0, height, 0.0, 1.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def draw():
    """Per-frame GLUT callback: clear, set up the 2D view, draw the scene in blue."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    refresh2d(width, height)
    glColor3f(0.0, 0.0, 1.0)  # blue
    draw_stuff()
    glutSwapBuffers()  # double-buffered: present the finished frame
def main():
    """Initialize GLUT, create the window and enter the event loop (never returns)."""
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(width, height)
    glutInitWindowPosition(100, 100)
    # NOTE(review): this assigns a local `window`, shadowing the module-level
    # `window = 0` — presumably intentional, but confirm the global is unused.
    window = glutCreateWindow("first person")
    glutDisplayFunc(draw)
    glutIdleFunc(draw)
    glutMainLoop()
if __name__ == '__main__':
main() | fgroes/pyFirstPerson | main.py | main.py | py | 1,198 | python | en | code | 0 | github-code | 13 |
26387294210 | #!/usr/bin/env python
# coding: utf-8
import time
import os
import copy
import numpy as np
import pandas as pd
from PIL import Image, ImageChops
#from tqdm import tqdm
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torch.nn.functional as F
import torchvision.utils as vutils
from torch.autograd import Variable
from torchvision import datasets, models, transforms
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import WeightedRandomSampler
from efficientnet_pytorch import EfficientNet
from conf_classifier_funcs import process_csv, CustomDatasetFromImages
from conf_classifier_funcs import Generator, Discriminator, set_parameter_requires_grad, initialize_model
# Getting data
df = process_csv(path = 'data/ISIC_2019_Training_GroundTruth.csv',
strat_train_idx_frac=0.94,
seed=42,
ood_classes=[])
df.groupby(['class_label']).agg({'image': 'count'})
df.groupby(['class_label','train_idx']).agg({'image': 'count'}) .groupby(level=0).apply(lambda x: 100 * x / float(x.sum()))
# Data transforms / loaders
data_dir = "data/ISIC_2019_Training_Input/"
# Number of classes in the dataset
num_classes = len(np.unique(df.class_label)) + 1
# Number of epochs to train for
epochs = 100
input_size = 128
batch_size = 16
data_transforms = {
'train': transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
datasets_dict= {'train': CustomDatasetFromImages(data_info = df[df.train_idx==1.],
num_classes = None,
img_folder_path = data_dir,
transform = data_transforms['train']),
'val': CustomDatasetFromImages(data_info = df[df.train_idx==0.],
num_classes = None,
img_folder_path = data_dir,
transform = data_transforms['val'])
}
weights = 1 / np.unique(datasets_dict['train'].data_info['class_label'], return_counts=True)[1]
samples_weight = torch.from_numpy(np.array([weights[c] for c in datasets_dict['train'].data_info['class_label']]))
weighted_sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
if weighted_sampler is not None:
dataloaders_dict = {'train': DataLoader(datasets_dict['train'], batch_size=batch_size,
num_workers=2, sampler=weighted_sampler),
'val': DataLoader(datasets_dict['val'], batch_size=batch_size,
num_workers=2),
'test': DataLoader(datasets_dict['val'], batch_size=1,
num_workers=2)}
else:
dataloaders_dict = {'train': DataLoader(datasets_dict['train'], batch_size=batch_size,
num_workers=2, shuffle=True),
'val': DataLoader(datasets_dict['val'], batch_size=batch_size,
num_workers=2),
'test': DataLoader(datasets_dict['val'], batch_size=1,
num_workers=2)}
# Model parameters
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "efficientnet-b0"
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = False
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Initialize the model for this run
model_ft, input_size = initialize_model(model_name,
num_classes,
feature_extract,
input_size=input_size,
use_pretrained=True)
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("Number of parameters that are being learned:", sum(p.numel() for p in model_ft.parameters() if p.requires_grad))
# GAN parameters
nz = 100
ngf = input_size
ndf = int(ngf/4)
lr = 3e-4
wd = 0.0
decrease_lr = 60
droprate = 0.1
beta = 0.1
print('load GAN')
netG = Generator(1, nz, ngf, 3) # ngpu, nz, ngf, nc
netD = Discriminator(1, 3, ndf) # ngpu, nc, ndf
# Initial setup for GAN
real_label = 1
fake_label = 0
criterion = nn.BCELoss()
fixed_noise = torch.FloatTensor(ndf, nz, 1, 1).normal_(0, 1)
# Put GAN on device
if torch.cuda.is_available():
netD.cuda()
netG.cuda()
criterion.cuda()
fixed_noise = fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
print('Setup optimizer')
optimizer = optim.Adam(params_to_update, lr=lr) #, weight_decay=wd)
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))
#decreasing_lr = list(map(int, decrease_lr.split(',')))
print("Number of parameters that are being learned in Discriminator:", sum(p.numel() for p in netD.parameters() if p.requires_grad))
print("Number of parameters that are being learned in Generator:", sum(p.numel() for p in netG.parameters() if p.requires_grad))
def train(epoch):
    """Run one epoch of joint GAN + confident-classifier training.

    Per batch, three updates are performed:
      1. Discriminator: real images labelled 1, generator samples labelled 0.
      2. Generator: standard GAN loss plus a KL term pushing the classifier
         towards a uniform prediction on generated (boundary) samples.
      3. Classifier: NLL loss on real data plus the same KL term, weighted by
         `beta`, on a fresh batch of generated samples.
    """
    model_ft.train()
    correct = 0
    for batch_idx, (data, target) in enumerate(dataloaders_dict['train']):
        gan_target = torch.FloatTensor(target.size()).fill_(0)
        uniform_dist = torch.Tensor(data.size(0), num_classes).fill_((1./num_classes))
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
            gan_target, uniform_dist = gan_target.cuda(), uniform_dist.cuda()
        data, target, uniform_dist = Variable(data), Variable(target), Variable(uniform_dist)

        ###########################
        # (1) Update D network    #
        ###########################
        # train with real
        gan_target.fill_(real_label)
        targetv = Variable(gan_target)
        optimizerD.zero_grad()
        output = netD(data)
        errD_real = criterion(output, targetv)
        errD_real.backward()
        D_x = output.data.mean()

        # train with fake
        # BUG FIX: the noise tensor was moved to CUDA unconditionally before the
        # availability check, crashing on CPU-only machines.
        noise = torch.FloatTensor(data.size(0), nz, 1, 1).normal_(0, 1)
        if torch.cuda.is_available():
            noise = noise.cuda()
        noise = Variable(noise)
        fake = netG(noise)
        targetv = Variable(gan_target.fill_(fake_label))
        output = netD(fake.detach())
        errD_fake = criterion(output, targetv)
        errD_fake.backward()
        D_G_z1 = output.data.mean()
        errD = errD_real + errD_fake
        optimizerD.step()

        ###########################
        # (2) Update G network    #
        ###########################
        optimizerG.zero_grad()
        # Original GAN loss
        targetv = Variable(gan_target.fill_(real_label))
        output = netD(fake)
        errG = criterion(output, targetv)
        D_G_z2 = output.data.mean()

        # minimize the true distribution
        # BUG FIX: log_softmax must normalize over the class dimension (dim=1);
        # dim=0 normalized over the batch, corrupting the KL term.
        KL_fake_output = F.log_softmax(model_ft(fake), dim=1)
        errG_KL = F.kl_div(KL_fake_output, uniform_dist, reduction='batchmean')*num_classes
        generator_loss = errG + beta*errG_KL
        generator_loss.backward()
        optimizerG.step()

        ###########################
        # (3) Update classifier   #
        ###########################
        # cross entropy loss (dim=1: per-sample class log-probabilities)
        optimizer.zero_grad()
        output = F.log_softmax(model_ft(data), dim=1)
        loss = F.nll_loss(output, target.squeeze())

        # KL divergence on a fresh batch of generated samples
        noise = torch.FloatTensor(data.size(0), nz, 1, 1).normal_(0, 1)
        if torch.cuda.is_available():
            noise = noise.cuda()
        noise = Variable(noise)
        fake = netG(noise)
        KL_fake_output = F.log_softmax(model_ft(fake), dim=1)
        KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist, reduction='batchmean')*num_classes
        total_loss = loss + beta*KL_loss_fake
        total_loss.backward()
        optimizer.step()

        _, preds = torch.max(output, 1)
        correct += torch.sum(preds == target.data.squeeze())
        print('Loss: {:.6f}, KL fake Loss: {:.6f}'.format(loss.item(), KL_loss_fake.item()))
def test(epoch):
    """Evaluate the classifier on the validation split and print loss/accuracy."""
    model_ft.eval()
    test_loss = 0
    correct = 0
    for data, target in dataloaders_dict['val']:
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # BUG FIX: normalize over the class dimension (dim=1); dim=0 normalized
        # over the batch, which also corrupts the argmax used for accuracy.
        output = F.log_softmax(model_ft(data), dim=1)
        test_loss += F.nll_loss(output, target.squeeze()).item()
        _, preds = torch.max(output, 1)  # index of the max log-probability
        correct += torch.sum(preds == target.data.squeeze())
    test_loss /= len(dataloaders_dict['val'])  # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(dataloaders_dict['val'].dataset),
        100. * correct / len(dataloaders_dict['val'].dataset)))
# Main training loop: evaluate after every epoch; checkpoints are overwritten
# in place every 10 epochs (epoch 0 included).
for epoch in range(epochs):
    print('Epoch {}/{}'.format(epoch, epochs - 1))
    print('-' * 10)
    train(epoch)
    test(epoch)
    if epoch % 10 == 0:
        # do checkpointing
        torch.save(netG.state_dict(), 'models/netG9.pth')
        torch.save(netD.state_dict(), 'models/netD9.pth')
        torch.save(model_ft.state_dict(), 'models/effnetb0_cc9.pth') | nsalas24/isic-2019 | confident_classifier/conf_classifier_training.py | conf_classifier_training.py | py | 10,924 | python | en | code | 1 | github-code | 13 |
4787029983 | import pygame
class Box:
    """A single cell of a 9x9 sudoku grid, able to render its number with pygame."""

    def __init__(self, number, row, col, width, height):
        self.number = number      # 0 means "empty"
        self.row = row
        self.col = col
        self.width = width        # overall board width in pixels
        self.height = height      # overall board height in pixels
        self.isSelected = False

    def draw_boxes(self, window):
        """Render this cell's number (if non-zero) centered inside its grid square."""
        cell = self.width / 9
        left, top = cell * self.col, cell * self.row
        font = pygame.font.SysFont('comicsans', 40)
        if self.number != 0:
            text = font.render(str(self.number), 1, (255, 255, 255))
            window.blit(text, (left + cell / 2 - text.get_width() / 2,
                               top + cell / 2 - text.get_height() / 2))

    def add_num(self, x):
        """Store *x* as this cell's number."""
        self.number = x

    def update_box(self, window, flag=True):
        """Redraw the cell with its number and a green (valid) or red (invalid) border."""
        font = pygame.font.SysFont('comicsans', 40)
        cell = self.width / 9
        left, top = cell * self.col, cell * self.row
        pygame.draw.rect(window, (0, 0, 0), (left, top, cell, cell), 0)
        text = font.render(str(self.number), 1, (255, 255, 255))
        window.blit(text, (left + cell / 2 - text.get_width() / 2,
                           top + cell / 2 - text.get_height() / 2))
        border = (0, 255, 0) if flag else (255, 0, 0)
        pygame.draw.rect(window, border, (left, top, cell, cell), 3)
| Madhur215/Sudoku | Box.py | Box.py | py | 1,363 | python | en | code | 1 | github-code | 13 |
import math

# Kattis "exoplanetlighthouse": for each query, compute the length of the
# planet's circumference visible from two observers at heights h1/h2 (metres)
# above a sphere of radius r (km). The final print (below) uses r/theta1/theta2.
case_count = int(input().split()[0])
for _ in range(case_count):
    r_str, h1_str, h2_str = input().split()
    r = float(r_str)
    h1 = float(h1_str) / 1000  # m to km
    h2 = float(h2_str) / 1000  # m to km
    # angle from each observer to its horizon tangent point
    theta1 = math.acos(r / (r + h1))
    theta2 = math.acos(r / (r + h2))
print(((theta1 + theta2) / (2 * math.pi)) * (2 * math.pi * r)) | sreedhara-aneesh/open-kattis-submissions | exoplanetlighthouse.py | exoplanetlighthouse.py | py | 357 | python | en | code | 0 | github-code | 13 |
17089266254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PreRepayPlanTermVO import PreRepayPlanTermVO
class AlipayPcreditLoanBudgetQueryResponse(AlipayResponse):
    """Response wrapper for the alipay.pcredit.loan.budget.query API call."""

    def __init__(self):
        super(AlipayPcreditLoanBudgetQueryResponse, self).__init__()
        self._pre_repay_plan_term_list = None  # list of PreRepayPlanTermVO
        self._repay_amt_total = None           # total repayment amount
        self._repay_int_amt_total = None       # interest portion of the total
        self._repay_prin_amt_total = None      # principal portion of the total

    @property
    def pre_repay_plan_term_list(self):
        return self._pre_repay_plan_term_list

    @pre_repay_plan_term_list.setter
    def pre_repay_plan_term_list(self, value):
        if isinstance(value, list):
            # Coerce raw dicts coming off the wire into model objects.
            self._pre_repay_plan_term_list = [
                item if isinstance(item, PreRepayPlanTermVO) else PreRepayPlanTermVO.from_alipay_dict(item)
                for item in value
            ]

    @property
    def repay_amt_total(self):
        return self._repay_amt_total

    @repay_amt_total.setter
    def repay_amt_total(self, value):
        self._repay_amt_total = value

    @property
    def repay_int_amt_total(self):
        return self._repay_int_amt_total

    @repay_int_amt_total.setter
    def repay_int_amt_total(self, value):
        self._repay_int_amt_total = value

    @property
    def repay_prin_amt_total(self):
        return self._repay_prin_amt_total

    @repay_prin_amt_total.setter
    def repay_prin_amt_total(self, value):
        self._repay_prin_amt_total = value

    def parse_response_content(self, response_content):
        response = super(AlipayPcreditLoanBudgetQueryResponse, self).parse_response_content(response_content)
        # Assigning via setattr routes each value through its property setter.
        for key in ('pre_repay_plan_term_list', 'repay_amt_total', 'repay_int_amt_total', 'repay_prin_amt_total'):
            if key in response:
                setattr(self, key, response[key])
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayPcreditLoanBudgetQueryResponse.py | AlipayPcreditLoanBudgetQueryResponse.py | py | 2,303 | python | en | code | 241 | github-code | 13 |
import sys

from xmlrpc.server import (
    SimpleXMLRPCServer,
    list_public_methods
)

from caninehotel_backend.utils import register_rpc_operations
from caninehotel_backend.handler import RequestHandler
from caninehotel_backend.config import (
    HOST,
    PORT,
)
from caninehotel_backend.database import connect_to_mongodb
def main():
    """Start the XML-RPC server on HOST:PORT and serve requests until interrupted."""
    with SimpleXMLRPCServer((HOST, PORT), requestHandler=RequestHandler, allow_none=True) as server:
        server.register_introspection_functions()
        server = register_rpc_operations(server)
        connect_to_mongodb()
        try:
            print("INICIANDO SERVIDOR...")
            print("SERVIDOR CORRIENDO EN : {}:{}".format(HOST, PORT))
            server.serve_forever()
        except KeyboardInterrupt:
            # BUG FIX: `sys` was never imported, so this handler raised
            # NameError instead of exiting cleanly; `import sys` added at
            # module level.
            print("\nInterrupcion de teclado detectada.")
            sys.exit(0)
if __name__ == '__main__':
main() | HeinerAlejandro/caninehotel | caninehotel_backend/main.py | main.py | py | 800 | python | en | code | 0 | github-code | 13 |
35185555216 | import boto3
import json
import os
# Iris flower categories.
irisCategory = {
0: 'setosa',
1: 'versicolor',
2: 'virginica'
}
def handler(event, context):
    """
    Lambda handler. Invokes the SageMaker endpoint with the input carried in a
    MongoDB change event and publishes the predicted classes to the event bus.
    """
    print(json.dumps(event))
    try:
        # Read environment variables.
        sagemaker_endpoint = os.environ['model_endpoint']
        region_name = os.environ['region_name']
        eventbus_name = os.environ['eventbus_name']

        # Enable the SageMaker runtime.
        sagemaker_runtime = boto3.client('runtime.sagemaker', region_name=region_name)

        # The changed document carries the model input under "data".
        document = event['detail']['fullDocument']
        request_body = json.dumps({'Input': document['data']})

        # Predict from model.
        endpoint_response = sagemaker_runtime.invoke_endpoint(
            EndpointName=sagemaker_endpoint,
            ContentType='application/json',
            Body=request_body
        )
        output = json.loads(endpoint_response['Body'].read().decode())['Output']

        # Map class ids to flower names and publish the result.
        prediction = [irisCategory[cat_id] for cat_id in output]
        response = push_to_eventbus(eventbus_name, region_name, prediction, document['_id'])
        print(json.dumps(response))
        return response
    except Exception as ex:
        print("Exception: " + str(ex))
        raise ex
# Push events to eventbus.
def push_to_eventbus(EVENTBUS_NAME, REGION_NAME, prediction, inputID):
    """Publish the model prediction (tagged with the source document id) to EventBridge."""
    events_client = boto3.client(
        'events',
        region_name=REGION_NAME
    )
    event_detail = {
        "prediction": prediction,
        "inp_id": inputID
    }
    entry = {
        'Source': 'user-event',
        'DetailType': 'user-preferences',
        'Detail': json.dumps(event_detail),
        'EventBusName': EVENTBUS_NAME
    }
    return events_client.put_events(Entries=[entry])
| mongodb/mongodbatlas-cloudformation-resources | examples/quickstart-mongodb-atlas-analytics-amazon-sagemaker-integration/sagemaker-example/lambda_functions/process_mdb_change_event/app.py | app.py | py | 2,165 | python | en | code | 51 | github-code | 13 |
userInp = input('Zadejte cele cislo na preklad :') # Read the whole number to translate from the user
userInp = int(userInp) # Convert the user's input to int so the program can process it
class romanNum:
    """Converter from a non-negative integer to its Roman-numeral string."""

    def intToRom(self, userInp):
        """Return the Roman-numeral representation of *userInp* ('' for values <= 0)."""
        # Value/symbol pairs from largest to smallest; subtractive forms
        # (CM, CD, XC, XL, IX, IV) are listed explicitly so a greedy scan suffices.
        pairs = [
            (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
            (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
            (10, "X"), (9, "IX"), (5, "V"), (4, "IV"),
            (1, "I"),
        ]
        parts = []
        remaining = userInp
        for value, symbol in pairs:
            while remaining >= value:
                parts.append(symbol)
                remaining -= value
        return "".join(parts)
print(romanNum().intToRom(userInp))
| KordacVojtech/Zaverecny-projekt-VS | romNumVS.py | romNumVS.py | py | 969 | python | hr | code | 0 | github-code | 13 |
33246511843 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from models import losses
from models import preprocessing
from models import vgg
from models import vgg_decoder
slim = tf.contrib.slim
network_map = {
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
}
class AutoEncoder(object):
    """VGG-based auto-encoder with one decoder per selected content layer.

    For each trained decoder the loss combines a pixel reconstruction term, a
    perceptual (content) term computed on VGG features, and a total-variation
    smoothness term, weighted by the corresponding `*_weight` options.
    """

    def __init__(self, options):
        self.weight_decay = options.get('weight_decay')
        self.default_size = options.get('default_size')
        self.content_size = options.get('content_size')

        # network architecture (key into network_map, e.g. 'vgg_16')
        self.network_name = options.get('network_name')

        # the loss layers for content and style similarity
        self.content_layers = options.get('content_layers')

        # the weights for the losses when training the invertible network
        self.content_weight = options.get('content_weight')
        self.recons_weight = options.get('recons_weight')
        self.tv_weight = options.get('tv_weight')

        # summaries and per-layer losses, populated by build_train_graph()
        self.summaries = None
        self.total_loss = 0.0
        self.recons_loss = {}
        self.content_loss = {}
        self.tv_loss = {}
        self.train_op = None

    def auto_encoder(self, inputs, content_layer=2, reuse=True):
        """Encode *inputs* with VGG and decode from content_layers[content_layer].

        Returns (reconstructed images, {layer_name: content feature fed to the decoder}).
        """
        # extract the content features
        image_features = losses.extract_image_features(inputs, self.network_name)
        content_features = losses.compute_content_features(image_features, self.content_layers)

        # content feature actually used by this decoder
        selected_layer = self.content_layers[content_layer]
        content_feature = content_features[selected_layer]
        input_content_features = {selected_layer: content_feature}

        # reconstruct the images
        with slim.arg_scope(vgg_decoder.vgg_decoder_arg_scope(self.weight_decay)):
            outputs = vgg_decoder.vgg_decoder(
                content_feature,
                self.network_name,
                selected_layer,
                reuse=reuse,
                scope='decoder_%d' % content_layer)
        return outputs, input_content_features

    def build_train_graph(self, inputs):
        """Build decoders and losses for the trained layers; return the total loss."""
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        for i in range(len(self.content_layers)):
            # skip the shallow layers; only decoders for layers 3+ are trained here
            if i < 3:
                continue

            selected_layer = self.content_layers[i]
            outputs, inputs_content_features = self.auto_encoder(
                inputs, content_layer=i, reuse=False)
            outputs = preprocessing.batch_mean_image_subtraction(outputs)

            ########################
            # construct the losses #
            ########################
            # 1) reconstruction loss
            recons_loss = tf.losses.mean_squared_error(
                inputs, outputs, scope='recons_loss/decoder_%d' % i)
            self.recons_loss[selected_layer] = recons_loss
            self.total_loss += self.recons_weight * recons_loss
            summaries.add(tf.summary.scalar(
                'recons_loss/decoder_%d' % i, recons_loss))

            # 2) content (perceptual) loss on the reconstruction's VGG features
            outputs_image_features = losses.extract_image_features(
                outputs, self.network_name)
            outputs_content_features = losses.compute_content_features(
                outputs_image_features, [selected_layer])
            content_loss = losses.compute_content_loss(
                outputs_content_features, inputs_content_features, [selected_layer])
            self.content_loss[selected_layer] = content_loss
            self.total_loss += self.content_weight * content_loss
            summaries.add(tf.summary.scalar(
                'content_loss/decoder_%d' % i, content_loss))

            # 3) total variation loss
            tv_loss = losses.compute_total_variation_loss_l1(outputs)
            self.tv_loss[selected_layer] = tv_loss
            self.total_loss += self.tv_weight * tv_loss
            summaries.add(tf.summary.scalar(
                'tv_loss/decoder_%d' % i, tv_loss))

            # side-by-side input/reconstruction image summary
            image_tiles = tf.concat([inputs, outputs], axis=2)
            image_tiles = preprocessing.batch_mean_image_summation(image_tiles)
            image_tiles = tf.cast(tf.clip_by_value(image_tiles, 0.0, 255.0), tf.uint8)
            summaries.add(tf.summary.image(
                'image_comparison/decoder_%d' % i, image_tiles, max_outputs=8))

        self.summaries = summaries
        return self.total_loss

    def get_training_operations(self, optimizer, global_step,
                                variables_to_train=None):
        """Create the gradient-update op and variable summaries; return the train op.

        BUG FIX: the default for `variables_to_train` used to be
        `tf.trainable_variables()`, which is evaluated once at class-definition
        time — before the model graph is built — so it captured an empty or
        stale variable list. It is now resolved lazily at call time.
        """
        if variables_to_train is None:
            variables_to_train = tf.trainable_variables()

        # gather the variable summaries
        variables_summaries = []
        for var in variables_to_train:
            variables_summaries.append(tf.summary.histogram(var.op.name, var))
        variables_summaries = set(variables_summaries)

        # add the training operations
        train_ops = []
        grads_and_vars = optimizer.compute_gradients(
            self.total_loss, var_list=variables_to_train)
        train_op = optimizer.apply_gradients(
            grads_and_vars, global_step=global_step)
        train_ops.append(train_op)

        self.summaries |= variables_summaries
        self.train_op = tf.group(*train_ops)
        return self.train_op
| LucasSheng/avatar-net | models/autoencoder.py | autoencoder.py | py | 5,339 | python | en | code | 173 | github-code | 13 |
8004189178 | #http://www.codeskulptor.org/#user40_Pr2t3Pg6M4Wkq7O.py
"""
Student template code for Project 3
Student will implement five functions:
slow_closest_pair(cluster_list)
fast_closest_pair(cluster_list)
closest_pair_strip(cluster_list, horiz_center, half_width)
hierarchical_clustering(cluster_list, num_clusters)
kmeans_clustering(cluster_list, num_clusters, num_iterations)
where cluster_list is a 2D list of clusters in the plane
"""
import math
import alg_cluster
######################################################
# Code for closest pairs of clusters
def pair_distance(cluster_list, idx1, idx2):
    """Compute the Euclidean distance between two clusters in a list.

    Args:
        cluster_list: list of clusters.
        idx1, idx2: integer indices of the two clusters to compare.

    Returns:
        Tuple (dist, lo, hi) where dist is the distance between
        cluster_list[idx1] and cluster_list[idx2], and lo <= hi are the two
        indices in ascending order.
    """
    dist = cluster_list[idx1].distance(cluster_list[idx2])
    lo, hi = sorted((idx1, idx2))
    return (dist, lo, hi)
def slow_closest_pair(cluster_list):
    """
    Compute the distance between the closest pair of clusters by brute force.

    Input: cluster_list is the list of clusters

    Output: tuple of the form (dist, idx1, idx2) where the centers of the
    clusters cluster_list[idx1] and cluster_list[idx2] have minimum distance
    dist (idx1 < idx2).  Returns (inf, -1, -1) when fewer than two clusters
    are given.
    """
    min_dist = (float('inf'), -1, -1)
    # Examine each unordered pair exactly once (idx1 < idx2).  The original
    # scanned the full idx1 x idx2 grid and computed every distance twice;
    # pair_distance already normalizes the index order, so the result is
    # identical at half the work.
    for idx1 in range(len(cluster_list) - 1):
        for idx2 in range(idx1 + 1, len(cluster_list)):
            candidate = pair_distance(cluster_list, idx1, idx2)
            if candidate[0] < min_dist[0]:
                min_dist = candidate
    return min_dist
def fast_closest_pair(cluster_list):
    """
    Compute the distance between the closest pair of clusters (divide & conquer).

    Input: cluster_list is a list of clusters; a copy is sorted internally by
    horizontal center, so the returned indices refer to that horizontal order.

    Output: tuple of the form (dist, idx1, idx2) where the centers of the
    clusters at those (sorted) positions have minimum distance dist.
    """
    # Work on sorted copies so the caller's list is never mutated.
    temp_list = [cluster.copy() for cluster in cluster_list]
    temp_list.sort(key=lambda cluster: cluster.horiz_center())
    if len(temp_list) <= 3:
        # Base case: brute force is cheapest for tiny inputs.
        return slow_closest_pair(temp_list)
    # BUG FIX: use floor division for all index arithmetic.  The original
    # Python-2 "/" yields floats under Python 3 and breaks list indexing.
    half = len(temp_list) // 2
    left_half = temp_list[:half]
    right_half = temp_list[half:]
    left_best = fast_closest_pair(left_half)
    right_best = fast_closest_pair(right_half)
    if left_best[0] < right_best[0]:
        min_dist = left_best
    else:
        # Shift right-half indices back into temp_list coordinates.
        min_dist = (right_best[0], right_best[1] + half, right_best[2] + half)
    # Check the vertical strip straddling the two halves for a closer pair.
    mid_xcoord = (temp_list[half - 1].horiz_center() +
                  temp_list[half].horiz_center()) / 2
    strip_best = closest_pair_strip(temp_list, mid_xcoord, min_dist[0])
    if strip_best[0] < min_dist[0]:
        min_dist = strip_best
    return min_dist
def closest_pair_strip(cluster_list, horiz_center, half_width):
    """
    Helper function to compute the closest pair of clusters in a vertical strip

    Input: cluster_list is a list of clusters produced by fast_closest_pair
    horiz_center is the horizontal position of the strip's vertical center line
    half_width is the half the width of the strip (i.e; the maximum horizontal distance
    that a cluster can lie from the center line)

    Output: tuple of the form (dist, idx1, idx2) where the centers of the clusters
    cluster_list[idx1] and cluster_list[idx2] lie in the strip and have minimum distance dist.
    Returns (inf, -1, -1) when no qualifying pair exists.
    """
    min_dist = (float('inf'), -1, -1)
    temp = ()
    select = []
    # Keep only the clusters whose centers fall strictly inside the strip.
    for item in cluster_list:
        if math.fabs(item.horiz_center()-horiz_center) < half_width:
            select.append(item)
    # Sort strip members by vertical position so only nearby neighbours
    # need to be compared.
    select.sort(key = lambda cluster: cluster.vert_center())
    for idx1 in range(len(select)-1):
        # Classic closest-pair bound: each point is compared against at most
        # the next three points in vertical order.
        for idx2 in range(idx1+1, (min((idx1+3),(len(select)-1))+1) ,1):
            temp = pair_distance(select, idx1, idx2)
            if temp[0] < min_dist[0]:
                min_dist = temp
    #print min_dist
    # Map indices in the filtered/sorted ``select`` list back to indices in
    # the caller's cluster_list.  NOTE(review): list.index uses equality, so
    # duplicate clusters could map back to the wrong position -- confirm
    # inputs are distinct.
    if min_dist[1] != -1 and min_dist[2] != -1:
        cidx1 = cluster_list.index(select[min_dist[1]])
        cidx2 = cluster_list.index(select[min_dist[2]])
        temp_lst = [min_dist[0],min(cidx1,cidx2),max(cidx1,cidx2)]
        min_dist = tuple(temp_lst)
    return min_dist
#slow_closest_pair([alg_cluster.Cluster(set([]), 0.32, 0.16, 1, 0), alg_cluster.Cluster(set([]), 0.39, 0.4, 1, 0), alg_cluster.Cluster(set([]), 0.54, 0.8, 1, 0), alg_cluster.Cluster(set([]), 0.61, 0.8, 1, 0), alg_cluster.Cluster(set([]), 0.76, 0.94, 1, 0)])
#fast_closest_pair([alg_cluster.Cluster(set([]), 0.32, 0.16, 1, 0), alg_cluster.Cluster(set([]), 0.39, 0.4, 1, 0)])
#fast_closest_pair([alg_cluster.Cluster(set([]), 0.32, 0.16, 1, 0), alg_cluster.Cluster(set([]), 0.39, 0.4, 1, 0), alg_cluster.Cluster(set([]), 0.54, 0.8, 1, 0), alg_cluster.Cluster(set([]), 0.61, 0.8, 1, 0), alg_cluster.Cluster(set([]), 0.76, 0.94, 1, 0)])
######################################################################
# Code for hierarchical clustering
def hierarchical_clustering(cluster_list, num_clusters):
    """Compute a hierarchical clustering of a set of clusters.

    Note: the function may mutate cluster_list.

    Input: list of clusters and the desired number of clusters.
    Output: list of clusters of length num_clusters.  Not yet implemented:
    currently always returns an empty list.
    """
    return list()
######################################################################
# Code for k-means clustering
def kmeans_clustering(cluster_list, num_clusters, num_iterations):
    """Compute the k-means clustering of a set of clusters.

    Note: the function may not mutate cluster_list.

    Input: list of clusters, the desired number of clusters, and the number
    of refinement iterations.
    Output: list of clusters of length num_clusters.  Not yet implemented:
    currently always returns an empty list.  (Initial clusters are meant to
    be positioned at the locations with largest populations.)
    """
    return list()
| chickenoverrice/python_game | python_closestPair.py | python_closestPair.py | py | 6,522 | python | en | code | 0 | github-code | 13 |
2295554766 | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)

# Environment switch: 'prod' uses the hosted Render database, 'dev' a local
# Postgres instance.
ENV = 'prod'
#ENV = 'dev'

# NOTE(review): the secret key and database credentials are committed to
# source -- move them to environment variables before publishing this repo.
app.config['SECRET_KEY'] = "my super secret key"

if ENV == 'dev':
    app.debug = True
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:ert33MNB@localhost/lexus'
else:
    app.debug = True
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://jackson_inventory_user:AjssKyOevzJkzP0GrjZu7OQ9SUkoSneX@dpg-chb67f2k728tp9c87hsg-a.oregon-postgres.render.com/jackson_inventory'

# BUG FIX: this key was misspelled 'SQLALCHECMY_TRACK_MODIFICATIONS', so the
# setting was silently ignored and Flask-SQLAlchemy kept its default
# modification-tracking behavior.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

db = SQLAlchemy(app)
class Feedback(db.Model):
    """ORM model for a single customer feedback submission."""
    __tablename__ = 'feedback'
    id = db.Column(db.Integer, primary_key=True)
    # One feedback row per customer name (enforced by the unique constraint).
    customer = db.Column(db.String(200), unique=True)
    dealer = db.Column(db.String(200))
    # NOTE(review): callers pass the rating straight from request.form as a
    # string; the Integer column relies on driver-side coercion.
    rating = db.Column(db.Integer)
    comments = db.Column(db.Text())
    def __init__(self, customer, dealer, rating, comments):
        self.customer = customer
        self.dealer = dealer
        self.rating = rating
        self.comments = comments
# Inventory item model (the original comment said "Blog Post Model", but this
# table clearly holds inventory items).
class Items(db.Model):
    """ORM model for one inventory item, uniquely keyed by its barcode."""
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    barcode = db.Column(db.String(100), nullable=False, unique=True)
    name = db.Column(db.String(250), nullable=False)
    category = db.Column(db.String(150))
    # Minimum desired quantity and current on-hand quantity.
    minqty = db.Column(db.Integer)
    ohqty = db.Column(db.Integer)
    imgurl = db.Column(db.String(250))
    def __repr__(self):
        return '<barcode %r>' % self.barcode
class ItemForm(FlaskForm):
    """WTForms form mirroring the Items model fields."""
    barcode = StringField("barcode", validators=[DataRequired()])
    name = StringField("name", validators=[DataRequired()])
    category = StringField("category")
    # NOTE(review): the quantity fields are StringFields, so values reach the
    # Integer columns as strings and rely on driver-side coercion.
    minqty = StringField("minqty")
    ohqty = StringField("ohqty")
    imgurl = StringField("imgurl")
    submit = SubmitField("Submit")
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/addinv')
def addinv():
    """Render the add-inventory page."""
    return render_template('addinv.html')
# Add a database record via the WTForms-backed form.
@app.route('/addtodb', methods=['GET', 'POST'])
def addtodb():
    """Handle the add-item form: insert a new Items row and list all items.

    On a valid POST the item is inserted only when no row with the same
    barcode already exists; the form fields are then cleared so the page
    re-renders with an empty form.  GET simply renders the form plus the
    current item list.
    """
    barcode = None
    form = ItemForm()
    if form.validate_on_submit():
        item = Items.query.filter_by(barcode=form.barcode.data).first()
        if item is None:
            item = Items(barcode=form.barcode.data, name=form.name.data,
                         category=form.category.data, minqty=form.minqty.data,
                         ohqty=form.ohqty.data, imgurl=form.imgurl.data)
            db.session.add(item)
            db.session.commit()
        barcode = form.barcode.data
        # Clear the submitted values so the form renders empty again.
        form.barcode.data = ''
        form.name.data = ''
        form.category.data = ''
        form.minqty.data = ''
        form.ohqty.data = ''
        # BUG FIX: the original assigned ``form.imgurl = ''``, replacing the
        # WTForms field object itself with a plain string (breaking any later
        # render/validation of that field); clear its .data like the others.
        form.imgurl.data = ''
    our_items = Items.query.order_by(Items.name)
    return render_template("addtodb.html", form=form, barcode=barcode, our_items=our_items)
@app.route('/add_inv', methods=['POST'])
def add_inv():
    """Insert an inventory item from a raw (non-WTForms) POST body.

    Expects form fields: barcode, name, category, ohqty, minqty, imgurl.
    NOTE(review): a duplicate barcode violates the unique constraint and
    raises on commit -- consider checking first, as /addtodb does.
    """
    barcode = request.form['barcode']
    name = request.form['name']
    category = request.form['category']
    ohqty = request.form['ohqty']
    minqty = request.form['minqty']
    imgurl = request.form['imgurl']
    # Create a new Items row from the submitted fields and persist it.
    # (A leftover debug print of imgurl was removed here.)
    item = Items(barcode=barcode, name=name, category=category,
                 ohqty=ohqty, minqty=minqty, imgurl=imgurl)
    db.session.add(item)
    db.session.commit()
    return 'Item added to database'
@app.route('/list_inv')
def list_inv():
    """Render the full inventory listing, ordered by category."""
    # Grab all of the items from the database, sorted by category.
    items = Items.query.order_by(Items.category)
    return render_template("list_inv.html", items=items)
@app.route('/submit', methods=['POST'])
def submit():
    """Persist one feedback submission per customer name.

    Re-renders the index page with a message when required fields are
    missing or the customer already submitted feedback; renders the success
    page otherwise.
    """
    # The route only accepts POST, so this guard is always true; if it were
    # ever false the view would implicitly return None.
    if request.method == 'POST':
        customer = request.form['customer']
        dealer = request.form['dealer']
        # NOTE(review): rating arrives as a string; the Integer column in
        # Feedback relies on driver-side coercion -- confirm or cast upstream.
        rating = request.form['rating']
        comments = request.form['comments']
        #print(customer, dealer, rating, comments)
        if customer == '' or dealer == '':
            return render_template('index.html', message='Please enter required fields')
        if db.session.query(Feedback).filter(Feedback.customer == customer).count() == 0: #customer does not exist
            data = Feedback(customer, dealer, rating, comments)
            db.session.add(data)
            db.session.commit()
            return render_template('success.html')
        return render_template('index.html', message='you have already submitted feedback')
if __name__ == '__main__':
app.run() | sjackson1837/lexusfeedback | app.py | app.py | py | 5,044 | python | en | code | 0 | github-code | 13 |
26148505680 | from decimal import Decimal
from math import ceil, log
def isqrt(n):
    """Return the integer square root floor(sqrt(n)) for n >= 0.

    Pure-integer Newton iteration.  The original seeded a bit-by-bit method
    with the float expression 4**ceil(log(n, 4)), which can be mis-sized by
    floating-point rounding for very large n; this version uses no floats
    at all and is exact for arbitrary-precision integers.

    Raises:
        ValueError: if n is negative (the original also raised via math.log).
    """
    if n < 0:
        raise ValueError("isqrt is undefined for negative numbers")
    if n == 0:
        return 0
    res = n
    candidate = (res + 1) // 2
    # Newton's method on f(x) = x^2 - n, kept entirely in integers; the
    # iteration is monotonically decreasing once above the true root.
    while candidate < res:
        res = candidate
        candidate = (res + n // res) // 2
    return res
def findLength(n):
    """Return the period length of the continued fraction of sqrt(n).

    n must not be a perfect square.  Uses the standard recurrence
    m' = d*a - m, d' = (n - m'^2) / d, a' = floor((a0 + m') / d'), which
    terminates when a' == 2*a0 (the end of one period).

    BUG FIX: all divisions are now floor divisions.  The original used
    Python-2 "/", which becomes float division on Python 3 and silently
    loses exactness in the recurrence.
    """
    count = 1
    m0 = 0
    d0 = 1
    # NOTE(review): int(n ** 0.5) is exact only while n fits float precision;
    # fine for the Euler-64 range (n <= 10000).
    a0 = int(n ** 0.5)
    mi = d0 * a0 - m0
    di = (n - mi * mi) // d0
    ai = (a0 + mi) // di
    while ai != 2 * a0:
        count += 1
        mi = di * ai - mi
        di = (n - mi * mi) // di
        ai = (a0 + mi) // di
    return count
# Project Euler 64 driver: count how many n <= limit have a continued
# fraction for sqrt(n) with an ODD period length (perfect squares excluded).
count = 0
limit = 10000
# Lazily generate the non-square n in [2, limit].
items = (i for i in range(2, limit + 1) if isqrt(i) ** 2 != i)
print("Generated items")
for i in items:
    # Odd period test via the low bit of the period length.
    if findLength(i) & 1:
        count += 1
print(count)
2328044396 | # getverse.py
# gets a verse from verses database
# e.g., when user enters "John 3:16" it pulls up the verse and reference
import sqlite3
def get_verse(book, chapter, verse):
    """Look up a single verse in verses.db.

    Args:
        book: book name, e.g. "John".
        chapter: chapter number.
        verse: verse number.

    Returns:
        The verse text, or None when no matching row exists.
    """
    connection = sqlite3.connect('verses.db')
    try:
        cursor = connection.cursor()
        # Parameterized query: safe against SQL injection from user input.
        cursor.execute(
            "SELECT text FROM verses WHERE book=? AND chapter=? AND verse=?",
            (book, chapter, verse))
        result = cursor.fetchone()
    finally:
        # BUG FIX: close even when the query raises -- the original leaked
        # the connection on any sqlite error.
        connection.close()
    return result[0] if result else None
def main():
    """Prompt for a reference like "John 3:16", look it up, and print it."""
    user_input = input("Enter the book, chapter, and verse (e.g., John 3:16): ").strip()
    try:
        # Split into the book ("John") and the "chapter:verse" ("3:16") part.
        # NOTE(review): book names containing spaces (e.g. "1 John") split
        # incorrectly here -- confirm the data set uses single-word books.
        book, reference = user_input.split(' ', 1)
        chapter, verse = map(int, reference.split(':'))
    except ValueError:
        print("Invalid input. Please enter the book, chapter, and verse in the format 'John 3:16'.")
        return
    verse_text = get_verse(book, chapter, verse)
    if verse_text:
        print(f"\n{user_input} - {verse_text}\n")
    else:
        print(f"\nVerse not found for {user_input}\n")
if __name__ == "__main__":
main()
| jwelkener/InstaBible | getverse.py | getverse.py | py | 1,094 | python | en | code | 0 | github-code | 13 |
36754387428 | #!/usr/bin/env python3
import pandas as pd
def average_temperature():
    """Return the mean July air temperature from the Kumpula 2017 data set."""
    df = pd.read_csv("src/kumpula-weather-2017.csv")
    july = df[df["m"] == 7]
    return july["Air temperature (degC)"].mean()
def main():
    """Print the average July temperature."""
    # BUG FIX: the original concatenated str() -- an empty string -- so the
    # computed average was never printed.
    print("Average temperature in July: " + str(average_temperature()))
    return
if __name__ == "__main__":
main()
| tugee/dap2020 | part04-e10_average_temperature/src/average_temperature.py | average_temperature.py | py | 339 | python | en | code | 0 | github-code | 13 |
73564103696 | #One hot encoding
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
def get_mae(X, y):
    """Return the cross-validated mean absolute error of a 50-tree forest.

    sklearn reports MAE through the "neg_mean_absolute_error" scorer as a
    negative value (by convention, larger is better), so the mean score is
    negated to yield a positive error.
    """
    scores = cross_val_score(RandomForestRegressor(50), X, y,
                             scoring='neg_mean_absolute_error')
    return -1 * scores.mean()
train_data = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
target = train_data.SalePrice
# Drop only the Id column (it carries no predictive signal).
# NOTE(review): SalePrice is NOT dropped here, so the target leaks into the
# feature matrix used below -- confirm and drop it before training.
prediction_data = train_data.drop(['Id'],axis=1)
# Split the data into train and test partitions.
X_train, X_test, y_train,y_test = train_test_split(prediction_data, target)
# Imputer for columns with missing values.
# NOTE(review): ``Imputer`` is never imported anywhere in this file, so this
# line raises NameError; modern sklearn provides sklearn.impute.SimpleImputer.
my_imputer = Imputer()
# One-hot encode categorical columns with pandas.
train_one_hot_encoding = pd.get_dummies(X_train)
test_one_hot_encoding = pd.get_dummies(X_test)
# Align test and train columns (left join keeps the training columns).
final_train_data, final_test_data = train_one_hot_encoding.align(test_one_hot_encoding, join='left',axis=1)
# Impute missing values (fit on train, transform test; returns numpy arrays).
imputed_X_train = my_imputer.fit_transform(final_train_data)
imputed_X_test = my_imputer.transform(final_test_data)
# Target value to predict.
target =final_train_data.SalePrice
# Build and fit the model.
modelo= RandomForestRegressor()
modelo.fit(imputed_X_train,target)
# Predict on the held-out partition.
prediccion = modelo.predict(imputed_X_test)
print("Primeras predicciones")
# predict() returns an array, so wrap it in a pandas DataFrame for display.
predicciones = pd.DataFrame(prediccion)
print(predicciones.head())
# Report the cross-validated mean absolute error.
error_absuluto_one_hot = get_mae(imputed_X_train,target)
print('Mean Abslute Error with One-Hot Encoding: ' + str(int(error_absuluto_one_hot)))
#Creamos one hot encoding para las columnas categoricas
#one_hot_encoding_train = pd.get_dummies(train_reduced_data)
#one_hot_encoding_test = pd.get_dummies(test_reduced_data)
#Alineamos test_data y train_data
#final_train_data, final_test_data = one_hot_encoding_train.align(one_hot_encoding_test, join='left',axis=1)
#print("-----Data alineana y con one hot encoding-------")
#print(final_train_data.describe())
#final_target =final_train_data.SalePrice
#modelo = RandomForestRegressor()
#modelo.fit(final_train_data,final_target)
#prediccion = modelo.predict(final_test_data)
#print("La prediccion con one_hot_encoding es de " + str(int(prediccion)))
#error_absoluto_onehot = get_mae(final_train_data, final_target)
#print('Mean Abslute Error with One-Hot Encoding: ' + str(int(error_absoluto_onehot)))
| jaimecuellar14/MachineLearning | onehotencoding.py | onehotencoding.py | py | 2,799 | python | en | code | 0 | github-code | 13 |
773523086 | import re
# Report, for each text, whether the pattern '42' occurs in it.
p = re.compile(r'42')

text1 = "23 street"
text2 = "42 meaning of life"

efr = p.findall(text1)
trg = p.findall(text2)

# BUG FIX: the original if/elif printed the same two messages from both
# branches, and the first branch claimed "42 not in the text1" precisely
# when '42' WAS found in text1.  Each text is now reported independently.
if '42' in efr:
    print('42 in the text1')
else:
    print('42 not in the text1')
if '42' in trg:
    print('42 in the text2')
else:
    print('42 not in the text2')
41726570064 | from pygame import image, transform
from constants import ICON_SIZE
class FileReadingException(Exception):
    """Raised when the best-score file cannot be opened or read."""

    def __init__(self):
        message = 'Something went wrong while reading your file'
        super().__init__(message)
def music_icon(window, music):
    """Draw the sound-on/sound-off icon in the top-left corner of *window*.

    window -- a pygame display surface
    music  -- True to show the "sound on" icon, anything else shows "off"
    """
    state = 'on' if music is True else 'off'
    icon = transform.scale(image.load(f'Images/sound-{state}.png'), ICON_SIZE)
    rect = icon.get_rect()
    rect.top = 5
    rect.left = 5
    window.blit(icon, rect)
def best_score_icon(window):
    """Draw the best-score icon next to the sound icon on *window*."""
    icon = transform.scale(image.load('Images/best_score.jpeg'), ICON_SIZE)
    rect = icon.get_rect()
    rect.top = 7
    rect.left = 70
    window.blit(icon, rect)
def take_best_score(txt_file_name):
    """Read the stored best score; reset the file to "0" when it is corrupt.

    Returns the score as an int.  Raises FileReadingException when the file
    cannot be opened or read.
    """
    try:
        with open(txt_file_name, 'r') as file_handle:
            raw_score = file_handle.read()
    except Exception:
        raise FileReadingException()
    try:
        return int(raw_score)
    except Exception:
        # Corrupt contents: persist a zero score and report it.
        with open(txt_file_name, 'w') as file_handle:
            file_handle.write('0')
        return 0
def write_best_score(txt_file_name, score):
    """Persist *score* to the given text file, overwriting previous contents."""
    with open(txt_file_name, 'w') as out_file:
        out_file.write(str(score))
| Sebastian-Abramowski/Snake | other.py | other.py | py | 1,319 | python | en | code | 0 | github-code | 13 |
42647147157 | import argparse, pickle, json, time, sys, os
import pandas as pd
sys.path.insert(1, os.getcwd()+"/../")
sys.path.insert(1, os.getcwd()+"/../compass")
sys.path.insert(1, os.getcwd()+"/../gestalt")
sys.path.insert(1, os.getcwd()+"/../utils")
sys.path.insert(1, os.getcwd()+"/../experiments")
sys.path.insert(1, os.getcwd()+"/../../data")
sys.path.insert(1, os.getcwd()+"/../code")
sys.path.insert(1, os.getcwd()+"/../code/compass")
sys.path.insert(1, os.getcwd()+"/../code/gestalt")
sys.path.insert(1, os.getcwd()+"/../code/utils")
sys.path.insert(1, os.getcwd()+"/../code/experiments")
sys.path.insert(1, os.getcwd()+"/../../data")
from compass import Point
from compass import Compass
from search import InvertedIndex
from conceptMapping import ConceptMapper
from exp_compass import CompassExperimentRunner
class Experimenter():
    def __init__(self, invertedIndexFile, referenceLocations, conceptMapFile, cardinality_invariant=False):
        """Load the inverted index, reference locations, concept maps and all
        ground-truth query/result files for the three experiment families.

        Args:
            invertedIndexFile: CSV of location predictions used to build the
                inverted index.
            referenceLocations: JSON file mapping location name -> quadrant
                structure (quadrant -> list of object names).
            conceptMapFile: pickle of per-location concept maps.
            cardinality_invariant: when True, object-centric queries should
                be matched under rotation (see runOoInvExperiments).
        """
        self.invertedIndex = InvertedIndex(invertedIndexFile)
        self.cardinality_invariant = cardinality_invariant
        self.ER = CompassExperimentRunner()
        self.CM = ConceptMapper()
        with open(referenceLocations, "r") as inFile:
            self.referenceLocations = json.load(inFile)
        with open(conceptMapFile, "rb") as inFile:
            self.conceptMaps = pickle.load(inFile)
        # Ground-truth queries/results: Location-Centric (LO),
        # Object-Centric (OO) and Object-Centric rotation-invariant (OOI).
        self.LO_experimentsDict = {}
        with open('../data/SV/ground_truth_queries/Location-Centric_ground_truth_queries.json') as f:
            self.LO_experimentsDict = json.load(f)
        with open('../data/SV/ground_truth_queries/Location-Centric_ground_truth_results.json') as f:
            self.LO_experimentsResultsDict = json.load(f)
        self.OO_experimentsDict = {}
        with open('../data/SV/ground_truth_queries/Object-Centric_ground_truth_queries.json') as f:
            self.OO_experimentsDict = json.load(f)
        with open('../data/SV/ground_truth_queries/Object-Centric_ground_truth_results.json') as f:
            self.OO_experimentsResultsDict = json.load(f)
        self.OOI_experimentsDict = {}
        with open('../data/SV/ground_truth_queries/Object-Centric-Inv_ground_truth_queries.json') as f:
            self.OOI_experimentsDict = json.load(f)
        with open('../data/SV/ground_truth_queries/Object-Centric-Inv_ground_truth_results.json') as f:
            self.OOI_experimentsResultsDict = json.load(f)
def precision(self, expected_results:set, actual_results:set):
correct = len(expected_results.intersection(actual_results))
if len(actual_results) == 0:
return 1.0
return correct/len(actual_results)
def recall(self, expected_results:set, actual_results:set):
correct = len(expected_results.intersection(actual_results))
if len(expected_results) == 0:
return 1.0
return correct/len(expected_results)
def compareLocStructures(self, a, b):
matches = 0
for quadrant in a:
matches += len(set(a[quadrant]).intersection(set(b[quadrant])))
return matches
    def runLoExperiments(self):
        """Run every Location-Centric ground-truth query and report metrics.

        A location is returned only when it matches ALL terms of a query's
        quadrant structure.  Per-query and overall precision/recall plus
        wall/processor timings are printed to stdout.
        """
        running_precision = 0
        running_recall = 0
        for i, experiment in enumerate(self.LO_experimentsDict.keys()):
            start_wall_time = time.time()
            start_proc_time=time.process_time()
            locationHitCounter = {}
            # Count, per reference location, how many query terms it matches.
            for loc in self.referenceLocations:
                matches = self.compareLocStructures(self.referenceLocations[loc], self.LO_experimentsDict[experiment])
                if matches > 0:
                    locationHitCounter[loc] = matches
            end_proc_time=time.process_time()
            end_wall_time = time.time()
            print("\nReturned", len(locationHitCounter), "candidate locations for query:",self.LO_experimentsDict[experiment].items() )
            # Total number of query terms across the four quadrants.
            querylist = [self.LO_experimentsDict[experiment][x] for x in self.LO_experimentsDict[experiment]]
            num_query_terms = len(querylist[0]) + len(querylist[1]) + len(querylist[2]) + len(querylist[3])
            actual_results = []
            # Keep only the locations that matched every query term.
            for loc in locationHitCounter:
                if locationHitCounter[loc] == num_query_terms:
                    actual_results.append(loc)
            print("GESTALT said: ", actual_results)
            print("GT says: ", list(self.LO_experimentsResultsDict[experiment]))
            print(locationHitCounter)
            precision = self.precision(actual_results=set(actual_results), expected_results=set(self.LO_experimentsResultsDict[experiment]))
            running_precision += precision
            print("PRECISION: ", precision)
            recall = self.recall(actual_results=set(actual_results), expected_results=set(self.LO_experimentsResultsDict[experiment]))
            running_recall += recall
            print("RECALL: ", recall)
            print("PROCESSOR TIME TO EXECUTE PICTORIAL LO QUERY #:", experiment, "was", end_proc_time-start_proc_time)
            print("WALL TIME TAKEN TO EXECUTE PICTORIAL LO QUERY #:", "was",end_wall_time-start_wall_time,"\n")
        print("OVERALL PRECISION: ", running_precision/len(self.LO_experimentsDict.keys()))
        print("OVERALL RECALL: ", running_recall/len(self.LO_experimentsDict.keys()))
    def runOoExperiments(self):
        """Run the Object-Centric queries (fixed cardinality, assumed north).

        Each query's points are turned into a concept map whose search order
        is matched against every stored location concept map; precision and
        recall against the ground truth are printed per query and overall.
        """
        running_precision = 0
        running_recall = 0
        for i, experiment in enumerate(self.OO_experimentsDict):
            if i == 7: # skip duplicate object query until fixed, check lines 266,267 if removing this
                continue
            print(self.OO_experimentsDict[experiment])
            names = self.OO_experimentsDict[experiment]['names']
            lats = self.OO_experimentsDict[experiment]['lats']
            longs = self.OO_experimentsDict[experiment]['longs']
            points = []
            preLocs = []
            # Build Point objects for the query, all tagged with the sentinel
            # location name consumed by the query-map generator below.
            for j in range(len(names)):
                pt = Point(name=names[j],
                            x_coord=longs[j],
                            y_coord=lats[j])
                points.append(pt)
                preLocs.append("PICTORIAL_QUERY")
            self.OO_experimentsDict[experiment]['point'] = points
            self.OO_experimentsDict[experiment]['preLocs'] = preLocs
            query_dict = {
                        "name":self.OO_experimentsDict[experiment]['names'],
                        "longitude":self.OO_experimentsDict[experiment]['longs'],
                        "latitude":self.OO_experimentsDict[experiment]['lats'],
                        "predicted_location":self.OO_experimentsDict[experiment]['preLocs']
                        }
            query_df = pd.DataFrame(data=query_dict)
            actual_results = []
            #Cardinality Assumed to be north
            print("Querying experiment", i)
            for locationCM in self.conceptMaps.keys():
                # NOTE(review): generateQueryMapDict does not depend on
                # locationCM -- this call looks loop-invariant and could be
                # hoisted if it is side-effect free; confirm before changing.
                cm_dict = self.ER.generateQueryMapDict(query=query_df)
                #print("CM_DICT", cm_dict)
                ##queryMap = cm_dict["PICTORIAL_QUERY"]["concept_map"].copy()
                searchOrder = cm_dict["PICTORIAL_QUERY"]["search_order"].copy()
                #print("SEARCHORDER", searchOrder)
                result = self.CM.searchMatrix(self.conceptMaps[locationCM],searchOrder.copy())
                if result == True:
                    actual_results.append(locationCM)
                result = False
            print("GESTALT said: ", actual_results)
            print("GT says: ", list(self.OO_experimentsResultsDict[experiment]))
            precision = self.precision(actual_results=set(actual_results), expected_results=set(self.OO_experimentsResultsDict[experiment]))
            running_precision += precision
            print("PRECISION: ", precision)
            recall = self.recall(actual_results=set(actual_results), expected_results=set(self.OO_experimentsResultsDict[experiment]))
            running_recall += recall
            print("RECALL: ", recall)
        # Denominators use len-1 because query 7 is skipped above.
        print("OVERALL PRECISION: ", running_precision/(len(self.OO_experimentsDict.keys()) -1)) #minus 1 since skipping query 7
        print("OVERALL RECALL: ", running_recall/(len(self.OO_experimentsDict.keys()) -1)) #minus 1 since skipping query 7
    def runOoInvExperiments(self):
        """Run the Object-Centric cardinality-invariant queries.

        Unlike runOoExperiments, every rotation of each query map is matched
        against every stored concept map, so results do not depend on the
        query's assumed cardinal orientation.
        """
        running_precision = 0
        running_recall = 0
        for i, experiment in enumerate(self.OOI_experimentsDict):
            if i == 7: # skip duplicate object query until fixed
                continue
            print(self.OOI_experimentsDict[experiment])
            names = self.OOI_experimentsDict[experiment]['names']
            lats = self.OOI_experimentsDict[experiment]['lats']
            longs = self.OOI_experimentsDict[experiment]['longs']
            points = []
            preLocs = []
            # Build Point objects for the query, all tagged with the sentinel
            # location name consumed by the rotation generator below.
            for j in range(len(names)):
                pt = Point(name=names[j],
                            x_coord=longs[j],
                            y_coord=lats[j])
                points.append(pt)
                preLocs.append("PICTORIAL_QUERY")
            self.OOI_experimentsDict[experiment]['point'] = points
            self.OOI_experimentsDict[experiment]['preLocs'] = preLocs
            query_dict = {
                        "name":self.OOI_experimentsDict[experiment]['names'],
                        "longitude":self.OOI_experimentsDict[experiment]['longs'],
                        "latitude":self.OOI_experimentsDict[experiment]['lats'],
                        "predicted_location":self.OOI_experimentsDict[experiment]['preLocs']
                        }
            query_df = pd.DataFrame(data=query_dict)
            actual_results = []
            print("Querying experiment", i)
            all_rotations = self.ER.getQueryMapConfigurations(points=self.OOI_experimentsDict[experiment]['point'])
            print("GOT", len(all_rotations), "ROTATIONS")
            # A location is a hit when ANY rotation of the query matches it.
            # NOTE(review): a location matching under several rotations is
            # appended multiple times; the later set() conversion dedupes,
            # but the printed raw list may contain duplicates.
            for rotation in all_rotations:
                for locationCM in self.conceptMaps.keys():
                    result = self.CM.searchMatrix(self.conceptMaps[locationCM],rotation['PICTORIAL_QUERY']['search_order'].copy())
                    if result == True:
                        actual_results.append(locationCM)
                    result = False
            print("GESTALT said: ", actual_results)
            print("GT says: ", list(self.OOI_experimentsResultsDict[experiment]))
            precision = self.precision(actual_results=set(actual_results), expected_results=set(self.OOI_experimentsResultsDict[experiment]))
            running_precision += precision
            print("PRECISION: ", precision)
            recall = self.recall(actual_results=set(actual_results), expected_results=set(self.OOI_experimentsResultsDict[experiment]))
            running_recall += recall
            print("RECALL: ", recall)
        # Denominators use len-1 because query 7 is skipped above.
        print("OVERALL PRECISION: ", running_precision/(len(self.OOI_experimentsDict.keys()) -1)) #minus 1 since skipping query 7
        print("OVERALL RECALL: ", running_recall/(len(self.OOI_experimentsDict.keys()) -1)) #minus 1 since skipping query 7
if __name__=="__main__":
    # Command-line entry point: parse flags and dispatch to the requested
    # experiment family (location-centric and/or object-centric, optionally
    # rotation-invariant).
    argparser = argparse.ArgumentParser() # initialize the argParser
    argparser.add_argument( "-if", "--inputFile",
                            help="The file used to buid an inverted index, should be a CSV with location predictions",
                            type=str,
                            default=None,
                            required=True)
    argparser.add_argument( "-cmf", "--conceptMapFile",
                            help="The File that stores all the concept Maps, should be a PKL file with location predictions",
                            type=str,
                            default=None,
                            required=True)
    argparser.add_argument( "-lf", "--locationsFile",
                            help="The File that holds the reference locations for the query interface",
                            type=str,
                            default=None,
                            required=True)
    argparser.add_argument( "--cardinalityInvariant",
                            help="Tell the system to query in cardinality invariant mode",
                            default=False,
                            action="store_true",
                            required=False)
    argparser.add_argument( "--locationCentric",
                            help="Tell the system to query in Location-centric mode",
                            default=False,
                            action="store_true",
                            required=False)
    argparser.add_argument( "--objectCentric",
                            help="Tell the system to query in Location-centric mode",
                            default=False,
                            action="store_true",
                            required=False)
    flags = argparser.parse_args()
    madScientist = Experimenter(invertedIndexFile=flags.inputFile,
                                referenceLocations=flags.locationsFile,
                                conceptMapFile=flags.conceptMapFile,
                                cardinality_invariant=flags.cardinalityInvariant)
    # Dispatch: --cardinalityInvariant selects the rotation-invariant variant
    # of the object-centric run.
    if flags.locationCentric:
        madScientist.runLoExperiments()
    if flags.objectCentric and not flags.cardinalityInvariant:
        madScientist.runOoExperiments()
    if flags.objectCentric and flags.cardinalityInvariant:
        madScientist.runOoInvExperiments()
21679027552 | import actions.util as util
import util.logger as logger
import discord_handler.player_singleton as player_singleton
import discord_handler.embed as embed
import discord.ext.commands as commands
import yt_dlp
import os
import asyncio
import traceback
YTDL_FORMAT_OPTIONS = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0', # bind to ipv4 since ipv6 addresses cause issues sometimes
}
"""YoutubeDL options. These are used to download the song from youtube using yt_dlp library."""
FFMPEG_OPTIONS = {
'options': '-vn',
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5'
}
"""FFMPEG options. These are used to play the song using ffmpeg library."""
async def __start_queue(ctx: commands.Context):
    """Play queued songs until the queue is empty, then disconnect the bot.

    Pops songs off the shared player queue one at a time, announces each in
    the text channel, and waits for playback to finish before moving on.

    Args:
        ctx (commands.Context): The context of the command.

    Returns:
        None
    """
    while not player_singleton.is_empty():
        song = player_singleton.music_queue.pop(0)
        await ctx.send(embed=embed.create_card(
            "Now playing",
            f"Song **{song['title']}** is now playing."
        ))
        player_singleton.play(ctx, FFMPEG_OPTIONS, os.getenv("FFMPEG_PATH"), song)
        # Playback runs in discord.py's own audio thread, so poll every half
        # second until the current track finishes.
        while ctx.voice_client.is_playing():
            await asyncio.sleep(0.5)
    player_singleton.current_song = None
    await ctx.voice_client.disconnect()
async def play(ctx: commands.Context, url: str):
    """Queue a YouTube song (resolving it via yt_dlp) and start playback.

    Guard clauses reject bot messages, unauthorized guilds, and authors not
    in a voice channel; the bot joins the author's channel if needed.  On
    metadata-extraction failure an error card is sent and, when idle, the
    bot disconnects.
    """
    if util.is_a_bot_message(ctx):
        return
    if not util.is_an_authorized_guild(ctx):
        await ctx.send("Guild is not authorized.")
        return
    if not util.author_is_in_voice_channel(ctx):
        await ctx.send("You are not in a voice channel. Please join a voice channel and try again.")
        return
    if not util.bot_is_in_voice_channel(ctx):
        await ctx.author.voice.channel.connect()
    video = None
    try:
        # Resolve the stream URL and title without downloading the media.
        youtube = yt_dlp.YoutubeDL(params=YTDL_FORMAT_OPTIONS)
        video = youtube.extract_info(url, download=False)
    # NOTE(review): bare except also swallows KeyboardInterrupt and
    # asyncio.CancelledError -- prefer ``except Exception:`` here.
    except:
        await ctx.send(embed=embed.create_card(
            "Error",
            f"Could not get the video **{url}**."
        ))
        logger.emit_log(traceback.format_exc(), 'CRITICAL')
        if not ctx.voice_client.is_playing():
            await ctx.voice_client.disconnect()
        return
    player_singleton.add_song_to_queue({
        'url': video['url'],
        'title': video['title']
    })
    # Start the queue if idle; otherwise just confirm the enqueue.
    if not ctx.voice_client.is_playing():
        await __start_queue(ctx)
    else:
        await ctx.send(embed=embed.create_card(
            "Song added to queue",
            f"Song **{video['title']}** was added to the queue."))
23321799799 | import scrapy
class FacebookSpider(scrapy.Spider):
    """Scrapy spider for Meta's job-listing pages (metacareers.com).

    NOTE(review): the selectors rely on obfuscated CSS class names such as
    "_8sef"/"_8sel" that Meta can regenerate at any time -- expect breakage
    and keep the class names in one place for easy updating.
    """
    name = "facebook"
    start_urls = ["https://www.metacareers.com/jobs?page=1&results_per_page=100#search_result"]
    def parse(self, response):
        """Yield one record per job card, then follow the "Next" button."""
        for job_opening in response.xpath('//a[@class="_8sef"]'):
            relative_link = job_opening.xpath('./@href').extract_first()
            application_link = response.urljoin(relative_link)
            job_name = job_opening.xpath('.//div[@class="_8sel"]/text()').extract_first()
            # Location: first subtitle div inside the location container.
            location_div = job_opening.xpath('.//div[@class="_8sen"]')
            location_div_specific = location_div.xpath('.//div[@class="_8see"]')
            if location_div_specific:
                locations = self.extract_subtitle(location_div_specific[0])
            else:
                locations = "N/A"
            # Categories: all subtitle divs inside the category container,
            # joined with ':' separators.
            category_div = job_opening.xpath('.//div[@class="_8seh"]')
            category_div_specific = category_div.xpath('.//div[@class="_8see"]')
            if len(category_div_specific) > 0:
                categories = []
                for div in category_div_specific:
                    category = self.extract_subtitle(div)
                    categories.append(category)
                categories = ":".join(categories)
            else:
                categories = "N/A"
            yield {
                'company' : 'Facebook',
                'job' : job_name.strip(),
                'application' : application_link.strip(),
                'category' : categories.strip(),
                'location': locations.strip()
            }
        # Pagination: follow the anchor whose visible text is exactly "Next".
        buttons = response.xpath('//a[@role="button"]')
        for button in buttons:
            text = button.xpath('./text()').extract_first()
            if text == "Next":
                next_page_link = button.xpath('./@href').extract_first()
                next_page_url = response.urljoin(next_page_link)
                yield scrapy.Request(url=next_page_url, callback=self.parse)
    def extract_subtitle(self, subtitle_selector):
        """Return the subtitle text, appending any tooltip ("+N more")
        entries joined with ':' separators."""
        primary_text = subtitle_selector.xpath('./text()').extract_first()
        supplemental = subtitle_selector.xpath('./div[@class="_9o36"]')
        if supplemental:
            supplemental_texts = supplemental.xpath('./@data-tooltip-content').extract_first()
            supplemental_texts = supplemental_texts.split('\n')
            supplemental_texts = [t.strip() for t in supplemental_texts]
            combined_texts = [primary_text] + supplemental_texts
            combined_texts = ":".join(combined_texts)
            return combined_texts
        return primary_text
| SW386/job-scraper | careers/spiders/facebook.py | facebook.py | py | 2,623 | python | en | code | 1 | github-code | 13 |
33080014104 | from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
STATUS = ((0, "Draft"), (1, "Published"))
SCORE_CHOICES = [
(0, '0.0 - Worst Game Ever'),
(0.5, '0.5 - Horrible'),
(1, '1.0 - Terrible'),
(1.5, '1.5 - Rubbish'),
(2, '2.0 - Bad'),
(2.5, '2.5 - Mediocre'),
(3, '3.0 - Playable'),
(3.5, '3.5 - Ok'),
(4, '4.0 - Good'),
(4.5, '4.5 - Great'),
(5, '5.0 - Master Piece'),
]
class Game(models.Model):
""" Game Model """
title = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
developer = models.CharField(max_length=200)
score = models.DecimalField(decimal_places=2, max_digits=3)
image = CloudinaryField('image')
description = models.TextField()
status = models.IntegerField(choices=STATUS, default=0)
class Meta:
ordering = ['-score']
def __str__(self):
return self.title
class Review(models.Model):
""" Game Review Model """
username = models.ForeignKey(User, on_delete=models.CASCADE)
game = models.ForeignKey(
Game, on_delete=models.CASCADE, related_name='reviews')
score = models.DecimalField(
choices=SCORE_CHOICES, decimal_places=1, max_digits=2)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
body = models.TextField(blank=True)
approved = models.BooleanField(default=False)
class Meta:
ordering = ['-created_on']
def __str__(self):
return f'Review of {self.game} by {self.username}'
| JordanCH05/VideoGameReviews | reviewsite/models.py | models.py | py | 1,639 | python | en | code | 1 | github-code | 13 |
36979126253 | from contextlib import closing
from pathlib import Path
import tempfile
import unittest
from shark_engine.support.compiler_dl import *
from shark_engine.support.compiler_api import *
class DlFlagsTest(unittest.TestCase):
def testDefaultFlags(self):
session = Session()
flags = session.get_flags()
print(flags)
self.assertIn("--iree-input-type=auto", flags)
def testNonDefaultFlags(self):
session = Session()
flags = session.get_flags(non_default_only=True)
self.assertEqual(flags, [])
session.set_flags("--iree-input-type=none")
flags = session.get_flags(non_default_only=True)
self.assertIn("--iree-input-type=none", flags)
def testFlagsAreScopedToSession(self):
session1 = Session()
session2 = Session()
session1.set_flags("--iree-input-type=tosa")
session2.set_flags("--iree-input-type=none")
self.assertIn("--iree-input-type=tosa", session1.get_flags())
self.assertIn("--iree-input-type=none", session2.get_flags())
def testFlagError(self):
session = Session()
with self.assertRaises(ValueError):
session.set_flags("--does-not-exist=1")
class DlInvocationTest(unittest.TestCase):
def testCreate(self):
session = Session()
inv = session.invocation()
class DlOutputTest(unittest.TestCase):
def testOpenMembuffer(self):
out = Output.open_membuffer()
def testOpenMembufferExplicitClose(self):
out = Output.open_membuffer()
out.close()
def testOpenMembufferWrite(self):
out = Output.open_membuffer()
out.write(b"foobar")
mem = out.map_memory()
self.assertEqual(b"foobar", bytes(mem))
out.close()
def testOpenFileNoKeep(self):
file_path = tempfile.mktemp()
out = Output.open_file(file_path)
try:
out.write(b"foobar")
self.assertTrue(Path(file_path).exists())
finally:
out.close()
# Didn't call keep, so should be deleted.
self.assertFalse(Path(file_path).exists())
def testOpenFileKeep(self):
file_path = tempfile.mktemp()
out = Output.open_file(file_path)
try:
try:
out.write(b"foobar")
out.keep()
finally:
out.close()
# Didn't call keep, so should be deleted.
with open(file_path, "rb") as f:
contents = f.read()
self.assertEqual(b"foobar", contents)
finally:
Path(file_path).unlink()
class CompilerAPITest(unittest.TestCase):
def testCreate(self):
compiler = Compiler()
def testLoadFromBytes(self):
compiler = Compiler()
p = compiler.load_buffer("module {}".encode(), buffer_name="foobar")
def testPipelineClose(self):
compiler = Compiler()
p = compiler.load_buffer("module {}".encode(), buffer_name="foobar")
p.close()
def testLoadFromFile(self):
compiler = Compiler()
with tempfile.NamedTemporaryFile("w", delete=False) as tf:
tf.write("module {}")
tf.close()
p = compiler.load_file(tf.name)
p.close()
def testExecuteIR(self):
compiler = Compiler()
p = compiler.load_buffer("module {}".encode(), buffer_name="foobar")
p.execute()
with closing(compiler.open_output_membuffer()) as output:
p.output_ir(output)
ir_contents = bytes(output.map_memory())
print(ir_contents)
self.assertEqual(b"module {\n}", ir_contents)
def testExecuteVMFB(self):
compiler = Compiler()
compiler.set_flags("--iree-hal-target-backends=vmvx")
p = compiler.load_buffer(
"module {func.func @main(%arg0: i32) -> (i32) {return %arg0 : i32}}".encode(),
buffer_name="foobar",
)
p.execute()
with closing(compiler.open_output_membuffer()) as output:
p.output_vm_bytecode(output)
ir_contents = bytes(output.map_memory())
print(len(ir_contents))
self.assertGreater(len(ir_contents), 0)
if __name__ == "__main__":
unittest.main()
| stellaraccident/shark-engine | tests/support/compiler_test.py | compiler_test.py | py | 4,300 | python | en | code | 0 | github-code | 13 |
16276208377 | from os import system
def batch(scriptname,*args,**kwargs):
'''
Run script as a slurm batch (i.e. queued)
*args are the bind parameters for the script,
and must all be strings.
'''
outfile = kwargs.pop('outfile','~/slurm/out%j.txt')
errfile = kwargs.pop('errfile','~/slurm/err%j.txt')
partition = kwargs.pop('partition','all')
share = kwargs.pop('share',False)
nice = kwargs.pop('nice',8)
nodes = kwargs.pop('nodes',2)
switches = "-o %s -e %s -p %s -c %d" % (outfile,errfile,partition,nodes)
if share:
switches += ' -s'
if nice is not None:
switches += ' --nice=%d' % nice
for switch,value in kwargs.items():
switches += ' --%s' % switch
if value is not None:
switches += '=%s' % value
system("sbatch %s %s %s" % (switches,
scriptname,
' '.join(str(a) for a in args))) | theunissenlab/tlab | src/slurm.py | slurm.py | py | 962 | python | en | code | 0 | github-code | 13 |
2718522291 | n, k = map(int, input().split())
cds = []
for i in range(1, n + 1):
if n % i == 0:
cds.append(i)
if len(cds) < k:
print(0)
else:
print(cds[k - 1])
| jinlee9270/algo | 백준/Bronze/2501. 약수 구하기/약수 구하기.py | 약수 구하기.py | py | 169 | python | en | code | 0 | github-code | 13 |
73936447059 | from re import match
def grade_average(
first_subject: str,
second_subject: str,
third_subject: str,
first: float,
second: float,
third: float
) -> dict:
grade = dict()
grade[first_subject] = first
grade[second_subject] = second
grade[third_subject] = third
grade_quantity = 0
total_grades = 0
for _, numb in grade.items():
total_grades += numb
grade_quantity += 1
av = total_grades / grade_quantity
grade['average'] = av
return grade
def validate_input(input_str, pattern, error_message):
while True:
if match(pattern, input_str):
return input_str
else:
print(error_message)
input_str = input('Retry: ')
def main():
subject_pattern = r"^[a-zA-Z0-9]+$"
grade_pattern = r"^(?:10|[1-9](?:\.\d+)?)$"
while True:
print('Please type 3 subjects and 3 grades to see your average grade.')
first_subject = validate_input(
input('Type the first subject: '),
subject_pattern,
'Only letters and numbers are allowed in subject input.'
)
first_grade = validate_input(
input('Type the first grade between 0 and 10: '),
grade_pattern,
'Only numbers between 0 and 10 are allowed in grade input.'
)
second_subject = validate_input(
input(
'Type the second subject: '),
subject_pattern,
'Only letters and numbers are allowed in subject input.'
)
second_grade = validate_input(
input('Type the second grade between 0 and 10: '),
grade_pattern,
'Only numbers between 0 and 10 are allowed in grade input.'
)
third_subject = validate_input(
input('Type the third subject: '),
subject_pattern,
'Only letters and numbers are allowed in subject input.'
)
third_grade = validate_input(
input('Type the third grade between 0 and 10: '),
grade_pattern,
'Only numbers between 0 and 10 are allowed in grade input.'
)
first_float = float(first_grade)
second_float = float(second_grade)
third_float = float(third_grade)
grade = grade_average(
first_subject,
second_subject,
third_subject,
first_float,
second_float,
third_float
)
for subject, grade in grade.items():
print(f'{subject} -> {grade}')
if __name__ == '__main__':
main()
| Wellinton-A/test-stag | 7-grade_average/grade_av.py | grade_av.py | py | 2,638 | python | en | code | 0 | github-code | 13 |
17988511642 | import requests
from bs4 import BeautifulSoup
from utils import write_data
def get_data(url):
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
updated = soup.select('.timetable > .info > span')[0].text # 업데이트날짜
data = soup.select('.rpsa_detail > div > div')
data.pop()
return data, updated
def parse_data(data, updated):
confirmed_region = [] # 시도별확진자
for i, d in enumerate(data):
region = d.find_all('h4', class_='cityname')[0].text # 지역이름
temp = d.find_all('span', class_='num')
confirmed, _, recovered, deaths, confirmed_rate = [
element.text.replace(',', '') for element in temp]
confirmed = int(confirmed) # 확진자수
recovered = int(recovered) # 격리해제수
deaths = int(deaths) # 사망자수
confirmed_rate = float(confirmed_rate) # 십만명당발생율
if i != 0:
slicing = d.find_all('p', class_='citytit')[0].text
confirmed_region_rate = float(
slicing[:slicing.find('%')]) # 지역별확진자비율
else:
confirmed_region_rate = ''
confirmed_region.append({
'지역이름': region,
'확진자수': confirmed,
'격리해제수': recovered,
'사망자수': deaths,
'십만명당발생율': confirmed_rate,
'지역별확진자비율': confirmed_region_rate,
})
confirmed_region.append({'업데이트날짜': updated})
return confirmed_region
def run():
data, updated = get_data(
"http://ncov.mohw.go.kr/bdBoardList_Real.do?brdId=1&brdGubun=13&ncvContSeq=&contSeq=&board_id=&gubun=")
confirmed_region = parse_data(data, updated)
save_dir = './data/koreaRegionalData.js'
crawler_name = 'crawlKoreaRegionalData.py'
var_name = 'koreaRegionalData'
write_data(confirmed_region, save_dir, crawler_name, var_name)
print("#####################################")
print("############ 한국 지역별 데이터 #############")
print("######## koreaRegionalData.js #########")
run()
print("############### 완료!! ###############")
print("#####################################")
| LiveCoronaDetector/livecod | data/crawlKoreaRegionalData.py | crawlKoreaRegionalData.py | py | 2,264 | python | en | code | 67 | github-code | 13 |
39814483010 | import warnings
warnings.filterwarnings("once", category=DeprecationWarning)
import logging
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
import unittest
import copy
import time
import pandas as pd
from numpy.testing import assert_allclose
# Local imports
from oggm.core import massbalance
from oggm.core.massbalance import LinearMassBalance
from oggm.tests import is_slow, RUN_MODEL_TESTS
import xarray as xr
from oggm import utils, workflow
from oggm.cfg import N, SEC_IN_DAY, SEC_IN_YEAR, SEC_IN_MONTH
# Tests
from oggm.tests.funcs import *
# after oggm.test
import matplotlib.pyplot as plt
# do we event want to run the tests?
if not RUN_MODEL_TESTS:
raise unittest.SkipTest('Skipping all model tests.')
do_plot = False
DOM_BORDER = 80
class TestInitFlowline(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init_present_time_glacier(self):
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
ofl = gdir.read_pickle('inversion_flowlines')[-1]
self.assertTrue(gdir.rgi_date.year == 2003)
self.assertTrue(len(fls) == 3)
vol = 0.
area = 0.
for fl in fls:
refo = 1 if fl is fls[-1] else 0
self.assertTrue(fl.order == refo)
ref = np.arange(len(fl.surface_h)) * fl.dx
np.testing.assert_allclose(ref, fl.dis_on_line,
rtol=0.001,
atol=0.01)
self.assertTrue(len(fl.surface_h) ==
len(fl.bed_h) ==
len(fl.bed_shape) ==
len(fl.dis_on_line) ==
len(fl.widths))
self.assertTrue(np.all(fl.widths >= 0))
vol += fl.volume_km3
area += fl.area_km2
if refo == 1:
rmsd = utils.rmsd(ofl.widths[:-5] * gdir.grid.dx,
fl.widths_m[0:len(ofl.widths)-5])
self.assertTrue(rmsd < 5.)
rtol = 0.02
np.testing.assert_allclose(0.573, vol, rtol=rtol)
np.testing.assert_allclose(6900.0, fls[-1].length_m, atol=101)
np.testing.assert_allclose(gdir.rgi_area_km2, area, rtol=rtol)
if do_plot:
plt.plot(fls[-1].bed_h)
plt.plot(fls[-1].surface_h)
plt.show()
def test_present_time_glacier_massbalance(self):
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
mb_mod = massbalance.PastMassBalance(gdir)
fls = gdir.read_pickle('model_flowlines')
glacier = flowline.FlowlineModel(fls)
mbdf = gdir.get_ref_mb_data()
hgts = np.array([])
widths = np.array([])
for fl in glacier.fls:
hgts = np.concatenate((hgts, fl.surface_h))
widths = np.concatenate((widths, fl.widths_m))
tot_mb = []
refmb = []
grads = hgts * 0
for yr, mb in mbdf.iterrows():
refmb.append(mb['ANNUAL_BALANCE'])
mbh = mb_mod.get_annual_mb(hgts, yr) * SEC_IN_YEAR * cfg.RHO
grads += mbh
tot_mb.append(np.average(mbh, weights=widths))
grads /= len(tot_mb)
# Bias
self.assertTrue(np.abs(utils.md(tot_mb, refmb)) < 50)
# Gradient
dfg = pd.read_csv(utils.get_demo_file('mbgrads_RGI40-11.00897.csv'),
index_col='ALTITUDE').mean(axis=1)
# Take the altitudes below 3100 and fit a line
dfg = dfg[dfg.index < 3100]
pok = np.where(hgts < 3100)
from scipy.stats import linregress
slope_obs, _, _, _, _ = linregress(dfg.index, dfg.values)
slope_our, _, _, _, _ = linregress(hgts[pok], grads[pok])
np.testing.assert_allclose(slope_obs, slope_our, rtol=0.15)
class TestOtherGlacier(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp_div')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
# self.clean_dir()
# Init
cfg.initialize()
cfg.PATHS['dem_file'] = utils.get_demo_file('srtm_oetztal.tif')
cfg.PATHS['climate_file'] = utils.get_demo_file('histalp_merged_hef.nc')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_define_divides(self):
from oggm.core import centerlines
from oggm.core import climate
from oggm.core import inversion
from oggm.core import gis
from oggm import GlacierDirectory
import geopandas as gpd
hef_file = utils.get_demo_file('rgi_oetztal.shp')
rgidf = gpd.GeoDataFrame.from_file(hef_file)
# This is another glacier with divides
entity = rgidf.loc[rgidf.RGIId == 'RGI50-11.00719_d01'].iloc[0]
gdir = GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir, entity=entity)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.process_histalp_nonparallel([gdir])
climate.local_mustar(gdir, tstar=1930, bias=0, prcp_fac=2.5)
climate.apparent_mb(gdir)
inversion.prepare_for_inversion(gdir)
v, ainv = inversion.mass_conservation_inversion(gdir)
flowline.init_present_time_glacier(gdir)
myarea = 0.
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
myarea += np.sum(cl.widths * cl.dx * gdir.grid.dx**2)
np.testing.assert_allclose(ainv, gdir.rgi_area_m2, rtol=1e-2)
np.testing.assert_allclose(myarea, gdir.rgi_area_m2, rtol=1e-2)
myarea = 0.
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
myarea += np.sum(cl.widths * cl.dx * gdir.grid.dx**2)
np.testing.assert_allclose(myarea, gdir.rgi_area_m2, rtol=1e-2)
fls = gdir.read_pickle('model_flowlines')
glacier = flowline.FlowlineModel(fls)
if cfg.PARAMS['grid_dx_method'] == 'square':
self.assertEqual(len(fls), 3)
vol = 0.
area = 0.
for fl in fls:
ref = np.arange(len(fl.surface_h)) * fl.dx
np.testing.assert_allclose(ref, fl.dis_on_line,
rtol=0.001,
atol=0.01)
self.assertTrue(len(fl.surface_h) ==
len(fl.bed_h) ==
len(fl.bed_shape) ==
len(fl.dis_on_line) ==
len(fl.widths))
self.assertTrue(np.all(fl.widths >= 0))
vol += fl.volume_km3
area += fl.area_km2
rtol = 0.08
np.testing.assert_allclose(gdir.rgi_area_km2, area, rtol=rtol)
np.testing.assert_allclose(v*1e-9, vol, rtol=rtol)
class TestMassBalance(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_past_mb_model(self):
F = SEC_IN_YEAR * cfg.RHO
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
df = pd.read_csv(gdir.get_filepath('local_mustar'))
mu_star = df['mu_star'][0]
bias = df['bias'][0]
prcp_fac = df['prcp_fac'][0]
# Climate period
yrp = [1851, 2000]
# Flowlines height
h, w = gdir.get_inversion_flowline_hw()
_, t, p = climate.mb_yearly_climate_on_height(gdir, h, prcp_fac,
year_range=yrp)
mb_mod = massbalance.PastMassBalance(gdir, bias=0)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * F
np.testing.assert_allclose(ref_mb_on_h, my_mb_on_h,
atol=1e-2)
ela_z = mb_mod.get_ela(year=yr)
totest = mb_mod.get_annual_mb([ela_z], year=yr) * F
assert_allclose(totest[0], 0, atol=1)
mb_mod = massbalance.PastMassBalance(gdir)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * F
np.testing.assert_allclose(ref_mb_on_h, my_mb_on_h + bias,
atol=1e-2)
ela_z = mb_mod.get_ela(year=yr)
totest = mb_mod.get_annual_mb([ela_z], year=yr) * F
assert_allclose(totest[0], 0, atol=1)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = ref_mb_on_h*0.
for m in np.arange(12):
yrm = utils.date_to_floatyear(yr, m + 1)
tmp = mb_mod.get_monthly_mb(h, yrm)*SEC_IN_MONTH*cfg.RHO
my_mb_on_h += tmp
np.testing.assert_allclose(ref_mb_on_h,
my_mb_on_h + bias,
atol=1e-2)
# real data
h, w = gdir.get_inversion_flowline_hw()
mbdf = gdir.get_ref_mb_data()
mbdf.loc[yr, 'MY_MB'] = np.NaN
mb_mod = massbalance.PastMassBalance(gdir)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * cfg.RHO
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean(),
mbdf['MY_MB'].mean(),
atol=1e-2)
mbdf['MY_ELA'] = mb_mod.get_ela(year=mbdf.index.values)
assert mbdf[['MY_ELA', 'MY_MB']].corr().values[0, 1] < -0.9
assert mbdf[['MY_ELA', 'ANNUAL_BALANCE']].corr().values[0, 1] < -0.7
mb_mod = massbalance.PastMassBalance(gdir, bias=0)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * cfg.RHO
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean() + bias,
mbdf['MY_MB'].mean(),
atol=1e-2)
mb_mod = massbalance.PastMassBalance(gdir)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * cfg.RHO
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
mb_mod.temp_bias = 1
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * cfg.RHO
mbdf.loc[yr, 'BIASED_MB'] = np.average(my_mb_on_h, weights=w)
mb_mod.temp_bias = 0
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean(),
mbdf['MY_MB'].mean(),
atol=1e-2)
self.assertTrue(mbdf['ANNUAL_BALANCE'].mean() > mbdf['BIASED_MB'].mean())
def test_constant_mb_model(self):
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
df = pd.read_csv(gdir.get_filepath('local_mustar'))
mu_star = df['mu_star'][0]
bias = df['bias'][0]
prcp_fac = df['prcp_fac'][0]
h = np.array([])
w = np.array([])
h, w = gdir.get_inversion_flowline_hw()
cmb_mod = massbalance.ConstantMassBalance(gdir, bias=0)
ombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * cfg.RHO
otmb = np.average(ombh, weights=w)
np.testing.assert_allclose(0., otmb, atol=0.2)
cmb_mod = massbalance.ConstantMassBalance(gdir)
ombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * cfg.RHO
otmb = np.average(ombh, weights=w)
np.testing.assert_allclose(0, otmb + bias, atol=0.2)
mb_mod = massbalance.ConstantMassBalance(gdir, y0=2003 - 15)
nmbh = mb_mod.get_annual_mb(h) * SEC_IN_YEAR * cfg.RHO
ntmb = np.average(nmbh, weights=w)
self.assertTrue(ntmb < otmb)
if do_plot: # pragma: no cover
plt.plot(h, ombh, 'o', label='tstar')
plt.plot(h, nmbh, 'o', label='today')
plt.legend()
plt.show()
cmb_mod.temp_bias = 1
biasombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * cfg.RHO
biasotmb = np.average(biasombh, weights=w)
self.assertTrue(biasotmb < (otmb - 500))
cmb_mod.temp_bias = 0
nobiasombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * cfg.RHO
nobiasotmb = np.average(nobiasombh, weights=w)
np.testing.assert_allclose(0, nobiasotmb + bias, atol=0.2)
months = np.arange(12)
monthly_1 = months * 0.
monthly_2 = months * 0.
for m in months:
yr = utils.date_to_floatyear(0, m + 1)
cmb_mod.temp_bias = 0
tmp = cmb_mod.get_monthly_mb(h, yr) * SEC_IN_MONTH * cfg.RHO
monthly_1[m] = np.average(tmp, weights=w)
cmb_mod.temp_bias = 1
tmp = cmb_mod.get_monthly_mb(h, yr) * SEC_IN_MONTH * cfg.RHO
monthly_2[m] = np.average(tmp, weights=w)
# check that the winter months are close but summer months no
np.testing.assert_allclose(monthly_1[1: 5], monthly_2[1: 5], atol=1)
self.assertTrue(np.mean(monthly_1[5:]) > (np.mean(monthly_2[5:]) + 100))
if do_plot: # pragma: no cover
plt.plot(monthly_1, '-', label='Normal')
plt.plot(monthly_2, '-', label='Temp bias')
plt.legend();
plt.show()
# Climate info
h = np.sort(h)
cmb_mod = massbalance.ConstantMassBalance(gdir, bias=0)
t, tm, p, ps = cmb_mod.get_climate(h)
# Simple sanity checks
assert np.all(np.diff(t) <= 0)
assert np.all(np.diff(tm) <= 0)
assert np.all(np.diff(p) == 0)
assert np.all(np.diff(ps) >= 0)
if do_plot: # pragma: no cover
f, axs = plt.subplots(1, 3, figsize=(9, 3))
axs = axs.flatten()
axs[0].plot(h, t, label='Temp')
axs[0].legend();
axs[1].plot(h, tm, label='TempMelt')
axs[1].legend();
axs[2].plot(h, p, label='Prcp')
axs[2].plot(h, ps, label='SolidPrcp')
axs[2].legend();
plt.tight_layout()
plt.show()
# ELA
elah = cmb_mod.get_ela()
t, tm, p, ps = cmb_mod.get_climate([elah])
mb = ps - cmb_mod.mbmod.mu_star * tm
# not perfect because of time/months/zinterp issues
np.testing.assert_allclose(mb, 0, atol=0.08)
def test_random_mb(self):
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
ref_mod = massbalance.ConstantMassBalance(gdir)
mb_mod = massbalance.RandomMassBalance(gdir, seed=10)
h, w = gdir.get_inversion_flowline_hw()
ref_mbh = ref_mod.get_annual_mb(h, None) * SEC_IN_YEAR
# two years shoudn't be equal
r_mbh1 = mb_mod.get_annual_mb(h, 1) * SEC_IN_YEAR
r_mbh2 = mb_mod.get_annual_mb(h, 2) * SEC_IN_YEAR
assert not np.all(np.allclose(r_mbh1, r_mbh2))
# the same year should be equal
r_mbh1 = mb_mod.get_annual_mb(h, 1) * SEC_IN_YEAR
r_mbh2 = mb_mod.get_annual_mb(h, 1) * SEC_IN_YEAR
np.testing.assert_allclose(r_mbh1, r_mbh2)
# After many trials the mb should be close to the same
ny = 2000
yrs = np.arange(ny)
r_mbh = 0.
mbts = yrs * 0.
for i, yr in enumerate(yrs):
mbts[i] = mb_mod.get_specific_mb(h, w, yr)
r_mbh += mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR
r_mbh /= ny
np.testing.assert_allclose(ref_mbh, r_mbh, atol=0.2)
elats = mb_mod.get_ela(yrs[:200])
assert np.corrcoef(mbts[:200], elats)[0, 1] < -0.95
mb_mod.temp_bias = -0.5
r_mbh_b = 0.
for yr in yrs:
r_mbh_b += mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR
r_mbh_b /= ny
self.assertTrue(np.mean(r_mbh) < np.mean(r_mbh_b))
# Compare sigma from real climate and mine
mb_ref = massbalance.PastMassBalance(gdir)
mb_mod = massbalance.RandomMassBalance(gdir, y0=2003 - 15,
seed=10)
mb_ts = []
mb_ts2 = []
yrs = np.arange(1973, 2003, 1)
for yr in yrs:
mb_ts.append(np.average(mb_ref.get_annual_mb(h, yr) * SEC_IN_YEAR, weights=w))
mb_ts2.append(np.average(mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR, weights=w))
np.testing.assert_allclose(np.std(mb_ts), np.std(mb_ts2), rtol=0.1)
# Monthly
time = pd.date_range('1/1/1973', periods=31*12, freq='MS')
yrs = utils.date_to_floatyear(time.year, time.month)
ref_mb = np.zeros(12)
my_mb = np.zeros(12)
for yr, m in zip(yrs, time.month):
ref_mb[m-1] += np.average(mb_ref.get_monthly_mb(h, yr) * SEC_IN_MONTH, weights=w)
my_mb[m-1] += np.average(mb_mod.get_monthly_mb(h, yr) * SEC_IN_MONTH, weights=w)
my_mb = my_mb / 31
ref_mb = ref_mb / 31
self.assertTrue(utils.rmsd(ref_mb, my_mb) < 0.1)
def test_mb_performance(self):
gdir = init_hef(border=DOM_BORDER)
flowline.init_present_time_glacier(gdir)
h, w = gdir.get_inversion_flowline_hw()
# Climate period, 10 day timestep
yrs = np.arange(1850, 2003, 10/365)
# models
start_time = time.time()
mb1 = massbalance.ConstantMassBalance(gdir)
for yr in yrs:
_ = mb1.get_monthly_mb(h, yr)
t1 = time.time() - start_time
start_time = time.time()
mb2 = massbalance.PastMassBalance(gdir)
for yr in yrs:
_ = mb2.get_monthly_mb(h, yr)
t2 = time.time() - start_time
# not faster as two times t2
try:
assert t1 >= (t2 / 2)
except AssertionError:
# no big deal
unittest.skip('Allowed failure')
class TestModelFlowlines(unittest.TestCase):
def test_rectangular(self):
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
bed_h = np.linspace(3000, 1000, nx)
surface_h = bed_h + 100
surface_h[:20] += 50
surface_h[-20:] -= 100
widths = bed_h * 0. + 20
widths[:30] = 40
widths[-30:] = 10
rec = flowline.RectangularBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
widths=widths)
thick = surface_h - bed_h
widths_m = widths * map_dx
section = thick * widths_m
vol_m3 = thick * map_dx * widths_m
area_m2 = map_dx * widths_m
area_m2[thick == 0] = 0
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# We set something and everything stays same
rec.thick = thick
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.section = section
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.surface_h = surface_h
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# More adventurous
rec.section = section / 2
assert_allclose(rec.thick, thick/2)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section/2)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, (vol_m3/2).sum())
def test_trapeze_mixed_rec(self):
# Special case of lambda = 0
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
bed_h = np.linspace(3000, 1000, nx)
surface_h = bed_h + 100
surface_h[:20] += 50
surface_h[-20:] -= 80
widths = bed_h * 0. + 20
widths[:30] = 40
widths[-30:] = 10
lambdas = bed_h*0.
is_trap = np.ones(len(lambdas), dtype=np.bool)
# tests
thick = surface_h - bed_h
widths_m = widths * map_dx
section = thick * widths_m
vol_m3 = thick * map_dx * widths_m
area_m2 = map_dx * widths_m
area_m2[thick == 0] = 0
rec1 = flowline.TrapezoidalBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
widths=widths, lambdas=lambdas)
rec2 = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=lambdas,
is_trapezoid=is_trap, lambdas=lambdas)
recs = [rec1, rec2]
for rec in recs:
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# We set something and everything stays same
rec.thick = thick
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.section = section
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.surface_h = surface_h
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# More adventurous
rec.section = section / 2
assert_allclose(rec.thick, thick/2)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section/2)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, (vol_m3/2).sum())
def test_trapeze_mixed_lambda1(self):
# Real lambdas
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
bed_h = np.linspace(3000, 1000, nx)
surface_h = bed_h + 100
surface_h[:20] += 50
surface_h[-20:] -= 80
widths_0 = bed_h * 0. + 20
widths_0[:30] = 40
widths_0[-30:] = 10
lambdas = bed_h*0. + 1
# tests
thick = surface_h - bed_h
widths_m = widths_0 * map_dx + lambdas * thick
widths = widths_m / map_dx
section = thick * (widths_0 * map_dx + widths_m) / 2
vol_m3 = section * map_dx
area_m2 = map_dx * widths_m
area_m2[thick == 0] = 0
is_trap = np.ones(len(lambdas), dtype=np.bool)
rec1 = flowline.TrapezoidalBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
widths=widths, lambdas=lambdas)
rec2 = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=lambdas,
is_trapezoid=is_trap, lambdas=lambdas)
recs = [rec1, rec2]
for rec in recs:
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# We set something and everything stays same
rec.thick = thick
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.section = section
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.surface_h = surface_h
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
def test_parab_mixed(self):
# Real parabolas
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
bed_h = np.linspace(3000, 1000, nx)
surface_h = bed_h + 100
surface_h[:20] += 50
surface_h[-20:] -= 80
shapes = bed_h*0. + 0.003
shapes[:30] = 0.002
shapes[-30:] = 0.004
# tests
thick = surface_h - bed_h
widths_m = np.sqrt(4 * thick / shapes)
widths = widths_m / map_dx
section = 2 / 3 * widths_m * thick
vol_m3 = section * map_dx
area_m2 = map_dx * widths_m
area_m2[thick == 0] = 0
is_trap = np.zeros(len(shapes), dtype=np.bool)
rec1 = flowline.ParabolicBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
bed_shape=shapes)
rec2 = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=shapes,
is_trapezoid=is_trap, lambdas=shapes)
recs = [rec1, rec2]
for rec in recs:
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
# We set something and everything stays same
rec.thick = thick
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
rec.section = section
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2.sum())
assert_allclose(rec.volume_m3, vol_m3.sum())
assert_allclose(rec.surface_h, surface_h)
def test_mixed(self):
# Set a section and see if it all matches
map_dx = 100.
dx = 1.
nx = 200
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
bed_h = np.linspace(3000, 1000, nx)
surface_h = bed_h + 100
surface_h[:20] += 50
surface_h[-20:] -= 80
widths_0 = bed_h * 0. + 20
widths_0[:30] = 40
widths_0[-30:] = 10
lambdas = bed_h*0. + 1
lambdas[0:50] = 0
thick = surface_h - bed_h
widths_m = widths_0 * map_dx + lambdas * thick
widths = widths_m / map_dx
section_trap = thick * (widths_0 * map_dx + widths_m) / 2
rec1 = flowline.TrapezoidalBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
widths=widths, lambdas=lambdas)
shapes = bed_h*0. + 0.003
shapes[-30:] = 0.004
# tests
thick = surface_h - bed_h
widths_m = np.sqrt(4 * thick / shapes)
widths = widths_m / map_dx
section_para = 2 / 3 * widths_m * thick
rec2 = flowline.ParabolicBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
bed_shape=shapes)
is_trap = np.ones(len(shapes), dtype=np.bool)
is_trap[100:] = False
section = section_trap.copy()
section[~is_trap] = section_para[~is_trap]
rec = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=shapes,
is_trapezoid=is_trap, lambdas=lambdas)
thick = rec1.thick
thick[~is_trap] = rec2.thick[~is_trap]
assert_allclose(rec.thick, thick)
widths = rec1.widths
widths[~is_trap] = rec2.widths[~is_trap]
assert_allclose(rec.widths, widths)
widths_m = rec1.widths_m
widths_m[~is_trap] = rec2.widths_m[~is_trap]
assert_allclose(rec.widths_m, widths_m)
section = rec1.section
section[~is_trap] = rec2.section[~is_trap]
assert_allclose(rec.section, section)
# We set something and everything stays same
area_m2 = rec.area_m2
volume_m3 = rec.volume_m3
rec.thick = rec.thick
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2)
assert_allclose(rec.volume_m3, volume_m3)
rec.section = rec.section
assert_allclose(rec.thick, thick)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2)
assert_allclose(rec.volume_m3, volume_m3)
rec.surface_h = rec.surface_h
assert_allclose(rec.thick, thick)
assert_allclose(rec.surface_h, surface_h)
assert_allclose(rec.widths, widths)
assert_allclose(rec.widths_m, widths_m)
assert_allclose(rec.section, section)
assert_allclose(rec.area_m2, area_m2)
assert_allclose(rec.volume_m3, volume_m3)
rec.surface_h = rec.surface_h - 10
assert_allclose(rec.thick, thick - 10)
assert_allclose(rec.surface_h, surface_h - 10)
class TestIO(unittest.TestCase):
    """Round-trip tests: flowlines and models to/from NetCDF datasets."""
    def setUp(self):
        # Fresh temporary directory + a prepared Hintereisferner glacier dir
        self.test_dir = os.path.join(get_test_dir(), 'tmp_io')
        if not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
        self.gdir = init_hef(border=DOM_BORDER)
        flowline.init_present_time_glacier(self.gdir)
        self.glen_a = 2.4e-24  # Modern style Glen parameter A
    def tearDown(self):
        self.rm_dir()
    def rm_dir(self):
        # Remove the temporary directory (if it was created)
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)
    def test_flowline_to_dataset(self):
        """Each dummy bed type converts to a dataset and back unchanged."""
        beds = [dummy_constant_bed, dummy_width_bed, dummy_noisy_bed,
                dummy_bumpy_bed, dummy_parabolic_bed, dummy_trapezoidal_bed,
                dummy_mixed_bed]
        for bed in beds:
            fl = bed()[0]
            ds = fl.to_dataset()
            fl_ = flowline.flowline_from_dataset(ds)
            ds_ = fl_.to_dataset()
            self.assertTrue(ds_.equals(ds))
    def test_model_to_file(self):
        """A tributary model survives a NetCDF round trip and can still run."""
        p = os.path.join(self.test_dir, 'grp.nc')
        if os.path.isfile(p):
            os.remove(p)
        fls = dummy_width_bed_tributary()
        model = flowline.FluxBasedModel(fls)
        model.to_netcdf(p)
        fls_ = flowline.glacier_from_netcdf(p)
        for fl, fl_ in zip(fls, fls_):
            ds = fl.to_dataset()
            ds_ = fl_.to_dataset()
            self.assertTrue(ds_.equals(ds))
        # The tributary structure must be preserved
        self.assertTrue(fls_[0].flows_to is fls_[1])
        self.assertEqual(fls[0].flows_to_indice, fls_[0].flows_to_indice)
        # They should be sorted
        to_test = [fl.order for fl in fls_]
        assert np.array_equal(np.sort(to_test), to_test)
        # They should be able to start a run
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls_, mb_model=mb, y0=0.,
                                        glen_a=self.glen_a)
        model.run_until(100)
    @is_slow
    def test_run(self):
        """run_until_and_store output matches step-by-step diagnostics,
        on-disk files round-trip, and a run can restart from the middle."""
        mb = LinearMassBalance(2600.)
        fls = dummy_constant_bed()
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        glen_a=self.glen_a)
        ds, ds_diag = model.run_until_and_store(500)
        ds = ds[0]
        # Re-run step by step and collect the reference diagnostics
        fls = dummy_constant_bed()
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        glen_a=self.glen_a)
        years = utils.monthly_timeseries(0, 500)
        vol_ref = []
        a_ref = []
        l_ref = []
        vol_diag = []
        a_diag = []
        l_diag = []
        ela_diag = []
        for yr in years:
            model.run_until(yr)
            vol_diag.append(model.volume_m3)
            a_diag.append(model.area_m2)
            l_diag.append(model.length_m)
            ela_diag.append(model.mb_model.get_ela(year=yr))
            # Yearly (not monthly) values for the file-model comparison
            if int(yr) == yr:
                vol_ref.append(model.volume_m3)
                a_ref.append(model.area_m2)
                l_ref.append(model.length_m)
                if int(yr) == 500:
                    secfortest = model.fls[0].section
        np.testing.assert_allclose(ds.ts_section.isel(time=-1),
                                   secfortest)
        np.testing.assert_allclose(ds_diag.volume_m3, vol_diag)
        np.testing.assert_allclose(ds_diag.area_m2, a_diag)
        np.testing.assert_allclose(ds_diag.length_m, l_diag)
        np.testing.assert_allclose(ds_diag.ela_m, ela_diag)
        # Now store to disk and compare the on-disk diagnostics
        fls = dummy_constant_bed()
        run_path = os.path.join(self.test_dir, 'ts_ideal.nc')
        diag_path = os.path.join(self.test_dir, 'ts_diag.nc')
        if os.path.exists(run_path):
            os.remove(run_path)
        if os.path.exists(diag_path):
            os.remove(diag_path)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        glen_a=self.glen_a)
        model.run_until_and_store(500, run_path=run_path,
                                  diag_path=diag_path)
        ds_ = xr.open_dataset(diag_path)
        # the identical (i.e. attrs + names) doesn't work because of date
        del ds_diag.attrs['creation_date']
        del ds_.attrs['creation_date']
        xr.testing.assert_identical(ds_diag, ds_)
        # A FileModel replaying the stored run must match a live model
        fmodel = flowline.FileModel(run_path)
        fls = dummy_constant_bed()
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        glen_a=self.glen_a)
        for yr in years:
            model.run_until(yr)
            if yr in [100, 300, 500]:
                # this is sloooooow so we test a little bit only
                fmodel.run_until(yr)
                np.testing.assert_allclose(model.fls[0].section,
                                           fmodel.fls[0].section)
                np.testing.assert_allclose(model.fls[0].widths_m,
                                           fmodel.fls[0].widths_m)
        np.testing.assert_allclose(fmodel.volume_m3_ts(), vol_ref)
        np.testing.assert_allclose(fmodel.area_m2_ts(), a_ref)
        np.testing.assert_allclose(fmodel.length_m_ts(), l_ref)
        # Can we start a run from the middle?
        fmodel.run_until(300)
        model = flowline.FluxBasedModel(fmodel.fls, mb_model=mb, y0=300,
                                        glen_a=self.glen_a)
        model.run_until(500)
        fmodel.run_until(500)
        np.testing.assert_allclose(model.fls[0].section,
                                   fmodel.fls[0].section)
    def test_gdir_copy(self):
        """copy_to_basedir with setups 'all', 'run' and 'inversion' yields
        usable glacier directories."""
        new_dir = os.path.join(get_test_dir(), 'tmp_testcopy')
        if os.path.exists(new_dir):
            shutil.rmtree(new_dir)
        # setup='all': full copy, directly usable
        self.gdir.copy_to_basedir(new_dir, setup='all')
        new_gdir = utils.GlacierDirectory(self.gdir.rgi_id, base_dir=new_dir)
        flowline.init_present_time_glacier(new_gdir)
        shutil.rmtree(new_dir)
        # setup='run': enough to start a model run
        self.gdir.copy_to_basedir(new_dir, setup='run')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        entity = gpd.GeoDataFrame.from_file(hef_file).iloc[0]
        new_gdir = utils.GlacierDirectory(entity, base_dir=new_dir)
        flowline.random_glacier_evolution(new_gdir, nyears=10)
        shutil.rmtree(new_dir)
        # setup='inversion': enough to redo the inversion and then run
        self.gdir.copy_to_basedir(new_dir, setup='inversion')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        entity = gpd.GeoDataFrame.from_file(hef_file).iloc[0]
        new_gdir = utils.GlacierDirectory(entity, base_dir=new_dir)
        inversion.prepare_for_inversion(new_gdir, invert_all_rectangular=True)
        inversion.volume_inversion(new_gdir)
        inversion.filter_inversion_output(new_gdir)
        flowline.init_present_time_glacier(new_gdir)
        cfg.PARAMS['use_optimized_inversion_params'] = False
        flowline.run_constant_climate(new_gdir, nyears=10, bias=0)
        shutil.rmtree(new_dir)
    def test_hef(self):
        """Real-glacier (HEF) flowlines round-trip through NetCDF."""
        p = os.path.join(self.test_dir, 'grp_hef.nc')
        if os.path.isfile(p):
            os.remove(p)
        flowline.init_present_time_glacier(self.gdir)
        fls = self.gdir.read_pickle('model_flowlines')
        model = flowline.FluxBasedModel(fls)
        model.to_netcdf(p)
        fls_ = flowline.glacier_from_netcdf(p)
        for fl, fl_ in zip(fls, fls_):
            ds = fl.to_dataset()
            ds_ = fl_.to_dataset()
            for v in ds.variables.keys():
                np.testing.assert_allclose(ds_[v], ds[v], equal_nan=True)
        for fl, fl_ in zip(fls[:-1], fls_[:-1]):
            self.assertEqual(fl.flows_to_indice, fl_.flows_to_indice)
        # mixed flowline
        fls = self.gdir.read_pickle('model_flowlines')
        model = flowline.FluxBasedModel(fls)
        p = os.path.join(self.test_dir, 'grp_hef_mix.nc')
        if os.path.isfile(p):
            os.remove(p)
        model.to_netcdf(p)
        fls_ = flowline.glacier_from_netcdf(p)
        np.testing.assert_allclose(fls[0].section, fls_[0].section)
        np.testing.assert_allclose(fls[0]._ptrap, fls_[0]._ptrap)
        np.testing.assert_allclose(fls[0].bed_h, fls_[0].bed_h)
        for fl, fl_ in zip(fls, fls_):
            ds = fl.to_dataset()
            ds_ = fl_.to_dataset()
            np.testing.assert_allclose(fl.section, fl_.section)
            np.testing.assert_allclose(fl._ptrap, fl_._ptrap)
            np.testing.assert_allclose(fl.bed_h, fl_.bed_h)
            xr.testing.assert_allclose(ds, ds_)
        for fl, fl_ in zip(fls[:-1], fls_[:-1]):
            self.assertEqual(fl.flows_to_indice, fl_.flows_to_indice)
class TestBackwardsIdealized(unittest.TestCase):
    """Search for an initial (past) glacier state matching a known end state."""
    def setUp(self):
        self.fs = 5.7e-20
        # Backwards
        _fd = 1.9e-24
        self.glen_a = (N+2) * _fd / 2.
        self.ela = 2800.
        origfls = dummy_constant_bed(nx=120, hmin=1800)
        mb = LinearMassBalance(self.ela)
        model = flowline.FluxBasedModel(origfls, mb_model=mb,
                                        fs=self.fs, glen_a=self.glen_a)
        model.run_until(500)
        # Reference state: the glacier after a 500-year spin-up
        self.glacier = copy.deepcopy(model.fls)
    def tearDown(self):
        pass
    @is_slow
    def test_iterative_back(self):
        """The iterative search finds an initial state under ELAs shifted
        by +/-50 m, and under the unchanged reference ELA."""
        y0 = 0.
        y1 = 150.
        rtol = 0.02
        mb = LinearMassBalance(self.ela + 50.)
        model = flowline.FluxBasedModel(self.glacier, mb_model=mb,
                                        fs=self.fs, glen_a=self.glen_a,
                                        time_stepping='ambitious')
        ite, bias, past_model = flowline._find_inital_glacier(model, mb, y0,
                                                              y1, rtol=rtol)
        bef_fls = copy.deepcopy(past_model.fls)
        past_model.run_until(y1)
        # Higher ELA: the found initial glacier must be larger than the end
        self.assertTrue(bef_fls[-1].area_m2 > past_model.area_m2)
        np.testing.assert_allclose(past_model.area_m2, self.glacier[-1].area_m2,
                                   rtol=rtol)
        if do_plot:  # pragma: no cover
            plt.plot(self.glacier[-1].surface_h, 'k', label='ref')
            plt.plot(bef_fls[-1].surface_h, 'b', label='start')
            plt.plot(past_model.fls[-1].surface_h, 'r', label='end')
            plt.plot(self.glacier[-1].bed_h, 'gray', linewidth=2)
            plt.legend(loc='best')
            plt.show()
        mb = LinearMassBalance(self.ela - 50.)
        model = flowline.FluxBasedModel(self.glacier, mb_model=mb, y0=y0,
                                        fs=self.fs, glen_a=self.glen_a,
                                        time_stepping='ambitious')
        ite, bias, past_model = flowline._find_inital_glacier(model, mb, y0,
                                                              y1, rtol=rtol)
        bef_fls = copy.deepcopy(past_model.fls)
        past_model.run_until(y1)
        # Lower ELA: the found initial glacier must be smaller than the end
        self.assertTrue(bef_fls[-1].area_m2 < past_model.area_m2)
        np.testing.assert_allclose(past_model.area_m2, self.glacier[-1].area_m2,
                                   rtol=rtol)
        if do_plot:  # pragma: no cover
            plt.plot(self.glacier[-1].surface_h, 'k', label='ref')
            plt.plot(bef_fls[-1].surface_h, 'b', label='start')
            plt.plot(past_model.fls[-1].surface_h, 'r', label='end')
            plt.plot(self.glacier[-1].bed_h, 'gray', linewidth=2)
            plt.legend(loc='best')
            plt.show()
        mb = LinearMassBalance(self.ela)
        model = flowline.FluxBasedModel(self.glacier, mb_model=mb, y0=y0,
                                        fs=self.fs, glen_a=self.glen_a)
        # Hit the correct one
        ite, bias, past_model = flowline._find_inital_glacier(model, mb, y0,
                                                              y1, rtol=rtol)
        past_model.run_until(y1)
        np.testing.assert_allclose(past_model.area_m2, self.glacier[-1].area_m2,
                                   rtol=rtol)
    @is_slow
    def test_fails(self):
        """With max_ite=5 and a strongly shifted ELA the search must raise
        RuntimeError instead of looping forever."""
        y0 = 0.
        y1 = 100.
        mb = LinearMassBalance(self.ela - 150.)
        model = flowline.FluxBasedModel(self.glacier, mb_model=mb, y0=y0,
                                        fs=self.fs, glen_a=self.glen_a)
        self.assertRaises(RuntimeError, flowline._find_inital_glacier, model,
                          mb, y0, y1, rtol=0.02, max_ite=5)
class TestIdealisedInversion(unittest.TestCase):
    """Mass-conservation thickness inversion on idealised geometries.

    Each test runs a dynamical model to (near) equilibrium, converts its
    flowlines into inversion flowlines, inverts the ice thickness and
    checks the recovered volume against the dynamical model's volume.
    """

    def setUp(self):
        # test directory
        self.testdir = os.path.join(get_test_dir(), 'tmp_ideal_inversion')
        from oggm import GlacierDirectory
        from oggm.tasks import define_glacier_region
        import geopandas as gpd
        # Init
        cfg.initialize()
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        entity = gpd.GeoDataFrame.from_file(hef_file).iloc[0]
        self.gdir = GlacierDirectory(entity, base_dir=self.testdir, reset=True)
        define_glacier_region(self.gdir, entity=entity)

    def tearDown(self):
        self.rm_dir()

    def rm_dir(self):
        # Remove the temporary directory (if it was created)
        if os.path.exists(self.testdir):
            shutil.rmtree(self.testdir)

    def simple_plot(self, model):  # pragma: no cover
        # Plot computed vs. real bed for the last flowline
        ocls = self.gdir.read_pickle('inversion_output')
        ithick = ocls[-1]['thick']
        pg = model.fls[-1].thick > 0
        plt.figure()
        bh = model.fls[-1].bed_h[pg]
        sh = model.fls[-1].surface_h[pg]
        plt.plot(sh, 'k')
        plt.plot(bh, 'C0', label='Real bed')
        plt.plot(sh - ithick, 'C3', label='Computed bed')
        plt.title('Compare Shape')
        plt.xlabel('[dx]')
        plt.ylabel('Elevation [m]')
        plt.legend(loc=3)
        plt.show()

    def double_plot(self, model):  # pragma: no cover
        # Same as simple_plot, but for two flowlines side by side
        ocls = self.gdir.read_pickle('inversion_output')
        f, axs = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
        for i, ax in enumerate(axs):
            ithick = ocls[i]['thick']
            pg = model.fls[i].thick > 0
            bh = model.fls[i].bed_h[pg]
            sh = model.fls[i].surface_h[pg]
            ax.plot(sh, 'k')
            ax.plot(bh, 'C0', label='Real bed')
            ax.plot(sh - ithick, 'C3', label='Computed bed')
            ax.set_title('Compare Shape')
            ax.set_xlabel('[dx]')
            ax.legend(loc=3)
        plt.show()

    def test_inversion_vertical(self):
        """Rectangular (vertical-wall) bed: volume recovered within 1 %."""
        fls = dummy_constant_bed(map_dx=self.gdir.grid.dx, widths=10)
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.)
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=fl.surface_h[pg])
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.ones(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.01)
        if do_plot:  # pragma: no cover
            self.simple_plot(model)

    def test_inversion_parabolic(self):
        """Parabolic bed: volume recovered within 1 %."""
        fls = dummy_parabolic_bed(map_dx=self.gdir.grid.dx)
        mb = LinearMassBalance(2500.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.)
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=fl.surface_h[pg])
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.zeros(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.01)
        inv = self.gdir.read_pickle('inversion_output')[-1]
        bed_shape_gl = 4 * inv['thick'] / (flo.widths * self.gdir.grid.dx) ** 2
        bed_shape_ref = 4 * fl.thick[pg] / (flo.widths * self.gdir.grid.dx) ** 2
        # assert utils.rmsd(fl.bed_shape[pg], bed_shape_gl) < 0.001
        if do_plot:  # pragma: no cover
            plt.plot(bed_shape_ref[:-3])
            plt.plot(bed_shape_gl[:-3])
            plt.show()

    @is_slow
    def test_inversion_mixed(self):
        """Mixed trapezoid/parabolic bed: volume recovered within 5 %."""
        fls = dummy_mixed_bed(deflambdas=0, map_dx=self.gdir.grid.dx,
                              mixslice=slice(10, 30))
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        time_stepping='conservative')
        # This reduces the test's accuracy but makes it much faster.
        model.run_until_equilibrium(rate=0.01)
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            flo.is_rectangular = fl.is_trapezoid[pg]
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.05)
        if do_plot:  # pragma: no cover
            self.simple_plot(model)

    @is_slow
    def test_inversion_cliff(self):
        """Bed with a 100 m cliff: volume recovered within 5 %."""
        fls = dummy_constant_bed_cliff(map_dx=self.gdir.grid.dx,
                                       cliff_height=100)
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        time_stepping='conservative')
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.ones(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.05)
        if do_plot:  # pragma: no cover
            self.simple_plot(model)

    def test_inversion_noisy(self):
        """Noisy bed: volume recovered within 5 %."""
        fls = dummy_noisy_bed(map_dx=self.gdir.grid.dx)
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        time_stepping='conservative')
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.ones(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.05)
        if do_plot:  # pragma: no cover
            self.simple_plot(model)

    def test_inversion_tributary(self):
        """Two flowlines with a tributary: volume recovered within 2 %."""
        fls = dummy_width_bed_tributary(map_dx=self.gdir.grid.dx)
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                        time_stepping='conservative')
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.ones(flo.nx).astype(bool)
            fls.append(flo)
        # Restore the tributary structure for the inversion
        fls[0].set_flows_to(fls[1])
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.02)
        if do_plot:  # pragma: no cover
            self.double_plot(model)

    def test_inversion_non_equilibrium(self):
        """A retreating (non-equilibrium) glacier biases the inversion high."""
        fls = dummy_constant_bed(map_dx=self.gdir.grid.dx)
        mb = LinearMassBalance(2600.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.)
        model.run_until_equilibrium()
        # Push the glacier out of balance: raise the ELA, run 50 more years
        mb = LinearMassBalance(2800.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0)
        model.run_until(50)
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.ones(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        # expected errors
        assert v > model.volume_m3
        ocls = self.gdir.read_pickle('inversion_output')
        ithick = ocls[0]['thick']
        assert np.mean(ithick) > np.mean(model.fls[0].thick)*1.1
        if do_plot:  # pragma: no cover
            self.simple_plot(model)

    def test_inversion_and_run(self):
        """Invert a parabolic glacier, rebuild it from the inverted bed and
        check that a new run reaches the same equilibrium volume."""
        fls = dummy_parabolic_bed(map_dx=self.gdir.grid.dx)
        mb = LinearMassBalance(2500.)
        model = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.)
        model.run_until_equilibrium()
        fls = []
        for fl in model.fls:
            pg = np.where(fl.thick > 0)
            line = shpg.LineString([fl.line.coords[int(p)] for p in pg[0]])
            sh = fl.surface_h[pg]
            flo = centerlines.Centerline(line, dx=fl.dx,
                                         surface_h=sh)
            flo.widths = fl.widths[pg]
            # np.bool was deprecated in NumPy 1.20 / removed in 1.24: use bool
            flo.is_rectangular = np.zeros(flo.nx).astype(bool)
            fls.append(flo)
        self.gdir.write_pickle(copy.deepcopy(fls), 'inversion_flowlines')
        climate.apparent_mb_from_linear_mb(self.gdir)
        inversion.prepare_for_inversion(self.gdir)
        v, _ = inversion.mass_conservation_inversion(self.gdir)
        assert_allclose(v, model.volume_m3, rtol=0.01)
        inv = self.gdir.read_pickle('inversion_output')[-1]
        bed_shape_gl = 4 * inv['thick'] / (flo.widths * self.gdir.grid.dx) ** 2
        ithick = inv['thick']
        # Rebuild a glacier from the inverted bed shape and bed elevation
        fls = dummy_parabolic_bed(map_dx=self.gdir.grid.dx,
                                  from_other_shape=bed_shape_gl[:-2],
                                  from_other_bed=sh-ithick)
        model2 = flowline.FluxBasedModel(fls, mb_model=mb, y0=0.,
                                         time_stepping='conservative')
        model2.run_until_equilibrium()
        assert_allclose(model2.volume_m3, model.volume_m3, rtol=0.01)
        if do_plot:  # pragma: no cover
            plt.figure()
            plt.plot(model.fls[-1].bed_h, 'C0')
            plt.plot(model2.fls[-1].bed_h, 'C3')
            plt.plot(model.fls[-1].surface_h, 'C0')
            plt.plot(model2.fls[-1].surface_h, 'C3')
            plt.title('Compare Shape')
            plt.xlabel('[m]')
            plt.ylabel('Elevation [m]')
            plt.show()
class TestHEF(unittest.TestCase):
def setUp(self):
self.gdir = init_hef(border=DOM_BORDER, invert_with_rectangular=False)
d = self.gdir.read_pickle('inversion_params')
self.fs = d['fs']
self.glen_a = d['glen_a']
    def tearDown(self):
        # Nothing to clean up here
        pass
    @is_slow
    def test_equilibrium(self):
        """Under a constant climate, HEF stays close to its initial
        geometry after running to equilibrium."""
        flowline.init_present_time_glacier(self.gdir)
        mb_mod = massbalance.ConstantMassBalance(self.gdir)
        fls = self.gdir.read_pickle('model_flowlines')
        model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
                                        fs=self.fs,
                                        glen_a=self.glen_a,
                                        min_dt=SEC_IN_DAY/2.,
                                        mb_elev_feedback='never')
        ref_vol = model.volume_km3
        ref_area = model.area_km2
        ref_len = model.fls[-1].length_m
        np.testing.assert_allclose(ref_area, self.gdir.rgi_area_km2, rtol=0.03)
        model.run_until_equilibrium(rate=1e-4)
        self.assertFalse(model.dt_warning)
        assert model.yr > 50
        after_vol = model.volume_km3
        after_area = model.area_km2
        after_len = model.fls[-1].length_m
        np.testing.assert_allclose(ref_vol, after_vol, rtol=0.08)
        np.testing.assert_allclose(ref_area, after_area, rtol=0.03)
        np.testing.assert_allclose(ref_len, after_len, atol=500.01)
@is_slow
def test_commitment(self):
flowline.init_present_time_glacier(self.gdir)
mb_mod = massbalance.ConstantMassBalance(self.gdir, y0=2003 - 15)
fls = self.gdir.read_pickle('model_flowlines')
model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
fs=self.fs,
glen_a=self.glen_a)
ref_vol = model.volume_km3
ref_area = model.area_km2
ref_len = model.fls[-1].length_m
np.testing.assert_allclose(ref_area, self.gdir.rgi_area_km2, rtol=0.02)
model.run_until_equilibrium()
self.assertTrue(model.yr > 100)
after_vol_1 = model.volume_km3
after_area_1 = model.area_km2
after_len_1 = model.fls[-1].length_m
_tmp = cfg.PARAMS['mixed_min_shape']
cfg.PARAMS['mixed_min_shape'] = 0.001
flowline.init_present_time_glacier(self.gdir)
cfg.PARAMS['mixed_min_shape'] = _tmp
glacier = self.gdir.read_pickle('model_flowlines')
fls = self.gdir.read_pickle('model_flowlines')
model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.,
fs=self.fs,
glen_a=self.glen_a)
ref_vol = model.volume_km3
ref_area = model.area_km2
ref_len = model.fls[-1].length_m
np.testing.assert_allclose(ref_area, self.gdir.rgi_area_km2, rtol=0.02)
model.run_until_equilibrium()
self.assertTrue(model.yr > 100)
after_vol_2 = model.volume_km3
after_area_2 = model.area_km2
after_len_2 = model.fls[-1].length_m
self.assertTrue(after_vol_1 < (0.5 * ref_vol))
self.assertTrue(after_vol_2 < (0.5 * ref_vol))
if do_plot: # pragma: no cover
fig = plt.figure()
plt.plot(glacier[-1].surface_h, 'b', label='start')
plt.plot(model.fls[-1].surface_h, 'r', label='end')
plt.plot(glacier[-1].bed_h, 'gray', linewidth=2)
plt.legend(loc='best')
plt.show()
@is_slow
def test_random(self):
flowline.init_present_time_glacier(self.gdir)
flowline.random_glacier_evolution(self.gdir, nyears=100, seed=4,
bias=0, filesuffix='_rdn')
flowline.run_constant_climate(self.gdir, nyears=100,
bias=0, filesuffix='_ct')
paths = [self.gdir.get_filepath('model_run', filesuffix='_rdn'),
self.gdir.get_filepath('model_run', filesuffix='_ct'),
]
for path in paths:
with flowline.FileModel(path) as model:
vol = model.volume_km3_ts()
len = model.length_m_ts()
area = model.area_km2_ts()
np.testing.assert_allclose(vol.iloc[0], np.mean(vol),
rtol=0.1)
np.testing.assert_allclose(area.iloc[0], np.mean(area),
rtol=0.1)
if do_plot:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 10))
vol.plot(ax=ax1)
ax1.set_title('Volume')
area.plot(ax=ax2)
ax2.set_title('Area')
len.plot(ax=ax3)
ax3.set_title('Length')
plt.tight_layout()
plt.show()
@is_slow
def test_random_sh(self):
flowline.init_present_time_glacier(self.gdir)
self.gdir.hemisphere = 'sh'
climate.process_cru_data(self.gdir)
flowline.random_glacier_evolution(self.gdir, nyears=20, seed=4,
bias=0, filesuffix='_rdn')
flowline.run_constant_climate(self.gdir, nyears=20,
bias=0, filesuffix='_ct')
paths = [self.gdir.get_filepath('model_run', filesuffix='_rdn'),
self.gdir.get_filepath('model_run', filesuffix='_ct'),
]
for path in paths:
with flowline.FileModel(path) as model:
vol = model.volume_km3_ts()
len = model.length_m_ts()
area = model.area_km2_ts()
np.testing.assert_allclose(vol.iloc[0], np.mean(vol),
rtol=0.1)
np.testing.assert_allclose(area.iloc[0], np.mean(area),
rtol=0.1)
if do_plot:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 10))
vol.plot(ax=ax1)
ax1.set_title('Volume')
area.plot(ax=ax2)
ax2.set_title('Area')
len.plot(ax=ax3)
ax3.set_title('Length')
plt.tight_layout()
plt.show()
self.gdir.hemisphere = 'nh'
    @is_slow
    def test_cesm(self):
        """CESM GCM input: climate consistency with HISTALP over 1961-1990,
        mass-balance comparison, and similar model runs from both."""
        gdir = self.gdir
        # init
        f = get_demo_file('cesm.TREFHT.160001-200512.selection.nc')
        cfg.PATHS['gcm_temp_file'] = f
        f = get_demo_file('cesm.PRECC.160001-200512.selection.nc')
        cfg.PATHS['gcm_precc_file'] = f
        f = get_demo_file('cesm.PRECL.160001-200512.selection.nc')
        cfg.PATHS['gcm_precl_file'] = f
        climate.process_cesm_data(self.gdir)
        # Climate data
        with warnings.catch_warnings():
            # Long time series are currently a pain pandas
            warnings.filterwarnings("ignore",
                                    message='Unable to decode time axis')
            fh = gdir.get_filepath('climate_monthly')
            fcesm = gdir.get_filepath('cesm_data')
            with xr.open_dataset(fh) as hist, xr.open_dataset(fcesm) as cesm:
                # Rebuild a usable monthly time axis for the CESM data
                tv = cesm.time.values
                time = pd.period_range(tv[0].strftime('%Y-%m-%d'),
                                       tv[-1].strftime('%Y-%m-%d'),
                                       freq='M')
                cesm['time'] = time
                cesm.coords['year'] = ('time', time.year)
                cesm.coords['month'] = ('time', time.month)
                # Let's do some basic checks
                shist = hist.sel(time=slice('1961', '1990'))
                scesm = cesm.isel(time=((cesm.year >= 1961) &
                                        (cesm.year <= 1990)))
                # Climate during the chosen period should be the same
                np.testing.assert_allclose(shist.temp.mean(),
                                           scesm.temp.mean(),
                                           rtol=1e-3)
                np.testing.assert_allclose(shist.prcp.mean(),
                                           scesm.prcp.mean(),
                                           rtol=1e-3)
                np.testing.assert_allclose(shist.grad.mean(),
                                           scesm.grad.mean())
                # And also the annual cycle
                scru = shist.groupby('time.month').mean()
                scesm = scesm.groupby(scesm.month).mean()
                np.testing.assert_allclose(scru.temp, scesm.temp, rtol=5e-3)
                np.testing.assert_allclose(scru.prcp, scesm.prcp, rtol=1e-3)
                np.testing.assert_allclose(scru.grad, scesm.grad)
        # Mass balance models
        mb_cru = massbalance.PastMassBalance(self.gdir)
        mb_cesm = massbalance.PastMassBalance(self.gdir,
                                              filename='cesm_data')
        # Average over 1961-1990
        h, w = self.gdir.get_inversion_flowline_hw()
        yrs = np.arange(1961, 1991)
        ts1 = mb_cru.get_specific_mb(h, w, year=yrs)
        ts2 = mb_cesm.get_specific_mb(h, w, year=yrs)
        # due to non linear effects the MBs are not equivalent! See if they
        # aren't too far:
        assert np.abs(np.mean(ts1) - np.mean(ts2)) < 100
        # For my own interest, some statistics
        yrs = np.arange(1851, 2004)
        ts1 = mb_cru.get_specific_mb(h, w, year=yrs)
        ts2 = mb_cesm.get_specific_mb(h, w, year=yrs)
        if do_plot:
            df = pd.DataFrame(index=yrs)
            k1 = 'Histalp (mean={:.1f}, stddev={:.1f})'.format(np.mean(ts1),
                                                               np.std(ts1))
            k2 = 'CESM (mean={:.1f}, stddev={:.1f})'.format(np.mean(ts2),
                                                            np.std(ts2))
            df[k1] = ts1
            df[k2] = ts2
            df.plot()
            plt.plot(yrs,
                     df[k1].rolling(31, center=True, min_periods=15).mean(),
                     color='C0', linewidth=3)
            plt.plot(yrs,
                     df[k2].rolling(31, center=True, min_periods=15).mean(),
                     color='C1', linewidth=3)
            plt.title('SMB Hintereisferner Histalp VS CESM')
            plt.show()
        # See what that means for a run
        flowline.init_present_time_glacier(gdir)
        flowline.run_from_climate_data(gdir, ys=1961, ye=1990,
                                       filesuffix='_hist')
        flowline.run_from_climate_data(gdir, ys=1961, ye=1990,
                                       filename='cesm_data',
                                       filesuffix='_cesm')
        ds1 = utils.compile_run_output([gdir], path=False, filesuffix='_hist')
        ds2 = utils.compile_run_output([gdir], path=False, filesuffix='_cesm')
        assert_allclose(ds1.volume.isel(rgi_id=0, time=-1),
                        ds2.volume.isel(rgi_id=0, time=-1),
                        rtol=0.1)
        # ELA should be close
        assert_allclose(ds1.ela.mean(), ds2.ela.mean(), atol=50)
    @is_slow
    def test_elevation_feedback(self):
        """Run the same random evolution with each mass-balance elevation
        feedback mode and check the resulting volumes mostly agree."""
        flowline.init_present_time_glacier(self.gdir)
        feedbacks = ['annual', 'monthly', 'always', 'never']
        # Multiprocessing: one task per feedback mode
        tasks = []
        for feedback in feedbacks:
            tasks.append((flowline.random_glacier_evolution,
                          dict(nyears=200, seed=5, mb_elev_feedback=feedback,
                               filesuffix=feedback)))
        workflow.execute_parallel_tasks(self.gdir, tasks)
        out = []
        for feedback in feedbacks:
            out.append(utils.compile_run_output([self.gdir], path=False,
                                                filesuffix=feedback))
        # Check that volume isn't so different between feedback modes
        assert_allclose(out[0].volume, out[1].volume, rtol=0.05)
        assert_allclose(out[0].volume, out[2].volume, rtol=0.05)
        assert_allclose(out[1].volume, out[2].volume, rtol=0.05)
        # Except for "never", where things are different (no feedback at all)
        assert out[3].volume.mean() < out[2].volume.mean()
        if do_plot:
            plt.figure()
            for ds, lab in zip(out, feedbacks):
                (ds.volume*1e-9).plot(label=lab)
            plt.xlabel('Vol (km3)')
            plt.legend()
            plt.show()
    @is_slow
    def test_find_t0(self):
        """Iterative search for an initial glacier state matching the
        Leclercq length record. NOTE: skipTest below disables everything."""
        self.skipTest('This test is too unstable')
        gdir = init_hef(border=DOM_BORDER, invert_with_sliding=False)
        flowline.init_present_time_glacier(gdir)
        glacier = gdir.read_pickle('model_flowlines')
        df = pd.read_csv(utils.get_demo_file('hef_lengths.csv'), index_col=0)
        df.columns = ['Leclercq']
        df = df.loc[1950:]
        # Reference volume of the present-day geometry
        vol_ref = flowline.FlowlineModel(glacier).volume_km3
        init_bias = 94.  # so that "went too far" comes once on travis
        rtol = 0.005
        flowline.iterative_initial_glacier_search(gdir, y0=df.index[0], init_bias=init_bias,
                                                  rtol=rtol, write_steps=True)
        past_model = flowline.FileModel(gdir.get_filepath('model_run'))
        vol_start = past_model.volume_km3
        bef_fls = copy.deepcopy(past_model.fls)
        # Monthly length series; keep one value per year (every 12th month)
        mylen = past_model.length_m_ts()
        df['oggm'] = mylen[12::12].values
        df = df-df.iloc[-1]
        past_model.run_until(2003)
        vol_end = past_model.volume_km3
        np.testing.assert_allclose(vol_ref, vol_end, rtol=0.05)
        rmsd = utils.rmsd(df.Leclercq, df.oggm)
        self.assertTrue(rmsd < 1000.)
        if do_plot:  # pragma: no cover
            df.plot()
            plt.ylabel('Glacier length (relative to 2003)')
            plt.show()
            fig = plt.figure()
            lab = 'ref (vol={:.2f}km3)'.format(vol_ref)
            plt.plot(glacier[-1].surface_h, 'k', label=lab)
            lab = 'oggm start (vol={:.2f}km3)'.format(vol_start)
            plt.plot(bef_fls[-1].surface_h, 'b', label=lab)
            lab = 'oggm end (vol={:.2f}km3)'.format(vol_end)
            plt.plot(past_model.fls[-1].surface_h, 'r', label=lab)
            plt.plot(glacier[-1].bed_h, 'gray', linewidth=2)
            plt.legend(loc='best')
            plt.show()
| Chris35Wills/oggm | oggm/tests/test_models.py | test_models.py | py | 73,389 | python | en | code | null | github-code | 13 |
7575309031 | import argparse
from visualize import VisualizationStats
def main(args):
    """Open the tegrastats visualization for the parsed CLI arguments."""
    viewer = VisualizationStats(args.view_diff)
    viewer.open()
if __name__ == '__main__':
    # CLI: --view_diff toggles the diff view in the visualization.
    parser = argparse.ArgumentParser(description='Tegrastats Graph')
    parser.add_argument("--view_diff", action='store_true')
    args = parser.parse_args()
    main(args)
| hjhwang-qed/jetson-usage-graph | main.py | main.py | py | 336 | python | en | code | 0 | github-code | 13 |
15816866665 | """Plot results"""
import os.path
import numpy as np
import math
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from cmc_robot import ExperimentLogger
from save_figures import save_figures
from parse_args import save_plots
import os
def plot_positions(times, link_data):
    """Plot each spatial coordinate of a link over time.

    link_data: array whose columns are x, y, z positions (one row per step).
    """
    for i, data in enumerate(link_data.T):
        plt.plot(times, data, label=["x", "y", "z"][i])
    plt.legend()
    plt.xlabel("Time [s]")
    plt.ylabel("Distance [m]")
    plt.grid(True)
def plot_trajectory(link_data):
    """Plot the x-z trajectory (top-down view) of a link."""
    plt.plot(link_data[:, 0], link_data[:, 2])
    plt.xlabel("x [m]")
    plt.ylabel("z [m]")
    plt.axis("equal")
    plt.grid(True)
def plot_trajectoryDirect():
    """Plot the x-z trajectory of the head link loaded from a
    hard-coded log file (logs/9b/test_7.npz)."""
    pathfile = 'logs/9b/'
    with np.load(pathfile + 'test_7.npz') as data:
        # timestep = float(data["timestep"])
        # rhead = data["rhead"]
        # rtail = data["rtail"]
        link_data = data["links"][:, 0, :]
        # angle = data["joints"][:, :, 0]
        # torque = data["joints"][:, :, 3]
    plt.plot(link_data[:, 0], link_data[:, 2])
    plt.xlabel("x [m]")
    plt.ylabel("z [m]")
    plt.axis("equal")
    plt.grid(True)
def plot_spine(timestep, joint_data, turn_rev, num_iter=5, plot_name="", subplot=0):
# Plot spine angles
# cut out transient
num_points = np.shape(joint_data)[0]
if num_points > 500:
joint_data = joint_data[(num_points - 500):-1, :]
times = np.arange(0, timestep * np.shape(joint_data)[0], timestep)
plt.figure("Spine Frames " + plot_name)
if subplot > 0:
plt.subplot(2, 1, subplot)
L_link = .1 # link length in cm
x_spacing = .5
index_step = np.shape(joint_data)[0] // num_iter
# plot spine from fixed head position
for i in range(num_iter):
index = i * index_step
x_offset = i * x_spacing
joint_state = joint_data[index, 0:10]
spine_x = np.zeros([len(joint_state) + 1, 1]) + x_offset
spine_y = np.zeros([len(joint_state) + 1, 1])
for j in range(10):
spine_x[j + 1] = spine_x[j] + L_link * np.sin(joint_state[j])
spine_y[j + 1] = spine_y[j] - L_link * np.cos(joint_state[j])
plt.plot(spine_x, spine_y, label="t = %.1f" % times[index])
plt.plot(spine_x[0] * np.ones([2, 1]), [spine_y[0], spine_y[-1]], color='r', linestyle='--')
plt.title("Spine Angle Frames (" + turn_rev + ")")
plt.xlabel("x [m]")
plt.ylabel("z [m]")
plt.legend()
plt.axis("equal")
plt.grid(True)
# integrate spine angle to uncover bias towards one side or another
# first find zero as starting point
integrated_angles = np.zeros([10, 1])
period_indices = np.zeros([10, 2])
for i in range(10):
period_time = 0
zeros = (np.multiply(joint_data[0:-1, i], joint_data[1:, i]) < 0)
j = 0
while zeros[j] != 1 or joint_data[j, i] < 0:
j += 1
zero_count = 0
period_time -= times[j]
period_indices[i, 0] = j
# then integrate over one period
while zero_count < 2:
integrated_angles[i] += joint_data[j, i] * 360 / 2 / np.pi * timestep # deg/s
j += 1
if zeros[j] == 1:
zero_count += 1
period_indices[i, 1] = j
period_time += times[j]
avg_turn_bias = np.average(integrated_angles) / period_time # average offset in deg
# print(turn_rev)
# print(integrated_angles)
print(period_time)
plt.figure("Spine Angle Time Series" + plot_name)
if subplot == 2:
color_ = 'r'
offset_time = 0.456
else:
color_ = 'b'
offset_time = 0.0
label_ = turn_rev
times = np.arange(0, timestep * np.shape(joint_data)[0] * 2, timestep)
# plot timeseries for each spine angle
subplot = max(subplot, 1)
for i in range(10):
# phase shift all timeseries so that they start with the 1st joint
tpi = period_indices[i, :]
if period_indices[i, 0] < period_indices[0, 0] and turn_rev != "Reverse":
tpi = tpi + period_indices[0, 1] - period_indices[0, 0]
elif turn_rev == "Reverse":
if i == 0:
tpi = tpi # + period_indices[0,1]-period_indices[0,0]
if period_indices[i, 0] < period_indices[-1, 0]:
tpi = tpi + period_indices[0, 1] - period_indices[0, 0]
# plt.subplot(10,1,i+1)
plt.subplot(10, 2, i * 2 + (subplot))
joint_data_ = joint_data[int(period_indices[i, 0]):int(period_indices[i, 1]), i]
times_ = times[int(tpi[0]):(int(tpi[0]) + np.shape(joint_data_)[0])]
plt.plot(times_, joint_data_, color=color_, label=label_)
if turn_rev != "Reverse":
plt.plot([times[int(period_indices[-1, 0])], times[int(period_indices[-1, 0])] + period_time * 2],
np.zeros([2, 1]), color='k', linestyle='--')
else:
plt.plot([times[int(period_indices[0, 0])], times[int(period_indices[0, 0])] + period_time * 2],
np.zeros([2, 1]), color='k', linestyle='--')
if i == 0:
plt.title(
"Spine Angle Timeseries for Different Turn Directions") # \n %.1f deg avg. angle bias over period" % (turn_rev, avg_turn_bias))
if i == 5:
plt.ylabel("Joint Angle [rad]")
if i < 9:
plt.xticks([])
plt.ylabel("J%d" % i)
plt.xlabel("t [s]")
plt.legend()
def plot_9d():
    """Produce both sets of exercise 9d plots (turning and reversing)."""
    plot_9d1()
    plot_9d2()
def plot_9d1():
# load files in folder
file_number = 1
for file in os.listdir('logs/9d1'):
with np.load(os.path.join('logs/9d1/', file)) as data:
# amplitude = data["amplitudes"]
# phase_lag = data["phase_lag"]
turn = data["turn"] * 2
link_data = data["links"][:, 0, :]
joint_data = data["joints"][:, :, 0]
# Plot trajectory
plt.figure("9d1: Trajectory")
plt.plot(link_data[400:, 0], link_data[400:, 2], label="Drive Diff = %.2f" % turn)
plt.xlabel("x [m]")
plt.ylabel("z [m]")
plt.legend()
plt.axis("equal")
plt.grid(True)
# Plot spine angles
# only plot 1 time per direction...
if file_number == 1:
timestep = float(data["timestep"])
turn_rev = "Drive Diff = %.2f" % turn
plot_spine(timestep, joint_data[78:], turn_rev, 8, "d1", 1)
if file_number == len(os.listdir('logs/9d1')):
timestep = float(data["timestep"])
turn_rev = "Drive Diff = %.2f" % turn
plot_spine(timestep, joint_data[194:], turn_rev, 8, "d1", 2)
# if file_number==1:
# torques = data["joints"][500:-1,:,3]
# angle_changes = data["joints"][501:,:,0]-data["joints"][500:-1,:,0]
# energy = np.sum(np.multiply(torques,angle_changes))
# print("energy:")
# print(energy)
file_number = file_number + 1
def plot_9d2():
"""Plot positions"""
epsilon = 0.0001
subplot = 1
for file in os.listdir('logs/9d2'):
with np.load(os.path.join('logs/9d2/', file)) as data:
# amplitude = data["amplitudes"]
# phase_lag = data["phase_lag"]
turn = data["turn"] * 2
reverse = data["reverse"]
if (reverse != 0.0):
reversed = "Rev:"
rev_title = "Reverse"
else:
reversed = "Fwd:"
rev_title = "Forward"
link_data = data["links"][:, 0, :]
joint_data = data["joints"][:, :, 0]
# Plot data
plt.figure("9d2: Trajectory")
plt.plot(link_data[400:, 0], link_data[400:, 2], label=reversed + " Drive Diff = %.2f" % turn)
plt.xlabel("x [m]")
plt.ylabel("z [m]")
plt.legend()
plt.axis("equal")
plt.grid(True)
# Plot spine angles
# only plot 1 time per direction...
if abs(turn) < epsilon:
timestep = float(data["timestep"])
plot_spine(timestep, joint_data, rev_title, 8, "d2", subplot)
subplot = subplot + 1
def calc_9d_energy():
    """Print the total joint energy for each simulation log in logs/9b.

    Energy is the sum of torque * delta-angle over all joints, after
    discarding the first 500 transient steps.
    """
    # Removed unused locals (link_data, joint_data, file_number) from the
    # original implementation; behavior is otherwise unchanged.
    for file in os.listdir('logs/9b'):
        with np.load(os.path.join('logs/9b/', file)) as data:
            torques = data["joints"][500:-1, :, 3]
            angle_changes = data["joints"][501:, :, 0] - data["joints"][500:-1, :, 0]
            energy = np.sum(np.multiply(torques, angle_changes))
            print("energy:")
            print(energy)
def plot_9b(plot=True):
"""Plot for exercise 9c"""
# Load data
pathfile = 'logs/9b/dr6_bPhases7_42/'
num_files = len([f for f in os.listdir(pathfile)])
gradient = np.zeros(num_files)
energy_plot = np.zeros((num_files, 3))
speed_plot = np.zeros((num_files, 3))
cost_transport = np.zeros((num_files, 3))
nb_body_joints = 10
clean_val = 500
for i in range(num_files):
with np.load(pathfile + 'test_{}.npz'.format(i)) as data:
timestep = float(data["timestep"])
link_data = data["links"][:, 0, :]
angle = data["joints"][:, :, 0]
torque = data["joints"][:, :, 3]
nominal_ampl = data['nominal_amplitudes']
body_phase_bias = data['body_phase_bias']
# Speed calculation
times = np.arange(0, timestep * np.shape(link_data)[0], timestep)
speed = np.linalg.norm(link_data[-1] - link_data[clean_val]) / (times[-1] - times[clean_val])
# Plot sum energy over a simulation
tot_energy = np.sum(torque[clean_val:-1, :nb_body_joints] * (
angle[clean_val + 1:, :nb_body_joints] - angle[clean_val:-1, :nb_body_joints]))
energy_plot[i] = [nominal_ampl, body_phase_bias, tot_energy]
speed_plot[i] = [nominal_ampl, body_phase_bias, speed]
cost = np.linalg.norm(link_data[-1] - link_data[clean_val]) / tot_energy
print(link_data[-1])
print(cost)
cost_transport[i] = [nominal_ampl, body_phase_bias, cost]
print(energy_plot)
print(speed_plot)
# # Plot energy data in 2D
name1 = "Energy in grid: Amplitude vs phase"
plt.figure(name1)
plt.title(name1)
labels = ['CPG Amplitude', 'CPG body phase bias [rad]', 'Energy [J]']
plot_2d(energy_plot, labels)
name2 = "Speed in grid: Amplitude vs phase"
plt.figure(name2)
plt.title(name2)
labels = ['CPG Amplitude', 'CPG body phase bias [rad]', 'Speed [m/s]']
plot_2d(speed_plot, labels)
#
name3 = "Cost of transport in grid: Amplitude vs phase"
plt.figure(name3)
plt.title(name3)
labels = ['CPG Amplitude', 'CPG body phase bias [rad]', 'transport cost [m/s/J]']
plot_2d(cost_transport, labels)
# Show plots
if plot:
plt.show()
else:
save_figures()
def plot_9c(plot=True):
"""Plot for exercise 9c"""
# Load data
pathfile = 'logs/9c/random/'
num_files = len([f for f in os.listdir(pathfile)])
gradient = np.zeros(num_files)
speed_plot = np.zeros((num_files, 3))
energy_plot = np.zeros((num_files, 3))
speed_energy = np.zeros((num_files, 3))
nb_body_joints = 10
clean_val = 500
for i in range(num_files):
with np.load(pathfile + 'simulation_{}.npz'.format(i)) as data:
timestep = float(data["timestep"])
rhead = data["rhead"]
rtail = data["rtail"]
link_data = data["links"][:, 0, :]
angle = data["joints"][:, :, 0]
torque = data["joints"][:, :, 3]
times = np.arange(0, timestep * np.shape(link_data)[0], timestep)
speed = np.linalg.norm(link_data[clean_val:], axis=1) / times[clean_val:]
# Plot sum energy over a simulation
gradient[i] = (rhead - rtail) / nb_body_joints
tot_energy = np.sum(torque[clean_val:-1, :nb_body_joints] * (
angle[clean_val + 1:, :nb_body_joints] - angle[clean_val:-1, :nb_body_joints]))
energy_plot[i, 0] = rhead
energy_plot[i, 1] = rtail
energy_plot[i, 2] = tot_energy
speed_plot[i, 0] = rhead
speed_plot[i, 1] = rtail
speed_plot[i, 2] = speed[-1]
speed_energy[i, 0] = rhead
speed_energy[i, 1] = rtail
speed_energy[i, 2] = speed[-1] / tot_energy
# Plot energy data in 2D
plt.figure("Energy vs Gradient amplitude")
labels = ['rhead', 'rtail', 'Energy [J]']
plot_2d(energy_plot, labels)
plt.figure("Speed vs Gradient amplitude")
labels = ['rhead', 'rtail', 'Mean speed [m/s]']
plot_2d(speed_plot, labels)
plt.figure("Speed/Energy vs Gradient amplitude")
labels = ['rhead', 'rtail', 'Mean speed / energy [m/sJ]']
plot_2d(speed_energy, labels)
# Show plots
if plot:
plt.show()
else:
save_figures()
def plot_9f():
"""Plot positions"""
for file in os.listdir('logs/9f'):
with np.load(os.path.join('logs/9f/', file)) as data:
# amplitude = data["amplitudes"]
# phase_lag = data["phase_lag"]
# Plot data
n_joints = len(data["joints"][0, :, 0])
joint_data = data["joints"]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(joint_data[1500:2500, :, :])[0], timestep)
plt.figure("Phase Differences for: " + file)
for j in range(2):
for i in range((n_joints - 4) // 2):
# plt.plot(times, joint_data[1500:2500, i , 0]+i*np.pi, label = "x%d" % (i+1))
plt.subplot(2, 1, j + 1)
if i > 0:
plt.plot(times, joint_data[1500:2500, i + j * 5, 0] - joint_data[1500:2500, i + j * 5 - 1, 0],
label="x%d" % (i + j * 5 + 1))
else:
plt.plot(times, joint_data[1500:2500, i + j * 5, 0] - joint_data[1500:2500, i + j * 5, 0],
label="x%d" % (i + j * 5 + 1))
plt.xlabel("t [s]")
plt.ylabel("link phase lag [rad]")
plt.legend()
plt.figure("Phase for: " + file)
for j in range(2):
for i in range((n_joints - 4) // 2):
# plt.plot(times, joint_data[1500:2500, i , 0]+i*np.pi, label = "x%d" % (i+1))
plt.subplot(2, 1, j + 1)
if i > 0:
plt.plot(times, joint_data[1500:2500, i + j * 5, 0], label="x%d" % (i + j * 5 + 1))
else:
plt.plot(times, joint_data[1500:2500, i + j * 5, 0], label="x%d" % (i + j * 5 + 1))
plt.xlabel("t [s]")
plt.ylabel("link phase [rad]")
plt.legend()
def plot_9f_network():
"""Plot positions"""
subplot = 1
color = 'b'
title = "Swimming"
for file in os.listdir('logs/9f'):
with np.load(os.path.join('logs/9f/', file)) as data:
joints = data["joints"]
# Plot data
n_links = (len(joints[0, :, 0])) - 4
network_output = joints[1500:2500, :, 2]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(network_output)[0], timestep)
for i in range(n_links):
plt.subplot(10, 2, i * 2 + (subplot))
plt.plot(times, network_output[:, i], color=color, label=title)
plt.ylabel("J%d" % i)
if i == 0:
plt.title(
"Joint Commands During %s" % title) # \n %.1f deg avg. angle bias over period" % (turn_rev, avg_turn_bias))
if i == 5:
plt.ylabel("Joint Command\nJ%d" % i)
if i < 9:
plt.xticks([])
plt.xlabel("t [s]")
plt.legend()
subplot = subplot + 1
color = 'r'
title = "Walking"
def plot_9f3():
plt.figure("Salamander Walking with Different Spine-Limb Phase Offsets")
num_files = 9
i = 0
clean_val = 1000
speeds = np.zeros([num_files, 1])
biases = np.zeros([num_files, 1])
for file in os.listdir('logs/9f3'):
with np.load(os.path.join('logs/9f3/', file)) as data:
link_data = data["links"][:, 0, :]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(link_data)[0], timestep)
speed = np.linalg.norm(link_data[clean_val, :] - link_data[-1, :]) / (times[-1] - times[clean_val])
speeds[i] = speed
temp_bias = data["body_limb_phase_bias"]
biases[i] = temp_bias
i += 1
plt.plot(biases, speeds)
plt.title("Walking with Different Spine-Limb Phase Offsets")
plt.xlabel("Body-Limb Phase Offset [rad]")
plt.ylabel("Walking Speed [m/s]")
def plot_9f4():
# plt.figure("Salamander Walking with Different Spine-Limb Phase Offsets")
fig, ax1 = plt.subplots()
num_files = 15
i = 0
clean_val = 1000
speeds = np.zeros([num_files, 1])
body_amp = np.zeros([num_files, 1])
powers = np.zeros([num_files, 1])
for file in os.listdir('logs/9f4'):
with np.load(os.path.join('logs/9f4/', file)) as data:
link_data = data["links"][:, 0, :]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(link_data)[0], timestep)
speed = np.linalg.norm(link_data[clean_val, :] - link_data[-1, :]) / (times[-1] - times[clean_val])
speeds[i] = speed
body_amplitude = data["nominal_amplitudes"]
body_amp[i] = body_amplitude
torques = data["joints"][clean_val:, :, 3]
joint_vels = data["joints"][(clean_val):, :, 1]
avg_power = np.average(np.multiply(torques, joint_vels))
powers[i] = avg_power
i += 1
cost_of_transport = np.divide(speeds, powers)
ax1.plot(body_amp, speeds, color='b', label='Speed')
plt.title("Walking with Different Spine Curvatures")
ax1.set_xlabel("Body Amplitude [rad]")
ax1.set_ylabel("Walking Speed [m/s]")
plt.legend()
ax2 = ax1.twinx()
ax2.set_ylabel("Cost of Transport (m/J))")
ax2.plot(body_amp, cost_of_transport, color='r', label='COT')
plt.legend()
def plot_9g():
"""Plot positions"""
pathfile = 'logs/9g/water_to_land_0.npz'
with np.load(pathfile) as data:
links = data["links"][:, 0, :]
joint_data = data["joints"]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(joint_data)[0], timestep)
# plt.figure("Trajectory")
# plot_positions(times, link_data=links)
# plt.show()
subplot = 1
color = 'b'
# Plot data
n_links = (len(joint_data[0, :, 0]))
network_output = joint_data[:, :, 0]
timestep = float(data["timestep"])
times = np.arange(0, timestep * np.shape(network_output)[0], timestep)
for i in range(n_links):
plt.subplot(15, 1, i + (subplot))
if i < 10:
title = "spine joints angle"
elif 10 <= i <= 14:
title = "limb joints angle"
plt.plot(times, network_output[:, i], color=color, label=title)
plt.ylabel("J%d" % i)
if i == 0:
plt.title(
"Joint Commands During Transition water to land") # \n %.1f deg avg. angle bias over period" % (turn_rev, avg_turn_bias))
if i == 5:
plt.ylabel("Spine joints angle\nJ%d" % i)
if i == 11:
plt.ylabel("Limb joints angle\nJ%d" % i)
if i < 9:
plt.xticks([])
plt.subplot(15, 1, 15)
plt.plot(times, links[:, 0])
plt.xlabel("t [s]")
plt.ylabel("x position [m]", rotation=0, labelpad=40)
plt.show()
def plot_2d(results, labels, n_data=300, log=False, cmap=None):
"""Plot result
results - The results are given as a 2d array of dimensions [N, 3].
labels - The labels should be a list of three string for the xlabel, the
ylabel and zlabel (in that order).
n_data - Represents the number of points used along x and y to draw the plot
log - Set log to True for logarithmic scale.
cmap - You can set the color palette with cmap. For example,
set cmap='nipy_spectral' for high constrast results.
"""
xnew = np.linspace(min(results[:, 0]), max(results[:, 0]), n_data)
ynew = np.linspace(min(results[:, 1]), max(results[:, 1]), n_data)
grid_x, grid_y = np.meshgrid(xnew, ynew)
results_interp = griddata(
(results[:, 0], results[:, 1]), results[:, 2],
(grid_x, grid_y),
method='linear' # nearest, cubic
)
extent = (
min(xnew), max(xnew),
min(ynew), max(ynew)
)
plt.plot(results[:, 0], results[:, 1], "r.")
imgplot = plt.imshow(
results_interp,
extent=extent,
aspect='auto',
origin='lower',
interpolation="none",
norm=LogNorm() if log else None
)
if cmap is not None:
imgplot.set_cmap(cmap)
plt.xlabel(labels[0])
plt.ylabel(labels[1])
cbar = plt.colorbar()
cbar.set_label(labels[2])
def main(plot=True, file=None):
"""Main"""
# Load data
if file is None:
# plot_9d()
# calc_9d_energy()
# plot_9f()
# plot_9f_network()
# plot_9f3()
# plot_9f4()
plot_9g()
else:
with np.load(file) as data:
timestep = float(data["timestep"])
# amplitude = data["amplitudes"]
# phase_lag = data["phase_lag"]
link_data = data["links"][:, 0, :]
joints_data = data["joints"]
times = np.arange(0, timestep * np.shape(link_data)[0], timestep)
# Plot data
plt.figure("Positions")
plot_positions(times, link_data)
plt.figure("Trajectory")
plot_trajectory(link_data)
print(joints_data)
if __name__ == '__main__':
    # Show interactively unless the CLI asked for saved plots.
    main(plot=not save_plots())
| shonigmann/ComputationalMotorControl | Lab9/Webots/controllers/pythonController/plot_results.py | plot_results.py | py | 22,784 | python | en | code | 0 | github-code | 13 |
42675687954 | from minepy import MINE
import matplotlib.pyplot as plt
import numpy as np
def mic():
    """Build the 5x5 matrix of pairwise MIC scores between model outputs.

    Loads the prediction files of five models (lgb, lgb-no-useful, xgb,
    lr, rf) and computes the maximal information coefficient for every
    ordered pair.
    """
    result_files = [
        "../result/lgb_A.txt",
        "../result/lgb_A_no_useful2.txt",
        "../result/xgb_A.txt",
        "../result/lr_A.txt",
        "../result/rf_A.txt",
    ]
    predictions = [np.loadtxt(path) for path in result_files]

    def pairwise_mic(a, b):
        estimator = MINE()
        estimator.compute_score(a, b)
        return estimator.mic()

    return [[pairwise_mic(row, col) for col in predictions]
            for row in predictions]
def plot_confusion_matrix(cm, title='mic', cmap=plt.cm.Blues):
    """Render a 5x5 score matrix as a labeled heatmap.

    cm: 2D array-like of scores (e.g. the output of mic()).
    """
    # Fixed model labels matching the file order used in mic().
    fs = ['lgb','lgb_no', 'xgb', 'lr', 'rf']
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(5)
    plt.xticks(tick_marks, fs, rotation=45)
    plt.yticks(tick_marks, fs)
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    # Compute the pairwise MIC matrix and render it as a heatmap.
    plot_confusion_matrix(mic())
| squirrelmaster/rong360-8 | src/base/analyze.py | analyze.py | py | 1,054 | python | en | code | 0 | github-code | 13 |
19335873553 | import csv
import numpy as np
import os
# Eleven input point-cloud files: test_cloud0.txt ... test_cloud10.txt.
# BUG FIX: entry 1 was missing its './' prefix ('test_clouds/test_cloud1.txt'),
# which broke the fixed-length slice `file_name[14:]` used when moving the
# generated CSVs (it produced 'st_cloud1.txt'). Generate the paths uniformly
# and use os.path.basename instead of a brittle slice.
file_names = ['./test_clouds/test_cloud{}.txt'.format(i) for i in range(11)]
# Convert each whitespace-delimited text file to CSV (blank lines skipped).
for file_name in file_names:
    with open(file_name, 'r') as in_file:
        stripped = (line.strip() for line in in_file)
        rows = (line.split() for line in stripped if line)
        with open(file_name + '.csv', 'w') as out_file:
            csv.writer(out_file).writerows(rows)
# Move the generated CSVs into ./csv_files, keeping the original base name.
for file_name in file_names:
    os.rename(file_name + '.csv',
              './csv_files/' + os.path.basename(file_name) + '.csv')
import pandas as pd
# Load every converted CSV into a list of dataframes.
df_master_list = []
for file_name in os.listdir('./csv_files/'):
    df_master_list.append(pd.read_csv('./csv_files/' + file_name, header=None))
# Drop the 4th column (index 3), keeping only the x, y, z columns.
for df in df_master_list:
    df.drop([3], axis=1, inplace=True)
# Re-export the cleaned dataframes as "revised" CSV files.
# NOTE(review): to_csv writes a header row ("0,1,2") by default, but the
# later read uses header=None — that header row becomes a data row; confirm.
for i in range(len(df_master_list)):
    df_master_list[i].to_csv('./revised_csv_files/test_cloud' + str(i+1) + '.csv', index=False)
import pandas as pd
# Reload the revised CSVs into a fresh list of dataframes.
df_master_list = []
for file_name in os.listdir('./revised_csv_files/'):
    df_master_list.append(pd.read_csv('./revised_csv_files/' + file_name, header=None))
# now, using this list of dataframes, create a 3d scatter plot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib.animation import FuncAnimation
# NOTE: the three imports below duplicate the ones just above.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
# Scan all frames for global min/max of x, y, z so the animation can use
# fixed axis limits. NOTE(review): if the CSV header row was read as data
# (see export above), these comparisons mix strings and numbers — confirm.
min_x = 10000000
max_x = -10000000
min_y = 10000000
max_y = -10000000
min_z = 10000000
max_z = -10000000
for df in df_master_list:
    for i in range(len(df)):
        if df[0][i] < min_x:
            min_x = df[0][i]
        if df[0][i] > max_x:
            max_x = df[0][i]
        if df[1][i] < min_y:
            min_y = df[1][i]
        if df[1][i] > max_y:
            max_y = df[1][i]
        if df[2][i] < min_z:
            min_z = df[2][i]
        if df[2][i] > max_z:
            max_z = df[2][i]
print(min_x, max_x, min_y, max_y, min_z, max_z)
# Create a function to update the 3D scatter plot for each frame
def update_3d_scatter(frame):
    """Animation callback: redraw the scatter for one frame.

    Relies on module-level globals: ax, df_master_list, and the
    min_*/max_* axis limits computed above.
    """
    ax.clear()
    df = df_master_list[frame]
    x = df[0]
    y = df[1]
    z = df[2]
    ax.scatter(x, y, z, c='b', marker='o')  # Customize the marker and color as needed
    # Set fixed axis limits for all frames so the view doesn't jump.
    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_y, max_y)
    ax.set_zlim(min_z, max_z)
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    ax.set_title(f'Frame {frame+1}')
# Create the 3D scatter figure the animation draws into.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# One animation frame per dataframe.
ani = FuncAnimation(fig, update_3d_scatter, frames=len(df_master_list), repeat=False)
# You can adjust the frames, interval, and other animation settings as needed.
# NOTE(review): saving a .gif with writer='ffmpeg' requires ffmpeg on PATH;
# matplotlib's PillowWriter is the usual choice for GIF output — confirm.
ani.save('animation.gif', writer='ffmpeg', fps=10)
| johngunerli/test_cloud_points_plot | mp.py | mp.py | py | 3,930 | python | en | code | 0 | github-code | 13 |
17049319754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizFundSettleSummary(object):
    """Alipay API domain model: settlement summary of business funds.

    Holds a single optional ``charge`` field and converts to/from the
    plain-dict shape used by the Alipay OpenAPI gateway.
    """

    def __init__(self):
        self._charge = None

    @property
    def charge(self):
        return self._charge

    @charge.setter
    def charge(self, value):
        self._charge = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursing into nested API objects."""
        params = dict()
        value = self._charge
        if value:
            if hasattr(value, 'to_alipay_dict'):
                params['charge'] = value.to_alipay_dict()
            else:
                params['charge'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; None for empty input."""
        if not d:
            return None
        instance = BizFundSettleSummary()
        if 'charge' in d:
            instance.charge = d['charge']
        return instance
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/BizFundSettleSummary.py | BizFundSettleSummary.py | py | 837 | python | en | code | 241 | github-code | 13 |
12392302796 | N = int(input())
# Read N points; report whether any three of them are collinear.
lst = [list(map(int,input().split())) for i in range(N)]
for i in range(N-2):
    for j in range(i+1,N-1):
        for k in range(j+1,N):
            x0,y0 = lst[i]
            x1,y1 = lst[j]
            x2,y2 = lst[k]
            # Translate so point k becomes the origin.
            x0 -= x2
            x1 -= x2
            y0 -= y2
            y1 -= y2
            # Zero cross product => the three points are collinear.
            if x0*y1==x1*y0:
                print("Yes")
                exit()
print("No")
16177240390 | #Escreva um programa que leia um número N inteiro qualquer e mostre na tela os N primeiros elementos de uma Sequência de Fibonacci. Exemplo:
# Print the first `total` terms of the Fibonacci sequence:
# 0 - 1 - 1 - 2 - 3 - 5 - 8 ...
print('-' * 30)
total = int(input('Quanto termos você quer mostrar? '))
print('-' * 30)
c = 0
# p1/p2 hold the two previous terms; p3 is the next term to print.
p3 = p2 = p1 = 1
# The first two terms (0 and 1) are printed up front.
print('0 -> 1 -> ', end='')
while c < (total -2):
    print('{}'.format(p3), end='')
    # Arrow between terms; "FIM" (end) marker after the last one.
    print(' -> ' if c < (total - 3) else ' FIM ', end='')
    p3 = p1 + p2
    p1 = p2
    p2 = p3
    c += 1
| acksonpires/Phyton-Course- | ex063.py | ex063.py | py | 489 | python | pt | code | 0 | github-code | 13 |
20368914250 | from lxml import etree
from requests import get
from json import loads
from urllib import urlencode
def dbpedia(query):
    """Return autocomplete suggestions for *query* from DBpedia Lookup.

    Returns a list of label strings (empty on HTTP failure).
    """
    autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?' # noqa
    response = get(autocomplete_url
                   + urlencode(dict(QueryString=query)))
    results = []
    if response.ok:
        # Response is XML; extract the Label text of every Result node.
        dom = etree.fromstring(response.content)
        results = dom.xpath('//a:Result/a:Label//text()',
                            namespaces={'a': 'http://lookup.dbpedia.org/'})
    return results
def google(query):
    """Return autocomplete suggestions for *query* from Google's
    toolbar suggest endpoint (XML). Empty list on HTTP failure."""
    autocomplete_url = 'http://suggestqueries.google.com/complete/search?client=toolbar&' # noqa
    response = get(autocomplete_url
                   + urlencode(dict(q=query)))
    results = []
    if response.ok:
        dom = etree.fromstring(response.text)
        results = dom.xpath('//suggestion/@data')
    return results
def wikipedia(query):
    """Return autocomplete suggestions for *query* from Wikipedia's
    opensearch API. Empty list when no suggestions are returned."""
    url = 'https://en.wikipedia.org/w/api.php?action=opensearch&{0}&limit=10&namespace=0&format=json' # noqa
    # BUG FIX: the MediaWiki opensearch action takes the query in the
    # 'search' parameter; the previous dict(q=query) was ignored by the API.
    resp = loads(get(url.format(urlencode(dict(search=query)))).text)
    # Response shape: [query, [titles], [descriptions], [urls]]
    if len(resp) > 1:
        return resp[1]
    return []
# Registry mapping backend name -> autocomplete function; each function
# takes a query string and returns a list of suggestion strings.
backends = {'dbpedia': dbpedia,
            'google': google,
            'wikipedia': wikipedia
            }
| opi/searx_ynh | sources/searx/autocomplete.py | autocomplete.py | py | 1,363 | python | en | code | 1 | github-code | 13 |
24337611042 | import json
import datetime
# Load all notes from disk.
def read_notes():
    """Return the notes list stored in notes.json, or [] if the file
    does not exist yet."""
    try:
        with open("notes.json", "r") as handle:
            return json.load(handle)
    except FileNotFoundError:
        return []
# Persist notes to disk.
def save_notes(notes):
    """Write the given notes list to notes.json as indented JSON."""
    with open("notes.json", "w") as handle:
        json.dump(notes, handle, indent=4)
# Create a new note from user input and persist it.
def create_note():
    """Prompt for a title and body, append a note with a unique id,
    and save the notes list (module-level global) to disk."""
    title = input("Введите заголовок заметки: ")
    body = input("Введите текст заметки: ")
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    note = {
        # BUG FIX: len(notes) + 1 produces duplicate ids after a deletion;
        # use one more than the largest existing id instead.
        "id": max((n["id"] for n in notes), default=0) + 1,
        "title": title,
        "body": body,
        "timestamp": timestamp
    }
    notes.append(note)
    save_notes(notes)
    print("Заметка успешно создана.")
# Display all stored notes.
def view_notes():
    """Print every note (id, title, body, timestamp) from the module-level
    notes list, or a notice when there are none."""
    if not notes:
        print("Нет доступных заметок.")
        return
    for entry in notes:
        print(f"ID: {entry['id']}")
        print(f"Заголовок: {entry['title']}")
        print(f"Текст: {entry['body']}")
        print(f"Дата/время: {entry['timestamp']}")
        print("-------------------------")
# Edit an existing note chosen by id.
def edit_note():
    """Prompt for a note id, replace its title/body, refresh its
    timestamp, and save. Prints an error if the id is not found."""
    note_id = int(input("Введите ID заметки, которую хотите отредактировать: "))
    for note in notes:
        if note['id'] == note_id:
            new_title = input("Введите новый заголовок заметки: ")
            new_body = input("Введите новый текст заметки: ")
            note['title'] = new_title
            note['body'] = new_body
            note['timestamp'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            save_notes(notes)
            print("Заметка успешно отредактирована.")
            return
    print("Заметка с указанным ID не найдена.")
# Delete a note chosen by id.
def delete_note():
    """Prompt for a note id, remove the matching note, and save.
    Prints an error if the id is not found."""
    note_id = int(input("Введите ID заметки, которую хотите удалить: "))
    for note in notes:
        if note['id'] == note_id:
            notes.remove(note)
            save_notes(notes)
            print("Заметка успешно удалена.")
            return
    print("Заметка с указанным ID не найдена.")
# Main interactive loop of the notes application.
def main():
    """Load notes from disk, then loop over a text menu until the user
    chooses to exit (option 5)."""
    global notes
    notes = read_notes()
    while True:
        print("1. Создать заметку")
        print("2. Просмотреть все заметки")
        print("3. Редактировать заметку")
        print("4. Удалить заметку")
        print("5. Выйти")
        choice = input("Выберите действие: ")
        print("-------------------------")
        if choice == "1":
            create_note()
        elif choice == "2":
            view_notes()
        elif choice == "3":
            edit_note()
        elif choice == "4":
            delete_note()
        elif choice == "5":
            break
        else:
            print("Неверный выбор. Попробуйте снова.")
# Application entry point.
if __name__ == "__main__":
    main()
6847978865 | # TODO: This file should be deprecated, it's just for testing.
# TODO: TEST!!!
import sys
import os
sys.path.append('/home/ubuntu/openhgnn/')
os.chdir('/home/ubuntu/openhgnn/')
from openhgnn.models.MAGNN import MAGNN
from openhgnn.sampler.MAGNN_sampler import MAGNN_sampler, collate_fn
from openhgnn.sampler.test_config import CONFIG
import argparse
import warnings
import time
import dgl
import numpy as np
import torch as th
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience, verbose=False, delta=0, save_path='checkpoint.pt'):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
            verbose (bool): If True, prints a message for each validation loss improvement.
                Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                Default: 0
            save_path (str): Path the model checkpoint is written to on improvement.
                Default: 'checkpoint.pt'
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0            # epochs since the last improvement
        self.best_score = None      # best (negated) validation loss seen so far
        self.early_stop = False     # flag polled by the training loop
        self.val_loss_min = np.inf  # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling
        self.delta = delta
        self.save_path = save_path

    def __call__(self, val_loss, model):
        """Record a new validation loss and update the early-stopping state."""
        # Work with negated loss so that "higher score is better".
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score - self.delta:
            # Not a sufficient improvement: spend one unit of patience.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Saves model when validation loss decrease."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        th.save(model.state_dict(), self.save_path)
        self.val_loss_min = val_loss
def load_hg(args):
    """Load the first heterogeneous graph stored for ``args.dataset``.

    Expects the graph binary at openhgnn/dataset/<dataset>/graph.bin
    relative to the working directory (set by the sys.path/os.chdir calls
    at the top of this file).
    """
    hg_dir = 'openhgnn/dataset/'
    # dgl.load_graphs returns (graph_list, labels); only the first graph is needed.
    hg,_ = dgl.load_graphs(hg_dir+'{}/graph.bin'.format(args.dataset), [0])
    hg = hg[0]
    return hg
def svm_test(X, y, test_sizes=(0.2, 0.4, 0.6, 0.8), repeat=10):
    """Evaluate embeddings with a linear SVM over several train/test splits.

    For every ``test_size`` the data is split ``repeat`` times with fixed,
    deterministic seeds; macro/micro F1 are averaged over the repeats.

    Returns:
        Two lists (macro, micro), one ``[mean, std]`` pair per test size.
    """
    # Deterministic seeds reproduce the author's original evaluation.
    seeds = [182318 + i for i in range(repeat)]
    macro_results = []
    micro_results = []
    for size in test_sizes:
        macro_scores = []
        micro_scores = []
        for run in range(repeat):
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=size, shuffle=True, random_state=seeds[run])
            classifier = LinearSVC(dual=False)
            classifier.fit(X_train, y_train)
            predictions = classifier.predict(X_test)
            macro_scores.append(f1_score(y_test, predictions, average='macro'))
            micro_scores.append(f1_score(y_test, predictions, average='micro'))
        macro_results.append([np.mean(macro_scores), np.std(macro_scores)])
        micro_results.append([np.mean(micro_scores), np.std(micro_scores)])
    return macro_results, micro_results
if __name__ == '__main__':
    # ---- setup: config, graph, masks, model, sampler ----
    warnings.filterwarnings('ignore')
    args = argparse.Namespace(**CONFIG)
    hg = load_hg(args)
    train_mask = hg.nodes[args.category].data['train_mask']
    val_mask = hg.nodes[args.category].data['val_mask']
    test_mask = hg.nodes[args.category].data['test_mask']
    model = MAGNN.build_model_from_args(args, hg)
    print(args)
    sampler = MAGNN_sampler(g=hg, mask=train_mask.cpu().numpy(), num_layers=args.num_layers, category=args.category,
                            metapath_list=model.metapath_list, num_samples=args.num_samples, dataset_name=args.dataset)
    dataloader = DataLoader(dataset=sampler, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                            collate_fn=collate_fn, drop_last=False)
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    early_stop = EarlyStopping(patience=args.patience, verbose=True,
                               save_path='/home/ubuntu/openhgnn/openhgnn/sampler/checkpoint/checkpoint_dblp.pt')
    model = model.to(args.device)
    # ---- training loop with per-epoch validation ----
    for epoch in range(args.max_epoch):
        # Re-point the shared sampler at the training mask for this epoch.
        sampler.mask = train_mask
        t = time.perf_counter()
        model.train()
        print("...Start the mini batch training...")
        for num_iter, (sub_g, mini_mp_inst, seed_nodes) in enumerate(dataloader):
            print("Sampling {} seed_nodes with duration(s): {}".format(len(seed_nodes[args.category]), time.perf_counter() - t))
            # Bind the sampled metapath instances to the model before forward.
            model.mini_reset_params(mini_mp_inst)
            sub_g = sub_g.to(args.device)
            pred, _ = model(sub_g)
            pred = pred[args.category][seed_nodes[args.category]]
            lbl = sub_g.nodes[args.category].data['labels'][seed_nodes[args.category]]
            loss = F.cross_entropy(pred, lbl)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("Iter:{}, the training loss of this batch is {}".format(
                num_iter, loss.item()
            ))
            t = time.perf_counter()
        print()
        # ---- validation: collect log-probs/embeddings, score with an SVM ----
        model.eval()
        with th.no_grad():
            sampler.mask = val_mask
            val_loader = DataLoader(dataset=sampler, batch_size=args.batch_size, shuffle=False,
                                    num_workers=args.num_workers, collate_fn=collate_fn, drop_last=False)
            logp_val_all = []
            embed_val_all = []
            for num_iter, (sub_g, mini_mp_inst, seed_nodes) in enumerate(val_loader):
                sub_g = sub_g.to(args.device)
                model.mini_reset_params(mini_mp_inst)
                pred_val, embed_val = model(sub_g)
                pred_val = pred_val[args.category][seed_nodes[args.category]]
                embed_val = embed_val[args.category][seed_nodes[args.category]]
                logp_val = F.log_softmax(pred_val, 1)
                logp_val_all.append(logp_val)
                embed_val_all.append(embed_val.cpu().numpy())
            lbl_val = hg.nodes[args.category].data['labels'][val_mask]
            # NOTE(review): .cuda() assumes a GPU is available regardless of args.device.
            lbl_val = lbl_val.cuda()
            embed_val_all = np.concatenate(embed_val_all, 0)
            loss_val = F.nll_loss(th.cat(logp_val_all, 0), lbl_val)
            lbl_val = lbl_val.cpu().numpy()
            val_f1_macro_list, val_f1_micro_list = svm_test(embed_val_all, lbl_val)
            print('Epoch {}. val_loss is {}'.format(epoch, loss_val))
            print('Macro-F1: ' + ', '.join(['{:.6f}~{:.6f} ({:.1f})'.format(
                macro_f1[0], macro_f1[1], train_size) for macro_f1, train_size in
                zip(val_f1_macro_list, [0.8, 0.6, 0.4, 0.2])]))
            print('Micro-F1: ' + ', '.join(['{:.6f}~{:.6f} ({:.1f})'.format(
                micro_f1[0], micro_f1[1], train_size) for micro_f1, train_size in
                zip(val_f1_micro_list, [0.8, 0.6, 0.4, 0.2])]))
            early_stop(loss_val, model)
            if early_stop.early_stop:
                print("Early Stopping!")
                break
    # ---- final evaluation on the test split ----
    print('----------TEST-----------')
    model.eval()  # evaluate on the held-out test split
    with th.no_grad():
        sampler.mask = test_mask
        test_loader = DataLoader(dataset=sampler, batch_size=args.batch_size, shuffle=False,
                                 num_workers=args.num_workers, collate_fn=collate_fn, drop_last=False)
        logp_test_all = []
        embed_test_all = []
        for num_iter, (sub_g, mini_mp_inst, seed_nodes) in enumerate(test_loader):
            sub_g = sub_g.to(args.device)
            model.mini_reset_params(mini_mp_inst)
            pred_test, embed_test = model(sub_g)
            pred_test = pred_test[args.category][seed_nodes[args.category]]
            embed_test = embed_test[args.category][seed_nodes[args.category]]
            logp_test = F.log_softmax(pred_test, 1)
            logp_test_all.append(logp_test)
            embed_test_all.append(embed_test.cpu().numpy())
        lbl_test = hg.nodes[args.category].data['labels'][test_mask]
        lbl_test = lbl_test.cuda()
        embed_test_all = np.concatenate(embed_test_all, 0)
        loss_test = F.nll_loss(th.cat(logp_test_all, 0), lbl_test)
        lbl_test = lbl_test.cpu().numpy()
        test_f1_macro_list, test_f1_micro_list = svm_test(embed_test_all, lbl_test)
        print('Macro-F1: ' + ', '.join(['{:.6f}~{:.6f} ({:.1f})'.format(
            macro_f1[0], macro_f1[1], train_size) for macro_f1, train_size in
            zip(test_f1_macro_list, [0.8, 0.6, 0.4, 0.2])]))
        print('Micro-F1: ' + ', '.join(['{:.6f}~{:.6f} ({:.1f})'.format(
            micro_f1[0], micro_f1[1], train_size) for micro_f1, train_size in
            zip(test_f1_micro_list, [0.8, 0.6, 0.4, 0.2])]))
| BUPT-GAMMA/OpenHGNN | openhgnn/sampler/test_MAGNN_sampler.py | test_MAGNN_sampler.py | py | 9,360 | python | en | code | 710 | github-code | 13 |
11407940522 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from config import user_configuration
from logging.handlers import RotatingFileHandler
FORMATTER = logging.Formatter(
"%(asctime)s — %(name)s — "
+ "%(levelname)s — %(filename)s:%(lineno)d — "
+ "%(message)s"
)
LOG_FILE = user_configuration()["events_log_file"]
def get_file_handler():
    """Build the rotating file handler that writes to the events log file."""
    handler = RotatingFileHandler(LOG_FILE)
    handler.setFormatter(FORMATTER)
    return handler
def get_logger(logger_name):
    """Create and return a DEBUG-level logger writing to the events log file.

    Args:
        logger_name: name registered with the logging module.
    """
    log = logging.getLogger(logger_name)
    log.setLevel(logging.DEBUG)  # capture everything; handlers may filter further
    log.addHandler(get_file_handler())
    # A dedicated handler is attached, so records need not bubble up to parents.
    log.propagate = False
    return log
| OWASP/Python-Honeypot | core/log.py | log.py | py | 944 | python | en | code | 383 | github-code | 13 |
42168555802 | """Calculates how high the hailstone numbers get for a given seed.
Usage: import height
Usage: from height import measure
Usage: from height import measures
>>> measure(5)
16
>>> measure(7)
52
>>> measure(1)
1
>>> measures(6)
[(1, 1), (2, 2), (3, 16), (4, 4), (5, 16), (6, 16)]
>>> measures(5)
"""
from hailstones import stones
from formulas import ulam
def measure(seed):
    """ For a given seed, returns the biggest of the stones.

    The running maximum starts at ulam(seed) and is raised by any larger
    value occurring in the hailstone sequence for *seed*.
    """
    # Track the peak in a local variable; the original used ``global max``,
    # shadowing the built-in max() for the whole module.
    peak = ulam(seed)
    for number in stones(seed):
        if number > peak:
            peak = number
    return peak
def measures(maxseed):
    """Returns a list of pairs (seed, measure) for all seeds from to maxseed.

    For example:

    >>> measures(6)
    [(1, 1), (2, 2), (3, 16), (4, 4), (5, 16), (6, 16)]
    """
    return [(seed, measure(seed)) for seed in range(1, maxseed + 1)]
| lzhengem/python-projects | LAB04/hailstoneslab/height.py | height.py | py | 1,036 | python | en | code | 0 | github-code | 13 |
# N is read but never used afterwards.
N = int(input())
# NOTE(review): ``data`` is a one-shot iterator; it survives here only because
# binary_search is called exactly once.
data = map(int, input().split())
def binary_search(start, end):
    """Accumulate into the global ``result`` the total distance of all inputs
    from the midpoint of [start, end]."""
    global result
    mid = (start + end) // 2
    dist = 0
    for d in data:
        dist += abs(d - mid)
    result = min(result, dist)
# NOTE(review): result starts at 0 and dist is non-negative, so min() can never
# raise it — the printed/used answer is always 0.  It likely should start at
# float('inf'); verify against the original problem statement.
result = 0
binary_search(0, 100_000)
| tkdgns8234/DataStructure-Algorithm | training/이코테/24.py | 24.py | py | 263 | python | en | code | 0 | github-code | 13 |
5858736743 | import os
def create(file):
    """Create *file*, truncating it to empty if it already exists."""
    with open(file, 'w'):
        pass
def add_file_content(file_name, content):
    """Append *content* plus a trailing newline to *file_name*."""
    with open(file_name, 'a') as handle:
        handle.write(content + '\n')
def replace(file_name, old_str, new_str):
    """Replace every occurrence of *old_str* with *new_str* inside *file_name*."""
    with open(file_name, 'r') as handle:
        updated = handle.read().replace(old_str, new_str)
    # Rewriting from scratch leaves the same final contents as the original
    # seek/truncate/write dance on a single 'r+' handle.
    with open(file_name, 'w') as handle:
        handle.write(updated)
# Dispatch loop: read "Verb-arg1-arg2..." commands from stdin until "End".
command = input()
while not command == 'End':
    if command.startswith('Create'):
        file_name = command.split('-')[1]
        create(file_name)
    elif command.startswith('Add'):
        # NOTE(review): unpacking assumes exactly two '-' separators; content
        # containing '-' would raise ValueError — confirm input format.
        _, file_name, content = command.split('-')
        add_file_content(file_name, content)
    elif command.startswith('Replace'):
        _, file_name, old_string, new_string = command.split('-')
        if os.path.exists(file_name):
            replace(file_name, old_string, new_string)
        else:
            print("An error occurred")
    elif command.startswith('Delete'):
        _, file_name = command.split('-')
        if os.path.exists(file_name):
            os.remove(file_name)
        else:
            print("An error occurred")
    command = input()
# Sample session illustrating the accepted commands.
"""
Create-file.txt
Add-file.txt-First Line
Add-file.txt-Second Line
Replace-random.txt-Some-some
Replace-file.txt-First-1st
Replace-file.txt-Second-2nd
Delete-random.txt
End
"""
| Ilian-Kossev/File_handling-exercises | 3_file_manipulator/manipulator.py | manipulator.py | py | 1,400 | python | en | code | 0 | github-code | 13 |
73049234577 | import os.path as osp
from typing import List
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from data.dataset import CMD3Dataset
class CMD3DataModule(LightningDataModule):
    def __init__(
        self,
        root_dir: str,
        batch_size: int,
        num_workers: int,
    ):
        """Store loader settings and derive audio/label directories under *root_dir*."""
        super(CMD3DataModule, self).__init__()
        self.root_dir = root_dir
        # Audio clips and split CSVs both live under <root_dir>/dataset/.
        self.audio_dir = osp.join(self.root_dir, 'dataset/audio')
        self.label_dir = osp.join(self.root_dir, "dataset/set_splits")
        self.batch_size = batch_size
        self.num_workers = num_workers
def train_dataloader(self) -> DataLoader:
"""Load train set loader."""
self.train_set = CMD3Dataset(
root_path = self.root_dir,
data_path=osp.join(self.label_dir, "train_split.csv"),
audio_path_prefix=self.audio_dir,
)
return DataLoader(
self.train_set,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def val_dataloader(self) -> DataLoader:
"""Load validation set loader."""
self.val_set = CMD3Dataset(
root_path = self.root_dir,
data_path=osp.join(self.label_dir, "val_split.csv"),
audio_path_prefix=self.audio_dir,
)
return DataLoader(
self.val_set,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
    def test_dataloader(self) -> DataLoader:
        """Load test set loader.

        Builds the test-split dataset and returns an unshuffled DataLoader.
        """
        self.test_set = CMD3Dataset(
            root_path = self.root_dir,
            data_path=osp.join(self.label_dir, "test_split.csv"),
            audio_path_prefix=self.audio_dir,
        )
        # No shuffle: evaluation order should be deterministic.
        return DataLoader(
            self.test_set,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
) | hangyeol013/cmd3 | cmd3_audio/data/datamodule.py | datamodule.py | py | 1,970 | python | en | code | 0 | github-code | 13 |
7216228999 | import numpy as np
class MatchCalculation:
@classmethod
def calculation(self,text1: str,text2: str,num_100 = False,ratio_calc = True):
s = text1.lower()
t = text2.lower()
# Initialize matrix of zeros
rows = len(s) + 1
cols = len(t) + 1
distance = np.zeros((rows,cols),dtype = int)
# Populate matrix of zeros with the indeces of each character of both strings
for i in range(1, rows):
for k in range(1,cols):
distance[i][0] = i
distance[0][k] = k
# Iterate over the matrix to compute the cost of deletions,insertions and/or substitutions
for col in range(1, cols):
for row in range(1, rows):
if s[row - 1] == t[col - 1]:
cost = 0 # If the characters are the same in the two strings in a given position [i,j] then the cost is 0
else:
# In order to align the results with those of the Python Levenshtein package, if we choose to calculate the ratio
# the cost of a substitution is 2. If we calculate just distance, then the cost of a substitution is 1.
if ratio_calc:
cost = 2
else:
cost = 1
distance[row][col] = min(distance[row - 1][col] + 1, # Cost of deletions
distance[row][col - 1] + 1, # Cost of insertions
distance[row - 1][col - 1] + cost) # Cost of substitutions
# Computation of the Levenshtein Distance Ratio
Ratio = ((len(s) + len(t)) - distance[row][col]) / (len(s) + len(t))
# Raw data calculation Automatch
raw_cal = str(round(Ratio,3))
last_digit = raw_cal[4:] if raw_cal[4:] else '0'
if not num_100:
result = float(raw_cal[2:4] + '.' + last_digit)
else:
result = float(str(100 - (50 - int(raw_cal[2:4]))) + '.' + last_digit)
return result
| paulussimanjuntak/Automatch | restapi/services/libs/MatchCalculation.py | MatchCalculation.py | py | 2,067 | python | en | code | 0 | github-code | 13 |
20007337817 | from sys import argv
import recurrent
from json import JSONEncoder as encoder
json = encoder().encode
try:
    # Treat all CLI arguments as one natural-language recurrence phrase.
    text_content = ' '.join(argv[1:])
    parsed_result = recurrent.parse(text_content) or None
    friendly_result = recurrent.format(parsed_result) if parsed_result else None
    # NOTE(review): when friendly_result is None, .startswith raises
    # AttributeError, which falls into the generic except below and is
    # reported as an error payload — confirm this is intended.
    print(
        json({
            "parameterised": str(parsed_result),
            "friendly": str(friendly_result) if not friendly_result.startswith("RRULE") else None,
        })
    )
    exit(0)
except Exception as ex:
    # Any failure is reported as a JSON error object with exit code 1.
    print(
        json({
            "message": ex.args[0]
        })
    )
    exit(1)
| maxichrome/reminderer | nlp/process.py | process.py | py | 525 | python | en | code | 1 | github-code | 13 |
4999720305 | """
you can use and to combine of statements
ands come before or's in order of operations in python
and is considered multiplications
addition equates to or
o's equate to false
amd 1's equate to true
x > and
+ > or
0 > false
1 > true
a and false == false
a and true == a
a or false == a
a or True == true
not not = double negatives will cancel out
boolean algebra
PROPERTIES :
DISTRIBUTIVE PROPERTY :
- a or (b and c ) == (a or b ) and (a or c)
- a and (b or c) == (a and b) or (a and c)
DEMORGAN'S LAW :
not (a or b ) = not a and not b # if there is an "or" it has to be an and so it swtiches
not(a and b ) == not a or not b
while loops
while true:
continoulsy run until false
break - if you're in a loop it will break out of the loop
truthy/falsey values, they are not booleans values but they are treated as such
0, '', [] = are not false but act like it
score = 0
if score:
print('nice')
else:
print('no')
>noooooooo
a string with nothign in it is false
for number 0 is considered false and any other number is considered true
x and y = if x is false, return x; otherwise, return y.
x or y = if x is true, return x; otherwise, return y.
not x = if x is false, return true; otherwise, return false.
"""
def deMorgan_one(a, b):
    """Verify De Morgan's law not(a or b) == (not a and not b) for a and b.

    Note: ``not`` binds more loosely than ``==`` in Python, so the original
    un-parenthesised expression actually parsed as
    ``not ((a or b) == (not a and not b))``.  The parentheses below make the
    intended comparison explicit; for boolean inputs both forms are True.
    """
    return (not (a or b)) == (not a and not b)
def deMorgan_test():
    """Print the result of deMorgan_one for every pair of boolean inputs."""
    for a, b in [(True, True), (True, False), (False, False), (False, True)]:
        outcome = deMorgan_one(a, b)
        print('input, {}, output: {}'.format([a, b], outcome))
def whoops():
    """Joke prompt: only an explicit 'y'/'yes' triggers the 'transfer'."""
    # NOTE(review): "ou tof" is a typo inside the prompt string; left as-is
    # because changing it alters runtime output.
    ans = input('do you want to transfer all of your money ou tof your bank account?')
    if ans == 'y' or ans == 'yes':
        print('ok, transferring..')
    else:
        print('good idea')
def ice_cream():
    """Ask for a favorite ice cream, defaulting to 'vanilla' on empty input."""
    # input() is already defaulted via ``or``, so the original ``if ans:``
    # could never be false; the unreachable else-branch is removed.
    favorite = input('what is your favorite icecream ? [Vanilla]') or 'vanilla'
    print('Your favorite is {}'.format(favorite))
def average_while():
    """Average a fixed number of user-entered values using a counted while loop."""
    # NOTE(review): eval() on raw input executes arbitrary code; acceptable only
    # as classroom exercise code.
    count = eval(input('how many numbers do you have?'))
    acc = 0
    i = 0
    while i < count:
        num = eval(input('enter number:'))
        acc = acc + num
        i = i + 1
    print('average is {}'.format((acc / count)))
def average_interactive():
    """Average user-entered values; any answer starting with 'y' continues."""
    acc = 0
    count = 0
    ans = 'y'
    # NOTE(review): ans[0] raises IndexError if the user replies with an empty
    # string — confirm intended.
    while ans[0] == 'y':
        num = eval(input('enter number :'))
        acc = acc + num
        ans = input('do you want to keep going?')
        count = count + 1
    print('average is {}'.format((acc / count)))
def average_sentinel():
    """Average user-entered values; a negative number ends input."""
    acc = 0
    count = 0
    num = 0
    while num >= 0:
        # The first pass adds the dummy initial 0 and bumps count, which is
        # why the average divides by (count - 1) below.
        acc = acc + num
        count = count + 1
        num = eval(input('enter number (negative to stop):'))
    print('average is {}'.format(acc / (count - 1)))
def average_sentinel_2():
    """Average user-entered values; a blank entry ends input.

    Bug fix: the original read with ``eval(input(...))``, which made the
    empty-string sentinel unreachable and then called ``eval`` on an
    already-evaluated number (TypeError).  Input is now kept as a string
    until it is summed, and the prompt reflects the actual sentinel.
    """
    acc = 0
    count = 0
    num = input('enter number (enter to stop):')
    while num != '':
        # NOTE(review): eval on user input is unsafe outside classroom code.
        acc = acc + eval(num)
        count = count + 1
        num = input('enter number (enter to stop):')
    print('average is {}'.format(acc / count))
def average_file(file_name='file_data.txt'):
    """Print the average of the numbers in *file_name*, one per line.

    The file name is now a parameter (defaulting to the original hard-coded
    'file_data.txt') and the handle is closed via ``with`` instead of leaking.
    """
    acc = 0
    count = 0
    with open(file_name, 'r') as file:
        for line in file:
            # NOTE(review): eval trusts file contents; classroom code only.
            acc = acc + eval(line)
            count = count + 1
    print('average:{}'.format(acc / count))
def average_file_while(file_name='file_data.txt'):
    """Print the average of the numbers in *file_name* using readline().

    Sentinel-controlled variant of average_file.  The file name is now a
    parameter (default keeps the original behaviour) and the handle is
    closed via ``with`` instead of leaking.
    """
    acc = 0
    count = 0
    with open(file_name, 'r') as file:
        line = file.readline()
        # readline() returns '' only at end-of-file (blank lines are '\n').
        while line != '':
            acc = acc + eval(line)
            count = count + 1
            line = file.readline()
    print('average:{}'.format(acc / count))
def average_file_nested(file_name='file_data.txt'):
    """Print the average of comma-separated numbers spread over file lines.

    The file name is now a parameter (default keeps the original behaviour),
    the handle is closed via ``with``, and the manual index-based inner while
    loop is replaced by iterating the split pieces directly.
    """
    acc = 0
    count = 0
    with open(file_name, 'r') as file:
        line = file.readline()
        while line != '':
            # Each line holds comma-separated numbers; all contribute equally.
            for num_string in line.split(','):
                acc = acc + eval(num_string)
                count = count + 1
            line = file.readline()
    print('average:{}'.format(acc / count))
if __name__ == '__main__':
    # Earlier exercises are kept commented out; only the last one runs.
    # deMorgan_test()
    # whoops()
    # ice_cream()
    # average_while()
    # average_sentinel_2()
    average_file_nested()
| kateculpepper/220 | notes/notes.3.16.py | notes.3.16.py | py | 4,182 | python | en | code | 0 | github-code | 13 |
10659926755 | from django.contrib import admin
# Core
from core.models import BusinessUnitLov
# Welderlist
from core.models import fNumberLov
from core.models import ProcessLov
from core.models import tQualLov
from core.models import DiameterLov
from core.models import PositionLov
from core.models import CesscoWeldProcedureLov
from core.models import WelderStampLov
# Calibration
from core.models import UnitTypeLov
from core.models import UnitMakeLov
from core.models import UnitRenewalPeriodLov
# Globally disable delete selected on list views
admin.site.disable_action('delete_selected')
# Core admin
class BusinessUnitLovAdmin(admin.ModelAdmin):
    """Admin for business-unit LOV rows; deletion is disabled."""
    list_display = ['business_unit_code', 'business_unit_description']

    def has_delete_permission(self, request, obj=None):
        # The original assigned an unused local and implicitly returned None;
        # returning False states the deny-delete intent explicitly.
        return False
# Welderlist admin
class fNumberLovAdmin(admin.ModelAdmin):
    """Admin for F-number LOV rows; deletion is disabled."""
    list_display = ['f_number_code', 'f_number_description']

    def has_delete_permission(self, request, obj=None):
        # Explicitly deny deletion (the original set an unused local and
        # implicitly returned None).
        return False


class ProcessLovAdmin(admin.ModelAdmin):
    """Admin for process LOV rows; deletion is disabled."""
    list_display = ['process_code', 'process_description']

    def has_delete_permission(self, request, obj=None):
        return False


class tQualLovAdmin(admin.ModelAdmin):
    """Admin for T-qualification LOV rows; deletion is disabled."""
    list_display = ['t_qual_code', 't_qual_description']

    def has_delete_permission(self, request, obj=None):
        return False


class DiameterLovAdmin(admin.ModelAdmin):
    """Admin for diameter LOV rows; deletion is disabled."""
    list_display = ['diameter_code', 'diameter_description']

    def has_delete_permission(self, request, obj=None):
        return False


class PositionLovAdmin(admin.ModelAdmin):
    """Admin for position LOV rows; deletion is disabled."""
    list_display = ['position_code', 'position_description']

    def has_delete_permission(self, request, obj=None):
        return False


class CesscoWeldProcedureLovAdmin(admin.ModelAdmin):
    """Admin for Cessco weld-procedure LOV rows; deletion is disabled."""
    list_display = ['cessco_weld_procedure_code', 'cessco_weld_procedure_description']

    def has_delete_permission(self, request, obj=None):
        return False


class WelderStampLovAdmin(admin.ModelAdmin):
    """Admin for welder-stamp LOV rows; deletion is disabled."""
    list_display = ['welder_stamp_code', 'welder_stamp_description']

    def has_delete_permission(self, request, obj=None):
        return False
# Calibration admin
class UnitTypeLovAdmin(admin.ModelAdmin):
    """Admin for unit-type LOV rows; deletion is disabled."""
    list_display = ['unit_type_code', 'unit_type_description']

    def has_delete_permission(self, request, obj=None):
        # Explicitly deny deletion (the original set an unused local and
        # implicitly returned None).
        return False


class UnitMakeLovAdmin(admin.ModelAdmin):
    """Admin for unit-make LOV rows; deletion is disabled."""
    list_display = ['unit_make_code', 'unit_make_description']

    def has_delete_permission(self, request, obj=None):
        return False


class UnitRenewalPeriodLovAdmin(admin.ModelAdmin):
    """Admin for unit-renewal-period LOV rows; deletion is disabled."""
    list_display = ['unit_renewal_period_code', 'unit_renewal_period_description']

    def has_delete_permission(self, request, obj=None):
        return False
# Register every LOV model with its customised admin class.
admin.site.register(BusinessUnitLov, BusinessUnitLovAdmin)
admin.site.register(fNumberLov, fNumberLovAdmin)
admin.site.register(ProcessLov, ProcessLovAdmin)
admin.site.register(tQualLov, tQualLovAdmin)
admin.site.register(DiameterLov, DiameterLovAdmin)
admin.site.register(PositionLov, PositionLovAdmin)
admin.site.register(CesscoWeldProcedureLov, CesscoWeldProcedureLovAdmin)
admin.site.register(WelderStampLov, WelderStampLovAdmin)
admin.site.register(UnitTypeLov, UnitTypeLovAdmin)
admin.site.register(UnitMakeLov, UnitMakeLovAdmin)
admin.site.register(UnitRenewalPeriodLov, UnitRenewalPeriodLovAdmin)
| rsombach/btm419_demo | cessco/core/admin.py | admin.py | py | 3,379 | python | en | code | 0 | github-code | 13 |
16179930395 | import os
import pickle
import tempfile
import mdtraj as md
import numpy as np
import pytest
from mdtraj.testing import eq
# OpenMM is optional; tests that need it are marked with needs_openmm below.
try:
    from openmm import app
    import openmm.unit as u
    HAVE_OPENMM = True
except ImportError:
    HAVE_OPENMM = False
# Decorator that skips a test when OpenMM is not installed.
needs_openmm = pytest.mark.skipif(not HAVE_OPENMM, reason='needs OpenMM')
@needs_openmm
def test_topology_openmm(get_fn):
    """Round-trip mdtraj topologies through OpenMM and check equality."""
    topology = md.load(get_fn('1bpi.pdb')).topology
    topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
    # The OpenMM topology doesn't distinguish resSeq from index, so any
    # mismatch between them in the mdtraj topology can't be preserved;
    # align them first so the round-trip comparison is fair.
    for top in [topology, topology_with_bond_order]:
        for residue in top.residues:
            residue.resSeq = residue.index
        mm = top.to_openmm()
        assert isinstance(mm, app.Topology)
        topology2 = md.Topology.from_openmm(mm)
        eq(top, topology2)
@needs_openmm
def test_topology_openmm_boxes(get_fn):
    """Smoke test: exporting a solvated topology with box info must not raise."""
    traj = md.load(get_fn('1vii_sustiva_water.pdb'))
    mmtop = traj.topology.to_openmm(traj=traj)
    # NOTE(review): box is computed but never asserted — the division by
    # u.nanometer only checks the units are compatible; confirm whether a
    # value assertion was intended.
    box = mmtop.getUnitCellDimensions() / u.nanometer
def test_topology_pandas(get_fn):
    """Round-trip a topology through its dataframe representation."""
    topology = md.load(get_fn('native.pdb')).topology
    atoms, bonds = topology.to_dataframe()
    topology2 = md.Topology.from_dataframe(atoms, bonds)
    eq(topology, topology2)
    # Make sure default argument of None works, see issue #774.
    # The result is intentionally unused — not raising is the assertion.
    topology3 = md.Topology.from_dataframe(atoms)
def test_topology_pandas_TIP4PEW(get_fn):
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_pandas_2residues_same_resSeq(get_fn):
topology = md.load(get_fn('two_residues_same_resnum.gro')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
def test_topology_unique_elements_bpti(get_fn):
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_segment_id(get_fn):
top = md.load(get_fn('ala_ala_ala.pdb')).topology
assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
df = top.to_dataframe()[0]
assert len(df["segmentID"] == "AAL") == len(
df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsective_resSeq(get_fn):
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
assert eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
assert eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
assert eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle(get_fn):
# test pickling of topology (bug #391)
topology_without_bond_order = md.load(get_fn('bpti.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
for top in [topology_with_bond_order, topology_without_bond_order]:
loaded_top = pickle.loads(pickle.dumps(top))
assert loaded_top == top
def test_atoms_by_name(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
with pytest.raises(KeyError):
top.residue(15).atom('sdfsdf')
def test_select_atom_indices(get_fn):
top = md.load(get_fn('native.pdb')).topology
assert eq(top.select_atom_indices('alpha'), np.array([8]))
assert eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
with pytest.raises(ValueError):
top.select_atom_indices('sdfsdf')
@needs_openmm
def test_top_dataframe_openmm_roundtrip(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
def test_n_bonds(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology(get_fn):
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
# we want to make sure there's a nice error message than includes
# a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_unique_pairs():
    """Check _unique_pairs agrees with the specialised pair helpers.

    Bug fix: the original called ``.sort()`` on both results, which sorts
    in place and returns None, so ``eq(None, None)`` passed vacuously.
    ``sorted()`` over the pair tuples keeps the order-insensitive
    comparison while actually comparing the contents.
    """
    n = 10
    a = np.arange(n)
    b = np.arange(n, n + n)
    eq(sorted(map(tuple, md.Topology._unique_pairs(a, a))),
       sorted(map(tuple, md.Topology._unique_pairs_equal(a))))
    eq(sorted(map(tuple, md.Topology._unique_pairs(a, b))),
       sorted(map(tuple, md.Topology._unique_pairs_mutually_exclusive(a, b))))
def test_select_pairs(get_fn):
    """Exercise Topology.select_pairs with equal/exclusive/overlapping selections."""
    traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
    select_pairs = traj.top.select_pairs
    # 258 water oxygens: O-O pairs are combinations, H1-O pairs the full product.
    assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
    assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
    selections = iter([
        # Equal
        ("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
        ('all', 'all'),
        # Exclusive
        ('name O', 'name H1'),
        ('name H1', 'name O'),
        # Overlap
        (range(traj.n_atoms), 'name O'),
        ('all', 'name O')])
    for select1, select2 in selections:
        select3, select4 = next(selections)
        # NOTE(review): .sort() sorts in place and returns None, so this
        # compares None with None — the equivalence check is vacuous.  The
        # same fix as in test_unique_pairs (sorted(...)) applies here.
        assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
                  select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta(get_fn):
    """Chain 0 of 2EQQ should serialise to the expected FASTA sequence."""
    t = md.load(get_fn('2EQQ.pdb'))
    assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
def test_subset(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset([1, 2, 3])
assert t2.n_residues == 1
def test_subset_re_index_residues(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset(t1.select('resid 0 2'))
np.testing.assert_array_equal([0, 1], [rr.index for rr in t2.residues])
def test_molecules(get_fn):
top = md.load(get_fn('4OH9.pdb')).topology
molecules = top.find_molecules()
assert sum(len(mol) for mol in molecules) == top.n_atoms
assert sum(1 for mol in molecules if len(mol) > 1) == 2 # All but two molecules are water
def test_copy_and_hash(get_fn):
t = md.load(get_fn('traj.h5'))
t1 = t.topology
t2 = t.topology.copy()
assert t1 == t2
assert hash(tuple(t1._chains)) == hash(tuple(t2._chains))
assert hash(tuple(t1._atoms)) == hash(tuple(t2._atoms))
assert hash(tuple(t1._bonds)) == hash(tuple(t2._bonds))
assert hash(tuple(t1._residues)) == hash(tuple(t2._residues))
assert hash(t1) == hash(t2)
def test_topology_sliced_residue_indices(get_fn):
# https://github.com/mdtraj/mdtraj/issues/1585
full = md.load(get_fn('1bpi.pdb'))
residues = full.top.select("resid 1 to 10")
sliced = full.atom_slice(residues)
idx = [res.index for res in sliced.top.residues][-1]
assert idx == sliced.top.n_residues-1
# Now see if this works
_ = sliced.topology.residue(idx)
def test_topology_join(get_fn):
top_1 = md.load(get_fn('2EQQ.pdb')).topology
top_2 = md.load(get_fn('4OH9.pdb')).topology
out_topology = top_1.join(top_2)
eq(out_topology.n_atoms, top_1.n_atoms + top_2.n_atoms)
eq(out_topology.n_residues, top_1.n_residues + top_2.n_residues)
eq(top_1.atom(0).residue.name, out_topology.atom(0).residue.name)
eq(top_2.atom(-1).residue.name, out_topology.atom(-1).residue.name)
eq(top_1.atom(0).element, out_topology.atom(0).element)
eq(top_2.atom(-1).element, out_topology.atom(-1).element)
def test_topology_join_keep_resSeq(get_fn):
top_1 = md.load(get_fn('2EQQ.pdb')).topology
top_2 = md.load(get_fn('4OH9.pdb')).topology
out_topology_keepId_True = top_1.join(top_2, keep_resSeq=True)
out_topology_keepId_False = top_1.join(top_2, keep_resSeq=False)
out_resSeq_keepId_True = [residue.resSeq for residue in out_topology_keepId_True.residues]
out_resSeq_keepId_False = [residue.resSeq for residue in out_topology_keepId_False.residues]
expected_resSeq_keepId_True = (
[residue.resSeq for residue in top_1.residues
] + [
residue.resSeq for residue in top_2.residues])
expected_resSeq_keepId_False = list(range(1, len(expected_resSeq_keepId_True) + 1))
eq(out_resSeq_keepId_True, expected_resSeq_keepId_True)
eq(out_resSeq_keepId_False, expected_resSeq_keepId_False)
| mdtraj/mdtraj | tests/test_topology.py | test_topology.py | py | 10,270 | python | en | code | 505 | github-code | 13 |
14039943929 | #!/usr/bin/env python
from distutils.core import setup
# NOTE(review): distutils reads DISTUTILS_DEBUG from the environment; this
# module-level assignment likely has no effect — verify.
DISTUTILS_DEBUG = True
# Package metadata and the list of sub-packages to install.
setup(name='PySpectrograph',
      version='0.3',
      description='Spectrograph Modelling Software',
      author='Steve Crawford',
      author_email='crawfordsm@gmail.com',
      url='http://code.google.com/p/pyspectrograph/',
      packages=[
          'PySpectrograph',
          'PySpectrograph/Spectrograph',
          'PySpectrograph/Utilities',
          'PySpectrograph/WavelengthSolution',
          'PySpectrograph/Spectra',
          'PySpectrograph/Models'],
      )
| cmccully/pyspectrograph | setup.py | setup.py | py | 563 | python | en | code | null | github-code | 13 |
16013037410 | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from numpy import arange
from math import sin, cos, pi
import sys
def init():
    # One-time GL state: white clear color and a 2D orthographic projection
    # spanning [-2, 2] on both axes.
    glClearColor(1, 1, 1, 1)
    gluOrtho2D(-2, 2, -2, 2)
def plot_func():
    """Clear the window and plot the parametric curve point by point, then flush."""
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(0, 0, 0)  # draw in black
    glPointSize(1)
    # glRotatef(-45, 0, 0, 1)
    # Plot the coordinate axes
    # glBegin(GL_LINES)
    # glVertex2f(-3.0, -3.0)
    # glVertex2f(3.0, 3.0)
    # glVertex2f(-3.0, 3.0)
    # glVertex2f(3.0, -3.0)
    # glEnd()
    glBegin(GL_POINTS)
    # a, b are only used by the commented-out ellipse variant below.
    a, b = 1.5, 1
    for t in arange(-250, 250, 0.001):
        x = sin(0.99*t) - 0.7*cos(3.01*t)
        y = cos(1.01*t) + 0.1*sin(15.03*t)
        # ellipse in y=x major axis
        # x = a*cos(t)*cos(pi/4)-b*sin(t)*sin(pi/4)
        # y = b*sin(t)*cos(pi/4)+a*cos(t)*sin(pi/4)
        glVertex2f(x, y)
    glEnd()
    glFlush()
def main():
    """Create a 500x500 single-buffered RGB GLUT window and start the event loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE)
    glutInitWindowSize(500, 500)
    glutInitWindowPosition(50, 50)
    glutCreateWindow(b'Sine and Cosine')
    glutDisplayFunc(plot_func)
    init()
    glutMainLoop()  # blocks until the window is closed
main()
| sagar-spkt/Learning | Etch-a-sketch/ex7.py | ex7.py | py | 1,140 | python | en | code | 0 | github-code | 13 |
41995510879 | #!/usr/bin/env python
import sys
from prettytable import PrettyTable,HEADER,NONE,FRAME
# Field separator between tag=value pairs in the input FIX messages.
DELIMITER=';'
disp = PrettyTable()
#disp.border = False
disp.hrules = HEADER
disp.vrules = NONE
input_tags = list()
col_names = list()
# Map of FIX tag number -> human-readable field name (subset we care about).
# (The previous `tag2name = dict()` initializer was dead code: it was
# immediately overwritten by this literal.)
tag2name = {
    '1' : 'Account' ,
    '6' : 'AvgPx',
    '31' : 'LastPx',
    '32' : 'LastShares',
    '35' : 'MsgType',
    '38' : 'OrderQty',
    '39' : 'OrdStatus',
    '40' : 'OrdType',
    '44' : 'Price',
    '47' : 'Rule80A',
    '49' : 'SenderCompID',
    '50' : 'SenderSubID',
    '52' : 'SendingTime',
    '60' : 'TransactTime'
}
def parseFIXMsg(msg):
    """Parse one delimited FIX message and append a display row.

    Splits *msg* on DELIMITER into tag=value fields, then builds one row
    containing the values of the globally selected ``input_tags`` (a blank
    cell where a tag is absent) and adds it to the global ``disp`` table.
    """
    if msg == '':
        # Empty message: nothing to parse. The original `pass` here was a
        # no-op that fell through and processed the empty string anyway.
        return
    print("msg=[{}]".format(msg))
    tag2val = msg.split(DELIMITER)
    t2v = dict()
    for s in tag2val:
        if '=' in s:
            # Split only on the first '=' so values containing '=' don't
            # raise a too-many-values unpacking error.
            tag, val = s.split('=', 1)
            print("{} => {}".format(tag, val))
            t2v[tag] = val
    print("====t2v====")
    print(t2v)
    row = list()
    for t in input_tags:
        if t in t2v.keys():
            row.append(t2v[t])
        else:
            row.append(' ')
    print("row {}".format(row))
    disp.add_row(row)
# Input selection: prefer piped stdin; otherwise read from the file named in
# argv[1]. Note: when reading from a file, input_tags stays empty, so no
# columns are selected -- presumably stdin is the intended mode; verify.
if not sys.stdin.isatty():
    input_stream = sys.stdin
    input_tags = sys.argv[1:]
    print("input_tags=[{}]".format(input_tags))
else:
    try:
        input_filename = sys.argv[1]
    except IndexError:
        message = 'need filename as first argument if stdin is not full'
        raise IndexError(message)
    else:
        # 'rU' (universal newlines) was removed in Python 3.11; plain text
        # mode already translates newlines.
        input_stream = open(input_filename, 'r')
# Build the table header from the selected tags.
for tag in input_tags:
    tag_name = tag2name.get(tag)
    print("{}:{}".format(tag, tag_name))
    col_names.append(tag_name)
print("col_names:{}".format(col_names))
disp.field_names = col_names
# One table row per non-blank input line.
for line in input_stream:
    l = line.strip()
    if l != '':
        print("[{}]".format(l))
        parseFIXMsg(l)
print(disp)
| kyoxiao/toolbox | extf.py | extf.py | py | 1,721 | python | en | code | 0 | github-code | 13 |
71165003539 | #!/usr/bin/env python
import numpy as np
import math
import cv2
#Enter PGM or PGMA you would like to compress
#If you would like to skip using the program I have provided some examples of results
#Higher numbers = higher variance accepted before an area is all changed to the same value
data=cv2.imread('baboon.pgma',-1)
#Converts 2D array To 1D array
def flat(arr):
    """Return the elements of a (nested) sequence as a flat 1D numpy array."""
    return np.array(arr).reshape(-1)
#Flatten before entering findVariance
#Find Variance of a given area
def findVariance(arr):
    """Sample variance (denominator n-1) of a flat sequence of numbers."""
    mean = sum(arr) / len(arr)
    squared_devs = [(value - mean) * (value - mean) for value in arr]
    return sum(squared_devs) / (len(arr) - 1)
#finds average of a given area
def findAverage(arr):
    """Arithmetic mean of a flat sequence of numbers."""
    return sum(arr) / len(arr)
#turns a given area into 1 value
def homogenize(arr):
    """Replace every pixel of a 2D block with the block's floored mean value.

    A single-row block is returned as its one row unchanged (base case used
    by quadTree). The stray debug ``print(final)`` the original emitted for
    every homogenized block has been removed.
    """
    if len(arr) == 1:
        return arr[0]
    replacement = math.floor(findAverage(flat(arr)))
    # Same-shaped block filled with the mean value.
    return [[replacement] * len(row) for row in arr]
#runs quad tree pgm compression
def quadTree(arr,threshold):
    # Recursive quad-tree compression: if a block's sample variance is at or
    # below `threshold`, collapse it to its mean via homogenize(); otherwise
    # split it into four quadrants, recurse, and stitch the results together.
    if (len(arr)==1):
        return arr
    if((findVariance(flat(arr))<=threshold)):
        return homogenize(arr)
    # Quadrant split points (floor of half width/height).
    midHorizontal=math.floor(len(arr[0])/2)
    midVertical=math.floor(len(arr)/2)
    # Top-left quadrant.
    arr1=[]
    for i in range(0,midVertical):
        pushArr = []
        for j in range(0,midHorizontal):
            pushArr.append(arr[i][j])
        arr1.append(pushArr)
    arr1=quadTree(arr1,threshold)
    # Top-right quadrant.
    arr2=[]
    for i in range(0, midVertical):
        pushArr = []
        for j in range(midHorizontal,len(arr[i])):
            pushArr.append(arr[i][j])
        arr2.append(pushArr)
    arr2 = quadTree(arr2, threshold)
    # Bottom-left quadrant.
    arr3=[]
    for i in range(midVertical,len(arr)):
        pushArr = []
        for j in range(0,midHorizontal):
            pushArr.append(arr[i][j])
        arr3.append(pushArr)
    arr3 = quadTree(arr3, threshold)
    # Bottom-right quadrant.
    arr4=[]
    for i in range(midVertical,len(arr)):
        pushArr = []
        for j in range(midHorizontal,len(arr[i])):
            pushArr.append(arr[i][j])
        arr4.append(pushArr)
    arr4 = quadTree(arr4, threshold)
    # Reassemble: concatenate each top-left row with its top-right row, then
    # each bottom-left row with its bottom-right row.
    # NOTE(review): both stitch loops run midVertical times, so for an
    # odd-height block the extra bottom row appears to be dropped -- confirm
    # inputs are expected to have even (e.g. power-of-two) dimensions.
    merged=[]
    for i in range (0,midVertical):
        pushArr= []
        pushArr.append(arr1[i])
        pushArr.append(arr2[i])
        newArr=np.array(pushArr).flatten()
        merged.append(newArr.tolist())
    for i in range (0,midVertical):
        pushArr= []
        pushArr.append(arr3[i])
        pushArr.append(arr4[i])
        newArr=np.array(pushArr).flatten()
        merged.append(newArr.tolist())
    return merged
# Run the compression on the loaded image (data2 initially aliases data;
# quadTree builds new lists, so the original is not mutated).
data2=data
#change value to produce images with more or less variance.
#the higher the number the more compressed it will be.
data2=quadTree(data2,200)
data3=np.array(data2)
#for consistency it helps to also change this number to the same as the second argument in the quadTree function used above
cv2.imwrite('baboon200.pgm',data3)
23725124846 | from converter.items import *
from .base_classes import LomBase
from .base_classes import JSONBase
import json
import logging
import requests
import html
from converter.constants import *
import scrapy
# Spider to fetch sources and tools from the WirLernenOnline WordPress REST API
class WirLernenOnlineSpider(scrapy.Spider, LomBase, JSONBase):
    """Crawls the WirLernenOnline WordPress REST API for sources ("edusource")
    and tools ("edutool") and maps each entry onto the item model.

    Fix over the original: the three bare ``except:`` clauses now catch
    ``Exception`` so KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    name = "wirlernenonline_spider"
    friendlyName = "WirLernenOnline"
    url = "https://wirlernenonline.de/"
    version = "0.1.3"
    # %type / %page are substituted in startRequest().
    apiUrl = "https://wirlernenonline.de/wp-json/wp/v2/%type/?per_page=50&page=%page"
    # Tag id -> name map, populated once in start_requests().
    keywords = {}
    # WordPress option ids -> vocabulary values, consumed by addValuespace().
    mappings = {
        'conditionsOfAccess': {
            '20': 'no_login',
            '21': 'login_for_additional_features',
            '22': 'login'
        },
        'price': {
            '30': 'no',
            '31': 'yes_for_additional',
            '32': 'yes'
        },
        'accessibilitySummary': {
            '60': 'a',
            '61': 'none',
            '62': 'invalid_value'
        },
        'dataProtectionConformity': {
            '50': 'generalDataProtectionRegulation',
            '51': 'noGeneralDataProtectionRegulation',
            '52': 'invalid_value',
        },
        'oer' : {
            '10': '0',
            '11': '1',
            '12': '2',
        }
    }

    def __init__(self, **kwargs):
        LomBase.__init__(self, **kwargs)

    def mapResponse(self, response):
        """Build the response item without re-fetching body text (API-driven crawl)."""
        r = LomBase.mapResponse(self, response, fetchData=False)
        r.replace_value("text", "")
        r.replace_value("html", "")
        r.replace_value("url", response.meta["item"].get("link"))
        return r

    def getId(self, response):
        return response.meta["item"].get("id")

    def getHash(self, response):
        # Include the crawler version so bumping it forces a re-crawl.
        return response.meta["item"].get("modified") + self.version

    def startRequest(self, type, page=1):
        """Request one page of the given post type from the WordPress API."""
        return scrapy.Request(
            url=self.apiUrl.replace("%page", str(page)).replace("%type", type),
            callback=self.parseRequest,
            headers={"Accept": "application/json", "Content-Type": "application/json"},
            meta={"page": page, "type": type},
        )

    def start_requests(self):
        # Preload the tag id -> name map used for keywords in getLOMGeneral().
        keywords = json.loads(
            requests.get(
                "https://wirlernenonline.de/wp-json/wp/v2/tags/?per_page=100"
            ).content.decode("UTF-8")
        )
        for keyword in keywords:
            self.keywords[keyword["id"]] = keyword["name"]

        yield self.startRequest("edusource")
        yield self.startRequest("edutool")

    def parseRequest(self, response):
        """Handle one API page: emit changed items, then request the next page."""
        results = json.loads(response.body)
        if results:
            for item in results:
                copyResponse = response.copy()
                copyResponse.meta["item"] = item
                if self.hasChanged(copyResponse):
                    yield self.handleEntry(copyResponse)
            # Pagination stops naturally when a page returns no results.
            yield self.startRequest(response.meta["type"], response.meta["page"] + 1)

    def handleEntry(self, response):
        return LomBase.parse(self, response)

    def getType(self, response):
        """Map the WordPress post type to the corresponding learning-resource type."""
        if response.meta["type"] == "edusource":
            return Constants.NEW_LRT_MATERIAL
        elif response.meta["type"] == "edutool":
            return Constants.NEW_LRT_TOOL
        return None

    # thumbnail is always the same, do not use the one from rss
    def getBase(self, response):
        base = LomBase.getBase(self, response)
        base.replace_value(
            "thumbnail", self.get("acf.thumbnail.url", json=response.meta["item"])
        )
        fulltext = self.get("acf.long_text", json=response.meta["item"])
        base.replace_value("fulltext", html.unescape(fulltext))
        try:
            notes = '\n'.join(list(map(lambda x: x['notes'], self.get('acf.notizen', json=response.meta["item"]))))
            base.replace_value('notes', notes)
        except Exception:
            # Notes ("notizen") are optional; missing/malformed data is ignored.
            pass
        return base

    def getLOMGeneral(self, response):
        general = LomBase.getLOMGeneral(self, response)
        general.replace_value(
            "title",
            html.unescape(
                self.get("title.rendered", json=response.meta["item"])
            ),
        )
        keywords = self.get("tags", json=response.meta["item"])
        if keywords:
            # Translate tag ids into names via the map built in start_requests().
            keywords = list(map(lambda x: self.keywords[x], keywords))
            general.add_value("keyword", keywords)
        general.add_value(
            "description",
            html.unescape(
                self.get("acf.short_text", json=response.meta["item"])
            ),
        )
        return general

    def getLOMTechnical(self, response):
        technical = LomBase.getLOMTechnical(self, response)
        technical.replace_value("format", "text/html")
        technical.replace_value(
            "location", self.get("acf.url", json=response.meta["item"])
        )
        return technical

    def getLicense(self, response):
        license = LomBase.getLicense(self, response)
        try:
            licenseId = self.get("acf.licence", json=response.meta["item"])[0]["value"]
            if licenseId == "10":
                license.add_value("oer", OerType.ALL)
            elif licenseId == "11":
                license.add_value("oer", OerType.MIXED)
            elif licenseId == "12":
                license.add_value("oer", OerType.NONE)
        except Exception:
            # License information is optional; ignore items without it.
            pass
        return license

    def getValuespaces(self, response):
        valuespaces = LomBase.getValuespaces(self, response)
        valuespaces.replace_value("new_lrt", self.getType(response))
        discipline = list(
            map(
                lambda x: x["value"],
                self.get("acf.fachgebiet", json=response.meta["item"]),
            )
        )
        valuespaces.add_value("discipline", discipline)
        lernresourcentyp = self.get("acf.lernresourcentyp", json=response.meta["item"])
        if lernresourcentyp:
            lernresourcentyp = list(map(lambda x: x["value"], lernresourcentyp))
            valuespaces.add_value("sourceContentType", lernresourcentyp)
        category = self.get("acf.category", json=response.meta["item"])
        if category:
            category = list(map(lambda x: x["value"], category))
            valuespaces.add_value("toolCategory", category)

        context = list(
            map(
                lambda x: x["value"],
                self.get("acf.schulform", json=response.meta["item"]),
            )
        )
        valuespaces.add_value("educationalContext", context)
        role = list(
            map(lambda x: x["value"], self.get("acf.role", json=response.meta["item"]))
        )
        valuespaces.add_value("intendedEndUserRole", role)

        self.addValuespace(valuespaces, 'conditionsOfAccess', 'acf.nutzung', response)
        valuespaces.add_value("containsAdvertisement", 'yes' if self.get('acf.advertisment', json=response.meta['item']) else 'no')
        self.addValuespace(valuespaces, 'price', 'acf.costs', response)
        self.addValuespace(valuespaces, 'accessibilitySummary', 'acf.accessibility', response)
        self.addValuespace(valuespaces, 'dataProtectionConformity', 'acf.dsgvo', response)
        self.addValuespace(valuespaces, 'oer', 'acf.licence', response)
        return valuespaces

    def addValuespace(self, valuespaces, key, key_wp, response):
        """Map raw WordPress option values through self.mappings[key] and add them."""
        try:
            apiData = self.get(key_wp, json=response.meta['item'])
            if not isinstance(apiData, list):
                apiData = [apiData]
            data = list(
                map(lambda x: self.mappings[key][x['value']], apiData)
            )
            valuespaces.add_value(key, data)
        except Exception:
            # Missing or unmapped fields are expected for some items; log and move on.
            logging.info('Could not map ' + key_wp + ' to ' + key + ' for item ' + str(self.getId(response)))
| openeduhub/oeh-search-etl | converter/spiders/wirlernenonline_spider.py | wirlernenonline_spider.py | py | 7,791 | python | en | code | 7 | github-code | 13 |
24853300184 | """
'Calliope'
A locally deployable Mistral7B based chatbot optimized to function as a
conversation partner and idea generator.
Adapted from the example provided at:
https://github.com/holoviz-topics/panel-chat-examples/blob/main/docs/examples/mistral/mistral_with_memory.py
To execute:
$ panel serve Calliope.py --show
"""
import panel as pn
from ctransformers import AutoConfig, AutoModelForCausalLM, Config
import pandas as pd
pn.extension(design="material")
# Initialize an empty DataFrame to store messages
messages_df = pd.DataFrame(columns=["User", "Message"])
# System prompt injected ahead of the first user turn (see apply_template).
SYSTEM_INSTRUCTIONS = "You are Calliope; your job is to aid user in generating and developing ideas, insights, plans, and creative works. Your tone should be intimate but not flirty or emotive. Above all, you should be an engaging conversation partner in whatever mode user decides. Do the following: Give short responses, always ask a question, ask user leading questions, discuss concepts adjacent to user's own statements, call back to unresolved lines of thought, and encourage lateral thinking. Do not do the following: summarize user's statement, use excessive exclamation points of emoji, regurgitate facts or instructional material unless explicitly prompted."
# Identifier recorded in the transcript for the human side of the chat.
CHAT_USER="dude@abc.com"
def apply_template(history):
    """Render chat *history* into a Mistral-instruct prompt string.

    Side effect: appends the newest user message to the global ``messages_df``
    transcript. Assistant ("Calliope") turns are excluded from the prompt; the
    system instructions are prepended to the first user turn.
    """
    global messages_df
    global CHAT_USER
    # Append the user's message to the transcript. DataFrame._append is
    # private pandas API (and the public DataFrame.append was removed in
    # pandas 2.0); pd.concat is the supported equivalent.
    messages_df = pd.concat(
        [messages_df, pd.DataFrame([{"User": CHAT_USER, "Message": history[-1].object}])],
        ignore_index=True,
    )
    # Print the updated DataFrame
    print(messages_df)
    # Keep only non-assistant turns; every remaining message is a user turn.
    history = [message for message in history if message.user != "Calliope"]
    prompt = ""
    for i, message in enumerate(history):
        if i == 0:
            prompt += f"<s>[INST]{SYSTEM_INSTRUCTIONS} {message.object}[/INST]"
        else:
            # The original also branched on `message.user == "Calliope"` here,
            # but that branch was unreachable after the filter above.
            prompt += f"""[INST]{message.object}[/INST]"""
    return prompt
async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    """Chat callback: lazily load the Mistral model once, then stream a reply.

    Yields the growing partial response so the UI updates token by token.
    """
    global messages_df
    global CHAT_USER
    if "mistral" not in llms:
        # First call: download/load the quantized model and cache it in `llms`.
        instance.placeholder_text = "Downloading model; please wait..."
        config = AutoConfig(
            config=Config(
                temperature=0.7, max_new_tokens=512, context_length=8184, top_p=0.8
            ),
        )
        llms["mistral"] = AutoModelForCausalLM.from_pretrained(
            "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
            model_file="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
            config=config,
        )

    llm = llms["mistral"]
    history = [message for message in instance.objects]
    prompt = apply_template(history)
    response = llm(prompt, stream=True)
    message = ""
    # Stream the cumulative message back to the chat UI as tokens arrive.
    for token in response:
        message += token
        yield message
# Cache of loaded LLMs keyed by model name (populated lazily in callback()).
llms = {}

chat_interface = pn.chat.ChatInterface(
    callback=callback,
    callback_user="Calliope",
)
# Greet the user without triggering the callback (respond=False).
chat_interface.send(
    "Hello, I am Calliope. What are you thinking about today?", user="Calliope", respond=False
)
chat_interface.servable()
print("Starting Calliope...")
2463789859 | from math import prod
from itertools import combinations
def domain_name(url):
    """Extract the bare domain name from a URL (drops the scheme and 'www.')."""
    parts = url.split('://')
    # With a scheme, the host follows '://'; otherwise the whole string is used.
    host = parts[1] if 'http' in parts[0] else parts[0]
    labels = host.split('.')
    return labels[1] if labels[0] == 'www' else labels[0]
def int32_to_ip(int32):
    """Convert an unsigned 32-bit integer to dotted-quad IPv4 notation."""
    octets = []
    remainder = int32
    # Peel off each octet from most to least significant.
    for place in (256 ** 3, 256 ** 2, 256, 1):
        octets.append(str(remainder // place))
        remainder %= place
    return '.'.join(octets)
def zeros(n):
    """Number of trailing zeros in n! (total multiplicity of the factor 5)."""
    count = 0
    while n:
        n //= 5
        count += n
    return count
def bananas(s):
    """All ways to cross out letters of *s* with '-' so the rest spells 'banana'."""
    found = set()
    n_dashes = len(s) - 6  # 6 == len('banana')
    for dash_positions in combinations(range(len(s)), n_dashes):
        chars = list(s)
        for pos in dash_positions:
            chars[pos] = '-'
        candidate = ''.join(chars)
        if candidate.replace('-', '') == 'banana':
            found.add(candidate)
    return found
def count_find_num(primesL, limit):
    """Count the numbers <= limit whose prime factors are exactly primesL.

    Each prime must appear at least once, and no other prime may divide the
    number. Returns [] when even the product of the primes exceeds limit,
    otherwise [how_many, largest].
    """
    base = prod(primesL)
    if base > limit:
        return []
    # Breadth-first closure: repeatedly multiply known members by each prime
    # until no new value fits under the limit.
    found = {base}
    frontier = [base]
    while frontier:
        next_frontier = []
        for value in frontier:
            for p in primesL:
                candidate = value * p
                if candidate <= limit and candidate not in found:
                    found.add(candidate)
                    next_frontier.append(candidate)
        frontier = next_frontier
    return [len(found), max(found)]
| Zheka-m-p/Python-Ylab | HomeWork1/HomeWork1.py | HomeWork1.py | py | 1,870 | python | ru | code | 0 | github-code | 13 |
35013153204 | from rpgpy import utils
from numpy.testing import assert_array_equal
import pytest
def test_rpg_seconds2date():
    # Second 0 of the RPG time format corresponds to 2001-01-01 00:00:00.
    date = utils.rpg_seconds2date(0)
    date_only = utils.rpg_seconds2date(0, date_only=True)
    res = ['2001', '01', '01', '00', '00', '00']
    assert_array_equal(date, res)
    # date_only=True drops the time-of-day components.
    assert_array_equal(date_only, res[:3])
@pytest.mark.parametrize("input, result", [
    # Epoch itself.
    (0, ['2001', '01', '01', '00', '00', '00']),
    # Ten full days plus one second.
    (24*60*60*10 + 1, ['2001', '01', '11', '00', '00', '01']),
    # Last second of the epoch day.
    (24*60*60 - 1, ['2001', '01', '01', '23', '59', '59']),
    (625107602, ['2020', '10', '23', '01', '00', '02'])
])
def test_seconds2date(input, result):
    # Seconds-since-2001 converted to a [Y, M, D, h, m, s] string list.
    assert utils.rpg_seconds2date(input) == result
| KarlJohnsonnn/rpgpy | tests/unit/test_utils.py | test_utils.py | py | 701 | python | en | code | null | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.