blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
463c0b005dd87e4d7bb757c3bc853de5bac1f441 | 26365e45a64f7809f62be4981c9f77ff8796047f | /tests/test_source.py | 2b3ce42d19be0e146ad7c5760943135307d2e6c7 | [] | no_license | gabrielelanaro/scripting | 42e79d77c9568e8b45144a7a02dbfe084cbb96b5 | 65c3a127611f15782e548189d1f29535e12da3bf | refs/heads/master | 2021-01-15T22:29:22.475259 | 2013-05-24T17:45:11 | 2013-05-24T17:45:11 | 1,071,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from scripting import source
from os.path import join as jn
import os
DATADIR = 'tests/data'
def env_test():
    # Source the shell script and verify that the variable it exports becomes
    # visible in this process's environment.
    # Depends on the fixture file tests/data/testenv.sh exporting TESTVAR.
    source(jn(DATADIR, "testenv.sh"))
    assert os.environ["TESTVAR"] == 'TESTVALUE'
| [
"gabriele.lanaro@gmail.com"
] | gabriele.lanaro@gmail.com |
fb3cd341af401e8d56a180fbc32a49b4403c813b | ce663c8e9d980367adf4fe7d3e8340bcd52cb441 | /Part-Two/app.py | 255486e4945f8955702898df88084706f78b4c52 | [] | no_license | hanyun2019/Cloud-Native-Python | 48afe58c32b5fb6cc25eb4b0550a087fc70240e2 | f360c61958f5cc9cbb4b1ad76ae57ff9c15b6065 | refs/heads/master | 2020-07-29T18:43:32.574123 | 2019-09-24T15:24:13 | 2019-09-24T15:24:13 | 209,921,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,969 | py | # Haowen Huang modified on Sep 23, 2019
# Implementation of Cloud Native web applications
# Part Two: Developing a web application with Python
#
# http://localhost:5000/
#
from flask import Flask, render_template, request, jsonify, redirect, session
from flask import abort
from flask_cors import CORS, cross_origin
from flask import make_response, url_for
import json
from time import gmtime, strftime
import sqlite3
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KT'
CORS(app)
def list_users():
    """Return a JSON payload listing every user record in the database."""
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    rows = conn.execute("SELECT username, full_name, email, password, id from users")
    # Build one dict per row via tuple unpacking instead of index lookups.
    api_list = [
        {
            'username': username,
            'name': full_name,
            'email': email,
            'password': password,
            'id': user_id,
        }
        for username, full_name, email, password, user_id in rows
    ]
    conn.close()
    return jsonify({'user_list': api_list})
def list_user(user_id):
    """Return a JSON payload for a single user row, or abort with 404.

    Args:
        user_id: integer primary key of the row in the ``users`` table.
    """
    print (user_id)
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from users where id=?", (user_id,))
    data = cursor.fetchall()
    print (data)
    if len(data) == 0:
        conn.close()  # fix: the connection used to leak on the 404 path
        abort(404)
    user = {}
    user['username'] = data[0][0]
    user['name'] = data[0][1]
    user['email'] = data[0][2]
    user['password'] = data[0][3]
    user['id'] = data[0][4]
    conn.close()
    return jsonify(user)
def list_tweet(user_id):
    """Return a JSON payload for a single tweet row, or abort with 404.

    Args:
        user_id: integer primary key of the row in the ``tweets`` table.
    """
    print (user_id)
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from tweets where id=?", (user_id,))
    data = cursor.fetchall()
    print (data)
    if len(data) == 0:
        conn.close()  # fix: the connection used to leak on the 404 path
        abort(404)
    user = {}
    user['id'] = data[0][0]
    user['username'] = data[0][1]
    user['body'] = data[0][2]
    user['tweet_time'] = data[0][3]
    conn.close()
    return jsonify(user)
def add_user(new_user):
    """Insert a new user row; abort with 409 if the username/email exists.

    Args:
        new_user: dict with 'username', 'email', 'password' and 'name' keys.

    Returns:
        The string "Success" after a committed insert.
    """
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from users where username=? or email=?", (new_user['username'], new_user['email']))
    data = cursor.fetchall()
    if len(data) != 0:
        conn.close()  # fix: the connection used to leak on the conflict path
        abort(409)
    cursor.execute("insert into users (username, email, password, full_name) values(?,?,?,?)", (new_user['username'], new_user['email'], new_user['password'], new_user['name']))
    conn.commit()
    # fix: conn.close() and a `return jsonify(a_dict)` (a_dict was undefined)
    # used to sit unreachable after an early `return "Success"`.
    conn.close()
    return "Success"
def del_user(del_user):
    """Delete a user row by username; abort with 404 if it does not exist.

    Args:
        del_user: username string identifying the row to delete.
    """
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from users where username=? ", (del_user,))
    data = cursor.fetchall()
    print ("Data" ,data)
    if len(data) == 0:
        conn.close()  # fix: the connection used to leak on the 404 path
        abort(404)
    # `==` is valid SQLite equality syntax (kept as in the original schema use).
    cursor.execute("delete from users where username==?", (del_user,))
    conn.commit()
    conn.close()  # fix: the connection was never closed on success
    return "Success"
def list_tweets():
    """Return a JSON payload with every tweet (tweetedby/body/timestamp/id).

    fix: the empty-table branch used to return a bare Python list (not a
    valid Flask response) and leaked the DB connection; the list is now
    always wrapped in jsonify and the connection always closed.
    """
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    api_list=[]
    cursor = conn.cursor()
    cursor.execute("SELECT username, body, tweet_time, id from tweets")
    data = cursor.fetchall()
    print (data)
    print (len(data))
    for row in data:
        tweets = {}
        tweets['tweetedby'] = row[0]
        tweets['body'] = row[1]
        tweets['timestamp'] = row[2]
        tweets['id'] = row[3]
        print (tweets)
        api_list.append(tweets)
    conn.close()
    print (api_list)
    return jsonify({'tweets_list': api_list})
def add_tweet(new_tweets):
    """Insert a tweet for an existing user; abort with 404 if user unknown.

    Args:
        new_tweets: dict with 'username', 'body' and 'created_at' keys.
    """
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from users where username=? ", (new_tweets['username'],))
    data = cursor.fetchall()
    if len(data) == 0:
        conn.close()  # fix: the connection used to leak on the 404 path
        abort(404)
    cursor.execute("INSERT into tweets (username, body, tweet_time) values(?,?,?)", (new_tweets['username'], new_tweets['body'], new_tweets['created_at']))
    conn.commit()
    conn.close()  # fix: the connection was never closed on success
    return "Success"
def upd_user(user):
    """Update columns of an existing user row; abort with 404 if id unknown.

    Args:
        user: dict that must contain 'id' plus column/value pairs to update.
    """
    # Column names cannot be bound as SQL parameters, so they are interpolated
    # into the statement text.  Whitelist them — the keys come straight from
    # the request JSON, so unvalidated interpolation is SQL injection.
    allowed_columns = {'username', 'email', 'password', 'full_name'}
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    cursor = conn.cursor()
    cursor.execute("SELECT * from users where id=? ", (user['id'],))
    data = cursor.fetchall()
    print (data)
    if len(data) == 0:
        conn.close()  # fix: the connection used to leak on the 404 path
        abort(404)
    for column in user.keys():
        if column == "id":
            continue
        if column not in allowed_columns:
            conn.close()
            abort(400)  # reject unknown/unsafe column names
        print (user, column)
        cursor.execute("""UPDATE users SET {0} = ? WHERE id = ?""".format(column), (user[column], user['id']))
    conn.commit()
    conn.close()
    return "Success"
def sumSessionCounter():
    """Increment the per-session hit counter, starting it at 1 if absent."""
    session['counter'] = session.get('counter', 0) + 1
@app.route('/')
def main():
    """Landing page: bump the session hit counter and render main.html."""
    sumSessionCounter()
    return render_template('main.html')
@app.route('/addname')
def addname():
    """Store ?yourname=... in the session, or show the name-entry form."""
    sumSessionCounter()
    if request.args.get('yourname'):
        session['name'] = request.args.get('yourname')
        # And then redirect the user to the main page
        return redirect(url_for('main'))
    else:
        # If no name has been sent, show the form
        return render_template('addname.html', session=session)
@app.route('/clear')
def clearsession():
    """Drop all session state (counter, name) and return to the main page."""
    # Clear the session
    session.clear()
    # Redirect the user to the main page
    return redirect(url_for('main'))
@app.route('/adduser')
def adduser():
    """Serve the static HTML form used to create a user via the JSON API."""
    return render_template('adduser.html')
@app.route('/addtweets')
def addtweetjs():
    """Serve the static HTML form used to post a tweet via the JSON API."""
    return render_template('addtweets.html')
@app.route("/api/v1/info")
def home_index():
    """Return API release metadata (build time, version, methods, links)."""
    conn = sqlite3.connect('mydb.db')
    print ("Opened database successfully");
    api_list=[]
    cursor = conn.execute("SELECT buildtime, version, methods, links from apirelease")
    for row in cursor:
        api = {}
        # fix: the SELECT order is (buildtime, version, methods, links) but
        # the original mapping swapped the first two fields.
        api['buildtime'] = row[0]
        api['version'] = row[1]
        api['methods'] = row[2]
        api['links'] = row[3]
        api_list.append(api)
    conn.close()
    return jsonify({'api_version': api_list}), 200
@app.route('/api/v1/users', methods=['GET'])
def get_users():
    """GET /api/v1/users — list all users as JSON."""
    return list_users()
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
    """GET /api/v1/users/<id> — fetch one user as JSON (404 if missing)."""
    return list_user(user_id)
@app.route('/api/v1/users', methods=['POST'])
def create_user():
    """POST /api/v1/users — create a user from a JSON body.

    Requires 'username', 'email' and 'password'; 'name' is optional.
    Responds 400 on a malformed body, 201 on success.
    """
    if not request.json or not 'username' in request.json or not 'email' in request.json or not 'password' in request.json:
        abort(400)
    user = {
        'username': request.json['username'],
        'email': request.json['email'],
        'name': request.json.get('name',""),
        'password': request.json['password']
    }
    return jsonify({'status': add_user(user)}), 201
@app.route('/api/v1/users', methods=['DELETE'])
def delete_user():
    """DELETE /api/v1/users — remove the user named in the JSON body."""
    if not request.json or not 'username' in request.json:
        abort(400)
    user=request.json['username']
    return jsonify({'status': del_user(user)}), 200
@app.route('/api/v1/users/<int:user_id>', methods=['PUT'])
def update_user(user_id):
    """PUT /api/v1/users/<id> — update columns from a JSON body."""
    # fix: restore the request validation that was commented out — without it
    # a non-JSON body makes request.json.keys() fail with a 500 instead of
    # returning a clean 400.
    if not request.json:
        abort(400)
    user = {}
    user['id'] = user_id
    for key in request.json.keys():
        user[key] = request.json[key]
    print (user)
    return jsonify({'status': upd_user(user)}), 200
@app.route('/api/v2/tweets', methods=['GET'])
def get_tweets():
    """GET /api/v2/tweets — list all tweets as JSON."""
    return list_tweets()
@app.route('/api/v2/tweets', methods=['POST'])
def add_tweets():
    """POST /api/v2/tweets — create a tweet from a JSON body.

    Requires 'username' and 'body'; the timestamp is stamped server-side
    in UTC ISO-8601 form.  Responds 400 on a malformed body, 201 on success.
    """
    user_tweet = {}
    if not request.json or not 'username' in request.json or not 'body' in request.json:
        abort(400)
    user_tweet['username'] = request.json['username']
    user_tweet['body'] = request.json['body']
    user_tweet['created_at']=strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
    print (user_tweet)
    return jsonify({'status': add_tweet(user_tweet)}), 201
@app.route('/api/v2/tweets/<int:id>', methods=['GET'])
def get_tweet(id):
    """GET /api/v2/tweets/<id> — fetch one tweet as JSON (404 if missing)."""
    return list_tweet(id)
# @auth.error_handler
# def unauthorized():
# return make_response(jsonify( { 'error': 'Unauthorized access' } ), 403)
# # return 403 instead of 401 to prevent browsers from displaying the default auth dialog
@app.errorhandler(404)
def resource_not_found(error):
    """Translate 404 aborts into a JSON error body."""
    return make_response(jsonify({'error': 'Resource not found!'}), 404)
@app.errorhandler(409)
def user_found(error):
    """Translate 409 aborts (duplicate user/email) into a JSON error body."""
    return make_response(jsonify({'error': 'Conflict! Record exist'}), 409)
@app.errorhandler(400)
def invalid_request(error):
    """Translate 400 aborts (malformed request body) into a JSON error body."""
    return make_response(jsonify({'error': 'Bad Request'}), 400)
if __name__ == '__main__':
    # Listen on all interfaces; debug=True enables the reloader and the
    # interactive debugger — not suitable for production deployments.
    app.run(host='0.0.0.0', port=5000, debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
8a1806c6d7537366e06a476aee25366fc44b0ad2 | e43f701b20769d793fe9841e1c1b495f9c49b080 | /ex33_3.py | d7853d514e3bfe263b578f3b4e9f0ec7a5bdde78 | [] | no_license | Vovchik22/Python_test | fa0b8bcfc8d71203294523ed6fb5335e441d4b70 | 6a070880ab3a9efdedc1efc98ddb56c0a9fde700 | refs/heads/master | 2021-01-01T15:21:18.156922 | 2019-06-03T12:31:59 | 2019-06-03T12:31:59 | 97,599,076 | 0 | 0 | null | 2019-06-03T12:32:00 | 2017-07-18T13:05:29 | Python | UTF-8 | Python | false | false | 284 | py | def NewNumb(first):
    i = 0
    numbers = []
    # NOTE(review): the `first` parameter is never used — the loop always
    # starts from 0 regardless of the argument passed to NewNumb.
    for i in range(0, 10):
        print(f"Start from {i}")
        numbers.append(i)
        print("Numbers now", numbers)
    print(f"Last number {i}")
    return numbers
# Collect the numbers 0..9 (the argument is ignored) and print each one.
numbers = NewNumb(1)
for num in numbers:
    print(num)
| [
"vvpodufalov@gmail.com"
] | vvpodufalov@gmail.com |
4926fdf03e813c8a831cd3f819995d1ec1bd0b20 | 1a707f6079e1ab5666e2bfec33642600db0cd1b8 | /class_attention/utils.py | 2294849eed42e6d06d2827bf8c0775e201d03c64 | [] | no_license | Guitaricet/class_attention | a862d6f4ea48a78534fc5155c1c50653280e7cb8 | 76e92f16d49b031fdba388d0dbe67340e6e35d70 | refs/heads/master | 2023-08-17T17:37:23.398281 | 2021-09-20T18:43:09 | 2021-09-20T18:43:09 | 332,091,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,871 | py | import re
import logging
import os
import random
import sys
from collections import Counter
import torch
import torch.utils.data
import numpy as np
import datasets
import tokenizers
import tokenizers.pre_tokenizers
import tokenizers.normalizers
from tokenizers.models import WordLevel
# Configure root logging once at import time: timestamped records to stdout.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
    stream=sys.stdout,
)
# Module-level logger named after this source file.
logger = logging.getLogger(os.path.basename(__file__))
def make_whitespace_tokenizer(texts, max_vocab_size=10_000, unk_token="[UNK]", pad_token="[PAD]"):
    """
    Creates a simple tokenizer that splits a lowercased string into words via whitespace.

    Args:
        texts: a collection of texts to extract vocabulary from
        max_vocab_size: maximum size of the vocabulary
        unk_token: UNK token string representation, if None, UNK is not used
        pad_token: PAD token string representation

    Returns:
        tokenizers.Tokenizer object
    """
    pre_tokenizer = tokenizers.pre_tokenizers.Whitespace()
    normalizer = tokenizers.normalizers.Lowercase()

    # Count word frequencies over the normalized, whitespace-split corpus.
    word_counts = Counter()
    for text in texts:
        normalized = normalizer.normalize_str(text)
        word_counts.update(word for word, _ in pre_tokenizer.pre_tokenize_str(normalized))

    # Reserve id 0 for PAD and, when UNK is enabled, id 1 for UNK.
    n_special = 1 if unk_token is None else 2
    token2id = {
        word: rank + n_special
        for rank, (word, _) in enumerate(word_counts.most_common(max_vocab_size))
    }
    token2id[pad_token] = 0
    if unk_token is not None:
        token2id[unk_token] = 1

    tokenizer = tokenizers.Tokenizer(WordLevel(token2id, unk_token))
    tokenizer.enable_padding(pad_token=pad_token, pad_id=0)
    tokenizer.normalizer = normalizer
    tokenizer.pre_tokenizer = pre_tokenizer
    tokenizer.pad_token_id = 0
    return tokenizer
def sample_dataset(dataset, p):
    """Return a random subsample containing a fraction ``p`` of ``dataset``.

    Mainly used for debugging and testing.  Examples are drawn without
    replacement.

    Args:
        dataset: datasets.arrow_dataset.Dataset object
        p: float, 0 < p <= 1

    Returns:
        datasets.arrow_dataset.Dataset of size len(dataset) * p
    """
    if not 0 < p <= 1:
        raise ValueError(p)

    sample_size = int(p * len(dataset))
    sampled_ids = random.sample(range(len(dataset)), sample_size)

    # Indexing with a list of ids yields a dict of columns, not a list of rows,
    # so rebuild a Dataset from that dict.
    return datasets.arrow_dataset.Dataset.from_dict(dataset[sampled_ids])
def split_classes(dataset, class_field, p_test_classes=None, test_classes=None, verbose=False):
    """
    Move classes to a class-test set (i.e. meta-test).

    All dataset examples with these classes are removed from the original dataset.
    Exactly one of ``p_test_classes`` / ``test_classes`` must be given.

    Args:
        dataset: datasets.arrow_dataset.Dataset object
        p_test_classes: 0 < float < 1, fraction of classes to hold out
        test_classes: alternative to p_test_classes, a list of classes to move to the class-test set,
            capitalization is ignored
        class_field: name of the class field in the dataset
        verbose: log splitted classes info

    Returns:
        (train_set, class_test_set)
        where both objects are ArrowDataset and all test classes are moved to class_test_set;
        class_test_set is None when p_test_classes == 0.

    Raises:
        ValueError: on missing class_field, wrong dataset type, both/neither
            split arguments given, or a p_test_classes too small to hold out
            at least one class.
    """
    if class_field is None:
        raise ValueError("class_field is required")
    if not isinstance(dataset, datasets.arrow_dataset.Dataset):
        raise ValueError(type(dataset))
    # XOR: exactly one of the two split specifications must be provided.
    if not ((p_test_classes is None) ^ (test_classes is None)):
        raise ValueError(
            "Only one of p_test_classes or test_classes should be specified. "
            f"Got p_test_classes = {p_test_classes}\n"
            f"test_classes = {test_classes}"
        )
    # p_test_classes == 0 is a no-op split: everything stays in train.
    if p_test_classes == 0:
        if test_classes is not None:
            raise ValueError("test classes should not be specified if p_test_classes=0")
        return dataset, None
    if p_test_classes is not None:
        # Sample a random subset of the observed classes to hold out.
        all_classes = list(set(dataset[class_field]))
        n_test_classes = int(len(all_classes) * p_test_classes)
        if n_test_classes == 0:
            raise ValueError(
                f"p_test_classes={p_test_classes} is too small for the dataset with {len(all_classes)} classes."
            )
        test_classes = random.sample(all_classes, k=n_test_classes)
    if verbose:
        print(f"Moving the following classes to a class-test set: {test_classes}")
    # Comparison is case-insensitive from here on.
    test_classes = {t.lower() for t in test_classes}
    train_ids, test_ids = [], []
    for i, c in enumerate(dataset[class_field]):
        if c.lower() in test_classes:
            test_ids.append(i)
        else:
            train_ids.append(i)
    test_subset = dataset[test_ids]
    train_subset = dataset[train_ids]  # NOTE: dict of lists, not a list of dicts
    # Sanity check: every held-out class must actually appear in the test split.
    assert set(test_classes) == set(c.lower() for c in test_subset[class_field])
    test_dataset = datasets.arrow_dataset.Dataset.from_dict(test_subset)
    train_dataset = datasets.arrow_dataset.Dataset.from_dict(train_subset)
    return train_dataset, test_dataset
def monospace_html(text):
    """Wrap ``text`` in code/pre tags so it renders as monospace HTML."""
    return "<code><pre>{}</code></pre>".format(text)
def get_dataset_by_name_or_path(name_or_path):
    """Load a dataset from a local path first, then fall back to the hub name."""
    for loader in (datasets.load_from_disk, datasets.load_dataset):
        try:
            return loader(name_or_path)
        except FileNotFoundError:
            continue
    raise ValueError(f"The dataset {name_or_path} wasn't found locally or downloaded")
def infinite_iterator(iterable):
    """Yield items from ``iterable`` forever, restarting it when exhausted.

    DataLoaders must use random sampling so that every pass is reshuffled.
    """
    if isinstance(iterable, torch.utils.data.DataLoader):
        uses_random_sampler = isinstance(
            iterable.sampler, torch.utils.data.sampler.RandomSampler)
        if not uses_random_sampler:
            raise RuntimeError("this dataloader should use random sampling")
    while True:
        yield from iterable
def filter_words(words, extra_filter=None):
    """Keep words that are not pure digits, are longer than two characters,
    and contain no apostrophe; optionally apply ``extra_filter`` on top."""
    def keep(word):
        return not word.isdigit() and len(word) > 2 and "'" not in word

    kept = [word for word in words if keep(word)]
    if extra_filter is None:
        return kept
    return [word for word in kept if extra_filter(word)]
def infer_field_names(dataset_name, text_field=None, class_field=None):
    """Return (text_field, class_field), inferring them for known datasets.

    Explicitly supplied fields win; otherwise the dataset name is matched
    against known dataset families.
    """
    if (text_field is None) ^ (class_field is None):
        raise ValueError("--text-field and --class-field need to be provided together")
    if text_field is not None:
        return text_field, class_field

    known_fields = [
        (("news-category",), ("headline", "category")),
        (("emotion", "topic"), ("text", "category")),
    ]
    for substrings, fields in known_fields:
        if any(marker in dataset_name for marker in substrings):
            return fields
    raise ValueError(f"Cannot infer field names from the dataset `{dataset_name}`")
def encode_classes(classes_str, label_tokenizer):
    """Tokenize class-name strings into a padded tensor of input ids.

    Args:
        classes_str: list of class-name strings.
        label_tokenizer: a HuggingFace-style tokenizer with batch_encode_plus.

    Returns:
        The "input_ids" tensor of the batch encoding (torch tensors requested).
    """
    return label_tokenizer.batch_encode_plus(
        classes_str,
        return_tensors="pt",
        add_special_tokens=True,
        padding=True,
    )["input_ids"]
def get_cced(model, train_classes_str, test_classes_str, label_tokenizer, device):
    """Distance between the centroids of train and test class embeddings."""
    was_training = model.training
    model.eval()
    train_h, test_h = get_class_vectors(
        model, train_classes_str, test_classes_str, label_tokenizer, device
    )
    if was_training:
        model.train()

    # Euclidean distance between the mean embedding of each class set.
    return torch.dist(train_h.mean(dim=0), test_h.mean(dim=0))
def get_rmasp(model, train_classes_str, test_classes_str, label_tokenizer, device="cpu"):
    """Root mean absolute scalar product between train and test class vectors."""
    was_training = model.training
    model.eval()
    train_h, test_h = get_class_vectors(
        model, train_classes_str, test_classes_str, label_tokenizer, device
    )
    if was_training:
        model.train()

    scalar_products = train_h @ test_h.T
    return scalar_products.abs().mean().sqrt()
def get_class_vectors(model, train_classes_str, test_classes_str, label_tokenizer, device):
    """Embed train and test class names with ``model``.

    A dummy one-token text input is fed alongside the class ids because the
    model's forward pass requires a text batch even when only the class
    embeddings are needed.

    Returns:
        (train_classes_h, test_classes_h) — NOTE(review): assumes the third
        element of the model's return_embeddings=True output is the class
        embedding matrix; confirm against the model implementation.
    """
    train_classes_ids = encode_classes(train_classes_str, label_tokenizer).to(device)
    test_classes_ids = encode_classes(test_classes_str, label_tokenizer).to(device)
    # 5 because it is not a special token and because it is small
    fake_text_ids = torch.LongTensor([[5]]).to(device)  # (batch=1, seq=1)
    _, _, train_classes_h = model(
        text_input=fake_text_ids, labels_input=train_classes_ids, return_embeddings=True
    )
    _, _, test_classes_h = model(
        text_input=fake_text_ids, labels_input=test_classes_ids, return_embeddings=True
    )
    return train_classes_h, test_classes_h
# source: https://discuss.pytorch.org/t/how-to-get-the-row-index-of-specific-values-in-tensor/28036/7
def get_index(unique_tensors, instances):
assert unique_tensors.shape[1] == instances.shape[1]
diff = instances.unsqueeze(1) - unique_tensors.unsqueeze(0)
dsum = torch.abs(diff).sum(-1)
loc = torch.nonzero(dsum <= 1e-4) # -4 because of fp16
return loc[:, -1]
def get_difference(t1, t2):
    """Compute the row-wise set difference t1 / t2 (rows of t1 absent from t2)."""
    # (n1, n2) boolean matrix: row i of t1 equals row j of t2 element-wise.
    row_equal = (t1.unsqueeze(1) == t2).all(-1)
    present_in_t2 = row_equal.any(-1)
    return t1[~present_in_t2]
def batch_to_html(text_ids, label_ids, targets, text_tokenizer, label_tokenizer):
    """Render a (text batch, aligned label batch) pair as a debug HTML page.

    Args:
        text_ids: tensor of token ids for the text batch.
        label_ids: tensor of token ids, one row per unique class in the batch.
        targets: index tensor selecting, per example, its row in label_ids.
        text_tokenizer / label_tokenizer: tokenizers used to decode the ids.
    """
    # Align labels with examples: one label row per example, in batch order.
    aligned_labels = label_ids.index_select(0, targets)
    x_text = text_tokenizer.batch_decode(text_ids, skip_special_tokens=True)
    c_text = label_tokenizer.batch_decode(aligned_labels, skip_special_tokens=True)
    # Number each decoded entry so text i visually pairs with label i.
    x_text = "".join(f"\n\n\t{i}:" + t for i, t in enumerate(x_text))
    c_text = "".join(f"\n\t{i}:" + t for i, t in enumerate(c_text))
    batch_html = "<h1> Text batch </h1>\n" + monospace_html(x_text) + "<h1> Label batch </h1>" + monospace_html(c_text)
    return batch_html
| [
"guitaricet@gmail.com"
] | guitaricet@gmail.com |
f11ac19addca9963ce504f52f1a6b9a5d4f7bb9e | 3e035bf4561508b64be7d5559ec643d421372b58 | /matthew-lavin/app/application/ml/__init__.py | b61c4b1c943c64d8dc2bc395d1d5d91780ea31a1 | [] | no_license | rebeccamlee/lavin_webserver | ac88e7b983eb9bd535ea6d30f3eb0370d1980e36 | ddd67f8ed53a2fa8973ef9a020702f5826d87831 | refs/heads/master | 2020-04-30T04:16:40.797483 | 2019-03-19T20:35:25 | 2019-03-19T20:35:25 | 176,607,498 | 0 | 0 | null | 2019-03-19T22:21:16 | 2019-03-19T22:21:15 | null | UTF-8 | Python | false | false | 588 | py | from flask import Blueprint, render_template
from application.models import *
ml_blueprint = Blueprint('ml', __name__, template_folder='templates')
@ml_blueprint.route("/teaching")
@ml_blueprint.route("/teaching/")
def teaching():
data = StaticPage.query.filter(StaticPage.route == "teaching").one_or_none()
return render_template('teaching.html', data=data)
@ml_blueprint.route("/projects")
@ml_blueprint.route("/projects/")
def projects():
data = StaticPage.query.filter(StaticPage.route == "projects").one_or_none()
return render_template('projects.html', data=data) | [
"lavin@pitt.edu"
] | lavin@pitt.edu |
a8ab2cbb290117a33ace9548c6ae50c5e155ace2 | e2181e9439866cbf1455fdff66e4e27879eb034e | /src_my/managescripts/reloadconfig.py | d348351348ed9760f844c5af067470af54035a0e | [] | no_license | lineCode/game_server-1 | 3fb841d3a4973d47e9c2692bbf9b1e440c499e5d | d9178ee8a0a504e5873bee3616d323056c9c9764 | refs/heads/master | 2020-05-19T08:01:02.835874 | 2019-05-03T11:10:57 | 2019-05-03T11:10:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | #!/usr/bin/env python
#!-*- coding:utf-8 -*-
import os, re, sys
import subprocess
import getopt
import signal
# Flags toggled by the command-line options below:
# 1 means "send the reload signal to that daemon".
logind = 0
scened = 0
gmd = 0
# Run from the parent of the scripts directory so the relative
# "pids" directory used by start() resolves correctly.
child_path = os.getcwd()
parent_path = os.path.dirname(child_path)
os.chdir(parent_path)
def relogconfig_help():
    """Print the (Chinese-language) usage text and terminate the process.

    Uses os._exit(1) so no cleanup handlers run.  Python 2 print syntax.
    """
    print "\n relogconfig使用说明"
    print "  %-20s说明选项" %"-h"
    print "  %-20s表示logind进程重新加载配置, 选项后面不需要加任何参数" %"--logind"
    print "  %-20s表示scened进程重新加载配置, 选项后面不需要加任何参数" %"--scened"
    print "  %-20s表示gmd进程重新加载配置, 选项后面不需要加任何参数" %"--gmd"
    os._exit(1)
def read_file(file_name):
    """Return the entire contents of ``file_name`` as one string."""
    with open(file_name, 'r') as handle:
        return handle.read()
def start():
    """Send SIGUSR1 to each selected daemon so it reloads its configuration.

    Reads each daemon's pid from the pids/ directory next to this script.
    NOTE(review): daemons are assumed to trap SIGUSR1 as a "reload config"
    request — confirm in the daemon sources.  If the pids directory does
    not exist, the function silently does nothing.
    """
    # No option selected at all: show usage and exit.
    if logind == 0 and scened == 0 and gmd == 0:
        relogconfig_help()
    if os.path.exists(parent_path + "/pids"):
        os.chdir(parent_path + "/pids")
        # Redundant re-check (already handled above); kept for safety.
        if logind == 0 and scened == 0 and gmd == 0:
            relogconfig_help()
            os._exit(1)
        if logind == 1:
            if os.path.exists("logind_pid"):
                read_pid = read_file("logind_pid")
                os.kill(int(read_pid), signal.SIGUSR1)
        if scened == 1:
            if os.path.exists("scened_pid"):
                read_pid = read_file("scened_pid")
                os.kill(int(read_pid), signal.SIGUSR1)
        if gmd == 1:
            if os.path.exists("gmd_pid"):
                read_pid = read_file("gmd_pid")
                os.kill(int(read_pid), signal.SIGUSR1)
# Parse command-line options; any of --logind/--scened/--gmd selects the
# corresponding daemon for a config reload.  "-h" is accepted but has no
# handler, so it simply falls through to start(), which prints usage when
# no daemon flag is set.
try:
    opts, args = getopt.getopt(sys.argv[1:], "h", ["logind", "scened", "gmd"])
    for op, value in opts:
        if op == "--logind":
            logind = 1
        elif op == "--scened":
            scened = 1
        elif op == "--gmd":
            gmd = 1
# NOTE(review): bare except swallows every error (not only getopt errors)
# and reports it as a bad-argument message.
except:
    print "\n参数错误"
    relogconfig_help()
start()
| [
"kilen@localhost.localdomain"
] | kilen@localhost.localdomain |
180d8b2c616d975e03c508cf26d2054234cf6aa2 | 7225f1f2942ed76038d5b0e2b7e1590b52468f1e | /wright-fischer.py | 4c5891de339abd2813f2e60c5451c254c81a7c50 | [] | no_license | badain/bioEvol | db8bab494099a9412f30f6e989a36c5e9b6a5f86 | 3a2dcf81729cbd9517b09a160d5c77a965beab42 | refs/heads/main | 2023-03-02T06:10:19.672407 | 2021-02-09T00:47:31 | 2021-02-09T00:47:31 | 313,290,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | #############################################################
# Description: simulates the stochastic Wright-Fisher model of genetic
#              drift for the transmission of gene copies
# Usage:       python wright-fischer.py N G P S [--v]
#              N individuals, G extra generations, P populations per
#              simulation, S simulations
#
# Pre-Condition:  N, G, P, S are positive integers
# Post-Condition: when P > 1, prints counts of simulations whose top-2%
#                 most frequent alleles agree across populations
#
# Author: Rafael Badain @ University of Sao Paulo
#############################################################
### Dependencies
import argparse
import random # designed for modelling and simulation, not security or cryptography
import array # takes less space since supports only one datatype
import math # ceil()
import collections
from collections import Counter
### Argument Parsing
parser = argparse.ArgumentParser(description='Plots a graphical representation of a stochastic distribution.')
parser.add_argument('n', type=int, metavar='N', help='number of individuals')
parser.add_argument('g', type=int, metavar='G', help='number of aditional generations')
parser.add_argument('p', type=int, metavar='P', help='number of generated populations')
parser.add_argument('s', type=int, metavar='S', help='number of simulations')
parser.add_argument('--v', '--verbose', default=False, action='store_true', help='prints every generation',)
args = parser.parse_args()
### Population Generation
# Diploid population: each of the n individuals carries 2 gene copies.
double_n = 2*args.n
similar = {"true": 0, "false": 0}
for simulation in range(args.s):
    popularity = [None] * args.p
    for population in range(args.p):
        ## Generates initial population
        generation_parent = array.array('I') # unsigned int
        for i in range(double_n):
            generation_parent.append(i) # fill with 2n initially distinct alleles
        if(args.v):print("Population: "+str(population+1)+" N: "+str(args.n)+"\n"+str(generation_parent))
        # NOTE(review): if G == 0 this loop never runs and popular_aleles
        # below is undefined (NameError); also the outer loop variable `i`
        # is shadowed by the inner allele-sampling loop.
        for i in range(args.g):
            ## Generates Next Generation
            generation_child = array.array('I') # unsigned int
            popular_aleles = Counter()
            # Allele draw: sample 2n parents uniformly, with replacement.
            for i in range(double_n):
                viable_alele = random.choice(range(double_n)) # pick a parent index at random
                generation_child.append(generation_parent[viable_alele]) # copy the drawn allele into the next generation
                popular_aleles[generation_parent[viable_alele]] += 1 # count allele occurrences
            generation_parent = generation_child # generations are discrete (non-overlapping)
            if(args.v):print(generation_parent, popular_aleles)
            if(len(popular_aleles) == 1): break
        popularity[population] = popular_aleles
    ### Similarity Evaluation
    # Similarity measure: the top 2% most frequent alleles must match.
    podium = math.ceil(0.02 * double_n)
    if(args.v):print("Medida de Similaridade: "+str(podium))
    # Extract the most popular alleles of each population.
    top_aleles = [None] * podium # most popular alleles of one population
    population_top_aleles = [None] * args.p # per-population lists of top alleles
    for i in range(args.p):
        top_aleles = [None] * podium
        for j in range(podium):
            top_aleles[j] = popularity[i].most_common(podium)[j][0] # take the top-x% alleles
        population_top_aleles[i] = top_aleles
        if(args.v):print("Pop "+str(i)+": "+str(top_aleles))
    # Compare the top alleles of the first two populations (order-insensitive).
    if args.p > 1:
        if collections.Counter(population_top_aleles[0]) == collections.Counter(population_top_aleles[1]):
            similar["true"] += 1
            if(args.v):print("True")
        else:
            similar["false"] += 1
            if(args.v):print("False")
if args.p > 1:
    print(similar)
    # NOTE(review): raises ZeroDivisionError when no dissimilar pair occurred.
    print(similar["true"] / similar["false"])
| [
"rafabadain@gmail.com"
] | rafabadain@gmail.com |
db359bbb0cdf7ab15ded08b84fa9757efd26f9d2 | 8d6e6c91a1b7aa3cadc1b0549805f92f9121aa9e | /python/ray/util/sgd/v2/trainer.py | ec5a08bd07ddc02addf4ec8338d06d80b751f26b | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | DavidRother/ray | 9da5d75132960d9c73de75fc581c7dafce0123b3 | e53aeca6bb156f6a5621afdc85148d17c3820948 | refs/heads/master | 2023-07-05T17:24:25.579997 | 2021-08-12T14:53:42 | 2021-08-12T14:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,781 | py | import inspect
import logging
from typing import Union, Callable, List, TypeVar, Optional, Any, Dict
from ray.tune import Trainable
from ray.util.sgd.v2.backends.backend import BackendConfig, BackendExecutor, \
InactiveWorkerGroupError, SGDBackendError
from ray.util.sgd.v2.backends.tensorflow import TensorflowConfig
from ray.util.sgd.v2.backends.torch import TorchConfig
from ray.util.sgd.v2.callbacks.callback import SGDCallback
T = TypeVar("T")
S = TypeVar("S")
logger = logging.getLogger(__name__)
BACKEND_NAME_TO_CONFIG_CLS = {
"tensorflow": TensorflowConfig,
"torch": TorchConfig
}
class Trainer:
"""A class for enabling seamless distributed deep learning.
Args:
backend (Union[str, BackendConfig]): The backend used for
distributed communication. If configurations are needed,
a subclass of ``BackendConfig`` can be passed in.
Supported ``str`` values: {"torch"}.
num_workers (int): The number of workers (Ray actors) to launch.
Defaults to 1. Each worker will reserve 1 CPU by default.
use_gpu (bool): If True, training will be done on GPUs (1 per
worker). Defaults to False.
resources_per_worker (Optional[Dict]): If specified, the resources
defined in this Dict will be reserved for each worker.
"""
    def __init__(self,
                 backend: Union[str, BackendConfig],
                 num_workers: int = 1,
                 use_gpu: bool = False,
                 resources_per_worker: Optional[Dict[str, float]] = None):
        """A class for distributed training.

        Args:
            backend (Union[str, BackendConfig]): The backend used for
                distributed communication. If configurations are needed,
                a subclass of ``BackendConfig`` can be passed in.
                Supported ``str`` values: {"torch"}.
            num_workers (int): The number of workers (Ray actors) to launch.
                Defaults to 1. Each worker will reserve 1 CPU by default.
            use_gpu (bool): If True, training will be done on GPUs (1 per
                worker). Defaults to False.
            resources_per_worker (Optional[Dict]): If specified, the resources
                defined in this Dict will be reserved for each worker.
                Currently unsupported: passing a non-empty dict raises
                NotImplementedError.
        """
        # Setup executor.
        backend_config = self._get_backend_config(backend)
        if resources_per_worker:
            raise NotImplementedError("`resources_per_worker` argument is not "
                                      "supported yet.")
        # NOTE(review): positional args presumably map to (config,
        # num_workers, num_cpus_per_worker, num_gpus_per_worker) — confirm
        # against BackendExecutor's signature.
        self._executor = BackendExecutor(backend_config, num_workers, 1,
                                         int(use_gpu))
def _get_backend_config(
self, backend: Union[str, BackendConfig]) -> BackendConfig:
"""Gets the ``BackendConfig`` to use for training.
Args:
backend (Union[str, BackendConfig]): If a ``BackendConfig`` is
passed in, then it will also be returned. If a ``str`` is
passed in, then the default config for that backend will be
returned.
Returns:
The ``BackendConfig`` that will be used to set up the
``BackendExecutor``.
"""
if isinstance(backend, BackendConfig):
return backend
elif isinstance(backend, str):
try:
return BACKEND_NAME_TO_CONFIG_CLS[backend]()
except KeyError:
raise ValueError(f"Invalid backend: {backend}. "
f"Supported string values are: "
f"{BACKEND_NAME_TO_CONFIG_CLS.keys()}")
else:
raise TypeError(f"Invalid type for backend: {type(backend)}.")
    def start(self,
              initialization_hook: Optional[Callable[[], None]] = None,
              train_cls: Optional[S] = None,
              *args,
              **kwargs):
        """Starts the training execution service.

        Args:
            initialization_hook (Optional[Callable]): The function to call on
                each worker when it is instantiated.
            train_cls (Optional[cls]): The training class that each worker
                should be instantiated as.
            args, kwargs: The arguments to pass into ``train_cls.__init__``.

        NOTE(review): ``train_cls`` and ``*args``/``**kwargs`` are accepted
        but not used by this implementation — only the initialization hook
        is forwarded to the executor.
        """
        self._executor.start(initialization_hook)
def run(self,
        train_func: Union[Callable[[], T], Callable[[Dict[str, Any]], T]],
        config: Optional[Dict[str, Any]] = None,
        callbacks: Optional[List[SGDCallback]] = None) -> List[T]:
    """Runs a training function in a distributed manner.
    Args:
        train_func (Callable): The training function to execute.
            This can either take in no arguments or a ``config`` dict.
        config (Optional[Dict]): Configurations to pass into
            ``train_func``. If None then an empty Dict will be created.
        callbacks (Optional[List[SGDCallback]]): A list of Callbacks which
            will be executed during training. If this is not set,
            currently there are NO default Callbacks.
    Returns:
        A list of results from the training function. Each value in the
        list corresponds to the output of the training function from
        each worker.
    """
    # Normalize to a zero-arg callable (binds ``config`` if needed).
    train_func = self._get_train_func(train_func, config)
    # TODO(matt): Set default callbacks.
    callbacks = [] if callbacks is None else callbacks
    # Tracked so finish_training callbacks can tell success from failure.
    finished_with_errors = False
    try:
        for callback in callbacks:
            callback.start_training()
        self._executor.start_training(train_func)
        # Pump intermediate results to the callbacks until the executor
        # signals completion by returning None.
        while True:
            intermediate_results = self._executor.fetch_next_result()
            if intermediate_results is None:
                break
            else:
                for callback in callbacks:
                    callback.handle_result(intermediate_results)
        return self._executor.finish_training()
    except InactiveWorkerGroupError:
        finished_with_errors = True
        # ``from None`` hides the internal executor traceback from users.
        raise RuntimeError(
            "This Trainer is not active. It is either shutdown already or "
            "never started in the first place. Either create a new "
            "Trainer or start this one.") from None
    except SGDBackendError:
        finished_with_errors = True
        raise RuntimeError("Training failed. You should not be seeing "
                           "this error and this is a bug. Please create "
                           "a new issue at "
                           "https://github.com/ray-project/ray.") from None
    finally:
        # Callbacks are always torn down, even on failure.
        for callback in callbacks:
            callback.finish_training(error=finished_with_errors)
def _get_train_func(
        self,
        train_func: Union[Callable[[], T], Callable[[Dict[str, Any]], T]],
        config: Optional[Dict[str, Any]]) -> Callable[[], T]:
    """Validate *train_func* and return a zero-argument callable.

    A function taking one parameter gets ``config`` (or an empty dict
    when ``config`` is None) bound to it; a zero-parameter function is
    returned untouched.

    Returns:
        A valid training function.

    Raises:
        ValueError: if ``train_func`` takes more than one argument.
    """
    param_count = len(inspect.signature(train_func).parameters)
    if param_count == 0:
        return train_func
    if param_count == 1:
        resolved_config = config if config is not None else {}
        return lambda: train_func(resolved_config)
    raise ValueError("train_func should take in a 0 or 1 arguments.")
def execute(self, func: Callable[..., T], *args, **kwargs) -> List[T]:
    """Executes a function for all instances of ``self.train_cls``.
    Args:
        func (Callable): The function that should be executed.
            The first argument should be an instance of
            ``self.train_cls``.
        args, kwargs: The arguments to pass into ``func``.
    Returns:
        A list of results from ``func``. Each value in the
        list corresponds to the output of ``func`` from
        each worker.
    """
    # Placeholder for the class-based Trainer API; not implemented yet.
    raise NotImplementedError
def execute_single(self, func: Callable[..., T], *args, **kwargs) -> T:
    """Executes a function on a single instance of ``self.train_cls``.
    Args:
        func (Callable): The function that should be executed.
            The first argument should be an instance of
            ``self.train_cls``.
        args, kwargs: The arguments to pass into ``func``.
    Returns:
        The output of ``func`` from a single worker.
    """
    # Placeholder for the class-based Trainer API; not implemented yet.
    raise NotImplementedError
def shutdown(self):
    """Shuts down the training execution service."""
    # Delegates teardown of all workers to the backend executor.
    self._executor.shutdown()
def to_tune_trainable(
        self, train_func: Callable[[Dict[str, Any]], T]) -> Trainable:
    """Creates a Tune ``Trainable`` from the input training function.
    Args:
        func (Callable): The function that should be executed on each
            training worker.
    Returns:
        A Trainable that can directly be passed into ``tune.run()``.
    """
    # Placeholder: the wrapper below is defined but never wired up, and
    # the method currently always raises.
    def trainable_func(config: Dict[str, Any]) -> T:
        pass
    raise NotImplementedError
| [
"noreply@github.com"
] | noreply@github.com |
9eb17bced61c9637bdc0115f1027ff7430200168 | 1ea0dc0c5d5c0fa54907a31e637344081dd5e9a8 | /Daily_Stock_Prices.py | d0018a98d220f74d1a80586e5b64350e5f534c50 | [] | no_license | Row-Bell/Python_Basic | 7b77411542fc33868211c9b14ab220050ee25dcb | 929bb2d8dff0de00b43dfe66e697f9bd5fa42636 | refs/heads/main | 2023-02-19T02:19:47.078071 | 2021-01-17T03:18:17 | 2021-01-17T03:18:17 | 325,171,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,310 | py | '''import requests
import pandas as pd
import matplotlib.pyplot as plt
#Enter tge ticker of the companies that you want to analyse
companies = ['AAPL', 'FB', 'GOOG', 'F', 'TSLA']
#empty list to add each of the companies
listofdf = []
#API end point request
#requests.get(f"https://financialmodelingprep.com/api/v3/historical-price-full/aapl?serietype=line")
#For loop for AAPL
for item in companies:
histprices = requests.get(f"https://financialmodelingprep.com/api/v3/historical-price-full/{item}?serietype=line")
##convert response to json
histprices = histprices.json()
##Parse the API response and select only last 600 days of prices
#histprices = histprices["historical"][-600:]
##Convert from dict to pandas datafram
histpricesdf = pd.DataFrame.from_dict(histprices)
##rename column from close to the name of the company
histpricesdf = histpricesdf.rename({'close': item}, axis=1)
##append all dfs to list
listofdf.append(histpricesdf)
#Set index of each DataFrame by common column before concatinating them
dfs = [df.set_index('date') for df in listofdf]
histpriceconcat = pd.concat(dfs, axis=1)
#Plotting the stocks
for i, col in enumerate(histpriceconcat.columns):
histpriceconcat[col].plot()
plt.title('Price Evolution Comparison')
plt.xticks(rotation=70)
plt.legend(histpriceconcat.columns)
#Saving the graph into a JPG file
plt.savefig('fool.png', bbox_inches='tight')
# Import the yfinance. If you get module not found error the run !pip install yfinance from your Jupyter notebook
import yahoofinance as yf
# Get the data for the stock AAPL
data = yf.download('AAPL','2016-01-01','2019-08-01')
# Import the plotting library
import matplotlib.pyplot as plt
# Plot the close price of the AAPL
data['Adj Close'].plot()
plt.show()'''
#################################################
# Tutorial-style script: download daily prices, resample to business days,
# and plot MSFT with 20/100-day moving averages.
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
# Define the instruments to download. We would like to see Apple, Microsoft and the S&P500 index.
# NOTE(review): this list is never used -- the DataReader call below
# downloads 'INPX' instead; confirm which ticker set is intended.
tickers = ['AAPL', 'MSFT', '^GSPC']
# We would like all available data from 01/01/2000 until 12/31/2016.
start_date = '2010-01-01'
end_date = '2016-12-31'
# User pandas_reader.data.DataReader to load the desired data. As simple as that.
# NOTE(review): the 'google' data source has been unreliable/removed in
# recent pandas-datareader releases -- verify it still works.
panel_data = data.DataReader('INPX', 'google', start_date, end_date)
# NOTE(review): the result of this expression is discarded (no-op in a script).
panel_data.to_frame().head(9)
# Getting just the adjusted closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data['Close']
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
# Reindexing will insert missing values (NaN) for the dates that were not present
# in the original set. To cope with this, we can fill the missing by replacing them
# with the latest available price for each instrument.
close = close.fillna(method='ffill')
print(all_weekdays)
# DatetimeIndex(['2010-01-01', '2010-01-04', '2010-01-05', '2010-01-06',
# '2010-01-07', '2010-01-08', '2010-01-11', '2010-01-12',
# '2010-01-13', '2010-01-14',
# ...
# '2016-12-19', '2016-12-20', '2016-12-21', '2016-12-22',
# '2016-12-23', '2016-12-26', '2016-12-27', '2016-12-28',
# '2016-12-29', '2016-12-30'],
# dtype='datetime64[ns]', length=1826, freq='B')
# NOTE(review): the next two results are also discarded in script mode.
close.head(10)
close.describe()
# Get the MSFT timeseries. This now returns a Pandas Series object indexed by date.
# NOTE(review): this assumes `close` is a DataFrame with an 'MSFT' column,
# which is inconsistent with the single-ticker 'INPX' download above.
msft = close.loc[:, 'MSFT']
# Calculate the 20 and 100 days moving averages of the closing prices
short_rolling_msft = msft.rolling(window=20).mean()
long_rolling_msft = msft.rolling(window=100).mean()
# Plot everything by leveraging the very powerful matplotlib package
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(msft.index, msft, label='MSFT')
ax.plot(short_rolling_msft.index, short_rolling_msft, label='20 days rolling')
ax.plot(long_rolling_msft.index, long_rolling_msft, label='100 days rolling')
ax.set_xlabel('Date')
ax.set_ylabel('Adjusted closing price ($)')
ax.legend() | [
"robel.arega2@gmail.com"
] | robel.arega2@gmail.com |
7e7b0845eb0bd1cb2e431181c32e85ab8d805b9f | 6da32e0b0694faf8db43baf617be428a7a547009 | /antzodpars.py | 12b493892b064ff6401f886583ac9612b9fd4d78 | [] | no_license | hotgulabjamun/morinus-console | f603cbb691ebe7e31c9c744af6cd4aa70d8740a7 | b73106824b579a863691e858980882d5bc72c510 | refs/heads/master | 2023-04-12T16:43:28.938835 | 2021-05-08T19:03:34 | 2021-05-08T19:03:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import astrology
import planets
import zodparsbase
class AntZodPars(zodparsbase.ZodParsBase):
    """Computes zodiacal parallels of the antiscia of the planets."""

    # The lunar nodes occupy the last two slots of the planet list and are
    # excluded from the scan (as in the original per-loop NODES constant).
    NODES = 2

    def __init__(self, ant, cant, obl):
        """Compute parallels for both position sets.

        Args:
            ant: antiscia positions, indexable by planet id; each entry
                exposes ``.lon``, ``.lat`` and ``.decl``.
            cant: contra-antiscia positions, same layout as ``ant``.
            obl: obliquity of the ecliptic, forwarded to the base class.
        """
        zodparsbase.ZodParsBase.__init__(self, obl)
        self.ant = ant
        self.cant = cant
        self.apars = []
        self.cpars = []
        self.calc()

    def calc(self):
        """Fill ``self.apars``/``self.cpars`` with the ecliptic points.

        Rebinds the lists (rather than appending), so calling calc() a
        second time no longer duplicates entries.
        """
        self.apars = self._collect(self.ant)
        self.cpars = self._collect(self.cant)

    def _collect(self, positions):
        """Return the ecliptic parallel points for one set of positions."""
        pars = []
        for p in range(planets.Planets.PLANETS_NUM - self.NODES):  # Nodes are excluded
            pos = positions[p]
            # The Sun is taken to be on the ecliptic; other bodies only
            # when their latitude is exactly zero.
            onEcl = (p == astrology.SE_SUN or pos.lat == 0.0)
            pars.append(self.getEclPoints(pos.lon, pos.decl, onEcl))
        return pars
self.cpars.append(self.getEclPoints(lon, decl, onEcl))
| [
"pablo@citla.com"
] | pablo@citla.com |
7537b54bab44dc8f46b1c1c38c0d6b02d131616e | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /public_hand_or_big_woman.py | a6465a3098d82b969e3ffb571a87aeeb368e3bf7 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
#! /usr/bin/env python
def be_next_work(str_arg):
week(str_arg)
print('woman_or_time')
def week(str_arg):
print(str_arg)
if __name__ == '__main__':
be_next_work('year')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
e94468310cfc2f6d71923509ae21a08b9fac3ab2 | a1b2b0f7a91182577705b11dad7e5029d2a2ec81 | /manage.py | cef83f2950c038a518398d78b88688fc3a93ce3b | [] | no_license | Artur-Sukhytskyi/Django-API-Development | cb50129e97e8d386343c69027009984da10fdc5e | ad9c0e81d81703f606991787603a9513ef20ad12 | refs/heads/master | 2023-08-13T18:40:51.740771 | 2020-11-25T11:00:14 | 2020-11-25T11:00:14 | 267,881,206 | 0 | 0 | null | 2021-09-22T19:07:09 | 2020-05-29T14:41:11 | Python | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks (standard manage.py entry point)."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; the original error stays chained.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the subcommand named on the command line.
    execute_from_command_line(sys.argv)
# Standard Django entry point: only run the CLI when executed directly.
if __name__ == '__main__':
    main()
"noreply@github.com"
] | noreply@github.com |
d20616418c9031d8b31aa97301c3f90a2b3c7b2d | 08a498fe90e43ab0647c472f195c654e8f2728a3 | /parsec/node.py | e09ba663d593976a530d967b3047cbc915a08e1e | [] | no_license | ftkalcevic/flat_file_sql | 91637d7238df0f842ad1dd55e346c1fa4a8c7608 | 23b93d8c30c9475eafbc6dedea6d74ca546d7ed3 | refs/heads/master | 2023-04-20T18:23:19.385207 | 2021-05-09T20:50:12 | 2021-05-09T20:50:12 | 356,706,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # From https://github.com/mastermay/sql-parser/tree/master/py-sql-parser
class node:
    """A simple n-ary tree node used to build SQL parse trees."""

    def __init__(self, data):
        self._data = data
        self._children = []

    def getdata(self):
        """Return the payload stored in this node."""
        return self._data

    def getchildren(self):
        """Return the (mutable) list of child nodes."""
        return self._children

    def add(self, node):
        """Append *node* as the last child of this node.

        (The parameter name shadows the class; kept for backward
        compatibility with keyword callers.)
        """
        self._children.append(node)

    def find(self, name):
        """Depth-first search for the first node whose data equals *name*.

        Returns the matching node, or None if no node in this subtree
        (including self) matches.
        """
        if self._data == name:
            return self
        for child in self._children:
            match = child.find(name)
            if match is not None:  # fixed: identity check instead of `!= None`
                return match
        return None

    def print_node(self, prefix):
        """Pretty-print this subtree, indented by *prefix* levels."""
        print(' ' * prefix, '+', self._data)
        for child in self._children:
            child.print_node(prefix + 1)
| [
"frank@franksworkshop.com.au"
] | frank@franksworkshop.com.au |
8ebba19af8f773f17fb07d8413e3497452552cd3 | 6712116b4827f9fe6224b0564865f96817d57391 | /fluxi/muxe.py | 875115f2d6301ba5468af1170a7a04db5484a171 | [] | no_license | JayFF/lab_fluxi | fb49912c1f38ab413065a3ee18979ea88827e147 | b4ecff2bf222e33da8510e95b56392fca71217ad | refs/heads/master | 2020-03-28T20:59:05.818941 | 2018-09-17T14:35:52 | 2018-09-17T14:35:52 | 149,119,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,260 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 30 18:16:01 2014
"Muxe": "Magic User eXperience Elements"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
try:
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
except ImportError:
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import Qt
import numpy as np
import time
import pyqtgraph as pg
from fluxi.muxe_base import MuxBase
class MuxBaseParam(MuxBase):
    """Base class for single-value UI parameters shown in the Parameters tree.

    Wraps a fluxi.ptree item (``self.p``), mirrors its value in
    ``self.value`` and wires up the change signals and context menu.
    """
    def __init__(self,id,fluxi):
        super().__init__(id,fluxi)
        from fluxi.ptree import mappedNames#,getChildGroup,ParamGroup
        # Default parent is the root of the "Parameters" tree dock.
        parent=self.getfluxi().g("tree:Parameters").mainwidget.invisibleRootItem()
        if len(self.pathstr)>0:
            # A nested path was given -> attach under that group's tree item.
            parent=fluxi["group:%s"%self.pathstr].p.treeitem
        elif self.type!="group":
            # Plain parameters without a path go into the "General" group.
            parent=fluxi["group:General"].p.treeitem
        # Instantiate the concrete ptree widget class mapped to this type.
        self.p=mappedNames[self.type].__call__(parent,self.name)#,digits=digits)
        self.p.sigValueChanged.connect(self._ui_element_changed)
        self.p.sigContextMenu.connect(self._ui_context_menu_opened)
        if self.type!="l":
            self.set(self.p.getValue())#TODO:
        self._hasbeenset=False
        self.contextMenu = QtGui.QMenu()
        ac=self.contextMenu.addAction("Remove Parameter")
        ac.triggered.connect(self._remove_action)
        ac=self.contextMenu.addAction("Properties")
        ac.triggered.connect(self.display_dialog)
        if self.type=="a":
            # Action buttons can optionally persist their (latched) value.
            self.options.update({
                "Save/Save Value":{"type":"bool","value":False},
            })
        if self.type=="group":
            self.options.update({"Expanded":{"type":"bool","value":True}})
    def _ui_context_menu_opened(self,ev):
        # Show the per-parameter context menu at the cursor position.
        self.contextMenu.popup(ev.globalPos())
    def _ui_element_changed(self,param,value,noaction=False):
        """Qt slot: mirror a UI edit into self.value and notify listeners."""
        self.value=value
        # if self.type=="l":
        # print("_ui_element_changed",self.value)
        if self.type=="a":
            # Action buttons latch to True until read via get().
            self.value=True
        if not noaction:
            self.emitChanged()
    def _remove_action(self,*args):
        self.remove()
    def get(self):
        """Return the current value; action buttons reset to False on read."""
        #move this in a derived class later
        if self.type=="a" and self.value==True:#Latch button behaviour
            self.value=False
            return True
        return self.value
    def set(self, value):
        """Coerce *value* to this parameter's type, store it and redraw."""
        if self.type=="f":
            self.value=float(value)
        elif self.type=="s":
            self.value=str(value)
        elif self.type=="i":
            self.value=int(value)
        elif self.type=="l":
            # print ("val",self.type,value)
            self.value=str(value)
        elif self.type=="b":
            if not isinstance(value, int):#bool is subclass of int
                raise ValueError("This needs to be an boolean (or an integer)")
            self.value=value
        else:
            # print ("valx",self.type,value)
            self.value=value
        self._hasbeenset=True
        self._requestRedraw()
        return self
    def draw(self):
        # Push self.value into the widget under the mutex (set() may be
        # called from outside the GUI thread).
        self.mutex.lock()
        # if self.id:
        # print (self.id,self.name,repr(self.value))
        # if self.type=="l":
        # print("draw_setvalue",self.value)
        self.p.setValue(self.value)
        self.mutex.unlock()
    def setValues(self,values):
        """Replace the list of selectable options (for list parameters)."""
        self.values=[str(v) for v in values]
        self.requestDrawAction(self.draw_setvalues)
        if not hasattr(self,"value"):
            self.value=self.values[0]
        # NOTE(review): indentation reconstructed -- the current value is
        # re-applied unconditionally here; confirm against upstream fluxi.
        self.set(self.value)
        return self
    def draw_setvalues(self):
        # print("draw_setvalues",self.values)
        self.p.setOptions(self.values)
    def delete(self):
        # Detach and drop the underlying tree widget, if it was created.
        if hasattr(self,"p"):
            self.p.remove()
            del self.p
    def addSelectionHandler(self):
        self.p.itemActivated.connect(self.show_properties)
    def get_opt_Expanded(self):
        return self.p.expanded
    def set_opt_Expanded(self,value):
        #print("set_opt_Expanded",value)
        self.p.expanded=value
        self.requestDrawAction(self.draw_expanded)
    def draw_expanded(self):
        #print("draw_expanded",self.p.expanded)
        self.p.treeitem.setExpanded(self.p.expanded)
class MuxDocked(MuxBase):
    """Base class for Mux elements that live inside a dock widget."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args,**kwargs)
    def createDock(self, widget,**kwargs):
        """Create (or reuse) the dock named after this element, add *widget*."""
        #super().__init__(name,fluxi)
        self.dock=self.getfluxi()._createDockIfNA(self.name,**kwargs)
        self.dock.addWidget(widget)
    def delete(self):
        """Dispose the dock and any main/plot widgets that were created."""
        #print("deleting dock et al")
        #layout.removeWidget(self.widget_name)
        if hasattr(self,"dock") and self.dock:
            self.dock.deleteLater()
            del self.dock
        if hasattr(self,"mainwidget") and self.mainwidget:
            self.mainwidget.deleteLater()
            del self.mainwidget
        if hasattr(self,"plot") and self.plot:
            self.plot.deleteLater()
            del self.plot
class MuxC(MuxDocked):
    """ A running waveform chart like in Labview"""
    def __init__(self,name,fluxi,value=None,length=200,trim=15,**kwargs):
        super().__init__(name,fluxi)
        self.plot = pg.PlotWidget()
        self.createDock(self.plot,**kwargs)
        self.curves={}
        self.trim=trim
        # NOTE(review): this immediately overwrites the trim argument.
        self.trim=False
        self.pad=50 # padding in %
        self.setLength(length)
        self.name=name
        self.mainwidget=self.plot
        self.type="Chart"
        self.addSelectionHandler()
        # Default pens for up to 5 curves; extra curves reuse the last style.
        self.styles=[{"pen":pg.mkPen((0,0,0), width=1)},{"pen":(27,64,94)},{"pen":(94,2,2)},{"pen":(28,94,55)},{"pen":(85,23,71)}]
        self.arrpos=0   # write cursor into the ring buffer
        self.roll=True  # if True, display rolled so the newest sample is last
        self.options.update({
            "Save/Save Value":{"type":"bool","value":False},
        })
    # def addCurve():
    # self.curves[]
    def setLength(self,length):
        """Resize the ring buffer to *length* samples (clears all data)."""
        if int(length)<=0:
            raise ValueError("A length smaller than 1 was specified")
        self.length=int(length)
        self.arrs=np.full((1,self.length),np.nan)
    def fill(self,curvenum1,curvenum2,brush=(50,0,0,50)):
        """Shade the area between two (lazily created) curves."""
        for i in range(max(curvenum1,curvenum2)):
            try:
                self.curves[i]
            except:
                self.curves[i]=self.plot.plot(**self.styles[min(i,len(self.styles)-1)])
        fill=pg.FillBetweenItem(self.curves[curvenum1], self.curves[curvenum2], brush)
        self.plot.addItem(fill)
    def add(self,values):
        """Append one sample (scalar or sequence; one entry per curve)."""
        try:
            L=len(values)
        except:
            # Scalar input: treat it as a single one-curve sample.
            values=[values]
            L=1
        if L>20:
            raise ValueError("More than 20 Lines")
        values=np.array(values)
        #if more or less values than curves
        s=self.arrs.shape
        if s[0]<L:
            # Grow the buffer with NaN rows for the new curves.
            self.arrs=np.append(self.arrs, np.full([L-s[0], s[1]],np.nan),axis=0)
        if s[0]>L:
            # Pad missing values with NaN so all rows get written.
            values=np.append(values, np.full((s[0]-L,),np.nan))
        #if length to show was changed
        s=self.arrs.shape
        if s[1]!=self.length:
            if s[1]<self.length:
                self.arrs=np.append(np.full([s[0], self.length-s[1]],np.nan),self.arrs,axis=1)
            elif s[1]>self.length:
                self.arrs=self.arrs[:,-self.length:]
        self.arrs[:len(values),self.arrpos]=values#TODO: move also the values that have not been set
        self.arrpos=(self.arrpos+1)%self.length
        self._requestRedraw()
    @property
    def v(self):
        # Buffer in chronological order (oldest sample first).
        return np.roll(self.arrs, -self.arrpos,axis=1)
    @v.setter
    def v(self, value):
        self.arrs=np.array(value)
        self.arrpos=0
        self._requestRedraw()
    def draw(self):
        if self.roll:
            arrs=self.v
        else:
            arrs=self.arrs
        for i in range(self.arrs.shape[0]):
            try:
                self.curves[i]
            except:
                # Lazily create the plot item for a new curve index.
                self.curves[i]=self.plot.plot(**self.styles[min(i,len(self.styles)-1)])
            self.curves[i].setData(arrs[i])
        # if self.trim:
        # x=np.array(self.arrs[0])
        # minv=np.percentile(x, self.trim)
        # maxv=np.percentile(x, 100-self.trim)
        # pad=abs(minv-maxv)*self.pad/100
        # self.curves[0].parentItem().parentItem().setYRange(minv-pad, maxv+pad)
    def mean(self):
        """Per-curve mean over the buffer, ignoring NaN padding."""
        return np.nanmean(self.arrs,1)
    def draw_clear(self):
        self.plot.clear()
        self._requestRedraw()
    def clear(self):
        """Reset the buffer and forget all curve items."""
        self.setLength(self.length)
        self.curves={}
        self.requestDrawAction(self.draw_clear)
        return self
class MuxG(MuxDocked):
    """ A graph"""
    def __init__(self,name,fluxi,trim=15,**kwargs):
        super().__init__(name,fluxi)
        self.plot = pg.PlotWidget()
        self.createDock(self.plot,**kwargs)
        self.dock.addWidget(self.plot)
        self.curves={}
        self.trim=trim
        # NOTE(review): this immediately overwrites the trim argument.
        self.trim=False
        self.pad=50 # padding in %
        self.name=name
        self.mainwidget=self.plot
        self.plot.addLegend()
        self.type="Graph"
        # Default pens; extra curve indices reuse the last style.
        self.styles=[{"pen":(0,0,0)},{"pen":(152,214,160)},{"pen":(94,2,2)},{"pen":(28,94,55)}]
        self.datas={}  # curve index -> {"xs","ys","kwargs"}
        #self.curve=self.plot.plot(name=curveName)
        #viewbox.setMouseMode(viewbox.RectMode)
        # def mouseClickEvent(self, ev):
        # if ev.button() == Qt.RightButton:
        # self.autoRange()
        self.options.update({
            "Save/Save Value":{"type":"bool","value":False},
        })
    def setMult(self,xs,ys=None):
        """Set several curves at once; xs may be omitted (auto-indices)."""
        if ys is None:
            ys=xs
            xs=[None]*len(ys)
        for i in range(len(ys)):
            self.set(xs[i],ys[i],i=i)
    def set(self,xs,ys=None,i=0,dots=False):
        """Store data for curve *i*; missing xs default to 0..len(ys)-1."""
        kwargs=self.styles[min(i,len(self.styles)-1)]
        if dots:
            kwargs={"pen":None, "symbolBrush":(255,0,0), "symbolPen":'w'}
        if ys is None and xs is None:
            return
        if ys is None:
            # Single-argument form: the first positional is the y data.
            ys=xs
            xs=np.arange(len(ys))
        if xs is None and not ys is None:
            xs=np.arange(len(ys))
        #self.redraw_list[i]=True
        self.datas[i]={"xs":xs,"ys":ys,"kwargs":kwargs}
        self._requestRedraw()
    @property
    def v(self):
        return self.datas
    @v.setter
    def v(self, value):
        self.datas=value
        self._requestRedraw()
    def draw(self):
        for n in self.datas:
            try:
                self.curves[n]
            except:
                # Lazily create the plot item for a new curve index.
                self.curves[n]=self.plot.plot()
            v=self.datas[n]
            self.curves[n].setData(v["xs"],v["ys"],**v["kwargs"])
    def draw_clear(self):
        self.curves={}
        self.plot.clear()
    def clear(self):
        """Drop all stored data and schedule clearing of the plot."""
        self.datas={}
        self.requestDrawAction(self.draw_clear)
        return self
class MuxDump(MuxBase):
    """ Dump some variables and they can be saved an displayed etc """
    def __init__(self,name,fluxi,value=None):
        # NOTE(review): unlike the other Mux classes this does not call
        # super().__init__ -- confirm whether that is intentional.
        self.name=name
        self.value=value
    @property
    def v(self):
        return self.value
    @v.setter
    def v(self, value):
        self.value=value
#%%
class MuxImg(MuxDocked):
    """ An Image"""
    def __init__(self,name,fluxi,trim=15,value=None,**kwargs):
        super().__init__(name,fluxi)
        self.view=pg.PlotItem()
        self.plot = pg.ImageView(view=self.view)
        self.createDock(self.plot,**kwargs)
        self.mainwidget=self.plot
        self.type="Image"
        self.pos=None
        self.scale=None
        self.data=np.array([[]])
        self._lastdraw=0
        self.maxredraw_rate=10.  # maximum redraws per second
        self.autoLevels,self.autoHistogramRange,self.autoRange=False,False,False
        self.autoAll=True  # force one fully auto-scaled draw on the first image
        self.options.update({
            "Save/Save Value":{"type":"bool","value":False},
        })
    def setTitles(self):
        #TODO
        # plt = pg.PlotItem(labels={'bottom': ('x axis title', 'm'), 'left': ('y axis title', 'm')})
        # view = pg.ImageView(view=plt)
        # plt.setAspectLocked(False)
        pass
    def setImage(self,data, pos=None,scale=None,autoRange=None):
        """Store a new image array (and optional placement) for drawing."""
        if autoRange is not None:
            self.autoRange=autoRange
        if pos is not None:
            self.pos=pos
        if scale is not None:
            self.scale=scale
        if not isinstance(data, np.ndarray):
            raise TypeError("Must be a numpy array")
        self.data=data
        self._requestRedraw()
    @property
    def v(self):
        return self.data
    @v.setter
    def v(self, value):
        self.setImage(value)
    def draw(self):
        #limit the redraws per seocond. Otherwise pyqtgraph will hang
        t=time.time()
        if t-self._lastdraw<1./self.maxredraw_rate:
            # Too soon -- postpone to a later draw cycle.
            self._requestRedraw()
            return
        self._lastdraw=t
        scale=self.scale
        if not self.scale:
            scale=[1,1]
        if isinstance(self.data, np.ndarray):
            data=self.data
        else:
            return
        if self.autoAll:
            # First image: let pyqtgraph auto-scale everything once.
            self.plot.setImage(data,autoRange=True,pos=self.pos, scale=scale,autoLevels=True,autoHistogramRange=True)
            self.autoAll=False
        else:
            self.plot.setImage(data,autoRange=False,pos=self.pos, scale=scale,autoLevels=self.autoLevels,autoHistogramRange=self.autoHistogramRange)
            # NOTE(review): indentation reconstructed -- autoRange is applied
            # only on the non-initial path here; confirm against upstream.
            if self.autoRange:
                self.plot.getView().autoRange(padding=0)
    def add_region_item(self,minitem,maxitem,horizontal=False):
        """Add a draggable region whose bounds track two Mux parameters."""
        if hasattr(self,"ri"):
            self.plot.removeItem(self.ri)
        if horizontal:
            orientation=pg.LinearRegionItem.Horizontal
        else:
            orientation=None
        self.ri=pg.LinearRegionItem(orientation=orientation)
        self.plot.addItem(self.ri)
        self.ri.setRegion([minitem.v,maxitem.v])
        def regionChanged(*args):
            # Write the dragged bounds back into the two parameters.
            minitem.v,maxitem.v=self.ri.getRegion()
            minitem.emitChanged()
            maxitem.emitChanged()
        self.ri.sigRegionChanged.connect(regionChanged)
    def add_crosshair(self, name=None, pos=None):
        """Attach a (named) crosshair Mux element to this image."""
        if name is None:
            name="Crosshair"
        try:
            self.ch.remove()
        except:
            pass
        self.ch=self.getfluxi().g("crosshair:%s@%s"%(name,self.name))
        self.ch.attach(self)
        if self.pos is not None:
            self.ch.v=pos
#%%
#from pyqtgraph.widgets.MatplotlibWidget import MatplotlibWidget
##%%
#class MuxMPL(MuxDocked):
# """ A Matplotlib plot"""
# type="Matplotlib"
# def __init__(self,name,fluxi,trim=15,value=None,**kwargs):
# super().__init__(name,fluxi)
# self.widget = MatplotlibWidget()
# self.createDock(self.widget,**kwargs)
#
# def getFig(self):
# return self.widget.getFigure()
## subplot = mw..add_subplot(111)
## subplot.plot(x,y)
## mw.draw()
#%%
class MuxTable(MuxDocked):
    """A docked, editable, sortable table (rows of cell strings)."""
    def __init__(self,name,fluxi,value=None,**kwargs):
        super().__init__(name,fluxi)
        self.t=self.mainwidget=MyTableWidget(editable=True,sortable=True)
        self.createDock(self.t,**kwargs)
        self.type="table"
        self.options.update({
            "Save/Save Value":{"type":"bool","value":False},
        })
        #self.t.setFormat("%f.5")
    def get_selected_row(self):
        """Return the currently selected row as a list of strings."""
        return self.get_row(self.t.currentRow())
    def get_row(self,num):
        # One string per column of row *num*.
        return [str(self.t.item(num,i).text()) for i in range(self.t.columnCount())]
    def add(self, row):
        """Append one row (reads and rewrites the whole table)."""
        a=self.v
        a.append(row)
        self.v=a
    @property
    def v(self):
        # List of rows, each row a list of cell strings.
        self._is_in_main_thread()
        return [self.get_row(rn) for rn in range(self.t.rowCount())]
    @v.setter
    def v(self, value):
        self._is_in_main_thread()
        self.t.setData(value)
class MyTableWidget(pg.TableWidget):
    """pyqtgraph TableWidget with a context menu for deleting selected rows."""
    def contextMenuEvent(self, event):
        self.menu = QtGui.QMenu(self)
        ad = QtGui.QAction('Delete Selected Rows', self)
        ad.triggered.connect(self.removeSelectedRows)
        self.menu.addAction(ad)
        # add other required actions
        self.menu.popup(QtGui.QCursor.pos())
    def removeSelectedRows(self):
        """Remove every selected row.

        Rows are removed bottom-up: removing a row shifts the indices of
        all rows below it, so deleting in ascending (selection) order
        would remove the wrong rows -- the bug in the previous version.
        """
        row_numbers = sorted(
            (index.row() for index in self.selectionModel().selectedRows()),
            reverse=True)
        for r in row_numbers:
            self.removeRow(r)
#mt=MuxTable("woot!",value=[[1,2,3],[1,2],[1,2],[1,2]])
class MuxList(MuxDocked):
    """ A (collapsable) list """
    def __init__(self,name,fluxi,**kwargs):
        super().__init__(name,fluxi)
        mw=self.mainwidget = QtGui.QTreeWidget()
        self.createDock(self.mainwidget)
        self.name=name
        self.type="List"
        self.root=self.mainwidget.invisibleRootItem()
        self.mainwidget.itemChanged.connect (self._handleChanged)
        self.mainwidget.itemSelectionChanged.connect (self._handleSelectionChanged)
        mw.header().setStretchLastSection(False)
        if "headers" in kwargs:
            self.setHeaders(kwargs["headers"],[None,20,40],["stretch","content","content"])
        #self.treeWidget.setHeaderHidden(False)
        mw.setSortingEnabled(True)
        mw.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
        mw.setSelectionMode(4)#ContiguousSelection
        #TODO: set properties of columns
    def setHeaders(self,labels=None,sizes=[],resizing=[]):
        """Configure column labels, widths and resize behaviour."""
        self._is_in_main_thread()
        mw=self.mainwidget
        if labels:
            mw.setColumnCount(len(labels))
            mw.setHeaderLabels(labels)
        for i, s in enumerate(sizes):
            if s!=None:
                # NOTE(review): resizes section 1 regardless of i --
                # probably meant resizeSection(i, s); confirm before fixing.
                mw.header().resizeSection(1, s)
        qhv=QtGui.QHeaderView
        resizers={"interactive":qhv.Interactive,"stretch":qhv.Stretch,"content":qhv.ResizeToContents,"fixed":qhv.Fixed}
        for i, s in enumerate(resizing):
            if not s:
                s="interactive"
            mw.header().setResizeMode(i, resizers[str(s)])
        return self
    def addParent(self, data=None,parent=None,checked=None,widgets={}):
        """Add a drop-enabled item; later addChild() calls default to it."""
        if not parent:
            parent=self.root
        item=self.add(data=data,parent=parent,checked=checked,widgets=widgets)
        item.setFlags(item.flags() | Qt.ItemIsDropEnabled)
        self._lastparent=item
        return item
    def addChild(self, data=None,parent=None,checked=None,widgets={}):
        """Add a leaf item (drop disabled) under *parent* or the last parent."""
        if not parent:
            parent=self._lastparent
        item=self.add(data=data,parent=parent,checked=checked,widgets=widgets)
        item.setFlags(item.flags() ^ Qt.ItemIsDropEnabled)
        return item
    def add(self, data=None,parent=None,checked=None,widgets={}):
        """Create a tree item with one column per entry in *data*.

        ``checked`` None means "no checkbox"; True/False adds one in
        column 0.  ``widgets`` maps column index -> widget to embed.
        """
        self._is_in_main_thread()
        item = QtGui.QTreeWidgetItem(parent)
        item.setExpanded(True)
        # print(data)
        for i, d in enumerate(data):
            item.setData(i, Qt.DisplayRole, d)#EditRole
        item.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.DontShowIndicatorWhenChildless)
        item.setFlags(item.flags() | Qt.ItemIsEditable | Qt.ItemIsDragEnabled)
        # print("checked",checked)
        if checked!=None:
            item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
            if checked:
                item.setCheckState(0, Qt.Checked)
            else:
                item.setCheckState(0, Qt.Unchecked)
        else:
            #print ("none")
            item.setFlags(item.flags() ^ Qt.ItemIsUserCheckable)
        for pos in widgets:
            self.mainwidget.setItemWidget(item, pos, widgets[pos])
        return item
    @property
    def v(self):
        return self.getDict()#_listChildren()[u"children"]
    @v.setter
    def v(self, value):
        #TODO
        return self
    def _listChildren(self,parent=None):
        """Recursively serialize a subtree into a plain dict."""
        if not parent:
            parent=self.root
        children=[]
        for i in range(parent.childCount()):
            c=parent.child(i)
            children.append(self._listChildren(c))
        #.data(columne,role)
        data=[parent.data(i,0) for i in range(parent.columnCount())]
        d={'data':data}
        if parent.flags() & Qt.ItemIsDropEnabled:
            d["isParent"]=True
        if parent.flags() & Qt.ItemIsUserCheckable:
            d["checked"]=(parent.checkState(0)==Qt.Checked)
        if len(children)>0:
            d["children"]=children
        if parent.isExpanded():
            d["expanded"]=parent.isExpanded()
        return d
    def getSublist(self,name,parent=None):
        """Serialize the subtree rooted at the child named *name*."""
        child=self.getByName(name,parent)
        return self._listChildren(child)
    def getByName(self,name,parent=None):
        """Return the direct child whose column-0 text equals *name*."""
        #TODO:name.split("/")
        if not parent:
            parent=self.mainwidget.invisibleRootItem()
        child=None
        for i in range(parent.childCount()):
            child=parent.child(i)
            #print(child.data(0,0),name,str(child.data(0,0))==str(name))
            if str(child.data(0,0))==str(name):
                return child
        raise NameError("Element not found")
    def getDict(self):
        return self._listChildren()
    def setDict(self,dictionary,item=None):
        """Rebuild a subtree from a dict produced by _listChildren()."""
        d=dictionary
        if not item:
            # Top-level call: attach children directly to the root.
            nitem=self.root
        else:
            checked=None
            if "checked" in d:
                checked=d["checked"]
            if "isParent" in d and d["isParent"]:
                nitem=self.addParent(d["data"],item,checked)
            else:
                nitem=self.addChild(d["data"],item,checked)
            if "expanded" in d:
                nitem.setExpanded(d["expanded"])
            else:
                nitem.setExpanded(False)
        if "children" in d:
            for n in d["children"]:
                self.setDict(n,nitem)
        return self
    def clear(self,parent=None):
        """Remove all children of *parent* (default: the whole list)."""
        if not parent:
            parent=self.root
        while parent.childCount()>0:
            parent.removeChild(parent.child(0))
        return self
    def removeItem(self,item):
        parent=item.parent()
        if parent is None:
            # Top-level items have no parent(); detach from the root instead.
            parent=self.root
        parent.removeChild(item)
        return self
    # def getSublist(self,name):
    # parent=self.mainwidget.invisibleRootItem()
    # child=None
    # for i in range(parent.childCount()):
    # child=parent.child(i)
    # print(child.data(0,0),name,str(child.data(0,0))==str(name))
    # if str(child.data(0,0))==str(name):
    # return [child.data(i,0) for i in range(child.columnCount())]
    # return None
    def _handleChanged(self, item, column):
        self.emitChanged()
    def _handleSelectionChanged(self):
        # Store the current selection for get_selected_items().
        self._selectedItems=self.mainwidget.selectedItems()
        self.emitChanged()
    # if item.checkState(column) == Qt.Checked:
    # print "checked", item, item.text(column)
    # if item.checkState(column) == Qt.Unchecked:
    # print "unchecked", item, item.text(column)
    def get_selected_items(self):
        return self._selectedItems
    def get_selected_item_data(self):
        """Return the column values of the first selected item."""
        #todo: add to fluxi
        item=self.get_selected_items()[0]
        data=[item.data(i,0) for i in range(item.columnCount())]
        return data
    def remove_selected_items(self):
        items=self.mainwidget.selectedItems()
        #print(items)
        for i in items:
            self.removeItem(i)
| [
"tv@in-hd.de"
] | tv@in-hd.de |
2ce9104b0758d48a3de995d90d05fa6c079a87b3 | 1d7a468a1554d17f8d2d061c67b7f12d8c475dd7 | /example_implementation/NimRL-master/4Heaps/NimTraining.py | 5b298fa0cf2553935751680a311e9cd7d8e9ef02 | [] | no_license | bramtoula/RL_Nim | 842869c05ef736f80b018335a48c13e29be6ea47 | 2779a09eef6fcfde9275ae637869e7d23a6c9338 | refs/heads/master | 2021-03-24T13:07:59.342576 | 2017-05-19T13:13:48 | 2017-05-19T13:13:48 | 86,991,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,677 | py | import numpy as np
import random as rnd
from SA import SA
from AgentQ import Agent
from AgentSARSA import AgentSARSA
import matplotlib.pyplot as plt
import pickle
###### Training Agent vs Computer #######
def play1(board,end,Agent):
    """One training round with the agent moving first against smartMove.

    Returns False when the game just ended (after applying the terminal
    reward); returns None while the game is still in progress.
    """
    Agent.move(board)
    if board == end:
        # Agent took the last object: terminal win, reward +1.
        Agent.winUpdate(Agent.state, Agent.action, 1)
        return False
    smartMove(board)
    if board == end:
        # Opponent took the last object: terminal loss, reward -1.
        Agent.loseUpdate(-1)
        return False
    # Non-terminal transition: back up a zero reward from the new state.
    Agent.update(Agent.state, Agent.action, Agent.readBoard(board), 0)
def play2(board, end , Agent,c):
    """One training round with the computer (smartMove) opening.

    *c* is the round counter: before the agent's first move there is no
    previous (state, action) pair to update.  Returns False when the game
    just ended, None otherwise.
    """
    smartMove(board)
    if board == end:
        Agent.loseUpdate(-1)
        return False
    if c != 0:
        # Back up the previous pair now that the opponent has replied.
        Agent.update(Agent.state, Agent.action, Agent.readBoard(board), 0)
    Agent.move(board)
    if board == end:
        Agent.winUpdate(Agent.state, Agent.action, 1)
        return False
######### Policy #############
def policyPlay1(board, end, Agent):
    """ Agent vs Smart"""
    # Evaluation game (no learning updates): the agent plays its greedy policy
    # and the computer plays smartMove.  Returns False when the game ends.
    # Nim-sum (XOR of heaps) before the agent's turn; nonzero => winnable.
    before = board[0]^board[1]^board[2]^board[3]
    if before != 0:
        Agent.moves += 1
        # NOTE(review): policyMove only runs when the position is winnable, so
        # from a lost position the agent skips its turn and `after` stays 0,
        # incrementing Agent.t without a matching move -- confirm intent.
        Agent.policyMove(board)
    after = board[0]^board[1]^board[2]^board[3]
    if after == 0:
        # Count moves that restored a zero nim-sum (optimal moves).
        Agent.t += 1
    if board == end:
        Agent.won += 1
        return False
    smartMove(board)
    if board == end:
        return False
def policyPlay2(board, end, Agent):
    """ Smart vs Agent """
    # Evaluation game with the computer opening; no learning updates here.
    smartMove(board)
    if board == end:
        return False
    before = board[0]^board[1]^board[2]^board[3]
    if before != 0:
        Agent.moves += 1
        # NOTE(review): as in policyPlay1, the greedy move only happens when
        # the position is winnable -- confirm the agent should skip otherwise.
        Agent.policyMove(board)
    after = board[0]^board[1]^board[2]^board[3]
    if after == 0:
        Agent.t += 1
    if board == end:
        Agent.won += 1
        return False
###### Computer Opponents ##########
def randomMove(board):
    """Take a random legal amount from a random non-empty heap.

    Mutates *board* in place and returns it.
    """
    heap = rnd.randint(0, len(board) - 1)
    if board[heap] == 0:
        # Drew an empty heap: restart the whole draw.
        return randomMove(board)
    amount = 1 if board[heap] == 1 else rnd.randint(1, board[heap])
    board[heap] -= amount
    return board
def smartMove(board):
    """Play the optimal Nim move when one exists, else fall back to randomMove.

    Searches heaps in order and, within each heap, removal amounts in
    increasing order, applying the first removal that leaves the XOR-sum
    (nim-sum) of the four heaps at zero.  Mutates *board* in place and
    returns it.

    Restructured to apply the winning move as soon as it is found; the
    original re-derived the amount afterwards as ``tryValue - 1``, an
    off-by-one-prone compensation tied to the loop counters.
    """
    trial = list(board)
    for heap in range(len(trial)):
        for take in range(1, trial[heap] + 1):
            trial[heap] -= take
            if trial[0] ^ trial[1] ^ trial[2] ^ trial[3] == 0:
                # Winning removal found: apply it to the real board.
                board[heap] -= take
                return board
            trial[heap] += take
    # No removal yields nim-sum zero (lost position): move randomly.
    return randomMove(board)
##### Agents ##########
board = [1,3,5,7]
end = [0,0,0,0]
stac = SA(board) # initialise states and actions
a = [0.1,0.5,0.99] # learning rate parameter
eps = [0.2,0.8] # epsilon
gam = [0.1,0.5,1] # discount factor
# Grid search over (alpha, epsilon, gamma): one fresh agent per combination,
# trained against the optimal-play opponent, with learning curves pickled.
for y in range(len(a)):
    alpha = a[y]
    for z in range(len(eps)):
        epsilon = eps[z]
        for w in range(len(gam)):
            gamma = gam[w]
            a1 = Agent(stac, alpha, gamma, epsilon) # initialise agent
            n = 20000
            rnd.seed(0)
            ########## Train A1 ########
            episode = []
            wins = []
            optmoves = []
            """Against smart """
            for j in range(0,n):
                interval = 25
                if j % interval == 0: # Increase Epsilon over time
                    epslimit = 10000
                    a1.epsilon += interval*(1-epsilon)/epslimit
                # Greedy-policy evaluation: x games per episode, random starter.
                x = 250 # Performance : play 100 games each 1000 episodes
                a1.ngames = 0
                a1.won = 0
                a1.moves = 0
                a1.t = 0
                started = 0
                for i in range(0,x):
                    r = rnd.randrange(2)
                    if r == 0:
                        started += 1
                        while True: # Agent first
                            if policyPlay1(board, end, a1) == False:
                                break
                        board = [1,3,5,7]
                    if r == 1:
                        while True: # Computer first
                            if policyPlay2(board, end, a1) == False:
                                break
                        board = [1,3,5,7]
                episode.append(j)
                # NOTE(review): a1.won counts wins from both kinds of game but is
                # normalised by the computer-first count (x - started) only --
                # confirm this is intended.  Under Python 2 these divisions are
                # integer divisions; confirm the intended interpreter.
                wins.append(a1.won/(x-started))
                optmoves.append(a1.t/a1.moves)
                # One epsilon-greedy training game per episode, random starter.
                r = rnd.randrange(2)
                if r == 0:
                    while True: # a1 goes first
                        if play1(board,end,a1) == False:
                            break
                    board = [1,3,5,7]
                if r == 1:
                    c = 0
                    while True: # comp goes first
                        if play2(board,end,a1,c) == False:
                            break
                        c += 1
                    board = [1,3,5,7]
            # Persist learning curves for this hyper-parameter combination.
            with open('AgentvsSmartEpisodeAlpha_'+str(a1.alpha)+' Gamma_'+str(a1.gamma)+' EpsInc_'+str(epsilon)+'.txt', 'wb') as f:
                pickle.dump(episode, f)
            with open('AgentvsSmartMovesAlpha_'+str(a1.alpha)+' Gamma_'+str(a1.gamma)+' EpsInc_'+str(epsilon)+'.txt', 'wb') as f:
                pickle.dump(optmoves, f)
            with open('AgentvsSmartWinsAlpha_'+str(a1.alpha)+' Gamma_'+str(a1.gamma)+' EpsInc_'+str(epsilon)+'.txt', 'wb') as f:
                pickle.dump(wins, f)
| [
"benjir21@hotmail.fr"
] | benjir21@hotmail.fr |
d59decd5ba84f7ba41412b6d8f8af2f359d48daa | de5e976e4c6b07a9087e38744b1d4f9ab4fda577 | /tests/test_server.py | a73a4ae4fdc6e7d7de52b480360b19bcaf8340ba | [] | no_license | Luceurre/Ponthon | 06f21717c7f74d02216ad9e67a75f3062bde9a21 | 6e2aa3ba8ef66c26f4e357edb5a1e94ad06fc1cd | refs/heads/main | 2023-03-03T02:54:48.292914 | 2021-02-14T22:29:25 | 2021-02-14T22:29:25 | 338,915,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | import unittest
from lsp.LanguageServer import LanguageServer
class TestServer(unittest.TestCase):
    # Smoke test only: passes as long as start_tcp raises no exception.
    def test_server_start_tcp(self):
        # NOTE(review): no assertion and no shutdown/cleanup of the server;
        # confirm whether start_tcp blocks or returns immediately.
        server = LanguageServer()
        server.start_tcp("127.0.0.1", 5000)
if __name__ == '__main__':
    unittest.main()
| [
"pglandon78@gmail.com"
] | pglandon78@gmail.com |
949d895fb08cec452a2f86bc1cfe122fcf7c7581 | 716729bcb0fb071794583fc448c9d853b099b314 | /PFA_2A/log/admin.py | ad22c2636adfb3b17a7a4689b7af785f56cc76fe | [] | no_license | Taddist/Generate_Timetabling | 7ea19ef1bbcb2c04f44aafc9cbbbec8cfeb8f248 | 378a51dbf96b279cfab52605b727256cd06b60a7 | refs/heads/master | 2021-07-12T07:05:09.666910 | 2017-10-13T16:32:52 | 2017-10-13T16:32:52 | 106,847,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from django.contrib import admin
from log.models import Year , Filiere ,Group, Room ,Subject , Timeslote ,Tag, Notification , Contact,TypeExam,Exam,TypeElement,FileCsv ,Semaine,FreeTime ,SubActivity
# Register your models here.
# Per-model admin configuration: ``list_display`` sets the change-list
# columns, ``search_fields`` the lookups used by the admin search box
# (``__`` spans follow foreign keys), ``list_per_page`` the pagination.
@admin.register(FileCsv)
class FileCsvAdmin(admin.ModelAdmin):
    list_display = ('user','file', 'semaine', 'date_upload')
    search_fields = ['user__username','file','semaine__semaine','date_upload']
    #search_fields = ('user', 'file','semaine')
    list_per_page = 15
@admin.register(Timeslote)
class TimesloteAdmin(admin.ModelAdmin):
    list_display = ('jour','horaire')
    search_fields = ['jour','horaire']
@admin.register(Room)
class RoomAdmin(admin.ModelAdmin):
    list_display = ('salle','capacite')
    search_fields = ['salle','capacite']
@admin.register(Exam)
class ExamAdmin(admin.ModelAdmin):
    list_display = ('user','element', 'typeExam', 'groupe')
    search_fields = ['user__username','groupe__groupe','typeExam__typeExam','element__element']
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    list_display = ('user','date', 'message', 'subject')
    search_fields = ['user__username','date','message','subject__element']
@admin.register(FreeTime)
class FreeTimeAdmin(admin.ModelAdmin):
    list_display = ('user','timeslote','semaine')
    search_fields = ['user__username','timeslote__jour','timeslote__horaire','semaine__semaine']
@admin.register(SubActivity)
class SubActivityAdmin(admin.ModelAdmin):
    list_display=('user','groupe','element','semaine','typeelement')
    search_fields = ['user__username','groupe__groupe','element__element','semaine__semaine','typeelement__typeElement']
    list_per_page = 18 #number max of Activity in a week
@admin.register(Semaine)
class SemaineAdmin(admin.ModelAdmin):
    list_display=('semaine','date_debut')
    search_fields = ['semaine','date_debut']
# Remaining models use the default ModelAdmin options.
admin.site.register(Year)
admin.site.register(Filiere)
admin.site.register(Group)
admin.site.register(Subject)
admin.site.register(Tag)
admin.site.register(Notification)
admin.site.register(TypeExam)
admin.site.register(TypeElement)
#admin.site.register(FileCsv,FileCsvAdmin)
| [
"afaf.taddist71@gmail.com"
] | afaf.taddist71@gmail.com |
00c9442341892b772603c6e9f4d9d0cd562630e0 | fcbdae0db78a0bf2fc0d15360588d075e1205dd4 | /manim/mobject/types/opengl_point_cloud_mobject.py | c9894a21b7b84c091fa1a200150843e49d5e4e15 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | westre3/manim | d0352adf384a2368db7c31118b8ebaa96e3f5262 | daf23c9d1031b12d9c119b8f6b7e60727d7f9242 | refs/heads/master | 2022-02-18T13:53:11.952626 | 2022-01-20T04:26:21 | 2022-01-20T04:26:21 | 366,182,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,101 | py | from __future__ import annotations
__all__ = ["OpenGLPMobject", "OpenGLPGroup", "OpenGLPMPoint"]
import moderngl
import numpy as np
from ...constants import *
from ...mobject.opengl_mobject import OpenGLMobject
from ...utils.bezier import interpolate
from ...utils.color import BLACK, WHITE, YELLOW, color_gradient, color_to_rgba
from ...utils.config_ops import _Uniforms
from ...utils.iterables import resize_with_interpolation
class OpenGLPMobject(OpenGLMobject):
    """Base class for point-cloud mobjects rendered as raw GL points."""

    shader_folder = "true_dot"
    # Scale for consistency with cairo units
    OPENGL_POINT_RADIUS_SCALE_FACTOR = 0.01
    shader_dtype = [
        ("point", np.float32, (3,)),
        ("color", np.float32, (4,)),
    ]
    point_radius = _Uniforms()

    def __init__(
        self, stroke_width=2.0, color=YELLOW, render_primitive=moderngl.POINTS, **kwargs
    ):
        self.stroke_width = stroke_width
        super().__init__(color=color, render_primitive=render_primitive, **kwargs)
        self.point_radius = (
            self.stroke_width * OpenGLPMobject.OPENGL_POINT_RADIUS_SCALE_FACTOR
        )

    def reset_points(self):
        """Drop all points; keep a single placeholder rgba row."""
        self.rgbas = np.zeros((1, 4))
        self.points = np.zeros((0, 3))
        return self

    def get_array_attrs(self):
        return ["points", "rgbas"]

    def add_points(self, points, rgbas=None, color=None, opacity=None):
        """Add points.

        Points must be a Nx3 numpy array.
        Rgbas must be a Nx4 numpy array if it is not None.
        """
        if rgbas is None and color is None:
            color = YELLOW
        self.append_points(points)
        # rgbas array will have been resized with points
        if color is not None:
            if opacity is None:
                opacity = self.rgbas[-1, 3]
            new_rgbas = np.repeat([color_to_rgba(color, opacity)], len(points), axis=0)
        else:
            # BUG FIX: this length check previously lived in an ``elif`` after
            # ``rgbas is not None`` and was therefore unreachable.
            if len(rgbas) != len(points):
                raise ValueError("points and rgbas must have same length")
            new_rgbas = rgbas
        self.rgbas = np.append(self.rgbas, new_rgbas, axis=0)
        return self

    def thin_out(self, factor=5):
        """
        Removes all but every nth point for n = factor
        """
        for mob in self.family_members_with_points():
            num_points = mob.get_num_points()

            def thin_func():
                return np.arange(0, num_points, factor)

            # Only thin the color array when it is in sync with the points.
            if len(mob.points) == len(mob.rgbas):
                mob.set_rgba_array_direct(mob.rgbas[thin_func()])
            mob.set_points(mob.points[thin_func()])
        return self

    def set_color_by_gradient(self, *colors):
        self.rgbas = np.array(
            list(map(color_to_rgba, color_gradient(*colors, self.get_num_points()))),
        )
        return self

    def set_colors_by_radial_gradient(
        self,
        center=None,
        radius=1,
        inner_color=WHITE,
        outer_color=BLACK,
    ):
        start_rgba, end_rgba = list(map(color_to_rgba, [inner_color, outer_color]))
        if center is None:
            center = self.get_center()
        for mob in self.family_members_with_points():
            # BUG FIX: distances must come from the member's own points
            # (``mob``), not always from ``self``; otherwise the alpha array
            # length does not match ``mob``'s point count for grouped mobjects.
            distances = np.abs(mob.points - center)
            alphas = np.linalg.norm(distances, axis=1) / radius
            mob.rgbas = np.array(
                [interpolate(start_rgba, end_rgba, alpha) for alpha in alphas],
            )
        return self

    def match_colors(self, pmobject):
        self.rgbas[:] = resize_with_interpolation(pmobject.rgbas, self.get_num_points())
        return self

    def fade_to(self, color, alpha, family=True):
        rgbas = interpolate(self.rgbas, color_to_rgba(color), alpha)
        for mob in self.submobjects:
            mob.fade_to(color, alpha, family)
        self.set_rgba_array_direct(rgbas)
        return self

    def filter_out(self, condition):
        """Remove every point for which *condition* (a map R^3 -> bool) holds."""
        for mob in self.family_members_with_points():
            to_keep = ~np.apply_along_axis(condition, 1, mob.points)
            for key in mob.data:
                mob.data[key] = mob.data[key][to_keep]
        return self

    def sort_points(self, function=lambda p: p[0]):
        """
        function is any map from R^3 to R
        """
        for mob in self.family_members_with_points():
            indices = np.argsort(np.apply_along_axis(function, 1, mob.points))
            for key in mob.data:
                mob.data[key] = mob.data[key][indices]
        return self

    def ingest_submobjects(self):
        for key in self.data:
            self.data[key] = np.vstack([sm.data[key] for sm in self.get_family()])
        return self

    def point_from_proportion(self, alpha):
        index = alpha * (self.get_num_points() - 1)
        return self.points[int(index)]

    def pointwise_become_partial(self, pmobject, a, b):
        lower_index = int(a * pmobject.get_num_points())
        upper_index = int(b * pmobject.get_num_points())
        for key in self.data:
            self.data[key] = pmobject.data[key][lower_index:upper_index]
        return self

    def get_shader_data(self):
        shader_data = np.zeros(len(self.points), dtype=self.shader_dtype)
        self.read_data_to_shader(shader_data, "point", "points")
        self.read_data_to_shader(shader_data, "color", "rgbas")
        return shader_data
class OpenGLPGroup(OpenGLPMobject):
    """Group several point-cloud mobjects into one."""

    def __init__(self, *pmobs, **kwargs):
        # Generator form avoids materialising a throwaway list for all().
        if not all(isinstance(m, OpenGLPMobject) for m in pmobs):
            raise Exception("All submobjects must be of type OpenglPMObject")
        super().__init__(**kwargs)
        self.add(*pmobs)

    def fade_to(self, color, alpha, family=True):
        # Recurse into submobjects when a family-wide fade is requested.
        if family:
            for mob in self.submobjects:
                mob.fade_to(color, alpha, family)
class OpenGLPMPoint(OpenGLPMobject):
    """A single point at ``location``, drawn as one GL dot."""
    def __init__(self, location=ORIGIN, stroke_width=4.0, **kwargs):
        self.location = location
        super().__init__(stroke_width=stroke_width, **kwargs)
    def init_points(self):
        # One-row (1, 3) float32 array holding just ``location``.
        self.points = np.array([self.location], dtype=np.float32)
| [
"noreply@github.com"
] | noreply@github.com |
b7ab18a5e62fd51381ae87268bc63f20d853de23 | bd16f4af25d34e6a8113a0d1797fc0324fffa670 | /venv/lib/python2.7/site-packages/vertica_python/vertica/messages/backend_messages/notice_response.py | e0dc35cc08ac5689d8e1f462a51c44a56c3ee5ce | [
"MIT"
] | permissive | bocaaust/Schrodingers-Closet | 41307ef1ba25b5e279efc1ff5b7ee61204a6ba50 | 608edd171ed008f51324d2f12cbf5effd0e9ce2b | refs/heads/master | 2021-01-11T08:11:05.042686 | 2016-10-03T03:54:42 | 2016-10-03T03:55:55 | 69,749,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | from __future__ import absolute_import
import string
from struct import unpack_from
from vertica_python.vertica.messages.message import BackendMessage
class NoticeResponse(BackendMessage):
    """Backend NOTICE message.

    The payload is a sequence of fields, each a one-byte type code followed
    by a NUL-terminated value.
    """

    FIELDS_DEFINITIONS = [
        {'type': 'q', 'name': "Internal Query", 'method': 'internal_query'},
        {'type': 'S', 'name': "Severity", 'method': 'severity'},
        {'type': 'M', 'name': "Message", 'method': 'message'},
        {'type': 'C', 'name': "Sqlstate", 'method': 'sqlstate'},
        {'type': 'D', 'name': "Detail", 'method': 'detail'},
        {'type': 'H', 'name': "Hint", 'method': 'hint'},
        {'type': 'P', 'name': "Position", 'method': 'position'},
        {'type': 'W', 'name': "Where", 'method': 'where'},
        {'type': 'p', 'name': "Internal Position", 'method': 'internal_position'},
        {'type': 'R', 'name': "Routine", 'method': 'routine'},
        {'type': 'F', 'name': "File", 'method': 'file'},
        {'type': 'L', 'name': "Line", 'method': 'line'}
    ]

    def FIELDS(self):
        """Return a mapping from one-character field code to display name."""
        return {field['type']: field['name'] for field in self.FIELDS_DEFINITIONS}

    def __init__(self, data):
        self.values = {}
        # Hoisted: the code->name map used to be rebuilt on every iteration.
        field_names = self.FIELDS()
        pos = 0
        while pos < len(data) - 1:
            # str.find replaces the Python-2-only string.find module function
            # (identical behaviour on Python 2, portable to Python 3).
            null_byte = data.find('\x00', pos)
            # Unpack one type byte plus the NUL-terminated value.
            unpacked = unpack_from('c{0}sx'.format(null_byte - 1 - pos), data, pos)
            key = unpacked[0]
            value = unpacked[1]
            self.values[field_names[key]] = value
            pos += (len(value) + 2)
        # Expose each parsed field as an attribute named by its definition.
        for field_def in self.FIELDS_DEFINITIONS:
            if self.values.get(field_def['name'], None) is not None:
                setattr(self, field_def['method'], self.values[field_def['name']])

    def error_message(self):
        """Join the populated fields, in definition order, into one string."""
        ordered = [
            "{0}: {1}".format(field['name'], self.values[field['name']])
            for field in self.FIELDS_DEFINITIONS
            if self.values.get(field['name']) is not None
        ]
        return ', '.join(ordered)


NoticeResponse._message_id('N')
| [
"danalhsiao@gmail.com"
] | danalhsiao@gmail.com |
107467f95d1d5f14efb0291a149d167ada0b059f | cb82e798d1ea875e87d973d87602baa07166fb7b | /net/url/obj.py | d67c2f4f133dd4d34cc41c0537d9858393d75cfa | [] | no_license | rowanpang/noteGit | e9470be20bfdb04ac6b80c93f0f1cd3fd97ef565 | 120ca5329addf3a780b2299a0ab74de997b77785 | refs/heads/master | 2023-05-31T05:04:58.731953 | 2023-05-31T02:34:14 | 2023-05-31T02:34:14 | 52,506,290 | 1 | 0 | null | 2021-06-04T01:08:05 | 2016-02-25T07:41:49 | C | UTF-8 | Python | false | false | 467 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urlobject
# ref https://urlobject.readthedocs.io/en/latest/quickstart.html
# Parse each URL (one per line) from ./url.txt and print its components.
# NOTE: Python 2 syntax (print statements); requires the third-party
# ``urlobject`` package imported above.
for l in open('./url.txt').readlines():
    uobj = urlobject.URLObject(l.strip())
    print uobj
    print 'scheme:',uobj.scheme
    print 'hostname:',uobj.hostname
    print 'port:',uobj.port
    print 'defport:',uobj.default_port
    print 'path:',uobj.path
    print 'query:',uobj.query
    print 'query_dict:',uobj.query_dict
    print
| [
"pangweizhen.2008@hotmail.com"
] | pangweizhen.2008@hotmail.com |
958d10213b2c05b768ced6c6cda03fb7c7d10b0b | bdc10ba57424040129cc72ad018ff26bc8bca66a | /ConfigDefinitions/BranchAdditions/UserDefinedBranches/Triggers_18_MC.py | fa59a97a8c09e3b16d9403906e1fd565dd4e9943 | [] | no_license | aloeliger/Jesterworks | 61e0ac38ca325fefbbd8ccedaa8eb02d8a76ebbe | 96a22bac4ce20b91aba5884eb0e5667fcea3bc9a | refs/heads/master | 2021-06-09T15:39:06.976110 | 2021-04-23T11:25:06 | 2021-04-23T11:25:06 | 157,698,363 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import ConfigDefinitions.BranchAdditions.BranchDef as Branch
def CalculateTrigger24(TheBranch,TheChain):
    """Flag events passing the Mu24 trigger path.

    Sets ``TheBranch.BranchValue[0]`` to 1.0 when the path fired, the lead
    muon is trigger-matched and filtered, and pt_1 exceeds 25; else 0.0.
    """
    passed = (TheChain.passMu24
              and TheChain.matchMu24_1
              and TheChain.filterMu24_1
              and TheChain.pt_1 > 25.0)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
def CalculateTrigger27(TheBranch,TheChain):
    """Flag events passing the Mu27 trigger path (fired, matched, filtered,
    pt_1 > 25); writes 1.0/0.0 into ``TheBranch.BranchValue[0]``."""
    passed = (TheChain.passMu27
              and TheChain.matchMu27_1
              and TheChain.filterMu27_1
              and TheChain.pt_1 > 25.0)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
def CalculateTrigger2027(TheBranch,TheChain):
    """Flag events passing the Mu20/HPSTau27 cross-trigger path.

    Requires the path to fire, both legs matched and filtered, the muon with
    21 < pt_1 < 25, the tau with pt_2 > 32, and both legs within |eta| < 2.1.
    """
    passed = (TheChain.passMu20HPSTau27
              and TheChain.matchMu20HPSTau27_1
              and TheChain.matchMu20HPSTau27_2
              and 21 < TheChain.pt_1 < 25
              and TheChain.pt_2 > 32
              and abs(TheChain.eta_1) < 2.1
              and abs(TheChain.eta_2) < 2.1
              and TheChain.filterMu20HPSTau27_1
              and TheChain.filterMu20HPSTau27_2)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
# Wire each calculator to a named user branch.  ``Name`` and
# ``CalculateValue`` are presumably read by the Jesterworks branch-addition
# machinery when filling output trees -- confirm against BranchDef.
Trigger24 = Branch.UserBranch()
Trigger24.Name = "Trigger24"
Trigger24.CalculateValue = CalculateTrigger24
Trigger27 = Branch.UserBranch()
Trigger27.Name = "Trigger27"
Trigger27.CalculateValue = CalculateTrigger27
Trigger2027 = Branch.UserBranch()
Trigger2027.Name = "Trigger2027"
Trigger2027.CalculateValue = CalculateTrigger2027
| [
"aloelige@cern.ch"
] | aloelige@cern.ch |
271552d01f195d3c29fa9a58ded6978f835d3f95 | 068fbb8bdd43970f66a64c6dd77a8a70c5915b65 | /RandomWalkTest.py | 49899e66db7229926c1effe93915f3df885b0e31 | [] | no_license | utkarshrutgers/FinancialModelling1 | 5fe653a60a3a192dccb7d8fcfae5a6d2478eb5b3 | 01788f0092b02ca492f02d1115bd59d9f8da7023 | refs/heads/master | 2020-04-22T02:37:55.507059 | 2019-03-30T00:44:14 | 2019-03-30T00:44:14 | 170,057,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | '''
Are Stock Prices a Random Walk?
Most stock prices follow a random walk (perhaps with a drift). You will look at a time series of Amazon stock prices, pre-loaded in the DataFrame AMZN, and run the 'Augmented Dickey-Fuller Test' from the statsmodels library to show that it does indeed follow a random walk.
With the ADF test, the "null hypothesis" (the hypothesis that we either reject or fail to reject) is that the series follows a random walk. Therefore, a low p-value (say less than 5%) means we can reject the null hypothesis that the series is a random walk.
INSTRUCTIONS
100XP
Import the adfuller module from statsmodels.
Run the Augmented Dickey-Fuller test on the series of closing stock prices, which is the column Adj Close in the AMZN DataFrame.
Print out the entire output, which includes the test statistic, the p-values, and the critical values for tests with 1%, 10%, and 5% levels.
Print out just the p-value of the test (results[0] is the test statistic, and results[1] is the p-value).
'''
# Import the adfuller module from statsmodels
from statsmodels.tsa.stattools import adfuller
# Run the ADF test on the price series and print out the results
# NOTE(review): the exercise text above says the data is the AMZN DataFrame,
# but MSFT is used here and is never defined in this script -- confirm which
# pre-loaded DataFrame is intended.
results = adfuller(MSFT['Adj Close'])
print(results)
# Just print out the p-value
print('The p-value of the test on prices is: ' + str(results[1]))
# Simulate many 50-step coin-toss random walks and report the average
# final displacement from the origin.
import random

NUM_TRIALS = 999          # matches the original range(1, 1000)
STEPS_PER_TRIAL = 50

countsum = 0
for j in range(NUM_TRIALS):
    # BUG FIX: each walk must restart from the origin; the original never
    # reset ``count``, so it accumulated across trials.
    count = 0
    for i in range(STEPS_PER_TRIAL):
        toss = random.randint(1, 100)
        if toss > 50:
            count += 1
        else:
            count -= 1
    countsum += count
# BUG FIX: average over the number of trials, not a hard-coded 100.
print(countsum / NUM_TRIALS)
| [
"noreply@github.com"
] | noreply@github.com |
84ed96d178c03a6c6abcf7b75edfbc8a0653584d | 5450f8c470bd2de5ee2fa03f20de18a908756c58 | /leetcode solutions/Python/swap nodes in pairs/swapNodesInPairs.py | 71efe890d76149a6378be8729936ec8d6ee125ac | [] | no_license | ruchir-hj/fun-coding | c69d140faede83c91e26a42e54a48860eef53d81 | c29253a1aed51c14edee751d87f76ea759c86966 | refs/heads/master | 2021-01-18T18:09:07.315209 | 2016-11-18T21:05:25 | 2016-11-18T21:05:25 | 69,759,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | class ListNode(object):
def __init__(self, x):
self.val
self.next = None
class Solution(object):
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Zero or one node: nothing to swap.
        if not head or not head.next:
            return head
        # Sentinel in front of the list so the head pair needs no special case.
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy
        while prev.next and prev.next.next:
            first = prev.next
            second = first.next
            # Rewire prev -> second -> first -> (rest of list).
            prev.next = second
            first.next = second.next
            second.next = first
            prev = first
return dummy.next | [
"ruchirhajela@dhcp-rhodes-2444.redrover.cornell.edu"
] | ruchirhajela@dhcp-rhodes-2444.redrover.cornell.edu |
4ac9c1731b711ac918a7000dfa8258ec06c69006 | 4ccaf062d0cceb116280ff231adce2ea6c99650d | /dynamic_dilation.py | cf90e7f9759428de53ec3499690d121930031029 | [] | no_license | martinmeinke/dynamic_dilation | 1a99acc0b39e8e21890a596d7b500c7e30cf0cc9 | e9f0e4a2cc61e130b7892a5cbc47d4ca375c6fb6 | refs/heads/master | 2022-10-13T02:21:05.082356 | 2020-06-08T19:28:57 | 2020-06-08T19:28:57 | 270,795,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class DynamicDilation(nn.Module):
    """3x3 convolution whose effective dilation is chosen per output pixel
    from a range image: small ranges select large dilations and vice versa."""
    def __init__(self, in_ch, out_ch, min_dil_range, max_dil_range, smallest_dil, largest_dil):
        """Build one 3x3 conv per dilation level in [smallest_dil, largest_dil].

        Args:
            in_ch (int): number of input channels
            out_ch (int): number of output channels
            min_dil_range (float): range at which the minimal dilation applies
            max_dil_range (float): range at which the maximal dilation applies
            smallest_dil (int): smallest dilation size (applies at min_dil_range)
            largest_dil (int): largest dilation size (applies at max_dil_range)
        """
        super(DynamicDilation, self).__init__()
        self.n_dilation_levels = largest_dil - smallest_dil + 1
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.min_dil_range = min_dil_range
        self.max_dil_range = max_dil_range
        self.smallest_dil = smallest_dil
        self.largest_dil = largest_dil
        # create convolution layer for each dilation
        # (padding == dilation keeps the spatial size for a 3x3 kernel)
        self.conv_layers = nn.ModuleList([nn.Conv2d(in_ch, out_ch, (3, 3), stride=(1, 1), dilation=(
            d, d), padding=(d, d)) for d in range(smallest_dil, largest_dil+1)])
    def forward(self, x, range_image):
        """Per pixel, pick the conv feature of the range-appropriate dilation.

        Args:
            x: input feature map; assumed (N, in_ch, H, W) -- TODO confirm
            range_image: per-pixel range; assumed (N, 1, h, w), resized by
                interpolation to (H, W) below -- TODO confirm
        """
        # Run every dilation level; stacked below to (levels, N, out_ch, H, W).
        conv_features = [c(x) for c in self.conv_layers]
        # interpolate range image to match input feature size
        range_image_interp = F.interpolate(range_image, x.shape[2:])
        dilstack = torch.stack(conv_features)
        # select best dilation by looking into the range_image_interp
        range_bin_size = (self.min_dil_range - self.max_dil_range) / \
            self.n_dilation_levels
        # clamp range image to the range in which we intend to scale dilation
        range_image_interp_clamp = torch.clamp(
            range_image_interp, self.max_dil_range, self.min_dil_range)
        # Linear map from range to a 1-based level index (small range -> large index).
        dilations = ((self.min_dil_range - range_image_interp_clamp) /
                     range_bin_size + 1).long()
        dilations = torch.clamp(
            dilations, self.smallest_dil, self.largest_dil) - 1
        # Broadcast the index map over channels and add the stack dimension.
        dilations = dilations.repeat(1, self.out_ch, 1, 1)
        dilations = dilations.unsqueeze(0)
        # gather features from proper dilation
        thatsit = torch.gather(dilstack, 0, dilations)
        return thatsit.squeeze(0)
| [
"meinke.martin@googlemail.com"
] | meinke.martin@googlemail.com |
bbe8129b09d85cd20a4dcbad5bcd0f14703eb61a | ae79aa8458230fe2331b267308a29adff215bbfe | /armi/nuclearDataIO/tests/test_xsCollections.py | 9088f3c05a114f56505279c386786363dec4e6f4 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | paulromano/armi | 3727cf3c52de5e412e8db4d5bf5d9998a720616c | 6c4fea1ca9d256a2599efd52af5e5ebe9860d192 | refs/heads/master | 2023-01-10T05:43:27.691791 | 2020-08-07T00:33:35 | 2020-08-07T00:33:35 | 285,824,692 | 1 | 0 | Apache-2.0 | 2020-08-07T12:32:54 | 2020-08-07T12:32:53 | null | UTF-8 | Python | false | false | 3,832 | py | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that tests methods within xsCollections
"""
import unittest
from armi.nuclearDataIO import xsCollections
from armi import nuclearDataIO
from armi.tests import ISOAA_PATH
from armi.physics.neutronics.tests import test_cross_section_manager
class TestXsCollections(unittest.TestCase):
    """Tests for scatter-matrix assembly, macroscopic XS creation, and
    one-group cross-section collapsing."""
    @classmethod
    def setUpClass(cls):
        # Load the shared ISOTXS microscopic library once for the whole class.
        cls.microLib = nuclearDataIO.ISOTXS(ISOAA_PATH)
    def setUp(self):
        # Fresh creator and mock block (U235 + iron number densities) per test.
        self.mc = xsCollections.MacroscopicCrossSectionCreator()
        self.block = test_cross_section_manager.MockBlock()
        self.block.setNumberDensity("U235", 0.02)
        self.block.setNumberDensity("FE", 0.01)
    def test_generateTotalScatteringMatrix(self):
        """Generates the total scattering matrix by summing elastic, inelastic, and n2n scattering matrices."""
        nuc = self.microLib.nuclides[0]
        totalScatter = nuc.micros.getTotalScatterMatrix()
        # The 2.0 on n2n mirrors getTotalScatterMatrix (presumably because an
        # n2n reaction emits two neutrons -- confirm).
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (
                nuc.micros.elasticScatter[0, 0]
                + nuc.micros.inelasticScatter[0, 0]
                + 2.0 * nuc.micros.n2nScatter[0, 0]
            ),
        )
    def test_generateTotalScatteringMatrixWithMissingData(self):
        """
        Generates the total scattering matrix by summing elastic and n2n scattering matrices.
        Notes
        -----
        This tests that the total scattering matrix can be produced when the inelastic scattering matrix is not defined.
        """
        nuc = self.microLib.nuclides[0]
        # NOTE(review): this mutates the class-shared microLib, so any test
        # running afterwards sees inelasticScatter == None -- consider a
        # per-test copy of the library.
        nuc.micros.inelasticScatter = None
        totalScatter = nuc.micros.getTotalScatterMatrix()
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )
    def test_createMacrosFromMicros(self):
        # Rebuild the expected macroscopic XS by hand (micro XS times number
        # density, summed over groups and nuclides) and compare.
        self.mc.createMacrosFromMicros(self.microLib, self.block)
        totalMacroFissionXs = 0.0
        totalMacroAbsXs = 0.0
        for nuc, density in self.mc.densities.items():
            nuclideXS = self.mc.microLibrary.getNuclide(nuc, "AA")
            for microXs in nuclideXS.micros.fission:
                totalMacroFissionXs += microXs * density
            for microXsName in xsCollections.ABSORPTION_XS:
                for microXs in getattr(nuclideXS.micros, microXsName):
                    totalMacroAbsXs += microXs * density
        self.assertAlmostEqual(sum(self.mc.macros.fission), totalMacroFissionXs)
        self.assertAlmostEqual(sum(self.mc.macros.absorption), totalMacroAbsXs)
    def test_collapseCrossSection(self):
        """
        Tests cross section collapsing
        Notes
        -----
        The expected 1 group cross section was generated by running the collapse cross section method. This tests
        that this method has not been modified to produce a different result.
        """
        expected1gXs = 2.35725262208
        micros = self.microLib["U235AA"].micros
        # Descending 33-entry flux used as the collapsing weight.
        flux = list(reversed(range(33)))
        self.assertAlmostEqual(
            micros.collapseCrossSection(micros.nGamma, flux), expected1gXs
        )
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'TestXsCollections.test_generateTotalScatteringMatrix']
    unittest.main()
| [
"ntouran@terrapower.com"
] | ntouran@terrapower.com |
f7543e5d841ceb31ee2674b563c1e772576e185c | 366b2ff9cd498808438bf7c48f697c05b361d02c | /app.py | afd82cb98a6734228c58e3cf1b2d768b487eb5e6 | [] | no_license | c-bata/AngularJS-Bottle-TodoApp | 1aef6b09fd85fabaa63898ab3fb9a2d586216b93 | 8f03820b7949b0c28477970c58f25ccd1856b2a9 | refs/heads/master | 2021-03-12T22:40:32.000758 | 2015-11-04T11:14:47 | 2015-11-04T11:14:47 | 38,732,944 | 2 | 0 | null | 2015-11-04T11:11:39 | 2015-07-08T05:02:47 | Python | UTF-8 | Python | false | false | 1,290 | py | from bottle import (
route, response, run, template, static_file, install, post, request
)
import json
import os
import jsonschema
import models
import schemas
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
install(models.plugin)
@route('/')
def index():
    # Render the single task-page template.
    return template('tasks')
@route('/api/tasks')
def tasks(db):
    # Return every task as JSON.  ``db`` is supplied by the installed
    # models.plugin (presumably a DB session plugin -- confirm).
    response.content_type = 'application/json'
    tasks = [task.serialize for task in db.query(models.Task).all()]
    return json.dumps({'tasks': tasks})
@post('/api/tasks')
def create_task(db):
    """Create a task from the JSON request body and return it as JSON.

    Responds with HTTP 400 and an error payload when the body fails
    schema validation.
    """
    response.content_type = 'application/json'
    try:
        jsonschema.validate(request.json, schemas.task_schema)
        task = models.Task(title=request.json['title'])
        db.add(task)
        # Commit now: the id and column defaults are only populated after
        # the commit, and we serialize them below.
        db.commit()
        return json.dumps(task.serialize)
    except jsonschema.ValidationError:
        # BUG FIX: Bottle sets the HTTP status via ``response.status``;
        # ``status_code`` is not part of Bottle's response API, so invalid
        # requests were previously answered with 200.
        response.status = 400
        return json.dumps({
            'error': {'message': 'Validation is failed...'}
        })
@route('/static/<filename:path>')
def send_static(filename):
    # Serve bundled front-end assets from STATIC_DIR.
    return static_file(filename, root=STATIC_DIR)
if __name__ == '__main__':
    # Development server settings: auto-reload and debug tracebacks enabled.
    run(host='localhost', port=8080, debug=True, reloader=True)
| [
"contact@c-bata.link"
] | contact@c-bata.link |
c84a55dd992ae1628b0780c38e1917efe61e7ace | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03399/s036631775.py | eaffb9a399aacef430540ccd833a1b116ff7cb11 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | n = [int(input()) for _ in range(4)]
print(min(n[0],n[1])+min(n[2],n[3])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4288da7aa872027efef2838115ab620d090d1047 | 7b8ad61c3696c6412a03a3a98f840932a3d21b72 | /GPac/tree.py | 082820d18dfa9e8495e5b588c0b86b898f3e7551 | [] | no_license | matthewia94/cs5401 | 4c7d7f585994fdf442bb65f8221865256ff61de1 | 42e379de687ad58c10b0524cec4a27e699652105 | refs/heads/master | 2016-08-08T02:17:15.756030 | 2015-12-07T05:57:04 | 2015-12-07T05:57:04 | 41,692,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | __author__ = 'matt'
__eamil__ = 'mia2n4@mst.edu'
from Queue import Queue
import random
import copy
class Tree:
def __init__(self, data, depth):
self.data = data
self.children = []
self.depth = depth
def add_child(self, data):
self.children.append(data)
# Find the depth of a tree
@staticmethod
def find_depth(tree):
if tree.children == []:
return 0
else:
return max(Tree.find_depth(tree.children[0]), Tree.find_depth(tree.children[1])) + 1
# Select a random node from the tree
def rand_node(self):
nodes = Queue()
nodes.put(self)
num_nodes = 1
selected = self
# Randomly pick an element
while not nodes.empty():
n = nodes.get()
if random.randint(1, num_nodes) == num_nodes:
selected = n
num_nodes += 1
for i in n.children:
nodes.put(i)
return selected
@staticmethod
def crossover(parent1, parent2):
child1 = copy.deepcopy(parent1)
child2 = copy.deepcopy(parent2)
sel1 = child1.rand_node()
sel2 = child2.rand_node()
q = Queue()
q.put(child1)
while not q.empty():
node = q.get()
for i in range(len(node.children)):
q.put(node.children[i])
if node.children[i] is sel1:
node.children[i] = sel2
break
q = Queue()
q.put(child2)
while not q.empty():
node = q.get()
for i in range(len(node.children)):
q.put(node.children[i])
if node.children[i] is sel2:
node.children[i] = sel1
break
return child1, child2
| [
"matthewia94@gmail.com"
] | matthewia94@gmail.com |
31aa213c9c86f5b8d70e9e11bc46afd9f18f080f | f89cd292ab7d70bcfd1a07d383196269ab616d0d | /src/App/models/__init__.py | f7bc322f6ac5c40665683414e81f5b9f43f48a62 | [] | no_license | edwinvanders/django-neomodel-sample | 54f8d54b539631a381df74d06990c23cc3b2a23e | d953cae0f97c6cc665bb3994a4b82e373e69c69b | refs/heads/master | 2021-06-25T00:54:42.357241 | 2017-07-29T11:26:27 | 2017-07-29T11:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | from .user import *
from .option import *
from .question import * | [
"farsad@Seyeds-MacBook-Pro.local"
] | farsad@Seyeds-MacBook-Pro.local |
9e02f1f5e378de2d29593ff5b0c7234dc46017ae | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/hardware/apis_controller.py | 3f9007cdfd86fd568ce9d3cbf6a0909680c9efef | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 5,881 | py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from traits.api import Property, provides
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.actuators.iactuator import IActuator
from pychron.hardware.core.core_device import CoreDevice
# Numeric command codes understood by the APIS controller; commands are sent
# as '<code>' or '<code>,<arg>' strings over the communicator (see ask() calls).
CMD_MAP = {'list_blanks': '100',
           'list_airs': '101',
           'last_runid': '102',
           'pipette_record': '103',
           'status': '104',
           'load_blank': '105',
           'load_air': '106',
           'cancel': '107',
           'set_external_pumping': '108'}

# Status codes returned by the 'status' (104) query, mapped to readable names.
STATUS_MAP = {'0': 'Idle',
              '1': 'Pumping pipette',
              '2': 'Loading pipette',
              '3': 'Expanding pipettes',
              '4': 'Expansion complete'}
@provides(IActuator)
class ApisController(CoreDevice):
    """Driver for an APIS pipette controller.

    Commands are numeric codes (see CMD_MAP) sent over the CoreDevice
    communicator; status replies are decoded via STATUS_MAP.
    """

    # host:port of the underlying communicator (computed property, see
    # _get_connection_url).
    connection_url = Property

    # close `isolation_valve` `isolation_delay` seconds after loading of pipette started
    isolation_delay = 25
    # name of valve to make analytical section static
    isolation_valve = 'U'
    isolation_info = 'isolate microbone'

    # instead of the simple wait/close sequence use the a gosub
    # use this for a more complex/flexible pattern i.e open/close multiple valves
    isolation_gosub = None

    def load_additional_args(self, config):
        """Read the [Isolation] section of the device config file.

        Always returns True so CoreDevice treats configuration as successful.
        """
        v = self.config_get(config, 'Isolation', 'valve', optional=False, default='U')
        self.isolation_delay = self.config_get(config, 'Isolation', 'delay', optional=False, cast='int', default=25)
        self.isolation_info = self.config_get(config, 'Isolation', 'info', optional=True)
        self.isolation_gosub = self.config_get(config, 'Isolation', 'gosub', optional=True)
        # strip stray quote characters so a config value like '"A,B"' becomes A,B
        self.isolation_valve = v.replace('"', '').replace("'", '')
        return True

    #iacuator protocol
    def close_channel(self, obj):
        # "Closing" the channel maps to disabling external pumping; the obj
        # argument is required by the IActuator protocol but unused here.
        self.set_external_pumping(False)
        return True

    def open_channel(self, obj):
        # "Opening" the channel maps to enabling external pumping.
        self.set_external_pumping(True)
        return True

    def get_channel_state(self, obj):
        # Not supported by this device; IActuator stub.
        pass

    def get_lock_state(self, obj):
        # Not supported by this device; IActuator stub.
        pass

    def script_loading_block(self, script, **kw):
        """
        wait for script loading to complete.

        this process has three steps.
        1. wait for loading to start. status changes from 1 to 2
        2. if isolation_gosub
              do gosub
           else
              wait `isolation_delay` seconds then close the `isolation valve`
        3. wait for apis script to complete

        return True if completed successfully
        """
        # Step 1: block until the controller reports 'Loading pipette'.
        script.console_info('waiting for pipette to load')
        if not self.blocking_poll('loading_started', script=script, **kw):
            return

        script.console_info('loading started')
        # Step 2: isolate the analytical section, either via a custom gosub
        # or the simple wait-then-close-valves sequence.
        if self.isolation_gosub:
            self.debug('executing isolation gosub= {}'.format(self.isolation_gosub))
            script.gosub(self.isolation_gosub)
        else:
            ws = self.isolation_delay
            self.debug('wait {}s'.format(ws))
            time.sleep(ws)

            if self.isolation_info:
                script.console_info(self.isolation_info)

            # isolation_valve may be a comma-separated list of valve names
            iv = self.isolation_valve
            iv = iv.split(',')
            for v in iv:
                script.close(v.strip())

        # Step 3: block until the controller reports 'Expansion complete'.
        script.console_info('wait for apis to complete expansion')
        return self.blocking_poll('get_loading_complete', script=script, **kw)

    def make_command(self, cmd):
        """Translate a symbolic command name into its numeric code.

        NOTE(review): on an unknown name this returns an error *string* that
        would then be sent to the device; confirm whether raising would be
        more appropriate for callers.
        """
        try:
            return CMD_MAP[cmd]
        except KeyError:
            return 'invalid command cmd={}'.format(cmd)

    def load_blank(self, name):
        """Ask the controller to load the named blank pipette."""
        cmd = self.make_command('load_blank')
        self.ask('{},{}'.format(cmd, name))

    def load_pipette(self, name):
        """Ask the controller to load the named air pipette."""
        cmd = self.make_command('load_air')
        self.ask('{},{}'.format(cmd, name))

    def get_status(self):
        """Query the raw status code (a string; see STATUS_MAP)."""
        cmd = self.make_command('status')
        status = self.ask(cmd)
        return status

    def get_loading_status(self):
        """Return the human-readable status, or None for unknown codes."""
        status = self.get_status()
        try:
            status = STATUS_MAP[status]
            return status
        except KeyError:
            pass

    def loading_started(self):
        # Poll predicate used by script_loading_block step 1.
        status = self.get_loading_status()
        return status == 'Loading pipette'

    def get_loading_complete(self):
        # Poll predicate used by script_loading_block step 3.
        status = self.get_loading_status()
        return status == 'Expansion complete'

    def get_available_blanks(self):
        """Return the controller's list of blank pipettes (raw reply)."""
        cmd = self.make_command('list_blanks')
        return self.ask(cmd)

    def get_available_airs(self):
        """Return the controller's list of air pipettes (raw reply)."""
        cmd = self.make_command('list_airs')
        return self.ask(cmd)

    def set_external_pumping(self, state):
        """Enable/disable external pumping; sends 'true'/'false' as the arg."""
        cmd = self.make_command('set_external_pumping')
        cmd = '{},{}'.format(cmd, 'true' if state else 'false')
        return self.ask(cmd)

    def _get_connection_url(self):
        # Property getter for connection_url.
        return '{}:{}'.format(self.communicator.host, self.communicator.port)
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
402f85f8af5e0597df4d226ac43f8420da09a275 | 475e2a7d8547c9d17c28eb0a28d2d27a86031d9a | /multiagent-particle-envs/bin/interactive.py | ea4937610f844c1796d48238ec4f635e5975f4fe | [
"MIT"
] | permissive | dch133/MADDPG-With-Robot-Arms | b5c89a73e142d25552b482a366e5d2cdfa7786e5 | cb9f09202e6d363cef5d60da4aeaadfb95f09b6d | refs/heads/master | 2020-11-24T03:14:00.467224 | 2020-01-07T21:20:45 | 2020-01-07T21:20:45 | 227,941,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | #!/usr/bin/env python
import os, sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
from multiagent.environment import MultiAgentEnv
from multiagent.policy import InteractivePolicy
import multiagent.scenarios as scenarios
# Two Robot Arms environment
if __name__ == '__main__':
import gym
import numpy as np
right_hand_shift = 0.1
env = gym.make('FetchReach-v1')
env = env.unwrapped
env2 = gym.make('FetchReach-v1')
env2 = env2.unwrapped
# Set the goal of both arms to the same
env2.goal = np.copy(env.goal)
# Set position of env2 slightly on the right of env (mimicking right arm reference frame)
env2.goal[1] -= right_hand_shift
env.render()
env2.render()
# create interactive policies for each agent
policies = [InteractivePolicy(env, i) for i in range(env.n)]
policies2 = [InteractivePolicy(env2, i) for i in range(env.n)]
# execution loop
obs_n = env.reset()
obs_n2 = env2.reset()
while True:
# query for action from each agent's policy
act_n = []
act_n2 = []
for i, policy in enumerate(policies):
act_n.append(policy.action(obs_n[i]))
for i, policy in enumerate(policies2):
act_n.append(policy.action(obs_n2[i]))
# step environment
obs_n, reward_n, done_n, _ = env.step(act_n)
obs_n2, reward_n2, done_n2, _ = env2.step(act_n2)
# render all agent views
env.render()
env2.render()
# display rewards
print("Left Arm reward: %0.3f" % reward_n)
print("Right Arm reward: %0.3f" % reward_n2)
# MultiAgent Environment
# if __name__ == '__main__':
# # parse arguments
# parser = argparse.ArgumentParser(description=None)
# parser.add_argument('-s', '--scenario', default='simple.py', help='Path of the scenario Python script.')
# args = parser.parse_args()
#
# # load scenario from script
# scenario = scenarios.load(args.scenario).Scenario()
# # create world
# world = scenario.make_world()
# # create multiagent environment
# env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None, shared_viewer = False)
# # render call to create viewer window (necessary only for interactive policies)
# env.render()
# # create interactive policies for each agent
# policies = [InteractivePolicy(env,i) for i in range(env.n)]
# # execution loop
# obs_n = env.reset()
# while True:
# # query for action from each agent's policy
# act_n = []
# for i, policy in enumerate(policies):
# act_n.append(policy.action(obs_n[i]))
# # step environment
# obs_n, reward_n, done_n, _ = env.step(act_n)
# # render all agent views
# env.render()
# # display rewards
# #for agent in env.world.agents:
# # print(agent.name + " reward: %0.3f" % env._get_reward(agent))
| [
"noreply@github.com"
] | noreply@github.com |
d2382445e3bfadc1fc1b835d2858c44096c8b906 | 1585a02847a2525087cbe1e18df45d9e2b589492 | /OFF-POLICY/envs/hanabi/rl_env.py | cc7e2f12f785321fd0b860159e5f5c8c684ab4bb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | umersheikh846/benchmarkmarl_repo | 0f93e24cc12bd0164eca78a2c6a522f4cc20a748 | 44411fddac496c7ee63abbce2cf277ebcf4c28e8 | refs/heads/main | 2023-02-06T06:06:55.802795 | 2021-01-03T08:29:21 | 2021-01-03T08:29:21 | 326,361,025 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,373 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL environment for Hanabi, using an API similar to OpenAI Gym."""
from __future__ import absolute_import
from __future__ import division
import numpy as np
from gym.spaces import Box, Discrete

from . import pyhanabi
from .pyhanabi import color_char_to_idx
MOVE_TYPES = [_.name for _ in pyhanabi.HanabiMoveType]
#-------------------------------------------------------------------------------
# Environment API
#-------------------------------------------------------------------------------
class Environment(object):
    """Abstract base for Hanabi-style RL environments.

    Concrete environments derive from this interface and implement both
    method stubs; instantiating and calling the base class always raises.
    """

    def reset(self, config):
        r"""Restart the environment from a parameter dict.

        Args:
          config: dict specifying the parameters of the environment to be
            generated.

        Returns:
          observation: dict containing the full observation state.
        """
        raise NotImplementedError("Not implemented in Abstract Base class")

    def step(self, action):
        """Advance the game by one agent action.

        Args:
          action: dict mapping to an action taken by an agent.

        Returns:
          (observation, reward, done, info): full observation dict, float
          reward, terminal flag, and optional debugging info.

        Raises:
          AssertionError: when an illegal action is provided (in concrete
            implementations).
        """
        raise NotImplementedError("Not implemented in Abstract Base class")
class HanabiEnv(Environment):
"""RL interface to a Hanabi environment.
```python
environment = rl_env.make()
config = { 'players': 5 }
observation = environment.reset(config)
while not done:
# Agent takes action
action = ...
# Environment take a step
observation, reward, done, info = environment.step(action)
```
"""
def __init__(self, hanabi_name, num_players, seed):
r"""Creates an environment with the given game configuration.
Args:
config: dict, With parameters for the game. Config takes the following
keys and values.
- colors: int, Number of colors \in [2,5].
- ranks: int, Number of ranks \in [2,5].
- players: int, Number of players \in [2,5].
- hand_size: int, Hand size \in [4,5].
- max_information_tokens: int, Number of information tokens (>=0).
- max_life_tokens: int, Number of life tokens (>=1).
- observation_type: int.
0: Minimal observation.
1: First-order common knowledge observation.
- seed: int, Random seed.
- random_start_player: bool, Random start player.
"""
if (hanabi_name == "Hanabi-Full" or hanabi_name == "Hanabi-Full-CardKnowledge"): # max:action 48 obs=1380 min:action=20 obs=783 score=25
config={
"colors":5,
"ranks":5,
"players":num_players,
"max_information_tokens":8,
"max_life_tokens":3,
"observation_type":pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value,
"seed":seed
}
elif hanabi_name == "Hanabi-Full-Minimal":# max:action 48 obs=680 min:action=20 obs=433 score=25 use memory
config={
"colors": 5,
"ranks": 5,
"players": num_players,
"max_information_tokens": 8,
"max_life_tokens": 3,
"observation_type": pyhanabi.AgentObservationType.MINIMAL.value,
"seed":seed
}
elif hanabi_name == "Hanabi-Small": # max:action=32 obs=356 min:action=11 obs=191 score=10
config={
"colors":2,
"ranks":5,
"players":num_players,
"hand_size":2,
"max_information_tokens":3,
"max_life_tokens":1,
"observation_type":pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value,
"seed":seed
}
elif hanabi_name == "Hanabi-Very-Small": # max:action=28 obs=215 min:action=10 obs=116 score=5
config={
"colors":1,
"ranks":5,
"players":num_players,
"hand_size":2,
"max_information_tokens":3,
"max_life_tokens":1,
"observation_type":pyhanabi.AgentObservationType.CARD_KNOWLEDGE.value,
"seed":seed
}
else:
raise ValueError("Unknown environment {}".format(args.hanabi_name))
assert isinstance(config, dict), "Expected config to be of type dict."
self.game = pyhanabi.HanabiGame(config)
self.observation_encoder = pyhanabi.ObservationEncoder(
self.game, pyhanabi.ObservationEncoderType.CANONICAL)
self.players = self.game.num_players()
self.num_agents = self.players
self.agent_ids = [i for i in range(self.num_agents)]
self.action_space = []
self.observation_space = []
self.share_observation_space = []
for i in range(self.players):
self.action_space.append(Discrete(self.num_moves()))
self.observation_space.append([self.vectorized_observation_shape()[0]+self.players])
self.share_observation_space.append([self.vectorized_share_observation_shape()[0]+self.players])
self.observation_space = self._convert_to_dict(self.observation_space)
self.share_observation_space = self._convert_to_dict(self.share_observation_space)
self.action_space = self._convert_to_dict(self.action_space)
def reset(self, choose=True):
"""Resets the environment for a new game.
Returns:
observation: dict, containing the full observation about the game at the
current step. *WARNING* This observation contains all the hands of the
players and should not be passed to the agents.
An example observation:
{'current_player': 0,
'player_observations': [{'current_player': 0,
'current_player_offset': 0,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [{'action_type': 'PLAY',
'card_index': 0},
{'action_type': 'PLAY',
'card_index': 1},
{'action_type': 'PLAY',
'card_index': 2},
{'action_type': 'PLAY',
'card_index': 3},
{'action_type': 'PLAY',
'card_index': 4},
{'action_type':
'REVEAL_COLOR',
'color': 'R',
'target_offset': 1},
{'action_type':
'REVEAL_COLOR',
'color': 'G',
'target_offset': 1},
{'action_type':
'REVEAL_COLOR',
'color': 'B',
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 0,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 1,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 2,
'target_offset': 1}],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1}],
[{'color': 'G', 'rank': 2},
{'color': 'R', 'rank': 0},
{'color': 'R', 'rank': 1},
{'color': 'B', 'rank': 0},
{'color': 'R', 'rank':
1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]},
{'current_player': 0,
'current_player_offset': 1,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1},
{'color': None, 'rank':
-1}],
[{'color': 'W', 'rank': 2},
{'color': 'Y', 'rank': 4},
{'color': 'Y', 'rank': 2},
{'color': 'G', 'rank': 0},
{'color': 'W', 'rank':
1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]}]}
"""
if choose:
self.state = self.game.new_initial_state()
while self.state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
self.state.deal_random_card()
observation = self._make_observation_all_players()
observation["current_player"] = self.state.cur_player()
agent_turn=np.zeros(self.players).astype(np.int).tolist()
agent_turn[self.state.cur_player()]=1
obs=[]
share_obs=[]
available_actions = np.zeros((self.players,self.num_moves()))
for i in range(self.players):
obs.append(observation['player_observations'][i]['vectorized']+agent_turn)
share_obs.append(observation['player_observations'][i]['vectorized_ownhand']+observation['player_observations'][i]['vectorized']+agent_turn)
available_actions[i][observation['player_observations'][i]['legal_moves_as_int']]=1.0
else:
obs = np.zeros((self.players,self.vectorized_observation_shape()[0]+self.players))
share_obs = np.zeros((self.players, self.vectorized_share_observation_shape()[0]+self.players))
available_actions = np.zeros((self.players,self.num_moves()))
obs = self._convert_to_dict(obs)
share_obs = self._convert_to_dict(share_obs)
available_actions = self._convert_to_dict(available_actions)
return [obs], [share_obs], [available_actions] # pick share obs of agent 0, but need to double check
    def vectorized_observation_shape(self):
        """Returns the shape of the vectorized observation.

        Returns:
          A list of integer dimensions describing the observation shape.
          Note: reset()/step() append a num_players one-hot on top of this.
        """
        return self.observation_encoder.shape()
    def vectorized_share_observation_shape(self):
        """Returns the shape of the vectorized shared observation.

        Returns:
          A one-element list: own-hand encoding length plus the regular
          observation length (the two are concatenated in reset()/step()).
        """
        return [self.observation_encoder.ownhandshape()[0] + self.observation_encoder.shape()[0]]
    def num_moves(self):
        """Returns the total number of moves in this game (legal or not).

        Returns:
          Integer, number of moves; also the length of the per-agent
          legal-action masks and Discrete action spaces.
        """
        return self.game.max_moves()
def step(self, action):
"""Take one step in the game.
Args:
action: dict, mapping to a legal action taken by an agent. The following
actions are supported:
- { 'action_type': 'PLAY', 'card_index': int }
- { 'action_type': 'DISCARD', 'card_index': int }
- {
'action_type': 'REVEAL_COLOR',
'color': str,
'target_offset': int >=0
}
- {
'action_type': 'REVEAL_RANK',
'rank': str,
'target_offset': int >=0
}
Alternatively, action may be an int in range [0, num_moves()).
Returns:
observation: dict, containing the full observation about the game at the
current step. *WARNING* This observation contains all the hands of the
players and should not be passed to the agents.
An example observation:
{'current_player': 0,
'player_observations': [{'current_player': 0,
'current_player_offset': 0,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [{'action_type': 'PLAY',
'card_index': 0},
{'action_type': 'PLAY',
'card_index': 1},
{'action_type': 'PLAY',
'card_index': 2},
{'action_type': 'PLAY',
'card_index': 3},
{'action_type': 'PLAY',
'card_index': 4},
{'action_type': 'REVEAL_COLOR',
'color': 'R',
'target_offset': 1},
{'action_type': 'REVEAL_COLOR',
'color': 'G',
'target_offset': 1},
{'action_type': 'REVEAL_COLOR',
'color': 'B',
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 0,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 1,
'target_offset': 1},
{'action_type': 'REVEAL_RANK',
'rank': 2,
'target_offset': 1}],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1}],
[{'color': 'G', 'rank': 2},
{'color': 'R', 'rank': 0},
{'color': 'R', 'rank': 1},
{'color': 'B', 'rank': 0},
{'color': 'R', 'rank': 1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]},
{'current_player': 0,
'current_player_offset': 1,
'deck_size': 40,
'discard_pile': [],
'fireworks': {'B': 0,
'G': 0,
'R': 0,
'W': 0,
'Y': 0},
'information_tokens': 8,
'legal_moves': [],
'life_tokens': 3,
'observed_hands': [[{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1},
{'color': None, 'rank': -1}],
[{'color': 'W', 'rank': 2},
{'color': 'Y', 'rank': 4},
{'color': 'Y', 'rank': 2},
{'color': 'G', 'rank': 0},
{'color': 'W', 'rank': 1}]],
'num_players': 2,
'vectorized': [ 0, 0, 1, ... ]}]}
reward: float, Reward obtained from taking the action.
done: bool, Whether the game is done.
info: dict, Optional debugging information.
Raises:
AssertionError: When an illegal action is provided.
"""
action_dict = action[0]
action_list = list(action_dict.values())
action = int(np.argmax(action_list[self.state.cur_player()]))
if isinstance(action, dict):
# Convert dict action HanabiMove
action = self._build_move(action)
elif isinstance(action, int):
if action == -1:# invalid action
obs = np.zeros((self.players,self.vectorized_observation_shape()[0]+self.players))
share_obs = np.zeros((self.players, self.vectorized_share_observation_shape()[0]+self.players))
rewards = np.zeros((self.players))
done = None
infos = {'score':self.state.score()}
available_actions = np.zeros((self.players,self.num_moves()))
return obs, share_obs, rewards, [done] * self.players, infos, available_actions
# Convert int action into a Hanabi move.
action = self.game.get_move(action)
else:
raise ValueError("Expected action as dict or int, got: {}".format(
action))
last_score = self.state.score()
# Apply the action to the state.
self.state.apply_move(action)
while self.state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
self.state.deal_random_card()
observation = self._make_observation_all_players()
obs = []
share_obs = []
available_actions = np.zeros((self.players,self.num_moves()))
agent_turn=np.zeros(self.players).astype(np.int).tolist()
agent_turn[self.state.cur_player()]=1
for i in range(self.players):
obs.append(observation['player_observations'][i]['vectorized'] + agent_turn)
share_obs.append(observation['player_observations'][i]['vectorized_ownhand']+observation['player_observations'][i]['vectorized']+agent_turn)
available_actions[i][observation['player_observations'][i]['legal_moves_as_int']]=1.0
done = self.state.is_terminal()
# Reward is score differential. May be large and negative at game end.
reward = self.state.score() - last_score
rewards = [reward]*self.players
infos = {'score': self.state.score()}
obs = self._convert_to_dict(obs)
share_obs = self._convert_to_dict(share_obs)
rewards = self._convert_to_dict(rewards)
done_list = [done] * self.players
dones = self._convert_to_dict(done_list)
dones['env'] = all([dones[agent_id] for agent_id in self.agent_ids])
available_actions = self._convert_to_dict(available_actions)
return [obs], [share_obs], [rewards], [dones], [infos], [available_actions]
def _make_observation_all_players(self):
"""Make observation for all players.
Returns:
dict, containing observations for all players.
"""
obs = {}
player_observations = [self._extract_dict_from_backend(
player_id, self.state.observation(player_id))
for player_id in range(self.players)] # pylint: disable=bad-continuation
obs["player_observations"] = player_observations
obs["current_player"] = self.state.cur_player()
return obs
    def _extract_dict_from_backend(self, player_id, observation):
        """Extract a dict of features from an observation from the backend.

        Args:
          player_id: Int, player from whose perspective we generate the observation.
          observation: A `pyhanabi.HanabiObservation` object.

        Returns:
          obs_dict: dict, mapping from HanabiObservation to a dict.
        """
        obs_dict = {}
        obs_dict["current_player"] = self.state.cur_player()
        obs_dict["current_player_offset"] = observation.cur_player_offset()
        obs_dict["life_tokens"] = observation.life_tokens()
        obs_dict["information_tokens"] = observation.information_tokens()
        obs_dict["num_players"] = observation.num_players()
        obs_dict["deck_size"] = observation.deck_size()

        # Map each color character to its current firework height.
        obs_dict["fireworks"] = {}
        fireworks = self.state.fireworks()
        for color, firework in zip(pyhanabi.COLOR_CHAR, fireworks):
            obs_dict["fireworks"][color] = firework

        # Legal moves both as human-readable dicts and as flat action ids.
        obs_dict["legal_moves"] = []
        obs_dict["legal_moves_as_int"] = []
        for move in observation.legal_moves():
            obs_dict["legal_moves"].append(move.to_dict())
            obs_dict["legal_moves_as_int"].append(self.game.get_move_uid(move))

        obs_dict["observed_hands"] = []
        for player_hand in observation.observed_hands():
            cards = [card.to_dict() for card in player_hand]
            obs_dict["observed_hands"].append(cards)

        obs_dict["discard_pile"] = [
            card.to_dict() for card in observation.discard_pile()
        ]

        # Return hints received.
        obs_dict["card_knowledge"] = []
        for player_hints in observation.card_knowledge():
            player_hints_as_dicts = []
            for hint in player_hints:
                hint_d = {}
                if hint.color() is not None:
                    hint_d["color"] = pyhanabi.color_idx_to_char(hint.color())
                else:
                    hint_d["color"] = None
                hint_d["rank"] = hint.rank()
                player_hints_as_dicts.append(hint_d)
            obs_dict["card_knowledge"].append(player_hints_as_dicts)

        # ipdb.set_trace()
        # Canonical bit encodings consumed by reset()/step().
        obs_dict["vectorized"] = self.observation_encoder.encode(observation)
        obs_dict["vectorized_ownhand"] = self.observation_encoder.encodeownhand(observation)
        # Keep a handle on the raw backend observation for advanced consumers.
        obs_dict["pyhanabi"] = observation

        return obs_dict
    def _build_move(self, action):
        """Build a move from an action dict.

        Args:
          action: dict, mapping to a legal action taken by an agent. The
            following actions are supported:
              - { 'action_type': 'PLAY', 'card_index': int }
              - { 'action_type': 'DISCARD', 'card_index': int }
              - { 'action_type': 'REVEAL_COLOR', 'color': str,
                  'target_offset': int >=0 }
              - { 'action_type': 'REVEAL_RANK', 'rank': str,
                  'target_offset': int >=0 }

        Returns:
          move: A `HanabiMove` object constructed from action.

        Raises:
          ValueError: Unknown action type.
          AssertionError: malformed action dict or illegal move.
        """
        # Validation is assert-based, so malformed actions surface as
        # AssertionError (as documented by step()).
        assert isinstance(action, dict), "Expected dict, got: {}".format(action)
        assert "action_type" in action, ("Action should contain `action_type`. "
                                         "action: {}").format(action)
        action_type = action["action_type"]
        assert (action_type in MOVE_TYPES), (
            "action_type: {} should be one of: {}".format(action_type, MOVE_TYPES))

        if action_type == "PLAY":
            card_index = action["card_index"]
            move = pyhanabi.HanabiMove.get_play_move(card_index=card_index)
        elif action_type == "DISCARD":
            card_index = action["card_index"]
            move = pyhanabi.HanabiMove.get_discard_move(card_index=card_index)
        elif action_type == "REVEAL_RANK":
            target_offset = action["target_offset"]
            rank = action["rank"]
            move = pyhanabi.HanabiMove.get_reveal_rank_move(
                target_offset=target_offset, rank=rank)
        elif action_type == "REVEAL_COLOR":
            target_offset = action["target_offset"]
            assert isinstance(action["color"], str)
            # Colors arrive as characters; the backend expects indices.
            color = color_char_to_idx(action["color"])
            move = pyhanabi.HanabiMove.get_reveal_color_move(
                target_offset=target_offset, color=color)
        else:
            raise ValueError("Unknown action_type: {}".format(action_type))

        # Double-check against the backend's legal-move list via string
        # comparison, since HanabiMove objects are distinct instances.
        legal_moves = self.state.legal_moves()
        assert (str(move) in map(
            str,
            legal_moves)), "Illegal action: {}. Move should be one of : {}".format(
                move, legal_moves)

        return move
def _convert_to_dict(self, vals):
"""
Convert a list of per-agent values into a dict mapping agent_id to the agent's corresponding value.
Args:
vals: list of per-agent values. Must be of length self.num_agents
Returns:
dict: dictionary mapping agent_id to the agent' corresponding value, as specified in vals
"""
return dict(zip(self.agent_ids, vals))
def _convert_action(self, action_space, action):
if isinstance(action_space, Discrete):
if type(action) == np.ndarray and len(action) == action_space.n:
converted_action = action
else:
converted_action = np.zeros(action_space.n)
if type(action) == np.ndarray or type(action) == list:
converted_action[action[0]] = 1.0
else:
converted_action[action] = 1.0
elif isinstance(action_space, Box):
converted_action = action
else:
#TODO(akash): support MultiDiscrete
raise Exception("Unknown env, must be Discrete or Box")
return converted_action
def make(environment_name="Hanabi-Full", num_players=2, pyhanabi_path=None, seed=0):
    """Make an environment.

    Args:
      environment_name: str, name of a preset understood by HanabiEnv
        ("Hanabi-Full"/"Hanabi-Full-CardKnowledge", "Hanabi-Full-Minimal",
        "Hanabi-Small", "Hanabi-Very-Small").
      num_players: int, number of players in this game.
      pyhanabi_path: str, absolute path to header files for c code linkage.
      seed: int, random seed forwarded to HanabiEnv. New, defaulted parameter
        so existing call sites keep working; NOTE(review): the old config
        dicts omitted the seed entirely — confirm 0 is an acceptable default.

    Returns:
      env: A HanabiEnv object.

    Raises:
      ValueError: Unknown environment name (raised by HanabiEnv).
    """
    if pyhanabi_path is not None:
        prefixes = (pyhanabi_path,)
        assert pyhanabi.try_cdef(prefixes=prefixes), "cdef failed to load"
        assert pyhanabi.try_load(prefixes=prefixes), "library failed to load"

    # BUG FIX: this previously called HanabiEnv(config={...}), but HanabiEnv's
    # constructor signature is (hanabi_name, num_players, seed), so every call
    # raised TypeError. HanabiEnv owns the per-preset configs and raises
    # ValueError for unknown names, preserving this function's contract.
    return HanabiEnv(environment_name, num_players, seed)
#-------------------------------------------------------------------------------
# Hanabi Agent API
#-------------------------------------------------------------------------------
class Agent(object):
    """Agent interface.

    All concrete implementations of an Agent should derive from this interface
    and implement the method stubs. A typical driver asks every agent for an
    action each turn; only the current player returns a non-None action.
    """

    def __init__(self, config, *args, **kwargs):
        r"""Initialize the agent.

        Args:
          config: dict, with parameters for the game. Keys: colors, ranks,
            players, hand_size, max_information_tokens, max_life_tokens,
            seed, random_start_player.
          *args: Optional arguments.
          **kwargs: Optional keyword arguments.

        Raises:
          NotImplementedError: always, on the abstract base class.
        """
        # Error-message typo fixed: "implemeneted" -> "implemented".
        raise NotImplementedError("Not implemented in abstract base class.")

    def reset(self, config):
        r"""Reset the agent with a new config.

        Args:
          config: dict, with the same game-parameter keys as `__init__`.
        """
        raise NotImplementedError("Not implemented in abstract base class.")

    def act(self, observation):
        """Act based on an observation.

        Args:
          observation: dict, containing the observation from the view of this
            agent (current_player, legal_moves, observed_hands, fireworks,
            tokens, etc.).

        Returns:
          action: dict, mapping to a legal action taken by this agent, one of:
            - { 'action_type': 'PLAY', 'card_index': int }
            - { 'action_type': 'DISCARD', 'card_index': int }
            - { 'action_type': 'REVEAL_COLOR', 'color': str,
                'target_offset': int >=0 }
            - { 'action_type': 'REVEAL_RANK', 'rank': str,
                'target_offset': int >=0 }
        """
        raise NotImplementedError("Not implemented in Abstract Base class")
| [
"umer_siddique@sjtu.edu.cn"
] | umer_siddique@sjtu.edu.cn |
46f9680533117479bb46841d8e622a2bda27aa68 | d7e58c88325d769de656ba802dc8ecd1c3b337fd | /LogisticReg.py | ffd0dcd061a020998eb2066b052c5b6adf68cf33 | [] | no_license | shailesh001/learningtf | dcc6fd744a0e3566d55bf46c383379510b9daa71 | f0dbf8dc2e4a3d30d38a466221dde0abd827320f | refs/heads/master | 2020-04-10T10:11:59.213798 | 2019-01-25T11:26:08 | 2019-01-25T11:26:08 | 160,958,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
N = 20000
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# === Create Data and Simulate Results ===
x_data = np.random.randn(N, 3)
w_real = [0.3, 0.5, 0.1]
b_real = -0.2
wxb = np.matmul(w_real,x_data.T) + b_real
y_data_pre_noise = sigmoid(wxb)
y_data = np.random.binomial(1, y_data_pre_noise)
plt.scatter(wxb,y_data)
plt.show()
NUM_STEPS = 50
g = tf.Graph()
wb_ = []
with g.as_default():
x = tf.placeholder(tf.float32, shape=[None, 3])
y_true = tf.placeholder(tf.float32, shape=None)
with tf.name_scope('inference') as scope:
w = tf.Variable([[0, 0, 0]], dtype=tf.float32, name='weights')
b = tf.Variable(0, dtype=tf.float32, name='bias')
y_pred = tf.matmul(w, tf.transpose(x)) + b
with tf.name_scope('loss') as scope:
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
loss = tf.reduce_mean(loss)
with tf.name_scope('train') as scope:
learning_rate = 0.5
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(NUM_STEPS):
sess.run(train, {x: x_data, y_true: y_data})
if step % 5 == 0:
print(step, sess.run([w, b]))
wb_.append(sess.run([w, b]))
print(50, sess.run([w, b]))
| [
"shailesh001@mac.com"
] | shailesh001@mac.com |
a8fd1afad2e1760d1cd20440d230b920825c615f | d05e5800ceb26c21f0fafcfca387603c81f75d21 | /board_project/board_project/urls.py | 04f40596411273f0b23e803037823da243119766 | [] | no_license | omarelalfy1/django3 | e5ecb30349eafdf546d679acbaa04ec201fba4bc | b78bce3f62dfea314f7cb17d764024970ee53972 | refs/heads/master | 2022-12-05T02:53:30.443597 | 2020-08-14T23:00:49 | 2020-08-14T23:00:49 | 287,587,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | """board_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"69329830+omarelalfy1@users.noreply.github.com"
] | 69329830+omarelalfy1@users.noreply.github.com |
814f4bef1abd03b042ba1449f3ab0c764afdd97e | a0ce16f4e5c6c17b1bd45a39461c0c899067fda7 | /apps/app2.py | a56eea547bc3fef1bbad491eb62f5d39cb55d4e4 | [] | no_license | revanth-reddy/dashmultidate | 46f98f91cee7723fa0acbf8d328beaf646d3d8c5 | e7d2de8635df9ead466896fd42a71b3bf4815a06 | refs/heads/master | 2022-12-10T20:11:43.940070 | 2019-05-30T11:23:21 | 2019-05-30T11:23:21 | 189,394,807 | 0 | 0 | null | 2022-12-08T05:11:39 | 2019-05-30T10:34:32 | Python | UTF-8 | Python | false | false | 725 | py | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
layout = html.Div([
dcc.Link(html.H3('App 2'), href = '/link2'),
dcc.Dropdown(
id='app-2-dropdown',
options=[
{'label': 'App 2 - {}'.format(i), 'value': i} for i in [
'NYC', 'MTL', 'LA'
]
]
),
html.Div(id='app-1-display-value'),
dcc.Link('Go to App 1', href='/link1'),
html.Br(),
dcc.Link('Go to home', href='/'),
])
@app.callback(
Output('app-2-display-value', 'children'),
[Input('app-2-dropdown', 'value')])
def display_value(value):
return 'You have selected "{}"'.format(value) | [
"malerevanthreddy3099@gmail.com"
] | malerevanthreddy3099@gmail.com |
61439b21ce106c7faac47be9a0de689e7bd7640f | 93d11ff6a5a380ebef768f51bf6b9d680120cb43 | /pfe/service/models.py | 962b390a2d0dc6e06d891a4928cbcdd65282c783 | [] | no_license | houidiichrak/pfe | 09bb8b4c2c884585a3a49e1ca2bdbdc0c0d514b9 | 0e51ca3c4e484ca406260388cada837473d59e2e | refs/heads/master | 2020-06-04T16:36:40.131381 | 2019-06-15T18:39:56 | 2019-06-15T18:39:56 | 192,106,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
# Create your models here.
class Message(models.Model):
name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
subject = models.CharField(max_length=20)
msg = models.TextField()
def __str__(self):
return self.email
class Categorie(models.Model):
name = models.CharField(max_length=25)
img = models.ImageField(upload_to='upload/')
def __str__(self):
return self.name
class Worker(models.Model):
name = models.CharField(max_length=20)
lastname = models.CharField(max_length=20)
username = models.CharField(max_length=20)
password = models.CharField(max_length=20)
email = models.EmailField()
adresse = models.CharField(max_length=20)
phone = models.CharField(max_length=20)
ville = models.CharField(max_length=20)
desc = models.TextField()
datejoind = models.DateTimeField('date joind', auto_now_add=True, auto_now=False)
cat = models.ForeignKey(Categorie,on_delete=models.CASCADE,)
img = models.ImageField(upload_to='upload/')
def __str__(self):
return self.name
class Prix(models.Model):
name = models.CharField(max_length=20)
about = models.CharField(max_length=20)
valeur = models.FloatField()
def __str__(self):
return self.name
class PositionWorker(models.Model):
currentUser = models.ForeignKey(Worker,on_delete=models.CASCADE,)
x = models.FloatField()
y = models.FloatField()
def __str__(self):
return str(self.id) + " - position"
class PositionClient(models.Model):
currentUser = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,)
x = models.FloatField()
y = models.FloatField()
def __str__(self):
return str(self.id) + " - position"
class Reservation(models.Model):
client = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,)
resworker = models.ForeignKey(Worker,on_delete=models.CASCADE,)
posclient = models.ForeignKey(PositionClient,on_delete=models.CASCADE,)
posworker = models.ForeignKey(PositionWorker,on_delete=models.CASCADE,)
date = models.DateTimeField('date joind', auto_now_add=True, auto_now=False)
cat = models.ForeignKey(Categorie,on_delete=models.CASCADE,)
tache = models.TextField()
is_approved = models.BooleanField(editable=False, default=False)
def __str__(self):
return str(self.date)
class Commentaire(models.Model):
resworker = models.ForeignKey(Worker,on_delete=models.CASCADE,)
com = models.TextField()
date = models.DateTimeField('date joind', auto_now_add=True, auto_now=False)
name = models.CharField(max_length=20)
email = models.EmailField()
def __str__(self):
return self.name | [
"ichrakhod@gmail.com"
] | ichrakhod@gmail.com |
cca2715fb5c518733df026e62303e3e1d83dcc93 | 04756361cba9da6398af8611e5fd2166bbada88c | /api/mail.py | 7ea831b22343ea86a3d597f007b2dcb1d27ea761 | [] | no_license | Saiid2001/TamamServer | 35ed65cc20664a13041b8473d271254914f5d0df | 87cc433d4d1a36e292df026cd6e60a52d0920756 | refs/heads/main | 2023-08-25T20:01:07.547279 | 2021-10-22T16:17:24 | 2021-10-22T16:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
smtp_server = "smtp.gmail.com"
port = 465 # For SSL
sender_email = 'tamamsignup@gmail.com'
password = 'T.amam.24.35.42'
def send_confirmation(recipient, token):
message = MIMEMultipart("alternative")
message["Subject"] = "Confirm your email - TAMAM"
message["From"] = sender_email
message["To"] = recipient
html = """\
<html>
<body>
<p>Welcome to TAMAM!</p>
<p>Please click <a href=http://localhost:4000/authenticate/confirm-email/"""+token+""">here</a> to confirm your account.</p>
</body>
</html>
"""
msgcontent = MIMEText(html, "html")
message.attach(msgcontent)
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, recipient, message.as_string())
| [
"nwzantout@gmail.com"
] | nwzantout@gmail.com |
699ea1d33083dbe690ac1495e2b02345c3ab0360 | 9a1538123b8abec14410dad46c437cf735684dd9 | /news/migrations/0001_initial.py | 552caaeb8e0ba8c9555709ab96355304db3f721e | [] | no_license | asmuratbek/zastroy24 | deec6bd65229aeb29eb313d915c6c47ca036a8aa | d68ce21beefc644752a1271a4d8981cd2423afba | refs/heads/master | 2020-04-27T18:44:26.845151 | 2019-03-08T18:09:13 | 2019-03-08T18:09:13 | 174,585,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-01 14:58
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True, verbose_name='\u0410\u043a\u0442\u0438\u0432\u043d\u0430\u044f \u043d\u043e\u0432\u043e\u0441\u0442\u044c?')),
('title', models.CharField(help_text='\u041e\u043d \u0436\u0435 \u0438 meta_title', max_length=255, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('slug', models.CharField(help_text='\u041d\u0443\u0436\u0435\u043d \u0434\u043b\u044f URL', max_length=255, verbose_name='slug')),
('preview', models.ImageField(blank=True, null=True, upload_to='news/', verbose_name='\u041f\u0440\u0435\u0434\u043e\u0441\u043c\u043e\u0442\u0440 \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('content', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='\u0422\u0435\u043b\u043e \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f')),
],
options={
'verbose_name': '\u043d\u043e\u0432\u043e\u0441\u0442\u044c',
'verbose_name_plural': '\u041d\u043e\u0432\u043e\u0441\u0442\u0438',
},
),
]
| [
"asmuratbek@gmail.com"
] | asmuratbek@gmail.com |
8da3918f48522266414a6531b91e672234edbf17 | a7c29ce3f4e25fba872853dc080913506521a205 | /google-cloud-translate/synth.py | cafcb0b1158c3e441202af6f2e49f56ec838a42a | [
"Apache-2.0"
] | permissive | WebElZeN/google-cloud-ruby | 285898488437da762d87f643a9821a85d45a9fef | c091a97f668010965809c450c54dbcdfe850fad5 | refs/heads/master | 2022-04-25T17:20:33.666365 | 2020-04-30T19:05:22 | 2020-04-30T19:05:22 | 260,346,990 | 1 | 0 | Apache-2.0 | 2020-05-01T00:25:10 | 2020-05-01T00:23:15 | null | UTF-8 | Python | false | false | 5,999 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import os
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v3_library = gapic.ruby_library(
'translate', 'v3', artman_output_name='google-cloud-ruby/google-cloud-translate',
config_path='/google/cloud/translate/artman_translate_v3.yaml'
)
s.copy(v3_library / 'lib/google/cloud/translate/v3.rb')
s.copy(v3_library / 'lib/google/cloud/translate/v3')
s.copy(v3_library / 'test/google/cloud/translate/v3')
s.copy(v3_library / 'LICENSE')
s.copy(v3_library / '.gitignore')
# Update file paths in generated files
s.replace(
[
'lib/google/cloud/translate/v3.rb',
'lib/google/cloud/translate/v3/**/*.rb',
'test/google/cloud/translate/v3/**/*.rb'
],
'Google::Cloud::Translation',
'Google::Cloud::Translate'
)
s.replace(
'lib/google/cloud/translate/v3/doc/**/*.rb',
'module Translation\n',
'module Translate\n'
)
# Support for service_address
s.replace(
[
'lib/google/cloud/translate/v3.rb',
'lib/google/cloud/translate/v3/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/translate/v3.rb',
'lib/google/cloud/translate/v3/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/translate/v3.rb',
'lib/google/cloud/translate/v3/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/translate/v3/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/translate/v3/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# Remove legacy release level from documentation
s.replace(
'lib/google/cloud/**/*.rb',
'\\s+\\(\\[\\w+\\]\\(https://github\\.com/(googleapis|GoogleCloudPlatform)/google-cloud-ruby#versioning\\)\\)',
''
)
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
expr = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\3\\\\\\\\{\\4}', content)
if count == 0:
return content
s.replace(
[
'lib/google/cloud/translate/v3.rb',
'lib/google/cloud/translate/v3/*_client.rb'
],
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/translate/v3/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/google/cloud/translate/v3/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'lib/google/cloud/translate/v3/*_client.rb',
'(require \".*credentials\"\n)\n',
'\\1require "google/cloud/translate/version"\n\n'
)
s.replace(
'lib/google/cloud/translate/v3/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::Translate::VERSION'
)
# Fix links for devsite migration
for file in ['lib/**/*.rb', '*.md']:
s.replace(
file,
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'*.md',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-translate/latest/file.AUTHENTICATION.html'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-translate/latest/file.AUTHENTICATION.html'
)
s.replace(
'README.md',
'github.io/google-cloud-ruby/#/docs/google-cloud-translate/latest/.*$',
'dev/ruby/google-cloud-translate/latest'
)
| [
"noreply@github.com"
] | noreply@github.com |
e1acc0cf186cf5bdf80998a4488cb64bc49e68cb | caec91f89b23dce88f849cb08c862a7108a77ce4 | /dadi_two_populations_with_migration.py | cc88db7feaedac0767e6803316d6ce8389baa71c | [] | no_license | Captain-Blackstone/wright_fisher_vs_moran | 44a93c31c04a10cab228e10449d6139626fa0652 | d66f55a4e0f71744f354f9f4d03e83bc07b454e4 | refs/heads/master | 2020-11-26T00:06:42.356019 | 2019-12-26T15:03:04 | 2019-12-26T15:03:04 | 228,900,391 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | # # The optimized parameters inferred by model below 'nu1F': 1.881, 'nu2B': 0.0710, 'nu2F': 1.845,
# 'm': 0.911, 'Tp': 0.355, 'T': 0.111]
# 'Optimized log-likelihood:' -1066.66
# Numpy is the numerical library dadi is built upon
import numpy
from numpy import array
import dadi
# In demographic_models.py, we've defined a custom model for this problem
import demographic_models
# Load the data
data = dadi.Spectrum.from_file('YRI_CEU.fs')
ns = data.sample_sizes
# These are the grid point settings will use for extrapolation.
pts_l = [40,50,60]
# The Demographics1D and Demographics2D modules contain a few simple models,
# mostly as examples. We could use one of those.
func = dadi.Demographics2D.split_mig
# ll for this model: -1136.61
params = array([1.792, 0.426, 0.309, 1.551])
upper_bound = [100, 100, 3, 20]
# Instead, we'll work with our custom model
func = demographic_models.prior_onegrow_mig
# ll for this model: -1066.66
params = array([1.881, 0.0710, 1.845, 0.911, 0.355, 0.111])
# The upper_bound array is for use in optimization. Occasionally the optimizer
# will try wacky parameter values. We in particular want to exclude values with
# very long times, as they will take a long time to evaluate.
upper_bound = [100, 100, 100, 100, 3, 3]
lower_bound = [1e-2, 1e-2, 1e-2, 0, 0, 0]
# Makde the extrapolating version of our demographic model function.
func_ex = dadi.Numerics.make_extrap_log_func(func)
# Calculate the model AFS.
model = func_ex(params, ns, pts_l)
# Likelihood of the data given the model AFS.
ll_model = dadi.Inference.ll_multinom(model, data)
print 'Model log-likelihood:', ll_model
# The optimal value of theta given the model.
theta = dadi.Inference.optimal_sfs_scaling(model, data)
# Perturb our parameter array before optimization. This does so by taking each
# parameter a up to a factor of two up or down.
p0 = dadi.Misc.perturb_params(params, fold=1, upper_bound=upper_bound)
# Do the optimization. By default we assume that theta is a free parameter,
# since it's trivial to find given the other parameters. If you want to fix
# theta, add a multinom=False to the call.
# (This is commented out by default, since it takes several minutes.)
# The maxiter argument restricts how long the optimizer will run. For production
# runs, you may want to set this value higher, to encourage better convergence.
popt = dadi.Inference.optimize_log(p0, data, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(params))
print 'Optimized parameters', repr(popt)
model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(model, data)
print 'Optimized log-likelihood:', ll_opt
# Plot a comparison of the resulting fs with the data.
import pylab
pylab.figure()
dadi.Plotting.plot_2d_comp_multinom(model, data, vmin=1, resid_range=3,
pop_ids =('YRI','CEU'))
# This ensures that the figure pops up. It may be unecessary if you are using
# ipython.
pylab.show()
pylab.savefig('YRI_CEU.png', dpi=50) | [
"noreply@github.com"
] | noreply@github.com |
cd7ab987337fa0a49ef33a72a037969e49158c19 | b5f226826098a4ddc39b0cd61abb2eef182afbea | /production/fourierModel.py | 03fe14a3fa163e2045bca0af05392f0bfa4e2af6 | [] | no_license | upschemistry/icecontinuum | f31478720cee6bc613e051d58c5a41d35e94d46b | e3e9159ce388031d317f11eb784403f31f9718c7 | refs/heads/master | 2023-08-25T12:20:26.524321 | 2023-08-12T00:04:53 | 2023-08-12T00:04:53 | 48,130,067 | 2 | 1 | null | 2023-06-14T23:27:54 | 2015-12-16T19:18:44 | Jupyter Notebook | UTF-8 | Python | false | false | 5,954 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 15:01:47 2015
@author: nesh, jonathan, jake
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
#import copy
def fftnorm(u_full):
"""Computes normalized FFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete FFT is to be computed
Returns
-------
normalizedFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
normalizedFFT = np.fft.rfft(u_full,norm = "forward")
return normalizedFFT
def ifftnorm(u_full):
"""Computes normalized IFFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete IFFT is to be computed
Returns
-------
normalizedIFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
normalizedIFFT = np.fft.irfft(u_full, norm = "forward")
return normalizedIFFT
def convolution(nT,nu_kin,depRate,Nstar):
"""Computes Fourier transform of the nonlinear term in the QLL PDE
2 pi N^* sigmaM vKin cos(Ntot)
Computed in real space and then converted back
to Fourier space.
Parameters
----------
nT : 1D Numpy Array (N,)
Total water layers
nu_kin : TBD
TBD
sigmastep : TBD
TBD
Nstar : TBD
TBD
Returns
-------
convo : 1D Numpy Array (N,)
Fourier transform of the nonlinear term
"""
# compute double sum in real space, then apply scalar multiplier
convo = 2 * np.pi * Nstar * nu_kin * fftnorm(depRate * np.cos(ifftnorm(nT)))
return convo
#@njit("f8[:](f8[:],i4,f8[:],f8[:],f8)")
def nTotRHS(nQLL,nu_kin,depRate_FFT,k,D):
"""Computes RHS of the ODE for the positive modes of Ntot
dnk/dt = -k^2 D nkQLL + 2 pi FFT(sigma_m) nu_kin
Parameters
----------
nQLL : 1D Numpy Array (N,)
Positive modes of state vector for quasi-liquid layers
nu_kin : TBD
TBD
sigmastep_FFT : TBD
TBD
k : 1D Numpy Array (N,)
Vector of wavenumbers
D : float
Diffusion coefficient
Returns
-------
dnTot : 1D Numpy Array (N,)
Rate of change of positive modes of nTot
"""
dnTot = -k**2 * D * nQLL + depRate_FFT
return dnTot
def nQLLRHS(nTot,nQLL,nu_kin,depRate,k,D,Nstar,N):
"""Computes RHS of the ODE for the positive modes of Ntot
dn0/dt = 2 * pi * sigma_m * nu_kin
dnk/dt = -k^2 D nkQLL
Parameters
----------
nTot : 1D Numpy Array (N,)
Positive modes of state vector for total layers
nQLL : 1D Numpy Array (N,)
Positive modes of state vector for quasi-liquid layers
nu_kin : TBD
TBD
sigmastep_FFT : TBD
TBD
k : 1D Numpy Array (N,)
Vector of wavenumbers
D : float
Diffusion coefficient
Nstar : float
TBD
Returns
-------
dnQLL : 1D Numpy Array (N,)
Rate of change of positive modes of nTot
"""
convo = convolution(nTot,nu_kin,depRate, Nstar)
dnQLL = -k**2 * D * nQLL + convo
return dnQLL
def RHS(t,n,params):
"""
Computes the RHS for a full KdV or ROM simulation. For use in solver.
Parameters
----------
t : float
Current time
n : Numpy array (2N,)
Current state vector of positive modes (total first, then QLL)
params : Dictionary
Dictionary of relevant parameters (see below)
N : float, number of positive modes in simulation
nu_kin :
sigmastep :
sigmastep_FFT :
k :
D :
Returns
-------
RHS : 1D Numpy array (2N,)
Derivative of each positive mode in state vector
"""
# extract parameters from dictionary
N = params['N']
nu_kin = params['nu_kin']
depRate = params['depRate']
depRate_FFT = params['depRate_FFT']
k = params['k']
D = params['D']
Nstar = params['Nstar']
nTot = n[0:N]
nQLL = n[N:]
dnT = nTotRHS(nQLL,nu_kin,depRate_FFT,k,D)
dnQ = nQLLRHS(nTot,nQLL,nu_kin,depRate,k,D,Nstar,N)
RHS = np.concatenate((dnT,dnQ))
return RHS
def runSim(params):
"""
Runs a simulation of the ice continuum in Fourier space
Parameters
----------
params : Dictionary
Dictionary of relevant parameters (see below)
N : float, number of positive modes in simulation
nu_kin :
sigmastep :
sigmastep_FFT :
k :
D :
Returns
-------
uSim : ODE solver output
Output solution from sp.integrate.solve_ivp (includes state vector at all timesteps, time vector, etc.)
"""
# unpack parameters from dictionary
N = params['N']
ICNT = params['ICNT']
ICNQLL = params['ICNQLL']
endtime = params['endtime']
timesteps = params['timesteps']
nTotIC = fftnorm(ICNT)[0:N]
nQLLIC = fftnorm(ICNQLL)[0:N]
n = np.concatenate((nTotIC,nQLLIC))
# define RHS in form appropriate for solve_ivp
def myRHS(t,y):
out = RHS(t,y,params)
return out
# solve the IVP
uSim = sp.integrate.solve_ivp(fun = myRHS, t_span = [0,endtime], y0 = n, t_eval = timesteps)
return uSim
def makeReal(fourierSol):
N = int(fourierSol.shape[0]/2)
timesteps = fourierSol.shape[1]
NTot = np.zeros((timesteps,2*N-2))
NQLL = np.zeros((timesteps,2*N-2))
for i in range(timesteps):
NTot[i,:] = ifftnorm(fourierSol[0:N,i])
NQLL[i,:] = ifftnorm(fourierSol[N:,i])
return [NTot, NQLL] | [
"mbloom@pugetsound.edu"
] | mbloom@pugetsound.edu |
169532ee642ffb033a0cb14bc61a6fee9e95c924 | bf88d5acc67cc9130bcde042a2d365945c9b13e5 | /library/transmission/transmission.py | 77d1e1e16f19e71467b3193ff8b33d75e23555e4 | [
"MIT"
] | permissive | mparaz/shutit | 6b10e58158954921505e33cf1ed7486deea44f0d | 4f973aa462ff05eeb9d6f18dac223c15024d3fc5 | refs/heads/master | 2021-01-21T00:29:14.763314 | 2014-08-01T23:30:41 | 2014-08-01T23:30:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py |
# Created from dockerfile: /space/git/dockerfiles_repos/Thermionix/Dockerfiles/transmission/Dockerfile
from shutit_module import ShutItModule
class transmission(ShutItModule):
def is_installed(self, shutit):
return False
def build(self, shutit):
shutit.send('export DEBIAN_FRONTEND=noninteractive')
shutit.send('apt-get update && apt-get install -y transmission-daemon')
shutit.send('sed -i -e \'/^OPTION/s/"$/ --foreground"/\' /etc/default/transmission-daemon')
return True
def finalize(self, shutit):
return True
def test(self, shutit):
return True
def is_installed(self, shutit):
return False
def get_config(self, shutit):
return True
def module():
return transmission(
'shutit.tk.transmission.transmission', 0.15246246246,
depends=['shutit.tk.setup']
)
| [
"ian.miell@openbet.com"
] | ian.miell@openbet.com |
84145cb19ffc01c0a4940bdb3740868e8ba1d406 | eb03d7943b22643799be6b7a9e4d0cfd3aaf966f | /calc/Calc.py | c86883dc932da2ae86ba985b888b0d2d6780a6ba | [] | no_license | JayLee92/ezen_tf_190406 | 52d0ecceced5c195ac5a7c431bf1e9d8d8f70bbf | 7decf1858639d8830ddab189ed82c12ccf7e4203 | refs/heads/master | 2020-05-05T06:02:39.741632 | 2019-04-06T05:50:47 | 2019-04-06T05:50:47 | 179,772,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | class Calc:
def __init__(self, first, second):
self.first = first
self.second = second
def set_num(self, first, second):
self.first = first
self.second = second
def sum(self):
return self.first + self.second
def mul(self):
return self.first * self.second
def minus(self):
return self.first - self.second
def divide(self):
return self.first / self.second
| [
"allen.mopark@gmail.com"
] | allen.mopark@gmail.com |
35f41a6f7db13c4e70b3c3fee4560df78482f787 | ac4988c7f9cc98f6dcc31280cd208eeb57d674f2 | /Semana6Sesion3/martinperez/app/conexion.py | 9df1602e723fec45ec4550c037bbe56918c62f6c | [] | no_license | BrucePorras/PachaQTecMayo2020-1 | 77e396ceb2619d028dd740b9414f8a3160eee2cd | 5f39146cc4f69d844dae22a12098479194fd6332 | refs/heads/master | 2022-12-18T02:20:39.828175 | 2020-09-25T05:39:20 | 2020-09-25T05:39:20 | 269,999,646 | 1 | 0 | null | 2020-06-06T14:06:19 | 2020-06-06T14:06:18 | null | UTF-8 | Python | false | false | 2,082 | py | #CRUD en una clase
#Mysql
import mysql.connector
from mysql.connector import errorcode
#Posgres
import psycopg2
from psycopg2 import Error
#sqlite
import sqlite3
class conexionBDD:
def __init__(self,intBDD):
self.intBDD= intBDD
# si es 1, conectarnos a Mysql
# si es 2, conectarnos a postgres
# si es 3, conectarnos sqlite
def conexion(self):
if(self.intBDD == 1):
try:
conn = mysql.connector.connect(user='root',
password='passmysqlmartin',
host="localhost",
port="3306",
database="martinperez")
return conn
except (mysql.connector.Error, Exception) as error:
error
return False
elif(self.intBDD == 2):
try:
conn = psycopg2.connect(user='postgres',
password='passmysqlmartin',
host="localhost",
port="5432",
database="martinperez")
return conn
except Exception as error:
error
return False
else:
try:
conn = sqlite3.connect('martinperez.db')
return conn
except Exception as error:
return False
def consultarBDD(self, query):
try:
conexion = self.conexion()
cur = conexion.cursor()
cur.execute(query)
records = cur.fetchall()
return records
except Error as error:
error
return False
def ejecutarBDD(self, query):
try:
conexion = self.conexion()
cur = conexion.cursor()
cur.execute(query)
conexion.commit()
exito = True
return exito
except Exception as identifier:
identifier
return False
| [
"perez_13lo@hotmail.com"
] | perez_13lo@hotmail.com |
e8084fbc13580d94a42d713b09cbb70753c86768 | 21a9ed29a474b3a87fa2a012cd9f7fd8eabc6f4f | /meiduo_mall/meiduo_mall/utils/views.py | e437b5d350bfc3a4d76005b67c30aac43e9007ca | [
"MIT"
] | permissive | Noah-Smith-wgp/meiduo_project | fd979237505a212e9aa7d5cc12549cea13def1ae | f1350a50dabd33d90c2c5b3509d590603aeda6e4 | refs/heads/master | 2021-04-16T13:18:28.181307 | 2020-01-02T13:29:29 | 2020-01-02T13:29:29 | 249,358,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django import http
from meiduo_mall.utils.response_code import RETCODE
class LoginRequiredJSONMixin(LoginRequiredMixin):
"""自定义限制用户访问的扩展类,返回JSON"""
def handle_no_permission(self):
return http.JsonResponse({'code':RETCODE.SESSIONERR, 'errmsg':'用户未登录'}) | [
"18211672297@163.com"
] | 18211672297@163.com |
45eec0b44998ef2397101b6786c74cd95fe3aab4 | 90275584cc15c16d60ffaad1e405ccc57ad3d8f4 | /scripts/trigger.py | 1e5e8236e2a6894adaf80721eff45f2ccfc847c3 | [] | no_license | SrikarSiddarth/raksha | b9cfe696c2de44a6895da49a92b9073ac37f9a72 | 3dc2e759a56d88fd95e04387436c46454febab31 | refs/heads/main | 2023-04-20T03:22:29.895384 | 2021-04-26T05:10:22 | 2021-04-26T05:10:22 | 307,312,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Empty
import sys, select, os
import tty, termios
msg = """
Launch counter missile from your Laser Turret!
Press l key
Ctrl-C to quit
"""
e = """
Communications Failed
"""
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
if __name__ == '__main__':
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('trig_turret_teleop')
pub = rospy.Publisher('/trig',Empty,queue_size=10)
try:
print(msg)
while(1):
key=getKey()
if key=='l':
pub.publish()
elif key=='\x03':
break
except:
print(e)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| [
"noreply@github.com"
] | noreply@github.com |
ff6e252f9f211e7218985e20aa09f2b62a4ab996 | f92eaef05a9d4946fde399317b839b106bda6d4d | /reportlab/lib/logger.py | 23629aea1ad979f52387c8c0d158cb27916d77a8 | [] | no_license | Gilles00/Book-Loan-Database-System | c3dfd644da5fc5ecf9985252eb226d3e2db7d116 | 4c55b7941360e2dd10657c8c142179639acdb6cc | refs/heads/master | 2021-05-17T18:44:59.517167 | 2016-10-29T23:13:07 | 2016-10-29T23:13:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/logger.py
__version__=''' $Id$ '''
__doc__="Logging and warning framework, predating Python's logging package"
from sys import stderr
class Logger:
'''
An extended file type thing initially equivalent to sys.stderr
You can add/remove file type things; it has a write method
'''
def __init__(self):
self._fps = [stderr]
self._fns = {}
def add(self,fp):
'''add the file/string fp to the destinations'''
if isinstance(fp,str):
if fp in self._fns: return
fp = open(fn,'wb')
self._fns[fn] = fp
self._fps.append(fp)
def remove(self,fp):
'''remove the file/string fp from the destinations'''
if isinstance(fp,str):
if fp not in self._fns: return
fn = fp
fp = self._fns[fn]
del self.fns[fn]
if fp in self._fps:
del self._fps[self._fps.index(fp)]
def write(self,text):
'''write text to all the destinations'''
if text[-1]!='\n': text=text+'\n'
for fp in self._fps: fp.write(text)
def __call__(self,text):
self.write(text)
logger=Logger()
class WarnOnce:
def __init__(self,kind='Warn'):
self.uttered = {}
self.pfx = '%s: '%kind
self.enabled = 1
def once(self,warning):
if warning not in self.uttered:
if self.enabled: logger.write(self.pfx + warning)
self.uttered[warning] = 1
def __call__(self,warning):
self.once(warning)
warnOnce=WarnOnce()
infoOnce=WarnOnce('Info')
| [
"jas_96@hotmail.co.uk"
] | jas_96@hotmail.co.uk |
172e8b43391e0009df8389c94777e219b597b85a | 75136c3303564e9791ef0306b0fbfdba76b0616f | /torch_connectomics/data/dataset/misc.py | 97be4f19217dc054428ca8559702f23a71530f23 | [
"MIT"
] | permissive | pragyasingh7/pytorch_connectomics | 1f793c52afe079b7e12dc93c617183ae1ec1251c | fdc8e1900b0a38d19ea50f78f8c81da2a4f015a9 | refs/heads/master | 2020-05-31T21:03:51.686680 | 2019-06-10T19:51:42 | 2019-06-10T19:51:42 | 186,615,290 | 0 | 0 | MIT | 2019-05-14T12:15:17 | 2019-05-14T12:15:17 | null | UTF-8 | Python | false | false | 2,306 | py | from __future__ import print_function, division
import numpy as np
import random
import torch
####################################################################
## Process image stacks.
####################################################################
def count_volume(data_sz, vol_sz, stride):
return 1 + np.ceil((data_sz - vol_sz) / stride.astype(float)).astype(int)
def crop_volume(data, sz, st=(0, 0, 0)): # C*D*W*H, C=1
return data[st[0]:st[0]+sz[0], st[1]:st[1]+sz[1], st[2]:st[2]+sz[2]]
def crop_volume_mul(data, sz, st=(0, 0, 0)): # C*D*W*H, for multi-channel input
return data[:, st[0]:st[0]+sz[0], st[1]:st[1]+sz[1], st[2]:st[2]+sz[2]]
####################################################################
## Rebalancing.
####################################################################
def rebalance_binary_class(label, mask=None, base_w=1.0):
"""Binary-class rebalancing."""
weight_factor = label.float().sum() / torch.prod(torch.tensor(label.size()).float())
weight_factor = torch.clamp(weight_factor, min=1e-2)
alpha = 1.0
weight = alpha * label*(1-weight_factor)/weight_factor + (1-label)
return weight_factor, weight
####################################################################
## Affinitize.
####################################################################
def check_volume(data):
"""Ensure that data is a numpy 3D array."""
assert isinstance(data, np.ndarray)
if data.ndim == 2:
data = data[np.newaxis,...]
elif data.ndim == 3:
pass
elif data.ndim == 4:
assert data.shape[0]==1
data = np.reshape(data, data.shape[-3:])
else:
raise RuntimeError('data must be a numpy 3D array')
assert data.ndim==3
return data
# def affinitize(img, dst=(1,1,1), dtype=np.float32):
# """
# Transform segmentation to an affinity map.
# Args:
# img: 3D indexed image, with each index corresponding to each segment.
# Returns:
# ret: an affinity map (4D tensor).
# """
# img = check_volume(img)
# if ret is None:
# ret = np.zeros(img.shape, dtype=dtype)
# # Sanity check.
# (dz,dy,dx) = dst
# assert abs(dx) < img.shape[-1]
# assert abs(dy) < img.shape[-2]
# assert abs(dz) < img.shape[-3] | [
"linzudi@g.harvard.edu"
] | linzudi@g.harvard.edu |
cd5b89d80c13954c33d02e51a9b11a28c0766c40 | 21fb2474085aba9be8613b01978260cf95f20c3b | /tutorial/urls.py | 5b4803ef9c9e886bab354aacf619c40750f53863 | [] | no_license | DesmondMM/Django_Rest_Framework_Tutorial | d988f4ec3f7a36f8c68e1491b7a03e1844eee984 | 889cd56a23ef5b42804dbcdf00d574b2512ad15c | refs/heads/master | 2016-09-16T10:51:37.722429 | 2015-12-31T15:27:04 | 2015-12-31T15:27:04 | 48,800,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import *
from django.conf.urls import include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('snippets.urls')),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
| [
"mutsakaradesmond@gmail.com"
] | mutsakaradesmond@gmail.com |
c9e7f9ca51308d273ab6ae9ee2cb0d3ff678295d | 265a3bc51432acc72790886247c85247a16dfc3c | /sum/app/main.py | 0843943958a4988b9eb58e71aeb69c4256172943 | [
"BSD-3-Clause"
] | permissive | asdkant/hello-fastapi | f02321a3abe6badd09693d1005fa0901618fab42 | c1b7418b806e04625f210b46a3d7488a7331abad | refs/heads/main | 2023-06-30T11:07:48.112337 | 2021-07-28T20:27:39 | 2021-07-28T20:27:39 | 385,408,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # to test this locally, run "uvicorn main:app --reload" in the same folder
from typing import List, Optional, Union
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class params_sum_two(BaseModel):
a: int
b: int
@app.post("/two")
async def sum_two(p:params_sum_two):
return p.a + p.b
class params_sum_more(BaseModel):
l: List[int]
@app.post("/more")
async def sum_more(p:params_sum_more):
return sum(p.l)
| [
"ariel@aknt.com.ar"
] | ariel@aknt.com.ar |
c8f736f1f5f9f1dbc563eb64d4da67e89ae94351 | 16292c665e7bbc835dd4eff58f7b0cf0ef8a4c2d | /makecst.py | b23061762a6a7b1511642849f0fed383fb996d73 | [
"MIT"
] | permissive | BILAB/scripts | aad331ddf2f422deb9608e33b8390cb0236f68a9 | 8d9dbc1eddcf97d7da41930c29c8034480eab85d | refs/heads/master | 2021-03-16T09:50:54.454995 | 2018-12-02T10:23:47 | 2018-12-02T10:23:47 | 14,160,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,687 | py | #!/usr/bin/env python3
'''
ver 1.0.1
This script works on Python 3.6 or more.
'''
import argparse
parser = argparse.ArgumentParser(description='Generate cst file for Rosetta AbinitioRelax from a fasta and a map file.')
parser.add_argument("-f","--fasta", dest="fasta",
help="Required. Input your fasta file to generate cst file for Rosetta AbinitioRelax", required=True)
parser.add_argument("-m","--map", dest="map",
help="Contact map file. Default is 'hmmrenumbered.map'.",
required=False, default="hmmrenumbered.map")
parser.add_argument("-c","--coarse", dest="coarse",
help="Output file for coarse cst file (-cst_file). Default is 'coarse.cst'.",
required=False, default="coarse.cst")
parser.add_argument("-o","--full", dest="full",
help="Output file for full-atom cst file (-cst_fa_file). Default is 'full.cst'.",
required=False, default="full.cst")
args = parser.parse_args()
aa = "G A S V C T P D N I L E Q M H K F Y R W".split()
rawx0 = """4.467,5.201,5.510,5.671,5.777,5.619,6.140,6.135,6.321,6.413,6.554,7.036,7.297,7.383,7.472,8.216,7.966,9.098,9.166,8.966
,5.381,5.829,5.854,6.057,5.982,6.412,6.388,6.766,6.587,6.707,7.124,7.583,7.605,7.591,8.327,8.162,9.121,9.365,9.252
,,6.190,6.567,6.590,6.450,6.937,6.760,7.081,7.142,7.394,7.483,7.807,8.010,8.051,8.792,8.694,9.594,9.753,9.770
,,,6.759,6.941,6.791,7.063,6.972,7.219,7.441,7.633,7.404,8.008,8.335,8.179,8.077,9.057,9.442,9.513,10.021
,,,,6.426,6.801,7.157,6.985,7.205,7.476,7.685,7.449,7.962,8.265,8.422,8.494,9.026,9.362,9.460,9.752
,,,,,6.676,7.062,6.971,7.159,7.442,7.642,7.628,8.055,8.397,8.221,8.715,9.030,9.813,9.764,9.980
,,,,,,7.288,7.321,7.497,7.554,7.751,7.938,8.308,8.247,8.537,9.198,8.895,9.965,10.266,9.719
,,,,,,,8.001,7.672,7.472,7.696,8.945,8.601,8.401,8.634,9.306,9.111,9.979,10.123,9.867
,,,,,,,,7.682,7.631,7.889,8.485,8.502,8.550,8.672,9.319,9.168,10.039,10.135,9.976
,,,,,,,,,8.096,8.342,7.949,8.302,8.874,8.523,8.329,9.602,9.719,9.746,10.470
,,,,,,,,,,8.522,8.077,8.480,9.122,8.676,8.479,9.900,9.889,9.852,10.707
,,,,,,,,,,,9.863,9.328,8.870,9.454,9.842,9.403,10.544,10.713,10.303
,,,,,,,,,,,,9.074,9.102,9.391,9.667,9.506,10.534,10.610,10.429
,,,,,,,,,,,,,9.530,9.396,9.096,10.253,10.400,10.250,11.110
,,,,,,,,,,,,,,10.606,9.582,9.602,10.843,10.879,10.661
,,,,,,,,,,,,,,,10.662,9.344,10.627,11.322,10.136
,,,,,,,,,,,,,,,,10.903,10.999,10.577,11.758
,,,,,,,,,,,,,,,,,11.536,11.615,11.807
,,,,,,,,,,,,,,,,,,12.050,11.355
,,,,,,,,,,,,,,,,,,,12.806""".split("\n")
rawm = """0.017,,,,,,,,,,,,,,,,,,,
0.269,0.262,,,,,,,,,,,,,,,,,,
0.153,0.291,0.292,,,,,,,,,,,,,,,,,
0.107,0.312,0.205,0.145,,,,,,,,,,,,,,,,
0.129,0.394,0.240,0.173,0.178,,,,,,,,,,,,,,,
0.120,0.378,0.214,0.138,0.181,0.188,,,,,,,,,,,,,,
0.245,0.399,0.321,0.298,0.259,0.320,0.339,,,,,,,,,,,,,
0.193,0.289,0.323,0.287,0.299,0.307,0.416,0.392,,,,,,,,,,,,
0.169,0.349,0.305,0.232,0.240,0.262,0.334,0.337,0.249,,,,,,,,,,,
0.179,0.214,0.342,0.242,0.295,0.259,0.336,0.341,0.341,0.321,,,,,,,,,,
0.125,0.250,0.287,0.179,0.206,0.190,0.317,0.348,0.279,0.261,0.198,,,,,,,,,
0.249,0.340,0.446,0.510,0.538,0.409,0.475,0.354,0.423,0.453,0.475,0.389,,,,,,,,
0.216,0.356,0.408,0.359,0.347,0.378,0.410,0.357,0.373,0.406,0.411,0.450,0.436,,,,,,,
0.255,0.394,0.369,0.295,0.439,0.292,0.388,0.361,0.310,0.327,0.318,0.511,0.498,0.457,,,,,,
0.206,0.380,0.435,0.383,0.203,0.417,0.457,0.325,0.289,0.379,0.401,0.443,0.401,0.342,0.333,,,,,
0.358,0.550,0.445,0.634,0.521,0.464,0.550,0.343,0.398,0.582,0.591,0.434,0.521,0.611,0.714,0.738,,,,
0.219,0.260,0.394,0.246,0.286,0.264,0.425,0.351,0.393,0.347,0.260,0.512,0.451,0.377,0.542,0.441,0.460,,,
0.267,0.443,0.467,0.535,0.585,0.430,0.506,0.676,0.586,0.589,0.611,0.469,0.547,0.661,0.554,0.704,0.767,0.855,,
0.334,0.485,0.483,0.514,0.491,0.477,0.506,0.327,0.372,0.557,0.578,0.363,0.535,0.641,0.595,0.648,0.738,0.822,0.704,
0.239,0.290,0.497,0.271,0.417,0.315,0.462,0.475,0.458,0.397,0.331,0.493,0.490,0.397,0.458,0.470,0.447,0.684,0.889,0.473""".split("\n")
x0 = [[0.0]*20 for _ in range(20)]
for i in range(20):
ln = rawx0[i].split(",")
for j in range(20):
if len(ln[j]) > 0:
x0[i][j] = x0[j][i] = float(ln[j])
m = [[0.0]*20 for _ in range(20)]
for i in range(20):
ln = rawm[i].split(",")
for j in range(20):
if len(ln[j]) > 0:
m[i][j] = m[j][i] = 1.0/float(ln[j])
def readfasta(fastafile):
fasta = ''
with open(fastafile) as f:
for line in f:
if line.startswith('>'):
continue
else:
fasta += line.strip()
return fasta
def main():
#ここ二行の入力があまりにも雑
# ファイル書き出し
with open(args.coarse, mode='w') as c:
with open(args.full, mode='w') as fu:
fasta = readfasta(args.fasta)
for ln in open(args.map):
lnt = ln.split()
a = int(lnt[1]) - 1
b = int(lnt[2]) - 1
if a*b == 0 : continue
weight = float(lnt[3])
aa_a = fasta[a]; aa_b = fasta[b]
out_x0 = x0[ aa.index(aa_a) ][ aa.index(aa_b) ]
out_m = m[ aa.index(aa_a) ][ aa.index(aa_b) ]
cb_a = "CB" if not aa_a == 'G' else "CA"
cb_b = "CB" if not aa_b == 'G' else "CA"
fu.write(f"AtomPair {cb_a} {a+1} {cb_b} {b+1} SCALARWEIGHTEDFUNC {weight} SIGMOID {out_x0} {out_m:.4}\n")
c.write(f"AtomPair {cb_a} {a+1} {cb_b} {b+1} SCALARWEIGHTEDFUNC {weight} BOUNDED 0 {out_x0} 1 0.5 #\n")
if __name__ == '__main__':
main()
| [
"virgospica93@gmail.com"
] | virgospica93@gmail.com |
f9197a33c02d27eaa7c912ad0142e4e415f29be6 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/267-tideGauge.py | 77661b2e1c355fc0cfe10e2d86a6e1f385a5c771 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 267
y = 268
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
900fd2718c905d21f076a8702c6726302a7d466f | d0041cbd074b1211bf9764e16ba76d4ef7f44e75 | /DSA_LAB8/venv/Scripts/easy_install-script.py | cebffb0ce01a2dcc2d3f8f38b7ee4f92343af7b6 | [] | no_license | sharozraees802/Data_Structure-python- | dab9135faaa71146d22e4eca21dca09ec4e0afe8 | a77ea39eb1680c920052add735a9d9b377e1b985 | refs/heads/master | 2022-12-06T12:49:09.300127 | 2020-01-07T12:46:14 | 2020-01-07T12:46:14 | 232,320,833 | 1 | 1 | null | 2022-11-23T15:13:03 | 2020-01-07T12:33:05 | Python | UTF-8 | Python | false | false | 464 | py | #!C:\Users\SharozRaees\Desktop\DSA-LAb-THEORY\DSA_LAB8\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"sharozraees802@gmail.com"
] | sharozraees802@gmail.com |
eb0a6a84da2a387357aeec52eb7d78f8a680211f | 17264498ac1ff3a8078a0e79b1c8ed934674d908 | /musicmatch/asgi.py | 4f5b4acbd987be9dd29cf8e417e12573df189c01 | [] | no_license | oscarmangan/musicmatch-backend | 51d982b8a72c55870605a999ba8ca64fe3f3a07e | 2ce24f7ca52ce371eca48d1b6206ac7c90581278 | refs/heads/master | 2023-03-30T17:43:04.121291 | 2021-04-03T20:02:00 | 2021-04-03T20:02:00 | 297,345,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for musicmatch project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'musicmatch.settings')
application = get_asgi_application()
| [
"oscarmangan.ire@gmail.com"
] | oscarmangan.ire@gmail.com |
259194eae610fffb873eecb9f8ed868ff3a2b910 | bb03ef2ee3e850324985e2ae4ed89fa6f059c14c | /tests/test_tgp.py | 9c493a3332cbb96b57f8604d15a110b988b7b807 | [
"MIT"
] | permissive | watzon/tgp | 1337c1faa405ec451d80baf1f440247ed7b7931b | 0c27ac3eeadd0d05c2ea9b3eb53af38dff0ea6da | refs/heads/master | 2020-11-25T13:35:40.507351 | 2019-12-17T19:48:08 | 2019-12-17T19:48:08 | 228,690,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from tgp import __version__
def test_version():
assert __version__ == '0.1.0'
| [
"cawatson1993@gmail.com"
] | cawatson1993@gmail.com |
06eab32812567f359d7aea988deb216e87b8b3e1 | d114a6576659a4a299f5965032489d2abbe41282 | /src/computer_vision/nodes/synchronize_img_command_lidar.py | 9c7be96b236672ac1cb80aa900f9db16b72f267c | [
"MIT"
] | permissive | mldiego/Platooning-F1Tenth | dbc23ff7af3397716be1bbfdf9881da799206855 | ec5eadb137da8428642b3ffd1b8ca31fde4f6dff | refs/heads/master | 2023-03-04T21:08:12.799694 | 2021-02-18T00:11:46 | 2021-02-18T00:11:46 | 230,968,509 | 0 | 0 | MIT | 2021-02-16T17:34:01 | 2019-12-30T19:25:59 | Python | UTF-8 | Python | false | false | 4,887 | py | #!/usr/bin/env python
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image, CompressedImage,LaserScan
from cv_bridge import CvBridge, CvBridgeError
from message_filters import ApproximateTimeSynchronizer, Subscriber
from ackermann_msgs.msg import AckermannDriveStamped
import imutils
from race.msg import drive_param
import os
import rospkg
import numpy as np
# import sys so we can use packages outside of this folder in
# either python 2 or python 3, I know it's janky, chill
import sys
import os
from pathlib import Path
#insert parent directory into the path
sys.path.insert(0,str(Path(os.path.abspath(__file__)).parent.parent))
from preprocessing.utils import ImageUtils
class MessageSynchronizer:
''' Gathers messages with vehicle information that have similar time stamps
/camera/zed/rgb/image_rect_color/compressed: 18 hz
/camera/zed/rgb/image_rect_color: 18 hz
/vesc/ackermann_cmd_mux/input/teleop: 40 hz
'''
def __init__(self,racecar_name,vesc_name,data_path):
self.image_topic = racecar_name+'/camera/zed/rgb/image_rect_color'
self.drive_topic = vesc_name+'/ackermann_cmd_mux/input/teleop'
self.lidar_topic = racecar_name+'/scan'
print(self.image_topic,self.drive_topic,self.lidar_topic)
self.image_rect_color=Subscriber(self.image_topic,Image)
self.ackermann_stamped=Subscriber(self.drive_topic,AckermannDriveStamped)
self.lidar_sub=Subscriber(self.lidar_topic,LaserScan)
r = rospkg.RosPack()
self.util=ImageUtils()
self.save_path_root=os.path.sep.join([r.get_path('computer_vision'),data_path])
self.cv_bridge=CvBridge()
self.count=0
self.save_count=0
#create the time synchronizer
self.sub = ApproximateTimeSynchronizer([self.image_rect_color,self.ackermann_stamped,self.lidar_sub], queue_size = 20, slop = 0.08)
#register the callback to the synchronizer
self.sub.registerCallback(self.master_callback)
#callback for the synchronized messages
#Note: a negative value means turning to the right, a postive value means turning to the left
def master_callback(self,image,ackermann_msg,lidar_msg): #drive_param):
#convert rosmsg to cv image
try:
cv_image=self.cv_bridge.imgmsg_to_cv2(image,"bgr8")
self.count+=1
except CvBridgeError as e:
print(e)
#convert the steering command to a string to I can store it with the image name
#for efficient data storage
command='%.10f' % ackermann_msg.drive.steering_angle
#replace the period with ~ so it's a valid filename
command=command.replace('.','~')
#save path
save_path=os.path.join(self.save_path_root,self.label_image(ackermann_msg.drive.steering_angle),str(rospy.Time.now())+'~'+command+'.png')
limited_ranges=np.asarray(lidar_msg.ranges)
indices=np.where(limited_ranges>=10.0)[0]
limited_ranges[indices]=10.0
limited_ranges= limited_ranges[29:1053]
limited_ranges = limited_ranges.reshape((32,32,1))
limited_ranges = limited_ranges
if(self.count % 1==0):
dirPath = os.path.split(save_path)[0]
if not 'straight' in dirPath and 'weak_right' not in dirPath and 'weak_left' not in dirPath:
self.save_image(cv_image,save_path)
np.save(save_path.replace(".png",".npy"),limited_ranges)
self.save_count+=1
self.count+=1
#function that categorizes images into left, weak_left, straight, weak_right, right
def label_image(self,steering_angle):
if(steering_angle<-0.261799):
return "right"
elif(steering_angle>0.261799):
return "left"
elif(steering_angle<-0.0523599 and steering_angle>-0.261799):
return "weak_right"
elif(steering_angle>0.0523599 and steering_angle<0.261799):
return "weak_left"
else:
return "straight"
def save_image(self,image,path):
dirPath = os.path.split(path)[0]
# if the output directory does not exist, create it
if not os.path.exists(dirPath):
os.makedirs(dirPath)
print('does not exist')
print(path)
cv2.imwrite(path,image)
if __name__=='__main__':
rospy.init_node('image_command_sync')
args = rospy.myargv()[1:]
# get the racecar name so we know what to subscribe to
racecar_name=args[0]
# get the name of the vesc for the car
vesc_name=args[1]
# path where to store the dataset
data_path = args[2]
# initialize the message filter
mf=MessageSynchronizer(racecar_name,vesc_name,data_path)
# spin so that we can receive messages
rospy.spin() | [
"pmusau13ster@gmail.com"
] | pmusau13ster@gmail.com |
fa11e1724cb6566a64492b7256db57c5ace0ff14 | 95d33f7038c358b371aac23e1ad1a0546aa7e275 | /backtest.py | eb513323206be26b02e1d0903c03d794e2bb05f5 | [] | no_license | prajakta-dc/Factor-Timing-Project---Elon-Musk | 022bea50029cef31f1ac47cff131a9052102a4d6 | 3ee95a5ce48609c03defdeab0aa6f1c33d6e3e3a | refs/heads/main | 2023-03-24T01:43:06.432934 | 2021-03-18T07:54:22 | 2021-03-18T07:54:22 | 348,986,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,690 | py | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tabulate import tabulate
class Parameters:
    '''
    Global (macro) simulation settings shared across the backtest.

    All values live as class attributes; nothing here needs to change
    unless the trading environment itself changes.
    '''
    risk_aversion = 3.0
    risk_premium = 0.1
    trading_days_pa = 252
    rebalance_freq_days = 21
    data_time_step_days = 1

    @staticmethod
    def set_param(risk_aversion = 3.0,
                  risk_premium = 0.1,
                  trading_days_pa = 252,
                  rebalance_freq_days = 1,
                  data_time_step_days = 1):
        '''
        Overwrite every class-level setting in one call.

        Calling with no arguments resets all parameters to the defaults of
        this method (note: ``rebalance_freq_days`` resets to 1 here, not to
        the initial class value of 21).
        '''
        # Assign each setting back onto the class so all users of
        # Parameters see the updated values.
        updates = (('risk_aversion', risk_aversion),
                   ('risk_premium', risk_premium),
                   ('trading_days_pa', trading_days_pa),
                   ('rebalance_freq_days', rebalance_freq_days),
                   ('data_time_step_days', data_time_step_days))
        for attr_name, attr_value in updates:
            setattr(Parameters, attr_name, attr_value)
class Simulator:
    """Event-driven portfolio backtest engine.

    Walks a panel of target weights and realized asset returns through time,
    rebalancing at a fixed frequency (``Parameters.rebalance_freq_days``) and
    tracking the portfolio value simultaneously under three cost settings:
    gross, net of transaction costs, and net of transaction + holding costs.
    """

    def __init__(self, strategy_name,initial_book_size=1):
        '''
        Assume we don't vary simulator's parameters once set.

        strategy_name     : label used to name the output columns.
        initial_book_size : starting portfolio value (default 1 dollar).
        '''
        self.initial_book_size = initial_book_size
        self.strategy_name = strategy_name

    def add_weights_lag(self, weights, lag=1):
        '''
        Shift the weights DataFrame forward by ``lag`` rows (lag >= 1) so
        that weights computed from day-t information are only applied from
        day t+lag. Leading rows created by the shift are filled with 0
        (i.e. no position before the first signal is available).
        '''
        weights_lag = weights.shift(lag)
        weights_lag.fillna(0, inplace=True)
        return weights_lag

    def cal_pnl(self,
                target_weights,
                returns,
                tcost_coeff,
                hodling_cost_coeff):
        '''
        PnL calculation that allows variation of transaction costs.

        Parameters
        ----------
        target_weights : DataFrame (dates x assets) of desired weights.
        returns        : DataFrame (dates x assets) of per-period asset
                         returns, index-aligned with ``target_weights``.
        tcost_coeff    : per-unit-turnover transaction cost coefficient(s).
        hodling_cost_coeff : annualized holding cost coefficient(s),
                         accrued ACT/365 between observations.

        Returns
        -------
        (V_t, strat_log_ret, turnover_df) : portfolio value paths, their
        log returns, and per-period turnover — one column per cost setting.
        '''
        # Sanity check on the weights and returns matrix
        assert(target_weights.shape == returns.shape)
        assert(sum(returns.index == target_weights.index) == target_weights.shape[0])

        # Column labels, one per cost setting.
        gross_label = '{} gross'.format(self.strategy_name)
        tcost_label = '{} gross - tcost'.format(self.strategy_name)
        tcost_hodl_label = '{} gross - tcost - hcost'.format(self.strategy_name)

        # Data frame that contains the value of the portfolio over time for different simulation settings
        V_t = pd.DataFrame(index=returns.index,
                           columns=[gross_label, tcost_label, tcost_hodl_label])

        idx = target_weights.index

        # Initializing the portfolio value with stated initial book size
        V_t.loc[idx[0], :] = self.initial_book_size

        # Weights of the assets at the start of simulation: Portfolio yet to be formed
        # NOTE(review): these start out as plain Python lists and become pandas
        # Series after the first rebalance; the arithmetic below relies on
        # pandas broadcasting handling both cases — confirm.
        w_start_of_period_gross = [0.0] * len(target_weights.columns)
        w_start_of_period_tc = [0.0] * len(target_weights.columns)
        w_start_of_period_tc_hc = [0.0] * len(target_weights.columns)

        # Rebalance condition boolean variable
        rebalance_cond = False

        # Turnover series
        turnover_series_gross = pd.Series(index = idx)
        turnover_series_tc = pd.Series(index = idx)
        turnover_series_tc_hc = pd.Series(index = idx)

        # Setting initial value to be = 0
        turnover_series_gross.loc[idx[0]] = 0
        turnover_series_tc.loc[idx[0]] = 0
        turnover_series_tc_hc.loc[idx[0]] = 0

        # Simulation Start Bool (forces a rebalance on the first iteration)
        flag = True

        # Simulation
        for dt in range(1, len(idx)):
            """
            We are working with end of day prices and weights rebalancing.
            Weights are lagged by >= 1 lag.
            We go sequentially from the start of the period with rebalancing to target weights to end of the period
            where value of the portfolio is computed for different simulation settings.
            """
            curr_date = idx[dt]
            prev_date = idx[dt - 1]
            step_days = float((curr_date - prev_date).days)
            # Given multiplier (below) is used for holding cost, we choose to use ACT/365 convention
            step_multiplier_ann = step_days / 365
            curr_target_weights = target_weights.loc[curr_date]
            curr_returns = returns.loc[curr_date]

            # Period Start:
            # Rebalance condition: rebalance every rebalance_freq_days
            # (expressed in data steps), plus once at the very start.
            if dt % (Parameters.rebalance_freq_days/Parameters.data_time_step_days) == 0 or flag:
                rebalance_cond = True
                flag = False
            else:
                rebalance_cond = False

            # Rebalancing/Trading the portfolio to the target weights from the prevailing weights if the rebalancing frequency arrives
            if rebalance_cond:
                # print(curr_target_weights.values ,w_start_of_period_tc)
                # Transaction cost computation for transaction cost enabled simulation setting
                # print(curr_target_weights.shape)
                # print(w_start_of_period_tc.shape)
                # Cost = L1 distance between target and prevailing weights,
                # scaled by tcost_coeff and the previous portfolio value.
                # NOTE(review): `.values` is used here but not in the turnover
                # deltas below — presumably equivalent when indices align; verify.
                tcost_tc = np.nansum(abs(curr_target_weights.values - w_start_of_period_tc) * tcost_coeff) * V_t.loc[prev_date, tcost_label]
                tcost_tc_hc = np.nansum(abs(curr_target_weights.values - w_start_of_period_tc_hc) * tcost_coeff) * V_t.loc[prev_date, tcost_hodl_label]
                """
                # Transaction cost computation for transaction cost enabled simulation setting
                #tcost_tc = abs(curr_risky_weight - w_start_of_period_tc[0]) * V_t.loc[prev_date, tcost_label] * tcost_coeff
                """
                """
                # Registering turnover in the turnover series of respective simulation settings
                turnover_series_gross.loc[curr_date] = abs(curr_risky_weight - w_start_of_period_gross[0])
                turnover_series_tc.loc[curr_date] = abs(curr_risky_weight - w_start_of_period_tc[0])
                """
                # Registering turnover in the respective turnover series
                turnover_series_gross.loc[curr_date] = np.nansum(abs(curr_target_weights - w_start_of_period_gross))
                turnover_series_tc.loc[curr_date] = np.nansum(abs(curr_target_weights - w_start_of_period_tc))
                turnover_series_tc_hc.loc[curr_date] = np.nansum(abs(curr_target_weights - w_start_of_period_tc_hc))
            else:
                # Case of no rebalance: no trading costs, zero turnover.
                tcost_tc = 0
                tcost_tc_hc = 0
                turnover_series_gross.loc[curr_date] = 0
                turnover_series_tc.loc[curr_date] = 0
                turnover_series_tc_hc.loc[curr_date] = 0

            # Simulation setting: transaction costs enabled
            # Portfolio value immediately after paying the trading cost.
            V_t_minus_1_star_tc = V_t.loc[prev_date, tcost_label] - tcost_tc
            V_t_minus_1_star_tc_hc = V_t.loc[prev_date, tcost_hodl_label] - tcost_tc_hc

            # End of the period
            # Simulation setting: 'gross': No transaction costs
            if rebalance_cond:
                V_t.loc[curr_date, gross_label] = V_t.loc[prev_date, gross_label] * (1 + np.nansum(curr_target_weights * curr_returns))
            else:
                V_t.loc[curr_date, gross_label] = V_t.loc[prev_date, gross_label] * (1 + np.nansum(w_start_of_period_gross * curr_returns))

            # Simulation setting: Transaction cost enabled
            if rebalance_cond:
                # Assuming after transaction costs deduction the curren weights are not drastically deviated from the current target weights
                V_t.loc[curr_date, tcost_label] = V_t_minus_1_star_tc * (1 + np.nansum(curr_target_weights * curr_returns))
            else:
                V_t.loc[curr_date, tcost_label] = V_t_minus_1_star_tc * (1 + np.nansum(w_start_of_period_tc * curr_returns))

            # Simulation setting: Transaction and Holding cost enabled; Holding cost assumed to be deducted at the end of a period
            if rebalance_cond:
                # Assuming after transaction costs deduction the curren weights are not drastically deviated from the current target weights
                V_t.loc[curr_date, tcost_hodl_label] = V_t_minus_1_star_tc_hc * (1 + np.nansum(curr_target_weights * curr_returns) - (np.nansum(curr_target_weights * hodling_cost_coeff) * step_multiplier_ann))
            else:
                V_t.loc[curr_date, tcost_hodl_label] = V_t_minus_1_star_tc_hc * (1 + np.nansum(w_start_of_period_tc_hc * curr_returns) - (np.nansum(w_start_of_period_tc_hc * hodling_cost_coeff) * step_multiplier_ann))

            # weight of the new positions at the end of the period or beginning of new period (before trading)
            # Each asset's weight drifts with its own return relative to the
            # portfolio's total return over the period.
            # Simulation setting: 'gross': No transaction costs
            if rebalance_cond:
                w_start_of_period_gross = curr_target_weights * V_t.loc[prev_date, gross_label] * (1 + curr_returns ) / V_t.loc[curr_date, gross_label]
            else:
                w_start_of_period_gross = w_start_of_period_gross * V_t.loc[prev_date, gross_label] * (1 + curr_returns ) / V_t.loc[curr_date, gross_label]

            # Simulation setting: transaction costs enabled
            if rebalance_cond:
                w_start_of_period_tc = curr_target_weights * V_t_minus_1_star_tc * (1 + curr_returns ) / V_t.loc[curr_date, tcost_label]
            else:
                w_start_of_period_tc = w_start_of_period_tc * V_t_minus_1_star_tc * (1 + curr_returns ) / V_t.loc[curr_date, tcost_label]

            # Simulation setting: transaction and holding costs enabled
            # Approx treatment (Similar to Tcost approx. treatment); assuming the holding cost deduction does not affect the weights drastically
            if rebalance_cond:
                w_start_of_period_tc_hc = curr_target_weights * V_t_minus_1_star_tc_hc * (1 + curr_returns ) / V_t.loc[curr_date, tcost_hodl_label]
            else:
                w_start_of_period_tc_hc = w_start_of_period_tc_hc * V_t_minus_1_star_tc_hc * (1 + curr_returns ) / V_t.loc[curr_date, tcost_hodl_label]

        """
        Assumption: We employ log returns on the strategy calculation and using the law of large numbers, later compute the mean of the returns of the strategy;
        average of log return variables is a unbiased estimator and converges to the true in sample mean as the number of observations grow. For our case, the number of
        observations are large enough for all practical purposes.
        """
        # Strategy log return
        strat_log_ret = np.log(V_t.astype('float')) - np.log(V_t.astype('float')).shift(1)

        # Putting together the turnover dataframe
        turnover_df = pd.concat([turnover_series_gross, turnover_series_tc, turnover_series_tc_hc],axis = 1)
        turnover_df.columns = [gross_label, tcost_label, tcost_hodl_label]

        return V_t, strat_log_ret, turnover_df
class Visualizer:
    """Presentation helpers for simulation results: tabular statistics and
    PnL charts. Relies on module-level `tabulate`, `matplotlib`/`plt`,
    `np` and `pd`."""

    def __init__(self):
        # Stateless helper; nothing to initialise.
        pass

    def show_stats(self, stats_data):
        """Pretty-print the statistics DataFrame as a psql-style table."""
        heading = " " * 60 + "Mean Annualized Performance Statistics"
        print(heading)
        print(tabulate(stats_data, headers=stats_data.columns, tablefmt='psql'))

    def plot_pnl(self,
                 pnl_data,
                 strategy_name,
                 initial_book_size,
                 x_label='Time',
                 y_label='Natural Log scale: log($ Amount)',
                 scale = 'log',
                 figsize=[20, 10]):
        """Plot portfolio value over time on a log or linear y-axis.

        `pnl_data` may be a DataFrame (one line per column) or a single
        Series; `scale` selects between 'log' and 'linear' rendering.
        """
        matplotlib.rc('font', **{'family': 'calibri',
                                 'weight': 'bold',
                                 'size': 15})
        fig, axs = plt.subplots(figsize=figsize)
        axs.set_title(f'Value of ${initial_book_size} when traded via {strategy_name}')
        if scale == 'log':
            axs.set_ylabel(y_label)
        elif scale == 'linear':
            axs.set_ylabel('Linear Scale: $ Amount')
        axs.set_xlabel(x_label)
        if isinstance(pnl_data, pd.DataFrame):
            for column in pnl_data.columns:
                values = pnl_data.loc[:, column].astype(float)
                if scale == 'log':
                    axs.plot(np.log(values), label=column)
                elif scale == 'linear':
                    axs.plot(values, label=column)
        elif isinstance(pnl_data, pd.Series):
            print(pnl_data.name)
            if scale == 'log':
                axs.plot(np.log(pnl_data.astype(float)), label=pnl_data.name)
            elif scale == 'linear':
                axs.plot(pnl_data, label=pnl_data.name)
        plt.grid(True, alpha=0.75)
        plt.legend(loc=0)
        plt.show()
class Indicator:
    """Annualized performance statistics for simulated strategies.

    All annualization factors come from the module-level `Parameters`
    class (`trading_days_pa`, `data_time_step_days`).
    """

    def __init__(self):
        # Stateless helper; nothing to initialise.
        pass

    def cal_Sharpe(self, strat_log_ret, returns):
        """Annualized Sharpe ratio (arithmetic excess return over vol)."""
        excess_log_ret = strat_log_ret.copy()
        for column in strat_log_ret.columns:
            # Sanity check: strategy returns and risk-free series must be
            # aligned on the same dates.
            assert(sum(strat_log_ret.index == returns.index) == returns.shape[0])
            # log -> arithmetic, subtract the risk-free leg, back to log space.
            excess_log_ret[column] = np.log(1 + ((np.exp(strat_log_ret[column]) - 1) - returns['rf_ret']))
        ann_ret = self.cal_annual_ret(excess_log_ret, arithmetic = True)
        ann_vol = self.cal_annual_vol(excess_log_ret, arithmetic = True)
        return ann_ret / ann_vol

    def cal_annual_ret(self, strat_log_ret, arithmetic = False):
        """Annualized mean return; log by default, arithmetic on request."""
        log_ret_pa = strat_log_ret.mean(axis = 0, skipna = True) * Parameters.trading_days_pa / Parameters.data_time_step_days
        if arithmetic:
            # Convert annualized log return into an arithmetic return.
            return np.exp(log_ret_pa) - 1
        return log_ret_pa

    def cal_annual_vol(self, strat_log_ret, arithmetic = False):
        """Annualized volatility of log (or arithmetic) returns."""
        scaling = np.sqrt(Parameters.trading_days_pa / Parameters.data_time_step_days)
        if arithmetic:
            return (np.exp(strat_log_ret) - 1).std(axis=0, skipna = True) * scaling
        return strat_log_ret.std(axis=0, skipna = True) * scaling

    def cal_annual_skew(self, strat_log_ret, arithmetic = False):
        """Annualized skewness; see https://quant.stackexchange.com/a/3678."""
        scaling = np.sqrt(Parameters.trading_days_pa / Parameters.data_time_step_days)
        if arithmetic:
            return (np.exp(strat_log_ret) - 1).skew(axis=0, skipna = True) / scaling
        return strat_log_ret.skew(axis=0, skipna = True) / scaling

    def cal_annual_kurt(self, strat_log_ret, arithmetic = False):
        """Annualized kurtosis; see https://quant.stackexchange.com/a/3678."""
        scaling = Parameters.trading_days_pa / Parameters.data_time_step_days
        if arithmetic:
            return (np.exp(strat_log_ret) - 1).kurt(axis=0, skipna = True) / scaling
        return strat_log_ret.kurt(axis=0, skipna = True) / scaling

    def cal_mdd(self, V_t):
        """Maximum drawdown: worst peak-to-trough decline of the value curve.

        https://quant.stackexchange.com/questions/18094/how-can-i-calculate-the-maximum-drawdown-mdd-in-python
        """
        running_peak = V_t.cummax()
        drawdown = V_t / running_peak - 1.0
        return drawdown.min()

    def cal_turnover(self, turnover_df):
        """Annualized mean two-way turnover."""
        return turnover_df.mean(axis = 0) * (Parameters.trading_days_pa / Parameters.data_time_step_days)

    def agg_stats(self,
                  strat_log_ret,
                  returns,
                  V_t,
                  turnover_df):
        '''
        Aggregate indicator calculation methods into one summary table.
        '''
        sharpe = self.cal_Sharpe(strat_log_ret, returns)
        ann_ret = self.cal_annual_ret(strat_log_ret)
        # Vol, skewness and kurtosis are reported in arithmetic-return space.
        ann_vol = self.cal_annual_vol(strat_log_ret, arithmetic=True)
        ann_skew = self.cal_annual_skew(strat_log_ret, arithmetic=True)
        ann_kurt = self.cal_annual_kurt(strat_log_ret, arithmetic=True)
        max_drawdown = self.cal_mdd(V_t)
        mdd = pd.DataFrame([np.around(x * 100, 2) for x in max_drawdown.values],
                           index = sharpe.index)
        ann_turnover = self.cal_turnover(turnover_df)
        stats = pd.concat([sharpe, ann_ret * 100, ann_vol * 100,
                           ann_turnover * 100, ann_skew, ann_kurt, mdd],
                          axis=1)
        stats.columns = ['Sharpe Ratio', 'Returns (%)', 'Volatility (%)',
                         'Turnover (%)','Skewness','Kurtosis','Max Drawdown (%)']
        return stats.round(2)
| [
"noreply@github.com"
] | noreply@github.com |
636f195f6a5e54a5eba54008aa8793ed02d28b7a | fbd638914b1119f7f7176bb7f8ac43f733e608d2 | /informatics/циклы/for/Остаток.py | c1a81c55ea8d06263692983bef37e752a3cc15e2 | [] | no_license | aidoka22/Web_development | 363ef6195fdb1ce056af3249b0a48ae159f69389 | 06f6f948705a7d6f54d46f1a578bc5d677a7bda4 | refs/heads/main | 2023-04-03T05:12:21.779598 | 2021-04-14T21:13:08 | 2021-04-14T21:13:08 | 335,631,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | a = int(input())
# Remaining range bound and modulus parameters from stdin (a was read above).
b = int(input())
c = int(input())
d = int(input())
# Emit, space-separated, every value in [a, b] whose remainder modulo d is c.
matches = (value for value in range(a, b + 1) if value % d == c)
for value in matches:
    print(value, end=" ")
| [
"noreply@github.com"
] | noreply@github.com |
f8becd20fb9df5a15511e5f38b40e6d9a0cbabef | c3a33b3126c82eb5723bc1f5f5baffeb6841afeb | /BlockChain.py | 3a55bb391c737c767b31a8dbce5605d34c618f96 | [] | no_license | geetesh-gupta/VanetBlockchain | f53a28f31b0855397c72572796cf2b63bb2f7721 | c57164019d9fecaafe3bf22d56b7576a5e9c06ac | refs/heads/master | 2023-01-08T13:00:24.105500 | 2020-11-06T18:23:01 | 2020-11-06T18:23:01 | 310,387,238 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,207 | py | import time
import hashlib as hasher
import datetime as date
import random
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Signature import pkcs1_15
from Block import Block
from Transaction import Transaction
class BlockChain:
    """A toy supply-chain blockchain of RSA-signed transfer transactions.

    Items are handed between "manufacturers" (who may introduce new item
    codes) and other "stakeholders"; pending transfers queue in a UTXO
    pool until mined into blocks.
    """

    def __init__(self):
        # Ordered list of mined Block objects (index 0 is the genesis block).
        self.blockchain = []
        # Pending transactions not yet included in a block (UTXO pool).
        self.utxo_array = []
        # RSA key pairs of authorised manufacturers.
        self.manufacturers_list = []
        # RSA key pairs of other supply-chain stakeholders.
        self.other_users_list = []
        # Index to assign to the next block appended to the chain.
        self.global_index = 0
        # NOTE(review): pow_proof is initialised but never referenced by any
        # method of this class — confirm whether external code relies on it.
        self.pow_proof = int(0)

    # The function would verify all the blocks in the given blockchain
    def verify_blockchain(self):
        """Walk the chain, print each block's transactions, and check the
        stored previous-hash links and proof-of-work hashes."""
        previous_block = self.blockchain[0]
        count = 1
        # NOTE(review): neither `previous_block` nor `count` is updated inside
        # the loop, so every block is compared against the genesis block and
        # reported as block #1 — confirm whether this is intended.
        for block in self.blockchain[1:]:
            print('\nFor the block #' + str(count) + ': ')
            for transaction in block.supply_data:
                print('The item ID is ' + str(transaction.item_id) +
                      ' and the associated timestamp is ' + str(transaction.timestamp))
            if(str(previous_block.hash) == str(block.previous_hash)):
                print('The hash values have been verified.')
            # Recompute the hash of the stored proof-of-work number.
            sha = hasher.sha256()
            sha.update(str(int(block.proof_of_work)).encode('utf-8'))
            hash_value = sha.hexdigest()
            print('The PoW number is ' + str(block.proof_of_work) +
                  ' and the associated hash is ' + hash_value)
            print('------------------------------------------------------------------------------------------------------------------------')
        print('\n\n')

    # Function for generating manufacturer keys
    def generate_manufacturer_keys(self, number):
        """Create `number` fresh 1024-bit RSA key pairs for manufacturers."""
        for item in range(0, int(number)):
            self.manufacturers_list.append(
                RSA.generate(1024, Random.new().read))
        # print(self.manufacturers_list)
        print('\nThe manufacturer keys have been generated.')

    # Function for generating stakeholder keys
    def generate_other_keys(self, number):
        """Create `number` fresh 1024-bit RSA key pairs for stakeholders."""
        for item in range(0, int(number)):
            self.other_users_list.append(RSA.generate(1024, Random.new().read))
        # print(self.other_users_list)
        print('\nThe stakeholder keys have been generated.')

    # Function for tracking an item
    def track_item(self, item_code):
        """Print the transfer history of `item_code`.

        For every matching transaction on the chain, the supplier and the
        receiver are identified as "Manufacturer #n" or "Stakeholder #n" by
        comparing PEM-encoded public keys against the stored key lists.
        """
        not_found_flag = True
        for block in self.blockchain[1:]:
            for transaction in block.supply_data:
                if(item_code == transaction.item_id):
                    if(not_found_flag):
                        print('\nThe item (' + item_code +
                              ') has been found and the tracking details are: ')
                        not_found_flag = False
                    manufacturer_suppplier = False
                    manufacturer_receiver = False
                    supplier_count = 0
                    supplier_not_found_flag = True
                    # First look for the supplier among the manufacturers...
                    for item in self.manufacturers_list:
                        supplier_count = supplier_count + 1
                        if str(transaction.supplier_puk.exportKey("PEM").decode('utf-8')) == str(item.publickey().exportKey("PEM").decode('utf-8')):
                            supplier_not_found_flag = False
                            manufacturer_suppplier = True
                            break
                    # ...then among the other stakeholders.
                    if(supplier_not_found_flag):
                        supplier_count = 0
                        for item in self.other_users_list:
                            supplier_count = supplier_count + 1
                            if str(transaction.supplier_puk.exportKey("PEM").decode('utf-8')) == str(item.publickey().exportKey("PEM").decode('utf-8')):
                                supplier_not_found_flag = False
                                break
                    receiver_count = 0
                    receiver_not_found_flag = True
                    # Same two-pass search for the receiver; receiver_puk is
                    # already stored as a PEM string (see make_transaction).
                    for item in self.manufacturers_list:
                        receiver_count = receiver_count + 1
                        if str(transaction.receiver_puk) == str(item.publickey().exportKey("PEM").decode('utf-8')):
                            receiver_not_found_flag = False
                            manufacturer_receiver = True
                            break
                    if(receiver_not_found_flag):
                        receiver_count = 0
                        for item in self.other_users_list:
                            receiver_count = receiver_count + 1
                            if str(transaction.receiver_puk) == str(item.publickey().exportKey("PEM").decode('utf-8')):
                                receiver_not_found_flag = False
                                break
                    final_result = ""
                    if(manufacturer_suppplier):
                        final_result = final_result + "Manufacturer #" + \
                            str(supplier_count) + " transferred the asset to "
                    else:
                        final_result = final_result + "Stakeholder #" + \
                            str(supplier_count) + " transferred the asset to "
                    if(manufacturer_receiver):
                        final_result = final_result + "Manufacturer #" + \
                            str(receiver_count) + " at " + \
                            str(transaction.timestamp)
                    else:
                        final_result = final_result + "Stakeholder #" + \
                            str(receiver_count) + " at " + \
                            str(transaction.timestamp)
                    print(final_result)
        if(not_found_flag):
            print('\nThe item code was not found in the blockchain.')

    # This function is used for viewing all the blocks and the transactions in the blockchain
    def view_blockchain(self):
        """Dump every block's fields to the console."""
        print('\n\nThe list of blocks are: \n')
        for block in self.blockchain:
            print('\n------------------------------------------------------------------------------------------------------------------------')
            print(block.index)
            print(block.timestamp)
            print(block.supply_data)
            print(block.proof_of_work)
            print(block.hash)
            print(block.previous_hash)
            print('------------------------------------------------------------------------------------------------------------------------')
        print('\n\n')

    # This function is used to view all the Unspend Transaction Outputs
    def view_UTXO(self):
        """Dump every pending (unmined) transaction to the console."""
        print('\n\nThe list of UTXO are: \n')
        for transaction in self.utxo_array:
            print('\n------------------------------------------------------------------------------------------------------------------------')
            print(transaction.supplier_puk.exportKey("PEM").decode('utf-8'))
            print(transaction.receiver_puk)
            print(transaction.item_id)
            print(transaction.timestamp)
            print(transaction.signature)
            print('------------------------------------------------------------------------------------------------------------------------')
        print('\n\n')

    # This function is used to generate a transaction
    def make_transaction(self, supplier_key, receiver_key, item_id):
        """Interactively build a signed transfer and queue it in the UTXO pool.

        NOTE(review): the parameters are effectively placeholders — both keys
        are re-selected via console prompts (when 'M'/'O' is entered) and
        item_id is always overwritten by the prompted value.
        """
        # Selection functions for the keys and the item ID
        selection = input('\nSelect type of key (M/O) for supplier: ')
        if selection == 'M':
            index = int(input('There are a total of ' +
                              str(len(self.manufacturers_list)) + ' users. Enter your selection: ')) - 1
            supplier_key = self.manufacturers_list[index]
        elif selection == 'O':
            index = int(input('There are a total of ' +
                              str(len(self.other_users_list)) + ' users. Enter your selection: ')) - 1
            supplier_key = self.other_users_list[index]
        selection = input('\nSelect type of key (M/O) for receiver: ')
        if selection == 'M':
            index = int(input('There are a total of ' +
                              str(len(self.manufacturers_list)) + ' users. Enter your selection: ')) - 1
            receiver_key = self.manufacturers_list[index]
        elif selection == 'O':
            index = int(input('There are a total of ' +
                              str(len(self.other_users_list)) + ' users. Enter your selection: ')) - 1
            receiver_key = self.other_users_list[index]
        # The receiver key is stored as a PEM string; the supplier key is
        # kept as a key object (compare the lookups in track_item).
        receiver_puk = receiver_key.publickey().exportKey("PEM").decode('utf-8')
        item_id = input('Enter the ID of the tracked item: ')
        # Acquiring the details for the transactions
        supplier_puk = supplier_key.publickey()
        timestamp = date.datetime.now()
        # Generating the message text and the signature
        message = str(supplier_puk.exportKey("PEM").decode('utf-8')) + \
            str(receiver_puk) + item_id + str(timestamp)
        hash_message = SHA256.new(message.encode('utf-8'))
        supplier_prk = RSA.import_key(supplier_key.exportKey("DER"))
        signature = pkcs1_15.new(supplier_prk).sign(hash_message)
        # Creating a new transaction
        new_transaction = Transaction(
            supplier_puk, receiver_puk, item_id, timestamp, signature)
        self.utxo_array.append(new_transaction)

    # The function for mining the block in the supply blockchain
    def mine_block(self):
        """Validate a random number of pending transactions and mine a block.

        Transactions are taken from the front of the UTXO pool: each must
        pass the signature check, then either the previous-owner check (for
        known item codes) or the manufacturer-credentials check (for new
        ones). A block is appended only if at least one transaction passed.
        """
        max_range = len(self.utxo_array)
        transaction_amount = random.randint(0, max_range)
        transaction_array = []
        print('\nThe number of selected transactions for the block is: ' +
              str(transaction_amount))
        if(transaction_amount):
            for index in range(0, transaction_amount):
                # All verifications for the transactions
                if(self.utxo_array[0].verify_transaction()):
                    print('\nThe sign verification for transaction #' +
                          str(index + 1) + ' was true!')
                    if(self.utxo_array[0].check_item_code(self)):
                        print(
                            'The item code has been found. Checking the previous owner details.')
                        if(self.utxo_array[0].check_previous_owner(self)):
                            print('Verification of previous owner has been done!')
                            transaction_array.append(self.utxo_array[0])
                        else:
                            print(
                                'Verification of previous owner has failed!')
                    else:
                        print(
                            'The item code was not found on blockchain. Checking for manufacturer credentials.')
                        if(self.utxo_array[0].check_manufacturer_credentials(self)):
                            print(
                                'The new item has been added under the manufacturer.')
                            transaction_array.append(self.utxo_array[0])
                        else:
                            print(
                                'The transaction key is not authorised as a manufacturer!')
                else:
                    print('The sign verification for transaction #' +
                          str(index + 1) + ' was false!')
                # Consume the transaction whether or not it was accepted.
                self.utxo_array.pop(0)
        if(len(transaction_array) != 0):
            new_block = Block(self.global_index, date.datetime.now(
            ), transaction_array, self.blockchain[self.global_index - 1].hash)
            self.global_index = self.global_index + 1
            self.blockchain.append(new_block)
        else:
            # Prevent addition of blocks with no transactions
            print(
                'No transactions have been selected and therefore no block has been added!')

    def add_block(self, block):
        """Append an externally constructed block to the chain."""
        self.blockchain.append(block)

    # This function is used to create genesis block
    def create_genesis_block(self):
        """Build and return the genesis block (index 0); the caller is
        responsible for appending it, e.g. via add_block()."""
        self.global_index = self.global_index + 1
        print('\n\nThe genesis block is being created.')
        return Block(0, date.datetime.now(), "GENESIS BLOCK", "0")
| [
"gupta.15@iitj.ac.in"
] | gupta.15@iitj.ac.in |
482d60973fda540ee9895efa199a05d9b8cd7193 | 4163e897c1c70bbe9d2c074fa91f233612a59347 | /catalog/database_setup1.py | 53c36acd3b5c8f8fe48a101e31f6d2b8e9ae1405 | [] | no_license | JahnaviChandrapatla/catalog | 28419986e938bd31773909ab52c20c4573e06e55 | 239141b54c775d8e19265377e332738f7ef96a82 | refs/heads/master | 2020-04-28T16:10:11.929665 | 2019-03-13T10:52:56 | 2019-03-13T10:52:56 | 175,400,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | import sys
import os
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine
Base = declarative_base()
class GmailUser(Base):
    """SQLAlchemy model for an application user (Gmail account holder)."""
    __tablename__ = 'gmailuser'
    id = Column(Integer, primary_key=True)          # surrogate primary key
    name = Column(String(150), nullable=False)      # user's name (required)
    email = Column(String(205), nullable=False)     # e-mail address (required)
class Television(Base):
    """A named television record, linked to the user who created it."""
    __tablename__ = 'television'
    id = Column(Integer, primary_key=True)          # surrogate primary key
    name = Column(String(333), nullable=False)      # television name (required)
    user_id = Column(Integer, ForeignKey('gmailuser.id'))  # owning user
    gmailuser = relationship(GmailUser, backref="television")
    @property
    def serialize(self):
        """Return objects data in easily serializeable formats"""
        return {
            'name': self.name,
            'id': self.id
        }
class Tvlist(Base):
    """A TV listing entry belonging to a Television record and a user."""
    __tablename__ = 'tvlist'
    id = Column(Integer, primary_key=True)          # surrogate primary key
    tvtypes = Column(String(255), nullable=False)   # listing type (required)
    description = Column(String(555))               # free-text description
    price = Column(String(900))                     # price, stored as text
    rating = Column(String(150))                    # rating, stored as text
    inches = Column(String(1000))                   # screen size, stored as text
    date = Column(DateTime, nullable=False)         # creation/listing date
    televisionid = Column(Integer, ForeignKey('television.id'))
    # cascade='all, delete' on the backref: deleting a Television also
    # deletes the Tvlist rows attached to it.
    television = relationship(
        Television, backref=backref('tvlist', cascade='all, delete'))
    user_id = Column(Integer, ForeignKey('gmailuser.id'))  # owning user
    gmailuser = relationship(GmailUser, backref="tvlist")
    @property
    def serialize(self):
        """Return objects data in easily serializeable formats"""
        return {
            'tvtypes': self. tvtypes,
            'description': self. description,
            'price': self. price,
            'rating': self. rating,
            'inches': self. inches,
            'date': self. date,
            'id': self. id
        }
# Create (or open) the SQLite database file and create all tables declared above.
engin = create_engine('sqlite:///television.db')
Base.metadata.create_all(engin)
| [
"chjahnavi80@gmail.com"
] | chjahnavi80@gmail.com |
fc1746cb1561850e95f01014fd657000a32f9695 | 5b10b6efa049014f0d00f81b148d94c429286b66 | /DRFViewset/manage.py | b746ebba33addb3132d357d7f25d2d28be599469 | [] | no_license | Ruchika-Munde/Rest_Task | dff657aed041ac6925590f423301f0cae7599f6c | ea741889927ed0fa2a1ba9c2311304671680c6bf | refs/heads/master | 2022-12-21T05:21:00.623464 | 2020-09-09T08:06:24 | 2020-09-09T08:06:24 | 294,044,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administration utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRFViewset.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Run the Django CLI only when this file is executed directly.
if __name__ == '__main__':
    main()
| [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
59f35ce862fba5572d3fa349af79c857f80998f2 | 5b7af6548668085da9a6ab86f564538ee73c4865 | /build/scripts/slave/recipe_modules/sync_submodules/resources/deps2submodules.py | 7596609ca687bffb82ee3c78743d82aa56d0c70d | [
"BSD-3-Clause"
] | permissive | elastos/Elastos.APP.Android.ShiJiuTV | 463a986450a915f7b3066e6a03aca903cf56f69b | f77189a2b8df86028adc68105988710d16ce012b | refs/heads/master | 2023-03-18T03:11:58.337349 | 2018-03-12T08:50:57 | 2018-03-13T11:10:27 | 124,007,751 | 0 | 1 | null | 2022-10-03T03:30:29 | 2018-03-06T02:21:25 | null | UTF-8 | Python | false | false | 5,186 | py | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Read DEPS and use the information to update git submodules"""
import argparse
import logging
import os
import re
import subprocess
import sys
from deps_utils import GetDepsContent
SHA1_RE = re.compile(r'[0-9a-fA-F]{40}')
SHA1_REF_RE = re.compile(r'^([0-9a-fA-F]{40})\s+refs/[\w/]+\s*')
def SanitizeDeps(submods, path_prefix):
  """
  Look for conflicts (primarily nested submodules) in submodule data. In the
  case of a conflict, the higher-level (shallower) submodule takes precedence.

  Returns a new dict keyed by the submodule path relative to |path_prefix|;
  the input dict is not modified.
  """
  ret = {}
  # .items() instead of the Python 2-only .iteritems() so this also runs
  # under Python 3; behaviour is identical under Python 2.
  for name, value in submods.items():
    if not name.startswith(path_prefix):
      logging.warning('Dropping submodule "%s", because it is outside the '
                      'working directory "%s"', name, path_prefix)
      continue
    # Strip the prefix from the submodule name.
    name = name[len(path_prefix):]
    parts = name.split('/')[:-1]
    while parts:
      may_conflict = '/'.join(parts)
      # |submods| is keyed by un-stripped paths, so re-attach the prefix
      # before testing whether an ancestor directory is itself a submodule.
      # (Previously the stripped name was tested, so the check never fired.)
      if path_prefix + may_conflict in submods:
        logging.warning('Dropping submodule "%s", because it is nested in '
                        'submodule "%s"', name, may_conflict)
        break
      parts.pop()
    else:
      # No ancestor submodule found: keep the entry. The while/else makes
      # sure entries reported as dropped above really are dropped (the old
      # code added them unconditionally after the loop).
      ret[name] = value
  return ret
def CollateDeps(deps_content):
  """
  Take the output of deps_utils.GetDepsContent and return a hash of:

  { submod_name : [ [ submod_os, ... ], submod_url, submod_sha1 ], ... }
  """
  # Split 'url@revision' into [url, revision]; a falsy url maps to
  # [None, None].
  spliturl = lambda x: list(x.partition('@')[0::2]) if x else [None, None]
  submods = {}
  # Non-OS-specific DEPS always override OS-specific deps. This is an interim
  # hack until there is a better way to handle OS-specific DEPS.
  # .items() instead of the Python 2-only .iteritems() so this also runs
  # under Python 3; behaviour is identical under Python 2.
  for (deps_os, val) in deps_content[1].items():
    for (dep, url) in val.items():
      submod_data = submods.setdefault(dep, [[]] + spliturl(url))
      submod_data[0].append(deps_os)
  for (dep, url) in deps_content[0].items():
    submods[dep] = [['all']] + spliturl(url)
  return submods
def WriteGitmodules(submods):
  """
  Take the output of CollateDeps, use it to write a .gitmodules file and
  return a map of submodule name -> sha1 to be added to the git index.
  """
  adds = {}
  with open('.gitmodules', 'w') as fh:
    # NOTE: the loop variable deliberately shadows the os module to match
    # the [os_list, url, sha1] record layout produced by CollateDeps.
    # fh.write()/.items() instead of the Python 2-only `print >> fh` and
    # .iteritems() so this also runs under Python 3; output is identical.
    for name, (os, url, sha1) in sorted(submods.items()):
      if not url:
        continue
      if url.startswith('svn://'):
        logging.warning('Skipping svn url %s', url)
        continue
      fh.write('[submodule "%s"]\n' % name)
      fh.write('\tpath = %s\n' % name)
      fh.write('\turl = %s\n' % url)
      fh.write('\tos = %s\n' % ','.join(os))
      if not sha1:
        sha1 = 'master'
      # Resolve the ref to a sha1 hash.
      if not SHA1_RE.match(sha1):
        if sha1.startswith('origin/'):
          sha1 = sha1[7:]
        # Decode so the str regexes below work on Python 3 as well
        # (check_output returns bytes there).
        output = subprocess.check_output(
            ['git', 'ls-remote', url, sha1]).decode('utf-8')
        match = SHA1_REF_RE.match(output)
        if not match:
          logging.warning('Could not resolve ref %s for %s', sha1, url)
          continue
        logging.info('Resolved %s for %s to %s', sha1, url, match.group(1))
        sha1 = match.group(1)
      logging.info('Added submodule %s revision %s', name, sha1)
      adds[name] = sha1
  subprocess.check_call(['git', 'add', '.gitmodules'])
  return adds
def RemoveObsoleteSubmodules():
  """
  Delete from the git repository any submodules which aren't in .gitmodules.
  """
  index_listing = subprocess.check_output(['git', 'ls-files', '-s'])
  for entry in index_listing.splitlines():
    # Gitlink (submodule) entries carry mode 160000 in the index.
    if not entry.startswith('160000'):
      continue
    _, _, _, path = entry.split()
    lookup = ['git', 'config', '-f', '.gitmodules',
              '--get-regexp', 'submodule\..*\.path', '^%s$' % path]
    try:
      with open(os.devnull, 'w') as nullpipe:
        subprocess.check_call(lookup, stdout=nullpipe)
    except subprocess.CalledProcessError:
      # No longer declared in .gitmodules: drop it from the index.
      subprocess.check_call(['git', 'update-index', '--force-remove', path])
def main():
  """Parse arguments, rewrite .gitmodules from DEPS and update the git index.

  Returns 0 on success (used as the process exit code).
  """
  logging.basicConfig(level=logging.INFO)
  parser = argparse.ArgumentParser()
  parser.add_argument('--path-prefix',
                      default=os.path.basename(os.getcwd()) + '/',
                      help='Ignore any dep outside this prefix. DEPS files can '
                           "specify dependencies in the repo's parent directory, "
                           'so the default here is to ignore anything outside the '
                           "current directory's basename")
  parser.add_argument('deps_file', default='DEPS', nargs='?')
  options = parser.parse_args()
  if not options.path_prefix.endswith('/'):
    parser.error("--path-prefix '%s' must end with a '/'" % options.path_prefix)
  adds = WriteGitmodules(
      SanitizeDeps(
          CollateDeps(GetDepsContent(options.deps_file)),
          options.path_prefix))
  RemoveObsoleteSubmodules()
  # .items() instead of the Python 2-only .iteritems() so the script also
  # runs under Python 3; behaviour is identical under Python 2.
  for submod_path, submod_sha1 in adds.items():
    subprocess.check_call(['git', 'update-index', '--add',
                           '--cacheinfo', '160000', submod_sha1, submod_path])
  return 0
sys.exit(main())
| [
"xiaokun.mengxk@qcast.cn"
] | xiaokun.mengxk@qcast.cn |
335b5c7ee20102750c5ca4e77c0540a53d588116 | ef4a4c8de95516700134a45800238de9298e1485 | /za7.py | 5661752b9d9e5178e0e04f0b3b082aafd9f9643a | [] | no_license | nikolaj74-hub/lessons | a45d67d380982d245f5950fe6eef3041c7ffbd2e | 54437b8e8063668017d7e29612c0623adb8fce94 | refs/heads/master | 2023-01-23T19:11:18.680790 | 2020-12-04T13:46:02 | 2020-12-04T13:46:02 | 311,939,032 | 1 | 0 | null | 2020-12-04T13:42:39 | 2020-11-11T10:38:48 | Python | UTF-8 | Python | false | false | 440 | py |
from functools import reduce
print(reduce(lambda a, b: int(a) * int(b), range(1, 1+int(input('введите номер эл')))))
###############################################################################################
from math import factorial
def fact():
for el in {factorial(int(input('введите номер элемента')))}:
yield el
f = fact()
print(f)
for el in f:
print(el)
| [
"noreply@github.com"
] | noreply@github.com |
d91c605901bc0be775fc45b509e24954e24ab267 | fecfcc0765a9b01fa3d3621a823037266064d71a | /runnerapi/tests.py | 69ba3ead60cd6f21f94e1802eb75decb8fc9e418 | [] | no_license | agel122/runs | 7f72f7fee7490bdaed6c33872e266cad6e8cb7da | a361bf472475d73b2dd889d2634c7b320acd1695 | refs/heads/master | 2023-08-06T07:42:15.636787 | 2021-09-14T20:13:35 | 2021-09-14T20:13:35 | 406,510,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,229 | py | import json
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from .models import Run
from .views import AverageData, AllData
from .serializers import RunSerializer, UserSerializer
class RegistrationTestCase(APITestCase):
    """Exercises the /user_create/ registration endpoint."""

    def test_registration_ok(self):
        """A complete payload registers successfully (201)."""
        payload = {
            'username': 'testuser',
            'email': 'testuser@email.com',
            'password': 'testpass',
        }
        response = self.client.post('/user_create/', payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_registration_nok(self):
        """A payload missing the password is rejected (400)."""
        payload = {
            'username': 'testuser',
            'email': 'testuser@email.com',
        }
        response = self.client.post('/user_create/', payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class LoginTestCase(APITestCase):
    """Token-authenticated CRUD and aggregate tests for the runs API.

    Several tests assume the first run created within a test gets pk=1
    (fresh test database per test), so the order of client calls matters.
    """
    # Resolved once at class-definition time; list endpoint of the runs API.
    url1 = reverse('all_runs-list')

    def setUp(self):
        """Create two users, issue a token for user1 and authenticate as them."""
        self.user1 = User.objects.create_user(username='testuser88',
                                              password='testpass88',
                                              email='testuser88@email.com')
        self.user2 = User.objects.create_user(username='testuser99',
                                              password='testpass99',
                                              email='testuser99@email.com')
        self.token = Token.objects.create(user=self.user1)
        self.api_authentication()

    def api_authentication(self):
        """Attach user1's token to every subsequent client request."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)

    def test_all_runs_list_authonticated(self):
        """Authenticated GET of the run list succeeds (200)."""
        response = self.client.get(self.url1)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_all_runs_list_unauthonticated(self):
        """Unauthenticated GET of the run list is rejected (401)."""
        self.client.force_authenticate(user=None)
        response = self.client.get(self.url1)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_all_runs_post_authonticated(self):
        """Authenticated POST creates a run (201)."""
        response = self.client.post(reverse('all_runs-list'),
                                    {"date": "2021-09-11",
                                     "distance": 20,
                                     "time": 20})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_all_runs_post_unauthonticated(self):
        """Unauthenticated POST is rejected (401)."""
        self.client.force_authenticate(user=None)
        response = self.client.post(reverse('all_runs-list'),
                                    {"date": "2021-09-11",
                                     "distance": 20,
                                     "time": 20})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_all_runs_put_authonticated(self):
        """Authenticated PUT updates the run created above (expects 204)."""
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        response = self.client.put(reverse('all_runs-detail', kwargs={'pk': 1}),
                                   {"date": "2021-09-11",
                                    "distance": 22,
                                    "time": 22})
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_all_runs_put_unauthonticated(self):
        """Unauthenticated PUT is rejected (401); the POST also fails here."""
        self.client.force_authenticate(user=None)
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        response = self.client.put(reverse('all_runs-detail', kwargs={'pk': 1}),
                                   {"date": "2021-09-11",
                                    "distance": 22,
                                    "time": 22})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_all_runs_delete_authonticated(self):
        """Authenticated DELETE removes the run created above (204)."""
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        response = self.client.delete(reverse('all_runs-detail', kwargs={'pk': 1}))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_all_runs_delete_unauthonticated(self):
        """Unauthenticated DELETE is rejected (401)."""
        self.client.force_authenticate(user=None)
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        response = self.client.delete(reverse('all_runs-detail', kwargs={'pk': 1}))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_average_data_get_authonticated(self):
        """Aggregates over two runs (20km/20min and 10km/10min) are correct."""
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 10,
                          "time": 10})
        response = self.client.get(reverse('average_data'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['average_distance in m'], 15000)
        self.assertEqual(response.data['max_distance in km'], {"id": 1,
                                                               "owner": 1,
                                                               "date": "2021-09-11",
                                                               "distance": "20.000",
                                                               "time": 20},)
        # NOTE(review): duplicate of the 'average_distance in m' assertion above.
        self.assertEqual(response.data['average_distance in m'], 15000)
        self.assertEqual(response.data['average_time in min'], 15)
        self.assertEqual(response.data['average_speed m/min'], 1000)
        self.assertEqual(response.data['max_speed in m/min'], 1000)

    def test_average_data_get_unauthonticated(self):
        """Unauthenticated aggregate request is rejected (401)."""
        self.client.force_authenticate(user=None)
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        response = self.client.get(reverse('average_data'))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_average_data_get_useronly(self):
        """Aggregates include only the requesting user's runs (user2 here)."""
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 20,
                          "time": 20})
        self.client.force_authenticate(user=self.user2)
        self.client.post(reverse('all_runs-list'),
                         {"date": "2021-09-11",
                          "distance": 10,
                          "time": 10})
        response = self.client.get(reverse('average_data'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['average_distance in m'], 10000)
| [
"agel122@yandex.ru"
] | agel122@yandex.ru |
35454d5038ec89c97effecea58a551c18d110a92 | 642a087b681bddcdee1beabb255c1401f427697d | /TemporallyCoherent/update_color.py | 84ed6dbd16dc27adfe6cba1003afe901a4e1d334 | [] | no_license | wuk32/VideoCompletion | 75a1d8e25651906935314d7f314b14e3ba151c15 | 6b81b4eb078c948fbc5c5c10d0454f52b5a9f81c | refs/heads/master | 2023-08-16T12:17:24.268809 | 2021-10-07T12:33:30 | 2021-10-07T12:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | # coding=utf8
def update_color(z, r1, r2, alpha):
    """
    Args:
        z: spatial voting
        r1: forward temporal neighbor
        r2: backward temporal neighbor
        I: update color
    argmin || I - X ||_2^2 + alpha * \phi(||I - R1||_2^2) + alpha*\phi(||I - R2||_2^2)
    """
    # NOTE(review): unfinished work-in-progress. Several assignments below
    # have no right-hand side, so this file does not parse; the body also
    # refers to Z/R1/R2 (upper case) while the parameters are z/r1/r2.
    # TODO: complete the weight computation and unify the naming before use.
    numIter = 5
    eps = 1e-3
    v1 =  # TODO(review): missing right-hand side in the original source
    v2 =  # TODO(review): missing right-hand side in the original source
    I_k = Z
    for k in range(numIter):
        dZ = Z - I_k
        dR1 = R1 - I_k
        dR2 = R2 - I_k
        # Compute the weights
        w1 = alpha * 0.5 /  # TODO(review): incomplete expression in the original source
        w2 = alpha * 0.5
        # Put invalid flow neighbor to zeros
        w1 =  # TODO(review): missing right-hand side in the original source
        w2 =  # TODO(review): missing right-hand side in the original source
        # Weighted average
        dI = (dZ + dR1 * w1 + dR2 * w2) / (1 + w1 + w2)
        I_k = I_k + dI
    return I_k
"wukuncs@163.com"
] | wukuncs@163.com |
fc81fc7ae77bb68bbe360d676d6ea0f9dc2ffdda | 867796f20586cfa70422945d98e7d5e99edbabc2 | /contactista/migrations/ed99772734e1_initial_revision.py | a7f31619c7e62a4b989f9d250739bd7809b112ba | [
"MIT"
] | permissive | rizplate/contactista | 500cf7f640b3db94d0b49b921e4b09abdfc56d5b | 8b3030487518cd4767078703aee04041d2004725 | refs/heads/master | 2020-03-28T11:37:02.932371 | 2017-09-15T18:55:52 | 2017-09-15T18:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,048 | py | """Initial revision
Revision ID: ed99772734e1
Revises:
Create Date: 2017-08-01 12:48:40.754913
"""
import os
import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence, DropSequence
# revision identifiers, used by Alembic.
revision = 'ed99772734e1'
down_revision = None
branch_labels = ('default',)
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pronouns_table = op.create_table('pronouns',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('subject_pronoun', sa.String(length=50), nullable=False),
sa.Column('object_pronoun', sa.String(length=50), nullable=False),
sa.Column('possessive_determiner', sa.String(length=50), nullable=False),
sa.Column('possessive_pronoun', sa.String(length=50), nullable=False),
sa.Column('reflexive_pronoun', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
role_table = op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('contact',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('note', sa.Text(), nullable=True),
sa.Column('note_format', sa.String(length=20), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
op.create_table('contact_email',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.Column('email', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'category')
)
op.execute(CreateSequence(Sequence('contact_email_position')))
op.create_table('contact_name',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'category')
)
op.execute(CreateSequence(Sequence('contact_name_position')))
op.create_table('contact_pronouns',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('pronouns_id', sa.Integer(), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.ForeignKeyConstraint(['pronouns_id'], ['pronouns.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'pronouns_id')
)
op.execute(CreateSequence(Sequence('contact_pronouns_position')))
# ### end Alembic commands ###
# Seed database with default data
op.bulk_insert(role_table, rows=[
{"name": "superuser", "description": "Unlimited access"},
])
pronouns_fixture_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"fixtures",
"pronouns.json",
)
with open(pronouns_fixture_path) as f:
pronouns_list = json.load(f)
pronouns_objs = [{
"subject_pronoun": line[0],
"object_pronoun": line[1],
"possessive_determiner": line[2],
"possessive_pronoun": line[3],
"reflexive_pronoun": line[4],
} for line in pronouns_list]
op.bulk_insert(pronouns_table, rows=pronouns_objs)
def downgrade():
for seqname in ('contact_pronouns_position', 'contact_name_position',
'contact_email_position',
):
op.execute(DropSequence(Sequence(seqname)))
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('contact_pronouns')
op.drop_table('contact_name')
op.drop_table('contact_email')
op.drop_table('roles_users')
op.drop_table('contact')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_table('user')
op.drop_table('role')
op.drop_table('pronouns')
# ### end Alembic commands ###
| [
"david@davidbaumgold.com"
] | david@davidbaumgold.com |
09ab8328a243514c07ceb46b257ce729e4296604 | f8a5f208d4c82064afa27a4420f61c7847e2addb | /colab2.py | 5626528910531c907157dae977947e81235f79e9 | [] | no_license | jackwh92/colabjack | 9c24129aca923b7b8f6e1e4147e7b8bca718e086 | cdfe32df4ef9db88868d6f5e7725f5fd68500ade | refs/heads/master | 2020-04-11T22:48:36.897512 | 2019-01-03T14:36:50 | 2019-01-03T14:36:50 | 162,148,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | def plusfunc(a, b):
return a + b
#fshsa
#newmessage | [
"jack.hollies@digital.hmrc.gov.uk"
] | jack.hollies@digital.hmrc.gov.uk |
1b90ef45b0337dcc2d3d4e60ba134bda6a7afd66 | 4522a0843c55438533f9e9af802080c702337e78 | /fenwick-tree/test.py | be0c4354ebdc8bbb8c072f3e04cfda78cc0a9347 | [] | no_license | lgdelacruz92/LeetCode-Challenges | 7f84f4b8b84b8f38feebbede6f70b293e6c8e3a5 | 703c3ea995ef52a2d9702fed697a25671e35ff45 | refs/heads/master | 2023-04-08T15:07:08.255375 | 2021-04-13T16:04:38 | 2021-04-13T16:04:38 | 291,032,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | query = '''SELECT * FROM (
SELECT -COLUMNS-
FROM zyBookContentSections
JOIN CanonicalSections USING(canonical_section_id)
JOIN ContentResourceCanonicalSections USING(canonical_section_id)
JOIN ContentResources USING(content_resource_id)
JOIN ContentResourceTypes USING(resource_type_id)
WHERE zybook_id = %s
ORDER BY chapter_number, section_number, ordering) AS content_resource_references
WHERE content_resource_reference IS NOT NULL ORDER BY section_number ASC
'''
columns = [
'CanonicalSections.guid AS canonical_section_guid',
'ContentResources.caption AS caption',
'ContentResources.guid AS content_resource_guid',
'payload->>\'$.ref\' AS content_resource_reference',
'payload->>\'$.html[*].guid\' AS sub_content_resource_guids',
'payload->>\'$.html[*].ref\' AS sub_content_resource_refs',
'payload->>\'$.html[*].enumid\' AS enumids',
'zyBookContentSections.chapter_number',
'zyBookContentSections.section_number AS section_number',
'content_resource_id',
'payload->>\'$.name\' AS name',
'ContentResourceCanonicalSections.ordering']
query = query.replace('-COLUMNS-',',\n\t'.join(columns))
print(query) | [
"lgdelacruz92@gmail.com"
] | lgdelacruz92@gmail.com |
16623631176c53072ca491e3ffb09b9bf44c28a6 | 1c397151e98f11f68c1fe548b3610d889eace7e0 | /python/modules/alt_splice/analyze.py | 2477b14be947c3a10e7f5de2a33d4d5c823beb2e | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | abaten/spladder | aaec1e0b21506c11410f2a955a9c9bbf19799f52 | 67d0e9ca9893911a883a199f940ba61dfadb3345 | refs/heads/master | 2021-01-18T00:18:45.487030 | 2016-03-15T20:39:14 | 2016-03-15T20:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,402 | py | import sys
import os
import scipy as sp
import cPickle
import h5py
if __name__ == "__main__":
__package__ = "modules.alt_splice"
### local imports
from verify import *
from write import *
from ..rproc import rproc, rproc_wait
from ..helpers import compute_psi
def _prepare_count_hdf5(CFG, OUT, events, event_features):
### load gene info
if 'spladder_infile' in CFG and os.path.exists(CFG['spladder_infile']):
(genes, inserted) = cPickle.load(open(CFG['spladder_infile']))
else:
prune_tag = ''
if CFG['do_prune']:
prune_tag = '_pruned'
validate_tag = ''
if CFG['validate_splicegraphs']:
validate_tag = '.validated'
(genes, inserted) = cPickle.load(open('%s/spladder/genes_graph_conf%i.%s%s%s.pickle' % (CFG['out_dirname'], CFG['confidence_level'], CFG['merge_strategy'], validate_tag, prune_tag)))
### write strain and gene indices to hdf5
OUT.create_dataset(name='strains', data=CFG['strains'])
feat = OUT.create_group(name='event_features')
for f in event_features:
feat.create_dataset(name=f, data=sp.array(event_features[f], dtype='str'))
OUT.create_dataset(name='gene_names', data=sp.array([x.name for x in genes], dtype='str'))
OUT.create_dataset(name='gene_chr', data=sp.array([x.chr for x in genes], dtype='str'))
OUT.create_dataset(name='gene_strand', data=sp.array([x.strand for x in genes], dtype='str'))
OUT.create_dataset(name='gene_pos', data=sp.array([[x.start, x.stop] for x in genes], dtype='int'))
def analyze_events(CFG, event_type):
if CFG['rproc'] and not os.path.exists('%s/event_count_chunks' % CFG['out_dirname']):
os.makedirs('%s/event_count_chunks' % CFG['out_dirname'])
for replicate in CFG['replicate_idxs']:
print 'confidence %i / replicate %i' % (CFG['confidence_level'], replicate)
if len(CFG['replicate_idxs']) > 1:
rep_tag = '_R%i' % r_idx
else:
rep_tag = ''
fn_out = '%s/%s_%s%s_C%i.pickle' % (CFG['out_dirname'], CFG['merge_strategy'], event_type, rep_tag, CFG['confidence_level'])
fn_out_conf = fn_out.replace('.pickle', '.confirmed.pickle')
fn_out_count = fn_out.replace('.pickle', '.counts.hdf5')
### define result files
fn_out_txt = fn_out.replace('.pickle', '.txt')
fn_out_struc = fn_out.replace('.pickle', '.struc.txt')
fn_out_conf_txt = fn_out_conf.replace('.pickle', '.txt')
fn_out_conf_bed = fn_out_conf.replace('.pickle', '.bed')
fn_out_conf_struc = fn_out_conf.replace('.pickle', '.struc.txt')
fn_out_conf_tcga = fn_out_conf.replace('.pickle', '.tcga.txt')
fn_out_conf_gff3 = fn_out_conf.replace('.pickle', '.gff3')
### check if there is anything to do
if os.path.exists(fn_out_txt) and os.path.exists(fn_out_conf_txt) and os.path.exists(fn_out_conf_tcga) and os.path.exists(fn_out_conf_gff3):
print 'All output files for %s exist.\n' % event_type
continue
event_features = {'mult_exon_skip': ['valid', 'exon_pre_cov', 'exons_cov', 'exon_aft_cov', 'exon_pre_exon_conf', 'exon_exon_aft_conf', 'exon_pre_exon_aft_conf', 'sum_inner_exon_conf', 'num_inner_exon', 'len_inner_exon'],
'intron_retention': ['valid', 'intron_cov', 'exon1_cov', 'exon2_cov', 'intron_conf', 'intron_cov_region'],
'exon_skip': ['valid', 'exon_cov', 'exon_pre_cov', 'exon_aft_cov', 'exon_pre_exon_conf', 'exon_exon_aft_conf', 'exon_pre_exon_aft_conf'],
'mutex_exons': ['valid', 'exon_pre_cov', 'exon1_cov', 'exon2_cov', 'exon_aft_cov', 'exon_pre_exon1_conf', 'exon_pre_exon2_conf', 'exon1_exon_aft_conf', 'exon2_exon_aft_conf'],
'alt_3prime': ['valid', 'exon_diff_cov', 'exon_const_cov', 'intron1_conf', 'intron2_conf'],
'alt_5prime': ['valid', 'exon_diff_cov', 'exon_const_cov', 'intron1_conf', 'intron2_conf']}
### check, if confirmed version exists
if not os.path.exists(fn_out_count):
events_all_ = cPickle.load(open(fn_out, 'r'))
if isinstance(events_all_, tuple):
events_all = events_all_[0]
events_all_strains = events_all_[1]
else:
events_all = events_all_
events_all_strains = None
### DEBUG!!!
#for xx in xrange(events_all.shape[0]):
# events_all[xx].verified = []
### add strain information, so we can do two way chunking!
if events_all_strains is None:
events_all_strains = CFG['strains']
### handle case where we did not find any event of this type
if sp.sum([x.event_type == event_type for x in events_all]) == 0:
OUT = h5py.File(fn_out_count, 'w')
OUT.create_dataset(name='event_counts', data=[0])
_prepare_count_hdf5(CFG, OUT, events_all, event_features)
OUT.close()
confirmed_idx = sp.array([], dtype='int')
else:
OUT = h5py.File(fn_out_count, 'w')
if not CFG['rproc']:
#events_all = verify_all_events(events_all, range(len(CFG['strains'])), CFG['bam_fnames'][replicate, :], event_type, CFG)
# TODO handle replicate setting
(events_all, counts) = verify_all_events(events_all, range(len(CFG['strains'])), CFG['bam_fnames'], event_type, CFG)
psi = sp.empty((counts.shape[0], counts.shape[2]), dtype='float')
for i in xrange(counts.shape[2]):
psi[:, i] = compute_psi(counts[:, :, i], event_type, CFG)
OUT.create_dataset(name='event_counts', data=counts, compression='gzip')
OUT.create_dataset(name='psi', data=psi, compression='gzip')
OUT.create_dataset(name='gene_idx', data=sp.array([x.gene_idx for x in events_all], dtype='int'), compression='gzip')
_prepare_count_hdf5(CFG, OUT, events_all, event_features)
else:
jobinfo = []
PAR = dict()
chunk_size_events = 1000
chunk_size_strains = 500
for i in range(0, events_all.shape[0], chunk_size_events):
idx_events = sp.arange(i, min(i + chunk_size_events, events_all.shape[0]))
for j in range(0, len(CFG['strains']), chunk_size_strains):
idx_strains = sp.arange(j, min(j + chunk_size_strains, len(CFG['strains'])))
PAR['ev'] = events_all[idx_events].copy()
PAR['strain_idx'] = idx_strains
#PAR['list_bam'] = CFG['bam_fnames'][replicate, :]
# TODO handle replicate setting
PAR['list_bam'] = CFG['bam_fnames']
PAR['out_fn'] = '%s/event_count_chunks/%s_%i_%i_R%i_C%i.pickle' % (CFG['out_dirname'], event_type, i, j, replicate, CFG['confidence_level'])
PAR['event_type'] = event_type
PAR['CFG'] = CFG
if os.path.exists(PAR['out_fn']):
print 'Chunk event %i, strain %i already completed' % (i, j)
else:
print 'Submitting job %i, event chunk %i, strain chunk %i' % (len(jobinfo) + 1, i, j)
jobinfo.append(rproc('verify_all_events', PAR, 30000, CFG['options_rproc'], 60 * 5))
rproc_wait(jobinfo, 20, 1.0, 1)
events_all_ = []
gene_idx_ = []
print 'Collecting results from chunks ...'
for i in range(0, events_all.shape[0], chunk_size_events):
idx_events = sp.arange(i, min(i + chunk_size_events, events_all.shape[0]))
for j in range(0, len(CFG['strains']), chunk_size_strains):
idx_strains = sp.arange(j, min(j + chunk_size_strains, len(CFG['strains'])))
print '\r%i (%i), %i (%i)' % (i, events_all.shape[0], j, len(CFG['strains']))
out_fn = '%s/event_count_chunks/%s_%i_%i_R%i_C%i.pickle' % (CFG['out_dirname'], event_type, i, j, replicate, CFG['confidence_level'])
if not os.path.exists(out_fn):
print >> sys.stderr, 'ERROR: not finished %s' % out_fn
sys.exit(1)
ev_, counts_ = cPickle.load(open(out_fn, 'r'))
if j == 0:
ev = ev_
counts = counts_
else:
counts = sp.r_[counts, counts_]
for jj in range(len(ev_)):
ev[jj].verified = sp.r_[ev[jj].verified, ev_[jj].verified]
psi = sp.empty((counts.shape[0], counts.shape[2]), dtype='float')
for j in xrange(counts.shape[2]):
psi[:, j] = compute_psi(counts[:, :, j], event_type, CFG)
if i == 0:
OUT.create_dataset(name='event_counts', data=counts, maxshape=(len(CFG['strains']), len(event_features[event_type]), None), compression='gzip')
OUT.create_dataset(name='psi', data=sp.atleast_2d(psi), maxshape=(psi.shape[0], None), compression='gzip')
else:
tmp = OUT['event_counts'].shape
OUT['event_counts'].resize((tmp[0], tmp[1], tmp[2] + len(ev)))
OUT['event_counts'][:, :, tmp[2]:] = counts
tmp = OUT['psi'].shape
OUT['psi'].resize((tmp[0], tmp[1] + len(ev)))
OUT['psi'][:, tmp[1]:] = psi
events_all_ = sp.r_[events_all_, ev]
gene_idx_ = sp.r_[gene_idx_, [x.gene_idx for x in ev]]
assert(events_all.shape[0] == events_all_.shape[0])
assert(sp.all([sp.all(events_all[e].exons1 == events_all_[e].exons1) for e in range(events_all.shape[0])]))
OUT.create_dataset(name='gene_idx', data=gene_idx_)
events_all = events_all_
_prepare_count_hdf5(CFG, OUT, events_all, event_features)
### write more event infos to hdf5
if event_type == 'exon_skip':
event_pos = sp.array([x.exons2.ravel() for x in events_all])
elif event_type == 'intron_retention':
event_pos = sp.array([x.exons2.ravel() for x in events_all])
elif event_type in ['alt_3prime', 'alt_5prime']:
event_pos = sp.array([unique_rows(sp.c_[x.exons1, x.exons2]).ravel() for x in events_all])
elif event_type == 'mult_exon_skip':
event_pos = sp.array([x.exons2[[0, 1, -2, -1], :].ravel() for x in events_all])
elif event_type == 'mutex_exons':
event_pos = sp.array([sp.c_[x.exons1[0, :], x.exons1[1, :], x.exons2[1, :], x.exons2[2, :]] for x in events_all])
OUT.create_dataset(name='event_pos', data=event_pos)
for i in range(events_all.shape[0]):
events_all[i].num_verified = sp.sum(events_all[i].verified, axis=0)
events_all[i].confirmed = sp.array(events_all[i].num_verified).min()
num_verified = sp.array([x.num_verified for x in events_all])
#verified_count = []
#for min_verified = 1:length(CFG.strains),
# verified_count(min_verified) = sum([events_all.confirmed] >= min_verified) ;
confirmed_idx = sp.where([x.confirmed >= 1 for x in events_all])[0]
if confirmed_idx.shape[0] > 0:
OUT.create_dataset(name='conf_idx', data=confirmed_idx)
OUT.create_dataset(name='verified', data=num_verified)
### close HDF5
OUT.close()
### save events
cPickle.dump((events_all, events_all_strains), open(fn_out, 'w'), -1)
cPickle.dump(confirmed_idx, open(fn_out_conf, 'w'), -1)
else:
print '\nLoading event data from %s' % fn_out
(events_all, events_all_strains) = cPickle.load(open(fn_out, 'r'))
confirmed_idx = cPickle.load(open(fn_out_conf, 'r'))
if events_all.shape[0] == 0:
print '\nNo %s event could be found. - Nothing to report' % event_type
continue
else:
print '\nReporting complete %s events:' % event_type
if CFG['output_txt']:
if os.path.exists(fn_out_txt):
print '%s already exists' % fn_out_txt
else:
write_events_txt(fn_out_txt, events_all, fn_out_count)
if CFG['output_struc']:
if os.path.exists(fn_out_struc):
print '%s already exists' % fn_out_struc
else:
write_events_structured(fn_out_struc, events_all, fn_out_count)
if confirmed_idx.shape[0] == 0:
print '\nNo %s event could be confirmed. - Nothing to report.' % event_type
continue
else:
print '\nReporting confirmed %s events:' % event_type
if CFG['output_confirmed_gff3']:
if os.path.exists(fn_out_conf_gff3):
print '%s already exists' % fn_out_conf_gff3
else:
write_events_gff3(fn_out_conf_gff3, events_all, confirmed_idx)
if CFG['output_confirmed_txt']:
if os.path.exists(fn_out_conf_txt):
print '%s already exists' % fn_out_conf_txt
else:
write_events_txt(fn_out_conf_txt, CFG['strains'], events_all, fn_out_count, event_idx=confirmed_idx)
if CFG['output_confirmed_bed']:
if os.path.exists(fn_out_conf_bed):
print '%s already exists' % fn_out_conf_bed
else:
write_events_bed(fn_out_conf_bed, events_all, idx=confirmed_idx)
if CFG['output_confirmed_struc']:
if os.path.exists(fn_out_conf_struc):
print '%s already exists' % fn_out_conf_struc
else:
write_events_structured(fn_out_conf_struc, events_all, fn_out_count, confirmed_idx)
if CFG['output_confirmed_tcga']:
if os.path.exists(fn_out_conf_tcga):
print '%s already exists' % fn_out_conf_tcga
else:
write_events_tcga(fn_out_conf_tcga, CFG['strains'], events_all, fn_out_count, event_idx=confirmed_idx)
if CFG['output_filtered_txt']:
fn_out_conf_txt = fn_out_conf.replace('.pickle', '.filt0.05.txt')
if os.path.exists(fn_out_conf_txt):
print '%s already exists' % fn_out_conf_txt
else:
print '\nWriting filtered events (sample freq 0.05):'
cf_idx = sp.where([x.confirmed for x in events_all[confirmed_idx]] >= (0.05 * CFG['strains'].shape[0]))[0]
write_events_txt(fn_out_conf_txt, CFG['strains'], events_all, fn_out_count, event_idx=confirmed_idx[cf_idx])
fn_out_conf_txt = fn_out_conf.replace('.pickle', '.filt0.1.txt')
if os.path.exists(fn_out_conf_txt):
print '%s already exists' % fn_out_conf_txt
else:
print '\nWriting filtered events (sample freq 0.01):'
cf_idx = sp.where([x.confirmed for x in events_all[confirmed_idx]] >= (0.01 * CFG['strains'].shape[0]))[0]
write_events_txt(fn_out_conf_txt, CFG['strains'], events_all, fn_out_count, event_idx=confirmed_idx[cf_idx])
| [
"akahles@cbio.mskcc.org"
] | akahles@cbio.mskcc.org |
a09a909f0983433a72eb2e9cc96aa02a9a188e27 | d43f8a35a259088d80c63f6070f06fd0c9736472 | /analisis_numerico/simple_scripts/lagrange.py | 148591398fa3f64e5263842dae2300a8a7025235 | [] | no_license | sespinosav/pythonPROYECTS | 628e97e6f05e07caebe160e25a789a528b9f41db | 5972b455757530e60fdc838df873d01e959dfa5c | refs/heads/master | 2023-05-09T15:05:49.585958 | 2021-05-26T18:46:30 | 2021-05-26T18:46:30 | 277,190,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | xs = eval(input("Ingrese los x: "))
ys = eval(input("Ingrese los y: "))
b = []
print("Lagrange")
print()
print("Resultados:")
print()
print("Polinomios interpolantes:")
print()
expresion = ""
result = 1
polim = "*"
for i in xs:
for k in xs:
if xs.index(i) != xs.index(k):
polim += (f"(x - {k})*")
result *= i - k
polim = polim[0:len(polim)-1]
print(polim[1:])
result = 1/result
result = ys[xs.index(i)]*result
b.append(result)
expresion += "("+str(result)+polim+")"+"+"
polim = "*"
result = 1
expresion = expresion[0:len(expresion)-1]
print()
print("Coeficientes del polinomio:")
print()
for i in b:
print(i)
print()
print("Polinomio:")
print()
print(expresion) | [
"sespinosav@eafit.edu.co"
] | sespinosav@eafit.edu.co |
1453ac33e8bd742a88fa47e6ab963128e3a4f4a2 | 552bbeed05e201d92c8ccd6dd50bd6ab214e6203 | /urlib/urllibTest.py | 10c17171507bee42dea918203752478b4285c90f | [] | no_license | VicentDong/pythonTest | a57e2138e12aa2878fbc76670bdf3ceb78276d65 | a369b48ec92e23a9e58cd4458a0193bc34c191ca | refs/heads/master | 2022-04-19T19:36:23.233316 | 2020-04-16T13:36:07 | 2020-04-16T13:36:07 | 256,219,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | from urllib import request,parse
# import socket
# import urllib.request
# import urllib.error
# import urllib.parse
# response = urllib.request.urlopen('https://www.python.org')
# # print(response.read().decode('utf-8'))
# # print(type(response))
# print(response.status)
# print(response.getheaders())
# print(response.getheader('Server'))
# data = bytes(urllib.parse.urlencode({'world':'hello'}),encoding='utf8')
# response = urllib.request.urlopen('http://httpbin.org/get',timeout=1)
# print(response.read())
# try:
# response = urllib.request.urlopen('http://httpbin.org/get',timeout=0.1)
# except urllib.error.URLError as e:
# if isinstance(e.reaso,socket.timeout):
# print('TIME OUT')
# 使用opener
url = 'http://httpbin.org/post'
headers={
'User-Agent':'Mozilla/4.0 (compatible;MSIE 5.5;Windows NT)',
'Host':'httpbin.org'
}
dict = {
'name':'Germey'
}
data = bytes(parse.urlencode(dict),encoding='utf8')
req = request.Request(url=url,data=data,headers=headers,method='POST')
req.add_header('User-Agent','Mozilla/4.0 (compatible;MSIE 5.5;Windows NT)')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
| [
"149212768@qq.com"
] | 149212768@qq.com |
5f4b7a75cd81bd997778a34f8ee69bd0e8fe37cb | 8f735396bfcc18bca40829aab79159df813fea16 | /Calendar - Weekday - Calculation.py | fd82ae6d1c129119c83a8c4dc1a4617bbd7c7a66 | [] | no_license | Raviteja-Ainampudi/Calender-Weekday-Calculation | e8f8e7f17c022a617ef26e5ce452c8fe8c6ec3f4 | c7fd9ca270b29b80e7315c69f166792e2aad56fd | refs/heads/master | 2021-01-09T20:19:54.261029 | 2016-09-18T00:16:07 | 2016-09-18T00:16:07 | 64,783,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,752 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 17:41:40 2016
@author: RAVI TEJA
"""
print " "
print "Please enter year, month and date in numerical values"
#Manual Input of Date of Birth
xyear = int(raw_input('Given Year is - '))
xmonth = int(raw_input('Given Month is - '))
xday = int(raw_input('Given Date (Number) is - '))
#Basic Input Verification
if (xmonth > 12) or (xmonth < 1):
print "invalid input"
elif (xyear > 9999) or (xyear < 1):
print "Invalid Input"
elif (xday < 1) or (xday > 31):
print "Invalid Input"
elif (xmonth == 2) and ((xyear%4)==0) and (xday > 29):
print "Invalid Input"
elif ((xyear%100 == 0) and (xyear%400) > 0) and (xmonth == 2) and (xday > 28) :
print "Invalid Input"
elif (xmonth == 2) and ((xyear%4) > 0) and (xday > 28):
print "Invalid Input"
elif ((xmonth == 4) or (xmonth == 6) or (xmonth == 9) or (xmonth == 11)) and (xday > 30):
print "Invalid Input"
else:
#To determine the weekday of the birthday of the person
#For this the assumption considered is that the first day of the calendar has started with SATURDAY
#The Source for this assumption is - http://www.webexhibits.org/calendars/year-history.html
# Another Source is - https://en.wikipedia.org/wiki/History_of_calendars
#This Alogorith works perfectly from 14th September 1752. Because there 11 missing in september of 1752..
#The Calendar has been changed. From September 2nd to September to 14th September Missing 11 days.
# Source -1: - http://www.timeanddate.com/calendar/?year=1752
#Source 2 : - http://mentalfloss.com/article/51370/why-our-calendars-skipped-11-days-1752
#So this logic has been developed considering tha fact that 14th September 1752 is considered as THURSDAY instead of Monday
#http://www.genealogytoday.com/columns/everyday/030902.html
if (xyear > 1752):
def weekday(xday, xmonth, xyear):
#If given year is a non leap Year
if ((xyear%4) > 0) or (((xyear%100) == 0) and ((xyear%400) > 0)):
list1 = [31,28,31,30,31,30,31,31,30,31,30,31] #Days of respective month
countdays = 0
for i in range (0,xmonth): #Day Count
countdays = countdays + list1[i]
excessdays = list1[xmonth-1] - xday #To remove additional days during count
totdays = countdays - excessdays
yeardays = xyear * 365
#a = 1
leapcount = 0
leap1count = 0
null = 0
for a in range (1,xyear): # To count the number of leap years
if ((a%4) == 0) and ((a%100) > 0):
leapcount = leapcount + 1
elif (a%4 == 0) and (a%400 == 0):
leap1count = leap1count + 1
else:
null +=1;
totaldays = yeardays + totdays + leapcount + leap1count
troll = totaldays%7 #To determine the day
print " "
if (troll == 0):
print "This day is Saturday"
elif (troll == 1):
print "This day is Sunday"
elif (troll == 2):
print "This day is Monday"
elif (troll == 3):
print "This day is Tuesday"
elif (troll == 4):
print "This day is Wednesday"
elif (troll == 5):
print "This day is Thursday"
else:
print "This day is Friday"
else:
#If given Year is a leap year
if ((xyear%4) == 0):
list1 = [31,29,31,30,31,30,31,31,30,31,30,31] #Days in a month of leap year
countdays = 0
for i in range (0,xmonth):
countdays = countdays + list1[i]
excessdays = list1[(xmonth) -1] - xday
totdays = countdays - excessdays
yeardays = (xyear) * 365
a = 1
leapcount = 0
leap1count = 0
null =0
for a in range (1,xyear):
if ((a%4) == 0) and ((a%100) > 0):
leapcount = leapcount + 1
elif ((a%4) == 0) and ((a%400) == 0):
leap1count = leap1count + 1
#print leap1count
#else:
#null += 1
totaldays = yeardays + totdays + leapcount + leap1count
troll = totaldays%7
print ""
if (troll == 0):
print "This day is Saturday"
elif (troll == 1):
print "This day is Sunday"
elif (troll == 2):
print "This day is Monday"
elif (troll == 3):
print "This day is Tuesday"
elif (troll == 4):
print "This day is Wednesday"
elif (troll == 5):
print "This day is Thursday"
else:
print "This day is Friday"
weekday(xday, xmonth, xyear)
elif (xyear > 0) and (xyear < 1753):
def weekday(xday, xmonth, xyear):
#If given year is a non leap Year
if ((xyear%4) > 0) or (((xyear%100) == 0) and ((xyear%400) > 0)):
list1 = [31,28,31,30,31,30,31,31,30,31,30,31] #Days of respective month
countdays = 0
for i in range (0,xmonth): #Day Count
countdays = countdays + list1[i]
excessdays = list1[xmonth-1] - xday #To remove additional days during count
totdays = countdays - excessdays
yeardays = xyear * 365
#a = 1
leapcount = 0
leap1count = 0
null = 0
for a in range (1,xyear): # To count the number of leap years
if ((a%4) == 0) and ((a%100) > 0):
leapcount = leapcount + 1
elif (a%4 == 0) and (a%400 == 0):
leap1count = leap1count + 1
else:
null +=1;
totaldays = yeardays + totdays + leapcount + leap1count
troll = totaldays%7 #To determine the day
print " "
if (troll == 3):
print "That Day is Saturday"
elif (troll == 4):
print "That Day is Sunday"
elif (troll == 5):
print "That Day is Monday"
elif (troll == 6):
print "That Day is Tuesday"
elif (troll == 0):
print "That Day is Wednesday"
elif (troll == 9):
print "That Day is Thursday"
else:
print "That Day is Friday"
print " "
print "This date/weekday may vary with Gregorian Calender due to conventional methodlogies followed at this time in past"
print "As no standard weekdays and dates were followed during this period"
print "Each country had its own calendar and Conventions"
print "For an instance missing 11 days in 1752, 1700 was made leap year, etc... "
else:
#If given Year is a leap year
if ((xyear%4) == 0):
list1 = [31,29,31,30,31,30,31,31,30,31,30,31] #Days in a month of leap year
countdays = 0
for i in range (0,xmonth):
countdays = countdays + list1[i]
excessdays = list1[(xmonth) -1] - xday
totdays = countdays - excessdays
yeardays = (xyear) * 365
a = 1
leapcount = 0
leap1count = 0
null =0
for a in range (1,xyear):
if ((a%4) == 0) and ((a%100) > 0):
leapcount = leapcount + 1
elif ((a%4) == 0) and ((a%400) == 0):
leap1count = leap1count + 1
else:
null += 1
totaldays = yeardays + totdays + leapcount + leap1count
troll = totaldays%7
print " "
if (troll == 3):
print "This day is Saturday"
elif (troll == 4):
print "This day is Sunday"
elif (troll == 5):
print "This day is Monday"
elif (troll == 6):
print "This day is on Tuesday"
elif (troll == 0):
print "This day is Wednesday"
elif (troll == 1):
print "This day is Thursday"
else:
print "This day is Friday"
print " "
print "This date/weekday may vary with Gregorian Calender due to conventional methodlogies followed at this time in past"
print "As no standard weekdays and dates were followed during this period"
print "Each country had its own calendar and Conventions"
print "For an instance missing 11 days in 1752, 1700 was made leap year, etc... "
weekday(xday, xmonth, xyear)
else:
print "Invalid Entry"
| [
"noreply@github.com"
] | noreply@github.com |
67fc1e1ff0137245d9769c920fca919ca8e09672 | c3cf11c7466a8591a7f596bd6cefeac4a21674eb | /src/run2.py | 0534f733184d0cf588d432a15c3b16202ea54f73 | [] | no_license | wieloranski/experiNurse | 816d3844aa3dcffec9a8200b6028c7427f631f3a | daea8c8545bb2b0b6ebe9645e006ce8b1f2735a3 | refs/heads/master | 2020-12-24T20:33:31.679868 | 2016-06-05T19:54:34 | 2016-06-05T19:54:34 | 59,610,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from knowledge_base.driver import *
from symp_scanner import SymptomLoader
from diag import Diagnosis
from check import *
'''example of created fact in patient.fbc file: feels(Tadeusz, headache)'''
get_symptoms_disease_relation_from_rows()
'''
perform diagnosis
EXAMPLE
'''
dict = {}
while(True):
diseases_list = []
symptom_list = []
print("--Proszę podać imię:")
name = input()
while(True):
print("--Proszę podać objaw:")
ailment = input();
symptom = SymptomLoader.scanInput(ailment)
if(symptom): symptom_list.append(symptom)
if(check_knowledge(dict, symptom)): break
diseases_list = Diagnosis.perform_diagnosis(name , symptom)
if(len(diseases_list) == 1):
break
if(diseases_list):
print(name + " cierpi na: ", diseases_list[0])
key = str(diseases_list[0])
dict.setdefault(key, [])
dict.update({key :symptom_list})
#print_knowledge_base()
'''
example of asserted fact into patient.fbc file: suffers_from('Tadeusz', 'Wymioty')
'''
| [
"wieloranski@gmail.com"
] | wieloranski@gmail.com |
c52c39bab06ae870ec0e57ade0604823f2126ca9 | 87552b87b00a0d0c51dae62c3f6ae39c3396b2b7 | /History/2020-05-05 tutorial RL/AgentRunSAC.py | 0db2987349eea2e778fe13b693e5608cc28ef5a1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | peide/ElegantRL | 4bd0c6c0ed776749b944082a1a91a333152055ef | 1f3644373ec889f0d4725f6b0490f80bafea8c44 | refs/heads/master | 2022-11-28T08:02:26.845033 | 2020-08-12T10:15:17 | 2020-08-12T10:15:17 | 287,129,333 | 5 | 2 | NOASSERTION | 2020-08-12T22:41:01 | 2020-08-12T22:41:00 | null | UTF-8 | Python | false | false | 12,317 | py | import argparse
import random
import copy
import os
from abc import ABC
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions import Distribution
from AgentRun import get_env_info, draw_plot_with_npy
from AgentRun import Recorder
"""
Reference: https://github.com/TianhongDai/reinforcement-learning-algorithms/tree/master/rl_algorithms/sac
Modify: Yonv1943 Zen4 Jia1Hao2.
"""
class ActorSAC(nn.Module):
    """Gaussian policy network for SAC.

    forward() returns the raw (pre-tanh) mean action; actor() returns the
    (mean, std) of the action Gaussian; get__a__log_prob() draws a
    reparameterized, tanh-squashed action with its log-probability.
    """
    def __init__(self, state_dim, action_dim, mid_net):
        super(ActorSAC, self).__init__()
        # Clamp range for the predicted log-std (standard SAC bounds).
        self.log_std_min = -20
        self.log_std_max = 2
        # Shared two-layer trunk; separate heads for mean and log-std.
        self.net = nn.Sequential(nn.Linear(state_dim, mid_net), nn.ReLU(),
                                 nn.Linear(mid_net, mid_net), nn.ReLU(), )
        self.net_mean = nn.Linear(mid_net, action_dim)
        self.net_log_std = nn.Linear(mid_net, action_dim)

    def forward(self, state):
        """Return the mean action; NOTE no tanh squashing is applied here."""
        x = self.net(state)
        action_mean = self.net_mean(x)
        return action_mean

    def actor(self, state):
        """Return (mean, std) of the action Gaussian for *state*."""
        x = self.net(state)
        action_mean = self.net_mean(x)
        log_std = self.net_log_std(x)
        log_std = log_std.clamp(self.log_std_min, self.log_std_max)
        action_std = log_std.exp()
        return action_mean, action_std

    def get__a__log_prob(self, states):
        """Sample tanh-squashed actions via the reparameterization trick.

        Returns (actions, log_prob), where log_prob is the Gaussian
        log-density of the pre-tanh value minus the change-of-variables
        correction log(1 - a**2 + 1e-6) for the tanh squashing.
        """
        a_mean, a_std = self.actor(states)
        noise = torch.randn_like(a_mean, requires_grad=True)  # device=self.device
        pre_tanh_value = a_mean + a_std * noise
        actions_noise = pre_tanh_value.tanh()
        log_prob = Normal(a_mean, a_std).log_prob(pre_tanh_value) - (-actions_noise.pow(2) + (1 + 1e-6)).log()
        return actions_noise, log_prob
class CriticSAC(nn.Module):
    """Q-value network: maps a (state, action) pair to a scalar Q estimate."""
    def __init__(self, state_dim, action_dim, mid_dim, use_densenet, use_spectral_norm):
        # NOTE(review): use_densenet and use_spectral_norm are accepted but
        # never used in this implementation — presumably kept for signature
        # compatibility with other critic variants; confirm before removing.
        super(CriticSAC, self).__init__()
        self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, 1), )
        # layer_norm(self.net[0], std=1.0)
        # layer_norm(self.net[-1], std=1.0)

    def forward(self, s, a):
        """Return Q(s, a); state and action are concatenated on the feature axis."""
        x = torch.cat((s, a), dim=1)
        q = self.net(x)
        return q
class ReplayBuffer:
    """Fixed-capacity ring buffer of (obs, action, reward, next_obs, done) tuples."""

    def __init__(self, memory_size):
        self.storage = []               # backing list; grows until memory_size
        self.memory_size = memory_size  # maximum number of stored transitions
        self.next_idx = 0               # slot that the next add() will write

    def add(self, obs, action, reward, obs_, done):
        """Store one transition, overwriting the oldest entry once full."""
        entry = (obs, action, reward, obs_, done)
        if self.next_idx < len(self.storage):
            # Buffer already holds an entry at this slot: overwrite it.
            self.storage[self.next_idx] = entry
        else:
            # Still filling up: extend the backing list.
            self.storage.append(entry)
        # Advance the write cursor, wrapping around at capacity.
        self.next_idx = (self.next_idx + 1) % self.memory_size

    def _encode_sample(self, idx):
        """Gather the transitions at positions *idx* into five parallel numpy arrays."""
        columns = ([], [], [], [], [])
        for i in idx:
            obs, action, reward, obs_, done = self.storage[i]
            columns[0].append(np.array(obs, copy=False))
            columns[1].append(np.array(action, copy=False))
            columns[2].append(reward)
            columns[3].append(np.array(obs_, copy=False))
            columns[4].append(done)
        return tuple(np.array(col) for col in columns)

    def sample(self, batch_size):
        """Draw *batch_size* transitions uniformly at random (with replacement)."""
        top = len(self.storage) - 1
        picks = [random.randint(0, top) for _ in range(batch_size)]
        return self._encode_sample(picks)
class AgentSAC:
    """Soft Actor-Critic agent: one Gaussian actor, twin critics with target
    copies, and an automatically tuned entropy temperature alpha."""
    def __init__(self, env, state_dim, action_dim, net_dim):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.learning_rate = 3e-4
        use_densenet = False
        use_spectral_norm = False
        '''network'''
        # Actor learns at half the critic learning rate.
        self.act = ActorSAC(state_dim, action_dim, net_dim).to(self.device)
        self.act_optimizer = torch.optim.Adam(self.act.parameters(), lr=self.learning_rate * 0.5)
        # Critics get a slightly wider hidden layer than the actor.
        cri_dim = int(net_dim * 1.25)
        self.cri = CriticSAC(state_dim, action_dim, cri_dim, use_densenet, use_spectral_norm).to(self.device)
        self.cri2 = CriticSAC(state_dim, action_dim, cri_dim, use_densenet, use_spectral_norm).to(self.device)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), lr=self.learning_rate)
        self.cri2_optimizer = torch.optim.Adam(self.cri2.parameters(), lr=self.learning_rate)
        # Target networks start as exact copies, updated by Polyak averaging.
        self.cri_target = copy.deepcopy(self.cri).to(self.device)
        self.cri2_target = copy.deepcopy(self.cri2).to(self.device)
        '''extension'''
        # Auto-tuned entropy temperature (optimized in log space).
        self.target_entropy = -1
        self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
        self.alpha_optim = torch.optim.Adam((self.log_alpha,), lr=self.learning_rate)
        '''training'''
        self.state = env.reset()
        self.reward_sum = 0.0
        self.step_sum = 0

    @staticmethod
    def soft_target_update(target, source, tau=5e-3):
        """Polyak update: target <- tau * source + (1 - tau) * target."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)

    def select_actions(self, states, explore_noise=0.0):  # CPU array to GPU tensor to CPU array
        """Map a batch of states to actions as a numpy array.

        With explore_noise truthy (callers pass True), actions are sampled
        from the policy Gaussian and tanh-squashed.
        NOTE(review): the deterministic branch returns the raw actor mean
        WITHOUT tanh squashing, so it is not bounded to [-1, 1] — confirm
        this is intended before using it for evaluation.
        """
        states = torch.tensor(states, dtype=torch.float32, device=self.device)
        actions = self.act(states)
        if explore_noise != 0.0:
            pis = self.act.actor(states)
            actions = Normal(*pis).sample()
            actions = actions.tanh()
        actions = actions.cpu().data.numpy()
        return actions

    def save_or_load_model(self, mod_dir, is_save):
        """Save the actor weights to mod_dir when is_save, else load them."""
        act_save_path = '{}/actor.pth'.format(mod_dir)
        # cri_save_path = '{}/critic.pth'.format(mod_dir)
        if is_save:
            torch.save(self.act.state_dict(), act_save_path)
            # torch.save(self.cri.state_dict(), cri_save_path)
            # print("Saved act and cri:", mod_dir)
        elif os.path.exists(act_save_path):
            act_dict = torch.load(act_save_path, map_location=lambda storage, loc: storage)
            self.act.load_state_dict(act_dict)
            # self.act_target.load_state_dict(act_dict)
            # cri_dict = torch.load(cri_save_path, map_location=lambda storage, loc: storage)
            # self.cri.load_state_dict(cri_dict)
            # self.cri_target.load_state_dict(cri_dict)
        else:
            print("FileNotFound when load_model: {}".format(mod_dir))

    def inactive_in_env_sac(self, env, memo, max_step, max_action, reward_scale, gamma):
        """Interact with *env* for max_step steps, filling the replay buffer.

        Returns (rewards, steps): the undiscounted return and length of each
        episode that FINISHED during this rollout (may be empty lists).
        """
        rewards = list()
        steps = list()
        for t in range(max_step):
            action = self.select_actions((self.state,), explore_noise=True)[0]
            next_state, reward, done, _ = env.step(action * max_action)
            res_reward = reward * reward_scale
            # mask folds the discount and the terminal flag into one factor.
            mask = 0.0 if done else gamma
            self.reward_sum += reward
            self.step_sum += 1
            memo.add(self.state, action, res_reward, next_state, mask)
            self.state = next_state
            if done:
                rewards.append(self.reward_sum)
                self.reward_sum = 0.0
                steps.append(self.step_sum)
                self.step_sum = 0
                # reset the environment
                self.state = env.reset()
        return rewards, steps

    def update_parameter_sac(self, memo, max_step, batch_size):
        """Run max_step SAC gradient updates; return mean (actor, critic) loss."""
        loss_a_sum = 0.0
        loss_c_sum = 0.0
        iter_num = max_step
        for _ in range(iter_num):
            with torch.no_grad():
                # smaple batch of samples from the replay buffer
                states, actions, rewards, next_states, marks = [
                    torch.tensor(ary, dtype=torch.float32, device=self.device)
                    for ary in memo.sample(batch_size)
                ]
                rewards = rewards.unsqueeze(-1)
                marks = marks.unsqueeze(-1)  # mark == (1-float(done)) * gamma
            actions_noise, log_prob = self.act.get__a__log_prob(states)
            '''auto alpha'''
            # Temperature loss drives the policy entropy toward target_entropy.
            alpha_loss = -(self.log_alpha * (self.target_entropy + log_prob).detach()).mean()
            self.alpha_optim.zero_grad()
            alpha_loss.backward()
            self.alpha_optim.step()
            '''actor loss'''
            alpha = self.log_alpha.exp()
            # Use the pessimistic (min) of the twin critics, standard in SAC.
            q0_min = torch.min(self.cri(states, actions_noise), self.cri2(states, actions_noise))
            actor_loss = (alpha * log_prob - q0_min).mean()
            self.act_optimizer.zero_grad()
            actor_loss.backward()
            self.act_optimizer.step()
            '''critic loss'''
            q1_value = self.cri(states, actions)
            q2_value = self.cri2(states, actions)
            with torch.no_grad():
                next_actions_noise, next_log_prob = self.act.get__a__log_prob(next_states)
                next_q0_min = torch.min(self.cri_target(next_states, next_actions_noise),
                                        self.cri2_target(next_states, next_actions_noise))
                # Soft Bellman target: entropy-regularized next-state value.
                next_target_q_value = next_q0_min - next_log_prob * alpha
                target_q_value = rewards + marks * next_target_q_value
            qf1_loss = (q1_value - target_q_value).pow(2).mean()
            qf2_loss = (q2_value - target_q_value).pow(2).mean()
            # qf1
            self.cri_optimizer.zero_grad()
            qf1_loss.backward()
            self.cri_optimizer.step()
            # qf2
            self.cri2_optimizer.zero_grad()
            qf2_loss.backward()
            self.cri2_optimizer.step()
            loss_a_sum += actor_loss.item()
            loss_c_sum += (qf1_loss.item() + qf2_loss.item()) * 0.5
            self.soft_target_update(self.cri_target, self.cri)
            self.soft_target_update(self.cri2_target, self.cri2)
        loss_a = loss_a_sum / iter_num
        loss_c = loss_c_sum / iter_num
        return loss_a, loss_c
def train_agent_sac(agent_class, env_name, cwd, net_dim, max_step, max_memo, max_epoch,  # env
                    batch_size, gamma,
                    **_kwargs):  # 2020-0430
    """Train *agent_class* on gym env *env_name* with SAC.

    Alternates rollouts (inactive_in_env_sac) and gradient updates
    (update_parameter_sac) for up to max_epoch epochs, recording progress
    via Recorder. Returns True on normal completion or KeyboardInterrupt,
    False when gym raises AssertionError (caller retries with a new seed).
    """
    reward_scale = 1
    env = gym.make(env_name)
    state_dim, action_dim, max_action, target_reward = get_env_info(env)
    agent = agent_class(env, state_dim, action_dim, net_dim)
    memo = ReplayBuffer(max_memo)
    recorder = Recorder(agent, max_step, max_action, target_reward, env_name)
    agent.inactive_in_env_sac(env, memo, max_step, max_action, reward_scale, gamma)  # init memory before training
    try:
        for epoch in range(max_epoch):
            with torch.no_grad():
                rewards, steps = agent.inactive_in_env_sac(env, memo, max_step, max_action, reward_scale, gamma)
            loss_a, loss_c = agent.update_parameter_sac(memo, max_step, batch_size)
            with torch.no_grad():  # for saving the GPU memory
                recorder.show_reward(epoch, rewards, steps, loss_a, loss_c)
                is_solved = recorder.check_reward(cwd, loss_a, loss_c)
            if is_solved:
                break
    except KeyboardInterrupt:
        print("raise KeyboardInterrupt while training.")
    except AssertionError:  # for BipedWalker BUG 2020-03-03
        print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
        return False
    train_time = recorder.show_and_save(env_name, cwd)
    # agent.save_or_load_model(cwd, is_save=True) # save max reward agent in Recorder
    # memo.save_or_load_memo(cwd, is_save=True)
    draw_plot_with_npy(cwd, train_time)
    return True
def run__sac(gpu_id=0, cwd='AC_SAC'):
    """Configure and launch SAC training on LunarLanderContinuous-v2.

    Retries with a new random seed whenever train_agent_sac() reports a
    failed run (it returns False on gym's AssertionError bug).
    """
    from AgentRun import Arguments

    args = Arguments(AgentSAC)
    args.gpu_id = gpu_id
    args.reward_scale = 1.0  # important
    args.env_name = "LunarLanderContinuous-v2"
    args.cwd = './{}/LL_{}'.format(cwd, gpu_id)
    args.init_for_training()

    while True:
        if train_agent_sac(**vars(args)):
            break
        args.random_seed += 42
# Script entry point: train SAC on GPU id 2, writing results under ./AC_SAC.
if __name__ == '__main__':
    run__sac(gpu_id=2, cwd='AC_SAC')
| [
"noreply@github.com"
] | noreply@github.com |
014a1726dce1e3d670880f2daba7a044700067c4 | bbec348efb79c6588a4cb6bb565c813fe3fe86ad | /pyVpx/pyVsm/pyVsm/ReflectTypes.py | 8fca89c7db09164ff1d65f4acc92cf7fbf57273b | [] | no_license | free-Zen/pvc | 2be60fdc0fd0345039219c802223f987fce3b113 | 8428a84481be319ae739dfbb87715f31810138d9 | refs/heads/master | 2022-02-24T12:13:31.599398 | 2019-10-14T07:49:13 | 2019-10-14T07:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,108 | py | # ******* WARNING - AUTO GENERATED CODE - DO NOT EDIT *******
from .VmomiSupport import CreateDataType, CreateManagedType
from .VmomiSupport import CreateEnumType
from .VmomiSupport import AddVersion, AddVersionParent
from .VmomiSupport import AddBreakingChangesInfo
from .VmomiSupport import F_LINK, F_LINKABLE
from .VmomiSupport import F_OPTIONAL, F_SECRET
from .VmomiSupport import newestVersions, stableVersions
from .VmomiSupport import publicVersions, dottedVersions
from .VmomiSupport import oldestVersions
AddVersion("vmodl.version.version0", "", "", 0, "vim25")
AddVersion("vmodl.version.version1", "", "", 0, "vim25")
AddVersion("vmodl.version.version2", "", "", 0, "vim25")
AddVersion("vmodl.reflect.version.version1", "reflect", "1.0", 0, "reflect")
AddVersion("vmodl.reflect.version.version2", "reflect", "2.0", 0, "reflect")
AddVersionParent("vmodl.version.version0", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version1")
AddVersionParent("vmodl.version.version2", "vmodl.version.version0")
AddVersionParent("vmodl.version.version2", "vmodl.version.version1")
AddVersionParent("vmodl.version.version2", "vmodl.version.version2")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version0")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version1")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version2")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.reflect.version.version1")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version0")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version1")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version2")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.reflect.version.version1")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.reflect.version.version2")
newestVersions.Add("vmodl.reflect.version.version2")
stableVersions.Add("vmodl.reflect.version.version2")
publicVersions.Add("vmodl.reflect.version.version2")
dottedVersions.Add("vmodl.reflect.version.version2")
oldestVersions.Add("vmodl.reflect.version.version1")
CreateManagedType("vmodl.reflect.DynamicTypeManager", "InternalDynamicTypeManager", "vmodl.ManagedObject", "vmodl.reflect.version.version1", None, [("queryTypeInfo", "DynamicTypeMgrQueryTypeInfo", "vmodl.reflect.version.version1", (("filterSpec", "vmodl.reflect.DynamicTypeManager.FilterSpec", "vmodl.reflect.version.version1", F_OPTIONAL, None),), (0, "vmodl.reflect.DynamicTypeManager.AllTypeInfo", "vmodl.reflect.DynamicTypeManager.AllTypeInfo"), "System.Read", None), ("queryMoInstances", "DynamicTypeMgrQueryMoInstances", "vmodl.reflect.version.version1", (("filterSpec", "vmodl.reflect.DynamicTypeManager.FilterSpec", "vmodl.reflect.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "vmodl.reflect.DynamicTypeManager.MoInstance[]", "vmodl.reflect.DynamicTypeManager.MoInstance[]"), "System.Read", None)])
CreateDataType("vmodl.reflect.DynamicTypeManager.Annotation", "DynamicTypeMgrAnnotation", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("parameter", "string[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.PropertyTypeInfo", "DynamicTypeMgrPropertyTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("type", "string", "vmodl.reflect.version.version1", 0), ("privId", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("msgIdFormat", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateEnumType("vmodl.reflect.DynamicTypeManager.PropertyTypeInfo.AnnotationType", "DynamicTypeMgrPropertyTypeInfoAnnotationType", "vmodl.reflect.version.version1", ["optional", "readonly", "linkable", "link"])
CreateDataType("vmodl.reflect.DynamicTypeManager.DataTypeInfo", "DynamicTypeMgrDataTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("wsdlName", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("base", "string[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("property", "vmodl.reflect.DynamicTypeManager.PropertyTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.ParamTypeInfo", "DynamicTypeMgrParamTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("type", "string", "vmodl.reflect.version.version1", 0), ("privId", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateEnumType("vmodl.reflect.DynamicTypeManager.ParamTypeInfo.AnnotationType", "DynamicTypeMgrParamTypeInfoAnnotationType", "vmodl.reflect.version.version1", ["optional", "secret"])
CreateDataType("vmodl.reflect.DynamicTypeManager.MethodTypeInfo", "DynamicTypeMgrMethodTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("wsdlName", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("paramTypeInfo", "vmodl.reflect.DynamicTypeManager.ParamTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("returnTypeInfo", "vmodl.reflect.DynamicTypeManager.ParamTypeInfo", "vmodl.reflect.version.version1", F_OPTIONAL), ("fault", "string[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("privId", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateEnumType("vmodl.reflect.DynamicTypeManager.MethodTypeInfo.AnnotationType", "DynamicTypeMgrMethodTypeInfoAnnotationType", "vmodl.reflect.version.version1", ["internal"])
CreateDataType("vmodl.reflect.DynamicTypeManager.ManagedTypeInfo", "DynamicTypeMgrManagedTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("wsdlName", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("base", "string[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("property", "vmodl.reflect.DynamicTypeManager.PropertyTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("method", "vmodl.reflect.DynamicTypeManager.MethodTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.EnumTypeInfo", "DynamicTypeEnumTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("wsdlName", "string", "vmodl.reflect.version.version1", 0), ("version", "string", "vmodl.reflect.version.version1", 0), ("value", "string[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("annotation", "vmodl.reflect.DynamicTypeManager.Annotation[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.AllTypeInfo", "DynamicTypeMgrAllTypeInfo", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("managedTypeInfo", "vmodl.reflect.DynamicTypeManager.ManagedTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("enumTypeInfo", "vmodl.reflect.DynamicTypeManager.EnumTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL), ("dataTypeInfo", "vmodl.reflect.DynamicTypeManager.DataTypeInfo[]", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.MoInstance", "DynamicTypeMgrMoInstance", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("id", "string", "vmodl.reflect.version.version1", 0), ("moType", "string", "vmodl.reflect.version.version1", 0)])
CreateDataType("vmodl.reflect.DynamicTypeManager.FilterSpec", "DynamicTypeMgrFilterSpec", "vmodl.DynamicData", "vmodl.reflect.version.version1", None)
CreateDataType("vmodl.reflect.DynamicTypeManager.TypeFilterSpec", "DynamicTypeMgrTypeFilterSpec", "vmodl.reflect.DynamicTypeManager.FilterSpec", "vmodl.reflect.version.version1", [("typeSubstr", "string", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.DynamicTypeManager.MoFilterSpec", "DynamicTypeMgrMoFilterSpec", "vmodl.reflect.DynamicTypeManager.FilterSpec", "vmodl.reflect.version.version1", [("id", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("typeSubstr", "string", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateManagedType("vmodl.reflect.ManagedMethodExecuter", "ReflectManagedMethodExecuter", "vmodl.ManagedObject", "vmodl.reflect.version.version1", None, [("executeSoap", "ExecuteSoap", "vmodl.reflect.version.version1", (("moid", "string", "vmodl.reflect.version.version1", 0, None),("version", "string", "vmodl.reflect.version.version1", 0, None),("method", "string", "vmodl.reflect.version.version1", 0, None),("argument", "vmodl.reflect.ManagedMethodExecuter.SoapArgument[]", "vmodl.reflect.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "vmodl.reflect.ManagedMethodExecuter.SoapResult", "vmodl.reflect.ManagedMethodExecuter.SoapResult"), None, None), ("fetchSoap", "FetchSoap", "vmodl.reflect.version.version1", (("moid", "string", "vmodl.reflect.version.version1", 0, None),("version", "string", "vmodl.reflect.version.version1", 0, None),("prop", "string", "vmodl.reflect.version.version1", 0, None),), (F_OPTIONAL, "vmodl.reflect.ManagedMethodExecuter.SoapResult", "vmodl.reflect.ManagedMethodExecuter.SoapResult"), None, None)])
CreateDataType("vmodl.reflect.ManagedMethodExecuter.SoapArgument", "ReflectManagedMethodExecuterSoapArgument", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("name", "string", "vmodl.reflect.version.version1", 0), ("val", "string", "vmodl.reflect.version.version1", 0)])
CreateDataType("vmodl.reflect.ManagedMethodExecuter.SoapFault", "ReflectManagedMethodExecuterSoapFault", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("faultMsg", "string", "vmodl.reflect.version.version1", 0), ("faultDetail", "string", "vmodl.reflect.version.version1", F_OPTIONAL)])
CreateDataType("vmodl.reflect.ManagedMethodExecuter.SoapResult", "ReflectManagedMethodExecuterSoapResult", "vmodl.DynamicData", "vmodl.reflect.version.version1", [("response", "string", "vmodl.reflect.version.version1", F_OPTIONAL), ("fault", "vmodl.reflect.ManagedMethodExecuter.SoapFault", "vmodl.reflect.version.version1", F_OPTIONAL)])
| [
"liuzhen@vmware.com"
] | liuzhen@vmware.com |
d4f3a9947d08ab980c9a5864dd0773d75cec7af0 | e975953ef6a7fd89cb70bf864dc9473e0291848c | /mail_spam.py | f88abac154086594bc392e509d82ec0dee7a5640 | [] | no_license | mol5944/mail_spamer | 74284c0c2bef3c6ebb084520b23e5176833e9e56 | bf27e55547515609875adc81b70923782a07b4b8 | refs/heads/master | 2020-11-28T10:10:41.834107 | 2019-12-23T15:36:17 | 2019-12-23T15:36:17 | 229,779,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,072 | py | #modules
import smtplib
from sys import argv
from time import sleep
#define
def help():
print('--wordlist_sm [senders mail]') #1
print('--wordlist_rm [wordlist mail recipient]')
print('--file_msg [file where the letter is stored]')
print('--help [call the help menu]')
quit()
def get_msg(file_msg):
with open(file_msg,'rt') as file:
msg = file.read().split('\n')
if msg[0].split(':')[0] != 'From' or msg[1].split(':')[0] != 'Subject':
print('The letter is incorrectly composed')
quit()
from_msg = msg[0].split(':')[1]
sub_msg = msg[1].split(':')[1]
list_text_msg = msg[2::]
msg_text = ""
for i in list_text_msg:
msg_text = msg_text + i + '\n'
msg = [from_msg,sub_msg,msg_text]
return msg
def generator(string):
for word in string:
mail = word.replace('\n','')
yield mail
def get_mails_sendner(wordlist):
with open(wordlist,'rt') as file:
mails_sendner = file.read().split('\n')
emails = []
for i in mails_sendner:
emails.append(i.split(':'))
del mails_sendner
return emails
def type_email(email):
email_spl = email.split('@')[1]
if email_spl == 'yandex.ru':
return 'yandex.ru'
elif email_spl == 'mail.ru':
return 'mail.ru'
def check_connect(email_login,password):
if type_email(email_login) == 'yandex.ru':
try:
server = smtplib.SMTP_SSL('smtp.yandex.ru',465)
server.login(email_login,password)
return [True, 'yandex.ru']
except:
return [False]
elif type_email(email_login) == 'mail.ru':
try:
server = smtplib.SMTP_SSL('smtp.mail.ru',465)
server.login(email_login,password)
return [True, 'mail.ru']
except:
return [False]
#if-else [argv]
if '--help' in argv or '-h' in argv:
help()
if '--wordlist_sm' not in argv:
print('--wordlist_sm specify a list of words with the mails \nto which the letter will be sent')
quit()
if '--wordlist_rm' not in argv:
print('--wordlist_rm specify a list of words with mail recipients')
quit()
#scritp_var
wordlist_sm = argv[argv.index('--wordlist_sm') + 1]
wordlist_rm = argv[argv.index('--wordlist_rm') + 1]
msg_file = argv[argv.index('--file_msg') + 1]
msg_list = get_msg(msg_file)
msg = "From: " + msg_list[0] + "\n" + "Subject: " + msg_list[1] + "\n\n" + msg_list[2]
mails = get_mails_sendner(wordlist_sm)[:-1:]
mails_list = []
mails_yandex = []
mails_mail = []
count_yandex = 0
count_mail = 0
len_check_mail = 0
len_check_yandex = 0
#yandex/mail
for i in mails:
if type_email(i[0]) == 'yandex.ru':
mails_yandex.append(i)
elif type_email(i[0]) == 'mail.ru':
mails_mail.append(i)
yandex_len = len(mails_yandex)
mail_len = len(mails_mail)
with open(wordlist_rm,'rt',errors='ignore') as dict_mail_rec:
for mail_rec in generator(dict_mail_rec):
if len(mails_list) == 5:
yandex_rec_mail = []
mail_rec_mail = []
for mail in mails_list:
if type_email(mail) == 'yandex.ru':
yandex_rec_mail.append(mail)
elif type_email(mail) == 'mail.ru':
mail_rec_mail.append(mail)
#yandex
while True:
if count_yandex == yandex_len:
count_yandex = 0
mail_checked = mails_yandex[count_yandex]
if check_connect(mail_checked[0],mail_checked[1])[0]:
mail_yandex_sendner = mail_checked
count_yandex += 1
break
print('checked ' + mail_yandex_sendner)
if len_check_yandex == 10:
sleep(5)
else:
sleep(1)
#mail
while True:
if count_mail == mail_len:
count_mail = 0
mail_checked = mails_mail[count_mail]
if check_connect(mail_checked[0],mail_checked[1])[0]:
mail_mail_sendner = mail_checked
count_mail += 1
break
#yandex_send
if len(yandex_rec_mail) != 0:
server = smtplib.SMTP_SSL('smtp.yandex.ru',465)
server.login(mail_yandex_sendner[0],mail_yandex_sendner[1])
for yandex_mail_rec in yandex_rec_mail:
server.sendmail(mail_yandex_sendner[0],yandex_mail_rec,msg)
print('send to ' + yandex_mail_rec)
#mail_send
if len(mail_rec_mail) != 0:
server = smtplib.SMTP_SSL('smtp.mail.ru',465)
server.login(mail_mail_sendner[0],mail_mail_sendner[1])
for mail_mail_rec in mail_rec_mail:
server.sendmail(mail_mail_sendner[0],mail_mail_rec,msg)
print('send to ' + mail_mail_rec)
else:
mails_list.append(mail_rec)
| [
"mol81mol@yandex.ru"
] | mol81mol@yandex.ru |
337555bf276f7550941cef15e4d1b489b2601b6c | 17a76329c5bc7aba14b6f0b06055f1bbe6c614b1 | /web_api/web_api/resources/experiment.py | 5caad956b4bb43830f626a040cca6c4295745aa9 | [
"MIT"
] | permissive | saqib-nadeem/sample_python_scripts | 2424b514e12b2aea8ccd962d7ddbc6adeaa6bdf7 | 35054816b262a3860ab9db383ab5d9e11ac0ce87 | refs/heads/master | 2021-01-01T12:08:18.033904 | 2020-02-09T09:21:08 | 2020-02-09T09:21:08 | 239,271,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,198 | py | from flask import jsonify, url_for, request, make_response
from flask.ext.restful import reqparse, abort, Api, Resource, fields, marshal
from web_api.resources import auth
from web_api.models import Experiments, db
from web_api.common.util import abort_if_record_doesnt_exist, get_all_records, get_record_by_ID, update_record, delete_record
# Marshalling template shared by both resources below: maps attributes of the
# Experiments model to the JSON representation returned to clients. 'uri' is
# not stored on the model; it is derived from the 'experiment' route.
experiment_fields = {
    'name': fields.String,
    'color': fields.String,
    'description': fields.String,
    'instructions': fields.String,
    'spare_answer_id': fields.Integer,
    'tag': fields.String,
    'deliver_data': fields.Integer,
    'uri': fields.Url('experiment', absolute=True, scheme='http')
}
# ExperimentListAPI
# shows a list of all experiments, and lets you POST to add new experiment
class ExperimentListAPI(Resource):
    """Collection resource: GET lists all experiments, POST creates one."""
    # Every request must pass HTTP authentication.
    decorators = [auth.login_required]

    def __init__(self):
        # Declare the accepted request-body fields (all optional strings/ints).
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('name', type = str)
        self.reqparse.add_argument('color', type = str)
        self.reqparse.add_argument('description', type = str)
        self.reqparse.add_argument('instructions', type = str)
        self.reqparse.add_argument('spare_answer_id', type = int)
        self.reqparse.add_argument('tag', type = str)
        self.reqparse.add_argument('deliver_data', type = int)
        super(ExperimentListAPI, self).__init__()

    def get(self):
        """Return every experiment, marshalled through experiment_fields."""
        output = get_all_records(Experiments, "experiments")
        return {'data' : marshal(output, experiment_fields)}

    def post(self):
        """Create a new experiment from the request body and persist it.

        Responds 201 with the parsed arguments (not the stored record).
        """
        args = self.reqparse.parse_args()
        name = args['name']
        color = args['color']
        description = args['description']
        instructions = args['instructions']
        spare_answer_id = args['spare_answer_id']
        tag = args['tag']
        deliver_data = args['deliver_data']
        experiment = Experiments(name, color, description, instructions, spare_answer_id, tag, deliver_data)
        db.session.add(experiment)
        db.session.commit()
        # NOTE(review): experiment_id is assigned but never used — presumably
        # it was meant to be included in the response; confirm and clean up.
        experiment_id = experiment.id
        return args, 201
# ExperimentAPI
# show a single experiment info and lets you update/delete them
class ExperimentAPI(Resource):
    """Item resource: GET/PUT/DELETE a single experiment by id.

    NOTE(review): the parameter name `id` shadows the builtin; renaming would
    also require updating the URL-rule registration, so it is left as-is.
    """
    # Every request must pass HTTP authentication.
    decorators = [auth.login_required]

    def __init__(self):
        # Same optional field set as ExperimentListAPI; used by put().
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('name', type = str)
        self.reqparse.add_argument('color', type = str)
        self.reqparse.add_argument('description', type = str)
        self.reqparse.add_argument('instructions', type = str)
        self.reqparse.add_argument('spare_answer_id', type = int)
        self.reqparse.add_argument('tag', type = str)
        self.reqparse.add_argument('deliver_data', type = int)
        super(ExperimentAPI, self).__init__()

    def get(self, id):
        """Return the experiment with the given id, marshalled."""
        output = get_record_by_ID(Experiments, id, "experiments")
        return {'data' : marshal(output, experiment_fields)}

    def delete(self, id):
        """Delete the experiment with the given id; respond 204 No Content."""
        delete_record(Experiments, id)
        return '', 204

    def put(self, id):
        """Update the experiment from the request body; respond 201."""
        args = self.reqparse.parse_args()
        output = update_record(Experiments, id, args, "experiments")
        return output, 201
| [
"saqib598@gmail.com"
] | saqib598@gmail.com |
52a1f42345b7038647a24cc6ac21b27686650cc4 | f999d82c9c4ac5cd1ec15e6d6f076afbba85a03b | /src/stats.py | 2fac812e7d1e3665375b61b6f7186875bbc6837f | [] | no_license | aunmesh/FoodMartSim | e4108b3f874f20b44e2e5739f5d806576975db0a | 9d8c77c091f4f761a73425c0fb5ab79d6e969102 | refs/heads/master | 2021-08-19T07:44:58.456490 | 2017-11-25T08:10:57 | 2017-11-25T08:10:57 | 110,343,104 | 2 | 1 | null | 2017-11-18T14:14:31 | 2017-11-11T12:18:34 | Python | UTF-8 | Python | false | false | 552 | py | def calculate_efficiency(farmers, buyers):
welfare = 0
maxwelfare = 0
farmer_pop = len(farmers)
buyer_pop = len(buyers)
for i in range(farmer_pop):
welfare -= farmers[i].qty_traded * farmers[i].true_type
for i in range(buyer_pop):
welfare += buyers[i].qty_traded * buyers[i].true_type
maxwelfare = welfare
maxwelfare += (farmers[Farmer.brk_index].qty - farmers[Farmer.brk_index].qty_traded)
maxwelfare += (buyers[Buyer.brk_index].qty - buyers[Buyer.brk_index].qty_traded)
efficiency = welfare/maxwelfare
return efficiency
| [
"noreply@github.com"
] | noreply@github.com |
0cbac4b32b5e85d2aa2d17639e2fb7a6ece2316b | 66176b6735f9fb3f8eaa649cf5df87a1104b9ddb | /src/csh/align_by_permutation.py | 41f2e3f44a543dfa3dd904a0f6f8b79a69bf18cf | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive"
] | permissive | jswelling/Fiasco | a918b516c8880d4f627be6f8490fe01dc02f91a5 | 0e9264207c385e6c911f5458b9a90866ea14c4be | refs/heads/master | 2021-12-14T00:23:08.924678 | 2021-11-30T05:08:35 | 2021-11-30T05:08:35 | 66,334,022 | 3 | 0 | null | 2021-11-30T05:06:53 | 2016-08-23T04:49:58 | C | UTF-8 | Python | false | false | 9,918 | py | #! /usr/bin/env python
#
# ************************************************************
# * *
# * Permission is hereby granted to any individual or *
# * institution for use, copying, or redistribution of *
# * this code and associated documentation, provided *
# * that such code and documentation are not sold for *
# * profit and the following copyright notice is retained *
# * in the code and documentation: *
# * Copyright (c) 2006 Department of Statistics, *
# * Carnegie Mellon University *
# * *
# * This program is distributed in the hope that it will *
# * be useful, but WITHOUT ANY WARRANTY; without even the *
# * implied warranty of MERCHANTABILITY or FITNESS FOR A *
# * PARTICULAR PURPOSE. Neither Carnegie Mellon University *
# * nor any of the authors assume any liability for *
# * damages, incidental or otherwise, caused by the *
# * installation or use of this software. *
# * *
# * CLINICAL APPLICATIONS ARE NOT RECOMMENDED, AND THIS *
# * SOFTWARE HAS NOT BEEN EVALUATED BY THE UNITED STATES *
# * FDA FOR ANY CLINICAL USE. *
# * *
# ************************************************************
#
import sys
import os
import os.path
import string
import getopt
from math import *
if "FIASCO" in os.environ:
sys.path.append(os.environ["FIASCO"])
from fiasco_utils import *
idString= "$Id: align_by_permutation.py,v 1.6 2007/02/06 21:45:42 welling Exp $"
def checkInputStructure( chunk, unsafeFlag ):
    """Abort unless the chunk has (v)xyz(t) dimensions with trivial extents.

    Accepted dimension strings are "xyz", "xyzt", "vxyz" and "vxyzt".
    When present, the t extent must be 1 (unless unsafeFlag is set) and the
    v extent must be 1.  Any violation terminates the program via sys.exit.
    """
    dimstr = chunk.getValue('dimensions')
    if dimstr == "xyz":
        return
    fname = os.path.basename(chunk.ds.fname)
    if dimstr not in ("xyzt", "vxyzt", "vxyz"):
        sys.exit("Input file %s must have dimensions (v)xyz(t)!" % fname)
    # t extent check applies to "xyzt" and "vxyzt"; may be bypassed via unsafeFlag
    if dimstr.endswith("t") and chunk.getDim("t") != 1 and not unsafeFlag:
        sys.exit("Input file %s must have t extent 1!" % fname)
    # v extent check applies to "vxyz" and "vxyzt"; never bypassed
    if dimstr.startswith("v") and chunk.getDim("v") != 1:
        sys.exit("Input file %s must have v extent 1!" % fname)
##############################
#
# Main
#
##############################
# Check for "-help"
if len(sys.argv)>1:
if sys.argv[1] == "-help":
if len(sys.argv)>2:
os.system( "scripthelp %s %s"%(sys.argv[0],sys.argv[2]) );
else:
os.system( "scripthelp %s"%sys.argv[0] );
sys.exit();
try:
(opts,pargs) = getopt.getopt(sys.argv[1:],"vd",["out=","unsafe"])
except:
print("%s: Invalid command line parameter" % sys.argv[0])
describeSelf();
sys.exit()
#Check calling syntax; parse args
if len(pargs) != 2 :
describeSelf()
sys.exit(1)
outDSName= None
unsafeFlag= 0
for a,b in opts:
if a=="-v":
setVerbose(1)
if a=="-d":
setDebug(1)
if a=="--out":
outDSName= b
if a=="--unsafe":
unsafeFlag= 1
if outDSName==None:
sys.exit("Required output dataset name not given.")
inDS= MRIDataset(os.path.abspath(pargs[0]))
inChunk= inDS.getChunk('images')
protoDS= MRIDataset(os.path.abspath(pargs[1]))
protoChunk= protoDS.getChunk('images')
#Check reasonableness of input
checkInputStructure(inChunk,unsafeFlag)
checkInputStructure(protoChunk,unsafeFlag)
# Create a temporary directory
tmpdir= makeTempDir('tmp_align_by_permutation')
homedir= os.getcwd()
# Get relevant dimensions
xdim= inChunk.getDim("x");
ydim= inChunk.getDim("y");
zdim= inChunk.getDim("z");
dimstr= inChunk.getValue('dimensions');
inBBox= BBox(inChunk)
protoBBox= BBox(protoChunk)
if getVerbose():
inBBox.printBounds("Input bounding box:")
protoBBox.printBounds("Prototype bounding box:")
inRHCoordTest= inBBox.zedge.dot(inBBox.xedge.cross(inBBox.yedge))
protoRHCoordTest= protoBBox.zedge.dot(protoBBox.xedge.cross(protoBBox.yedge))
if inRHCoordTest*protoRHCoordTest < 0.0 and not unsafeFlag:
sys.exit("Input and prototype coord systems don't have same handedness!")
inAxes= { "x":inBBox.xedge.clone(), \
"y":inBBox.yedge.clone(), \
"z":inBBox.zedge.clone() }
for v1 in ["x","y","z"]: inAxes[v1].normalize()
protoAxes= { "x":protoBBox.xedge.clone(), \
"y":protoBBox.yedge.clone(), \
"z":protoBBox.zedge.clone() }
for v1 in ["x","y","z"]: protoAxes[v1].normalize()
becomesMap= {}
usedToBeMap= {}
needsReversed= {}
for v1 in ["x","y","z"]:
largestDot= 0.0;
comp= None
for v2 in ["x","y","z"]:
val= inAxes[v1].dot(protoAxes[v2])
if math.fabs(val)>math.fabs(largestDot):
largestDot= val
comp= v2
debugMessage("%s matches %s, dot %f"%(v1,comp,largestDot))
becomesMap[v1]= comp
needsReversed[v1]= ( largestDot < 0 )
debugMessage("becomesMap: %s"%repr(becomesMap))
for v1 in becomesMap.keys():
usedToBeMap[becomesMap[v1]]= v1
debugMessage("usedToBeMap: %s"%repr(usedToBeMap))
debugMessage("needsReversed: %s"%repr(needsReversed))
debugMessage("inAxes: %s"%repr(inAxes))
debugMessage("protoAxes: %s"%repr(protoAxes))
newDimstr= usedToBeMap['x']+usedToBeMap['y']+usedToBeMap['z']
newExtents= "%d:%d:%d"%(inChunk.getDim(usedToBeMap['x']),\
inChunk.getDim(usedToBeMap['y']),\
inChunk.getDim(usedToBeMap['z']))
if dimstr.startswith('v'):
newDimstr= 'v'+newDimstr
newExtents= ":"+newExtents
if dimstr.endswith('t'):
newDimstr= newDimstr+'t'
newExtents= newExtents+":"
debugMessage("dimstr <%s> becomes <%s>, extents <%s>"%\
(dimstr,newDimstr,newExtents))
# Flip the axis vectors as appropriate
for v1 in ['x','y','z']:
if needsReversed[v1]: inAxes[v1]= -1.0*inAxes[v1]
# We will now use the needsReversed info to determine which data
# dimensions need to be flipped. There is a trick here, since the
# Y data dimension is opposite the Y coordinate dimension in Fiasco
# coordinates. Thus we first dink with the needsReversed info
# to correct for this.
if becomesMap['y'] != 'y':
needsReversed[usedToBeMap['y']]= ( not needsReversed[usedToBeMap['y']] )
needsReversed['y']= ( not needsReversed['y'] )
debugMessage("needsReversed after correction for data order: %s"%\
repr(needsReversed))
# Handle axis reversals via the double-fft trick
currentDSName= inDS.fname
if needsReversed['x']:
if needsReversed['y']:
# use xy fft
safeRun("mri_fft -d xy -fwd -cpx %s %s"%\
(currentDSName,os.path.join(tmpdir,"tmp1")))
safeRun("mri_fft -d xy -fwd -mod %s %s"%\
(os.path.join(tmpdir,"tmp1"),os.path.join(tmpdir,"tmp2")))
needsReversed['y']= 0
else:
# use x fft
safeRun("mri_fft -d x -fwd -cpx %s %s"%\
(currentDSName,os.path.join(tmpdir,"tmp1")))
safeRun("mri_fft -d x -fwd -mod %s %s"%\
(os.path.join(tmpdir,"tmp1"),os.path.join(tmpdir,"tmp2")))
currentDSName= os.path.join(tmpdir,"tmp2")
needsReversed['x']= 0
if not dimstr.startswith('v'):
safeRun("mri_remap -order %s %s"%(dimstr,currentDSName))
if needsReversed['y']:
if needsReversed['z']:
# use yz fft
safeRun("mri_fft -d yz -fwd -cpx %s %s"%\
(currentDSName,os.path.join(tmpdir,"tmp3")))
safeRun("mri_fft -d yz -fwd -mod %s %s"%\
(os.path.join(tmpdir,"tmp3"),os.path.join(tmpdir,"tmp4")))
needsReversed['z']= 0
else:
# use y fft
safeRun("mri_fft -d y -fwd -cpx %s %s"%\
(currentDSName,os.path.join(tmpdir,"tmp3")))
safeRun("mri_fft -d y -fwd -mod %s %s"%\
(os.path.join(tmpdir,"tmp3"),os.path.join(tmpdir,"tmp4")))
currentDSName= os.path.join(tmpdir,"tmp4")
needsReversed['y']= 0
if not dimstr.startswith('v'):
safeRun("mri_remap -order %s %s"%(dimstr,currentDSName))
if needsReversed['z']:
# use z fft
safeRun("mri_fft -d z -fwd -cpx %s %s"%\
(currentDSName,os.path.join(tmpdir,"tmp5")))
safeRun("mri_fft -d z -fwd -mod %s %s"%\
(os.path.join(tmpdir,"tmp5"),os.path.join(tmpdir,"tmp6")))
currentDSName= os.path.join(tmpdir,"tmp6")
needsReversed['z']= 0
if not dimstr.startswith('v'):
safeRun("mri_remap -order %s %s"%(dimstr,currentDSName))
debugMessage("inAxes now %s"%repr(inAxes))
if dimstr != newDimstr:
safeRun("mri_permute -order %s %s %s"%(newDimstr,currentDSName,outDSName))
safeRun("mri_remap -order %s -len %s %s"%(dimstr,newExtents,outDSName))
else:
safeRun("mri_copy_dataset %s %s"%(currentDSName,outDSName))
outDS= MRIDataset(outDSName)
outChunk= outDS.getChunk('images')
outBBox= BBox(outChunk)
outBBox.setCtr(inBBox.ctr)
outBBox.setVox([inChunk.getFloat("voxel_spacing.%s"%usedToBeMap['x']),\
inChunk.getFloat("voxel_spacing.%s"%usedToBeMap['y']),\
inChunk.getFloat("voxel_spacing.%s"%usedToBeMap['z'])])
outBBox.setCorners(inAxes[usedToBeMap['x']],\
inAxes[usedToBeMap['y']],\
inAxes[usedToBeMap['z']])
if getVerbose():
outBBox.printBounds("Output bounding box:")
outBBox.exportBounds()
# Clean up
os.chdir(homedir)
if not getDebug():
removeTmpDir(tmpdir)
| [
"welling@psc.edu"
] | welling@psc.edu |
e40d8657052e26d4cd67730ceea350b9fcbf5a6c | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/constant/ParamConstants.py | 1de953edff0cd37434ed88f9fc6c6feb2d8c34c5 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 938 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017-12-20
@author: liuqun
'''
# Names of the protocol-level ("common") parameters of an OpenAPI request.
P_APP_ID = "app_id"
P_METHOD = "method"
P_FORMAT = "format"
P_CHARSET = "charset"
P_SIGN_TYPE = "sign_type"
P_SIGN = "sign"
P_ENCRYPT_TYPE = "encrypt_type"
P_TIMESTAMP = "timestamp"
P_VERSION = "version"
P_NOTIFY_URL = "notify_url"
P_RETURN_URL = "return_url"
P_AUTH_TOKEN = "auth_token"
P_APP_AUTH_TOKEN = "app_auth_token"
# Every common parameter name, for quick membership tests.
# NOTE(review): P_BIZ_CONTENT (defined below) is NOT in this set — it carries
# the request payload rather than a protocol parameter; this mirrors the
# original behaviour and looks intentional, but confirm against SDK usage.
COMMON_PARAM_KEYS = {
    P_APP_ID,
    P_METHOD,
    P_FORMAT,
    P_CHARSET,
    P_SIGN_TYPE,
    P_SIGN,
    P_ENCRYPT_TYPE,
    P_TIMESTAMP,
    P_VERSION,
    P_NOTIFY_URL,
    P_RETURN_URL,
    P_AUTH_TOKEN,
    P_APP_AUTH_TOKEN,
}
P_BIZ_CONTENT = "biz_content"
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
ab6cd8266c32908502d62a2aa3848a27d9d5182b | 4014aa4a5ce0af0f10016b8fd056e26c147e8b42 | /stdlib/src/hmap/std/matching/topic_based/topic_types/flat_numbers.py | 4d48e4cc7bf94ad24eac0e2ff27f4449bf6692f1 | [
"MIT"
] | permissive | gregjhansell97/hive-map-python-3 | d09ac97a89a9cbddf26ab1c91f698d9e44941144 | d3d4f826f154a2aeea7e251266c221f629574b83 | refs/heads/master | 2020-07-31T12:23:55.983819 | 2020-04-28T23:52:49 | 2020-04-28T23:52:49 | 210,602,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import abstractmethod
import struct
from hmap.interface.matching.topic_based import HashableTopic
class FlatNumber(HashableTopic):
    """Numeric topic serialized as one fixed-width ``struct`` value.

    Subclasses select the wire representation by overriding ``fmt`` with a
    ``struct`` format string (e.g. ``"i"`` for a signed 32-bit int).
    """
    fmt = ""

    def __init__(self, content):
        self.__content = content
        # Pre-pack the wire form once so serialize() is a cheap lookup.
        self.__raw = struct.pack(self.fmt, content)

    @property
    def content(self):
        """The wrapped numeric value."""
        return self.__content

    def calcsize(self):
        """Size in bytes of the serialized form."""
        return struct.calcsize(self.fmt)

    @classmethod
    def serialize(cls, instance):
        """Return the packed bytes of *instance*."""
        return instance.__raw

    @classmethod
    def deserialize(cls, raw_data, lazy=False):
        """Rebuild an instance from *raw_data* (the *lazy* flag is unused here,
        kept for interface compatibility)."""
        value, = struct.unpack_from(cls.fmt, raw_data, 0)
        return cls(value)
# return remaining bytes
class FlatByte(FlatNumber):
    """Topic carrying a signed 8-bit integer."""
    fmt = "b"
class FlatUByte(FlatNumber):
    """Topic carrying an unsigned 8-bit integer."""
    fmt = "B"
class FlatInt(FlatNumber):
    """Topic carrying a signed 32-bit integer."""
    fmt = "i"
class FlatUInt(FlatNumber):
    """Topic carrying an unsigned 32-bit integer."""
    fmt = "I"
# hide parent class: only the concrete width-specific topics are public
__all__ = ["FlatByte", "FlatUByte", "FlatInt", "FlatUInt"]
| [
"gregjhansell@gmail.com"
] | gregjhansell@gmail.com |
f527305b9ec7f55ab42eaae2b923b7b8173df7bb | f9ad5d9e0faef9218c872ded0bfe46a8f8fec64d | /MuSLIT/__init__.py | e00cfd607ded333c274a40d6dfbd7ac30e237d87 | [
"MIT"
] | permissive | aymgal/MuSLIT | 286505de31407749f1e430e756e8b9712b405ee8 | 053fc2c0804630f1bf028fecc286cb996161b107 | refs/heads/master | 2020-05-21T08:59:34.065921 | 2019-05-27T06:43:35 | 2019-05-27T06:43:35 | 185,987,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | #print(
#"""
# __ __ _____ _ _____ _______
# | \/ | / ____| | |_ _|__ __|
# | \ / |_ _| (___ | | | | | |
# | |\/| | | | |\___ \| | | | | |
# | | | | |_| |____) | |____ _| |_ | |
# |_| |_|\__,_|_____/|______|_____| |_|
#""") | [
"aymeric.galan@gmail.com"
] | aymeric.galan@gmail.com |
6552dea2d2667854202895aec4f0df5259855cbc | b0f6dbd92c368bd68fa1aafd67fdde9c323ab1be | /config.py | 578b0ee4e0b9ed526e8784e67ae9a7c91b5a685d | [
"Apache-2.0"
] | permissive | niezhongliang/InsightFace-v3 | ac62cff7d4aeb957fac9189ccca26976f9a045e9 | e10cefec3bf0c465c92c42980ecbdb32eacc6dd5 | refs/heads/master | 2020-09-15T20:36:16.087481 | 2019-11-23T00:23:46 | 2019-11-23T00:23:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | import logging
import os
import torch
# Compute device for the model and all tensors; falls back to CPU without CUDA.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
# Model parameters
image_w = 112
image_h = 112
channel = 3
emb_size = 512
# Training parameters
num_workers = 8 # for data-loading; right now, only 1 works with h5py
grad_clip = 5. # clip gradients at an absolute value of
print_freq = 100 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
# Data parameters (MS1M face dataset, presumably 112x112 crops — matches image_w/h)
num_classes = 85164
num_samples = 3804846
DATA_DIR = 'data'
faces_ms1m_folder = 'data/faces_ms1m_112x112'
path_imgidx = os.path.join(faces_ms1m_folder, 'train.idx')
path_imgrec = os.path.join(faces_ms1m_folder, 'train.rec')
IMG_DIR = 'data/images'
pickle_file = 'data/faces_ms1m_112x112.pickle'
def get_logger():
    """Return the root logger configured with a single stream handler.

    Fix: the previous version attached a brand-new StreamHandler on every
    call, so calling get_logger() more than once produced duplicated log
    lines.  The handler is now only added when the root logger has none.
    """
    logger = logging.getLogger()
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
logger = get_logger()
| [
"liuyang12@focusmedia.cn"
] | liuyang12@focusmedia.cn |
b21d7ae16d254971cd389bdd392cf26f67dd2b89 | 1c9eb7448d899d2d904695cd7412e6702ada2210 | /src/intranet3/intranet3/schemas/times.py | 5f676853c1f769063db91e20d059e61b95046d58 | [
"MIT"
] | permissive | woliveira0101/intranet-open | 28ca7100300aa11203bed90edd6a9a686dcb0345 | 0255227e7133d1c34ab265e1418f9a409370ea3a | refs/heads/master | 2020-03-18T00:46:44.436819 | 2014-05-23T06:24:57 | 2014-05-23T06:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | # coding: utf-8
from colander import MappingSchema, SchemaNode, Boolean, Float, Integer, \
String, Range, Invalid
class TimeObject(object):
    """
    Time format: float `1.2` or `HH:MM`
    """
    def deserialize(self, node, cstruct):
        """Convert *cstruct* into a number of hours (float).

        Accepts a float, a decimal string (comma or dot separator, e.g.
        "3,5"), or an "HH:MM" string.  Raises ``colander.Invalid`` on any
        malformed or out-of-range value.

        Fixes over the original:
        - the HH:MM result was unconditionally overwritten by the decimal
          string branch afterwards, so "HH:MM" input always failed with
          "Time must be a float or HH:MM";
        - two ``Invalid`` calls were missing the ``node`` argument.
        """
        if not isinstance(cstruct, float) and not isinstance(cstruct, basestring):
            raise Invalid(node, "Invalid format: float or HH:MM")
        if isinstance(cstruct, float):
            return cstruct
        if cstruct.count(':'):
            # "HH:MM" branch
            try:
                h, min = [int(i) for i in cstruct.split(':')]
            except ValueError:
                raise Invalid(node, "Time must be a number in format HH:MM")
            if h < 0 or min < 0:
                raise Invalid(node, "Hours and minutes must be a positive number")
            if h >= 24:
                raise Invalid(node, "Hours can not be greater or equal than 24")
            if min >= 60:
                raise Invalid(node, "Minutes can not be greater or equal than 60")
            return h + (float(min) / 60.0)
        # Decimal string branch, e.g. "3.5" or "3,5"
        try:
            return float(cstruct.replace(',', '.'))
        except ValueError:
            raise Invalid(node, "Time must be a float or HH:MM")
class TicketObject(object):
    """Colander type that accepts a ticket id as either a string or an int."""
    def deserialize(self, node, cstruct):
        # Pass the value through unchanged; only its type is validated.
        # NOTE: `basestring` — this module is Python 2 (str and unicode both pass).
        if isinstance(cstruct, basestring) or isinstance(cstruct, int):
            return cstruct
        raise Invalid(node, "Ticket should be String or Int")
class AddEntrySchema(MappingSchema):
    """Validation schema for creating a time-tracking entry."""
    project_id = SchemaNode(Integer())
    ticket_id = SchemaNode(TicketObject())
    # Time is given in hours; must stay within a single day.
    time = SchemaNode(TimeObject(), validator=Range(0.0, 24.00))
    description = SchemaNode(String())
    timer = SchemaNode(Boolean())
    add_to_harvest = SchemaNode(Boolean())
class EditEntrySchema(MappingSchema):
    """Validation schema for editing an existing time-tracking entry."""
    project_id = SchemaNode(Integer())
    ticket_id = SchemaNode(TicketObject())
    # Time is given in hours; must stay within a single day.
    time = SchemaNode(TimeObject(), validator=Range(0.0, 24.00))
    description = SchemaNode(String())
| [
"meverone@gmail.com"
] | meverone@gmail.com |
b74901c63d3af37488255a82873b37db241683e2 | b773d8fd9953b53cdb292aeebe1e0089f3e56b25 | /Pacing_project/pacing_class_tz.py | 52993a27b529520a4616bc4484ec099916d3d9e0 | [] | no_license | displayce/Displayce_internship | 723f9fe2fd4dde8ad958199ef0fe30a4a2b54969 | 7ac2da6f41d617ca23a6f801fd0e991c1042ac95 | refs/heads/master | 2022-12-22T00:18:44.126198 | 2020-09-30T14:01:54 | 2020-09-30T14:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,534 | py | from datetime import datetime
from datetime import timedelta
import pandas as pd
import pytz
import itertools
from statsmodels.formula.api import ols
# Temporal pacing class
class Pacing:
""" The temporal pacing algorithm class
"""
    def __init__(self, total_budget, start_date, end_date, timezone):
        """Class constructor.

        :param total_budget: budget to spend over the whole campaign
        :param start_date: naive datetime of the campaign start
        :param end_date: naive datetime of the campaign last day (inclusive)
        :param timezone: pytz time zone name this instance operates in
        """
        # Fixed attributes; end_date is pushed one day forward so the whole
        # last day is inside the campaign window.
        self.tz = pytz.timezone(timezone)
        self.start_date = self.tz.localize(start_date)
        self.end_date = self.tz.localize(end_date + timedelta(days=1))
        self.total_days = (self.end_date - self.start_date).days
        # Raw observations feeding the hourly-proportion linear regression
        self.buffer = list()
        self.buffer_data = pd.DataFrame()
        # Budget bookkeeping
        self.remaining_days = (self.end_date - self.start_date).days
        self.budget_objective = total_budget
        self.budget_engaged = 0
        self.budget_spent_total = 0
        self.budget_remaining = self.budget_objective - (
            self.budget_engaged + self.budget_spent_total)
        self.current_hour = -1  # -1 forces change_hour() on the first BR
        self.budget_remaining_hourly = 0
        self.budget_daily = self.budget_remaining / self.remaining_days
        self.surplus_hour = 0
        self.bs_history = [0]
        self.ongoing_br = {}  # br_id -> engaged price, awaiting win/lose
        # 30-minute sliding windows of budget-per-second variation (speed)
        # and its variation (acceleration); seeded with a zero sample.
        self.acceleration = [{'ts': self.start_date,
                              'A': 0}]
        self.speed = [{'ts': self.start_date,
                       'S': 0}]
        self.size_acceleration = 1
        self.sum_acceleration = 0
        self.size_speed = 1
        self.sum_speed = 0
        # With an empty buffer this yields the uniform 1/24 profile.
        self.prop_table, self.unif, self.without_weekday = self.meta_prop(self.buffer_data)
        # Setup variables to begin pacing
        self.day = self.start_date.day
        self.nb_br = 0
        self.nb_buy = 0
        self.prop_purchase = 0
        self.spent_per_sec = 0
        self.surplus_hour = 0
        self.spent_hour = 0
        # Flag variables
        self.new_objective = None   # set when check_proportion shrinks the objective
        self.trigger_count = False  # True once an hour of traffic was observed
        self.block_increase = False # True once this tz's objective was lowered
        self.first_br = True
        self.first_day = True
# Functions for the linear regression
    @staticmethod
    def gen_prop_lr(br_object):
        """Linear regression with hours and weekdays.

        :param br_object: historical bid-request data (datetime index, 'imps' column)
        :return: DataFrame of shape (24 hours x 7 weekdays); each weekday
            column is normalised to sum to 100 (percentages per hour)
        """
        # Total impressions per (date, weekday, hour) bucket.
        aggr = br_object.imps.groupby([br_object.index.date, br_object.index.weekday, br_object.index.hour]).sum()
        aggr.index.names = ['date', 'weekday', 'hour']
        # Here we keep the date column but note that it is irrelevant to the model below.
        aggr = aggr.reset_index()
        # Additive OLS model with weekday and hour as categorical factors.
        model = ols('imps ~ C(weekday) + C(hour)', data=aggr).fit()
        # Build the full 7x24 grid of (weekday, hour) combinations to predict on.
        weekday_list = range(7)
        weekday_list = list(itertools.chain.from_iterable(itertools.repeat(x, 24) for x in weekday_list))
        hour_list = list()
        for i in range(7):
            for z in range(24):
                hour_list.append(z)
        df_fitting = pd.DataFrame({'weekday': weekday_list, 'hour': hour_list})
        prediction = model.predict(df_fitting)
        df_fitting['fitted'] = prediction
        # Pivot to hours x weekdays, then turn each weekday column into percentages.
        pattern = df_fitting.pivot_table('fitted', index=df_fitting.hour, columns=df_fitting.weekday)
        line, col = pattern.shape
        for i in range(col):
            pattern.iloc[:, i] = pattern.iloc[:, i] * 100 / pattern.iloc[:, i].sum()
        return pattern
    @staticmethod
    def gen_prop_lr_hour(br_object):
        """Linear regression with only hours.

        :param br_object: historical bid-request data (datetime index, 'imps' column)
        :return: single-column DataFrame indexed by hour (0-23); the 'fitted'
            column is normalised to sum to 100 (percentages per hour)
        """
        # Total impressions per (date, hour) bucket.
        aggr = br_object.imps.groupby([br_object.index.date, br_object.index.hour]).sum().reset_index()
        aggr.columns = ['date', 'hour', 'imps']
        # OLS model with hour as a categorical factor.
        model = ols('imps ~ C(hour)', data=aggr).fit()
        hour_list = list()
        for z in range(24):
            hour_list.append(z)
        df_fitting = pd.DataFrame({'hour': hour_list})
        prediction = model.predict(df_fitting)
        df_fitting['fitted'] = prediction
        # Re-index by hour and normalise the fitted values to percentages.
        df_fitting.index = df_fitting.hour
        del df_fitting['hour']
        df_fitting.iloc[:, 0] = df_fitting.iloc[:, 0] * 100 / df_fitting.iloc[:, 0].sum()
        return df_fitting
def meta_prop(self, data):
""" Give the proportion of impressions per hour. The output type depends on the input.
:param data: a dataframe with a datetime as index
:return: an integer, a Serie or a Dataframe
"""
if data.empty or set(data.index.hour.unique()) != set(range(24)):
unif = True
without_weekday = True
prop = 1 / 24
else:
if set(data.index.weekday.unique()) != set(range(7)):
unif = False
without_weekday = True
prop = self.gen_prop_lr_hour(data)
else:
unif = False
without_weekday = False
prop = self.gen_prop_lr(data)
return prop, unif, without_weekday
    # Function to reset variables when we start a new day
    def day_reset(self, ts):
        """ Reset variables when there is a new day
        :param ts: timestamp of the first bid request of the new day
        """
        day = ts.day
        month = ts.month
        year = ts.year
        self.remaining_days = (self.end_date - ts).days + 1  # +1 because we have to take the end of the day
        if not self.buffer:
            # No observations yet: keep an empty frame (meta_prop then falls
            # back to the uniform profile).
            self.buffer_data = pd.DataFrame.from_records(self.buffer)
        else:
            self.first_day = False
            self.buffer_data = pd.DataFrame.from_records(self.buffer, index='Date')
        # Reinitialise some variables
        self.current_hour = -1
        self.budget_remaining_hourly = 0
        self.budget_daily = self.budget_remaining / self.remaining_days
        self.surplus_hour = 0
        self.bs_history = [0]
        # Speed/acceleration sliding windows restart at local midnight.
        self.acceleration = [{'ts': self.tz.localize(datetime(year, month, day, 0, 0, 0)),
                              'A': 0}]
        self.speed = [{'ts': self.tz.localize(datetime(year, month, day, 0, 0, 0)),
                       'S': 0}]
        self.size_acceleration = 1
        self.sum_acceleration = 0
        self.size_speed = 1
        self.sum_speed = 0
        # Re-fit the hourly proportion model on everything observed so far.
        self.prop_table, self.unif, self.without_weekday = self.meta_prop(self.buffer_data)
    # Function when we change hour
    def change_hour(self, weekday):
        """ Reset budget for the following hour
        :param weekday: week day integer (0=Monday .. 6=Sunday)
        """
        self.current_hour += 1
        self.remaining_hours = 24 - self.current_hour
        # Evolutive target: budget left unspent in the previous hour is spread
        # evenly over the remaining hours of the day.
        self.surplus_hour += self.budget_remaining_hourly / self.remaining_hours
        if self.unif:
            # Uniform fallback: prop_table is the scalar 1/24.
            self.budget_hour = (self.prop_table * self.budget_daily) + self.surplus_hour
        elif self.without_weekday and not self.unif:
            # Hour-only model: single-column percentage table.
            self.budget_hour = (self.prop_table.iloc[
                self.current_hour, 0] / 100) * self.budget_daily + self.surplus_hour
        else:
            # Full model: hours x weekdays percentage table.
            self.budget_hour = (self.prop_table.iloc[
                self.current_hour, weekday] / 100) * self.budget_daily + self.surplus_hour
        # Target spend rate for this hour, in budget per second.
        self.target = self.budget_hour / 3600
        self.spent_hour = 0
        self.budget_remaining_hourly = self.budget_hour - self.spent_hour
# Mean of budget variation the last 30 minutes
def gen_mean_speed(self):
""" Return the moving average of variation of the budget per second over 30 minutes
"""
created_time = self.speed[-1]['ts'] - timedelta(minutes=30)
while self.speed[0]['ts'] < created_time:
self.size_speed -= 1
self.sum_speed += self.speed[0]['S']
del self.speed[0]
try:
average = self.sum_speed / self.size_speed
except ZeroDivisionError:
average = 0
return average
# Mean of speed of variation the last 30 minutes
def gen_mean_acceleration(self):
""" Return the moving average of the speed of variation of the budget per second over 30 minutes
"""
created_time = self.acceleration[-1]['ts'] - timedelta(minutes=30)
while self.acceleration[0]['ts'] < created_time:
self.size_acceleration -= 1
self.sum_acceleration += self.acceleration[0]['A']
del self.acceleration[0]
try:
average = self.sum_acceleration / self.size_acceleration
except ZeroDivisionError:
average = 0
return average
# Build the dataframe for the linear regression
def build_data_prop(self, ts, imps):
""" Build Dataframe for the proportion per hour linear regression
:param ts: current timestamp
:param imps: number of impressions
"""
self.buffer.append({'Date': ts, 'imps': imps})
def bs_calculation(self, average_acceleration, average_speed, remaining_time, coef=1):
""" Calculate the available budget per second
:param average_acceleration: mean of acceleration over 30 min
:param average_speed: mean of speed over 30 min
:param remaining_time: remaining time in seconds before the end of the hour
:param coef: importance of speed and variation in the formula (default is one)
"""
alpha = average_acceleration * coef
try:
bs = self.budget_remaining_hourly * ((1 + alpha * average_speed) / remaining_time)
except ZeroDivisionError:
bs = 1
if bs < 0:
bs = 1
return bs
    # Function to make the buying decision. This is the main function of the pacing class algorithm
    def buying_decision(self, ts, price, imps, br_id):
        """From a BR, decide whether to buy or not
        :param ts: timestamp of the BR (localized to this instance's tz)
        :param price: price of the BR
        :param imps: number of impressions
        :param br_id: id of bid request
        :return: Boolean
        """
        # If we have spent all budget then we will never buy
        if self.budget_remaining <= 0:
            buying = False
            return buying
        # Reject malformed bid requests (negative price or impressions)
        if price < 0:
            return False
        if imps < 0:
            return False
        if self.first_br:
            self.first_br = False
            self.ts_first_br = datetime.timestamp(ts)
        # After one hour of observed traffic, check_proportion may kick in
        if datetime.timestamp(ts) - self.ts_first_br >= 3600:
            self.trigger_count = True
        # Timestamp fields of the BR
        self.weekday = ts.weekday()
        day = ts.day
        month = ts.month
        year = ts.year
        hour = ts.hour
        # If we begin a new day, we reset variables
        if self.day != day:
            self.day_reset(ts)
            self.day = day
        # Change of hour: catch up one hour at a time (current_hour starts at -1)
        while hour != self.current_hour:
            self.change_hour(self.weekday)
        # Record the observation for the hourly-proportion regression
        self.build_data_prop(ts, imps)
        # Remaining time before the end of the hour
        end_hour = self.tz.localize(datetime(year, month, day, hour, 59, 59, 999999))
        remaining_time = datetime.timestamp(end_hour) - datetime.timestamp(ts)
        # Calculation of the budget per second (bs)
        average_acceleration = self.gen_mean_acceleration()
        average_speed = self.gen_mean_speed()
        self.bs = self.bs_calculation(average_acceleration, average_speed, remaining_time)
        # vt = first difference of bs (speed), at = first difference of vt (acceleration)
        self.bs_history.append(self.bs)
        vt = self.bs_history[-1] - self.bs_history[-2]
        self.speed.append({'ts': ts,
                           'S': vt})
        self.size_speed += 1  # We calculate the size without using len() for micro optimisation
        at = self.speed[-1]['S'] - self.speed[-2]['S']
        self.acceleration.append({'ts': ts,
                                  'A': at})
        self.size_acceleration += 1  # We calculate the size without using len() for micro optimisation
        # Buying decision: buy when the allowed spend rate reaches the hourly
        # target and the hour's budget can absorb the price
        if (self.bs >= self.target) and (self.budget_remaining_hourly - price) >= 0:
            buying = True
            self.budget_engaged += price
            self.spent_hour += price
            self.nb_buy += 1
            self.ongoing_br[br_id] = price
        else:
            buying = False
        self.budget_remaining_hourly = self.budget_hour - self.spent_hour
        self.budget_remaining = self.budget_objective - (
            self.budget_engaged + self.budget_spent_total)
        if self.budget_remaining < 0:
            self.budget_remaining = 0
        self.nb_br += 1
        # Check proportion of bought br (may propose a lower spend objective)
        self.new_objective = self.check_proportion(ts)
        return buying
    # Proportion of buying
    def check_proportion(self, ts):
        """ Check if the algorithm needs to buy a high volume of bid requests to reach the objective
        :param ts: current timestamp
        :return: New objective if we have to lower the budget or None if it is already ok
        """
        self.prop_purchase = self.nb_buy / self.nb_br
        # Only consider shrinking after the first day, once at least one hour
        # of traffic has been observed, and when we buy >= 70% of requests.
        if not self.first_day and self.trigger_count and self.prop_purchase >= 0.7:
            elapsed_time = datetime.timestamp(ts) - datetime.timestamp(self.start_date)
            spent_per_sec = self.budget_spent_total / elapsed_time
            remaining_time = datetime.timestamp(self.end_date) - datetime.timestamp(ts)
            # Extrapolate the current spend rate to the end of the campaign,
            # with a 15% safety margin.
            new_objective = (spent_per_sec * remaining_time) * 0.85
            if (self.budget_spent_total + self.budget_engaged) < new_objective < self.budget_objective:
                self.block_increase = True
                self.trigger_count = False
                self.ts_first_br = datetime.timestamp(ts)
                return new_objective
    # Function to reset the spend objective
    def reallocate_budget(self, new_budget):
        """ This function handles a reset of the budget that needs to be spent
        :param new_budget: new total budget objective for this instance
        """
        self.budget_objective = new_budget
        self.budget_remaining = self.budget_objective - (
            self.budget_engaged + self.budget_spent_total)
        if self.budget_remaining < 0:
            self.budget_remaining = 0
        self.budget_daily = self.budget_remaining / self.remaining_days
        # Recompute the current hour's budget with the same branching as
        # change_hour(), using the new daily budget.
        if self.unif:
            self.budget_hour = (self.prop_table * self.budget_daily) + self.surplus_hour
        elif self.without_weekday and not self.unif:
            self.budget_hour = (self.prop_table.iloc[
                                    self.current_hour, 0] / 100) * self.budget_daily + self.surplus_hour
        else:
            self.budget_hour = (self.prop_table.iloc[
                                    self.current_hour, self.weekday] / 100) * self.budget_daily + self.surplus_hour
        self.target = self.budget_hour / 3600
        # The hour's spend counter restarts with the new objective.
        self.spent_hour = 0
        self.budget_remaining_hourly = self.budget_hour - self.spent_hour
# Function to handle the reception of a notification
def receive_notification(self, status, br_id):
""" From a notification, take into account the status (win/lose)
:param status: 'win' or 'lose'
:param br_id: id of the bid request
"""
br_price = self.ongoing_br[br_id]
if status == 'win':
self.budget_engaged -= br_price
self.budget_spent_total += br_price
elif status == 'lose':
self.budget_engaged -= br_price
self.spent_hour -= br_price
self.budget_remaining = self.budget_objective - (
self.budget_engaged + self.budget_spent_total)
del self.ongoing_br[br_id]
# Class to create handle different time zones. It allows a dynamic budget reallocation between instances
class GlobalPacing(object):
    def __init__(self, total_budget, start_date, end_date):
        """Create the dispatcher that shares *total_budget* across time zones.

        :param total_budget: overall line-item budget (must be >= 0)
        :param start_date: campaign start (naive datetime; localized per tz later)
        :param end_date: campaign end (naive datetime, not before start_date)
        """
        # Raise errors in parameters
        if total_budget < 0:
            raise ValueError("Budget cannot be negative!")
        if start_date > end_date:
            raise ValueError("Start date cannot be later than end date!")
        self.total_budget = total_budget
        self.start_date = start_date
        self.end_date = end_date
        self.tz_list = []  # every time zone met so far
        self.tz_objective = []  # time zones still eligible for extra budget
        self.timezones = {}  # br_id -> time zone of the engaged bid request
        self.instances = {}  # time zone -> Pacing instance
# If we need to change the setup
def update_budget(self, new_budget):
"""This function allows to change the budget allocated initially to the line item
:param new_budget: new budget to be spent
:return:
"""
self.total_budget = new_budget
budget_tz = self.total_budget / len(self.tz_list)
for key in self.tz_list:
self.instances[key].reallocate_budget(budget_tz)
    # When we receive a bid request from a timezone that we have never met
    def new_instance(self, new_tz):
        """ Generate a new instance of the temporal pacing class when there is a new time zone
        :param new_tz: name of the new timezone
        """
        if len(self.instances) == 0:
            # First time zone ever: it gets the whole budget.
            self.tz_list.append(new_tz)
            self.tz_objective.append(new_tz)
            self.instances[new_tz] = Pacing(total_budget=self.total_budget,
                                            start_date=self.start_date,
                                            end_date=self.end_date, timezone=new_tz)
        else:
            # Re-split the total budget evenly over the existing instances
            # plus the new one.
            budget_tz = self.total_budget / (len(self.tz_list) + 1)
            self.instances[new_tz] = Pacing(total_budget=budget_tz,
                                            start_date=self.start_date,
                                            end_date=self.end_date, timezone=new_tz)
            for key in self.tz_list:
                self.instances[key].reallocate_budget(budget_tz)
            self.tz_list.append(new_tz)
            self.tz_objective.append(new_tz)
    # Main function to select the good pacing instance and make the buying decision
    def choose_pacing(self, ts, tz, cpm, imps, br_id):
        """ Select the pacing instance of the BR's time zone and return its buying decision
        :param ts: POSIX timestamp of the br
        :param tz: time zone name of the br
        :param cpm: cost per mille of the br
        :param imps: number of impressions
        :param br_id: id of the br
        :return: tuple (buying, budget_remaining, spent_budget,
            budget_engaged, objective, prop_purchase)
        """
        # CPM is a price per 1000 impressions.
        price = (imps * cpm) / 1000
        local_date = datetime.fromtimestamp(ts, tz=pytz.timezone(tz))
        # Before the campaign?
        if local_date < pytz.timezone(tz).localize(self.start_date):
            raise ValueError("BR before campaign start date")
        # After the campaign (end date is inclusive, hence the +1 day)?
        if local_date > pytz.timezone(tz).localize(self.end_date + timedelta(days=1)):
            raise ValueError("BR after campaign end date")
        if tz not in self.instances.keys():
            self.new_instance(tz)
        buying = self.instances[tz].buying_decision(local_date, price, imps, br_id)
        if buying:
            # Remember which instance engaged this BR for later notifications.
            self.timezones[br_id] = tz
        budget_remaining = self.instances[tz].budget_remaining
        spent_budget = self.instances[tz].budget_spent_total
        budget_engaged = self.instances[tz].budget_engaged
        prop = self.instances[tz].prop_purchase
        # If the instance proposed a lower objective, redistribute the surplus.
        if self.instances[tz].new_objective is not None:
            self.set_new_objectives(self.instances[tz].budget_objective,
                                    self.instances[tz].new_objective, tz)
        objective = self.instances[tz].budget_objective
        return buying, budget_remaining, spent_budget, budget_engaged, objective, prop
    # Function called when we have to set new objectives of spend
    def set_new_objectives(self, old_budget, new_budget, tz):
        """ Allow to dynamically reallocate budget between time zones
        :param old_budget: budget objective before the change
        :param new_budget: new (lower) budget objective for *tz*
        :param tz: timezone whose objective is being lowered
        """
        for key in self.tz_list:
            # Time zones whose objective was already lowered must not receive
            # extra budget: drop them from the eligible list.
            if self.instances[key].block_increase:
                try:
                    self.tz_objective.remove(key)
                except ValueError:
                    pass
        # We check if there is at least one timezone to dispatch the surplus budget
        if len(self.tz_objective) > 0:
            surplus_budget = (old_budget - new_budget) / len(self.tz_objective)
            self.instances[tz].reallocate_budget(new_budget)
            for key in self.tz_objective:
                self.instances[key].reallocate_budget(self.instances[key].budget_objective +
                                                      surplus_budget)
# Function to call the good instance when we receive a notification
def dispatch_notifications(self, br_id, status):
"""Dispatch notifications to the good instances of pacing
:param br_id: id of the br
:param status: 'win' or 'lose'
"""
tz = self.timezones[br_id]
del self.timezones[br_id]
self.instances[tz].receive_notification(status, br_id)
# Function to see the current spent of all time zones
def pacing_performance(self):
"""Function that return a list of expenditure of each time zone
"""
spents = []
for key in self.tz_list:
spents.append(self.instances[key].budget_spent_total)
return spents
| [
"th.maurice.pro@gmail.com"
] | th.maurice.pro@gmail.com |
756c17137cf944f47d73b541ac0eb7a1536e56c3 | 77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5 | /namsookim/SWEA/1주차/5653(줄기세포배양).py | 85a928f852dcc787747546ac319806c3800cc88a | [] | no_license | chelseashin/AlgorithmStudy2021 | 786f03c4c17bc057518d428481e7d710d24ec98e | 1a4744a621ed25715fc9060c5224f0b1092d9c00 | refs/heads/master | 2023-06-22T22:27:47.289806 | 2021-07-28T02:54:22 | 2021-07-28T02:54:22 | 326,441,667 | 1 | 5 | null | 2021-06-29T01:27:40 | 2021-01-03T15:44:16 | Python | UTF-8 | Python | false | false | 3,085 | py | # https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWXRJ8EKe48DFAUo
import sys
import copy
from collections import deque
def print_array(array):
    """Debug helper: print a rectangular 2-D grid, one row per line,
    each cell followed by a single space."""
    height, width = len(array), len(array[0])
    for r in range(height):
        for c in range(width):
            print(array[r][c], end=' ')
        print()
# Read grid size (N rows, M cols) and simulation time K, then embed the
# N x M input grid into a zero-padded (N+2K) x (M+2K) board so cells can
# spread outward for K hours without any bounds checks.
N, M, K = map(int, input().split())
max_array = [[0] * (M + (2 * K)) for _ in range(N + (2 * K))]
array = [list(map(int, input().split())) for _ in range(N)]
for i in range(K, K + N):
    for j in range(K, K + M):
        # BUG FIX: the original indexed array[K-i][K-j]; for i > K that is a
        # negative index, which copies rows/cols 1..N-1 in reversed order and
        # scrambles cell adjacency. The padded board must receive the input
        # grid in its original orientation.
        max_array[i][j] = array[i - K][j - K]
def active_check(non_active_cell, active_cell):
    """Age every inactive cell by one hour.

    Cells whose countdown reached 0 are moved into ``active_cell`` with the
    countdown reset to their life value (they stay active that long);
    all other cells are returned with the countdown decremented by one.
    """
    still_waiting = []
    for cx, cy, life, ticks in non_active_cell:
        if ticks == 0:
            active_cell.append((cx, cy, life, life))
        else:
            still_waiting.append((cx, cy, life, ticks - 1))
    return still_waiting
# Offsets of the four orthogonal neighbours (paired element-wise):
# right, left, down, up.
dx, dy = [0, 0, 1, -1], [1, -1, 0, 0]
def bfs(array, active_cell, non_active_cell):
    """Run one hour of breeding: every active cell claims its four empty
    neighbours, then dies.

    array           -- current grid (0 empty, -1 dead, >0 a cell's life value)
    active_cell     -- FIFO of (x, y, life, time) cells breeding this hour;
                       (-1, -1, ...) entries are already-processed sentinels
    non_active_cell -- list of (x, y, life, countdown) inactive cells; newly
                       bred cells are appended here as (x, y, life, life)
    Returns (new grid, leftover active cells). The second item is always
    empty here; `solve` refills active_cell via active_check instead.
    """
    # Breed against a snapshot so all cells act on the same state;
    # dead (-1) squares in the copy are left untouched.
    new_array = copy.deepcopy(array)
    remain_active_cell = []
    while active_cell:
        x, y, index, time = active_cell.pop(0)
        if x == -1 and y == -1:  # sentinel for an already-processed cell
            continue
        # A cell dies right after its active hour; mark it dead now since
        # it can never breed again.
        new_array[x][y] = -1
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            # The K-wide zero padding guarantees nx/ny stay inside the board.
            if array[nx][ny] != 0:  # occupied or dead in the snapshot
                continue
            if new_array[nx][ny] == 0:  # still free this hour: claim it
                new_array[nx][ny] = index
                non_active_cell.append((nx, ny, index, index))
            elif new_array[nx][ny] > 0:  # contended this hour: higher life wins
                if new_array[nx][ny] < index:
                    # BUG FIX: the losing cell was appended this pass as
                    # (nx, ny, v, v) with v = new_array[nx][ny]. The original
                    # removed (nx, ny, array[nx][ny], index) == (nx, ny, 0, index)
                    # (array[nx][ny] is 0 on this branch), which is never in
                    # the list and raised ValueError on any contention.
                    loser = new_array[nx][ny]
                    non_active_cell.remove((nx, ny, loser, loser))
                    new_array[nx][ny] = index
                    non_active_cell.append((nx, ny, index, index))
    return new_array, remain_active_cell
def solve(array):
    """Simulate K hours of stem-cell culture on the padded grid and print
    the number of surviving cells (active + inactive)."""
    active_cell = []
    non_active_cell = []
    # Seed: every non-empty square starts inactive with countdown == life.
    for i in range(len(array)):
        for j in range(len(array[0])):
            if array[i][j] !=0:
                non_active_cell.append((i,j,array[i][j],array[i][j]))
    for t in range(K+1): # after K hours
        # Apply one hour: active cells breed into the snapshot grid.
        new_array,remain_cell = bfs(array, active_cell, non_active_cell) # cells breed, then die
        array = new_array
        # NOTE(review): bfs currently always returns an empty remain_cell,
        # so this loop is a no-op — confirm it is kept only for symmetry.
        for x,y,index,time in remain_cell:
            active_cell.append((x,y,index,time)) # carry over leftover active cells
        # Age the inactive cells; those whose countdown hit 0 become active
        # and will breed on the next pass.
        non_active_cell=active_check(non_active_cell,active_cell)
    # Print the total number of cells still alive.
    print(len(active_cell)+len(non_active_cell))
solve(max_array) | [
"aowlrehrk3@naver.com"
] | aowlrehrk3@naver.com |
b6ddfd1034f68fcb04d7dd7367c60d64d74c567f | 0da8fdae806b73e9dc57e052dcf1171c5a2c7f28 | /01_Python基础/05_高级数据类型/study_17_字符串的查找和替换.py | 21ac39e23afe3709e5be97d72fd7c073e80aff77 | [] | no_license | xujinshan361/python_study_code | ed37db128c55ee2ad9f7b2db04785c632a7115d4 | e6ce0bdd8243dfaadf56213ef0120d215de0d0cd | refs/heads/master | 2020-12-10T12:19:45.792310 | 2020-01-13T12:48:22 | 2020-01-13T12:48:22 | 233,592,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | hello_str = "hello word"
# 1. Check whether the string starts with a given prefix.
print(hello_str.startswith("he"))
# 2. Check whether the string ends with a given suffix.
print(hello_str.endswith("word"))
# 3. Search for a substring.
# index can also find the position of a substring inside the larger string
print(hello_str.find("lo"))
# index raises an error if the substring does not exist
# find returns -1 if the substring does not exist
# print(hello_str.index("abc"))
print(hello_str.find("abc"))
# 4. Replace a substring.
# replace returns a new string when it finishes
# NOTE: it does not modify the original string (strings are immutable)
print(hello_str.replace("word", "python"))
print(hello_str)
| [
"xujinshan361@163.com"
] | xujinshan361@163.com |
35ffcbdb4fcd1a28e57c02cab3f847dfaea2a016 | 508eff345eb4b7fd6040d6872f5ae626956deb27 | /samples/seal/inspect_seal_model.py | 386db48cd9a964a2eb8d23436b0247c6c09ef6a7 | [
"MIT"
] | permissive | xuannianc/Mask_RCNN | 7c9e2ed5b3d245cd9a7b42319c61a0aa83ddb295 | c942d5cf68508dd0e22d56a6eb25f8a30a090bda | refs/heads/master | 2020-03-30T00:52:43.433219 | 2018-12-03T05:27:30 | 2018-12-03T05:27:30 | 150,548,502 | 0 | 0 | null | 2018-09-27T07:41:57 | 2018-09-27T07:41:56 | null | UTF-8 | Python | false | false | 26,591 | py | import os
import os.path as osp
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Root directory of the project
import glob
# ROOT_DIR = os.path.abspath("../../")
ROOT_DIR = os.path.abspath("/home/adam/workspace/github/Mask_RCNN")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
from samples.seal import seal
from samples.seal.seal import remove2
import cv2
# Directory of the seal sample (this script's package).
SEAL_DIR = os.path.join(ROOT_DIR, 'samples', 'seal')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(SEAL_DIR, 'models')
# Training-time configuration; its class is subclassed below for inference.
config = seal.SealConfig()
# Root of the seal dataset (a "val" subset is loaded below).
DATASET_DIR = osp.join(ROOT_DIR, 'datasets', 'seal')
# Override the training configurations with a few changes for inferencing.
class InferenceConfig(config.__class__):
    """Inference-time overrides of the training SealConfig."""
    # Run detection on one image at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
# Single-image inference configuration used everywhere below.
config = InferenceConfig()
# config.display()
# Device to load the neural network on. Useful if you're training a model on the same machine,
# in which case use CPU and leave the GPU for training.
DEVICE = "/gpu:1"  # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
def get_ax(rows=1, cols=1, size=16):
    """Create the Matplotlib Axes (or grid of Axes) used by every
    visualization in this script, so figure sizing is controlled in
    one place. Tweak ``size`` to render images bigger or smaller.
    """
    figure_size = (size * cols, size * rows)
    _fig, axes = plt.subplots(rows, cols, figsize=figure_size)
    return axes
# Load validation dataset
dataset = seal.SealDataset()
dataset.load_seal(DATASET_DIR, "val")
# Must call before using the dataset
dataset.prepare()
# Quick sanity print of what was loaded.
print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
# Create model in inference mode
with tf.device(DEVICE):
    model = modellib.MaskRCNN(mode=TEST_MODE, model_dir=MODEL_DIR, config=config)
# Set path to balloon weights file
# Download file from the Releases page and set its path
# https://github.com/matterport/Mask_RCNN/releases
weights_path = osp.join(MODEL_DIR, 'mask_rcnn_seals_0030.h5')
# Or, load the last model you trained
# weights_path = model.find_last()
# Load weights
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
# Pick one random validation image; the display_* helpers below all use it.
image_id = random.choice(dataset.image_ids)
def display_resized_image():
    """Run detection on the sampled validation image after the standard
    resize step, display the predictions, and log the ground truth."""
    resized_image, image_meta, gt_class_id, gt_bbox, gt_mask = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    image_info = dataset.image_info[image_id]
    # Note: image_info's "id" is the image's filename
    print("Image ID: {}.{} ({}) {}".format(image_info["source"], image_info["id"], image_id,
                                           dataset.image_reference(image_id)))
    # Run object detection
    # rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    # class_ids: [N] int class IDs
    # scores: [N] float probability scores for the class IDs
    # masks: [H, W, N] instance binary masks
    results = model.detect([resized_image], verbose=1)
    # Display results
    ax = get_ax()
    r = results[0]
    visualize.display_instances(resized_image, r['rois'], r['masks'], r['class_ids'],
                                dataset.class_names, r['scores'], ax=ax, title="Predictions")
    log("gt_class_id", gt_class_id)
    log("gt_bbox", gt_bbox)
    log("gt_mask", gt_mask)
    plt.show()
# display_resized_image()
def display_image():
    """Run detection on the sampled dataset image (loaded as-is, without
    the explicit resize of display_resized_image) and show the results."""
    image = dataset.load_image(image_id)
    image_info = dataset.image_info[image_id]
    # Note: image_info's "id" is the image's filename
    print("Image ID: {}.{} ({}) {}".format(image_info["source"], image_info["id"], image_id,
                                           dataset.image_reference(image_id)))
    # Run object detection
    # rois: [N, (y1, x1, y2, x2)] detection bounding boxes
    # class_ids: [N] int class IDs
    # scores: [N] float probability scores for the class IDs
    # masks: [H, W, N] instance binary masks
    results = model.detect([image], verbose=1)
    # Display results
    ax = get_ax()
    r = results[0]
    visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                dataset.class_names, r['scores'], ax=ax, title="Predictions")
    plt.show()
# display_image()
def color_splash():
    """Detect instances on the sampled image, apply seal.color_splash to
    the predicted masks, and display the resulting image."""
    image = dataset.load_image(image_id)
    image_info = dataset.image_info[image_id]
    # Note: image_info's "id" is the image's filename
    print("Image ID: {}.{} ({}) {}".format(image_info["source"], image_info["id"], image_id,
                                           dataset.image_reference(image_id)))
    results = model.detect([image], verbose=1)
    r = results[0]
    splashed_image = seal.color_splash(image, r['masks'])
    display_images([splashed_image], titles='color_splash', cols=1)
    # cv2.namedWindow('splashed_image', cv2.WINDOW_NORMAL)
    # cv2.imshow('splashed_image', splashed_image)
    # cv2.waitKey(0)
# color_splash()
def display_rpn_targets():
    """Visualize RPN training targets for the sampled image: positive
    anchors (dotted) and the same anchors after applying the target
    refinement deltas (solid)."""
    # Generate RPN trainig targets
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    image_info = dataset.image_info[image_id]
    # Note: image_info's "id" is the image's filename
    print("Image ID: {}.{} ({}) {}".format(image_info["source"], image_info["id"], image_id,
                                           dataset.image_reference(image_id)))
    # get_anchors also stores the pixel-coordinate anchors on the model
    # (read back below as model.anchors).
    normalized_anchors = model.get_anchors(resized_image.shape)
    anchors = model.anchors
    # target_rpn_match is 1 for positive anchors, -1 for negative anchors
    # and 0 for neutral anchors.
    target_rpn_match, target_rpn_deltas = modellib.build_rpn_targets(anchors, gt_class_ids, gt_bboxes, model.config)
    log("target_rpn_match", target_rpn_match)
    log("target_rpn_deltas", target_rpn_deltas)
    positive_anchor_ix = np.where(target_rpn_match[:] == 1)[0]
    negative_anchor_ix = np.where(target_rpn_match[:] == -1)[0]
    neutral_anchor_ix = np.where(target_rpn_match[:] == 0)[0]
    positive_anchors = model.anchors[positive_anchor_ix]
    negative_anchors = model.anchors[negative_anchor_ix]
    neutral_anchors = model.anchors[neutral_anchor_ix]
    log("positive_anchors", positive_anchors)
    log("negative_anchors", negative_anchors)
    log("neutral anchors", neutral_anchors)
    # Apply refinement deltas to positive anchors
    refined_anchors = utils.apply_box_deltas(
        positive_anchors,
        target_rpn_deltas[:positive_anchors.shape[0]] * model.config.RPN_BBOX_STD_DEV)
    log("refined_anchors", refined_anchors, )
    # Display positive anchors before refinement (dotted) and
    # after refinement (solid).
    visualize.draw_boxes(resized_image, boxes=positive_anchors, refined_boxes=refined_anchors, ax=get_ax())
    plt.show()
# display_rpn_targets()
def display_rpn_prediction():
    """Trace the RPN pipeline on the sampled image: top-scoring anchors,
    their refinement, clipping to the image, NMS, and final proposals."""
    # Run RPN sub-graph
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    pillar = model.keras_model.get_layer("ROI").output  # node to start searching from
    # TF 1.4 and 1.9 introduce new versions of NMS. Search for all names to support TF 1.3~1.10
    nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression:0")
    if nms_node is None:
        nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression/NonMaxSuppressionV2:0")
    if nms_node is None:  # TF 1.9-1.10
        nms_node = model.ancestor(pillar, "ROI/rpn_non_max_suppression/NonMaxSuppressionV3:0")
    rpn = model.run_graph([resized_image], [
        ("rpn_class", model.keras_model.get_layer("rpn_class").output),
        ("pre_nms_anchors", model.ancestor(pillar, "ROI/pre_nms_anchors:0")),
        ("refined_anchors", model.ancestor(pillar, "ROI/refined_anchors:0")),
        ("refined_anchors_clipped", model.ancestor(pillar, "ROI/refined_anchors_clipped:0")),
        ("post_nms_anchor_ix", nms_node),
        ("proposals", model.keras_model.get_layer("ROI").output),
    ])
    ax = get_ax(2, 3)
    # Show top anchors by score (before refinement)
    limit = 100
    # flatten() turns the scores into a 1-D array, which implicitly assumes
    # batch_size == 1; [::-1] sorts indices from highest to lowest score.
    sorted_anchor_ids = np.argsort(rpn['rpn_class'][:, :, 1].flatten())[::-1]
    visualize.draw_boxes(resized_image, boxes=model.anchors[sorted_anchor_ids[:limit]], ax=ax[0, 0])
    # Show top anchors with refinement. Then with clipping to image boundaries
    limit = 50
    pre_nms_anchors = utils.denorm_boxes(rpn["pre_nms_anchors"][0], resized_image.shape[:2])
    refined_anchors = utils.denorm_boxes(rpn["refined_anchors"][0], resized_image.shape[:2])
    visualize.draw_boxes(resized_image, boxes=pre_nms_anchors[:limit],
                         refined_boxes=refined_anchors[:limit], ax=ax[0, 1])
    refined_anchors_clipped = utils.denorm_boxes(rpn["refined_anchors_clipped"][0], resized_image.shape[:2])
    visualize.draw_boxes(resized_image, refined_boxes=refined_anchors_clipped[:limit], ax=ax[0, 2])
    # Show refined anchors after non-max suppression
    ixs = rpn["post_nms_anchor_ix"][:limit]
    visualize.draw_boxes(resized_image, refined_boxes=refined_anchors_clipped[ixs], ax=ax[1, 0])
    # Show final proposals
    # These are the same as the previous step (refined anchors
    # after NMS) but with coordinates normalized to [0, 1] range.
    # Convert back to image coordinates for display
    h, w = resized_image.shape[:2]
    proposals = rpn['proposals'][0, :limit] * np.array([h, w, h, w])
    visualize.draw_boxes(resized_image, refined_boxes=proposals, ax=ax[1, 1])
    plt.show()
# display_rpn_prediction()
def display_mrcnn_prediction():
    """Trace the detection head on the sampled image: raw detections,
    raw proposals, class-specific box refinement, filtering + per-class
    NMS, and the final surviving detections — one panel per stage."""
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    # Get input and output to classifier and mask heads.
    mrcnn = model.run_graph([resized_image], [
        ("proposals", model.keras_model.get_layer("ROI").output),
        ("probs", model.keras_model.get_layer("mrcnn_class").output),
        ("deltas", model.keras_model.get_layer("mrcnn_bbox").output),
        ("masks", model.keras_model.get_layer("mrcnn_mask").output),
        ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    ])
    ax = get_ax(1, 4)
    ################################## display detections ###############################################
    # Get detection class IDs. Trim zero padding.
    det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
    padding_start_ix = np.where(det_class_ids == 0)[0][0]
    det_class_ids = det_class_ids[:padding_start_ix]
    detections = mrcnn['detections'][0, :padding_start_ix]
    log('trimmed_detection', detections)
    print("{} detections: {}".format(
        padding_start_ix, np.array(dataset.class_names)[det_class_ids]))
    captions = ["{} {:.3f}".format(dataset.class_names[int(class_id)], score) if class_id > 0 else ""
                for class_id, score in zip(detections[:, 4], detections[:, 5])]
    visualize.draw_boxes(resized_image.copy(),
                         refined_boxes=utils.denorm_boxes(detections[:, :4], resized_image.shape[:2]),
                         visibilities=[2] * len(detections),
                         captions=captions, title="Detections",
                         ax=ax[0])
    ################################### display proposals ##########################################
    # Proposals are in normalized coordinates. Scale them to image coordinates.
    h, w = resized_image.shape[:2]
    proposals = np.around(mrcnn["proposals"][0] * np.array([h, w, h, w])).astype(np.int32)
    # Class ID, score, and mask per proposal
    # mrcnn['probs'] has shape (batch_size, num_proposals=1000, num_classes)
    proposal_class_ids = np.argmax(mrcnn["probs"][0], axis=1)
    proposal_class_scores = mrcnn["probs"][0, np.arange(proposal_class_ids.shape[0]), proposal_class_ids]
    proposal_class_names = np.array(dataset.class_names)[proposal_class_ids]
    proposal_positive_ixs = np.where(proposal_class_ids > 0)[0]
    # How many ROIs vs empty rows?
    print("{} valid proposals out of {}".format(np.sum(np.any(proposals, axis=1)), proposals.shape[0]))
    print("{} positive ROIs".format(len(proposal_positive_ixs)))
    # Class counts
    print(list(zip(*np.unique(proposal_class_names, return_counts=True))))
    # Display a random sample of proposals.
    # Proposals classified as background are dotted, and
    # the rest show their class and confidence score.
    limit = 200
    ixs = np.random.randint(0, proposals.shape[0], limit)
    captions = ["{} {:.3f}".format(dataset.class_names[c], s) if c > 0 else ""
                for c, s in zip(proposal_class_ids[ixs], proposal_class_scores[ixs])]
    visualize.draw_boxes(resized_image.copy(), boxes=proposals[ixs],
                         visibilities=np.where(proposal_class_ids[ixs] > 0, 2, 1),
                         captions=captions, title="Proposals Before Refinement",
                         ax=ax[1])
    #################################### apply bounding box refinement #############################
    # Class-specific bounding box shifts.
    # mrcnn['deltas'] has shape (batch_size, num_proposals=1000, num_classes, 4)
    proposal_deltas = mrcnn["deltas"][0, np.arange(proposals.shape[0]), proposal_class_ids]
    log("proposals_deltas", proposal_deltas)
    # Apply bounding box transformations
    # Shape: (num_proposals=1000, (y1, x1, y2, x2)]
    # NOTE: deltas apply the same way to normalized and pixel coordinates
    refined_proposals = utils.apply_box_deltas(
        proposals, proposal_deltas * config.BBOX_STD_DEV).astype(np.int32)
    log("refined_proposals", refined_proposals)
    # Show positive proposals
    # ids = np.arange(proposals.shape[0])  # Display all
    limit = 5
    ids = np.random.randint(0, len(proposal_positive_ixs), limit)  # Display random sample
    captions = ["{} {:.3f}".format(dataset.class_names[class_id], score) if class_id > 0 else ""
                for class_id, score in
                zip(proposal_class_ids[proposal_positive_ixs][ids], proposal_class_scores[proposal_positive_ixs][ids])]
    visualize.draw_boxes(resized_image.copy(), boxes=proposals[proposal_positive_ixs][ids],
                         refined_boxes=refined_proposals[proposal_positive_ixs][ids],
                         visibilities=np.where(proposal_class_ids[proposal_positive_ixs][ids] > 0, 1, 0),
                         captions=captions, title="ROIs After Refinement",
                         ax=ax[2])
    #################################### more steps ################################################
    # Remove boxes classified as background
    keep_proposal_ixs = np.where(proposal_class_ids > 0)[0]
    print("Remove background proposals. Keep {}:\n{}".format(keep_proposal_ixs.shape[0], keep_proposal_ixs))
    # Remove low confidence detections
    keep_proposal_ixs = np.intersect1d(keep_proposal_ixs,
                                       np.where(proposal_class_scores >= config.DETECTION_MIN_CONFIDENCE)[0])
    print("Remove proposals below {} confidence. Keep {}:\n{}".format(
        config.DETECTION_MIN_CONFIDENCE, keep_proposal_ixs.shape[0], keep_proposal_ixs))
    # Apply per-class non-max suppression
    pre_nms_proposals = refined_proposals[keep_proposal_ixs]
    pre_nms_proposal_scores = proposal_class_scores[keep_proposal_ixs]
    pre_nms_proposal_class_ids = proposal_class_ids[keep_proposal_ixs]
    nms_keep_proposal_ixs = []
    for class_id in np.unique(pre_nms_proposal_class_ids):
        # Pick detections of this class
        ixs = np.where(pre_nms_proposal_class_ids == class_id)[0]
        # Apply NMS
        class_keep = utils.non_max_suppression(pre_nms_proposals[ixs],
                                               pre_nms_proposal_scores[ixs],
                                               config.DETECTION_NMS_THRESHOLD)
        # Map indicies
        class_keep_proposal_ixs = keep_proposal_ixs[ixs[class_keep]]
        nms_keep_proposal_ixs = np.union1d(nms_keep_proposal_ixs, class_keep_proposal_ixs)
        print("{:12}: {} -> {}".format(dataset.class_names[class_id][:10], keep_proposal_ixs[ixs],
                                       class_keep_proposal_ixs))
    keep_proposal_ixs = np.intersect1d(keep_proposal_ixs, nms_keep_proposal_ixs).astype(np.int32)
    print("\nKeep after per-class NMS: {}\n{}".format(keep_proposal_ixs.shape[0], keep_proposal_ixs))
    #################################### Show final detections #####################################
    ixs = np.arange(len(keep_proposal_ixs))  # Display all
    # ixs = np.random.randint(0, len(keep), 10)  # Display random sample
    captions = ["{} {:.3f}".format(dataset.class_names[c], s) if c > 0 else ""
                for c, s in
                zip(proposal_class_ids[keep_proposal_ixs][ixs], proposal_class_scores[keep_proposal_ixs][ixs])]
    visualize.draw_boxes(
        resized_image.copy(), boxes=proposals[keep_proposal_ixs][ixs],
        refined_boxes=refined_proposals[keep_proposal_ixs][ixs],
        visibilities=np.where(proposal_class_ids[keep_proposal_ixs][ixs] > 0, 1, 0),
        captions=captions, title="Detections after NMS",
        ax=ax[3])
    plt.show()
# display_mrcnn_prediction()
def display_mrcnn_mask_prediction():
    """Compare ground-truth instance masks with the mask head's raw and
    unmolded outputs for the sampled image."""
    #################################### Mask Targets ##############################################
    # gt_masks has shape (image_height, image_width, num_instances)
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    display_images(np.transpose(gt_masks, [2, 0, 1]), cmap="Blues")
    # Get predictions of mask head
    mrcnn = model.run_graph([resized_image], [
        ("detections", model.keras_model.get_layer("mrcnn_detection").output),
        ("masks", model.keras_model.get_layer("mrcnn_mask").output),
    ])
    # Get detection class IDs. Trim zero padding.
    det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
    padding_start_ix = np.where(det_class_ids == 0)[0][0]
    det_class_ids = det_class_ids[:padding_start_ix]
    print("{} detections: {}".format(padding_start_ix, np.array(dataset.class_names)[det_class_ids]))
    # Masks
    det_boxes = utils.denorm_boxes(mrcnn["detections"][0, :, :4], resized_image.shape[:2])
    # mrcnn['masks'] has shape (batch_size, num_instances, mask_height, mask_width, num_classes)
    det_mask_specific = np.array([mrcnn["masks"][0, i, :, :, c]
                                  for i, c in enumerate(det_class_ids)])
    det_masks = np.array([utils.unmold_mask(mask, det_boxes[i], resized_image.shape)
                          for i, mask in enumerate(det_mask_specific)])
    log("det_mask_specific", det_mask_specific)
    display_images(det_mask_specific[:4] * 255, cmap="Blues", interpolation="none")
    log("det_masks", det_masks)
    display_images(det_masks[:4] * 255, cmap="Blues", interpolation="none")
# display_mrcnn_mask_prediction()
def visualize_activations():
    """Display feature-map activations from a few backbone layers for the
    sampled image (only res2c_out is actually shown below)."""
    # Get activations of a few sample layers
    resized_image, image_meta, gt_class_ids, gt_bboxes, gt_masks = \
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    activations = model.run_graph([resized_image], [
        # ("input_image", model.keras_model.get_layer("input_image").output),
        ("res2c_out", model.keras_model.get_layer("res2c_out").output),
        ("res3c_out", model.keras_model.get_layer("res3c_out").output),
        ("res4w_out", model.keras_model.get_layer("res4w_out").output),  # for resnet100
        ("rpn_bbox", model.keras_model.get_layer("rpn_bbox").output),
        ("roi", model.keras_model.get_layer("ROI").output),
    ])
    # Input image (normalized)
    # _ = plt.imshow(modellib.unmold_image(activations["input_image"][0], config))
    # Backbone feature map
    display_images(np.transpose(activations["res2c_out"][0, :, :, :4], [2, 0, 1]), cols=4)
# visualize_activations()
def show_mrcnn_prediction(image):
    """Run the detection head on ``image`` manually (resize, head graph,
    class-specific refinement, filtering + per-class NMS) and return the
    surviving boxes mapped back into the original image's coordinates
    as a list of [y1, x1, y2, x2] floats."""
    resized_image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    # Get input and output to classifier and mask heads.
    mrcnn = model.run_graph([resized_image], [
        ("proposals", model.keras_model.get_layer("ROI").output),
        ("probs", model.keras_model.get_layer("mrcnn_class").output),
        ("deltas", model.keras_model.get_layer("mrcnn_bbox").output),
        ("masks", model.keras_model.get_layer("mrcnn_mask").output),
        ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    ])
    ################################## display detections ###############################################
    # Get detection class IDs. Trim zero padding.
    det_class_ids = mrcnn['detections'][0, :, 4].astype(np.int32)
    padding_start_ix = np.where(det_class_ids == 0)[0][0]
    det_class_ids = det_class_ids[:padding_start_ix]
    detections = mrcnn['detections'][0, :padding_start_ix]
    log('trimmed_detection', detections)
    print("{} detections: {}".format(
        padding_start_ix, np.array(dataset.class_names)[det_class_ids]))
    ################################### display proposals ##########################################
    # Proposals are in normalized coordinates. Scale them to image coordinates.
    h, w = resized_image.shape[:2]
    proposals = np.around(mrcnn["proposals"][0] * np.array([h, w, h, w])).astype(np.int32)
    # Class ID, score, and mask per proposal
    # mrcnn['probs'] has shape (batch_size, num_proposals=1000, num_classes)
    proposal_class_ids = np.argmax(mrcnn["probs"][0], axis=1)
    proposal_class_scores = mrcnn["probs"][0, np.arange(proposal_class_ids.shape[0]), proposal_class_ids]
    proposal_class_names = np.array(dataset.class_names)[proposal_class_ids]
    proposal_positive_ixs = np.where(proposal_class_ids > 0)[0]
    # How many ROIs vs empty rows?
    print("{} valid proposals out of {}".format(np.sum(np.any(proposals, axis=1)), proposals.shape[0]))
    print("{} positive ROIs".format(len(proposal_positive_ixs)))
    # Class counts
    print(list(zip(*np.unique(proposal_class_names, return_counts=True))))
    # Display a random sample of proposals.
    # Proposals classified as background are dotted, and
    # the rest show their class and confidence score.
    limit = 200
    #################################### apply bounding box refinement #############################
    # Class-specific bounding box shifts.
    # mrcnn['deltas'] has shape (batch_size, num_proposals=1000, num_classes, 4)
    proposal_deltas = mrcnn["deltas"][0, np.arange(proposals.shape[0]), proposal_class_ids]
    log("proposals_deltas", proposal_deltas)
    # Apply bounding box transformations
    # Shape: (num_proposals=1000, (y1, x1, y2, x2)]
    # NOTE: deltas apply the same way to normalized and pixel coordinates
    refined_proposals = utils.apply_box_deltas(
        proposals, proposal_deltas * config.BBOX_STD_DEV).astype(np.int32)
    log("refined_proposals", refined_proposals)
    #################################### more steps ################################################
    # Remove boxes classified as background
    keep_proposal_ixs = np.where(proposal_class_ids > 0)[0]
    print("Remove background proposals. Keep {}:\n{}".format(keep_proposal_ixs.shape[0], keep_proposal_ixs))
    # Remove low confidence detections
    keep_proposal_ixs = np.intersect1d(keep_proposal_ixs,
                                       np.where(proposal_class_scores >= config.DETECTION_MIN_CONFIDENCE)[0])
    print("Remove proposals below {} confidence. Keep {}:\n{}".format(
        config.DETECTION_MIN_CONFIDENCE, keep_proposal_ixs.shape[0], keep_proposal_ixs))
    # Apply per-class non-max suppression
    pre_nms_proposals = refined_proposals[keep_proposal_ixs]
    pre_nms_proposal_scores = proposal_class_scores[keep_proposal_ixs]
    pre_nms_proposal_class_ids = proposal_class_ids[keep_proposal_ixs]
    nms_keep_proposal_ixs = []
    for class_id in np.unique(pre_nms_proposal_class_ids):
        # Pick detections of this class
        ixs = np.where(pre_nms_proposal_class_ids == class_id)[0]
        # Apply NMS
        class_keep = utils.non_max_suppression(pre_nms_proposals[ixs],
                                               pre_nms_proposal_scores[ixs],
                                               config.DETECTION_NMS_THRESHOLD)
        # Map indicies
        class_keep_proposal_ixs = keep_proposal_ixs[ixs[class_keep]]
        nms_keep_proposal_ixs = np.union1d(nms_keep_proposal_ixs, class_keep_proposal_ixs)
        print("{:12}: {} -> {}".format(dataset.class_names[class_id][:10], keep_proposal_ixs[ixs],
                                       class_keep_proposal_ixs))
    keep_proposal_ixs = np.intersect1d(keep_proposal_ixs, nms_keep_proposal_ixs).astype(np.int32)
    print("\nKeep after per-class NMS: {}\n{}".format(keep_proposal_ixs.shape[0], keep_proposal_ixs))
    #################################### Show final detections #####################################
    ixs = np.arange(len(keep_proposal_ixs))  # Display all
    refined_bboxes = refined_proposals[keep_proposal_ixs][ixs]
    # Shift out of the resize window, then undo the resize scale to map
    # boxes back into the original image's coordinate system.
    refined_bboxes -= np.array([window[0], window[1], window[0], window[1]])
    bboxes = refined_bboxes.astype('float32') / scale
    bboxes = bboxes.tolist()
    return bboxes
    # for bbox in bboxes:
    #     cv2.rectangle(image, (round(bbox[1]), round(bbox[0])), (round(bbox[3]), round(bbox[2])), (0, 255, 0), 2)
    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # cv2.imshow('image', image)
    # cv2.waitKey(0)
# for image_filepath in glob.glob('/home/adam/Pictures/vat/train/*.jpg'):
# image = cv2.imread(image_filepath)
# show_mrcnn_prediction(image)
# for image_path in glob.glob('/home/adam/Videos/*.jpg'):
# remove2(model, image_path)
| [
"chenxuannian@gmail.com"
] | chenxuannian@gmail.com |
f52bdb4d96eb52e72961315f882dbfc9f813aa0e | 67e08000ca2cf8ae4595bfcce9f1ef2b6001a3cc | /catkin_ws/build/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/cmake/hector_uav_msgs-genmsg-context.py | 8723a33e7acfca3521fae7e7886b357ba3285c7a | [] | no_license | REGATTE/copter_deep_learning_build | 60e490f1eb2254188f5f160ef837a1a281a1ab19 | cb77b5e6d1591a964c2ef7d3e2ae8ada56ad39c0 | refs/heads/master | 2023-03-03T22:25:05.289131 | 2021-02-17T08:13:57 | 2021-02-17T08:13:57 | 339,652,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Generated genmsg context for hector_uav_msgs.
# BUG FIX: messages_str was a string literal split across two physical lines
# (mid-word, "hector_quadr" / "otor/..."), which is a SyntaxError; the value
# is rejoined onto one line. Values are otherwise unchanged.
messages_str = "/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/Altimeter.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/AttitudeCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/Compass.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/ControllerState.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/HeadingCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/HeightCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/MotorCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/MotorPWM.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/MotorStatus.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/PositionXYCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/RawImu.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/RawMagnetic.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/RawRC.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/RC.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/RuddersCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/ServoCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/Supply.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/ThrustCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/VelocityXYCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/VelocityZCommand.msg;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg/YawrateCommand.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseAction.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseActionGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseActionResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseActionFeedback.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/PoseFeedback.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingAction.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingActionGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingActionResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingActionFeedback.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/LandingFeedback.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffAction.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffActionGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffActionResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffActionFeedback.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffGoal.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffResult.msg;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg/TakeoffFeedback.msg"
services_str = "/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/srv/EnableMotors.srv"
pkg_name = "hector_uav_msgs"
dependencies_str = "actionlib_msgs;std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "hector_uav_msgs;/home/user/catkin_ws/src/micros_swarm_framework/tools/hector_quadrotor/hector_uav_msgs/msg;hector_uav_msgs;/home/user/catkin_ws/devel/share/hector_uav_msgs/msg;actionlib_msgs;/opt/ros/kinetic/share/actionlib_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"ashok_a380@icloud.com"
] | ashok_a380@icloud.com |
f9460bdd828edd3892ba9506260ad360ad7bfbad | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/thinc/neural/train.py | 1a0492b1e6ef38288d5f82838d0e13063fc3efe1 | [
"MIT",
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 1,862 | py | # coding: utf8
from __future__ import unicode_literals
import numpy.random
from tqdm import tqdm
from .optimizers import Adam, linear_decay
class Trainer(object):
    """Mini-batch training-loop helper for a thinc model.

    Owns an Adam optimizer built from the model's backend ops and yields
    shuffled (X, y) mini-batches epoch by epoch.  Intended for use as a
    context manager: on exit the model swaps in the optimizer's running
    parameter averages.
    """
    def __init__(self, model, **cfg):
        # Backend array ops (numpy/cupy) come from the model itself.
        self.ops = model.ops
        self.model = model
        self.L2 = cfg.get("L2", 0.0)
        self.optimizer = Adam(model.ops, 0.001, decay=0.0, eps=1e-8, L2=self.L2)
        self.batch_size = cfg.get("batch_size", 128)
        self.nb_epoch = cfg.get("nb_epoch", 20)
        self.i = 0
        self.dropout = cfg.get("dropout", 0.0)
        self.dropout_decay = cfg.get("dropout_decay", 0.0)
        # Zero-argument callbacks invoked after every completed epoch.
        self.each_epoch = []
    def __enter__(self):
        return self, self.optimizer
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Replace the model's weights with the optimizer's averaged params.
        self.model.use_params(self.optimizer.averages)
    def iterate(self, train_X, train_y, progress_bar=True):
        """Yield shuffled (X, y) mini-batches for ``nb_epoch`` epochs.

        ``self.dropout`` is decayed via ``linear_decay`` as batches are
        consumed within each epoch; callbacks in ``each_epoch`` run after
        each epoch finishes.
        """
        orig_dropout = self.dropout
        for i in range(self.nb_epoch):
            # Fresh random permutation of the training set every epoch.
            indices = numpy.arange(len(train_X))
            numpy.random.shuffle(indices)
            indices = self.ops.asarray(indices)
            j = 0
            with tqdm(total=indices.shape[0], leave=False) as pbar:
                while j < indices.shape[0]:
                    slice_ = indices[j : j + self.batch_size]
                    X = _take_slice(train_X, slice_)
                    y = _take_slice(train_y, slice_)
                    yield X, y
                    self.dropout = linear_decay(orig_dropout, self.dropout_decay, j)
                    j += self.batch_size
                    if progress_bar:
                        pbar.update(self.batch_size)
            for func in self.each_epoch:
                func()
def _take_slice(data, slice_):
if isinstance(data, list) or isinstance(data, tuple):
return [data[int(i)] for i in slice_]
else:
return data[slice_]
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
e85646f0c172a4be28e3c2a012c72c890de0149e | ee085ef7e9f2d178b1128d82a036fc5574355002 | /apps/confer_manage/views.py | e9c639dfa9e660baa535ba1367d14d4157990aa5 | [] | no_license | ErZhouEr/zhers_CMS | 937d7c2b332ad66f8cf152532744b2b6d007143b | 71d384956bc90d962b268ec9d15002de887dada8 | refs/heads/master | 2022-12-09T00:36:18.961915 | 2019-07-29T12:57:50 | 2019-07-29T12:57:50 | 190,124,817 | 0 | 0 | null | 2022-12-07T23:38:03 | 2019-06-04T03:47:10 | JavaScript | UTF-8 | Python | false | false | 38,258 | py | from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import HttpResponse
from django.contrib import messages
import json
from datetime import datetime, timedelta, timezone
import re
import ast
import traceback
from . import models
from . import forms
from apps.login import models as lg_models
from apps.login import forms as lg_forms
from django.views.decorators.csrf import csrf_exempt
import requests
import logging
# logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('django')
# handler = logging.FileHandler("log.txt",encoding='utf-8')
# handler.setLevel(logging.INFO)
# formatter=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
# Create your views here.
def dashboard(request):
    """Render the per-department fund dashboard.

    Aggregates income/outcome Fund rows for the logged-in user's
    department, lists unsettled fines, and counts conferences per user.
    All locals are exposed to the template via ``locals()``, so variable
    names here are part of the template contract.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    cu_user = request.session['user_id']
    people = lg_models.User.objects.get(id=cu_user)
    # Total fund amount (topics with overtime flags).
    topics = models.Topic.objects.filter(is_ex=1)
    # Sign of `money` distinguishes income (>0) from outcome (<0).
    income_funds = models.Fund.objects.filter(fund_apart=people.apartment, money__gt=0)
    print(income_funds)
    logger.info(['dashboard:收入项目-', income_funds])
    outcome_funds = models.Fund.objects.filter(fund_apart=people.apartment, money__lt=0)
    logger.info(['dashboard:支出项目-', outcome_funds])
    total_dict = {'income': {}, 'outcome': {}}
    for income in income_funds:
        total_dict['income'][income.income_people.name] = round(total_dict['income'].get(income.income_people.name,
                                                                                         0) + round(income.money, 1), 1)
    # NOTE(review): outcome totals are WRITTEN under `outcome.reason` but the
    # running total is READ back via `outcome.income_people.name` — the keys
    # do not match, so amounts never accumulate per reason. Likely a bug;
    # confirm which grouping (reason vs person) is intended before fixing.
    for outcome in outcome_funds:
        total_dict['outcome'][outcome.reason] = round(total_dict['outcome'].get(outcome.income_people.name, 0) + round(
            -outcome.money, 1), 1)
    logger.info(['dashboard:总体收支-', total_dict])
    # money_dict = {}
    # for topic in topics:
    #     money_dict[topic.people_id.name] = money_dict.get(topic.people_id.name, 0) + topic.money
    # print(money_dict)
    money_type = ['收入', '支出']
    income_people = list(total_dict['income'].keys())
    outcome_people = list(total_dict['outcome'].keys())
    total_data_x = ['收入', '支出']
    for i in income_people:
        if i not in total_data_x:
            total_data_x.append(i)
    for i in outcome_people:
        if i not in total_data_x:
            total_data_x.append(i)
    logger.info(['dashboard:人员-', total_data_x])
    # Overall income/outcome totals for the top-level chart.
    layer1_money = {'income': 0, 'outcome': 0}
    for i in total_dict['income']:
        layer1_money['income'] = layer1_money['income'] + total_dict['income'][i]
    # NOTE(review): `abs()` is applied to the accumulator on every iteration,
    # not just to each item — works only because all values are summed as
    # absolute amounts; verify intent.
    for i in total_dict['outcome']:
        layer1_money['outcome'] = abs(layer1_money['outcome']) + abs(total_dict['outcome'][i])
    layer1_money['income'] = round(layer1_money['income'], 1)
    layer1_money['outcome'] = round(layer1_money['outcome'], 1)
    # Collect items not yet fully paid back, to show reminders.
    money_notsub = models.Fund.objects.filter(fund_apart=people.apartment, is_money_sub=0, money__gt=0)
    notsub_dict = {}
    for tpc in money_notsub:
        # Display key embeds the fund id as "(id<N>)"; huanqian() parses it back out.
        key = tpc.income_people.name + ':在' + tpc.income_confer.stime + '-' + tpc.income_confer.confer_type.confer_type + '中,' + tpc.reason + '(id' + str(
            tpc.id) + ')'
        notsub_dict[key] = round(tpc.money - tpc.money_sub, 2)
    logger.info(['dashboard:欠款项目-', notsub_dict])
    # Count total conferences per user for the bar chart.
    people_lst = lg_models.User.objects.all()
    peop_cf_dict = {}
    for peop in people_lst:
        con_num = models.Conference.objects.filter(people=peop).count()
        peop_cf_dict[peop.name] = con_num
    peoples = list(peop_cf_dict.keys())
    values = list(peop_cf_dict.values())
    return render(request, 'confer_manege/dashboard.html', locals())
@csrf_exempt
def huanqian(request):
    """Mark a fund item as fully repaid ("还钱").

    POST param ``item`` is the dashboard display string of the form
    "<name>:在<time>-<type>中,<reason>(id<N>)"; the fund id is recovered
    from the trailing "(id<N>)" marker.  Only 'moneyadmin' or 'admin'
    roles may settle items.  Always answers with JSON
    {"status": ..., "message": ...}.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    ret = {"status": None, "message": None}
    if request.method == "POST":
        try:
            cu_user = request.session['user_id']
            user = lg_models.User.objects.get(id=cu_user)
            if user.role == 'moneyadmin' or user.role == 'admin':
                item = request.POST.get('item')
                # Strip "<name>:在" and "...中," to isolate the reason/id part.
                itemlst = item.split(':在', 1)
                fundinfo = itemlst[1].split('中,', 1)[1]
                pattern = re.compile(r'\(id(?P<id>\d+)\)')
                fund_id = re.search(pattern, fundinfo).group('id')
                fund = models.Fund.objects.get(id=fund_id)
                # Settled means: paid amount equals the full amount.
                fund.money_sub = fund.money
                fund.is_money_sub = True
                fund.save()
                ret['status'] = '成功'
                return HttpResponse(json.dumps(ret))
            else:
                ret['message'] = 'Sorry,你没有权限进行基金操作'
                return HttpResponse(json.dumps(ret))
        except Exception:
            # Log the real failure instead of silently swallowing it.
            logger.error(traceback.format_exc(), exc_info=True)
            ret['message'] = '后台错误,请联系管理员'
            return HttpResponse(json.dumps(ret))
    # BUG FIX: non-POST requests previously fell off the end and returned
    # None, which Django turns into a 500; answer with the default payload.
    return HttpResponse(json.dumps(ret))
def addmoneyitem(request):
    """Create a new Fund record for the logged-in user's department.

    POST params: ``reason``, ``type`` ('支出' for expense, anything else is
    income), ``money`` (full amount), ``moneysub`` (amount already paid),
    optional ``confer`` (conference; '无' or absent means "attach to the
    placeholder conference id=1").  Expenses are stored as negative
    amounts.  Always responds with JSON {"status": ..., "message": ...}.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    ret = {"status": None, "message": None}
    cu_user = request.session['user_id']
    user = lg_models.User.objects.get(id=cu_user)
    if request.method == "POST":
        data = request.POST
        logger.info(['additem', data])
        fund = models.Fund()
        fund.reason = data.get('reason', '')
        try:
            # Expenses ('支出') are stored negative, income positive; the
            # sign applies to both the full and the already-paid amount.
            sign = -1 if data.get('type') == '支出' else 1
            fund.money = sign * float(data.get('money', 0))
            fund.money_sub = sign * float(data.get('moneysub', 0))
            # The item is settled when the paid amount equals the full amount.
            fund.is_money_sub = fund.money == fund.money_sub
            fund.income_people = user
            fund.fund_apart = user.apartment
            if data.get('confer', '无') == '无':
                # No conference supplied: attach to the placeholder (id=1).
                confer = models.Conference.objects.get(id=1)
                fund.income_confer = confer
            fund.save()
            ret['status'] = 'success'
            return HttpResponse(json.dumps(ret))
        except Exception:
            logger.error(traceback.format_exc(), exc_info=True)
            return HttpResponse(json.dumps(ret))
    # BUG FIX: non-POST requests previously returned None (a 500 in
    # Django); answer with the default payload instead.
    return HttpResponse(json.dumps(ret))
def usertask(request):
    """Render the logged-in user's conference calendar.

    Each conference the user attends is converted into a fullcalendar-style
    event dict (start/end split into date parts for the template); locals()
    is passed to the template, so variable names here are part of the
    template contract.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    cu_user = request.session['user_id']
    conferences = models.Conference.objects.filter(people=cu_user)
    length = len(conferences)
    # Each conference type gets its own calendar color.
    type_color = {
        '普通小型会议': '#00a65a',
        '紧急小型会议': '#f39c12',
        '项目例会': '#00c0ef',
        '部门例会': '#3c8dbc',
        '紧急部门会议': '#dd4b39'
    }
    confer_data = []
    for confer in conferences:
        confer_dict = {}
        confer_dict['id'] = confer.id
        confer_dict['title'] = confer.confer_type.confer_type
        # stime/endtime are stored as strings like "2019/07/01 09:30 AM".
        confer_dict['start'] = datetime.strptime(confer.stime, '%Y/%m/%d %I:%M %p')
        confer_dict['day'] = confer_dict['start'].day
        confer_dict['month'] = confer_dict['start'].month
        confer_dict['year'] = confer_dict['start'].year
        confer_dict['hour'] = confer_dict['start'].hour
        confer_dict['min'] = confer_dict['start'].minute
        confer_dict['second'] = confer_dict['start'].second
        confer_dict['end'] = datetime.strptime(confer.endtime, '%Y/%m/%d %I:%M %p')
        confer_dict['end_day'] = confer_dict['end'].day
        confer_dict['end_month'] = confer_dict['end'].month
        confer_dict['end_year'] = confer_dict['end'].year
        confer_dict['end_hour'] = confer_dict['end'].hour
        confer_dict['end_min'] = confer_dict['end'].minute
        confer_dict['backgroundColor'] = type_color[confer.confer_type.confer_type]
        confer_dict['borderColor'] = type_color[confer.confer_type.confer_type]
        confer_data.append(confer_dict)
    logger.info(['usertask:会议数据-', confer_data])
    return render(request, 'confer_manege/usertask.html', locals())
# def dropconfer(request):
# if not request.session.get('is_login', None):
# return redirect('/login/')
# return render(request,'confer_manege/edit.html')
def create(request):
    """Open the conference-edit page for a new or existing conference.

    GET params: ``confertype``, ``conferid`` and millisecond epoch
    ``startime``/``endtime`` (the trailing '000' is stripped to get
    seconds).  If the conference exists and is already over, redirects to
    the minutes (document) view; otherwise pre-fills the conference and
    topic forms and stores an anti-resubmit token plus conference context
    in the session.  Renders with locals(), so names are template contract.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    conftype = request.GET.get('confertype')
    confid = request.GET.get('conferid')
    conferstartime = request.GET.get('startime')
    # NOTE(review): replace('000', '', 1) drops the FIRST occurrence of
    # '000', which is only correct when the milliseconds suffix is the
    # first such run — verify for timestamps containing '000' elsewhere.
    conferstartime = str(conferstartime).replace('000', '', 1)
    formated_stime = datetime.utcfromtimestamp(float(conferstartime)).strftime('%Y/%m/%d %I:%M %p')
    logger.info(['create:开始时间', conferstartime, formated_stime])
    conferendtime = request.GET.get('endtime')
    conferendtime = str(conferendtime).replace('000', '', 1)
    formated_endtime = datetime.utcfromtimestamp(float(conferendtime)).strftime('%Y/%m/%d %I:%M %p')
    # print(conftype,conferstartime,conferendtime)
    users = lg_models.User.objects.filter(has_confirmed=1)
    cu_user = request.session['user_id']
    one_people = lg_models.User.objects.get(id=cu_user)
    # The create flow is only entered when the conference was created by the
    # logged-in user and has not been finalized yet.
    type_id = models.Sysconf.objects.get(confer_type=conftype)
    logger.info(['create:会议、结束时间及登录人', confid, type_id, formated_stime, one_people])
    try:
        confer = models.Conference.objects.get(id=confid)
        confer_creater = confer.creater.id
        confer_id = confer.id
        confer_is_over = confer.is_over
        if confer_is_over is True:
            request.session['confer_id'] = confer_id
            notice = '所选会议已结束,可以查看会议纪要'
            logger.info(['create:点击会议', notice])
            return document(request)
        # Preparation progress: fraction of attendees who submitted a topic.
        people_count = confer.people.count()
        processed = confer.process
        process = "%.2f%%" % (processed / people_count * 100)
        if processed == people_count:
            start_flag = True
        else:
            start_flag = False
    except:
        confer = None
        confer_creater = None  # None means the conference does not exist yet (new one).
        confer_id = None
    if confer_id is None:
        flag_conf = False  # conference not created yet
    else:
        flag_conf = True
    if (confer_creater is None) or (confer_creater == cu_user):
        flag_creat = True  # logged-in user is the creator
    else:
        flag_creat = False
    # Has the logged-in user already edited their report content?
    try:
        topic = models.Topic.objects.get(confer_id=confer_id, people_id=cu_user)
    except:
        topic = None
    if topic is None:
        flag_topic = False  # report not created yet
    else:
        flag_topic = True
    logger.info(['create:各种flag', confer, flag_conf, flag_creat, flag_topic])
    # Store a token in the server-side session to prevent duplicate form
    # submission (checked by createajax).
    token = '10000'  # could be a random number
    request.session['createToken'] = token
    request.session['conftype'] = conftype
    request.session['confer_id'] = confer_id
    request.session['confstime'] = formated_stime
    request.session['confendtime'] = formated_endtime
    logger.info(['create:结束时间', formated_endtime])
    # Pre-fill the inputs with previously submitted data, if any.
    if confer is not None:
        peoples = confer.people.all()
        names = [people.name for people in peoples]
        data = {'conferproj': confer.confer_proj, 'confersub': confer.subject, 'people': str(names),
                'stime': confer.stime + ' - ' + confer.endtime}
        logger.info(['create:渲染数据', data])
        startime = confer.stime
        endtime = confer.endtime
        confer_form = forms.ConferForm(auto_id=True, data=data)
    else:
        project = None
        subject = None
        peoples = None
        startime = formated_stime
        endtime = formated_endtime
        data = {'stime': startime + ' - ' + endtime}
        confer_form = forms.ConferForm(auto_id=True, data=data)
    if topic is not None:
        # topic.sentence stores a dict literal {title: content}.
        topic_data = {}
        topic_data['sentence'] = ast.literal_eval(topic.sentence)
        topic_num = len(topic_data['sentence'])
        proj_titles = list(topic_data['sentence'].keys())
        proj_content = list(topic_data['sentence'].values())
        topic_data['sharecontent'] = topic.share
        topic_data['pretime'] = topic.pre_time
        topic_data['exreason'] = topic.ex_reason
        print(topic_data)
    else:
        topic_data = None
        topic_num = 0
    edit_Form = forms.EditForm(auto_id=True, data=topic_data)
    return render(request, 'confer_manege/edit.html', locals())
def createajax(request):
    """AJAX endpoint that creates or updates a Conference from ConferForm.

    Uses the session 'createToken' written by create() to reject duplicate
    submissions.  If the session already holds an existing conference id,
    the conference is updated in place; otherwise a new one is inserted and
    (for type '部门例会') a DingTalk notification is sent.  Responds with
    JSON {"status": ..., "message": ...}.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    if request.method == "GET":
        confer_form = forms.ConferForm()
        return render(request, "confer_manege/edit.html", {"obj": confer_form})
    elif request.method == "POST":
        confer_form = forms.ConferForm(request.POST)
        ret = {"status": None, "message": None}
        if confer_form.is_valid():
            # Decide whether this is the first submission for this session.
            confertype = request.session['conftype']
            type_id = models.Sysconf.objects.get(confer_type=confertype).id
            cu_user = request.session.get('user_id', None)
            conferstime = request.session.get('confstime', None)
            conferendtime = request.session.get('confendtime', None)
            confer_id = request.session.get('confer_id', None)
            logger.info(['createajax:', confer_id])
            # conference = models.Conference.objects.get(confer_type=type_id, )
            # print("31----", confer_form.cleaned_data)
            # Anti-resubmit check: client token must match the session token.
            client_token = request.POST.get('createtoken')
            server_token = request.session.get('createToken', None)
            # print(client_token,server_token,client_token==server_token)
            if client_token == server_token:
                try:
                    logger.info(['createajax2:', confer_id])
                    confer = models.Conference.objects.get(id=int(confer_id))
                except:
                    confer = None
                if confer is not None:
                    # --- update path: the conference already exists ---
                    confersub = confer_form.cleaned_data.get('confersub')
                    conferproj = confer_form.cleaned_data.get('conferproj')
                    # Attendees arrive as a string of ids; extract them by regex.
                    conferpeople = confer_form.cleaned_data.get('people')
                    pattern = re.compile('\d+')
                    people_id_lst = re.findall(pattern, conferpeople)
                    confertime = confer_form.cleaned_data.get('stime')
                    # peoplelst = conferpeople.split(',')
                    # NOTE(review): the update path splits on '-' while the
                    # insert path splits on ' - ' — confirm both inputs are
                    # formatted identically.
                    startime, endtime = confertime.split('-')
                    startime = startime.strip()
                    endtime = endtime.strip()
                    if len(people_id_lst) < 2:
                        ret["message"] = '会议人数太少,请继续添加~'
                        return HttpResponse(json.dumps(ret))
                    cf_type = models.Sysconf.objects.get(confer_type=confertype)
                    try:
                        cf_proj = models.Project.objects.get(projname=conferproj)
                    except:
                        cf_proj = None
                    confer.confer_type = cf_type
                    confer.confer_proj = cf_proj
                    confer.subject = confersub
                    confer.stime = startime
                    confer.endtime = endtime
                    # Many-to-many insert requires .set().
                    confer_people = []
                    for i in people_id_lst:
                        confer_people.append(int(i))
                    peopleset = lg_models.User.objects.filter(id__in=confer_people)
                    confer.people.set(peopleset)
                    # Many-to-many: record the departments of the attendees.
                    confer_apart_lst = []
                    for i in peopleset:
                        confer_apart_lst.append(i.apartment)
                    confer.confer_apart.set(confer_apart_lst)
                    confer.save()
                    ret["message"] = '会议更新成功~'
                    ret["status"] = "成功"
                    # print("35", ret)
                    del request.session['createToken']
                    return HttpResponse(json.dumps(ret))
                else:
                    # --- insert path: create a brand-new conference ---
                    confersub = confer_form.cleaned_data.get('confersub')
                    conferproj = confer_form.cleaned_data.get('conferproj')
                    # Attendees arrive as a string of ids; extract them by regex.
                    conferpeople = confer_form.cleaned_data.get('people')
                    pattern = re.compile('\d+')
                    people_id_lst = re.findall(pattern, conferpeople)
                    confertime = confer_form.cleaned_data.get('stime')
                    # peoplelst = conferpeople.split(',')
                    startime, endtime = confertime.split(' - ')
                    nowtime = datetime.now().strftime('%Y/%m/%d %I:%M %p')
                    if len(people_id_lst) < 2:
                        ret["message"] = '会议人数太少,请继续添加~'
                        return HttpResponse(json.dumps(ret))
                    # elif int(endtime)-int(startime)<0: #还要判断日期大于当前日期
                    #     message='会议时间有误,请再检查下~'
                    #     return render(request, 'confer_manege/edit.html', locals())
                    try:
                        cf_type = models.Sysconf.objects.get(confer_type=confertype)
                        try:
                            cf_proj = models.Project.objects.get(projname=conferproj)
                        except:
                            cf_proj = None
                        cf_creater = lg_models.User.objects.get(id=cu_user)
                        newconfer = models.Conference()
                        newconfer.confer_type = cf_type
                        newconfer.confer_proj = cf_proj
                        newconfer.subject = confersub
                        newconfer.creater = cf_creater
                        newconfer.stime = startime  # start/end come from the time-picker widget
                        newconfer.endtime = endtime
                        newconfer.creatime = nowtime
                        newconfer.save()
                        # Many-to-many insert requires .set().
                        confer_people = []
                        for i in people_id_lst:
                            confer_people.append(int(i))
                        peopleset = lg_models.User.objects.filter(id__in=confer_people)
                        newconfer.people.set(peopleset)
                        # Many-to-many: record the departments of the attendees.
                        confer_apart_lst = []
                        for i in peopleset:
                            confer_apart_lst.append(i.apartment)
                        newconfer.confer_apart.set(confer_apart_lst)
                        newconfer.save()
                        request.session['confer_id'] = newconfer.id
                        ret["message"] = '会议创建成功,请在下方继续填写你的会议发言资料~'
                        ret["status"] = "成功"
                        # Department meetings trigger a DingTalk broadcast.
                        if confertype == '部门例会':
                            msg = '[会议通知]' + cf_creater.name + '发起会议,时间为:' + startime + ',请大家提前进入(http://192.168.1.209:8080/ )编辑提交会议汇报内容,并按时参加,谢谢!'
                            msg2dingding('text', msg, [], 1)
                        # print("35", ret)
                        del request.session['createToken']
                    except:
                        traceback.print_exc()
                        logging.error(traceback.format_exc(), exc_info=True)
                        ret["message"] = '会议创建失败~'
                        ret["status"] = "失败"
                    return HttpResponse(json.dumps(ret))
            else:
                ret["message"] = '请勿重复提交~'
                return HttpResponse(json.dumps(ret))
        else:
            # err = obj.errors
            ret["message"] = str(confer_form.errors)
            logger.warning(['create:结束时间', confer_form.errors])
            return HttpResponse(json.dumps(ret))
@csrf_exempt
def editajax(request):
    """AJAX endpoint that saves the logged-in user's report (Topic).

    POST carries ``num`` topic sections as selectproj<i>/contproj<i> pairs
    plus ``share``, ``pretime`` and ``exreason``.  If a Topic already
    exists for (user, conference) it is updated; otherwise a new one is
    inserted and the conference's preparation counter is advanced.
    Responds with JSON {"status": ..., "message": ..., ["process"]: ...}.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    if request.method == "GET":
        edit_form = forms.EditForm()
        return render(request, "confer_manege/edit.html", {"obj": edit_form})
    elif request.method == "POST":
        # edit_form = forms.EditForm()
        ret = {"status": None, "message": None}
        n = int(request.POST.get('num',0))
        print(n)
        data = {}
        # Rebuild the {title: content} mapping from the indexed form fields.
        sentence = {}
        for i in range(n):
            titleid = 'selectproj%s' % str(i + 1)
            title = request.POST.get(titleid)
            contentid = 'contproj%s' % str(i + 1)
            content = request.POST.get(contentid)
            sentence[title] = content
        print(sentence)
        data['sen'] = sentence
        data['share'] = request.POST.get('share', '')
        data['pretime'] = request.POST.get('pretime', '')
        data['exreason'] = request.POST.get('exreason', '')
        logger.info(['editajax:提交数据', data])
        # topic_num = int((len(data) - 3) / 2)
        # insert new conference
        logger.info(['editajax:话题数', n])
        newtopic = models.Topic()
        # sentence = {}
        # for i in range(topic_num):
        #     print(i,data[2*i],data[2*i+1])
        #     sentence[data[2 * i]] = data[2 * i + 1]
        cu_user = request.session.get('user_id')
        cu_conf = request.session.get('confer_id')
        try:
            topic = models.Topic.objects.get(people_id=cu_user, confer_id=cu_conf)
        except:
            topic = None
        if topic is not None:
            try:
                # Update the existing topic in place.
                topic.sentence = data['sen']
                topic.share = data['share']
                topic.pre_time = data['pretime']
                topic.ex_reason = data['exreason']
                topic.save()
                # NOTE(review): this stores the id of the UNSAVED `newtopic`
                # (None), not the updated `topic` — likely a bug; confirm
                # what reads session['topic_id'].
                request.session['topic_id'] = newtopic.id
                ret["message"] = '汇报内容更新成功~'
                ret["status"] = "成功"
            except:
                logging.error(traceback.format_exc(), exc_info=True)
                ret["message"] = '汇报内容更新失败~'
        else:
            try:
                # Insert a new topic and bump the conference's prepared count
                # (one more attendee is ready).
                newtopic.sentence = data['sen']
                newtopic.share = data['share']
                newtopic.pre_time = data['pretime']
                newtopic.ex_reason = data['exreason']
                newtopic.is_prepared = True
                newtopic.people_id = lg_models.User.objects.get(id=cu_user)
                confer = models.Conference.objects.get(id=cu_conf)
                newtopic.confer_id = confer
                newtopic.save()
                # Recompute the preparation progress percentage.
                people_count = confer.people.count()
                confer.process = confer.process + 1
                process = "%.2f%%" % (confer.process / people_count * 100)
                confer.save()
                ret['process'] = process
                request.session['topic_id'] = newtopic.id
                ret["message"] = '汇报内容编辑成功~'
                ret["status"] = "成功"
            except:
                ret["message"] = '汇报内容编辑失败~'
        logger.info(['editajax:编辑结果', ret])
        # del request.session['createToken']
        return HttpResponse(json.dumps(ret))
def edit(request):
    """Render the topic-edit page (legacy form handler).

    The POST branch validates EditForm but its persistence code is
    commented out — saving is done by editajax() instead; this view only
    re-renders the page.  Renders with locals(), so names are template
    contract.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    if request.method == 'POST':
        edit_Form = forms.EditForm(request.POST)
        message = "请检查填写的内容!"
        if edit_Form.is_valid():
            subjectname = edit_Form.cleaned_data.get('subjectname')
            subcontent = edit_Form.cleaned_data.get('subcontent')
            pretime = edit_Form.cleaned_data.get('pretime')
            exreason = edit_Form.cleaned_data.get('exreason')
            # newtopic=models.Topic()
            # newtopic.sentence=str({'subject':subjectname,'content':subcontent})
            # newtopic.pre_time=pretime
            # newtopic.ex_reason=exreason
            # newtopic.people_id=request.session.get('user_id')
            # newtopic.confer_id=request.session.get('confer_id')
            #
            # newtopic.save()
            #
            # request.session['topic_id'] = newtopic.id
            return render(request, 'confer_manege/edit.html', locals())
        else:
            return render(request, 'confer_manege/edit.html', locals())
    edit_Form = forms.EditForm(auto_id=True)
    confer_form = forms.ConferForm(auto_id=True)
    return render(request, 'confer_manege/edit.html', locals())
def start(request):
    """Render the live-meeting page for the session's current conference.

    Builds the attendee picker list ("name / email") and pre-fills the
    StartForm with the logged-in user's own topic (pre_time is converted
    from minutes to seconds for the countdown).  If the user has no topic,
    placeholder '无' values are shown.  Renders with locals().
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    confer_id = request.session['confer_id']
    cu_user = request.session['user_id']
    confer = models.Conference.objects.get(id=confer_id)
    people = confer.people.all()
    people_lst = []
    for i in people:
        people_lst.append(i.name + ' / ' + i.email)
    logger.info(['start:会议及人员', confer, people_lst])
    try:
        topic = models.Topic.objects.get(confer_id=confer_id, people_id=cu_user)
        # topic.sentence stores a dict literal; rebuild the display text as
        # "title:\ncontent\n----------..." sections.
        sentence = ''
        sen_dict = ast.literal_eval(topic.sentence)
        for key in sen_dict.keys():
            sentence = sentence + key + ':' + '\n' + sen_dict[key] + '\n' + '-----' * 10 + '\n'
        data = {'subcontent': sentence, 'sharecontent': topic.share, 'pretime': topic.pre_time * 60,
                "realtime": topic.real_time, "followup": topic.followup}
        start_form = forms.StartForm(auto_id=True, data=data)
    except:
        logger.error(['start:会议及人员', confer, people_lst], exc_info=True)
        data = {'subcontent': '无', 'sharecontent': '无', 'pretime': '无',
                "realtime": '无', "followup": '无'}
        start_form = forms.StartForm(auto_id=True, data=data)
    return render(request, 'confer_manege/start.html', locals())
@csrf_exempt
def chospeople(request):
    """Load one attendee's prepared topic for the live-meeting page.

    POST param ``people`` has the form "<name> / <email>"; the email part
    selects the user, whose Topic for the session's current conference is
    returned as JSON (pre_time converted from minutes to seconds).  On a
    missing topic, a JSON failure payload is returned instead.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    confer_id = request.session['confer_id']
    cu_user = request.session['user_id']
    ret = {"status": '失败', "message": '请检查输入'}
    if request.method == 'POST':
        email = request.POST.get('people').split('/')[1].strip()
        people = lg_models.User.objects.get(email=email)
        logger.info(['choose people:', people.name, confer_id])
        try:
            topic = models.Topic.objects.get(confer_id=confer_id, people_id=people)
            logger.info(['choose topic:', topic.sentence])
            # topic.sentence stores a dict literal; rebuild display text as
            # "title:\ncontent\n----------..." sections.
            sentence = ''
            sen_dict = ast.literal_eval(topic.sentence)
            for key in sen_dict.keys():
                sentence = sentence + key + ':' + '\n' + sen_dict[key] + '\n' + '-----' * 10 + '\n'
            data = {"subcontent": sentence, "sharecontent": topic.share, "pretime": topic.pre_time * 60,
                    "realtime": topic.real_time, "followup": topic.followup, "status": '成功', "message": '选择成功!'}
        except Exception:
            logger.error(['choose topic:', people.name, confer_id], exc_info=True)
            data = {"status": '失败', "message": '无数据!'}
        # BUG FIX: the success branch previously fell through to the default
        # `ret` payload, discarding the topic data it had just built; both
        # branches now return `data`.
        return HttpResponse(json.dumps(data))
    return HttpResponse(json.dumps(ret))
@csrf_exempt
def savenewedit(request):
    """Save a speaker's live-meeting results and apply overtime fines.

    POST params: ``peop`` ("name / email"), ``sentence`` (sections joined
    by 50 dashes), ``share``, ``rtime`` (actual speaking seconds) and
    ``follup``.  Overtime beyond the allowed time (pre_time minutes, or a
    3-minute default) creates/updates a '发言超时' Fund fine at 0.1 元/s;
    missing report summaries add a flat 5 元 '未提交汇报摘要' fine (user
    'David' is exempt).  Responds with JSON {"status": ..., "message": ...}.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    ret = {"status": '失败', "message": '请检查输入'}
    if request.method == 'POST':
        data = request.POST
        logging.info(['newdata', data])
        # print(data)
        email = data['peop'].split('/')[1].strip()
        people = lg_models.User.objects.get(email=email)
        confer_id = request.session['confer_id']
        confer = models.Conference.objects.get(id=confer_id)
        logger.info(['savenewedit', people, confer_id])
        try:
            topic = models.Topic.objects.get(confer_id=confer_id, people_id=people)
        except:
            # No topic yet (e.g. the speaker never prepared): create one.
            topic = models.Topic()
            topic.confer_id = confer
            topic.people_id = people
        try:
            sen_dict = {}  # NOTE: this string parsing is still quite brittle
            sentence = data['sentence']
            # Sections are separated by a run of 50 dashes; each section is
            # "title:content".
            topicsubs = sentence.split('-----' * 10)
            print(topicsubs)
            logger.info(['savenewedit', topicsubs])
            for sub in topicsubs:
                sub = sub.strip('-')
                if len(sub) > 5:
                    logger.info(['savenewedit', sub])
                    sen_lst = sub.split(':')
                    logger.info(['savenewedit', sen_lst])
                    sen_dict[sen_lst[0].strip()] = sen_lst[1].strip()
            topic.sentence = sen_dict
            topic.share = data['share']
            topic.real_time = data['rtime']
            # Overtime check: allowed time is pre_time minutes when declared
            # (>3), otherwise a 3-minute default.
            # NOTE(review): when pre_time>3 but the speaker did NOT run over,
            # is_ex is never assigned here — a freshly created Topic then
            # relies on the model's default; confirm that default is False.
            if (topic.pre_time is not None) and (topic.pre_time>3):
                if int(data['rtime']) - 60 *topic.pre_time>0:
                    topic.is_ex = True
                    topic.ex_time = int(data['rtime']) - 60 *topic.pre_time
            elif int(data['rtime']) - 60 * 3 > 0:
                topic.is_ex = True
                topic.ex_time = int(data['rtime']) - 60 * 3
            else:
                topic.is_ex = False
            if topic.is_ex:
                try:
                    # Overtime fine already exists for this speaker: update it.
                    fund = models.Fund.objects.get(income_confer=confer, income_people=people, reason='发言超时')
                    fakuan = (topic.ex_time) * 0.1
                    topic.money = fakuan
                    fund.money = fakuan
                    fund.save()
                except:
                    # First overtime for this speaker: create the fine.
                    newfund = models.Fund()
                    fakuan = (topic.ex_time) * 0.1
                    topic.money = fakuan
                    newfund.money = fakuan
                    newfund.fund_apart = people.apartment
                    newfund.reason = '发言超时'
                    newfund.income_people = people
                    newfund.income_confer = confer
                    newfund.save()
            else:
                try:
                    # If an earlier mistimed save created an overtime fine,
                    # zero it out now that the time has been corrected.
                    fund = models.Fund.objects.get(income_confer=confer, income_people=people, reason='发言超时')
                    fund.money = 0
                    fund.save()
                except:
                    pass
                topic.ex_time = 0
                topic.money = 0
            if not topic.is_prepared:
                # No pre-meeting summary was submitted: flat 5 元 fine,
                # unless one already exists (or the speaker is 'David').
                if not models.Fund.objects.filter(income_confer=confer, income_people=people,
                                                  reason='未提交汇报摘要').exists() and people.name != 'David':
                    newfund = models.Fund()
                    newfund.money = 5
                    newfund.fund_apart = people.apartment
                    newfund.reason = '未提交汇报摘要'
                    newfund.income_people = people
                    newfund.income_confer = confer
                    newfund.save()
            else:
                try:
                    # External users may have submitted a topic while
                    # is_prepared stayed False; drop any stale summary fine.
                    fund = models.Fund.objects.get(income_confer=confer, income_people=people, reason='未提交汇报摘要')
                    fund.delete()
                except:
                    pass
            topic.followup = data['follup']
            topic.save()
            ret['status'] = '成功'
            ret['message'] = '保存成功'
        except Exception as err:
            topic = None
            print(err.__class__.__name__, err, ';')
            logger.error(traceback.format_exc(), exc_info=True)  # traceback.print_exc()
            ret['message'] = '保存失败,请联系管理员'
        logger.info(ret)
        return HttpResponse(json.dumps(ret))
    return HttpResponse(json.dumps(ret))
def document(request):
    """Render the minutes-archive page.

    Rebuilds the type -> subject -> start-times cascade for the pickers and
    pre-fills the Document form with the session's current conference
    conclusion when available.  Renders with locals(), so names are
    template contract.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    conf_dict = {}
    conf_types = models.Sysconf.objects.all()
    for typ in conf_types:
        conf_subs = models.Conference.objects.filter(confer_type=typ.id)
        conf_dict_l2 = {}
        for sub in conf_subs:
            confs = models.Conference.objects.filter(confer_type=typ.id, subject=sub.subject)
            conf_times = []
            for conf in confs:
                conf_times.append(conf.stime)
            conf_dict_l2[sub.subject] = conf_times
        conf_dict[typ.confer_type] = conf_dict_l2
    all_types = conf_dict.keys()
    try:
        confer_id = request.session['confer_id']
        cu_user = request.session['user_id']
        confer = models.Conference.objects.get(id=confer_id)
        user = lg_models.User.objects.get(id=cu_user)
        data = {'conferconclusion': confer.confer_conclusion}
    except:
        # No conference selected in the session: render an empty form.
        data = None
    document_form = forms.Document(auto_id=True, data=data)
    return render(request, 'confer_manege/document.html', locals())
def newdocument(request):
    """Finalize a conference: compose its minutes, apply fines, notify.

    Collates all Topics of the session's current conference into a minutes
    document (projects / SHARE / FOLLOW UP / DONATION sections), marks the
    conference over, broadcasts the minutes link and the fine summary via
    DingTalk, and reminds people with unpaid fines from earlier meetings.
    If the conference is already over, falls back to document().
    Renders with locals().
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    conf_dict = {}
    conf_types = models.Sysconf.objects.all()
    for typ in conf_types:
        conf_subs = models.Conference.objects.filter(confer_type=typ.id)
        conf_dict_l2 = {}
        for sub in conf_subs:
            confs = models.Conference.objects.filter(confer_type=typ.id, subject=sub.subject)
            conf_times = []
            for conf in confs:
                conf_times.append(conf.stime)
            conf_dict_l2[sub.subject] = conf_times
        conf_dict[typ.confer_type] = conf_dict_l2
    all_types = conf_dict.keys()
    try:
        confer_id = request.session['confer_id']
        cu_user = request.session['user_id']
        confer = models.Conference.objects.get(id=confer_id)
        user = lg_models.User.objects.get(id=cu_user)
        confer_is_over = confer.is_over
        if confer_is_over is True:
            notice = '所选会议已结束,可以查看会议纪要'
            print(notice)
            logging.info(notice)
            return document(request)
        topics = models.Topic.objects.filter(confer_id=confer_id)
        conclusion = {}  # merge every topic's {title: content} dict into minutes format
        for topic in topics:
            sen_dict = ast.literal_eval(topic.sentence)
            for key in sen_dict:
                # print(key, sen_dict)
                conclusion[key] = conclusion.get(key, [])
                sen_lst = sen_dict[key].split('\n')
                for i in sen_lst:
                    conclusion[key].append(
                        '* [ ' + topic.people_id.name + ' ] ' + i.replace('\n', ''))  # NOTE: splitting on newlines may need rethinking
        doc = '【会议纪要】\n'
        for key in conclusion:
            doc = doc + 'PROJECT: ' + key + ':\n' + '\n'.join(conclusion[key]) + '\n'
        share = '【SHARE】\n'
        for topic in topics:
            if len(topic.share) > 3:
                share = share + '* [ ' + topic.people_id.name + ' ] ' + topic.share + '\n'
        follup = '【FOLLOW UP】\n'
        for topic in topics:
            if len(topic.followup) > 3:
                follup = follup + '* [ ' + topic.people_id.name + ' ] ' + topic.followup + '\n'
        # DONATION section: overtime fines plus missing-summary fines; the
        # collected phone numbers target the DingTalk @-mentions below.
        money = '【DONATION】\n'
        money_people_lst = ['']
        print(money_people_lst, type(money_people_lst))
        for topic in topics:
            if topic.is_ex is True:
                if topic.is_money_sub is not True:
                    status = '待还'
                else:
                    status = '已还'
                money = money + '* [ ' + topic.people_id.name + ' ] 发言超时,' + '本次需奉献' + str(
                    round(topic.money, 3)) + '元' + ',已奉献' + str(
                    topic.money_sub) + '元' + '\n'
                money_people_lst.append(topic.people_id.phone)
        if models.Fund.objects.filter(income_confer=confer_id, reason='未提交汇报摘要').exists():
            funds = models.Fund.objects.filter(income_confer=confer_id, reason='未提交汇报摘要')
            # print(confer_id, funds)
            for fund in funds:
                money = money + '* [ ' + fund.income_people.name + ' ] 未提交汇报摘要,' + '本次需奉献' + str(
                    fund.money) + '元' + ',已奉献' + str(
                    fund.money_sub) + '元' + '\n'
                money_people_lst.append(fund.income_people.phone)
        else:
            s = '无其他罚款项~\n'
            money = money + s
        foot = '< 会议记录人 > -' + user.name + ' / ' + user.email
        doc = doc + '-----' * 40 + '\n' + share + '-----' * 40 + '\n' + follup + '-----' * 40 + '\n' + money + '-----' * 40 + '\n' + foot
        # Persist the minutes and close the conference.
        confer.confer_conclusion = doc
        confer.conclusioner = user
        confer.is_over = True
        confer.save()
        logger.info('会议纪要保存成功')
        msg = '【会议纪要】' + 'http://192.168.1.209:8080/confer_manage/document/' + ',请大家点击链接查看,谢谢!'
        msg2 = money.replace('DONATION', '罚款情况')
        msg2dingding('text', msg, [], 1)
        msg2dingding('text', msg2 + ',请发红包给阿连,谢谢', money_people_lst, 0)
        try:
            # Remind people with unpaid fines from EARLIER conferences.
            former_funds = models.Fund.objects.filter(is_money_sub=False)
            former_money = '【往期滞交罚款】\n'
            former_money_people=[]
            for former_fund in former_funds:
                if former_fund.income_confer_id != confer.id:
                    former_money = former_money + '* [ ' + former_fund.income_people.name + ' ] 在往期会议中' + former_fund.reason + ',已交' + str(
                        former_fund.money_sub) + '元,仍欠款' +str(former_fund.money-former_fund.money_sub)+'元,请及时交款~\n'
                    former_money_people.append(former_fund.income_people.phone)
            msg2dingding('text', former_money + ',请发红包给阿连,谢谢', former_money_people, 0)
        except:
            logging.error(traceback.format_exc(),exc_info=True)
        confer_ownerlst=['连漪','高飘飘','谭子能','伟叔','韩雪','黄晓华','吴任','麦苑菲','周策','杨永健','刘志文']
        data = {'conferconclusion': doc}
        document_form = forms.Document(auto_id=True, data=data)
        return render(request, 'confer_manege/document.html', locals())
    except:
        logging.error(traceback.format_exc(), exc_info=True)
        return render(request, 'confer_manege/500.html', locals())
# --------- Build the type -> subject -> start-times cascade consumed by the
# document page's dependent dropdowns (getSubData / getTimeData). ---------
# NOTE(review): these queries run at module IMPORT time, so the cascade is a
# snapshot taken when the process starts and goes stale as conferences are
# added; importing also fails if the DB is unavailable (e.g. during
# migrations). Consider moving this into a function.
conf_dict = {}
conf_types = models.Sysconf.objects.all()
for typ in conf_types:
    conf_subs = models.Conference.objects.filter(confer_type=typ.id)
    conf_dict_l2 = {}
    for sub in conf_subs:
        confs = models.Conference.objects.filter(confer_type=typ.id, subject=sub.subject)
        conf_times = []
        for conf in confs:
            conf_times.append(conf.stime)
        conf_dict_l2[sub.subject] = conf_times
    conf_dict[typ.confer_type] = conf_dict_l2
# Example of the resulting shape:
# Confer_dict = {
#     "普通小型会议": {
#         "啊啊啊啊": ["哈哈", "嘎嘎"],
#         "噢噢噢噢": ["问问", "嗯嗯", "然然"],
#         "呜呜呜呜": ["刚刚", "等等", "找找"]
#     },
#     "项目例会": {
#         "练练": ["12", "23", "34"],
#         "吉吉": []
#     },
#     "部门例会": {
#         "45": [],
#         "67": []
#     }
# }
@csrf_exempt
def choscoonf(request):
    """Look up a finished conference's minutes for the document page.

    POST params: ``cftyp`` (conference type name), ``cfsb`` (subject) and
    ``cftm`` (start time).  On success returns
    {"conclusion": ..., "status": '成功'}; otherwise a JSON failure payload.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    ret = {"status": '失败', "message": '请检查输入'}
    if request.method == 'POST':
        data = request.POST
        conftype = models.Sysconf.objects.get(confer_type=data['cftyp'])
        confsub = data['cfsb']
        conftime = data['cftm']
        logger.info(['choscoonf:选择会议', conftype, confsub, conftime])
        try:
            conf = models.Conference.objects.get(confer_type=conftype, subject=confsub, stime=conftime)
            conclusion = conf.confer_conclusion
            searchdata = {'conclusion': conclusion, 'status': '成功'}
            ret['status'] = '成功'
            return HttpResponse(json.dumps(searchdata))
        except Exception:
            logger.error(traceback.format_exc(), exc_info=True)
            # BUG FIX: the key was misspelled 'massage', so the client never
            # received the not-found message under 'message'.
            ret['message'] = '抱歉,未找到相关会议'
            return HttpResponse(json.dumps(ret))
    # BUG FIX: non-POST requests previously returned None (a 500 in Django).
    return HttpResponse(json.dumps(ret))
def getSubData(request):
    """Return (JSON) the subject names recorded for the conference type
    given in the query string."""
    if not request.session.get('is_login', None):
        return redirect('/login/')
    conference_type = request.GET['confertype']
    # Iterating the inner dict yields its keys, i.e. the subject names.
    subjects = list(conf_dict[conference_type])
    return HttpResponse(json.dumps(subjects))
def getTimeData(request):
    """Return (JSON) the start times recorded for one conference
    type + subject pair taken from the query string."""
    if not request.session.get('is_login', None):
        return redirect('/login/')
    conference_type = request.GET['confertype']
    conference_subject = request.GET['confersub']
    start_times = conf_dict[conference_type][conference_subject]
    return HttpResponse(json.dumps(start_times))
def userprofile(request):
    """Render the profile page for the currently logged-in user.

    NOTE: the view ends with render(..., locals()), so every local name below
    (cu_user, user, name, apartment, email, data, user_form) is part of the
    template context — do not rename or remove them.
    """
    if not request.session.get('is_login', None):
        return redirect('/login/')
    cu_user = request.session['user_id']
    user = lg_models.User.objects.get(id=cu_user)
    name = user.name
    apartment = user.apartment.apartment
    email = user.email
    data = {}
    user_form = lg_forms.editUserForm(data=data)
    return render(request, 'confer_manege/profile.html', locals())
# 上传用户头像
@csrf_exempt
def upload(request):
if request.method == 'POST':
user_id = request.session.get('user_id')
avatar = request.FILES.get('avatar')
ret = {"status": '失败'}
try:
user = lg_models.User.objects.get(id=user_id)
user.picture = avatar
user.save()
# print(avatar)
request.session['avatar'] = avatar
print(request.session['avatar'])
ret['status'] = '成功'
return HttpResponse(json.dumps(ret))
except:
print(traceback.print_exc())
return HttpResponse(json.dumps(ret))
# else:
# return render(request, 'confer_manege/profile.html')
def msg2dingding(msgtype, message, people_lst, flag):
    """Push a message to the team DingTalk group through its robot webhook.

    Args:
        msgtype: DingTalk message type, e.g. 'text'.
        message: text content to send.
        people_lst: phone numbers to @-mention.
        flag: 1 to @-mention everyone, 0 otherwise.
    """
    # NOTE(review): the access token is hard-coded; consider moving it to settings.
    url = 'https://oapi.dingtalk.com/robot/send?access_token=b0cb9de85f59c94b7c0b914f4b073e82fae2fefc18de252c31dc806e5cd51a1f'  # 钉钉机器人的webhook地址
    HEADERS = {
        "Content-Type": "application/json ;charset=utf-8 "
    }
    # (removed a redundant `message = message` self-assignment)
    String_textMsg = {
        "msgtype": msgtype,
        "text": {"content": message},
        "at": {
            "atMobiles": people_lst,  # phone numbers to @-mention
            "isAtAll": flag  # 1 mentions everyone
        }
    }
    # requests expects bytes for the body here (the original comment noted
    # Python 3's Request wants byte data).
    sendData = json.dumps(String_textMsg).encode("utf-8")
    res = requests.post(url, data=sendData, headers=HEADERS)
    print(res.text)
# 每周四定时钉钉提醒创建会议,这种方法不行,服务启动不了,应该是一直在循环吧
# import time
#
# sched_time = datetime(2019, 7, 25, 15, 0, 0)
# loopflag = 0
# while True:
# now = datetime.now()
# if sched_time<now<(sched_time+timedelta(seconds=1)):
# loopflag = 1
# time.sleep(1)
# if loopflag == 1:
# msg='当前时间为周四下午3点,请本期例会负责人尽快创建会议~'
# msg2dingding('text', msg, [], 0) #此处为你自己想定时执行的功能函数
# loopflag = 0
# sched_time=sched_time+timedelta(days=7) | [
"917846003@qq.com"
] | 917846003@qq.com |
ddf0cd63aa1fd17e0fc5796faeb0563d44020a98 | 18024947305ee208812bac794352eca4fc8d5437 | /GreystoneProject/GreystoneProject/urls.py | 204edea5ce494e1f47317b71001970869d8f45a0 | [] | no_license | zippy1981/GreystoneSample | bf289d6b0bb0601cd7a3ec67331546326335758e | d0679bb8d21a446be739b7fcb18dcc59c8261e41 | refs/heads/master | 2021-01-16T00:09:56.155190 | 2017-08-10T20:14:10 | 2017-08-10T20:14:10 | 99,961,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | """GreystoneProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Delegate /messages (trailing slash optional) to the Messages app.
    url(r'^messages/?', include('Messages.urls'))
]
| [
"zippy1981@gmail.com"
] | zippy1981@gmail.com |
f496224193f4ea3d1b76b22140439cdcfafe45be | 7b3746fd583a44b01c4f1b1357ee1ca07f3b76de | /server.py | 78327b0e04fbf949befe1d282f6c806111d09aa8 | [] | no_license | DRyan1995/dryan1995.github.com | 2652b541a5da30683177a6ebeec628cee21f927e | f18ef5e4c54b95da61b2ec18658e5855df2cf424 | refs/heads/master | 2020-05-22T01:14:27.942872 | 2018-04-03T03:59:17 | 2018-04-03T03:59:17 | 53,734,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | import sqlalchemy.exc, traceback, re, json
from flask import Flask, request, render_template, Response, send_from_directory, session, app, redirect, url_for
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from functools import wraps
from werkzeug import secure_filename
from time import mktime, time
import os, random
# Flask app: static blog assets live under ./site, templates in the repo root.
app = Flask(__name__, static_folder='site', template_folder='./')
# NOTE(review): DB credentials are embedded in the URI; move them to env/config.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:ryan@test/blog_commit?charset=utf8mb4'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
app.config.from_object(__name__)
class Comment(db.Model):
    """One blog comment; rows are soft-deleted via the `deleted` flag."""
    id = db.Column(db.Integer, primary_key=True)
    page_name = db.Column(db.String(30), index=True)  # which blog page it belongs to
    content = db.Column(db.Text)
    author = db.Column(db.String(30))
    email = db.Column(db.String(50))
    comment_time = db.Column(db.TIMESTAMP)
    deleted = db.Column(db.BOOLEAN)  # soft-delete marker

    def __init__(self, page_name, content, author, email, comment_time, deleted=False):
        self.page_name = page_name
        self.content = content
        self.author = author
        self.email = email
        self.comment_time = comment_time
        self.deleted = deleted
class visit_statistics(db.Model):
    """Per-page visit counter (name kept lowercase: it is referenced elsewhere)."""
    page_name = db.Column(db.String(30), primary_key=True, unique=True)
    last_visit_time = db.Column(db.TIMESTAMP)
    count = db.Column(db.Integer, default=0)  # total visits so far

    def __init__(self, page_name):
        self.page_name = page_name
        # Stored as a string; SQLAlchemy/MySQL coerce it into the TIMESTAMP column.
        self.last_visit_time = str(datetime.now())
        self.count = 0
def DbCommit(successRet='success'):
    """Commit the pending session.

    Returns `successRet` on success; on an integrity violation rolls the
    session back (so it stays usable for later requests) and returns the
    error text with HTTP status 501.
    """
    try:
        db.session.commit()
    except sqlalchemy.exc.IntegrityError as exc:
        # Without a rollback the session stays in a failed state and every
        # subsequent request on it raises.
        db.session.rollback()
        traceback.print_exc()
        return str(exc.args), 501
    return successRet
@app.route("/create_database", methods=['POST'])
def CreateDB():
    """Create all tables declared on the models (idempotent)."""
    db.create_all()
    # A Flask view must return a response; returning None raises a 500.
    return 'success'
@app.route('/blog')
@app.route('/blog/')
def index():
    """Serve the blog's front page from the static folder."""
    return send_from_directory(app.static_folder, 'index.html')
@app.route('/blog/<path:path>')
def Server(path):
    """Serve a static blog asset; directory-style paths fall back to index.html."""
    if not path or path.endswith('/'):
        path += 'index.html'
    return send_from_directory(app.static_folder, path)
def CommentParser(comment):
    """Serialize a Comment row into a JSON-ready dict.

    `commentTime` is converted to integer epoch seconds.
    """
    return {
        'id': comment.id,
        'pageName': comment.page_name,
        'author': comment.author,
        'email': comment.email,
        'content': comment.content,
        'commentTime': int(mktime(comment.comment_time.timetuple())),
        'deleted': comment.deleted,
    }
@app.route("/visit/<pageName>", methods=['GET'])
def VisitPage(pageName):
    """Record one visit to `pageName` and return the *previous* count and
    last-visit time (epoch seconds) as JSON."""
    visit = visit_statistics.query.filter_by(page_name=pageName).first()
    if not visit:
        # First ever visit: create the row. Rebinding `visit` fixes the
        # original AttributeError — `visit.count += 1` ran on None here.
        visit = visit_statistics(page_name=pageName)
        db.session.add(visit)
        ret = dict(
            count=0,
            time=int(mktime(datetime.now().timetuple()))
        )
    else:
        ret = dict(
            count=visit.count,
            time=int(mktime(visit.last_visit_time.timetuple()))
        )
    visit.count += 1
    visit.last_visit_time = str(datetime.now())
    # The original never committed, so the incremented counter was lost.
    db.session.commit()
    return json.dumps(ret)
@app.route("/comments/<pageName>", methods=['GET'])
def GetComments(pageName):
    """Return every comment recorded for `pageName` as a JSON array."""
    rows = Comment.query.filter_by(page_name=pageName).all()
    serialized = [CommentParser(row) for row in rows]
    return json.dumps(serialized)
@app.route("/comments/<pageName>", methods=['POST'])
def AddComments(pageName):
    """Store a new comment for `pageName` from the JSON request body.

    Expects keys 'author', 'content', 'email'; the timestamp is set server-side.
    """
    cm = request.get_json()
    # (removed stray debug print() calls from the original)
    newComment = Comment(page_name=pageName, author=cm['author'],
                         content=cm['content'], email=cm['email'],
                         comment_time=str(datetime.now()))
    db.session.add(newComment)
    return DbCommit()
@app.route("/comments/<commentId>", methods=['DELETE'])
def DeleteComments(commentId):
    """Soft-delete a comment by id; 404 when no such row exists."""
    target = Comment.query.filter_by(id=commentId).first()
    if target is None:
        return 'No record', 404
    target.deleted = True
    return DbCommit()
@app.route("/recover/comments/<commentId>", methods=['POST'])
def RecoverComment(commentId):
    """Undo a soft delete; 404 when no such row exists."""
    comment = Comment.query.filter_by(id=commentId).first()
    if not comment:
        return 'No record', 404
    # Use a bool for the BOOLEAN column, mirroring DeleteComments' `True`
    # (the original assigned the int 0).
    comment.deleted = False
    return DbCommit()
if __name__ == '__main__':
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to the
    # network — disable debug in production.
    app.run(host='0.0.0.0', port=8088, debug=True)
| [
"1204633887@qq.com"
] | 1204633887@qq.com |
cafa5f4f16ca55c57c83fe82e13b42135abb9c82 | e5e57303d7fd02692295d117b2c9fa509697b977 | /LittleGarden_Code/Backend/repositories/DataRepository.py | 54abe5826544dd118819dd1b54275376dd91d652 | [
"CC0-1.0"
] | permissive | CarlierAlex2/LittleGarden | a3f0207f38c31c2567a5ca85a024dfd9ba200874 | 744f9d20025f3d341d902834803ea1d0849cc918 | refs/heads/master | 2022-10-31T14:43:11.880181 | 2020-06-15T17:36:38 | 2020-06-15T17:36:38 | 272,476,801 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,005 | py | from .Database import Database
class DataRepository:
    """Static facade over `Database` for the Devices / Metingen / Settings tables.

    All methods return whatever `Database.get_rows` / `get_one_row` /
    `execute_sql` return; queries use %s placeholders so literal percent
    signs in SQL must be doubled (%%).
    """

    @staticmethod
    def json_or_formdata(request):
        """Return the request payload as a dict, accepting JSON or form data."""
        if request.content_type == 'application/json':
            gegevens = request.get_json()
        else:
            gegevens = request.form.to_dict()
        return gegevens

    # -- whole tables ------------------------------------------------------
    @staticmethod
    def read_devices():
        sql = "SELECT naam, eenheid, typeDevice from Devices"
        return Database.get_rows(sql)

    @staticmethod
    def read_metingen():
        sql = "SELECT * from Metingen"
        return Database.get_rows(sql)

    @staticmethod
    def read_settings():
        sql = "SELECT * from Settings"
        return Database.get_rows(sql)

    # -- by primary key ----------------------------------------------------
    @staticmethod
    def read_device_by_id(id):
        sql = "SELECT naam, eenheid, typeDevice from Devices WHERE deviceId = %s"
        params = [id]
        return Database.get_one_row(sql, params)

    @staticmethod
    def read_meting_by_id(id):
        sql = "SELECT * from Metingen WHERE metingId = %s"
        params = [id]
        return Database.get_one_row(sql, params)

    @staticmethod
    def read_setting_by_id(id):
        sql = "SELECT * from Settings WHERE settingId = %s"
        params = [id]
        return Database.get_one_row(sql, params)

    # -- devices -----------------------------------------------------------
    @staticmethod
    def read_device_by_type(typeDevice):
        sql = "SELECT * from Devices WHERE typeDevice = %s"
        params = [typeDevice]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_actuators():
        return DataRepository.read_device_by_type("actuator")

    @staticmethod
    def read_sensors(datum):
        # NOTE(review): `datum` is unused; kept for caller compatibility.
        return DataRepository.read_device_by_type("sensor")

    @staticmethod
    def create_device(naam, merk, prijs, beschrijving, eenheid, typeDevice):
        # Fixed: the original said "waardeS(" (broken find/replace) instead of
        # the SQL keyword VALUES, so the INSERT could never execute.
        sql = "INSERT INTO Devices(naam, merk, prijs, beschrijving, eenheid, typeDevice) VALUES(%s,%s,%s,%s,%s,%s)"
        params = [naam, merk, prijs, beschrijving, eenheid, typeDevice]
        return Database.execute_sql(sql, params)

    # -- measurements ------------------------------------------------------
    @staticmethod
    def read_metingen_from_date(deviceId, datum):
        # Fixed: the two conditions were missing the AND keyword.
        sql = "SELECT * from Metingen WHERE deviceId = %s AND datum = %s"
        params = [deviceId, datum]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_metingen_last_by_device(deviceId):
        """Latest measurement for a device, with the same-day average attached."""
        sql = "SELECT deviceId, waarde, commentaar, datum, AVG(waarde) OVER (PARTITION BY CAST(datum as date)) as gemiddelde FROM Metingen WHERE deviceId = %s ORDER BY datum desc;"
        params = [deviceId]
        return Database.get_one_row(sql, params)

    @staticmethod
    def read_metingen_from_moment(deviceId, time):
        sql = "SELECT * from Metingen WHERE deviceId = %s AND CAST(datum as time) = %s "
        params = [deviceId, time]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_metingen_from_period_grouped(datumBegin, datumEind):
        """Pivot the four sensors (water/licht/humid/temp) per timestamp.

        Fixed: the concatenated fragments were missing separating spaces
        ("tempFROM", "MetingenWHERE", "%sGROUP", "datumORDER"), producing
        invalid SQL.
        """
        sql = "SELECT datum, "
        sql += "max(case when deviceId = 1 then waarde else 0 end) as water, "
        sql += "max(case when deviceId = 2 then waarde else 0 end) as licht, "
        sql += "max(case when deviceId = 3 then waarde else 0 end) as humid, "
        sql += "max(case when deviceId = 4 then waarde else 0 end) as temp "
        sql += "FROM Metingen "
        sql += "WHERE datum >= %s AND datum <= %s "
        sql += "GROUP BY datum "
        sql += "ORDER BY datum desc "
        params = [datumBegin, datumEind]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_metingen_by_period_and_device(datumBegin, datumEind, deviceId):
        sql = "SELECT datum, waarde, commentaar, deviceId "
        sql += "FROM Metingen "
        sql += "WHERE datum >= CAST(%s as datetime) AND datum <= CAST(%s as datetime) AND deviceId = %s "
        sql += "ORDER BY datum desc, deviceId "
        params = [datumBegin, datumEind, deviceId]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_metingen_by_period(datumBegin, datumEind):
        sql = "SELECT datum, waarde, commentaar, deviceId "
        sql += "FROM Metingen "
        sql += "WHERE datum >= CAST(%s as datetime) AND datum <= CAST(%s as datetime) "
        sql += "ORDER BY datum desc, deviceId "
        params = [datumBegin, datumEind]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_metingen_by_date_and_device(date, deviceId):
        # Fixed: when parameters are passed, pymysql interpolates with %, so
        # literal percent signs in DATE_FORMAT must be escaped as %%.
        sql = "SELECT datum, waarde, commentaar, deviceId "
        sql += "FROM Metingen "
        sql += "WHERE DATE_FORMAT(datum, '%%Y-%%m-%%d') = %s AND deviceId = %s "
        sql += "ORDER BY datum desc, deviceId "
        params = [date, deviceId]
        return Database.get_rows(sql, params)

    @staticmethod
    def read_dates_in_period(datumBegin, datumEind):
        # Same %% escaping fix as read_metingen_by_date_and_device.
        sql = "SELECT distinct(DATE_FORMAT(datum, '%%Y-%%m-%%d')) as datum_distinct "
        sql += "FROM Metingen "
        sql += "WHERE datum >= CAST(%s as datetime) AND datum <= CAST(%s as datetime) "
        sql += "ORDER BY datum_distinct desc "
        params = [datumBegin, datumEind]
        return Database.get_rows(sql, params)

    @staticmethod
    def create_meting(deviceId, waarde, commentaar, datum):
        sql = "INSERT INTO Metingen(deviceId, waarde, commentaar, datum) VALUES(%s,%s,%s,%s) "
        params = [deviceId, waarde, commentaar, datum]
        return Database.execute_sql(sql, params)

    # -- settings ----------------------------------------------------------
    @staticmethod
    def read_settings_by_deviceId(deviceId):
        sql = "SELECT * from Settings WHERE deviceId = %s"
        params = [deviceId]
        return Database.get_rows(sql, params)

    @staticmethod
    def create_setting(deviceId, waarde, typeSetting):
        # Fixed: "waardeS(" -> VALUES( (same broken find/replace as create_device).
        sql = "INSERT INTO Settings(deviceId, waarde, type) VALUES(%s,%s,%s)"
        params = [deviceId, waarde, typeSetting]
        return Database.execute_sql(sql, params)

    @staticmethod
    def update_settings_by_deviceId(deviceId, settingType, waarde):
        sql = "UPDATE Settings SET waarde = %s WHERE type = %s and deviceId = %s "
        params = [waarde, settingType, deviceId]
        return Database.execute_sql(sql, params)

    @staticmethod
    def read_setting_max_by_deviceId(deviceId):
        sql = "SELECT deviceId, waarde, type from Settings WHERE type in ('top','max') and deviceId = %s"
        params = [deviceId]
        return Database.get_one_row(sql, params)

    @staticmethod
    def read_setting_min_by_deviceId(deviceId):
        sql = "SELECT deviceId, waarde, type from Settings WHERE type in ('bottom','min') and deviceId = %s"
        params = [deviceId]
        return Database.get_one_row(sql, params)
"noreply@github.com"
] | noreply@github.com |
df6d24540ef2bcdeecc36123121033886d8cc2d1 | fe6586bd9a9eff9bfb207342112acb523ddb107d | /cv/tf-notes-peking/sgdm.py | 1a4de67a4eadf6e9fa8054abf93a2a913d56009b | [] | no_license | KDD2018/Machine-Learning | 3c7b85a85ac713b6a686c9e0bc8eb1d8b4a3035b | ec05beaa985b8713629397dfeb5cbf799559a15a | refs/heads/master | 2022-09-22T04:48:44.214983 | 2022-09-05T07:21:33 | 2022-09-05T07:21:33 | 155,834,633 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | # 利用鸢尾花数据集,实现前向传播、反向传播,可视化loss曲线
# 导入所需模块
import tensorflow as tf
from sklearn import datasets
from matplotlib import pyplot as plt
import numpy as np
import time ##1##
# 导入数据,分别为输入特征和标签
x_data = datasets.load_iris().data
y_data = datasets.load_iris().target
# 随机打乱数据(因为原始数据是顺序的,顺序不打乱会影响准确率)
# seed: 随机数种子,是一个整数,当设置之后,每次生成的随机数都一样(为方便教学,以保每位同学结果一致)
np.random.seed(116) # 使用相同的seed,保证输入特征和标签一一对应
np.random.shuffle(x_data)
np.random.seed(116)
np.random.shuffle(y_data)
tf.random.set_seed(116)
# 将打乱后的数据集分割为训练集和测试集,训练集为前120行,测试集为后30行
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
# 转换x的数据类型,否则后面矩阵相乘时会因数据类型不一致报错
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
# from_tensor_slices函数使输入特征和标签值一一对应。(把数据集分批次,每个批次batch组数据)
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
# 生成神经网络的参数,4个输入特征故,输入层为4个输入节点;因为3分类,故输出层为3个神经元
# 用tf.Variable()标记参数可训练
# 使用seed使每次生成的随机数相同(方便教学,使大家结果都一致,在现实使用时不写seed)
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, seed=1))
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))
lr = 0.1 # 学习率为0.1
train_loss_results = [] # 将每轮的loss记录在此列表中,为后续画loss曲线提供数据
test_acc = [] # 将每轮的acc记录在此列表中,为后续画acc曲线提供数据
epoch = 500 # 循环500轮
loss_all = 0 # 每轮分4个step,loss_all记录四个step生成的4个loss的和
##########################################################################
m_w, m_b = 0, 0
beta = 0.9
##########################################################################
# 训练部分
now_time = time.time() ##2##
for epoch in range(epoch): # 数据集级别的循环,每个epoch循环一次数据集
for step, (x_train, y_train) in enumerate(train_db): # batch级别的循环 ,每个step循环一个batch
with tf.GradientTape() as tape: # with结构记录梯度信息
y = tf.matmul(x_train, w1) + b1 # 神经网络乘加运算
y = tf.nn.softmax(y) # 使输出y符合概率分布(此操作后与独热码同量级,可相减求loss)
y_ = tf.one_hot(y_train, depth=3) # 将标签值转换为独热码格式,方便计算loss和accuracy
loss = tf.reduce_mean(tf.square(y_ - y)) # 采用均方误差损失函数mse = mean(sum(y-out)^2)
loss_all += loss.numpy() # 将每个step计算出的loss累加,为后续求loss平均值提供数据,这样计算的loss更准确
# 计算loss对各个参数的梯度
grads = tape.gradient(loss, [w1, b1])
##########################################################################
# sgd-momentun
m_w = beta * m_w + (1 - beta) * grads[0]
m_b = beta * m_b + (1 - beta) * grads[1]
w1.assign_sub(lr * m_w)
b1.assign_sub(lr * m_b)
##########################################################################
# 每个epoch,打印loss信息
print("Epoch {}, loss: {}".format(epoch, loss_all / 4))
train_loss_results.append(loss_all / 4) # 将4个step的loss求平均记录在此变量中
loss_all = 0 # loss_all归零,为记录下一个epoch的loss做准备
# 测试部分
# total_correct为预测对的样本个数, total_number为测试的总样本数,将这两个变量都初始化为0
total_correct, total_number = 0, 0
for x_test, y_test in test_db:
# 使用更新后的参数进行预测
y = tf.matmul(x_test, w1) + b1
y = tf.nn.softmax(y)
pred = tf.argmax(y, axis=1) # 返回y中最大值的索引,即预测的分类
# 将pred转换为y_test的数据类型
pred = tf.cast(pred, dtype=y_test.dtype)
# 若分类正确,则correct=1,否则为0,将bool型的结果转换为int型
correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)
# 将每个batch的correct数加起来
correct = tf.reduce_sum(correct)
# 将所有batch中的correct数加起来
total_correct += int(correct)
# total_number为测试的总样本数,也就是x_test的行数,shape[0]返回变量的行数
total_number += x_test.shape[0]
# 总的准确率等于total_correct/total_number
acc = total_correct / total_number
test_acc.append(acc)
print("Test_acc:", acc)
print("--------------------------")
total_time = time.time() - now_time ##3##
print("total_time", total_time) ##4##
# 绘制 loss 曲线
plt.title('Loss Function Curve') # 图片标题
plt.xlabel('Epoch') # x轴变量名称
plt.ylabel('Loss') # y轴变量名称
plt.plot(train_loss_results, label="$Loss$") # 逐点画出trian_loss_results值并连线,连线图标是Loss
plt.legend() # 画出曲线图标
plt.show() # 画出图像
# 绘制 Accuracy 曲线
plt.title('Acc Curve') # 图片标题
plt.xlabel('Epoch') # x轴变量名称
plt.ylabel('Acc') # y轴变量名称
plt.plot(test_acc, label="$Accuracy$") # 逐点画出test_acc值并连线,连线图标是Accuracy
plt.legend()
plt.show()
# 请将loss曲线、ACC曲线、total_time记录到 class2\优化器对比.docx 对比各优化器收敛情况
| [
"935796564@qq.com"
] | 935796564@qq.com |
b6f099da37e81b5cb270c08fae24794902c4db00 | a56d9d534e21d8ea6d790056b56cb130471c4318 | /common/evaluation/evaluators/__init__.py | 9f88ec914fbbb99293faac16c9fcbecfe4a29a50 | [] | no_license | lilujunai/imp_reg_dl_not_norms | 20f07d0cc09437545fa20eb888b60e6a7e34a6b3 | 058ff3df04d8986c142dc791154ddb3f9d5a5277 | refs/heads/master | 2022-07-17T16:19:40.848893 | 2020-05-14T06:12:50 | 2020-05-14T06:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | from .evaluator import *
from .supervised_evaluator import *
| [
"noamrazin@gmail.com"
] | noamrazin@gmail.com |
d340edfc182f6a8ba9e32a1b0eef3de9b5f7140d | 24d456d7c43d132ea152f254b45fd8b66fa18d5c | /ros/src/waypoint_updater/waypoint_updater.py | 28179644abb175216cd7e200f43c4602f96092e5 | [
"MIT"
] | permissive | gaurav2205/CarND-Self_Drive_Car | 3c061528dcc1d8c31e4ff60666ab9a20d9f01802 | d6b72577ada3965af4cf386e70d8748f3ff37fbb | refs/heads/master | 2022-11-28T08:22:59.876599 | 2019-06-11T12:38:57 | 2019-06-11T12:38:57 | 191,258,955 | 0 | 0 | MIT | 2022-11-22T00:23:23 | 2019-06-10T23:22:08 | Makefile | UTF-8 | Python | false | false | 5,551 | py | #!/usr/bin/env python
# 2018
# Gaurav Garg, Amr Rizk, Roland Schirmer, Nobuyuki Tomatsu
import math
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header, Int32
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
'''
LOOKAHEAD_WPS = 150 # Number of waypoints to publish. Set down to 150, was 200
MAX_DECC = .5  # deceleration used when planning the stop profile (presumably m/s^2 — TODO confirm)
class WaypointUpdater(object): # define class
    """ROS node: publishes the next LOOKAHEAD_WPS waypoints ahead of the car
    on `final_waypoints`, lowering target velocities when a red-light stop
    line is within range."""

    def __init__(self):
        rospy.init_node('waypoint_updater') # initialize rospy node

        # Subscribers
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb) # current position
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) # all waypoints, once
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb) # traffic light waypoint
        # rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb) # obstacles, not used

        # Publishers
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=10) # 3 enough?

        # Declare private __fields
        self.__base_waypoints = None # all waypoints
        self.__base_waypoints_x_y = None # 2D, only x, y
        self.__current_pose = None # current position
        self.__waypoints_tree = None # waypoints sorted by distance by KDTree
        self.__stopline_wp_idx = -1 # Index of stopline waypoint

        self.loop()

    def loop(self): # cycle node at 50Hz
        """Main publishing loop: find the nearest waypoint and publish ahead of it."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown(): # while running
            if self.__current_pose and self.__waypoints_tree: # don't calculate if empty'
                idx = self.get_nearest_waypoint_id(self.__current_pose)
                self.update_waypoints(idx)
            rate.sleep()

    def pose_cb(self, pose): # current position
        """Cache the latest vehicle pose."""
        self.__current_pose = pose.pose

    def waypoints_cb(self, lane):
        """Cache the base waypoints once and build a KDTree over their x/y."""
        if not self.__waypoints_tree:
            self.__base_waypoints = lane.waypoints
            self.__base_waypoints_x_y = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in self.__base_waypoints]
            self.__waypoints_tree = KDTree(self.__base_waypoints_x_y)

    def get_nearest_waypoint_id(self, pose): # return index of nearest waypoint
        """Index of the closest base waypoint that lies *ahead* of the car."""
        idx = self.__waypoints_tree.query([pose.position.x, pose.position.y])[1] # calculate with KDTree
        closest_point = self.__base_waypoints_x_y[idx] # nearest point
        previous_point = self.__base_waypoints_x_y[idx - 1]
        closest_vector = np.array(closest_point) # vector to point
        previous_vector = np.array(previous_point)
        current_pos_vector = np.array([self.__current_pose.position.x, self.__current_pose.position.y]) # vector to car position
        # dot product decides whether the closest point is behind the car
        val = np.dot(closest_vector - previous_vector, current_pos_vector - closest_vector) # Skalarprodukt / dot product
        if val > 0: # point lies behind car
            return (idx + 1) % len(self.__base_waypoints_x_y)
        return idx # point in front of car

    def update_waypoints(self, idx):
        """Publish the next LOOKAHEAD_WPS waypoints, decelerating if needed."""
        # Creating header and setting timestamp
        header = Header()
        header.stamp = rospy.Time.now()
        msg = Lane()
        msg.header = header
        next_waypoints = self.__base_waypoints[idx: idx + LOOKAHEAD_WPS] # next waypoints to publish
        msg.waypoints = next_waypoints
        if self.__stopline_wp_idx != -1 and self.__stopline_wp_idx < (idx + LOOKAHEAD_WPS): # if stopline closer than LOOKAHEAD_WPS then decelerate
            msg.waypoints = self.__decelerate(next_waypoints, idx)
        self.final_waypoints_pub.publish(msg) # publish waypoints

    def __decelerate(self, waypoints, idx):
        """Return a copy of `waypoints` with velocities ramped down to stop
        just before the red-light stop line."""
        temp = []
        for i, wp in enumerate(waypoints): # add numbers
            p = Waypoint()
            p.pose = wp.pose
            stop_idx = max(self.__stopline_wp_idx - idx - 2, 0) # stop before stop line
            dist = self.distance(waypoints, i, stop_idx) # calculate distance to decrease velocity proportional to distance
            #vel = min(dist, wp.twist.twist.linear.x) # velocity <= distance, <= current velocity
            # constant-deceleration profile: v = sqrt(2*a*d)
            vel=math.sqrt(2*MAX_DECC*dist)
            if vel < 1.:
                vel = 0.
            p.twist.twist.linear.x=min(vel,wp.twist.twist.linear.x)
            temp.append(p) # add current value to temp
        return temp

    def traffic_cb(self, msg):
        """Cache the stop-line waypoint index (-1 means no red light ahead)."""
        self.__stopline_wp_idx = msg.data

    # def obstacle_cb(self, msg): # Callback for /obstacle_waypoint message. not used
    #     pass

    def get_waypoint_velocity(self, waypoint):
        """Target linear velocity stored on a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint]."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length from waypoint wp1 to wp2 (inclusive)."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    # Start the node; log if ROS interrupts startup.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| [
"noreply@github.com"
] | noreply@github.com |
576f5a05cda59a1aefc767be46108829bb8864b8 | 52c474b0cc197ab269bbdd6cc86b367efdc21875 | /day_03/py/radon.py | 5a91637f69b5f6dee70963e20b3b3757b954579d | [] | no_license | alexisperrier/XEmines-probabilistic-programming | e4ed0ca045650c63f9a06c1f1cfc993f0bc41bd3 | ecb5f66deb0658ac7d26baa32c7d86daa0cff97a | refs/heads/master | 2020-04-29T05:48:10.872542 | 2019-05-16T09:36:34 | 2019-05-16T09:36:34 | 175,894,588 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | '''
https://nbviewer.jupyter.org/github/fonnesbeck/multilevel_modeling/blob/master/multilevel_modeling.ipynb
'''
import sys
# scientific packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import optimize
import pymc3 as pm
import patsy as pt
if __name__ == "__main__":

    # Pooled model: single intercept + floor effect over all counties.
    df = pd.read_csv('../../data/radon.csv')
    df = df[['log_radon', 'floor']]

    with pm.Model() as model:
        pm.glm.GLM.from_formula('log_radon ~ floor', df)
        trace = pm.sample(10000, pm.NUTS())

    pm.traceplot(trace)
    pm.plot_posterior(trace)
    pm.summary(trace)

    # ------------------------
    # Unpooled OLS: one intercept per county (re-read to recover the county column).
    df = pd.read_csv('../../data/radon.csv')
    from statsmodels.formula.api import ols
    unpooled_fit = ols('log_radon ~ county + floor - 1', df).fit()
    unpooled_estimates = unpooled_fit.params

    # ---------------------------
    # Per-county intercept-only models (first 10 counties).
    # NOTE(review): counties[0:10] slices raw (possibly duplicated) values,
    # not unique county names — TODO confirm intent.
    counties = df.county.values
    all_traces = pd.DataFrame()
    for county in counties[0:10]:
        with pm.Model() as partial_pooling:
            # Priors
            mu_a = pm.Normal('mu_a', mu=0., sd=1000)
            sigma_a = pm.Uniform('sigma_a', lower=0, upper=100)

            # Random intercepts
            radon_level_county = pm.Normal('radon_level_county', mu=mu_a, sd=sigma_a, observed=df[df.county == county].log_radon)

            # sigma_y = pm.Uniform('sigma_y', lower=0, upper=100)
            # y_like = pm.Normal('y_like', mu=radon_level_county, sd=sigma_y, observed=df[df.county == county].log_radon)
            trace = pm.sample(1000, pm.NUTS() )
        trace_df = pm.summary(trace)
        trace_df['county'] = county
        all_traces = pd.concat([all_traces, trace_df])
        print('--' * 20)
        print(county)
        print(trace_df)

    # ------------------------------------
    # partial pooling
    # ------------------------------------
    # NOTE(review): this section is broken as written — `Normal`/`Uniform` are
    # used unqualified (only `pm.Normal` etc. are importable here) and
    # `tau_a`, `tau_y` and `log_radon` are never defined, so it raises
    # NameError. Left untouched pending a decision on the intended priors.
    with pm.Model() as varying_intercept:
        mu_a = pm.Normal('mu_a', mu=0., tau=0.0001)
        sigma_a = pm.Uniform('sigma_a', lower=0, upper=100)

        # Random intercepts and common slope
        a = Normal('a', mu=mu_a, tau=tau_a, shape=len(set(counties)))
        b = Normal('b', mu=0., tau=0.0001)
        # Model error
        sigma_y = Uniform('sigma_y', lower=0, upper=100)
        # Expected value
        y_hat = a + b * list(df.floor)
        # Data likelihood
        y_like = Normal('y_like', mu=y_hat, tau=tau_y, observed=log_radon)

    # --------------------------
    # Hierarchical model: per-county intercept and slope with shared hyperpriors.
    county_names = df.county.unique()
    county_idx = df['county_code'].values
    with pm.Model() as hierarchical_model:
        # Hyperpriors
        mu_a = pm.Normal('mu_alpha', mu=0., sd=1)
        sigma_a = pm.HalfCauchy('sigma_alpha', beta=1)
        mu_b = pm.Normal('mu_beta', mu=0., sd=1)
        sigma_b = pm.HalfCauchy('sigma_beta', beta=1)

        # Intercept for each county, distributed around group mean mu_a
        a = pm.Normal('alpha', mu=mu_a, sd=sigma_a, shape=len(df.county.unique()))
        # Intercept for each county, distributed around group mean mu_a
        b = pm.Normal('beta', mu=mu_b, sd=sigma_b, shape=len(df.county.unique()))

        # Model error
        eps = pm.HalfCauchy('eps', beta=1)

        # Expected value
        # radon_est = a[county_idx] + b[county_idx] * df.floor.values
        radon_est = pm.Deterministic( 'radon_est', a[county_idx] + b[county_idx] * df.floor.values)

        # Data likelihood
        y_like = pm.Normal('y_like', mu=radon_est, sd=eps, observed=df.log_radon)

        trace = pm.sample(10000)
    pm.summary(trace)
| [
"alexis.perrier@gmail.com"
] | alexis.perrier@gmail.com |
13959ba15ca422dde18259d7e1484859eb10f41f | 7ae9b49a92c6c49054be8d3b2c87494a4442092c | /python/xx.py | 19b8db61536370537e2b568f467945735a23a27c | [] | no_license | yanyingbing/dev | 425d66ec41d9946a4aec894f17ef706acae3225a | e20914081ab66b86536683f8aad0980d87563702 | refs/heads/master | 2021-01-25T04:02:05.851971 | 2014-08-29T07:58:12 | 2014-08-29T07:58:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | #!/usr/bin/python
x=98.0
if x!=99.000:
print x
| [
"yanyingbing@sinap.ac.cn"
] | yanyingbing@sinap.ac.cn |
44080a16806da4ce8e27acc10b8da2ee1fbfb2eb | b59ee2bef3517d60fe39205fbab61e0d04c40218 | /modules/command_handler.py | c49885e7c856009745db4f77c82d99e591942121 | [
"Apache-2.0"
] | permissive | Hiruma31/ADA | 7f722c0f975e44ea097df19603f2ecc6f36d5900 | 98d23041f1061999cd57a4156bd11eb0baed5582 | refs/heads/master | 2021-01-18T14:02:49.131326 | 2014-05-19T15:53:49 | 2014-05-19T15:53:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | import settings
# RegExp
import re
# PyAIML
import aiml
import commands
def command_handler():
    """
    Dispatch a speech-to-text order to the right handler module.

    NOTE(review): both inner handlers are defined but never invoked here, so
    calling command_handler() currently does nothing — confirm whether the
    dispatch wiring was left unfinished.
    """
    def regexp_handled(text_order):
        # RegExp Handler test
        # NOTE(review): '^()$' only matches the empty string — presumably a
        # placeholder pattern; verify before relying on it.
        re_test = re.search('^()$',text_order)
        if re_test:
            print "Test complete."

    def aiml_handled():
        # PyAIML handler test: load the brain, then loop reading stdin,
        # answering via AIML and speaking the reply with espeak.
        KERNEL = aiml.Kernel()
        # Load the AIML file
        KERNEL.learn("resources/test_AIML.aiml")
        # Set the name as a constant
        KERNEL.setBotPredicate("name", settings.SYSTEM_NAME)

        while True:
            input = raw_input("> ")
            response = KERNEL.respond(input)
            print response # Print the answer
            print commands.getoutput("/usr/bin/espeak -v en+f4 -p 99 -s 160 \"" + response + "\"") # Say the answer
"romain.grenet@gmail.com"
] | romain.grenet@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.