index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,800 | e6c6dd9b1cafdc2609547fcca32299dd66016b8e | import numpy as np
# Hyperbolic cotangent
coth = lambda x: 1 / np.tanh(x)
# Hyperbolic secant: sech(x) = 1/cosh(x) = 2 / (e^x + e^-x).
# FIX: the denominator previously used a minus sign, which computes
# csch(x) (= 1/sinh(x)), not sech(x).
sech = lambda x: 2 / (np.exp(x) + np.exp(-x))
# An inverse hyperbolic cosecant
arccsch = lambda x: np.log(1 / x + np.sqrt(1 + x ** 2) / np.abs(x)) |
986,801 | 0be99a9f33c15932b53d1aa1583342b4a3156b5b | """
Django settings for chuables project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import logging
import environ
from django.utils.translation import gettext_lazy as _
import os
# Project Base Paths
# project_root/chuables/config/settings.py - 3 = project_root/
ROOT_DIR = environ.Path(__file__) - 3
CHUABLES_DIR = ROOT_DIR.path('chuables')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load OS environment variables and then prepare to use them
env = environ.Env()
DJANGO_ENV = env.str('DJANGO_ENV', default='development')
# Loading .env file from root directory to set environment.
# OS Environment variables have precedence over variables defined
# in the .env file, that is to say variables from the .env files
# will only be used if not defined as environment variables.
env_file = ROOT_DIR('.env')
env.read_env(env_file)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '^^wvl9k-x!(z8rs-q4b=(xbr1bb4+ylfqltrmq$mg9d#5n3+y$'
SECRET_KEY = env.str('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# FIX: DEBUG was hard-coded to True even though everything else in this file
# is env-driven; it can now be switched off per-deployment.  The default
# stays True so existing development behavior is unchanged.
DEBUG = env.bool('DJANGO_DEBUG', default=True)
# Hostnames this site may serve; override per-deployment via env
# (comma-separated list).  Default [] preserves the previous behavior.
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=[])
USE_X_FORWARDED_HOST = env.bool('DJANGO_USE_X_FORWARDED_HOST', default=True)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_extensions',
'django.contrib.postgres',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'utils',
'axie',
'users'
]
# Rest Framework Settings
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES':
('rest_framework.permissions.DjangoModelPermissions', ),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'anon': env.str(
'DJANGO_DEFAULT_THROTTLE_RATE_ANON', default='60/minute'),
'user': env.str(
'DJANGO_DEFAULT_THROTTLE_RATE_USER', default='120/minute'),
},
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE':
env.int('DJANGO_DEFAULT_PAGE_SIZE', default=25),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.SearchFilter',
'django_filters.rest_framework.DjangoFilterBackend',
),
'EXCEPTION_HANDLER':
'chuables.exceptions.api_exception_handler',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
]
ROOT_URLCONF = 'chuables.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [CHUABLES_DIR('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_settings_export.settings_export',
],
},
},
]
WSGI_APPLICATION = 'chuables.wsgi.application'
# Custom User Model
# https://docs.djangoproject.com/en/2.0/topics/auth/customizing/#substituting-a-custom-user-model
AUTH_USER_MODEL = 'users.User'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Single 'default' database parsed from the DATABASE_URL env var
# (django-environ's env.db understands URLs like postgres://user:pass@host/db).
DATABASES = {
    'default': env.db('DATABASE_URL'),
}
# Wrap each HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Persistent connection lifetime in seconds (0 = close after each request).
DATABASES['default']['CONN_MAX_AGE'] = env.int(
    'DATABASE_CONN_MAX_AGE', default=0)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en'
LOCALE_PATHS = [
CHUABLES_DIR('locale'),
CHUABLES_DIR('contrib/rest_framework/locale'),
CHUABLES_DIR('contrib/auth/locale'),
CHUABLES_DIR('contrib/conf/locale'),
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
DOMAIN = env.str('DOMAIN', default='localhost:3000')
SITE_NAME = _(env.str('SITE_NAME'))
SITE_LOGO_URL = env.str('SITE_LOGO_URL')
SITE_OWNER_NAME = _(env.str('SITE_OWNER_NAME'))
SITE_OWNER_URL = env.str('SITE_OWNER_URL')
ADMIN_SITE_HEADER = env.str('ADMIN_SITE_HEADER')
API_BROWSER_HEADER = env.str('API_BROWSER_HEADER')
SETTINGS_EXPORT = [
'DOMAIN',
'SITE_NAME',
'ADMIN_SITE_HEADER',
'API_BROWSER_HEADER',
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'auth.backends.EmailOrUsernameModelBackend'
]
|
986,802 | efcfdbc8ddfd617e08deaacebdefd2d124056997 | import pickle
import torch
from torch.utils import data
import json
import numpy as np
class LatentMolsDataset(data.Dataset):
    """Thin ``torch.utils.data.Dataset`` wrapper around a sequence of
    latent-space molecule representations."""

    def __init__(self, latent_space_mols):
        """Keep a reference to the backing sequence (no copy is made)."""
        self.data = latent_space_mols

    def __len__(self):
        """Number of molecules in the dataset."""
        return len(self.data)

    def __getitem__(self, index):
        """Latent representation stored at position *index*."""
        return self.data[index]
|
986,803 | 7a3325b1a99ca75dece172f799a16ac2ee8cf449 | def w_sum(input,weight):
assert(len(input) == len(weight))
output = 0
for i in range(len(input)):
output += input[i]*weight[i]
return output
def vect_mat_mul(input, weight):
    """Multiply the matrix ``weight`` (a list of row vectors) by ``input``.

    output[i] is the dot product of row ``weight[i]`` with ``input``.

    FIX: the result vector was hard-coded to ``[0, 0, 0]``, so any weight
    matrix with other than 3 rows broke (IndexError or silently dropped
    rows); the output is now sized from ``weight``.  The assert now checks
    what the math actually requires: one weight per input element in every
    row (the old check compared the input length to the row *count*).
    """
    for row in weight:
        assert len(input) == len(row)
    output = [0] * len(weight)
    for i in range(len(weight)):
        output[i] = w_sum(input, weight[i])
    return output
def neural_network(input, weight):
    """Two-layer linear network: input -> hidden layer -> prediction.

    ``weight[0]`` maps input to hidden, ``weight[1]`` maps hidden to output.
    """
    hidden = vect_mat_mul(input, weight[0])
    return vect_mat_mul(hidden, weight[1])
def main():
    """Demo: run a 3-3-3 linear network on the first game's stats."""
    # input -> hidden weights, then hidden -> prediction weights.
    ih_wt = [[0.1, 0.2, -0.1],
             [-0.1, 0.1, 0.9],
             [0.1, 0.4, 0.1]]
    hp_wt = [[0.3, 1.1, -0.3],
             [0.1, 0.2, 0.0],
             [0.0, 1.3, 0.1]]
    weights = [ih_wt, hp_wt]
    # Per-game stats: toes on team, win/loss record, fan count (millions).
    toes = [8.5, 9.5, 9.9, 9.0]
    wlrec = [0.65, 0.8, 0.8, 0.9]
    nfans = [1.2, 1.3, 0.5, 1.0]
    game0 = [toes[0], wlrec[0], nfans[0]]
    print(neural_network(game0, weights))
if __name__ == "__main__":
main()
|
986,804 | 9f802b8e850318de86ad07ea19e4b4ded7fabd1b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
网格中的障碍物和空位置分别用 1 和 0 来表示。
示例 1:
输入:obstacleGrid = [[0,0,0],[0,1,0],[0,0,0]]
输出:2
解释:
3x3 网格的正中间有一个障碍物。
从左上角到右下角一共有 2 条不同的路径:
1. 向右 -> 向右 -> 向下 -> 向下
2. 向下 -> 向下 -> 向右 -> 向右
示例 2:
输入:obstacleGrid = [[0,1],[0,0]]
输出:1
提示:
m ==obstacleGrid.length
n ==obstacleGrid[i].length
1 <= m, n <= 100
obstacleGrid[i][j] 为 0 或 1
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/unique-paths-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    def uniquePathsWithObstacles(self, obstacleGrid: list[list[int]]) -> int:
        """Count monotone (right/down only) paths from the top-left to the
        bottom-right corner of a grid where cells equal to 1 are blocked.

        Classic DP: dp[i][j] = number of distinct paths reaching (i, j).
        """
        rows, cols = len(obstacleGrid), len(obstacleGrid[0])
        dp = [[0] * cols for _ in range(rows)]
        # The start cell is reachable only if it is not itself blocked.
        if obstacleGrid[0][0] != 1:
            dp[0][0] = 1
        # First column: reachable while every cell above was reachable and
        # the cell itself is free (dp values here are only ever 0 or 1).
        for i in range(1, rows):
            dp[i][0] = 1 if dp[i - 1][0] and obstacleGrid[i][0] != 1 else 0
        # First row: same reasoning, coming from the left.
        for j in range(1, cols):
            dp[0][j] = 1 if dp[0][j - 1] and obstacleGrid[0][j] != 1 else 0
        # Interior cells: paths from above plus paths from the left.
        for i in range(1, rows):
            for j in range(1, cols):
                if obstacleGrid[i][j] != 1:
                    dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
        return dp[-1][-1]
|
986,805 | e051c6ef4d65c34d07cea10db95aa17805f3c248 | """deck shuffle logic"""
from typing import TextIO, List
def get_instructions(file_input: TextIO) -> List[str]:
    """Read a text stream and return its lines with surrounding
    whitespace stripped.

    Arguments:
        file_input {TextIO} -- text stream

    Returns:
        List[str] -- list of instructions, one per input line
    """
    return [line.strip() for line in file_input]
def shuffle(instructions: List[str], deck: List[int]) -> List[int]:
    """Apply a sequence of shuffle instructions to ``deck`` and return the
    shuffled deck.

    Arguments:
        instructions {List[str]} -- shuffling instructions
        deck {List[int]} -- original deck

    Raises:
        ValueError: unknown instruction given

    Returns:
        List[int] -- shuffled deck
    """
    size = len(deck)
    for step in instructions:
        if step == "deal into new stack":
            # Plain reversal of the whole deck.
            deck = list(reversed(deck))
        elif step.startswith("cut "):
            # Rotate the deck by n (negative n rotates from the bottom).
            n = int(step.split(" ")[1])
            deck = deck[n:] + deck[:n]
        elif step.startswith("deal with increment "):
            # Place cards every `inc` positions, wrapping around.
            inc = int(step.split(" ")[-1])
            dealt = [None] * size
            pos = 0
            for card in deck:
                dealt[pos % size] = card
                pos += inc
            deck = dealt
        else:
            raise ValueError("Unknown instruction!")
    return deck
def find_iter(instructions: List[str], deck_size: int, shuffle_count: int, index: int) -> int:
    """find what card is at a given index, given large deck size and shuffle counts this needs to be
    computed

    The whole shuffle is tracked as an affine map card(i) = offset +
    i * increment (mod deck_size), so one pass over the instructions plus
    modular exponentiation replaces simulating every shuffle.
    NOTE(review): the modular inverses below use Fermat's little theorem
    (pow(x, deck_size - 2, deck_size)), which is only valid when deck_size
    is prime -- confirm callers always pass a prime deck size.

    Arguments:
        instructions {List[str]} -- shuffle instructions
        deck_size {int} -- deck size
        shuffle_count {int} -- how many times the deck will be shuffled
        index {int} -- index of the card we want to find

    Returns:
        int -- value of card at given index
    """
    offset = 0
    total_increment = 1
    for instruction in instructions:
        if instruction == "deal into new stack":
            # Reversal negates the stride and shifts the start by one stride.
            total_increment *= -1
            offset += total_increment
        elif instruction.startswith("cut "):
            _, cut_value = instruction.split(" ")
            cut_value = int(cut_value)
            # Cutting advances the starting card by cut_value strides.
            offset += total_increment * cut_value
        elif instruction.startswith("deal with increment "):
            increment = instruction.split(" ")[-1]
            increment = int(increment)
            # Dealing multiplies the stride by the modular inverse of the
            # increment (Fermat: increment^(p-2) mod p).
            total_increment *= pow(increment, deck_size-2, deck_size)
            total_increment %= deck_size
    # apply shuffles
    # Composing the same affine map shuffle_count times: the stride is raised
    # to that power, and the offsets form a geometric series
    # offset * (1 - inc^k) / (1 - inc), evaluated via a modular inverse.
    shuffle_increment = pow(total_increment, shuffle_count, deck_size)
    inverse = pow((1 - total_increment) % deck_size, deck_size-2, deck_size)
    shuffle_offset = offset * (1 - shuffle_increment) * inverse
    shuffle_offset %= deck_size
    return (shuffle_offset + index * shuffle_increment) % deck_size
|
986,806 | cfa09c5c85d3a28e168f9d1e938830a4badac071 | import csv
import os.path
from os import path
import mongo_api
def insert_msg(email, flight, msg):
    """Append one (email, flight, message) row to csv/messages.csv.

    FIXES:
      * the file is now opened via a context manager, so the handle is
        closed even if writing raises;
      * the path is built with os.path.join instead of the hard-coded
        Windows-only 'csv\\messages.csv', so the function also works on
        POSIX systems (on Windows the resulting path is unchanged).
    """
    csv_path = path.join('csv', 'messages.csv')
    print("Checking if 'messages.csv' exists...")
    if path.exists(csv_path):
        print("Opening 'messages.csv'...")
        mode = 'a'
    else:
        print("File does not exist...\nCreating 'messages.csv'...")
        print("Opening 'messages.csv'...")
        mode = 'w'
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(csv_path, mode, newline='') as csv_file:
        csv.writer(csv_file).writerow([email, flight, msg])
986,807 | 86e077a7780b916eac50cf8bd3412e3c05bd403a | import numpy as np
import submission as S
import helper as H
import cv2
'''
Q3.3:
1. Load point correspondences
2. Obtain the correct M2
3. Save the correct M2, C2, and P to q3_3.npz
'''
def test_M2_solution(pts1, pts2, intrinsics, M):
    '''
    Estimate all possible M2 and return the correct M2 and 3D points P
    :param pts1: point correspondences in image 1 (assumed Nx2 -- TODO confirm)
    :param pts2: point correspondences in image 2 (assumed Nx2 -- TODO confirm)
    :param intrinsics: mapping with camera matrices under keys 'K1' and 'K2'
    :param M: a scalar parameter computed as max (imwidth, imheight)
    :return: M2, the extrinsics of camera 2
             C2, the 3x4 camera matrix
             P, 3D points after triangulation (Nx3)
    '''
    # get fundamental matrix
    F = S.eightpoint(pts1,pts2,M)
    K1 = intrinsics['K1']
    K2 = intrinsics['K2']
    # get essential matrix
    E = S.essentialMatrix(F,K1,K2)
    # create the camera matrix for camera 1 (identity rotation, zero translation)
    M1 = np.hstack((np.eye(3),np.zeros((3,1))))
    C1 = np.matmul(K1,M1)
    # get the possible extrinsic matrices for camera 2 using E
    M_list = H.camera2(E)
    min_err = -1  # -1 acts as the "no candidate accepted yet" sentinel
    # test the 4 options for M2, and keep the one that produces the minimum error
    for i in range(4):
        test_M2 = M_list[:,:,i]
        test_C2 = np.matmul(K2,test_M2)
        test_P,test_err = S.triangulate(C1, pts1, test_C2, pts2)
        # if the minimum z position is positive, that means that this solution
        # for M2 gives a 3d point that is in front of both cameras, so we've found
        # our solution
        if np.min(test_P[:,2]) > 0 and (test_err < min_err or min_err == -1):
            M2 = test_M2
            C2 = test_C2
            P = test_P
            min_err = test_err
    print("Error before bundle adjustment:",min_err)
    # NOTE(review): if no candidate ever places all points in front of both
    # cameras, M2/C2/P are never bound and this return raises
    # UnboundLocalError -- confirm inputs guarantee at least one valid pose.
    return M2, C2, P
if __name__ == '__main__':
    # The image is only used to derive the normalisation scale
    # M = max(width, height) for the eight-point algorithm.
    im1 = cv2.imread('../data/im1.png')
    M = max(im1.shape)
    # Ground-truth point correspondences and the camera intrinsics.
    data = np.load('../data/some_corresp.npz')
    pts1 = data['pts1']
    pts2 = data['pts2']
    intrinsics = np.load('../data/intrinsics.npz')
    M2, C2, P = test_M2_solution(pts1, pts2, intrinsics, M)
    # Persist the recovered extrinsics, camera matrix and 3D points.
    np.savez('q3_3', M2=M2, C2=C2, P=P)
|
986,808 | 63c5cab05eab0ead17b9b585cd72a4cc626e1f5f | from VariantaEvolutiva.utils import CryptoPuzzle
""" Functia citeste datele primite in fisier;
:returns CryptoPuzzle: informatia despre puzzle
stocata sub forma de obiect de tip CryptoPuzzle """
def read(fileName):
    """Read the puzzle input file and return it as a CryptoPuzzle.

    File format: the number of words on the first line, then one
    comma-separated word (list of letters) per line; the last line is the
    expected result word.

    FIX: the file is now opened via a context manager so the handle is
    closed even if parsing raises (the old explicit close() was skipped
    on error).  Comments translated to English.

    :returns CryptoPuzzle: the puzzle information as a CryptoPuzzle object
    """
    with open(fileName, "r") as file:
        # number of words that are being added together
        # (read for its side effect of advancing past the header line)
        number = int(file.readline())
        # set collecting every distinct letter that appears in the puzzle
        myset = set()
        words = []
        for line in file.read().split("\n"):
            letters = line.split(",")
            let = []
            for l in letters:
                let.append(l)
                myset.add(l)
            words.append(let)
    # the last "word" in the file is the expected sum/result
    result = words.pop()
    letters = list(myset)
    return CryptoPuzzle(letters, words, result)
""" Functia scrie datele rezultate in fisier """
def write(fileName, cryptoPuzzle, perm):
    """Write the solved puzzle to a file.

    First line: the number of letters in the puzzle.  Then one line per
    letter as "letter,value", where the value is the letter's position in
    ``perm``; the '*' padding entry is skipped.

    FIX: the file is now opened via a context manager so the handle is
    closed even if a write raises.  Comments translated to English.
    """
    with open(fileName, "w") as file:
        # write the number of letters in the puzzle
        file.write(str(len(cryptoPuzzle.letters)) + '\n')
        # write one line per letter: the letter and its assigned value
        for i in sorted(perm):
            if i != '*':
                file.write(str(i) + ',')
                file.write(str(perm.index(i)) + '\n')
986,809 | ba51e97594567fba1e638bd8920c2469e11a5ff4 | from bs4 import BeautifulSoup
import requests
import math
### Search
def kr_search_encoding(keyword_kr, url):
    """Return *url* with a percent-encoded ``query`` parameter appended
    for the (typically Korean) keyword.

    FIX: the previous implementation percent-encoded the keyword by
    string-mangling ``str(keyword.encode('utf-8'))``, which produces wrong
    results for any keyword containing ASCII characters (they have no \\x
    escape in the repr).  urllib.parse.quote performs the same
    uppercase-hex UTF-8 percent-encoding correctly for any input.
    """
    from urllib.parse import quote
    return url + '&query=' + quote(keyword_kr)
def page_generator(url, page_num):
    """Return *url* with a ``page`` query parameter appended."""
    return "{}&page={}".format(url, page_num)
def dom_generator(url):
    """Fetch *url* over HTTP and return its parsed BeautifulSoup DOM."""
    response = requests.get(url)
    dom = BeautifulSoup(response.text, "html.parser")
    return dom
def generate_post_links(dom):
    """Extract job-posting URLs from a parsed search-result page.

    Each ``<a class="posting_name">`` href is made absolute by prefixing
    the site origin.
    """
    anchors = dom.findAll("a", {"class": "posting_name"})
    return ["https://www.jobplanet.co.kr" + anchor.get('href') for anchor in anchors]
def total_postings(dom):
    """Return the total posting count reported on the page.

    The first ``<span class="num">`` element holds the total.
    """
    counts = [span.text for span in dom.findAll("span", {"class": "num"})]
    return int(counts[0])
def file_writing(list1):
    """Write the given URLs to urls.txt, one per line (overwrites).

    FIX: uses a context manager so the file handle is closed even if the
    write raises.
    """
    with open("urls.txt", "w") as url_file:
        url_file.write('\n'.join(list1))
def main():
    """Crawl jobplanet search results for the keyword "데이터" ("data")
    and write every posting URL found to urls.txt."""
    default_url = 'https://www.jobplanet.co.kr/job_postings/search?_rs_act=index&_rs_con=search&_rs_element=see_more_job_postings_bottom'
    # blankurl = 'https://www.jobplanet.co.kr/job_postings/search?utf8=%E2%9C%93&jp_show_search_result=true&jp_show_search_result_chk=true&order_by=score'
    data_url = kr_search_encoding("데이터", default_url)
    data_dom = dom_generator(data_url)
    ### calculate the number of pages
    # assumes every results page lists len(links) postings -- TODO confirm
    links = generate_post_links(data_dom)
    postings = total_postings(data_dom)
    num_pages = math.ceil(postings / len(links))
    print("The number of pages: " + str(num_pages))
    ### generating the url of pages
    total_page_links = []
    for i in range(num_pages):
        total_page_links.append(page_generator(data_url, i + 1))
    ### total links of postings
    # one HTTP fetch per page; postings are concatenated in page order
    total_posting_links = []
    for page in total_page_links:
        total_posting_links = total_posting_links + generate_post_links(dom_generator(page))
    print("The number of postings: " + str(len(total_posting_links)))
    ### file writing to urls.txt
    file_writing(total_posting_links)
if __name__ == "__main__":
main() |
986,810 | 373da71878ff78f4760d1c4300211f78d44fcc70 | from fastapi import FastAPI, APIRouter, Query, HTTPException
from typing import Optional, Any
from app.schemas import RecipeSearchResults, Recipe, RecipeCreate
from app.recipe_data import RECIPES
app = FastAPI(title="Recipe API", openapi_url="/openapi.json")
api_router = APIRouter()
@api_router.get("/", status_code=200)
def root() -> dict:
"""
Root GET
"""
return {"msg": "Hello, World!"}
# Updated with error handling
# https://fastapi.tiangolo.com/tutorial/handling-errors/
@api_router.get("/recipe/{recipe_id}", status_code=200, response_model=Recipe)
def fetch_recipe(*, recipe_id: int) -> Any:
    """
    Fetch a single recipe by ID; 404 if no recipe has that ID.
    """
    match = next((recipe for recipe in RECIPES if recipe["id"] == recipe_id), None)
    if match is None:
        # the exception is raised, not returned - you will get a validation
        # error otherwise.
        raise HTTPException(
            status_code=404, detail=f"Recipe with ID {recipe_id} not found"
        )
    return match
@api_router.get("/search/", status_code=200, response_model=RecipeSearchResults)
def search_recipes(
*,
keyword: Optional[str] = Query(None, min_length=3, example="chicken"),
max_results: Optional[int] = 10,
) -> dict:
"""
Search for recipes based on label keyword
"""
if not keyword:
# we use Python list slicing to limit results
# based on the max_results query parameter
return {"results": RECIPES[:max_results]}
results = filter(lambda recipe: keyword.lower() in recipe["label"].lower(), RECIPES)
return {"results": list(results)[:max_results]}
@api_router.post("/recipe/", status_code=201, response_model=Recipe)
def create_recipe(*, recipe_in: RecipeCreate) -> dict:
"""
Create a new recipe (in memory only)
"""
new_entry_id = len(RECIPES) + 1
recipe_entry = Recipe(
id=new_entry_id,
label=recipe_in.label,
source=recipe_in.source,
url=recipe_in.url,
)
RECIPES.append(recipe_entry.dict())
return recipe_entry
# Register all API routes on the application.
app.include_router(api_router)
if __name__ == "__main__":
    # Use this for debugging purposes only
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")
|
986,811 | 63aa2809bcfeffeef8ea5fb3a6cdad8f5dd5f1b4 | # Generated by Django 3.0.3 on 2020-03-27 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional (blank, default '')
    # "subtitle" CharField to the search.UserChart model.
    dependencies = [
        ('search', '0003_auto_20200317_2218'),
    ]
    operations = [
        migrations.AddField(
            model_name='userchart',
            name='subtitle',
            field=models.CharField(blank=True, default='', max_length=254),
        ),
    ]
|
986,812 | bd2e1f07d3fcd6f0e4f3907ea116f0d78b09aaa0 | # Implementation of linked list from scratch
class Node(object):
    """One element of a singly linked list: a value plus a pointer to the
    following node (None at the tail)."""

    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next_node = next_node

    def get_data(self):
        """Return the value stored in this node."""
        return self.data

    def get_next(self):
        """Return the node this one points to, or None at the tail."""
        return self.next_node

    def set_next(self, new_next):
        """Re-point this node at *new_next*."""
        self.next_node = new_next
class LinkedList(object):
    """Singly linked list built from Node objects; insertion is at the head.

    FIX: delete() raised ValueError('Data no in linked list.') -- a typo --
    while search() raised "Data not in list"; both now raise the same,
    correctly spelled message.
    """

    def __init__(self, head=None):
        self.head = head

    # Insert: inserts a new node into the list
    def insert(self, data):
        """Insert a new node holding *data* at the start of the list (O(1))."""
        new_node = Node(data)
        new_node.set_next(self.head)
        self.head = new_node

    # Size: returns size of list
    def size(self):
        """Return the number of nodes in the list (O(n) walk)."""
        current = self.head
        count = 0
        while current:
            count += 1
            current = current.get_next()
        return count

    # Search: returns the first node containing the requested data
    def search(self, data):
        """Return the first node containing *data*.

        Raises:
            ValueError: if *data* is not present in the list.
        """
        current = self.head
        while current:
            if current.get_data() == data:
                return current
            current = current.get_next()
        raise ValueError("Data not in list")

    # Delete: removes the first node containing the requested data
    def delete(self, data):
        """Remove the first node containing *data*.

        Raises:
            ValueError: if *data* is not present in the list.
        """
        current = self.head
        previous = None
        while current:
            if current.get_data() == data:
                break
            previous = current
            current = current.get_next()
        if current is None:
            raise ValueError("Data not in list")
        if previous is None:
            # Deleting the head node: advance the head pointer.
            self.head = current.get_next()
        else:
            previous.set_next(current.get_next())
|
986,813 | cb39ca5c375cd1066ddd7b080f71b6634c330e47 | from binascii import hexlify
from typing import Dict
import base58
from common.serializers.serialization import serialize_msg_for_signing
from libnacl import randombytes
from plenum.common.types import f
from plenum.common.util import hexToFriendly
from stp_core.crypto.nacl_wrappers import SigningKey, Signer as NaclSigner
from stp_core.crypto.signer import Signer
class SimpleSigner(Signer):
    """
    A simple implementation of Signer.
    This signer creates a public key and a private key using the seed value
    provided in the constructor. It internally uses the NaclSigner to generate
    the signature and keys.
    """
    # TODO: Do we need both alias and identifier?
    def __init__(self, identifier=None, seed=None, alias=None):
        """
        Initialize the signer with an identifier and a seed.
        :param identifier: some identifier that directly or indirectly
        references this client
        :param seed: the seed used to generate a signing key.
        :param alias: optional human-readable alias for this signer.
        """
        # should be stored securely/privately; a random 32-byte seed is
        # generated when none is supplied
        self.seed = seed if seed else randombytes(32)
        # generates key pair based on seed
        self.sk = SigningKey(seed=self.seed)
        # helper for signing
        self.naclSigner = NaclSigner(self.sk)
        # this is the public key used to verify signatures (securely shared
        # before-hand with recipient), stored in base58-friendly form
        hex_verkey = hexlify(self.naclSigner.verraw)
        self.verkey = hexToFriendly(hex_verkey)
        self._identifier = identifier or self.verkey
        self._alias = alias
    @property
    def alias(self) -> str:
        # Human-readable alias supplied at construction (may be None).
        return self._alias
    @property
    def identifier(self) -> str:
        # Caller-supplied identifier, falling back to the verification key.
        return self._identifier
    @property
    def seedHex(self) -> bytes:
        # Hex-encoded form of the signing seed.
        return hexlify(self.seed)
    def sign(self, msg: Dict) -> str:
        """
        Return a base58-encoded signature string for the given message.
        The message is serialized for signing with any existing signature
        fields (sig / sigs) excluded, then signed with the NaCl key.
        """
        ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,
                                                                   f.SIGS.nm])
        bsig = self.naclSigner.signature(ser)
        sig = base58.b58encode(bsig).decode("utf-8")
        return sig
|
986,814 | d1bc60e83dc9fc93a1b33a79875a4b9bb1551549 | import sence
import os
from datetime import datetime, date, time
from utils import HtmlManage as html
# Timestamp used to make the output file name unique, e.g. "05-Mar-2021-10.30AM".
now = datetime.now()
now = str(now.strftime("%d-%b-%Y-%I.%M%p"))
# SENCE course code typed by the user (Spanish prompt: "Enter SENCE code (10 digits)").
codigoSence = str(input("Ingresar Codigo Sence (10 Digitos): "))
sen = sence.SENCE()
# Fetch the course page source for the given code.
Source = sen.codCurso(codigoSence)
registry = codigoSence +"-" + now
# Write the fetched source to an HTML file; its path is opened below.
htmlPath = html.writeHTML(Source,registry,"html")
os.startfile(htmlPath) |
986,815 | 86a296e5dc54c81e7ff50c6e8a3e0c72a7614f27 | import pytest
from dataclasses import FrozenInstanceError
from bank_ddd_es_cqrs.accounts import AmountDTO, ClientDetailsDTO
def test_amount_dto_immutable():
    """AmountDTO is a frozen dataclass: attribute assignment must raise."""
    amount = AmountDTO(
        dollars=23,
        cents=12
    )
    with pytest.raises(FrozenInstanceError):
        amount.cents = 23
def test_client_details_dto_immutable():
    """ClientDetailsDTO is a frozen dataclass: attribute assignment must raise."""
    details = ClientDetailsDTO(
        social_security_number=143123654,
        first_name="gfdg",
        last_name="erwr",
        birthdate="27/02/1998",
    )
    with pytest.raises(FrozenInstanceError):
        details.first_name = 'fdsfdsf'
|
986,816 | 162e360a1647d6994f33211187e949b8bb9ea334 | import functools
def limitable(generator_func):
    '''A decorator that lets you limit the number of results yielded by a generator.

    The wrapped generator accepts an extra keyword argument ``limit``; when
    given, at most ``limit`` items are produced.

    FIX: the original returned ``(gen.next() for i in xrange(limit))``, which
    is Python-2-only (``gen.next`` / ``xrange``) and, even translated to
    Python 3, would raise RuntimeError under PEP 479 whenever the source
    generator yields fewer than ``limit`` items.  itertools.islice handles
    both cases correctly.
    '''
    def wrapper(*args, **kwargs):
        import itertools
        limit = kwargs.pop('limit', None)
        gen = generator_func(*args, **kwargs)
        if limit is None:
            return gen
        return itertools.islice(gen, limit)
    return functools.update_wrapper(wrapper, generator_func)
|
986,817 | 34b6b671e86abc961951efe91c7b65bf395ad89c | from tkinter import *
from tkinter import messagebox
from tkinter import scrolledtext
from cx_Oracle import *
import socket
import requests
import matplotlib.pyplot as plt
import numpy as np
import bs4
import pandas as pd
res = requests.get("https://www.brainyquote.com/quotes_of_the_day.html")
soup = bs4.BeautifulSoup(res.text, 'lxml')
quote = soup.find('img', {"class":"p-qotd"})
text = 'Quote of the day is : ' + quote['alt']
try:
socket.create_connection( ("www.google.com", 80))
city = "Mumbai"
a1 = "https://api.openweathermap.org/data/2.5/weather?units=metric"
a2 = "&q=" + city
a3 = "&appid=c6e315d09197cec231495138183954bd"
api_address = a1 + a2 + a3
res1 = requests.get(api_address)
data = res1.json()
main = data['main']
#print(main)
temp = main['temp']
#print("temp=", temp)
temp1 = str(city) + " " + str(temp) + " " + str(main)
except OSError as e:
print("check network", e)
def f1():
    # Switch from the main menu to the "Add student" window.
    root.withdraw()
    adst.deiconify()
def f2():
    # Return from the "Add student" window to the main menu.
    adst.withdraw()
    root.deiconify()
def f4():
    # Clear the view text area and return from the "View" window to the menu.
    st.delete(1.0,END)
    root.deiconify()
    vist.withdraw()
def f3():
    """Show the "View students" window and populate it with every row of
    the student_2020 table.

    NOTE(review): DB credentials are hard-coded; move to config/env.
    """
    vist.deiconify()
    root.withdraw()
    import cx_Oracle
    con=None
    cursor=None
    try:
        con=cx_Oracle.connect("system/abc123")
        print("u r connected ")
        cursor=con.cursor()
        sql="select * from student_2020"
        cursor.execute(sql)
        data=cursor.fetchall()
        mdata=""
        # Format each row as "rno = X   name = Y   marks = Z" on its own line.
        for d in data:
            mdata=mdata+" rno = "+str(d[0])+"\t"+ " name = "+d[1]+"\t"+ " marks = "+str(d[2])+"\n"
        st.insert(INSERT, mdata)
    except cx_Oracle.DatabaseError as e:
        print("Wrong Data ",e)
    finally:
        # Always release the cursor and connection.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
        print("U r disconnected ")
def f5():
    """Validate the Add-student form fields and insert one row into the
    student_2020 table.

    FIXES:
      * entAddname.focus() / entAddmarks.focus() raised NameError (wrong
        capitalisation of the widget names) whenever name validation or
        the insert failed;
      * the INSERT now uses Oracle bind variables instead of building SQL
        with the %% operator, which was an SQL-injection vector and also
        quoted the numeric columns as strings.

    NOTE(review): DB credentials are hard-coded; move to config/env.
    """
    import cx_Oracle
    con = None
    cursor = None
    try:
        con = cx_Oracle.connect("system/abc123")
        cursor = con.cursor()
        # Bind variables: values are passed separately from the SQL text.
        sql = "insert into student_2020 values(:1, :2, :3)"
        rno = entAddRno.get()
        # rno must be a positive integer.
        if rno.isdigit() and int(rno) > 0:
            rno = int(rno)
        else:
            messagebox.showerror("Error", "Enter a valid rno")
            entAddRno.delete(0, END)
            entAddRno.focus()
            return
        name = entAddName.get()
        # name must be alphabetic with at least two letters.
        if name.isalpha() and len(name) > 1:
            name = name
        else:
            messagebox.showerror("Error", "Enter valid name(min 2 letters)")
            entAddName.delete(0, END)
            entAddName.focus()
            return
        marks = entAddMarks.get()
        # marks must be an integer in 1..100.
        if marks.isdigit() and int(marks) > 0 and int(marks) < 101:
            marks = int(marks)
        else:
            messagebox.showerror("Error", "Enter +ve marks(0-100)")
            entAddMarks.delete(0, END)
            entAddMarks.focus()
            return
        cursor.execute(sql, (rno, name, marks))
        msg = str(cursor.rowcount) + " records inserted "
        messagebox.showinfo("Successful", msg)
        entAddRno.delete(0, END)
        entAddName.delete(0, END)
        entAddMarks.delete(0, END)
        entAddRno.focus()
        entAddName.focus()
        entAddMarks.focus()
        con.commit()
    except ValueError:
        messagebox.showerror('Some Error there ')
        entAddRno.delete(0, END)
        entAddRno.focus()
        entAddName.delete(0, END)
        entAddName.focus()
        entAddMarks.delete(0, END)
        entAddMarks.focus()
    except cx_Oracle.DatabaseError as e:
        con.rollback()
        messagebox.showerror("Failure ", e)
        entAddRno.delete(0, END)
        entAddRno.focus()
        entAddName.delete(0, END)
        entAddName.focus()
        entAddMarks.delete(0, END)
        entAddMarks.focus()
    finally:
        # Always release DB resources.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
def f6():
    # Switch from the main menu to the "Update student" window.
    root.withdraw()
    udst.deiconify()
def f7():
    # Return from the "Update student" window to the main menu.
    udst.withdraw()
    root.deiconify()
def f8():
    """Validate the Update-student form fields and update the matching row
    in the student_2020 table.

    NOTE(review): the UPDATE is built with Python %-formatting rather than
    bind variables -- an SQL-injection risk (only partly mitigated by the
    isdigit/isalpha checks); switch to cursor.execute(sql, params).
    Credentials are also hard-coded.
    """
    import cx_Oracle
    con=None
    cursor=None
    try:
        con=cx_Oracle.connect("system/abc123")
        print("u r connected ")
        cursor=con.cursor()
        sql="update student_2020 set name='%s',marks=%d where rno=%d"
        rno=entUpdateRno.get()
        # rno must be a positive integer.
        if rno.isdigit() and int(rno)>0:
            rno=int(rno)
        else:
            messagebox.showerror("Error","Enter a valid id")
            entUpdateRno.delete(0,END)
            entUpdateRno.focus()
            return
        name=entUpdateName.get()
        # name must be alphabetic with at least two letters.
        if name.isalpha() and len(name)>1:
            name=name
        else:
            messagebox.showerror("Error","Enter valid name (min 2 letters)")
            entUpdateName.delete(0,END)
            entUpdateName.focus()
            return
        marks=entUpdateMarks.get()
        # marks must be an integer in 1..100.
        if marks.isdigit() and int(marks)>0 and int(marks)<101:
            marks=int(marks)
        else:
            messagebox.showerror("Error","Enter positive marks(0-100)")
            entUpdateMarks.delete(0,END)
            entUpdateMarks.focus()
            return
        args=(name,marks,rno)
        cursor.execute(sql%args)
        con.commit()
        msg=str(cursor.rowcount)+" records updated "
        messagebox.showinfo("Success", msg)
        entUpdateRno.delete(0,END)
        entUpdateRno.focus()
        entUpdateName.delete(0,END)
        entUpdateName.focus()
        entUpdateMarks.delete(0,END)
        entUpdateMarks.focus()
    except cx_Oracle.DatabaseError as e:
        con.rollback()
        messagebox.showerror("Failure ",e)
        entUpdateRno.delete(0,END)
        entUpdateRno.focus()
        entUpdateName.delete(0,END)
        entUpdateName.focus()
        entUpdateMarks.delete(0,END)
        entUpdateMarks.focus()
    finally:
        # Always release DB resources.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
        print("U r disconnected ")
def f9():
    # Switch from the main menu to the "Delete student" window.
    root.withdraw()
    dldst.deiconify()
def f10():
    # Return from the "Delete student" window to the main menu.
    dldst.withdraw()
    root.deiconify()
def f11():
    """Validate the rno field and delete the matching row from the
    student_2020 table.

    NOTE(review): the DELETE statement is built with %-formatting instead
    of bind variables (SQL-injection risk, mitigated only by the isdigit
    check); credentials are hard-coded.
    """
    import cx_Oracle
    con = None
    cursor=None
    try:
        con = cx_Oracle.connect("system/abc123")
        cursor = con.cursor()
        sql = "delete from student_2020 where rno=%s"
        rno = entDeleteRno.get()
        # rno must be a positive integer, otherwise reject and refocus.
        if rno.isdigit() and int(rno)>0:
            args=(rno)
            cursor.execute(sql%args)
            con.commit()
        else:
            messagebox.showerror("Error","Enter a valid rno")
            entDeleteRno.delete(0,END)
            entDeleteRno.focus()
            return
        msg=str(cursor.rowcount)+" records deleted "
        messagebox.showinfo("Successful", msg)
        entDeleteRno.delete(0,END)
        entDeleteRno.focus()
    except cx_Oracle.DatabaseError as e:
        con.rollback()
        messagebox.showerror("Failure ",e)
        entDeleteRno.delete(0,END)
        entDeleteRno.focus()
    finally:
        # Always release DB resources.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
def f12():
    """Query the top three students by marks and show a matplotlib bar
    chart of their names vs. marks."""
    import cx_Oracle
    con = None
    cursor = None
    try:
        con = cx_Oracle.connect("system/abc123")
        cursor = con.cursor()
        cursor.execute("select count(*) from student_2020 ")
        for row in cursor:
            totalstudents=row[0]
        # NOTE(review): this t is overwritten below before use -- dead assignment.
        t=np.arange(totalstudents)
        # Top 3 rows by marks (ROWNUM is applied after the ordered subquery).
        cursor.execute("SELECT * FROM(SELECT rno, name, marks FROM student_2020 ORDER BY marks DESC) WHERE ROWNUM <=3 ")
        #cursor.execute(sql)
        #con.commit()
        rno=[]
        name=[]
        marks=[]
        for row in cursor:
            rno.append(row[0])
            name.append(row[1])
            marks.append(row[2])
        bar_width=0.4
        t = np.arange(len(name))
        plt.bar(t,marks,bar_width,label = "Marks", color = 'g', alpha=0.8)
        plt.xticks(t,name,fontsize = 10)
        plt.xlabel("Name")
        plt.ylabel("Marks")
        plt.title("Top 3 Student's Marks")
        plt.legend()
        plt.grid()
        # Annotate each plotted bar with its mark value (zip truncates the
        # index range to the number of bars actually plotted).
        xs=[x for x in range(0,totalstudents)]
        for x,y in zip(xs,marks):
            plt.annotate(marks[x],(x-bar_width/2,y))
        plt.show()
    except cx_Oracle.DatabaseError as e:
        con.rollback()
        messagebox.showerror("Failure ",e)
# ---------------------------------------------------------------------------
# GUI wiring.  One hidden Toplevel per CRUD screen; the fN callbacks above
# toggle withdraw()/deiconify() to switch between them.
# NOTE(review): `text` and `temp1` (quote / temperature labels) must be
# defined earlier in the file -- confirm.
# ---------------------------------------------------------------------------
# Main menu window.
root = Tk()
root.title("Student Management System ")
root.geometry("500x400+200+200")
lbltitle=Label(root,text="--- Student Management System ---",font=("arial", 18, 'bold'))
btnAdd = Button(root, text="Add", font=("arial", 18, 'bold'),width=10, command=f1)
btnView = Button(root, text="View", font=("arial", 18, 'bold'),width=10, command=f3)
btnUpdate = Button(root, text="Update", font=("arial", 18, 'bold'),width=10, command=f6)
btnDelete = Button(root, text="Delete", font=("arial", 18, 'bold'),width=10, command=f9)
lblQuote = Label(root,text = text, font=("arial", 18, 'bold'))
lblTemperature = Label(root,text = temp1, font=("arial", 18, 'bold'))
btnGraph = Button(root, text="Graph", font=("arial", 18, 'bold'),width=20, command=f12)
btnAdd.pack(pady=10)
btnView.pack(pady=10)
btnUpdate.pack(pady=10)
btnDelete.pack(pady=10)
lblQuote.pack(pady=10)
lblTemperature.pack(pady=10)
btnGraph.pack(pady=10)
# Add-student window (hidden until the Add button triggers f1).
adst = Toplevel(root)
adst.title("Add Student.")
adst.geometry("500x400+200+200")
adst.withdraw()
lblAddRno = Label(adst, text="enter rno", font=("arial",18,'bold') )
entAddRno = Entry(adst, bd=10, font=("arial", 18, 'bold'))
lblAddName = Label(adst, text="enter name", font=("arial",18,'bold') )
entAddName = Entry(adst, bd=10, font=("arial", 18, 'bold'))
lblAddMarks = Label(adst, text="enter marks", font=("arial",18,'bold') )
entAddMarks = Entry(adst, bd=10, font=("arial", 18, 'bold'))
btnAddSave = Button(adst, text="Save" , font=("arial", 18,'bold'), command=f5)
btnAddBack = Button(adst, text="Back" , font=("arial", 18, 'bold'), command=f2)
lblAddRno.pack(pady=10)
entAddRno.pack(pady=10)
lblAddName.pack(pady=10)
entAddName.pack(pady=10)
lblAddMarks.pack(pady=10)
entAddMarks.pack(pady=10)
btnAddSave.pack(pady=10)
btnAddBack.pack(pady=10)
# View-students window: scrollable read-only text area filled by f3.
vist = Toplevel(root)
vist.title("View Student.")
vist.geometry("500x400+200+200")
vist.withdraw()
st = scrolledtext.ScrolledText(vist, width=50, height=10,font=("arial", 14, 'italic'))
btnViewBack = Button(vist, text="Back",font=("arial", 18, 'bold') , command=f4)
st.pack(pady=10)
btnViewBack.pack(pady=10)
# Update-student window (rno selects the row; name/marks are new values).
udst = Toplevel(root)
udst.title("Update Student.")
udst.geometry("500x400+200+200")
udst.withdraw()
lblUpdateRno = Label(udst, text="enter rno", font=("arial",18,'bold') )
entUpdateRno = Entry(udst, bd=10, font=("arial", 18, 'bold'))
lblUpdateName = Label(udst, text="enter name", font=("arial",18,'bold') )
entUpdateName = Entry(udst, bd=10, font=("arial", 18, 'bold'))
lblUpdateMarks = Label(udst, text="enter marks", font=("arial",18,'bold') )
entUpdateMarks = Entry(udst, bd=10, font=("arial", 18, 'bold'))
btnUpdateSave = Button(udst, text="Save" , font=("arial", 18,'bold'), command=f8)
btnUpdateBack = Button(udst, text="Back" , font=("arial", 18, 'bold'), command=f7)
lblUpdateRno.pack(pady=10)
entUpdateRno.pack(pady=10)
lblUpdateName.pack(pady=10)
entUpdateName.pack(pady=10)
lblUpdateMarks.pack(pady=10)
entUpdateMarks.pack(pady=10)
btnUpdateSave.pack(pady=10)
btnUpdateBack.pack(pady=10)
# Delete-student window (f11 performs the delete).
dldst = Toplevel(root)
dldst.title("Delete Student.")
dldst.geometry("500x400+200+200")
dldst.withdraw()
lblDeleteRno = Label(dldst, text="enter rno", font=("arial",18,'bold') )
entDeleteRno = Entry(dldst, bd=10, font=("arial", 18, 'bold'))
btnDeleteSave = Button(dldst, text="Save" , font=("arial", 18,'bold'), command=f11)
btnDeleteBack = Button(dldst, text="Back" , font=("arial", 18, 'bold'), command=f10)
lblDeleteRno.pack(pady=10)
entDeleteRno.pack(pady=10)
btnDeleteSave.pack(pady=10)
btnDeleteBack.pack(pady=10)
# Graph window is created but unused: f12 opens a matplotlib figure instead.
graphst = Toplevel(root)
graphst.title("Graph of top 3 students")
graphst.geometry("500x400+200+200")
graphst.withdraw()
root.mainloop()
|
986,818 | 0ff18a291473e4bf298fcfe41cb8753278680555 | # -*- coding: utf-8 -*-
# Project Euler Problem 1
# 1000までの数のうち3または5で割り切れる数の総和を求めよ
import time

time1 = time.time()
# Project Euler #1: sum of all multiples of 3 or 5 below 1000.  The original
# kept a redundant counter `i` that simply mirrored `number`; a single
# generator expression replaces the whole loop.
s = sum(n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0)
print("answer is {}".format(s))
print("{} seconds".format(time.time() - time1))
del (s, time1)
# Keep the console window open until the user presses Enter.
input()
|
986,819 | 2bc15b9fdeebfa8e7275575a1ecd754a5c038f0e | import sys
#sys.argv[0] is the .py file
print sys.argv[1:]
files = [open(f) for f in sys.argv[1:]]
output = open('majority_vote7.txt', 'w')
for i in range(1500):
AlessB=0
for f in files:
if f.readline().strip() == 'A':
AlessB += 1
else:
AlessB -= 1
if AlessB >= 0:
output.write('A\n')
else:
output.write('B\n')
|
986,820 | f5a3692d46b977be72261be7b2d116f3921d6259 | # -*- coding: utf-8 -*-
"""This module contains information on entry types.
Contains a dict with all valid entry types and their required and optional
fields, a set of possible fields and a list of valid entry types.
"""
entry_types = {"article":
{"required": {"author", "title", "journal", "year", "volume"},
"optional": {"number", "pages", "month", "note", "key"}},
"book":
{"required": {"author/editor", "title", "publisher", "year"},
"optional": {"volume/number", "series", "address", "edition",
"month", "note", "key", "url"}},
"booklet":
{"required": {"title"},
"optional": {"author", "howpublished", "address", "month",
"year", "note", "key"}},
"conference":
{"required": {"author", "title", "booktitle", "year"},
"optional": {"editor", "volume/number", "series", "pages",
"address", "month", "organization", "publisher",
"note", "key"}},
"inbook":
{"required": {"author/editor", "title", "chapter/pages",
"publisher", "year"},
"optional": {"volume/number", "series", "type", "address",
"edition", "month", "note", "key"}},
"incollection":
{"required": {"author", "title", "booktitle", "publisher",
"year"},
"optional": {"editor", "volume/number", "series", "type",
"chapter", "pages", "address", "edition", "month",
"note", "key"}},
"inproceedings":
{"required": {"author", "title", "booktitle", "year"},
"optional": {"editor", "volume/number", "series", "pages",
"address", "month", "organization", "publisher",
"note", "key"}},
"manual":
{"required": {"title"},
"optional": {"author", "organization", "address", "edition",
"month", "year", "note", "key"}},
"mastersthesis":
{"required": {"author", "title", "school", "year"},
"optional": {"type", "address", "month", "note", "key"}},
"misc":
{"required": {},
"optional": {"author", "title", "howpublished", "month",
"year", "note", "key"}},
"phdthesis":
{"required": {"author", "title", "school", "year"},
"optional": {"type", "address", "month", "note", "key"}},
"proceedings":
{"required": {"title", "year"},
"optional": {"editor", "volume/number", "series", "address",
"month", "publisher", "organization", "note",
"key"}},
"techreport":
{"required": {"author", "title", "institution", "year"},
"optional": {"type", "number", "address", "month", "note",
"key"}},
"unpublished":
{"required": {"author", "title", "note"},
"optional": {"month", "year", "key"}}}
fields = {"address", "annote", "author", "booktitle", "chapter", "crossref",
"edition", "editor", "howpublished", "institution", "journal", "key",
"month", "note", "number", "organization", "pages", "publisher",
"school", "series", "title", "type", "volume", "year"}
types = list(entry_types.keys())
|
986,821 | 629e0815fc615475f284dbc5214e22d30b0f354c | '''
django装饰器
@login_required
@login_required(login_url='/accounts/login/')
def my_view(request): 每次访问my_view 时,都会进入login_required
'''
'''
group_required('admins', 'seller')
'''
def group_required(*group_names):
    """Requires user membership in at least one of the groups passed in."""
    def in_groups(u):
        # Superusers always pass; otherwise the user must belong to one of
        # the named groups.  `or` replaces the original bitwise `|`, which
        # evaluated both operands and read as a typo; .exists() avoids
        # materialising the queryset just for a truth test.
        if u.is_authenticated():
            if u.groups.filter(name__in=group_names).exists() or u.is_superuser:
                return True
        return False
    return user_passes_test(in_groups)
@group_required('admins', 'seller')
def my_view(request, pk):
    # Example view: only reachable by users in the 'admins' or 'seller' group.
    pass
# Django's built-in access restriction is @login_required (configured in
# settings).  Below is a hand-rolled equivalent decorator.
def my_login(func):
    def check_login(request):
        # Check the session for a logged-in user id.
        if request.session.get('user_id'):
            # A user is logged in: call the wrapped view normally.
            return func(request)
        else:
            # Nobody is logged in: send them to the login page.
            return redirect('/login')
    return check_login
# Requests for the index page must pass the login check first.
@my_login
def index(request):
    '''Home page.'''
    return render(request, 'index.html', locals())
986,822 | 1888bdfe1535f9f0d33390451c4039ce442aeead | from django.db import models
# Create your models here.
class User(models.Model):
    # Display name (verbose_name strings are runtime values, left as-is).
    user_name = models.CharField(max_length=10, verbose_name="用户名")
    # Age stored as a string of up to 3 characters.
    user_age = models.CharField(max_length=3, verbose_name="年龄")
    # Job title / position.
    user_position = models.CharField(max_length=10, verbose_name="职位")
    def __str__(self):
        return self.user_name
class Project(models.Model):
    id = models.AutoField(primary_key=True)
    project_name = models.CharField(max_length=30, verbose_name="项目名称")
    project_version = models.CharField(max_length=10, verbose_name="项目版本")
    # Set once on insert.
    create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    # Creator of the project; deleting the User cascades to their projects.
    create_name = models.ForeignKey("User", on_delete=models.CASCADE)
    def __str__(self):
        return self.project_name
|
986,823 | 98eb329aa9ac1724e4c3adae377de5f2ef3f8c5d | # coding: utf-8
import json
class DataManager:
dataSetString = "[]"
dataSet = []
fichier = ""
def __init__(self, fichier):
with open(fichier, 'r') as f:
self.dataSetString = f
self.dataSet = json.load(f)
print "Donnees lues avec succes" |
986,824 | 3e5072ad3b19c9f52689d72a948a8f53e9e3f022 | #!/usr/bin/env pypy
import sys
import random
# N: a large composite whose prime factorisation uses only primes below 332.
# sN: a target value (on the order of sqrt(N)) the product should approximate.
N = 10334465434588059156093965538297516550622260041682062823432902469783188597914276568552700194849877929894375950252570477080418352732597658745665925604704669227133726477243854317836635130694123893711638533001980496229875665476598568821806170303765540489814402234159901540440432134155844542962445153646330595588291605924429211352279943471372817279938720974895260387784578239150931816946786416232516666251965421919651838044618050991294403546958930745419743836966520198735201123255884089263272829846640538826979843642885775791641575109178753509580001660392092396798648924375401024147883702298145910046889402880394195369984000000000000000000000000000000000000000000000000000000000000000000000000000000000
sN = 3214726338988757399964463840205273148284735043324463579894976679845078166928105412104944973948893914339037572694382785661727648297539107767478128297633669341356440278480314502443731079340424764653103468238563073341496690901434197268615240607985890327844073738551115260849983966971570699838147501655616953786428037017304945538845583678438817092853062
# Collect every prime factor of N (with multiplicity) from trial division
# by 2..331 into x.
x = []
p = N
for i in range(2, 332):
    while p % i == 0:
        p //= i
        x.append(i)
# Accept any product inside a relative 1e-8 window around sN.
m = sN - sN//(10**8)
M = sN + sN//(10**8)
# Randomised search: shuffle the factors, multiply a prefix until the
# product exceeds M; if it landed in [m, M], print it and exit.
while True:
    random.shuffle(x)
    p = 1
    for y in x:
        p *= y
        if p > M:
            break
    if p >= m:
        print(p)
        sys.exit(0)
|
986,825 | 8ae0852f2175dbb1f8f55c25ddcfac11c4588af8 | __author__ = 'Brian McMahon'
# connect to BreweryDB to make get and post requests
from secrets import brewerydb_api
import requests
BASE_URL = "http://api.brewerydb.com/v2/"
API_KEY = brewerydb_api
BASE_PARAM = {'key': API_KEY}
""" get request on BreweryDB - example
test = {'key': api, 'q': 'Mill Street'}
r = requests.get(uri + "search?", params=test)
print(r.json())"""
class BreweryDB(object):
    """Thin wrapper around the BreweryDB v2 REST API."""

    def get_brewery_by_name(self, name):
        """Return JSON search results for breweries matching *name*."""
        # Build a fresh param dict.  The original mutated the module-level
        # BASE_PARAM, leaking 'q' into every subsequent request.
        param = {'key': API_KEY, 'q': name}
        return requests.get(BASE_URL + "search?", params=param).json()

    def get_beer_by_name(self, name):
        """Return JSON for beers whose name matches *name*."""
        # TODO: parse the JSON response and return only the fields the
        # caller needs instead of the raw payload.
        param = {'key': API_KEY, 'name': name}
        return requests.get(BASE_URL + "beers", params=param).json()

    def get_beer_styles(self):
        """Return JSON for all beer styles."""
        return requests.get(BASE_URL + "styles", params=BASE_PARAM).json()

    def get_ingredients(self):
        """Return JSON for all ingredients."""
        return requests.get(BASE_URL + "ingredients", params=BASE_PARAM).json()

    def get_ingredient_by_id(self, id):
        """Return the category string of the ingredient with this id."""
        r = requests.get(BASE_URL + "ingredient/" + str(id), params=BASE_PARAM)
        # Drill into the payload rather than returning the whole response.
        return r.json()['data']['category']
# TODO set up required methods for each query
"""
search_brewery(search_term, type):
values = {'key': api, 'q': search_term, 'type': type}
r = requests.get(uri + "search?", params=values)
return r.json
"""
run_test = BreweryDB()
import json
#print(run_test.get_brewery_by_name("mill street"))
#print json.dumps(run_test.get_beer_by_name("Naughty 90"), sort_keys=True, indent=4)
#run_test.get_beer_by_name("Naughty 90")
#print(run_test.get_beer_styles())
#print json.dumps(run_test.get_ingredients(), sort_keys=True, indent=4)
#print json.dumps(run_test.get_ingredient_by_id(1), sort_keys=True, indent=4)
print(run_test.get_ingredient_by_id(1))
|
986,826 | 1fd5342d424d14402a8d6e449486096416019706 | from datetime import datetime as dtdt
from datetime import timedelta
import inspect
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import requests
import yfinance as yf
pd.options.display.precision = 2
pd.options.display.float_format = '{:,.2f}'.format
#Tip: en la fila de arriba, puedo poner un $ o % adelante/atrás de las {} y no deja de ser un float, pero te lo muestra como $ o %
class GetData:
apikeyAV = 'UEE3SBFTYJR21T8E'
apikeyTD = 'HPHFW7WEKPGMB1IF9VQMHTFQKSKJO0AI'
apikeyAM = 'PKRRTJJ0ZEPCZPGHGXE4'
secretkeyAM = '5aecoQkbv8o82Dn54wW9oqIXIORHr7kR5hxOd7m1'
intra = ['1min', '5min', '15min', '30min', '60min']
open_hour = '09:30:00'
close_hour = '16:00:00'
def __init__(self, ticker, interval='daily', start=None, end=None, auto_adjust=True, size='full', extended_hours=False):
"""
Input:
ticker (str): Ticker del activo a analizar
interval (str): Compresión temporal.
accepted input: '1min', '5min', '15min', '30min', '60min', 'daily', 'weekly', 'month', 'year', 'ytd'
start (str): Fecha de inicio para recolectar datos. Default: None
formato de input: 'yyyy-mm-dd'
end (str): Ultima fecha sobre la cual recolectar datos. Default: None. En gral las API te traen hasta ult día de trading
formato de input: 'yyyy-mm-dd'
auto_adjust (bool): Traer valores ajustados por dividendos y splits o no. Default: True
size (str): Para API de alphavantage. No cuenta con fechas de start y end. Este parametro determina si traer hist completa o light
accepted input: 'full', 'compact'
extended_hours (bool): Traer valores de pre y after market o no. Default: False
"""
self.ticker = ticker.upper()
self.interval = interval
self.start = start
self.end = end
self.auto_adjust = auto_adjust
self.size = size
self.extended_hours = extended_hours
def SP500Tickers():
sp500 = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0]
sp500_tickers = list(sp500.Symbol)
sp500_tickers = [e for e in sp500_tickers if e not in ('BRK.B','BF.B')]
with open('sp500tickers.pickle', 'wb') as f:
pickle.dump(sp500_tickers, f)
return sp500_tickers
"""
Para levantar el pickle de los tickers del sp500:
with open('sp500tickers.pickle', 'rb') as f:
tickers = pickle.load(f)
"""
def yfinance(self, start = None, end = None, interval = None, auto_adjust=None):
"""
Input: start, end, interval & auto_adjust son parametros cargados al inicializar el objeto. Si
deseo cambiarlos, paso valor deseado, y queda como el valor del atributo del objeto.
Output: Dataframe con cols: 'open, high, low, close, volume, vol_mln_usd, pct_change, log_change',
desde la fecha start hasta la fecha end, end timeframe dado por interval, ajustados o no.
"""
self.source = 'yfinance'
self.start = start if start else self.start
self.end = end if end else self.end
self.interval = interval if interval else self.interval
if self.end:
try:
end = dtdt.strptime(self.end, '%Y-%m-%d')
except:
print('Error: Invalid format for end date. Try yyyy-mm-dd')
else:
end = dtdt.now()
if self.start:
try:
start = dtdt.strptime(self.start, '%Y-%m-%d')
except:
print('Error: Invalid format for start date. Try YYYY-mm-dd')
else:
start = None
if self.interval in self.intra:
interval = self.interval[:-2] # yfinance me pide 'm', no 'min', entonces le borro los ult dos caracteres ('in')
if interval == '1m':
date_range_limit = 7
elif interval == '60m':
date_range_limit = 730
else:
date_range_limit = 60
if (not self.start) or ((end - start).days >= date_range_limit):
print('''Error: intraday data available for a range within the last 730 days (60 min) /
60 days (30, 15 and 5 min) / 7 days (1 min).''')
elif self.interval == 'daily':
interval = '1d'
elif self.interval == 'weekly':
interval = '1wk'
try:
data = yf.download(self.ticker, interval=interval, start=start,
end=end, auto_adjust=self.auto_adjust, progress=False)
if not self.auto_adjust:
data.drop('Adj Close', axis=1, inplace=True)#Si no pido adjclose,yahoo me lo trae separado del close, pero si no lo pedí lo borro
data['vol_mln'] = data.Volume * data.Close / 10**6
data['pct_change'] = data.Close.pct_change() * 100
data['log_change'] = np.log(data.Close / data.Close.shift()) * 100
data.columns = ['open', 'high', 'low', 'close', 'volume', 'vol_mln_usd', 'pct_change', 'log_change']
except Exception as e:
print('Ocurrió el siguiente error: ', e)
data = pd.DataFrame()
self.data = data
self.hodl = ((data.close[-1] / data.close[0]) - 1) * 100
return data
def alphavantage(self, interval = None, size = None, extended_hours = None, auto_adjust = None):
"""
Input: Interval, size, extended_hours & auto_adjust son parametros cargados al inicializar el objeto. Si
deseo cambiarlos, paso valor deseado, y queda como el valor del atributo del objeto.
Output: Output: Dataframe con cols: 'open, high, low, close, volume, vol_mln_usd, pct_change, log_change',
desde la fecha start hasta la fecha end, end timeframe dado por interval, ajustados o no, con extended_hours o no.
"""
self.source = 'alphavantage'
self.interval = interval if interval else self.interval
self.size = size if size else self.size
self.extended_hours = extended_hours if extended_hours else self.extended_hours
self.auto_adjust = auto_adjust if auto_adjust else self.auto_adjust
url = 'https://www.alphavantage.co/query'
#AV me pide un str 'true' o 'false', no un bool. Transformo el bool en str
adjusted = str(self.auto_adjust).lower()
if self.interval in self.intra:
function = 'TIME_SERIES_INTRADAY'
parametros = {'function': function, 'symbol': self.ticker, 'interval': self.interval,
'adjusted': adjusted, 'outputsize': self.size, 'apikey': self.apikeyAV}
elif (self.interval == 'daily') or (self.interval == 'weekly'):
if self.auto_adjust:
function = f'TIME_SERIES_{self.interval.upper()}_ADJUSTED'
else:
function = f'TIME_SERIES_{self.interval.upper()}'
parametros = {'function': function, 'symbol': self.ticker, 'outputsize': self.size, 'apikey': self.apikeyAV}
else:
print("Invalid interval. Try: '1min', '5min', '15min', '30min', '60min', 'daily', 'weekly'.")
try:
r = requests.get(url, params = parametros)
if self.interval in self.intra:
data = r.json()[f"Time Series ({self.interval})"]
elif self.interval == 'daily':
data = r.json()["Time Series (Daily)"]
elif self.interval == 'weekly':
data = r.json()["Weekly Adjusted Time Series"]
data = pd.DataFrame.from_dict(data, orient = 'index')
indice = 'Datetime' if self.interval in self.intra else 'Date'
data.index.name = indice
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors='coerce')
flotante = data[col] % 1 != 0
if flotante.any():
data[col] = data[col].astype('float')
else:
data[col] = data[col].astype('int')
if self.interval not in self.intra:
if self.auto_adjust:
if self.interval == 'daily':
data.drop(['7. dividend amount', '8. split coefficient'], axis=1, inplace=True)
elif self.interval == 'weekly':
data.drop(['7. dividend amount'], axis=1, inplace=True)
data['factor'] = data['5. adjusted close'] / data['4. close']
cols = [data['1. open'] * data.factor, data['2. high'] * data.factor, data['3. low'] * data.factor,
data['5. adjusted close'], data['6. volume']]
data = pd.concat(cols, axis=1)
data.index = pd.to_datetime(data.index)
data = data.sort_values(indice, ascending=True)
data.columns = ['open', 'high', 'low', 'close','volume']
data['vol_mln_usd'] = (data.close * data.volume) / 10**6
data['pct_change'] = data.close.pct_change() * 100
data['log_change'] = np.log(data.close / data.close.shift()) * 100
if (self.interval in self.intra) and not self.extended_hours:
data = data.between_time(self.open_hour, self.close_hour) #AV tiene data desde 04:00 hasta 20:00. Acá filtro a horario de mercado
except Exception as e:
print('Ocurrió el siguiente error: ', e)
data = pd.DataFrame()
self.data = data
self.hodl = ((data.close[-1] / data.close[0]) - 1) * 100
return data
def TDAmeritrade(self, start = None, end = None, interval = None, extended_hours = None,
periodType=None, period=None, frequencyType=None):
"""
Input:
periodType (str): Tipo de periodo a mostrar. En caso de tener valores de start y endDate, queda sin uso. Default: 'day'
accepted values: 'day', 'month', 'year', 'ytd'
period (str): Cantidad de periodos a mostrar. En caso de tener valores de start y endDate, queda sin uso.
accepted values: Depende del periodType. day(1,2,3,4,5,10*); month(1*,2,3,6), year(1*,2,3,5,10,15,20); ytd(1*)
frequencyType (str): Compresion de las velas.
accepted values: Depende del periodType. day(minute); month(daily, weekly); year(daily, weekly, monthly); ytd(daily, weekly)
frequency: Not a parameter itself. Its the amount of *frequencyType* in each candle. Default: 1. Solo en caso de periodType=day
y frequencyType=minute hay otras opciones (1,5,10,15,30)
"""
self.source = 'TDAmeritrade'
self.start = start if start else self.start
self.end = end if end else self.end
self.interval = interval if interval else self.interval
self.extended_hours = extended_hours if extended_hours else self.extended_hours
url = f'https://api.tdameritrade.com/v1/marketdata/{self.ticker}/pricehistory'
extended_hours = str(self.extended_hours).lower()
if self.interval in self.intra:
frequency = self.interval[:-3]
if self.interval == '60min':
frequency = '30' #TDA no tiene compresion horaria. P q no tire error, le paso 30min y despues resampleo a 1h
periodType = 'day'
frequencyType = 'minute'
else:
frequency = 1 #Cuando no intra, la freq solo puede ser 1 (1día, 1sem, 1mes)
if not periodType:
periodType = 'year' #Cuando no intra, por default le mando year.
if self.interval == 'month':
frequencyType = 'monthly'
else:
frequencyType = self.interval
if self.start:
try:
start = int(dtdt.strptime(self.start, '%Y-%m-%d').timestamp() * 1000) #Pongo int() pq sino queda en float. Y el epoch es un entero
except:
print('Invalid input for start date. Try again with format "yyyy-mm-dd".')
try:
end = int(dtdt.strptime(self.end, '%Y-%m-%d').timestamp() * 1000)
except:
end = int(dtdt.now().timestamp() * 1000) #Si tengo startDate, por default asumo q necesito un endDate. Si no paso parametro, ayer
parametros = {'apikey': self.apikeyTD, 'periodType':periodType, 'frequencyType':frequencyType,
'frequency':frequency, 'endDate':end, 'startDate':start, 'needExtendedHoursData':extended_hours}
else:
parametros = {'apikey': self.apikeyTD, 'periodType':periodType, 'period':period,
'frequency':frequency, 'frequencyType':frequencyType, 'needExtendedHoursData':extended_hours}
try:
r = requests.get(url, params = parametros)
data = r.json()['candles']
data = pd.DataFrame(data)
data['fecha'] = pd.to_datetime(data['datetime'] - 3600*1000*3, unit='ms')
if self.interval in self.intra:
data.index = data['fecha']
data.index.name = 'Datetime'
else:
data.index = data['fecha']
data.index = data.index.date
data.index.name = 'Date'
data.drop(['datetime','fecha'], axis=1,inplace=True)
data = data.loc[data.index[0]+timedelta(days=1):]
if self.interval == '60min':
data = data.resample('1H').first() #Consejo: No usar esta compresion.
data['vol_mln_usd'] = (data.close * data.volume) / 10**6
data['pct_change'] = data.close.pct_change() * 100
data['log_change'] = np.log(data.close / data.close.shift()) * 100
except Exception as e:
print('Ocurrió el siguiente error: ', e)
data = pd.DataFrame()
self.data = data
self.hodl = ((data.close[-1] / data.close[0]) - 1) * 100
return data
def fundamentals(self):
self.source = 'TDAmeritrade'
url = 'https://api.tdameritrade.com/v1/instruments'
parametros = {'apikey': self.apikeyTD, 'symbol': self.ticker, 'projection':'fundamental'}
r = requests.get(url = url, params = parametros)
self.fundamentals = r.json()[self.ticker]['fundamental']
return self.fundamentals
def options(self):
url = 'https://api.tdameritrade.com/v1/marketdata/chains'
parametros = {'apikey': self.apikeyTD, 'symbol': self.ticker}
r = requests.get(url=url, params=parametros).json()
v_calls = list(r['callExpDateMap'].values())
v_calls_fechas = list(r['callExpDateMap'].keys())
v_puts = list(r['putExpDateMap'].values())
v_puts_fechas = list(r['putExpDateMap'].keys())
calls = []
for i in range(len(v_calls)):
v = list(v_calls[i].values())
for j in range(len(v)):
calls.append(v[j][0])
puts = []
for i in range(len(v_puts)):
v = list(v_puts[i].values())
for j in range(len(v)):
puts.append(v[j][0])
contracts = pd.concat([pd.DataFrame(calls),pd.DataFrame(puts)])
tabla = contracts.loc[contracts.daysToExpiration > 0]
tabla = tabla.loc[:,['strikePrice', 'daysToExpiration', 'putCall', 'bid', 'ask',
'last', 'volatility', 'openInterest', 'theoreticalOptionValue']]
tabla.columns = ['Strike', 'Dias', 'Tipo', 'Bid', 'Ask', 'Ultimo', 'VI', 'OpenInt', 'PrimaT']
self.options = tabla
return tabla
def alpaca(self):
self.source = 'alpaca'
url = f'https://data.alpaca.markets/v2/stocks/{self.ticker}/bars'
if self.interval in self.intra:
if self.interval == '60min':
interval = '1Hour'
else:
interval = '1Min'#Es la unica compresion en minutos que me acepta. Desp resampleo a 5,15,30 si es necesario
elif self.interval == 'daily':
interval = '1Day'
if self.start:
start = self.start +'T00:00:00-03:00'
if self.end:
end = self.end +'T00:00:00-03:00'
else:
end = dtdt.today().strftime('%Y-%m-%d') +'T00:00:00-03:00'
else:
print('Error. Start y end no encontrados o formato inválido. Try again.')
parametros = {'start':start, 'end':end, 'limit':10000, 'timeframe':interval}
headers = {'APCA-API-KEY-ID':self.apikeyAM, 'APCA-API-SECRET-KEY':self.secretkeyAM}
r = requests.get(url = url, headers = headers, params = parametros)
js = r.json()
data = pd.DataFrame(js['bars'])
data.t = pd.to_datetime(data.t).apply(lambda x: dtdt(x.year,x.month,x.day,x.hour,x.minute,x.second))
data.set_index('t', inplace=True)
data.index.name = 'Datetime'
if self.interval not in self.intra:
data.index = data.index.date
data.index.name = 'Date'
if (self.interval in self.intra) and (self.interval != '60min'): #Consejo: No usar 5, 15 y 30 min. Hice un cuasimodo.
#Lo correcto sería resamplear volumen con .sum(). Es más lío que el beneficio que reporta. Dejo esto como parche básico.
data = data.resample(self.interval.replace('m','M')).last()
data.fillna(method='ffill', inplace=True) #Acciones ilíquidas -Argy- dejan huecos de op con NaN. Que asuma valor d ult quote
data.columns = ['open', 'high', 'low', 'close', 'volume']
data['vol_mln_usd'] = (data.close * data.volume) / 10**6
data['pct_change'] = data.close.pct_change() * 100
data['log_change'] = np.log(data.close / data.close.shift()) * 100
self.data = data
return data |
986,827 | 83537af63350727f3e1da7e68ade34532cd57890 | def test_login(app):
app.session.ensure_login("administrator", "root")
assert app.session.is_logged_in_as("administrator")
|
986,828 | 122e3cdfd1be56eff2f915c465496f5d8e857e89 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
def make_2d_histogram(xx, yy, dx, dy):
    """Bin (xx, yy) points onto a dx-by-dy grid and deduplicate.

    Returns a pair (first-occurrence indices, counts) for the unique
    integer grid cells (round(xx/dx), round(yy/dy)).
    """
    col1 = np.round(xx / dx).astype(int)
    col2 = np.round(yy / dy).astype(int)
    pairs = np.array([col1, col2]).transpose()
    # Reinterpret each row as one opaque void scalar so np.unique compares
    # whole rows at once.
    # see http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
    row_bytes = np.dtype((np.void, pairs.dtype.itemsize * pairs.shape[1]))
    row_view = np.ascontiguousarray(pairs).view(row_bytes)
    _, unique_rows, unique_counts = np.unique(row_view,
                                              return_index=True,
                                              return_counts=True)
    return unique_rows, unique_counts
def plot_color(xx, yy, dx, dy, s=5):
    # Density scatter: color each unique grid cell by how many raw points
    # fall in it; sort ascending by count so the densest cells draw last
    # and stay visible.
    dexes, cts = make_2d_histogram(xx, yy, dx, dy)
    sorted_dex = np.argsort(cts)
    dexes = dexes[sorted_dex]
    cts = cts[sorted_dex]
    # NOTE(review): edgecolor='' is rejected by newer matplotlib; presumably
    # 'none' was intended -- confirm before upgrading.
    plt.scatter(xx[dexes], yy[dexes], c=cts, s=s,
                cmap=plt.cm.gist_ncar, edgecolor='')
    plt.colorbar()
kep_dtype = np.dtype([('kepid', int),
('tm_designation', str, 200),
('teff', float),
('teff_err1', float),
('teff_err2', float),
('logg', float),
('logg_err1', float),
('logg_err2', float),
('feh', float),
('feh_err1', float),
('feh_err2', float),
('mass', float),
('mass_err1', float),
('mass_err2', float),
('st_radius', float),
('radius_err1', float),
('radius_err2', float),
('dens', float),
('dens_err1', float),
('dens_err2', float),
('prov_sec', str, 200),
('kepmag', float),
('dist', float),
('dist_err1', float),
('dist_err2', float),
('nconfp', int),
('nkoi', int),
('ntce', int),
('datalink_dvr', str, 200),
('st_delivname', str, 200),
('st_vet_date_str', str, 200),
('degree_ra', float),
('degree_dec', float),
('st_quarters', int),
('teff_prov', str, 200),
('logg_prov', str, 200),
('feh_prov', str, 200),
('jmag', float),
('jmag_err', float),
('hmag', float),
('hmag_err', float),
('kmag', float),
('kmag_err', float),
('dutycycle', float),
('dataspan', float),
('mesthres01p5', float),
('mesthres02p0', float),
('mesthres02p5', float),
('mesthres03p0', float),
('mesthres03p5', float),
('mesthres04p5', float),
('mesthres05p0', float),
('mesthres06p0', float),
('mesthres07p5', float),
('mesthres09p0', float),
('mesthres10p5', float),
('mesthres12p0', float),
('mesthres12p5', float),
('mesthres15p0', float),
('rrmscdpp01p5', float),
('rrmscdpp02p0', float),
('rrmscdpp02p5', float),
('rrmscdpp03p0', float),
('rrmscdpp03p5', float),
('rrmscdpp04p5', float),
('rrmscdpp05p0', float),
('rrmscdpp06p0', float),
('rrmscdpp07p5', float),
('rrmscdpp09p0', float),
('rrmscdpp10p5', float),
('rrmscdpp12p0', float),
('rrmscdpp12p5', float),
('rrmscdpp15p0', float),
('av', float),
('av_err1', float),
('av_err2', float)])
catalog_name = 'data/kepler_stellar17.csv'
if not os.path.exists(catalog_name):
raise RuntimeError('Need to download Kepler catalog data using\n'
'wget -nH --cut-dirs=3 https://archive.stsci.edu/pub/kepler/catalogs/kepler_stellar17.csv.gz\n'
'and place it in the data/ directory')
kep_data = np.genfromtxt(catalog_name, dtype=kep_dtype,
delimiter='|', skip_header=1)
valid_teff = np.where(np.logical_and(np.logical_not(np.isnan(kep_data['teff'])),
np.logical_and(np.logical_not(np.isnan(kep_data['teff_err1'])),
np.logical_and(np.logical_not(np.isnan(kep_data['teff_err2'])),
np.logical_and(np.logical_not(np.isnan(kep_data['logg'])),
np.logical_and(np.logical_not(np.isnan(kep_data['logg_err1'])),
np.logical_and(np.logical_not(np.isnan(kep_data['logg_err2'])),
np.logical_and(np.logical_not(np.isnan(kep_data['kepmag'])),
np.logical_and(np.logical_not(np.isnan(kep_data['feh'])),
np.logical_and(np.logical_not(np.isnan(kep_data['feh_err1'])),
np.logical_and(np.logical_not(np.isnan(kep_data['feh_err2'])),
np.logical_and(np.logical_not(np.isnan(kep_data['dist'])),
np.logical_and(np.logical_not(np.isnan(kep_data['dist_err1'])),
np.logical_and(np.logical_not(np.isnan(kep_data['dist_err2'])),
kep_data['dist']>1.0e-3))))))))))))))
kep_data = kep_data[valid_teff]
kep_abs_mag = kep_data['kepmag']-5.0*np.log10(kep_data['dist']/10.0)
dtype = np.dtype([('dex', int), ('kep_teff', float), ('catsim_teff', float),
('kep_logg', float), ('catsim_logg', float),
('kep_feh', float), ('catsim_feh', float),
('kep_mag', float), ('catsim_mag', float)])
# For each nearest-neighbour count kk used in the fit, load the results and
# produce a 2x2 comparison figure of CatSim vs Kepler Teff, [Fe/H],
# magnitude and log(g).
# NOTE(review): Python 2 syntax (print statements); `plt`, `plot_color`
# and `dtype` are defined earlier in the file.
for kk in (1, 10):
    data_file = 'test_star_fits_k%d.txt' % kk
    data = np.genfromtxt(data_file, dtype=dtype)
    print 'read in data'
    # Shared axis limits spanning both catalogues for each quantity.
    feh_min = min(data['catsim_feh'].min(), data['kep_feh'].min())
    feh_max = max(data['catsim_feh'].max(), data['kep_feh'].max())
    t_min = min(data['catsim_teff'].min(), data['kep_teff'].min())
    t_max = max(data['catsim_teff'].max(), data['kep_teff'].max())
    mag_min = min(data['catsim_mag'].min(), data['kep_mag'].min())
    mag_max = max(data['catsim_mag'].max(), data['kep_mag'].max())
    logg_min = min(data['catsim_logg'].min(), data['kep_logg'].min())
    logg_max = max(data['catsim_logg'].max(), data['kep_logg'].max())
    print 't ',t_min,t_max
    print 'feh ',feh_min,feh_max
    print 'mag ',mag_min,mag_max
    # NOTE(review): this only sets an attribute on the pyplot module; it does
    # not resize anything -- plt.figure(figsize=(30, 30)) was probably meant.
    plt.figsize = (30, 30)
    # Panel 1: effective temperature, with a y=x reference line.
    plt.subplot(2,2,1)
    plot_color(data['catsim_teff'], data['kep_teff'], 100.0, 100.0)
    t_ticks = np.arange(np.round(t_min/1000.0)*1000.0, np.round(t_max/1000.0)*1000.0,
                        2000.0)
    t_labels = ['%d' % xx for xx in t_ticks]
    plt.xlabel('CatSim Teff', fontsize=10)
    plt.xticks(t_ticks, t_labels, fontsize=10)
    plt.ylabel('Kepler Teff', fontsize=10)
    plt.yticks(t_ticks, t_labels, fontsize=10)
    plt.xlim(t_min, t_max)
    plt.ylim(t_min, t_max)
    plt.plot((t_min, t_max),(t_min, t_max), color='r', linestyle='--')
    # Panel 2: metallicity.
    plt.subplot(2,2,2)
    plot_color(data['catsim_feh'], data['kep_feh'], 0.1, 0.1)
    #counts, xbins, ybins = np.histogram2d(data['catsim_feh'], data['kep_feh'],
    #                                      bins=200)
    #plt.contour(counts.transpose(),
    #            extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()])
    feh_ticks = np.arange(np.round(feh_min/0.25)*0.25, np.round(feh_max/0.25)*0.25,
                          1.0)
    feh_labels = ['%.2f' % xx for xx in feh_ticks]
    plt.xlabel('CatSim FeH', fontsize=10)
    plt.xticks(feh_ticks, feh_labels, fontsize=10)
    plt.ylabel('Kepler FeH', fontsize=10)
    plt.yticks(feh_ticks, feh_labels, fontsize=10)
    plt.xlim(feh_min-0.1, feh_max+0.1)
    plt.ylim(feh_min-0.1, feh_max+0.1)
    plt.plot((feh_min, feh_max),(feh_min, feh_max), color='r', linestyle='--')
    # Panel 3: apparent magnitude.
    plt.subplot(2,2,3)
    plot_color(data['catsim_mag'], data['kep_mag'], 0.1, 0.1)
    mag_ticks = np.arange(np.round(mag_min/0.25)*0.25, np.round(mag_max/0.25)*0.25,
                          0.5)
    mag_labels = ['%.2f' % xx for xx in mag_ticks]
    plt.xlabel('CatSim mag', fontsize=10)
    plt.xticks(mag_ticks, mag_labels, fontsize=10)
    plt.ylabel('Kepler mag', fontsize=10)
    plt.yticks(mag_ticks, mag_labels, fontsize=10)
    plt.xlim(mag_min-0.1, mag_max+0.1)
    plt.ylim(mag_min-0.1, mag_max+0.1)
    plt.plot((mag_min, mag_max),(mag_min, mag_max), color='r', linestyle='--')
    # Panel 4: surface gravity.
    plt.subplot(2,2,4)
    plot_color(data['catsim_logg'], data['kep_logg'], 0.1, 0.1, s=10)
    #counts, xbins, ybins = np.histogram2d(data['catsim_logg'], data['kep_logg'],
    #                                      bins=200)
    #plt.contour(counts.transpose(),
    #            extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()])
    logg_ticks = np.arange(np.round(logg_min/0.25)*0.25, np.round(logg_max/0.25)*0.25,
                           1.0)
    logg_labels = ['%.2f' % xx for xx in logg_ticks]
    plt.xlabel('CatSim logg', fontsize=10)
    plt.xticks(logg_ticks, logg_labels, fontsize=10)
    plt.ylabel('Kepler logg', fontsize=10)
    plt.yticks(logg_ticks, logg_labels, fontsize=10)
    plt.xlim(logg_min-0.1, logg_max+0.1)
    plt.ylim(logg_min-0.1, logg_max+0.1)
    plt.plot((logg_min, logg_max),(logg_min, logg_max), color='r', linestyle='--')
    plt.tight_layout()
    plt.savefig('fit_plots_nn%d.png' % kk)
    plt.close()
|
986,829 | 4a39847684d8a70147d6093871310cbed9e107de | #!/usr/bin/python3
import fileinput
def get_score(layers, delay):
    """Total severity of crossing the firewall after waiting *delay*.

    A scanner of range r returns to position 0 with period 2*(r - 1);
    being caught at depth d by such a scanner adds d*r to the severity.
    The packet reaches depth d at time delay + d.
    """
    total = 0
    for depth in range(max(layers) + 1):
        scanner_range = layers.get(depth)
        if scanner_range is None:
            # No scanner at this depth; the packet just moves on.
            continue
        period = 2 * scanner_range - 2
        if (delay + depth) % period == 0:
            total += depth * scanner_range
    return total
def is_clean(layers, delay):
    """Return True iff a packet leaving after *delay* crosses uncaught.

    The packet reaches depth d at time delay + d; a scanner of range r
    catches it there whenever that time is a multiple of 2*(r - 1).
    """
    return all((delay + depth) % (2 * rng - 2) != 0
               for depth, rng in layers.items())
# Parse "depth: range" lines (from files named on the command line, or
# stdin) into a {depth: scanner_range} mapping.
layers = {}
for line in fileinput.input():
    parts = line.split(":")
    layers[int(parts[0])] = int(parts[1])
# Part 2: find the smallest delay that gets through without being caught.
delay = 0
while not is_clean(layers, delay):
    delay += 1
print(delay)
|
986,830 | 488d4aef193e6fec986ba2fe08c52736a24352e3 | #
# PySNMP MIB module CISCOSB-DEBUGCAPABILITIES-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-DEBUGCAPABILITIES-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:22:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated by pysmi from CISCOSB-DEBUGCAPABILITIES-MIB. `mibBuilder` is
# injected into the module namespace by the pysnmp MIB loader that
# executes this file -- it is not imported here.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, ObjectIdentity, NotificationType, iso, TimeTicks, IpAddress, Gauge32, MibIdentifier, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Bits, Unsigned32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "NotificationType", "iso", "TimeTicks", "IpAddress", "Gauge32", "MibIdentifier", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Bits", "Unsigned32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity OID: iso.org.dod.internet.private.enterprises.9.6.1.101.206
rlDebugCapabilities = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 206))
rlDebugCapabilities.setRevisions(('2011-01-05 00:00',))
# setRevisionsDescriptions is only available on newer pysnmp builds,
# hence the version guard.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: rlDebugCapabilities.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: rlDebugCapabilities.setLastUpdated('201101050000Z')
if mibBuilder.loadTexts: rlDebugCapabilities.setOrganization('Cisco Small Business')
if mibBuilder.loadTexts: rlDebugCapabilities.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Home http://www.cisco.com/smb>;, Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlDebugCapabilities.setDescription('This private MIB module is used for achieving extended debugging capablities for the device. For example: greater management capabilies for technical support users.')
# Scalar used as a write-only "password" gate for extended debug access.
rlDebugCapabilitiesPassword = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 206, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlDebugCapabilitiesPassword.setStatus('current')
if mibBuilder.loadTexts: rlDebugCapabilitiesPassword.setDescription('A user intereseted to obtain extended debug capabilities should SET this MIB to a well known secret value (it is intended to be used only by authorized users). Most often, this value will be based on the device MAC address. Upon setting the correct value, the SET operation will return noError. Otherwise, wrongValue will return to the caller. GET operation on this MIB will reurn a value of length 0.')
mibBuilder.exportSymbols("CISCOSB-DEBUGCAPABILITIES-MIB", PYSNMP_MODULE_ID=rlDebugCapabilities, rlDebugCapabilitiesPassword=rlDebugCapabilitiesPassword, rlDebugCapabilities=rlDebugCapabilities)
|
986,831 | 76faa4c3639d0bc1f9df88d17a195c6e025f4def | # -*- coding: utf-8 -*-
import sqlite3
import numpy as np
def get_user_coin_num(con, userid):
    """Return the coin balance stored for *userid*.

    Raises IndexError if the user does not exist (unchanged behaviour).
    The cursor is now closed on every path; the original leaked it, and
    every other helper in this module closes its cursor.
    """
    sql = "select coins from user where uid=?"
    cursor = con.cursor()
    try:
        rows = cursor.execute(sql, (userid,)).fetchall()
    finally:
        cursor.close()
    return int(rows[0][0])
def add_one(user_id, con):
    """Increment the coin balance of *user_id* by one and commit."""
    cur = con.cursor()
    cur.execute("update user set coins = coins+1 where uid = ?", (user_id,))
    con.commit()
    cur.close()
def create_account(user_id, con, slogan=""):
    """Insert a new user row with a zero coin balance and commit."""
    cur = con.cursor()
    cur.execute("insert into user values(?, ?, ?)", (user_id, 0, slogan))
    con.commit()
    cur.close()
def select_slogan(con):
    """Pick one slogan at random, weighted by each user's coin count."""
    cur = con.cursor()
    pairs = list(cur.execute("select slogan, coins for_update".replace(" for_update", " from user")))
    slogans = [slogan for slogan, _ in pairs]
    weights = [coins for _, coins in pairs]
    total = sum(weights)
    probabilities = [float(w) / total for w in weights]
    return np.random.choice(slogans, p=probabilities)
def get_user_slogan(con, uid):
    """Return the slogan stored for *uid*.

    Unknown users get an empty account created on the fly, after which the
    lookup is retried (at most one level of recursion).
    """
    cursor = con.cursor()
    sql = "select slogan from user where uid=?"
    res = [slogan for slogan, in cursor.execute(sql, (uid,))]
    # Close before any return path: the original placed cursor.close()
    # after the auto-create branch, leaking the cursor on every recursion.
    cursor.close()
    if not res:
        create_account(user_id=uid, con=con)
        return get_user_slogan(con, uid)
    return res[0]
def set_user_slogan(con, uid, slogan):
    """Store *slogan* for *uid*, commit, and return the stored value."""
    cur = con.cursor()
    cur.execute("update user set slogan=? where uid =?", (slogan, uid))
    con.commit()
    cur.close()
    return slogan
# Seed slogans (Chinese love notes and classical poetry) used to populate
# demo accounts. These are runtime strings and are left untranslated.
init_slogans = [
    "honey,实习任务多也不要着急哦,慢慢来~",
    "honey,真的好想到你身边~",
    "honey,给你寄了明信片,不知道能不能寄到,所以我拍了照片~",
    "honey,要好好吃饭,多吃水果,我就是这样的~",
    "honey,会平衡好写代码和生活啦,不然傻傻的了~我要为你保留一些诗意~",
    "只愿君心似我心,定不负君相思意~",
    "死生契阔,与子成说。执子之手,与子偕老。",
    "衣带渐宽终不悔,为伊消得人憔悴。",
    "两情若是久长时,又岂在朝朝暮暮。",
    "入我相思门,知我相思苦,长相思兮长相忆,短相思兮无穷极。",
    "曾经沧海难为水,除却巫山不是云。",
    "相思树底说相思,思郎恨郎郎不知。",
    "相思一夜情多少,地角天涯未是长。"
]
def add_test_user(con):
    """Create one demo account per seed slogan, each holding a single coin."""
    for idx, slogan in enumerate(init_slogans):
        uid = "rw{}".format(idx)
        create_account(uid, con, slogan)
        add_one(uid, con)
def main():
    """One-off setup: create the `user` table in ./bitads and seed demo rows."""
    con = sqlite3.connect("bitads")
    # Return raw str for TEXT columns instead of decoding to unicode.
    con.text_factory = str
    cursor = con.cursor()
    # Raises if the table already exists -- this script is meant to run once.
    create_sql = "create table user(uid text, coins integer, slogan text)"
    cursor.execute(create_sql)
    # The triple-quoted block below is disabled seeding code kept verbatim.
    '''
    slogan = ("我的爱豆是瑞瑞,我在这里给他打call,"
              "如果你也想给自己的爱豆打call,那么就设置slogan,"
              "然后点击挖矿按钮挖矿,"
              "我们会根据你拥有的爱豆币数量让你的slogan上首页哦~")
    create_account("weiyanjie", con, slogan)
    add_one("weiyanjie", con)
    '''
    add_test_user(con)
    cursor.close()
    con.close()
|
986,832 | e0d98ec678ee63244e9dce80a4df35e5d3ce5c0a | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment."""
    # Activate if use Database
    # SECRET_KEY = os.getenv('SECRET_KEY', 'key_here')
    DEBUG = False
class DevConfig(Config):
    """Settings for local development."""
    DEBUG = True
    SERVER_NAME = '127.0.0.1:6996'
    # SQL SETTING: define SQL_DB_URI here once a database is wired up.
    #SQL_DB_URI =
class TestConfig(Config):
    """Settings used while running the test suite."""
    DEBUG = True
    TESTING = True
    # SQL SETTING: define SQL_DB_URI here once a database is wired up.
    #SQL_DB_URI =
class ProdConfig(Config):
    """Settings for production deployments."""
    DEBUG = False
    TESTING = False
    # SQL SETTING: define SQL_DB_URI here once a database is wired up.
    #SQL_DB_URI =
# Map short environment names to their config classes (looked up by the
# application factory when choosing a configuration).
config_name = dict(
    dev = DevConfig,
    test = TestConfig,
    prod = ProdConfig
)
#Activate if USE DB
#key = Config.SECRET_KEY |
986,833 | 7a818c275e63f00c8aa700040fa69792c2869222 | """
In a 2D grid of 0s and 1s, we change at most one 0 to a 1.
After, what is the size of the largest island? (An island is a 4-directionally connected group of 1s).
Example 1:
Input: [[1, 0], [0, 1]]
Output: 3
Explanation: Change one 0 to 1 and connect two 1s, then we get an island with area = 3.
Example 2:
Input: [[1, 1], [1, 0]]
Output: 4
Explanation: Change the 0 to 1 and make the island bigger, only one island with area = 4.
Example 3:
Input: [[1, 1], [1, 1]]
Output: 4
Explanation: Can't change any 0 to 1, only one island with area = 4.
Notes:
1 <= grid.length = grid[0].length <= 50.
0 <= grid[i][j] <= 1.
"""
class Solution(object):
    def largestIsland(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Flip each 0 to 1 in turn and return the largest 4-connected island
        obtainable (brute force: one O(R*C) island scan per candidate flip).

        Fixes over the original draft: `grid[[0]]` typo, a stray `:` after
        `grid[i][j] = 1` (syntax error), the `_island`/`_lsland` name
        mismatch, `res` never being initialised (NameError on a grid with
        no zeros), and a DFS that neither counted its starting cell nor
        marked cells visited safely (restoring mid-walk double-counts).
        """
        if not grid:
            return 0
        rows, cols = len(grid), len(grid[0])

        def _area(r0, c0, seen):
            # Iterative flood fill: area of the island containing (r0, c0).
            stack = [(r0, c0)]
            seen.add((r0, c0))
            area = 0
            while stack:
                r, c = stack.pop()
                area += 1
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                    if (0 <= nr < rows and 0 <= nc < cols
                            and grid[nr][nc] == 1 and (nr, nc) not in seen):
                        seen.add((nr, nc))
                        stack.append((nr, nc))
            return area

        def _largest():
            # Largest island in the grid's current state; the shared `seen`
            # set keeps the whole scan O(rows * cols).
            best = 0
            seen = set()
            for r in range(rows):
                for c in range(cols):
                    if grid[r][c] == 1 and (r, c) not in seen:
                        best = max(best, _area(r, c, seen))
            return best

        # Baseline covers grids with no 0 to flip (e.g. all ones).
        res = _largest()
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 0:
                    grid[r][c] = 1
                    res = max(res, _largest())
                    grid[r][c] = 0
        return res
"""
Note:
Exchange Space for Time.
""" |
986,834 | 4f9f06714bf0775983b9908befb023aef97373a0 | from rest_framework import permissions
class AnyPermission(permissions.BasePermission):
    """
    Permission that grants access to every request, logged in or not.
    """

    def has_permission(self, request, view):
        # Deliberately unconditional: this is an allow-all permission.
        return True
986,835 | 80e0b7f14865224153ef6d87c0ee4ba6fd732c90 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# One-off demo: convolve an image with a (mostly) averaging kernel.
img=cv2.imread('bbKing.jpg')
# 5x5 box kernel normalised to sum 1 (plain blur before the tweak below).
kernel=np.ones((5,5),np.float32)/25
# Overweight one off-centre tap; the kernel's total weight becomes ~4.96,
# strongly brightening the result. NOTE(review): confirm this is intended
# rather than a missed /25 on the boosted tap.
kernel[4,3]=4
# ddepth=-1 keeps the output at the source image's bit depth.
imgNew=cv2.filter2D(img,-1,kernel)
cv2.imwrite('convolutionImage7.jpg',imgNew)
986,836 | 6c9a2977b40678d599ef18e3a66541c5ecf39c8f | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican site configuration for the txt2tags blog.
AUTHOR = u'Aurelio Jargas'
SITENAME = u'txt2tags blog'
# Left empty during development; set the canonical URL when publishing.
SITEURL = ''
PATH = 'content'
THEME = 'relapse'
# Custom reader plugin that renders txt2tags sources.
PLUGINS = ['txt2tags_reader']
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Website', 'https://txt2tags.org'),
         ('Download', 'https://txt2tags.org/download'),
         ('GitHub', 'https://github.com/txt2tags'),)
# Social widget
SOCIAL = (('Facebook', 'https://www.facebook.com/txt2tags'),
          ('Telegram', 'https://www.t.me/txt2tags'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
986,837 | dfec0e1f3af96d375b2d68fce7a9bb1fc3d845f0 | class Bob:
def Bob():
return self;
def hey(self, spoke):
if spoke[3:4] == "\xc4" \
and spoke.find("Let's") == -1 \
and spoke.endswith("!") > 0 \
or spoke[:len(spoke)-1].isupper():
return "Woah, chill out!"
elif spoke.endswith("?") > 0:
return "Sure."
elif spoke.isspace() or len(spoke) == 0:
return "Fine. Be that way!"
else:
return "Whatever."
|
986,838 | c96e8fb6204eecb7b55242e652cdb525543aa0d2 | import random
def game():
    """Interactive console hangman: guess a fruit/snack name in 8 tries."""
    # ---------------HANGMAN DESIGN----------------
    # Gallows art indexed by the number of wrong guesses so far ("1"-"8").
    hangman = {
        "1": "________\n| |\n|\n|\n|\n|\n|_______",
        "2": "________\n| |\n| o\n|\n|\n|\n|_______",
        "3": "________\n| |\n| o\n| |\n|\n|\n|_______",
        "4": "________\n| |\n| o\n| |\n| |\n|\n|_______",
        "5": "________\n| |\n| o\n| \|\n| |\n|\n|_______",
        "6": "________\n| |\n| o\n| \|/\n| |\n|\n|_______",
        "7": "________\n| |\n| o\n| \|/\n| |\n| /\n|_______",
        "8": "________\n| |\n| o\n| \|/\n| |\n| / \ \n|_______"
    }
    # ----------------HINT OPTIONS------------------
    # word -> two hint strings; one is shown at random when 3 chances remain.
    content_properties = {
        'apple': ['Red, tennis-ball sized', 'Eating a day, keeps the doctor away!'],
        'mango': ['Yellow and Sweet', 'King of fruits'],
        'Pineapple': ['Thorny Fruit, with spike leaves', 'Grown on the soil-top'],
        'strawberry': ['Red, Sweet-sour', 'Girls fav'],
        'chocolate': ['Brown and Bitter', 'Lovers favourite'],
        'orange': ['Spherical, Sour and sweet', 'Share in pieces'],
        'banana': ['Yellow, Wrestlers food', 'Potassium rich'],
        'watermelon': ['Juicy, Red and green', "Relatable to Thompson's pudding model"],
        'guava': ['Tough and tasty, Eatable seeds', 'Parrots favourite']
    }
    # --------------CHOOSING RANDOM OPTION--------------------
    contents = list(content_properties.keys())
    chosen = random.choice(contents)
    # Guesses are matched against the lower-cased word.
    # NOTE(review): 'Pineapple' keeps its capital in the win/lose display.
    chosenList = list(chosen.lower())
    spaces = len(chosen)
    guessList = list("_" * spaces)
    print("\nTotal chances = 8, if you finish all your chances, you will be hanged!")
    for i in guessList:
        print("_ ", end=' ')
    chances = 8
    guesses = []
    while chances > 0:
        guess = input("\nEnter your guess: ")
        if guess in guesses:
            # Repeated guesses cost nothing; just re-prompt.
            print("\tAlready guessed!")
            print("\tPrevious guess: ", guesses)
            continue
        else:
            guesses.append(guess)
        if guess in chosenList:
            # Reveal every occurrence of the guessed letter.
            pos = [i for i in range(len(chosenList)) if chosenList[i] == guess]
            for ind in pos:
                guessList[ind] = guess
            for i in guessList:
                print(i, " ", end=' ')
            if guessList == chosenList:
                print("\n\nThe word is ", chosen)
                print("\tCONGRATULATIONS FOR YOUR WIN!")
                cont = input("\nDO YOU WANT TO PLAY AGAIN[Y/N] ??")
                if cont.lower() in ['y', 'yes']:
                    # NOTE(review): replaying recurses into game(), so very
                    # long sessions grow the call stack.
                    game()
                else:
                    break
            else:
                continue
        else:
            # Wrong guess: burn a chance and draw the next gallows stage.
            chances -= 1
            print("\tGuessed word is not in the name!")
            if chances == 0:
                print(hangman[str(8 - chances)])
                print("\n\nThe word is ", chosen)
                print("\tCHANCES OVER, YOU LOOSE!")
                cont = input("\nDO YOU WANT TO PLAY AGAIN[Y/N] ??")
                if cont.lower() in ['y', 'yes']:
                    game()
                else:
                    break
            elif chances > 0:
                print(hangman[str(8 - chances)])
                print("\n\tChances left: ", chances)
                if chances == 3:
                    # One free hint when three chances remain.
                    print("\n\n----------")
                    print("| HINT | ", random.choice(content_properties[chosen]))
                    print("----------\n\n")
                for i in guessList:
                    print(i, " ", end=' ')
                print("\n\nTry Again!")


# Start the first round.
game()
986,839 | 2075d46616ce444d70cfa066eae20b482abaf72d | """
For a positive number n, define C(n) as the number of the integers x, for which 1<x<n andx3≡1 mod n.
When n=91, there are 8 possible values for x, namely : 9, 16, 22, 29, 53, 74, 79, 81.
Thus, C(91)=8.
Find the sum of the positive numbers n≤1011 for which C(n)=242.
""" |
986,840 | ea5644969e681412554da95405ea8c082b20f6f8 | """empty message
Revision ID: e2fdfe047c83
Revises:
Create Date: 2019-10-07 23:07:38.042830
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e2fdfe047c83'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: curriculum, picture, user and vacationTip."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('curriculum',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('c_id', sa.String(length=32), nullable=True),
    sa.Column('c_name', sa.String(length=32), nullable=True),
    sa.Column('c_time', sa.Date(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('picture',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('label', sa.String(length=32), nullable=True),
    sa.Column('picture', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('user_name', sa.String(length=32), nullable=True),
    sa.Column('password', sa.String(length=32), nullable=True),
    sa.Column('email', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('vacationTip',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('vacation_id', sa.Integer(), nullable=True),
    sa.Column('vacation_name', sa.String(length=32), nullable=True),
    sa.Column('vacation_type', sa.String(length=32), nullable=True),
    sa.Column('vacation_start', sa.String(length=32), nullable=True),
    sa.Column('vacation_deadline', sa.String(length=32), nullable=True),
    sa.Column('vacation_description', sa.Text(), nullable=True),
    sa.Column('vacation_phone', sa.String(length=32), nullable=True),
    sa.Column('vacation_status', sa.Integer(), nullable=True),
    sa.Column('vacation_day', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade(), in reverse creation order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('vacationTip')
    op.drop_table('user')
    op.drop_table('picture')
    op.drop_table('curriculum')
    # ### end Alembic commands ###
|
986,841 | 842cd763a02fe23006bf643997e9e4e4bb3d7ad6 | import unittest
import numpy as np
from skfda.representation.basis import (Basis, FDataBasis, Constant, Monomial,
BSpline, Fourier)
class TestBasis(unittest.TestCase):
# def setUp(self): could be defined for set up before any test
def test_from_data_cholesky(self):
t = np.linspace(0, 1, 5)
x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
basis = BSpline((0, 1), n_basis=5)
np.testing.assert_array_almost_equal(
FDataBasis.from_data(x, t, basis, method='cholesky'
).coefficients.round(2),
np.array([[1., 2.78, -3., -0.78, 1.]])
)
def test_from_data_qr(self):
t = np.linspace(0, 1, 5)
x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
basis = BSpline((0, 1), n_basis=5)
np.testing.assert_array_almost_equal(
FDataBasis.from_data(x, t, basis, method='qr'
).coefficients.round(2),
np.array([[1., 2.78, -3., -0.78, 1.]])
)
def test_bspline_penalty_special_case(self):
basis = BSpline(n_basis=5)
np.testing.assert_array_almost_equal(
basis.penalty(basis.order - 1),
np.array([[1152., -2016., 1152., -288., 0.],
[-2016., 3600., -2304., 1008., -288.],
[1152., -2304., 2304., -2304., 1152.],
[-288., 1008., -2304., 3600., -2016.],
[0., -288., 1152., -2016., 1152.]]))
def test_fourier_penalty(self):
basis = Fourier(n_basis=5)
np.testing.assert_array_almost_equal(
basis.penalty(2).round(2),
np.array([[0., 0., 0., 0., 0.],
[0., 1558.55, 0., 0., 0.],
[0., 0., 1558.55, 0., 0.],
[0., 0., 0., 24936.73, 0.],
[0., 0., 0., 0., 24936.73]]))
def test_bspline_penalty(self):
basis = BSpline(n_basis=5)
np.testing.assert_array_almost_equal(
basis.penalty(2).round(2),
np.array([[96., -132., 24., 12., 0.],
[-132., 192., -48., -24., 12.],
[24., -48., 48., -48., 24.],
[12., -24., -48., 192., -132.],
[0., 12., 24., -132., 96.]]))
def test_bspline_penalty_numerical(self):
basis = BSpline(n_basis=5)
np.testing.assert_array_almost_equal(
basis.penalty(coefficients=[0, 0, 1]).round(2),
np.array([[96., -132., 24., 12., 0.],
[-132., 192., -48., -24., 12.],
[24., -48., 48., -48., 24.],
[12., -24., -48., 192., -132.],
[0., 12., 24., -132., 96.]]))
def test_basis_product_generic(self):
monomial = Monomial(n_basis=5)
fourier = Fourier(n_basis=3)
prod = BSpline(n_basis=9, order=8)
self.assertEqual(Basis.default_basis_of_product(
monomial, fourier), prod)
def test_basis_constant_product(self):
constant = Constant()
monomial = Monomial()
fourier = Fourier()
bspline = BSpline(n_basis=5, order=3)
self.assertEqual(constant.basis_of_product(monomial), monomial)
self.assertEqual(constant.basis_of_product(fourier), fourier)
self.assertEqual(constant.basis_of_product(bspline), bspline)
self.assertEqual(monomial.basis_of_product(constant), monomial)
self.assertEqual(fourier.basis_of_product(constant), fourier)
self.assertEqual(bspline.basis_of_product(constant), bspline)
def test_basis_fourier_product(self):
# Test when periods are the same
fourier = Fourier(n_basis=5)
fourier2 = Fourier(n_basis=3)
prod = Fourier(n_basis=7)
self.assertEqual(fourier.basis_of_product(fourier2), prod)
# Test when periods are different
fourier2 = Fourier(n_basis=3, period=2)
prod = BSpline(n_basis=9, order=8)
self.assertEqual(fourier.basis_of_product(fourier2), prod)
def test_basis_monomial_product(self):
monomial = Monomial(n_basis=5)
monomial2 = Monomial(n_basis=3)
prod = Monomial(n_basis=8)
self.assertEqual(monomial.basis_of_product(monomial2), prod)
def test_basis_bspline_product(self):
bspline = BSpline(n_basis=6, order=4)
bspline2 = BSpline(domain_range=(0, 1), n_basis=6,
order=4, knots=[0, 0.3, 1 / 3, 1])
prod = BSpline(domain_range=(0, 1), n_basis=10, order=7,
knots=[0, 0.3, 1 / 3, 2 / 3, 1])
self.assertEqual(bspline.basis_of_product(bspline2), prod)
def test_basis_inner_matrix(self):
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=3)),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_array_almost_equal(Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=4)),
[[1, 1 / 2, 1 / 3, 1 / 4], [1 / 2, 1 / 3, 1 / 4, 1 / 5], [1 / 3, 1 / 4, 1 / 5, 1 / 6]])
# TODO testing with other basis
def test_basis_gram_matrix(self):
np.testing.assert_array_almost_equal(Monomial(n_basis=3).gram_matrix(),
[[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
np.testing.assert_almost_equal(Fourier(n_basis=3).gram_matrix(),
np.identity(3))
np.testing.assert_almost_equal(BSpline(n_basis=6).gram_matrix().round(4),
np.array([[4.760e-02, 2.920e-02, 6.200e-03, 4.000e-04, 0.000e+00, 0.000e+00],
[2.920e-02, 7.380e-02, 5.210e-02,
1.150e-02, 1.000e-04, 0.000e+00],
[6.200e-03, 5.210e-02, 1.090e-01,
7.100e-02, 1.150e-02, 4.000e-04],
[4.000e-04, 1.150e-02, 7.100e-02,
1.090e-01, 5.210e-02, 6.200e-03],
[0.000e+00, 1.000e-04, 1.150e-02,
5.210e-02, 7.380e-02, 2.920e-02],
[0.000e+00, 0.000e+00, 4.000e-04, 6.200e-03, 2.920e-02, 4.760e-02]]))
def test_basis_basis_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=4)
np.testing.assert_array_almost_equal(
monomial.inner_product(bspline).round(3),
np.array(
[[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983],
[0.01249991, 0.07500017, 0.12499983, 0.17500017, 0.11249991],
[0.00208338, 0.02916658, 0.07083342, 0.12916658, 0.10208338],
[0.00044654, 0.01339264, 0.04375022, 0.09910693, 0.09330368]])
.round(3)
)
np.testing.assert_array_almost_equal(
monomial.inner_product(bspline),
bspline.inner_product(monomial).T
)
def test_basis_fdatabasis_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
monomial.inner_product(bsplinefd).round(3),
np.array([[2., 7., 12.],
[1.29626206, 3.79626206, 6.29626206],
[0.96292873, 2.62959539, 4.29626206],
[0.7682873, 2.0182873, 3.2682873]]).round(3)
)
def test_fdatabasis_fdatabasis_inprod(self):
monomial = Monomial(n_basis=4)
monomialfd = FDataBasis(monomial, [[5, 4, 1, 0],
[4, 2, 1, 0],
[4, 1, 6, 4],
[4, 5, 0, 1],
[5, 6, 2, 0]])
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
monomialfd.inner_product(bsplinefd).round(3),
np.array([[16.14797697, 52.81464364, 89.4813103],
[11.55565285, 38.22211951, 64.88878618],
[18.14698361, 55.64698361, 93.14698361],
[15.2495976, 48.9995976, 82.7495976],
[19.70392982, 63.03676315, 106.37009648]]).round(3)
)
np.testing.assert_array_almost_equal(
monomialfd._inner_product_integrate(
bsplinefd, None, None).round(3),
np.array([[16.14797697, 52.81464364, 89.4813103],
[11.55565285, 38.22211951, 64.88878618],
[18.14698361, 55.64698361, 93.14698361],
[15.2495976, 48.9995976, 82.7495976],
[19.70392982, 63.03676315, 106.37009648]]).round(3)
)
def test_comutativity_inprod(self):
monomial = Monomial(n_basis=4)
bspline = BSpline(n_basis=5, order=3)
bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))
np.testing.assert_array_almost_equal(
bsplinefd.inner_product(monomial).round(3),
np.transpose(monomial.inner_product(bsplinefd).round(3))
)
def test_fdatabasis_times_fdatabasis_fdatabasis(self):
monomial = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
bspline = FDataBasis(BSpline(n_basis=6, order=4), [1, 2, 4, 1, 0, 1])
times_fdar = monomial.times(bspline)
prod_basis = BSpline(n_basis=9, order=6, knots=[0, 0.25, 0.5, 0.75, 1])
prod_coefs = np.array([[0.9788352, 1.6289955, 2.7004969, 6.2678739,
8.7636441, 4.0069960, 0.7126961, 2.8826708,
6.0052311]])
self.assertEqual(prod_basis, times_fdar.basis)
np.testing.assert_array_almost_equal(
prod_coefs, times_fdar.coefficients)
def test_fdatabasis_times_fdatabasis_list(self):
monomial = FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = monomial.times([3, 2, 1])
expec_basis = Monomial(n_basis=3)
expec_coefs = np.array([[3, 6, 9], [8, 10, 12], [7, 8, 9]])
self.assertEqual(expec_basis, result.basis)
np.testing.assert_array_almost_equal(expec_coefs, result.coefficients)
def test_fdatabasis_times_fdatabasis_int(self):
monomial = FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = monomial.times(3)
expec_basis = Monomial(n_basis=3)
expec_coefs = np.array([[3, 6, 9], [12, 15, 18], [21, 24, 27]])
self.assertEqual(expec_basis, result.basis)
np.testing.assert_array_almost_equal(expec_coefs, result.coefficients)
def test_fdatabasis__add__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 4, 6], [4, 6, 8]]))
np.testing.assert_equal(monomial2 + 1,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [4, 4, 5]]))
np.testing.assert_equal(1 + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [4, 4, 5]]))
np.testing.assert_equal(monomial2 + [1, 2],
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_equal([1, 2] + monomial2,
FDataBasis(Monomial(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__add__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
def test_fdatabasis__sub__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, 0, 0], [-2, -2, -2]]))
np.testing.assert_equal(monomial2 - 1,
FDataBasis(Monomial(n_basis=3),
[[0, 2, 3], [2, 4, 5]]))
np.testing.assert_equal(1 - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, -2, -3], [-2, -4, -5]]))
np.testing.assert_equal(monomial2 - [1, 2],
FDataBasis(Monomial(n_basis=3),
[[0, 2, 3], [1, 4, 5]]))
np.testing.assert_equal([1, 2] - monomial2,
FDataBasis(Monomial(n_basis=3),
[[0, -2, -3], [-1, -4, -5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__sub__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
def test_fdatabasis__mul__(self):
monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
np.testing.assert_equal(monomial1 * 2,
FDataBasis(Monomial(n_basis=3),
[[2, 4, 6]]))
np.testing.assert_equal(3 * monomial2,
FDataBasis(Monomial(n_basis=3),
[[3, 6, 9], [9, 12, 15]]))
np.testing.assert_equal(3 * monomial2,
monomial2 * 3)
np.testing.assert_equal(monomial2 * [1, 2],
FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [6, 8, 10]]))
np.testing.assert_equal([1, 2] * monomial2,
FDataBasis(Monomial(n_basis=3),
[[1, 2, 3], [6, 8, 10]]))
np.testing.assert_raises(NotImplementedError, monomial2.__mul__,
FDataBasis(Fourier(n_basis=3),
[[2, 2, 3], [5, 4, 5]]))
np.testing.assert_raises(NotImplementedError, monomial2.__mul__,
monomial2)
def test_fdatabasis__truediv__(self):
    """Check scalar and per-sample division of FDataBasis objects.

    Renamed from a second ``test_fdatabasis__mul__`` definition, which
    silently shadowed the real multiplication test defined above and
    prevented it from ever running under unittest discovery.
    """
    monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
    monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])
    np.testing.assert_equal(monomial1 / 2,
                            FDataBasis(Monomial(n_basis=3),
                                       [[1 / 2, 1, 3 / 2]]))
    np.testing.assert_equal(monomial2 / 2,
                            FDataBasis(Monomial(n_basis=3),
                                       [[1 / 2, 1, 3 / 2], [3 / 2, 2, 5 / 2]]))
    np.testing.assert_equal(monomial2 / [1, 2],
                            FDataBasis(Monomial(n_basis=3),
                                       [[1, 2, 3], [3 / 2, 2, 5 / 2]]))
def test_fdatabasis_derivative_constant(self):
monomial = FDataBasis(Monomial(n_basis=8),
[1, 5, 8, 9, 7, 8, 4, 5])
monomial2 = FDataBasis(Monomial(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
np.testing.assert_equal(monomial.derivative(),
FDataBasis(Monomial(n_basis=7),
[5, 16, 27, 28, 40, 24, 35]))
np.testing.assert_equal(monomial.derivative(order=0), monomial)
np.testing.assert_equal(monomial.derivative(order=6),
FDataBasis(Monomial(n_basis=2),
[2880, 25200]))
np.testing.assert_equal(monomial2.derivative(),
FDataBasis(Monomial(n_basis=4),
[[9, 14, 12, 12],
[7, 18, 24, 20],
[6, 12, 18, 32]]))
np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
np.testing.assert_equal(monomial2.derivative(order=3),
FDataBasis(Monomial(n_basis=2),
[[24, 72],
[48, 120],
[36, 192]]))
def test_fdatabasis_derivative_monomial(self):
monomial = FDataBasis(Monomial(n_basis=8),
[1, 5, 8, 9, 7, 8, 4, 5])
monomial2 = FDataBasis(Monomial(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
np.testing.assert_equal(monomial.derivative(),
FDataBasis(Monomial(n_basis=7),
[5, 16, 27, 28, 40, 24, 35]))
np.testing.assert_equal(monomial.derivative(order=0), monomial)
np.testing.assert_equal(monomial.derivative(order=6),
FDataBasis(Monomial(n_basis=2),
[2880, 25200]))
np.testing.assert_equal(monomial2.derivative(),
FDataBasis(Monomial(n_basis=4),
[[9, 14, 12, 12],
[7, 18, 24, 20],
[6, 12, 18, 32]]))
np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
np.testing.assert_equal(monomial2.derivative(order=3),
FDataBasis(Monomial(n_basis=2),
[[24, 72],
[48, 120],
[36, 192]]))
def test_fdatabasis_derivative_fourier(self):
fourier = FDataBasis(Fourier(n_basis=7),
[1, 5, 8, 9, 8, 4, 5])
fourier2 = FDataBasis(Fourier(n_basis=5),
[[4, 9, 7, 4, 3],
[1, 7, 9, 8, 5],
[4, 6, 6, 6, 8]])
fou0 = fourier.derivative(order=0)
fou1 = fourier.derivative()
fou2 = fourier.derivative(order=2)
np.testing.assert_equal(fou1.basis, fourier.basis)
np.testing.assert_almost_equal(fou1.coefficients.round(5),
np.atleast_2d([0, -50.26548, 31.41593,
-100.53096, 113.09734,
-94.24778, 75.39822]))
np.testing.assert_equal(fou0, fourier)
np.testing.assert_equal(fou2.basis, fourier.basis)
np.testing.assert_almost_equal(fou2.coefficients.round(5),
np.atleast_2d([0, -197.39209, -315.82734,
-1421.22303, -1263.30936,
-1421.22303, -1776.52879]))
fou0 = fourier2.derivative(order=0)
fou1 = fourier2.derivative()
fou2 = fourier2.derivative(order=2)
np.testing.assert_equal(fou1.basis, fourier2.basis)
np.testing.assert_almost_equal(fou1.coefficients.round(5),
[[0, -43.98230, 56.54867, -37.69911, 50.26548],
[0, -56.54867, 43.98230, -
62.83185, 100.53096],
[0, -37.69911, 37.69911, -100.53096, 75.39822]])
np.testing.assert_equal(fou0, fourier2)
np.testing.assert_equal(fou2.basis, fourier2.basis)
np.testing.assert_almost_equal(fou2.coefficients.round(5),
[[0, -355.30576, -276.34892, -631.65468, -473.74101],
[0, -276.34892, -355.30576, -
1263.30936, -789.56835],
[0, -236.87051, -236.87051, -947.48202, -1263.30936]])
    def test_fdatabasis_derivative_bspline(self):
        """Derivatives of B-spline-basis FDataBasis objects.

        Unlike the Fourier case, differentiating a B-spline lowers both the
        spline order and the number of basis functions, so the expected
        result carries a *different* basis as well as new coefficients.
        order=0 must again be the identity.
        """
        bspline = FDataBasis(BSpline(n_basis=8),
                             [1, 5, 8, 9, 7, 8, 4, 5])
        bspline2 = FDataBasis(BSpline(n_basis=5),
                              [[4, 9, 7, 4, 3],
                               [1, 7, 9, 8, 5],
                               [4, 6, 6, 6, 8]])
        bs0 = bspline.derivative(order=0)
        bs1 = bspline.derivative()
        bs2 = bspline.derivative(order=2)
        # One derivative drops order and n_basis by one each.
        np.testing.assert_equal(bs1.basis, BSpline(n_basis=7, order=3))
        np.testing.assert_almost_equal(bs1.coefficients,
                                       np.atleast_2d([60, 22.5, 5,
                                                      -10, 5, -30, 15]))
        np.testing.assert_equal(bs0, bspline)
        np.testing.assert_equal(bs2.basis, BSpline(n_basis=6, order=2))
        np.testing.assert_almost_equal(bs2.coefficients,
                                       np.atleast_2d([-375, -87.5, -75,
                                                      75, -175, 450]))
        bs0 = bspline2.derivative(order=0)
        bs1 = bspline2.derivative()
        bs2 = bspline2.derivative(order=2)
        np.testing.assert_equal(bs1.basis, BSpline(n_basis=4, order=3))
        np.testing.assert_almost_equal(bs1.coefficients,
                                       [[30, -6, -9, -6],
                                        [36, 6, -3, -18],
                                        [12, 0, 0, 12]])
        np.testing.assert_equal(bs0, bspline2)
        np.testing.assert_equal(bs2.basis, BSpline(n_basis=3, order=2))
        np.testing.assert_almost_equal(bs2.coefficients,
                                       [[-144, -6, 12],
                                        [-120, -18, -60],
                                        [-48, 0, 48]])
if __name__ == '__main__':
    # Run the test suite when executed as a script.  The stray no-arg
    # print() that emitted a blank line before the run has been removed.
    unittest.main()
|
986,842 | 53a8103e89ae303bb48efe6407df6a0705faf14b |
#Dada una lista de números enteros muestre en pantalla el número más grande, el más pequeño y el promedio de la lista
def listaNumEnte(lista):
    """Print the largest number, the smallest number and the average of ``lista``.

    Args:
        lista: non-empty list of numbers.

    Raises:
        ValueError: if ``lista`` is empty (``max``/``min`` of empty sequence).
    """
    mayor = max(lista)
    menor = min(lista)
    # Built-in sum/len instead of the original manual accumulator loop.
    promedio = sum(lista) / len(lista)
    print('---------- Respuestas ------------')
    print(f'''
El número mayor: {mayor},
el menor: {menor} y
el promedio: {promedio}
''')
# Sample data: show max, min and average for ten integers.
numeros_enteros = [22, 32, 43, 20, 38, 40, 60, 18, 84, 28]
listaNumEnte(numeros_enteros)
|
986,843 | a27d52309f7c62b8bc63abe0189bf6b9a2126ad0 | import telebot
import flask
import time
from threading import Semaphore
from os import getcwd
import json
from flask import Flask, request
# Load the per-chat database saved on disk; start empty if the file is
# missing.  `with` closes the file handles the original version leaked.
try:
    with open(getcwd() + '/save', 'r') as fd:
        res = json.loads(fd.read())
except IOError:
    res = {}
# Load the global (cross-chat) database the same way.
try:
    with open(getcwd() + '/save_global', 'r') as fd:
        res_g = json.loads(fd.read())
except IOError:
    res_g = {}
# SECURITY: hard-coded Telegram bot token committed to source — this token
# should be revoked and loaded from an environment variable instead.
token = '1758008542:AAFnF3MIIfyMLgLnW97rzkVmZtsEGOspPFM'
server = Flask(__name__)
# Canned reply strings reused by every handler.
w_a = "Wrong ammount of parameters use /help to see how the bot is used."
e_p = "Error in parameters"
help_msg = """GI_Data_Collector_Bot is a tool to collect GI data and apply stadistical operations to the collected data.
Commands:
/start Starts the bot
/help Shows this help message
/add_entry Adds to the database the ammount of killed monsters and the ammount of dropped items once entered the data, adding a new entry if it doesn't exists
/finish_entry Finishes the last entry opened by the command /add_entry should be used once you have stopped adding entries
/show_data Shows the data collected in the current chat
/show_data <monster_name> Shows the data collected in the current chat about the monster
/show_global Works loke /show_data but showing the data collected in every chat the bot has been started in
/reset resets the data collected in the chat
---------------------------------------------------------------
!!! IMPORTANT !!!:
The bot will only collect up to (2^32) - 1 entries monster type in every chat.
---------------------------------------------------------------
"""
def save(dic, name):
    """Persist ``dic`` as JSON to ``<cwd>/<name>``.

    The original implementation rewrote the quotes of ``str(dic)`` by hand,
    which produces output ``json.loads`` cannot always read back (e.g. for
    True/None values or apostrophes inside keys) and also leaked the file
    handle.  ``json.dumps`` matches the ``json.loads`` used at startup.
    """
    with open(getcwd() + '/' + name, 'w') as fd:
        fd.write(json.dumps(dic))
def get_real_words(message):
    """Return the non-empty words of ``message.text`` after the command word."""
    return [word for word in message.text.split(' ')[1:] if word]
def get_real_words_str(message):
    """Return the non-empty space-separated words of the string ``message``."""
    return [word for word in message.split(' ') if word]
def show_data_monster_item_amm(dic, chat_id, monster_name, item_name, ammount):
    """Format one line: how often *item_name* dropped *ammount* at once.

    Returns the formatted text, or '' after reporting bad keys to the chat.
    (The original error path implicitly returned None, which crashed the
    caller's string concatenation with a TypeError.)
    """
    try:
        if abs((float)(dic[monster_name][0])) < 0.00001:
            return "Zero division."
        num = (float)((float)(dic[monster_name][1][item_name][ammount])/(float)(dic[monster_name][0]))
        str_num = str(num)
        rounded = ''
        # Crude truncation of the probability to at most 4 characters.
        for i in range(min(len(str_num), 4)):
            rounded += str(str_num[i])
        text = f"Ammount of times the { item_name } was obtained { ammount } times at once: { dic[monster_name][1][item_name][ammount] } approximated probability: { rounded }\n\n"
        return text
    except KeyError:
        bot.send_message(chat_id, e_p)
        return ''
def show_data_monster_item(dic, chat_id, monster_name, item_name):
    """Concatenate the per-ammount drop lines for *item_name*.

    Returns '' after reporting bad keys to the chat.  Guards against the
    helper returning None (its error path), which previously raised a
    TypeError during concatenation.
    """
    try:
        text = ''
        for amm in dic[monster_name][1][item_name].keys():
            part = show_data_monster_item_amm(dic, chat_id, monster_name, item_name, amm)
            if part:
                text += part
        return text
    except KeyError:
        bot.send_message(chat_id, e_p)
        return ''
def show_data_monster(dic, chat_id, monster_name):
    """Send a summary of kills and drop statistics for one monster to the chat."""
    try:
        text = f"Ammount of { monster_name } killed: { dic[monster_name][0] }\n\n"
        for item in dic[monster_name][1].keys():
            part = show_data_monster_item(dic, chat_id, monster_name, item)
            # The helper returns None after reporting an error; don't crash.
            text += '\t' + (part or '')
        bot.send_message(chat_id, text)
    except KeyError:
        bot.send_message(chat_id, e_p)
def show_data_general(dic, chat_id):
    """Send the collected statistics of every monster in *dic* to the chat."""
    try:
        for monster_name in dic:
            show_data_monster(dic, chat_id, monster_name)
    except KeyError:
        bot.send_message(chat_id, e_p)
# Guards every access to the in-memory databases below.
mutex = Semaphore()
# chat_id -> True while the chat is expected to send a "<monster> - <kills>" line.
entry_monster = {}
# chat_id -> monster name whose item drops are currently being entered ('' = none).
current_monster = {}
# Per-chat database: {str(chat_id): {monster: [kills, {item: {ammount: count}}]}}
_dictionary = res
# Cross-chat aggregate with the same per-monster layout.
_global = res_g
bot = telebot.TeleBot(token, parse_mode=None)
@bot.message_handler(commands=['start'])
def send_welcome(message):
    """Handle /start: register the chat in the database and greet the user."""
    # _dictionary is keyed by str(chat_id) (it round-trips through JSON), so
    # the membership test must use the string form too; the original checked
    # the int id and therefore wiped the chat's data on every /start.
    if not str(message.chat.id) in _dictionary:
        mutex.acquire()
        _dictionary[str(message.chat.id)] = {}
        mutex.release()
    bot.reply_to(message, "Welcome to NazDia's GI data collector's bot.")
@bot.message_handler(commands=['add_entry'])
def create_entry(message):
    """Handle /add_entry: prompt for a "<Monster Name> - <Ammount>" line."""
    chat_id = message.chat.id
    real_words = get_real_words(message)
    mutex.acquire()
    if len(real_words) > 0:
        # /add_entry takes no arguments.
        bot.reply_to(message, w_a)
    else:
        bot.reply_to(message, """Write the monster's name and the ammount killed in the following format:
<Monster Name> - <Ammount Killed>""")
        # Mark this chat as waiting for a monster entry line.
        entry_monster[chat_id] = True
        current_monster[chat_id] = ''
        save(_dictionary, 'save')
        save(_global, 'save_global')
    mutex.release()
@bot.message_handler(commands=['help'])
def send_help(message):
    """Handle /help: reply with the usage text."""
    bot.reply_to(message, help_msg)
@bot.message_handler(commands=['show_data'])
def show_data(message):
    """Handle /show_data [monster name]: report this chat's statistics."""
    chat_id = message.chat.id
    change = False
    real_words = get_real_words(message)
    # get_real_words already drops empty words, so ``change`` simply records
    # whether any argument was given at all.
    for i in real_words:
        if len(i) > 0:
            change = True
    if change:
        # Re-join so a multi-word monster name counts as a single argument.
        real_words = [' '.join(real_words)]
    else:
        real_words = []
    mutex.acquire()
    if len(real_words) == 0:
        show_data_general(_dictionary[str(chat_id)], chat_id)
    elif len(real_words) == 1:
        show_data_monster(_dictionary[str(chat_id)], chat_id, real_words[0])
    else:
        bot.send_message(chat_id, w_a)
    mutex.release()
@bot.message_handler(commands=['finish_entry'])
def finish(message):
    """Handle /finish_entry: stop expecting a monster line from this chat."""
    mutex.acquire()
    entry_monster[message.chat.id] = False
    mutex.release()
@bot.message_handler(commands=['show_global'])
def show_global(message):
    """Handle /show_global: like show_data but over the cross-chat aggregate."""
    chat_id = message.chat.id
    change = False
    real_words = get_real_words(message)
    # Same argument handling as show_data, just reading from _global.
    for i in real_words:
        if len(i) > 0:
            change = True
    if change:
        real_words = [' '.join(real_words)]
    else:
        real_words = []
    mutex.acquire()
    if len(real_words) == 0:
        show_data_general(_global, chat_id)
    elif len(real_words) == 1:
        show_data_monster(_global, chat_id, real_words[0])
    else:
        bot.send_message(chat_id, w_a)
    mutex.release()
@bot.message_handler(commands=['reset'])
def reset(message):
    """Handle /reset: wipe this chat's data (the global aggregate is kept)."""
    chat_id = message.chat.id
    mutex.acquire()
    _dictionary[str(chat_id)] = {}
    save(_dictionary, 'save')
    mutex.release()
@bot.message_handler(func=lambda message : (message.chat.id in entry_monster.keys() and entry_monster[message.chat.id]))
def receive_monster_entry(message):
    """Parse a "<Monster Name> - <Ammount Killed>" line and update both databases.

    Active only while /add_entry has flagged the chat in entry_monster.
    """
    chat_id = message.chat.id
    del_minus = message.text.split('-')
    if len(del_minus) != 2:
        bot.reply_to(message, w_a)
        return
    real_words = get_real_words_str(del_minus[0])
    sub = get_real_words_str(del_minus[1])
    if len(sub) != 1:
        bot.reply_to(message, w_a)
        return
    try:
        test = (int)(sub[0])
    except:
        bot.reply_to(message, e_p)
        return
    temp = ' '.join(real_words)
    real_words = [temp] + sub
    mutex.acquire()
    # try/finally guarantees the mutex is released: the original returned on
    # the capacity branch with the mutex still held, deadlocking the bot.
    try:
        try:
            # Cap each monster's kill counter below 2**32.
            if test + _dictionary[str(chat_id)][real_words[0]][0] >= 2**32:
                bot.reply_to(message, """This entry and the previous collected data exceeds the data collector's capacity of data about this monster.
Try sending us data about other monsters instead.""")
                return
            _dictionary[str(chat_id)][real_words[0]][0] += (int)(real_words[1])
        except KeyError:
            _dictionary[str(chat_id)][real_words[0]] = [(int)(real_words[1]), {}]
        try:
            _global[real_words[0]][0] += (int)(real_words[1])
        except KeyError:
            _global[real_words[0]] = [(int)(real_words[1]), {}]
        bot.reply_to(message, """Monster entry updated, now entry the ammount of drops in the following format:
<Item Dropped> * <Ammount Dropped in every instance> - <Ammount of times this Drop ocurred>""")
        save(_dictionary, 'save')
        save(_global, 'save_global')
        # Switch the chat from "expecting monster" to "expecting item drops".
        entry_monster[chat_id] = False
        current_monster[chat_id] = real_words[0]
    finally:
        mutex.release()
@bot.message_handler(func=lambda message: (message.chat.id in current_monster.keys() and len(current_monster[message.chat.id]) > 0))
def receive_item_entry(message):
    """Parse an "<Item> * <Ammount per drop> - <Times>" line and record it
    for the monster currently being entered in this chat."""
    chat_id = message.chat.id
    _current_monster = current_monster[chat_id]
    del_minus = message.text.split('-')
    if len(del_minus) != 2:
        bot.reply_to(message, w_a)
        return
    temp = del_minus[0].split('*')
    if len(temp) != 2:
        bot.reply_to(message, w_a)
        return
    temp2 = get_real_words_str(del_minus[1])
    del_minus = temp + temp2
    if len(del_minus) != 3:
        bot.reply_to(message, w_a)
        # BUG FIX: the original fell through here and parsed the bad input
        # anyway (indexing del_minus[2] on malformed data).
        return
    real_words = get_real_words_str(del_minus[0])
    sub = get_real_words_str(del_minus[1])
    sub2 = get_real_words_str(del_minus[2])
    if len(sub) != 1:
        bot.reply_to(message, w_a)
        return
    try:
        (int)(sub[0])
    except:
        bot.reply_to(message, e_p)
        return
    if len(sub2) != 1:
        bot.reply_to(message, w_a)
        return
    try:
        (int)(sub2[0])
    except:
        bot.reply_to(message, e_p)
        return
    temp = ' '.join(real_words)
    real_words = [temp] + sub + sub2
    mutex.acquire()
    try:
        # Per-chat record: {ammount-per-drop: times-it-happened}.
        try:
            current = _dictionary[str(chat_id)][_current_monster][1][real_words[0]]
        except KeyError:
            _dictionary[str(chat_id)][_current_monster][1][real_words[0]] = {}
            current = _dictionary[str(chat_id)][_current_monster][1][real_words[0]]
        try:
            current[real_words[1]] += (int)(real_words[2])
        except KeyError:
            current[real_words[1]] = (int)(real_words[2])
        # Mirror the update into the cross-chat aggregate.
        try:
            current = _global[_current_monster][1][real_words[0]]
        except KeyError:
            _global[_current_monster][1][real_words[0]] = {}
            current = _global[_current_monster][1][real_words[0]]
        try:
            current[real_words[1]] += (int)(real_words[2])
        except KeyError:
            current[real_words[1]] = (int)(real_words[2])
        bot.reply_to(message, "Item drop entry updated, use the command /show_data to see the collected data, and if you are done, with this monster's entry, use the command /finish_entry , otherwise keep sending item drops.")
        save(_dictionary, 'save')
        save(_global, 'save_global')
    finally:
        mutex.release()
@server.route('/' + token, methods=['POST'])
def getMessage():
    """Webhook endpoint: feed a Telegram update JSON payload to the bot."""
    json_string = request.get_data().decode('utf-8')
    update = telebot.types.Update.de_json(json_string)
    bot.process_new_updates([update])
    return "!", 200
@server.route('/')
def webhook():
    """(Re)register the Telegram webhook for this bot."""
    bot.remove_webhook()
    # NOTE(review): 'https' looks like a truncated placeholder — this should
    # be the public HTTPS URL of the '/<token>' route; confirm before deploy.
    bot.set_webhook(url='https')
    return "!", 200
if __name__ == "__main__":
    # The module only does `from os import getcwd`, so `os.environ` was a
    # NameError; import what we actually need here.
    from os import environ
    server.run(host="0.0.0.0", port=int(environ.get('PORT', 5000)))
|
986,844 | b41801398b277c56401d7b458e14b0d2514fd187 | from django.shortcuts import render
from server.models import *
# Create your views here.
def index(request):
    """Render the landing page with all articles and the first one's paragraphs."""
    # NOTE(review): ``Artigo.artigo`` / ``P.p`` appear to be custom manager
    # names on the server app's models — confirm against server.models.
    artigos = Artigo.artigo.all()
    # NOTE(review): indexing artigos[0] raises IndexError when there are no
    # articles — confirm the deployment always seeds at least one, or guard.
    ps = P.p.filter(artigo = artigos[0])
    return render(request,'index.html',{'artigos':artigos,'artigo':artigos[0],'ps': ps})
|
986,845 | b24abdf63078ac127370e022ce89a6894d409ece | # (C) Andrew Glushchenko 2020
# REST API project v0.1
# Data models module
#
from . import db, session, Base
from sqlalchemy.orm import relationship
from flask_jwt_extended import create_access_token
from datetime import timedelta
from passlib.hash import bcrypt
class NW_Elements(Base):
    """A network element (named IP address) owned by a user.

    CRUD helpers commit through the module-level ``session`` and roll back
    on any failure before re-raising.
    """
    __tablename__ = 'elements'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    ip_address = db.Column(db.String(15), nullable=False)
    name = db.Column(db.String(80), nullable=False)
    description = db.Column(db.String(500), nullable=False)
    @classmethod
    def getElementList(cls, user_id):
        """Return every element belonging to *user_id*."""
        try:
            elements = cls.query.filter(cls.user_id == user_id).all()
            session.commit()
        except Exception:
            session.rollback()
            raise
        return elements
        pass
    def save(self):
        """Insert this element; roll back and re-raise on failure."""
        try:
            session.add(self)
            session.commit()
        except Exception:
            session.rollback()
            raise
        pass
    @classmethod
    def get(cls, element_id, user_id):
        """Return the element with *element_id* owned by *user_id*.

        Raises:
            Exception: when no such element exists.
        """
        element = cls.query.filter(cls.id == element_id, cls.user_id == user_id).first()
        try:
            if not element:
                raise Exception('No element with this id')
        except Exception:
            session.rollback()
            raise
        return element
        pass
    def update(self, **kwargs):
        """Set the given attributes on this element and commit."""
        try:
            for key, value in kwargs.items():
                setattr(self, key, value)
            session.commit()
        except Exception:
            session.rollback()
            raise
        pass
    def delete(self):
        """Delete this element; roll back and re-raise on failure."""
        try:
            session.delete(self)
            session.commit()
        except Exception:
            session.rollback()
            raise
        pass
class User(Base):
    """Application user; the password is stored as a bcrypt hash."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)
    email = db.Column(db.String(40), nullable=False, unique=True)
    password = db.Column(db.String(100), nullable=False)
    elements = relationship('NW_Elements', backref='user', lazy=True)
    def __init__(self, **kwargs):
        """Build a user from name/email/password kwargs, hashing the password."""
        self.name = kwargs.get('name')
        self.email = kwargs.get('email')
        self.password = bcrypt.hash(kwargs.get('password'))
        pass
    def get_token(self, expire_time=24):
        """Return a JWT access token for this user.

        NOTE(review): ``timedelta``'s first positional argument is *days*,
        so the default of 24 yields a 24-day token — confirm hours were not
        intended (``timedelta(hours=expire_time)``).
        """
        expire_delta = timedelta(expire_time)
        token = create_access_token(
            identity=self.id, expires_delta=expire_delta)
        return token
        pass
    @classmethod
    def authenticate(cls, email, password):
        """Return the user matching *email* if *password* verifies.

        NOTE(review): ``.one()`` raises NoResultFound for an unknown email
        (not the Exception raised below) — confirm callers handle both.
        """
        user = cls.query.filter(cls.email == email).one()
        if not bcrypt.verify(password, user.password):
            raise Exception('No user with this password')
        return user
        pass
def AddElements(id_el, id_us, addr, name_el, decsr=''):
    """Create and persist a NW_Elements row from the given field values."""
    element = NW_Elements(
        id=id_el,
        user_id=id_us,
        ip_address=addr,
        name=name_el,
        description=decsr,
    )
    element.save()
|
986,846 | cc3c0f2eeb2b739084943cc6956f8b2f772f5021 |
import webbrowser
# Banner shown before the interactive prompts.
print("------------------------------------\n"
      +" * B i g T O C H T M L T e x t G e n e r a t o r *\n"
      +" 2018 March byBTW\n"
      +"------------------------------------\n")
#--USER INPUT--
# SECURITY NOTE(review): eval(input(...)) executes arbitrary expressions
# typed by the user — int(input(...)) would be the safe equivalent here.
title = input("請輸入期刊名稱:")
name = input("請輸入期刊檔名縮寫:")
year_start = eval(input("請輸入起始年份:"))
vol_start = eval(input("請輸入起始卷數:"))
vol_end = eval(input("請輸入結束卷數:"))
num = eval(input("請輸入每卷有幾期:"))
sw = eval(input("請選擇每期出刊時間表達方式(0:無/1:季節/2:月份/3:手動輸入):"))
#print(vol_start,vol_num,number,year_start,name)
#--COMPUTE VOL_NUMBER & YEAR_END--
# One volume per year: derive the total volume count and the final year.
vol_number =( vol_end - vol_start ) + 1
year_end =( year_start + vol_number )- 1
#print(year_end,"\n",vol_number,"\n")
#--SET SEASON & MONTH & USER LIST--
season = list(["Spring","Summer","Fall","Winter"])
month = list(["January","February","March","April","May","June","July","August","September","October","November","December"])
# Per-issue labels collected from the user (season/month/free text codes).
userlist = list()
#print(season,month)
# WRITE HTML
hName = name + "_toc_list_of_volumes.html"
f = open(hName,'wt',encoding ='utf8')
# Page skeleton: XHTML doctype, styles and a status-bar script, with the
# journal title interpolated three times (<title>, script text, heading).
message1 = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Project MUSE -- %s</title>
<style type="text/css">
a:link {
color:blue;
text-decoration:none;
}
ul {
font-size: 18px;
font-family:sans-serif,"fantasy","cursive";
}
li {
margin: 5px;
line-height: 30px;
}
table {
margin-top: 30px;
border:#ff3030 1px solid;
}
h2 {
text-align: center;
}
</style>
<script language="javascript">
<!-- Begin
function hidestatus()
{
window.status='Project MUSE -- %s'
return true
}
if (document.layers)
document.captureEvents(Event.MOUSEOVER | Event.MOUSEOUT)
document.onmouseover=hidestatus
document.onmouseout=hidestatus
</script>
</head>
<table width="700px" border="0" align="center" cellpadding="0" cellspacing="0">
<tr>
<td><h2>%s</h2></td>
</tr>
<tr>
<td>
"""%(title,title,title)
f.write(message1)
index = 0  # number of per-issue labels collected so far
# An if/elif chain replaces the original independent `if` statements (whose
# trailing `else` bound only to `if sw == 3`); output is unchanged because
# each branch exhausts the `vol_start <= vol_end` condition before the next
# `if` was evaluated.
#--CHOOSE SEASON TO REPRESENT--
if sw == 1:
    #--USER INPUT SEASON CODE--
    # SECURITY NOTE(review): eval(input(...)) runs arbitrary expressions.
    while index < num:
        userlist.append(eval(input("請依序輸入季節代碼(1:春 2:夏 3:秋 4:冬) :")))
        index = index + 1
    #--OUTPUT HTML TEXT--
    # Newest volume first; issues inside each volume also count down.
    while vol_start <= vol_end:
        print("<ul>\n<b>Volume "+str(vol_end)+", "+str(year_end)+"</b>", file=f)
        j = num
        while j > 0:
            print(
                " <li><a href=\""+str(name)+str(vol_end)+"."+str(j)+".htm\" target=\"\" >Volume "+str(vol_end)+", Number "+str(j)+",", season[userlist[j-1]-1], year_end, "</a></li>",
                file=f)
            j = j-1
        vol_end = vol_end - 1
        year_end = year_end - 1
        print("</ul>", file=f)
#--CHOOSE MONTH TO REPRESENT--
elif sw == 2:
    #--USER INPUT MONTH CODE--
    while index < num:
        userlist.append(eval(input("請依序輸入月份代碼(1~12:一至十二月) :")))
        index = index + 1
    #--OUTPUT HTML TEXT--
    while vol_start <= vol_end:
        print("<ul>\n<b>Volume "+str(vol_end)+", "+str(year_end)+"</b>", file=f)
        j = num
        while j > 0:
            print(
                " <li><a href=\""+str(name)+str(vol_end)+"."+str(j)+".htm\" target=\"\" >Volume "+str(vol_end)+", Number "+str(j)+",", month[userlist[j-1]-1], year_end, "</a></li>",
                file=f)
            j = j-1
        vol_end = vol_end - 1
        year_end = year_end - 1
        print("</ul>", file=f)
#--CHOOSE USER INPUT--
elif sw == 3:
    # Free-text label per issue.
    while index < num:
        userlist.append(input("請依序輸入:"))
        index = index + 1
    #--OUTPUT HTML TEXT--
    while vol_start <= vol_end:
        print("<ul>\n<b>Volume "+str(vol_end)+", "+str(year_end)+"</b>", file=f)
        j = num
        while j > 0:
            print(
                " <li><a href=\""+str(name)+str(vol_end)+"."+str(j)+".htm\" target=\"\" >Volume "+str(vol_end)+", Number "+str(j)+",", userlist[j-1], year_end, "</a></li>",
                file=f)
            j = j-1
        vol_end = vol_end - 1
        year_end = year_end - 1
        print("</ul>", file=f)
#--CHOOSE NO REPRESENT--
else:
    #--OUTPUT HTML TEXT--
    while vol_start <= vol_end:
        print("<ul>\n<b>Volume "+str(vol_end)+", "+str(year_end)+"</b>", file=f)
        j = num
        while j > 0:
            print(
                " <li><a href=\""+str(name)+str(vol_end)+"."+str(j)+".htm\" target=\"\" >Volume "+str(vol_end)+", Number "+str(j)+",", year_end, "</a></li>",
                file=f)
            j = j-1
        vol_end = vol_end - 1
        year_end = year_end - 1
        print("</ul>", file=f)
f.close()
print("輸出!")
# BUG FIX: the original called os.system without ever importing os.
import os
os.system("pause")
986,847 | 0a40ff89333a20410866514e0788ca286c9bc113 | #!/usr/bin/env python3
from util.log import Log
from util.parsing import commit_settings
# Maps an allowed command to the tuple of its allowed subcommands.
# BUG FIX: ('push') without a trailing comma is just the *string* 'push',
# which turned the membership test in valid_ssh into a substring check
# (e.g. 'us' in 'push' was accepted).
AUTHORIZED_CMDS = {'ctl-register': ('add', 'remove'),
                   'ctl-component': ('push',),
                   }
def valid_ssh(cmd):
    """Return True when *cmd* (an argv-style list) names an authorized
    command/subcommand pair from AUTHORIZED_CMDS.

    Returns False for unknown commands and for commands with fewer than two
    words (the original raised IndexError on a one-element list).
    """
    if len(cmd) < 2:
        return False
    subcommands = AUTHORIZED_CMDS.get(cmd[0])
    if subcommands is None:
        return False
    return cmd[1] in subcommands
def execute(cmd):
    """Run *cmd* as a subprocess; exit with status 1 when it fails."""
    import subprocess
    returncode = subprocess.call(cmd)
    if returncode != 0:
        exit(1)
def main():
    """Authorize and run the command forwarded by sshd.

    Reads SSH_ORIGINAL_COMMAND (set by sshd's forced-command mechanism),
    checks it against AUTHORIZED_CMDS and executes it, exiting 1 otherwise.
    """
    import argparse
    import os
    import shlex
    prg_desc = 'This is a command authorization wrapper for ctlweb frontends.'
    parser = argparse.ArgumentParser(description=prg_desc)
    # No options are defined: this only provides --help and rejects extras.
    parser.parse_args()
    commit_settings(None)
    Log.fileoutput()
    Log.info('Got access from ctlweb-frontend')
    cmd = os.getenv('SSH_ORIGINAL_COMMAND')
    Log.debug('ctlweb-frontend tries to execute "%s"' % cmd)
    if not cmd:
        print('Only ssh allowed')
        exit(1)
    # shlex honours shell quoting when splitting into argv form.
    cmd = shlex.split(cmd)
    if valid_ssh(cmd):
        execute(cmd)
    else:
        print('You are not allowed to execute this command!')
        exit(1)
# Entry point when invoked as sshd's forced command.
if __name__ == '__main__':
    main()
|
986,848 | 9400887b72efd45f603783cac9b62da5c236da7e | from tenant.coursesmart.models import *
from django.contrib import admin
# Expose RedirectorTemplate in the Django admin with the default ModelAdmin.
admin.site.register(RedirectorTemplate)
|
986,849 | bffc8eb2a548ba2da99331070a6d4b9ff79fc76d | import time
import subprocess as cmd
# Keep the process alive indefinitely, sleeping so it does not busy-spin.
# NOTE(review): the subprocess import above is unused here — presumably a
# leftover alongside the commented-out ctime logging; confirm before removal.
while True:
    time.sleep(0.5)
    #print(time.ctime())
|
986,850 | 84f4ec7682c497f72ade10e02dd4069367178db4 | from os import environ, path
from dotenv import load_dotenv
# import redis
# Resolve the project base directory and load variables from its .env file.
# NOTE(review): path.dirname(__name__) uses the *module name*, not a path —
# it resolves to the current working directory; __file__ was likely intended.
basedir = path.abspath(path.dirname(__name__))
load_dotenv(path.join(basedir,".env"))
print(basedir)  # debug leftover: prints the resolved base directory
class Config(object):
    """Set Flask configuration from .env file."""
    # General Config
    SECRET_KEY = environ.get("SECRET_KEY")
    FLASK_APP = environ.get("FLASK_APP")
    DEBUG = False
    # Static Assets
    STATIC_FOLDER = 'static'
    TEMPLATES_FOLDER = 'templates'
    # Flask-Assets (LESS compilation settings)
    LESS_BIN = environ.get('LESS_BIN')
    ASSETS_DEBUG = environ.get('ASSETS_DEBUG')
    LESS_RUN_IN_DEBUG = environ.get('LESS_RUN_IN_DEBUG')
    # Flask-SQLAlchemy
    SQLALCHEMY_DATABASE_URI = environ.get('SQLALCHEMY_DATABASE_URI')
    SQLALCHEMY_ECHO = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Flask-Session (redis-backed sessions, currently disabled)
    # SESSION_TYPE = environ.get('SESSION_TYPE')
    # SESSION_REDIS = redis.from_url(environ.get('SESSION_REDIS'))
class ProductionConfig(Config):
    """Production overrides: debugging off, production database host."""
    DEVELOPMENT = False
    DEBUG = False
    DB_HOST = 'my.production.database'
class DevConfig(Config):
    """Development overrides: debug and testing enabled."""
    FLASK_ENV = 'development'
    DEBUG = True
    TESTING = True
    # DATABASE_URI = environ.get('SQLALCHEMY_DATABASE_URI')
986,851 | 11ce16a273c708193cf357153f2c984bcb8680e3 | var = 77
def func():
global var
var = 100
print(locals())
func()
print(var)
|
986,852 | 1a568883b12faf5f55f28843726d622240164e98 | from setuptools import setup, find_packages
# Package metadata for the nPYc toolbox.
# BUG FIX below: long_description uses reStructuredText directives
# (".. image::", ":target:"), so the content type must be text/x-rst —
# declaring it as markdown makes PyPI render the directives literally.
setup(name='nPYc',
      version='1.2.8',
      description='National Phenome Centre toolbox',
      url='https://github.com/phenomecentre/npyc-toolbox',
      author='National Phenome Centre',
      author_email='phenomecentre@imperial.ac.uk',
      license='MIT',
      packages=find_packages(),
      install_requires=[
          'cycler>=0.10.0',
          'iPython>=6.3.1',
          #'isaExplorer>=0.1',
          #'isatools>=0.9.3',
          'Jinja2>=3.0.1',
          'lmfit>=0.9.7',
          #'markupsafe==2.0.1',
          'matplotlib==3.5.2',
          'networkx>=2.5.1',
          #'numpy>=1.14.2',
          #'pandas>=0.23.0',
          'numpy~=1.23.3',
          'openpyxl',
          #'jsonschema~=3.2.0',
          'pandas~=1.5.0',
          'plotly>=3.1.0',
          'pyChemometrics>=0.1',
          'scikit-learn>=0.19.1',
          'scipy>=1.1.0',
          'seaborn>=0.8.1',
          'setuptools>=39.1.0',
          'statsmodels>=0.9.0'
      ],
      classifiers = [
          "Programming Language :: Python",
          "Programming Language :: Python :: 3.9",
          "Intended Audience :: Science/Research",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Topic :: Scientific/Engineering :: Bio-Informatics",
      ],
      long_description = """\
Toolbox for preprocessing of metabolic profiling datasets
---------------------------------------------------------
.. image:: https://travis-ci.org/phenomecentre/nPYc-Toolbox.svg?branch=master
 :target: https://travis-ci.org/phenomecentre/nPYc-Toolbox
 :alt: Travis CI build status
.. image:: https://readthedocs.org/projects/npyc-toolbox/badge/?version=latest
 :target: http://npyc-toolbox.readthedocs.io/en/latest/?badge=latest
 :alt: Documentation Status
.. image:: https://codecov.io/gh/phenomecentre/nPYc-Toolbox/branch/master/graph/badge.svg
 :target: https://codecov.io/gh/phenomecentre/nPYc-Toolbox
 :alt: Test coverage
|
The nPYc toolbox offers functions for the import, preprocessing, and QC of metabolic profiling datasets.
Documentation can be found on `Read the Docs <http://npyc-toolbox.readthedocs.io/en/latest/?badge=latest>`_.
Imports
 - Peak-picked LC-MS data (XCMS, Progenesis QI, *&* Metaboscape)
 - Raw NMR spectra (Bruker format)
 - Targeted datasets (TargetLynx, Bruker BI-LISA, *&* BI-Quant-Ur)
Provides
 - Batch *&* drift correction for LC-MS datasets
 - Feature filtering by RSD *&* linearity of response
 - Calculation of spectral line-width in NMR
 - PCA of datasets
 - Visualisation of datasets
Exports
 - Basic tabular csv
 - `ISA-TAB <http://isa-tools.org>`_
The nPYc toolbox is `developed <https://github.com/phenomecentre/npyc-toolbox>`_ by the informatics team at `The National Phenome Centre <http://phenomecentre.org/>`_ at `Imperial College London <http://imperial.ac.uk/>`_.
""",
      long_description_content_type="text/x-rst",
      # NOTE(review): 'documentation' is not a standard setup() keyword and
      # is ignored by setuptools — confirm whether project_urls was intended.
      documentation='http://npyc-toolbox.readthedocs.io/en/latest/?badge=stable',
      include_package_data=True,
      zip_safe=False
      )
|
986,853 | 78a9a4ae2dbe65e1482d49d83fda0374e6b5ddda | import os
products = []
if os.path.isfile('products.csv'):
print('找到檔案')
#讀取檔案
with open('products.csv', 'r', encoding = 'utf-8') as f:
for line in f:
if '商品, 價格' in line:
continue
name, price = line.strip().split(',')
products.append([name, price])
else:
print('找不到檔案')
print(products)
#輸入新加入的名稱價格
while True:
name = input('請輸入商品名稱: ')
if name == 'q':
break
price = input('請輸入商品價格: ')
p = []
p = [name, price]
products.append(p)
# 印出商品價格
print(products)
for p in products:
print(p[0], '的價格為', p[1])
#寫入檔案
with open('products.csv', 'w', encoding = 'utf-8') as f:
f.write('商品, 價格\n')
for p in products:
f.write(p[0] + ',' + p[1] + '\n')
# data = [1, 3, 5, 7, 9]
# with open('test.txt', 'w') as f:
# for d in data:
# f.write(str(d) + '\n')
|
986,854 | a92c0494546519ce612f84e8e350e6f8edddaefe | from setuptools import setup
def reqs():
    """Read requirements.txt and return its non-empty requirement strings.

    The original returned '' entries for blank lines (including the
    trailing newline), which pollutes install_requires.
    """
    with open("requirements.txt") as r:
        return [line.strip() for line in r if line.strip()]
# Package metadata; install_requires is read from requirements.txt at
# build time via reqs().
setup(
    name='django-semantic-mediawiki',
    packages=['django_semantic_mediawiki'],
    version='0.1',
    description='Use the semantic mediawiki API as django models',
    author='Nikita Marchant',
    author_email='nikita.marchant@gmail.com',
    url='https://github.com/C4ptainCrunch/django-semantic-mediawiki',
    download_url='https://github.com/C4ptainCrunch/django-semantic-mediawiki/tarball/0.1',
    keywords=['django', 'mediawiki', 'api', 'manager', 'queryset', 'orm'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2 :: Only',
        'Topic :: Database',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=reqs()
)
|
986,855 | 2f9c44e51b820d047fecafe49ff91631cd7dea9c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 10:26:09 2020
@author: samuel
"""
import sys
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def format_data(path, data=None):
    """Load a retinopathy label CSV and attach a human-readable level name.

    Parameters
    ----------
    path : str
        CSV file with an image-id column and an integer ``level`` column.
    data : str, optional
        'Aptos' renames the columns to image/level; 'eye_test' drops the
        Kaggle ``Usage`` column; anything else leaves the frame as read.

    Returns
    -------
    pandas.DataFrame
        The frame with an added ``Level_Name`` column.
    """
    df = pd.read_csv(path)
    if data == 'Aptos':
        df.columns = ['image', 'level']
    elif data == 'eye_test':
        df = df.drop('Usage', axis=1)
    level_names = {0: 'Normal',
                   1: 'Mild',
                   2: 'Moderate',
                   3: 'Severe',
                   4: 'Proliferative'}
    df['Level_Name'] = df['level'].map(level_names)
    return df
# Command-line interface for the distribution plot.
parser = argparse.ArgumentParser('parameters')
parser.add_argument("--data", "-d", type=str, default='combined',
                    help="the data distribution to plot")
parser.add_argument('--title', '-t', type=str,
                    help='title of the plot')
# NOTE(review): type=bool makes any non-empty string truthy (argparse does
# not parse 'False' as False) — confirm action='store_true' was intended.
parser.add_argument('--show', '-s', type=bool, default=False,
                    help='show title or not')
args = parser.parse_args()
if __name__ == '__main__':
    # Load the three label sources: Aptos train, EyePACS train and test.
    data_1 = format_data('data/train.csv', data='Aptos')
    data_2 = format_data('data/trainLabels.csv')
    data_3 = format_data('data/retinopathy_solution.csv', data='eye_test')
    # Select which subset to plot.
    if args.data == 'combined':
        data = pd.concat([data_1, data_2, data_3])
        title = args.title
    elif args.data == 'aptos':
        data = data_1
        title = args.title
    elif args.data == 'eyepacs':
        data = pd.concat([data_2, data_3])
        title = args.title
    else:
        print('This data is not available')
        sys.exit()
    # Count plot of disease severity, ordered from Normal to Proliferative.
    ax = sns.countplot(
        x='Level_Name',
        data=data, order=['Normal',
                          'Mild',
                          'Moderate',
                          'Severe',
                          'Proliferative'])
    ax.set(xlabel="Disease Level")
    if args.show == True:
        ax.set_title(title)
    plt.show()
986,856 | 6e4cc1181bacbec034de48952c7e14bcb61f1284 | import time
its = 1000  # iterations per dimension; each benchmark makes its**2 calls
start = time.time()
def a(xy):
    """Benchmark body: tuple-unpack the pair, then add (result discarded)."""
    first, second = xy
    total = first + second  # noqa f841
def b(xy):
    """Benchmark body: add the pair via indexing, no unpacking."""
    total = xy[0] + xy[1]  # noqa f841
# Time the tuple-unpacking variant over its**2 calls.
for i in range(its):
    for j in range(its):
        ij = (i, j)
        a(ij)
end = time.time()
print("x,y {}".format(end-start))
# Time the indexing variant over the same workload.
start = time.time()
for i in range(its):
    for j in range(its):
        ij = (i, j)
        b(ij)
end = time.time()
print("index {}".format(end-start))
986,857 | 0e1fac40e4166781a8359220fb845725270c2bd2 | import numpy as np
from r5 import *
# Problem constants (l, L and d appear unused in this script; tol is the
# default residual tolerance).
l = 8
L = 6
d = 6
tol = 1e-6
def penrose(a):
    """Right Moore-Penrose pseudoinverse A^T (A A^T)^-1 of a full-row-rank matrix."""
    gram = a @ a.T
    return a.T @ np.linalg.inv(gram)
def solve(x, f, df, tol, maxit):
    """Newton-type iteration solving f(x) = 0, updating *x* in place.

    Each step subtracts ``penrose(df(x)) @ f(x)`` from *x* until the
    squared residual ``f(x)·f(x)`` drops to *tol*.

    Returns True on convergence, False once *maxit* iterations are exceeded.
    """
    iterations = 0
    while True:
        jac = df(x)
        pinv = penrose(jac)
        # pinv = np.linalg.inv(jac)
        fv = f(x)
        step = pinv @ fv
        fvnorm = np.inner(fv, fv)
        if fvnorm <= tol:
            return True
        # Full (undamped) Newton step.  The original computed a damping
        # factor min(1, 1/fvnorm) but immediately overwrote it with 1, so
        # the dead computation has been removed.
        x -= step
        iterations += 1
        if iterations > maxit:
            return False
# x = x - a * z
if (__name__ == '__main__'):
    # Initial guess: first four entries are unknowns, last two are fixed.
    xv = np.array([0.5,0.5,0.9,0.9,3.9,2.4])
    xnv = xv[0:4]
    # Restrict F (from r5) to the four unknowns by pinning xv[4], xv[5].
    Fn = lambda y : F(np.concatenate((y, np.array([xv[4], xv[5]]))))
    solve(xnv, Fn, hessr, 1e-4, 1000)
    print("x = ", xnv)
    print("F(x) = ", Fn(xnv))
    # solve(xv, F, hess, 1e-4, 1000)
    # print("x = ", xv)
|
986,858 | 8a83d0bdc29b9153201957dada68f4dd8cb37460 | import csv
import os
import tarfile
import random
from app import db
from models import User, Track, MetaData, Vote
from song_recommender import generate_recommendations
TRACK_DATA_PATH = os.path.join(os.path.dirname(__file__), '../sample_data/tracks_tiny.csv')
TRACK_ARCHIVE_PATH = os.path.join(os.path.dirname(__file__), '../sample_data/tracks.tar.gz')
TRACK_EXTRACT_PATH = os.path.join(os.path.dirname(__file__), '../sample_data')
TRACK_ATTRIBUTES = (
'artist_name',
'title',
'release'
)
META_ATTRIBUTES = (
'year',
'artist_familiarity',
'artist_hotttnesss',
'artist_latitude',
'artist_longitude',
'duration',
'end_of_fade_in',
'key',
'key_confidence',
'loudness',
'mode',
'mode_confidence',
'song_hotttnesss',
'start_of_fade_out',
'tempo',
'time_signature',
'time_signature_confidence'
)
def seed_database(user_count = 3):
    """Populate the database with demo users, tracks, votes and recommendations.

    Note: this module is Python 2 (print statements, xrange).
    """
    print 'Seeding users'
    user = seed_user()
    print 'Seeding tracks'
    seed_tracks()
    print 'Seeding votes'
    seed_votes(user)
    # Additional demo users named demo0, demo1, ...
    for i in xrange(user_count - 1):
        user = seed_user('demo' + str(i))
        seed_votes(user)
    print 'Generating recommendations'
    generate_recommendations()
def seed_user(username='demo'):
    """Create and persist a user with password 'demo'; return the new user."""
    new_user = User(username=username, password='demo')
    db.session.add(new_user)
    db.session.commit()
    return new_user
def seed_tracks():
    """Load the sample CSV and persist Track rows plus their MetaData rows."""
    with open(TRACK_DATA_PATH, 'rb') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=';')
        tracks = []
        meta_data = []
        for data in reader:
            attrs = normalize_attrs(data)
            track_attrs = slice_attrs(attrs, TRACK_ATTRIBUTES)
            meta_attrs = slice_attrs(attrs, META_ATTRIBUTES)
            # The CSV uses Million Song Dataset spellings; the models don't.
            meta_attrs['artist_hotness'] = meta_attrs.pop('artist_hotttnesss')
            meta_attrs['song_hotness'] = meta_attrs.pop('song_hotttnesss')
            tracks.append(Track(**track_attrs))
            meta_data.append(MetaData(**meta_attrs))
        db.session.add_all(tracks)
        # Commit first so the tracks get primary keys to link metadata to.
        db.session.commit()
        for i, track in enumerate(tracks):
            meta_data[i].track_id = track.id
        db.session.add_all(meta_data)
        db.session.commit()
def seed_votes(user):
    """Create synthetic votes for *user*: like pre-1991 tracks, dislike the rest.

    About 20% of tracks in each group are randomly skipped so users differ.
    """
    positive = db.session.query(MetaData.track_id, MetaData.year).filter(MetaData.year <= 1990).all()
    negative = db.session.query(MetaData.track_id, MetaData.year).filter(MetaData.year > 1990).all()
    votes = []
    for data in positive:
        if random.random() < 0.2:
            continue
        track_id, _ = data
        votes.append(Vote(user_id=user.id,
                          track_id=track_id,
                          vote_flag=True))
    for data in negative:
        if random.random() < 0.2:
            continue
        track_id, _ = data
        votes.append(Vote(user_id=user.id,
                          track_id=track_id,
                          vote_flag=False))
    db.session.add_all(votes)
    db.session.commit()
def normalize_attrs(data):
    """Decode every value of *data* from UTF-8 bytes, keeping the keys."""
    return dict((key, raw.decode('utf-8')) for key, raw in data.items())
def slice_attrs(attrs, to_slice):
    """Return a dict containing only the keys of *attrs* named in *to_slice*."""
    return {attr: attrs[attr] for attr in to_slice}
|
986,859 | 17e810b0b58a6d7b2d004630e4ff5d3d7ff6d63f | from FrontEnd.Elements.Element import Element
import pygame
class SingleClickButton(Element):
    """A clickable icon button with idle / hover / selected visuals."""
    # state codes used throughout the class:
    # 0 == idle
    # 1 == hover
    # 2 == select
    # Shared base surfaces, one per state, rescaled per instance in __init__.
    image = pygame.Surface((100, 100))
    image_hover = pygame.Surface((100, 100))
    image_select = pygame.Surface((100, 100))
    image.fill((255, 255, 255))
    image_hover.fill((245, 245, 245))
    image_select.fill((235, 235, 235))
def __init__(self, process, location, size, icon_size, url, func: str):
Element.__init__(self, process)
self.size = size
self.icon_size = icon_size
self.icon = pygame.transform.smoothscale(pygame.image.load(url), icon_size)
self.image = pygame.transform.smoothscale(SingleClickButton.image, size)
self.image_hover = pygame.transform.smoothscale(SingleClickButton.image_hover, size)
self.image_select = pygame.transform.smoothscale(SingleClickButton.image_select, size)
if func == 'apply':
self.icon_notice = pygame.transform.smoothscale(
pygame.image.load('./resources/UserWindowUI/mail_notice.png'), icon_size)
self.location = location
self.state = 0
self.func = func
self.pressed = False
self.notice = False
def pos_in(self, pos):
x = pos[0]
y = pos[1]
if self.location[0] < x < self.location[0] + self.size[0] \
and self.location[1] < y < self.location[1] + self.size[1]:
return True
return False
def getEvent(self, event):
if event.type == pygame.MOUSEMOTION:
if self.pos_in(event.pos):
if self.state != 2:
self.state = 1
else:
self.state = 0
return
if event.type in [pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP] and self.pos_in(event.pos):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == pygame.BUTTON_LEFT:
self.state = 2
if event.type == pygame.MOUSEBUTTONUP and event.button == pygame.BUTTON_LEFT:
self.state = 0
if self.pos_in(event.pos):
if self.func == 'add':
self.process.createSearchWindow()
elif self.func == 'create':
self.pressed = True
elif self.func == 'apply':
self.notice = False
self.process.createFriendApplyWindow()
def display(self):
if self.state == 0:
self.surface = self.image
elif self.state == 1:
self.surface = self.image_hover
else:
self.surface = self.image_select
surface = self.surface.copy()
surface.blit(self.icon, ((self.size[0] - self.icon_size[0]) // 2, (self.size[1] - self.icon_size[1]) // 2))
if self.func == 'apply' and self.notice:
surface.blit(self.icon_notice, ((self.size[0] - self.icon_size[0]) // 2, (self.size[1] - self.icon_size[1]) // 2))
return surface
def update(self):
pass
|
986,860 | bdb2484d051ea35169196591a6e874193c16cbea | from itertools import combinations
from collections import defaultdict
def increasing(lst):
    """Return True when *lst* is strictly ascending (vacuously True if short)."""
    return all(earlier < later for earlier, later in zip(lst[:-1], lst[1:]))
def search():
    """Recover the grid row missing from the module-level *lines*.

    *lines* holds 2*N-1 strictly increasing tuples: apparently the rows of
    a hidden N x N grid minus one, plus all N of its columns. Try every
    choice of N rows; a valid choice has strictly increasing columns and
    accounts for every input line. The tuple that then appears once more
    among rows+columns than in the input is the missing row.
    """
    for rows in combinations(lines, N):
        cols = tuple(zip(*rows))
        # columns of a valid grid must also be strictly increasing
        if not all(increasing(col) for col in cols):
            continue
        # every given line must be one of the chosen rows or their columns
        leftover = [line for line in lines if line not in cols and line not in rows]
        if any(leftover):
            continue
        balance = defaultdict(int)
        for tup in cols + rows:
            balance[tup] += 1
        for tup in lines:
            balance[tup] -= 1
        for tup, surplus in balance.items():
            if surplus == 1:
                return tup
# Driver: read T cases; each case gives N and then 2*N-1 integer rows.
# N and lines stay module-level globals because search() reads them.
for case_no in range(int(input())):
    N = int(input())
    lines = [tuple(map(int, input().split())) for _ in range(2 * N - 1)]
    lines.sort()
    print('Case #%d:' % (case_no + 1), ' '.join(map(str, search())))
|
986,861 | a9260a4ada64f381f3f414faa7e4c6f32d0a25d6 | """
setup.py for python-orderby, following PyPA style
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The long description shown on PyPI is taken verbatim from the README.
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='orderby',
    version='0.0.2',
    description='Python key functions for multi-field ordering',
    long_description=long_description,
    url='https://github.com/jvtm/python-orderby',
    author='Jyrki Muukkonen',
    author_email='jvtm@kruu.org',
    license='Apache 2.0',
    # Full classifier list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='sort order orderby development',
    packages=find_packages(),
)
|
986,862 | 0d49ff669d1230779a965b0529e49cee6eb780f8 | class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinarySearchTree:
    """A binary search tree; smaller values go left, larger go right."""

    def __init__(self):
        self.root = None

    def insert(self, value):
        """Insert *value* into the tree.

        Returns True on success, False when the value already exists
        (duplicates are rejected).
        """
        fresh = Node(value)
        if self.root is None:
            self.root = fresh
            return True
        current = self.root
        while True:
            if fresh.value == current.value:
                return False
            if fresh.value < current.value:
                if current.left is None:
                    current.left = fresh
                    return True
                current = current.left
            else:
                if current.right is None:
                    current.right = fresh
                    return True
                current = current.right
# Demo: build a three-node tree and print each stored value.
my_tree = BinarySearchTree()
for number in (2, 1, 3):
    my_tree.insert(number)

# Resulting tree:
#        2
#       / \
#      1   3
print('Root:', my_tree.root.value)
print('Root->Left:', my_tree.root.left.value)
print('Root->Right:', my_tree.root.right.value)

# Expected output:
# Root: 2
# Root->Left: 1
# Root->Right: 3
986,863 | e021ad07d87abab37b84b23a7892843b90bc9e93 | from PID.PID2d import PID2d
import camera_module
import detection_mod
import util
import threading
import robot_prop
class detection_thread(threading.Thread):
    """Worker thread: runs detection on camera frames in an endless loop.

    Publishes the latest wheel velocities and shoot flag on self.v /
    self.shoot; callers read them via get_attr().
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # PID gains for the 2-D aiming controller
        self.pid = PID2d(2.28, 0.000, -0.0000228)
        self.camera_thread = camera_module.camera_thread()
        self.v = [0., 0.]
        self.shoot = 0

    def run(self):
        print('Running detection module')
        self.camera_thread.start()
        while True:
            frame = self.camera_thread.read()
            coord = detection_mod.get_coord_from_detection(frame)
            left, right, fire = util.get_v_and_shoot(coord, self.pid)
            self.v = [left, right]
            self.shoot = fire

    def get_attr(self):
        """Return the most recent (velocity pair, shoot flag)."""
        return self.v, self.shoot
class detection_non_thread():
    """Synchronous variant of detection_thread: call get_detection() per frame.

    Fix: removed the dead self-assignment ``shoot = shoot`` that the
    original left behind after unpacking.
    """

    def __init__(self):
        # PID gains for the 2-D aiming controller (same as detection_thread)
        self.pid = PID2d(2.28, 0.000, -0.0000228)
        self.camera_thread = camera_module.camera_thread()
        self.camera_thread.start()

    def get_detection(self):
        """Run one detection pass.

        Publishes v1/v2/shoot into the shared robot_prop state and
        returns ([v1, v2], shoot).
        """
        img = self.camera_thread.read()
        coord = detection_mod.get_coord_from_detection(img)
        # NOTE(review): this variant passes img to get_v_and_shoot while
        # detection_thread.run() does not — confirm which signature is current.
        v1, v2, shoot = util.get_v_and_shoot(coord, self.pid, img)
        v = [v1, v2]
        # mirror the result into the shared robot state
        robot_prop.v1 = v1
        robot_prop.v2 = v2
        robot_prop.shoot = shoot
        return v, shoot
|
986,864 | 5b4519aa52c87978111f3ca863cf96ff602d41e4 | print('hello neha')
|
986,865 | cf3ad3dcc567b9c3b36e038b618d6a73054a7c11 | #To import required modules:
import numpy as np
import time
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
#matplotlib.rc('text', usetex=True)
##### This module will be used to plot multiple simulated underlying populations generated from ExoplanetsSysSim
#To define some useful constants and functions:
# Survey size and physical constants. NOTE(review): units are mixed —
# lengths are CGS (cm) while masses are SI (kg); the kg factors cancel in
# the mutual-Hill-radius mass ratio, but keep the mix in mind when editing.
N_Kep = 150061 #number of Kepler targets satisfying our cuts to give our observed catalog
AU = 1.496*10.**13. #astronomical unit in cm
Msun = 1.989*10.**30. #Solar mass in kg
Rsun = 6.957*10.**10. #Solar radius in cm
Mearth = 5.972*10.**24 #Earth mass in kg
Rearth = 6.371*10.**8. #Earth radius in cm
def a_from_P(P, Mstar):
    """Kepler's third law: orbital period -> semi-major axis.

    *P* is the period in days, *Mstar* the stellar mass in solar masses.
    Returns the semi-major axis in AU, assuming the planet mass is
    negligible next to the star's (m << Mstar).
    """
    period_yr = P / 365.25
    return period_yr ** (2. / 3.) * (Mstar / 1.0) ** (1. / 3.)
def P_from_a(a, Mstar):
    """Inverse Kepler: semi-major axis -> orbital period.

    *a* is in AU, *Mstar* in solar masses; returns the period in days,
    assuming the planet mass is negligible next to the star's (m << Mstar).
    """
    return 365.25 * (a ** (3. / 2.)) * (Mstar / 1.0) ** (-1. / 2.)
savefigures = False
#loadfiles_directory = 'ExoplanetsSysSim.jl-master/examples/clusters/ACI/Model_Optimization/Non_clustered/Some9_params1_random_weighted_targs150060_maxincl80/'
loadfiles_directory = 'Clustering_Method_Figures/ExoplanetsSysSim/Non_clustered/Sim6/'
##### To load the underlying populations for the non-clustered model:
#To create the lists that will contain all the arrays for each population:
N_sim_non_clustered = []
stellar_mass_all_non_clustered = []
stellar_radii_all_non_clustered = []
P_all_non_clustered = []
e_all_non_clustered = []
radii_all_non_clustered = []
mass_all_non_clustered = []
Mtot_all_non_clustered = []
Rm_all_non_clustered = []
radii_ratio_all_non_clustered = []
N_mH_all_non_clustered = []
P_all_flat_non_clustered = []
Rm_all_flat_non_clustered = []
N_mH_all_flat_non_clustered = []
e_all_flat_non_clustered = []
radii_all_flat_non_clustered = []
radii_ratio_all_flat_non_clustered = []
mass_all_flat_non_clustered = []
#To load the underlying populations:
for run_number in ['']: #range(1,11)
#To first read the number of simulated targets and bounds for the periods and radii:
with open(loadfiles_directory + 'periods_all%s.out' % run_number, 'r') as file:
for line in file:
if line[:26] == '# num_targets_sim_pass_one':
N_sim = int(line[28:])
elif line[:12] == '# min_period':
P_min = float(line[14:])
elif line[:12] == '# max_period':
P_max = float(line[14:])
elif line[:12] == '# min_radius':
radii_min = float(line[24:])
elif line[:12] == '# max_radius':
radii_max = float(line[24:])
P_per_sys = [] #list to be filled with lists of all periods per system (days)
N_sys_with_planets = 0 #counter for the number of simulated systems with planets
with open(loadfiles_directory + 'periods_all%s.out' % run_number, 'r') as file:
for line in file:
if line[0] != '#':
N_sys_with_planets += 1
line = line[1:-2].split(', ')
P_sys = [float(i) for i in line]
P_per_sys.append(P_sys)
#print P_sys
e_per_sys = [] #list to be filled with lists of all eccentricities per system
with open(loadfiles_directory + 'eccentricities_all%s.out' % run_number, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
e_sys = [float(i) for i in line]
e_per_sys.append(e_sys)
#print e_sys
radii_per_sys = [] #list to be filled with lists of all planet radii per system (solar radii)
with open(loadfiles_directory + 'radii_all%s.out' % run_number, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
radii_sys = [float(i) for i in line]
radii_per_sys.append(radii_sys)
#print radii_sys
mass_per_sys = [] #list to be filled with lists of all planet radii per system (solar masses)
with open(loadfiles_directory + 'masses_all%s.out' % run_number, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
mass_sys = [float(i) for i in line]
mass_per_sys.append(mass_sys)
#print mass_sys
stellar_mass_all = np.loadtxt(loadfiles_directory + 'stellar_masses_with_planets%s.out' % run_number) #array of stellar masses of all the systems with a planetary system, in solar masses
stellar_radii_all = np.loadtxt(loadfiles_directory + 'stellar_radii_with_planets%s.out' % run_number) #array of stellar radii of all the systems with a planetary system, in solar radii
N_sim_non_clustered.append(N_sim)
stellar_mass_all_non_clustered.append(stellar_mass_all)
stellar_radii_all_non_clustered.append(stellar_radii_all)
P_all = [] #list to be zero-padded so each list of periods is sorted and has the same length, and then converted to an array
e_all = [] #list to be zero-padded so each list of eccentricities is sorted (by period) and has the same length, and then converted to an array
radii_all = [] #list to be zero-padded so each list of radii is sorted (by period) and has the same length, and then converted to an array
mass_all = [] #list to be zero-padded so each list of masses is sorted (by period) and has the same length, and then converted to an array
Pmin = 0. #set a minimum period (days), discarding planets less than this period
Mmax = max(len(x) for x in P_per_sys) #maximum planet multiplicity generated by the clustering method
for i in range(len(P_per_sys)):
i_sorted = np.argsort(P_per_sys[i]) #array of indices which would sort the system by period
P_sorted = np.array(P_per_sys[i])[i_sorted]
P_sorted_cut = P_sorted[P_sorted > Pmin]
e_sorted_cut = np.array(e_per_sys[i])[i_sorted][P_sorted > Pmin]
radii_sorted_cut = np.array(radii_per_sys[i])[i_sorted][P_sorted > Pmin]
mass_sorted_cut = np.array(mass_per_sys[i])[i_sorted][P_sorted > Pmin]
P_sys = list(P_sorted_cut) + [0]*(Mmax - len(P_sorted_cut)) #zero-pad the list up to Mmax elements
e_sys = list(e_sorted_cut) + [0]*(Mmax - len(e_sorted_cut)) #zero-pad the list up to Mmax elements
radii_sys = list(radii_sorted_cut) + [0]*(Mmax - len(radii_sorted_cut)) #zero-pad the list up to Mmax elements
mass_sys = list(mass_sorted_cut) + [0]*(Mmax - len(mass_sorted_cut)) #zero-pad the list up to Mmax elements
P_all.append(P_sys)
e_all.append(e_sys)
radii_all.append(radii_sys)
mass_all.append(mass_sys)
P_all = np.array(P_all)
e_all = np.array(e_all)
radii_all = np.array(radii_all)
mass_all = np.array(mass_all)
#To convert the radii and masses to Earth units:
radii_all = radii_all*(Rsun/Rearth) #radii in Earth radii
mass_all = mass_all*(Msun/Mearth) #masses in Earth masses
Mtot_all = np.sum(P_all > 0, axis=1) #array of all planet multiplicites
P_all_non_clustered.append(P_all)
e_all_non_clustered.append(e_all)
radii_all_non_clustered.append(radii_all)
mass_all_non_clustered.append(mass_all)
Mtot_all_non_clustered.append(Mtot_all)
#To calculate the underlying period ratios, radii ratios, and separations in mutual Hill radii:
Rm_all = [] #list to be filled with all the period ratios
radii_ratio_all = [] #list to be filled with all the radii ratios
N_mH_all = [] #list to be filled with all the separations between adjacent planet pairs in units of mutual Hill radii
for i in range(len(P_all)):
Mstar_system = stellar_mass_all[i] #mass of the star for this system, in solar masses
P_all_system = P_all[i][P_all[i] > 0]
e_all_system = e_all[i][P_all[i] > 0]
radii_all_system = radii_all[i][P_all[i] > 0]
mass_all_system = mass_all[i][P_all[i] > 0]
#To calculate all the period ratios:
Rm_all_system = list(P_all_system[1:]/P_all_system[0:-1]) #list of period ratios in this system
Rm_all_system = np.array(Rm_all_system + [0]*(Mmax - 1 - len(Rm_all_system))) #to add filler 0's to Rm_all_system to pad it to Mmax - 1 elements
Rm_all.append(Rm_all_system)
#To calculate all the radii ratios:
radii_ratio_all_system = list(radii_all_system[1:]/radii_all_system[0:-1]) #list of radii ratios in this system
radii_ratio_all_system = np.array(radii_ratio_all_system + [0]*(Mmax - 1 - len(radii_ratio_all_system))) #to add filler 0's to radii_ratio_all_system to pad it to Mmax - 1 elements
radii_ratio_all.append(radii_ratio_all_system)
#To calculate all the separations in mutual Hill radii between adjacent planet pairs:
a_all_system = a_from_P(P_all_system, Mstar_system)
R_mH_all_system = ((a_all_system[0:-1] + a_all_system[1:])/2.)*(Mearth*(mass_all_system[0:-1] + mass_all_system[1:])/(3.*Mstar_system*Msun))**(1./3.) #mutual Hill radii between adjacent planet pairs in this system, in AU
R_sep_all_system = a_all_system[1:] - a_all_system[0:-1] #separations between adjacent planet pairs in this system, in AU, ignoring eccentricities
N_mH_all_system = list(R_sep_all_system/R_mH_all_system) #separations between adjacent planet pairs in this system, in mutual Hill radii
N_mH_all_system = np.array(N_mH_all_system + [0]*(Mmax - 1 - len(N_mH_all_system))) #to add filler 0's to N_mH_all_system to pad it to Mmax - 1 elements
N_mH_all.append(N_mH_all_system)
Rm_all = np.array(Rm_all)
radii_ratio_all = np.array(radii_ratio_all)
N_mH_all = np.array(N_mH_all)
P_all_flat = P_all.flatten() #all the periods of all the planets
P_all_flat = P_all_flat[P_all_flat > 0]
Rm_all_flat = Rm_all.flatten() #all the period ratios of all the adjacent planets
Rm_all_flat = Rm_all_flat[Rm_all_flat > 0]
N_mH_all_flat = N_mH_all.flatten() #all the separations of all the adjacent planets in units of their mutual Hill radii
N_mH_all_flat = N_mH_all_flat[N_mH_all_flat > 0]
e_all_flat = e_all.flatten() #all the eccentricities of all the planets
e_all_flat = e_all_flat[e_all_flat > 0]
radii_all_flat = radii_all.flatten() #all the planet radii, in Earth radii
radii_all_flat = radii_all_flat[radii_all_flat > 0]
radii_ratio_all_flat = radii_ratio_all.flatten() #all the radii ratios of all the adjacent planets
radii_ratio_all_flat = radii_ratio_all_flat[radii_ratio_all_flat > 0]
mass_all_flat = mass_all.flatten() #all the planet masses, in Earth masses
mass_all_flat = mass_all_flat[mass_all_flat > 0]
Rm_all_non_clustered.append(Rm_all)
radii_ratio_all_non_clustered.append(radii_ratio_all)
N_mH_all_non_clustered.append(N_mH_all)
P_all_flat_non_clustered.append(P_all_flat)
Rm_all_flat_non_clustered.append(Rm_all_flat)
N_mH_all_flat_non_clustered.append(N_mH_all_flat)
e_all_flat_non_clustered.append(e_all_flat)
radii_all_flat_non_clustered.append(radii_all_flat)
radii_ratio_all_flat_non_clustered.append(radii_ratio_all_flat)
mass_all_flat_non_clustered.append(mass_all_flat)
##### To load the underlying populations for the clustered model:
#loadfiles_directory2 = 'ExoplanetsSysSim.jl-master/examples/clusters/ACI/Model_Optimization/Clustered_P_R/All_params_random_weighted_targs150060_maxincl80/'
loadfiles_directory2 = 'Clustering_Method_Figures/ExoplanetsSysSim/Power_law_r1_r2_sigma_r/Sim2/'
#To create the lists that will contain all the arrays for each population:
N_sim_clustered = []
stellar_mass_all_clustered = []
stellar_radii_all_clustered = []
P_all_clustered = []
e_all_clustered = []
radii_all_clustered = []
mass_all_clustered = []
Mtot_all_clustered = []
Rm_all_clustered = []
radii_ratio_all_clustered = []
N_mH_all_clustered = []
P_all_flat_clustered = []
Rm_all_flat_clustered = []
N_mH_all_flat_clustered = []
e_all_flat_clustered = []
radii_all_flat_clustered = []
radii_ratio_all_flat_clustered = []
mass_all_flat_clustered = []
#To load the underlying populations:
for run_number2 in ['']: #range(1,11)
#To first read the number of simulated targets and bounds for the periods and radii:
with open(loadfiles_directory2 + 'periods_all%s.out' % run_number2, 'r') as file:
for line in file:
if line[:26] == '# num_targets_sim_pass_one':
N_sim2 = int(line[28:])
##### Assuming the bounds on the periods and radii are the same as the previously loaded model!
P_per_sys2 = [] #list to be filled with lists of all periods per system (days)
N_sys_with_planets2 = 0 #counter for the number of simulated systems with planets
with open(loadfiles_directory2 + 'periods_all%s.out' % run_number2, 'r') as file:
for line in file:
if line[0] != '#':
N_sys_with_planets2 += 1
line = line[1:-2].split(', ')
P_sys = [float(i) for i in line]
P_per_sys2.append(P_sys)
e_per_sys2 = [] #list to be filled with lists of all eccentricities per system
with open(loadfiles_directory2 + 'eccentricities_all%s.out' % run_number2, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
e_sys = [float(i) for i in line]
e_per_sys2.append(e_sys)
radii_per_sys2 = [] #list to be filled with lists of all planet radii per system (solar radii)
with open(loadfiles_directory2 + 'radii_all%s.out' % run_number2, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
radii_sys = [float(i) for i in line]
radii_per_sys2.append(radii_sys)
mass_per_sys2 = [] #list to be filled with lists of all planet radii per system (solar masses)
with open(loadfiles_directory2 + 'masses_all%s.out' % run_number2, 'r') as file:
for line in file:
if line[0] != '#':
line = line[1:-2].split(', ')
mass_sys = [float(i) for i in line]
mass_per_sys2.append(mass_sys)
stellar_mass_all2 = np.loadtxt(loadfiles_directory2 + 'stellar_masses_with_planets%s.out' % run_number2) #array of stellar masses of all the systems with a planetary system, in solar masses
stellar_radii_all2 = np.loadtxt(loadfiles_directory2 + 'stellar_radii_with_planets%s.out' % run_number2) #array of stellar radii of all the systems with a planetary system, in solar radii
N_sim_clustered.append(N_sim2)
stellar_mass_all_clustered.append(stellar_mass_all2)
stellar_radii_all_clustered.append(stellar_radii_all2)
P_all2 = [] #list to be zero-padded so each list of periods is sorted and has the same length, and then converted to an array
e_all2 = [] #list to be zero-padded so each list of eccentricities is sorted (by period) and has the same length, and then converted to an array
radii_all2 = [] #list to be zero-padded so each list of radii is sorted (by period) and has the same length, and then converted to an array
mass_all2 = [] #list to be zero-padded so each list of masses is sorted (by period) and has the same length, and then converted to an array
Pmin2 = 0. #set a minimum period (days), discarding planets less than this period
Mmax2 = max(len(x) for x in P_per_sys2) #maximum planet multiplicity generated by the clustering method
for i in range(len(P_per_sys2)):
i_sorted = np.argsort(P_per_sys2[i]) #array of indices which would sort the system by period
P_sorted = np.array(P_per_sys2[i])[i_sorted]
P_sorted_cut = P_sorted[P_sorted > Pmin]
e_sorted_cut = np.array(e_per_sys2[i])[i_sorted][P_sorted > Pmin]
radii_sorted_cut = np.array(radii_per_sys2[i])[i_sorted][P_sorted > Pmin]
mass_sorted_cut = np.array(mass_per_sys2[i])[i_sorted][P_sorted > Pmin]
P_sys = list(P_sorted_cut) + [0]*(Mmax2 - len(P_sorted_cut)) #zero-pad the list up to Mmax elements
e_sys = list(e_sorted_cut) + [0]*(Mmax2 - len(e_sorted_cut)) #zero-pad the list up to Mmax elements
radii_sys = list(radii_sorted_cut) + [0]*(Mmax2 - len(radii_sorted_cut)) #zero-pad the list up to Mmax elements
mass_sys = list(mass_sorted_cut) + [0]*(Mmax2 - len(mass_sorted_cut)) #zero-pad the list up to Mmax elements
P_all2.append(P_sys)
e_all2.append(e_sys)
radii_all2.append(radii_sys)
mass_all2.append(mass_sys)
P_all2 = np.array(P_all2)
e_all2 = np.array(e_all2)
radii_all2 = np.array(radii_all2)
mass_all2 = np.array(mass_all2)
#To convert the radii and masses to Earth units:
radii_all2 = radii_all2*(Rsun/Rearth) #radii in Earth radii
mass_all2 = mass_all2*(Msun/Mearth) #masses in Earth masses
Mtot_all2 = np.sum(P_all2 > 0, axis=1) #array of all planet multiplicites
P_all_clustered.append(P_all2)
e_all_clustered.append(e_all2)
radii_all_clustered.append(radii_all2)
mass_all_clustered.append(mass_all2)
Mtot_all_clustered.append(Mtot_all2)
#To calculate the underlying period ratios, radii ratios, and separations in mutual Hill radii:
Rm_all2 = [] #list to be filled with all the period ratios
radii_ratio_all2 = [] #list to be filled with all the radii ratios
N_mH_all2 = [] #list to be filled with all the separations between adjacent planet pairs in units of mutual Hill radii
for i in range(len(P_all2)):
Mstar_system = stellar_mass_all2[i] #mass of the star for this system, in solar masses
P_all_system = P_all2[i][P_all2[i] > 0]
e_all_system = e_all2[i][P_all2[i] > 0]
radii_all_system = radii_all2[i][P_all2[i] > 0]
mass_all_system = mass_all2[i][P_all2[i] > 0]
#To calculate all the period ratios:
Rm_all_system = list(P_all_system[1:]/P_all_system[0:-1]) #list of period ratios in this system
Rm_all_system = np.array(Rm_all_system + [0]*(Mmax2 - 1 - len(Rm_all_system))) #to add filler 0's to Rm_all_system to pad it to Mmax - 1 elements
Rm_all2.append(Rm_all_system)
#To calculate all the radii ratios:
radii_ratio_all_system = list(radii_all_system[1:]/radii_all_system[0:-1]) #list of radii ratios in this system
radii_ratio_all_system = np.array(radii_ratio_all_system + [0]*(Mmax2 - 1 - len(radii_ratio_all_system))) #to add filler 0's to radii_ratio_all_system to pad it to Mmax - 1 elements
radii_ratio_all2.append(radii_ratio_all_system)
#To calculate all the separations in mutual Hill radii between adjacent planet pairs:
a_all_system = a_from_P(P_all_system, Mstar_system)
R_mH_all_system = ((a_all_system[0:-1] + a_all_system[1:])/2.)*(Mearth*(mass_all_system[0:-1] + mass_all_system[1:])/(3.*Mstar_system*Msun))**(1./3.) #mutual Hill radii between adjacent planet pairs in this system, in AU
R_sep_all_system = a_all_system[1:] - a_all_system[0:-1] #separations between adjacent planet pairs in this system, in AU, ignoring eccentricities
N_mH_all_system = list(R_sep_all_system/R_mH_all_system) #separations between adjacent planet pairs in this system, in mutual Hill radii
N_mH_all_system = np.array(N_mH_all_system + [0]*(Mmax2 - 1 - len(N_mH_all_system))) #to add filler 0's to N_mH_all_system to pad it to Mmax - 1 elements
N_mH_all2.append(N_mH_all_system)
Rm_all2 = np.array(Rm_all2)
radii_ratio_all2 = np.array(radii_ratio_all2)
N_mH_all2 = np.array(N_mH_all2)
P_all_flat2 = P_all2.flatten() #all the periods of all the planets
P_all_flat2 = P_all_flat2[P_all_flat2 > 0]
Rm_all_flat2 = Rm_all2.flatten() #all the period ratios of all the adjacent planets
Rm_all_flat2 = Rm_all_flat2[Rm_all_flat2 > 0]
N_mH_all_flat2 = N_mH_all2.flatten() #all the separations of all the adjacent planets in units of their mutual Hill radii
N_mH_all_flat2 = N_mH_all_flat2[N_mH_all_flat2 > 0]
e_all_flat2 = e_all2.flatten() #all the eccentricities of all the planets
e_all_flat2 = e_all_flat2[e_all_flat2 > 0]
radii_all_flat2 = radii_all2.flatten() #all the planet radii, in Earth radii
radii_all_flat2 = radii_all_flat2[radii_all_flat2 > 0]
radii_ratio_all_flat2 = radii_ratio_all2.flatten() #all the radii ratios of all the adjacent planets
radii_ratio_all_flat2 = radii_ratio_all_flat2[radii_ratio_all_flat2 > 0]
mass_all_flat2 = mass_all2.flatten() #all the planet masses, in Earth masses
mass_all_flat2 = mass_all_flat2[mass_all_flat2 > 0]
Rm_all_clustered.append(Rm_all2)
radii_ratio_all_clustered.append(radii_ratio_all2)
N_mH_all_clustered.append(N_mH_all2)
P_all_flat_clustered.append(P_all_flat2)
Rm_all_flat_clustered.append(Rm_all_flat2)
N_mH_all_flat_clustered.append(N_mH_all_flat2)
e_all_flat_clustered.append(e_all_flat2)
radii_all_flat_clustered.append(radii_all_flat2)
radii_ratio_all_flat_clustered.append(radii_ratio_all_flat2)
mass_all_flat_clustered.append(mass_all_flat2)
#'''
#To plot the underlying distributions of the two models as individual panels:
model_name = 'ExoplanetsSysSim_Models_compare'
savefigures_directory_compare = 'Clustering_Method_Figures/ExoplanetsSysSim/Underlying_Compare/'
subdirectory = 'Talk_Figures/'
#Make sure these labels match the models being loaded!
model1_label = 'Non clustered'
model2_label = 'Clustered'
#To plot the multiplicities:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
max_M = np.max(([np.max(x) for x in Mtot_all_non_clustered], [np.max(x) for x in Mtot_all_clustered]))
for i in range(len(N_sim_non_clustered)):
counts, bins = np.histogram(Mtot_all_non_clustered[i], bins=max_M+1, range=(-0.5, max_M+0.5))
counts[0] = N_sim_non_clustered[i] - len(Mtot_all_non_clustered[i]) #to compute the number of systems with no planets
bins_mid = (bins[:-1] + bins[1:])/2.
plt.plot(bins_mid, counts/float(N_sim_non_clustered[i]), 'o-', color='r', label= model1_label)
for i in range(len(N_sim_clustered)):
counts, bins = np.histogram(Mtot_all_clustered[i], bins=max_M+1, range=(-0.5, max_M+0.5))
counts[0] = N_sim_clustered[i] - len(Mtot_all_clustered[i]) #to compute the number of systems with no planets
plt.plot(bins_mid, counts/float(N_sim_clustered[i]), 'o-', color='b', label= model2_label)
ax.tick_params(axis='both', labelsize=20)
plt.xlim([0, max_M])
plt.xlabel(r'Number of planets', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_multiplicities_compare_multiple.pdf')
plt.close()
#To plot the periods:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
xmin, xmax = P_min, P_max
for i in range(len(N_sim_non_clustered)):
x1 = P_all_flat_non_clustered[i]
hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=True, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
x2 = P_all_flat_clustered[i]
hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=True, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
plt.xlim([P_min, 1.1*P_max])
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$P$ (days)', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_periods_compare_multiple.pdf')
plt.close()
#To plot the period ratios:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
xmin, xmax = 1., np.max(([np.max(x) for x in Rm_all_flat_non_clustered], [np.max(x) for x in Rm_all_flat_clustered]))
for i in range(len(N_sim_non_clustered)):
x1 = Rm_all_flat_non_clustered[i]
hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=True, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
x2 = Rm_all_flat_clustered[i]
hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=True, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
#plt.xlim([1,5])
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$P_{i+1}/P_i$', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_periodratios_compare_multiple.pdf')
plt.close()
#To plot the eccentricities:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
# --- Overlaid histograms comparing the underlying planet populations of two
# models ("non-clustered", red, vs "clustered", blue), one curve per
# simulation run. NOTE(review): every run is drawn with the same color/label,
# presumably why the legend calls below are commented out.
# Eccentricities (figure/axes were created earlier in the script):
xmin, xmax = 0., np.max(([np.max(x) for x in e_all_flat_non_clustered], [np.max(x) for x in e_all_flat_clustered]))
for i in range(len(N_sim_non_clustered)):
    x1 = e_all_flat_non_clustered[i]
    # weights = 1/len(x1) normalizes each histogram to a fraction of planets
    hist1 = plt.hist(x1, bins=np.linspace(xmin,xmax,101), histtype='step', weights=np.ones(len(x1))/len(x1), color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
    x2 = e_all_flat_clustered[i]
    hist2 = plt.hist(x2, bins=np.linspace(xmin,xmax,101), histtype='step', weights=np.ones(len(x2))/len(x2), color='b', label=model2_label)
ax.tick_params(axis='both', labelsize=20)
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$e$', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
    plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_eccentricities_compare_multiple.pdf')
    plt.close()
#To plot the masses:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
xmin, xmax = np.min(([np.min(x) for x in mass_all_flat_non_clustered], [np.min(x) for x in mass_all_flat_clustered])), np.max(([np.max(x) for x in mass_all_flat_non_clustered], [np.max(x) for x in mass_all_flat_clustered]))
for i in range(len(N_sim_non_clustered)):
    x1 = mass_all_flat_non_clustered[i]
    hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=True, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
    x2 = mass_all_flat_clustered[i]
    hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=True, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
#plt.xlim([np.min(mass_all_flat), 1.1*np.max(mass_all_flat)])
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$M_p$ ($M_\oplus$)', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
    plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_masses_compare_multiple.pdf')
    plt.close()
#To plot the radii:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
# radii_min/radii_max are presumably the draw limits of the radius model — TODO confirm
xmin, xmax = radii_min, radii_max
for i in range(len(N_sim_non_clustered)):
    x1 = radii_all_flat_non_clustered[i]
    hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=True, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
    x2 = radii_all_flat_clustered[i]
    hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=True, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
plt.xlim([0.5,10])
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
    plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_radii_compare_multiple.pdf')
    plt.close()
#To plot the radii ratios:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
xmin, xmax = np.min(([np.min(x) for x in radii_ratio_all_flat_non_clustered], [np.min(x) for x in radii_ratio_all_flat_clustered])), np.max(([np.max(x) for x in radii_ratio_all_flat_non_clustered], [np.max(x) for x in radii_ratio_all_flat_clustered]))
for i in range(len(N_sim_non_clustered)):
    x1 = radii_ratio_all_flat_non_clustered[i]
    hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=False, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
    x2 = radii_ratio_all_flat_clustered[i]
    hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=False, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
plt.xlim([xmin, xmax])
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$R_{p,i+1}/R_{p,i}$', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
    plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_radiiratios_compare_multiple.pdf')
    plt.close()
#To plot the separations in mutual Hill radii:
fig = plt.figure(figsize=(8,4))
plot = GridSpec(1,1,left=0.15,bottom=0.2,right=0.95,top=0.925,wspace=0.1,hspace=0.1)
ax = plt.subplot(plot[0,0])
xmin, xmax = np.min(([np.min(x) for x in N_mH_all_flat_non_clustered], [np.min(x) for x in N_mH_all_flat_clustered])), np.max(([np.max(x) for x in N_mH_all_flat_non_clustered], [np.max(x) for x in N_mH_all_flat_clustered]))
for i in range(len(N_sim_non_clustered)):
    x1 = N_mH_all_flat_non_clustered[i]
    hist1 = plt.hist(x1, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x1))/len(x1), log=True, color='r', label=model1_label)
for i in range(len(N_sim_clustered)):
    x2 = N_mH_all_flat_clustered[i]
    hist2 = plt.hist(x2, bins=np.logspace(np.log10(xmin), np.log10(xmax), 101), histtype='step', weights=np.ones(len(x2))/len(x2), log=True, color='b', label=model2_label)
plt.gca().set_xscale("log")
ax.tick_params(axis='both', labelsize=20)
#plt.ylim([np.min((np.min(hist1[0][hist1[0] > 0]), np.min(hist2[0][hist2[0] > 0]))), np.max((np.max(hist1[0][hist1[0] > 0]), np.max(hist2[0][hist2[0] > 0])))])
plt.xlabel(r'$\Delta$', fontsize=20)
plt.ylabel('Fraction', fontsize=20)
#plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=16) #show the legend
if savefigures == True:
    plt.savefig(savefigures_directory_compare + subdirectory + model_name + '_underlying_stability_compare_multiple.pdf')
    plt.close()
plt.show()
plt.close()
#'''
##### To plot the underlying multi-systems by period to visualize the systems (similar to Fig 1 in Fabrycky et al. 2014, but for ALL the planets):
##### Note: since there are way too many simulated systems to plot them all, we will randomly sample a number of systems to plot
'''
N_multi = sum(Mtot_all >= 3) #number of simulated multi-systems with 3 or more planets
N_multi2 = sum(Mtot_all2 >= 3) #number of simulated multi-systems with 3 or more planets
i_multi = np.arange(len(Mtot_all))[Mtot_all >= 3] #array of indices of all multi-systems with 3 or more planets
i_multi2 = np.arange(len(Mtot_all2))[Mtot_all2 >= 3] #array of indices of all multi-systems with 3 or more planets
N_sys_per_plot = 100 #number of systems to sample and plot per figure
i_multi_sample = np.random.choice(i_multi, N_sys_per_plot, replace=False) #array of indices of a sample of multi-systems with 3 or more planets
i_multi_sample2 = np.random.choice(i_multi2, N_sys_per_plot, replace=False) #array of indices of a sample of multi-systems with 3 or more planets
i_sorted_P0 = np.argsort(P_all[i_multi_sample,0]) #array of indices that would sort the arrays of the sample of multi-systems by the innermost period of each system
i_sorted_P0_2 = np.argsort(P_all2[i_multi_sample2,0]) #array of indices that would sort the arrays of the sample of multi-systems by the innermost period of each system
P_sample_multi = P_all[i_multi_sample][i_sorted_P0]
radii_sample_multi = radii_all[i_multi_sample][i_sorted_P0]
P_sample_multi2 = P_all2[i_multi_sample2][i_sorted_P0_2]
radii_sample_multi2 = radii_all2[i_multi_sample2][i_sorted_P0_2]
fig = plt.figure(figsize=(10,10))
plot = GridSpec(1,2,left=0.05,bottom=0.2,right=0.95,top=0.95,wspace=0,hspace=0)
ax = plt.subplot(plot[0,0])
plt.title(model1_label, fontsize=20)
for j in range(len(P_sample_multi)):
P_sys = P_sample_multi[j]
radii_sys = radii_sample_multi[j]
P_sys = P_sys[P_sys > 0]
radii_sys = radii_sys[radii_sys > 0]
plt.scatter(P_sys, np.ones(len(P_sys))+j, c=np.argsort(radii_sys), s=10.*radii_sys**2.)
plt.axhline(y=j+1, lw=0.1, color='k')
plt.gca().set_xscale("log")
ax.set_yticks([])
plt.xlim([2., 500.])
plt.ylim([0., N_sys_per_plot])
plt.xlabel(r'$P$ (days)', fontsize=20)
ax = plt.subplot(plot[0,1])
plt.title(model2_label, fontsize=20)
for j in range(len(P_sample_multi2)):
P_sys = P_sample_multi2[j]
radii_sys = radii_sample_multi2[j]
P_sys = P_sys[P_sys > 0]
radii_sys = radii_sys[radii_sys > 0]
plt.scatter(P_sys, np.ones(len(P_sys))+j, c=np.argsort(radii_sys), s=10.*radii_sys**2.)
plt.axhline(y=j+1, lw=0.1, color='k')
plt.gca().set_xscale("log")
ax.set_yticks([])
plt.xlim([2., 500.])
plt.ylim([0., N_sys_per_plot])
plt.xlabel(r'$P$ (days)', fontsize=20)
plt.show()
plt.close()
'''
|
986,866 | 288698931e98177a5a24ac5373eda144bcf12fe8 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy_djangoitem import DjangoItem
from maxlead_site.models import AsinReviews,Reviews,Listings,Questions,Answers,ListingWacher,CategoryRank,ProxyIp
from max_stock.models import WarehouseStocks,OrderItems
class AsinReviewsItem(DjangoItem):
    # Scrapy item backed by the AsinReviews Django model; scrapy_djangoitem
    # derives the item fields from the model automatically.
    django_model = AsinReviews
class ReviewsItem(DjangoItem):
    # Item backed by the Reviews model. image_urls/images are presumably
    # consumed by Scrapy's ImagesPipeline — TODO confirm pipeline settings.
    image_urls = scrapy.Field()
    images = scrapy.Field()
    django_model = Reviews
class ListingsItem(DjangoItem):
    # Item backed by the Listings model. image_urls/images are presumably
    # consumed by Scrapy's ImagesPipeline — TODO confirm pipeline settings.
    image_urls = scrapy.Field()
    images = scrapy.Field()
    django_model = Listings
class QuestionsItem(DjangoItem):
    # Scrapy item backed by the Questions Django model.
    django_model = Questions
class AnswersItem(DjangoItem):
    # Scrapy item backed by the Answers Django model.
    django_model = Answers
class ListingWacherItem(DjangoItem):
    # Scrapy item backed by the ListingWacher model (model name, and hence
    # this class name, is spelled "Wacher" — kept for compatibility).
    django_model = ListingWacher
class CategoryRankItem(DjangoItem):
    # Scrapy item backed by the CategoryRank Django model.
    django_model = CategoryRank
# stocks
class WarehouseStocksItem(DjangoItem):
    # Scrapy item backed by the WarehouseStocks model (max_stock app).
    django_model = WarehouseStocks
class OrderItemsItem(DjangoItem):
    # Scrapy item backed by the OrderItems model (max_stock app).
    django_model = OrderItems
class ProxyIpItem(DjangoItem):
    # Scrapy item backed by the ProxyIp Django model.
    django_model = ProxyIp
|
986,867 | 6cf0cf582670ad391b5737857921d0248d0cc6d9 | import csv
import json
import os
import re
from datetime import datetime, timedelta
from os.path import isfile
from time import time
from urllib.parse import quote
import requests
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from numpy.random import random
from shapely import wkt
# Script metadata first
SCRIPT_VERSION = '0.0.5'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SCRIPT_START = time()
# Objects and image settings
SPARQL_URL = 'https://data.pdok.nl/sparql'
BRT_OBJECT_TYPE = 'Windturbine'
MAX_SCALE_FACTOR = 0.1    # max +/- zoom jitter per harvested image
MAX_COORD_OFFSET = 100    # max +/- center jitter in meters per image
IMAGE_FORMAT = 'image/png'
WMS_URL = 'https://geodata.nationaalgeoregister.nl/luchtfoto/rgb/wms'
IMAGE_SIZE_X = 1024 # in pixels
IMAGE_SIZE_Y = 1024 # in pixels
RESOLUTION = 0.25 # in meters per pixel along x and y axis
# Half the bounding-box side in meters at scale 1.
BBOX_CENTER_OFFSET_X = IMAGE_SIZE_X * RESOLUTION / 2
BBOX_CENTER_OFFSET_Y = IMAGE_SIZE_Y * RESOLUTION / 2
IMAGES_PER_OBJECT = 5
TRAIN_TEST_SPLIT = 1 / 10
DATA_DIR = '../data/windturbines/'
RATIO_POS_NEG_SAMPLES = 1 / 10
# RD (EPSG:28992) coordinate bounds used when sampling random negative points.
RD_X_MIN = 646.36
RD_X_MAX = 308975.28
RD_Y_MIN = 276050.82
RD_Y_MAX = 636456.31
# Build Netherlands geometry to check randomly created points to fall within
with open('Netherlands.txt') as csv_neth:
    netherlands = wkt.load(csv_neth)
# Get list of objects to get aerial imagery for
payload = '''
PREFIX brt: <http://brt.basisregistraties.overheid.nl/def/top10nl#>
PREFIX geo: <http://www.opengis.net/ont/geosparql#>
prefix pdok: <http://data.pdok.nl/def/pdok#>
SELECT * WHERE {{
?instance a ?class ;
geo:hasGeometry/pdok:asWKT-RD ?rd ;
geo:hasGeometry/geo:asWKT ?wgs84 .
filter(
?class = brt:{}
)
}}
'''.format(BRT_OBJECT_TYPE)
headers = {
    'Accept': 'application/sparql-results+json',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'X-Requested-With': 'XMLHttpRequest',
    'Connection': 'keep-alive',
}
def requests_retry_session(
        retries=3,
        backoff_factor=5,
        status_forcelist=(500, 502, 504),
        session=None):
    """Return *session* (or a fresh one) mounted with retrying HTTP adapters.

    Idempotent requests are retried up to ``retries`` times on connect/read
    failures and on the HTTP statuses in ``status_forcelist``, with
    exponential back-off governed by ``backoff_factor``.

    Adapted from Peter Bengtsson,
    https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    """
    session = session or requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    ))
    # Install the same retry policy for both schemes.
    for scheme in ('http://', 'https://'):
        session.mount(scheme, adapter)
    return session
# Fetch the full list of wind-turbine instances from the SPARQL endpoint.
response = requests.request('POST', SPARQL_URL, data='query=' + quote(payload), headers=headers)
if not response.status_code == 200:
    # NOTE(review): this only warns — execution continues and json.loads
    # below will most likely fail on a non-200 body.
    print('Error getting list of', BRT_OBJECT_TYPE, 'instances from sparql endpoint')
response_dict = json.loads(response.text)
variables = response_dict['head']['vars']
positive_data_points = response_dict['results']['bindings']
# Data dirs
for subset in ['train', 'validate', 'test']:
    os.makedirs(DATA_DIR + subset + '/positive', exist_ok=True)
    os.makedirs(DATA_DIR + subset + '/negative', exist_ok=True)
# Metadata csv creation/append
csv_exists = isfile('{}metadata.csv'.format(DATA_DIR))
csv_file = open('{}metadata.csv'.format(DATA_DIR), 'a', newline='')
fieldnames = ['timestamp', 'subset', 'contains_wind_turbine', 'URI', 'image_file', 'original_rd_x', 'original_rd_y', 'offset_x',
              'offset_y', 'scale', 'request', ]
csv_writer = csv.DictWriter(csv_file, fieldnames)
if not csv_exists:
    csv_writer.writeheader()
# Harvest positive data point images for locations with wind turbines
sess = requests.Session()
for record_index, record in enumerate(positive_data_points):
    image_class = 'positive'
    # Deterministic ~10%/10%/80% test/validate/train split by record index.
    if record_index % (1 / TRAIN_TEST_SPLIT) == 0:
        subset = 'test'
    elif (record_index - 1) % (1 / TRAIN_TEST_SPLIT) == 0:
        subset = 'validate'
    else:
        subset = 'train'
    # Augmentation: per-image center jitter (meters) and zoom factor.
    random_offsets = 2 * MAX_COORD_OFFSET * (random((IMAGES_PER_OBJECT, 2)) - 0.5)
    random_scales = 1 - 2 * MAX_SCALE_FACTOR * (random((IMAGES_PER_OBJECT,)) - 0.5)
    uri = record['instance']['value']
    # Last path segment of the URI is used as a stable file-name prefix.
    brt_id = re.findall('.*/(.+)', uri)[0]
    for image_index, (offset, scale) in enumerate(zip(random_offsets, random_scales)):
        image_file_name = brt_id + '-' + str(image_index) + '.png'
        # Skip images already downloaded into any of the three subsets.
        if True in [isfile(DATA_DIR + 'train/' + image_class + '/' + image_file_name),
                    isfile(DATA_DIR + 'validate/' + image_class + '/' + image_file_name),
                    isfile(DATA_DIR + 'test/' + image_class + '/' + image_file_name)]:
            print('Already have', image_file_name)
            continue
        rd_wkt = record['rd']['value']
        rd_coords = re.findall('POINT \((.+) (.+)\)', rd_wkt)
        if not rd_coords:
            print('Error finding coordinates in RD geometry string', rd_wkt)
            continue
        # Jittered image center in RD coordinates.
        rd_x = float(rd_coords[0][0]) + offset[0]
        rd_y = float(rd_coords[0][1]) + offset[1]
        querystring = {
            'LAYERS': '2016_ortho25',
            'FORMAT': IMAGE_FORMAT,
            'TRANSPARENT': 'TRUE',
            'SERVICE': 'WMS',
            'VERSION': '1.1.1',
            'REQUEST': 'GetMap',
            'STYLES': '',
            'SRS': 'EPSG:28992',
            # at scale in meter-based coordinate systems it is useless to have more than one decimal
            'BBOX': '{min_x:0.1f},{min_y:0.1f},{max_x:0.1f},{max_y:0.1f}'.format(
                min_x=rd_x - (BBOX_CENTER_OFFSET_X * scale),
                min_y=rd_y - (BBOX_CENTER_OFFSET_Y * scale),
                max_x=rd_x + (BBOX_CENTER_OFFSET_X * scale),
                max_y=rd_y + (BBOX_CENTER_OFFSET_Y * scale),
            ),
            'WIDTH': IMAGE_SIZE_X, 'HEIGHT': IMAGE_SIZE_Y
        }
        response = requests_retry_session(session=sess).get(WMS_URL, params=querystring, timeout=500)
        # WMS returns an XML error document (not image/png) on failure.
        if not response.headers['Content-Type'].startswith(IMAGE_FORMAT):
            print('Skipping entry', uri, 'Bad response type', response.headers['Content-Type'])
            continue
        image_file_path = DATA_DIR + subset + '/positive/' + image_file_name
        with open(image_file_path, mode='wb') as image:
            for chunk in response:
                image.write(chunk)
        csv_writer.writerow({
            'timestamp': datetime.now(),
            'subset': subset,
            'contains_wind_turbine': image_class,
            'URI': uri,
            'image_file': image_file_path,
            'original_rd_x': rd_coords[0][0],
            'original_rd_y': rd_coords[0][1],
            'offset_x': offset[0],
            'offset_y': offset[1],
            'scale': scale,
            'request': response.request.url
        })
        # Flush so the metadata survives an interrupted run.
        csv_file.flush()
        print('Wrote image and data for record', record_index, 'of', len(positive_data_points))
# Harvest negative samples at random locations within the Netherlands.
# With RATIO_POS_NEG_SAMPLES = 1/10 this yields 10x as many negative
# locations as positive ones.
number_of_neg_samples = int(len(positive_data_points) / RATIO_POS_NEG_SAMPLES)
for neg_sample_index in range(number_of_neg_samples):
    image_class = 'negative'
    if neg_sample_index % (1 / TRAIN_TEST_SPLIT) == 0:
        subset = 'test'
    elif (neg_sample_index - 1) % (1 / TRAIN_TEST_SPLIT) == 0:
        subset = 'validate'
    else:
        subset = 'train'
    # If the last of the IMAGES_PER_OBJECT images exists, assume the whole
    # set for this sample was already harvested.
    last_image = 'negative-' + str(neg_sample_index) + '-4.png'
    if True in [isfile(DATA_DIR + 'train/' + image_class + '/' + last_image),
                isfile(DATA_DIR + 'validate/' + image_class + '/' + last_image),
                isfile(DATA_DIR + 'test/' + image_class + '/' + last_image)]:
        print('Already have', last_image)
        continue
    # Rejection-sample a random RD point until it falls inside the
    # Netherlands contour.
    random_x = random((1,)) * (RD_X_MAX - RD_X_MIN) + RD_X_MIN
    random_y = random((1,)) * (RD_Y_MAX - RD_Y_MIN) + RD_Y_MIN
    point = wkt.loads('POINT ({} {})'.format(random_x[0], random_y[0]))
    while not point.within(netherlands):
        print('Retrying random point, not within Netherlands contour')
        random_x = random((1,)) * (RD_X_MAX - RD_X_MIN) + RD_X_MIN
        random_y = random((1,)) * (RD_Y_MAX - RD_Y_MIN) + RD_Y_MIN
        point = wkt.loads('POINT ({} {})'.format(random_x[0], random_y[0]))
    random_offsets = 2 * MAX_COORD_OFFSET * (random((IMAGES_PER_OBJECT, 2)) - 0.5)
    random_scales = 1 - 2 * MAX_SCALE_FACTOR * (random((IMAGES_PER_OBJECT,)) - 0.5)
    for image_index, (offset, scale) in enumerate(zip(random_offsets, random_scales)):
        image_file_name = 'negative-' + str(neg_sample_index) + '-' + str(image_index) + '.png'
        # NOTE(review): float() on a length-1 ndarray is deprecated in recent
        # numpy; random_x[0] would be the future-proof spelling.
        rd_x = float(random_x) + offset[0]
        rd_y = float(random_y) + offset[1]
        querystring = {
            'LAYERS': '2016_ortho25',
            'FORMAT': IMAGE_FORMAT,
            'TRANSPARENT': 'TRUE',
            'SERVICE': 'WMS',
            'VERSION': '1.1.1',
            'REQUEST': 'GetMap',
            'STYLES': '',
            'SRS': 'EPSG:28992',
            # at scale in meter-based coordinate systems it is useless to have more than one decimal
            'BBOX': '{min_x:0.1f},{min_y:0.1f},{max_x:0.1f},{max_y:0.1f}'.format(
                min_x=rd_x - (BBOX_CENTER_OFFSET_X * scale),
                min_y=rd_y - (BBOX_CENTER_OFFSET_Y * scale),
                max_x=rd_x + (BBOX_CENTER_OFFSET_X * scale),
                max_y=rd_y + (BBOX_CENTER_OFFSET_Y * scale),
            ),
            'WIDTH': IMAGE_SIZE_X, 'HEIGHT': IMAGE_SIZE_Y
        }
        response = requests_retry_session(session=sess).get(url=WMS_URL, params=querystring, timeout=500)
        if not response.headers['Content-Type'].startswith(IMAGE_FORMAT):
            print('Skipping entry', image_file_name, 'bad response type', response.headers['Content-Type'])
            continue
        image_file_path = DATA_DIR + subset + '/' + image_class + '/' + image_file_name
        with open(image_file_path, mode='wb') as image:
            for chunk in response:
                image.write(chunk)
        csv_writer.writerow({
            'timestamp': datetime.now(),
            'subset': subset,
            'contains_wind_turbine': image_class,
            'URI': None,
            'image_file': image_file_path,
            'original_rd_x': random_x,
            'original_rd_y': random_y,
            'offset_x': offset[0],
            'offset_y': offset[1],
            'scale': scale,
            'request': response.request.url
        })
        csv_file.flush()
        print('Wrote image and data for record', neg_sample_index, 'of', number_of_neg_samples)
# Close https session
sess.close()
runtime = time() - SCRIPT_START
print(SCRIPT_NAME, 'finished successfully in {}'.format(timedelta(seconds=runtime)))
|
986,868 | bf7386753e4d3c6aa2785c00e9bbb8c0effed772 | from collections import deque
# Round-robin CPU scheduling simulation (AOJ-style): read n processes with
# burst times, run each for at most q time units per turn, and print each
# process name with its completion time.
robin=deque()
n,q=map(int,input().split())
for i in range(n):
    p,t=input().split()
    robin.append([p,int(t)])  # [process name, remaining burst time]
time=0
while(len(robin)!=0):
    proc=robin.popleft()
    if(proc[1]<=q):
        # Process finishes within this quantum: report completion time.
        print(proc[0],time+proc[1])
        time+=proc[1]
    else:
        # Consume one full quantum and requeue at the back.
        proc[1]-=q
        robin.append(proc)
        time+=q
986,869 | e36fbeaa0c90c6965f504afc97d2c223b751fe66 | DICTIONARY_MAPPING_ID_TO_NAME = {1: "rock", 2: "paper", 3: "scissors", 4: "lizard", 5: "spock"}
URL_RANDOM_NUMBER_SERVICE = 'https://codechallenge.boohma.com/random'
# Rock-paper-scissors-lizard-spock rules: each key beats every move in its
# value set. (Identifier spelling "LOOSER" kept — callers depend on it.)
WINNER_KEY_LOOSERSET_AS_VALUE_DICTIONARY = {"rock": {"scissors", "lizard"}, "paper": {"rock", "spock"}, "scissors": {"paper", "lizard"},
                                            "lizard": {"spock", "paper"}, "spock": {"scissors", "rock"}}
986,870 | 869efc8c21eafd04417ca80a26df0ffa4b410de8 | import couchdb
from celery.execute import send_task
# Python 2 maintenance script: for every client whose 'days_before_suspension'
# setting equals -30, dispatch a Celery task that creates a prepaid invoice.
# SECURITY NOTE(review): CouchDB credentials are hard-coded in the URL.
WORKING_DAYS = 17
s = couchdb.Server('http://replication:r2d2rep@iweb10:5984')
db = s['client_docs']
for r in db.view('client/settings', include_docs=True):
    doc = r.doc
    if doc.has_key('days_before_suspension'):
        if doc['days_before_suspension'] == -30:
            client_id = doc['client_id']
            t = send_task('prepaid_create_invoice.create_invoice', [client_id, WORKING_DAYS])
            # Block until the task finishes; a string result is an error message.
            r = t.get()
            if type(r) is str:
                print r
            else:
                if r.has_key('order_id'):
                    print '%s doc_id=%s' % (r['order_id'], r['_id'])
                else:
                    print r
986,871 | 393e027c2e80d8a2ca901ef0104ac59c6887770d | # Distance Calculation
# distance.py
import math
def main():
    """Prompt for two points in the plane and print their Euclidean distance."""
    # Instruction banner.
    print('The program calculates the distance between two points.')
    # SECURITY NOTE(review): eval() executes whatever the user types; it is
    # kept only for behavioral compatibility with the original exercise.
    prompt = 'Enter two points x1, y1, x2, y2:' '(separate by commas) '
    x1, y1, x2, y2 = eval(input(prompt))
    # Euclidean distance between (x1, y1) and (x2, y2).
    distance = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
    print('The distance is {0:.2f}.'.format(distance))


main()
|
986,872 | e15fb5ba12992004a016331539c9db3a39389aa9 | import os
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Hash import SHA256
#key is the hashed password
#filename is the file to encrypt
'''
The MIT License (MIT)
Copyright (c) 2018 Muhammad Ali Zia
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from Crypto import Random
from Crypto.Cipher import AES
import os
import os.path
from os import listdir
from os.path import isfile, join
import time
class Encryptor:
    """AES-CBC file encryptor/decryptor with NUL-byte padding.

    The IV is generated per message and prepended to the ciphertext.
    """

    def __init__(self, key):
        # key: raw AES key bytes (32 bytes for AES-256); presumably the
        # SHA-256 digest of a password — TODO confirm at call sites.
        self.key = key

    def pad(self, s):
        # Pad *s* with NUL bytes up to a multiple of the AES block size.
        # NOTE(review): zero padding is ambiguous for plaintexts that
        # legitimately end in b"\0" — decrypt() strips those bytes too.
        return s + b"\0" * (AES.block_size - len(s) % AES.block_size)

    def encrypt(self, message, key, key_size=256):
        # NOTE(review): ``key_size`` is unused; the length of *key* alone
        # selects the AES variant.
        message = self.pad(message)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        # Prepend the random IV so decrypt() can recover it.
        return iv + cipher.encrypt(message)

    def encrypt_file(self, file_name):
        """Encrypt *file_name* to ``<file_name>.enc`` and delete the original."""
        print('Encrypting File')
        with open(file_name, 'rb') as fo:
            plaintext = fo.read()
        enc = self.encrypt(plaintext, self.key)
        with open(file_name + ".enc", 'wb') as fo:
            fo.write(enc)
        os.remove(file_name)

    def decrypt(self, ciphertext, key):
        # The first block is the IV written by encrypt().
        iv = ciphertext[:AES.block_size]
        cipher = AES.new(key, AES.MODE_CBC, iv)
        plaintext = cipher.decrypt(ciphertext[AES.block_size:])
        # Strip the NUL padding added by pad().
        return plaintext.rstrip(b"\0")

    def decrypt_file(self, file_name):
        """Decrypt ``<name>.enc`` back to ``<name>`` (drops the '.enc' suffix)."""
        print('Decrypting File')
        with open(file_name, 'rb') as fo:
            ciphertext = fo.read()
        dec = self.decrypt(ciphertext, self.key)
        with open(file_name[:-4], 'wb') as fo:
            fo.write(dec)
986,873 | cb54c1c4c7ae98865b6be80e00a78c0053735569 | from threading import Lock
from typing import Any, Dict
from enterprise.camera_ctrl.camera_manager import CameraManager
from enterprise.camera_ctrl.multi_lock import MultiLock
from infrastructure.camera_glue_code.stub.stub_camera import StubCamera
class StubCameraManager(CameraManager):
    """In-memory CameraManager for tests: "detects" a fixed list of stub cameras."""

    def __init__(self, cameras):
        # BUG FIX: the annotation was ``Dict[StubCamera]``. ``typing.Dict``
        # requires exactly two parameters, and PEP 526 evaluates annotations
        # on attribute targets at runtime, so constructing this class raised
        # ``TypeError: Too few parameters for typing.Dict``.
        # Maps camera id -> camera (key type is whatever ``camera.id`` is).
        self._cameras: Dict[Any, StubCamera] = dict()
        self._cameras_to_detect = cameras
        self._stub_lock = Lock()

    @property
    def all_locks(self) -> MultiLock:
        """MultiLock over every detected camera's sync lock plus the manager's own."""
        locks = [camera.sync_lock for camera in self.cameras]
        locks.append(self._stub_lock)
        return MultiLock(locks)

    @property
    def sync_lock(self) -> Lock:
        """Lock guarding the manager itself."""
        return self._stub_lock

    @property
    def cameras(self):
        """All currently detected cameras."""
        return self._cameras.values()

    def detect_all_cameras(self):
        """Register every configured stub camera as detected, keyed by its id."""
        for camera in self._cameras_to_detect:
            self._cameras[camera.id] = camera

    def disconnect_all(self):
        """Forget all detected cameras."""
        self._cameras.clear()

    def get_camera(self, camera_id):
        """Return the camera with *camera_id*, or None if unknown."""
        return self._cameras.get(camera_id)

    def remove_camera(self, camera_id):
        """Drop *camera_id* from the detected set; no-op if unknown."""
        if camera_id in self._cameras:
            self._cameras.pop(camera_id)
986,874 | 157b100aa91687df8f46469239afcc00aface1c8 | import sys
# Read a tree with N nodes and N-1 edges, number its leaves 1..k in DFS order
# from node 0, and print for every node the min/max leaf label (L, R) in its
# subtree.
sys.setrecursionlimit(1000000)
N = int(input())
G = [[] for _ in range(N)]    # adjacency list
len_G = [0] * N               # degree of each node
for _ in range(N - 1):
    ui, vi = map(lambda s: int(s) - 1, input().split())
    G[ui].append(vi)
    len_G[ui] += 1
    G[vi].append(ui)
    len_G[vi] += 1
t = 0           # running leaf counter (leaves labelled in DFS order)
L = [N] * N     # smallest leaf label in each node's subtree
R = [0] * N     # largest leaf label in each node's subtree
def dfs(v, p):
    # A non-root node of degree 1 is a leaf: assign it the next label.
    if p != -1 and len_G[v] == 1:
        global t
        t += 1
        L[v], R[v] = t, t
        return L[v], R[v]
    for nv in G[v]:
        if nv == p:
            continue
        Li, Ri = dfs(nv, v)
        L[v] = min(Li, L[v])
        R[v] = max(Ri, R[v])
    return L[v], R[v]
dfs(0, -1)
for i in range(N):
    print(L[i], R[i])
986,875 | 242494152dafcd7e5b7852409e7705b775770c1b | # -*- coding: utf-8 -*-
"""Attributes.
:mod:`~astropy.coordinates.attributes`
"""
__author__ = "Nathaniel Starkman"
__all__ = [
"OrbitRepresentationBase",
"OrbitSkyOffsetRepresentation",
"OrbitOffsetCartesianRepresentation",
"OrbitOffsetCylindricalRepresentation",
]
##############################################################################
# IMPORTS
import abc
import inspect
import typing as T
from collections import OrderedDict
import astropy.units as u
import numpy as np
from astropy.coordinates import BaseDifferential, BaseRepresentation
from astropy.coordinates.distances import Distance
from astropy.utils import classproperty
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
class OrbitRepresentationBase(BaseRepresentation):
    """Base class for Orbit representations.

    The first parameter is the affine parameter (``afn``), generally time or
    arc-length along the orbit.

    .. todo:

        - support an inverse function t(d), where d is the distance along the
          orbit, so can support the arc length as the affine parameter.
        - support hidden variables, like _PA

    """

    attr_classes = OrderedDict([("afn", u.Quantity)])
    """Attribute Classes. Should be an OrderedDict."""

    def __new__(cls, *args, **kwargs):
        # Optional dynamic alias: expose ``afn`` under a caller-chosen name
        # (e.g. ``t``) as a read-only property.
        afn_name = kwargs.pop("afn_name", None)
        # print("afn_name: ", afn_name)
        if afn_name is not None:
            # NOTE(review): setattr on ``cls`` mutates the class, so the
            # alias leaks to every instance of the class, not just this one.
            setattr(cls, afn_name, property(lambda self: self.afn))
        self = super().__new__(cls)
        self._afn_name = afn_name
        return self

    def __init__(self, afn, *args, **kwargs):
        """OrbitRepresentationBase, afn along orbit as affine parameter."""
        super().__init__(afn, *args, **kwargs)

    # /def

    @abc.abstractmethod
    def from_cartesian(self, cartesian):  # TODO
        """From Cartesian. Abstract method. Should be classmethod."""
        pass

    # /def

    @abc.abstractmethod
    def to_cartesian(self):  # TODO
        """To Cartesian. Abstract method."""
        pass

    # /def


# /class
# -------------------------------------------------------------------
class OrbitSkyOffsetUnitRepresentation(OrbitRepresentationBase):
    """Define a Pseudo-Spherical projected-Orbit on-sky coordinate system.

    Parameterized by the:

    - afn, the affine parameter along the orbit (instantiation coordinate)
    - sep, the on-sky angular separation from the orbit at position `afn`
    - distance, the radial distance from coordinate center.

    .. todo::

        - Allow differentials
        - OrbitOffsetCartesianRepresentation transformation
        - OrbitOffsetCylindricalRepresentation transformation

    Parameters
    ----------
    afn : Quantity
    sep : Angle or Quantity
    _PA : Angle or Quantity

    """

    attr_classes = OrderedDict(
        [
            ("afn", u.Quantity),  # affine parameter
            ("sep", u.Quantity),  # sky separation
            ("_PA", u.Quantity),  # position-angle, hidden variable
        ]
    )

    @classproperty
    def _dimensional_representation(cls):
        # Dimensional (with-distance) counterpart astropy promotes to when a
        # distance is attached.
        return OrbitSkyOffsetRepresentation

    # @u.quantity_input(sep="angle", _PA="angle")
    def __init__(
        self,
        afn,
        sep,
        _PA=np.NaN * u.deg,
        *,
        differentials=None,
        copy: bool = True,
        afn_name: T.Optional[str] = None
    ):
        # afn_name is consumed by OrbitRepresentationBase.__new__.
        if differentials is not None:  # TODO, allow differentials
            raise ValueError()
        super().__init__(
            afn, sep, _PA=_PA, copy=copy, differentials=differentials
        )

    # /def

    def scale_factors(self):
        """Physical scale factor in component directions.

        Returns
        -------
        Returns a dict with a Quantity for each component with the appropriate
        physical scale factor for a unit change in that direction.

        """
        # TODO is self.afn.unit fixed?
        sf_afn = np.broadcast_to(1.0 / self.afn.unit, self.shape, subok=True)
        sf_sep = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
        return OrderedDict((("afn", sf_afn), ("sep", sf_sep)))

    # /def

    def represent_as(self, other_class, differential_class=None):
        # Take a short cut if the other class is an AlongOrbit representation
        # TODO: shortcut even if a differential_class is passed in,
        # using the ._re_represent_differentials() method
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(
                other_class, OrbitSkyOffsetRepresentation
            ):  # TODO differential
                # Promote to the dimensional form with unit distance.
                return OrbitSkyOffsetRepresentation(
                    afn=self.afn,
                    sep=self.sep,
                    distance=1 * u.dimensionless_unscaled,
                    _PA=self._PA,
                )
            elif issubclass(other_class, OrbitOffsetCartesianRepresentation):
                raise Exception("IOU transformation")
            elif issubclass(other_class, OrbitOffsetCylindricalRepresentation):
                raise Exception("IOU transformation")
        return super().represent_as(other_class, differential_class)

    # /def

    @classmethod
    def from_cartesian(self, cartesian):  # TODO
        # NOTE(review): first parameter of this classmethod is named ``self``.
        raise Exception("There is no cartesian representation.")

    # /def

    def to_cartesian(self):  # TODO
        raise Exception("There is no cartesian representation.")

    # /def


# /class
# -------------------------------------------------------------------
class OrbitSkyOffsetRepresentation(OrbitRepresentationBase):
    """Define a Pseudo-Spherical projected-Orbit on-sky coordinate system.

    Parameterized by the:

    - afn, the afn along the orbit (from instantiation coordinate)
    - sep, the on-sky angular separation from the orbit at position afn
    - distance, the radial distance from coordinate center.

    .. todo::

        - Allow differentials
        - OrbitOffsetCartesianRepresentation transformation
        - OrbitOffsetCylindricalRepresentation transformation

    """

    attr_classes = OrderedDict(
        [
            ("afn", u.Quantity),
            ("sep", u.Quantity),  # sky separation
            ("distance", u.Quantity),
            ("_PA", u.Quantity),  # position-angle, hidden variable
        ]
    )

    # Distance-less counterpart used by astropy's representation machinery.
    _unit_representation = OrbitSkyOffsetUnitRepresentation

    # @u.quantity_input(sep="angle", _PA="angle")
    def __init__(
        self,
        afn,
        sep,
        distance,
        _PA=np.NaN * u.deg,
        *,
        differentials=None,
        copy: bool = True,
        afn_name: T.Optional[str] = None
    ):
        if differentials is not None:  # TODO, allow differentials
            raise ValueError()
        super().__init__(
            afn,
            sep,
            distance,
            _PA=_PA,
            copy=copy,
            differentials=differentials,
        )
        # self._distance is presumably set by BaseRepresentation.__init__
        # from attr_classes — upgrade length-valued distances to Distance so
        # negative values are rejected with a clear message.
        if self._distance.unit.physical_type == "length":
            try:
                self._distance = Distance(self._distance, copy=False)
            except ValueError as e:
                if e.args[0].startswith("Distance must be >= 0"):
                    raise ValueError(
                        "Distance must be >= 0. To allow negative "
                        "distance values, you must explicitly pass"
                        " in a `Distance` object with the the "
                        "argument 'allow_negative=True'."
                    )
                else:
                    raise

    # /def

    def represent_as(self, other_class, differential_class=None):
        # Take a short cut if the other class is an AlongOrbit representation
        # TODO: shortcut even if a differential_class is passed in,
        # using the ._re_represent_differentials() method
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(
                other_class, OrbitSkyOffsetUnitRepresentation
            ):  # TODO differential
                # Drop the distance component.
                return OrbitSkyOffsetUnitRepresentation(
                    afn=self.afn, sep=self.sep, _PA=self._PA,
                )
            elif issubclass(other_class, OrbitOffsetCartesianRepresentation):
                raise Exception("IOU transformation")
            elif issubclass(other_class, OrbitOffsetCylindricalRepresentation):
                raise Exception("IOU transformation")
        return super().represent_as(other_class, differential_class)

    # /def

    @classmethod
    def from_cartesian(self, cartesian):
        # NOTE(review): first parameter of this classmethod is named ``self``.
        raise Exception("There is no cartesian representation.")

    # /def

    def to_cartesian(self):
        raise Exception("There is no cartesian representation.")

    # /def

    def unit_vectors(self):
        """Unit vectors in component directions.

        Returns
        -------
        Returns a dict with a CartesianRepresentation of unit vectors
        in the direction of each component.

        """
        # return {'comp1': CartesianRepresentation(...),
        #         'comp2': CartesianRepresentation(...),
        #         'comp3': CartesianRepresentation(...)}
        raise Exception("Not yet implemented")

    # /def

    def scale_factors(self):
        """Physical scale factor in component directions.

        Returns
        -------
        Returns a dict with a Quantity for each component with the appropriate
        physical scale factor for a unit change in that direction.

        """
        # TODO is self.afn.unit fixed?
        sf_afn = np.broadcast_to(1.0 / self.afn.unit, self.shape, subok=True)
        sf_sep = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
        sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
        return OrderedDict(
            (("afn", sf_afn), ("sep", sf_sep), ("distance", sf_distance))
        )

    # /def


# /class
# -------------------------------------------------------------------
class OrbitSkyOffsetDifferential(BaseDifferential):  # TODO
    """Differential for `OrbitSkyOffsetRepresentation` (stub).

    Only declares the paired representation; derivative components are
    inherited from `BaseDifferential`.
    """
    # Links this differential class to its position representation.
    base_representation = OrbitSkyOffsetRepresentation
# /class
# -------------------------------------------------------------------
class OrbitOffsetCartesianRepresentation(OrbitRepresentationBase):
    """Define a pseudo-Cartesian along-Orbit coordinate system.

    A tube around the orbit, parameterized by:

    - afn, the affine parameter along the orbit (instantiation coordinate)
    - x, the distance from the orbit, in the plane of the orbit
    - y, the distance from the orbit, perpendicular to the plane

    .. todo::

        - Allow differentials
        - get correct phi value in `represent_as`
        - OrbitSkyOffsetRepresentation transformation
        - dynamically defined by the orbit, for Cartesian transformations.
          maybe make this a base-class with abstract methods
    """
    attr_classes = OrderedDict(
        [
            ("afn", u.Quantity),
            ("x", u.Quantity),
            ("y", u.Quantity),
            ("_d_afn", u.Quantity),
        ]
    )
    # @u.quantity_input(x="length", y="length")
    def __init__(
        self,
        afn,
        x,
        y,
        # np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0
        _d_afn=np.nan * u.pc,
        differentials=None,
        copy: bool = True,
        afn_name: T.Optional[str] = None,
    ):
        """Initialize class.

        Parameters
        ----------
        afn : Quantity
            Affine parameter along the orbit.
        x, y : Quantity
            In-plane and out-of-plane offsets from the orbit.
        _d_afn : Quantity, optional
        differentials : optional
            Not yet supported; must be None.
        copy : bool
        afn_name : str, optional
            NOTE(review): accepted but not forwarded to ``super().__init__``;
            presumably handled by the base class — confirm.

        Raises
        ------
        ValueError
            If ``differentials`` is not None (unsupported for now).
        """
        if differentials is not None:  # TODO, allow differentials
            raise ValueError()
        super().__init__(
            afn, x, y, _d_afn=_d_afn, copy=copy, differentials=differentials
        )
    # /def
    def represent_as(self, other_class, differential_class=None):
        """Convert to ``other_class``, short-cutting sibling orbit classes."""
        # Take a short cut if the other class is an OrbitOffset representation
        # TODO: shortcut even if a differential_class is passed in,
        #       using the ._re_represent_differentials() method
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(other_class, OrbitOffsetCylindricalRepresentation):
                return other_class(
                    afn=self.afn,
                    rho=np.sqrt(self.x ** 2 + self.y ** 2),
                    phi=np.arctan2(self.y, self.x),  # TODO correct sign?
                    _d_afn=self._d_afn,
                    copy=False,
                    afn_name=self._afn_name,
                )
            elif issubclass(other_class, OrbitSkyOffsetRepresentation):
                raise Exception("IOU transformation")
        return super().represent_as(other_class, differential_class)
    # /def
    @classmethod
    def from_cartesian(cls, cartesian):  # TODO
        """From Cartesian (unsupported; always raises).

        BUGFIX: first parameter renamed ``self`` -> ``cls`` (classmethod).
        """
        raise Exception("There is no cartesian representation, currently.")
    # /def
    def to_cartesian(self):  # TODO
        """To Cartesian (unsupported; always raises)."""
        # Step 1: use afn to get orbit coordinate at that afn value
        # Step 2: use `x` and `y` offset to get real-space position
        # Step 3: representation machinery to return Cartesian
        raise Exception("There is no cartesian representation, currently.")
    # /def
# /class
# -------------------------------------------------------------------
class OrbitOffsetCartesianDifferential(BaseDifferential):  # TODO
    """Differential for `OrbitOffsetCartesianRepresentation` (stub)."""
    # BUGFIX: previously pointed at OrbitSkyOffsetRepresentation — an apparent
    # copy-paste error; a differential must pair with its own representation.
    base_representation = OrbitOffsetCartesianRepresentation
# /class
# -------------------------------------------------------------------
class OrbitOffsetCylindricalRepresentation(OrbitRepresentationBase):
    """Define a pseudo-cylindrical along-Orbit coordinate system.

    Parameterized by:

    - afn, the afn along the orbit (from instantiation coordinate)
    - rho, the distance from the orbit
    - phi, the angle around from the orbit
      (0 pointing toward observer from orbit start)

    .. todo::

        - Allow differentials
        - make sure x, y conversions correct in `represent_as`
        - dynamically defined by the orbit, for Cartesian transformations.
          maybe make this a base-class with abstract methods

    Parameters
    ----------
    afn : Quantity
    rho : Quantity
    phi : Quantity
    _d_afn : Quantity
    differentials
        Not yet supported; must be None.
    copy : bool
    afn_name : str, optional
        NOTE(review): accepted but not forwarded to ``super().__init__``;
        presumably handled by the base class — confirm.
    """
    attr_classes = OrderedDict(
        [
            ("afn", u.Quantity),
            ("rho", u.Quantity),
            ("phi", u.Quantity),
            ("_d_afn", u.Quantity),
        ]
    )
    # @u.quantity_input(rho="length", phi="angle")
    def __init__(
        self,
        afn,
        rho,
        phi,
        # np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0
        _d_afn=np.nan * u.pc,
        differentials=None,
        copy: bool = True,
        afn_name: T.Optional[str] = None,
    ):
        """Initialize class."""
        if differentials is not None:  # TODO, allow differentials
            raise ValueError()
        super().__init__(
            afn,
            rho,
            phi,
            _d_afn=_d_afn,
            copy=copy,
            differentials=differentials,
        )
    # /def
    def represent_as(self, other_class, differential_class=None):
        """Convert to ``other_class``, short-cutting sibling orbit classes."""
        # Take a short cut if the other class is an AlongOrbit representation
        # TODO: shortcut even if a differential_class is passed in,
        #       using the ._re_represent_differentials() method
        if inspect.isclass(other_class) and not differential_class:
            if issubclass(other_class, OrbitOffsetCartesianRepresentation):
                return other_class(
                    afn=self.afn,
                    x=self.rho * np.cos(self.phi),  # TODO get correct
                    y=self.rho * np.sin(self.phi),  # TODO get correct
                    _d_afn=self._d_afn,
                    copy=False,
                    afn_name=self._afn_name,
                )
            elif issubclass(other_class, OrbitSkyOffsetRepresentation):
                raise Exception("IOU transformation")
        return super().represent_as(other_class, differential_class)
    # /def
    @classmethod
    def from_cartesian(cls, cartesian):
        """From Cartesian (unsupported; always raises).

        BUGFIX: first parameter renamed ``self`` -> ``cls`` (classmethod).
        """
        raise Exception("There is no cartesian representation, currently.")
    # /def
    def to_cartesian(self):
        """To Cartesian (unsupported; always raises)."""
        # Step 1: use afn to get orbit coordinate at that afn value
        # Step 2: use `x` and `y` offset to get real-space position
        # Step 3: representation machinery to return Cartesian
        raise Exception("There is no cartesian representation, currently.")
    # /def
# /class
# -------------------------------------------------------------------
class OrbitOffsetCylindricalDifferential(BaseDifferential):
    """Orbit-Offset Cylindrical Differential.

    .. todo::

        implement
    """
    # BUGFIX: previously pointed at OrbitSkyOffsetRepresentation — an apparent
    # copy-paste error; a differential must pair with its own representation.
    base_representation = OrbitOffsetCylindricalRepresentation
# /class
##############################################################################
# END
|
986,876 | 5d0f5c6ce0dda4cac012d3e4e0d98d412fbc33ca | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
# NOTE: the body between the wxGlade begin/end markers is auto-generated and
# will be rewritten by wxGlade; edit the .wxg project instead.
class Bug184_Frame(wx.Frame):
    """Main frame (wxGlade-generated; regression case for bug 184)."""
    def __init__(self, *args, **kwds):
        """Build the frame: one expanding StaticText inside a vertical sizer."""
        # begin wxGlade: Bug184_Frame.__init__
        kwds["style"] = kwds.get("style", 0)
        wx.Frame.__init__(self, *args, **kwds)
        # _() is installed into builtins by gettext.install() in __main__
        self.SetTitle(_("frame_1"))
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BACKGROUND))
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        self.label_1 = wx.StaticText(self, wx.ID_ANY, _("Just a label"))
        sizer_1.Add(self.label_1, 1, wx.ALL | wx.EXPAND, 5)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
        # end wxGlade
# end of class Bug184_Frame
class MyApp(wx.App):
    """Application object that creates and shows the Bug184 frame."""
    def OnInit(self):
        """Create the main frame, register it as top window, and show it."""
        frame = Bug184_Frame(None, wx.ID_ANY, "")
        self.Frame184 = frame
        self.SetTopWindow(frame)
        frame.Show()
        return True
# end of class MyApp
if __name__ == "__main__":
    # Install gettext's _() into builtins before any UI strings are built.
    gettext.install("app")  # replace with the appropriate catalog name
    app = MyApp(0)
    # Blocks until the last top-level window is closed.
    app.MainLoop()
|
986,877 | 54a26af6e1b87e6741f42b9046b16fcf5a4db478 | from setuptools import setup
# Package metadata for the noogu WHOIS parser.
setup(name='noogu',
      version='0.1',
      description='Simple python WHOIS parser',
      author='Doyoon Kim',
      author_email='dkim0718 (at) gmail (dot) com',
      url='https://github.com/dkim0718/noogu',
      packages=['noogu'],
      package_dir={'noogu': 'noogu'},
      # BUGFIX: the key was 'pythonwhois' (copied from the pythonwhois
      # project), so the *.dat data files were never packaged with 'noogu'.
      package_data={'noogu': ['*.dat']},
      install_requires=[],
      provides=['noogu'],
      scripts=[],
      license='GPLv3'
      )
|
986,878 | 7618c944a5f1aab69ba9797c1de956b25134c6cf | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The long description is taken verbatim from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as readme:
    long_description = readme.read()

_classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Intended Audience :: Information Technology',
    'Topic :: Utilities',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
]

setup(
    name='datapackage',
    version='0.0.1',
    description=(
        'Utilities to work with Data Packages as defined on dataprotocols.org'
    ),
    long_description=long_description,
    url='https://github.com/okfn/datapackage-py',
    author='Open Knowledge Foundation',
    author_email='info@okfn.org',
    license='MIT',
    classifiers=_classifiers,
    keywords='data dataprotocols jsontableschema openrefine datascience',
    packages=find_packages(exclude=['tests']),
    install_requires=[
        'jsonschema >= 2.5',
        'six >= 1.10.0',
        'datapackage_registry >= 0.3.0',
        'tabulator',
    ],
    dependency_links=[
        'https://github.com/okfn/tabulator-py/tarball/master#egg=tabulator'
    ],
)
|
986,879 | a0574f923fb1fb21eb9155db6fa2ee50f14a9ec4 | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/4/15 8:14
# @Software : Web-Autotest-Python
# @Python_verison : 3.7
import unittest
# Directory containing the test case modules.
# BUGFIX: the variable was named `dir`, shadowing the builtin.
case_dir = './case/'
# Collect every test from files matching Te*.py (e.g. TestLogin.py).
suite = unittest.defaultTestLoader.discover(start_dir=case_dir, pattern='Te*.py')
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
# print(__name__)
986,880 | 8ba043899e01912bd4ea36622fa838622b0bdf03 | #! /usr/bin/env python3
from face_landmark.configs import config_pb2
import sys
import os
from google.protobuf import text_format
def parse_config(config_path):
    """Read a FaceLandmarkTrainingConfig text-proto and fill in defaults.

    Parameters
    ----------
    config_path : str
        Path to a text-format protobuf config file.

    Returns
    -------
    config_pb2.FaceLandmarkTrainingConfig
        Parsed config with unset fields replaced by defaults.

    Raises
    ------
    ValueError
        If the file does not exist or ``export_dir`` is not set.
    """
    if not os.path.exists(config_path):
        raise ValueError(f'{config_path} does not exist.')
    config = config_pb2.FaceLandmarkTrainingConfig()
    # `with` guarantees the handle is closed (was open/close without finally).
    with open(config_path, "r") as f:
        text_format.Parse(f.read(), config)
    # Proto3 scalars default to 0, which here means "unset".
    numeric_defaults = {
        'num_modules': 4,
        'gpu_count': 1,
        'input_size': 256,
        'conv1_channels': 128,
        'conv2_channels': 168,
        'conv3_channels': 204,
        'conv4_channels': 256,
        'num_landmark_pts': 68,
        'train_batch_size': 24,
        'eval_batch_size': 24,
        'train_sigma': 5.0,
        'eval_sigma': 5.0,
        'train_loss_interval': 20,
        'eval_interval': 100,
    }
    for field, default in numeric_defaults.items():
        if getattr(config, field) == 0:
            setattr(config, field, default)
    assert len(config.learning_rates) == len(config.learning_epoches), \
        'learning rates and epoches must have equal length'
    if len(config.learning_rates) == 0:
        config.learning_rates.extend([0.001, 0.0005, 0.0002, 0.0001, 0.00005, 2e-5, 1e-5])
        config.learning_epoches.extend([5, 5, 10, 10, 10, 20, 20])
    # String fields: "" means "unset".  (The old no-op branch that set
    # initial_checkpoint_path from "" to "" was removed — an empty
    # checkpoint path simply means "train from scratch".)
    if config.train_record_path == "":
        config.train_record_path = "/barn2/yuan/datasets/300wlp_20181002.tfrecord"
    if config.eval_record_path == "":
        config.eval_record_path = "/barn2/yuan/datasets/aflw2000_3d_20181002.tfrecord"
    if config.export_dir == "":
        raise ValueError('export dir must be explicitly specified in config file')
    if config.log_filename == "":
        config.log_filename = os.path.join(config.export_dir, 'training_log.txt')
    if os.path.exists(config.log_filename):
        pass
        # raise ValueError('logging filepath already exists')
    if config.plot_dir == "":
        config.plot_dir = os.path.join(config.export_dir, 'figures')
    if not os.path.exists(config.plot_dir):
        pass
        # os.makedirs(name=config.plot_dir, mode=0o777, exist_ok=False)
    return config
if __name__ == '__main__':
    # Smoke test: parse the default config and dump it to stdout.
    config = parse_config('default_config.txt')
    print(config.__str__())
986,881 | d0815e48ca2fd98e18056946a789e76f9b2f1486 |
##List First-Ends Elements:
## Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and
## makes a new list of only the first and last elements of the given list. For practice, write this code inside a function.
## Concepts to practice
## Lists and properties of lists
## List comprehensions (maybe)
## Functions
def list_ends(a_list):
    """Return a two-element list of the first and last items of *a_list*.

    For a single-element list both entries are that element.

    Raises
    ------
    IndexError
        If *a_list* is empty.
    """
    # Negative indexing replaces the old a_list[len(a_list) - 1] form.
    return [a_list[0], a_list[-1]]
##>>> list_ends([5, 10, 15, 20, 25])
986,882 | f75a9c55c3eb2756320466db17f270146983eb7a | #!usr/bin/python
# -*- coding:utf-8 -*-
"""
Construct a Data generator.
"""
import numpy as np
from tqdm import tqdm
import os
import random
class BatchGenerator(object):
    """Mini-batch generator over paired arrays X and y.

    Inputs may be ndarrays or list-like; they are coerced on construction.
    With ``shuffle=True`` the data are permuted up front and again at each
    epoch boundary.

    Example:
        Data_train = BatchGenerator(X=X_train_all, y=y_train_all, shuffle=True)
        Data_test = BatchGenerator(X=X_test_all, y=y_test_all, shuffle=False)
        X = Data_train.X
        y = Data_train.y
        X_batch, y_batch = Data_train.next_batch(batch_size)
    """
    def __init__(self, X, y, shuffle=False):
        # np.asarray passes plain ndarrays through and converts everything else.
        self._X = np.asarray(X)
        self._y = np.asarray(y)
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._number_examples = self._X.shape[0]
        self._shuffle = shuffle
        if shuffle:
            self._permute()
    def _permute(self):
        """Apply one random permutation to X and y in lockstep."""
        order = np.random.permutation(self._number_examples)
        self._X = self._X[order]
        self._y = self._y[order]
    @property
    def X(self):
        return self._X
    @property
    def y(self):
        return self._y
    @property
    def num_examples(self):
        return self._number_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size):
        """Return the next 'batch_size' examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._number_examples:
            # Epoch boundary: optionally reshuffle, then restart from 0.
            self._epochs_completed += 1
            if self._shuffle:
                self._permute()
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._number_examples
        stop = self._index_in_epoch
        return self._X[start:stop], self._y[start:stop]
def to_categorical(topics):
    """Convert lists of 1-based topic ids to a multi-hot 0/1 matrix.

    Args:
        topics: n_sample lists of topic ids (variable length per sample).

    Returns:
        y: ndarray, shape=(n_sample, 25551), with y[i, id-1] = 1 for every
        id in topics[i].

    Notes:
        * BUGFIX (py3): ``xrange`` no longer exists; uses vectorized indexing.
        * BUGFIX: no longer mutates the caller's id lists (the old code
          decremented each id inside the input lists as a side effect).

    Example:
        >>> y_batch = to_categorical(topic_batch)
        >>> print(y_batch.shape)
        >>> (10, 25551)
    """
    n_sample = len(topics)
    y = np.zeros(shape=(n_sample, 25551))
    for i in range(n_sample):
        if len(topics[i]):
            # ids are 1-based; shift to 0-based columns without mutating input
            y[i, np.asarray(topics[i]) - 1] = 1
    return y
def pad_X30(words, max_len=30):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X50(words, max_len=50):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X200(words, max_len=200):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X150(words, max_len=150):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X300(words, max_len=300):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X20(words, max_len=20):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def pad_X100(words, max_len=100):
    """Pad or truncate a word-id sequence to exactly *max_len* entries."""
    seq = np.asarray(words)
    if len(seq) >= max_len:
        return seq[:max_len]
    return np.hstack([seq, np.zeros(max_len - len(seq), dtype=int)])
def shuffle(d):
    """Return a randomly permuted copy of *d* (input is not modified)."""
    permuted = np.random.permutation(d)
    return permuted
def dropout(d, p=0.5):
    """Zero roughly ``p`` of the entries of *d* in place and return it.

    Indices are sampled with replacement, so fewer than ``int(len(d)*p)``
    distinct positions may actually be zeroed.
    """
    size = len(d)
    hit = np.random.choice(size, int(size * p))
    d[hit] = 0
    return d
def getshuffle(title, content):
    """Randomly augment a (title, content) pair; lengths are preserved.

    With probability ~0.5 applies dropout (30% of title, 70% of content);
    otherwise randomly shuffles each sequence.
    """
    coin = random.random()  # uniform draw in [0, 1)
    # print("augument:", coin)
    if coin > 0.5:
        return dropout(title, p=0.3), dropout(content, p=0.7)
    return shuffle(title), shuffle(content)
def wd_cut_docs(words_id, max_len=20):
    """Split a word-id document into sentences of at most *max_len* ids.

    Splits at the delimiter ids ``{4, 56, 49}``; any segment longer than
    *max_len* is chopped into *max_len*-sized pieces, and lone delimiter
    segments are dropped.

    Args:
        words_id: list or np.ndarray of word ids for the whole document.
        max_len: maximum sentence length after splitting.

    Returns:
        segs: list of lists, one inner list (sentence) of ids per sentence.

    Notes:
        BUGFIX (py3): ``filter`` returns an iterator in Python 3, so the old
        ``len(cut_index)`` raised TypeError; indices are now materialized.
        ``xrange`` -> ``range`` as well.
    """
    if type(words_id) is np.ndarray:
        words_id = words_id.tolist()
    if type(words_id) is not list:
        print('Type error! the words_id should be list or numpy.ndarray')
    set_splits = {4, 56, 49}  # delimiter ids
    ws_len = len(words_id)
    cut_index = [i for i in range(ws_len) if words_id[i] in set_splits]
    segs = list()  # sentences after splitting
    if len(cut_index) == 0:  # no delimiter anywhere
        seg_len = len(words_id)
        if seg_len > max_len:  # over-long: chop into max_len pieces
            for start in range(0, seg_len, max_len):
                end = min(seg_len, start + max_len)
                segs.append(words_id[start:end])
        else:  # short enough: return whole document as one sentence
            segs.append(words_id)
        return segs
    if cut_index[-1] != ws_len - 1:  # document does not end on a delimiter
        cut_index = cut_index + [ws_len - 1]
    cut_index = (np.asarray(cut_index) + 1).tolist()
    starts = [0] + cut_index[:-1]
    ends = cut_index
    for index in zip(starts, ends):
        if index[1] == index[0]:  # 1. empty segment (delimiter at pos 0)
            continue
        seg_len = index[1] - index[0]
        if seg_len == 1:  # 2. single token
            if words_id[index[0]] not in set_splits:  # keep unless delimiter
                segs.append([words_id[index[0]]])
            continue  # lone delimiter: drop
        if seg_len > max_len:  # 3. over-long: chop
            for start in range(index[0], index[1], max_len):
                end = min(index[1], start + max_len)
                segs.append(words_id[start:end])
        else:
            segs.append(words_id[index[0]:index[1]])  # 4. add the sentence
    return segs
def wd_pad_cut_docs(words_id, doc_len=10, sent_len=20):
    """Cut a word-id document into sentences and pad to a fixed grid.

    Args:
        words_id: list or np.ndarray of word ids for the whole document.
        doc_len: sentences per document; extras are dropped, missing ones
            become all-zero rows.
        sent_len: sentence length; short sentences are right-padded with 0
            (id for 'UNKNOWN').

    Returns:
        np.ndarray of shape (doc_len, sent_len).

    Notes:
        * BUGFIX (py3): ``np.asarray(map(...))`` produced a 0-d object array
          under Python 3; the mapping is now materialized explicitly.
        * BUGFIX: sentences are padded to *sent_len* instead of the
          hard-coded pad_X20 width, so non-default sent_len values work.
    """
    def _pad(seg):
        # Right-pad one sentence with zeros up to sent_len.
        seg = np.asarray(seg)
        if len(seg) >= sent_len:
            return seg[:sent_len]
        return np.hstack([seg, np.zeros(sent_len - len(seg), dtype=int)])
    sentences = wd_cut_docs(words_id, max_len=sent_len)
    if len(sentences) == 0:
        return np.zeros(shape=(doc_len, sent_len), dtype=int)
    grid = np.asarray([_pad(s) for s in sentences])
    if grid.shape[0] >= doc_len:  # enough sentences already
        return grid[:doc_len, :]
    filler = np.zeros(shape=(doc_len - grid.shape[0], sent_len), dtype=int)
    return np.vstack([grid, filler])
def ch_cut_docs(chs_id, max_len=52):
    """Split a character-id document into sentences of at most *max_len* ids.

    Splits at the delimiter ids ``{8, 14, 77}``; any segment longer than
    *max_len* is chopped into *max_len*-sized pieces, and lone delimiter
    segments are dropped.

    Args:
        chs_id: list or np.ndarray of character ids for the whole document.
        max_len: maximum sentence length after splitting.

    Returns:
        segs: list of lists, one inner list (sentence) of ids per sentence.

    Notes:
        BUGFIX (py3): ``filter`` returns an iterator in Python 3, so the old
        ``len(cut_index)`` raised TypeError; indices are now materialized.
        ``xrange`` -> ``range`` as well.
    """
    if type(chs_id) is np.ndarray:
        chs_id = chs_id.tolist()
    if type(chs_id) is not list:
        print('Type error! the chs_id should be list or numpy.ndarray')
    set_splits = {8, 14, 77}  # delimiter ids
    chs_len = len(chs_id)
    cut_index = [i for i in range(chs_len) if chs_id[i] in set_splits]
    segs = list()  # sentences after splitting
    if len(cut_index) == 0:  # no delimiter anywhere
        seg_len = len(chs_id)
        if seg_len > max_len:  # over-long: chop into max_len pieces
            for start in range(0, seg_len, max_len):
                end = min(seg_len, start + max_len)
                segs.append(chs_id[start:end])
        else:  # short enough: return whole document as one sentence
            segs.append(chs_id)
        return segs
    if cut_index[-1] != chs_len - 1:  # document does not end on a delimiter
        cut_index = cut_index + [chs_len - 1]
    cut_index = (np.asarray(cut_index) + 1).tolist()
    starts = [0] + cut_index[:-1]
    ends = cut_index
    for index in zip(starts, ends):
        if index[1] == index[0]:  # 1. empty segment (delimiter at pos 0)
            continue
        seg_len = index[1] - index[0]
        if seg_len == 1:  # 2. single token
            if chs_id[index[0]] not in set_splits:  # keep unless delimiter
                segs.append([chs_id[index[0]]])
            continue  # lone delimiter: drop
        if seg_len > max_len:  # 3. over-long: chop
            for start in range(index[0], index[1], max_len):
                end = min(index[1], start + max_len)
                segs.append(chs_id[start:end])
        else:
            segs.append(chs_id[index[0]:index[1]])  # 4. add the sentence
    return segs
def ch_pad_cut_docs(chs_id, doc_len=10, sent_len=52):
    """Cut a character-id document into sentences and pad to a fixed grid.

    Args:
        chs_id: list or np.ndarray of character ids for the whole document.
        doc_len: sentences per document; extras are dropped, missing ones
            become all-zero rows.
        sent_len: sentence length; short sentences are right-padded with 0
            (id for 'UNKNOWN').

    Returns:
        np.ndarray of shape (doc_len, sent_len).

    Notes:
        * BUGFIX: the old code called ``pad_X52``, which is not defined
          anywhere (NameError at runtime); padding is now done inline to
          *sent_len*.
        * BUGFIX (py3): ``np.asarray(map(...))`` produced a 0-d object array
          under Python 3.
    """
    def _pad(seg):
        # Right-pad one sentence with zeros up to sent_len.
        seg = np.asarray(seg)
        if len(seg) >= sent_len:
            return seg[:sent_len]
        return np.hstack([seg, np.zeros(sent_len - len(seg), dtype=int)])
    sentences = ch_cut_docs(chs_id, max_len=sent_len)
    if len(sentences) == 0:
        return np.zeros(shape=(doc_len, sent_len), dtype=int)
    grid = np.asarray([_pad(s) for s in sentences])
    if grid.shape[0] >= doc_len:  # enough sentences already
        return grid[:doc_len, :]
    filler = np.zeros(shape=(doc_len - grid.shape[0], sent_len), dtype=int)
    return np.vstack([grid, filler])
def train_batch(X, y, batch_path, batch_size=128):
    """Split (X, y) into numbered .npz batch files under *batch_path*.

    Each file ``<batch_num>.npz`` holds arrays ``X`` and ``y`` for one
    batch; the directory is created if missing.

    Note: BUGFIX (py3) — ``xrange`` replaced with ``range``.
    """
    if not os.path.exists(batch_path):
        os.makedirs(batch_path)
    sample_num = len(X)
    batch_num = 0
    for start in tqdm(range(0, sample_num, batch_size)):
        end = min(start + batch_size, sample_num)
        batch_name = batch_path + str(batch_num) + '.npz'
        X_batch = X[start:end]
        y_batch = y[start:end]
        np.savez(batch_name, X=X_batch, y=y_batch)
        batch_num += 1
    print('Finished, batch_num=%d' % (batch_num))
def eval_batch(X, batch_path, batch_size=128):
    """Split X into numbered .npy batch files under *batch_path*.

    Notes:
        * BUGFIX (py3): ``xrange`` replaced with ``range``.
        * BUGFIX: the final message printed ``batch_num + 1`` — one more
          than the number of files written and inconsistent with
          ``train_batch``; it now prints ``batch_num``.
    """
    if not os.path.exists(batch_path):
        os.makedirs(batch_path)
    sample_num = len(X)
    print('sample_num=%d' % sample_num)
    batch_num = 0
    for start in tqdm(range(0, sample_num, batch_size)):
        end = min(start + batch_size, sample_num)
        batch_name = batch_path + str(batch_num) + '.npy'
        X_batch = X[start:end]
        np.save(batch_name, X_batch)
        batch_num += 1
    print('Finished, batch_num=%d' % batch_num)
986,883 | 92c745a6b94a5188e76b8ea35332f64beb867aac | n=input("Enter a number")
try:
n=int(n)
sum=(n*(n+1))//2
print(sum)
except:
print("invalid")
|
986,884 | b2c8b45a60c73634b4cfbaa86f5ceb4e43e383e3 | # coding=utf-8
# Copyright 2017 Foursquare Labs Inc. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar as task
from fsqio.pants.python.filtered_python_requirements import FilteredPythonRequirements
from fsqio.pants.python.tasks.futurize_task import FuturizeTask
from fsqio.pants.python.tasks.mypy_task import MypyTask
from fsqio.pants.python.tasks.pytest_prep import PytestPrep
from fsqio.pants.python.tasks.pytest_run import PytestRun
def build_file_aliases():
    """Expose `filtered_python_requirements` as a BUILD-file factory."""
    factories = {
        'filtered_python_requirements': FilteredPythonRequirements,
    }
    return BuildFileAliases(context_aware_object_factories=factories)
def register_goals():
    """Register this plugin's tasks with the pants goal registry."""
    # Stand-alone lint/codemod tasks under their own goals.
    task(name='mypy', action=MypyTask).install('mypy')
    task(name='futurize', action=FuturizeTask).install('futurize')
    # Swap the stock pytest tasks in the 'test' goal for the forked ones;
    # each stock task must be uninstalled before its replacement goes in.
    Goal.by_name('test').uninstall_task('pytest-prep')
    task(name='pytest-prep', action=PytestPrep).install('test')
    Goal.by_name('test').uninstall_task('pytest')
    task(name='pytest', action=PytestRun).install('test')
|
986,885 | 7a6682d0fa60ebb8c2f1108e68bffd483b1a5f7f | from aiohttp import web
from aiohttp_security import check_authorized, forget
from ...db.models.user import User
async def set_email(request: web.Request):
    """Update the authenticated user's email from the JSON request body."""
    user: User = await check_authorized(request)
    payload = await request.json()
    user.email = payload["email"]
    user.save()
    return web.HTTPOk()
async def set_password(request: web.Request):
    """Set a new password for the authenticated user from the JSON body."""
    user: User = await check_authorized(request)
    payload = await request.json()
    user.set_password(payload["password"])
    user.save()
    return web.HTTPOk()
async def delete_account(request: web.Request):
    """Delete the authenticated user (and related rows) and end the session."""
    user: User = await check_authorized(request)
    user.delete_instance(recursive=True)
    ok = web.HTTPOk()
    # Drop the auth session so the deleted account stays logged out.
    await forget(request, ok)
    return ok
|
986,886 | 2abad6560ff22e61602086c0ec1c6c24646403c1 | from __future__ import annotations
import json
from abc import abstractmethod
from typing import Iterable
from cowait.tasks import TaskDefinition, RemoteTask
from cowait.utils import EventEmitter
from .const import ENV_TASK_CLUSTER, ENV_TASK_DEFINITION
class ClusterProvider(EventEmitter):
    """Abstract base for cluster backends that spawn and manage tasks."""
    def __init__(self, type, args=None):
        """
        Arguments:
            type (str): Backend type identifier.
            args (dict, optional): Backend-specific options.

        Note:
            BUGFIX: the default was previously a mutable ``{}`` shared
            between every instance constructed without ``args`` (classic
            mutable-default pitfall); a fresh dict is now created per
            instance.  ``ClusterProvider(t)`` still yields ``args == {}``.
        """
        super().__init__()
        self.type = type
        self.args = {} if args is None else args
    @abstractmethod
    def spawn(self, taskdef: TaskDefinition) -> RemoteTask:
        """ Spawn a task in the cluster """
        raise NotImplementedError()
    @abstractmethod
    def destroy(self, task_id: str) -> None:
        """ Destroy a task """
        raise NotImplementedError()
    @abstractmethod
    def destroy_all(self) -> None:
        """ Destroy every task managed by this provider """
        raise NotImplementedError()
    @abstractmethod
    def destroy_children(self, parent_id: str) -> None:
        """ Destroy all tasks spawned by the given parent task """
        raise NotImplementedError()
    @abstractmethod
    def wait(self, task: RemoteTask) -> None:
        """ Wait for task to exit """
        raise NotImplementedError()
    @abstractmethod
    def logs(self, task: RemoteTask) -> Iterable[str]:
        """ Stream logs from task """
        raise NotImplementedError()
    @abstractmethod
    def list_all(self) -> list:
        """ List all tasks known to the cluster """
        raise NotImplementedError()
    def serialize(self) -> dict:
        """ Serialize ClusterProvider into a dict """
        return {
            'type': self.type,
            **self.args,
        }
    def create_env(self, taskdef: TaskDefinition) -> dict:
        """
        Create a container environment dict from a task definition.

        Arguments:
            taskdef (TaskDefinition): Task definition

        Returns:
            env (dict): Environment variable dict
        """
        return {
            **taskdef.env,
            ENV_TASK_CLUSTER: json.dumps(self.serialize()),
            ENV_TASK_DEFINITION: json.dumps(taskdef.serialize()),
        }
    def find_agent(self):
        """ Return the cluster's agent task, if any (None by default). """
        return None
|
986,887 | 2296c532875a3cbadd236a98dbd9e162cf11a754 | class Solution:
def minCost(self, colors: str, neededTime: 'List[int]') -> int: # O( NlogN | N )
output = 0
pq = []
heapq.heapify(pq)
prev = colors[0]
for i in range(1, len(colors)): # push all the consecutive same colors to the heap.
curr = colors[i]
if curr == prev:
if pq:
heapq.heappush(pq, neededTime[i])
else: # new heap, need to push the previous and current to the heap
heapq.heappush(pq, neededTime[i-1])
heapq.heappush(pq, neededTime[i])
else:
if pq: #find the smallest ones from the heap, and take them out
while len(pq) > 1:
output += heapq.heappop(pq)
heapq.heappop(pq)
prev = colors[i]
while len(pq) > 1: #check the tail
output += heapq.heappop(pq)
return output
|
986,888 | d4714318ef6d4018375dc39b453024c5f50e9967 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Kitten(models.Model):
    """Adoptable kitten profile owned by a Django auth User."""
    #django automatically assigns ID
    name = models.CharField(max_length=50)
    description = models.TextField()
    # photo URL stored as free text (not validated as a URL)
    photo_url = models.TextField()
    birthday = models.DateField(auto_now=False, auto_now_add=False)
    # food portion size; unit is not specified here — confirm with callers
    food_portion = models.FloatField()
    #allows three values: None, True, False
    fluffy_boolean = models.NullBooleanField()
    #user_id is the foreign key representing owner; when owner deleted, this kitten will be deleted too
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    # auto_now=True: refreshed on every save()
    timestamp_last_edited = models.DateField(auto_now=True, auto_now_add=False)
    # auto_now_add=True: set once at creation
    timestamp_created = models.DateField(auto_now=False, auto_now_add=True)
    #tells Django which field to use as display on the Django admin (analogous to console but on the browser)
    def __str__(self):
        return self.name
986,889 | 72238b12a05095b2fc75e7623745a0196d34eaa5 | from torch.utils import data
class Dataset(data.Dataset):
    """Torch dataset pairing padded input sequences with language labels."""
    def __init__(self, padded_sequences, lang_labels):
        self.padded_sequences = padded_sequences
        self.lang_labels = lang_labels
    def __len__(self):
        return len(self.padded_sequences)
    def __getitem__(self, index):
        # One (sequence, label) pair per index.
        return self.padded_sequences[index], self.lang_labels[index]
|
986,890 | fa45cf460d97254b10937c4f79cbe738a4aa0a6a | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Cell 1: primality test by trial division (prints in Turkish:
# "asal" = prime, "asal değildir" = not prime).
n = int(input("n="))
asal = 1
for i in range(2,n,1):
    if n % i == 0 :
        asal = 0
if asal == 0:
    print(n,"sayısı asal değildir.")
else:
    print(n,"sayısı asaldır.")
# In[3]:
# Cell 2: same primality test repeated.
n = int(input("n="))
asal = 1
for i in range(2,n,1):
    if n % i == 0:
        asal =0
if asal ==0:
    print(n,"sayısı asal değildir")
else:
    print(n,"sayısı asaldır")
# In[4]:
# Cell 3: prime factorization — print each factor ("Acarpan") of n.
n = int(input("n="))
Acarpan = 2
while n>1:
    if n%Acarpan == 0:
        n = n / Acarpan
        print(Acarpan)
    else:
        Acarpan = Acarpan + 1
# In[1]:
# Cell 4: running product of the digits of n
# ("sonhane" = last digit, "carpim" = product).
n = int(input("n="))
carpim = 1
print("sonhane"," ", "carpim")
while n>0:
    sonhane = int(n%10)
    carpim = carpim * sonhane
    print(" ", sonhane," ",carpim)
    n = (n - sonhane) / 10
# In[7]:
# Cell 5: multiplicative persistence — repeat the digit product
# until a single-digit number remains.
n = int(input("n="))
while n>9:
    carpim = 1
    while n>0:
        sonhane = int(n%10)
        carpim = carpim * sonhane
        n = (n - sonhane) / 10
    print(carpim)
    n = carpim
# In[10]:
# Cell 6: bouncing-ball total distance ("yol") with height ("h")
# losing 20% per bounce, until the bounce height drops below 1.
h = 5
yol = 0
print(yol," ", h)
yol = yol + h
h = h*0.8
print(yol , " ", h)
while h>1:
    yol = yol + 2*h
    h = h*0.8
    print(yol," ",h)
# In[ ]:
986,891 | 7da53770e76d39c19ab7b6e6c830afeb69e15395 | from django.db import models
# Create your models here.
class ProductKeluar(models.Model):
    """Outgoing-product (stock-out) record."""
    category = models.CharField(max_length=200, db_index=True)
    product = models.CharField(max_length=200, db_index=True)
    # quantity that left stock
    total = models.IntegerField()
    # unit of measure (Indonesian "satuan", e.g. pcs/kg)
    satuan = models.CharField(max_length=50)
    created = models.DateTimeField(auto_now_add=True)  # set once at creation
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    def __str__(self):
        # NOTE(review): shows the category, not the product name — confirm intended.
        return self.category
986,892 | 7ed8712ae080c12c7222b8c46837f1971a1603a3 | import urllib.request
# Download a sample Flickr image into the working directory.
s = "http://farm3.static.flickr.com/2331/1563253001_3e85878db7.jpg"
# NOTE(review): the source is a JPEG but it is saved as "test.png";
# the extension is misleading (the bytes are copied unmodified).
urllib.request.urlretrieve(s,"test.png")
986,893 | 8b5282e39c74a986da31575c50b619f2f99c081b | import argparse
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score
import os
from imutils import paths
import glob
import matplotlib.pyplot as plt
def getClassData(path):
    """Load every class file found under *path* and tag rows with a class id.

    Each file (found recursively) contributes one array loaded with
    ``np.load``; the class index is appended as a final column to each row.
    The label name is the file's base name up to the first dot.

    Args:
        path: directory prefix searched with glob pattern '**/*'.

    Returns:
        (class_name, class_dict): list of arrays with the appended label
        column, and a dict mapping class index -> label string.
    """
    i = 0
    class_dict = {}
    class_name = []
    for filename in glob.iglob(path + "**/*", recursive=True):
        print(filename)
        # BUGFIX: was filename.split('\\')[-1], which only worked on Windows;
        # os.path.basename handles both separators.
        file = os.path.basename(filename)
        label = file.split('.')[0]
        file = np.load(filename)
        class_dict[i] = label
        # Append the class index as the last column of every row.
        if i == 0:
            file = np.c_[file, np.zeros(len(file))]
        else:
            file = np.c_[file, i * np.ones(len(file))]
        class_name.append(file)
        i += 1
    return class_name, class_dict
def cnn_model():
    """Build and compile the Sequential CNN.

    Two conv+pool stages, dropout, then three dense layers ending in a
    softmax over ``num_classes`` classes.

    NOTE(review): reads the module-level global ``num_classes`` for the
    output width — it must be assigned before this is called.  Input shape
    (1, 28, 28) is channels-first, matching the file's
    K.set_image_dim_ordering('th') at import time.
    """
    # create model
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    print("[INFO] compiling model...")
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# ---- CLI ----
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--path", required = True, help = "path to input dataset")
args = vars(ap.parse_args())
print("[INFO] loading images...")
class_name = []
label_dict = {}
class_name, label_dict = getClassData(args['path'])
# NOTE(review): np.array over the per-class arrays assumes compatible row
# counts; each class is truncated to 15000 rows just below.
class_name = np.array(class_name)
y = []
for i in range(len(class_name)):
    y.extend(class_name[i][:15000, -1])
y = np.array(y)
for i in range(len(class_name)):
    class_name[i] = class_name[i][:15000, :-1]
class_data = []
for i in range(len(class_name)):
    class_data.extend(class_name[i])
class_data = np.array(class_data)
X = class_data
# Scale pixels to [0, 1]; 60/40 train/test split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X/255., y, test_size=0.4, random_state=0)
y_train_cnn = np_utils.to_categorical(y_train)
y_test_cnn = np_utils.to_categorical(y_test)
num_classes = y_test_cnn.shape[1]
# Reshape flat 784-pixel rows to channels-first (1, 28, 28) images.
X_train_cnn = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test_cnn = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# build the model
print("[INFO] creating model...")
model = cnn_model()
# Fit the model
print("[INFO] training model...")
records = model.fit(X_train_cnn, y_train_cnn, validation_split=0.3, epochs=10, batch_size=200)
# Final evaluation of the model
print("[INFO] evaluating model...")
scores = model.evaluate(X_test_cnn, y_test_cnn, verbose=0)
print('Final CNN accuracy: ', scores[1])
print("[INFO] saving model...")
model.save("model.h5")
cnn_probab = model.predict(X_test_cnn, batch_size=32, verbose=0)
# extract the probability for the label that was predicted:
p_max = np.amax(cnn_probab, axis=1)
# BUGFIX: `normed` was removed from matplotlib's hist in 3.x; `density` is
# the supported equivalent (same normalization).
plt.hist(p_max, density=True, bins=list(np.linspace(0, 1, 11)))
plt.xlabel('p of predicted class')
N = 10
plt.style.use("ggplot")
plt.figure()
# NOTE(review): newer Keras versions use history keys "accuracy"/"val_accuracy"
# instead of "acc"/"val_acc" — confirm against the installed version.
plt.plot(np.arange(0, N), records.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), records.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), records.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), records.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
986,894 | c8e2a5e233317c0c2a278be32dcd7a6e4ffc9138 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 16:06:11 2020
@author: Mark
"""
import sys
sys.path.append('..')
import numpy as np
import ButcherTableau as BT
# Integrator step size and initial value, respectively.
h = .5
b = .25
def QuadFunc(x):
    # Right-hand side f(y) = h*y of the linear ODE dy/dt = h*y.
    # With y(0) = b the exact solution at t = 1 is b * exp(h), which is the
    # analytic reference printed below for comparison.
    return h * x
# 'RK45' selects a tableau from the project's ButcherTableau module; the
# integration semantics are defined outside this file.
bt = BT.ButcherTableauExplicitMethods('RK45')
x = np.asarray( [b] )
# NOTE(review): assumes IntegrationTimeIndependent advances the state from
# t=0 to t=1 using step size h — confirm against ButcherTableau's API.
y = bt.IntegrationTimeIndependent(x, QuadFunc)
# Analytic reference: exp(log(b) + h) == b * e**h.
print(np.exp( np.log(b) + h ))
# Numerical result from the Runge-Kutta integration, for visual comparison.
print(y[0])
986,895 | 6ed34cc86bbe06ce21f6db03ba37224fb508714e | xs = [42, 5, 4, -7, 2, 12, -3, -4, 11, 42, 2]
contains = True
for number in xs:
if 42 % number != 0:
contains = False
break
print(contains)
|
986,896 | eec045f8cbc64ae5c6f6fb6e6f8c77e79842afe8 | # © Copyright Databand.ai, an IBM Company 2022
import logging
import sh
from dbnd_run.airflow.compat import AIRFLOW_VERSION_1, AIRFLOW_VERSION_2
def set_airflow_connection(
    conn_id, conn_type=None, conn_uri=None, extra=None, host=None
):
    """Recreate an Airflow connection through the ``airflow`` CLI.

    Any existing connection named *conn_id* is deleted first (failures are
    ignored), then the connection is added with the supplied attributes.
    Handles the CLI syntax differences between Airflow 1.x and 2.x.
    """

    def _cli_flag(flag):
        # Airflow 1.x spells option names with underscores after the "--"
        # prefix (e.g. "--conn_type"); 2.x uses dashes ("--conn-type").
        if AIRFLOW_VERSION_1:
            return flag[0:2] + flag[2:].replace("-", "_")
        return flag

    if AIRFLOW_VERSION_2:
        add_command = ["connections", "add", conn_id]
    else:
        add_command = ["connections", "--add", "--conn_id", conn_id]

    # Append each provided attribute with its version-appropriate flag.
    for flag, value in (
        ("--conn-type", conn_type),
        ("--conn-extra", extra),
        ("--conn-uri", conn_uri),
        ("--conn-host", host),
    ):
        if value:
            add_command.extend([_cli_flag(flag), value])

    # Best-effort removal of a pre-existing connection with the same id;
    # a non-zero exit (connection absent) is expected and ignored.
    delete_command = (
        ["connections", "delete", conn_id]
        if AIRFLOW_VERSION_2
        else ["connections", "--delete", "--conn_id", conn_id]
    )
    try:
        sh.airflow(delete_command)
    except sh.ErrorReturnCode:
        pass

    logging.info("running: airflow {}".format(" ".join(add_command)))
    sh.airflow(add_command, _truncate_exc=False)
|
986,897 | cc1e3e5f6922cedfd0fccefa0553d6c70f802928 | from . import gtfstable
from . import helpers
from . import stops
from . import routes
from . import trips
from . import stop_times
from . import calendar
from . import calendar_dates
from . import fare_attributes
from . import shapes
__all__ = ['gtfstable', 'helpers', 'stops', 'routes', 'trips', 'stop_times', 'calendar', 'calendar_dates', 'fare_attributes', 'shapes'] |
986,898 | 0f8740387684de8ad4bf1931fbd0859955ade2f6 | #!/usr/bin/env python3
# UDP sender: fire one datagram at a local receiver and exit.
import socket

UDP_IP = "127.0.0.1"
UDP_PORT = 5005
MESSAGE = "Hello, World!"

print("UDP target IP:", UDP_IP)
print("UDP target port:", UDP_PORT)
print("message:", MESSAGE)

# BUG FIX: the original built the payload with bytearray + map(ord, ...),
# which raises ValueError for any character with a codepoint > 255.
# str.encode() produces correct bytes for arbitrary text.
payload = MESSAGE.encode("utf-8")

sock = socket.socket(socket.AF_INET,  # Internet
                     socket.SOCK_DGRAM)  # UDP
try:
    sock.sendto(payload, (UDP_IP, UDP_PORT))
finally:
    # The original leaked the socket; close it deterministically.
    sock.close()
986,899 | 50bf487940000aacdd98e08e54cbaf731c11c68f | from django.db import models
# Create A Blog model
# title
# pub date
# Text body
# image
class Blog(models.Model):
    """A single blog post: title, publication timestamp, body text and an image."""

    # Post headline, capped at 300 characters at the database level.
    title=models.CharField(max_length=300)
    # Publication timestamp; no auto_now/auto_now_add, so callers set it.
    pub_date=models.DateTimeField()
    # Full post content, unbounded length.
    body=models.TextField()
    # Uploads are stored under MEDIA_ROOT/images/.
    # NOTE(review): ImageField requires Pillow at runtime — verify it is in
    # the project's dependencies.
    image = models.ImageField(upload_to='images/')
# Add Blog app to settings
# python manage.py makemigrations if ok
# python manage.py migrate
# Create a migrate
# Migrate
# Add to the admin
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.