# ---- kylesadler/Zn-Polynomial-Factorizer / factorizer.py ----
from itertools import product
from pprint import pformat
import sys
def main():
coefficients = [1,4,3,0,1,2]
run(*coefficients)
def run(*coefficients):
for p in [2,3,5]:
print(f'\n{p}:\n')
results = factor(p, coefficients)
for result in results:
print(list(result[0]), list(result[1]))
# def factor(coefficients, mod_coefficients):
# TODO make this work with arbitrary ideal mods
def factor(p, coefficients):
""" coefficients is a list of numbers
p is a prime integer (also works with non-primes, just saying)
"""
coefficients = mod(coefficients, p)
degree = get_degree(coefficients)
target = polynomial(coefficients)
seen = {}
possible = []
# loop over polynomials
for coef1 in product(*[range(p)]*(degree+1)):
coef1 = remove_leading_zeros(coef1)
degree1 = get_degree(coef1)
if degree1 < 1 or degree1 == degree:
continue
# if coef1 in seen:
# continue
# else:
# seen[coef1] = 1
# print(degree1)
# print(coef1, end=" ")
for coef2 in product(*[range(p)]*(degree - degree1+1)):
if coef2[0] == 0:
continue
product_ = multiply(coef1, coef2, p)
# print(coef1, coef2, product, coefficients)
assert len(coefficients) == len(product_)
if coefficients == product_:
possible.append((coef1, coef2))
# seen[coef2] = 1
return [ x for x in possible if is_monic(x[0]) and is_monic(x[1]) ]
def polynomial(*args):
""" args is list of coefficients corresponding to powers (n, ..., 0) or just the numbers (not a list)
returns function of polynomial
"""
if len(args) == 1 and (isinstance(args[0], tuple) or isinstance(args[0], list)):
args = args[0]
def p(x):
output = 0
power = 1
for arg in args[::-1]:
output += arg * power
power *= x
return output
return p
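# quick sanity check of polynomial() (illustrative, not part of the original script):
# polynomial(1, 2, 3)(2) == 1*2**2 + 2*2 + 3 == 11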
def multiply(coef1, coef2, p):
""" multiplies two sets of coefficients and mods the result by p """
output = [0]*(len(coef1)+len(coef2)-1)
for i, a in enumerate(coef1[::-1]):
for j, b in enumerate(coef2[::-1]):
output[len(output) - i - j - 1] += a*b
return mod(output, p)
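# e.g. multiply((1, 1), (1, 1), 2) -> [1, 0, 1], i.e. (x + 1)^2 == x^2 + 1 (mod 2)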
# utility functions
def remove_leading_zeros(coefficients):
first_non_zero = next((x for x in coefficients if x != 0), None)
    if first_non_zero is None:
return [0]
return coefficients[coefficients.index(first_non_zero):]
def get_degree(coefficients):
""" returns degree of polynomial with given coefficients
ex: (1,2,3) are the coefficients of x^2 + 2x + 3 which has degree 2
"""
return len(remove_leading_zeros(coefficients)) - 1
def mod(coefficients, n):
""" mod coefficients by n """
return remove_leading_zeros([x % n for x in coefficients])
def is_monic(coefficients):
return coefficients[0] == 1
def get_matrices():
n = int(sys.argv[1])
for row1 in product(*[range(n)]*3):
for row2 in product(*[range(n)]*3):
for row3 in product(*[range(n)]*3):
matrix = [row1, row2, row3]
# print(matrix)
det = det3x3(matrix)
if det in [2,5]:
print(det, pformat(matrix))
def det3x3(m): # m is a matrix
return m[0][0] * det2x2(m[1][1], m[1][2], m[2][1], m[2][2]) \
- m[0][1] * det2x2(m[1][0], m[1][2], m[2][0], m[2][2]) \
+ m[0][2] * det2x2(m[1][0], m[1][1], m[2][0], m[2][1])
def det2x2(a,b,c,d):
return a*d-b*c
if __name__ == "__main__":
# main()
    get_matrices()

# ---- Hank-07/proxy-pool / crawler.py ----
import requests
import base64
import os
from pyquery import PyQuery as pq
from fake_useragent import UserAgent
def us_proxy_crawler(proxy_ip, headers):
count = 0
response = requests.get('https://www.us-proxy.org/', headers=headers).text
doc = pq(response)
rows = doc('tr')
for row in rows:
name = pq(row).find('td').eq(0).text()
value = pq(row).find('td').eq(1).text()
if name and value:
count += 1
print(f'{count}, {name}, {value}')
proxy_ip[name] = value
            if count == 200:
                return proxy_ip
    # fewer than 200 proxies found: still return what we collected
    return proxy_ip
def free_proxy_crawler(proxy_ip, headers):
count = 0
for index in range(1, 6):
response = requests.get(f'http://free-proxy.cz/zh/proxylist/country/all/all/ping/all/{index}',
headers=headers).text
doc = pq(response)
rows = doc('tbody > tr')
for row in rows:
name = pq(row).find('td').eq(0).text()
try:
name = name[name.find("\"") + 1:name.find(")") - 1]
ip = base64.b64decode(name).decode()
            except Exception:
continue
value = pq(row).find('td').eq(1).text()
if ip and value:
count += 1
print(f'{count}, {ip}, {value}')
proxy_ip[ip] = value
if count == 200:
return proxy_ip
return proxy_ip
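# The site stores addresses Base64-encoded inside a JS snippet, which the loop
# above extracts and decodes; e.g. (illustrative value):
# base64.b64decode("MTI3LjAuMC4x").decode() == "127.0.0.1"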
def open_proxy_crawler(proxy_ip, headers):
"""
    Source site: https://openproxy.space/list
"""
ip_list = None
count = 0
    # download the list straight from the source site; it provides thousands of free proxy IPs
with open('open_proxy_ip.txt', 'r') as f:
        ip_list = f.readlines()
for ip_info in ip_list:
try:
name, port = ip_info.strip().split(':')
proxy_ip[name] = port
count += 1
print(f'{count}, {name}, {port}')
        except Exception:
continue
return proxy_ip
def crawl_kuaidaili(proxy_ip, headers):
"""
    Kuaidaili free proxy list: https://www.kuaidaili.com
"""
url = "https://www.kuaidaili.com/free/{}"
count = 0
items = ["inha/1/"]
for proxy_type in items:
try:
html = requests.get(url.format(proxy_type), headers=headers, timeout=5).text
if html:
doc = pq(html)
for proxy in doc(".table-bordered tr").items():
ip = proxy("[data-title=IP]").text()
port = proxy("[data-title=PORT]").text()
if ip and port:
proxy_ip[ip] = port
count += 1
print(f"{count}, http://{ip}:{port}")
        except Exception:
continue
return proxy_ip
def crawl_data5u(proxy_ip, headers):
"""
    Data5u proxy list: http://www.data5u.com/
"""
url = "http://www.data5u.com/"
count = 0
try:
html = requests.get(url, headers=headers).text
if html:
doc = pq(html)
for index, item in enumerate(doc("li ul").items()):
if index > 0:
ip = item("span:nth-child(1)").text()
port = item("span:nth-child(2)").text()
schema = item("span:nth-child(4)").text()
if ip and port and schema:
proxy_ip[ip] = port
count += 1
print(f"{count}, {schema}://{ip}:{port}")
    except Exception:
pass
return proxy_ip
def main():
default_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
user_agent = UserAgent(fallback=default_user_agent)
proxy_ip = {}
headers = {"User-Agent": user_agent.random}
proxy_ip = open_proxy_crawler(proxy_ip, headers)
proxy_ip = crawl_kuaidaili(proxy_ip, headers)
proxy_ip = crawl_data5u(proxy_ip, headers)
proxy_ip = us_proxy_crawler(proxy_ip, headers)
proxy_ip = free_proxy_crawler(proxy_ip, headers)
return proxy_ip
if __name__ == '__main__':
default_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
user_agent = UserAgent(fallback=default_user_agent)
proxy_ip = {}
headers = {"User-Agent": user_agent.random}
proxy_ip = crawl_data5u(proxy_ip, headers)

# ---- Aasthaengg/IBMdataset / Python_codes/p02934/s231832097.py ----
import sys
def input():
return sys.stdin.readline().strip()
def main():
n = int(input())
a = list(map(int, input().split()))
    # 1 / (1/a_1 + ... + 1/a_N): the inverse of the sum of inverses
    total = 0
    for i in a:
        total += 1 / i
    print(1 / total)
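# e.g. N=2, a=[2, 2] -> 1 / (1/2 + 1/2) = 1.0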
main()

# ---- EricMontague/MailChimp-Newsletter-Project / server/app/performance_scraper/performance_scraper/pipelines.py ----
"""This module contains the Scrapy pipeline classes that send scraped data to the Flask API."""
from http import HTTPStatus
from scrapy.exceptions import CloseSpider, DropItem
from scrapy.pipelines.images import ImagesPipeline
from scrapy.http import Request
from app.performance_scraper.performance_scraper.flask_api.api_client import FlaskAPIClient
from app.performance_scraper.performance_scraper.flask_api.auth import AuthManager
from app.performance_scraper.performance_scraper.flask_api.exceptions import FlaskAPIException
class APIPipeline(object):
"""Class to send data to the Flask API."""
def __init__(self, api_client):
self._api_client = api_client
@classmethod
def from_crawler(cls, crawler):
"""Return a new APIPipeline instance."""
auth_manager = AuthManager(
username=crawler.settings.get("SCRAPY_USERNAME"),
password=crawler.settings.get("SCRAPY_PASSWORD"),
email=crawler.settings.get("SCRAPY_EMAIL"),
cache_path=crawler.settings.get("TOKEN_FILE_PATH"),
)
token = auth_manager.get_cached_token()
api_client = FlaskAPIClient(token=token, auth_manager=auth_manager)
return cls(api_client=api_client)
def process_item(self, performance_item, spider):
"""Send scraped data to the Flask API to be stored."""
if not performance_item:
raise DropItem("Performance Item is Empty")
venue_item = performance_item.pop("venue")
artist_item = performance_item.pop("artist")
image_item = artist_item.pop("image", None)
# attempt to get venue resource from API. If it doesn't exist, create it
venue_resource = self.retrieve_venue_info(venue_item)
if venue_resource is None:
venue_resource = self.store_venue_info(venue_item)
# attempt to get artist resource from API. If it doesn't exist, create it
artist_resource = self.retrieve_artist_info(artist_item)
if artist_resource is None:
artist_resource = self.store_artist_info(artist_item)
# update artist's image
if image_item is not None:
self.store_artist_image(
artist_resource["id"],
spider.settings.get("IMAGE_DOWNLOAD_DIRECTORY") + "/" + image_item["path"]
)
# update performance item to include venue and artist id's that will
# need to be sent in the payload to the API
performance_item["venue_id"] = venue_resource["id"]
performance_item["artist_id"] = artist_resource["id"]
self.store_performance_info(dict(performance_item))
return performance_item
def retrieve_venue_info(self, venue_item):
"""Return a venue resource by making a call to the Flask API."""
try:
venue_resource = self._api_client.get_venue_by_name(venue_item["name"])
except FlaskAPIException as api_exception:
if api_exception.http_status == HTTPStatus.NOT_FOUND:
venue_resource = None
else:
raise CloseSpider(
reason=api_exception.message
)
return venue_resource
def store_venue_info(self, venue_item):
"""Store the venue's information by making a call to the Flask API."""
return self._api_client.create_venue(venue_item)
def retrieve_artist_info(self, artist_item):
"""Return an artist resource by making a call to the Flask API."""
try:
artist_resource = self._api_client.get_artist_by_name(artist_item["name"])
except FlaskAPIException as api_exception:
if api_exception.http_status == HTTPStatus.NOT_FOUND:
artist_resource = None
else:
raise CloseSpider(
reason=api_exception.message
)
return artist_resource
def store_artist_info(self, artist_item):
"""Store the artist's information by making a call to the Flask API."""
# Create artist resource. Returns None if artist already exists
return self._api_client.create_artist(artist_item)
def store_artist_image(self, artist_id, image):
"""Store the artist's image by making a call to the Flask API."""
# attempt to update artist's image if one was found on the scraped website
with open(image, "rb") as image_file:
self._api_client.upload_artist_image(artist_id, image_file)
def store_performance_info(self, performance_item):
"""Store the performance information by making a call to the Flask API."""
self._api_client.create_performance(performance_item)
class ArtistImagePipeline(ImagesPipeline):
"""Class to process scraped images."""
def get_media_requests(self, item, info):
"""Return a list of request objects for each image url."""
if not item:
raise DropItem("Item is Empty")
requests = []
image = item["artist"].get(self.images_urls_field)
if image is not None:
requests = [Request(image["url"])]
return requests
def item_completed(self, results, item, info):
"""Add the image file path to the item, before returning said item."""
if not item:
raise DropItem("Item is Empty")
image = item["artist"].get(self.images_urls_field)
if image is not None:
completed, data = results[0]
if completed:
image[self.images_result_field] = data["path"]
return item
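
# A minimal sketch of how these pipelines might be wired up in the project's
# Scrapy settings (the module path and priority numbers are assumptions, not
# taken from this repo). The image pipeline runs first so that APIPipeline can
# read the downloaded file path:
#
# ITEM_PIPELINES = {
#     "performance_scraper.pipelines.ArtistImagePipeline": 300,
#     "performance_scraper.pipelines.APIPipeline": 400,
# }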

# ---- Aasthaengg/IBMdataset / Python_codes/p03161/s190220802.py ----
# B - Frog 2
N,K = map(int,input().split())
h = list(map(int,input().split()))
# a large value standing in for infinity
INF = 10**10
# DP table
dp = [0]*(100010)
# initialize the entire DP table
for i in range(100010):
    dp[i] = INF
# base case
dp[0] = 0
for v in range(1,N):
for k in range(1,K+1):
        # no foothold at position v-k to jump from
        if v-k < 0:
            continue
        # jump from foothold v-k to foothold v
        dp[v] = min(dp[v], dp[v-k] + abs(h[v]-h[v-k]))
print(dp[N-1])
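# Worked example (illustrative): K=2, h=[0, 5, 3] gives dp = [0, 5, 3];
# jumping 0 -> 2 directly costs |3-0| = 3, beating 0 -> 1 -> 2 (5 + 2 = 7).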

# ---- 2dos/DK64-Randomizer / base-hack/Build/create_helm_geo.py ----
"""Build Helm Geometry file."""
import zlib
from BuildClasses import ROMPointerFile
from BuildEnums import TableNames
from BuildLib import ROMName
geo_file = "helm.bin"
with open(ROMName, "rb") as rom:
geo_f = ROMPointerFile(rom, TableNames.MapGeometry, 0x11)
rom.seek(geo_f.start)
data = rom.read(geo_f.size)
if geo_f.compressed:
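        # wbits = 15 + 32 tells zlib to auto-detect a zlib or gzip header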
data = zlib.decompress(data, (15 + 32))
with open(geo_file, "wb") as geo:
geo.write(data)
with open(geo_file, "r+b") as geo:
geo_points = [0x37C4, 0x3834, 0x3894, 0x38F4, 0x3954, 0x39BC, 0x3A1C, 0x3A7C, 0x3ADC, 0x3B3C]
geo_overwrite = 4761
for point in geo_points:
geo.seek(point)
geo.write(geo_overwrite.to_bytes(4, "big"))

# ---- sidthakur08/article_search / on_technology_data/sent_vec.py ----
import numpy as np
import glob
import json
from tqdm import tqdm
import string
from nltk.tokenize import regexp_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from gensim.models import KeyedVectors
print("Downloading the wordnet from nltk...")
import nltk
nltk.download('wordnet')
file = 2
paths = glob.glob(f'./articles_data/{file}/*.json')
article_data = []
print("Adding article data...")
for path in tqdm(paths):
with open(path) as f:
article_data.append(json.load(f))
stopwords_eng = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
def process_text(text):
text = text.replace("\n"," ").replace("\r"," ")
punc_list = '!"#$%()*+,-./:;<=>?@^_{|}~'
t = str.maketrans(dict.fromkeys(punc_list," "))
text = text.translate(t)
t = str.maketrans(dict.fromkeys("'`",""))
text = text.translate(t)
    tokens = regexp_tokenize(text, pattern=r'\s+', gaps=True)
cleaned_tokens = []
for t in tokens:
if t not in stopwords_eng:
l = lemmatizer.lemmatize(t)
cleaned_tokens.append(l)
return cleaned_tokens
def get_vec(word):
    try:
        return model[word]
    except KeyError:
        # out-of-vocabulary token: contribute a zero vector
        return np.zeros(300)
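# Note: the sentence vector computed below is a plain additive embedding:
# the sum of the 300-d word2vec vectors of the title tokens, with
# out-of-vocabulary tokens contributing zero vectors.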
model = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin',binary=True,limit=10**6)
data = []
print("Tokenizing and Getting the sentence vector...")
for i in tqdm(range(len(article_data))):
full_t = article_data[i]['thread']['section_title']+' '+article_data[i]['thread']['title_full']
url = article_data[i]['thread']['url']
tokens = process_text(full_t)
    # seed with a zero vector so an empty token list still yields a 300-d vector
    sent_vector = sum([get_vec(t) for t in tokens], np.zeros(300)).tolist()
data.append({
'full_title':full_t,
'url':url,
'title_tokens':tokens,
'sentence_vector':sent_vector
})
print("Saving the data...")
with open(f"data_{file}.json","w") as f:
    json.dump(data,f)

# ---- nlsynth/iroha / examples/dataflow_chain.py ----
import sys
sys.path.append('../py')
from iroha import *
from iroha.iroha import *
d = IDesign()
mod = IModule(d, "mod")
def CreateTable(mod):
tab = ITable(mod)
st0 = IState(tab)
st1 = IState(tab)
tab.initialSt = st0
design_tool.AddNextState(st0, st1)
tab.states.append(st0)
tab.states.append(st1)
return tab
tab0 = CreateTable(mod)
tab1 = CreateTable(mod)
# Kicks tab0 by external input
ext_input = design_tool.CreateExtInput(tab0, "data_in", 0)
in_insn = IInsn(ext_input)
in_r = IRegister(tab0, "r")
in_r.isWire = True
in_r.valueType = IValueType(False, 0)
in_insn.outputs.append(in_r)
tab0.states[0].insns.append(in_insn)
df_in = design_tool.GetResource(tab0, "dataflow-in")
df_insn = IInsn(df_in)
df_insn.inputs.append(in_r)
tab0.states[0].insns.append(df_insn)
# Kicks tab1
sreg = design_tool.CreateSharedReg(tab0, "o", 0)
sreg.resource_params.AddValue("DEFAULT-VALUE", "0")
sinsn = IInsn(sreg)
bit0 = design_tool.AllocConstNum(tab0, False, 0, 1)
sinsn.inputs.append(bit0)
tab0.states[-1].insns.append(sinsn)
# Kicked by tab0
rreg = design_tool.CreateSharedRegReader(tab1, sreg)
rinsn = IInsn(rreg)
rwire = IRegister(tab1, "r")
rwire.isWire = True
rwire.valueType = IValueType(False, 0)
rinsn.outputs.append(rwire)
tab1.states[0].insns.append(rinsn)
df1_in = design_tool.GetResource(tab1, "dataflow-in")
df1_insn = IInsn(df1_in)
df1_insn.inputs.append(rwire)
tab1.states[0].insns.append(df1_insn)
# Triggers ext port
ext_output = design_tool.CreateExtOutput(tab1, "data_out", 0)
ext_output.resource_params.AddValue("DEFAULT-VALUE", "0")
oinsn = IInsn(ext_output)
bit1 = design_tool.AllocConstNum(tab1, False, 0, 1)
oinsn.inputs.append(bit1)
tab1.states[-1].insns.append(oinsn)
design_tool.ValidateIds(d)
DesignWriter(d).Write()
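# The generated IR design is emitted by DesignWriter; assuming it writes to
# stdout, a typical invocation would be: python dataflow_chain.py > out.iroha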

# ---- ronaldo-ramos-dev/space-ghost / player.py ----
import pygame
from settings import SCREEN_HEIGHT, SCREEN_WIDTH, PLAYER_SPEED
from projectile import Projectile
from pygame.locals import (
RLEACCEL,
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
class Player(pygame.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
self.surf = pygame.image.load("./assets/spaceghost.png").convert()
self.surf.set_colorkey((255,255,255), RLEACCEL)
self.rect = self.surf.get_rect()
def fire(self, group_sprite, projectile_group):
projectile = Projectile(self.rect.center[0], self.rect.center[1])
group_sprite.add(projectile)
projectile_group.add(projectile)
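        # note: the shot sample is re-read from disk on every call; loading it
        # once (e.g. in __init__) would avoid the repeated file I/O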
SHOOT_SOUND = pygame.mixer.Sound("./assets/shoot/shoot.wav")
channel=pygame.mixer.find_channel(True)
channel.set_volume(0.4)
channel.play(SHOOT_SOUND)
    def update(self, pressed_keys):
        if pressed_keys[K_UP]:
            self.rect.move_ip(0, -PLAYER_SPEED)
        if pressed_keys[K_DOWN]:
            self.rect.move_ip(0, PLAYER_SPEED)
        if pressed_keys[K_LEFT]:
            self.rect.move_ip(-PLAYER_SPEED, 0)
        if pressed_keys[K_RIGHT]:
            self.rect.move_ip(PLAYER_SPEED, 0)
# Keep player on the screen
if self.rect.left < 0:
self.rect.left = 0
if self.rect.right > SCREEN_WIDTH:
self.rect.right = SCREEN_WIDTH
if self.rect.top <= 0:
self.rect.top = 0
if self.rect.bottom >= SCREEN_HEIGHT:
self.rect.bottom = SCREEN_HEIGHT

# ---- edimusxero/Comic-Grabber / packages/shared_variables/__init__.py ----
#!/usr/bin/python3
import os
import json
import packages.configuration_generator as cg
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
config_file = os.path.join(root_dir, 'config.json')
if not os.path.isfile(config_file):
cg.__main__()
with open(config_file, 'r') as conf:
config = json.load(conf)
search_term = ""
img_option = False
ban_option = False
alt_option = ""
comic_name = ""
mark_for_deletion = False
max_results = ""
banned_dir = os.path.join(root_dir, 'banned')
"""
This requires tesseract OCR to be installed on your system. Set the path to the location of the .exe or bin in
the config.json file
"""
tesseract_command_path = config['tesseract_location']
"""
Set the download folder location inside the config.json file
"""
download_folder = config['download_folder']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36'
}
# List of ANSI color escape codes
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
BLACK = "\033[0;30m"
BROWN = "\033[0;33m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
RESET = '\033[0m'
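# Example usage of the color codes (illustrative):
# print(f"{GREEN}Download complete{RESET}")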

# ---- slieer/py / py-dev-study/src/simple/class_init.py ----
# -*- coding: utf-8 -*-
'''
last modified 2012-9-29
@author: slieer
'''
class Person:
i = 10
def __init__(self, name):
self.name = name
def sayHi(self):
print('Hello, my name is', self.name)
def f1(self,x, y):
return min(x, x+y)
class C:
f = f1
def g(self):
return 'hello world'
h = g
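# Note: f1 is defined at module level, but once assigned as a class attribute
# it behaves like any method: C().f(3, 4) calls f1(self, 3, 4) and returns 3.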
# an empty class
class Employee:
pass
if __name__ == '__main__':
p = Person('Swaroop')
p.sayHi()
print(Person.i)
x = C()
print(x.f(3,4))
print(x.h())
    #print(C.f())  # error: f needs an instance bound to self
print(id(x))
john = Employee()
    '''dynamically attach fields to the instance'''
john.name = 'John Doe'
john.dept = 'computer lab'
john.salary = 1000
print(john)

# ---- Aasthaengg/IBMdataset / Python_codes/p03086/s004056038.py ----
# -*- coding: utf-8 -*-
import sys
input=sys.stdin.readline
def main():
    s = input().strip()
    answers = []
    counter = 0
    for ch in s:
        if ch in "ACGT":
            counter += 1
        else:
            answers.append(counter)
            counter = 0
    answers.append(counter)  # flush the final run of ACGT characters
    print(max(answers))
if __name__=="__main__":
    main()

# ---- nod/boombot / plugins/webutil/textutils.py ----
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# get text from BeautifulSoup
#
from BeautifulSoup import BeautifulSoup, Comment
import re
import urllib,urlparse,cgi
def remove_params(url=None,remove=None,keep_only=None):
"""
remove parameters from a url
remove : tuple of parameters to remove, keep any others
: or "ALL" to remove all parameters
keep_only : keep only this tuple of parameters, remove all others
parameter order is not kept which may cause problems with uncache().
>>> remove_params('http://example.com/page.php?rs=644&x=y&z=zz&uid=1234&loc=en_US&lang=en',keep_only=('rs','uid'))
'http://example.com/page.php?uid=1234&rs=644'
>>> remove_params('http://example.com/page.php?rs=644&x=y&z=zz&uid=1234&loc=en_US&lang=en',remove=('rs','uid'))
'http://example.com/page.php?lang=en&loc=en_US&x=y&z=zz'
"""
if remove and keep_only:
raise TypeError('remove_params: use either remove OR keep_only argument')
s = urlparse.urlsplit(url)
params = dict(cgi.parse_qsl(s.query))
for k in params.keys():
if remove == "ALL" or (remove and k in remove) or (keep_only and not k in keep_only):
del params[k]
q = urllib.urlencode(params)
return urlparse.urlunsplit((s.scheme,s.netloc,s.path,q,s.fragment))
def filter_unicode(s):
import unicodedata
if not isinstance(s,unicode):
s = unicode(s, 'UTF-8')
unicodes = {
# unicode dash
u'\u2013' : '--',
# unicode single quotes
u'\u2018' : '\'',
u'\u2019' : '\'',
}
for k in unicodes:
s = s.replace(k, unicodes[k])
# ignore all other unicode chars
s = unicodedata.normalize('NFC', s).encode('ASCII', 'ignore')
return unicode(s)
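# e.g. filter_unicode(u'a \u2013 b') -> u'a -- b' (the en dash is mapped to '--')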
def entity2ascii(s):
    entities = {
        '&nbsp;' : ' ',
        '&#160;' : ' ',
        '&#xa0;' : ' ',
        '&quot;' : '"',
        '&laquo;' : '"',
        '&#171;' : '"',
        '&#xab;' : '"',
        '&raquo;' : '"',
        '&#187;' : '"',
        '&#xbb;' : '"',
        '&ldquo;' : '"',
        '&rdquo;' : '"',
        '&lsquo;' : '\'',
        '&rsquo;' : '\'',
        # ellipse
        '&hellip;': '...',
        # bullet
        '&bul;' : '*',
        # dash
        '&mdash;' : '-',
        '&ndash;' : '-',
        '&#151;' : '-',
        '&#150;' : '-',
        '&#45;' : '-',
        '&#173;' : '-',
        # single quote
        '&#39;' : '\'',
        '&apos;' : '\'',
        # copyright
        '&copy;' : '(c)',
        '&#169;' : '(c)',
        # dash
        '&#8211;' : '--',
        # dash
        '&#8212;' : '--',
        '&#x2014;' : '--',
        # open single quote
        '&#8216;' : '\'',
        # apostrophe
        '&#8217;' : '\'',
        # open double quote
        '&#8220;' : '"',
        # close double quote
        '&#8221;' : '"',
        # bullet
        '&#8226;' : '-',
        # ellipsis
        '&#8230;' : '...',
        # square dot
        '&#9632;' : '-',
        # ampersands
        '&amp;' : '&',
        '&#34;' : '"',
        '&#x22;' : '"',
        '&#38;' : '&',
        '&#x26;' : '&',
        '&#124;' : '|',
        '&#x7c;' : '|',
    }
s = filter_unicode(s)
for k in entities:
s = s.replace(k, entities[k])
return s
def get_text(soup):
"""
>>> s = BeautifulSoup("<p><!-- <valueof param> --> Text here")
>>> get_text(s)
u'Text here'
>>> s = BeautifulSoup(' hot <a href="example.com">Google Trends keywords</a>, maintaining')
>>> get_text(s)
u'hot Google Trends keywords, maintaining'
>>> s = BeautifulSoup(u'Big Bird\u2019s nest')
>>> get_text(s)
u"Big Bird's nest"
>>> s = "\xc2\xa0The majority "
>>> get_text(s)
u'The majority'
>>> s = BeautifulSoup('<title>title with » funny char</title>')
>>> get_text(s)
u'title with funny char'
"""
if not soup:
return ""
text = []
# sometimes we're passed a BeautifulSoup object, sometimes not
try:
soup.findAll
except:
soup = BeautifulSoup(soup)
for s in soup.findAll(text=lambda text:not isinstance(text,Comment)):
text.append(' '.join(entity2ascii(s).split()))
return re.sub(r'\s+([,.;?!])',r'\1', ' '.join(text)).strip()
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()

# ---- tianyu-github/sigGCN / lib/utilsdata.py ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 12:00:37 2020
@author: tianyu
"""
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from sklearn.preprocessing import Normalizer
import math
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
from sklearn.metrics.pairwise import euclidean_distances
import os
from sklearn import preprocessing
from sklearn import linear_model
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
#path = '/Users/tianyu/Google Drive/fasttext/gcn/pygcn-master/data/cora/'
#dataset = 'cora'
def high_var_dfdata_gene(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar) #small --> big
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[ind_maxvar[:num]], ind_maxvar[:num]
ind_gene = data.index.values[ind_maxvar[:num]]
return data.iloc[ind_maxvar[:num]],gene.loc[ind_gene]
def high_var_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_var_npdata(data, num, gene = None, ind=False): #data: gene*cell
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# geneind2 = np.random.choice(ind_maxvar[num//2:], size = num//2, replace = False)
# gene_ind = np.concatenate((gene_ind, geneind2))
#np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_tfIdf_npdata(data,tfIdf, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(tfIdf, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_expr_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[gene_ind]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_expr_npdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def get_rank_gene(OutputDir, dataset):
gene = pd.read_csv(OutputDir+dataset+'/rank_genes_dropouts_'+dataset+'.csv')
return gene
def rank_gene_dropouts(data, OutputDir, dataset):
# data: n_cell * n_gene
genes = np.zeros([np.shape(data)[1],1], dtype = '>U10')
train = pd.DataFrame(data)
train.columns = np.arange(len(train.columns))
# rank genes training set
dropout = (train == 0).sum(axis='rows') # n_gene * 1
dropout = (dropout / train.shape[0]) * 100
mean = train.mean(axis='rows') # n_gene * 1
notzero = np.where((np.array(mean) > 0) & (np.array(dropout) > 0))[0]
zero = np.where(~((np.array(mean) > 0) & (np.array(dropout) > 0)))[0]
train_notzero = train.iloc[:,notzero]
train_zero = train.iloc[:,zero]
zero_genes = train_zero.columns
dropout = dropout.iloc[notzero]
mean = mean.iloc[notzero]
dropout = np.log2(np.array(dropout)).reshape(-1,1)
mean = np.array(mean).reshape(-1,1)
reg = linear_model.LinearRegression()
reg.fit(mean,dropout)
residuals = dropout - reg.predict(mean)
residuals = pd.Series(np.array(residuals).ravel(),index=train_notzero.columns) # n_gene * 1
residuals = residuals.sort_values(ascending=False)
sorted_genes = residuals.index
sorted_genes = sorted_genes.append(zero_genes)
genes[:,0] = sorted_genes.values
genes = pd.DataFrame(genes)
genes.to_csv(OutputDir + dataset + "/rank_genes_dropouts_" + dataset + ".csv", index = False)
def data_noise(data): # data is samples*genes
for i in range(data.shape[0]):
#drop_index = np.random.choice(train_data.shape[1], 500, replace=False)
#train_data[i, drop_index] = 0
target_dims = data.shape[1]
noise = np.random.rand(target_dims)/10.0
data[i] = data[i] + noise
return data
def norm_max(data):
data = np.asarray(data)
max_data = np.max([np.absolute(np.min(data)), np.max(data)])
data = data/max_data
return data
def findDuplicated(df):
df = df.T
idx = df.index.str.upper()
filter1 = idx.duplicated(keep = 'first')
print('duplicated rows:',np.where(filter1 == True)[0])
indd = np.where(filter1 == False)[0]
df = df.iloc[indd]
return df.T
# In[]:
def load_labels(path, dataset):
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return labels
def load_usoskin(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='usoskin', net='String'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'data_13776.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(13776)+'.npz')
print(adj.shape)
labels = pd.read_csv(path +'/' +dataset +'/data_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_kolod(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='kolod', net='pcc'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'kolod.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = np.corrcoef(np.asarray(data))
#adj[np.where(adj < 0.3)] = 0
labels = pd.read_csv(path +'/' +dataset +'/kolod_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_largesc(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Intra-dataset/', dataset='Zhengsorted',net='String'):
if dataset == 'Zhengsorted':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_DownSampled_SortedPBMC_data.csv',index_col = 0, header = 0)
elif dataset == 'TM':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_TM_data.csv',index_col = 0, header = 0)
elif dataset == 'Xin':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Xin_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'BaronHuman':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'BaronMouse':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_MousePancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'Muraro':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Muraro_HumanPancreas_data_renameCols.csv',index_col = 0, header = 0)
elif dataset == 'Segerstolpe':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Segerstolpe_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'AMB':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_mouse_allen_brain_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
shuffle_index = np.loadtxt(os.path.join(path + dataset) +'/shuffle_index_'+dataset+'.txt')
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['Class']))}
labels['Class'] = labels['Class'].map(class_mapping)
del class_mapping
labels = np.asarray(labels.iloc[:,0]).reshape(-1)
return adj, np.asarray(features.T), labels,shuffle_index
elif dataset == 'Zheng68K':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_68K_PBMC_data.csv',index_col = 0, header = 0)
elif dataset == '10x_5cl':
path = os.path.join(path, 'CellBench/')
features = pd.read_csv(os.path.join(path + dataset) +'/10x_5cl_data.csv',index_col = 0, header = 0)
elif dataset == 'CelSeq2_5cl':
path = os.path.join(path, 'CellBench/')
features = pd.read_csv(os.path.join(path + dataset) +'/CelSeq2_5cl_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
labels = load_labels(path, dataset)
shuffle_index = np.loadtxt(os.path.join(path + dataset) +'/shuffle_index_'+dataset+'.txt')
return adj, np.asarray(features.T), labels,shuffle_index
# In[]:
def load_inter(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Inter-dataset/', dataset='CellBench',net='String'):
if dataset == 'CellBench':
features = pd.read_csv(os.path.join(path + dataset) +'/Combined_10x_CelSeq2_5cl_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
labels = load_labels(path, dataset)
return adj, np.asarray(features.T), labels, None
# In[]:
def load_pancreas(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Intra-dataset/', dataset='',net='String'):
##############
xin = pd.read_csv(os.path.join(path + 'Xin') +'/Filtered_Xin_HumanPancreas_data.csv',index_col = 0, header = 0)
bh = pd.read_csv(os.path.join(path + 'BaronHuman') +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0)
mu = pd.read_csv(os.path.join(path + 'Muraro') +'/Filtered_Muraro_HumanPancreas_data_renameCols.csv',index_col = 0, header = 0)
se = pd.read_csv(os.path.join(path + 'Segerstolpe') +'/Filtered_Segerstolpe_HumanPancreas_data.csv',index_col = 0, header = 0)
gene_set = list(set(xin.columns)&set(bh.columns)&set(mu.columns)&set(se.columns))
gene_set.sort()
gene_index_bh = [i for i, e in enumerate(bh.columns) if e in gene_set]
xin = xin[gene_set]
bh = bh[gene_set]
mu = mu[gene_set]
se = se[gene_set]
mu = np.log1p(mu)
se = np.log1p(se)
bh = np.log1p(bh)
xin = np.log1p(xin)
# indexXin = xin.index.to_list()
# indexMu = mu.index.to_list()
# indexSe = se.index.to_list()
# indexBh = bh.index.to_list()
min_max_scaler = preprocessing.MinMaxScaler()
temp = min_max_scaler.fit_transform(np.asarray(mu))
mu = pd.DataFrame(temp, index = mu.index, columns = mu.columns)
temp = min_max_scaler.fit_transform(np.asarray(se))
se = pd.DataFrame(temp, index = se.index, columns = se.columns)
temp = min_max_scaler.fit_transform(np.asarray(bh))
bh = pd.DataFrame(temp, index = bh.index, columns = bh.columns)
temp = min_max_scaler.fit_transform(np.asarray(xin))
xin = pd.DataFrame(temp, index = xin.index, columns = xin.columns)
del temp
#mu = preprocessing.normalize(np.asarray(mu), axis = 1, norm='l1')
###############
features = pd.read_csv(os.path.join(path + 'BaronHuman') +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0, nrows=2)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + 'BaronHuman') + '/adj'+ net + 'BaronHuman' + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
adj = adj[gene_index_bh, :][:, gene_index_bh]
###############
datasets = ['Xin','BaronHuman','Muraro','Segerstolpe', 'BaronMouse']
l_xin = pd.read_csv(os.path.join(path + datasets[0]) +'/Labels.csv',index_col = None)
l_bh = pd.read_csv(os.path.join(path + datasets[1]) +'/Labels.csv',index_col = None)
l_mu = pd.read_csv(os.path.join(path + datasets[2]) +'/Labels.csv',index_col = None)
l_mu = l_mu.replace('duct','ductal')
l_mu = l_mu.replace('pp','gamma')
l_se = pd.read_csv(os.path.join(path + datasets[3]) +'/Labels.csv',index_col = None)
#labels_set = list(set(l_xin['x']) & set(l_bh['x']) & set(l_mu['x']))
if True:
labels_set = set(['alpha','beta','delta','gamma'])
index = [i for i in range(len(l_mu)) if l_mu['x'][i] in labels_set]
mu = mu.iloc[index]
l_mu = l_mu.iloc[index]
index = [i for i in range(len(l_se)) if l_se['x'][i] in labels_set]
se = se.iloc[index]
l_se = l_se.iloc[index]
index = [i for i in range(len(l_bh)) if l_bh['x'][i] in labels_set]
bh = bh.iloc[index]
l_bh = l_bh.iloc[index]
index = [i for i in range(len(l_xin)) if l_xin['x'][i] in labels_set]
xin = xin.iloc[index]
l_xin = l_xin.iloc[index]
    alldata = pd.concat((xin, bh, mu, se), axis=0)
    #alldata.to_csv(path+'Data_pancreas_4.csv')
    labels = pd.concat((l_xin, l_bh, l_mu, l_se), axis=0)
# labels.to_csv(path+'Labels_pancreas_19.csv')
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
###############
#shuffle_index = np.asarray([1449, 8569, 2122,2133])
shuffle_index = np.asarray([1449, 5707, 1554, 1440])
return adj, np.asarray(alldata.T), labels, shuffle_index
# In[]:
def build_adj_weight(idx_features):
edges_unordered = pd.read_csv('/users/tianyu/desktop/imputation/STRING_ggi.csv', index_col = None, usecols = [1,2,16])
# edges_unordered = np.asarray(edges_unordered[['protein1','protein2','combined_score']]) # Upper case.
edges_unordered = np.asarray(edges_unordered)
idx = []
mapped_index = idx_features.index.str.upper() # if data.index is lower case. Usoskin data is upper case, do not need it.
for i in range(len(edges_unordered)):
if edges_unordered[i,0] in mapped_index and edges_unordered[i,1] in mapped_index:
idx.append(i)
edges_unordered = edges_unordered[idx]
print ('idx_num:',len(idx))
del i,idx
# build graph
idx = np.array(mapped_index)
idx_map = {j: i for i, j in enumerate(idx)} # eg: {'TSPAN12': 0, 'TSHZ1': 1}
# the key (names) in edges_unordered --> the index (which row) in matrix
edges = np.array(list(map(idx_map.get, edges_unordered[:,0:2].flatten())),
dtype=np.int32).reshape(edges_unordered[:,0:2].shape) #map:map(function, element):function on element.
adj = sp.coo_matrix((edges_unordered[:, 2], (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
#del idx,idx_map,edges_unordered
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
#adj = (adj + sp.eye(adj.shape[0])) #diagonal, set to 1
return adj
def getAdjByBiogrid(idx_features, pathnet = '~/Google Drive/fasttext/cnn/TCGA_cnn/BIOGRID-ALL-3.5.169.tab2.txt'):
edges_unordered = pd.read_table(pathnet ,index_col=None, usecols = [7,8] )
edges_unordered = np.asarray(edges_unordered)
idx = []
for i in range(len(edges_unordered)):
if edges_unordered[i,0] in idx_features.index and edges_unordered[i,1] in idx_features.index:
idx.append(i)
edges_unordered = edges_unordered[idx]
del i,idx
# build graph
idx = np.array(idx_features.index)
idx_map = {j: i for i, j in enumerate(idx)}
# the key (names) in edges_unordered --> the index (which row) in matrix
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape) #map:map(function, element):function on element
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
del idx,idx_map,edges_unordered
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# adj = adj + sp.eye(adj.shape[0])
# sp.save_npz(os.path.join(pathnet,'adjCancer18442.npz'), adj)
return adj
def removeZeroAdj(adj, gedata):
#feature size: genes * samples, numpy.darray
if adj[0,0] != 0:
#adj = adj - sp.eye(adj.shape[0])
adj.setdiag(0)
# adjdense = adj.todense()
indd = np.where(np.sum(adj, axis=1) != 0)[0]
adj = adj[indd, :][:, indd]
# adjdense = adjdense[indd,:]
# adjdense = adjdense[:, indd]
gedata = gedata[indd,:]
return adj, gedata
def load_cancer(concat, diseases, path, net, num_gene):
    """Load gene-expression data for the given TCGA cancer types."""
print('Loading {} dataset...'.format('cancer'))
'''
if tianyu:
gedataA = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/ge_"+diseaseA+".csv", index_col = 0)
gedataB = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/ge_"+diseaseB+".csv", index_col = 0)
cnvdataA = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/cnv_"+diseaseA+".csv", index_col = 0)
cnvdataB = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/cnv_"+diseaseB+".csv", index_col = 0)
else:
data = pd.read_csv("/users/peng/documents/tianyu/hw5ty/data10000.csv", index_col=0)
if 'T' in data.index:
print ("drop T")
data = data.drop('T')
data = data.T #samples*genes
data2 = data[ind]
data2 = data2.T #genes*samples
'''
gedata = pd.DataFrame()
cnvdata = pd.DataFrame()
labels = []
count = 0
pathgene = ("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/")
for disease in diseases:
tmp = pd.read_csv((pathgene + "/ge/ge_" + disease+ ".csv"), index_col = 0)
gedata = pd.concat([gedata,tmp],axis = 1)
# tmp = pd.read_csv(os.path.join(pathgene, "cnv/cnv_"+disease+".csv"),index_col = 0)
# cnvdata = pd.concat([cnvdata,tmp],axis = 1)
labels.append(np.repeat(count, tmp.shape[1]))
count += 1
labels = np.concatenate(labels)
# adj = getAdjByBiogrid(gedata, path, net)
adj = sp.load_npz(path + 'adjCancer18442.npz')
'''
gedata = pd.concat([gedataA, gedataB], axis = 1)
cnvdata = pd.concat([cnvdataA, cnvdataB], axis = 1)
labels = np.asarray([0,1,2])
labels = np.repeat(labels, [gedataA.shape[1], gedataB.shape[1]], axis=0)
'''
gedata, geneind = high_var_dfdata(gedata, num=num_gene, ind=1)
adj = adj[geneind,:][:,geneind]
adj, gedata = removeZeroAdj(adj, np.asarray(gedata))
adj = normalize(adj)
adj = adj.astype('float32')
labels = labels.astype('uint8')
return adj, gedata, labels
# In[]:
def load_cluster(filepath,num_gene):
data = pd.read_csv(filepath+'/separateData/GeneLabel10000.csv',index_col = 0)
data = data.dropna()
trainID = pd.read_csv(filepath+'/separateData/train2.csv',index_col = 0, header=0)
testID = pd.read_csv(filepath+'/separateData/test.csv',index_col = 0, header=0)
trainID['sample_IDs'] = trainID['sample_id'].str[0:15]
testID['sample_IDs'] = testID['sample_id'].str[0:15]
trainID.drop_duplicates(subset ="sample_IDs",keep = 'first', inplace = True)
testID.drop_duplicates(subset ="sample_IDs",keep = 'first', inplace = True)
trainID = trainID['sample_IDs']
testID = testID['sample_IDs']
train_data = pd.merge(trainID, data, on='sample_IDs',how='inner')
test_data = pd.merge(testID, data, on='sample_IDs',how='inner')
num_train = train_data.shape[0]
num_test = test_data.shape[0]
gedata = pd.concat((train_data, test_data), axis = 0)
trainID = train_data['sample_IDs']
testID = test_data['sample_IDs']
labels = np.asarray(gedata['iclusterlabel'])
gedata = gedata.iloc[:,7:10007]
mydict = {item: i for i,item in enumerate(np.unique(labels))}
labels = np.vectorize(mydict.get)(labels)
del mydict
### gene net
adj = sp.load_npz(filepath + '/separateData/adjCancer18442.npz')
gedata, geneind = high_var_dfdata(gedata.T, num=num_gene, ind=1)
adj = adj[geneind,:][:,geneind]
adj, gedata = removeZeroAdj(adj, np.asarray(gedata))
adj = normalize(adj)
adj = adj.astype('float32')
return adj, gedata, labels, num_train,num_test
# In[]:
def load_cancer_single(user, concat, diseaseA, path, net, num_gene):
    """Load gene-expression and CNV data for a single cancer type."""
print('Loading {} dataset...'.format(diseaseA))
pathgene = os.path.join("/Users",user,"Google Drive/fasttext/classification/TCGAcleandata/")
gedata = pd.read_csv(pathgene + diseaseA+ "/ge_"+diseaseA+".csv", index_col = 0)
cnvdata = pd.read_csv(pathgene + diseaseA+ "/cnv_"+diseaseA+".csv", index_col = 0)
labels = pd.read_csv(pathgene + diseaseA+"/labels_"+diseaseA+".csv", index_col = 0)
gedata, geneind = high_expr_dfdata(gedata, num=num_gene, ind=1)
cnvdata = cnvdata.iloc[geneind]
idx_features = gedata
'''
labels = np.asarray([0,1])
labels = np.repeat(labels, [84,147], axis=0)
'''
#------------------------------------- -----------------
#adj =
#-------------------------------------------------------------------
gedata = norm_max(gedata)
cnvdata = norm_max(cnvdata)
gedata = np.expand_dims(gedata.T, axis = 2)
cnvdata = np.expand_dims(cnvdata.T, axis = 2)
idx_features = np.concatenate((gedata,cnvdata), axis=2)
if concat:
idx_features = np.repeat(idx_features, concat, axis=0)
labels = np.repeat(labels, concat)
for i in range(idx_features.shape[0]):
target_dims = idx_features.shape[1]
noise = np.random.rand(target_dims)/10.0
idx_features[i,:,0] = idx_features[i,:,0] + noise
return adj, idx_features, labels
# In[]:
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
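# e.g. row-normalizing [[1, 1], [2, 0]] yields [[0.5, 0.5], [1.0, 0.0]]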
# mynormalize is identical to normalize(); keep it as an alias so any
# existing callers continue to work
mynormalize = normalize
def accuracy(output, labels): # average of each batch
preds = output.max(1)[1].type_as(labels)
#print ('a:',output)
#print ('b:',preds)
correct = preds.eq(labels).double()
#print ('c:',correct)
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)#.requires_grad_()
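# e.g. sparse_mx_to_torch_sparse_tensor(sp.eye(3)) gives a 3x3 torch sparse
# FloatTensor with ones on the diagonal.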
class geDataset(Data.Dataset):
"""
Class that represents a train/validation/test dataset that's readable for PyTorch
Note that this class inherits torch.utils.data.Dataset
"""
def __init__(self, data_list,label):
"""
@param data_list: list of MolDatum
"""
self.data_list = data_list
self.label = label
def __len__(self):
return len(self.data_list)
def __getitem__(self, key):
"""
Triggered when you call dataset[i]
"""
X = self.data_list[key]
y = self.label[key]
return (X, y)
def collate_fn(batch):
batch.sort(key=lambda x: len(x[1]), reverse=True)
img, label = zip(*batch)
pad_label = []
lens = []
max_len = len(label[0])
for i in range(len(label)):
temp_label = [0] * max_len
temp_label[:len(label[i])] = label[i]
pad_label.append(temp_label)
lens.append(len(label[i]))
#return img, pad_label, lens
def construct_loader(features, labels, batch_size, shuffle=True):
data_set = geDataset(features, labels)
loader = torch.utils.data.DataLoader(dataset=data_set,
batch_size=batch_size,
#collate_fn=collate_fn,
shuffle=shuffle)
return loader
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
if classname.find('Conv2d') != -1:
m.weight.data.fill_(1.0)

# ---- kelvinleong0529/Leet-Code / 200-number-of-islands/200-number-of-islands.py ----
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
if not grid:
return 0
visited = set()
island = 0
ROW, COL = len(grid), len(grid[0])
def bfs(row,col):
q = collections.deque()
visited.add((row,col))
q.append((row,col))
directions = [[0,1],[0,-1],[1,0],[-1,0]]
while q:
r,c = q.popleft()
for dr, dc in directions:
R = r + dr
C = c + dc
if (R in range(ROW) and C in range(COL) and grid[R][C] == "1" and (R,C) not in visited):
visited.add((R,C))
q.append((R,C))
for r in range(ROW):
for c in range(COL):
if grid[r][c] == "1" and (r,c) not in visited:
bfs(r,c)
island += 1
        return island
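# BFS flood fill: each cell is enqueued at most once, so the running time is
# O(rows * cols), with O(rows * cols) extra space for the visited set.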

# ---- amsterdata/retrieval_importance / webquestions.py ----
import json
import tldextract
from pprint import pp
from retrieval_importance import learn_importance, encode_retrievals, encode_groups, v_grouped, \
most_important_groups, least_important_groups
from retrieval_importance import cal_acc, generate_val_test_set, sort_values, get_retain_urls, cal_acc_reweight, cal_loo, load_openai_retrievals
from retrieval_importance.utils import get_project_root
def utility(retrieval, prediction):
if prediction in retrieval["correct_answers"]:
return 1.0
else:
return 0.0
def group(retrieved):
url_parts = tldextract.extract(retrieved)
return f'{url_parts.domain}.{url_parts.suffix}'
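# e.g. group("https://en.wikipedia.org/wiki/Main_Page") -> "wikipedia.org"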
def experiment_prune(random_seed, retrievals, K = 10, lr = 500, epoch = 50):
val_set, test_set = generate_val_test_set(len(retrievals), random_seed)
val_retrievals = [retrievals[i] for i in val_set]
encoded_retrievals, mapping = encode_retrievals(val_retrievals, "retrieved_websites", "retrieved_answers", utility)
grouping, group_mapping = encode_groups(mapping, group)
v_ungrouped = learn_importance(encoded_retrievals, k=K, learning_rate=lr, num_steps=epoch, n_jobs=-1, grouping=grouping)
v = v_grouped(v_ungrouped, grouping, group_mapping)
v_sorted, total_doc = sort_values(retrievals, val_set, v, group)
results = []
for remove_rate in range(0, 10, 1):
retain_urls = get_retain_urls(v_sorted, total_doc, remove_rate/10)
acc_dev = cal_acc(val_set, retrievals, group, retain_urls, K)
acc_test = cal_acc(test_set, retrievals, group, retain_urls, K)
results.append((remove_rate/10, acc_dev, acc_test))
acc_baseline = results[0][2]
results.sort(key=lambda x: x[1], reverse=True)
acc_best = results[0][2]
threshold = results[0][0]
return acc_baseline, acc_best, threshold
def experiment_reweight(random_seed, retrievals, K = 10, lr = 500, epoch = 50, threshold = 0.5):
val_set, test_set = generate_val_test_set(len(retrievals), random_seed)
val_retrievals = [retrievals[i] for i in val_set]
encoded_retrievals, mapping = encode_retrievals(val_retrievals, "retrieved_websites", "retrieved_answers", utility)
grouping, group_mapping = encode_groups(mapping, group)
v = learn_importance(encoded_retrievals, k=K, learning_rate=lr, num_steps=epoch, n_jobs=-1, grouping=grouping)
v_per_group = v_grouped(v, grouping, group_mapping)
keep_dict = {str(i): 1 for i in v_per_group}
acc_baseline = cal_acc(test_set, retrievals, group, keep_dict, K)
acc_reweight = cal_acc_reweight(test_set, retrievals, group, group_mapping, v_per_group)
return acc_baseline, acc_reweight
def experiment_loo(random_seed, retrievals, K = 10):
val_set, test_set = generate_val_test_set(len(retrievals), random_seed)
val_retrievals = [retrievals[i] for i in val_set]
v = cal_loo(val_retrievals, group)
v_sorted, total_doc = sort_values(retrievals, val_set, v, group)
results = []
for remove_rate in range(0, 10, 1):
retain_urls = get_retain_urls(v_sorted, total_doc, remove_rate/10)
acc_dev = cal_acc(val_set, retrievals, group, retain_urls, K)
acc_test = cal_acc(test_set, retrievals, group, retain_urls, K)
results.append((remove_rate/10, acc_dev, acc_test))
acc_baseline = results[0][2]
results.sort(key=lambda x: x[1], reverse=True)
acc_best = results[0][2]
threshold = results[0][0]
return acc_baseline, acc_best, threshold
def load_retrievals():
retrievals = []
with open(f'{str(get_project_root())}/test_data/webquestion.jsonl') as f:
for line in f:
retrievals.append(json.loads(line))
return retrievals
def work_load(metric):
seed_list = [441, 1, 469, 53, 280, 123, 219, 181, 5, 9, 199, 156, 93, 313, 28, 56, 359, 108, 8, 58, 407, 451, 322, 266, 268, 297, 12, 182, 320, 474, 296, 142, 64, 201, 32, 392, 98, 242, 344, 438, 427, 35, 77, 394, 39, 55, 330, 38, 67, 358, 237, 149, 405, 420, 411, 57, 488, 49, 42, 155, 109, 73, 331, 128]
retrievals = load_retrievals()
if metric == "prune":
result_list = []
for random_seed in seed_list:
result_list.append(experiment_prune(random_seed, retrievals))
print("Finish random seed %d"%(random_seed))
print(result_list[-1])
acc_baseline = sum([i[0] for i in result_list])/len(result_list)
acc_prune = sum([i[1] for i in result_list])/len(result_list)
acc_threshold = sum([i[2] for i in result_list])/len(result_list)
return acc_baseline, acc_prune, acc_threshold
elif metric == "reweight":
result_list = []
for random_seed in seed_list:
result_list.append(experiment_reweight(random_seed, retrievals))
print("Finish random seed %d"%(random_seed))
print(result_list[-1])
acc_baseline = sum([i[0] for i in result_list])/len(result_list)
acc_reweight = sum([i[1] for i in result_list])/len(result_list)
return acc_baseline, acc_reweight
elif metric == "loo":
result_list = []
for random_seed in seed_list:
result_list.append(experiment_loo(random_seed, retrievals))
print("Finish random seed %d"%(random_seed))
print(result_list[-1])
acc_baseline = sum([i[0] for i in result_list])/len(result_list)
acc_loo = sum([i[1] for i in result_list])/len(result_list)
acc_threshold = sum([i[2] for i in result_list])/len(result_list)
return acc_baseline, acc_loo, acc_threshold
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=str, default="prune", help='loo/reweight/prune')
args = parser.parse_args()
with open("./test_data/result/web_question_qa_%s.jsonl"%(args.m), "w") as f:
if args.m == "prune":
acc_baseline, acc_prune, acc_threshold = work_load(args.m)
tmp = {'acc_baseline':acc_baseline, 'acc_prune':acc_prune, 'acc_threshold':acc_threshold}
print("prune ", acc_baseline, acc_prune, acc_threshold)
f.write(json.dumps(tmp) + "\n")
f.flush()
elif args.m == "reweight":
acc_baseline, acc_reweight = work_load(args.m)
tmp = {'acc_baseline':acc_baseline, 'acc_reweight':acc_reweight}
print("reweight ", acc_baseline, acc_reweight)
f.write(json.dumps(tmp) + "\n")
f.flush()
elif args.m == "loo":
acc_baseline, acc_loo, acc_threshold = work_load(args.m)
tmp = {'acc_baseline':acc_baseline, 'acc_loo':acc_loo, 'acc_threshold':acc_threshold}
print("loo ", acc_baseline, acc_loo, acc_threshold)
f.write(json.dumps(tmp) + "\n")
f.flush()

# ---- amanj120/WordsWordsWords / main.py ----
#!/usr/bin/env python3
import random
import re
from flask import Flask, jsonify
from flask_cors import CORS
from pymongo import MongoClient
MONGO_URI = 'mongodb://admin:aaWyedsDgy03jcLc@cluster0-shard-00-00-kwnae.gcp.mongodb.net:27017,cluster0-shard-00-01-kwnae.gcp.mongodb.net:27017,cluster0-shard-00-02-kwnae.gcp.mongodb.net:27017/markov?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true'
STARTER_SIZE = 10
WORDS_SIZE = 100
PAD_SIZE = 10
app = Flask(__name__)
CORS(app)
db = MongoClient(MONGO_URI).markov
@app.route('/')
def index():
return 'Welcome to WordsWordsWords,\n a Shakespeare Markov Chain API'
@app.route('/words/<word>')
def words(word):
word_regex = re.compile(re.escape(word), re.IGNORECASE)
word_relation = db.freqs.find_one({'word': word_regex})
if not word_relation:
word_relation = {'word': word, 'freqs': []}
# Extract the list of words and frequencies from this word's relations
freq_pairs = word_relation['freqs']
random.shuffle(freq_pairs)
# Limit number of pairs taken
freq_pairs = freq_pairs[:WORDS_SIZE]
# Sort in descending order of frequency
freq_pairs.sort(key=lambda f: -f['freq'])
# Pad pairs with random sample
num_left = max(0, PAD_SIZE - len(freq_pairs))
rand_relations = db.freqs.aggregate([{'$sample': {'size': num_left}}])
rand_words = [rand_relation['word'] for rand_relation in rand_relations]
freq_pairs.extend([{'word': word, 'freq': 0.0} for word in rand_words])
return jsonify(freq_pairs)
@app.route('/starters')
def starters():
rand_words = db.starters.aggregate([{'$sample': {'size': STARTER_SIZE}}])
rand_words = [word['word'] for word in rand_words]
freq = 1 / len(rand_words)
return jsonify([{'word': word, 'freq': freq} for word in rand_words])
@app.route('/<other>')
def handleIllegalRequest(other):
    # return a real 405 status code instead of plain text with an implicit 200
    return '405: Method Not Allowed', 405
@app.route('/ping')
def ping():
return 'pong'
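# A hedged smoke test using Flask's built-in test client.  /ping needs no
# database; the commented calls exercise the Mongo-backed routes and assume
# the cluster configured above is reachable.
def _smoke_test():
    client = app.test_client()
    assert client.get('/ping').data == b'pong'
    # print(client.get('/starters').get_json())    # needs MongoDB
    # print(client.get('/words/love').get_json())  # needs MongoDB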
if __name__ == '__main__':
app.run(host='0.0.0.0')
'''
def synonyms(word):
syn_sets = wordnet.synsets(word)
synonyms = set()
for syn_set in syn_sets or []:
for lemma in syn_set.lemmas():
synonyms.add(lemma.name())
return synonyms
'''
| amanj120/WordsWordsWords | main.py | main.py | py | 2,236 | python | en | code | 0 | github-code | 90 |
18215003959 | import itertools
n, m, x = map(int, input().split())
ca = []
for _ in range(n):
ca.append(list(map(int, input().split())))
prices = []
for i in range(1, n+1):
    for N in itertools.combinations(range(n), i):
skill = [0]*m
price = 0
for j in N:
price += ca[j][0]
for k in range(1, m+1):
skill[k-1] += ca[j][k]
if min(skill) >= x:
prices.append(price)
if prices:
print(min(prices))
else:
print(-1) | Aasthaengg/IBMdataset | Python_codes/p02683/s842849211.py | s842849211.py | py | 546 | python | en | code | 0 | github-code | 90 |
7927881591 | SHRIMP_MINVER = (0, 1, 0, )
SHRIMP_PLATFORM = ('all', )
SHRIMP_INFO = {
'name': u'江大侠',
'ver': u'0.1.0',
'author': [
u'\u738b\u96ea\u745e@\u6570\u5a92\u5b66\u9662 (xenon@JNRain)',
u'\u5c0fC@\u6570\u5a92\u5b66\u9662 (TheC@JNRain)',
u'\u848b\u9a04\u5929@\u7269\u8054\u7f51\u9662 (JLT@JNRain)',
],
'desc': u'\u6c5f\u5927\u4fa0\u2014\u2014\u751f\u6d3b\u5c3d\u5728\u6307'
u'\u5c16\uff0c\u6c5f\u5927\u4eba\u81ea\u5df1\u7684\u6821\u56ed'
u'\u751f\u6d3b\u5ba2\u6237\u7aef',
'copyr': u'(C) 2011 \u6c5f\u5927\u4fa0\u5f00\u53d1\u56e2\u961f',
'lic': u'''\
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
''',
}
# maybe a better icon will be designed
from lobster_icon import SHRIMP_ICON
################################################################
## SHRIMP DESCRIPTION END, GLOBAL DECLARATIONS AND SHRIMP PROCS
################################################################
import sys
import os
import wx
from gingerprawn import VERSION_STR
from gingerprawn.api.utils.metaprogramming import fun2meth
from gingerprawn.api import cooker
from gingerprawn.api.cooker import iconmgr
from gingerprawn.api import univlib
from gingerprawn.api import logger
logger.install()
from gingerprawn.api.platform import w32version
# dummy placeholder for i18n
_ = lambda x: x
def shrimp_init():
logdebug('lobster init routine')
pass
_SELF_FRAME = None
_SHRIMP_ARGS = None
def shrimp_threadproc(args):
global _SHRIMP_ARGS
_SHRIMP_ARGS = args
reason = args[0]
if reason == 'autostart':
# starting with OS, do nothing
waitqueue = args[1]
# If all shrimp behave well, it's impossible to block here
# Simply put something to indicate that we're done.
waitqueue.put('lobster')
return
# GUI init should take place in the main thread
wx.CallAfter(_APP_OBJECT._On_LobsterInit, create)
def shrimp_down(just_querying=False):
if just_querying:
ret = wx.MessageBox(_(u'真的要退出吗?'), _(u'\u6c5f\u5927\u4fa0'),
wx.YES_NO | wx.ICON_QUESTION)
if ret == wx.YES:
logdebug('shutdown request approved')
return True
else:
logdebug('shutdown request declined')
return False
# not kidding, we have to go now
loginfo('lobster teardown initiated')
wx.CallAfter(_SELF_FRAME.Destroy)
#############################################################################
## SEPARATOR BETWEEN SHRIMP ARCHITECTURE AND (MAINLY) GUI IMPLEMENTATION
#############################################################################
# (rather) cool UI when using Windows with Aero enabled~
from gingerprawn.api.platform import aero
# 2 icons belonging to lobster itself
from lobster_icon import SETTINGS_ICON, LOBSTER_ABOUT_ICON
# aboutbox factored out as a common utility
from gingerprawn.api.ui.aboutbox import show_aboutbox
# settings dialog, almost barebone
from lobster_setting_dlg import invoke_dlg as show_settings
SHRIMPBTN_NAME_FMT = 'BtnShrimp%d'
SHRIMPBTN_EVTBUTTON_FMT = 'On%sButton' % SHRIMPBTN_NAME_FMT
SHRIMPBTN_ID_FMT = 'wxID_LOBSTER_MAINBTNSHRIMP%d'
# now this is dynamically calculated, but leave this as an initial reference
SHRIMPBTN_INITIAL_NUM_PER_ROW = 4
SHRIMPBTN_HGAP = 10
SHRIMPBTN_VGAP = 10
SHRIMPBTN_SIZETUPLE = (iconmgr.ICON_WIDTH + 16, iconmgr.ICON_HEIGHT + 16)
## FIXED: WINDOWSIZE_PAD gets calculated EVERY TIME the window is sized,
## AND this time it's derived automatically from system metrics, so
## this is more robust against theme changes and OS variations.
#class WindowSizePadProvider(object):
# @staticmethod
# def GetPadX():
# return wx.SystemSettings.GetMetric(wx.SYS_FRAMESIZE_X) * 2
# @staticmethod
# def GetPadY():
# return (wx.SystemSettings.GetMetric(wx.SYS_FRAMESIZE_Y) * 2 +
# wx.SystemSettings.GetMetric(wx.SYS_CAPTION_Y))
# @staticmethod
# def __getitem__(idx):
# if idx == 0:
# fn = WindowSizePadProvider.GetPadX
# elif idx == 1:
# fn = WindowSizePadProvider.GetPadY
# else:
# raise IndexError('the requested dimension does not exist')
# return fn()
#
#WINDOWSIZE_PAD = WindowSizePadProvider()
def create(parent):
global _SELF_FRAME
_SELF_FRAME = lobster_main(parent)
return _SELF_FRAME
[wxID_LOBSTER_MAIN, wxID_LOBSTER_MAINBTNABOUT, wxID_LOBSTER_MAINBTNSETTING,
wxID_LOBSTER_MAINBTNBOARD, ] = [wx.NewId() for _init_ctrls in range(4)]
class lobster_main(wx.Frame):
def _calc_width(self, num_col):
return ((SHRIMPBTN_SIZETUPLE[0] + SHRIMPBTN_HGAP) * num_col
- SHRIMPBTN_HGAP) # + WINDOWSIZE_PAD[0])
def _calc_height(self, num_row):
return ((SHRIMPBTN_SIZETUPLE[1] + SHRIMPBTN_VGAP) * num_row
- SHRIMPBTN_VGAP) # + WINDOWSIZE_PAD[1])
def _calc_size(self):
num_row = self._ShrimpButtonRowCount
num_col = len(self._ShrimpButtonCols)
return wx.Size(self._calc_width(num_col), self._calc_height(num_row))
def _init_sizers(self):
self.bag = wx.GridBagSizer(hgap=SHRIMPBTN_HGAP, vgap=SHRIMPBTN_VGAP)
self.bag.SetEmptyCellSize(wx.Size(*SHRIMPBTN_SIZETUPLE))
self.DoLayout(self.bag, True) # is_initial=True
self.btnboard.SetSizer(self.bag)
self.btnboard.SetAutoLayout(True)
def DoLayout(self, sizer, is_initial=False):
sizer.Clear()
if is_initial:
num_per_row = SHRIMPBTN_INITIAL_NUM_PER_ROW
else:
num_per_row = self.GetSize()[0] / (SHRIMPBTN_SIZETUPLE[0] +
sizer.GetHGap())
if num_per_row == 0:
num_per_row = 1
# Got bitten by the nasty shallowcopy thing!!
# must manually add each of the empty lists here -- a lesson learnt
self._ShrimpButtonCols = btn_arr = [[] for i in range(num_per_row)]
# first put those shrimp buttons into the sizer
# MODIFIED: insert settings button as well (a nasty kludge)
# this var is named "left" because the about button is at the right...
# how silly... who can come up with a better name?
left_buttons = self._ShrimpButtons[:]
left_buttons.append(self.btnSetting)
for idx, btn in enumerate(left_buttons):
row, col = divmod(idx, num_per_row)
sizer.AddWindow(btn, (row, col),
border=0, flag=0, span=(1, 1))
btn_arr[col].append(btn)
# ... then the (somewhat lonely) about button
# now it won't be lonely any more since i decided to put it back
# along with those cute shrimp buttons
# (right-justify though)
aboutbtn_row_idx, rem = divmod(len(left_buttons), num_per_row)
sizer.AddWindow(self.btnAbout, (aboutbtn_row_idx, num_per_row - 1),
border=0, flag=0, span=(1, 1))
btn_arr[-1].append(self.btnAbout)
# don't know whether this is needed, but added anyway
sizer.Layout()
self._ShrimpButtonRowCount = aboutbtn_row_idx + 1
newsize = self._calc_size()
self.__DoNotRedoLayout = True
# self.SetSize(newsize)
# This mighty method... eliminated all those pads...
self.SetClientSize(newsize)
self.__DoNotRedoLayout = False
def _init_ctrls(self, prnt):
wx.Frame.__init__(self, id=wxID_LOBSTER_MAIN, name=u'lobster_main',
parent=prnt, style=wx.DEFAULT_FRAME_STYLE,
title=_(u'\u6c5f\u5927\u4fa0 %s') % VERSION_STR)
self.SetToolTipString(u'')
self.Center(wx.BOTH)
self.SetHelpText(u'')
if wx.Platform == '__WXMSW__':
self.SetBackgroundColour(wx.SystemSettings.GetColour(
wx.SYS_COLOUR_GRADIENTACTIVECAPTION))
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_SIZE, self.OnSize)
# this is not a "scroller" any more
self.btnboard = wx.Panel(self, wxID_LOBSTER_MAINBTNBOARD,
style=wx.TAB_TRAVERSAL, name='btnboard')
# XXX the if stmt below only works for that Ubuntu theme, and resulted
# in VERY BAD appearance when running in any other distro or theme.
# Disabled it altogether.
#
# if wx.Platform == '__WXGTK__':
# # not sure if this is the proper colour, but at least on Ubuntu's
# # default theme this looks the same as titlebar's fill color
# self.btnboard.SetBackgroundColour(wx.SystemSettings.GetColour(
# wx.SYS_COLOUR_CAPTIONTEXT))
# my icon...
self.btnSetting = wx.BitmapButton(self.btnboard,
wxID_LOBSTER_MAINBTNSETTING, SETTINGS_ICON.GetBitmap(),
(0, 0), SHRIMPBTN_SIZETUPLE, name=u'btnSetting')
self.btnSetting.SetHelpText(u'')
self.btnSetting.SetToolTipString(_(u'选项'))
self.btnSetting.Bind(wx.EVT_BUTTON, self.OnBtnSettingButton,
id=wxID_LOBSTER_MAINBTNSETTING)
# my icon...
self.btnAbout = wx.BitmapButton(self.btnboard,
wxID_LOBSTER_MAINBTNABOUT, LOBSTER_ABOUT_ICON.GetBitmap(),
(0, 0), SHRIMPBTN_SIZETUPLE, name=u'btnAbout')
self.btnAbout.SetHelpText(u'')
self.btnAbout.SetToolTipString(_(u'\u5173\u4e8e...'))
self.btnAbout.Bind(wx.EVT_BUTTON, self.OnBtnAboutButton,
id=wxID_LOBSTER_MAINBTNABOUT)
def __init__(self, parent):
logdebug('Lobster frame init')
self._init_ctrls(parent)
self.InitShrimpList()
self._init_sizers()
self.bag.Layout()
# is this useful??
self.SendSizeEvent()
# now for the crazy full glass effect in Windows~
if wx.Platform == '__WXMSW__':
aero.make_full_glass(self)
def AddShrimpBtn(self, prnt, idx, shrimp):
btn_name = SHRIMPBTN_NAME_FMT % idx
id_name = SHRIMPBTN_ID_FMT % idx
handler_name = SHRIMPBTN_EVTBUTTON_FMT % idx
# 1st we make a EVT_BUTTON handler which fires up the corresponding
# shrimp.
# the method is adapted from the former OnLvwShrimpListItemActivated
# handler, adding some cool dynamic stuff
def _FireUpShrimp(self, event):
try:
cooker.bring_up_shrimp(shrimp)
except ValueError:
# already running
wx.MessageBox('error: already running!')
event.Skip()
_FireUpShrimp.func_name = handler_name
fun2meth(_FireUpShrimp, self) # , handler_name)
# Some identifying info...
icon_bmap = iconmgr.get_bitmap(shrimp)
name = cooker.get_name(shrimp)
# Prepare the button...
newid = self.__dict__[id_name] = wx.NewId()
tmp = wx.BitmapButton(prnt, newid, icon_bmap, (0, 0),
SHRIMPBTN_SIZETUPLE) # , style=SHRIMPBTN_STYLE)
tmp.SetToolTipString(name)
self.__dict__[btn_name] = tmp
tmp.Bind(wx.EVT_BUTTON, getattr(self, handler_name), id=newid)
# set up layout later, so we are basically done here
# store some lookup information
self._ShrimpButtons.append(tmp)
def InitShrimpList(self):
ldstat = cooker.SHRIMP_LOADSTATUS
ok_shrimp = [sh for sh in ldstat
if ldstat[sh] == 'ok' and sh != 'lobster'] # exclude myself
ok_shrimp.sort()
appender = self.AddShrimpBtn
self._ShrimpButtons = []
parent = self.btnboard
for idx, sh in enumerate(ok_shrimp):
appender(parent, idx, sh)
def OnClose(self, evt):
loginfo('window close event, initiating shutdown')
ok_to_shutdown = cooker.query_shutdown()
if ok_to_shutdown:
cooker.do_shutdown()
evt.Skip()
else:
evt.Veto() # VETO the wx shutdown!
def OnBtnAboutButton(self, evt):
show_aboutbox('lobster', self)
def OnBtnSettingButton(self, evt):
# TODO
show_settings(self)
def OnSize(self, evt):
# After some experiments, I found out that self.Size already changed
# when this event fires.
# So directly calling the rearrangement routine should cause little to
# no problem.
if not self.__DoNotRedoLayout:
# do it
self.DoLayout(self.bag)
evt.Skip()
# vi:ai:et:ts=4 sw=4 sts=4 fenc=utf-8
| xen0n/gingerprawn | gingerprawn/shrimp/lobster/lobster_main.py | lobster_main.py | py | 13,109 | python | en | code | 1 | github-code | 90 |
19031015460 | import torch
import torch.nn.functional as F
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss / (gauss.sum())
def create_window(window_size, channel):
_1D_window = gaussian(window_size, window_size/6.).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = torch.Tensor(_2D_window.expand(1, channel, window_size, window_size).contiguous()) / channel
return window
def _mef_ssim(X, Ys, window, ws, denom_g, denom_l, C1, C2, is_lum=False, full=False):
K, C, H, W = list(Ys.size())
# compute statistics of the reference latent image Y
muY_seq = F.conv2d(Ys, window, padding=ws // 2).view(K, H, W)
muY_sq_seq = muY_seq * muY_seq
sigmaY_sq_seq = F.conv2d(Ys * Ys, window, padding=ws // 2).view(K, H, W) \
- muY_sq_seq
sigmaY_sq, patch_index = torch.max(sigmaY_sq_seq, dim=0)
# compute statistics of the test image X
muX = F.conv2d(X, window, padding=ws // 2).view(H, W)
muX_sq = muX * muX
sigmaX_sq = F.conv2d(X * X, window, padding=ws // 2).view(H, W) - muX_sq
# compute correlation term
sigmaXY = F.conv2d(X.expand_as(Ys) * Ys, window, padding=ws // 2).view(K, H, W) \
- muX.expand_as(muY_seq) * muY_seq
# compute quality map
cs_seq = (2 * sigmaXY + C2) / (sigmaX_sq + sigmaY_sq_seq + C2)
cs_map = torch.gather(cs_seq.view(K, -1), 0, patch_index.view(1, -1)).view(H, W)
if is_lum:
lY = torch.mean(muY_seq.view(K, -1), dim=1)
lL = torch.exp(-((muY_seq - 0.5) ** 2) / denom_l)
lG = torch.exp(- ((lY - 0.5) ** 2) / denom_g)[:, None, None].expand_as(lL)
LY = lG * lL
muY = torch.sum((LY * muY_seq), dim=0) / torch.sum(LY, dim=0)
muY_sq = muY * muY
l_map = (2 * muX * muY + C1) / (muX_sq + muY_sq + C1)
else:
l_map = torch.Tensor([1.0])
if Ys.is_cuda:
l_map = l_map.cuda(Ys.get_device())
if full:
l = torch.mean(l_map)
cs = torch.mean(cs_map)
return l, cs
qmap = l_map * cs_map
q = qmap.mean()
return q
def mef_ssim(X, Ys, window_size=11, is_lum=False):
(_, channel, _, _) = Ys.size()
window = create_window(window_size, channel)
if Ys.is_cuda:
window = window.cuda(Ys.get_device())
window = window.type_as(Ys)
return _mef_ssim(X, Ys, window, window_size, 0.08, 0.08, 0.01**2, 0.03**2, is_lum)
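# Hedged usage sketch for mef_ssim: X stands for a fused image of shape
# 1xCxHxW and Ys for a stack of K source exposures of shape KxCxHxW; the
# random tensors below are placeholders, not real data.
def _demo_mef_ssim():
    X = torch.rand(1, 3, 64, 64)
    Ys = torch.rand(4, 3, 64, 64)
    return mef_ssim(X, Ys, window_size=11, is_lum=True).item()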
def mef_msssim(X, Ys, window, ws, denom_g, denom_l, C1, C2, is_lum=False):
# beta = torch.Tensor([0.0710, 0.4530, 0.4760])
# beta = torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
# beta = torch.Tensor([1, 1, 1, 1, 1])
beta = torch.Tensor([1])
if Ys.is_cuda:
window = window.cuda(Ys.get_device())
beta = beta.cuda(Ys.get_device())
window = window.type_as(Ys)
levels = beta.size()[0]
l_i = []
cs_i = []
for _ in range(levels):
l, cs = _mef_ssim(X, Ys, window, ws, denom_g, denom_l, C1, C2, is_lum=is_lum, full=True)
l_i.append(l)
cs_i.append(cs)
X = F.avg_pool2d(X, (2, 2))
Ys = F.avg_pool2d(Ys, (2, 2))
Ql = torch.stack(l_i)
Qcs = torch.stack(cs_i)
return (Ql[levels-1] ** beta[levels-1]) * torch.prod(Qcs ** beta)
class MEFSSIM(torch.nn.Module):
def __init__(self, window_size=11, channel=3, sigma_g=0.2, sigma_l=0.2, c1=0.01, c2=0.03, is_lum=False):
super(MEFSSIM, self).__init__()
self.window_size = window_size
self.channel = channel
self.window = create_window(window_size, self.channel)
self.denom_g = 2 * sigma_g**2
self.denom_l = 2 * sigma_l**2
self.C1 = c1**2
self.C2 = c2**2
self.is_lum = is_lum
def forward(self, X, Ys):
(_, channel, _, _) = Ys.size()
if channel == self.channel and self.window.data.type() == Ys.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if Ys.is_cuda:
window = window.cuda(Ys.get_device())
window = window.type_as(Ys)
self.window = window
self.channel = channel
return _mef_ssim(X, Ys, window, self.window_size,
self.denom_g, self.denom_l, self.C1, self.C2, self.is_lum)
class MEF_MSSSIM(torch.nn.Module):
def __init__(self, window_size=11, channel=3, sigma_g=0.2, sigma_l=0.2, c1=0.01, c2=0.03, is_lum=False):
super(MEF_MSSSIM, self).__init__()
self.window_size = window_size
self.channel = channel
self.window = create_window(window_size, self.channel)
self.denom_g = 2 * sigma_g**2
self.denom_l = 2 * sigma_l**2
self.C1 = c1**2
self.C2 = c2**2
self.is_lum = is_lum
def forward(self, X, Ys):
(_, channel, _, _) = Ys.size()
if channel == self.channel and self.window.data.type() == Ys.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if Ys.is_cuda:
window = window.cuda(Ys.get_device())
window = window.type_as(Ys)
self.window = window
self.channel = channel
return mef_msssim(X, Ys, window, self.window_size,
self.denom_g, self.denom_l, self.C1, self.C2, self.is_lum)
| makedede/MEFNet | mefssim.py | mefssim.py | py | 5,525 | python | en | code | 68 | github-code | 90 |
72344172776 | from copy import deepcopy
from typing import TYPE_CHECKING
from loguru import logger
if TYPE_CHECKING:
from simpsom import SOMNet
class EarlyStop:
""" Monitors the convergence of a map and activates
a switch to interrupt the training if a certain tolerance
map difference threshold is hit.
Warning: this is a work in progress.
Use only if you know what you are doing!
"""
def __init__(self,
tolerance: float = 1e-4,
patience: int = 3) -> None:
""" Initialize the early stopping class.
Args:
tolerance (float): the map change threshold to start
the counter for early stopping.
patience (int): number of iterations with below-threshold
map change before stopping the training.
"""
self.tolerance = tolerance
self.patience = patience
self.stop_training = False
self.convergence = []
self.counter = 0
self.history = None
def calc_loss(self, net: 'SOMNet', to_monitor: str = "mapdiff") -> float:
""" Calculate map difference convergence.
Args:
net (SOMNet): a SOMNet instance.
to_monitor (str): the loss type to monitor for convergence.
Returns:
loss (float): the calculated loss.
Raises:
ValueError: if loss type is not recognized.
Currently only map difference (mapdiff) is implemented.
"""
all_weights = net.xp.array([n.weights for n in net.nodes_list])
loss = None
if self.history is not None:
if to_monitor == "mapdiff":
loss = net.xp.abs(net.xp.subtract(
all_weights, self.history)).mean()
else:
logger.error("Convergence method not recognized.")
raise ValueError
self.history = deepcopy(all_weights)
return loss
def check_convergence(self, loss: float) -> None:
""" Check the change of a given loss quantity
against its history.
If it has been reached, activate the stop_training flag.
Args:
loss (float): the value to monitor.
"""
if loss is not None:
self.convergence.append(loss)
if len(self.convergence) > 1 and \
abs(self.convergence[-2] - self.convergence[-1]) < self.tolerance:
self.counter += 1
else:
self.counter = 0
if self.counter >= self.patience:
self.stop_training = True
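if __name__ == "__main__":
    # Hedged self-test: a tiny stand-in for SOMNet (numpy backend, two
    # constant-weight nodes) just to exercise the convergence logic; the
    # fake classes below are not part of simpsom's API.
    import numpy as np
    class _FakeNode:
        def __init__(self):
            self.weights = np.zeros(3)
    class _FakeNet:
        xp = np
        nodes_list = [_FakeNode(), _FakeNode()]
    stopper = EarlyStop(tolerance=1e-4, patience=2)
    net = _FakeNet()
    for _ in range(5):
        loss = stopper.calc_loss(net)
        stopper.check_convergence(loss)
        if stopper.stop_training:
            break
    print("stopped:", stopper.stop_training)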
| fcomitani/simpsom | simpsom/early_stop.py | early_stop.py | py | 2,598 | python | en | code | 152 | github-code | 90 |
7171795377 | '''
Interpolation Package
MAINLY USED FOR TERM STRUCTURES
'''
def linear(R1,t1,R2,t2,t) :
'''
input :
R1,R2: the interest rate of two terminals of the interval
t1,t2: the time point of the two terminals and t1<t2
t: the time point of the interpolated interest rate
output :
R: the interpolated interest rate
'''
R=R1+(R2-R1)*(t-t1)/(t2-t1)
return R
def linear_spline(Rate,t):
def take(elem) :
return elem.maturity
Rate.sort(key=take)
# judge which interval the maturity belongs
def judge_maturity(Rate,t):
for i in range(len(Rate)):
if t<=Rate[i].maturity :
return i
return len(Rate)
    num=judge_maturity(Rate,t)
    # clamp to the first/last segment so t outside the curve is linearly extrapolated
    num = min(max(num, 1), len(Rate) - 1)
    return linear(Rate[num-1].value,Rate[num-1].maturity,Rate[num].value,Rate[num].maturity,t)
def cubic_polynomial(Rate,t):
'''
The cubic polynomial is of the form
r(s)=a*(s^3)+b*(s^2)+c*s+d
A=B.dot(C)
input :
Rate: list of the rate
t: the time point of the interpolated interest rate
output:
R: the interpolated interest rate
'''
import numpy as np
# select 4 different rate
rate_list=np.random.choice(Rate,size=4,replace=False)
A=list()
B=list()
for i in rate_list :
A.append(i.value)
B.append([i.maturity**3,i.maturity**2,i.maturity,1])
A=np.array(A)
B=np.array(B)
C=np.linalg.inv(B).dot(A)
b=np.array([t**3,t**2,t,1])
R=b.dot(C)
return R
def polydyne(Rate,t,n):
'''
The polydyne is of the form
r(s)=a*(s^n)+b*(s^(n-1))+c*(s^(n-2))+...+d
A=B.dot(C)
input :
Rate: list of the rate
t: the time point of the interpolated interest rate
n: the degree of the polynomial
output:
R: the interpolated interest rate
'''
import numpy as np
# select 4 different rate
    if len(Rate)<n+1 :
        raise ValueError('need at least n+1 rates for a degree-n polynomial')
rate_list=np.random.choice(Rate,size=n+1,replace=False)
A=list()
B=list()
for i in rate_list :
A.append(i.value)
for j in range(n) :
B.append(i.maturity**(n-j))
B.append(1)
A=np.array(A)
B=np.array(B)
B=B.reshape((n+1,n+1))
C=np.linalg.inv(B).dot(A)
b=list()
for j in range(n) :
b.append(t**(n-j))
b.append(1)
b=np.array(b)
R=b.dot(C)
return R
def difference(rate1,rate,rate2,startpoint=False,endpoint=False) :
'''
Calculate the difference at point i
'''
sb=rate1.maturity
sa=rate2.maturity
si=rate.maturity
rb=rate1.value
ra=rate2.value
ri=rate.value
if startpoint==False and endpoint==False :
dri=1/(sa-sb)*((sa-si)*(ri-rb)/(si-sb)+(si-sb)*(ra-ri)/(sa-si))
return dri
elif startpoint==True and endpoint==False :
dri=1/(sa-sb)*((sa+si-2*sb)*(ri-rb)/(si-sb)-(si-sb)*(ra-ri)/(sa-si))
return dri
elif startpoint==False and endpoint==True :
dri=1/(sa-sb)*((sa-si)*(ri-rb)/(si-sb)-(2*sa-si-sb)*(ra-ri)/(sa-si))
return dri
def Hermit(Rate,t):
    '''
    Interpolate the rate at time t using cubic Hermite interpolation,
    with derivatives estimated by finite differences.
    '''
def take(elem) :
return elem.maturity
Rate.sort(key=take)
pb=0
pa=0
for i in range(len(Rate)) :
if t>Rate[i].maturity :
pb=i
pa=pb+1
h=Rate[pa].maturity-Rate[pb].maturity
ta=Rate[pa].maturity
tb=Rate[pb].maturity
ra=Rate[pa].value
rb=Rate[pb].value
if pb==0 :
dyb=difference(Rate[pb],Rate[pb+1],Rate[pb+2],startpoint=True)
dya=difference(Rate[pa-1],Rate[pa],Rate[pa+1])
elif pb>0 and pa<len(Rate)-1 :
dyb=difference(Rate[pb-1],Rate[pb],Rate[pb+1])
dya=difference(Rate[pa-1],Rate[pa],Rate[pa+1])
elif pa==len(Rate)-1 :
dyb=difference(Rate[pb-1],Rate[pb],Rate[pb+1])
dya=difference(Rate[pa-2],Rate[pa-1],Rate[pa],endpoint=True)
alpha_b=((h+2*(t-tb))*(t-ta)**2)/(h**3)*rb
alpha_a=((h+2*(t-ta))*(t-tb)**2)/(h**3)*ra
beta_b=((t-tb)*(t-ta)**2)/(h**2)*dyb
beta_a=((t-tb)**2*(t-ta))/(h**2)*dya
H=alpha_a+alpha_b+beta_a+beta_b
return H
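# The interpolators in this module only need objects exposing `.value` and
# `.maturity`; the small stand-in class below is an assumption used by the
# _demo_* helpers added for illustration, not part of any external API.
class _DemoRate:
    def __init__(self, value, maturity):
        self.value = value
        self.maturity = maturity
def _demo_spline_interpolation():
    '''hedged example: a made-up yield curve interpolated at t=1.5'''
    curve = [_DemoRate(0.010, 0.5), _DemoRate(0.015, 1.0),
             _DemoRate(0.020, 2.0), _DemoRate(0.025, 3.0),
             _DemoRate(0.028, 5.0)]
    return linear_spline(curve, 1.5), Hermit(curve, 1.5)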
def Rate_to_Discount(Rate):
import numpy as np
from Options.rate import rate
def take(elem) :
return elem.maturity
Rate.sort(key=take)
def value(x):
return x.value
def maturity(x):
return x.maturity
value=list(map(value,Rate))
matur=list(map(maturity,Rate))
Discount=list()
for i in range(len(value)):
Discount.append(rate(np.exp(-value[i]*matur[i]),matur[i]))
return Discount
def Discount_to_Rate(Discount):
import numpy as np
from Options.rate import rate
def take(elem) :
return elem.maturity
Discount.sort(key=take)
def value(x):
return x.value
def maturity(x):
return x.maturity
value=list(map(value,Discount))
matur=list(map(maturity,Discount))
Rate=list()
for i in range(len(value)):
Rate.append(rate(-np.log(value[i])/matur[i],matur[i]))
return Rate
def cubic_spline(Rate,t,bc='natural',para=None):
'''
The Rate are interpolated by cubic spline
'''
import numpy as np
n=len(Rate)-1
X=np.zeros((4*n,4*n))
B=np.ndarray((4*n,1))
'''
Arrange the Rate on maturity
'''
def take(elem) :
return elem.maturity
Rate.sort(key=take)
'''
judge the interval of the interpolated t
'''
pb=0
for i in range(len(Rate)) :
if t>Rate[i].maturity :
pb=i
'''
interpolation condition
'''
j=0
for i in range(n-1):
X[i,j]=Rate[i+1].maturity**3
X[i,j+1]=Rate[i+1].maturity**2
X[i,j+2]=Rate[i+1].maturity
X[i,j+3]=1
B[i]=Rate[i+1].value
j=j+4
'''
Connnection Condition
'''
j=0
for i in range(n-1):
X[n-1+i,j]=-3*Rate[i+1].maturity**2
X[n-1+i,j+1]=-2*Rate[i+1].maturity
X[n-1+i,j+2]=-1
X[n-1+i,j+4]=3*Rate[i+1].maturity**2
X[n-1+i,j+5]=2*Rate[i+1].maturity
X[n-1+i,j+6]=1
B[n-1+i]=0
j=j+4
j=0
for i in range(n-1) :
X[2*n-2+i,j]=-6*Rate[i+1].maturity
X[2*n-2+i,j+1]=-2
X[2*n-2+i,j+4]=6*Rate[i+1].maturity
X[2*n-2+i,j+5]=2
B[2*n-2+i]=0
j=j+4
j=0
for i in range(n-1):
X[3*n-3+i,j]=-Rate[i+1].maturity**3
X[3*n-3+i,j+1]=-Rate[i+1].maturity**2
X[3*n-3+i,j+2]=-Rate[i+1].maturity
X[3*n-3+i,j+3]=-1
X[3*n-3+i,j+4]=Rate[i+1].maturity**3
X[3*n-3+i,j+5]=Rate[i+1].maturity**2
X[3*n-3+i,j+6]=Rate[i+1].maturity
X[3*n-3+i,j+7]=1
B[3*n-3+i]=0
j=j+4
'''
Boundary Condition
'''
X[4*n-4,0]=Rate[0].maturity**3
X[4*n-4,1]=Rate[0].maturity**2
X[4*n-4,2]=Rate[0].maturity
X[4*n-4,3]=1
B[4*n-4]=Rate[0].value
X[4*n-3,4*n-4]=Rate[-1].maturity**3
X[4*n-3,4*n-3]=Rate[-1].maturity**2
X[4*n-3,4*n-2]=Rate[-1].maturity
X[4*n-3,4*n-1]=1
B[4*n-3]=Rate[-1].value
if bc=='natural' :
'''
the natural is the second boundary condition
'''
X[4*n-2,0]=6*Rate[0].maturity
X[4*n-2,1]=2
B[4*n-2]=0
X[4*n-1,4*n-4]=6*Rate[-1].maturity
X[4*n-1,4*n-3]=2
B[4*n-1]=0
elif bc=='continue' :
'''
the continue is the first boundary condition
'''
X[4*n-2,0]=3*Rate[0].maturity**2
X[4*n-2,1]=2*Rate[0].maturity
X[4*n-2,2]=1
B[4*n-2]=0
X[4*n-1,4*n-4]=3*Rate[-1].maturity**2
X[4*n-1,4*n-3]=2*Rate[-1].maturity
X[4*n-1,4*n-2]=1
B[4*n-1]=0
elif bc=='setting_natural' :
'''
the setting is that the boundary condition is setted with certain condition
'''
X[4*n-2,0]=6*Rate[0].maturity
X[4*n-2,1]=2
B[4*n-2]=para[0]
X[4*n-1,4*n-4]=6*Rate[-1].maturity
X[4*n-1,4*n-3]=2
B[4*n-1]=para[1]
elif bc=='setting_continue' :
X[4*n-2,0]=3*Rate[0].maturity**2
X[4*n-2,1]=2*Rate[0].maturity
X[4*n-2,2]=1
B[4*n-2]=para[0]
X[4*n-1,4*n-4]=3*Rate[-1].maturity**2
X[4*n-1,4*n-3]=2*Rate[-1].maturity
X[4*n-1,4*n-2]=1
B[4*n-1]=para[1]
    else:
        raise ValueError('unknown boundary condition: %s' % bc)
A=np.linalg.inv(X).dot(B)
R=A[4*pb]*t**3+A[4*pb+1]*t**2+A[4*pb+2]*t+A[4*pb+3]
return R[0]
# return np.linalg.inv(X).dot(B)
# return X,B
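# Hedged example for the natural cubic spline above; reuses the
# illustrative _DemoRate curve introduced earlier.
def _demo_cubic_spline():
    curve = [_DemoRate(0.010, 0.5), _DemoRate(0.015, 1.0),
             _DemoRate(0.020, 2.0), _DemoRate(0.025, 3.0),
             _DemoRate(0.028, 5.0)]
    return cubic_spline(curve, 1.5, bc='natural')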
class cubic_constraint_regress():
'''
Cubic constraint regress class
instead of analyzing Rate, analyse the discount factor: Discount
The relation formula is
Discount(0,s)=exp(Rate(0,s)*s)
The regress funtion is
Discount(0,s)=a1*s**3+b1*s**2+c1*s+d1 if s in interval 1
a2*s**3+b2*s**2+c2*s+d2 if s in interval 2
...
an*s**3+bn*s**2+cn*s+dn if s in interval n
WITH n-1 continuous constraint :
a1*s**3+b1*s**2+c1*s+d1=a2*s**3+b2*s**2+c2*s+d2 at the joint point between interval 1 and 2
...
a[n-1]*s**3+b[n-1]*s**2+c[n-1]*s+d[n-1]=an*s**3+bn*s**2+cn*s+dn at the joint point between interval n-1 and n
WITH boundary condition
Discount(0,0)=1
'''
def __init__(self,Rate):
        '''
        initial parameters:
            Rate: the analyzed term structure
        '''
        self.rate=Rate
def judge_maturity(self,interval,maturity):
'''
judge which interval the maturity belongs
input: interval: the interval for the whole term structure
maturity: the maturity of the rate i
output: len(interval): which interval the maturity belongs
'''
for i in range(len(interval)):
if maturity<=interval[i] :
return i
return len(interval)
def cubic_constraint_regress(self,number=2,interval=None,constraint=None,constraint_or_not=True,omga=None):
'''
The Rate are constructed by constraint regress
input:
Rate: the interpolated yield curve
t: the interpoalted rate at time point t
number: the number of interval
interval: the interval point of cut points
constraint: the constraint of the regression
constraint_or_not : having or having not constraint
if True, then having constraint
if False, then having not constraint
if number is given, then the interval are cutted into equal length
output: regress parameter
'''
import numpy as np
Rate=self.rate
def take(elem) :
return elem.maturity
Rate.sort(key=take)
# the numbers of parameters
if interval!=None :
number=len(interval)+1
b=4*number
else:
interval=list()
b=4*number
mlength=Rate[-1].maturity-Rate[0].maturity
for i in range(number-1):
interval.append(mlength/number*(i+1))
self.interval=interval
# the number of the samples
n=len(Rate)
# the parameters
self.beta=np.zeros((b,1))
# the variable Y
f=np.zeros((n,1))
''' the constraint matrix satisfies
A*beta=0
'''
if constraint != None and constraint_or_not==True :
A=constraint[0]
d=constraint[1]
elif constraint==None and constraint_or_not==True :
m=len(interval)+1
A=np.zeros((m,b))
d=np.zeros((m,1))
''' boundary condition
B[0,0]=1
'''
A[0,3]=1
d[0,0]=1
'''
continue condition
B[i]-=B[i-1]+
'''
for i in range(1,len(interval)+1):
A[i,4*(i-1)]=interval[i-1]**3
A[i,4*(i-1)+1]=interval[i-1]**2
A[i,4*(i-1)+2]=interval[i-1]
A[i,4*(i-1)+3]=1
A[i,4*i]=-interval[i-1]**3
A[i,4*i+1]=-interval[i-1]**2
A[i,4*i+2]=-interval[i-1]
A[i,4*i+3]=-1
X=np.zeros((b,n))
for j in range(n) :
mjudge=self.judge_maturity(interval,Rate[j].maturity)
X[4*mjudge,j]=Rate[j].maturity**3
X[4*mjudge+1,j]=Rate[j].maturity**2
X[4*mjudge+2,j]=Rate[j].maturity
X[4*mjudge+3,j]=1
f[j]=Rate[j].value
from numpy.linalg import inv
        # test the boolean flag truthily and use `is None` for omga, which may be an array
        if not constraint_or_not and omga is None :
            self.beta=inv(X.dot(X.T)).dot(X).dot(f)
            return self.beta
        elif not constraint_or_not and omga is not None :
            self.beta=inv(X.dot(inv(omga)).dot(X.T)).dot(X).dot(inv(omga)).dot(f)
            return self.beta
        elif constraint_or_not and omga is None :
            self.beta=inv(X.dot(X.T)).dot(X).dot(f)+inv(X.dot(X.T)).dot(A.T).dot(inv(A.dot(inv(X.dot(X.T))).dot(A.T))).dot(d-A.dot(inv(X.dot(X.T)).dot(X).dot(f)))
            return self.beta
        elif constraint_or_not and omga is not None :
            self.beta=inv(X.dot(inv(omga)).dot(X.T)).dot(X).dot(inv(omga)).dot(f)+inv(X.dot(inv(omga)).dot(X.T)).dot(A.T).dot(inv(A.dot(inv(X.dot(inv(omga)).dot(X.T))).dot(A.T))).dot(d-A.dot(inv(X.dot(X.T)).dot(X).dot(f)))
            return self.beta
def fit(self,t):
'''
fit the regressed model
input: t: the fitted maturity
output: the rate for the fitted maturity
'''
import numpy as np
X=np.zeros((len(self.beta),1))
mjudge=self.judge_maturity(self.interval,t)
X[4*mjudge,0]=t**3
X[4*mjudge+1,0]=t**2
X[4*mjudge+2,0]=t
X[4*mjudge+3,0]=1
return self.beta.T.dot(X)[0,0]
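# Hedged example for the constrained-regression class above.  The discount
# factors are illustrative; with number=2 intervals, at least four distinct
# maturities per interval are needed for X.dot(X.T) to be invertible.
def _demo_constraint_regress():
    curve = [_DemoRate(0.995, 0.25), _DemoRate(0.99, 0.5),
             _DemoRate(0.98, 1.0), _DemoRate(0.96, 2.0),
             _DemoRate(0.92, 4.0), _DemoRate(0.90, 5.0),
             _DemoRate(0.88, 6.0), _DemoRate(0.86, 7.0)]
    model = cubic_constraint_regress(curve)
    model.cubic_constraint_regress(number=2)
    return model.fit(1.5)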
def plot_Rate(Rate):
'''
plot term structure:
input: Rate: the term structure list
output: plot the term structure
'''
import matplotlib.pyplot as plt
def value(x):
return x.value
def maturity(x):
return x.maturity
value=list(map(value,Rate))
matur=list(map(maturity,Rate))
plt.plot(matur,value,label='Rate')
class NS_Model():
'''
Nelson-Siegel Model
the basic function form NS model:
R(0,s)=beta0+beta1*(1-exp(-s/m))/(s/m)+beta2*{[1-exp(-s/m)]/(s/m)-exp(-s/m)}
the advanced function form NSS model:
R(0,s)=beta0+beta1*(1-exp(-s/m))/(s/m)+beta2*{[1-exp(-s/m1)]/(s/m1)-exp(-s/m1)}+beta3*{[1-exp(-s/m2)]/(s/m2)-exp(-s/m2)}
WITH or WITHOUT boundary condition:
R(0,0)=0
'''
def __init__(self,Rate,mtype,bc=False):
'''
initial parameters:
input: Rate: term structure
mtype: if 'NS', then using NS model
if 'NSS', then using NSS model
bc: with or without boundary condition
if 'False', then without condition
if 'True', then with condition
'''
def take(elem) :
'''
take the elem's maturity
'''
return elem.maturity
# sort on the maturity
Rate.sort(key=take)
self.Rate=Rate
def value(x):
return x.value
def maturity(x):
return x.maturity
self.matur=list(map(maturity,self.Rate))
self.value=list(map(value,self.Rate))
self.mtype=mtype
self.bc=bc
def NS_m_Setted(self,m):
'''
        calibrate the NS model with a fixed m
input:
the yield curve: Rate
the interpolated time point: t
the parameter m determined or not determined
if determined, then m != None
if not determined, then m=None
the model type: mtype
if mtype='NS', then the model is NS model
if mtype='NSS', then the model is NSS model
output:
the interpolated rate
'''
import numpy as np
from numpy.linalg import inv
Rate=self.Rate
mtype=self.mtype
n=len(Rate)
f=np.zeros((n,1))
if mtype=='NS' :
b=3
self.b=b
m=list([m])
elif mtype=='NSS' :
b=4
self.b=b
        else:
            raise ValueError("mtype must be 'NS' or 'NSS'")
X=np.zeros((b,n))
A=np.zeros((1,b))
d=np.zeros((1,1))
A[0,0]=1
A[0,1]=1
for j in range(n):
s=Rate[j].maturity
X[0,j]=1
if s==0 :
X[1,j]=1
X[2,j]=0
elif s>0 :
X[1,j]=(1-np.exp(-s/m[0]))/(s/m[0])
X[2,j]=(1-np.exp(-s/m[0]))/(s/m[0])-np.exp(-s/m[0])
f[j]=Rate[j].value
if b==4 :
if s==0 :
X[3,j]=0
elif s>0 :
X[3,j]=(1-np.exp(-s/m[1]))/(s/m[1])-np.exp(-s/m[1])
self.X=X
self.f=f
para=np.zeros((b,1))
if self.bc==False:
para=inv(X.dot(X.T)).dot(X).dot(f)
elif self.bc==True :
para=inv(X.dot(X.T)).dot(X).dot(f)+inv(X.dot(X.T)).dot(A.T).dot(inv(A.dot(inv(X.dot(X.T))).dot(A.T))).dot(d-A.dot(inv(X.dot(X.T)).dot(X).dot(f)))
self.m_para=para
self.m=m
self.X=X
return para
def NS_m_setted_fit(self,t):
'''
fit the NS model with m fixed
input : t: the fitted maturity
output: f_hat: the rate on the fitted maturity
'''
import numpy as np
b=self.b
        m=self.m  # already stored as a list by NS_m_Setted
para=self.m_para
Xt=np.zeros((b,len(t)))
for j in range(len(t)):
Xt[0,j]=1
Xt[1,j]=(1-np.exp(-t[j]/m[0]))/(t[j]/m[0])
Xt[2,j]=(1-np.exp(-t[j]/m[0]))/(t[j]/m[0])-np.exp(-t[j]/m[0])
if b==4 :
Xt[3,j]=(1-np.exp(-t[j]/m[1]))/(t[j]/m[1])-np.exp(-t[j]/m[1])
f_hat=Xt.T.dot(para)
return f_hat
def optimization(self,m_initial,step,precise) :
'''
optimize the loss fucntion using gradient descent
input: m_initial: the setted initial value of m
step: the step using for optimization
precise: the step length for each iteration
output: the optimized parameter: m
'''
import numpy as np
last_m=0.9*m_initial
m=m_initial
for i in range(step):
e1=np.log(self.error(last_m))
e2=np.log(self.error(m))
temp=last_m
last_m=m
if self.mtype=='NS' and m==temp :
return m
elif self.mtype=='NSS' and (m==temp).all() :
return m
m=m-precise*(e2-e1)/(m-temp)
return m
def error(self,m):
'''
calculate the total squared error between the fit value and the real value
input: the parameter m
output: the total squared error
'''
import numpy as np
estimate=np.array(self.value)-self.NS_m_Setted(m).T.dot(self.X)[0]
return sum(estimate**2)
def NS_m_unsetted(self,m_initial,step,precise):
'''
        calibrate the NS model with unfixed m
input: m_initial: the initial value of m
step: the iteration step for optimization
precise: the step length for each iteration
output: the parameter calibrated
'''
m=self.optimization(m_initial,step,precise)
self.m=m
return [self.NS_m_Setted(m),m]
def NS_m_unsetted_fit(self,t):
'''
fit the NS model with unfixed m
input: t: the fitted maturity
output: the fitted rate at the maturity t
'''
import numpy as np
b=self.b
m=self.m
m=list(m)
para=self.m_para
Xt=np.zeros((b,len(t)))
for j in range(len(t)):
Xt[0,j]=1
Xt[1,j]=(1-np.exp(-t[j]/m[0]))/(t[j]/m[0])
Xt[2,j]=(1-np.exp(-t[j]/m[0]))/(t[j]/m[0])-np.exp(-t[j]/m[0])
if b==4 :
Xt[3,j]=(1-np.exp(-t[j]/m[1]))/(t[j]/m[1])-np.exp(-t[j]/m[1])
f_hat=Xt.T.dot(para)
return f_hat[0,0]
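# Hedged example for the Nelson-Siegel model with a fixed shape parameter;
# the curve values and m=2.0 are illustrative.
def _demo_ns_model():
    curve = [_DemoRate(0.010, 0.5), _DemoRate(0.015, 1.0),
             _DemoRate(0.020, 2.0), _DemoRate(0.025, 3.0),
             _DemoRate(0.028, 5.0)]
    ns = NS_Model(curve, mtype='NS')
    ns.NS_m_Setted(2.0)
    return ns.NS_m_setted_fit([1.5, 4.0])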
| whyecofiliter/Options | interpolation.py | interpolation.py | py | 21,040 | python | en | code | 3 | github-code | 90 |
554785275 | def blue(text):
blue_text = ""
for character in text:
blue_text += f"\033[38;2;0;0;255m{character}\033[0m"
return blue_text
def green(text):
green_text = ""
for character in text:
green_text += f"\033[38;2;0;255;0m{character}\033[0m"
return green_text
def orange(text):
orange_text = ""
for character in text:
orange_text += f"\033[38;2;255;165;0m{character}\033[0m"
return orange_text
def purple(text):
purple_text = ""
for character in text:
purple_text += f"\033[38;2;221;160;221m{character}\033[0m"
return purple_text
def yellow(text):
yellow_text = ""
for character in text:
yellow_text += f"\033[38;2;255;255;0m{character}\033[0m"
return yellow_text
def red(text):
red_text = ""
for character in text:
red_text += f"\033[38;2;255;0;0m{character}\033[0m"
return red_text
def pinkish_red(text):
pinkish_red_text = ""
for character in text:
pinkish_red_text += f"\033[38;2;255;20;147m{character}\033[0m"
return pinkish_red_text
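# A general-purpose variant of the helpers above, shown for illustration:
# wrap each character in a 24-bit ANSI colour escape for an arbitrary
# (r, g, b) triple.  The named helpers could all be expressed through it.
def rgb(text, r, g, b):
    return "".join(f"\033[38;2;{r};{g};{b}m{character}\033[0m" for character in text)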
def water(text):
faded = ""
colour_green = 10
for line in text.splitlines():
faded += f"\033[38;2;0;{colour_green};255m{line}\033[0m\n"
if not colour_green == 255:
colour_green += 15
if colour_green > 255:
colour_green = 255
return faded | Benzo-Fury/PyBet | Utility/Colour/colour.py | colour.py | py | 1,394 | python | en | code | 1 | github-code | 90 |
641131581 | import os.path
def task():
print(f"Лабораторная работа №3\nВариант №6. Выполнила студентка группы 6101-090301D Горбунцова А.А\nЗадание: "
f"написать программу, которая для каждой строки исходного файла будет выводить в результирующий файл "
f"последовательность цифр\n('0','1'..'9') из входной последовательности и, через пробел, частот их "
f"повторения. Печать должна происходить в порядке возрастания.\n")
def strToRes(s):
    counts = [0] * 10
    for ch in s:
        if '0' <= ch <= '9':
            counts[ord(ch) - ord('0')] += 1
    # digits in ascending order, each with its repetition count
    return ", ".join(f"{d} - {counts[d]}" for d in range(10) if counts[d] > 0)
def fileToFile(fname1, fname2):
f1 = open(fname1, "r")
f2 = open(fname2, "w")
data = f1.readlines()
for s in data:
res = ""
if s != "":
s = s.upper()
res = strToRes(s)
f2.write(res + "\n")
f1.close()
f2.close()
task()
filename1 = input("Введите имя исходного файла: ")
if os.path.exists(filename1):
filename2 = input("Введите имя результирующего файла: ")
fileToFile(filename1, filename2)
print("Задание выполнено")
else:
print("Такого файла не существует")
| litirnntir/lab-py-1sem | lab3.py | lab3.py | py | 1,690 | python | ru | code | 0 | github-code | 90 |
21199653031 | # You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
# You may assume the two numbers do not contain any leading zero, except the number 0 itself.
# leetcode 2
# https://leetcode.com/problems/add-two-numbers/
# 2. Add Two Numbers
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
        dump = ListNode(0)  # dummy head of the result list
        cur = dump          # tail of the result list
        carry = 0
        while l1 or l2:     # while either input list has nodes left
            if l1:
                carry += l1.val
                l1 = l1.next
            if l2:
                carry += l2.val
                l2 = l2.next
            cur.next = ListNode(carry % 10)  # append the ones digit
            cur = cur.next
            carry //= 10                     # keep the tens digit as carry
if carry == 1:
cur.next = ListNode(1)
return dump.next | endermeihl/ender.github.io | leetcode2023/L2.py | L2.py | py | 1,302 | python | en | code | 0 | github-code | 90 |
43334129656 | #!/usr/bin/env python3
import sys
from operator import add, mul
def run(p):
pc = 0
while p[pc] != 99:
opcode, in1, in2, out = p[pc:pc + 4]
op = add if opcode == 1 else mul
p[out] = op(p[in1], p[in2])
pc += 4
def initrun(p, noun, verb):
p = list(p)
p[1:3] = noun, verb
run(p)
return p[0]
def find_params(p, desired_result):
    # brute force, relying on the output increasing monotonically in noun
    # (coarse search) and then in verb (fine search)
    noun = verb = 0
while initrun(p, noun, verb) <= desired_result:
noun += 1
noun -= 1
while initrun(p, noun, verb) < desired_result:
verb += 1
return 100 * noun + verb
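def _demo_run():
    # hedged mini-example of the interpreter: 1,0,0,0,99 adds p[0] + p[0]
    # into p[0], yielding [2, 0, 0, 0, 99]
    p = [1, 0, 0, 0, 99]
    run(p)
    return p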
program = list(map(int, sys.stdin.read().split(',')))
print(initrun(program, 12, 2))
print(find_params(program, 19690720))
| taddeus/advent-of-code | 2019/02_intcode.py | 02_intcode.py | py | 708 | python | en | code | 2 | github-code | 90 |
8589183275 | import os
import difflib
from gi.repository import Gtk as gtk
from gi.repository import WebKit as webkit
from parsers.trs_parser import TRSParser
from utils.ui_utils import UIUtils
from utils.progress_dialog import ProgressDialog
from utils.backend_utils import BackendUtils
from ui.verifier_app.diff_win import DiffWin
class OpenPairWindow():
def __init__(self):
self.window = gtk.Window(gtk.WindowType.TOPLEVEL)
self.window.set_title('Transcription Verifier')
self.window.connect('destroy', lambda w: self.window.destroy())
self.window.set_default_size(270, 210)
self.window.set_resizable(True)
vbox = gtk.VBox()
file1_grid = gtk.Grid()
file1_frame = gtk.Frame(label='File 1')
file1_name_label = gtk.Label('Transcriber Name:')
file1_name_entry = gtk.Entry()
file1_name_entry.set_width_chars(20)
file1_label = gtk.Label('Path:')
file1_entry = gtk.Entry()
file1_entry.set_width_chars(50)
file1_browse_button = gtk.Button('Browse')
file1_browse_button.connect('clicked', lambda w: UIUtils.browse_file('Select File 1', file1_entry, [UIUtils.TRS_FILE_FILTER]))
file1_grid.attach(file1_name_label, 0, 0, 1, 1)
file1_grid.attach(file1_name_entry, 1, 0, 1, 1)
file1_grid.attach(file1_label, 0, 1, 1, 1)
file1_grid.attach(file1_entry, 1, 1, 1, 1)
file1_grid.attach(file1_browse_button, 2, 1, 1, 1)
file1_frame.add(file1_grid)
vbox.pack_start(file1_frame, True, True, 0)
file2_grid = gtk.Grid()
file2_frame = gtk.Frame(label='File 2')
file2_name_label = gtk.Label('Transcriber Name:')
file2_name_entry = gtk.Entry()
file2_name_entry.set_width_chars(20)
file2_label = gtk.Label('Path:')
file2_entry = gtk.Entry()
file2_entry.set_width_chars(50)
file2_browse_button = gtk.Button('Browse')
file2_browse_button.connect('clicked', lambda w: UIUtils.browse_file('Select File 2', file2_entry, [UIUtils.TRS_FILE_FILTER]))
file2_grid.attach(file2_name_label, 0, 2, 1, 1)
file2_grid.attach(file2_name_entry, 1, 2, 1, 1)
file2_grid.attach(file2_label, 0, 3, 1, 1)
file2_grid.attach(file2_entry, 1, 3, 1, 1)
file2_grid.attach(file2_browse_button, 2, 3, 1, 1)
file2_frame.add(file2_grid)
vbox.pack_start(file2_frame, True, True, 0)
#for debugging
#file1_entry.set_text('G:\\Wayne\\baby-lab\\test-data\\trs\\C001b_20090901lFINAL.trs')
#file2_entry.set_text('G:\\Wayne\\baby-lab\\test-data\\trs\\C001b_20090901lFINAL - Copy.trs')
file1_name_entry.grab_focus()
button_box = gtk.HButtonBox()
button_box.set_layout(gtk.ButtonBoxStyle.EDGE)
cancel_button = gtk.Button(stock=gtk.STOCK_CANCEL, label='Cancel')
cancel_button.connect('clicked', lambda w: self.window.destroy())
button_box.add(cancel_button)
ok_button = gtk.Button(stock=gtk.STOCK_OK, label='Ok')
ok_button.connect('clicked', lambda w: self._check_input(
file1_entry.get_text(),
file2_entry.get_text(),
file1_name_entry.get_text(),
file2_name_entry.get_text())
)
button_box.add(ok_button)
vbox.pack_start(button_box, True, True, 0)
self.window.add(vbox)
self.window.show_all()
def _check_input(self, file1_path, file2_path, file1_name, file2_name):
if file1_path and file2_path:
bad_paths = []
for path in [file1_path, file2_path]:
if not os.path.exists(path):
bad_paths.append(path)
if bad_paths:
message = 'The following files could not be located.\n'
for path in bad_paths:
message += '\n- %s' % (path)
message += '\n\nPlease double-check the paths and try again.'
UIUtils.show_message_dialog(message)
else:
self._compare(file1_path, file2_path, file1_name, file2_name)
else:
UIUtils.show_message_dialog('Please select two files.')
def _compare(self, file1_path, file2_path, file1_name, file2_name):
self.window.set_sensitive(False)
paths = [file1_path, file2_path]
segs = []
dialog = ProgressDialog('Processing Files...', ['Parsing trs file %d...' % (i + 1) for i in range(len(paths))] + ['Comparing files...', 'Generating output...'])
dialog.show()
for i in range(len(paths)):
file_segs = TRSParser(paths[i]).parse(
progress_update_fcn=dialog.set_fraction,
validate=False,
remove_bad_trans_codes=False
)
segs.append(file_segs)
dialog.next_phase()
desc_strs = self._build_desc_strs(segs, dialog)
dialog.next_phase()
html = difflib.HtmlDiff().make_file(*desc_strs, fromdesc=file1_name, todesc=file2_name, context=True, numlines=0)
#prevent font selection from killing webkit on Windows systems
html = html.replace('font-family:Courier;', '')
DiffWin(html)
dialog.ensure_finish()
self.window.destroy()
def _build_desc_strs(self, segs, dialog):
descs = []
for i in range(len(segs)):
file_descs = []
for seg in segs[i]:
for utter in seg.utters:
file_descs.append(self._build_utter_desc(utter))
dialog.set_fraction(float(i) / float(len(segs)))
descs.append(file_descs)
return descs
def _build_utter_desc(self, utter):
desc_str = ''
speaker_cd = '?'
if utter.speaker:
if utter.speaker.speaker_codeinfo:
speaker_cd = utter.speaker.speaker_codeinfo.get_code()
else:
speaker_cd = ' - '
desc_str = '%s [%s - %s]' % ( speaker_cd, BackendUtils.get_time_str(utter.start), BackendUtils.get_time_str(utter.end))
if utter.lena_notes:
desc_str += ' %s' % (utter.lena_notes)
if utter.trans_phrase:
desc_str += ' %s' % (utter.trans_phrase)
if utter.lena_codes:
desc_str += ' |%s|' % ('|'.join(utter.lena_codes))
if utter.trans_codes:
if not utter.lena_codes:
desc_str += ' |'
desc_str += '%s|' % ('|'.join(utter.trans_codes))
desc_str += '\n'
return desc_str
| babylanguagelab/bll_app | wayne/ui/verifier_app/open_pair_window.py | open_pair_window.py | py | 6,766 | python | en | code | 0 | github-code | 90 |
28008451984 | from __future__ import print_function
from __future__ import division
import numpy as np
import numpy.linalg as la
import numbers
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import scipy.fftpack as spfft
#import time
#import itertools
from abc import abstractmethod
import pywt
try:
from itertools import accumulate
except:
# can also try numpy.cumsum
import operator
def accumulate(iterable, func=operator.add):
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = func(total, element)
yield total
class AbstractOperator(object):
'''To make sure that the derived classes have the right functions'''
@abstractmethod
def apply(self, x):
"""Compute Ax"""
pass
@abstractmethod
def inv(self, x):
"""A^-1 x"""
pass
# reals space: everything 2D
# T-space: 1D
class DCT(AbstractOperator):
'''Discrete cosine transform'''
def __init__(self, shape):
self.shape = shape
def __call__(self, image):
Timage = spfft.dct(spfft.dct(image, norm='ortho', axis=0), norm='ortho', axis=1)
return Timage.reshape(-1)
def inv(self, Timage):
Timage = Timage.reshape(self.shape)
return spfft.idct(spfft.idct(Timage, norm='ortho', axis=0), norm='ortho', axis=1)
class WT(AbstractOperator):
'''wavelet transform:
call input: matrix
inv input: vector of length fitting WT.shape'''
def __init__(self, shape, wavelet = 'db6', level = 3, amplify = None):
self.shape = shape
self.wavelet = wavelet
self.level = level
self.cMat_shapes = []
#build amplification vector of length 3*level
if amplify is None:
self.amplify = np.ones(3*self.level+1)
else:
self.amplify = amplify
if isinstance(amplify, numbers.Number):
self.amplify = np.ones(3*self.level+1)
self.amplify[0] = amplify
def __call__(self, image):
coeffs = pywt.wavedec2(image, wavelet=self.wavelet, level=self.level)
# format: [cAn, (cHn, cVn, cDn), ...,(cH1, cV1, cD1)] , n=level
#to list of np.arrays
#multiply with self.amplify[0] to have them more strongly weighted in compressions
#tbd: implement others
cMat_list = [coeffs[0]]
for c in coeffs[1:]:
cMat_list = cMat_list + list(c)
#memorize all shapes for inv
self.cMat_shapes = list(map(np.shape,cMat_list))
#array vectorization
vect = lambda array: np.array(array).reshape(-1)
#store coeffcient matrices as vectors in list
#cVec_list = map(vect,cMat_list)
#apply amplification
cVec_list = [vect(cMat_list[j])*self.amplify[j] for j in range(3*self.level+1)]
return np.concatenate(cVec_list)
def inv(self,wavelet_vector):
'''Inverse WT
cVec_list: vector containing all wavelet coefficients as vectrized in __call__'''
#check if shapes of the coefficient matrices are known
if self.cMat_shapes == []:
print("Call WT first to obtain shapes of coefficient matrices")
return None
cVec_shapes = list(map(np.prod,self.cMat_shapes))
split_indices = list(accumulate(cVec_shapes))
cVec_list = np.split(wavelet_vector,split_indices)
#reverse amplification
cVec_list = [cVec_list[j]/self.amplify[j] for j in range(3*self.level+1)]
#back to level format
coeffs=[ np.reshape(cVec_list[0],self.cMat_shapes[0]) ]
for j in range(self.level):
triple = cVec_list[3*j+1:3*(j+1)+1]
triple = [np.reshape( triple[i], self.cMat_shapes[1 +3*j +i] )
for i in range(3) ]
coeffs = coeffs + [tuple(triple)]
return pywt.waverec2( coeffs, wavelet=self.wavelet )
def rand(self):
        '''outputs a random wavelet image in the picture domain'''
        Tz = self.__call__(np.zeros(self.shape))  # initialize self.cMat_shapes
        Tz = np.random.randn(Tz.size)  # draw random wavelet coefficients
cVec_shapes = list(map(np.prod,self.cMat_shapes))
split_indices = list(accumulate(cVec_shapes))
cVec_list = np.split(Tz,split_indices)
#back to level format
coeffs=[ np.reshape(cVec_list[0],self.cMat_shapes[0]) ]
for j in range(self.level):
triple = cVec_list[3*j+1:3*(j+1)+1]
triple = [np.reshape( triple[i], self.cMat_shapes[1 +3*j +i] )
for i in range(3)]
coeffs = coeffs + [tuple(triple)]
return pywt.waverec2( coeffs, wavelet=self.wavelet )
#end class(WT)
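def _demo_wt_roundtrip():
    '''hedged sanity check: WT followed by WT.inv should reproduce the
    input up to floating-point error (sizes here are illustrative)'''
    X = np.random.random((64, 64))
    T = WT(X.shape, wavelet='db6', level=3)
    return la.norm(T.inv(T(X)) - X, 'fro') / la.norm(X, 'fro')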
def rgb2gray(rgb):
'''Convert from rgb to grayscale'''
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def pltPic(X, size = (9,12) ):
plt.figure(figsize=size)
plt.imshow(X,interpolation='nearest', cmap=plt.cm.gray)
plt.show()
def cL(s,x):
'''returns n-s abs-smallest indices of vector x'''
ns = len(x)-s
return np.argpartition(abs(x),ns)[:ns]
class hardTO(object):
'''Hard thresholding operator:
takes vector x, returns hard thresholded vector'''
def __init__(self,sparsity):
'''s: sparsity (integer number)'''
self.s = int(sparsity)
def __call__(self,x):
x[cL(self.s,x)] = 0
return x
class softTO(object):
'''Soft thresholding operator:
takes vector x, returns hard thresholded vector'''
def __init__(self,tau):
'''tau>0: thresholding parameter'''
self.tau = tau
def __call__(self,x):
return pywt.threshold(x, self.tau, mode='soft')
def compress(T, TO, image):
    '''returns compressed image by applying thresholding to coefficients in dictionary T:
    T: transformation taking image to vector, subclass of AbstractOperator
    TO: thresholding operator acting on coefficient vectors (e.g. hardTO, softTO)
    image: matrix of black-white values'''
x = T(image)
x = TO(x)
Cimage = T.inv(x)
# print error
rel_error = la.norm(Cimage-image,'fro')/la.norm(image,'fro')
print("Relative compression error: {}".format( rel_error ))
return Cimage
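def _demo_compress():
    '''hedged demo of the compression pipeline: a random 64x64 "image" is
    transformed with the DCT dictionary and hard-thresholded to keep the
    500 largest coefficients (all numbers illustrative)'''
    image = np.random.random((64, 64))
    return compress(DCT(image.shape), hardTO(500), image)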
def getRandMask(N,m):
'''Random sample of m indices in range(N)'''
return np.random.choice(N, m, replace=False)
def update(T, thOp, mask, Xsub, X, mu):
    '''IHT-type update; returns the updated matrix, the gradient norm and
    the zero set of the thresholded coefficients
    T: transform
    thOp: thresholding operator
    mask: indices of the known (observed) pixels
    Xsub: observed image matrix, zero outside mask
    mu: step size'''
Xm = np.zeros(T.shape)
Xm.flat[mask] = X.flat[mask]
#calc gradient of squared L2-norm
grad = 2*(Xm-Xsub)
norm_grad = la.norm(grad.flat)
#gradient step, transform
TXnew = T( X-mu*grad )
#threshold
TXnew = thOp(TXnew)
#calculate support
support = TXnew==0
return ( T.inv(TXnew), norm_grad, support )
def IHT(T, thOp, mask, Xsub, stepsize = 1, n_steps = 100, X0=None, Xorig = None):
    '''IHT-type estimate
    :param T: transform on pictures, e.g., DCT
    :param thOp: thresholding operator encoding the expected sparsity
    :param mask: np.array of indices of the known entries of Xsub.flat
    :param X0: optional starting point, defaults to Xsub
    :param Xorig: original picture, only used to print the relative error'''
#learning rate
mu = stepsize #/np.sqrt(np.sum(mask))
if X0 is None:
X = Xsub
else:
X = X0
last_support = T(X)==0
# for checking divergence later
norm0 = la.norm(Xsub,'fro')
if isinstance(Xorig,np.ndarray):
print("Relative error (support change): {:3.3f}".format( la.norm(X-Xorig,'fro')/la.norm(Xorig,'fro') ), end = ', ')
else:
print("Support change: ")
for j in range(n_steps):
#update
X, norm_grad, support = update(T, thOp, mask, Xsub, X, mu)
#set negative values to zero
#X = pywt.threshold(X, 0, mode='greater', substitute = 0)
X = proj2range(X)
#print output
if j % 10 == 0:
#output support diff size
support_diff = np.sum( support == last_support )
print(' ({})'.format(len( support)-support_diff ),end ='')
last_support = support
# print error if original picture is provided
if isinstance(Xorig,np.ndarray):
rel_error = la.norm(X-Xorig,'fro')/la.norm(Xorig,'fro')
if rel_error>10: break
print(", {:3.3f}".format( rel_error ), end = '')
#interrupt if diverging
elif la.norm(X,'fro')> 10*norm0*np.sqrt( np.prod(T.shape)/len(mask) ):
break
print(' ')
return X
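def _demo_inpainting():
    '''hedged end-to-end sketch: recover a synthetic DCT-sparse image from
    half of its pixels with IHT; sizes and sparsity are illustrative
    (randomPic, getRandMask and proj2range are defined in this module)'''
    shape = (64, 64)
    T = DCT(shape)
    X = proj2range(randomPic(T, 200))        # roughly 200-sparse in DCT space
    mask = getRandMask(X.size, X.size // 2)  # indices of the known pixels
    Xsub = np.zeros(shape)
    Xsub.flat[mask] = X.flat[mask]
    return IHT(T, hardTO(200), mask, Xsub, n_steps=50, Xorig=X)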
def proj(T, thOp, mask, Xsub, X):
    '''gradient step with unit stepsize followed by thresholding (the
    projection used by FISTA); returns the new matrix, the gradient norm
    and the zero set of the thresholded coefficients
    T: transform
    thOp: thresholding operator
    mask: indices of the known (observed) pixels
    Xsub: observed image matrix, zero outside mask'''
Xm = np.zeros(T.shape)
Xm.flat[mask] = X.flat[mask]
#calc gradient of squared L2-norm
grad = 2*(Xm-Xsub)
norm_grad = la.norm(grad.flat)
#gradient step, transform
TXnew = T( X-grad )
#threshold
TXnew = thOp(TXnew)
#calculate support
support = TXnew==0
return ( T.inv(TXnew), norm_grad, support )
def FISTA(T, thOp, mask, Xsub, stepsize = .8, n_steps = 100, X0=None, Xorig = None):
'''FISTA-type estimate
    :param T: transform on pictures, e.g., DCT
    :param thOp: thresholding operator encoding the expected sparsity
    :param mask: np.array of indices of the known entries of Xsub.flat
    :param X0: optional starting point, defaults to Xsub
    :param Xorig: original picture, only used to print the relative error'''
    if X0 is None:
        X0 = Xsub  # default starting point
    X = X0
last_support = T(X)==0
# for checking divergence later
norm0 = la.norm(Xsub,'fro')
if isinstance(Xorig,np.ndarray):
print("Relative error (support change): {:3.3f}".format( la.norm(X-Xorig,'fro')/la.norm(Xorig,'fro') ), end = ', ')
else:
print("Support change: ")
#initialize
t0 = stepsize/2 #/np.sqrt(np.sum(mask))
Y = X0
for j in range(1,n_steps):
#calck projection
X1, norm_grad, support = proj(T, thOp, mask, Xsub, Y)
#set negative values to zero
X1 = proj2range(X1)
t1 = (1+np.sqrt( 1+4*t0**2 ))/2
Y = X1 + ((t0-1)/t1)*(X1-X0)
#save previous steps for next iteration
t0=t1
X0=X1
#print output
if j % 5 == 0:
#output support diff size
support_diff = np.sum( support == last_support )
print(' ({})'.format(len( support)-support_diff ),end ='')
last_support = support
# print error if original picture is provided
if isinstance(Xorig,np.ndarray):
rel_error = la.norm(X1-Xorig,'fro')/la.norm(Xorig,'fro')
if rel_error>10: break
print(", {:3.3f}".format( rel_error ), end = '')
#interrupt if diverging
        elif la.norm(X1,'fro')> 10*norm0*np.sqrt( np.prod(T.shape)/len(mask) ):
break
print(' ')
return X1
def proj2range(X):
'''Projects array elements to interval [0,255]'''
X = pywt.threshold(X, 255, mode='less', substitute = 255)
X = pywt.threshold(X, 0, mode='greater', substitute = 0)
return X
def rand_ux(N,s):
    '''random s-sparse vector of length N with uniform(0,255) nonzero entries'''
    ux = np.random.uniform(0,255,N)
    mask = np.random.choice(N, N-s, replace=False) # indices to zero out
ux[mask] = 0
return ux
def randomPic(T,s):
'''generates a random picture, s-sparse in T-space'''
shape = T.shape
rX = np.random.random(shape)
TX = T(rX)
TX = pywt.threshold(TX, TX[TX.argsort()[-s]], mode='hard')
return T.inv( TX ) | MartKl/CS_image_recovery_demo | pit.py | pit.py | py | 12,178 | python | en | code | 28 | github-code | 90 |
18086279303 | # -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import xgboost as xgb
import warnings
warnings.filterwarnings('ignore')  # suppress warnings
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
def prepare(dataset):
    # work on a copy
    data = dataset.copy()
    # discount handling
    data['is_manjian'] = data['Discount_rate'].map(lambda x: 1 if ':' in str(x) else 0)  # whether Discount_rate is a full-reduction ("spend X, save Y") coupon
    data['discount_rate'] = data['Discount_rate'].map(lambda x: float(x) if ':' not in str(x) else
        (float(str(x).split(':')[0]) - float(str(x).split(':')[1])) / float(str(x).split(':')[0]))  # convert full-reduction to a discount rate
    data['min_cost_of_manjian'] = data['Discount_rate'].map(lambda x: -1 if ':' not in str(x)
    else int(str(x).split(':')[0]))  # minimum spend to trigger the full-reduction coupon
    # distance handling
    data['Distance'].fillna(-1, inplace=True)  # fill missing distances with -1
    data['null_distance'] = data['Distance'].map(lambda x: 1 if x == -1 else 0)
    # date handling
data['date_received'] = pd.to_datetime(data['Date_received'], format='%Y%m%d')
if 'Date' in data.columns.tolist():
data['date'] = pd.to_datetime(data['Date'], format='%Y%m%d')
data['Weekday_received'] = data['date_received'].apply(lambda x: x.isoweekday())
return data
# labeling
def get_label(dataset):
    # work on a copy
    data = dataset.copy()
    # label 1 if the purchase happens within 15 days of receiving the coupon, else 0
data['label'] = list(map(lambda x, y: 1 if (x - y).total_seconds() / (60 * 60 * 24) <= 15 else 0, data['date'],
data['date_received']))
return data
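# Example: a coupon received on 2016-05-01 and consumed on 2016-05-10 gives
# (date - date_received) = 9 days <= 15, hence label = 1.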
def get_label_feature(label_field):
data = label_field.copy()
data['Date_received'] = data['Date_received'].map(int)
data['Coupon_id'] = data['Coupon_id'].map(int)
    data['cnt'] = 1  # helper column for counting in pivots
    l_feat = data.copy()
    # user features
    keys = ['User_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of coupons the user received
pivot = pd.pivot_table(data, index=keys, values='cnt', aggfunc=len)
pivot = pd.DataFrame(pivot).rename(columns={'cnt': prefixs + 'received_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # number of distinct discount rates among the user's received coupons
pivot = pd.pivot_table(data, index=keys, values='Discount_rate', aggfunc=lambda x: len(set(x)))
pivot = pd.DataFrame(pivot).rename(columns={'Discount_rate': prefixs + 'received_discount_rate_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # mean distance of the user's coupon receipts
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.mean([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_mean_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # max distance of the user's coupon receipts
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.max([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_max_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # min distance of the user's coupon receipts
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.min([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_min_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # variance of the distance of the user's coupon receipts
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.var([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_var_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # merchant features
    keys = ['Merchant_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of distinct users who received the merchant's coupons
pivot = pd.pivot_table(data, index=keys, values='User_id', aggfunc=lambda x: len(set(x)))
pivot = pd.DataFrame(pivot).rename(columns={'User_id': prefixs + 'received_User_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # mean distance at which the merchant's coupons were received
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.mean([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_mean_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # coupon features
    keys = ['Coupon_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of users who received this coupon
pivot = pd.pivot_table(data, index=keys, values='User_id', aggfunc=lambda x: len(set(x)))
pivot = pd.DataFrame(pivot).rename(columns={'User_id': prefixs + 'received_user_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # mean receipt distance
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.mean([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_mean_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # variance of the receipt distance
pivot = pd.pivot_table(data, index=keys, values='Distance',
aggfunc=lambda x: np.var([np.nan if i == -1 else i for i in x]))
pivot = pd.DataFrame(pivot).rename(columns={'Distance': prefixs + 'received_var_distance'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(-1, downcast='infer', inplace=True)
    # user-merchant features
    keys = ['User_id', 'Merchant_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of coupons the user received from this merchant
pivot = pd.pivot_table(data, index=keys, values='cnt', aggfunc=len)
pivot = pd.DataFrame(pivot).rename(columns={'cnt': prefixs + 'received_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # whether this is the user's first coupon from this merchant
tmp = data[keys + ['Date_received']].sort_values(['Date_received'], ascending=True)
first = tmp.drop_duplicates(keys, keep='first')
first[prefixs + 'is_first_receive'] = 1
l_feat = pd.merge(l_feat, first, on=keys + ['Date_received'], how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # number of distinct coupons the user received from this merchant
pivot = pd.pivot_table(data, index=keys, values='Coupon_id', aggfunc=lambda x: len(set(x)))
pivot = pd.DataFrame(pivot).rename(columns={'Coupon_id': prefixs + 'received_coupon_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # user-coupon features
    keys = ['User_id', 'Coupon_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of times the user received this particular coupon
pivot = pd.pivot_table(data, index=keys, values='cnt', aggfunc=len)
pivot = pd.DataFrame(pivot).rename(columns={'cnt': prefixs + 'received_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # user / receive-date features
    keys = ['User_id', 'Date_received']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of coupons the user received that day
pivot = pd.pivot_table(data, index=keys, values='cnt', aggfunc=len)
pivot = pd.DataFrame(pivot).rename(columns={'cnt': prefixs + 'received_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # merchant / receive-date features
    keys = ['Merchant_id', 'Date_received']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # number of the merchant's coupons received that day
pivot = pd.pivot_table(data, index=keys, values='cnt', aggfunc=len)
pivot = pd.DataFrame(pivot).rename(columns={'cnt': prefixs + 'recieved_cnt'}).reset_index()
l_feat = pd.merge(l_feat, pivot, on=keys, how='left')
l_feat.fillna(0, downcast='infer', inplace=True)
    # per-user ranks
    keys = ['User_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # ascending/descending rank of distance within each user
    l_feat[prefixs + 'distance_true_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=True)
    l_feat[prefixs + 'distance_false_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=False)
    # ascending/descending rank of receive date within each user
    l_feat[prefixs + 'date_received_true_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=True)
    l_feat[prefixs + 'date_received_false_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=False)
    # ascending/descending rank of discount rate within each user
    l_feat[prefixs + 'discount_rate_true_rank'] = l_feat.groupby(keys)['discount_rate'].rank(ascending=True)
    l_feat[prefixs + 'discount_rate_false_rank'] = l_feat.groupby(keys)['discount_rate'].rank(ascending=False)
    # ascending/descending rank of full-reduction minimum spend within each user
    l_feat[prefixs + 'min_cost_of_manjian_true_rank'] = l_feat.groupby(keys)['min_cost_of_manjian'].rank(ascending=True)
    l_feat[prefixs + 'min_cost_of_manjian_false_rank'] = l_feat.groupby(keys)['min_cost_of_manjian'].rank(
        ascending=False)
    # per-merchant ranks
    keys = ['Merchant_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # ascending/descending rank of distance within each merchant
    l_feat[prefixs + 'distance_true_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=True)
    l_feat[prefixs + 'distance_false_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=False)
    # ascending/descending rank of receive date within each merchant
    l_feat[prefixs + 'date_received_true_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=True)
    l_feat[prefixs + 'date_received_false_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=False)
    # ascending/descending rank of discount rate within each merchant
    l_feat[prefixs + 'discount_rate_true_rank'] = l_feat.groupby(keys)['discount_rate'].rank(ascending=True)
    l_feat[prefixs + 'discount_rate_false_rank'] = l_feat.groupby(keys)['discount_rate'].rank(ascending=False)
    # ascending/descending rank of full-reduction minimum spend within each merchant
    l_feat[prefixs + 'min_cost_of_manjian_true_rank'] = l_feat.groupby(keys)['min_cost_of_manjian'].rank(ascending=True)
    l_feat[prefixs + 'min_cost_of_manjian_false_rank'] = l_feat.groupby(keys)['min_cost_of_manjian'].rank(
        ascending=False)
    # per-coupon ranks
    keys = ['Coupon_id']
    prefixs = 'label_field_' + '_'.join(keys) + '_'
    # ascending/descending rank of distance within each coupon
    l_feat[prefixs + 'distance_true_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=True)
    l_feat[prefixs + 'distance_false_rank'] = l_feat.groupby(keys)['Distance'].rank(ascending=False)
    # ascending/descending rank of receive date within each coupon
    l_feat[prefixs + 'date_received_true_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=True)
    l_feat[prefixs + 'date_received_false_rank'] = l_feat.groupby(keys)['Date_received'].rank(ascending=False)
    # fill missing values
    l_feat.fillna(0, downcast='infer', inplace=True)
    # drop the helper 'cnt' column
    l_feat.drop(['cnt'], axis=1, inplace=True)
return l_feat
def get_week_feature(label_field):
"""根据Date_received得到的一些日期特征
根据date_received列得到领券日是周几,新增一列week存储,并将其one-hot离散为week_0,week_1,week_2,week_3,week_4,week_5,week_6;
根据week列得到领券日是否为休息日,新增一列is_weekend存储;
"""
# 源数据
data = label_field.copy()
data['Coupon_id'] = data['Coupon_id'].map(int)
data['Date_received'] = data['Date_received'].map(int)
# 返回的特征数据集
w_feat = data.copy()
w_feat['week'] = w_feat['date_received'].map(lambda x: x.weekday()) # 星期几
w_feat['is_weekend'] = w_feat['week'].map(lambda x: 1 if x == 5 or x == 6 else 0) # 判断领券日是否为休息日
w_feat = pd.concat([w_feat, pd.get_dummies(w_feat['week'], prefix='week')], axis=1) # one-hot离散星期几
w_feat.index = range(len(w_feat)) # 重置index
# 返回
return w_feat
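# Example: a coupon received on 2016-05-21 (a Saturday) gets week=5,
# is_weekend=1 and the one-hot column week_5=1.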
def get_dataset(history_field, middle_field, label_field):
    # feature engineering
label_feat = get_label_feature(label_field)
week_feat = get_week_feature(label_field)
    # build the dataset
    share_characters = list(set(label_feat.columns.tolist()) & set(
        week_feat.columns.tolist()))  # shared columns (ids and base features) common to both feature blocks
    dataset = pd.concat([week_feat, label_feat.drop(share_characters, axis=1)], axis=1)  # join the two blocks, dropping the duplicated columns
    # drop unused columns and move the label to the last column
    if 'Date' in dataset.columns.tolist():  # training / validation set
        dataset.drop(['Merchant_id', 'Discount_rate', 'Date', 'date_received', 'date'], axis=1, inplace=True)
label = dataset['label'].tolist()
dataset.drop(['label'], axis=1, inplace=True)
dataset['label'] = label
    else:  # test set
dataset.drop(['Merchant_id', 'Discount_rate', 'date_received'], axis=1, inplace=True)
    # fix data types
dataset['User_id'] = dataset['User_id'].map(int)
dataset['Coupon_id'] = dataset['Coupon_id'].map(int)
dataset['Date_received'] = dataset['Date_received'].map(int)
dataset['Distance'] = dataset['Distance'].map(int)
if 'label' in dataset.columns.tolist():
dataset['label'] = dataset['label'].map(int)
    # drop duplicates
dataset.drop_duplicates(keep='first', inplace=True)
dataset.index = range(len(dataset))
    # return
return dataset
def model_xgb(train, test):
params = {'booster': 'gbtree',
'objective': 'binary:logistic',
'eval_metric': 'auc',
'silent': 1,
'eta': 0.01,
              'max_depth': 8,  # originally 5
'min_child_weight': 1,
'gamma': 0,
'lambda': 1,
'colsample_bylevel': 0.7,
              'colsample_bytree': 0.7,  # originally 0.7; fraction of columns randomly sampled per tree
              'subsample': 0.9,  # originally 0.9; fraction of rows randomly sampled per tree
'scale_pos_weight': 1}
    # datasets
dtrain = xgb.DMatrix(train.drop(['User_id', 'Coupon_id', 'Date_received', 'label'], axis=1), label=train['label'])
dtest = xgb.DMatrix(test.drop(['User_id', 'Coupon_id', 'Date_received'], axis=1))
    # training
watchlist = [(dtrain, 'train')]
model = xgb.train(params, dtrain, num_boost_round=2000, evals=watchlist)
    # prediction
_predict = model.predict(dtest)
    # format the result
_predict = pd.DataFrame(_predict, columns=['prob'])
_result = pd.concat([test[['User_id', 'Coupon_id', 'Date_received']], _predict], axis=1)
return _result
def rebuild_feature():
    # source data
off_train = pd.read_csv('ccf_offline_stage1_train.csv')
off_test = pd.read_csv('ccf_offline_stage1_test_revised.csv')
    # preprocessing
off_train = prepare(off_train)
off_test = prepare(off_test)
    # labeling
off_train = get_label(off_train)
    # discrete-feature exploration (return values are not used below)
    pd.get_dummies(off_train['Distance'])
    pd.pivot_table(off_train, index='User_id', columns='Discount_rate', values='Distance', aggfunc='count')
    # split time intervals
    # training set: history, middle and label intervals
train_history_field = off_train[
off_train['date_received'].isin(pd.date_range('2016/3/2', periods=60))] # [20160302,20160501)
train_middle_field = off_train[off_train['date'].isin(pd.date_range('2016/5/1', periods=15))] # [20160501,20160516)
train_label_field = off_train[
off_train['date_received'].isin(pd.date_range('2016/5/16', periods=31))] # [20160516,20160616)
    # validation set: history, middle and label intervals
validate_history_field = off_train[
off_train['date_received'].isin(pd.date_range('2016/1/16', periods=60))] # [20160116,20160316)
validate_middle_field = off_train[
off_train['date'].isin(pd.date_range('2016/3/16', periods=15))] # [20160316,20160331)
validate_label_field = off_train[
off_train['date_received'].isin(pd.date_range('2016/3/31', periods=31))] # [20160331,20160501)
    # test set: history, middle and label intervals
test_history_field = off_train[
off_train['date_received'].isin(pd.date_range('2016/4/17', periods=60))] # [20160417,20160616)
test_middle_field = off_train[off_train['date'].isin(pd.date_range('2016/6/16', periods=15))] # [20160616,20160701)
test_label_field = off_test.copy() # [20160701,20160801)
    # build the training, validation and test sets
    print('building the training set')
train = get_dataset(train_history_field, train_middle_field, train_label_field)
    print('building the validation set')
validate = get_dataset(validate_history_field, validate_middle_field, validate_label_field)
    print('building the test set')
test = get_dataset(test_history_field, test_middle_field, test_label_field)
    # save the training, validation and test sets
train.to_csv('train.csv', index=False)
validate.to_csv('validate.csv', index=False)
test.to_csv('test.csv', index=False)
if __name__ == '__main__':
# rebuild_feature()
train = pd.read_csv('train.csv')
validate = pd.read_csv('validate.csv')
test = pd.read_csv('test.csv')
    # train on the full data for the online submission
big_train = pd.concat([train, validate], axis=0)
result = model_xgb(big_train, test)
    # save the result
result.to_csv('submission.csv', index=False)
| sarailQAQ/ml-prac | main.py | main.py | py | 18,279 | python | en | code | 0 | github-code | 90 |
32923533752 | Name = []
Set = []
def read_data(inF,name):
Name.append(name)
L = []
inFile = open(inF)
for line in inFile:
line = line.strip()
fields = line.split('\t')
L.append(fields[0])
inFile.close()
Set.append(set(L))
read_data('split-mapped-deletion.normal.seq.filtered.num.gene.more_than_one.gene','Deletion')
read_data('split-mapped-duplication.normal.seq.filtered.num.gene.more_than_one.gene','Duplication')
read_data('split-mapped-inversion.normal.seq.filtered.num.gene.more_than_one.gene','Inversion')
read_data('split-mapped-translocation.normal.seq.filtered.num.gene.more_than_one.gene','Translocation')
S = Set[0] & Set[1] & Set[2] & Set[3]
for x in S:
print(x)
| wanghuanwei-gd/SIBS | RNAseqMSMS/21-rna-seq-stats/13-set.py | 13-set.py | py | 715 | python | en | code | 0 | github-code | 90 |
21317189815 | import pickle
import torch
import torch.nn as nn
with open('train.feature.pickle', 'rb') as f:
train_vectors = pickle.load(f)
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(300, 4)
nn.init.xavier_normal_(self.fc.weight)
def forward(self, x):
x = self.fc(x)
return x
model = Net()
torch.save(model, 'model.pth')
x = model(train_vectors[0])
x = torch.softmax(x, dim=-1)
print(x)
x = model(train_vectors[:4])
x = torch.softmax(x, dim=-1)
print(x)
"""
tensor([0.2376, 0.2169, 0.2739, 0.2715], grad_fn=<SoftmaxBackward>)
tensor([[0.2376, 0.2169, 0.2739, 0.2715],
[0.2262, 0.2163, 0.2453, 0.3122],
[0.2137, 0.2471, 0.2716, 0.2676],
[0.2495, 0.2456, 0.2362, 0.2687]], grad_fn=<SoftmaxBackward>)
"""
| KazumaAkiyama/100knocks | 第8章/Net_8_71.py | Net_8_71.py | py | 817 | python | en | code | 0 | github-code | 90 |
18189626499 | import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
import numpy as np
def main():
n = int(input())
if n == 1:
print(1)
sys.exit()
    # answer = sum of d*m over all pairs (d, m) with d*m <= n,
    # i.e. sum over k <= n of k times its number of divisors
    divs = np.arange(1, n + 1)
    divs2 = n // divs                 # for each d, how many multiples of d are <= n
    divs3 = divs2 * (divs2 + 1) // 2  # 1 + 2 + ... + (n // d)
    divs3 = divs3 * divs              # times d: contribution of all multiples of d
    r = divs3.sum()
print(r)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02624/s370921365.py | s370921365.py | py | 385 | python | en | code | 0 | github-code | 90 |
40065524104 | from typing import Dict, List
from einops import rearrange
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from dynamic_stereo.models.core.update import (
BasicUpdateBlock,
SequenceUpdateBlock3D,
TimeAttnBlock,
)
from dynamic_stereo.models.core.extractor import BasicEncoder
from dynamic_stereo.models.core.corr import CorrBlock1D
from dynamic_stereo.models.core.attention import (
PositionEncodingSine,
LocalFeatureTransformer,
)
from dynamic_stereo.models.core.utils.utils import InputPadder, interp
autocast = torch.cuda.amp.autocast
class DynamicStereo(nn.Module):
def __init__(
self,
max_disp: int = 192,
mixed_precision: bool = False,
num_frames: int = 5,
attention_type: str = None,
use_3d_update_block: bool = False,
different_update_blocks: bool = False,
):
super(DynamicStereo, self).__init__()
self.max_flow = max_disp
self.mixed_precision = mixed_precision
self.hidden_dim = 128
self.context_dim = 128
dim = 256
self.dim = dim
self.dropout = 0
self.use_3d_update_block = use_3d_update_block
self.fnet = BasicEncoder(
output_dim=dim, norm_fn="instance", dropout=self.dropout
)
self.different_update_blocks = different_update_blocks
cor_planes = 4 * 9
self.depth = 4
self.attention_type = attention_type
# attention_type is a combination of the following attention types:
# self_stereo, temporal, update_time, update_space
# for example, self_stereo_temporal_update_time_update_space
if self.use_3d_update_block:
if self.different_update_blocks:
self.update_block08 = SequenceUpdateBlock3D(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
self.update_block16 = SequenceUpdateBlock3D(
hidden_dim=self.hidden_dim,
cor_planes=cor_planes,
mask_size=4,
attention_type=attention_type,
)
self.update_block04 = SequenceUpdateBlock3D(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
else:
self.update_block = SequenceUpdateBlock3D(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
else:
if self.different_update_blocks:
self.update_block08 = BasicUpdateBlock(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
self.update_block16 = BasicUpdateBlock(
hidden_dim=self.hidden_dim,
cor_planes=cor_planes,
mask_size=4,
attention_type=attention_type,
)
self.update_block04 = BasicUpdateBlock(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
else:
self.update_block = BasicUpdateBlock(
hidden_dim=self.hidden_dim, cor_planes=cor_planes, mask_size=4
)
if attention_type is not None:
if ("update_time" in attention_type) or ("temporal" in attention_type):
self.time_embed = nn.Parameter(torch.zeros(1, num_frames, dim))
if "temporal" in attention_type:
self.time_attn_blocks = nn.ModuleList(
[TimeAttnBlock(dim=dim, num_heads=8) for _ in range(self.depth)]
)
if "self_stereo" in attention_type:
self.self_attn_blocks = nn.ModuleList(
[
LocalFeatureTransformer(
d_model=dim,
nhead=8,
layer_names=["self"] * 1,
attention="linear",
)
for _ in range(self.depth)
]
)
self.cross_attn_blocks = nn.ModuleList(
[
LocalFeatureTransformer(
d_model=dim,
nhead=8,
layer_names=["cross"] * 1,
attention="linear",
)
for _ in range(self.depth)
]
)
self.num_frames = num_frames
@torch.jit.ignore
def no_weight_decay(self):
return {"time_embed"}
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def convex_upsample(self, flow: torch.Tensor, mask: torch.Tensor, rate: int = 4):
"""Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination"""
N, _, H, W = flow.shape
mask = mask.view(N, 1, 9, rate, rate, H, W)
mask = torch.softmax(mask, dim=2)
up_flow = F.unfold(rate * flow, [3, 3], padding=1)
up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
return up_flow.reshape(N, 2, rate * H, rate * W)
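    # Shape sketch for rate=4: flow (N, 2, H, W) and mask (N, 9*4*4, H, W)
    # produce flow (N, 2, 4H, 4W); each fine-resolution pixel is a
    # softmax-weighted (convex) combination of its 3x3 coarse neighbourhood.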
def zero_init(self, fmap: torch.Tensor):
N, _, H, W = fmap.shape
_x = torch.zeros([N, 1, H, W], dtype=torch.float32)
_y = torch.zeros([N, 1, H, W], dtype=torch.float32)
zero_flow = torch.cat((_x, _y), dim=1).to(fmap.device)
return zero_flow
def forward_batch_test(
self, batch_dict: Dict, kernel_size: int = 14, iters: int = 20
):
stride = kernel_size // 2
predictions = defaultdict(list)
disp_preds = []
video = batch_dict["stereo_video"]
num_ims = len(video)
print("video", video.shape)
for i in range(0, num_ims, stride):
left_ims = video[i : min(i + kernel_size, num_ims), 0]
padder = InputPadder(left_ims.shape, divis_by=32)
right_ims = video[i : min(i + kernel_size, num_ims), 1]
left_ims, right_ims = padder.pad(left_ims, right_ims)
with autocast(enabled=self.mixed_precision):
disparities_forw = self.forward(
left_ims[None].cuda(),
right_ims[None].cuda(),
iters=iters,
test_mode=True,
)
disparities_forw = padder.unpad(disparities_forw[:, 0])[:, None].cpu()
if len(disp_preds) > 0 and len(disparities_forw) >= stride:
if len(disparities_forw) < kernel_size:
disp_preds.append(disparities_forw[stride // 2 :])
else:
disp_preds.append(disparities_forw[stride // 2 : -stride // 2])
elif len(disp_preds) == 0:
disp_preds.append(disparities_forw[: -stride // 2])
predictions["disparity"] = (torch.cat(disp_preds).squeeze(1).abs())[:, :1]
print(predictions["disparity"].shape)
return predictions
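    # Sliding-window note: consecutive test windows overlap by kernel_size/2
    # frames; the trimming above discards the overlapping margins so that each
    # output frame's disparity is taken from exactly one window.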
def forward_sst_block(
self, fmap1_dw16: torch.Tensor, fmap2_dw16: torch.Tensor, T: int
):
*_, h, w = fmap1_dw16.shape
# positional encoding and self-attention
pos_encoding_fn_small = PositionEncodingSine(d_model=self.dim, max_shape=(h, w))
# 'n c h w -> n (h w) c'
fmap1_dw16 = pos_encoding_fn_small(fmap1_dw16)
# 'n c h w -> n (h w) c'
fmap2_dw16 = pos_encoding_fn_small(fmap2_dw16)
if self.attention_type is not None:
# add time embeddings
if (
"temporal" in self.attention_type
or "update_time" in self.attention_type
):
fmap1_dw16 = rearrange(
fmap1_dw16, "(b t) m h w -> (b h w) t m", t=T, h=h, w=w
)
fmap2_dw16 = rearrange(
fmap2_dw16, "(b t) m h w -> (b h w) t m", t=T, h=h, w=w
)
# interpolate if video length doesn't match
if T != self.num_frames:
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(T), mode="nearest")
new_time_embed = new_time_embed.transpose(1, 2).contiguous()
else:
new_time_embed = self.time_embed
fmap1_dw16 = fmap1_dw16 + new_time_embed
fmap2_dw16 = fmap2_dw16 + new_time_embed
fmap1_dw16 = rearrange(
fmap1_dw16, "(b h w) t m -> (b t) m h w", t=T, h=h, w=w
)
fmap2_dw16 = rearrange(
fmap2_dw16, "(b h w) t m -> (b t) m h w", t=T, h=h, w=w
)
if ("self_stereo" in self.attention_type) or (
"temporal" in self.attention_type
):
for att_ind in range(self.depth):
if "self_stereo" in self.attention_type:
fmap1_dw16 = rearrange(
fmap1_dw16, "(b t) m h w -> (b t) (h w) m", t=T, h=h, w=w
)
fmap2_dw16 = rearrange(
fmap2_dw16, "(b t) m h w -> (b t) (h w) m", t=T, h=h, w=w
)
fmap1_dw16, fmap2_dw16 = self.self_attn_blocks[att_ind](
fmap1_dw16, fmap2_dw16
)
fmap1_dw16, fmap2_dw16 = self.cross_attn_blocks[att_ind](
fmap1_dw16, fmap2_dw16
)
fmap1_dw16 = rearrange(
fmap1_dw16, "(b t) (h w) m -> (b t) m h w ", t=T, h=h, w=w
)
fmap2_dw16 = rearrange(
fmap2_dw16, "(b t) (h w) m -> (b t) m h w ", t=T, h=h, w=w
)
if "temporal" in self.attention_type:
fmap1_dw16 = self.time_attn_blocks[att_ind](fmap1_dw16, T=T)
fmap2_dw16 = self.time_attn_blocks[att_ind](fmap2_dw16, T=T)
return fmap1_dw16, fmap2_dw16
def forward_update_block(
self,
update_block: nn.Module,
corr_fn: CorrBlock1D,
flow: torch.Tensor,
net: torch.Tensor,
inp: torch.Tensor,
predictions: List,
iters: int,
interp_scale: float,
t: int,
):
for _ in range(iters):
flow = flow.detach()
out_corrs = corr_fn(flow)
with autocast(enabled=self.mixed_precision):
net, up_mask, delta_flow = update_block(net, inp, out_corrs, flow, t=t)
flow = flow + delta_flow
flow_up = flow_out = self.convex_upsample(flow, up_mask, rate=4)
if interp_scale > 1:
flow_up = interp_scale * interp(
flow_out,
(
interp_scale * flow_out.shape[2],
interp_scale * flow_out.shape[3],
),
)
flow_up = flow_up[:, :1]
predictions.append(flow_up)
return flow_out, net
def forward(self, image1, image2, flow_init=None, iters=10, test_mode=False):
"""Estimate optical flow between pair of frames"""
# if input is list,
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
b, T, *_ = image1.shape
image1 = image1.contiguous()
image2 = image2.contiguous()
hdim = self.hidden_dim
image1 = rearrange(image1, "b t c h w -> (b t) c h w")
image2 = rearrange(image2, "b t c h w -> (b t) c h w")
with autocast(enabled=self.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
net, inp = torch.split(fmap1, [hdim, hdim], dim=1)
net = torch.tanh(net)
inp = F.relu(inp)
*_, h, w = fmap1.shape
# 1/4 -> 1/16
# feature
fmap1_dw16 = F.avg_pool2d(fmap1, 4, stride=4)
fmap2_dw16 = F.avg_pool2d(fmap2, 4, stride=4)
fmap1_dw16, fmap2_dw16 = self.forward_sst_block(fmap1_dw16, fmap2_dw16, T=T)
net_dw16, inp_dw16 = torch.split(fmap1_dw16, [hdim, hdim], dim=1)
net_dw16 = torch.tanh(net_dw16)
inp_dw16 = F.relu(inp_dw16)
fmap1_dw8 = (
F.avg_pool2d(fmap1, 2, stride=2) + interp(fmap1_dw16, (h // 2, w // 2))
) / 2.0
fmap2_dw8 = (
F.avg_pool2d(fmap2, 2, stride=2) + interp(fmap2_dw16, (h // 2, w // 2))
) / 2.0
net_dw8, inp_dw8 = torch.split(fmap1_dw8, [hdim, hdim], dim=1)
net_dw8 = torch.tanh(net_dw8)
inp_dw8 = F.relu(inp_dw8)
# Cascaded refinement (1/16 + 1/8 + 1/4)
predictions = []
flow = None
flow_up = None
if flow_init is not None:
scale = h / flow_init.shape[2]
flow = -scale * interp(flow_init, (h, w))
else:
# zero initialization
flow_dw16 = self.zero_init(fmap1_dw16)
# Recurrent Update Module
# Update 1/16
update_block = (
self.update_block16
if self.different_update_blocks
else self.update_block
)
corr_fn_att_dw16 = CorrBlock1D(fmap1_dw16, fmap2_dw16)
flow, net_dw16 = self.forward_update_block(
update_block=update_block,
corr_fn=corr_fn_att_dw16,
flow=flow_dw16,
net=net_dw16,
inp=inp_dw16,
predictions=predictions,
iters=iters // 2,
interp_scale=4,
t=T,
)
scale = fmap1_dw8.shape[2] / flow.shape[2]
flow_dw8 = -scale * interp(flow, (fmap1_dw8.shape[2], fmap1_dw8.shape[3]))
net_dw8 = (
net_dw8
+ interp(net_dw16, (2 * net_dw16.shape[2], 2 * net_dw16.shape[3]))
) / 2.0
# Update 1/8
update_block = (
self.update_block08
if self.different_update_blocks
else self.update_block
)
corr_fn_dw8 = CorrBlock1D(fmap1_dw8, fmap2_dw8)
flow, net_dw8 = self.forward_update_block(
update_block=update_block,
corr_fn=corr_fn_dw8,
flow=flow_dw8,
net=net_dw8,
inp=inp_dw8,
predictions=predictions,
iters=iters // 2,
interp_scale=2,
t=T,
)
scale = h / flow.shape[2]
flow = -scale * interp(flow, (h, w))
net = (
net + interp(net_dw8, (2 * net_dw8.shape[2], 2 * net_dw8.shape[3]))
) / 2.0
# Update 1/4
update_block = (
self.update_block04 if self.different_update_blocks else self.update_block
)
corr_fn = CorrBlock1D(fmap1, fmap2)
flow, __ = self.forward_update_block(
update_block=update_block,
corr_fn=corr_fn,
flow=flow,
net=net,
inp=inp,
predictions=predictions,
iters=iters,
interp_scale=1,
t=T,
)
predictions = torch.stack(predictions)
predictions = rearrange(predictions, "d (b t) c h w -> d t b c h w", b=b, t=T)
flow_up = predictions[-1]
if test_mode:
return flow_up
return predictions
| facebookresearch/dynamic_stereo | models/core/dynamic_stereo.py | dynamic_stereo.py | py | 16,065 | python | en | code | 132 | github-code | 90 |
12551281505 | """
The HaxBall gym environment.
"""
from typing import Dict, List, Tuple, Union
import numpy as np
from gym import Env
from haxballgym.envs.match import Match
class Gym(Env):
def __init__(self, match: Match):
super().__init__()
self._match = match
self.observation_space = match.observation_space
self.action_space = match.action_space
self._prev_state = None
def reset(self, return_info=False, save_recording=False) -> Union[List, Tuple]:
"""
The environment reset function.
When called, this will reset the state of the environment.
This should be called once when the environment is initialized,
then every time the `done` flag from the `step()` function is `True`.
"""
self._match.get_reset_state(save_recording)
state = self._receive_state()
self._match.episode_reset(state)
self._prev_state = state
obs = self._match.build_observations(state)
if return_info:
info = {"state": state, "result": self._match.get_result(state)}
return obs, info
return obs
def step(self, actions: list[int] | np.ndarray) -> Tuple[List, List, bool, Dict]:
"""
The step function will send the list of provided actions to the game,
then advance the game forward by `tick_skip` physics ticks using that action.
We then get the `GameState` object, which gets passed to the configuration
objects to determine the rewards, next observation, and done signal.
:param actions: An object containing actions, in the correct format
:return: A tuple containing (obs, rewards, done, info)
"""
actions = self._match.parse_actions(actions, self._prev_state)
actions_all = self._get_all_actions(actions)
for _ in range(self._match._tick_skip + 1):
self._match._game.step(actions_all)
state = self._receive_state()
obs = self._match.build_observations(state)
done = self._match.is_done(state)
reward = self._match.get_rewards(state, done)
self._prev_state = state
info = {"state": state, "result": self._match.get_result(state)}
return obs, reward, done, info
def _receive_state(self):
self._match._game_state.update(self._match._game)
return self._match._game_state
def _get_all_actions(self, actions: list[int] | np.ndarray):
if self._match._bots is None:
return actions
actions_all = [p.step(self._match._game) for p in self._match._game.players]
i = 0
for j, act in enumerate(actions_all):
if act is None:
actions_all[j] = actions[i]
i += 1
return actions_all
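# Usage sketch (assumes an already configured Match instance; names are
# hypothetical):
# env = Gym(match)
# obs = env.reset()
# done = False
# while not done:
#     obs, reward, done, info = env.step(env.action_space.sample())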
| HaxballGym/HaxballGym | haxballgym/gym.py | gym.py | py | 2,812 | python | en | code | 8 | github-code | 90 |
30642300843 | def wordBreak(s, wordDict):
    # table[i] is True when the prefix s[:i] can be segmented into dictionary words
    table = [False] * (len(s) + 1)
    table[0] = True
    for i in range(len(table)):
        if table[i]:
            for j in range(i + 1, len(table)):
                word = s[i:j]
                if word in wordDict:
                    table[j] = True
    return table[-1]
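# Worked example: s = "catdog", wordDict = ["cat", "dog"] builds
# table = [T, F, F, T, F, F, T]; table[3] is set by "cat" and table[6] by
# "dog", so wordBreak returns True.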
n = wordBreak("catdogcat", ["cat", "dog"])
if n:
print("\nhello world!\n")
| tombetthauser/aa_october_cohort_files_2 | classworks/test.py | test.py | py | 395 | python | en | code | 0 | github-code | 90 |
71719502698 | #!/usr/bin/env python3
import argparse
def do_argparse():
parser = argparse.ArgumentParser(
description="Prints the number of unique strings "
+ "separated by newlines in a file."
)
parser.add_argument("file", help="path to a valid file")
return parser.parse_args()
def main():
args = do_argparse()
working_set = set()
with open(args.file, "r") as in_file:
for line in in_file:
if line != "\n": # Exclude empty lines
working_set.add(line)
# Python accesses lengths for built-in types in constant time, bite me
print(len(working_set))
if __name__ == "__main__":
main()
| kenny-kelley/cli-utilities | get-set-size.py | get-set-size.py | py | 670 | python | en | code | 0 | github-code | 90 |
32613626998 | import xlrd
import dishsql
import re
import os
import datetime
import thedish
import jinja2
import codecs
def render(tpl_path, context):
path, filename = os.path.split(tpl_path)
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
).get_template(filename).render(context)
def update_counts_manually(file_name):
"""Reads an excel file whose first column (A) is the page name and
second column (B) is the number of uncounted views to increment the sql
table's counters by."""
wb = xlrd.open_workbook(file_name)
sh = wb.sheet_by_index(0)
pages = sh.col(0)
counts = sh.col(1)
# for now, we only keep track of post page view counts, so we can ignore
# everything else
extract_post_name = re.compile('.*/posts/([-a-zA-Z0-9]*)/?')
to_update = dict()
for i, page in enumerate(pages):
match = extract_post_name.match(page.value)
if match:
to_update[match.groups()[0]] = counts[i].value
with dishsql.session_scope() as session:
for page, count in to_update.items():
post = session.query(dishsql.Post).\
filter_by(url_title=page).\
first()
if post:
post.view_count = post.view_count + count
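# Expected sheet layout (sketch; the URL is hypothetical):
#   column A (page)                            column B (uncounted views)
#   https://example.com/posts/my-first-post/   12
# Only rows whose page matches .../posts/<slug>/ update a post's counter.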
default_preview_text = "Announcing cool new content from The Dish on Science!"
def create_announcement_email_given_posts_(new_posts, extra_article_pairs, preview_text=None, events=None, date=None):
if date is None:
date = datetime.datetime.now()
if preview_text is None:
preview_text = default_preview_text
if len(extra_article_pairs) % 2 == 1:
raise ValueError('Odd number of "extra" articles not allowed by email template.')
email_rel_url = '/emails/dish-article-alert-' + date.strftime('%Y-%m-%d') + '.html'
email_url = thedish.dish_info.url + email_rel_url
email_file = thedish.www_dir + email_rel_url
extra_article_pairs = [(extra_article_pairs[2*i], extra_article_pairs[2*i+1])
for i in range(int(len(extra_article_pairs)/2))]
context = {'preview_text': preview_text, 'new_posts': new_posts,
'article_pairs': extra_article_pairs, 'events': events,
'thedish': thedish.dish_info, 'archive_url': email_url,
'num_articles_plus_one': len(new_posts)+1}
email = render(os.path.join(thedish.www_dir, 'templates/newsletter.html'), context=context)
    with codecs.open(email_file, 'w', encoding='utf-8') as f:
f.write(email)
def create_announcement_email(new_posts, extra_article_pairs,
preview_text=None, events=None, date=None):
with dishsql.session_scope() as session:
new_posts = [dishsql.get_post_by_name(post, session) for post in new_posts]
extra_article_pairs = [dishsql.get_post_by_name(post, session) for post in extra_article_pairs]
return create_announcement_email_given_posts_(new_posts,
extra_article_pairs, preview_text, events, date)
| brunobeltran/the-dish-on-science | cgi-bin/dishutil.py | dishutil.py | py | 3,047 | python | en | code | 0 | github-code | 90 |
15086601210 | from app.forms.login_form import LoginForm
from datetime import date, timedelta
from app import application, login_manager
from flask import session, redirect, render_template, flash, redirect, url_for
from flask_login import login_required, login_user, logout_user, current_user
import bcrypt
from app.models.produtor import Produtor
from app.models.compra import Compra
from app.models.venda import Venda
from app.models.propriedade import Propriedade
@application.before_request
def make_session_permanent():
session.permanent = True
application.permanent_session_lifetime = timedelta(minutes=60)
session.modified = True
@login_manager.unauthorized_handler
def not_allowed():
return redirect('/login')
@login_manager.user_loader
def get_user(produtor_id):
return Produtor.query.filter_by(id=produtor_id).first()
@application.route('/')
@login_required
def inicial():
propriedade = Propriedade.query.filter(
Propriedade.produtor_id == current_user.id,
Propriedade.ativa == True
).first()
if not propriedade:
return render_template('inicial.html', propriedade=False)
propriedade_id = propriedade.id
hoje = date.today()
    diasDoMes = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # days per month (leap years ignored)
lista_despesas = Compra.query.filter_by(propriedade_id = propriedade_id).order_by(Compra.data.desc()).limit(4).all()
lista_lucros = Venda.query.filter(
Venda.propriedade_id == propriedade_id,
Venda.data.between(str(hoje.year)+'-'+str(hoje.month)+'-01', str(hoje.year)+'-'+str(hoje.month)+'-'+str(diasDoMes[hoje.month - 1]))
).order_by(Venda.data.desc()).limit(4).all()
lista_despesas_mes = Compra.query.filter(
Compra.propriedade_id == propriedade_id,
Compra.data.between(str(hoje.year)+'-'+str(hoje.month)+'-01', str(hoje.year)+'-'+str(hoje.month)+'-'+str(diasDoMes[hoje.month - 1]))
).all()
lista_lucros_mes = Venda.query.filter(
Venda.propriedade_id == propriedade_id,
Venda.data.between(str(hoje.year)+'-'+str(hoje.month)+'-01', str(hoje.year)+'-'+str(hoje.month)+'-'+str(diasDoMes[hoje.month - 1]))
).all()
    # previous month, wrapping January back to December of the previous year
    mes_anterior = 12 if hoje.month == 1 else hoje.month - 1
    ano_anterior = hoje.year - 1 if hoje.month == 1 else hoje.year
    lista_despesas_mes_anterior = Compra.query.filter(
        Compra.propriedade_id == propriedade_id,
        Compra.data.between(str(ano_anterior)+'-'+str(mes_anterior)+'-01', str(ano_anterior)+'-'+str(mes_anterior)+'-'+str(diasDoMes[mes_anterior - 1]))
    ).all()
    lista_lucros_mes_anterior = Venda.query.filter(
        Venda.propriedade_id == propriedade_id,
        Venda.data.between(str(ano_anterior)+'-'+str(mes_anterior)+'-01', str(ano_anterior)+'-'+str(mes_anterior)+'-'+str(diasDoMes[mes_anterior - 1]))
    ).all()
despesa_mes = 0
for despesa in lista_despesas_mes:
despesa_mes += despesa.get_insumo().valor_total
despesa_mes *= -1
lucro_mes = 0
for lucro in lista_lucros_mes:
lucro_mes += lucro.valor_total
despesa_mes_anterior = 0
for despesa in lista_despesas_mes_anterior:
despesa_mes_anterior += despesa.get_insumo().valor_total
despesa_mes_anterior *= -1
lucro_mes_anterior = 0
for lucro in lista_lucros_mes_anterior:
lucro_mes_anterior += lucro.valor_total
caixa_mes = despesa_mes + lucro_mes
caixa_mes_anterior = despesa_mes_anterior + lucro_mes_anterior
return render_template('inicial.html',
propriedade=True,
despesa_mes='{:.2f}'.format(despesa_mes).replace('.', ','),
lucro_mes='{:.2f}'.format(lucro_mes).replace('.', ','),
lista_lucros=lista_lucros, lista_despesas=lista_despesas,
despesa_mes_anterior='{:.2f}'.format(despesa_mes_anterior).replace('.', ','),
lucro_mes_anterior='{:.2f}'.format(lucro_mes_anterior).replace('.', ','),
caixa_mes=caixa_mes, caixa_mes_anterior='{:.2f}'.format(caixa_mes_anterior).replace('.', ','))
@application.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
login = form.usuario.data
senha = form.senha.data
produtor = Produtor.query.filter_by(login=login).first()
autorizado = False
if produtor:
autorizado = bcrypt.checkpw(senha.encode('UTF-8'), produtor.senha.encode('UTF-8'))
if not produtor or not autorizado:
flash("Login não autorizado, verificar informações", 'flash-falha')
return redirect(url_for('login'))
else:
login_user(produtor, remember=False)
return redirect(url_for('inicial'))
return render_template('login.html', form=form)
@application.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
| pedroferronato/gerenciamento-rural | app/controllers/server_controller.py | server_controller.py | py | 4,685 | python | pt | code | 0 | github-code | 90 |
9268077410 | import json
import os
from statistics import mean
from typing import Dict, Union
from collector import DATA_FILE
class SolarProvider:
forecasts: Dict
def __init__(self):
if not os.path.isfile(DATA_FILE):
self.forecasts = {}
else:
with open(DATA_FILE, "rb") as file:
self.forecasts = json.loads(file.read())
def get_mean_and_range_for_date(self, date: str) -> Dict[str, Union[int, float]]:
if date not in self.forecasts:
return {}
values = self.forecasts[date]
return {
"mean": mean(values),
"min": min(values),
"max": max(values),
"count": len(values),
}
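# Usage sketch (hypothetical data): if DATA_FILE holds
# {"2024-01-01": [10, 20, 30]}, then
# SolarProvider().get_mean_and_range_for_date("2024-01-01") returns
# {"mean": 20, "min": 10, "max": 30, "count": 3}.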
| frak/energy-advisor | solar_provider.py | solar_provider.py | py | 721 | python | en | code | 1 | github-code | 90 |
7266480536 | import json
import random, string
import Geohash
from app.util import Sample
def pin_lst(_x, _y) :
lst = list()
try :
for i in range(0, 10) :
x = float(_x) + ( random.choice([-1, 1]) * random.randrange(0,9) / 1000 ) + ( random.choice([-1, 1]) * random.randrange(0,9) / 10000 )
y = float(_y) + ( random.choice([-1, 1]) * random.randrange(0,9) / 1000 ) + ( random.choice([-1, 1]) * random.randrange(0,9) / 10000 )
user = random.choice(Sample.users)
print("x[%f] y[%f]"%(x, y))
category = random.choice(Sample.category)
lst.append({
'id' : ''.join(random.choices(string.ascii_letters + string.digits, k=16))
, 'owner' : user
, 'title' : 'title_' + ''.join(random.choices(string.digits, k=5))
, 'category' : category
, 'tags' : random.choices(Sample.tags[category], k=3)
, 'img' : random.choice(Sample.sample_imgs)
, 'x' : x
, 'y' : y
, 'geohash' : Geohash.encode(y, x, precision=4)
})
except Exception as e :
print(e)
return lst
| korMaple0428/firebase-in-flask | app/util/Test.py | Test.py | py | 1,259 | python | en | code | 0 | github-code | 90 |
21944446776 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Chapter 3 - Linear Regression
# %% [markdown]
# - [Load Datasets](#Load-Datasets)
# - [3.1 Simple Linear Regression](#3.1-Simple-Linear-Regression)
# - [3.2 Multiple Linear Regression](#3.2-Multiple-Linear-Regression)
# - [3.3 Other Considerations in the Regression Model](#3.3-Other-Considerations-in-the-Regression-Model)
# %%
# # %load ../standard_import.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import seaborn as sns
from sklearn.preprocessing import scale
import sklearn.linear_model as skl_lm
from sklearn.metrics import mean_squared_error, r2_score
import statsmodels.api as sm
import statsmodels.formula.api as smf
# %matplotlib inline
plt.style.use('seaborn-white')
# %% [markdown]
# ### Load Datasets
# Datasets available on https://www.statlearning.com/resources-first-edition
# %%
advertising = pd.read_csv('Data/Advertising.csv', usecols=[1,2,3,4])
advertising.info()
# %%
credit = pd.read_csv('Data/Credit.csv', usecols=list(range(1,12)))
credit['Student2'] = credit.Student.map({'No':0, 'Yes':1})
credit.head(3)
# %%
auto = pd.read_csv('Data/Auto.csv', na_values='?').dropna()
auto.info()
# %% [markdown]
# ## 3.1 Simple Linear Regression
# %% [markdown]
# ### Figure 3.1 - Least squares fit
# %%
sns.regplot(advertising.TV, advertising.Sales, order=1, ci=None, scatter_kws={'color':'r', 's':9})
plt.xlim(-10,310)
plt.ylim(ymin=0);
# %% [markdown]
# ### Figure 3.2 - Regression coefficients - RSS
# Note that the text in the book describes the coefficients based on uncentered data, whereas the plot shows the model based on centered data. The latter is visually more appealing for explaining the concept of a minimum RSS. I think that, in order not to confuse the reader, the values on the axis of the B0 coefficients have been changed to correspond with the text. The axes on the plots below are unaltered.
# %%
# Regression coefficients (Ordinary Least Squares)
regr = skl_lm.LinearRegression()
X = scale(advertising.TV, with_mean=True, with_std=False).reshape(-1,1)
y = advertising.Sales
regr.fit(X,y)
print(regr.intercept_)
print(regr.coef_)
# %%
# Create grid coordinates for plotting
B0 = np.linspace(regr.intercept_-2, regr.intercept_+2, 50)
B1 = np.linspace(regr.coef_-0.02, regr.coef_+0.02, 50)
xx, yy = np.meshgrid(B0, B1, indexing='xy')
Z = np.zeros((B0.size,B1.size))
# Calculate Z-values (RSS) based on grid of coefficients
for (i,j),v in np.ndenumerate(Z):
Z[i,j] =((y - (xx[i,j]+X.ravel()*yy[i,j]))**2).sum()/1000
# Minimized RSS
min_RSS = r'$\beta_0$, $\beta_1$ for minimized RSS'
min_rss = np.sum((regr.intercept_+regr.coef_*X - y.values.reshape(-1,1))**2)/1000
min_rss
# %%
fig = plt.figure(figsize=(15,6))
fig.suptitle('RSS - Regression coefficients', fontsize=20)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
# Left plot
CS = ax1.contour(xx, yy, Z, cmap=plt.cm.Set1, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax1.scatter(regr.intercept_, regr.coef_[0], c='r', label=min_RSS)
ax1.clabel(CS, inline=True, fontsize=10, fmt='%1.1f')
# Right plot
ax2.plot_surface(xx, yy, Z, rstride=3, cstride=3, alpha=0.3)
ax2.contour(xx, yy, Z, zdir='z', offset=Z.min(), cmap=plt.cm.Set1,
alpha=0.4, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax2.scatter3D(regr.intercept_, regr.coef_[0], min_rss, c='r', label=min_RSS)
ax2.set_zlabel('RSS')
ax2.set_zlim(Z.min(),Z.max())
ax2.set_ylim(0.02,0.07)
# settings common to both plots
for ax in fig.axes:
ax.set_xlabel(r'$\beta_0$', fontsize=17)
ax.set_ylabel(r'$\beta_1$', fontsize=17)
ax.set_yticks([0.03,0.04,0.05,0.06])
ax.legend()
# %% [markdown]
# ### Confidence interval on page 67 & Table 3.1 & 3.2 - Statsmodels
# %%
est = smf.ols('Sales ~ TV', advertising).fit()
est.summary().tables[1]
# %%
# RSS with regression coefficients
((advertising.Sales - (est.params[0] + est.params[1]*advertising.TV))**2).sum()/1000
# %% [markdown]
# ### Table 3.1 & 3.2 - Scikit-learn
# %%
regr = skl_lm.LinearRegression()
X = advertising.TV.values.reshape(-1,1)
y = advertising.Sales
regr.fit(X,y)
print(regr.intercept_)
print(regr.coef_)
# %%
Sales_pred = regr.predict(X)
r2_score(y, Sales_pred)
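# %%
# Sanity check (sketch): R^2 computed by hand from RSS and TSS matches
# the r2_score value above.
rss = ((y - Sales_pred)**2).sum()
tss = ((y - y.mean())**2).sum()
1 - rss/tss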
# %% [markdown]
# ## 3.2 Multiple Linear Regression
# %% [markdown]
# ### Table 3.3 - Statsmodels
# %%
est = smf.ols('Sales ~ Radio', advertising).fit()
est.summary().tables[1]
# %%
est = smf.ols('Sales ~ Newspaper', advertising).fit()
est.summary().tables[1]
# %% [markdown]
# ### Table 3.4 & 3.6 - Statsmodels
# %%
est = smf.ols('Sales ~ TV + Radio + Newspaper', advertising).fit()
est.summary()
# %% [markdown]
# ### Table 3.5 - Correlation Matrix
# %%
advertising.corr()
# %% [markdown]
# ### Figure 3.5 - Multiple Linear Regression
# %%
regr = skl_lm.LinearRegression()
X = advertising[['Radio', 'TV']].values
y = advertising.Sales
regr.fit(X,y)
print(regr.coef_)
print(regr.intercept_)
# %%
# What are the min/max values of Radio & TV?
# Use these values to set up the grid for plotting.
advertising[['Radio', 'TV']].describe()
# %%
# Create a coordinate grid
Radio = np.arange(0,50)
TV = np.arange(0,300)
B1, B2 = np.meshgrid(Radio, TV, indexing='xy')
Z = np.zeros((TV.size, Radio.size))
for (i,j),v in np.ndenumerate(Z):
Z[i,j] =(regr.intercept_ + B1[i,j]*regr.coef_[0] + B2[i,j]*regr.coef_[1])
# %%
# Create plot
fig = plt.figure(figsize=(10,6))
fig.suptitle('Regression: Sales ~ Radio + TV Advertising', fontsize=20)
ax = axes3d.Axes3D(fig)
ax.plot_surface(B1, B2, Z, rstride=10, cstride=5, alpha=0.4)
ax.scatter3D(advertising.Radio, advertising.TV, advertising.Sales, c='r')
ax.set_xlabel('Radio')
ax.set_xlim(0,50)
ax.set_ylabel('TV')
ax.set_ylim(ymin=0)
ax.set_zlabel('Sales');
# %% [markdown]
# ## 3.3 Other Considerations in the Regression Model
# %% [markdown]
# ### Figure 3.6
# %%
sns.pairplot(credit[['Balance','Age','Cards','Education','Income','Limit','Rating']]);
# %% [markdown]
# ### Table 3.7
# %%
est = smf.ols('Balance ~ Gender', credit).fit()
est.summary().tables[1]
# %% [markdown]
# ### Table 3.8
# %%
est = smf.ols('Balance ~ Ethnicity', credit).fit()
est.summary().tables[1]
# %% [markdown]
# ### Table 3.9 - Interaction Variables
# %%
est = smf.ols('Sales ~ TV + Radio + TV*Radio', advertising).fit()
est.summary().tables[1]
# %% [markdown]
# ### Figure 3.7 - Interaction between qualitative and quantative variables
# %%
est1 = smf.ols('Balance ~ Income + Student2', credit).fit()
regr1 = est1.params
est2 = smf.ols('Balance ~ Income + Income*Student2', credit).fit()
regr2 = est2.params
print('Regression 1 - without interaction term')
print(regr1)
print('\nRegression 2 - with interaction term')
print(regr2)
# %%
# Income (x-axis)
income = np.linspace(0,150)
# Balance without interaction term (y-axis)
student1 = np.linspace(regr1['Intercept']+regr1['Student2'],
regr1['Intercept']+regr1['Student2']+150*regr1['Income'])
non_student1 = np.linspace(regr1['Intercept'], regr1['Intercept']+150*regr1['Income'])
# Balance with iteraction term (y-axis)
student2 = np.linspace(regr2['Intercept']+regr2['Student2'],
regr2['Intercept']+regr2['Student2']+
150*(regr2['Income']+regr2['Income:Student2']))
non_student2 = np.linspace(regr2['Intercept'], regr2['Intercept']+150*regr2['Income'])
# Create plot
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(12,5))
ax1.plot(income, student1, 'r', income, non_student1, 'k')
ax2.plot(income, student2, 'r', income, non_student2, 'k')
for ax in fig.axes:
ax.legend(['student', 'non-student'], loc=2)
ax.set_xlabel('Income')
ax.set_ylabel('Balance')
ax.set_ylim(ymax=1550)
# %% [markdown]
# ### Figure 3.8 - Non-linear relationships
# %%
# With Seaborn's regplot() you can easily plot higher order polynomials.
plt.scatter(auto.horsepower, auto.mpg, facecolors='None', edgecolors='k', alpha=.5)
sns.regplot(auto.horsepower, auto.mpg, ci=None, label='Linear', scatter=False, color='orange')
sns.regplot(auto.horsepower, auto.mpg, ci=None, label='Degree 2', order=2, scatter=False, color='lightblue')
sns.regplot(auto.horsepower, auto.mpg, ci=None, label='Degree 5', order=5, scatter=False, color='g')
plt.legend()
plt.ylim(5,55)
plt.xlim(40,240);
# %% [markdown]
# ### Table 3.10
# %%
auto['horsepower2'] = auto.horsepower**2
auto.head(3)
# %%
est = smf.ols('mpg ~ horsepower + horsepower2', auto).fit()
est.summary().tables[1]
# %% [markdown]
# ### Figure 3.9
# %%
regr = skl_lm.LinearRegression()
# Linear fit
X = auto.horsepower.values.reshape(-1,1)
y = auto.mpg
regr.fit(X, y)
auto['pred1'] = regr.predict(X)
auto['resid1'] = auto.mpg - auto.pred1
# Quadratic fit
X2 = auto[['horsepower', 'horsepower2']].values
regr.fit(X2, y)
auto['pred2'] = regr.predict(X2)
auto['resid2'] = auto.mpg - auto.pred2
# %%
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(12,5))
# Left plot
sns.regplot(auto.pred1, auto.resid1, lowess=True,
ax=ax1, line_kws={'color':'r', 'lw':1},
scatter_kws={'facecolors':'None', 'edgecolors':'k', 'alpha':0.5})
ax1.hlines(0,xmin=ax1.xaxis.get_data_interval()[0],
xmax=ax1.xaxis.get_data_interval()[1], linestyles='dotted')
ax1.set_title('Residual Plot for Linear Fit')
# Right plot
sns.regplot(auto.pred2, auto.resid2, lowess=True,
line_kws={'color':'r', 'lw':1}, ax=ax2,
scatter_kws={'facecolors':'None', 'edgecolors':'k', 'alpha':0.5})
ax2.hlines(0,xmin=ax2.xaxis.get_data_interval()[0],
xmax=ax2.xaxis.get_data_interval()[1], linestyles='dotted')
ax2.set_title('Residual Plot for Quadratic Fit')
for ax in fig.axes:
ax.set_xlabel('Fitted values')
ax.set_ylabel('Residuals')
# %% [markdown]
# ### Figure 3.14
# %%
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(12,5))
# Left plot
ax1.scatter(credit.Limit, credit.Age, facecolor='None', edgecolor='r')
ax1.set_ylabel('Age')
# Right plot
ax2.scatter(credit.Limit, credit.Rating, facecolor='None', edgecolor='r')
ax2.set_ylabel('Rating')
for ax in fig.axes:
ax.set_xlabel('Limit')
ax.set_xticks([2000,4000,6000,8000,12000])
# %% [markdown]
# ### Figure 3.15
# %%
y = credit.Balance
# Regression for left plot
X = credit[['Age', 'Limit']].values
regr1 = skl_lm.LinearRegression()
regr1.fit(scale(X.astype('float'), with_std=False), y)
print('Age/Limit\n',regr1.intercept_)
print(regr1.coef_)
# Regression for right plot
X2 = credit[['Rating', 'Limit']].values
regr2 = skl_lm.LinearRegression()
regr2.fit(scale(X2.astype('float'), with_std=False), y)
print('\nRating/Limit\n',regr2.intercept_)
print(regr2.coef_)
# %%
# Create grid coordinates for plotting
B_Age = np.linspace(regr1.coef_[0]-3, regr1.coef_[0]+3, 100)
B_Limit = np.linspace(regr1.coef_[1]-0.02, regr1.coef_[1]+0.02, 100)
B_Rating = np.linspace(regr2.coef_[0]-3, regr2.coef_[0]+3, 100)
B_Limit2 = np.linspace(regr2.coef_[1]-0.2, regr2.coef_[1]+0.2, 100)
X1, Y1 = np.meshgrid(B_Limit, B_Age, indexing='xy')
X2, Y2 = np.meshgrid(B_Limit2, B_Rating, indexing='xy')
Z1 = np.zeros((B_Age.size,B_Limit.size))
Z2 = np.zeros((B_Rating.size,B_Limit2.size))
Limit_scaled = scale(credit.Limit.astype('float'), with_std=False)
Age_scaled = scale(credit.Age.astype('float'), with_std=False)
Rating_scaled = scale(credit.Rating.astype('float'), with_std=False)
# Calculate Z-values (RSS) based on grid of coefficients
for (i,j),v in np.ndenumerate(Z1):
Z1[i,j] =((y - (regr1.intercept_ + X1[i,j]*Limit_scaled +
Y1[i,j]*Age_scaled))**2).sum()/1000000
for (i,j),v in np.ndenumerate(Z2):
Z2[i,j] =((y - (regr2.intercept_ + X2[i,j]*Limit_scaled +
Y2[i,j]*Rating_scaled))**2).sum()/1000000
# %%
fig = plt.figure(figsize=(12,5))
fig.suptitle('RSS - Regression coefficients', fontsize=20)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
min_RSS = r'$\beta_0$, $\beta_1$ for minimized RSS'
# Left plot
CS = ax1.contour(X1, Y1, Z1, cmap=plt.cm.Set1, levels=[21.25, 21.5, 21.8])
ax1.scatter(regr1.coef_[1], regr1.coef_[0], c='r', label=min_RSS)
ax1.clabel(CS, inline=True, fontsize=10, fmt='%1.1f')
ax1.set_ylabel(r'$\beta_{Age}$', fontsize=17)
# Right plot
CS = ax2.contour(X2, Y2, Z2, cmap=plt.cm.Set1, levels=[21.5, 21.8])
ax2.scatter(regr2.coef_[1], regr2.coef_[0], c='r', label=min_RSS)
ax2.clabel(CS, inline=True, fontsize=10, fmt='%1.1f')
ax2.set_ylabel(r'$\beta_{Rating}$', fontsize=17)
ax2.set_xticks([-0.1, 0, 0.1, 0.2])
for ax in fig.axes:
ax.set_xlabel(r'$\beta_{Limit}$', fontsize=17)
ax.legend()
# %% [markdown]
# ### Variance Inflation Factor - page 102
# %%
est_Age = smf.ols('Age ~ Rating + Limit', credit).fit()
est_Rating = smf.ols('Rating ~ Age + Limit', credit).fit()
est_Limit = smf.ols('Limit ~ Age + Rating', credit).fit()
print(1/(1-est_Age.rsquared))
print(1/(1-est_Rating.rsquared))
print(1/(1-est_Limit.rsquared))
| rambalachandran/ISLR | py_notebooks/Chapter 3.py | Chapter 3.py | py | 13,186 | python | en | code | 0 | github-code | 90 |
1789223655 | # https://www.geeksforgeeks.org/greedy-algorithm-to-find-minimum-number-of-coins/
def findMin(V):
coins = [1,2,5,10,20,50,100,500,1000]
n=len(coins)
res=[]
for i in range(n-1,-1,-1):
while V>=coins[i]:
V-=coins[i]
res.append(coins[i])
print(res)
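# Note: the greedy choice is optimal for canonical coin systems like the one
# above, but can fail for arbitrary denominations (e.g. coins [1, 3, 4] and
# V = 6: greedy picks 4+1+1 while the optimum is 3+3), where DP is needed.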
# Driver Code
if __name__ == '__main__':
n = 93
print("Following is minimal number",
"of change for", n, ": ", end = "")
findMin(n) | danish-faisal/Striver-s-SDE-Sheet | Greedy - Day 8/min-coins-using-greedy.py | min-coins-using-greedy.py | py | 459 | python | en | code | 0 | github-code | 90 |
11162324300 |
from flask import Flask, render_template, Response,url_for,redirect,jsonify
from main import out
import time
import cv2
app = Flask(__name__)
m = False
@app.route('/')
def index():
    global m
    return render_template('index.html', enable=m)
@app.route('/huh')
def test():
print('test')
return str(m)
def gen():
cap = cv2.VideoCapture(0)
while True:
try:
frame,marked = out(cap)
if marked:
print(marked)
except:
frame,marked = out(cap)
print(marked)
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
if marked[0]==True:
global m
m= marked
@app.route('/video_feed')
def video_feed():
return Response(gen(),mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| NeelGaji/online-attendance | web.py | web.py | py | 983 | python | en | code | 1 | github-code | 90 |
18114231179 | N,K = map(int,input().split())
W = []
for _ in range(N):
W.append(int(input()))
def is_OK(P):
track_index = 0
w_index = 0
while w_index < N and track_index < K:
tmp_sum = 0
while w_index < N and tmp_sum+W[w_index] <= P:
tmp_sum += W[w_index]
w_index += 1
track_index += 1
return w_index == N
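# is_OK(P) is monotone in P: if all packages fit on at most K trucks with
# capacity P, any larger capacity also works, so the minimum feasible P can
# be found by the binary search below.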
L = 0
R = 100000*100000
#mid = (L+R)//2
ans = R
while L <= R:
mid = (L+R)//2
if is_OK(mid):
ans = mid
R = mid-1
else:
L = mid+1
#mid = (L+R)//2
print("%d"%ans)
| Aasthaengg/IBMdataset | Python_codes/p02270/s962445463.py | s962445463.py | py | 567 | python | en | code | 0 | github-code | 90 |
18021614479 | # https://atcoder.jp/contests/abc054/submissions/4360181
def main():
from collections import defaultdict
INF = 40 * 100 + 1
N, Ma, Mb = map(int, input().split())
memo = defaultdict(lambda: INF)
for _ in range(N):
ai, bi, ci = map(int, input().split())
x = Ma * bi - Mb * ai # Σai:Σbi=Ma:Mb<->Ma*Σbi-Mb*Σai=0
for key, value in tuple(memo.items()):
memo[key + x] = min(
memo[key + x],
value + ci
) # 既存の組み合わせに混合
memo[x] = min(memo[x], ci) # 新規のみ
print(memo[0] if 0 in memo else -1)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03806/s328835675.py | s328835675.py | py | 676 | python | en | code | 0 | github-code | 90 |
36127095180 | import multiprocessing
import os
import random
from math import *
from NetworkV2 import *
# def calculate(value):
# return value * 10
#
# if __name__ == '__main__':
# pool = multiprocessing.Pool(None)
# tasks = range(10000)
# results = []
# r = pool.map_async(calculate, tasks, callback=results.append)
# r.wait() # Wait on the results
# print results
#for x in range (23, 99, 2):
def CompileReport():
reports = []
for x in range(0,NumLambdas):
if os.path.isfile("Test" + str(x) + ".txt"):
print(x, "found")
report = open("Test" + str(x) + ".txt", 'r')
for line in report:
reports.append(line)
report.close()
else:
print("Report", x, "Not Found")
continue
finalReport = open("FinalReport.txt", 'w')
for rLine in reports:
finalReport.write(rLine)
finalReport.write('\n')
finalReport.close()
for x in range(0, NumLambdas):
try:
os.remove("Test" + str(x) + ".txt")
except:
pass
def WorkerInit():
pass
def InitRun(myLambdaIndex):
RandSeed(myLambdaIndex)
RunTrial(myLambdaIndex)
def RandSeed(mySeed):
random.seed(a=mySeed)
alt = False
if __name__ == '__main__':
if alt == False:
results = []
timer = clock()
procPool = multiprocessing.Pool(initializer=WorkerInit)
numCores = multiprocessing.cpu_count()
# rangeLambda = range(NumLambdas)
rangeLambda = range(NumLambdas)
numChunks = ceil(NumLambdas/numCores)
results = procPool.map_async(InitRun, rangeLambda, chunksize=numChunks)
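        # chunksize hands each worker ceil(NumLambdas / numCores) trial
        # indices at a time, so every core pulls roughly one contiguous block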
        procPool.close()
        procPool.join()  # wait for all workers to finish
timer = clock() - timer
print("Took", timer, "seconds")
CompileReport()
else:
CompileReport()
| NetLab/reservation-testbed | TestController.py | TestController.py | py | 1,887 | python | en | code | 1 | github-code | 90 |
37085421051 | import numpy as np
import cv2
img = cv2.imread("p2.jpg")
def bgrtogray(image,r,g,b):
# blue = [0] 7%
# green = [1] 72%
# red = [2] 21%
grayValue = r * image[:,:,2] + g * image[:,:,1] + b * image[:,:,0]
# convert uint8 to image gray
gray_img = grayValue.astype(np.uint8)
return gray_img
image1 = bgrtogray(img,0.299,0.587,0.114)
image2 = bgrtogray(img,0.2126,0.7152,0.0722)
image3 = bgrtogray(img,0.2627,0.6780,0.0593)
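# the three weight sets are the Rec. 601, Rec. 709 and Rec. 2020 luma
# coefficients, respectively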
# print(image1.shape)
cv2.imshow("GrayScale1",image1)
cv2.imshow("GrayScale2",image2)
cv2.imshow("GrayScale3",image3)
cv2.imwrite('picture_gray1.jpg',image1)
cv2.imwrite('picture_gray2.jpg',image2)
cv2.imwrite('picture_gray3.jpg',image3)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
| overzon/image_processing | lab1/bgrtogray.py | bgrtogray.py | py | 808 | python | en | code | 0 | github-code | 90 |
6257770959 | #!/usr/bin/env python
# -*- charset utf8 -*-
# from https://gist.github.com/netom/8221b3588158021704d5891a4f9c0edd
import pyaudio
import numpy
import tkinter as tk
from PIL import Image, ImageTk
from util.spectrogram_generator import Params, generator
VERBOSE = True
class MicrophoneDisplayer:
def __init__(self, rate=16000, width=64, add_deltafeatures=False):
height = 900
self.width = width
self.imgwidth = width * (3 if add_deltafeatures else 1)
self.height = height
self.rate = rate
self.add_deltafeatures = add_deltafeatures
self.img = numpy.zeros((self.imgwidth, self.height), dtype=numpy.uint8)
# we are aiming for 15~20 ms per buffer
if self.rate == 16000:
self.fftwidth = 1024 # 16 ms
elif self.rate == 44100:
self.fftwidth = 1024 # 11 ms
else:
raise Exception("don't know the fftwidth for this rate")
self.params = Params(self.rate, self.fftwidth, width, add_deltafeatures = add_deltafeatures)
self.params.subdivisions = 4
self.generator = generator(self.params)
self.curline = 0
if VERBOSE:
print("Created microphone display.")
print("Signal rate: %d Hz" % self.rate)
print("FFT width: %d" % self.fftwidth)
print("Time between buffers: %d ms" % (self.time_between_buffers() * 1000))
def time_between_buffers(self):
# samples per buffer * seconds per buffer / subdivisions
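        # e.g. 1024 samples / 16000 Hz / 4 subdivisions = 16 ms between columns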
return self.fftwidth / self.rate / self.params.subdivisions
def start(self):
self.root = tk.Tk()
self.canvas = tk.Canvas(self.root, width=self.imgwidth, height=self.height)
self.time = 0
self.cimg = None
self.canvas.pack()
self.root.after(100, self.loop)
self.startaudio()
self.root.mainloop()
def loop(self):
self.update()
self.im = Image.frombuffer('L',
(self.imgwidth, self.height),
self.img.T.tobytes(),
"raw"
)
self.photo = ImageTk.PhotoImage(image = self.im)
if self.cimg is None:
self.cimg = self.canvas.create_image(
0,
0,
image = self.photo,
anchor = tk.NW)
else:
#print("cimg", self.cimg)
self.canvas.itemconfig(
self.cimg,
image = self.photo
)
#print("loop")
self.root.after(10, self.loop)
def update(self):
while True:
cur = self.generator.next()
if cur is None:
break
cur = numpy.clip(cur, 0, 255)
self.img[:, self.height - 1 - self.curline] = cur
self.curline += 1
if self.curline == self.height:
self.curline = 0
self.img[:, self.height - 1 - self.curline] = 0
def startaudio(self):
self.py = pyaudio.PyAudio()
self.stream = self.py.open(
format = pyaudio.paFloat32,
channels = 1,
rate = self.rate,
input = True,
output = False,
frames_per_buffer = 1024,
stream_callback = self.callback
)
self.stream.start_stream()
    def callback(self, in_data, frame_count, time_info, status_flags):
        # feed the raw float32 samples into the spectrogram generator
        self.generator.add(numpy.frombuffer(in_data, dtype=numpy.float32))
        return (None, pyaudio.paContinue)
| colaprograms/speechify | util/mic_display.py | mic_display.py | py | 3,557 | python | en | code | 7 | github-code | 90 |
5280114668 | import requests
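# hackertarget's hostsearch endpoint returns one "hostname,ip" CSV line per
# result (assumed format); we keep only the hostname column. A trailing
# newline in the response yields an empty final entry.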
def get_subdomains(domain):
url = "https://api.hackertarget.com/hostsearch/?q="+domain
subd = []
res = requests.get(url)
for line in res.text.split("\n"):
subd.append(line.split(",")[0])
return subd | Fundacio-i2CAT/InfoHound | infohound/tool/data_sources/hacker_target.py | hacker_target.py | py | 224 | python | en | code | 123 | github-code | 90 |
72106055018 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Heming
"""
import numpy as np
from naivebayesPY import naivebayesPY
from naivebayesPXY import naivebayesPXY
def naivebayes(x, y, x1):
# =============================================================================
#function logratio = naivebayes(x,y,x1);
#
#Computation of log P(Y|X=x1) using Bayes Rule
#Input:
#x : n input vectors of d dimensions (dxn)
#y : n labels (-1 or +1)
#x1: input vector of d dimensions (dx1)
#
#Output:
#logratio: log (P(Y = 1|X=x1)/P(Y=-1|X=x1))
# =============================================================================
# Convertng input matrix x and x1 into NumPy matrix
# input x and y should be in the form: 'a b c d...; e f g h...; i j k l...'
X = np.matrix(x)
X1= np.matrix(x1)
# Pre-configuring the size of matrix X
d,n = X.shape
# =============================================================================
# fill in code here
pos, neg = naivebayesPY(x, y)
posprob, negprob = naivebayesPXY(x, y)
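    # Bayes rule (the evidence P(X=x1) cancels in the ratio):
    # log P(Y=1|x1)/P(Y=-1|x1)
    #   = log[P(Y=1) * prod_i P(x1_i|Y=1)] - log[P(Y=-1) * prod_i P(x1_i|Y=-1)]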
# get (P(Y = 1|X=x1)
posprob_dot = np.zeros([d, 1])
for i in range(d):
if x1[i, :] == 0:
# posprob_dot[i, :] = (1 - posprob[i, :]) # categorical prod
posprob_dot[i, :] = 1
else:
posprob_dot[i, :] = posprob[i, :]
positive_prob = pos * np.prod(posprob_dot)
# get (P(Y = -1|X=x1)
negprob_dot = np.zeros([d, 1])
for i in range(d):
if x1[i, :] == 0:
# negprob_dot[i, :] = (1 - negprob[i, :]) # categorical prod
negprob_dot[i, :] = 1
else:
negprob_dot[i, :] = negprob[i, :]
negative_prob = neg * np.prod(negprob_dot)
print(posprob_dot)
print(negprob_dot)
print(positive_prob)
print(negative_prob)
logratio = np.log(positive_prob / negative_prob)
return logratio
# =============================================================================
| heming-zhang/MachineLearning-Projects | project2/naivebayes.py | naivebayes.py | py | 1,952 | python | en | code | 0 | github-code | 90 |
45144534626 | import hmac
import json
from hashlib import sha512
from io import BytesIO
from time import time
from urllib.parse import urlencode
from twisted.logger import Logger
from twisted.internet import reactor, defer
from twisted.web.client import Agent, HTTPConnectionPool, readBody, \
FileBodyProducer, ContentDecoderAgent, GzipDecoder
from twisted.web.http_headers import Headers
from txpoloniex import const, util, queue
class PoloniexBase:
log = Logger()
connectTimeout = 1.0
maxPerSecond = 6
queue = queue.RateLimit(maxPerSecond)
pool = HTTPConnectionPool(reactor)
agent = ContentDecoderAgent(
Agent(
reactor,
connectTimeout=connectTimeout,
pool=pool
),
[(b'gzip', GzipDecoder)],
)
class PoloniexPrivate(PoloniexBase):
def __init__(self, api_key, secret):
self.api_key = api_key
self.secret = secret
self.nonce = int(time() * 1000)
@defer.inlineCallbacks
def request(self, command, **kwargs):
"""
Submit a request to the private, authenticated, endpoint
"""
        if not self.api_key or not self.secret:
            raise ValueError('api_key and secret are required for private endpoints')
self.nonce += 1
kwargs.update({
'command': command,
'nonce': self.nonce,
})
url = const.PRIVATE_API
args = urlencode(kwargs).encode('utf-8')
body = FileBodyProducer(BytesIO(args))
sign = hmac.new(
self.secret.encode('utf-8'),
args,
sha512
)
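        # Poloniex private endpoints expect the urlencoded request body to be
        # HMAC-SHA512 signed with the API secret; the hex digest travels in the
        # Sign header next to the Key header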
headers = {
'Sign': [sign.hexdigest()],
'Key': [self.api_key],
'Content-Type': ['application/x-www-form-urlencoded'],
}
response = yield self.agent.request(
b'POST',
url.encode('utf-8'),
Headers(headers),
body,
)
body = yield readBody(response)
parsed = json.loads(body.decode('utf-8'))
defer.returnValue(parsed)
class PoloniexPublic(PoloniexBase):
@defer.inlineCallbacks
def request(self, command, **kwargs):
"""
Submit a request to the public endpoint
"""
kwargs.update({'command': command})
args = urlencode(kwargs)
url = '{uri}?{args}'.format(uri=const.PUBLIC_API, args=args)
response = yield self.agent.request(
b'GET',
url.encode('utf-8'),
)
body = yield readBody(response)
parsed = json.loads(body.decode('utf-8'))
defer.returnValue(parsed)
| congruency/txpoloniex | txpoloniex/base.py | base.py | py | 2,581 | python | en | code | 2 | github-code | 90 |
42658622233 | #region
"""
+ = concatenation (birleştirme)
* = replication (tekrarlama)
"""
a = "A"
b = "B"
c = "C"
yaz = a + b + c
print (yaz)
ad= "Büşra"
soyad = "Derbazlar"
print(ad + " " + soyad)
print("-"*50)
#bukadarkez tekrarla demek
print("aziz"*3)
| busraderbazlar/VS-Code-Pyhton | 01_python_giris/0127_string_operatorleri.py | 0127_string_operatorleri.py | py | 249 | python | tr | code | 0 | github-code | 90 |
26745773907 | import sys
import re
prefixes = ["fix", "feat", "release"]
def main():
pr_title = sys.argv[1]
prefix = pr_title.split("(")[0]
if prefix not in prefixes:
exit_with_error()
subject = pr_title.split(prefix)[1]
    if re.match(r'\(R-(\d+)\):', subject) is None:
exit_with_error()
def exit_with_error():
message = "Invalid PR title. Please use correct format.\r\nHere are some examples:\r\n"
message += "\tfix(R-12345): fix issue\r\n"
message += "\tfeat(R-42313): implemented feature\r\n"
message += "\trelease(R-12345): release\r\n"
sys.exit(message)
if __name__ == "__main__":
main()
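# e.g. `python verify-pr.py "feat(R-42313): implemented feature"` passes, while
# `python verify-pr.py "chore: tidy up"` exits with the format error message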
| sutirthak/validate-pr-title | verify-pr.py | verify-pr.py | py | 642 | python | en | code | 0 | github-code | 90 |
73404950696 | # get the two binary inputs separated by spaces
bnum1, bnum2 = input("Enter two binary numbers: ").split()
# get the maximum length among the two binaries
max_len = max(len(bnum1), len(bnum2))
# fill out the zeros of those shorter binary numbers
bnum1 = bnum1.zfill(max_len)
bnum2 = bnum2.zfill(max_len)
result = ''
borrow = False
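# Column-by-column subtraction from the LSB; assumes bnum1 >= bnum2.
# A pending borrow turns a '0' digit into the marker '2': borrow 2 from the
# next column, repay the incoming 1, leaving an effective digit value of 1.
# Worked example: 100 - 011 -> columns (LSB first) give 1, 0, 0 -> 001.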
for i in range(max_len - 1, -1, -1):
    if borrow:
        if bnum1[i] == '0':
            bnum1 = bnum1[:i] + '2' + bnum1[i+1:]
        else:
            borrow = False
            bnum1 = bnum1[:i] + '0' + bnum1[i+1:]
    if bnum1[i] == '0' and bnum2[i] == '1':
        result += '1'
        borrow = True
    elif bnum1[i] == '2':
        # effective digit value is 1 after the double borrow, so the result
        # bit is 1 - bnum2[i] (the original code wrongly emitted 1 here even
        # when bnum2[i] was 1)
        result += '1' if bnum2[i] == '0' else '0'
    elif bnum1[i] == '1' and bnum2[i] == '0':
        result += '1'
    else:
        result += '0'
# fill out empty spaces with 0
result = result.zfill(max_len)
# reverse the result
result = result[::-1]
print(result)
| arielmagbanua/python-training | exercises/binary_subtraction.py | binary_subtraction.py | py | 909 | python | en | code | 2 | github-code | 90 |
70910171497 | class Solution:
def reconstructQueue(self, people):
        # Two dimensions to satisfy: sort to settle one first, then handle the
        # other. Sorting by height descending and k ascending guarantees that
        # everyone already placed is at least as tall as the people that follow.
people.sort(key=lambda x:(-x[0], x[1]))
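        # e.g. [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]] sorts to
        # [[7,0],[7,1],[6,1],[5,0],[5,2],[4,4]]; inserting each person at
        # index k then leaves exactly k taller-or-equal people in front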
print(people)
result = []
n = len(people)
for i in range(n):
result.insert(people[i][1], people[i])
print(result)
return result
s = Solution()
s.reconstructQueue([[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]) | Ericshunjie/algorithm | 贪心算法/406根据身高重建队列.py | 406根据身高重建队列.py | py | 560 | python | zh | code | 0 | github-code | 90 |
39175338540 | '''
pre-processing.py
Author: Adam Swart
Pre-processing to normalise MCQ sheets
'''
import cv2
import os
import numpy as np
import cvutils
import math
from operator import itemgetter
'''
Finds the corners in an image
'''
def findCorners(img):
img2 = img.copy()
template = cv2.imread('images/templates/cnr_template.ppm', 0)
w, h = template.shape[::-1]
corners = []
threshold = 0.7
for i in range(4):
# Apply template matching
res = cv2.matchTemplate(img2,template,cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
#if max_val > threshold:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1]+h)
#cv2.rectangle(img2,top_left, bottom_right, (0,0,255), 2)
centre = (top_left[0]+int(w/2),top_left[1]+int(h/2))
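        # paint over this match so the next matchTemplate pass locks onto a
        # different corner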
cv2.circle(img2,centre,50,(0,255,0),-1)
corners.append(centre)
cv2.imwrite('res.ppm',img2)
corners = sorted(corners, key=lambda x: x[1])
return corners
'''
Flips a page if it is upside down
'''
def flip(img, corners):
result = img
w, h = img.shape[::-1]
if corners[0][1] < 500:
result = cv2.flip(img,-1)
corners = findCorners(result)
# cv2.imwrite('flip.ppm',result)
return result, corners
'''
Corrects misalignments
'''
def align(img, corners):
result = img.copy()
h, w = np.shape(result)
if corners[0][1] != corners[1][1]:
xd = corners[0][0] - corners[1][0]
yd = corners[0][1] - corners[1][1]
grad = yd/xd
        theta = np.degrees(np.arctan(grad))  # getRotationMatrix2D expects degrees
        A = cv2.getRotationMatrix2D((w/2,h/2),theta,1)
        result = cv2.warpAffine(result,A,(w,h))
# cv2.imwrite('align.ppm',result)
corners = findCorners(result)
# print corners
return result
# Finds the first answer block (stub, not implemented yet)
# def get_block(img):
#     img2 = img.copy()
def normalise(img, corners):
#h = corners[2][1] - corners[0][1]
#w = corners[1][0] - corners[0][0]
h = 4235
w = 3190
corners = sorted(corners, key=itemgetter(0))
crop_img = img[corners[0][1]:corners[0][1]+h, corners[0][0]:corners[0][0]+w]
# thresh, crop_img = cv2.threshold(crop_img, 200,255,cv2.THRESH_BINARY)
crop_img = cv2.adaptiveThreshold(~crop_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,-2)
cv2.imwrite("cropped.ppm", crop_img)
return crop_img
def process(path):
img = cv2.imread(path,0)
corners = findCorners(img)
img, corners = flip(img, corners)
corners = sorted(corners, key=itemgetter(0))
if (corners[0][0] < 50 or corners[0][0] > 200) and (corners[0][1] < 500 or corners[0][1] > 700):
corners[0] = (150, 600)
corners[1] = (3350, 600)
img = align(img, corners)
img = normalise(img,corners)
return img
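# Minimal usage sketch (hypothetical file names):
#   normalised = process('scans/sheet1.ppm')
#   cv2.imwrite('sheet1_norm.ppm', normalised)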
| Swartacus/IP | Project - MCQ/pre_processing.py | pre_processing.py | py | 2,804 | python | en | code | 0 | github-code | 90 |
6768975870 | import sqlite3
import pytz
import datetime
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
db.execute("CREATE TABLE IF NOT EXISTS accounts (name TEXT PRIMARY KEY NOT NULL, balance INTEGER NOT NULL)")
db.execute("CREATE TABLE IF NOT EXISTS history (time TIMESTAMP NOT NULL, "
"account TEXT NOT NULL, amount INTEGER NOT NULL, PRIMARY KEY (time, account))")
db.execute("CREATE VIEW IF NOT EXISTS localhistory AS"
" SELECT strftime('%Y-%m-%d %H:%M:%f', history.time, 'localtime') AS localtime,"
" history.account, history.amount FROM history ORDER BY history.time")
# opt + cmd + L -> reformat
class Account(object):
@staticmethod
def _current_time():
        # return 1  # a constant time makes the composite key (time, account) collide and raises an error
return pytz.utc.localize(datetime.datetime.utcnow())
# local_time = pytz.utc.localize(datetime.datetime.utcnow())
# return local_time.astimezone()
def __init__(self, name: str, opening_balance: int = 0):
cursor = db.execute("SELECT name, balance FROM accounts WHERE (name = ?)", (name,))
row = cursor.fetchone()
if row:
self.name, self._balance = row
print("Retrieved record for {}. ".format(self.name), end='')
else:
self.name = name
self._balance = opening_balance
cursor.execute("INSERT INTO accounts VALUES(?, ?)", (name, opening_balance))
cursor.connection.commit()
print("Account created for {}. ".format(self, name), end='')
self.show_balance()
def _save_update(self, amount):
new_balance = self._balance + amount
deposit_time = Account._current_time()
# db.execute("UPDATE accounts SET balance = ? WHERE (name = ?)", (new_balance, self.name))
# db.execute("INSERT INTO history VALUES(?, ?, ?)", (deposit_time, self.name, amount))
# db.commit()
# self._balance = new_balance
try:
db.execute("UPDATE accounts SET balance = ? WHERE (name = ?)", (new_balance, self.name))
db.execute("INSERT INTO history VALUES(?, ?, ?)", (deposit_time, self.name, amount))
except sqlite3.Error:
db.rollback()
            # When the INSERT fails, the UPDATE has already been executed;
            # rollback cancels that executed-but-uncommitted UPDATE as well.
            # More generally, every statement run before the failing one --
            # the whole try block -- is undone.
            #
            # Question: the UPDATE (the statement before the crash) was never
            # committed here, so why roll back at all?
            # Answer: without the rollback the UPDATE stays pending. In the
            # instructor's example, deleting TerryG from the table and
            # re-running showed John's balance as 1980 instead of 2010: the
            # three deposits and the final withdraw all write to the table,
            # and after the withdraw the TerryG object is created and
            # inserted; that insert runs cursor.connection.commit() (i.e.
            # db.commit()), which also commits the pending withdraw UPDATE,
            # subtracting 30 again.
            # So statements executed before an error must be rolled back even
            # though they were never committed, to stop some unrelated commit
            # from flushing the last pending UPDATE.
        else:
            # commit comes before updating the balance: if commit fails, the
            # cached balance should not be stored
            db.commit()
            self._balance = new_balance  # would also work inside the try, but
            # good practice keeps the try limited to code that needs guarding
        # finally:
        #     db.commit()
def deposit(self, amount: int) -> float:
if amount > 0.0:
# # self._balance += amount
# new_balance = self._balance + amount
# deposit_time = Account._current_time()
# db.execute("UPDATE accounts SET balance = ? WHERE (name = ?)", (new_balance, self.name))
# db.execute("INSERT INTO history VALUES(?, ?, ?)", (deposit_time, self.name, amount))
# db.commit()
# self._balance = new_balance
self._save_update(amount)
print("{:.2f} deposited".format(amount / 100))
return self._balance / 100
def withdraw(self, amount: int) -> float:
if 0 < amount <= self._balance:
# # self._balance -= amount
# new_balance = self._balance - amount
# withdraw_time = Account._current_time()
# db.execute("UPDATE accounts SET balance = ? WHERE (name = ?)", (new_balance, self.name))
# db.execute("INSERT INTO history VALUES(?, ?, ?)", (withdraw_time, self.name, -amount))
# db.commit()
# self._balance = new_balance
self._save_update(-amount)
print("{:.2f} withdrawn".format(amount / 100))
return amount / 100
else:
print("The amount must be greater than zero and no more than your account balance.")
return 0.0
def show_balance(self):
print("Balance on account {} is {:.2f}".format(self.name, self._balance / 100))
if __name__ == "__main__":
john = Account("John")
john.deposit(1010)
john.deposit(10)
john.deposit(10)
john.withdraw(30)
john.withdraw(0)
john.show_balance()
terry = Account("TerryJ")
graham = Account("Graham", 9000)
eric = Account("Eric", 7000)
michael = Account("Michael")
terryG = Account("TerryG")
db.close()
| ZhaoyangChen101/Python-Course | database/RollingBack/rollback.py | rollback.py | py | 5,693 | python | en | code | 0 | github-code | 90 |
33544404579 | import telebot
import random
import functools
def my_map(func, iterable):
result = []
for item in iterable:
result.append(func(item))
return result
numbers = [1, 2, 3, 4, 5]
squared_numbers = my_map(lambda x: x**2, numbers)
print(squared_numbers)
def repeat(times):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for _ in range(times):
result = func(*args, **kwargs)
return result
return wrapper
return decorator
@repeat(3)
def greet(name):
print(f"Hello, {name}!")
greet("Alice")
bot = telebot.TeleBot('YOUR_BOT_TOKEN')
target_number = random.randint(1, 1000)
attempts = 0
@bot.message_handler(commands=['start'])
def start(message):
global target_number, attempts
target_number = random.randint(1, 1000)
attempts = 0
    bot.reply_to(
        message, "Hi! I've picked a number from 1 to 1000. Try to guess it!")
@bot.message_handler(func=lambda message: True)
def guess_number(message):
global attempts
user_number = int(message.text)
attempts += 1
    if user_number == target_number:
        bot.reply_to(
            message, f"Congratulations, you guessed the number {target_number}! Attempts: {attempts}")
    elif user_number < target_number:
        bot.reply_to(message, "The hidden number is larger.")
    else:
        bot.reply_to(message, "The hidden number is smaller.")
bot.polling()
| SKYWWALKER777/GB_Python | homework-07.py | homework-07.py | py | 1,575 | python | en | code | 0 | github-code | 90 |
16302577972 | import numpy as np
import glob
import logging
import subprocess as sub
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
logging.basicConfig(filename='FitsToCats.log', filemode='w',
format='%(levelname)s:%(message)s',
encoding='utf-8', level=logging.INFO)
def main(input_dict):
images = sorted(glob.glob(input_dict['path_to_folder'] + "*.fit*"))
check_if_all_input_variables_filled(images, **input_dict)
for im in images:
im_name = get_im_name(im)
logging.info('Program started working on image %s.' % im_name)
hdr_values = get_im_hdrs(im, im_name, **input_dict)
file_new = im_astr(im, im_name, **hdr_values, **input_dict)
print(file_new)
new_to_sextractor(im_name, file_new, **hdr_values, **input_dict)
remove_files(**input_dict)
logging.info('Program finished working on image %s.' % im_name)
def check_if_all_input_variables_filled(images, path_to_folder, path_to_astr_file, path_to_sex_config_file, fits_hdrs, **kwargs):
if all([images, path_to_folder, path_to_astr_file, path_to_sex_config_file, fits_hdrs]):
logging.info('All input variables filled.')
else:
logging.warning('One or more input variables empty!')
def get_im_name(image):
pathname = os.path.splitext(image)[0]
image_name = pathname.split('/')[-1]
return image_name
def get_im_hdrs(im, im_name, fits_hdrs, **kwargs):
hdul = fits.open(im)
dict_keys = ['RA', 'DEC', 'time_data']
hdr_values = {}
k = 0
for n, key in enumerate(dict_keys):
try:
hdr_values[key] = hdul[0].header[fits_hdrs[n]]
except KeyError:
logging.warning('header for %s in image %s is incorrect!', key, im_name)
hdr_values[key] = ''
pass
else:
k += 1
if k == len(dict_keys):
logging.info('All headers in image %s are correct.' % im_name)
hdr_values = change_format_ra_dec(hdr_values, dict_keys)
return hdr_values
def change_format_ra_dec(hdr_values, dict_keys):
date_hdrs = dict_keys[0:2]
    RA = hdr_values[date_hdrs[0]]
    DEC = hdr_values[date_hdrs[1]]
    # string headers are already sexagesimal; numeric headers are degrees
    # (casting to float up front made the str branch unreachable and would
    # crash on sexagesimal strings)
    if isinstance(RA, str):
        for hdr in date_hdrs:
            hdr_values[hdr] = hdr_values[hdr].replace(' ', ':')
    elif isinstance(RA, float):
        c = SkyCoord(ra=RA*u.degree, dec=DEC*u.degree, frame='icrs')
a = c.to_string('hmsdms')
a1 = a.split(' ')
for n, i in enumerate(a1):
i = i.replace('h', ':')
i = i.replace('d', ':')
i = i.replace('m', ':')
i = i.replace('s', '')
hdr_values[date_hdrs[n]] = i
hdr_values['RA'] = hdr_values['RA'][0:8]
hdr_values['DEC'] = hdr_values['DEC'][0:9]
hdr_values['time_data'] = hdr_values['time_data'][0:22]
else:
pass
return hdr_values
def im_astr(im, im_name, RA, DEC, path_to_astr_file, **kwargs):
logging.info('Astrometry for image %s started.' % im_name)
solve_field_command = ['solve-field', '--ra', '%s' % RA, '--dec', '%s' % DEC,
'--radius', '1', '--cpulimit', '30', '--config', path_to_astr_file,
'--overwrite', '--no-verify', '--no-plots', '%s' % im]
sub.Popen(solve_field_command, stdout=sub.PIPE, stderr=sub.PIPE).communicate()
file_new = im.replace('.fits', '.new')
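    # solve-field writes the WCS-calibrated image as <name>.new, plus the
    # .axy/.corr/.xyls/... side files that remove_files() cleans up later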
if os.path.exists(file_new):
        logging.info('Astrometry for image %s successfully finished.' % im_name)
else:
        logging.warning('Astrometry for image %s unsuccessful.' % im_name)
return file_new
def new_to_sextractor(im_name, file_new, path_to_folder, time_data, path_to_sex_config_file, **kwargs):
logging.info('Sextractor started working on image %s.' % im_name)
file_cat = path_to_folder + im_name + time_data + '.cat'
analyse_new_command = ['source-extractor', '-c', path_to_sex_config_file,
file_new, '-CATALOG_NAME', file_cat]
sub.Popen(analyse_new_command, stdout=sub.PIPE, stderr=sub.PIPE).communicate()
if os.path.exists(file_cat):
logging.info('Sextractor finished working on image %s and returned cat file.' % im_name)
else:
        logging.warning("Sextractor didn't make a new file while working on image %s." % im_name)
def remove_files(path_to_folder, files_to_rm, **kwargs):
for file_ in files_to_rm:
file1_ = glob.glob(path_to_folder + file_)
os.remove(file1_[0])
input_dict = {
'path_to_folder' : '/home/kamil/Programs/Analyze-fits/Test_files/test/',
'path_to_astr_file' : '/home/kamil/astrometry.net-0.89/etc/astrometry.cfg',
'path_to_sex_config_file' : '/usr/share/source-extractor/default.sex',
# 'path_to_folder' : '/home/kamilraczka/Projects/Na_zaliczenie/'
# 'path_to_astr_file' : '/home/kamilraczka/astrometry.net-0.85/etc/astrometry.cfg'
# 'sextractor_path_to_config_file' : '/home/kamilraczka/.config/sextractor/default.sex'
'files_to_rm' : ['*.axy' , '*.corr', '*.xyls', '*.match',
'*.rdls', '*.solved', '*.wcs'],
# 'fits_hdrs' : ['OBJCTRA', 'OBJCTDEC', 'DATE-OBS']
'fits_hdrs' : ['RA_OBJ', 'DEC_OBJ', 'DATE-OBS']
}
if __name__ == '__main__':
main(input_dict)
| KamilRaczka12/Analyze-fits | FitsToCats.py | FitsToCats.py | py | 5,322 | python | en | code | 0 | github-code | 90 |
41463447234 | #Author guo
# Use conditional operators to grade scores: >= 90 is an A
points = int(input("Enter the student's score: "))
if points >= 90:
    grade = 'A'
elif points < 60:
    grade = "C"
else:
    grade = 'B'
print(grade)
# This problem needs explicit boundary conditions.
# Test-case design for the input score:
# 1. non-numeric input -- expect an "invalid input type" message
# 2. numeric but > 100 or < 0 -- prompt for the valid range
# 3. boundary values
| guojia60180/guo.github-io | python实例/分数归档.py | 分数归档.py | py | 516 | python | zh | code | 0 | github-code | 90 |
9546215256 | from manim import *
class M1_part1(Scene):
def construct(self):
M1_formula_1 = MathTex(r"V_A \derivative{P_A}{t}=\dot V_A(P_I-P_A)+\lambda Q(P_v^*-P_a) \tag{1}").shift(UP * 3)
M1_formula_2 = MathTex(r"V_m \derivative{P_m}{t} = \frac {M_m} {k} + Q_m(P_a^*-P_m) \tag{2} ").shift(UP * 1.5)
M1_formula_3 = MathTex(r"V_{ot} \derivative{P_{ot}}{t} = \frac {M_{ot}} {k} + Q_{ot}(P_a^*-P_{ot}) \tag 3")
M1_formula_4 = MathTex(r"Q = Q_m+Q_{ot} \tag 4 ").shift(DOWN)
Assumption_1 = MathTex(r"\mbox{CO2 is in chemical equilibrium}").next_to(M1_formula_4, DOWN)
M1_formula_5 = MathTex(r"P_v=(Q_{ot}P_{ot}+Q_mP_m)/Q \tag 5 ")
M1_formula_5.next_to(M1_formula_4, DOWN)
        # play the animations
        self.play(Write(M1_formula_1, run_time=1))  # Write() draws left to right
        self.wait()  # wait one time unit
        self.play(Write(M1_formula_2, run_time=1))
        self.wait()
        self.play(Write(M1_formula_3, run_time=1))
        self.wait()
        self.play(Write(M1_formula_4, run_time=1))
        self.wait()
self.play(Write(Assumption_1))
self.wait(2)
self.play(Assumption_1.animate.scale(0.3).to_edge(UP + RIGHT), run_time=2)
        self.play(Write(M1_formula_5, run_time=1))
        self.wait()
# R2 = VGroup(M1_formula_4,M1_formula_5)
self.remove(M1_formula_5, M1_formula_4)
R1 = VGroup(M1_formula_1, M1_formula_2, M1_formula_3)
# self.play(R1.animate.scale(0.3))
        # shrink and move at the same time
        self.play(R1.animate.scale(0.3).to_edge(UP + LEFT), run_time=1.5)
self.wait(3)
# get back Eq 2,3
Eq2_3_group = VGroup(M1_formula_2, M1_formula_3)
rectangle = Rectangle(fill_opacity=0.0,stroke_color= WHITE,width=4.5, height=1)
rectangle.move_to(Eq2_3_group.get_center())
rectangle.add_updater(lambda x: x.move_to(Eq2_3_group.get_center()))
self.play(FadeIn(rectangle))
self.remove(M1_formula_1)
self.play(FadeOut(rectangle))
self.play(Eq2_3_group.animate.scale(10/3).move_to(ORIGIN), run_time=1.5)
self.wait()
# Need to merge two equations to tissue
M1_Tis_1 = MathTex(r"V_{tis} \derivative{P_{tis} }{t} = ", r"\frac {M_{tis} } {k} + Q", r"(", r"P_a^*", r"-", r"P_{tis})", r"\tag{6}").shift(UP)
self.play(TransformMatchingTex(Eq2_3_group, M1_Tis_1))
self.wait()
rectangle_M = Rectangle(fill_opacity=0.0, stroke_color=ORANGE, width=1.3, height=0.6)
rectangle_V = Rectangle(fill_opacity=0.0, stroke_color=ORANGE, width=1.3, height=0.6)
rectangle_M.move_to(M1_Tis_1.get_center()).move_to((-3, 1.3, 0))
rectangle_V.move_to(M1_Tis_1.get_center()).move_to((-5.7, 1, 0))
self.play(FadeIn(rectangle_M))
self.play(FadeIn(rectangle_V))
self.wait()
self.play(FadeOut(rectangle_M))
self.play(FadeOut(rectangle_V))
Assumption_2 = MathTex(r"\mbox{let RHS=0, consider stable situation}").next_to(M1_Tis_1, UP)
self.play(Write(Assumption_2))
self.wait(2)
self.play(Assumption_2.animate.scale(0.3).next_to(Assumption_1, DOWN), run_time=2)
self.wait()
M1_Tis_1_equal_0_1 = MathTex(r"\frac {M_{tis} } {k} + Q", r"(", r"P_a^*", r"-", r"P_{tis})", r"=", r"0").shift(UP)
self.play(TransformMatchingTex(M1_Tis_1, M1_Tis_1_equal_0_1))
self.wait()
M1_Tis_1_equal_0_2 = MathTex(r"\frac {M_{tis} } {k} + ", r"Q", r"P_a^*", r"=", r"Q", r"P_{tis}").shift(UP)
self.play(TransformMatchingTex(M1_Tis_1_equal_0_1, M1_Tis_1_equal_0_2))
self.wait()
self.remove(M1_Tis_1_equal_0_1)
self.wait()
M1_Tis_1_equal_0_3 = MathTex(r"Q", r"P_{tis}", r"=", r"\frac {M_{tis} } {k} + ", r"Q", r"P_a^*").shift(UP*2)
self.play(TransformMatchingTex(M1_Tis_1_equal_0_2, M1_Tis_1_equal_0_3))
M1_ot_1_equal = MathTex(r"Q_{ot}", r"P_{ot}", r"=", r"\frac {M_{ot} } {k} + ", r"Q_{ot}", r"P_a^*").next_to(M1_Tis_1_equal_0_3, DOWN)
M1_mu_1_equal = MathTex(r"Q_{m}", r"P_{m}", r"=", r"\frac {M_{m} } {k} + ", r"Q_{m}", r"P_a^*").next_to(M1_ot_1_equal, DOWN)
M1_QP_group = VGroup(M1_Tis_1_equal_0_3, M1_ot_1_equal, M1_mu_1_equal)
self.play(TransformMatchingTex(M1_Tis_1_equal_0_3, M1_QP_group))
self.play(M1_QP_group.animate.shift(UP).scale(0.8))
M_1_mass_conservation_1 = MathTex(r"P_{tis} = (", r"Q_{ot}", r"P_{ot}", r"+", r"Q_m", r"P_m", r") / Q").next_to(M1_QP_group, DOWN)
M_1_mass_conservation_2 = MathTex(r"Q", r"P_{tis}", r"=", r"Q_{ot}", r"P_{ot}", r"+", r"Q_m", r"P_m").next_to(M1_QP_group, DOWN)
M_1_mass_conservation_2_plugin = MathTex(r"\frac {M_{tis}} {k} + QP_a^*", r"=", r"\frac {M_{ot} } {k} + Q_{ot}P_a^*", r"+", r"\frac {M_{m} } {k} + Q_{m}P_a^*").next_to(M1_QP_group, DOWN)
self.play(Write(M_1_mass_conservation_1))
self.wait()
self.play(TransformMatchingTex(M_1_mass_conservation_1, M_1_mass_conservation_2))
self.wait()
variables = VGroup(MathTex(r"\frac {M_{tis} } {k} + QP_a^*"), MathTex(r"\frac {M_{ot} } {k} + Q_{ot}P_a^*"), MathTex(r"\frac {M_{m} } {k} + Q_{m}P_a^*")).arrange_submobjects().next_to(M_1_mass_conservation_2, UP)
self.play(TransformMatchingTex(Group(M_1_mass_conservation_2, variables), M_1_mass_conservation_2_plugin))
self.remove(M1_QP_group)
self.wait()
Mass_conservation = MathTex(r"M_{tis}", r"=", r"M_m", r"+", r"M_{ot}")
self.play(TransformMatchingTex(M_1_mass_conservation_2_plugin, Mass_conservation))
self.wait()
self.play(Mass_conservation.animate.move_to(ORIGIN), run_time=1)
self.wait()
| TwilightSpar/CO2_Manim | M1_part1.py | M1_part1.py | py | 5,963 | python | en | code | 0 | github-code | 90 |
18336540849 | import numpy as np
def divisor(n):
i = 1
table = []
while i * i <= n:
if n%i == 0:
table.append(i)
table.append(n//i)
i += 1
table = list(set(table))
table = sorted(table)
return table
def make_prime(U):
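    # odd-only sieve of Eratosthenes: seed 2 and every odd >= 3 as prime, then
    # each odd prime p crosses out p*p, p*p+2p, ... (step 2p skips the evens)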
    is_prime = np.zeros(U, dtype=bool)  # np.bool was removed in NumPy 1.24
is_prime[2] = 1
is_prime[3::2] = 1
M = int(U**.5)+1
for p in range(3,M,2):
if is_prime[p]:
is_prime[p*p::p+p] = 0
return is_prime, is_prime.nonzero()[0]
A, B = map(int, input().split())
def is_prime(n):
if n == 1:
return False
for i in range(2,int(n**0.5)+1):
if n % i == 0:
return False
return True
A_list = set(divisor(A))
B_list = set(divisor(B))
list = A_list & B_list
#_, primes = make_prime(10**8)
cnt = 1
for i in list:
if is_prime(i):
cnt += 1
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p02900/s360824964.py | s360824964.py | py | 867 | python | en | code | 0 | github-code | 90 |
74934323176 | import os
import cv2
root_dir = "D:/BaiduNetdiskDownload/image/CMEImages/NoCME"
target_dir = "D:/BaiduNetdiskDownload/image/CMEImages/NoCME_polar"
os.makedirs(target_dir, exist_ok=True)
index = 0
for filename1 in os.listdir(root_dir):
index += 1
filename = os.path.join(root_dir, filename1)
img = cv2.imread(filename)
center = [img.shape[0]//2, img.shape[1]//2]
polar = cv2.warpPolar(img, dsize = (300, 600), center = center, maxRadius = center[0],flags = cv2.INTER_LINEAR + cv2.WARP_POLAR_LINEAR)
polar = polar[:, 100:]
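    # warpPolar unwraps the disc around `center` into a 300 (radius) x 600
    # (angle) strip; dropping the first 100 radius columns removes the inner
    # region (e.g. the occulting disc in coronagraph frames)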
cv2.imwrite(os.path.join(target_dir, str(index) + ".jpg"), polar) | bazingayu/machineLearningGroupProject | transform_to_polar.py | transform_to_polar.py | py | 618 | python | en | code | 2 | github-code | 90 |
74214505576 | from PIL import Image, ImageDraw, ImageFont
import os
from io import BytesIO
import requests
# meme_there = os.path.isfile("worthless.jpg")
# if meme_there:
# os.remove("worthless.jpg")
def worthless(name):
    image = Image.open('./assets/worthless/meme.jpg')
    draw = ImageDraw.Draw(image)
    fontsize = 32
    font = ImageFont.truetype('./assets/worthless/Roboto-Bold.ttf', fontsize)
    (x, y) = (155, 100)
    message = f'{name}\'s\nOpinion'
    color = 'rgb(0,0,0)'
    draw.text((x, y), message, fill=color, font=font)
    image.save('worthless.jpg')
def slap(url):
    response = requests.get(url)
    try:
        user = Image.open(BytesIO(response.content))
    except OSError:
        user = Image.open('./user/user1.jpg')  # fallback avatar
img = Image.open('./user/Araonjr.png', 'r') # gets bot's dp
background = Image.open('./Assets/slap/slap.jpg') # fetches the asset
offset = (335,165)
background.paste(img, offset)
profile = user.resize((100,100))
poffset = (125,180)
background.paste(profile, poffset)
background.save('slap.jpg')
def spank(url):
    response = requests.get(url)
    try:
        user = Image.open(BytesIO(response.content))
    except OSError:
        user = Image.open('./user/user1.jpg')  # fallback avatar
img = Image.open('./Assets/profilepic/Araonjr.png')
img = img.resize((190,240))
background = Image.open('./Assets/spank/spank.jpg')
offset = (745,45)
background.paste(img, offset)
profile = user.resize((240,240))
poffset = (1200,340)
background.paste(profile, poffset)
background.save('spank.jpg')
#spank('https://cdn.discordapp.com/avatars/651715103313362944/d8b5f4ee9746238ef82dd5a7a10a575b.webp') | Araon/AraonJR | helper.py | helper.py | py | 1,597 | python | en | code | 1 | github-code | 90 |
35782490035 | import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import sqlite3 as sq
# main application class
class Main(tk.Tk):
def __init__(self):
super().__init__()
self.db = db
self.btns()
self.treeview()
self.view_records()
    # add the toolbar buttons
    def btns(self):
        # frame that holds the buttons
        toolbar = tk.Frame(bg='#D7D8E0',bd=2)
        toolbar.pack(side=tk.TOP,fill=tk.X)
        # button icons
self.img_add = tk.PhotoImage(file='./img/add.png')
self.img_del = tk.PhotoImage(file='./img/delete.png')
self.img_upd = tk.PhotoImage(file='./img/update.png')
self.img_srch = tk.PhotoImage(file='./img/search.png')
self.img_refresh = tk.PhotoImage(file='./img/refresh.png')
        # create the buttons
btn_add = tk.Button(toolbar,image=self.img_add,bg='#d7d8e0',bd=0,command=self.f_btn_add)
btn_del = tk.Button(toolbar,image=self.img_del,bg='#d7d8e0',bd=0,command=self.f_btn_del)
btn_upd = tk.Button(toolbar,image=self.img_upd,bg='#d7d8e0',bd=0,command=self.f_btn_upd)
btn_srch = tk.Button(toolbar,image=self.img_srch,bg='#d7d8e0',bd=0,command=self.f_btn_srch)
btn_refresh = tk.Button(toolbar,image=self.img_refresh,bg='#d7d8e0',bd=0,command=self.view_records)
        # lay out the buttons
btn_add.pack(side='left')
btn_del.pack(side='left')
btn_upd.pack(side='left')
btn_srch.pack(side='left')
btn_refresh.pack(side='left')
    # build the records table
def treeview(self):
columns = ("#1", "#2", "#3","#4","#5")
self.tree = ttk.Treeview(self, show="headings", columns=columns,height=30)
self.tree.column('#1',width=50)
self.tree.column('#2',width=260)
self.tree.column('#3',width=233)
self.tree.column('#4',width=233)
self.tree.column('#5',width=233)
self.tree.heading("#1", text="ID")
self.tree.heading("#2", text="ФИО")
self.tree.heading("#3", text="Номер")
self.tree.heading("#4", text="Почта")
self.tree.heading("#5", text="Зарплата")
ysb = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.tree.yview)
self.tree.configure(yscroll=ysb.set)
self.tree.pack(side='left')
    # button callbacks
def f_btn_add(self):
Window()
def f_btn_del(self):
for selection_item in self.tree.selection():
self.db.c.execute('DELETE FROM db WHERE id=?',(self.tree.set(selection_item,'#1'),))
self.db.conn.commit()
self.view_records()
def f_btn_upd(self):
Update()
def f_btn_srch(self):
Search()
    # handler used by the edit window
def update_record(self,name,tel,email,salary):
self.db.c.execute('UPDATE db SET name=?,tel=?,email=?,salary=? WHERE ID=?',
(name,tel,email,salary,self.tree.set(self.tree.selection()[0],
'#1')))
self.db.conn.commit()
self.view_records()
    # handler used by the search window
def search_records(self,name):
name = ('%' + name + '%')
self.db.c.execute("""SELECT * FROM db
WHERE name LIKE ?""",(name,))
[self.tree.delete(i) for i in self.tree.get_children()]
[self.tree.insert('','end',values=row)
for row in self.db.c.fetchall()]
    # show all records in the table
def view_records(self):
self.db.c.execute('SELECT * FROM db')
[self.tree.delete(i) for i in self.tree.get_children()]
[self.tree.insert('','end',values=row)
for row in self.db.c.fetchall()]
    # insert a record into the database
def records(self,name,tel,email,salary):
self.db.insert_data(name,tel,email,salary)
# "Add record" window, also the base template for the other windows
class Window(tk.Toplevel):
def __init__(self):
super().__init__()
self.init_child()
self.root = app
def init_child(self):
        # window title
        self.title('Add')
self.geometry('400x220')
self.resizable(False,False)
self.grab_set()
self.focus_set()
        # captions
        label_name = tk.Label(self,text='Full name:')
        label_name.place(x=50,y=20)
        label_select = tk.Label(self,text='Phone')
        label_select.place(x=50,y=50)
        label_sum = tk.Label(self,text='E-mail')
        label_sum.place(x=50,y=80)
        label_salary = tk.Label(self,text='Salary')
        label_salary.place(x=50,y=110)
        # entry for the name
        self.entry_name = ttk.Entry(self)
        self.entry_name.place(x=200,y=20)
        # entry on the phone row (note: the entry_email/entry_tel variable
        # names are swapped, but the swap is applied consistently downstream)
        self.entry_email = ttk.Entry(self)
        self.entry_email.place(x=200,y=50)
        # entry on the e-mail row
        self.entry_tel = ttk.Entry(self)
        self.entry_tel.place(x=200,y=80)
        # entry for the salary
        self.entry_salary = ttk.Entry(self)
        self.entry_salary.place(x=200,y=110)
        # button that closes the child window
        self.btn_cancel = ttk.Button(self,text='Close',command=self.destroy)
        self.btn_cancel.place(x=300,y=170)
        # add button
        self.btn_ok = ttk.Button(self,text='Add')
        self.btn_ok.place(x=220,y=170)
        # fire on left mouse button click
self.btn_ok.bind('<Button-1>', lambda event:
self.root.records(self.entry_name.get(),
self.entry_email.get(),
self.entry_tel.get(),
self.entry_salary.get()))
self.btn_ok.bind('<Button-1>', lambda event: self.root.view_records(), add='+')
self.btn_ok.bind('<Button-1>', lambda event: self.destroy(), add='+')
# record-editing window
class Update(Window):
    def __init__(self):
        super().__init__()
        self.init_upd()
        self.root = app
        self.db = db
        # make sure a record is actually selected
        try:
            self.default_data()
        except IndexError:
            messagebox.showerror(title='Error',message='Select a record to edit!')
            self.destroy()
    # draw the window
    def init_upd(self):
        self.title('Edit record')
        self.btn_ok.destroy()
        btn_edit = ttk.Button(self,text='Edit record')
        btn_edit.place(x=155,y=170)
        btn_edit.bind('<Button-1>', lambda event: self.root.update_record(self.entry_name.get(),
                                                                          self.entry_email.get(),
                                                                          self.entry_tel.get(),
                                                                          self.entry_salary.get()))
        btn_edit.bind('<Button-1>', lambda event: self.destroy(), add='+')
    # pre-fill the fields with the selected record
    def default_data(self):
        # the parameter must be a 1-tuple; passing the bare string would give
        # sqlite3 one binding per character
        self.db.c.execute("SELECT * FROM db WHERE id=?",
                          (self.root.tree.set(self.root.tree.selection()[0],'#1'),))
        row = self.db.c.fetchone()
        self.entry_name.insert(0,row[1])
        self.entry_email.insert(0,row[2])
        self.entry_tel.insert(0,row[3])
        self.entry_salary.insert(0,row[4])
# search window
class Search(tk.Toplevel):
    def __init__(self):
        super().__init__()
        self.init_search()
        self.view = app
    def init_search(self):
        self.title("Search")
        self.geometry('300x100')
        self.resizable(False,False)
        label_search = tk.Label(self,text='Search')
        label_search.place(x=50,y=20)
        self.entry_search = ttk.Entry(self)
        self.entry_search.place(x=105,y=20,width=150)
        btn_cancel = ttk.Button(self,text='Close',command=self.destroy)
        btn_cancel.place(x=185,y=50)
        btn_search = ttk.Button(self,text='Search')
        btn_search.place(x=105,y=50)
        btn_search.bind('<Button-1>', lambda event:
                        self.view.search_records(self.entry_search.get()))
        btn_search.bind('<Button-1>', lambda event:
                        self.destroy(), add="+")
# database layer
class DB:
def __init__(self):
self.conn = sq.connect('db.db')
self.c = self.conn.cursor()
self.c.execute("""CREATE TABLE IF NOT EXISTS db (
id INTEGER PRIMARY KEY,
name TEXT,
tel TEXT,
email TEXT,
salary TEXT
)""")
self.conn.commit()
def insert_data(self,name,tel,email,salary):
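        # '?' placeholders let sqlite3 bind the values safely, avoiding SQL
        # injection from user-entered fields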
self.c.execute("INSERT INTO db(name,tel,email,salary) VALUES(?,?,?,?)",(name,tel,email,salary))
self.conn.commit()
# entry point
if __name__ == '__main__':
    # create the database
    db = DB()
    # create the application
    app = Main()
    app.title('Company employee list')
    app.geometry('1000x700+460+190')
    app.resizable(False,False)
    # main event loop
    app.mainloop()
44569732885 | #!/usr/bin/python3
import numpy as np
import tensorflow as tf
import sys
from os.path import join
from sklearn.utils import shuffle
from utils import conv_layer, fc_layer
from utils import Cursors
from sklearn.decomposition import PCA
#############################################
############### IMPORT DATA #################
#############################################
data_path = '..'
images_train_fname = join(data_path, 'data_train.bin')
templates_train_fname = join(data_path, 'fv_train.bin')
images_valid_fname = join(data_path, 'data_valid.bin')
templates_valid_fname = join(data_path, 'fv_valid.bin')
images_test_fname = join(data_path, 'data_test.bin')
# number of images
num_train_images = 100000
num_valid_images = 10000
num_test_images = 10000
# size of the images 48*48 pixels in gray levels
image_dim = 48
image_size = image_dim ** 2
img_range = 255
# dimension of the templates
template_dim = 128
# read the training files
with open(templates_train_fname, 'rb') as f:
train_template_data = np.fromfile(f, dtype=np.float32, count=num_train_images * template_dim)
train_template_data = train_template_data.reshape(num_train_images, template_dim)
with open(images_train_fname, 'rb') as f:
train_image_data = np.fromfile(f, dtype=np.uint8, count=num_train_images * image_size).astype(np.float32)
train_image_data = train_image_data.reshape(num_train_images, image_size)
# read the validation files
with open(templates_valid_fname, 'rb') as f:
valid_template_data = np.fromfile(f, dtype=np.float32, count=num_valid_images * template_dim)
valid_template_data = valid_template_data.reshape(num_valid_images, template_dim)
with open(images_valid_fname, 'rb') as f:
valid_image_data = np.fromfile(f, dtype=np.uint8, count=num_valid_images * image_size).astype(np.float32)
valid_image_data = valid_image_data.reshape(num_valid_images, image_size)
# read the test file
with open(images_test_fname, 'rb') as f:
test_image_data = np.fromfile(f, dtype=np.uint8, count=num_test_images * image_size).astype(np.float32)
test_image_data = test_image_data.reshape(num_test_images, image_size)
###### Template preprocessing
train_template_data = train_template_data[:, :template_dim]
valid_template_data = valid_template_data[:, :template_dim]
#train_template_data /= np.linalg.norm(train_template_data, axis=1).reshape(-1, 1)
#valid_template_data /= np.linalg.norm(valid_template_data, axis=1).reshape(-1, 1)
######### data pre-processing
train_image_data_mean = np.mean(train_image_data, axis=1).reshape(-1, 1)
train_image_data_std = np.std(train_image_data, axis=1).reshape(-1, 1)
valid_image_data_mean = np.mean(valid_image_data, axis=1).reshape(-1, 1)
valid_image_data_std = np.std(valid_image_data, axis=1).reshape(-1, 1)
test_image_data_mean = np.mean(test_image_data, axis=1).reshape(-1, 1)
test_image_data_std = np.std(test_image_data, axis=1).reshape(-1, 1)
train_imgs = (train_image_data - train_image_data_mean) / train_image_data_std
valid_imgs = (valid_image_data - valid_image_data_mean) / valid_image_data_std
test_imgs = (test_image_data - test_image_data_mean) / test_image_data_std
#####################################################################################################################
#####################################################################################################################
# Params
nb_img_train, nb_features = train_imgs.shape
nb_img_valid, _ = valid_imgs.shape
nb_img_test, _ = test_imgs.shape
_, predictions_size = train_template_data.shape
max_epoch = 1500
batch_train = 50
batch_test = 500
epoch_step = batch_train / nb_img_train
nbiter_epoch = np.floor(nb_img_train / batch_train)
nb_max_iter = np.floor(max_epoch / epoch_step)
dropout = 0.95
decay_epoch = 10
decay_factor = 0.97
inital_lr = 1e-3 # best 3e-3
batch_norm = False
nb_montecarlo_predictions = 20
pre_processing = True
power_pca = - 1 / 5
nb_kept_components = 2000
summary_dir = '../tensorlog'
folder_name = 'epoch_%i_dp_%.2f_nbmcdp_%i' % (max_epoch, dropout, nb_montecarlo_predictions)
if pre_processing:
folder_name += '_preprocess_%.2f_%i' % (-power_pca, nb_kept_components)
if batch_norm:
folder_name += '_batchnorm'
folder_name += '_deep_2-3layers_4blocks_elu'
full_dir = join(summary_dir, folder_name)
validation_log_frequency = 20
evaluation_log_frequency = 1000
training_log_frequency = 0.5
reshuffling_frequency = 3.0
validation_log_frequency_iter = np.floor(validation_log_frequency / epoch_step).astype(int)
evaluation_log_frequency_iter = np.floor(evaluation_log_frequency / epoch_step).astype(int)
training_log_frequency_iter = np.floor(training_log_frequency / epoch_step).astype(int)
reshuffling_frequency_iter = np.floor(reshuffling_frequency / epoch_step).astype(int)
np.random.seed(10)
tf.set_random_seed(0)
nb_display_images = 8
#####################################################################################################################
#####################################################################################################################
if pre_processing:
pca = PCA(svd_solver='randomized', n_components=nb_kept_components)
pca.fit(train_imgs)
pca_preprocess = lambda x: x.dot(pca.components_.T).dot(pca.components_ * np.power(pca.explained_variance_, power_pca).reshape(-1,1))
train_imgs = pca_preprocess(train_imgs)
valid_imgs = pca_preprocess(valid_imgs)
test_imgs = pca_preprocess(test_imgs)
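    # pca_preprocess projects each image onto the top nb_kept_components
    # principal axes and rescales axis i by explained_variance_i ** power_pca;
    # power_pca = -1/5 is a partial whitening (full whitening would use -1/2)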
'''
indices_components_loss = np.array([28,1,105,59,46,15,55,107,83,75,109,16,82,106,25,18,93,89,97,34,92,64,61,48,125,112,49,113,87,33,56,62,96,78,86,42,51,50,41,76,67,20,60,70,110,26,32,99,104,17,43,77,57,101,35,11,91,7,58,8,54,88,19,73,98,38,12,53,2,94,102,127,66,122,126,37,90,24,95,6,14,103,31,68,74,65,10,111,114,27,124,36,39,79,115,72,3,119,22,45,23,100,108,52,117,30,21,44,84,13,69,120,9,40,81,118,85,116,71,80,47,121,63,4,5,0,123,29])
weights_loss = np.ones(128)
weights_loss[indices_components_loss[-20:]] = 1
weights_loss = weights_loss.reshape(1, -1)
'''
#####################################################################################################################
#####################################################################################################################
#### Placeholders
with tf.name_scope('input'):
x_ = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1], name='x-input')
y_ = tf.placeholder(tf.float32, [None, template_dim], name='y-input')
keep_prob = tf.placeholder(tf.float32, name='dropout')
is_training = tf.placeholder(np.float32, name='is-training')
placeholder_dict = {'x_': x_, 'y_': y_, 'keep_prob': keep_prob, 'is-training': is_training}
#############################################
############### THE NETWORK #################
#############################################
stride = 1
filter_size = 3
filter_nb_1 = 10
filter_nb_2 = 13
filter_nb_3 = 18
filter_nb_4 = 25
filter_nb_5 = 100
# activation_func = tf.nn.relu
activation_func = tf.nn.elu
hidden1 = conv_layer(x_, [filter_size, filter_size, 1, filter_nb_1], 'conv-1', stride, keep_prob, is_training, act=activation_func)
hidden2 = conv_layer(hidden1, [filter_size, filter_size, filter_nb_1, filter_nb_1], 'conv-2', stride, keep_prob, is_training, act=activation_func)
hidden4 = conv_layer(hidden2, [filter_size, filter_size, filter_nb_1, filter_nb_1], 'conv-3', stride, keep_prob, is_training, act=activation_func)
#hidden4 = conv_layer(hidden3, [filter_size, filter_size, filter_nb_1, filter_nb_1], 'conv-4', stride, keep_prob, is_training, act=activation_func)
pool5 = tf.nn.max_pool(hidden4, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format='NHWC', name=None)
hidden6 = conv_layer(pool5, [filter_size, filter_size, filter_nb_1, filter_nb_2], 'conv-5', stride, keep_prob, is_training, act=activation_func)
hidden7 = conv_layer(hidden6, [filter_size, filter_size, filter_nb_2, filter_nb_2], 'conv-6', stride, keep_prob, is_training, act=activation_func)
hidden8 = conv_layer(hidden7, [filter_size, filter_size, filter_nb_2, filter_nb_2], 'conv-7', stride, keep_prob, is_training, act=activation_func)
#hidden9 = conv_layer(hidden8, [filter_size, filter_size, filter_nb_2, filter_nb_2], 'conv-8', stride, keep_prob, is_training, act=activation_func)
pool10 = tf.nn.max_pool(hidden8, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format='NHWC', name=None)
hidden11 = conv_layer(pool10, [filter_size, filter_size, filter_nb_2, filter_nb_3], 'conv-9', stride, keep_prob, is_training, act=activation_func)
hidden12 = conv_layer(hidden11, [filter_size, filter_size, filter_nb_3, filter_nb_3], 'conv-10', stride, keep_prob, is_training, act=activation_func)
#hidden13 = conv_layer(hidden12, [filter_size, filter_size, filter_nb_3, filter_nb_3], 'conv-11', stride, keep_prob, is_training, act=activation_func)
#hidden14 = conv_layer(hidden13, [filter_size, filter_size, filter_nb_3, filter_nb_3], 'conv-12', stride, keep_prob, is_training, act=activation_func)
pool15 = tf.nn.max_pool(hidden12, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format='NHWC', name=None)
hidden16 = conv_layer(pool15, [filter_size, filter_size, filter_nb_3, filter_nb_4], 'conv-13', stride, keep_prob, is_training, act=activation_func)
hidden17 = conv_layer(hidden16, [filter_size, filter_size, filter_nb_4, filter_nb_4], 'conv-14', stride, keep_prob, is_training, act=activation_func)
#hidden18 = conv_layer(hidden17, [filter_size, filter_size, filter_nb_4, filter_nb_4], 'conv-15', stride, keep_prob, is_training, act=activation_func)
#hidden19 = conv_layer(hidden18, [filter_size, filter_size, filter_nb_4, filter_nb_4], 'conv-16', stride, keep_prob, is_training, act=activation_func)
pool20 = tf.nn.max_pool(hidden17, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', data_format='NHWC', name=None)
'''
hidden21 = conv_layer(pool20, [filter_size, filter_size, filter_nb_4, filter_nb_5], 'conv-17', stride, keep_prob, is_training, act=activation_func)
hidden22 = conv_layer(hidden21, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-18', stride, keep_prob, is_training, act=activation_func)
hidden23 = conv_layer(hidden22, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-19', stride, keep_prob, is_training, act=activation_func)
hidden24 = conv_layer(hidden23, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-20', stride, keep_prob, is_training, act=activation_func)
pool25 = tf.nn.max_pool(hidden24, [1, 3, 3, 1], [1, 3, 3, 1], padding='SAME', data_format='NHWC', name=None)
'''
pool25 = tf.reshape(pool20, shape=[-1, 3 * 3 * filter_nb_4])
#fc14 = fc_layer(hidden17, [3 * 3 * filter_nb_4, 40], 'fc-1', keep_prob, is_training)
y = fc_layer(pool25, [3 * 3 * filter_nb_4, template_dim], 'fc-1', keep_prob, act=None)
#############################################
################ THE LOSS ###################
#############################################
""" Loss for regression """
with tf.name_scope('training'):
euclidean_loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_), axis=1))
tf.summary.scalar('train_euclidean_loss', euclidean_loss)
""" Learning rate """
with tf.name_scope('learning_rate'):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(inital_lr, global_step, np.floor(decay_epoch * nbiter_epoch), decay_factor, staircase=True)
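    # staircase schedule: lr = inital_lr * decay_factor ** floor(step / (decay_epoch * nbiter_epoch)),
    # i.e. multiply the rate by 0.97 once every 10 epochs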
tf.summary.scalar('learning_rate_summary', learning_rate)
""" Optimizer """
with tf.name_scope('opt-training'):
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(euclidean_loss, global_step=global_step)
merged_train_summary = tf.summary.merge_all()
with tf.name_scope('validation'):
validation_loss = tf.placeholder(tf.float32, name='loss')
summary_validation_loss = tf.summary.scalar('validation_euclidean_loss', validation_loss)
############ IMAGE SUMMARIES
#### Training
with tf.name_scope('training-high-variance-images'):
training_high_variance_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_training_high_variance_images = tf.summary.image('training-high-variance', training_high_variance_images, nb_display_images)
with tf.name_scope('training-low-variance-images'):
training_low_variance_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_training_low_variance_images = tf.summary.image('training-low-variance', training_low_variance_images, nb_display_images)
with tf.name_scope('training-high-error-images'):
training_high_error_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_training_high_error_images = tf.summary.image('training-high-error', training_high_error_images, nb_display_images)
with tf.name_scope('training-low-error-images'):
training_low_error_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_training_low_error_images = tf.summary.image('training-low-error', training_low_error_images, nb_display_images)
summary_training_images = tf.summary.merge([summary_training_high_variance_images,
summary_training_low_variance_images,
summary_training_high_error_images,
summary_training_low_error_images])
#### Validation
with tf.name_scope('validation-high-variance-images'):
validation_high_variance_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_validation_high_variance_images = tf.summary.image('validation-high-variance', validation_high_variance_images, nb_display_images)
with tf.name_scope('validation-low-variance-images'):
validation_low_variance_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_validation_low_variance_images = tf.summary.image('validation-low-variance', validation_low_variance_images, nb_display_images)
with tf.name_scope('validation-high-error-images'):
validation_high_error_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_validation_high_error_images = tf.summary.image('validation-high-error', validation_high_error_images, nb_display_images)
with tf.name_scope('validation-low-error-images'):
validation_low_error_images = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
summary_validation_low_error_images = tf.summary.image('validation-low-error', validation_low_error_images, nb_display_images)
summary_validation_images = tf.summary.merge([summary_validation_high_variance_images,
summary_validation_low_variance_images,
summary_validation_high_error_images,
summary_validation_low_error_images])
##########################################################################################################
if tf.gfile.Exists(full_dir):
    var = input('The folder {:s} already exists. Would you like to overwrite it?\n'
                'yes(y), no(n): '.format(full_dir))
    if var not in ['y', 'yes']:
        sys.exit()
    tf.gfile.DeleteRecursively(full_dir)
tf.gfile.MakeDirs(full_dir)
else:
tf.gfile.MakeDirs(full_dir)
sess = tf.Session()
train_writer = tf.summary.FileWriter(full_dir + '/train', sess.graph)
validation_writer = tf.summary.FileWriter(full_dir + '/validation')
init = tf.global_variables_initializer()
sess.run(init)
##########################################################################################################
##########################################################################################################
rng = np.random.RandomState(42)
train_imgs = train_imgs.reshape([-1, image_dim, image_dim, 1])
valid_imgs = valid_imgs.reshape([-1, image_dim, image_dim, 1])
test_imgs = test_imgs.reshape([-1, image_dim, image_dim, 1])
X_train, y_train = shuffle(train_imgs, train_template_data, random_state=42)
cursors = Cursors()
def feed_func(batch_size, mode='train', placeholder_dict=placeholder_dict, cursors=cursors):
if mode == 'train':
tmp_cur = cursors.train_current_pos
ind_batch = np.mod(tmp_cur + np.arange(batch_size), nb_img_train).astype(int)
X_tmp = X_train[ind_batch]
y_tmp = y_train[ind_batch]
cursors.train_current_pos = ind_batch[-1] + 1
is_training_tmp = 1.0
elif mode == 'valid':
tmp_cur = cursors.validation_current_pos
ind_batch = np.mod(tmp_cur + np.arange(batch_size), nb_img_valid).astype(int)
X_tmp = valid_imgs[ind_batch]
y_tmp = valid_template_data[ind_batch]
cursors.validation_current_pos = ind_batch[-1] + 1
is_training_tmp = 0.0
# non shuffled dataset
elif mode == 'eval':
tmp_cur = cursors.eval_current_pos
ind_batch = np.mod(tmp_cur + np.arange(batch_size), nb_img_train).astype(int)
X_tmp = train_imgs[ind_batch]
y_tmp = train_template_data[ind_batch]
cursors.eval_current_pos = ind_batch[-1] + 1
is_training_tmp = 0.0
return {placeholder_dict['x_']: X_tmp, placeholder_dict['y_']: y_tmp,
placeholder_dict['keep_prob']: dropout, placeholder_dict['is-training']: is_training_tmp}
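# feed_func advances a circular cursor per dataset, so successive calls yield
# consecutive (wrapping) mini-batches without copying or reshuffling the arrays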
##################### TRAINING LOOP #####################"""
i = 0
nb_iter_validation = np.ceil(nb_img_valid / batch_test)
nb_iter_evaluation = np.ceil(nb_img_train / batch_test)
while i < nb_max_iter:
####################### VALIDATION MODE ############################
if ((np.mod(i, validation_log_frequency_iter) == 0) & (not i == 0)):
cursors.validation_current_pos = 0
montecarlo_samples_validation = np.zeros((nb_img_valid, template_dim, nb_montecarlo_predictions), dtype=np.float32)
for jj in np.arange(nb_iter_validation):
ind_tmp = np.mod(jj * batch_test + np.arange(batch_test), nb_img_valid).astype(int)
feed_dict = feed_func(batch_test, mode='valid')
for kk in np.arange(nb_montecarlo_predictions):
mc_sample = sess.run(y, feed_dict=feed_dict)
montecarlo_samples_validation[ind_tmp, :, kk] = mc_sample
montecarlo_predictions_validation = np.mean(montecarlo_samples_validation, axis=2)
validation_squared_error = np.sum((montecarlo_predictions_validation - valid_template_data)** 2, axis=1)
validation_score = np.mean(np.sum((montecarlo_predictions_validation - valid_template_data)** 2, axis=1), axis=0)
sorted_ind = np.argsort(validation_squared_error)
high_error_ind = sorted_ind[-nb_display_images:]
low_error_ind = sorted_ind[:nb_display_images]
feed_images = {validation_high_error_images: valid_imgs[high_error_ind],
validation_low_error_images:valid_imgs[low_error_ind]}
sum_high_err_img, sum_low_err_img = sess.run([summary_validation_high_error_images,
summary_validation_low_error_images], feed_dict=feed_images)
validation_writer.add_summary(sum_high_err_img, i)
validation_writer.add_summary(sum_low_err_img, i)
valid_sum = sess.run(summary_validation_loss, feed_dict={validation_loss:validation_score})
validation_writer.add_summary(valid_sum, i)
print('{:.1f} epoch || validation score: {:.4e}'.format(i * epoch_step, validation_score))
####################### TRAIN MODE ############################
if ((np.mod(i, training_log_frequency_iter) == 0) & (not i == 0)):
train_sum, _, loss = sess.run([merged_train_summary, train_op, euclidean_loss], feed_dict=feed_func(batch_train, mode='train'))
train_writer.add_summary(train_sum, i)
####
print('{:.1f} epoch || training loss: {:.4e}'.format(i * epoch_step, loss))
else:
_ = sess.run(train_op, feed_dict=feed_func(batch_train, mode='train'))
##################### EVAL ON TRAIN DATASET ###################
if ((np.mod(i, evaluation_log_frequency_iter) == 0) & (not i == 0)):
cursors.eval_current_pos = 0
montecarlo_samples_evaluation = np.zeros((nb_img_train, template_dim, nb_montecarlo_predictions), dtype=np.float32)
for jj in np.arange(nb_iter_evaluation):
ind_tmp = np.mod(jj * batch_test + np.arange(batch_test), nb_img_train).astype(int)
feed_dict = feed_func(batch_test, mode='eval')
for kk in np.arange(nb_montecarlo_predictions):
mc_sample = sess.run(y, feed_dict=feed_dict)
montecarlo_samples_evaluation[ind_tmp, :, kk] = mc_sample
montecarlo_predictions_evaluation = np.mean(montecarlo_samples_evaluation, axis=2)
#centred_prediction_evaluation = montecarlo_samples_evaluation - montecarlo_predictions_evaluation.reshape(-1, -1, 1)
train_squared_error = np.sum((montecarlo_predictions_evaluation - train_template_data)** 2, axis=1)
full_train_loss = np.mean(np.sum((montecarlo_predictions_evaluation - train_template_data)** 2, axis=1), axis=0)
sorted_ind = np.argsort(train_squared_error)
high_error_ind = sorted_ind[-nb_display_images:]
low_error_ind = sorted_ind[:nb_display_images]
feed_images = {training_high_error_images: train_imgs[high_error_ind],
training_low_error_images:train_imgs[low_error_ind]}
sum_high_err_img, sum_low_err_img=sess.run([summary_training_high_error_images,
summary_training_low_error_images], feed_dict=feed_images)
train_writer.add_summary(sum_high_err_img, i)
train_writer.add_summary(sum_low_err_img, i)
print('{:.1f} epoch || full training loss: {:.4e}'.format(i * epoch_step, full_train_loss))
if np.mod(i, reshuffling_frequency_iter) == 0:
print('Shuffling training data')
train_imgs, train_template_data = shuffle(train_imgs, train_template_data, random_state=42)
i += 1
    ##################### PREDICT ON TEST DATASET ###################
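    # Monte Carlo dropout: dropout apparently stays active at prediction time
    # (is-training is fed 1.0 below), so averaging nb_montecarlo_predictions
    # stochastic forward passes approximates the predictive mean.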
montecarlo_samples_test = np.zeros((nb_img_test, template_dim, nb_montecarlo_predictions), dtype=np.float32)
nb_iter_test = np.ceil(nb_img_test / batch_test).astype(int)
for jj in np.arange(nb_iter_test):
ind_tmp = np.mod(jj * batch_test + np.arange(batch_test), nb_img_test).astype(int)
feed_dict = {placeholder_dict['x_']: test_imgs[ind_tmp], placeholder_dict['keep_prob']: dropout,
placeholder_dict['is-training']: 1.0}
for kk in np.arange(nb_montecarlo_predictions):
mc_sample = sess.run(y, feed_dict=feed_dict)
montecarlo_samples_test[ind_tmp, :, kk] = mc_sample
montecarlo_predictions_test = np.mean(montecarlo_samples_test, axis=2)
######### SAVE MODEL #############
saver = tf.train.Saver()
saver.save(sess, folder_name + 'tf_model', global_step)
#################### WRITE DOWN FILE #######################
output_file_name = join('..', folder_name + '_template_pred.bin' )
f = open(output_file_name, 'wb')
for i in range(nb_img_test):
f.write(montecarlo_predictions_test[i, :])
f.close()
| TalarG/challenge-mdi341 | challenge_main.py | challenge_main.py | py | 22,298 | python | en | code | 0 | github-code | 90 |
5487666997 | # Write a merge sort algorithm to sort an array.
# The function should return the sorted array.
# two examples
array1 = [45, 98, 3, 24, 15, 77, 9, 50] # output: [3, 9, 15, 24, 45, 50, 77, 98]
array2 = [18, 16, 27, 4, 12] # output: [4, 12, 16, 18, 27]
import math
def mergeSort(arr):
mergeSortTwo(arr, 0, len(arr)-1)
return arr
def mergeSortTwo(arr, first, last):
if first < last:
middle = (first + last) //2
mergeSortTwo(arr, first, middle)
mergeSortTwo(arr, middle+1, last)
merge(arr, first, middle, last)
return arr
def merge(arr, first, middle, last):
left = arr[first:middle+1]
right = arr[middle+1:last+1]
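    # math.inf sentinels let the merge loop below run to the end of the range
    # without explicit bounds checks on either half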
left.append(math.inf)
right.append(math.inf)
i = j = 0
for k in range(first, last +1):
if left[i] <= right[j]:
arr[k] = left[i]
i+=1
else:
arr[k] = right[j]
j+=1
return arr
print(mergeSort(array1))
print(mergeSort(array2)) | kandelin16/TechnicalInterviewCourse | Class_06_Frontend_Interviews_And_Merge_Sort/Frontend_Interviews_and_Merge_Sort_Homework/Problems/merge_sort_problem.py | merge_sort_problem.py | py | 979 | python | en | code | null | github-code | 90 |
31744423360 | # -*- coding:utf-8 -*-
# The api object is needed here; import it directly from __init__ so we don't create a duplicate
from flask.json import jsonify
from . import api
@api.route('/login')
def login():
my_dict = {
'name': 'aaa',
'age': 18,
}
    # jsonify converts both keyword arguments and a dict into a JSON response
return jsonify(my_dict)
# return '123'
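# Hedged usage note (editor addition; the /api prefix is an assumption about
# where this blueprint is mounted):
#   curl http://localhost:5000/api/login   ->   {"age": 18, "name": "aaa"}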
| qq453388937/Flask_ihome_Git | ihome/api/login.py | login.py | py | 363 | python | zh | code | 0 | github-code | 90 |
6812271420 | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from GUI.Login import Login
if __name__ == "__main__":
app = QApplication([])
index = QMainWindow()
main_window = Login()
main_window.setup_ui(index)
index.show()
sys.exit(app.exec_())
| alexlealr/Software_Horarios_UQ | GUI/Main.py | Main.py | py | 276 | python | en | code | 0 | github-code | 90 |
# Rabin-Karp: time limit exceeded; KMP: accepted.
# I wrote the code from the theory we learned, but I can't see why it times out.
# The code looks like O(n), so there must be something I overlooked.
# (Likely cause: the hash below is never reduced modulo anything, so the
# Python integers grow to O(n) bits and each rolling update costs O(n),
# making the whole scan O(n^2).)
# The equality-check loop is rarely the problem: strings whose hashes collide
# are so rare that it almost always finishes in a single comparison.
S = input()
P = input()
result = 0
value_S, value_P = 0, 0
n = len(P)
arr = [i for i in range(n-1, -1, -1)]
for i in range(n):
value_P += (ord(P[i]) * (2 ** arr[i]))
value_S += (ord(S[i]) * (2 ** arr[i]))
if value_S == value_P:
result = 1
for i in range(n):
if S[i] != P[i]:
result = 0
break
start = 1
end = len(P)
while end < len(S):
value_S = 2 * (value_S - ord(S[start-1]) * (2 ** arr[0])) + ord(S[end])
if value_S == value_P:
result = 1
for i in range(n):
if S[i+start] != P[i]:
result = 0
break
if result:
break
start += 1
end += 1
print(result)
# Problem: https://www.acmicpc.net/problem/16916
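
# Hedged fix sketch (editor addition): keeping the rolling hash reduced
# modulo a large prime makes each update O(1) and avoids the big-integer
# blowup described above. The base and modulus are illustrative choices.
def rabin_karp(S, P, base=256, mod=(1 << 61) - 1):
    n, m = len(S), len(P)
    if m > n:
        return 0
    h_p = h_s = 0
    power = 1  # will hold base**(m-1) % mod
    for i in range(m):
        h_p = (h_p * base + ord(P[i])) % mod
        h_s = (h_s * base + ord(S[i])) % mod
        if i < m - 1:
            power = (power * base) % mod
    for start in range(n - m + 1):
        if h_s == h_p and S[start:start + m] == P:
            return 1
        if start + m < n:
            h_s = ((h_s - ord(S[start]) * power) * base + ord(S[start + m])) % mod
    return 0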
| khyup0629/Algorithm | 라빈 카프(Rabin-Karp)/부분 문자열(★★★).py | 부분 문자열(★★★).py | py | 1,210 | python | ko | code | 3 | github-code | 90 |
12202794700 | from mainfuncs import *
def main():
ip_add = extract_ip()
cont = True
while cont:
try:
choice = int(input("Would you like to do a quick sweep or extensive sweep? Type 1 for quick or 2 for extensive\n(Note: An extensive sweep will take longer, but be more accurate, especially for devices that take a while to respond):"))
if choice == 1:
quicktest(ip_add)
elif choice == 2:
extensivetest(ip_add)
else:
print("Invalid response")
continue
except ValueError:
print("Invalid response")
else:
cont = False
if __name__ == '__main__':
main()
| Velocities/ping-sweep | main.py | main.py | py | 581 | python | en | code | 0 | github-code | 90 |
2744655096 | from collections import namedtuple
from typing import Tuple
from algorithm import Genome, List
Thing = namedtuple('Thing', ['name', 'value', 'weight'])
ThingList = List[Thing]
max_weight = 3000
first_example = [
Thing('Laptop', 500, 2200),
Thing('Headphones', 150, 160),
Thing('Coffee Mug', 60, 350),
Thing('Notepad', 40, 333),
Thing('Water Bottle', 30, 192)]
second_example = [
Thing('Mints', 5, 25),
Thing('Socks', 10, 38),
Thing('Tissues', 15, 80),
Thing('Phone', 500, 200),
Thing('Baseball Cap', 100, 70)
] + first_example
def fitness(genome: Genome, thing_list: List[Thing], weight_limit: int) -> int:
if len(genome) != len(thing_list):
raise ValueError("genome and thing list must be of same length")
value = 0
weight = 0
for index, i in enumerate(genome):
if i == 1:
value += thing_list[index].value
weight += thing_list[index].weight
if weight > weight_limit:
return 0
return value
def genome_to_things(genome: Genome, thing_list: ThingList) -> Tuple[List[str], int, int]:
knapsack_things: List[str] = []
knapsack_value: int = 0
knapsack_weight: int = 0
for index, gene in enumerate(genome):
if gene == 1:
knapsack_things.append(thing_list[index].name)
knapsack_value += thing_list[index].value
knapsack_weight += thing_list[index].weight
return knapsack_things, knapsack_value, knapsack_weight
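
# Hedged usage sketch (editor addition): score one hand-written genome
# against the first example set; the genome itself is illustrative.
if __name__ == "__main__":
    sample_genome = [1, 1, 0, 0, 1]  # Laptop, Headphones, Water Bottle
    print(fitness(sample_genome, first_example, max_weight))        # -> 680
    print(genome_to_things(sample_genome, first_example))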
| weszerzad/genetic_algorithm | knapsack_problem/knapsack_problem.py | knapsack_problem.py | py | 1,604 | python | en | code | 0 | github-code | 90 |
15773360896 | gyldig = False
while not gyldig:
    tall = input("Enter a number: ")
try:
tall = int(tall)
gyldig = True
except ValueError:
        print("You must enter an integer.")
print(f"You entered {tall}.") | hausnes/IT2-2023-2024 | intro-serie/validering_av_input.py | validering_av_input.py | py | 228 | python | no | code | 1 | github-code | 90 |
11366524811 | import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
os.system("cmake . -B build/")
threads = 1
os.chdir("build")
print("make")
os.system("make")
accelerations = []
efficiencies = []
sizes = []
threads = 1
cmd = "./Integral " + str(threads) + " 0.000000001"
result = subprocess.check_output(cmd, shell=True, text=True)
sizes.append(threads)
accelerations.append(1.0)
efficiencies.append(1.0)
seq = float(result)
threads = 2
while threads < 60:
cmd = "./Integral " + str(threads) + " 0.000000001"
result = subprocess.check_output(cmd, shell=True, text=True)
print("threads = %s: time = %ss" % (str(threads), result))
sizes.append(threads)
accelerations.append(seq / float(result))
efficiencies.append(seq / (float(result) * threads))
threads += 1
plt.figure(figsize=[12, 5], dpi=100)
plt.plot(list(sizes), accelerations, '-o', markersize=4, linewidth=2, label='y1', color = np.random.rand(3))
plt.xlabel("num of threads")
plt.ylabel("seq_time / time")
plt.title("Acceleration")
plt.minorticks_on()
plt.grid()
os.chdir("../")
if(not os.path.exists("graphs")):
os.mkdir("graphs")
os.chdir("graphs")
current_dir = os.getcwd()
print(f"Saving graph to {current_dir}/Acceleration.png")
plt.savefig('Acceleration.png')
plt.figure(figsize=[12, 5], dpi=100)
plt.plot(list(sizes), efficiencies, '-o', markersize=4, linewidth=2, label='y1', color = np.random.rand(3))
plt.xlabel("num of threads")
plt.ylabel("(seq_time) / (time * processes)")
plt.title("Efficiency")
plt.minorticks_on()
plt.grid()
current_dir = os.getcwd()
print(f"Saving graph to {current_dir}/Efficiency.png")
plt.savefig('Efficiency.png')
| KhankharaevArdan/lab2 | acceleration.py | acceleration.py | py | 1,721 | python | en | code | 0 | github-code | 90 |
18340567779 | #C - Attack Survival
N,K,Q = map(int,input().split())
A = list(int(input()) for i in range(Q))
score = [0]*(N)
for i in range(Q):
score[A[i]-1] += 1
score = [(K-Q+j) for j in score]
for k in score:
if k > 0:
print('Yes')
else:
print('No') | Aasthaengg/IBMdataset | Python_codes/p02911/s406077652.py | s406077652.py | py | 266 | python | en | code | 0 | github-code | 90 |
1420852452 | class Node:
def __init__(self, data):
self.data = data
self.ref = None
class LinkedList:
def __init__(self):
self.head = None
def print_LL(self):
if self.head is None:
print("Linked list is empty")
else:
n = self.head
while n is not None:
print(n.data)
n = n.ref
def add_begin(self, data):
new_node = Node(data)
new_node.ref = self.head
self.head = new_node
def add_end(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
n = self.head
while n.ref is not None:
n = n.ref
n.ref = new_node
def add_between(self,data,x):
n=self.head
while n is not None:
if x==n.data:
break
n=n.ref
if n is None:
print("node is not present in LL")
else:
new_node=Node(data)
new_node.ref=n.ref
n.ref=new_node
def delete_begin(self):
        if self.head is None:
print("LL is empty so we can't delete the node")
else:
            self.head=self.head.ref # advance head to the second node via the first node's "ref" (next) pointer
def delete_last(self):
if self.head is None:
print("LL is empty so we can't delete the node")
elif self.head.ref is None: # if linked list has only one node
self.head=None
else:
n=self.head
while n.ref.ref is not None:
n=n.ref
n.ref=None
LL1 = LinkedList()
LL1.add_begin(10)
LL1.add_begin(20)
LL1.add_begin(30)
LL1.add_begin(40)
LL1.add_end(5)
LL1.add_between(25,20) # insert 25 after 20
LL1.delete_begin() # delete the first node
LL1.delete_last() # delete the last node
LL1.print_LL()
| AswathiMohan23/Python_Basics | LinkedList/single_linkedList.py | single_linkedList.py | py | 1,939 | python | en | code | 0 | github-code | 90 |
2205831950 | import sys
from random import *
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage
import scipy.signal
import scipy.special
from keras.datasets import mnist
class MyNN:
def __init__(self, rate, inputs, hiddens, outputs):
        # add one extra input for the bias
        self.i_count = inputs + 1
        self.h_count = hiddens
        self.o_count = outputs
        # fill the weight matrices with random values
        self.w_ih = np.random.normal(0.0, pow(self.h_count, -0.5), (self.h_count, self.i_count))
        self.w_ho = np.random.normal(0.0, pow(self.o_count, -0.5), (self.o_count, self.h_count))
        # learning rate and sigmoid activation
self.lr = rate
self.activation_function = lambda x: scipy.special.expit(x)
def train(self, inputs_list, targets_list):
        # add 1 input for the bias
        inputs_list = np.concatenate((inputs_list, [1]), axis=0)
        # column vectors of the input data and the correct answers
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        # forward pass, sigmoid on both layers
        hid_results = self.activation_function(np.dot(self.w_ih, inputs))
        out_results = self.activation_function(np.dot(self.w_ho, hid_results))
        # output errors
        out_errors = (targets - out_results)
        # hidden-layer errors
        hid_errors = np.dot(self.w_ho.T, out_errors)
        # corrections for the hidden-to-output weights
        self.w_ho += self.lr * np.dot(out_errors * out_results * (1.0 - out_results),
                                      np.transpose(hid_results))
        # corrections for the input-to-hidden weights
self.w_ih += self.lr * np.dot(hid_errors * hid_results * (1.0 - hid_results),
np.transpose(inputs))
def query(self, inputs_list):
        # add 1 input for the bias
        inputs_list = np.concatenate((inputs_list, [1]), axis=0)
        # column vector of the input data
        inputs = np.array(inputs_list, ndmin=2).T
        # forward pass, sigmoid on both layers
hid_results = self.activation_function(np.dot(self.w_ih, inputs))
out_results = self.activation_function(np.dot(self.w_ho, hid_results))
return out_results
def set_lr(self, rate):
self.lr = rate
def train(n):
target = np.zeros(10)
target[y_train[n]] = 1
query = np.array(x_train[n]/255).reshape(784)
myNN.train(query, target)
def trainR(n):
target = np.zeros(10)
target[y_train[n]] = 1
rotation = random()*30-15
imageR = scipy.ndimage.rotate(x_train[n]/255, rotation, cval=0, reshape=False)
query = np.array(imageR).reshape(784)
myNN.train(query, target)
def test_t(n):
query = np.array(x_train[n] / 255).reshape(784)
return myNN.query(query)
def test(n):
query = np.array(x_test[n]/255).reshape(784)
return myNN.query(query)
def epoch_train(learning_rate):
myNN.set_lr(learning_rate)
x_train_len = len(x_train)
for i in range(x_train_len):
trainR(i)
if i%100 == 0:
sys.stdout.write("Row: %s\r" % i)
sys.stdout.flush()
def epoch_test():
x_test_len = len(x_test)
precision = 0
i = 0
for i in range (x_test_len):
ans = test(i)
if ans.argmax() == y_test[i]:
precision += 1
return precision/(i+1)
def epoch_test_t():
x_test_len = len(x_train)
precision = 0
i = 0
for i in range (x_test_len):
ans = test_t(i)
if ans.argmax() == y_train[i]:
precision += 1
return precision/(i+1)
def epoch_test_draw():
x_test_len = len(x_test)
precision = 0
i = 0
for i in range(x_test_len):
ans = test(i)
if ans.argmax() == y_test[i]:
precision += 1
else:
plt.imshow(255-x_test[i], cmap="gray")
plt.show()
plt.pause(0.1)
return precision/(i+1)
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
myNN = MyNN(0.1, 784, 100, 10)
for j in range (7):
        print("\nEpoch", j)
        epoch_train(0.1)
        print("\nOn the training set:", epoch_test_t())
        print("On the test set:", epoch_test())
    for k in range (3):
        epoch_train(0.01)
        print("\nAfter reducing the learning rate by an order of magnitude, epoch:", k)
        print("\nOn the training set:", epoch_test_t())
        print("On the test set:", epoch_test())
    # print("\nDrawing the digits that were misclassified")
    # print(epoch_test_draw())
    # best result on the test set: 0.9785
| makaryb/nn2s5k | lab1/src/mnistWorker.py | mnistWorker.py | py | 5,079 | python | ru | code | 0 | github-code | 90 |
35985437085 | import torch
import torch.nn as nn
from .arches import conv3x3, conv5x5, ResBlock
from thop import profile
class RNNCell(nn.Module):
def __init__(self, dual_cell=True):
super(RNNCell, self).__init__()
self.dual_cell = dual_cell
# F_B: blur feature extraction part
self.F_B = nn.Sequential(
conv5x5(3, 20, stride=1),
conv5x5(20, 40, stride=2),
conv5x5(40, 60, stride=2)
)
# F_R: residual blocks part
res_blocks = []
for i in range(6):
res_blocks.append(ResBlock(80, batch_norm=False))
self.F_R = nn.Sequential(*res_blocks)
if not dual_cell:
# F_L: reconstruct part
self.F_L = nn.Sequential(
nn.ConvTranspose2d(80, 40, 3, stride=2, padding=1, output_padding=1),
nn.ConvTranspose2d(40, 20, 3, stride=2, padding=1, output_padding=1),
conv5x5(20, 3, stride=1)
)
# F_h: hidden state part
self.F_h = nn.Sequential(
conv3x3(80, 20),
ResBlock(20, batch_norm=False),
conv3x3(20, 20)
)
def forward(self, x, h_last, infer=True):
# x structure: (batch_size, channel, height, width)
h = self.F_B(x)
        h = torch.cat([h, h_last], dim=1)  # concatenate along the channel dimension
h = self.F_R(h)
if not self.dual_cell and infer:
out = self.F_L(h)
else:
out = None
hc = self.F_h(h)
return out, hc
class Model(nn.Module):
"""
Recurrent Neural Networks with Intra-Frame Iterations for Video Deblurring (IFIRNN, CVPR2019)
"""
def __init__(self, para):
super(Model, self).__init__()
self.para = para
self.ratio = 4
# C2H3
self.iters = 3
self.rnncell0 = RNNCell(dual_cell=True)
self.rnncell1 = RNNCell(dual_cell=False)
def forward(self, x, profile_flag=False):
outputs = []
# x structure: (batch_size, frame, channel, height, width) = (64, 12, 3, 720, 1024)
batch_size, frames, channels, height, width = x.shape
h_height = int(height / self.ratio)
h_width = int(width / self.ratio)
# forward h structure: (batch_size, channel, height, width)
hc = torch.zeros(batch_size, 20, h_height, h_width).cuda()
for i in range(frames):
# output: (batch_size, channel, height, width) = (64, 3, 720, 1204)
out, hc = self.rnncell0(x[:, i, :, :, :], hc)
assert out == None
for j in range(self.iters):
if j == self.iters - 1:
out, hc = self.rnncell1(x[:, i, :, :, :], hc)
else:
out, hc = self.rnncell1(x[:, i, :, :, :], hc, infer=False)
assert out == None
outputs.append(torch.unsqueeze(out, dim=1))
return torch.cat(outputs, dim=1)
def feed(model, iter_samples):
inputs = iter_samples[0]
outputs = model(inputs)
return outputs
def cost_profile(model, H, W, seq_length):
x = torch.randn(1, seq_length, 3, H, W).cuda()
profile_flag = True
flops, params = profile(model, inputs=(x, profile_flag), verbose=False)
return flops / seq_length, params
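
# Hedged usage sketch (editor addition): `para` is whatever config object the
# surrounding framework supplies, and CUDA is required because the hidden
# state is allocated on the GPU.
# model = Model(para).cuda()
# frames = torch.randn(1, 8, 3, 256, 256).cuda()
# deblurred = model(frames)   # -> torch.Size([1, 8, 3, 256, 256])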
| zzh-tech/ESTRNN | model/IFIRNN.py | IFIRNN.py | py | 3,298 | python | en | code | 273 | github-code | 90 |
11518305369 | t = int(input())
for i in range(t):
n = int(input())
mxa = 0
mxb = 0
k = input().split()
# print(k)
k = [int(i) for i in k]
k = sorted(k)
# print(k)
mxa = 0
mxb = 0
for i in k:
if i>=mxa:
mxb = mxa
mxa = i
if mxa - mxb > 1:
print("NO")
else:
print("YES")
| Sagor31h2/LeetcodeGroup | Rimon/Codeforces/div3_780_b.py | div3_780_b.py | py | 362 | python | en | code | 0 | github-code | 90 |
19318369247 | import cv2
import numpy as np
import os
from PIL import Image
in_dir = "./result/pre/"
out_dir = "./result/postprocessor_pre/"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for file_name in os.listdir(in_dir):
file_path = in_dir + file_name
# read gray image
img_orign = cv2.imread(file_path, 0)
# erode and dilate the image
kernel = np.ones((3, 3), np.uint8)
img_dilate = cv2.dilate(img_orign, kernel, iterations=5)
img_erode = cv2.erode(img_dilate, kernel, iterations=4)
cv2.imwrite(out_dir + file_name, img_erode)
# file_path = "./result/pre/pre11.jpg"
#
# # read gray image
# img_orign = cv2.imread(file_path, 0)
#
# # erode and dilate the image
# kernel = np.ones((5, 5), np.uint8)
# img_erode = cv2.erode(img_orign, kernel, iterations=2)
# cv2.imshow("erode", np.hstack((img_orign, img_erode)))
#
# img_dilate = cv2.dilate(img_erode, kernel, iterations=2)
#
# cv2.imshow("dilate", np.hstack((img_erode, img_dilate)))
# cv2.waitKey(0)
# cv2.destroyAllWindows()
| tangzhenjie/KnifeGate_Pan | postprocessor.py | postprocessor.py | py | 1,020 | python | en | code | 0 | github-code | 90 |
2481758781 | def make_shirt(size='L', word="I love Python"):
print(f"The shirt's size is: {size}, word is {word}.")
make_shirt("M", "Hello world")
make_shirt()
make_shirt("M")
make_shirt(word='I love Java')
make_shirt(size='S')
def describe_city(city_name='beijing', country_name='china'):
print(f"{city_name.title()} is in {country_name.title()}.")
describe_city('beijing', 'china')
describe_city("shanghai")
describe_city(city_name="shenzhen")
describe_city(country_name="zhongguo")
describe_city("Reykjavik", 'iceland')
| kopstill/python-crush-course-2nd-edition | chapter_8/exercises.py | exercises.py | py | 524 | python | en | code | 0 | github-code | 90 |
23005318301 | import json
import logging
import os
import re
import sqlalchemy
import sys
import zipfile
from gi.repository import GLib, Gio, Gtk, WebKit2
from .models import create_session
from .web_view_api import WebViewApi
from . import utils
logger = logging.getLogger(__name__)
APPLICATION_NAME = "Kolibri WebView Demo"
class WebView(WebKit2.WebView):
def __init__(self, main_window, *args, **kwargs):
web_context = WebKit2.WebContext()
web_context.get_security_manager().register_uri_scheme_as_local('ekn')
web_context.register_uri_scheme('ekn', self.load_ekn_uri)
super().__init__(*args, web_context=web_context, **kwargs)
self.web_view_api = WebViewApi(main_window)
user_content_manager = self.get_user_content_manager()
user_content_manager.register_script_message_handler('eosKnowledgeLibCall')
user_content_manager.connect('script-message-received::eosKnowledgeLibCall',
self.resolve_web_call)
web_settings = self.get_settings()
web_settings.set_enable_developer_extras(True)
web_settings.set_enable_write_console_messages_to_stdout(True)
web_settings.set_javascript_can_access_clipboard(True)
html = GLib.file_get_contents(
os.path.join(os.path.dirname(__file__), 'data/template/index.html')
).contents.decode('utf-8')
self.load_html(html, 'ekn://home')
def resolve_web_call(self, manager, js_result):
payload = json.loads(js_result.get_js_value().to_string())
response_payload = self.web_view_api.dispatch(payload)
self.run_javascript(
'EosKnowledgeLib.resolveCall({json})'.format(json=json.dumps(response_payload)),
None, None)
def update_search(self, query):
self.run_javascript(
'window.dispatchEvent(new CustomEvent(\'ekn-update-search\', {\n' +
' detail: {\n' +
' query: \'{query}\',\n'.format(query=query) +
' },\n' +
'}));',
None, None)
def set_night_mode(self, enabled):
settings = Gtk.Settings.get_default()
settings.set_property('gtk-application-prefer-dark-theme', enabled)
if enabled:
self.run_javascript(
'window.dispatchEvent(new CustomEvent(\'ekn-night-mode\', {\'detail\': true}));',
None, None)
else:
self.run_javascript(
'window.dispatchEvent(new CustomEvent(\'ekn-night-mode\', {\'detail\': false}));',
None, None)
def go_back(self):
pass
def go_forward(self):
pass
def go_home(self):
self.run_javascript(
'window.dispatchEvent(new CustomEvent(\'ekn-go-home\'));',
None, None)
def load_ekn_uri(self, req):
match = re.match(
r'^\/kolibri\/storage\/([a-zA-Z0-9\.]+)([a-zA-Z0-9\.\/]+)?$',
req.get_path())
if match:
file_path = utils.get_kolibri_storage_file_path(match.group(1))
file = Gio.File.new_for_path(file_path)
if file.query_exists():
print('load_ekn_uri', req.get_path(), file_path, match.group(1), match.group(2))
if os.path.splitext(match.group(1))[1] == '.zip':
with zipfile.ZipFile(file_path) as zfile:
zfile_member = 'index.html'
if match.group(2) is not None:
# TODO: Load relative HTML5 files
# zfile_member = match.group(2).strip('/')
pass
input_stream = Gio.MemoryInputStream.new_from_bytes(
GLib.Bytes(zfile.read(zfile_member)))
req.finish(input_stream, -1, 'text/html')
else:
content_type = file.query_info(
Gio.FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE,
Gio.FileQueryInfoFlags.NONE, None).get_content_type()
req.finish(file.read(), -1, content_type)
class MainWindow(Gtk.ApplicationWindow):
__gtype_name__ = 'MainWindow'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, default_width=900, default_height=700)
builder = Gtk.Builder.new_from_file(
os.path.join(os.path.dirname(__file__), 'data/ui/mainwindow.ui')
)
builder.connect_signals(self)
self.header_bar = builder.get_object('header_bar')
self.set_titlebar(self.header_bar)
self.set_title(APPLICATION_NAME)
self.main_vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(self.main_vbox)
self.main_vbox.show()
self.search_bar = builder.get_object('search_bar')
self.main_vbox.pack_start(self.search_bar, False, False, 0)
self.search_bar.show()
self.search_button = builder.get_object('button_search')
self.search_entry = builder.get_object('search_entry')
self.webview = WebView(self)
self.main_vbox.pack_end(self.webview, True, True, 0)
self.webview.show()
def toggle_search(self):
toggled = self.search_button.get_active()
self.search_button.set_active(not toggled)
def set_night_mode(self, enabled):
self.webview.set_night_mode(enabled)
def on_search_entry_search_changed(self, search_entry):
self.webview.update_search(search_entry.get_text())
def on_button_go_back_clicked(self, *args):
self.webview.go_back()
def on_button_go_forward_clicked(self, *args):
self.webview.go_forward()
def on_button_go_home_clicked(self, *args):
self.webview.go_home()
def on_button_search_toggled(self, *args):
toggled = self.search_button.get_active()
self.search_bar.set_reveal_child(toggled)
self.search_entry.set_text('')
if toggled:
self.search_entry.grab_focus()
def on_search_entry_stop_search(self, *args):
self.search_button.set_active(False)
def set_header_title(self, title, subtitle=None):
if not title:
title = APPLICATION_NAME
subtitle = None
self.header_bar.set_title(title)
self.header_bar.set_subtitle(subtitle)
class Application(Gtk.Application):
def __init__(self, *args, **kwargs):
super().__init__(*args, application_id='com.endlessm.KolibriWebViewDemo',
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE, **kwargs)
self.main_window = None
self.channel_id = None
quit_action = Gio.SimpleAction.new('quit', None)
quit_action.connect('activate', self.on_quit_action_activate)
self.add_action(quit_action)
self.set_accels_for_action('app.quit', ['<Primary>q'])
search_action = Gio.SimpleAction.new('search', None)
search_action.connect('activate', self.on_search_action_activate)
self.add_action(search_action)
self.set_accels_for_action('app.search', ['<Primary>f'])
night_mode_action = Gio.SimpleAction.new_stateful(
'night_mode',
None,
GLib.Variant.new_boolean(False)
)
night_mode_action.connect('change_state', self.on_night_mode_action_change_state)
self.add_action(night_mode_action)
def do_activate(self):
# We only allow a single window and raise any existing ones
if not self.main_window:
# gvfs.init()
database_path = os.path.join(
utils.KOLIBRI_DATA_DIR,
'content/databases/{id}.sqlite3'.format(id=self.channel_id)
)
create_session(database_path)
# Windows are associated with the application
# when the last one is closed the application shuts down
self.main_window = MainWindow(application=self)
self.main_window.present()
def do_command_line(self, command_line):
arguments = command_line.get_arguments()
if len(arguments) == 1:
logger.error('Missing channel_id')
return 1
self.channel_id = arguments[1]
self.activate()
return 0
def on_quit_action_activate(self, action, param):
self.quit()
def on_search_action_activate(self, action, param):
if self.main_window:
self.main_window.toggle_search()
def on_night_mode_action_change_state(self, action, value):
if self.main_window:
self.main_window.set_night_mode(value)
action.set_state(value)
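
# Hedged entry-point sketch (editor addition): the real packaging presumably
# wires this up elsewhere; shown only to illustrate how the app would launch.
# if __name__ == '__main__':
#     import sys
#     sys.exit(Application().run(sys.argv))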
| endlessm/kolibri-webview-demo | kolibri_webview_demo/application.py | application.py | py | 8,689 | python | en | code | 0 | github-code | 90 |
26775743263 | import torch
import torch.nn as nn
import numpy as np
from flask import Flask, jsonify, request
import io
from PIL import Image
import smart_open
app = Flask(__name__)
class TanhScale(nn.Module):
def __init__(self, mean, scale):
super().__init__()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.tanh = nn.Tanh()
self.scale = torch.FloatTensor([scale]).to(device)
self.mean = torch.FloatTensor([mean]).to(device)
def forward(self, x):
x = self.tanh(x)
x = x * self.scale + self.mean
return x
device = torch.device("cpu")
model_temp = torch.load("api_server/model_temp_29_sfsea_mod.pt", map_location=torch.device('cpu'))
model_rain = torch.load("api_server/model_rain_83_sfsea.pt", map_location=torch.device('cpu'))
tanhscale = TanhScale(40, 55)
def forward_temp(img):
img = torch.as_tensor(img).to(device).float()
inter = model_temp(img)
return tanhscale(inter)
def forward_rain(img):
img = torch.as_tensor(img).to(device).float()
return model_rain(img)
def predicts(img):
# print(img)
img = preprocess(img)
model_temp.eval()
model_rain.eval()
temps = forward_temp(img).detach().numpy()[0]
rains = forward_rain(img).detach().numpy()[0]
return temps[0], temps[1], rains
def preprocess(img):
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
if len(img.shape) == 3:
img = img[None]
if img.max() > 1:
img = img/255.
if img.shape[1] != 3:
img = img.transpose(0, 3, 1, 2)
# img = img[:, :, :224, :224]
for i in range(img.shape[1]):
img[:, i, :, :] = (img[:, i, :, :] - mean[i]) / std[i]
return img
@app.route('/predict', methods=['GET','POST'])
def predict():
# print(request.method)
if request.method == 'POST':
file = request.files['file']
img_bytes = file.read()
temp = Image.open(io.BytesIO(img_bytes))
img = np.array(temp.resize((224, 224)))
low, high, rain = predicts(img)
if rain[0] > rain[1]:
return jsonify({'low': str(low), 'high': str(high), 'rain': False})
else:
return jsonify({'low': str(low), 'high': str(high), 'rain': True})
if request.method == "GET":
image_url = request.args.get("image_url")
# print(image_url)
if image_url is None:
return "no image_url defined in query string"
temp = read_image_pil(image_url)
img = np.array(temp.resize((224, 224)))
low, high, rain = predicts(img)
if rain[0] > rain[1]:
return jsonify({'low': str(low), 'high': str(high), 'rain': False})
else:
return jsonify({'low': str(low), 'high': str(high), 'rain': True})
def read_image_pil(image_uri):
with smart_open.open(image_uri, "rb") as image_file:
return read_image_pil_file(image_file)
def read_image_pil_file(image_file):
with Image.open(image_file) as image:
image = image.convert(mode=image.mode)
return image
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=False)
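
# Hedged usage sketch (editor addition; host and port follow app.run above):
#   curl -X POST -F "file=@sky.jpg" http://localhost:8000/predict
#   curl "http://localhost:8000/predict?image_url=https://example.com/sky.jpg"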
| IzzyPutterman/cs194 | api_server/app.py | app.py | py | 3,137 | python | en | code | 0 | github-code | 90 |
5366931811 | import math
import torch
import torch.nn as nn
class BottleNeck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BottleNeck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(residual)
out += residual
return self.relu(out)
class ResNeXtBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, base_width=4, cardinality=32):
super(ResNeXtBottleneck, self).__init__()
D = int(math.floor(planes * (base_width / 64.)) * cardinality)
self.conv1 = nn.Conv2d(inplanes, D, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(D)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, bias=False, groups=cardinality)
self.bn2 = nn.BatchNorm2d(D)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(D, planes * ResNeXtBottleneck.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * ResNeXtBottleneck.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(residual)
out += residual
return self.relu3(out)
class ResNeXt(nn.Module):
def __init__(self, block, blocks, num_classes=1000):
super(ResNeXt, self).__init__()
self.inplanes = 64
self.layer1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.layer2 = self._make_layer(block, 64, blocks[0], stride=1)
self.layer3 = self._make_layer(block, 128, blocks[1], stride=2)
self.layer4 = self._make_layer(block, 256, blocks[2], stride=2)
self.layer5 = self._make_layer(block, 512, blocks[3], stride=2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, num_blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride),
nn.BatchNorm2d(planes * block.expansion),
                # note: no ReLU here, so the shortcut stays a linear projection
)
layers = []
layers.append(block(self.inplanes, planes, stride=stride, downsample=downsample))
self.inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(block(self.inplanes, planes, stride=1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.avg_pool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnext50():
return ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3])
def resnext101():
return ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3])
def resnext152():
return ResNeXt(ResNeXtBottleneck, [3, 4, 36, 3])
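
# Hedged smoke test (editor addition): forward one dummy batch to check the
# output shape; the 224x224 input size is illustrative.
if __name__ == "__main__":
    model = resnext50()
    out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([2, 1000])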
| limingcv/Classification-template-with-PyTorch | models/resnext.py | resnext.py | py | 4,704 | python | en | code | 1 | github-code | 90 |
94951297 | # Majority Element
"""
Given an array nums of size n, return the majority element.
The majority element is the element that appears more than [n / 2] times. You may assume that the majority element always exists in the array.
Strategy:
first approach:
- create two lists: one to store each distinct number seen and another to store its occurrence count.
- iterate through nums and fill the two lists.
- return the number with the most occurrences (it necessarily occurs more than n/2 times).
second approach:
- sort the list nums.
- return the number at index n/2 of the sorted list. Since the majority element occurs more than n/2 times, it must appear at index n/2.
(A third O(1)-space option, Boyer-Moore voting, is sketched right after this docstring.)
"""
#first approach - counting occurrences
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
num=[]
occur=[]
for i in range(len(nums)):#iterate through the list
if nums[i] in num:#if the number is already in num just increase it's occurence.
x=num.index(nums[i])
occur[x]+=1
else:# add the number to num and its accurence is 1.
num.append(nums[i])
occur.append(1)
return num[occur.index(max(occur))]
# second approach
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
num=sorted(nums)
return num[len(num)//2] | Talin-Estiban/leetcode | MajorityElement.py | MajorityElement.py | py | 1,492 | python | en | code | 0 | github-code | 90 |
32534059418 | import pqtable
# (1) Make sure you have already downloaded siftsmall data in data/ by scripts/download_siftsmall.sh
# (2) Read vectors
queries = pqtable.ReadTopN("data/siftsmall/siftsmall_query.fvecs", "fvecs") # Because top_n is not set, read all vectors
bases = pqtable.ReadTopN("data/siftsmall/siftsmall_base.fvecs", "fvecs")
learns = pqtable.ReadTopN("data/siftsmall/siftsmall_learn.fvecs", "fvecs")
# (3) Train a product quantizer
M = 4
print("=== Train a product quantizer ===")
pq = pqtable.PQ(pqtable.PQ.Learn(learns, M))
# (4) Encode vectors into PQ codes
print("=== Encode vectors into PQ codes ===")
codes = pq.Encode_Array(bases)
# (5) Build a PQTable
print("=== Build PQTable ===")
tbl = pqtable.PQTable(pq.GetCodewords(), codes)
# (6) Do search
print("=== Do search ===")
t0 = pqtable.Elapsed()
for q, query in enumerate(queries):
result = tbl.Query(query) # result = (nearest_id, its_dist)
print(str(q) + "th query: nearest_id=" + str(result[0]) + ", dist=" + str(result[1]))
print(str((pqtable.Elapsed() - t0) / len(queries) * 1000) + " [msec/query]")
| manvendratomar/pyPQTable | demo_siftsmall.py | demo_siftsmall.py | py | 1,090 | python | en | code | 1 | github-code | 90 |
7019406726 | from django.contrib.auth.models import User
from django.db import models
class Todo(models.Model):
title = models.CharField(max_length=255)
user = models.ForeignKey(User, blank=True, on_delete=models.CASCADE, null=True)
completed = models.BooleanField(default=False)
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-datetime'] | Nepul321/Todo-List-with-ReactJS-and-Django-Backend | base/models.py | models.py | py | 384 | python | en | code | 0 | github-code | 90 |
18579595799 | import sys
def input():return sys.stdin.readline().strip()
def main():
N, H = map(int, input().split())
info = [tuple(map(int, input().split())) for _ in range(N)]
A_MAX = max(a for a, _ in info)
Bs = [b for _, b in info if b > A_MAX]
Bs.sort(reverse=True)
ans = 0
for b in Bs:
if H <= 0:
break
ans += 1
H -= b
if H > 0:
ans += (H + A_MAX-1)//A_MAX
print(ans)
if __name__ == "__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p03472/s632326082.py | s632326082.py | py | 481 | python | en | code | 0 | github-code | 90 |
35727014248 | import os,json,io,logging
class DataManager:
def __init__(self,path="\\cqpy_data\\"):
self.path = os.getcwd() + path
if not os.path.exists(self.path):
os.mkdir(self.path)
def getFileFullPath(self,file_name:str)->str:
full_path = self.path + file_name
if not os.path.exists(full_path):
with open(full_path,"wb") as f:
f.write(json.dumps({},ensure_ascii=False,indent=4).encode("utf8"))
return full_path
def hasFile(self,file_name:str)->bool:
full_path = self.path + file_name
return os.path.exists(full_path)
def get(self,file_name:str,key:str=None)->dict|list|int|str|float|bool|None:
j = None
try:
with open(self.getFileFullPath(file_name),"rb") as f:
j = json.loads(f.read())
except BaseException as e:
logging.exception(e)
if j != None:
if key == None:
return j
if type(j) == dict and key in j:
return j[key]
return None
def findGet(self,file_name:str,key:str=None,dis_val=None):
r = self.get(file_name,key)
if r == None:
return dis_val
return r
def set(self,file_name:str,key:str,val:dict|list|int|str|float|bool)->bool:
j = None
raw_f = b""
try:
with open(self.getFileFullPath(file_name),"rb") as f:
raw_f = f.read()
j:dict = json.loads(raw_f)
except BaseException as e:
logging.exception(e)
if j!=None:
if type(j) == dict:
j[key] = val
try:
with open(self.getFileFullPath(file_name),"wb") as f:
f.write(json.dumps(j,ensure_ascii=False,indent=4).encode("utf8"))
return True
except BaseException as e:
logging.exception(e)
return False
def getMenbers(self, file_name:str, keys:None|list[str]|tuple[str]=None, dis_fnl:object=lambda x:None)->dict:
j = None
r = {}
try:
with open(self.getFileFullPath(file_name),"rb") as f:
j = json.loads(f.read())
except BaseException as e:
logging.exception(e)
if j != None:
f_t = type(keys)
if f_t != list and f_t != tuple:
return r
for i in keys:
if i in j:
r[i] = j[i]
else:
r[i] = dis_fnl(i)
return r
def setMenbers(self, file_name:str, key_vals:dict)->bool:
j = None
try:
with open(self.getFileFullPath(file_name),"rb") as f:
j:dict = json.loads(f.read())
except BaseException as e:
logging.exception(e)
if j!=None:
if type(j) == dict:
for key in key_vals:
j[key] = key_vals[key]
try:
with open(self.getFileFullPath(file_name),"wb") as f:
f.write(json.dumps(j,ensure_ascii=False,indent=4).encode("utf8"))
return True
except BaseException as e:
logging.exception(e)
return False | xyazh/xyazhServer | xyazhServer/DataManager.py | DataManager.py | py | 3,425 | python | en | code | 1 | github-code | 90 |
35791458497 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 10:23:08 2021
@author: sebbe
"""
import streamlit as st
import pandas as pd
import xgboost as xgb
import os
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
st.set_page_config(layout="wide")
st.write(""" # Nettside for å automatisere låneprosessen""")
train = pd.read_csv("data/train.csv", index_col=0)
st.sidebar.header("Input verdier")
def file_selector(folder_path='.'):
filenames = os.listdir(folder_path)
    selected_filename = st.selectbox('Select the file with your information', filenames)
return os.path.join(folder_path, selected_filename)
filename = file_selector()
st.write('You selected this file `%s`' % filename)
st.write("Based on your data you will get a loan")
def verdier_fra_bruker():
Gender = st.sidebar.selectbox("Gender", ["Male", "Female"])
Married = st.sidebar.selectbox("Married?", ["Yes", "No"])
Dependents = st.sidebar.slider("Dependents",0,10)
Education = st.sidebar.selectbox("Education", ["Graduate", "Not Graduate"])
Self_Employed = st.sidebar.selectbox("Self Employed", ["Yes", "No"])
ApplicantIncome = st.sidebar.slider("ApplicantIncome",float(train.ApplicantIncome.min()),float(train.ApplicantIncome.max()),float(train.ApplicantIncome.mean()))
CoapplicantIncome = st.sidebar.slider("CoapplicantIncome",float(train.CoapplicantIncome.min()),float(train.CoapplicantIncome.max()),float(train.CoapplicantIncome.mean()))
LoanAmount = st.sidebar.slider("Loan_Amount",float(train.LoanAmount.min()),float(train.LoanAmount.max()),float(train.LoanAmount.mean()))
Loan_Amount_Term = st.sidebar.slider("Loan_Amount_Term",float(train.Loan_Amount_Term.min()),float(train.Loan_Amount_Term.max()),float(train.Loan_Amount_Term.mean()))
Credit_History = st.sidebar.slider("Credit_History",0,1)
Property_Area = st.sidebar.selectbox("Property_Area", ["Urban", "Rural","Semiurban"])
data = {
"Dependents": Dependents,
"Gender" : Gender,
"Married":Married,
"Education": Education,
"Self_Employed" : Self_Employed,
"ApplicantIncome": ApplicantIncome,
"CoapplicantIncome" : CoapplicantIncome,
"Loan_Amount": LoanAmount,
"Loan_Amount_Term": Loan_Amount_Term,
"Credit_History" : Credit_History,
"Property_Area" : Property_Area
}
featurs = pd.DataFrame(data, index = [0])
return featurs
pred_user = verdier_fra_bruker()
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
#st.dataframe(data=pred_user, width=1200, height=768)
#st.dataframe(pred_user)
st.write(""" ## Se om du fortsatt vil få lån dersom du endrer noen parametere""")
st.write(""" ### Dine nye parametere""")
st.table(pred_user)
#st.write(pred_user)
train = train.dropna()
train["Dependents"].replace({"0":0 , "1": 1,"2":2,"3":3,"3+":3, "4" : 4 }, inplace = True)
train["Gender"].replace({"Male":0 , "Female": 1 }, inplace = True)
train["Married"].replace({"Yes":0 , "No": 1 }, inplace = True)
train["Education"].replace({"Graduate":0 , "Not Graduate": 1 }, inplace = True)
train["Self_Employed"].replace({"Yes": 0 , "No": 1}, inplace = True )
train["Property_Area"].replace({"Urban": 0 , "Rural": 1,"Semiurban" : 2 }, inplace = True )
train["Loan_Status"].replace({"Y": 0,"N" : 1 }, inplace = True )
print(train["Credit_History"].value_counts())
pred_user["Gender"].replace({"Male":0 , "Female": 1 }, inplace = True)
pred_user["Married"].replace({"Yes":0 , "No": 1 }, inplace = True)
pred_user["Education"].replace({"Graduate":0 , "Not Graduate": 1 }, inplace = True)
pred_user["Self_Employed"].replace({"Yes": 0 , "No": 1}, inplace = True )
pred_user["Property_Area"].replace({"Urban": 0 , "Rural": 1,"Semiurban" : 2 }, inplace = True )
X = train.iloc[:, :-1]
y = train.iloc[:, -1]
#Test train split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)
pipe_lr = make_pipeline(
XGBClassifier(booster="gbtree", learning_rate=0.05, max_depth=5, n_estimators=100, min_child_weight=4, nthread=8, subsample=0.5,use_label_encoder=False)
)
pipe_lr.fit(X_train, y_train)
y_train_pred = pipe_lr.predict(X_train)
y_test_pred = pipe_lr.predict(X_test)
prediksjon = pipe_lr.predict(pred_user)
if prediksjon == 0:
st.write("Basert på dette får du lån ")
elif prediksjon> 0:
st.write("Basert på dette får du ikke lån ")
| eirihoyh/TIN200_jun2021 | StreamLit.py | StreamLit.py | py | 4,702 | python | en | code | 0 | github-code | 90 |
18980696315 | from newspaper import build, Article
class NewsScrapper:
def __init__(self, src_url):
self.src_url = src_url
def __create_news_with(self, url):
news = Article(url, language='ko')
news.download()
news.parse()
return news
def __get_news_urls(self, num_of_news):
urls = []
articles = build(self.src_url).articles
for article in articles[:num_of_news]:
urls.append(article.url)
return urls
def get_news(self, num_of_news):
news = []
for url in self.__get_news_urls(num_of_news):
news.append(self.__create_news_with(url))
if len(news) == 0:
raise RuntimeError("뉴스를 스크랩 하는데 실패함.")
print(news)
return news
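
# Hedged usage sketch (editor addition; the URL is illustrative):
# scrapper = NewsScrapper("https://news.example.com")
# for article in scrapper.get_news(5):
#     print(article.title)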
| emplam27/github-action-test | news_scrapper.py | news_scrapper.py | py | 861 | python | en | code | 0 | github-code | 90 |
35648490302 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import os
import codecs
from datastationary import *
from dataconst import *
from dataweight import *
from funcv import *
from functhrust import *
n_r = 3 # Round to number of digits
usealldata = 2 # 0 = manual data, 1 = manual data + trim data, 2 = matlab data
if usealldata == 2: # Read all data from dataflat.txt (Configure this file using maindata.py)
print("Using matlab data.")
data_not_si = np.genfromtxt('dataflat.txt')
data_not_si_T = data_not_si
hp_ft = data_not_si_T[3]
Vc_kts = data_not_si_T[6]
Tmta_c = data_not_si_T[5]
FFl_lbhr = data_not_si_T[1]
FFr_lbhr = data_not_si_T[2]
mfu_lb = data_not_si_T[4]
alpha_deg = data_not_si_T[0]
n_test = len(hp_ft)
saved = True
else: # Read data from manually recorded data
print("/!\ Not using matlab data.")
if usealldata == 1: data_not_si = np.concatenate((data_not_si, trim_not_si))
saved = False
data_not_si_T = data_not_si.T
n_test = len(data_not_si)
hp_ft = data_not_si_T[0]
Vc_kts = data_not_si_T[1]
Tmta_c = data_not_si_T[2]
FFl_lbhr = data_not_si_T[3]
FFr_lbhr = data_not_si_T[4]
mfu_lb = data_not_si_T[5]
alpha_deg = data_not_si_T[6]
print('Loading manual data.')
datasaved = np.genfromtxt('datamanual.txt')
alphas = datasaved[0]
CLs = datasaved[1]
CDs = datasaved[2]
# Convert all to SI units
empty_weight = empty_weight_lb * lb_kg
fuel_weight = fuel_weight_lb * lb_kg
m_tot = sum(person_weight_value) + empty_weight + fuel_weight
alpha_rad = np.radians(alpha_deg)
hp = hp_ft * ft_m
Vc = Vc_kts * kts_ms
m = m_tot - mfu_lb * lb_kg
Tmta = Tmta_c + c_k
FFl = FFl_lbhr * lbhr_kgs
FFr = FFr_lbhr * lbhr_kgs
W = m * g
alpha = alpha_rad # Choose between alpha_rad and alpha_deg
# Intermediate steps in reductions to Ve, Ve itself is not used
p = fp(hp)
M = fM(p, Vc)
T = fT(M, Tmta)
a = fa(T)
Vt = fVt(M, a)
rho = frho(p, T)
#Re and M range
mu_air = labda_air * T**(3/2) / (T + C_air)
Re = rho * Vt * c / mu_air
Mrange = str(round(min(M),n_r))+' - '+str(round(max(M),n_r))
Rerange = str(int(min(Re)))+' - '+str(int(max(Re)))
# Lift coefficient
CL = 2 * W / (rho * Vt**2 * S) # Lift coefficient [ ]
#Plotting and finding CLa by linear regression
CLa, intercept, r_value, uu_p_value, uu_std_err = stats.linregress(alpha,CL) # Lots of unused (uu_) values
linregress_x = np.array([min(alpha), max(alpha)])
linregress_y = intercept + CLa * linregress_x
CLalabel = '$C_{L_a}$ = '+str(round(CLa,n_r))+' [5.084], $r^2$ = '+str(round(r_value**2,n_r))
print(CLalabel)
plt.plot(linregress_x, linregress_y, label = CLalabel)
plt.scatter(alpha, CL, label = 'Automatically recorded')
plt.scatter(alphas, CLs, label = 'Manually recorded')
plt.title('$C_L / \\alpha$ at clean configuration,\n Mach range = '+Mrange+', Re range = '+Rerange)
plt.ylabel('$C_L$ [-]')
plt.xlabel(r'$\alpha$ [rad]')
plt.grid()
plt.legend()
plt.savefig('graphclalpha.png')
plt.cla()
plt.clf()
# Calculate thrust using the provided Java executable
print('Running Java program.')
Ttotal = fTtotal(T,n_test, hp, M, FFl, FFr)
print('Java program finished.')
CD = 2 * Ttotal / (rho * Vt**2 * S)
CL_sq = CL**2
#Plotting CLsq-CD, and find e and CD0, using linear regression
slope, CD0, r_value, uu_p_value, uu_std_err = stats.linregress(CL_sq,CD) # Lots of unused (uu_) values
linregress_x = np.array([min(CL_sq), max(CL_sq)])
linregress_y = CD0 + slope * linregress_x
oswald = 1 / (pi * A * slope)
CClabel = '$C_{D_0}$ = '+str(round(CD0,n_r))+' [0.04], $e$ = '+str(round(oswald,n_r))+' [0.8], $r^2$ = '+str(round(r_value**2,n_r))
print(CClabel)
plt.plot(linregress_x, linregress_y, label=CClabel)
plt.scatter(CL_sq, CD, label = 'Automatically recorded')
plt.scatter(CLs**2, CDs, label = 'Manually recorded')
plt.title('$C_D / C_L^2$ at clean configuration,\n Mach range = '+Mrange+', Re range = '+Rerange)
plt.ylabel('$C_D$ [-]')
plt.xlabel('$C_L^2$ [-]')
plt.legend()
plt.grid()
plt.savefig('graphcl2cd.png')
plt.cla()
plt.clf()
#Plotting CL-CD
fit = np.polyfit(CL,CD,2)
x = np.linspace(min(CL),max(CL))
plt.plot(x,fit[0]*x**2 + fit[1] *x + fit[2])
plt.scatter(CL, CD, label = 'Automatically recorded')
plt.scatter(CLs, CDs, label = 'Manually recorded')
plt.title('$C_D / C_L$ at clean configuration,\n Mach range = '+Mrange+', Re range = '+Rerange)
plt.ylabel('$C_D$ [-]')
plt.xlabel('$C_L$ [-]')
plt.legend()
plt.grid()
plt.savefig('graphclcd.png')
plt.cla()
plt.clf()
#Plotting CD-a
CDa, intercept, r_value, uu_p_value, uu_std_err = stats.linregress(alpha,CD) # Lots of unused (uu_) values
linregress_x = np.array([min(alpha), max(alpha)])
linregress_y = intercept + CDa * linregress_x
CDalabel = '$C_{D_a}$ = '+str(round(CDa,n_r))+', $r^2$ = '+str(round(r_value**2,n_r))
print(CDalabel)
#plt.plot(linregress_x, linregress_y, label = CDalabel)
#plt.legend()
plt.scatter(alpha, CD, label = 'Automatically recorded')
plt.scatter(alphas, CDs, label = 'Manually recorded')
plt.title('$C_D / \\alpha$ at clean configuration,\n Mach range = '+Mrange+', Re range = '+Rerange)
plt.ylabel('$C_D$ [-]')
plt.xlabel(r'$\alpha$ [rad]')
plt.legend()
plt.grid()
plt.savefig('graphcdalpha.png')
plt.cla()
plt.clf()
if not saved:
np.savetxt('datamanual.txt', np.array([alpha, CL, CD]))
print('Manual data saved.')
print('Graphs exported.')
| mvdwaals/SVV | domas/mainstationary.py | mainstationary.py | py | 5,350 | python | en | code | 0 | github-code | 90 |
19931484806 | import numpy as np
import pandas as pd
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
def softmax(input):
    # subtract the per-row max for numerical stability before exponentiating
    shifted = input - input.max(axis=1, keepdims=True)
    return np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
# def cross_entropy_loss(Y, T):
# N = len(T)
# return -np.log(Y[np.arange(N), T.astype(np.int32)]).mean()
def cross_entropy_loss(Y, T):
return -(T * np.log(Y)).mean()
def calcualte_accuracy(output, Y):
pY = np.argmax(output, axis = 1)
return np.mean(pY == Y)
#Now we convert the target values to numeric
def label_onehot_encode(Y):
N = len(Y)
output = []
T = np.zeros((N, len(set(Y))))
for row in range(N):
if Y[row] == "OLD":
T[row, 2] = 1
output.append(2)
elif Y[row] == "MIDDLE":
T[row, 1] = 1
output.append(1)
else:
T[row, 0] = 1
output.append(0)
outputNUMPY = np.array(output)
return T, outputNUMPY
#Convert numerical classes back to alphanumeric
def GetClass(Y):
N = len(Y)
T = []
for row in range(N):
if Y[row] == 0:
T.append("OLD")
elif Y[row] == 1:
T.append("MIDDLE")
else:
T.append("YOUNG")
return T
class FFNNUMPY(object):
def __init__(self, M):
self.M = M
def predict(self, X, get_weights = False):
if get_weights:
print("Check if this is running")
            self.W1 = pd.read_csv("W1.csv").to_numpy()
            self.W2 = pd.read_csv("W2.csv").to_numpy()
            self.b1 = pd.read_csv("b1.csv").to_numpy()
            self.b2 = pd.read_csv("b2.csv").to_numpy()
print(self.W1.shape, self.W2.shape, self.b1.shape, self.b2.shape)
hidden = np.tanh(X.dot(self.W1) + self.b1)
output = softmax(hidden.dot(self.W2) + self.b2)
return hidden, output
def initiate_weights(self, N, D, K):
W1 = np.random.randn(D, self.M) / np.sqrt(N + D)
b1 = np.zeros(self.M)
W2 = np.random.randn(self.M, K) / np.sqrt(N + D)
b2 = np.zeros(K)
return W1, b1, W2, b2
def save_weights_func(self):
df = pd.DataFrame(self.W1)
df.to_csv("W1.csv", index = False)
df = pd.DataFrame(self.W2)
df.to_csv("W2.csv", index = False)
df = pd.DataFrame(self.b1)
df.to_csv("b1.csv", index = False)
df = pd.DataFrame(self.b2)
df.to_csv("b2.csv", index = False)
def score_function(self, X, Y, get_weights = False):
_, output = self.predict(X, get_weights)
print(output)
return calcualte_accuracy(output, Y)
def fit(self, X, Y, T, learning_rate = 10e-7, reg = 10e-6, epochs = 20, batch_size = 500, show_fig = True, save_weights = True):
N, D = X.shape
K = len(set(Y))
self.W1, self.b1, self.W2, self.b2 = self.initiate_weights(N, D, K)
num_batches = np.round(N / batch_size).astype(np.int32)
costs = []
for epoch in range(epochs):
X, Y, T = shuffle(X, Y, T)
for batch in range(num_batches):
Xbatch = X[batch * batch_size: batch_size * (batch + 1)]
Tbatch = T[batch * batch_size: batch_size * (batch + 1)]
Ybatch = Y[batch * batch_size: batch_size * (batch + 1)]
hidden, output = self.predict(Xbatch)
pY_T = (output - Tbatch)
self.W2 -= learning_rate * (hidden.T.dot(pY_T) + reg * self.W2)
self.b2 -= learning_rate * (pY_T.sum(axis = 0) + reg * self.b2)
                dZ = pY_T.dot(self.W2.T) * (1 - hidden * hidden)  # tanh'(z) = 1 - tanh(z)^2
self.W1 -= learning_rate * (Xbatch.T.dot(dZ) + reg * self.W1)
self.b1 -= learning_rate * (dZ.sum(axis = 0) + reg * self.b1)
c = cross_entropy_loss(output, Tbatch)
costs.append(c)
a = calcualte_accuracy(output, Ybatch)
print("Epoch", epoch, "Batch", batch, "Costs", c, "Accuracy", a)
if show_fig:
_ = plt.plot(costs)
plt.show()
self.save_weights_func()
def main():
data = pd.read_csv("Training.csv").as_matrix()
X = (data[:, 2:] / 255).astype(np.float32)
Y = data[:,1]
T, Y = label_onehot_encode(Y)
print(X.shape)
#We would need to setup the training and testing samples from the data
X, Y, T = shuffle(X, Y, T)
Xtrain, Ytrain, Ttrain = X[:18000], Y[:18000], T[:18000]
Xtest, Ytest, Ttest = X[18000:], Y[18000:], T[18000:]
model = FFNNUMPY(M = 1400)
model.fit(Xtrain, Ytrain, Ttrain, epochs = 600, learning_rate = 10e-7, batch_size = 1000)
print(model.score_function(Xtest, Ytest))
#We will now read the Test dataset
data = pd.read_csv("Test.csv").as_matrix()
X = (data[:,1:] / 255).astype(np.float32)
ID = data[:,0]
_, TestResult = model.predict(X)
pY = np.argmax(TestResult, axis = 1)
Submission = pd.DataFrame({"Class" : np.array(GetClass(pY)),
"ID" : ID})
Submission.to_csv("submit.csv", index = False)
if __name__ == "__main__":
main()
| sid86malhotra/Actor-images | FFN in Numpy.py | FFN in Numpy.py | py | 5,143 | python | en | code | 0 | github-code | 90 |
19031026220 | from math import sqrt
x = 9.8**201
y = 10.2**199
# Naive formula: squaring values of magnitude ~1e200 exceeds the float range (~1.8e308)
try:
    z1 = sqrt(x**2 + y**2)
    print(z1)
except OverflowError:
    print("z1 overflows: x**2 and y**2 are too large for a float")
# Rescaled formula: divide first, so every intermediate result stays representable
z2 = y*sqrt(pow((x/y), 2)+1)
print(z2)
# Conclusion: the first expression squares numbers that are already enormous, which can
# exceed the floating-point range. In the second, x and y are divided first; the quotient
# of two huge numbers of similar magnitude is small, so the whole computation stays
# safely within range.
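# For reference, math.hypot applies the same rescaling internally, so it computes
# this length without overflow (standard library; shown as a hedged illustration):
from math import hypot
print(hypot(x, y))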
| pstatkiewicz/lista-4 | zad 3.py | zad 3.py | py | 504 | python | pl | code | 0 | github-code | 90 |
1281047765 | import logging
import traceback
from flask_restplus import Api
from itsajungleoutthere import settings
from sqlalchemy.orm.exc import NoResultFound
log = logging.getLogger(__name__)
api = Api(version='1.0', title='Mini Dataguru API',
description='A simple web API to help a Machine Learning team organize its data')
@api.errorhandler
def default_error_handler(e):
message = 'An unhandled exception occurred.'
log.exception(message)
    if not settings.FLASK_DEBUG:
        return {'message': message}, 500
    # In debug mode, return nothing so the exception propagates and the full traceback is shown.
@api.errorhandler(NoResultFound)
def database_not_found_error_handler(e):
"""No results found in database"""
log.warning(traceback.format_exc())
return {'message': 'A database result was required but none was found.'}, 404
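# Hedged wiring sketch: how this `api` object is typically mounted on a Flask app.
# The blueprint name and url_prefix here are illustrative assumptions, not taken
# from this repository:
def init_app(app):
    from flask import Blueprint
    blueprint = Blueprint('api', __name__, url_prefix='/api')
    api.init_app(blueprint)
    app.register_blueprint(blueprint)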
| Policonickolu/itsajungleoutthere | itsajungleoutthere/api/restplus.py | restplus.py | py | 766 | python | en | code | 0 | github-code | 90 |
from typing import List  # needed to run the annotation outside LeetCode's harness

class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        '''
        Kadane's algorithm: keep a running sum, and reset it to 0 whenever it
        drops below 0, since a negative prefix can only reduce any later subarray.
        Example: [-2,1,-3,4,-1,2,1,-5,4] -> 6, from the subarray [4,-1,2,1]
        '''
max_value = -float('inf')
temp = 0
for i in nums:
temp+=i
max_value = max(max_value,temp)
if temp < 0:
temp = 0
return max_value | RishabhSinha07/Competitive_Problems_Daily | 53-maximum-subarray/53-maximum-subarray.py | 53-maximum-subarray.py | py | 511 | python | en | code | 1 | github-code | 90 |
113041697 | #!/usr/bin/env python3
"""This is a multi-line commenter
So, here we can describe sucintly whats this scrip do.
Atention, keep this block in 20 lines.
"""
__version__ = "0.0.1"
__author__ = "Raphael Viana"
__license__ = "Unlicense"
import os
# Read the LANG environment variable; if it is not set, default to "en_US".
# Keep only the first five letters (e.g. "en_US" from "en_US.UTF-8"): [:5]
current_language = os.getenv("LANG", "en_US")[:5]
msg = "Hello, World!"
| rnvdev/python-scripts | python-base/hello-world.py | hello-world.py | py | 487 | python | en | code | 0 | github-code | 90 |
7047374650 | import copy
import re
bag_rules = {}
# process input file
with open('input.txt') as f:
for line in f:
# remove 'bag(s)' strings and final periods
# number of bags also does not matter so remove those too
clean_line = re.sub(r'(bags?|\.|[0-9])', '', line)
bag_rule_key = re.split(r'contain ', clean_line)[0].strip()
bag_rule_value = [bag.strip() for bag in re.split(r'contain ', clean_line)[1].split(',')]
bag_rules[bag_rule_key] = bag_rule_value
# EXAMPLE BAG RULES
# bag_rules = {
# 'light red': ['bright white', 'muted yellow'],
# 'dark orange': ['bright white', 'muted yellow'],
# 'bright white': ['shiny gold'],
# 'muted yellow': ['shiny gold', 'faded blue'],
# 'shiny gold': ['dark olive', 'vibrant plum'],
# 'dark olive': ['faded blue', 'dotted black'],
# 'vibrant plum': ['faded blue', 'dotted black'],
# 'faded blue': [],
# 'dotted black': []
# }
bag_rules_to_check = copy.deepcopy(bag_rules)
# returns true if any item in list a is found in list b
def intersects(a, b):
return len(set(a).intersection(set(b))) > 0
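# Cheap self-checks for the helper (a hedged addition; they run once at import time):
assert intersects(["shiny gold"], ["faded blue", "shiny gold"])
assert not intersects(["shiny gold"], ["faded blue"])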
contains = []
cannot_contain = []
while len(bag_rules_to_check) > 0:
for bag_colour in bag_rules.keys():
if bag_colour in list(bag_rules_to_check.keys()):
            if 'shiny gold' in bag_rules[bag_colour] or intersects(contains, bag_rules[bag_colour]):
                # holds shiny gold directly, or holds a colour already known to contain it
                contains.append(bag_colour)
                bag_rules_to_check.pop(bag_colour)
elif not bag_rules[bag_colour]: # dead end
cannot_contain.append(bag_colour)
bag_rules_to_check.pop(bag_colour)
elif not intersects(bag_rules_to_check.keys(), bag_rules[bag_colour]): # contains no unchecked colours
cannot_contain.append(bag_colour)
bag_rules_to_check.pop(bag_colour)
print(len(contains))
| naobot/advent-of-code | 2020/day/7/part1.py | part1.py | py | 1,879 | python | en | code | 0 | github-code | 90 |
5680784871 | def input(path):
f = open(path, "r")
lines = f.read().splitlines()
f.close()
return lines[0]
# Parse 'target area: x=A..B, y=C..D' (13 characters of prefix) into [A, B] and [C, D]
xs, ys = [[int(j) for j in i[2:].split('..')] for i in input('Day17/in.txt')[13:].split(', ')]
y1 = abs(min(ys))
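# Hedged cross-check: the best launch speed is vy = y1 - 1 (the probe falls back through
# y = 0 at speed -vy, so the next step of size vy + 1 must not overshoot the target floor).
# Simulating the ascent confirms the closed-form peak printed below:
def peak_height(vy):
    y = 0
    while vy > 0:  # the probe rises until its vertical speed reaches zero
        y += vy
        vy -= 1
    return y
assert peak_height(y1 - 1) == y1 * (y1 - 1) // 2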
print(y1 * (y1 - 1) // 2) | zhangandy437/aoc-2021 | Day17/p1.py | p1.py | py | 251 | python | en | code | 0 | github-code | 90
13584478158 | import itertools
import pydot_ng as pd
from load import load_all
def apply_style(floor, map, name):
style = {}
label_style = {
'label': '''<
<table cellborder="0" border="0">
<tr>
<td>{floor}</td>
</tr>
<tr>
<td><img src="data/st_itemicon{icon_id}.png" scale="TRUE"/></td>
</tr>
</table>
>'''.format(icon_id=map['chest'][-1], floor=name),
}
shop_style = {'fillcolor': 'orange'} if floor in {8, 43, 63, 97} else {}
checkpoint_style = {'fillcolor': 'yellow'} if map['chest'][-1] == 29 else {}
boss_style = {'fillcolor': 'gray'} if floor in {10, 25, 40, 55, 70, 85, 100} else {}
style.update(label_style)
style.update(shop_style)
style.update(checkpoint_style)
style.update(boss_style)
return style
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
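# pairwise behaves like itertools.pairwise (available since Python 3.10); a quick self-check:
assert list(pairwise("abc")) == [("a", "b"), ("b", "c")]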
# Hand-traced route through the tower; pairwise turns it into floor-to-floor edges.
# apply_edge_style discards each pair once drawn, so every red edge is emitted exactly once.
any_route = set(pairwise([1,2,3,4,5,6,8,9,10,11,12,18,13,18,12,14,15,19,20,21,24,25,26,27,28,31,32,33,34,35,39,40,41,45,46,50,52,51,46,48,49,41,50,52,55,56,58,60,62,60,58,61,65,67,70,71,73,75,76,81,82,84,82,81,76,75,73,85,98,99,86,87,88,98,99,100,101]))
def apply_edge_style(a, b):
style = {}
if (a, b) in any_route or (b, a) in any_route:
any_route.discard((a, b))
any_route.discard((b, a))
style['color'] = 'red'
return style
def create_graph(maps):
graph_args = {
'graph_type': 'graph',
'bgcolor': 'white',
'overlap': 'prism',
'overlap_scaling': 10,
'ratio': 1.5,
}
default_style = {
'style': 'filled',
'fillcolor': 'white',
'shape': 'box',
'margin': 0,
}
graph = pd.Dot(**graph_args)
nodes = {}
for floor, map in maps.items():
node_name = '{}F'.format(floor)
style = dict(default_style)
style.update(apply_style(floor, map, node_name))
nodes[floor] = pd.Node(node_name, **style)
nodes[101] = pd.Node('Credits', **default_style)
for node in nodes.values():
graph.add_node(node)
for floor, map in maps.items():
floor_node = nodes[floor]
for target_floor, _ in map['targets']:
if target_floor < floor:
continue
style = {}
style.update(apply_edge_style(floor, target_floor))
target_node = nodes[target_floor]
edge = pd.Edge(floor_node, target_node, **style)
graph.add_edge(edge)
return graph
maps = load_all()
graph = create_graph(maps)
graph.write('test.dot')
| Cyanogenoid/asakura-p-routing | make_graph.py | make_graph.py | py | 2,667 | python | en | code | 0 | github-code | 90 |