seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
24946374273 | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, ForeignKey, Integer, String
db = SQLAlchemy()
class User(db.Model):
    """A registered account holding login credentials and activation state."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(db.String(80), unique=False, nullable=False)
    is_active = db.Column(db.Boolean(), unique=False, nullable=False)

    def __repr__(self):
        return '<User %r>' % self.id

    def serialize(self):
        """Return a JSON-safe dict of this user.

        The password is intentionally omitted -- exposing it would be a
        security leak.
        """
        return {"id": self.id, "email": self.email}
class Planets(db.Model):
    """A planet record with its physical attributes."""
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(250), nullable=False)
    rotation = db.Column(Integer, nullable=True)
    climate = db.Column(String(250), nullable=True)
    gravity = db.Column(String(250), nullable=True)
    terrain = db.Column(String(250), nullable=True)
    population = db.Column(Integer, nullable=True)

    def serialize(self):
        """Return a JSON-safe dict with every column of this planet."""
        fields = ("id", "name", "rotation", "climate",
                  "gravity", "terrain", "population")
        return {field: getattr(self, field) for field in fields}
def get_planets():
    """Return every planet in the database as a list of serialized dicts."""
    return [planet.serialize() for planet in Planets.query.all()]
class Characters(db.Model):
    """A character record with physical attributes."""
    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(250), nullable=False)
    height = db.Column(Integer, nullable=True)
    hair_color = db.Column(String(250), nullable=True)
    skin_color = db.Column(String(250), nullable=True)
    eye_color = db.Column(String(250), nullable=True)
    birth_year = db.Column(String(250), nullable=True)
    gender = db.Column(String(250), nullable=True)

    def serialize(self):
        """Return a JSON-safe dict with every column of this character."""
        return {
            "id": self.id,
            "name": self.name,
            "height": self.height,
            "hair_color": self.hair_color,
            "skin_color": self.skin_color,
            "eye_color": self.eye_color,
            "birth_year": self.birth_year,
            # Fixed: the key was misspelled "genderr"; it now matches the
            # column name, so API consumers get the expected field.
            "gender": self.gender,
        }
def get_characters():
    """Return every character in the database as a list of serialized dicts."""
    return [character.serialize() for character in Characters.query.all()]
class Favorites(db.Model):
    """Join table linking a user to a favorited planet and/or character."""
    id = db.Column(Integer, primary_key=True)
    user_id = db.Column(Integer, ForeignKey(User.id))
    planet_id = db.Column(Integer, ForeignKey(Planets.id))
    character_id = db.Column(Integer, ForeignKey(Characters.id))
    planets = db.relationship("Planets")
    user = db.relationship("User")
    characters = db.relationship("Characters")

    def serialize(self):
        """Return a JSON-safe dict of this favorite's foreign keys."""
        keys = ("id", "user_id", "planet_id", "character_id")
        return {key: getattr(self, key) for key in keys}
| Sergei1607/Star-Wars-API | src/models.py | models.py | py | 3,048 | python | en | code | 0 | github-code | 13 |
19647068803 | #%% Spam detection with keras
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
#%%
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from tensorflow import keras
import matplotlib.pyplot as plt
#%%
# Load the labelled SMS dataset: CLASS holds the spam/ham label,
# SMS holds the raw message text.
Data = pd.read_csv('Spam-Classification.csv')
Class_raw = Data["CLASS"]
TextMessage = Data["SMS"]
#%%
# tokenizer-lemmatizer
# Stopwords are loaded once into a set: the original re-fetched the NLTK
# stopword list and scanned it linearly for every single token.
_STOPWORDS = set(stopwords.words('english'))

def customtokenize(text):
    """Tokenize *text*, drop English stopwords, and lemmatize the rest.

    Used as the TfidfVectorizer tokenizer. (The parameter was renamed from
    `str`, which shadowed the builtin.)
    """
    tokens = nltk.word_tokenize(text)
    nostop = [token for token in tokens if token not in _STOPWORDS]
    return [lemmatizer.lemmatize(word) for word in nostop]
# feature extraction
# TF-IDF over the whole SMS corpus, using the custom NLTK tokenizer above.
vectorizer = TfidfVectorizer(tokenizer=customtokenize)
tfidf=vectorizer.fit_transform(TextMessage)
tfidf_array = tfidf.toarray()
# Labels: string classes -> integers -> one-hot vectors (2 classes).
label_encoder = preprocessing.LabelEncoder()
Class = label_encoder.fit_transform(Class_raw) # integer coding
Class = tf.keras.utils.to_categorical(Class,2) # one-hot coding
# dataset split: hold out 10% for the final evaluation
X_train,X_test,Y_train,Y_test = train_test_split(tfidf_array, Class, test_size=0.10)
#%% building the model
# Two hidden ReLU layers; softmax output over the 2 classes (ham/spam).
NB_CLASSES=2
N_HIDDEN=32
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(N_HIDDEN,input_shape=(X_train.shape[1],),name='Hidden-Layer-1',activation='relu'))
model.add(keras.layers.Dense(N_HIDDEN,name='Hidden-Layer-2',activation='relu'))
model.add(keras.layers.Dense(NB_CLASSES,name='Output-Layer',activation='softmax'))
# NOTE(review): no optimizer is passed to compile(), so Keras falls back to
# its default -- confirm that is intended.
model.compile(loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
#%% training the model
BATCH_SIZE=256
EPOCHS=10
VALIDATION_SPLIT=0.2
history=model.fit(X_train,Y_train,batch_size=BATCH_SIZE,epochs=EPOCHS,verbose=1,validation_split=VALIDATION_SPLIT)
# Plot the per-epoch training accuracy, then evaluate on the held-out split.
pd.DataFrame(history.history)["accuracy"].plot(figsize=(8, 5))
plt.title("Accuracy improvements with Epoch")
plt.show()
print("\nEvaluation using the test Dataset :\n------------------------------------")
model.evaluate(X_test,Y_test)
#%%
def classify_sms(text):
    """Vectorize *text* with the fitted TF-IDF vectorizer, predict with the
    trained model, and map the class index back to its original label.

    Extracted to remove the duplicated predict-and-decode code that was
    copy-pasted for each sample message.
    """
    features = vectorizer.transform([text]).toarray()
    predicted_class = np.argmax(model.predict(features), axis=1)
    return label_encoder.inverse_transform(predicted_class)

test_sms_1 = "We are pleased to inform you"
test_label_1 = classify_sms(test_sms_1)
print('text message:', test_sms_1, '; Label: ', test_label_1[0])

test_sms_2 = "OK let's see what happens"
test_label_2 = classify_sms(test_sms_2)
print('text message:', test_sms_2, '; Label: ', test_label_2[0])
| FarshadGVeshki/Deep_learning_classification_models | Spam_classification_keras.py | Spam_classification_keras.py | py | 2,765 | python | en | code | 0 | github-code | 13 |
25578873972 | #!/usr/bin/env python3
# coding: utf-8
import random
import z3
import itertools
from functools import reduce
from z3.z3util import get_vars
# Approach taken from:
# Rafael Dutra, Kevin Laeufer, Jonathan Bachrach and Koushik Sen:
# Efficient Sampling of SAT Solutions for Testing, ICSE 2018.
# https://github.com/RafaelTupynamba/quicksampler/
# TODO: The generated samples are currently not checked for whether they satisfy the given constraints!
# https://stackoverflow.com/questions/39299015/sum-of-all-the-bits-in-a-bit-vector-of-z3
def bvcount(b):
    """Build an n-bit z3 expression for the population count of *b*."""
    width = b.size()
    # Zero-extend each single-bit slice back to `width` bits so the
    # additions cannot overflow, then sum them all.
    padded_bits = [
        z3.Concat(z3.BitVecVal(0, width - 1), z3.Extract(i, i, b))
        for i in range(width)
    ]
    return reduce(lambda acc, bit: acc + bit, padded_bits)

MAX_LEVEL = 6  # cap on mutation-combination depth used by bvsampler
def cast_long_to_str(x, n):
    """Render integer *x* as lowercase hex, zero-padded to n/4 digits.

    Mirrors angr's state_plugins/solver.py _cast_to; the .decode('hex')
    step of the Python 2 original stays dropped.
    """
    hex_digits = n // 4
    return format(x, 'x').zfill(hex_digits)
def bvsampler(constraints, target):
    """Generate distinct satisfying values of the bitvector `target` under
    `constraints`, QuickSampler-style (Dutra et al., ICSE 2018): find a
    solution close to a random guess, then derive further samples by
    flipping individual bits and combining the resulting mutations.
    Note (see module header): combined candidates are NOT re-checked
    against the constraints before being yielded.
    """
    # target can only be a variable???
    n = target.size()
    solver = z3.Optimize()
    solver.add(constraints)
    # `delta` marks the bits where `result` differs from the random guess;
    # minimizing its popcount keeps each base sample close to the guess.
    delta = z3.BitVec('delta', n)
    result = z3.BitVec('result', n)
    solver.add(result == target)
    solver.minimize(bvcount(delta))
    results = set()  # values yielded so far, to avoid duplicates
    while True:
        # print('---------------------------')
        guess = z3.BitVecVal(random.getrandbits(n), n)
        solver.push()
        solver.add(result ^ delta == guess)
        if solver.check() != z3.sat:
            break
        model = solver.model()
        result0 = model[result].as_long()  # base sample for this round
        solver.pop()
        results.add(result0)
        yield result0
        # print('solver: ' + str(solver))
        # print('guess: ' + str(guess))
        # print('model: ' + str(model))
        # value -> mutation level (how many single-bit flips were combined)
        mutations = {}
        solver.push()
        for i in range(n):
            # print('mutating bit ' + str(i))
            solver.push()
            goal = z3.BitVecVal(result0, n)
            solver.add(result ^ delta == goal)
            # Force bit i of delta: look for a solution that differs from
            # result0 in (at least) bit i.
            solver.add(z3.Extract(i, i, delta) == 0x1)
            if solver.check() == z3.sat:
                model = solver.model()
                result1 = model[result].as_long()
                if result1 not in results:
                    results.add(result1)
                    yield result1
                    new_mutations = {}
                    new_mutations[result1] = 1
                    # Combine the fresh mutation with earlier ones (up to
                    # MAX_LEVEL deep) to derive more candidates without
                    # further solver calls.
                    for value in mutations:
                        level = mutations[value]
                        if level > MAX_LEVEL:
                            continue
                        candidate = (result0 ^ ((result0 ^ value) | (result0 ^ result1)))
                        # print('yielding candidate ' + str(candidate) + ' at level ' + str(level))
                        if candidate not in results:
                            results.add(candidate)
                            yield candidate
                            new_mutations[candidate] = level + 1
                    mutations.update(new_mutations)
            solver.pop()
        solver.pop()
def test_sampler():
    """Manual smoke test: print 16-bit samples for a small constraint."""
    x = z3.BitVec('x', 16)
    y = z3.BitVec('y', 16)
    # sample = bvsampler(z3.And(x > 1000, x < 10000, z3.Or(x < 4000, x > 5000)), x)
    sample = bvsampler(z3.And(x > 1000, y < 10000, z3.Or(x < 4000, x > 5000)), x)
    print("Hello")
    # bvsampler is a generator; the loop runs until the solver cannot
    # produce a fresh satisfying value.
    for x in sample:
        y = cast_long_to_str(x, 16)
        print('possible solution: ' + y)
def quicksampler_for_file(fname):
    """Parse an SMT2 file and stream QuickSampler solutions for its first
    free variable, printing each as a 4-hex-digit string.

    Z3 parse/solve errors are caught and printed rather than propagated.
    """
    try:
        fvec = z3.parse_smt2_file(fname)
        formula = z3.And(fvec)
        # Renamed from `vars`, which shadowed the builtin.
        variables = get_vars(formula)
        print("start")
        sample = bvsampler(formula, variables[0])
        for x in sample:
            y = cast_long_to_str(x, 16)
            print('possible solution: ' + y)
    except z3.Z3Exception as e:
        print(e)
        return
if __name__ == '__main__':
    # test_sampler()
    # demo entry point: sample solutions for the bundled example formula
    quicksampler_for_file('../test/t1.smt2')
| ZJU-Automated-Reasoning-Group/arlib | arlib/sampling/finite_domain/quick_sampler.py | quick_sampler.py | py | 3,889 | python | en | code | 6 | github-code | 13 |
18056552508 | from src.circle import Circle
from math import pi
def test_get_area():
    """Check Circle.get_area for a few radii."""
    # A circle of radius 1 has area exactly pi
    c1 = Circle(radius=1)
    assert c1.get_area() == pi
    # A circle of radius 10 has area 100*pi (~314.16), so > 314
    c2 = Circle(radius=10)
    assert c2.get_area() > 314
    # A circle of radius 100 has a non-zero area
    c3 = Circle(radius=100)
    assert c3.get_area() != 0
def test_get_circumference():
    """Check Circle.get_circumference for a few radii."""
    # A circle of radius 5 has circumference 2*pi*5 ~ 31.42
    c1 = Circle(radius=5)
    assert round(c1.get_circumference(), 2) == 31.42
    # A circle of radius 10 has circumference 2*pi*10 ~ 62.83, so > 62
    c2 = Circle(radius=10)
    assert c2.get_circumference() > 62
16765325692 | import setuptools
# The PyPI long description comes from the README; the package version is
# maintained in a separate version.txt file.
with open("README.md", "r") as fh:
    long_description = fh.read()
with open("version.txt", "r") as fh:
    version = fh.read().strip()
setuptools.setup(
    name="pogona",
    version=version,
    author="Data Communications and Networking (TKN), TU Berlin",
    author_email="stratmann@ccs-labs.org",
    description="The Pogona simulator for macroscopic molecular communication",
    long_description=long_description,
    url="https://git.cs.upb.de/mamoko/mamoko",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    entry_points={
        # installs the `pogona` console command
        'console_scripts': [
            "pogona = pogona:start_cli"
        ],
    },
    install_requires=[
        "openfoamparser",
        "numpy",
        "ruamel.yaml",
        "coloredlogs",
        "argparse",
        "scipy",
    ],
)
| tkn-tub/pogona | setup.py | setup.py | py | 915 | python | en | code | 3 | github-code | 13 |
16249797865 | from fcm import FCM
from point import Point
# Load sample points from the CSV (first line is a header), then run
# fuzzy c-means clustering over them.
points = []
# The original left the file handle open; a with-block closes it reliably.
with open("sample1.csv", "r") as f:
    f.readline()  # skip the header row
    for line in f:
        values = [float(v) for v in line.strip().split(",")]
        point = Point()
        point.setValue(values)
        points.append(point)

fcm = FCM(points, m=2, minCluster=2, maxCluster=10)
fcm.run()
| mohrobati/FuzzyCMeans | main.py | main.py | py | 371 | python | en | code | 0 | github-code | 13 |
23449434812 | from solvers import Solver
'''
The TestRotate solver works through the matches from front to back.
At every index it tries all not yet used pairings.
The score is ignored, every combination is only judged based on the individual check.
'''
class TestRotate(Solver):
    """Brute-force matcher: fixes pairs front to back, trying every unused
    candidate at the current index until the individual check passes."""

    def __init__(self, matches = 10):
        Solver.__init__(self, matches)
        self.matches = matches
        self.reset()
        print('Bumblesort solver initialized!')

    def reset(self):
        """Restart the search from an empty matching."""
        self.base = [ x for x in range(self.matches) ]
        self.matched = [ None for b in self.base ]
        self.index = 0      # next pair position to fix
        self.testIndex = 0  # candidate currently tried at self.index
        return self

    def predict(self):
        """Return the current matching with the candidate under test filled
        in at self.index and all remaining slots padded arbitrarily."""
        prediction = self.matched.copy()
        prediction[self.index] = self.base[self.testIndex]
        return self.fill(prediction)

    def performCheck(self):
        # This solver always relies on the individual check.
        return True

    def checkIndex(self):
        return self.index

    def updateCheck(self, check):
        """Consume the result of the last check: lock in the candidate on
        success, otherwise advance to the next unused candidate."""
        if check:
            self.matched[self.index] = self.base[self.testIndex]
            self.index += 1
            self.resetTestIndex()
        else:
            self.updateTestIndex()

    def resetTestIndex(self):
        """Point testIndex at the first base value not yet matched."""
        self.testIndex = 0
        self._skip_used()

    def updateTestIndex(self):
        """Advance testIndex to the next base value not yet matched."""
        self.testIndex += 1
        self._skip_used()

    def _skip_used(self):
        # Shared scan extracted from resetTestIndex/updateTestIndex. The
        # bounds check now comes first, so running out of candidates raises
        # the intended ValueError instead of an IndexError from indexing
        # self.base[len(self.base)].
        while self.testIndex < len(self.base) and self.base[self.testIndex] in self.matched:
            self.testIndex += 1
        if self.testIndex == len(self.base):
            raise ValueError('Unable to find matching value for pair ' + str(self.index))

    def fill(self, data):
        """Fill every None slot in *data* with a still-unused base value."""
        availableEntries = [ b for b in self.base if b not in data ]
        availableIndices = [ i for i in range(len(data)) if data[i] == None ]
        for e, i in zip(availableEntries, availableIndices):
            data[i] = e
        return data
| cestcedric/PerfectMatch | solvers/TestRotate.py | TestRotate.py | py | 2,050 | python | en | code | 0 | github-code | 13 |
3998604520 | # SPDX-License-Identifier: GPL-2.0
"Record and report data access pattern in realtime"
import argparse
import os
import signal
import subprocess
import sys
import _damon
import _damon_args
def cleanup():
    """Kill the monitored target command if we spawned one and it is still
    running (poll() is None while the child is alive)."""
    # `is None` instead of `== None` (PEP 8 identity comparison).
    if target_type == _damon_args.target_type_cmd and cmd_pipe.poll() is None:
        cmd_pipe.kill()
def sighandler(signum, frame):
    """SIGINT/SIGTERM handler: report the signal and kill the target."""
    print('\nsignal %s received' % signum)
    cleanup()
def set_argparser(parser):
    """Register damo-monitor's command line arguments on *parser*."""
    parser.add_argument('target', type=str, metavar='<target>',
            help='monitoring target (command, pid or \'paddr\')')
    parser.add_argument('--report_type', type=str, choices=['heats', 'wss'],
            default='heats', help='report type')
    parser.add_argument('--delay', type=float, metavar='<seconds>', default=3,
            help='deplay between updates in seconds.')
    parser.add_argument('--count', type=int, metavar='<count>', default=0,
            help='number of updates.')
def main(args=None):
    """Repeatedly record DAMON access patterns for the target and print a
    report (heatmap or working-set size) every args.delay seconds."""
    if not args:
        parser = argparse.ArgumentParser()
        set_argparser(parser)
        args = parser.parse_args()

    _damon.ensure_root_permission()

    global target_type
    global cmd_pipe

    # Make Ctrl-C / kill also terminate the spawned target command.
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)

    target = args.target
    target_type = _damon_args.deduced_target_type(target)
    if target_type is None:
        print('invalid target \'%s\'' % target)
        exit(1)
    if target_type == _damon_args.target_type_explicit and target == 'paddr':
        pass
    elif target_type == _damon_args.target_type_cmd:
        cmd_pipe = subprocess.Popen(target, shell=True, executable='/bin/bash',
                stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        target = cmd_pipe.pid
    else:
        pid = int(target)  # only validates that the target is a numeric pid

    bindir = os.path.dirname(sys.argv[0])
    damo = os.path.join(bindir, 'damo')

    record_cmd = 'timeout %s %s record \"%s\"' % (args.delay, damo, target)
    report_cmd = [damo]
    if args.report_type == 'heats':
        report_cmd += 'report heats --heatmap stdout --resol 10 80'.split()
    else:
        report_cmd += ['report', 'wss']

    nr_reports = 0
    # args.count == 0 means: keep monitoring forever.
    while not args.count or nr_reports < args.count:
        # Stop as soon as the monitored command has exited.
        if (target_type == _damon_args.target_type_cmd and
                cmd_pipe.poll() is not None):
            break
        try:
            subprocess.check_output(record_cmd, shell=True,
                    stderr=subprocess.STDOUT, executable='/bin/bash')
        except subprocess.CalledProcessError:
            # recording is best-effort; just try again next round
            pass
        try:
            output = subprocess.check_output(report_cmd).decode()
            if args.report_type == 'heats':
                # drop the '#' comment lines of the heatmap output
                for line in output.strip().split('\n'):
                    if not line.startswith('#'):
                        print(line)
            else:
                print(output)
        except subprocess.CalledProcessError:
            pass
        nr_reports += 1
    cleanup()
# allow use both as a script and as an importable main()
if __name__ == '__main__':
    main()
| awslabs/damo | damo_monitor.py | damo_monitor.py | py | 3,030 | python | en | code | 119 | github-code | 13 |
42406602061 | #!/usr/bin/env python3
import os
import requests
import subprocess
import json
import re
OUTPATH = os.getenv('OUTPATH') or 'json'
CODECS_JSON = 'https://browser-resources.s3.yandex.net/linux/codecs.json'
STRINGS_CMD = os.getenv('STRINGS') or 'strings'
BROWSERS = {
'yandex-browser-stable': (os.getenv('STABLE'), 'browser'),
'yandex-browser-beta': (os.getenv('BETA'), 'browser-beta'),
}
def get_codec_sources():
    """Fetch and parse the codecs.json index.

    Returns the parsed mapping on success; prints a diagnostic and returns
    None on an HTTP failure (callers must handle that).
    """
    response = requests.get(CODECS_JSON)
    if response.ok:
        # Response.json() parses the body directly (equivalent to the old
        # json.loads(response.text), with proper encoding handling).
        return response.json()
    else:
        print('Failed to fetch codec links')
def get_links(name):
    """Return the codec-source list matching the Chromium version embedded
    in the given browser channel's binary.

    Reads the channel version from OUTPATH/<name>.json, scans the installed
    yandex_browser binary (via `strings`) for version strings sharing the
    same patch number, and looks the Chromium version up in codecs.json.
    """
    nix_path, folder_name = BROWSERS[name]
    browser_cmd = f'{nix_path}/opt/yandex/{folder_name}/yandex_browser'
    filename = "/".join([OUTPATH, f'{name}.json'])
    version = None
    with open(filename, "r") as h:
        text = h.read()
        json_data = json.loads(text)
        version = json_data['version']
    # patch component of the browser version (last dotted field before '-')
    patch = version.split('-')[0].split('.')[-1]
    result = subprocess.run(
        [STRINGS_CMD, browser_cmd],
        capture_output=True,
        text=True
    )
    if result.returncode == 0:
        browser_cmd_strings = result.stdout.strip().split('\n')
        # unique version-like strings in the binary ending with the same patch
        versions = list(set(filter(
            lambda str: re.match(r'\d*\.\d*\.\d*\.' + patch, str),
            browser_cmd_strings
        )))
        # keep a candidate that is not the browser's own version string
        # NOTE(review): re.match(str, version) passes the candidate as the
        # *pattern* argument -- confirm the argument order is intended.
        chrver = list(filter(
            lambda str: not re.match(str, version),
            versions
        ))[0]
        chrver_no_patch = '.'.join(chrver.split('.')[0:-1])
        codec_sources = get_codec_sources()[chrver_no_patch]
        return codec_sources
    else:
        print(f'Failed to read file {browser_cmd}')
def prefetch_url(url):
    """Run `nix-prefetch-url` on *url*; return its stripped stdout (the
    store hash) on success, or None when the command fails."""
    completed = subprocess.run(
        ['nix-prefetch-url', url],
        capture_output=True,
        text=True
    )
    if completed.returncode != 0:
        return None
    return completed.stdout.strip()
def process_links(url_list):
    """Prefetch candidate codec URLs in order; return url/version/sha256
    metadata for the FIRST one that downloads successfully, or None (implicitly)
    when every URL fails."""
    url_count = len(url_list)
    failed = 0
    for url in url_list:
        print(f'Failed urls: {failed} out of {url_count}')
        result = prefetch_url(url)
        if not result:
            failed += 1
            continue
        else:
            # filename pattern '<pkg>_<version>-<rest>': extract the version
            version = url.split('/')[-1]\
                .split('_')[1]\
                .split('-')[0]
            return {
                'url': url,
                'version': version,
                'sha256': result
            }
if __name__ == '__main__':
    # For each configured browser channel, resolve its codec URLs and write
    # <name>-codecs.json with the first URL that prefetched successfully.
    for browser in BROWSERS.keys():
        print(f'Processing {browser}')
        links = get_links(browser)
        json_data = process_links(links)
        if json_data:
            with open(f'{OUTPATH}/{browser}-codecs.json', "w") as h:
                json_string = json.dumps(json_data)
                h.write(json_string)
        else:
            print("Error fetching codecs")
| teu5us/nix-yandex-browser | update/codecs.py | codecs.py | py | 2,880 | python | en | code | 1 | github-code | 13 |
32058793370 | '''
16
'''
from PIL import Image

# Each row of the GIF contains a marker pixel with value 195; rotating every
# row so the marker becomes its first pixel aligns the picture.
img = Image.open('mozart.gif')
tar = 195  # marker pixel value -- presumably a palette index; verify visually
i = 0
ll = list(img.getdata())
# Chop the flat pixel list into rows of img.size[0] pixels each.
for l in [list(t) for t in zip(*[iter(ll)]*img.size[0])]:
    pos = l.index(tar)
    l = l[pos:] + l[0:pos]  # rotate the row so the marker starts it
    # Write the rotated row back over row i of the image.
    img2 = Image.new(img.mode, (img.size[0], 1))
    img2.putdata(l)
    img.paste(img2, (0, i, img.size[0], i+1))
    i += 1
img.show()
# Image.fromstring()
| aihex/pythonchallenge | level16.py | level16.py | py | 374 | python | en | code | 0 | github-code | 13 |
22036055205 | #
# @lc app=leetcode.cn id=540 lang=python3
#
# [540] 有序数组中的单一元素
#
from typing import List
# @lc code=start
class Solution:
    def singleNonDuplicate(self, nums: List[int]) -> int:
        """Binary-search the only element that appears once in a sorted
        array where every other element appears exactly twice."""
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] == nums[mid - 1]:
                # mid closes a pair; if an even number of elements sits to
                # its left, the single element must be on the right side.
                if (mid - lo - 1) % 2 == 0:
                    lo = mid + 1
                else:
                    hi = mid - 2
            elif nums[mid] == nums[mid + 1]:
                # mid opens a pair; same parity argument, mirrored.
                if (mid - lo) % 2 == 0:
                    lo = mid + 2
                else:
                    hi = mid - 1
            else:
                # mid matches neither neighbour: it is the single element.
                return nums[mid]
        return nums[lo]
# @lc code=end
def test():
    """Smoke-test singleNonDuplicate against the two LeetCode examples."""
    assert Solution().singleNonDuplicate([1, 1, 2, 3, 3, 4, 4, 8, 8]) == 2
    assert Solution().singleNonDuplicate([3, 3, 7, 7, 10, 11, 11]) == 10
| revang/leetcode | 540.有序数组中的单一元素.py | 540.有序数组中的单一元素.py | py | 955 | python | en | code | 0 | github-code | 13 |
1098109251 | from turtle import Turtle, Screen
import pandas as pd
from get_name import Name
# --- screen setup: show the blank USA map as the turtle shape ---
turtle = Turtle()
screen = Screen()
image = 'blank_states_img.gif'
screen.addshape(image)
turtle.shape(image)

data_file = pd.read_csv('50_states.csv')
all_state = data_file.state.tolist()
correct_count = 0
correct_list = []
game_is_on = True
while game_is_on:
    guess = screen.textinput(f'{correct_count}/50 Correct', 'What is another state name?')
    # Cancelling/closing the dialog returns None; treat that like typing
    # 'exit' instead of crashing on None.title() (bug in the original).
    if guess is None or guess == 'exit':
        game_is_on = False
        # Save the states the player did not name, so they can study them.
        missing_state = [state for state in all_state if state not in correct_list]
        df_missing_state = pd.DataFrame(missing_state)
        df_missing_state.to_csv('your_missing_answer.csv')
        screen.bye()
        continue
    state_guess_info = data_file[data_file['state'] == guess.title()]
    screen.tracer(0)  # prevent screen flash while drawing the label
    if not state_guess_info.empty and guess not in correct_list:
        screen.update()
        x_cor = state_guess_info['x'].tolist()
        y_cor = state_guess_info['y'].tolist()
        name_state = Name()
        name_state.goto(x_cor[0], y_cor[0])
        name_state.write(guess, font=('monaco', 10, 'bold'))
        correct_count += 1
        correct_list.append(guess)
screen.mainloop()
| haanguyenn/python_learning | state_guessing/main.py | main.py | py | 1,300 | python | en | code | 0 | github-code | 13 |
15087222906 | import logging
# Create your views here.
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.core.mail import EmailMessage
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from oauth2client import xsrfutil
from oauth2client.django_orm import Storage
from django_mailbox.models import MessageAttachment
import utils
from .models import MiliBox, Credential
from .forms import SendMailForm
from contacts.models import get_contacts_for_user,Contact,ContactEmail, ContactMessage
def sign_in(request):
    """Render the sign-in page."""
    return render(request, "sign_in.html", RequestContext(request))
def categorize(request):
    """Show all of the user's contacts so they can be sorted into types."""
    contacts = request.user.contact_set.all()
    return render(request, "categorize.html",locals())
@csrf_exempt
def categorize_type(request,id,type):
    """Set the contact type code for one of the user's contacts.

    NOTE(review): CSRF is disabled and `type` is stored unvalidated --
    confirm callers only send the known type codes (cf. inbox tab_options).
    """
    contact = request.user.contact_set.get(id=id)
    contact.contact_type = type
    contact.save()
    return HttpResponse("Success")
@login_required
def inbox(request, provider_id):
    """Render the inbox filtered by contact-category tab and the selected
    contact (by provider_id, falling back to the first contact)."""
    name_style="inbox"
    url_name = "mails:inbox"
    inbox_tab = request.GET.get('type', 'family')
    # Map the tab name to the integer contact_type stored on Contact;
    # unknown tab names fall back to 'family'.
    tab_options = {'family': 1, 'friends': 2, 'work': 3, 'others': 4}
    inbox_tab_number = inbox_tab in tab_options and tab_options[inbox_tab] or tab_options['family']
    contacts = request.user.contact_set.filter(contact_type=inbox_tab_number)
    selected = provider_id and contacts.get(provider_id=provider_id) or contacts.exists() and contacts.all()[0] or None
    # Messages from the user's first mailbox exchanged with the selection,
    # newest first; empty list when no contact is selected.
    messages = selected and request.user.milibox_set.all()[0].messages.filter(contactmessage__contact=selected).order_by('-id') or []
    return render(request, "inbox.html", locals())
@login_required
def compose(request, provider_id):
    """Compose and send an email, optionally with one attachment.

    GET renders the compose form (pre-filled with the selected contact's
    address); POST sends the mail from the logged-in user's address.
    """
    name_style="compose"
    contacts = request.user.contact_set
    url_name = "mails:compose"
    if provider_id:
        email = ContactEmail.objects.get(contact__provider_id=provider_id)
    if request.method == 'POST':
        if request.FILES:
            form = SendMailForm(request.POST,request.FILES)
            if form.is_valid():
                upload_file = request.FILES['upload']
                message=EmailMessage(request.POST.get('subject'),request.POST.get('message'),request.user.email,[request.POST.get('to_message')])
                message.attach(upload_file.name,upload_file.read(),upload_file.content_type)
        else:
            form = SendMailForm(request.POST)
            if form.is_valid():
                message=EmailMessage(request.POST.get('subject'),request.POST.get('message'),request.user.email,[request.POST.get('to_message')])
        # NOTE(review): if the form is invalid, `message` was never bound and
        # this raises NameError -- confirm forms are always valid here.
        message.send()
        return HttpResponseRedirect('/')
    else:
        # NOTE(review): `email` is only bound when provider_id is truthy;
        # a GET without provider_id would raise NameError here.
        form = SendMailForm({'to_message':email})
    selected = provider_id and contacts.get(provider_id=provider_id) or contacts.exists() and contacts.all()[0] or None
    contacts = contacts.all()
    return render(request, "compose.html", locals())
@login_required
def attachments(request, provider_id):
    """List message attachments exchanged with the selected contact,
    newest first."""
    name_style="attachments"
    url_name = "mails:attachments"
    contacts = request.user.contact_set.all()
    selected = provider_id and contacts.get(provider_id=provider_id) or contacts.exists() and contacts.all()[0] or None
    documents = MessageAttachment.objects.filter(message__mailbox__milibox__user=request.user, message__contactmessage__contact=selected).order_by('-id')
    return render(request, "attachments.html", locals())
def index(request):
    """Standalone send-mail form: POST sends a mail with an attachment,
    using the sender address typed into the form as From/Reply-to."""
    if request.method == 'POST':
        form = SendMailForm(request.POST,request.FILES)
        if form.is_valid():
            #to_msg = request.POST.get('to_message')
            upload_file = request.FILES['upload']
            message=EmailMessage(request.POST.get('subject'),request.POST.get('message'),request.POST.get('from_message'),[request.POST.get('to_message')],headers={'Reply-to':request.POST.get('from_message')})
            message.attach(upload_file.name,upload_file.read(),upload_file.content_type)
            message.send()
            #print to_msg
        return HttpResponseRedirect('/')
    else:
        form = SendMailForm()
    return render(request,"index.html",{'form':form})
@login_required
def home(request):
    """Entry point after login: start the OAuth2 flow when no valid Google
    credential is stored for the user.

    NOTE(review): when a valid credential exists, the else-branch is entirely
    commented out and the view returns None -- Django will reject that;
    confirm whether the redirect to /mails/categorize should be restored.
    """
    credential = Credential.objects.get_for_user(request.user)
    if credential is None or credential.invalid == True:
        # Bind an XSRF token to this user and redirect to Google's consent page.
        settings.FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY, request.user)
        authorize_url = settings.FLOW.step1_get_authorize_url()
        return HttpResponseRedirect(authorize_url)
    else:
        #mail_box = MiliBox.objects.get(user=request.user)
        '''
        contacts = get_contacts_for_user(request.user)
        if contacts and not Contact.objects.filter(user=request.user).exists():
            for contact in contacts:
                con=Contact.objects.create(user=request.user,provider_id=contact.id.text.split('/')[-1],name=contact.nickname,image_link=contact.GetPhotoLink())
                for email in contact.email:
                    ContactEmail.objects.create(contact=con,email=email.address)
        '''
        #mail_box.get_new_mail()
        #return HttpResponseRedirect('/mails/categorize')
@login_required
def auth_return(request):
    """OAuth2 callback: validate the XSRF state token, exchange the auth code
    for a credential, persist it, and create the user's mailbox.

    NOTE(review): HttpResponseBadRequest is not among this module's visible
    imports -- confirm it is imported, otherwise this raises NameError.
    """
    if not xsrfutil.validate_token(settings.SECRET_KEY, request.REQUEST['state'], request.user):
        return HttpResponseBadRequest()
    credential = settings.FLOW.step2_exchange(request.REQUEST)
    storage = Storage(Credential, 'id', request.user, 'credential')
    storage.put(credential)
    mail_box=MiliBox.objects.create(name="MiliBox", user=request.user)
    return HttpResponseRedirect("/")
| abhi3188/djangodash13 | mails/views.py | views.py | py | 5,808 | python | en | code | 1 | github-code | 13 |
27204058909 | from aiogram import types
import aiohttp
from loader import dp, bot
from models.models import UserCart
from tortoise.queryset import Q
from utils.misc import api
from data.config import PAYMENTS_PROVIDER_TOKEN
from keyboards.inline import back_keyboard
RUSSIAN_POST_SHIPPING_OPTION = types.ShippingOption(id='ru_post', title='Почтой России')
COURIER_SHIPPING_OPTION = types.ShippingOption(id='courier', title='Курьером')
@dp.callback_query_handler(lambda call: call.data.split(':')[0] == 'payments_shipping_order')
async def payments_order(call: types.CallbackQuery):
    """Build and send a Telegram invoice for the user's active cart.

    Prices are passed to Telegram in minor units (value * 100); the invoice
    payload carries the total cart weight, which the shipping-query handler
    parses back out.
    """
    await call.message.delete()
    cart = await UserCart.filter(Q(user__tg_id=call.message.chat.id) & Q(active=True))
    prices = []
    weight = 0
    amount_for_button = 0  # rouble total shown on the pay button
    for item in cart:
        product = await api.get_product_info(item.product_id)
        amount_for_button += (product['price'] - product['discount']) * item.quantity
        weight += product['weight'] * item.quantity
        # per-line amount in minor units (kopecks)
        amount = (int(product['price']*100) - int(product['discount'] * 100)) * item.quantity
        prices.append(types.LabeledPrice(label=product['name'], amount=amount))
    keyboard = await back_keyboard(callback='back_checkout:', pay_text=f"Оплатить {amount_for_button} руб.")
    await bot.send_invoice(call.message.chat.id,
                           title='Ваша корзина',
                           description='Ваша корзина',
                           provider_token=PAYMENTS_PROVIDER_TOKEN,
                           currency='rub',
                           photo_url='https://thumbs.dreamstime.com/b/happy-shop-logo-design-template-shopping-designs-stock-134743566.jpg',
                           photo_height=512,  # !=0/None or picture won't be shown
                           photo_width=512,
                           photo_size=512,
                           # need_shipping_address=True,
                           is_flexible=True,  # True If you need to set up Shipping Fee
                           prices=prices,
                           start_parameter='example',
                           need_name=True,
                           need_shipping_address=True,
                           need_phone_number=True,
                           payload=f'{weight}',
                           reply_markup=keyboard)
@dp.shipping_query_handler(lambda query: True)
async def process_shipping_query(shipping_query: types.ShippingQuery):
    """Validate the shipping address and offer the available delivery
    options (Russian Post always; courier only when the city is served)."""
    # The invoice payload carries the total cart weight (see payments_order).
    weight = int(shipping_query.invoice_payload)
    # NOTE(review): this reassignment has no visible effect afterwards
    # (weight was already parsed above) -- looks like leftover debug code.
    shipping_query.invoice_payload = 4
    if shipping_query.shipping_address.country_code != 'RU':
        return await bot.answer_shipping_query(
            shipping_query.id,
            ok=False,
            error_message="Отправление товара происходит только по территории РФ."
        )
    # Russian postal codes are exactly 6 digits.
    if shipping_query.shipping_address.post_code.isdigit() is False or len(shipping_query.shipping_address.post_code) != 6:
        return await bot.answer_shipping_query(
            shipping_query.id,
            ok=False,
            error_message="Почтовый индекс состоит из 6 цифр."
        )
    resp = await api.pochta_rf(postcode=shipping_query.shipping_address.post_code, weight=weight)
    # A 'detail' key in the response signals a lookup error message.
    if 'detail' in resp:
        return await bot.answer_shipping_query(
            shipping_query.id,
            ok=False,
            error_message=resp['detail']
        )
    shipping_options = [RUSSIAN_POST_SHIPPING_OPTION]
    # The option objects are module-level singletons: clear the prices left
    # over from any previous query before refilling them.
    RUSSIAN_POST_SHIPPING_OPTION.prices.clear()
    COURIER_SHIPPING_OPTION.prices.clear()
    RUSSIAN_POST_SHIPPING_OPTION.add(types.LabeledPrice('Почта РФ', resp['amount'] * 100))
    result_city = await api.get_city_courier(shipping_query.shipping_address.city)
    if 'amount' in result_city:
        COURIER_SHIPPING_OPTION.add(types.LabeledPrice(f"Курьер г. {result_city['city']}", result_city['amount'] * 100))
        shipping_options.append(COURIER_SHIPPING_OPTION)
    await bot.answer_shipping_query(
        shipping_query.id,
        ok=True,
        shipping_options=shipping_options
    )
@dp.pre_checkout_query_handler(lambda query: True)
async def checkout(pre_checkout_query: types.PreCheckoutQuery):
    """Always approve the pre-checkout query so the payment can proceed;
    the error_message is only shown to the user if ok were False."""
    await bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True,
                                        error_message="Aliens tried to steal your card's CVV,"
                                                      " but we successfully protected your credentials,"
                                                      " try to pay again in a few minutes, we need a small rest.")
@dp.message_handler(content_types=types.ContentType.SUCCESSFUL_PAYMENT)
async def process_successful_payment(message: types.Message):
    """Create the shop order after Telegram confirms payment, then empty the
    user's cart and send the confirmation text.

    The invoice payload distinguishes delivery orders (a bare digit string
    holding the weight, see payments_order) from pickup-point orders
    ('pp:<id>').
    """
    pmnt = message.successful_payment
    cart = await UserCart.filter(Q(user__tg_id=message.chat.id) & Q(active=True))
    if pmnt.invoice_payload.isdigit():
        if pmnt.shipping_option_id == 'ru_post':
            shipping_option = "Почта РФ"
            weight = int(pmnt.invoice_payload)
            # NOTE(review): process_shipping_query reads resp['amount'] from
            # pochta_rf's return value, but here the raw return is used in
            # the arithmetic below -- confirm a missing ['amount'] lookup.
            shipping_amount = await api.pochta_rf(postcode=pmnt.order_info.shipping_address.post_code, weight=weight)
        elif pmnt.shipping_option_id == 'courier':
            shipping_option = "Курьер"
            shipping_amount = await api.get_city_courier(pmnt.order_info.shipping_address.city)
            shipping_amount = shipping_amount['amount']
        # total_amount is in minor units; strip the shipping part off first.
        order_amount = pmnt.total_amount - (shipping_amount * 100)
        resp = await api.create_order(shipping_amount=shipping_amount,
                                      order_amount= order_amount / 100,
                                      shipping_option=shipping_option,
                                      order_info=pmnt.order_info,
                                      tg_id=message.chat.id,
                                      username=message.chat.username,
                                      cart=cart)
        text = resp['message']
    elif pmnt.invoice_payload.split(':')[0] == 'pp':
        pp_id = pmnt.invoice_payload.split(':')[1]
        resp = await api.create_pp_order(order_amount= pmnt.total_amount / 100,
                                         shipping_option="Пункт самовывоза",
                                         order_info=pmnt.order_info,
                                         tg_id=message.chat.id,
                                         username=message.chat.username,
                                         cart=cart,
                                         pp_id=int(pp_id))
        text = f"{resp['message']}\n\nСкажите этот код в пункте самовывоза:\n<tg-spoiler><b>{resp['code']}</b></tg-spoiler>"
    # Payment succeeded: clear the cart, then confirm to the user.
    for item in cart:
        await item.delete()
    await bot.send_message(
        message.chat.id,
        text
    )
| Kyle-krn/TelegramShop | handlers/payments/payments_shipping_handlers.py | payments_shipping_handlers.py | py | 6,886 | python | en | code | 0 | github-code | 13 |
1467884719 | #%%
from airflow.operators.python import PythonOperator
import requests, pytz
from airflow.decorators import task_group
from datetime import datetime
stockholm_timezone = pytz.timezone("Europe/Stockholm")
theme_parks = {"liseberg": 11}
#%%
def _extract_queue_times(theme_park):
    """Fetch current ride queue times for *theme_park* (queue-times.com id).

    Returns the list of rides on HTTP 200; implicitly returns None on any
    other status -- downstream XCom consumers should expect that.
    """
    response = requests.get(f"https://queue-times.com/parks/{theme_park}/queue_times.json")
    if response.status_code == 200:
        return response.json()['rides']
def _transform_stockholm_timezone(task_instance):
    """Rewrite each ride's `last_updated` UTC timestamp (pulled from the
    extract task's XCom) as local Stockholm time, formatted 'yymmdd HHMM'."""
    rides = task_instance.xcom_pull(task_ids = "extract_data.extract_queue_time")
    for ride in rides:
        parsed_utc = datetime.strptime(
            ride["last_updated"], "%Y-%m-%dT%H:%M:%S.%fZ"
        ).replace(tzinfo=pytz.UTC)
        local_time = parsed_utc.astimezone(stockholm_timezone)
        ride["last_updated"] = local_time.strftime("%y%m%d %H%M")
    return rides
#%%
@task_group(group_id="extract_data")
def extract_queue_time():
    """Task group: pull Liseberg queue times, then convert the timestamps
    to Stockholm local time; both tasks push their result via XCom."""
    extract_queue_times = PythonOperator(task_id = "extract_queue_time", python_callable = _extract_queue_times, op_args=[theme_parks["liseberg"]], do_xcom_push = True)
    transform_timezone = PythonOperator(task_id = "transform_stockholm_timezone", python_callable=_transform_stockholm_timezone, do_xcom_push = True)
    extract_queue_times >> transform_timezone
#%%
| kokchun/Data-engineering-AI22 | Lecture-code/Lec5-Airflow_ELT/include/queue_time/extract.py | extract.py | py | 1,275 | python | en | code | 1 | github-code | 13 |
16031827257 | import numpy as np
import os
import torch
from torch.utils.data import Dataset
from hypothesis.util.data.numpy import InMemoryStorage
from hypothesis.util.data.numpy import PersistentStorage
class SimulationDataset(Dataset):
    r"""Paired (input, output) simulation samples backed by numpy storages.

    Parameters
    ----------
    inputs, outputs:
        Arguments forwarded to the storage backend; both storages are
        expected to hold the same number of samples.
    in_memory:
        When ``True`` the data is loaded fully into RAM via
        ``InMemoryStorage``; otherwise it is read on demand through
        ``PersistentStorage``.
    """

    def __init__(self, inputs, outputs, in_memory=False):
        super(SimulationDataset, self).__init__()
        storage_cls = InMemoryStorage if in_memory else PersistentStorage
        self.storage_inputs = storage_cls(inputs)
        self.storage_outputs = storage_cls(outputs)

    def __len__(self):
        # Both storages are assumed equally long; only inputs is consulted.
        return len(self.storage_inputs)

    def __del__(self):
        r"""Close both storages, tolerating partially constructed instances.

        Previously only ``storage_inputs`` was guarded: if ``__init__``
        failed after creating the first storage, closing
        ``storage_outputs`` raised AttributeError during garbage collection.
        """
        for name in ("storage_inputs", "storage_outputs"):
            storage = getattr(self, name, None)
            if storage is not None:
                storage.close()

    def __getitem__(self, index):
        r"""Return the ``(input, output)`` pair stored at ``index``."""
        return self.storage_inputs[index], self.storage_outputs[index]
| montefiore-ai/hypothesis | hypothesis/util/data/numpy/simulation_dataset.py | simulation_dataset.py | py | 1,064 | python | en | code | 47 | github-code | 13 |
# Demonstration of deliberately triggering an exception inside a
# try / except / else / finally block. (Leading dataset-extraction residue
# that made the first line a syntax error has been removed.)
while True:
    num1 = input("enter num1:")
    num2 = input("enter num2:")
    try:
        # Raise unconditionally, so the conversions below never execute and
        # control always transfers to the except branch.
        raise Exception("主动出现异常")
        num1 = int(num1)
        num2 = int(num2)
        result = num1 + num2
    except Exception as ex:
        print(ex)
    else:
        # Runs only when the try body finishes without raising; unreachable
        # here because of the unconditional raise above.
        print("num1 + num2 的值为%s" %result)
    finally:
        # Always executes, whether or not an exception occurred.
        print("执行完毕")
26269507176 | import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# Resolve paths relative to this file so the build works from any cwd.
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
# Read the long description with a context manager so the file handle is
# closed promptly instead of leaking until interpreter exit.
with open(os.path.join(BASE_PATH, 'README.rst')) as _readme:
    README = _readme.read()

# Package metadata consumed by the setup() call below.
__version__ = '0.1.10'
__author__ = 'Masashi Shibata <contact@c-bata.link>'
__author_email__ = 'contact@c-bata.link'
__license__ = 'MIT License'
__classifiers__ = (
    'Development Status :: 2 - Pre-Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
    'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
    'Topic :: Internet :: WWW/HTTP :: WSGI',
    'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
    'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3 :: Only',
)
class PyTest(TestCommand):
    """``python setup.py test`` shim that delegates to pytest."""

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily: pytest may not be installed at build time.
        import pytest
        sys.exit(pytest.main(self.pytest_args))
# Register the distribution with setuptools; all metadata is defined above.
setup(
    name='kobin',
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    url='https://github.com/kobinpy/kobin',
    description='Type Hints friendly WSGI Framework for Python3.',
    long_description=README,
    classifiers=__classifiers__,
    packages=find_packages(exclude=['test*']),  # ship every package except tests
    install_requires=[],  # pure-stdlib at runtime
    keywords='web framework wsgi',
    license=__license__,
    include_package_data=True,
    test_suite='tests',
    tests_require=['pytest'],
    cmdclass={'test': PyTest},  # wire `setup.py test` to the pytest shim above
)
| kobinpy/kobin | setup.py | setup.py | py | 2,242 | python | en | code | 67 | github-code | 13 |
2813110569 | #!/usr/bin/env python3
import requests
import os
import re
import pyfiglet
from colorama import Fore, init
from multiprocessing.dummy import Pool as ThreadPool
# Colour shortcuts for terminal output.
green = Fore.LIGHTGREEN_EX
red = Fore.LIGHTRED_EX
white = Fore.WHITE
cyan = Fore.LIGHTCYAN_EX
yellow = Fore.LIGHTYELLOW_EX
init(autoreset=True)  # reset colour automatically after every print
# Known locations of phpunit's eval-stdin.php (CVE-2017-9841) to probe.
phpfiles = ["/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php", "/yii/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php", "/laravel/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php", "/laravel52/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php", "/lib/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php", "/zend/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php"]
def banner():
    """Clear the terminal and print the tool's ASCII-art header."""
    os.system("cls||clear")
    title = pyfiglet.figlet_format("CVE-2017-9841", font="slant", justify="center")
    print(red + title)
    print(f"\t\t\t{red}[ {white}Created By X - MrG3P5 {red}]")
    print(f"\t    {red}[ {white}Remote Code Execution (RCE) (Unauthenticated) {red}]\n")
def Exploit(domain):
    """Probe ``domain`` for CVE-2017-9841 (phpunit eval-stdin RCE).

    Posts a PHP payload to every known eval-stdin.php location; a response
    containing the md5 marker proves the payload was executed remotely.
    The first vulnerable URL found is appended to result.txt (previously the
    *last probed* URL was written regardless of which path was vulnerable).
    """
    try:
        vuln_site = None
        for path in phpfiles:
            site = "http://" + domain + path
            req = requests.get(site, headers={
                "Content-Type": "text/html",
                "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0",
            }, data="<?php echo md5(phpunit_rce); ?>")
            # md5("phpunit_rce") — present only if the payload executed.
            if "6dd70f16549456495373a337e6708865" in req.text:
                vuln_site = site
        if vuln_site is not None:
            print(f"{white}--> {green}Vulnerable: {domain}")
            # Context manager instead of the leaked open(...).write(...).
            with open("result.txt", "a") as result_file:
                result_file.write(vuln_site + "\n")
        else:
            print(f"{white}--> {red}Not Vulnerable: {domain}")
    except requests.RequestException:
        # Network/HTTP failures count as not vulnerable, as before — but we
        # no longer hide programming errors behind a bare except.
        print(f"{white}--> {red}Not Vulnerable: {domain}")
if __name__=="__main__":
    banner()
    # Read the target list, strip schemes/extensions, then extract bare
    # domain names with a (loose) hostname regex.
    list_path = input(f"{red}[{white}?{red}] {white}Domain List : ")
    with open(list_path) as handle:  # previously leaked via open(...).read()
        input_list = handle.read().replace("http://", "").replace("https://", "").replace(".php", "")
    domain_fixer = re.findall(r'(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]{,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}', input_list)
    Thread = input(f"{red}[{white}?{red}] {white}Thread : ")
    # Thread pool (multiprocessing.dummy) fans the probes out concurrently.
    pool = ThreadPool(int(Thread))
    pool.map(Exploit, domain_fixer)
    pool.close()
    pool.join()
| MrG3P5/CVE-2017-9841 | main.py | main.py | py | 2,254 | python | en | code | 4 | github-code | 13 |
# Robot Movement https://projecteuler.net/problem=208
# Track Location https://math.stackexchange.com/questions/1384994/rotate-a-point-on-a-circle-with-known-radius-and-position
import math
import time
class Robot:
    def __init__(self, movementAngle, x, y, xCenter, yCenter):
        """
        Hold the robot's position, its centre of rotation, and the arc
        angle (degrees) it sweeps per step.

        :param movementAngle:
        :param x:
        :param y:
        :param xCenter:
        :param yCenter:
        """
        self.movementAngle = movementAngle
        self.x = x
        self.y = y
        self.xCenter = xCenter
        self.yCenter = yCenter

    def moveRobot(self, direction):
        """
        Advance the robot one arc step.
        0 rotates anticlockwise, 1 rotates clockwise.
        :param direction:
        """
        step = self.movementAngle if direction == 0 else -self.movementAngle
        self.x, self.y = self.getEndPoint(self.x, self.y, self.xCenter, self.yCenter, step)

    def getEndPoint(self, xStart, yStart, xCenter, yCenter, movementAngle):
        """
        Rotate (xStart, yStart) about (xCenter, yCenter) by movementAngle
        degrees and return the rounded [x, y] end point.
        :param xStart:
        :param yStart:
        :param xCenter:
        :param yCenter:
        :param movementAngle:
        :return:
        """
        theta = math.radians(movementAngle)
        sinA = math.sin(theta)
        cosA = math.cos(theta)
        dx = xStart - xCenter
        dy = yStart - yCenter
        xEnd = round(xCenter + dx * cosA - dy * sinA, 2)
        yEnd = round(yCenter + dx * sinA + dy * cosA, 2)
        # Normalise -0.0 to 0.0 so rotated points compare/hash consistently.
        if yEnd == 0:
            yEnd = 0.0
        if xEnd == 0:
            xEnd = 0.0
        return [xEnd, yEnd]

    def shiftRobotCenter(self):
        """
        Mirror the centre of rotation through the robot's current position
        (a 180-degree rotation of the centre about the robot), used when the
        turning sense flips between clockwise and anticlockwise.
        """
        self.xCenter, self.yCenter = self.getEndPoint(self.xCenter, self.yCenter, self.x, self.y, 180)
class RobotWalk:
    def __init__(self, maxMovements, movementAngle, x, y, xCenter, yCenter):
        """
        Record the step budget, per-step arc angle, and the robot's starting
        position and centre of rotation.
        :param maxMovements:
        :param movementAngle:
        :param x:
        :param y:
        :param xCenter:
        :param yCenter:
        """
        self.maxMovement = maxMovements
        self.movementAngle = movementAngle
        self.x = x
        self.y = y
        self.xCenter = xCenter
        self.yCenter = yCenter

    def evaluateMovement(self):
        """
        Walk every clockwise/anticlockwise choice sequence and report how
        many of them return the robot to its starting coordinates.
        """
        startPoint = (self.x, self.y, self.xCenter, self.yCenter, None)
        # Map (x, y, xCenter, yCenter, lastDirection) -> number of paths there.
        reachedLocations = {startPoint: 1}
        startTime = time.time()
        for _ in range(self.maxMovement):
            nextLocations = {}
            for state, ways in reachedLocations.items():
                x, y, cx, cy, lastStep = state
                clockBot = Robot(self.movementAngle, x, y, cx, cy)
                antiBot = Robot(self.movementAngle, x, y, cx, cy)
                # When the turning sense flips, the pivot jumps to the
                # mirror-image point on the other side of the robot.
                if lastStep == 0:
                    clockBot.shiftRobotCenter()
                elif lastStep == 1:
                    antiBot.shiftRobotCenter()
                antiBot.moveRobot(0)
                clockBot.moveRobot(1)
                for robot, tag in ((clockBot, 1), (antiBot, 0)):
                    key = (robot.x, robot.y, robot.xCenter, robot.yCenter, tag)
                    nextLocations[key] = nextLocations.get(key, 0) + ways
            reachedLocations = nextLocations
        paths = self.getPathCount(reachedLocations, startPoint)
        endTime = time.time()
        print("-------------- Final Verdict ------------------------------")
        print("Max Steps " + str(self.maxMovement))
        print("Correct Paths : " + str(paths))
        print("Total Unique Ending Points : " + str(len(reachedLocations)))
        print("Time Taken : " + str(endTime - startTime))
        print("-----------------------------------------------------------")

    def getPathCount(self, reachedLocations, startPoint):
        """
        Sum the path counts of every end state sitting exactly on the start
        coordinates; the centre and last direction are ignored.
        :param reachedLocations:
        :param startPoint:
        :return:
        """
        return sum(
            ways
            for state, ways in reachedLocations.items()
            if state[0] == startPoint[0] and state[1] == startPoint[1]
        )
# Evaluate the motion for step counts 5, 10, ..., 70. Only multiples of 5
# are tried: with a 72-degree arc per step, a path can only close after a
# number of moves divisible by 5, so other counts yield zero correct paths.
# (Trailing dataset-extraction residue on the last line has been removed.)
for index in range(5, 75, 5):
    robot = RobotWalk(index, 72, 1, 0, 0, 0)
    robot.evaluateMovement()
# Input: the first line gives the map height N and width M.
# The second line gives the character's row, column and facing direction d, space-separated.
# From the third line onward: the map itself — land (0) and sea (1); the outer border is always sea.
n, m = map(int, input().split())

# Visited map with the same dimensions, initialised to 0 (unvisited).
d = [[0] * m for _ in range(n)]
x, y, direction = map(int, input().split())
d[x][y] = 1 # mark the starting cell as visited

array = []
for i in range(n):
    array.append(list(map(int, input().split())))

# Movement offsets by facing: north, east, south, west.
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]

count = 1       # number of distinct land cells visited (the start counts)
turn_time = 0   # consecutive left turns made without moving
while True:
    direction = (direction-1) % 4 # turn left; in Python -1 % 4 => 3
    nx = x + dx[direction]
    ny = y + dy[direction]
    # Step forward when the cell ahead is land (0) and not yet visited.
    if array[nx][ny] == 0 and d[nx][ny] == 0:
        x = nx
        y = ny
        d[nx][ny] = 1
        count += 1
        turn_time = 0
        continue
    else:
        turn_time += 1
    # Turned a full circle without moving: try to step straight back.
    if turn_time == 4:
        nx = x - dx[direction]
        ny = y - dy[direction]
        if array[nx][ny] == 0:
            x = nx
            y = ny
        else:
            # Backed against the sea — the simulation ends here.
            break
        turn_time = 0
print(count)
| tr0up2r/coding-test | implementation/008_game_development.py | 008_game_development.py | py | 1,172 | python | ko | code | 0 | github-code | 13 |
4250089850 | from django.shortcuts import render, get_object_or_404,redirect
from django.forms.models import model_to_dict
from django.http import HttpResponse ,JsonResponse
from django.utils.translation import gettext as _
# from django.views.generic import DetailView
from .models import User, Designer, Skill, Project
# from .forms import ProjectForm, DesignerSettingsForm, UserSettingsForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.urls import reverse_lazy
from .decorators import designer_required
from django.utils.decorators import method_decorator
from django.contrib.auth import login
from django.views.generic import CreateView, UpdateView, TemplateView, DetailView
from .forms import DesignerSignUpForm, ClientSignUpForm, DesignerSkillsForm, ProjectForm, DesignerSettingsForm, UserSettingsForm
from contract.views import get_rating
from contract.models import Contract
class SignUpView(TemplateView):
    """Static page where a visitor chooses designer or client registration."""
    template_name = 'user_profile/register.html'
def landing_page(request):
    """Render the public landing page."""
    return render(request, "user_profile/landing_page.html")
def home(request):
    """Render the home feed listing every project and skill.

    A leftover debug loop that printed each project's ``image_file_path``
    to stdout on every page load has been removed.
    """
    projects = Project.objects.all()
    skills = Skill.objects.all()
    context = {
        'projects': projects,
        'skills': skills,
        'user': request.user,
    }
    return render(request, "user_profile/home.html", context)
def search_data(request):
    """Render projects whose name contains the ``q`` query-string term."""
    term = request.GET.get('q')
    matches = Project.objects.filter(Q(name__icontains=term))
    return render(request, "user_profile/search.html", {'results': matches})
@login_required
def upload_portfolio(request, username):
    """Let the logged-in designer attach a new portfolio project.

    GET renders an empty upload form; a valid POST stores the project under
    the current user's designer profile and redirects home. An invalid POST
    re-renders the bound form with its errors.
    """
    if request.method != 'POST':
        form = ProjectForm()
    else:
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            project = form.save(commit=False)
            # Ownership comes from the session, never from submitted data.
            project.designer = request.user.designer
            project.save()
            return redirect('home')
    return render(request, 'user_profile/project_upload.html', {'form': form, 'username': username})
class DesignerSignUpView(CreateView):
    """Registration view for designer accounts.

    Cleanup: the pdb cheat-sheet that had been left inside a docstring, the
    unused ``user`` binding, and dead commented-out code were removed.
    """
    model = User
    form_class = DesignerSignUpForm
    template_name = 'user_profile/register_form.html'

    def get_context_data(self, **kwargs):
        """Tag the shared registration template so it shows designer copy."""
        kwargs['user_type'] = 'designer'
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        """Persist the new user and send them to the login page.

        The user is deliberately not logged in automatically.
        """
        form.save()
        return redirect('login')
@method_decorator([login_required, designer_required], name='dispatch')
class DesignerSkillsView(UpdateView):
    """Let the logged-in designer edit their own skill set."""
    model = Designer
    form_class = DesignerSkillsForm
    template_name = 'user_profile/skills_form.html'
    # NOTE(review): the 'profile' URL appears to take a username argument
    # elsewhere in this module — confirm this reverse resolves without kwargs.
    success_url = reverse_lazy('profile')

    def get_object(self):
        """Always edit the requesting user's designer record (no URL pk used)."""
        return self.request.user.designer

    def form_valid(self, form):
        """Flash a success message before the standard save-and-redirect."""
        messages.success(self.request, 'Skills updated with success!')
        return super().form_valid(form)
class ClientSignUpView(CreateView):
    """Registration view for client accounts.

    Cleanup: removed the unused ``user`` binding and dead commented-out code.
    """
    model = User
    form_class = ClientSignUpForm
    template_name = 'user_profile/register_form.html'

    def get_context_data(self, **kwargs):
        """Tag the shared registration template so it shows client copy."""
        kwargs['user_type'] = 'client'
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        """Persist the new user and send them to the login page.

        The user is deliberately not logged in automatically.
        """
        form.save()
        return redirect('login')
def designer_profile(request, username):
    """Show a designer's public profile page.

    404s when the username does not exist or has no designer record.
    """
    profile_user = get_object_or_404(User, username=username)
    designer = get_object_or_404(Designer, user_id=profile_user.id)
    rating = get_rating(request, username)
    # Only contracts currently in progress count toward the workload figure.
    active_contracts = Contract.objects.filter(designer=designer).filter(status="active")
    context = {
        "user": request.user,
        "designer_user": profile_user,
        "designer": designer,
        "skills": designer.skills.all(),
        "projects": designer.projects.all(),
        "username": username,
        "rating": rating,
        "count_contract": len(active_contracts),
    }
    return render(request, 'user_profile/designer_detail.html', context)
def client_profile(request, username):
    """Show a client's public profile page with their total contract count."""
    client = get_object_or_404(User, username=username)
    contracts = Contract.objects.filter(client=client)
    context = {
        "client_user": client,
        "username": username,
        "present_user": request.user.username,
        "count_contract": len(contracts),
    }
    return render(request, 'user_profile/client_detail.html', context)
def project_detail(request, username, pk):
    """Show one portfolio project belonging to the given designer."""
    owner = get_object_or_404(User, username=username)
    designer = get_object_or_404(Designer, user_id=owner.id)
    context = {
        "designer_user": owner,
        "designer": designer,
        "project": designer.projects.get(pk=pk),
    }
    return render(request, 'user_profile/project_detail.html', context)
@login_required
def update_profile(request, username):
    """Edit the logged-in user's settings.

    Designers get a combined user + designer-profile form; clients only the
    user form. A valid POST saves and redirects to the matching profile
    page; an invalid POST re-renders the bound forms with their errors.

    Bug fixed: ``context`` was never assigned on any invalid-POST path (or
    for users that are neither designer nor client), so the final ``render``
    raised a NameError.
    """
    profile_form = None
    if request.method == 'POST':
        user_form = UserSettingsForm(request.POST, request.FILES or None, instance=request.user)
        if request.user.is_designer:
            profile_form = DesignerSettingsForm(request.POST, instance=request.user.designer)
            if user_form.is_valid() and profile_form.is_valid():
                user_form.save()
                profile_form.save()
                messages.success(request, _('Your profile was successfully updated!'))
                return redirect('designer_profile', username=request.user.username)
            messages.error(request, 'Please correct the error below.')
        elif request.user.is_client:
            if user_form.is_valid():
                user_form.save()
                messages.success(request, _('Your profile was successfully updated!'))
                return redirect('client_profile', username=request.user.username)
            messages.error(request, 'Please correct the error below.')
    else:
        user_form = UserSettingsForm(instance=request.user)
        if request.user.is_designer:
            profile_form = DesignerSettingsForm(instance=request.user.designer)
    # Fall through with whatever forms we built (bound on invalid POST,
    # unbound on GET) so errors are displayed instead of crashing.
    context = {'user_form': user_form, 'username': username}
    if profile_form is not None:
        context['profile_form'] = profile_form
    return render(request, 'user_profile/settings.html', context)
def delete_profile(request, pk):
    """Delete the user with primary key ``pk`` and return to the home page.

    NOTE(review): no permission or ownership check is visible here — as
    written, any request can delete any account. Confirm this is guarded at
    the URL/middleware level; otherwise add a check.
    """
    instance = User.objects.get(pk=pk)
    instance.delete()
    return redirect('home')
def profile(request, username):
    """Dispatch to the designer or client profile page for ``username``.

    Cleanup: removed an unreachable ``return HttpResponse(username)`` that
    followed both returning branches (and had dataset residue fused to it).
    """
    profile_user = User.objects.get(username=username)
    if profile_user.is_designer:
        designer = get_object_or_404(Designer, user_id=profile_user.id)
        context = {
            "user": request.user,
            "designer_user": profile_user,
            "designer": designer,
            "skills": designer.skills.all(),
            "projects": designer.projects.all(),
            "username": username,
        }
        return render(request, 'user_profile/designer_detail.html', context)
    # Anyone who is not a designer is shown the client page.
    client_user = get_object_or_404(User, username=username)
    context = {
        "client_user": client_user,
        "username": username,
    }
    return render(request, 'user_profile/client_detail.html', context)
13603478172 | import pygame, math, random
class ball:
    def __init__(self, screen, color,
                 x, y, velocity = 0, accel = 1, angle = 0, decel = 100 ):
        """Set up the ball's drawing surface, colour, position and motion state."""
        self.screen = screen
        self.color = color
        self.pos = [x, y]
        self.velocity = velocity
        self.angle = angle
        # Stored negated so gravity() can subtract it to pull the ball down.
        self.accel = -accel
        self.decel = decel
        # Split the launch speed into x/y components from the launch angle.
        radians = math.radians(self.angle)
        self.y_comp = self.velocity * math.sin(radians)
        self.x_comp = self.velocity * math.cos(radians)
        # Random radius between 10 and 25 pixels.
        self.size = random.randint(10, 25)

    def changePoints(self):
        """Advance the position by one step of the current velocity components."""
        self.pos = [self.pos[0] + self.x_comp, self.pos[1] + self.y_comp]

    def changeVel(self, t):
        """Reflect velocity after a collision.

        ``t`` True flips the x component (side wall); False flips the y
        component (floor/ceiling) with damping: ``decel`` is the percentage
        of speed kept per bounce — 100 keeps everything, 90 loses 10%.
        """
        if t:
            self.x_comp = -self.x_comp
        else:
            damping = 0.9956 * (self.decel * 0.01)
            self.y_comp = -damping * self.y_comp

    def gravity(self):
        """Apply one tick of gravitational acceleration to the y component."""
        self.y_comp -= self.accel * 0.001

    def draw(self):
        """Render the ball as a filled circle on its surface."""
        pygame.draw.circle(self.screen, self.color, (self.pos[0], self.pos[1]), self.size, 0)
| lambopancake/Gravity_Sim | vectorMath.py | vectorMath.py | py | 1,004 | python | en | code | 0 | github-code | 13 |
23206468820 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 17:40:22 2019
@author: timhe
"""
import os
import gdal
import glob
import warnings
import datetime
import numpy as np
import pandas as pd
import watertools.General.raster_conversions as RC
import watertools.General.data_conversions as DC
def main(inputs):
# Set Variables
Start_year_analyses = inputs["Start_year"]
End_year_analyses = inputs["End_year"]
output_folder = inputs["Output_folder"]
WAPOR_LVL = inputs["WAPOR_LEVEL"]
Phenology_Threshold = inputs["Phenology_Threshold"]
Phenology_Var = inputs["Phenology_Variable"]
Phenology_Slope = inputs["Phenology_Slope"]
METEO_timestep = inputs["METEO_timestep"]
LU_Data = inputs["LU_Map_Format"]
LU_Legend = inputs["LU_Legend"]
try:
Radiation_Data = inputs["Radiation_Source"]
except:
Radiation_Data = "KNMI"
try:
Albedo_Data = inputs["Albedo_Source"]
except:
Albedo_Data = "MODIS"
import WaporTranslator.LEVEL_1.Input_Data as Inputs
import WaporTranslator.LEVEL_1.DataCube as DataCube
import WaporTranslator.LEVEL_2 as L2
import WaporTranslator.LEVEL_2.Functions as Functions
# Do not show non relevant warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Create output folder for LEVEL 2 data
output_folder_L2 = os.path.join(output_folder, "LEVEL_2")
if not os.path.exists(output_folder_L2):
os.makedirs(output_folder_L2)
# Define dates
Dates = Functions.Get_Dekads(Start_year_analyses, End_year_analyses)
Dates_yearly = list(pd.date_range("%s-01-01" %str(Start_year_analyses), "%s-12-31" %End_year_analyses, freq = "AS"))
# Get path and formats
Paths = Inputs.Input_Paths()
Formats = Inputs.Input_Formats()
Conversions = Inputs.Input_Conversions()
# Set example file
example_file = os.path.join(output_folder, "LEVEL_1", "MASK", "MASK.tif")
# Open Mask
dest_mask = gdal.Open(example_file)
MASK = dest_mask.GetRasterBand(1).ReadAsArray()
# Load inputs for LEVEL 2
T = DataCube.Rasterdata_tiffs(os.path.join(output_folder, str(Paths.T) %WAPOR_LVL), str(Formats.T) %WAPOR_LVL, Dates, Conversion = Conversions.T, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'T', Product = 'WAPOR', Unit = 'mm/day')
ET0 = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.ET0), Formats.ET0, Dates, Conversion = Conversions.ET0, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'ET0', Product = 'WAPOR', Unit = 'mm/day')
if LU_Data == "":
LU = DataCube.Rasterdata_tiffs(os.path.join(output_folder, str(Paths.LU) %WAPOR_LVL), str(Formats.LU) %WAPOR_LVL, Dates_yearly, Conversion = Conversions.LU, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 1, Variable = 'LU', Product = 'WAPOR', Unit = 'LU')
LUdek = DataCube.Rasterdata_tiffs(os.path.join(output_folder,str(Paths.LU) %WAPOR_LVL), str(Formats.LU) %WAPOR_LVL, Dates, Conversion = Conversions.LU, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 1, Variable = 'LU', Product = 'WAPOR', Unit = 'LU')
else:
LU = DataCube.Rasterdata_tiffs(os.path.dirname(LU_Data), os.path.basename(LU_Data), Dates_yearly, Conversion = Conversions.LU, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 1, Variable = 'LU', Product = 'WAPOR', Unit = 'LU')
LUdek = DataCube.Rasterdata_tiffs(os.path.dirname(LU_Data), os.path.basename(LU_Data), Dates, Conversion = Conversions.LU, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 1, Variable = 'LU', Product = 'WAPOR', Unit = 'LU')
################################## Calculate LU map ##########################################
Phenology_pixels_year, Grassland_pixels_year = Create_LU_MAP(output_folder, Dates_yearly, LU, LUdek, Paths.LU_ESA, Formats.LU_ESA, example_file, LU_Data, LU_Legend)
LU_END = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.LU_END), Formats.LU_END, list(Dates_yearly), Conversion = Conversions.LU_END, reprojection_type = 1, Variable = 'LU_END', Product = '', Unit = '-')
del LU
################################## Get ALBEDO data ############################################
if Radiation_Data == "LANDSAF":
Start_Rad = 2016
if (Radiation_Data == "KNMI" and METEO_timestep == "Daily"):
# Check dataset
os.chdir(os.path.join(output_folder, Paths.KNMI))
Years_KNMI = []
for Date_yearly in Dates_yearly:
re = glob.glob(Formats.KNMI.format(yyyy=Date_yearly.year, mm = Date_yearly.month, dd = Date_yearly.day))
if len(re)>0:
Years_KNMI.append(Date_yearly.year)
Start_Rad = np.nanmin(Years_KNMI)
if METEO_timestep == "Monthly" or Radiation_Data == "GLDAS":
Start_Rad = Start_year_analyses
Dates_Net_Radiation = Functions.Get_Dekads(str(np.maximum(int(Start_year_analyses), Start_Rad)), End_year_analyses)
if Albedo_Data == "MODIS":
Albedo = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Albedo), Formats.Albedo, Dates_Net_Radiation, Conversion = Conversions.Albedo, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'Albedo', Product = 'MODIS', Unit = '-')
else:
Albedo_Array = np.ones([len(Dates_Net_Radiation), T.Size[1], T.Size[2]])* 0.17
for Dates_albedo in Dates_Net_Radiation:
Year_now = Dates_albedo.year
LU_Now = LU_END.Data[Year_now-Start_year_analyses,:,:]
Albedo_Array_now = np.ones([T.Size[1], T.Size[2]])* np.nan
Albedo_Array_now = np.where(LU_Now==1, 0.20, Albedo_Array_now)
Albedo_Array_now= np.where(LU_Now==2, 0.23, Albedo_Array_now)
Albedo_Array[Year_now-Start_year_analyses,:,:] = Albedo_Array_now
Albedo = DataCube.Rasterdata_Empty()
Albedo.Data = Albedo_Array * MASK
Albedo.Projection = T.Projection
Albedo.GeoTransform = T.GeoTransform
Albedo.Ordinal_time = np.array(list(map(lambda i : i.toordinal(), Dates_Net_Radiation)))
Albedo.Size = Albedo_Array.shape
Albedo.Variable = "Albedo"
Albedo.Unit = "-"
if METEO_timestep == "Monthly":
Dates_monthly = list(pd.date_range("%s-01-01" %str(Start_year_analyses), "%s-12-31" %End_year_analyses, freq = "MS"))
Temp_monthly = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Temp_monthly), Formats.Temp_monthly, Dates_monthly, Conversion = Conversions.Temp_monthly, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Temperature', Product = 'GLDAS', Unit = 'Celcius')
Hum_monthly = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Hum_monthly), Formats.Hum_monthly, Dates_monthly, Conversion = Conversions.Hum_monthly, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Humidity', Product = 'GLDAS', Unit = 'Percentage')
DSSF_monthly = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DSSF_monthly), Formats.DSSF_monthly, Dates_monthly, Conversion = Conversions.DSSF_monthly, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DSSF', Product = 'GLDAS', Unit = 'W/m2')
if METEO_timestep == "Daily":
Dates_Daily = list(pd.date_range("%s-01-01" %str(Start_year_analyses), "%s-12-31" %End_year_analyses))
Dates_Net_Radiation_Daily = list(pd.date_range("%s-01-01" %str(np.maximum(int(Start_year_analyses), Start_Rad)), "%s-12-31" %End_year_analyses))
# Open daily
if Radiation_Data == "LANDSAF":
DSLF_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DSLF), Formats.DSLF, Dates_Net_Radiation_Daily, Conversion = Conversions.DSLF, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DSLF', Product = 'LANDSAF', Unit = 'W/m2')
DSSF_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DSSF), Formats.DSSF, Dates_Net_Radiation_Daily, Conversion = Conversions.DSSF, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DSSF', Product = 'LANDSAF', Unit = 'W/m2')
if Radiation_Data == "KNMI":
DSSF_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.KNMI), Formats.KNMI, Dates_Net_Radiation_Daily, Conversion = Conversions.KNMI, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'SDS', Product = 'KNMI', Unit = 'W/m2')
if Radiation_Data == "GLDAS":
DSSF_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DSSF_GLDAS), Formats.DSSF_GLDAS, Dates_Net_Radiation_Daily, Conversion = Conversions.DSSF_GLDAS, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'SDS', Product = 'KNMI', Unit = 'W/m2')
Temp_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Temp), Formats.Temp, Dates_Daily, Conversion = Conversions.Temp, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Temperature', Product = 'GLDAS', Unit = 'Celcius')
Hum_daily = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Hum), Formats.Hum, Dates_Daily, Conversion = Conversions.Hum, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Humidity', Product = 'GLDAS', Unit = 'Percentage')
#################### Convert into daily datasets ############################################
Albedo_Daily = Functions.Calc_Daily_from_Dekads(Albedo)
################################### Calculate LAI ############################################
LAI_Data = np.log((1-np.minimum(T.Data/ET0.Data, 0.99)))/(-0.55)
if LU_Data == "":
LAI_Data[LUdek.Data==80] = 0.0
del LUdek
LAI_Data = LAI_Data.clip(0.0, 7.0)
# Write in DataCube
LAI = DataCube.Rasterdata_Empty()
LAI.Data = LAI_Data * MASK
LAI.Projection = T.Projection
LAI.GeoTransform = T.GeoTransform
LAI.Ordinal_time = T.Ordinal_time
LAI.Size = LAI_Data.shape
LAI.Variable = "Leaf Area Index"
LAI.Unit = "m2-m-2"
del LAI_Data
LAI.Save_As_Tiff(os.path.join(output_folder_L2, "LAI"))
if METEO_timestep == "Monthly":
DEM = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DEM), Formats.DEM, Dates = None, Conversion = Conversions.DEM, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DEM', Product = 'SRTM', Unit = 'm')
DSSF = Functions.Calc_Dekads_from_Monthly(DSSF_monthly)
Temp = Functions.Calc_Dekads_from_Monthly(Temp_monthly)
Hum = Functions.Calc_Dekads_from_Monthly(Hum_monthly)
###################### Calculate Net Radiation (daily) #####################################
DOY = np.array(list(map(lambda i : int(datetime.datetime.fromordinal(i).strftime('%j')), LAI.Ordinal_time[np.isin(LAI.Ordinal_time, DSSF.Ordinal_time)])))
Latitude = Albedo.GeoTransform[3] + Albedo.GeoTransform[5] * np.float_(list(range(0,Albedo.Size[1]))) - 0.5 * Albedo.GeoTransform[5]
Inverse_Relative_Distance_Earth_Sun = 1 + 0.033* np.cos(2 * np.pi * DOY/365)
Solar_Declanation = 0.409 * np.sin(2 * np.pi * DOY/365 - 1.39)
Sunset_Hour_Angle = np.squeeze(np.arccos(-np.tan(Latitude[None, :, None]/180 * np.pi)*np.tan(Solar_Declanation[:, None, None])))
Extra_Terrestrial_Radiation = np.squeeze(435.2 * Inverse_Relative_Distance_Earth_Sun[:, None, None] * (Sunset_Hour_Angle[:, :, None] * np.sin((Latitude[None, :, None]/180) * np.pi) * np.sin(Solar_Declanation[:, None, None]) + np.cos((Latitude[None, :, None]/180) * np.pi) * np.cos(Solar_Declanation[:, None, None]) * np.sin(Sunset_Hour_Angle[:, :, None])))
Saturated_Vapor_Pressure = 0.611 * np.exp((17.27 * Temp.Data)/(237.3 + Temp.Data))
Actual_Vapor_Pressure = Hum.Data * 0.01 * Saturated_Vapor_Pressure
Slope_Saturated_Vapor_Pressure = 4098 * Saturated_Vapor_Pressure/(Temp.Data+237.3)**2
Psy_Constant = 0.665 * 0.001 * 101.3 * ((293 - 0.0065 * DEM.Data)/293)**(5.26)
Net_Longwave_FAO = (0.34 - 0.14 * (Actual_Vapor_Pressure[np.isin(Temp.Ordinal_time, DSSF.Ordinal_time), :, :])**(0.5)) * (1.35 * DSSF.Data/(0.8 * Extra_Terrestrial_Radiation[:, :, None]) - 0.35) * 0.0000000567 * (273.15+Temp.Data[np.isin(Temp.Ordinal_time, DSSF.Ordinal_time), :, :])**4
Net_Longwave_Slob = 110 * (DSSF.Data/Extra_Terrestrial_Radiation[:, :, None])
Net_Longwave = np.where(Net_Longwave_FAO>Net_Longwave_Slob, Net_Longwave_FAO, Net_Longwave_Slob)
Net_Radiation_Data =(1 - Albedo.Data[np.isin(Albedo.Ordinal_time, DSSF.Ordinal_time), :, :])*DSSF.Data - Net_Longwave
Days_in_Dekads = np.append(Albedo.Ordinal_time[1:] - Albedo.Ordinal_time[:-1], 11)
###################### Calculate ET0 de Bruin Daily #####################################
ET0_deBruin_Data = ((Slope_Saturated_Vapor_Pressure[np.isin(Temp.Ordinal_time, DSSF.Ordinal_time), :, :]/(Slope_Saturated_Vapor_Pressure[np.isin(Temp.Ordinal_time, DSSF.Ordinal_time), :, :] + Psy_Constant[None, :, :])) * ((1 - 0.23) * DSSF.Data - Net_Longwave_Slob) + 20)/28.4 * Days_in_Dekads[:, None, None]
# Write in DataCube
ET0_deBruin = DataCube.Rasterdata_Empty()
ET0_deBruin.Data = ET0_deBruin_Data * MASK
ET0_deBruin.Projection = Albedo.Projection
ET0_deBruin.GeoTransform = Albedo.GeoTransform
ET0_deBruin.Ordinal_time = Albedo.Ordinal_time
ET0_deBruin.Size = ET0_deBruin_Data.shape
ET0_deBruin.Variable = "ET0 de Bruin"
ET0_deBruin.Unit = "mm-dekad-1"
ET0_deBruin.Save_As_Tiff(os.path.join(output_folder_L2, "ET0_deBruin"))
# Write in DataCube
Net_Radiation = DataCube.Rasterdata_Empty()
Net_Radiation.Data = Net_Radiation_Data * MASK
Net_Radiation.Projection = Albedo.Projection
Net_Radiation.GeoTransform = Albedo.GeoTransform
Net_Radiation.Ordinal_time = Albedo.Ordinal_time
Net_Radiation.Size = Net_Radiation_Data.shape
Net_Radiation.Variable = "Net Radiation"
Net_Radiation.Unit = "W-m-2"
if METEO_timestep == "Daily":
#################### Calculate Net Radiation LANDSAF method ###################################
if Radiation_Data == "LANDSAF":
################# Calculate Land Surface Emissivity ###########################################
Land_Surface_Emissivity_Data = np.minimum(1, 0.9 + 0.017 * LAI.Data[np.isin(LAI.Ordinal_time, Albedo.Ordinal_time), :, :])
Land_Surface_Emissivity_Data = Land_Surface_Emissivity_Data.clip(0, 1.0)
# Write in DataCube
Land_Surface_Emissivity = DataCube.Rasterdata_Empty()
Land_Surface_Emissivity.Data = Land_Surface_Emissivity_Data * MASK
Land_Surface_Emissivity.Projection = Albedo.Projection
Land_Surface_Emissivity.GeoTransform = Albedo.GeoTransform
Land_Surface_Emissivity.Ordinal_time = Albedo.Ordinal_time
Land_Surface_Emissivity.Size = Land_Surface_Emissivity_Data.shape
Land_Surface_Emissivity.Variable = "Land Surface Emissivity"
Land_Surface_Emissivity.Unit = "-"
del Land_Surface_Emissivity_Data
Land_Surface_Emissivity.Save_As_Tiff(os.path.join(output_folder_L2, "Land_Surface_Emissivity"))
#################### Convert into daily datasets ############################################
Land_Surface_Emissivity_Daily = Functions.Calc_Daily_from_Dekads(Land_Surface_Emissivity)
###################### Calculate Net Radiation (daily) #####################################
Net_Radiation_Data_Daily = (1 - Albedo_Daily.Data) * DSSF_daily.Data + DSLF_daily.Data * 1.15 - Land_Surface_Emissivity_Daily.Data * 0.0000000567 * (273.15 + Temp_daily.Data[np.isin(Temp_daily.Ordinal_time, DSLF_daily.Ordinal_time)] - 4)**4
Net_Radiation_Data_Daily = Net_Radiation_Data_Daily.clip(0, 500)
Net_Radiation_Data_Daily[Net_Radiation_Data_Daily == 0] = np.nan
del Land_Surface_Emissivity_Daily, DSSF_daily, DSLF_daily
#################### Calculate Net Radiation KNMI method ###################################
if Radiation_Data == "KNMI" or Radiation_Data == "GLDAS":
DEM = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DEM), Formats.DEM, Dates = None, Conversion = Conversions.DEM, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DEM', Product = 'SRTM', Unit = 'm')
###################### Calculate Net Radiation (daily) #####################################
DOY = np.array(list(map(lambda i : int(datetime.datetime.fromordinal(i).strftime('%j')), Albedo_Daily.Ordinal_time[np.isin(Albedo_Daily.Ordinal_time, DSSF_daily.Ordinal_time)])))
Latitude = Albedo.GeoTransform[3] + Albedo.GeoTransform[5] * np.float_(list(range(0,Albedo.Size[1]))) - 0.5 * Albedo.GeoTransform[5]
Inverse_Relative_Distance_Earth_Sun = 1 + 0.033* np.cos(2 * np.pi * DOY/365)
Solar_Declanation = 0.409 * np.sin(2 * np.pi * DOY/365 - 1.39)
Sunset_Hour_Angle = np.squeeze(np.arccos(-np.tan(Latitude[None, :, None]/180 * np.pi)*np.tan(Solar_Declanation[:, None, None])))
Extra_Terrestrial_Radiation = np.squeeze(435.2 * Inverse_Relative_Distance_Earth_Sun[:, None, None] * (Sunset_Hour_Angle[:, :, None] * np.sin((Latitude[None, :, None]/180) * np.pi) * np.sin(Solar_Declanation[:, None, None]) + np.cos((Latitude[None, :, None]/180) * np.pi) * np.cos(Solar_Declanation[:, None, None]) * np.sin(Sunset_Hour_Angle[:, :, None])))
Saturated_Vapor_Pressure = 0.611 * np.exp((17.27 * Temp_daily.Data)/(237.3 + Temp_daily.Data))
Actual_Vapor_Pressure = Hum_daily.Data * 0.01 * Saturated_Vapor_Pressure
Slope_Saturated_Vapor_Pressure = 4098 * Saturated_Vapor_Pressure/(Temp_daily.Data+237.3)**2
Psy_Constant = 0.665 * 0.001 * 101.3 * ((293 - 0.0065 * DEM.Data)/293)**(5.26)
Net_Longwave_FAO = (0.34 - 0.14 * (Actual_Vapor_Pressure[np.isin(Temp_daily.Ordinal_time, DSSF_daily.Ordinal_time), :, :])**(0.5)) * (1.35 * DSSF_daily.Data/(0.8 * Extra_Terrestrial_Radiation[:, :, None]) - 0.35) * 0.0000000567 * (273.15+Temp_daily.Data[np.isin(Temp_daily.Ordinal_time, DSSF_daily.Ordinal_time), :, :])**4
Net_Longwave_Slob = 110 * (DSSF_daily.Data/Extra_Terrestrial_Radiation[:, :, None])
Net_Longwave = np.where(Net_Longwave_FAO>Net_Longwave_Slob, Net_Longwave_FAO, Net_Longwave_Slob)
Net_Radiation_Data_Daily =(1 - Albedo_Daily.Data[np.isin(Albedo_Daily.Ordinal_time, DSSF_daily.Ordinal_time), :, :])*DSSF_daily.Data - Net_Longwave
del Hum_daily, Latitude, Inverse_Relative_Distance_Earth_Sun, Solar_Declanation, Sunset_Hour_Angle, Saturated_Vapor_Pressure, Actual_Vapor_Pressure, Net_Longwave_FAO, Net_Longwave
###################### Calculate ET0 de Bruin Daily #####################################
ET0_deBruin_Daily_Data = ((Slope_Saturated_Vapor_Pressure[np.isin(Temp_daily.Ordinal_time, DSSF_daily.Ordinal_time), :, :]/(Slope_Saturated_Vapor_Pressure[np.isin(Temp_daily.Ordinal_time, DSSF_daily.Ordinal_time), :, :] + Psy_Constant[None, :, :])) * ((1 - 0.23) * DSSF_daily.Data - Net_Longwave_Slob) + 20)/28.4
# Write in DataCube
ET0_deBruin_Daily = DataCube.Rasterdata_Empty()
ET0_deBruin_Daily.Data = ET0_deBruin_Daily_Data * MASK
ET0_deBruin_Daily.Projection = Albedo.Projection
ET0_deBruin_Daily.GeoTransform = Albedo.GeoTransform
ET0_deBruin_Daily.Ordinal_time = Albedo_Daily.Ordinal_time
ET0_deBruin_Daily.Size = ET0_deBruin_Daily_Data.shape
ET0_deBruin_Daily.Variable = "ET0 de Bruin"
ET0_deBruin_Daily.Unit = "mm-d-1"
# change from daily to decads
ET0_deBruin = Functions.Calc_Dekads_from_Daily(ET0_deBruin_Daily, flux_state = "flux")
ET0_deBruin.Unit = "mm-dekad-1"
del ET0_deBruin_Daily_Data, Net_Longwave_Slob, DSSF_daily, Psy_Constant
ET0_deBruin.Save_As_Tiff(os.path.join(output_folder_L2, "ET0_deBruin"))
# Write in DataCube
Net_Radiation_Daily = DataCube.Rasterdata_Empty()
Net_Radiation_Daily.Data = Net_Radiation_Data_Daily * MASK
Net_Radiation_Daily.Projection = Albedo.Projection
Net_Radiation_Daily.GeoTransform = Albedo.GeoTransform
Net_Radiation_Daily.Ordinal_time = Albedo_Daily.Ordinal_time
Net_Radiation_Daily.Size = Net_Radiation_Data_Daily.shape
Net_Radiation_Daily.Variable = "Net Radiation"
Net_Radiation_Daily.Unit = "W-m-2"
del Net_Radiation_Data_Daily, Albedo_Daily, ET0_deBruin_Daily, Albedo
############### convert Net Radiation to dekadal ############################################
Net_Radiation = Functions.Calc_Dekads_from_Daily(Net_Radiation_Daily, flux_state = "state")
Temp = Functions.Calc_Dekads_from_Daily(Temp_daily, flux_state = "state")
del Net_Radiation_Daily, Temp_daily
# Calc net Radiation of before 2016 if required
if (int(Start_year_analyses) < Start_Rad and METEO_timestep != "Monthly"):
Total_years = int(np.ceil(Net_Radiation.Size[0]/36))
Net_Radiation_Per_Dekad = np.ones([36, Net_Radiation.Size[1], Net_Radiation.Size[2]]) * np.nan
ET0_Per_Dekad = np.ones([36, Net_Radiation.Size[1], Net_Radiation.Size[2]]) * np.nan
IDs_diff = ET0.Size[0] - Net_Radiation.Size[0]
for dekad in range(0,36):
IDs_rad = np.array(range(0, Total_years)) * 36 + dekad
IDs_rad_good = IDs_rad[IDs_rad<=Net_Radiation.Size[0]]
IDs_et0 = np.array(range(0, Total_years)) * 36 + dekad + IDs_diff
IDs_et0_good = IDs_et0[IDs_et0<=ET0.Size[0]]
Net_Radiation_Per_Dekad[dekad, :, :] = np.nanmean(Net_Radiation.Data[IDs_rad_good,:,:], axis = 0)
ET0_Per_Dekad[dekad, :, :] = np.nanmean(ET0.Data[IDs_et0_good,:,:], axis = 0)
Ratio_per_dekad = Net_Radiation_Per_Dekad/ET0_Per_Dekad
Ratios = Ratio_per_dekad
for i in range(0, Start_Rad - int(Start_year_analyses)-1):
Ratios = np.vstack([Ratios, Ratio_per_dekad])
Net_Radiation_Before_Start_Rad = Ratios * ET0.Data[0:Ratios.shape[0],:,:]
Net_Radiation_Data = np.vstack([Net_Radiation_Before_Start_Rad, Net_Radiation.Data])
Net_Radiation.Data = Net_Radiation_Data
Net_Radiation.Size = Net_Radiation_Data.shape
Net_Radiation.Ordinal_time = T.Ordinal_time
del Net_Radiation_Data
Net_Radiation.Unit = "W-m-2"
Net_Radiation.Save_As_Tiff(os.path.join(output_folder_L2, "Net_Radiation"))
############################ Calculate Root Depth ##########################################
# Load inputs for LEVEL 2
ET = DataCube.Rasterdata_tiffs(os.path.join(output_folder, str(Paths.ET) %WAPOR_LVL), str(Formats.ET) %WAPOR_LVL, Dates, Conversion = Conversions.ET, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'ET', Product = 'WAPOR', Unit = 'mm/day')
P = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.P), Formats.P, Dates, Conversion = Conversions.P, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'P', Product = 'WAPOR', Unit = 'mm/day')
NPP = DataCube.Rasterdata_tiffs(os.path.join(output_folder, str(Paths.NPP) %WAPOR_LVL), str(Formats.NPP) %WAPOR_LVL, Dates, Conversion = Conversions.NPP, Example_Data = example_file, Mask_Data = example_file, Variable = 'NPP', Product = 'WAPOR', Unit = 'kg/ha/day')
############################ Calculate Root Depth ##########################################
Root_Depth_Data = 0.65 * 100 * np.maximum(0, -0.0326 * LAI.Data**2 + 0.4755 * LAI.Data - 0.0411)
Root_Depth_Data = Root_Depth_Data.clip(0, 500)
# Write in DataCube
Root_Depth = DataCube.Rasterdata_Empty()
Root_Depth.Data = Root_Depth_Data * MASK
Root_Depth.Projection = ET.Projection
Root_Depth.GeoTransform = ET.GeoTransform
Root_Depth.Ordinal_time = ET.Ordinal_time
Root_Depth.Size = Root_Depth_Data.shape
Root_Depth.Variable = "Root Depth"
Root_Depth.Unit = "cm"
del Root_Depth_Data
Root_Depth.Save_As_Tiff(os.path.join(output_folder_L2, "Root_Depth"))
################# Calculate Fractional Vegetation Cover #######################################
Fract_vegt_Data = 1-np.exp(-0.65 * LAI.Data)
Fract_vegt_Data = Fract_vegt_Data.clip(0, 1.0)
# Write in DataCube
Fract_vegt = DataCube.Rasterdata_Empty()
Fract_vegt.Data = Fract_vegt_Data * MASK
Fract_vegt.Projection = ET.Projection
Fract_vegt.GeoTransform = ET.GeoTransform
Fract_vegt.Ordinal_time = ET.Ordinal_time
Fract_vegt.Size = Fract_vegt_Data.shape
Fract_vegt.Variable = "Fractional Vegetation"
Fract_vegt.Unit = "-"
del Fract_vegt_Data
Fract_vegt.Save_As_Tiff(os.path.join(output_folder_L2, "Fractional_Vegetation_Cover"))
########################## Calculate maximum Kc ####################################
Kc_MAX_Data = np.minimum(1.4 ,0.95 * Fract_vegt.Data + 0.2)
Kc_MAX_Data = Kc_MAX_Data.clip(0, 500)
# Write in DataCube
Kc_MAX = DataCube.Rasterdata_Empty()
Kc_MAX.Data = Kc_MAX_Data * MASK
Kc_MAX.Projection = ET.Projection
Kc_MAX.GeoTransform = ET.GeoTransform
Kc_MAX.Ordinal_time = ET.Ordinal_time
Kc_MAX.Size = Kc_MAX_Data.shape
Kc_MAX.Variable = "Kc MAX"
Kc_MAX.Unit = "-"
del Kc_MAX_Data, Fract_vegt
Kc_MAX.Save_As_Tiff(os.path.join(output_folder_L2, "Kc_MAX"))
################# Calculate Evaporative Fraction ############################################
Evaporative_Fraction_Data = ET.Data *28.4/Net_Radiation.Data
Evaporative_Fraction_Data = Evaporative_Fraction_Data.clip(0, 1.5)
# Write in DataCube
Evaporative_Fraction = DataCube.Rasterdata_Empty()
Evaporative_Fraction.Data = Evaporative_Fraction_Data * MASK
Evaporative_Fraction.Projection = ET.Projection
Evaporative_Fraction.GeoTransform = ET.GeoTransform
Evaporative_Fraction.Ordinal_time = ET.Ordinal_time
Evaporative_Fraction.Size = Evaporative_Fraction_Data.shape
Evaporative_Fraction.Variable = "Evaporative Fraction"
Evaporative_Fraction.Unit = "-"
del Evaporative_Fraction_Data
Evaporative_Fraction.Save_As_Tiff(os.path.join(output_folder_L2, "Evaporative_Fraction"))
############## Calculate Land Theta Saturated Subsoil ##################################
# Open Constant
Bulk = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Bulk), Formats.Bulk.format(level=6), Dates = None, Conversion = Conversions.Bulk, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Bulk', Product = 'SoilGrids', Unit = 'kg/m3')
Sand = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Sand), Formats.Sand.format(level=6), Dates = None, Conversion = Conversions.Sand, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Sand', Product = 'SoilGrids', Unit = 'Percentage')
Clay = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Clay), Formats.Clay.format(level=6), Dates = None, Conversion = Conversions.Clay, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Clay', Product = 'SoilGrids', Unit = 'Percentage')
Theta_Sat_Subsoil_Data = 0.85 * (1 - Bulk.Data/2650) + 0.13 * Clay.Data * 0.01
# Write in DataCube
Theta_Sat_Subsoil = DataCube.Rasterdata_Empty()
Theta_Sat_Subsoil.Data = Theta_Sat_Subsoil_Data * MASK
Theta_Sat_Subsoil.Projection = ET.Projection
Theta_Sat_Subsoil.GeoTransform = ET.GeoTransform
Theta_Sat_Subsoil.Ordinal_time = None
Theta_Sat_Subsoil.Size = Theta_Sat_Subsoil_Data.shape
Theta_Sat_Subsoil.Variable = "Saturated Theta Subsoil"
Theta_Sat_Subsoil.Unit = "cm3-cm-3"
del Theta_Sat_Subsoil_Data
Theta_Sat_Subsoil.Save_As_Tiff(os.path.join(output_folder_L2, "Theta_Sat_Subsoil"))
################### Calculate Theta Field Capacity Subsoil #############################
Theta_FC_Subsoil_Data = -2.95 * Theta_Sat_Subsoil.Data**2 + 3.96 * Theta_Sat_Subsoil.Data - 0.871
# Write in DataCube
Theta_FC_Subsoil = DataCube.Rasterdata_Empty()
Theta_FC_Subsoil.Data = Theta_FC_Subsoil_Data * MASK
Theta_FC_Subsoil.Projection = ET.Projection
Theta_FC_Subsoil.GeoTransform = ET.GeoTransform
Theta_FC_Subsoil.Ordinal_time = None
Theta_FC_Subsoil.Size = Theta_FC_Subsoil_Data.shape
Theta_FC_Subsoil.Variable = "Field Capacity Subsoil"
Theta_FC_Subsoil.Unit = "cm3-cm-3"
del Theta_FC_Subsoil_Data
Theta_FC_Subsoil.Save_As_Tiff(os.path.join(output_folder_L2, "Theta_FC_Subsoil"))
################### Calculate Theta Wilting Point Subsoil ##############################
Theta_WP_Subsoil_Data = 3.0575 * Theta_FC_Subsoil.Data**4.5227
# Write in DataCube
Theta_WP_Subsoil = DataCube.Rasterdata_Empty()
Theta_WP_Subsoil.Data = Theta_WP_Subsoil_Data * MASK
Theta_WP_Subsoil.Projection = ET.Projection
Theta_WP_Subsoil.GeoTransform = ET.GeoTransform
Theta_WP_Subsoil.Ordinal_time = None
Theta_WP_Subsoil.Size = Theta_WP_Subsoil_Data.shape
Theta_WP_Subsoil.Variable = "Wilting Point Subsoil"
Theta_WP_Subsoil.Unit = "cm3-cm-3"
del Theta_WP_Subsoil_Data
Theta_WP_Subsoil.Save_As_Tiff(os.path.join(output_folder_L2, "Theta_WP_Subsoil"))
################### Calculate Theta Wilting Point Subsoil ##############################
Soil_Water_Holding_Capacity_Data = (Theta_FC_Subsoil.Data - Theta_WP_Subsoil.Data ) * 1000
# Write in DataCube
Soil_Water_Holding_Capacity = DataCube.Rasterdata_Empty()
Soil_Water_Holding_Capacity.Data = Soil_Water_Holding_Capacity_Data * MASK
Soil_Water_Holding_Capacity.Projection = ET.Projection
Soil_Water_Holding_Capacity.GeoTransform = ET.GeoTransform
Soil_Water_Holding_Capacity.Ordinal_time = None
Soil_Water_Holding_Capacity.Size = Soil_Water_Holding_Capacity_Data.shape
Soil_Water_Holding_Capacity.Variable = "Soil Water Holding Capacity"
Soil_Water_Holding_Capacity.Unit = "mm-m-1"
del Soil_Water_Holding_Capacity_Data
Soil_Water_Holding_Capacity.Save_As_Tiff(os.path.join(output_folder_L2, "Soil_Water_Holding_Capacity"))
################### Calculate Soil Moisture ############################################
Soil_Moisture_Data = Theta_Sat_Subsoil.Data * np.exp((Evaporative_Fraction.Data.clip(0, 0.9) - 1)/0.421)
# Write in DataCube
Soil_Moisture = DataCube.Rasterdata_Empty()
Soil_Moisture.Data = Soil_Moisture_Data * MASK
Soil_Moisture.Projection = ET.Projection
Soil_Moisture.GeoTransform = ET.GeoTransform
Soil_Moisture.Ordinal_time = ET.Ordinal_time
Soil_Moisture.Size = Soil_Moisture_Data.shape
Soil_Moisture.Variable = "Soil Moisture"
Soil_Moisture.Unit = "cm3-cm-3"
del Soil_Moisture_Data
Soil_Moisture.Save_As_Tiff(os.path.join(output_folder_L2, "Soil_Moisture"))
######################## Calculate days in each dekads #################################
Days_in_Dekads = np.append(ET.Ordinal_time[1:] - ET.Ordinal_time[:-1], 11)
######################## Calculate Crop Water Requirement ########################
Crop_Water_Requirement_Data = np.squeeze(np.maximum(Days_in_Dekads[:, None, None] * ET.Data, Kc_MAX.Data[None, :, :] * ET0.Data * Days_in_Dekads[:, None, None]), axis = 0)
# Write in DataCube
Crop_Water_Requirement = DataCube.Rasterdata_Empty()
Crop_Water_Requirement.Data = Crop_Water_Requirement_Data * MASK
Crop_Water_Requirement.Projection = ET.Projection
Crop_Water_Requirement.GeoTransform = ET.GeoTransform
Crop_Water_Requirement.Ordinal_time = ET.Ordinal_time
Crop_Water_Requirement.Size = Crop_Water_Requirement_Data.shape
Crop_Water_Requirement.Variable = "Crop Water Requirement"
Crop_Water_Requirement.Unit = "mm-dekad-1"
del Crop_Water_Requirement_Data, Kc_MAX
Crop_Water_Requirement.Save_As_Tiff(os.path.join(output_folder_L2, "Crop_Water_Requirement"))
######################## Calculate Critical Soil Moisture ########################
# Calculate Critical Soil Moisture
Critical_Soil_Moisture_Data = Theta_WP_Subsoil.Data[None,:,:] + (Theta_FC_Subsoil.Data[None,:,:] - Theta_WP_Subsoil.Data[None,:,:]) * (0.47+0.04*(5 - Crop_Water_Requirement.Data/Days_in_Dekads[:, None, None]))
# Write in DataCube
Critical_Soil_Moisture = DataCube.Rasterdata_Empty()
Critical_Soil_Moisture.Data = Critical_Soil_Moisture_Data * MASK
Critical_Soil_Moisture.Projection = ET.Projection
Critical_Soil_Moisture.GeoTransform = ET.GeoTransform
Critical_Soil_Moisture.Ordinal_time = ET.Ordinal_time
Critical_Soil_Moisture.Size = Critical_Soil_Moisture_Data.shape
Critical_Soil_Moisture.Variable = "Critical Soil Moisture"
Critical_Soil_Moisture.Unit = "cm3-cm-3"
del Critical_Soil_Moisture_Data
Critical_Soil_Moisture.Save_As_Tiff(os.path.join(output_folder_L2, "Critical_Soil_Moisture"))
del Critical_Soil_Moisture
################## Calculate Soil Moisture Start and End ########################
Soil_Moisture_Start_Data = np.concatenate((Soil_Moisture.Data[0,:,:][None, :, :],(Soil_Moisture.Data[:-1,:,:]+Soil_Moisture.Data[1:,:,:])/2), axis=0)
Soil_Moisture_End_Data = np.concatenate(((Soil_Moisture.Data[1:,:,:]+Soil_Moisture.Data[:-1,:,:])/2, Soil_Moisture.Data[-1,:,:][None, :, :]), axis=0)
# Write in DataCube
Soil_Moisture_Start = DataCube.Rasterdata_Empty()
Soil_Moisture_Start.Data = Soil_Moisture_Start_Data * MASK
Soil_Moisture_Start.Projection = Soil_Moisture.Projection
Soil_Moisture_Start.GeoTransform = Soil_Moisture.GeoTransform
Soil_Moisture_Start.Ordinal_time = Soil_Moisture.Ordinal_time
Soil_Moisture_Start.Size = Soil_Moisture_Start_Data.shape
Soil_Moisture_Start.Variable = "Soil Moisture Start"
Soil_Moisture_Start.Unit = "cm3-cm-3"
# Write in DataCube
Soil_Moisture_End = DataCube.Rasterdata_Empty()
Soil_Moisture_End.Data = Soil_Moisture_End_Data * MASK
Soil_Moisture_End.Projection = Soil_Moisture.Projection
Soil_Moisture_End.GeoTransform = Soil_Moisture.GeoTransform
Soil_Moisture_End.Ordinal_time = Soil_Moisture.Ordinal_time
Soil_Moisture_End.Size = Soil_Moisture_End_Data.shape
Soil_Moisture_End.Variable = "Soil Moisture End"
Soil_Moisture_End.Unit = "cm3-cm-3"
del Soil_Moisture_End_Data, Soil_Moisture_Start_Data
Soil_Moisture_Start.Save_As_Tiff(os.path.join(output_folder_L2, "Temp", "Soil_Moisture_Start"))
Soil_Moisture_End.Save_As_Tiff(os.path.join(output_folder_L2, "Temp", "Soil_Moisture_End"))
################## Calculate Soil Moisture Change ##################################
Soil_Moisture_Change_Data = 10 * Root_Depth.Data * (Soil_Moisture_End.Data - Soil_Moisture_Start.Data) # * Days_in_Dekads[:, None, None] heb deze term weggehaald #!!!
# Write in DataCube
Soil_Moisture_Change = DataCube.Rasterdata_Empty()
Soil_Moisture_Change.Data = Soil_Moisture_Change_Data * MASK
Soil_Moisture_Change.Projection = Soil_Moisture.Projection
Soil_Moisture_Change.GeoTransform = Soil_Moisture.GeoTransform
Soil_Moisture_Change.Ordinal_time = Soil_Moisture.Ordinal_time
Soil_Moisture_Change.Size = Soil_Moisture_Change_Data.shape
Soil_Moisture_Change.Variable = "Change Soil Moisture"
Soil_Moisture_Change.Unit = "mm-dekad-1"
del Soil_Moisture_Change_Data, Soil_Moisture_Start, Soil_Moisture_End
Soil_Moisture_Change.Save_As_Tiff(os.path.join(output_folder_L2, "Soil_Moisture_Change"))
################## Calculate Net Supply / Net Drainage ##############################
Net_Supply_Drainage_Data = (ET.Data - P.Data) * Days_in_Dekads[:, None, None] + Soil_Moisture_Change.Data
# Write in DataCube
Net_Supply_Drainage = DataCube.Rasterdata_Empty()
Net_Supply_Drainage.Data = Net_Supply_Drainage_Data * MASK
Net_Supply_Drainage.Projection = Soil_Moisture.Projection
Net_Supply_Drainage.GeoTransform = Soil_Moisture.GeoTransform
Net_Supply_Drainage.Ordinal_time = Soil_Moisture.Ordinal_time
Net_Supply_Drainage.Size = Net_Supply_Drainage_Data.shape
Net_Supply_Drainage.Variable = "Net Supply Drainage"
Net_Supply_Drainage.Unit = "mm-dekad-1"
del Net_Supply_Drainage_Data, Soil_Moisture_Change
Net_Supply_Drainage.Save_As_Tiff(os.path.join(output_folder_L2, "Temp", "Net_Supply_Drainage"))
del Net_Supply_Drainage
#################### Calculate Deep Percolation ###################################
Deep_Percolation_Data = np.maximum(0, (Soil_Moisture.Data - Theta_FC_Subsoil.Data[None, :, :]) * Root_Depth.Data * Days_in_Dekads[:, None, None])
# Write in DataCube
Deep_Percolation = DataCube.Rasterdata_Empty()
Deep_Percolation.Data = Deep_Percolation_Data * MASK
Deep_Percolation.Projection = Soil_Moisture.Projection
Deep_Percolation.GeoTransform = Soil_Moisture.GeoTransform
Deep_Percolation.Ordinal_time = Soil_Moisture.Ordinal_time
Deep_Percolation.Size = Deep_Percolation_Data.shape
Deep_Percolation.Variable = "Deep Percolation"
Deep_Percolation.Unit = "mm-dekad-1"
del Deep_Percolation_Data
Deep_Percolation.Save_As_Tiff(os.path.join(output_folder_L2, "Deep_Percolation"))
del Deep_Percolation
############### Calculate Storage coefficient for surface runoff #################
Storage_Coeff_Surface_Runoff_Data = 4 * (Sand.Data[None, :, :] * np.maximum(LAI.Data),0.5) * (Theta_Sat_Subsoil.Data[None, :, :] - Soil_Moisture.Data)
# Write in DataCube
Storage_Coeff_Surface_Runoff = DataCube.Rasterdata_Empty()
Storage_Coeff_Surface_Runoff.Data = Storage_Coeff_Surface_Runoff_Data * MASK
Storage_Coeff_Surface_Runoff.Projection = Soil_Moisture.Projection
Storage_Coeff_Surface_Runoff.GeoTransform = Soil_Moisture.GeoTransform
Storage_Coeff_Surface_Runoff.Ordinal_time = Soil_Moisture.Ordinal_time
Storage_Coeff_Surface_Runoff.Size = Storage_Coeff_Surface_Runoff_Data.shape
Storage_Coeff_Surface_Runoff.Variable = "Storage Coefficient Surface Runoff"
Storage_Coeff_Surface_Runoff.Unit = "mm-dekad-1"
del Storage_Coeff_Surface_Runoff_Data
Storage_Coeff_Surface_Runoff.Save_As_Tiff(os.path.join(output_folder_L2, "Storage_Coeff_Surface_Runoff"))
######################## Calculate Surface Runoff P #############################
I = DataCube.Rasterdata_tiffs(os.path.join(output_folder, str(Paths.I) %WAPOR_LVL), str(Formats.I) %WAPOR_LVL, Dates, Conversion = Conversions.I, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'I', Product = 'WAPOR', Unit = 'mm/day')
Surface_Runoff_P_Data = (Days_in_Dekads[:, None, None] * (P.Data - I.Data))**2/(Days_in_Dekads[:, None, None] * (P.Data- I.Data) + Storage_Coeff_Surface_Runoff.Data)
Surface_Runoff_P_Data[np.isnan(Surface_Runoff_P_Data)] = 0.0
# Write in DataCube
Surface_Runoff_P = DataCube.Rasterdata_Empty()
Surface_Runoff_P.Data = Surface_Runoff_P_Data * MASK
Surface_Runoff_P.Projection = Soil_Moisture.Projection
Surface_Runoff_P.GeoTransform = Soil_Moisture.GeoTransform
Surface_Runoff_P.Ordinal_time = Soil_Moisture.Ordinal_time
Surface_Runoff_P.Size = Surface_Runoff_P_Data.shape
Surface_Runoff_P.Variable = "Surface Runoff Precipitation"
Surface_Runoff_P.Unit = "mm-dekad-1"
del Surface_Runoff_P_Data, I, Storage_Coeff_Surface_Runoff
Surface_Runoff_P.Save_As_Tiff(os.path.join(output_folder_L2, "Surface_Runoff_P"))
######################## Calculate Surface Runoff P ##############################
Surface_Runoff_Coefficient_Data = np.maximum(0.1, Surface_Runoff_P.Data/(P.Data * Days_in_Dekads[:, None, None]))
Surface_Runoff_Coefficient_Data[np.isnan(Surface_Runoff_Coefficient_Data)] = 0.1
# Write in DataCube
Surface_Runoff_Coefficient = DataCube.Rasterdata_Empty()
Surface_Runoff_Coefficient.Data = Surface_Runoff_Coefficient_Data * MASK
Surface_Runoff_Coefficient.Projection = Soil_Moisture.Projection
Surface_Runoff_Coefficient.GeoTransform = Soil_Moisture.GeoTransform
Surface_Runoff_Coefficient.Ordinal_time = Soil_Moisture.Ordinal_time
Surface_Runoff_Coefficient.Size = Surface_Runoff_Coefficient_Data.shape
Surface_Runoff_Coefficient.Variable = "Surface Runoff Coefficient"
Surface_Runoff_Coefficient.Unit = "-"
del Surface_Runoff_Coefficient_Data
Surface_Runoff_Coefficient.Save_As_Tiff(os.path.join(output_folder_L2, "Surface_Runoff_Coefficient"))
del Surface_Runoff_P, Surface_Runoff_Coefficient
######################## Calculate updated maximum kc ######################
Kc_MAX_update_Data = Crop_Water_Requirement.Data/(Days_in_Dekads[:, None, None] * ET0.Data)
# Write in DataCube
Kc_MAX_update = DataCube.Rasterdata_Empty()
Kc_MAX_update.Data = Kc_MAX_update_Data * MASK
Kc_MAX_update.Projection = LAI.Projection
Kc_MAX_update.GeoTransform = LAI.GeoTransform
Kc_MAX_update.Ordinal_time = LAI.Ordinal_time
Kc_MAX_update.Size = Kc_MAX_update_Data.shape
Kc_MAX_update.Variable = "Kc MAX update"
Kc_MAX_update.Unit = "-"
del Kc_MAX_update_Data
Kc_MAX_update.Save_As_Tiff(os.path.join(output_folder_L2, "Kc_MAX_update"))
del Kc_MAX_update
################# Calculate 10 year Mean Net Radiation, per Pixel ########################
Total_years = int(np.ceil(Net_Radiation.Size[0]/36))
Net_Radiation_Long_Term_Data = np.ones([36, Net_Radiation.Size[1], Net_Radiation.Size[2]]) * np.nan
for dekad in range(0,36):
IDs = np.array(range(0, Total_years)) * 36 + dekad
IDs_good = IDs[IDs<=Net_Radiation.Size[0]]
Net_Radiation_Long_Term_Data[dekad, :, :] = np.nanmean(Net_Radiation.Data[IDs_good,:,:], axis = 0)
# Write in DataCube
Net_Radiation_Long_Term = DataCube.Rasterdata_Empty()
Net_Radiation_Long_Term.Data = Net_Radiation_Long_Term_Data * MASK
Net_Radiation_Long_Term.Projection = Soil_Moisture.Projection
Net_Radiation_Long_Term.GeoTransform = Soil_Moisture.GeoTransform
Net_Radiation_Long_Term.Ordinal_time = "Long_Term_Decade"
Net_Radiation_Long_Term.Size = Net_Radiation_Long_Term_Data.shape
Net_Radiation_Long_Term.Variable = "Long Term Net Radiation"
Net_Radiation_Long_Term.Unit = "W-m-2"
del Net_Radiation_Long_Term_Data
Net_Radiation_Long_Term.Save_As_Tiff(os.path.join(output_folder_L2, "Net_Radiation_Long_Term"))
del Net_Radiation_Long_Term
##################### Calculate 10 year mean evaporative fraction ###########################
Total_years = int(np.ceil(Evaporative_Fraction.Size[0]/36))
Evaporative_Fraction_Long_Term_Data = np.ones([36, Evaporative_Fraction.Size[1], Evaporative_Fraction.Size[2]]) * np.nan
for dekad in range(0,36):
IDs = np.array(range(0, Total_years)) * 36 + dekad
IDs_good = IDs[IDs<=Evaporative_Fraction.Size[0]]
Evaporative_Fraction_Long_Term_Data[dekad, :, :] = np.nanmean(Evaporative_Fraction.Data[IDs_good,:,:], axis = 0)
# Write in DataCube
Evaporative_Fraction_Long_Term = DataCube.Rasterdata_Empty()
Evaporative_Fraction_Long_Term.Data = Evaporative_Fraction_Long_Term_Data * MASK
Evaporative_Fraction_Long_Term.Projection = Soil_Moisture.Projection
Evaporative_Fraction_Long_Term.GeoTransform = Soil_Moisture.GeoTransform
Evaporative_Fraction_Long_Term.Ordinal_time = "Long_Term_Decade"
Evaporative_Fraction_Long_Term.Size = Evaporative_Fraction_Long_Term_Data.shape
Evaporative_Fraction_Long_Term.Variable = "Long Term Evaporative Fraction"
Evaporative_Fraction_Long_Term.Unit = "-"
del Evaporative_Fraction_Long_Term_Data
Evaporative_Fraction_Long_Term.Save_As_Tiff(os.path.join(output_folder_L2, "Evaporative_Fraction_Long_Term"))
del Evaporative_Fraction_Long_Term
######################### Calculate 10 yr mean soil moisture ###########################
Total_years = int(np.ceil(Evaporative_Fraction.Size[0]/36))
Soil_Moisture_Long_Term_Data = np.ones([36, Soil_Moisture.Size[1], Soil_Moisture.Size[2]]) * np.nan
for dekad in range(0,36):
IDs = np.array(range(0, Total_years)) * 36 + dekad
IDs_good = IDs[IDs<=Soil_Moisture.Size[0]]
Soil_Moisture_Long_Term_Data[dekad, :, :] = np.nanmean(Soil_Moisture.Data[IDs_good,:,:], axis = 0)
# Write in DataCube
Soil_Moisture_Long_Term = DataCube.Rasterdata_Empty()
Soil_Moisture_Long_Term.Data = Soil_Moisture_Long_Term_Data * MASK
Soil_Moisture_Long_Term.Projection = Soil_Moisture.Projection
Soil_Moisture_Long_Term.GeoTransform = Soil_Moisture.GeoTransform
Soil_Moisture_Long_Term.Ordinal_time = "Long_Term_Decade"
Soil_Moisture_Long_Term.Size = Soil_Moisture_Long_Term_Data.shape
Soil_Moisture_Long_Term.Variable = "Long Term Soil Moisture"
Soil_Moisture_Long_Term.Unit = "cm3-cm-3"
del Soil_Moisture_Long_Term_Data
Soil_Moisture_Long_Term.Save_As_Tiff(os.path.join(output_folder_L2, "Soil_Moisture_Long_Term"))
del Soil_Moisture_Long_Term
################## Calculate Available Water Before Depletion ##########################
Available_Before_Depletion_Data = 0.8 * (Theta_FC_Subsoil.Data[None, :, :] - 0.12) * 10 * Root_Depth.Data
# Write in DataCube
Available_Before_Depletion = DataCube.Rasterdata_Empty()
Available_Before_Depletion.Data = Available_Before_Depletion_Data * MASK
Available_Before_Depletion.Projection = Root_Depth.Projection
Available_Before_Depletion.GeoTransform = Root_Depth.GeoTransform
Available_Before_Depletion.Ordinal_time = Root_Depth.Ordinal_time
Available_Before_Depletion.Size = Available_Before_Depletion_Data.shape
Available_Before_Depletion.Variable = "Available Before Depletion"
Available_Before_Depletion.Unit = "mm"
del Theta_WP_Subsoil, Root_Depth
Available_Before_Depletion.Save_As_Tiff(os.path.join(output_folder_L2, "Available_Before_Depletion"))
del Available_Before_Depletion
############################### Calculate Phenelogy ####################################
L2.Phenology.Calc_Phenology(output_folder, Start_year_analyses, End_year_analyses, T, ET, NPP, P, Temp, ET0, LU_END, Phenology_pixels_year, Grassland_pixels_year, example_file, Days_in_Dekads, Phenology_Threshold, Phenology_Slope, Phenology_Var, inputs)
return()
def Create_LU_MAP(output_folder, Dates_yearly, LU, LUdek, Paths_LU_ESA, Formats_LU_ESA, example_file, LU_Data, LU_Legend):
    """Build one land-use map per year (1 = agriculture, 2 = grassland, nan = other)
    and derive per-dekad candidate masks for phenology (agriculture) and grassland.

    Parameters
    ----------
    output_folder : str
        Root output folder; yearly maps are written to ``LEVEL_2/LU_END``.
    Dates_yearly : sequence
        One date-like object per analysed year (must expose a ``.year`` attribute).
    LU, LUdek : raster data cubes
        Yearly and dekadal land-use stacks (``.Data``, ``.Size``,
        ``.GeoTransform``, ``.Projection`` are read here).
    Paths_LU_ESA, Formats_LU_ESA : str
        Location of the ESACCI land-cover map, used only when ``LU_Data == ""``.
    example_file : str
        Template raster used to reproject the ESACCI map.
    LU_Data : str
        Empty string to combine WAPOR with ESACCI; any other value means the
        user-supplied ``LU`` stack is classified using ``LU_Legend``.
    LU_Legend : dict
        Maps class names ("Agriculture", "Grassland") to lists of raw codes.

    Returns
    -------
    tuple of np.ndarray
        ``(Phenology_pixels_year, Grassland_pixels_year)`` with shape
        ``LUdek.Size``: 1 on candidate pixels, nan elsewhere.
    """
    # Create output folder LVL2
    output_folder_L2 = os.path.join(output_folder, "LEVEL_2")

    if LU_Data == "":
        # Location of the ESACCI land-cover product.
        input_file_LU_ESACCI = os.path.join(output_folder, Paths_LU_ESA, Formats_LU_ESA)

        # Code-conversion dictionaries for the WAPOR and ESACCI class codes.
        WAPOR_Conversions_dict = WAPOR_Conversions()
        ESACCI_Conversions_dict = ESACCI_Conversions()

        # The ESACCI map does not change per year, so reproject and read it
        # once here instead of once per loop iteration (hoisted invariant).
        destLUESACCI = RC.reproject_dataset_example(input_file_LU_ESACCI, example_file)
        LU_ESACCI = destLUESACCI.GetRasterBand(1).ReadAsArray()

    # Dekadal candidate masks, initialised to nan (= not a candidate).
    Phenology_pixels_year = np.ones(LUdek.Size) * np.nan
    Grassland_pixels_year = np.ones(LUdek.Size) * np.nan

    # Loop-invariant values (hoisted out of the yearly loop).
    Year_start = int(Dates_yearly[0].year)
    geo = LU.GeoTransform
    proj = LU.Projection

    # Loop over the years
    for Year in Dates_yearly:
        Year_int = int(Year.year)
        Year_idx = Year_int - Year_start  # index of this year in LU.Data

        if LU_Data == "":
            # Translate the raw class codes of both products into the
            # simplified legend (nan = others, 1 = agriculture, 2 = grassland).
            LU_Map_WAPOR = np.ones([LU.Size[1], LU.Size[2]]) * np.nan
            LU_Map_ESACCI = np.ones([LU.Size[1], LU.Size[2]]) * np.nan

            for code, simplified in WAPOR_Conversions_dict.items():
                LU_Map_WAPOR = np.where(LU.Data[int(Year_idx), :, :] == code, simplified, LU_Map_WAPOR)
            for code, simplified in ESACCI_Conversions_dict.items():
                LU_Map_ESACCI = np.where(LU_ESACCI == code, simplified, LU_Map_ESACCI)

            # Combine LU maps: agriculture (1) only where both products agree;
            # classes above 1 are taken from the WAPOR map alone.
            LU_END = np.where(np.logical_and(LU_Map_WAPOR == 1, LU_Map_ESACCI == 1), 1, np.nan)
            LU_END = np.where(LU_Map_WAPOR > 1, LU_Map_WAPOR, LU_END)
        else:
            # Classify the user-supplied land-use map with the given legend.
            LU_START = LU.Data[int(Year_idx), :, :]
            LU_END = np.ones(LU_START.shape) * np.nan
            for key, item in LU_Legend.items():
                if key == "Agriculture":
                    for i in item:
                        LU_END = np.where(LU_START == i, 1, LU_END)
                if key == "Grassland":
                    for i in item:
                        LU_END = np.where(LU_START == i, 2, LU_END)

        # Save LU map
        DC.Save_as_tiff(os.path.join(output_folder_L2, "LU_END", "LU_%s.tif" % Year_int), LU_END, geo, proj)

        # Mark possible perennial (agriculture) and grassland pixels for all
        # 36 dekads of this year.
        Phenology_pixels_year[int(Year_idx * 36):int(Year_idx * 36) + 36, :, :] = np.where(LU_END == 1, 1, np.nan)[None, :, :]
        Grassland_pixels_year[int(Year_idx * 36):int(Year_idx * 36) + 36, :, :] = np.where(LU_END == 2, 1, np.nan)[None, :, :]

    return (Phenology_pixels_year, Grassland_pixels_year)
def WAPOR_Conversions(version = '1.0'):
    """Return the WAPOR land-use code conversion table for *version*.

    The table maps raw WAPOR class codes to the simplified classes used
    downstream (1 = agriculture, 2 = grassland); codes absent from the
    table are treated as "other" by the caller.

    Raises KeyError for an unknown *version*.
    """
    tables = {
        '1.0': {
            41: 1,
            43: 1,
            42: 1,
            30: 2,
        },
    }
    return tables[version]
def ESACCI_Conversions(version = '1.0'):
    """Return the ESACCI land-cover code conversion table for *version*.

    The table maps raw ESACCI class codes to the simplified classes used
    downstream (1 = agriculture, 2 = grassland); codes absent from the
    table are treated as "other" by the caller.

    Raises KeyError for an unknown *version*.
    """
    tables = {
        '1.0': {
            10: 1,
            # 30: 1,  (kept disabled, as in the original table)
            20: 1,
            130: 2,
        },
    }
    return tables[version]
| TimHessels/WaporTranslator | LEVEL_2/Run_Intermediate_Parameters.py | Run_Intermediate_Parameters.py | py | 52,038 | python | en | code | 8 | github-code | 13 |
18950122170 | # -*- coding: utf-8 -*-
# weibifan 2022-10-8
# PaddleNLP,中文自然语言处理的工具,可以完成PLMs的下载,微调,及使用
# https://www.paddlepaddle.org.cn/paddle/paddlenlp
'''
https://paddlenlp.readthedocs.io/zh/latest/data_prepare/dataset_list.html
使用PaddleNLP语义预训练模型ERNIE优化情感分析
https://aistudio.baidu.com/aistudio/projectdetail/1294333
'''
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
train_ds, dev_ds, test_ds = load_dataset("chnsenticorp", splits=["train", "dev", "test"])
print(train_ds.label_list)
for data in train_ds.data[:5]:
print(data)
'''
'''
MODEL_NAME = "ernie-3.0-medium-zh"
# 实际上是加载了和这个模型一起的另外一个vocab.txt字典文件
# 对于分类任务来说,可以用字建立特征。ernie无法用词
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(MODEL_NAME)
ernie_model = ppnlp.transformers.ErnieModel.from_pretrained(MODEL_NAME)
model = ppnlp.transformers.ErnieForSequenceClassification.from_pretrained( \
MODEL_NAME, num_classes=len(train_ds.label_list))
from functools import partial
from paddlenlp.data import Stack, Tuple, Pad
from utils import convert_example, create_dataloader
# 模型运行批处理大小
batch_size = 32
max_seq_length = 128
trans_func = partial(
convert_example,
tokenizer=tokenizer,
max_seq_length=max_seq_length)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment
Stack(dtype="int64") # label
): [data for data in fn(samples)]
train_data_loader = create_dataloader(
train_ds,
mode='train',
batch_size=batch_size,
batchify_fn=batchify_fn,
trans_fn=trans_func)
dev_data_loader = create_dataloader(
dev_ds,
mode='dev',
batch_size=batch_size,
batchify_fn=batchify_fn,
trans_fn=trans_func)
from paddlenlp.transformers import LinearDecayWithWarmup
# 训练过程中的最大学习率
learning_rate = 5e-5
# 训练轮次
epochs = 1 #3
# 学习率预热比例
warmup_proportion = 0.1
# 权重衰减系数,类似模型正则项策略,避免模型过拟合
weight_decay = 0.01
num_training_steps = len(train_data_loader) * epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate, num_training_steps, warmup_proportion)
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
parameters=model.parameters(),
weight_decay=weight_decay,
apply_decay_param_fun=lambda x: x in [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
])
criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
import paddle.nn.functional as F
from utils import evaluate
global_step = 0
for epoch in range(1, epochs + 1):
for step, batch in enumerate(train_data_loader, start=1):
input_ids, segment_ids, labels = batch
logits = model(input_ids, segment_ids)
loss = criterion(logits, labels)
probs = F.softmax(logits, axis=1)
correct = metric.compute(probs, labels)
metric.update(correct)
acc = metric.accumulate()
global_step += 1
if global_step % 10 == 0 :
print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f" % (global_step, epoch, step, loss, acc))
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
evaluate(model, criterion, metric, dev_data_loader)
#model.save_pretrained('checkpoint')
#tokenizer.save_pretrained('checkpoint')
from utils import predict
data = [
{"text":'这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般'},
{"text":'怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片'},
{"text":'作为老的四星酒店,房间依然很整洁,相当不错。机场接机服务很好,可以在车上办理入住手续,节省时间。'},
]
label_map = {0: 'negative', 1: 'positive'}
results = predict(
model, data, tokenizer, label_map, batch_size=batch_size)
for idx, text in enumerate(data):
print('Data: {} \t Lable: {}'.format(text, results[idx])) | weibifan/myPaddleEx | PaddleNLP_ex4.py | PaddleNLP_ex4.py | py | 4,288 | python | en | code | 0 | github-code | 13 |
70765699218 | """SQLAlchemy models for Translation Buddy"""
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import auto_delete_orphans
bcrypt = Bcrypt()
db = SQLAlchemy()
class User(db.Model):
"""User in the system"""
__tablename__ = "users"
id = db.Column(
db.Integer,
primary_key=True,
)
username = db.Column(
db.Text,
nullable=False,
unique=True,
)
password = db.Column(
db.Text,
nullable=False,
)
phrasebooks = db.relationship("Phrasebook", backref="user", cascade='all, delete-orphan')
def __repr__(self):
return f"<User #{self.id}: {self.username}>"
@classmethod
def signup(cls, username, password):
"""Sign up user.
Hashes password and adds user to system.
"""
hashed_pwd = bcrypt.generate_password_hash(password).decode("UTF-8")
user = User(username=username, password=hashed_pwd)
db.session.add(user)
return user
@classmethod
def authenticate(cls, username, password):
"""Find user with `username` and `password`.
This is a class method (call it on the class, not an individual user.)
It searches for a user whose password hash matches this password
and, if it finds such a user, returns that user object.
If can't find matching user (or if password is wrong), returns False.
"""
user = cls.query.filter_by(username=username).first()
if user:
is_auth = bcrypt.check_password_hash(user.password, password)
if is_auth:
return user
return False
def delete(self):
"""Detlete user and any orphaned translations."""
translations = {t for pb in self.phrasebooks for t in pb.translations}
db.session.delete(self)
for t in translations:
t.delete_orphan()
class Phrasebook(db.Model):
"""A user's saved collection of phrases."""
__tablename__ = "phrasebooks"
id = db.Column(
db.Integer,
primary_key=True,
)
name = db.Column(
db.String(),
nullable=False,
)
user_id = db.Column(
db.Integer,
db.ForeignKey("users.id"),
nullable=False
)
public = db.Column(
db.Boolean,
default=False,
nullable=False,
)
lang_from = db.Column(db.String, nullable=False)
lang_to = db.Column(db.String, nullable=False)
translations = db.relationship(
"Translation",
secondary="phrasebook_translation",
backref="phrasebooks",
)
def __repr__(self):
return f"<Phrasebook #{self.id}: {self.name}>"
def delete(self):
translations = self.translations
db.session.delete(self)
for t in translations:
t.delete_orphan()
def delete_translation(self, translation):
'''Delete phrasebook translation association and delete translation if orphaned.'''
pt = PhrasebookTranslation.query.get((self.id, translation.id))
db.session.delete(pt)
translation.delete_orphan()
class PhrasebookTranslation(db.Model):
"""Mapping user phrasebooks to translations"""
__tablename__ = "phrasebook_translation"
# id = db.Column(
# db.Integer,
# primary_key=True
# )
phrasebook_id = db.Column(
db.Integer,
db.ForeignKey("phrasebooks.id"),
primary_key=True
)
translation_id = db.Column(
db.Integer,
db.ForeignKey("translations.id"),
primary_key=True
)
note = db.Column(db.Text)
translation=db.relationship("Translation", back_populates="pb_t", overlaps="phrasebooks,translations")
def __repr__(self):
return f"<Phrasebook #{self.phrasebook_id}, Translation #{self.translation_id}>"
class Translation(db.Model):
"""Translations that have been saved by a user."""
__tablename__ = "translations"
id = db.Column(
db.Integer,
primary_key=True,
)
lang_from = db.Column(
db.String(),
nullable=False,
)
lang_to = db.Column(
db.String,
nullable=False,
)
text_from = db.Column(
db.Text,
nullable=False,
)
text_to = db.Column(
db.Text,
nullable=False,
)
pb_t = db.relationship("PhrasebookTranslation", back_populates="translation", overlaps="phrasebooks,translations")
def __repr__(self):
return f"<Translation #{self.id}: {self.text_from} >> {self.text_to}>"
def delete_orphan(self):
"""Delete translation if it does not belong to any phrasebook."""
if not len(self.phrasebooks):
db.session.delete(self)
def to_dict(self):
"""Serialize SQLalchemy translation object into dictionary for storage in flask session. """
dict = {c.name: getattr(self, c.name) for c in self.__table__.columns}
return dict
def connect_db(app):
"""Connect this database to provided Flask app."""
db.app = app
db.init_app(app)
| adamnyk/capstone-1 | app/models.py | models.py | py | 5,224 | python | en | code | 0 | github-code | 13 |
13280201959 | import numpy as np
import matplotlib.pyplot as plt
import math
from tkinter import *
fields = 'xo', 'xf', 'yo', 'yf', 'Function'
def func(x,y,func):
return eval(func)
def getVal(entries, text):
a = float(entries[text].get())
return a
def getFunc(entries):
f = str(entries['Function'].get())
return f
def hur(entries):
global xo
global xf
global yo
global yf
global f
xo = getVal(entries, 'xo')
xf = getVal(entries, 'xf')
yo = getVal(entries, 'yo')
yf = getVal(entries, 'yf')
f = getFunc(entries)
root.quit()
def makeform(root, fields):
entries = {}
for field in fields:
row = Frame(root)
lab = Label(row, width=22, text=field+": ", anchor='w')
ent = Entry(row)
ent.insert(0,"0")
row.pack(side=TOP, fill=X, padx=5, pady=5)
lab.pack(side=LEFT)
ent.pack(side=RIGHT, expand=YES, fill=X)
entries[field] = ent
return entries
if __name__ == '__main__':
root = Tk()
root.title("Equation Grapher")
ents = makeform(root, fields)
b1 = Button(root, text='Calculate',
command=(lambda e=ents: hur(e)))
b1.pack(side=LEFT, padx=5, pady=5)
root.mainloop()
x = np.linspace(xo, xf, 50)
y = np.linspace(yo, yf, 50)
plt.figure()
X,Y = np.meshgrid(x, y)
Z = func(X, Y, f)
plt.contourf(X,Y,Z,20,cmap='RdGy')
plt.colorbar()
plt.show()
| jbaig77/graphing-toolkit | main.py | main.py | py | 1,395 | python | en | code | 1 | github-code | 13 |
5467532826 | import requests
from will import settings
from will.mixins import StorageMixin
from will.decorators import require_settings
from .base import AnalysisBackend
class HistoryAnalysis(AnalysisBackend, StorageMixin):
def do_analyze(self, message):
# Load the last few messages, add it to the context under "history"
history = self.load("message_history", [])
if not history:
history = []
max_history_context = getattr(settings, "HISTORY_CONTEXT_LENGTH", 20)
if history:
context = {
"history": history[:max_history_context]
}
else:
context = {
"history": [],
}
history.append(message)
self.save("message_history", history)
return context
| skoczen/will | will/backends/analysis/history.py | history.py | py | 807 | python | en | code | 405 | github-code | 13 |
73458188819 | from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.utils import np_utils
import numpy as np
batch_size = 20
nClasses = 2
dataAugmentation = False
#################################################
train_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
'/home/rohit/cv/Training_Dataset/',
target_size=(224, 224),
batch_size=batch_size)
validation_generator = test_datagen.flow_from_directory(
'/home/rohit/cv/Validation_Dataset/',
target_size=(224, 224),
batch_size=batch_size)
#################################################
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nClasses))
#model.load_weights("vgg16_model_11-20_epochs.h5")
#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mse',
optimizer='adam',
metrics=['accuracy'])
history = model.fit_generator(
train_generator,
samples_per_epoch=2000,
nb_epoch=2,
validation_data=validation_generator,
nb_val_samples=400,verbose = 1)
file = open("training_history.txt", "w")
file.write(str(history.history['acc']))
file.write(',')
file.write(str(history.history['val_acc']))
file.write(',')
file.write(str(history.history['loss']))
file.write(',')
file.write(str(history.history['val_loss']))
file.write(',')
file.close()
# serialize model to JSON
model_json = model.to_json()
with open("VGG16_Model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("VGG16_Model.h5")
print("Saved model to disk")
| akshatbjain/Object-Detection-using-Computer-Vision | Deep Learning approach/VGG16_Model_training.py | VGG16_Model_training.py | py | 3,418 | python | en | code | 0 | github-code | 13 |
36146856306 | from pkgfixtures import host_with_saved_yum_state
import json
# Requirements:
# From --hosts parameter:
# - host(A1): first XCP-ng host > 8.2.
# And:
# - access to XCP-ng RPM repository from hostA1
class TestUpdate:
def test_check_update(self, host):
host.call_plugin('updater.py', 'check_update')
def test_update(self, host):
host.yum_save_state()
host.call_plugin('updater.py', 'update')
res = host.call_plugin('updater.py', 'check_update')
assert res == "[]"
host.yum_restore_saved_state()
def test_package_update(self, host_with_saved_yum_state):
host = host_with_saved_yum_state
packages = host.get_available_package_versions('dummypkg')
assert len(packages) == 2
assert packages[0].startswith('dummypkg-0:1.0-1.xcpng')
assert packages[1].startswith('dummypkg-0:1.0-2.xcpng')
assert not host.is_package_installed(packages[0])
host.call_plugin('updater.py', 'install', {'packages': packages[0]})
assert host.is_package_installed(packages[0])
host.call_plugin('updater.py', 'update', {'packages': 'dummypkg'})
assert host.is_package_installed(packages[1])
class TestProxies:
def test_get_proxies(self, host):
proxies = json.loads(host.call_plugin('updater.py', 'get_proxies'))
for repo in 'xcp-ng-base', 'xcp-ng-testing', 'xcp-ng-updates':
assert repo in proxies
| xcp-ng/xcp-ng-tests | tests/xapi-plugins/plugin_updater/test_updater.py | test_updater.py | py | 1,447 | python | en | code | 3 | github-code | 13 |
41992726641 | from app import app
from flask import render_template
@app.route('/')
def homepage():
fighter_stats = {"Jon Jones":
{"url":"https://dmxg5wxfqgb4u.cloudfront.net/styles/athlete_bio_full_body/s3/2020-09/JONES_JON_L_12-29.png?VersionId=_V_SgUOaxjt7ja7ddhcJB4m9ALbyeMJz&itok=kMklO45v",
"description":"Jon Jones, the best MMA fighter of all time."},
"Anderson Silva":
{"url":"https://qph.cf2.quoracdn.net/main-qimg-1257c6e43e3b85f1e47b1deee45c88d4-lq",
"description":"Anderson Silva, once believed to be the GOAT."},
"Conor McGregor":
{"url":"https://dmxg5wxfqgb4u.cloudfront.net/styles/athlete_bio_full_body/s3/2021-07/MCGREGOR_CONOR_L_07-10.png?itok=xbg9Kwfj",
"description":"Conor McGregor, the biggest star in MMA history."},
"Stephen Thompson":
{"url":"https://dmxg5wxfqgb4u.cloudfront.net/styles/athlete_bio_full_body/s3/2022-12/THOMPSON_STEPHEN_L_12-03.png?itok=Ix8SmbJQ",
"description":"Stephen Thompson, the nicest mother effer in MMA history."},
"Joe Rogan":
{"url":"https://pyxis.nymag.com/v1/imgs/244/e39/98f4597712a76c23d84f2719beca4fbf44-12-joe-rogan.rsquare.w700.jpg",
"description":"Joe Rogan, the best commentator in MMA history."}}
return render_template('index.html', fighters = fighter_stats)
@app.route('/random_page')
def random_page():
return render_template('random_page.html') | ToddGallegos/coding_temple_flask_app | app/routes.py | routes.py | py | 1,645 | python | en | code | 0 | github-code | 13 |
40958803714 | S_EXT_LOW ='.s'
# File-extension and flag-prefix constants used to build the source lists below.
S_EXT_HI ='.S'
C_EXT ='.c'
O_EXT ='.o'
PRFX_I = '-I'
# INCLUDE directory definitions
#-------------------------------------------------------
dir_h = './include'
# All INCLUDE dir
asl_h = dir_h + '/asl'
handler_h = dir_h + '/handler'
pcb_h = dir_h + '/pcb'
scheduler_h = dir_h + '/scheduler'
system_h = dir_h + '/system'
sysshared_h = system_h + '/shared'
utilities_h = dir_h + '/utilities'
shared_device_h = sysshared_h + '/device'
# Architecture-specific INCLUDE dirs (uARM / uMPS)
uarm_h = dir_h + '/uarm'
umps_h = dir_h + '/umps'
sysuarm_h = system_h + '/uarm'
sysumps_h = system_h + '/umps'
uarm_handler_h = handler_h + '/uarm'
umps_handler_h = handler_h + '/umps'
# SOURCE directory definitions
#-------------------------------------------------------
dir_s = './src'
# All SOURCE dir
asl_s = dir_s + '/asl'
pcb_s = dir_s + '/pcb'
scheduler_s = dir_s + '/scheduler'
system_s = dir_s + '/system'
handler_s = dir_s + '/handler'
sysshared_s = system_s + '/shared'
device_s = sysshared_s + '/device'
utilities_f = dir_s + '/utilities'
# Architecture-specific SOURCE dirs
uarm_s = dir_s + '/uarm'
umps_s = dir_s + '/umps'
sysuarm_s = system_s + '/uarm'
sysumps_s = system_s + '/umps'
uarm_handler_s = handler_s + '/uarm'
umps_handler_s = handler_s + '/umps'
# Source files list (without extension)
#-------------------------------------------------------
main_f = dir_s + '/main'
p2test_f = dir_s + '/p2test_bikaya_v0.2'
test_f = dir_s + '/test'
# ASL Module
#--------------------
asl_f = asl_s + '/asl'
# Handler Module
#--------------------
handler_f = handler_s + '/shared'
##UARM dedicated
uarm_handler_f = uarm_handler_s + '/handler'
##UMPS dedicated
umps_handler_f = umps_handler_s + '/handler'
# PCB Module
#--------------------
pcb_f = pcb_s + '/pcb'
pcb_utils_f = pcb_s + '/utils'
# Scheduler Module
#--------------------
scheduler_f = scheduler_s + '/scheduler'
# Utilities Module
util_semaphore_f = utilities_f + '/semaphore'
#System Module
#--------------------
##SHARED
shared_device_f = device_s + '/device'
##UARM dedicated
uarm_shared_f = sysuarm_s + '/shared'
uarm_sysinit_f = sysuarm_s + '/sysinit'
##UMPS dedicated
umps_shared_f = sysumps_s + '/shared'
umps_sysinit_f = sysumps_s + '/sysinit'
# Architecture support files (startup code and runtime libraries)
#--------------------
##UARM dedicated
crtso_uarm = uarm_s + '/crtso'
libuarm = uarm_s + '/libuarm'
libdiv_uarm = uarm_s + '/libdiv'
##UMPS dedicated
crtso_umps = umps_s + '/crtso'
libumps = umps_s + '/libumps'
#---------------------------------------------------
# ENVIRONMENT
#---------------------------------------------------
# Available options: 'scons TARGET=uarm|umps' selects the architecture,
# 'scons -c' cleans; anything else prints help and exits.
#-----------------------
option_active = ARGUMENTS.get('TARGET', 'help')
umps_mode = (option_active=='umps' or option_active=='UMPS')
uarm_mode = (option_active=='uarm' or option_active=='UARM')
clean_mode = GetOption('clean')
if not umps_mode and not uarm_mode and not clean_mode:
    Help("Digita: 'scons TARGET=[UARM|uarm]' per buildare BiKaya per architetture uarm.\n")
    Help("Digita: 'scons TARGET=[UARM|umps]' per buildare BiKaya per architetture umps.\n")
    Help("Digita: 'scons -c' per pulire le directory.\n")
    Exit(2)
# Set Environments
# One cross-compiling SCons Environment per target architecture.
#-----------------------
## Environment UMPS: MIPS cross toolchain with a freestanding-kernel flag set.
umps_ENV = Environment(
    CC = 'mipsel-linux-gnu-gcc',
    AS = 'mipsel-linux-gnu-gcc',
    CFLAGS = '-Wall -O0 -ffreestanding -DTARGET_UMPS=1 -mips1 -mabi=32 -mno-gpopt -G 0 -mno-abicalls -fno-pic -mfp32 '
)
## Environment UARM: ARM7TDMI cross toolchain.
uarm_ENV = Environment(
    CC = 'arm-none-eabi-gcc',
    AS = 'arm-none-eabi-gcc',
    CFLAGS = '-Wall -O0 -mcpu=arm7tdmi -DTARGET_UARM=1 ',
)
#---------------------------------------------------
# COMPILE PHASE
#---------------------------------------------------
# Headers lists: include directories shared by both targets plus per-target extras.
#-------------------
shared_headers_list = [dir_h, system_h, sysshared_h, asl_h, handler_h, pcb_h, scheduler_h, utilities_h, shared_device_h ]
uarm_headers_list = [uarm_h, sysuarm_h, uarm_handler_h]
umps_headers_list = [umps_h, sysumps_h, umps_handler_h]
# Prefix every include directory with '-I' so the lists can be passed straight
# to gcc via CFLAGS.
shared_headers_list = [PRFX_I + header_dir for header_dir in shared_headers_list]
uarm_headers_list = [PRFX_I + header_dir for header_dir in uarm_headers_list]
umps_headers_list = [PRFX_I + header_dir for header_dir in umps_headers_list]
# Source (NOEXT) lists: file stems, extensions are appended below.
#-------------------
shared_noext_list = [main_f, p2test_f, test_f, handler_f, scheduler_f, pcb_f, pcb_utils_f, asl_f, shared_device_f, util_semaphore_f ]
# Please keep the crtso* and lib* files LAST: the loops below rewrite the
# trailing entries to their assembly (.s / .S) extensions by position.
uarm_noext_list = [uarm_shared_f, uarm_handler_f, uarm_sysinit_f, crtso_uarm, libuarm, libdiv_uarm]
umps_noext_list = [umps_handler_f, umps_shared_f, umps_sysinit_f, crtso_umps, libumps]
# Source .C lists
#-------------------
shared_c_list = []
uarm_c_list = []
umps_c_list = []
for x in shared_noext_list:
    shared_c_list.append(x+C_EXT)
for x in uarm_noext_list:
    uarm_c_list.append(x+C_EXT)
# The last three uARM entries (crtso, libuarm, libdiv) are assembly sources.
uarm_c_list[-1] = uarm_c_list[-1].replace(C_EXT, S_EXT_LOW)
uarm_c_list[-2] = uarm_c_list[-2].replace(C_EXT, S_EXT_LOW)
uarm_c_list[-3] = uarm_c_list[-3].replace(C_EXT, S_EXT_LOW)
for x in umps_noext_list:
    umps_c_list.append(x+C_EXT)
# The last two uMPS entries (crtso, libumps) are .S assembly sources.
umps_c_list[-1] = umps_c_list[-1].replace(C_EXT, S_EXT_HI)
umps_c_list[-2] = umps_c_list[-2].replace(C_EXT, S_EXT_HI)
# Source .O lists: expected object files, used for the custom link line and cleanup.
#-------------------
shared_o_list = []
uarm_o_list = []
umps_o_list = []
for x in shared_noext_list:
    shared_o_list.append(x+O_EXT)
for x in uarm_noext_list:
    uarm_o_list.append(x+O_EXT)
for x in umps_noext_list:
    umps_o_list.append(x+O_EXT)
# UARM build
#---------------------
if uarm_mode:
    shared_headers_list.extend(uarm_headers_list)
    shared_o_list.extend(uarm_o_list)
    shared_c_list.extend(uarm_c_list)
    uarm_lib = shared_headers_list #WRAP
    uarm_obj = shared_o_list #WRAP
    uarm_src = shared_c_list #WRAP
    # Settings: custom linker script and a hand-built ld command line.
    #-----------------
    LDFLAGS = ' -G 0 -nostdlib -T '+uarm_s+'/elf32ltsarm.h.uarmcore.x '
    uarm_ENV.Append(CFLAGS = ' '.join(uarm_lib))
    uarm_ENV.Replace(ASFLAGS = uarm_ENV['CFLAGS']+' -c')
    uarm_ENV.Replace(LINKCOM = 'arm-none-eabi-ld -o kernel '+' '.join(uarm_obj)+LDFLAGS)
    # Build
    #----------------
    print("Compilazione kernel: uarm_mode attivo.")
    uarm_ENV.Program('kernel',uarm_src)
# UMPS build
#---------------------
if umps_mode:
    shared_headers_list.extend(umps_headers_list)
    shared_o_list.extend(umps_o_list)
    shared_c_list.extend(umps_c_list)
    umps_lib = shared_headers_list #WRAP
    umps_obj = shared_o_list #WRAP
    umps_src = shared_c_list # WRAP
    # Settings
    #-----------------
    LDFLAGS = ' -G 0 -nostdlib -T '+umps_s+'/umpscore.ldscript '
    umps_ENV.Append(CFLAGS = ' '.join(umps_lib))
    umps_ENV.Replace(ASFLAGS = umps_ENV['CFLAGS']+' -c')
    umps_ENV.Replace(LINKCOM = 'mipsel-linux-gnu-ld -o kernel '+' '.join(umps_obj)+LDFLAGS)
    # Build: link the kernel, then post-process it into the uMPS image format.
    #-----------------
    print("Compilazione kernel: umps_mode attivo.")
    main = umps_ENV.Program('kernel',umps_src)
    elf = umps_ENV.Command('elf',None,'umps2-elf2umps -k kernel')
    Depends(elf, main)
#---------------------------------------------------
# CLEAN PHASE
#---------------------------------------------------
# Everything 'scons -c' should remove: the kernel images plus all objects.
clean_list = ['kernel', 'kernel.core.umps','kernel.stab.umps']
clean_list.extend(shared_o_list)
clean_list.extend(uarm_o_list)
clean_list.extend(umps_o_list)
uarm_ENV.Clean(' ',clean_list)
| jjak0b/BiKayaOS | SConstruct | SConstruct | 7,744 | python | en | code | 0 | github-code | 13 | |
10391688117 | from GR86.chassis.taskcenter.tushare_task import TushareTask
from GR86.chassis.spider.tushare import Tushare
from GR86.chassis.taskcenter.work_type import WorkType
import queue
import logging
from threading import Thread, Event
import sys
import time
# Module-level flag; Producer threads set() it after each successful fetch.
event = Event()
class Producer(Tushare, Thread):
    """Worker thread that drains ts_codes from fetch_data_queue, downloads the
    statement type selected by work_type from Tushare, and enqueues the result
    on save_to_mongo_queue for Consumer threads to persist."""

    def __init__(self, thread_id, fetch_data_queue, save_to_mongo_queue, work_type):
        Tushare.__init__(self)
        Thread.__init__(self)
        self.work_type = work_type
        self.thread_id = thread_id
        self.fetch_data_queue = fetch_data_queue        # ts_codes still to download
        self.save_to_mongo_queue = save_to_mongo_queue  # downloaded frames awaiting persistence

    def get_data_by_work_type(self, item):
        """Download the statement selected by self.work_type for ts_code *item*.

        Raises UnboundLocalError for an unknown work_type (no default branch),
        matching the original behavior.
        """
        # Fixed reporting window shared by every statement type.
        start_date = "20170101"
        end_date = "20221229"
        if self.work_type == WorkType.BALANCE_SHEET:
            data = Tushare.get_balance_sheet(self, ts_code=item, start_date=start_date, end_date=end_date)
        elif self.work_type == WorkType.INCOME_STATEMENT:
            data = Tushare.get_income_statement(self, ts_code=item, start_date=start_date, end_date=end_date)
        elif self.work_type == WorkType.CASH_FLOW:
            data = Tushare.get_cash_flow(self, ts_code=item, start_date=start_date, end_date=end_date)
        elif self.work_type == WorkType.FIN_AUDIT_OPINION:
            data = Tushare.get_audit_opinion(self, ts_code=item, start_date=start_date, end_date=end_date)
        elif self.work_type == WorkType.FIN_INDICATORS:
            data = Tushare.get_financial_indicators(self, ts_code=item, start_date=start_date, end_date=end_date)
        return data

    def run(self):
        while not self.fetch_data_queue.empty():
            item = None  # fix: `item` was unbound if queue.get() ever raised queue.Empty
            try:
                item = self.fetch_data_queue.get()
                time.sleep(0.1)  # throttle to respect the Tushare API rate limit
                data = self.get_data_by_work_type(item)
                logging.info('爬取数据: 获取 股票 %s , 数据长度 %s' % (item, data.size))
                save_to_mongo_data = {
                    "work_type": self.work_type,
                    "data": data
                }
                self.fetch_data_queue.task_done()
                self.save_to_mongo_queue.put(save_to_mongo_data)
                event.set()
            except queue.Empty:
                # fix: the original applied '%' to a template with no
                # placeholders ('任务完成' % (...)), which raised TypeError.
                logging.info('任务完成: %s %s' % (item, self.thread_id))
            if item is None:
                logging.info('下载完成')
                break
class Consumer(Thread, TushareTask):
    """Worker thread that drains downloaded frames from save_to_mongo_queue and
    writes each to the MongoDB collection matching its work_type."""

    def __init__(self, thread_id, fetch_data_queue, save_to_mongo_queue):
        Thread.__init__(self)
        TushareTask.__init__(self)
        self.thread_id = thread_id
        self.fetch_data_queue = fetch_data_queue        # watched only to detect completion
        self.save_to_mongo_queue = save_to_mongo_queue  # items produced by Producer threads

    def start_save_data_t_mongodb(self, item):
        """Dispatch *item* to the persistence method matching its work_type."""
        if item.get("work_type") == WorkType.BALANCE_SHEET:
            TushareTask.save_balance_sheet_mongodb(self, sheet=item.get('data').to_dict("records"))
        elif item.get("work_type") == WorkType.INCOME_STATEMENT:
            TushareTask.save_income_statement_to_mongodb(self, sheet=item.get('data').to_dict("records"))
        elif item.get("work_type") == WorkType.CASH_FLOW:
            TushareTask.save_cash_flow_to_mongodb(self, sheet=item.get('data').to_dict("records"))
        elif item.get("work_type") == WorkType.FIN_AUDIT_OPINION:
            TushareTask.save_financial_audit_opinion_to_mongodb(self, sheet=item.get('data').to_dict("records"))
        elif item.get("work_type") == WorkType.FIN_INDICATORS:
            TushareTask.save_financial_indicators_to_mongodb(self, sheet=item.get('data').to_dict("records"))

    def run(self):
        while True:
            if self.save_to_mongo_queue.empty():
                # fix: was Event().wait(0.01), which allocated a brand-new
                # threading.Event on every iteration just to sleep.
                time.sleep(0.01)
            # NOTE(review): exiting while save_to_mongo_queue may still hold
            # items (fetch queue drained first) can drop data — confirm intent.
            if self.fetch_data_queue.empty():
                logging.info('Consumer notify : no item to consume')
                break
            item = self.save_to_mongo_queue.get()
            if item.get('data').size > 0:
                self.start_save_data_t_mongodb(item)
                logging.info('存入mongodb: 数据长度 %s , %s' % (item.get('data').size, self.thread_id))
            self.save_to_mongo_queue.task_done()
def do_task():
    """Build work queues for every SSE/SZSE stock and run producer/consumer
    threads that download and persist audit-opinion and financial-indicator
    data. (Balance-sheet / cash-flow / income producers are currently disabled.)"""
    # FinialStatementTask is defined later in this module; the name is resolved
    # at call time, so the forward reference is fine.
    f = FinialStatementTask()
    fetch_balance_queue = queue.Queue(maxsize=0)
    fetch_cash_flow_queue = queue.Queue(maxsize=0)
    fetch_income_queue = queue.Queue(maxsize=0)
    fetch_final_audit_opinion = queue.Queue(maxsize=0)
    fetch_final_indicators = queue.Queue(maxsize=0)
    save_to_mongo_queue = queue.Queue(maxsize=0)
    list_stock = f.init_queue()
    logging.info(list_stock)
    for item in list_stock:
        # fetch_balance_queue.put(item)
        # fetch_cash_flow_queue.put(item)
        # fetch_income_queue.put(item)
        fetch_final_audit_opinion.put(item)
        fetch_final_indicators.put(item)
    # p1 = Producer('Producer-0001', fetch_balance_queue, save_to_mongo_queue, WorkType.BALANCE_SHEET)
    # p2 = Producer('Producer-0002', fetch_cash_flow_queue, save_to_mongo_queue, WorkType.CASH_FLOW)
    # p3 = Producer('Producer-0003', fetch_income_queue, save_to_mongo_queue, WorkType.INCOME_STATEMENT)
    p4 = Producer('Producer-0004', fetch_final_audit_opinion, save_to_mongo_queue, WorkType.FIN_AUDIT_OPINION)
    p5 = Producer('Producer-0005', fetch_final_indicators, save_to_mongo_queue, WorkType.FIN_INDICATORS)
    # NOTE(review): both consumers watch fetch_final_audit_opinion for
    # completion while p5 feeds from fetch_final_indicators — confirm intent.
    t1 = Consumer('Consumer-0001', fetch_final_audit_opinion, save_to_mongo_queue)
    t2 = Consumer('Consumer-0002', fetch_final_audit_opinion, save_to_mongo_queue)
    try:
        p4.start()
        p5.start()
        # p3.start()
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        p4.join()
        p5.join()
    except KeyboardInterrupt:
        sys.exit(1)
class FinialStatementTask(TushareTask):
    """Query helpers over the stock-list MongoDB collection."""

    def __init__(self):
        super().__init__()

    def init_queue(self):
        """Return the symbols of every stock listed on SSE or SZSE."""
        # SSE = Shanghai, SZSE = Shenzhen, BSE = Beijing (excluded here).
        exchange_filter = {"$or": [{"exchange": "SSE"}, {"exchange": "SZSE"}]}
        cursor = self.collection_stock_list.find(exchange_filter)
        return [record.get("symbol") for record in cursor]

    def get_stock(self, symbol):
        """Return a cursor over stock-list records whose symbol matches."""
        return self.collection_stock_list.find({"symbol": symbol})
| JackFrankWen/nothing | GR86/chassis/taskcenter/financial_statement_task.py | financial_statement_task.py | py | 6,289 | python | en | code | 0 | github-code | 13 |
70195365458 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import re
from cStringIO import StringIO
from pyqt.utils import PyQtSignalMapper
try:
    from ipshell import IterableIPShell
except ImportError:
    # Narrowed from a bare `except:`; only a missing IPython backend should
    # trigger the plain-Python shell fallback.
    from pshell import IterablePShell as IterableIPShell
# Mapping of ANSI terminal color escape codes (the numeric part of '\x1b[..m')
# to X11/Qt color names, used by ConsoleView.write() to colorize output.
ANSI_COLORS = {'0' : 'black',
               '1' : 'white',
               '0;30': 'black',
               '0;31': 'red',
               '0;32': 'green',
               '0;33': 'darkRed',
               '0;34': 'blue',
               '0;35': 'magenta',
               '0;36': 'cyan',
               '0;37': 'lightGray',
               '1;30': 'darkGray',
               '1;31': 'darkRed',
               '1;32': 'green',
               '1;33': 'yellow',
               '1;34': 'blue',
               '1;35': 'magenta',
               '1;36': 'darkCyan',
               '1;37': 'white'}
class ConsoleView(QTextEdit):
    '''
    Specialized text view for console-like workflow.

    @ivar text_document: Widget's text document.
    @type text_document: QTextDocument
    @ivar color_pat: Regex matching ANSI terminal color escape sequences.
    @type color_pat: _sre.SRE_Pattern
    @ivar prompt_len: Character length of the most recently printed prompt.
    @ivar fixed_position: Document offset of the first editable character
        (just after the prompt); edits before it are rejected or redirected.
    '''

    __pyqtSignals__ = ('closed',)

    def __init__(self, title, parent=None):
        '''
        Initialize console view.
        '''
        QTextEdit.__init__(self, parent)
        self.setWindowTitle(title)
        self.prompt_len = 0
        self.fixed_position = 0
        self.setFontFamily('monospace')
        self.setTextInteractionFlags(Qt.TextEditorInteraction)
        self.text_document = self.document()
        # \x01/\x02 optionally bracket the escape; group 1 captures the color code.
        self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
        self.qblack = QColor('black')
        # Local factory for the context-menu actions registered below.
        def make_action(text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, signal='triggered()'):
            action = QAction(text, self)
            if icon is not None:
                action.setIcon(QIcon(icon))
            if shortcut is not None:
                action.setShortcut(shortcut)
            if tip is not None:
                action.setToolTip(tip)
                action.setStatusTip(tip)
            if slot is not None:
                self.connect(action, SIGNAL(signal), slot)
            if checkable:
                action.setCheckable(True)
            return action
        # Route every edit action through one signal mapper into actionSlot.
        sm = PyQtSignalMapper(self)
        for text, shortcut in zip(*[('Cut', 'Copy', 'Paste', 'Select All', 'Clear'),
                ('Ctrl+X', 'Ctrl+Alt+C', 'Ctrl+V', 'Ctrl+W', 'Ctrl+L')]):
            action = make_action(text, slot=sm.map, shortcut=shortcut)
            self.addAction(action)
            sm.setMapping(action, text)
        sm.connect(sm, SIGNAL('mapped'), self.actionSlot)
        self.setContextMenuPolicy(Qt.ActionsContextMenu)

    def actionSlot(self, text):
        '''
        Replay a context-menu action as the equivalent keyboard shortcut so it
        goes through the same keyPressEvent filtering as typed input.
        '''
        text = str(text)
        qtype = QEvent.KeyPress
        modifiers = Qt.ControlModifier
        if text == 'Cut':
            key, text = Qt.Key_X, 'x'
        elif text == 'Copy':
            modifiers = modifiers | Qt.ShiftModifier
            key, text = Qt.Key_C, 'c'
        elif text == 'Paste':
            key, text = Qt.Key_V, 'v'
        elif text == 'Select All':
            key, text = Qt.Key_A, 'a'
        elif text == 'Clear':
            key, text = Qt.Key_L, 'l'
        else:
            raise Exception('Unknown object ID:', text)
        keyEvent = QKeyEvent(qtype, key, modifiers, text)
        self.keyPressEvent(keyEvent)

    def closeEvent(self, event):
        # Accept the close and notify listeners (see __pyqtSignals__).
        event.accept()
        self.emit(SIGNAL('closed'))

    def clear(self):
        # Wipe the whole buffer but preserve the command currently being typed.
        # NOTE: self.prompt is presumably supplied by the shell mix-in subclass
        # (e.g. IPythonView) — it is not defined in this class.
        line = self.getCurrentLine()
        self.setPlainText('')
        self.showPrompt(self.prompt)
        self.write(line)

    def write(self, text):
        '''
        Write given text to buffer, honoring embedded ANSI color escapes.

        @param text: Text to append.
        @type text: string
        @return: Number of visible (non-escape) characters written.
        @rtype: integer
        '''
        text_len = 0
        segments = self.color_pat.split(text)
        segment = segments.pop(0)
        self.textCursor().insertText(segment)
        text_len += len(segment)
        if segments:
            # Each tag colors the segment that follows it; unknown tags reset to black.
            ansi_tags = self.color_pat.findall(text)
            for tag in ansi_tags:
                i = segments.index(tag)
                if tag in ANSI_COLORS:
                    self.setTextColor(QColor(ANSI_COLORS[tag]))
                else:
                    self.setTextColor(self.qblack)
                self.textCursor().insertText(segments[i+1])
                text_len += len(segments[i+1])
                segments.pop(i)
        self.ensureCursorVisible()
        return text_len

    def showPrompt(self, prompt):
        '''
        Prints prompt at start of line and records where editable text begins.

        @param prompt: Prompt to print.
        @type prompt: string
        '''
        self.prompt_len = self.write(prompt)
        line_start = self.textCursor()
        line_start.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
        line_start.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
        # First editable character sits right after the prompt.
        self.fixed_position = line_start.position() + self.prompt_len

    def changeLine(self, text):
        '''
        Replace currently entered command line with given text.

        @param text: Text to use as replacement.
        @type text: string
        '''
        text_cursor = self.textCursor()
        text_cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
        text_cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
        # Skip past the prompt so only the command text is removed.
        text_cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor, self.prompt_len)
        text_cursor.removeSelectedText()
        self.setTextCursor(text_cursor)
        self.setTextColor(self.qblack)
        self.write(text)

    def getCurrentLine(self):
        '''
        Get text in current command line (everything after the prompt).

        @return: Text of current command line.
        @rtype: string
        '''
        text_cursor = self.textCursor()
        text_cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
        text_cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
        text_cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor, self.prompt_len)
        rv = str(text_cursor.selectedText())
        return rv

    def showReturned(self, text):
        '''
        Show returned text from last command and print new prompt.

        @param text: Text to show.
        @type text: string
        '''
        self.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
        self.write('\n'+text)
        if text:
            self.write('\n')
        self.showPrompt(self.prompt)

    def gotoEnd(self, resetColor=True):
        # Move the cursor to the end of the document and reset the text color.
        # NOTE(review): the resetColor flag is currently ignored; the color is
        # always reset to black.
        self.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
        self.setTextColor(self.qblack)

    def canInsertFromMimeData(self, source):
        # Only plain-text pastes are accepted.
        if source.hasText():
            return True
        else:
            return False

    def insertFromMimeData(self, source):
        # Paste always lands at the end of the command line, via write().
        if source.hasText():
            text = source.text()
            self.gotoEnd()
            self.write(text)

    def keyPressEvent(self, event):
        '''
        Key press callback used for correcting behavior for console-like
        interfaces. For example 'home' should go to prompt, not to begining of
        line.

        @return: Return True if event should not trickle.
        @rtype: boolean
        '''
        pass_up = False
        mode = QTextCursor.MoveAnchor
        # Shift extends the selection instead of just moving the cursor.
        if event.modifiers() & Qt.ShiftModifier:
            mode = QTextCursor.KeepAnchor
        if event.key() == Qt.Key_Home:
            if event.modifiers() & Qt.ControlModifier or event.modifiers() & Qt.AltModifier:
                pass
            else:
                # Home jumps to just after the prompt, not to column 0.
                self.moveCursor(QTextCursor.StartOfLine, mode)
                cursor = self.textCursor()
                cursor.movePosition(QTextCursor.NextCharacter, mode, self.prompt_len)
                self.setTextCursor(cursor)
                return
        elif event.key() == Qt.Key_End:
            if event.modifiers() & Qt.ControlModifier or event.modifiers() & Qt.AltModifier:
                pass
            else:
                self.moveCursor(QTextCursor.EndOfLine, mode)
                return
        elif event.modifiers() == (Qt.ShiftModifier | Qt.ControlModifier) and event.key() == Qt.Key_C:
            # Ctrl+Shift+C copies: rewrite it as a plain Ctrl+C for QTextEdit.
            newEvent = QKeyEvent(event.type(), event.key(),
                    event.modifiers() & (~Qt.ShiftModifier),
                    event.text())
            event = newEvent
            pass_up = True
        #elif event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_X:
        #    tc = self.textCursor()
        #    if min(tc.position(), tc.anchor()) >= self.fixed_position:
        #        pass_up = True
        #    else:
        #        return
        #elif event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_A:
        #    pass_up = True
        #elif event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_V:
        #    self.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
        #    self.setTextColor(QColor('black'))
        #    pass_up = True
        elif event.key() == Qt.Key_Control or event.key() == Qt.Key_Shift \
            or event.key() == Qt.Key_Meta or event.key() == Qt.Key_Alt:
            # Bare modifier presses go straight to the base class.
            pass_up = True
        elif event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_L:
            self.clear()
            return
        if pass_up:
            QTextEdit.keyPressEvent(self, event)
            return
        # Give the shell subclass first refusal; only unhandled keys fall through.
        pass_up = not self.keyPressEventExtend(event)
        if pass_up:
            tc = self.textCursor()
            pos = tc.position()
            anc = tc.anchor()
            # Keep edits confined to the area after the prompt.
            if min(pos, anc) < self.fixed_position:
                if max(pos, anc) < self.fixed_position:
                    self.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
                    #self.setTextColor(QColor('black'))
                else:
                    # Clamp the selection so it starts at the first editable char.
                    if pos < anc:
                        tc.setPosition(self.fixed_position, QTextCursor.KeepAnchor)
                    else:
                        tc.setPosition(self.fixed_position, QTextCursor.MoveAnchor)
                        tc.setPosition(pos, QTextCursor.KeepAnchor)
                    self.setTextCursor(tc)
            self.setTextColor(self.qblack)
            QTextEdit.keyPressEvent(self, event)

    def keyPressEventExtend(self, event):
        '''
        For some reason we can't extend onKeyPress directly (bug #500900).
        Subclasses override this; returning True consumes the event.
        '''
        return False
class IPythonMdiView(QMdiSubWindow):
    """MDI sub-window hosting an IPythonView; deletes itself when closed."""
    def __init__(self, user_ns=None, user_global_ns=None, allow_close=False,
            title='Interactive Python Session', parent=None):
        QMdiSubWindow.__init__(self, parent)
        self.ipython_view = IPythonView(user_ns, user_global_ns, allow_close, title, self)
        self.setWidget(self.ipython_view)
        self.setAttribute(Qt.WA_DeleteOnClose)
class IPythonView(ConsoleView, IterableIPShell):
    '''
    Sub-class of both the modified IPython shell and L{ConsoleView}; together
    they make a Qt IPython console widget.
    '''
    def __init__(self, user_ns=None, user_global_ns=None, allow_close=False,
            title='Interactive Python Session', parent=None):
        '''
        Initialize. Redirect I/O to console.
        '''
        ConsoleView.__init__(self, title, parent)
        # All shell stdout/stderr is captured here, then flushed by processLine().
        self.cout = StringIO()
        if allow_close:
            exit_func = self.close
        else:
            exit_func = None
        # NOTE(review): the exit_func computed above is never used — the call
        # below always passes self.close, so allow_close does not gate shell
        # exit. Confirm which behavior is intended.
        self.allow_close = allow_close
        self.interrupt = False
        IterableIPShell.__init__(self, user_ns=user_ns, user_global_ns=user_global_ns,
                                cout=self.cout,cerr=self.cout,
                                input_func=self.raw_input, exit_func=self.close)
        # Run once to emit the banner, then discard it and show the first prompt.
        self.execute()
        self.cout.truncate(0)
        self.showPrompt(self.prompt)

    def raw_input(self, prompt=''):
        '''
        Custom raw_input() replacement. Get's current line from console buffer.

        @param prompt: Prompt to print. Here for compatability as replacement.
        @type prompt: string
        @return: The current command line text.
        @rtype: string
        @raise KeyboardInterrupt: when a pending Ctrl+C interrupt is set.
        '''
        if self.interrupt:
            self.interrupt = False
            raise KeyboardInterrupt
        return self.getCurrentLine()

    def keyPressEventExtend(self, event):
        '''
        Key press callback with plenty of shell goodness, like history,
        autocompletions, etc.

        @return: True if event should not trickle.
        @rtype: boolean
        '''
        if event.modifiers() & Qt.ControlModifier and event.key() == Qt.Key_C:
            # Ctrl+C: flag an interrupt; raw_input() raises KeyboardInterrupt.
            self.interrupt = True
            self.processLine()
            return True
        elif event.modifiers() & Qt.ControlModifier and event.key() == Qt.Key_Q:
            if self.allow_close:
                self.close()
            return True
        elif event.key() == Qt.Key_Return:
            self.processLine()
            return True
        elif event.key() == Qt.Key_Up:
            self.changeLine(self.historyBack())
            return True
        elif event.key() == Qt.Key_Left:
            # Block moving left past the prompt.
            p = self.textCursor().position()
            return p <= self.fixed_position
        elif event.key() == Qt.Key_Down:
            self.changeLine(self.historyForward())
            return True
        elif event.key() == Qt.Key_Tab:
            if not self.getCurrentLine().strip():
                # Empty line: Tab just indents.
                self.gotoEnd()
                self.write(4*' ')
            else:
                completed, possibilities = self.complete(self.getCurrentLine())
                if len(possibilities) > 1:
                    # Ambiguous: list all candidates, then restore the line.
                    slice = self.getCurrentLine()
                    self.gotoEnd()
                    self.write('\n')
                    for symbol in possibilities:
                        self.write(symbol+'\n')
                    self.showPrompt(self.prompt)
                # NOTE(review): if complete() returns a falsy `completed` with
                # <=1 possibilities, `slice` is unbound here — confirm.
                self.changeLine(completed or slice)
            return True
        return False

    def processLine(self):
        '''
        Process current command line: execute it, flush captured output to the
        view, and print a fresh prompt.
        '''
        self.history_pos = 0
        self.execute()
        rv = self.cout.getvalue()
        if rv: rv = rv.strip('\n')
        self.showReturned(rv)
        self.cout.truncate(0)
| bennihepp/snippets | ipython/ipython_view_qt.py | ipython_view_qt.py | py | 14,613 | python | en | code | 1 | github-code | 13 |
# Korean word-chain (kkeutmalitgi) game: each new word must start with the
# last letter of the previous one, no repeats, and progress is reported every
# five accepted words.
used_words = [input()]
count = 0
while True:
    candidate = input()
    # Chain rule: must start with the final character of the previous word.
    if used_words[count][-1] != candidate[0]:
        print("틀린 단어를 입력하셨습니다. 게임을 종료합니다.")
        break
    # No word may be reused.
    if candidate in used_words:
        print("앞에서 사용한 단어와 동일한 단어를 입력하셨습니다. 게임을 종료합니다.")
        break
    # Checkpoint fires on every fifth accepted word (5, 10, 15, ...).
    if (count + 1) % 5 == 4:
        print("(중간 점검) 현재 %d개의 단어를 입력하셨습니다." % (count + 2))
    count += 1
    used_words.append(candidate)
3980845061 | #!/usr/bin/env python
# encoding: utf-8
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import simplejson as json
from django.core.serializers.python import Serializer
from django.db.models.fields import FieldDoesNotExist
from StringIO import StringIO
from django.shortcuts import (
render_to_response,
get_object_or_404,
redirect
)
from simplejson import dumps
def write(obj, **kw):
    """Serialize *obj* to a UTF-8 JSON string (thin wrapper over simplejson.dumps)."""
    return dumps(obj, encoding='utf-8', **kw)
from forms import *
from models import *
class InheritanceSerializer(Serializer):
    """
    Supports serialization of fields on the model that are inherited (ie. non-local fields).
    """
    # Copied from django.core.serializers.base
    # Unfortunately, django's serializer only serializes local fields
    def serialize(self, queryset, fields=None, **options):
        """
        Serialize a queryset.

        Unlike the base implementation, the default field list comes from
        _meta.get_all_field_names(), so inherited (non-local) fields are included.
        """
        self.options = options
        self.stream = options.pop("stream", StringIO())
        self.selected_fields = fields or queryset.model._meta.get_all_field_names()
        self.use_natural_keys = options.pop("use_natural_keys", False)
        self.start_serialization()
        for obj in queryset:
            self.start_object(obj)
            for field_name in self.selected_fields:
                try:
                    field = obj._meta.get_field(field_name)
                except FieldDoesNotExist:
                    # Name from get_all_field_names() that isn't a real field: skip it.
                    continue
                # Dispatch by field kind, mirroring the base serializer.
                if field in obj._meta.many_to_many:
                    self.handle_m2m_field(obj, field)
                elif field.rel is not None:
                    self.handle_fk_field(obj, field)
                else:
                    self.handle_field(obj, field)
            self.end_object(obj)
        self.end_serialization()
        return self.getvalue()
def choose_entity(request):
    """Render the entity chooser; on a valid POST, jump to that entity's create view."""
    form = ChooseEntityForm(request.POST or None)
    if form.is_valid():
        return redirect('create_view', entity=form.cleaned_data['entity'])
    context = {'choose_entity_form': form}
    return render_to_response('choose_entity.html', context,
        context_instance=RequestContext(request))
def create(request, entity):
    """Render the field-builder page for *entity* with one blank form per field kind."""
    initial = {'refer': entity}
    context = {'refer': entity}
    # Blank field forms.
    context['dynamic_fields'] = DynamicFieldFormSet()
    context['text_fields_form'] = TextFieldsForm()
    context['multiple_choice_fields_form'] = MultipleChoiceFieldsForm()
    context['single_choice_fields_form'] = SingleChoiceFieldsForm()
    # Config forms pre-bound to the chosen entity.
    context['text_fields_configs_form'] = TextFieldsConfigsForm(initial=initial)
    context['multiple_choice_fields_configs_form'] = MultipleChoiceFieldsConfigsForm(initial=initial)
    context['single_choice_fields_configs_form'] = SingleChoiceFieldsConfigsForm(initial=initial)
    return render_to_response('create.html', context,
        context_instance=RequestContext(request))
def _parse_serialize(queryset):
    """Serialize *queryset* into a list of plain field dicts, copying the id
    into 'pk' and tagging each entry with its matching typeconfig name."""
    serialized = [entry['fields']
                  for entry in InheritanceSerializer().serialize(queryset)]
    for fields in serialized:
        fields['pk'] = fields['id']
        # No break: if several configs match, the last one wins (as before).
        for config_name, typo_group in TYPECONFIG_MAP.iteritems():
            if fields['typo'] in typo_group:
                fields['typeconfig'] = config_name
    return serialized
def create_dynamic_field(request):
    """Persist a new dynamic field; on success, return to its entity's create view."""
    form = CustomDynamicFieldForm(request.POST or None)
    if not form.is_valid():
        # Invalid or missing data: reload the same page.
        return HttpResponseRedirect(redirect_to=request.path)
    dynamic_field = form.save()
    return redirect('create_view', dynamic_field.refer)
def get_dynamic_field(request, dfield_name):
    """Return the serialized dynamic field named *dfield_name* as JSON.

    Responds with an empty JSON object when the field is missing or cannot be
    serialized.
    """
    try:
        data = _parse_serialize(
            CustomDynamicField.objects.filter(name=dfield_name)
        )[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; IndexError (no match) and serialization failures
        # still fall back to {}.
        data = {}
    return HttpResponse(
        json.dumps(data),
        mimetype="application/json"
    )
def update_dynamic_field(request, dfield_name):
    """Update the named dynamic field from POST data; on success, return to
    its entity's create view, otherwise reload the current page."""
    try:
        instance = CustomDynamicField.objects.get(name=dfield_name)
    except CustomDynamicField.DoesNotExist:
        return HttpResponseRedirect(redirect_to=request.path)
    form = CustomDynamicFieldForm(data=request.POST or None, instance=instance)
    if form.is_valid():
        form.save()
        return redirect('create_view', instance.refer)
    return HttpResponseRedirect(redirect_to=request.path)
def delete_dynamic_field(request, dfield_name):
    """Delete the named dynamic field; respond with empty JSON on success,
    otherwise redirect back to the current page."""
    # NOTE(review): deleting on what is effectively a GET view, without CSRF
    # protection, is unsafe — consider requiring POST/DELETE.
    try:
        dynamic_field = CustomDynamicField.objects.get(
            name=dfield_name
        )
        dynamic_field.delete()
        return HttpResponse(json.dumps({}), mimetype="application/json")
    except Exception:
        # Narrowed from a bare `except:`; missing records and delete failures
        # both redirect, as before.
        return HttpResponseRedirect(redirect_to=request.path)
| multmeio/django-flattenfields-form-builder | builder/views.py | views.py | py | 4,915 | python | en | code | 5 | github-code | 13 |
74030537939 | import socket
import logging
import threading
# Log format: thread name, thread id, then the message.
FORMAT = "%(threadName)s %(thread)d %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class ChatClient:
    """Minimal TCP chat client: one thread prints incoming data, another reads
    commands from stdin and sends them. Typing 'quit' shuts everything down."""

    def __init__(self, ip='127.0.0.1', port=9999):
        self.address = ip, port
        self.sock = socket.socket()
        self.event = threading.Event()  # set() signals shutdown to the receiver

    def start(self):
        """Connect, greet the server, and spawn the receiver/input threads."""
        self.sock.connect(self.address)
        self.send('hello server')
        threading.Thread(target=self.rec, name='rec').start()
        threading.Thread(target=self._inner, name='inner').start()

    def rec(self):
        """Receiver loop: print server data until shutdown or disconnect."""
        while not self.event.is_set():
            try:
                data = self.sock.recv(1024)  # blocking
            except Exception as e:
                # fix: the original logged the error and kept looping, spinning
                # forever once the socket was closed.
                logging.error(e)
                break
            if not data:
                # fix: empty recv means the peer closed the connection; the
                # original looped forever printing b''.
                break
            print(data)

    def send(self, msg: str):
        self.sock.send(msg.encode())

    @staticmethod
    def _inner():
        # Interactive stdin loop. It need not run on the main thread.
        # Relies on the module-level `cc` instance rather than self, and goes
        # through cc.send() so the raw socket stays unexposed.
        while True:
            cmd = input('>>>').strip()
            if cmd == 'quit':
                cc.stop()
                break
            cc.send(cmd)
            logging.info(threading.enumerate())

    def stop(self):
        """Tell the server we are leaving, stop the receiver, close the socket."""
        self.event.set()
        self.send('quit')
        self.sock.close()
# Module-level client instance; ChatClient._inner relies on this global `cc`.
cc = ChatClient()
cc.start()
| sqsxwj520/python | 网络编程/服务端编程/客户端类编程.py | 客户端类编程.py | py | 1,417 | python | en | code | 1 | github-code | 13 |
if __name__ == '__main__':
    # HackerRank "String Validators": for one input line, report whether ANY
    # character is alphanumeric / alphabetic / a digit / lowercase / uppercase.
    s = input()
    # any() short-circuits on the first hit, unlike the original
    # flag-per-character loop that always scanned the whole string.
    print(any(c.isalnum() for c in s))
    print(any(c.isalpha() for c in s))
    print(any(c.isdigit() for c in s))
    print(any(c.islower() for c in s))
    print(any(c.isupper() for c in s))
| PahulGogna/Hackerrank_python | string_validation_problem.py | string_validation_problem.py | py | 523 | python | en | code | 0 | github-code | 13 |
74247638738 | from omdbapi.movie_search import GetMovie as g
# Interactive OMDb lookup: prompt for a title and print its details.
# SECURITY NOTE(review): the API key is hard-coded in source — move it to an
# environment variable or config file.
m=g(api_key='d67ffdb0')
print('----------Movie Details----------')
mv=input('\n Enter the movie name:')
# plot='full' requests the long-form plot summary.
det=m.get_movie(title=mv,plot='full')
print(det)
# Fetch just the actors and year fields for the same title.
f=m.get_data('actors','year')
print(f)
| dhaneshvg/Python_tkinder | movieAPI.py | movieAPI.py | py | 242 | python | en | code | 1 | github-code | 13 |
41974308212 | from threading import Lock, Thread
from typing import Any
class SingletonMeta(type):
    """Thread-safe Singleton metaclass: at most one instance per class."""

    __instances = {}  # class -> its single instance (shared via the metaclass)
    _lock = Lock()    # serializes first-time instance creation across threads

    def __call__(cls, *args: Any, **kwds: Any) -> Any:
        with cls._lock:
            if cls not in cls.__instances:
                # BUG FIX: the original used `*kwds`, which unpacked the dict's
                # KEYS as positional arguments; `**kwds` forwards the keyword
                # arguments properly.
                instance = super().__call__(*args, **kwds)
                cls.__instances[cls] = instance
            return cls.__instances[cls]
class Singleton(metaclass = SingletonMeta):
    """Demo singleton: the first construction wins; __init__ is not re-run on
    later calls because SingletonMeta skips construction entirely."""
    def __init__(self, val: str | None = None):
        self.val = val
def test(val: str) -> None:
    """Thread target: try to construct the singleton with *val*, then print
    the value the shared instance actually holds."""
    singleton = Singleton(val)
    print(singleton.val)
def main():
    """Spawn two competing threads; both should print the same singleton value."""
    print("If two results are the same, Singleton works. Otherwise, Singleton failed")
    workers = [Thread(target=test, args=(label,)) for label in ("p1", "p2")]
    for worker in workers:
        worker.start()
if __name__ == '__main__':
main() | hieukien503/DesignPattern | DesignPattern/Creational DP/Singleton_thread_safe.py | Singleton_thread_safe.py | py | 925 | python | en | code | 0 | github-code | 13 |
7494399313 | from selenium.common.exceptions import NoSuchElementException
from GrouponScraper.Devices import Devices
from GrouponScraper.CommentManager import CommentManager
class TicketFactory:
    """Builds TicketData snapshots and detects Groupon-related tickets on the
    currently open ticket page."""

    def __init__(self):
        return

    def isGroupon(self, driver):
        """Return True when the repair-items table or the comment feed on the
        current page mentions 'groupon' (case-insensitive)."""
        table = driver.find_element_by_id('repair_items')
        if 'groupon' in table.text.lower():
            return True
        comments = driver.find_element_by_id('ticket_all-comments')
        return 'groupon' in comments.text.lower()

    def findGroupon(self, driver, ticket):
        """Scrape the page into a TicketData and scan its comments for groupons.

        Returns a (scan_result, ticket_data) pair.
        """
        ticket_data = TicketData(driver, ticket)
        return ticket_data.checkCommentsForGroupon(), ticket_data
def getPrice(string):
    """Extract the dollar amount following the first '$' in *string* as a float.

    Returns None when no '$' is present.

    BUG FIX: the original sliced from the '$' itself (string[point:]), so
    float() always raised ValueError; the slice now starts just after it.
    """
    point = string.find('$')
    if point == -1:
        return None
    return float(string[point + 1:])
def getComments(div):
    """Collect the text of every <li> comment entry under *div*, in page order."""
    return [entry.text for entry in div.find_elements_by_tag_name('li')]
class TicketData:
    """Snapshot of a repair ticket scraped from the currently open ticket page."""

    def __init__(self, driver, ticket):
        # Groupons can be attached later (e.g. from MainDriver).
        self.groupons = []
        # Take Data from ticket
        self.id = ticket.id
        # Page panels (selenium WebElements).
        self.summary = driver.find_element_by_class_name('span4')
        self.priceInfo = driver.find_element_by_class_name('span5')
        self.deviceInfoDiv = driver.find_element_by_id('repair_items')
        self.url = driver.current_url
        # Comments
        commentsDiv = driver.find_element_by_id('ticket_all-comments')
        self.containsGroupon = None
        self.comments = getComments(commentsDiv)
        # Summary info (filled in by populateSummary below).
        self.created = None
        self.modified = None
        self.location = None
        self.creator = None
        self.source = None
        # Price info (filled in by populatePrice below).
        self.subTotal = None
        self.discount = None
        self.tax = None
        self.total = None
        self.totalPaid = None
        self.due = None
        # Davron request Information
        self.devices = Devices(self.deviceInfoDiv)
        self.technician = self.getTechnician()
        # BUG FIX: str.replace returns a NEW string; the original call
        # discarded its result, so newlines were never stripped.
        self.technician = self.technician.replace('\n', '')
        userDiv = driver.find_element_by_class_name('span8')
        self.user = User(userDiv)
        # Get all available rows of data.
        self.populateSummary()
        self.populatePrice()
        return

    def addGroupons(self, groupons):
        self.groupons = groupons

    def addGroupon(self, groupon):
        self.groupons.append(groupon)

    def removeGroupon(self, groupon):
        self.groupons.remove(groupon)

    def __str__(self):
        retVal = ''
        retVal += self.url + '\tPrice: ' + self.total
        return retVal

    def getTechnician(self):
        """Return the first word of the 'Repaired & Collected' comment.

        Assumes that comment starts with the technician's name — TODO confirm
        the comment format against real tickets.
        """
        for c in self.comments:
            if c.find('Repaired & Collected') != -1:
                # Author of this message = person who marked it as repaired.
                name = ""
                for char in c:
                    if char != ' ':
                        name += char
                    else:
                        self.technician = name
                        return name
        return "Ticket_Not_Marked_As_Repaired"

    def populateSummary(self):
        """Fill created/modified/location/creator/source from the summary rows.

        The slice offsets strip the row labels (e.g. 'Created: ') — assumes the
        page's label text is stable; verify if the site layout changes.
        """
        options = self.summary.find_elements_by_tag_name('tr')
        self.created = (options[1]).text[14:]
        self.modified = (options[2]).text[14:]
        self.location = (options[3]).text[9:]
        self.creator = (options[4]).text[9:]
        self.source = (options[5]).text[6:]

    def populatePrice(self):
        """Fill the price fields from the price-panel rows (raw row text)."""
        options = self.priceInfo.find_elements_by_tag_name('tr')
        self.subTotal = options[0].text
        self.discount = options[1].text
        self.tax = options[2].text
        self.total = options[4].text
        self.totalPaid = options[5].text
        self.due = options[6].text

    def checkCommentsForGroupon(self):
        """Scan this ticket's comments for groupon references.

        NOTE(review): two separate CommentManager instances are created here
        (one for scanComments' side effects, one for the returned value) —
        confirm this is intentional.
        """
        CommentManager().scanComments(self.comments, self)
        return CommentManager()._scanComments(self.comments)
class User:
    """Customer contact details scraped from the ticket page's user panel."""
    def __init__(self, userDiv):
        self.name = userDiv.find_element_by_tag_name('h5').text
        self.number = ''
        self.email = ''
        self.getNumber(userDiv)
        self.getEmail(userDiv)
        return
    def getNumber(self, userDiv):
        """Extract the 12-character field after 'Mobile: ' into self.number."""
        try:
            rawText = userDiv.text
            index = rawText.find('Mobile: ')
            if index == -1:
                # BUGFIX: also record the failure on the instance; the
                # original only returned the sentinel, leaving number == ''.
                self.number = "No_Phone_Number_Found"
                return "No_Phone_Number_Found"
            # BUGFIX: clamp the 12-char window to the text length; the
            # original raised an uncaught IndexError when the text ended
            # fewer than 12 characters after the prefix.
            end = min(index + 20, len(rawText))
            retVal = ''
            i = index + 8
            while i < end:
                retVal = retVal + str(rawText[i])
                i += 1
            self.number = retVal
        except NoSuchElementException:
            self.number = "No_Phone_Number_Found"
    def getEmail(self, userDiv):
        """Extract the address after 'Email: ' (up to space/newline) into self.email."""
        try:
            rawText = userDiv.text
            index = rawText.find('Email: ')
            if index == -1:
                self.email = "No_Email_Found"  # BUGFIX: record on instance too
                return "No_Email_Found"
            retVal = ''
            i = index + 7
            # BUGFIX: stop at end-of-string as well; the original indexed
            # past the end (IndexError) when the email was the last token.
            while i < len(rawText) and rawText[i] != '\n' and rawText[i] != ' ':
                retVal += str(rawText[i])
                i += 1
            self.email = retVal
        except NoSuchElementException:
            self.email = "No_Email_Found"
        if self.email == '':
            self.email = "No_Email_Found"
| DavidCastillo2/GrouponScraper | TicketFactory.py | TicketFactory.py | py | 5,556 | python | en | code | 0 | github-code | 13 |
40789624142 | word=input('Enter the string to find the vowels:')
# Count how often each vowel occurs in the user's input string ('word',
# read from stdin above) and print the counts in alphabetical order.
vowels={'a','e','i','o','u'}
d={}
for ch in word:
    if ch in vowels:
        d[ch]=d.get(ch,0)+1  # .get defaults to 0 on first sighting
for k,v in sorted(d.items()):
    print('Vowel {} is appearing {} times'.format(k,v))
| sudheemujum/Python-3 | dict_vowel_count.py | dict_vowel_count.py | py | 238 | python | en | code | 0 | github-code | 13 |
24367774101 | from PIL import Image, ImageOps
import numpy as np
def print_info(img):
    """Dump the basic Pillow image attributes to stdout, one per line."""
    for attr in ("format", "mode", "size", "width", "height", "palette", "info"):
        print(getattr(img, attr))
def load_image(filename):
    """Open *filename* with Pillow and eagerly load its pixel data."""
    picture = Image.open(filename)
    picture.load()  # force the lazy read now, while the file handle is valid
    return picture
def make_array(img):
    """Convert a Pillow image (or any array-like) into a fresh numpy array."""
    pixels = np.array(img)
    return pixels
def split_channels(np_array):
    """Split an RGB image array into three single-channel Image objects.

    Returns (blue, green, red); each image keeps exactly one of the
    original channels (layout assumed RGB: 0=R, 1=G, 2=B) and has the
    other two zeroed.
    """
    np_array_red = np_array.copy()
    np_array_blue = np_array.copy()
    np_array_green = np_array.copy()
    # blue keeps channel 2, so zero R (0) and G (1)
    np_array_blue[:, :, 0] *= 0
    np_array_blue[:, :, 1] *= 0
    blue = Image.fromarray(np_array_blue)
    # BUGFIX: the original zeroed channels 1 and 2 here, which kept the
    # RED channel in the image named "green" (and the green channel in
    # "red" below). Green must keep channel 1 only.
    np_array_green[:, :, 0] *= 0
    np_array_green[:, :, 2] *= 0
    green = Image.fromarray(np_array_green)
    # red keeps channel 0, so zero G (1) and B (2)
    np_array_red[:, :, 1] *= 0
    np_array_red[:, :, 2] *= 0
    red = Image.fromarray(np_array_red)
    return blue, green, red
def transform(img, mode):
    """Return *img* flipped according to *mode*.

    mode -- "vertical" flips top/bottom, "horizontal" mirrors left/right;
    any other value returns the image unchanged.
    """
    # BUGFIX: the original compared strings with "is", which tests object
    # identity and only works by accident of CPython literal interning;
    # computed strings (e.g. read from config) would silently fall through.
    if mode == "vertical":
        return ImageOps.flip(img)
    elif mode == "horizontal":
        return ImageOps.mirror(img)
    else:
        return img
if __name__ == "__main__":
    # 2. read the image from disk
    image = load_image("hidden.png")
    # 3. convert the image to a numpy array
    array = make_array(image)
    # 4. show the image split into its separate colour channels
    blue, green, red = split_channels(array)
    # 5. demonstrate the vertical/horizontal flip helper
    t_image = transform(image, "vertical")
    t_image.show()
    blue.show()
    green.show()
    red.show()
| hekrause/BIVE | Uebung01/work.py | work.py | py | 1,454 | python | en | code | 0 | github-code | 13 |
6143174661 | import logging
import sys
import pytest
from stringOrderCheck.stringOrder import checkOrder, orderIndices, orderScan
# Module-level logger mirrored to stdout so records are visible when the
# tests run with `pytest -s`.
logger = logging.getLogger(__name__)
streamHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
logger.setLevel(logging.INFO)
"""
# input: "hello"
# ordering: 'hlo'
# true
# input: "helloooooh"
# ordering: 'hlo'
# false
# input: "helzzzzzloo"
# ordering: 'hlo'
# true
"""
# NOTE(review): pytest only collects functions named test_* by default;
# these *Test-suffixed functions will be silently skipped unless the
# project configures a custom python_functions pattern — confirm.
@pytest.mark.parametrize(
    "inputSource, inputOrder, expected",
    [
        ("helloworld", "hlo!", list()),
        ("helloooooh", "hlo", [0, 9, 2, 3, 4, 5, 6, 7, 8]),
        ("helzzzzzloo", "lzo", [2, 8, 3, 4, 5, 6, 7, 9, 10]),
    ],
)
def orderIndicesTest(inputSource, inputOrder, expected):
    """orderIndices() should return the index mapping for the given order."""
    logger.info(f"inputstring={inputSource}")
    logger.info(f"inputOrder={inputOrder}")
    indices = orderIndices(inputSource, inputOrder)
    logger.info(f"indices={indices}")
    assert indices == expected
@pytest.mark.parametrize(
    # BUGFIX: the argnames string contained a stray empty name
    # ("inputSource, inputOrder, ,nextchar, expected"), which makes pytest
    # fail to map the 4-tuples onto the function parameters.
    "inputSource, inputOrder, nextchar, expected",
    [
        ("helloorld"[1:], "hlo!", "l", True),
        ("helloooooh"[2:], "hlo", "o", True),
        ("helzzzzzloo!"[-2:], "lo!", "l", False),
    ],
)
def checkOrderTest(inputSource, inputOrder, nextchar, expected):
    """checkOrder() should say whether *nextchar* may legally come next."""
    logger.info(f"inputSource={inputSource}")
    logger.info(f"nextchar={nextchar}")
    actual = checkOrder(inputSource, inputOrder, nextchar)
    assert actual == expected
@pytest.mark.parametrize(
    "inputSource, inputOrder, expected",
    [
        ("helloorld", "heod", True),
        ("helloooooh", "eo!", False),
        ("hellooo!ooh", "eo!", False),
        ("helzzzzzoo!", "hl!", True),
    ],
)
def orderScanTest(inputSource, inputOrder, expected):
    """orderScan() should say whether inputSource respects inputOrder."""
    # NOTE(review): not collected by default pytest naming (test_* expected).
    actual = orderScan(inputSource, inputOrder)
    assert actual == expected
| michael-c-hoffman/TestDrivenDevelopmentPythonPytest | stringOrderCheck/tests/unitTests/stringOrderCheckTests.py | stringOrderCheckTests.py | py | 1,905 | python | en | code | 0 | github-code | 13 |
27788135813 | import csv
import json
import requests
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
import uuid
extract_url = 'http://localhost:9200/skills_taxonomy'
copy_url = 'http://enter elasticsearch_url here/'
def load_csv():
    """Read Categories.csv and return every row except the header.

    Prints the number of data rows as a progress hint.
    """
    filename = "Categories.csv"
    # BUGFIX: the original passed an open() result straight to csv.reader
    # and never closed it; use a context manager so the handle is released.
    # newline='' is the csv-module-recommended mode for reading.
    with open(filename, 'r', newline='') as fh:
        data = [row for row in csv.reader(fh)]
    data = data[1:]  # drop the header row
    print(len(data))
    return(data)
def copy_to_new_elastic(temp):
    """Bulk-index (keyword, keyword_type) rows into the remote skills_taxonomy index.

    *temp* is a list of 2-element rows as produced by load_csv().
    Credentials/headers are placeholders and must be filled in before use.
    """
    print("copying to new elastic index started")
    HEADERS = {
        'content-type':'Enter here',
        'Authorization': 'Enter here'
    }
    es = Elasticsearch(hosts=[copy_url], timeout=5000, headers=HEADERS)
    data = []
    for i in range(len(temp)):
        _temp = {}
        _temp['_index'] = 'skills_taxonomy'
        _temp['_type'] = 'skills'
        # NOTE(review): the bulk helper expects '_id' for the document id;
        # a plain 'id' key is treated as an ordinary field and the id will
        # be auto-generated — confirm which behaviour is intended.
        _temp['id'] = str(uuid.uuid4())
        _temp['_source'] = {}
        _temp['_source']['keyword'] = temp[i][0]
        _temp['_source']['keyword_type'] = temp[i][1]
        data.append(_temp)
        print(i)
    try:
        success, _ = bulk (es, data)
    except Exception as error:
        print(error)
    print('finished')
if __name__ == '__main__':
    # Load the CSV rows, then push them to the remote cluster.
    temp= load_csv()
    copy_to_new_elastic(temp)
| Gayatri-Shastri7/Search-Engine | Copy_data_to_server.py | Copy_data_to_server.py | py | 1,294 | python | en | code | 0 | github-code | 13 |
36834783539 | import torch
import unittest
from core.embed import PositionalEmbedding
class TestEmbed(unittest.TestCase):
    """Unit tests for core.embed.PositionalEmbedding."""

    def test_positional_embedding(self):
        """A single token id must map to a (1, 1, 12) embedding tensor."""
        embedding = PositionalEmbedding(12)
        token_ids = torch.LongTensor([[1]])
        output = embedding(token_ids)
        batch, length, depth = output.size()
        self.assertEqual(batch, 1)
        self.assertEqual(length, 1)
        self.assertEqual(depth, 12)
39148398548 | #!/bin/python3
#https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem
import sys
def minJumps(n, c):
    """Count the minimum jumps needed to reach the last cloud.

    From cloud i you may hop to i+1 or i+2, never onto a thundercloud
    (c[j] == 1).  Greedy: always jump 2 unless that would land on a
    thundercloud or only one cloud remains.  ``n`` is unused but kept
    for interface compatibility with the HackerRank harness.
    """
    position = 0
    jumps = 0
    last = len(c) - 1
    while position < last:
        # short-circuit keeps c[position + 2] in bounds when one cloud is left
        if last - position == 1 or c[position + 2] == 1:
            position += 1
        else:
            position += 2
        jumps += 1
    return jumps
# Read the cloud count (unused by minJumps) and the cloud list from stdin,
# then print the minimum number of jumps.
n = int(input().strip())
c = [int(c_temp) for c_temp in input().strip().split(' ')]
min_count = minJumps(n, c)
print(min_count)
71455226578 | # -*- coding: utf-8 -*-
# @Author : ZhaoKe
# @Time : 2021-08-16 16:11
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
def bar_single():
    """Bar-chart the per-algorithm mean fitness from chaos-res-matrix/res-0.txt."""
    data = np.loadtxt("chaos-res-matrix/res-0.txt", delimiter=',', encoding="GB2312")
    print("结果的形状", data.shape)
    mean_list = np.mean(data, axis=1)
    std_list = np.std(data, axis=1)  # NOTE(review): computed but never used
    # draw the bar chart: set the colour and the x-axis labels
    name_list = ["CRPSO", "CPSO", "DPSO", "GA", "ACO"]
    plt.bar(range(len(mean_list)), mean_list, fc="blue", tick_label=name_list)
    # plt.bar(range(len(mean_list)), mean_list, color='rgb')
    for x, y in enumerate(mean_list):
        # plt.text(x, 2, y, ha="center", size=7)
        # plt.text(x, y+1, str(round(y*100/sum(mean_list), 1))+'%', ha="center", size=7)
        plt.text(x, y+1, str(round(y, 2)), ha="center", size=7)  # value label above each bar
    # plt.show()
    plt.savefig('chaos-res-matrix/res-single-bar-0.png', dpi=300, format='png')
def bar_double():
    """Grouped bar chart comparing HEGPSO/DPSO/ACO scores per task count.

    Expects doublebar.txt to contain a 3x5 matrix (one row per algorithm,
    one column per task count 200..1000); saves double-bar-1.png.
    """
    data = np.loadtxt("doublebar.txt", delimiter=',', encoding="GB2312")
    print(data)
    print("结果的形状", data.shape)
    # mean_list = np.mean(data, axis=1)
    # std_list = np.std(data, axis=1)
    # print(mean_list)
    # print(std_list)
    # # draw the bar chart: set colours and the x-axis labels
    # name_list = ["CRPSO", "CPSO", "DPSO", "GA", "ACO"]
    name_list = ["200", "400", "600", "800", "1000"]
    x = list(range(5))
    total_width, n = 0.6, 3
    width = total_width / n
    plt.bar(x, data[0, :], width=width, label="HEGPSO", fc="red")
    # plt.bar(range(len(mean_list)), mean_list, color='rgb')
    # shift each series right by one bar width so the groups sit side by side
    for i in range(5):
        x[i] = x[i] + width
    plt.bar(x, data[1, :], width=width, label="DPSO", tick_label=name_list, fc="green")
    for i in range(5):
        x[i] = x[i] + width
    plt.bar(x, data[2, :], width=width, label="ACO", tick_label=name_list, fc="blue")
    plt.xlabel("task number", fontdict={'size': 18})
    plt.ylabel("score", fontdict={'size': 18})
    plt.xticks(range(5), name_list, size=12)
    plt.yticks(size=12)
    matplotlib.rcParams.update({'font.size': 13})
    plt.legend()
    plt.savefig('double-bar-1.png', dpi=300, format='png')
    # plt.show()
def box_plot(ind):
    """Box-plot the fitness distributions from chaos-res-matrix/res-<ind>.txt."""
    # draw the box plot
    y0 = np.loadtxt("chaos-res-matrix/res-"+str(ind)+".txt", delimiter=',', encoding="GB2312")
    y0 = np.transpose(y0)
    # r, c = y0.shape
    labels = ["CRPSO", "CPSO", "DPSO", "GA", "ACO"]
    # y0 = 1/y0
    # print(y0.shape)
    # five data groups, one box each
    # NOTE(review): only column 0 is sliced from row 70 on — confirm the
    # asymmetric [70:] slice is intentional.
    plt.boxplot((y0[70:, 0], y0[:, 1], y0[:, 2], y0[:, 3], y0[:, 4]))
    # plt.plot(range())
    plt.ylabel("fitness", fontdict={'size': 18})
    plt.xticks(range(5), labels, size=16)
    plt.yticks(size=18)
    # plt.plot(range(r), y0[:, 0])
    plt.show()
    # plt.savefig("boxplot" + str(i) + ".png")
    # plt.close(i)
# plt.close(i)
if __name__ == '__main__':
    # Only the grouped bar chart is produced by default; the other plot
    # calls are kept for ad-hoc use.
    # bar_single()
    bar_double()
    # box_plot(6)
    # box_plot(0)
    # box_plot(2)
| ZhaoKe1024/IntelligentAlgorithmScheduler | draw_plot.py | draw_plot.py | py | 2,948 | python | en | code | 7 | github-code | 13 |
17042446804 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCampaignDiscountBudgetCreateModel(object):
    """Request model for creating a discount budget in a marketing campaign.

    Plain data holder: every field is exposed as a property, and the
    to/from_alipay_dict helpers convert between the model and the wire
    dict.  The attribute names double as the dict keys, which lets the
    previously duplicated per-field serialization code collapse into a
    single loop (behavior unchanged).
    """

    # Field names, in declaration order; each is both an attribute and a key.
    _FIELD_NAMES = (
        'biz_from', 'fund_type', 'gmt_end', 'name',
        'out_biz_no', 'out_budget_no', 'publisher_logon_id', 'total_amount',
    )

    def __init__(self):
        self._biz_from = None
        self._fund_type = None
        self._gmt_end = None
        self._name = None
        self._out_biz_no = None
        self._out_budget_no = None
        self._publisher_logon_id = None
        self._total_amount = None

    @property
    def biz_from(self):
        return self._biz_from
    @biz_from.setter
    def biz_from(self, value):
        self._biz_from = value
    @property
    def fund_type(self):
        return self._fund_type
    @fund_type.setter
    def fund_type(self, value):
        self._fund_type = value
    @property
    def gmt_end(self):
        return self._gmt_end
    @gmt_end.setter
    def gmt_end(self, value):
        self._gmt_end = value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def out_budget_no(self):
        return self._out_budget_no
    @out_budget_no.setter
    def out_budget_no(self, value):
        self._out_budget_no = value
    @property
    def publisher_logon_id(self):
        return self._publisher_logon_id
    @publisher_logon_id.setter
    def publisher_logon_id(self, value):
        self._publisher_logon_id = value
    @property
    def total_amount(self):
        return self._total_amount
    @total_amount.setter
    def total_amount(self, value):
        self._total_amount = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict.

        Nested objects providing their own to_alipay_dict are serialized
        recursively, exactly as the generated per-field code did.
        """
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                # falsy fields are omitted, matching the generated code
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; returns None for empty/None input."""
        if not d:
            return None
        o = AlipayMarketingCampaignDiscountBudgetCreateModel()
        for field in AlipayMarketingCampaignDiscountBudgetCreateModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMarketingCampaignDiscountBudgetCreateModel.py | AlipayMarketingCampaignDiscountBudgetCreateModel.py | py | 4,461 | python | en | code | 241 | github-code | 13 |
16748096048 |
import numpy as np
class Dataset():
    """Container pairing input observations with their targets.

    Accepts either numpy arrays (fixed-size observations) or plain lists
    of per-observation numpy vectors (variable length, zero-padded into a
    dense matrix on demand by astuple()).
    """
    def __init__(self, data, target, n_classes=None, max_output_size=None):
        self.data = data
        self.target = target
        self.n_classes = n_classes
        self.max_output_size = max_output_size
        if isinstance(target, list):
            self.data_type = 'list'
            self.observations = len(target)
        else:
            self.data_type = 'numpy'
            self.observations = data.shape[0]
    def range(self, start, end):
        """Return a new Dataset holding observations start..end (exclusive)."""
        return Dataset(
            self.data[start:end], self.target[start:end],
            n_classes=self.n_classes,
            max_output_size=self.max_output_size
        )
    def select(self, selector):
        """Return a new Dataset with only the observations indexed by *selector*."""
        if self.data_type == 'list':
            picked_data = [self.data[idx] for idx in selector]
            picked_target = [self.target[idx] for idx in selector]
        else:
            picked_data = self.data[selector]
            picked_target = self.target[selector]
        return Dataset(
            picked_data, picked_target,
            n_classes=self.n_classes,
            max_output_size=self.max_output_size
        )
    def list_to_numpy(self, items):
        """Zero-pad a list of 1-d arrays into one (observations, t_max) matrix."""
        # NOTE: currently only indexed data is supported
        longest = max(obs.size for obs in items)
        padded = np.zeros((self.observations, longest), dtype=items[0].dtype)
        for row, obs in enumerate(items):
            padded[row, 0:obs.size] = obs
        return padded
    def astuple(self):
        """Return (data, target) as numpy arrays, padding list-typed datasets."""
        if self.data_type == 'numpy':
            return (self.data, self.target)
        return (self.list_to_numpy(self.data), self.list_to_numpy(self.target))
def index_to_indicator(matrix, maxIndex):
    """One-hot encode an (obs, time) index matrix into (obs, maxIndex, time).

    tensor[o, matrix[o, t], t] == 1 and all other entries are 0.
    """
    n_obs, n_time = matrix.shape
    tensor = np.zeros((n_obs, maxIndex, n_time), dtype='float32')
    rows = np.arange(n_obs)[:, None]
    cols = np.arange(n_time)[None, :]
    tensor[rows, matrix, cols] = 1
    return tensor
| AndreasMadsen/bachelor-code | dataset/_shared.py | _shared.py | py | 1,919 | python | en | code | 1 | github-code | 13 |
16879760797 | """
You are given an array prices where prices[i] is the price of a given stock on the ith day.
You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.
Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.
"""
class Solution:
    """LeetCode 121 — Best Time to Buy and Sell Stock."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the best profit from one buy followed by one later sell.

        Returns 0 when no profitable transaction exists (including empty
        or single-day price lists).
        """
        cheapest = None
        best = 0
        for price in prices:
            if cheapest is None or price < cheapest:
                cheapest = price            # new low: best possible buy so far
            elif price - cheapest > best:
                best = price - cheapest     # selling today beats every prior pair
        return best
6583280263 | import numpy as np
import tensorflow as tf
class DQN:
    """TensorFlow-1 style deep Q-network: a 2-layer MLP built into a session's graph.

    Builds placeholders, weights, an MSE loss and an Adam train op under a
    variable scope named *name*, so a "main" and a "target" network can
    coexist in the same graph.
    """
    def __init__(self, session: tf.Session, input_size: int, output_size: int, name: str="main"):
        # Keep handles to the session and layer sizes, then build the graph
        # immediately (TF1: graph construction is separate from execution).
        self.session = session
        self.input_size = input_size
        self.output_size = output_size
        self.net_name = name
        self._build_network();
    def _build_network(self, h_size = 4, l_rate = 0.1):
        """Build the 2-layer network: h_size hidden units, Adam with rate l_rate.

        NOTE(review): uses the deprecated tf.contrib Xavier initializer and
        mixed optimizers in naming (l_rate fed to AdamOptimizer) — this code
        targets TF 1.x only.
        """
        self._X = tf.placeholder(dtype = tf.float32, shape = [None, self.input_size])
        self._Y = tf.placeholder(dtype = tf.float32, shape = [None, self.output_size])
        with tf.variable_scope(self.net_name):
            W1 = tf.get_variable(name = "W1", shape = [self.input_size, h_size], initializer = tf.contrib.layers.xavier_initializer())
            b1 = tf.Variable(tf.random_normal(shape = [h_size]), name = "bais1")
            L1 = tf.nn.relu(tf.matmul(self._X, W1) + b1)
            W2 = tf.get_variable(name = "W2", shape = [h_size, self.output_size], initializer = tf.contrib.layers.xavier_initializer())
            b2 = tf.Variable(tf.random_normal(shape = [self.output_size]), name = "bais2")
            self._Qpred = tf.matmul(L1, W2) + b2
        self._loss = tf.losses.mean_squared_error(self._Y, self._Qpred)
        optimizer = tf.train.AdamOptimizer(learning_rate = l_rate)
        self._train = optimizer.minimize(self._loss)
    def predict(self, state: np.ndarray) -> np.ndarray:
        """Return Q-values for *state* (reshaped to a batch of one if needed)."""
        x = np.reshape(state, [-1, self.input_size])
        return self.session.run(self._Qpred, feed_dict={self._X: x})
    def update(self, x_stack: np.ndarray, y_stack: np.ndarray) -> list:
        """Run one training step on a batch; returns [loss, train_op result]."""
        feed = { self._X: x_stack, self._Y: y_stack }
        return self.session.run([self._loss, self._train], feed)
42834494796 | from pytest import raises as assert_raises
from sys import argv
from threading import Thread
from time import sleep
from wsgiref.simple_server import make_server
from rdflib import Graph, Literal, Namespace, RDF, URIRef
from re import compile as RegExp
from rdfrest.exceptions import CanNotProceedError
from rdfrest.cores.http_client import HttpClientCore
from rdfrest.http_server import HttpFrontend
from rdfrest.cores import ICore
from rdfrest.cores.factory import factory as universal_factory
from rdfrest.cores.local import EditableCore, Service
from rdfrest.cores.mixins import FolderishMixin, GraphPostableMixin
from rdfrest.serializers import bind_prefix, register_serializer
from rdfrest.util import coerce_to_uri, parent_uri
from rdfrest.util.config import get_service_configuration
from rdfrest.util.wsgi import SimpleRouter
# INTERFACE
from rdfrest.wrappers import register_wrapper
# RDF namespace for the example vocabulary, registered under prefix "ex"
# so serializations stay readable.
EXAMPLE = Namespace("http://example.org/example/")
bind_prefix("ex", "http://example.org/example/")
@register_wrapper(EXAMPLE.Item)
class ItemMixin(ICore):
    """Interface of a simple item"""
    # Lazily-filled cache for the state graph; see the ``state`` property.
    __state = None
    @property
    def identifier(self):
        """This resource's identifier.

        This is the last path-element of the URI, without the trailing
        slash if any.
        """
        uri = self.uri
        if uri[-1] == "/":
            uri = uri[:-1]
        return uri.rsplit("/", 1)[1]
    @property
    def state(self):
        """Shortcut to get_state()"""
        ret = self.__state
        if ret is None:
            # fetch once and cache; presumably rdfrest keeps the returned
            # graph current, so re-fetching is unnecessary — see ICore
            self.__state = ret = self.get_state()
        return ret
    def _get_label(self):
        """label property implementation"""
        return self.state.value(self.uri, EXAMPLE.label)
    def _set_label(self, value):
        """label property implementation"""
        # _trust=True: presumably skips graph re-validation for this
        # known-safe edit — see rdfrest.cores.local
        with self.edit(_trust=True) as editable:
            if value is not None:
                editable.set((self.uri, EXAMPLE.label, Literal(value)))
            else:
                editable.remove((self.uri, EXAMPLE.label, None))
    def _del_label(self):
        """label property implementation"""
        with self.edit(_trust=True) as editable:
            editable.remove((self.uri, EXAMPLE.label, None))
    # ex:label exposed as a read/write/delete Python property
    label = property(_get_label, _set_label, _del_label)
    def iter_tags(self):
        """Iter over the tags of this item"""
        for tag in self.state.objects(self.uri, EXAMPLE.tag):
            yield tag
    @property
    def tags(self):
        """List of this item's tags"""
        return set(self.iter_tags())
    def add_tag(self, tag):
        """Add a tag to this item"""
        with self.edit(_trust=True) as graph:
            graph.add((self.uri, EXAMPLE.tag, Literal(tag)))
    def rem_tag(self, tag):
        """Remove a tag from this item"""
        with self.edit(_trust=True) as graph:
            graph.remove((self.uri, EXAMPLE.tag, Literal(tag)))
    def iter_see_alsos(self):
        """Iter over the resources related to this item"""
        for uri in self.iter_see_also_uris():
            res = universal_factory(uri)
            if res is None:
                raise ValueError("Could not make resource <%s>" % uri)
            yield res
    @property
    def see_alsos(self):
        """List of this item's related resources"""
        return set(self.iter_see_alsos())
    def iter_see_also_uris(self):
        """Iter over the URI of the resources related to this item"""
        for uri in self.state.objects(self.uri, EXAMPLE.seeAlso):
            yield uri
    @property
    def see_also_uris(self):
        """List of this item's related resource URIs"""
        return set(self.iter_see_also_uris())
    def add_see_also(self, resource_or_uri):
        """Add a related resource to this item"""
        uri = coerce_to_uri(resource_or_uri, self.uri)
        with self.edit(_trust=True) as graph:
            graph.add((self.uri, EXAMPLE.seeAlso, uri))
    def rem_see_also(self, resource_or_uri):
        """Remove a related resource from this item"""
        uri = coerce_to_uri(resource_or_uri, self.uri)
        with self.edit(_trust=True) as graph:
            graph.remove((self.uri, EXAMPLE.seeAlso, uri))
    @property
    def parent(self):
        """Return the group containing this item (if any)"""
        ret = self.factory(parent_uri(self.uri), [EXAMPLE.Group])
        assert isinstance(ret, GroupMixin)
        return ret
@register_wrapper(EXAMPLE.Group)
class GroupMixin(ItemMixin):
    """Interface of a group (an item that contains other items)."""
    ITEM_TYPE = EXAMPLE.Item
    GROUP_TYPE = EXAMPLE.Group
    def __contains__(self, item):
        # only wrapped items can be members; any other object is "not in"
        if isinstance(item, ItemMixin):
            return (self.uri, EXAMPLE.contains, item.uri) in self.state
        else:
            return False
    def __iter__(self):
        return self.iter_items()
    def __len__(self):
        """Return the number of items this group contains"""
        # TODO LATER this would be more efficient with SPARQL 1.1
        ret = 0
        for _ in self.state.objects(self.uri, EXAMPLE.contains):
            ret += 1
        return ret
    def contains_item_with_id(self, ident):
        """Return whether this group has an item with the given identifier"""
        # we check the ident because an ident including a "/" could provide
        # seemingly correct results
        check_ident(ident)
        item_uri = URIRef(self.uri + ident)
        if (self.uri, EXAMPLE.contains, item_uri) in self.state:
            return True
        # test with group URI
        group_uri = URIRef(item_uri + "/")
        return (self.uri, EXAMPLE.contains, group_uri) in self.state
    def get_item(self, ident):
        """Get an item of this group by its identifier"""
        # we check the ident because an ident including a "/" could provide
        # seemingly correct results
        check_ident(ident)
        item_uri = URIRef(self.uri + ident)
        if not (self.uri, EXAMPLE.contains, item_uri) in self.state:
            # not a plain item; fall back to the group form of the URI
            item_uri = URIRef(item_uri + "/")
        item_type = self.state.value(item_uri, RDF.type)
        ret = self.factory(item_uri, [item_type])
        assert ret is None or isinstance(ret, ItemMixin)
        return ret
    def iter_items(self):
        """Iter over all items in this group"""
        self_factory = self.factory
        query = ("SELECT ?i ?t WHERE { <%s> <%s> ?i. ?i a ?t. }"
                 % (self.uri, EXAMPLE.contains))
        # NOTE(review): duplicate assignment of self_factory, harmless
        self_factory = self.factory
        for item_uri, item_type in self.state.query(query):
            yield self_factory(item_uri, [item_type])
    @property
    def items(self):
        """List of all items in this group"""
        return set(self.iter_items())
    def iter_simple_items(self):
        """Iter over simple items (i.e. not groups) in this group"""
        query = ("SELECT ?i WHERE { <%s> <%s> ?i. ?i a <%s>. }"
                 % (self.uri, EXAMPLE.contains, self.ITEM_TYPE))
        self_factory = self.factory
        for result in self.state.query(query):
            yield self_factory(result[0], [self.ITEM_TYPE])
    @property
    def simple_items(self):
        """List of simple items (i.e. not groups) in this group"""
        return set(self.iter_simple_items())
    def iter_groups(self):
        """Iter over groups in this group"""
        query = ("SELECT ?i WHERE { <%s> <%s> ?i. ?i a <%s>. }"
                 % (self.uri, EXAMPLE.contains, self.GROUP_TYPE))
        self_factory = self.factory
        for result in self.state.query(query):
            yield self_factory(result[0], [self.GROUP_TYPE])
    @property
    def groups(self):
        """List of groups in this group"""
        return set(self.iter_groups())
    def create_new_simple_item(self, ident):
        """Create a new simple item in this group"""
        check_ident(ident)
        if self.contains_item_with_id(ident):
            raise ValueError("%s already exists" % ident)
        # post a minimal graph declaring containment and the item's type
        new = Graph()
        created = URIRef(self.uri + ident)
        new.add((self.uri, EXAMPLE.contains, created))
        new.add((created, RDF.type, self.ITEM_TYPE))
        uris = self.post_graph(new, None, True, created, self.ITEM_TYPE)
        assert len(uris) == 1
        ret = self.factory(uris[0], [self.ITEM_TYPE])
        assert isinstance(ret, ItemMixin)
        return ret
    def create_new_group(self, ident):
        """Create a new group in this group"""
        check_ident(ident)
        if self.contains_item_with_id(ident):
            raise ValueError("%s already exists" % ident)
        # group URIs carry a trailing slash, unlike simple items
        new = Graph()
        created = URIRef(self.uri + ident + "/")
        new.add((self.uri, EXAMPLE.contains, created))
        new.add((created, RDF.type, self.GROUP_TYPE))
        uris = self.post_graph(new, None, True, created, self.GROUP_TYPE)
        assert len(uris) == 1
        ret = self.factory(uris[0], [self.GROUP_TYPE])
        assert isinstance(ret, GroupMixin)
        return ret
    def remove_item(self, ident):
        """Remove an item from this group"""
        # we check the ident because an ident including a "/" could provide
        # seemingly correct results
        check_ident(ident)
        item = self.get_item(ident)
        if item is None:
            return
        item.delete() # do not trust; we leave it to the implementation to
                      # check whether this is possible
        self.force_state_refresh()
# IMPLEMENTATION
# Identifiers are one or more word characters, anchored to the whole string.
_IDENT_RE = RegExp(r"[a-zA-Z_0-9]+\Z")

def check_ident(ident):
    """Raise ValueError unless *ident* consists only of [a-zA-Z0-9_]."""
    if _IDENT_RE.match(ident) is None:
        raise ValueError("Invalid identifier '%s'" % ident)
class ItemImplementation(ItemMixin, EditableCore):
    """Implementation of Item resource"""
    BASENAME = "item"
    RDF_MAIN_TYPE = EXAMPLE.Item
    @classmethod
    def mint_uri(cls, target, new_graph, created, basename=None, suffix=""):
        """I override :meth:`rdfrest.core.local.ILocalCore.mint_uri`
        to use cls.BASENAME instead of cls.__classname__.
        """
        return super(ItemImplementation, cls) \
            .mint_uri(target, new_graph, created, cls.BASENAME, suffix)
    @classmethod
    def check_new_graph(cls, service, uri, parameters, new_graph,
                        resource=None, added=None, removed=None):
        """I implement :meth:`rdfrest.cores.local.ILocalCore.check_new_graph`
        """
        diag = super(ItemImplementation, cls).check_new_graph(
            service, uri, parameters, new_graph, resource, added, removed)
        # an item graph is only valid if it declares the expected rdf:type
        if not (uri, RDF.type, cls.RDF_MAIN_TYPE) in new_graph:
            diag.append("Expected rdf:type <%s>" % cls.RDF_MAIN_TYPE)
        return diag
    def ack_delete(self, parameters):
        """I implement :meth:`rdfrest.cores.local.EditableCore.ack_delete`.
        """
        super(ItemImplementation, self).ack_delete(parameters)
        parent = self.parent
        if parent is not None:
            # unlink the deleted item from its parent group's state
            with parent.edit(_trust=True) as graph:
                graph.remove((parent.uri, EXAMPLE.contains, self.uri))
                graph.remove((self.uri, RDF.type, self.RDF_MAIN_TYPE))
class GroupImplementation(GroupMixin, FolderishMixin, GraphPostableMixin,
                          ItemImplementation):
    """Implementation of Group resource"""
    BASENAME = "group"
    RDF_MAIN_TYPE = EXAMPLE.Group
    def find_created(self, new_graph):
        """I implement :meth:`rdfrest.cores.local.GraphPostableMixin.find_created`.
        """
        # the created resource is whatever this group newly ex:contains
        query = ("SELECT ?c WHERE { <%s> <%s> ?c }"
                 % (self.uri, EXAMPLE.contains))
        return self._find_created_default(new_graph, query)
    def check_posted_graph(self, parameters, created, new_graph):
        """I implement
        :meth:`rdfrest.cores.local.GraphPostableMixin.check_posted_graph`.
        """
        diag = super(GroupImplementation, self) \
            .check_posted_graph(parameters, created, new_graph)
        if isinstance(created, URIRef):
            if not created.startswith(self.uri):
                diag.append("The URI of the created item is not consistent "
                            "with the URI of this group <%s>" % created)
            else:
                # strip the group's trailing slash before validating
                ident = created[len(self.uri):]
                if ident[-1] == "/":
                    ident = ident[:-1]
                if not _IDENT_RE.match(ident):
                    diag.append("The identifier of the created item is "
                                "invalid: [%s]" % ident)
                elif (self.uri, EXAMPLE.contains, created) in self.state:
                    diag.append("Item already exists <%s>" % created)
        return diag
    def ack_post(self, _parameters, created, new_graph):
        """I implement :meth:`rdfrest.cores.local.GraphPostableMixin.ack_post`.
        """
        rdf_type = new_graph.value(created, RDF.type)
        with self.edit(_trust=True) as graph:
            graph.add((self.uri, EXAMPLE.contains, created))
            graph.add((created, RDF.type, rdf_type))
    def check_deletable(self, parameters):
        """I implement :meth:`rdfrest.cores.local.EditableCore.check_deletable`.
        """
        diag = super(GroupImplementation, self).check_deletable(parameters)
        if self.uri == self.service.root_uri:
            diag.append("Can not delete root group")
        # groups must be emptied before they can be deleted
        if len(self) > 0:
            diag.append("Can not delete non-empty group")
        return diag
    @classmethod
    def create_service_root(cls, service):
        """Create a root-group in given service"""
        root_uri = service.root_uri
        graph = Graph(identifier=root_uri)
        graph.add((root_uri, RDF.type, cls.RDF_MAIN_TYPE))
        cls.create(service, root_uri, graph)
@register_serializer("text/html", "htm", 90, EXAMPLE.Item)
def serialize_item_in_html(resource, _parameters=None, _bindings=None,
                           _base_uri=None):
    """A dedicated HTML view of simple items"""
    # NOTE(review): the .encode("utf-8") calls produce *bytes*; under
    # Python 3, "%s" would render them as b'...'. This generator looks
    # Python 2-era — confirm the target interpreter before reuse.
    assert resource.RDF_MAIN_TYPE == EXAMPLE.Item
    values = {
        "identifier": resource.identifier.encode("utf-8"),
    }
    yield """<!DOCTYPE html>
<html>
  <head><title>Item %(identifier)s</title></head>
  <body>
    <section><h1>Item %(identifier)s</h1>
      <section><h2>Tags</h2>
        <ul>""" % values
    for i in resource.iter_tags():
        yield """
          <li>%s</li>""" % i.encode("utf-8")
    yield """
        </ul>
      </section>
      <section><h2>See also</h2>
        <ul>"""
    for i in resource.iter_see_also_uris():
        yield """
          <li><a href="%s">%s</a></li>""" \
            % (str(i), str(i))
    yield """
      </section>
    </section>
    <footer><a href="%s.html">See default HTML view</a></footer>
  </body>
</html>\n""" % str(resource.uri)
@register_serializer("text/tags", None, 20)
@register_serializer("text/plain", "tags", 15)
def serialize_tags(resource, _parameters=None, _bindings=None, _base_uri=None):
    """A dedicated format exporting only the list of tags"""
    # NOTE(review): bytes + "\n" (str) raises TypeError on Python 3; this
    # too assumes a Python 2 runtime — confirm.
    return ( i.encode("utf-8")+"\n" for i in resource.tags )
# MAIN FUNCTION AND TESTS
def main():
    """Runs an HTTP server serving items and groups.

    If 'test' is passed as argument, first run :func:`do_tests` on both a
    local service and through HTTP.
    """
    test = len(argv) > 1 and argv[1] == "test"
    #root_uri = URIRef("http://localhost:1234/foo/")
    service_config = get_service_configuration()
    service_config.set('server', 'port', '1234')
    service_config.set('server', 'base-path', '/foo')
    # TODO Store management : special tests ?
    serv = make_example1_service(service_config)
    root_uri = serv.root_uri
    if test:
        # first exercise the implementation directly, without HTTP
        local_root = serv.get(root_uri, [EXAMPLE.Group])
        assert isinstance(local_root, GroupImplementation)
        do_tests(local_root)
        print("Local tests passed")
    thread, _httpd = make_example1_httpd(serv)
    try:
        if test:
            # then repeat the same tests through the HTTP client wrapper
            remote_root = HttpClientCore.factory(root_uri, EXAMPLE.Group)
            assert isinstance(remote_root, GroupMixin)
            do_tests(remote_root)
            print("Remote tests passed")
        print("Now listening on", root_uri)
        sleep(3600) # allows to catch KeyboardInterrupt (thread.join doesn't)
        thread.join() # prevents 'finally' clause if sleep above expires
    finally:
        _httpd.shutdown()
        print("httpd stopped")
def make_example1_service(service_config=None):
    """Make a service serving items and groups."""
    # the last argument populates the service with a root group on first run
    return Service([ItemImplementation, GroupImplementation],
                   service_config,
                   GroupImplementation.create_service_root)
def do_tests(root):
    """Test items and groups implementations.
    Populate root with items and sub-groups, trying to be exhaustive in the
    tested functionalities.
    Then cleans everything.
    """
    check_content(root, [])
    item1 = root.create_new_simple_item("item1")
    check_content(root, [item1])
    test_label_and_tags(item1)
    item2 = root.create_new_simple_item("item2")
    check_content(root, [item1, item2])
    test_label_and_tags(item2)
    group1 = root.create_new_group("group1")
    check_content(root, [item1, item2, group1])
    test_label_and_tags(group1)
    check_content(group1, [])
    item11 = group1.create_new_simple_item("item1")
    check_content(root, [item1, item2, group1])
    check_content(group1, [item11])
    test_label_and_tags(item11)
    # identifiers are scoped to their group, so "item1" can exist in both
    assert item1.identifier == item11.identifier
    assert group1.identifier == "group1"
    assert item1 in root and item1 not in group1
    assert item11 in group1 and item11 not in root
    # test the _no_spawn argument in factory
    item1bis = root.factory(URIRef("item1", root.uri), [EXAMPLE.Item])
    assert item1bis is item1
    item1bis = root.factory(URIRef("item1", root.uri), [EXAMPLE.Item], _no_spawn=True)
    assert item1bis is item1
    del item1, item1bis
    # the following asserts rely on the garbage collector,
    # so they fail in some situations and depend on the interpreter
    #item1 = root.factory(URIRef("item1", root.uri), [EXAMPLE.Item], _no_spawn=True)
    #assert item1 is None
    # clean everything
    root.remove_item("item1")
    check_content(root, [item2, group1])
    root.remove_item("item1") # removing item twice
    check_content(root, [item2, group1])
    root.remove_item("item3") # removing non-existing item
    check_content(root, [item2, group1])
    root.remove_item("item2")
    check_content(root, [group1])
    # a non-empty group can not be removed
    with assert_raises(CanNotProceedError):
        root.remove_item("group1")
    group1.remove_item("item1")
    check_content(group1, [])
    root.remove_item("group1")
    check_content(root, [])
def make_example1_httpd(service=None, service_config=None):
    """Make a HTTPd running in a separate thread.
    Return the thread and the HTTPd.
    :param service: if provided, serve it; otherwise build a fresh example
        service from *service_config*
    NB: the service is assumed to be located on localhost:1234
    """
    BASE_PATH = '/foo'
    if service is None:
        if service_config is None:
            service_config = get_service_configuration()
            service_config.set('server', 'port', '1234')
            service_config.set('server', 'base-path', BASE_PATH)
        service = make_example1_service(service_config)
    # cache_control="max-age=60")
    # Mount the frontend under BASE_PATH and start serving in a daemon-style
    # background thread; the caller is responsible for shutting it down.
    app = SimpleRouter([(BASE_PATH, HttpFrontend(service, service_config))])
    _httpd = make_server(service_config.get('server', 'host-name', raw=1),
                         service_config.getint('server', 'port'),
                         app)
    thread = Thread(target=_httpd.serve_forever)
    thread.start()
    return thread, _httpd
def check_content(group, ref_items):
    """Check that *group* contains exactly the items in *ref_items*."""
    expected = set(ref_items)
    expected_groups = {member for member in expected
                       if isinstance(member, GroupMixin)}
    assert group.items == expected
    assert len(group) == len(expected)
    assert group.simple_items == expected - expected_groups
    assert group.groups == expected_groups
    for member in group:
        assert member in group
        assert group.contains_item_with_id(member.identifier)
        assert member.parent == group
        assert group.get_item(member.identifier) == member
def test_label_and_tags(item):
    """Test label- and tag-related functionalities on item.
    Exercises the label getter/setter/deleter, then tag addition and
    removal, including duplicate and unicode tags.
    """
    # label
    assert item.label is None
    item.label = "hello world"
    assert item.label == Literal("hello world")
    item.label = "bonjour le monde"
    assert item.label == Literal("bonjour le monde")
    item.label = None
    assert item.label is None
    item.label = "Halo Welt"
    assert item.label == Literal("Halo Welt")
    del item.label
    assert item.label is None
    # adding tags
    assert item.tags == set([])
    item.add_tag("tag1")
    assert item.tags == set([Literal("tag1")])
    item.add_tag("tag1")
    assert list(item.iter_tags()) == [Literal("tag1")] # tags do not duplicate
    item.add_tag("tag2")
    # NB: the following were ``assert item.tags, set([A == B])``, i.e. the
    # comma made the set a mere assertion *message* and only the truthiness
    # of item.tags was checked; they now compare the actual tag sets.
    assert item.tags == set([Literal("tag1"), Literal("tag2")])
    item.add_tag("tag3")
    assert item.tags == set([Literal("tag1"), Literal("tag2"), Literal("tag3")])
    # removing tags
    item.rem_tag("tag2")
    assert item.tags == set([Literal("tag1"), Literal("tag3")])
    item.rem_tag("tag2") # removing tag twice has no effect
    assert item.tags == set([Literal("tag1"), Literal("tag3")])
    item.rem_tag("tag4") # removing non-existing tag has no effect
    assert item.tags == set([Literal("tag1"), Literal("tag3")])
    # unicode tags
    item.add_tag("tagué")
    assert item.tags == set([Literal("tag1"), Literal("tag3"), Literal("tagué")])
# Entry point: serve (and optionally self-test) the example service.
if __name__ == "__main__":
    main()
| ktbs/ktbs | utest/example1.py | example1.py | py | 21,837 | python | en | code | 24 | github-code | 13 |
20680529038 | import argparse
import rlcard
from rlcard.agents import RandomAgent
from rlcard.utils import set_seed
def run(args, num_episodes=2):
    """Play random agents against each other in an RLCard environment.

    :param args: parsed CLI arguments; ``args.env`` names the environment.
    :param num_episodes: number of episodes to play (default 2, matching
        the original hard-coded behaviour).
    """
    # Make environment
    env = rlcard.make(args.env, config={'seed': 42})
    # Seed numpy, torch, random
    set_seed(42)
    # Set agents: every seat is played by the same uniform-random policy
    agent = RandomAgent(num_actions=env.num_actions)
    env.set_agents([agent for _ in range(env.num_players)])
    for episode in range(num_episodes):
        # Generate data from the environment
        trajectories, player_wins = env.run(is_training=False)
        # Print out the per-player payoffs for this episode
        print('\nEpisode {}'.format(episode))
        print(player_wins)
if __name__ == '__main__':
    # CLI: --env selects the RLCard environment (default 'uno').
    parser = argparse.ArgumentParser("Random Agent Uno Example")
    parser.add_argument('--env', type=str, default='uno')
    args = parser.parse_args()
    run(args)
6948030724 | from typing import *
class Solution:
    def sortByBits(self, arr: List[int]) -> List[int]:
        """Sort *arr* in place by number of 1-bits, ties broken by value.

        Returns the (mutated) input list, matching the original contract.
        Sorting directly on the (popcount, value) key removes the
        intermediate dict the original built, and is equally correct for
        duplicate values.
        """
        arr.sort(key=lambda val: (self.num_one(val), val))
        return arr

    def num_one(self, val: int) -> int:
        """Return the number of set bits in *val* (Kernighan's algorithm)."""
        num = 0
        while val:
            # Clearing the lowest set bit runs in O(popcount) iterations.
            val = val & (val - 1)
            num += 1
        return num
if __name__ == '__main__':
    sol = Solution()
    # quick sanity check of bitwise AND: 3 & 1 == 1
    print(3 & 1)
    arr = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    print(sol.sortByBits(arr))
| Xiaoctw/LeetCode1_python | 位运算/根据数字二进制下1的数目排序_1356.py | 根据数字二进制下1的数目排序_1356.py | py | 516 | python | en | code | 0 | github-code | 13 |
2434116573 | #!/usr/bin/env python3
"""
---------------------------
Test :mod:`phile.tray.tmux`
---------------------------
"""
# Standard library.
import asyncio
import typing
import unittest
# Internal packages.
import phile.asyncio
import phile.tray
import phile.tray.tmux
from test_phile.test_tmux.test_control_mode import (
UsesClientWithFakeSubprocess,
)
class TestRun(
    UsesClientWithFakeSubprocess,
    unittest.IsolatedAsyncioTestCase,
):
    """Exercise :func:`phile.tray.tmux.run` against a fake control-mode client."""
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        super().__init__(*args, **kwargs)
        # Declared here for type checking; assigned in the set-up helpers.
        self.client_task: asyncio.Task[typing.Any]
        self.control_mode: phile.tmux.control_mode.Client
        self.run_task: asyncio.Task[typing.Any]
        self.text_icons: phile.tray.TextIcons
        self.tray_registry: phile.tray.Registry
    async def asyncSetUp(self) -> None:
        await super().asyncSetUp()
        self.tray_registry = tray_registry = phile.tray.Registry()
        self.text_icons = phile.tray.TextIcons(
            tray_registry=tray_registry
        )
        await phile.asyncio.wait_for(self.async_set_up_reply())
    async def async_set_up_reply(self) -> None:
        """Start the control-mode client and complete its handshake."""
        self.control_mode = self.client
        self.client_task = client_task = asyncio.create_task(
            self.client.run()
        )
        self.addCleanup(client_task.cancel)
        # Looks like the tmux control-mode greeting bytes — sent so the
        # client finishes its start-up exchange. (TODO confirm framing.)
        await self.server_sendall(b"\x1bP1000p%begin 0\r\n%end 0\r\n")
    async def async_set_up_run(self) -> None:
        """Start the code under test and ensure it is cancelled on teardown."""
        self.run_task = run_task = asyncio.create_task(
            phile.tray.tmux.run(
                control_mode=self.control_mode,
                text_icons=self.text_icons,
            )
        )
        self.addAsyncCleanup(phile.asyncio.cancel_and_wait, run_task)
    async def test_checks_for_existing_files(self) -> None:
        # Entry added *before* run() starts must still be rendered.
        self.tray_registry.add_entry(
            phile.tray.Entry(name="year", text_icon="2345")
        )
        await self.async_set_up_run()
        await phile.asyncio.wait_for(
            self.check_status_right_set_to("2345")
        )
    async def test_checks_for_file_changes(self) -> None:
        # Entry added *after* run() starts must trigger a refresh.
        await self.async_set_up_run()
        await phile.asyncio.wait_for(self.check_status_right_set_to(""))
        self.tray_registry.add_entry(
            phile.tray.Entry(name="year", text_icon="3456")
        )
        await self.check_status_right_set_to("3456")
    async def test_stops_gracefully_if_text_icons_stops(self) -> None:
        await self.async_set_up_run()
        await phile.asyncio.wait_for(self.text_icons.aclose())
        await phile.asyncio.wait_for(self.run_task)
| BoniLindsley/phile | tests/test_phile/test_tray/test_tmux.py | test_tmux.py | py | 2,607 | python | en | code | 0 | github-code | 13 |
4749899921 | from collections import namedtuple
import networkx as nx
# A grid cell: row, column, numeric elevation, and a human-readable name.
Point = namedtuple('Loc', ['r', 'c', 'elevation', 'name'])
def elevation_from_letter(letter):
    """Map a map character to a 0-25 elevation; 'S' acts as 'a', 'E' as 'z'."""
    normalized = {'S': 'a', 'E': 'z'}.get(letter, letter)
    if not 'a' <= normalized <= 'z':
        raise ValueError(f"Invalid elevation: {normalized}")
    return ord(normalized) - ord('a')
def create_tuple_from_rcl(row, col, elevation):
    """Build a Point for grid cell (row, col) from its map character.

    'S' and 'E' keep their letter as the name; every other cell is named
    by its coordinates.
    """
    name = elevation if elevation in ('S', 'E') else f"({row}, {col})"
    return Point(row, col, elevation_from_letter(elevation), name)
# Read the input, which is a grid of rows and columns of letters.
# The grid is a directed graph, where each node is a Point.
# The edges are the paths between Points.
# The start node is the Point with the name 'S'
# The end node is the Point with the name 'E'
# There is an edge between two Points if the elevation of the first Point
# is within 1 of the elevation of the second Point.
def create_graph(input):
    """Build (graph, start, end) from the grid of elevation letters."""
    graph = nx.DiGraph()
    grid = []
    start = None
    end = None
    for row, line in enumerate(input):
        grid_line = []
        grid.append(grid_line)
        for col, elevation in enumerate(line):
            point = create_tuple_from_rcl(row, col, elevation)
            graph.add_node(point)
            grid_line.append(point)
            if point.name == 'S':
                start = point
            elif point.name == 'E':
                end = point
    for row, line in enumerate(grid):
        for col, point in enumerate(line):
            # Check the four orthogonal neighbours, staying inside the grid.
            for r, c in [(row-1, col), (row, col-1), (row, col+1), (row+1, col)]:
                if 0 <= r < len(input) and 0 <= c < len(line):
                    other = grid[r][c]
                    # A step is allowed when the destination is at most one
                    # unit higher than the current point.
                    if (point.elevation + 1) - other.elevation >= 0:
                        #print("Adding edge", point.elevation, other.elevation)
                        graph.add_edge(point, other)
    return graph, start, end
def read_input():
    """Return the puzzle input as a list of whitespace-stripped lines."""
    with open('input.txt') as handle:
        return [row.strip() for row in handle]
def read_graph():
    """Parse input.txt into a (graph, start, end) triple."""
    return create_graph(read_input())
def shortest_path_or_none(graph, start, end):
    """Return the shortest path from start to end, or None if unreachable."""
    try:
        return nx.shortest_path(graph, start, end)
    except nx.NetworkXNoPath:
        return None
def main():
    """Solve both parts of the puzzle and print the answers."""
    print("Part 1")
    graph, start, end = read_graph()
    path = nx.shortest_path(graph, start, end)
    # print(''.join(point.name for point in path))
    # Number of steps is one less than the number of nodes on the path.
    print(f"Length: {len(path)-1}")
    print("Part 2")
    # get all the points with elevation 0
    points = [point for point in graph.nodes if point.elevation == 0]
    # find the shortest path from each of those points to the end
    paths = [shortest_path_or_none(graph, point, end) for point in points]
    # remove the paths that don't exist
    paths = [path for path in paths if path is not None]
    # list the starting point and the length of the path for each path
    path_lengths = [(path[0], len(path)-1) for path in paths]
    # sort the paths by length
    path_lengths.sort(key=lambda x: x[1])
    # print the starting points of all the paths
    # print the length of shortest path
    print(path_lengths[0][1])
if __name__ == '__main__':
    main()
| willf/advent_of_code_2022 | 12/solve.py | solve.py | py | 3,283 | python | en | code | 0 | github-code | 13 |
37290568985 | # Sky Hoffert, Gabby Boehmer
# main.py
# Processes exoplanet data
import matplotlib.pyplot as plt
import sys
import numpy as np
db_path = 'data/kepler.csv'
def main():
    """Parse the Kepler CSV at *db_path* and plot two of its columns.

    Commas inside double-quoted fields are rewritten to semicolons so that
    a naive split(',') yields a fixed column count; only rows with exactly
    98 columns are kept. Reference lines mark Earth's mass (in Jupiter
    masses) and orbital period.
    """
    db = []
    # 'with' guarantees the input file is closed (the original leaked it).
    with open(db_path, 'r') as filein:
        for line in filein:
            sections = line.split('"')
            # Odd-indexed sections were inside double quotes: neutralise
            # their commas so they do not split into extra columns.
            finalword = ''.join(
                sect.replace(',', ';') if i % 2 == 1 else sect
                for i, sect in enumerate(sections)
            )
            # only add to db if the columns are correct
            noendline = finalword[:-1]
            tokens = noendline.split(',')
            if len(tokens) == 98:
                db.append(tokens)
    # remove the heading line
    header = db[0]
    db = db[1:]
    print(len(db))
    # Columns to plot (indices into the 98-column rows).
    x_idx = 11
    y_idx = 2
    x_data = []
    y_data = []
    # only add entries without null values
    for row in db:
        if row[x_idx] != '' and row[y_idx] != '':
            x_data.append(float(row[x_idx]))
            y_data.append(float(row[y_idx]))
    # Pad the log-scale axes a little beyond the data range.
    x_lim = (min(x_data) * 0.2, max(x_data) * 5)
    y_lim = (min(y_data) * 0.2, max(y_data) * 5)
    # Earth's mass in Jupiter masses, and Earth's orbital period in days.
    m_earth = 1.0 / 317.8
    m_earth_data = np.full(len(x_data), m_earth)
    op_earth = 365
    op_earth_data = np.full(len(y_data), op_earth)
    # plot with matplotlib
    plt.plot(x_data, y_data, 'bo', x_data, m_earth_data, 'r-', op_earth_data, y_data, 'g-')
    plt.xlabel(header[x_idx])
    plt.ylabel(header[y_idx])
    plt.xlim(x_lim)
    plt.ylim(y_lim)
    plt.xscale('log')
    plt.yscale('log')
    plt.text(5*10e+3, m_earth*1.1, 'mass of earth', rotation=0)
    plt.text(op_earth*1.1, 10e-4, 'orbital period of earth', rotation=-90)
    plt.show()
if __name__ == '__main__':
    main()
    # NOTE(review): sys.exit() after main() returns looks redundant — confirm intent.
    sys.exit()
| skyhoffert/ds_exoplanets | sky.py | sky.py | py | 2,374 | python | en | code | 1 | github-code | 13 |
7528376792 | import time
import numpy as np
import pandas as pd
from bokeh import plotting as bop, io as boi
from bokeh import models as bom, events as boe, layouts as bol
from bokeh.palettes import Category10_10
from itertools import cycle
from . import stats, paths
from contextlib import contextmanager
from IPython.display import clear_output
bop.output_notebook(hide_banner=True)
def array(fig):
    """Render a matplotlib figure and return its RGB pixels as an array."""
    fig.canvas.draw_idle()
    renderer = fig.canvas.get_renderer()
    width = int(renderer.width)
    height = int(renderer.height)
    rgba = np.frombuffer(renderer.buffer_rgba(), np.uint8)
    pixels = rgba.reshape((height, width, 4))
    # Drop the alpha channel; copy so the result owns its memory.
    return pixels[:, :, :3].copy()
def timedelta_xaxis(f):
    """Format the x axis of *f* as an elapsed time (h/m/s) rather than a date."""
    f.xaxis.ticker = bom.tickers.DatetimeTicker()
    # The client-side formatter picks the coarsest/finest units from the
    # actual tick spacing, so labels stay compact at any zoom level.
    f.xaxis.formatter = bom.FuncTickFormatter(code="""
        // TODO: Add support for millis
        // Calculate the hours, mins and seconds
        var s = Math.floor(tick / 1e3);
        var m = Math.floor(s/60);
        var s = s - 60*m;
        var h = Math.floor(m/60);
        var m = m - 60*h;
        var h = h.toString();
        var m = m.toString();
        var s = s.toString();
        var pm = m.padStart(2, "0");
        var ps = s.padStart(2, "0");
        // Figure out what the min resolution is going to be
        var min_diff = Infinity;
        for (var i = 0; i < ticks.length-1; i++) {
            min_diff = Math.min(min_diff, ticks[i+1]-ticks[i]);
        }
        if (min_diff <= 60e3) {
            var min_res = 2;
        } else if (min_diff <= 3600e3) {
            var min_res = 1;
        } else {
            var min_res = 0;
        }
        // Figure out what the max resolution is going to be
        if (ticks.length > 1) {
            var max_diff = ticks[ticks.length-1] - ticks[0];
        } else {
            var max_diff = Infinity;
        }
        if (max_diff >= 3600e3) {
            var max_res = 0;
        } else if (max_diff >= 60e3) {
            var max_res = 1;
        } else {
            var max_res = 2;
        }
        // Format the timedelta. Finally.
        if ((max_res == 0) && (min_res == 0)) {
            return `${h}h`;
        } else if ((max_res == 0) && (min_res == 1)) {
            return `${h}h${pm}`;
        } else if ((max_res == 0) && (min_res == 2)) {
            return `${h}h${pm}m${ps}`;
        } else if ((max_res == 1) && (min_res == 1)) {
            return `${m}m`;
        } else if ((max_res == 1) && (min_res == 2)) {
            return `${m}m${ps}`;
        } else if ((max_res == 2) && (min_res == 2)) {
            return `${s}s`;
        }
    """)
def suffix_yaxis(f):
    """Format the y axis of *f* with SI suffixes (k, M, G, ..., m, µ, ...)."""
    f.yaxis.formatter = bom.FuncTickFormatter(code="""
        var min_diff = Infinity;
        for (var i = 0; i < ticks.length-1; i++) {
            min_diff = Math.min(min_diff, ticks[i+1]-ticks[i]);
        }
        var suffixes = [
            'y', 'z', 'a', 'f', 'p', 'n', 'µ', 'm', 
            '', 
            'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];
        var precision = Math.floor(Math.log10(min_diff));
        var scale = Math.floor(precision/3);
        var index = scale + 8;
        if (index < 0) {
            //TODO: Fall back to numbro here
            return tick;
        } else if (index == 7) {
            // Millis are weird. Feels better to rende them as decimals.
            var decimals = -precision;
            return `${tick.toFixed(decimals)}`
        } else if (index < suffixes.length) {
            var suffix = suffixes[index];
            var scaled = tick/Math.pow(10, 3*scale);
            return `${scaled.toFixed(0)}${suffix}`
        } else {
            //TODO: Fall back to numbro here
            return tick;
        }
    """)
def x_zeroline(f):
    """Draw a full-height reference line at x = 0."""
    zero_line = bom.Span(location=0, dimension='height')
    f.add_layout(zero_line)
def default_tools(f):
    """Hide the toolbar, make box-zoom the drag tool, reset on double-tap."""
    f.toolbar_location = None
    f.toolbar.active_drag = f.select_one(bom.BoxZoomTool)
    # f.toolbar.active_scroll = f.select_one(bom.WheelZoomTool)
    # f.toolbar.active_inspect = f.select_one(bom.HoverTool)
    f.js_on_event(
        boe.DoubleTap, 
        bom.callbacks.CustomJS(args=dict(p=f), code='p.reset.emit()'))
def styling(f):
    """Apply the shared timedelta/suffix axis styling to *f*."""
    timedelta_xaxis(f)
    suffix_yaxis(f)
def _timeseries(source, x, y):
    """Build a one-line figure from *source* using columns *x* and *y*."""
    #TODO: Work out how to apply the axes formatters to the tooltips
    f = bop.figure(x_range=bom.DataRange1d(start=0, follow='end'), tooltips=[('', '$data_y')])
    f.line(x=x, y=y, source=source)
    default_tools(f)
    x_zeroline(f)
    styling(f)
    return f
def timeseries(s):
    """Plot a time-indexed pandas Series as a single-line chart."""
    source = bom.ColumnDataSource(s.reset_index())
    return _timeseries(source, s.index.name, s.name)
def _timedataframe(source, x, ys):
    """Build a multi-line figure from *source*: one line per column in *ys*."""
    f = bop.figure(x_range=bom.DataRange1d(start=0, follow='end'), tooltips=[('', '$data_y')])
    for y, color in zip(ys, cycle(Category10_10)):
        f.line(x=x, y=y, legend_label=y, color=color, width=2, source=source)
    default_tools(f)
    x_zeroline(f)
    styling(f)
    # Compact legend so it does not crowd the small grid plots.
    f.legend.label_text_font_size = '8pt'
    f.legend.margin = 7
    f.legend.padding = 0
    f.legend.spacing = 0
    f.legend.background_fill_alpha = 0.3
    f.legend.border_line_alpha = 0.
    f.legend.location = 'top_left'
    return f
def timedataframe(df):
    """Plot each column of a time-indexed DataFrame as its own line."""
    source = bom.ColumnDataSource(df.reset_index())
    return _timedataframe(source, df.index.name, df.columns)
def timegroups(df):
    """Split column names of the form 'chart/label' (or bare 'chart') into a
    frame with 'chart' and 'label' columns, indexed by the original names."""
    pattern = r'^(?P<chart1>.*)/(?P<label>.*)|(?P<chart2>.*)$'
    parsed = df.columns.str.extract(pattern)
    parsed['chart'] = parsed['chart1'].combine_first(parsed['chart2'])
    parsed.index = df.columns
    return parsed.loc[:, ['chart', 'label']].fillna('')
class Stream:
    """Live-updating notebook dashboard fed from a stats reader."""
    def __init__(self, run_name=-1, prefix=''):
        super().__init__()
        self._reader = stats.Reader(run_name, prefix)
        self._source = bom.ColumnDataSource({'time': np.array([0])})
        self._handle = None
    def _new_grid(self, children):
        # Fixed-size cells; tools stay per-figure rather than shared.
        return bol.gridplot(children, ncols=4, plot_width=350, plot_height=300, merge_tools=False)
    def _init(self, df):
        """(Re)build the whole grid of figures for the columns of *df*."""
        self._source = bom.ColumnDataSource(df.reset_index())
        children = []
        for name, group in timegroups(df).groupby('chart'):
            if group.label.eq('').all():
                # Bare column name: a single-series chart.
                assert len(group) == 1
                f = _timeseries(self._source, 'time', group.index[0])
                f.title = bom.Title(text=name)
            else:
                # 'chart/label' columns: one multi-line chart per chart name.
                f = _timedataframe(self._source, 'time', group.index)
                f.title = bom.Title(text=name)
            children.append(f)
        self._grid = self._new_grid(children)
        ## TODO: Not wild about this
        clear_output(wait=True)
        self._handle = bop.show(self._grid, notebook_handle=True)
    def update(self, rule='60s', df=None):
        """Pull fresh data (or use *df*) and push it to the displayed figures."""
        # Drop the last row as it'll be constantly refreshed as the period occurs
        df = self._reader.resample(rule).iloc[:-1] if df is None else df
        has_new_cols = not df.columns.isin(self._source.data).all()
        if has_new_cols:
            # New charts appeared: rebuild the grid from scratch.
            self._init(df)
        else:
            # Same charts: stream only the rows not yet sent.
            threshold = len(self._source.data['time'])
            new = df.iloc[threshold:]
            self._source.stream(new.reset_index())
        boi.push_notebook(handle=self._handle)
def view(run_name=-1, prefix='', rule='60s'):
    """Poll the stats for *run_name* forever, refreshing plots every second."""
    stream = Stream(run_name, prefix)
    while True:
        stream.update(rule=rule)
        time.sleep(1)
def review(run_name=-1, prefix='', rule='60s'):
    """Render a one-off (non-updating) view of the stats for *run_name*."""
    stream = Stream(run_name, prefix)
    stream.update(rule=rule)
def test_stream():
    """Smoke-test Stream against hand-built frames with a growing column set."""
    times = pd.TimedeltaIndex([0, 60e3, 120e3])
    dfs = [
        pd.DataFrame([[0]], columns=['a'], index=times[:1]),
        pd.DataFrame([[0, 1], [10, 20]], columns=['a', 'b/a'], index=times[:2]),
        pd.DataFrame([[0, 1, 2], [10, 20, 30], [100, 200, 300]], columns=['a', 'b/a', 'b/b'], index=times[:3])]
    stream = Stream()
    for df in dfs:
        # Pass the frame as the *df* keyword: positionally it would bind to
        # *rule*, and Stream.update would try to resample the reader with a
        # DataFrame instead of displaying the frame itself.
        stream.update(df=df)
        time.sleep(1)
| andyljones/megastep | rebar/plots.py | plots.py | py | 7,923 | python | en | code | 117 | github-code | 13 |
24393423066 | import controller.verification as v
from model.sample import Sample
from model.feature import Feature
import unittest
class TestVerification(unittest.TestCase):
"""
A class for testing functions from verification module
Attributes (Object)
modelvalues_for_testing: example modelvalues for test functions
testvalues_for_testing: example testvalues for test functions
samples_for_testing: example samples for test functions
Methods
setUp(): sets up test values before tests
test_calculate_euklidean_distance(): tests the calculate_euklidean_distance() function
test_calculate_euklidean_distance_negative(): tests the calculate_euklidean_distance() function with negative values
test_create_modelvalues(): tests the create_modelvalues() function
test_build_vectors_as_list(): tests the build_vectors_as_list() function
test_build_vectors_as_list_no_match (): tests the build_vectors_as_list() function without a match
test_create_testvalues_by_nearest_neighbor(): tests the create_testvalues_by_nearest_neighbor() function
test_create_testvalues_by_nearest_neighbor_no_match(): tests the create_testvalues_by_nearest_neighbor() function without a match
test_verify_samples_multiple_testsamples_not_encrypted(): tests the verify_samples() function with multiple testsamples and not encrypted
test_verify_samples_one_testsample_not_encrypted(): tests the verify_samples() function with one testsample and not encrypted
test_verify_samples_not_encrypted_no_match(): tests the verify_samples() function not encrypted and with no comparable data
test_verify_samples_multiple_testsamples_encrypted(): tests the verify_samples() function with multiple testsamples and encrypted
test_verify_per_threshold_one_testsample_encrypted(): tests the verify_per_threshold() function with one testsample and encrypted
test_verify_samples_no_learnsamples(): tests the verify_samples() function with empty learnsamples list
test_verify_samples_no_testsamples(): tests the verify_samples() function with empty testsamples list
test_get_results_per_threshold(): tests the get_results_per_threshold() function
test_get_results_per_threshold_no_compared_values(): tests the get_results_per_threshold() function with compared_values = 0
"""
def setUp(self):
"""
sets up test values before tests
"""
self.modelvalues_for_testing = (
{(Feature.M, "x") : 200,
(Feature.DD, "xy") : 250,
(Feature.UD, "xy") : 100,
(Feature.M, "y") : 150},
{(Feature.UU, "ab") : 200})
self.testvalues_for_testing = (
{(Feature.M, "x") : [180, 210],
(Feature.DD, "xy") : [220],
(Feature.UD, "xy") : [80, 300, 220, 250],
(Feature.DU, "ab") : [100, 150, 250]},
{(Feature.M, "u") : [280],
(Feature.DD, "uv") : [400, 450, 320, 120],
(Feature.UD, "uv") : [110, 320, 280],
(Feature.DU, "uv") : [100, 150],
(Feature.M, "v") : [180, 220]},
{(Feature.M, "a") : [100, 150, 150],
(Feature.DD, "ab") : [220, 200],
(Feature.UD, "ab") : [100, 150],
(Feature.M, "b") : [80]},
{(Feature.M, "b") : [120, 300],
(Feature.M, "c") : [180],
(Feature.DD, "ab") : [180, 200],
(Feature.DD, "xy") : [300, 320]},
{(Feature.DD, "xy") : [100, 200],
(Feature.M, "b") : [200],
(Feature.M, "a") : [250, 250],
(Feature.UD, "xy") : [300, 280, 280, 280]},
{(Feature.DD, "ab") : [160],
(Feature.M, "y") : [200]})
self.samples_for_testing = (
Sample("content", 123456, "username1", self.testvalues_for_testing[1]),
Sample("content", 123456, "username2", self.testvalues_for_testing[2]),
Sample("content", 123456, "username3", self.testvalues_for_testing[3]),
Sample("content", 123456, "username4", self.testvalues_for_testing[4]),
Sample("content", 123456, "username5", self.testvalues_for_testing[5]))
def test_calculate_euklidean_distance(self):
"""
tests the calculate_euklidean_distance() function from verification module
"""
vectors = [(1, 1), (2, 3), (5, 3)]
# (((1 - 1)^2 + (2 - 3)^2 + (5 - 3)^2) ^ 0.5) / (3 ^ 0.5) = 2.23606797749979 / 1.732050807568877 = 1.290994448735806
ref = round (1.290994448735806, 4)
self.assertEqual(v.calculate_euklidean_distance (vectors), ref)
def test_calculate_euklidean_distance_negative(self):
"""
tests the calculate_euklidean_distance() function from verification module with negative values
"""
vectors = [(1, 1), (-2, 3), (5, -3)]
# (((1 - 1)^2 + (-2 - 3)^2 + (5 + 3)^2) ^ 0.5) / (3 ^ 0.5) = 9.433981132056604 / 1.732050807568877 = 5.446711546122732
ref = round (5.446711546122732, 4)
self.assertEqual(v.calculate_euklidean_distance (vectors), ref)
def test_create_modelvalues(self):
"""
tests the create_modelvalues() function from verification module
"""
learnsamples = [self.samples_for_testing[1], self.samples_for_testing[2], self.samples_for_testing[3]]
ref = {
# (100 + 150 + 150 + 250 + 250) / 5 = 180
(Feature.M, "a") : 180,
# (220 + 200 + 180 + 200) / 4 = 200
(Feature.DD, "ab") : 200,
# (100 + 150) / 2 = 125
(Feature.UD, "ab") : 125,
# (80 + 120 + 300 + 200) / 4 = 175
(Feature.M, "b") : 175,
# 180 / 1 = 180
(Feature.M, "c") : 180,
# (300 + 320 + 100 + 200) / 4 = 230
(Feature.DD, "xy") : 230,
# (300 + 280 + 280 + 280) / 4 = 285
(Feature.UD, "xy") : 285
}
self.assertEqual(v.create_modelvalues(learnsamples).items(), ref.items())
def test_build_vectors_as_list(self):
"""
tests the build_vectors_as_list() function from verification module
"""
modelvalues = self.modelvalues_for_testing[0]
testvalues = self.testvalues_for_testing[0]
ref = [
(200, 180),
(200, 210),
(250, 220),
(100, 80),
(100, 300),
(100, 220),
(100, 250)
]
self.assertEqual(v.build_vectors_as_list(modelvalues, testvalues), ref)
def test_build_vectors_as_list_no_match (self):
"""
tests the build_vectors_as_list() function from verification module without a match
"""
modelvalues = self.modelvalues_for_testing[0]
testvalues = self.testvalues_for_testing[1]
self.assertEqual(v.build_vectors_as_list(modelvalues, testvalues), [])
def test_create_testvalues_by_nearest_neighbor(self):
"""
tests the create_testvalues_by_nearest_neighbor() function from verification module
"""
model = self.modelvalues_for_testing[0]
testsample = self.samples_for_testing[0]
ref = {(Feature.M, "x") : [280, 180, 220], (Feature.DD, "xy") : [400, 450, 320, 120], (Feature.UD, "xy") : [110, 320, 280]}
self.assertEqual(v.create_testvalues_by_nearest_neighbor(model, testsample), ref)
def test_create_testvalues_by_nearest_neighbor_no_match(self):
"""
tests the create_testvalues_by_nearest_neighbor() function from verification module without a match
"""
model = self.modelvalues_for_testing[1]
testsample = self.samples_for_testing[0]
self.assertEqual(v.create_testvalues_by_nearest_neighbor(model, testsample), {})
def test_verify_samples_multiple_testsamples_not_encrypted(self):
"""
tests the verify_samples() function from verification module with multiple testsamples and not encrypted
"""
learnsamples = [self.samples_for_testing[2]]
testsamples = [self.samples_for_testing[1], self.samples_for_testing[3]]
# model: {(Feature.M, "b") : 210, (Feature.M, "c") : 180, (Feature.DD, "ab") : 190, (Feature.DD, "xy") : 310}
# vector testsample 1: [(190, 220), (190, 200), (210, 80)]
# vector testsample 2: [(310, 100), (310, 200), (210, 200)]
# normalized euklidean distance
# testsample 1: (((190 - 220)^2 + (190 - 200)^2 + (210 - 80)^2) ^ 0.5) / (3 ^ 0.5) = 133.7908816025965 / 1.732050807568877 = 77.24420150837645
# testsampe 2: (((310 - 100)^2 + (310 - 200)^2 + (210 - 200)^2 ^ 0.5) / (3 ^ 0.5) = 237.2762103541 / 1.732050807568877 = 136.9914839257
ref = (6, 137, {self.samples_for_testing[1].get_short_identifier() : 77.2442, self.samples_for_testing[3].get_short_identifier() : 136.9915})
self.assertEqual(v.verify_samples(learnsamples, testsamples, False), ref)
def test_verify_samples_one_testsample_not_encrypted(self):
"""
tests the verify_samples() function from verification module with one testsample and not encrypted
"""
learnsamples = [self.samples_for_testing[3]]
testsamples = [self.samples_for_testing[1]]
# model: {(Feature.DD, "xy") : 150, (Feature.M, "b") : 200, (Feature.M, "a") : 250, (Feature.UD, "xy") : 285}
# vector: [(200, 80), (250, 100), (250, 150), (250, 150)]
# normalized euklidean distance: (((200 - 80)^2 + (250 - 100)^2 + (250 - 150)^2 + (250 - 150)^2) ^ 0.5) / (4 ^ 0.5) = 238.5372088375313 / 2 = 119.2686044187656
ref = (4, 120, {self.samples_for_testing[1].get_short_identifier() : 119.2686})
self.assertEqual(v.verify_samples(learnsamples, testsamples, False), ref)
def test_verify_samples_not_encrypted_no_match(self):
"""
tests the verify_samples() function from verification module not encrypted and with no comparable data
"""
learnsamples = [self.samples_for_testing[0]]
testsamples = [self.samples_for_testing[1], self.samples_for_testing[2]]
self.assertEqual(v.verify_samples(learnsamples, testsamples, False), (0, 0, {}))
def test_verify_samples_multiple_testsamples_encrypted(self):
"""
tests the verify_samples() function from verification module with multiple testsamples and encrypted
"""
learnsamples = [self.samples_for_testing[3]]
testsamples = [self.samples_for_testing[1], self.samples_for_testing[4]]
# model: {(Feature.DD, "xy") : 150, (Feature.M, "b") : 200, (Feature.M, "a") : 250, (Feature.UD, "xy") : 285}
# vector testsample 1 encrypted: [(200, 100), (200, 150), (200, 150), (150, 220), (150, 200), (285, 100), (285, 150), (200, 80)]
# vector testsample 2 encrypted: [(150, 160), (200, 200)]
# normalized euklidean distance
# testsample 1: (((200 - 100)^2 + (200 - 150)^2 + (200 - 150)^2 + (150 - 220)^2 + (150 - 200)^2 + (285 - 100)^2 + (285 - 150)^2 + (200 - 80)^2) ^ 0.5) / (8 ^ 0.5) = 298.7473849258 / 2.82842712474619 = 105.6231508731
# testsample 2: (((150 - 160)^2 + (200 - 200)^2) ^ 0.5) / (2 ^ 0.5) = 7.0710678119
ref = (10, 106, {self.samples_for_testing[1].get_short_identifier() : 105.6232, self.samples_for_testing[4].get_short_identifier() : 7.0711})
self.assertEqual(v.verify_samples(learnsamples, testsamples, True), ref)
def test_verify_per_threshold_one_testsample_encrypted(self):
"""
tests the verify_per_threshold() function from verification module with one testsample and encrypted
"""
learnsamples = [self.samples_for_testing[3]]
testsamples = [self.samples_for_testing[1]]
# model: {(Feature.DD, "xy") : 150, (Feature.M, "b") : 200, (Feature.M, "a") : 250, (Feature.UD, "xy") : 285}
# vector encrypted: [(200, 100), (200, 150), (200, 150), (150, 220), (150, 200), (285, 100), (285, 150), (200, 80)]
# normalized euklidean distance: (((200 - 100)^2 + (200 - 150)^2 + (200 - 150)^2 + (150 - 220)^2 + (150 - 200)^2 + (285 - 100)^2 + (285 - 150)^2 + (200 - 80)^2) ^ 0.5) / (8 ^ 0.5) = 298.7473849258 / 2.82842712474619 = 105.6231508731
ref = (8, 106, {self.samples_for_testing[1].get_short_identifier() : 105.6232})
self.assertEqual(v.verify_samples(learnsamples, testsamples, True), ref)
def test_verify_samples_no_learnsamples(self):
"""
tests the verify_samples() function from verification module with empty learnsamples list
"""
testsamples = [self.samples_for_testing[1], self.samples_for_testing[3]]
self.assertEqual(v.verify_samples([], testsamples, False), (0, 0, {}))
def test_verify_samples_no_testsamples(self):
"""
tests the verify_samples() function from verification module with empty testsamples list
"""
learnsamples = [self.samples_for_testing[1], self.samples_for_testing[3]]
self.assertEqual(v.verify_samples(learnsamples, [], False), (0, 0, {}))
def test_get_results_per_threshold(self):
"""
tests the get_results_per_threshold() function from verification module
"""
#res = (results_as_text, y_acceptance, y_rejection)
distance_per_sample_for_testing = {self.samples_for_testing[0].get_short_identifier() : 100}
max_distance = 200
compared_values = 20
# thresholds: 0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200
results_as_text = f"Threshold span:\n0 ms (0) - 200 ms (1)\n\nAcceptance:\n\n0.0 (0000 ms) - 0.0 %\n0.1 (0020 ms) - 0.0 %\n0.2 (0040 ms) - 0.0 %\n0.3 (0060 ms) - 0.0 %\n0.4 (0080 ms) - 0.0 %\n0.5 (0100 ms) - 100.0 %\n0.6 (0120 ms) - 100.0 %\n0.7 (0140 ms) - 100.0 %\n0.8 (0160 ms) - 100.0 %\n0.9 (0180 ms) - 100.0 %\n1.0 (0200 ms) - 100.0 %\n\nRejection:\n\n0.0 (0000 ms) - 100.0 %\n0.1 (0020 ms) - 100.0 %\n0.2 (0040 ms) - 100.0 %\n0.3 (0060 ms) - 100.0 %\n0.4 (0080 ms) - 100.0 %\n0.5 (0100 ms) - 0.0 %\n0.6 (0120 ms) - 0.0 %\n0.7 (0140 ms) - 0.0 %\n0.8 (0160 ms) - 0.0 %\n0.9 (0180 ms) - 0.0 %\n1.0 (0200 ms) - 0.0 %\n\nNormalized euklidean distance:\n\n1. Testsample\n\"{self.samples_for_testing[0].get_short_identifier()}\"\nDistance: 100.0000 ms\n\nCompared values in total: {compared_values}"
y_acceptance = [0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
y_rejection = [100.0, 100.0, 100.0, 100.0, 100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
res = (results_as_text, y_acceptance, y_rejection)
self.assertEqual(v.get_results_per_threshold(distance_per_sample_for_testing, compared_values, max_distance), res)
    def test_get_results_per_threshold_no_compared_values(self):
        """
        Tests that get_results_per_threshold() returns (None, None, None)
        when no values were compared (compared_values == 0).
        """
        self.assertEqual(v.get_results_per_threshold({}, 0, 0), (None, None, None))
if __name__ == '__main__':
    # Run the verification test suite when this file is executed directly.
    unittest.main()
| ameliebrucker/KeystrokeBiometrics | keystroke_biometrics/tests/verification_test.py | verification_test.py | py | 15,041 | python | en | code | 0 | github-code | 13 |
18761289362 | # This code checks for internet connection and informs whenever the internet is connected or disconnected
import socket
import urllib.request
import time
from datetime import datetime
import tkinter
from tkinter import messagebox
# Hide the empty Tk root window; only messagebox popups should be visible.
root = tkinter.Tk()
root.withdraw()
# Connection state: 'current' holds this poll's result, 'previous' the prior
# one, so a popup is raised only when the state actually changes.
# (The original module-level `global current, previous` statement was a no-op
# and has been removed, as have the timestamp variables that the main loop
# recomputes on every iteration anyway.)
current = None
previous = None
# This function checks for internet connection
def try_connection():
    """Return True if http://google.com/ answers within 1 second, else False.

    Used as a cheap reachability probe for the internet connection.
    """
    # The original computed (and discarded) a timestamp here and had an
    # unreachable trailing `return False`; both removed.
    try:
        urllib.request.urlopen('http://google.com/', timeout=1)
        return True
    except (urllib.request.URLError, socket.timeout):
        # URLError covers DNS/connection failures; socket.timeout covers
        # a stalled connection exceeding the 1 s budget.
        return False
# Poll connectivity every 5 seconds; log and notify only on state changes.
while True:
    time_now = datetime.now()
    read_time = time_now.strftime('%m/%d/%Y, %H:%M:%S')
    current = try_connection()
    #Inform user whenever the internet is either connected or disconnected
    if current != previous:
        # Raw string: the original 'C:\\Enter\file\...' path contained a literal
        # form-feed character because \f is an escape sequence. The file is now
        # opened only when there is something to write, and closed via `with`
        # (the original leaked an open handle every 5 seconds).
        with open(r'C:\Enter\file\Location\Log_file.txt', 'a') as file:
            if current:
                print('Connected to internet at: ' + read_time + 'hrs')
                file.write('Connected to internet at: ' + read_time + '\n')
                messagebox.showinfo('Update','Connected to internet at: ' + read_time + ' hrs' )
            else:
                print('Not Connected to internet at: ' + read_time + ' hrs')
                file.write('Not Connected to internet at: ' + read_time + '\n')
                messagebox.showinfo('Update','Not Connected to internet at: ' + read_time + ' hrs')
    previous = current
    time.sleep(5)
| jeff9901/Network_and_Web_Analysis | Connectivity_check.py | Connectivity_check.py | py | 1,711 | python | en | code | 0 | github-code | 13 |
16808483964 | import pytest
from hypothesis import given, settings, strategies as st
from hypothesis.errors import InvalidArgument
from hypothesis.extra.array_api import COMPLEX_NAMES, REAL_NAMES
from hypothesis.internal.floats import width_smallest_normals
from tests.array_api.common import (
MIN_VER_FOR_COMPLEX,
dtype_name_params,
flushes_to_zero,
)
from tests.common.debug import assert_all_examples, find_any, minimal
from tests.common.utils import flaky
def skip_on_missing_unique_values(xp):
    """Skip the calling test if ``xp.unique_values()`` is missing (it is optional)."""
    if not hasattr(xp, "unique_values"):
        # Bug fix: pytest.mark.skip(...) merely constructs a mark object and has
        # no effect when called imperatively; pytest.skip(...) actually skips
        # the currently running test.
        pytest.skip("xp.unique_values() is not required to exist")
def xfail_on_indistinct_nans(xp):
    """
    xp.unique_value() should return distinct NaNs - if not, tests that (rightly)
    assume such behaviour will likely fail. For example, NumPy 1.22 treats NaNs
    as indistinct, so tests that use this function will be marked as xfail.
    See https://mail.python.org/pipermail/numpy-discussion/2021-August/081995.html
    """
    # unique_values() itself is optional in the Array API, so check that first.
    skip_on_missing_unique_values(xp)
    # Probe behaviour directly: two NaNs should survive deduplication.
    two_nans = xp.asarray([float("nan"), float("nan")])
    if xp.unique_values(two_nans).size != 2:
        pytest.xfail("NaNs not distinct")
@pytest.mark.parametrize("dtype_name", dtype_name_params)
def test_draw_arrays_from_dtype(xp, xps, dtype_name):
"""Draw arrays from dtypes."""
dtype = getattr(xp, dtype_name)
assert_all_examples(xps.arrays(dtype, ()), lambda x: x.dtype == dtype)
@pytest.mark.parametrize("dtype_name", dtype_name_params)
def test_draw_arrays_from_scalar_names(xp, xps, dtype_name):
"""Draw arrays from dtype names."""
dtype = getattr(xp, dtype_name)
assert_all_examples(xps.arrays(dtype_name, ()), lambda x: x.dtype == dtype)
@given(data=st.data())
def test_draw_arrays_from_shapes(xp, xps, data):
"""Draw arrays from shapes."""
shape = data.draw(xps.array_shapes())
x = data.draw(xps.arrays(xp.int8, shape))
assert x.ndim == len(shape)
assert x.shape == shape
@given(data=st.data())
def test_draw_arrays_from_int_shapes(xp, xps, data):
"""Draw arrays from integers as shapes."""
size = data.draw(st.integers(0, 10))
x = data.draw(xps.arrays(xp.int8, size))
assert x.shape == (size,)
@pytest.mark.parametrize(
"strat_name",
[
"scalar_dtypes",
"boolean_dtypes",
"integer_dtypes",
"unsigned_integer_dtypes",
"floating_dtypes",
"real_dtypes",
pytest.param(
"complex_dtypes", marks=pytest.mark.xp_min_version(MIN_VER_FOR_COMPLEX)
),
],
)
def test_draw_arrays_from_dtype_strategies(xp, xps, strat_name):
"""Draw arrays from dtype strategies."""
strat_func = getattr(xps, strat_name)
strat = strat_func()
find_any(xps.arrays(strat, ()))
@settings(deadline=None)
@given(data=st.data())
def test_draw_arrays_from_dtype_name_strategies(xp, xps, data):
"""Draw arrays from dtype name strategies."""
all_names = ("bool", *REAL_NAMES)
if xps.api_version > "2021.12":
all_names += COMPLEX_NAMES
sample_names = data.draw(
st.lists(st.sampled_from(all_names), min_size=1, unique=True)
)
find_any(xps.arrays(st.sampled_from(sample_names), ()))
def test_generate_arrays_from_shapes_strategy(xp, xps):
"""Generate arrays from shapes strategy."""
find_any(xps.arrays(xp.int8, xps.array_shapes()))
def test_generate_arrays_from_integers_strategy_as_shape(xp, xps):
"""Generate arrays from integers strategy as shapes strategy."""
find_any(xps.arrays(xp.int8, st.integers(0, 100)))
def test_generate_arrays_from_zero_dimensions(xp, xps):
"""Generate arrays from empty shape."""
assert_all_examples(xps.arrays(xp.int8, ()), lambda x: x.shape == ())
@given(data=st.data())
def test_generate_arrays_from_zero_sided_shapes(xp, xps, data):
"""Generate arrays from shapes with at least one 0-sized dimension."""
shape = data.draw(xps.array_shapes(min_side=0).filter(lambda s: 0 in s))
assert_all_examples(xps.arrays(xp.int8, shape), lambda x: x.shape == shape)
def test_generate_arrays_from_unsigned_ints(xp, xps):
"""Generate arrays from unsigned integer dtype."""
assert_all_examples(xps.arrays(xp.uint32, (5, 5)), lambda x: xp.all(x >= 0))
# Ensure we're not just picking non-negative signed integers
signed_max = xp.iinfo(xp.int32).max
find_any(xps.arrays(xp.uint32, (5, 5)), lambda x: xp.any(x > signed_max))
def test_generate_arrays_from_0d_arrays(xp, xps):
"""Generate arrays from 0d array elements."""
assert_all_examples(
xps.arrays(
dtype=xp.uint8,
shape=(5, 5),
elements=xps.from_dtype(xp.uint8).map(
lambda e: xp.asarray(e, dtype=xp.uint8)
),
),
lambda x: x.shape == (5, 5),
)
def test_minimize_arrays_with_default_dtype_shape_strategies(xp, xps):
"""Strategy with default scalar_dtypes and array_shapes strategies minimize
to a boolean 1-dimensional array of size 1."""
smallest = minimal(xps.arrays(xps.scalar_dtypes(), xps.array_shapes()))
assert smallest.shape == (1,)
assert smallest.dtype == xp.bool
assert not xp.any(smallest)
def test_minimize_arrays_with_0d_shape_strategy(xp, xps):
"""Strategy with shape strategy that can generate empty tuples minimizes to
0d arrays."""
smallest = minimal(xps.arrays(xp.int8, xps.array_shapes(min_dims=0)))
assert smallest.shape == ()
@pytest.mark.parametrize("dtype", dtype_name_params[1:])
def test_minimizes_numeric_arrays(xp, xps, dtype):
"""Strategies with numeric dtypes minimize to zero-filled arrays."""
smallest = minimal(xps.arrays(dtype, (2, 2)))
assert xp.all(smallest == 0)
def test_minimize_large_uint_arrays(xp, xps):
"""Strategy with uint dtype and largely sized shape minimizes to a good
example."""
if not hasattr(xp, "nonzero"):
pytest.skip("optional API")
smallest = minimal(
xps.arrays(xp.uint8, 100),
lambda x: xp.any(x) and not xp.all(x),
timeout_after=60,
)
assert xp.all(xp.logical_or(smallest == 0, smallest == 1))
idx = xp.nonzero(smallest)[0]
assert idx.size in (1, smallest.size - 1)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@flaky(max_runs=50, min_passes=1)
def test_minimize_float_arrays(xp, xps):
"""Strategy with float dtype minimizes to a good example.
We filter runtime warnings and expect flaky array generation for
specifically NumPy - this behaviour may not be required when testing
with other array libraries.
"""
smallest = minimal(xps.arrays(xp.float32, 50), lambda x: xp.sum(x) >= 1.0)
assert xp.sum(smallest) in (1, 50)
def test_minimizes_to_fill(xp, xps):
"""Strategy with single fill value minimizes to arrays only containing said
fill value."""
smallest = minimal(xps.arrays(xp.float32, 10, fill=st.just(3.0)))
assert xp.all(smallest == 3.0)
def test_generate_unique_arrays(xp, xps):
"""Generates unique arrays."""
skip_on_missing_unique_values(xp)
assert_all_examples(
xps.arrays(xp.int8, st.integers(0, 20), unique=True),
lambda x: xp.unique_values(x).size == x.size,
)
def test_cannot_draw_unique_arrays_with_too_small_elements(xp, xps):
"""Unique strategy with elements strategy range smaller than its size raises
helpful error."""
with pytest.raises(InvalidArgument):
xps.arrays(xp.int8, 10, elements=st.integers(0, 5), unique=True).example()
def test_cannot_fill_arrays_with_non_castable_value(xp, xps):
"""Strategy with fill not castable to dtype raises helpful error."""
with pytest.raises(InvalidArgument):
xps.arrays(xp.int8, 10, fill=st.just("not a castable value")).example()
def test_generate_unique_arrays_with_high_collision_elements(xp, xps):
"""Generates unique arrays with just elements of 0.0 and NaN fill."""
@given(
xps.arrays(
dtype=xp.float32,
shape=st.integers(0, 20),
elements=st.just(0.0),
fill=st.just(xp.nan),
unique=True,
)
)
def test(x):
zero_mask = x == 0.0
assert xp.sum(xp.astype(zero_mask, xp.uint8)) <= 1
test()
def test_generate_unique_arrays_using_all_elements(xp, xps):
"""Unique strategy with elements strategy range equal to its size will only
generate arrays with one of each possible element."""
skip_on_missing_unique_values(xp)
assert_all_examples(
xps.arrays(xp.int8, (4,), elements=st.integers(0, 3), unique=True),
lambda x: xp.unique_values(x).size == x.size,
)
def test_may_fill_unique_arrays_with_nan(xp, xps):
"""Unique strategy with NaN fill can generate arrays holding NaNs."""
find_any(
xps.arrays(
dtype=xp.float32,
shape=10,
elements={"allow_nan": False},
unique=True,
fill=st.just(xp.nan),
),
lambda x: xp.any(xp.isnan(x)),
)
def test_may_not_fill_unique_array_with_non_nan(xp, xps):
"""Unique strategy with just fill elements of 0.0 raises helpful error."""
strat = xps.arrays(
dtype=xp.float32,
shape=10,
elements={"allow_nan": False},
unique=True,
fill=st.just(0.0),
)
with pytest.raises(InvalidArgument):
strat.example()
@pytest.mark.parametrize(
"kwargs",
[
{"elements": st.just(300)},
{"elements": st.nothing(), "fill": st.just(300)},
],
)
def test_may_not_use_overflowing_integers(xp, xps, kwargs):
"""Strategy with elements strategy range outside the dtype's bounds raises
helpful error."""
with pytest.raises(InvalidArgument):
xps.arrays(dtype=xp.int8, shape=1, **kwargs).example()
@pytest.mark.parametrize("fill", [False, True])
@pytest.mark.parametrize(
"dtype, strat",
[
("float32", st.floats(min_value=10**40, allow_infinity=False)),
("float64", st.floats(min_value=10**40, allow_infinity=False)),
pytest.param(
"complex64",
st.complex_numbers(min_magnitude=10**300, allow_infinity=False),
marks=pytest.mark.xp_min_version(MIN_VER_FOR_COMPLEX),
),
],
)
def test_may_not_use_unrepresentable_elements(xp, xps, fill, dtype, strat):
"""Strategy with elements not representable by the dtype raises helpful error."""
if fill:
kw = {"elements": st.nothing(), "fill": strat}
else:
kw = {"elements": strat}
with pytest.raises(InvalidArgument):
xps.arrays(dtype=dtype, shape=1, **kw).example()
def test_floats_can_be_constrained(xp, xps):
"""Strategy with float dtype and specified elements strategy range
(inclusive) generates arrays with elements inside said range."""
assert_all_examples(
xps.arrays(
dtype=xp.float32, shape=10, elements={"min_value": 0, "max_value": 1}
),
lambda x: xp.all(x >= 0) and xp.all(x <= 1),
)
def test_floats_can_be_constrained_excluding_endpoints(xp, xps):
"""Strategy with float dtype and specified elements strategy range
(exclusive) generates arrays with elements inside said range."""
assert_all_examples(
xps.arrays(
dtype=xp.float32,
shape=10,
elements={
"min_value": 0,
"max_value": 1,
"exclude_min": True,
"exclude_max": True,
},
),
lambda x: xp.all(x > 0) and xp.all(x < 1),
)
def test_is_still_unique_with_nan_fill(xp, xps):
"""Unique strategy with NaN fill generates unique arrays."""
skip_on_missing_unique_values(xp)
xfail_on_indistinct_nans(xp)
assert_all_examples(
xps.arrays(
dtype=xp.float32,
elements={"allow_nan": False},
shape=10,
unique=True,
fill=st.just(xp.nan),
),
lambda x: xp.unique_values(x).size == x.size,
)
def test_unique_array_with_fill_can_use_all_elements(xp, xps):
"""Unique strategy with elements range equivalent to its size and NaN fill
can generate arrays with all possible values."""
skip_on_missing_unique_values(xp)
xfail_on_indistinct_nans(xp)
find_any(
xps.arrays(
dtype=xp.float32,
shape=10,
unique=True,
elements=st.integers(1, 9),
fill=st.just(xp.nan),
),
lambda x: xp.unique_values(x).size == x.size,
)
def test_generate_unique_arrays_without_fill(xp, xps):
"""Generate arrays from unique strategy with no fill.
Covers the collision-related branches for fully dense unique arrays.
Choosing 25 of 256 possible values means we're almost certain to see
collisions thanks to the birthday paradox, but finding unique values should
still be easy.
"""
skip_on_missing_unique_values(xp)
assert_all_examples(
xps.arrays(dtype=xp.uint8, shape=25, unique=True, fill=st.nothing()),
lambda x: xp.unique_values(x).size == x.size,
)
def test_efficiently_generate_unique_arrays_using_all_elements(xp, xps):
"""Unique strategy with elements strategy range equivalent to its size
generates arrays with all possible values. Generation is not too slow.
Avoids the birthday paradox with UniqueSampledListStrategy.
"""
skip_on_missing_unique_values(xp)
assert_all_examples(
xps.arrays(dtype=xp.int8, shape=255, unique=True),
lambda x: xp.unique_values(x).size == x.size,
)
@given(st.data(), st.integers(-100, 100), st.integers(1, 100))
def test_array_element_rewriting(xp, xps, data, start, size):
"""Unique strategy generates arrays with expected elements."""
x = data.draw(
xps.arrays(
dtype=xp.int64,
shape=size,
elements=st.integers(start, start + size - 1),
unique=True,
)
)
x_set_expect = xp.arange(start, start + size, dtype=xp.int64)
x_set = xp.sort(xp.unique_values(x))
assert xp.all(x_set == x_set_expect)
def test_generate_0d_arrays_with_no_fill(xp, xps):
"""Generate arrays with zero-dimensions and no fill."""
assert_all_examples(
xps.arrays(xp.bool, (), fill=st.nothing()),
lambda x: x.dtype == xp.bool and x.shape == (),
)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("low", [-2.0, -1.0, 0.0, 1.0])
@given(st.data())
def test_excluded_min_in_float_arrays(xp, xps, dtype, low, data):
"""Strategy with elements strategy excluding min does not generate arrays
with elements less or equal to said min."""
strat = xps.arrays(
dtype=dtype,
shape=(),
elements={
"min_value": low,
"max_value": low + 1,
"exclude_min": True,
},
)
x = data.draw(strat, label="array")
assert xp.all(x > low)
@st.composite
def distinct_int64_integers(draw):
    # A set shared across all draws of one test case records every integer
    # produced so far, so each draw yields a value not seen before.
    used = draw(st.shared(st.builds(set), key="distinct_int64_integers.used"))
    i = draw(st.integers(-(2**63), 2**63 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i
def test_does_not_reuse_distinct_integers(xp, xps):
"""Strategy with distinct integer elements strategy generates arrays with
distinct values."""
skip_on_missing_unique_values(xp)
assert_all_examples(
xps.arrays(xp.int64, 10, elements=distinct_int64_integers()),
lambda x: xp.unique_values(x).size == x.size,
)
def test_may_reuse_distinct_integers_if_asked(xp, xps):
"""Strategy with shared elements and fill strategies of distinct integers
may generate arrays with non-distinct values."""
skip_on_missing_unique_values(xp)
find_any(
xps.arrays(
xp.int64,
10,
elements=distinct_int64_integers(),
fill=distinct_int64_integers(),
),
lambda x: xp.unique_values(x).size < x.size,
)
def test_subnormal_elements_validation(xp, xps):
"""Strategy with subnormal elements strategy is correctly validated.
For FTZ builds of array modules, a helpful error should raise. Conversely,
for builds of array modules which support subnormals, the strategy should
generate arrays without raising.
"""
elements = {
"min_value": 0.0,
"max_value": width_smallest_normals[32],
"exclude_min": True,
"exclude_max": True,
"allow_subnormal": True,
}
strat = xps.arrays(xp.float32, 10, elements=elements)
if flushes_to_zero(xp, width=32):
with pytest.raises(InvalidArgument, match="Generated subnormal float"):
strat.example()
else:
strat.example()
| HypothesisWorks/hypothesis | hypothesis-python/tests/array_api/test_arrays.py | test_arrays.py | py | 16,753 | python | en | code | 7,035 | github-code | 13 |
11199828675 | import numpy as np
from scipy.ndimage import convolve1d
from .csf_utils import csf_dict, csf_frequency, csf_mannos_daly, csf_spat_filter
from pywt import wavedec2, waverec2
def filter_pyr(pyr, csf_funct):
    """Weight each detail subband of a wavelet pyramid by a CSF gain.

    pyr: output of pywt.wavedec2 - pyr[0] is the approximation band and
    pyr[1:] are (H, V, D) detail tuples ordered coarse-to-fine.
    csf_funct: gain function of (level, orientation); the 'ahc_weight'
    variant additionally takes the total number of levels.
    Returns a pyramid of the same structure with weighted detail bands.
    """
    n_levels = len(pyr) - 1
    # Hoist the loop-invariant name check (the original re-evaluated it for
    # every subband of every level).
    needs_total_levels = csf_funct.__name__ == 'ahc_weight'
    extra_args = (n_levels,) if needs_total_levels else ()
    filt_pyr = [pyr[0]]  # Do not filter the approximation subband.
    for level in range(n_levels):
        # Subbands are 1=H, 2=V, 3=D; level index is converted so that 0 is
        # the finest scale, matching the CSF functions' convention.
        filt_level = tuple(
            pyr[level + 1][subband] * csf_funct(n_levels - level - 1, subband + 1, *extra_args)
            for subband in range(3)
        )
        filt_pyr.append(filt_level)
    return filt_pyr
def filter_img(img, filter_key, wavelet=None, **kwargs):
    """Apply the CSF filter selected by `filter_key` to a 2-D grayscale image.

    'frequency'/'frequency_rad'/'mannos_daly' weight the image spectrum in the
    FFT domain; 'li'/'cdf97_watson'/'ahc' weight the subbands of a 4-level
    wavelet decomposition; 'spat_filter'/'spat_filter_clipped' convolve with a
    separable spatial CSF kernel. `wavelet` is used only by the wavelet-domain
    keys; `kwargs` may carry the spatial kernel length 'k'. Returns the real
    part of the filtered image.
    """
    if filter_key in ['frequency', 'frequency_rad', 'mannos_daly']:
        # Viewing geometry: distance-to-height ratio 3 and a 1080-px display
        # height fix the maximum spatial frequency -- assumed cycles/degree;
        # TODO confirm against csf_utils conventions.
        d2h = 3.0
        pic_height = 1080
        f_max = np.pi*pic_height*d2h/180
        h, w = img.shape
        # Frequency-plane coordinates centred on DC; the extra sample for odd
        # sizes keeps the grid symmetric around zero.
        u_min = -(h >> 1)
        u_max = (h >> 1) + 1 if h & 1 else (h >> 1)
        v_min = -(w >> 1)
        v_max = (w >> 1) + 1 if w & 1 else (w >> 1)
        u, v = np.meshgrid(np.arange(u_min, u_max), np.arange(v_min, v_max), indexing='ij')
        fx, fy = u*f_max/h, v*f_max/w
        if filter_key == 'frequency':
            csf_mat = csf_frequency(np.abs(fx)) * csf_frequency(np.abs(fy)) # Separable filtering
        elif filter_key == 'frequency_rad':
            # Radially symmetric weighting by frequency magnitude.
            f_mat = np.sqrt(fx**2 + fy**2)
            csf_mat = csf_frequency(f_mat)
        elif filter_key == 'mannos_daly':
            # Mannos-Daly CSF depends on both magnitude and orientation.
            f_mat = np.sqrt(fx**2 + fy**2)
            theta_mat = np.arctan2(v, u)
            csf_mat = csf_mannos_daly(f_mat, theta_mat)
        # Weight the centred spectrum, then transform back to the image domain.
        img_filtered = np.fft.ifft2(np.fft.ifftshift(np.fft.fftshift(np.fft.fft2(img)) * csf_mat))
    elif filter_key in ['li', 'cdf97_watson', 'ahc']:
        n_levels = 4
        pyr = wavedec2(img, wavelet, 'reflect', n_levels)
        csf_funct = csf_dict[filter_key]
        pyr_filtered = filter_pyr(pyr, csf_funct)
        img_filtered = waverec2(pyr_filtered, wavelet, 'reflect')
    elif filter_key in ['spat_filter', 'spat_filter_clipped']:
        d2h = 3.0
        filt = csf_spat_filter(d2h, k=kwargs.get('k', 21))
        # Separable convolution: rows, then columns; the 'clipped' variant
        # clamps negative intermediate values to zero after each pass.
        img_filtered = convolve1d(img, filt, axis=0)
        if filter_key == 'spat_filter_clipped':
            img_filtered = np.clip(img_filtered, 0, None)
        img_filtered = convolve1d(img_filtered, filt, axis=1)
        if filter_key == 'spat_filter_clipped':
            img_filtered = np.clip(img_filtered, 0, None)
    return np.real(img_filtered)
| abhinaukumar/funque | funque/third_party/funque_atoms/filter_utils.py | filter_utils.py | py | 2,707 | python | en | code | 2 | github-code | 13 |
5150072386 | from glob import glob
import sqlite3
import pandas as pd
PATH = '/media/tiago/HDD - Tiago/pnad'
for ano in range(2012, 2016):
    # One pass per survey year: parse the SAS 'INPUT' layout file to recover
    # the fixed-width column specification, then bulk-load the microdata file
    # into a per-year SQLite database in 50k-row chunks.
    print(ano)
    with open(f'{PATH}/{ano}/Dicionários e input/input PES{ano}.txt',
              encoding='windows-1252') as myfile:
        # Keep only layout lines: those starting with '@' (column start marker)
        # or with the 'INPUT' keyword itself.
        input_PNADC_trimestre2 = [line.replace('INPUT', '').strip().split(maxsplit=3)
                                  for line in myfile
                                  if line.strip() and
                                  ((line.strip()[0] == '@')
                                   or (line.strip()[:5] == 'INPUT'))]
    # Each layout row is (@start, name, size-spec, description).
    desde, names, sizes, desc = zip(*input_PNADC_trimestre2)
    desde = [int(n.strip('@')) - 1 for n in desde]  # 1-based -> 0-based offsets
    widths = [int(w.strip('$.')) for w in sizes]  # '$' marks character fields
    colspecs = [(start, start + width) for start, width in zip(desde, widths)]
    # dtypes = {nome:'category' for nome, tipo in zip(names, sizes) if tipo[0] == '$'}
    chunks = pd.read_fwf(glob(f'{PATH}/{ano}/Dados/PES{ano}*')[0],
                         colspecs=colspecs,
                         names=names,
                         na_values='.',
                         chunksize=50_000)
    # Append each chunk so memory use stays bounded regardless of file size.
    with sqlite3.connect(f'pnad{ano}.sqlite') as conn:
        for df in chunks:
            df.to_sql('tabela', conn, if_exists='append', index=False)
| tiago-freitas/pnad-educational-data | extrator.py | extrator.py | py | 1,369 | python | en | code | 0 | github-code | 13 |
20974669779 | #!/usr/bin/env python3
import logging
import os
import pyroscope
l = logging.getLogger()
l.setLevel(logging.DEBUG)
addr = os.getenv("PYROSCOPE_SERVER_ADDRESS") or "http://pyroscope:4040"
print(addr)
pyroscope.configure(
application_name = "simple.python.app",
server_address = addr,
enable_logging = True,
)
def work(n):
    """Burn CPU for n loop iterations so the profiler has something to sample."""
    for _ in range(n):
        pass
def fast_function():
    # Tag this span so its samples appear under {"function": "fast"} in Pyroscope.
    with pyroscope.tag_wrapper({ "function": "fast" }):
        work(20000)
def slow_function():
    # Tag this span so its samples appear under {"function": "slow"} in Pyroscope.
    # Does 4x the work of fast_function, so it should dominate the profile.
    with pyroscope.tag_wrapper({ "function": "slow" }):
        work(80000)
if __name__ == "__main__":
while True:
fast_function()
slow_function()
| grafana/pyroscope | examples/python/simple/main.py | main.py | py | 617 | python | en | code | 8,798 | github-code | 13 |
73192583057 | #Preguntas a responder
#¿Cual es el % de ventas historicas de los distintos articulos?
#Del item más vendido, ¿en que mes se vende más?
#¿Cuantos articulos de los mas vendidos se deberian comprar en marzo del 2022?
import pandas as pd
import numpy as np
def run():
    """Carga ./datos/ventas.csv y responde las tres preguntas del encabezado:
    % de ventas por articulo, mes pico del item mas vendido y pronostico
    (media +/- desviacion estandar) para marzo 2022.
    """
    dir_ventas = './datos/{}'.format('ventas.csv')
    df_ventas = pd.read_csv(dir_ventas, sep=";")
    # Porcentaje historico de ventas por articulo.
    total_ventas = df_ventas['unidades_vendidas'].sum()
    df_item_ventas = df_ventas[['item','unidades_vendidas']].groupby(['item']).sum()
    df_item_prc_ventas = df_item_ventas.applymap(lambda x: 100*x/total_ventas)
    item_mas_vendido = df_item_prc_ventas.idxmax(axis=0).values[0]
    # Mes con mayor venta promedio del item mas vendido.
    # (Se elimino la variable 'otro_item', que no se usaba.)
    df_item_mes_ventas = df_ventas[['item','mes','unidades_vendidas']].groupby(['item','mes']).mean()
    top_mes = df_item_mes_ventas.query('item == @item_mas_vendido').idxmax(axis=0).values[0][1]
    # Pronostico para marzo: media +/- desviacion estandar de los marzos historicos.
    marzo = 'Marzo'
    df_nacional_marzo = df_ventas[['item','mes','anio','unidades_vendidas']].query('item == @item_mas_vendido and mes == @marzo').groupby(['item','anio']).sum()
    pnm_mean = df_nacional_marzo.groupby(['item']).mean().values[0,0]
    pnm_std = df_nacional_marzo.groupby(['item']).std().values[0,0]
    sugerencia_menor = pnm_mean-pnm_std
    sugerencia_mayor = pnm_mean+pnm_std
    #Respuestas
    print("El porcentaje de ventas de los distintos articulos se adjunta a continuacion:")
    print(df_item_prc_ventas)
    print("-----")
    print("El articulo mas vendido es: ",item_mas_vendido)
    print("El mes donde este articulo mas se vende es: ",top_mes)
    print("-----")
    print(f"Considerando que las ventas son ciclicas por temporada y que la desviacion estandar ({np.around(pnm_std,1)}) es pequeña frente a la media ({np.around(pnm_mean,1)}).")
    print(f"Podemos pronosticar que la venta de marzo 2022 sera entre {np.around(sugerencia_menor,1)} y {np.around(sugerencia_mayor,1)}")
run() | Danvalrub/ventas_pandas | ventas_pandas.py | ventas_pandas.py | py | 2,077 | python | es | code | 0 | github-code | 13 |
32276473443 | # exercise 58: Is It a Leap Year?
# exercise 58: Is It a Leap Year?
year = int(input('enter year: '))
# Gregorian rule in one expression: divisible by 4, except centuries,
# unless the century is divisible by 400.
is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
res = 'leap' if is_leap else 'not leap'
print('year %d: %s year' % (year, res))
""" alternative:
at each if statement I might write isLeapYear = True or isLeapYear = False and then
use a final if-else printing alternative the one or the other text
"""
| sara-kassani/1000_Python_example | books/Python Workbook/decision_making/ex58.py | ex58.py | py | 429 | python | en | code | 1 | github-code | 13 |
36697722472 | #############
#
# In this example, we look at Modules.
#
# Now I know what you're thinking! Every deep learning library out there has a class called Module. There's
# haiku.Module, flax.linen.Module, objax.Module etc.
#
# And each time you have to sit down and read the documentation and understand what "Module" means for each library.
# A lot of these also have custom notions of variables, transforms, scopes, etc. For example there's `haiku.transform`
# or `objax.VarCollection`.
#
# In constrast, Equinox introduces no new abstractions. An Equinox Module is just a nice way to create a PyTree,
# really. If you want, look up the source code for `equinox.Module` -- it's only about 100 lines long.
#
#############
#
# Now that we've finished complaining about the competition ;) let's see how it works.
#
# It's very simple: `Module`, and its subclasses, are PyTrees. Any attributes of the Module are also part of the
# same PyTree. (Your whole model is just one big PyTree.) This means you can use the model in the normal way in
# JAX, with vmap/grad/jit etc.
#
# Now because a `Module` is also a class, we can define methods on it. The `self` parameter -- which is a PyTree,
# remember! -- means that this is just a function that takes PyTrees as inputs, like any other function. No method
# is special cased. If you want you can group several related functions under different methods. If you just want
# to define a single forward pass, then the __call__ method is a convenient choice.
#
#############
#
# In this example, we'll demonstrate how to use `equinox.Module` to create a simple MLP.
import functools as ft
from typing import Any, List
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
import equinox as eqx
# First pass at creating a Linear layer. `Linear1` will be a PyTree node, with `weight` and `bias` as its children.
# A type annotation (the `Any`) is needed for these to be recognised; just use `typing.Any` if you want.
class Linear1(eqx.Module):
    """Linear layer as a PyTree: `weight` and `bias` are its leaves."""
    weight: Any
    bias: Any
    def __call__(self, x):
        """Affine map: weight @ x + bias."""
        return self.weight @ x + self.bias
# We do need to initialise this. If we wanted to we could create a free function to do it:
def linear(in_features, out_features, key):
    """Initialise a Linear1 layer, drawing weight and bias from independent keys."""
    wkey, bkey = jrandom.split(key)
    # Bug fix: the original drew both weight and bias from the unsplit `key`
    # and never used wkey/bkey, which defeated the purpose of the split and
    # correlated the two parameters.
    weight = jrandom.normal(wkey, (out_features, in_features))
    bias = jrandom.normal(bkey, (out_features,))
    return Linear1(weight, bias)  # uses the default __init__ for Linear1.
# Alternatively we can use a custom __init__:
class Linear2(eqx.Module):
    """Linear layer with a custom __init__ that performs its own initialisation."""
    weight: Any
    bias: Any
    def __init__(self, in_features, out_features, key):
        # The super-init call isn't actually necessary here, but is good practice in Python (for
        # co-operative multiple inheritance).
        super().__init__()
        wkey, bkey = jrandom.split(key)
        # Bug fix: draw from the two *split* keys. The original reused `key`
        # for both tensors and left wkey/bkey unused, making the split a no-op
        # and correlating weight and bias.
        self.weight = jrandom.normal(wkey, (out_features, in_features))
        self.bias = jrandom.normal(bkey, (out_features,))
        # An error will be thrown if you forget to set either `weight` or `bias` (or if you to try to set
        # anything else).
    def __call__(self, x):
        """Affine map: weight @ x + bias."""
        return self.weight @ x + self.bias
# And now we can compose these into a small MLP:
class MLP(eqx.Module):
    """A small multi-layer perceptron composed of Linear2 layers."""
    layers: List[Linear2]
    activation: callable
    def __init__(self, in_size, out_size, width_size, depth, key, activation=jnn.relu):
        """Build depth+1 layers: in_size -> width_size (depth-1 times) -> out_size."""
        super().__init__()  # Once again not necessary but is good practice
        keys = jrandom.split(key, depth + 1)
        self.layers = []
        self.layers.append(Linear2(in_size, width_size, keys[0]))
        for i in range(depth - 1):
            self.layers.append(Linear2(width_size, width_size, keys[i + 1]))
        self.layers.append(Linear2(width_size, out_size, keys[-1]))
        self.activation = activation
    def __call__(self, x):
        """Forward pass: activation between layers, none after the last."""
        # Incidentally if you want to JIT/grad/whatever in here you can; it's completely safe to do so, unlike some
        # other libraries.
        for layer in self.layers[:-1]:
            x = layer(x)
            x = self.activation(x)
        return self.layers[-1](x)
# Which we can now use:
def main():
    """Construct an MLP, then demonstrate filtered jit/grad/vmap over it."""
    key = jrandom.PRNGKey(5678)
    model_key, data_key = jrandom.split(key, 2)
    model = MLP(in_size=2, out_size=3, width_size=8, depth=2, key=model_key)
    data = jrandom.normal(data_key, (2,))
    model(data)  # Calls __call__
    # Because `model` is a PyTree we can use it with normal JAX: vmap, grad, jit etc.
    # However note that `model.activation` is some arbitrary Python function, defaulting to a ReLU.
    # JAX has no idea how to JIT/differentiate that! We need to separate the things we want to JIT/grad from the rest.
    params = eqx.filter(model, eqx.is_array)
    static = eqx.filter(model, eqx.is_array, inverse=True)
    # Each example below recombines (params, static) into the full model inside
    # the transformed function, so only array leaves are traced/differentiated.
    @ft.partial(jax.jit, static_argnums=1)
    def example_jit(params, static, data):
        model = eqx.combine(params, static)
        model(data)
    @jax.grad
    def example_grad(params, static, data):
        model = eqx.combine(params, static)
        return jnp.sum(model(data))  # return a scalar
    @ft.partial(jax.vmap, in_axes=(None, None, 0))
    def example_vmap(params, static, data):
        model = eqx.combine(params, static)
        return model(data)
    example_jit(params, static, data)
    example_grad(params, static, data)
    example_vmap(params, static, jnp.stack([data, data]))
# Now that you've seen how to build a model, you're all set!
# This is literally everything you need to know to use Equinox. Like the README promises, it's a really simple way
# to build models, and you don't need to learn any new abstractions -- it's all just PyTrees.
#
# If you're comfortable with a little more automation, it is possible to combine JIT/grad with the filtering
# automatically, so you can just do fn(model) rather than fn(params, static). Have a look at
# filtered_transformations.py.
if __name__ == "__main__":
main()
| codeaudit/equinox | examples/build_model.py | build_model.py | py | 6,034 | python | en | code | null | github-code | 13 |
17127074015 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: drops the Task model and extends Student with a
    profile picture and a link to the auth user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('university_dashboard', '0006_auto_20150303_2345'),
    ]
    operations = [
        # Task's foreign keys must be removed before the model itself.
        migrations.RemoveField(
            model_name='task',
            name='assignedTo',
        ),
        migrations.RemoveField(
            model_name='task',
            name='studentGroup',
        ),
        migrations.DeleteModel(
            name='Task',
        ),
        migrations.AddField(
            model_name='student',
            name='profile_picture',
            field=models.ImageField(upload_to='thumbpath', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='student',
            name='user',
            field=models.ForeignKey(default=None, to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        # The datetime defaults below were frozen at generation time by Django.
        migrations.AlterField(
            model_name='studentgroup',
            name='memberSince',
            field=models.DateTimeField(default=datetime.datetime(2015, 3, 9, 5, 9, 29, 449968, tzinfo=utc), verbose_name='Date Joined'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='university',
            name='memberSince',
            field=models.DateTimeField(default=datetime.datetime(2015, 3, 9, 5, 9, 29, 448967, tzinfo=utc), verbose_name='Date Joined'),
            preserve_default=True,
        ),
    ]
| LuisBosquez/student-net-2015 | src/university_dashboard/migrations/0007_auto_20150308_2309.py | 0007_auto_20150308_2309.py | py | 1,695 | python | en | code | 0 | github-code | 13 |
72172438418 | from __future__ import annotations
from logging import getLogger, NullHandler
from typing import TYPE_CHECKING, List, Dict, Tuple
from watchdog.observers import Observer
from watchdog.observers.api import ObservedWatch
if TYPE_CHECKING:
from . import Reloadable
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class ReloadableManager:
    """
    Central registry for auto-reload machinery.

    Keeps references to the periodic-update threads and the file-watching
    observers so that all of them can be shut down on demand.
    """

    thread_list: List[Reloadable.AutoupdateThread] = list()
    observer_dict: Dict[str, Tuple[Observer, ObservedWatch]] = dict()

    @classmethod
    def stop_periodic_updates(cls):
        """
        Signal every registered periodic-update thread to stop, then forget them.
        """
        for worker in list(cls.thread_list):
            worker.stopping.set()
        cls.thread_list.clear()
        logger.info(f"all periodic reloading are stopped.")

    @classmethod
    def stop_on_modified_updates(cls):
        """
        Stop and join every file-modification observer, then forget them all.
        """
        observers = [pair[0] for pair in cls.observer_dict.values()]
        for observer in observers:
            observer.stop()
            observer.join()
        cls.observer_dict.clear()
        logger.info(f"all reloading on the file modification are stopped.")
| urushiyama/reloadrable | reloadrable/reloadable_manager.py | reloadable_manager.py | py | 1,258 | python | en | code | 0 | github-code | 13 |
def dictionary(l1, l2):
    """Return per-word occurrence counts for the two word lists.

    Parameters: l1, l2 -- iterables of hashable words.
    Returns a pair of plain dicts mapping each word to its frequency,
    exactly as the hand-rolled counting loops this replaces produced.
    """
    # collections.Counter does the counting in C instead of a manual loop;
    # wrap in dict() so callers get the same plain-dict type as before.
    return dict(Counter(l1)), dict(Counter(l2))
def numerator(d1, d2):
    """Dot product of two word-frequency dicts.

    Sums ``d1[w] * d2[w]`` over the words present in both dicts -- the
    numerator of the cosine similarity between the two bags of words.
    Iterating the key intersection is O(min(n, m)) instead of the
    original O(n * m) all-pairs scan that compared every key with every
    other key just to find equal ones.
    """
    return sum(d1[word] * d2[word] for word in d1.keys() & d2.keys())
def denominator(d1, d2):
    """Product of the Euclidean norms of the two frequency vectors.

    This is the denominator of the cosine-similarity formula: for each
    dict, take the square root of the sum of squared counts, then
    multiply the two norms together.
    """
    norm_first = sum(count ** 2 for count in d1.values()) ** 0.5
    norm_second = sum(count ** 2 for count in d2.values()) ** 0.5
    return norm_first * norm_second
def euclidianpercentage(tsum, total):
    """Return the cosine-similarity score as a percentage.

    Bug fix: the original body computed ``(tsum/total) * 100`` where
    ``tsum`` was the module-level GLOBAL, silently ignoring its first
    parameter (which was named ``sum``, shadowing the builtin).  It only
    worked because the call site happened to pass the same global.  The
    parameter is now used directly and renamed so it no longer shadows
    ``sum``; callers invoke it positionally, so the rename is safe.
    """
    return (tsum / total) * 100
# --- Driver script: compare every .txt file in the current directory with
# --- every other one (including itself) using bag-of-words cosine similarity.
list1 = []
import os
# Collect the paths of all .txt files in the working directory.
for file in os.listdir():
    if file.endswith(".txt"):
        #print(os.path.join(file))
        x = os.path.join(file)
        list1.append(x)
        #print(len(x))
print(list1)
import re
# words1[i] holds the token list of file list1[i].
words1=[]
for i in range(len(list1)):
    file = open(list1[i], 'r')
    # text = file.read().lower()
    text = file.read()
    file.close()
    # Strip everything except letters, digits, spaces and apostrophes.
    text = re.sub('[^a-zA-Z0-9\ \']+', " ", text)
    words = list(text.split())
    # print(words)
    words1.append(words)
print (words1)
count = 0
listfinal3 = []
#print(words1[0][34])
# Pairwise comparison: note every (i, j) pair is computed, so each pair is
# scored twice and the diagonal (i == j) always scores 100%.
for i in range(len(words1)):
    listfinal =[]
    for j in range(len(words1)):
        d1,d2= dictionary(words1[i],words1[j])
        tsum = numerator(d1,d2)
        total = denominator(d1,d2)
        answer = euclidianpercentage(tsum,total)
        count = count + 1
        print(count)
        #print(i,i+1)
        #print(list1[i],'vs',list1[i+1])
        print(list1[i],'vs',list1[j],'comparison is done and the answer is',answer,'%')
        listfinal.append(answer)
    listfinal3.append(listfinal)
# Print the full similarity matrix, one row per file.
for i in range(len(listfinal3)):
    print(listfinal3[i])
#listfinal4 = []
#for i in range(len(listfinal)):
    #if listfinal[i] > 35:
       # listfinal4.append(listfinal[i])
#print(listfinal4) | uday12345678/Plagiarism-Detector | CSPP1_2017_part-1_20176043-bagOfCodes.py | CSPP1_2017_part-1_20176043-bagOfCodes.py | py | 1,727 | python | en | code | 0 | github-code | 13 |
10396917279 |
from nodoBusqueda import nodoBusqueda
import distancia
class Problema():
    """Search problem: wraps a state space and drives node expansion for
    several search strategies (breadth-first, uniform cost, depth-first,
    greedy and A*).  Comments translated to English from Spanish.
    """
    def __init__(self, espacioEstados, estadoInicial):
        # State space, initial state, running node id counter, and the
        # pruning table (best known value per visited state, keyed by str).
        self.espacioEstados = espacioEstados
        self.estadoInicial = estadoInicial
        self.contador = 1
        self.tabla = {}
    def EstadoMeta(self, Estado):
        """Return True when *Estado* is a goal state of the state space."""
        return self.espacioEstados.objetivo(Estado)
    def CrearListaNodos(self, listaSucesores, nodoAct, maxProf, estrategia, grafo):
        """Build the search nodes for the successors of *nodoAct*.

        Each successor ``e`` is an (action, state, cost) triple.  The node
        value depends on the strategy; pruning discards states already
        reached with an equal or better value, and successors beyond
        *maxProf* are dropped.  For the 'profundidad' strategy no pruning
        test is run (``podar`` keeps its previous value, initially False).
        """
        ListaNodos = []
        podar = False
        valor = 0
        for e in listaSucesores:
            if estrategia == 'anchura':            # breadth-first: depth
                valor = nodoAct.profundidad + 1
                podar = self.poda(e[1], valor)
            elif estrategia == 'CosteUniforme':    # uniform cost: g(n)
                valor = nodoAct.costo + e[2]
                podar = self.poda(e[1], valor)
            elif estrategia == 'profundidad':      # depth-first: 1/depth
                valor = (1 / (nodoAct.profundidad + 1))
            elif estrategia == 'voraz':            # greedy: h(n)
                valor = self.Heuristica(e[1], grafo)
                podar = self.poda(e[1], valor)
            elif estrategia == 'A':                # A*: g(n) + h(n)
                valor = nodoAct.costo + e[2] + self.Heuristica(e[1], grafo)
                podar = self.poda(e[1], valor)
            if (nodoAct.profundidad < maxProf) and (podar == False):
                ListaNodos.append(nodoBusqueda(self.contador, nodoAct, e[1],
                                               (e[2] + nodoAct.costo), e[0],
                                               nodoAct.profundidad + 1, valor))
                self.contador = self.contador + 1
        return ListaNodos
    def CrearSolucion(self, nodoAct):
        """Return the node path from *nodoAct* back to the root
        (solution in reverse order, root last)."""
        NodosSolucion = []
        NodosSolucion.append(nodoAct)
        nodo = nodoAct.padre
        while not (nodo.padre == None):
            NodosSolucion.append(nodo)
            nodo = nodo.padre
        NodosSolucion.append(nodo)
        return NodosSolucion
    def poda(self, e, valor):
        """Pruning test: True when state *e* was already reached with a
        value <= *valor*; otherwise record the new best value and allow it."""
        if not (e.__str__() in self.tabla.keys()):
            self.tabla[e.__str__()] = valor
            return False
        elif self.tabla.get(e.__str__()) <= valor:
            return True
        else:
            self.tabla[e.__str__()] = valor
            return False
    def Heuristica(self, estado, grafo):
        """Heuristic: largest straight-line distance from *estado* to any
        of its remaining goal nodes; 0 when no goals remain."""
        costes = []
        for objetivo in estado.objetivos:
            costes.append(distancia.dist(estado.lon, estado.lat,
                                         grafo.node[objetivo]['lon'],
                                         grafo.node[objetivo]['lat']))
        # Bug fix: the original emptiness test was
        # ``not(costes.__sizeof__() == 40)`` -- the in-memory byte size of
        # the list object, which is platform dependent (56 for an empty
        # list on 64-bit CPython), so max() could be called on an empty
        # list and raise ValueError.  Test the list itself instead.
        if costes:
            return max(costes)
        else:
            return 0
| soker90/inteligentes | Carpeta_Fuente/Problema.py | Problema.py | py | 2,395 | python | es | code | 0 | github-code | 13 |
2435738643 | import os
from operator import itemgetter
import re
import numpy as np
import mlp
# Train a 12-input / 3-output MLP with `nhidden` hidden units on the
# pre-split datasets and report its confusion-matrix accuracy.
accuracy = np.zeros(1)
nhidden = 12
# SECURITY NOTE(review): eval() on file contents executes arbitrary code if
# the *.txt files are ever tampered with -- ast.literal_eval would be the
# safe choice if the files only contain Python literals; verify before changing.
with open('train_s.txt','r') as train_file:
    train = eval(train_file.read())
with open('traint_s.txt','r') as traint_file:
    traint = eval(traint_file.read())
with open('valid_s.txt','r') as valid_file:
    valid = eval(valid_file.read())
with open('validt_s.txt','r') as validt_file:
    validt = eval(validt_file.read())
with open('test_s.txt','r') as test_file:
    test = eval(test_file.read())
with open('testt_s.txt','r') as testt_file:
    testt = eval(testt_file.read())
# Train the network
net = mlp.mlp(12,3,nhidden)
net.earlystopping(train, traint, valid, validt)
accuracy[0] = net.confusion(test,testt)
# With a single run, average == max == min; the array is sized for more runs.
print("AVERAGE ACCURACY: ", sum(accuracy)/1)
print("MAX ACCURACY: ", np.argmax(accuracy),": ", accuracy[np.argmax(accuracy)])
print("MIN ACCURACY: ", np.argmin(accuracy),": ", accuracy[np.argmin(accuracy)]) | bjornife/Uber_Secret_Project_1 | get_the_data.py | get_the_data.py | py | 945 | python | en | code | 0 | github-code | 13 |
40242416276 | #Import libraries
import socketserver, os
#import mimetypes
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
    def handle(self):
        """Serve a single HTTP request: GET only, files under ./www,
        301 redirects for directory paths missing a trailing slash.

        NOTE(review): only the first 1024 bytes of the request are read,
        so very long request lines would be truncated -- confirm this is
        acceptable for the assignment's scope.
        """
        self.data = self.request.recv(1024).strip()
        # Empty read: client closed the connection before sending anything.
        if not self.data:
            return
        if len(self.data) < 3:
            output = "HTTP/1.1 400 Bad Request\r\n\r\n"
            self.request.sendall(output.encode("utf-8"))
            return
        request = self.data.decode("utf-8")
        request_data = request.splitlines()
        check_css = False
        # Split the request line into its components
        data = request_data[0].split(" ")
        r_command = data[0] # HTTP Method (GET, POST, HEAD, etc.)
        r_url = data[1] # Requested URL
        r_http = data[2] # HTTP version
        # Only GET is supported; everything else gets 405.
        if r_command != "GET":
            output = r_http + " 405 Method Not Allowed\r\n" + "\r\n"
            self.request.send(output.encode("utf-8"))
            return
        # Path-traversal guard: reject any URL containing "..".
        if '..' in r_url: # handle home directory
            self.request.sendall(bytearray(f'HTTP/1.0 404 NOT FOUND\r\n\n', 'utf-8'))
        else:
            # Handling redirection for missing trailing slashes.
            if (r_url[-5:] != ".html" and r_url[-1] != "/" and r_url[-4:] != ".css" and r_url[-3:] != ".js"):
                output = r_http + " 301 Moved Permanently\r\n" + "Location: " + r_url + "/\r\n"
                self.request.send(output.encode("utf-8"))
                return
            if r_url[-4:] == ".css":
                check_css = True
            path = os.getcwd() + "/www" + r_url
            # Check if path is a directory
            if os.path.isdir(path):
                # Try to use a default file such as index.html
                path = os.path.join(path, 'index.html')
                if not os.path.exists(path):
                    # Return a 404 if index.html does not exist
                    output = r_http + " 404 Not Found\r\n" + "\r\n"
                    self.request.send(output.encode("utf-8"))
                    return
            try:
                file = open(path, "rb")
                html_data = file.read()
                file.close()
            except FileNotFoundError:
                output = r_http + " 404 Not Found\r\n" + "\r\n"
                self.request.send(output.encode("utf-8"))
                return
            # Content-Type: only text/css and text/html are distinguished.
            if check_css:
                output = r_http + " 200 OK\r\n" + "Content-Type: text/css\r\n" + "\r\n"
            else:
                output = r_http + " 200 OK\r\n" + "Content-Type: text/html\r\n" + "\r\n"
            self.request.send(output.encode("utf-8"))
            self.request.sendall(html_data)
if __name__ == "__main__":
    HOST, PORT = "localhost", 8080
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    socketserver.TCPServer.allow_reuse_address = True
    # Create the server, binding to localhost on port 8080, and serve until
    # the process is killed.
    server = socketserver.TCPServer((HOST, PORT), MyWebServer)
    server.serve_forever()
| BAFiogbe/CMPUT404-assignment-webserver | server.py | server.py | py | 3,783 | python | en | code | null | github-code | 13 |
37882169930 | from collections import defaultdict
# Read one line of whitespace-separated ints from stdin.
readline = lambda: list(map(int, input().split()))
# Target cell (1-indexed row r, column c) and target value k.
r, c, k = readline()
# The board starts as a fixed 3x3 grid, one row per input line.
board = []
for _ in range(3):
    board.append(readline())
def do_R():
for idx, i in enumerate(board):
dic = defaultdict(int)
new_list = []
for j in i:
if j == 0:
continue
dic[j] += 1
kv = list(sorted(sorted(dic.items()), key= lambda x: x[1]))
for k, v in kv:
new_list.append(k)
new_list.append(v)
if len(new_list) >= 100:
new_list = new_list[:100]
board[idx] = new_list
maxv = 0
for i in board:
maxv = max(len(i), maxv)
for idx, v in enumerate(board):
if len(v) < maxv:
for _ in range(maxv - len(v)):
board[idx].append(0)
def do_C():
    """C operation: the column-wise counterpart of do_R.  Each column is
    rewritten as (value, count) pairs sorted by count then value, capped
    at 100 entries; the board grows new rows as needed and shorter
    columns are zero-padded in place."""
    for i in range(len(board[0])):
        dic = defaultdict(int)
        new_list = []
        # Count the non-zero values in column i.
        for j in range(len(board)):
            if board[j][i] == 0:
                continue
            dic[board[j][i]] += 1
        # Stable double sort: by key first, then by count, so ties on the
        # count are broken by the smaller value.
        kv = list(sorted(sorted(dic.items()), key= lambda x: x[1]))
        for k, v in kv:
            new_list.append(k)
            new_list.append(v)
        if len(new_list) >= 100:
            new_list = new_list[:100]
        # Write the column back, appending fresh zero-filled rows when the
        # new column is taller than the current board.
        for j, v in enumerate(new_list):
            if j >= len(board):
                new_block = [0] * len(board[0])
                new_block[i] = v
                board.append(new_block)
            else:
                board[j][i] = v
        # Zero out the tail of the column when it shrank.
        if len(new_list) < len(board):
            for j in range(len(new_list), len(board)):
                board[j][i] = 0
def check_OOR(r, c):
    # True when the 1-indexed cell (r, c) lies outside the current board.
    return r > len(board) or c > len(board[0])
for t in range(0, 101):
if not check_OOR(r, c) and board[r - 1][c - 1] == k:
print(t)
break
elif len(board) >= len(board[0]):
do_R()
elif len(board) < len(board[0]):
do_C()
else:
print(-1) | kod4284/kod-algo-note | 백준/17140-이차원-배열과-연산/solution.py | solution.py | py | 1,957 | python | en | code | 0 | github-code | 13 |
27325991310 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Patent Claim Scoring System
Functionality:
Provide a GUI for our tool
@author: Zhipeng Yu
Input:
1.previously saved machine learning model: 'model.json', 'model.h5','emb_model.json','emb_model.h5'
2.negative and positive word list for word level features: 'negword_freq.csv' and 'posword_freq.csv'
3.word dictionary and reversed word dictionary for sentence level features:'revdic.npy','worddic.npy'
4.claim text for calculating similiary scores:'1314paired_newclaims_dep.txt'
Linux Kernel (3.12.2) on Lee Flemaing's lab server
Python 3.5 NumPy 1.11.2 scikit-learn 0.18 tkinter 8.5.18 h5py 2.6.0 Keras 1.1.0 tensorflow 0.10.0
Hardware Environment, Intel 2 GHz Intel Core i7, 8 GB 1600 MHz DDR3,
256GB SSD
Created on Tue Apr 11 10:15:32 2017
@author: Zhipeng Yu
"""
from __future__ import print_function
import tkinter
from tkinter import ttk,Text
from collections import Counter
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from keras import backend as K
maxlen=600
class Adder(ttk.Frame):
    """The adders gui and functions.

    Main window of the patent-claim scoring tool: loads the saved Keras
    models, scores typed claim text, highlights influential words in the
    text box, and lists the ten most similar historical claims.
    NOTE(review): model/data paths below are hard-coded absolute paths --
    confirm they exist on the deployment machine.
    """
    def __init__(self, parent, *args, **kwargs):
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        self.root = parent
        self.pos_words = []
        self.init_gui()
    def on_quit(self):
        """Exits program."""
        quit()
    # vectorize text data based on counting pos and neg words
    def count_vectorize(self,dic,text,length):
        # Returns a fixed-length count vector: position dic[word]-1 holds
        # the number of occurrences of that word in `text`.
        wordcount=Counter(text.split())
        wordvector=[0]*length
        for x in wordcount:
            if x in dic:
                wordvector[dic[x]-1]=wordcount[x]
        return wordvector
    def calculate(self):
        """Calculates the score and hightlight pos and neg words."""
        # get input from text box
        inputvalue = self.textbox.get("1.0",'end-1c')
        inputvalue = ''.join(inputvalue)
        # extract features from raw text
        X_add = np.array(self.get_addData(inputvalue))
        X_add = X_add.reshape((1,len(X_add)))
        X_main = np.array(self.get_mainData(inputvalue))
        X_main = X_main.reshape((1,len(X_main)))
        # predict
        score = self.model.predict([X_add,X_main])
        # display
        self.answer_label['text'] = ','.join([''.join(str(item)) for item in score])
        # get similar patent ID
        # NOTE(review): transform() expects an iterable of documents; passing
        # the raw string means each character is treated as a document --
        # verify this is the intended behaviour.
        cur_claim=self.vectorizer.transform(inputvalue)
        sim=(cur_claim*self.X.T).A
        top10_index=np.argsort(sim)[0][-10:-1]
        top10_id=[self.his_id[i][:-3] for i in top10_index[::-1]]
        self.sim_label['text'] = '\n'.join(top10_id)
        # get pos and words
        self.pos_words,self.neg_words = self.get_weightedWords(X_main)
        # store all the lengths
        words = inputvalue.split()
        words_length = [0]
        for i in words:
            # +1 for space separator
            words_length.append(words_length[-1]+len(i)+1)
        # draw color to pos words
        if len(self.pos_words) == 0:
            self.textbox.tag_add("none", "1.0", 'end-1c')
            self.textbox.tag_config("none", background="white", foreground="black")
        else:
            # track the pos words and get its line.column expression
            numChar_before = [words_length[i] for i in self.pos_words]
            numChar_after = [words_length[i+1] for i in self.pos_words]
            for i in range (len(numChar_before)):
                beg = self.getTextIndex(numChar_before[i]+1)
                end = self.getTextIndex(numChar_after[i])
                print(beg,end)
                if len(beg)!=0 and len(end)!=0:
                    self.textbox.tag_add("pos", beg, end)
                    self.textbox.tag_config("pos", background="green", foreground="black")
        # draw color to neg words
        if len(self.neg_words) == 0:
            self.textbox.tag_add("none", "1.0", 'end-1c')
            self.textbox.tag_config("none", background="while", foreground="black")
        else:
            # track the pos words and get its line.column expression
            numChar_before = [words_length[i] for i in self.neg_words]
            numChar_after = [words_length[i+1] for i in self.neg_words]
            for i in range (len(numChar_before)):
                beg = self.getTextIndex(numChar_before[i]+1)
                end = self.getTextIndex(numChar_after[i])
                print(beg,end)
                if len(beg)!=0 and len(end)!=0:
                    self.textbox.tag_add("neg", beg, end)
                    self.textbox.tag_config("neg", background="red", foreground="black")
    # get line.column expression given the index position of the words
    def getTextIndex(self,num):
        pre = 0
        # +1 because line start from 1 not 0
        for i in range(int(self.textbox.index('end-1c').split('.')[0])+1):
            line_len = int(self.textbox.index('%d.end' % (i+1)).split('.')[1])
            if num <= pre + line_len:
                return str(i+1)+'.'+str(num-pre-1)
            else:
                pre += line_len
        return ''
    # get index position of pos words
    def get_weightedWords(self,X_test):
        # Returns (top-10 positive word positions, top-10 negative word
        # positions) ranked by embedding-weight contribution to the score.
        print(X_test)
        # get output after embedding layer
        # learning_phase:0 means output is obtained from test process as there is a dropout(train and test have different processes)
        get_3rd_layer_output = K.function([self.emb_model.layers[0].input,K.learning_phase()],
                                  [self.emb_model.layers[0].output])
        layer_output = get_3rd_layer_output([X_test,0])[0]
        #get weight
        weights=[]
        for layer in self.model.layers:
            weights.append(layer.get_weights())
        #weights[1] is from final layer dense(1)(weights and bias) we only interest in weights
        #the last maxlen*emb_dimention entries are for embedding features
        weight_emb=weights[1][0][-maxlen*128:]
        # weight * output of embedding layer
        example=layer_output[0,:,:]
        words_weights=[]
        for index, item in enumerate(example):
            words_weights.append(np.dot(item,weight_emb[index*128:min(len(weight_emb),index*128+128)]))
        words_weights=np.array(words_weights)
        words_weights=words_weights[:,2]
        words_weights=np.reshape(words_weights,words_weights.shape[0])
        # sort and know where is important for a given claim
        words_sorted=np.argsort(words_weights)
        # return postion of pos and neg words in this claim (needed to convert to line.column format later)
        return words_sorted[max(0,len(words_sorted)-10):],words_sorted[:min(10,len(words_sorted))]
    # get additional features from word level
    def get_addData(self,text):
        # 1.get pos and neg words from previously processed data
        negwords=[]
        for line in open('/Volumes/Zhipeng/patent_dataset/negword_freq.csv'):
            parts=line.lower().split(',')
            negwords.append(parts[0])
        poswords=[]
        for line in open('/Volumes/Zhipeng/patent_dataset/posword_freq.csv'):
            parts=line.lower().split(',')
            poswords.append(parts[0])
        # 2.build dictionary for count vectorize
        negword_ids = {}
        negrev_word_ids = {}
        for i, x in enumerate(negwords):
            negword_ids[x] = i + 1 # so we can pad with 0s
            negrev_word_ids[i + 1] = x
        posword_ids = {}
        posrev_word_ids = {}
        for i, x in enumerate(poswords):
            posword_ids[x] = i + 1 # so we can pad with 0s
            posrev_word_ids[i + 1] = x
        # 3.get additional features
        neg_vector=self.count_vectorize(negword_ids,text,len(negwords))
        pos_vector=self.count_vectorize(posword_ids,text,len(poswords))
        return neg_vector+pos_vector
    # get main features from sentence level
    def get_mainData(self,text):
        # Word-id sequence truncated/zero-padded to `maxlen` tokens;
        # out-of-vocabulary words map to id 0.
        self.rev_word_ids = np.load('/Volumes/Zhipeng/patent_dataset/revdic.npy').item()
        word_ids = np.load('/Volumes/Zhipeng/patent_dataset/worddic.npy').item()
        t_ids = [word_ids[re.sub('[^a-zA-Z]+', '', x)] if re.sub('[^a-zA-Z]+', '', x) in word_ids else 0 for x in text.split()]
        item=[0]*maxlen
        item[:min(maxlen,len(t_ids))]=t_ids[:min(maxlen,len(t_ids))]
        return item
    def load_model(self):
        # Loads the scoring model, the embedding model, and the TF-IDF
        # similarity index; updates the status label as each step finishes.
        from keras.models import Sequential
        from keras.models import model_from_json
        # load json and create model
        json_file = open('/Users/mac/Machine-Learning-NLP/new_model/model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        self.model = model_from_json(loaded_model_json)
        # load weights into new model
        self.model.load_weights("/Users/mac/Machine-Learning-NLP/new_model/model.h5")
        self.change_status("Model Loaded from disk")
        self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # load json and create model
        json_file = open('/Users/mac/Machine-Learning-NLP/new_model/emb_model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        self.emb_model = model_from_json(loaded_model_json)
        # load weights into new model
        self.emb_model.load_weights("/Users/mac/Machine-Learning-NLP/new_model/emb_model.h5")
        self.change_status("Model Loaded and Compiled")
        # load similarity check part
        # The claims file alternates id / text / separator every 3 lines.
        i=0
        his_data=[]
        for line in open('/Volumes/Zhipeng/patent_dataset/1314paired_newclaims_dep.txt',encoding='utf-8',errors='ignore'):
            if i%3==0:
                his_data.append([line])
            elif i%3==1:
                his_data[-1].append(line)
            i+=1
        self.his_id=[item[0] for item in his_data]
        self.vectorizer = TfidfVectorizer(min_df=1)
        self.X=self.vectorizer.fit_transform([item[1] for item in his_data])
        self.change_status("Similarity Ready. All Done.")
    def change_status(self,text):
        # Convenience setter for the status label next to the init button.
        self.model_status['text'] = text
    def init_gui(self):
        """Builds GUI."""
        self.root.title('Patent Claim Scoring System')
        self.root.option_add('*tearOff', 'FALSE')
        self.grid(column=0, row=0, sticky='nsew')
        self.menubar = tkinter.Menu(self.root)
        self.menu_file = tkinter.Menu(self.menubar)
        self.menu_file.add_command(label='Exit', command=self.on_quit)
        self.menu_edit = tkinter.Menu(self.menubar)
        self.menubar.add_cascade(menu=self.menu_file, label='File')
        self.menubar.add_cascade(menu=self.menu_edit, label='Edit')
        self.root.config(menu=self.menubar)
        # Labels that remain constant throughout execution.
        ttk.Label(self, text='Please Type Your Idea').grid(column=0, row=0,
                columnspan=4, rowspan=2)
        ttk.Separator(self, orient='horizontal').grid(column=0,
                row=2, columnspan=15, sticky='ew')
        # Add initialize button
        self.minit_button = ttk.Button(self, text='Initialize the model',
                command=self.load_model)
        self.minit_button.grid(row=0, column=5, columnspan=4,rowspan=2)
        # Add label to show status
        self.model_status = ttk.Label(self, text='Uninitialized')
        self.model_status.grid(row=1, column=10)
        # Add text box to collect input
        self.textbox = Text(self, height=20, width=60)
        self.textbox.grid(row=3,column=0, columnspan=4,rowspan=4)
        # Add Test button
        self.calc_button = ttk.Button(self, text='Test',
                command=self.calculate)
        self.calc_button.grid(row=8, columnspan=4)
        # Add label to show top ten most similar patent claims
        self.sim_frame = ttk.LabelFrame(self, text='Top 10 Similar Patent Application ID:',
                height=100)
        self.sim_frame.grid(column=5, row=3, columnspan=4, sticky='nesw')
        self.sim_label = ttk.Label(self.sim_frame, text='')
        self.sim_label.grid(column=6, row=4)
        # Add label to show score
        self.answer_frame = ttk.LabelFrame(self, text='Score',
                height=100)
        self.answer_frame.grid(column=0, row=9, columnspan=4, sticky='nesw')
        self.answer_label = ttk.Label(self.answer_frame, text='')
        self.answer_label.grid(column=5, row=9)
        ttk.Separator(self, orient='horizontal').grid(column=0,
                row=9, columnspan=9, sticky='ew')
        for child in self.winfo_children():
            child.grid_configure(padx=5, pady=5)
if __name__ == '__main__':
    # Build the root window, attach the Adder frame, and enter the Tk loop.
    root = tkinter.Tk()
   # root.geometry("800x800")
    Adder(root)
root.mainloop() | yuzhipeng588/Machine-Learning-NLP | GUI.py | GUI.py | py | 13,080 | python | en | code | 0 | github-code | 13 |
9792027936 | import os
import shutil
import rasterio
import numpy as np
from glob import glob
from tqdm import tqdm
from mpi4py import MPI
from rasterio import windows
from itertools import product
from scipy.stats import linregress
from argparse import ArgumentParser
comm = MPI.COMM_WORLD # get MPI communicator object
size = comm.size # total number of processes
rank = comm.rank # rank of this process
status = MPI.Status() # get MPI status object
# Band names for the five linregress outputs written to the trend raster.
NAMES = ["slope", "intercept", "r", "p", "se"]
def enum(*sequential, **named):
    """Fake an enumerated type: positional names get 0, 1, 2, ...;
    keyword arguments supply explicit values.

    http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
    """
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)
# Define MPI message tags
# (master/worker protocol: READY -> task request, START -> task payload,
#  DONE -> result payload, EXIT -> shutdown in either direction)
tags = enum('READY', 'DONE', 'EXIT', 'START')
def batched_lstsq(x, y):
    """Least-squares fit of *y* against *x*, ignoring NaN positions in *x*.

    Entries where ``x`` is NaN are dropped from both arrays (x and y must
    have equal length), then a linear regression is fitted.  Returns
    ``np.array([slope, intercept, rvalue, pvalue, stderr])`` as produced
    by :func:`scipy.stats.linregress`.

    Fix: removed a leftover debug ``print`` that fired only on MPI rank 1
    and tied this otherwise pure helper to the module-level ``rank``.
    """
    valid = ~np.isnan(x)
    return np.array(linregress(x[valid], y[valid]))
def get_tiles(ds, width=1024, height=1024):
    """Yield (window, transform) tiles covering a rasterio dataset.

    Tiles are width x height; edge tiles are clipped to the dataset
    bounds via intersection with the full-extent window.
    """
    nols, nrows = ds.meta['width'], ds.meta['height']
    # Cartesian product of column/row offsets, stepping by tile size.
    offsets = product(range(0, nols, width), range(0, nrows, height))
    big_window = windows.Window(col_off=0, row_off=0, width=nols, height=nrows)
    for col_off, row_off in offsets:
        window = windows.Window(col_off=col_off, row_off=row_off, width=width, height=height).intersection(big_window)
        transform = windows.transform(window, ds.transform)
        yield window, transform
def main(folder):
    """MPI master: read yearly trait rasters, farm out per-tile trend
    regressions to the workers, and write the 5-band result raster plus
    one single-band file per statistic."""
    dest_path = os.path.join(folder, f"ts_trend_{TRAIT}.tif")
    fs = []
    years = []
    # Copy each yearly stack to /tmp (local disk) before opening it; the
    # year is taken from the parent directory name.
    for f in sorted(glob(os.path.join(folder, "*", f"{TRAIT}_stack0.tif"))):
        try:
            yyyy = int(f.split("/")[-2])
            shutil.copy(f, f"/tmp/{yyyy}.tif")
            f = os.path.join("/tmp", f"{yyyy}.tif")
            fs.append(rasterio.open(f))
            years.append(yyyy)
        except rasterio.errors.RasterioIOError as e:
            print(e)
            continue
    # Output metadata: 5 bands (slope/intercept/r/p/se), nodata -9999.
    meta = fs[0].meta.copy()
    meta["count"] = 5
    meta["nodata"] = -9999
    meta["dtype"] = np.float32
    with rasterio.open(dest_path, "w", **meta) as dest:
        tasks = [x[0] for x in get_tiles(fs[0])]
        task_index = 0
        num_workers = size - 1
        closed_workers = 0
        # Dynamic master/worker dispatch loop (see `tags` protocol).
        while closed_workers < num_workers:
            data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            source = status.Get_source()
            tag = status.Get_tag()
            if tag == tags.READY:
                # Worker is ready, so send it a task
                if task_index < len(tasks):
                    trait_stack = np.stack([x.read(1, window=tasks[task_index]) for x in fs])
                    comm.send((trait_stack, tasks[task_index]), dest=source, tag=tags.START)
                    print("Sending task %d/%d to worker %d" % (task_index, len(tasks), source))
                    task_index += 1
                else:
                    comm.send(None, dest=source, tag=tags.EXIT)
            elif tag == tags.DONE:
                results, window = data
                print("Got data from worker %d" % source, results.shape, window)
                dest.write(results, window=window)
            elif tag == tags.EXIT:
                print("Worker %d exited." % source)
                closed_workers += 1
    [x.close() for x in fs]
    print(dest_path)
    meta["count"] = 1
    # Split the 5-band trend raster into one file per statistic.
    # NOTE(review): the per-statistic files are opened WITHOUT a write mode
    # before being written to -- confirm this works / was intended.
    with rasterio.open(dest_path) as src:
        for i, name in enumerate(NAMES):
            dest_path = os.path.join(folder, f"ts_trend_{TRAIT}_{name}.tif")
            with rasterio.open(dest_path) as dest:
                dest.write(src.read(i + 1), 1)
def worker():
    """MPI worker: request tiles from the master, run a per-pixel linear
    regression through time, and send the (5, h, w) result back."""
    name = MPI.Get_processor_name()
    print("I am a worker with rank %d on %s." % (rank, name))
    while True:
        comm.send(None, dest=0, tag=tags.READY)
        data = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()
        if tag == tags.START:
            # Do the work here
            trait_stack, window = data
            y, h, w = trait_stack.shape
            # xs: per-pixel time axis 0..y-1, as float.
            xs = np.tile(np.arange(y), h * w).reshape(h * w, y) * 1.
            trait_stack = trait_stack.reshape(y, -1).T # (h * w, y)
            trait_stack[trait_stack == -9999] = np.nan
            # xs[trait_stack == -9999] = np.nan
            results = np.ones((5, h * w)) * -9999.
            # Skip pixels with more than 20 missing years; others get a fit.
            for i in range(h * w):
                if np.sum(np.isnan(trait_stack[i])) > 20:
                    continue
                results[:, i] = batched_lstsq(xs[i], trait_stack[i])
            # results[:, np.sum(np.isnan(trait_stack), axis=1) > 17] = -9999
            results = results.reshape(5, h, w)
            results[np.isnan(results)] = -9999
            # Optional mask raster: zero-valued pixels become nodata.
            if MASK is not None:
                with rasterio.open(MASK) as src:
                    mask = src.read(1, window=window)
                results[:, mask == 0] = -9999
            # Work ends here... Sending results back
            comm.send((results, window), dest=0, tag=tags.DONE)
        elif tag == tags.EXIT:
            break
    comm.send(None, dest=0, tag=tags.EXIT)
if __name__ == "__main__":
    # CLI: --dir (input folder) and --trait are required; --mask optional.
    parser = ArgumentParser()
    parser.add_argument("--mask", type=str)
    parser.add_argument("--dir", required=True, type=str)
    parser.add_argument("--trait", required=True, type=str)
    args = parser.parse_args()
    TRAIT = args.trait
    MASK = args.mask
    # Rank 0 is the master; every other rank runs the worker loop.
    if rank == 0:
        print(f"Running on {comm.size} cores")
        main(args.dir)
    else:
        worker()
| danielz02/neon | src/nitrogen_regression_mpi.py | nitrogen_regression_mpi.py | py | 5,622 | python | en | code | 0 | github-code | 13 |
2413710137 | from __future__ import print_function
import httplib, urllib, sys, json, re, os, requests
# Build/deploy helper: toggles referenced assets in PHP pages between dev
# (individual js/css files) and prod (single minified bundle) mode, and
# rebuilds the bundles via remote minifier services when sources changed.
# NOTE(review): this is Python 2 code (httplib, urllib.urlencode, string
# response bodies) -- it will not run unmodified on Python 3.
#check if dev or prod mode
mode = sys.argv[1]
#load config from file
f = open('automin_config.json', 'r')
config = json.loads(f.read())
f.close()
total_files = len(config['files'])
files_done = 0
dev_path = {
    "js": config['path']['js']['dev'],
    "css": config['path']['css']['dev']
}
min_path = {
    "js": config['path']['js']['min'],
    "css": config['path']['css']['min']
}
html_path = config['path']['root']
for h in config['files']:
    files = {
        "js": [],
        "css": []
    }
    html_name = h['name']
    #read php file
    f = open("{}/{}".format(html_path, html_name), 'r')
    file_php = f.read()
    f.close()
    changes = {
        "js": False,
        "css": False
    }
    for k in ['css', 'js']:
        if mode == 'prod':
            #move file to min folder
            from_path = '{}/{}/{}.min.{}'.format(html_path, dev_path[k], html_name.split('.')[-2], k)
            dest_path = '{}/{}/{}.min.{}'.format(html_path, min_path[k], html_name.split('.')[-2], k)
            if os.path.isfile(from_path):
                os.rename(from_path, dest_path)
            #insert prod script/link tag if does not exist
            if k == 'js':
                pattern = r'<script src=\"{}\/{}\.min\.js\"><\/script>'.format(min_path[k], html_name.split('.')[-2])
            else:
                pattern = r'<link rel=\'stylesheet\' href=\"{}\/{}\.min\.css\"\/>'.format(min_path[k], html_name.split('.')[-2])
            if not re.search(pattern, file_php):
                #look for first dev file to know where to append prod
                name = h[k]['files'][0]['name']
                if k == 'js':
                    pattern = r'<script[\w\s\=\-\/\"\']*?src=[\'\"]{}\/{}[\'\"][\w\s\=\-\/\"\'>]+?<\/script>'.format(min_path[k], name)
                    replace = "<script src=\"{}/{}.min.js\"></script>\n\t<script src=\"{}/{}\"></script>".format(min_path[k], html_name.split('.')[-2], min_path[k], name)
                else:
                    pattern = r'<link[\w\s\=\-\/\"\']*?href=[\'\"]{}\/{}[\'\"]\/>'.format(min_path[k], name)
                    replace = "<link rel='stylesheet' href=\"{}/{}.min.css\"/>\n\t<link rel='stylesheet' href=\"{}/{}\"/>".format(min_path[k], html_name.split('.')[-2], min_path[k], name)
                file_php = re.sub(pattern, replace, file_php)
            #cut permission to dev dir
            os.system("chmod 000 {}/{}".format(html_path, dev_path[k]))
        for j in h[k]['files']:
            name = j['name']
            if mode == 'prod':
                #check if there are changes in dev file
                from_path = '{}/{}/{}'.format(html_path, min_path[k], name)
                if not os.path.isfile(from_path):
                    from_path = '{}/{}/{}'.format(html_path, dev_path[k], name)
                dev_time = os.path.getmtime(from_path)
                if os.path.isfile('{}/{}/{}.min.{}'.format(html_path, min_path[k], html_name.split('.')[-2], k)):
                    prod_time = os.path.getmtime('{}/{}/{}.min.{}'.format(html_path, min_path[k], html_name.split('.')[-2], k))
                    if dev_time > prod_time:
                        changes[k] = True
                else:
                    changes[k] = True
                #move files to dev folder
                from_path = '{}/{}/{}'.format(html_path, min_path[k], name)
                dest_path = '{}/{}/{}'.format(html_path, dev_path[k], name)
                if os.path.isfile(from_path):
                    os.rename(from_path, dest_path)
                #store js/css files to compile later
                f = open("{}/{}/{}".format(html_path, dev_path[k], name), 'r')
                files[k].append(f.read())
                f.close()
                #erase dev script/link tags
                if k == 'js':
                    pattern = r'<script[\w\s\=\-\/\"\']*?src=[\'\"]{}\/{}[\'\"][\w\s\=\-\/\"\'>]+?<\/script>[\s]*'.format(min_path[k], name)
                else:
                    pattern = r'<link[\w\s\=\-\/\"\']*?href=[\'\"]{}\/{}[\'\"]\/>[\s]*'.format(min_path[k], name)
                file_php = re.sub(pattern, '', file_php)
            if mode == 'dev':
                #insert dev script/link tags
                if k == 'js':
                    pattern = r'<script src=\"{}\/{}\.min\.js\"><\/script>'.format(min_path[k], html_name.split('.')[-2])
                    replace = "<script src=\"{}/{}\"></script>\n\t<script src=\"{}/{}.min.js\"></script>".format(min_path[k], name, min_path[k], html_name.split('.')[-2])
                else:
                    pattern = r'<link rel=\'stylesheet\' href=\"{}\/{}\.min\.css\"\/>'.format(min_path[k], html_name.split('.')[-2])
                    replace = "<link rel='stylesheet' href=\"{}/{}\"/>\n\t<link rel='stylesheet' href=\"{}/{}.min.css\"/>".format(min_path[k], name, min_path[k], html_name.split('.')[-2])
                file_php = re.sub(pattern, replace, file_php)
                #move files to min folder
                from_path = '{}/{}/{}'.format(html_path, dev_path[k], name)
                dest_path = '{}/{}/{}'.format(html_path, min_path[k], name)
                if os.path.isfile(from_path):
                    os.rename(from_path, dest_path)
        if mode == 'dev':
            #erase prod script/link tag
            if k == 'js':
                pattern = r'<script src=\"{}\/{}\.min\.js\"><\/script>'.format(min_path[k], html_name.split('.')[-2])
            else:
                pattern = r'<link rel=\'stylesheet\' href=\"{}\/{}\.min\.css\"\/>'.format(min_path[k], html_name.split('.')[-2])
            file_php = re.sub(pattern, '', file_php)
            #move file to dev folder
            from_path = '{}/{}/{}.min.{}'.format(html_path, min_path[k], html_name.split('.')[-2], k)
            dest_path = '{}/{}/{}.min.{}'.format(html_path, dev_path[k], html_name.split('.')[-2], k)
            if os.path.isfile(from_path):
                os.rename(from_path, dest_path)
    if mode == 'prod':
        # Text progress bar: 20 characters wide, one update per file.
        perc = float(files_done) / total_files * 20
        print("[", end='')
        for i in range(int(perc)):
            print("=", end='')
        print("{}".format(int(perc*5)%10), end='')
        for i in range(19 - int(perc)):
            print(" ", end='')
        print("] {:.1f}%".format(perc*5))
        if changes['js']:
            # Bundle + minify JS via Google Closure Compiler's web service.
            data_path = "{}/{}/{}.min.js".format(html_path, min_path['js'], html_name.split('.')[-2])
            print("Building {}...".format(data_path), end='')
            # Define the parameters for the POST request and encode them in
            # a URL-safe format.
            p_array = [
                ('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
                ('output_format', 'text'),
                ('output_info', 'compiled_code'),
            ]
            for file in files['js']:
                p_array.append(('js_code', "{}".format(file)))
            params = urllib.urlencode(p_array)
            # Always use the following value for the Content-type header.
            headers = { "Content-type": "application/x-www-form-urlencoded" }
            conn = httplib.HTTPSConnection('closure-compiler.appspot.com')
            conn.request('POST', '/compile', params, headers)
            response = conn.getresponse()
            data = response.read()
            #write bundled minified file
            f = open(data_path,"w")
            f.write(data)
            f.close()
            print("DONE")
            #print data
            conn.close()
        else:
            print("No changes for {}/{}.min.js. Skipping.".format(min_path['js'], html_name.split('.')[-2]))
        if changes['css']:
            # Bundle + minify CSS via the cssminifier.com web service.
            data_path = "{}/{}/{}.min.css".format(html_path, min_path['css'], html_name.split('.')[-2])
            print("Building {}...".format(data_path), end='')
            content = ""
            for file in files['css']:
                content = content + file
            url = 'https://cssminifier.com/raw'
            data = {'input': content}
            out = requests.post(url, data=data).text
            f = open(data_path,"w")
            f.write(out)
            f.close()
            print("DONE")
        else:
            print("No changes for {}/{}.min.css. Skipping.".format(min_path['css'], html_name.split('.')[-2]))
        files_done = files_done + 1
    #write new php file
    f = open("{}/{}".format(html_path, html_name), 'w')
    f.write(file_php)
    f.close()
print("Mode set to {}".format(mode))
| werlang/automin | automin.py | automin.py | py | 8,690 | python | en | code | 7 | github-code | 13 |
73126781138 | from odoo import api, fields, models, _
class PosPaymentCommande(models.Model):
    """Payment line recorded against a pending POS order (pos.commande)."""
    _name = "pos.payment_cmd"
    # Timestamp of the payment; defaults to "now" and cannot be edited.
    payment_date = fields.Datetime(string='Date', required=True, readonly=True, default=lambda self: fields.Datetime.now())
    # Order this payment belongs to.
    pos_commande_id = fields.Many2one('pos.commande', string='Commande')
    # Amount paid ("montant").
    montant = fields.Float(string='Montant')
    payment_method_id = fields.Many2one('pos.payment.method', string='Moyen de paiement', required=True)
    # Session is derived from the linked order and stored for searching.
    session_id = fields.Many2one('pos.session', string='Session', related='pos_commande_id.session_id', store=True)
    # Cheque number and deposit date (only meaningful for cheque payments).
    check_number = fields.Char('Numéro de chèque')
    check_date = fields.Date('Date remise')
    @api.model
    def create_payment_cmd(self, commande_line):
        # Creates the payment line for the pending order and returns its id.
        payment_cmd_id = self.create(commande_line).id
        return payment_cmd_id
33639469734 | from tkinter import filedialog
from tkinter import *
import customtkinter
import pygame
import os
# --- Global application state and main-window setup ---
customtkinter.set_appearance_mode("dark") # Modes: "System" (standard), "Dark", "Light"
customtkinter.set_default_color_theme("dark-blue") # Themes: "blue" (standard), "green", "dark-blue"
window = customtkinter.CTk()
window.title('Music Player')
window.geometry("620x420")
pygame.mixer.init()  # audio backend used by the play/pause handlers below
menubar= Menu(window)
window.config(menu=menubar)
songs = []  # .mp3 file names found in the folder chosen via load_music()
current_song = ""  # file name of the track currently selected/playing
pause = False  # True while playback is paused
def load_music():
    """Ask the user for a folder, list its .mp3 files and select the first one."""
    global current_song
    window.directory = filedialog.askdirectory()
    for song in os.listdir(window.directory):
        name, ext = os.path.splitext(song)
        if ext == '.mp3':
            songs.append(song)
    # Populate the listbox widget (defined at module level) with the titles.
    for song in songs:
        songlist.insert("end", song)
    songlist.selection_set(0)
    # NOTE(review): raises IndexError if the folder contains no .mp3 — confirm.
    current_song = songs[songlist.curselection()[0]]
def play_music():
    """Start the selected song from the beginning, or resume when paused."""
    global current_song, pause
    if not pause:
        pygame.mixer.music.load(os.path.join(window.directory, current_song))
        pygame.mixer.music.play()
    else:
        pygame.mixer.music.unpause()
        pause = False
def pause_music():
    """Pause playback; play_music() will resume instead of restarting."""
    global pause
    pygame.mixer.music.pause()
    pause = True
def next_music():
    """Select and play the song after the current one (no-op at the list end)."""
    global current_song, pause
    try:
        songlist.selection_clear(0, END)
        songlist.selection_set(songs.index(current_song) + 1)
        # Bug fix: songs is a list and must be indexed -- the original called
        # songs(...) which raised TypeError and was silently swallowed, so the
        # "next" button never actually changed the track.
        current_song = songs[songlist.curselection()[0]]
        play_music()
    except Exception:
        # Past the end of the list (or nothing selected): keep playing as-is.
        pass
def previous_music():
    """Select and play the song before the current one.

    NOTE(review): at index 0 this computes -1; verify how the Tk listbox
    treats that before relying on wrap-around behaviour.
    """
    global current_song, pause
    try:
        songlist.selection_clear(0, END)
        songlist.selection_set(songs.index(current_song) - 1)
        # Bug fix: songs is a list and must be indexed -- the original called
        # songs(...) which raised TypeError and was silently swallowed, so the
        # "previous" button never actually changed the track.
        current_song = songs[songlist.curselection()[0]]
        play_music()
    except Exception:
        # Nothing selected / lookup failed: keep playing as-is.
        pass
# --- Menu, playlist and transport controls ---
browse_menu = Menu(menubar,tearoff=False)
browse_menu.add_command(label='Select Folder', command= load_music)
menubar.add_cascade(label='Browse',menu=browse_menu)
songlist = Listbox(window, bg= "black" , fg= "white" , width=100, height= 22)
songlist.pack()
# Button icons are loaded from files next to the script (play.png, ...).
play_btn_image = PhotoImage(file='play.png')
pause_btn_image = PhotoImage(file='pause.png')
next_btn_image = PhotoImage(file='next.png')
previous_btn_image = PhotoImage(file='previous.png')
control_frame = customtkinter.CTkFrame(window)
control_frame.pack(pady=0, padx=0, fill="both", expand=True)
# Transport buttons laid out left-to-right: play, previous, pause, next.
play_btn = customtkinter.CTkButton(control_frame, image=play_btn_image, text='', command=play_music)
play_btn.grid(row=0, column=0, padx=7, pady=10)
pause_btn = customtkinter.CTkButton(control_frame, image=pause_btn_image, text='', command=pause_music)
pause_btn.grid(row=0, column=2, padx=7, pady=10)
next_btn = customtkinter.CTkButton(control_frame, image=next_btn_image, text='', command=next_music)
next_btn.grid(row=0, column=3, padx=7, pady=10)
previous_btn = customtkinter.CTkButton(control_frame, image=previous_btn_image, text='', command=previous_music)
previous_btn.grid(row=0, column=1, padx=7, pady=10)
window.mainloop()
| humza-uddin/MusicPlayer | main.py | main.py | py | 3,061 | python | en | code | 1 | github-code | 13 |
73731864016 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.conf import settings
from graphdb.redis.graphlayer import GraphLayerRedis
from graphdb.schema.backlinks import BacklinksHelper
def translate_key(key):
    """Map a scraped attribute name to its schema edge label.

    Unknown keys pass through unchanged.
    """
    schema_names = {
        # general
        "location": "located in",
        "entity_type": "type",
        # person
        "college": "studied at",
        "employee": "worked at",
        "founder": "found company",
        # NOTE: not to be confused with "advisors" attr of company
        "advisor": "are advisor at",
        "past_investor": "invested in",
        "board_member": "board member of",
        # company
        "market": "markets",
        "cur_employees": "current employees",
        "past_employees": "past employees",
        "funding": "funding rounds",
    }
    return schema_names.get(key, key)
class GraphdbPipeline(object):
    """Scrapy pipeline that stores scraped items as edges in a Redis graph."""
    def __init__(self):
        # Connection settings come from Scrapy's settings module.
        host = settings.get('REDIS_HOST', 'localhost')
        port = settings.get('REDIS_PORT', '6379')
        path_to_schema = settings.get('GRAPH_SCHEMA')
        self.connection = GraphLayerRedis(host=host, port=port)
        self.back_helper = BacklinksHelper(path_to_schema)
        print("Connecting to redis on", host, port)
    def process_item(self, item, spider):
        """Translate item keys to schema names, derive backlinks, store both."""
        translated_dict = {}
        for key, value in item.items():
            new_key = translate_key(key)
            translated_dict[new_key] = value
        # get the dicts with reverse attrs
        spider.logger.info('Translated dict is:')
        spider.logger.info(translated_dict)
        backlinks_dict = self.back_helper.get_backlinks(translated_dict)
        spider.logger.info('graphdb: backlinks:')
        spider.logger.info(backlinks_dict)
        # insert into the db
        # NOTE(review): every item is expected to carry a 'uid' key — a
        # missing one raises KeyError here; confirm upstream guarantees it.
        uid = translated_dict['uid']
        self.connection.set_multiple_edges(**{uid: translated_dict})
        self.connection.set_multiple_edges(**backlinks_dict)
        return item
| anujkhare/wings | angel/pipelines.py | pipelines.py | py | 2,138 | python | en | code | 0 | github-code | 13 |
72915388498 | import pathlib
import ast
import PyQt5
def find_enums(tree):
    """Find all PyQt enums in an AST tree.

    Yields (module, enum_class, member_name) triples for every assignment
    whose type comment looks like a Qt enum reference (starts with "Q" and
    contains a dot).
    """
    for node in ast.walk(tree):
        if not isinstance(node, ast.Assign):
            continue
        raw_comment = node.type_comment
        if raw_comment is None or '.' not in raw_comment:
            continue
        if not raw_comment.startswith("Q"):
            continue
        stripped = raw_comment.strip("'")
        mod, _, cls = stripped.rpartition(".")
        assert len(node.targets) == 1
        yield (mod, cls, node.targets[0].id)
def main():
    """Print old->new enum-name mappings for every PyQt5 .pyi stub file.

    For each stub, every enum member found by find_enums is printed as
    "<mod>.<name> <mod>.<cls>.<name>" — the unscoped and scoped spellings.
    """
    pyqt5_path = pathlib.Path(PyQt5.__file__).parent
    pyi_files = list(pyqt5_path.glob("*.pyi"))
    if not pyi_files:
        print("No .pyi-files found for your PyQt installation!")
    for path in pyi_files:
        print(f"# {path.stem}")
        # type_comments=True is required so find_enums sees the annotations.
        tree = ast.parse(
            path.read_text(),
            filename=str(path),
            type_comments=True,
        )
        for mod, cls, name in find_enums(tree):
            old = f"{mod}.{name}"
            new = f"{mod}.{cls}.{name}"
            print(f"{old} {new}")
if __name__ == '__main__':
main()
| qutebrowser/qutebrowser | scripts/dev/rewrite_find_enums.py | rewrite_find_enums.py | py | 1,187 | python | en | code | 9,084 | github-code | 13 |
23553059182 | import numpy as np
import ujson
import SimpleITK as sitk
from shapely import geometry
import cv2
#from scipy.spatial import ConvexHull, convex_hull_plot_2d
class Media:
    """Helpers to convert labeled ROI masks (.nrrd) into JSON polygon contours.

    Relies on SimpleITK for mask I/O, OpenCV for contour extraction and
    ujson for (de)serialisation.  The bare triple-quoted strings inside the
    methods below are commented-out experiments kept verbatim.
    """
    @staticmethod
    def write(file, obj):
        # Serialize *obj* to *file* as JSON.
        with open(file, "w") as filef:
            filef.write(ujson.dumps(obj))
    @staticmethod
    def read(file):
        # Load and return the JSON content of *file*.
        data = {}
        with open(file,"r") as filef:
            data = (ujson.load(filef))
        return data
    @staticmethod
    def make_roids(image):
        """ (1) execute Oscar's algorithm"""
        """ (2) validate conex pixels for each region"""
        pass
    @staticmethod
    def make_contours(roids):
        """ return in format JSON contours of the rois in polygon form """
        image_mask = sitk.ReadImage(roids)
        lsif = sitk.LabelShapeStatisticsImageFilter()
        lsif.Execute(image_mask)
        labels = lsif.GetLabels()
        print("labels", labels)
        # GetSize() is (x, y); reversed to match numpy's (row, col) order.
        im_size = np.array(image_mask.GetSize())[::-1]
        image_array = sitk.GetArrayViewFromImage(image_mask)
        """
        dd1 = sitk.LabelContour(image_mask)
        reference_surface_arr = sitk.GetArrayViewFromImage(dd1)
        refpp = np.where(reference_surface_arr == 1)
        print("dd1", refpp)
        """
        """
        rng = np.random.default_rng()
        points = rng.random((30, 2)) # 30 random points in 2-D
        hull = ConvexHull(points)
        for simplex in hull.simplices:
            print("hulls", points[simplex, 0], points[simplex, 1])
        """
        # NOTE(review): xp/yp (8-neighbourhood offsets) are unused here.
        xp = [1, 1, 0,-1,-1,-1, 0, 1]
        yp = [0, 1, 1, 1, 0,-1,-1,-1]
        print("im_size", im_size)
        #exit()
        results = []
        dd1 = sitk.LabelContour(image_mask)
        reference_surface_arr = sitk.GetArrayViewFromImage(dd1)
        for label in labels:
            # Binary mask for this label only, then OpenCV contour tracing.
            maskk = np.zeros(im_size, dtype=np.uint8)
            maskk[np.where(image_array == label)] = 1
            #contours, hierarchy = cv2.findContours(maskk, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
            #contours, hierarchy = cv2.findContours(maskk, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contours, hierarchy = cv2.findContours(maskk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # make polygons
            hierarchy = hierarchy[0]
            aupp = []
            for roww, hier in zip(contours, hierarchy):
                pauxpp = np.array([ [r[0][0], r[0][1]] for r in roww ], dtype=int)
                aupp.append(pauxpp.tolist())
            # make interiors contours
            auhier = [[] for e in hierarchy]
            for i in range(len(aupp)):
                # hierarchy[i][3] is the parent contour index (-1 = outer).
                i1d = hierarchy[i][3]
                if i1d != -1:
                    auhier[i1d].append(aupp[i])
            # append only outter contours
            for i in range(len(aupp)):
                if hierarchy[i][3] == -1:
                    results.append({"outters":aupp[i], "intters":auhier[i], "class":label, "group":label, "type":1})
        return results
    @staticmethod
    def make_contours2(roids):
        """ return in format JSON contours of the rois in polygon form """
        image_mask = sitk.ReadImage(roids)
        lsif = sitk.LabelShapeStatisticsImageFilter()
        lsif.Execute(image_mask)
        labels = lsif.GetLabels()
        print("labels", labels)
        """
        dd1 = sitk.LabelContour(image_mask)
        reference_surface_arr = sitk.GetArrayViewFromImage(dd1)
        refpp = np.where(reference_surface_arr == 1)
        print("dd1", refpp)
        """
        """
        rng = np.random.default_rng()
        points = rng.random((30, 2)) # 30 random points in 2-D
        hull = ConvexHull(points)
        for simplex in hull.simplices:
            print("hulls", points[simplex, 0], points[simplex, 1])
        """
        # NOTE(review): xp/yp (8-neighbourhood offsets) are unused here.
        xp = [1, 1, 0,-1,-1,-1, 0, 1]
        yp = [0, 1, 1, 1, 0,-1,-1,-1]
        results = []
        dd1 = sitk.LabelContour(image_mask)
        reference_surface_arr = sitk.GetArrayViewFromImage(dd1)
        for label in labels:
            # Raw (row, col) coordinates of the contour pixels for this label.
            points = np.where(reference_surface_arr == label)
            #points = np.vstack((points[0], points[1])).T
            """
            aux = []
            print(len(points[1]))
            for i in range(len(points[1])):
                li = [points[0][i], points[1][i]]
                aux.append(li)
            points = aux """
            #print("points", points)
            """
            points = np.vstack((points[0], points[1])).T
            points = points.tolist() """
            """
            poly = geometry.Polygon(points)
            points = list(poly.exterior.coords)
            print(points)
            """
            #poly = np.array(poly.exterior.coords)
            #points = poly.tolist()
            #print(poly)
            #print("points", points)
            px = points[1].tolist()
            py = points[0].tolist()
            results.append({"pointsx":px, "pointsy":py, "class":label, "group":label})
        #print("results", results)
        print(results)
        return results
if __name__ == "__main__":
    # Ad-hoc driver: extract contours from a fixed study folder, write the
    # JSON next to the mask, then read it back as a sanity check.
    path ="/mnt/sda6/software/projects/data/media/lung/2021/05/ad2fa6a5c8dd472b8372eee7450c0156"
    rois_polygosn = Media.make_contours(path+"/"+"rois.nrrd")
    Media.write(path+"/"+"rois.json", rois_polygosn)
    rois_polygosn= Media.read(path+"/"+"rois.json")
    print(rois_polygosn)
| ivarvb/MEDIA | sourcecode/src/vx/media/Media.py | Media.py | py | 5,531 | python | en | code | 0 | github-code | 13 |
class MyCalendarThree:
    """Tracks the maximum number of simultaneous bookings (LeetCode 732).

    Each booking adds +1 at its start and -1 at its end in a difference map;
    the answer is the peak of the running sum over the sorted boundaries.
    """

    def __init__(self):
        # boundary point -> net change in active bookings at that point
        self.c_map = collections.defaultdict(int)

    def book(self, start: int, end: int) -> int:
        """Record the half-open interval [start, end) and return the
        current maximum overlap across all bookings so far."""
        self.c_map[start] += 1
        self.c_map[end] -= 1
        active = 0
        peak = 0
        for _, delta in sorted(self.c_map.items()):
            active += delta
            peak = max(peak, active)
        return peak
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end) | ritwik-deshpande/LeetCode | 732-my-calendar-iii/732-my-calendar-iii.py | 732-my-calendar-iii.py | py | 597 | python | en | code | 0 | github-code | 13 |
34785476788 | from rct229.rulesets.ashrae9012019.data.schema_enums import schema_enums
from rct229.utils.jsonpath_utils import find_all, find_one
from rct229.utils.utility_functions import (
find_exactly_one_fluid_loop,
find_exactly_one_hvac_system,
)
FLUID_LOOP = schema_enums["FluidLoopOptions"]
def is_hvac_sys_preheat_fluid_loop_attached_to_boiler(rmi_b, hvac_b_id):
    """Returns True if the fluid loop associated with preheat system associated with the HVAC system is attached to a boiler.
    Returns False if this is not the case.

    Parameters
    ----------
    rmi_b : json
        RMD at RuleSetModelInstance level
    hvac_b_id : str
        The HVAC system ID.

    Returns
    -------
    bool
        True: preheat system is attached to a boiler
        False: otherwise
    """
    is_hvac_sys_preheat_fluid_loop_attached_to_boiler_flag = False
    # IDs of every fluid loop that some boiler is connected to.
    loop_boiler_id_list = find_all("$.boilers[*].loop", rmi_b)
    # Get the hvac system
    hvac_b = find_exactly_one_hvac_system(rmi_b, hvac_b_id)
    # hot_water_loop_id can be None
    hot_water_loop_id = find_one("preheat_system.hot_water_loop", hvac_b)
    if hot_water_loop_id in loop_boiler_id_list:
        hot_water_loop = find_exactly_one_fluid_loop(rmi_b, hot_water_loop_id)
        # The boiler-attached loop must also be a HEATING-type loop to count.
        is_hvac_sys_preheat_fluid_loop_attached_to_boiler_flag = (
            find_one("type", hot_water_loop) == FLUID_LOOP.HEATING
        )
    return is_hvac_sys_preheat_fluid_loop_attached_to_boiler_flag
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/baseline_systems/baseline_hvac_sub_functions/is_hvac_sys_preheat_fluid_loop_attached_to_boiler.py | is_hvac_sys_preheat_fluid_loop_attached_to_boiler.py | py | 1,460 | python | en | code | 6 | github-code | 13 |
class Solution:
    def largestOverlap(self, img1: List[List[int]], img2: List[List[int]]) -> int:
        """Largest count of coinciding 1-cells over all translations of img1.

        Counts, for every pair of 1-cells (one per image), the translation
        vector mapping the first onto the second; the most frequent vector
        gives the maximum overlap.
        """
        n = len(img1)
        m = len(img1[0])
        ones1 = [(r, c) for r in range(n) for c in range(m) if img1[r][c] == 1]
        ones2 = [(r, c) for r in range(n) for c in range(m) if img2[r][c] == 1]
        shift_count = defaultdict(int)
        for r1, c1 in ones1:
            for r2, c2 in ones2:
                shift_count[(r2 - r1, c2 - c1)] += 1
        # default=0 covers images with no 1s at all (e.g. [[0]] vs [[0]]).
        return max(shift_count.values(), default=0)
19158623985 | # Sandro is a well organised person. Every day he makes a list of things which need to be done and enumerates them from 1 to n. However, some things need to be done before others. In this task you have to find out whether Sandro can solve all his duties and if so, print the correct order.
# Dữ liệu nhập
# In the first line you are given an integer n and m (1 <= n <= 10000, 1 <= m <= 1000000). On the next m lines there are two distinct integers x and y, (1 <= x, y <= n) describing that job x needs to be done before job y.
# Dữ liệu xuất
# Print Sandro fails. if Sandro cannot complete all his duties on the list. If there is a solution print the correct ordering, the jobs to be done separated by a whitespace. If there are multiple solutions print the one, whose first number is smallest, if there are still multiple solutions, print the one whose second number is smallest, and so on.
# Ví dụ
# inputcopy
# 8 9
# 1 4
# 1 2
# 4 2
# 4 3
# 3 2
# 5 2
# 3 5
# 8 2
# 8 6
# outputcopy
# 1 4 3 5 7 8 2 6
# inputcopy
# 2 2
# 1 2
# 2 1
# outputcopy
# Sandro fails.
# use heapq so smallest job will be added to top
import heapq
def topologicalSort(graph, result):
    """Kahn's topological sort with a min-heap for lexicographic order.

    graph  -- adjacency list: graph[u] lists the jobs that must follow job u
    result -- output list; the ordering is appended to it in place
    Returns True when every node could be ordered (graph is acyclic),
    False when a cycle prevents completion.
    """
    # Derive the node count locally instead of relying on the global V,
    # so the function works on any adjacency list it is handed.
    num_nodes = len(graph)
    indegree = [0] * num_nodes
    for u in range(num_nodes):
        for v in graph[u]:
            indegree[v] += 1
    # heapq keeps the smallest ready job on top, giving the required
    # "smallest first number" tie-breaking among valid orderings.
    ready = []
    for node in range(num_nodes):
        if indegree[node] == 0:
            heapq.heappush(ready, node)
    while ready:
        u = heapq.heappop(ready)
        result.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                heapq.heappush(ready, v)
    # Any node with a remaining in-degree sits on a cycle.
    return all(d == 0 for d in indegree)
# Read node/edge counts, build the 0-based adjacency list from 1-based
# input, then print the 1-based ordering or the failure message.
V, E = map(int, input().split())
graph = [[] for i in range(V)]
result = []
for i in range(E):
    u, v = map(int, input().split())
    graph[u-1].append(v-1)
if (topologicalSort(graph, result)):
    for i in range(V):
        print(result[i]+1, end=" ")
else:
    print('Sandro fails.')
| phamtamlinh/coding-challenges | basic/topological-sort/topological-sorting.py | topological-sorting.py | py | 2,020 | python | en | code | 0 | github-code | 13 |
10023300596 | #!/usr/bin/python3
'''Defines class Rectangle that inherits from Base'''
from models.base import Base
class Rectangle(Base):
    '''Rectangle defined by width/height and an (x, y) offset.

    Inherits id management from Base.  All setters validate their input:
    width/height must be positive integers, x/y non-negative integers.
    '''

    def __init__(self, width, height, x=0, y=0, id=None):
        '''Initializes an instance; values are validated by the setters.

        Args:
            width (int): width, must be > 0
            height (int): height, must be > 0
            x (int): horizontal offset, must be >= 0
            y (int): vertical offset, must be >= 0
            id (int): identifier handled by Base (auto-assigned when None)
        '''
        super().__init__(id)
        self.width = width
        self.height = height
        self.x = x
        self.y = y

    @property
    def width(self):
        '''sets/gets width of the rectangle'''
        return self.__width

    @width.setter
    def width(self, value):
        if type(value) is not int:
            raise TypeError("width must be an integer")
        if value <= 0:
            raise ValueError("width must be > 0")
        self.__width = value

    @property
    def height(self):
        '''Sets/gets height of the rectangle'''
        return self.__height

    @height.setter
    def height(self, value):
        if type(value) is not int:
            raise TypeError("height must be an integer")
        if value <= 0:
            raise ValueError("height must be > 0")
        self.__height = value

    @property
    def x(self):
        '''sets/gets value of x'''
        return self.__x

    @x.setter
    def x(self, value):
        if type(value) is not int:
            raise TypeError("x must be an integer")
        if value < 0:
            raise ValueError("x must be >= 0")
        self.__x = value

    @property
    def y(self):
        '''sets/gets value of y'''
        return self.__y

    @y.setter
    def y(self, value):
        if type(value) is not int:
            raise TypeError("y must be an integer")
        if value < 0:
            raise ValueError("y must be >= 0")
        self.__y = value

    def area(self):
        '''Returns area of the Rectangle'''
        return self.width * self.height

    def display(self):
        '''Prints the rectangle to stdout with '#', honoring the x/y offset.'''
        if self.width == 0 or self.height == 0:
            print("")
            return
        print("\n" * self.y, end="")
        row = " " * self.x + "#" * self.width
        for _ in range(self.height):
            print(row)

    def __str__(self):
        '''Returns [Rectangle] (<id>) <x>/<y> - <width>/<height>'''
        return ("[Rectangle] ({}) {}/{} - {}/{}"
                .format(self.id, self.x, self.y, self.width, self.height))

    def update(self, *args, **kwargs):
        '''Updates the Rectangle.

        Args:
            *args (ints): new values in the order id, width, height, x, y
            **kwargs (dict): new key/value pairs of attributes
        When present, *args takes precedence over **kwargs.  An id of None
        re-runs __init__ with the current geometry so Base assigns a new id.
        '''
        if args and len(args) != 0:
            # Bug fix: the original referenced the misspelled name `swlf`
            # here, raising NameError for update(None, ...).
            for attr, value in zip(("id", "width", "height", "x", "y"), args):
                if attr == "id" and value is None:
                    self.__init__(self.width, self.height, self.x, self.y)
                else:
                    setattr(self, attr, value)
        elif kwargs and len(kwargs) != 0:
            for key, value in kwargs.items():
                if key == "id":
                    if value is None:
                        self.__init__(self.width, self.height, self.x, self.y)
                    else:
                        self.id = value
                elif key in ("width", "height", "x", "y"):
                    setattr(self, key, value)

    def to_dictionary(self):
        '''Returns dictionary representation of the Rectangle'''
        return {
            "x": self.x,
            "y": self.y,
            "id": self.id,
            "height": self.height,
            "width": self.width
        }
46275868384 | import re
import json
import numpy as np
import pandas as pd
from promptsource.templates import DatasetTemplates, Template
def add_translated_prompt_templates():
    """Register translated prompt templates with promptsource.

    Reads the placeholder map from csv_files/entities.json and the
    translated prompt strings from csv_files/template_translated.csv, then
    restores the original Jinja entities inside each translation and adds
    it as "<template>-translate-<lang>" to the matching DatasetTemplates.
    """
    with open('csv_files/entities.json') as f:
        template_entities_dict = json.load(f)
    translated_prompts_df = pd.read_csv('csv_files/template_translated.csv')
    # Every column except the two bookkeeping ones is a language code.
    language_code_list = list(
        set(translated_prompts_df.columns) - {'template_name', 'template_string'}
    )
    dataset_name_dict = {}
    print("####")
    print("Processing translated prompts")
    print("####")
    for _, row in translated_prompts_df.iterrows():
        template_code_name = row['template_name']
        print("Processing {}".format(template_code_name))
        # Encoded as "<dataset> - <subset> - <template name (may contain ' - ')>".
        dataset_name, subset_name, *template_name = template_code_name.split(' - ')
        template_name = ' - '.join(template_name)
        dict_key = "{}-{}".format(dataset_name, subset_name)
        if dict_key not in dataset_name_dict:
            dataset_name_dict[dict_key] = 0
        if subset_name == "None":
            subset_name = None
        dataset_templates = DatasetTemplates(
            dataset_name=dataset_name,
            subset_name=subset_name
        )
        try:
            entity_list = template_entities_dict[template_code_name]
        except:
            # Some keys were written with a trailing space; retry with it.
            template_code_name += ' '
            template_name += ' '
            entity_list = template_entities_dict[template_code_name]
        # NOTE(review): answer_choices persists from the previous template
        # when this one has none; a first template without it would raise
        # NameError below — confirm this is intended.
        if 'answer_choices' in entity_list:
            answer_choices = entity_list.pop('answer_choices')
        # idx = 0
        for language_code in language_code_list:
            translated_template_string = row[language_code]
            # Normalise the Arabic percent sign and collapse |...| to the
            # promptsource answer separator '|||'.
            translated_template_string = re.sub(r'٪', '%', translated_template_string)
            translated_template_string = re.sub(r'(?:\|).*(?:\|)', '|||', translated_template_string)
            regex = r"(?:{).*?(?:})"
            for m in re.finditer(regex, translated_template_string, re.DOTALL):
                _string = m.group()
                _string_fixed = _string.lower()
                if bool(re.search("{e.*?[0-9]}",_string_fixed))==True:
                    _string_fixed = _string_fixed.replace(" ", "")
                # NOTE(review): _string is used as a regex pattern without
                # re.escape(); special characters may misfire — verify.
                translated_template_string = re.sub(_string, _string_fixed, translated_template_string)
            # Restore the original Jinja entities for each {eN} placeholder.
            for entity, original_entity in entity_list.items():
                translated_template_string = re.sub(entity, original_entity, translated_template_string)
            # NOTE(review): "Answser" is misspelled in this runtime prefix;
            # fixing it changes the generated prompts, so do it deliberately.
            prefix = "Answser in English, \n"
            translated_template_string = prefix + translated_template_string
            idx = dataset_name_dict[dict_key]
            new_template_name = "{}-translate-{}".format(template_name, language_code)
            if new_template_name not in dataset_templates.all_template_names:
                print("Adding {} for {}".format(new_template_name, template_code_name))
                new_template = Template(
                    name=new_template_name,
                    jinja=translated_template_string,
                    answer_choices=answer_choices,
                    reference="Translated version of {} in {}".format(template_name, language_code)
                )
                # Deterministic synthetic UUID per (dataset, subset) counter.
                new_template.id = '00000000-0000-0000-0000-'+str(idx).zfill(12)
                dataset_templates.add_template(new_template)
                dataset_name_dict[dict_key] += 1
            else:
                print("Already added {} for {}".format(new_template_name, template_code_name))
if __name__ == '__main__':
    # Inverse step of add_translated_prompt_templates: replace each Jinja
    # entity in every template with an {eN} placeholder, remember the
    # mapping in entities.json and write the encoded prompts for translation.
    print('Prepare file of encoded prompts for translation')
    dataset_lists_df = pd.read_csv('csv_files/datasets.csv')
    dataset_lists_df = dataset_lists_df[dataset_lists_df['do_train'].notnull()]
    template_string_list = []
    template_name_list = []
    template_entities = {}
    for row in dataset_lists_df.iterrows():
        idx, row_items = row
        dataset_name = row_items['HF_name']
        subset_name = None if pd.isna(row_items['subset']) else row_items['subset']
        dataset_templates = DatasetTemplates(
            dataset_name=dataset_name,
            subset_name=subset_name
        )
        num_templates = len(dataset_templates)
        template_list = dataset_templates.all_template_names
        for template in template_list:
            template_string = dataset_templates[template].jinja
            answer_choices = dataset_templates[template].answer_choices
            template_name = "{} - {} - {}".format(dataset_name, subset_name, template)
            entities = {'answer_choices': answer_choices}
            # Replace every {...} / {{...}} span by a shorter {eN} token,
            # tracking the cumulative length change with `offset` so later
            # match positions stay valid.
            offset = 0
            regex = r"(?:{|{{).*?(?:}}|})"
            matches = re.finditer(regex, template_string, re.DOTALL)
            for idx, m in enumerate(matches, start=1):
                entity_identifier = "{{e{}}}".format(idx)
                start_idx = m.start()
                end_idx = m.end()
                original_entity_len = end_idx - start_idx + 1
                start_idx -= offset
                end_idx -= offset
                offset += (original_entity_len - len(entity_identifier) - 1)
                start_string = template_string[:start_idx]
                end_string = template_string[end_idx:]
                entities[entity_identifier] = template_string[start_idx:end_idx]
                # "start" : start_idx,
                # "end" : start_idx+len(entity_identifier),
                # }
                template_string = start_string + entity_identifier + end_string
            template_entities[template_name] = entities
            template_string_list.append(template_string)
            template_name_list.append(template_name)
            # template_answer_choices = dataset_templates[template].answer_choices
            # if template_answer_choices is not None:
            #     template_answer_choices = template_answer_choices.split('|||')
            #     template_answer_choices = [re.sub(' ', '', t) for t in template_answer_choices]
            #     for idx, answer_choice in enumerate(template_answer_choices):
            #         template_name = 'Answer Choice - {} - '.format(idx)+template_name
            #         # template_entities[template_name] = answer_choice
            #         template_string_list.append(answer_choice)
            #         template_name_list.append(template_name)
    with open("csv_files/entities.json","w") as f:
        json.dump(template_entities,f)
    template_df = pd.DataFrame(
        data={
            "template_name" : template_name_list,
            "template_string" : template_string_list,
        },
    )
    template_df.to_csv('csv_files/template_df.csv', index=False)
| lintangsutawika/multilingual-t0 | hf/translation.py | translation.py | py | 6,771 | python | en | code | 6 | github-code | 13 |
70940496337 | import os
import csv
import sys
import subprocess
#####
#
def install(package):
    '''
    Install *package* into the current interpreter via pip.
    '''
    pip_command = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(pip_command)
def find(m_id):
    '''
    Return (country, rating, genres) for the IMDb id *m_id*.

    Uses the module-level IMDb client `ia`; any field missing from the
    movie record comes back as None.
    '''
    # we get a list of parameters from imdb database
    dp = ia.get_movie(m_id)
    # we get rating, region and genre from this list
    rating = dp.get('rating')
    region = dp.get('country')
    genre = dp.get('genres')
    output = (region, rating, genre)
    return output
def main(num):
    '''
    Scrape *num* IMDb titles, aggregate per-country genre statistics
    (held in the module-level dicts) and write them to output.txt.
    '''
    # Getting data and creating dictionaries
    sorter(num)
    # Finding information
    d_sum = best_sum(country_dict)
    d_pop = popularity(country_count)
    d_middle = best_genre_middle(country_dict, country_count)
    # Finalizing information
    saving(d_sum, d_pop, d_middle)
def sorter(n):
    '''
    Scrape IMDb ids 1..n and accumulate, per country and genre, the sum of
    ratings into country_dict and the title count into country_count
    (both module-level dicts).  Requires network access.
    '''
    # We disable certain genres:
    # Short, Family, Game-show, Music, News, Reality TV, Sport, Talk-Show
    forbiden = ("Short", "Family", "Game-Show", "Music", "News",
                "Reality-TV", "Sport", "Talk-Show")
    print("This process needs internet connection")
    for num in range(1, n+1):
        length = len(str(num))
        # Create a proper id
        m_id = '0'*(7-length) + str(num)
        # NOTE(review): the next line is a redundant recomputation.
        length = len(str(num))
        # Get film info
        film_info = find(m_id)
        # Get genres
        genres = film_info[2]
        # Get rating
        rating = film_info[1]
        # Get country
        try:
            country = ''.join(film_info[0])
        except:
            country = film_info[0]
        if rating is None:
            continue
        else:
            for genre in genres:
                # We check if genre is not disabled
                if genre in forbiden:
                    pass
                else:
                    # Check if the given country exists in the dictionary
                    if not country_dict.get(country):
                        country_dict[country] = {genre : rating}
                        country_count[country] = {genre : 1}
                    else:
                        # Country exists, then
                        # Check if given genre exists
                        if not country_dict[country].get(genre):
                            country_dict[country][genre] = rating
                            country_count[country][genre] = 1
                        else:
                            country_dict[country][genre] += rating
                            country_count[country][genre] += 1
        print("===Progress " + str(int(num/n*100)) + "%===")
    print("===DONE!===")
def best_sum(dictionary):
    '''
    For each country, pick the genre with the largest summed rating.
    Countries whose genres all have non-positive totals are omitted,
    matching the original strictly-greater-than-zero behaviour.
    '''
    best = {}
    for country, genre_totals in dictionary.items():
        ranked = [(g, r) for g, r in genre_totals.items() if float(r) > 0]
        if ranked:
            # max() keeps the first maximal entry, preserving tie-breaking.
            best[country] = max(ranked, key=lambda pair: float(pair[1]))
    return best
def popularity(dictionary):
    '''
    For each country, pick the genre with the largest title count.
    Countries whose genres all have non-positive counts are omitted,
    matching the original strictly-greater-than-zero behaviour.
    '''
    best = {}
    for country, genre_counts in dictionary.items():
        ranked = [(g, c) for g, c in genre_counts.items() if float(c) > 0]
        if ranked:
            # max() keeps the first maximal entry, preserving tie-breaking.
            best[country] = max(ranked, key=lambda pair: float(pair[1]))
    return best
def best_genre_middle(dictionary1, dictionary2):
    '''
    For each country, pick the genre with the best average rating,
    i.e. summed rating (dictionary1) divided by title count (dictionary2).
    Countries with no positive average are omitted, matching the original
    strictly-greater-than-zero behaviour.
    '''
    best = {}
    for country, genre_totals in dictionary1.items():
        averages = [
            (genre, float(total) / float(dictionary2[country][genre]))
            for genre, total in genre_totals.items()
        ]
        positive = [(g, avg) for g, avg in averages if avg > 0]
        if positive:
            # max() keeps the first maximal entry, preserving tie-breaking.
            best[country] = max(positive, key=lambda pair: pair[1])
    return best
def saving(d_sum, d_pop, d_middle):
    '''
    Write the three result dictionaries as readable sections of output.txt,
    which is created next to this script.
    '''
    # Resolve the directory this script lives in so output.txt lands there.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    f = open(os.path.join(__location__, "output.txt"), "w+")
    f.write("===============================================================================================================================================\n")
    f.write("Best film genres by sum of ratings\n")
    for country in d_sum:
        f.write(str(country) + " : " + str(d_sum[country][1]) + "\nGenre : " + str(d_sum[country][0]) + "\n\n")
    f.write("===============================================================================================================================================\n")
    f.write("Most popular film genres\n")
    for country in d_pop:
        f.write(str(country) + " : " + str(d_pop[country][0]) + "\nAmount : " + str(d_pop[country][1]) + "\n\n")
    f.write("===============================================================================================================================================\n")
    f.write("Best film genres by average rating\n")
    for country in d_middle:
        f.write(str(country) + " : " + str(d_middle[country][1]) + "\nGenre : " + str(d_middle[country][0]) + "\n\n")
    f.close()
if __name__ == "__main__":
    # If the package isn`t already installed the installation will begin
    try:
        import imdb
    except:
        install('IMDBpy')
        import imdb
    ia = imdb.IMDb()
    # We use two dict to store data globaly
    # We use this dict to store the sum of ratings by genres
    country_dict = {}
    # This dict looks like this:
    # {country1:
    #   {genre1:rating,
    #    genre2:rating}
    # ,
    # country2:
    #   {genre1:rating,
    #    genre2:rating}
    # }
    # We use this dict to store the amount of titles by genres
    country_count = {}
    # The dict looks like this:
    # {country1:
    #   {genre1:count,
    #    genre2:count}
    # ,
    # country2:
    #   {genre1:rating,
    #    genre2:rating}
    # }
    # NOTE(review): "Imput" below is a typo in a runtime prompt string.
    n = int(input("Imput number of movies: "))
    main(n)
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if *s* can be segmented into dictionary words.

        The original method returned immediately and then carried an
        unreachable DP implementation plus debug prints; that dead code
        has been removed.
        """
        return self.back_tracking_method(s, wordDict)

    def back_tracking_method(self, s: str, wordDict: List[str]) -> bool:
        """Memoized depth-first search over split positions.

        From each start index, try every prefix that is a dictionary word
        and recurse on the remainder; lru_cache makes this O(n^2) states.
        """
        word_dict = set(wordDict)
        import functools

        @functools.lru_cache(None)
        def can_break(start_idx: int) -> bool:
            # Reached the end: every consumed piece was a dictionary word.
            if start_idx == len(s):
                return True
            return any(
                s[start_idx:cut + 1] in word_dict and can_break(cut + 1)
                for cut in range(start_idx, len(s))
            )

        return can_break(0)
| xincheng-cao/loser_fruit | backtracking/139. Word Break.py | 139. Word Break.py | py | 1,246 | python | en | code | 0 | github-code | 13 |
3108448366 | """
Create a program that determines whether or not it is possible to construct a particular total
using a specific number of coins.
For example:
it is possible to have a total of $1.00 using four coins if they are all quarters.
However, there is no way to have a total of $1.00 using 5 coins.
Yet it is possible to have $1.00 using 6 coins by using 3 quarters, 2 dimes and a nickel.
Similarly, a total of $1.25 can be formed using 5 coins or 8 coins,
but a total of $1.25 cannot be formed using 4, 6 or 7 coins.
Your program should read both the dollar amount and the number of coins from the user.
Then it should display a clear message indicating whether or not the entered dollar amount
can be formed using the number of coins indicated.
Assume the existence of quarters, dimes, nickels and pennies when completing this problem.
Your solution must use recursion. It cannot contain any loops.
"""
# IMPORT combinations_with_replacement module (from ITERTOOLS)
from itertools import combinations_with_replacement
# START Definition of the FUNCTIONS
def checkEntry(number):  # possible evolution -> import module
    '''Return True only when *number* parses as a non-negative decimal string.

    The original stub always returned True, defeating the validation loop
    in main() and letting non-numeric input crash the later float()/int()
    conversions.  (A fractional coin count still passes here; the caller's
    int() conversion is the remaining guard.)
    '''
    try:
        return float(number) >= 0
    except ValueError:
        return False
# possible evolution -> RECURSIVE FUNCTION
def checkNumberCoins(amount, coins, num_coins):
    """Return the first *num_coins*-coin combination summing to *amount*.

    Combinations (with replacement) are examined lazily in the order
    itertools generates them; sums are rounded to cents before comparing.
    Returns "" when no combination matches.
    """
    for combo in combinations_with_replacement(coins, num_coins):
        if round(sum(combo), 2) == amount:
            return combo
    return ""
# END Definition of the FUNCTIONS
# START MAIN PROGRAM
def main():
    """Read a dollar amount and a coin count from the user, then report
    whether any combination of standard coins with that count forms the
    amount (and show one such combination when it exists)."""
    # Available denominations: quarter, dime, nickel, penny.
    COINS = [0.25, 0.10, 0.05, 0.01]
    # Keep prompting until both entries pass validation.
    while True:
        amount = input("Enter the dollar amount: ")
        num_coins = input("Enter the number of coins: ")
        if checkEntry(amount) and checkEntry(num_coins):
            break
    # Convert the validated text entries to numbers.
    amount = float(amount)
    num_coins = int(num_coins)
    # Search for a combination of exactly num_coins coins summing to amount.
    combination = checkNumberCoins(amount, COINS, num_coins)
    # Report the outcome.
    if not combination:
        print("There are NO COMBINATIONS of coins that form the entered AMOUNT ($ {}).".format(amount))
    else:
        print("The COINS COMBINATION that forms the entered AMOUNT ($ {}) is".format(
            amount), combination)
# Run the interactive program only when executed as a script.
if __name__ == "__main__":
    main()
| aleattene/python-workbook | chap_08/exe_181_possible_change.py | exe_181_possible_change.py | py | 2,632 | python | en | code | 1 | github-code | 13 |
import sys
def initial_matrix(n, m):
    """Allocate an (n+1) x (m+1) integer DP matrix.

    The first column is initialised to 0..n and the first row to 0..m,
    the standard edit-distance boundary conditions; all other cells are 0.
    """
    matrix = [[0] * (m + 1) for _ in range(n + 1)]
    for row_idx in range(1, n + 1):
        matrix[row_idx][0] = row_idx
    for col_idx in range(m + 1):
        matrix[0][col_idx] = col_idx
    return matrix
def print_matrix(seq, mseq, matrix, start, n, mlen, direction=1):
    """Debug helper: pretty-print the first n+1 rows of the DP matrix.

    Row 0 carries no sequence base; row i (i > 0) is labelled with the base
    at seq[start + i*direction], so direction=-1 walks the sequence leftwards
    (used when printing a left extension).  Only the first mlen+1 columns of
    each row are shown, with the motif characters as the column header.
    """
    print('\t\t{}'.format('\t'.join(list(mseq))))
    for i in range(n+1):
        if i > 0:
            base = seq[start+i*direction]
        else:
            base = ''
        print("{}\t{}".format(base, '\t'.join(map(str, matrix[i][0:mlen+1]))))
def wrap_around_distance(base, mseq, mlen, i, matrix):
    """Fill row *i* of the wrap-around edit-distance matrix for one base.

    *base* is the sequence character for this row and *mseq* the repeat
    motif of length *mlen*.  The "wrap-around" terms (the matrix[.][mlen]
    references) allow an alignment to continue from the last motif column
    back to the first, which is what permits aligning against an unbounded
    tandem repetition of the motif.

    Returns True when the score in the last column increased relative to
    the previous row, i.e. this base introduced an alignment error.
    """
    #first pass
    #j = 1: may come from column 0 or wrap around from the last column
    if base == mseq[0]:
        cost = 0
    else:
        cost = 1
    matrix[i][1] = min(matrix[i-1][0]+cost, matrix[i-1][mlen]+cost, matrix[i-1][1]+1)
    #j > 1: standard edit-distance recurrence (diagonal, left, above)
    for j in range(2, mlen+1):
        if base == mseq[j-1]:
            cost = 0
        else:
            cost = 1
        matrix[i][j] = min(matrix[i-1][j-1]+cost, matrix[i][j-1]+1, matrix[i-1][j]+1)
    #second pass: propagate wrap-around deletions within the same row
    #j = 1
    matrix[i][1] = min(matrix[i][1], matrix[i][mlen]+1)
    #j > 1
    for j in range(2, mlen):
        matrix[i][j] = min(matrix[i][j], matrix[i][j-1]+1)
    return matrix[i][mlen] > matrix[i-1][mlen]
def wrap_around_extend(seq, mseq, mlen, matrix, start, size, max_error, direction):
    """Extend a repeat seed base-by-base using wrap-around edit distance.

    Starting just past *start*, walk up to *size* bases in *direction*
    (+1 rightwards, -1 leftwards; callers pass the reversed motif when going
    left).  Extension stops once more than *max_error* consecutive error
    bases are seen.

    Returns the number of bases successfully extended, with any trailing
    run of consecutive errors trimmed off; 0 when *size* is non-positive.
    """
    current_error = 0
    if size <= 0: return 0
    for i in range(1, size+1):
        # wrap_around_distance() returns True when this base adds an error.
        if wrap_around_distance(seq[start+i*direction], mseq, mlen, i, matrix):
            current_error += 1
        else:
            current_error = 0
        if current_error > max_error: break
    # Trim the trailing consecutive errors from the reported extension length.
    i -= current_error
    return i
def wrap_around_backtrace(mlen, matrix, i):
    """Trace the wrap-around edit-distance matrix back from row *i*.

    Starting at cell (i, mlen), walk back to the origin, counting how each
    aligned base was consumed.

    Parameters:
        mlen: motif length (the matrix has mlen + 1 columns).
        matrix: DP matrix filled by wrap_around_distance() up to row *i*.
        i: row to start the backtrace from (number of extended bases).

    Returns:
        (num_match, num_substitute, num_insert, num_delete, path).
        NOTE: `path` is currently never populated and is always returned
        empty; callers wanting the traced cells must extend this function.

    Fix over the original: removed the stray `print(i, j)` debug statement
    that spammed stdout on every backtrace step.
    """
    num_mat = num_sub = num_ins = num_del = 0
    j = mlen
    path = []
    while i > 0 or j > 0:
        # Undo the second (within-row wrap-around deletion) pass first.
        if i > 0 and j > 0 and j < mlen:
            if j == 1:
                if matrix[i][j] == matrix[i][mlen] + 1:
                    num_del += 1
                    j = mlen
                    continue
            else:
                if matrix[i][j] == matrix[i][j-1] + 1:
                    num_del += 1
                    j -= 1
                    continue
        elif i == 0:
            # Row 0 can only be reached by deleting remaining motif bases.
            num_del += 1
            j -= 1
            continue
        # Undo the first (edit-distance recurrence) pass.
        if j == 1:
            # Column 1 may come from the wrap-around column, column 0,
            # or from an insertion in the previous row.
            v = min(matrix[i-1][mlen], matrix[i-1][0], matrix[i-1][1])
            if v == matrix[i-1][mlen]:
                if v == matrix[i][j]:
                    num_mat += 1
                else:
                    num_sub += 1
                i -= 1
                j = mlen
            elif v == matrix[i-1][0]:
                if v == matrix[i][j]:
                    num_mat += 1
                else:
                    num_sub += 1
                i -= 1
                j -= 1
            elif v == matrix[i-1][1]:
                num_ins += 1
                i -= 1
        else:
            v = min(matrix[i-1][j-1], matrix[i-1][j], matrix[i][j-1])
            if v == matrix[i-1][j-1]:
                if v == matrix[i][j]:
                    num_mat += 1
                else:
                    num_sub += 1
                i -= 1
                j -= 1
            elif v == matrix[i-1][j]:
                num_ins += 1
                i -= 1
            elif v == matrix[i][j-1]:
                num_del += 1
                j -= 1
    return num_mat, num_sub, num_ins, num_del, path
def atr_finder(seq, max_motif_size=6, min_seed_repeat=3, min_seed_length=10,
               max_consecutive_error=3, min_identity=0.7, max_extend_length=1000):
    """Scan *seq* for approximate tandem repeats (ATRs).

    Exact-repeat seeds are detected first (seq[i] == seq[i+j] for period j),
    then each seed is extended left and right with wrap-around edit distance,
    and kept when the alignment identity reaches *min_identity*.

    Returns a list of tuples:
        (motif, motif_len, start, end, length, matches, substitutions,
         insertions, deletions, identity)

    NOTE(review): this function looks like work-in-progress — it contains
    debug prints, commented-out backtrace calls, and `ed` may be unbound
    (see inline notes) — confirm before production use.
    """
    # A single DP matrix is allocated once and reused for every extension.
    matrix = initial_matrix(max_extend_length, max_motif_size)
    size = len(seq)
    atrs = []
    i = 0
    while i < size:
        if seq[i] == 'N':
            i += 1
            continue
        seed_start = i
        for j in range(1, max_motif_size+1):
            # Exact seed detection: seq[i] == seq[i+j] extends a period-j repeat.
            b = size - j
            while i < b and seq[i] == seq[i+j]:
                i += 1
            seed_length = i + j - seed_start
            seed_repeat = int(seed_length / j)
            # Truncate the seed to a whole number of motif copies.
            seed_length = seed_repeat * j
            if seed_repeat >= min_seed_repeat and seed_length >= min_seed_length:
                motif = seq[seed_start:seed_start+j]
                #0-based end position
                seed_end = seed_start + seed_length - 1
                # The exact seed contributes only matches.
                tandem_match = seed_length
                tandem_substitute = 0
                tandem_insert = 0
                tandem_delete = 0
                #extend to left (reversed motif, direction -1)
                extend_start = seed_start
                extend_maxlen = extend_start
                if extend_maxlen > max_extend_length:
                    extend_maxlen = max_extend_length
                extend_len = wrap_around_extend(seq, motif[::-1], j, matrix, extend_start,
                                                extend_maxlen, max_consecutive_error, -1)
                if extend_len > 0:
                    # NOTE(review): leftover debug output below.
                    print("left: {} {} {}".format(extend_start, j, seed_length))
                    print_matrix(seq, motif[::-1], matrix, extend_start, extend_len, j, -1)
                    ed = wrap_around_backtrace(j, matrix, extend_len)
                    tandem_match += ed[0]
                    tandem_substitute += ed[1]
                    tandem_insert += ed[2]
                    tandem_delete += ed[3]
                    #path = ed[4]
                    #for a, b in path:
                    #	matrix[a][b] = "{}*".format(matrix[a][b])
                tandem_start = extend_start - extend_len + 1
                #extend to right (forward motif, direction +1)
                extend_start = seed_end
                extend_maxlen = size - extend_start - 1
                if extend_maxlen > max_extend_length:
                    extend_maxlen = max_extend_length
                extend_len = wrap_around_extend(seq, motif, j, matrix, extend_start, extend_maxlen,
                                                max_consecutive_error, 1)
                if extend_len > 0:
                    #print_matrix(seq, motif, matrix, extend_start, extend_len, j)
                    #ed = wrap_around_backtrace(j, matrix, extend_len)
                    # NOTE(review): `ed` is NOT recomputed here (the call above
                    # is commented out), so this block reuses the *left*
                    # extension's counts — and raises NameError when the left
                    # extension did not run. Confirm and restore the call.
                    tandem_match += ed[0]
                    tandem_substitute += ed[1]
                    tandem_insert += ed[2]
                    tandem_delete += ed[3]
                    path = ed[4]
                    #for a, b in path:
                    #	matrix[a][b] = "{}*".format(matrix[a][b])
                    #print_matrix(seq, motif, matrix, extend_start, extend_len, j)
                tandem_align = tandem_match + tandem_insert + tandem_substitute + tandem_delete
                tandem_identity = tandem_match / tandem_align
                if tandem_identity >= min_identity:
                    # NOTE(review): end looks off by one vs. the 0-based
                    # convention used for seed_end — confirm intended bounds.
                    tandem_end = extend_start + extend_len + 1
                    tandem_length = tandem_end - tandem_start + 1
                    atrs.append((motif, j, tandem_start, tandem_end, tandem_length, tandem_match,
                                 tandem_substitute, tandem_insert, tandem_delete, tandem_identity))
                    i = tandem_end
                break
            i = seed_start
        i += 1
    return atrs
if __name__ == '__main__':
    #s = "AAGAAGAAGAAGCCGAGAAGGTAGATAG"
    #s = "ATGCATGCATGCAGGCTGC"
    import pyfastx
    # Iterate the FASTA only to leave `s` bound to the *last* record,
    # then scan that record's sequence for approximate tandem repeats.
    for s in pyfastx.Fasta('../data/chr2.fa.gz'):
        pass
    atrs = atr_finder(s.seq)
| lmdu/pytrf | atrfinder.py | atrfinder.py | py | 5,674 | python | en | code | 4 | github-code | 13 |
# -*- coding: utf-8 -*-
# 主函数
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
import index, alert, bye
def get_relus():
    """Load the rule base and split each rule into premises and conclusion.

    Each non-empty line of the rule file is a space-separated rule whose
    last token is the conclusion and whose preceding tokens are the
    premises.

    Returns:
        P: list of premise lists, one per rule.
        Q: list of conclusions, parallel to P.
    """
    P = []  # premises of each rule
    Q = []  # conclusion of each rule
    # Context manager guarantees the file is closed even on error
    # (the original left the handle open if an exception occurred).
    with open("data\RD.txt", "r") as RD:
        for line in RD:  # read the file line by line
            line = line.strip("\n")  # drop the trailing newline
            if line == '':  # skip blank lines
                continue
            tokens = line.split(' ')  # split the rule on spaces
            Q.append(tokens[-1])   # last token is the conclusion
            P.append(tokens[:-1])  # remaining tokens are the premises
    return P, Q
def ListInSet(li, se):
    """Return True when every premise in *li* is contained in the fact set *se*.

    :param li: list of premises of a rule
    :param se: set of currently known facts
    """
    return all(item in se for item in li)
# Exit ("goodbye") window shown when the user quits the system.
class Bye_ui(QtWidgets.QMainWindow, bye.Ui_MainWindow):
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)  # create the main-window object
        bye.Ui_MainWindow.__init__(self)  # initialise the generated UI object
        self.setupUi(self)  # apply the generated layout to this window
        self.pushButton.clicked.connect(self.no)
    def no(self):  # close this window
        self.close()
# Prompt window shown when inference fails, asking the user what to do next.
class Alert_ui(QtWidgets.QMainWindow, alert.Ui_MainWindow):
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        alert.Ui_MainWindow.__init__(self)
        self.setupUi(self)
# Main (home page) window of the animal-recognition expert system.
class Index_ui(QtWidgets.QMainWindow, index.Ui_MainWindow):
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        index.Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.pushButton.clicked.connect(self.add_rule)  # "add rule" button
        self.pushButton_2.clicked.connect(self.inference)  # "run inference" button
        self.alert_window = Alert_ui()
        for line in open("data\RD.txt", 'r'):
            self.textBrowser.append(line)  # show the rule base in the browser pane
        self.pushButton_3.clicked.connect(self.close_window)  # "quit system" button
    def add_rule(self):
        """
        Append a new rule from the input line to the display and the rule file.
        :return:
        """
        new_rule = self.lineEdit.text()  # content of the "add rule" input box
        # NOTE(review): this only rejects a single-space entry; the empty
        # string "" still passes — confirm whether strip()/truthiness was meant.
        if new_rule != " ":
            self.textBrowser.append(new_rule)
            RD = open('data\RD.txt', 'a')
            RD.write(new_rule)
            RD.write('\n')
            RD.close()
    def close_window(self):
        """
        Show the goodbye window and close the main and alert windows.
        :return:
        """
        self.bye_window = Bye_ui()
        self.bye_window.show()
        self.alert_window = Alert_ui()
        self.alert_window.close()
        self.close()
        # self.bye_window.pushButton.clicked.connect(self.bye_window.close())
    def inference(self):
        """
        Run one forward-chaining pass over the rule base using the entered facts.
        :return:
        """
        input = self.textEdit.toPlainText()  # facts entered by the user
        # NOTE(review): `input` shadows the builtin of the same name.
        input = input.split('\n')  # one fact per line
        DB = set(input)  # fact database built from the entered facts
        [P, Q] = get_relus()  # premises and conclusions from the rule base
        self.process = ''  # accumulated inference trace
        self.animal = ''  # final inference result
        # Start inference.
        # flag records whether any rule fired; set to 1 on first success.
        flag = 0
        for premise in P:  # iterate over the rule premises
            if ListInSet(premise, DB):  # all premises of this rule are known facts
                DB.add(Q[P.index(premise)])  # assert the rule's conclusion as a new fact
                self.animal = Q[P.index(premise)]  # remember the latest conclusion
                self.process += "%s --> %s" % (premise, Q[P.index(premise)])  # record the step
                flag = 1
        if flag == 0:  # nothing could be inferred: ask whether to add more facts
            self.alert_window.show()
            self.alert_window.pushButton.clicked.connect(self.alert_window.close)  # "yes": back to the main page
            self.alert_window.pushButton_2.clicked.connect(self.close_window)  # "no": quit the system
        else:  # a conclusion was reached: show the trace and the result
            self.textEdit_2.setText(self.process)
            self.lineEdit_2.setText(self.animal)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)  # create the Qt application
    index_window = Index_ui()  # create the home-page window object
    index_window.show()  # show the home page
    sys.exit(app.exec_())  # run the event loop until the app exits
from __future__ import annotations
import math
import re
from os.path import exists
from typing import Optional, List, Set
from thefuzz import fuzz
from yacs.config import CfgNode
import os
from textdistance import levenshtein
from logging import Logger
import itertools
import torch
import pandas as pd
import enlighten
from DeepOnto.src.deeponto.align.mapping import EntityMapping
from DeepOnto.src.deeponto.onto import Ontology
from DeepOnto.src.deeponto.utils import FileUtils, Tokenizer
from .bert_classifier import BERTSynonymClassifier
from ...utils.kg_utils import BEST_RANK
DEV_MODE = False
# @paper(
#     "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
#     "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class MappingPredictor:
    r"""Class for the mapping prediction module of $\textsf{BERTMap}$ and $\textsf{BERTMapLt}$ models.

    Attributes:
        tokenizer (Tokenizer): The tokenizer used for constructing the inverted annotation index and candidate selection.
        src_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `src_onto` according to `annotation_property_iris`.
        tgt_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `tgt_onto` according to `annotation_property_iris`.
        tgt_inverted_annotation_index (InvertedIndex): The inverted index built from `tgt_annotation_index` used for target class candidate selection.
        bert_synonym_classifier (BERTSynonymClassifier, optional): The BERT synonym classifier fine-tuned on text semantics corpora.
        num_raw_candidates (int): The maximum number of selected target class candidates for a source class.
        num_best_predictions (int): The maximum number of best scored mappings presevred for a source class.
        batch_size_for_prediction (int): The batch size of class annotation pairs for computing synonym scores.
    """

    override = True  # NOTE(review): not referenced inside this class — confirm external use before removing.

    def __init__(
        self,
        output_path: str,
        tokenizer_path: str,
        src_annotation_index: dict,
        tgt_annotation_index: dict,
        bert_synonym_classifier: Optional[BERTSynonymClassifier],
        num_raw_candidates: Optional[int],
        num_best_predictions: Optional[int],
        batch_size_for_prediction: int,
        logger: Logger,
        enlighten_manager: enlighten.Manager,
        enlighten_status: enlighten.StatusBar
    ):
        """Build the predictor and the target-side inverted annotation index."""
        self.logger = logger
        self.enlighten_manager = enlighten_manager
        self.enlighten_status = enlighten_status
        self.tokenizer = Tokenizer.from_pretrained(tokenizer_path)
        if DEV_MODE: self.logger.info("Build inverted annotation index for candidate selection.")
        self.src_annotation_index = src_annotation_index
        self.tgt_annotation_index = tgt_annotation_index
        self.tgt_inverted_annotation_index = Ontology.build_inverted_annotation_index(
            tgt_annotation_index, self.tokenizer
        )
        # the fundamental judgement for whether bertmap or bertmaplt is loaded
        # (None => BERTMapLt, i.e. string matching only)
        self.bert_synonym_classifier = bert_synonym_classifier
        self.num_raw_candidates = num_raw_candidates
        self.num_best_predictions = num_best_predictions
        self.batch_size_for_prediction = batch_size_for_prediction
        self.output_path = output_path
        # Factory for equivalence mappings; rank is BEST_RANK for confident matches.
        self.init_class_mapping = lambda head, tail, score, rank: EntityMapping(head, tail, "<EquivalentTo>", score, rank)

    def bert_mapping_score(
        self,
        src_class_annotations: Set[str],
        tgt_class_annotations: Set[str],
    ):
        r"""$\textsf{BERTMap}$'s main mapping score module which utilises the fine-tuned BERT synonym
        classifier.

        Compute the **synonym score** for each pair of src-tgt class annotations, and return
        the **average** score as the mapping score. Apply string matching before applying the
        BERT module to filter easy mappings (with scores $1.0$).
        """
        # apply string matching before applying the bert module
        prelim_score = self.edit_similarity_mapping_score(
            src_class_annotations,
            tgt_class_annotations,
            string_match_only=True
        )
        if prelim_score == 1.0:
            return prelim_score
        # apply BERT classifier and define mapping score := Average(SynonymScores)
        class_annotation_pairs = list(itertools.product(src_class_annotations, tgt_class_annotations))
        if len(class_annotation_pairs) != 0:
            synonym_scores = self.bert_synonym_classifier.predict(class_annotation_pairs)
        else:
            # No annotation pairs at all -> zero score placeholder.
            synonym_scores = torch.tensor([0], dtype=torch.float)
        # only one element tensor is able to be extracted as a scalar by .item()
        return float(torch.mean(synonym_scores).item())

    @staticmethod
    def edit_similarity_mapping_score(
        src_class_annotations: Set[str],
        tgt_class_annotations: Set[str],
        string_match_only: bool = False,
    ):
        r"""$\textsf{BERTMap}$'s string match module and $\textsf{BERTMapLt}$'s mapping prediction function.

        Compute the **normalised edit similarity** `(1 - normalised edit distance)` for each pair
        of src-tgt class annotations, and return the **maximum** score as the mapping score.
        """
        # edge case when src and tgt classes have an exact match of annotation
        if len(src_class_annotations.intersection(tgt_class_annotations)) > 0:
            return 1.0
        # a shortcut to save time for $\textsf{BERTMap}$
        if string_match_only:
            return 0.0
        annotation_pairs = itertools.product(src_class_annotations, tgt_class_annotations)
        sim_scores = [levenshtein.normalized_similarity(src, tgt) for src, tgt in annotation_pairs]
        return max(sim_scores) if len(sim_scores) > 0 else 0.0

    # ======================================================================================================================
    """
    FIND BEST TGT CANDIDATES FOR src_class_iri
    """
    def mapping_prediction_for_src_class(self, src_class_iri: str) -> List[EntityMapping]:
        r"""Predict $N$ best scored mappings for a source ontology class, where
        $N$ is specified in `self.num_best_predictions`.

        1. Apply the **string matching** module to compute "easy" mappings.
        2. Return the mappings if found any, or if there is no BERT synonym classifier
        as in $\textsf{BERTMapLt}$.
        3. If using the BERT synonym classifier module:

            - Generate batches for class annotation pairs. Each batch contains the combinations of the
            source class annotations and $M$ target candidate classes' annotations. $M$ is determined
            by `batch_size_for_prediction`, i.e., stop adding annotations of a target class candidate into
            the current batch if this operation will cause the size of current batch to exceed the limit.
            - Compute the synonym scores for each batch and aggregate them into mapping scores; preserve
            $N$ best scored candidates and update them in the next batch. By this dynamic process, we eventually
            get $N$ best scored mappings for a source ontology class.
        """
        src_class_annotations = self.src_annotation_index[src_class_iri]
        # previously wrongly put tokenizer again !!!
        tgt_class_candidates = self.tgt_inverted_annotation_index.idf_select(
            list(src_class_annotations), pool_size=self.num_raw_candidates
        )  # [(tgt_class_iri, idf_score)]
        best_scored_mappings = []

        # for string matching: save time if already found string-matched candidates
        def string_match():
            """Compute string-matched mappings."""
            string_matched_mappings = []
            for tgt_candidate_iri, _ in tgt_class_candidates:
                tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
                prelim_score = self.edit_similarity_mapping_score(
                    src_class_annotations,
                    tgt_candidate_annotations,
                    string_match_only=True
                )
                if prelim_score > 0.0:
                    # if src_class_annotations.intersection(tgt_candidate_annotations):
                    string_matched_mappings.append(
                        self.init_class_mapping(src_class_iri, tgt_candidate_iri, prelim_score, BEST_RANK)
                    )
            return string_matched_mappings

        best_scored_mappings += string_match()
        # return string-matched mappings if found or if there is no bert module (bertmaplt)
        if best_scored_mappings or not self.bert_synonym_classifier:
            if DEV_MODE: self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{best_scored_mappings}")
            return best_scored_mappings

        # else, run bert and return its matches :
        def generate_batched_annotations(batch_size: int):
            """Generate batches of class annotations for the input source class and its
            target candidates.
            """
            batches = []
            # the `nums`` parameter determines how the annotations are grouped
            current_batch = CfgNode({"annotations": [], "nums": []})
            for i, (tgt_candidate_iri, _) in enumerate(tgt_class_candidates):
                tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
                annotation_pairs = list(itertools.product(src_class_annotations, tgt_candidate_annotations))
                current_batch.annotations += annotation_pairs
                num_annotation_pairs = len(annotation_pairs)
                current_batch.nums.append(num_annotation_pairs)
                # collect when the batch is full or for the last target class candidate
                if sum(current_batch.nums) > batch_size or i == len(tgt_class_candidates) - 1:
                    batches.append(current_batch)
                    current_batch = CfgNode({"annotations": [], "nums": []})
            return batches

        def bert_match():
            """Compute mappings with fine-tuned BERT synonym classifier."""
            bert_matched_mappings = []
            class_annotation_batches = generate_batched_annotations(self.batch_size_for_prediction)
            batch_base_candidate_idx = (
                0  # after each batch, the base index will be increased by # of covered target candidates
            )
            device = self.bert_synonym_classifier.device
            # intialize N prediction scores and N corresponding indices w.r.t `tgt_class_candidates`
            final_best_scores = torch.tensor([-1] * self.num_best_predictions).to(device)
            final_best_idxs = torch.tensor([-1] * self.num_best_predictions).to(device)
            for annotation_batch in class_annotation_batches:
                synonym_scores = self.bert_synonym_classifier.predict(annotation_batch.annotations)
                # aggregating to mappings cores
                grouped_synonym_scores = torch.split(
                    synonym_scores,
                    split_size_or_sections=annotation_batch.nums,
                )
                # TODO try replacing mean with max
                # account_key has candidate = 'ClientsAndAccounts/AccountIdentifier'
                # annotations: [('account key', 'account identifier'), ('account key', 'account number'),...] , numns:[2,....]
                # grouped for cand = tensor([0.0022, 0.9369], device='cuda:0')
                # mean = 4.6955e-01 !
                mapping_scores = torch.stack([torch.max(chunk) for chunk in grouped_synonym_scores])
                assert len(mapping_scores) == len(annotation_batch.nums)
                # preserve N best scored mappings
                # scale N in case there are less than N tgt candidates in this batch
                N = min(len(mapping_scores), self.num_best_predictions)
                batch_best_scores, batch_best_idxs = torch.topk(mapping_scores, k=N)
                batch_best_idxs += batch_base_candidate_idx
                # we do the substitution for every batch to prevent from memory overflow
                final_best_scores, _idxs = torch.topk(
                    torch.cat([batch_best_scores, final_best_scores]),
                    k=self.num_best_predictions,
                )
                final_best_idxs = torch.cat([batch_best_idxs, final_best_idxs])[_idxs]
                # update the index for target candidate classes
                batch_base_candidate_idx += len(annotation_batch.nums)
            for candidate_idx, mapping_score in zip(final_best_idxs, final_best_scores):
                # ignore intial values (-1.0) for dummy mappings
                # the threshold 0.9 is for mapping extension
                # TODO threshold ?
                if mapping_score.item() >= 0.85:
                    tgt_candidate_iri = tgt_class_candidates[candidate_idx.item()][0]
                    bert_matched_mappings.append(
                        self.init_class_mapping(
                            src_class_iri,
                            tgt_candidate_iri,
                            mapping_score.item(),
                            BEST_RANK
                        )
                    )

            assert len(bert_matched_mappings) <= self.num_best_predictions
            if DEV_MODE: self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{bert_matched_mappings}")

            # Fallback: no candidate passed the 0.85 threshold but at least one
            # real score exists -> keep a curated set of low-scored candidates.
            if not bert_matched_mappings and final_best_scores[0] != -1:  # 1
                bert_matched_mappings = \
                    self.get_low_score_candidates(src_class_iri, tgt_class_candidates, final_best_scores, final_best_idxs)
            return bert_matched_mappings

        return bert_match()
    # ----------------------------------------------------------------------------------------------------------------------
    """
    Get candidates for PO ontology elements (src) that have only low-scored candidates
    1. Should have at least one such candidate. 
    2. Keep the top-k (eg 10) candidates with the highest bert scores or all the candidates in case there are fewer than k
    3. (See ranking method below) The low the rank number the better
    4. Initialize the variables based on the top (best bert) candidate
        - best_bert_score : is the score of the top candidate
        - best_rank : will have the lowest/best rank that has been discovered at each step
        - topToKeep : a list that will hold the suitable candidates. For each such candidate: (index, bert_score, rank)
                      Init with the top candidate in case at least one of its annots has some overlap with some scr_annots
                      or in case it has a good bert score (eg >0.5). Else the top candidate is not selected
    5. For each of the rest candidates with index idx, and bert score cand_score:
        Calculate the percentage difference of cand_score with the highest bert score (that of the top candidate)
        Retrieve the rank of the candidate. In case there was zero overlap cand_rank is set to inf
        If it is a suitable candidate:
            Add it to topToKeep and update the best/minimum rank
    6. A candidate considered suitable in one of the following cases:
        - Its percentage difference with the top candidate (in terms of bert score) is small (eg lower than 50% difference)
          Also, it must have a good enough bert score (eg >0.5) OR some overlap with the src (non inf rank)
        - Otherwise (when the perc_diff is significant or it doesn't have a good enough bert score) is suitable if
          it has some overlap with the src and this overlap is better (better/lower rank) than the currently best 
          discovered rank. 
    7. Return all suitable candidates

    8. Calculate the rank of each candidate:
        The candidate has a list of (could be multiple) annotations tgt_annotations - same for the src with scr_annotations
        For each pair of src and tgt annotation:
            Split the tgt_annot to its tokens. Keep only the words and exclude 'has' as the majority of obj and data prop contain it
            For each token if it isn't just a single letter and it can be found in the src_annot
                Increase the pair score by one (this token is a point of overlap between the src and tgt annot)
            -> Therefore, we have counted the number of tgt tokens that are also present in the src
            The score is then divided by the number of tokens in the tgt_annot 
            -> Therefore, the final score of the src_annot and tgt_annot pair is the portion/percentage of tokens in the 
               tgt_annot that are also present in the scr_annot. 
               By dividing with the len we 'punish' long tgt_annots that have low overlap with the src 
            Example: 
                src_annot = contribution interest rate
                tgt_annot1 = rate 1/1 = 1
                tgt_annot2 = base rate 1/2 = 0.5
                tgt_annot3 = interest rate 2/2 = 1
                tgt_annot4 = some other with interest rate 2/5 = 0.4
                tgt_annot5 = unsuitable candidate 0/2 = 0
            Ranking :
                interest rate : 1
                rate : 2
                base rate : 3
                some other with interest rate : 4
                (unsuitable candidate : no ranking -> inf)
        Comments: 
            8.1 The length of the candidate annot is the second criterion. Therefore, 'interest rate' is 
                better that 'rate' because two words where matched instead of one
            8.2 In case a tgt candidate has multiple tgt_annotations (multiple pairs of src_annot, tgt_annot)
                the score of the candidate is the highest of all pairs (for example one of the annotations 
                might be an abbreviation that can't be matched with the src_annot)
            8.3 Give the same rank/number to candidates with the same score and length

    """
    def rank_candidates(self, src_class_iri, tgt_class_candidates, final_best_idx):
        def sort_scores(scores):
            return sorted(scores, key=lambda x: (x[1], x[2]), reverse=True)  # 8.1

        def score_scr_tgt_pair(idx, tgt_annotations):
            candidate_scores = []
            for src_annot, tgt_annot in itertools.product(src_annotations, tgt_annotations):
                # Tokenize the target annotation, excluding the word 'has'.
                tgt_tokens = re.findall(r'\b(?!has\b)\w+', tgt_annot)
                pair_score = 0
                for token in tgt_tokens:
                    # Count multi-character tokens that appear in the source annotation.
                    if len(token) > 1 and fuzz.partial_ratio(token, src_annot) == 100:
                        pair_score += 1
                pair_score /= len(tgt_tokens)
                candidate_scores.append([idx, pair_score, len(tgt_tokens)])
            final_candidate_score = sort_scores(candidate_scores)[0]  # 8.2
            return final_candidate_score

        src_annotations = self.src_annotation_index[src_class_iri]
        final_candidates_scores = [
            score_scr_tgt_pair(idx, self.tgt_annotation_index[tgt_class_candidates[idx][0]])
            for idx in final_best_idx
        ]
        final_candidates_scores = sort_scores(final_candidates_scores)
        # 8.3
        ranking, current_rank, prev_score = {}, 0, None
        for idx, score, length in final_candidates_scores:
            if score == 0:
                continue
            elif (score, length) != prev_score:
                current_rank += 1
            ranking[idx] = current_rank
            prev_score = (score, length)
        return ranking

    def get_low_score_candidates(self, src_class_iri,
                                 tgt_class_candidates, final_best_scores, final_best_idxs,
                                 k=10, perc_thrs=0.5):
        def is_suitable_candidate():  # 6
            return \
                (percentage_diff < perc_thrs and (cand_rank < math.inf or cand_score > perc_thrs)) \
                or (cand_rank < math.inf and cand_rank <= best_rank)
        # 2
        final_best_scores = [bert_score for bert_score in final_best_scores[:k] if bert_score!=-1]
        final_best_idxs = [idx.item() for idx in final_best_idxs[:k] if idx!=-1]
        ranking = self.rank_candidates(src_class_iri, tgt_class_candidates, final_best_idxs)  # 3
        # 4
        best_bert_score = final_best_scores[0]
        best_rank = ranking.get(final_best_idxs[0], math.inf)
        if best_rank < math.inf or best_bert_score >= perc_thrs:
            topToKeep = [(final_best_idxs[0], best_bert_score, best_rank)]
        else:
            topToKeep = []
        # 5
        for idx, cand_score in zip(final_best_idxs[1:], final_best_scores[1:]):
            percentage_diff = abs((cand_score - best_bert_score) / best_bert_score)
            cand_rank = ranking.get(idx, math.inf)
            if is_suitable_candidate():
                topToKeep.append((idx, cand_score, cand_rank))
                best_rank = min(best_rank, cand_rank)
        # 7
        low_score_mappings = []
        for candidate_idx, mapping_score, rank in topToKeep:
            tgt_candidate_iri = tgt_class_candidates[candidate_idx][0]
            # Zero-overlap candidates get a rank just past the candidate pool size.
            if rank == math.inf: rank = self.num_raw_candidates + 1
            low_score_mappings.append(
                self.init_class_mapping(
                    src_class_iri,
                    tgt_candidate_iri,
                    mapping_score.item(),
                    rank
                )
            )
        return low_score_mappings

    # ======================================================================================================================

    def mapping_prediction(self):
        r"""Apply global matching for each class in the source ontology.

        See [`mapping_prediction_for_src_class`][deeponto.align.bertmap.mapping_prediction.MappingPredictor.mapping_prediction_for_src_class].

        If this process is accidentally stopped, it can be resumed from already saved predictions. The progress
        bar keeps track of the number of source ontology classes that have been matched.
        """
        self.logger.info("Start global matching for each element in the source ontology...")

        match_dir = os.path.join(self.output_path, "match")
        try:
            mapping_index = FileUtils.load_file(os.path.join(match_dir, "raw_mappings.json"))
            if DEV_MODE: self.logger.info("Load the existing mapping prediction file.")
        except:  # NOTE(review): bare except hides real errors; narrow to FileNotFoundError (and JSON errors) if possible.
            mapping_index = dict()
            FileUtils.create_path(match_dir)
        if DEV_MODE: self.logger.info(f"Loaded {len(mapping_index)} mappings.")

        progress_bar = self.enlighten_manager.counter(
            total=len(self.src_annotation_index), desc="Mapping Prediction", unit="per src class"
        )
        self.enlighten_status.update(demo="Mapping Prediction")

        for i, src_class_iri in enumerate(self.src_annotation_index.keys()):
            # Resume support: skip classes already present in the saved index.
            if src_class_iri in mapping_index.keys():
                if DEV_MODE: self.logger.info(f"[Class {i}] Skip matching {src_class_iri} as already computed.")
                progress_bar.update()
                continue
            mappings = self.mapping_prediction_for_src_class(src_class_iri)
            mapping_index[src_class_iri] = [m.to_tuple(with_score=True) for m in mappings]

            # Checkpoint every 100 classes (and at the end) to limit loss on interruption.
            if i % 100 == 0 or i == len(self.src_annotation_index) - 1:
                self.save_checkpoint_mappings(mapping_index, match_dir)
            progress_bar.update()

        self.save_checkpoint_mappings(mapping_index, match_dir)
        self.logger.info(f"Finished mapping prediction for each element in the source ontology. Mapping index has {len(mapping_index)} src elements")
        progress_bar.close()


    def save_checkpoint_mappings(self, mapping_index, match_dir):
        FileUtils.save_file(mapping_index, os.path.join(match_dir, "raw_mappings.json"))
        # also save a .tsv version
        mapping_in_tuples = list(itertools.chain.from_iterable(mapping_index.values()))
        mapping_df = pd.DataFrame(mapping_in_tuples, columns=["SrcEntity", "TgtEntity", "Score", "Rank"])
        mapping_df.to_csv(os.path.join(match_dir, "raw_mappings.tsv"), sep="\t", index=False)
        self.logger.info(f"Save currently computed mappings to prevent undesirable loss. Saved {mapping_df.shape[0]} mappings")
| ChristinaK97/KnowledgeGraphs | KnowledgeGraphsPython/DeepOnto/src/deeponto/align/bertmap/mapping_prediction.py | mapping_prediction.py | py | 24,725 | python | en | code | 0 | github-code | 13 |
from tkinter import *
import random
class window:
    """A 3x3 Simon game board.

    Each round flashes a random sequence of buttons; the player must press
    the same buttons back.  The AI sequence and the player's presses are
    persisted to text files ("AI Sequence.txt" / "userInput.txt") and
    compared after a fixed answer period.
    """

    def __init__(self, master, length):
        """Build the board on *master* and start a round of *length* flashes."""
        self.master = master
        master.title("Simon")
        self.length = length
        # Buttons are numbered "1".."9", laid out row-major on a 3x3 grid.
        # (Replaces nine copy-pasted, near-identical button definitions.)
        self.buttons = {}
        for idx in range(9):
            num = str(idx + 1)
            btn = Button(master, width=12, height=6, bg="grey", relief=RIDGE,
                         activebackground="blue",
                         command=lambda n=num: self.userInput(n))
            btn.place(x=10 + (idx % 3) * 110, y=10 + (idx // 3) * 110)
            self.buttons[num] = btn
        self.sequence(length)
        # Give the player 15 seconds to repeat the sequence, then check it.
        master.after(15000, lambda: self.compare(length))

    def compare(self, length):
        """Compare the player's recorded presses against the AI sequence.

        On success, start a new round one flash longer; otherwise end the game.
        """
        with open("userInput.txt", "r") as f:
            user_seq = f.read()
        with open("AI Sequence.txt", "r") as f:
            ai_seq = f.read()
        game = True
        for i in range(length):
            try:
                if user_seq[i] == ai_seq[i]:
                    print("correct")
                else:
                    print("wrong")
                    game = False
            except IndexError:
                # NOTE(review): a missing press is silently ignored, so an
                # incomplete answer still counts as correct — confirm intent.
                pass
        if game:
            # Restart with a longer sequence after a short pause.
            # Fix: the original called __init__ immediately and handed its
            # return value (None) to after(); the lambda defers it as intended.
            self.master.after(1000, lambda: self.__init__(self.master, length + 1))
        else:
            print("Game over")
            self.master.destroy()

    def userInput(self, num):
        """Record a button press by appending its digit to the input file."""
        if num in "123456789":  # only valid board digits are recorded
            with open("userInput.txt", "a") as f:
                f.write(num)

    def sequence(self, length):
        """Generate, persist, and animate a fresh random sequence of *length* digits."""
        # Truncate both history files for the new round.
        open("AI Sequence.txt", "w").close()
        open("userInput.txt", "w").close()
        delay = 1000
        print("length is", length)
        digits = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
        for _ in range(length):
            digit = random.choice(digits)
            delay += 1000
            print(digit)
            with open("AI Sequence.txt", "a") as f:
                f.write(digit)
            # Flash the chosen button red for 750 ms at its slot in the timeline.
            btn = self.buttons[digit]
            btn.after(delay, lambda b=btn: b.configure(background="red"))
            btn.after(delay + 750, lambda b=btn: b.configure(background="grey"))
# Start a new game: build the main window and play the first round with a
# sequence of length 1 (each successful round restarts with length + 1).
length = 1
root = Tk()
root.geometry("335x400")
x = window(root, (length))
root.mainloop()
11617749761 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Filename :modules.py
@Description :
@Date :2022/02/21 16:55:34
@Author :Arctic Little Pig
@version :1.0
'''
from enum import IntEnum
import torch
import torch.nn as nn
from .resnet import ResNet
from .squeeze import checkerboard_mask
class MaskType(IntEnum):
    """Kinds of coupling masks used by RealNVP coupling layers."""
    CHECKERBOARD = 0  # alternating spatial (pixel) mask
    CHANNEL_WISE = 1  # mask half of the channels
class CouplingLayer(nn.Module):
    """Affine coupling layer in RealNVP.

    One half of the input (selected by the mask) is passed through unchanged
    and used to predict per-element scale `s` and translation `t` for the
    other half via a ResNet.

    :param in_channels: Number of channels in the input.
    :param mid_channels: Number of channels in the `s` and `t` network.
    :param num_blocks: Number of residual blocks in the `s` and `t` network.
    :param mask_type: One of `MaskType.CHECKERBOARD` or `MaskType.CHANNEL_WISE`.
    :param reverse_mask: Whether to invert the mask. Useful for alternating masks.
    """

    def __init__(self, in_channels, mid_channels, num_blocks, mask_type, reverse_mask=False):
        super(CouplingLayer, self).__init__()

        # Save mask info
        self.mask_type = mask_type
        self.reverse_mask = reverse_mask

        # Build scale and translate network; with channel-wise masking only
        # half of the channels are fed to the network.
        if self.mask_type == MaskType.CHANNEL_WISE:
            in_channels //= 2
        self.st_net = ResNet(in_channels, mid_channels, 2 * in_channels,
                             num_blocks=num_blocks, kernel_size=3, padding=1,
                             double_after_norm=(self.mask_type == MaskType.CHECKERBOARD))

        # Learnable scale for s (weight-normalized per-channel rescaling)
        self.rescale = nn.utils.weight_norm(Rescale(in_channels))

    def forward(self, x, sldj=None, invert=True):
        """Apply the coupling transform or its inverse.

        :param x: Input tensor.
        :param sldj: Running sum of log-determinants of the Jacobian;
            accumulated in place only on the ``invert=False`` path.
            NOTE(review): must not be None on that path or ``+=`` raises —
            confirm callers always pass it when invert is False.
        :param invert: If True (the default), apply the inverse transform.
        :return: Tuple ``(x, sldj)`` of the transformed tensor and the
            (possibly updated) log-det accumulator.
        """
        # x shape: [bs, c, w, h] = [256, 3, 32, 32]
        # sldj shape: [bs] = [256]
        if self.mask_type == MaskType.CHECKERBOARD:
            # Checkerboard mask
            # out shape: [1, 1, w, h] = [1, 1, 32, 32]
            b = checkerboard_mask(x.size(2), x.size(
                3), self.reverse_mask, device=x.device)
            # out shape: [bs, c, w, h] = [256, 3, 32, 32]
            x_b = x * b
            # out shape: [bs, 2c, w, h] = [256, 6, 32, 32]
            st = self.st_net(x_b)
            # out shape: [bs, c, w, h], [bs, c, w, h] = [256, 3, 32, 32], [256, 3, 32, 32]
            s, t = st.chunk(2, dim=1)
            # out shape: [bs, c, w, h] = [256, 3, 32, 32]
            s = self.rescale(torch.tanh(s))
            # Zero out s and t on the identity half so it passes through.
            s = s * (1 - b)
            t = t * (1 - b)

            # Scale and translate
            if invert:
                inv_exp_s = s.mul(-1).exp()
                if torch.isnan(inv_exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x = x * inv_exp_s - t
            else:
                exp_s = s.exp()
                if torch.isnan(exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x = (x + t) * exp_s

                # Add log-determinant of the Jacobian
                sldj += s.view(s.size(0), -1).sum(-1)
        else:
            # Channel-wise mask
            if self.reverse_mask:
                x_id, x_change = x.chunk(2, dim=1)
            else:
                # out shape: [256, 6, 16, 16], [256, 6, 16, 16]
                x_change, x_id = x.chunk(2, dim=1)
            # out shape: [256, 12, 16, 16]
            st = self.st_net(x_id)
            # out shape: [256, 6, 16, 16],[256, 6, 16, 16]
            s, t = st.chunk(2, dim=1)
            s = self.rescale(torch.tanh(s))

            # Scale and translate
            if invert:
                inv_exp_s = s.mul(-1).exp()
                if torch.isnan(inv_exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x_change = x_change * inv_exp_s - t
            else:
                exp_s = s.exp()
                if torch.isnan(exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x_change = (x_change + t) * exp_s

                # Add log-determinant of the Jacobian
                sldj += s.reshape(s.size(0), -1).sum(-1)

            # Reassemble halves in the same order they were split.
            if self.reverse_mask:
                x = torch.cat((x_id, x_change), dim=1)
            else:
                # out shape: [256, 12, 16, 16]
                x = torch.cat((x_change, x_id), dim=1)

        return x, sldj
class Rescale(nn.Module):
    """Per-channel rescaling of a feature map.

    Kept as a proper `nn.Module` (rather than a bare parameter) so it can be
    wrapped with `torch.nn.utils.weight_norm`.

    :param num_channels: Number of channels in the input.
    """

    def __init__(self, num_channels):
        super().__init__()
        # One learnable scale per channel, broadcast over batch and space.
        self.weight = nn.Parameter(torch.ones(num_channels, 1, 1))

    def forward(self, x):
        # out shape matches x: the (C, 1, 1) weight broadcasts elementwise.
        return x * self.weight
| Master-PLC/NeuralODE | RealNVP_Density_Estimation_Using_Real_NVP/models/modules.py | modules.py | py | 4,796 | python | en | code | 0 | github-code | 13 |
43760495016 | from main import models
def order_middleware(get_response):
    """Attach the session's pending Order to each request.

    Sets ``request.order`` to the Order referenced by the session's
    ``order_id`` key, or to None when the key is absent or the row no
    longer exists.
    """
    def middleware(request):
        # Default first; overwritten only when a matching order is found.
        request.order = None
        if 'order_id' in request.session:
            try:
                request.order = models.Order.objects.get(
                    id=request.session['order_id'])
            except models.Order.DoesNotExist:
                pass
        return get_response(request)

    return middleware
25139131519 | import catboost
from catboost import CatBoostClassifier, CatBoostRegressor
from sklearn.model_selection import StratifiedKFold, KFold
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class CatBoost:
    """Wrapper around CatBoostRegressor for the DKT answer-prediction task.

    Trains with 10-fold user-level cross-validation, reports per-fold and
    average validation AUC plus feature importances, then predicts on the
    held-out test set.

    :param args: Namespace providing EPOCHS, LR and SEED.
    :param data: Dict with 'train' (DataFrame incl. 'answerCode'),
        'test' (DataFrame) and 'users' (sequence of user ids).
    """

    def __init__(self, args, data):
        super().__init__()
        self.tr = data['train']
        self.test = data['test']
        self.users = data['users']
        # Every feature column except the trailing 'answerCode' target is
        # treated as categorical.
        self.cat_features = list(range(0, self.tr.shape[1] - 1))
        self.epochs = args.EPOCHS
        self.learning_rate = args.LR
        self.seed = args.SEED
        self.model = CatBoostRegressor(iterations=self.epochs, depth=6,
                                       learning_rate=self.learning_rate,
                                       random_seed=42, verbose=50,
                                       eval_metric='AUC', task_type='GPU')

    def train(self):
        """Run 10-fold user-level CV, printing AUC and feature importance."""
        kf = KFold(n_splits=10, shuffle=True, random_state=1)
        auc_scores = []
        # NOTE(review): KFold yields positional indices into self.users;
        # matching them against the 'userID' column assumes user ids coincide
        # with those positions — confirm against how data['users'] is built.
        for fold, (train_idx, valid_idx) in enumerate(kf.split(self.users), start=1):
            print(f'=================================== iter: {fold} ===================================\n')
            tr = self.tr[self.tr['userID'].isin(train_idx)]
            valid = self.tr[self.tr['userID'].isin(valid_idx)]
            # Validate only on each user's final interaction.
            valid = valid[valid['userID'] != valid['userID'].shift(-1)]
            X_train = tr.drop(['answerCode'], axis=1)
            y_train = tr['answerCode']
            X_valid = valid.drop(['answerCode'], axis=1)
            y_valid = valid['answerCode']
            self.model.fit(
                X_train, y_train,
                cat_features=self.cat_features,
                eval_set=(X_valid, y_valid),
                early_stopping_rounds=500
            )
            auc_scores.append(self.model.get_best_score()['validation']['AUC'])
        print(f'average AUC: {sum(auc_scores)/len(auc_scores)}\n')

        # Report feature importance from the last trained fold.
        importance = self.model.get_feature_importance()
        print(f'=================================== Feature Importance ===================================')
        for i, score in enumerate(importance):
            print(f'{X_train.columns[i]}: {score}')
        print('\n')

    def predict(self):
        """Return the trained model's raw predictions for the test set."""
        predicts = self.model.predict(self.test)
        return predicts
24313231665 | import datetime
from magaz.models import Prises
from django.conf import settings
class Cart(object):
    """Session-backed shopping cart keyed by Prises id.

    Each line is stored as ``{'quantity': int, 'good_price': str}`` under
    ``settings.CART_SESSION_ID`` so it persists across requests. The stored
    dict must stay JSON-serializable for the session backend.
    """

    def __init__(self, request):
        """Bind to the session's cart, creating an empty one if absent."""
        self.session = request.session
        cart = request.session.get(settings.CART_SESSION_ID)
        if not cart:
            cart = self.session[settings.CART_SESSION_ID] = {}
        self.cart = cart

    def __len__(self):
        """Total number of units across all cart lines."""
        return sum(item['quantity'] for item in self.cart.values())

    def __iter__(self):
        """Yield cart lines enriched with their Prises instance and totals.

        Enrichment happens on per-item copies so model instances and
        computed totals are never written back into the session dict
        (storing a model object there breaks session serialization).
        """
        prise_map = {
            str(prise.id): prise
            for prise in Prises.objects.filter(id__in=self.cart.keys())
        }
        for good_id, stored in self.cart.items():
            item = dict(stored)
            if good_id in prise_map:
                item['prise'] = prise_map[good_id]
            price = int(item['good_price'])
            item['total_good_price'] = price * item['quantity']
            # NOTE(review): Prises.dolar is assumed to be a class-level
            # exchange rate — confirm it is defined and kept current.
            item['dolar_total_price'] = round(
                (price * item['quantity']) / Prises.dolar, 2)
            yield item

    def add(self, prise, quantity=1, update_quantity=False):
        """Add ``quantity`` units of ``prise``; overwrite the stored
        quantity instead when ``update_quantity`` is True."""
        good_id = str(prise.id)
        if good_id not in self.cart:
            self.cart[good_id] = {'quantity': 0,
                                  'good_price': str(prise.good_price)}
        if update_quantity:
            self.cart[good_id]['quantity'] = quantity
        else:
            self.cart[good_id]['quantity'] += quantity
        self.save()

    def remove(self, prise):
        """Drop ``prise`` from the cart if present."""
        prise_id = str(prise.id)
        if prise_id in self.cart:
            del self.cart[prise_id]
            self.save()

    def save(self):
        """Write the cart back and mark the session dirty so it persists."""
        self.session[settings.CART_SESSION_ID] = self.cart
        self.session.modified = True

    def clear(self):
        """Empty the cart in the session."""
        self.session[settings.CART_SESSION_ID] = {}
        self.session.modified = True

    def get_total_price(self):
        """Cart total in the base currency (integer unit prices)."""
        return sum(int(item['good_price']) * item['quantity']
                   for item in self.cart.values())

    def get_dolar_total_price(self):
        """Cart total converted to dollars, rounded to 2 decimal places."""
        return round(self.get_total_price() / Prises.dolar, 2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.