seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31890262213 | __author__ = 'Advik-B' # advik.b@gmail.com
import os
import sys
from fnmatch import fnmatch
# Third party modules
from send2trash import send2trash as delete
from termcolor import cprint
DEL = False  # becomes True once at least one file has been sent to the trash

# Directory to clean: first CLI argument, falling back to the current directory.
try:
    cwd = sys.argv[1]
except IndexError:
    cwd = os.getcwd()

# The whitelist file must exist before anything is deleted.
# (The unused 'if_allowed_dirs' check on a '.keep' directory was removed.)
if_allowed_files = os.path.isfile(os.path.join(cwd, 'allowed-ext.txt'))

instructions = """
steps:
- create a file named 'allowed-ext.txt' in the current directory you can keep all the allowed extensions here
- run this script
optional:
- you can also pass the path of the directory as an argument to the script
"""
endmsg = """
if I deleted some important files, you can find them in the trash-folder/bin.
also next time make sure you have the 'allowed-ext.txt' file in the current directory. and all the white-listed extensions in it.
"""

if not if_allowed_files:  # idiomatic truthiness instead of '== False'
    cprint('allowed-ext.txt NOT found in current directory.'
           '\nPlease follow the below instructions before cleanup to avoid loss of files', 'red')
    for line in instructions.split('\n'):
        cprint(line, 'yellow')
    sys.exit(2)

cprint('Clean-up is starting.', 'green')

allowed_ext = ['*.md', '*.txt']  # patterns of files that are always kept
allowed_fil = set()              # full paths of files that must survive the cleanup
with open(os.path.join(cwd, 'allowed-ext.txt'), 'r') as f:
    for line in f:
        pattern = line.strip()
        if pattern:
            # Patterns are casefolded because file names are matched casefolded
            # below; previously an upper-case pattern could never match anything.
            allowed_ext.append(pattern.casefold())

PROTECTED = ('.git', '.vscode', '.keep')  # tool folders that are never cleaned
for path, subdirs, files in os.walk(cwd):
    for name in files:
        full_path = os.path.join(path, name)
        if any(part in full_path for part in PROTECTED) or any(
                fnmatch(name.casefold(), pattern) for pattern in allowed_ext):
            allowed_fil.add(full_path)

for path, subdirs, files in os.walk(cwd):
    for name in files:
        pth = os.path.join(path, name)
        # Only files with an extension are candidates; everything not
        # whitelisted above is moved to the trash (recoverable).
        if fnmatch(name, '*.*') and pth not in allowed_fil:
            cprint('Deleting: ' + pth, color='cyan')
            delete(pth)
            DEL = True

cprint('Clean-up is done.', 'green')
if DEL:
    for line in endmsg.split('\n'):
        cprint(line, 'yellow')
| Advik-B/GitHub-Utils | cleanup.py | cleanup.py | py | 2,264 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
39996367304 | import numpy as np
import numpy as np
import pandas as pd
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV

# Labels and examples come as headerless files; examples are one document per
# line, so a delimiter that never occurs ('\t\n') keeps each line intact.
train_labels = pd.read_csv('../resources/train_labels.csv', names=['label'], header=None)
train_examples = pd.read_csv('../resources/train_examples.csv', names=['example'], engine='python', header=None, delimiter='\t\n')
test_examples = pd.read_csv('../resources/test_examples.csv', names=['example'], engine='python', header=None, delimiter='\t\n')

X = train_examples.example
y = train_labels.label

# Bag-of-words -> tf-idf -> linear SVM, tuned over the regularisation constant C.
pipeline = Pipeline([('vectorizer', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('classifier', LinearSVC(loss='squared_hinge', class_weight='balanced', max_iter=10000))
                     ])
param_grid = {'classifier__C': np.arange(0.1, 10, 0.5)}

# Bug fix: the 'iid' keyword was deprecated in scikit-learn 0.22 and removed in
# 0.24; passing it raises TypeError on current versions, so it is dropped here.
search = GridSearchCV(pipeline, param_grid=param_grid, cv=5, n_jobs=-1, refit=True)
search.fit(X, y)

print("\nBEST SCORE", search.best_score_)
print("BEST PARAMETER", search.best_params_)
| mokleit/text-classification-scikit | src/main/svm/find_best_svm_estimator.py | find_best_svm_estimator.py | py | 1,137 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeli... |
16706436069 | import argparse
import ConfigParser
import cStringIO
import gzip
import logging
import json
import os
import sys
import traceback
import urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
def _init_config():
    """Parse command-line options, optionally seeded from a config file.

    A first-pass parser consumes only ``-c/--conf_file``; if given, the
    ``[Defaults]`` section of that file supplies default values for the
    full parser, while explicit command-line flags still win.

    NOTE(review): this module uses Python-2-only APIs (``ConfigParser``,
    ``cStringIO``, ``urllib.urlopen``) -- it will not run under Python 3.
    """
    conf_parser = argparse.ArgumentParser(
        description="Downloads a file from a URL and uploads it to Amazon S3",
        # Turn off help, so we print all options in response to -h
        add_help=False
    )
    conf_parser.add_argument("-c", "--conf_file", help="Specify a config file", action="store",
        metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args()
    defaults = {}
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        # The config file path is resolved relative to the script's own directory.
        config.read([os.path.dirname(sys.argv[0]) + "/" + args.conf_file])
        defaults = dict(config.items("Defaults"))
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.set_defaults(**defaults)
    parser.add_argument("-i", "--input_file", help="The URL to be downloaded.", required=True)
    parser.add_argument("-o", "--output_key", help="The S3 key where the data will be stored.",
        required=True)
    parser.add_argument("-b", "--bucket", help="The S3 bucket where files will be stored.",
        required=True)
    parser.add_argument("-k", "--key", help="The key for accessing S3.", required=True)
    parser.add_argument("-s", "--secret", help="The secret for accessing S3.", required=True)
    parser.add_argument("-m", "--mime_type", help="The mimetype of the file stored on S3.",
        required=True)
    parser.add_argument("-a", "--acl",
        help="The access control permissions for the file stored on S3.", required=True)
    parser.add_argument("-f", "--force", help="Overwrite an existing key.", action="store_true")
    parser.add_argument("-z", "--compress", help="Compress the output before placing it on S3.",
        action='store_true')
    parser.add_argument("--jsonp_callback_function",
        help="If set, the contents of the downloaded file be passed to the named function via jsonp.")
    return parser.parse_args(remaining_argv)
def _init_logging(log_level="INFO"):
    """Initialize logging so that it is pretty and compact."""
    DATE_FMT = "%Y%m%d %H:%M:%S"
    FORMAT = "%(levelname).1s%(asctime)s %(threadName)s %(filename)s:%(lineno)d %(message)s"
    previous_handlers = logging.root.handlers
    # basicConfig is a no-op while any handlers are installed, so clear them first.
    logging.root.handlers = []
    logging.basicConfig(format=FORMAT, datefmt=DATE_FMT, level=log_level)
    if previous_handlers:
        logging.error("Logging handlers initialized prior to _init_logging were dropped.")
def s3_progress(complete, total):
    """Log S3 upload progress as a percentage with one decimal digit."""
    if complete <= 0 or total <= 0:
        return
    fraction = float(complete) / float(total)
    logging.info("Upload %d%% complete" % (round(fraction * 1000, 0) / 10))
def compress_string(s):
    """Gzip a given string and return the compressed payload.

    Uses ``io.BytesIO`` instead of the Python-2-only ``cStringIO`` module
    (so this helper also works on Python 3, where *s* is ``bytes``), and
    guarantees the GzipFile is closed -- closing is what flushes the gzip
    trailer into the buffer.
    """
    import io  # local import: drop-in, version-safe replacement for cStringIO

    zbuf = io.BytesIO()
    zfile = gzip.GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
    try:
        zfile.write(s)
    finally:
        zfile.close()
    return zbuf.getvalue()
def put(source_url, bucket, dest_key, mime_type, acl, compress, jsonp, overwrite=False):
    """Download *source_url* and store its contents under *dest_key* in S3.

    :param source_url: URL whose body will be fetched.
    :param bucket: boto S3 bucket object to upload into.
    :param dest_key: key name to store the data under.
    :param mime_type: Content-Type header for the stored object.
    :param acl: canned ACL applied to the stored object.
    :param compress: gzip the payload (and set Content-Encoding) if true.
    :param jsonp: if set, wrap the payload as ``jsonp(<contents>);``.
    :param overwrite: replace an existing key instead of skipping it.
    :returns: True on success, False if skipped or the upload failed.
    """
    k = Key(bucket)
    k.key = dest_key
    headers = {
        "Content-Type": mime_type
    }
    if k.exists() and not overwrite:
        # Bug fix: the original logged a bare '%s' placeholder with no argument.
        logging.info("Skipping %s - already exists" % dest_key)
        return False
    try:
        logging.info("Downloading from %s" % source_url)
        stream = urllib.urlopen(source_url)
        contents = stream.read()
        logging.info("Uploading to %s" % dest_key)
        # Bug fix: 'prefix' was an undefined name (NameError whenever jsonp was
        # requested); the JSONP wrapper is the callback name passed as *jsonp*.
        string_to_store = "%s(%s);" % (jsonp, contents) if jsonp else contents
        if compress:
            headers["Content-Encoding"] = "gzip"
            string_to_store = compress_string(string_to_store)
        k.set_contents_from_string(string_to_store, headers=headers, cb=s3_progress, num_cb=1000)
        k.set_acl(acl)
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed SystemExit and
        # KeyboardInterrupt); record the traceback so failures are debuggable.
        logging.error("There was an error uploading to %s\n%s"
                      % (dest_key, traceback.format_exc()))
        return False
    logging.info("Finished uploading to %s" % dest_key)
    return True
if __name__ == "__main__":
    # Wire everything together: configure logging, read CLI/config options,
    # open the S3 connection, and perform the download-then-upload transfer.
    _init_logging()
    args = _init_config()
    conn = S3Connection(args.key, args.secret)
    bucket = conn.get_bucket(args.bucket)
    put(args.input_file, bucket, args.output_key, args.mime_type, args.acl, args.compress,
        args.jsonp_callback_function, args.force)
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ConfigParser.SafeConfigParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": ... |
27496332734 |
import re
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Anchored pattern for phone numbers such as '+375 (29) 299-00-00'.
# Compiled once at import time instead of on every call; the redundant
# escape of '-' (which is literal outside a character class) is dropped.
PHONE_PATTERN = re.compile(r"^\+\d{1,3}\s\(\d{2}\)\s\d{3}-\d{2}-\d{2}$")


def is_phone_number(candidate):
    """Return True if *candidate* is a full phone number of the expected form."""
    return PHONE_PATTERN.search(candidate) is not None


if __name__ == "__main__":
    test_string = "+375 (29) 299-00-00"
    if is_phone_number(test_string):
        # The pattern is fully anchored, so the match is the whole string.
        logger.info(f"Found {test_string}")
    else:
        # Bug fix: the original message read "Didn't not found" (double negative).
        logger.info("Not found")
| akinfina-ulyana/lesson | lesson_10/classwork_01.py | classwork_01.py | py | 369 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.search",
... |
71571428834 | import heapq
import sys
from typing import (
Generic,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
TypeVar,
)
from termcolor import cprint
from aoc.utils import Coord2D, Grid
H = TypeVar(
    "H",
    # technically can be anything comparible but obviously python's type system
    # doesn't support that so let's go with numbers because that's all I'm using
    int,
    float,
)
V = TypeVar("V")


class WorkingSet(Generic[H, V]):
    """A priority queue of unique values ordered by a numeric heuristic.

    Combines a binary heap of ``(heuristic, value)`` pairs with a set of the
    stored values so that membership tests are O(1).
    """

    heap: List[Tuple[H, V]]
    heapset: Set[V]

    def __init__(self) -> None:
        self.heap = []
        self.heapset = set()

    def add(self, value: V, heuristic: H) -> None:
        """Insert *value* with priority *heuristic*."""
        self.heapset.add(value)
        heapq.heappush(self.heap, (heuristic, value))

    def replace(self, value: V, heuristic: H) -> None:
        """Change the priority of an already-stored *value*.

        Bug fix: filtering the backing list destroys the heap invariant
        (the survivors are generally not a valid heap), so the remaining
        entries must be re-heapified before pushing the new entry --
        otherwise later pops can come out in the wrong order.
        """
        self.heap = [entry for entry in self.heap if entry[1] != value]
        heapq.heapify(self.heap)
        heapq.heappush(self.heap, (heuristic, value))

    def add_or_replace(self, value: V, heuristic: H) -> None:
        """Insert *value*, or re-prioritise it if already present."""
        if value in self.heapset:
            self.replace(value, heuristic)
        else:
            self.add(value, heuristic)

    def pop(self) -> V:
        """Remove and return the value with the smallest heuristic."""
        _, value = heapq.heappop(self.heap)
        self.heapset.remove(value)
        return value

    def __contains__(self, value: V) -> bool:
        return value in self.heapset

    def __len__(self) -> int:
        return len(self.heap)

    def __bool__(self) -> bool:
        return bool(self.heap)
class Chiton(NamedTuple):
    # One grid cell: its intrinsic risk plus the A* search bookkeeping.
    risk: int
    # Cheapest known cost to reach this cell (sys.maxsize == not yet visited).
    cheapest_cost: int
    # Neighbour through which the cheapest path arrives (None == unknown/start).
    cheapest_neighbour: Optional[Coord2D]
class ChitonCave(Grid[Chiton]):
    """Grid of :class:`Chiton` cells supporting A* search to the bottom-right corner."""

    # Target cell of the search (bottom-right corner of the expanded grid).
    _goal: Coord2D

    @classmethod
    def parse(cls, data: Iterator[str]):
        """Build the cave from digit lines, then tile it (AoC 2021 day 15 part 2).

        Each tile's risks are the original risks shifted by the tile's
        ``x + y`` offset, wrapping values above 9 back into the 1..9 range.
        """
        ret = cls()
        ret._fill(
            (
                (
                    Chiton(
                        risk=int(risk),
                        cheapest_cost=sys.maxsize,
                        cheapest_neighbour=None,
                    )
                    for risk in line
                )
                for line in data
            )
        )
        # NOTE(review): tiles are generated for offsets 0..5 (a 6x6 tiling)
        # while width/height are only multiplied by 5 below, so the sixth
        # row/column of tiles lands outside the announced bounds -- presumably
        # ignored by Grid's bounds handling; confirm against Grid.
        for y in range(6):
            for x in range(6):
                if x == 0 and y == 0:
                    continue
                for coord in ret._iterate_grid():
                    newcoord = Coord2D(
                        x=coord.x + ret.width * x,
                        y=coord.y + ret.height * y,
                    )
                    existing = ret[coord]
                    ret[newcoord] = existing._replace(
                        # wrap shifted risks into 1..9 (a result of 0 means 9)
                        risk=((existing.risk + x + y) % 9) or 9
                    )
        ret.width *= 5
        ret.height *= 5
        ret._goal = Coord2D(ret.width - 1, ret.height - 1)
        return ret

    def _heuristic(self, coord: Coord2D) -> int:
        # A* heuristic: distance to the goal (presumably Manhattan distance,
        # admissible here since every step costs at least 1 -- confirm
        # against Coord2D.distance).
        return coord.distance(self._goal)

    def _get_path(self) -> List[Coord2D]:
        # Walk the cheapest_neighbour back-pointers from the goal; the start
        # cell has a None back-pointer, which terminates the loop. The path
        # is therefore returned goal-first.
        current = self._goal
        path: List[Coord2D] = []
        while current:
            path.append(current)
            _, _, current = self[current]
        return path

    def print(self, path: Optional[Iterable[Coord2D]] = None) -> None:
        """Pretty-print the grid, highlighting *path* (or just start and goal)."""
        if path:
            pathset = set(path)
        else:
            pathset = set((Coord2D(0, 0), self._goal))
        for y in range(self.height):
            for x in range(self.width):
                coord = Coord2D(x, y)
                risk, _, _ = self[coord]
                if coord in pathset:
                    cprint(str(risk), attrs=["bold"], end="")
                else:
                    cprint(str(risk), attrs=["dark"], end="")
            print()
        print()

    def a_star(self) -> List[Coord2D]:
        """Run A* from the top-left corner to the goal and return the path.

        :raises RuntimeError: if the goal is unreachable.
        """
        # self.print()
        start = Coord2D(0, 0)
        self[start] = self[start]._replace(cheapest_cost=0)
        working_set = WorkingSet[int, Coord2D]()
        working_set.add(start, self._heuristic(start))
        while working_set:
            coord = working_set.pop()
            if coord == self._goal:
                path = self._get_path()
                self.print(path)
                return path
            _, cost, _ = self[coord]
            for neighbour in self._get_neighbours(coord, diagonal=False):
                neighbour_risk, neighbour_cost, _ = self[neighbour]
                new_cost = cost + neighbour_risk
                if new_cost < neighbour_cost:
                    # Cheaper route found: record it and (re-)queue the
                    # neighbour keyed by cost-so-far plus heuristic.
                    self[neighbour] = Chiton(neighbour_risk, new_cost, coord)
                    working_set.add_or_replace(
                        neighbour,
                        new_cost + self._heuristic(neighbour),
                    )
        raise RuntimeError("Could not find goal?!")
def main(data: Iterator[str]) -> None:
    """Parse the cave, run A*, and print the total risk along the best path."""
    cave = ChitonCave.parse(data)
    best_path = cave.a_star()
    origin = Coord2D(0, 0)
    # The starting cell's own risk is never counted.
    total_risk = sum(cave[coord].risk for coord in best_path if coord != origin)
    print(total_risk)
| Lexicality/advent-of-code | src/aoc/y2021/day15.py | day15.py | py | 4,908 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "typing.Generic",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_... |
36427808018 | from win32com.client import Dispatch
from tkinter import *
import tkinter as tk
from PIL import Image
from PIL import ImageTk
import os
import re
import random
from threading import Thread
import pythoncom
import time
stu_path = "名单.txt" # 学生名单路径
def speaker(str):
    """Speak the given text aloud via the Windows SAPI voice engine.

    :param str: text to be read out loud
    """
    tts = Dispatch("SAPI.SpVoice")
    tts.Speak(str)
class Rollllcall():
    """Tkinter GUI that randomly picks a student name, with optional speech output."""

    def __init__(self):
        self.win = Tk()
        self.win.title("Python课堂点名器")
        self.win.iconbitmap("image/icon.ico")
        self.win.geometry("750x450")
        self.win.resizable(False, False)  # forbid resizing to avoid layout breakage
        self.start = False  # state of the start button (True while names cycle)
        # add a background image
        img = Image.open('image/back.jpg')
        img = ImageTk.PhotoImage(img, size=(650, 450))
        theLabel = tk.Label(self.win,  # bind to the window
                            # justify=tk.LEFT,  # alignment
                            image=img,  # attach the image
                            compound=tk.CENTER,  # key: use the image as background
                            font=("华文行楷", 20),  # font family and size
                            fg="white",
                            )  # foreground colour
        theLabel.place(x=0, y=0, relwidth=1, relheight=1)
        self.var = tk.StringVar()  # holder for the displayed name
        self.var.set("别紧张")  # initial text
        NameLabel = tk.Label(self.win, textvariable=self.var,  # bind to the window
                             justify=tk.LEFT,  # alignment
                             compound=tk.CENTER,  # key: render over the background
                             font=("华文行楷", 35),  # font family and size
                             fg="SeaGreen",
                             width=10,
                             )  # foreground colour
        NameLabel.place(x=280, y=100)
        # checkbox toggling the text-to-speech announcement
        self.checkVar = IntVar()
        Checkbutton(self.win, text="语音播放", variable=self.checkVar,
                    onvalue=1, offvalue=0, height=0, width=0).place(x=170, y=410)
        tk.Button(self.win, text='编辑学生名单', height=0, width=0, command=self.pop_win).place(x=520, y=408)
        self.theButton = tk.Button(self.win, text="开始", font=("华文行楷", 13), fg="SeaGreen", width=20,
                                   command=self.callback)
        self.theButton.place(x=300, y=360)  # position of the start button
        self.win.mainloop()

    def save_names(self, pop, t):
        """Persist the roster text to disk.

        :param pop: the pop-up window (closed on successful save)
        :param t: the Text widget holding the roster
        """
        names = t.get(0.0, "end")
        if re.search(",", names):
            # Reject rosters that use the full-width (Chinese) comma separator.
            textlabel = tk.Label(pop, text="注意:名单不能使用中文逗号分隔", font=("华文行楷", 12),  # font family and size
                                 fg="red", )
            textlabel.place(y=190, x=10)
        else:
            with open(stu_path, "w", encoding="utf-8") as f:
                f.write(names)
            pop.destroy()

    # edit the student roster in a pop-up window
    def pop_win(self):
        pop = Tk(className='学生名单编辑')  # pop-up window title
        pop.geometry('450x250')  # pop-up size, w x h
        pop.iconbitmap("image/icon.ico")
        pop.resizable(False, False)
        # text widget used to edit the roster
        t = tk.Text(pop, width=61, height='10')
        t.place(x=10, y=10)
        # check whether the roster file already exists
        result = os.path.exists(stu_path)
        if result:
            # it exists: preload its contents into the editor
            with open(stu_path, "r", encoding='utf-8') as f:
                names = f.read().strip("\n\r\t")
            t.insert("end", names)
        textlabel = tk.Label(pop, text="学生名单请以,(英文状态)的逗号分隔:\n如:刘亦菲,周迅", font=("华文行楷", 12),  # font family and size
                             fg="SeaGreen", )
        textlabel.place(y=150, x=10)
        # confirm saves the data; cancel just closes the pop-up
        tk.Button(pop, text='确定', height=0, width=0, command=lambda: self.save_names(pop, t)).place(y=200, x=340)
        tk.Button(pop, text='取消', height=0, width=0, command=pop.destroy).place(y=200, x=400)

    def callback(self):
        # toggle the start-button state
        self.start = False if self.start else True
        # once rolling starts, change the button label
        self.theButton["text"] = "就你了"
        # run the name cycling in a background thread so the UI stays responsive
        self.t = Thread(target=self.mod_stu_name, args=(self.var, self.checkVar))
        self.t.start()

    def mod_stu_name(self, var, checkVar):
        # cycle randomly through the roster until the user stops it
        pythoncom.CoInitialize()  # required to use win32com speech from a worker thread
        if not os.path.exists(stu_path):
            var.set("请添加名单")
            return None
        with open(stu_path, "r", encoding="utf-8") as f:
            names = f.read().strip("\n\t\r,")
        if not names:
            var.set("请添加名单")
            return None
        name_list = names.split(",")
        random_name = ""
        while self.start:
            random_name = random.choice(name_list)
            var.set(random_name)  # flash the candidate names on screen
            time.sleep(0.1)
        self.theButton["text"] = "开始"  # restore the button label after a pick
        # announce the chosen name out loud if requested
        if checkVar.get() == 1:
            speaker(random_name)
if __name__ == '__main__':
    # Launch the roll-call GUI (blocks in the Tk main loop).
    Rollllcall()
| huangguifeng/callroll | rollcall.py | rollcall.py | py | 5,577 | python | zh | code | 1 | github-code | 1 | [
{
"api_name": "win32com.client.Dispatch",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoIm... |
22824109486 | r'''
Module with all structures for defining rings with operators.
Let `\sigma: R \rightarrow R` be an additive homomorphism, i.e., for all elements `r,s \in R`,
the map satisfies `\sigma(r+s) = \sigma(r) + \sigma(s)`. We define the *ring* `R` *with operator*
`\sigma` as the pair `(R, \sigma)`.
Similarly, if we have a set of additive maps `\sigma_1,\ldots,\sigma_n : R \rightarrow R`.
Then we define the *ring* `R` *with operators* `(\sigma_1,\ldots,\sigma_n)` as the tuple
`(R, (\sigma_1,\ldots,\sigma_n))`.
This module provides the framework to define this type of rings with as many operators as
the user wants and we also provide a Wrapper class so we can extend existing ring structures that
already exist in `SageMath <https://www.sagemath.org>`_.
The factory :func:`RingWithOperator` allows the creation of these rings with operators and will determine
automatically in which specified category a ring will belong. For example, we can create the differential
ring `(\mathbb{Q}[x], \partial_x)` or the difference ring `(\mathbb{Q}[x], x \mapsto x + 1)` with the
following code::
sage: from dalgebra import *
sage: dQx = RingWithOperators(QQ[x], lambda p : p.derivative())
sage: sQx = RingWithOperators(QQ[x], lambda p : QQ[x](p)(x=QQ[x].gens()[0] + 1))
Once the rings are created, we can create elements within the ring and apply the corresponding operator::
sage: x = dQx(x)
sage: x.operation()
1
sage: x = sQx(x)
sage: x.operation()
x + 1
We can also create the same ring with both operators together::
sage: dsQx = RingWithOperators(QQ[x], lambda p : p.derivative(), lambda p : QQ[x](p)(x=QQ[x].gens()[0] + 1))
sage: x = dsQx(x)
sage: x.operation(operation=0)
1
sage: x.operation(operation=1)
x + 1
However, these operators have no structure by themselves: `SageMath`_ is not able to distinguish the type
of the operators if they are defined using lambda expressions or callables. This can be seen by the fact that
the factory can not detect the equality on two identical rings::
sage: dQx is RingWithOperators(QQ[x], lambda p : p.derivative())
False
To avoid this behavior, we can set the types by providing an optional list called ``types`` whose elements are
strings with values:
* ``homomorphism``: the operator is interpret as a homomorphism/shift/difference operator.
* ``derivation``: the operator is considered as a derivation.
* ``skew``: the operator is considered as a skew-derivation.
* ``none``: the operator will only be considered as an additive Map without further structure.
We can see that, when setting this value, the ring is detected to be equal::
sage: dQx = RingWithOperators(QQ[x], lambda p : p.derivative(), types=["derivation"])
sage: dQx is RingWithOperators(QQ[x], lambda p : p.derivative(), types=["derivation"])
True
sage: # Since we have one variable, the built-in `diff` also work
sage: dQx is RingWithOperators(QQ[x], diff, types=["derivation"])
True
sage: # We can also use elements in the derivation module
sage: dQx is RingWithOperators(QQ[x], QQ[x].derivation_module().gens()[0], types=["derivation"])
True
Also, we can detect this equality when adding operators sequentially instead of at once::
sage: dsQx = RingWithOperators(QQ[x],
....: lambda p : p.derivative(),
....: lambda p : QQ[x](p)(x=QQ[x].gens()[0] + 1),
....: types = ["derivation", "homomorphism"]
....: )
sage: dsQx is RingWithOperators(dQx, lambda p : QQ[x](p)(x=QQ[x].gens()[0] + 1), types=["homomorphism"])
True
For specific types of operators as *derivations* or *homomorphism*, there are other functions where the ``types`` argument can be skipped
taking the corresponding value by default::
sage: dQx is DifferentialRing(QQ[x], lambda p : p.derivative())
True
sage: dsQx is DifferenceRing(DifferentialRing(QQ[x], lambda p : p.derivative()), lambda p : QQ[x](p)(x=QQ[x].gens()[0] + 1))
True
We can also have more complexes structures with different types of operators::
sage: R.<x,y> = QQ[] # x is the usual variable, y is an exponential
sage: dx, dy = R.derivation_module().gens(); d = dx + y*dy
sage: DR = DifferentialRing(R, d)
sage: # We add a special homomorphism where the two generators are squared but QQ is fixed
sage: DSR = DifferenceRing(DR, R.Hom(R)([x^2, y^2]))
sage: DSR.noperators()
2
sage: DSR.operator_types()
('derivation', 'homomorphism')
We can see that these operator **do not commute**::
sage: x = DSR(x); y = DSR(y)
sage: x.difference().derivative()
2*x
sage: x.derivative().difference()
1
sage: y.difference().derivative()
2*y^2
sage: y.derivative().difference()
y^2
Finally, this module also allows the definition of skew-derivations for any ring. This requires the use
of derivation modules with twist (see :sageref:`sage.rings.derivations <rings/sage/rings/derivation>`)::
sage: R.<x,y> = QQ[]
sage: s = R.Hom(R)([x-y, x+y])
sage: td = R.derivation_module(twist=s)(x-y)
sage: tR = RingWithOperators(R, s, td, types=["homomorphism", "skew"])
sage: x,y = tR.gens()
sage: (x*y).skew() == x.skew()*y + x.shift()*y.skew()
True
sage: (x*y).skew() == x.skew()*y.shift() + x*y.skew()
True
AUTHORS:
- Antonio Jimenez-Pastor (:git:`GitHub <Antonio-JP>`)
'''
# ****************************************************************************
# Copyright (C) 2023 Antonio Jimenez-Pastor <ajpa@cs.aau.dk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import annotations
from typing import Callable, Collection
from sage.all import ZZ, latex, Parent
from sage.categories.all import Morphism, Category, Rings, CommutativeRings, CommutativeAdditiveGroups
from sage.categories.morphism import IdentityMorphism, SetMorphism # pylint: disable=no-name-in-module
from sage.categories.pushout import ConstructionFunctor, pushout
from sage.misc.all import abstract_method, cached_method
from sage.rings.morphism import RingHomomorphism_im_gens # pylint: disable=no-name-in-module
from sage.rings.ring import Ring, CommutativeRing #pylint: disable=no-name-in-module
from sage.rings.derivation import RingDerivationModule
from sage.structure.element import parent, Element #pylint: disable=no-name-in-module
from sage.structure.factory import UniqueFactory #pylint: disable=no-name-in-module
from sage.symbolic.ring import SR #pylint: disable=no-name-in-module
# Pre-instantiated singleton categories, created once at import time so the
# category checks and constructions below do not rebuild them on every use.
_Rings = Rings.__classcall__(Rings)
_CommutativeRings = CommutativeRings.__classcall__(CommutativeRings)
_CommutativeAdditiveGroups = CommutativeAdditiveGroups.__classcall__(CommutativeAdditiveGroups)
####################################################################################################
###
### DEFINING THE CATEGORY FOR RINGS WITH OPERATORS
###
####################################################################################################
class RingsWithOperators(Category):
r'''
Category for representing rings with operators.
Let `\sigma: R \rightarrow R` be an additive homomorphism, i.e., for all elements `r,s \in R`,
the map satisfies `\sigma(r+s) = \sigma(r) + \sigma(s)`. We define the *ring* `R` *with operator*
`\sigma` as the pair `(R, \sigma)`.
Similarly, if we have a set of additive maps `\sigma_1,\ldots,\sigma_n : R \rightarrow R`.
Then we define the *ring* `R` *with operators* `(\sigma_1,\ldots,\sigma_n)` as the tuple
`(R, (\sigma_1,\ldots,\sigma_n))`.
This category defines the basic methods for these rings and their elements
'''
## Defining a super-category
def super_categories(self):
    r'''
    Rings with operators are, first of all, rings.
    '''
    return [_Rings]
## Defining methods for the Parent structures of this category
class ParentMethods: #pylint: disable=no-member
##########################################################
### METHODS RELATED WITH THE OPERATORS
##########################################################
### 'generic'
@abstract_method
def operators(self) -> Collection[Morphism]:
    r'''
    Method to get the collection of operators that are defined over the ring.

    These operators are maps from ``self`` to ``self`` that compute the application
    of each operator over the elements of ``self``.

    Concrete parents must implement this method; this abstract declaration
    raises :class:`NotImplementedError`.
    '''
    raise NotImplementedError("Method 'operators' need to be implemented")
def noperators(self) -> int:
    r'''
    Number of operators defined over this ring.
    '''
    ops = self.operators()
    return len(ops)
def operation(self, element: Element, operator: int = None) -> Element:
    r'''
    Apply one of the operators of this ring to an element.

    INPUT:

    * ``element``: element of ``self`` the operator will be applied to.
    * ``operator``: index of the operator to apply; it may be omitted only
      when the ring has exactly one operator.

    OUTPUT:

    The image `f(x)` where `x` is ``element`` and `f` the chosen operator.
    An :class:`IndexError` is raised when the index is missing while several
    operators are defined, or when it is out of range.
    '''
    if operator is None:
        if self.noperators() != 1:
            raise IndexError("An index for the operation must be provided when having several operations")
        operator = 0
    return self.operators()[operator](element)
@abstract_method
def operator_types(self) -> tuple[str]:
    r'''
    Method to get the types of the operators.

    The only condition for `\sigma: R \rightarrow R` to be a valid operator is that it is
    an additive homomorphism. However, the behavior of `\sigma` with respect to the multiplication
    of `R` categorize `\sigma` into several possibilities:

    * "none": no condition is known over this method. This will disallow some extension operations.
    * "homomorphism": the map `\sigma` is an homomorphism, i.e., for all `r, s \in R` it satisfies
      `\sigma(rs) = \sigma(r)\sigma(s)`.
    * "derivative": the map `\sigma` satisfies Leibniz rule, i.e., for all `r, s \in R` it satisfies
      `\sigma(rs) = \sigma(r)s + r\sigma(s)`.
    * "skew": the map `\sigma` satisfies the skew-Leibniz rule, i.e., there is an homomorphism `\delta`
      such for all `r, s \in R` it satisfies `\sigma(rs) = \sigma(r)s + \delta(r)\sigma(s)`.

    This method returns a tuple (sorted as the output of :func:`operators`) with the types of each of the
    operators.

    Concrete parents must implement this method; this abstract declaration
    raises :class:`NotImplementedError`.
    '''
    raise NotImplementedError("Method 'operator_types' need to be implemented")
### 'derivation'
@cached_method
def derivations(self) -> Collection[DerivationMap]:
    r'''
    Collection of the derivations among the operators of this ring.

    Offers the same interface as :func:`operators`, restricted to the
    operators whose declared type is ``"derivation"``. Returns an empty
    tuple when no derivation is declared.
    '''
    paired = zip(self.operators(), self.operator_types())
    return tuple(op for (op, kind) in paired if kind == "derivation")
def nderivations(self) -> int:
    r'''
    Number of derivations defined over this ring.
    '''
    ders = self.derivations()
    return len(ders)
def has_derivations(self) -> bool:
    r'''
    Whether at least one derivation is defined over the ring.
    '''
    return self.nderivations() != 0
def is_differential(self) -> bool:
    r'''
    Whether the ring is differential, i.e., every operator is a derivation.
    '''
    return self.nderivations() == self.noperators()
def derivative(self, element: Element, derivation: int = None) -> Element:
    r'''
    Apply one of the derivations of this ring to an element.

    Behaves like :func:`~RingsWithOperators.ParentMethods.operation`,
    restricted to the derivations of the ring.
    '''
    if derivation is None:
        if self.nderivations() != 1:
            raise IndexError("An index for the derivation must be provided when having several derivations")
        derivation = 0
    return self.derivations()[derivation](element)
### 'difference'
@cached_method
def differences(self) -> Collection[Morphism]:
    r'''
    Collection of the differences (homomorphisms) among the operators of this ring.

    Offers the same interface as :func:`operators`, restricted to the
    operators whose declared type is ``"homomorphism"``. Returns an empty
    tuple when no difference is declared.
    '''
    paired = zip(self.operators(), self.operator_types())
    return tuple(op for (op, kind) in paired if kind == "homomorphism")
def ndifferences(self) -> int:
    r'''
    Number of differences defined over this ring.
    '''
    diffs = self.differences()
    return len(diffs)
def has_differences(self) -> bool:
    r'''
    Whether at least one difference is defined over the ring.
    '''
    return self.ndifferences() != 0
def is_difference(self) -> bool:
    r'''
    Whether the ring is a difference ring, i.e., every operator is a homomorphism.
    '''
    return self.ndifferences() == self.noperators()
def difference(self, element: Element, difference: int = None) -> Element:
    r'''
    Apply one of the differences of this ring to an element.

    Behaves like :func:`~RingsWithOperators.ParentMethods.operation`,
    restricted to the differences of the ring.
    '''
    if difference is None:
        if self.ndifferences() != 1:
            raise IndexError("An index for the difference must be provided when having several differences")
        difference = 0
    return self.differences()[difference](element)
def shift(self, element: Element, shift: int = None) -> Element:
    r'''
    Alias for :func:`~RingsWithOperators.ParentMethods.difference`.
    '''
    return self.difference(element, difference=shift)
### 'skews'
@cached_method
def skews(self) -> Collection[Morphism]:
    r'''
    Collection of the skew-derivations among the operators of this ring.

    Offers the same interface as :func:`operators`, restricted to the
    operators whose declared type is ``"skew"``. Returns an empty tuple
    when no skew-derivation is declared.
    '''
    paired = zip(self.operators(), self.operator_types())
    return tuple(op for (op, kind) in paired if kind == "skew")
def nskews(self) -> int:
    r'''
    Return the number of skew-derivations defined over the ring.
    '''
    declared = self.skews()
    return len(declared)
def has_skews(self) -> bool:
    r'''
    Method to know if there are skew-derivations defined over the ring.
    '''
    # BUG FIX: this used to call ``self.ndifferences()`` (copy-paste from
    # ``has_differences``), so it reported differences instead of skews.
    return self.nskews() > 0
def is_skew(self) -> bool:
    r'''
    Check whether the ring is skewed, i.e., all its operators are skew-derivations.
    '''
    return self.nskews() == self.noperators()
def skew(self, element: Element, skew: int = None) -> Element:
    r'''
    Apply one of the skew-derivations of the ring to ``element``.

    Works exactly as :func:`~RingsWithOperators.ParentMethods.operation` does
    for a generic operator. When exactly one skew-derivation exists, the index
    may be omitted; otherwise an explicit index is required.
    '''
    if skew is None:
        # an implicit index is only unambiguous when there is a single skew
        if self.nskews() != 1:
            raise IndexError("An index for the skew must be provided when having several skews")
        skew = 0
    return self.skews()[skew](element)
##########################################################
### OTHER METHODS
##########################################################
@abstract_method
def operator_ring(self) -> Ring:
    r'''
    Method to get the operator ring of ``self``.

    When we consider a ring with operators, we can always consider a new (usually non-commutative)
    ring where we extend ``self`` polynomially with all the operators and its elements represent
    new operators created from the operators defined over ``self``.

    This method return this new structure.
    '''
    # abstract: concrete parents must override this; the explicit error covers
    # callers that bypass the category framework
    raise NotImplementedError("Method 'operator_ring' need to be implemented")
def operators_commute(self, op1: int, op2: int, points: int = 10, *args, **kwds) -> bool:
    r'''
    Method to check whether two operators of the ring commute.

    This method is not deterministic (meaning that it may return ``True`` even
    when the two operators do not fully commute) but it tries to check in a fix number
    of random elements if the two operators actually commute.

    It also try to see if the operators commute in the generators of the ring.

    INPUT:

    * ``op1``: index of the first operator to check.
    * ``op2``: index of the second operator to check.
    * ``points``: number of random points to be selected.
    * ``args``: arguments to be passed to the ``random_element`` method.
    * ``kwds``: arguments to be passed to the ``random_element`` method.

    OUTPUT:

    ``True`` if all the tests indicates the operators commute, ``False`` otherwise.
    '''
    # replace the given indices by the actual operator maps
    op1 = self.operators()[op1]; op2 = self.operators()[op2]
    # collect test elements: generators of this ring and of its tower of base rings
    to_check = list(self.gens()); current = self.base()
    # descend the base-ring tower; `1` among the generators signals the bottom
    # of the tower (e.g. ZZ/QQ), so we stop collecting there
    while current.ngens() > 0 and (not 1 in to_check):
        to_check.extend([self.element_class(self, el) for el in current.gens()])
        current = current.base()
    # add ``points`` random samples to strengthen the (probabilistic) check
    to_check.extend(self.random_element(*args, **kwds) for _ in range(points))
    # the operators are declared commuting iff they commute on every sample
    return all(op1(op2(element)) == op2(op1(element)) for element in to_check)
def all_operators_commute(self, points: int = 10, *args, **kwds):
    r'''
    Method to check whether all operators of the ring commute pairwise.

    This method is not deterministic (meaning that it may return ``True`` even
    when two operators do not fully commute): it checks commutation on a fixed
    number of random elements and on the generators of the ring.

    See :func:`operators_commute` for further information

    INPUT:

    * ``points``: number of random points to be selected.
    * ``args``: arguments to be passed to the ``random_element`` method.
    * ``kwds``: arguments to be passed to the ``random_element`` method.

    OUTPUT:

    ``True`` if all the tests indicates the operators commute, ``False`` otherwise.

    EXAMPLES::

        sage: from dalgebra import *
        sage: R.<x> = QQ[]; d = diff; s = R.Hom(R)(x+1)
        sage: dsR = DifferenceRing(DifferentialRing(R, d), s)
        sage: dsR.all_operators_commute()
        True

        sage: R.<x,y> = QQ[]
        sage: dx,dy = R.derivation_module().gens(); d = dx + y*dy
        sage: s = R.Hom(R)([x + 1, y^2])
        sage: dsR = DifferenceRing(DifferentialRing(R, d), s)
        sage: dsR.all_operators_commute()
        False
    '''
    total = self.noperators()
    # check every unordered pair (first < second); stop at the first failure
    for first in range(total):
        for second in range(first + 1, total):
            if not self.operators_commute(first, second, points, *args, **kwds):
                return False
    return True
@abstract_method
def constant_ring(self, operation: int = 0) -> Parent:
    r'''
    Method to obtain the constant ring of a given operation.

    The meaning of a ring of constants depends on the type of operator that
    we are considering:

    * "homomorphism": the elements that are fixed by the operator.
    * "derivation": the elements that goes to zero with the operator.
    * "skew": the elements that goes to zero with the operator.
    * "none": it makes no sense to talk about constant for these operators.
    '''
    # abstract: concrete parents must override this method
    raise NotImplementedError("Method 'constant_ring' not implemented")
## Defining methods for the Element structures of this category
class ElementMethods: #pylint: disable=no-member
    ##########################################################
    ### APPLICATION METHODS
    ##########################################################
    def _apply_parent_method(self, name: str, index: int, times: int) -> Element:
        r'''
        Common implementation for :func:`operation`, :func:`derivative`,
        :func:`difference` and :func:`skew`.

        Validates ``times`` and applies the parent method called ``name``
        (with operator index ``index``) repeatedly to ``self``. The previous
        per-method implementations duplicated this logic and recursed once per
        application (so large ``times`` could hit the recursion limit); this
        helper iterates instead, with identical results.
        '''
        if(not times in ZZ or times < 0):
            raise ValueError("The argument ``times`` must be a non-negative integer")
        result = self
        if times > 0:
            applier = getattr(self.parent(), name)
            for _ in range(times):
                result = applier(result, index)
        return result

    def operation(self, operation : int = None, times : int = 1) -> Element:
        r'''
        Apply an operation to ``self`` a given amount of times.

        This method applies repeatedly an operation defined in the parent of ``self``.
        See :func:`~RingsWithOperators.ParentMethods.operation` for further information.
        '''
        return self._apply_parent_method("operation", operation, times)

    def derivative(self, derivation: int = None, times: int = 1) -> Element:
        r'''
        Apply a derivation to ``self`` a given amount of times.

        This method applies repeatedly a derivation defined in the parent of ``self``.
        See :func:`~RingsWithOperators.ParentMethods.derivative` for further information.
        '''
        return self._apply_parent_method("derivative", derivation, times)

    def difference(self, difference: int = None, times: int = 1) -> Element:
        r'''
        Apply a difference to ``self`` a given amount of times.

        This method applies repeatedly a difference defined in the parent of ``self``.
        See :func:`~RingsWithOperators.ParentMethods.difference` for further information.
        '''
        return self._apply_parent_method("difference", difference, times)

    def shift(self, shift: int = None, times: int = 1) -> Element:
        r'''
        Alias for :func:`~RingsWithOperators.ElementMethods.difference`.
        '''
        return self.difference(shift, times)

    def skew(self, skew: int = None, times: int = 1) -> Element:
        r'''
        Apply a skew-derivation to ``self`` a given amount of times.

        This method applies repeatedly a skew-derivation defined in the parent of ``self``.
        See :func:`~RingsWithOperators.ParentMethods.skew` for further information.
        '''
        return self._apply_parent_method("skew", skew, times)

    ##########################################################
    ### BOOLEAN METHODS
    ##########################################################
    def is_constant(self, operation: int = 0):
        r'''
        Method to check whether an element is a constant with respect to one operator.

        INPUT:

        * ``operation``: index defining the operation we want to check.

        OUTPUT:

        A boolean value with ``True`` is the element is a constant (see
        :func:`~RingsWithOperators.ParentMethods.constant_ring` for further information
        on what is a constant depending on the type of operator).

        REMARK: this method do not require the implementation on :func:`~RingsWithOperators.ParentMethods.constant_ring`
        on its parent structure.

        EXAMPLES::

            sage: from dalgebra import *
            sage: R = DifferentialRing(QQ[x], diff)
            sage: p = R(3)
            sage: p.is_constant()
            True
            sage: p = R(x^3 - 3*x + 1)
            sage: p.is_constant()
            False

        Some interesting constants may arise unexpectedly when adding other derivations::

            sage: R.<x,y> = QQ[]
            sage: dx, dy = R.derivation_module().gens(); d = y*dx - x*dy
            sage: dR = DifferentialRing(R, d)
            sage: x,y = dR.gens()
            sage: x.is_constant()
            False
            sage: y.is_constant()
            False
            sage: (x^2 + y^2).is_constant()
            True
        '''
        ttype = self.parent().operator_types()[operation]
        if ttype == "homomorphism":
            # constants of a homomorphism are its fixed points
            result = self.operation(operation=operation) == self
        elif ttype in ("derivation", "skew"):
            # constants of a (skew-)derivation are the elements mapped to zero
            result = self.operation(operation=operation) == self.parent().zero()
        else:
            raise ValueError(f"The operation {operation} has not a good type defined")
        return result
# methods that all morphisms involving differential rings must implement
class MorphismMethods:
    r'''
    Methods available to all morphisms between rings in this category.
    Currently no extra behavior is required.
    '''
    pass
# Shared instance of the category, created once so every user gets the same object
_RingsWithOperators = RingsWithOperators.__classcall__(RingsWithOperators)
####################################################################################################
###
### DEFINING THE FACTORY FOR THE CREATION OF WRAPPED RINGS
###
####################################################################################################
class RingWithOperatorFactory(UniqueFactory):
    r'''
    Factory to create wrappers around existing rings.

    The :class:`RingsWithOperatorFactory` allows to create wrapper around existing rings
    with a predefined set of operators. For doing so, we have two possibilities:

    INPUT:

    * ``base``: a commutative ring to which we will add operators.
    * ``operators``: a list with operators that will be added to ``base``. It may be one of the following:
      - An additive callable: a :class:`AdditiveMap` will be created for it.
      - An additive homomorphism: a :class:`Morphism` with appropriate domain and codomain.
      - A ring homomorphism: a :class:`Morphism` in the appropriate Hom set.
      - A (skew)-derivation: an element of a module of (skew)-derivations. The corresponding :class:`SkewMap`
        will be created for it.
    * ``types`` (optional): if given, it must be a list with the corresponding types of the operators.
      We will use this information to create different types of :class:`Morphism`.

    SPECIAL CASES:

    If this is used over another wrapped ring, this Factory will create an extended version where the
    new operators are concatenated to the previous operators.

    OUTPUT:

    A :class:`RingWithOperators_Wrapper` with the new ring with operators.
    '''
    def create_key(self, base : CommutativeRing, *operators : Callable, **kwds):
        r'''
        Normalize the input into a hashable key ``(base, operators, types)``.
        '''
        # checking the arguments
        if len(operators) < 1:
            raise ValueError("At least one operator must be given.")
        elif len(operators) == 1 and isinstance(operators[0], Collection):
            # allow passing one collection of operators instead of varargs
            operators = operators[0]
        operators = list(operators)
        types = list(kwds.pop("types", len(operators)*["none"]))

        if isinstance(base, RingWithOperators_Wrapper):
            # extending an existing wrapper: concatenate old and new operators
            operators = list(base.construction()[0].operators) + operators
            types = list(base.construction()[0].types) + types
            base = base.wrapped

        # we convert the input into a common standard to create an appropriate key
        for (i, (operator, ttype)) in enumerate(zip(operators, types)):
            if ttype == "none":
                ## We decide the structure depending on the type of object
                if operator in base.Hom(base): # it is an homomorphism - we do nothing
                    types[i] = "homomorphism"
                    new_operator = operator
                elif isinstance(parent(operator), RingDerivationModule):
                    if operator.parent().twisting_morphism() is None: # derivation without twist
                        new_operator = DerivationMap(
                            base,
                            operator
                        )
                        types[i] = "derivation"
                    else:
                        new_operator = SkewMap(
                            base,
                            operator.parent().twisting_morphism(),
                            operator
                        )
                        types[i] = "skew"
                elif isinstance(operator, Callable):
                    # plain callable: wrap it as an additive map (type stays "none")
                    new_operator = AdditiveMap(
                        base,
                        operator
                    )
                else:
                    raise TypeError(f"All operators must be callables. Found {operator}")
            elif ttype == "homomorphism":
                # build a ring homomorphism from a callable, recursing through the
                # tower of base rings to also define the map on coefficients
                def hom_from_callable(base, func):
                    if base.ngens() > 0 and (not 1 in base.gens()):
                        base_map = hom_from_callable(base.base(), func)
                    else:
                        base_map = None
                    hom_set = base.Hom(base)
                    return hom_set([base(func(gen)) for gen in base.gens()], base_map = base_map)
                new_operator = hom_from_callable(base, operator)
            elif ttype == "derivation":
                # build the derivation from the images of the generators
                der_module = base.derivation_module()
                new_operator = DerivationMap(
                    base,
                    sum((operator(base_gen)*der_gen for (base_gen,der_gen) in zip(base.gens(),der_module.gens())), der_module.zero())
                )
            elif ttype == "skew":
                if not isinstance(parent(operator), RingDerivationModule):
                    raise NotImplementedError("Building skew-derivation from callable not implemented")
                twist = operator.parent().twisting_morphism()
                twist = base.Hom(base).one() if twist is None else twist
                new_operator = SkewMap(base, twist, operator)
            else:
                # FIX: an unknown type used to fall through and crash below with an
                # obscure NameError on ``new_operator``; fail fast with the same
                # error message used by ``RingWithOperators_Wrapper.__init__``
                raise ValueError(f"Invalid type provided -> {ttype}")
            if new_operator != operator:
                operators[i] = new_operator
        return tuple([base, tuple(operators), tuple(types)])

    def create_object(self, _, key):
        r'''
        Build the actual wrapper parent from a normalized key.
        '''
        base, operators, types = key
        return RingWithOperators_Wrapper(base, *operators, types=types)
# Public factory instance: used as ``RingWithOperators(base, *operators, types=...)``
RingWithOperators = RingWithOperatorFactory("dalgebra.ring_w_operator.ring_w_operator.RingWithOperator")
def DifferentialRing(base : CommutativeRing, *operators : Callable):
    r'''
    Shortcut calling the :class:`RingWithOperatorFactory` with every type set to "derivation".

    See documentation on :class:`RingWithOperatorFactory` for further information.
    '''
    # checking the arguments
    if len(operators) == 0:
        raise ValueError("At least one operator must be given.")
    if len(operators) == 1 and isinstance(operators[0], Collection):
        # a single collection of operators may be passed instead of varargs
        operators = operators[0]
    return RingWithOperators(base, *operators, types=len(operators)*["derivation"])
def DifferenceRing(base: CommutativeRing, *operators : Callable):
    r'''
    Shortcut calling the :class:`RingWithOperatorFactory` with every type set to "homomorphism".

    See documentation on :class:`RingWithOperatorFactory` for further information.
    '''
    # checking the arguments
    if len(operators) == 0:
        raise ValueError("At least one operator must be given.")
    if len(operators) == 1 and isinstance(operators[0], Collection):
        # a single collection of operators may be passed instead of varargs
        operators = operators[0]
    return RingWithOperators(base, *operators, types=len(operators)*["homomorphism"])
####################################################################################################
###
### DEFINING THE ELEMENT AND PARENT FOR WRAPPED RINGS
###
####################################################################################################
class RingWithOperators_WrapperElement(Element):
    r'''
    Element of a :class:`RingWithOperators_Wrapper`: a thin shell around an
    element of the wrapped commutative ring, stored in ``self.wrapped``.
    '''
    def __init__(self, parent, element):
        if(not isinstance(parent, RingWithOperators_Wrapper)):
            raise TypeError("An element created from a non-wrapper parent")
        elif(not element in parent.wrapped):
            raise TypeError("An element outside the parent [%s] is requested" %parent)

        Element.__init__(self, parent=parent)
        # the actual element of the wrapped (plain) commutative ring
        self.wrapped = element

    # Arithmetic methods: each operation is delegated to the wrapped elements
    # and the result is wrapped back into this parent
    def _add_(self, x) -> RingWithOperators_WrapperElement:
        if parent(x) != self.parent(): # this should not happened
            x = self.parent().element_class(self.parent(), self.parent().base()(x))
        return self.parent().element_class(self.parent(), self.wrapped + x.wrapped)
    def _sub_(self, x) -> RingWithOperators_WrapperElement:
        if parent(x) != self.parent(): # this should not happened
            x = self.parent().element_class(self.parent(), self.parent().base()(x))
        return self.parent().element_class(self.parent(), self.wrapped - x.wrapped)
    def _neg_(self) -> RingWithOperators_WrapperElement:
        return self.parent().element_class(self.parent(), -self.wrapped)
    def _mul_(self, x) -> RingWithOperators_WrapperElement:
        if parent(x) != self.parent(): # this should not happened
            x = self.parent().element_class(self.parent(), self.parent().base()(x))
        return self.parent().element_class(self.parent(), self.wrapped * x.wrapped)
    # _rmul_/_lmul_ use the same product since the wrapped ring is commutative
    def _rmul_(self, x) -> RingWithOperators_WrapperElement:
        if parent(x) != self.parent(): # this should not happened
            x = self.parent().element_class(self.parent(), self.parent().base()(x))
        return self.parent().element_class(self.parent(), self.wrapped * x.wrapped)
    def _lmul_(self, x) -> RingWithOperators_WrapperElement:
        if parent(x) != self.parent(): # this should not happened
            x = self.parent().element_class(self.parent(), self.parent().base()(x))
        return self.parent().element_class(self.parent(), self.wrapped * x.wrapped)
    def __pow__(self, n) -> RingWithOperators_WrapperElement:
        return self.parent().element_class(self.parent(), self.wrapped ** n)

    def __eq__(self, x) -> bool:
        if x is None: return False
        # compare inside the pushout (common cover) of both parents
        r = pushout(self.parent(), parent(x))
        if isinstance(r, RingWithOperators_Wrapper):
            return self.wrapped == r(x).wrapped
        return r(self) == r(x)

    def is_zero(self) -> bool:
        return self.wrapped == 0
    def is_one(self) -> bool:
        return self.wrapped == 1

    ## Other magic methods: all delegated to the wrapped element
    def __hash__(self) -> int:
        return hash(self.wrapped)
    def __str__(self) -> str:
        return str(self.wrapped)
    def __repr__(self) -> str:
        return repr(self.wrapped)
    def _latex_(self) -> str:
        return latex(self.wrapped)
class RingWithOperators_Wrapper(CommutativeRing):
    r'''
    Class for wrapping a Commutative ring and add operators over it.

    This class allows the user to translate a Commutative ring with some operations to
    the category of :class:`RingsWithOperators` preserving as many operations and properties
    of the original ring as possible, but adding the new functionality in the category.

    We do not recommend to use this class by itself. It should be created using the
    corresponding factory (see :class:`RingWithOperatorFactory` and its defined instance in
    ``dalgebra.ring_w_operator.ring_w_operator.RingWithOperators``).

    INPUT:

    * ``base``: the :class:`CommutativeRing` that will be wrapped.
    * ``operators``: a valid :class:`sage.categories.map.Map` to define an operator over ``self``.
    * ``types`` (optional): a list with the types (see :func:`RingsWithOperators.ParentMethods.operator_types`
      for further information). If nothing is given, the list will be automatically computed.
    * ``category`` (optional): argument from the category framework to allow further flexibility.
    '''
    Element = RingWithOperators_WrapperElement

    def __init__(self,
        base : CommutativeRing,
        *operators : Morphism | Collection[Morphism],
        types : Collection[str] = None,
        category = None
    ):
        #########################################################################################################
        ### CHECKING THE ARGUMENTS
        ### 'base'
        if not base in _CommutativeRings:
            raise TypeError("Only commutative rings can be wrapped as RingWithOperators")

        ### 'operators'
        if len(operators) == 1 and isinstance(operators[0], (list,tuple)):
            operators = operators[0]
        if any(not isinstance(operator, Morphism) for operator in operators):
            raise TypeError("All the given operators must be Maps")
        if any(operator.domain() != operator.codomain() or operator.domain() != base for operator in operators):
            # FIX: corrected typo in the error message ("bu" -> "be")
            raise TypeError("The operators must be maps from and to the commutative ring given by 'base'")

        ### 'types'
        if types is None: # we compute the types using the maps
            types = []
            for operator in operators:
                if isinstance(operator, DerivationMap): types.append("derivation")
                elif isinstance(operator, SkewMap): types.append("skew")
                elif operator.category_for().is_subcategory(_CommutativeRings): types.append("homomorphism")
                else: types.append("none")
        else: # we check the operators behave as requested
            if not isinstance(types, (list, tuple)) or len(types) != len(operators):
                raise TypeError("The types must be a list of the same length of the operators")
            for operator, ttype in zip(operators, types):
                if ttype == "none":
                    if not operator.category_for().is_subcategory(_CommutativeAdditiveGroups):
                        raise ValueError(f"An operator invalid for type 'none' -> {operator}")
                elif ttype == "homomorphism":
                    if not operator.category_for().is_subcategory(_CommutativeRings):
                        raise ValueError(f"An operator invalid for type 'homomorphism' -> {operator}")
                elif ttype == "derivation":
                    if not isinstance(operator, DerivationMap):
                        raise ValueError(f"An operator invalid for type 'derivation' -> {operator}")
                elif ttype == "skew":
                    if not isinstance(operator, SkewMap):
                        raise ValueError(f"An operator invalid for type 'skew' -> {operator}")
                else:
                    raise ValueError(f"Invalid type provided -> {ttype}")
        self.__types = tuple(types)

        #########################################################################################################
        # CREATING CATEGORIES
        categories = [_RingsWithOperators, base.category()]
        if(isinstance(category, (list, tuple))):
            categories += list(category)
        elif(category is not None): # idiomatic identity check instead of `!= None`
            categories.append(category)

        #########################################################################################################
        ### CALLING THE SUPER AND ARRANGING SOME CONVERSIONS
        super().__init__(base.base(), category=tuple(categories))
        self.__wrapped = base

        # registering conversion to simpler structures down the base-ring tower
        current = self.base()
        morph = RingWithOperators_Wrapper_SimpleMorphism(self, current)
        current.register_conversion(morph)
        while(not(current.base() == current)):
            current = current.base()
            morph = RingWithOperators_Wrapper_SimpleMorphism(self, current)
            current.register_conversion(morph)

        #########################################################################################################
        ### CREATING THE NEW OPERATORS FOR THE CORRECT STRUCTURE
        self.__operators : tuple[WrappedMap] = tuple([WrappedMap(self, operator) for operator in operators])

    @property
    def wrapped(self) -> CommutativeRing: return self.__wrapped

    def operators(self) -> tuple[WrappedMap]: return self.__operators
    def operator_types(self) -> tuple[str]: return self.__types

    ## Coercion methods
    def _has_coerce_map_from(self, S) -> bool:
        r'''
        Return ``True`` if it is possible to have a coercion map from `S` to ``self``.
        '''
        if isinstance(S, RingWithOperators_Wrapper):
            return self.wrapped._has_coerce_map_from(S.wrapped) # the operators do not matter for coercing elements
        else:
            return self.wrapped._has_coerce_map_from(S)

    def _element_constructor_(self, x) -> RingWithOperators_WrapperElement:
        r'''
        Extended definition of :func:`_element_constructor_`.
        '''
        if x in SR:
            # conversion from symbolic ring --> using its string representation
            x = str(x)
        elif isinstance(parent(x), RingWithOperators_Wrapper):
            # conversion from other wrapped rings with operators --> we convert the element within
            x = x.wrapped
        p = self.wrapped._element_constructor_(x)
        return self.element_class(self, p)

    def _is_valid_homomorphism_(self, codomain, im_gens, base_map=None) -> bool:
        # delegate the validity check to the wrapped ring
        return self.wrapped._is_valid_homomorphism_(codomain, im_gens, base_map)

    def construction(self) -> RingWithOperatorsFunctor:
        # functor + base ring, so pushout/coercion can rebuild this parent
        return RingWithOperatorsFunctor([operator.function for operator in self.operators()], self.operator_types()), self.wrapped

    # Rings methods: all delegated to the wrapped ring
    def characteristic(self) -> int:
        return self.wrapped.characteristic()

    def gens(self) -> tuple[RingWithOperators_WrapperElement]:
        return tuple([self.element_class(self, gen) for gen in self.wrapped.gens()])

    def ngens(self) -> int:
        return self.wrapped.ngens()

    ## Representation methods
    def __repr__(self) -> str:
        begin = "Differential " if self.is_differential() else "Difference " if self.is_difference() else ""
        return f"{begin}Ring [[{self.wrapped}], {repr(self.operators())}]"

    def __str__(self) -> str:
        return repr(self)

    def _latex_(self) -> str:
        return "".join((
            r"\left(",
            latex(self.wrapped),
            ", ",
            latex(self.operators()) if self.noperators() > 1 else latex(self.operators()[0]),
            r"\right)"
        ))

    ## Element generation
    def one(self) -> RingWithOperators_WrapperElement:
        r'''
        Return the one element in ``self``.

        EXAMPLES::

            sage: from dalgebra import *
            sage: R = RingWithOperators(QQ['x'], diff)
            sage: R.one()
            1
        '''
        return self.element_class(self, self.wrapped.one())

    def zero(self) -> RingWithOperators_WrapperElement:
        r'''
        Return the zero element in ``self``.

        EXAMPLES::

            sage: from dalgebra import *
            sage: R = RingWithOperators(QQ['x'], diff)
            sage: R.zero()
            0
        '''
        return self.element_class(self, self.wrapped.zero())

    def random_element(self,*args,**kwds) -> RingWithOperators_WrapperElement:
        r'''
        Creates a random element in this ring.

        This method creates a random element in the wrapped ring and
        cast it into an element of ``self``.
        '''
        p = self.wrapped.random_element(*args,**kwds)
        return self.element_class(self, p)
####################################################################################################
###
### DEFINING THE CONSTRUCTION FUNCTOR AND SIMPLE MORPHISM
###
####################################################################################################
class RingWithOperatorsFunctor(ConstructionFunctor):
    r'''
    Construction functor that adds a fixed set of operators (with their types)
    to a commutative ring. Used by the coercion/pushout framework to rebuild
    wrapped rings (see :func:`RingWithOperators_Wrapper.construction`).
    '''
    def __init__(self, operators: Collection[Morphism], types: Collection[str]):
        if len(operators) != len(types):
            raise ValueError("The length of the operators and types must coincide.")
        self.__operators = tuple(operators)
        self.__types = tuple(types)
        super().__init__(_CommutativeRings, _RingsWithOperators)

    ### Methods to implement
    def _coerce_into_domain(self, x: Element) -> Element:
        if not x in self.domain():
            raise TypeError(f"The object [{x}] is not an element of [{self.domain()}]")
        # FIX: the coerced object must be returned; it was silently dropped before,
        # so ``ConstructionFunctor.__call__`` always applied the functor to ``None``
        return x

    def _apply_functor(self, x):
        # FIX: operators must be passed as varargs and types as a keyword, matching
        # ``RingWithOperatorFactory.create_key``; the previous positional call made
        # the types tuple be parsed as an extra operator
        return RingWithOperators(x, *self.__operators, types=self.__types)

    def _repr_(self):
        # FIX: removed the stray ']' that unbalanced the representation
        return f"RingWithOperators(*,{self.__operators})"

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.__operators == other.__operators and self.__types == other.__types

    def __hash__(self):
        # defining __eq__ alone sets __hash__ to None, which would make the functor
        # unusable in the hash-based containers of the pushout machinery
        return hash((self.__class__, self.__operators, self.__types))

    def merge(self, other):
        if self == other:
            # FIX: merging with an equal functor must not duplicate the operators
            return self
        elif isinstance(other, RingWithOperatorsFunctor):
            return RingWithOperatorsFunctor(self.__operators + other.__operators, self.__types + other.__types)
        else:
            raise NotImplementedError(f"{self} can only be merged with other RingWithOperatorsFunctor")

    @property
    def operators(self) -> Collection[Morphism]: return self.__operators

    @property
    def types(self): return self.__types
class RingWithOperators_Wrapper_SimpleMorphism(Morphism):
    r'''
    Class representing maps to simpler rings.

    This map allows the coercion system to detect that some elements in a
    :class:`RingWithOperator_Wrapper` are included in simpler rings.
    '''
    def __init__(self, domain, codomain):
        super().__init__(domain, codomain)

    def _call_(self, p):
        # unwrap the element and let the codomain convert it
        return self.codomain()(p.wrapped)
####################################################################################################
###
### DEFINING THE REQUIRED MAPS FOR THIS MODULE
###
####################################################################################################
class AdditiveMap(SetMorphism):
    r'''
    Morphism in the category of commutative additive groups built from a plain
    callable; the callable itself is kept in ``self.function``.
    '''
    def __init__(self, domain : Parent, function : Callable):
        # a Hom-set in the additive-group category encodes that the map is only additive
        parent_homset = domain.Hom(domain, category=_CommutativeAdditiveGroups)
        self.function = function
        super().__init__(parent_homset, function)

    def __str__(self) -> str:
        return f"Additive Map [{repr(self)}]\n\t- From: {self.domain()}\n\t- To : {self.codomain()}"

    def __repr__(self) -> str:
        return f"{repr(self.function)}"

    def _latex_(self) -> str:
        return latex(self.function)

    def __eq__(self, other) -> bool:
        if not isinstance(other, AdditiveMap):
            return False
        return self.domain() == other.domain() and self.function == other.function

    def __hash__(self) -> int:
        return hash(self.function)
class SkewMap(AdditiveMap):
    r'''
    Additive map representing a skew-derivation over a ring with a given twist.
    '''
    def __init__(self, domain : Parent, twist : Morphism, function : Callable):
        # we check the input
        if not twist in domain.Hom(domain):
            raise TypeError("The twist for a skew derivation must be an homomorphism.")
        # the function must live in the module of derivations twisted by ``twist``
        tw_der_module = domain.derivation_module(twist=twist)
        if not function in tw_der_module:
            raise TypeError("The function for a skew derivation must be in the corresponding module")
        self.twist = twist
        super().__init__(domain, function)

    def __str__(self) -> str:
        return f"Skew Derivation [{repr(self)}] over (({self.domain()}))"
class DerivationMap(SkewMap):
    r'''
    Skew-derivation whose twist is the identity morphism, i.e., a usual derivation.
    '''
    def __init__(self, domain, function : Callable):
        identity = domain.Hom(domain).one()
        super().__init__(domain, identity, function)

    def __str__(self) -> str:
        return f"Derivation [{repr(self)}] over (({self.domain()}))"
class WrappedMap(AdditiveMap):
    r'''
    Operator over a :class:`RingWithOperators_Wrapper` built by wrapping a map
    of the wrapped ring: unwrap the argument, apply the map, wrap the result back.
    '''
    def __init__(self, domain : RingWithOperators_Wrapper, function : Morphism):
        if not isinstance(domain, RingWithOperators_Wrapper):
            raise TypeError("A WrappedMap can only be created for a 'RingWithOperators_Wrapper'")
        if function.domain() != domain.wrapped:
            raise ValueError(f"The map to be wrapped must have appropriate domain: ({domain.wrapped}) instead of ({function.domain()})")
        super().__init__(domain, lambda p : domain(function(domain(p).wrapped)))
        # NOTE: the super-call stored the wrapping lambda in ``self.function``; we
        # overwrite it so ``self.function`` keeps the original map of the wrapped ring
        self.function = function

    def __repr__(self) -> str:
        if isinstance(self.function, RingHomomorphism_im_gens):
            # show homomorphisms by the images of the generators
            im_gens = {v: im for (v,im) in zip(self.function.domain().gens(), self.function.im_gens())}
            return f"Hom({im_gens})"
        elif isinstance(self.function, IdentityMorphism):
            return "Id"
        else:
            return super().__repr__()

    def __str__(self) -> str:
        return f"Wrapped [{repr(self)}] over (({self.domain()}))"

    def _latex_(self) -> str:
        if isinstance(self.function, RingHomomorphism_im_gens):
            im_gens = {v: im for (v,im) in zip(self.function.domain().gens(), self.function.im_gens())}
            return r"\sigma\left(" + r", ".join(f"{latex(v)} \\mapsto {latex(im)}" for (v,im) in im_gens.items()) + r"\right)"
        elif isinstance(self.function, IdentityMorphism):
            return r"\text{id}"
        return super()._latex_()
__all__ = ["RingsWithOperators", "RingWithOperators", "DifferentialRing", "DifferenceRing"] | Antonio-JP/dalgebra | dalgebra/ring_w_operator.py | ring_w_operator.py | py | 54,589 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sage.categories.all.Rings.__classcall__",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "sage.categories.all.Rings",
"line_number": 155,
"usage_type": "argument"
},
{
"api_name": "sage.categories.all.CommutativeRings.__classcall__",
"line_number": 1... |
23787043311 | import torch as th
import pandas as pd
import numpy as np
import dgl
from bipartite_graph import BipartiteGraph
#######################
# user-item Subgraph Extraction
#######################
def map_newid(df, col):
    """Re-index the values of ``df[col]`` with consecutive integers 0..n-1,
    assigned in sorted order of the distinct original ids."""
    original = df[col]
    lookup = {old: new for new, old in enumerate(sorted(original.unique()))}
    return np.array([lookup[value] for value in original])
def one_hot(idx, length):
    """Build a ``(len(idx), length)`` int32 matrix with a single 1 per row,
    placed at column ``idx[i]``."""
    encoded = th.zeros([len(idx), length], dtype=th.int32)
    rows = th.arange(len(idx))
    encoded[rows, idx] = 1.0
    return encoded
def get_neighbor_nodes_labels(u_node_idx, i_node_idx, graph,
                              hop=1, sample_ratio=1.0, max_nodes_per_hop=200):
    """Collect the (up to) `hop`-hop neighborhood of a target (user, item) pair
    in a bipartite graph, with per-node hop labels.

    Returns ``(nodes, node_labels)`` where labels encode hop distance and side:
    ``2*dist`` for user nodes and ``2*dist+1`` for item nodes.
    NOTE(review): assumes ``graph`` is a DGL graph whose edges go u --> i, so
    user neighbors come from ``in_edges`` of items and vice versa — confirm
    against the graph construction.
    """
    # 1. neighbor nodes sampling
    dist = 0
    u_nodes, i_nodes = th.unsqueeze(u_node_idx, 0), th.unsqueeze(i_node_idx, 0)
    u_dist, i_dist = th.tensor([0], dtype=th.long), th.tensor([0], dtype=th.long)
    u_visited, i_visited = th.unique(u_nodes), th.unique(i_nodes)
    u_fringe, i_fringe = th.unique(u_nodes), th.unique(i_nodes)

    for dist in range(1, hop+1):
        # sample neigh alternately
        # diff from original code : only use one-way edge (u-->i)
        u_fringe, i_fringe = graph.in_edges(i_fringe)[0], graph.out_edges(u_fringe)[1]
        # drop nodes already visited in previous hops
        u_fringe = th.from_numpy(np.setdiff1d(u_fringe.numpy(), u_visited.numpy()))
        i_fringe = th.from_numpy(np.setdiff1d(i_fringe.numpy(), i_visited.numpy()))
        u_visited = th.unique(th.cat([u_visited, u_fringe]))
        i_visited = th.unique(th.cat([i_visited, i_fringe]))

        # optional random down-sampling of the fringe (ratio first, then hard cap)
        if sample_ratio < 1.0:
            shuffled_idx = th.randperm(len(u_fringe))
            u_fringe = u_fringe[shuffled_idx[:int(sample_ratio*len(u_fringe))]]
            shuffled_idx = th.randperm(len(i_fringe))
            i_fringe = i_fringe[shuffled_idx[:int(sample_ratio*len(i_fringe))]]
        if max_nodes_per_hop is not None:
            if max_nodes_per_hop < len(u_fringe):
                shuffled_idx = th.randperm(len(u_fringe))
                u_fringe = u_fringe[shuffled_idx[:max_nodes_per_hop]]
            if max_nodes_per_hop < len(i_fringe):
                shuffled_idx = th.randperm(len(i_fringe))
                i_fringe = i_fringe[shuffled_idx[:max_nodes_per_hop]]
        if len(u_fringe) == 0 and len(i_fringe) == 0:
            break

        u_nodes = th.cat([u_nodes, u_fringe])
        i_nodes = th.cat([i_nodes, i_fringe])
        # record the hop at which each fringe node was first reached
        u_dist = th.cat([u_dist, th.full((len(u_fringe), ), dist,)])
        i_dist = th.cat([i_dist, th.full((len(i_fringe), ), dist,)])

    nodes = th.cat([u_nodes, i_nodes])

    # 2. node labeling
    # this labeling is based on hop from starting nodes:
    # even labels (2*dist) for user nodes, odd labels (2*dist+1) for item nodes
    u_node_labels = th.stack([x*2 for x in u_dist])
    v_node_labels = th.stack([x*2+1 for x in i_dist])
    node_labels = th.cat([u_node_labels, v_node_labels])

    return nodes, node_labels
def subgraph_extraction_labeling(u_node_idx, i_node_idx, graph,
                hop=1, sample_ratio=1.0, max_nodes_per_hop=200,):
    """Extract the enclosing subgraph of a (user, item) link with labels.

    Builds the `hop`-hop node set, one-hot hop labels, masks the target
    edge(s), and normalizes edge timestamps relative to the target edge.
    """
    # extract the h-hop enclosing subgraph nodes around link 'ind'
    nodes, node_labels = get_neighbor_nodes_labels(u_node_idx=u_node_idx, i_node_idx=i_node_idx, graph=graph,
                hop=hop, sample_ratio=sample_ratio, max_nodes_per_hop=max_nodes_per_hop)
    subgraph = dgl.node_subgraph(graph, nodes, store_ids=True)
    subgraph.ndata['nlabel'] = one_hot(node_labels, (hop+1)*2)
    subgraph.ndata['x'] = subgraph.ndata['nlabel']
    # set edge mask to zero as to remove links between target nodes in training process
    subgraph.edata['edge_mask'] = th.ones(subgraph.number_of_edges(), dtype=th.int32)
    # locate the target u/i nodes inside the subgraph via their original ids
    su = subgraph.nodes()[subgraph.ndata[dgl.NID]==u_node_idx]
    si = subgraph.nodes()[subgraph.ndata[dgl.NID]==i_node_idx]
    _, _, target_edges = subgraph.edge_ids([su, si], [si, su], return_uv=True)
    subgraph.edata['edge_mask'][target_edges.to(th.long)] = 0
    # mask target edge label
    subgraph.edata['label'][target_edges.to(th.long)] = 0.0
    # timestamp normalization
    # compute ts diff from target edge & min-max normalization
    # NOTE(review): assumes edata['ts'] stores two mirrored directed halves,
    # so only the first half is normalized then duplicated — confirm.
    n = subgraph.edata['ts'].shape[0]
    timestamps = subgraph.edata['ts'][:n//2]
    standard_ts = timestamps[target_edges.to(th.long)[0]]
    timestamps = th.abs(timestamps - standard_ts.item())
    timestamps = 1 - (timestamps - th.min(timestamps)) / (th.max(timestamps)-th.min(timestamps) + 1e-5)
    subgraph.edata['ts'] = th.cat([timestamps, timestamps], dim=0) + 1e-5
    return subgraph
#######################
# Ego-graph Extraction
#######################
def get_egograph_neighbor(center_node_idx:int, graph:dgl.DGLGraph,
                hop=1, max_nodes_per_hop=20):
    """BFS-sample up to `hop` hops of in-neighbors around a center node.

    Args:
        center_node_idx: id of the ego (center) node.
        graph: graph to sample from; only in-edges are followed.
        hop: number of hops to expand.
        max_nodes_per_hop: per-hop cap on sampled nodes; None disables the cap.

    Returns:
        (nodes, node_labels): sampled node ids and their hop-distance labels.
    """
    # 1. neighbor nodes sampling
    node_dist = th.tensor([0], dtype=th.long)
    visited_nodes = th.tensor([center_node_idx], dtype=th.long)
    nodes = th.tensor([center_node_idx], dtype=th.long)
    fringe = th.tensor([center_node_idx], dtype=th.long)
    for dist in range(1, hop+1):
        # expand one hop along incoming edges, dropping already-visited nodes
        fringe = graph.in_edges(fringe)[0]
        fringe = th.from_numpy(np.setdiff1d(fringe.numpy(), visited_nodes.numpy()))
        visited_nodes = th.unique(th.cat([visited_nodes, fringe]))
        # guard the None case for consistency with get_neighbor_nodes_labels
        # (the original crashed with `None < int` when the cap was disabled)
        if max_nodes_per_hop is not None and max_nodes_per_hop < len(fringe):
            shuffled_idx = th.randperm(len(fringe))
            fringe = fringe[shuffled_idx[:max_nodes_per_hop]]
        if len(fringe) == 0 :
            break
        nodes = th.cat([nodes, fringe])
        node_dist = th.cat([node_dist, th.full((len(fringe),), dist,)])
    # 2. node labeling
    # label is simply the hop distance from the center node
    node_labels = node_dist.clone()
    return nodes, node_labels
def egograph_extraction(node_idx, graph,
                hop=1, max_nodes_per_hop=10,):
    """Build the ego-subgraph of `node_idx` with one-hot hop-distance features."""
    # extract the h-hop enclosing subgraph nodes around link 'ind'
    nodes, node_labels = get_egograph_neighbor(center_node_idx=node_idx, graph=graph,
                hop=hop, max_nodes_per_hop=max_nodes_per_hop)
    subgraph = dgl.node_subgraph(graph, nodes, store_ids=True)
    # node feature = one-hot encoding of the hop distance from the center
    subgraph.ndata['nlabel'] = one_hot(node_labels, hop+1)
    subgraph.ndata['x'] = subgraph.ndata['nlabel']
    return subgraph
#######################
# Ego graph Dataset
#######################
class EgoGraphDataset(th.utils.data.Dataset):
    """Torch dataset yielding the ego-graph around each node of `graph`."""
    def __init__(self, graph,
                hop=2, max_nodes_per_hop=5):
        self.nodes = graph.nodes()  # all node ids; one sample per node
        self.graph = graph
        self.hop = hop
        self.max_nodes_per_hop = max_nodes_per_hop
    def __len__(self):
        return len(self.nodes)
    def __getitem__(self, idx):
        # extract the ego-graph of the idx-th node on the fly
        node_idx = self.nodes[idx]
        ego_graph = egograph_extraction(node_idx, self.graph, hop=self.hop, max_nodes_per_hop=self.max_nodes_per_hop)
        return ego_graph
def collate_data(data):
    """Collate a list of DGL graphs into a single batched graph for a DataLoader."""
    return dgl.batch(data)
""" Dataset for classifier"""
class AmazonDataset(th.utils.data.Dataset):
    """Rating-prediction dataset pairing user/item embeddings with labels.

    Item ids are shifted by (max user id + 1) so that users and items index
    a single shared embedding table.
    """
    def __init__(self, df:pd.DataFrame, embeds, start=0, end=-1):
        df = df.reset_index()
        self.labels = df['rating']  # pre processed 0~4
        self.users = df['user_id']
        self.items = df['item_id'] + max(self.users) + 1
        # BUG FIX: honour the `end` bound (the original ignored it entirely);
        # the default end=-1 keeps the old behaviour of slicing to the end.
        self.embeds = embeds[start:] if end == -1 else embeds[start:end]
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        """Return (user id, shifted item id, user emb, item emb, rating label)."""
        uid = self.users[idx]
        iid = self.items[idx]
        u_emb = self.embeds[uid]
        i_emb = self.embeds[iid]
        label = self.labels[idx]
        return uid, iid, u_emb, i_emb, label
| venzino-han/graph-transfer | dataset.py | dataset.py | py | 7,605 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.int32",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_num... |
8877029762 | """
FILE: kernelregression.py
LAST MODIFIED: 24-12-2015
DESCRIPTION: Module for Gaussian kernel regression
===============================================================================
This file is part of GIAS2. (https://bitbucket.org/jangle/gias2)
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
===============================================================================
"""
import logging
import numpy as np
log = logging.getLogger(__name__)
def _gaussianKernel(T, t, s):
c1 = 1.0 / (s * np.sqrt(2 * np.pi))
c2 = np.exp(-(T - t) ** 2.0 / (2 * s * s))
return c1 * c2
def _gaussianKernelMax(s):
return 1.0 / (s * np.sqrt(2 * np.pi))
def _weightedMean(x, w):
# print x
# print x.shape
# print w.shape
return (x * w).sum(-1) / w.sum()
def _weightedSD(x, w):
    """Weighted standard deviation of *x* along its last axis, with weights *w*."""
    mu = _weightedMean(x, w)
    sq_dev = ((x.T - mu).T) ** 2.0
    return np.sqrt(np.sum(sq_dev * w, axis=-1) / np.sum(w))
class KernelRegressor(object):
    """Gaussian-kernel regressor with per-point adaptive kernel widths.

    At each of `xsamples` evenly spaced points in [xmin, xmax], the kernel
    width is tuned so the number of observations receiving significant
    weight stays within `k` of the median count across all points.
    """
    # minimum width-tuning step; rescaled to the x range inside fit()
    sigmaeps = 1e-6
    def __init__(self, k=2, sigma0=1.0, wmin=0.35):
        self.k = k  # tolerance around the target observation count
        self.sigma0 = sigma0  # initial kernel width at every sample point
        self.wmin = wmin # kernel weight cutoff
        self.nTarg = None # target number of obs at each time point
        self.sigmas = None  # per-sample-point kernel widths
        self.n = None
        self.y = None  # dependent variable(s); variables in rows
        self.ytmeans = None  # weighted mean of y at each sample point
        self.ytSDs = None  # weighted SD of y at each sample point
        self.ytweights = None  # kernel weights used at each sample point
        self.ytinds = None  # indices of observations used at each point
        self.x = None  # independent variable (1-d)
        self.xsamples = None  # number of evaluation points
        self.xt = None  # evaluation points (linspace over [xmin, xmax])
        self.xtn = None  # number of obs contributing at each point
        self.xtnTarg = None  # target obs count (median of xtn)
        self.xmin = None
        self.xmax = None
    def fit(self, x, y, xmin, xmax, xsamples):
        """
        x is a 1-d array, the independent variable e.g. time
        if y is multivariate, each observation should be a column, so variables
        in rows.

        Returns the weighted means of y at each of the xsamples points.
        """
        self.x = x
        self.y = y
        self.xmin = xmin
        self.xmax = xmax
        # width-tuning step proportional to the x range
        self.sigmaeps = 1e-3 * (xmax - xmin)
        self.xsamples = xsamples
        self.xt = np.linspace(self.xmin, self.xmax, self.xsamples)
        self._fitInit()
        self._optWidths()
        return self.ytmeans
    def _fitInit(self):
        """Evaluate the regression at every sample point using sigma0."""
        # initialise kernel width at each time point
        self.sigmas = np.ones(self.xsamples) * self.sigma0
        # calc number of obs at each y sampling (yt[i])
        self.xtn = []
        self.ytmeans = []
        self.ytweights = []
        self.ytSDs = []
        self.ytinds = []
        for t, s in zip(self.xt, self.sigmas):
            ty, tw, tyi = self._getKernelY(t, s)
            self.xtn.append(len(tw))
            self.ytweights.append(tw)
            self.ytinds.append(tyi)
            self.ytmeans.append(_weightedMean(ty, tw))
            self.ytSDs.append(_weightedSD(ty, tw))
        self.ytmeans = np.array(self.ytmeans)
        # calculate target number obs per time point
        self.xtnTarg = np.median(self.xtn)
    def _optWidths(self):
        """Shrink/grow each width until its obs count is within k of the target."""
        for i in range(self.xsamples):
            tyi = None
            change = 0
            # too many observations at this point: narrow the kernel
            if self.xtn[i] > (self.xtnTarg + self.k):
                while self.xtn[i] > (self.xtnTarg + self.k):
                    self.sigmas[i] -= self.sigmaeps
                    ty, tw, tyi = self._getKernelY(self.xt[i], self.sigmas[i])
                    self.xtn[i] = len(tw)
                change = 1
            # too few observations at this point: widen the kernel
            if self.xtn[i] < (self.xtnTarg - self.k):
                while self.xtn[i] < (self.xtnTarg - self.k):
                    self.sigmas[i] += self.sigmaeps
                    ty, tw, tyi = self._getKernelY(self.xt[i], self.sigmas[i])
                    self.xtn[i] = len(tw)
                change = 1
            # calculated weighted mean
            # if tyi is not None:
            #     print self.xt[i], tyi
            # else:
            #     print self.xt[i], 'no opt'
            if change:
                # re-evaluate statistics with the tuned width
                self.ytmeans[i] = _weightedMean(ty, tw)
                self.ytSDs[i] = _weightedSD(ty, tw)
                self.ytweights[i] = tw
                self.ytinds[i] = tyi
    def _getKernelY(self, t, s):
        """Return (y values, weights, indices) of obs with weight >= wmin*peak."""
        xw = _gaussianKernel(self.x, t, s)
        validMask = xw >= (self.wmin * _gaussianKernelMax(s))
        validW = xw[validMask]
        validY = self.y[:, validMask]
        return validY, validW, np.where(validMask)[0]
def test():
    """Smoke-test the regressor on noisy sine data and plot the fit."""
    ndata = 500
    xs = 3.0 * np.random.rand(ndata)
    ys = np.sin(xs) + 0.1 * (np.random.rand(ndata) - 0.5)

    reg = KernelRegressor(k=2, sigma0=0.05, wmin=0.35)
    reg.fit(xs, ys, 0.1, 2.9, 10)

    log.debug('xt sigma n y_mean')
    for xt, sigma, count, ymean in zip(reg.xt, reg.sigmas, reg.xtn, reg.ytmeans):
        log.debug(('{:4.3f} {:4.3f} {:4d} {:4.3f}').format(xt, sigma, count, ymean))

    # plot the raw points, the fitted mean, and a +/- 1 SD band
    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.scatter(xs, ys)
    plt.plot(reg.xt, reg.ytmeans, 'r')
    plt.plot(reg.xt, reg.ytmeans + reg.ytSDs, 'g--')
    plt.plot(reg.xt, reg.ytmeans - reg.ytSDs, 'g--')
    plt.show()
if __name__ == '__main__':
test()
| musculoskeletal/gias2 | src/gias2/learning/kernelregression.py | kernelregression.py | py | 5,059 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_numb... |
72689147554 | #coding: utf-8
__author__ = "Lário dos Santos Diniz"
from django.contrib import admin
from .models import (RPGSystem)
class RPGSystemAdmin(admin.ModelAdmin):
    """Django admin configuration for the RPGSystem model."""
    # columns shown in the admin change-list view
    list_display = ['name', 'description', 'site']
    # fields queried by the admin search box
    search_fields = ['name', 'description', 'site']
admin.site.register(RPGSystem, RPGSystemAdmin) | lariodiniz/minhaMesaRPG | api/admin.py | admin.py | py | 314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 14,
"usage_type": "call"
},... |
# Import required libraries
import pandas as pd
from sqlalchemy import create_engine
# Load data from source into a Pandas dataframe
df = pd.read_csv('source_data.csv')
# Perform data transformation and cleaning:
# drop rows with missing values, then normalise one column to upper case
df = df.dropna()
df['column_name'] = df['column_name'].str.upper()
# Load data into a SQLite database (table is replaced if it already exists)
engine = create_engine('sqlite:///data.db')
df.to_sql('table_name', engine, if_exists='replace')
# Perform data reconciliation: compare the loaded row count with the source
query = '''
SELECT COUNT(*)
FROM table_name
'''
count = pd.read_sql_query(query, engine)['COUNT(*)'][0]
# Log the reconciliation result
if count == df.shape[0]:
    print('Data integration successful')
else:
    print('Data integration failed')
# This is just a basic example to illustrate the steps involved in a data integration workflow.
# You can add more complex transformations, data quality checks, and error handling to suit your specific requirements.
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 21,
"usage_type": "call"
}
] |
71835741475 | import random
import sys
import numpy
import torch
import pygad
import pygad.torchga
from nn import create_ga, create_network
import math
class Gym:
    """A w-by-h grid battlefield where two AIs command opposing unit stacks."""
    def __init__(self, w, h, left_ai, right_ai):
        self.turn_i = 0  # number of completed turns
        self.w = w
        self.h = h
        self.left_ai = left_ai
        self.right_ai = right_ai
        self.left_units = []
        self.right_units = []
    def prepare(self):
        """Place left units on column 0 and right units on the last column."""
        i = 0
        for unit in self.left_units:
            unit.set_position(0, i)
            unit.side = True
            i += random.randint(1, 2)
        i = 0
        for unit in self.right_units:
            unit.set_position(self.w - 1, i)
            unit.side = False
            i += random.randint(1, 2)
    def turn(self, represent, dprint = True):
        """Play one full turn (all left units, then all right units).

        Awards +/-100 to the AIs when one side has been wiped out. Returns
        the list of board renderings collected when `represent` is truthy.
        """
        # for now - left to right foreach
        states = []
        if len(self.left_units) == 0:
            self.left_ai.score -= 100
            self.right_ai.score += 100
        elif len(self.right_units) == 0:
            self.left_ai.score += 100
            self.right_ai.score -= 100
        for unit in self.left_units:
            self.action(
                self.left_ai,
                unit,
                self.left_ai.decide(unit,
                    self)
            )
            if represent:
                states.append(self.represent(dprint))
        for unit in self.right_units:
            self.action(
                self.right_ai,
                unit,
                self.right_ai.decide(unit,
                    self)
            )
            if represent:
                states.append(self.represent(dprint))
        self.turn_i += 1
        return states
    def action(self, ai, unit, act):
        """Apply an AI's chosen move `act` = [x, y] for `unit`, then melee.

        Illegal moves (off-board, too far, occupied target) cost the AI 1
        point; otherwise the unit moves and attacks one adjacent enemy.
        """
        act = numpy.array(act)
        pos = numpy.array([unit.x, unit.y])
        new_pos = (act - pos)
        x_diff = round(new_pos[0])
        y_diff = round(new_pos[1])
        new_x = unit.x + x_diff
        new_y = unit.y + y_diff
        # Euclidean distance of the attempted move
        dist = math.sqrt(x_diff ** 2 + y_diff ** 2)
        if self.is_out_of_bounds(new_x, new_y) or dist > unit.speed:
            ai.score -= 1
        # NOTE(review): get_unit_at(new_y, new_y) passes y twice — looks
        # like it should be (new_x, new_y); confirm before relying on it.
        elif self.is_occupied(new_x, new_y) and self.get_unit_at(new_y, new_y) != unit:
            ai.score -= 1
        else:
            unit.set_position(new_x, new_y)
            e = self.get_adjacent_enemy(new_x, new_y, unit.side)
            if e is not None:
                # total damage; stack health is flattened into one number
                dmg = unit.damage * unit.count
                fh = (e.count - 1) * e.max_health + e.health
                fh -= dmg
                new_c = fh // e.max_health
                new_h = fh % e.max_health
                e.count = new_c
                e.health = new_h
                if new_c <= 0:
                    # remove the destroyed stack from its side's army
                    [self.right_units, self.left_units][e.side].remove(e)
                    ai.score += 50
                ai.score += dmg
                opp_ai = self.right_ai if self.left_ai == ai else self.left_ai
                opp_ai.score -= dmg
    # here we check if enemy adjacent
    def get_adjacent_enemy(self, x, y, side):
        """Return the first enemy unit in the 8 cells around (x, y), or None."""
        adj_mat = [
            [-1, 0], # L
            [-1, -1], # TL
            [0, -1], # T
            [1, -1], # TR
            [1, 0], # R
            [1, 1], # BR
            [0, 1], # B
            [-1, 1], # BL
        ]
        for adj in adj_mat:
            pos = numpy.array([x, y]) + adj
            if self.is_out_of_bounds(*pos):
                continue
            if self.is_occupied(*pos):
                u = self.get_unit_at(*pos)
                if u.side != side:
                    return u
        return None
    def is_out_of_bounds(self, x, y):
        """True when (x, y) lies outside the w-by-h board."""
        return x >= self.w or y >= self.h or x < 0 or y < 0
    def is_occupied(self, x, y):
        """True when any unit stands on (x, y)."""
        return self.get_unit_at(x, y) is not None
    def get_unit_at(self, x, y):
        """Return the unit standing on (x, y), or None."""
        for unit in self.left_units + self.right_units:
            if unit.x == x and unit.y == y:
                return unit
        return None
    def represent(self, dprint = True):
        """Render the board as ANSI-colored text; print it when dprint is True."""
        table = []
        for x in range(self.w):
            arr = []
            table.append(arr)
            for y in range(self.h):
                arr.append('-')
        # default, 1st unit, 2nd unit in team
        colors = ['\033[91m', '\033[94m', '\033[92m', '\033[93m', '\033[95m', '\033[96m']
        i = 1
        for unit in self.left_units:
            table[unit.y][unit.x] = colors[i] + '▣' + colors[0]
            i += 1
        i = 1
        for unit in self.right_units:
            table[unit.y][unit.x] = colors[i] + '△' + colors[0]
            i += 1
        t = ''
        if dprint:
            print('State, turn ', self.turn_i)
        for x in range(self.w):
            for y in range(self.h):
                if dprint:
                    print(table[x][y], end=' ')
                t += table[x][y] + ' '
            if dprint:
                print()
            t += '\n'
        return t
class Unit:
    """A stack of `count` identical creatures sharing the same stats."""

    def __init__(self, speed, damage, health, count):
        self.speed, self.damage, self.count = speed, damage, count
        self.max_health = self.health = health
        self.side = True          # True = left army, False = right army
        self.x = self.y = -1      # off-board until set_position() is called

    def set_position(self, x, y):
        """Move the stack to grid cell (x, y)."""
        self.x, self.y = x, y
class AI:
    """Wraps a neural model that scores board cells for a unit's next move."""
    def __init__(self, model):
        self.model = model
        self.score = 0  # running reward accumulated by Gym.action/turn
    def get_input(self, unit, gym):
        """Encode the board as a 1x2x8x8 float tensor (ally/enemy map, counts)."""
        unit_table = numpy.zeros(8 * 8).reshape((8, 8))
        for u in gym.left_units + gym.right_units:
            # index = u.x + u.y * gym.w
            # -1 marks the acting unit's own side, +1 marks enemies
            if unit.side == u.side:
                unit_table[u.y][u.x] = -1
            else:
                unit_table[u.y][u.x] = 1
        count_table = numpy.zeros(8 * 8).reshape((8, 8))
        for u in gym.left_units + gym.right_units:
            # index = u.x + u.y * gym.w
            count_table[u.y][u.x] = u.count
        return torch.tensor([[
            unit_table,
            count_table
        ]]).float()
    def decide(self, unit, gym):
        """Return [x, y] of the highest-scored cell reachable by `unit`."""
        # inputs
        input = self.get_input(unit, gym)
        # outputs
        # 1x64 array of weights
        # NOTE(review): `input` is already a tensor; torch.tensor(input)
        # re-copies it and emits a warning — confirm whether intentional.
        decision = self.model.model(torch.tensor(input).float())
        # we need to find in range best spot
        bw = -1
        bwi = -1
        for (idx, weight) in enumerate(decision):
            px, py = idx % 8, idx // 8
            # keep the best-weighted cell within the unit's movement range
            dist = math.sqrt((px - unit.x) ** 2 + (py - unit.y) ** 2)
            if weight > bw and dist <= unit.speed:
                bw = weight
                bwi = idx
        return [bwi % 8, bwi // 8]
def create_gym(left_ai, right_ai):
    """Assemble a fresh 8x8 battle with mirrored armies for the two AIs.

    Each side gets one stack of 15 creatures plus a few single-creature
    stacks, then units are placed on their starting columns.
    """
    gym = Gym(8, 8, left_ai, right_ai)
    speed, damage, health = 2, 15, 20
    lo, hi = 3, 3

    gym.left_units.append(Unit(speed, damage, health, 15))
    for _ in range(random.randint(lo, hi)):
        gym.left_units.append(Unit(speed, damage, health, 1))

    gym.right_units.append(Unit(speed, damage, health, 15))
    for _ in range(random.randint(lo, hi)):
        gym.right_units.append(Unit(speed, damage, health, 1))

    gym.prepare()
    return gym
def save_model(model, filename):
    """Serialise the whole model object to `filename` via torch.save."""
    torch.save(model, filename)
class GymTester:
    """Drives genetic-algorithm training of the battle AI with pygad.

    Each candidate solution is evaluated by playing 15 turns against the
    best model of the previous generation.
    """
    def run_simulation(self, ai_to_test, solution, side=False, rep=False):
        """Load `solution` into the model, play a 15-turn game, return the Gym."""
        # get weights from current model
        model_weights_dict = pygad.torchga.model_weights_as_dict(model=ai_to_test.model, weights_vector=solution)
        # Use the current solution as the model parameters.
        ai_to_test.model.load_state_dict(model_weights_dict)
        # create gym where left is tested model and right is previous generation
        ais = [AI(ai_to_test), AI(ai_to_test) if self.previous_best_model is None else AI(self.previous_best_model)]
        if side:
            ais = ais[::-1] # reverse so tested ai becomes second(right side)
        g = create_gym(ais[0], ais[1])
        for i in range(15):
            g.turn(rep)
        return g
    def callback_gen(self, ga_instance):
        """Per-generation hook: keep the best solution as the opponent model."""
        model = self.previous_best_model.model
        weights = pygad.torchga.model_weights_as_dict(model=model, weights_vector=self.ga_instance.best_solution()[0])
        self.previous_best_model.model.load_state_dict(weights)
        print("%d / %d Fitness of the best solution :" % (ga_instance.generations_completed, self.iterations), ga_instance.best_solution()[1])
    def fitness_func(self, solution, solution_idx):
        """Fitness = the tested AI's score after one simulated game."""
        # random_side = bool(random.getrandbits(1))
        random_side = False
        g = self.run_simulation(self.training_ga, solution, side=random_side, rep = False)
        score = g.left_ai.score if random_side == False else g.right_ai.score
        return score
    def __init__(self, iterations_to_run):
        self.iterations = iterations_to_run
        self.input_size = 8 * 8  # one value per board cell
        self.training_ga, self.ga_instance = create_ga(self.input_size, iterations_to_run,
                lambda x, y: self.fitness_func(x, y), 10,
                callback_gen=lambda instance: self.callback_gen(instance))
        # frozen opponent: the previous generation's best network
        self.previous_best_model = create_network(self.input_size, 64, 10)
        pass
    def run(self):
        """Run the GA, persist the best model, and replay it once with output."""
        self.ga_instance.run()
        solution, solution_fitness, solution_idx = self.ga_instance.best_solution()
        save_model(self.training_ga.model, 'models/%d_%d.pt' % (self.iterations, solution_fitness))
        self.previous_best_model = self.training_ga
        self.run_simulation(self.training_ga, solution, side=False, rep=True)
        # self.ga_instance.plot_result(title="PyGAD & PyTorch - Iteration vs. Fitness", linewidth=4)
# self.ga_instance.plot_result(title="PyGAD & PyTorch - Iteration vs. Fitness", linewidth=4)
def main(iters):
    """Run GA training for `iters` generations."""
    GymTester(iters).run()
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
iters = int(sys.argv[1])
main(iters)
| enchantinggg4/pytorch_experiment | src/mygym.py | mygym.py | py | 9,940 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_num... |
38523258836 | import csv
import models
from operator import attrgetter
import statistics
def filter_data(columns, row):
    """Return True when a CSV row passes the dataset filters.

    A row is kept only if it has an ENERGYSTARScore, was built in 1920 or
    later, and its building name does not contain 'CENTER'.
    """
    if row[columns.index('ENERGYSTARScore')] == '':
        return False
    if int(row[columns.index('YearBuilt')]) < 1920:
        return False
    return 'CENTER' not in row[columns.index('BuildingName')]
def parse_file(filename):
    """Parse the Seattle buildings CSV into a list of models.Building.

    Rows failing filter_data (no ENERGYSTAR score, pre-1920, or 'CENTER'
    in the name) are skipped.
    """
    print('Starting parsing file:')
    list_of_buildings = []
    # read file
    with open(filename) as file_raw:
        # NOTE(review): f-string has no placeholder — probably meant
        # f'Opening file {filename}'; confirm before changing output.
        print(f'Opening file (unknown)')
        data_raw = csv.reader(file_raw, delimiter=',')
        print('Iterating over each line in the file:')
        firstline = True
        columns = []
        for row in data_raw:
            # the first row holds the column names used for index lookups
            if firstline:
                columns = row
                firstline = False
                continue
            # filter data
            if filter_data(columns, row) == False:
                continue
            OSEBuildingID = row[columns.index('OSEBuildingID')]
            # Question 1
            BuildingName = row[columns.index('BuildingName')]
            NumberofFloors = int(row[columns.index('NumberofFloors')])
            # Question 2
            ENERGYSTARScore = int(row[columns.index('ENERGYSTARScore')])
            SiteEUI = float(row[columns.index('SiteEUI(kBtu/sf)')])
            NaturalGas = row[columns.index('NaturalGas(kBtu)')]
            # Question 3
            Neighborhood = row[columns.index('Neighborhood')]
            Electricity = int(row[columns.index('Electricity(kBtu)')])
            # Question 4
            PropertyGFABuildings = int(
                row[columns.index('PropertyGFABuilding(s)')])
            PrimaryPropertyType = row[columns.index('PrimaryPropertyType')]
            LargestPropertyUseType = row[columns.index(
                'LargestPropertyUseType')]
            SecondLargestPropertyUseType = row[columns.index(
                'SecondLargestPropertyUseType')]
            ThirdLargestPropertyUseType = row[columns.index(
                'ThirdLargestPropertyUseType')]
            PropertyTypes = [
                PrimaryPropertyType,
                LargestPropertyUseType,
                SecondLargestPropertyUseType,
                ThirdLargestPropertyUseType]
            # instance a Building
            building = models.Building(
                OSEBuildingID,
                BuildingName,
                NumberofFloors,
                ENERGYSTARScore,
                SiteEUI,
                NaturalGas,
                Neighborhood,
                Electricity,
                PropertyGFABuildings,
                PropertyTypes)
            list_of_buildings.append(building)
    print('Finish parsing file and filter data.')
    print(f'list_of_buildings length is {len(list_of_buildings)}')
    return list_of_buildings
def find_buildings_with_largest_number_of_floors(
        list_of_buildings, max_number_of_floors):
    """Return every building whose floor count equals the overall maximum.

    `max_number_of_floors` seeds the comparison: when it exceeds every
    building's floor count, the result is empty.
    """
    top = max_number_of_floors
    for building in list_of_buildings:
        if building.NumberofFloors > top:
            top = building.NumberofFloors
    return [b for b in list_of_buildings if b.NumberofFloors == top]
def question_1(list_of_buildings):
    """Print the building(s) that have the most floors."""
    print('Solving question 1:')
    tallest = find_buildings_with_largest_number_of_floors(list_of_buildings, 0)
    print('Building(s) with the largest number of floors:')
    for b in tallest:
        print(
            'Id {0}, name {1}, with {2} floors.'.format(
                b.OSEBuildingID,
                b.BuildingName,
                b.NumberofFloors))
def question_2(list_of_buildings):
    """Count and print buildings scoring at least 97 on ENERGYSTAR."""
    print('Solving question 2:')
    energy_star_score_threshold = 97
    qualifying = [b for b in list_of_buildings
                  if b.ENERGYSTARScore >= energy_star_score_threshold]
    print(
        'The number of buildings with ENERGYSTARScore of at least {0} is {1}.'.format(
            energy_star_score_threshold,
            len(qualifying)))
def filter_natural_gas(list_of_buildings):
    """Return the buildings whose NaturalGas field is non-empty (truthy)."""
    print('Filtering buildings that used natural gas:')
    return [b for b in list_of_buildings if b.NaturalGas]
def question_3(list_of_buildings):
    """Print the median SiteEUI over buildings that use natural gas."""
    print('Solving question 3:')
    gas_buildings = filter_natural_gas(list_of_buildings)
    median_SiteEUI = statistics.median(b.SiteEUI for b in gas_buildings)
    print(
        f'The median of SiteEUI among buildings using natural gas is {median_SiteEUI} kBtu.')
def filter_neighborhood(list_of_buildings, neighborhood):
    """Return the buildings located in the given neighborhood."""
    # neighborhood = 'BALLARD'
    return [b for b in list_of_buildings if b.Neighborhood == neighborhood]
def get_threshold_building(filename, threshold_building_name):
    """Look up a building by name in the raw CSV and return its Electricity(kBtu).

    Returns None (implicitly) when the name never appears in any row.
    """
    with open(filename) as file_raw:
        reader = csv.reader(file_raw, delimiter=',')
        header = None
        for row in reader:
            if header is None:
                header = row
                continue
            if threshold_building_name in row:
                electricity = int(row[header.index('Electricity(kBtu)')])
                print('Building {0} used the amount of Electricity of {1} kBtu.'.format(
                    threshold_building_name, electricity))
                return electricity
def filter_electricity_use(list_of_buildings, threshold_building_electricity):
    """Return buildings consuming strictly more electricity than the threshold."""
    return [b for b in list_of_buildings
            if b.Electricity > threshold_building_electricity]
def question_4(filename, list_of_buildings):
    """List BALLARD buildings using more electricity than a reference building."""
    print('Solving question 4:')
    neighborhood = 'BALLARD'
    threshold_building_name = 'BIOMED FAIRVIEW RESEARCH CENTER'
    threshold = get_threshold_building(filename, threshold_building_name)
    candidates = filter_neighborhood(list_of_buildings, neighborhood)
    heavy_users = filter_electricity_use(candidates, threshold)
    for b in heavy_users:
        print(
            'Building id {0}, name {1} used the amount of Electricity of {2} kBtu.'.format(
                b.OSEBuildingID,
                b.BuildingName,
                b.Electricity))
def filter_property_type_block_list(list_of_buildings, block_list):
    """Return buildings whose property types mention none of the blocked terms.

    A building is excluded when any string in `block_list` appears as a
    substring of any of its PropertyTypes entries.
    """
    print('Filtering buildings that are not offices or hospitals:')
    kept = []
    for building in list_of_buildings:
        blocked = any(term in ptype
                      for ptype in building.PropertyTypes
                      for term in block_list)
        if not blocked:
            kept.append(building)
    return kept
def filter_property_floor_area_building(
        list_of_buildings, floor_area_threshold):
    """Return buildings with gross floor area strictly above the threshold."""
    return [b for b in list_of_buildings
            if b.PropertyGFABuildings > floor_area_threshold]
def question_5(list_of_buildings):
    """List very large non-office, non-hospital buildings (> 15 football fields)."""
    print('Solving question 5:')
    football_field_area = 57600  # square feet
    floor_area_threshold = 15 * football_field_area
    print(f'Threshold property area is {floor_area_threshold} square feet.')
    block_list = ['Hospital', 'Office']
    eligible = filter_property_type_block_list(list_of_buildings, block_list)
    large = filter_property_floor_area_building(eligible, floor_area_threshold)
    for b in large:
        print('Building id {0}, name {1}, total floor area for buildings {2} square feet.'.format(
            b.OSEBuildingID, b.BuildingName, b.PropertyGFABuildings
        ))
def ada_data_challenge(filename):
    """Entry point: parse and filter the CSV, then answer all five questions."""
    list_of_buildings = parse_file(filename)
    question_1(list_of_buildings)
    question_2(list_of_buildings)
    question_3(list_of_buildings)
    question_4(filename, list_of_buildings)
    question_5(list_of_buildings)
| the-non-binary-tree/ada_data_challenge_c15 | utils.py | utils.py | py | 9,974 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Building",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "statistics.median",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_... |
3408800450 | import serial
import matplotlib
matplotlib.use('TkAgg') # MUST BE CALLED BEFORE IMPORTING plt
from matplotlib import pyplot as plt
import queue
import threading
import animation
import seaborn as sns
import numpy as np
import time
class ArduinoReader(threading.Thread):
    """Background thread that reads 8x8 thermal frames from a serial port.

    Each 'Frame' marker line is followed by two comma-separated 64-pixel
    rows; complete frame pairs are pushed onto the shared queue `sig`.
    """
    def __init__(self, stop_event, sig, serport):
        threading.Thread.__init__(self)
        self.stopped = stop_event  # threading.Event used to request shutdown
        self.signal = sig  # queue receiving [frame1, frame0] pairs
        self.pixalarray = [[] for _ in range(8)]
        self.pixalarray0 = [[] for _ in range(8)]
        port = serport
        # self.s = serial.Serial(port, 9600, timeout=1, rtscts=True, dsrdtr=True)
        self.s = serial.Serial(port, 115200, timeout=0.1, rtscts=True, dsrdtr=True)
        if not self.s.isOpen():
            self.s.open()
        print("connected: ", self.s)
    def run(self):
        """Read frames until the stop event is set, then close the port."""
        while not self.stopped.is_set():
            # print(self.s.readline().rstrip())
            try:
                # NOTE(review): str() of bytes yields "b'...'" — the
                # find()/split() below work on that form; confirm intended.
                dstr = str(self.s.readline())
                # print(dstr.find('Frame'))
                if dstr.find('Frame') > 0:
                    # the two lines after the marker carry the two sensors' pixels
                    data_d1 = str(self.s.readline()).split(',')
                    self.pixalarray = [float(x) for x in data_d1[1:-1]]
                    data_d0 = str(self.s.readline()).split(',')
                    self.pixalarray0 = [float(x) for x in data_d0[1:-1]]
                    if len(self.pixalarray) == 64 and len(self.pixalarray0) == 64:
                        self.signal.put([self.pixalarray, self.pixalarray0])
            except:
                # best-effort: malformed/partial lines are silently skipped
                continue
        self.clean()
    def get_signal(self):
        """Return the shared frame queue."""
        return self.signal
    def clean(self):
        """Close the serial port (loops until isOpen() reports closed)."""
        # self.s.cancel_read()
        while self.s.isOpen():
            self.s.close()
        # print('the serial port is open? {}'.format(self.s.isOpen()))
def colorscale(data, minc, maxc):
    """Map `data` from the range [minc, maxc] onto an int in [0, 255].

    Values outside the range are clamped. A degenerate range
    (minc == maxc) maps everything to 0 instead of raising
    ZeroDivisionError (a bug in the original).
    """
    span = maxc - minc
    if span == 0:
        return 0
    scaled = 256 * (data - minc) / span
    # clamp to the valid 8-bit range; leftover debug print removed
    scaled = min(max(scaled, 0), 255)
    return int(scaled)
if __name__ == '__main__':
    try:
        # shared queue and stop flag connecting the reader thread to the UI
        q = queue.Queue()
        stop_event = threading.Event()
        data_reader = ArduinoReader(stop_event, q, 'COM3')
        data_reader.start()
        # two side-by-side heatmaps, one per sensor, seeded with random data
        fig, (ax1, ax0) = plt.subplots(1, 2)
        im1 = ax1.imshow(np.random.uniform(low=22, high=32, size=(8, 8)), vmin=22, vmax=32, cmap='jet', interpolation='lanczos')
        im0 = ax0.imshow(np.random.uniform(low=22, high=32, size=(8,8)), vmin = 22, vmax = 32, cmap='jet', interpolation='lanczos')
        plt.tight_layout()
        plt.ion()
        # blocking display loop: pull the latest frame pair and redraw
        while True:
            [frame1, frame0] = q.get()
            im1.set_array(np.reshape(frame1, (8, 8)))
            im0.set_array(np.reshape(frame0, (8, 8)))
            # plt.draw()
            plt.pause(0.001)
        # NOTE(review): unreachable after the infinite loop; exit happens
        # via KeyboardInterrupt which triggers the finally block below.
        plt.ioff()
        plt.show()
    finally:
        # always stop and join the reader thread and release the port
        stop_event.set()
        data_reader.clean()
        data_reader.join()
| saintnever/dualring_py | stream.py | stream.py | py | 2,949 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "threadi... |
10913773933 | from flask import Flask, render_template, request
import pandas as pd
import numpy as np
app = Flask(__name__)
# Reading dataset in global scope
df = pd.read_csv("winequalityN.csv")
# This is the home page
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
# This is the page where we will load the dataset
@app.route("/load", methods = ["GET", "POST"])
def load():
    """Show the results page once the load form is submitted."""
    # NOTE(review): a plain GET (or a POST without 'submit_button') falls
    # through and returns None, which Flask rejects — confirm intended flow.
    if request.method == "POST" and "submit_button" in request.form:
        return render_template("results.html")
# This is the page where we will check the shape of the dataset
@app.route("/shape", methods = ["GET", "POST"])
def shape():
    """Render the dataset's row and column counts when the form is submitted."""
    if request.method == "POST" and "check_shape" in request.form:
        rows, cols = df.shape
        return render_template("results2.html", shape=f"Rows: {rows}, Columns: {cols}")
# This is the page where we will check the column names of the dataset
@app.route("/column_names", methods = ["GET", "POST"])
def column_names_fun():
    """Render the list of dataset column names on form submission."""
    if request.method == "POST" and "check_column_names" in request.form:
        return render_template("columnr.html", colnames=list(df.columns))
# This is the page where we will check the missing values of the dataset
@app.route("/missing_values", methods = ["GET", "POST"])
def miss_val():
    """Render (column, count) pairs for every column that has missing values."""
    if request.method == "POST" and "missing_values" in request.form:
        null_counts = df.isnull().sum()
        miss_values = [(col, cnt) for col, cnt in null_counts.items() if cnt != 0]
        return render_template("results4.html", missing_values=miss_values)
# This is the page where we will handle the missing values of the dataset
@app.route("/handle_miss_value", methods = ["GET", "POST"])
def handle_mis():
    """Handle missing values: drop rows when few, else impute numeric columns.

    Strategy:
      * If fewer than 5% of rows contain missing values, drop those rows.
      * Otherwise impute each numeric column with the mean when mean and
        median are close (no strong outliers), else with the median.
    """
    # work on a fresh copy so repeated requests start from the raw data
    df = pd.read_csv("winequalityN.csv")
    if request.method == "POST" and "handling_values" in request.form:
        # case 1 : when there are too many rows in dataset as compared to columns
        # we will proceed by deleting them
        rows = df.shape[0]
        if rows - df.dropna().shape[0] < 0.05 * rows:
            df = df.dropna()
            return render_template("results5.html", more_rows = "Missing Values have been dropped by dropping rows")
        # case 2: impute every numeric column with missing values.
        # BUG FIX: the original tested `arr[i].dtype` on the column *name*
        # (a str), which raised AttributeError; check the Series dtype.
        # It also returned after the first column, leaving the rest untouched.
        used_mean = False
        used_median = False
        for col in df.columns:
            if df[col].isnull().sum() != 0 and df[col].dtype in ("float64", "int64"):
                if df[col].mean() - df[col].median() < 2:
                    df[col] = df[col].fillna(df[col].mean())
                    used_mean = True
                else:
                    df[col] = df[col].fillna(df[col].median())
                    used_median = True
        if used_mean:
            return render_template("results5.html", no_outliers = "Missing Values have been replaced with mean because no outliers were present")
        if used_median:
            return render_template("results5.html", outliers = "Missing Values have been replaced with median because outliers were present")
    return render_template("results5.html", no_missing_values = "No Missing Values were present")
# here we will count how many categorical features are present
@app.route("/categorical_feature", methods = ["GET", "POST"])
def handle():
if request.method == "POST" and "cat_feature" in request.form:
cat_arr = df.select_dtypes(include=['object']).columns.tolist() # list of Categorical Features
if len(cat_arr) == 0:
return render_template("results6.html", x = "No Categorical Features were present")
else:
return render_template("results6.html", x = f"{len(cat_arr)} Categorical Features were present and these are {cat_arr}")
# Now we will do OrdinalEncoding on the categorical features
@app.route("/ordinal_encoding", methods = ["GET", "POST"])
def ord_enc():
if request.method == "POST" and "encode" in request.form:
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
cat_arr = df.select_dtypes(include=['object']).columns.tolist()
for i in cat_arr:
df[i] = encoder.fit_transform(df[[i]]).astype(int)
return render_template("results7.html", oe = "Ordinal Encoding has been done on the Categorical Features")
# And we are not adding any king of upsampling and downsampling techniques due to lack of time
# Here we are making an assumption that last feature is the target feature
# This Sets Dataset target variable as Wine Quality
@app.route("/set_target", methods = ["GET", "POST"])
def taregt():
if request.method == "POST" and "target" in request.form:
global X, y
# Independent Features
X = df.drop(df.columns[-1], axis = 1)
# Target Feature
y = df[df.columns[-1]]
return render_template("results8.html", target = "Target Feature has been set as Wine Quality")
# Now Feature Selection, checking if there are any two independent features which are highly correlated
# we will take only one of them and drop the other one, we keep threshold as 0.8
@app.route("/feature_selection", methods = ["GET", "POST"])
def feature_select():
if request.method == "POST" and "f_select" in request.form:
X = df.drop(df.columns[-1], axis = 1)
dataframe = X
threshold = 0.8
# Correlation matrix
corr_matrix = dataframe.corr().abs() # absolute value of correlation matrix
# Creating a set to hold the correlated features
corr_features = set()
# Looping through Each Feature
for i in range(len(corr_matrix.columns)):
for j in range(i):
# Check if the correlation between two features is greater than threshold
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i]
corr_features.add(colname) # we need only 1 feature out of the two highly correlated features
# Dropping the correlated features
X = X.drop(labels = corr_features, axis = 1)
X = X.dropna()
return render_template("results9.html", feature = "Feature Selection has been done")
# Here we will import standard scaler
@app.route("/standard_scaler", methods = ["GET", "POST"])
def scaler():
global scaler
if request.method == "POST" and "std_scaler" in request.form:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
return render_template("results10.html", scaler = "Standard Scaler has been imported")
# Here we will split the dataset into train and test
# Make sure to first set the target feature and then split the dataset
@app.route("/train_test_split", methods = ["GET", "POST"])
def train_test():
global X_test, X_train
if request.method == "POST" and "split" in request.form:
global X_train, X_test, y_train, y_test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
trained_size = X_train.shape
test_size = X_test.shape
return render_template("results11.html", trained = "Dataset has been split into train and test", trained_2 = "Now you can train the model"
, trained_3 = f"Train Size is {trained_size} and Test Size is {test_size}")
# now we will do standard scaling on the dataset
@app.route("/scaling", methods = ["GET", "POST"])
def transform():
global X_train_scaled, X_test_scaled
if request.method == "POST" and "scale" in request.form:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
X_train_scaled = np.nan_to_num(X_train_scaled, nan=np.nanmean(X_train_scaled))
X_test_scaled = np.nan_to_num(X_test_scaled, nan=np.nanmean(X_test_scaled))
return render_template("results12.html", scaled = "Standard Scaling has been done on the dataset")
#1 Linear Regression Training
@app.route("/linear_regression", methods = ["GET", "POST"])
def lin_reg():
global regressor
if request.method == "POST" and "linear" in request.form:
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train_scaled, y_train)
return render_template("results13.html", linear_reg = "Linear Regression Model has been trained")
#2 Ridge Regression Training
@app.route("/ridge_regression", methods = ["GET", "POST"])
def ridge_reg():
global ridge_regressor
if request.method == "POST" and "ridge" in request.form:
from sklearn.linear_model import Ridge
ridge_regressor = Ridge()
ridge_regressor.fit(X_train_scaled, y_train)
return render_template("results14.html", ridge_reg = "Ridge Regression Model has been trained")
#3 Lasso Regression Training
@app.route("/lasso_regression", methods = ["GET", "POST"])
def lasso_reg():
global lasso_regressor
if request.method == "POST" and "lasso" in request.form:
from sklearn.linear_model import Lasso
lasso_regressor = Lasso()
lasso_regressor.fit(X_train_scaled, y_train)
return render_template("results15.html", lasso_reg = "Lasso Regression Model has been trained")
#4 Elastic Net Regression Training
@app.route("/elastic_net_regression", methods = ["GET", "POST"])
def elastic_reg():
global elastic_regressor
if request.method == "POST" and "elastic" in request.form:
from sklearn.linear_model import ElasticNet
elastic_regressor = ElasticNet()
elastic_regressor.fit(X_train_scaled, y_train)
return render_template("results16.html", elastic_reg = "Elastic Net Regression Model has been trained")
#5 Logistic Regression Training
@app.route("/logistic_regression", methods = ["GET", "POST"])
def log_reg():
global log_regressor
if request.method == "POST" and "logistic" in request.form:
from sklearn.linear_model import LogisticRegression
log_regressor = LogisticRegression()
log_regressor.fit(X_train_scaled, y_train)
return render_template("results17.html", log_reg = "Logistic Regression Model has been trained")
#6 Decision Tree Classifier Training
@app.route("/decision_tree_classifier", methods = ["GET", "POST"])
def dec_tree():
global dec_tree_classifier
if request.method == "POST" and "decision" in request.form:
from sklearn.tree import DecisionTreeClassifier
dec_tree_classifier = DecisionTreeClassifier()
dec_tree_classifier.fit(X_train_scaled, y_train)
return render_template("results18.html", decs_tree = "Decision Tree Classifier Model has been trained")
#7 Decision Tree Regressor Training
@app.route("/decision_tree_regressor", methods = ["GET", "POST"])
def dec_tree_reg():
global dec_tree_regressor
if request.method == "POST" and "decision_regressor" in request.form:
from sklearn.tree import DecisionTreeRegressor
dec_tree_regressor = DecisionTreeRegressor()
dec_tree_regressor.fit(X_train_scaled, y_train)
return render_template("results19.html", decs_tree_reg = "Decision Tree Regressor Model has been trained")
#8 Support Vector Classifier Training
@app.route("/support_vector_classifier", methods = ["GET", "POST"])
def svc():
global svc_classifier
if request.method == "POST" and "support" in request.form:
from sklearn.svm import SVC
svc_classifier = SVC()
svc_classifier.fit(X_train_scaled, y_train)
return render_template("results20.html", svc = "Support Vector Classifier Model has been trained")
#9 Support Vector Regressor Training
@app.route("/support_vector_regressor", methods = ["GET", "POST"])
def svr():
global svr_regressor
if request.method == "POST" and "support_regressor" in request.form:
from sklearn.svm import SVR
svr_regressor = SVR()
svr_regressor.fit(X_train_scaled, y_train)
return render_template("results21.html", svr = "Support Vector Regressor Model has been trained")
#10 Naive Bayes Classifier Training
@app.route("/naive_bayes_classifier", methods = ["GET", "POST"])
def naive():
global naive_classifier
if request.method == "POST" and "naive" in request.form:
from sklearn.naive_bayes import GaussianNB
naive_classifier = GaussianNB()
naive_classifier.fit(X_train_scaled, y_train)
return render_template("results22.html", naive = "Naive Bayes Classifier Model has been trained")
#11 Random Forest Classifier Training
@app.route("/random_forest_classifier", methods = ["GET", "POST"])
def random():
global random_classifier
if request.method == "POST" and "random" in request.form:
from sklearn.ensemble import RandomForestClassifier
random_classifier = RandomForestClassifier()
random_classifier.fit(X_train_scaled, y_train)
return render_template("results23.html", random = "Random Forest Classifier Model has been trained")
#12 AdaBoost Classifier Training
@app.route("/adaboost_classifier", methods = ["GET", "POST"])
def adaboost():
global adaboost_classifier
if request.method == "POST" and "adaboost" in request.form:
from sklearn.ensemble import AdaBoostClassifier
adaboost_classifier = AdaBoostClassifier()
adaboost_classifier.fit(X_train_scaled, y_train)
return render_template("results24.html", adaboost = "AdaBoost Classifier Model has been trained")
# All Predictions at one place
@app.route("/prediction", methods = ["GET", "POST"])
def predict():
global y_pred_linear, y_pred_lasso, y_pred_adaboost, y_pred_dec_tree, y_pred_dec_tree_reg, y_pred_elastic, y_pred_log, y_pred_random, y_pred_ridge,y_pred_support, y_pred_support_reg, y_pred_naive
if request.method == "POST" and "predict" in request.form:
#1 Linear Regression Prediction
y_pred_linear = regressor.predict(X_test_scaled)
#2 Ridge Regression Prediction
y_pred_ridge = ridge_regressor.predict(X_test_scaled)
#3 Lasso Regression Prediction
y_pred_lasso = lasso_regressor.predict(X_test_scaled)
#4 Elastic Net Regression Prediction
y_pred_elastic = elastic_regressor.predict(X_test_scaled)
#5 Logistic Regression Prediction
y_pred_log = log_regressor.predict(X_test_scaled)
#6 Decision Tree Classifier Prediction
y_pred_dec_tree = dec_tree_classifier.predict(X_test_scaled)
#7 Decision Tree Regressor Prediction
y_pred_dec_tree_reg = dec_tree_regressor.predict(X_test_scaled)
#8 Support Vector Classifier Prediction
y_pred_support = svc_classifier.predict(X_test_scaled)
#9 Support Vector Regressor Prediction
y_pred_support_reg = svr_regressor.predict(X_test_scaled)
#10 Naive Bayes Classifier Prediction
y_pred_naive = naive_classifier.predict(X_test_scaled)
#11 Random Forest Classifier Prediction
y_pred_random = random_classifier.predict(X_test_scaled)
#12 AdaBoost Classifier Prediction
y_pred_adaboost = adaboost_classifier.predict(X_test_scaled)
return render_template("results25.html",predict = "All Predictions have been done")
# All Accuracy Scores at one place
@app.route("/accuracy", methods = ["GET", "POST"])
def accuracy():
if request.method == "POST" and "accuracy" in request.form:
#1 Linear Regression Accuracy
from sklearn.metrics import r2_score
r2_linear = r2_score(y_test, y_pred_linear)
#2 Ridge Regression Accuracy
r2_ridge = r2_score(y_test, y_pred_ridge)
#3 Lasso Regression Accuracy
r2_lasso = r2_score(y_test, y_pred_lasso)
#4 Elastic Net Regression Accuracy
r2_elastic = r2_score(y_test, y_pred_elastic)
#5 Logistic Regression Accuracy
from sklearn.metrics import accuracy_score
accuracy_log = accuracy_score(y_test, y_pred_log)
#6 Decision Tree Classifier Accuracy
accuracy_dec_tree = accuracy_score(y_test, y_pred_dec_tree)
#7 Decision Tree Regressor Accuracy
from sklearn.metrics import r2_score
r2_dec_tree_reg = r2_score(y_test, y_pred_dec_tree_reg)
#8 Support Vector Classifier Accuracy
accuracy_support = accuracy_score(y_test, y_pred_support)
#9 Support Vector Regressor Accuracy
from sklearn.metrics import r2_score
r2_support_reg = r2_score(y_test, y_pred_support_reg)
#10 Naive Bayes Classifier Accuracy
accuracy_naive = accuracy_score(y_test, y_pred_naive)
#11 Random Forest Classifier Accuracy
accuracy_random = accuracy_score(y_test, y_pred_random)
#12 AdaBoost Classifier Accuracy
accuracy_adaboost = accuracy_score(y_test, y_pred_adaboost)
max_accuracy = max(accuracy_log, accuracy_dec_tree, accuracy_support, accuracy_naive, accuracy_random, accuracy_adaboost)
return render_template("results26.html",linea_reg = r2_linear, ridg_reg = r2_ridge, lass_reg = r2_lasso, elasti_reg = r2_elastic, lo_reg = accuracy_log, de_tree = accuracy_dec_tree, de_tree_reg = r2_dec_tree_reg, suppor_vec = accuracy_support, suppor_vec_reg = r2_support_reg, naiv = accuracy_naive, rando = accuracy_random, adaboos = accuracy_adaboost, x = max_accuracy)
# Hyperparameter Tuning
# Find Best parameters that will fit all 12 models at once
# find parameters using RandomizedSearchCV
if __name__=="__main__":
app.run(host="0.0.0.0")
| ayushraina2028/basic-machine | app.py | app.py | py | 18,393 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.method"... |
31855918986 | import requests
import re
import random
import time
from bs4 import BeautifulSoup
import bs4
from fake_useragent import UserAgent
ua = UserAgent()
books = []
discounts = []
cookie = {
"bid": "6183e2a207286",
"_gcl_au": "1.1.1678734493.1636033188",
"cid": "kypss95053",
"pd": "B4MPDFMstRRagO9wOXmP3pNPoI",
"stepsession": "YaCaCwo8DR4AAT5h8nYAAAAc",
"ssid": "6183e2a207286.1637940082",
}
for i in range(10703023, 11000000):
cookie["ssid"] = (
"6183e2a207286.16379400" + str(random.randint(0, 9)) + str(random.randint(0, 9))
)
print(f"https://www.books.com.tw/products/00{i:08}", end="\r")
time.sleep(0.5)
flag = True
while flag:
try:
res = requests.get(
f"https://www.books.com.tw/products/00{i:08}",
headers={
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,zh-TW;q=0.8,zh;q=0.7",
"user-agent": ua.random,
},
cookies=cookie,
)
flag = False
except:
time.sleep(60)
flag = True
if res.status_code == 404:
print(f"{i:08}: {res.status_code}", end="\r")
continue
else:
retry = 0
while res.status_code != 200 and retry < 3:
time.sleep(60)
try:
res = requests.get(
f"https://www.books.com.tw/products/00{i:08}",
headers={"user-agent": ua.random},
cookies=cookie,
)
except:
pass
if res.status_code == 404:
retry = 3
break
retry += 1
if retry >= 3:
continue
try:
res.encoding = "utf-8"
soup = BeautifulSoup(res.text, "lxml")
soup.prettify()
book_id = i
name = "None"
author = "None"
author_original = "None"
translator = "None"
publishing_date = "None"
publishing_house = "None"
price = "None"
ISBN = "None"
discount_price = "None"
expire_date = "None"
name = soup.find_all("div", class_="type02_p002")[0].h1.text
basic_information = soup.find_all("div", class_="type02_p003")[0].ul
lis = basic_information.find_all("li", recursive=False)
for li in lis:
# print(li.children)
children = [_ for _ in li.children]
if len(children) == 1:
if re.match("出版日期*", li.text):
publishing_date = li.text[5:]
# print(publishing_date)
elif re.match("語言*", li.text):
language = li.text[3:]
# print(language)
else:
if re.match("原文作者", children[0]):
author_original = children[1].text
# print(author_original)
elif re.match("譯者", children[0]):
translator = children[1].text
# print(translator)
elif re.match("出版社", children[0]):
publishing_house = children[1].text
# print(publishing_house)
else:
if (
len(children) > 2
and type(children[2]) == bs4.element.NavigableString
and re.match("作者", children[2].strip())
):
author = children[3].text
if "," in author:
raise Exception()
price_ul = soup.find_all("ul", class_="price")[0]
price_ul_len = len(price_ul.find_all("li", recursive=False))
price = price_ul.find_all("li", recursive=False)[0].em.text
# print(price)
if price_ul_len > 1:
discount_price = (
price_ul.find_all("li", recursive=False)[1]
.find_all("strong", recursive=False)[1]
.b.text
)
# print(discount_price)
try:
expire_date = price_ul.find_all("li", recursive=False)[2].text
if expire_date[:4] == "優惠期限":
expire_date = expire_date[5:]
expire_date = expire_date.replace("年", "/")
expire_date = expire_date.replace("月", "/")
expire_date = expire_date.replace("日", "")
expire_date = expire_date.replace("止", "")
discounts = [str(book_id), discount_price, expire_date]
else:
discount = [str(book_id), discount_price, ""]
# print(expire_date)
except:
discounts = [str(book_id), discount_price, ""]
detail_div = soup.find_all("h3", text="詳細資料")[0].parent.div
if detail_div.ul.li.text[:4] == "ISBN":
ISBN = detail_div.ul.li.text[5:]
else:
raise Exception()
# print(ISBN)
book = [
str(book_id),
ISBN,
name,
author,
author_original,
translator,
publishing_house,
publishing_date,
price,
]
with open("book.csv", "r") as f:
data = f.read()
with open("book.csv", "w") as f:
f.write(data)
f.write(",".join(book) + "\n")
with open("discount.csv", "r") as f:
data = f.read()
with open("discount.csv", "w") as f:
f.write(data)
f.write(",".join(discount) + "\n")
except Exception as e:
# raise e
# break
pass
| jeff-901/bookstore | data/crawl.py | crawl.py | py | 6,009 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fake_useragent.UserAgent",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
42615079781 | import numpy as np
import glob
import sklearn.covariance as Covariance
def get_covariance_object(X, load=True):
if load:
covarianceDict = np.load('./profiles/covarianceDict.npy', allow_pickle=True)[()]
cov_object, mean, std = covarianceDict['cov_object'], covarianceDict['mean'], covarianceDict['std']
return cov_object, mean, std
mean = X.mean(0)
std = X.std()
X = (X - mean) / std
cov_object = Covariance.OAS(assume_centered=True).fit(X)
#cov_object = Covariance.EmpiricalCovariance(assume_centered=True).fit(X)
#cov_object = Covariance.ShrunkCovariance(assume_centered=True, shrinkage=0.01).fit(X)
#cov_object = MinCovDet(assume_centered=True).fit(X)
#cov_object = Covariance.GraphicalLassoCV(assume_centered=True).fit(X)
covarianceDict = {
"cov_object" : cov_object,
"mean" : mean,
"std" : std,
}
np.save('./profiles/covarianceDict.npy', covarianceDict)
#i = 300
#G=10
#plt.title("covariance matrix")
#plt.imshow(oas.covariance_[21*i:21*(i+G),21*i:21*(i+G)])
#plt.colorbar()
#plt.show()
#
#i = 400
#G=5
#plt.plot(profiles[0][21*i:21*(i+G)], label="sample")
#plt.plot(profiles.mean(0)[21*i:21*(i+G)], label="average")
#plt.legend()
#plt.show()
return cov_object, mean, std
def mahalanobis(cov_object, X, mean, std):
return cov_object.mahalanobis( (X-mean) / std )
def loadProfiles():
profilesPaths = glob.glob("./profiles/*.txt")
profilesPaths = [p for p in profilesPaths if 'samplePoints' not in p]
profiles = []
for p in profilesPaths:
profiles.append(np.loadtxt(p))
return np.stack(profiles)[:,:]
if __name__ == "__main__":
import sys
assert len(sys.argv) == 2
assert sys.argv[1] in ['load', 'save']
profiles = loadProfiles()
#rank of covariance matrix
#(scipy.linalg.svdvals(np.dot(p.T, p)) > 1e-8).sum()
load = True if sys.argv[1] == 'load' else False
cov_object, mean, std = get_covariance_object(profiles, load=load)
| scarpma/SSM_segmentation_3DSlicer_module | compute_profiles_covariance.py | compute_profiles_covariance.py | py | 2,053 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.covariance.OAS",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.covariance",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.save",
... |
38979638858 | from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.conf import settings
import requests
from dbproducts.models import Category, Product
from dbproducts.related_functions import symbol_removal
class Command(BaseCommand):
""" This command will populate server. Only use once"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# informations that will be inserted into database
self.informations_gathered = []
# id gathered from website, usefull for unicity
self.id_prod_website = []
def handle(self, *args, **options):
""" This function will populate the database"""
# We first need to gather informations from
# OpenFoodFacts.
def gather_informations():
"""This function will gather informations using environment
variables from django.conf.settings on OpenFoodFacts website and
store them in self.informations_gathered list"""
print("gathering informations from Open Food Facts.")
for num, val in enumerate(settings.PROD_CATEGORIES):
print("pass : {} / {}".format(num+1, len(settings.PROD_CATEGORIES)))
# Adding a category to the dictionnary
settings.SITE_PARAMETERS[settings.TAG_CAT] = val
req = requests.get(settings.OPEN_FOOD_SITE,
settings.SITE_PARAMETERS).json()
# Removing useless valors
req = req["products"]
for prods in req:
if prods["_id"] not in self.id_prod_website \
and "product_name_fr" in prods and \
prods["product_name_fr"] not in \
self.informations_gathered:
try:
self.informations_gathered.append((prods["_id"],
prods["product_name_fr"],
prods["image_front_url"],
ord(prods["nutrition_grades"]),
prods["image_nutrition_url"],
prods["url"],
prods["categories_hierarchy"],
))
self.id_prod_website.append(prods["_id"])
except KeyError:
pass
print("{} products gathered from OpenFoodFacts website"\
.format(len(self.informations_gathered)))
def populating_db():
"""This function will populate database with informations gathered
from gather_informations() function"""
# Message for admin
print("Populating database with OpenFoodFacts informations")
# Marker of sub categories.
# Will be used to not add them if already inserted in database
sub_cat = []
#marker for insertion
marker = 1
show_five_hundred = 500
# Inserting product into Products table
for info_product in self.informations_gathered:
try:
add_prod = Product.objects.create(id=info_product[0],
product_name=info_product[1],
img_front_url=info_product[2],
nutri_grade=info_product[3],
img_nutrition_url=info_product[4],
web_link=info_product[5],
)
marker += 1
if marker == show_five_hundred:
print("{} insertions into database so far".format(show_five_hundred))
show_five_hundred += 500
for num_sub_categories in info_product[6]:
# Creating a var without symbols, converted for
# SQL policy
sub_category = symbol_removal(num_sub_categories)
if sub_category in sub_cat:
add_sub_cat = Category.objects.get(name=sub_category)
else:
sub_cat.append(sub_category)
add_sub_cat = Category.objects.create(name=sub_category)
#Adding many to many relation between product and category
add_prod.categories.add(add_sub_cat)
except FieldDoesNotExist:
print("The column you are trying to fullfill doesn't exist")
except FieldError:
print("The information you are trying to enter \
into the database has incorrect values")
# Running all functions wrote in "handle" Command function
gather_informations()
populating_db()
| guillaumecarru/Pur_Beurre_Website | dbproducts/management/commands/populate_db.py | populate_db.py | py | 5,396 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.PROD_CATEGORIES",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 32,
"usage_type":... |
24522787900 | """Copied from cpython to ensure compatibility"""
import io
from typing import Any, Callable, Dict
BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size
class BaseStream(io.BufferedIOBase):
"""Mode-checking helper functions."""
def _check_not_closed(self):
if self.closed:
raise ValueError("I/O operation on closed file")
def _check_can_read(self):
if not self.readable():
raise io.UnsupportedOperation("File not open for reading")
def _check_can_write(self):
if not self.writable():
raise io.UnsupportedOperation("File not open for writing")
def _check_can_seek(self):
if not self.readable():
raise io.UnsupportedOperation(
"Seeking is only supported " "on files open for reading"
)
if not self.seekable():
raise io.UnsupportedOperation(
"The underlying file object " "does not support seeking"
)
class DecompressReader(io.RawIOBase):
"""Adapts the decompressor API to a RawIOBase reader API"""
def readable(self):
return True
def __init__(
self,
fp: io.IOBase,
decomp_factory: Callable,
**decomp_args: Dict[str, Any],
):
self._fp = fp
self._eof = False
self._pos = 0 # Current offset in decompressed stream
# Set to size of decompressed stream once it is known, for SEEK_END
self._size = -1
# Save the decompressor factory and arguments.
# If the file contains multiple compressed streams, each
# stream will need a separate decompressor object. A new decompressor
# object is also needed when implementing a backwards seek().
self._decomp_factory = decomp_factory
self._decomp_args = decomp_args
self._decompressor = self._decomp_factory(**self._decomp_args)
# Exception class to catch from decompressor signifying invalid
# trailing data to ignore
self._buffer = bytearray() # type: bytearray
def close(self) -> None:
self._decompressor = None
return super().close()
def seekable(self) -> bool:
return self._fp.seekable()
def readinto(self, b) -> int:
with memoryview(b) as view, view.cast("B") as byte_view:
data = self.read(len(byte_view))
byte_view[: len(data)] = data
return len(data)
def read(self, size=-1) -> bytes: # todo 这个是重点
if size < 0:
return self.readall()
if size <= len(self._buffer):
self._pos += size
ret = bytes(self._buffer[:size])
del self._buffer[:size]
return ret
if not size or self._eof:
return b""
# data = None # Default if EOF is encountered
# Depending on the input data, our call to the decompressor may not
# return any data. In this case, try again after reading another block.
# try:
while True:
rawblock = self._fp.read(BUFFER_SIZE)
if not rawblock:
break
self._buffer.extend(self._decompressor.decompress(rawblock))
if len(self._buffer) >= size:
break
if len(self._buffer) >= size:
self._pos += size
ret = bytes(self._buffer[:size])
del self._buffer[:size]
else: # 不够长了
self._pos += len(self._buffer)
self._eof = True
self._size = self._pos
ret = bytes(self._buffer)
self._buffer.clear()
return ret
def readall(self) -> bytes:
while True:
rawblock = self._fp.read(BUFFER_SIZE)
if not rawblock:
break
self._buffer.extend(self._decompressor.decompress(rawblock))
self._pos += len(self._buffer)
ret = bytes(self._buffer)
self._buffer.clear()
return ret
# Rewind the file to the beginning of the data stream.
def _rewind(self):
self._fp.seek(0)
self._eof = False
self._pos = 0
self._buffer.clear()
self._decompressor = self._decomp_factory(**self._decomp_args)
def seek(self, offset, whence=io.SEEK_SET):
# Recalculate offset as an absolute file position.
if whence == io.SEEK_SET:
pass
elif whence == io.SEEK_CUR:
offset = self._pos + offset
elif whence == io.SEEK_END:
# Seeking relative to EOF - we need to know the file's size.
if self._size < 0:
while self.read(io.DEFAULT_BUFFER_SIZE):
pass
offset = self._size + offset
else:
raise ValueError("Invalid value for whence: {}".format(whence))
# Make it so that offset is the number of bytes to skip forward.
if offset < self._pos:
self._rewind()
else:
offset -= self._pos
# Read and discard data until we reach the desired position.
while offset > 0:
data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset))
if not data:
break
offset -= len(data)
return self._pos
def tell(self) -> int:
"""Return the current file position."""
return self._pos
| synodriver/python-bz3 | bz3/compression.py | compression.py | py | 5,403 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "io.DEFAULT_BUFFER_SIZE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "io.BufferedIOBase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "io.UnsupportedOperation",
"line_number": 17,
"usage_type": "call"
},
{
"api_name... |
3262515470 | #!/usr/bin/env python3
import secrets
import sys
import subprocess
import argparse
import headerStruct
def calculateSizeOfTheImage(lastVirtAddr, size, SectAlignment):
mult = int((size-1) / SectAlignment) + 1
return lastVirtAddr + (SectAlignment * mult)
def generateKey(arch):
# init values
wordlength = 4
mask = 0x000000FF
if arch == 64:
wordlength = 8
mask = 0x00000000000000FF
# kind of do while
# generate random key until the two last digit != 00
random = secrets.token_bytes(wordlength)
while(int.from_bytes(random, 'big') & mask == 0):
random = secrets.token_bytes(wordlength)
return random
def hashing(data, arch): # TODO implement a real hashing function
# init values
wordlength = 4
if arch == 64:
wordlength = 8
output = 0
# xor each word with each other to create a kind of hash
for i in range(int(len(data)/wordlength)):
output ^= int.from_bytes(data[wordlength*i:wordlength*(i+1)], 'big')
return output
def xorDat(data, key):
data = bytearray(data)
for i in range(len(data)):
data[i] ^= key[i % len(key)]
return data
def createUnpacker(ADDRESS_OEP, ADDRESS_CODE_START, TOTAL_CODE_SIZE, PARTIAL_KEY, CORRECT_HASH, arch):
if arch == 32:
mask = 0xFFFFFF00
hexFormat = '08x'
file = "unpacker32.asm"
runtimeOffset = 0x400000
elif arch == 64:
mask = 0xFFFFFFFFFFFFFF00
hexFormat = '016x'
file = "unpacker64.asm"
runtimeOffset = 0x140000000
else:
exit(5)
# remove two last bytes
key = int.from_bytes(PARTIAL_KEY, 'little') & mask
# little indianing
hs = format(CORRECT_HASH, hexFormat)
# if [:] out of bound, it returns ''
final_string = "0x" + hs[14:16] + hs[12:14] + hs[10:12] + hs[8:10] + hs[6:8] + hs[4:6] + hs[2:4] + hs[:2]
subprocess.run(["cp", file, "tmpUnpack.asm"])
subprocess.run(["sed", "-i", "-e", f"s/ADDRESS_CODE_START/{hex(ADDRESS_CODE_START + runtimeOffset)}/g", "tmpUnpack.asm"])
subprocess.run(["sed", "-i", "-e", f"s/TOTAL_CODE_SIZE/{hex(TOTAL_CODE_SIZE)}/g", "tmpUnpack.asm"])
subprocess.run(["sed", "-i", "-e", f"s/PARTIAL_KEY/{hex(key)}/g", "tmpUnpack.asm"])
subprocess.run(["sed", "-i", "-e", f"s/CORRECT_HASH/{final_string}/g", "tmpUnpack.asm"])
subprocess.run(["sed", "-i", "-e", f"s/ADDRESS_OEP/{hex(ADDRESS_OEP + runtimeOffset)}/g", "tmpUnpack.asm"])
subprocess.run(["nasm", "tmpUnpack.asm"])
subprocess.run(["rm", "tmpUnpack.asm"])
with open("tmpUnpack", "rb") as f:
output = f.read()
subprocess.run(["rm", "tmpUnpack"])
return output
#######################################################
# PARSING #
#######################################################
def parsing(filename):
    """Read a PE executable and parse its headers.

    Returns a 6-tuple: (raw bytes, offsets dict, MSDOS header, PE header,
    optional header, section headers). Exits with code 1 when the
    architecture is neither 32 nor 64 bit.
    """
    with open(filename, "rb") as handle:
        binary = handle.read()
    # Running table of header offsets inside the file.
    offsets = {"begin": 0}
    # The MS-DOS stub occupies the first 64 bytes and points at the PE header.
    msdos = headerStruct.MSDOS(binary[:64])
    offsets["pe"] = msdos.pe_offset
    offsets["PEOpt"] = offsets["pe"] + 24
    pe = headerStruct.PEHeader(binary[offsets["pe"]:offsets["PEOpt"]])
    # Section table follows the optional header; each entry is 40 bytes.
    offsets["section"] = offsets["PEOpt"] + pe.SizeOfOptionalHeader
    offsets["EndSection"] = offsets["section"] + 40 * pe.NumberOfSections
    if pe.getArch() == 0:
        print("Not 32 nor 64 bits")
        exit(1)
    opt = headerStruct.PEOptHeader(binary[offsets["PEOpt"]:offsets["section"]], pe.getArch())
    sections = headerStruct.SectionHeader(binary[offsets["section"]:offsets["EndSection"]], pe.NumberOfSections)
    return binary, offsets, msdos, pe, opt, sections
#######################################################
# INFO PRINT #
#######################################################
def giveInfo(binary, offsets, msdos, pe, opt, sections):
    """Print a full human-readable report of the parsed PE file.

    Dumps each header, the section table, how many section slots remain
    in the header area, and a coloured box diagram of the file layout.
    Purely informational; nothing is modified.
    """
    print(f"################# MS DOS ################\nStarts at : {hex(offsets['begin'])}")
    msdos.printAll()
    print(f"################### PE ##################\nStarts at : {hex(offsets['pe'])}")
    pe.printAll()
    print(f"############ OPTIONAL HEADERS ###########\nStarts at : {hex(offsets['PEOpt'])}")
    imgSize = calculateSizeOfTheImage(sections.getSectionLast("VirtualAddress"), sections.getSectionLast("Misc"), opt.SectionAlignment)
    opt.printAll(sections, imgSize, offsets["section"], binary)
    print(f"################ SECTIONS ###############\nStarts at : {hex(offsets['section'])}")
    sections.sectionsInfo(True)
    # Free 40-byte slots between the section table and SizeOfHeaders
    # (float on purpose: a fractional result shows partial space).
    nbleft = (opt.SizeOfHeaders - (offsets['section'] + pe.NumberOfSections * 40)) / 40
    if nbleft >= 1:
        print(f"\n\033[32mCan add {nbleft} sections\033[39m")
    else:
        print(f"\n\033[31mCan't add any section. Size left = {nbleft} sections\033[39m")
    print("\n#########################################\n")
    # Coloured ASCII box diagram of the file layout.
    print(f"\033[36m0x{format(offsets['begin'], '08x')} ################")
    print(" "*11 + " MS DOS ")
    print(f"0x{format(offsets['pe'] - 1, '08x')} ################")
    print(f"\033[32m0x{format(offsets['pe'], '08x')} ################")
    print(" "*11 + " PE ")
    print(f"0x{format(offsets['PEOpt'] - 1, '08x')} ################")
    print(f"\033[33m0x{format(offsets['PEOpt'], '08x')} ################")
    print(" "*11 + " OPT HEAD ")
    print(f"0x{format(offsets['section'] - 1, '08x')} ################")
    print(f"\033[34m0x{format(offsets['section'], '08x')} ################")
    print(" "*11 + " SECTIONS ")
    print(f"0x{format(opt.SizeOfHeaders - 1, '08x')} ################")
    print(f"\033[39m###########################")
    endOfLastSection = sections.printBox()
    # Bytes past the last section are appended data the headers do not describe.
    if len(binary) > endOfLastSection:
        print(f"\033[31m0x{format(endOfLastSection, '08x')} ################")
        print(" "*11 + " UNKNOWN ")
        print(f"0x{format(len(binary) - 1, '08x')} ################")
        print(f"\033[39m###########################")
    # Section names in the table are 8 bytes, NUL padded.
    rsrc = headerStruct.Rsrc(binary[sections.getStartAddr(b".rsrc\x00\x00\x00"):sections.getEndAddr(b".rsrc\x00\x00\x00")],
                             sections.getVirtStart(b".rsrc\x00\x00\x00")
                             )
    print(rsrc)
def changeRsrc(binary, sections, filename):
    """Interactively edit the .rsrc section and write ``<filename>.rsrc.exe``.

    binary: raw bytes of the parsed executable.
    sections: parsed section table (provides the .rsrc byte range).
    filename: original executable path, used to name the output file.
    """
    rsrc_name = b".rsrc\x00\x00\x00"  # PE section names are 8 bytes, NUL padded
    rsrc = headerStruct.Rsrc(binary[sections.getStartAddr(rsrc_name):sections.getEndAddr(rsrc_name)],
                             sections.getVirtStart(rsrc_name)
                             )
    rsrc.change()
    # Bug fix: the output name previously ignored the *filename* argument and
    # always wrote a literal "(unknown).rsrc.exe". Mirror main()'s
    # "<filename>.packed.exe" convention instead.
    with open(f"{filename}.rsrc.exe", "wb") as f:
        f.write(binary[:sections.getStartAddr(rsrc_name)] +
                rsrc.repack() +
                binary[sections.getEndAddr(rsrc_name):]
                )
def testSectionName(sectionName, default, sections, exists):
if sectionName == None:
sectionName = default
if len(sectionName) > 8:
verboseLog(1, f"Section name {sectionName} too long")
exit(6)
sectionName = bytes(sectionName, 'utf-8') + b'\x00' * (8 - len(sectionName))
if sections.isExisting(sectionName) != exists:
verboseLog(1, "Conflict with existing sections")
exit(7)
return sectionName
def addNewSection(newSect, size, pe, opt, sections):
    """Register a new section *newSect* of *size* bytes in all headers.

    Appends a section-table entry (0x1000 alignment, characteristics
    0x60000020 -- presumably code/executable/readable, confirm against the
    PE spec), bumps the PE section count, grows the optional header's code
    size, recomputes SizeOfImage and clears the now-stale checksum.
    """
    sections.addSection(newSect, size, 0x1000, 0x60000020)
    pe.addSection()
    opt.addCode(0x1000)
    imgSize = calculateSizeOfTheImage(
        sections.getSectionLast("VirtualAddress"),
        sections.getSectionLast("Misc"),
        opt.SectionAlignment,
    )
    opt.setSizeOfImage(imgSize)
    # Checksum is invalid after editing headers; remove it so loaders accept the file.
    opt.rmChecksum()
def packingBack(offsets, pe, opt, sections, packedSect, unpacker, binary, unpackingSect, sectionToPack, packedSize):
    """Reassemble the final packed binary and return its bytes.

    Layout: rebuilt headers, untouched data up to the packed section, the
    encrypted payload plus the unpacked remainder of that section, any
    sections in between, the unpacker stub padded to a full 0x1000-byte
    section, then everything that followed the stub's position.
    (Removed the unused `upckEnd = sections.getEndAddr(unpackingSect)`.)
    """
    unpack_start = sections.getStartAddr(unpackingSect)
    pack_start = sections.getStartAddr(sectionToPack)
    pack_end = sections.getEndAddr(sectionToPack)
    # Rebuilt headers followed by everything up to the section being packed.
    headers = binary[0:offsets["pe"]] + pe.repack() + opt.repack() + sections.repack()
    before_pack = binary[offsets["EndSection"]:pack_start]
    # Encrypted payload plus the tail of the section that stayed unpacked.
    packed_part = packedSect + binary[pack_start + packedSize:pack_end]
    between = binary[pack_end:unpack_start]
    # Unpacker stub padded to its full 0x1000-byte section.
    stub = unpacker + b'\x00' * (0x1000 - len(unpacker))
    tail = binary[unpack_start:]
    return headers + before_pack + packed_part + between + stub + tail
def verboseLog(ok, message):
    """Print *message* with a coloured status tag when global *verbose* is set.

    ok: 0 -> OK (green), 1 -> FAIL (red), 2 -> INFO (yellow).
    Raises ValueError for any other status code (previously an unrelated
    NameError, because `code` was left unbound).
    """
    codes = {
        0: "[ \033[32mOK\033[39m ]",
        1: "[ \033[31mFAIL\033[39m ]",
        2: "[ \033[33mINFO\033[39m ]",
    }
    try:
        code = codes[ok]
    except KeyError:
        raise ValueError(f"Unknown status code: {ok}") from None
    if verbose:
        print(f"{code} {message}")
def main(args):
    """Pack the executable named by *args* (or just print info / edit .rsrc).

    Returns 0 on success; helper functions may terminate the process via
    exit() on validation failures.
    """
    (
        binary, offsets, msdos, pe, opt, sections
    ) = parsing(args.filename)
    if args.info:
        # Info mode: report and stop without touching the file.
        giveInfo(binary, offsets, msdos, pe, opt, sections)
        return 0
    if args.rsrc:
        changeRsrc(binary, sections, args.filename)
    verboseLog(0, "Binary parsed")
    #######################################################
    #                  SETUP VARIABLES                    #
    #######################################################
    # The packed section must already exist; the unpacker section must not.
    sectionToPack = testSectionName(args.section, '.text', sections, True)
    unpackingSect = testSectionName(args.new, '.unpack', sections, False)
    # Get original entry point
    entry = opt.getOEP()
    # Get starting and finishing address of the section to pack
    beginPack = sections.getStartAddr(sectionToPack)
    EndPack = sections.getEndAddr(sectionToPack)
    # be sure packedSize is multiple of 4 (or 8) and secure the hash
    wordlength = 4
    if pe.getArch() == 64:
        wordlength = 8
    packedSize = EndPack - beginPack
    # Shrink until the size is a word multiple with an ODD word count
    # (presumably required by the XOR-fold hash in hashing(); confirm).
    while(
        packedSize % wordlength != 0 or
        packedSize % wordlength == 0 and (packedSize/wordlength)%2==0
    ):
        packedSize -= 1
    verboseLog(2, f"Packed size : {hex(packedSize)}")
    # The unpacker decrypts in place, so the section must be writable.
    sections.addRight(sectionToPack, 'w')
    key = generateKey(pe.getArch())
    verboseLog(0, f"Key generated with success : {key}")
    packedSect = xorDat(binary[beginPack:beginPack + packedSize], key)
    # Hash is computed on the PLAIN bytes; the stub compares after decryption.
    goodHash = hashing(binary[beginPack:beginPack + packedSize], pe.getArch())
    verboseLog(0, f"Hash generated with success : {hex(goodHash)}")
    unpacker = createUnpacker(entry, beginPack, packedSize, key, goodHash, pe.getArch())
    verboseLog(0, "Unpacker created with success")
    #######################################################
    #          CREATING NEW SECTION CHANGE EP             #
    #######################################################
    if sections.getLowerAddr() > opt.SizeOfHeaders:
        verboseLog(2, "Size of header inferior to their real size... fixing")
        opt.SizeOfHeaders = sections.getLowerAddr()
        verboseLog(0, "Size of header fixed")
    # Remaining 40-byte section slots before the first section's data.
    nbleft = (opt.SizeOfHeaders - (offsets['section'] + pe.NumberOfSections * 40)) / 40
    if nbleft >= 1:
        verboseLog(0, f"Can add {nbleft} sections")
    else:
        verboseLog(1, f"Can't add any section. Size left = {nbleft} sections")
    # Create new pack section
    addNewSection(unpackingSect, len(unpacker), pe, opt, sections)
    offsets["EndSection"] += 40
    verboseLog(0, f"New section {unpackingSect} added")
    # Change entry point
    opt.setEP(sections.getVirtStart(unpackingSect))
    verboseLog(0, "Entry point changed")
    #######################################################
    #                   PACKING BACK                      #
    #######################################################
    packedBin = packingBack(
        offsets, pe, opt, sections, packedSect, unpacker,
        binary, unpackingSect, sectionToPack, packedSize
    )
    verboseLog(0, "Binary packed")
    with open("{}.packed.exe".format(args.filename), "wb") as f:
        f.write(packedBin)
    verboseLog(0, f"{args.filename}.packed.exe created")
    return 0
# Module-level verbosity flag: set from the CLI below, read by verboseLog().
verbose = False

if __name__=="__main__":
    # Command-line interface: one positional target plus packing options.
    cli = argparse.ArgumentParser(description='Packer for windows 32/64 binary')
    cli.add_argument('filename', metavar='filename', type=str,
                     help='Name of the executable to tweak')
    cli.add_argument('-i', '--info', action='store_true', default=False,
                     help='Give general informations about the file')
    cli.add_argument('-s', '--section', type=str,
                     help='Section to pack')
    cli.add_argument('-n', '--new', type=str,
                     help='Name of the new unpacking section')
    cli.add_argument('-v', '--verbose', action='store_true', default=False,
                     help='Activate verbosity in the program')
    cli.add_argument('-r', '--rsrc', action='store_true', default=False,
                     help='Rewrite .rsrc section')
    args = cli.parse_args()
    verbose = args.verbose
    # Propagate main()'s return value as the process exit status.
    exit(main(args))
| idrirap/projectEthHack | packer.py | packer.py | py | 13,046 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "secrets.token_bytes",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "secrets.token_bytes",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "subprocess.run... |
19681624663 | from PIL import Image, ImageFilter
# Load ./astro.jpg and write a thumbnail copy (bounded to 400x400 pixels).
img = Image.open('./astro.jpg')
# filtered_img = img.filter(ImageFilter.BLUR) # Blurs the image
# filtered_img = img.filter(ImageFilter.SMOOTH) # Smooth the image
# filtered_img = img.filter(ImageFilter.SHARPEN) # Sharpens the image
# filtered_img = img.convert('L') # converts the image into Grey
# rotate = filtered_img.rotate(180) # rotates the image in angles
# # resize = filtered_img.resize((300,300)) # This is to resize the image
# box = (100, 100, 400, 400)
# region = filtered_img.crop(box)
# region.save('rotate.png', 'png') The above 3 code lines are used to resize the picture
# thumbnail() resizes IN PLACE and preserves aspect ratio (unlike resize()).
img.thumbnail((400,400))
img.save('thumbnail.jpg')
| Yeshwanth37/ImageProcessing | Image.py | Image.py | py | 687 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 3,
"usage_type": "name"
}
] |
26237350861 | import logging
from datetime import datetime, timedelta
from typing import Dict, Optional
from synthetic.user.profile_data_update import ProfileDataUpdate
from synthetic.utils.time_utils import total_difference_seconds
logger = logging.getLogger(__name__)
class BaseVariableManager:
    """Responsible for managing a variable over time. Any information placed in the passed-in "stored_data" dict will
    be persisted in the database.

    Subclasses implement initialize() and update_variable(); update() then
    replays one update per elapsed increment since the last seen timestamp.
    """

    def __init__(self, stored_data: Dict, initial_ts: datetime, variable_name: str, update_increment_seconds=86400):
        # stored_data is shared, externally persisted state; this manager
        # keeps its own entries under stored_data["managers"][variable_name].
        self._stored_data = stored_data
        self._variable_name = variable_name
        self._update_increment_seconds = update_increment_seconds
        if "last_seen_ts" not in self.data:
            self.data["last_seen_ts"] = initial_ts.timestamp()

    def get_manager_data(self) -> Dict:
        """Return (creating if needed) this manager's private sub-dict."""
        if "managers" not in self._stored_data:
            self._stored_data["managers"] = {}
        if self._variable_name not in self._stored_data["managers"]:
            self._stored_data["managers"][self._variable_name] = {}
        return self._stored_data["managers"][self._variable_name]

    def reset(self):
        # Optional hook for subclasses; base implementation does nothing.
        pass

    def initialize(self):
        # Must be provided by subclasses (called from set_data()).
        raise NotImplementedError()

    def update(self, current_ts: datetime) -> Optional[Dict[datetime, ProfileDataUpdate]]:
        """Advance the variable to *current_ts* in fixed increments.

        Calls update_variable() once per whole increment elapsed since
        last_seen_ts and returns {timestamp: update} for the non-None
        results, or None when nothing was produced.
        """
        if "last_seen_ts" not in self.data:
            raise ValueError("Variable %s not properly initialized!" % (type(self),))
        # NOTE(review): the `else current_ts` branch is unreachable -- the
        # check above guarantees "last_seen_ts" is present.
        last_seen_ts = datetime.fromtimestamp(self.data["last_seen_ts"]) if "last_seen_ts" in self.data else current_ts
        updates: Optional[Dict[datetime, ProfileDataUpdate]] = None
        while total_difference_seconds(last_seen_ts, current_ts) >= self._update_increment_seconds:
            update = self.update_variable()
            last_seen_ts += timedelta(seconds=self._update_increment_seconds)
            if update is not None:
                if updates is None:
                    updates = {}
                # Record the advanced timestamp inside the update so it is
                # persisted alongside the variable change.
                update.add_set_variable(f"managers/{self._variable_name}/last_seen_ts", last_seen_ts.timestamp())
                updates[last_seen_ts] = update
        self.data["last_seen_ts"] = last_seen_ts.timestamp()
        return updates

    def update_variable(self) -> ProfileDataUpdate:
        # Must be provided by subclasses; may return None for "no change".
        raise NotImplementedError()

    @property
    def data(self) -> Dict:
        # Shorthand for this manager's persisted sub-dict.
        return self.get_manager_data()

    def set_data(self, data):
        """Swap in a new stored_data dict and re-run subclass initialization."""
        self._stored_data = data
        self.initialize()

    @property
    def variable_name(self) -> str:
        return self._variable_name
| benshi-ai/open-synthetic-data-generator | src/synthetic/managers/base_manager.py | base_manager.py | py | 2,574 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
... |
6363891788 | from dataclasses import dataclass, field
from typing import Any, Iterable, List, Dict
from config import SUBJECT_PATTERNS, DAYS
@dataclass(kw_only=True)
class Pattern:
subject_type: str
classes: int
duration: int
required_rooms: List[Any] = field(init=False, default_factory=list)
def add_rooms(self, *args: Iterable[object]) -> None:
self.required_rooms.extend(args)
class GeneralPattern:
    """Holds one (initially empty) schedule dict per configured day."""
    __slots__ = ('__days',)

    def __init__(self):
        # Comprehension creates an INDEPENDENT dict per day
        # (dict.fromkeys would share a single one).
        self.__days = {day: {} for day in DAYS}
def get_pattern(*, patterns: List[Dict[str, int]]) -> List[Pattern]:
    """Build one Pattern per subject type, pairing SUBJECT_PATTERNS with *patterns*.

    zip() stops at the shorter sequence, so extra entries on either side
    are silently dropped.
    """
    # List comprehension instead of list(generator) -- same result, clearer.
    return [Pattern(subject_type=subject_type, **pattern)
            for subject_type, pattern in zip(SUBJECT_PATTERNS, patterns)]
| oneku16/UCA-schedule-generator | brute_force_2/subject/pattern.py | pattern.py | py | 726 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_... |
5142368170 | import sklearn
from sklearn.utils import shuffle
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
from sklearn import linear_model, preprocessing
# Encode the categorical car-evaluation dataset and fit a K-Nearest-Neighbours
# classifier (k=10), then print each test prediction against its true label.
data = pd.read_csv("car.data")
print(data.head())

# LabelEncoder maps each categorical column to integer codes.
# NOTE(review): one encoder instance is re-fit per column, so codes are only
# meaningful within a single column.
le = preprocessing.LabelEncoder()
buying = le.fit_transform(list(data["buying"]))
maint = le.fit_transform(list(data["maint"]))
door = le.fit_transform(list(data["door"]))
persons = le.fit_transform(list(data["persons"]))
lug_boot = le.fit_transform(list(data["lug_boot"]))
safety = le.fit_transform(list(data["safety"]))
cls = le.fit_transform(list(data["class"]))
predict = "class"  # target column name (currently unused below)

# Feature rows are 6-tuples of encoded columns; labels are the encoded class.
x=list(zip(buying, maint, door, persons, lug_boot, safety))
y = list(cls)

# Hold out 10% of the rows for evaluation.
xtrain, xtest, ytrain, ytest = sklearn.model_selection.train_test_split(x,y, test_size=0.1)

model = KNeighborsClassifier(n_neighbors=10)
model.fit(xtrain,ytrain)
acc = model.score(xtest, ytest)
print(f"Prediction accuracy {acc}")
print()
print("--------------------------------------------------------------------------------------")
# Human-readable class names, indexed by the encoded label.
names = ["UNACC","ACC", "GOOD", "VERY GOOD"]
predictions = model.predict(xtest)

# NOTE(review): this loop variable shadows the feature list `x` above.
for x in range(len(xtest)):
    print("PREDICTION : ", names[predictions[x]],"DATA : ",xtest[x], "ACTUAL VALUE : ", names[ytest[x]])
    if predictions[x] == ytest[x]:
        print("Prediction is true")
    else:
        print("Prediction is false")
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 11,
"usage_type": "name"
},
{
"api_name"... |
8362112734 | # ---------------------------
# Problem 2
# Given an array of integers, return a new array such that each element at index i of the new array is the
# product of all the numbers in the original array except the one at i.
#
# For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24].
# If our input was [3, 2, 1], the expected output would be [2, 3, 6].
#
# Follow-up: what if you can't use division?
# ---------------------------
from functools import reduce
def calculate_product(values: list):
    """Return a list where element i is the product of all values except values[i].

    Uses prefix/suffix products: O(n) time instead of the previous
    O(n^2) rebuild-and-reduce per element, and no division (satisfying the
    problem's follow-up). Also handles empty and single-element input
    (the old reduce() raised TypeError on a one-element list).
    """
    n = len(values)
    result = [1] * n
    # First pass: result[i] becomes the product of everything LEFT of i.
    prefix = 1
    for i in range(n):
        result[i] = prefix
        prefix *= values[i]
    # Second pass: multiply in the product of everything RIGHT of i.
    suffix = 1
    for i in range(n - 1, -1, -1):
        result[i] *= suffix
        suffix *= values[i]
    return result
# Sanity checks from the problem statement (run at import time).
assert calculate_product([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24]
assert calculate_product([3, 2, 1]) == [2, 3, 6]
# Negative control: a wrong expected output must not compare equal.
assert not calculate_product([1, 2, 3, 4, 5]) == [140, 60, 40, 30, 21]
{
"api_name": "functools.reduce",
"line_number": 23,
"usage_type": "call"
}
] |
72966759394 | import json
import requests
import sys
import glance_check.exception as exc
class GlanceCheck:
    """Health checks against an OpenStack Glance (image) API endpoint.

    Each check_* method performs one HTTP call and raises a specific
    glance_check.exception error when the response status is unexpected.
    """

    def __init__(self, creds=None, imageid=None, os_image_url=None,
                 cacert=None, verbose=False):
        # creds: dict with os_auth_url / os_username / os_tenant / os_password.
        # imageid: image UUID used by the create/get checks.
        # cacert: CA bundle path passed to requests' `verify`, or None.
        self.__imageid = imageid
        self.__image_url = os_image_url
        self.__auth_url = creds['os_auth_url']
        self.__username = creds['os_username']
        self.__tenant = creds['os_tenant']
        self.__password = creds['os_password']
        self.__cacert = cacert
        self.__verbose = verbose

    def print_verbose(self, message):
        """Write *message* to stderr when verbose mode is enabled."""
        if self.__verbose:
            sys.stderr.write("%s\n" % message)

    def check_create_image(self, token):
        """POST a test image; raise on conflict or any non-201 response."""
        self.print_verbose("Attempting to create image id: %s using the api"
                           " at %s" % (self.__imageid, self.__image_url))
        headers = {'X-Auth-Token': token}
        payload = json.dumps({"name": "GlanceCheck test image",
                              "id": self.__imageid,
                              "tags": ["test"]})
        request = requests.post(("%s/v2/images" % self.__image_url),
                                headers=headers, data=payload,
                                verify=self.__cacert)
        self.print_verbose(request.text)
        if not request.status_code == requests.codes.created:
            # 409 means a previous test image was left behind.
            if request.status_code == requests.codes.conflict:
                raise exc.TestImageAlreadyExistsError()
            else:
                raise exc.CreateImageError(
                    request.status_code)

    def check_get_image(self, token):
        """GET the test image's metadata; raise on 404 or any non-200."""
        self.print_verbose("Attempting to download image id: %s using the"
                           " api at %s" % (self.__imageid, self.__image_url))
        headers = {'X-Auth-Token': token}
        request = requests.get(("%s/v2/images/%s" % (self.__image_url,
                                                     self.__imageid)),
                               headers=headers,
                               verify=self.__cacert)
        self.print_verbose(request.text)
        if not request.status_code == requests.codes.ok:
            if request.status_code == requests.codes.not_found:
                raise exc.TestImageNotFoundError()
            else:
                raise exc.GetImageError(
                    request.status_code)

    def check_connection(self):
        """Unauthenticated GET on the API root; expects 300 Multiple Choices
        (Glance's version-listing response)."""
        self.print_verbose("Attempting to connect to glance at %s"
                           % self.__image_url)
        request = requests.get(self.__image_url,
                               verify=self.__cacert)
        self.print_verbose(request.text)
        if not request.status_code == requests.codes.multiple_choices:
            raise exc.CheckConnectionError(
                request.status_code)

    def check_update_image(self, token):
        """PATCH an arbitrary property onto the test image; raise on non-200."""
        self.print_verbose("Attempting to update image id: %s using the api"
                           " at %s" % (self.__imageid, self.__image_url))
        headers = {'X-Auth-Token': token,
                   'Content-Type':
                   "application/openstack-images-v2.1-json-patch"}
        payload = json.dumps([{"path": "/foo", "value":
                               "bar", "op": "add"}])
        request = requests.patch(
            "%s/v2/images/%s" %
            (self.__image_url, self.__imageid),
            headers=headers, data=payload, verify=self.__cacert)
        self.print_verbose(request.text)
        if not request.status_code == requests.codes.ok:
            raise exc.UpdateImageError(
                request.status_code)

    def get_keystone_token_v2(self):
        """Authenticate against Keystone v2 and return the token id string."""
        self.print_verbose("Attempting to get a keystone token using v2 of"
                           " the api at %s" % self.__auth_url)
        payload = json.dumps({"auth":
                              {"tenantName": self.__tenant,
                               "passwordCredentials":
                               {"username": self.__username,
                                "password": self.__password}}})
        request = requests.post(("%s/tokens"
                                 % self.__auth_url),
                                data=payload,
                                verify=self.__cacert)
        self.print_verbose(request.text)
        authdict = dict(request.json())
        return authdict['access']['token']['id']
| ArdanaCLM/glance-check | glance_check/check.py | check.py | py | 4,530 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stderr.write",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line... |
74152867874 | import gym
import time
# Drive the CartPole environment with random actions for 1000 rendered steps.
env = gym.make('CartPole-v0')
env.reset()

for step in range(1000):
    env.render() # rendering the environment at each step
    env.step(env.action_space.sample()) # feed the env with random actions that exist in all possible actions
    # Slow the loop down so the rendering is watchable.
    time.sleep(0.1)
| Aslanfmh65/open_ai_project | practice.py | practice.py | py | 281 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
}
] |
1168928910 | import cv2
import keras
# Live gender prediction: Haar-cascade face detection on webcam frames,
# each face crop classified by a pre-trained Keras model. Press 'q' to quit.
camera = cv2.VideoCapture(0)
haar = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt2.xml')

model = keras.models.load_model('gender/InceptionResNetV2/weights/inception_gender.h5')
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

while True:
    try:
        ret, frame = camera.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.putText(frame, 'TechVariable',(25,25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0), 2, cv2.LINE_AA)
        face = haar.detectMultiScale(gray, 1.2, 5)
        for x,y,w,h in face:
            cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
            # Bug fix: image arrays index as [row, col] == [y, x]; the old
            # frame[x:x+w, y:y+h] cropped the wrong region of the frame.
            roi = frame[y:y+h, x:x+w]
            target = cv2.resize(roi, (128,128))
            target = target.reshape(-1, 128,128,3)
            dt = model.predict_classes(target)
            # Binary classifier output: 0 -> Female, 1 -> Male.
            if dt[0] == 0:
                gname = 'Female'
            else:
                gname = 'Male'
            text = "Gender: " + str(gname)
            cv2.putText(frame, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,0), 2, cv2.LINE_AA)
        cv2.imshow('ME', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    except Exception as ex:
        # Best-effort loop: log and keep capturing on per-frame failures.
        print(ex)

camera.release()
cv2.destroyAllWindows()
| imdeepmind/age-gender-prediction | detect.py | detect.py | py | 1,388 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "keras.mod... |
41131109007 | #Standard
import numpy as np
import cv2
import os
import copy
from PIL import Image, ImageFilter
import time
#Local files
from Utilities import make_directory, align_image, get_from_directory, save_to_directory, numericalSort
from HOG_functions import process_HOG_image, get_HOG_image
import JetsonYolo
#SCIPY and SKlearn
from scipy.signal import savgol_filter, fftconvolve
from sklearn.metrics import mean_squared_error
from scipy.linalg import norm
from scipy import sum, average
from skimage.metrics import structural_similarity as compare_ssim
def run_histogram_equalization(image):
    """Equalise the luma channel of a BGR image and return the BGR result."""
    # Work in YCrCb so only brightness (Y) is stretched, leaving colour alone.
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)
def edge_detect(channel):
    """Return the Sobel gradient magnitude of one channel, clipped to 255."""
    grad_x = cv2.Sobel(channel, cv2.CV_16S, 1, 0)
    grad_y = cv2.Sobel(channel, cv2.CV_16S, 0, 1)
    magnitude = np.hypot(grad_x, grad_y)
    # Cap at 255 so the result fits an 8-bit image range.
    magnitude[magnitude > 255] = 255
    return magnitude
def create_special_silhouettes(mask_path = './Images/Masks/FewShot', image_path = './Images/FewShot', masks = None, single = False):
    """Build refined silhouettes by fusing edge detection with instance masks.

    mask_path: directory of pre-computed masks (used when *masks* is None).
    image_path: directory tree of image instances to process.
    masks: optional in-memory masks (for the live/video test path).
    single: when True, treat *masks* as one instance and return after the
        first directory of images.
    Returns a list of silhouette lists (one per instance). Opens debug
    windows via cv2.imshow and blocks on cv2.waitKey(0) per frame.
    """
    # Allow for the passing of pre-loaded silhouettes for the video test function
    if masks == None:
        mask_instances = get_from_directory(mask_path)
    else:
        mask_instances = masks
    special_silhouettes = []
    # Small elliptical kernel shared by all morphological operations below.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for iterator, (subdir, dirs, files) in enumerate(os.walk(image_path)):
        dirs.sort(key=numericalSort)
        if len(files) > 0:
            masks = []
            background = []
            combined_example = []
            silhouettes = []
            for file_iter, file in enumerate(sorted(files, key=numericalSort)):
                # load the input image and associated mask from disk and perform initial pre-processing
                image = cv2.imread(os.path.join(subdir, file))
                hist_image = run_histogram_equalization(image)
                blurred = cv2.GaussianBlur(hist_image, (3, 3), 0)
                cv2.imshow("Stage 1", blurred)
                # Prepare the image using a sobel edge detector, remove noise and convert to an 8-bit array
                edgeImg = np.max(np.array([edge_detect(blurred[:, :, 0]), edge_detect(blurred[:, :, 1]), edge_detect(blurred[:, :, 2])]), axis=0)
                mean = np.mean(edgeImg);
                edgeImg[edgeImg <= mean] = 0;
                edgeImg_8u = np.asarray(edgeImg, np.uint8)
                # If first frame in the sequence, set as background
                if file_iter == 0:
                    background = edgeImg_8u
                # Use morphological operations to produce an inflated silhouette from background subtraction
                background_based_silhouette = cv2.absdiff(edgeImg_8u, background)
                cv2.imshow("Stage 2", background_based_silhouette)
                background_based_silhouette = cv2.threshold(background_based_silhouette, 100, 255, cv2.THRESH_BINARY)[1]
                *_, bk_mask = cv2.threshold(background_based_silhouette, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                opening = cv2.morphologyEx(bk_mask, cv2.MORPH_OPEN, kernel, iterations=1)
                cv2.imshow("Stage 3", opening)
                # Retrieve and dilate the mask to prevent parts of the body being excluded
                if single == False:
                    # iterator-1: os.walk yields the root dir first, so the
                    # first file-bearing subdir aligns with mask instance 0.
                    mask = mask_instances[iterator - 1][file_iter]
                else:
                    mask = mask_instances[file_iter]
                bk_expanded = cv2.dilate(mask, kernel, iterations=5)
                cv2.imshow("Stage 4", bk_expanded)
                bk_mask_test = cv2.bitwise_and(opening, opening, mask=bk_expanded)
                cv2.imshow("Stage 5", bk_mask_test)
                # Perform morphological operations on edge detected image after applying mask
                # Thresholding to produce a silhouette of the extremities of the silhouette that the mask may have missed
                edge_based_silhouette = edgeImg_8u * bk_expanded
                edge_based_silhouette = cv2.morphologyEx(edge_based_silhouette, cv2.MORPH_CLOSE, kernel)
                edge_based_silhouette = cv2.morphologyEx(edge_based_silhouette, cv2.MORPH_OPEN, kernel)
                # Get copy of the mask and turn into an actual mask (from range 0-1)
                temp_mask = copy.deepcopy(mask)
                temp_mask[temp_mask == 255] = 1
                # Remove noise from the blurred image as a template
                threshold_lower = 30
                threshold_upper = 220
                mask_based_silhouette = cv2.Canny(blurred, threshold_lower, threshold_upper)
                # Apply the mask
                mask_based_silhouette = cv2.bitwise_and(mask_based_silhouette, mask_based_silhouette, mask=temp_mask)
                # Take this and turn all pixels white, then perform open and close to tidy it up
                mask_based_silhouette = cv2.morphologyEx(mask_based_silhouette, cv2.MORPH_CLOSE, kernel)
                mask_based_silhouette = cv2.morphologyEx(mask_based_silhouette, cv2.MORPH_OPEN, kernel)
                # Align images
                mask = align_image(mask, 30)
                bk_mask_test = align_image(bk_mask_test, 1)
                mask_based_silhouette = align_image(mask_based_silhouette, 30)
                edge_based_silhouette = align_image(edge_based_silhouette, 30)
                alpha = 1.0
                beta = 1.0
                # NOTE(review): combined_example is reset here every frame, so
                # the if below always takes the mask; edge/mask silhouettes
                # computed above are not blended in -- confirm intent.
                combined_example = []
                finished_example = []
                if len(combined_example) == 0:
                    combined_example = mask
                # Combine the masks, apply a mode filter to smooth the result
                combined_example = cv2.addWeighted(bk_mask_test, alpha, combined_example, beta, 0.0)
                cv2.imshow("Stage 5", combined_example)
                image_example = Image.fromarray(combined_example)
                image_example = image_example.filter(ImageFilter.ModeFilter(size=5))
                cv2.imshow("Stage 6", np.asarray(image_example))
                key = cv2.waitKey(0) & 0xff
                silhouettes.append(np.array(image_example))
            special_silhouettes.append(silhouettes)
            print("silhouette complete")
            # If a single image has been passed instead of a whole instance, return after one iteration
            if single == True:
                return silhouettes
    # Save
    save_to_directory(special_silhouettes, './Images/SpecialSilhouettes/FewShot')
    print("operation complete, special silhouettes saved")
    return special_silhouettes
#Graph cut
def graph_cut(mask_path = './Images/Masks/FewShot', image_path = './Images/FewShot', by_mask = True, mask_edges = True, masks = None):
    """Segment silhouettes with GrabCut and save the resulting masks.

    by_mask: seed GrabCut from pre-computed masks (True) or from a YOLO
        person bounding box (False).
    mask_edges: when True, apply the Sobel edge image as a mask to the
        input before running GrabCut.
    masks: optional in-memory masks for live testing (otherwise loaded
        from *mask_path*). Output directory depends on the flag combination.
    """
    # Adjust save path depending on which combination is used to create it. The best reults are hard-coded into the definition
    if by_mask and mask_edges:
        save_path = './Images/GraphCut/FewShot'
    elif by_mask and not mask_edges:
        save_path = './Images/graph_mask_noedges'
    elif not by_mask and mask_edges:
        save_path = './Images/graph_nomask_edges'
    else:
        save_path = './Images/graph_nomask_noedges'
    # Allow masks to be read directly from memory for live testing
    if masks == None:
        mask_instances = get_from_directory(mask_path)
    else:
        mask_instances = masks
    image_instances = []
    for iterator, (subdir, dirs, files) in enumerate(os.walk(image_path)):
        dirs.sort(key=numericalSort)
        if len(files) > 0:
            masks = []
            images = []
            for file_iter, file in enumerate(sorted(files, key = numericalSort)):
                # load the input image and associated mask from disk
                image = cv2.imread(os.path.join(subdir, file))
                # Emphasize outlines
                image = run_histogram_equalization(image)
                # Blur to remove noise
                blurred = cv2.GaussianBlur(image, (5, 5), 0)
                # Generate edge detection image
                edgeImg = np.max(np.array([edge_detect(blurred[:, :, 0]), edge_detect(blurred[:, :, 1]), edge_detect(blurred[:, :, 2])]), axis=0)
                mean = np.mean(edgeImg);
                # Reduce noise
                edgeImg[edgeImg <= mean] = 0;
                edgeImg_8u = np.asarray(edgeImg, np.uint8)
                rect = [(0, 1), (0, 1)]
                # Bounding box
                if by_mask == False:
                    # Get humans
                    objs = JetsonYolo.get_objs_from_frame(np.asarray(image), False)
                    seen_human = False
                    # NOTE(review): only the LAST detection's bbox is kept.
                    for obj in objs:
                        (xmin, ymin), (xmax, ymax) = obj['bbox']
                        rect = [xmin, ymin, xmax, ymax]
                    # Detector only returns human objs
                    if len(objs) == 0:
                        continue
                else:
                    # Mask
                    mask = mask_instances[iterator-1][file_iter]
                    if np.all(mask == 0):
                        continue
                    # any mask values greater than zero should be set to probable
                    # foreground
                    mask[mask > 0] = cv2.GC_FGD
                    mask[mask == 0] = cv2.GC_BGD
                # allocate memory for two arrays that the GrabCut algorithm internally
                # uses when segmenting the foreground from the background
                fgModel = np.zeros((1, 65), dtype="float")
                bgModel = np.zeros((1, 65), dtype="float")
                # apply GrabCut using the the mask segmentation method
                start = time.time()
                if mask_edges == True:
                    # Keep only edge pixels of the input as GrabCut evidence.
                    edgeImg = edgeImg.astype("uint8")
                    edgeImg[edgeImg == 255] = 1;
                    grab_image = cv2.bitwise_and(image, image, mask=edgeImg)
                else:
                    grab_image = image
                if by_mask == False:
                    mask = np.zeros(image.shape[:2], dtype="uint8")
                    (mask, bgModel, fgModel) = cv2.grabCut(grab_image, mask, rect, bgModel,
                                                           fgModel, iterCount=5, mode=cv2.GC_INIT_WITH_RECT)
                else:
                    (mask, bgModel, fgModel) = cv2.grabCut(grab_image, mask, None, bgModel,
                                                           fgModel, iterCount=5, mode=cv2.GC_INIT_WITH_MASK)
                end = time.time()
                print("[INFO] applying GrabCut took {:.2f} seconds".format(end - start))
                # Collapse definite+probable background to 0, everything else to 1.
                outputMask = np.where((mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD),
                                      0, 1)
                # scale the mask from the range [0, 1] to [0, 255]
                outputMask = (outputMask * 255).astype("uint8")
                # Smooth to avoid noisy pixels on the mask edges
                outputMask = Image.fromarray(outputMask)
                outputMask = outputMask.filter(ImageFilter.ModeFilter(size=13))
                outputMask = np.array(outputMask)
                # apply a bitwise AND to the image using our mask generated by GrabCut to generate our final output image
                # NOTE(review): `output` is computed but never used/saved.
                output = cv2.bitwise_and(image, image, mask=outputMask)
                outputMask = align_image(outputMask, 0)
                images.append(outputMask)
            image_instances.append(images)
    save_to_directory(image_instances, save_path)
    print("graph cut operation complete")
def process_image(image, raw_img, verbose = 0, subtractor = None):
    """Threshold the near-white pixels of a grayscale silhouette and return
    the aligned binary mask.

    raw_img, verbose and subtractor are unused; kept for interface
    compatibility with the HOG processing path.
    """
    silhouette = cv2.inRange(image, 180, 255)
    return np.asarray(align_image(silhouette, 0))
def get_silhouettes(path, verbose = 0, HOG = False, few_shot = False):
    """Convert every image instance under *path* into silhouettes and save them.

    HOG: use the HOG-based pipeline (frame 0 of each instance becomes the
        HOG background reference) instead of simple thresholding.
    few_shot: select the FewShot output directory variants.
    Saves results under ./Images/... three levels above this file and
    prints "all saved" when done.
    """
    global HOG_background
    mask_instances = get_from_directory('./Images/Masks')
    make_directory(path, "Silhouette folder already exists")
    processed_images = []
    for iterator, (subdir, dirs, files) in enumerate(os.walk(path)):
        dirs.sort(key=numericalSort)
        if len(files) > 0:
            raw_images = []
            processed_instances = []
            # NOTE(review): subtractor is created per instance but the
            # non-HOG process_image() path no longer uses it.
            subtractor = cv2.createBackgroundSubtractorKNN()
            for file_iter, file in enumerate(sorted(files, key = numericalSort)):
                raw_images.append(cv2.imread(os.path.join(subdir, file)))
                # Prepare image
                gray_img = cv2.cvtColor(raw_images[file_iter], cv2.COLOR_BGR2GRAY)
                # First pass: if HOG take a background example
                if file_iter == 0 and HOG == True:
                    HOG_background = get_HOG_image(gray_img)
                # Process image according to chosen processing method
                if HOG == False:
                    processed_instances.append(process_image(gray_img, raw_images[file_iter], verbose, subtractor))
                else:
                    processed_instances.append(process_HOG_image(get_HOG_image(gray_img), HOG_background, mask_instances[iterator-1][file_iter]))
            processed_images.append(processed_instances)
    # Processed images taken, save to location
    # Move to the project root (three levels above this file).
    os.chdir(os.path.abspath(os.path.join(__file__, "../../..")))
    for instance in processed_images:
        # Find the latest un-made path and save the new images to it
        path_created = False
        n = 0.0  # float counter, so directories are named Instance_0.0, ...
        while path_created == False:
            try:
                # NOTE(review): the last two branches produce the same path,
                # so few_shot=True ignores HOG -- confirm this is intended.
                if HOG == False and few_shot == False:
                    local_path = './Images/Silhouettes' + "/Instance_" + str(n) + "/"
                elif HOG == True and few_shot == False:
                    local_path = './Images/HOG_silhouettes' + "/Instance_" + str(n) + "/"
                elif HOG == False and few_shot == True:
                    local_path = './Images/HOG_silhouettes/FewShot' + "/Instance_" + str(n) + "/"
                elif HOG == True and few_shot == True:
                    local_path = './Images/HOG_silhouettes/FewShot' + "/Instance_" + str(n) + "/"
                # exist_ok=False makes makedirs raise when the dir exists,
                # which bumps the counter in the except below.
                os.makedirs(local_path, exist_ok=False)
                path_created = True
            except:
                n += 1
        for i, image in enumerate(instance):
            # Exclude entirely black or entirely white images from the sequence.
            if HOG:
                cv2.imwrite(local_path + str(i) + ".jpg", image)
            else:
                if not np.all((image == 0)) and not np.all((image == 255)):
                    cv2.imwrite(local_path + str(i) + ".jpg", image)
    print("all saved")
| ChrisLochhead/PhDSummerProject | PhDSummerProject/Programs/image_processing/ImageProcessor.py | ImageProcessor.py | py | 14,887 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2YCrCb",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "cv2.equalizeHist",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",... |
25654546309 | import uvicorn
from fastapi import FastAPI, Request, status
from fastapi.openapi.utils import get_openapi
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from app.v1.routers.facts import v1_router
from config import NAME, HOST, PORT, VERSION, HOMEPAGE
origins = ["*"]
def custom_openapi():
    """Build the OpenAPI schema once and cache it on the app."""
    # Generate only on the first call; subsequent calls reuse the cache.
    if not app.openapi_schema:
        app.openapi_schema = get_openapi(
            title=NAME,
            version=VERSION,
            description=f"A public API service to retrieve cool dog facts. Homepage: {HOMEPAGE}",
            routes=app.routes,
        )
    return app.openapi_schema
def add_middlewares(app: FastAPI):
    """Install the CORS middleware, permitting requests from any origin."""
    cors_settings = dict(
        allow_origins=origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.add_middleware(CORSMiddleware, **cors_settings)
def add_test_endpoint(app: FastAPI):
    """Register the landing endpoint that points visitors to the docs."""
    @app.get("/")
    async def home():
        welcome = "Welcome to Dog Facts API!. Go to endpoint /docs to access the interactive documentation."
        return {"message": welcome}
def add_exception_handler(app: FastAPI):
    """Register a handler that maps validation/value errors to HTTP 400."""
    @app.exception_handler(ValueError)
    @app.exception_handler(RequestValidationError)
    async def value_error_exception_handler(request: Request, exc):
        """
        Custom Exception Handler
        """
        payload = {"message": str(exc)}
        return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content=payload)
# Application wiring: create the FastAPI app, install the cached OpenAPI
# schema builder, then register routes, middleware and exception handlers.
app = FastAPI()
app.openapi = custom_openapi
add_test_endpoint(app)
add_middlewares(app)
add_exception_handler(app)
app.include_router(v1_router)
# Run a local development server when executed directly.
if __name__ == "__main__":
    uvicorn.run(app, host=HOST, port=PORT)
| DucNgn/Dog-Facts-API-v2 | app/main.py | main.py | py | 1,763 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "app.v1.routers.facts.openapi_schema",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "app.v1.routers.facts",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "app.v1.routers.facts.openapi_schema",
"line_number": 17,
"usage_type": "att... |
33845405230 | #-*- coding: utf-8 -*-
import pyqtgraph as pg
from DateAxis import DateAxis
from TableModel import TableModel
from PyQt4.QtGui import *
from PyQt4 import uic
class LogView(QTableView):
    """pyqtgraph-based plot wrapper for log data with a date-formatted x axis.

    NOTE(review): subclasses QTableView but is only used as a plot facade
    here — every call is forwarded to the pyqtgraph PlotItem in self.view.
    """
    def __init__(self, graphicLayout, layoutRow = 0, layoutCol = 0):
        super().__init__()
        # Use DateAxis so x values (timestamps) are rendered as dates.
        self.view = graphicLayout.addPlot(axisItems={'bottom': DateAxis(orientation='bottom')}, row = layoutRow, col = layoutCol, rowspan = 1)
        self.view.setLimits(yMin = 0, yMax = 100)
        self.view.showGrid(x=True, y=False)
        self.viewLegend = self.view.addLegend()
    def show(self):
        """Show the plot and its legend."""
        self.view.show()
        self.viewLegend.show()
    def hide(self):
        """Hide the plot and its legend."""
        self.view.hide()
        self.viewLegend.hide()
    def clear(self):
        """Remove all items from the plot."""
        self.view.clear()
    def autoRange(self):
        """Auto-fit the plot's visible range to its data."""
        self.view.autoRange()
    def setLimits(self, **args):
        """Forward range limits to the underlying plot."""
        self.view.setLimits(**args)
    def showGrid(self, **args):
        """Forward grid visibility options to the underlying plot."""
        self.view.showGrid(**args)
    def addLegend(self, **args):
        """Create and return a legend on the underlying plot."""
        return self.view.addLegend(**args)
    def setXLink(self, target):
        """Link this plot's x axis to another LogView's plot."""
        self.view.setXLink(target.view)
    def getPlotView(self):
        """Return the underlying pyqtgraph PlotItem."""
        return self.view
    # addPlot
    # Input :
    #  - lineName : will be shown in graph for Legend
    #  - x_values : values of X axis
    #  - y_values : values of Y axis
    #  - color : color of line. ex) 'r', 'g', 'b'
    # output : N/A
    def addPlot(self, lineName, x_values, y_values, color):
        fixedY_values = y_values
        if len(x_values) == len(y_values) == 0:
            return
        elif len(y_values) == 1:
            # Broadcast a single y value across every x value.
            fixedY_values = [y_values[0] for _ in range(len(x_values))]
        self.view.plot(x=x_values, y=fixedY_values, name=lineName, pen=color, symbol='o')
    # addText
    # Input :
    #  - x_values : values of X axis
    #  - y_values : values of Y axis
    #  - text_values : text values will be shown at graph
    #  - color : color of Text. ex) 'red', 'green', 'blue'
    # output : N/A
    def addText(self, x_values, y_values, text_values, color):
        fixedY_values = y_values
        fixedTest_values = text_values
        if len(x_values) == len(y_values) == len(text_values) == 0:
            return
        if len(y_values) == 1:
            # Broadcast singletons so the zip below pairs every x value.
            fixedY_values = [y_values[0] for _ in range(len(x_values))]
        if len(text_values) == 1:
            fixedTest_values = [text_values[0] for _ in range(len(x_values))]
        for x_value, y_value, text_value in zip(x_values, fixedY_values, fixedTest_values):
            text = pg.TextItem(
                html='<div style="text-align: center"><span style="color:' + color
                + '; font-size: 8pt;">' + text_value + '</span></div>', border='w')
            self.view.addItem(text)
            text.setPos(x_value, y_value)
    # addSpot
    # Input :
    #  - x_values : values of X axis
    #  - y_values : values of Y axis
    #  - color : color of Spot. ex) 'r', 'g', 'b'
    # output : N/A
    def addSpot(self, x_values, y_values, color):
        fixedY_values = y_values
        if len(x_values) == len(y_values) == 0:
            return
        elif len(y_values) == 1:
            fixedY_values = [y_values[0] for _ in range(len(x_values))]
        spot = []
        for x_value, y_value in zip(x_values, fixedY_values):
            spot.append({'pos': (x_value, y_value), 'pen': {'color': color, 'width': 2}})
        point = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120))
        point.addPoints(spot)
        self.view.addItem(point)
| turlvo/KuKuLogAnalyzer | LogView.py | LogView.py | py | 3,684 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "DateAxis.DateAxis",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.TextItem",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.ScatterPlotItem",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "pyqtg... |
7420566826 | from django.contrib.auth import get_user_model
from django.test import TestCase
from posts.models import Group, Post
User = get_user_model()
class TestGroupModel(TestCase):
    """Checks for the ``Group`` model's string representation."""

    @classmethod
    def setUpTestData(cls):
        cls.group = Group.objects.create(
            title="Тестовый Заголовок",
            slug="test_header",
            description="Описание группы для теста модели",
        )

    def test_object_name(self):
        """``str(group)`` equals the value of the ``title`` field."""
        self.assertEqual(self.group.title, str(self.group))
class TestPostModel(TestCase):
    """Checks for the ``Post`` model: __str__, verbose names and help texts."""

    @classmethod
    def setUpTestData(cls):
        author = User.objects.create_user(
            username="test_user",
        )
        cls.post = Post.objects.create(
            text="Текст тестового поста.",
            author=author,
        )

    def test_object_name(self):
        """``str(post)`` equals the first 15 characters of ``post.text``."""
        self.assertEqual(self.post.text[:15], str(self.post))

    def test_fields_verboses(self):
        """Fields carry the expected ``verbose_name`` values."""
        cases = (
            ("text", "Текст поста"),
            ("group", "Группа (необязательно)"),
        )
        for field, expected_verbose in cases:
            with self.subTest(field=field, expected_verbose=expected_verbose):
                actual = Post._meta.get_field(field).verbose_name
                self.assertEqual(actual, expected_verbose)

    def test_fields_help_text(self):
        """Fields carry the expected ``help_text`` values."""
        cases = (
            ("text", "Текст поста не может быть пустым"),
            ("group", "В какой группе выложить пост"),
        )
        for field, expected_help_field in cases:
            with self.subTest(
                field=field,
                expected_help_field=expected_help_field
            ):
                actual = Post._meta.get_field(field).help_text
                self.assertEqual(actual, expected_help_field)
| VaSeWS/hw05_final | yatube/posts/tests/test_models.py | test_models.py | py | 2,456 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "posts.models.Group.objects.create",
"line_number": 12,
"usage_type": "call"
},
... |
8803458348 | from typing import Union
import requests as r
from requests_toolbelt.multipart.encoder import MultipartEncoder
class PetFriends:
    """Thin client for the PetFriends REST API.

    Every method returns a 2-tuple ``(status_code, result)`` where ``result``
    is the parsed JSON body, or the raw text when the body is not JSON.
    """
    def __init__(self):
        self.base_url = 'https://petfriends1.herokuapp.com/'

    def get_api_key(self, email: str, password: str):
        """Request an API key for the given credentials (GET /api/key)."""
        headers = {
            'email': email,
            'password': password
        }
        res = r.get(self.base_url + 'api/key', headers=headers)
        return PetFriends.__get_result(res)

    def get_pets(self, key: str, fltr: str = ''):
        """List pets; pass fltr='my_pets' to get only the caller's pets."""
        headers = {'auth_key': key}
        params = {'filter': fltr}
        res = r.get(self.base_url + 'api/pets', headers=headers, params=params)
        return PetFriends.__get_result(res)

    def add_pet(self, name: str, animal_type: str, age: str, key: str, pet_photo: str):
        """Create a pet with a photo (multipart POST /api/pets)."""
        # Open the photo in a context manager so the handle is always closed;
        # the original code leaked the file object.
        with open(pet_photo, 'rb') as photo:
            data = MultipartEncoder(
                fields={
                    'name': name,
                    'animal_type': animal_type,
                    'age': age,
                    'pet_photo': (pet_photo, photo)
                })
            headers = {
                'auth_key': key,
                'Content-Type': data.content_type
            }
            res = r.post(self.base_url + 'api/pets', headers=headers, data=data)
        return PetFriends.__get_result(res)

    def update_pet(self, key: str, pet_id: str, name: str = '', animal_type: str = '', age: int = ''):
        """Update an existing pet's details (PUT /api/pets/<id>)."""
        headers = {
            'auth_key': key
        }
        data = {
            'name': name,
            'animal_type': animal_type,
            'age': age
        }
        res = r.put(self.base_url + 'api/pets/{0}'.format(pet_id), headers=headers, data=data)
        return PetFriends.__get_result(res)

    def del_pet(self, key: str, pet_id: str):
        """Delete a pet by id (DELETE /api/pets/<id>)."""
        res = r.delete(self.base_url + 'api/pets/{0}'.format(pet_id), headers={'auth_key': key})
        return PetFriends.__get_result(res)

    @staticmethod
    def __get_result(res: r.models.Response) -> tuple[int, Union[str, dict]]:
        """Return (status_code, JSON body) or (status_code, raw text)."""
        try:
            result = res.json()
        except ValueError:
            # Body is not valid JSON — fall back to the raw text. The former
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            result = res.text
        return res.status_code, result
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests_toolbelt.multipart.encoder.MultipartEncoder",
"line_number": 33,
"usage_type": "call"
},
{
"ap... |
1603872328 | """
trainvalsplit.py is a script that splits an MS COCO formatted dataset into train and val partitions.
For sample usage, run from command line:
Example:
python trainvalsplit.py --help
"""
import random
from pathlib import Path
from typing import Any, List, Tuple
import numpy as np
from .class_dist import CocoClassDistHelper
from .coco_builder import CocoJsonBuilder
# Used to check the results of the split--all classes in both splits
# should have at least this many annotations:
_CLASS_COUNT_THRESHOLD = 0
# Seed value 341589 was chosen via the train-val-split-xviewcoco notebook:
_RANDOM_SEED = 486
# Size of val split. The train split size will be 1 - _TEST_SIZE.
_TEST_SIZE = 0.065
def split(data: List, test_size: float = 0.2, random_state=None) -> Tuple[List[Any], List[Any]]:
    """
    Similar to scikit learn, creates train/test splits of the passed in data.

    Args:
        data: A list or iterable type, of data to split.
        test_size: value in [0, 1.0] indicating the fraction of items placed in
            the test split.
        random_state: an int or RandomState object to seed the numpy randomness.

    Returns: 2-tuple of lists; (train, test), where each item in data has been
        placed into exactly one of the two splits.
    """
    n = len(data)
    num_test = int(np.ceil(test_size * n))
    np.random.seed(random_state)
    # BUG FIX: sample WITHOUT replacement. np.random.choice defaults to
    # replace=True, so duplicate indices could be drawn and then collapse in
    # the set, silently shrinking the test split below the requested size.
    test_idx = set(np.random.choice(n, num_test, replace=False))
    data_train = [datum for idx, datum in enumerate(data) if idx not in test_idx]
    # Preserve original ordering of the test items (ascending index).
    data_test = [data[idx] for idx in sorted(test_idx)]
    return data_train, data_test
def create_split(input_json, output_path, output_json_name):
    """
    Creates train/val split for the coco-formatted dataset defined by input_json.

    Args:
        input_json: full path or Path object to coco-formatted input json file.
        output_path: full path or Path object to directory where outputted json
            will be saved.
        output_json_name: format-string file name with a '{}' placeholder for
            the split type.
    """
    coco = CocoClassDistHelper(input_json)
    train_img_ids, val_img_ids = split(
        coco.img_ids, test_size=_TEST_SIZE, random_state=_RANDOM_SEED
    )
    # Class distributions are computed for their side diagnostics; the
    # returned values are not otherwise used here.
    train_counts, train_percents = coco.get_class_dist(train_img_ids)
    val_counts, val_percents = coco.get_class_dist(val_img_ids)

    def generate_coco_json(coco, split_type, img_ids):
        # Emit one coco-formatted json holding the images of this split.
        coco_builder = CocoJsonBuilder(
            coco.cats, dest_path=output_path, dest_name=output_json_name.format(split_type)
        )
        for img_id in img_ids:
            coco_builder.add_image(coco.imgs[img_id], coco.imgToAnns[img_id])
        coco_builder.save()

    generate_coco_json(coco, "train", train_img_ids)
    generate_coco_json(coco, "val", val_img_ids)
    return coco
def verify_output(original_coco, output_path, output_json_name):
    """
    Verify that the outputted json's for the train/val split can be loaded,
    have the correct total number of images/annotations, and that each class
    count meets the minimum threshold.
    """
    def verify_split_part(output_json_name, split_part):
        # Load the written split back through the coco api and sanity-check it.
        json_path = output_path / output_json_name.format(split_part)
        print(f"Checking if we can load json via coco api:{json_path}...")
        coco = CocoClassDistHelper(json_path)
        counts, _ = coco.get_class_dist()
        lowest = min(counts.values())
        assert lowest >= _CLASS_COUNT_THRESHOLD, (
            f"min class count ({lowest}) is "
            + f"lower than threshold of {_CLASS_COUNT_THRESHOLD}"
        )
        print(f"{split_part} class counts: ", counts)
        return coco

    train_coco = verify_split_part(output_json_name, "train")
    val_coco = verify_split_part(output_json_name, "val")
    num_split_imgs = len(train_coco.imgs) + len(val_coco.imgs)
    assert len(original_coco.imgs) == num_split_imgs, \
        "Num Images in original data should equal sum of imgs in splits."
    num_split_anns = len(train_coco.anns) + len(val_coco.anns)
    assert len(original_coco.anns) == num_split_anns, \
        "Num annotations in original data should equal sum of those in splits."
def _main(opt):
    """
    Creates train/val split and verifies output.

    Args:
        opt: command line options (currently unused; reserved for future flags).
    """
    # BUG FIX: removed `print(h4dconfig.DATA_DIR)` — `h4dconfig` is never
    # imported in this module, so that line raised NameError on every run.
    datadir: Path = Path("/home/laielli/data")
    # Format-string with a '{}' placeholder for the split type.
    output_json_name = "xview_coco_complete_v1_{}.json"
    input_json = datadir / "Xview/coco_complete/{}.json".format("xview_coco_complete_v0")
    output_path = datadir / "Xview/coco_complete"
    original_coco = create_split(input_json, output_path, output_json_name)
    verify_output(original_coco, output_path, output_json_name)
# Script entry point: no command line options are currently parsed.
if __name__ == "__main__":
    opt = None
    # parser = argparse.ArgumentParser()
    # opt = parser.parse_args()
    _main(opt)
| GiscardBiamby/cocobetter | PythonAPI/pycocotools/helpers/splits.py | splits.py | py | 4,977 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "numpy.ceil",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_num... |
21513980032 | import json
from scrapy import Selector
import requests
import re
headers = {
"content-type": "application/x-www-form-urlencoded",
"sec-ch-ua-mobile": "?0",
"x-requested-with": "XMLHttpRequest",
'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Mobile Safari/537.36'
}
def get_post_id(item, xpath_expression: str):
    """Extract the ``owner_post`` digit pair from the node's href attribute."""
    raw = item.xpath(xpath_expression).get()
    # hrefs look like '/wall<owner>_<post>'; keep only the numeric pair.
    return re.search(r'\d+_\d+', raw).group()
def get_likes(item, xpath_expression: str):
    """Return the like count from the matched node as a digits-only string."""
    raw = item.xpath(xpath_expression).get()
    return re.sub("\D", "", raw)
def get_views(item, xpath_expression: str):
    """Return the view count from the matched node as a digits-only string."""
    raw = item.xpath(xpath_expression).get()
    return re.sub("\D", "", raw)
def get_share(item, xpath_expression: str):
    """Return the share count digits, or the raw falsy value (None/'') when
    the node is absent or empty."""
    raw = item.xpath(xpath_expression).get()
    if not raw:
        return raw
    return re.sub("\D", "", raw)
def get_data(alias: str, skip: int = 0):
    """Fetch one page of wall posts for ``alias`` from the mobile VK site.

    Args:
        alias: group/user short name used in the m.vk.com URL.
        skip: pagination offset.

    Returns:
        A list of dicts with post id, url, likes, shares and views.
    """
    url = f"https://m.vk.com/{alias}?offset={skip}"
    payload = {
        '_ajax': 1
    }
    response = requests.post(url, headers=headers, data=json.dumps(payload))
    # The ajax endpoint wraps the rendered page fragment in a JSON envelope.
    page = Selector(text=response.json()['html'])
    posts = []
    for item in page.xpath('.//div[@class="wall_item"]'):
        post_id = get_post_id(item, './/a[@class="wi_date"]/@href')
        posts.append({
            'post_id': post_id,
            'url': 'https://m.vk.com/wall' + post_id,
            'likes': get_likes(
                item, './/div[@class="like_wrap"]/a/@aria-label'),
            'share': get_share(
                item, './/b[@class="v_share"]/text()'),
            'views': get_views(
                item, './/span[@class="Socials__button_views "]/@aria-label')
        })
    return posts
| petr777/pp | flask_app/vk_app/posts.py | posts.py | py | 1,845 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 36,
"usage_ty... |
45290876502 | # -*- coding: utf-8 -*-
import re
import markdown
from markdown.treeprocessors import Treeprocessor
from tina.front.templatetags.functions import resolve
class TargetBlankLinkExtension(markdown.Extension):
    """An extension that add target="_blank" to all external links."""
    def extendMarkdown(self, md):
        # Register the tree processor before "prettify" so the added
        # attributes survive output serialization.
        md.treeprocessors.add("target_blank_links",
                              TargetBlankLinksTreeprocessor(md),
                              "<prettify")
class TargetBlankLinksTreeprocessor(Treeprocessor):
    """Adds target="_blank" to every anchor that points outside this site."""
    def run(self, root):
        home_url = resolve("home")
        # BUG FIX: Element.getiterator() was removed in Python 3.9;
        # Element.iter() is the supported equivalent. Also dropped an
        # unused `href` local.
        for a in root.iter("a"):
            url = a.get("href", "")
            # Compare without a trailing slash so "https://site/" matches.
            if url.endswith("/"):
                url = url[:-1]
            if not url.startswith(home_url):
                a.set("target", "_blank")
| phamhongnhung2501/Taiga.Tina | fwork-backend/tina/mdrender/extensions/target_link.py | target_link.py | py | 879 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "markdown.Extension",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "markdown.treeprocessors.Treeprocessor",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tina.front.templatetags.functions.resolve",
"line_number": 22,
"usage_type"... |
2760383933 | # This file contains the main class to run the model
import os
import math
from tensorflow.keras.callbacks import LambdaCallback
import numpy as np
import time
import matplotlib.pyplot as plt
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, c_model, dataset, n_samples=10):
    """Plot a few generated samples and report classifier accuracy.

    Args:
        step: current training iteration (only used by the commented-out
            filename logic below).
        g_model: generator wrapper exposing generate_fake_samples().
        c_model: compiled Keras classifier to evaluate.
        dataset: (X, y) evaluation data.
        n_samples: number of fake samples to generate for the plot.

    Returns:
        The classifier accuracy on ``dataset``.
    """
    # prepare fake examples
    X, _ = g_model.generate_fake_samples(n_samples)
    # Rescale generator output from [-1, 1] to [0, 1] for display.
    X = (X + 1) / 2.0
    # plot images
    f = plt.figure(figsize=(20,20))
    for i in range(10):
        # define subplot
        ax = plt.subplot(10, 1, 1 + i)
        # turn off axis
        # plt.axis('off')
        # plot raw pixel data
        ax.imshow(X[i, :, :])
    f.show()
    # save plot to file
    # filename1 = 'generated_plot_%04d.png' % (step+1)
    # plt.savefig(filename1)
    # plt.close()
    # evaluate the classifier model
    X, y = dataset
    _, acc = c_model.evaluate(X, y, verbose=0)
    print('Classifier Accuracy: %.3f%%' % (acc * 100))
    return acc
# train the generator and discriminator
def train(data_loader, model, n_iter = 100, epochs=10, n_batch=24, batch_size=8):
    """Run the semi-supervised GAN training loop.

    Args:
        data_loader: provides supervised/real sample generators and test_data.
        model: wrapper exposing c_model, d_model, g_model and gan_model.
        n_iter: number of outer training iterations.
        epochs: epochs per fit() call inside each iteration.
        n_batch: batch size requested from the supervised sample generator.
        batch_size: Keras fit() batch size.

    Returns:
        (finished, acc_history, val_acc_history, loss_history, val_loss_history)
        where ``finished`` is True when early stopping triggered.
    """
    # select supervised dataset
    iterator = data_loader.generate_supervised_samples(n_batch)
    # model.c_load()
    print("### Start Training ###")
    finished = False
    a = np.array([])  # training accuracy history
    b = np.array([])  # validation accuracy history
    c = np.array([])  # training loss history
    d = np.array([])  # validation loss history
    best_performance = 0
    tolerance = 0
    # BUG FIX: removed a stray bare name (`rests`) here that raised
    # NameError as soon as this function was called.
    # manually enumerate epochs
    for i in range(n_iter):
        print("=======================================================")
        print("Training Procedure {0}".format(i+1))
        X_sup, y_sup = next(iterator)
        # calculate the size of half a batch of samples
        half_batch = int(X_sup.shape[0] / 2)
        # update supervised discriminator (c)
        print("supervised-real")
        [x_real, y_real], y_real2 = data_loader.generate_real_samples([X_sup, y_sup], half_batch)
        res = model.c_model.fit(x_real, y_real, batch_size=batch_size, epochs=epochs, validation_data=data_loader.test_data)
        performance = np.average(res.history['val_accuracy'])
        print("Average Performance: {0}".format(performance))
        # update unsupervised discriminator (d)
        print("unsupervised-real")
        model.d_model.fit(x_real, y_real2, batch_size=batch_size, epochs=epochs)
        print("unsupervised-fake")
        x_fake, y_fake = model.g_model.generate_fake_samples(half_batch)
        model.d_model.fit(x_fake, y_fake, batch_size=batch_size, epochs=epochs)
        # update generator (g)
        print("gan")
        x_gan, y_gan = model.g_model.generate_latent_points(X_sup.shape[0]), np.ones((X_sup.shape[0], 256, 256, 1))
        model.gan_model.fit(x_gan, y_gan, batch_size=batch_size, epochs=epochs)
        # Free the supervised batch before the next iteration.
        del X_sup
        del y_sup
        time.sleep(1)
        # evaluate the model performance every so often
        if i % 3 == 1:
            performance = summarize_performance(i, model.g_model, model.c_model, data_loader.test_data)
            print("Best performance: {0}".format(best_performance))
            print("Tolerance: {0}".format(tolerance))
            if best_performance < performance:
                best_performance = performance
                tolerance = 0
            elif best_performance - performance < 0.001:
                tolerance += 1
                if tolerance > 1:
                    print("Not progressing for too long time")
                    finished = True
                    break
            print("Save new weight at iter {0}".format(i))
            model.c_save()
            model.gan_model.save_weights(model.save_path)
        a = np.append(a, res.history['accuracy'])
        b = np.append(b, res.history['val_accuracy'])
        c = np.append(c, res.history['loss'])
        d = np.append(d, res.history['val_loss'])
    print("Discriminator Training complete.")
    return finished, a, b, c, d
def train_supervised(model, data_loader, checkpoint_path = './', n_iter = 5000, n_batch = 1, batch_size=8, epochs=10):
    """Train only the supervised classifier (no GAN components).

    Args:
        model: wrapper exposing c_model and c_save().
        data_loader: provides the supervised sample generator and test_data.
        checkpoint_path: unused here; kept for interface compatibility.
        n_iter: number of outer training iterations.
        n_batch: batch size requested from the supervised sample generator.
        batch_size: Keras fit() batch size.
        epochs: epochs per fit() call inside each iteration.

    Returns:
        (finished, acc_history, val_acc_history, loss_history, val_loss_history).

    NOTE(review): early stopping increments tolerance when performance drops
    by MORE than 0.01, while train() increments when within 0.001 of the
    best — confirm whether this asymmetry is intentional.
    """
    # model.load()
    # prepare training data loader
    iterator = data_loader.generate_supervised_samples(n_batch)
    # manually enumerate epochs
    print("### Start Training ###")
    best_performance = 0
    tolerance = 0
    finished = False
    a = np.array([])  # training accuracy history
    b = np.array([])  # validation accuracy history
    c = np.array([])  # training loss history
    d = np.array([])  # validation loss history
    for i in range(n_iter):
        print("=======================================================")
        print("Training Procedure {0}".format(i+1))
        # get randomly selected 'real' samples
        x_real, y_real = next(iterator)
        # update discriminator on real samples
        res = model.c_model.fit(x_real, y_real, batch_size=batch_size, epochs=epochs,
            validation_data=data_loader.test_data)# callbacks = [LambdaCallback(on_epoch_end=lambda batch, logs: print(model.get_weights(-2)))])
        performance = np.average(res.history['val_accuracy'])
        loss = res.history['val_loss'][-1]
        print("Average Performance: {0}".format(performance))
        # if i % 5 == 1 and not math.isnan(model.get_weights(-2)):
        #     print("Save new weight at iter {0}".format(i))
        #     model.save()
        # if i % 10 == 5 and not math.isnan(model.layers[-2].get_weights()[0][0][0][0][0]):
        # Free the batch before the next iteration.
        del x_real
        del y_real
        time.sleep(1)
        # Periodically checkpoint, skipping iterations whose loss is NaN.
        if i % 3 == 1 and not math.isnan(loss):
            print("Save new weight at iter {0}".format(i))
            print("Best performance: {0}".format(best_performance))
            print("Tolerance: {0}".format(tolerance))
            if best_performance < performance:
                best_performance = performance
                tolerance = 0
            elif best_performance - performance > 0.01:
                tolerance += 1
                if tolerance > 1:
                    print("Not progressing for too long time")
                    finished = True
                    break
            model.c_save()
        a = np.append(a, res.history['accuracy'])
        b = np.append(b, res.history['val_accuracy'])
        c = np.append(c, res.history['loss'])
        d = np.append(d, res.history['val_loss'])
    print("Discriminator Training complete.")
    return finished, a, b, c, d
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "... |
12158969726 | from unittest import TestCase, mock
from matplotlib import animation, pyplot as plt
from src.chinese_checkers.game.ChineseCheckersGame import ChineseCheckersGame
from src.chinese_checkers.simulation.GameSimulationAnimation import GameSimulationAnimation
from src.chinese_checkers.simulation.GameSimulation import GameSimulation
class TestGameSimulationAnimation(TestCase):
    """Unit tests for GameSimulationAnimation built from mocked simulation data."""

    def setUp(self):
        # A fake GameSimulation whose sequence resolves to two mocked games.
        self.mock_game_sequence = [
            mock.MagicMock(spec=ChineseCheckersGame),
            mock.MagicMock(spec=ChineseCheckersGame),
        ]
        sim = mock.MagicMock(spec=GameSimulation)
        sim.to_game_sequence.return_value = self.mock_game_sequence
        sim.positions = mock.MagicMock()
        sim.positions.player_ids = ["player1", "player2"]
        sim.positions.player_start_positions = ["start1", "start2"]
        sim.positions.player_target_positions = ["target1", "target2"]
        sim.positions.historical_moves = [("move1", "move2")]
        sim.metadata = mock.MagicMock()
        sim.metadata.board_size = 4
        self.mock_game_simulation_data = sim

    def test_from_simulation_data(self):
        instance = GameSimulationAnimation.from_simulation_data(self.mock_game_simulation_data)
        # The factory should produce an animation over the mocked sequence.
        self.assertTrue(hasattr(instance, "anim"))
        self.assertTrue(isinstance(instance.game_sequence, list))
        self.assertEqual(len(instance.game_sequence), 2)

    def test_display(self):
        instance = GameSimulationAnimation(self.mock_game_sequence)
        # Suppress the GUI window; display() should complete without error.
        with mock.patch.object(plt, "show", return_value=None):
            instance.display()
| dakotacolorado/ChineseCheckersGameEngine | tests/chinese_checkers/simulation/test_GameSimulationAnimation.py | test_GameSimulationAnimation.py | py | 2,113 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "src.chinese_c... |
4752374660 | import os
import openai
openai.api_key = ""
def get_completion(prompt, model="gpt-3.5-turbo"):
    """Send a single-turn user prompt to the chat model and return its reply."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # deterministic output
    )
    return response.choices[0].message["content"]
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0):
    """Send a full chat history to the model and return the assistant reply.

    ``temperature`` is the degree of randomness of the model's output.
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
    )
    return response.choices[0].message["content"]
# Seed conversation: the system prompt describing the patient profile the
# chatbot should draw on. The trailing backslashes keep this as one long
# string literal (line continuations, not embedded newlines).
messages = [
    {
        'role': 'system',
        'content': 'You are friendly chatbot that has a casual conversation with the Alzheimer\'s Disease \
patient based on the information about the patient. \
You are having a conversation with the Alzheimer\'s Disease Patient, Mikhail Ivanov. \
Initialize casual conversation while giving your patient to hints about his/her past to help remember.\
Here are the details of you patient that you should refer to when having conversation with him/her.\
Name: Mikhail Ivanov\
Gender: Male\
Age: 58\
Ethnicity: Russian\
Religion: Eastern Orthodox\
Medical Condition: Early onset Alzheimer\'s\
First language: Russian, fluent in English\
Family: Wife, two sons\
Location: Boston, Massachusetts\
Mikhail was a chess grandmaster, competing internationally.\
What’s important to him?\
The strategic depth and intellectual challenge of chess.\
What’s happening for him at the moment?\
Mikhail often forgets certain strategic plays or overlooks opponent moves.\
What is the impact on him?\
He\'s distressed, fearing he won\'t be able to compete at elite levels.\
What would he like to happen in the future?\
Mikhail hopes to coach aspiring chess players and pass on his strategic insights.\
What strengths and support networks does he have?\
His sons, both avid chess players, regularly play with him. The global chess community recognizes his achievements.'
    }
]
def chat():
    """Interactive loop: read patient input, append it to the shared chat
    history, and print the model's reply.

    Runs until the process is interrupted — no exit command is handled.
    """
    while True:
        user_input = input("Patient: ")
        messages.append({"role": "user", "content": user_input})
        response = get_completion_from_messages(messages)
        print("MemoryCompanion:", response)
# start the chatbot with a greeting, then enter the interactive loop
print("MemoryCompanion: Hi, How are you?")
chat() | yeonieheoo/MemoryCompanion | ML4H_LLM/case90.py | case90.py | py | 2,539 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openai.api_key",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name... |
14920841148 | """
> Extremely Simple Image file format <
>------------------------------------------------------------------------------------------<
> Designed for databending or glitching
> Has very little fancy features that could cause problems with decoding
> Decoder is designed to assume, without any penalties if it assumes wrong
> Width can be forced, or can be half of the length of the data, rounded up
> Color format is assumed to be 8 bit color
> Can support grayscale or color in a variety of bit depths
> There is no data between or around pixels, or at the end of the file
> It is literally a sequence of bits after the minimal header
> This should prevent any issues with databending
> Header is encoded with 16 bits for width for a maximum width of 65536 pixels, errors on 0
> All data is big-endian encoded
> This is followed by 6 bits for color depth which is followed by 42 ignored bits:
> | 0
> | 0000 1 bit black and white
> | #### 4 bits encoding bit depth, 1 indexed
> -----------------------------------------
> | 1
> | #### 4 bits encoding bit depth per channel, errors on 0
> Sample header:
> | 01100101 01110011 01101001 00110001 ( esi1 in ascii binary )
> | 00000000 00010000 10100000 00000000
> | 00000000 00000000 00000000 00000000 ( 4 ignored bytes )
> -----------------------------------------
> | 00000000
> | 00010000 width of 16
> | 1 color
> | 01000 8 bit depth
> | 00 00000000 ignored bits
> | ( to ease databending the entire image )
> | ( without affecting the header )
> Flags
> | -fw width ( chops to an int, will error on not a number )
> | -g ( guesses width )
"""
import sys
from PIL import Image
import math
import re
import struct
from bitstring import BitArray
import os
class ZeroSizeError(Exception):
    """Raised when the ESI header declares an image width of 0 pixels.

    Inherits from Exception rather than BaseException so generic
    ``except Exception`` handlers (and not only bare excepts) can catch it.
    """
class OverByteError(Exception):
    """Raised when a bit-string longer than 8 bits is packed into one byte.

    Inherits from Exception rather than BaseException so generic
    ``except Exception`` handlers (and not only bare excepts) can catch it.
    """
def push(x: str):
    """Pack the bit-string *x* (spaces allowed) into a single byte and write it.

    Raises OverByteError when more than 8 bits remain after stripping spaces.

    NOTE(review): this writes to the module-level ``esi`` file object, which
    the main script below opens in read-only ('rb') mode -- calling this as-is
    would fail.  It looks like leftover encoder-side code; confirm before use.
    """
    # Remove spaces so callers may group bits for readability.
    x = re.sub(' ', '', x)
    if len(x) > 8:
        raise OverByteError
    # Debug output: the bit-string and its integer value.
    print(x)
    print(int(x, 2))
    esi.write(struct.pack('>1B', int(x, 2)))
def multipush(x: str) -> None:
    """Write the bit-string *x* (spaces ignored) as a sequence of whole bytes.

    Each complete 8-bit group is handed to push(); any trailing partial byte
    is silently dropped, matching the original sliding-window behaviour.
    """
    x = re.sub(' ', '', x)
    # len(x) // 8 whole bytes; plain integer arithmetic replaces the former
    # int(math.floor(int(len(x)) / 8)) round-trip through float.
    for start in range(0, (len(x) // 8) * 8, 8):
        push(x[start:start + 8])
def big_endian16(x):
    """Render *x* as a 16-bit big-endian binary string; width 0 is invalid."""
    if not x:
        raise ZeroSizeError
    return "{:016b}".format(x)
def big_endian8(x):
    """Render *x* as an 8-bit big-endian binary string."""
    return "{:08b}".format(x)
# Decode the ESI file named on the command line into a PNG.
with open(sys.argv[1], "rb") as esi:
    # contents = esi.read()
    # Whole file as one long string of '0'/'1' characters.
    contents = BitArray(bytes=esi.read()).bin
    # print(contents)
# First 32 bits must spell "esi1" in ASCII; decoding continues either way.
if contents[:32] == "01100101011100110110100100110001":
    print("Image is ESI")
else:
    print("Header does not identify image as ESI")
contents = contents[32:]
# Next 16 bits encode the width, big-endian.
width_bytes = contents[:16]
contents = contents[16:]
power = int(len(width_bytes))
width = 0
# NOTE(review): powers run 16..1 rather than 15..0, so `width` here is twice
# the header value; the later `math.ceil(width / 2)` halving compensates.
for bit in width_bytes:
    width += (2 ** power) * int(bit)
    power -= 1
# print(width)
# Skip the 6 colour-depth bits plus 42 ignored bits; the rest is pixel data.
contents = contents[48:]
# print(contents)
length = len(contents)
custom_name = None
# Flag parsing: -fw forces the width, -g guesses it, -n names the output.
# Flags may appear at several argv positions, hence the repeated blocks.
try:
    if sys.argv[2] == "-fw":
        width = int(sys.argv[3])
        # print(width)
    elif sys.argv[2] == "-g":
        width = math.ceil(length / 24) * 24
    elif sys.argv[2] == "-n":
        custom_name = sys.argv[3]
except IndexError:
    pass
try:
    if sys.argv[3] == "-n":
        custom_name = sys.argv[4]
    elif sys.argv[4] == "-n":
        custom_name = sys.argv[5]
    elif sys.argv[3] == "-fw":
        width = int(sys.argv[4])
    elif sys.argv[4] == "-fw":
        width = int(sys.argv[5])
    elif sys.argv[4] == "-g":
        width = math.ceil(length / 24) * 24
except IndexError:
    pass
try:
    if sys.argv[5] == "-n":
        custom_name = sys.argv[6]
    elif sys.argv[6] == "-n":
        custom_name = sys.argv[7]
    elif sys.argv[5] == "-fw":
        width = int(sys.argv[6])
    elif sys.argv[6] == "-fw":
        width = int(sys.argv[7])
    elif sys.argv[6] == "-g":
        width = math.ceil(length / 24) * 24
except IndexError:
    pass
'''filename = str(str(sys.argv[1]).split("/")[-1:][0])
path = "/".join(str(sys.argv[1]).split("/")[:-1]) + "/"
create_file = str(filename.split(".")[:-1][0]) + ".png"
if custom_name is not None:
    create_file = custom_name + ".png"
if path != "/":
    new_file = path + create_file
else:
    new_file = create_file'''
# Output path: same directory, extension swapped to .png (or a custom name).
path, file = os.path.split(sys.argv[1])
if custom_name is not None:
    file = custom_name + ".png"
else:
    file = str(file.split(".")[:-1][0]) + ".png"
new_file = os.path.join(path, file)
# With `width` still doubled (see note above), length/width/12 equals
# bits / (pixel_width * 24 bits-per-pixel) -- the row count.
height = int(math.ceil(length / width / 12))
width = int(math.ceil(width / 2))
size = width, height
print(size)
im = Image.new("RGB", size)
counter = 0
total_amount = len(contents)
done_amount = 0
# 80-segment text progress bar over the pixel stream.
segment_value = total_amount / 8 / 80 / 3
segments = [int(segment_value * n) for n in range(1, 81)]
print("#", end="")
print("-" * 78, end="")
print("#")
seg_counter = 0
for x in range(height):
    for y in range(width):
        try:
            # 24 bits -> one (R, G, B) tuple, 8 bits per channel.
            color_tuple = (int(contents[counter] + contents[counter + 1] + contents[counter + 2] + contents[counter + 3] + contents[counter + 4] + contents[counter + 5] + contents[counter + 6] + contents[counter + 7], 2), int(contents[counter + 8] + contents[counter + 9] + contents[counter + 10] + contents[counter + 11] + contents[counter + 12] + contents[counter + 13] + contents[counter + 14] + contents[counter + 15], 2), int(contents[counter + 16] + contents[counter + 17] + contents[counter + 18] + contents[counter + 19] + contents[counter + 20] + contents[counter + 21] + contents[counter + 22] + contents[counter + 23], 2))
        except IndexError:
            # Ran past the data: pad the remainder of the image with black.
            color_tuple = (0, 0, 0)
        done_amount += 1
        if int(done_amount) == segments[seg_counter]:
            seg_counter += 1
            print("#", end="", flush=True)
        im.putpixel((y, x), color_tuple)
        counter += 24
print()
im.save(new_file)
| AlexPoulsen/esi | esi_to_png.py | esi_to_png.py | py | 5,823 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 69,
"us... |
11101974490 | #!/usr/bin/env python
import json
import csv
import re
import math
from pprint import pprint
CURRENT_SOURCE_PATTERN = re.compile('^i', re.I)
INDUCTOR_PATTERN = re.compile('^l', re.I)
PULSE_PATTERN = re.compile('^pulse', re.I)
POSITION_PATTERN = re.compile(r'\An|_n', re.I)
TIME_PATTERN = re.compile(r'^\.tran', re.I)
max_time = 0
def get_PWLs(powertrace, fmt, cycletime, risetime, falltime, csf):
    '''
    Return a dictionary keyed by the columns powertrace. Each entry is a
    long string of the form PWL(t1 v1 t2 v2 ...) for the component.
    powertrace is a csv.DictReader
    '''
    result = dict()
    i = 0
    components = powertrace.fieldnames
    for c in components:
        result[c] = ['PWL(']
    for row in powertrace:
        # One powertrace row == one clock cycle; emit a trapezoid segment
        # (rise to the peak, hold, fall before the next cycle).
        cycle_start = cycletime * i
        peak = cycle_start + risetime
        cycle_fall = cycle_start + cycletime - falltime
        for c in components:
            # Scale the traced power by the current scale factor; inf/NaN
            # samples are clamped to zero.
            peak_amplitude = float(row[c]) / csf
            if math.isinf(peak_amplitude) or math.isnan(peak_amplitude):
                peak_amplitude = 0.0
            result[c].append(fmt % (cycle_start, peak, peak_amplitude, cycle_fall, peak_amplitude))
        i = i + 1
    global max_time
    # Side effect: record the total simulated time for later .tran patching.
    max_time = i * cycletime
    for c in components:
        # in case we don't have any data for this component, set it to zero
        if len(result[c]) == 1:
            result[c].append('0 0')
        result[c][1] = result[c][1].strip()
        result[c].append(')')
        result[c] = ''.join(result[c])
    return result
def indexof_match(regex, l):
    '''
    Return the index of the first string in l that matches the compiled regex
    object, or None when nothing matches.
    '''
    for position, text in enumerate(l):
        if regex.match(text):
            return position
def get_positions(e):
    '''
    Extract and return all (x,y) positions in the split line e
    This function is pretty liberal in what it will accept as a position specifier
    '''
    result = []
    for w in e:
        # POSITION_PATTERN (module-level) flags tokens that look like node
        # names carrying coordinates.
        if POSITION_PATTERN.search(w):
            # Coordinates are the last two '_'-separated fields of the token.
            pos = w.split('_')[-2:]
            if len(pos) == 2:
                try:
                    result.append([float(x) for x in pos])
                except ValueError:
                    # Not numeric after all -- skip this token.
                    pass
    return result
def get_i_position(e):
    '''
    If e is a current source, return the parsed [x, y] position.

    e is a split SPICE line; returns None when no position token is found.
    '''
    if e:
        i = indexof_match(POSITION_PATTERN, e)
        # Compare against None explicitly: the former bare truth test
        # (`if i:`) silently discarded a legitimate match at index 0.
        if i is not None:
            return [float(x) for x in e[i].split('_')[1:]]
def get_l_positions(e):
    """Parse the (x, y) endpoints of an inductor line from its two node names."""
    coords = []
    for node in e[1:3]:
        tail = node.split('_')[-2:]
        coords.append([float(value) for value in tail])
    return coords
def position_range(spice):
    '''
    For the split lines in spice, find the bounding box of all points.
    '''
    xs = []
    ys = []
    for line in spice:
        for x, y in get_positions(line):
            xs.append(x)
            ys.append(y)
    return [[min(xs), max(xs)], [min(ys), max(ys)]]
def replace_current(e, s):
    '''
    Given the split list e containing a SPICE element, return a copy with the
    amplitude (everything after the three leading fields) replaced by s.
    '''
    head = e[:3]
    return head + [s]
def chop_pulse(e):
    '''
    If the split list e has a PULSE(...) token, return everything preceding it.

    Returns None when no pulse token is present.
    '''
    pulse_index = indexof_match(PULSE_PATTERN, e)
    # Compare against None explicitly: a pulse token at index 0 would make
    # the former bare truth test (`if pulse_index:`) drop the match.
    if pulse_index is not None:
        return e[:pulse_index]
def floorplan_range(fp):
    '''
    Return a bounding box [[xmin, xmax], [ymin, ymax]] for the given floorplan.
    '''
    recs = fp.values()
    xs = [r[0][0] for r in recs] + [r[0][1] for r in recs]
    ys = [r[1][0] for r in recs] + [r[1][1] for r in recs]
    return [[min(xs), max(xs)], [min(ys), max(ys)]]
def scale_floorplan(fp, box):
    '''
    Perform an in-place scaling of fp to fit box
    '''
    fp_box = floorplan_range(fp)
    # Linear remap of every rectangle edge from the floorplan's own bounding
    # box onto `box`.  NOTE(review): a degenerate floorplan (zero width or
    # height) would divide by zero here -- confirm inputs always have extent.
    for rec in fp.values():
        rec[0][0] = box[0][0] + (rec[0][0] - fp_box[0][0]) * (box[0][1] - box[0][0]) / (fp_box[0][1] - fp_box[0][0])
        rec[0][1] = box[0][0] + (rec[0][1] - fp_box[0][0]) * (box[0][1] - box[0][0]) / (fp_box[0][1] - fp_box[0][0])
        rec[1][0] = box[1][0] + (rec[1][0] - fp_box[1][0]) * (box[1][1] - box[1][0]) / (fp_box[1][1] - fp_box[1][0])
        rec[1][1] = box[1][0] + (rec[1][1] - fp_box[1][0]) * (box[1][1] - box[1][0]) / (fp_box[1][1] - fp_box[1][0])
def find_component(fp, pos):
    '''
    Find the component in floorplan fp (if any) whose rectangle contains the
    point pos; returns None when no rectangle contains it.
    '''
    x, y = pos[0], pos[1]
    for name, ((x0, x1), (y0, y1)) in fp.items():
        if x0 <= x <= x1 and y0 <= y <= y1:
            return name
def pop_worst(d):
    """Remove and return the entry of d with the largest 'distance' value.

    On ties, the last such key in iteration order is evicted (matching the
    original scan-and-reassign behaviour).
    """
    worst_key = None
    worst_distance = float('-inf')
    for key, entry in d.items():
        if entry['distance'] >= worst_distance:
            worst_key = key
            worst_distance = entry['distance']
    return d.pop(worst_key)
def nearest_components(fp, pos, name, left, right, best, inductor_count):
    """
    Record inductor `name` (with its `left`/`right` node names) as one of the
    up to `inductor_count` nearest inductors of every floorplan component,
    ranked by Manhattan distance from the component's centre to `pos`.
    `best` is mutated in place.
    """
    for component, rec in fp.items():
        if pos[0] >= rec[0][0] and pos[0] <= rec[0][1] and pos[1] >= rec[1][0] and pos[1] <= rec[1][1]:
            # we're inside a rectangle
            best[component][name] = {
                'left': left,
                'right': right,
                'distance': 0.0
            }
        else:
            # Manhattan distance from the rectangle's centre to the inductor.
            distance = abs((rec[0][0] + rec[0][1])/2 - pos[0]) + abs((rec[1][0] + rec[1][1])/2 - pos[1])
            # Still room (counting only non-zero-distance entries): just add.
            if len([k for k,v in best[component].items() if v['distance'] > 0.0]) < inductor_count:
                best[component][name] = {
                    'left': left,
                    'right': right,
                    'distance': distance
                }
            elif distance < max([b['distance'] for b in best[component].values()]):
                # Better than the current worst: insert, then evict the worst.
                best[component][name] = {
                    'left': left,
                    'right': right,
                    'distance': distance
                }
                pop_worst(best[component])
def PWL_format(timeprefix, timeprec, aprec):
    '''
    Build the per-cycle PWL segment format string (a format string that
    generates format strings...): " T 0 T A T A" where T is a time field
    with the given precision/SI prefix and A an amplitude field.
    '''
    time_field = '%%.%df%s' % (timeprec, timeprefix)
    amp_field = '%%.%df' % aprec
    return ' ' + time_field + ' 0 ' + time_field + ' ' + amp_field + ' ' + time_field + ' ' + amp_field
def translate_to_PWL(floorplan, powertrace, spice, inductor_count):
    '''
    Given a scaled floorplan fp, powertrace pt, and spice file sp, convert current
    sources to PWL representation using the given power data
    '''
    hit = 0
    miss = 0
    # Also collect every inductor and, per component, its nearest inductors.
    inductors = {'list': [], 'nearest': {}}
    for comp in floorplan.keys():
        inductors['nearest'][comp] = {}
    for i in range(len(spice)):
        if len(spice[i]) < 1:
            continue
        if CURRENT_SOURCE_PATTERN.match(spice[i][0]):
            pos = get_i_position(spice[i])
            if not pos:
                continue
            comp = find_component(floorplan, pos)
            if not comp:
                # Outside every floorplan rectangle: comment the source out
                # ('*' is the SPICE comment prefix).
                spice[i].insert(0, '*')
                miss += 1
                continue
            hit += 1
            spice[i] = replace_current(spice[i], powertrace[comp])
        elif INDUCTOR_PATTERN.match(spice[i][0]):
            inductors['list'].append(spice[i][0])
            pos = get_l_positions(spice[i])
            if not pos:
                continue
            # Rank this inductor against every component by its first endpoint.
            nearest_components(floorplan, pos[0], spice[i][0], spice[i][1], spice[i][2], inductors['nearest'], inductor_count)
    print('\thit: %d\n\tmiss: %d' % (hit, miss))
    return inductors
def single_PWL(fmt, pwl, spice):
    """
    Replace every current source in `spice` with the single PWL string `pwl`
    and patch the .tran line to span the whole trace (reads the module-level
    max_time set by get_PWLs).
    """
    for i in range(len(spice)):
        if len(spice[i]) < 1:
            continue
        elif TIME_PATTERN.match(spice[i][0]):
            # .tran: 10ps step, total time formatted with the SI prefix.
            spice[i] = spice[i][:1] + ['10ps', fmt % max_time]
        elif CURRENT_SOURCE_PATTERN.match(spice[i][0]):
            spice[i] = replace_current(spice[i], pwl)
if __name__ == '__main__':
    import argparse
    # CLI driver: joins the floorplan, powertrace and SPICE inputs into an
    # annotated SPICE deck plus a nearest-inductor report.
    parser = argparse.ArgumentParser(description='The name mappings should be a JSON object of the form {"floorplan_name": "powertrace_name"}')
    parser.add_argument('-n', '--name-mappings', default='names.json', help='Defaults to names.json')
    parser.add_argument('-f', '--floorplan', default='floorplan.tsv', help='Defaults to floorplan.tsv')
    parser.add_argument('-s', '--spice', default='in.spice', help='Defaults to in.spice')
    parser.add_argument('--cycle-time', default=1.0, type=float, help='Defaults to 1.0')
    parser.add_argument('--rise-time', default=0.1, type=float, help='Defaults to 0.1')
    parser.add_argument('--fall-time', default=0.1, type=float, help='Defaults to 0.1')
    parser.add_argument('--current-scale-factor', default=1.0, help='each peak amplitude wil be divided by csf, defaults to 1.0')
    parser.add_argument('-o', '--out', default='out.spice', help='Defaults to out.spice')
    parser.add_argument('-l', '--inductors', default='inductors.json', help='Sets of nearest inductors for each component. Defaults to inductors.json')
    parser.add_argument('-p', '--powertrace', default='powertrace.csv', help='Defaults to powertrace.csv')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--time-prefix', default='N', help='SI prefix for time units in the output, defaults to N')
    parser.add_argument('--time-precision', default=1, type=int, help='Number of decimal places to output for time values, defaults to 1')
    parser.add_argument('--amplitude-precision', default=4, type=int, help='Number of decimal places to output for amplitude values, defaults to 4')
    parser.add_argument('--nearest-inductors', default=5, type=int, help='Number nearby inductors to report, defaults to 5')
    parser.add_argument('--single', help='Assume all FUs are the one given rather than checking the floorplan')
    args = parser.parse_args()
    if not args.single:
        print('loading name mappings')
        with open(args.name_mappings) as f:
            name_mappings = json.load(f)
    if not args.single:
        floorplan = dict()
        print('loading floorplan')
        with open(args.floorplan, newline='') as f:
            # Skip '#' comment rows in the TSV.
            d = csv.reader((row for row in f if not row.startswith('#')), delimiter='\t')
            for row in d:
                if row:
                    try:
                        # Columns: name, width, height, x, y -> [[x0,x1],[y0,y1]].
                        floorplan[name_mappings[row[0]]] = [
                            [float(row[3]), float(row[1]) + float(row[3])],
                            [float(row[4]), float(row[2]) + float(row[4])]]
                    except KeyError:
                        #ignore anything not listed in names.json
                        pass
    print('loading powertrace')
    fmt = PWL_format(args.time_prefix, args.time_precision, args.amplitude_precision)
    with open(args.powertrace, newline='') as f:
        d = csv.DictReader(f)
        powertrace = get_PWLs(d, fmt, args.cycle_time, args.rise_time, args.fall_time, args.current_scale_factor)
    print('loading SPICE file')
    with open(args.spice) as f:
        spice = [l.split() for l in f.readlines()]
    if not args.single:
        print('getting bounding box from SPICE file')
        box = position_range(spice)
    if args.verbose and not args.single:
        pprint(box)
    if not args.single:
        # Stretch the floorplan onto the SPICE grid's bounding box.
        print('scaling floorplan')
        scale_floorplan(floorplan, box)
    if args.verbose and not args.single:
        pprint(floorplan)
    print('converting to PWL')
    if args.single:
        single_PWL('%%f%s' % args.time_prefix, powertrace[args.single], spice)
    else:
        inductors = translate_to_PWL(floorplan, powertrace, spice, args.nearest_inductors)
    if not args.single:
        print('writing out inductors')
        with open(args.inductors, 'w') as f:
            json.dump(inductors, f, sort_keys=True, indent=4, separators=(',', ': '))
    print('writing out new SPICE file')
    with open(args.out, 'w') as f:
        # Re-join the split tokens, one SPICE element per line.
        for line in spice:
            for word in line:
                f.write(word)
                f.write(' ')
            f.write('\n')
| tshaffe1/noisemapper | noisemapper.py | noisemapper.py | py | 11,790 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 10,
"usag... |
27393973025 | # --- Bibliothèques utilisées ---
from functools import partial
import tkinter as tk
from random import seed
from random import randint
# --- Préparation du jeu ---
def diff_size(diff):
    """
    Determine the number of cells on the board.

    input: diff (difficulty), one of: easy, normal, hard
    output: sets the globals col (columns) and row (rows) for the chosen
    difficulty; re-prompts recursively on invalid input
    """
    global col, row
    if diff == "hard":
        col = 15
        row = 15
    elif diff == "normal":
        col = 12
        row = 10
    elif diff == "easy":
        col = 8
        row = 7
    else:
        # Invalid choice: tell the player (in French) and ask again.
        print("Nous ne pouvons pas recourir à la demande, veuillez réessayer \n")
        return diff_size(str(input("Difficulté : easy/normal/hard \n")))
# --- Initialisation des grilles ---
class Grid:
    """Grid-building helpers.

    NOTE(review): the methods take no ``self`` and are always invoked as
    ``Grid.method(...)`` elsewhere in this file -- they behave as plain
    functions grouped under a class namespace.
    """
    def new_grid2(m):
        # Build one row of m placeholder values (0..m-1; overwritten later).
        TT = []
        for j in range(m):
            TT.extend([j])
        return TT
    def new_grid(n, m):
        # Build an n-row by m-column grid (each row is an independent list).
        T = []
        for i in range(n):
            T.append(Grid.new_grid2(m))
        return T
    def init_grid(n, m, grid):
        # Reset every cell of the n x m grid to 0.
        for i in range(n):
            for j in range(m):
                grid[i][j] = 0
    def bombe_grid(n, m, coord_bombes):
        """
        Randomly fill a grid marking bombs (1) and plain cells (0).

        inputs: n, m = board bounds; coord_bombes = list of lists (bomb grid)
        output: none (coord_bombes is filled in place)
        """
        global bombes
        seed()
        for i in range(n):
            for j in range(m):
                # Roughly a 2-in-21 chance of a bomb per cell.
                if 2 > randint(0, 20):
                    coord_bombes[i][j] = 1
                else:
                    coord_bombes[i][j] = 0
    def how_bombe(n, m, coord_bombes, near_bombes):
        """
        For every cell, count the bombs in the 8 surrounding cells
        (the 1/2/3/... hint digits shown on dug cells).
        """
        chiffre = 0
        for i in range(n):
            for j in range(m):
                for k in range(-1, 2, 1):
                    for h in range(-1, 2, 1):
                        try:
                            if (coord_bombes[i + k][j + h] == 1) and (i + k >= 0) and (j + h >= 0) and (
                                    (k != 0) or (h != 0)):  # if a neighbouring cell holds a bomb,
                                chiffre = chiffre + 1  # increment this cell's hint count
                        except IndexError:
                            pass
                near_bombes[i][j] = chiffre
                chiffre = 0
    def flag_bombes(n, m, flag_count):
        """
        Build an all-zero grid that will later record a 1 wherever a flag
        is placed on a bomb.
        """
        for i in range(n):
            for j in range(m):
                flag_count[i][j] = 0
# --- Préparation des grilles ---
class Plateau:
    """Game board: owns and initialises the hidden data grids."""
    def __init__(self, n, m):
        self.n = n
        self.m = m
        self.all_grid()
        self.all_init_grid()
    def all_grid(self):
        """
        Create the hidden "sub-grids" that back the minesweeper buttons.

        inputs: none
        output: the coord_bombes, near_bombes, dig_case and flag_count grids
        """
        self.coord_bombes = Grid.new_grid(self.n,
                                          self.m)  # bomb coordinates (0: no bomb / 1: bomb)
        self.near_bombes = Grid.new_grid(self.n,
                                         self.m)  # cells adjacent to bombs (x bombs around = digit x)
        self.flag_count = Grid.new_grid(self.n,
                                        self.m)  # counts flags placed on bombs
        self.dig_case = Grid.new_grid(self.n,
                                      self.m)  # whether a cell is dug (0: not dug, 1: dug)
    def all_init_grid(self):
        """
        Fill the freshly created grids with their initial values.
        """
        Grid.bombe_grid(self.n, self.m, self.coord_bombes)
        Grid.how_bombe(self.n, self.m, self.coord_bombes, self.near_bombes)
        Grid.flag_bombes(self.n, self.m, self.flag_count)
        Grid.init_grid(self.n, self.m, self.dig_case)
# --- Début des actions du joueur ---
class Game:
    """Player actions: digging, flood-fill reveal, flags, win/lose checks."""
    def creuser(i, j, Plat, A):
        """
        Decide what a button shows once it is clicked (or dug by recursion).
        """
        Plat.dig_case[i][j] = 1  # the cell is now dug
        if Plat.coord_bombes[i][j] == 1:  # bomb under the cell
            A.pt[i][j] = tk.Button(A.plateau, width=5, height=1, text="BOUM", bg="red",
                                   command=partial(Game.is_mine, i, j, Plat, A))
            A.pt[i][j].grid(row=i, column=j)
        elif (Plat.coord_bombes[i][j] == 0) and (Plat.near_bombes[i][j] != 0):  # no bomb, but bombs nearby
            A.pt[i][j] = tk.Button(A.plateau, width=5, height=1, text=Plat.near_bombes[i][j], bg="blue",
                                   command=partial(Game.is_mine, i, j, Plat, A))
            A.pt[i][j].grid(row=i, column=j)
        elif (Plat.coord_bombes[i][j] == 0) and (Plat.near_bombes[i][j] == 0):  # no bomb and none nearby
            A.pt[i][j] = tk.Button(A.plateau, width=5, height=1, text="", bg="gray",
                                   command=partial(Game.is_mine, i, j, Plat, A))
            A.pt[i][j].grid(row=i, column=j)
    def recur(i, j, Plat, A):
        """
        Flood-fill: recursively dig the (grey) cells that touch no bomb; the
        recursion stops where numbered cells border a bomb.
        """
        if Plat.coord_bombes[i][j] == 0:  # no mine at (i, j)
            Game.creuser(i, j, Plat, A)  # dig the button at (i, j)
        for k in range(-1, 2, 1):
            for h in range(-1, 2, 1):
                try:
                    if (Plat.dig_case[i + k][j + h] == 0) and (Plat.near_bombes[i + k][j + h] == 0) and \
                            (i + k >= 0) and (j + h >= 0) and \
                            ((k != 0) or (h != 0)):  # not dug yet and no mines around
                        try:
                            Game.recur(i + k, j + h, Plat, A)  # recurse on the neighbour
                        except:
                            pass
                except IndexError:
                    pass
                try:
                    if Plat.near_bombes[i + k][j + h] != 0:  # a mine is nearby
                        try:
                            Game.creuser(i + k, j + h, Plat, A)
                        except:
                            pass
                except IndexError:
                    pass
    def is_mine(i, j, Plat, A):
        # Left-click handler: dig, flood-fill greys, end the game on a mine.
        Game.creuser(i, j, Plat, A)
        if Plat.near_bombes[i][j] == 0:  # cell is "grey" / has no digit
            Game.recur(i, j, Plat, A)
        if Plat.coord_bombes[i][j] == 1:  # mine
            A.life.configure(text='GAME OVER, YOU LOST')
    def replay(col, row, Plat, A):
        # Reset the grids and rebuild the UI with the same dimensions.
        A.all_supp()
        Plat.all_init_grid()
        A.fen_1()
        A.fen_2(col, row, Plat)  # same coordinates as at the start
        A.fen_3(col, row, Plat)
    def clic_droit(self, A, i, j, Plat, bouton):
        # Right-click handler; `self` receives the Tk event object here
        # because the method is bound via partial() with keyword arguments.
        bouton.configure(text="FLAG", bg="green")
        if Plat.coord_bombes[i][j] == 1:  # a bomb lies under the flag
            Plat.flag_count[i][j] = 1  # the flag sits on a bomb
        if Game.compare(Plat.flag_count, Plat.coord_bombes, i=0) == 1:  # grids are identical
            A.life.configure(text='GAME OVER, YOU WON')
    def compare(liste_flag, liste_bombe, i):
        """
        Compare the flag grid with the bomb grid row by row (the player has
        found every bomb when they match).  Returns True or None.
        """
        next = 0
        nb_ligne = Plat.n  # NOTE(review): reads the module-level Plat, not a parameter
        for val_flag in liste_flag[i]:
            if val_flag != liste_bombe[i][next]:
                return None
            next += 1
        if i >= nb_ligne - 1:  # last row reached
            return True
        return Game.compare(liste_flag, liste_bombe, i + 1)
# --- Définition des fenêtres Tkinter ---
class App:
    """Tkinter front end: root window, button grid and status widgets."""
    def __init__(self, col, row, Plat):
        self.fen_set()
        self.fen_1()
        self.fen_2(col, row, Plat)
        self.fen_3(col, row, Plat)
        self.fen.mainloop()
    def fen_set(self):
        # Root window.
        self.fen = tk.Tk()
        self.fen.title('demineur')
        self.fen.geometry('800x500')
    def fen_1(self):
        self.content = tk.Frame(self.fen, width=900, height=800)  # holds every widget group
        self.plateau = tk.Frame(self.content, width=500, height=500)  # widget group for the grids
        self.content.place(x=0, y=0)
        self.plateau.place(x=80, y=80)
    def fen_2(self, col, row, Plat):
        # Build the col x row grid of cell buttons: left click digs,
        # right click (<Button-3>) plants a flag.
        self.pt = Grid.new_grid(col, row)
        for i in range(col):
            for j in range(row):
                self.pt[i][j] = tk.Button(self.plateau, width=5, height=1, text="",
                                          command=partial(Game.is_mine, i, j, Plat, self))
                self.pt[i][j].bind("<Button-3>",
                                   partial(Game.clic_droit, A=self, i=i, j=j, Plat=Plat, bouton=self.pt[i][j]))
                self.pt[i][j].grid(row=i, column=j)
    def fen_3(self, col, row, Plat):
        # Status label and replay button.
        self.life = tk.Label(text="GAME")
        self.replay = tk.Button(text="Rejouer", command=lambda: Game.replay(col, row, Plat, self))
        self.life.place(x=10, y=10)
        self.replay.place(x=10, y=30)
    def all_supp(self):
        # Remove the status label before the UI is rebuilt on replay.
        self.life.destroy()
# --- program entry point: ask the difficulty, build the board, start the UI ---
diff_size(str(input("Difficulté : easy/normal/hard \n")))
Plat = Plateau(col, row)
A = App(col, row, Plat)
| Claripouet/demineur | démineur_final.py | démineur_final.py | py | 10,371 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "random.seed",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"l... |
32087619445 | import uiautomator2 as u2
import pytest
import allure
@allure.feature("测试首页")  # main test section for this class
# @allure.environment(app_package='com.mobile.fm')  # Environment parameters can be set as needed
# @allure.environment(app_activity='com.mobile.fm.activity')
# @allure.environment(device_name='aad464')
# @allure.environment(platform_name='Android')
class TestInfo():
    '''
    Allure severity level definitions:
    1. Blocker: blocking defect (client unresponsive, cannot proceed)
    2. Critical: critical defect (missing feature)
    3. Normal: ordinary defect (incorrect calculation)
    4. Minor: minor defect (UI does not match requirements)
    5. Trivial: cosmetic defect (missing or non-standard prompt on a required field)
    '''
    @allure.story('测试是否可以获取信息')  # sub-feature
    @allure.severity('blocker')
    @allure.step('')
    @allure.issue('www.baidu.com')
    @allure.testcase('www.baidu.com')
    def test_info(self):
        """
        Retrieve and print the Android device info over uiautomator2.
        """
        d = u2.connect('emulator-5554')
        print(d.info)
        #d.app_install('https://imtt.dd.qq.com/16891/7E569C80A3714D58E77F6173EB8F6329.apk?fsname=com.netease.cloudmusic_5.7.2_130.apk&csr=1bbd')
    # Test step; the {0}/{1} placeholders are filled from the call arguments
    # via format().  @staticmethod is the bug fix here: without it the bound
    # call self.str_add(str1, str2) also passed `self` as a third positional
    # argument and raised TypeError.
    @staticmethod
    @allure.step("字符串相加:{0},{1}")
    def str_add(str1, str2):
        """Concatenate two strings; report a message for non-string input."""
        if not isinstance(str1, str):
            return "%s is not a string" % str1
        if not isinstance(str2, str):
            return "%s is not a string" % str2
        return str1 + str2
    @allure.story('test_story_01')
    @allure.severity('blocker')
    def test_case(self):
        """Verify that str_add concatenates two plain strings."""
        str1 = 'hello'
        str2 = 'world'
        assert self.str_add(str1, str2) == 'helloworld'
        # To attach files to the report: allure.attach('arg1', 'arg2', 'arg3')
        # file = open('../testfile/image/lqf.fpg', 'rb').read()
        # allure.attach('test_img', file, allure.attach_type.PNG)
if __name__ == '__main__':
    # Run this module's tests quietly and write Allure raw results to ../report/xml.
    pytest.main(['-s', '-q', '--alluredir', '../report/xml'])
| luoqingfu/u2demo | testcase/test_demo.py | test_demo.py | py | 2,173 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "uiautomator2.connect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "allure.story",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "allure.severity",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "allure.step",
"... |
16805089134 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Sample records demonstrating DataFrame construction and printing.
a_dataframe = pd.DataFrame(
    {'name': ['Alice', 'Bob', 'Charles'],
     'age': [25, 23, 34],
     'gender': ['female', 'male', 'male']})
print(a_dataframe)

# 4x4 grid of the integers 0..15.  This definition was commented out in the
# original file, which made the plotting calls below fail with a NameError
# on `new_dataframe`; restoring it is the fix.
new_dataframe = pd.DataFrame(np.arange(16).reshape((4, 4)),
                             # x-axis (index) and y-axis (column) labels
                             index=['x1', 'x2', 'x3', 'x4'],
                             columns=['y1', 'y2', 'y3', 'y4'])

# Replace the original row labels 'x1'..'x4' with the integers 1..4.
new_dataframe.index = np.arange(1, 5)
print(new_dataframe)

# Line plot of all four rows.
new_dataframe[:4].plot()
plt.legend(loc="best")
plt.show()

# Bar plot of the same data with custom colours.
new_dataframe[:4].plot(kind='bar', color=['magenta', 'yellow', 'cyan', 'lime'])
# Valid legend locations: best, upper right, upper left, lower left,
# lower right, right, center left, center right, lower center,
# upper center, center.
plt.legend(loc="upper right")
plt.show()
| OceanicSix/Python_program | Study/external/pand/pandas_example.py | pandas_example.py | py | 973 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
1293373601 | import random
import itertools
from fp.fp import FreeProxy
import requests
from itemloaders.processors import TakeFirst, MapCompose
# Fetch the current list of free proxies (network call at import time).
proxies_list = FreeProxy().get_proxy_list()
print(type(proxies_list))
# Endless round-robin iterator over the fetched proxies.
proxy = itertools.cycle(proxies_list)
# pr = random.choice(proxies)
def set_proxy(proxy):
    """Advance the proxy iterator once and build a requests-style
    proxies mapping ({'http': ..., 'https': ...}) from it."""
    address = next(proxy)
    mapping = {"http": address, "https": address}
    print(mapping)
    return mapping
print(F"length of proxy = > {len(proxies_list)}")
# Grab the next proxy and verify it works by asking ipify for our public IP.
proxies = set_proxy(proxy)
response = requests.get("https://api.ipify.org/", proxies=proxies)
print(response.text)
| navneet37/BusinessScrapy | testproxy.py | testproxy.py | py | 589 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fp.fp.FreeProxy",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
}
] |
28941505818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from tex import Tex
from PIL import Image
if __name__ == '__main__':
    # Walk the packed assets and, for every .tex that has an edited PNG under
    # new/, splice the PNG back in and write a patched .tex under ../patch/.
    for root, dirs, files in os.walk('PackedContent'):
        for f in files:
            if os.path.splitext(f)[1].lower() != '.tex':
                continue
            name = os.path.join(root, f)
            new_name = os.path.join('new', name).replace('.tex', '.png')
            tex = None
            if os.path.exists(new_name):
                # A full-size replacement image exists: use it as-is.
                print(name)
                tex = Tex(open(name, 'rb'))
                tex.image = Image.open(new_name)
                patch_name = '../patch/' + name
            else:
                # Fall back to a non-"half" PNG, scaled to the original size.
                new_name = new_name.replace('half', '')
                if os.path.exists(new_name):
                    print(name)
                    tex = Tex(open(name, 'rb'))
                    tex.image = Image.open(new_name).resize((tex.width, tex.height))
                    patch_name = '../patch/' + name
            if tex is not None:
                # exist_ok replaces the former bare `except BaseException:
                # pass`, which would also have swallowed KeyboardInterrupt.
                os.makedirs(os.path.split(patch_name)[0], exist_ok=True)
                # Close the output handle deterministically (was leaked).
                with open(patch_name, 'wb') as out:
                    tex.save(out)
| noword/EXAPUNKS-Localize | images/import_imgs.py | import_imgs.py | py | 1,256 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number"... |
647601614 | import json
import re
from konlpy.tag import Twitter
from collections import Counter
import pytagcloud
import webbrowser
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
def showGraph(wordInfo) :
    """Display a bar chart of word frequencies, sorted descending.

    wordInfo: dict mapping word -> occurrence count.
    """
    # Register a Korean-capable font so Hangul axis labels render correctly.
    font_location = "C:\Windows\Fonts\malgun.ttf"
    font_name = font_manager.FontProperties(fname=font_location).get_name()
    matplotlib.rc('font', family=font_name)
    plt.xlabel("주요단어")
    plt.ylabel("빈도수")
    plt.grid(True)
    # Values and keys are sorted separately but by the same ordering
    # (count descending), so bars and tick labels line up.
    Sorted_Dict_Values = sorted(wordInfo.values(), reverse=True)
    Sorted_Dict_Keys = sorted(wordInfo, key=wordInfo.get, reverse=True)
    plt.bar(range(len(wordInfo)), Sorted_Dict_Values, align="center")
    plt.xticks(range(len(wordInfo)), list(Sorted_Dict_Keys), rotation='70')
    plt.show()
def saveWordCloud(wordInfo, filename) :
    """Render wordInfo as a tag-cloud image, save it to filename, and open
    it in the default web browser."""
    taglist = pytagcloud.make_tags(dict(wordInfo).items(), maxsize=100)
    pytagcloud.create_tag_image(taglist, filename, size=(640, 480), fontname="Korean")
    webbrowser.open(filename)
# --- function definitions ---
# def function_name():
def main() :
    """Load the crawled news JSON, extract nouns, count the most frequent
    words, then show a frequency chart and save a word-cloud image."""
    openFileName = "E:\PythonData_2016146007\김대호_naver_news.json"
    cloudImagePath = openFileName + ".jpg"
    print("cloudImagePath : " + cloudImagePath)
    # NOTE(review): the file handle is never closed explicitly.
    rfile = open(openFileName, 'r', encoding='utf-8').read()
    jsonData = json.loads(rfile)
    description= ""
    for item in jsonData :
        if 'description' in item.keys() :
            # Strip non-word characters before concatenating descriptions.
            description = description + re.sub(r'[^\w]', '', item['description']) + ''
    # Korean noun extraction via KoNLPy's Twitter tagger.
    nlp = Twitter()
    nouns = nlp.nouns(description)
    count = Counter(nouns)
    wordInfo = dict()
    for tags, counts in count.most_common(100) :
        # Keep only words longer than one character.
        if ( len(str(tags)) > 1 ) :
            wordInfo[tags] = counts
            print("%s : %d" % (tags, counts))
    showGraph(wordInfo)
    saveWordCloud(wordInfo, cloudImagePath)
if __name__ == "__main__" :
    main()
| Gyeo1/Project | Iot-인공지능-빅데이터(크롤링,워드클라우드)/2.워드클라우드.py | 2.워드클라우드.py | py | 1,933 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.rc",
"line_number": 15,
"usage_type": "call"
},
{
"api_... |
15405751223 | #!/usr/bin/env python3
"""
https://adventofcode.com/2021/day/21
"""
import collections
import itertools
import aoc
# Puzzle handle; provides the input text for 2021 day 21 via the aoc helper.
PUZZLE = aoc.Puzzle(day=21, year=2021)
def solve_b(positions):
    """Solve puzzle part b: Dirac dice.

    Track every reachable game state together with the number of universes
    in which it occurs, and return the winning player's universe count.
    """
    # Frequency of each possible 3-roll total of a quantum d3.
    roll_freq = collections.Counter(
        map(sum, itertools.product((1, 2, 3), repeat=3))
    )
    # State: (pos_p0, pos_p1, score_p0, score_p1) -> number of universes.
    in_play = {(positions[0], positions[1], 0, 0): 1}
    wins = collections.defaultdict(int)
    player = 0
    while in_play:
        next_round = collections.defaultdict(int)
        for total, ways in roll_freq.items():
            for state, count in in_play.items():
                branch = list(state)
                branches = count * ways
                # Board squares are 1..10; a result of 0 wraps to 10.
                branch[player] = (branch[player] + total) % 10 or 10
                branch[player + 2] += branch[player]
                if branch[player + 2] >= 21:
                    wins[player] += branches
                else:
                    next_round[tuple(branch)] += branches
        in_play = next_round
        player = 1 - player
    return max(wins.values())
def solve(part='a'):
    """Solve puzzle"""
    # Starting positions from the input; reduced mod 10 (0 stands in for 10).
    positions = [
        int(player.split()[-1]) % 10
        for player in PUZZLE.input.splitlines()
    ]
    # positions = [4, 8]
    if part == 'b':
        return solve_b(positions)
    # Part a: deterministic die rolling 1, 2, 3, ... forever.
    scores = [0, 0]
    player = 0
    rolls = 0
    die = itertools.count(1)
    while True:
        rolls += 3
        roll = sum(next(die) for _ in range(3))
        # position 0 is really position 10
        position = (positions[player] + roll) % 10 or 10
        positions[player] = position
        scores[player] += position
        if scores[player] >= 1000:
            # print(f'rolls={rolls}, {scores}')
            # Answer: the losing score times the total number of die rolls.
            return scores[1-player] * rolls
        player = 1 - player
    return None
if __name__ == "__main__":
    # Report both parts through the aoc helper.
    PUZZLE.report_a(solve('a'))
    PUZZLE.report_b(solve('b'))
| trosine/advent-of-code | 2021/day21.py | day21.py | py | 1,970 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aoc.Puzzle",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.defaultd... |
24645452076 | """
This module should contain your main project pipeline(s).
Whilst the pipeline may change during the analysis phases, any more stable pipeline should be implemented here so
that it can be reused and easily reproduced.
"""
# This must be set at the very beginning because model_util imports it.
logger_name = "FCRN-BID"
import logging
import os
import sys
import threading
import time
import schedule
import yaml
from MsgLog import LogInit
from proxy import Proxy
from utils import log_level
class Pipeline(object):
    """
    Class responsible for pipeline functionality.

    Constructing an instance immediately executes one pipeline run
    (``__init__`` calls ``run``). The scheduler section inside ``run`` is
    currently disabled because ``self.active`` is forced to False first.
    """

    def __init__(self, config):
        """Build the Proxy from *config* and run the pipeline once."""
        self.active = False
        self.Proxy = Proxy(config)
        self.run()

    def run_threaded(self, job_func):
        """
        Run *job_func* on a background thread so scheduled jobs do not block
        the scheduler tick loop.
        """
        job_thread = threading.Thread(target=job_func)
        job_thread.start()

    def run(self):
        """
        Runs the main processing pipeline.
        """
        log.info("Start running the pipeline")
        # Optional steps, currently disabled:
        # self.Proxy.update_fingrid_api()
        # self.Proxy.transform_frequency(request_period="last_six_months")
        # self.Proxy.train_models()
        # log.info(f"{self.forecast_load(request_period="last_year")}")
        self.Proxy.submit_bids()
        # self.Proxy.update_bids()
        # self.Device.update_state()
        self.active = False
        # NOTE: self.active is forced False above, so the scheduling block
        # below never runs; set it True to enable the recurring jobs.
        if self.active:
            # Fingrid prices are stated to be published at 22:45
            # schedule.every().day.at("02:10").do(self.run_threaded, self.Proxy.update_fingrid_api)
            # schedule.every().sunday.at("16:05").do(lambda s=self: s.Proxy.train_models())
            schedule.every(2).to(3).minutes.do(self.run_threaded, self.Proxy.keep_alive)
            schedule.every().day.at("09:20").do(
                self.run_threaded, self.Proxy.submit_bids
            )
            schedule.every().day.at("21:59").do(
                self.run_threaded, self.Proxy.update_bids
            )
            # Verify jobs on the quarter hours, refresh state just before them.
            for minutes in range(0, 60, 15):
                schedule.every().hour.at(":%02d" % (minutes)).do(
                    self.run_threaded, self.Proxy.verify_the_job
                )
            for minutes in [13, 28, 43, 58]:
                schedule.every().hour.at(":%02d" % (minutes)).do(
                    self.run_threaded, self.Proxy.update_state
                )
            while self.active:
                schedule.run_pending()
                time.sleep(1)
        # self.stop()

    def stop(self):
        """
        Emergency/testing interruption: deactivate the device and stop the
        scheduler loop.
        """
        log.warning("Emergency interruption. Stopping the scheduler.")
        self.Proxy.Device.deactivate()
        self.active = False
if __name__ == "__main__":
    # Load the YAML runtime configuration from the working directory.
    config_file = "./configuration.yml"
    config = yaml.full_load(open(config_file, "r"))
    # Module-global logger; Pipeline methods reference `log` directly.
    log = LogInit(
        logger_name,
        os.path.join(".", "logs", "logs.log"),
        debuglevel=log_level(config["logLevel"]),
        log=True,
    )
    log.info(os.getcwd())
    log.info(f"Python version: {sys.version}")
    # Constructing the pipeline immediately runs it (see Pipeline.__init__).
    pp = Pipeline(config=config)
| aleksei-mashlakov/fcrn-bidding | src/fcrn_bidding/pipeline.py | pipeline.py | py | 3,112 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "proxy.Proxy",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "schedule.every",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "schedule.every",
"lin... |
43464527814 | import easyocr
import cv2
import matplotlib.pyplot as plt
import re
import unidecode
from datetime import datetime
import numpy as np
import math
import os
import json
from difflib import SequenceMatcher
from itertools import combinations
# Shared EasyOCR reader for Vietnamese; model loading is expensive, so it is
# created once at import time.
READER = easyocr.Reader(['vi'])
json_path = "data/vn_administrative_location.json"
with open(json_path) as f:
    LOCATIONS = json.load(f)
LIST_OF_PROVINCES = []
for location in LOCATIONS:
    LIST_OF_PROVINCES.append(location["name"])
# NOTE(review): assumed to map location-name keys to canonical full-location
# values (the *_STREET_ variant including street-level entries) — confirm
# against the JSON files.
with open("data/full_locations.json") as f:
    FULL_LOCATIONS_DICT = json.load(f)
with open("data/full_locations_street.json") as f:
    FULL_LOCATIONS_STREET_DICT = json.load(f)
def get_ocr_results(image_path):
    """Run EasyOCR on *image_path* and return a dict keyed by detected text.

    Each value holds:
      keypoints: [top_left, top_right, bottom_right, bottom_left, center]
      confidence: the EasyOCR confidence score.
    """
    detections = {}
    for bbox, text, confidence in READER.readtext(image_path):
        top_left, top_right, bottom_right, bottom_left = (tuple(pt) for pt in bbox)
        center = (int((top_left[0] + top_right[0]) / 2),
                  int((top_left[1] + bottom_left[1]) / 2))
        detections[text] = {
            "keypoints": [top_left, top_right, bottom_right, bottom_left, center],
            "confidence": confidence
        }
    return detections
def dist(x, y):
    """Return the Euclidean distance between 2-D points *x* and *y* (x, y pairs)."""
    # math.hypot is clearer and numerically safer than the manual sqrt-of-squares.
    return math.hypot(y[0] - x[0], y[1] - x[1])
def get_nearest_right(p1, list_of_p2):
    '''
    Among the points of *list_of_p2* strictly to the right of *p1* (greater x),
    return the one closest to *p1*. Points not to the right get an infinite
    distance, so if none qualifies the first point is returned.
    p1 and every p2 have (x, y) format.
    '''
    distances = [
        math.hypot(p2[0] - p1[0], p2[1] - p1[1]) if p2[0] > p1[0] else math.inf
        for p2 in list_of_p2
    ]
    return list_of_p2[np.argmin(np.array(distances))]
def check_id_number(text: str = None):
    """Return True iff *text* is a 12-digit numeric string (Vietnamese CCCD id)."""
    # `is None` instead of `== None`; the nested if/else collapses to one expression.
    if text is None:
        return False
    return text.isnumeric() and len(text) == 12
def check_name(text: str = None):
    """Return True iff the accent-stripped *text* is entirely upper-case.

    ID-card name lines are printed in capitals, so an all-caps token after
    removing Vietnamese diacritics is treated as (part of) a name.
    """
    # Guard the None default explicitly (`is None`, not `== None`); the old
    # code would have crashed on None input.
    if text is None:
        return False
    return unidecode.unidecode(text).isupper()
def check_date_format(text: str = None):
    """Return True iff *text*, stripped of non-alphanumerics, parses as ddmmYYYY."""
    # The declared default None used to crash in re.sub; treat it as "not a date".
    if text is None:
        return False
    cleaned = re.sub('[^A-Za-z0-9]+', '', text)
    try:
        # strptime raises ValueError for anything that is not a valid ddmmYYYY date.
        return bool(datetime.strptime(cleaned, "%d%m%Y"))
    except ValueError:
        return False
def get_datetime(text: str = None):
    """Return the first whitespace-separated token of *text* that looks like a
    ddmmYYYY date (per check_date_format), or None when no token qualifies."""
    for token in text.split():
        if check_date_format(token):
            return token
    return None
def norm(text: str = None):
    """Normalise *text* for fuzzy matching: strip accents, lowercase, remove
    spaces and every remaining non-alphanumeric character."""
    folded = unidecode.unidecode(text).lower().replace(' ', '')
    return re.sub('[^A-Za-z0-9]+', '', folded)
def similar(s1: str = None,
            s2: str = None):
    """Return the difflib similarity ratio of *s1* and *s2*, in [0, 1]."""
    matcher = SequenceMatcher(None, s1, s2)
    return matcher.ratio()
def argmax(lst: list = None):
    """Index of the first occurrence of the maximum element of *lst*."""
    return max(range(len(lst)), key=lst.__getitem__)
def count_word(sentence: str = ""):
    """Number of whitespace-separated words in *sentence*."""
    words = sentence.split()
    return len(words)
def get_nearest_province(location_info, LIST_OF_PROVINCES):
    """Match the (normalised) tail of *location_info* against the candidate
    province names and return the most similar province string."""
    tail = norm(location_info)[-10:]
    scores = [similar(norm(tail), norm(province)) for province in LIST_OF_PROVINCES]
    return LIST_OF_PROVINCES[argmax(scores)]
def refine_location_information(sample: str = "",
                                get_street_info: bool = False):
    """Snap *sample* to the most similar key of the module-level location
    tables and return that key's stored canonical value.

    With get_street_info=True the street-level table
    (FULL_LOCATIONS_STREET_DICT) is searched instead of FULL_LOCATIONS_DICT.
    """
    full_locations = []
    similarities = []
    if get_street_info:
        for key in FULL_LOCATIONS_STREET_DICT.keys():
            similarities.append(similar(norm(sample), norm(key)))
            full_locations.append(FULL_LOCATIONS_STREET_DICT[key])
    else:
        for key in FULL_LOCATIONS_DICT.keys():
            similarities.append(similar(norm(sample), norm(key)))
            full_locations.append(FULL_LOCATIONS_DICT[key])
    # Best-matching key wins; the two lists are kept index-aligned.
    return full_locations[argmax(similarities)]
def get_all_substring(string: str = ""):
    """Every contiguous non-empty substring of *string*."""
    spans = combinations(range(len(string) + 1), 2)
    return [string[start:stop] for start, stop in spans]
def get_substring_reverse(string: str = ""):
    """All suffixes of *string*, shortest first, e.g. 'abc' -> ['c', 'bc', 'abc']."""
    return [string[i:] for i in range(len(string) - 1, -1, -1)]
def refine_ocr_results(results: dict = None):
    """Post-process raw OCR field values in *results* (in place) and return it.

    Currently only "Place of origin" is snapped to the closest canonical
    location; the residence refinement below is disabled.
    """
    assert results is not None
    # Refine place of origin info
    results["Place of origin"] = refine_location_information(results["Place of origin"])
    # # Refine place of residence info
    # similarity = -1
    # src = results["Place of residence"]
    # for key in FULL_LOCATIONS_STREET_DICT.keys():
    #     for substring in get_substring_reverse:
    #         if similar(norm(substring), norm(key)) > similarity:
    #             similarity = similar(norm(substring), norm(key))
    #         else:
    #             refined = FULL_LOCATIONS_STREET_DICT[key]
    # results["Place of residence"] = refined
    return results
def perspective_transoform(image, source_points, size=(3000, 1800), output_path="aligned.jpg"):
    """Warp *image* so that *source_points* (a 4x2 float32 array of corner
    coordinates, clockwise from top-left) map onto a size[0] x size[1]
    rectangle, and write the result to *output_path*.

    The previous hard-coded 3000x1800 output and "aligned.jpg" destination are
    now the defaults, so existing callers are unaffected.
    """
    width, height = size
    dest_points = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    M = cv2.getPerspectiveTransform(source_points, dest_points)
    dst = cv2.warpPerspective(image, M, (width, height))
    cv2.imwrite(output_path, dst)
if __name__ == "__main__":
pass | tungedng2710/TonEKYC | utils/ocr_utils.py | ocr_utils.py | py | 5,405 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "easyocr.Reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,... |
33776237261 | # # Notation: Draw Supported Notations of Explicit Converter
import mechkit
import networkx as nx
import matplotlib.pyplot as plt
# Keyword arguments shared by every networkx drawing call below.
plot_options = dict(
    node_color="yellow",
    node_size=2000,
    width=2,
    arrows=True,
    font_size=10,
    font_color="black",
)
# Draw the conversion graph for each entity type supported by the converter.
converter = mechkit.notation.ExplicitConverter()
for entity_type, graph in converter.graphs_dict.items():
    pos = nx.spring_layout(graph, seed=1)
    # NOTE(review): `pos` is computed but never passed to draw_networkx, so the
    # spring layout has no effect — confirm whether draw_networkx(graph, pos, ...)
    # was intended.
    fig = plt.figure()
    nx.draw_networkx(graph, **plot_options)
    plt.gca().set_title(entity_type)
    plt.tight_layout()
| JulianKarlBauer/mechkit | docs/source/notebooks/06.py | 06.py | py | 551 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "mechkit.notation.ExplicitConverter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mechkit.notation",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "networkx.spring_layout",
"line_number": 20,
"usage_type": "call"
},
{
"a... |
33501639542 | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
import scipy.io
from Bayesian_DL.BPINN.VI.src.utils import log_gaussian_loss, gaussian, get_kl_Gaussian_divergence
from torch.utils.tensorboard import SummaryWriter
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('device: {}'.format(device))
from Bayesian_DL.BPINN.VI.src.model import BBP_Model_PINN
class BBP_Model_PINN_KdV(BBP_Model_PINN):
    """Bayes-by-backprop PINN specialised to the KdV equation
    u_t + lambda1 * u * u_x + lambda2 * u_xxx = 0.

    Keeps variational Gaussian posteriors over the two unknown PDE
    coefficients (system identification) on top of the Bayesian network
    inherited from BBP_Model_PINN.
    """
    def __init__(self, xt_lb, xt_ub, u_lb, u_ub,
                 layers, loss_func, opt, local, res, activation,
                 learn_rate, batch_size, n_batches,
                 prior, numerical, identification, device):
        # All construction is delegated to the generic Bayesian PINN base.
        super().__init__(xt_lb, xt_ub, u_lb, u_ub,
                         layers, loss_func, opt, local, res, activation,
                         learn_rate, batch_size, n_batches,
                         prior, numerical, identification, device)
    def initial_para(self):
        """Create the variational parameters (mean mu, pre-softplus rho) for
        lambda1/lambda2 plus the residual noise parameter alpha, and register
        them on the network so the optimizer trains them."""
        self.lambda1_mus = nn.Parameter(torch.Tensor(1).uniform_(0, 2))
        self.lambda1_rhos = nn.Parameter(torch.Tensor(1).uniform_(-3, 2))
        self.lambda2_mus = nn.Parameter(torch.Tensor(1).uniform_(0, 0.05))
        self.lambda2_rhos = nn.Parameter(torch.Tensor(1).uniform_(-3, -2))
        self.alpha = nn.Parameter(torch.Tensor(1).uniform_(0, 2))
        # self.beta = nn.Parameter(torch.Tensor(1).uniform_(0, 1))
        self.network.register_parameter('lambda1_mu', self.lambda1_mus)
        self.network.register_parameter('lambda2_mu', self.lambda2_mus)
        self.network.register_parameter('lambda1_rho', self.lambda1_rhos)
        self.network.register_parameter('lambda2_rho', self.lambda2_rhos)
        self.network.register_parameter('alpha', self.alpha)
        # self.network.register_parameter('beta', self.beta)
        # Both coefficients share the prior supplied at construction time.
        self.prior_lambda1 = self.prior
        self.prior_lambda2 = self.prior
    def net_F(self, x, t, u, lambda1_sample, lambda2_sample):
        """Evaluate the KdV residual F = u_t + l1*u*u_x + l2*u_xxx with
        autograd, after undoing the min-max scaling applied to u in fit()."""
        lambda_1 = lambda1_sample
        lambda_2 = lambda2_sample
        # u, _, _ = self.net_U(x, t)
        u = u*(self.u_ub-self.u_lb) + self.u_lb # reverse scaling
        u_t = torch.autograd.grad(u, t, torch.ones_like(u),
                                  retain_graph=True,
                                  create_graph=True)[0]
        u_x = torch.autograd.grad(u, x, torch.ones_like(u),
                                  retain_graph=True,
                                  create_graph=True)[0]
        u_xx = torch.autograd.grad(u_x, x, torch.ones_like(u_x),
                                   retain_graph=True,
                                   create_graph=True)[0]
        u_xxx = torch.autograd.grad(u_xx, x, torch.ones_like(u_xx),
                                    retain_graph=True,
                                    create_graph=True)[0]
        F = u_t + lambda_1*u*u_x + lambda_2*u_xxx
        return F
    def fit(self, X, t, U, n_samples):
        """One optimisation step on the Monte-Carlo ELBO with *n_samples*
        reparameterised draws of (lambda1, lambda2) and the network weights.
        Returns (mean U loss, mean F loss, KL term, total loss)."""
        self.network.train()
        # X = torch.tensor(self.X, requires_grad=True).float().to(device)
        # t = torch.tensor(self.t, requires_grad=True).float().to(device)
        U = (U-self.u_lb)/(self.u_ub-self.u_lb) # scaling
        # U = (U-self.u_mean)/self.u_std # scaling
        # reset gradient and total loss
        self.optimizer.zero_grad()
        fit_loss_F_total = 0
        fit_loss_U_total = 0
        for _ in range(n_samples):
            # Reparameterisation trick: sample = mu + eps * softplus(rho),
            # where softplus(rho) = log(1 + exp(rho)) is the posterior std.
            lambda1_epsilons = self.lambda1_mus.data.new(self.lambda1_mus.size()).normal_()
            lambda1_stds = torch.log(1 + torch.exp(self.lambda1_rhos))
            lambda2_epsilons = self.lambda2_mus.data.new(self.lambda2_mus.size()).normal_()
            lambda2_stds = torch.log(1 + torch.exp(self.lambda2_rhos))
            lambda1_sample = self.lambda1_mus + lambda1_epsilons * lambda1_stds
            lambda2_sample = self.lambda2_mus + lambda2_epsilons * lambda2_stds
            u_pred, log_noise_u, KL_loss_para = self.net_U(X, t)
            f_pred = self.net_F(X, t, u_pred, lambda1_sample, lambda2_sample)
            # calculate fit loss based on mean and standard deviation of output
            fit_loss_U_total += self.loss_func(u_pred, U, log_noise_u.exp(), self.network.output_dim)
            # fit_loss_F_total += torch.sum(f_pred**2) ######
            fit_loss_F_total += self.loss_func(f_pred, torch.zeros_like(f_pred), (self.alpha.exp()+1)*torch.ones_like(f_pred), self.network.output_dim)
            KL_loss_lambda1 = get_kl_Gaussian_divergence(self.prior_lambda1.mu, self.prior_lambda1.sigma**2, self.lambda1_mus, lambda1_stds**2)
            KL_loss_lambda2 = get_kl_Gaussian_divergence(self.prior_lambda2.mu, self.prior_lambda2.sigma**2, self.lambda2_mus, lambda2_stds**2)
            KL_loss_total = KL_loss_para + KL_loss_lambda1 + KL_loss_lambda2
            # KL_loss_total = KL_loss_para
        # minibatches and KL reweighting
        KL_loss_total = KL_loss_total/self.n_batches
        # self.coef = F.softmax(self.alpha)
        self.coef = self.alpha.exp() + 1
        total_loss = KL_loss_total + (fit_loss_U_total + fit_loss_F_total)
        total_loss /= (n_samples*X.shape[0])
        total_loss.backward()
        self.optimizer.step()
        self.scheduler.step()
        return fit_loss_U_total/n_samples, fit_loss_F_total/n_samples, KL_loss_total, total_loss
if __name__ == '__main__':
    # ---- Data: KdV reference solution on a 201 (time) x 512 (space) grid ----
    data = scipy.io.loadmat('./Data/KdV.mat')
    t = data['tt'].flatten()[:,None] # 201 x 1
    x = data['x'].flatten()[:,None] # 512 x 1
    Exact_ = np.real(data['uu']).T # 201 x 512
    noise = 0.1
    # Corrupt the clean field with Gaussian noise scaled by 10% of its std.
    Exact = Exact_ + noise*np.std(Exact_)*np.random.randn(201, 512)
    X, T = np.meshgrid(x,t) # 201 x 512
    X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) # 102912 x 2
    u_star = Exact.flatten()[:,None] # 102912 x 1
    # Random held-out test points.
    N_u_test = 10000
    idx_test = np.random.choice(X_star.shape[0], N_u_test, replace = False)
    X_test = X_star[idx_test,:]
    u_test = u_star[idx_test,:]
    # Domain bounds of x, t
    xt_lb = X_star.min(0)
    xt_ub = X_star.max(0)
    # training data
    N_u = 500
    idx = np.random.choice(X_star.shape[0], N_u, replace = False)
    X_u_train = X_star[idx,:]
    u_train = u_star[idx,:]
    u_min = u_train.min(0)
    u_max = u_train.max(0)
    X = torch.tensor(X_u_train[:,0:1], requires_grad = True, device = device).float()
    t = torch.tensor(X_u_train[:,1:2], requires_grad = True, device = device).float()
    U = torch.tensor(u_train, requires_grad = True, device = device).float()
    #%% model
    local = True
    identification = True
    numerical = False
    learn_rate = 1e-3
    n_hidden = 50
    opt = torch.optim.AdamW
    loss_func = log_gaussian_loss
    layers = [2, n_hidden, n_hidden, n_hidden, n_hidden, n_hidden, 2]
    # layers = [2, n_hidden, n_hidden, n_hidden, 2] # res
    prior = gaussian(0, 1)
    num_epochs = 25000
    n_batches = 1
    batch_size = len(X_u_train)
    res = False
    activation = nn.Tanh()
    pinn_model = BBP_Model_PINN_KdV(xt_lb, xt_ub, u_min, u_max,
                                    layers, loss_func, opt, local, res, activation,
                                    learn_rate, batch_size, n_batches,
                                    prior, numerical, identification, device)
    #%%
    # ---- Training loop with TensorBoard logging ----
    n_fit = 20
    comment = f'KdV n_sample = {N_u} n_fit = {n_fit} res = {res}'
    writer = SummaryWriter(comment = comment)
    fit_loss_U_train = np.zeros(num_epochs)
    fit_loss_F_train = np.zeros(num_epochs)
    KL_loss_train = np.zeros(num_epochs)
    loss = np.zeros(num_epochs)
    for i in range(num_epochs):
        EU, EF, KL_loss, total_loss = pinn_model.fit(X, t, U, n_samples = n_fit)
        fit_loss_U_train[i] = EU.item()
        fit_loss_F_train[i] = EF.item()
        KL_loss_train[i] = KL_loss.item()
        loss[i] = total_loss.item()
        writer.add_scalar("loss/total_loss", loss[i], i)
        writer.add_scalar("loss/U_loss", fit_loss_U_train[i], i)
        writer.add_scalar("loss/F_loss", fit_loss_F_train[i], i)
        writer.add_scalar("loss/KL_loss", KL_loss_train[i], i)
        # if i % 1000 == 0:
        #     F_test = net.sample_F(X_u_test_25)
        #     fig, axs = plt.subplots(2, 2, figsize=(20, 8))
        #     axs[0,0].hist(F_test[:,0])
        #     axs[0,1].hist(F_test[:,100])
        #     axs[1,0].hist(F_test[:,150])
        #     axs[1,1].hist(F_test[:,255])
        #     plt.savefig('./plots/kdv_epoch{}_F.tiff'.format(i))
        if i % 10 == 0 or i == num_epochs - 1:
            print("Epoch: {:5d}/{:5d}, total loss = {:.3f}, Fit loss U = {:.3f}, Fit loss F = {:.3f}, KL loss = {:.3f}".format(i + 1, num_epochs,
                                                                                                                               loss[i], fit_loss_U_train[i], fit_loss_F_train[i], KL_loss_train[i]))
            lambda1_mus = pinn_model.lambda1_mus.item()
            lambda1_stds = torch.log(1 + torch.exp(pinn_model.lambda1_rhos)).item()
            lambda2_mus = pinn_model.lambda2_mus.item()
            lambda2_stds = torch.log(1 + torch.exp(pinn_model.lambda2_rhos)).item()
        if i % 100 == 0 or i == num_epochs - 1:
            # Relative L2 errors on held-out and training points.
            samples_star, _ = pinn_model.predict(X_test, 50, pinn_model.network)
            u_pred_star = samples_star.mean(axis = 0)
            error_star = np.linalg.norm(u_test-u_pred_star, 2)/np.linalg.norm(u_test, 2)
            samples_train, _ = pinn_model.predict(X_u_train, 50, pinn_model.network)
            u_pred_train = samples_train.mean(axis=0)
            error_train = np.linalg.norm(u_train-u_pred_train, 2)/np.linalg.norm(u_train, 2)
            print("Epoch: {:5d}/{:5d}, error_test = {:.5f}, error_train = {:.5f}".format(i+1, num_epochs, error_star, error_train))
            writer.add_scalars("loss/train_test", {'train':error_train, 'test':error_star}, i)
            print("Epoch: {:5d}/{:5d}, lambda1_mu = {:.5f}, lambda2_mu = {:.5f}, lambda1_std = {:.3f}, lambda2_std = {:.3f}".format(i + 1, num_epochs,
                                                                                                                                    lambda1_mus, lambda2_mus,
                                                                                                                                    lambda1_stds, lambda2_stds))
            # print("Epoch: {:5d}/{:5d}, alpha = {:.5f}, beta = {:.5f}".format(i+1, num_epochs, pinn_model.coef[0].item(), pinn_model.coef[1].item()))
            print("Epoch: {:5d}/{:5d}, alpha = {:.5f}".format(i+1, num_epochs, pinn_model.coef.item()))
            print()
    writer.close()
    #%%
    # ---- Evaluation slices at fixed times t = 0.15 / 0.25 / 0.50 / 0.75 ----
    x = data['x'].flatten()[:,None]
    X_u_test_15 = np.hstack([x, 0.15*np.ones_like((x))]); u_test_15 = Exact[30]; u_mean_15 = Exact_[30]
    X_u_test_25 = np.hstack([x, 0.25*np.ones_like((x))]); u_test_25 = Exact[50]; u_mean_25 = Exact_[50]
    X_u_test_50 = np.hstack([x, 0.50*np.ones_like((x))]); u_test_50 = Exact[100]; u_mean_50 = Exact_[100]
    X_u_test_75 = np.hstack([x, 0.75*np.ones_like((x))]); u_test_75 = Exact[150]; u_mean_75 = Exact_[150]
    def get_res(X):
        # Posterior predictive over the slice: mean plus aleatoric (noise),
        # epistemic (sample spread) and combined standard deviations.
        samples, noises = pinn_model.predict(X, 100, pinn_model.network)
        u_pred = samples.mean(axis = 0)
        aleatoric = (noises**2).mean(axis = 0)**0.5
        epistemic = samples.var(axis = 0)**0.5
        total_unc = (aleatoric**2 + epistemic**2)**0.5
        return u_pred.ravel(), aleatoric.ravel(), epistemic.ravel(), total_unc.ravel()
    x = x.ravel()
    u_pred_25, ale_25, epi_25, total_unc_25 = get_res(X_u_test_25)
    u_pred_50, ale_50, epi_50, total_unc_50 = get_res(X_u_test_50)
    u_pred_75, ale_75, epi_75, total_unc_75 = get_res(X_u_test_75)
    #%% plot
    # Prediction with 2-sigma uncertainty band against exact/noisy data.
    fig, axs = plt.subplots(1, 3, figsize = (15,4))
    axs[0].scatter(x, u_test_25, s = 10, marker = 'x', color = 'black', alpha = 0.5, label = 'Exact')
    axs[0].plot(x, u_mean_25, 'b-', linewidth = 2, label = 'Prediction')
    axs[0].plot(x, u_pred_25, 'r--', linewidth = 2, label = 'Prediction')
    axs[0].fill_between(x, u_pred_25-2*total_unc_25, u_pred_25+2*total_unc_25, color = 'g', alpha = 0.5, label = 'Epistemic + Aleatoric')
    axs[0].set_xlabel('$x$')
    axs[0].set_ylabel('$u(t,x)$')
    axs[0].set_title('$t = 0.25$', fontsize = 10)
    axs[1].scatter(x, u_test_50, s = 10, marker = 'x', color = 'black', alpha = 0.5, label = 'Exact')
    axs[1].plot(x, u_mean_50, 'b-', linewidth = 2, label = 'Prediction')
    axs[1].plot(x, u_pred_50, 'r--', linewidth = 2, label = 'Prediction')
    axs[1].fill_between(x, u_pred_50-2*total_unc_50, u_pred_50+2*total_unc_50, color = 'g', alpha = 0.5, label = 'Epistemic + Aleatoric')
    axs[1].set_xlabel('$x$')
    axs[1].set_ylabel('$u(t,x)$')
    axs[1].set_title('$t = 0.5$', fontsize = 10)
    axs[2].scatter(x, u_test_75, s = 10, marker = 'x', color = 'black', alpha = 0.5, label = 'Exact')
    axs[2].plot(x, u_mean_75, 'b-', linewidth = 2, label = 'Prediction')
    axs[2].plot(x, u_pred_75, 'r--', linewidth = 2, label = 'Prediction')
    axs[2].fill_between(x, u_pred_75-2*total_unc_75, u_pred_75+2*total_unc_75, color = 'g', alpha = 0.5, label = 'Epistemic + Aleatoric')
    axs[2].set_xlabel('$x$')
    axs[2].set_ylabel('$u(t,x)$')
    axs[2].set_title('$t = 0.75$', fontsize = 10)
    plt.savefig('./plots/final_prediction_KdV.tiff')
| SoloChe/BPINN | VI/KdV_identification.py | KdV_identification.py | py | 13,494 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "Bayesian_DL.BPINN.VI.src.model.BBP_Model_PINN",
"line_number": 20,
"usage_type": "name"
},
{
... |
5237933206 | import numpy as np
import matplotlib.pyplot as plt
class Agent:
    """Epsilon-greedy agent for a stationary multi-armed bandit.

    bandit: sequence of true arm means; rewards are drawn from N(mean, 1).
    exploration_rate: probability of choosing a uniformly random arm.
    """

    def __init__(self, bandit, exploration_rate):
        self.bandit = bandit
        self.exploration_rate = exploration_rate
        self.cur_estimates = self.first_estimates()
        # Per-arm sample history (seeded with the initial estimate) and
        # running reward bookkeeping for plotting.
        self.all_estimates = [[self.cur_estimates[i]] for i in range(len(self.cur_estimates))]
        self.rewards = []
        self.avg_rewards = []

    def first_estimates(self):
        """Draw an initial value estimate for every arm.

        Bug fix: the original seeded every arm's estimate from
        self.bandit[0]; each arm i is now seeded from its own mean
        self.bandit[i] (the loop index was previously unused).
        """
        estimates = []
        for i in range(len(self.bandit)):
            estimates.append(np.random.normal(self.bandit[i], 1))
        return estimates

    def select_action(self):
        """Explore with probability exploration_rate; otherwise exploit the
        arm with the highest current estimate."""
        if np.random.random() < self.exploration_rate:
            return np.random.choice(len(self.cur_estimates))
        return np.argmax(self.cur_estimates)

    def get_reward(self, action):
        """Sample a reward for *action*, record it, and update the arm's
        estimate to the sample mean of its full history."""
        reward = np.random.normal(self.bandit[action], 1)
        self.rewards.append(reward)
        self.avg_rewards.append(np.mean(self.rewards))
        self.all_estimates[action].append(reward)
        self.cur_estimates[action] = np.mean(self.all_estimates[action])
def run(nb_plays):
    """Play *nb_plays* rounds on one shared 10-armed bandit with one agent
    per exploration rate, then plot each agent's running average reward."""
    nb_arms = 10
    exploration_rates = [0, 0.01, 0.1, 0.2, 0.3, 1]
    # True arm means drawn once and shared by every agent.
    bandit = np.random.normal(0, 1, nb_arms)
    agents_list = []
    for i in range(len(exploration_rates)):
        ex = exploration_rates[i]
        agents_list.append(Agent(bandit, ex))
    for i in range(nb_plays):
        for agent in agents_list:
            action = agent.select_action()
            agent.get_reward(action)
    time = [i for i in range(nb_plays)]
    for agent in agents_list:
        plt.plot(time, agent.avg_rewards, label="{e}".format(e=agent.exploration_rate))
    plt.legend(loc='best')
    plt.show()
run(400)
| Oppac/RL | simple_bandit.py | simple_bandit.py | py | 1,735 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.normal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.rando... |
71349302755 | """
"""
import datetime as dt
import requests
import time
import pandas as pd
import streamlit as st
def app():
    """Streamlit page: chart historical NFT-collection stats for a contract
    address and date range chosen in the sidebar."""
    asset_contract_address = st.sidebar.text_input("Contract Address")
    start_dt_input = st.sidebar.date_input(label='Start Date')
    end_dt_input = st.sidebar.date_input(label='End Date')

    def get_historical_collection_stats(address: str,
                                        start_dt: dt.datetime,
                                        end_dt: dt.datetime
                                        ) -> pd.DataFrame:
        """Fetch daily volumes for *address* between the two dates from the
        Reservoir API and return them indexed by timestamp."""
        # Turn start and end dates into unix timestamps. The module-level
        # `import time` already provides mktime; the former local re-import
        # was redundant and has been removed.
        start_ts = time.mktime(start_dt.timetuple())
        end_ts = time.mktime(end_dt.timetuple())
        url = "https://api.reservoir.tools/collections/daily-volumes/v1"
        headers = {
            "Accept": "*/*",
            "x-api-key": "demo-api-key"
        }
        params = {
            'id': address,
            'startTimestamp': start_ts,
            'endTimestamp': end_ts
        }
        response = requests.get(url, headers=headers, params=params)
        resp_df = pd.DataFrame(response.json()['collections'])
        resp_df['timestamp'] = pd.to_datetime(resp_df['timestamp'], unit='s')
        resp_df = resp_df.set_index(pd.DatetimeIndex(resp_df['timestamp']))
        return resp_df

    try:
        df = get_historical_collection_stats(asset_contract_address, start_dt_input, end_dt_input)
        st.write('Average Floor Price')
        st.line_chart(df['floor_sell_value'])
        st.write('Volume')
        st.line_chart(df['volume'])
        st.write('Sales Count')
        st.line_chart(df['sales_count'])
        st.dataframe(df)

        @st.cache
        def convert_df(base_df):
            # IMPORTANT: Cache the conversion to prevent computation on every rerun
            return base_df.to_csv().encode('utf-8')

        csv = convert_df(df)
        st.download_button(
            label="Download data as CSV",
            data=csv,
            file_name='trending.csv',
            mime='text/csv',
        )
    except KeyError:
        # The API response has no 'collections' rows for unknown asset/date
        # combinations, which surfaces here as a KeyError.
        error_text = """
        No Data for the select asset and date combination.
        Please change parameters.
        """
        st.write(error_text)
| alhedlund/al_nft_data_app | pages/collections.py | collections.py | py | 2,310 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.sidebar.text_input",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.date_input",
"line_number": 13,
"usage_type": "call"
},
{
"... |
16190520974 | # coding: utf-8
"""
Производственный календарь.
"""
import json
import os
import datetime
import requests
WORKING_TYPE_WORK = 0     # calendar marks an otherwise-weekend day as working
WORKING_TYPE_HOLIDAY = 2  # calendar marks the day as a holiday / day off
WORKING_TYPE_SHORT = 3    # shortened working day (one hour less)
DEFAULT_CACHE_PATH = '/tmp/basicdata_calend.json'
def is_working_time(date_time, use_cache=False, cache_path=DEFAULT_CACHE_PATH):
    """Return True when *date_time* falls within business hours, honouring
    the basicdata.ru production-calendar exceptions.

    Working hours are 10:00-17:59 (until 16:59 on shortened days); Mon-Fri
    are working days unless the calendar marks the date otherwise.
    """
    exceptions = _get_prod_exceptions(use_cache=use_cache, cache_path=cache_path)
    work_first_hour = 10
    work_last_hour = 17
    is_work_day = date_time.weekday() < 5
    # The exceptions mapping is keyed by string year -> month -> day.
    year = str(date_time.year)
    month = str(date_time.month)
    day = str(date_time.day)
    if exceptions.get(year) and exceptions[year].get(month) and exceptions[year][month].get(day):
        working_type = exceptions[year][month][day]['isWorking']
        if working_type == WORKING_TYPE_HOLIDAY:
            is_work_day = False
        elif working_type == WORKING_TYPE_SHORT:
            # Shortened day: finish one hour earlier.
            work_last_hour = 16
        elif working_type == WORKING_TYPE_WORK:
            # Working day that would otherwise be a weekend.
            is_work_day = True
    is_work_time = work_first_hour <= date_time.hour <= work_last_hour
    return is_work_day and is_work_time
def _get_prod_exceptions(use_cache=False, cache_path=DEFAULT_CACHE_PATH):
    """Return the production-calendar exceptions, optionally served from a
    local JSON cache (refreshed when missing, stale or unreadable)."""
    if not use_cache:
        return _load_prod_exceptions()
    if not _is_cache_available(cache_path=cache_path):
        exceptions = _load_prod_exceptions()
        _save_cache(exceptions, cache_path)
        return exceptions
    return _load_cache(cache_path)
def _load_prod_exceptions():
    """
    Uses http://basicdata.ru/api/calend/

    As stated on the site:
    days of the week Monday through Friday are assumed to be working days,
    while Saturday and Sunday are days off.
    This API returns all the exceptions to that rule.
    """
    url = 'http://basicdata.ru/api/json/calend/'
    exceptions = requests.get(url).json()
    return exceptions['data']
def _save_cache(data, cache_path):
with open(cache_path, 'w') as cache_file:
json.dump(data, cache_file)
def _load_cache(cache_path):
with open(cache_path) as cache_file:
return json.load(cache_file)
def _is_cache_available(cache_path, expiration_days=1):
if not os.path.isfile(cache_path):
return False
now = datetime.datetime.now()
cache_modify_dt = datetime.datetime.fromtimestamp(os.path.getmtime(cache_path))
delta = now - cache_modify_dt
if delta.days >= expiration_days:
return False
try:
with open(cache_path) as cache_file:
json.load(cache_file)
except Exception:
return False
return True
| telminov/sw-python-utils | swutils/prod_calendar.py | prod_calendar.py | py | 2,802 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": ... |
4496867704 | import requests
import argparse
from datetime import datetime
# pixe.la API base endpoint and account credentials.
PXLA_ENDPOINT = "https://pixe.la/v1/users"
USERNAME = "stamnoob"
# NOTE(review): the API token is hard-coded in source — consider moving it to
# an environment variable or config file.
PWD = "m0n0mlkiaple0n"
HEADER = {"X-USER-TOKEN": PWD}
def arg_parser(argv=None) -> argparse.Namespace:
    """Parse command-line arguments.

    argv: optional explicit argument list; defaults to sys.argv[1:].
          Accepting it makes the parser testable without patching sys.argv.
    """
    parser = argparse.ArgumentParser(description="Post a coding hours pixel in the pixela \"code-graph\".")
    parser.add_argument("-H", "--hours",
                        required=True, type=float,
                        help="Today's coding hours (float)")
    return parser.parse_args(argv)
def create_user(pwd: str, username: str) -> None:
    """Register a new pixe.la user (one-time setup) and print the API reply.

    pwd: the account token; username: the account name.
    """
    request_body = {
        "token": pwd,
        "username": username,
        "agreeTermsOfService": "yes",
        "notMinor": "yes"
    }
    r = requests.post(url=PXLA_ENDPOINT, json=request_body)
    print("Creating new user with username: {}".format(USERNAME))
    print_response(r)
def create_graph(graph_id: str, name: str, unit: str, unit_type: str, color: str) -> None:
    """Create a pixe.la graph for the configured user and print the API reply.

    graph_id: URL identifier; name: display name; unit/unit_type: quantity
    unit and its type (e.g. "float"); color: pixe.la color keyword.
    """
    graph_body = {
        "id": graph_id,
        "name": name,
        "unit": unit,
        "type": unit_type,
        "color": color
    }
    r = requests.post(url="{}/{}/graphs".format(PXLA_ENDPOINT, USERNAME), json=graph_body, headers=HEADER)
    print("Creating new graph with ID:", graph_id)
    print_response(r)
def update_pixel(hours: float, graph_id: str) -> None:
    """Post today's *hours* as a pixel on graph *graph_id* and print the reply.

    Bug fix: pixe.la expects a zero-padded yyyyMMdd date; the previous
    "{}{}{}".format(year, month, day) produced e.g. "202315" for
    2023-01-05, which the API rejects. strftime pads correctly.
    """
    date_string = datetime.today().strftime("%Y%m%d")
    pixel_body = {
        "date": date_string,
        "quantity": str(hours)
    }
    r = requests.post(url="{}/{}/graphs/{}".format(PXLA_ENDPOINT,
                                                   USERNAME,
                                                   graph_id),
                      json=pixel_body,
                      headers=HEADER)
    print_response(r)
def print_response(resp: requests.Response) -> None:
    """Print the "message" field of a pixe.la JSON response, plus a blank line."""
    print(resp.json()["message"], "\n")
def main() -> None:
    """Post today's coding hours (taken from the CLI) to the "code-graph"."""
    # One-time setup steps, kept for reference:
    # create_user(PWD, USERNAME)
    # create_graph("code-graph", "Coding Graph", "Hours", "float", "sora")
    update_pixel(arg_parser().hours, "code-graph")
    print("URL: https://pixe.la/v1/users/{}/graphs/{}".format(USERNAME, "code-graph"))
if __name__ == "__main__":
    main()
| stzanos95/python-projects | Habit-Tracker/main.py | main.py | py | 2,221 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "request... |
72703642915 | import math
import random
import sys
import importlib
#========================change variables here to modify scenario========================
path_to_folder = "C:\\Users\\LJMU\\Documents\\Felix\\OpenMATB_ScenarioCreator"
# NOTE(review): this second assignment overwrites the one above — presumably a
# per-machine toggle; confirm which path is intended.
path_to_folder = "C:\\Users\\felix\\Desktop\\LJMU\\Scripts\\Python\\OpenMATB_ScenarioCreator"
#define MATB length
total_mins = 3
total_secs = total_mins * 60
buffer = 15 #seconds at the start and end where no events will be placed (safety measure for communication prompts and possibly unfair scale failures towards the end)
#import difficulty configuration from the module named on the command line:
#  argv[1] = config module, argv[2] = difficulty, argv[3] = scenario count
try:
    print ("Number of arguments: ", len(sys.argv))
    print ("The arguments are: " , str(sys.argv))
    cfg_file = sys.argv[1]
    diff = sys.argv[2]
    n_scenarios = sys.argv[3]
    mod = importlib.import_module(cfg_file)
    config = []
    if diff == 'easy':
        config = mod.easy
    if diff == 'medium':
        config = mod.medium
    if diff == 'hard':
        config = mod.hard
    if sys.argv[2] not in ['easy', 'medium' ,'hard']:
        raise
    # NOTE(review): a bare `raise` with no active exception raises RuntimeError,
    # which the handler below turns into the usage message — confirm intended.
except Exception as exception:
    print("---You need to pass three arguments: /config file name/ /difficulty (easy, medium or hard)/ /number of scenarios to generate/---")
    raise
#====================================Creating scenario========================================
#---define naming constants
TIMEPOINTS = list(range(0,total_secs))
S_SYSMON = 'sysmon'
S_RESMAN = 'resman'
S_TRACK = 'track'
S_COMM = 'communications'
SYSTEMS = [S_SYSMON, S_RESMAN, S_TRACK, S_COMM]
# Event-name -> OpenMATB parameter template; "{}" is filled with a pump/scale
# id by write_param(). More parameters can be added here as needed.
PARAMETERS ={#needs more parameters:
    'target' : 'tank-{}-target',
    'loss' : 'lossperminute',
    'update' : 'taskupdatetime',
    'alert' : 'alerttimeout',
    'radius' : 'targetradius',
    's-failure' : 'scales-{}-failure',
    'p-failure' : 'pump-{}-state',
    'prompt' : 'radioprompt'
}
#define which pump and scales will be involved
PUMPS = config['pumpsToFail']
SCALES = config['scalesToFail']
#define frequency of events:
N_EVENTS = {
    's-failure' : config['scaleFailN'], #number of scale failures
    'p-failure' : config['pumpFailN'], #number of pump failures
    'prompt' : config['promptN'], #number of prompts (whether own or other is 50/50)
}
#---define helper function:
#function that processes different paramater types (pumps need extra information for example))
def write_param(parameter, extra=0, P=PARAMETERS):
    """Look up *parameter* in table *P* and return its template; when *extra*
    is given (non-zero pump/scale id) it is formatted into the template."""
    template = P[parameter]
    if extra == 0:
        return template
    return template.format(extra)
def write_time(secs):
    """Format *secs* (an integer number of seconds) as "H:MM:SS" for the
    start of each scenario line.

    Fixes the old formatting, which emitted three digits for minutes >= 10
    (e.g. 605 s -> "0:010:05") and never carried whole hours.
    """
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "{}:{:02d}:{:02d}".format(hours, minutes, seconds)
def make_command(time_t, system, command):
    """Render a scenario command line in "time;system;command" form
    (not currently used by the generator)."""
    return "%s;%s;%s" % (time_t, system, command)
def make_param(time_t, system, parameter, value):
    """Render a scenario parameter line: "time;system;parameter;value"."""
    return "%s;%s;%s;%s" % (time_t, system, parameter, value)
#function that makes sure spacings between events are as defined by the safezone variable (seconds around a certain event onset)
def removeFromTime(timePoints, timePoint, timePoint_idx, safeZone, previousEventTimes):
    """Remove from *timePoints* the seconds blocked by placing an event at
    *timePoint* (index *timePoint_idx*): a window of *safeZone* seconds on each
    side, trimmed where it overlaps the windows of *previousEventTimes*.
    Returns the pruned list of candidate time points.
    """
    for i in range(0,len(previousEventTimes)): # loop over events that were created so far
        x_before = set(range(previousEventTimes[i] - safeZone,previousEventTimes[i])) #These are sets of seconds that events last, with the intersection function we can check if they overlap
        x_after = set(range(previousEventTimes[i],previousEventTimes[i] + safeZone))
        y_before = set(range(timePoint - safeZone, timePoint))
        y_after = set(range(timePoint, timePoint + safeZone))
        if len(x_before.intersection(y_after) ) > 0: #Case 1 new event intersects with with safezone before previous event
            if len(x_before.intersection(y_after)) == safeZone:
                return timePoints[0:timePoint_idx - safeZone] + timePoints[timePoint_idx : -1]
            else:
                return timePoints[0:timePoint_idx - safeZone] + timePoints[timePoint_idx + len(x_before.intersection(y_after)) : -1]
        if len(x_after.intersection(y_before) ) > 0: #Case 2 new event intersects with with safezone after previous event
            if len(x_after.intersection(y_before)) == safeZone:
                return timePoints[0:timePoint_idx] + timePoints[timePoint_idx + safeZone : -1]
            else:
                return timePoints[0:timePoint_idx - len(x_after.intersection(y_before))] + timePoints[timePoint_idx : -1]
    return timePoints[0:timePoint_idx - safeZone] + timePoints[timePoint_idx + safeZone : -1] # Case 3 new event does not overlap with previous events
#----Start defining variables to be written into the scenario file
#Start lines with modified starting values given by the chosen difficulty configuration
START_LINES = [
"0:00:00;sysmon;scalestyle;2",
"0:00:00;sysmon;feedbacks-positive-color;#00ff00",
"0:00:00;sysmon;feedbacks-negative-color;#ff0000",
"0:00:00;sysmon;alerttimeout;4000",
"0:00:00;sysmon;safezonelength;{}".format(config['sysSafe']),
"0:00:00;resman;tank-a-target;2000",
"0:00:00;resman;tank-a-lossperminute;{}".format(config['lossA']),
"0:00:00;resman;tank-b-target;1000",
"0:00:00;resman;tank-b-lossperminute;{}".format(config['lossB']),
"0:00:00;resman;taskupdatetime;200",
"0:00:00;resman;tolerancelevel;{}".format(config['tankTolerance']),
"0:00:00;track;cursorcolor;#009900",
"0:00:00;track;targetradius;{}".format(config['trackingRad']),
"0:00:00;communications;callsignregex;[A-Z][A-Z]\d\d",
"0:00:00;communications;othercallsignnumber;5",
"0:00:00;communications;voicegender;male",
"0:00:00;communications;voiceidiom;english",
"0:00:00;labstreaminglayer;start",
"0:00:00;pumpstatus;start",
"0:00:00;resman;start",
"0:00:00;track;start",
"0:00:00;sysmon;start",
"0:00:00;communications;start",
"0:00:00;scheduling;start",
"0:00:00;participantinfo;start",
"0:00:00;track;automaticsolver;False"
]
#define flow rates of the different pumps
FLOW_LINES = []
for i in range(0,len(PUMPS)):
p = PUMPS[i]
if p < 7:
FLOW_LINES.append("0:00:00;resman;pump-{}-flow;{}".format(p,config['flowStd']))
else:
FLOW_LINES.append("0:00:00;resman;pump-{}-flow;{}".format(p,config['flowBetween']))
#End lines with the correct end times
END_LINES = [
"{};pumpstatus;stop".format(write_time(total_secs)),
"{};resman;stop".format(write_time(total_secs)),
"{};track;stop".format(write_time(total_secs)),
"{};sysmon;stop".format(write_time(total_secs)),
"{};communications;stop".format(write_time(total_secs)),
"{};scheduling;stop".format(write_time(total_secs)),
"{};labstreaminglayer;stop".format(write_time(total_secs)),
"{};end".format(write_time(total_secs + 1)),
]
for n_scenario in range(1,int(n_scenarios)+1):
#define all events
event_lines = []
safe_zone = config['safeZone']
prompt_safe_zone = 20
promptTime = TIMEPOINTS
promptEvents = []
for n in range(1,N_EVENTS['prompt']):
timepoint_idx = random.sample(range(buffer,len(promptTime) -buffer),1)[0]
timepoint = promptTime[timepoint_idx]
tmp_target = random.choices(["own", "other"], weights = [3,1],k = 1)[0]
promptTime = removeFromTime(promptTime, timepoint, timepoint_idx, prompt_safe_zone, promptEvents)
promptEvents.append(timepoint)
event_lines.append(make_param(write_time(timepoint), S_COMM, PARAMETERS['prompt'],tmp_target))
scaleTime = TIMEPOINTS
scaleEvents = []
for n in range(1,N_EVENTS['s-failure']):
timepoint_idx = random.sample(range(buffer,len(scaleTime) -buffer),1)[0]
timepoint = scaleTime[timepoint_idx]
tmp_scale = random.sample(SCALES,1)[0]
tmp_dir = random.sample(["up", "down"],1)[0]
scaleTime = removeFromTime(scaleTime, timepoint, timepoint_idx, safe_zone, scaleEvents)
scaleEvents.append(timepoint)
event_lines.append(make_param(write_time(timepoint), S_SYSMON, PARAMETERS['s-failure'].format(tmp_scale),tmp_dir))
pumpTime = TIMEPOINTS
pumpEvents = []
for n in range(1,N_EVENTS['p-failure']):
timepoint_idx = random.sample(range(buffer,len(pumpTime) -buffer),1)[0]
timepoint = pumpTime[timepoint_idx]
tmp_pump = random.sample(PUMPS,1)[0]
pumpTime = removeFromTime(pumpTime, timepoint, timepoint_idx, safe_zone, pumpEvents)
pumpEvents.append(timepoint)
event_lines.append(make_param(write_time(timepoint), S_RESMAN, PARAMETERS['p-failure'].format(tmp_pump),-1))
event_lines.append(make_param(write_time(timepoint +10), S_RESMAN, PARAMETERS['p-failure'].format(tmp_pump),0))
contents = START_LINES + FLOW_LINES + event_lines + END_LINES
contents = [line + "\n" for line in contents]
out_filename = "{cfg_file}_{diff}_{n}.txt".format(cfg_file = cfg_file, diff = diff, n = n_scenario)
print(path_to_folder + "\\Scenarios\\" + out_filename,"w")
file = open(path_to_folder + "\\Scenarios\\" + out_filename,"w")
file.writelines(contents)
file.close() | Zebrakopf/OpenMATB_ScenarioCreator | create_scenario.py | create_scenario.py | py | 9,599 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number... |
4490266612 | # My_Picture Predict
import numpy as np
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
model = load_model('../data/h5/k67_img.h5')
pred_datagen = ImageDataGenerator(rescale=1./255)
pred_data = pred_datagen.flow_from_directory(
'../data/image',
classes=['my'],
target_size=(150,150),
batch_size=1,
class_mode=None
)
print(pred_data[0])
pred = model.predict_generator(pred_data)
plt.imshow(pred_data[0].reshape(150,150,3))
plt.show()
print(pred)
print('======================================')
if pred > 0.5:
print("남자 acc =", pred)
else:
print("여자 acc =", pred)
# 남자 acc = [[0.65340704]]
img = cv2.imread('../data/image/my/my.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, dsize=(150,150)) / 255.0
plt.imshow(img)
plt.show()
print(img)
result = model.predict(np.array([img]))
print(result)
print('======================================')
if result > 0.5:
print("남자 acc =", result)
else:
print("여자 acc =", result)
# 남자 acc = [[0.65340704]]
| Taerimmm/ML | keras2/keras67_4_my_result.py | keras67_4_my_result.py | py | 1,145 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 22,
"... |
19294404545 | from statsmodels.tsa.holtwinters import ExponentialSmoothing
from dateutil.relativedelta import relativedelta
import pandas as pd
def predict_next_12_months(data):
pred = pd.DataFrame()
start_and_finish = [max(pd.to_datetime(data.columns, format = "%Y-%m")) + relativedelta(months=(x*11)+1) for x in range(2)]
for prod_family in data.index:
ts = data.loc[prod_family, :]
ts.index = pd.DatetimeIndex(pd.to_datetime(ts.index, format = "%Y-%m"))
ts.index.freq = "MS"
try:
model = ExponentialSmoothing(ts, trend = 'mul', seasonal = 'mul', seasonal_periods = 12).fit(
use_basinhopping = True)
except ValueError:
model = ExponentialSmoothing(ts, trend = 'add', seasonal = 'add', seasonal_periods = 12).fit(
use_basinhopping = True)
except:
model = ExponentialSmoothing(ts, seasonal_periods = 12).fit()
temp_pred = model.predict(start = start_and_finish[0], end = start_and_finish[1])
pred[prod_family] = temp_pred
pred = pred.transpose().floordiv(1).fillna(0)
pred[pred < 0] = 0
pred.columns = [x.strftime("%Y-%m") for x in pred.columns]
return pred
if __name__=="__main__":
from backend.compiler import revert_checkpoint
archive = revert_checkpoint("C://sl_data//input//Archive.mng")
forecast = predict_next_12_months(archive.order_history.agg(pivot=True).iloc[:, :-12]) | nizarcan/CapacityPlanningDSS-SD | backend/predictor.py | predictor.py | py | 1,444 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dateutil.relativedelta.relativedelta",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": ... |
36916798983 | # Módulos
from datetime import date
# Declaração de variáveis
pessoa = dict()
# Entrada de dados da pessoa
pessoa['nome'] = str(input('Nome: '))
nasc = int(input('Ano de nascimento: '))
pessoa['idade'] = date.today().year - nasc
ctps = int(input('Carteira de Trabalho (0 se não possui): '))
if ctps != 0:
pessoa['ctps'] = ctps
pessoa['contratação'] = int(input('Ano de Contratação: '))
pessoa['aposentadoria'] = (pessoa['contratação'] + 30) - nasc
pessoa['salário'] = float(input('Salário: R$'))
print('-=-'*15)
for k, v in pessoa.items():
print(f'- {k: <15} ---→ {v: >8}') | Henrique-Botelho/ExerciciosDePython-Curso-em-Video | Exercícios Aula 19/Ex. 092.py | Ex. 092.py | py | 636 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 12,
"usage_type": "name"
}
] |
71730627235 | from gui.visual.player import Player
from gui.visual.entity import Entity
import glm
import OpenGL.GL as gl
from gui.visual.camera import Camera
from gui.visual.staticShader import StaticShader
from gui.visual.entityRenderer import EntityRenderer
from gui.visual.skyboxRenderer import SkyboxRenderer
from gui.visual.worldRenderer import WorldRenderer
class MasterRenderer:
FOV = 70
NEAR_PLANE = 0.1
FAR_PLANE = 17500 # max world size + some offset
INITIAL_WIDTH = 800
INITIAL_HEIGHT = 600
def __init__(self, loader) -> None:
self._width = self.INITIAL_WIDTH
self._height = self.INITIAL_HEIGHT
self._createProjectionMatrix()
self._shader = StaticShader()
self._renderer = EntityRenderer(self._shader, self.projectionMatrix)
self._skyboxRenderer = SkyboxRenderer(loader, self.projectionMatrix)
self.worldRenderer = WorldRenderer(loader, self.projectionMatrix, self._shader)
self._entities = {}
self._isTexturesOn = True
def renderScene(self, player: Player, entities: list, camera: Camera):
for entity in entities:
self.processEntity(entity)
self.render(camera)
def render(self, camera: Camera):
self.__prepare()
self._shader.start()
self._shader.loadViewMatrix(camera)
self._renderer.render(self._entities, self._isTexturesOn)
self.worldRenderer.render(camera)
self._skyboxRenderer.render(camera)
self._shader.stop()
self._entities.clear()
def processEntity(self, entity: Entity):
entityModel = entity.modelData
batch = self._entities.get(entityModel)
if batch:
batch.append(entity)
else:
newBatch = [entity]
self._entities[entityModel] = newBatch
def toggleTextures(self):
self._isTexturesOn = not self.isTexturesOn
def cleanUp(self):
self._shader.cleanUp()
def resize(self, width, height):
self._width = width
self._height = height
self._createProjectionMatrix()
self._shader.start()
self._renderer.loadProjectionMatrix(self.projectionMatrix)
self._shader.stop()
self._skyboxRenderer.shader.start()
self._skyboxRenderer.loadProjectionMatrix(self.projectionMatrix)
self._skyboxRenderer.shader.stop()
self.worldRenderer.shader.start()
self.worldRenderer.loadProjectionMatrix(self.projectionMatrix)
self.worldRenderer.shader.stop()
def __prepare(self):
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glClearColor(0.0, 0.0, 0.0, 1)
def _createProjectionMatrix(self):
self.projectionMatrix = glm.perspective(self.FOV, self._width / self._height, self.NEAR_PLANE, self.FAR_PLANE)
def reloadWorld(self, loader, worldType: int, simType: int, worldSize: float, worldMap: str, worldBoundaries: int, worldWaterLevel: float) -> None:
self.worldRenderer.reloadWorld(loader, worldType, simType, worldSize, worldMap, worldBoundaries, worldWaterLevel) | Mimikkk/2023-amib | src/libs/framspy/gui/visual/masterRenderer.py | masterRenderer.py | py | 3,138 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gui.visual.staticShader.StaticShader",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gui.visual.entityRenderer.EntityRenderer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gui.visual.skyboxRenderer.SkyboxRenderer",
"line_number": 24,
... |
2558854509 | import sqlite3
connection=sqlite3.connect("RUGIPP_REGISTRI.db")
crsr=connection.cursor()
class Registar_Geodeta:
def __init__(self,JMBG,ime,prezime,strucna_sprema,broj_strucnog,red_licence):
self.JMBG=JMBG
self.ime=ime
self.prezime=prezime
self.sprema=strucna_sprema
self.strucni=broj_strucnog
self.licenca=red_licence
def geodete(connection,geo):
sql=''' INSERT INTO Registar_Geodeta (JMBG,Ime,Prezime,Strucna_sprema,Broj_Uvjerenja_Strucni,Red_Licence)
VALUES(?,?,?,?,?,?) '''
params = (geo.JMBG, geo.ime, geo.prezime, geo.sprema, geo.strucni, geo.licenca)
crsr.execute(sql, params)
connection.commit()
while True:
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
rg = int(input('NALAZITE SE U REGISTRU GEODETA \nZa unos Geodete unesite 1, \nZa azuriranje podataka Geodete unesite 2,'
' \nZa brisanje Geodete iz registra unesite 3, \nZa uvid u podatke Geodeta unesite 4,'
' \nZa izlaz iz Registra Geodeta unesite 0, \nUNESI BROJ:'))
if rg == 0:
print("Registar Geodeta je zatvoren.")
break
elif rg == 1:
rg1 = Registar_Geodeta(int(input("Unesi JMBG: ")), input("Unesi Ime: "), input("Unesi Prezime: "),
input('Unesi visinu strucne spreme: '),
input("Unesi broj uvjerenja strucnog ispita: "),
int(input("Unesi red licence (1 ili 2): ")))
Registar_Geodeta.geodete(connection, rg1)
elif rg == 2:
maticni = int(input("Unesi JMBG Geodete koji se azurira:"))
while True:
print("_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/")
azur= int(input("Za azuriranje Imena unesite 1, \nZa azuriranje Prezimena unesite 2, \nZa azuriranje Strucne spreme unesite 3,"
" \nZa azuriranje Broja Uvjerenja strucog ispita unesite 4, \nZa azuriranje Reda licence unesite 5,"
" \nZa azuriranje svih podataka unesite 6, \nZa prekid azuriranja unesite 0, \nUNESI BROJ:"))
if azur == 0:
print("Azuriranje Geodete je zavrseno.")
break
elif azur == 1:
m = maticni
i=input("Unesite novo Ime Geodete:")
crsr.execute('''UPDATE Registar_Geodeta SET Ime = ? WHERE JMBG=?''',(i,m))
connection.commit()
elif azur == 2:
m = maticni
p=input("Unesite novo Prezime Geodete:")
crsr.execute('''UPDATE Registar_Geodeta SET Prezime = ? WHERE JMBG=?''',(p,m))
connection.commit()
elif azur == 3:
m = maticni
vss=input("Unesite novu Visinu strucne spreme:")
crsr.execute('''UPDATE Registar_Geodeta SET Strucna_sprema = ? WHERE JMBG=?''',(vss,m))
connection.commit()
elif azur == 4:
m = maticni
bus=input("Unesite novi Broj Uvjerenja strucnog ispita:")
crsr.execute('''UPDATE Registar_Geodeta SET Broj_Uvjerenja_Strucni = ? WHERE JMBG=?''',(bus,m))
connection.commit()
elif azur == 5:
m = maticni
rl=input("Unesite novi Red Licence:")
crsr.execute('''UPDATE Registar_Geodeta SET Red_Licence = ? WHERE JMBG=?''',(rl,m))
connection.commit()
elif azur == 6:
m = maticni
i = input("Unesite novo Ime Geodete:")
p = input("Unesite novo Prezime Geodete:")
vss = input("Unesite novu Visinu strucne spreme:")
bus = input("Unesite novi Broj Uvjerenja strucnog ispita:")
rl=input("Unesite novi Red Licence:")
crsr.execute('''UPDATE Registar_Geodeta SET Ime = ?, Prezime = ?, Strucna_sprema = ?, Broj_Uvjerenja_Strucni = ?, Red_Licence = ? WHERE JMBG=?''',(i,p,vss,bus,rl,m))
connection.commit()
else:
print("Niste unjeli dobar broj, unesite ispravan broj iz liste.")
elif rg == 3:
bris = int(input("Unesi JMBG Geodete kog zelite da izbrisete iz Registra Geodeta:"))
crsr.execute('''DELETE FROM Registar_Geodeta WHERE JMBG=?''', (bris, ))
connection.commit()
elif rg == 4:
crsr.execute('''SELECT * FROM Registar_Geodeta''')
ans = crsr.fetchall()
for i in ans:
print(i)
a=input("Za povratak u Regisar Geodeta pritisni ENTER")
else:
print("Niste unjeli dobar broj, unesite ispravan broj iz liste.")
| SarajlicS/Zavrsni_Rad | Registar_Geodeta.py | Registar_Geodeta.py | py | 4,781 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 2,
"usage_type": "call"
}
] |
34459058884 | import os, sys
from typing import Union, List
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
import torch
import torch.nn as nn
from torch.utils.data import Dataset
def load_data(data_path):
name = ["train", "test"]
columns = [f"V{i}" for i in range(1, 31)]
val_columns = columns + ["Class"]
data_dict = {
key: pd.read_csv(os.path.join(data_path, f"{key}.csv"))[columns] for key in name
}
data_dict["valid"] = pd.read_csv(os.path.join(data_path, "valid.csv"))[val_columns]
# scale 해주면 array 형태가 됨
data_dict["train"] = scale(data_dict["train"])
x_train = data_dict["train"]
x_valid = data_dict["valid"][columns].values
y_valid = data_dict["valid"].Class.values
test_data = pd.read_csv("data/test.csv")
x_test = test_data.iloc[:, 1:].values
return x_train, x_valid, y_valid, x_test
class TabularDataset(Dataset):
def __init__(
self,
inputs: np.array = None,
normalize=True,
mean=None,
std=None,
):
self.inputs = inputs
if mean is not None:
self.mean = mean
if std is not None:
self.std = std
self.normalize = normalize
if self.normalize:
self.scaled_inputs = self.get_normalize(self.inputs, self.mean, self.std)
def get_normalize(self, x: np.array, mean, std):
scaled_x = (x - mean) / std
return scaled_x
def __len__(self):
return len(self.inputs)
def __getitem__(self, idx):
if self.normalize:
x_torch = torch.tensor(self.scaled_inputs, dtype=torch.float)[idx]
else:
x_torch = torch.tensor(self.inputs, dtype=torch.float)[idx]
return {"inputs": x_torch}
| doyooni303/UnsupervisedAnomalyDetection_VAE | src/build_datset.py | build_datset.py | py | 1,789 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line... |
26074240113 | import json
from flask import Flask, redirect, request, render_template
from oauth2client.client import flow_from_clientsecrets
from config import GOOGLE_CLIENT_SECRETS_JSON, REGISTERED_CREDENTIALS_JSON
server_uri = 'http://localhost:5000'
app = Flask(__name__)
flow = flow_from_clientsecrets(
GOOGLE_CLIENT_SECRETS_JSON,
scope='https://www.googleapis.com/auth/gmail.modify',
redirect_uri=server_uri + '/post_registration'
)
@app.route('/')
def index():
"""
renders home page
"""
return render_template('index.html')
@app.route('/register')
def register():
"""
redirects to authorization url
"""
authorization_url = flow.step1_get_authorize_url()
return redirect(authorization_url)
def store_credentials(credentials):
try:
with open(REGISTERED_CREDENTIALS_JSON) as f:
registered_credentials = json.load(f)
except IOError:
registered_credentials = []
registered_credential = credentials.to_json()
registered_credentials.append(registered_credential)
with open(REGISTERED_CREDENTIALS_JSON, 'w') as f:
json.dump(registered_credentials, f)
@app.route('/post_registration')
def post_registration():
"""
renders post registration page
"""
error = request.args.get('error', None)
auth_code = request.args.get('code', None)
if error is not None:
return render_template('error.html', detail='I am sure you have your reasons...')
if auth_code is None:
return render_template('error.html', detail='Sorry! There were some problems. Please try again...')
credentials = flow.step2_exchange(auth_code)
store_credentials(credentials)
return render_template('registered.html')
| sk364/inbox-cleaner | server.py | server.py | py | 1,740 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "oauth2client.client.flow_from_clientsecrets",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.GOOGLE_CLIENT_SECRETS_JSON",
"line_number": 12,
"usage_type": "argument"
... |
27286970033 | import numpy as np
import pandas as pd
import pytest
from cleanlab.datalab.internal.issue_manager import IssueManager
from cleanlab.datalab.internal.issue_manager_factory import REGISTRY, register
class TestCustomIssueManager:
@pytest.mark.parametrize(
"score",
[0, 0.5, 1],
ids=["zero", "positive_float", "one"],
)
def test_make_summary_with_score(self, custom_issue_manager, score):
summary = custom_issue_manager.make_summary(score=score)
expected_summary = pd.DataFrame(
{
"issue_type": [custom_issue_manager.issue_name],
"score": [score],
}
)
assert pd.testing.assert_frame_equal(summary, expected_summary) is None
@pytest.mark.parametrize(
"score",
[-0.3, 1.5, np.nan, np.inf, -np.inf],
ids=["negative_float", "greater_than_one", "nan", "inf", "negative_inf"],
)
def test_make_summary_invalid_score(self, custom_issue_manager, score):
with pytest.raises(ValueError):
custom_issue_manager.make_summary(score=score)
def test_register_custom_issue_manager(monkeypatch):
import io
import sys
assert "foo" not in REGISTRY
@register
class Foo(IssueManager):
issue_name = "foo"
def find_issues(self):
pass
assert "foo" in REGISTRY
assert REGISTRY["foo"] == Foo
# Reregistering should overwrite the existing class, put print a warning
monkeypatch.setattr("sys.stdout", io.StringIO())
@register
class NewFoo(IssueManager):
issue_name = "foo"
def find_issues(self):
pass
assert "foo" in REGISTRY
assert REGISTRY["foo"] == NewFoo
assert all(
[
text in sys.stdout.getvalue()
for text in ["Warning: Overwriting existing issue manager foo with ", "NewFoo"]
]
), "Should print a warning"
| cleanlab/cleanlab | tests/datalab/test_issue_manager.py | test_issue_manager.py | py | 1,931 | python | en | code | 7,004 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.testing.assert_frame_equal",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.testing",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name":... |
26781812415 | import re
import nltk
import pandas as pd
from textblob import TextBlob
def lemmatize_with_postag(sentence):
'''
https://www.machinelearningplus.com/nlp/lemmatization-examples-python/
'''
sent = TextBlob(sentence)
tag_dict = {"J": 'a',
"N": 'n',
"V": 'v',
"R": 'r'}
words_and_tags = [(w, tag_dict.get(pos[0], 'n'))
for w, pos in sent.tags]
lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags]
return " ".join(lemmatized_list)
def tweets_clean_text(tweet):
'''
Cleans the text of a tweet
Parameters
----------
tweet : str
Any string can be entered
Returns
-------
str
Clean tweet in all lower case with stop words removed
'''
# remove urls (https://stackoverflow.com/questions/24399820/expression-to-remove-url-links-from-twitter-tweet)
tweet = re.sub(r"http\S+", "", tweet)
# remove non alpha/numeric characters
tweet = re.sub(r"[^a-zA-Z0-9\s]", "", tweet)
# Make lower case
tweet = TextBlob(tweet)
tweet = tweet.words.lower()
# remove stop words
stop_words = nltk.corpus.stopwords.words('english')
tweet = [word for word in tweet if word not in stop_words]
tweet = TextBlob(' '.join(tweet)).words
tweet = ' '.join(tweet)
# remove specific characters
tweet = re.sub(r" amp ", "", tweet) # amp = &
tweet = re.sub(r"'", "", tweet)
tweet = re.sub(r"’", "", tweet)
tweet = re.sub(r"–", " ", tweet)
tweet = re.sub(r" ", " ", tweet)
tweet = re.sub(r" ", " ", tweet)
tweet = re.sub(r" ", " ", tweet)
return(tweet)
def tweets_break(x):
'''Loop through a tweet and insert <br> every 60 characters for better spacing'''
it = 1
start = 0
stop = start + 60
num_loops = ((len(x)-1) // 60) + 1
clean = []
while it <= num_loops:
i = x[start:stop]+"<br>"
clean += i # append to list
# update positions
it += 1
start = stop
stop = start + 60
if stop > len(x)-1:
stop = len(x)-1
# concatenate list
return "".join(clean)
def get_sentiment(tweets):
'''returns a dictionary with sentiment and polarity'''
polarity = []
subjectivity = []
for tweet in tweets:
tweet = TextBlob(tweet)
pol = tweet.sentiment.polarity
polarity.append(pol)
subj = tweet.sentiment.subjectivity
subjectivity.append(subj)
return {'polarity': polarity, 'subjectivity': subjectivity}
def get_word_counts(tweets_df):
"""
Calculates the word counts for a string
Parameters:
-----------
tweets_df -- (list) a list of tweets, or column from dataframe of tweets
Returns:
--------
Dictionary with word count
"""
words = " ".join(list(tweets_df))
counts = TextBlob(words).word_counts
counts_df = pd.DataFrame.from_dict(dict(counts), orient="index")
counts_df = counts_df.sort_values(by=[0], ascending=False)
counts_df.reset_index(level=0, inplace=True)
counts_df.columns = ['word', 'count']
return counts_df
def get_phrase_counts(tweets_df):
"""
Calculates the word counts for a string
Parameters:
-----------
tweets_df -- (list) a list of tweets, or column from dataframe of tweets
Returns:
--------
Dictionary with phrase count
"""
# get ngrams
words = " ".join(list(tweets_df))
ngram_2 = TextBlob(words).ngrams(n=2)
ngram_3 = TextBlob(words).ngrams(n=3)
ngrams = ngram_2 + ngram_3
# do word count on ngrams
phrases = []
for i in ngrams:
phrases.append("_".join(i))
phrases = " ".join(list(phrases))
counts = TextBlob(phrases).word_counts
# turn into dataframe
counts_df = pd.DataFrame.from_dict(dict(counts), orient="index")
counts_df = counts_df.sort_values(by=[0], ascending=False)
counts_df.reset_index(level=0, inplace=True)
counts_df.columns = ['phrase', 'count']
return counts_df
def get_phrase_counts_df(df, selected_col, users):
df_phrase_count_total = get_phrase_counts(df[selected_col])
df_phrase_count_total.columns = ['phrase', 'total_count']
df_phrase_count_total['rank'] = df_phrase_count_total['total_count'].rank(
ascending=False, method="first")
df_phrase_count = pd.DataFrame()
for i in users:
temp = get_phrase_counts(
df[df['handle'] == i]['clean_tweet'])
temp['handle'] = i
df_phrase_count = pd.concat([temp, df_phrase_count])
df_phrase_count = pd.merge(df_phrase_count, df_phrase_count_total,
how='left', on='phrase')
df_phrase_count = df_phrase_count.sort_values(
by=['total_count', 'phrase', 'count'], ascending=False
).reset_index(drop=True)
return df_phrase_count.head(5000)
def get_word_counts_df(df, selected_col, users):
df_word_count_totals = get_word_counts(df[selected_col])
df_word_count_totals.columns = ['word', 'total_count']
df_word_count_totals['rank'] = df_word_count_totals['total_count'].rank(
ascending=False, method="first")
df_word_count = pd.DataFrame()
for i in users:
temp = get_word_counts(
df[df['handle'] == i]['clean_tweet'])
temp['handle'] = i
df_word_count = pd.concat([temp, df_word_count])
df_word_count = pd.merge(df_word_count, df_word_count_totals, how='left',
on='word')
df_word_count = df_word_count.sort_values(
by=['total_count', 'word', 'count'], ascending=False
).reset_index(drop=True)
return df_word_count.head(5000)
def word_search(text, search_words):
"""
Checks to see if words exist in a body of text
Parameters:
-----------
search_words -- (list) a list of words to search for in text
text -- (string) the body of text to search
Returns:
--------
True if any word is found, False otherwise
"""
for i in search_words:
if i.lower() in text.lower():
return True
return False
def count_tweets_about(df, col_to_search):
justin_search = ["justin", "trudeau", "justintrudeau"]
scheer_search = ["scheer", "andrew", "andrewscheer"]
may_search = ["may", "elizabeth", "ElizabethMay"]
singh_search = ["singh", "jagmeet", "jagmeetsingh", "theJagmeetSingh"]
bernier_search = ["bernier", "maxime", "MaximeBernier"]
df["about_trudeau"] = df[col_to_search].apply(
word_search, search_words=justin_search)
df["about_scheer"] = df[col_to_search].apply(
word_search, search_words=scheer_search)
df["about_may"] = df[col_to_search].apply(
word_search, search_words=may_search)
df["about_singh"] = df[col_to_search].apply(
word_search, search_words=singh_search)
df["about_bernier"] = df[col_to_search].apply(
word_search, search_words=bernier_search)
return df
| SamEdwardes/sentiment-cdn-election | src/twitter_analysis.py | twitter_analysis.py | py | 6,996 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "textblob.TextBlob",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number"... |
22119528446 | import numpy as np
import cv2
def load_image(path_img):
return cv2.imread(path_img)
def bgr2hsv(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
def setRangeColor(hsv, lower_color, upper_color):
return cv2.inRange(hsv, lower_color, upper_color)
def contours_img(mask):
contours,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return contours
def filter_contours_img(contours, img_draw, color_bbox):
count = 0
for c in contours:
rect = cv2.boundingRect(c)
x,y,w,h = rect
area = w * h
if area > 1000:
count = count + 1
cv2.rectangle(img_draw, (x, y), (x+w, y+h), color_bbox, 5)
return img_draw, count
def draw_text_on_image(img_draw, count_Red, count_Blue, count_Green):
cv2.putText(img_draw,'Red Count : ' + str(count_Red),
(10,230), # bottomLeftCornerOfText
cv2.FONT_HERSHEY_SIMPLEX, # font
0.5, # fontScale
(0,0,0), # fontColor
2) # lineType
cv2.putText(img_draw,'Blue Count : ' + str(count_Blue),
(10,260), # bottomLeftCornerOfText
cv2.FONT_HERSHEY_SIMPLEX, # font
0.5, # fontScale
(0,0,0), # fontColor
2) # lineType
cv2.putText(img_draw,'Green Count : ' + str(count_Green),
(10,290), # bottomLeftCornerOfText
cv2.FONT_HERSHEY_SIMPLEX, # font
0.5, # fontScale
(0,0,0), # fontColor
2) # lineType
return img_draw
def main():
path_img = 'E:\\New folder\\ss\\image.jpg'
img = load_image(path_img)
img = cv2.resize(img, None,fx=0.5,fy=0.5)
hsv = bgr2hsv(img)
img_draw = img
lower_Red = np.array([160, 100, 100])
upper_Red = np.array([179, 255, 255])
mask = setRangeColor(hsv, lower_Red, upper_Red)
contours = contours_img(mask)
color_rbox = (0, 0, 255)
img_draw, count_Red = filter_contours_img(contours, img_draw, color_rbox)
print('Red Count:', count_Red)
lower_Green = np.array([50,80, 40])
upper_Green = np.array([70, 255, 255])
mask = setRangeColor(hsv, lower_Green, upper_Green)
contours = contours_img(mask)
color_gbox = (0, 255, 0)
img_draw, count_Green = filter_contours_img(contours, img_draw, color_gbox)
print('Green Count:', count_Green)
lower_Blue = np.array([94, 127, 100])
upper_Blue = np.array([120, 255, 255])
mask = setRangeColor(hsv, lower_Blue, upper_Blue)
contours = contours_img(mask)
color_bbox = (255, 0, 0)
img_draw, count_Blue = filter_contours_img(contours, img_draw, color_bbox)
print('Blue Count:', count_Blue)
img_draw = draw_text_on_image(img_draw, count_Red, count_Blue, count_Green)
cv2.imwrite('E:\\New folder\\s\\output.image.jpg', img_draw)
main() | opsun1/code | color_detection.py | color_detection.py | py | 3,063 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.inRange",
"line_n... |
43199229342 | import bpy
from bpy.props import BoolProperty, EnumProperty
from bpy_extras.view3d_utils import region_2d_to_location_3d, region_2d_to_origin_3d, region_2d_to_vector_3d
from mathutils import Vector
from .. utils.registration import get_addon, get_prefs
from .. utils.tools import get_active_tool
from .. utils.object import parent, unparent, get_eval_bbox
from .. utils.math import compare_matrix
from .. utils.mesh import get_coords
from .. utils.modifier import remove_mod, get_mod_obj
from .. utils.ui import get_zoom_factor, get_flick_direction, init_status, finish_status
from .. utils.draw import draw_vector, draw_circle, draw_point, draw_label, draw_bbox, draw_cross_3d
from .. utils.system import printd
from .. utils.property import step_list
from .. utils.view import get_loc_2d
from .. colors import red, green, blue, white, yellow
from .. items import axis_items, axis_index_mapping
decalmachine = None
def draw_mirror(op):
    """Build the statusbar draw callback for the modal Mirror operator.

    The returned function renders the key hints matching the operator's
    current state (remove mode, cursor mode, misaligned-mirror removal).
    """
    def draw(self, context):
        layout = self.layout
        row = layout.row(align=True)
        row.label(text='Mirror')
        row.label(text="", icon='MOUSE_MOVE')
        row.label(text="Pick Axis")
        row.label(text="", icon='MOUSE_LMB')
        row.label(text="Finish")
        row.label(text="", icon='MOUSE_RMB')
        row.label(text="Cancel")
        row.separator(factor=10)
        if not op.remove:
            row.label(text="", icon='EVENT_C')
            row.label(text=f"Cursor: {op.cursor}")
            if op.cursor and op.cursor_empty:
                row.separator(factor=1)
                row.label(text="", icon='EVENT_E')
                row.label(text=f"Use Existing: {op.use_existing_cursor}")
        if op.sel_mirror_mods:
            row.separator(factor=1)
            row.label(text="", icon='EVENT_A')
            # was a placeholder-less f-string
            row.label(text="Remove All + Finish")
        if op.mirror_mods:
            row.separator(factor=1)
            row.label(text="", icon='EVENT_X')
            row.label(text=f"Remove Mirror: {op.remove}")
        if op.remove and op.misaligned:
            if not op.misaligned['isallmisaligned']:
                row.separator(factor=1)
                row.label(text="", icon='EVENT_Q')
                # fixed typo: "Togggle" -> "Toggle"
                row.label(text=f"Toggle Mirror Object: {op.use_misalign}")
            row.separator(factor=1)
            row.label(text="", icon='MOUSE_MMB')
            row.label(text="Cycle Mirror Object")
    return draw
class Mirror(bpy.types.Operator):
    """Mirror the selection, either directly or via an interactive flick gesture.

    Flick mode runs as a modal operator: the user drags toward one of the six
    axis directions to pick the mirror axis; X/D/R toggles removal of existing
    mirror modifiers, C toggles mirroring across the 3D cursor (via an empty),
    and Q / mouse wheel handle mirror modifiers whose object is misaligned
    with the active object.
    """
    bl_idname = "machin3.mirror"
    bl_label = "MACHIN3: Mirror"
    bl_options = {'REGISTER', 'UNDO'}
    flick: BoolProperty(name="Flick", default=False)
    remove: BoolProperty(name="Remove", default=False)
    axis: EnumProperty(name="Axis", items=axis_items, default="X")
    use_x: BoolProperty(name="X", default=True)
    use_y: BoolProperty(name="Y", default=False)
    use_z: BoolProperty(name="Z", default=False)
    bisect_x: BoolProperty(name="Bisect", default=False)
    bisect_y: BoolProperty(name="Bisect", default=False)
    bisect_z: BoolProperty(name="Bisect", default=False)
    flip_x: BoolProperty(name="Flip", default=False)
    flip_y: BoolProperty(name="Flip", default=False)
    flip_z: BoolProperty(name="Flip", default=False)
    DM_mirror_u: BoolProperty(name="U", default=True)
    DM_mirror_v: BoolProperty(name="V", default=False)
    cursor: BoolProperty(name="Mirror across Cursor", default=False)
    # fixed typo in the UI name: "Mislinged" -> "Misaligned"
    use_misalign: BoolProperty(name="Use Misaligned Object Removal", default=False)
    use_existing_cursor: BoolProperty(name="Use existing Cursor Empty", default=False)
    passthrough = None
    across = False
    removeacross = False
    removecursor = False
    removeall = False
    def draw(self, context):
        layout = self.layout
        column = layout.column()
        row = column.row(align=True)
        row.prop(self, 'cursor', toggle=True)
        row = column.row(align=True)
        row.prop(self, "use_x", toggle=True)
        row.prop(self, "use_y", toggle=True)
        row.prop(self, "use_z", toggle=True)
        # bisect/flip only make sense for a single selected mesh-ish object
        # mirrored across itself (not across the cursor)
        if self.meshes_present and len(context.selected_objects) == 1 and self.active in context.selected_objects and not self.cursor:
            row = column.row(align=True)
            r = row.row()
            r.active = self.use_x
            r.prop(self, "bisect_x")
            r = row.row()
            r.active = self.use_y
            r.prop(self, "bisect_y")
            r = row.row()
            r.active = self.use_z
            r.prop(self, "bisect_z")
            row = column.row(align=True)
            r = row.row()
            r.active = self.use_x
            r.prop(self, "flip_x")
            r = row.row()
            r.active = self.use_y
            r.prop(self, "flip_y")
            r = row.row()
            r.active = self.use_z
            r.prop(self, "flip_z")
        if self.decals_present:
            column.separator()
            column.label(text="DECALmachine - UVs")
            row = column.row(align=True)
            row.prop(self, "DM_mirror_u", toggle=True)
            row.prop(self, "DM_mirror_v", toggle=True)
    @classmethod
    def poll(cls, context):
        if context.mode == "OBJECT":
            return context.active_object
    def draw_HUD(self, context):
        """2D HUD: flick vector, circle, and state labels around the gesture origin."""
        if not self.passthrough:
            draw_vector(self.flick_vector, origin=self.init_mouse, alpha=0.99)
            color = red if self.remove else white
            alpha = 0.2 if self.remove else 0.02
            draw_circle(self.init_mouse, size=self.flick_distance, width=3, color=color, alpha=alpha)
            title = 'Remove' if self.remove else 'Mirror'
            alpha = 1 if self.remove else 0.8
            draw_label(context, title=title, coords=(self.init_mouse[0], self.init_mouse[1] + self.flick_distance - (30 * self.scale)), center=True, color=color, alpha=alpha)
            if self.remove and self.misaligned and self.use_misalign:
                name = 'Cursor Empty' if self.use_misalign and self.mirror_obj.type == 'EMPTY' else self.mirror_obj.name if self.use_misalign else 'None'
                alpha = 1 if self.use_misalign else 0.3
                color = blue if self.use_misalign and self.mirror_obj.type == 'EMPTY' else yellow if self.use_misalign else white
                draw_label(context, title=name, coords=(self.init_mouse[0], self.init_mouse[1] - self.flick_distance + (15 * self.scale)), center=True, color=color, alpha=alpha)
            elif not self.remove and self.cursor or len(self.sel) > 1:
                title, color = ('New Cursor', green) if self.cursor and not self.use_existing_cursor else ('Existing Cursor', blue) if self.cursor else (self.active.name, yellow)
                draw_label(context, title=title, coords=(self.init_mouse[0], self.init_mouse[1] - self.flick_distance + (15 * self.scale)), center=True, alpha=1, color=color)
            title = self.flick_direction.split('_')[1] if self.remove else self.flick_direction.replace('_', ' ').title()
            draw_label(context, title=title, coords=(self.init_mouse[0], self.init_mouse[1] - self.flick_distance), center=True, alpha=0.4)
        if self.remove and self.misaligned and self.use_misalign:
            if self.mirror_obj.type == 'EMPTY':
                if self.passthrough:
                    self.mirror_obj_2d = get_loc_2d(context, self.mirror_obj.matrix_world.to_translation())
                draw_circle(self.mirror_obj_2d, size=10 * self.scale, width=2 * self.scale, color=blue, alpha=0.99)
    def draw_VIEW3D(self, context):
        """3D overlay: the six axis vectors, the picked direction, and the
        bbox/cross of the current misaligned mirror object."""
        for direction, axis, color in zip(self.axes.keys(), self.axes.values(), self.colors):
            positive = 'POSITIVE' in direction
            width, alpha = (2, 0.99) if positive or self.remove else (1, 0.3)
            draw_vector(axis * self.zoom / 2, origin=self.init_mouse_3d, color=color, width=width, alpha=alpha)
        draw_point(self.init_mouse_3d + self.axes[self.flick_direction] * self.zoom / 2 * 1.2, size=5, alpha=0.8)
        if self.remove and self.misaligned and self.use_misalign:
            mx = self.misaligned['matrices'][self.mirror_obj]
            if self.mirror_obj.type == 'MESH':
                bbox = get_eval_bbox(self.mirror_obj)
                draw_bbox(bbox, mx=mx, color=yellow, corners=0.1, width=2 * self.scale, alpha=0.5)
            elif self.mirror_obj.type == 'EMPTY':
                loc = mx.inverted_safe() @ mx.to_translation()
                draw_cross_3d(loc, mx=mx, color=blue, width=2 * self.scale, length=2 * self.cursor_empty_zoom, alpha=0.99)
    def modal(self, context, event):
        context.area.tag_redraw()
        self.mousepos = Vector((event.mouse_region_x, event.mouse_region_y, 0))
        # build the list of events this modal reacts to, based on current state
        events = ['MOUSEMOVE']
        if not self.remove:
            events.append('C')
            if self.cursor and self.cursor_empty:
                events.append('E')
        if self.mirror_mods:
            events.extend(['X', 'D', 'R'])
            if self.remove and self.misaligned:
                events.extend(['Q', 'WHEELDOWNMOUSE', 'WHEELUPMOUSE', 'ONE', 'TWO'])
        if self.sel_mirror_mods:
            events.append('A')
        if event.type in events:
            if self.passthrough:
                # re-anchor the gesture after a viewport navigation passthrough
                self.passthrough = False
                self.init_mouse = self.mousepos
                self.init_mouse_3d = region_2d_to_location_3d(context.region, context.region_data, self.init_mouse, self.origin)
                self.zoom = get_zoom_factor(context, depth_location=self.origin, scale=self.flick_distance, ignore_obj_scale=True)
                if self.mirror_obj and self.mirror_obj.type == 'EMPTY':
                    loc = self.mirror_obj.matrix_world.to_translation()
                    self.mirror_obj_2d = get_loc_2d(context, loc)
                    self.cursor_empty_zoom = get_zoom_factor(context, depth_location=loc, scale=10, ignore_obj_scale=True)
            if event.type == 'MOUSEMOVE':
                self.flick_vector = self.mousepos - self.init_mouse
                if self.flick_vector.length:
                    self.flick_direction = get_flick_direction(context, self.init_mouse_3d, self.flick_vector, self.axes)
                    self.set_mirror_props()
                # leaving the flick circle confirms the gesture
                if self.flick_vector.length > self.flick_distance:
                    self.finish()
                    self.execute(context)
                    return {'FINISHED'}
            elif event.type in {'C', 'E', 'A', 'X', 'D', 'R', 'Q', 'WHEELDOWNMOUSE', 'WHEELUPMOUSE', 'ONE', 'TWO'} and event.value == 'PRESS':
                if event.type in {'X', 'D', 'R'}:
                    self.remove = not self.remove
                    self.active.select_set(True)
                elif event.type == 'C':
                    self.cursor = not self.cursor
                    self.active.select_set(True)
                elif event.type == 'E':
                    self.use_existing_cursor = not self.use_existing_cursor
                    self.active.select_set(True)
                if self.misaligned:
                    if not self.misaligned['isallmisaligned'] and event.type == 'Q' and event.value == 'PRESS':
                        self.use_misalign = not self.use_misalign
                        self.active.select_set(True)
                    if event.type in ['WHEELDOWNMOUSE', 'WHEELUPMOUSE', 'ONE', 'TWO'] and event.value == 'PRESS':
                        if self.use_misalign:
                            if event.type in ['WHEELDOWNMOUSE', 'ONE']:
                                self.mirror_obj = step_list(self.mirror_obj, self.misaligned['sorted_objects'], step=-1, loop=True)
                            # fixed: was 'tWO', which made the TWO key dead here
                            elif event.type in ['WHEELUPMOUSE', 'TWO']:
                                self.mirror_obj = step_list(self.mirror_obj, self.misaligned['sorted_objects'], step=1, loop=True)
                        else:
                            self.use_misalign = True
                        self.active.select_set(True)
                # refresh the axes gizmo for the matrix now in effect
                if self.remove and self.misaligned and self.use_misalign:
                    mo_mx = self.misaligned['matrices'][self.mirror_obj]
                    self.axes = self.get_axes(mo_mx)
                elif not self.remove and self.cursor:
                    self.axes = self.get_axes(self.cmx)
                else:
                    self.axes = self.get_axes(self.mx)
                if self.misaligned and self.mirror_obj.type == 'EMPTY':
                    loc = self.mirror_obj.matrix_world.to_translation()
                    self.mirror_obj_2d = get_loc_2d(context, loc)
                    self.cursor_empty_zoom = get_zoom_factor(context, depth_location=loc, scale=10, ignore_obj_scale=True)
                # A: remove every mirror mod on the selection and finish
                if event.type == 'A':
                    self.finish()
                    for mod in self.sel_mirror_mods:
                        obj = mod.id_data
                        remove_mod(mod.name, objtype=obj.type, context=context, object=obj)
                    self.removeall = True
                    return {'FINISHED'}
        # let viewport navigation through
        if event.type in {'MIDDLEMOUSE'} or (event.alt and event.type in {'LEFTMOUSE', 'RIGHTMOUSE'}) or event.type.startswith('NDOF'):
            self.passthrough = True
            return {'PASS_THROUGH'}
        elif event.type in {'LEFTMOUSE', 'SPACE'}:
            self.finish()
            self.execute(context)
            return {'FINISHED'}
        elif event.type in {'RIGHTMOUSE', 'ESC'}:
            self.finish()
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}
    def finish(self):
        """Remove the draw handlers and restore the statusbar."""
        bpy.types.SpaceView3D.draw_handler_remove(self.HUD, 'WINDOW')
        bpy.types.SpaceView3D.draw_handler_remove(self.VIEW3D, 'WINDOW')
        finish_status(self)
        self.active.select_set(True)
    def invoke(self, context, event):
        global decalmachine
        if decalmachine is None:
            decalmachine = get_addon("DECALmachine")[0]
        self.decalmachine = decalmachine
        scene = context.scene
        active_tool = get_active_tool(context).idname
        self.active = context.active_object
        self.sel = context.selected_objects
        self.meshes_present = True if any([obj for obj in self.sel if obj.type == 'MESH']) else False
        self.decals_present = True if self.decalmachine and any([obj for obj in self.sel if obj.DM.isdecal]) else False
        if self.flick:
            self.mx = self.active.matrix_world
            self.cmx = scene.cursor.matrix
            self.scale = context.preferences.view.ui_scale * get_prefs().HUD_scale
            self.flick_distance = get_prefs().mirror_flick_distance * self.scale
            self.mirror_obj = None
            self.mirror_mods = self.get_mirror_mods([self.active])
            self.sel_mirror_mods = self.get_mirror_mods(self.sel)
            self.cursor_empty = self.get_matching_cursor_empty(context)
            self.use_existing_cursor = True if self.cursor_empty else False
            self.removeall = False
            self.aligned, self.misaligned = self.get_misaligned_mods(context, self.active, self.mx, debug=False)
            if self.misaligned:
                self.use_misalign = self.misaligned['isallmisaligned']
                self.mirror_obj = self.misaligned['sorted_objects'][-1]
                if self.mirror_obj.type == 'EMPTY':
                    loc = self.mirror_obj.matrix_world.to_translation()
                    self.mirror_obj_2d = get_loc_2d(context, loc)
                    self.cursor_empty_zoom = get_zoom_factor(context, depth_location=loc, scale=10, ignore_obj_scale=True)
            # anchor the gesture 10 units in front of the view at the mouse
            self.mousepos = Vector((event.mouse_region_x, event.mouse_region_y, 0))
            view_origin = region_2d_to_origin_3d(context.region, context.region_data, self.mousepos)
            view_dir = region_2d_to_vector_3d(context.region, context.region_data, self.mousepos)
            self.origin = view_origin + view_dir * 10
            self.zoom = get_zoom_factor(context, depth_location=self.origin, scale=self.flick_distance, ignore_obj_scale=True)
            self.init_mouse = self.mousepos
            self.init_mouse_3d = region_2d_to_location_3d(context.region, context.region_data, self.init_mouse, self.origin)
            self.flick_vector = self.mousepos - self.init_mouse
            self.flick_direction = 'NEGATIVE_X'
            self.axes = self.get_axes(self.cmx if self.cursor else self.mx)
            self.colors = [red, red, green, green, blue, blue]
            init_status(self, context, func=draw_mirror(self))
            self.active.select_set(True)
            self.HUD = bpy.types.SpaceView3D.draw_handler_add(self.draw_HUD, (context, ), 'WINDOW', 'POST_PIXEL')
            self.VIEW3D = bpy.types.SpaceView3D.draw_handler_add(self.draw_VIEW3D, (context, ), 'WINDOW', 'POST_VIEW')
            context.window_manager.modal_handler_add(self)
            return {'RUNNING_MODAL'}
        else:
            self.mirror(context, self.active, self.sel)
            return {'FINISHED'}
    def execute(self, context):
        self.active = context.active_object
        self.sel = context.selected_objects
        if self.flick and self.remove:
            self.remove_mirror(self.active)
        else:
            self.across = len(self.sel) > 1
            self.mirror(context, self.active, self.sel)
        return {'FINISHED'}
    def get_axes(self, mx):
        """Return the six world-space axis directions of matrix mx, keyed by flick direction."""
        axes = {'POSITIVE_X': mx.to_quaternion() @ Vector((1, 0, 0)),
                'NEGATIVE_X': mx.to_quaternion() @ Vector((-1, 0, 0)),
                'POSITIVE_Y': mx.to_quaternion() @ Vector((0, 1, 0)),
                'NEGATIVE_Y': mx.to_quaternion() @ Vector((0, -1, 0)),
                'POSITIVE_Z': mx.to_quaternion() @ Vector((0, 0, 1)),
                'NEGATIVE_Z': mx.to_quaternion() @ Vector((0, 0, -1))}
        return axes
    def get_matching_cursor_empty(self, context):
        """Find an empty whose matrix matches the 3D cursor, if any."""
        scene = context.scene
        matching_empties = [obj for obj in scene.objects if obj.type == 'EMPTY' and compare_matrix(obj.matrix_world, self.cmx, precision=5)]
        if matching_empties:
            return matching_empties[0]
    def get_mirror_mods(self, objects):
        """Collect mirror modifiers (mesh MIRROR or grease pencil GP_MIRROR) on the given objects."""
        mods = []
        for obj in objects:
            if obj.type == 'GPENCIL':
                mods.extend([mod for mod in obj.grease_pencil_modifiers if mod.type == 'GP_MIRROR'])
            else:
                mods.extend([mod for mod in obj.modifiers if mod.type == 'MIRROR'])
        return mods
    def mirror(self, context, active, sel):
        """Add mirror modifiers to the selection, across the active object or cursor."""
        if self.cursor:
            if self.flick and self.cursor_empty and self.use_existing_cursor:
                empty = self.cursor_empty
            else:
                # create a hidden empty at the cursor to act as the mirror object
                empty = bpy.data.objects.new(name=f"{active.name} Mirror", object_data=None)
                context.collection.objects.link(empty)
                empty.matrix_world = context.scene.cursor.matrix
                empty.show_in_front = True
                empty.empty_display_type = 'ARROWS'
                empty.empty_display_size = (context.scene.cursor.location - sel[0].matrix_world.to_translation()).length / 10
                empty.hide_set(True)
        if len(sel) == 1 and active in sel:
            if self.cursor:
                # bisect/flip don't apply when mirroring across the cursor
                self.bisect_x = self.bisect_y = self.bisect_z = False
                self.flip_x = self.flip_y = self.flip_z = False
            if active.type in ["MESH", "CURVE"]:
                self.mirror_mesh_obj(context, active, mirror_object=empty if self.cursor else None)
            elif active.type == "GPENCIL":
                self.mirror_gpencil_obj(context, active, mirror_object=empty if self.cursor else None)
            elif active.type == "EMPTY" and active.instance_collection:
                self.mirror_instance_collection(context, active, mirror_object=empty if self.cursor else None)
        elif len(sel) > 1 and active in sel:
            self.bisect_x = self.bisect_y = self.bisect_z = False
            self.flip_x = self.flip_y = self.flip_z = False
            if not self.cursor:
                sel.remove(active)
            for obj in sel:
                if obj.type in ["MESH", "CURVE"]:
                    self.mirror_mesh_obj(context, obj, mirror_object=empty if self.cursor else active)
                elif obj.type == "GPENCIL":
                    self.mirror_gpencil_obj(context, obj, mirror_object=empty if self.cursor else active)
                elif obj.type == "EMPTY" and obj.instance_collection:
                    self.mirror_instance_collection(context, obj, mirror_object=empty if self.cursor else active)
    def mirror_mesh_obj(self, context, obj, mirror_object=None):
        """Add a MIRROR modifier to a mesh/curve object, with DECALmachine UV handling."""
        mirror = obj.modifiers.new(name="Mirror", type="MIRROR")
        mirror.use_axis = (self.use_x, self.use_y, self.use_z)
        mirror.use_bisect_axis = (self.bisect_x, self.bisect_y, self.bisect_z)
        mirror.use_bisect_flip_axis = (self.flip_x, self.flip_y, self.flip_z)
        mirror.show_expanded = False
        if mirror_object:
            mirror.mirror_object = mirror_object
        if self.decalmachine:
            if obj.DM.isdecal:
                mirror.use_mirror_u = self.DM_mirror_u
                mirror.use_mirror_v = self.DM_mirror_v
                # keep the decal's NormalTransfer mod at the end of the stack
                nrmtransfer = obj.modifiers.get("NormalTransfer")
                if nrmtransfer:
                    bpy.ops.object.modifier_move_to_index({'object': obj}, modifier=nrmtransfer.name, index=len(obj.modifiers) - 1)
    def mirror_gpencil_obj(self, context, obj, mirror_object=None):
        """Add a GP_MIRROR modifier to a grease pencil object."""
        mirror = obj.grease_pencil_modifiers.new(name="Mirror", type="GP_MIRROR")
        mirror.use_axis_x = self.use_x
        mirror.use_axis_y = self.use_y
        mirror.use_axis_z = self.use_z
        mirror.show_expanded = False
        if mirror_object:
            mirror.object = mirror_object
    def mirror_instance_collection(self, context, obj, mirror_object=None):
        """Mirror a collection instance by mirroring its meshes across a
        helper empty placed inside the collection."""
        mirror_empty = bpy.data.objects.new("mirror_empty", object_data=None)
        col = obj.instance_collection
        if mirror_object:
            mirror_empty.matrix_world = mirror_object.matrix_world
            # express the mirror matrix in the instance's local space
            mirror_empty.matrix_world = obj.matrix_world.inverted_safe() @ mirror_empty.matrix_world
        col.objects.link(mirror_empty)
        meshes = [ob for ob in col.objects if ob.type == "MESH"]
        for mesh in meshes:
            self.mirror_mesh_obj(context, mesh, mirror_empty)
    def set_mirror_props(self):
        """Translate the current flick direction into axis/bisect/flip props."""
        self.use_x = self.use_y = self.use_z = False
        self.bisect_x = self.bisect_y = self.bisect_z = False
        self.flip_x = self.flip_y = self.flip_z = False
        direction, axis = self.flick_direction.split('_')
        setattr(self, f'use_{axis.lower()}', True)
        if len(self.sel) == 1:
            setattr(self, f'bisect_{axis.lower()}', True)
            if direction == 'POSITIVE':
                setattr(self, f'flip_{axis.lower()}', True)
        self.axis = axis
    def remove_mirror(self, obj):
        """Remove the last mirror modifier using the flicked axis; returns True if one was removed."""
        axis = self.flick_direction.split('_')[1]
        if self.misaligned and self.use_misalign:
            if obj.type == 'GPENCIL':
                mods = [mod for mod in self.misaligned['object_mods'][self.mirror_obj] if getattr(mod, f'use_axis_{axis.lower()}')]
            else:
                mods = [mod for mod in self.misaligned['object_mods'][self.mirror_obj] if mod.use_axis[axis_index_mapping[axis]]]
        else:
            if obj.type == 'GPENCIL':
                mods = [mod for mod in self.aligned if getattr(mod, f'use_axis_{axis.lower()}')]
            else:
                mods = [mod for mod in self.aligned if mod.use_axis[axis_index_mapping[axis]]]
        if mods:
            mod = mods[-1]
            mod_object = mod.object if mod.type == 'GP_MIRROR' else mod.mirror_object
            # record what kind of mirror was removed, for redo/HUD purposes
            if mod_object:
                if mod_object.type == 'EMPTY':
                    self.removeacross = False
                    self.removecursor = True
                else:
                    self.removeacross = True
                    self.removecursor = False
            else:
                self.removeacross = False
                self.removecursor = False
            remove_mod(mod.name, objtype=obj.type)
            return True
    def get_misaligned_mods(self, context, active, mx, debug=False):
        """Split self.mirror_mods into mods aligned with mx and mods whose
        mirror object's orientation differs; returns (aligned, misaligned-dict-or-False)."""
        object_mirror_mods = [mod for mod in self.mirror_mods if get_mod_obj(mod)]
        aligned = [mod for mod in self.mirror_mods if mod not in object_mirror_mods]
        if debug:
            print()
            print("object mirrors:", object_mirror_mods)
            print("non-object mirrors:", aligned)
        misaligned = {'sorted_mods': [],
                      'sorted_objects': [],
                      'object_mods': {},
                      'matrices': {},
                      'isallmisaligned': False}
        for mod in object_mirror_mods:
            mirror_obj = get_mod_obj(mod)
            mo_mx = mirror_obj.matrix_world
            # only the rotation/scale part matters for "alignment"
            if not compare_matrix(mx.to_3x3(), mo_mx.to_3x3(), precision=5):
                misaligned['sorted_mods'].append(mod)
                if mirror_obj not in misaligned['sorted_objects']:
                    misaligned['sorted_objects'].append(mirror_obj)
                if mirror_obj in misaligned['object_mods']:
                    misaligned['object_mods'][mirror_obj].append(mod)
                else:
                    misaligned['object_mods'][mirror_obj] = [mod]
                misaligned['matrices'][mirror_obj] = mirror_obj.matrix_world
            else:
                aligned.append(mod)
        if len(self.mirror_mods) == len(misaligned['sorted_mods']):
            misaligned['isallmisaligned'] = True
        if debug:
            printd(misaligned)
        if misaligned['sorted_mods']:
            return aligned, misaligned
        else:
            return aligned, False
class Unmirror(bpy.types.Operator):
    """Remove the last mirror modifier from each selected object, cleaning up
    orphaned mirror-empties that are no longer referenced by any modifier."""
    bl_idname = "machin3.unmirror"
    bl_label = "MACHIN3: Unmirror"
    bl_description = "Removes the last modifer in the stack of the selected objects"
    bl_options = {'REGISTER', 'UNDO'}
    def draw(self, context):
        layout = self.layout
        column = layout.column()
    @classmethod
    def poll(cls, context):
        mirror_meshes = [obj for obj in context.selected_objects if obj.type == "MESH" and any(mod.type == "MIRROR" for mod in obj.modifiers)]
        if mirror_meshes:
            return True
        mirror_gpencils = [obj for obj in context.selected_objects if obj.type == "GPENCIL" and any(mod.type == "GP_MIRROR" for mod in obj.grease_pencil_modifiers)]
        if mirror_gpencils:
            return True
    def execute(self, context):
        targets = set()
        for obj in context.selected_objects:
            if obj.type in ["MESH", "CURVE"]:
                target = self.unmirror_mesh_obj(obj)
                # collect childless mirror-empties for potential cleanup below
                if target and target.type == "EMPTY" and not target.children:
                    targets.add(target)
            elif obj.type == "GPENCIL":
                self.unmirror_gpencil_obj(obj)
            elif obj.type == "EMPTY" and obj.instance_collection:
                col = obj.instance_collection
                instance_col_targets = set()
                # renamed loop var: previously shadowed the outer `obj`
                for col_obj in col.objects:
                    target = self.unmirror_mesh_obj(col_obj)
                    if target and target.type == "EMPTY":
                        instance_col_targets.add(target)
                # fixed: previously removed list(targets)[0], which targeted the
                # wrong set and raised IndexError when `targets` was empty
                if len(instance_col_targets) == 1:
                    bpy.data.objects.remove(list(instance_col_targets)[0], do_unlink=True)
        if targets:
            # keep empties that are still used by some other mirror modifier
            targets_in_use = {mod.mirror_object for obj in bpy.data.objects for mod in obj.modifiers if mod.type =='MIRROR' and mod.mirror_object and mod.mirror_object.type == 'EMPTY'}
            for target in targets:
                if target not in targets_in_use:
                    bpy.data.objects.remove(target, do_unlink=True)
        return {'FINISHED'}
    def unmirror_mesh_obj(self, obj):
        """Remove the last MIRROR modifier; return its mirror object (or None)."""
        mirrors = [mod for mod in obj.modifiers if mod.type == "MIRROR"]
        if mirrors:
            target = mirrors[-1].mirror_object
            obj.modifiers.remove(mirrors[-1])
            return target
    def unmirror_gpencil_obj(self, obj):
        """Remove the last GP_MIRROR modifier from a grease pencil object."""
        mirrors = [mod for mod in obj.grease_pencil_modifiers if mod.type == "GP_MIRROR"]
        if mirrors:
            obj.grease_pencil_modifiers.remove(mirrors[-1])
| AtixCG/Universal-3D-Shortcuts | Blender/With Addons/scripts/addons/MACHIN3tools/operators/mirror.py | mirror.py | py | 28,604 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "bpy.types",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.BoolProperty",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "bpy.props.BoolProperty",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "bpy.prop... |
39101218782 | # type: ignore
import json
# Read the secrets file once at import time. A context manager closes the
# handle promptly (the original left `fin` open for the process lifetime).
with open("secrets.json") as fin:
    raw_data = fin.read()
environ_data = json.loads(raw_data)
def load(os, db):
    """Copy every loaded secret into ``os.environ``.

    ``os`` is the caller's os module (injected rather than imported here);
    ``db`` is accepted for call-site compatibility and is unused.
    """
    for key, value in environ_data.items():
        os.environ[key] = value
| py660/PyChat-Self-Deploy | shh.py | shh.py | py | 216 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
}
] |
25497213504 | import abc
import logging
import random
import numpy as np
import pandas as pd
EVALUATION_CRITERIA = 'Accuracy'
def _new_func(optimization, t, theta=1.0, record=None, gamma=1):
third_term = np.sqrt(2 * np.log(t) / optimization.count)
forth_term = np.sqrt(1 / theta * third_term)
second_term = np.sqrt(1 / theta * optimization.square_mean)
result = gamma * (optimization.mu + second_term) + third_term + forth_term
if record is not None:
assert isinstance(record, list)
record.append((optimization.name, optimization.mu, optimization.square_mean, second_term,
third_term, forth_term, third_term + forth_term, result))
return result
def _ucb_func(optimization, t, record=None):
second_term = np.sqrt(2 * np.log(t) / optimization.count)
result = optimization.mu + second_term
if record is not None:
assert isinstance(record, list)
record.append((optimization.name, optimization.mu, second_term, result))
return result
class ModelSelection:
    """Base class for model-selection strategies over a set of optimization arms."""

    def __init__(self, optimizations, logging_level=logging.DEBUG):
        self.optimizations = optimizations
        self._logger = self._init_logger(logging_level)

    def show_models(self):
        """Return a printable summary of every candidate optimization."""
        return ''.join(str(opt) + '\n\n' for opt in self.optimizations)

    @abc.abstractmethod
    def fit(self, train_x, train_y):
        return

    def statistics(self):
        """Summarize each arm (name, mean, sigma, pulls, best value) as a DataFrame."""
        rows = [(opt.name, opt.mu, opt.sigma, opt.count, opt.best_evaluation[EVALUATION_CRITERIA])
                for opt in self.optimizations]
        return pd.DataFrame(data=rows, columns=['name', 'mu', 'sigma', 'budget', 'best v'])

    def _best_selection(self):
        # Arm with the highest best observed evaluation; ties go to the first.
        return max(self.optimizations,
                   key=lambda opt: opt.best_evaluation[EVALUATION_CRITERIA])

    @staticmethod
    def _init_logger(level):
        """Create (or reuse) the shared 'model_selection' console logger."""
        logger = logging.getLogger('model_selection')
        logger.setLevel(level)
        # only attach a handler the first time; the logger is process-global
        if not logger.hasHandlers():
            handler = logging.StreamHandler()
            handler.setLevel(level)
            handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
            logger.addHandler(handler)
        return logger
class ERUCB(ModelSelection):
    """ERUCB strategy: picks the next arm via each optimization's own
    ``selection_value`` and records the per-arm parameters at every step.

    `theta` is the exploration parameter forwarded to ``selection_value``;
    `param_change_info` accumulates (label, DataFrame) snapshots for analysis.
    """
    def __init__(self, optimizations, theta=0.01):
        super().__init__(optimizations)
        self.theta = theta
        self.param_change_info = []
        # column names for the per-step parameter snapshot DataFrames
        self.columns = ['name', 'count', 'beta1', 'beta0', 'e_beta0', 'e_beta1', 'e_variance',
                        'sigma', 'func_m', 'func_n', '1/t^alpha', 'e_beta0_inside_ppf',
                        'e_beta1_inside_ppf', 'e_beta0_item3', 'muY_kt', 'delta_t', 'sqrt(1/theta*variance)',
                        'last_item', 'selection_value']
    def fit(self, train_x, train_y, budget=200):
        """Run model selection for `budget` evaluations and return the best arm.

        Each arm is first initialized with a few runs; afterwards the arm with
        the highest ``selection_value`` is pulled.  Arms whose selection value
        is NaN are re-run (consuming budget) until the value becomes defined.
        """
        # Initializing models
        self._logger.info("Initialization")
        # consumption counts evaluations spent so far (0-based after init)
        consumption = self._init_models(train_x, train_y) - 1
        self._logger.info("Initialization Done")
        # do model selection
        while consumption < budget:
            self._logger.info("Process: {}/{}".format(consumption + 1, budget))
            selection_values = [o.selection_value(consumption + 1, self.theta) for o in self.optimizations]
            if np.isnan(selection_values).any():
                # re-run every NaN arm until its selection value is defined
                # (or the budget runs out)
                for (value, o) in zip(selection_values, self.optimizations):
                    if np.isnan(value):
                        while consumption < budget and np.isnan(value):
                            self._logger.info('Selection value of {} is nan, rerunning: {}/{}'
                                              .format(o.name, consumption + 1, budget))
                            self._update_param_info(consumption + 1, prefix='rerun {} '.format(o.name))
                            o.run_one_step(train_x, train_y)
                            consumption += 1
                            value = o.selection_value(consumption + 1, self.theta)
                        if consumption >= budget:
                            return self._best_selection()
                # recompute after the reruns; all values must now be defined
                selection_values = [o.selection_value(consumption + 1, self.theta) for o in self.optimizations]
                assert not np.isnan(selection_values).any()
            next_model = self.optimizations[np.argmax(selection_values)]
            self._update_param_info(consumption + 1, prefix='Select {} '.format(next_model.name))
            next_model.run_one_step(train_x, train_y)
            consumption += 1
        return self._best_selection()
    def _update_param_info(self, t, prefix=''):
        # snapshot each arm's parameters at step t for later inspection
        param_info = [o.param_info(t, self.theta) for o in self.optimizations]
        self.param_change_info.append(('{}t={}'.format(prefix, t),
                                       pd.DataFrame(data=param_info, columns=self.columns)))
    def statistics(self):
        """Summarize each arm's regression parameters and best value as a DataFrame."""
        data = [(o.name, o.beta0, o.beta1, o.variance, o.count, o.best_evaluation[EVALUATION_CRITERIA]) for o in
                self.optimizations]
        return pd.DataFrame(data=data, columns=['name', 'beta0', 'beta1', 'variance', 'budget', 'best v'])
    def _init_models(self, train_x, train_y, init_times=3):
        """Run every arm `init_times` times; returns the number of runs + 1."""
        count = 1
        total_count = len(self.optimizations) * init_times
        for o in self.optimizations:
            self._logger.info('Initializing {}'.format(o.name))
            for _ in range(init_times):
                self._logger.info('Init {}/{}'.format(count, total_count))
                count += 1
                o.run_one_step(train_x, train_y)
        return count
class BanditModelSelection(ModelSelection):
    """Bandit-style model selection with a pluggable arm-selection rule.

    ``update_func`` chooses the strategy: 'new' (the paper's selection value,
    see ``_new_func``), 'ucb' (classic UCB1 via ``_ucb_func``) or 'random'.
    """
    _update_functions = ['new', 'ucb', 'random']
    def __init__(self, optimizations, update_func='new', theta=0.01, gamma=20, beta=0):
        super().__init__(optimizations)
        self.param_change_info = []
        # theta/gamma parameterize the 'new' selection value; beta is passed
        # through to run_one_step when update_func == 'new'
        self.theta = theta
        self.gamma = gamma
        self.update_func = update_func
        self.beta = beta
    def fit(self, train_x, train_y, budget=200):
        """Fit on training data and select the best model
        Parameters
        ----------
        train_x: np.ndarray or list
            the features
        train_y: np.ndarray or list
            the label
        budget: int
            the number of samples
        Returns
        -------
        result: bandit.model_optimization.RandomOptimization
            best model
        """
        self._clean()  # clean history data
        self._logger.debug('Initializing')
        self._init_each_optimizations(train_x, train_y, beta=self.beta)
        # each arm was pulled once during init, so start counting from there
        for t in range(len(self.optimizations) + 1, budget + 1):
            self._logger.debug('Process: {} / {}'.format(t, budget))
            next_model = self._next_selection(t)
            if self.update_func == 'new':
                next_model.run_one_step(train_x, train_y, beta=self.beta)
            else:
                next_model.run_one_step(train_x, train_y)
        return self._best_selection()
    def statistics(self):
        """Summarize each arm as a DataFrame (extra square-mean column for 'new')."""
        if self.update_func == 'new':
            data = [(o.name, o.mu, o.sigma, o.square_mean, o.count, o.best_evaluation[EVALUATION_CRITERIA])
                    for o in self.optimizations]
            return pd.DataFrame(data=data, columns=['name', 'mu(-beta)', 'sigma', 'mu_Y', 'budget', 'best v'])
        else:
            # random or ucb method
            return super().statistics()
    def _wrap_selection_information(self, data):
        # shape the per-arm term breakdown recorded by the selection function;
        # 'random' records nothing and falls through returning None
        if self.update_func == 'new':
            return pd.DataFrame(data=data, columns=['name', 'mu', 'square_mean', 'sqrt(mu_Y)', 'third term',
                                                    'forth term', 'sum of last two', 'sum all'])
        elif self.update_func == 'ucb':
            return pd.DataFrame(data=data, columns=['name', 'mu', 'second_term', 'sum all'])
    def _init_each_optimizations(self, train_x, train_y, beta):
        """Pull every arm once (clearing any history first)."""
        for optimization in self.optimizations:
            optimization.clear()  # clear history data
            if self.update_func == 'new':
                optimization.run_one_step(train_x, train_y, beta=beta)
            else:
                optimization.run_one_step(train_x, train_y)
    def _next_selection(self, current_count):
        """Pick the arm to pull at step `current_count` per the update function."""
        selection_record = []  # used to record values of the terms of the equation for each models
        if self.update_func == 'new':
            values = [_new_func(o, current_count, theta=self.theta, record=selection_record, gamma=self.gamma)
                      for o in self.optimizations]
        elif self.update_func == 'ucb':
            values = [_ucb_func(o, current_count, selection_record) for o in self.optimizations]
        else:
            # return random result
            return random.choice(self.optimizations)
        self.param_change_info.append(self._wrap_selection_information(selection_record))
        return self.optimizations[np.argmax(values)]
    def _clean(self):
        # reset recorded selection breakdowns between fits
        self.param_change_info = []
class EpsilonGreedySelection(ModelSelection):
    """Epsilon-greedy bandit: explore a random arm with probability epsilon,
    otherwise exploit an arm with the highest empirical mean."""

    def __init__(self, optimizations):
        super().__init__(optimizations)

    def fit(self, train_x, train_y, epsilon=0.1, budget=200):
        """Spend `budget` pulls epsilon-greedily, then return the best arm."""
        for step in range(1, budget + 1):
            self._logger.debug('Process: {} / {}'.format(step, budget))
            if random.uniform(0, 1) < epsilon:
                chosen = self._explore()
            else:
                chosen = self._exploit()
            chosen.run_one_step(train_x, train_y)
        return self._best_selection()

    def _explore(self):
        # uniformly pick any arm
        return random.choice(self.optimizations)

    def _exploit(self):
        # pick among the arms with maximal empirical mean, ties broken at random
        means = np.array([o.mu for o in self.optimizations])
        best_indices = np.argwhere(means == means.max())
        winner = random.choice(best_indices.reshape(best_indices.shape[0]))
        return self.optimizations[winner]
class SoftMaxSelection(ModelSelection):
    """Softmax (Boltzmann) exploration: arms are sampled with probability
    proportional to exp(mu / temperature)."""

    def fit(self, train_x, train_y, temperature=0.1, budget=200):
        """Spend `budget` pulls via softmax sampling, then return the best arm."""
        for step in range(budget):
            self._logger.debug('Process: {} / {}'.format(step + 1, budget))
            chosen = self._next_selection(temperature)
            chosen.run_one_step(train_x, train_y)
        return self._best_selection()

    def _next_selection(self, temperature):
        # cumulative (unnormalized) Boltzmann weights: boundaries[i + 1] is the
        # upper end of arm i's interval on the number line
        boundaries = [0]
        for o in self.optimizations:
            boundaries.append(boundaries[-1] + np.power(np.e, o.mu / temperature))
        roll = random.uniform(0, boundaries[-1])
        for idx, upper in enumerate(boundaries[1:]):
            if roll < upper:
                return self.optimizations[idx]
        assert False
class SingleArm(ModelSelection):
    """Baseline strategy that spends the entire budget on the first arm."""

    def fit(self, train_x, train_y, budget=200):
        """Pull the first arm `budget` times and return its instances."""
        arm = self.optimizations[0]
        for step in range(budget):
            self._logger.debug(f'Process: {step + 1}/{budget}')
            arm.run_one_step(train_x, train_y)
        return arm.instances
| pineconebean/automl_lab | bandit/model_selection.py | model_selection.py | py | 11,212 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.sqrt",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 14,
... |
70561841635 |
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback
class LRFinder(Callback):
    #adjuted callback from Lucas Anders at: https://github.com/LucasAnders1/LearningRateFinder/blob/master/lr_finder_callback.py
    #adjusted to geometrically increase by step size instead of linearly increase
    #adjusted to run on tensorflow.keras instead of plain ole' keras
    '''
    This callback implements a learning rate finder(LRF)
    The learning rate is constantly increased during training.
    On training end, the training loss is plotted against the learning rate.
    One may choose a learning rate for a model based on the given graph,
    selecting a value slightly before the minimal training loss.
    The idea was introduced by Leslie N. Smith in this paper: https://arxiv.org/abs/1506.01186
    # Example
        lrf = LRFinder(max_iterations=5000, base_lr = 0.0001, max_lr = 0.1)
        model.fit(x_train, y_train, epochs=1, batch_size=128, callbacks=[LRF])
    # Arguments
        max_iterations: training stops when max_iterations are reached
        base_lr: initial learning rate used in training
        max_lr: training stops when learning rate exceeds max_lr
        lr_step_size: for each batch, the learning rate is multiplied by
            lr_step_size (geometric schedule)
    '''
    def __init__(self, max_iterations=5000, base_lr=0.0001, max_lr=0.1, lr_step_size=1.05):
        super().__init__()  # initialise the Keras Callback machinery
        self.max_iterations = max_iterations
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.lr_step_size = lr_step_size
        self.losses = []
        self.lrs = []
        self.lr = base_lr
        # Bug fix: count batches ourselves. The old code read logs.get('batch'),
        # which is not present in tf.keras batch logs and yielded None, making
        # the ">=" comparison raise a TypeError on the first batch.
        self.iterations = 0

    def on_train_batch_end(self, batch, logs=None):
        # logs=None default instead of a shared mutable {} default argument.
        logs = logs or {}
        self.iterations += 1  # cumulative across epochs (`batch` resets per epoch)
        if self.iterations >= self.max_iterations or self.lr >= self.max_lr:
            self.model.stop_training = True
        self.lr = self.lr * self.lr_step_size
        K.set_value(self.model.optimizer.lr, self.lr)
        self.losses.append(logs.get('loss'))
        self.lrs.append(self.lr)

    def on_train_end(self, logs=None):
        # Loss-vs-LR curve: pick an LR slightly before the loss minimum.
        plt.plot(self.lrs, self.losses)
        plt.show()
class LR_pattern_smith(Callback):
    '''
    The learning rate is linearly increased from base_lr to max_lr, then linearly decreased back to base_lr, and then
    held constant at a low learning rate (min_lr) for the final epochs (Around 20-35% of epochs)
    The idea was introduced by Leslie N. Smith in this paper: https://arxiv.org/abs/1506.01186
    # Example
        lra = LR_adjuster(15, min_lr = 0.002, max_lr = 0.1, base_lr = 0.04)
        model.fit(x_train, y_train, epochs=1, batch_size=128, callbacks=[lra])
    # Arguments
        epochs: the amount of epochs used to train the neural network
        base_lr: initial learning rate used in training
        max_lr: the highest learning rate to be used in training, the learning rate will decrease after reaching this rate
            this learning rate should be set using methods discussed in Smith's paper https://arxiv.org/pdf/1803.09820.pdf
        min_lr: the learning rate to be used for the last 20-30% of epochs
    '''
    def __init__(self, epochs, min_lr=0.0015, base_lr=0.01, max_lr=0.1):
        super().__init__()
        self.base_lr = base_lr
        self.max_lr = max_lr
        # Bug fix: previously hard-coded to 0.0015, silently ignoring the
        # min_lr argument the caller passed in.
        self.min_lr = min_lr
        self.epochs_max_point = (epochs - 5) / 2
        self.lr_step_size = (max_lr - base_lr) / self.epochs_max_point
        self.lrs = []
        self.lr = base_lr
        self.epochs = epochs

    def on_epoch_end(self, epoch, logs=None):
        # Triangular schedule: ramp up to the apex, ramp down, then flat min_lr.
        if (epoch < self.epochs_max_point):
            self.lr = self.lr + self.lr_step_size
        elif (epoch >= self.epochs_max_point and epoch < self.epochs_max_point * 2):
            self.lr = self.lr - self.lr_step_size
        else:
            self.lr = self.min_lr
        K.set_value(self.model.optimizer.lr, self.lr)
        self.lrs.append(self.lr)

    def on_train_end(self, logs=None):
        # Bug fixes: `np` was never imported in this module (use range instead),
        # and plt.show was referenced but never called.
        plt.plot(range(self.epochs), self.lrs)
        plt.show()
        print(self.lrs)
class LR_adjuster(Callback):
    '''
    Two-phase constant learning-rate schedule: train at base_lr, then switch
    to final_lr once epoch_switch is reached and hold it for the rest of
    training. (The previous docstring described LR_pattern_smith's triangular
    schedule, which this class does not implement.)
    # Example
        lra = LR_adjuster(epoch_switch=10, base_lr=0.002, final_lr=0.0002)
        model.fit(x_train, y_train, epochs=20, batch_size=128, callbacks=[lra])
    # Arguments
        epoch_switch: the epoch on which the base_lr is switched to the final_lr
        base_lr: initial learning rate used in training
        final_lr: the second learning rate to be used
    '''
    def __init__(self, epoch_switch, base_lr=0.002, final_lr=0.0002):
        super().__init__()
        self.final_lr = final_lr
        self.lrs = []
        self.lr = base_lr
        self.epoch_switch = epoch_switch

    def on_epoch_end(self, epoch, logs=None):
        if (epoch == self.epoch_switch):
            self.lr = self.final_lr
        K.set_value(self.model.optimizer.lr, self.lr)
        self.lrs.append(self.lr)

    def on_train_end(self, logs=None):
        # Bug fixes: self.epochs was never set (AttributeError here), and
        # `np` is not imported in this module — plot against the recorded
        # history length instead. Also actually call plt.show().
        plt.plot(range(len(self.lrs)), self.lrs)
        plt.show()
        print(self.lrs)
| valentinocc/Keras_cifar10 | custom_callbacks.py | custom_callbacks.py | py | 5,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.keras.callbacks.Callback",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.backend.set_value",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.backend",
"line_number": 42,
"usage_type": "name"
... |
36783915591 | import numpy as np
import matplotlib.pyplot as plt
import higra as hg
import torch
from tqdm import tqdm
#=========================================
#= Helper Functions =
#=========================================
def get_centroids(X, high_dim_clusters, K, device="cpu", dim=2):
    """Return the per-cluster mean of the rows of ``X`` as a (K, dim) tensor.

    ``high_dim_clusters`` assigns each row of ``X`` a label in [0, K).
    """
    centers = torch.zeros((K, dim), device=device)
    for label in range(K):
        members = np.argwhere(high_dim_clusters == label)
        centers[label] = torch.mean(X[members, :], axis=0)
    return centers
def get_closest_2_centroids(X, centroids, device="cpu"):
    """For each row of ``X``, return the indices of its two nearest centroids.

    The pair is reported in ascending index order, as an (n, 2) long tensor.
    """
    nearest_pairs = torch.zeros((X.shape[0], 2), dtype=torch.long, device=device)
    for row, point in enumerate(X):
        sq_dist = torch.sum((point - centroids) ** 2, axis=1)
        pair = torch.topk(-sq_dist, 2)[1]
        if pair[1] < pair[0]:
            pair = torch.flip(pair, [0])
        nearest_pairs[row] = pair
    return nearest_pairs
def cdist_custom(X, Y, eps=1e-6, device="cpu"):
    """Pairwise Euclidean distances between the rows of ``X`` and of ``Y``.

    Squared distances are clamped from below at ``eps`` before the square
    root, which keeps sqrt (and its gradient) well-behaved at zero distance.
    """
    floor = torch.FloatTensor([eps]).to(device)
    squared = torch.sum((X.unsqueeze(1) - Y) ** 2, axis=2)
    return torch.sqrt(torch.max(floor, squared))
def get_ghost_centroids_low_dim(centroids, end_vertices, neighbours, ratio=0.1):
    """Extrapolate 'ghost' centroids past each end vertex, away from its neighbour."""
    ends = centroids[end_vertices]
    return ends + (ends - centroids[neighbours]) * ratio
#=========================================
#= Loss Functions =
#=========================================
def cosine_loss(triplets, centroids):
    """Cosine-distance penalty for each centroid triplet (a, b, c).

    Penalises how far the direction a->b deviates from b->c: 0 when the two
    legs point the same way, up to 2 when they point in opposite directions.
    """
    first_leg = centroids[triplets[:, 1]] - centroids[triplets[:, 0]]
    second_leg = centroids[triplets[:, 2]] - centroids[triplets[:, 1]]
    similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-08)(first_leg, second_leg)
    # Map similarity in [-1, 1] onto a distance in [0, 2].
    return 2 - (similarity + 1)
def push_pull_crispness(X,centers,closests_2,x_labels,geo_dist,eps=1e-6,device="cpu"):
    """Push/pull term and crispness term of the embedding loss.

    Shapes assumed from usage — TODO confirm against callers:
        X: (n, d) low-dimensional points; centers: (k, d) centroids.
        closests_2: (n, 2) indices of the two currently-closest centroids.
        x_labels: (n, 2) ground-truth centroid pair per point.
        geo_dist: (k, k) geodesic distances between centroids.

    Returns a pair (push_pull, crispness); push_pull > 0 where the current
    closest pair disagrees with the ground-truth pair, 0 when it agrees.
    """
    #Fix for sqrt issues in pytorch cdist
    distances = cdist_custom(centers,X,eps=eps,device=device)
    distances_centroids = cdist_custom(centers,centers,eps=eps,device=device)
    # Geodesic distance to the two closest centroids
    geodesic_distances = geo_dist[closests_2.T[0],closests_2.T[1]]
    idx = torch.arange(X.shape[0])
    # Normalize distances by the separation of each point's ground-truth centroid pair
    norm = distances_centroids[x_labels.T[0],x_labels.T[1]]
    distances_closests = ((distances[closests_2[:,0],idx]+distances[closests_2[:,1],idx])/norm)**2
    distances_truth = ((distances[x_labels[:,0],idx]+distances[x_labels[:,1],idx])/norm)**2
    # > 0 means that we are wrong and =0 that we have the right clustering
    # We break the relationship if factor > 1
    return geodesic_distances*(-distances_closests + distances_truth), distances_truth
def finetune_loss(X, groundtruth, device="cpu", real_cosine_dist=False, ghost_centroids_distance_ratio=0.5):
    """Compute the three fine-tuning loss terms (push-pull, crispness, cosine).

    ``groundtruth`` is an object carrying the precomputed clustering state
    (high_dim_assignments, K, end_vertices, neighbours, closests_centroids,
    geo_dist, triplets, cos_dist, device) — presumably built once per dataset;
    verify against the caller. Returns three scalar tensors (means).
    """
    # Recompute low-dimensional centroids from the current embedding X.
    centroids = get_centroids(X, groundtruth.high_dim_assignments, groundtruth.K, device=groundtruth.device)
    # Add extrapolated 'ghost' centroids beyond the end vertices of the skeleton.
    ghost_centroids = get_ghost_centroids_low_dim(centroids, groundtruth.end_vertices, groundtruth.neighbours, ratio=ghost_centroids_distance_ratio)
    centroids = torch.cat([centroids, ghost_centroids], dim=0)
    new_closests_centroids = get_closest_2_centroids(X, centroids, device=groundtruth.device)
    loss_push_pull, loss_crispness = push_pull_crispness(X, centroids, new_closests_centroids, groundtruth.closests_centroids, groundtruth.geo_dist, device=groundtruth.device)
    loss_cos = cosine_loss(groundtruth.triplets, centroids)
    if real_cosine_dist == True:
        # Match the low-dim cosine distances to the precomputed high-dim ones.
        mse = torch.nn.MSELoss(reduction="none")
        loss_cos = mse(loss_cos, groundtruth.cos_dist)
    return torch.mean(loss_push_pull), torch.mean(loss_crispness), torch.mean(loss_cos)
| hci-unihd/DTAE | loss.py | loss.py | py | 3,853 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "numpy.argwhere",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number... |
25608601191 | import json
with open( "Task2.json","r+")as f:
data=json.load(f)
def group_of_decade(movies):
    """Group movie entries by release decade.

    ``movies`` maps year strings to lists of movies. Returns a dict mapping
    each decade start (int, ascending) to the concatenated movies of that
    decade; the same grouping is also written to Task3.json.
    """
    decades = sorted({int(year) - int(year) % 10 for year in movies})
    grouped = {
        decade: [movie
                 for year in movies
                 if decade <= int(year) <= decade + 9
                 for movie in movies[year]]
        for decade in decades
    }
    with open("Task3.json", "w+") as out:
        json.dump(grouped, out, indent=4)
    return grouped
group_of_decade(data)
| Subhkirti/PYTHON | WEB SCRAPPING/TASK3.py | TASK3.py | py | 666 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 22,
"usage_type": "call"
}
] |
21532212683 | # -*- coding: utf-8 -*-
import scrapy
from protectoras_scrap.models.Pet import Pet
class ProtectoraLugoSpider(scrapy.Spider):
    """Scrapy spider for the Protectora de Lugo adoption listings.

    Walks the paginated adoption list and yields one Pet item per detail page.
    """
    name = 'protectora_lugo_spider'
    allowed_domains = ['www.protectoralugo.org']
    base_url = 'http://www.protectoralugo.org/'
    start_urls = ['http://www.protectoralugo.org/templates/jt001_j25/html/com_wrapper/wrapper/adopciones.php?username=&email=&nombreusuario=&password=&pruebas=']

    def start_requests(self):
        # Entry point: fetch each start URL and parse it as a listing page.
        for url in self.start_urls:
            yield scrapy.Request(url, self.parse_pet_urls)

    def get_next_page(self, response):
        # Href of the pagination link right after the current-page marker, or None.
        return response.css('#pagina_actual + a::attr(href)').extract_first()

    def parse_pet_urls(self, response):
        # Follow every pet detail link on this page, then the next listing page (if any).
        next_page = self.get_next_page(response)
        pet_urls = response.css('.box_listado a::attr(href)').extract()
        for url in pet_urls:
            yield scrapy.Request(url, callback=self.parse_pet_info)
        if (next_page):
            yield scrapy.Request(self.base_url + next_page, callback=self.parse_pet_urls)

    def parse_pet_info(self, response):
        # The detail page lays the pet's attributes out as a fixed sequence of
        # <b> tags; the index->field mapping below assumes that layout.
        # NOTE(review): index 6 is skipped — presumably an unused field; verify.
        data = response.css('div[style="width: 300px"] b::text').extract()
        pet = Pet()
        pet['name'] = data[1]
        pet['breed'] = data[2]
        pet['sex'] = data[3]
        pet['born_date'] = data[4]
        pet['dangerous'] = data[5]
        pet['aptitude'] = data[7]
        pet['adult_weight'] = data[8]
        yield pet
{
"api_name": "scrapy.Spider",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
... |
264166147 | """django_obj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.views.static import serve
urlpatterns = [
    path('cms/', include('cms.urls')),
    path('user/', include('user.urls')),
    # path('admin/', admin.site.urls),
    # URL for serving uploaded images; TEMPLATES and MEDIA are configured in settings.
    # This previously lived in user's urls with the same pattern but images did not
    # display — presumably because r'^media/(?P<path>.*)$' was nested under the
    # 'user/' prefix. The media directory sits at the project level, alongside the
    # user and cms apps, so mind the file-placement path next time.
    re_path(r'^media/(?P<path>.*)$', serve, {"document_root": 'C:/Users/admin/PycharmProjects/django_obj/media/'}),
    path('school/', include('school.urls')),
    path('major/', include('major.urls')),
    path('order/', include('orders.urls')),
    path('posts/', include('posts.urls')),
    path('server/', include('server.urls')),
]
| zhouf1234/django_obj | django_obj/urls.py | urls.py | py | 1,554 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.urls.inc... |
73426657635 | from time import sleep
import requests
def parsing_data(token_key, repos_list):
    """Delete the GitHub repositories listed in ``repos_list``.

    Each entry is an "owner/repo" line; blank lines and lines starting with
    '#' are skipped. ``token_key`` must carry the repo-delete scope.
    Returns the list of API URLs that were targeted.
    """
    url = "https://api.github.com/repos/{}/{}"
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": "token {}".format(token_key),
        "X-OAuth-Scopes": "repo"
    }
    urls = []
    for line in repos_list:
        if line == '' or line[0] == '#':
            continue
        try:
            name, repo = line.strip().split("/")
        except ValueError:
            # Bug fix: str.split raises ValueError (not IOError) when the line
            # does not contain exactly one '/'; the old handler never fired and
            # malformed lines crashed the whole run.
            print("ERROR: cannot delete repo name:", line)
            continue
        urls.append(url.format(name, repo))
    for i in range(len(urls)):
        print("deleting repository: ", urls[i])
        ret = requests.delete(url=urls[i], headers=headers)
        sleep(2)  # be gentle with the API rate limit
        print(ret)
    return urls
if __name__ == '__main__':
    # token with delete permission
    delete_repo_token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    # path to the file listing the repositories to delete
    file_path = './repos.txt'
    with open(file_path, 'r', encoding='utf-8') as f:
        data = f.readlines()
    parsing_data(delete_repo_token, data)
    print("finished.")
| freedanfan/delete_gitlab_repositories | delete_gitlab_repositories.py | delete_gitlab_repositories.py | py | 1,191 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.delete",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
}
] |
41440456232 | from collections import deque
from time import sleep
def append_one(num):
    """Return ``num`` with the digit 1 appended (e.g. 5 -> 51)."""
    return int(f"{num}1")
def solution(num, target):
    """BOJ 16953: minimal transform count from ``num`` to ``target``.

    Allowed moves are "multiply by 2" and "append the digit 1". Returns the
    number of moves plus one (the problem's convention), or -1 when the
    target is unreachable.

    Bug fix: the original returned -1 as soon as *one* popped state could not
    append a trailing 1 (its dangling ``else``), even while other reachable
    states were still queued — e.g. solution(2, 8) wrongly gave -1. Now -1 is
    returned only after the whole BFS frontier is exhausted.
    """
    queue = deque([(num, 0)])
    while queue:
        cur_num, cur_cnt = queue.popleft()
        if cur_num == target:
            return cur_cnt + 1
        if cur_num * 2 <= target:
            queue.append((cur_num * 2, cur_cnt + 1))
        appended = int(str(cur_num) + '1')  # cur_num with the digit 1 appended
        if appended <= target:
            queue.append((appended, cur_cnt + 1))
    return -1
if __name__ == "__main__":
num, target = list(map(int, input().split()))
ret = solution(num, target)
print(ret) | aszxvcb/TIL | BOJ/boj16953.py | boj16953.py | py | 738 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
31326029236 | # -*- coding: utf-8 -*-
### Import libraries ###
import numpy as np
import pandas as pd
from pandas import Grouper
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style(style="darkgrid")
from data_utils import most_reviewed_products
from pathlib import Path
from matplotlib import rcParams
import json
# Default text styling for figures
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Inter']
rcParams['font.weight'] = 500
rcParams['xtick.labelsize'] = 13
rcParams['ytick.labelsize'] = 13
figOutputPath = Path("../figures/")
### Functions ###
def label_typography(ax):
    """Apply the shared axis-label typography (weight 500, 15 pt) to ``ax``."""
    ax.xaxis.label.set_fontweight(500)
    ax.yaxis.label.set_fontsize(15)
    ax.yaxis.label.set_fontweight(500)
    ax.xaxis.label.set_fontsize(15)
    return
def most_active_reviewers(df, n_reviewers):
    """Return the rows of ``df`` written by the ``n_reviewers`` most prolific reviewers.

    Rewritten with ``isin`` instead of value_counts().reset_index() + merge:
    the column names produced by reset_index changed in pandas 2.0
    ('index'/'reviewerID' -> 'reviewerID'/'count'), which broke the old merge
    on the 'index' column. The index is reset to mirror merge's fresh index.
    """
    top = df['reviewerID'].value_counts().nlargest(n_reviewers)
    return df[df['reviewerID'].isin(top.index)].reset_index(drop=True)
def analyze_reviews(df, df_attribute, name_file, xlabel):
    """Count-plot the distribution of ``df_attribute`` and save it as an SVG.

    ``name_file`` both names the output file (1_<name_file>.svg) and selects
    per-figure tick formatting; ``xlabel`` is the x-axis label.
    """
    fig, ax = plt.subplots(figsize=(10, 10))
    sns.countplot(df_attribute, ax=ax)
    label_typography(ax)
    # Set and style the title, and move it up a bit (1.02 = 2%)
    #ax.set_title(title, fontname='Inter', fontsize=20, fontweight=500, y=1.02)
    ax.xaxis.label.set_text(xlabel)
    ax.yaxis.label.set_text("Review count")
    # Per-figure axis cosmetics: human tick labels and K/M-abbreviated counts.
    if (name_file=="review_distribution_per_day"):
        ax.set_xticklabels(["Sunday", "Monday", "Thuesday", "Wednesday", "Thursday", "Friday", "Saturday"])
        ax.xaxis.label.set_fontsize(13)
        ax.set_yticks([0, 100000, 200000])
        ax.set_yticklabels(["0", "100K", "200K"])
    elif (name_file=="review_distribution_per_month"):
        ax.set_xticklabels(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"])
        ax.xaxis.label.set_fontsize(13)
        ax.set_yticks([0, 100000, 200000])
        ax.set_yticklabels(["0", "100K", "200K"])
    elif (name_file=="review_distribution_per_year"):
        ax.set_xticklabels([2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018])
        ax.xaxis.label.set_fontsize(13)
        ax.set_yticks([0, 100000, 200000])
        ax.set_yticklabels(["0", "100K", "200K"])
    elif (name_file=="unverified_overall_distribution"):
        ax.set_yticks([0, 50000, 100000])
        ax.set_yticklabels(["0", "50K", "100K"])
    elif (name_file=="verified_overall_distribution"):
        ax.set_yticks([0, 300000, 600000])
        ax.set_yticklabels(["0", "300K", "600K"])
    else:
        ax.set_yticks([0, 100000, 500000, 1000000])
        ax.set_yticklabels(["0", "100K", "500K", "1M"])
    ax.figure.savefig(figOutputPath / '1_{0}.svg'.format(name_file), format='svg')
    print('Exported 1_{}.svg'.format(name_file))
def run(df):
    """Produce the whole exploratory-figure set for the review dataframe ``df``.

    Saves ten SVG figures (distributions, violin plot, stacked opinion bars
    for bestsellers and top reviewers, verified/unverified splits) into
    ``figOutputPath``. ``df`` is assumed to carry the columns used below
    (overall, opinion, n_words, week_day, asin, reviewerID, verified) —
    TODO confirm against the preprocessing step.
    """
    # 1 - Countplot: overall distribution
    analyze_reviews(df, df.overall, 'overall_distribution', 'Overall')
    # 2 - Countplot: opinion distribution
    analyze_reviews(df, df.opinion, 'opinion_distribution', 'Opinion')
    # 3 - Distribution of words (reviews capped at 1000 words for readability)
    reduced_df = df.copy()
    reduced_df = reduced_df[reduced_df['n_words'] <= 1000]
    fig, ax5 = plt.subplots()
    ax5 = sns.violinplot(x=reduced_df['opinion'], y=reduced_df['n_words'])
    #ax5.set_title('Distribution of words in review for each opinion')
    ax5.xaxis.label.set_text("Opinion")
    ax5.yaxis.label.set_text("Number of words")
    label_typography(ax5)
    ax5.figure.savefig(figOutputPath / '1_correlation_words_opinion.svg', format='svg')
    # 4 - Review distribution per day
    analyze_reviews(df, df.week_day, 'review_distribution_per_day', 'Day')
    # 5 - Top 20 products: 100%-stacked bars of opinion share per product
    fig, ax3 = plt.subplots(figsize=(15, 15))
    top_products = most_reviewed_products(df, 20)
    r = list(top_products['asin'].unique())
    positive = list(top_products.loc[top_products['opinion'] == 'positive', 'asin'].value_counts().reindex(top_products['asin'].unique(), fill_value=0))
    neutral = list(top_products.loc[top_products['opinion'] == 'neutral', 'asin'].value_counts().reindex(top_products['asin'].unique(), fill_value=0))
    negative = list(top_products.loc[top_products['opinion'] == 'negative', 'asin'].value_counts().reindex(top_products['asin'].unique(), fill_value=0))
    raw_data = {'positive': positive, 'neutral': neutral, 'negative': negative}
    raw_data = pd.DataFrame(raw_data)
    totals = [i+j+k for i,j,k in zip(raw_data['positive'], raw_data['neutral'], raw_data['negative'])]
    positive_percentage = [i / j * 100 for i, j in zip(raw_data['positive'], totals)]
    neutral_percentage = [i / j * 100 for i, j in zip(raw_data['neutral'], totals)]
    negative_percentage = [i / j * 100 for i, j in zip(raw_data['negative'], totals)]
    bar_width = 0.85
    ax3.bar(r, positive_percentage, color='#b5ffb9', edgecolor='white', width=bar_width, label='positive')
    ax3.bar(r, neutral_percentage, bottom=positive_percentage, color='#f9bc86', edgecolor='white', width=bar_width, label='neutral')
    ax3.bar(r, negative_percentage, bottom=[i + j for i, j in zip(positive_percentage, neutral_percentage)], color='#a3acff', edgecolor='white', width=bar_width, label='negative')
    ax3.set_xticklabels(r, rotation=90)
    ax3.set_xlabel('Unique product')
    ax3.set_xticks([])
    ax3.set_ylabel('Percentage')
    ax3.set_xticks([])
    label_typography(ax3)
    #legend = ax3.legend(loc='lower left', shadow=True, fontsize='large')
    #legend.get_frame().set_facecolor('#00FFCC')
    #ax3.set_title('Opinion for besteller products')
    ax3.figure.savefig(figOutputPath / '1_sentiment_reviews_bestseller_products.svg', format='svg')
    print("Exported 1_sentiment_reviews_besteller_products.svg")
    # 6 - Top 50 reviewers by review count
    fig, ax4 = plt.subplots(figsize=(15, 15))
    top_reviewers = most_active_reviewers(df, 50)
    sns.countplot(top_reviewers.reviewerID, ax=ax4, order=top_reviewers['reviewerID'].value_counts().index)
    r = list(top_reviewers['reviewerID'].unique())
    ax4.set_xticklabels(r, rotation=90)
    ax4.set_ylabel('Review count')
    ax4.set_xlabel('Unique Reviewers')
    ax4.set_xticks([])
    label_typography(ax4)
    #ax4.set_title('Reviewers with most reviews')
    ax4.figure.savefig(figOutputPath / '1_reviewers_most_reviews.svg', format='svg')
    # 7 - Opinion of top reviewers (100%-stacked bars, same recipe as step 5)
    fig, ax6 = plt.subplots(figsize=(15, 15))
    top_reviewers = most_active_reviewers(df, 50)
    r = list(top_reviewers['reviewerID'].unique())
    positive = list(top_reviewers.loc[top_reviewers['opinion'] == 'positive', 'reviewerID'].value_counts().reindex(top_reviewers['reviewerID'].unique(), fill_value=0))
    neutral = list(top_reviewers.loc[top_reviewers['opinion'] == 'neutral', 'reviewerID'].value_counts().reindex(top_reviewers['reviewerID'].unique(), fill_value=0))
    negative = list(top_reviewers.loc[top_reviewers['opinion'] == 'negative', 'reviewerID'].value_counts().reindex(top_reviewers['reviewerID'].unique(), fill_value=0))
    raw_data = {'positive': positive, 'neutral': neutral, 'negative': negative}
    raw_data = pd.DataFrame(raw_data)
    #print("Opinions ",raw_data)
    totals = [i+j+k for i,j,k in zip(raw_data['positive'], raw_data['neutral'], raw_data['negative'])]
    #totals = list(top_products['asin'].value_counts().reindex(top_products['asin'].unique(), fill_value=0))
    positive_percentage = [i / j * 100 for i, j in zip(raw_data['positive'], totals)]
    neutral_percentage = [i / j * 100 for i, j in zip(raw_data['neutral'], totals)]
    negative_percentage = [i / j * 100 for i, j in zip(raw_data['negative'], totals)]
    bar_width = 1
    ax6.bar(r, positive_percentage, color='#b5ffb9', edgecolor='white', width=bar_width, label='positive')
    ax6.bar(r, neutral_percentage, bottom=positive_percentage, color='#f9bc86', edgecolor='white', width=bar_width, label='neutral')
    ax6.bar(r, negative_percentage, bottom=[i + j for i, j in zip(positive_percentage, neutral_percentage)], color='#a3acff', edgecolor='white', width=bar_width, label='negative')
    ax6.set_xticklabels(r, rotation=90)
    ax6.set_xlabel('Unique Reviewers')
    ax3.set_xticks([])
    ax6.set_xticks([])
    ax6.set_ylabel('Percentage')
    label_typography(ax6)
    label_typography(ax3)
    #legend = ax6.legend(loc='lower left', shadow=True, fontsize='large')
    #legend.get_frame().set_facecolor('#00FFCC')
    #ax6.set_title('Opinion of top reviewers')
    #plt.show()
    ax6.figure.savefig(figOutputPath / '1_opinion_top_reviewers.svg', format='svg')
    print("Exported 1_opinion_top_reviewers.svg")
    # 8 - Unverified reviews: overall-score distribution
    unverified = df[df['verified'] == False]
    analyze_reviews(unverified, unverified.overall, 'unverified_overall_distribution', 'Overall')
    # 9 - Verified reviews: overall-score distribution
    verified = df[df['verified'] == True]
    analyze_reviews(verified, verified.overall, 'verified_overall_distribution', 'Overall')
    # 10 - verified vs unverified share per top-50 reviewer (stacked bars)
    fig, ax7 = plt.subplots(figsize=(15, 15))
    r = list(top_reviewers['reviewerID'].unique())
    verified = list(top_reviewers.loc[top_reviewers['verified'] == True, 'reviewerID'].value_counts().reindex(top_reviewers['reviewerID'].unique(), fill_value=0))
    unverified = list(top_reviewers.loc[top_reviewers['verified'] == False, 'reviewerID'].value_counts().reindex(top_reviewers['reviewerID'].unique(), fill_value=0))
    raw_data = {'verified': verified, 'unverified': unverified}
    raw_data = pd.DataFrame(raw_data)
    totals = [i+j for i,j in zip(raw_data['verified'], raw_data['unverified'])]
    verified_percentage = [i / j * 100 for i, j in zip(raw_data['verified'], totals)]
    unverified_percentage = [i / j * 100 for i, j in zip(raw_data['unverified'], totals)]
    bar_width = 1
    ax7.bar(r, verified_percentage, color='#b5ffb9', edgecolor='white', width=bar_width, label='verified')
    ax7.bar(r, unverified_percentage, bottom=verified_percentage, color='#f9bc86', edgecolor='white', width=bar_width, label='unverified')
    ax7.set_xticklabels(r, rotation=90)
    ax7.set_xlabel('Unique Reviewers')
    ax7.set_xticks([])
    ax3.set_xticks([])
    ax7.set_ylabel('Percentage')
    label_typography(ax3)
    label_typography(ax7)
    #legend = ax7.legend(loc='upper right', shadow=True, fontsize='large')
    #legend.get_frame().set_facecolor('#00FFCC')
    #ax7.set_title('Verified vs Unverified reviews of top reviewers')
    #plt.show()
    ax7.figure.savefig(figOutputPath / '1_verified_unverified.svg', format='svg')
    print("Exported 1_verified_unverified.svg")
# Exporting raw data for the web demo
def top_50_products_verified_unverified_both(df):
    """Export mean overall scores (verified / unverified / all) for the top products.

    Despite the name, only the 5 most-reviewed products are exported; the
    result is written to ver_unver.json for the web demo.
    """
    print("top_50_products_verified_unverified_both")
    top_products = most_reviewed_products(df, 5)
    r = list(top_products['asin'].unique())
    products = []
    verified_series = []
    unverified_series = []
    overall_series = []
    for asin in r:
        print("Product: ", asin)
        products.append(asin)
        # Mean overall score per product, split by verification status.
        verified = df.loc[(df['asin'] == asin) & (df['verified'] == True), 'overall'].mean()
        print("-verified: ", verified)
        verified_series.append(verified)
        unverified = df.loc[(df['asin'] == asin) & (df['verified'] == False), 'overall'].mean()
        unverified_series.append(unverified)
        print("-unverified: ", unverified)
        aall = df.loc[(df['asin'] == asin), 'overall'].mean()
        overall_series.append(aall)
        print("-all: ", aall)
    # Highcharts-style series list for the demo front end.
    obj = [
        {"name": "products",
         "data": products},
        {"name": "verified",
         "data": verified_series},
        {"name": "unverified",
         "data": unverified_series},
        {"name": "all",
         "data": overall_series
         }]
    with open('ver_unver.json', 'w') as outfile:
        json.dump(obj, outfile, indent=2, sort_keys=True)
    print(products)
def count_reviews(df):
    """Export per-score [verified, unverified] review counts for the top 20 products.

    For each product and each overall score 1..5, records the pair
    [verified count, unverified count]; the result is written to ver_counts.json.
    """
    top_products = most_reviewed_products(df, 20)
    r = list(top_products['asin'].unique())
    products = []
    # One element per product
    verified_score_qty = []
    unverified_score_qty = []  # NOTE(review): never filled — unverified counts ride along in the pairs below
    n = 0
    for asin in r:
        print("Product: ", asin)
        products.append(asin)
        dataseries_ver = []
        dataseries_unver = []
        for i in range(1, 6):
            # data = [verified count, unverified count] for score i
            key = {"name": int(i), "data": [int(df.loc[(df['asin'] == asin) & (df['verified'] == True) & (df['overall'] == i), 'overall'].count()), int(df.loc[(df['asin'] == asin) & (df['verified'] == False) & (df['overall'] == i), 'overall'].count())]}
            dataseries_ver.append(key)
        verified_score_qty.append(dataseries_ver)
        n = n + 1
    obj = {'products': products, 'count': verified_score_qty, }
    with open('ver_counts.json', 'w') as outfile:
        json.dump(obj, outfile, indent=2, sort_keys=True)
def year_month_day_reviews(df):
    """Export the day/month/year review-count distribution figures."""
    analyze_reviews(df, df.week_day, 'review_distribution_per_day', 'Day')
    analyze_reviews(df, df.month, 'review_distribution_per_month', 'Month')
    analyze_reviews(df, df.year, 'review_distribution_per_year', 'Year')
def export_week_day(df):
    """Print, per overall score 1..5, the review counts grouped by weekday."""
    for i in range(1, 6):
        print(i, df.loc[df['overall'] == i].groupby(['week_day']).size())
def export_month(df):
    """Print, per overall score 1..5, the review counts grouped by month."""
    for i in range(1, 6):
        print(i, df.loc[df['overall'] == i].groupby(['month']).size().values.tolist())
def export_year(df):
    """Print, per overall score 1..5, the review counts grouped by year."""
    for i in range(1, 6):
        print(i, df.loc[df['overall'] == i].groupby(['year']).size().values.tolist())
| avivace/reviews-sentiment | scripts/data_exploration.py | data_exploration.py | py | 14,076 | python | en | code | 25 | github-code | 1 | [
{
"api_name": "seaborn.color_palette",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotli... |
2396932646 | import pathlib
# directories
# directories (all anchored at the project root, i.e. two levels above this file)
DATA_DIR = pathlib.Path(__file__).resolve().parent.parent / "data"
RESOURCE_DIR = pathlib.Path(__file__).resolve().parent.parent / "resources"
MODEL_DIR = RESOURCE_DIR / "checkpoints"
WSD_DIR = DATA_DIR / "wsd_corpora"
TRAIN_DIR = DATA_DIR / "train"
DEV_DIR = DATA_DIR / "dev"
MAPPING_DIR = RESOURCE_DIR / "mapping"
VOCABS_DIR = RESOURCE_DIR / "vocabs"
EMBEDDINGS_DIR = RESOURCE_DIR / "embeddings"
EVALUATION_DIR = RESOURCE_DIR / "evaluation"
PREDICTION_DIR = RESOURCE_DIR / "predicts"
# data (SemCor training features/labels and SemEval-2007 dev set)
SEMCOR_TRAIN = TRAIN_DIR / "semcor_train.txt"
SEMCOR_POS = TRAIN_DIR / "semcor_pos.txt"
SEMCOR_LABEL = TRAIN_DIR / "semcor_label.txt"
SE07_FEATURE = DEV_DIR / "se07_features.txt"
SE07_LABEL = DEV_DIR / "se07_labels.txt"
# mapping (sense-inventory cross-walks: BabelNet <-> WordNet / lexnames / domains)
BN2LEX_MAP = MAPPING_DIR / "babelnet2lexnames.tsv"
BN2DOM_MAP = MAPPING_DIR / "babelnet2wndomains.tsv"
BN2WN_MAP = MAPPING_DIR / "babelnet2wordnet.tsv"
WN2BN_MAP = MAPPING_DIR / "wordnet2babelnet.txt"
LEMMA2WN_MAP = MAPPING_DIR / "lemma2wordnet.txt"
LEMMA2BN_MAP = MAPPING_DIR / "lemma2babelnet.txt"
SEMCOR_MAP = WSD_DIR / "semcor" / "semcor.gold.key.txt"
SE07MAP = WSD_DIR / "semeval2007" / "semeval2007.gold.key.txt"
OMSTI_MAP = WSD_DIR / "semcor_omsti" / "semcor+omsti.gold.key.txt"
# vocabularies (training tokens and the three label inventories)
TRAIN_VOCAB_BN = VOCABS_DIR / "train_vocab_bn.txt"
LABEL_VOCAB_BN = VOCABS_DIR / "label_vocab_bn.txt"
LABEL_VOCAB_DOM = VOCABS_DIR / "label_vocab_dom.txt"
LABEL_VOCAB_LEX = VOCABS_DIR / "label_vocab_lex.txt"
# embeddings (word and sense vectors, text and binary formats)
SENSE_VECTORS = EMBEDDINGS_DIR / "embeddings_senses.vec"
SENSE_VECTORS_BIN = EMBEDDINGS_DIR / "embeddings_senses.bin"
PRE_VECTORS = EMBEDDINGS_DIR / "embeddings.vec"
PRE_VECTORS_BIN = EMBEDDINGS_DIR / "embeddings.bin"
| Riccorl/elmo-wsd | elmo-wsd/constants.py | constants.py | py | 1,683 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
}
] |
73268716835 | import json
# Seed data: one dict per movie record.
test_list = \
    [{"Title": "Harry Potter", "DVD": "T", "Form": "C", "Genre": "Fantasy", "Date": "2003", "Alt Title 1": "", "Alt Title 2": "", "Count": 1,
      "Director": "Jon", "Writer": "Rowling", "Language": "English", "Date Watched": "2019", "Spec": ""},
     {"Title": "Transformers", "DVD": "F", "Form": "B", "Genre": "Action", "Date": "2005", "Alt Title 1": "Worst Movie", "Alt Title 2": "", "Count": 1,
      "Director": "Mike", "Writer": "Bay", "Language": "English", "Date Watched": "2010", "Spec": ""}]
# Flat database of movie records.
database = []
# Bug fix: append each entry — the old loop appended the whole test_list once
# per entry, producing a nested, duplicated structure.
for entry in test_list:
    database.append(entry)
# JSON string form kept for callers that want it.
final_database = json.dumps(database)
with open('final_database.json', 'w') as outfile:
    # Bug fix: dump the list itself. The old code json.dump-ed the already
    # serialized string, writing double-encoded JSON to disk.
    json.dump(database, outfile)
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 17,
"usage_type": "call"
}
] |
74329843233 | import numpy as np
from sympy import Matrix
import string
import random
dim = 2  # block size of the Hill cipher (n x n key matrix)
cipher = string.ascii_uppercase  # the A-Z alphabet used for letter indexing
def main():
    """Prompt for a mode and dispatch to encryption or decryption."""
    mode = input("Select Encrypt or Decrypt:")
    if mode == 'Encrypt':
        encrypt()
    elif mode == 'Decrypt':
        decrypt()
def encrypt():
    """Read uppercase plaintext from stdin, Hill-encrypt it and print the result.

    Processes the text in blocks of `dim` letters; an incomplete final block
    is padded with random letters.  Input must consist of characters from
    `cipher` (A-Z) — other characters raise ValueError via cipher.index.
    """
    key = np.matrix([[1, 2], [2, 5]])
    plaintext = input("Input your plaintext for encryption:")
    encryption = ""
    # walk over the plaintext one block of `dim` letters at a time
    for start in range(0, len(plaintext), dim):
        column = []
        for offset in range(dim):
            position = start + offset
            if position < len(plaintext):
                column.append([cipher.index(plaintext[position])])
            else:
                # pad a short trailing block with a random letter index
                column.append([random.randint(0, 25)])
        encoded = (key * np.matrix(column)) % 26
        for offset in range(dim):
            encryption += cipher[encoded.item(offset)]
    print(encryption)
def decrypt():
    """Read uppercase ciphertext from stdin, Hill-decrypt it and print the result.

    The decryption key is the modular inverse (mod 26) of the fixed
    encryption matrix used in encrypt().

    Raises ValueError when the ciphertext length is not a multiple of
    `dim` (the original crashed with an opaque IndexError instead).
    """
    ciphertext = input("Input your ciphertext for decryption:")
    if len(ciphertext) % dim != 0:
        raise ValueError("ciphertext length must be a multiple of " + str(dim))
    decryption = ""
    key = np.matrix([[1, 2], [2, 5]])
    # Invert the key matrix modulo 26 via sympy, then convert back to an
    # integer numpy matrix.  BUG FIX: the original left `key` as a nested
    # Python list of sympy integers and relied on fragile list*np.matrix
    # coercion (object-dtype arithmetic); an explicit int matrix makes the
    # multiplication well-defined.
    key = Matrix(key).inv_mod(26)
    key = np.matrix(key.tolist(), dtype=int)
    for index in range(0, len(ciphertext), dim):
        value = [[cipher.index(ciphertext[index + j])] for j in range(dim)]
        vector = (key * np.matrix(value)) % 26
        for j in range(dim):
            decryption += cipher[vector.item(j)]
    print(decryption)
# Run the interactive prompt only when executed as a script.
if __name__ == '__main__':
    main() | jeongyoonlee2015/Ciphers | Theoretical/hillCipher.py | hillCipher.py | py | 1,602 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "string.ascii_uppercase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.matrix",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",... |
30632192338 | import argparse
import json
import os
import platform
import PySide6 as RefMod
import PyInstaller.__main__
from mapclient.core.provenance import reproducibility_info
from mapclient.settings.definitions import APPLICATION_NAME, FROZEN_PROVENANCE_INFO_FILE
# Set Python optimisations on (PyInstaller and the bundled app inherit this).
os.environ['PYTHONOPTIMIZE'] = '1'

here = os.path.dirname(__file__)  # directory containing this build script
def main(variant):
    """
    Build a frozen MAP Client application with PyInstaller.

    :param variant: suffix appended to the application name (may be '').

    Side effects: writes a provenance info file next to the CWD, invokes
    PyInstaller, and removes the info file afterwards.
    Raises NotImplementedError on platforms other than macOS and Windows.
    """
    run_command = [
        '../../src/mapclient/application.py',
        '-n', f'{APPLICATION_NAME}{variant}',
        # '--debug', 'noarchive',
        '--windowed',
        # '--console',
        '--noconfirm',
        '--hidden-import', 'scipy',
        '--hidden-import', 'scipy.interpolate',
        '--hidden-import', 'numpy',
        '--hidden-import', 'mapclientplugins',
        '--hidden-import', 'cmlibs.zinc',
        '--additional-hooks-dir=hooks',
    ]
    # Record provenance information and bundle it into the application root.
    info = reproducibility_info()
    info_file = FROZEN_PROVENANCE_INFO_FILE
    with open(info_file, 'w') as f:
        f.write(json.dumps(info, default=lambda o: o.__dict__, sort_keys=True, indent=2))
    data = os.pathsep.join([info_file, '.'])
    run_command.append(f'--add-data={data}')
    # Bundle the plugin-wizard images under res/images in the frozen app.
    images_dir = os.path.join('..', '..', 'src', 'mapclient', 'tools', 'pluginwizard', 'qt', 'images')
    names = os.listdir(images_dir)
    for name in names:
        data = os.pathsep.join([os.path.join(os.path.abspath(images_dir), name), os.path.join('res', 'images')])
        run_command.append(f'--add-data={data}')
    # Locate the Qt rcc/uic tools shipped inside the PySide6 package;
    # their relative location is platform dependent.
    pyside_dir = os.path.dirname(RefMod.__file__)
    if platform.system() == 'Darwin':
        rcc_exe = os.path.join(pyside_dir, 'Qt', 'libexec', "rcc")
        uic_exe = os.path.join(pyside_dir, 'Qt', 'libexec', "uic")
        macos_icon = os.path.join('..', 'macos', 'MAP-Client.icns')
        run_command.append(f'--icon={macos_icon}')
    elif platform.system() == "Windows":
        rcc_exe = os.path.join(pyside_dir, "rcc.exe")
        uic_exe = os.path.join(pyside_dir, "uic.exe")
        win_icon = os.path.join('..', 'win', 'MAP-Client.ico')
        run_command.append(f'--icon={win_icon}')
    else:
        raise NotImplementedError("Platform is not supported for creating a MAP Client application.")
    run_command.append(os.pathsep.join([f'--add-binary={rcc_exe}', 'PySide6/']))
    run_command.append(os.pathsep.join([f'--add-binary={uic_exe}', 'PySide6/']))
    # Optionally bundle internal workflows: a zip named by the
    # INTERNAL_WORKFLOWS_ZIP environment variable takes precedence over the
    # in-tree default location.
    externally_specified_internal_workflows_zip = os.environ.get('INTERNAL_WORKFLOWS_ZIP', '<not-a-file>')
    if os.path.isfile(externally_specified_internal_workflows_zip):
        internal_workflows_zip = externally_specified_internal_workflows_zip
    else:
        internal_workflows_zip = os.path.abspath(os.path.join('..', '..', 'src', 'internal_workflows.zip'))
    if os.path.isfile(internal_workflows_zip):
        data = os.pathsep.join([internal_workflows_zip, '.'])
        run_command.append(f'--add-data={data}')
    # Extra module search paths for plugins, one path per line in
    # mapclientplugins_paths.txt (if present in the CWD).
    plugin_paths_file = os.path.join(os.getcwd(), 'mapclientplugins_paths.txt')
    if os.path.isfile(plugin_paths_file):
        with open(plugin_paths_file) as f:
            lines = f.readlines()
        for line in lines:
            run_command.append(f'--paths={line.rstrip()}')
    print('Running command: ', run_command)
    PyInstaller.__main__.run(run_command)
    # Clean up the temporary provenance file once it has been bundled.
    os.remove(info_file)
if __name__ == "__main__":
    # Parse the optional variant name and forward it, prefixed with '-',
    # to the build routine.
    parser = argparse.ArgumentParser(prog="create_installer")
    parser.add_argument("variant", nargs='?', default='', help="MAP Client variant")
    parsed = parser.parse_args()
    app_variant = f"-{parsed.variant}" if parsed.variant else ''
    main(app_variant)
| MusculoskeletalAtlasProject/mapclient | res/pyinstaller/create_application.py | create_application.py | py | 3,596 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "mapclient.settings.defin... |
2113231979 | from __future__ import division
from __future__ import print_function
import numpy as np
import gzip
import re
import datetime
import calendar
import time
import glob
from copy import deepcopy
import warnings
import sys
import os
import codecs
from .tools import unix2date, date2unix, limitMaInidces, quantile
from .tools import oneD2twoD, _get_netCDF_module
# Resolve the installed package version: importlib.metadata on modern
# Pythons, pkg_resources as the legacy fallback.
try:
    from importlib.metadata import version, PackageNotFoundError
    py3 = True
except ImportError:
    from pkg_resources import get_distribution, DistributionNotFound
    py3 = False

if py3:
    try:
        __version__ = version("IMProToo")
    except PackageNotFoundError:
        # package is not installed
        pass
else:
    try:
        __version__ = get_distribution("IMProToo").version
    except DistributionNotFound:
        # package is not installed
        pass
class MrrZe:
'''
class to calculate the 'real' MRR Ze from MRR raw data. The spectra are
noise corrected and dealiased. see batch_convert_rawData.py for
exemplary use
'''
warnings.filterwarnings('always', '.*', UserWarning,)
def __init__(self, rawData):
    """
    Store the raw MRR data and initialise all processing settings.

    @parameter rawData: MRR raw-data object; must provide mrrRawCC,
        header, mrrRawTime, timezone, mrrRawHeight, mrrRawTF,
        mrrRawSpectrum and mrrRawNoSpec.

    All tunable options are collected in the self.co dict and may be
    changed before calling rawToSnow().
    """
    if rawData.mrrRawCC == 0:
        print('WARNING: MRR calibration constant set to 0!')

    self.co = dict()

    # verbosity
    self.co["debug"] = 0

    # ######MRR Settings#######

    # mrr frequency, MRR after 2011 (or upgraded) use 24.23e9
    self.co["mrrFrequency"] = 24.15e9  # in Hz,
    # wavelength in m
    self.co["lamb"] = 299792458. / self.co["mrrFrequency"]
    # mrr calibration constant
    self.co["mrrCalibConst"] = rawData.mrrRawCC

    # do not change these values, unless you have a non standard MRR!
    # nyquist range minimum
    self.co["nyqVmin"] = 0
    # nyquist range maximum
    self.co["nyqVmax"] = 11.9301147
    # nyquist delta
    self.co["nyqVdelta"] = 0.1893669
    # list with nyquist velocities
    self.co["nyqVel"] = np.arange(
        self.co["nyqVmin"],
        self.co["nyqVmax"]+0.0001,
        self.co["nyqVdelta"]
    )
    # spectral resolution
    self.co["widthSpectrum"] = 64
    # min height to be processed
    self.co["minH"] = 1  # start counting at 0
    # max height to be processed
    self.co["maxH"] = 31  # start counting at 0
    # no of processed heights
    self.co["noH"] = self.co["maxH"]+1 - self.co["minH"]
    # shape of spectrum for one time step
    self.co["specShape"] = (self.co["noH"], self.co["widthSpectrum"],)
    # input data MRR averaging time
    self.co["averagingTime"] = 10
    # |K**2| dielectric constant
    self.co["K2"] = 0.92

    # ######options for finding peaks#######

    # minimum width of a peak. if set to 4 instead of 3, more clutter is
    # removed, but sensitivity becomes worse.
    self.co["findPeak_minPeakWidth"] = 3
    # minimum standard deviation of of spectrum for peak
    # self.co["findPeak_minStdPerS"]/np.sqrt(self.co["averagingTime"])
    self.co["findPeak_minStdPerS"] = 0.6
    # minimum difference of Doppler velocity from self.co["nyqVmax"]/2 for
    # peak
    self.co["findPeak_minWdiff"] = 0.2

    # ######options for getting peaks#######

    # method for finding peaks in the spectrum, either based on Hildebrand
    # and Sekhon, 1974 [hilde] or on the method of descending average
    # [descAve]. [hilde] is recommended
    self.co["getPeak_method"] = "hilde"  # ["hilde","descAve"]
    # sometimes the first method fails and almost the whole spectrum is
    # found as a peak, so apply a second check based on the remaining
    # method from [hilde,descAve]
    self.co["getPeak_makeDoubleCheck"] = True
    # apply double check to peaks wider than xx*noOfSpec
    # wider real peaks can actually happen! These are usually bimodal
    # peaks, descending average method fails for them, thus the spectrum
    self.co["getPeak_makeDoubleCheck_minPeakWidth"] = 0.9
    # hilde method uses an extra buffer to avoid to large peaks. loop stops
    # first at spectrum >= self.co["getPeak_hildeExtraLimit"]*hilde_limit,
    # only one more bin is added if above self.co[
    # "getPeak_hildeExtraLimit"]. More bins above self.co[
    # "getPeak_hildeExtraLimit"] are ignored
    self.co["getPeak_hildeExtraLimit"] = 1.2  # times hildebrand limit
    # options for descAve method
    # window to calculate the average, if too large, it might go into the
    # next peak! if too small, it might not catch bimodal distributions
    self.co["getPeak_descAveCheckWidth"] = 10
    # descAve stops not before mean is smaller than self.co[
    # "getPeak_descAveMinMeanWeight"] of the mean of the self.co[
    # "getPeak_descAveCheckWidth"] smallest bins. make very big to turn off
    self.co["getPeak_descAveMinMeanWeight"] = 4

    # ####options for confirming peaks ##########
    # check whether time/height neighbors of a peak contain a peak as well
    self.co["confirmPeak_5x5boxCoherenceTest"] = True
    # maximum of other peaks must be within X Doppler-bins of the maximum
    # of the tested peak
    self.co["confirmPeak_5x5boxCoherenceTest_maxBinDistane"] = 10

    # ######general options#######

    # process only peaks in self.co["spectrumBorderMin"][height]:
    # self.co["spectrumBorderMax"][height]
    self.co["spectrumBorderMin"] = [5, 4, 3, 2, 2, 2, 2, 2, 2, 2,
                                    2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 5]
    self.co["spectrumBorderMax"] = [60, 61, 62, 63, 63, 63, 63, 63, 63,
                                    63, 63, 63, 63, 63, 63, 63, 63, 63,
                                    63, 63, 63, 63, 63, 63, 63, 63, 63,
                                    63, 62, 61, 63]
    # interpolate spectrum in between
    self.co["interpolateSpectrum"] = True
    # extend also peaks to interpolated part
    self.co["fillInterpolatedPeakGaps"] = True
    # mask everything in these heights, since they are disturbed
    self.co["completelyMaskedHeights"] = [0, 1, 30]
    # first height with trustful peaks. Setting important for dealiasing
    # to avoid folding from completelyMaskedHeights into the first used
    # height.
    self.co["firstUsedHeight"] = 2

    # ######dealiasing options#######

    # dealiase spectrum yes/no
    self.co["dealiaseSpectrum"] = True
    # save also non dealiased eta, Ze, W, Znoise specWidth,
    # peakVelLeftBorder, peakVelRightBorder
    self.co["dealiaseSpectrum_saveAlsoNonDealiased"] = True
    # make sure there is only one peak per height after dealiasing!
    self.co["dealiaseSpectrum_maxOnePeakPerHeight"] = True
    # dealiasing is based on comparison with reference velocity calculated
    # from reflectivity. v = A*Ze**B
    # Atlas et al. 1973
    self.co['dealiaseSpectrum_Ze-vRelationSnowA'] = 0.817
    # Atlas et al. 1973
    self.co['dealiaseSpectrum_Ze-vRelationSnowB'] = 0.063
    # Atlas et al. 1973
    self.co['dealiaseSpectrum_Ze-vRelationRainA'] = 2.6
    # Atlas et al. 1973
    self.co['dealiaseSpectrum_Ze-vRelationRainB'] = 0.107
    # trusted peak needs minimal Ze
    self.co['dealiaseSpectrum_trustedPeakminZeQuantile'] = 0.1
    # if you have interference, you don't want to start you dealiasing
    # procedure there
    self.co["dealiaseSpectrum_heightsWithInterference"] = []
    # test coherence of dealiasesd velocity spectrum in time dimension.
    # try to refold short jumps.
    self.co["dealiaseSpectrum_makeCoherenceTest"] = True
    # if the height averaged velocity between to timesteps is larger than
    # this, it is tried to refold the spectrum
    self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"] = 8
    # if there are after coherence test still velocity jumps, mask
    # +/- timesteps
    self.co["dealiaseSpectrum_makeCoherenceTest_maskRadius"] = 10

    # ######netCDF options#######
    self.co["ncCreator"] = "IMProToo user"
    self.co["ncDescription"] = "MRR data processed with IMProToo"
    self.co["ncLocation"] = ""
    self.co["ncInstitution"] = ""

    # ######end of settings#######

    # special option to stop processing in the middle and return results
    self.debugStopper = 0

    self.missingNumber = -9999.

    # copy the raw data, restricting heights to [minH, maxH]
    self.header = rawData.header
    self.time = rawData.mrrRawTime
    self.timezone = rawData.timezone
    self.H = rawData.mrrRawHeight[:, self.co["minH"]:self.co["maxH"]+1]
    self.TF = rawData.mrrRawTF[:, self.co["minH"]:self.co["maxH"]+1]
    self.rawSpectrum = rawData.mrrRawSpectrum[
        :, self.co["minH"]:self.co["maxH"]+1
    ]
    self.noSpecPerTimestep = rawData.mrrRawNoSpec

    self.no_h = np.shape(self.H)[1]
    self.no_t = np.shape(self.time)[0]
    self.no_v = self.co["widthSpectrum"]

    self._shape2D = np.shape(self.H)
    self._shape3D = np.shape(self.rawSpectrum)

    # quality flags, filled by rawToSnow()
    self.qual = dict()

    return
def averageSpectra(self, averagingTime):
    """
    average spectra and other data. If averaging time is e.g. 60, the
    data with the timestamp 14:00 contains all measurements from 13:59:00
    to 13:59:59 (like MRR standard software)

    @parameter averagingTime (int): new averaging interval in seconds

    Updates rawSpectrum, time, H, TF, noSpecPerTimestep, no_t and the
    shape attributes in place.  Intervals with a changed MRR
    configuration are skipped; intervals without data are filled with
    NaN / 0.
    """
    rawSpectra = self.rawSpectrum
    rawTimestamps = self.time
    heights = self.H
    TFs = self.TF
    noSpec = self.noSpecPerTimestep

    # find first entry
    startSeconds = unix2date(rawTimestamps[0]).second
    start = rawTimestamps[0] + averagingTime - startSeconds
    # find last minute
    # NOTE(review): the end is padded to the next full minute (60 s),
    # not to the next averagingTime step — confirm this is intended for
    # averagingTime != 60.
    endSeconds = unix2date(rawTimestamps[-1]).second
    end = rawTimestamps[-1] + 60 - endSeconds
    # make new time vector and
    rawTimestampsAve = np.ma.arange(
        start, end+averagingTime, averagingTime, dtype="int")

    # create new arrays
    newSpectraShape = list(rawSpectra.shape)
    newSpectraShape[0] = rawTimestampsAve.shape[0]
    rawSpectraAve = np.ma.zeros(newSpectraShape) * np.nan

    newTFsShape = list(TFs.shape)
    newTFsShape[0] = rawTimestampsAve.shape[0]
    TFsAve = np.ma.zeros(newTFsShape) * np.nan

    newHeightsShape = list(heights.shape)
    newHeightsShape[0] = rawTimestampsAve.shape[0]
    heightsAve = np.ma.zeros(newHeightsShape) * np.nan

    newNoSpecShape = (rawTimestampsAve.shape[0],)
    noSpecAve = np.ma.zeros(newNoSpecShape, dtype=int)

    # ugly loop trough new, averaged time vector!
    for t, timestamp in enumerate(rawTimestampsAve):
        # boolean array containing the wanted entries
        booleanTimes = (rawTimestamps < timestamp) * \
            (rawTimestamps >= timestamp-averagingTime)
        aveLength = np.sum(booleanTimes)
        # proceed only if entries were found
        if aveLength != 0:
            # and if TF and heights are NOT changing and if heights are
            # not zero!!
            if (
                np.all(TFs[booleanTimes] == TFs[booleanTimes][0]) and
                np.all(heights[booleanTimes] == heights[booleanTimes][0])
                and np.logical_not(np.all(heights[booleanTimes] == 0))
            ):
                # averaging:
                rawSpectraAve[t] = np.ma.average(
                    rawSpectra[booleanTimes], axis=0)
                heightsAve[t] = np.ma.average(
                    heights[booleanTimes], axis=0)
                TFsAve[t] = np.ma.average(TFs[booleanTimes], axis=0)
                noSpecAve[t] = np.ma.sum(noSpec[booleanTimes])
            else:
                print("Skipping data due to changed MRR configuration!")
        else:
            rawSpectraAve[t] = np.nan
            heightsAve[t] = np.nan
            TFsAve[t] = np.nan
            noSpecAve[t] = 0
            print("No Data at " + str(unix2date(timestamp)))

    self.rawSpectrum = rawSpectraAve
    self.time = rawTimestampsAve
    self.H = heightsAve
    self.TF = TFsAve
    self.noSpecPerTimestep = noSpecAve.filled(0)

    self.no_t = np.shape(self.time)[0]
    self._shape2D = np.shape(self.H)
    self._shape3D = np.shape(self.rawSpectrum)

    self.co["averagingTime"] = averagingTime

    return
def getSub(self, start, stop):
"""
cut out some spectra (for debugging)
start,stop (int): border indices
"""
if stop == -1:
stop = self._shape2D[0]
self.rawSpectrum = self.rawSpectrum[start:stop]
self.time = self.time[start:stop]
self.H = self.H[start:stop]
self.TF = self.TF[start:stop]
self.noSpecPerTimestep = self.noSpecPerTimestep[start:stop]
if len(self.noSpecPerTimestep) == 0:
raise ValueError('getSub: No data lef!')
self.no_t = np.shape(self.time)[0]
self._shape2D = np.shape(self.H)
self._shape3D = np.shape(self.rawSpectrum)
return
def rawToSnow(self):
    '''
    core function for calculating Ze and other moments. Settings have
    to be set before

    Pipeline (order matters): apply transfer function -> flag missing /
    noise-only spectra -> find peaks per height -> 5x5 coherence check ->
    noise estimation and subtraction -> spectrum border interpolation ->
    (optional) dealiasing -> moment calculation.  Results are stored as
    attributes (eta, Ze, W, ...) and quality flags in self.qual.
    '''
    if self.co["mrrCalibConst"] == 0:
        raise IOError('ERROR: MRR calibration constant set to 0!')

    self.untouchedRawSpectrum = deepcopy(self.rawSpectrum)

    # Doppler velocity axis, broadcast to the 3D data shape
    self.specVel = self.co["nyqVel"]
    self.specVel3D = np.zeros(self._shape3D)
    self.specVel3D[:] = self.specVel
    self.specIndex = np.arange(self.no_v)
    # mask of the disturbed spectrum borders per height
    self._specBorderMask = np.ones(self.co["specShape"], dtype=bool)
    for h in range(self.co["noH"]):
        self._specBorderMask[h, self.co["spectrumBorderMin"]
                             [h]:self.co["spectrumBorderMax"][h]] = False
    self._specBorderMask3D = np.ones(self._shape3D, dtype=bool)
    self._specBorderMask3D[:] = self._specBorderMask

    # but we have to apply the TF before we start anything:
    TF3D = np.zeros(self._shape3D)
    TF3D.T[:] = self.TF.T
    self.rawSpectrum = np.ma.masked_array(
        self.rawSpectrum.data / TF3D, self.rawSpectrum.mask)

    # 1)missing spectra
    missingMask = np.any(np.isnan(self.rawSpectrum.data), axis=-1)
    self.qual["incompleteSpectrum"] = missingMask
    # 2) Wdiff
    WdiffMask, self.wdiffs = self._testMeanW(self.rawSpectrum)
    # 3) std
    stdMask, self.stds = self._testStd(self.rawSpectrum)
    # join the results
    noiseMask = missingMask+(stdMask*WdiffMask)
    self.qual["spectrumVarianceTooLowForPeak"] = stdMask * \
        WdiffMask  # 2) no signal detected by variance test

    # make 3D noise Mask
    noiseMaskLarge = np.zeros(self._shape3D, dtype=bool).T
    noiseMaskLarge[:] = noiseMask.T
    noiseMaskLarge = noiseMaskLarge.T

    # we don't need the mask right now since missingMask contains all
    # mask entries
    self.rawSpectrum = self.rawSpectrum.data

    if self.debugStopper == 1:
        self.rawSpectrum = np.ma.masked_array(
            self.rawSpectrum, noiseMaskLarge)
        return

    # find the peak, height by height (borders differ per height)
    peakMask = np.ones(self._shape3D, dtype=bool)
    self.qual["usedSecondPeakAlgorithmDueToWidePeak"] = np.zeros(
        self._shape2D, dtype=bool)
    self.qual["peakTooThinn"] = np.zeros(self._shape2D, dtype=bool)
    for h in range(0, self.co["noH"]):
        # check whether there is anything to do
        if np.any(np.logical_not(noiseMaskLarge[:, h])):
            # get the peak
            specMins = self.co["spectrumBorderMin"][h]
            specMaxs = self.co["spectrumBorderMax"][h]
            res = self._getPeak(
                self.rawSpectrum[:, h, specMins:specMaxs][
                    ~noiseMask[:, h]],
                self.noSpecPerTimestep[~noiseMask[:, h]],
                h
            )
            (
                peakMask[:, h, specMins:specMaxs][~noiseMask[:, h]],
                self.qual["peakTooThinn"][:, h][~noiseMask[:, h]],
                self.qual["usedSecondPeakAlgorithmDueToWidePeak"][:, h][
                    ~noiseMask[:, h]]
            ) = res
    # apply results
    self.rawSpectrum = np.ma.masked_array(self.rawSpectrum, peakMask)

    # what is the noise, but _without_ the borders, we want in noise 3D
    # also
    noise = np.ma.masked_array(self.rawSpectrum.data, (np.logical_not(
        self.rawSpectrum.mask)+self._specBorderMask3D))
    self.specNoise = np.ma.average(noise, axis=-1).filled(0)

    if self.debugStopper == 2:
        return

    # confirm peaks by checking their time/height neighbourhood
    if self.co["confirmPeak_5x5boxCoherenceTest"]:
        coherCheckNoiseMask = self._cleanUpNoiseMask(self.rawSpectrum)
        coherCheckNoiseMask3D = np.zeros(self._shape3D, dtype=bool)
        coherCheckNoiseMask3D.T[:] = coherCheckNoiseMask.T
    else:
        coherCheckNoiseMask = np.zeros(self._shape2D, dtype=bool)
        coherCheckNoiseMask3D = np.zeros(self._shape3D, dtype=bool)

    self.qual["peakRemovedByCoherenceTest"] = coherCheckNoiseMask * \
        (~np.all(self.rawSpectrum.mask, axis=-1))
    self.rawSpectrum.mask = self.rawSpectrum.mask + coherCheckNoiseMask3D

    if self.debugStopper == 3:
        return

    # since we have removed more noisy spectra we have to calculate the
    # noise again
    noise = np.ma.masked_array(self.rawSpectrum.data, (np.logical_not(
        self.rawSpectrum.mask)+self._specBorderMask3D))
    self.specNoise = np.ma.average(noise, axis=-1).filled(0)
    self.specNoise_std = np.ma.std(noise, axis=-1).filled(0)
    self.specNoise3D = np.zeros_like(noise).filled(0)
    self.specNoise3D.T[:] = self.specNoise.T

    # remove the noise
    self.rawSpectrum = np.ma.masked_array(
        self.rawSpectrum.data - self.specNoise3D, self.rawSpectrum.mask)

    if self.co["interpolateSpectrum"]:
        # interpolate spectrum: fill the masked border bins by linear
        # interpolation over the flattened index
        intSpectrum = deepcopy(self.rawSpectrum.data)
        ix = np.arange(len(self.rawSpectrum.ravel()))
        intSpectrum[self._specBorderMask3D] = np.interp(
            ix[self._specBorderMask3D.ravel()],
            ix[~self._specBorderMask3D.ravel()],
            self.rawSpectrum[~self._specBorderMask3D]
        )
        self.rawSpectrum = np.ma.masked_array(
            intSpectrum, self.rawSpectrum.mask)
        self.qual["interpolatedSpectrum"] = np.ones(
            self._shape2D, dtype=bool)
        if self.debugStopper == 5:
            return
    else:
        self.qual["interpolatedSpectrum"] = np.zeros(
            self._shape2D, dtype=bool)

    if self.co["fillInterpolatedPeakGaps"]:
        (
            self.rawSpectrum.mask,
            self.qual["filledInterpolatedPeakGaps"]
        ) = self._fillInterpolatedPeakGaps(self.rawSpectrum.mask)
    else:
        self.qual["filledInterpolatedPeakGaps"] = np.zeros(
            self._shape2D, dtype=bool)

    # calculate the (not dealiased) SNR
    self.SNR = (10*np.ma.log10(np.ma.sum(self.rawSpectrum, axis=-1) /
                (self.specNoise*self.co["widthSpectrum"]))).filled(-9999)

    if self.co["dealiaseSpectrum"] == True:
        if self.co["dealiaseSpectrum_saveAlsoNonDealiased"] == True:
            self.eta_noDA, self.Ze_noDA, self.W_noDA, self.etaNoiseAve_noDA_TBD, self.etaNoiseStd_noDA_TBD, self.specWidth_noDA, self.skewness_noDA, self.kurtosis_noDA, self.peakVelLeftBorder_noDA, self.peakVelRightBorder_noDA, self.leftSlope_noDA, self.rightSlope_noDA = self._calcEtaZeW(
                self.rawSpectrum, self.H, self.specVel3D, self.specNoise, self.specNoise_std)
            self.qual_noDA = deepcopy(self.qual)
            # can be deleted, is identical to self.etaNoise, because noise is not dealiased.
            del self.etaNoiseAve_noDA_TBD, self.etaNoiseStd_noDA_TBD
        self.rawSpectrum = self._dealiaseSpectrum(self.rawSpectrum)
        # since we don't want spectra from the disturbed 1st range gate to be
        # folded into the second one, peaks in the second one might be
        # incomplete. try to make an entry in the quality mask.
        self.qual["peakMightBeIncomplete"] = np.zeros(
            self._shape2D, dtype=bool)
        self.qual["peakMightBeIncomplete"][:, self.co["firstUsedHeight"]][self.rawSpectrum.mask[:, self.co["firstUsedHeight"],
                                                                                                self.co["widthSpectrum"]+self.co["spectrumBorderMin"][self.co["firstUsedHeight"]]] == False] = True
    # no dealiasing
    else:
        pass

    # final moments from the (possibly dealiased) spectrum
    self.eta, self.Ze, self.W, self.etaNoiseAve, self.etaNoiseStd, self.specWidth, self.skewness, self.kurtosis, self.peakVelLeftBorder, self.peakVelRightBorder, self.leftSlope, self.rightSlope = self._calcEtaZeW(
        self.rawSpectrum, self.H, self.specVel3D, self.specNoise, self.specNoise_std)

    # make bin mask out of quality information
    self.qualityBin, self.qualityDesc = self.getQualityBinArray(self.qual)

    return
def _testMeanW(self, rawSpectrum):
'''
checks whether spectrum mean velocity is unequal to mean velocity (6m s^-1)
'''
mask = deepcopy(rawSpectrum.mask) + self._specBorderMask3D
spec = np.ma.masked_array(rawSpectrum.data, mask)
velocity = np.ma.masked_array(self.specVel3D, self._specBorderMask3D)
Wdiff = np.absolute(np.ma.average(
velocity, axis=-1)-(np.ma.sum(velocity*spec, axis=-1)/np.sum(spec, axis=-1)))
noiseMask = Wdiff.filled(0) < self.co["findPeak_minWdiff"]
return noiseMask, Wdiff.filled(0)
def _testStd(self, rawSpectrum):
'''
checks whether spectrum passes variance limit
'''
mask = deepcopy(rawSpectrum.mask) + self._specBorderMask3D
spec = np.ma.masked_array(rawSpectrum.data, mask)
std = (np.ma.std(spec, axis=-1)/np.ma.mean(spec, axis=-1))
# the 5.7 is because we have typically 5.7 spectra per second and this
# quantitiy was defined with self.co["averagingTime"] instead of
# self.noSpecPerTimestep before
maxStd = self.co["findPeak_minStdPerS"] / \
np.sqrt(self.noSpecPerTimestep/5.7)
return std.filled(0) < maxStd[:, np.newaxis], std.filled(0)
def _findAddtionalPeaks(self, rawSpectrum):
    '''
    This function tries to find additional (secondary) peaks in the
    spectrum.

    NOTE: disabled since it gives too many false positives; kept for
    reference.  Side effect: sets self.co["findAddtionalPeaksThreshold"].

    @parameter rawSpectrum (numpy masked array): spectrum with the
        detected peak unmasked
    @return qual (numpy bool 2D): True where a run of at least
        `findAddtionalPeaksThreshold` consecutive positive bins exists
        outside the detected peak
    '''
    qual = np.zeros(self._shape2D, dtype=bool)
    # invert mask: now only the bins OUTSIDE the detected peak are visible
    rawSpectrum = np.ma.masked_array(rawSpectrum.data, ~rawSpectrum.mask)
    self.co["findAddtionalPeaksThreshold"] = 15
    for tt in range(self.no_t):
        for hh in range(self.no_h):
            if hh in self.co["completelyMaskedHeights"]:
                continue
            # count consecutive unmasked, positive bins
            greaterZero = 0
            for ii in range(self.co["spectrumBorderMin"][hh], self.co["spectrumBorderMax"][hh]):
                if greaterZero >= self.co["findAddtionalPeaksThreshold"]:
                    qual[tt, hh] = True
                if rawSpectrum.mask[tt, hh, ii] == True or rawSpectrum.data[tt, hh, ii] <= 0:
                    greaterZero = 0
                    continue
                else:
                    greaterZero += 1
    return qual
def _cleanUpNoiseMask(self, spectrum):
    """
    11 of 5x5 points in height/time space must have a signal to be valid!
    @parameter spectrum (numpy masked float): spectrum + noiseMask to be applied to the data
    @return - newMask (numpy boolean): numpy boolean noiseMask

    Also sets the quality flag self.qual["spectrumNotProcessed"] for the
    completely masked heights.
    """
    noiseMask = np.all(spectrum.mask, axis=-1)
    newMask = deepcopy(noiseMask)
    # make it bigger to cover edges for 5x5 test, 2 pixel border
    maxs = np.ma.masked_all((self.no_t+4, self.no_h+1))
    maxs[2:-2, 2:-2] = np.ma.masked_array(
        np.ma.argmax(spectrum, axis=-1), noiseMask)[:, 2:30]
    # required number of coherent neighbours; relaxed at the edges of the
    # time/height domain where fewer neighbours exist
    highLimit = 11
    lowLimit = 9
    lowestLimit = 8
    hOffset = self.co["minH"]  # since we don't start at zero height
    # loop through all points...
    for t in np.arange(self.no_t):
        # is it real signal? only if at least 11 of 25 neigbours have signal as well!
        # for h in np.arange(4,28):
        for h in np.arange(2, 30):
            if noiseMask[t, h] == False:
                tSM = t+2  # for subMaxs t needs to be 2 larger due to 2 pixel border! for h not neccesary, 2 pixel border at bottom already there
                subMaxs = maxs[tSM-2:tSM+3, h-2:h+3]
                # shift neighbouring peak positions so the tested peak sits
                # at bin 32 — NOTE(review): limitMaInidces presumably wraps
                # indices at 64 so folded maxima still count; confirm in
                # tools.limitMaInidces
                thisMaxsDiff = 32-maxs[tSM, h]
                subMaxsNormed = limitMaInidces(subMaxs + thisMaxsDiff, 64)
                diffs = np.abs(subMaxsNormed - 32)
                if t in [0, self.no_t-1] or h in [2, 29]:
                    limit = lowestLimit
                elif t in [1, self.no_t-2] or h in [3, 28]:
                    limit = lowLimit
                else:
                    limit = highLimit
                if np.ma.sum(diffs <= self.co["confirmPeak_5x5boxCoherenceTest_maxBinDistane"]) < limit:
                    newMask[t, h] = True
    # kick out heights #0,1,30
    newMask[:, self.co["completelyMaskedHeights"]] = True
    self.qual["spectrumNotProcessed"] = np.zeros(self._shape2D, dtype=bool)
    self.qual["spectrumNotProcessed"][:,
                                      self.co["completelyMaskedHeights"]] = True
    return newMask
def _getPeak(self, spectrum, noSpecs, h):
    """
    get the peak of the spectrum, first getPeakHildebrand is used, if the spectrum is wider than 10 and makeDoubleCheck = True, also getPeakDescendingAve is used and the smaller one is taken!
    @parameter spectrum (numpy float64): (averaged, dealiased) raw data from MRR Raw data
    @parameter noSpecs (numpy float64): number of single spectras which belong to each average spectrum, usually 58* No of averaged spectra
    @parameter h (int): height, for easier debugging
    @return - peakMask (numpy bool): mask that is False inside the peak
    @return - quality["peakTooThinn"] (numpy bool)
    @return - quality["veryWidePeakeUsedSecondPeakAlgorithm"] (numpy bool)
    """
    t = time.time()
    quality = dict()
    specLength = np.shape(spectrum)[-1]

    # get maxima of reduced spectra
    iMax = np.argmax(spectrum, axis=-1)
    iMaxFlat = np.ravel(iMax)
    # arrays don't work, so make them flat
    spectrumFlat = np.reshape(spectrum, (-1, specLength))

    if self.co["getPeak_method"] == "hilde":
        # get peak using Hildebrands method
        firstPeakMask = self._getPeakHildebrand(
            spectrumFlat, iMaxFlat, noSpecs, h)
    elif self.co["getPeak_method"] == "descAve":
        # get peak using the descending average method
        firstPeakMask = self._getPeakDescendingAve(spectrumFlat, iMaxFlat)
    else:
        raise ValueError("Unknown doubleCheckPreference: " +
                         self.co["getPeak_method"])
    peakMask = deepcopy(firstPeakMask)

    # look for wide peak and make a second check with the other method
    if self.co["getPeak_makeDoubleCheck"]:
        doubleCheck = np.sum(np.logical_not(
            firstPeakMask), axis=-1) > specLength * self.co["getPeak_makeDoubleCheck_minPeakWidth"]
        quality["veryWidePeakeUsedSecondPeakAlgorithm"] = doubleCheck
        if np.any(doubleCheck == True):
            #secondPeakMVeryWidePeakeUask = getPeakDescendingAve(spectrumFlat,iMaxFlat)
            secondPeakMask = np.zeros(np.shape(spectrumFlat), dtype=bool)
            if self.co["getPeak_method"] == "hilde":
                # get peak using desc Average method
                secondPeakMask[doubleCheck] = self._getPeakDescendingAve(
                    spectrumFlat[doubleCheck], iMaxFlat[doubleCheck])
            elif self.co["getPeak_method"] == "descAve":
                # get peak using Hildebrands method
                secondPeakMask[doubleCheck] = self._getPeakHildebrand(
                    spectrumFlat[doubleCheck], iMaxFlat[doubleCheck], noSpecs[doubleCheck], h)
            # combining the masks (True = no peak) keeps the SMALLER peak
            peakMask[doubleCheck] = firstPeakMask[doubleCheck] + \
                secondPeakMask[doubleCheck]
    else:
        quality["veryWidePeakeUsedSecondPeakAlgorithm"] = np.zeros(
            specLength, dtype=bool)

    # only peaks which are at least 3 bins wide, remove the others
    tooThinn = np.sum(np.logical_not(peakMask), axis=-
                      1) < self.co["findPeak_minPeakWidth"]
    peakMask[tooThinn] = True
    quality["peakTooThinn"] = tooThinn * (np.sum(~peakMask, axis=-1) != 0)

    if self.co["debug"] > 0:
        print("runtime", time.time()-t, "s")
    # spectrum
    return np.reshape(peakMask, np.shape(spectrum)), quality["peakTooThinn"], quality["veryWidePeakeUsedSecondPeakAlgorithm"]
# get the border indices belonging to the hildebrand limit
def _getPeakHildebrand(self, dataFlat, iMax, noSpecs, h):
    """
    get the peak of the spectrum using Hildebrand algorithm. Note that this routine works
    'the other way around' than e.g. pamtra's or pyart's Hildebrand routine. I.e. we start
    with the full spectrum and remove the largest bins instead of starting with the
    smallest values and adding larger ones. This is more robust for the MRR. also
    getPeak_hildeExtraLimit works better for MRR than the traditional threshold definition from HS74.
    @parameter dataFlat (numpy float64): flat spectrum from MRR Raw data
    @parameter iMax (numpy float64): vector containing indices of the maxima
    @parameter noSpecs (numpy float64): number of spectra of each averaged spectrum
    @parameter h (int): height, for easier debugging
    @return - maskHildebrand (numpy bool): peak mask (False inside the peak)
    """
    # first get the limit reflectivity
    limits = self._noiseHildebrand(dataFlat, noSpecs, h)
    maskHildebrand = np.ones(np.shape(dataFlat), dtype=bool)
    # NOTE(review): iPeakMax / iPeakMin are never used below — leftovers
    iPeakMax = deepcopy(iMax)
    iPeakMin = deepcopy(iMax)
    # not only uses extra limit, but also starts at the peak!, thus spectrum is refolded around peak!
    # then get the edges of the peak as index of the spectrum
    for k in np.arange(iMax.shape[0]):
        # unmask the peak
        maskHildebrand[k, iMax[k]] = False
        # roll so the maximum sits at index 0; walking right/left then
        # wraps naturally around the spectrum edges
        spectrum = np.roll(dataFlat[k], -iMax[k])
        mask = np.roll(maskHildebrand[k], -iMax[k])
        # to the right
        for i in np.arange(1, dataFlat.shape[-1], 1):
            # unmask if above limit (=peak)
            if spectrum[i] > limits[k]*self.co["getPeak_hildeExtraLimit"]:
                mask[i] = False
            # else stop
            else:
                # unmask on last bin if between limits[k]*self.co["getPeak_hildeExtraLimit"] and limits[k], but stop in any case!
                if spectrum[i] > limits[k]:
                    mask[i] = False
                break
        # to the left
        for i in np.arange(dataFlat.shape[-1]-1, 0-1, -1):
            if spectrum[i] > limits[k]*self.co["getPeak_hildeExtraLimit"]:
                mask[i] = False
            else:
                if spectrum[i] > limits[k]:
                    mask[i] = False
                break
        # roll back to the original alignment
        dataFlat[k] = np.roll(spectrum, iMax[k])
        maskHildebrand[k] = np.roll(mask, iMax[k])
    return maskHildebrand
def _noiseHildebrand(self, dataFlat, noSpecs, h, flat=True):
    """
    calculate the minimum reflectivity of the peak (or maximum of the noise) according to Hildebrand and Sekhon (1974)
    @parameter dataFlat (numpy masked array float64): flat spectrum from MRR Raw data
    @parameter noSpecs (numpy float64): number of spectra of each averaged spectrum
    @parameter h (int): height, for easier debugging
    @parameter flat (bool): if False, dataFlat is reshaped to
        (-1, specLength) first and the result back to (time, noH)
    @return - limits (numpy float64): limit reflectivity of each spectrum
    """
    specLength = np.shape(dataFlat)[-1]
    if flat == False:
        dataShape = np.shape(dataFlat)[0]
        dataFlat = np.reshape(dataFlat, (-1, specLength))
    # sort the data
    dataFlat = np.ma.sort(dataFlat, axis=-1)

    # calculate all variances and means (that is cheaper than a loop!)
    # start with whole spectrum, then discard maximum, than second but next maximum etc.
    Dvar = np.zeros(dataFlat.shape)
    Dmean = np.zeros(dataFlat.shape)
    limits = np.zeros(np.shape(dataFlat[..., 0]))
    for i in np.arange(specLength-1, 1, -1):
        Dvar[..., i] = np.ma.var(dataFlat[..., 0:i], axis=-1)
        Dmean[..., i] = np.ma.mean(dataFlat[..., 0:i], axis=-1)
    # calculate the Hildebrand coefficient
    # avoid division by zero for constant sub-spectra
    Dvar[Dvar == 0] = 0.0001
    Coefficient = ((Dmean**2) / Dvar)

    # check where hildebrands assumption (coefficient >= number of
    # averaged spectra) is true; take the largest bin still within noise
    for j in np.arange(np.shape(dataFlat)[0]):
        for i in np.arange(specLength-1, -1, -1):
            if Coefficient[j, i] >= noSpecs[j]:
                limits[j] = dataFlat[j, i-1]
                break
    if flat == False:
        limits = np.reshape(limits, (dataShape, self.co["noH"]))

    return limits
    def _getPeakDescendingAve(self, dataFlat, iMax):
        """
        Find the edges of the peak around each spectrum's maximum.

        Iterates through the _not_ size-sorted spectrum from the maximum to
        the left and to the right and unmasks bins as long as the running
        average (over getPeak_descAveCheckWidth bins) keeps decreasing or
        stays above a noise-derived threshold.

        @parameter dataFlat (numpy float64): flat spectra from MRR raw data,
            shape (spectra, bins)
        @parameter iMax (numpy int): vector containing the index of the
            maximum of each spectrum
        @return - maskDescAve (numpy bool): mask, False where a peak was found
        """
        maskDescAve = np.ones(np.shape(dataFlat), dtype=bool)
        # iterate through spectras:
        for k in np.arange(iMax.shape[0]):
            # roll the spectrum so the maximum sits at index 0; this way the
            # walk to both sides can cross the 0 m s^-1 wrap-around
            rolledSpectrum = np.roll(dataFlat[k], -iMax[k])
            rolledMask = np.roll(maskDescAve[k], -iMax[k])
            # initial running averages directly right/left of the maximum
            meanRightOld = np.ma.mean(
                rolledSpectrum[1:self.co["getPeak_descAveCheckWidth"]+1])
            meanLeftOld = np.ma.mean(
                rolledSpectrum[-1:-(self.co["getPeak_descAveCheckWidth"]+1):-1])
            # stop threshold derived from the smallest bins of the spectrum,
            # scaled by getPeak_descAveMinMeanWeight (noise-level estimate)
            minMeanToBreak = self.co["getPeak_descAveMinMeanWeight"] * np.mean(
                np.sort(dataFlat[k])[0:self.co["getPeak_descAveCheckWidth"]])
            # unmask the peak maximum itself
            rolledMask[0] = False
            # to the right:
            for i in np.arange(1, dataFlat.shape[-1], 1):
                meanRight = np.ma.mean(
                    rolledSpectrum[i:i+self.co["getPeak_descAveCheckWidth"]])
                # is the average still decreasing (or still above the noise
                # threshold)? then the bin belongs to the peak
                if meanRight <= meanRightOld or meanRight > minMeanToBreak:
                    rolledMask[i] = False
                    meanRightOld = meanRight
                else:
                    break
            # to the left
            for i in np.arange(dataFlat.shape[-1]-1, 0-1, -1):
                meanLeft = np.ma.mean(
                    rolledSpectrum[i:i-self.co["getPeak_descAveCheckWidth"]:-1])
                # is the average still decreasing (or still above the noise
                # threshold)? then the bin belongs to the peak
                if meanLeft <= meanLeftOld or meanLeft > minMeanToBreak:
                    rolledMask[i] = False
                    meanLeftOld = meanLeft
                else:
                    break
            # roll back so spectrum and mask line up with the original order
            dataFlat[k] = np.roll(rolledSpectrum, iMax[k])
            maskDescAve[k] = np.roll(rolledMask, iMax[k])
        return maskDescAve
    def _fillInterpolatedPeakGaps(self, specMask):
        '''
        Interpolate gaps of specMask around 0 m s^-1 between spectrumBorderMin
        and spectrumBorderMax in noH heights.

        A peak can be cut by the interpolated region between two neighbouring
        range gates. If a peak touches the border from both sides, the whole
        interpolated part is unmasked; if it touches from one side only, only
        those interpolated bins exceeding the noise level are unmasked.

        returns updated specMask and quality information (True where a gap was
        treated)
        '''
        quality = np.zeros(self._shape2D, dtype=bool)
        for h in range(1, self.co["noH"]):
            # the ones with peaks at both sides around 0 m s^-1!
            peaksAroundZero = (specMask[:, h-1, self.co["spectrumBorderMax"][h-1]-1] == False) * (
                specMask[:, h, self.co["spectrumBorderMin"][h]] == False)
            # unmask the interpolated bins of both adjacent heights
            specMask[:, h, 0:self.co["spectrumBorderMin"]
                     [h]][peaksAroundZero] = False
            specMask[:, h-1, self.co["spectrumBorderMax"]
                     [h-1]:][peaksAroundZero] = False
            # the ones with peak at only one side,
            peaksAroundZeroHalfToLeft = (specMask[:, h-1, self.co["spectrumBorderMax"][h-1]-1] == True) * (
                specMask[:, h, self.co["spectrumBorderMin"][h]] == False)
            # unmask interpolated bins only where the raw spectrum exceeds the noise
            peaksAroundZeroHalfToLeftBMin = (peaksAroundZeroHalfToLeft * (
                self.rawSpectrum.data[:, h, 0:self.co["spectrumBorderMin"][h]] > self.specNoise3D[:, h, 0:self.co["spectrumBorderMin"][h]]).T).T
            # NOTE(review): the noise term indexes height h while the spectrum
            # indexes h-1; the symmetric "HalfToRight" case below uses h-1 for
            # both -- confirm whether this asymmetry is intended
            peaksAroundZeroHalfToLeftBMax = (peaksAroundZeroHalfToLeft * (
                self.rawSpectrum.data[:, h-1, self.co["spectrumBorderMax"][h-1]:] > self.specNoise3D[:, h, self.co["spectrumBorderMax"][h-1]:]).T).T
            specMask[:, h, 0:self.co["spectrumBorderMin"]
                     [h]][peaksAroundZeroHalfToLeftBMin] = False
            specMask[:, h-1, self.co["spectrumBorderMax"]
                     [h-1]:][peaksAroundZeroHalfToLeftBMax] = False
            peaksAroundZeroHalfToRight = (specMask[:, h-1, self.co["spectrumBorderMax"][h-1]-1] == False) * (
                specMask[:, h, self.co["spectrumBorderMin"][h]] == True)
            peaksAroundZeroHalfToRightBMin = (peaksAroundZeroHalfToRight * (
                self.rawSpectrum.data[:, h, 0:self.co["spectrumBorderMin"][h]] > self.specNoise3D[:, h-1, 0:self.co["spectrumBorderMin"][h]]).T).T
            peaksAroundZeroHalfToRightBMax = (peaksAroundZeroHalfToRight * (
                self.rawSpectrum.data[:, h-1, self.co["spectrumBorderMax"][h-1]:] > self.specNoise3D[:, h-1, self.co["spectrumBorderMax"][h-1]:]).T).T
            specMask[:, h, 0:self.co["spectrumBorderMin"]
                     [h]][peaksAroundZeroHalfToRightBMin] = False
            specMask[:, h-1, self.co["spectrumBorderMax"][h-1]:][peaksAroundZeroHalfToRightBMax] = False
            # record for both involved heights that the gap was treated
            quality[:, h] = quality[:, h-1] = peaksAroundZero + \
                peaksAroundZeroHalfToLeft + peaksAroundZeroHalfToRight
        return specMask, quality
    def _dealiaseSpectrum(self, rawSpectrum):
        '''
        dealiase Spectrum

        The spectrum of every height is extended by the spectra of its
        neighbouring heights (64 -> 192 bins). One particularly trustworthy
        peak per time step is located first; all remaining peaks are then
        assigned to heights by following velocity continuity starting from
        that trusted peak.

        input rawSpectrum - masked array (time, height, 64 bins)
        output extendSpectrum with 192 bins
        '''
        self.qual["severeProblemsDuringDA"] = np.zeros(
            self._shape2D, dtype=bool)
        # first locate peaks in the raveled spectrum
        self._allPeaks, self._allPeaksIndices, self._allPeaksMaxIndices, self._allPeaksVelMe, self._allPeaksHeight, self._allPeaksRefV, self._allPeaksZe = self._locatePeaks(
            rawSpectrum)
        # find one peak and its velocity/height you trust
        self._trustedPeakNo, self._trustedPeakHeight, self._trustedPeakVel, self._trustedPeakHeightStart, self._trustedPeakHeightStop = self._getTrustedPeak(
            self._allPeaksZe, self._allPeaksVelMe, self._allPeaksRefV, self._allPeaksMaxIndices, self._allPeaksHeight)
        # now extend spectrum: prepend the spectrum of the height below and
        # append the one of the height above (np.roll along the height axis)
        extendedRawSpectrum = deepcopy(rawSpectrum.data)
        extendedRawSpectrum = np.concatenate((np.roll(
            extendedRawSpectrum, 1, axis=1), extendedRawSpectrum, np.roll(extendedRawSpectrum, -1, axis=1)), axis=2)
        # do not apply to the first range gate (np.roll wraps around)
        extendedRawSpectrum[:, 0, :self.co["widthSpectrum"]] = 0
        # and not to the last one
        extendedRawSpectrum[:, self.co["noH"] -
                            1, 2*self.co["widthSpectrum"]:] = 0
        # start fully masked; the accepted peaks are unmasked further below
        extendedRawSpectrum = np.ma.masked_array(extendedRawSpectrum, True)
        # if wanted, save old (non-dealiased) values
        if self.co["dealiaseSpectrum_saveAlsoNonDealiased"] == True:
            self.specVel_noDA = deepcopy(self.specVel)
            self.specVel3D_noDA = deepcopy(self.specVel3D)
            self.specIndex_noDA = deepcopy(self.specIndex)
            self.no_v_noDA = deepcopy(self.no_v)
        # save new velocities (extended by one Nyquist range on either side)
        self.specVel = np.array(list(self.co["nyqVel"] - self.co["widthSpectrum"]*self.co["nyqVdelta"])+list(
            self.co["nyqVel"])+list(self.co["nyqVel"] + self.co["widthSpectrum"]*self.co["nyqVdelta"]))
        self.specVel3D = np.zeros(np.shape(extendedRawSpectrum))
        self.specVel3D[:] = self.specVel
        self.specIndex = np.arange(3*self.no_v)
        self.no_v = self.no_v * 3
        # extend spectrum to 192 bins and unmask best fitting peaks
        extendedRawSpectrum = self._findHeightsForPeaks(extendedRawSpectrum, self._trustedPeakNo, self._trustedPeakVel, self._trustedPeakHeight,
                                                        self._trustedPeakHeightStart, self._trustedPeakHeightStop, self._allPeaks, self._allPeaksIndices, self._allPeaksVelMe, self._allPeaksHeight)
        if self.co["dealiaseSpectrum_makeCoherenceTest"]:
            # simple method to detect falsely folded peaks, works only for 1-2 outliers
            extendedRawSpectrum = self._deAlCoherence(extendedRawSpectrum)
        # flag spectra whose mask changed with respect to the original one
        self.qual["spectrumIsDealiased"] = np.all(
            extendedRawSpectrum.mask[:, :, self.co["widthSpectrum"]:2*self.co["widthSpectrum"]] != rawSpectrum.mask[:, :], axis=-1)
        # still we don't want peaks at height 0,1,31
        extendedRawSpectrum.mask[:, self.co["completelyMaskedHeights"]] = True
        return extendedRawSpectrum
def _locatePeaks(self, rawSpectrum):
'''
ravel rawSpectrum and try to find one peak per height
returns time dictonaries with:
allPeaks - time dictonary with lists of the spectral reflectivities for each peak
allPeaksIndices - related indices
allPeaksMaxIndices - time dictonary maximum of each peak
allPeaksVelMe - first guess peak velocity based on the last bin
allPeaksHeight - first guess peak height based on the last bin
allPeaksRefV - expected velocity of each peak based on Ze according to theory
allPeaksZe - time dictonary with lists of first guess Ze for each peak
'''
allPeaks = dict()
allPeaksIndices = dict()
allPeaksMaxIndices = dict()
allPeaksVelMe = dict()
allPeaksHeight = dict()
allPeaksRefV = dict()
allPeaksZe = dict()
# get velocities of spectrum. we start negative, because first guess height is always defualt height of most right bin of peak
velMe = np.array(list(
self.co["nyqVel"] - self.co["widthSpectrum"]*self.co["nyqVdelta"])+list(self.co["nyqVel"]))
for t in np.arange(self.no_t):
completeSpectrum = self.rawSpectrum[t].ravel()
# skip if there are no peaks in the timestep
if np.all(completeSpectrum.mask) == True:
if self.co["debug"] > 4:
'_locatePeaks: nothing to do at', t
continue
deltaH = self.H[t, 15] - self.H[t, 14]
peaks = list()
peaksIndices = list()
peaksMaxIndices = list()
peaksVelMe = list()
peaksHeight = list()
peaksVref = list()
peaksZe = list()
peakTmp = list()
peakTmpInd = list()
peaksStartIndices = list()
peaksEndIndices = list()
truncatingPeak = False
# go through all bins
for ii, spec in enumerate(completeSpectrum):
# found peak!
withinPeak = (completeSpectrum.mask[ii] == False) and (
truncatingPeak == False)
if withinPeak:
peakTmp.append(spec)
peakTmpInd.append(ii)
# if the peak length is now larger than the raw spectrum width, then this peak has
# wrapped around the entire width. Flag will cause the peak to be split in two, because
# the next step within the loop through completeSpectrum will have withinPeak False.
if len(peakTmp) >= self.co["widthSpectrum"]:
truncatingPeak = True
warnings.warn('Truncated peak early. Masked area has wrapped around spectrum width at ' +
'timestemp ' + str(t) + ', bin number ' + str(ii))
# 3found no peak, but teh last one has to be processed
elif len(peakTmp) >= self.co["findPeak_minPeakWidth"]:
# get the height of the LAST entry of the peak, uses int division // !
peakTmpHeight = peakTmpInd[-1]//self.co["widthSpectrum"]
# reconstruct the non folded indices shifted by 64! since peakTmpInd[-1] is reference
orgIndex = np.arange(peakTmpInd[-1] % self.co["widthSpectrum"]-len(
peakTmpInd), peakTmpInd[-1] % self.co["widthSpectrum"])+1+self.co["widthSpectrum"]
# calculate a first guess Ze
etaSumTmp = np.sum(
peakTmp * np.array((self.co["mrrCalibConst"] * (peakTmpHeight**2 * deltaH)) / (1e20), dtype=float))
# in rare cases, Ze is below Zero, maybey since the wrong peak is examined?
if etaSumTmp <= 0:
warnings.warn('negative (linear) Ze occured during dealiasing, peak removed at timestep '+str(
t)+', bin number ' + str(ii)+', most likely at height ' + str(peakTmpHeight))
self.qual["severeProblemsDuringDA"][t,
peakTmpHeight] = True
peakTmp = list()
peakTmpInd = list()
continue
ZeTmp = 1e18*(self.co["lamb"]**4 *
etaSumTmp/(np.pi**5*self.co["K2"]))
# guess doppler velocity
peakTmpSnowVel = self.co['dealiaseSpectrum_Ze-vRelationSnowA'] * \
ZeTmp**self.co['dealiaseSpectrum_Ze-vRelationSnowB']
peakTmpRainVel = self.co['dealiaseSpectrum_Ze-vRelationRainA'] * \
ZeTmp**self.co['dealiaseSpectrum_Ze-vRelationRainB']
peakTmpRefVel = (peakTmpSnowVel + peakTmpRainVel)/2.
# save other features
peaksVref.append(peakTmpRefVel)
peaks.append(peakTmp)
peaksIndices.append(peakTmpInd)
peaksStartIndices.append(peakTmpInd[0])
peaksEndIndices.append(peakTmpInd[-1])
peaksMaxIndices.append(np.argmax(peakTmp)+ii-len(peakTmp))
peaksHeight.append(peakTmpHeight)
peaksVelMe.append(
np.sum((velMe[orgIndex[0]:orgIndex[-1]+1]*peakTmp))/np.sum(peakTmp))
peaksZe.append(ZeTmp)
peakTmp = list()
peakTmpInd = list()
truncatingPeak = False
# small peaks can show up again due to dealiasing, get rid of them:
elif len(peakTmp) > 0 and len(peakTmp) < self.co["findPeak_minPeakWidth"]:
peakTmp = list()
peakTmpInd = list()
truncatingPeak = False
# no peak
else:
continue
# we want only ONE peak per range gate!
if self.co["dealiaseSpectrum_maxOnePeakPerHeight"]:
# get list with peaks, whcih are too much
peaksTbd = self._maxOnePeakPerHeight(
t, peaksStartIndices, peaksEndIndices, peaksZe)
# remove them
for peakTbd in np.sort(peaksTbd)[::-1]:
peaks.pop(peakTbd)
peaksIndices.pop(peakTbd)
peaksMaxIndices.pop(peakTbd)
peaksVelMe.pop(peakTbd)
peaksHeight.pop(peakTbd)
peaksVref.pop(peakTbd)
peaksZe.pop(peakTbd)
# if anything was found, save it
if len(peaks) > 0:
allPeaks[t] = peaks
allPeaksIndices[t] = peaksIndices
allPeaksMaxIndices[t] = peaksMaxIndices
allPeaksVelMe[t] = peaksVelMe
allPeaksHeight[t] = peaksHeight
allPeaksRefV[t] = peaksVref
allPeaksZe[t] = peaksZe
# end for t
return allPeaks, allPeaksIndices, allPeaksMaxIndices, allPeaksVelMe, allPeaksHeight, allPeaksRefV, allPeaksZe
def _maxOnePeakPerHeight(self, t, peaksStartIndices, peaksEndIndices, peaksZe):
'''
some height will contain more than one peak, try to find them
returns a list with peaks to be delteted
'''
peaksStartIndices = np.array(peaksStartIndices)
peaksEndIndices = np.array(peaksEndIndices)
peaksZeCopy = np.array(peaksZe)
peaksTbd = list()
for pp, peakStart in enumerate(peaksStartIndices):
deletePeaks = False
if peakStart == -9999:
continue # peak has been deleted
followingPeaks = (peaksStartIndices >= peakStart) * \
(peaksStartIndices < peakStart+(1.5*self.co["widthSpectrum"]))
if (np.sum(followingPeaks) >= 3):
# if you have three peaks so close together it is cristal clear:
deletePeaks = True
elif (np.sum(followingPeaks) == 2):
# if you have only two they must be close together
secondPeak = np.where(followingPeaks)[0][1]
deletePeaks = (
peaksEndIndices[secondPeak] - peakStart < self.co["widthSpectrum"]/2.)
if deletePeaks == True:
# don't consider more than 3! the rest is hopefully caught by next loop!
Indices = np.where(followingPeaks)[0][0:3]
smallestZe = Indices[np.argmin(peaksZeCopy[Indices])]
peaksTbd.append(smallestZe)
# these are needed for the loop, so they are only masked, not deleted
peaksStartIndices[peaksTbd[-1]] = -9999
peaksEndIndices[peaksTbd[-1]] = -9999
peaksZeCopy[peaksTbd[-1]] = 9999
return peaksTbd
    def _getTrustedPeak(self, allPeaksZe, allPeaksVelMe, allPeaksRefV, allPeaksMaxIndices, allPeaksHeight):
        '''
        find heigth and position of most trustfull peak

        The trusted peak is the one whose (possibly refolded) velocity agrees
        best with the velocity expected from its Ze, while exceeding a minimum
        Ze threshold and not lying in a disturbed height.

        allPeaksZe - time dictonary with lists of first guess Ze for each peak
        allPeaksVelMe - first guess peak velocity based on the last bin
        allPeaksRefV - expected velocity of each peak based on Ze according to theory
        allPeaksMaxIndices - time dictonary maximum of each peak
        allPeaksHeight - first guess peak height based on the last bin
        returns 1D time arrays
        trustedPeakNo - no of trusted peaks (starting at bottom), stays -9999
            for time steps without any peaks
        trustedPeakHeight - estimated height
        trustedPeakVel - estimated velocity
        trustedPeakHeightStart, trustedPeakHeightStop - start and stop indices from 0:192 range
        '''
        trustedPeakHeight = np.zeros(self.no_t, dtype=int)
        trustedPeakVel = np.zeros(self.no_t)
        trustedPeakNo = np.ones(self.no_t, dtype=int)*-9999
        trustedPeakHeightStart = np.zeros(self.no_t, dtype=int)
        trustedPeakHeightStop = np.zeros(self.no_t, dtype=int)
        for t in np.arange(self.no_t):
            # now process the found peaks
            if t in list(self._allPeaks.keys()):
                # the trusted peak needs a certain minimal reflectivity to avoid confusion by interference etc, get the minimum threshold
                # NOTE(review): averageZe is computed but never used
                averageZe = np.sum(allPeaksZe[t])/float(len(allPeaksZe[t]))
                minZe = quantile(
                    self._allPeaksZe[t], self.co['dealiaseSpectrum_trustedPeakminZeQuantile'])
                peaksVelMe = np.array(allPeaksVelMe[t])
                # the three dealiasing options: shifted up, unchanged, shifted down
                peaksVels = np.array([peaksVelMe+self.co["nyqVdelta"]*self.co["widthSpectrum"],
                                      peaksVelMe, peaksVelMe-self.co["nyqVdelta"]*self.co["widthSpectrum"]])
                refVels = np.array(
                    [allPeaksRefV[t], allPeaksRefV[t], allPeaksRefV[t]])
                # difference between the candidate velocities (dealiasing up, static or
                # down) and the expected Ze-based velocity; the trusted peak minimizes it
                diffs = np.abs(peaksVels - refVels)
                # mask small peaks, peaks which are in the first processed range gate and peaks which are in self.co["dealiaseSpectrum_heightsWithInterference"] (e.g. disturbed by interference)
                diffs = np.ma.masked_array(diffs, [allPeaksZe[t] <= minZe]*3)
                tripplePeaksMaxIndices = np.array(3*[allPeaksMaxIndices[t]])
                # the first used height is a bit special, often peaks are incomplete, try to catch them to avoid trusting them
                diffs = np.ma.masked_array(diffs, (tripplePeaksMaxIndices >= self.co["firstUsedHeight"]*self.co["widthSpectrum"])*(
                    tripplePeaksMaxIndices < self.co["firstUsedHeight"]*(self.co["widthSpectrum"]*1.5)))
                # now mask all other peaks which are found unlikely
                for hh in self.co["dealiaseSpectrum_heightsWithInterference"]+self.co["completelyMaskedHeights"]:
                    diffs = np.ma.masked_array(diffs, (tripplePeaksMaxIndices >= hh*self.co["widthSpectrum"])*(
                        tripplePeaksMaxIndices < (hh+1)*self.co["widthSpectrum"]))
                # if we managed to mask all peaks, we have no choice but taking all
                if np.all(diffs.mask == True):
                    diffs.mask[:] = False
                    if self.co["debug"] > 4:
                        print("managed to mask all peaks at " + str(t) +
                              " while trying to find most trustfull one during dealiasing.")
                # the minimum velocity difference tells whether dealiasing goes up, down or is not applied
                UpOrDn = np.ma.argmin(np.ma.min(diffs, axis=1))
                # get parameters for trusted peaks
                trustedPeakNo[t] = np.ma.argmin(diffs[UpOrDn])
                # -1 to ensure that updraft is negative now!!
                trustedPeakHeight[t] = allPeaksHeight[t][trustedPeakNo[t]] + UpOrDn-1
                trustedPeakSpecShift = trustedPeakHeight[t] * \
                    self.co["widthSpectrum"] - self.co["widthSpectrum"]
                trustedPeakVel[t] = peaksVels[UpOrDn][trustedPeakNo[t]]
                # transform back to height related spectrum
                # in dimension of 0:192 #spectrum is extended to the left
                trustedPeakHeightIndices = (np.array(
                    self._allPeaksIndices[t][trustedPeakNo[t]])-trustedPeakSpecShift)[[0, -1]]
                trustedPeakHeightStart[t] = trustedPeakHeightIndices[0]
                trustedPeakHeightStop[t] = trustedPeakHeightIndices[-1]
        return trustedPeakNo, trustedPeakHeight, trustedPeakVel, trustedPeakHeightStart, trustedPeakHeightStop
    def _findHeightsForPeaks(self, extendedRawSpectrum, trustedPeakNo, trustedPeakVel, trustedPeakHeight, trustedPeakHeightStart, trustedPeakHeightStop, allPeaks, allPeaksIndices, allPeaksVelMe, allPeaksHeight):
        '''
        try to find the height of each peak by starting at the trusted peak

        Starting from the trusted peak, the remaining peaks of a time step are
        processed downwards and then upwards; for each peak the folding option
        (up/static/down) whose velocity is closest to the previously accepted
        peak is chosen. If the target height is already occupied, the second
        best option is tried.

        extendedRawSpectrum - extended to 192 bins, returned with new, dealiased mask
        trustedPeakNo - trusted peak number of all peaks in time step
        trustedPeakVel - most likely velocity
        trustedPeakHeight - most likely height
        trustedPeakHeightStart, trustedPeakHeightStop - start/stop of peaks
        allPeaks - time dictonary with lists of the spectral reflectivities for each peak
        allPeaksIndices - related indices
        allPeaksVelMe - first guess peak velocity based on the last bin
        allPeaksHeight - first guess peak height based on the last bin
        '''
        for t in np.arange(self.no_t):
            if t in list(self._allPeaks.keys()):
                # unmask the trusted peak itself
                extendedRawSpectrum[t, trustedPeakHeight[t],
                                    trustedPeakHeightStart[t]:trustedPeakHeightStop[t]+1].mask = False
                peaksVelMe = np.array(allPeaksVelMe[t])
                # get all three possible velocities
                peaksVels = np.array([peaksVelMe+self.co["nyqVdelta"]*self.co["widthSpectrum"],
                                      peaksVelMe, peaksVelMe-self.co["nyqVdelta"]*self.co["widthSpectrum"]])
                formerPeakVel = trustedPeakVel[t]
                # loop through all peaks, starting at the trusted one
                for jj in list(range(trustedPeakNo[t]-1, -1, -1))+list(range(trustedPeakNo[t]+1, len(allPeaks[t]))):
                    # To combine ascending and descending loop in one:
                    # restart from the trusted velocity when switching direction
                    if jj == trustedPeakNo[t]+1:
                        formerPeakVel = trustedPeakVel[t]
                    # go up, stay or down? for which option the difference to the former (trusted) peak is smallest.
                    UpOrDn = np.argmin(
                        np.abs(peaksVels[:, jj] - formerPeakVel))
                    # change height, indices and velocity accordingly
                    thisPeakHeight = allPeaksHeight[t][jj] + UpOrDn-1
                    if thisPeakHeight not in list(range(self.co["noH"])):
                        warnings.warn('Dealiasing failed! peak boundaries excced max/min height. time step '+str(
                            t)+', peak number ' + str(jj)+', tried to put at height ' + str(thisPeakHeight))
                        self.qual["severeProblemsDuringDA"][t] = True
                        continue
                    thisPeakSpecShift = thisPeakHeight * \
                        self.co["widthSpectrum"] - self.co["widthSpectrum"]
                    thisPeakVel = peaksVels[UpOrDn][jj]
                    thisPeakHeightIndices = np.array(
                        allPeaksIndices[t][jj])-thisPeakSpecShift
                    # NOTE(review): no `continue` after this warning, execution
                    # proceeds although the indices fall outside the spectrum -- confirm
                    if np.any(thisPeakHeightIndices < 0) or np.any(thisPeakHeightIndices >= 3*self.co["widthSpectrum"]):
                        warnings.warn('Dealiasing failed! peak boundaries fall out of spectrum. time step '+str(
                            t)+', peak number ' + str(jj)+', most likely at height ' + str(thisPeakHeight))
                        self.qual["severeProblemsDuringDA"][t] = True
                    # check whether there is already a peak in the found height!
                    if np.all(extendedRawSpectrum[t, thisPeakHeight].mask == True):
                        if thisPeakHeight >= self.co["noH"] or thisPeakHeight < 0:
                            warnings.warn('Dealiasing reached max/min height... time step '+str(
                                t)+', peak number ' + str(jj)+', most likely at height ' + str(thisPeakHeight))
                            self.qual["severeProblemsDuringDA"][t] = True
                            continue
                        # only if there is no peak yet!!
                        extendedRawSpectrum[t, thisPeakHeight, thisPeakHeightIndices[0]:thisPeakHeightIndices[-1]+1].mask = False
                        formerPeakVel = thisPeakVel
                    # if there is already a peak in the height, repeat the process, but take the second likely height/velocity
                    else:
                        if self.co["debug"] > 4:
                            print('DA: there is already a peak in found height, take second choice',
                                  t, jj, thisPeakHeight, trustedPeakNo[t], trustedPeakHeight)
                        # otherwise take second choice!
                        formerPeakVelList = np.array([formerPeakVel]*3)
                        formerPeakVelList[UpOrDn] = 1e10  # make extremely big
                        UpOrDn2 = np.ma.argmin(
                            np.abs(peaksVels[:, jj] - formerPeakVelList))
                        thisPeakHeight = allPeaksHeight[t][jj] + UpOrDn2-1
                        if thisPeakHeight not in list(range(self.co["noH"])):
                            warnings.warn('Dealiasing step 2 failed! peak boundaries excced max/min height. time step '+str(
                                t)+', peak number ' + str(jj)+', tried to put at height ' + str(thisPeakHeight))
                            self.qual["severeProblemsDuringDA"][t] = True
                            continue
                        thisPeakSpecShift = thisPeakHeight * \
                            self.co["widthSpectrum"] - self.co["widthSpectrum"]
                        thisPeakVel = peaksVels[UpOrDn2][jj]
                        thisPeakHeightIndices = np.array(
                            allPeaksIndices[t][jj])-thisPeakSpecShift
                        # NOTE(review): again no `continue` after this warning -- confirm
                        if np.any(thisPeakHeightIndices < 0) or np.any(thisPeakHeightIndices >= 3*self.co["widthSpectrum"]):
                            warnings.warn('Dealiasing step 2 failed! peak boundaries fall out of spectrum. time step '+str(
                                t)+', peak number ' + str(jj)+', most likely at height ' + str(thisPeakHeight))
                            self.qual["severeProblemsDuringDA"][t] = True
                        if thisPeakHeight >= self.co["noH"] or thisPeakHeight < 0:
                            warnings.warn('Dealiasing reached max/min height... time step '+str(
                                t)+', peak number ' + str(jj)+', most likely at height ' + str(thisPeakHeight))
                            self.qual["severeProblemsDuringDA"][t] = True
                            continue
                        # check again whether there is already a peak in the spectrum
                        if np.all(extendedRawSpectrum[t, thisPeakHeight].mask == True):
                            # next try
                            extendedRawSpectrum[t, thisPeakHeight, thisPeakHeightIndices[0]:thisPeakHeightIndices[-1]+1].mask = False
                            formerPeakVel = thisPeakVel
                        # if yes, give up
                        else:
                            warnings.warn('Could not find height of peak! time step '+str(
                                t)+', peak number ' + str(jj)+', most likely at height ' + str(thisPeakHeight))
                            self.qual["severeProblemsDuringDA"][t] = True
        return extendedRawSpectrum
def _deAlCoherence(self, newSpectrum):
'''
make sure no weired foldings happend by looking for big jumps in the height-averaged velocity
if two jumps very closely together (<=3 peaks inbetween) are found, teh peaks inbetween are corrected
can make it worse if dealiasing produces zig-zag patterns.
'''
self.qual["DAdirectionCorrectedByCoherenceTest"] = np.zeros(
self._shape2D, dtype=bool)
meanVelocity = np.ma.average(np.ma.sum(
newSpectrum*self.specVel, axis=-1)/np.ma.sum(newSpectrum, axis=-1), axis=-1)
velDiffs = np.diff(meanVelocity)
# find velocity jumps
velDiffsBig = np.where(
velDiffs > self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"])[0]
velDiffsSmall = np.where(
velDiffs < -self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"])[0]
# check whether there is an opposite one close by and collect time steps to be refolded
foldUp = list()
for ll in velDiffsBig:
if ll+1 in velDiffsSmall:
foldUp.append(ll+1)
continue
if ll+2 in velDiffsSmall:
foldUp.append(ll+1)
foldUp.append(ll+2)
continue
if ll+3 in velDiffsSmall:
foldUp.append(ll+1)
foldUp.append(ll+2)
foldUp.append(ll+3)
updatedSpectrumMask = deepcopy(newSpectrum.mask)
for tt in foldUp:
updatedSpectrumMask[tt] = np.roll(updatedSpectrumMask[tt].ravel(
), 2 * self.co["widthSpectrum"]).reshape((self.co["noH"], 3*self.co["widthSpectrum"]))
# avoid that something is folded into the highest range gate
updatedSpectrumMask[tt, 0, :2*self.co["widthSpectrum"]] = True
self.qual["DAdirectionCorrectedByCoherenceTest"][tt, :] = True
if self.co["debug"] > 4:
print('coherenceTest corrected dealiasing upwards:', foldUp)
newSpectrum = np.ma.masked_array(newSpectrum.data, updatedSpectrumMask)
# now the same for the other folding direction
meanVelocity = np.ma.average(np.ma.sum(
newSpectrum*self.specVel, axis=-1)/np.ma.sum(newSpectrum, axis=-1), axis=-1)
velDiffs = np.diff(meanVelocity)
# find very big differences
velDiffsBig = np.where(
velDiffs > self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"])[0]
velDiffsSmall = np.where(
velDiffs < -self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"])[0]
foldDn = list()
# check whether there is an opposite one close by and collect time steps to be refolded
for ll in velDiffsSmall:
if ll+1 in velDiffsBig:
foldDn.append(ll+1)
continue
if ll+2 in velDiffsBig:
foldDn.append(ll+1)
foldDn.append(ll+2)
continue
if ll+3 in velDiffsBig:
foldDn.append(ll+1)
foldDn.append(ll+2)
foldDn.append(ll+2)
updatedSpectrumMask = deepcopy(newSpectrum.mask)
# change all peaks accordingly
for tt in foldDn:
# roll the mask!
updatedSpectrumMask[tt] = np.roll(updatedSpectrumMask[tt].ravel(
), -2*self.co["widthSpectrum"]).reshape((self.co["noH"], 3*self.co["widthSpectrum"]))
# avoid that something is folded into the lowest range gate
updatedSpectrumMask[tt, -1, -2*self.co["widthSpectrum"]:] = True
self.qual["DAdirectionCorrectedByCoherenceTest"][tt, :] = True
if self.co["debug"] > 4:
print('coherenceTest corrected dealiasing Donwards:', foldDn)
newSpectrum = np.ma.masked_array(newSpectrum.data, updatedSpectrumMask)
# this method is very incompelte, so save still odd looking peaks in the quality mask:
# first, collect all height which should be treated, we don't want to find jumps of the interpolated area!:
includedHeights = list(set(range(self.co["maxH"])).difference(set(
self.co["completelyMaskedHeights"]+self.co["dealiaseSpectrum_heightsWithInterference"])))
# now get the mean velocity of the profile
meanVelocity = np.ma.average(np.ma.sum(
newSpectrum[:, includedHeights]*self.specVel, axis=-1)/np.ma.sum(newSpectrum[:, includedHeights], axis=-1), axis=-1)
velDiffs = np.abs(np.diff(meanVelocity))
# find all steps exceeding a min velocity threshold
crazyVelDiffs = np.where(
velDiffs > self.co["dealiaseSpectrum_makeCoherenceTest_velocityThreshold"])[0]
self.qual["DAbigVelocityJumpDespiteCoherenceTest"] = np.zeros(
self._shape2D, dtype=bool)
# surrounding data has to be masked as well, take +- self.co["dealiaseSpectrum_makeCoherenceTest_maskRadius"] (default 20min) around suspicous data
for crazyVelDiff in crazyVelDiffs:
self.qual["DAbigVelocityJumpDespiteCoherenceTest"][crazyVelDiff-self.co["dealiaseSpectrum_makeCoherenceTest_maskRadius"] :crazyVelDiff+self.co["dealiaseSpectrum_makeCoherenceTest_maskRadius"]+1, :] = True
return newSpectrum
    def _calcEtaZeW(self, rawSpectra, heights, velocities, noise, noise_std):
        '''
        calculate the spectral moements and other spectral variables

        @parameter rawSpectra (masked array): spectral reflectivities (time, height, bin)
        @parameter heights: heights used in the radar equation, (time, height)
        @parameter velocities: Doppler velocity of each spectral bin
        @parameter noise: averaged spectral noise level
        @parameter noise_std: standard deviation of the spectral noise
        @return eta, Ze [dB], W, etaNoiseAve, etaNoiseStd, specWidth,
            skewness, kurtosis, peakVelLeftBorder, peakVelRightBorder,
            leftSlope, rightSlope; masked results are filled with -9999
        '''
        deltaH = oneD2twoD(
            heights[..., 15]-heights[..., 14], heights.shape[-1], 1)
        # transpose to make the broadcasting of the multiplication work
        eta = (rawSpectra.data.T * np.array(
            (self.co["mrrCalibConst"] * (heights**2 / deltaH)) / (1e20), dtype=float).T).T
        eta = np.ma.masked_array(eta, rawSpectra.mask)
        etaNoiseAve = noise * \
            (self.co["mrrCalibConst"] * (heights**2 / deltaH)) / 1e20
        etaNoiseStd = noise_std * \
            (self.co["mrrCalibConst"] * (heights**2 / deltaH)) / 1e20
        # calculate Ze
        Ze = 1e18*(self.co["lamb"]**4*np.ma.sum(eta,
                                                axis=-1)/(np.pi**5*self.co["K2"]))
        Ze = (10*np.ma.log10(Ze)).filled(-9999)
        #Znoise = 1e18*(self.co["lamb"]**4*(etaNoise*self.co["widthSpectrum"])/(np.pi**5*self.co["K2"]))
        #Znoise = 10*np.ma.log10(Znoise).filled(-9999)
        # no slicing neccesary due to mask! defining average value "my"
        my = np.ma.sum(velocities*rawSpectra, axis=-1) / \
            np.ma.sum(rawSpectra, axis=-1)
        # normed weights
        P = (rawSpectra.T/np.ma.sum(rawSpectra, axis=-1).T).T
        x = velocities
        # central moments, see http://mathworld.wolfram.com/CentralMoment.html
        # T is neccessary due to different dimensions
        mom2 = np.ma.sum(P*(x.T-my.T).T**2, axis=-1)
        mom3 = np.ma.sum(P*(x.T-my.T).T**3, axis=-1)
        mom4 = np.ma.sum(P*(x.T-my.T).T**4, axis=-1)
        # average fall velocity is my
        W = my.filled(-9999)
        # spec width is weighted std
        specWidth = np.sqrt(mom2).filled(-9999)
        # http://mathworld.wolfram.com/Skewness.html
        skewness = (mom3/mom2**(3./2.)).filled(-9999)
        # http://mathworld.wolfram.com/Kurtosis.html
        kurtosis = (mom4/mom2**(2.)).filled(-9999)
        # get velocity at borders and max of peak
        peakVelLeftBorder = self.specVel[np.argmin(rawSpectra.mask, axis=-1)]
        peakVelRightBorder = self.specVel[len(
            self.specVel) - np.argmin(rawSpectra.mask[..., ::-1], axis=-1) - 1]
        peakVelMax = self.specVel[np.argmax(rawSpectra.filled(-9999), axis=-1)]
        # get the according indices
        peakArgLeftBorder = np.argmin(rawSpectra.mask, axis=-1)
        peakArgRightBorder = len(
            self.specVel) - np.argmin(rawSpectra.mask[..., ::-1], axis=-1) - 1
        # to find the entries we have to flatten everything
        etaSpectraFlat = eta.reshape((eta.shape[0]*eta.shape[1], eta.shape[2]))
        # now get the according values
        peakEtaLeftBorder = 10*np.log10(etaSpectraFlat[list(range(
            etaSpectraFlat.shape[0])), peakArgLeftBorder.ravel()].reshape(self._shape2D))
        peakEtaRightBorder = 10*np.log10(etaSpectraFlat[list(range(
            etaSpectraFlat.shape[0])), peakArgRightBorder.ravel()].reshape(self._shape2D))
        peakEtaMax = 10*np.log10(np.max(eta.filled(-9999), axis=-1))
        # slopes of the peak edges in dB per (m s^-1)
        leftSlope = (peakEtaMax - peakEtaLeftBorder) / \
            (peakVelMax - peakVelLeftBorder)
        rightSlope = (peakEtaMax - peakEtaRightBorder) / \
            (peakVelMax - peakVelRightBorder)
        # fill positions without signal
        peakVelLeftBorder[Ze == -9999] = -9999
        peakVelRightBorder[Ze == -9999] = -9999
        leftSlope[Ze == -9999] = -9999
        rightSlope[Ze == -9999] = -9999
        leftSlope[np.isnan(leftSlope)] = -9999
        rightSlope[np.isnan(rightSlope)] = -9999
        return eta, Ze, W, etaNoiseAve, etaNoiseStd, specWidth, skewness, kurtosis, peakVelLeftBorder, peakVelRightBorder, leftSlope, rightSlope
def getQualityBinArray(self, qual):
'''
convert the bool quality masks to one binary array
'''
binQual = np.zeros(self._shape2D, dtype=int)
qualFac = dict()
description = ''
description += 'A) usually, the following erros can be ignored (no. is position of bit): '
qualFac["interpolatedSpectrum"] = 0b1
description += '1) spectrum interpolated around 0 and 12 m s^-1 '
qualFac["filledInterpolatedPeakGaps"] = 0b10
description += '2) peak streches over interpolated part '
qualFac["spectrumIsDealiased"] = 0b100
description += '3) peak is dealiased '
qualFac["usedSecondPeakAlgorithmDueToWidePeak"] = 0b1000
description += '4) first Algorithm to determine peak failed, used backup '
qualFac["DAdirectionCorrectedByCoherenceTest"] = 0b10000
description += '5) dealiasing went wrong, but is corrected '
description += 'B) reasons why a spectrum does NOT contain a peak: '
qualFac["incompleteSpectrum"] = 0b10000000
description += '8) spectrum was incompletely recorded '
qualFac["spectrumVarianceTooLowForPeak"] = 0b100000000
description += '9) the variance test indicated no peak '
qualFac["spectrumNotProcessed"] = 0b1000000000
description += '10) spectrum is not processed due to according setting '
qualFac["peakTooThinn"] = 0b10000000000
description += '11) peak removed since not wide enough '
qualFac["peakRemovedByCoherenceTest"] = 0b100000000000
description += '12) peak removed, because too few neighbours show signal, too '
description += "C) thinks went seriously wrong, don't use data with these codes"
qualFac["peakMightBeIncomplete"] = 0b1000000000000000
description += '16) peak is at the very border to bad data '
qualFac["DAbigVelocityJumpDespiteCoherenceTest"] = 0b10000000000000000
description += '17) in this area there are still strong velocity jumps, indicates failed dealiasing '
qualFac["severeProblemsDuringDA"] = 0b100000000000000000
description += '18) during dealiasing, a warning was triggered, applied to whole columm '
for key in list(qual.keys()):
binQual[:] = binQual[:] + (qual[key] * qualFac[key])
return binQual, description
def writeNetCDF(self, fname, varsToSave="all", ncForm="NETCDF3_CLASSIC"):
    '''
    Write the processed results to a netCDF file.

    Input:
    fname: str filename with path
    varsToSave: list of variables of the profile to be saved. "all" saves all implemented ones
    ncForm: str netcdf file format, possible values are NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4 for the python-netcdf4 package, NETCDF3 takes the "old" Scientific.IO.NetCDF module, which is a bit more convenient to install or as fall back option python-netcdf3
    '''
    # pyNc is True when the python-netcdf4 interface is used, False for the
    # legacy Scientific.IO.NetCDF / python-netcdf3 interface.
    nc, pyNc = _get_netCDF_module(ncForm=ncForm)
    # option dealiaseSpectrum_saveAlsoNonDealiased makes only sense if the spectrum is really dealiased:
    saveAlsoNonDealiased = self.co["dealiaseSpectrum_saveAlsoNonDealiased"] and self.co["dealiaseSpectrum"]
    if pyNc:
        cdfFile = nc.Dataset(fname, "w", format=ncForm)
    else:
        cdfFile = nc.NetCDFFile(fname, "w")
    # write global meta data attributes
    cdfFile.title = 'Micro rain radar data processed with IMProToo'
    cdfFile.comment = 'IMProToo has been developed for improved snow measurements. Note that this data has been processed regardless of precipitation type.'
    cdfFile.institution = self.co["ncInstitution"]
    cdfFile.contact_person = self.co["ncCreator"]
    cdfFile.source = 'MRR-2'
    cdfFile.location = self.co["ncLocation"]
    cdfFile.history = 'Created with IMProToo v' + __version__
    cdfFile.author = 'Max Maahn'
    cdfFile.processing_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
    cdfFile.reference = 'Maahn, M. and Kollias, P., 2012: Improved Micro Rain Radar snow measurements using Doppler spectra post-processing, Atmos. Meas. Tech., 5, 2661-2673, doi:10.5194/amt-5-2661-2012. '
    cdfFile.properties = str(self.co)
    cdfFile.mrrHeader = str(self.header)
    # create dimensions
    cdfFile.createDimension('time', int(self.no_t))
    cdfFile.createDimension('range', int(self.no_h))
    cdfFile.createDimension('velocity', int(self.no_v))
    if saveAlsoNonDealiased:
        # extra velocity axis for the original (non-dealiased) spectra
        cdfFile.createDimension('velocity_noDA', int(self.no_v_noDA))
    ncShape2D = ("time", "range",)
    ncShape3D = ("time", "range", "velocity",)
    ncShape3D_noDA = ("time", "range", "velocity_noDA",)
    fillVDict = dict()
    # little cheat to avoid hundreds of if, else: fill_value is only
    # supported by the python-netcdf4 createVariable call.
    if pyNc:
        fillVDict["fill_value"] = self.missingNumber

    # coordinate variables
    nc_time = cdfFile.createVariable('time', 'i', ('time',), **fillVDict)
    nc_time.description = "measurement time. Following Meteks convention, the dataset at e.g. 11:55 contains all recorded raw between 11:54:00 and 11:54:59 (if delta t = 60s)!"
    nc_time.timezone = self.timezone
    nc_time.units = 'seconds since 1970-01-01 00:00:00'
    nc_time[:] = np.array(self.time.filled(self.missingNumber), dtype="i4")
    # commented because of Ubuntu bug: https://bugs.launchpad.net/ubuntu/+source/python-scientific/+bug/1005571
    #if not pyNc: nc_time._FillValue =int(self.missingNumber)

    nc_range = cdfFile.createVariable(
        'range', 'i', ('range',), **fillVDict)  # = missingNumber)
    nc_range.description = "range bins"
    nc_range.units = '#'
    nc_range[:] = np.arange(self.co["minH"], self.co["maxH"]+1, dtype="i4")
    #if not pyNc: nc_range._FillValue =int(self.missingNumber)

    nc_velocity = cdfFile.createVariable(
        'velocity', 'f', ('velocity',), **fillVDict)
    nc_velocity.description = "Doppler velocity bins. If dealiasing is applied, the spectra are triplicated"
    nc_velocity.units = 'm s^-1'
    nc_velocity[:] = np.array(self.specVel, dtype="f4")
    #if not pyNc: nc_velocity._FillValue =float(self.missingNumber)

    if saveAlsoNonDealiased:
        nc_velocity_noDA = cdfFile.createVariable(
            'velocity_noDA', 'f', ('velocity_noDA',), **fillVDict)
        nc_velocity_noDA.description = "Original, non dealiased, Doppler velocity bins."
        nc_velocity_noDA.units = 'm s^-1'
        nc_velocity_noDA[:] = np.array(self.specVel_noDA, dtype="f4")
        #if not pyNc: nc_velocity_noDA._FillValue =float(self.missingNumber)

    nc_height = cdfFile.createVariable(
        'height', 'f', ncShape2D, **fillVDict)  # = missingNumber)
    nc_height.description = "height above instrument"
    nc_height.units = 'm'
    nc_height[:] = np.array(self.H.filled(self.missingNumber), dtype="f4")
    #if not pyNc: nc_height._FillValue =float(self.missingNumber)

    # data variables: each one is written only if requested via varsToSave.
    # The *_noDA twins additionally require saveAlsoNonDealiased when
    # varsToSave == 'all'.
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "eta_noDA" in varsToSave:
        nc_eta_noDA = cdfFile.createVariable(
            'eta_noDA', 'f', ncShape3D_noDA, **fillVDict)
        nc_eta_noDA.description = "spectral reflectivities NOT dealiased"
        nc_eta_noDA.units = "mm^6 m^-3"
        nc_eta_noDA[:] = np.array(self.eta_noDA.data, dtype="f4")
        #if not pyNc: nc_eta_noDA._FillValue =float(self.missingNumber)
        nc_etaMask_noDA = cdfFile.createVariable(
            'etaMask_noDA', 'i', ncShape3D_noDA, **fillVDict)
        nc_etaMask_noDA.description = "noise mask of eta NOT dealiased, 0: signal, 1:noise"
        nc_etaMask_noDA.units = "bool"
        nc_etaMask_noDA[:] = np.array(
            np.array(self.eta_noDA.mask, dtype=int), dtype="i4")
        #if not pyNc: nc_etaMask_noDA._FillValue =int(self.missingNumber)
    if varsToSave == 'all' or "eta" in varsToSave:
        nc_eta = cdfFile.createVariable('eta', 'f', ncShape3D, **fillVDict)
        nc_eta.description = "spectral reflectivities. if dealiasing is applied, the spectra are triplicated, thus up to three peaks can occur from -12 to +24 m s^-1. However, only one peak is not masked in etaMask"
        nc_eta.units = "mm^6 m^-3"
        nc_eta[:] = np.array(self.eta.data, dtype="f4")
        #if not pyNc: nc_eta._FillValue =float(self.missingNumber)
        nc_etaMask = cdfFile.createVariable(
            'etaMask', 'i', ncShape3D, **fillVDict)
        nc_etaMask.description = "noise mask of eta, 0: signal, 1:noise"
        nc_etaMask.units = "bool"
        nc_etaMask[:] = np.array(
            np.array(self.eta.mask, dtype=int), dtype="i4")
        #if not pyNc: nc_etaMask._FillValue =int(self.missingNumber)
    if varsToSave == 'all' or "quality" in varsToSave:
        # collapse the per-flag boolean masks into one binary quality code
        qualArray, qualDescription = self.getQualityBinArray(self.qual)
        nc_qual = cdfFile.createVariable(
            'quality', 'i', ncShape2D, **fillVDict)
        nc_qual.description = qualDescription
        nc_qual.units = "bin"
        nc_qual[:] = np.array(qualArray, dtype="i4")
        #if not pyNc: nc_qual._FillValue =int(self.missingNumber)
    if varsToSave == 'all' or "TF" in varsToSave:
        nc_TF = cdfFile.createVariable('TF', 'f', ncShape2D, **fillVDict)
        nc_TF.description = "Transfer Function (see Metek's documentation)"
        nc_TF.units = "-"
        nc_TF[:] = np.array(self.TF.filled(self.missingNumber), dtype="f4")
        #if not pyNc: nc_TF._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "Ze_noDA" in varsToSave:
        nc_ze_noDA = cdfFile.createVariable(
            'Ze_noDA', 'f', ncShape2D, **fillVDict)
        nc_ze_noDA.description = "reflectivity of the most significant peak, not dealiased"
        nc_ze_noDA.units = "dBz"
        nc_ze_noDA[:] = np.array(self.Ze_noDA, dtype="f4")
        #if not pyNc: nc_ze_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "Ze" in varsToSave:
        nc_ze = cdfFile.createVariable('Ze', 'f', ncShape2D, **fillVDict)
        nc_ze.description = "reflectivity of the most significant peak"
        nc_ze.units = "dBz"
        nc_ze[:] = np.array(self.Ze, dtype="f4")
        #if not pyNc: nc_ze._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "specWidth_noDA" in varsToSave:
        nc_specWidth_noDA = cdfFile.createVariable(
            'spectralWidth_noDA', 'f', ncShape2D, **fillVDict)
        nc_specWidth_noDA.description = "spectral width of the most significant peak, not dealiased"
        nc_specWidth_noDA.units = "m s^-1"
        nc_specWidth_noDA[:] = np.array(self.specWidth_noDA, dtype="f4")
        #if not pyNc: nc_specWidth_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "specWidth" in varsToSave:
        nc_specWidth = cdfFile.createVariable(
            'spectralWidth', 'f', ncShape2D, **fillVDict)
        nc_specWidth.description = "spectral width of the most significant peak"
        nc_specWidth.units = "m s^-1"
        nc_specWidth[:] = np.array(self.specWidth, dtype="f4")
        #if not pyNc: nc_specWidth._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "skewness_noDA" in varsToSave:
        nc_skewness_noDA = cdfFile.createVariable(
            'skewness_noDA', 'f', ncShape2D, **fillVDict)
        nc_skewness_noDA.description = "Skewness of the most significant peak, not dealiased"
        nc_skewness_noDA.units = "-"
        nc_skewness_noDA[:] = np.array(self.skewness_noDA, dtype="f4")
        #if not pyNc: nc_skewness_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "skewness" in varsToSave:
        nc_skewness = cdfFile.createVariable(
            'skewness', 'f', ncShape2D, **fillVDict)
        nc_skewness.description = "Skewness of the most significant peak"
        nc_skewness.units = "-"
        nc_skewness[:] = np.array(self.skewness, dtype="f4")
        #if not pyNc: nc_skewness._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "kurtosis_noDA" in varsToSave:
        nc_kurtosis_noDA = cdfFile.createVariable(
            'kurtosis_noDA', 'f', ncShape2D, **fillVDict)
        nc_kurtosis_noDA.description = "kurtosis of the most significant peak, not dealiased"
        nc_kurtosis_noDA.units = "-"
        nc_kurtosis_noDA[:] = np.array(self.kurtosis_noDA, dtype="f4")
        #if not pyNc: nc_kurtosis_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "kurtosis" in varsToSave:
        nc_kurtosis = cdfFile.createVariable(
            'kurtosis', 'f', ncShape2D, **fillVDict)
        nc_kurtosis.description = "kurtosis of the most significant peak"
        nc_kurtosis.units = "-"
        nc_kurtosis[:] = np.array(self.kurtosis, dtype="f4")
        #if not pyNc: nc_kurtosis._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "peakVelLeftBorder_noDA" in varsToSave:
        nc_peakVelLeftBorder_noDA = cdfFile.createVariable(
            'peakVelLeftBorder_noDA', 'f', ncShape2D, **fillVDict)
        nc_peakVelLeftBorder_noDA.description = "Doppler velocity of the left border of the peak, not dealiased"
        nc_peakVelLeftBorder_noDA.units = "m s^-1"
        nc_peakVelLeftBorder_noDA[:] = np.array(
            self.peakVelLeftBorder_noDA, dtype="f4")
        #if not pyNc: nc_peakVelLeftBorder_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "peakVelLeftBorder" in varsToSave:
        nc_peakVelLeftBorder = cdfFile.createVariable(
            'peakVelLeftBorder', 'f', ncShape2D, **fillVDict)
        nc_peakVelLeftBorder.description = "Doppler velocity of the left border of the peak"
        nc_peakVelLeftBorder.units = "m s^-1"
        nc_peakVelLeftBorder[:] = np.array(
            self.peakVelLeftBorder, dtype="f4")
        #if not pyNc: nc_peakVelLeftBorder._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "peakVelRightBorder_noDA" in varsToSave:
        nc_peakVelRightBorder_noDA = cdfFile.createVariable(
            'peakVelRightBorder_noDA', 'f', ncShape2D, **fillVDict)
        nc_peakVelRightBorder_noDA.description = "Doppler velocity of the right border of the peak, not dealiased"
        nc_peakVelRightBorder_noDA.units = "m s^-1"
        nc_peakVelRightBorder_noDA[:] = np.array(
            self.peakVelRightBorder_noDA, dtype="f4")
        #if not pyNc: nc_peakVelRightBorder_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "peakVelRightBorder" in varsToSave:
        nc_peakVelRightBorder = cdfFile.createVariable(
            'peakVelRightBorder', 'f', ncShape2D, **fillVDict)
        nc_peakVelRightBorder.description = "Doppler velocity of the right border of the peak"
        nc_peakVelRightBorder.units = "m s^-1"
        nc_peakVelRightBorder[:] = np.array(
            self.peakVelRightBorder, dtype="f4")
        #if not pyNc: nc_peakVelRightBorder._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "leftSlope_noDA" in varsToSave:
        nc_leftSlope_noDA = cdfFile.createVariable(
            'leftSlope_noDA', 'f', ncShape2D, **fillVDict)
        nc_leftSlope_noDA.description = "Slope at the left side of the peak, not dealiased"
        nc_leftSlope_noDA.units = "dB/(m s^-1)"
        nc_leftSlope_noDA[:] = np.array(self.leftSlope_noDA, dtype="f4")
        #if not pyNc: nc_leftSlope_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "leftSlope" in varsToSave:
        nc_leftSlope = cdfFile.createVariable(
            'leftSlope', 'f', ncShape2D, **fillVDict)
        nc_leftSlope.description = "Slope at the left side of the peak"
        nc_leftSlope.units = "dB/(m s^-1)"
        nc_leftSlope[:] = np.array(self.leftSlope, dtype="f4")
        #if not pyNc: nc_leftSlope._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "rightSlope_noDA" in varsToSave:
        nc_rightSlope_noDA = cdfFile.createVariable(
            'rightSlope_noDA', 'f', ncShape2D, **fillVDict)
        nc_rightSlope_noDA.description = "Slope at the right side of the peak, not dealiased"
        nc_rightSlope_noDA.units = "dB/(m s^-1)"
        nc_rightSlope_noDA[:] = np.array(self.rightSlope_noDA, dtype="f4")
        #if not pyNc: nc_rightSlope_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "rightSlope" in varsToSave:
        nc_rightSlope = cdfFile.createVariable(
            'rightSlope', 'f', ncShape2D, **fillVDict)
        nc_rightSlope.description = "Slope at the right side of the peak"
        nc_rightSlope.units = "dB/(m s^-1)"
        nc_rightSlope[:] = np.array(self.rightSlope, dtype="f4")
        #if not pyNc: nc_rightSlope._FillValue =float(self.missingNumber)
    if (varsToSave == 'all' and saveAlsoNonDealiased) or "W_noDA" in varsToSave:
        nc_w_noDA = cdfFile.createVariable(
            'W_noDA', 'f', ncShape2D, **fillVDict)
        nc_w_noDA.description = "Mean Doppler Velocity of the most significant peak, not dealiased"
        nc_w_noDA.units = "m s^-1"
        nc_w_noDA[:] = np.array(self.W_noDA, dtype="f4")
        #if not pyNc: nc_w_noDA._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "W" in varsToSave:
        nc_w = cdfFile.createVariable('W', 'f', ncShape2D, **fillVDict)
        nc_w.description = "Mean Doppler Velocity of the most significant peak"
        nc_w.units = "m s^-1"
        nc_w[:] = np.array(self.W, dtype="f4")
        #if not pyNc: nc_w._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "etaNoiseAve" in varsToSave:
        nc_noiseAve = cdfFile.createVariable(
            'etaNoiseAve', 'f', ncShape2D, **fillVDict)
        nc_noiseAve.description = "mean noise of one Doppler Spectrum in the same units as eta, never dealiased"
        nc_noiseAve.units = "mm^6 m^-3"
        nc_noiseAve[:] = np.array(self.etaNoiseAve, dtype="f4")
        #if not pyNc: nc_noiseAve._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "etaNoiseStd" in varsToSave:
        nc_noiseStd = cdfFile.createVariable(
            'etaNoiseStd', 'f', ncShape2D, **fillVDict)
        nc_noiseStd.description = "std of noise of one Doppler Spectrum in the same units as eta, never dealiased"
        nc_noiseStd.units = "mm^6 m^-3"
        nc_noiseStd[:] = np.array(self.etaNoiseStd, dtype="f4")
        #if not pyNc: nc_noiseStd._FillValue =float(self.missingNumber)
    if varsToSave == 'all' or "SNR" in varsToSave:
        nc_SNR = cdfFile.createVariable('SNR', 'f', ncShape2D, **fillVDict)
        nc_SNR.description = "signal to noise ratio of the most significant peak, never dealiased!"
        nc_SNR.units = "dB"
        nc_SNR[:] = np.array(self.SNR, dtype="f4")
        #if not pyNc: nc_SNR._FillValue =float(self.missingNumber)
    cdfFile.close()
    return
class mrrProcessedData:
    '''
    Class to read MRR average or instantaneous data
    includes function to save data to netcdf
    '''
    # sentinel written to netCDF for missing/invalid values
    missingNumber = -9999

    def __init__(self, fname, debugLimit=0, maskData=True, verbosity=2, ncForm="NETCDF3_CLASSIC"):
        """
        reads MRR Average or Instantaneous data. The data is not converted, no magic! The input files can be .gz compressed. Invalid or missing data is marked as nan

        @parameter fname (str or list): list of files or Filename, wildcards allowed, or
                   a single netCDF filename if reading from a file previously
                   created by mrrProcessedData.writeNetCDF()
        @parameter debugLimit (int): stop after debugLimit timestamps
        @parameter maskData (bool): mask nan's in arrays
        @parameter verbosity (int): 0: silent exept warnings/errors, 2:verbose
        @parameter ncForm (string): set netCDF format

        No return, but provides MRR dataset variables
        """
        # If this is a single filename input, and it is a netCDF
        # (extension is nc or cdf), then read it directly and return.
        if type(fname) is str:
            if os.path.splitext(fname)[1] in ('.nc', '.cdf'):
                nc, pyNc = _get_netCDF_module(ncForm=ncForm)
                if pyNc:
                    cdfFile = nc.Dataset(fname, "r", format=ncForm)
                else:
                    cdfFile = nc.NetCDFFile(fname, "r")
                # NOTE(review): getncattr is a python-netcdf4 method; presumably
                # this branch is only used with pyNc backends in practice — confirm.
                self.header = cdfFile.getncattr('mrrHeader')
                self.mrrTimestamps = cdfFile.variables['time'][:]
                self.mrrH = cdfFile.variables['MRR_H'][:]
                self.mrrTF = cdfFile.variables['MRR_TF'][:]
                self.mrrF = cdfFile.variables['MRR_F'][:]
                self.mrrD = cdfFile.variables['MRR_D'][:]
                self.mrrN = cdfFile.variables['MRR_N'][:]
                self.mrrK = cdfFile.variables['MRR_K'][:]
                self.mrrCapitalZ = cdfFile.variables['MRR_Capital_Z'][:]
                self.mrrSmallz = cdfFile.variables['MRR_Small_z'][:]
                self.mrrPIA = cdfFile.variables['MRR_PIA'][:]
                self.mrrRR = cdfFile.variables['MRR_RR'][:]
                self.mrrLWC = cdfFile.variables['MRR_LWC'][:]
                self.mrrW = cdfFile.variables['MRR_W'][:]
                cdfFile.close()
                self.shape2D = np.shape(self.mrrH)
                self.shape3D = np.shape(self.mrrF)
                return

        # some helper functions!
        def splitMrrAveData(string, debugTime, floatInt):
            '''
            splits one line of mrr data into a list of 31 values
            (one per range gate, 7 characters each, starting at column 3)

            @parameter string (str) string of MRR data
            @parameter debugTime (int) time for debug output
            @parameter floatInt (type) convert float or integer
            @return array with mrr data
            '''
            listOfData = list()
            listOfData_append = listOfData.append
            # data layout: first value at column 3, fixed width of 7 characters
            i_start = 3
            i_offset = 7
            try:
                for k in np.arange(i_start, i_offset*31, i_offset):
                    listOfData_append(mrrDataEsc(
                        string[k:k+i_offset], floatInt))
            except:
                # try to fix MRR bug: firmware sometimes emits one digit too
                # many, shifting all following columns; trim the known
                # overlong number patterns and re-parse the whole line.
                print("repairing data at " + str(unix2date(debugTime)))
                string = string.replace("10000.0", "10000.")
                string = string.replace("1000.00", "1000.0")
                string = string.replace("100.000", "100.00")
                string = string.replace("10.0000", "10.000")
                string = string.replace("1.00000", "1.0000")
                listOfData = list()
                listOfData_append = listOfData.append
                for k in np.arange(i_start, i_offset*31, i_offset):
                    try:
                        listOfData_append(mrrDataEsc(
                            string[k:k+i_offset], floatInt))
                    except:
                        # still corrupt after repair: keep going, mark value as nan
                        print("######### Warning, Corrupt data at " + str(unix2date(debugTime)
                                                                          ) + ", position "+str(k)+": " + string+" #########")
                        listOfData_append(np.nan)
            return np.array(listOfData)

        def mrrDataEsc(string, floatInt):
            """
            set invalid data to nan!

            @parameter string (str): string from mrr data
            @parameter floatInt (function): int or float function
            @return - float or int number (nan if field is blank or not 7 chars wide)
            """
            if (string == " "*7) or (len(string) != 7):
                return np.nan
            else:
                return floatInt(string)

        # resolve input file list (wildcards allowed for a single pattern)
        if type(fname) == list:
            files = fname
        else:
            files = glob.glob(fname)
            files.sort()

        foundAtLeastOneFile = False

        # go through all files
        for f, file in enumerate(files):
            if verbosity > 1:
                print("%s of %s:" % (f+1, len(files)), file)
            # open file, gzip or ascii; unreadable/empty files are skipped
            try:
                if file[-3:] == ".gz":
                    try:
                        allData = gzip.open(file, 'rt')
                    except:
                        print("could not open:", file)
                        raise IOError("could not open:" + file)
                else:
                    try:
                        # without errors='ignore', post-processing script crashes
                        # when loading MRR raw file with some missing/corrupt data
                        # using codecs.open(... encoding='UTF-8' ...) as this seems to be
                        # the only method that works in python 2 and 3.
                        allData = codecs.open(file, 'r', encoding='UTF-8', errors='ignore')
                    except:
                        print("could not open:", file)
                        raise IOError("could not open:" + file)
                # probe the first 10 characters to detect empty files
                if len(allData.read(10)) == 0:
                    print(file, "empty!")
                    allData.close()
                    raise IOError("File empty")
                else:
                    allData.seek(0)
                    i = 0
            except IOError:
                print("skipping...", file)
                continue
            foundAtLeastOneFile = True

            # go through file and make a dictionary with timestamp as key and all corresponding lines of data as values
            dataMRR = {}
            prevDate = 0
            tmpList = list()
            for line in allData:
                if line[0:3] == "MRR":
                    # an "MRR" header line starts a new timestamp block;
                    # flush the lines collected for the previous one first
                    if i != 0:
                        dataMRR[prevDate] = tmpList
                    tmpList = []
                    asciiDate = line[4:20]
                    # We must have UTC!
                    # NOTE(review): sys.exit aborts the whole read on the first
                    # non-UTC header line — deliberate strictness, apparently.
                    if (re.search("UTC", line) == None):
                        sys.exit("Warning, line must start with UTC!")
                    # header date format: YYMMDDhhmmss, years are 2000-based
                    date = datetime.datetime(year=2000+int(asciiDate[0:2]), month=int(asciiDate[2:4]), day=int(
                        asciiDate[4:6]), hour=int(asciiDate[6:8]), minute=int(asciiDate[8:10]), second=int(asciiDate[10:12]))
                    date = int(date2unix(date))
                    tmpList.append(line)
                    prevDate = date
                else:
                    tmpList.append(line)
                i += 1
            # flush the last timestamp block
            dataMRR[prevDate] = tmpList
            allData.close()

            # key 0 collects any lines seen before the first "MRR" header
            try:
                del dataMRR[0]
                print("Warning: some lines without timestamp")
            except:
                pass

            # NOTE(review): debugLimit is overwritten per file here, so with
            # multiple input files the limit effectively becomes the first
            # file's timestamp count — confirm this is intended.
            if debugLimit == 0:
                debugLimit = len(list(dataMRR.keys()))

            # create arrays for data: 31 range gates, 64 spectral classes,
            # pre-filled with nan so missing records stay marked as invalid
            aveTimestamps = np.array(np.sort(list(dataMRR.keys()))[
                                     0:debugLimit], dtype=int)
            aveH = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveTF = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveF = np.ones((debugLimit, 31, 64), dtype=float)*np.nan
            aveN = np.ones((debugLimit, 31, 64), dtype=float)*np.nan
            aveD = np.ones((debugLimit, 31, 64), dtype=float)*np.nan
            aveK = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveCapitalZ = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveSmallz = np.ones((debugLimit, 31), dtype=float)*np.nan
            avePIA = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveRR = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveLWC = np.ones((debugLimit, 31), dtype=float)*np.nan
            aveW = np.ones((debugLimit, 31), dtype=float)*np.nan

            # go through timestamps and fill up arrays; each data line is
            # dispatched on its leading record tag (H, TF, F.., D.., N.., K,
            # PIA, Z, z, RR, LWC, W)
            for t, timestamp in enumerate(aveTimestamps[0:debugLimit]):
                # print unix2date(timestamp)
                dataSet = dataMRR[timestamp]
                for dataLine in dataSet:
                    if dataLine[0:3] == "MRR":
                        # just one is stored, thus no array
                        self.header = dataLine[21:-2]
                        continue  # print timestamp
                    elif dataLine[0:3] == "H  ":
                        aveH[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "TF ":
                        aveTF[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue  # print "TF"
                    elif dataLine[0:1] == "F":
                        # "Fnn" lines: nn is the spectral bin index
                        try:
                            specBin = int(dataLine[1:3])
                        except:
                            print("######### Warning, Corrupt data header at " +
                                  str(unix2date(timestamp)) + ", " + dataLine+" #########")
                            continue
                        aveF[t, :, specBin] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:1] == "D":
                        try:
                            specBin = int(dataLine[1:3])
                        except:
                            print("######### Warning, Corrupt data header at " +
                                  str(unix2date(timestamp)) + ", " + dataLine+" #########")
                            continue
                        aveD[t, :, specBin] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:1] == "N":
                        try:
                            specBin = int(dataLine[1:3])
                        except:
                            print("######### Warning, Corrupt data header at " +
                                  str(unix2date(timestamp)) + ", " + dataLine+" #########")
                            continue
                        aveN[t, :, specBin] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "K  ":
                        aveK[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "PIA":
                        avePIA[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "Z  ":
                        aveCapitalZ[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "z  ":
                        aveSmallz[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "RR ":
                        aveRR[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "LWC":
                        aveLWC[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif dataLine[0:3] == "W  ":
                        aveW[t, :] = splitMrrAveData(
                            dataLine, timestamp, float)
                        continue
                    elif len(dataLine) == 2:
                        # empty line ("\r\n"), nothing to parse
                        continue
                    else:
                        print("? Line not recognized:", str(
                            unix2date(timestamp)), dataLine, len(dataLine))

            # join arrays of different files: concatenate onto existing
            # attributes; on the first file they don't exist yet, so the
            # AttributeError branch initializes them instead
            try:
                self.mrrTimestamps = np.concatenate(
                    (self.mrrTimestamps, aveTimestamps), axis=0)
                self.mrrH = np.concatenate((self.mrrH, aveH), axis=0)
                self.mrrTF = np.concatenate((self.mrrTF, aveTF), axis=0)
                self.mrrF = np.concatenate((self.mrrF, aveF), axis=0)
                self.mrrN = np.concatenate((self.mrrN, aveN), axis=0)
                self.mrrD = np.concatenate((self.mrrD, aveD), axis=0)
                self.mrrK = np.concatenate((self.mrrK, aveK), axis=0)
                self.mrrPIA = np.concatenate((self.mrrPIA, avePIA), axis=0)
                self.mrrCapitalZ = np.concatenate(
                    (self.mrrCapitalZ, aveCapitalZ), axis=0)
                self.mrrSmallz = np.concatenate(
                    (self.mrrSmallz, aveSmallz), axis=0)
                self.mrrRR = np.concatenate((self.mrrRR, aveRR), axis=0)
                self.mrrLWC = np.concatenate((self.mrrLWC, aveLWC), axis=0)
                self.mrrW = np.concatenate((self.mrrW, aveW), axis=0)
            except AttributeError:
                self.mrrTimestamps = aveTimestamps
                self.mrrH = aveH
                self.mrrTF = aveTF
                self.mrrF = aveF
                self.mrrN = aveN
                self.mrrD = aveD
                self.mrrK = aveK
                self.mrrPIA = avePIA
                self.mrrCapitalZ = aveCapitalZ
                self.mrrSmallz = aveSmallz
                self.mrrRR = aveRR
                self.mrrLWC = aveLWC
                self.mrrW = aveW

        if foundAtLeastOneFile == False:
            print("NO DATA")
            raise UnboundLocalError
        # self.header is only set when at least one "MRR" line was parsed
        try:
            self.header
        except:
            print("did not find any MRR data in file!")
            raise IOError("did not find any MRR data in file!")
        # free the per-file working arrays (still bound from the last iteration)
        del aveTimestamps, aveH, aveTF, aveF, aveN, aveD, aveK, avePIA, aveCapitalZ, aveSmallz, aveRR, aveLWC, aveW

        if maskData:
            # wrap every array as a masked array, masking nan entries
            self.mrrTimestamps = np.ma.masked_array(
                self.mrrTimestamps, np.isnan(self.mrrTimestamps))
            self.mrrH = np.ma.masked_array(self.mrrH, np.isnan(self.mrrH))
            self.mrrTF = np.ma.masked_array(self.mrrTF, np.isnan(self.mrrTF))
            self.mrrF = np.ma.masked_array(self.mrrF, np.isnan(self.mrrF))
            self.mrrN = np.ma.masked_array(self.mrrN, np.isnan(self.mrrN))
            self.mrrD = np.ma.masked_array(self.mrrD, np.isnan(self.mrrD))
            self.mrrK = np.ma.masked_array(self.mrrK, np.isnan(self.mrrK))
            self.mrrPIA = np.ma.masked_array(
                self.mrrPIA, np.isnan(self.mrrPIA))
            self.mrrCapitalZ = np.ma.masked_array(
                self.mrrCapitalZ, np.isnan(self.mrrCapitalZ))
            self.mrrSmallz = np.ma.masked_array(
                self.mrrSmallz, np.isnan(self.mrrSmallz))
            self.mrrRR = np.ma.masked_array(self.mrrRR, np.isnan(self.mrrRR))
            self.mrrLWC = np.ma.masked_array(
                self.mrrLWC, np.isnan(self.mrrLWC))
            self.mrrW = np.ma.masked_array(self.mrrW, np.isnan(self.mrrW))

        self.shape2D = np.shape(self.mrrH)
        self.shape3D = np.shape(self.mrrF)

        if verbosity > 0:
            print("done reading")
    # end def __init__

    def writeNetCDF(self, fileOut, author="IMProToo", location="", institution="", ncForm="NETCDF3_CLASSIC"):
        '''
        writes MRR Average or Instantaneous Data into Netcdf file

        @parameter fileOut (str): netCDF file name
        @parameter author (str): Author for netCDF meta data (default:IMProToo)
        @parameter location (str): Location of instrument for NetCDF Metadata (default: "")
        @parameter institution (str): Institution to whom the instrument belongs (default: "")
        @parameter ncForm (str): netCDF Format, possible values are NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4 for the python-netcdf4 package, NETCDF3 takes the "old" Scientific.IO.NetCDF module, which is a bit more convenient to install or as fall back option python-netcdf3
        '''
        nc, pyNc = _get_netCDF_module(ncForm=ncForm)
        if pyNc:
            cdfFile = nc.Dataset(fileOut, "w", format=ncForm)
        else:
            cdfFile = nc.NetCDFFile(fileOut, "w")
        fillVDict = dict()
        # little cheat to avoid hundreds of if, else: fill_value is only
        # supported by the python-netcdf4 createVariable call.
        if pyNc:
            fillVDict["fill_value"] = self.missingNumber
        print("writing %s ..." % (fileOut))
        # Attributes
        cdfFile.history = 'Created with IMProToo v' + __version__
        cdfFile.author = 'Max Maahn'
        cdfFile.processing_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
        cdfFile.reference = 'Maahn, M. and Kollias, P., 2012: Improved Micro Rain Radar snow measurements using Doppler spectra post-processing, Atmos. Meas. Tech., 5, 2661-2673, doi:10.5194/amt-5-2661-2012. '
        cdfFile.title = 'Micro rain radar averaged data (Metek standard output) converted to netcdf'
        cdfFile.comment = 'This data is only valid in case of liquid precipitation. Note that this data has been processed regardless of precipitation type and additional external information about precipitation type is needed for correct interpretation of the measurements.'
        cdfFile.institution = institution
        cdfFile.contact_person = author
        cdfFile.source = 'MRR-2'
        cdfFile.location = location
        cdfFile.mrrHeader = self.header
        # Dimensions
        cdfFile.createDimension('MRR rangegate', 31)
        cdfFile.createDimension('time', None)  # allows Multifile read
        cdfFile.createDimension('MRR spectralclass', 64)
        nc_times = cdfFile.createVariable('time', 'i', ('time',), **fillVDict)
        nc_ranges = cdfFile.createVariable(
            'MRR rangegate', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_classes = cdfFile.createVariable(
            'MRR spectralclass', 'i', ('MRR spectralclass',))
        nc_times.units = 'seconds since 1970-01-01 00:00:00'
        nc_ranges.units = 'm'
        nc_classes.units = 'none'
        # Create Variables
        nc_h = cdfFile.createVariable(
            'MRR_H', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_h.units = 'm'
        nc_tf = cdfFile.createVariable(
            'MRR_TF', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_tf.units = 'none'
        nc_f = cdfFile.createVariable(
            'MRR_F', 'f', ('time', 'MRR rangegate', 'MRR spectralclass',), **fillVDict)
        nc_f.units = 'dB'
        nc_d = cdfFile.createVariable(
            'MRR_D', 'f', ('time', 'MRR rangegate', 'MRR spectralclass',), **fillVDict)
        nc_d.units = 'mm'
        nc_n = cdfFile.createVariable(
            'MRR_N', 'f', ('time', 'MRR rangegate', 'MRR spectralclass',), **fillVDict)
        nc_n.units = 'm^-3 mm^-1'
        nc_k = cdfFile.createVariable(
            'MRR_K', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_k.units = 'dB'
        nc_capitalZ = cdfFile.createVariable(
            'MRR_Capital_Z', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_capitalZ.units = 'dBz'
        nc_smallz = cdfFile.createVariable(
            'MRR_Small_z', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_smallz.units = 'dBz'
        nc_pia = cdfFile.createVariable(
            'MRR_PIA', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_pia.units = 'dB'
        nc_rr = cdfFile.createVariable(
            'MRR_RR', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_rr.units = 'mm/h'
        nc_lwc = cdfFile.createVariable(
            'MRR_LWC', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_lwc.units = 'g/m^3'
        nc_w = cdfFile.createVariable(
            'MRR_W', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_w.units = 'm s^-1'
        # fill dimensions
        nc_classes[:] = np.arange(0, 64, 1, dtype="i4")
        nc_times[:] = np.array(self.mrrTimestamps, dtype="i4")
        nc_ranges[:] = np.array(self.mrrH, dtype="f4")
        # fill data
        nc_h[:] = np.array(self.mrrH, dtype="f4")
        nc_tf[:] = np.array(self.mrrTF, dtype="f4")
        nc_f[:] = np.array(self.mrrF, dtype="f4")
        nc_d[:] = np.array(self.mrrD, dtype="f4")
        nc_n[:] = np.array(self.mrrN, dtype="f4")
        nc_k[:] = np.array(self.mrrK, dtype="f4")
        nc_capitalZ[:] = np.array(self.mrrCapitalZ, dtype="f4")
        nc_smallz[:] = np.array(self.mrrSmallz, dtype="f4")
        nc_pia[:] = np.array(self.mrrPIA, dtype="f4")
        nc_rr[:] = np.array(self.mrrRR, dtype="f4")
        nc_lwc[:] = np.array(self.mrrLWC, dtype="f4")
        nc_w[:] = np.array(self.mrrW, dtype="f4")
        # commented because of Ubuntu bug: https://bugs.launchpad.net/ubuntu/+source/python-scientific/+bug/1005571
        # if not pyNc:
        ##import pdb;pdb.set_trace()
        #nc_ranges._FillValue =float(self.missingNumber)
        #nc_tf._FillValue =float(self.missingNumber)
        #nc_f._FillValue =float(self.missingNumber)
        #nc_d._FillValue =float(self.missingNumber)
        #nc_n._FillValue =float(self.missingNumber)
        #nc_k._FillValue =float(self.missingNumber)
        #nc_capitalZ._FillValue =float(self.missingNumber)
        #nc_smallz._FillValue =float(self.missingNumber)
        #nc_pia._FillValue =float(self.missingNumber)
        #nc_rr._FillValue =float(self.missingNumber)
        #nc_lwc._FillValue =float(self.missingNumber)
        #nc_w._FillValue =float(self.missingNumber)
        cdfFile.close()
        print("done")
    # end def writeNetCDF
# end class MrrData
class mrrRawData:
    '''
    Class to read MRR raw data
    includes function to save data to netcdf
    '''
    # value written to netCDF variables for invalid/missing data
    missingNumber = -9999

    def __init__(self, fname, debugStart=0, debugLimit=0, maskData=True, ncForm="NETCDF3_CLASSIC"):
        """
        reads MRR raw data. The data is not converted, no magic! The input files can be .gz compressed.
        A single netCDF file can be input, that was previously created from mrrRawData.writeNetCDF()
        Invalid or Missing data is marked as nan and masked
        Since MRR raw data can contain all the data transferred on the serial bus, a lot of warnings can be raised. Usually these can be ignored.
        @parameter fname (str or list): list of files or Filename, wildcards allowed!
            a single netCDF filename if reading from a file previously
            created by mrrProcessedData.writeNetCDF()
        @parameter debugStart (int): start after debugStart timestamps
        @parameter debugLimit (int): stop after debugLimit timestamps
        @parameter ncForm (string): set netCDF format
        provides:
        mrrRawTime (numpy int64): timestamps in seconds since 01-01-1970 (time)
        mrrRawHeight (numpy float64): height levels (time*height)
        mrrRawTF (numpy float64): Transfer function (time*height)
        mrrRawSpectrum (numpy float64): spectral reflectivities of MRR raw data (time*height*velocity)
        """
        # only provided in newer Firmware, has to be guessed for older ones
        self.defaultSpecPer10Sec = 58
        self.timezone = None
        # If this is a single filename input, and it is a netCDF
        # (extension is nc or cdf), then read it directly and return.
        if type(fname) is str:
            if os.path.splitext(fname)[1] in ('.nc', '.cdf'):
                nc, pyNc = _get_netCDF_module(ncForm=ncForm)
                if pyNc:
                    cdfFile = nc.Dataset(fname, "r", format=ncForm)
                else:
                    cdfFile = nc.NetCDFFile(fname, "r")
                self.header = cdfFile.getncattr('mrrHeader')
                self.mrrRawCC = cdfFile.getncattr('mrrCalibrationConstant')
                self.mrrRawHeight = cdfFile.variables['MRR rangegate'][:]
                self.mrrRawTime = cdfFile.variables['MRR time'][:]
                self.mrrRawTF = cdfFile.variables['MRR_TF'][:]
                self.mrrRawSpectrum = cdfFile.variables['MRR_Spectra'][:]
                self.mrrRawNoSpec = cdfFile.variables['MRR_NoSpectra'][:]
                try:
                    self.timezone = str(cdfFile.variables['MRR time'].timezone)
                except AttributeError:
                    # this can occur when loading a file created with an older
                    # version of IMProToo, before the timezone update.
                    warnings.warn("timezone attribute missing, assuming UTC")
                    self.timezone = "UTC"
                cdfFile.close()
                self.shape2D = np.shape(self.mrrRawHeight)
                self.shape3D = np.shape(self.mrrRawSpectrum)
                return

        # some helper functions
        def rawEsc(string, floatInt):
            """
            set invalid data to nan!
            @parameter string (str): string from mrr data
            @parameter floatInt (function): int or float function
            @return - float or int number
            """
            if (string == " "*9) or (len(string) != 9):
                return np.nan
            else:
                return floatInt(string)

        def splitMrrRawData(string, debugTime, floatInt, startI):
            '''
            splits one line of mrr raw data into list
            @parameter string (str) string of MRR data
            @parameter debugTime (int) time for debug output
            @parameter floatInt (type) convert float or integer
            @parameter startI (int) first data index, old file format 6, new 3
            @return array with mrr data
            '''
            instData = list()
            instData_append = instData.append
            for k in np.arange(startI, 9*32, 9):
                try:
                    instData_append(rawEsc(string[k:k+9], floatInt))
                except:
                    # NOTE: 'timestamp' is resolved at call time from the
                    # enclosing parsing loop below (closure over __init__'s locals).
                    print("######### Warning, Corrupt data at " + str(unix2date(debugTime)) +
                          ", " + str(timestamp) + ", position "+str(k)+": " + string+" #########")
                    instData_append(np.nan)
            return np.array(instData)

        if type(fname) == list:
            files = fname
        else:
            files = glob.glob(fname)
        files.sort()
        foundAtLeastOneFile = False
        # go through all files
        for f, file in enumerate(files):
            print("%s of %s:" % (f+1, len(files)), file)
            # open file gz or ascii
            try:
                if file[-3:] == ".gz":
                    try:
                        allData = gzip.open(file, 'rt')
                    except:
                        print("could not open:", file)
                        raise IOError("could not open:" + file)
                else:
                    try:
                        # without errors='ignore', post-processing script crashes
                        # when loading MRR raw file with some missing/corrupt data
                        # using codecs.open(... encoding='UTF-8' ...) as this seems to be
                        # the only method that works in python 2 and 3.
                        allData = codecs.open(file, 'r', encoding='UTF-8', errors='ignore')
                    except:
                        print("could not open:", file)
                        raise IOError("could not open:" + file)
                if len(allData.read(10)) == 0:
                    print(file, "empty!")
                    allData.close()
                    raise IOError("File empty")
                else:
                    allData.seek(0)
                    i = 0
            except IOError:
                print("skipping...")
                continue
            foundAtLeastOneFile = True
            # go through file and make a dictionary with timestamp as key and all corresponding lines of data as values
            dataMRR = {}
            prevDate = 0
            tmpList = list()
            # preset, is changed in 8 lines if required
            fileFormat = "new"
            for line in allData:
                if line[0:2] == "T:" or line[0:3] == "MRR":
                    if i != 0:
                        dataMRR[prevDate] = tmpList
                        tmpList = []
                    if line[0:2] == "T:":
                        asciiDate = line[2:14]  # old mrr raw data
                        fileFormat = "old"  # if there
                    elif line[0:4] == "MRR ":
                        asciiDate = line[4:16]  # new mrr raw spectra
                    else:
                        raise IOError("must be either new or old file format!")
                    # Script wants UTC!
                    date = datetime.datetime(year=2000+int(asciiDate[0:2]), month=int(asciiDate[2:4]), day=int(
                        asciiDate[4:6]), hour=int(asciiDate[6:8]), minute=int(asciiDate[8:10]), second=int(asciiDate[10:12]))
                    date = int(date2unix(date))
                    tmpList.append(line)
                    prevDate = date
                else:
                    tmpList.append(line)
                i += 1
            # end for line
            dataMRR[prevDate] = tmpList
            allData.close()
            try:
                del dataMRR[0]
                warnings.warn("Warning: some lines without timestamp")
            except:
                pass
            if fileFormat == "new":
                startIndex = 3
            elif fileFormat == "old":
                startIndex = 6
            else:
                raise IOError("must be either new or old file format!")
            if debugLimit == 0:
                debugLimit = len(list(dataMRR.keys()))
            specLength = debugLimit - debugStart
            # create arrays for data (int*nan promotes to float arrays of nan)
            rawSpectra = np.ones((specLength, 32, 64), dtype=int)*np.nan
            rawTimestamps = np.array(np.sort(list(dataMRR.keys()))[
                debugStart:debugLimit], dtype=int)
            rawHeights = np.ones((specLength, 32), dtype=int)*np.nan
            rawTFs = np.ones((specLength, 32), dtype=float)*np.nan
            rawNoSpec = np.zeros(specLength, dtype=int)
            # default value - if the whole file is processed without ever setting mrrRawCC, this
            # means the file is not usable for Ze calculations, but there is no workaround there.
            self.mrrRawCC = 0
            # go through timestamps and fill up arrays
            for t, timestamp in enumerate(rawTimestamps):
                dataSet = dataMRR[timestamp]
                for dataLine in dataSet:
                    if dataLine[0:2] == "T:" or dataLine[0:3] == "MRR":
                        # store the first or second header line as an example, but parse every one
                        # to check for the CC and number of spectra variables. The first header line
                        # of MRR data might be messed up after starting the MRR, so the second one
                        # is used if available.
                        if t in [0, 1]:
                            self.header = dataLine
                        headerLineCC, headerLineNumSpectra, timezone = self.parseHeaderLine(
                            dataLine, fileFormat)
                        if headerLineCC is not None:
                            self.mrrRawCC = headerLineCC
                        if headerLineNumSpectra is not None:
                            rawNoSpec[t] = headerLineNumSpectra
                        else:
                            # if fileFormat is "old", then the default value must always be taken;
                            # otherwise, use the value from the headerLine, if present, otherwise
                            # print a warning, since that means the headerLine had a problem.
                            if fileFormat == "new":
                                # fix: the default is an int; concatenating it directly to
                                # the message string raised a TypeError before.
                                warnings.warn(
                                    'Warning, could not read number of Spectra, taking default instead: ' + str(self.defaultSpecPer10Sec))
                            rawNoSpec[t] = self.defaultSpecPer10Sec
                        if self.timezone is None:
                            self.timezone = timezone
                        else:
                            assert self.timezone == timezone
                        continue  # print timestamp
                    elif dataLine[0:3] == "M:h" or dataLine[0] == "H":
                        rawHeights[t, :] = splitMrrRawData(
                            dataLine, timestamp, int, startIndex)
                        continue  # print "H"
                    elif dataLine[0:4] == "M:TF" or dataLine[0:2] == "TF":
                        rawTFs[t, :] = splitMrrRawData(
                            dataLine, timestamp, float, startIndex)
                        continue  # print "TF"
                    elif dataLine[0:3] == "M:f" or dataLine[0] == "F":
                        try:
                            if fileFormat == "old":
                                specBin = int(dataLine[3:5])
                            else:
                                specBin = int(dataLine[1:3])
                        except:
                            warnings.warn("######### Warning, Corrupt data header at " + str(
                                unix2date(timestamp)) + ", " + str(timestamp) + ", " + dataLine+" #########")
                            continue
                        rawSpectra[t, :, specBin] = splitMrrRawData(
                            dataLine, timestamp, int, startIndex)
                        continue
                    elif (dataLine[0:2] == "C:") or (dataLine[0:2] == "R:"):
                        continue
                    else:
                        warnings.warn("? Line not recognized:" + dataLine)
            # end for t,timestamp
            # discard spectra which are only partly valid!
            rawSpectra[np.any(np.isnan(rawSpectra), axis=2)] = np.nan
            rawSpectra[np.any(np.isnan(rawTFs), axis=1)] = np.nan
            rawSpectra[np.any(np.isnan(rawHeights), axis=1)] = np.nan
            rawTFs[np.any(np.isnan(rawTFs), axis=1)] = np.nan
            rawHeights[np.any(np.isnan(rawHeights), axis=1)] = np.nan
            # join arrays of different days
            try:
                self.mrrRawHeight = np.concatenate(
                    (self.mrrRawHeight, rawHeights), axis=0)
                self.mrrRawTime = np.concatenate(
                    (self.mrrRawTime, rawTimestamps), axis=0)
                self.mrrRawTF = np.concatenate((self.mrrRawTF, rawTFs), axis=0)
                self.mrrRawSpectrum = np.concatenate(
                    (self.mrrRawSpectrum, rawSpectra), axis=0)
                self.mrrRawNoSpec = np.concatenate(
                    (self.mrrRawNoSpec, rawNoSpec), axis=0)
            except AttributeError:
                # first processed file: nothing to append to yet
                self.mrrRawHeight = rawHeights
                self.mrrRawTime = rawTimestamps
                self.mrrRawTF = rawTFs
                self.mrrRawSpectrum = rawSpectra
                self.mrrRawNoSpec = rawNoSpec
            # end try
        # end for f,file
        if foundAtLeastOneFile == False:
            # fix: fname may be a list here, so convert before concatenating
            raise UnboundLocalError("No files found: " + str(fname))
        try:
            self.header
        except:
            print("did not find any MRR data in file!")
            raise IOError("did not find any MRR data in file!")
        del rawHeights, rawTimestamps, rawTFs, rawSpectra
        if maskData:
            self.mrrRawHeight = np.ma.masked_array(
                self.mrrRawHeight, np.isnan(self.mrrRawHeight))
            self.mrrRawTime = np.ma.masked_array(
                self.mrrRawTime, np.isnan(self.mrrRawTime))
            self.mrrRawTF = np.ma.masked_array(
                self.mrrRawTF, np.isnan(self.mrrRawTF))
            self.mrrRawSpectrum = np.ma.masked_array(
                self.mrrRawSpectrum, np.isnan(self.mrrRawSpectrum))
        self.shape2D = np.shape(self.mrrRawHeight)
        self.shape3D = np.shape(self.mrrRawSpectrum)
    # end def __init__

    @staticmethod
    def parseHeaderLine(headerLine, fileFormat):
        '''
        Parses the raw data header line.
        Tries to parse according to the fileFormat ("old", or "new")
        Prints a warning if unsuccessful.
        @return (CC, numSpectra, timezone); CC and numSpectra are None when
            the corresponding keyword is missing or unreadable.
        '''
        tokens = headerLine.split()
        CC = None
        numSpectra = None
        try:
            idx = tokens.index('CC')
        except:
            warnings.warn('Warning, could not find Keyword CC in :'+headerLine)
        else:
            try:
                CC = int(tokens[idx+1])
            except:
                warnings.warn('Warning, could not read CC in: ' + headerLine)
        if fileFormat == "new":
            if not tokens[2].startswith("UTC"):
                raise IOError("ERROR, timestring must start with UTC!")
            timezone = tokens[2]
            if tokens[-1] != "RAW":
                raise IOError("Was expecting MRR RAW DATA, found: "+tokens[-1])
            try:
                idx = tokens.index('MDQ')
            except:
                warnings.warn(
                    'Warning, could not find Keyword MDQ in :'+headerLine)
            else:
                try:
                    # the number of spectra is the second value after MDQ
                    numSpectra = int(tokens[idx+2])
                except:
                    warnings.warn(
                        'Warning, could not read number of Spectra: in ' + headerLine)
        elif fileFormat == "old":
            if tokens[1] != "UTC":
                raise IOError("ERROR, time must be UTC!")
            timezone = tokens[1]
        else:
            raise IOError("must be either new or old file format!")
        return CC, numSpectra, timezone

    def writeNetCDF(self, fileOut, author="IMProToo", description="MRR Raw Data", ncForm='NETCDF3_CLASSIC'):
        '''
        writes MRR raw Data into Netcdf file
        @parameter fileOut (str): netCDF file name
        @parameter author (str): Author for netCDF meta data (default:IMProToo)
        @parameter description (str): Description for NetCDF Metadata (default: empty)
        @parameter ncForm (str): netcdf format, possible values are NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4 for the python-netcdf4 package, NETCDF3 takes the "old" Scientific.IO.NetCDF module, which is a bit more convenient to install or as fall back option python-netcdf3
        '''
        nc, pyNc = _get_netCDF_module(ncForm=ncForm)
        if pyNc:
            cdfFile = nc.Dataset(fileOut, "w", format=ncForm)
        else:
            cdfFile = nc.NetCDFFile(fileOut, "w")
        print("writing %s ..." % (fileOut))
        # Attributes
        cdfFile.history = 'Created ' + str(time.ctime(time.time()))
        cdfFile.source = 'Created by '+author + ' with IMProToo v' + __version__
        cdfFile.mrrHeader = self.header
        cdfFile.description = description
        cdfFile.mrrCalibrationConstant = self.mrrRawCC
        fillVDict = dict()
        # little cheat to avoid hundreds of if, else...
        if pyNc:
            fillVDict["fill_value"] = self.missingNumber
        # Dimensions
        cdfFile.createDimension('MRR rangegate', 32)
        cdfFile.createDimension('time', None)  # allows Multifile read
        cdfFile.createDimension('MRR spectralclass', 64)
        nc_times = cdfFile.createVariable(
            'MRR time', 'i', ('time',), **fillVDict)
        nc_ranges = cdfFile.createVariable(
            'MRR rangegate', 'f', ('time', 'MRR rangegate',))
        nc_classes = cdfFile.createVariable(
            'MRR spectralclass', 'i', ('MRR spectralclass',), **fillVDict)
        nc_times.units = 'seconds since 1970-01-01 00:00:00'
        nc_times.timezone = self.timezone
        nc_ranges.units = 'm'
        nc_classes.units = 'none'
        # Create Variables
        nc_tf = cdfFile.createVariable(
            'MRR_TF', 'f', ('time', 'MRR rangegate',), **fillVDict)
        nc_tf.units = 'none'
        nc_spectra = cdfFile.createVariable(
            'MRR_Spectra', 'f', ('time', 'MRR rangegate', 'MRR spectralclass',), **fillVDict)
        nc_spectra.units = 'none'
        nc_noSpec = cdfFile.createVariable(
            'MRR_NoSpectra', 'i', ('time',), **fillVDict)
        nc_noSpec.units = 'none'
        # fill dimensions
        nc_classes[:] = np.array(np.arange(0, 64, 1), dtype="i4")
        nc_times[:] = np.array(self.mrrRawTime, dtype="i4")
        nc_ranges[:] = np.array(self.mrrRawHeight, dtype="f4")
        # fill data
        nc_tf[:] = np.array(self.mrrRawTF, dtype="f4")
        nc_spectra[:] = np.array(self.mrrRawSpectrum, dtype="f4")
        nc_noSpec[:] = np.array(self.mrrRawNoSpec, dtype="i4")
        # commented because of Ubuntu bug: https://bugs.launchpad.net/ubuntu/+source/python-scientific/+bug/1005571
        # if not pyNc:
        #nc_noSpec._FillValue =int(self.missingNumber)
        #nc_spectra._FillValue =float(self.missingNumber)
        #nc_tf._FillValue =float(self.missingNumber)
        #nc_ranges._FillValue =float(self.missingNumber)
        cdfFile.close()
        print("done")
    # end def writeNetCDF
# end class mrrRawData
| maahn/IMProToo | IMProToo/core.py | core.py | py | 135,069 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "importlib.metadata.version",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "importlib.metadata.PackageNotFoundError",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pkg_resources.get_distribution",
"line_number": 35,
"usage_type": "cal... |
2524008345 | import matplotlib.pyplot as plt
import cv2
import os, glob
import numpy as np
import matplotlib._png as png
from moviepy.editor import VideoFileClip
#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
def show_images(images, cmap=None):
    """Display a list of images in a two-column matplotlib grid."""
    cols = 2
    rows = (len(images) + 1) // cols
    plt.figure(figsize=(10, 11))
    for i, image in enumerate(images):
        plt.subplot(rows, cols, i + 1)
        # single-channel (2-D) images are rendered with a gray colormap;
        # the rebinding intentionally persists for the rest of the loop
        if len(image.shape) == 2:
            cmap = 'gray'
        plt.imshow(image, cmap=cmap)
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.show()
#test_images = [plt.imread(path) for path in glob.glob('test_images/*.jpg')]
#show_images(test_images)
# image is expected be in RGB color space
def select_rgb_white_yellow(image):
    """Keep only near-white and yellow pixels of an RGB image (rest black)."""
    white = cv2.inRange(image, np.uint8([200, 200, 200]), np.uint8([255, 255, 255]))
    yellow = cv2.inRange(image, np.uint8([190, 190, 0]), np.uint8([255, 255, 255]))
    combined = cv2.bitwise_or(white, yellow)
    return cv2.bitwise_and(image, image, mask=combined)
#show_images(list(map(select_rgb_white_yellow, test_images)))
def convert_hsv(image):
    """Convert an RGB image to the HSV color space."""
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    return hsv
#show_images(list(map(convert_hsv, test_images)))
def convert_hls(image):
    """Convert an RGB image to the HLS color space."""
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    return hls
#show_images(list(map(convert_hls, test_images)))
def select_white_yellow(image):
    """Mask an RGB image so that only white and yellow lane-paint pixels remain."""
    converted = convert_hls(image)
    # high lightness -> white paint
    white_mask = cv2.inRange(converted, np.uint8([0, 200, 0]), np.uint8([255, 255, 255]))
    # a hue band around yellow, high-valued last channel
    yellow_mask = cv2.inRange(converted, np.uint8([10, 0, 100]), np.uint8([40, 255, 255]))
    combined = cv2.bitwise_or(white_mask, yellow_mask)
    return cv2.bitwise_and(image, image, mask=combined)
#mask detect only white not yellow
#white_yellow_images = list(map(select_white_yellow, test_images))
#show_images(white_yellow_images)
def convert_gray_scale(image):
    """Convert an RGB image to a single-channel grayscale image."""
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return gray
def apply_smoothing(image, kernel_size=15):
    """Blur *image* with a square Gaussian kernel.

    kernel_size must be positive and odd.
    """
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(image, ksize, 0)
def detect_edges(image, low_threshold=50, high_threshold=150):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(image, low_threshold, high_threshold)
    return edges
def filter_region(image, vertices):
    """Black out everything outside the polygon(s) described by *vertices*."""
    mask = np.zeros_like(image)
    if mask.ndim == 2:
        cv2.fillPoly(mask, vertices, 255)
    else:
        # multi-channel input: fill all channels of the mask
        cv2.fillPoly(mask, vertices, (255,) * mask.shape[2])
    return cv2.bitwise_and(image, mask)
def select_region(image):
    """Keep only a trapezoidal region of interest; everything else becomes 0 (black)."""
    rows, cols = image.shape[:2]
    polygon = np.array([[
        [cols * 0.1, rows * 0.95],   # bottom left
        [cols * 0.4, rows * 0.6],    # top left
        [cols * 0.6, rows * 0.6],    # top right
        [cols * 0.9, rows * 0.95],   # bottom right
    ]], dtype=np.int32)              # fillPoly requires integer vertices
    return filter_region(image, polygon)
def hough_lines(image):
    """Probabilistic Hough transform on a Canny edge image.

    Returns the raw line segments (not an image with lines drawn).
    """
    params = dict(rho=1, theta=np.pi / 180, threshold=20,
                  minLineLength=40, maxLineGap=20)
    return cv2.HoughLinesP(image, **params)
def draw_lines(image, lines, color=[255, 0, 0], thickness=2, make_copy=True):
    """Draw each Hough segment onto *image* (or a copy when make_copy is True).

    The segments come in cv2.HoughLinesP's (-1, 1, 4) layout.
    """
    canvas = np.copy(image) if make_copy else image
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(canvas, (x1, y1), (x2, y2), color, thickness)
    return canvas
def average_slope_intercept(lines):
    """Average Hough segments into one left and one right lane line.

    Returns a ((slope, intercept), (slope, intercept)) pair; either entry is
    None when no segments fell on that side. Longer segments carry more weight.
    """
    left_lines, left_weights = [], []
    right_lines, right_weights = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # vertical segment: slope undefined
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
            if slope < 0:  # image y grows downward, so negative slope = left lane
                left_lines.append((slope, intercept))
                left_weights.append(length)
            else:
                right_lines.append((slope, intercept))
                right_weights.append(length)

    def _weighted_average(pairs, weights):
        # length-weighted mean of (slope, intercept) pairs
        if len(weights) == 0:
            return None
        return np.dot(weights, pairs) / np.sum(weights)

    return (_weighted_average(left_lines, left_weights),
            _weighted_average(right_lines, right_weights))
def make_line_points(y1, y2, line):
    """Convert a (slope, intercept) line into two integer pixel endpoints.

    Returns ((x1, y1), (x2, y2)), or None when *line* is None.
    """
    if line is None:
        return None
    slope, intercept = line

    def _x_at(y):
        # cv2.line requires integer coordinates
        return int((y - intercept) / slope)

    return ((_x_at(y1), int(y1)), (_x_at(y2), int(y2)))
def lane_lines(image, lines):
    """Turn raw Hough segments into the two averaged lane-line endpoint pairs."""
    left_lane, right_lane = average_slope_intercept(lines)
    y_bottom = image.shape[0]      # bottom edge of the frame
    y_top = y_bottom * 0.6         # slightly below the vertical middle
    return (make_line_points(y_bottom, y_top, left_lane),
            make_line_points(y_bottom, y_top, right_lane))
def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=20):
    """Overlay the (left, right) lane lines on *image* with partial transparency."""
    overlay = np.zeros_like(image)
    for line in lines:
        if line is not None:
            cv2.line(overlay, *line, color, thickness)
    # blend: image * 1.0 + overlay * 0.95; both operands must share a shape
    return cv2.addWeighted(image, 1.0, overlay, 0.95, 0.0)
def shadow(img):
    """Brighten shadow regions of a BGR image.

    Pixels whose luma (first YCbCr channel) lies well below the image mean are
    classified as shadow; the mask is cleaned with an erosion, then the shadow
    pixels are lifted by the lit/shadow intensity statistics before converting
    back to BGR.
    """
    or_img = img
    # or_img = cv2.imread('./data_road/training/image_2/um_000007.png')
    # convert the BGR image to an YCbCr image
    y_cb_cr_img = cv2.cvtColor(or_img, cv2.COLOR_BGR2YCrCb)
    # copy the image to create a binary mask later
    binary_mask = np.copy(y_cb_cr_img)
    # get mean value of the pixels in Y plane
    y_mean = np.mean(cv2.split(y_cb_cr_img)[0])
    # get standard deviation of channel in Y plane
    y_std = np.std(cv2.split(y_cb_cr_img)[0])
    # classify pixels as shadow and non-shadow pixels
    # NOTE(review): these per-pixel Python loops are slow (O(H*W) interpreter
    # work); the classification could be vectorized with numpy, but the uint8
    # arithmetic below is left untouched here to preserve exact behavior.
    for i in range(y_cb_cr_img.shape[0]):
        for j in range(y_cb_cr_img.shape[1]):
            if y_cb_cr_img[i, j, 0] < y_mean - (y_std / 3):
                # paint it white (shadow)
                binary_mask[i, j] = [255, 255, 255]
            else:
                # paint it black (non-shadow)
                binary_mask[i, j] = [0, 0, 0]
    # Using morphological operation
    # The misclassified pixels are
    # removed using dilation followed by erosion.
    kernel = np.ones((3, 3), np.uint8)
    erosion = cv2.erode(binary_mask, kernel, iterations=1)
    # sum of pixel intensities in the lit areas
    spi_la = 0
    # sum of pixel intensities in the shadow
    spi_s = 0
    # number of pixels in the lit areas
    n_la = 0
    # number of pixels in the shadow
    n_s = 0
    # get sum of pixel intensities in the lit areas
    # and sum of pixel intensities in the shadow
    for i in range(y_cb_cr_img.shape[0]):
        for j in range(y_cb_cr_img.shape[1]):
            if erosion[i, j, 0] == 0 and erosion[i, j, 1] == 0 and erosion[i, j, 2] == 0:
                spi_la = spi_la + y_cb_cr_img[i, j, 0]
                n_la += 1
            else:
                spi_s = spi_s + y_cb_cr_img[i, j, 0]
                n_s += 1
    # get the average pixel intensities in the lit areas
    # NOTE(review): if every pixel lands on one side of the split, n_la or
    # n_s is zero and these divisions raise ZeroDivisionError -- confirm the
    # inputs always contain both lit and shadow pixels.
    average_ld = spi_la / n_la
    # get the average pixel intensities in the shadow
    average_le = spi_s / n_s
    # difference of the pixel intensities in the shadow and lit areas
    i_diff = average_ld - average_le
    # get the ratio between average shadow pixels and average lit pixels
    ratio_as_al = average_ld / average_le
    # added these difference
    # NOTE(review): assigning float sums back into the uint8 array relies on
    # numpy's cast-on-assignment; values outside 0..255 do not saturate --
    # verify this is the intended behavior for bright shadows.
    for i in range(y_cb_cr_img.shape[0]):
        for j in range(y_cb_cr_img.shape[1]):
            if erosion[i, j, 0] == 255 and erosion[i, j, 1] == 255 and erosion[i, j, 2] == 255:
                y_cb_cr_img[i, j] = [y_cb_cr_img[i, j, 0] + i_diff, y_cb_cr_img[i, j, 1] + ratio_as_al,
                                     y_cb_cr_img[i, j, 2] + ratio_as_al]
    # convert the YCbCr image to the BGR image
    return cv2.cvtColor(y_cb_cr_img, cv2.COLOR_YCR_CB2BGR)
def thresholding(img):
    """Adaptive Gaussian threshold: local 11x11 binarization of a grayscale image."""
    params = (255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    return cv2.adaptiveThreshold(img, *params)
if __name__=="__main__":
    # NOTE(review): matplotlib._png is a private API that was removed in
    # matplotlib >= 3.4; plt.imread/cv2.imread are the supported readers, but
    # they return differently scaled/ordered data, so the pipeline's uint8 RGB
    # assumptions would need checking before switching.
    test_images = np.array([
        png.read_png_int('./data_road/training/image_2/um_000001.png'),
        png.read_png_int('./data_road/training/image_2/um_000002.png'),
        png.read_png_int('./data_road/training/image_2/um_000003.png'),
        png.read_png_int('./data_road/training/image_2/um_000004.png'),
        png.read_png_int('./data_road/training/image_2/um_000005.png'),
        png.read_png_int('./data_road/training/image_2/um_000006.png'),
    ])
    #show_images(test_images)
    # pipeline: color mask -> grayscale -> blur -> Canny -> ROI -> Hough -> lanes
    white_yellow_images = list(map(select_white_yellow, test_images))
    #show_images(white_yellow_images)
    gray_images = list(map(convert_gray_scale, white_yellow_images))
    #show_images(gray_images)
    #gray_images = list(map(thresholding, gray_images))
    blurred_images = list(map(lambda image: apply_smoothing(image), gray_images))
    #show_images(blurred_images)
    edge_images = list(map(lambda image: detect_edges(image), blurred_images))
    #show_images(edge_images)
    roi_images = list(map(select_region, edge_images))
    #show_images(roi_images)
    list_of_lines = list(map(hough_lines, roi_images))
    lane_images = []
    for image, lines in zip(test_images, list_of_lines):
        lane_images.append(draw_lane_lines(image, lane_lines(image, lines)))
    show_images(lane_images)
| ghazalsaf/mobNavigation | road_detect_hls.py | road_detect_hls.py | py | 10,716 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "... |
import streamlit as st
import datetime
import requests
import json
import pandas as pd
import time

# Page selector.
# fix: the old option list was ['users', 'checkin', 'checkout'], but no
# 'checkout' page exists (check-out is a button inside the 'checkin' page)
# while the 'bookings' page below was unreachable; 'bookings' is now offered.
page = st.sidebar.selectbox('chose your page', ['users', 'checkin', 'bookings'])
if page == 'users':
    # user registration page
    st.title('ユーザー登録画面')
    with st.form(key='user'):
        username: str = st.text_input('ユーザー名', max_chars=12)
        # request payload for the registration API
        data = {
            'username': username
        }
        submit_button = st.form_submit_button(label='ユーザー登録')
        if submit_button:
            # register the user against the local backend API
            url = 'http://127.0.0.1:8000/users'
            res = requests.post(
                url,
                data=json.dumps(data)
            )
            if res.status_code == 200:
                st.success("ユーザー登録完了")
                st.json(res.json())
elif page == 'checkin':
    # check-in / check-out page (also offers user deletion)
    st.title('checkin画面')
    # fetch the list of registered users
    url_users = 'http://127.0.0.1:8000/users'
    res = requests.get(url_users)
    users = res.json()
    # map username (key) -> user_id (value) for the selectbox below
    users_name = {}
    for user in users:
        users_name[user['username']] = user['user_id']
    with st.form(key='checkin'):
        url_check = 'http://127.0.0.1:8000/checkin'
        url_users_delete = 'http://127.0.0.1:8000/users_delete'
        username: str = st.selectbox('名前', list(users_name.keys()))
        submit_button_users_delete = st.form_submit_button(label='ユーザー削除')
        submit_button_checkin = st.form_submit_button(label='checkin実行')
        submit_button_checkout = st.form_submit_button(label='checkout実行')
        # users delete
        if submit_button_users_delete:
            data_users_delete = {'username': username}
            res = requests.post(
                url_users_delete,
                data=json.dumps(data_users_delete)
            )
            if res.status_code == 200:
                st.success(f"{username}の削除完了しました")
                st.json(res.json())
        # checkin
        if submit_button_checkin:
            data_checkin = {'user_id': users_name[username], 'status': 'checkin'}
            res = requests.post(
                url_check,
                data=json.dumps(data_checkin)
            )
            if res.status_code == 200:
                st.success(f"{username}のcheckin完了しました")
                #st.json(res.json())
                # re-fetch and show this user's check-in records
                url_check_query = 'http://127.0.0.1:8000/checkin/'
                res = requests.get(f"{url_check_query}?user_id={users_name[username]}")
                records = res.json()
                df_records = pd.DataFrame(records)
                st.table(df_records)
        # checkout
        # NOTE(review): nearly identical to the checkin handler above apart
        # from the status value -- a candidate for a shared helper.
        if submit_button_checkout:
            data_checkout = {'user_id': users_name[username], 'status': 'checkout'}
            res = requests.post(
                url_check,
                data=json.dumps(data_checkout)
            )
            if res.status_code == 200:
                st.success(f"{username}のcheckout完了しました。")
                #st.json(res.json())
                url_check_query = 'http://127.0.0.1:8000/checkin/'
                res = requests.get(f"{url_check_query}?user_id={users_name[username]}")
                records = res.json()
                df_records = pd.DataFrame(records)
                st.table(df_records)
elif page == 'bookings':
    # meeting-room booking page
    # NOTE(review): verify that the sidebar selectbox at the top actually
    # offers 'bookings' -- otherwise this branch is unreachable.
    st.title('会議室予約画面')
    # fetch the list of registered users
    url_users = 'http://127.0.0.1:8000/users'
    res = requests.get(url_users)
    users = res.json()
    # map username (key) -> user_id (value)
    users_name = {}
    for user in users:
        users_name[user['username']] = user['user_id']
    # fetch the list of meeting rooms
    url_rooms = 'http://127.0.0.1:8000/rooms'
    res = requests.get(url_rooms)
    rooms = res.json()
    rooms_name = {}
    for room in rooms:
        rooms_name[room['room_name']] = {
            'room_id': room['room_id'],
            'capacity': room['capacity']
        }
    st.write('### 会議室一覧')
    df_rooms = pd.DataFrame(rooms)
    df_rooms.columns = ['会議室名', '定員', '会議室ID']
    st.table(df_rooms)
    url_bookings = 'http://127.0.0.1:8000/bookings'
    res = requests.get(url_bookings)
    bookings = res.json()
    # NOTE(review): if no bookings exist yet the DataFrame has no columns and
    # the .map() calls below raise KeyError -- confirm the empty case.
    df_bookings = pd.DataFrame(bookings)
    users_id = {}
    for user in users:
        users_id[user['user_id']] = user['username']
    rooms_id = {}
    for room in rooms:
        rooms_id[room['room_id']] = {
            'room_name': room['room_name'],
            'capacity': room['capacity'],
        }
    # translate raw IDs / ISO timestamps into display values
    to_username = lambda x: users_id[x]
    to_room_name = lambda x: rooms_id[x]['room_name']
    to_datetime = lambda x: datetime.datetime.fromisoformat(x).strftime('%Y/%m/%d %H:%M')
    # apply to the relevant columns
    df_bookings['user_id'] = df_bookings['user_id'].map(to_username)
    df_bookings['room_id'] = df_bookings['room_id'].map(to_room_name)
    df_bookings['start_datetime'] = df_bookings['start_datetime'].map(to_datetime)
    df_bookings['end_datetime'] = df_bookings['end_datetime'].map(to_datetime)
    df_bookings = df_bookings.rename(columns={
        'user_id': '予約者名',
        'room_id': '会議室名',
        'booked_num': '予約人数',
        'start_datetime': '開始時間',
        'end_datetime': '終了時間',
        'booking_id': '予約番号'
    })
    st.write('### 予約一覧')
    st.table(df_bookings)
    with st.form(key='booking'):
        #booking_id: int = random.randint(0, 10)
        username: str = st.selectbox('予約者名', list(users_name.keys()))
        room_name: str = st.selectbox('会議室名', list(rooms_name.keys()))
        booked_num: int = st.number_input('予約人数', step=1, min_value=1)
        date = st.date_input('日付を入力', min_value=datetime.date.today())
        start_time = st.time_input('開始時刻: ', value=datetime.time(hour=9, minute=0))
        end_time = st.time_input('終了時刻: ', value=datetime.time(hour=20, minute=0))
        submit_button = st.form_submit_button(label='予約登録')
        if submit_button:
            user_id: int = users_name[username]
            room_id: int = rooms_name[room_name]['room_id']
            capacity: int = rooms_name[room_name]['capacity']
            # request payload; datetimes are sent as ISO-8601 strings
            data = {
                'user_id': user_id,
                'room_id': room_id,
                'booked_num': booked_num,
                'start_datetime': datetime.datetime(
                    year=date.year,
                    month=date.month,
                    day=date.day,
                    hour=start_time.hour,
                    minute=start_time.minute
                ).isoformat(),
                'end_datetime': datetime.datetime(
                    year=date.year,
                    month=date.month,
                    day=date.day,
                    hour=end_time.hour,
                    minute=end_time.minute
                ).isoformat()
            }
            # only submit when the headcount fits the room's capacity
            if booked_num <= capacity:
                url = 'http://127.0.0.1:8000/bookings'
                res = requests.post(
                    url,
                    data=json.dumps(data)
                )
                if res.status_code == 200:
                    st.success('予約完了しました')
                    st.json(res.json())
            else:
                st.error(f'{room_name}の定員は{capacity}名です')
| terotero57/tes | app.py | app.py | py | 7,125 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "streamlit.title",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "stre... |
import itertools

# Read K (part of the input format) and the two limits:
# a[0] bounds the primes usable in a combination, a[1] additionally bounds
# the largest prime for the second tally.
k = int(input())
a = list(map(int, input().split()))

# Sieve of Eratosthenes over [0, LIMIT)
LIMIT = 100000
soe = [True] * LIMIT
for i in range(2, LIMIT):
    if soe[i]:
        for j in range(i + i, LIMIT, i):
            soe[j] = False

# primes within the first given limit
p = [i for i in range(2, LIMIT) if soe[i] and i <= a[0]]

c = 0  # combinations whose element sum is itself prime
g = 0  # of those, combinations whose largest prime is <= a[1]
for j in range(1, len(p) + 1):
    for combo in itertools.combinations(p, j):
        # fix: compute the sum once, and guard against sums beyond the sieve
        # range (the old soe[sum(i)] lookup could raise IndexError)
        s = sum(combo)
        if s < LIMIT and soe[s]:
            c += 1
            if max(combo) <= a[1]:
                g += 1
print(c % (10 ** 16), g)
| jay8299/practice_cp | python_prac/smarttraining_infytq.py | smarttraining_infytq.py | py | 572 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.combinations",
"line_number": 18,
"usage_type": "call"
}
] |
#!/usr/bin/env python3
import sys
import re
import sqlite3
import codecs

# Length of one CDDA season in days.
# NOTE(review): not referenced anywhere in this module -- parse_player
# hard-codes the 14/28/42 season offsets instead; confirm before removing.
season_length = 14
def __grab(term, lines):
    """Return the first line containing *term*, or None when no line matches."""
    matches = (line for line in lines if term in line)
    return next(matches, None)
def __get_line(term, lines):
    """Return the 1-based index of the first line containing *term* (None if absent)."""
    for num, line in enumerate(lines, start=1):
        if term in line:
            return num
def __updatedb(dbPath, p):
    """Insert one parsed CDDA death record *p* into the scores database.

    Creates the ``cdda_scores`` table on first use of *dbPath*.
    @parameter dbPath (str): path to the sqlite database file
    @parameter p (dict): record produced by parse_player()
    """
    conn = sqlite3.connect(dbPath)
    try:
        # fix: query for the specific table instead of scanning every table
        # name in Python
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='cdda_scores';")
        if cur.fetchone() is None:
            conn.execute('''CREATE TABLE cdda_scores
            (id integer primary key,
            name text,
            prof text,
            days integer,
            dist integer,
            kills integer,
            hshot integer,
            dmg integer,
            heal integer,
            cause text,
            last text,
            fmsg text,
            ver text,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP);''')
        conn.execute('''INSERT INTO cdda_scores (name, prof, days, dist, kills, hshot, dmg, heal, cause, last, fmsg, ver, created_at)
        values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);''',
                     (str(p["name"]), str(p["prof"]), int(p["days"]), int(p["dist"]), int(p["kills"]), int(p["hshot"]), int(p["dmg"]), int(p["heal"]), p["cause"], str(p["last"]), str(p["fmsg"]), str(p["ver"]), str(p["date"])))
        conn.commit()
    finally:
        # fix: the connection used to leak when the insert/commit raised
        conn.close()
def parse_player(date, linelist):
    """Parse one CDDA memorial file (as a list of lines) into a record dict.

    @parameter date (str): timestamp string derived from the memorial filename
    @parameter linelist (list of str): lines of the memorial file
    @return dict with name, profession, days survived, stats, cause of death,
        last words, final message, game version and the given date.

    NOTE(review): helpers like __grab return None when a phrase is missing,
    which makes the following split() raise -- this assumes every memorial
    file contains the expected lines; confirm for very old game versions.
    """
    ## Get version ##
    ver = __grab("Cataclysm - Dark Days Ahead version", linelist)
    ver = ver.split(" ")[6].strip()
    ## Get name ##
    name = __grab("In memory of:", linelist)
    name = name.split(':')[1].strip()
    ## Get last words, if any ##
    # fix: the old interim 'cause' assignments in this branch were dead code
    # (unconditionally overwritten below), so they are removed here.
    last_words = __grab("Last words:", linelist)
    if last_words is not None:
        last_words = last_words.split("|")[3].split(":")[1].strip()
    ## Get cause of death and check if suicide ##
    cause = __grab("was killed in", linelist)
    cause = cause.split(' ', 1)[1].strip()
    suicide_check = __grab("committed suicide", linelist)
    if suicide_check is not None:
        cause = re.sub('was killed', 'committed suicide', cause)
    ## Get profession ##
    prof = __grab("when the apocalypse began", linelist)
    prof = prof.split("when the apocalypse began")[0]
    ## Ignore "he/she was a " words
    temp = prof.split(" ")
    prof = " ".join(temp[3:]).strip()
    ## Find number of kills ##
    kills = __grab("Total kills", linelist)
    if kills is not None:
        kills = int(kills.split(':')[1].strip())
    else:
        kills = 0
    ## Find days survived ##
    day_lines = linelist[-1]
    season = day_lines.split(",")[1].strip()
    season = season[:-2]
    # raw string avoids the invalid-escape DeprecationWarning for \d
    num_list = re.findall(r"\d+", day_lines)
    # a CDDA year is 4 seasons of 14 days = 56 days
    days = (int(num_list[0]) - 1) * 56 + int(num_list[1])
    if season == "Summer":
        days += 14
    elif season == "Autumn":
        days += 28
    elif season == "Winter":
        days += 42
    ## Get Distance Traveled ##
    dist = __grab("Distance walked", linelist).split(":")[1].strip()
    dist = dist.split(" ")[0]
    ## Get Damage taken ##
    dmg = __grab("Damage taken", linelist).split(":")[1].strip()
    dmg = dmg.split(" ")[0]
    ## Get Damage healed ##
    heal = __grab("Damage healed", linelist).split(":")[1].strip()
    heal = heal.split(" ")[0]
    ## Get Headshots ##
    hshot = __grab("Headshots", linelist).split(":")[1].strip()
    hshot = hshot.split(" ")[0]
    ## Get final message ##
    num = __get_line("Kills", linelist)
    final_message = linelist[num - 3].strip()
    ## Get final message given to player, there are probably more things I need to filter out. ##
    if final_message.count('Unknown') == 0 and final_message.count('Safe mode') == 0:
        final_message = final_message.split(' ', 1)[1]
        final_message = final_message[:-1]
        final_message = re.sub('AM|PM', '', final_message).strip()
    player = {
        "name": name,
        "prof": prof,
        "days": days,
        "dist": dist,
        "kills": kills,
        "hshot": hshot,
        "dmg": dmg,
        "heal": heal,
        "cause": cause,
        "last": last_words,
        "fmsg": final_message,
        "ver": ver,
        "date": date
    }
    return player
#if __name__ == "__main__":
def autopsy_cdda(filePath, dbPath):
    """Parse a CDDA memorial file and store the resulting player record in the DB.

    The death timestamp is recovered from the file name, which is expected to
    embed six dash-separated fields: ...-YYYY-MM-DD-hh-mm-ss.<ext>
    """
    with codecs.open(filePath, 'r', 'utf-8') as autopsy:
        linelist = autopsy.readlines()
    parts = filePath.split("-")[1:7]
    parts[5] = parts[5][:-4]  # strip the 4-character file extension (e.g. ".txt")
    date = "{}-{}-{} {}:{}:{}".format(*parts)
    p = parse_player(date, linelist)
    __updatedb(dbPath, p)
    return
| phantom-voltage/mortician | scripts/cdda.py | cdda.py | py | 5,313 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 156,
... |
35219965731 | from ast import main
import collections
from statistics import mean
import numpy as np
import random
from collections import defaultdict
import matplotlib.pyplot as plt
class MonteCarlo():
    """Monte Carlo policy evaluation/control experiments on a 5x5 gridworld.

    The grid has obstacles at (2, 2) and (3, 2) and an absorbing goal at
    (4, 4).  Transitions are noisy: the intended move succeeds with
    probability 0.8, veers 90 degrees right/left with probability 0.05 each,
    and the agent stays put with probability 0.1; moves into walls or
    obstacles bounce back to the current state.
    """

    def __init__(self, gamma):
        """Build the model tables and reference (optimal) policy/values.

        gamma: discount factor used when accumulating returns.
        """
        self.actions = [(-1,0), (0,1), (1,0), (0,-1)] # up, right, down, left
        self.arrows = ["↑", "→","↓", "←"]
        self.gamma = gamma
        # State-value estimates V(s), indexed [row][col].
        self.v = np.array([[0.0 for j in range(5)] for i in range(5)])
        # self.policy = np.array([["" for j in range(5)] for i in range(5)])
        self.policy = np.array([[(0, 1) for j in range(5)] for i in range(5)])
        # self.states = np.array([[(i, j) for j in range(5)] for i in range(5)])
        self.states = [(i, j) for j in range(5) for i in range(5)]
        # Hand-coded optimal policy pi*; used to generate evaluation episodes.
        self.pi_star = [[(0, 1), (0, 1), (0, 1), (1, 0), (1, 0)],
                        [(0, 1), (0, 1), (0, 1), (1, 0), (1, 0)],
                        [(-1, 0), (-1, 0), (-1, 0), (1, 0), (1, 0)],
                        [(-1, 0), (-1, 0), (-1, 0), (1, 0), (1, 0)],
                        [(-1, 0), (-1, 0), (0, 1), (0, 1), (-1, 0)]]
        # Reference optimal state values; Monte Carlo estimates are compared to these.
        self.v_star = [[4.0187,4.5548,5.1576,5.8337,6.4553],
                       [4.3716,5.0324,5.8013,6.6473,7.3907],
                       [3.8672,4.39, 0.0, 7.5769,8.4637],
                       [3.4183,3.8319,0.0, 8.5738,9.6946],
                       [2.9978,2.9309,6.0733,9.6946,0.]]
        # print(self.states)
        # Action-value estimates Q(s, a), indexed [row][col][action_index].
        self.q = np.array([[[0.0 for a in range(len(self.actions))] for j in range(5)] for i in range(5)])
        self.pi_esoft = collections.defaultdict(list) # key: state, value: list(best action)
        for s in self.states:
            self.pi_esoft[s] = self.actions
        # Epsilon-soft behaviour policy as explicit per-action probabilities (uniform start).
        self.test_pol = np.array([[[0.25 for k in range(4)]for j in range(5)] for i in range(5)])
        pass

    def transition_function(self):
        """Build the tabular transition model.

        Returns a defaultdict keyed by (state, next_state); each value is a
        list with one probability per action, appended in self.actions order
        (so it can later be indexed with the action's index).  Probabilities
        combine the 0.8/0.05/0.05/0.1 noise model with bounce-back mass from
        walls and from the obstacles at (2, 2) and (3, 2).
        """
        counter = 0
        p = defaultdict(list)
        for state in self.states:
            # (0, 0) represents "stay in place".
            for next_direction in [(-1,0), (0,1), (1,0), (0,-1), (0,0)]:
                # print("next_direction = {}".format(next_direction[0]))
                # print("state = {}".format(state))
                next_state = (state[0] + next_direction[0], state[1] + next_direction[1])
                # print(next_state)
                # print()
                for action in self.actions:
                    prob = 0
                    # Skip next states that fall outside the 5x5 grid entirely.
                    if ((next_state[0] < 0) or (next_state[1] < 0) or (next_state[0] > 4) or (next_state[1] > 4)):
                        continue
                    # No transitions out of an obstacle cell.
                    if ((state[0] == 2) and (state[1] == 2)) or ((state[0] == 3) and (state[1] == 2)):
                        prob = 0
                        p[state, next_state].append(prob)
                        continue
                    # No transitions into an obstacle cell.
                    if ((next_state[0] == 2) and (next_state[1] == 2)) or ((next_state[0] == 3) and (next_state[1] == 2)):
                        prob = 0
                        p[state, next_state].append(prob)
                        continue
                    # The goal (4, 4) is absorbing: stays there with probability 1.
                    if ((state[0] == 4) and (state[1] == 4)):
                        if ((next_state[0] == 4) and (next_state[1] == 4)):
                            prob = 1
                        else:
                            prob = 0
                        p[state, next_state].append(prob)
                        continue
                    # if ((state[0] == 0) and (state[1] == 2)):
                    #     if ((next_state[0] == 0) and (next_state[1] == 2)):
                    #         prob = 1
                    #     else:
                    #         prob = 0
                    #     p[state, next_state].append(prob)
                    #     continue
                    # Base noise model: 0.8 intended, 0.05 each perpendicular, 0.1 stay.
                    if action == next_direction:
                        prob = 0.8
                    elif (next_direction == self.actions[(self.actions.index(action) + 1) % 4]) or (next_direction == self.actions[(self.actions.index(action) - 1) % 4]):
                        prob = 0.05
                    elif (next_direction == (0,0)):
                        prob = 0.1
                    # Bounce-back mass for blocked outcomes.
                    # NOTE(review): these additions are applied for every surviving
                    # next_direction, not only the stay transition (0, 0) — confirm
                    # this matches the intended noise model.
                    if ((state[0] + action[0]) == 2 and (state[1] + action[1]) == 2): # going into the obstacle directly when moving towards the obstacle
                        prob += 0.8
                    if ((state[0] + action[0]) == 3 and (state[1] + action[1]) == 2):# going into the obstacle directly when moving towards the obstacle
                        prob += 0.8
                    if (((state[0] + action[0]) < 0) or ((state[1] + action[1]) < 0) or ((state[0] + action[0]) > 4) or ((state[1] + action[1]) > 4)): # going directly into the wall
                        prob += 0.8
                    if (((state[0] + self.actions[(self.actions.index(action) + 1) % 4][0]) == 2) and ((state[1] + self.actions[(self.actions.index(action) + 1) % 4][1]) == 2)): # going into the obstacle mistakenly towrads right
                        prob += 0.05
                    if (((state[0] + self.actions[(self.actions.index(action) + 1) % 4][0]) == 3) and ((state[1] + self.actions[(self.actions.index(action) + 1) % 4][1]) == 2)):# going into the obstacle
                        prob += 0.05
                    if (((state[0] + self.actions[(self.actions.index(action) - 1) % 4][0]) == 2) and ((state[1] + self.actions[(self.actions.index(action) - 1) % 4][1]) == 2)): # going into the obstacle
                        prob += 0.05
                    if (((state[0] + self.actions[(self.actions.index(action) - 1) % 4][0]) == 3) and ((state[1] + self.actions[(self.actions.index(action) - 1) % 4][1]) == 2)):# going into the obstacle
                        prob += 0.05
                    if ((state[0] + self.actions[(self.actions.index(action) + 1) % 4][0]) < 0): # going into the wall
                        prob += 0.05
                    if ((state[1] + self.actions[(self.actions.index(action) + 1) % 4][1]) < 0): # going into the wall
                        prob += 0.05
                    if ((state[0] + self.actions[(self.actions.index(action) + 1) % 4][0]) > 4): # going into the wall
                        prob += 0.05
                    if ((state[1] + self.actions[(self.actions.index(action) + 1) % 4][1]) > 4): # going into the wall
                        prob += 0.05
                    if ((state[0] + self.actions[(self.actions.index(action) - 1) % 4][0]) < 0): # going into the wall
                        prob += 0.05
                    if ((state[1] + self.actions[(self.actions.index(action) - 1) % 4][1]) < 0): # going into the wall
                        prob += 0.05
                    if ((state[0] + self.actions[(self.actions.index(action) - 1) % 4][0]) > 4): # going into the wall
                        prob += 0.05
                    if ((state[1] + self.actions[(self.actions.index(action) - 1) % 4][1]) > 4): # going into the wall
                        prob += 0.05
                    # print("state = {}, action = {}, next_state = {}, prob = {}".format(state, action, next_state, round(prob, 3)))
                    p[state, next_state].append(round(prob, 3))
        # print(len(p))
        return p

    def trans_func(self, s, a):
        """Sample the next state for action *a* in state *s*.

        Args:
            s = tuple(row, col)
            a = action (tuple)
        Returns:
            next state (tuple)

        Implements the 0.8/0.05/0.05/0.1 noise model; outcomes landing in a
        wall or obstacle bounce back to *s*.  The goal (4, 4) is absorbing.
        """
        if s == (4, 4): return s
        rand = random.uniform(0, 1)
        if rand < 0.8:
            s_prime = (s[0] + a[0], s[1] + a[1])
        elif 0.8 < rand < 0.85:
            # Veer one step clockwise from the intended action.
            a = self.actions[(self.actions.index(a) + 1) % 4]
            s_prime = (s[0] + a[0], s[1] + a[1])
        elif 0.85 < rand < 0.9:
            # Veer one step counter-clockwise from the intended action.
            a = self.actions[(self.actions.index(a) - 1) % 4]
            s_prime = (s[0] + a[0], s[1] + a[1])
        else:
            s_prime = s
        # Bounce back from obstacles and the grid boundary.
        if (s_prime == (2,2)) or (s_prime == (3,2)) or (s_prime[0] < 0) or (s_prime[0] > 4) or (s_prime[1] < 0) or (s_prime[1] > 4):
            s_prime = s
        return s_prime

    def reward(self, s, a, s_prime):
        """Reward for the transition (s, a, s_prime): +10 entering the goal
        (4, 4), -10 entering the penalty cell (4, 2), 0 otherwise (and 0 once
        already at the goal)."""
        if (s == (4, 4)):
            return 0
        elif s_prime == (4, 4):
            return 10
        elif s_prime == (4, 2):
            return -10
        # elif s_prime == (0, 2):
        #     # return 5
        #     return 4.4844 # found using binary search
        else:
            return 0

    def d0(self):
        """Sample a start state uniformly from all non-obstacle cells."""
        states = self.states.copy()
        states.remove((2,2))
        states.remove((3,2))
        random_index = random.randint(0,len(states)-1)
        return states[random_index]

    def run(self, threshold):
        """Value iteration until the max value change falls below *threshold*.

        Returns (v, policy, iteration_count).
        """
        # self.v = np.array([[0 for j in range(5)] for i in range(5)])
        p = self.transition_function()
        count = 0
        while True:
            count += 1
            delta = 0
            v_old = np.copy(self.v)
            # print(np.amax(v_old))
            for s in self.states:
                max_val = -float("inf")
                max_a = None
                for i, a in enumerate(self.actions):
                    val = 0
                    for s_prime in self.states:
                        # print(s, s_prime)
                        try:
                            # print(v_old[s_prime])
                            # p[s, s_prime] holds one probability per action,
                            # in self.actions order, hence the index i.
                            # (state pairs absent from p raise and are skipped)
                            val += p[s, s_prime][i]*(self.reward(s, a, s_prime) + (self.gamma*v_old[s_prime]))
                        except:
                            continue
                    # print("val = {}".format(val))
                    if max_val < val:
                        max_val = val
                        max_a = i
                # if (s == (1, 1)): print("val = {}".format(val))
                self.v[s] = round(max_val, 4)
                self.policy[s] = self.actions[max_a]
            delta = max(delta, np.amax(abs(self.v - v_old)))
            if delta < threshold:
                break
        return self.v, self.policy, count

    def pi_func(self, pi, s):
        """Look up the deterministic action for state *s* in policy grid *pi*."""
        # self.gamma = 0.9
        # v_star, pi_star, iterations = self.run(0.0001)
        return pi[s[0]][s[1]]

    def v_star_func(self, s):
        """Return the reference optimal value for state *s*."""
        # self.gamma = 0.9
        # v_star, pi_star, iterations = self.run(0.0001)
        return self.v_star[s[0]][s[1]]

    def generateEpisode(self, pi):
        """Roll out one episode under deterministic policy grid *pi*.

        Returns a list of (state, reward) pairs ending with ((4, 4), 0).
        """
        trajectory = []
        s = self.d0()
        while(s != (4, 4)):
            a = self.pi_func(pi, s)
            s_prime = self.trans_func(s, a)
            r = self.reward(s, a, s_prime)
            trajectory.append((s, r))
            s = s_prime
        trajectory.append(((4,4), 0))
        return trajectory

    def pi_esoft_func(self, pi, s, eps):
        """Sample an action for state *s* from the stochastic policy *pi*.

        Args:
            pi: per-state action probabilities indexed as pi[row][col]
                (a (5, 5, 4) array such as self.test_pol — note this differs
                from the dict-based self.pi_esoft representation)
            s = tuple(row, col)
            eps = float (unused here; kept for interface compatibility)
        Returns:
            action in state s (tuple)
        """
        # rand = random.uniform(0, 1)
        # A_star = pi[s]
        # A = self.actions
        # A_ = list(set(A) - set(A_star))
        # # print(A_star, A, A_)
        # prob = ((1- eps)/len(A_star)) + (eps/len(A))
        # for i in range(len(A_star)):
        #     if prob*(i) < rand < prob*(i+1):
        #         return A_star[i]
        # for i in range(len(A_)):
        #     if (prob*len(A_star) + (eps/len(A))*(i)) < rand < (prob*len(A_star) + (eps/len(A))*(i+1)):
        #         return A_[i]
        # print(s)
        chosen_action = np.random.choice(4, 1, p=pi[s[0]][s[1]])
        return self.actions[chosen_action[0]]

    def policy_prob(self, s, a, pi, eps):
        """Probability of action *a* in state *s* under the epsilon-soft policy
        *pi* (dict of state -> list of greedy actions)."""
        A_star = pi[s]
        A = self.actions
        # print(a, A_star)
        if a in A_star:
            # print(((1- eps)/len(A_star)) + (eps/len(A)))
            return ((1- eps)/len(A_star)) + (eps/len(A))
        else:
            # print("else condition")
            # print((eps/len(A)))
            return (eps/len(A))

    def generateEpisode_esoft(self, pi, eps):
        """Roll out one episode under an epsilon-soft policy.

        Args:
            pi = per-state action probabilities (see pi_esoft_func)
            eps

        Returns a list of ((state, action), reward) pairs ending with the
        terminal entry (((4, 4), (0, 1)), 0).
        """
        trajectory = []
        s = self.d0()
        while(s != (4, 4)):
            a = self.pi_esoft_func(pi, s, eps)
            s_prime = self.trans_func(s, a)
            r = self.reward(s, a, s_prime)
            trajectory.append(((s, a), r))
            s = s_prime
        trajectory.append((((4,4), (0, 1)), 0))
        return trajectory

    def first_visit(self,):
        """First-visit Monte Carlo evaluation of pi_star.

        Iterates until the max norm against v_star drops below 0.1; plots the
        max-norm curve and returns the number of episodes used.
        """
        returns = collections.defaultdict(list)
        max_norm = []
        count = 0
        while True:
            count += 1
            episode = self.generateEpisode(self.pi_star)
            # print("episode = {}".format(episode))
            states_present = []
            rewards = []
            for s, r in episode:
                states_present.append(s)
                rewards.append(r)
            for s in set(states_present):
                # Only the first occurrence of s contributes a return.
                first_index = states_present.index(s)
                G = 0
                temp_rewards = rewards[first_index:]
                # print("temp_rewards = {}".format(temp_rewards))
                for pow in range(len(temp_rewards)):
                    G += (self.gamma**pow) * temp_rewards[pow]
                # print(G)
                returns[s].append(G)
                self.v[s[0]][s[1]] = mean(returns[s])
            max_norm.append(np.amax(abs(self.v - self.v_star)))
            if np.amax(abs(self.v - self.v_star)) < 0.1:
                break
        print("max norm = {}".format(max_norm[-1]))
        print("Iterations to converge = {}".format(count))
        plt.plot(max_norm)
        plt.title("Max norm")
        plt.xlabel("Iterations")
        plt.ylabel("Max norm")
        plt.show()
        return count

    def every_visit(self, ):
        """Every-visit Monte Carlo evaluation of pi_star.

        Same as first_visit but every occurrence of a state contributes a
        return; capped at 10000 episodes.
        """
        returns = collections.defaultdict(list)
        max_norm = []
        count = 0
        while True:
            count += 1
            episode = self.generateEpisode(self.pi_star)
            # print("episode = {}".format(episode))
            states_present = []
            rewards = []
            for s, r in episode:
                states_present.append(s)
                rewards.append(r)
            for i, s in enumerate(states_present):
                G = 0
                temp_rewards = rewards[i:]
                # print("temp_rewards = {}".format(temp_rewards))
                for pow in range(len(temp_rewards)):
                    G += (self.gamma**pow) * temp_rewards[pow]
                # print(G)
                returns[s].append(G)
                # print(returns)
                self.v[s[0]][s[1]] = mean(returns[s])
            max_norm.append(np.amax(abs(self.v - self.v_star)))
            if np.amax(abs(self.v - self.v_star)) < 0.1 or count > 10000:
                break
        print("max norm = {}".format(max_norm[-1]))
        print("Iterations to converge = {}".format(count))
        plt.plot(max_norm)
        plt.title("Max norm")
        plt.xlabel("Iterations")
        plt.ylabel("Max norm")
        plt.show()
        return count

    def e_soft(self, eps, decay=False):
        """On-policy first-visit Monte Carlo control with an epsilon-soft policy.

        eps: exploration probability; when *decay* is True it is reduced by
        0.05 every 500 episodes.  Runs 10000+ episodes, tracking MSE against
        v_star every 250 episodes, then plots the MSE curve.
        """
        returns = collections.defaultdict(list)
        max_norm = []
        count = 0
        mse = []
        itr_number = []
        # NOTE(review): `True and` is redundant; the loop runs while eps > 0.
        while True and eps > 0:
            count += 1
            if decay and count % 500 == 0:
                eps -= 0.05
            # episode = self.generateEpisode_esoft(self.pi_esoft, eps)
            episode = self.generateEpisode_esoft(self.test_pol, eps)
            # print("episode = {}".format(episode))
            states_action_present = []
            rewards = []
            for s_a, r in episode:
                states_action_present.append(s_a)
                rewards.append(r)
            for s_a in set(states_action_present):
                # First-visit return for this (state, action) pair.
                first_index = states_action_present.index(s_a)
                G = 0
                temp_rewards = rewards[first_index:]
                for pow in range(len(temp_rewards)):
                    G += (self.gamma**pow) * temp_rewards[pow]
                # print("state= {}, action = {}".format(s_a[0], s_a[1]))
                returns[s_a].append(G)
                index_a = self.actions.index(s_a[1])
                row = s_a[0][0]
                col = s_a[0][1]
                # print("q update value = {}".format(mean(returns[s_a])))
                self.q[row][col][index_a] = mean(returns[s_a]) # Update q
                # print("udated q value = {}".format(self.q[row][col][index_a]))
                best_a_list = []
                # print("list of best actions b4 = {}".format(best_a_list))
                best_qsa = -float("inf")
                # for i, expl_a in enumerate(self.actions):
                #     if best_qsa < self.q[row][col][i]:
                #         best_qsa = self.q[row][col][i]
                #         best_a_list = [expl_a]
                #     elif best_qsa == self.q[row][col][i]:
                #         best_a_list.append(expl_a)
                # self.pi_esoft[s_a[0]] = best_a_list
                # Collect the indices of all greedy actions (ties included).
                for i, expl_a in enumerate(self.actions):
                    if best_qsa < self.q[row][col][i]:
                        best_qsa = self.q[row][col][i]
                        best_a_list = [i]
                    elif best_qsa == self.q[row][col][i]:
                        best_a_list.append(i)
                # print("list of best actions after = {}".format(best_a_list))
                not_best_list = list(set(range(4)) - set(best_a_list))
                # Epsilon-soft improvement: greedy actions share 1-eps plus the
                # exploration floor; the rest keep only the floor eps/|A|.
                new_prob = max(0, ((1- eps)/len(best_a_list)) + (eps/len(self.actions)))
                remaining_prob = (eps/len(self.actions))
                np.put(self.test_pol[row][col], best_a_list, [new_prob]*len(best_a_list))
                np.put(self.test_pol[row][col], not_best_list, [remaining_prob]*len(not_best_list))
            for s in self.states:
                # V(s) = sum_a pi(a|s) * Q(s, a)
                # self.v[s[0]][s[1]] = sum([self.policy_prob(s, a, self.pi_esoft, eps)*self.q[s[0]][s[1]][a_index] for a_index, a in enumerate(self.actions)])
                self.v[s[0]][s[1]] = sum([self.test_pol[s[0]][s[1]][a_index]*self.q[s[0]][s[1]][a_index] for a_index, a in enumerate(self.actions)])
            max_norm.append(np.amax(abs(self.v - self.v_star)))
            # if np.amax(abs(self.v - self.v_star)) < 0.1:
            if count % 250 == 0:
                mse.append(self.mse(self.v, self.v_star))
                itr_number.append(count)
            if count > 10000:
                break
        print("max norm = {}".format(np.amax(abs(self.v - self.v_star))))
        print("MSE = {}".format(mse[-1]))
        # plt.plot(max_norm)
        # plt.title("Plotting Max norm")
        plt.plot(itr_number, mse)
        # plt.title("Mean squared Error for eps = {}".format(eps))
        plt.title("Mean squared Error for eps = {}".format("decaying"))
        plt.xlabel("Iterations")
        plt.ylabel("MSE")
        plt.show()
        # print(returns)
        pass

    def mse(self, m1, m2):
        """Mean squared error between two equally-shaped arrays."""
        return np.square(np.subtract(m1, m2)).mean()
def main():
    """Run the first-visit, every-visit and epsilon-soft Monte Carlo experiments.

    Results (value tables and convergence info) are printed; the MonteCarlo
    methods also show their own matplotlib plots.
    """
    # gamma = 0.9133
    gamma = 0.9
    monte_carlo = MonteCarlo(gamma=gamma)
    print("running first visit monte carlo")
    print(monte_carlo.first_visit())
    print(monte_carlo.v)
    print("running the every visit monte carlo")
    print(monte_carlo.every_visit())
    print(monte_carlo.v)
    # Fixed exploration rates, then a decaying-epsilon run.
    for eps in [0.2, 0.1, 0.05]:
        print("running e soft with e = {}".format(eps))
        print(monte_carlo.e_soft(eps))
        print(monte_carlo.v)
    print("running e soft with decay")
    print(monte_carlo.e_soft(1, True))
    print(monte_carlo.v)


if __name__ == '__main__':
    main()
# # 0.2
# max norm = 2.635907184410178
# MSE = 1.0916569585432456
# 0.1
# max norm = 1.0146242375768457
# MSE = 0.20658440173913825
# 0.05
# max norm = 0.6371196286440819
# MSE = 0.053532617694715456
# decay
# max norm = 10.652396821340675
# MSE = 12.980199028027773
# Every visit
# max norm = 0.09976012680428381
# Iterations to converge = 5495
# max norm = 0.0777421256165125
# Iterations to converge = 13800
# average nuumber of iterations = 8353.1 | saurabhbajaj123/Reinforcement-Learning-Algorithms-1 | HW4/submission/HW4.py | HW4.py | py | 20,952 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"li... |
74112978272 | from commands.Command import Command
import discord
import asyncio
class Cat(Command):
    """Command that echoes back the text content of the caller's stored file."""

    def __init__(self):
        super().__init__(
            {
                'name': 'cat',
                'description': 'extracts the text content of your file',
                'argc': 1
            }
        )

    async def run(self, message: discord.Message, argc: int, argv: list):
        """Send the file stored for *message.author* back as a code block.

        argv[0] is the file extension; anything outside *valid_extensions*
        is rejected with an error embed.
        """
        extension = argv[0]
        valid_extensions = ['cpp']
        if extension not in valid_extensions:
            emb = discord.Embed(
                title='Error', description="Invalid file extension.", colour=discord.Color.from_rgb(255, 0, 0)
            )
            await message.channel.send(embed=emb)
            return
        # Context manager guarantees the handle is closed even if reading fails
        # (the original open()/close() pair leaked the handle on exceptions).
        with open(f'./files/{message.author}.{extension}', 'r') as file:
            msg = '```cpp\n'
            for line in file.readlines():
                if len(line.strip()) != 0:
                    msg += line
                else:
                    # Blank (whitespace-only) lines are normalised to a bare newline.
                    msg += '\n'
        msg += '\n```'
        await message.channel.send(msg)
| luccanunes/code-runner-bot | commands/Cat.py | Cat.py | py | 1,045 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "commands.Command.Command",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.Message",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "discord.Co... |
1443567060 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 9 12:39:55 2022
@author: pkinn
"""
def cvTrain(model, features, targets, nSplits, nEpochs, batchSz, initWts):
from sklearn.model_selection import KFold
import numpy as np
kf = KFold(n_splits = nSplits, shuffle = True)
fn = 1
# Define per-fold score containers <-- these are new
acc_per_fold = []
loss_per_fold = []
allHist = np.zeros((nSplits, nEpochs))
for train, test in kf.split(features, targets):
print('------------------------------------------------------------------------')
print(f'Training for fold {fn} ...')
model.set_weights(initWts)
for kk in range(nEpochs):
history = model.fit(features[train], targets[train],
batch_size = batchSz,
epochs = 1,
verbose = 0)
scores = model.evaluate(features[test], targets[test], verbose = 0)
allHist[fn-1, kk] = scores[1]
print(f'Score for fold {fn}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
acc_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
fn += 1
return acc_per_fold, loss_per_fold, allHist | Tessier-Lab-UMich/Emi_Pareto_Opt_ML | cvTrain.py | cvTrain.py | py | 1,305 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
}
] |
36170270181 | from abc import ABCMeta, abstractmethod
from bisect import bisect_right
from typing import Any, Dict, Iterable, List, Optional, Tuple
from volatility3.framework import exceptions, interfaces
from volatility3.framework.configuration import requirements
from volatility3.framework.layers import linear
class NonLinearlySegmentedLayer(
    interfaces.layers.TranslationLayerInterface, metaclass=ABCMeta
):
    """A class to handle a single run-based layer-to-layer mapping.

    In the documentation "mapped address" or "mapped offset" refers to
    an offset once it has been mapped to the underlying layer
    """

    def __init__(
        self,
        context: interfaces.context.ContextInterface,
        config_path: str,
        name: str,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(
            context=context, config_path=config_path, name=name, metadata=metadata
        )

        self._base_layer = self.config["base_layer"]
        # Segment tuples are (offset, mapped_offset, length, mapped_length),
        # kept sorted by offset (see _load_segments).
        self._segments: List[Tuple[int, int, int, int]] = []
        # Lazily computed extremes over the segment list.
        self._minaddr: Optional[int] = None
        self._maxaddr: Optional[int] = None

        self._load_segments()

    @abstractmethod
    def _load_segments(self) -> None:
        """Populates the _segments variable.

        Segments must be (address, mapped address, length, mapped_length) and must be
        sorted by address when this method exits
        """

    def is_valid(self, offset: int, length: int = 1) -> bool:
        """Returns whether the address offset can be translated to a valid
        address."""
        try:
            base_layer = self._context.layers[self._base_layer]
            # mapping() yields (offset, length, mapped_offset, mapped_length,
            # layer); only the mapped_offset is needed for validity checks.
            return all(
                [
                    base_layer.is_valid(mapped_offset)
                    for _i, _i, mapped_offset, _i, _s in self.mapping(offset, length)
                ]
            )
        except exceptions.InvalidAddressException:
            # mapping() raised, so the offset has no valid translation.
            return False

    def _find_segment(
        self, offset: int, next: bool = False
    ) -> Tuple[int, int, int, int]:
        """Finds the segment containing a given offset.

        Returns the segment tuple (offset, mapped_offset, length, mapped_length)

        With next=True, returns the first segment starting at or after
        *offset* instead; raises InvalidAddressException when no suitable
        segment exists.
        """

        if not self._segments:
            self._load_segments()

        # Find rightmost value less than or equal to x
        # (the probe tuple's huge second element guarantees it sorts after any
        # real segment sharing the same start offset)
        i = bisect_right(
            self._segments,
            (offset, self.context.layers[self._base_layer].maximum_address),
        )
        if i and not next:
            segment = self._segments[i - 1]
            if segment[0] <= offset < segment[0] + segment[2]:
                return segment
        if next:
            if i < len(self._segments):
                return self._segments[i]

        raise exceptions.InvalidAddressException(
            self.name, offset, f"Invalid address at {offset:0x}"
        )

    # Determines whether larger segments are in use and the offsets within them should be tracked linearly
    # When no decoding of the data occurs, this should be set to true
    _track_offset = False

    def mapping(
        self, offset: int, length: int, ignore_errors: bool = False
    ) -> Iterable[Tuple[int, int, int, int, str]]:
        """Returns a sorted iterable of (offset, length, mapped_offset, mapped_length, layer)
        mappings.

        With ignore_errors=True, gaps between segments are silently skipped
        rather than raising InvalidAddressException.
        """
        done = False
        current_offset = offset

        while not done:
            try:
                # Search for the appropriate segment that contains the current_offset
                logical_offset, mapped_offset, size, mapped_size = self._find_segment(
                    current_offset
                )
                # If it starts before the current_offset, bring the lower edge up to the right place
                if current_offset > logical_offset:
                    difference = current_offset - logical_offset
                    logical_offset += difference
                    # Only advance the mapped offset in lock-step when the
                    # segment maps linearly (no decoding of the data).
                    if self._track_offset:
                        mapped_offset += difference
                    size -= difference
            except exceptions.InvalidAddressException:
                if not ignore_errors:
                    # If we're not ignoring errors, raise the invalid address exception
                    raise
                try:
                    # Find the next valid segment after our current_offset
                    (
                        logical_offset,
                        mapped_offset,
                        size,
                        mapped_size,
                    ) = self._find_segment(current_offset, next=True)
                    # We know that the logical_offset must be greater than current_offset so skip to that value
                    current_offset = logical_offset
                    # If it starts too late then we're done
                    if logical_offset > offset + length:
                        return
                except exceptions.InvalidAddressException:
                    return
            # Crop it to the amount we need left
            chunk_size = min(size, length + offset - logical_offset)
            yield logical_offset, chunk_size, mapped_offset, mapped_size, self._base_layer
            current_offset += chunk_size
            # Terminate if we've gone (or reached) our required limit
            if current_offset >= offset + length:
                done = True

    @property
    def minimum_address(self) -> int:
        """Lowest address covered by any segment (cached after first use)."""
        if not self._segments:
            raise ValueError("SegmentedLayer must contain some segments")
        if self._minaddr is None:
            mapped, _, _, _ = self._segments[0]
            self._minaddr = mapped
        return self._minaddr

    @property
    def maximum_address(self) -> int:
        """One past the end of the last segment (cached after first use)."""
        if not self._segments:
            raise ValueError("SegmentedLayer must contain some segments")
        if self._maxaddr is None:
            mapped, _, length, _ = self._segments[-1]
            self._maxaddr = mapped + length
        return self._maxaddr

    @property
    def dependencies(self) -> List[str]:
        """Returns a list of the lower layers that this layer is dependent
        upon."""
        return [self._base_layer]

    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        """Configuration requirements: only the base layer to translate onto."""
        return [
            requirements.TranslationLayerRequirement(name="base_layer", optional=False)
        ]
class SegmentedLayer(
    NonLinearlySegmentedLayer, linear.LinearlyMappedLayer, metaclass=ABCMeta
):
    """A segmented layer whose segments map linearly onto the base layer."""

    # Offsets within a segment translate one-to-one, so track them linearly.
    _track_offset = True

    def mapping(
        self, offset: int, length: int, ignore_errors: bool = False
    ) -> Iterable[Tuple[int, int, int, int, str]]:
        """Yield (offset, length, mapped_offset, mapped_length, layer) runs.

        Linear mappings must report exactly as many mapped bytes as were
        requested, so the parent's mapped_length is replaced by the chunk
        length for every run.
        """
        parent_runs = super().mapping(offset, length, ignore_errors)
        for run_offset, run_length, run_mapped, _parent_mapped_length, run_layer in parent_runs:
            yield run_offset, run_length, run_mapped, run_length, run_layer
| volatilityfoundation/volatility3 | volatility3/framework/layers/segmented.py | segmented.py | py | 6,939 | python | en | code | 1,879 | github-code | 1 | [
{
"api_name": "volatility3.framework.interfaces.layers",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "volatility3.framework.interfaces",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "abc.ABCMeta",
"line_number": 11,
"usage_type": "name"
},
... |
34529095803 | import copy
import re
from knack.log import get_logger
from azdev.utilities import get_name_index
logger = get_logger(__name__)
_LOADER_CLS_RE = re.compile('.*azure/cli/command_modules/(?P<module>[^/]*)/__init__.*')
def filter_modules(command_loader, help_file_entries, modules=None, include_whl_extensions=False):
    """Restrict the command table and help entries to certain modules/extensions.

    : param command_loader: The CLICommandsLoader containing the command table to filter.
    : help_file_entries: The dict of HelpFile entries to filter.
    : modules: [str] list of module or extension names to retain.
    """
    return _filter_mods(
        command_loader,
        help_file_entries,
        modules=modules,
        include_whl_extensions=include_whl_extensions,
    )
def exclude_commands(command_loader, help_file_entries, module_exclusions, include_whl_extensions=False):
    """Remove certain modules/extensions from the command table and help entries.

    : param command_loader: The CLICommandsLoader containing the command table to filter.
    : help_file_entries: The dict of HelpFile entries to filter.
    : module_exclusions: [str] list of module or extension names to remove.
    """
    return _filter_mods(
        command_loader,
        help_file_entries,
        modules=module_exclusions,
        exclude=True,
        include_whl_extensions=include_whl_extensions,
    )
def _filter_mods(command_loader, help_file_entries, modules=None, exclude=False, include_whl_extensions=False):
    """Return (command_loader, help_file_entries) filtered by module/extension name.

    :param modules: [str] module or extension names; retained by default,
        removed instead when *exclude* is True.
    """
    modules = modules or []

    # command tables and help entries must be copied to allow for separate linter scope
    command_table = command_loader.command_table.copy()
    command_group_table = command_loader.command_group_table.copy()
    command_loader = copy.copy(command_loader)
    command_loader.command_table = command_table
    command_loader.command_group_table = command_group_table
    help_file_entries = help_file_entries.copy()

    # name_index maps a source (short) name to its long name for matching
    # against either spelling in *modules*.
    name_index = get_name_index(include_whl_extensions=include_whl_extensions)
    # list() snapshot: entries are popped from the table during iteration.
    for command_name in list(command_loader.command_table.keys()):
        try:
            source_name, _ = _get_command_source(command_name, command_loader.command_table)
        except LinterError as ex:
            # command is unrecognized
            logger.warning(ex)
            source_name = None

        try:
            long_name = name_index[source_name]
            is_specified = source_name in modules or long_name in modules
        except KeyError:
            # source name (possibly None) is unknown to the index
            is_specified = False

        # retain mode (exclude=False): drop unspecified commands;
        # exclude mode (exclude=True): drop specified commands.
        if is_specified == exclude:
            # brute force method of ignoring commands from a module or extension
            command_loader.command_table.pop(command_name, None)
            help_file_entries.pop(command_name, None)

    # Remove unneeded command groups (those with no surviving command)
    retained_command_groups = {' '.join(x.split(' ')[:-1]) for x in command_loader.command_table}
    excluded_command_groups = set(command_loader.command_group_table.keys()) - retained_command_groups

    for group_name in excluded_command_groups:
        command_loader.command_group_table.pop(group_name, None)
        help_file_entries.pop(group_name, None)

    return command_loader, help_file_entries
def share_element(first_iter, second_iter):
    """Return True if any element of *second_iter* is contained in *first_iter*."""
    for candidate in second_iter:
        if candidate in first_iter:
            return True
    return False
def _get_command_source(command_name, command_table):
    """Return (source_name, is_extension) for *command_name*.

    Raises LinterError when the command has no command source at all.
    """
    from azure.cli.core.commands import ExtensionCommandSource  # pylint: disable=import-error

    command = command_table.get(command_name)
    source = command.command_source
    # Extension-backed commands carry an ExtensionCommandSource object.
    if isinstance(source, ExtensionCommandSource):
        return source.extension_name, True
    if source is None:
        raise LinterError('Command: `%s`, has no command source.' % command_name)
    # Otherwise the command comes from a core module.
    return source, False
# pylint: disable=line-too-long
def merge_exclusion(left_exclusion, right_exclusion):
    """Merge *right_exclusion* into *left_exclusion* in place.

    Command-level and per-parameter rule exclusions from the right-hand dict
    are appended to the left-hand dict; entries with nothing to merge do not
    create new keys on the left.
    """
    for command_name, entry in right_exclusion.items():
        for rule_name in entry.get('rule_exclusions', []):
            command_slot = left_exclusion.setdefault(command_name, {})
            command_slot.setdefault('rule_exclusions', []).append(rule_name)
        for param_name, param_entry in entry.get('parameters', {}).items():
            for rule_name in param_entry.get('rule_exclusions', []):
                param_slot = (left_exclusion.setdefault(command_name, {})
                              .setdefault('parameters', {})
                              .setdefault(param_name, {}))
                param_slot.setdefault('rule_exclusions', []).append(rule_name)
class LinterError(Exception):
    """Exception raised by the linter for failures that are not rule violations."""
| Azure/azure-cli-dev-tools | azdev/operations/linter/util.py | util.py | py | 4,677 | python | en | code | 71 | github-code | 1 | [
{
"api_name": "knack.log.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "azdev.utilities.get_name_ind... |
25922177105 | from swift.common.swob import wsgify, HTTPInternalServerError, HTTPException
from swift.common.utils import get_logger
from zion.handlers import ProxyHandler
from zion.handlers import ComputeHandler
from zion.handlers import ObjectHandler
from zion.handlers.base import NotFunctionRequest
from distutils.util import strtobool
import redis
class FunctionHandlerMiddleware(object):
    """WSGI middleware that dispatches Zion function requests to the handler
    matching the configured execution server (proxy, object or compute)."""

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.exec_server = self.conf.get('execution_server')
        self.logger = get_logger(conf, name=self.exec_server +
                                 "-server Zion",
                                 log_route='function_handler')

        redis_host = self.conf.get('redis_host')
        redis_port = self.conf.get('redis_port')
        redis_db = self.conf.get('redis_db')
        # Shared pool; a lightweight Redis client is created from it per request.
        self.redis_conn_pool = redis.ConnectionPool(host=redis_host,
                                                    port=redis_port,
                                                    db=redis_db)

        self.handler_class = self._get_handler(self.exec_server)

    def _get_handler(self, exec_server):
        """
        Generate Handler class based on execution_server parameter

        :param exec_server: Where this storlet_middleware is running.
        This should value should be 'proxy' or 'compute'
        :raise ValueError: If exec_server is invalid
        """
        if exec_server == 'proxy':
            return ProxyHandler
        elif exec_server == 'compute':
            return ComputeHandler
        elif exec_server == 'object':
            return ObjectHandler
        else:
            raise ValueError('configuration error: execution_server must be '
                             'either proxy, object or compute but is %s' % exec_server)

    @wsgify
    def __call__(self, req):
        """Handle one request; non-Zion requests pass through to the app.

        Exception order matters: NotFunctionRequest falls through to the
        wrapped app, swob HTTPExceptions are re-raised as-is (they are valid
        responses), and anything else becomes a 500.
        """
        try:
            r = redis.Redis(connection_pool=self.redis_conn_pool)
            handler = self.handler_class(req, self.conf, self.app, self.logger, r)
            self.logger.debug('%s call in %s' % (req.method, req.path))
            return handler.handle_request()

        except NotFunctionRequest:
            self.logger.debug('No Zion Request, bypassing middleware')
            return req.get_response(self.app)

        except HTTPException:
            self.logger.exception('Zion execution failed')
            raise

        except Exception:
            self.logger.exception('Zion execution failed')
            raise HTTPInternalServerError(body='Unable to execute the requested operation: Zion execution failed\n')
def _strtobool(value):
    """Convert a truth-value string to 1 or 0.

    Local replacement for ``distutils.util.strtobool``: distutils was
    removed from the standard library in Python 3.12 (PEP 632).  Accepts
    the same spellings and raises ValueError on anything else.
    """
    normalized = str(value).lower()
    if normalized in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if normalized in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError('invalid truth value %r' % (value,))


def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy.

    Merges the paste.deploy configuration with the defaults below and
    returns a callable that wraps a WSGI app in the Zion middleware.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    # Common
    conf['execution_server'] = conf.get('execution_server')
    conf['functions_container'] = conf.get('functions_container', 'functions')
    conf['functions_visibility'] = _strtobool(conf.get('functions_visibility', 'True'))
    # Paths
    conf['main_dir'] = conf.get('main_dir', '/opt/zion')
    # Worker paths
    conf['workers_dir'] = conf.get('workers_dir', 'workers')
    conf['java_runtime_dir'] = conf.get('java_runtime_dir', 'runtime/java')
    # Function Paths
    conf['functions_dir'] = conf.get('functions_dir', 'functions')
    conf['cache_dir'] = conf.get('cache_dir', 'cache')
    conf['log_dir'] = conf.get('log_dir', 'logs')
    conf['bin_dir'] = conf.get('bin_dir', 'bin')
    # Redis metastore
    conf['redis_host'] = conf.get('redis_host', 'localhost')
    conf['redis_port'] = int(conf.get('redis_port', 6379))
    conf['redis_db'] = int(conf.get('redis_db', 10))
    # Function defaults
    conf['default_function_timeout'] = int(conf.get('default_function_timeout', 10))
    conf['default_function_memory'] = int(conf.get('default_function_memory', 1024))
    conf['max_function_memory'] = int(conf.get('max_function_memory', 1024))
    # Compute Nodes
    conf['disaggregated_compute'] = _strtobool(conf.get('disaggregated_compute', 'True'))
    conf['compute_nodes'] = conf.get('compute_nodes', 'localhost:8585')
    conf['docker_pool_dir'] = conf.get('docker_pool_dir', 'docker_pool')

    def swift_functions(app):
        return FunctionHandlerMiddleware(app, conf)
    return swift_functions
| JosepSampe/storage-functions | Engine/swift/middleware/zion/function_handler.py | function_handler.py | py | 4,342 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "swift.common.utils.get_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "redis.ConnectionPool",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "zion.handlers.ProxyHandler",
"line_number": 38,
"usage_type": "name"
},
{
"api... |
8243965245 | import numpy as np
import random
import matplotlib.pyplot as plt
import pickle
class Dataset:
    """Base container for observations and their class labels.

    Subclasses fill ``obs``, ``classes``, ``num_obs``, ``num_classes``
    and ``indices``; this class provides iteration, per-class lookup and
    mini-batch access on top of them.
    """

    def __init__(self):
        self.index = 0          # cursor used by the iterator protocol
        self.obs = []
        self.classes = []
        self.num_obs = 0
        self.num_classes = 0
        self.indices = []

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= self.num_obs:
            # Rewind so the dataset can be iterated again afterwards.
            self.index = 0
            raise StopIteration
        current = self.index
        self.index = current + 1
        return self.obs[current], self.classes[current]

    def reset(self):
        """Rewind the iteration cursor to the first observation."""
        self.index = 0

    def get_obs_with_target(self, k):
        """Return every observation whose class label equals ``k``."""
        return [self.obs[i] for i, label in enumerate(self.classes) if label == k]

    def get_all_obs_class(self, shuffle=False):
        """Return (observation, class) pairs, optionally shuffled."""
        if shuffle:
            random.shuffle(self.indices)
        return [(self.obs[i], self.classes[i]) for i in self.indices]

    def get_mini_batches(self, batch_size, shuffle=False):
        """Split the dataset into (obs, classes) mini-batches.

        Requires ``obs``/``classes`` to support fancy indexing (NumPy
        arrays) since index slices are applied directly.
        """
        if shuffle:
            random.shuffle(self.indices)
        return [(self.obs[self.indices[start:start + batch_size]],
                 self.classes[self.indices[start:start + batch_size]])
                for start in range(0, self.num_obs, batch_size)]
class IrisDataset(Dataset):
    """Iris data read from a comma-delimited text file.

    Each row holds four feature values followed by a one-hot encoded
    class target (three classes).
    """

    def __init__(self, path):
        super(IrisDataset, self).__init__()
        self.file_path = path
        self.loadFile()
        self.indices = np.arange(self.num_obs)

    def loadFile(self):
        """Parse ``self.file_path`` into ``obs``/``classes`` arrays.

        Uses a context manager so the file handle is closed even when a
        malformed line raises during parsing (the original leaked the
        handle in that case).
        """
        resultList = []
        with open(self.file_path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')                 # "1.0,2.0,3.0"
                sVals = line.split(',')                  # ["1.0", "2.0", "3.0"]
                fVals = list(map(np.float32, sVals))     # [1.0, 2.0, 3.0]
                resultList.append(fVals)
        data = np.asarray(resultList, dtype=np.float32)
        self.obs = data[:, 0:4]        # four feature columns
        self.classes = data[:, 4:7]    # one-hot target columns
        self.num_obs = data.shape[0]
        self.num_classes = 3
# Helpers
def square(values):
    """Return a NumPy array with each element of *values* squared.

    Parameter renamed from ``list``, which shadowed the builtin.
    """
    return np.array([v ** 2 for v in values])
# Activations
def tanh(x, deriv):
    """Hyperbolic tangent activation.

    :param x: raw pre-activation value(s) -- this is what Layer.backprop
              passes in (``self.last_nodes``), so the derivative is
              evaluated at ``x`` itself, not at tanh(x).  (The original
              docstring claimed the opposite of what the code and its
              callers do.)
    :param deriv: if true, return d/dx tanh(x) = 1 - tanh(x)^2
    :return: tanh(x) or its derivative
    """
    if deriv:
        return 1.0 - np.power(np.tanh(x), 2)
    return np.tanh(x)
def sigmoid(x, deriv):
    """Logistic sigmoid activation (Task 2a).

    Computed as 0.5 * (1 + tanh(x / 2)), which is algebraically equal to
    1 / (1 + exp(-x)) but does not overflow for large negative ``x``.
    The dead commented-out variants from the original were removed; the
    active formula is unchanged.

    :param x: raw pre-activation value(s)
    :param deriv: if true, return sigmoid'(x) = sigmoid(x)*(1 - sigmoid(x)),
                  evaluated at the raw pre-activation ``x``
    :return: sigmoid(x) or its derivative
    """
    if deriv:
        s = sigmoid(x, False)
        return s * (1 - s)
    return 0.5 * (1 + np.tanh(0.5 * x))
def softmax(x, deriv):
    """Softmax over the components of ``x`` (Task 2a).

    :param deriv: if true, return the sigmoid derivative instead -- the
                  network treats the output layer's derivative like
                  sigmoid's (as in the original)
    :return: the softmax distribution (sums to 1) or the derivative
    """
    if deriv:
        return sigmoid(x, True)
    # Subtract the max before exponentiating: mathematically the same
    # result, but avoids overflow for large inputs.
    shifted = np.asarray(x) - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
class Layer:
    """A fully connected layer that owns its weights and biases and
    caches the values of the most recent forward pass for backprop."""

    def __init__(self, numInput, numOutput, activation=sigmoid):
        print('Create layer with: {}x{} @ {}'.format(numInput, numOutput, activation))
        self.ni = numInput
        self.no = numOutput
        self.weights = np.zeros(shape=[self.ni, self.no], dtype=np.float32)
        self.biases = np.zeros(shape=[self.no], dtype=np.float32)
        self.initializeWeights()
        self.activation = activation
        # Caches filled by inference() and consumed by backprop().
        self.last_input = np.zeros(shape=[self.ni], dtype=np.float32)
        self.last_output = np.zeros(shape=[self.no], dtype=np.float32)
        self.last_nodes = np.zeros(shape=[self.no], dtype=np.float32)
        self.mbs = None

    def initializeWeights(self):
        """
        Task 2d: initialize weights and biases from a zero-mean normal
        distribution whose spread shrinks with the layer size, so the
        network does not start from all zeros.
        """
        spread = np.sqrt(2 / (self.ni * self.no))
        self.weights = np.random.normal(0, spread, (self.weights.shape[0], self.weights.shape[1]))
        self.biases = np.random.normal(0, spread, self.biases.shape[0])

    def inference(self, x):
        """
        Task 2b: transform ``x`` with the layer's weights and bias and
        apply the activation function.  Input, pre-activation and output
        are cached for the subsequent backpropagation step.

        :param x: input feature vector
        :return: activated output of the layer
        :rtype: np.array
        """
        self.last_input = x
        pre_activation = np.zeros(shape=[self.no], dtype=np.float32)
        for out_idx in range(self.no):
            for in_idx in range(self.ni):
                pre_activation[out_idx] += self.weights[in_idx][out_idx] * x[in_idx]
            pre_activation[out_idx] += self.biases[out_idx]
        self.last_nodes = pre_activation
        self.last_output = self.activation(pre_activation, False)
        return self.last_output

    def backprop(self, error):
        """
        Task 2c: backpropagate the error signal through this layer.

        :param error: error signal received from the following layer
        :return: error signal for the preceding layer, the weight
                 gradients and the bias gradients
        :rtype: np.array
        """
        # delta_i = dE/dy_i * f'(z_i); this factor is shared by all three
        # quantities below, so compute it once per output node.
        node_deltas = [error[out_idx] * self.activation(self.last_nodes[out_idx], True)
                       for out_idx in range(self.no)]
        # Gradients for the weight matrix.
        grad_weights = np.zeros(shape=[self.ni, self.no], dtype=np.float32)
        for out_idx in range(self.no):
            for in_idx in range(self.ni):
                grad_weights[in_idx][out_idx] = node_deltas[out_idx] * self.last_input[in_idx]
        # Gradients for the biases.
        grad_biases = np.zeros(shape=[self.no], dtype=np.float32)
        for out_idx in range(self.no):
            grad_biases[out_idx] = node_deltas[out_idx]
        # Error signal for the previous layer: dE/dy(l-1).
        error_signal = np.zeros(shape=[self.ni], dtype=np.float32)
        for in_idx in range(self.ni):
            for out_idx in range(self.no):
                error_signal[in_idx] += node_deltas[out_idx] * self.weights[in_idx][out_idx]
        return error_signal, grad_weights, grad_biases
class BasicNeuralNetwork():
    """A small fully connected network trained with SGD (Tasks 2b-2d)."""

    def __init__(self, layer_sizes=5, num_input=4, num_output=3, num_epoch=300, learning_rate=0.1,
                 mini_batch_size=4, number_of_hiddenlayers=0):
        self.layers = []
        self.ls = layer_sizes
        self.ni = num_input
        self.no = num_output
        self.lr = learning_rate
        self.num_epoch = num_epoch
        self.mbs = mini_batch_size
        self.nhl = number_of_hiddenlayers
        self.constructNetwork()

    def forward(self, x):
        """
        Task 2b: forward a single feature vector through every layer and
        return the output of the last layer.

        :param x: input feature vector
        :return: output of the network
        :rtype: np.array
        """
        output = x
        for layer in self.layers:
            output = layer.inference(output)
        return output

    def train(self, train_dataset, eval_dataset=None, monitor_ce_train=True, monitor_accuracy_train=True,
              monitor_ce_eval=True, monitor_accuracy_eval=True, monitor_plot='monitor.png'):
        """Run ``num_epoch`` epochs of SGD, optionally monitoring
        cross-entropy and accuracy on the train/eval sets and saving a
        plot of the learning curves to ``monitor_plot``."""
        ce_train_array = []
        ce_eval_array = []
        acc_train_array = []
        acc_eval_array = []
        for e in range(self.num_epoch):
            # Mini-batch updates when a batch size is configured,
            # otherwise per-observation (online) updates.
            if self.mbs:
                self.mini_batch_SGD(train_dataset)
            else:
                self.online_SGD(train_dataset)
            print('Finished training epoch: {}'.format(e))
            if monitor_ce_train:
                ce_train = self.ce(train_dataset)
                ce_train_array.append(ce_train)
                print('CE (train): {}'.format(ce_train))
            if monitor_accuracy_train:
                acc_train = self.accuracy(train_dataset)
                acc_train_array.append(acc_train)
                print('Accuracy (train): {}'.format(acc_train))
            if monitor_ce_eval:
                ce_eval = self.ce(eval_dataset)
                ce_eval_array.append(ce_eval)
                print('CE (eval): {}'.format(ce_eval))
            if monitor_accuracy_eval:
                acc_eval = self.accuracy(eval_dataset)
                acc_eval_array.append(acc_eval)
                print('Accuracy (eval): {}'.format(acc_eval))
        if monitor_plot:
            fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))
            line1, = ax[0].plot(ce_train_array, '--', linewidth=2, label='ce_train')
            line2, = ax[0].plot(ce_eval_array, label='ce_eval')
            line3, = ax[1].plot(acc_train_array, '--', linewidth=2, label='acc_train')
            line4, = ax[1].plot(acc_eval_array, label='acc_eval')
            ax[0].legend(loc='upper right')
            ax[1].legend(loc='upper left')
            ax[1].set_ylim([0, 1])
            plt.savefig(monitor_plot)

    def online_SGD(self, dataset):
        """
        Task 2d: online SGD -- the weights are updated after every single
        observation.

        Bug fix: the original evaluated a cross-entropy expression
        without assigning it, leaving ``error`` undefined (NameError on
        first use).  The error signal fed into backprop is now the
        output-minus-target difference, matching mini_batch_SGD.
        """
        for obs, target in dataset:
            prediction = self.forward(obs)
            error = prediction - target
            for layer in reversed(self.layers):
                (error_prev, grad_w, grad_b) = layer.backprop(error)
                layer.weights = layer.weights - self.lr * grad_w
                layer.biases = layer.biases - self.lr * grad_b
                error = error_prev

    def mini_batch_SGD(self, dataset):
        """
        Task 2d: mini-batch SGD -- the error is accumulated over one
        mini batch and a single weight update is applied per batch.

        The inner loop runs over the actual batch length instead of
        ``self.mbs`` so a shorter final batch no longer raises
        IndexError; the inner loop variable also no longer shadows the
        backprop results.
        """
        for batch_obs, batch_targets in dataset.get_mini_batches(self.mbs):
            error = np.zeros(shape=[self.no])
            for i in range(len(batch_obs)):
                error += (self.forward(batch_obs[i]) - batch_targets[i])
            for layer in reversed(self.layers):
                (error_prev, grad_w, grad_b) = layer.backprop(error)
                layer.weights = layer.weights - self.lr * grad_w
                layer.biases = layer.biases - self.lr * grad_b
                error = error_prev

    def constructNetwork(self):
        """
        Task 2d: build the layer list from self.ls, self.ni and self.no.
        The last layer uses the softmax activation; all preceding layers
        use sigmoid.
        """
        self.layers = [Layer(self.ni, self.ls, sigmoid)]
        for _ in range(self.nhl):
            self.layers.append(Layer(self.ls, self.ls, sigmoid))
        self.layers.append(Layer(self.ls, self.no, softmax))

    def ce(self, dataset):
        """Mean binary cross-entropy of the network over ``dataset``."""
        ce = 0
        for x, t in dataset:
            t_hat = self.forward(x)
            # nan_to_num guards against log(0) when an output saturates.
            ce += np.sum(np.nan_to_num(-t * np.log(t_hat) - (1 - t) * np.log(1 - t_hat)))
        return ce / dataset.num_obs

    def accuracy(self, dataset):
        """Fraction of observations whose argmax output matches the
        argmax target, computed via a confusion matrix."""
        # dtype=int: the np.int alias was removed in NumPy 1.24.
        cm = np.zeros(shape=[dataset.num_classes, dataset.num_classes], dtype=int)
        for x, t in dataset:
            t_hat = self.forward(x)
            c_hat = np.argmax(t_hat)  # index of largest output value
            c = np.argmax(t)
            cm[c, c_hat] += 1
        correct = np.trace(cm)
        return correct / dataset.num_obs

    def load(self, path=None):
        """Restore the layer list pickled by :meth:`save`."""
        if not path:
            path = './network.save'
        with open(path, 'rb') as f:
            self.layers = pickle.load(f)

    def save(self, path=None):
        """Pickle the layer list to ``path`` (default ./network.save)."""
        if not path:
            path = './network.save'
        with open(path, 'wb') as f:
            pickle.dump(self.layers, f)
| moritzlangenberg/SCaML6 | network.py | network.py | py | 13,504 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.shuffle",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_... |
21358312248 | __author__ = ["fkiraly"]
from sktime.tests import test_all_estimators
def pytest_addoption(parser):
    """Pytest command line parser options adder."""
    matrix_help = "sub-sample estimators in tests by os/version matrix partition design"
    parser.addoption("--matrixdesign", default=False, help=matrix_help)
def pytest_configure(config):
    """Pytest configuration preamble."""
    # The option arrives as the boolean default or as the string "True"
    # when given on the command line.
    flag = config.getoption("--matrixdesign")
    if flag in (True, "True"):
        test_all_estimators.MATRIXDESIGN = True
| orgTestCodacy11KRepos110MB/repo-5089-sktime | conftest.py | conftest.py | py | 499 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sktime.tests.test_all_estimators.MATRIXDESIGN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sktime.tests.test_all_estimators",
"line_number": 18,
"usage_type": "name"
}
] |
72184037154 | import tqdm
import torch
import csv
import os
import os.path as osp
import random
import json
import h5py
import time
from collections import defaultdict
if __name__ == '__main__':
from MiniImageNet import MiniImageNetDataset, TransformedImageLoader, h5load
from base import MultiProcessImageLoader
else:
from .base import MultiProcessImageLoader
from .MiniImageNet import MiniImageNetDataset, TransformedImageLoader, h5load
def shuffle_slice(a, start, stop):
    """Fisher-Yates shuffle of a[start:stop] in place; the rest of the
    list is left untouched."""
    assert stop <= len(a)
    for pos in range(start, stop - 1):
        swap_with = random.randrange(pos, stop)
        a[pos], a[swap_with] = a[swap_with], a[pos]
class MiniImageNetMAMLDataset(MiniImageNetDataset):
    """MiniImageNet episode sampler for MAML-style training.

    Per category, the last ``metasample_holdout`` files can be reserved
    as a separate pool for meta-update samples (controlled per split by
    ``separate_metasample``), so support/query images and meta-update
    images never overlap for those splits.
    """

    def __init__(self,
                 csv_dir,
                 split,
                 image_dir,
                 shuffle=True,
                 num_threads=2,
                 transform=None,
                 transformed_images=None,
                 imname_index_file=None,
                 separate_metasample=None,
                 metasample_holdout=200,
                 ):
        super(MiniImageNetMAMLDataset, self).__init__(csv_dir, split, image_dir, shuffle, num_threads, transform,
                                                      transformed_images, imname_index_file)
        if separate_metasample is None:
            # Built here rather than as a default argument so the dict is
            # not a mutable default shared across instances.
            separate_metasample = {'train': True, 'test': False, 'val': False}
        assert len(self.split_names) == len(separate_metasample)
        self.separate_metasample = separate_metasample
        self.metasample_holdout = metasample_holdout
        self.cat_ixs_meta = dict()  # holds current read index for the meta-update images
        for sp in self.split_names:
            if self.separate_metasample[sp]:
                self.cat_ixs_meta[sp] = dict()
            else:
                # Without a separate pool, the meta index aliases the
                # normal per-category index.
                self.cat_ixs_meta[sp] = self.cat_ixs[sp]
            if self.separate_metasample[sp]:
                # Meta pool = last metasample_holdout files per category;
                # start its cursor at the pool's first element.
                for cat in self.cat_to_files[sp].keys():
                    self.cat_ixs_meta[sp][cat] = len(self.cat_to_files[sp][cat]) - self.metasample_holdout

    def shuffle(self, split, cat):
        """Shuffle all samples of the given split and category.

        The ``__dict__`` guard matters: the parent __init__ can trigger a
        shuffle before ``separate_metasample`` has been assigned.
        """
        if 'separate_metasample' in self.__dict__:
            if self.separate_metasample[split]:
                # Shuffle support/query and meta pools independently so
                # they never mix.
                self.shuffle_sq(split, cat)
                self.shuffle_meta(split, cat)
            else:
                random.shuffle(self.cat_to_files[split][cat])

    def shuffle_sq(self, split, cat):
        """Shuffle only the support/query portion (everything before the
        held-out meta pool)."""
        num_files = len(self.cat_to_files[split][cat])
        shuffle_slice(self.cat_to_files[split][cat], 0, num_files - self.metasample_holdout)

    def shuffle_meta(self, split, cat):
        """Shuffle only the held-out meta pool at the end of the list."""
        num_files = len(self.cat_to_files[split][cat])
        shuffle_slice(self.cat_to_files[split][cat], num_files - self.metasample_holdout, num_files)

    def update_sq_iterator(self, split, cat):
        """Advance the support/query cursor, wrapping (and reshuffling)
        at the end of the support/query portion."""
        ix = self.cat_ixs[split][cat]
        ix += 1
        maxlen = len(self.cat_to_files[split][cat])
        if self.separate_metasample[split]:
            maxlen -= self.metasample_holdout
        if ix == maxlen:
            self.cat_ixs[split][cat] = 0
            if self.do_shuffle:
                if self.separate_metasample[split]:
                    self.shuffle_sq(split, cat)
                else:
                    self.shuffle(split, cat)
        else:
            self.cat_ixs[split][cat] = ix

    def update_meta_iterator(self, split, cat):
        """Advance the meta cursor, wrapping (and reshuffling) at the end
        of the file list."""
        ix = self.cat_ixs_meta[split][cat]
        ix += 1
        if ix == len(self.cat_to_files[split][cat]):
            if self.separate_metasample[split]:
                self.cat_ixs_meta[split][cat] = len(self.cat_to_files[split][cat]) - self.metasample_holdout
                if self.do_shuffle:
                    self.shuffle_meta(split, cat)
            else:
                self.cat_ixs_meta[split][cat] = 0
                if self.do_shuffle:
                    self.shuffle(split, cat)
        else:
            self.cat_ixs_meta[split][cat] = ix

    def fetch_images_MAML(self, split, cat, num, metanum):
        """Return ``num`` support/query files followed by ``metanum``
        meta-update files for one category, advancing both cursors."""
        image_files = []
        for _ in range(num):
            ix = self.cat_ixs[split][cat]
            file = self.cat_to_files[split][cat][ix]
            image_files.append(file)
            self.update_sq_iterator(split, cat)
        for _ in range(metanum):
            ix = self.cat_ixs_meta[split][cat]
            file = self.cat_to_files[split][cat][ix]
            image_files.append(file)
            self.update_meta_iterator(split, cat)
        return image_files

    def get_episode_MAML(self, nway=5, nshot=4, nquery=1, nmeta=15, split='train', transform=None):
        """Sample one MAML episode: ``nway`` categories with ``nshot``
        support, ``nquery`` query and ``nmeta`` meta-update images each.

        Data is ordered as all supports, then all queries, then all
        meta-update samples; labels are the episode labels 0..nway-1.
        """
        categories = self.sample_categories(split, nway)
        images = dict()
        support_data = []
        support_lbls = []
        support_cats = []
        query_data = []
        query_lbls = []
        query_cats = []
        metasample_data = []
        metasample_lbls = []
        metasample_cats = []
        for lbl, cat in enumerate(categories):
            images[cat] = self.fetch_images_MAML(split, cat, nshot + nquery, nmeta)
            supports = images[cat][:nshot]
            queries = images[cat][nshot:nshot + nquery]
            metasamples = images[cat][nshot + nquery:]
            support_data.extend(supports)
            query_data.extend(queries)
            metasample_data.extend(metasamples)
            support_lbls.extend([lbl] * nshot)
            query_lbls.extend([lbl] * nquery)
            metasample_lbls.extend([lbl] * nmeta)
            support_cats.extend([cat] * nshot)
            query_cats.extend([cat] * nquery)
            metasample_cats.extend([cat] * nmeta)
        data_files = support_data + query_data + metasample_data
        data = self.load_images(data_files, transform)
        lbls = support_lbls + query_lbls + metasample_lbls
        cats = support_cats + query_cats + metasample_cats
        return {'data': data,
                'label': torch.LongTensor(lbls),
                'file': data_files,
                'category': cats,
                'nway': nway,
                'nshot': nshot,
                'nquery': nquery,
                'nmeta': nmeta,
                'split': split}
if __name__ == '__main__':
    # Smoke test: build the loader on the Ravi split and time episode sampling.
    import torchvision.transforms as transforms
    csv_dir = 'data/splits/mini_imagenet_split'
    split = 'Ravi'
    image_dir = 'data/raw/mini-imagenet'
    shuffle = True
    num_threads = 4
    # Resize to the 84x84 inputs expected by MiniImageNet models.
    transform = transforms.Compose([transforms.Resize((84, 84)),
                                    transforms.ToTensor()])
    # Hold out meta-update samples for the train and test splits, but not val.
    separate_metasample = {'train': True, 'test': True, 'val': False}
    mini_loader = MiniImageNetMAMLDataset(csv_dir, split,
                                          image_dir, shuffle,
                                          num_threads, transform, separate_metasample=separate_metasample)
    # 100 large (20-way, 5-shot, 15-query, 5-meta) test episodes...
    for idx in tqdm.tqdm(range(100)):
        data = mini_loader.get_episode_MAML(20, 5, 15, 5, split='test')
    #data = mini_loader.get_episode_MAML(5, 4, 1, 15, split='test')
    #import ipdb
    #ipdb.set_trace()
    # ...followed by 1000 standard 5-way episodes.
    for idx in tqdm.tqdm(range(1000)):
        data = mini_loader.get_episode_MAML(5, 4, 1, 15, split='test')
| alecwangcq/Prototypical-network | dataloader/MiniImageNetMAML.py | MiniImageNetMAML.py | py | 7,428 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randrange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "MiniImageNet.MiniImageNetDataset",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torc... |
2724354165 | from graphviz import Digraph
import pandas as pd
import numpy as np
import glob
import os
#Associa cada cor na planilha a um par (cor de fundo,cor da fonte) do Graphviz
colors = {'Amarelo':('yellow','black'),'Azul':('blue','white'),'Branco':('white','black'),
'Cinza':('grey','black'),'Marrom':('brown','white'),'Ouro':('gold3','black'),
'Preto':('black','white'),'Roxo':('purple','white'),'Verde':('green','black'),
'Vermelho':('red','black'),'Laranja':('orangered','black')}
colors_sub = { 'Amarelo':'#ffff005f','Azul':'cyan','Branco':'white','Cinza':'gray95',
'Marrom':'#a52a2a5f','Ouro':'gold','Preto':'black','Roxo':'mediumpurple1',
'Verde':'palegreen','Vermelho':'tomato','Laranja':'orange' }
#Associa os tipos de relação na planilha aos vértices no Graphviz
rel_types = {'Positivo':'blue', 'Negativo':'red', 'Neutro':'black'}
dirs = {'Sim':'both', 'Não':'forward'}
'''
Obtém o data frame do Pandas
Recebe o nome do arquivo Excel
Retorna as listas de atores e relacionamentos
'''
def get_data(fileloc, ext):
    """Read the 'atores' and 'relacionamentos' sheets from a spreadsheet.

    :param fileloc: path of the .xls/.xlsx/.ods workbook
    :param ext: file extension, used to pick the pandas engine
    :return: (actors rows, relationships rows) as plain lists, or
             (None, None) when the file cannot be read -- callers skip
             such files
    """
    ext_engine = {'.xls': 'xlrd', '.xlsx': 'openpyxl', '.ods': 'odf'}
    try:
        df_at_rel = pd.read_excel(fileloc, ['atores', 'relacionamentos'], engine=ext_engine.get(ext, None))
    except Exception:
        # Best effort: unreadable or malformed workbooks are reported as
        # missing data instead of crashing the batch run.  (Narrowed from
        # a bare except, which also swallowed KeyboardInterrupt/SystemExit.)
        return None, None
    # Actors: name/colour/group, dropping rows without a name.
    df_atores = df_at_rel['atores'].loc[:, ['ator', 'cor', 'grupo']].dropna(subset=['ator']).replace({np.nan: None})
    # Relationships: all five columns must be present.
    df_relacionamentos = df_at_rel['relacionamentos'].loc[:, ['de', 'relacionamento', 'para', 'tipo', 'bilateral']].dropna()
    return df_atores.to_numpy().tolist(), df_relacionamentos.to_numpy().tolist()
'''
Faz os nós dos atores
Recebe o objeto Digraph e a lista de atores
Retorna o Dicionário com os grupos e membros
'''
def make_actor_nodes(graph, atores):
    """Create one Graphviz node per actor that is not itself a group.

    :param graph: the main Digraph
    :param atores: rows of (name, colour, group)
    :return: (grupos, grupos_cores) -- membership lists per group and the
             colour name recorded for each group
    """
    # Every name that appears in the 'group' column denotes a cluster,
    # not a plain node.
    group_names = set()
    for nome, cor, grupo in atores:
        if grupo is not None:
            group_names.add(grupo)
    grupos = {}
    grupos_cores = {}
    for nome, cor, grupo in atores:
        if cor is None:
            cor = "Branco"
        if nome in group_names:
            # Groups become clusters later; only remember their colour.
            grupos_cores[nome] = cor
        else:
            graph.node(nome, shape='circle', fillcolor=colors[cor][0], style='filled',
                       fontcolor=colors[cor][1], fixedsize='false', width='1')
        if grupo is None:
            continue
        grupos.setdefault(grupo, []).append(nome)
    return grupos, grupos_cores
'''
Cria um subgrafo
Recebe o nome e cor do grupo
Retorna o subgrafo criado
'''
def makeGroup(grupo_nome, grupo_cor, nivel=1):
    """Build an empty Graphviz cluster (rounded, tinted background) for
    one group; members are added later by fillGroup."""
    cluster = Digraph(name='cluster_' + grupo_nome)
    cluster.attr(label=grupo_nome)
    cluster.attr(style='rounded')
    cluster.attr(bgcolor=colors_sub[grupo_cor])
    return cluster
'''
Insere os nós e grupos em um subgrafo
'''
def fillGroup(sub_g, grupos, grupos_cores, membros, not_root, nivel=1):
    """Insert the given members into ``sub_g``; members that are groups
    themselves become nested clusters (recorded in ``not_root`` so they
    are not attached to the main graph again)."""
    for member in membros:
        if member in grupos:
            nested = makeGroup(member, grupos_cores.get(member, 'Branco'), nivel + 1)
            fillGroup(nested, grupos, grupos_cores, grupos[member], not_root, nivel + 1)
            sub_g.subgraph(nested)
            not_root.add(member)
        else:
            sub_g.node(member)
'''
Faz os agrupamentos
Recebe o objeto Digraph e Dicionário com os grupos e membros
'''
def makeGroups(graph, grupos, grupos_cores):
    """Create every cluster, nest sub-clusters inside their parents, and
    attach only the root-level clusters to the main graph."""
    clusters = {}
    nested = set()
    # Build (and recursively fill) every cluster first.
    for group_name, members in grupos.items():
        cluster = makeGroup(group_name, grupos_cores.get(group_name, 'Branco'), 1)
        fillGroup(cluster, grupos, grupos_cores, members, nested, 1)
        clusters[group_name] = cluster
    # Only clusters that are not nested inside another one go into the
    # main graph.
    for group_name, cluster in clusters.items():
        if group_name not in nested:
            graph.subgraph(cluster)
def find_actor(grupos, grupo):
    """Return any leaf actor (non-group member) inside ``grupo``,
    descending into nested groups; None if the subtree has no leaf.

    The original returned after inspecting only the first member, so an
    empty nested group as first member yielded None even when a sibling
    leaf existed; now all members are tried in order.
    """
    for membro in grupos[grupo]:
        if membro not in grupos:
            return membro
        found = find_actor(grupos, membro)
        if found is not None:
            return found
    return None
'''
Cria os relacionamentos
Recebe o objeto Digraph, a lista de relacionamentos e o Dicionário com os grupos e membros
'''
def make_relationships(graph, relacionamentos, grupos):
    """Add one edge per relationship; edges touching a group are clipped
    at the group's cluster border via lhead/ltail."""
    for de, legenda, para, tipo, bilateral in relacionamentos:
        # Base edge attributes: direction and the colour encoding the
        # relationship type.
        params = {'dir': dirs[bilateral], 'color': rel_types[tipo], 'fontcolor': rel_types[tipo],
                  'penwidth': '1.0', 'decorate': 'false', 'minlen': '2'}
        source = de
        if de in grupos:
            # Graphviz cannot point an edge at a cluster directly: anchor
            # it on an arbitrary member node and clip at the cluster.
            params['ltail'] = 'cluster_' + de
            source = find_actor(grupos, de)
        target = para
        if para in grupos:
            params['lhead'] = 'cluster_' + para
            target = find_actor(grupos, para)
        graph.edge(source, target, label=legenda, **params)
'''
Loop principal
'''
# Collect the .xls, .xlsx and .ods files in the working directory.
files = {f: glob.glob('*' + f) for f in ['.ods', '.xls', '.xlsx']}
# Create one diagram per file found.
for ext, lista_f in files.items():
    for fileloc in lista_f:
        # Skip Excel lock/temporary files.
        if fileloc.startswith('~$'):
            continue
        print("Processando arquivo: " + fileloc)
        k = fileloc.rfind(ext)
        gattr = dict()
        # Allow edges between clusters (needed for lhead/ltail).
        gattr['compound'] = 'true'
        gattr['rankdir'] = 'LR'
        gattr['ranksep'] = '1.0'
        #gattr['dpi'] = '300'
        #gattr['ratio'] = '0.5625'
        gattr['newrank'] = 'true'
        gattr['overlap'] = 'false'
        gattr['fontsize'] = '20'
        g = Digraph(filename=fileloc[:k], engine='dot', format='png', graph_attr=gattr)
        # Read the actor/relationship sheets from the spreadsheet.
        atores, relacionamentos = get_data(fileloc, ext)
        # Skip the file when get_data could not read it.
        if atores is None and relacionamentos is None:
            continue
        # Actor nodes.
        grupos, grupos_cores = make_actor_nodes(g, atores)
        # Group clusters.
        makeGroups(g, grupos, grupos_cores)
        # Relationship edges.
        make_relationships(g, relacionamentos, grupos)
        # Render the PNG and open it.
        g.view()
        # Remove the intermediate DOT file.
        os.remove(fileloc[:k])
| lcoandrade/relationshipdiagram | diagrama_relacoes.py | diagrama_relacoes.py | py | 7,035 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_excel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "graphviz.Digraph",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"li... |
4348600276 | from pyspark.sql import SparkSession
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.linalg import Vectors
spark = SparkSession.builder.appName('normalization').getOrCreate()
spark.sparkContext.setLogLevel("WARN")
print("### spark starting ###")
records = [
(1, Vectors.dense([10.0, 10000.00, 1.0]),),
(2, Vectors.dense([20.0, 30000.00, 2.0]),),
(3, Vectors.dense([30.0, 40000.00, 3.0]),)
]
columns = ["id", "features"]
features_df = spark.createDataFrame(records, columns)
#print(features_df.take(1))
feature_scaler = MinMaxScaler(inputCol="features", outputCol="sfeatures")
s_model = feature_scaler.fit(features_df)
s_features_df = s_model.transform(features_df)
#print(s_features_df.take(1))
s_features_df.select("features", "sfeatures").show()
spark.stop() | yuyatinnefeld/spark | python/pyspark/ml_transformation/normalization.py | normalization.py | py | 793 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 5,
"usage_type": "... |
15166107096 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
# Per-trial model results; 'target_response' holds the 0/1 correctness
# of each response, so its mean is an accuracy.
all_models = pd.read_csv('model_results.csv')
all_models['Accuracy'] = all_models['target_response']

folder = 'accuracy_plots'

# Best accuracy over the c parameter for each representation/category/
# subcategory combination ("Different" trials only).
mymax = all_models.query('Task == "Different"').groupby(
    ['c', 'Representation', 'Category', 'Subcategory', 'Cleaned'], as_index=False).mean().groupby(
    ['Representation', 'Category', 'Subcategory', 'Cleaned'], as_index=False)['Accuracy'].max()
sns.catplot('Category', 'Accuracy', col='Representation', hue='Cleaned', kind='bar', data=mymax, aspect=1.5, legend_out=True)
plt.savefig(os.path.join(folder, 'vet_maximimum_accuracy.jpg'))
# plt.show()
plt.close()
mymax.to_csv('model_results_max_accuracy_subcategories.csv')

# Mean accuracy as a function of c, one line per network representation.
by_network = all_models.query('Task == "Different"').groupby(
    ['c', 'Representation', 'Cleaned'], as_index=False)['Accuracy'].mean()
sns.lineplot('c', 'Accuracy', hue='Representation', style='Cleaned', data=by_network)
legend = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.savefig(os.path.join(folder, 'vet_accuracy_overall.jpg'),
#             bbox_extra_artists=(legend,), bbox_inches='tight')
plt.show()
plt.close()

# One subplot per representation: accuracy vs c for each category.
nrep = all_models['Representation'].nunique()
f, axes = plt.subplots(ncols=nrep, sharex=True, sharey=True, figsize=(16, 12))
for i, rep in enumerate(set(all_models['Representation'])):
    by_category = all_models.query('Task == "Different" and Representation == "{}"'.format(rep)).groupby(
        ['c', 'Category', 'Cleaned'], as_index=False)['Accuracy'].mean()
    # Only the last subplot carries the legend.
    if i == nrep-1:
        lgd = 'full'
    else:
        lgd = False
    sns.lineplot('c', 'Accuracy', hue='Category', style='Cleaned', data=by_category, ci=None, ax=axes[i], legend=lgd,
                 ).set_title('Network = {}'.format(rep))
legend = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(os.path.join(folder, 'vet_accuracy_categories.jpg'),
            bbox_extra_artists=(legend,), bbox_inches='tight')
# plt.show()
plt.close()

# Same breakdown, but per subcategory within each category (one figure
# per category).
for cat in set(all_models['Category']):
    category = all_models.query('Category == "{}" and Task == "Different"'.format(cat))
    f, axes = plt.subplots(ncols=nrep, sharex=True, sharey=True, figsize=(16, 12))
    for i, rep in enumerate(set(all_models['Representation'])):
        by_subcategory = category.query('Task == "Different" and Representation == "{}"'.format(rep)).groupby(
            ['c', 'Subcategory', 'Cleaned'], as_index=False)['Accuracy'].mean()
        if i == nrep-1:
            lgd = 'full'
        else:
            lgd = False
        sns.lineplot('c', 'Accuracy', hue='Subcategory', style='Cleaned', data=by_subcategory, ci=None, ax=axes[i],
                     legend=lgd).set_title('Network = {}'.format(rep))
    legend = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    # plt.show()
    plt.savefig(os.path.join(folder, 'vet_accuracy_{}_subcategories.jpg'.format(cat)),
                bbox_extra_artists=(legend,), bbox_inches='tight')
    plt.close()
| crasanders/vision | plot_model_accuracy.py | plot_model_accuracy.py | py | 3,031 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "seaborn.catplot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.p... |
73033768993 | # -*- coding: utf-8 -*-
'''
The AWS Cloud Module
====================
The AWS cloud module is used to interact with the Amazon Web Services system.
This module has been replaced by the EC2 cloud module, and is no longer
supported. The documentation shown here is for reference only; it is highly
recommended to change all usages of this driver over to the EC2 driver.
If this driver is still needed, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/aws.conf``:
.. code-block:: yaml
my-aws-botocore-config:
# The AWS API authentication id
id: GKTADJGHEIQSXMKKRBJ08H
# The AWS API authentication key
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# The ssh keyname to use
keyname: default
# The amazon security group
securitygroup: ssh_open
# The location of the private key which corresponds to the keyname
private_key: /root/default.pem
provider: aws
'''
from __future__ import absolute_import
# pylint: disable=E0102
# Import python libs
import os
import stat
import logging
# Import salt.cloud libs
import salt.config as config
from salt.utils import namespaced_function
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
import salt.ext.six as six
# Import libcloudfuncs and libcloud_aws, required to latter patch __opts__
try:
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.cloud import libcloudfuncs
from salt.cloud.clouds import libcloud_aws
# Import libcloud_aws, storing pre and post locals so we can namespace any
# callable to this module.
PRE_IMPORT_LOCALS_KEYS = locals().copy()
from salt.cloud.clouds.libcloud_aws import * # pylint: disable=W0614,W0401
POST_IMPORT_LOCALS_KEYS = locals().copy()
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Get logging started
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'aws'
# Only load in this module if the AWS configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for AWS configs.

    Returns the virtual module name ('aws') when botocore is importable and a
    valid AWS provider configuration exists; returns False otherwise so the
    loader can fall back to the libcloud-based AWS driver instead.
    '''
    if not HAS_LIBCLOUD:
        return False
    try:
        # Import botocore lazily; it is only required by this driver.
        import botocore.session
    except ImportError:
        # Botocore is not available, the Libcloud AWS module will be loaded
        # instead.
        return False
    # "Patch" the imported libcloud_aws to have the current __opts__
    libcloud_aws.__opts__ = __opts__
    libcloudfuncs.__opts__ = __opts__
    if get_configured_provider() is False:
        return False
    # Validate every configured 'aws' provider: the private key file must
    # exist and must not be readable by anyone but the owner.
    for provider, details in six.iteritems(__opts__['providers']):
        if 'provider' not in details or details['provider'] != 'aws':
            continue
        if not os.path.exists(details['private_key']):
            raise SaltCloudException(
                'The AWS key file {0!r} used in the {1!r} provider '
                'configuration does not exist\n'.format(
                    details['private_key'],
                    provider
                )
            )
        # Octal permission string of the key file, e.g. '0400'.
        # NOTE(review): on Python 3, oct() yields '0o400', which would fail
        # this membership test -- this check assumes Python 2 semantics.
        keymode = str(
            oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
        )
        if keymode not in ('0400', '0600'):
            raise SaltCloudException(
                'The AWS key file {0!r} used in the {1!r} provider '
                'configuration needs to be set to mode 0400 or 0600\n'.format(
                    details['private_key'],
                    provider
                )
            )
    # Let's bring the functions imported from libcloud_aws to the current
    # namespace.
    keysdiff = set(POST_IMPORT_LOCALS_KEYS).difference(
        PRE_IMPORT_LOCALS_KEYS
    )
    for key in keysdiff:
        # only import callables that actually have __code__ (this includes
        # functions but excludes Exception classes)
        if (callable(POST_IMPORT_LOCALS_KEYS[key]) and
                hasattr(POST_IMPORT_LOCALS_KEYS[key], "__code__")):
            globals().update(
                {
                    key: namespaced_function(
                        POST_IMPORT_LOCALS_KEYS[key], globals(), ()
                    )
                }
            )
    # Rebind the shared libcloud helpers below so they close over this
    # module's globals and an already-open EC2 connection.
    global avail_images, avail_sizes, avail_locations, script
    global list_nodes, list_nodes_full, list_nodes_select
    # open a connection in a specific region
    conn = get_conn(**{'location': get_location()})
    # Init the libcloud functions
    avail_locations = namespaced_function(avail_locations, globals(), (conn,))
    avail_images = namespaced_function(avail_images, globals(), (conn,))
    avail_sizes = namespaced_function(avail_sizes, globals(), (conn,))
    script = namespaced_function(script, globals(), (conn,))
    list_nodes = namespaced_function(list_nodes, globals(), (conn,))
    list_nodes_full = namespaced_function(list_nodes_full, globals(), (conn,))
    list_nodes_select = namespaced_function(
        list_nodes_select, globals(), (conn,)
    )
    return 'aws'
def get_configured_provider():
    '''
    Return the first configured 'aws' provider instance, or False when no
    provider configuration carries all of the required keys.
    '''
    required_keys = ('id', 'key', 'keyname', 'securitygroup', 'private_key')
    return config.is_provider_configured(__opts__, 'aws', required_keys)
def enable_term_protect(name, call=None):
    '''
    Enable termination protection on a node

    CLI Example:

    .. code-block:: bash

        salt-cloud -a enable_term_protect mymachine
    '''
    if call == 'action':
        # Delegate the actual API work to the shared toggle helper.
        return _toggle_term_protect(name, True)
    raise SaltCloudSystemExit(
        'This action must be called with -a or --action.'
    )
def disable_term_protect(name, call=None):
    '''
    Disable termination protection on a node

    CLI Example:

    .. code-block:: bash

        salt-cloud -a disable_term_protect mymachine
    '''
    if call == 'action':
        # Delegate the actual API work to the shared toggle helper.
        return _toggle_term_protect(name, False)
    raise SaltCloudSystemExit(
        'This action must be called with -a or --action.'
    )
def _toggle_term_protect(name, enabled):
    '''
    Toggle termination protection on a node.

    name -- the salt-cloud node name to operate on.
    enabled -- True to enable termination protection, False to disable it.

    Returns a human-readable status message, which is also logged.
    '''
    # region is required for all boto queries
    region = get_location(None)

    # Build a botocore session authenticated with the provider credentials.
    vm_ = get_configured_provider()
    session = botocore.session.get_session()  # pylint: disable=E0602
    access_key = config.get_cloud_config_value(
        'id', vm_, __opts__, search_global=False
    )
    secret_key = config.get_cloud_config_value(
        'key', vm_, __opts__, search_global=False
    )
    session.set_credentials(access_key=access_key, secret_key=secret_key)

    ec2 = session.get_service('ec2')
    endpoint = ec2.get_endpoint(region)

    # Resolve the supplied node name to an EC2 instance id.
    node = get_node(get_conn(location=region), name)

    # Flip the disableApiTermination attribute on the instance.
    operation = ec2.get_operation('modify-instance-attribute')
    http_response, response_data = operation.call(
        endpoint,
        instance_id=node.id,
        attribute='disableApiTermination',
        value='true' if enabled else 'false',
    )

    if http_response.status_code != 200:
        # No proper HTTP response!?
        msg = 'Bad response from AWS: {0}'.format(http_response.status_code)
        log.error(msg)
        return msg

    state = 'enabled' if enabled else 'disabled'
    msg = 'Termination protection successfully {0} on {1}'.format(state, name)
    log.info(msg)
    return msg
| shineforever/ops | salt/salt/cloud/clouds/botocore_aws.py | botocore_aws.py | py | 7,499 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "salt.cloud.clouds.libcloud_aws.__opts__",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "salt.cloud.clouds.libcloud_aws",
"line_number": 83,
"usage_type": "name"
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.