blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8285d32e60bedabe538a29a3d0439d69fd0d42f4 | 0c84b6ba39e2d60c8ae50ba9d3a9d052dac3f85b | /demo/routes.py | 91aa49ce534b10bde15ad5393dcc4486c1378d55 | [] | no_license | RuslanOm/simple_server | 6c84e6d0d7d051c3fb6b97e00384c7040594ef77 | 4838fd63eb573d91faa1225376c2c0e80bec7d3d | refs/heads/master | 2021-07-01T23:17:04.225822 | 2019-11-25T13:40:06 | 2019-11-25T13:40:06 | 223,955,519 | 0 | 0 | null | 2021-03-26T00:56:24 | 2019-11-25T13:29:40 | Python | UTF-8 | Python | false | false | 408 | py | from .views import frontend
def setup_routes(app):
    """Register every frontend URL handler on the aiohttp application router."""
    routes = (
        ("GET", "/", frontend.index),
        ("POST", "/registration", frontend.reg_user),
        ("GET", "/login", frontend.login),
        ("GET", "/error", frontend.error),
        ("GET", "/logout", frontend.logout),
        ("GET", "/test", frontend.test),
    )
    for method, path, handler in routes:
        app.router.add_route(method, path, handler)
| [
"ruslangfwm@gmail.com"
] | ruslangfwm@gmail.com |
8a6874d0099dce3f2d73698422596393937926c4 | 1422a57e98aba02321b772d72f8f0ada6d8b8cba | /mm/models/shared/augmentation.py | 91ccf3fae4c30c7c4b6af2cc19bd690100302532 | [
"MIT"
] | permissive | JonasRSV/Friday | e1908a411aa133bc5bd2f383b0a995f7e028092d | f959eff95ba7b11525f97099c8f5ea0e325face7 | refs/heads/main | 2023-05-15T03:33:21.542621 | 2021-06-12T10:34:50 | 2021-06-12T10:34:50 | 315,309,991 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from typing import List
import tensorflow as tf
import numpy as np
import models.shared.augmentations as a
import random
def create_audio_augmentations(aug: List[a.Augmentation], p: np.ndarray):
    """Build a callable that applies each augmentation independently with its probability.

    ``aug[i]`` is applied to the audio with probability ``p[i]``; augmentations
    are tried in order and chain their results.
    """
    if len(aug) != len(p):
        raise ValueError(f"Length of augmentations must match distribution {len(aug)} != {len(p)}")

    def audio_augmentations(audio: np.ndarray, sample_rate: int):
        for augmentation, probability in zip(aug, p):
            if np.random.rand() < probability:
                audio = augmentation.apply(audio, sample_rate)
        return audio

    return audio_augmentations
| [
"jonas@valfridsson.net"
] | jonas@valfridsson.net |
37647a330c24011d5d8bd893ef4bdfdccdd17b31 | a1a64308806fe7b6625a8d4f6a67a116ee6ff882 | /p_methods.py | 5aadb94396b5e6e3d13c7095ba928d3d11df3a55 | [] | no_license | obakanue/machine-learning | ae9783ec0a2db4dff8fde6f7c1da53c54031154b | 1b9f8c7a2ea4ebaf6e5acafd8f2d77861dd04333 | refs/heads/master | 2021-01-16T11:54:48.940100 | 2020-02-28T09:36:56 | 2020-02-28T09:36:56 | 243,109,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | import numpy as np
import matplotlib.pyplot as plt
def looc(matrix, alpha):
    """Leave-one-out cross-validation of the perceptron.

    For each row: train on all the other rows (fresh weights each fold),
    evaluate on the held-out row, and print running results.  Returns the
    weights from the final fold.

    NOTE(review): ``tries`` accumulates training epochs *and* the single
    evaluation pass of every fold, so the printed "Missclassification" value
    is a fraction of all passes, not of the number of folds — confirm intent.
    """
    missclassified_eval = 0
    tries = 0
    for i in range(len(matrix)):
        # Hold out sample i; train on the remaining rows.
        evaluation_sample = matrix[i]
        training_set = np.delete(matrix, i, axis=0)
        w = [0.4,1.0,1.0]  # fixed initial weights: bias, x1, x2
        ######################## Training Weights #######################
        w, count = train_weights(training_set, w, alpha)
        tries += count
        _, right, count = evaluate(evaluation_sample, w)
        tries += count
        if not right: missclassified_eval += 1
        print(f"Prediction: {_} class: {evaluation_sample[3]}")
        print("Right: ", right)
        print("Missclassified count: ", missclassified_eval)
        #plot_perceptron(matrix, w, alpha, evaluation_sample, i) # Uncomment in order to plot every model
    print(f"########################### Evaluation ##########################")
    print(f"Missclassification: {missclassified_eval / tries}%")
    return w
def read_libsvm():
    """Load the salammbo libsvm dataset.

    Returns (X, y) where each X row gets a leading bias feature of 1.0 and all
    "index:value" features are reduced to their float values.
    """
    raw_lines = open('salammbo_a_binary.libsvm').read().strip().split('\n')
    rows = [line.split() for line in raw_lines]
    y = [float(row[0]) for row in rows]
    X = [['0:1'] + row[1:] for row in rows]
    X = [[float(feature.split(':')[1]) for feature in row] for row in X]
    return X, y
def numpy_hstack(x_values, y_values):
    """Append *y_values* as one extra column on the right of 2-D *x_values*."""
    features = np.array(x_values)
    labels_column = np.array([y_values]).T
    return np.hstack((features, labels_column))
def normalize(x_values, y_values):
    """Scale *x_values* and *y_values* into [0, 1] by dividing by their maxima.

    Prints the data before and after scaling and returns the two scaled lists.
    Raises ValueError on empty input and ZeroDivisionError if a maximum is 0.
    """
    print("X-values: ", x_values)
    print("--------------------------------------------------------------------------")
    print("Y-values: ", np.array(y_values).T)
    # Hoist the max() calls out of the comprehensions: the original evaluated
    # max(...) once per element, making the scaling quadratic in input length.
    x_max = max(x_values)
    y_max = max(y_values)
    x_values = [x / x_max for x in x_values]
    y_values = [y / y_max for y in y_values]
    print("############################## Normalized ################################")
    print("X -values: ", x_values)
    print("--------------------------------------------------------------------------")
    print("Y-values: ", y_values)
    return x_values, y_values
def evaluate(row, w):
    """Classify one sample with perceptron weights.

    *row* is [bias, x1, x2, label]; *w* is [w0, w1, w2].
    Returns (prediction, correct_flag, 1) — the trailing 1 is a pass counter.
    """
    activation = w[0] + w[1] * row[1] + w[2] * row[2]
    prediction = 1 if activation > 0 else 0
    right = 1 if prediction == row[3] else 0
    return prediction, right, 1
def predict(matrix, w):
    """Classify every row of *matrix* with weights *w*.

    Returns (nbr_right, predictions, count) where ``count`` is the pass unit
    from the last evaluate() call (always 1 for non-empty input) that
    train_weights uses as an epoch counter.
    """
    nbr_right = 0
    predictions = []
    # Bug fix: the original left ``count`` unassigned, raising
    # UnboundLocalError at the return for an empty matrix.
    count = 0
    for row in matrix:
        prediction, right, count = evaluate(row, w)
        nbr_right += right
        predictions.append(prediction)
    return nbr_right, predictions, count
def update_step(matrix, w, alpha, predictions):
    """Apply the perceptron learning rule once.

    Shifts *w* in place toward (label 1) or away from (label 0) every
    misclassified sample and returns the same list.
    """
    for row, predicted in zip(matrix, predictions):
        if predicted == row[3]:
            continue  # correctly classified samples leave the weights alone
        direction = 1 if row[3] == 1 else -1
        for i in range(len(w)):
            w[i] += alpha * direction * row[i]
    return w
def train_weights(matrix, w, alpha):
    """Run perceptron epochs until every sample is classified correctly.

    Returns (weights, epoch_count).  Warning: does not terminate when the
    data is not linearly separable.
    """
    epochs = 0
    correct = 0
    while correct < len(matrix):
        correct, predictions, count = predict(matrix, w)
        w = update_step(matrix, w, alpha, predictions)
        epochs += count
    return w, epochs
def plot_perceptron(X, w, alpha, *args):
    """Scatter-plot both classes and draw the perceptron decision boundary.

    :param X: rows [bias, letters, a_count, label]; label 0 = English,
              1 = French (per the plot legends/axis labels)
    :param w: weight vector [w0, w1, w2]
    :param alpha: learning rate, shown in the plot title only
    :param args: optionally (evaluation_sample, fold_index) to highlight a
                 held-out sample in black
    """
    # Split the samples by their class label (column 3).
    X_en = np.array([x for x in filter(lambda X: X[3] == 0, X)])
    X_fr = np.array([x for x in filter(lambda X: X[3] == 1, X)])
    x_axis = np.array(X)[:,1]
    # Boundary of w0 + w1*x + w2*y = 0; this expression simplifies to -w1/w2.
    slope = -(w[0] / w[2]) / (w[0] / w[1])
    intercept = -w[0] / w[2]
    plt.plot(x_axis, slope * x_axis + intercept, '-', label='Line')
    plt.scatter(X_en[:,1], X_en[:,2], marker='o', label='English, 0', color='Blue')
    plt.scatter(X_fr[:,1], X_fr[:,2], marker='o', label='French, 1', color='Red')
    language = "French"
    if args:
        if args[0][3] == 0:
            language = "English"
        args = np.array(args)
        plt.scatter(args[0][1], args[0][2], marker='o', label=f'Evaluation Sample {language} Eval: {args[1]}', color='Black')
    plt.xlabel('Letters')
    plt.ylabel('A\'s')
    plt.title(f"Perceptron classified (alpha={alpha})")
    plt.legend()
    plt.show()
| [
"sofi.flinck@gmail.com"
] | sofi.flinck@gmail.com |
d613832fb1e4fbf8daf1f43cb77c47396088f146 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_181/ch4_2020_03_05_16_07_05_989464.py | dff125cf8d4a74499e2b22478368603f7e78b838 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def classifica_idade(i):
if i<12:
return 'crianca'
if 18>i>12:
return 'adolescente'
else:
return 'adulto'
| [
"you@example.com"
] | you@example.com |
9f89a0ec42ff6944d55b32008610820e57ebc105 | 3da8cc504f5c0b03aeca1955c52d103280630c84 | /scrapper/spiders/vnexpress.py | 8faf4124c32b7b761a61d4da305cb250f9622826 | [] | no_license | nguyenvhung9420/WebCrawler-Python | af8b8c1028397a9b53b235d831b6ab7ff44eac31 | 07f575517668c8c36246886532c2359804951df8 | refs/heads/master | 2022-05-23T09:43:25.908794 | 2019-02-26T06:03:46 | 2019-02-26T06:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | import scrapy
from random import randint
class VnExpressSpider(scrapy.Spider):
    """Crawl vnexpress.net: follow the main-menu categories, paginate each
    listing, and scrape the individual articles."""

    name = "vnexpress"
    BASE_URL = 'https://vnexpress.net'

    def start_requests(self):
        """Seed the crawl with the site homepage."""
        url = self.BASE_URL
        yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Collect category links from the main menu.

        Relative links are resolved against BASE_URL; "raovat" (classifieds)
        and "video" sections are skipped.
        """
        links = response.css('#main_menu a::attr(href)') .extract()
        normal_links = [self.BASE_URL + x for x in links if not x.startswith('http') and "raovat" not in x and "video" not in x]
        full_links = [x for x in links if x.startswith('http')]
        links = normal_links + full_links
        print(links)
        for link in links:
            yield scrapy.Request(link, callback=self.parse_links)

    def parse_links(self, response):
        """Queue every article on a category page, then follow pagination."""
        links = response.css('article .title_news a::attr(href)').extract()
        for link in links:
            yield scrapy.Request(link, callback=self.parse_content)
        # Follow the "next page" link, if any, back into this callback.
        paging = response.css('#pagination a.next::attr(href)').extract_first()
        if paging:
            yield scrapy.Request(paging, callback=self.parse_links)

    def parse_content(self, response):
        """Extract one article: title, summary, body, category and date."""
        title = response.css('.title_news_detail::text').extract_first().lstrip().rstrip()
        summary = response.css('.sidebar_1 .description::text').extract()
        content = response.css('.content_detail p').xpath('.//text()').extract()
        cat = response.css('.cat_header ul li.start a::text').extract_first()
        date = response.css('.time::text').extract_first()
        content = '\n'.join(content).lstrip().rstrip()
        summary = '\n'.join(summary).lstrip().rstrip()
        yield {'_id': response.url, 'date': date, 'title': title, 'summary': summary, 'content': content, 'type': cat}
| [
"noreply@github.com"
] | nguyenvhung9420.noreply@github.com |
2c112181b5b017d573f3ec6d896a3e5b4a5c9433 | 086b56d661a947200cb4b100929a2c3147ab861f | /dasouche/handle_dasouche.py | dcbb2cae7587ebbf7658be7ee24e794e1696caa4 | [] | no_license | peterdocter/small-spider-project | 129ac5136603bbe0f56beb14676ab0031ecbc545 | b1f1e2e804794e87152c4f5a07a77cad1f99f3e4 | refs/heads/master | 2023-02-05T18:02:19.324107 | 2020-12-28T09:48:23 | 2020-12-28T09:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | import requests
import json
import re
import pymongo
from pymongo.collection import Collection
from concurrent.futures.thread import ThreadPoolExecutor
class HandleDaSouChe(object):
    """Scrape used-car listings from aolai.souche.com and upsert them into MongoDB."""

    def __init__(self):
        # URL of the paginated search endpoint (queried with POST)
        self.page_url = "https://aolai.souche.com/v1/searchApi/searchCar.json?_security_token=undefined"
        self.header = {
            "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        }
        # Detail-page URLs collected from the search pages
        self.item_url_list = []
        # NOTE(review): MongoDB host is hard-coded to a private address.
        mongo_client = pymongo.MongoClient(host="10.70.120.156", port=27017)
        self.db_data = mongo_client['oreo']

    def handle_save_data(self,item):
        """Upsert one car document, keyed by carId so re-runs update in place."""
        db_collection = Collection(self.db_data, 'dasouche_data')
        db_collection.update({'carId':item['carId']},item,True)

    def handle_page(self):
        """Walk the search pages and collect every car's detail URL."""
        for page in range(1,5):
            # Build the POST payload; each page can return 500 records, 4 pages in total
            data = {
                "keyword":"",
                "brandCode":"",
                "seriesCode":"",
                "price":"",
                "carModel":"",
                "carAge":"",
                "mileage":"",
                "gearboxType":"",
                "displacement":"",
                "emissionStandard":"",
                "bodyColor":"",
                "fuelType":"",
                "seatingCapacity":"",
                "drivingMode":"",
                "country":"",
                "pageNo":page,
                "pageSize":"500",
                "from":"pc",
                "cityCode":"",
                "shopCode":"",
                "sort":"newsOnShelf",
            }
            page_result = self.handle_request(method='POST',url=self.page_url,data=data)
            for item in json.loads(page_result)['data']['items']:
                self.item_url_list.append(item['detailUrl'])

    # Process one detail page: merge car, shop and configuration data, then save.
    def handle_detail(self,url):
        id_search = re.compile(r"carId=(.*?)&shopCode=(\d+)")
        car_id = id_search.search(url).group(1)
        shop_id = id_search.search(url).group(2)
        # Car detail information
        car_detail_url = "https://aolai.souche.com//v1/carDetailsApi/carDetailInfo.json?carId=%s"%car_id
        car_detail = self.handle_request(method='GET',url=car_detail_url)
        car_detail_result = json.loads(car_detail)['data']
        # Selling shop information
        shop_detail_url = "https://aolai.souche.com//v1/shopApi/queryTangecheShopInfo.json?carId=%s&citycode=%s&shopCode=%s"%(car_id,car_detail_result['baseCarInfoView']['cityCode'],shop_id)
        shop_detail_result = self.handle_request(method='GET',url=shop_detail_url)
        car_detail_result.update(json.loads(shop_detail_result)['data'])
        # Manufacturer configuration information
        car_config_url = "https://aolai.souche.com/v1/carDetailsApi/carConfigDetailInfo.json?_security_token=undefined&carId=%s"%car_id
        car_config_result = self.handle_request(method='GET',url=car_config_url)
        car_detail_result.update(json.loads(car_config_result)['data'])
        car_detail_result['from_url'] = url
        self.handle_save_data(car_detail_result)

    def handle_request(self,method,url,data=None):
        """Thin requests wrapper; returns the response body text (None for other methods)."""
        if method == 'POST':
            response = requests.post(url=url,headers=self.header,data=data)
            return response.text
        elif method == 'GET':
            response = requests.get(url=url,headers=self.header)
            return response.text

    def run(self):
        """Collect all detail URLs, then fetch and persist them concurrently."""
        self.handle_page()
        t = ThreadPoolExecutor()
        for url in self.item_url_list:
            t.submit(self.handle_detail,url)
        t.shutdown()
def main():
    """Entry point: run a complete scrape-and-persist cycle."""
    scraper = HandleDaSouChe()
    scraper.run()
# Run the full scrape when executed as a script.
if __name__ == '__main__':
    main()
| [
"dazhuang_python@sina.com"
] | dazhuang_python@sina.com |
2c5c3ca6112c1b3ff3ba2728a650e7fe1f45edba | 642ba1746fed0b722a127b8426eca987df6efc61 | /docs/domains/hocdomain.py | 73ae11df5d57648e36cb62ece00a59013112ad63 | [
"BSD-3-Clause"
] | permissive | neuronsimulator/nrn | 23781d978fe9253b0e3543f41e27252532b35459 | b786c36d715ba0f6da1ba8bdf5d2338c939ecf51 | refs/heads/master | 2023-08-09T00:13:11.123525 | 2023-08-04T13:11:02 | 2023-08-04T13:11:02 | 71,627,569 | 313 | 171 | NOASSERTION | 2023-09-14T17:48:03 | 2016-10-22T08:47:37 | C++ | UTF-8 | Python | false | false | 61,213 | py | # generated from 'sphinx/domains/python.py' @ Sphinx 6.1.3
"""The HOC domain."""
from __future__ import annotations
import ast
import builtins
import inspect
import re
import typing
from inspect import Parameter
from typing import Any, Iterable, Iterator, List, NamedTuple, Tuple, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import Inliner
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref, pending_xref_condition
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, IndexEntry, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective, switch_source_input
from sphinx.util.inspect import signature_from_str
from sphinx.util.nodes import (
find_pending_xref_condition,
make_id,
make_refnode,
nested_parse_with_titles,
)
from sphinx.util.typing import OptionSpec, TextlikeNode
logger = logging.getLogger(__name__)
# REs for HOC signatures
hoc_sig_re = re.compile(
r"""^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
""",
re.VERBOSE,
)
pairindextypes = {
"module": _("module"),
"keyword": _("keyword"),
"operator": _("operator"),
"object": _("object"),
"exception": _("exception"),
"statement": _("statement"),
"builtin": _("HOC built-in function"),
}
class ObjectEntry(NamedTuple):
    """Registry record for one documented HOC object."""

    docname: str  # document in which the object is described
    node_id: str  # anchor id of the object's description node
    objtype: str  # directive object type key
    aliased: bool  # True when registered through the :canonical: option
class ModuleEntry(NamedTuple):
    """Registry record for one documented HOC module."""

    docname: str  # document in which the module is described
    node_id: str  # anchor id of the module's target node
    synopsis: str  # short description of the module
    platform: str  # platform annotation, if any
    deprecated: bool  # True when the module is marked deprecated
def parse_reftarget(
    reftarget: str, suppress_prefix: bool = False
) -> tuple[str, str, str, bool]:
    """Split a reference target into (reftype, reftarget, title, refspecific).

    A leading "." marks the target as "refspecific"; a leading "~" shortens
    the displayed title to the last dotted component, as does
    *suppress_prefix*.  ``None`` and ``typing.*`` targets resolve with the
    "obj" role, everything else with "class".
    """
    refspecific = False
    title = reftarget
    if reftarget.startswith("."):
        reftarget = reftarget[1:]
        title = reftarget
        refspecific = True
    elif reftarget.startswith("~"):
        reftarget = reftarget[1:]
        title = reftarget.rpartition(".")[2]
    elif suppress_prefix:
        title = reftarget.rpartition(".")[2]
    elif reftarget.startswith("typing."):
        title = reftarget[len("typing.") :]

    # The typing module provides non-class types; an "obj" reference is the
    # right role for them (and for None).
    if reftarget == "None" or reftarget.startswith("typing."):
        reftype = "obj"
    else:
        reftype = "class"
    return reftype, reftarget, title, refspecific
def type_to_xref(
    target: str, env: BuildEnvironment | None = None, suppress_prefix: bool = False
) -> addnodes.pending_xref:
    """Convert a type string to a cross reference node.

    :param target: dotted name to reference
    :param env: current build environment (may be ``None``), used to pick up
        the active ``hoc:module`` / ``hoc:class`` ref context
    :param suppress_prefix: shorten the displayed title to its last component
    """
    if env:
        kwargs = {
            "hoc:module": env.ref_context.get("hoc:module"),
            "hoc:class": env.ref_context.get("hoc:class"),
        }
    else:
        kwargs = {}

    reftype, target, title, refspecific = parse_reftarget(target, suppress_prefix)

    # Bug fix: ``env`` is optional (guarded above), but the original
    # dereferenced ``env.config`` unconditionally here, raising
    # AttributeError when called with env=None.
    if env and env.config.hoc_use_unqualified_type_names:
        # Note: It would be better to use qualname to describe the object to
        # support nested classes. But the domain can't access the real python
        # object because this module should work not-dynamically.
        shortname = title.split(".")[-1]
        contnodes: list[Node] = [
            pending_xref_condition("", shortname, condition="resolved"),
            pending_xref_condition("", title, condition="*"),
        ]
    else:
        contnodes = [nodes.Text(title)]

    return pending_xref(
        "",
        *contnodes,
        refdomain="hoc",
        reftype=reftype,
        reftarget=target,
        refspecific=refspecific,
        **kwargs,
    )
def _parse_annotation(annotation: str, env: BuildEnvironment | None) -> list[Node]:
    """Parse a type annotation string into a list of doctree nodes.

    The string is parsed with :mod:`ast`; names become pending
    cross-references (via ``type_to_xref``) while punctuation and literals
    become signature nodes.  Unparsable input falls back to a single xref.
    """

    def unparse(node: ast.AST) -> list[Node]:
        # Recursively turn an AST node into a flat list of doctree nodes.
        if isinstance(node, ast.Attribute):
            return [nodes.Text(f"{unparse(node.value)[0]}.{node.attr}")]
        elif isinstance(node, ast.BinOp):
            result: list[Node] = unparse(node.left)
            result.extend(unparse(node.op))
            result.extend(unparse(node.right))
            return result
        elif isinstance(node, ast.BitOr):
            # "X | Y" union syntax
            return [
                addnodes.desc_sig_space(),
                addnodes.desc_sig_punctuation("", "|"),
                addnodes.desc_sig_space(),
            ]
        elif isinstance(node, ast.Constant):
            if node.value is Ellipsis:
                return [addnodes.desc_sig_punctuation("", "...")]
            elif isinstance(node.value, bool):
                return [addnodes.desc_sig_keyword("", repr(node.value))]
            elif isinstance(node.value, int):
                return [addnodes.desc_sig_literal_number("", repr(node.value))]
            elif isinstance(node.value, str):
                return [addnodes.desc_sig_literal_string("", repr(node.value))]
            else:
                # handles None, which is further handled by type_to_xref later
                # and fallback for other types that should be converted
                return [nodes.Text(repr(node.value))]
        elif isinstance(node, ast.Expr):
            return unparse(node.value)
        elif isinstance(node, ast.Index):
            return unparse(node.value)
        elif isinstance(node, ast.Invert):
            return [addnodes.desc_sig_punctuation("", "~")]
        elif isinstance(node, ast.List):
            result = [addnodes.desc_sig_punctuation("", "[")]
            if node.elts:
                # check if there are elements in node.elts to only pop the
                # last element of result if the for-loop was run at least
                # once
                for elem in node.elts:
                    result.extend(unparse(elem))
                    result.append(addnodes.desc_sig_punctuation("", ","))
                    result.append(addnodes.desc_sig_space())
                result.pop()
                result.pop()
            result.append(addnodes.desc_sig_punctuation("", "]"))
            return result
        elif isinstance(node, ast.Module):
            return sum((unparse(e) for e in node.body), [])
        elif isinstance(node, ast.Name):
            return [nodes.Text(node.id)]
        elif isinstance(node, ast.Subscript):
            # Optional[...] / Union[...] are rendered in PEP 604 "X | Y" form.
            if getattr(node.value, "id", "") in {"Optional", "Union"}:
                return _unparse_pep_604_annotation(node)
            result = unparse(node.value)
            result.append(addnodes.desc_sig_punctuation("", "["))
            result.extend(unparse(node.slice))
            result.append(addnodes.desc_sig_punctuation("", "]"))

            # Wrap the Text nodes inside brackets by literal node if the subscript is a Literal
            if result[0] in ("Literal", "typing.Literal"):
                for i, subnode in enumerate(result[1:], start=1):
                    if isinstance(subnode, nodes.Text):
                        result[i] = nodes.literal("", "", subnode)
            return result
        elif isinstance(node, ast.UnaryOp):
            return unparse(node.op) + unparse(node.operand)
        elif isinstance(node, ast.Tuple):
            if node.elts:
                result = []
                for elem in node.elts:
                    result.extend(unparse(elem))
                    result.append(addnodes.desc_sig_punctuation("", ","))
                    result.append(addnodes.desc_sig_space())
                result.pop()
                result.pop()
            else:
                result = [
                    addnodes.desc_sig_punctuation("", "("),
                    addnodes.desc_sig_punctuation("", ")"),
                ]
            return result
        else:
            raise SyntaxError  # unsupported syntax

    def _unparse_pep_604_annotation(node: ast.Subscript) -> list[Node]:
        # Flatten Optional[...]/Union[...] subscripts into "A | B | ..." nodes.
        subscript = node.slice
        if isinstance(subscript, ast.Index):
            # py38 only
            subscript = subscript.value  # type: ignore[assignment]

        flattened: list[Node] = []
        if isinstance(subscript, ast.Tuple):
            flattened.extend(unparse(subscript.elts[0]))
            for elt in subscript.elts[1:]:
                flattened.extend(unparse(ast.BitOr()))
                flattened.extend(unparse(elt))
        else:
            # e.g. a Union[] inside an Optional[]
            flattened.extend(unparse(subscript))

        if getattr(node.value, "id", "") == "Optional":
            flattened.extend(unparse(ast.BitOr()))
            flattened.append(nodes.Text("None"))

        return flattened

    try:
        tree = ast.parse(annotation, type_comments=True)
        result: list[Node] = []
        for node in unparse(tree):
            if isinstance(node, nodes.literal):
                result.append(node[0])
            elif isinstance(node, nodes.Text) and node.strip():
                # A preceding "~" punctuation requests an unqualified title.
                if (
                    result
                    and isinstance(result[-1], addnodes.desc_sig_punctuation)
                    and result[-1].astext() == "~"
                ):
                    result.pop()
                    result.append(type_to_xref(str(node), env, suppress_prefix=True))
                else:
                    result.append(type_to_xref(str(node), env))
            else:
                result.append(node)
        return result
    except SyntaxError:
        return [type_to_xref(annotation, env)]
def _parse_arglist(
    arglist: str, env: BuildEnvironment | None = None
) -> addnodes.desc_parameterlist:
    """Parse a list of arguments using AST parser.

    The arglist is wrapped in parentheses and parsed as a Python signature;
    each parameter (with any annotation and default) becomes a
    ``desc_parameter`` node, with "/" and "*" separators inserted for
    positional-only and keyword-only groups.
    """
    params = addnodes.desc_parameterlist(arglist)
    sig = signature_from_str("(%s)" % arglist)
    last_kind = None
    for param in sig.parameters.values():
        if param.kind != param.POSITIONAL_ONLY and last_kind == param.POSITIONAL_ONLY:
            # PEP-570: Separator for Positional Only Parameter: /
            params += addnodes.desc_parameter(
                "", "", addnodes.desc_sig_operator("", "/")
            )
        if param.kind == param.KEYWORD_ONLY and last_kind in (
            param.POSITIONAL_OR_KEYWORD,
            param.POSITIONAL_ONLY,
            None,
        ):
            # PEP-3102: Separator for Keyword Only Parameter: *
            params += addnodes.desc_parameter(
                "", "", addnodes.desc_sig_operator("", "*")
            )

        node = addnodes.desc_parameter()
        if param.kind == param.VAR_POSITIONAL:
            node += addnodes.desc_sig_operator("", "*")
            node += addnodes.desc_sig_name("", param.name)
        elif param.kind == param.VAR_KEYWORD:
            node += addnodes.desc_sig_operator("", "**")
            node += addnodes.desc_sig_name("", param.name)
        else:
            node += addnodes.desc_sig_name("", param.name)

        if param.annotation is not param.empty:
            children = _parse_annotation(param.annotation, env)
            node += addnodes.desc_sig_punctuation("", ":")
            node += addnodes.desc_sig_space()
            node += addnodes.desc_sig_name("", "", *children)  # type: ignore
        if param.default is not param.empty:
            if param.annotation is not param.empty:
                # "x: int = 1" gets spaces around "="; "x=1" does not.
                node += addnodes.desc_sig_space()
                node += addnodes.desc_sig_operator("", "=")
                node += addnodes.desc_sig_space()
            else:
                node += addnodes.desc_sig_operator("", "=")
            node += nodes.inline(
                "", param.default, classes=["default_value"], support_smartquotes=False
            )

        params += node
        last_kind = param.kind

    if last_kind == Parameter.POSITIONAL_ONLY:
        # PEP-570: Separator for Positional Only Parameter: /
        params += addnodes.desc_parameter("", "", addnodes.desc_sig_operator("", "/"))

    return params
def _pseudo_parse_arglist(signode: desc_signature, arglist: str) -> None:
    """ "Parse" a list of arguments separated by commas.

    Fallback used when the AST-based parser fails.  Arguments can have
    "optional" annotations given by enclosing them in brackets.  Currently,
    this will split at any comma, even if it's inside a string literal
    (e.g. default argument value).
    """
    paramlist = addnodes.desc_parameterlist()
    # stack[-1] is the node new parameters are appended to; desc_optional
    # nodes are pushed/popped as "[" / "]" are encountered.
    stack: list[Element] = [paramlist]
    try:
        for argument in arglist.split(","):
            argument = argument.strip()
            ends_open = ends_close = 0
            while argument.startswith("["):
                stack.append(addnodes.desc_optional())
                stack[-2] += stack[-1]
                argument = argument[1:].strip()
            while argument.startswith("]"):
                stack.pop()
                argument = argument[1:].strip()
            while argument.endswith("]") and not argument.endswith("[]"):
                ends_close += 1
                argument = argument[:-1].strip()
            while argument.endswith("["):
                ends_open += 1
                argument = argument[:-1].strip()
            if argument:
                stack[-1] += addnodes.desc_parameter(
                    "", "", addnodes.desc_sig_name(argument, argument)
                )
            while ends_open:
                stack.append(addnodes.desc_optional())
                stack[-2] += stack[-1]
                ends_open -= 1
            while ends_close:
                stack.pop()
                ends_close -= 1
        if len(stack) != 1:
            raise IndexError
    except IndexError:
        # if there are too few or too many elements on the stack, just give up
        # and treat the whole argument list as one argument, discarding the
        # already partially populated paramlist node
        paramlist = addnodes.desc_parameterlist()
        paramlist += addnodes.desc_parameter(arglist, arglist)
        signode += paramlist
    else:
        signode += paramlist
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
class HOCXrefMixin:
    """Mixin for doc fields that renders field bodies as HOC cross-references."""

    def make_xref(
        self,
        rolename: str,
        domain: str,
        target: str,
        innernode: type[TextlikeNode] = nodes.emphasis,
        contnode: Node | None = None,
        env: BuildEnvironment | None = None,
        inliner: Inliner | None = None,
        location: Node | None = None,
    ) -> Node:
        # we use inliner=None to make sure we get the old behaviour with a single
        # pending_xref node
        result = super().make_xref(
            rolename,
            domain,
            target,  # type: ignore
            innernode,
            contnode,
            env,
            inliner=None,
            location=None,
        )
        if isinstance(result, pending_xref):
            # Carry the active module/class context onto the reference node.
            result["refspecific"] = True
            result["hoc:module"] = env.ref_context.get("hoc:module")
            result["hoc:class"] = env.ref_context.get("hoc:class")

            reftype, reftarget, reftitle, _ = parse_reftarget(target)
            if reftarget != reftitle:
                # A "." or "~" prefix was stripped: adjust target and title.
                result["reftype"] = reftype
                result["reftarget"] = reftarget

                result.clear()
                result += innernode(reftitle, reftitle)
            elif env.config.hoc_use_unqualified_type_names:
                children = result.children
                result.clear()

                shortname = target.split(".")[-1]
                textnode = innernode("", shortname)
                contnodes = [
                    pending_xref_condition("", "", textnode, condition="resolved"),
                    pending_xref_condition("", "", *children, condition="*"),
                ]
                result.extend(contnodes)

        return result

    def make_xrefs(
        self,
        rolename: str,
        domain: str,
        target: str,
        innernode: type[TextlikeNode] = nodes.emphasis,
        contnode: Node | None = None,
        env: BuildEnvironment | None = None,
        inliner: Inliner | None = None,
        location: Node | None = None,
    ) -> list[Node]:
        # Split compound targets like "list[int] or str" at brackets, commas,
        # "or"/"of", "|" and "..." and xref each piece separately.
        delims = r"(\s*[\[\]\(\),](?:\s*o[rf]\s)?\s*|\s+o[rf]\s+|\s*\|\s*|\.\.\.)"
        delims_re = re.compile(delims)
        sub_targets = re.split(delims, target)

        split_contnode = bool(contnode and contnode.astext() == target)

        in_literal = False
        results = []
        for sub_target in filter(None, sub_targets):
            if split_contnode:
                contnode = nodes.Text(sub_target)

            if in_literal or delims_re.match(sub_target):
                # Delimiters and Literal[...] contents stay as plain text.
                results.append(contnode or innernode(sub_target, sub_target))
            else:
                results.append(
                    self.make_xref(
                        rolename,
                        domain,
                        sub_target,
                        innernode,
                        contnode,
                        env,
                        inliner,
                        location,
                    )
                )

            if sub_target in ("Literal", "typing.Literal", "~typing.Literal"):
                in_literal = True

        return results
class HOCField(HOCXrefMixin, Field):
    """Plain doc field whose body is rendered as a HOC cross-reference."""
    pass


class HOCGroupedField(HOCXrefMixin, GroupedField):
    """Grouped doc field (e.g. "Raises") with HOC-domain xref handling."""
    pass


class HOCTypedField(HOCXrefMixin, TypedField):
    """Typed doc field (e.g. "Parameters") with HOC-domain xref handling."""
    pass
class HOCObject(ObjectDescription[Tuple[str, str]]):
    """
    Description of a general HOC object.

    :cvar allow_nesting: Class is an object that allows for nested namespaces
    :vartype allow_nesting: bool
    """

    # Options accepted by every HOC object directive.
    option_spec: OptionSpec = {
        "noindex": directives.flag,
        "noindexentry": directives.flag,
        "nocontentsentry": directives.flag,
        "module": directives.unchanged,
        "canonical": directives.unchanged,
        "annotation": directives.unchanged,
    }

    # Info-field-list fields recognised in the directive body
    # (:param:, :var:, :raises:, :returns:, :rtype: and their aliases).
    doc_field_types = [
        HOCTypedField(
            "parameter",
            label=_("Parameters"),
            names=(
                "param",
                "parameter",
                "arg",
                "argument",
                "keyword",
                "kwarg",
                "kwparam",
            ),
            typerolename="class",
            typenames=("paramtype", "type"),
            can_collapse=True,
        ),
        HOCTypedField(
            "variable",
            label=_("Variables"),
            names=("var", "ivar", "cvar"),
            typerolename="class",
            typenames=("vartype",),
            can_collapse=True,
        ),
        HOCGroupedField(
            "exceptions",
            label=_("Raises"),
            rolename="exc",
            names=("raises", "raise", "exception", "except"),
            can_collapse=True,
        ),
        Field(
            "returnvalue",
            label=_("Returns"),
            has_arg=False,
            names=("returns", "return"),
        ),
        HOCField(
            "returntype",
            label=_("Return type"),
            has_arg=False,
            names=("rtype",),
            bodyrolename="class",
        ),
    ]

    allow_nesting = False
    def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
        """May return a prefix to put before the object name in the
        signature.

        The base implementation returns no prefix; subclasses override this
        to prepend annotations such as keywords.
        """
        return []
    def needs_arglist(self) -> bool:
        """May return true if an empty argument list is to be generated even if
        the document contains none.

        The base implementation returns False; callable subclasses override it.
        """
        return False
    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Transform a HOC signature into RST nodes.

        Return (fully qualified name of the thing, classname if any).

        If inside a class, the current class name is handled intelligently:
        * it is stripped from the displayed name if present
        * it is added to the full name (return value) if not present

        Raises ValueError when *sig* does not match ``hoc_sig_re``.
        """
        m = hoc_sig_re.match(sig)
        if m is None:
            raise ValueError
        prefix, name, arglist, retann = m.groups()

        # determine module and class name (if applicable), as well as full name
        modname = self.options.get("module", self.env.ref_context.get("hoc:module"))
        classname = self.env.ref_context.get("hoc:class")
        if classname:
            add_module = False
            if prefix and (prefix == classname or prefix.startswith(classname + ".")):
                fullname = prefix + name
                # class name is given again in the signature
                prefix = prefix[len(classname) :].lstrip(".")
            elif prefix:
                # class name is given in the signature, but different
                # (shouldn't happen)
                fullname = classname + "." + prefix + name
            else:
                # class name is not given in the signature
                fullname = classname + "." + name
        else:
            add_module = True
            if prefix:
                classname = prefix.rstrip(".")
                fullname = prefix + name
            else:
                classname = ""
                fullname = name

        signode["module"] = modname
        signode["class"] = classname
        signode["fullname"] = fullname

        sig_prefix = self.get_signature_prefix(sig)
        if sig_prefix:
            if type(sig_prefix) is str:
                raise TypeError(
                    "HOC directive method get_signature_prefix()"
                    " must return a list of nodes."
                    f" Return value was '{sig_prefix}'."
                )
            else:
                signode += addnodes.desc_annotation(str(sig_prefix), "", *sig_prefix)

        if prefix:
            signode += addnodes.desc_addname(prefix, prefix)
        elif modname and add_module and self.env.config.add_module_names:
            nodetext = modname + "."
            signode += addnodes.desc_addname(nodetext, nodetext)

        signode += addnodes.desc_name(name, name)
        if arglist:
            try:
                signode += _parse_arglist(arglist, self.env)
            except SyntaxError:
                # fallback to parse arglist original parser.
                # it supports to represent optional arguments (ex. "func(foo [, bar])")
                _pseudo_parse_arglist(signode, arglist)
            except NotImplementedError as exc:
                logger.warning(
                    "could not parse arglist (%r): %s", arglist, exc, location=signode
                )
                _pseudo_parse_arglist(signode, arglist)
        else:
            if self.needs_arglist():
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()

        if retann:
            children = _parse_annotation(retann, self.env)
            signode += addnodes.desc_returns(retann, "", *children)

        anno = self.options.get("annotation")
        if anno:
            signode += addnodes.desc_annotation(
                " " + anno, "", addnodes.desc_sig_space(), nodes.Text(anno)
            )

        return fullname, prefix
def _object_hierarchy_parts(self, sig_node: desc_signature) -> tuple[str, ...]:
if "fullname" not in sig_node:
return ()
modname = sig_node.get("module")
fullname = sig_node["fullname"]
if modname:
return (modname, *fullname.split("."))
else:
return tuple(fullname.split("."))
    def get_index_text(self, modname: str, name: tuple[str, str]) -> str:
        """Return the text for the index entry of the object.

        Abstract: every concrete HOC directive must override this.
        """
        raise NotImplementedError("must be implemented in subclasses")
def add_target_and_index(
    self, name_cls: tuple[str, str], sig: str, signode: desc_signature
) -> None:
    """Register the object as a cross-reference target and add index entries.

    ``name_cls`` is the (fullname, prefix) pair returned by
    ``handle_signature``.  The fully-qualified name is built from the
    explicit ``:module:`` option or the current ``hoc:module`` context.
    """
    modname = self.options.get("module", self.env.ref_context.get("hoc:module"))
    fullname = (modname + "." if modname else "") + name_cls[0]
    node_id = make_id(self.env, self.state.document, "", fullname)
    signode["ids"].append(node_id)
    # make the node a known explicit target in the current document
    self.state.document.note_explicit_target(signode)

    domain = cast(HOCDomain, self.env.get_domain("hoc"))
    domain.note_object(fullname, self.objtype, node_id, location=signode)

    # an optional alias name that also resolves to this object
    canonical_name = self.options.get("canonical")
    if canonical_name:
        domain.note_object(
            canonical_name, self.objtype, node_id, aliased=True, location=signode
        )

    if "noindexentry" not in self.options:
        indextext = self.get_index_text(modname, name_cls)
        if indextext:
            self.indexnode["entries"].append(
                ("single", indextext, node_id, "", None)
            )
def before_content(self) -> None:
    """Handle object nesting before content

    :py:class:`HOCObject` represents HOC language constructs. For
    constructs that are nestable, such as a HOC classes, this method will
    build up a stack of the nesting hierarchy so that it can be later
    de-nested correctly, in :py:meth:`after_content`.

    For constructs that aren't nestable, the stack is bypassed, and instead
    only the most recent object is tracked. This object prefix name will be
    removed with :py:meth:`after_content`.
    """
    prefix = None
    if self.names:
        # fullname and name_prefix come from the `handle_signature` method.
        # fullname represents the full object name that is constructed using
        # object nesting and explicit prefixes. `name_prefix` is the
        # explicit prefix given in a signature
        (fullname, name_prefix) = self.names[-1]
        if self.allow_nesting:
            prefix = fullname
        elif name_prefix:
            prefix = name_prefix.strip(".")
    if prefix:
        self.env.ref_context["hoc:class"] = prefix
        if self.allow_nesting:
            classes = self.env.ref_context.setdefault("hoc:classes", [])
            classes.append(prefix)
    if "module" in self.options:
        # push the current module onto a stack so an explicit :module:
        # option can be undone in after_content()
        modules = self.env.ref_context.setdefault("hoc:modules", [])
        modules.append(self.env.ref_context.get("hoc:module"))
        self.env.ref_context["hoc:module"] = self.options["module"]
def after_content(self) -> None:
    """Handle object de-nesting after content

    If this class is a nestable object, removing the last nested class prefix
    ends further nesting in the object.

    If this class is not a nestable object, the list of classes should not
    be altered as we didn't affect the nesting levels in
    :py:meth:`before_content`.
    """
    classes = self.env.ref_context.setdefault("hoc:classes", [])
    if self.allow_nesting:
        try:
            classes.pop()
        except IndexError:
            pass
    # restore the enclosing class context (or clear it at top level)
    self.env.ref_context["hoc:class"] = classes[-1] if len(classes) > 0 else None
    if "module" in self.options:
        # undo the module push done in before_content()
        modules = self.env.ref_context.setdefault("hoc:modules", [])
        if modules:
            self.env.ref_context["hoc:module"] = modules.pop()
        else:
            self.env.ref_context.pop("hoc:module")
def _toc_entry_name(self, sig_node: desc_signature) -> str:
    """Build the table-of-contents entry text for this signature."""
    if not sig_node.get("_toc_parts"):
        return ""

    config = self.env.app.config
    objtype = sig_node.parent.get("objtype")
    # callables get trailing parentheses when the config asks for them
    parens = (
        "()"
        if config.add_function_parentheses and objtype in {"function", "method"}
        else ""
    )
    *parents, name = sig_node["_toc_parts"]

    mode = config.toc_object_entries_show_parents
    if mode == "domain":
        return sig_node.get("fullname", name) + parens
    if mode == "hide":
        return name + parens
    if mode == "all":
        return ".".join(parents + [name + parens])
    return ""
class HOCFunction(HOCObject):
    """Directive describing a HOC function."""

    option_spec: OptionSpec = {
        **HOCObject.option_spec,
        "async": directives.flag,
    }

    def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
        # Only the ``:async:`` flag contributes a signature prefix.
        if "async" not in self.options:
            return []
        return [addnodes.desc_sig_keyword("", "async"), addnodes.desc_sig_space()]

    def needs_arglist(self) -> bool:
        # Functions always render a parameter list.
        return True

    def add_target_and_index(
        self, name_cls: tuple[str, str], sig: str, signode: desc_signature
    ) -> None:
        super().add_target_and_index(name_cls, sig, signode)
        if "noindexentry" in self.options:
            return
        modname = self.options.get("module", self.env.ref_context.get("hoc:module"))
        node_id = signode["ids"][0]
        name, cls = name_cls
        if modname:
            text = _("%s() (in module %s)") % (name, modname)
            entry = ("single", text, node_id, "", None)
        else:
            text = f'{pairindextypes["builtin"]}; {name}()'
            entry = ("pair", text, node_id, "", None)
        self.indexnode["entries"].append(entry)

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str | None:
        # Index entries are added in add_target_and_index() instead.
        return None
class HOCDecoratorFunction(HOCFunction):
    """Description of a decorator."""

    def run(self) -> list[Node]:
        # a decorator function is a function after all
        self.name = "hoc:function"
        return super().run()

    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Render the signature with a leading ``@``."""
        ret = super().handle_signature(sig, signode)
        signode.insert(0, addnodes.desc_addname("@", "@"))
        return ret

    def needs_arglist(self) -> bool:
        # decorators are displayed without a parameter list
        return False
class HOCVariable(HOCObject):
    """Directive describing module-level data (a variable)."""

    option_spec: OptionSpec = {
        **HOCObject.option_spec,
        "type": directives.unchanged,
        "value": directives.unchanged,
    }

    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Render the name plus optional ``: type`` and ``= value`` parts."""
        fullname, prefix = super().handle_signature(sig, signode)

        declared_type = self.options.get("type")
        if declared_type:
            signode += addnodes.desc_annotation(
                declared_type,
                "",
                addnodes.desc_sig_punctuation("", ":"),
                addnodes.desc_sig_space(),
                *_parse_annotation(declared_type, self.env),
            )

        default = self.options.get("value")
        if default:
            signode += addnodes.desc_annotation(
                default,
                "",
                addnodes.desc_sig_space(),
                addnodes.desc_sig_punctuation("", "="),
                addnodes.desc_sig_space(),
                nodes.Text(default),
            )

        return fullname, prefix

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
        name, cls = name_cls
        if not modname:
            return _("%s (HOC built-in variable)") % name
        return _("%s (HOC in module %s)") % (name, modname)
class HOCClasslike(HOCObject):
    """Directive describing a class-like object (classes, interfaces,
    exceptions)."""

    option_spec: OptionSpec = {
        **HOCObject.option_spec,
        "final": directives.flag,
    }

    # class-like objects may contain nested object descriptions
    allow_nesting = True

    def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
        prefix: list[nodes.Node] = []
        if "final" in self.options:
            prefix += [nodes.Text("final"), addnodes.desc_sig_space()]
        prefix += [nodes.Text(self.objtype), addnodes.desc_sig_space()]
        return prefix

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
        if self.objtype == "exception":
            return name_cls[0]
        if self.objtype != "class":
            return ""
        if not modname:
            return _("%s (HOC built-in class)") % name_cls[0]
        return _("%s (HOC class in %s)") % (name_cls[0], modname)
class HOCMethod(HOCObject):
    """Directive describing a method of a class."""

    option_spec: OptionSpec = {
        **HOCObject.option_spec,
        "abstractmethod": directives.flag,
        "async": directives.flag,
        "classmethod": directives.flag,
        "final": directives.flag,
        "staticmethod": directives.flag,
    }

    def needs_arglist(self) -> bool:
        # Methods always render a parameter list.
        return True

    def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
        # Emit modifier keywords in a fixed order, each followed by a space.
        modifier_order = [
            ("final", "final"),
            ("abstractmethod", "abstract"),
            ("async", "async"),
            ("classmethod", "classmethod"),
            ("staticmethod", "static"),
        ]
        prefix: list[nodes.Node] = []
        for option, keyword in modifier_order:
            if option in self.options:
                prefix.extend((nodes.Text(keyword), addnodes.desc_sig_space()))
        return prefix

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
        name, cls = name_cls
        if "." not in name:
            # unqualified name: no class part to report
            if modname:
                return _("%s() (in module %s)") % (name, modname)
            return "%s()" % name

        clsname, methname = name.rsplit(".", 1)
        if modname and self.env.config.add_module_names:
            clsname = ".".join([modname, clsname])

        if "classmethod" in self.options:
            return _("%s() (HOC %s class method)") % (methname, clsname)
        if "staticmethod" in self.options:
            return _("%s() (HOC %s static method)") % (methname, clsname)
        return _("%s() (HOC %s method)") % (methname, clsname)
class HOCClassMethod(HOCMethod):
    """Description of a classmethod."""

    # NOTE(review): resets option_spec to HOCObject's base options, dropping
    # HOCMethod's modifier flags — confirm this mirrors the upstream intent.
    option_spec: OptionSpec = HOCObject.option_spec.copy()

    def run(self) -> list[Node]:
        # delegate to the generic method directive with :classmethod: forced on
        self.name = "hoc:method"
        self.options["classmethod"] = True
        return super().run()
class HOCStaticMethod(HOCMethod):
    """Description of a staticmethod."""

    # NOTE(review): resets option_spec to HOCObject's base options, dropping
    # HOCMethod's modifier flags — confirm this mirrors the upstream intent.
    option_spec: OptionSpec = HOCObject.option_spec.copy()

    def run(self) -> list[Node]:
        # delegate to the generic method directive with :staticmethod: forced on
        self.name = "hoc:method"
        self.options["staticmethod"] = True
        return super().run()
class HOCDecoratorMethod(HOCMethod):
    """Description of a decoratormethod."""

    def run(self) -> list[Node]:
        # a decorator method is rendered through the generic method directive
        self.name = "hoc:method"
        return super().run()

    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Render the signature with a leading ``@``."""
        ret = super().handle_signature(sig, signode)
        signode.insert(0, addnodes.desc_addname("@", "@"))
        return ret

    def needs_arglist(self) -> bool:
        # decorator methods are displayed without a parameter list
        return False
class HOCAttribute(HOCObject):
    """Directive describing a class attribute."""

    option_spec: OptionSpec = {
        **HOCObject.option_spec,
        "type": directives.unchanged,
        "value": directives.unchanged,
    }

    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Render the name plus optional ``: type`` and ``= value`` parts."""
        fullname, prefix = super().handle_signature(sig, signode)

        declared_type = self.options.get("type")
        if declared_type:
            signode += addnodes.desc_annotation(
                declared_type,
                "",
                addnodes.desc_sig_punctuation("", ":"),
                addnodes.desc_sig_space(),
                *_parse_annotation(declared_type, self.env),
            )

        default = self.options.get("value")
        if default:
            signode += addnodes.desc_annotation(
                default,
                "",
                addnodes.desc_sig_space(),
                addnodes.desc_sig_punctuation("", "="),
                addnodes.desc_sig_space(),
                nodes.Text(default),
            )

        return fullname, prefix

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
        name, cls = name_cls
        if "." not in name:
            # unqualified name: no class part to report
            if modname:
                return _("%s (HOC in module %s)") % (name, modname)
            return name
        clsname, attrname = name.rsplit(".", 1)
        if modname and self.env.config.add_module_names:
            clsname = ".".join([modname, clsname])
        return _("%s (HOC %s attribute)") % (attrname, clsname)
class HOCProperty(HOCObject):
    """Description of a property."""
    # Fixed: the docstring previously said "attribute" (copy-paste from
    # HOCAttribute); this directive documents properties.

    # Annotated as OptionSpec for consistency with the sibling directives.
    option_spec: OptionSpec = HOCObject.option_spec.copy()
    option_spec.update(
        {
            "abstractmethod": directives.flag,
            "classmethod": directives.flag,
            "type": directives.unchanged,
        }
    )

    def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
        """Render the property name plus an optional ``: type`` annotation."""
        fullname, prefix = super().handle_signature(sig, signode)
        typ = self.options.get("type")
        if typ:
            annotations = _parse_annotation(typ, self.env)
            signode += addnodes.desc_annotation(
                typ,
                "",
                addnodes.desc_sig_punctuation("", ":"),
                addnodes.desc_sig_space(),
                *annotations,
            )

        return fullname, prefix

    def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
        """Prefix with optional ``abstract``/``class`` modifiers and ``property``."""
        prefix: list[nodes.Node] = []
        if "abstractmethod" in self.options:
            prefix.append(nodes.Text("abstract"))
            prefix.append(addnodes.desc_sig_space())
        if "classmethod" in self.options:
            prefix.append(nodes.Text("class"))
            prefix.append(addnodes.desc_sig_space())
        prefix.append(nodes.Text("property"))
        prefix.append(addnodes.desc_sig_space())
        return prefix

    def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
        """Index entry text, e.g. ``attr (HOC Cls property)``."""
        name, cls = name_cls
        try:
            clsname, attrname = name.rsplit(".", 1)
            if modname and self.env.config.add_module_names:
                clsname = ".".join([modname, clsname])
        except ValueError:
            # unqualified name: no class part to report
            if modname:
                return _("%s (HOC in module %s)") % (name, modname)
            else:
                return name

        return _("%s (HOC %s property)") % (attrname, clsname)
class HOCModule(SphinxDirective):
    """
    Directive to mark description of a new module.
    """

    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec: OptionSpec = {
        "platform": lambda x: x,
        "synopsis": lambda x: x,
        "noindex": directives.flag,
        "nocontentsentry": directives.flag,
        "deprecated": directives.flag,
    }

    def run(self) -> list[Node]:
        """Register the module with the domain and parse the nested content."""
        domain = cast(HOCDomain, self.env.get_domain("hoc"))

        modname = self.arguments[0].strip()
        noindex = "noindex" in self.options
        # make subsequent directives resolve relative to this module
        self.env.ref_context["hoc:module"] = modname

        content_node: Element = nodes.section()
        with switch_source_input(self.state, self.content):
            # necessary so that the child nodes get the right source/line set
            content_node.document = self.state.document
            nested_parse_with_titles(self.state, self.content, content_node)

        ret: list[Node] = []
        if not noindex:
            # note module to the domain
            node_id = make_id(self.env, self.state.document, "module", modname)
            target = nodes.target("", "", ids=[node_id], ismod=True)
            self.set_source_info(target)
            self.state.document.note_explicit_target(target)

            domain.note_module(
                modname,
                node_id,
                self.options.get("synopsis", ""),
                self.options.get("platform", ""),
                "deprecated" in self.options,
            )
            domain.note_object(modname, "module", node_id, location=target)

            # the platform and synopsis aren't printed; in fact, they are only
            # used in the modindex currently
            ret.append(target)
            indextext = f'{pairindextypes["module"]}; {modname}'
            inode = addnodes.index(entries=[("pair", indextext, node_id, "", None)])
            ret.append(inode)
        ret.extend(content_node.children)
        return ret

    def make_old_id(self, name: str) -> str:
        """Generate old styled node_id.

        Old styled node_id is incompatible with docutils' node_id.
        It can contain dots and hyphens.

        .. note:: Old styled node_id was mainly used until Sphinx-3.0.
        """
        return "module-%s" % name
class HOCCurrentModule(SphinxDirective):
    """
    This directive is just to tell Sphinx that we're documenting
    stuff in module foo, but links to module foo won't lead here.
    """

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec: OptionSpec = {}

    def run(self) -> list[Node]:
        # The literal argument "None" clears the current-module context.
        new_module = self.arguments[0].strip()
        if new_module == "None":
            self.env.ref_context.pop("hoc:module", None)
        else:
            self.env.ref_context["hoc:module"] = new_module
        return []
class HOCXRefRole(XRefRole):
    """Cross-reference role aware of the current ``hoc:module``/``hoc:class``
    context; supports the ``~`` title-shortening and leading-``.`` prefixes."""

    def process_link(
        self,
        env: BuildEnvironment,
        refnode: Element,
        has_explicit_title: bool,
        title: str,
        target: str,
    ) -> tuple[str, str]:
        # remember the context so resolve_xref() can search relative to it
        refnode["hoc:module"] = env.ref_context.get("hoc:module")
        refnode["hoc:class"] = env.ref_context.get("hoc:class")
        if not has_explicit_title:
            title = title.lstrip(".")  # only has a meaning for the target
            target = target.lstrip("~")  # only has a meaning for the title
            # if the first character is a tilde, don't display the module/class
            # parts of the contents
            if title[0:1] == "~":
                title = title[1:]
                dot = title.rfind(".")
                if dot != -1:
                    title = title[dot + 1 :]
        # if the first character is a dot, search more specific namespaces first
        # else search builtins first
        if target[0:1] == ".":
            target = target[1:]
            refnode["refspecific"] = True
        return title, target
def filter_meta_fields(
    app: Sphinx, domain: str, objtype: str, content: Element
) -> None:
    """Filter ``:meta:`` field from its docstring.

    Connected to the ``object-description-transform`` event; removes every
    ``:meta ...:`` field from field lists of HOC object descriptions.
    """
    if domain != "hoc":
        return

    for node in content:
        if isinstance(node, nodes.field_list):
            fields = cast(List[nodes.field], node)
            # removing list items while iterating the list needs reversed()
            for field in reversed(fields):
                # field[0] is the field *name* node (field[1] is the body);
                # the previous cast to ``nodes.field_body`` was incorrect.
                field_name = cast(nodes.field_name, field[0]).astext().strip()
                if field_name == "meta" or field_name.startswith("meta "):
                    node.remove(field)
class HOCModuleIndex(Index):
    """
    Index subclass to provide the HOC module index.
    """

    name = "modindex"
    localname = _("HOC Module Index")
    shortname = _("modules")

    def generate(
        self, docnames: Iterable[str] | None = None
    ) -> tuple[list[tuple[str, list[IndexEntry]]], bool]:
        """Build index entries grouped by first letter; optionally restrict
        to the given *docnames*.  Returns (content, collapse) as required by
        the Index API."""
        content: dict[str, list[IndexEntry]] = {}
        # list of prefixes to ignore
        ignores: list[str] = self.domain.env.config["modindex_common_prefix"]
        ignores = sorted(ignores, key=len, reverse=True)
        # list of all modules, sorted by module name
        modules = sorted(
            self.domain.data["modules"].items(), key=lambda x: x[0].lower()
        )
        # sort out collapsible modules
        prev_modname = ""
        num_toplevels = 0
        for modname, (docname, node_id, synopsis, platforms, deprecated) in modules:
            if docnames and docname not in docnames:
                continue

            # strip the longest matching common prefix, if any
            for ignore in ignores:
                if modname.startswith(ignore):
                    modname = modname[len(ignore) :]
                    stripped = ignore
                    break
            else:
                stripped = ""

            # we stripped the whole module name?
            if not modname:
                modname, stripped = stripped, ""

            entries = content.setdefault(modname[0].lower(), [])

            package = modname.split(".")[0]
            if package != modname:
                # it's a submodule
                if prev_modname == package:
                    # first submodule - make parent a group head
                    if entries:
                        last = entries[-1]
                        entries[-1] = IndexEntry(
                            last[0], 1, last[2], last[3], last[4], last[5], last[6]
                        )
                elif not prev_modname.startswith(package):
                    # submodule without parent in list, add dummy entry
                    entries.append(
                        IndexEntry(stripped + package, 1, "", "", "", "", "")
                    )
                subtype = 2
            else:
                num_toplevels += 1
                subtype = 0

            qualifier = _("Deprecated") if deprecated else ""
            entries.append(
                IndexEntry(
                    stripped + modname,
                    subtype,
                    docname,
                    node_id,
                    platforms,
                    qualifier,
                    synopsis,
                )
            )
            prev_modname = modname

        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels

        # sort by first letter
        sorted_content = sorted(content.items())

        return sorted_content, collapse
class HOCDomain(Domain):
    """HOC language domain."""

    name = "hoc"
    label = "HOC"
    # object type -> (localized label, role names that can reference it)
    object_types: dict[str, ObjType] = {
        "function": ObjType(_("function"), "func", "obj"),
        "data": ObjType(_("data"), "data", "obj"),
        "class": ObjType(_("class"), "class", "exc", "obj"),
        "exception": ObjType(_("exception"), "exc", "class", "obj"),
        "method": ObjType(_("method"), "meth", "obj"),
        "classmethod": ObjType(_("class method"), "meth", "obj"),
        "staticmethod": ObjType(_("static method"), "meth", "obj"),
        "attribute": ObjType(_("attribute"), "attr", "obj"),
        "property": ObjType(_("property"), "attr", "_prop", "obj"),
        "module": ObjType(_("module"), "mod", "obj"),
    }

    directives = {
        "function": HOCFunction,
        "data": HOCVariable,
        "class": HOCClasslike,
        "exception": HOCClasslike,
        "method": HOCMethod,
        "classmethod": HOCClassMethod,
        "staticmethod": HOCStaticMethod,
        "attribute": HOCAttribute,
        "property": HOCProperty,
        "module": HOCModule,
        "currentmodule": HOCCurrentModule,
        "decorator": HOCDecoratorFunction,
        "decoratormethod": HOCDecoratorMethod,
    }
    roles = {
        "data": HOCXRefRole(),
        "exc": HOCXRefRole(),
        "func": HOCXRefRole(fix_parens=True),
        "class": HOCXRefRole(),
        "const": HOCXRefRole(),
        "attr": HOCXRefRole(),
        "meth": HOCXRefRole(fix_parens=True),
        "mod": HOCXRefRole(),
        "obj": HOCXRefRole(),
    }
    initial_data: dict[str, dict[str, tuple[Any]]] = {
        "objects": {},  # fullname -> docname, objtype
        "modules": {},  # modname -> docname, synopsis, platform, deprecated
    }
    indices = [
        HOCModuleIndex,
    ]

    @property
    def objects(self) -> dict[str, ObjectEntry]:
        return self.data.setdefault("objects", {})  # fullname -> ObjectEntry

    def note_object(
        self,
        name: str,
        objtype: str,
        node_id: str,
        aliased: bool = False,
        location: Any = None,
    ) -> None:
        """Note a HOC object for cross reference.

        .. versionadded:: 2.1
        """
        if name in self.objects:
            other = self.objects[name]
            if other.aliased and aliased is False:
                # The original definition found. Override it!
                pass
            elif other.aliased is False and aliased:
                # The original definition is already registered.
                return
            else:
                # duplicated
                logger.warning(
                    __(
                        "duplicate object description of %s, "
                        "other instance in %s, use :noindex: for one of them"
                    ),
                    name,
                    other.docname,
                    location=location,
                )
        self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype, aliased)

    @property
    def modules(self) -> dict[str, ModuleEntry]:
        return self.data.setdefault("modules", {})  # modname -> ModuleEntry

    def note_module(
        self, name: str, node_id: str, synopsis: str, platform: str, deprecated: bool
    ) -> None:
        """Note a HOC module for cross reference.

        .. versionadded:: 2.1
        """
        self.modules[name] = ModuleEntry(
            self.env.docname, node_id, synopsis, platform, deprecated
        )

    def clear_doc(self, docname: str) -> None:
        """Drop every object/module that was recorded from *docname*."""
        for fullname, obj in list(self.objects.items()):
            if obj.docname == docname:
                del self.objects[fullname]
        for modname, mod in list(self.modules.items()):
            if mod.docname == docname:
                del self.modules[modname]

    def merge_domaindata(self, docnames: list[str], otherdata: dict[str, Any]) -> None:
        """Merge data collected by a parallel-read worker process."""
        # XXX check duplicates?
        for fullname, obj in otherdata["objects"].items():
            if obj.docname in docnames:
                self.objects[fullname] = obj
        for modname, mod in otherdata["modules"].items():
            if mod.docname in docnames:
                self.modules[modname] = mod

    def find_obj(
        self,
        env: BuildEnvironment,
        modname: str,
        classname: str,
        name: str,
        type: str | None,
        searchmode: int = 0,
    ) -> list[tuple[str, ObjectEntry]]:
        """Find a HOC object for "name", perhaps using the given module
        and/or classname.  Returns a list of (name, object entry) tuples.

        ``searchmode == 1`` searches relative to the context first and falls
        back to a fuzzy suffix match; ``searchmode == 0`` is exact-match only.
        """
        # skip parens
        if name[-2:] == "()":
            name = name[:-2]

        if not name:
            return []

        matches: list[tuple[str, ObjectEntry]] = []

        newname = None
        if searchmode == 1:
            if type is None:
                objtypes = list(self.object_types)
            else:
                objtypes = self.objtypes_for_role(type)
            if objtypes is not None:
                if modname and classname:
                    fullname = modname + "." + classname + "." + name
                    if (
                        fullname in self.objects
                        and self.objects[fullname].objtype in objtypes
                    ):
                        newname = fullname
                if not newname:
                    if (
                        modname
                        and modname + "." + name in self.objects
                        and self.objects[modname + "." + name].objtype in objtypes
                    ):
                        newname = modname + "." + name
                    elif (
                        name in self.objects and self.objects[name].objtype in objtypes
                    ):
                        newname = name
                    else:
                        # "fuzzy" searching mode
                        searchname = "." + name
                        matches = [
                            (oname, self.objects[oname])
                            for oname in self.objects
                            if oname.endswith(searchname)
                            and self.objects[oname].objtype in objtypes
                        ]
        else:
            # NOTE: searching for exact match, object type is not considered
            if name in self.objects:
                newname = name
            elif type == "mod":
                # only exact matches allowed for modules
                return []
            elif classname and classname + "." + name in self.objects:
                newname = classname + "." + name
            elif modname and modname + "." + name in self.objects:
                newname = modname + "." + name
            elif (
                modname
                and classname
                and modname + "." + classname + "." + name in self.objects
            ):
                newname = modname + "." + classname + "." + name
        if newname is not None:
            matches.append((newname, self.objects[newname]))
        return matches

    def resolve_xref(
        self,
        env: BuildEnvironment,
        fromdocname: str,
        builder: Builder,
        type: str,
        target: str,
        node: pending_xref,
        contnode: Element,
    ) -> Element | None:
        """Resolve a single cross-reference, or return None if not found."""
        modname = node.get("hoc:module")
        clsname = node.get("hoc:class")
        searchmode = 1 if node.hasattr("refspecific") else 0
        matches = self.find_obj(env, modname, clsname, target, type, searchmode)

        if not matches and type == "attr":
            # fallback to meth (for property; Sphinx-2.4.x)
            # this ensures that `:attr:` role continues to refer to the old property entry
            # that defined by ``method`` directive in old reST files.
            matches = self.find_obj(env, modname, clsname, target, "meth", searchmode)
        if not matches and type == "meth":
            # fallback to attr (for property)
            # this ensures that `:meth:` in the old reST files can refer to the property
            # entry that defined by ``property`` directive.
            #
            # Note: _prop is a secret role only for internal look-up.
            matches = self.find_obj(env, modname, clsname, target, "_prop", searchmode)

        if not matches:
            return None
        elif len(matches) > 1:
            # prefer the one canonical (non-aliased) match, if unique
            canonicals = [m for m in matches if not m[1].aliased]
            if len(canonicals) == 1:
                matches = canonicals
            else:
                logger.warning(
                    __("more than one target found for cross-reference %r: %s"),
                    target,
                    ", ".join(match[0] for match in matches),
                    type="ref",
                    subtype="hoc",
                    location=node,
                )
        name, obj = matches[0]

        if obj[2] == "module":
            return self._make_module_refnode(builder, fromdocname, name, contnode)
        else:
            # determine the content of the reference by conditions
            content = find_pending_xref_condition(node, "resolved")
            if content:
                children = content.children
            else:
                # if not found, use contnode
                children = [contnode]

            return make_refnode(builder, fromdocname, obj[0], obj[1], children, name)

    def resolve_any_xref(
        self,
        env: BuildEnvironment,
        fromdocname: str,
        builder: Builder,
        target: str,
        node: pending_xref,
        contnode: Element,
    ) -> list[tuple[str, Element]]:
        """Resolve the ``:any:`` role: return every plausible target."""
        modname = node.get("hoc:module")
        clsname = node.get("hoc:class")
        results: list[tuple[str, Element]] = []

        # always search in "refspecific" mode with the :any: role
        matches = self.find_obj(env, modname, clsname, target, None, 1)
        multiple_matches = len(matches) > 1

        for name, obj in matches:
            if multiple_matches and obj.aliased:
                # Skip duplicated matches
                continue

            if obj[2] == "module":
                results.append(
                    (
                        "hoc:mod",
                        self._make_module_refnode(builder, fromdocname, name, contnode),
                    )
                )
            else:
                # determine the content of the reference by conditions
                content = find_pending_xref_condition(node, "resolved")
                if content:
                    children = content.children
                else:
                    # if not found, use contnode
                    children = [contnode]

                results.append(
                    (
                        "hoc:" + self.role_for_objtype(obj[2]),
                        make_refnode(
                            builder, fromdocname, obj[0], obj[1], children, name
                        ),
                    )
                )
        return results

    def _make_module_refnode(
        self, builder: Builder, fromdocname: str, name: str, contnode: Node
    ) -> Element:
        """Build a reference node to a module, with synopsis/platform tooltip."""
        # get additional info for modules
        module = self.modules[name]
        title = name
        if module.synopsis:
            title += ": " + module.synopsis
        if module.deprecated:
            title += _(" (deprecated)")
        if module.platform:
            title += " (" + module.platform + ")"
        return make_refnode(
            builder, fromdocname, module.docname, module.node_id, contnode, title
        )

    def get_objects(self) -> Iterator[tuple[str, str, str, str, str, int]]:
        """Yield (name, dispname, type, docname, anchor, priority) tuples."""
        for modname, mod in self.modules.items():
            yield (modname, modname, "module", mod.docname, mod.node_id, 0)
        for refname, obj in self.objects.items():
            if obj.objtype != "module":  # modules are already handled
                if obj.aliased:
                    # aliased names are not full-text searchable.
                    yield (refname, refname, obj.objtype, obj.docname, obj.node_id, -1)
                else:
                    yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)

    def get_full_qualified_name(self, node: Element) -> str | None:
        """Join module/class context and target into a dotted name."""
        modname = node.get("hoc:module")
        clsname = node.get("hoc:class")
        target = node.get("reftarget")
        if target is None:
            return None
        else:
            return ".".join(filter(None, [modname, clsname, target]))
def builtin_resolver(
    app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: Element
) -> Element | None:
    """Do not emit nitpicky warnings for built-in types."""

    def istyping(s: str) -> bool:
        # accept both "List" and "typing.List" style targets
        if s.startswith("typing."):
            s = s.split(".", 1)[1]
        return s in typing.__all__

    if node.get("refdomain") != "hoc":
        return None

    reftype = node.get("reftype")
    if reftype in ("class", "obj") and node.get("reftarget") == "None":
        return contnode
    if reftype in ("class", "obj", "exc"):
        reftarget = node.get("reftarget")
        if inspect.isclass(getattr(builtins, reftarget, None)):
            # built-in class
            return contnode
        if istyping(reftarget):
            # typing class
            return contnode

    return None
def setup(app: Sphinx) -> dict[str, Any]:
    """Sphinx extension entry point: register the HOC domain."""
    app.setup_extension("sphinx.directives")

    app.add_domain(HOCDomain)
    app.add_config_value("hoc_use_unqualified_type_names", False, "env")
    # strip :meta: fields and silence nitpicky warnings for builtins
    app.connect("object-description-transform", filter_meta_fields)
    app.connect("missing-reference", builtin_resolver, priority=900)

    return {
        "version": "builtin",
        "env_version": 3,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
| [
"noreply@github.com"
] | neuronsimulator.noreply@github.com |
83b9b89602f94805f1ff6283f7237c42100ead2a | f5a7de717f41f8379ccdee7d06de838fdf1d0a0b | /soloperformance-api/apps/catalog/management/commands/exercises.py | b73d1df31fb2d914106dd6d80bd4253425dbe55c | [] | no_license | jimmy818/mexico-angular | 977e4d1d0cab2ff8c10c9892d9c72ca2f4f9ac49 | 005ed3729b807d77a8fd97a3b5469a42ceefdaad | refs/heads/main | 2023-08-10T21:37:53.614298 | 2021-05-11T19:04:29 | 2021-05-11T19:04:29 | 366,485,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py |
from django.core.management.base import BaseCommand, CommandError
from django.http import HttpRequest
import requests
import xlrd
from apps.catalog import utils
class Command(BaseCommand):
    """Management command: import exercises ("drills") from a remote Excel file."""

    help = 'Add exercises'

    def handle(self, *args, **options):
        # NOTE(review): `request` is never used — presumably leftover; confirm.
        request = HttpRequest()
        # download the spreadsheet and stage it in /tmp
        r = requests.get('https://d2femlmiaazi1b.cloudfront.net/media/excel/DB_Drills.xlsx')
        with open('/tmp/excel.xlsx', 'wb') as f:
            f.write(r.content)
        path = '/tmp/excel.xlsx'
        book = xlrd.open_workbook(path)
        # sheets = book.sheet_names()
        sheet_0 = book.sheet_by_index(0)  # Open the first tab
        ## this range is for excercices length
        # NOTE(review): 1012 is a hard-coded row count and row 3 is assumed to
        # hold the sub-exercise headers — verify against the spreadsheet layout.
        for row_index in range(1012):
            if row_index > 3:
                excercice = None
                for col_index in range(sheet_0.ncols):
                    item = sheet_0.cell(rowx=row_index,colx=col_index).value
                    if excercice == None:
                        # first column: the exercise name itself
                        excercice = item
                        excercice_item = utils.get_or_add_excercice(excercice)
                    else:
                        if item != None and item != '':
                            # non-empty cell: link the header of this column
                            # (row 3) as a sub-exercise
                            utils.add_sub_excercice(excercice_item,sheet_0.cell(rowx=3,colx=col_index).value)
                            print(excercice)
                            print(sheet_0.cell(rowx=3,colx=col_index).value)
        self.stdout.write(self.style.SUCCESS('Successfully.....'))
self.stdout.write(self.style.SUCCESS('Successfully.....')) | [
"45069768+itsrocketfuel@users.noreply.github.com"
] | 45069768+itsrocketfuel@users.noreply.github.com |
d317dbe026ee64a6164000d22bb33f657cfdb157 | 16eac9ad11ca1b7833e6df37e7ddbbaeea82a66d | /Sudoku Agent/solution.py | e29ab8688125a1b3c546746bf49929138ea5171f | [] | no_license | tejasvin/Artificial-Inteligence | 6d664d8742dc981cecd2bb3bd5ad8d45bbe683ee | 690d7e5cadc1cdce7edbdd5d535dc55b0fb98f19 | refs/heads/master | 2021-01-01T15:32:12.192139 | 2017-10-14T19:27:29 | 2017-10-14T19:27:29 | 97,635,361 | 0 | 1 | null | 2017-10-14T19:27:30 | 2017-07-18T19:21:32 | Python | UTF-8 | Python | false | false | 7,022 | py | import itertools
# History of grid snapshots taken after each single-value assignment
# (consumed by the visualization helper, outside this view).
assignments = []
rows = 'ABCDEFGHI'  # row labels of the 9x9 grid
cols = '123456789'  # column labels of the 9x9 grid
def assign_value(values, box, value):
    """Assign *value* to *box* in *values*, recording solved boxes.

    Please use this function to update your values dictionary!
    If the assignment solves the box (single candidate left), a snapshot of
    the grid is appended to the module-level ``assignments`` history used
    for visualization.

    Args:
        values(dict): sudoku in dictionary form {'A1': '135', ...}
        box(str): box key, e.g. 'A1'
        value(str): new candidate string for the box
    Returns:
        the (mutated) values dictionary.
    """
    # Fixed: removed leftover debug print() calls that dumped the whole grid,
    # the box and the value on every single assignment.

    # Don't waste memory appending actions that don't actually change any values
    if values[box] == value:
        return values

    values[box] = value
    if len(value) == 1:
        # box solved: snapshot the grid for the visualization history
        assignments.append(values.copy())
    return values
def naked_twins(values):
    """Eliminate values using the naked twins strategy.

    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}

    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    # Find all instances of naked twins: distinct peer boxes that share the
    # same two remaining candidates.
    # NOTE(review): scans every ordered box pair (O(n^2)) and relies on the
    # module-level `boxes`, `peers` and `units` tables defined elsewhere.
    nakedTwins = [[a,b] for a in boxes for b in boxes if a != b and values[a] == values[b] and b in peers[a] and len(values[a]) == 2 and len(values[b]) == 2]
    #remove duplicates if any
    # Eliminate the naked twins as possibilities for their peers
    for pair in nakedTwins:
        for eachUnit in units[pair[0]]:
            if pair[1] in eachUnit: # check if [A1,A2] are in same unit
                for box in eachUnit:
                    if box not in pair:
                        # strip both twin digits from every other box in the unit
                        val = values[box]
                        for ch in values[pair[0]]:
                            val = val.replace(ch,'')
                        assign_value(values, box, val)
    return values
def cross(A, B):
    """Cross product of elements in A and elements in B."""
    return [a + b for a, b in itertools.product(A, B)]
# decode the string to a dictionary format
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.

    Args:
        grid(string) - A grid in string form.
    Returns:
        A grid in dictionary form
            Keys: The boxes, e.g., 'A1'
            Values: The value in each box, e.g., '8'. If the box has no
            value, then the value will be '123456789'.
    """
    assert len(grid) == 81 , "Invalid: string length != 81"
    cells = [r + c for r in 'ABCDEFGHI' for c in '123456789']
    return {
        cell: ('123456789' if ch == '.' else ch)
        for cell, ch in zip(cells, grid)
    }
def display(values):
    """
    Display the values as a 2-D grid.

    Args:
        values(dict): The sudoku in dictionary form
    """
    # column width = widest candidate string + 1 (uses module-level `boxes`)
    width = 1+max(len(values[s]) for s in boxes)
    line = '+'.join(['-'*(width*3)]*3)
    for r in rows:
        print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
                      for c in cols))
        # horizontal separators after rows C and F
        if r in 'CF': print(line)
    return
def eliminate(values):
    """Remove each solved box's digit from the candidates of all its peers."""
    # Snapshot the boxes solved at entry; boxes that become solved during
    # this pass are handled on the next call (same as the original).
    for box, value in list(values.items()):
        if len(value) == 1:
            for peer in peers[box]:
                assign_value(values, peer, values[peer].replace(value, ''))
    return values
def only_choice(values):
    """Assign a digit to a box when it is the only box in its unit that can hold it."""
    for unit in unitlist:
        for digit in '123456789':
            candidates = [box for box in unit if digit in values[box]]
            if len(candidates) == 1:
                assign_value(values, candidates[0], digit)
    return values
def reduce_puzzle(values):
    """Apply eliminate / naked_twins / only_choice repeatedly until stalled.

    Returns:
        The reduced values dict, or False if some box loses all its
        candidates (i.e. the grid is contradictory).
    """
    def solved_count():
        return len([box for box in values.keys() if len(values[box]) == 1])

    made_progress = True
    while made_progress:
        before = solved_count()
        values = eliminate(values)
        values = naked_twins(values)
        values = only_choice(values)
        made_progress = solved_count() != before
        # An empty candidate string anywhere means the grid is unsolvable.
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False
    return values
def search(values):
    """Depth-first search with constraint propagation.

    Args:
        values(dict): sudoku in dictionary form.
    Returns:
        The solved values dict, or False if the grid has no solution.
    """
    # First, reduce the puzzle using the previous function
    values = reduce_puzzle(values)
    if values is False:
        return False ## Failed earlier
    if all(len(values[s]) == 1 for s in boxes):
        return values ## Solved!
    # Choose one of the unfilled squares with the fewest possibilities
    n, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
    # Now use recurrence to solve each one of the resulting sudokus
    for value in values[s]:
        new_sudoku = values.copy()
        new_sudoku[s] = value
        attempt = search(new_sudoku)
        if attempt:
            return attempt
    # Fix: every candidate led to a contradiction.  The original fell off
    # the end and returned None; solve() documents False as the
    # no-solution value, so return it explicitly.
    return False
def solve(grid):
    """
    Find the solution to a Sudoku grid.
    Args:
        grid(string): a string representing a sudoku grid.
            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    board = grid_values(grid)
    return search(board)
# Board topology.  `rows` and `cols` are defined earlier in the file —
# presumably 'ABCDEFGHI' and '123456789' given how they are used; verify.
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# units = [[row],[col],[square]]  (the units each box belongs to)
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
# diagonal (A1, B2, ..., I9) — this solver enforces diagonal sudoku rules
diagonal = [rows[i]+cols[i] for i in range(0,9)]
#anti-diagonal (A9, B8, ..., I1)
anti_diagonal = [rows[i]+cols[8-i] for i in range(0,9)]
# Diagonal boxes get the diagonal itself as an extra unit, so eliminate /
# only_choice / naked_twins automatically respect the diagonal constraint.
for element in diagonal:
    units[element] = units[element]+[diagonal]
for element in anti_diagonal:
    units[element] = units[element]+[anti_diagonal]
# peers = all boxes sharing any unit with s (row + col + square + diagonals),
# excluding s itself
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
if __name__ == '__main__':
    diag_sudoku_grid = '....9......8.6.5...9.5.2.8...1.4.9..74.6.9.23..9.2.4...1.9.8.4...6.1.8......5....'
    #'......8.68.........7..863.....8............8..8.5.9...1.8..............8.....8.4.'
    #'2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(solve(diag_sudoku_grid))

    try:
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # other BaseExceptions; catch Exception so Ctrl-C still propagates.
    except Exception:
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| [
"tnuthalapati@expedia.com"
] | tnuthalapati@expedia.com |
7a9167147d6e513db93b899d470a2f533fd1679d | 837291c1b17c93b86d57109e7134ed2a9bff5d22 | /aula5-exercicio.py | 855d73d645021df0cead04db6a19321bbbcf03b9 | [] | no_license | flaviokosta/python_basico | ff128d63bdb7ec3490d64648d6d320fd86b206cd | eed07091ab7fa1473cf8adb2b3ef33b207a02666 | refs/heads/master | 2020-04-02T13:34:03.989367 | 2019-01-01T14:47:30 | 2019-01-01T14:47:30 | 154,486,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | '''
EXERCICIO
Faça um programa que leia a quantidade de pessoas que serão convidadas
para uma festa.
Após isso, o programa irá perguntar o nome de todas as pessoas e colocar
numa lista de convidados.
Após isso, irá imprimir todos os nomes da lista
'''
print('/...............::::...............\\')
print('| Programinha de Festinhas 1.0 |')
print('\...............::::.............../\n')
quantidade = int(input('Quantidade de convidados: '))
convidado = 1
lista_convidados = []
while convidado <= quantidade:
lista_convidados.append(input('Nome do convidado # ' + str(convidado) + ': '))
convidado += 1
print('\n-- LISTA DE CONVIDADOS --')
print('Quantidade de convidados:', quantidade)
print('\n')
for i in range(quantidade):
print('Convidado', (i+1), '-', lista_convidados[i])
| [
"flaviokosta@gmail.com"
] | flaviokosta@gmail.com |
6bd5bf1140285308546aa340873abdb645e215e5 | 24861097bfa7a9d52ee20bd8e0861d9baae309c5 | /model/train.py | a76cac4ce24e7815dd4ecc1b879d8d8964e2968d | [
"MIT"
] | permissive | shao19950821/ESAPN | b9ea2d966b9420e4dd0497a0e7d55b46447d9b04 | 5ae9afdbeb7e2d098bde05e68503814077381d16 | refs/heads/master | 2023-03-10T01:05:15.868477 | 2021-02-24T23:18:20 | 2021-02-24T23:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,875 | py | # %matplotlib inline
import os, time, pickle, argparse, sys
import pickle as pk
import pandas as pd
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.stats import beta
# Print full tensors/arrays when debugging (no truncation).
torch.set_printoptions(threshold=10000)
np.set_printoptions(threshold=np.inf)

# ---- Command-line configuration ------------------------------------------
parser = argparse.ArgumentParser(description='RSAutoML')
parser.add_argument('--Train_Method', type=str, default='AutoML', help='options: AutoML, Supervised')
parser.add_argument('--Policy_Type', type=int, default=1, help='options: 0, 1, 2, 3, 4, 5')
parser.add_argument('--Val_Type', type=str, default='last_batch', help='options: last_batch, last_random')
parser.add_argument('--Loss_Type', type=str, default='MSE_sigmoid', help='options: MSE_sigmoid MSE_no_sigmoid BCEWithLogitsLoss CrossEntropyLoss')
parser.add_argument('--Data_Set', type=str, default='ml-20m', help='options: ml-20m ml-latest')
parser.add_argument('--Dy_Emb_Num', type=int, default=2, help='options: 1, 2')
parser.add_argument('--Reward_Base', type=str, default=None, help='options: None, last_loss, ave_loss')
parser.add_argument('--last_num', type=int, default=5, help='options: 1, 2')
args = parser.parse_args()

Model_Gpu = torch.cuda.is_available()
device = torch.device('cuda:0' if Model_Gpu else 'cpu')
DATA_PATH = './data'
DATA_SET = args.Data_Set
Batch_Size = 500 # batch size
LR_model = 0.001 # learning rate for the RS model
LR_darts = 0.0001 # learning rate for the controller policies
Epoch = 1 # train epoch
Beta_Beta = 20 # beta for Beta distribution
H_alpha = 0 # for nn.KLDivLoss 0.001
if DATA_SET == 'ml-20m':
    Train_Size = 15000000 # training dataset size
elif DATA_SET == 'ml-latest':
    Train_Size = 22000000 # training dataset size
Test_Size = 5000000 # test dataset size
Emb_Size = [2, 4, 8, 16, 64, 128] # candidate embedding dims; 1,2,4,8,16,32,64,128,256,512
Train_Method = args.Train_Method
Policy_Type = args.Policy_Type
# Human-readable description of each controller input configuration.
Types = ['Policy0: embedding for popularity',
         'Policy1: embedding for popularity + last_weights',
         'Policy2: embedding for popularity + last_weights + last_loss',
         'Policy3: popularity one_hot',
         'Policy4: popularity one_hot + last_weights',
         'Policy5: popularity one_hot + last_weights + last_loss']
Val_Type = args.Val_Type
Dy_Emb_Num = args.Dy_Emb_Num # dynamic num of embedding to adjust, 1 for user, 2 for user & movie
Reward_Base = args.Reward_Base
last_num = args.last_num
Loss_Type = args.Loss_Type
# NOTE(review): `reduce=False` is deprecated in modern PyTorch; the
# equivalent is reduction='none'.  Left unchanged here (behavioral code).
ControllerLoss = nn.CrossEntropyLoss(reduce=False)

# ---- Startup banner: echo the effective configuration --------------------
print('\n****************************************************************************************\n')
print('os.getpid(): ', os.getpid())
if torch.cuda.is_available():
    print('torch.cuda: ', torch.cuda.is_available(), torch.cuda.current_device(), torch.cuda.device_count(), torch.cuda.get_device_name(0), torch.cuda.device(torch.cuda.current_device()))
else:
    print('GPU is not available!!!')
print('Train_Size: ', Train_Size)
print('Test_Size: ', Test_Size)
print('Emb_Size: ', Emb_Size)
print('Batch_Size: ', Batch_Size)
print('Dy_Emb_Num: ', Dy_Emb_Num)
print('Loss_Type: ', Loss_Type)
print('Train_Method: ', Train_Method)
print('Policy_Type: ', Types[Policy_Type])
print('Val_Type: ', Val_Type)
print('Beta_Beta: ', Beta_Beta)
print('H_alpha: ', H_alpha)
print('LR_model: ', LR_model)
print('LR_darts: ', LR_darts)
print('\n****************************************************************************************\n')
def load_data():
    """Load the pickled train/test split and the movie genome scores.

    Reads '{DATA_PATH}/{DATA_SET}_TrainTest_{Train_Size}_{Output_Dim}.data'
    and '{DATA_PATH}/{DATA_SET}_GenomeScoresDict.data'.

    Returns:
        (train_features, test_features, train_target, test_target,
         genome_scores_dict, train_feature_data, test_feature_data,
         num_train, num_test, User_Num, Movie_Num,
         max_user_popularity, max_movie_popularity)
    """
    # Fix: open the pickle files with context managers so the handles are
    # closed deterministically (the original `pickle.load(open(...))`
    # leaked the file objects).
    with open('{}/{}_TrainTest_{}_{}.data'.format(DATA_PATH, DATA_SET, Train_Size, Output_Dim), mode='rb') as f:
        train_features, test_features, train_target, test_target = pickle.load(f)
    print("len(train_features): ", len(train_features))
    print("len(test_features): ", len(test_features))
    test_features, test_target = test_features[:Test_Size], test_target[:Test_Size]
    with open('{}/{}_GenomeScoresDict.data'.format(DATA_PATH, DATA_SET), mode='rb') as f:
        genome_scores_dict = pickle.load(f)
    train_feature_data = pd.DataFrame(train_features, columns=['userId', 'movieId', 'user_frequency', 'movie_frequency'])
    test_feature_data = pd.DataFrame(test_features, columns=['userId', 'movieId', 'user_frequency', 'movie_frequency'])
    # +1 because raw ids are used directly as embedding indices.
    User_Num = max(train_feature_data['userId'].max() + 1, test_feature_data['userId'].max() + 1)    # 138494
    Movie_Num = max(train_feature_data['movieId'].max() + 1, test_feature_data['movieId'].max() + 1)  # 131263
    max_user_popularity = max(train_feature_data['user_frequency'].max()+1, test_feature_data['user_frequency'].max()+1)
    max_movie_popularity = max(train_feature_data['movie_frequency'].max() + 1, test_feature_data['movie_frequency'].max() + 1)
    return train_features, test_features, train_target, test_target, genome_scores_dict, \
           train_feature_data, test_feature_data, len(train_features), len(test_features), \
           User_Num, Movie_Num, max_user_popularity, max_movie_popularity
def Batch_Losses(Loss_Type, prediction, target):
    """Return the per-sample (unreduced) loss for the given loss type.

    Args:
        Loss_Type: one of 'MSE_sigmoid', 'MSE_no_sigmoid',
            'BCEWithLogitsLoss', 'CrossEntropyLoss'.
        prediction: raw model outputs (logits).
        target: labels — float tensor, or int64 class indices for
            'CrossEntropyLoss'.
    Returns:
        Tensor of per-sample losses (reduction='none').
    Raises:
        ValueError: for an unknown Loss_Type.  (The original only printed
        a message and implicitly returned None, deferring the crash to
        the caller.)
    """
    if Loss_Type == 'MSE_sigmoid':
        # Squash logits through a sigmoid before the squared error.
        return nn.MSELoss(reduction='none')(nn.Sigmoid()(prediction), target)
    elif Loss_Type == 'MSE_no_sigmoid':
        return nn.MSELoss(reduction='none')(prediction, target)
    elif Loss_Type == 'BCEWithLogitsLoss':
        return nn.BCEWithLogitsLoss(reduction='none')(prediction, target)
    elif Loss_Type == 'CrossEntropyLoss':
        return nn.CrossEntropyLoss(reduction='none')(prediction, target)
    raise ValueError('No such Loss_Type: {}'.format(Loss_Type))
def Batch_Accuracies(Loss_Type, prediction, target):
    """Return a per-sample list of 0/1 correctness indicators.

    Args:
        Loss_Type: same options as Batch_Losses; decides how raw
            predictions are thresholded into class labels.
        prediction: raw model outputs.
        target: ground-truth labels.
    Returns:
        list of ints (1 = correct, 0 = wrong), one per sample.
    Raises:
        ValueError: for an unknown Loss_Type.  (The original printed a
        message and then crashed with NameError on `predicted`.)
    """
    with torch.no_grad():
        if Loss_Type == 'MSE_sigmoid':
            predicted = 1 * (torch.sigmoid(prediction).data > 0.5)
        elif Loss_Type == 'MSE_no_sigmoid':
            predicted = 1 * (prediction > 0.5)
        elif Loss_Type == 'BCEWithLogitsLoss':
            predicted = 1 * (torch.sigmoid(prediction).data > 0.5)
        elif Loss_Type == 'CrossEntropyLoss':
            # Predicted class = argmax over the class dimension.
            _, predicted = torch.max(prediction, 1)
        else:
            raise ValueError('No such Loss_Type: {}'.format(Loss_Type))

        # Fix: the original stored this in a local named `Batch_Accuracies`,
        # shadowing the function itself.
        correct = 1 * (predicted == target)
        return list(correct.detach().cpu().numpy())
def Beta(length, popularity, be=10):
    """Discretize a Beta(popularity, be) distribution into `length` bins.

    Returns the probability mass falling into each of the `length`
    equal-width sub-intervals of [0, 1].
    """
    edges = [k / length for k in range(length + 1)]
    return [beta.cdf(hi, popularity, be) - beta.cdf(lo, popularity, be)
            for lo, hi in zip(edges[:-1], edges[1:])]
class Policy(nn.Module):
    """Controller network for the embedding-size search.

    Given an entity's popularity (and, depending on Policy_Type, its
    current embedding-size choice and its last loss), emits 2 logits for
    the actions {keep current dim, move up to the next candidate dim}.

    Policy_Type selects the controller input (see `Types` at file top):
        0/1/2 — learned popularity embedding (+ last weights, + last loss)
        3/4/5 — frozen one-hot popularity encoding (+ same extras)
    """
    def __init__(self, Setting_Popularity, Setting_Weight, Policy_Type):
        # Setting_Popularity = [num_popularity_levels, popularity_emb_dim]
        # Setting_Weight     = [num_entities, num_candidate_emb_sizes]
        super(Policy, self).__init__()
        self.Policy_Type = Policy_Type
        # Input width of the final MLP depends on which signals are fed in.
        if self.Policy_Type == 0:
            self.transfrom_input_length = Setting_Popularity[1]
        elif self.Policy_Type == 1:
            self.transfrom_input_length = Setting_Popularity[1] + Setting_Weight[1]
        elif self.Policy_Type == 2:
            self.transfrom_input_length = Setting_Popularity[1] + Setting_Weight[1] + 1
        elif self.Policy_Type == 3:
            self.transfrom_input_length = Setting_Popularity[0]
        elif self.Policy_Type == 4:
            self.transfrom_input_length = Setting_Popularity[0] + Setting_Weight[1]
        elif self.Policy_Type == 5:
            self.transfrom_input_length = Setting_Popularity[0] + Setting_Weight[1] + 1
        else:
            print('No such Policy_Type 1')
        if self.Policy_Type in [0, 1, 2]:
            # Trainable popularity embedding, batch-normalized before use.
            self.emb_popularity = nn.Embedding(num_embeddings=Setting_Popularity[0], embedding_dim=Setting_Popularity[1])
            self.batch_norm = nn.BatchNorm1d(Setting_Popularity[1])
        elif self.Policy_Type in [3, 4, 5]:
            # Frozen identity matrix: lookup yields a one-hot popularity code.
            self.emb_popularity = nn.Embedding(num_embeddings=Setting_Popularity[0], embedding_dim=Setting_Popularity[0]).to(dtype=torch.float32)
            self.emb_popularity.weight.data = torch.eye(Setting_Popularity[0])
            self.emb_popularity.weight.requires_grad = False
        else:
            print('No such Policy_Type 2')
        self.transfrom = nn.Sequential(
            nn.Linear(self.transfrom_input_length, 512),
            nn.BatchNorm1d(512),
            nn.Tanh(),
            nn.Linear(512, 2))  # remain dim, increase dim
            # nn.Softmax(dim=1))
    def forward(self, popularity, emb_sizes, last_loss):
        '''
        popularity: (batch_size)
        emb_sizes: (batch_size) — index into Emb_Size of the current dim
        last_loss: (batch_size x 1) — used by Policy_Type 2 and 5 only
        output: (batch_size x 2) action logits (keep dim, increase dim)
        '''
        # emb_popularity: (batch_size x emb_size)
        emb_popularity = self.emb_popularity(popularity)
        if self.Policy_Type in [0, 1, 2]:
            transformed_emb_popularity = self.batch_norm(emb_popularity)
        elif self.Policy_Type in [3, 4, 5]:
            # One-hot encodings are left as-is (no normalization needed).
            transformed_emb_popularity = emb_popularity
        else:
            transformed_emb_popularity = None
            print('No such Policy_Type 3')
        if self.Policy_Type in [0, 3]:
            concatenation = transformed_emb_popularity
        elif self.Policy_Type in [1, 4]:
            # One-hot of the current embedding-size choice.
            last_weights = nn.functional.one_hot(emb_sizes, num_classes=len(Emb_Size)).float()
            concatenation = torch.cat((transformed_emb_popularity, last_weights), 1)
        elif self.Policy_Type in [2, 5]:
            last_weights = nn.functional.one_hot(emb_sizes, num_classes=len(Emb_Size)).float()
            concatenation = torch.cat((transformed_emb_popularity, last_weights, last_loss), 1)
        else:
            print('No such Policy_Type 4')
        return self.transfrom(concatenation)
class RS_MLP(nn.Module):
    """Recommender model with one embedding table per candidate dimension.

    Each user/movie keeps an embedding in every size of Emb_Size; W_user /
    W_movie chain the smaller embeddings up to the largest dim, and the
    per-sample one-hot size choice selects which projected embedding is
    actually used for prediction.
    """
    def __init__(self, Output_Dim, Dynamic_Emb_Num):
        # Output_Dim: 1 for regression-style losses, 5 for CrossEntropyLoss.
        # Dynamic_Emb_Num: 1 = only user dims are searched (movies use the
        # genome-vector pathway); 2 = both user and movie dims are searched.
        super(RS_MLP, self).__init__()
        # self.emb_user = nn.Embedding(num_embeddings=User_Num, embedding_dim=sum(Emb_Size))
        # self.emb_movie = nn.Embedding(num_embeddings=Movie_Num, embedding_dim=sum(Emb_Size))
        self.emb_user = nn.ModuleList(nn.Embedding(num_embeddings=User_Num, embedding_dim=emb_size) for emb_size in Emb_Size)
        self.emb_movie = nn.ModuleList(nn.Embedding(num_embeddings=Movie_Num, embedding_dim=emb_size) for emb_size in Emb_Size)
        # for emb in self.emb_user + self.emb_movie:
        #     emb.to(device)
        self.bn_user = nn.BatchNorm1d(max(Emb_Size))
        self.bn_movie = nn.BatchNorm1d(max(Emb_Size))
        # Linear "lifting" maps: Emb_Size[i] -> Emb_Size[i+1].
        self.W_user = nn.ModuleList([nn.Linear(Emb_Size[i], Emb_Size[i + 1]) for i in range(len(Emb_Size) - 1)])
        self.W_movie = nn.ModuleList([nn.Linear(Emb_Size[i], Emb_Size[i + 1]) for i in range(len(Emb_Size) - 1)])
        self.tanh = nn.Tanh()
        # Maps the 1128-dim genome-score vector to the embedding space
        # (used only when den == 1).
        self.movie_transfrom = nn.Sequential( # nn.BatchNorm1d(1128),
                                             nn.Linear(1128, 512),
                                             nn.BatchNorm1d(512),
                                             nn.Tanh(),
                                             nn.Linear(512, max(Emb_Size)))
        # Final predictor over the concatenated user/movie vectors.
        self.transfrom = nn.Sequential(
            nn.BatchNorm1d(max(Emb_Size) * 2),
            nn.Linear(max(Emb_Size) * 2, 512),
            nn.BatchNorm1d(512),
            nn.Tanh(),
            nn.Linear(512, Output_Dim))
        self.den = Dynamic_Emb_Num
        # setattr(self, 'z', 666)
    def forward(self, u_emb_sizes, m_emb_sizes, userId, movieId, movie_vec):
        '''
        u_emb_sizes: (batch_size) — chosen size index per user
        m_emb_sizes: (batch_size) — chosen size index per movie (den == 2)
        movie_vec: (batch_size x 1128) genome vector, only when den == 1
        '''
        # u_weight: (batch_size x emb_num)
        # m_weight: (batch_size x emb_num)
        u_weight = nn.functional.one_hot(u_emb_sizes, num_classes=len(Emb_Size))
        if self.den == 2:
            m_weight = nn.functional.one_hot(m_emb_sizes, num_classes=len(Emb_Size))
        user_emb = [self.emb_user[i](userId) for i in range(len(Emb_Size))]
        movie_emb = None if self.den == 1 else [self.emb_movie[i](movieId) for i in range(len(Emb_Size))]
        # Lift every candidate embedding through the remaining W maps so
        # each one ends up at max(Emb_Size) dimensions.
        user_embs = []
        for i in range(len(Emb_Size)):
            temp = user_emb[i]
            for j in range(i, len(Emb_Size) - 1):
                temp = self.W_user[j](temp)
            user_embs.append(temp)
        if self.den == 2:
            movie_embs = []
            for i in range(len(Emb_Size)):
                temp = movie_emb[i]
                for j in range(i, len(Emb_Size) - 1):
                    temp = self.W_movie[j](temp)
                movie_embs.append(temp)
        # One-hot weights select exactly one lifted embedding per sample.
        v_user = sum([torch.reshape(u_weight[:, i], (len(u_weight), -1)) * self.tanh(
            self.bn_user(user_embs[i])) for i in range(len(Emb_Size))])
        v_movie = sum([torch.reshape(m_weight[:, i], (len(m_weight), -1)) * self.tanh(
            self.bn_movie(movie_embs[i])) for i in range(len(Emb_Size))]) if self.den == 2 else self.movie_transfrom(movie_vec)
        user_movie = torch.cat((v_user, v_movie), 1)
        return self.transfrom(user_movie)
def update_controller(index, features, target):
    """ Update user_policy and movie_policy (REINFORCE on a validation batch).

    Samples dimension-increase actions from the policies, simulates the
    resulting embedding transfer in the RS model, and scores the sampled
    actions with reward = 1 - loss (minus an optional baseline set by
    Reward_Base).  No-op unless Train_Method == 'AutoML' and index > 0.

    Args:
        index: start offset of the *current* training batch; the
            validation batch is drawn from data before this offset.
        features: full feature array [userId, movieId, userPop, moviePop].
        target: label array aligned with `features`.
    Side effects: steps optimizer_user / optimizer_movie; reads many
    module-level globals (model, user_weights, user_losses, ...).
    """
    if Train_Method == 'AutoML' and index > 0:
        if Val_Type == 'last_random':
            # Validation batch: random sample from everything seen so far.
            val_index = np.random.choice(index, Batch_Size)
            batch_train = features[:index][val_index]
            batch_train_target = target[:index][val_index]
        elif Val_Type == 'last_batch':
            # Validation batch: the most recent training batch.
            batch_train = features[index - Batch_Size:index]
            batch_train_target = target[index - Batch_Size:index]
        else:
            batch_train = None
            batch_train_target = None
            print('No such Val_Type')
        userId = torch.tensor(batch_train[:, 0], requires_grad=False).to(device)
        movieId = torch.tensor(batch_train[:, 1], requires_grad=False).to(device)
        userPop = torch.tensor(batch_train[:, 2], requires_grad=False).to(device)
        moviePop = torch.tensor(batch_train[:, 3], requires_grad=False).to(device)
        # Current per-entity size-index choices and most recent losses.
        old_uw = torch.tensor(user_weights[batch_train[:, 0]], requires_grad=False).to(device)
        old_mw = torch.tensor(movie_weights[batch_train[:, 1]], requires_grad=False).to(device)
        old_ul = torch.tensor(user_losses[batch_train[:, 0], :], requires_grad=False).to(device)
        old_ml = torch.tensor(movie_losses[batch_train[:, 1], :], requires_grad=False).to(device)
        movie_vec = torch.tensor([genome_scores_dict[str(batch_train[:, 1][i])] for i in range(len(batch_train[:, 1]))],
                                 requires_grad=False).to(device) if Dy_Emb_Num == 1 else None
        batch_train_target = torch.tensor(batch_train_target,
                                          dtype=torch.int64 if Loss_Type == 'CrossEntropyLoss' else torch.float32,
                                          requires_grad=False).to(device)
        if Reward_Base == 'ave_loss':
            # Loss history and visit counts for the average-loss baseline.
            old_utl = torch.tensor(user_total_losses[batch_train[:, 0]], requires_grad=False).to(device)
            old_mtl = torch.tensor(movie_total_losses[batch_train[:, 1]], requires_grad=False).to(device)
            old_uc = torch.tensor(user_count[batch_train[:, 0]], requires_grad=False).to(device)
            old_mc = torch.tensor(movie_count[batch_train[:, 1]], requires_grad=False).to(device)

        # user_adj_prob: (batch_size x 2)
        user_adj_weights = user_policy(userPop, old_uw, old_ul)
        user_adj_prob = nn.functional.softmax(user_adj_weights, dim=-1)
        # Entities already at the largest dim cannot increase further.
        mask = old_uw != len(Emb_Size) - 1
        # user_adj_samples: (batch_size) — sampled action: 0 keep, 1 grow
        user_adj_samples = mask * torch.multinomial(user_adj_prob, 1).squeeze(1)
        # new_uw: (batch_size)
        new_uw = old_uw + user_adj_samples
        if Dy_Emb_Num == 2:
            # movie_adj_prob: (batch_size x 3)
            movie_adj_weights = movie_policy(moviePop, old_mw, old_ml)
            movie_adj_prob = nn.functional.softmax(movie_adj_weights, dim=-1)
            mask = old_mw != len(Emb_Size) - 1
            # movie_adj_samples: (batch_size)
            movie_adj_samples = mask * torch.multinomial(movie_adj_prob, 1).squeeze(1)
            # new_mw: (batch_size)
            new_mw = old_mw + movie_adj_samples
        else:
            new_mw = 0
        with torch.no_grad():
            # NOTE(review): temp_emb_user only aliases model.emb_user (no
            # copy), so the "restore" below does not undo the in-place
            # weight writes — confirm this is intended.
            temp_emb_user = model.emb_user
            # Transfer embeddings for entities whose dim grew: lift the
            # old (size i) embedding into the size-j table via W_user[i].
            for i in range(len(Emb_Size) - 1):
                j = i + 1
                part_userId = userId[
                    ((old_uw == i) * (new_uw == j)).nonzero().squeeze(1)]
                if len(part_userId) > 0:
                    model.emb_user[j].weight[part_userId, :] = model.W_user[i](
                        model.emb_user[i].weight[part_userId, :])
            if Dy_Emb_Num == 2:
                temp_emb_movie = model.emb_movie
                for i in range(len(Emb_Size) - 1):
                    j = i + 1
                    part_movieId = movieId[
                        ((old_mw == i) * (new_mw == j)).nonzero().squeeze(1)]
                    if len(part_movieId) > 0:
                        model.emb_movie[j].weight[part_movieId, :] = model.W_movie[i](
                            model.emb_movie[i].weight[part_movieId, :])
            # Evaluate the model under the sampled size choices.
            # rating: (batch_size x 1)
            rating = model(new_uw, new_mw, userId, movieId, movie_vec)
            # rating: (batch_size)
            rating = rating.squeeze(1).squeeze(1) if Loss_Type == 'CrossEntropyLoss' else rating.squeeze(1)
            # batch_losses: (batch_size)
            batch_losses = Batch_Losses(Loss_Type, rating, batch_train_target)
            rewards = 1 - batch_losses
            model.emb_user = temp_emb_user
        if Dy_Emb_Num == 1:
            # Optional baseline subtraction, then policy-gradient step:
            # loss = sum(CE(logits, sampled_action) * reward).
            if Reward_Base == 'last_loss':
                baseline = 1 - old_ul[:, 0]
                rewards = rewards - baseline
            elif Reward_Base == 'ave_loss':
                last_num_tensor = torch.Tensor([last_num]).repeat(len(old_uc)).to(device)
                baseline = 1 - torch.sum(old_utl, dim=1) / torch.where(old_uc < last_num, old_uc, last_num_tensor)
                rewards = rewards - baseline
            loss = torch.sum(ControllerLoss(user_adj_weights, user_adj_samples) * rewards)
            if index % 100000 == 0:
                print("rewards: ", rewards[:50].tolist())
            optimizer_user.zero_grad()
            loss.backward()
            optimizer_user.step()
        elif Dy_Emb_Num == 2:
            model.emb_movie = temp_emb_movie
            if Reward_Base == 'last_loss':
                baseline_u = 1 - old_ul[:, 0]
                baseline_m = 1 - old_ml[:, 0]
            elif Reward_Base == 'ave_loss':
                last_num_tensor = torch.Tensor([last_num]).repeat(len(old_uc)).to(device)
                baseline_u = 1 - torch.sum(old_utl, dim=1) / torch.where(old_uc < last_num, old_uc, last_num_tensor)
                baseline_m = 1 - torch.sum(old_mtl, dim=1) / torch.where(old_mc < last_num, old_mc, last_num_tensor)
            rewards_u = rewards - baseline_u
            rewards_m = rewards - baseline_m
            # Separate policy-gradient steps for user and movie policies.
            loss_u = torch.sum(ControllerLoss(user_adj_weights, user_adj_samples) * rewards_u)
            loss_m = torch.sum(ControllerLoss(movie_adj_weights, movie_adj_samples) * rewards_m)
            if index % 100000 == 0:
                print("rewards_u: ", rewards_u[:50].tolist())
                print("rewards_m: ", rewards_m[:50].tolist())
            optimizer_user.zero_grad()
            loss_u.backward()
            optimizer_user.step()
            optimizer_movie.zero_grad()
            loss_m.backward()
            optimizer_movie.step()
def update_RS(index, features, Len_Features, target, mode):
    """ Update RS's embeddings and NN on the batch starting at `index`.

    Applies the policies greedily (argmax, no sampling) to pick embedding
    sizes, lifts embeddings whose size grew, runs the model, records
    losses/accuracies/dims into the module-level trackers, and steps the
    model optimizer (or the whole-network optimizer for 'Supervised').

    Args:
        index: start offset of the batch within `features`.
        features: feature array [userId, movieId, userPop, moviePop].
        Len_Features: total number of rows in `features`.
        target: label array aligned with `features`.
        mode: 'train' or 'test' — selects which loss/accuracy lists fill.
    """
    global train_sample_loss, train_sample_accuracy, user_dims_record, movie_dims_record
    index_end = index + Batch_Size
    if index_end >= Len_Features:
        batch_train = features[index:Len_Features]
        batch_train_target = target[index:Len_Features]
    else:
        batch_train = features[index:index_end]
        batch_train_target = target[index:index_end]
    userId = torch.tensor(batch_train[:, 0], requires_grad=False).to(device)
    movieId = torch.tensor(batch_train[:, 1], requires_grad=False).to(device)
    userPop = torch.tensor(batch_train[:, 2], requires_grad=False).to(device)
    moviePop = torch.tensor(batch_train[:, 3], requires_grad=False).to(device)
    old_uw = torch.tensor(user_weights[batch_train[:, 0]], requires_grad=False).to(device)
    old_mw = torch.tensor(movie_weights[batch_train[:, 1]], requires_grad=False).to(device)
    old_ul = torch.tensor(user_losses[batch_train[:, 0], :], requires_grad=False).to(device)
    # Fix: the original read `user_losses[batch_train[:, 1], :]` here —
    # indexing the *user* loss table with movie ids.  update_controller
    # uses movie_losses for old_ml; do the same.
    old_ml = torch.tensor(movie_losses[batch_train[:, 1], :], requires_grad=False).to(device)
    movie_vec = torch.tensor([genome_scores_dict[str(batch_train[:, 1][i])] for i in range(len(batch_train[:, 1]))],
                             requires_grad=False).to(device) if Dy_Emb_Num == 1 else None
    batch_train_target = torch.tensor(batch_train_target,
                                      dtype=torch.int64 if Loss_Type == 'CrossEntropyLoss' else torch.float32,
                                      requires_grad=False).to(device)
    with torch.no_grad():
        # Greedy action: take argmax of the policy instead of sampling.
        # user_adj_prob: (batch_size x 2)
        user_adj_weights = user_policy(userPop, old_uw, old_ul)
        user_adj_prob = nn.functional.softmax(user_adj_weights, dim=-1)
        # Entities already at the largest dim cannot grow further.
        mask = old_uw != len(Emb_Size) - 1
        user_adj = mask * torch.argmax(user_adj_prob, dim=1)
        # new_uw: (batch_size)
        new_uw = old_uw + user_adj
        if index % 50000 == 0:
            print("old_uw: ", old_uw)
            print("new_uw: ", new_uw)
        # Lift embeddings whose size index grew from i to j = i + 1.
        for i in range(len(Emb_Size) - 1):
            j = i + 1
            part_userId = userId[((old_uw == i) * (new_uw == j)).nonzero().squeeze(1)]
            if len(part_userId) > 0:
                model.emb_user[j].weight[part_userId, :] = model.W_user[i](
                    model.emb_user[i].weight[part_userId, :])
        if Dy_Emb_Num == 2:
            # movie_adj_prob: (batch_size x 2)
            movie_adj_weights = movie_policy(moviePop, old_mw, old_ml)
            movie_adj_prob = nn.functional.softmax(movie_adj_weights, dim=-1)
            mask = old_mw != len(Emb_Size) - 1
            movie_adj = mask * torch.argmax(movie_adj_prob, dim=1)
            # new_mw: (batch_size x emb_num)
            new_mw = old_mw + movie_adj
            if index % 50000 == 0:
                print("old_mw: ", old_mw)
                print("new_mw: ", new_mw)
            for i in range(len(Emb_Size) - 1):
                j = i + 1
                part_movieId = movieId[
                    ((old_mw == i) * (new_mw == j)).nonzero().squeeze(1)]
                if len(part_movieId) > 0:
                    model.emb_movie[j].weight[part_movieId, :] = model.W_movie[i](
                        model.emb_movie[i].weight[part_movieId, :])
        else:
            new_mw = 0
    rating = model(new_uw, new_mw, userId, movieId, movie_vec)
    rating = rating.squeeze(1).squeeze(1) if Loss_Type == 'CrossEntropyLoss' else rating.squeeze(1)
    # batch_losses: (batch_size)
    batch_losses = Batch_Losses(Loss_Type, rating, batch_train_target)
    loss = sum(batch_losses)
    batch_accuracies = Batch_Accuracies(Loss_Type, rating, batch_train_target)
    # Record per-sample diagnostics and per-batch aggregates.
    train_sample_loss += list(batch_losses.detach().cpu().numpy())
    losses[mode].append(loss.detach().cpu().numpy())
    train_sample_accuracy += batch_accuracies
    accuracies[mode].append((sum(batch_accuracies), len(batch_train_target)))
    user_dims_record += [Emb_Size[item] for item in new_uw.detach().cpu()]
    if Dy_Emb_Num == 2:
        movie_dims_record += [Emb_Size[item] for item in new_mw.detach().cpu()]
    if Train_Method == 'AutoML':
        # AutoML: only the RS model trains here; policies train separately.
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()
    elif Train_Method == 'Supervised':
        # Supervised: model and policies train jointly on the task loss.
        optimizer_whole.zero_grad()
        loss.backward()
        optimizer_whole.step()
    else:
        print('No such Train_Method')

    """ Update old_uw old_mw old_ul old_ml """
    user_weights[batch_train[:, 0]] = new_uw.detach().cpu().numpy()
    movie_weights[batch_train[:, 1]] = new_mw.detach().cpu().numpy() if Dy_Emb_Num == 2 else np.zeros((len(batch_train),))
    user_losses[batch_train[:, 0], :] = np.reshape(batch_losses.detach().cpu().numpy(), (-1, 1))
    movie_losses[batch_train[:, 1], :] = np.reshape(batch_losses.detach().cpu().numpy(), (-1, 1))
    final_user_pop[batch_train[:, 0]] = batch_train[:, 2]
    final_movie_pop[batch_train[:, 1]] = batch_train[:, 3]
    if Reward_Base == 'ave_loss':
        # Shift the loss history window and prepend the newest loss.
        user_total_losses[batch_train[:, 0], 1:last_num] = user_total_losses[batch_train[:, 0], 0:last_num-1]
        movie_total_losses[batch_train[:, 1], 1:last_num] = movie_total_losses[batch_train[:, 1], 0:last_num-1]
        user_total_losses[batch_train[:, 0], 0] = user_losses[batch_train[:, 0], 0]
        movie_total_losses[batch_train[:, 1], 0] = movie_losses[batch_train[:, 1], 0]
        user_count[batch_train[:, 0]] += 1
        movie_count[batch_train[:, 1]] += 1
if __name__ == "__main__":
Output_Dim = 5 if Loss_Type == 'CrossEntropyLoss' else 1
train_features, test_features, train_target, test_target, genome_scores_dict, \
train_feature_data, test_feature_data, Len_Train_Features, Len_Test_Features, \
User_Num, Movie_Num, max_user_popularity, max_movie_popularity = load_data()
train_feature_data, test_feature_data = train_feature_data[:Len_Train_Features], test_feature_data[:Len_Test_Features]
Emb_Split = [0] + [sum(Emb_Size[0:i + 1]) for i in range(len(Emb_Size))] # [0, 2, 18, 146]
Setting_User_Popularity = [max_user_popularity, 32]
Setting_Movie_Popularity = [max_movie_popularity, 32]
Setting_User_Weight = [User_Num, len(Emb_Size)]
Setting_Movie_Weight = [Movie_Num, len(Emb_Size)]
if Train_Method == 'AutoML' and H_alpha > 0:
Beta_Dis = nn.Embedding(num_embeddings=max(max_user_popularity, max_movie_popularity), embedding_dim=len(Emb_Size)).to(dtype=torch.float32)
Beta_Dis.weight.data = torch.tensor(np.array([Beta(len(Emb_Size), popularity, Beta_Beta) for popularity in range(1, max(max_user_popularity, max_movie_popularity) + 1)]), dtype=torch.float32, requires_grad=False)
Beta_Dis.weight.requires_grad = False
Beta_Dis.to(device)
criterion = nn.KLDivLoss(reduction='sum')
user_policy = Policy(Setting_User_Popularity, Setting_User_Weight, Policy_Type)
movie_policy = Policy(Setting_Movie_Popularity, Setting_Movie_Weight, Policy_Type)
model = RS_MLP(Output_Dim, Dy_Emb_Num)
user_policy.to(device)
movie_policy.to(device)
model.to(device)
if Model_Gpu:
print('\n========================================================================================\n')
print('Model_Gpu?:', next(model.parameters()).is_cuda, next(user_policy.parameters()).is_cuda, next(movie_policy.parameters()).is_cuda)
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
print('\n========================================================================================\n')
user_weights = np.zeros((User_Num,), dtype=np.int64)
movie_weights = np.zeros((Movie_Num,), dtype=np.int64)
final_user_pop = np.zeros((User_Num,), dtype=np.int64)
final_movie_pop = np.zeros((User_Num,), dtype=np.int64)
user_losses = np.ones((User_Num, 1), dtype=np.float32)
movie_losses = np.ones((Movie_Num, 1), dtype=np.float32)
if Reward_Base == 'ave_loss':
user_total_losses = np.zeros((User_Num, last_num), dtype=np.float32)
movie_total_losses = np.zeros((Movie_Num, last_num), dtype=np.float32)
user_count = np.zeros((User_Num,), dtype=np.float32)
movie_count = np.zeros((Movie_Num,), dtype=np.float32)
t0 = time.time()
optimizer_model = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LR_model, weight_decay=0)
optimizer_user = torch.optim.Adam(filter(lambda p: p.requires_grad, user_policy.parameters()), lr=LR_darts, weight_decay=0)
optimizer_movie = torch.optim.Adam(filter(lambda p: p.requires_grad, movie_policy.parameters()), lr=LR_darts, weight_decay=0)
optimizer_darts = torch.optim.Adam(filter(lambda p: p.requires_grad, list(user_policy.parameters()) + list(movie_policy.parameters())), lr=LR_darts, weight_decay=0)
optimizer_whole = torch.optim.Adam(filter(lambda p: p.requires_grad, list(model.parameters()) + list(user_policy.parameters()) + list(movie_policy.parameters())), lr=LR_model, weight_decay=0)
losses = {'train': [], 'test': []}
accuracies = {'train': [], 'test': []}
train_sample_loss = list()
train_sample_accuracy = list()
user_dims_record = list()
movie_dims_record = list()
print('\n******************************************Train******************************************\n')
for epoch_i in range(Epoch):
index = 0
while index < Len_Train_Features:
update_controller(index, train_features, train_target)
update_RS(index, train_features, Len_Train_Features, train_target, mode='train')
if len(losses['train']) % 10 == 0:
print('Epoch = {:>3} Batch = {:>4}/{:>4} ({:.3f}%) train_loss = {:.3f} train_accuracy = {:.3f} total_time = {:.3f} min'.format(
epoch_i, index + Batch_Size, Len_Train_Features, 100 * (index + Batch_Size) / Len_Train_Features, sum(losses['train'][-10:]) / 10,
sum([item[0] / item[1] for item in accuracies['train'][-10:]]) / 10,
(time.time() - t0) / 60))
index += Batch_Size
#############################test#############################
print('\n******************************************Test******************************************\n')
t0 = time.time()
index = 0
# Len_Test_Features = 20000
while index < Len_Test_Features:
update_controller(index, test_features, test_target)
update_RS(index, test_features, Len_Test_Features, test_target, mode='test')
if len(losses['test']) % 10 == 0:
print(
'Test Batch = {:>4}/{:>4} ({:.3f}%) test_loss = {:.3f} test_accuracy = {:.3f} whole_time = {:.3f} min'.format(
index + Batch_Size, Len_Test_Features, 100 * (index + Batch_Size) / Len_Test_Features,
sum(losses['test'][-10:]) / 10,
sum([item[0] / item[1] for item in accuracies['test'][-10:]]) / 10, (time.time() - t0) / 60))
index += Batch_Size
correct_num = sum([item[0] for item in accuracies['test']])
test_num = sum([item[1] for item in accuracies['test']])
print('Test Loss: {:.4f}'.format(sum(losses['test']) / test_num))
print('Test Correct Num: {}'.format(correct_num))
print('Test Num: {}'.format(test_num))
print('Test Accuracy: {:.4f}'.format(correct_num / test_num))
# Save model
save_model_name = './save_model/DyEmbNum{}_Policy_Type{}_LossType{}_Reward_Base{}_last{}_TestAcc{:.4f}'.format(
Dy_Emb_Num, Policy_Type, Loss_Type, Reward_Base, last_num,
correct_num / test_num)
torch.save(model.state_dict(), save_model_name + '.pt')
with open(save_model_name + '_weights.pkl', 'wb') as f:
if Dy_Emb_Num == 1:
pk.dump((final_user_pop, user_weights), f)
elif Dy_Emb_Num == 2:
pk.dump(((final_user_pop, user_weights), (final_movie_pop, movie_weights)), f)
print('Model saved to ' + save_model_name + '.pt')
print('Weights saved to ' + save_model_name + '_weights.pkl')
feature_data = pd.concat([train_feature_data, test_feature_data])
print("feature_data: ", feature_data.shape[0], feature_data.shape[1])
feature_data['user_dims'] = pd.DataFrame(
[[i] for i in user_dims_record])
if Dy_Emb_Num == 2:
feature_data['movie_dims'] = pd.DataFrame([[i] for i in movie_dims_record])
feature_data['{}{}_loss_{}'.format(Train_Method[0], Policy_Type, Emb_Size)] = pd.DataFrame(
[[i] for i in train_sample_loss])
feature_data['{}{}_acc_{}'.format(Train_Method[0], Policy_Type, Emb_Size)] = pd.DataFrame(
[[i] for i in train_sample_accuracy])
print('\n****************************************************************************************\n')
if Model_Gpu:
print('\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
# torch.cuda.empty_cache()
print('Memory: ', torch.cuda.memory_allocated(0) / 1024 ** 3, 'GB', torch.cuda.memory_cached(0) / 1024 ** 3, 'GB')
print('\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
Parameter_Name = 'DataSet{}_ValType{}_Policy{}_DyEmbNum{}_LossType{}_RewardBase{}'.format(
DATA_SET,
Val_Type if Train_Method == 'AutoML' else 'None',
Policy_Type,
Dy_Emb_Num,
Loss_Type,
Reward_Base)
feature_data.to_csv('./results/feature_data_with_loss_{}.csv'.format(Parameter_Name), index=None)
if Dy_Emb_Num == 1:
result_user = []
for i in range(1, 100):
feature_data1 = feature_data[feature_data['user_frequency'] == i]
result_user.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result_user).to_csv('./results/result_{}_user.csv'.format(Parameter_Name), index=None,
header=Head)
elif Dy_Emb_Num == 2:
result_user, result_movie = [], []
for i in range(1, 100):
feature_data1 = feature_data[feature_data['user_frequency'] == i]
result_user.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result_user).to_csv('./results/result_{}_user.csv'.format(Parameter_Name), index=None,
header=Head)
for i in range(1, 100):
feature_data1 = feature_data[feature_data['movie_frequency'] == i]
result_movie.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result_movie).to_csv('./results/result_{}_movie.csv'.format(Parameter_Name), index=None,
header=Head)
result = []
for i in range(int(Train_Size/1000000)):
feature_data1 = feature_data[i*1000000:(i+1)*1000000]
result.append(list(feature_data1.mean(axis=0)) + [len(feature_data1)])
Head = list(feature_data.columns) + ['count']
pd.DataFrame(result).to_csv('./results/result_{}_trendency.csv'.format(Parameter_Name), index=None, header=Head)
print('\n****************************************************************************************\n')
print('os.getpid(): ', os.getpid())
if torch.cuda.is_available():
print('torch.cuda: ', torch.cuda.is_available(), torch.cuda.current_device(), torch.cuda.device_count(), torch.cuda.get_device_name(0), torch.cuda.device(torch.cuda.current_device()))
else:
print('GPU is not available!!!')
print('Train_Size: ', Train_Size)
print('Test_Size: ', Test_Size)
print('Emb_Size: ', Emb_Size)
print('Dy_Emb_Num: ', Dy_Emb_Num)
print('Loss_Type: ', Loss_Type)
print('Train_Method: ', Train_Method)
print('Policy_Type: ', Types[Policy_Type])
print('Val_Type: ', Val_Type)
print('Beta_Beta: ', Beta_Beta)
print('H_alpha: ', H_alpha)
print('LR_model: ', LR_model)
print('LR_darts: ', LR_darts)
print('\n****************************************************************************************\n')
print('{} done'.format(Train_Method))
| [
"liuhaochen@s-MacBook-puro.local"
] | liuhaochen@s-MacBook-puro.local |
84555327ae07d2945fac7b3d7ca618e1946fb291 | e56214188faae8ebfb36a463e34fc8324935b3c2 | /intersight/models/workflow_default_value_ref.py | 18613e62146e7f7c285e489454fb63c30fab824b | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 5,734 | py | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkflowDefaultValueRef(object):
    """Swagger-generated MoRef: a reference to a REST resource, identified
    by its Object Type plus either a Moid or an OData selector expression.

    NOTE: this model was originally produced by swagger-codegen; hand edits
    must keep the generated public interface intact.
    """

    # Attribute name -> swagger type of that attribute.
    swagger_types = {
        'object_type': 'str',
        'moid': 'str',
        'selector': 'str'
    }

    # Attribute name -> key used for it in the JSON definition.
    attribute_map = {
        'object_type': 'ObjectType',
        'moid': 'Moid',
        'selector': 'Selector'
    }

    def __init__(self, object_type=None, moid=None, selector=None):
        """Build a WorkflowDefaultValueRef; every field is optional."""
        self._object_type = None
        self._moid = None
        self._selector = None
        # Route constructor arguments through the property setters so any
        # setter-side behavior applies uniformly.
        if object_type is not None:
            self.object_type = object_type
        if moid is not None:
            self.moid = moid
        if selector is not None:
            self.selector = selector

    @property
    def object_type(self):
        """str: the Object Type of the referenced REST resource."""
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        self._object_type = object_type

    @property
    def moid(self):
        """str: the Moid of the referenced REST resource."""
        return self._moid

    @moid.setter
    def moid(self, moid):
        self._moid = moid

    @property
    def selector(self):
        """str: an OData $filter expression describing the resource.

        May be set instead of 'moid'; ignored when 'moid' is present. The
        filter must match exactly one resource, e.g. "Serial eq '3AA8B7T11'".
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        self._selector = selector

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(value):
            # Recursively convert nested models / lists / dicts to plain data.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when all their attributes are equal."""
        if not isinstance(other, WorkflowDefaultValueRef):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"ucs-build@github.com"
] | ucs-build@github.com |
4969afebf44a83a2aed909f5bb39f4088b19c5a3 | 4f0aab6f1064ea189b9cdfc2f713f56a8fd5ceba | /comment_analyser.py | 9816950f5343757551137b678239572b6eb6107c | [
"MIT"
] | permissive | tarunvelagala/youtube-song-comment-highlights | aed2c0cfc35822e4a963b042464907f2f3776f6e | 26bd93297a17800f9b54eb2302945dd10e8d29d6 | refs/heads/master | 2020-04-13T09:59:24.718413 | 2019-06-11T15:56:54 | 2019-06-11T15:56:54 | 163,126,457 | 0 | 0 | MIT | 2018-12-26T02:19:14 | 2018-12-26T02:19:13 | null | UTF-8 | Python | false | false | 2,248 | py | from os import path, getcwd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from nltk.tokenize import RegexpTokenizer
from textblob import TextBlob
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from .comment_scraper import lst_comments
d = getcwd()  # working directory; the mask image is expected to live here
# Wrap every scraped YouTube comment in a TextBlob for NLP operations.
blob = [TextBlob(i) for i in lst_comments]
# Keep only English comments longer than 3 characters.
# NOTE(review): detect_language() calls a web translation API per comment,
# so this is slow and rate-limited -- verify connectivity before running.
blob = [i for i in blob if i.detect_language() == 'en' and len(str(i)) > 3]
pos_comments = []    # text of positive comments
pos_cmnt_p_lst = []  # polarity scores of positive comments
neg_comments = []    # text of negative comments
neg_cmnt_p_lst = []  # polarity scores of negative comments
# Split comments by sentiment polarity sign; neutral (0) ones are dropped.
for i in blob:
    if i.polarity > 0:
        pos_cmnt_p_lst.append(i.polarity)
        pos_comments.append(str(i))
    elif i.polarity == 0:
        pass
    else:
        neg_cmnt_p_lst.append(i.polarity)
        neg_comments.append(str(i))
# Word-level tokenization: alphanumeric runs only, punctuation dropped.
tokenizer = RegexpTokenizer(r'\w+')
pos_tokens = [tokenizer.tokenize(i) for i in pos_comments]
neg_tokens = [tokenizer.tokenize(i) for i in neg_comments]
# print(pos_comments)
# print(neg_comments)
# print(pos_tokens)
stop_words = STOPWORDS
# concatenate the per-comment token lists into flat word lists
pos_tokens1 = [x for xs in pos_tokens for x in xs]
neg_tokens1 = [y for ys in neg_tokens for y in ys]
# filter stop words and join the remainder into one space-separated string
_filtered_pos = ' '
_filtered_neg = ' '
for i in pos_tokens1:
    if i not in stop_words:
        _filtered_pos = _filtered_pos + i + ' '
for j in neg_tokens1:
    if j not in stop_words:
        _filtered_neg = _filtered_neg + j + ' '
# Shape mask for the word clouds (word placement follows this image).
mask = np.array(Image.open(path.join(d, "india2.png")))
# word cloud for positive comments
wordcloud_pos = WordCloud(background_color='white', max_font_size=90, mask=mask, max_words=200, contour_width=3,
                          contour_color='green')
image_colors = ImageColorGenerator(mask)
wordcloud_pos.generate(_filtered_pos)
plt.figure()
plt.imshow(wordcloud_pos.recolor(
    color_func=image_colors), interpolation='bilinear')
plt.axis("off")
plt.show()
# word cloud for negative comments
wordcloud_neg = WordCloud(background_color='black', max_font_size=90, mask=mask, max_words=200, contour_color='green',
                          contour_width=2)
image_colors = ImageColorGenerator(mask)
wordcloud_neg.generate(_filtered_neg)
plt.figure()
plt.imshow(wordcloud_neg.recolor(
    color_func=image_colors), interpolation='bilinear')
plt.axis("off")
plt.show()
| [
"tarunvelagala80@gmail.com"
] | tarunvelagala80@gmail.com |
775e8773b7f863ffab19fde06a51b8f24d70c9c8 | 4fa53363fbf4b40251f16a0e0cf8b3112041e5f0 | /Level_1/제일 작은 수 제거하기.py | 93c4848acc05c5d49bd8f10c433a735a26ddf40b | [] | no_license | naayoung/programmers | 29307ac4d8bec520e24ac9179197a15c44731fad | e55c5a9350cd5c6256b623365c0a7f6c3fbba498 | refs/heads/master | 2020-04-23T11:19:23.980023 | 2019-08-13T17:27:03 | 2019-08-13T17:27:03 | 170,897,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # 정수를 저장한 배열, arr 에서 가장 작은 수를 제거한 배열을 리턴하는 함수, solution을 완성해주세요. 단, 리턴하려는 배열이 빈 배열인 경우엔 배열에 -1을 채워 리턴하세요. 예를들어 arr이 [4,3,2,1]인 경우는 [4,3,2]를 리턴 하고, [10]면 [-1]을 리턴 합니다.
# 제한 조건
# arr은 길이 1 이상인 배열입니다.
# 인덱스 i, j에 대해 i ≠ j이면 arr[i] ≠ arr[j] 입니다.
def solution(arr):
    """Return arr with its single smallest element removed, in place.

    Per the problem statement arr always has length >= 1 and distinct
    elements; when removing the minimum would leave nothing, return [-1].
    """
    if len(arr) == 1:
        return [-1]
    smallest = min(arr)
    arr.pop(arr.index(smallest))  # drop the (unique) minimum in place
    return arr
| [
"ask5046a@gmail.com"
] | ask5046a@gmail.com |
45b6c79eddcc908e1ee9bf24bc1a255bce101702 | 46053816f104f93c72cc582cf28fcbbfe30e6bdd | /Chapter08/c8_35_plot_digits_linkage.py | ab0ebd1d6c6d43d2e18716a890382fe489651818 | [
"MIT"
] | permissive | andrewjcoxon/Hands-On-Data-Science-with-Anaconda | bcbfa3630fd25e00f937eed2f626fe20569fc2fa | 82504a059ecd284b3599fa9af2b3eb6bbd6e28f3 | refs/heads/master | 2023-01-13T20:36:06.069693 | 2020-11-12T18:19:44 | 2020-11-12T18:19:44 | 278,735,001 | 0 | 0 | MIT | 2020-07-10T21:18:33 | 2020-07-10T21:18:32 | null | UTF-8 | Python | false | false | 3,083 | py | """
========================================================================
Various Agglomerative Clustering on a 2D embedding of digits
========================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
http://scikit-learn.org/stable/auto_examples/cluster/plot_digits_linkage.html#sphx-glr-auto-examples-cluster-plot-digits-linkage-py
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)  # 8x8 digit images, all ten classes
X = digits.data    # (n_samples, 64) flattened images
y = digits.target  # digit labels 0..9
n_samples, n_features = X.shape
np.random.seed(0)  # reproducible random shifts in nudge_images below
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Each 64-vector of X is treated as an 8x8 image and translated by a small
    random offset (sigma 0.3 px per axis, drawn from np.random); labels are
    duplicated.  Returns the augmented (X, Y).
    """
    # Having a larger dataset shows more clearly the behavior of the
    # methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods are strongly
    # super-linear in n_samples.
    def shift(x):
        # Translate one flattened 8x8 image by a random sub-pixel offset.
        # (was a lambda bound to a name -- PEP 8 E731)
        return ndimage.shift(x.reshape((8, 8)),
                             .3 * np.random.normal(size=2),
                             mode='constant',
                             ).ravel()

    X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
    Y = np.concatenate([y, y], axis=0)
    return X, Y
X, y = nudge_images(X, y)  # augment: double the dataset with shifted copies
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    """Plot the 2D embedding, drawing each sample's digit at its embedded
    position, colored by its cluster assignment.

    X_red : (n_samples, 2) embedding; X : original data (kept for parity
    with the upstream sklearn example); labels : cluster ids per sample;
    title : optional figure title.  Uses the module-global `y` for the
    glyph text, as the upstream example does.
    """
    # Rescale the embedding to the unit square so positions are comparable.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)

    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        # Fix: the 'spectral' colormap was removed from matplotlib;
        # 'nipy_spectral' is its replacement (as in the updated example).
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.nipy_spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
# Spectral embedding projects the 64-D digit vectors down to 2-D.
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
# Fit and plot one agglomerative clustering per linkage strategy.
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| [
"30429265+NirbhayaS@users.noreply.github.com"
] | 30429265+NirbhayaS@users.noreply.github.com |
3c9739f861eb892c6b0dc15e077eecc2320fdccc | c319ab21116227a0c762d1cd1ab375973837ac08 | /src/test.py | 0f39c6216935ce5734d111aed510d452e40cb206 | [] | no_license | LiviaJanke/SummerProject | b81a0b489203775caac673c460bc566610712fd0 | faad53fe1a01ad89d5d3e143bb0777daf7287776 | refs/heads/master | 2023-06-04T16:28:17.246977 | 2021-06-23T14:15:52 | 2021-06-23T14:15:52 | 366,399,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6 | py | #'pls
| [
"eichmliv12@gmail.com"
] | eichmliv12@gmail.com |
ad2ed0d994ef7cc626e77c6eea4bb49e27394ca2 | 50eec69abff3a505df4e85cfa68c169442f6760f | /Day_48_Selenium/main.py | dac0d3b8ad7abcdc11e0fb6dcf66470fd4f51458 | [] | no_license | bencekemenyik/100DaysOfCode | e4e77800612987158982fe604226eb4e754fc30b | 2f7e8edba7c66c0392175a0a5b48f799d74d1e91 | refs/heads/main | 2023-07-26T23:29:13.553380 | 2021-09-09T13:03:48 | 2021-09-09T13:03:48 | 401,998,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from selenium import webdriver
chrome_driver_path = "D:\Python100Nap\Chrome_driver\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get("https://www.python.org/")
# price = driver.find_element_by_id("priceblock_ourprice")
# print(price.text.split('$')[1])
# search_bar = driver.find_element_by_name("q")
# print(search_bar.tag_name)
# print(search_bar.get_attribute("placeholder"))
# logo = driver.find_element_by_class_name("python-logo")
# print(logo.size)
# sajat megoldas
event_dictionary = {}
list_of_events = driver.find_elements_by_xpath("/html/body/div/div[3]/div/section/div[2]/div[2]/div/ul/li")
for idx, event in enumerate(list_of_events):
time_tag = event.find_element_by_css_selector("time")
time_of_event = time_tag.get_attribute("datetime").split('T')[0]
a_tag = event.find_element_by_css_selector("a")
name_of_event = a_tag.text
event_dictionary[idx] = {
"time": time_of_event,
"name": name_of_event,
}
print(event_dictionary)
# times_list = driver.find_elements_by_css_selector(".event-widget time")
# events_list = driver.find_elements_by_css_selector(".event-widget li a")
# for idx in range(len(times_list)):
# event_dictionary[idx] = {
# "time": times_list[idx].get_attribute("datetime").split('T')[0],
# "name": events_list[idx].text
# }
#
# print(event_dictionary)
# driver.close() # Adott tabot zar be
driver.quit() # Az egesz bongeszot bezarja, inkabb ez jobb | [
"noreply@github.com"
] | bencekemenyik.noreply@github.com |
7c0139a692222cb79586a6c46000c0c2d38dbf27 | 12fccbeb88f08bce0e6b590d3cd515171788ae2c | /Speech/urls.py | 24b5ca7a67c860158fc9bff46cf44fb2fa5cd05a | [] | no_license | aabbcc0812206523/Avatar | 65b84cd9c698f7d78f12d848e9fb29bd0c7ca21e | 82d2f46823f8d5f6c523a7e06d7bbf760770decc | refs/heads/master | 2020-04-01T09:39:47.997334 | 2017-02-21T21:52:22 | 2017-02-21T21:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^ajax_analyze/$', views.ajax_analyze, name='ajax-analyze'),
] | [
"hanbox2016@outlook.com"
] | hanbox2016@outlook.com |
2642dcc1841f9839672f19ee775f808f44b85d20 | 058c5bb34215da4e489f38e964c950d5b7402b94 | /bubbleSort.py | fe2ed6da16f1c1b2af453c62b6c2b4274d50e63d | [] | no_license | Martinzzy/Basic_Sort-suanfa- | 8970b55198e407db13666e59b719c557a6e8f9db | 6338d3529b85f2c84bba432604b4aa354b827070 | refs/heads/master | 2020-03-13T12:58:11.312274 | 2018-04-27T01:18:59 | 2018-04-27T01:18:59 | 131,129,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #冒泡排序的思想:从列表的开头出开始,并且比较一对数据项,直到移到列表的末尾,每当成对的两项之间的顺序不正确的时候,算法就会交换其位置
#将最大的项以冒泡的方式排列到列表的末尾,然后,算法从列表开头到倒数第2个列表项开始重复这一个过程,依次类推,直到该算法从列表的最后一项开始执行。
#用for循环来进行冒泡排序
def bubbleSort(arr):
    """Sort arr ascending in place using bubble sort; returns arr.

    Fix: the previous body compared non-adjacent elements (arr[i] vs arr[j]
    for j > i), which is an exchange/selection sort, contradicting the
    surrounding "bubble sort" comments.  This version does the documented
    adjacent-pair sweeps and stops early when a pass makes no swaps.
    """
    n = len(arr)
    for unsorted_end in range(n - 1, 0, -1):
        swapped = False
        for j in range(unsorted_end):
            if arr[j] > arr[j + 1]:
                # Out-of-order neighbors: swap so the larger bubbles right.
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break  # already sorted -- no need for further passes
    return arr
#用while循环进行冒泡排序
def bubbleSort(arr):
    """In-place ascending bubble sort (while-loop variant rewritten with
    for-loops): each full sweep swaps adjacent out-of-order pairs, floating
    the largest remaining element to the end of the unsorted region.
    """
    for unsorted_len in range(len(arr), 1, -1):
        for pos in range(1, unsorted_len):
            if arr[pos] < arr[pos - 1]:
                arr[pos], arr[pos - 1] = arr[pos - 1], arr[pos]
| [
"noreply@github.com"
] | Martinzzy.noreply@github.com |
9481ac67037833a7e5a1787d40d3d596b0162dba | 255ebcb73a48bba54e0caacadfe649f6112c018e | /Image Processing/Blur_edge_dect/canny.py | f54f6fdc593d0b3fee993ed6983044706952ded8 | [] | no_license | amrit-das/background_sub | 501dd5ceee50b3050d90eea42c66ed0bd4ff3718 | 91f64105c2d82464a8fc6a0fb8487ba5572b1c34 | refs/heads/master | 2020-03-26T20:31:58.826183 | 2018-08-20T00:43:28 | 2018-08-20T00:43:28 | 145,330,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import cv2
import numpy as np
def CannyThreshold(lowThreshold):
    """Trackbar callback: re-run Canny at `lowThreshold` and redraw.

    Reads the module globals `gray`, `img`, `ratio` and `kernel_size`;
    the high threshold is lowThreshold * ratio.
    """
    detected_edges = cv2.GaussianBlur(gray,(3,3),0)  # denoise before edge detection
    detected_edges = cv2.Canny(detected_edges,lowThreshold,lowThreshold*ratio,apertureSize = kernel_size)
    dst = cv2.bitwise_and(img,img,mask = detected_edges) # just add some colours to edges from original image.
    cv2.imshow('canny demo',dst)
lowThreshold = 0
max_lowThreshold = 100
ratio = 3        # Canny high threshold = low threshold * ratio
kernel_size = 3  # Sobel aperture size passed to cv2.Canny

img = cv2.imread('bike.jpg')  # NOTE(review): returns None if the file is missing -- confirm 'bike.jpg' exists
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

cv2.namedWindow('canny demo')
cv2.createTrackbar('Min threshold', 'canny demo', lowThreshold, max_lowThreshold, CannyThreshold)

CannyThreshold(0)  # initial render at threshold 0
# Bug fix: without waitKey() the window was destroyed immediately, so the
# trackbar could never be used.  Block until any key is pressed.
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"gchourasia123@gmail.com"
] | gchourasia123@gmail.com |
c81bb0a4b958572fe3a26f3797fc1b507e25645b | e1229295d9cb780b4948f8ea34fc70cce9d76946 | /src/ltree/maxDepthOfBinaryTree.py | 3852af126e880be4659438a042d56f118f317628 | [] | no_license | tuobulatuo/Leetcode | ce89ab89dda527ccbe6058b7068bfd00b43f7b41 | 66acce87c64ce05e063be84578e50a23417334a7 | refs/heads/master | 2022-02-25T00:56:26.749416 | 2016-01-19T00:33:05 | 2016-01-19T00:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | __author__ = 'hanxuan'
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
"""
from ltree.TreeNode import TreeNode
def maxDepth(root):
    """
    :param root: TreeNode, or None for an empty tree
    :return: int -- number of nodes on the longest root-to-leaf path
    """
    if root is None:
        return 0
    depth_left = maxDepth(root.left)
    depth_right = maxDepth(root.right)
    deeper = depth_left if depth_left >= depth_right else depth_right
    return deeper + 1
| [
"heroxdream@gmail.com"
] | heroxdream@gmail.com |
12744794d12058e70ddaa03647ebcda94b7ec889 | 9916a3955cd4e35b46b263d5442a88800d53da4c | /day08/Maoyan/Maoyan/spiders/maoyan2.py | 3d9ad0f504ca82fd22d505600cf49e6cfdbbcf78 | [] | no_license | hanon1mous/spider | e6fe85450a3cd3ba053aea4aa338cdd6a2f007f6 | 0f35d32dde6f62030c8e24b7ed3a501c5eb933ed | refs/heads/master | 2020-12-07T17:57:06.350230 | 2020-01-09T09:12:09 | 2020-01-09T09:12:09 | 232,765,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | # -*- coding: utf-8 -*-
import scrapy
from ..items import MaoyanItem
class MaoyanSpider(scrapy.Spider):
    """Scrapy spider for the Maoyan Top-100 film board (board/4)."""
    # Run with: scrapy crawl 'maoyan2'
    name = 'maoyan2'
    allowed_domains = ['maoyan.com']
    # The board is paginated via the `offset` query parameter.
    url = 'https://maoyan.com/board/4?offset={}'
    def start_requests(self):
        # Offsets 0, 10, ..., 90: ten pages of ten films each.
        for page in range(0, 91, 10):
            yield scrapy.Request(
                url=self.url.format(page),
                callback=self.parse
            )
    def parse(self, response):
        # Each <dd> under the board wrapper is one film entry.
        dd_list = response.xpath('//dl[@class="board-wrapper"]/dd')
        for film in dd_list:
            item = MaoyanItem()
            item['name'] = film.xpath('./a/@title').get().strip()
            item['stars'] = film.xpath('.//p[@class="star"]/text()').get().strip()
            item['time'] = film.xpath('.//p[@class="releasetime"]/text()').get().strip()
            yield item
| [
"linkai1115746371@qq.com"
] | linkai1115746371@qq.com |
847aa3dc08b143bad37a16ea82ddcb381e5a7045 | 466c8da73f01f6e598de4f6150a4f53bdeff39c2 | /tree/binary_tree/sum_root_to_leaf129.py | d74fa877f8c36e373a24c80100ce855ba47c160e | [] | no_license | MrinaliniTh/Algorithms | 08db96e230ccc397275504ea07e4f88db5cb027a | 17ea3bf49dcf6f43fdb59b6c56bf0384ebf05145 | refs/heads/master | 2023-06-05T14:39:54.068466 | 2021-06-22T15:34:27 | 2021-06-22T15:34:27 | 293,578,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | class Node:
    def __init__(self, val):
        """Create a node holding `val`; both children start as None."""
        self.val = val     # payload stored at this node
        self.left = None   # left child (Node or None)
        self.right = None  # right child (Node or None)
class BinaryTree:
    """Binary tree wrapper with traversal and root-to-leaf sum helpers."""

    def __init__(self):
        self.tree = None  # root node; None means an empty tree

    def print_binary_tree(self):
        """Print all node values in order (left, node, right)."""
        if self.tree:
            self.get(self.tree)

    def get(self, node):
        """In-order traversal helper: recursively print the subtree."""
        if node:
            self.get(node.left)
            print(str(node.val) + ' ')
            self.get(node.right)

    def find_sum_root_to_leaf(self):
        """Return the sum over all root-to-leaf paths of the numbers formed
        by concatenating node digits along each path (LeetCode 129)."""
        res = []
        self.find_sum(self.tree, res, 0)
        return sum(res)

    def find_sum(self, node, res, cur):
        """Accumulate into `res` the path-number of every leaf below `node`.

        `cur` is the number formed by digits on the path so far.  Fixed the
        inconsistent returns of the original (`return 0` vs bare `return`):
        the return value is never used, so all exits now return None.
        """
        if node is None:
            return
        cur = cur * 10 + node.val
        if node.left is None and node.right is None:
            # Leaf reached: record the completed path number.
            res.append(cur)
            return
        self.find_sum(node.left, res, cur)
        self.find_sum(node.right, res, cur)
# Build a small sample tree:
#           1
#         /   \
#        2     3
#       / \   / \
#      4   5 6   7
node = Node(1)
node.left = Node(2)
node.right = Node(3)
node.left.left = Node(4)
node.left.right = Node(5)
node.right.left = Node(6)
node.right.right = Node(7)
bt = BinaryTree()
bt.tree = node
bt.print_binary_tree()
print(bt.find_sum_root_to_leaf()) | [
"thokchom.m.devi@boeing.com"
] | thokchom.m.devi@boeing.com |
9df581571de2cd5c92f2fe670a11671d8e980a12 | 4fab62bad8797eb48bc44d665dc3f48ec49e876b | /src/androguard/build/scripts-2.7/androgui.py | ce8863cc5347c37d685cbdddb420b2e804bcb6fe | [
"Apache-2.0"
] | permissive | lounchat02/analisis | ae2f5d7911278ae157686a348713d99c9dbc9338 | aaa666aca3bd6d4bd5e48623e432342dcf851d0d | refs/heads/master | 2021-01-10T07:37:52.574940 | 2016-01-05T13:12:08 | 2016-01-05T13:12:08 | 48,122,856 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | #!/home/jburgos/Documentos/Malware/entorno/analisis/bin/python
'''Androguard Gui'''
import argparse
import sys
from androguard.core import androconf
from androguard.session import Session
from androguard.gui.mainwindow import MainWindow
from androguard.misc import init_print_colors
from PySide import QtCore, QtGui
from threading import Thread
class IpythonConsole(Thread):
    """Background thread hosting an embedded interactive IPython shell."""
    def __init__(self):
        Thread.__init__(self)
    def run(self):
        """Thread body: configure and start the IPython session."""
        # Imported lazily so IPython/traitlets are only required when the
        # console is actually requested (the -c flag).
        from IPython.terminal.embed import InteractiveShellEmbed
        from traitlets.config import Config
        cfg = Config()
        ipshell = InteractiveShellEmbed(
            config=cfg,
            banner1="Androguard version %s" % androconf.ANDROGUARD_VERSION)
        init_print_colors()
        ipshell()
if __name__ == '__main__':
    # Command-line entry point for the Androguard GUI.
    parser = argparse.ArgumentParser(description="Androguard GUI")
    parser.add_argument("-d", "--debug", action="store_true", default=False)
    parser.add_argument("-i", "--input_file", default=None)
    parser.add_argument("-c", "--console", action="store_true", default=False)
    args = parser.parse_args()
    if args.debug:
        androconf.set_debug()
    # We need that to save huge sessions when leaving and avoid
    # RuntimeError: maximum recursion depth exceeded while pickling an object
    # or
    # RuntimeError: maximum recursion depth exceeded in cmp
    # http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
    sys.setrecursionlimit(50000)
    session = Session(export_ipython=args.console)
    console = None
    if args.console:
        # Run the IPython console on a background thread alongside the GUI.
        console = IpythonConsole()
        console.start()
    app = QtGui.QApplication(sys.argv)
    window = MainWindow(session=session, input_file=args.input_file)
    window.resize(1024, 768)
    window.show()
    sys.exit(app.exec_())
"Temporal11!"
] | Temporal11! |
faa7c11e3e13102d54f99d965ff44b03f6eb92fc | 627456483e8c743351d68229cdc47a9d9d459cc8 | /inner_class.py | c0ee7df1bd22df0716dd100448988f13227b56f7 | [] | no_license | sunnydavidli/week3 | 78a036790feb501d7fa74cce86859373b6822415 | 49d10d12687e86a3c0c132e2e8bba8baf4b070b3 | refs/heads/master | 2022-09-12T12:27:53.445891 | 2020-05-13T06:59:38 | 2020-05-13T06:59:38 | 263,546,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # define a function firstly
def format_string(string, formatter=None):
    """Format `string` with `formatter`; falls back to title-casing.

    Any object exposing a `format(string)` method may be passed; when
    `formatter` is falsy, a locally defined default formatter is used.
    """

    class DefaultFormatter:
        """Fallback formatter: title-case the stringified input."""

        def format(self, text):
            return str(text).title()

    chosen = formatter if formatter else DefaultFormatter()
    return chosen.format(string)
# Demo: run the default (title-case) formatter over a sample string.
hello_string = "Hello, everyone, nice to meet you!"
print("input: " + hello_string)
print("output: " + format_string(hello_string)) | [
"sunnydavidli@gmail.com"
] | sunnydavidli@gmail.com |
6c5abd4b495148c26595fd3b4245f48693bad9ce | 0ede61f8004258e5b3ca59d3ba75e0cd5888362b | /exp_proc.py | 18e66091f609fbd71d9a0673dac76f5bfbcec053 | [] | no_license | Boris1961/lesson1 | c499d350cadf4011a8e8601287c2587c736a6d84 | 8cc3b73bcf57fcd14c76cab206f7898fd0c59c59 | refs/heads/master | 2020-06-12T23:26:25.809009 | 2017-04-29T10:51:03 | 2017-04-29T10:51:03 | 75,479,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | def compile_str_to_postfix(esource):
    """Tokenize an infix arithmetic expression and compile it to postfix.

    Supported tokens: integer and real numbers, parentheses and the
    operators + - * / and ** (recorded as '^').  On success returns a
    list of ints/floats and operator strings in postfix order; on the
    first problem found returns an '#error: ...' string instead.
    """
    # --- lexical analysis: split the expression into literal tokens ---
    # pattern_scheme classifies each token coarsely: D=number, %=operator,
    # '(' / ')' as themselves.  pattern_detail refines that: I=int, R=real,
    # or the concrete operator character.
    pattern_scheme=""
    pattern_detail=""
    words=[]
    # Adjacent token pairs that are lexically invalid (coarse / refined).
    lex_incomp_sch = [ "D.", "D(", "DD", "()", "%%", "%)" ]
    lex_incomp_det = [ "(*", "(^", "(/" ]
    # '=' is used below as an end-of-input sentinel, so it may not occur.
    if esource.find('=')>=0:
        return('#error: sintax error')
    s=esource+'='
    brace_deep=0
    while s[0]!='=':
        if '.0123456789'.find(s[0])>0: # digit: scan a whole number
            # NOTE(review): index 0 ('.') is excluded by the `> 0` test, so
            # a number cannot start with a bare '.'; characters outside the
            # digit/operator/paren sets are never consumed at all --
            # presumably input is pre-validated, verify with callers.
            pattern_scheme+='D'
            i=1
            while '.0123456789'.find(s[i])>=0:
                i+=1
            c_word=s[0:i]
            if c_word.count('.')==0:
                pattern_detail+=('I')
            elif c_word.count('.')==1:
                pattern_detail+=('R')
            else:
                return "#error: namber is wrong"
        elif s[0]=='(':
            pattern_scheme+='('
            pattern_detail+='('
            c_word='('
            brace_deep+=1
        elif s[0]==')':
            pattern_scheme+=')'
            pattern_detail+=')'
            brace_deep-=1
            if brace_deep<0:
                return "#error: braces no match"
            c_word=')'
        elif s[0]=='*':
            if s[1]=='*':
                # '**' (power) is recorded as '^' in the detail pattern.
                pattern_scheme+='%'
                pattern_detail+='^'
                c_word='**'
            else:
                pattern_scheme+='%'
                pattern_detail+='*'
                c_word='*'
        elif s[0]=='/':
            pattern_scheme+='%'
            pattern_detail+='/'
            c_word='/'
        elif s[0]=='+':
            # Unary plus: insert an implicit 0 operand ("+x" -> "0 + x").
            if len(words)==0 or words[-1]=='(':
                pattern_scheme+='D'
                pattern_detail+='I'
                words.append('0')
            pattern_scheme+='%'
            pattern_detail+='+'
            c_word='+'
        elif s[0]=='-':
            # Unary minus: insert an implicit 0 operand ("-x" -> "0 - x").
            if len(words)==0 or words[-1]=='(':
                pattern_scheme+='D'
                pattern_detail+='I'
                words.append('0')
            pattern_scheme+='%'
            pattern_detail+='-'
            c_word='-'
        s=s[len(c_word):]
        words.append(c_word)
        # Reject lexically incompatible adjacent pairs (e.g. "5(", "()").
        if len(pattern_scheme)>1 and (lex_incomp_sch.count(pattern_scheme[-2]+pattern_scheme[-1])>0 or lex_incomp_det.count(pattern_detail[-2]+pattern_detail[-1])>0):
            return "#error: lexical shit"
    if brace_deep!=0:
        return "#error: braces no match"
    # --- shunting-yard: compile the literal list into postfix order ---
    # '(' gets the highest priority so it is always pushed; ')' the lowest.
    operators_priority = {')': 0, '+': 1, '-': 1, '*': 2, '/': 2, '^': 3, '(': 4}
    stack=[]
    postfix=[]
    for i in range(0,len(words)):
        c_word_det=pattern_detail[i]
        c_word_sch=pattern_scheme[i]
        if c_word_det=='I':
            postfix.append(int(words[i]))
        elif c_word_det=='R':
            postfix.append(float(words[i]))
        elif c_word_det=='(' :
            stack.append(c_word_det)
        elif c_word_det==')' :
            # Pop operators back to the matching '(' (which is discarded).
            while stack[-1]!='(' :
                postfix.append(stack.pop())
            stack.pop()
        elif c_word_sch=='%':
            if len(stack)==0 or stack[-1]=='(' or operators_priority[c_word_det]>operators_priority[stack[-1]]:
                stack.append(c_word_det)
            else:
                # Flush all pending operators down to the nearest '(',
                # then push the current one.
                while len(stack)>0 and stack[-1]!='(' :
                    postfix.append(stack.pop())
                stack.append(c_word_det)
    while len(stack)>0:
        postfix.append(stack.pop(-1))
    return postfix
def execute_postfix(postfix):
    """Evaluate a postfix token list of numbers and +,-,*,/,^ operators.

    Returns the numeric result, or the string "#error: null division"
    when a division by zero is attempted.
    """
    operands = []
    for token in postfix:
        if token not in ['+', '-', '/', '*', '^']:
            # Numbers are simply pushed onto the operand stack.
            operands.append(token)
            continue
        # Binary operator: pop right then left operand.
        right = operands.pop()
        left = operands.pop()
        if token == '+':
            operands.append(left + right)
        elif token == '-':
            operands.append(left - right)
        elif token == '*':
            operands.append(left * right)
        elif token == '/':
            if right == 0:
                return "#error: null division"
            operands.append(left / right)
        else:  # token == '^' (power)
            operands.append(left ** right)
    return operands.pop()
if __name__ == '__main__':
    # Read an infix expression, compile it to postfix, then evaluate it.
    # (Prompt/output strings are Russian: "Enter an expression" /
    # "Postfix notation"; kept verbatim as they are runtime text.)
    c_exp=input("Введи выражение:")
    postfix=compile_str_to_postfix(c_exp)
    print("Постфиксная запись", postfix)
    result=execute_postfix(postfix)
    print(c_exp, '=', result)
| [
"borlov@inbox.ru"
] | borlov@inbox.ru |
1727d04b8a7d1014b6e1d7a1ae539f023ea9f601 | 1713334f9b68255f9adab70175c21f399d0460f3 | /python/125_Valid_Palindrome.py | 4d198f026b9d9fad4550fee87f5e98972fb8c355 | [
"MIT"
] | permissive | coy0725/leetcode | 0a798b7adafe80f726e51c06c34835c4aa51b563 | 743a0bfa22402ec39858dc9c4c7dc531f825b953 | refs/heads/master | 2020-05-21T18:25:09.683714 | 2019-05-11T13:00:40 | 2019-05-11T13:00:40 | 186,132,894 | 2 | 0 | MIT | 2019-05-11T12:55:22 | 2019-05-11T12:55:21 | null | UTF-8 | Python | false | false | 395 | py | class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
alnum_s = [t.lower() for t in s if t.isalnum()]
ls = len(alnum_s)
if ls <= 1:
return True
mid = ls / 2
for i in range(mid):
if alnum_s[i] != alnum_s[ls - 1 - i]:
return False
return True | [
"qiyuangong@gmail.com"
] | qiyuangong@gmail.com |
627d2531befb8e58975114c5db3f2f387fb441f8 | 51e364c41e68c37fbacc326020a8c0cce8241222 | /牛客37-左旋转字符串/1.py | 24c2548239be1e5b3652aa5b48c534eda321945c | [] | no_license | WCC-wcc/-offer | e5fcbbd87296a129dab21c11ba9a3e4bf9742925 | ca484a42ac938b8049c6cc69fec41ec1543c6728 | refs/heads/master | 2022-05-22T05:37:59.739991 | 2020-04-29T11:52:02 | 2020-04-29T11:52:02 | 259,903,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # -*- coding:utf-8 -*-
class Solution:
def LeftRotateString(self, s, n):
# write code here
if n > len(s):
return ""
temp = list(s)
res = temp[n:len(s)] + temp[0:n]
return "".join(res)
# -*- coding:utf-8 -*-
class Solution:
def LeftRotateString(self, s, n):
# write code here
length = len(s)
if n > length:
return ""
s += s
return s[n:n + length] | [
"noreply@github.com"
] | WCC-wcc.noreply@github.com |
4a81e015e407dc2a0034e0e9c14849afc44a0119 | e5b18ed76eba5c4b8c25b950094e6412ab11354e | /strains.py | a5d82381066daa2d8a8a3c9433b1f56b9842d860 | [] | no_license | riddhisera/microbio | c7b0034768bce735daea1649dd4c7a63844f6b5d | 720c11226a56d39e44976fdecba8b8e454719734 | refs/heads/master | 2023-07-08T14:15:21.780742 | 2021-08-09T13:41:54 | 2021-08-09T13:41:54 | 385,215,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | #to get list of unique strains from the csv file
from Bio import Entrez
import csv
Entrez.email = "sera.1@iitj.ac.in"
no = 1
strains = []
filename = "indianKlebStrains58.txt"
import csv
with open('indCompletegenome_60.csv', 'r') as file: #input file name - refer for format, check line 22
reader = csv.reader(file)
itercars = iter(reader)
next(itercars)
for row in itercars:
if row != []:
new_strain = row[2].split(' ')[row[2].split(' ').index("strain")+ 1]
write_file = open("all.txt", "a")
write_file.write(new_strain)
write_file.write("\n")
if new_strain in strains:
None
else:
write_file = open(filename, "a")
write_file.write(new_strain)
write_file.write("\n")
strains.append(new_strain)
| [
"sera.1@iitj.ac.in"
] | sera.1@iitj.ac.in |
4d579d6f89566148e7eb2b5c8c481c5ed1cee3df | 1bb7612234df0e77a479a35a7cee5b288652b45c | /i_entity_query.py | 56dbf96801ccd849d2fcc4208a49f03cc2d089ab | [] | no_license | Jiaozl/mkgDjangoProject | 7e17a663cfc1e388634dfcfffd398bbdcd1c8a6b | 5562f9b300aa0ef71f33db4bd64c1011e470d901 | refs/heads/master | 2020-07-19T23:54:31.520732 | 2019-05-29T12:06:21 | 2019-05-29T12:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # from mkg_demo.api.baseAPI import BaseApi
import py2neo as neo
# BaseApi
class EntityQueryApi():
def __init__(self, name=None):
self.name = name
self.result = {}
def push(self, **kwargs):
# send data to Invoker
# out: (1) 如果数据库没有此实体,请返回 {}
# (2) 如果有数据,返回格式:
#self.result = {
# 'entity': [ {
# name: '名称',
# des: '详细简略信息,<字数控制在60字左右>',
# symbolSize: 60, # 查询实体大小为60, 其余取30
# itemStyle: {
# normal: {
# color: 'red' # 环状颜色填充:相同类型相同颜色,不同类型不同颜色['#f5a5c0', 'yellow', '#f4c89e', '#c23531', '#f2acf6', '#acf3f6', 'acf6c9']
# }
# }
# },...],
# 'relation':
# [{
# source: '蔡成功', # entity1
# target: '欧阳菁', # entity2
# name: "举报" # 关系名
# }...]
# }
self.result={'entity':'李四'}
entity = kwargs.get('entity')
print(entity)
grath = neo.Graph('http://localhost:7474',username='neo4j',password='123456')
return self.result
test = EntityQueryApi(name='张三')
a = test.push()
print(a)
| [
"zhaoguanzhi1992@163.com"
] | zhaoguanzhi1992@163.com |
09176ff5e75a411a9865b882ed9175030b99d1ce | 5ed2a472f097eda40ffbcf3bc99407b8cfb8985b | /PiAlgo/reverse_tan_John_Machin.py | 242223651eadbfb70b6acb0dcbb533406aaea9c0 | [
"MIT"
] | permissive | Page-David/PiPy | 7572c5ce1a3f7f28592e5479be9e66f32f1844eb | d857c61b54909b9a18b354a66fa3093c0905cd4a | refs/heads/master | 2021-06-14T09:23:21.967417 | 2017-02-04T01:28:51 | 2017-02-04T01:28:51 | 79,871,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
from mpmath import *
def pi(digit):
mp.dps = digit
# pi = 4*(12*atan(1/mpf('49')) + 32*atan(1/mpf('57')) - 5*atan(1/mpf('239')) + 12*atan(1/mpf('110443')))
pi = 16 * atan(1/mpf('5')) - 4 * atan(1/mpf('239'))
return str(pi)
| [
"david991010@gmail.com"
] | david991010@gmail.com |
eaa80930195f2d2f5b35a7ac5f752714269efcf5 | 08a693a2e00e6cf88d76b0ba75254c484e8894a4 | /Research/Drawing.py | d4468d518ccbc4fcb266c7d9f1ce87b4e5baeb89 | [] | no_license | ethansudman/Research | 2a68c881130e80448e48e6362dd3838328d80fd9 | 35fd747a9a4cbf02bd2bde246af3b16b257e4015 | refs/heads/main | 2023-06-05T19:17:52.289126 | 2021-07-03T05:07:16 | 2021-07-03T05:07:16 | 328,842,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 6 20:19:36 2021
@author: Ethan_000
"""
import networkx as nx
import numpy as np
import pandas as pd
import glob
import re
import networkx.drawing as dw
import networkx.drawing.layout as lt
import matplotlib.pyplot as plt
import matplotlib
| [
"ethansudman@comcast.net"
] | ethansudman@comcast.net |
7626c1ff2ee5615e69bd18190213dcfcfc7119ef | 11f0cfb1e9c7a66e2a1b4634875cf3f48afa85ae | /lib/fmm.py | 07ad938d4bfbef07c5de9ee02fbda876359b838c | [
"MIT"
] | permissive | schipp/eikonal_tomography | 68bbad91d01890f9d4f0cccc924382cc8efbca73 | a750803ceb492ec6e00583f41e6b8b2d479d5bca | refs/heads/main | 2023-04-11T03:00:10.362692 | 2021-05-09T10:38:09 | 2021-05-09T10:38:09 | 365,721,985 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import skfmm
import numpy as np
def get_traveltimes_for_source_station(source_station, stations, velocity_model):
phi = np.ones((100, 100))
phi[source_station[0], source_station[1]] = -1
traveltimes = skfmm.travel_time(phi, speed=velocity_model)
return traveltimes | [
"sven@Svens-Mac-mini.fritz.box"
] | sven@Svens-Mac-mini.fritz.box |
6669bf6e7e0a28992f2b3512b83b6c2701acd86c | 3252f4fc60d3b5a59a2f3b37e2fce1ea792ef694 | /setup.py | 5a63878c1afa27951d668111d5a614f6eca464a0 | [] | no_license | dustin999/pomodoro-indicator | c861098888e7dd07fac14bdf816f52214fde3bad | be83d05d4296a8eda685a892fad0e01e69ae1af3 | refs/heads/master | 2021-01-22T04:34:00.367896 | 2012-09-24T02:26:14 | 2012-09-24T02:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,027 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright 2011 malev.com.ar
#
# Author: Marcos Vanetta <marcosvanetta@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of either or both of the following licenses:
#
# 1) the GNU Lesser General Public License version 3, as published by the
# Free Software Foundation; and/or
# 2) the GNU Lesser General Public License version 2.1, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the applicable version of the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of both the GNU Lesser General Public
# License version 3 and version 2.1 along with this program. If not, see
# <http://www.gnu.org/licenses/>
#
"""Build tar.gz for pomodoro-indicator.
Needed packages to run (using Debian/Ubuntu package names):
python-appindicator 0.3.0-0ubuntu1
python-gobject 2.28.3-1ubuntu1.1
python-notify 0.1.1-2build4
python-gtk2-dev 0.1.1-2build4
"""
import os, sys
from distutils.command.install import install
from distutils.command.build import build
from distutils.core import setup
PROJECTNAME='pomodoro-indicator'
class CustomInstall(install):
"""Custom installation class on package files.
It copies all the files into the "PREFIX/share/PROJECTNAME" dir.
"""
def run(self):
"""Run parent install, and then save the install dir in the script."""
install.run(self)
for script in self.distribution.scripts:
script_path = os.path.join(self.install_scripts,
os.path.basename(script))
with open(script_path, 'rb') as fh:
content = fh.read()
content = content.replace('@ INSTALLED_BASE_DIR @',
self._custom_data_dir)
with open(script_path, 'wb') as fh:
fh.write(content)
def finalize_options(self):
"""Alter the installation path."""
install.finalize_options(self)
# the data path is under 'prefix'
data_dir = os.path.join(self.prefix, "share",
self.distribution.get_name())
# if we have 'root', put the building path also under it (used normally
# by pbuilder)
if self.root is None:
build_dir = data_dir
else:
build_dir = os.path.join(self.root, data_dir[1:])
# change the lib install directory so all package files go inside here
self.install_lib = build_dir
# save this custom data dir to later change the scripts
self._custom_data_dir = data_dir
def main():
SHARE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"share")
data_files = []
# don't trash the users system icons!!
black_list = ['index.theme', 'index.theme~']
for path, dirs, files in os.walk(SHARE_PATH):
data_files.append(tuple((path.replace(SHARE_PATH,"share", 1),
[os.path.join(path, file) for file in files if file not in
black_list])))
setup(
name = PROJECTNAME,
version = '0.0.2',
license = 'GPL-3',
author = 'The Pomodoro Developers',
author_email = 'marcosvanetta@gmail.com',
description = 'Pomodoro technique app indicator.',
long_description = 'Pomodoro technique app indicator',
url = 'https://launchpad.net/pomodoro-indicator',
packages = ["pomodoro"],
#package_data = {"pomodoro": ["images/*.png", ]},
data_files = data_files,
scripts = [os.path.join("bin","pomodoro-indicator")],
cmdclass = {
'install': CustomInstall
}
)
if __name__ == '__main__':
sys.exit(main())
| [
"dustin@Dustin-xps-15z.(none)"
] | dustin@Dustin-xps-15z.(none) |
3eaaa2cc8db214f0baca34cb57f8743fb0439aba | 983b9660961f6e5066f075a594c635d8bb51f93c | /stellar_ages.py | 6d98f331de75d846ee1b61b356feb403f608102a | [] | no_license | jhcohn/prospector-eelgs | 4205bdfca06e7b1cb1ceb5ba3faf03c3ad1c9c37 | ad629e1623ea889cb8e228bbcc2f7e8d89516fe4 | refs/heads/master | 2021-01-19T23:33:59.433194 | 2018-10-12T22:52:49 | 2018-10-12T22:52:49 | 88,999,832 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,996 | py | import numpy as np
import os
import pickle
import random
import matplotlib.pyplot as plt
def randraw_sfr_perc(infile, num=1000): # num=1000
"""
For a given galaxy, randraw samples the posterior num times for each point in extra_output['extras']['sfh'][i]
:param infile: ID_field_base_extra_out.py, where extra_output is stored using output.py
:param num: number of times to sample the galaxy posterior at each point in the SFH
:return: draw_from_sfh = 22 x num, lists the num random posterior samples, t_sfh = time vector associated with SFH
"""
with open(infile, 'rb') as exout:
extra_output = pickle.load(exout)
draw_from_sfh = np.zeros(shape=(len(extra_output['extras']['sfh']), num)) # shape=(22, num)
# print(len(extra_output['extras']['ssfr']), len(extra_output['extras']['ssfr'][0])) # 22, 2000
# print(len(draw_from_sfh), len(draw_from_sfh[0])) # 22, num
for i in range(len(extra_output['extras']['sfh'])): # at each of these 22 points
for j in range(num): # randomly draw from the ssfr posterior num times
draw_from_sfh[i][j] = extra_output['extras']['sfh'][i][random.randint(0, num)]
median_sfh = []
for i in range(len(draw_from_sfh)): # 22
median_sfh.append(np.percentile(draw_from_sfh[i], [16., 50., 84.])[1]) # median value of draw_from_sfh[i]
return median_sfh, extra_output['extras']['t_sfh']
def stacker(gal_draws):
"""
stacker takes input of random points drawn from a list of galaxies' SFH posteriors, concatenates them within each
bin, and then calculates the median and 1 sigma errors in each bin
gal_draws should be in format draws = [draw_from_sfh1, draw_from_sfh2, ...]
each draw_from_sfh has shape=(22,num)
:param gal_draws: list comprised of draw_from_sfh (i.e. comprised of the output from randraw) for a list of galaxies
:return: perc = stored lists of the median and +/- sigma SFH values calculated from the gal_draws
"""
# len(gal_draws) = number of galaxies in stack; len(gal_draws[0]) = 22, len(gal_draws[0][0]) = num (1000)
all_draws = np.zeros(shape=(len(gal_draws[0]), len(gal_draws[0][0]) * len(gal_draws)))
for k in range(len(gal_draws)):
# append the num=1000 values in each gal_draws[k] at each of the 22 points to all_draws:
note = k * len(gal_draws[0][0])
for i in range(len(gal_draws[k])):
for j in range(len(gal_draws[k][i])):
all_draws[i][note+j] += gal_draws[k][i][j]
print(len(all_draws), len(all_draws[0])) # 22, (number of galaxies in stack) * (num=1000)
perc = np.zeros(shape=(len(gal_draws[0]), 3)) # len(gal_draws[0])=22=len(t); len(perc)=22, len(perc[0])=3
for jj in xrange(len(gal_draws[0])):
perc[jj, :] = np.percentile(all_draws[jj, :], [16.0, 50.0, 84.0]) # median, +/- 34% = +/- 1sigma
return perc
def smooth(perc):
"""
Takes the stacked sfh that is output from stacker and averages the sfr values in each bin, such that the sfr within
each bin is flat, as is the case in the original extra_output['extras']['sfh'] output
:param perc: stored lists of the median and +/1 1sigma SFH values calculated from the gal_draws
:return: smoother = same shape as perc, but with all points within a bin averaged s.t. all points within a bin=flat
"""
# from perc: bin1 0:2, bin2 3:6, bin3 7:10, bin4 11:14, bin5 15:18, bin6 19:22
smoother = np.zeros(shape=(len(perc), 1)) # len(perc[0]))) # shape=(22, 3)
yax = perc # perc[:, j]
for i in range(3):
smoother[i] = (yax[0] + yax[1] + yax[2]) / 3
smoother[i+19] = (yax[-1] + yax[-2] + yax[-3]) / 3
for i in range(4):
smoother[i+3] = (yax[3] + yax[4] + yax[5] + yax[6]) / 4
smoother[i+7] = (yax[7] + yax[8] + yax[9] + yax[10]) / 4
smoother[i+11] = (yax[11] + yax[12] + yax[13] + yax[14]) / 4
smoother[i+15] = (yax[15] + yax[16] + yax[17] + yax[18]) / 4
# print(smoother)
return smoother
def stellar_age(sfr, agebins):
# let's do it
# from sfh: bin1 0:2, bin2 3:6, bin3 7:10, bin4 11:14, bin5 15:18, bin6 19:22
sfr_per_bin = [sfr[1], sfr[4], sfr[8], sfr[13], sfr[17], sfr[21]]
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
avg_bin_age = [] # average age in each bin
for i in range(len(time_per_bin)):
j = 0
time = 0
while j < i:
time += time_per_bin[j]
j += 1
avg_bin_age.append(time + time_per_bin[i]/2)
# SUM((SFR in each bin) * (time in each bin) * (average age of bin)) / SUM(SFR in each bin * time in each bin)
num = []
denom = []
for i in range(len(time_per_bin)):
num.append(sfr_per_bin[i] * time_per_bin[i] * avg_bin_age[i])
denom.append(sfr_per_bin[i] * time_per_bin[i])
numerator = np.sum(num)
denominator = np.sum(denom)
age = numerator / denominator
return age
def get_gal_lists(base, objlists=False, normal=True):
'''
Choose param file, then load lists of relevant EELGs and SFGs that were run with that param file
:param base: base name of param file for the run
:return: list of EELGs run with relevant param file, list of SFGs run with relevant pr file
'''
'''
eelg_list = open('eelg_specz_ids', 'r')
eelgs = []
e_objs = []
e_fields = []
for line in eelg_list:
if line[0] == '#':
pass
else:
cols = line.split()
e_objs.append(cols[1])
e_fields.append(cols[0])
eelgs.append(cols[1] + '_' + cols[0] + '_' + base[0]) # base[0] = fixedmet (or otherbins)
eelg_list.close()
'''
eelg_list = open('Comp_10.dat', 'r')
eelgs = []
e_objs = []
e_fields = []
for line in eelg_list:
if line[0] == '#':
pass
else:
cols = line.split()
if int(cols[0]) - 200000 > 0:
eelgs.append(str(int(cols[0]) - 200000) + '_uds_' + base[0]) # base[1] = noelg (or nother)
e_objs.append(int(cols[0]) - 200000)
e_fields.append('uds')
elif int(cols[0]) - 100000 > 0:
eelgs.append(str(int(cols[0]) - 100000) + '_cosmos_' + base[0]) # base[1] = noelg (or nother)
e_objs.append(int(cols[0]) - 100000)
e_fields.append('cosmos')
else:
eelgs.append(str(int(cols[0])) + '_cdfs_' + base[0]) # base[1] = noelg (or nother)
e_objs.append(int(cols[0]))
e_fields.append('cdfs')
eelg_list.close()
if normal:
lbg_list = open('lbg_ids1', 'r')
flist = {}
lbgs = []
l_objs = []
l_fields = []
for line in lbg_list:
if int(line) - 200000 > 0:
flist[str(int(line) - 200000)] = 'uds'
lbgs.append(str(int(line) - 200000) + '_uds_' + base[1]) # base[1] = noelg (or nother)
l_objs.append(int(line) - 200000)
l_fields.append('uds')
elif int(line) - 100000 > 0:
flist[str(int(line) - 100000)] = 'cosmos'
lbgs.append(str(int(line) - 100000) + '_cosmos_' + base[1])
l_objs.append(int(line) - 100000)
l_fields.append('cosmos')
else:
flist[str(int(line))] = 'cdfs'
lbgs.append(str(int(line)) + '_cdfs_' + base[1])
l_objs.append(int(line))
l_fields.append('cdfs')
lbg_list.close()
else:
lbg_list = open('Comp_14_zm_EL_Z004.awk.dat', 'r')
flist = {}
lbgs = []
l_objs = []
l_fields = []
for line in lbg_list:
if line[0] != '#':
cols = line.split()
if int(cols[0]) - 200000 > 0:
flist[str(int(cols[0]) - 200000)] = 'uds'
lbgs.append(str(int(cols[0]) - 200000) + '_uds_' + base[1]) # base[1] = noelg (or nother)
l_objs.append(int(cols[0]) - 200000)
l_fields.append('uds')
elif int(cols[0]) - 100000 > 0:
flist[str(int(cols[0]) - 100000)] = 'cosmos'
lbgs.append(str(int(cols[0]) - 100000) + '_cosmos_' + base[1])
l_objs.append(int(cols[0]) - 100000)
l_fields.append('cosmos')
else:
flist[str(int(cols[0]))] = 'cdfs'
lbgs.append(str(int(cols[0])) + '_cdfs_' + base[1])
l_objs.append(int(cols[0]))
l_fields.append('cdfs')
lbg_list.close()
if objlists:
return e_objs, e_fields, l_objs, l_fields
else:
return eelgs, lbgs
if __name__ == "__main__":
fico = 1
corr = 0
vary = 0
fifty = 0
fix = 0
newu = 0
short = 0
if fico:
folders = ['pkl_efico/', 'pkl_ncorr/']
base = ['fico', 'corr']
import eelg_fifty_params as param
import eelg_varymet_params as nparam
elif corr:
folders = ['pkl_ecorr/', 'pkl_ncorr/']
base = ['corr', 'corr']
import eelg_varymet_params as param
elif vary:
folders = ['pkl_evar/', 'pkl_nvar/']
base = ['vary', 'vary']
import eelg_varymet_params as param
elif fix:
folders = ['pkl_efix/', 'pkl_nvary/']
base = ['fix', 'vary']
import eelg_fixedmet_params as param
elif newu:
folders = ['pkl_enewu/', 'pkl_nvary/']
base = ['newu', 'vary']
import eelg_newu_params as param
elif fifty:
folders = ['pkl_efifty/', 'pkl_nvary/']
base = ['fifty', 'vary']
import eelg_fifty_params as param
elif short:
folders = ['pkl_eshort/', 'pkl_nshort/']
base = ['short', 'short']
import eelg_short_params as param
else:
folders = ['pkls/', 'nmpkls/']
base = ['fixedmet', 'noelg']
import eelg_fixedmet_params_orig as param
pkls = '/home/jonathan/.conda/envs/snowflakes/lib/python2.7/site-packages/prospector/git/' + folders[0]
l_pkls = '/home/jonathan/.conda/envs/snowflakes/lib/python2.7/site-packages/prospector/git/' + folders[1]
eelgs, lbgs = get_gal_lists(base)
# START STACKING
t1 = []
e_draws = []
boots = []
num_e = 0
e_sample = 0
for glxy in eelgs:
e_sample += 1 # number of galaxies in sample
file = pkls + glxy + '_extra_out.pkl'
if os.path.exists(file):
num_e += 1 # number of galaxies for which we have output
temp = randraw_sfr_perc(file) # temp[0] lists the num=1000 random posterior samples; temp[1] = time vector
e_draws.append(smooth(temp[0]))
t1.append(temp[1])
else:
print(file) # print galaxy if pkls don't exist for it
print('enums', num_e, e_sample)
l_draws = []
num_l = 0
l_sample = 0
for glxy in lbgs:
l_sample += 1 # number of galaxies in sample
file = l_pkls + glxy + '_extra_out.pkl'
if os.path.exists(file):
num_l += 1 # number of galaxies for which we have output
temp = randraw_sfr_perc(file)
l_draws.append(smooth(temp[0]))
else:
print(file) # print galaxy if pkls don't exist for it
print('lnums', num_l, l_sample)
model = param.load_model(objname='21442', field='cdfs')
agebins = model.params['agebins']
if fico:
nmodel = nparam.load_model(objname='7817', field='cdfs')
nagebins = nmodel.params['agebins']
e_age = []
for i in range(len(e_draws)): # len(eelgs)
e_age.append(stellar_age(e_draws[i], agebins) / 1e9)
e_age_percs = np.percentile(e_age, [16., 50., 84.])
l_age = []
for i in range(len(l_draws)): # len(eelgs)
if fico:
l_age.append(stellar_age(l_draws[i], nagebins) / 1e9)
else:
l_age.append(stellar_age(l_draws[i], agebins) / 1e9)
l_age_percs = np.percentile(l_age, [16., 50., 84.])
fig = plt.figure()
ax1 = plt.subplot(1, 1, 1)
ax1.hist(e_age, bins=50, histtype="step", weights=[1./num_e]*len(e_age), normed=False, color='b', lw=2,
label='EELGs')
ax1.hist(l_age, bins=50, histtype="step", weights=[1./num_l]*len(l_age), normed=False, color='r', lw=2,
label='LBGs')
# plot median, +/-1sigma for both histograms
ax1.axvline(x=e_age_percs[1], color='b', linestyle='--', lw=2)
ax1.axvline(x=l_age_percs[1], color='r', linestyle='--', lw=2)
# shade in +/-1sigma region
ax1.axvspan(e_age_percs[0], e_age_percs[2], color='b', alpha=0.2)
ax1.axvspan(l_age_percs[0], l_age_percs[2], color='r', alpha=0.2)
# print(e_age_percs)
print(e_age_percs[1] - e_age_percs[0], e_age_percs[1], e_age_percs[2] - e_age_percs[1])
# print(l_age_percs)
print(l_age_percs[1] - l_age_percs[0], l_age_percs[1], l_age_percs[2] - l_age_percs[1])
ax1.set_ylim(0, 0.1)
# figure labels
fs = 20
ax1.legend(numpoints=1, loc='upper left', prop={'size': fs})
ax1.set_xlabel('Stellar ages [Gyr]', ha='center', fontsize=fs)
ax1.set_ylabel(r'Fraction of galaxies', fontsize=fs)
plt.show()
'''
# AGES (vary)
(0.18668044750262114, 0.8719976513813491, 0.16742254424042702)
(0.17024825284171152, 0.83524879157556775, 0.11934187725706735)
# AGES (vary) - C 14, then C_04, then C_10
(0.23099788101880292, 0.92203538712313915, 0.15938519673082208) # C_14
(0.15307990804784499, 0.85969159869973266, 0.16347328836410369) # C_04
(0.24644009000896339, 0.82414637217865405, 0.22501361212020365) # C_10
# AGES (short)
(0.09660974411331974, 0.44348389790922788, 0.055033720340872394)
(0.085011178245219421, 0.4455555097442836, 0.042534426024111394)
# AGES (fix)
(0.15090972221525728, 0.59494284124772501, 0.21548295765867065)
(0.16691239230645616, 0.82298808510664001, 0.15309535095412585)
'''
| [
"joncohn@tamu.edu"
] | joncohn@tamu.edu |
8f5e62b99442a5e86fccd3cb4ceafa38f1881d42 | d5cc357480a6fba171c72c4b78a67d8ca3f5c1a4 | /forms.py | b2a1260fe3d5fcadc39d00b4c623c870e3063de8 | [] | no_license | Yosombo/auth_exercise- | f0fa9b88e4dbfb54ac39d33979640dc44f9c7aac | 4cae0437157aaa7c819bb03db181947c8fba437b | refs/heads/master | 2023-07-08T12:03:43.635207 | 2021-08-11T07:51:18 | 2021-08-11T07:51:18 | 394,908,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired
from wtforms_alchemy import model_form_factory
from models import User, db
BaseModelForm = model_form_factory(FlaskForm)
class UserForm(FlaskForm):
username = StringField("Username", validators=[InputRequired()])
password = PasswordField("Password", validators=[InputRequired()])
email = StringField("Email", validators=[InputRequired()])
first_name = StringField("First name", validators=[InputRequired()])
last_name = StringField("Last name", validators=[InputRequired()])
class LoginForm(FlaskForm):
username = StringField("Username", validators=[InputRequired()])
password = PasswordField("Password", validators=[InputRequired()])
# class ModelForm(BaseModelForm):
# @classmethod
# def get_session(self):
# return db.session
# class UserForm(ModelForm):
# class Meta:
# model = User
# include_primary_keys = True
| [
"yosomboo@gmail.com"
] | yosomboo@gmail.com |
8cc018e415c9e6bd636bc0dd00afe49c1c5b3b96 | d78ff11af64b9fb4a4a3f198592ab56ce03a50fd | /wowsitosite/wsgi.py | 20d8123bfe13f9c5807225de4e60ab203e11c12d | [] | no_license | f94f/wowsitosite | 31ae052572be336c7633f4cd6328c56fcc08dd99 | ff7ecb66bad3ecc97862b535de1ebede11d32bdd | refs/heads/main | 2023-04-14T01:22:03.149361 | 2021-04-22T13:19:32 | 2021-04-22T13:19:32 | 353,191,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for wowsitosite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wowsitosite.settings')
application = get_wsgi_application()
| [
"f.mate10@uniandes.edu.co"
] | f.mate10@uniandes.edu.co |
0021e87bd28d03c367ac243ec599af910864d24b | 72997b492fdcd80c855efc06ba5ded3c3cbc8a81 | /Notices.py | 2c85fed1a0576546e653439972ad13b5e7c2f8b5 | [] | no_license | whatifigo/nsut-website | f9d86354d1c8415c577e842a41c24fe7e3446c15 | aab7e42a2cb8638f1930112323f6f320e76e0220 | refs/heads/master | 2022-11-29T23:05:36.072480 | 2020-08-02T00:19:29 | 2020-08-02T00:19:29 | 284,359,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from . import db
class Notices(db.Model):
__tablename__ = 'notices'
notice_id = db.Column(db.Integer(), primary_key=True, autoincrement = True)
title = db.Column(db.String(100))
category = db.Column(db.Integer())
new = db.Column(db.Integer())
url = db.Column(db.String(500))
#support constructor with no arguments so that rows can be added from admin panel
def __init__(self, title="", category = 0, new = 0, url=""):
self.title = title
self.category = category
self.new = new
self.url = url
def __repr__(self):
return "<Notice(news_id='%s',title='%s')>" % (
self.notice_id, self.title)
def get_id(self):
return self.faculty_id | [
"noreply@github.com"
] | whatifigo.noreply@github.com |
1eba03259747d6475ffed567575d41bc12d3e286 | 22e050e4f14d01d4c364982181052641c16ce4aa | /Tests/Exame 2012-2013 1fase.py | 0b3b41db340171ddac4804f5811842b7a8ec4920 | [] | no_license | andrefmrocha/MNUM_18_19 | b04eaa3063300f8f7ae12fd96c580cbde46488b6 | 6fe11f260a60b5b8f6e969855457c0341a14b0cf | refs/heads/master | 2020-03-31T18:35:07.022461 | 2019-01-14T18:26:19 | 2019-01-14T18:26:19 | 152,464,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import math
def math_ex_4(x):
return math.exp(1.5 * x)
def simpson(a, b, h, func):
n = int(abs(b-a)/h)
values = []
integral = 0
for i in range(1, n, 2):
values.append(func(a + i * h) * 4)
for i in range(2, n, 2):
values.append(func(a + i * h) * 2)
for i in values:
integral+=i
integral+= func(a) + func(b)
integral*=h/3
return integral
# print(simpson(1 , 1.5, 0.125/4, math_ex_4))
def math_ex_5(x):
return x - 3.7 + pow(math.cos(x + 1.2), 3)
def math_ex_dev_5(x):
return 1 - 3 * pow(math.cos(x + 1.2), 2)*math.sin(x + 1.2)
def newton_first_iter(x, func, func_dev, error):
return (x - func(x)/func_dev(x))
print(newton_first_iter(3.8, math_ex_5, math_ex_dev_5, 10**(-5))) | [
"andrefmrocha@live.com.pt"
] | andrefmrocha@live.com.pt |
d9e06504505b6a186387d2ff84264d0ecf0308fb | 83d657c787529f01a8ecc8a874421738a7eecec7 | /Paths/Harmonise Curve to Line.py | 753600a50daceb8ddc9121810ba918269ff339b9 | [
"Apache-2.0"
] | permissive | BurgAndOeden/Glyphs-Scripts | e31b5164b491dfe0cd2d57f6cf1422c4aadda104 | f0195d6b8f0a6c055e4e44d5ef41ba48bdd1e3a6 | refs/heads/master | 2020-09-16T08:01:06.345898 | 2019-11-24T00:15:44 | 2019-11-24T00:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,124 | py | #MenuTitle: Harmonise Curve to Line
# -*- coding: utf-8 -*-
__doc__="""
Maximises opposing handles and reduces adjacent handles of line segments.
"""
from Foundation import NSPoint
def intersectionWithNSPoints( pointA, pointB, pointC, pointD ):
"""
Returns an NSPoint of the intersection AB with CD.
Or False if there is no intersection
"""
try:
x1, y1 = pointA.x, pointA.y
x2, y2 = pointB.x, pointB.y
x3, y3 = pointC.x, pointC.y
x4, y4 = pointD.x, pointD.y
try:
slope12 = ( float(y2) - float(y1) ) / ( float(x2) - float(x1) )
except:
# division by zero if vertical
slope12 = None
try:
slope34 = ( float(y4) - float(y3) ) / ( float(x4) - float(x3) )
except:
# division by zero if vertical
slope34 = None
if slope12 == slope34:
# parallel, no intersection
return None
elif slope12 is None:
# first line is vertical
x = x1
y = slope34 * ( x - x3 ) + y3
elif slope34 is None:
# second line is vertical
x = x3
y = slope12 * ( x - x1 ) + y1
else:
# both lines have an angle
x = ( slope12 * x1 - y1 - slope34 * x3 + y3 ) / ( slope12 - slope34 )
y = slope12 * ( x - x1 ) + y1
intersectionPoint = NSPoint( x, y )
if bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointB, pointA ) and bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointC, pointD ):
if pointIsBetweenOtherPoints( intersectionPoint, pointB, pointA ) or pointIsBetweenOtherPoints( intersectionPoint, pointC, pointD ):
return None
return intersectionPoint
else:
return None
except Exception as e:
print str(e)
import traceback
print traceback.format_exc()
return None
def pointDistance( P1, P2 ):
"""Calculates the distance between P1 and P2."""
x1, y1 = P1.x, P1.y
x2, y2 = P2.x, P2.y
dist = ( ( float(x2) - float(x1) ) ** 2 + ( float(y2) - float(y1) ) **2 ) ** 0.5
return dist
def bezier( x1, y1, x2,y2, x3,y3, x4,y4, t ):
x = x1*(1-t)**3 + x2*3*t*(1-t)**2 + x3*3*t**2*(1-t) + x4*t**3
y = y1*(1-t)**3 + y2*3*t*(1-t)**2 + y3*3*t**2*(1-t) + y4*t**3
return x, y
def bothPointsAreOnSameSideOfOrigin( pointA, pointB, pointOrigin ):
returnValue = True
xDiff = (pointA.x-pointOrigin.x) * (pointB.x-pointOrigin.x)
yDiff = (pointA.y-pointOrigin.y) * (pointB.y-pointOrigin.y)
if xDiff <= 0.0 and yDiff <= 0.0:
returnValue = False
return returnValue
def pointIsBetweenOtherPoints( thisPoint, otherPointA, otherPointB) :
returnValue = False
xDiffAB = otherPointB.x - otherPointA.x
yDiffAB = otherPointB.y - otherPointA.y
xDiffAP = thisPoint.x - otherPointA.x
yDiffAP = thisPoint.y - otherPointA.y
xDiffFactor = divideAndTolerateZero( xDiffAP, xDiffAB )
yDiffFactor = divideAndTolerateZero( yDiffAP, yDiffAB )
if xDiffFactor:
if 0.0<=xDiffFactor<=1.0:
returnValue = True
if yDiffFactor:
if 0.0<=xDiffFactor<=1.0:
returnValue = True
return returnValue
def divideAndTolerateZero( dividend, divisor ):
if float(divisor) == 0.0:
return None
else:
return dividend/divisor
def handleLength(a,b,intersection):
return pointDistance(a,b)/pointDistance(a,intersection)
def moveHandle(a,b,intersection,bPercentage):
x = a.x + (intersection.x-a.x) * bPercentage
y = a.y + (intersection.y-a.y) * bPercentage
return NSPoint(x,y)
Font = Glyphs.font
if len(Font.selectedLayers) > 1:
selectionCounts = False
elif not Font.selectedLayers[0].selection:
selectionCounts = False
else:
selectionCounts = True
for selectedLayer in Font.selectedLayers:
selectedGlyph = selectedLayer.parent
selectedGlyph.beginUndo()
# put original state in background:
selectedLayer.contentToBackgroundCheckSelection_keepOldBackground_(False,False)
for path in selectedLayer.paths:
for n in path.nodes:
processedHandles = []
if (n.selected or not selectionCounts) and n.type == OFFCURVE:
# determine the segment:
if n.prevNode.type == OFFCURVE:
a = n.prevNode.prevNode
b = n.prevNode
c = n
d = n.nextNode
else:
a = n.prevNode
b = n
c = n.nextNode
d = n.nextNode.nextNode
if not a in processedHandles and not b in processedHandles:
# intersection of the magic triangle:
intersection = intersectionWithNSPoints( a.position, b.position, c.position, d.position )
if intersection:
# calculate percentages:
bLength = handleLength(a,b,intersection)
cLength = handleLength(d,c,intersection)
shortLength = (abs(bLength) + abs(cLength) - 1.0) - (1.0-abs(bLength))*(1.0-abs(cLength))
if d.nextNode.type == LINE and a.prevNode.type != LINE and d.connection == GSSMOOTH:
# max handle:
b.position = intersection
# reduced handle:
c.position = moveHandle(d,c,intersection,shortLength)
elif a.prevNode.type == LINE and d.nextNode.type != LINE and a.connection == GSSMOOTH:
# max handle:
c.position = intersection
# reduced handle:
b.position = moveHandle(a,b,intersection,shortLength)
# mark handles as processed:
processedHandles.append(a)
processedHandles.append(b)
selectedGlyph.endUndo()
| [
"res@glyphsapp.com"
] | res@glyphsapp.com |
ca6614d1525ec0e63d5e206a1c3a583f1a55a64d | 8ff0d563a294ccebca4f6310a72474bfe01a6733 | /user_input.py | dfc8aa3ebffe7dfce10bd29d66cb0ada3faa03c6 | [] | no_license | luisdomin5/PdfBundler | e46de4c545a511b91a255535d73e3687395f92ed | 1cb43dee1f4e443c2fa42e28eb58a1abafe46d31 | refs/heads/master | 2021-09-22T01:32:47.287357 | 2018-09-04T20:59:39 | 2018-09-04T20:59:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | import os
from collections import OrderedDict
# Ordered mapping of source PDF -> pages to pull; order controls bundle order.
books_pages_dict = OrderedDict()
# Stand-alone article PDFs appended to the bundle as-is.
art_bun = []
'''
1. Add bookname.pdf to bundler folder
2. Specify booknames and associated pages into books_and_pages
3. Specify with True or False if book cover(s) should be included in bundle
'''
#Format of books_pages_dict is - books_pages_dict['bookname.pdf'] = [pages]
#Format of [pages] is - [pagenumber, [pagerange_firstpage, pagerange_lastpage], pagenumber] - ad infinitum
books_pages_dict['Quick.pdf'] = [[41, 55], [201, 219], [234, 246]]
books_pages_dict['McRae.pdf'] = [[247, 249], [269, 271], [272, 273],
[275, 282], [284, 288], 290, 294, [296, 299]]
books_pages_dict['Kumar.pdf'] = [0]
art_bun.append('genga2017.pdf')
art_bun.append('lameire2005.pdf')
output_folder_dest = None
# Week counter; bumped below until an unused "Week N" folder is found.
folder_index = 1
def set_path():
    """Return <script dir>/Reading Material/Week <folder_index>."""
    current_folder = os.path.dirname(os.path.realpath(__file__))
    main_folder = 'Reading Material'
    new_folder = 'Week ' + str(folder_index)
    path = os.path.join(current_folder, main_folder, new_folder)
    return path
# Find the first "Week N" folder that does not exist yet, create it together
# with a "Bundles" subfolder, and remember it as the output destination.
while True:
    if not os.path.exists(set_path()):
        os.makedirs(set_path())
        os.makedirs(os.path.join(set_path(), 'Bundles'))
        output_folder_dest = (set_path())
    else:
        folder_index += 1
        continue
    break
#Format of book_cover_included is - True or False
book_cover_included = True
"y.n.hanekamp@student.rug.nl"
] | y.n.hanekamp@student.rug.nl |
cb7a47466e39d25e935d6597fa0487fe85a53f8e | e808cd98533cb48e2db219fd6cddfa623781202a | /I'm the villian/main2.py | 45db35e5655eb2e74b9e51a0952e0e6163033fb8 | [] | no_license | bendtherules/pytry | 5685d43960c7b0f913d63a88d5678b6470e6e2d1 | 851c36b887e60c1f9760cf657812186b0822ef22 | refs/heads/master | 2021-01-23T08:11:13.159849 | 2014-02-27T11:37:52 | 2014-02-27T11:37:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,513 | py | # all imports
import math;
import pygame;
from pygame.locals import *
# init
pygame.init()
# setting display
surf=pygame.display.set_mode((400,400))
pygame.display.set_caption("Wtf is this??")
#custom image-loading function
def load_image(self, sprite_filename=None, alpha=None):
    """Attach a sprite surface to *self* as both self.spr and self.image.

    alpha: None -> convert() only; 1 -> convert() plus a colorkey sampled
    from the bottom-right pixel; 2 -> convert_alpha() (per-pixel alpha).
    With no filename, both attributes are set to None.
    """
    if sprite_filename is None:
        self.image = self.spr = None
        return
    surface = pygame.image.load(sprite_filename)
    if alpha is None:
        surface = surface.convert()
    elif alpha == 1:
        surface = surface.convert()
        corner = (surface.get_width() - 1, surface.get_height() - 1)
        surface.set_colorkey(surface.get_at(corner))
    elif alpha == 2:
        surface = surface.convert_alpha()
    # spr and image deliberately refer to the same surface
    self.spr = surface
    self.image = surface
class obj(pygame.sprite.Sprite):
    '''Base game object: position, velocity and an optional sprite image.

    Can be inherited as a base class.'''
    #all_obj_derived=pygame.sprite.Group();

    def __init__(self, x, y, sprite_filename=None, alpha=None):
        ''' alpha may have values None(no alpha set),1(set_colorkey as the bottom-right corner),2(per-pixel alpha)'''
        super(obj, self).__init__()
        self.x = x
        self.y = y
        self.xprevious = x
        self.yprevious = y  # ToDO : Make way for acceleration ("force" model) and setting speed
        self.hspeed = 0
        self.vspeed = 0
        global load_image
        load_image(self, sprite_filename, alpha)
        # as it is a derived class of Sprite, it can be directly added to gr group
        #obj.all_obj_derived.add(self);
        #obj.list.append(self);

    def draw_self(self):
        """Blit the sprite centered on (self.x, self.y)."""
        # BUG FIX: load_image() stores the surface as self.image/self.spr;
        # self.sprite was never assigned, so this method raised AttributeError.
        surf.blit(self.image, (self.x - self.image.get_width() / 2, self.y - self.image.get_height() / 2))

    def update(self):
        ''' Should be called after draw_self (or as the last method) to update xprevious.
        If not called, get_speed(),get_hspeed(),get_vspeed() won't work'''
        self.xprevious = self.x
        self.yprevious = self.y
        self.x += self.hspeed
        self.y += self.vspeed
        self.speed = int(math.sqrt(self.hspeed ** 2 + self.vspeed ** 2))
        # keep the collision rect in sync with the current position
        self.rect = self.image.get_rect(center=(self.x, self.y))
#custom-made functions
def update_inside(group_name):
    '''e.g. group_name=a.all_instances.
    Calls group_name.update only when sprite.image is within screen, else delete it.'''
    for member in group_name.sprites():
        still_on_screen = member.x - member.image.get_width() / 2 < surf.get_width()
        if still_on_screen:
            member.update()
        else:
            group_name.remove(member)
# creating new classes
class player(obj):
    # Shared group holding every player instance for batch update/clear/draw.
    all_instances=pygame.sprite.RenderUpdates();
    def __init__(self,x,y,sprite_filename,alpha=None):
        super().__init__(x,y,sprite_filename,alpha)
        # register the new instance in the shared render group
        player.all_instances.add(self);
# Two demo players; a1 drifts slowly downward.
a1=player(surf.get_width()/2,surf.get_height()/2,"a.png",alpha=1)
a1.vspeed=.1;
a2=player(100,100,"a.png",alpha=1)
# game startup code
c=pygame.time.Clock();
#pygame.display.update()
pygame.display.update(surf.fill((0,0,0)))
# main game loop
while True:
    # event check
    for event in pygame.event.get():
        if event.type == QUIT:
            exit();
    c.tick();
    # show the current frame rate in the window title
    pygame.display.set_caption(str(c.get_fps()));
    # cull players that left the screen and move the rest
    update_inside(player.all_instances);
    # erase previous frame's sprites, then update only the dirty rectangles
    player.all_instances.clear(surf,pygame.Surface((400,400)))
    pygame.display.update(player.all_instances.draw(surf));
    #Events here
    #--------------------------------------------------------- end---------------------------------------------------#
"abhas_2016@students.becs.ac.in"
] | abhas_2016@students.becs.ac.in |
6fdca42faef0b0dd2671c4ca0610544f839ebf09 | 76e25b82e8ab21e83c545d08d433128daf3cf09c | /wildfire/models/__init__.py | 71401021e136b863067ff39fb7d0592eb94c7152 | [
"MIT"
] | permissive | joyprojects/wildfire | a2ea2f931df0595ec8e2489a3b1abe735a9b0169 | db04c3749832ff77ffc618dd2380f8ea23dda53d | refs/heads/master | 2020-09-11T16:39:47.384542 | 2020-04-29T21:30:11 | 2020-04-29T21:30:11 | 222,127,533 | 1 | 1 | MIT | 2020-04-27T19:46:44 | 2019-11-16T16:29:25 | Python | UTF-8 | Python | false | false | 28 | py | """Models for wildfires."""
| [
"noreply@github.com"
] | joyprojects.noreply@github.com |
998dbc4a900cf93aa3ee0d2e520aed575aca4de5 | 02ad25c4ac78a98b5493a2aa7f744a77f381aaae | /dashboard_app/migrations/0010_auto_20201211_0846.py | 2168834a1f6db118e06a45e41521adce387ce856 | [] | no_license | cavidanhasanli/Havayi | 1f85d0d7608c964b0ddc80e3b526b32cdb81e8bf | bd30c9e3e700c7381b5961b5051cbcb398adc449 | refs/heads/main | 2023-02-03T09:25:03.866784 | 2020-12-22T18:09:07 | 2020-12-22T18:09:07 | 316,319,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # Generated by Django 3.1.3 on 2020-12-11 08:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: deletes the CreditTypeInterest model and adds
    credit_type (FK to creditfields) and interest fields to BankList."""

    dependencies = [
        ('dashboard_app', '0009_auto_20201211_0839'),
    ]

    operations = [
        migrations.DeleteModel(
            name='CreditTypeInterest',
        ),
        migrations.AddField(
            model_name='banklist',
            name='credit_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dashboard_app.creditfields'),
        ),
        migrations.AddField(
            model_name='banklist',
            name='interest',
            # interest is a percentage constrained to (0.1, 100]
            field=models.FloatField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0.1), django.core.validators.MaxValueValidator(100)]),
        ),
    ]
| [
"cavidan.hasanli@mail.ru"
] | cavidan.hasanli@mail.ru |
dec34afc8e102f80246c36dd1e04c34324b28d36 | b2b2130f18ba293f0a4cceab8094f1a4addc04f6 | /Basics/3.FactorialOfGivenNumber.py | 2d95dfb5ff4df32440ba23de6b83607b1755bccf | [] | no_license | ShuklaG1608/PythonPrograms | fdc16f22b8fe2674792c25b78b7942411d63e861 | ad2435a21b279b38014856db00efb1ccb5377358 | refs/heads/main | 2023-08-11T09:42:11.353698 | 2021-09-24T18:42:24 | 2021-09-24T18:42:24 | 409,683,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #Factorial of Given Number
# Factorial of a user-supplied integer
num = int(input(" Please Enter the Number : "))
fact = 1
if num < 0:
    # BUG FIX: the original fell through to the loop for negative input and
    # wrongly printed "Factorial of -n = 1" because range(1, num + 1) is empty.
    print(f' Factorial is not defined for negative number {num}')
elif num == 0 or num == 1:
    print(f' Factorial of {num} = {fact}')
else:
    for i in range(1, num + 1):
        fact = fact * i
    print(f' Factorial of {num} = {fact}')
"46064636+ShuklaG1608@users.noreply.github.com"
] | 46064636+ShuklaG1608@users.noreply.github.com |
d4e96ddfa8c091f87bd220375da45cf8ce6295f4 | 679ce4b323f79b2425976201324c6c1f88b95199 | /Python/Stanley Cup/csv_parser.py | 53294c7db661e390948575da2be855cee905e598 | [] | no_license | abriggs914/Coding_Practice | ff690fb5f145a11f4da144f3882b37f473b10450 | 3afd7c59e0d90f0ef5f6203853e69f853312019b | refs/heads/master | 2023-08-31T04:04:58.048554 | 2023-08-29T13:23:29 | 2023-08-29T13:23:29 | 161,865,421 | 0 | 1 | null | 2022-10-27T08:35:29 | 2018-12-15T03:20:14 | Python | UTF-8 | Python | false | false | 7,689 | py | import csv
from utility import *
file_name = "past winners.csv"
# skip 2005 back fill
with open(file_name) as csv_file:
lines = csv.DictReader(csv_file)
data_by_year = {}
header = lines.fieldnames
print("header", header)
last = None
for i, line in enumerate(lines):
if last is not None:
if any([val is None or val == "" for val in line.values()]):
#print("missing values, check last:", last)
if line["Year"] == "2005":
continue
for last_key, curr_key in zip(last, line):
last_val = last[last_key]
curr_val = line[curr_key]
if curr_val is None or curr_val == "":
line[curr_key] = last_val
line["Winning Team"] = line["Winning Team"].split("(")[0].strip()
line["Losing Team"] = line["Losing Team"].split("(")[0].strip()
print(dict_print(line))
data_by_year[str(line["Year"])] = line
if 0 < i:
last = line
data_by_year = {k:v for k, v in data_by_year.items() if "1995" <= k}
print(dict_print(data_by_year, "data_by_year"))
data_by_team = {}
data_by_coach = {}
first_year = None
last_year = None
for key, val in data_by_year.items():
year = int(key)
if first_year is None:
first_year = year
if last_year is None or year > last_year:
last_year = year
w_team = val["Winning Team"]
l_team = val["Losing Team"]
if w_team not in data_by_team:
data_by_team[w_team] = {"WYear": [], "LYear": [], "appearances": 0}
if l_team not in data_by_team:
data_by_team[l_team] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_team[w_team]["WYear"].append(key)
data_by_team[l_team]["LYear"].append(key)
data_by_team[w_team]["appearances"] += 1
data_by_team[l_team]["appearances"] += 1
data_by_team[w_team]["W% (per appearance)"] = len(data_by_team[w_team]["WYear"]) / data_by_team[w_team]["appearances"]
data_by_team[l_team]["W% (per appearance)"] = len(data_by_team[l_team]["WYear"]) / data_by_team[l_team]["appearances"]
data_by_team[l_team]["L% (per appearance)"] = len(data_by_team[l_team]["LYear"]) / data_by_team[l_team]["appearances"]
data_by_team[w_team]["L% (per appearance)"] = len(data_by_team[w_team]["LYear"]) / data_by_team[w_team]["appearances"]
w_coach = val["WCoach"]
l_coach = val["LCoach"]
if w_coach not in data_by_coach:
data_by_coach[w_coach] = {"WYear": [], "LYear": [], "appearances": 0}
if l_coach not in data_by_coach:
data_by_coach[l_coach] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_coach[w_coach]["WYear"].append(key)
data_by_coach[l_coach]["LYear"].append(key)
data_by_coach[w_coach]["appearances"] += 1
data_by_coach[l_coach]["appearances"] += 1
data_by_coach[w_coach]["W% (per appearance)"] = percent(len(data_by_coach[w_coach]["WYear"]) / data_by_coach[w_coach]["appearances"])
data_by_coach[l_coach]["W% (per appearance)"] = percent(len(data_by_coach[l_coach]["WYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[l_coach]["L% (per appearance)"] = percent(len(data_by_coach[l_coach]["LYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[w_coach]["L% (per appearance)"] = percent(len(data_by_coach[w_coach]["LYear"]) / data_by_coach[w_coach]["appearances"])
teams_list = list(data_by_team.keys())
teams_list.sort()
for team in data_by_team:
w_list = data_by_team[team]["WYear"]
l_list = data_by_team[team]["LYear"]
data_by_team[team]["Appearance % ({} to {})".format(first_year, last_year)] = percent((len(w_list) + len(l_list)) / (last_year - first_year))
data_by_team[team]["Appearance W% ({} to {})".format(first_year, last_year)] = percent(len(w_list) / (last_year - first_year))
data_by_team[team]["Appearance L% ({} to {})".format(first_year, last_year)] = percent(len(l_list) / (last_year - first_year))
#data_by_team[team]["won_against"] = []
#data_by_team[team]["lost_against"] = []
greatest_rival = None
most_lost_to = None
most_won_against = None
for team_b in teams_list:
# if team != team_b:
if team_b not in data_by_team[team]:
data_by_team[team][team_b] = {"won_against": [], "lost_against": []}
for year in data_by_team[team]["WYear"]:
if data_by_year[year]["Losing Team"] == team_b:
data_by_team[team][team_b]["won_against"].append(year)
for year in data_by_team[team]["LYear"]:
if data_by_year[year]["Winning Team"] == team_b:
data_by_team[team][team_b]["lost_against"].append(year)
if greatest_rival is None:
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) > len(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) == len(greatest_rival[1]):
if data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]) > max(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
if most_lost_to is None:
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) > len(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) == len(most_lost_to[1]):
if data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["lost_against"]) > max(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
if most_won_against is None:
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) > len(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) == len(most_won_against[1]):
if data_by_team[team][team_b]["won_against"]:
if max(data_by_team[team][team_b]["won_against"]) > max(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
data_by_team[team]["greatest_rival"] = greatest_rival
if most_lost_to[1]:
data_by_team[team]["most_lost_to"] = most_lost_to
if most_won_against[1]:
data_by_team[team]["most_won_against"] = most_won_against
print(dict_print(data_by_team, "Data By Team"))
print("parsed teams:\n", "\n".join(teams_list))
for coach in data_by_coach:
w_list = data_by_coach[coach]["WYear"]
l_list = data_by_coach[coach]["LYear"]
data_by_coach[coach]["Appearance % ({} to {})".format(first_year, last_year)] = (len(w_list) + len(l_list)) / (last_year - first_year)
data_by_coach[coach]["Appearance W% ({} to {})".format(first_year, last_year)] = len(w_list) / (last_year - first_year)
data_by_coach[coach]["Appearance L% ({} to {})".format(first_year, last_year)] = len(l_list) / (last_year - first_year)
print(dict_print(data_by_coach, "Data By Team"))
coaches_list = list(data_by_coach.keys())
coaches_list.sort()
print("parsed coaches:\n", "\n".join(coaches_list))
# count # time each team / coach has won.
# count # time each team met and won/lost against each other team.
# count # GWG -> period, timeOfPeriod
| [
"abriggs1@unb.ca"
] | abriggs1@unb.ca |
0a59fb1d8a9f7ddd39f8695a2a5953734f4052f1 | 07e05982d312aa91e32882ee4a05b62eda7cad49 | /div/div-2.py | 0878c6c69ef7acd7a43861d4622e4068e084c823 | [
"MIT"
] | permissive | khalidammarmo/python-cs | 172344a44fc0af1014425a849f45516638e26dab | 11fbab99f036d7460b8356cd1aff1a550f5ab5e4 | refs/heads/master | 2020-06-04T01:33:42.861492 | 2018-09-10T15:05:30 | 2018-09-10T15:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
Time to fix the div() so it gives right answers.
"""
def get_values():
    """
    Retrieves two values to divide
    """
    # NOTE(review): the first prompt says "divisor" but div() treats terms[0]
    # as the dividend — confirm the intended argument order.
    x = input("What is the divisor? ")
    y = input("What is the dividend? ")
    try:
        #Before, the program would have failed on non-integer input. Now we can catch it.
        int_x = int(x)
        int_y = int(y)
    except ValueError: #If the inputs can't be parsed into integers
        print("We need two integers to divide!")
        return get_values() #On an error, we're going to run the function again
    return (int_x,int_y)
def div(terms):
    """
    Divide terms[0] by terms[1] via repeated subtraction.

    Takes a 2-tuple TERMS and returns (quotient, remainder).
    Raises ZeroDivisionError for a zero divisor — the original looped forever.
    """
    dividend, divisor = terms[0], terms[1]
    if divisor == 0:
        # BUG FIX: with divisor 0 the while-condition never turns false.
        raise ZeroDivisionError("integer division or modulo by zero")
    quotient = 0
    # Subtract the divisor until doing so would go negative.
    while dividend - divisor >= 0:
        dividend -= divisor
        quotient += 1
    # Fun bonus! The dividend, after all this subtraction, is the remainder.
    return (quotient, dividend)
# Script entry: read the two numbers, divide, and print (quotient, remainder).
values = get_values()
answer = div(values)
print(answer)
| [
"michael.t.taggart@gmail.com"
] | michael.t.taggart@gmail.com |
995c1e2fab4e27106a598531c2b07f1713e8a134 | c87626ae68f83784c1aea9b6f8f81ecb2048b355 | /training_csv_generator_diseases.py | 505fc4122bbb8dca5754ac0203328da66c59417b | [] | no_license | sawrov/QUANTUM_HACK_TRAIN_IBM_WATSON | 6934e6990ca05d9cf8c26a3ceaa4605b40bbde39 | f0c7d83f1f2f391ecde09f6d809e9074a9a01742 | refs/heads/master | 2022-12-06T20:49:07.504742 | 2020-08-29T06:32:46 | 2020-08-29T06:32:46 | 291,010,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #make csv to train
# Build the Watson training CSV: one row per disease labelled 'object_of_interest'.
import csv

# BUG FIX: newline='' stops the csv module emitting blank rows on Windows,
# and the trailing newline of each input line is stripped so it no longer
# ends up inside the CSV field. (Unused `huha` variable removed.)
with open('train_diseases.csv', mode='w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file, delimiter=",")
    with open('filtered_final_list.txt') as diseases:
        for disease in diseases:
            csv_writer.writerow(['object_of_interest', disease.strip()])
"sawrov@hotmail.com"
] | sawrov@hotmail.com |
ed9f9207bf8b67bed0dcd2f2ade7aa486e265490 | f08e44df644678dc6b2852f8c5dcde744bf80e6e | /api_clan/apps.py | a80daf74865a3ad1150ff5182519ea9ea64cacb7 | [] | no_license | E1-Bart0/abstract_clan | f836322254ca86889c884e2d33e4755fa846d981 | f5b2c15920fbc2fb14692dfa65c01da374e83c1b | refs/heads/master | 2023-04-03T01:54:59.595684 | 2021-04-15T13:40:03 | 2021-04-15T13:40:03 | 353,965,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class ApiClanConfig(AppConfig):
    """Django application configuration for the api_clan app."""
    name = 'api_clan'
| [
"starovoitov.vadik1@gmail.com"
] | starovoitov.vadik1@gmail.com |
ee824e6b9b7691a064d6ec0a0a4aca640c8d4611 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/preclud.py | 92806b29aea1beb79e849a1ee0a0da996f253cc9 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,193 | py | ii = [('BentJDO2.py', 2), ('CookGHP3.py', 2), ('LyelCPG2.py', 1), ('MarrFDI.py', 1), ('RogePAV2.py', 5), ('CoolWHM2.py', 1), ('GodwWSL2.py', 6), ('RogePAV.py', 6), ('WilbRLW4.py', 1), ('ProuWCM.py', 2), ('AubePRP2.py', 10), ('CookGHP.py', 4), ('MartHSI2.py', 5), ('WilkJMC3.py', 1), ('AubePRP.py', 16), ('ChalTPW2.py', 3), ('AdamWEP.py', 2), ('WilbRLW2.py', 2), ('ClarGE2.py', 4), ('CoopJBT2.py', 1), ('AinsWRR3.py', 2), ('CookGHP2.py', 3), ('KiddJAE.py', 1), ('AdamHMM.py', 3), ('ClarGE.py', 11), ('LyelCPG.py', 4), ('DibdTRL2.py', 1), ('AinsWRR.py', 1), ('WadeJEB.py', 7), ('TalfTIT.py', 2), ('CoopJBT.py', 2), ('KirbWPW2.py', 3), ('SoutRD2.py', 2), ('BackGNE.py', 1), ('MedwTAI2.py', 4), ('WheeJPT.py', 6), ('MereHHB3.py', 1), ('MereHHB.py', 1), ('WilkJMC.py', 3), ('MartHRW.py', 2), ('FitzRNS4.py', 1), ('CoolWHM3.py', 1), ('BentJRP.py', 6), ('StorJCC.py', 8), ('MackCNH2.py', 1), ('BellCHM.py', 1), ('JacoWHI2.py', 1), ('WilbRLW3.py', 1), ('ClarGE3.py', 4), ('MartHRW2.py', 1), ('DibdTRL.py', 1), ('FitzRNS2.py', 3), ('HogaGMM2.py', 2), ('MartHSI.py', 6), ('EvarJSP.py', 6), ('DwigTHH.py', 5), ('LyelCPG3.py', 2), ('TaylIF.py', 4), ('WordWYR.py', 2), ('KeigTSS.py', 1), ('KirbWPW.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
e3e68768ed47828d1a553fd93239900319c4fb14 | e38499956d46f771a143c9faa1492d17a3fb5854 | /dynatrace_base/tests/test_dynatrace_client.py | aca334db45b14ff1f209a3911ffd9a88b8c84659 | [] | permissive | StackVista/stackstate-agent-integrations | 959f83bc76d00c407f90f438032d03d4c072bc5d | 350cb6e239157b50b5943cdf5ca13163da9b9307 | refs/heads/master | 2023-07-20T03:51:47.814264 | 2023-07-11T09:13:28 | 2023-07-11T09:13:28 | 195,226,626 | 3 | 9 | BSD-3-Clause | 2023-08-29T08:10:51 | 2019-07-04T11:10:24 | Python | UTF-8 | Python | false | false | 1,480 | py | # (C) StackState 2021
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
def test_endpoint_generation(dynatrace_client):
    """
    Check if the URL sanitization is correct.
    """
    expected = "https://custom.domain.com/e/abc123/api/v1/entity/infrastructure/processes"
    bases = ("https://custom.domain.com/e/abc123", "https://custom.domain.com/e/abc123/")
    suffixes = ("api/v1/entity/infrastructure/processes", "/api/v1/entity/infrastructure/processes")
    # Every base/suffix slash combination must normalize to the same endpoint.
    combos = [(base, suffix) for base in bases for suffix in suffixes]
    for base, suffix in combos:
        assert dynatrace_client.get_endpoint(base, suffix) == expected
def test_raising_exception_on_not_200_status(dynatrace_client, requests_mock, test_instance):
    """
    Check if client raised exception on non 200 status.
    """
    url = dynatrace_client.get_endpoint(test_instance.get('url'), '/api/v1/events')
    requests_mock.get(url, status_code=400, text='{"response": "123"}')
    # A 400 response must surface as an exception, not a silent payload.
    with pytest.raises(Exception):
        dynatrace_client.get_dynatrace_json_response(url)
def test_status_200(dynatrace_client, requests_mock, test_instance):
    """
    Basic client test.
    """
    url = dynatrace_client.get_endpoint(test_instance.get('url'), 'api/v1/events/')
    requests_mock.get(url, status_code=200, text='{"events": [{"eventId": "123"}]}')
    payload = dynatrace_client.get_dynatrace_json_response(url)
    assert payload["events"][0]['eventId'] == '123'
| [
"noreply@github.com"
] | StackVista.noreply@github.com |
4eb48a87e664b4cabd5416d2d6729ed9a88b43a1 | 49cd9ba075ed2ab6b267f6e012bfb03267b7bc08 | /project_42_formsModelpagination/app42/forms.py | 99db23b3c75ea231d95bd12b4e9224ed18e651db | [] | no_license | Satputev/DjangoApps | 4d47a76f20815b2b1313e8b3e3c61b5406f5da60 | c6fb5e9fa131f07d1f5920e98699f9daaa49d424 | refs/heads/master | 2023-02-14T00:42:36.037749 | 2020-12-24T07:39:54 | 2020-12-24T07:39:54 | 323,857,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django import forms
from app42.models import ProductsModel
from django.forms import ValidationError
class ProductForm(forms.ModelForm):
    """ModelForm over ProductsModel; pid is excluded and pprice must be >= 1."""

    class Meta:
        model = ProductsModel
        fields = '__all__'
        exclude = ('pid',)
        labels = {'pname': 'Product Name', 'pprice': 'Product Price', 'pimg': 'Product Image'}

    def clean_pprice(self):
        # Reject zero and negative prices.
        price = self.cleaned_data['pprice']
        if price < 1:
            raise ValidationError('price should be greater than "0"')
        return price
| [
"satputevishal8@gmail.com"
] | satputevishal8@gmail.com |
40027cf73a5da98259bba7170f6dfe2dab5cabc3 | fb638b267cc5bb648cc1adb7cc32cb41553d547e | /ifstmt.py | 901637298d3d368ea3084f099366b883646c5692 | [] | no_license | Sohnav19/icta_Calicut | 92630b3a8d64ffbde5c8a326d6206121d1b22848 | e50a6157972da5e932ae72700861b7d8b2d196bf | refs/heads/master | 2020-04-05T03:58:57.511460 | 2018-11-17T06:37:19 | 2018-11-17T06:37:19 | 156,533,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | num1=int(input("Enter a number : "))
# Classify the user-supplied integer relative to 10.
if num1 <= 10:
    print("number is less or equal to 10")
else:
    # BUG FIX: user-facing typo "thn" -> "than"
    print("number is greater than 10")
"noreply@github.com"
] | Sohnav19.noreply@github.com |
4c20712069f7e4ae6f6956949a5944345b4fc284 | 97e669497d8767cbff5dce3146463dfbf2555a44 | /Projects/2048.gyp | 0e1f8206c443d3893dd81eae171f26cd70e525ab | [] | no_license | ghostassault/AutomateTheBoringWithPython | cbe3d3eccf91bbbf1b6e15f8e74e7330dd627363 | ca3bb07a3552433478e446b06c97cbf4f5849680 | refs/heads/main | 2023-02-17T16:00:30.678379 | 2021-01-15T17:36:42 | 2021-01-15T17:36:42 | 329,977,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | gyp | '''write a program that will open the game at
https://gabrielecirulli.github.io/2048/ and keep sending up, right, down, and left keystrokes to
automatically play the game.'''
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
def main():
    """Open the 2048 page and cycle arrow keys until interrupted (Ctrl-C)."""
    url = 'https://gabrielecirulli.github.io/2048/'
    browser = webdriver.Safari()
    browser.set_window_position(0, 0)
    browser.set_window_size(1600, 900)
    browser.get(url)
    time.sleep(3)
    page = browser.find_element_by_tag_name('html')
    # BUG FIX: the original `while True` loop could never exit, so the closing
    # "Game over" print was unreachable. Run until the user interrupts.
    try:
        while True:
            page.send_keys(Keys.UP)
            page.send_keys(Keys.RIGHT)
            page.send_keys(Keys.DOWN)
            page.send_keys(Keys.LEFT)
    except KeyboardInterrupt:
        pass
    finally:
        print("Game over")
main() | [
"noreply@github.com"
] | ghostassault.noreply@github.com |
29cd9fe209087460896af0f86d64ae6b2497aacf | b0d6fee5d65d9499c6f5e9e580fcb85ddb32b4de | /sugipms/urls.py | 6e4dee75ff8e2ae5dc48f3b84bd156c6a8ab7614 | [] | no_license | marcuscoti/sugipms | a43a93296d0037b3706d26b28ac8e129e9d963ab | 450ad8b806d229a28bdfe1e980349741575d4094 | refs/heads/master | 2020-04-27T16:04:48.862379 | 2019-03-08T04:52:34 | 2019-03-08T04:52:34 | 174,471,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | """sugipms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from accounts.views import login_view, logout_view
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # App routes, each namespaced for reverse() lookups.
    url(r'^projects/', include('projects.urls', namespace='projects')),
    url(r'^groups/', include('groups.urls', namespace='groups')),
    url(r'^acts/', include('activities.urls', namespace='activities')),
    # Session authentication endpoints.
    url(r'^login', login_view, name='login'),
    url(r'^logout/', logout_view, name='logout'),
]
| [
"marcuscoti@gmail.com"
] | marcuscoti@gmail.com |
8bf01cb2845267651a76d98fa4af3d1e6e1bcc2b | f3903f97dce2aa85143d77a9c28ad146011c6edb | /users/models.py | 767cedf88d285bc765689b8e2a1711fbd14b1ffa | [] | no_license | taemin410/AirSupplyPilot | 29916e09a027c95ea53c5c84612cf2a88bad8fb2 | 6531ddd303819b980822f5d4ce293e8ca4b3c526 | refs/heads/master | 2020-04-08T12:00:51.599194 | 2018-12-04T14:17:41 | 2018-12-04T14:17:41 | 159,329,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.forms.fields import ChoiceField
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import BaseUserManager
from authtools.models import AbstractEmailUser
class Clinic(models.Model):
    """A clinic location: name plus geographic coordinates and altitude."""
    name = models.CharField(max_length=256)
    longitude = models.DecimalField(max_digits=9, decimal_places=6)
    latitude = models.DecimalField(max_digits=9, decimal_places=6)
    altitude = models.IntegerField()

    def __str__(self):
        return self.name

    def __hash__(self):
        # altitude is deliberately excluded, mirroring __eq__ below
        return hash((self.name, self.longitude, self.latitude))

    def __eq__(self, other):
        # BUG FIX: the original compared self's field tuple against itself,
        # so every Clinic compared equal to anything. Compare *other*'s fields.
        if not isinstance(other, Clinic):
            return NotImplemented
        return (self.name, self.longitude, self.latitude) == (other.name, other.longitude, other.latitude)
class CustomUser(AbstractEmailUser):
    """Email-login user with a role and an optional home clinic."""
    # Selectable roles; stored value and display label are identical.
    ROLES = (
        ('Clinic Manager', 'Clinic Manager'),
        ('Warehouse Personnel', 'Warehouse Personnel'),
        ('Dispatcher', 'Dispatcher'),
    )
    name = models.CharField(max_length=40, blank=True, default='')
    email = models.EmailField(unique=True)
    first_name = models.CharField(max_length=30, blank=True, default='')
    last_name = models.CharField(max_length=30, blank=True, default='')
    role = models.CharField(choices=ROLES, max_length=30)
    # Clinic the user is attached to; nullable (deleted with its clinic).
    clinic = models.ForeignKey(Clinic, on_delete=models.CASCADE, null=True)
    # Authenticate by email rather than username.
    USERNAME_FIELD = 'email'
    def __str__(self):
        return self.email
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth Token for every newly created user (post_save signal)."""
    if created:
        Token.objects.create(user=instance)
| [
"taemin410@gmail.com"
] | taemin410@gmail.com |
694b8b138f3b4862d4b35953cdb3675a91e2a179 | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/spatial/tests/test_distance.py | c0b831a2879fa2a21e753350d7b7edefe48591cf | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a99a0a8e8696f85040b18a7b95996060265dec4c0607ab9bc90551e2f2dc9bd2
size 81424
| [
"sumitkutty37@gmail.com"
] | sumitkutty37@gmail.com |
4769ff8ea0b328b26c83151ea48d0dab28698d1a | 20780f8619ae61efe55f59417e0014c391bcff1a | /src/basic/file.py | bd95cae10b09b86a600a1a14f6566b8602c10367 | [
"Apache-2.0"
] | permissive | mumupy/pythonlearn | e694fd4fb915c90792db30f8090f370acb3ac68e | 5be03d156f11af2467a6052a476de4b706f7d53a | refs/heads/master | 2020-03-23T17:26:09.811562 | 2019-09-22T06:21:17 | 2019-09-22T06:21:17 | 141,859,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/15/015 20:52
# @Author : ganliang
# @File : file.py
# @Desc : 文件目录
import glob
import os
import sys
def readFile(filePath):
    """Print the file at *filePath* line by line, then print its full content.

    BUG FIX: the original opened the file and never closed it (resource leak);
    a `with` block now guarantees the handle is released.
    """
    with open(filePath) as file:
        lines = file.readlines(100)
        for line in lines:
            print(line)
        # rewind and dump the whole file in one go
        file.seek(0)
        filcontent = file.read()
        print(filcontent)
def writeFile(filePath):
    """Append the line "first line" to the file at *filePath*.

    BUG FIX: the original left the handle open on exceptions; use `with`
    so the file is always closed (it creates the file if missing).
    """
    with open(filePath, "a") as file:
        file.write("first line\n")
def detaillist(filePath):
    """Recursively print the path of every file under *filePath*."""
    if not os.path.exists(filePath):
        print("filePath:%s 不存在" % filePath)
        return
    if os.path.isfile(filePath):
        print(filePath)
    elif os.path.isdir(filePath):
        # recurse into each directory entry
        for entry in os.listdir(filePath):
            detaillist(os.path.join(filePath, entry))
def isFile(filePath):
    """Return True if *filePath* refers to an existing regular file."""
    # os.path.exists(filePath)  # existence alone isn't enough: directories also exist
    return os.path.isfile(filePath)
def globMatch(filePath):
    """Print every path matching the glob pattern *filePath*.

    BUG FIX: the original printed os.path.join(filePath, file), joining the
    *pattern* onto each match; glob.glob() already returns usable paths.
    """
    for match in glob.glob(filePath):
        print(match)
def main():
    """Demo: append a line to file.txt, then print its contents."""
    filename = "file.txt"
    writeFile(filename)
    readFile(filename)
# main()
# NOTE(review): hard-coded Windows path below runs at import time — verify intended.
detaillist("D:\data\webmagic\\finance")
# globMatch("D:\data\webmagic\\finance\\*.json")
| [
"babmm@aliyun.com"
] | babmm@aliyun.com |
3e0a6ab1bc5bff84c37b8472b705715f599464d3 | 28a90d04b197b48337b0a6a1aa52929c082a100b | /Trabalho03_ Controle de Concorrência e Recuperação a Falhas/Controle_Recuperacao.py | f187dc972a29257ae6328f85b5dbd8c3ed776277 | [] | no_license | AndyFernandes/database-management | 57bdb92181e11fc2f1706ce52e3223fd82dd8abc | 43312e5fc77cf25a4604e9e6a79ca336adca604a | refs/heads/master | 2021-09-24T08:19:34.345669 | 2018-10-05T17:57:02 | 2018-10-05T17:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | # ALUNOS:
# Andreza Fernandes de Oliveira, 384341
# Thiago Fraxe Correa Pessoa, 397796
import re

# ---------------------------- REGEX ----------------------------
# Regular expressions used to pull transaction ids/timestamps and the
# database objects out of each schedule operation.
numberRegex = re.compile('\d+')  # transaction number/timestamp in BT, C, w and r operations
dadoRegex = re.compile('\(\s*([a-z]+)\s*\)')  # database object name, e.g. the "x" in "w1(x)"
espacoBranco = re.compile('\s+')  # any whitespace, stripped from the raw schedule

# Read the schedule from teste3.txt, drop all whitespace and split the
# comma-separated operations into a list.
with open("teste3.txt") as t:
    historicoRaw = t.read()
historicoRaw = re.sub(espacoBranco, '', historicoRaw)
#print(historicoRaw2)
historico = historicoRaw.split(',')

# ===================== PART 1: CONCURRENCY CONTROL =====================
# Basic timestamp-ordering protocol.  Each data item tracks
# <name, read timestamp, write timestamp>: the timestamp of the most
# recent transaction that read/wrote it.  The program prints the state of
# every item after each accepted operation and, on success, the schedule
# without FL() and CP(); on conflict, a corrected (reordered) schedule.
'''
if op == 'Read':
    conflict when TS(transaction) < write-TS(item);
    otherwise update the item's read timestamp.
elif op == 'Write':
    conflict when TS(transaction) < read-TS(item)
    or TS(transaction) < write-TS(item).
'''
# Collect every data item mentioned anywhere in the raw schedule.
dicDados = {}
for dado in dadoRegex.findall(historicoRaw):
    if dado not in dicDados:
        # [read timestamp, write timestamp, last operation on this item]
        dicDados[dado] = [0, 0, ""]
condicao = True
print("ENTRADA: \n")
# Replay the whole schedule until one full pass succeeds.  On a timestamp
# conflict the offending operation is moved to just before the last
# operation that touched the conflicting item, and the pass restarts.
while(condicao):
    # Reset every item's timestamps before replaying the schedule.
    for dado in dicDados:
        dicDados[dado] = [0, 0, ""]
    print(*historico)
    print("=" * 101 + "\n")
    for operation in historico:
        if (operation[:1] == 'w'):  # write operation, e.g. w1(x)
            timestampTransacao = int(numberRegex.search(operation).group())
            dado = dadoRegex.search(operation).group(1)
            timestampRead = dicDados[dado][0]
            timestampWrite = dicDados[dado][1]
            if ((timestampTransacao < timestampWrite) or (timestampTransacao < timestampRead)):
                # Conflict: a younger transaction already read/wrote the item.
                ultimaOperacao = dicDados[dado][2]
                idUltimaOperacao = int(numberRegex.search(ultimaOperacao).group())
                problema = ""
                if timestampTransacao < timestampRead:
                    problema = " JA LEU O DADO " + dado
                elif (timestampTransacao < timestampWrite):
                    problema = " JA ESCREVEU O DADO " + dado
                print("\nPROBLEMA ENCONTRADO NA OPERACAO " + operation + ", POIS A TRANSAÇÃO " + str(idUltimaOperacao) + problema)
                print("\n" + "=" * 101)
                # Move the operation to just before the conflicting one and
                # restart the replay from scratch.
                historico.remove(operation)
                ultimaOperacao = dicDados[dado][2]
                indice = historico.index(ultimaOperacao)
                historico.insert(indice, operation)
                condicao = True
                print("\n" + "=" * 101)
                print("\nTENTATIVA: ")
                break;
            else:
                # Accept the write: record its timestamp and the operation.
                dicDados[dado][1] = timestampTransacao
                dicDados[dado][2] = operation
                condicao = False
                print(operation + ": ")
                for dados in dicDados:
                    print("< " + dados + ", r: " + str(dicDados[dados][0]) + ", w: " + str(dicDados[dados][1]) + ">")
        elif (operation[:1] == 'r'):  # read operation, e.g. r1(x)
            timestampTransacao = int(numberRegex.search(operation).group())
            dado = dadoRegex.search(operation).group(1)
            timestampWrite = dicDados[dado][1]
            if (timestampTransacao < timestampWrite):
                # Conflict: a younger transaction already wrote the item.
                ultimaOperacao = dicDados[dado][2]
                idUltimaOperacao = int(numberRegex.search(ultimaOperacao).group())
                print("\nPROBLEMA ENCONTRADO NA OPERACAO " + operation + ", POIS A TRANSAÇÃO " + str(idUltimaOperacao) + " JA LEU O DADO " + dado)
                print("=" * 101)
                historico.remove(operation)
                ultimaOperacao = dicDados[dado][2]
                indice = historico.index(ultimaOperacao)
                historico.insert(indice, operation)
                condicao = True
                print("\nTENTATIVA: ")
                break;
            else:
                # Accept the read: record its timestamp and the operation.
                dicDados[dado][0] = timestampTransacao
                dicDados[dado][2] = operation
                condicao = False
                print(operation + ": ")
                for dados in dicDados:
                    print("< " + dados + ", r: " + str(dicDados[dados][0]) + ", w: " + str(dicDados[dados][1]) + ">")

print("\n" + "=" * 101)
print("\nRESULTADO: ")
print(*historico)
print("\n" + "=" * 101)
# ======================= PART 2: FAILURE RECOVERY =======================
# Three lists track transaction state while replaying the schedule:
#   iniciadas  - transactions that began (BT) but have not committed yet
#   commitadas - transactions committed (CM) since the last checkpoint
#   terminadas - committed transactions made durable by a checkpoint (CP)
# dicTransacoes maps each transaction id to the list of its write
# operations.  When a failure (FL) is reached, replay stops: every still
# active transaction must be undone (UNDO), and every transaction that
# committed after the last checkpoint must be redone (REDO).
iniciadas = []
commitadas = []
terminadas = []
dicTransacoes = {}
print("="*101)
for operation in historico:
    if operation[:2] == 'BT':  # transaction start
        idTransacao = numberRegex.search(operation).group()
        dicTransacoes[idTransacao] = []
        iniciadas.append(idTransacao)
    elif operation[:1] == 'w':  # write operation: remember it for UNDO/REDO
        idTransacao = numberRegex.search(operation).group()
        dicTransacoes[idTransacao].append(operation)
    elif operation[:2] == 'CM':  # commit: move from active to committed
        idTransacao = numberRegex.search(operation).group()
        iniciadas.remove(idTransacao)
        commitadas.append(idTransacao)
    elif operation[:2] == 'CP':  # checkpoint: committed work is now durable
        for k in commitadas:
            terminadas.append(k)
        commitadas = []
    elif operation[:2] == 'FL':  # failure: stop replaying the log
        break;
print('UNDO: ')
for k in iniciadas:
    print("TRANSAÇÃO %s: " % k + str(dicTransacoes[k]))
print('\nREDO: ')
for k in commitadas:
    print("TRANSAÇÃO %s: " % k + str(dicTransacoes[k]))
| [
"andrezafernandes@alu.ufc.br"
] | andrezafernandes@alu.ufc.br |
60d9dfec8163702f03f8c422ff8d5a69d3d02082 | 17c70a768d15b32a3d336214231c9f641ae76da8 | /venv1/lib/python3.6/genericpath.py | 70c2ac3f6f5d424dfbaefd9f0ee53b873607a18c | [] | no_license | smritidahal/FluffyCurrency | 8e58852bdb6973b495b97c182102bf2ced5bfe5a | c023c4a3749ce8df11d51dde056b4fdbe8fdadd0 | refs/heads/master | 2022-11-07T20:15:03.546821 | 2018-03-03T17:51:09 | 2018-03-03T17:51:09 | 123,726,418 | 0 | 1 | null | 2022-11-03T01:03:34 | 2018-03-03T19:56:28 | Python | UTF-8 | Python | false | false | 51 | py | /Users/omars/anaconda3/lib/python3.6/genericpath.py | [
"omars@zillowgroup.com"
] | omars@zillowgroup.com |
b5682a3df3f2a3487ff80dffd14cbbdb5c7856eb | e926cc05032db0ea1502131ba7b276d87aa03189 | /pyapr/tests/test_iterator.py | 552b6b8e327866890e4d19ad17168ab584928afc | [
"Python-2.0",
"Apache-2.0"
] | permissive | AdaptiveParticles/pyapr | c6b433ade22341c958de4b330986c577691cc7ed | e3fc6fadb917a209ac25ab76c962c46af4d8dc62 | refs/heads/master | 2023-04-16T17:53:35.026955 | 2022-11-08T11:29:39 | 2022-11-08T11:29:39 | 184,399,854 | 3 | 1 | Apache-2.0 | 2022-11-07T09:38:02 | 2019-05-01T10:19:52 | Python | UTF-8 | Python | false | false | 1,279 | py | import pyapr
from .helpers import load_test_apr
import numpy as np
import math
def test_iterator_vs_slicer():
    """Particle values reached via the iterator must match slicer output.

    For several level deltas, every particle visited row-by-row through
    the linear iterator must equal the value at its y-coordinate in the
    row reconstructed by APRSlicer.
    """
    apr, parts = load_test_apr(3)
    it = apr.iterator()
    for level_delta in [0, -1, -2]:
        slicer = pyapr.reconstruction.APRSlicer(apr, parts, level_delta=level_delta)
        level = it.level_max() + level_delta
        for z in range(5, 13):
            for x in range(1, 9):
                recon_row = slicer[z, x]
                for idx in range(it.begin(level, z, x), it.end()):
                    assert parts[idx] == recon_row[it.y(idx)]
def test_iterator_find_x():
    """find_particle and find_coordinates must be mutually consistent.

    For a sample of voxel coordinates, looking up the particle index and
    mapping it back to cell coordinates must land on the particle cell
    containing the voxel: each coordinate divided (floor) by the cell
    size 2**(level_max - level).
    """
    apr, parts = load_test_apr(3)
    it = apr.iterator()
    _shape = apr.shape()
    # First slice, last slice and four random interior z positions.
    z_coords = [0] + list(np.random.randint(1, _shape[0]-1, size=4)) + [_shape[0]-1]
    for z in z_coords:
        for x in range(_shape[1]):
            for y in range(_shape[2]):
                # find particle at z, x, y
                idx = it.find_particle(z, x, y)
                # find coordinates of particle
                level, z_l, x_l, y_l = it.find_coordinates(idx)
                size_factor = 2 ** (it.level_max() - level)
                assert z_l == (z // size_factor)
                assert x_l == (x // size_factor)
                assert y_l == (y // size_factor)
| [
"jonsson@mpi-cbg.de"
] | jonsson@mpi-cbg.de |
52ca31c6ca4ba11e6a906ab521f69d43554cfa86 | 1fe7f68cc4d4addf4cb9f555db098f81db644752 | /setup.py | f6e57eb5d5e9d5bcc1c84c3005ec5b5da147d5ab | [
"Apache-2.0"
] | permissive | allthingstalk/rpi-python-gateway-client | a0f0ec025fb61e3f40db8f9afd0ae06ad446d5f4 | b06602d46d167d08ca48ed661f6cacf49014346b | refs/heads/master | 2020-04-03T21:32:16.022419 | 2017-10-24T07:51:27 | 2017-10-24T07:51:27 | 31,375,699 | 2 | 1 | null | 2017-10-24T07:51:28 | 2015-02-26T16:13:29 | Python | UTF-8 | Python | false | false | 1,117 | py | from distutils.core import setup
# Copyright 2014-2016 AllThingsTalk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Package metadata for the AllThingsTalk Raspberry Pi gateway client.
setup(
    name='att_iot_gateway',
    version='1.0.0',
    packages=['att_iot_gateway'],  # pip and setup tools are loaded in the virtual environment for the IDE.
    # NOTE(review): plain distutils ignores install_requires (it is a
    # setuptools feature) and it is conventionally a list, e.g.
    # ['paho-mqtt'] — confirm which build backend actually runs this file.
    install_requires='paho-mqtt',
    url='https://github.com/allthingstalk/rpi-python-gateway-client',
    license='Apache Software License',
    author='Jan Bogaerts',
    author_email='jb@allthingstalk.com',
    keywords = ['ATT', 'iot', 'internet of things', 'AllThingsTalk'],
    description='This package provides device & asset management + data feed features for the AllThingsTalk platform to your application.'
)
| [
"jb@allthingstalk.com"
] | jb@allthingstalk.com |
035f453b189a37c9677088804e6c18447aabdbbe | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/LeetCode/733 Flood Fill.py | 4350e4e56af74a61b1f948707760e1b580de0573 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,124 | py | #!/usr/bin/python3
"""
An image is represented by a 2-D array of integers, each integer representing
the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of
the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels
connected 4-directionally to the starting pixel of the same color as the
starting pixel, plus any pixels connected 4-directionally to those pixels (also
with the same color as the starting pixel), and so on. Replace the color of all
of the aforementioned pixels with the newColor.
At the end, return the modified image.
Example 1:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels
connected
by a path of the same color as the starting pixel are colored with the new
color.
Note the bottom corner is not colored 2, because it is not 4-directionally
connected
to the starting pixel.
Note:
The length of image and image[0] will be in the range [1, 50].
The given starting pixel will satisfy 0 <= sr < image.length and 0 <= sc <
image[0].length.
The value of each color in image[i][j] and newColor will be an integer in
[0, 65535].
"""
from typing import List
# 4-connected neighbourhood offsets: up, down, left, right.
dirs = ((-1, 0), (1, 0), (0, -1), (0, 1))


class Solution:
    def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
        """Recolor the 4-connected region of image[sr][sc] to newColor.

        Mutates *image* in place and returns it.  When the region already
        has the target color there is nothing to do — the guard also
        prevents infinite recursion on that corner case.
        """
        source_color = image[sr][sc]
        if source_color != newColor:
            self.dfs(image, sr, sc, source_color, newColor)
        return image

    def dfs(self, image, i, j, cur_color, new_color):
        """Depth-first repaint of every cur_color cell reachable from (i, j)."""
        rows, cols = len(image), len(image[0])
        image[i][j] = new_color
        for di, dj in dirs:
            ni, nj = i + di, j + dj
            if 0 <= ni < rows and 0 <= nj < cols and image[ni][nj] == cur_color:
                self.dfs(image, ni, nj, cur_color, new_color)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3e344fd8d3cae98a76d3174cee73f3889e9427b7 | 821350c63f23af43cdd4d32d8ad34f6c02f95402 | /id_number_detector/id_num_pub.py | 4bab5a79b3924fdc53e6075e2b3547819c47058e | [] | no_license | naseef139/Robot-Assistant-Dev-Bank | 533f13309f719215e90009646e53fc274065459d | 37b957f48f0200ffafaf7a4299eec32d97abe456 | refs/heads/master | 2023-04-03T08:30:20.228847 | 2021-04-14T19:10:10 | 2021-04-14T19:10:10 | 265,094,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | #!/usr/bin/env python
import cv2
import pytesseract
import re
import rospy
from std_msgs.msg import String
import numpy as np
def gray_scale():
    """Grab webcam frames, OCR them and publish 9-digit ID numbers on ROS.

    Uses the module-level `cap` VideoCapture; runs until 'q' is pressed.

    Fixes over the original loop body:
      * the publisher and the ROS node are created once, not per frame
        (re-initialising the node on every iteration is invalid in rospy);
      * cap.release() happens after the loop instead of inside it, so the
        camera is not released right after the first frame;
      * the unused GaussianBlur/Canny pipeline is dropped;
      * the loop-invariant smoothing kernel is built once.
    """
    pub = rospy.Publisher("id_number", String, queue_size=10)
    rospy.init_node('id_no_send', anonymous=True)
    kernel = np.ones((5, 5), np.float32) / 25  # 5x5 mean filter
    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.filter2D(gray, -1, kernel)
        cv2.imshow('frame', gray1)
        print("sending......")
        text = pytesseract.image_to_string(gray1)
        # Only publish when the OCR text starts with nine digits.
        result = re.match(r'\d\d\d\d\d\d\d\d\d', text)
        if result:
            pub.publish(text)
            print(text)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
if __name__ == '__main__':
    # Point pytesseract at the system tesseract binary and open the
    # default camera before entering the publish loop.
    pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'
    cap = cv2.VideoCapture(0)
    try:
        gray_scale()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS is interrupted (e.g. Ctrl-C).
        pass
| [
"60naseef2@gmail.com"
] | 60naseef2@gmail.com |
33691cd01b4e227e38da0013bd69d2e9a0095153 | 38e8bf2990bcdadfc8f0c526720039f842753e9c | /venv/Scripts/wsdump.py | 3ee0de7f71459e398c0e867b9e98f874ab97ca3b | [] | no_license | KNHui/cafeteria | d58f4bece1ffc4c321af79be01534ff29ea73e3a | cbb218dd9efd8d5e8096610473d48cb2ffd5f498 | refs/heads/master | 2020-04-12T18:07:57.852974 | 2019-11-08T07:00:58 | 2019-11-08T07:00:58 | 162,670,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,952 | py | #!C:\Users\student\PycharmProjects\untitled3\venv\Scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return sys.stdin's encoding, lower-cased; default to "utf-8"."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# Frame opcodes that carry payload data (text/binary).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Console encoding, cached once for use by RawInput.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action mapping -v / -vv / -v 3 style flags to an int level."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # "-vv" arrives as the string "v": count the v's plus one.
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the wsdump command-line interface and parse sys.argv.

    The only positional argument is the websocket URL; everything else
    (proxy, verbosity, TLS, subprotocols, origin, headers, timings) is
    optional.
    """
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # -v may appear bare, repeated, or with an explicit number (VAction).
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")

    return parser.parse_args()
class RawInput:
    """Mixin providing a prompt/read primitive that works on py2 and py3.

    The line read from the console is normalised to utf-8 bytes: on py2,
    non-utf-8 console input is re-encoded; on py3 the str is encoded.
    """

    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)

        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            # py2 bytes in a non-utf-8 console encoding: transcode to utf-8.
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Default console: ANSI-colored '< data' output with a '>' prompt."""

    def write(self, data):
        sys.stdout.write("\033[2K\033[E")  # clear current line, move to next
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")  # blue "< data", reset color
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain passthrough console used with --raw: no colors, no prompt."""

    def write(self, data):
        sys.stdout.write(data + "\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Entry point: connect, spawn a receiver thread, and pump stdin."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    # Collect optional connection settings from the CLI flags.
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # --nocert: skip certificate and hostname verification.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Receive one frame; answer pings and close handshakes inline.
        # Returns (opcode, data); data is None for close/error cases.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        # Receiver loop (runs on the daemon thread): print each message
        # until the connection closes.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    # Sender loop on the main thread: forward stdin lines to the socket.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level CLI boundary: report the error instead of a traceback.
        print(e)
| [
"rlasoaen@naver.com"
] | rlasoaen@naver.com |
6b5da27a898f4cb50dfdbee5455232ec0d1be0ae | 31d2b0e4131be98fa47a9aba78766f867746028e | /docker/sane-doc-reports/src/sane_doc_reports/domain/Wrapper.py | 699320980b4bb468f5771af9ecad9e42cb68e21e | [
"MIT",
"Artistic-2.0",
"LicenseRef-scancode-secret-labs-2011",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"ISC",
"Python-2.0",
"Artistic-1.0-Perl",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | glicht/dockerfiles | 37e2489304d8cd96237184d346bdf08b1c247037 | 7dff92792a7ec6ac1e04950a41927867af5f147c | refs/heads/master | 2021-08-18T09:39:27.438297 | 2019-09-25T10:24:23 | 2019-09-25T10:24:23 | 210,816,968 | 1 | 0 | MIT | 2021-06-22T16:48:43 | 2019-09-25T10:22:52 | Python | UTF-8 | Python | false | false | 502 | py | from abc import ABC, abstractmethod
class Wrapper(ABC):
    """Base for elements and styles that wrap normal Element objects."""

    def __init__(self, cell_object, section):
        self.cell_object = cell_object
        self.section = section

    @abstractmethod
    def wrap(self):
        """
        Inserts the start of the wrapping element, in a way that inserted
        Elements will be inside the wrapping element
        """
        pass

    def __str__(self):
        # Bug fix: the original returned str(self), which calls __str__
        # again and recurses until RecursionError. Return a plain
        # descriptive representation instead.
        return "%s(section=%r)" % (type(self).__name__, self.section)
| [
"agam.more@demisto.com"
] | agam.more@demisto.com |
6c33907c782ffce7adb1fbb958a47151d2c7be12 | 86e0c72061d195bf8359845995cae9f58b5a38cd | /ml-foundations/PLA.py | a2d0f1dac1af6ee3dec969cd3db39171917a6bfa | [
"MIT"
] | permissive | Jack-An/RecommendationSystem | 6ff8ab94a2fe8c821ef51601f71ec6b6643eab76 | a04070d4d6cef2b781748eddd18049679c6c942b | refs/heads/master | 2023-05-11T17:54:04.478855 | 2023-04-27T09:24:26 | 2023-04-27T09:24:26 | 127,634,741 | 0 | 1 | null | 2023-04-27T09:24:27 | 2018-04-01T14:06:03 | Jupyter Notebook | UTF-8 | Python | false | false | 2,000 | py | import numpy as np
def pla(data):
    """Perceptron Learning Algorithm on a linearly separable data set.

    ``data`` holds one sample per row: feature columns followed by a
    +/-1 label column.  A bias column of ones is prepended internally.
    Returns (weights, number_of_updates).  The loop runs until every
    sample is classified correctly, so it only terminates when the data
    is separable.
    """
    weights = np.zeros(data.shape[1])          # bias + one weight per feature
    samples = np.c_[np.ones(data.shape[0]), data]
    updates = 0
    while True:
        scores = samples[:, :-1].dot(weights.reshape(weights.size, 1))
        mistakes = np.where(np.sign(scores).flatten() != samples[:, -1])[0]
        if mistakes.size == 0:
            # Converged: no misclassified samples remain.
            return weights, updates
        updates += 1
        pick = np.random.choice(mistakes, 1)[0]  # random misclassified sample
        weights += samples[pick][:-1] * samples[pick][-1]
def pocket_pla(data):
    """Pocket PLA: run perceptron updates for a fixed budget, keep the best.

    ``data`` holds one sample per row: feature columns followed by a +/-1
    label column; a bias column is prepended internally.
    Returns (best_weights, best_error) where error is the 0/1
    misclassification rate measured by cost_function.

    Bug fix: the original did ``current_theta = theta`` which merely
    aliases the same ndarray, so ``current_theta += ...`` mutated the
    "pocket" weights too and the best solution was never preserved.
    The pocket is now kept as an explicit copy.  The pointless
    ``iterations += 1`` inside a fixed ``range`` was removed, and the
    loop stops early once the data is perfectly classified.
    """
    theta = np.zeros(data.shape[1])            # bias + one weight per feature
    data = np.c_[np.ones(data.shape[0]), data]
    best_theta = theta.copy()
    best_error = cost_function(best_theta, data)
    for _ in range(1500):
        pred = np.sign(data[:, :-1].dot(theta.reshape(theta.size, 1)))
        mistakes = np.where(pred.flatten() - data[:, -1] != 0)[0]
        if mistakes.size == 0:
            break  # perfectly classified: no further update possible
        idx = np.random.choice(mistakes, 1)[0]  # random misclassified sample
        theta += data[idx][:-1] * data[idx][-1]
        current_error = cost_function(theta, data)
        if current_error < best_error:
            # Pocket the improved weights (a real copy, not an alias).
            best_error = current_error
            best_theta = theta.copy()
    return best_theta, best_error
def cost_function(theta, data):
    """Fraction of rows in *data* misclassified by *theta* (0/1 error).

    ``data`` is already augmented with a bias column; its last column is
    the +/-1 label.
    """
    scores = data[:, :-1].dot(theta.reshape(theta.size, 1))
    mislabeled = np.sign(scores).flatten() != data[:, -1]
    return np.count_nonzero(mislabeled) / data.shape[0]
def main():
    """Train pocket PLA on the training file, report in/out-of-sample error."""
    train = np.genfromtxt('../data/pocket_train.dat')
    theta, error = pocket_pla(train)
    print(error)
    print(theta)
    holdout = np.genfromtxt('../data/pocket_test.dat')
    holdout = np.c_[np.ones(holdout.shape[0]), holdout]
    print(cost_function(theta, holdout))


if __name__ == '__main__':
    main()
| [
"jackanszu@gmail.com"
] | jackanszu@gmail.com |
c03964aecb43060dcbd70918d434bf0f25131a64 | 8a8e082ab6ade44efc0875225414c9127058fa62 | /downstream_tasks/promoter_prediction/dataset.py | 3374e5b8771f42362570e4f08ad187c1004d1500 | [
"MIT"
] | permissive | AIRI-Institute/GENA_LM | 5933e1dbc4b5ddd8a2ab940ef227566b532e672f | 8fd7d99fb653027db9f52b8ac689f0860863dd01 | refs/heads/main | 2023-08-21T14:30:21.282797 | 2023-08-10T10:21:48 | 2023-08-10T10:25:44 | 505,468,738 | 96 | 11 | null | null | null | null | UTF-8 | Python | false | false | 2,557 | py | from pathlib import Path
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
class EPDnewPromoterDataset(Dataset):
    """Torch Dataset over EPDnew promoter CSV files for sequence classification.

    Each CSV row must provide a sequence column (``x_field``) and a label
    column (``label_field``); sequences are tokenized lazily on access.
    """

    def __init__(self, datafiles, tokenizer, x_field='x', label_field='label', max_seq_len=512, pad_to_max=True,
                 truncate='right'):
        # datafiles may be: a str/Path to a folder (all files inside are
        # used) or an iterable of file paths.
        if isinstance(datafiles, str):
            # convert str path to folder to Path
            datafiles = Path(datafiles)
        if isinstance(datafiles, Path) and datafiles.is_dir():
            # get all files from folder
            datafiles = list(datafiles.iterdir())
        self.data = pd.DataFrame()
        for f in datafiles:
            self.data = pd.concat([self.data, pd.read_csv(f)])
        self.data = self.data.reset_index()
        self.x_field = x_field
        self.label_field = label_field
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len
        self.pad_to_max = pad_to_max
        self.truncate = truncate

    @staticmethod
    def get_features(x, tokenizer, max_seq_len=512, pad_to_max=True, truncate='right'):
        """Tokenize one sequence into BERT-style model inputs.

        Truncates to max_seq_len - 2 tokens from the right, left or middle
        (``truncate``), adds CLS/SEP, and optionally pads to max_seq_len.
        Returns a dict of numpy arrays: input_ids, token_type_ids,
        attention_mask.
        """
        tokens = tokenizer.tokenize(x)
        if truncate == 'right':
            tokens = tokens[:max_seq_len-2]
        elif truncate == 'left':
            tokens = tokens[-(max_seq_len-2):]
        elif truncate == 'mid':
            # keep a centred window of max_seq_len - 2 tokens
            mid = len(tokens) // 2
            left_ctx = (max_seq_len-2) // 2
            right_ctx = (max_seq_len-2) - left_ctx
            tokens = tokens[max(0, mid - left_ctx): min(mid + right_ctx, len(tokens))]
        tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        seq_len = len(tokens)
        token_type_ids = [0] * seq_len
        attention_mask = [1] * seq_len
        if pad_to_max:
            # right-pad every field up to max_seq_len
            input_ids += [tokenizer.pad_token_id] * max(max_seq_len - seq_len, 0)
            token_type_ids += [0] * max(max_seq_len - seq_len, 0)
            attention_mask += [0] * max(max_seq_len - seq_len, 0)
        return {'input_ids': np.array(input_ids),
                'token_type_ids': np.array(token_type_ids),
                'attention_mask': np.array(attention_mask)}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        x = self.data[self.x_field][idx]
        features = EPDnewPromoterDataset.get_features(x, self.tokenizer, self.max_seq_len, self.pad_to_max,
                                                      self.truncate)
        label = {'labels': self.data[self.label_field][idx]}
        return {**features, **label}
| [
"9271630+yurakuratov@users.noreply.github.com"
] | 9271630+yurakuratov@users.noreply.github.com |
7d375196ec6a89c43b9391ff60129464324ce322 | f4fdb0c1213bbb403b87c2dbbde390918ac08861 | /convert_uk_decl_num3.py | accb16c1dd9181350a97f4be6023784d4fd9b64a | [] | no_license | benwing2/RuNounChanges | 0d5076e576237f10b50049ed52b91f96c95cca95 | 048dfed5abe09b8d5629c5772292027ce0a170f2 | refs/heads/master | 2023-09-03T22:48:06.972127 | 2023-09-03T06:27:56 | 2023-09-03T06:27:56 | 41,480,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pywikibot, re, sys, argparse
import blib
from blib import getparam, rmparam, tname, pname, msg, errandmsg, site
def process_text_on_page(index, pagetitle, text):
    """Rewrite {{uk-decl-num3}} invocations as {{uk-adecl-manual}}.

    Only templates whose accusative cell (positional param 4) holds two
    comma-separated forms (inanimate, animate) are converted.  Returns
    (new_wikitext, notes) where notes is a list of edit-summary strings.
    """
    global args
    def pagemsg(txt):
        msg("Page %s %s: %s" % (index, pagetitle, txt))

    notes = []

    parsed = blib.parse_text(text)
    for t in parsed.filter_templates():
        tn = tname(t)
        origt = str(t)
        def getp(param):
            return getparam(t, param)
        if tn == "uk-decl-num3":
            def clean_part(part):
                # Strip wiki links and internal spaces from a cell value.
                return blib.remove_links(part).replace(" ", "").strip()
            acc = clean_part(getp("4"))
            if "," in acc:
                nom = clean_part(getp("1"))
                gen = clean_part(getp("2"))
                dat = clean_part(getp("3"))
                ins = clean_part(getp("5"))
                loc = clean_part(getp("6"))
                acc_parts = acc.split(",")
                if len(acc_parts) == 2:
                    acc_in, acc_an = acc_parts
                    # Bail out of the whole page if the template carries
                    # any parameter other than the six positional cells.
                    for param in t.params:
                        pn = pname(param)
                        pv = str(param.value)
                        if not re.search("^[1-6]$", pn):
                            pagemsg("WARNING: Unrecognized param: %s=%s" % (pn, pv))
                            return
                    del t.params[:]
                    blib.set_template_name(t, "uk-adecl-manual")
                    t.add("special", "plonly\n", preserve_spacing=False)
                    t.add("nom_p", nom + "\n", preserve_spacing=False)
                    t.add("gen_p", gen + "\n", preserve_spacing=False)
                    t.add("dat_p", dat + "\n", preserve_spacing=False)
                    t.add("acc_p_in", acc_in + "\n", preserve_spacing=False)
                    t.add("acc_p_an", "%s,%s\n" % (acc_in, acc_an), preserve_spacing=False)
                    t.add("ins_p", ins + "\n", preserve_spacing=False)
                    t.add("loc_p", loc + "\n", preserve_spacing=False)
                    notes.append("replace {{uk-decl-num3}} with {{uk-adecl-manual}}")
                    pagemsg("Replaced %s with %s" % (origt, str(t)))

    return str(parsed), notes
parser = blib.create_argparser("Convert {{uk-decl-num3}} to {{uk-adecl-manual}}", include_pagefile=True, include_stdin=True)
args = parser.parse_args()
start, end = blib.parse_start_end(args.start, args.end)
# Run over all pages transcluding the template (or pagefile/stdin input).
blib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,
                           default_refs=["Template:uk-decl-num3"])
| [
"ben@benwing.com"
] | ben@benwing.com |
c82fb2af0351326e36a0a0e24533b4d2079e358d | 1c86bcfeb3f681b057cd8e3b8c2df196de5020e3 | /lib/python2.7/site-packages/tensorflow/core/framework/log_memory_pb2.py | 7e04e8f43e623e82fdaa2768753f115456a24fd2 | [] | no_license | AkiyoshiOkano/zuckerberg-detect-ai | 28d797f64113b0cae8ab90e3be6120d3a3daf2d8 | d47a88c057f65fbd49df76b1d3faab0a68e7334d | refs/heads/First_commit | 2022-12-01T21:28:21.628266 | 2017-05-18T00:35:26 | 2017-05-18T00:35:26 | 91,037,177 | 7 | 5 | null | 2022-11-28T10:39:16 | 2017-05-12T01:12:18 | Python | UTF-8 | Python | false | true | 15,750 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/log_memory.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import tensor_description_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__description__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/log_memory.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n*tensorflow/core/framework/log_memory.proto\x12\ntensorflow\x1a\x32tensorflow/core/framework/tensor_description.proto\"0\n\rMemoryLogStep\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\x12\x0e\n\x06handle\x18\x02 \x01(\t\"p\n\x19MemoryLogTensorAllocation\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\x12\x13\n\x0bkernel_name\x18\x02 \x01(\t\x12-\n\x06tensor\x18\x03 \x01(\x0b\x32\x1d.tensorflow.TensorDescription\"L\n\x1bMemoryLogTensorDeallocation\x12\x15\n\rallocation_id\x18\x01 \x01(\x03\x12\x16\n\x0e\x61llocator_name\x18\x02 \x01(\t\"{\n\x15MemoryLogTensorOutput\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\x12\x13\n\x0bkernel_name\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12-\n\x06tensor\x18\x04 \x01(\x0b\x32\x1d.tensorflow.TensorDescription\"\x8b\x01\n\x16MemoryLogRawAllocation\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x11\n\tnum_bytes\x18\x03 \x01(\x03\x12\x0b\n\x03ptr\x18\x04 \x01(\x04\x12\x15\n\rallocation_id\x18\x05 \x01(\x03\x12\x16\n\x0e\x61llocator_name\x18\x06 \x01(\t\"\x7f\n\x18MemoryLogRawDeallocation\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\x12\x11\n\toperation\x18\x02 \x01(\t\x12\x15\n\rallocation_id\x18\x03 \x01(\x03\x12\x16\n\x0e\x61llocator_name\x18\x04 \x01(\t\x12\x10\n\x08\x64\x65\x66\x65rred\x18\x05 \x01(\x08\x42-\n\x18org.tensorflow.frameworkB\x0fLogMemoryProtosP\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_tensor__description__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MEMORYLOGSTEP = _descriptor.Descriptor(
name='MemoryLogStep',
full_name='tensorflow.MemoryLogStep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemoryLogStep.step_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='handle', full_name='tensorflow.MemoryLogStep.handle', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=158,
)
_MEMORYLOGTENSORALLOCATION = _descriptor.Descriptor(
name='MemoryLogTensorAllocation',
full_name='tensorflow.MemoryLogTensorAllocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemoryLogTensorAllocation.step_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_name', full_name='tensorflow.MemoryLogTensorAllocation.kernel_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorflow.MemoryLogTensorAllocation.tensor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=272,
)
_MEMORYLOGTENSORDEALLOCATION = _descriptor.Descriptor(
name='MemoryLogTensorDeallocation',
full_name='tensorflow.MemoryLogTensorDeallocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='allocation_id', full_name='tensorflow.MemoryLogTensorDeallocation.allocation_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_name', full_name='tensorflow.MemoryLogTensorDeallocation.allocator_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=274,
serialized_end=350,
)
_MEMORYLOGTENSOROUTPUT = _descriptor.Descriptor(
name='MemoryLogTensorOutput',
full_name='tensorflow.MemoryLogTensorOutput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemoryLogTensorOutput.step_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_name', full_name='tensorflow.MemoryLogTensorOutput.kernel_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='index', full_name='tensorflow.MemoryLogTensorOutput.index', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorflow.MemoryLogTensorOutput.tensor', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=352,
serialized_end=475,
)
_MEMORYLOGRAWALLOCATION = _descriptor.Descriptor(
name='MemoryLogRawAllocation',
full_name='tensorflow.MemoryLogRawAllocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemoryLogRawAllocation.step_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='tensorflow.MemoryLogRawAllocation.operation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_bytes', full_name='tensorflow.MemoryLogRawAllocation.num_bytes', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ptr', full_name='tensorflow.MemoryLogRawAllocation.ptr', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocation_id', full_name='tensorflow.MemoryLogRawAllocation.allocation_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_name', full_name='tensorflow.MemoryLogRawAllocation.allocator_name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=478,
serialized_end=617,
)
_MEMORYLOGRAWDEALLOCATION = _descriptor.Descriptor(
name='MemoryLogRawDeallocation',
full_name='tensorflow.MemoryLogRawDeallocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_id', full_name='tensorflow.MemoryLogRawDeallocation.step_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='tensorflow.MemoryLogRawDeallocation.operation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocation_id', full_name='tensorflow.MemoryLogRawDeallocation.allocation_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_name', full_name='tensorflow.MemoryLogRawDeallocation.allocator_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred', full_name='tensorflow.MemoryLogRawDeallocation.deferred', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=619,
serialized_end=746,
)
_MEMORYLOGTENSORALLOCATION.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__description__pb2._TENSORDESCRIPTION
_MEMORYLOGTENSOROUTPUT.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__description__pb2._TENSORDESCRIPTION
DESCRIPTOR.message_types_by_name['MemoryLogStep'] = _MEMORYLOGSTEP
DESCRIPTOR.message_types_by_name['MemoryLogTensorAllocation'] = _MEMORYLOGTENSORALLOCATION
DESCRIPTOR.message_types_by_name['MemoryLogTensorDeallocation'] = _MEMORYLOGTENSORDEALLOCATION
DESCRIPTOR.message_types_by_name['MemoryLogTensorOutput'] = _MEMORYLOGTENSOROUTPUT
DESCRIPTOR.message_types_by_name['MemoryLogRawAllocation'] = _MEMORYLOGRAWALLOCATION
DESCRIPTOR.message_types_by_name['MemoryLogRawDeallocation'] = _MEMORYLOGRAWDEALLOCATION
MemoryLogStep = _reflection.GeneratedProtocolMessageType('MemoryLogStep', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGSTEP,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogStep)
))
_sym_db.RegisterMessage(MemoryLogStep)
MemoryLogTensorAllocation = _reflection.GeneratedProtocolMessageType('MemoryLogTensorAllocation', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGTENSORALLOCATION,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorAllocation)
))
_sym_db.RegisterMessage(MemoryLogTensorAllocation)
MemoryLogTensorDeallocation = _reflection.GeneratedProtocolMessageType('MemoryLogTensorDeallocation', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGTENSORDEALLOCATION,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorDeallocation)
))
_sym_db.RegisterMessage(MemoryLogTensorDeallocation)
MemoryLogTensorOutput = _reflection.GeneratedProtocolMessageType('MemoryLogTensorOutput', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGTENSOROUTPUT,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogTensorOutput)
))
_sym_db.RegisterMessage(MemoryLogTensorOutput)
MemoryLogRawAllocation = _reflection.GeneratedProtocolMessageType('MemoryLogRawAllocation', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGRAWALLOCATION,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogRawAllocation)
))
_sym_db.RegisterMessage(MemoryLogRawAllocation)
MemoryLogRawDeallocation = _reflection.GeneratedProtocolMessageType('MemoryLogRawDeallocation', (_message.Message,), dict(
DESCRIPTOR = _MEMORYLOGRAWDEALLOCATION,
__module__ = 'tensorflow.core.framework.log_memory_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.MemoryLogRawDeallocation)
))
_sym_db.RegisterMessage(MemoryLogRawDeallocation)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\017LogMemoryProtosP\001'))
# @@protoc_insertion_point(module_scope)
| [
"akiyoshiokano0415@gmail.com"
] | akiyoshiokano0415@gmail.com |
72b2edede7efbfa084df8c557aaab85598104bc3 | 424213d62d7e88a907bfc30b52cef61f6d4b95b7 | /018 - Maximum path sum I/018.py | ecf059895e9827aa6923af06d7ec177011db3c00 | [] | no_license | David-Jackson/project-euler-solutions | ae1b3e88987c4e428be046cb6d9562995e0e0298 | 5ba9d9914f6d170110c4f9b533357c167b9f986d | refs/heads/master | 2021-01-19T10:43:04.444228 | 2017-03-15T18:29:51 | 2017-03-15T18:29:51 | 82,206,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py |
# Project Euler 18: maximum top-to-bottom path sum in a triangle of numbers.
# NOTE(review): this is Python 2 code — `print` statement syntax, and `map`
# must return a list for the subscripting below to work.
with open("triangle.txt") as f:
    tri = [map(int, line.split()) for line in f]
# Fold the triangle bottom-up: each node becomes itself plus the larger of
# its two children, so after the loop the apex holds the best path sum.
for row_index in range(len(tri)-2, -1, -1):
    row = tri[row_index]
    for index in range(len(row)):
        node = row[index]
        tri[row_index][index] = (
            node + max(
                tri[row_index + 1][index],
                tri[row_index + 1][index + 1]
            )
        )
print "Answer:", tri[0][0]
| [
"jack8679@kettering.edu"
] | jack8679@kettering.edu |
a07ef36a3fa19fd0fe0ac1f21c201d243dd28783 | 1695fc01287375f7edba4956729df8adab3963a4 | /tests/integration_tests/functional/test_rtc.py | 180e1cc82588f4d16b8d5f2af4d39c1347385944 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | WalterSmuts/firecracker | eb8dbd4975d891544fe22707585b4e0e4ba9de43 | d99423ad7c18a71ee5dbccf3d9beca5a42503efe | refs/heads/main | 2023-07-02T08:36:13.152473 | 2021-08-12T09:54:14 | 2021-08-13T08:36:21 | 395,273,105 | 0 | 0 | Apache-2.0 | 2021-08-12T09:59:25 | 2021-08-12T09:59:24 | null | UTF-8 | Python | false | false | 1,303 | py | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Check the well functioning af the RTC device on aarch64 platforms."""
import re
import platform
import pytest
import framework.utils as utils
from host_tools.network import SSHConnection
DMESG_LOG_REGEX = r'rtc-pl031\s+(\d+).rtc: setting system clock to'
@pytest.mark.skipif(
    platform.machine() != "aarch64",
    reason="RTC exists only on aarch64."
)
def test_rtc(test_microvm_with_ssh, network_config):
    """
    Test RTC functionality on aarch64.

    @type: functional
    """
    vm = test_microvm_with_ssh
    vm.spawn()
    vm.memory_monitor = None
    vm.basic_config()
    _tap, _, _ = vm.ssh_network_config(network_config, '1')
    vm.start()
    conn = SSHConnection(vm.ssh_config)

    # Check that the kernel created a pl031 RTC and used it to set the clock.
    _, stdout, _ = conn.execute_command("dmesg")
    rtc_log = re.findall(DMESG_LOG_REGEX, stdout.read())
    # BUGFIX: re.findall() always returns a list, never None, so the original
    # `assert rtc_log is not None` could never fail. Assert non-empty instead.
    assert rtc_log

    # The guest must expose the RTC device node.
    _, stdout, _ = conn.execute_command("stat /dev/rtc0")
    assert "character special file" in stdout.read()

    # Guest wall-clock time should track the host within a few seconds.
    _, host_stdout, _ = utils.run_cmd("date +%s")
    _, guest_stdout, _ = conn.execute_command("date +%s")
    assert abs(int(guest_stdout.read()) - int(host_stdout)) < 5
| [
"plp.github@gmail.com"
] | plp.github@gmail.com |
2355dbd99fc92e0a506ba9ab70095fe05701cfae | 897980906263aa72e8bd8e77fe7ea6d346ba36df | /bot/commands/infractions.py | 880d5a1f6456bb6a2536cff123c7cd8f903a3398 | [
"MIT"
] | permissive | rembutquaglet/forum-sweats | 0c9441b5f013e23ea0f4882bdefa00ef7703040a | 54c348de7cd57d4172ecd1fd241137c7a276c944 | refs/heads/master | 2023-01-02T06:33:16.203550 | 2020-10-24T04:15:24 | 2020-10-24T04:15:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | from utils import confirmed_emoji
from ..discordbot import has_role
from ..betterbot import Member
import discord
import db
name = 'infractions'  # command name the bot framework registers this handler under
bot_channel = False  # presumably: command is not restricted to the bot channel — confirm in the command loader
async def run(message, member: Member = None):
	'Checks the infractions that a user has (mutes, warns, bans, etc)'
	# Default to the invoking user when no target member is given.
	target = member if member is not None else message.author
	checking_self = message.author.id == target.id

	# Only staff (helper / trial helper) may inspect other members.
	if not checking_self:
		is_staff = (
			has_role(message.author.id, 717904501692170260, 'helper')
			or has_role(message.author.id, 717904501692170260, 'trialhelper')
		)
		if not is_staff:
			return

	infractions = await db.get_infractions(target.id)

	embed = discord.Embed(
		title='Your infractions' if checking_self else f'{target}\'s infractions'
	)

	# Show at most the 30 most recent infractions, one embed field each.
	for infraction in infractions[-30:]:
		reason = infraction.get('reason') or '<no reason>'
		partial_id = infraction['_id'][:8]
		if 'date' in infraction:
			date_pretty = infraction['date'].strftime('%m/%d/%Y')
			field_name = f"{infraction['type']} ({date_pretty} {partial_id})"
		else:
			field_name = f"{infraction['type']} ({partial_id})"
		# Discord caps field values; truncate long reasons.
		if len(reason) > 1000:
			reason = reason[:1000] + '...'
		embed.add_field(
			name=field_name,
			value=reason,
			inline=False
		)

	if not infractions:
		embed.description = 'No infractions'

	if checking_self:
		# Self-checks are DM'd; confirm in-channel with a reaction.
		await message.author.send(embed=embed)
		await message.add_reaction(confirmed_emoji)
	else:
		await message.send(embed=embed)
| [
"27899617+mat-1@users.noreply.github.com"
] | 27899617+mat-1@users.noreply.github.com |
e06190c723711959a7d80df49b1943a925d32b9b | 655a30a543c9fccab2da3eb3bbc1b1755ac1630a | /app.py | 4570b444fa6e366156ef4f0edc32bd4ee05248af | [] | no_license | soyam5/NBAproject | 8ba4bd7e2253ec37d03ec2d28e995e91a3c36701 | 462e1b01901baff3cb30e61c2711c46a07f015ae | refs/heads/master | 2020-09-03T18:39:46.267296 | 2019-12-04T08:44:27 | 2019-12-04T08:44:27 | 219,535,463 | 0 | 0 | null | 2019-11-05T16:01:32 | 2019-11-04T15:36:30 | Python | UTF-8 | Python | false | false | 5,480 | py | from flask import Flask,render_template,request,url_for,json
from databasetool import *
import databasetool
from flask import jsonify
from draw_tool import *
app = Flask(__name__)
# Module-level state loaded once at import time and shared by every request.
draw_tool=draw_event()
rank_data = databasetool.rank_data()
team_data=databasetool.team_data()
# NOTE(review): play1/play2 are never read or written in this file —
# presumably leftover scratch state; confirm before removing.
play1=""
play2=""
@app.route('/')
def index():
    # Landing page; renders the static index template.
    return render_template("index.html")
@app.route('/api_upload',methods=['POST'])
def api_upload():
    """Search endpoint: the raw POST body is the query string.

    Returns the databasetool.search result serialized as JSON.
    (Leftover debug prints removed.)
    """
    query = request.get_data().decode()
    result = databasetool.search(query)
    return jsonify(result)
# Ranking pages 1-6: each renders its own template against the shared
# `rank_data` snapshot loaded once at import time.  NOTE(review): heavy
# duplication — a single parameterized route would do; left unchanged here.
@app.route('/page1/')
def page1():
    return render_template("page1.html",data=rank_data)
@app.route('/page2/')
def page2():
    return render_template("page2.html",data=rank_data)
@app.route('/page3/')
def page3():
    return render_template("page3.html",data=rank_data)
@app.route('/page4/')
def page4():
    return render_template("page4.html",data=rank_data)
@app.route('/page5/')
def page5():
    return render_template("page5.html",data=rank_data)
@app.route('/page6/')
def page6():
    return render_template("page6.html",data=rank_data)
@app.route('/player/')
def player():
    """Player detail page.

    Collects the player's basic info, current/normal/playoff game logs and
    three sets of per-stat plot images, then renders player.html.

    Fixes vs. the original: `avg_data_normal_get` was queried twice for the
    same result (now fetched once and reused for data2/data4), and the three
    near-identical image-collection loops are factored into a helper.
    """
    player_name = request.values.get('name')

    player_basis_info = databasetool.player_basis_info(player_name)
    player_current_game = databasetool.player_current_game(player_name)
    history_normal_data = databasetool.history_normal_data(player_name)
    history_jihousai_data = databasetool.history_jihousai_data(player_name)
    # Reused for both the "normal" and "playoff" sections of the template.
    avg_data_normal_get = databasetool.avg_data_normal_get(player_name)

    imgList1 = _collect_plot_imgs(player_name, 17, 3)
    imgList2 = _collect_plot_imgs(player_name, 18, 1)
    imgList3 = _collect_plot_imgs(player_name, 18, 2)

    return render_template("player.html",
                           data=player_basis_info,
                           data1=player_current_game,
                           len1=len(player_current_game),
                           data2=avg_data_normal_get,
                           data3=history_normal_data,
                           len3=len(history_normal_data),
                           data4=avg_data_normal_get,
                           data5=history_jihousai_data,
                           len5=len(history_jihousai_data),
                           list1=imgList1,
                           list2=imgList2,
                           list3=imgList3
                           )


def _collect_plot_imgs(player_name, count, mode):
    # Gather the non-empty plot images for stat indices [0, count).
    imgs = []
    for i in range(count):
        img = draw_tool.draw_plot_interface(player_name, i, mode)
        if img:
            imgs.append(img)
    return imgs
@app.route('/team/')
def team():
    # `team_data` is loaded once at import time; the template receives the
    # rows plus their count (kwarg name "lenth" is what the template expects).
    content=len(team_data)
    return render_template("teams.htm",data=team_data,lenth=content)
@app.route('/predict/')
def predict():
    """Render the upcoming-game prediction table."""
    games = databasetool.predict_game()
    return render_template("predict.html", data=games, lenth=len(games))
@app.route('/compare/',methods=['post', 'get'])
def compare():
    # Player-comparison AJAX endpoint.  GET renders the page; POST receives a
    # plain-text message whose contents select a handler (team name, or a
    # "one"/"second"/"third" tag) and returns a space-separated data string.
    data = ""
    qd_name = getname()  # space-separated list of all team names
    if request.method == 'POST':
        rev = request.get_data().decode()
        # Inspect the received message and call the matching helper.
        # A team name fetches that team's member list.
        if rev in qd_name:
            print(0000)
            data = qd_names_get(rev)
        elif "one" in rev:
            print(111)
            data = get_year(rev)
        elif "second" in rev:
            print(222)
            data = player_basis_infos(rev)
        elif "third" in rev:
            print(333)
            # Three comparison chart images joined with spaces.
            data+=get_img1(rev)
            data+=" "+get_img2(rev)
            data+=" "+get_img3(rev)
        print(data)
        return data
    else:
        return render_template("player_compare.html")
# Build the space-separated list of seasons for the queried player.
def get_year(info):
    """Return the seasons matched by `year_get` joined with trailing spaces."""
    parts = info.split(" ")
    rows = year_get(parts[1], parts[2], parts[3])
    return "".join("".join(row) + " " for row in rows)
def get_img1(msg):
    """First comparison chart: per-year subplot for the first player."""
    fields = msg.split(" ")
    result = draw_tool.draw_subplot_year_interface(int(fields[1]), fields[2], fields[4])
    print(type(result))
    return result
def get_img2(msg):
    """Second comparison chart: multi-player subplot."""
    fields = msg.split(" ")
    print(fields)
    result = draw_tool.draw_subplot_many_interface(fields[2], fields[3], int(fields[1]), str(fields[4]))
    print(result)
    return result
# Fetch every team name as a single space-separated string.
def getname():
    """Return all team names joined with trailing spaces."""
    rows = search_team_name()
    return "".join("".join(row) + " " for row in rows)
def get_img3(msg):
    """Third comparison chart: per-year subplot for the second player."""
    fields = msg.split(" ")
    return draw_tool.draw_subplot_year_interface(int(fields[1]), fields[3], fields[4])
def qd_names_get(name):
    """Return the given team's player names as one space-separated string."""
    rows = search_name_from_team(name)
    return "".join("".join(row) + " " for row in rows)
# Fetch one player's basic info flattened into a space-separated string.
def player_basis_infos(name):
    """Look up basic info for the player named after the tag in `name`."""
    parts = name.split(" ")
    print(parts[1])
    info = player_basis_info(parts[1])
    return "".join("".join(cell) + " " for row in info for cell in row)
if __name__ == '__main__':
    # Dev entry point: Flask's built-in server with default settings.
    app.run()
| [
"noreply@github.com"
] | soyam5.noreply@github.com |
c427f26b73f072b8ce4f83e435df6bceb8cbe557 | 4bd1c32535de5ebd7cf3646e9bbb9093361ae991 | /functions.py | 00c68e4b3c248782a07fac6f4eb227cb46b8ea28 | [] | no_license | jahoo459/self_driving_car_nd_project_2 | 01a639ac4dc937ca21701958d4cf3f73e530b51e | 4108933b1ea1fc0b35f03fbf516390773e1f0ecd | refs/heads/master | 2020-06-02T12:45:03.041019 | 2019-06-17T19:26:50 | 2019-06-17T19:26:50 | 191,157,287 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,676 | py | import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from AdvancedLineDetection import Line
# Global variables
# Real-world distance conversion factors (metres per pixel in the warped
# bird's-eye view): ~30 m of lane length per 720 px vertically and a 3.7 m
# lane width per ~700 px horizontally — standard US-highway assumptions;
# TODO confirm against the perspective-transform destination points.
ym_per_pix = 30 / 720
xm_per_pix = 3.7 / 700
def bgr2rgb(rgb_img):
    # Convert an OpenCV BGR image to RGB (e.g. for matplotlib display).
    # NOTE(review): despite its name, the parameter receives a BGR image.
    return cv2.cvtColor(rgb_img, cv2.COLOR_BGR2RGB)
def calibrateCamera():
    """Calibrate the camera from the 9x6 chessboard images in camera_cal/.

    Returns the cv2.calibrateCamera results: (ret, mtx, dist, rvecs, tvecs).
    Raises RuntimeError when no usable calibration image is found — the
    original code died with an unrelated NameError (unbound `gray`) in that
    case, and crashed outright on an unreadable image file.
    """
    # One set of board object points, like (0,0,0), (1,0,0), ... (8,5,0),
    # reused for every image (the chessboard geometry never changes).
    objp = np.zeros((6 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    image_size = None

    # Step through the calibration images and collect chessboard corners.
    for fname in glob.glob('camera_cal/calibration*.jpg'):
        img = cv2.imread(fname)
        if img is None:
            continue  # unreadable file: skip rather than crash in cvtColor
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        image_size = gray.shape[::-1]

        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

    if image_size is None or not objpoints:
        raise RuntimeError('no usable calibration images found in camera_cal/')

    # Compute the camera matrix and distortion coefficients.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, image_size, None, None)
    return ret, mtx, dist, rvecs, tvecs
def apply_threshold_operations(img):
    """Binary lane mask: x-gradient threshold OR'd with an HLS S-channel threshold.

    Expects an undistorted BGR image; returns a single-channel binary image
    whose lane-candidate pixels are 1.
    """
    # Gradient threshold on grayscale: Sobel in x accentuates the
    # near-vertical lane edges.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_sobel_x = np.absolute(sobel_x)
    scaled = np.uint8(255 * abs_sobel_x / np.max(abs_sobel_x))
    grad_binary = np.zeros_like(scaled)
    grad_binary[(scaled >= 20) & (scaled <= 100)] = 1

    # Color threshold on the HLS saturation channel.
    saturation = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2]
    color_binary = np.zeros_like(saturation)
    color_binary[(saturation > 90) & (saturation <= 255)] = 1

    # A pixel counts as lane if either threshold fires.
    combined = np.zeros_like(grad_binary)
    combined[(color_binary == 1) | (grad_binary == 1)] = 1
    return combined
def apply_perspective_transform(undistored_img, combined_binary):
    """Warp the binary lane image to a bird's-eye view.

    Returns (warped, M, Minv): M maps the hand-picked road trapezoid onto a
    rectangle; Minv is the inverse, used to project lane overlays back onto
    the original perspective.  `undistored_img` is accepted for interface
    compatibility (it was only used for visualization in the original).
    """
    # Source trapezoid picked by hand on the undistorted 1280x720 frame.
    src = np.float32([[231, 690], [1075, 690], [713, 465], [570, 465]])

    margin = 200
    width, height = combined_binary.shape[::-1]
    # Destination rectangle: trapezoid stretched to full image height with a
    # horizontal margin on both sides.
    dst = np.float32([[margin, height],
                      [width - margin, height],
                      [width - margin, 0],
                      [margin, 0]])

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(combined_binary, M, (width, height))
    return warped, M, Minv
def fit_polynomial_init(binary_warped, lines: Line):
    # Full (re)initialization pass: locate lane pixels from scratch with the
    # sliding-window search, fit both polynomials, and push the results onto
    # the two Line state objects (lines[0] = left lane, lines[1] = right).
    # NOTE: also opens a matplotlib figure as a visualization side effect.
    # Find our lane pixels first using sliding windows approach
    leftx, lefty, rightx, righty, out_img = find_lane_pixels_sliding_windows(binary_warped)
    # Fit a second order polynomial for both lines
    left_fit, right_fit, left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr, offset_m = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
    # Update the lines
    update_lines(lines, left_fit, right_fit, leftx, rightx,
                 lefty, righty, left_fitx, right_fitx, left_fit_cr, right_fit_cr, ploty, offset_m)
    # Colors in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    ### Visualization ###
    # Plots the left and right polynomials on the lane lines
    plt.figure()
    plt.imshow(out_img)
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    ### Visualization ###
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit 2nd-order polynomials x = f(y) to both lane pixel sets.

    Returns the pixel-space fits, their sampled x values, the y sample grid,
    the metre-space fits (using the module-level m/px factors) and the car's
    signed offset from the lane centre in metres.
    """
    # Metre-space fits (for curvature) and pixel-space fits (for drawing).
    left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Sample each fit once per image row.
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

    # Offset from lane centre, measured at the bottom row, assuming the
    # camera sits at image centre (x = 640 on a 1280-wide frame).
    center = 640
    offset_m = ((center - left_fitx[-1]) - (right_fitx[-1] - center)) * xm_per_pix

    return left_fit, right_fit, left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr, offset_m
def find_lane_pixels_sliding_windows(warped):
    """Locate lane-line pixels in a warped binary image with sliding windows.

    Returns (leftx, lefty, rightx, righty, out_img) where out_img is a
    3-channel visualization with the search windows drawn in green.
    """
    height = warped.shape[0]

    # Column histogram of the bottom half gives each line's starting x.
    histogram = np.sum(warped[height // 2:, :], axis=0)

    # 3-channel copy used purely for visualization.
    out_img = np.dstack((warped, warped, warped)) * 255

    # Search left of the midpoint for the left line, right of it for the right.
    midpoint = np.int(histogram.shape[0] // 2)
    leftx_current = np.argmax(histogram[:midpoint])
    rightx_current = np.argmax(histogram[midpoint:]) + midpoint

    # Tunables: window count, window half-width, and the minimum hit count
    # required before a window recenters.
    nwindows = 9
    margin = 80
    minpix = 150
    window_height = np.int(height // nwindows)

    # Coordinates of every activated (nonzero) pixel in the image.
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    left_lane_inds = []
    right_lane_inds = []

    # Walk the windows from the bottom of the image to the top.
    for window in range(nwindows):
        win_y_low = height - (window + 1) * window_height
        win_y_high = height - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw both search windows on the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)

        # Indices of activated pixels inside each window.
        in_rows = (nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
        good_left_inds = (in_rows & (nonzerox >= win_xleft_low)
                          & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = (in_rows & (nonzerox >= win_xright_low)
                           & (nonzerox < win_xright_high)).nonzero()[0]

        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # Recenter the next window on the mean x of the hits, if enough found.
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Flatten the per-window index lists into one array per line.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Nothing was found at all; keep the (empty) lists as-is.
        pass

    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    return leftx, lefty, rightx, righty, out_img
def update_lines(lines, left_fit, right_fit, leftx, rightx,
                 lefty, righty, left_fitx, right_fitx, left_fit_cr, right_fit_cr, ploty, offset_m):
    """Record the latest fit results on the pair of Line trackers.

    ``lines[0]`` tracks the left lane line, ``lines[1]`` the right one.  An
    empty fit (``[]``) means the corresponding line was not detected in this
    frame, in which case only the ``detected`` flag is cleared.

    NOTE(review): the original ``lines: Line`` annotation was dropped -
    ``lines`` is a sequence of two Line objects, not a single Line.
    """
    # len() avoids numpy's ambiguous/elementwise `!= []` comparison when the
    # fit is an ndarray.
    if len(left_fit) > 0:  # left line was detected
        lines[0].detected = True
        lines[0].detetction_counter = 0  # attribute name kept as declared on Line (sic)
        lines[0].previous_fit = lines[0].current_fit
        lines[0].recent_xfitted = left_fitx
        lines[0].current_fit = left_fit
        lines[0].allx = leftx
        lines[0].ally = lefty
        lines[0].current_fit_cr = left_fit_cr
        lines[0].ploty = ploty
        lines[0].dist_from_center_m = offset_m
    else:  # left line wasn't detected
        lines[0].detected = False
    if len(right_fit) > 0:  # right line was detected
        lines[1].detected = True
        lines[1].detetction_counter = 0
        # Bug fix: previous_fit must come from the right line's own history,
        # not from lines[0].
        lines[1].previous_fit = lines[1].current_fit
        lines[1].recent_xfitted = right_fitx
        lines[1].current_fit = right_fit
        # Bug fix: the right-lane pixel coordinates belong on lines[1];
        # the original overwrote the left line's allx/ally instead.
        lines[1].allx = rightx
        lines[1].ally = righty
        lines[1].current_fit_cr = right_fit_cr
        lines[1].ploty = ploty
        lines[1].dist_from_center_m = offset_m
    else:  # right line wasn't detected
        lines[1].detected = False
def search_around_poly(binary_warped, lines):
    """Search for lane pixels in a corridor around the previous frame's fits.

    Instead of a blind sliding-window search, activated pixels are only
    considered within +/- ``margin`` of each line's last fitted polynomial.
    Updates ``lines`` in place (via fit_poly/update_lines) and returns the
    visualization image with the search corridors shaded onto it.

    Bug fix: the composed ``result`` overlay was previously computed and
    then silently discarded; it is now returned.  The function used to
    return None implicitly, so callers that ignored the result are
    unaffected.

    NOTE(review): the original ``lines: Line`` annotation was dropped -
    ``lines`` is a sequence of two Line objects.
    """
    # Width of the corridor searched around each previous polynomial (pixels).
    margin = 100
    # Grab activated (non-zero) pixel coordinates.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    left_fit = lines[0].current_fit
    right_fit = lines[1].current_fit
    # Keep pixels whose x lies within +/- margin of each fitted curve at their y.
    left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
                                   left_fit[2] - margin)) &
                      (nonzerox < (left_fit[0] * (nonzeroy ** 2) +
                                   left_fit[1] * nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
                                    right_fit[2] - margin)) &
                       (nonzerox < (right_fit[0] * (nonzeroy ** 2) +
                                    right_fit[1] * nonzeroy + right_fit[2] + margin)))
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit new polynomials through the selected pixels.
    left_fit, right_fit, left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr, offset_m = \
        fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
    # Update the line trackers with the fresh fits.
    update_lines(lines, left_fit, right_fit, leftx, rightx,
                 lefty, righty, left_fitx, right_fitx, left_fit_cr, right_fit_cr, ploty, offset_m)
    # --- Visualization -----------------------------------------------------
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    window_img = np.zeros_like(out_img)
    # Color the detected left (red) and right (blue) lane pixels.
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Build polygons covering the search corridors, recast into the format
    # cv2.fillPoly() expects (left edge top-to-bottom, right edge flipped).
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
                                                                    ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
                                                                     ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Shade the corridors in green and blend them onto the pixel image.
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    return result
def measure_curvature_real(lines):
    """Compute each lane line's radius of curvature in meters.

    Reads the meter-space polynomial fits stored on the Line trackers and
    writes the resulting radii back onto
    ``lines[0]/lines[1].radius_of_curvature_cr``.
    """
    # Pixel-to-meter conversion factors.
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension (kept for reference)
    ploty = lines[0].ploty
    # Evaluate at the bottom of the image, i.e. the largest y value.
    y_eval = np.max(ploty)

    def _radius(fit_cr):
        # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|, with y expressed in meters.
        slope_term = 2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]
        return (1 + slope_term ** 2) ** 1.5 / np.absolute(2 * fit_cr[0])

    lines[0].radius_of_curvature_cr = _radius(lines[0].current_fit_cr)
    lines[1].radius_of_curvature_cr = _radius(lines[1].current_fit_cr)
def reproject_lines(lines, warped, Minv, image):
    """Paint the detected lane area back onto the original camera image.

    The lane polygon is drawn in warped (bird's-eye) space, unwarped with
    the inverse perspective matrix ``Minv``, and alpha-blended over
    ``image``.  Returns the blended frame.
    """
    # Blank 3-channel canvas in warped space.
    zero_plane = np.zeros_like(warped).astype(np.uint8)
    lane_canvas = np.dstack((zero_plane, zero_plane, zero_plane))
    ploty = lines[0].ploty
    # Left boundary runs top-to-bottom, right boundary bottom-to-top, so the
    # concatenated points trace the lane polygon's outline for fillPoly().
    left_boundary = np.array([np.transpose(np.vstack([lines[0].recent_xfitted, ploty]))])
    right_boundary = np.array([np.flipud(np.transpose(np.vstack([lines[1].recent_xfitted,
                                                                 ploty])))])
    lane_outline = np.hstack((left_boundary, right_boundary))
    # Fill the lane area in green on the warped canvas.
    cv2.fillPoly(lane_canvas, np.int_([lane_outline]), (0, 255, 0))
    # Unwarp back to the camera view and blend with the source frame.
    unwarped = cv2.warpPerspective(lane_canvas, Minv, (image.shape[1], image.shape[0]))
    return cv2.addWeighted(image, 1, unwarped, 0.3, 0)
def write_measured_info(img, lines):
    """Overlay curvature radii and center offset as text onto ``img``.

    Radii above 5000 m are rendered as "inf" (an essentially straight
    road).  ``img`` is mutated in place by cv2.putText.
    """
    curvature_left = lines[0].radius_of_curvature_cr
    curvature_right = lines[1].radius_of_curvature_cr
    dist_from_center = lines[0].dist_from_center_m
    # Text layout parameters for cv2.putText.
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerY = 100
    bottomLeftCornerX = 10
    delta = 50  # vertical spacing between successive text lines (pixels)
    fontScale = 1
    fontColor = (255,255,255)
    lineType = 2
    text = []
    # Treat very large radii as "infinite" (straight road).
    if(curvature_left > 5000):
        text.append("Radius Left: inf")
    else:
        text.append(str("Radius Left: " + str(int(curvature_left)) + "m."))
    if(curvature_right > 5000):
        text.append("Radius right: inf")
    else:
        text.append(str("Radius right: " + str(int(curvature_right)) + "m."))
    text.append(str("Distance from center: " + str("{:.3f}".format(dist_from_center)) + "m."))
    # bottom left corner changes
    blc = [bottomLeftCornerX, 0]
    # Draw text on the image
    for i in range(len(text)):
        blc[1] = bottomLeftCornerY + i * delta
        cv2.putText(img, text[i], tuple(blc), font, fontScale, fontColor, lineType)
return img | [
"jan@haberny.pl"
] | jan@haberny.pl |
fbfa4af6739e251fef1d94b0ce852a6cb2c6cca3 | c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec | /ostPython4/context_mgr.py | 5d67ab14436a6f258a36aef585b8624eba812c9d | [] | no_license | deepbsd/OST_Python | 836d4fae3d98661a60334f66af5ba3255a0cda5c | b32f83aa1b705a5ad384b73c618f04f7d2622753 | refs/heads/master | 2023-02-14T17:17:28.186060 | 2023-01-31T02:09:05 | 2023-01-31T02:09:05 | 49,534,454 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #!/usr/bin/env python3
#
#
# context_mgr.py
#
# Lesson 14: Context Managers
#
# by David S. Jackson
# 8/17/15
#
# OST Python4: Advanced Python
# for Pat Barton, Instructor
#
"""
Project:
Write a context manager class that suppresses any ValueError
exceptions that occur in the controlled suite, but allows any
other exception to be raised in the surrounding context.
"""
class ctx_mgr:
    """Context manager that suppresses ValueError raised in its suite.

    Any other exception propagates to the surrounding context unchanged.

    Parameters
    ----------
    raising : bool
        Kept for interface compatibility; it is reset to True on exit, so a
        ValueError is always suppressed exactly as the assignment requires.
    """

    def __init__(self, raising=True):
        self.raising = raising

    def __enter__(self):
        # Return a fresh sentinel object as the `as` target (the manager
        # itself carries no useful state for the controlled suite).
        cm = object()
        return cm

    def __exit__(self, exc_type, exc_val, exc_tb):
        # self.raising can be overridden by callers, so reset it explicitly.
        self.raising = True
        # Bug fix: match ValueError *subclasses* too (e.g. UnicodeDecodeError),
        # not only the exact class, per "suppresses any ValueError".
        if exc_type is not None and issubclass(exc_type, ValueError):
            # A truthy return value tells the interpreter to swallow the
            # exception.
            return self.raising
        # Returning False lets any other exception propagate naturally -
        # safer than the bare `raise` the original relied on inside __exit__.
        return False
if __name__ == "__main__":
    # Interactive demo: a ValueError raised inside the with-suite is
    # suppressed by ctx_mgr, while IndexError/ZeroDivisionError propagate.
    with ctx_mgr(raising=True) as cm:
        print('To create ValueError, enter a float or string.')
        num = int(input("Enter a number: "))  # non-integer input -> ValueError (suppressed)
        print('To create an IndexError, enter an int greater than 4.')
        myindex = int(input('lst1 = [1,2,3,4,5]. What index is number 4? '))
        lst1 = [1,2,3,4,5]
        print("The value you selected is: ", lst1[myindex])  # may raise IndexError (propagates)
        print("Divide by zero!", 3/0)  # ZeroDivisionError always propagates
"deepbsd@yahoo.com"
] | deepbsd@yahoo.com |
47090964e324910f247fd920b15518fdb4231728 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.5.3-all/StudyTensroFlow/keras/tests/keras/engine/test_training.py | 6854ffaec08ce2a5aade75e5566d2eb9ec2b49fb | [
"MIT"
] | permissive | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 43,976 | py | import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
import sys
import scipy.sparse as sparse
import keras
from keras import losses
from keras.layers import Dense, Dropout
from keras.engine.topology import Input
from keras.engine.training import Model
from keras.engine.training import _check_loss_and_target_compatibility
from keras.engine.training import _weighted_masked_objective
from keras.engine.training import _check_array_lengths
from keras.engine.training import _slice_arrays
from keras.models import Sequential
from keras import backend as K
from keras.utils import Sequence
from keras.utils.test_utils import keras_test
from keras.callbacks import LambdaCallback
class RandomSequence(Sequence):
    """Keras Sequence yielding random two-input / two-output batches.

    Each batch consists of two (batch_size, 3) inputs and targets of widths
    4 and 3, matching the two-headed test model used throughout this file.
    """

    def __init__(self, batch_size, sequence_length=12):
        self.batch_size = batch_size
        self.sequence_length = sequence_length

    def __len__(self):
        # Number of batches per epoch.
        return self.sequence_length

    def __getitem__(self, idx):
        inputs = [np.random.random((self.batch_size, 3)),
                  np.random.random((self.batch_size, 3))]
        targets = [np.random.random((self.batch_size, 4)),
                   np.random.random((self.batch_size, 3))]
        return inputs, targets

    def on_epoch_end(self):
        pass
@keras_test
def test_check_array_lengths():
    """_check_array_lengths accepts consistent x/y/weight sets and rejects mismatches."""
    _check_array_lengths(None, None, None)
    a_np = np.random.random((4, 3, 3))
    _check_array_lengths(a_np, a_np, a_np)
    _check_array_lengths([a_np, a_np], [a_np, a_np], [a_np, a_np])
    _check_array_lengths([None], [None], [None])
    b_np = np.random.random((3, 4))
    # Mismatched first-axis lengths (4 vs 3) or missing companions must raise.
    with pytest.raises(ValueError):
        _check_array_lengths(a_np, None, None)
    with pytest.raises(ValueError):
        _check_array_lengths(a_np, a_np, None)
    with pytest.raises(ValueError):
        _check_array_lengths([a_np], [None], None)
    with pytest.raises(ValueError):
        _check_array_lengths([a_np], [b_np], None)
    with pytest.raises(ValueError):
        _check_array_lengths([a_np], None, [b_np])
@keras_test
def test_slice_arrays():
    """_slice_arrays tolerates ndarrays, lists containing Nones, [None], and None."""
    input_a = np.random.random((10, 3))
    _slice_arrays(None)
    _slice_arrays(input_a, 0)
    _slice_arrays(input_a, 0, 1)
    _slice_arrays(input_a, stop=2)
    input_a = [None, [1, 1], None, [1, 1]]
    _slice_arrays(input_a, 0)
    _slice_arrays(input_a, 0, 1)
    _slice_arrays(input_a, stop=2)
    input_a = [None]
    _slice_arrays(input_a, 0)
    _slice_arrays(input_a, 0, 1)
    _slice_arrays(input_a, stop=2)
    input_a = None
    _slice_arrays(input_a, 0)
    _slice_arrays(input_a, 0, 1)
    _slice_arrays(input_a, stop=2)
@keras_test
def test_weighted_masked_objective():
    """Wrapping a loss with _weighted_masked_objective yields a callable fn."""
    a = Input(shape=(3,), name='input_a')
    # weighted_masked_objective
    def mask_dummy(y_true=None, y_pred=None, weight=None):
        return K.placeholder(y_true.shape)
    weighted_function = _weighted_masked_objective(losses.categorical_crossentropy)
    # Smoke test: the wrapped loss must accept (y_true, y_pred, weights=None).
    weighted_function(a, a, None)
@keras_test
def test_model_methods():
    """End-to-end smoke test of the Model training/evaluation/prediction API.

    Builds a two-input/two-output model and exercises train_on_batch, fit,
    test_on_batch, predict(_on_batch), evaluate, the *_generator variants
    (including Sequence inputs), sample weights, metrics configurations,
    and the error paths for malformed arguments.
    """
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    # test train_on_batch with list / dict-keyed-by-input / dict-keyed-by-output data
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})
    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)
    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        {'dense_1': output_a_np, 'dropout': output_b_np}))
    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})
    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)
    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'],
                  sample_weight_mode=None)
    # 5 outputs: total loss + per-output loss + per-output metric
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5
    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4
    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4
    # test starting from non-zero initial epoch
    trained_epochs = []
    trained_batches = []
    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)
    def on_batch_begin(batch, logs):
        trained_batches.append(batch)
    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin,
                                on_batch_begin=on_batch_begin)
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]
    # test starting from non-zero initial epoch for generator too
    trained_epochs = []
    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]
    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))
    model.compile(optimizer, loss, metrics=[mse],
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
    # enable verbose for evaluate_generator
    out = model.evaluate_generator(gen_data(4), steps=3, verbose=1)
    # empty batch
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.evaluate_generator(gen_data(), steps=1)
    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])
    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])
    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1], sample_weight[1][:2]])
    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))
    # `validation_data` is neither a tuple nor a triple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))
    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])
    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})
    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])
    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))
    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode={'lstm': 'temporal'})
    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])
    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': 'temporal'})
    # `loss` does not exist.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=[])
    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse', loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])
    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[None, np.random.random((10, 20, 30))])
    model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])
    # the rank of output arrays should be at least 3D.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3), steps_per_epoch=3, epochs=5,
                              initial_epoch=0, validation_data=RandomSequence(4),
                              validation_steps=3, callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(3)) * 5
    # steps_per_epoch will be equal to len of sequence if it's unspecified
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3), epochs=5,
                              initial_epoch=0, validation_data=RandomSequence(4),
                              callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(12)) * 5
    # fit_generator will throw an exception if steps is unspecified for regular generator
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.fit_generator(generator=gen_data(), epochs=5,
                                  initial_epoch=0, validation_data=gen_data(),
                                  callbacks=[tracker_cb])
    # Check if generator is only accessed an expected number of times
    gen_counters = [0, 0]
    def gen_data(i):
        while True:
            gen_counters[i] += 1
            yield ([np.random.random((1, 3)), np.random.random((1, 3))],
                   [np.random.random((1, 4)), np.random.random((1, 3))])
    out = model.fit_generator(generator=gen_data(0), epochs=3,
                              steps_per_epoch=2,
                              validation_data=gen_data(1),
                              validation_steps=1,
                              max_queue_size=2,
                              workers=2)
    # Need range check here as filling of the queue depends on sleep in the enqueuers
    assert 6 <= gen_counters[0] <= 8
    # 12 = (epoch * workers * validation steps * max_queue_size)
    assert 3 <= gen_counters[1] <= 12
    gen_counters = [0]
    out = model.fit_generator(generator=RandomSequence(3), epochs=3,
                              validation_data=gen_data(0),
                              validation_steps=1,
                              max_queue_size=2,
                              workers=2)
    # 12 = (epoch * workers * validation steps * max_queue_size)
    # Need range check here as filling of the queue depends on sleep in the enqueuers
    assert 3 <= gen_counters[0] <= 12
    # predict_generator output shape behavior should be consistent
    def expected_shape(batch_size, n_batches):
        return (batch_size * n_batches, 4), (batch_size * n_batches, 3)
    # Multiple outputs and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(RandomSequence(batch_size,
                                                 sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
    # Multiple outputs and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(RandomSequence(batch_size,
                                                 sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
    # Create a model with a single output.
    single_output_model = Model([a, b], a_2)
    single_output_model.compile(optimizer, loss, metrics=[], sample_weight_mode=None)
    # Single output and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(RandomSequence(batch_size,
                                                               sequence_length=sequence_length))
    assert np.shape(out) == shape_0
    # Single output and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(RandomSequence(batch_size,
                                                               sequence_length=sequence_length))
    assert np.shape(out) == shape_0
@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
@keras_test
def test_warnings():
    """Multiprocessing with a plain generator warns; a Sequence does not."""
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
    # A generator cannot be shared safely across processes -> must warn.
    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'
    # A Sequence is process-safe -> no Sequence-related warning expected.
    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
@keras_test
def test_sparse_inputs_targets():
    """Scipy sparse matrices are accepted as inputs and targets."""
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,))
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='sparse operations supported only by TensorFlow')
@keras_test
def test_sparse_placeholder_fit():
    """A sparse Input placeholder (sparse=True) trains and evaluates."""
    test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
    test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
    in1 = Input(shape=(3,))
    in2 = Input(shape=(3,), sparse=True)
    out1 = Dropout(0.5, name='dropout')(in1)
    out2 = Dense(4, name='dense_1')(in2)
    model = Model([in1, in2], [out1, out2])
    model.predict(test_inputs, batch_size=2)
    model.compile('rmsprop', 'mse')
    model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
    model.evaluate(test_inputs, test_outputs, batch_size=2)
@keras_test
def test_trainable_argument():
    """A layer built with trainable=False must not change under training."""
    x = np.random.random((5, 3))
    y = np.random.random((5, 2))
    model = Sequential()
    model.add(Dense(2, input_dim=3, trainable=False))
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    # Predictions before and after training must match exactly.
    assert_allclose(out, out_2)
    # test with nesting
    inputs = Input(shape=(3,))
    outputs = model(inputs)
    model = Model(inputs, outputs)
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    assert_allclose(out, out_2)
@keras_test
def test_with_list_as_targets():
    """A plain Python list is accepted as training targets."""
    model = Sequential()
    model.add(Dense(1, input_dim=3, trainable=False))
    model.compile('rmsprop', 'mse')
    x = np.random.random((2, 3))
    y = [0, 1]
    model.train_on_batch(x, y)
@keras_test
def test_check_not_failing():
    """Compatible loss/target shapes (including None dims) pass the check."""
    a = np.random.random((2, 1, 3))
    _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])
    _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, None, 3)])
@keras_test
def test_check_last_is_one():
    """A (..., 1)-shaped target with categorical_crossentropy is rejected."""
    a = np.random.random((2, 3, 1))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])
    # Bug fix: assert against the exception's message (`exc.value`), not the
    # ExceptionInfo wrapper - on modern pytest `str(exc)` is the location
    # summary, which silently weakened this assertion.
    assert 'You are passing a target array' in str(exc.value)
@keras_test
def test_check_bad_shape():
    """Mismatched output/target shapes are rejected by the compatibility check."""
    a = np.random.random((2, 3, 5))
    with pytest.raises(ValueError) as exc:
        _check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, 3, 6)])
    # Bug fix: use `exc.value` so the substring check runs against the actual
    # exception message rather than pytest's ExceptionInfo repr.
    assert 'targets to have the same shape' in str(exc.value)
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TensorFlow backend')
@keras_test
def test_model_with_input_feed_tensor():
    """We test building a model with a TF variable as input.
    We should be able to call fit, evaluate, predict,
    by only passing them data for the placeholder inputs
    in the model.
    """
    import tensorflow as tf
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    # Mixed case: one tensor-fed input, one placeholder input.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=['mean_squared_error'],
                  loss_weights=loss_weights,
                  sample_weight_mode=None)
    # test train_on_batch: only the placeholder input needs data.
    out = model.train_on_batch(input_b_np,
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.test_on_batch({'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.predict_on_batch({'input_b': input_b_np})
    # test fit
    out = model.fit({'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=10)
    out = model.fit(input_b_np,
                    [output_a_np, output_b_np], epochs=1, batch_size=10)
    # test evaluate
    out = model.evaluate({'input_b': input_b_np},
                         [output_a_np, output_b_np], batch_size=10)
    out = model.evaluate(input_b_np,
                         [output_a_np, output_b_np], batch_size=10)
    # test predict
    out = model.predict({'input_b': input_b_np}, batch_size=10)
    out = model.predict(input_b_np, batch_size=10)
    assert len(out) == 2
    # Now test a model with a single input
    # i.e. we don't pass any data to fit the model.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    a_2 = Dropout(0.5, name='dropout')(a_2)
    model = Model(a, a_2)
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])
    # test train_on_batch
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.test_on_batch(None,
                              output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([],
                               output_a_np)
    out = model.train_on_batch({},
                               output_a_np)
    # test fit
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    # test evaluate
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    # test predict
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert out.shape == (10 * 3, 4)
    # Same, without learning phase
    # i.e. we don't pass any data to fit the model.
    a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
    a_2 = Dense(4, name='dense_1')(a)
    model = Model(a, a_2)
    model.summary()
    optimizer = 'rmsprop'
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mean_squared_error'])
    # test train_on_batch
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.train_on_batch(None,
                               output_a_np)
    out = model.test_on_batch(None,
                              output_a_np)
    out = model.predict_on_batch(None)
    out = model.train_on_batch([],
                               output_a_np)
    out = model.train_on_batch({},
                               output_a_np)
    # test fit
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    out = model.fit(None,
                    output_a_np, epochs=1, batch_size=10)
    # test evaluate
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    out = model.evaluate(None,
                         output_a_np, batch_size=10)
    # test predict
    out = model.predict(None, steps=3)
    out = model.predict(None, steps=3)
    assert out.shape == (10 * 3, 4)
# Verify that a multi-output Model can be compiled with a loss for only ONE
# of its outputs (keyed by layer name); the other output then contributes no
# loss term but can still carry metrics.
# NOTE(review): indentation appears to have been stripped from this dump;
# function bodies sit at the same column as their `def` line.
@keras_test
def test_model_with_partial_loss():
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
# Loss only on the 'dropout' output; 'dense_1' output has no loss.
loss = {'dropout': 'mse'}
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
out = model.train_on_batch(input_a_np, output_a_np)
out = model.test_on_batch(input_a_np, output_a_np)
# fit
out = model.fit(input_a_np, [output_a_np])
# evaluate
out = model.evaluate(input_a_np, [output_a_np])
# Same without dropout.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
a_3 = Dense(4, name='dense_2')(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dense_2': 'mse'}
# Metrics may also be restricted to a single (loss-less) output by name.
model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
# test train_on_batch
out = model.train_on_batch(input_a_np, output_a_np)
out = model.test_on_batch(input_a_np, output_a_np)
# fit
out = model.fit(input_a_np, [output_a_np])
# evaluate
out = model.evaluate(input_a_np, [output_a_np])
# Exercise models whose loss does not come from compile(): regularizer-only
# losses, losses added via model.add_loss(), and (TensorFlow only) models fed
# entirely from input tensors so that fit/evaluate/predict receive no data
# and must instead be driven by steps/steps_per_epoch.
@keras_test
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='cntk does not support external loss yet')
def test_model_with_external_loss():
# None loss, only regularization loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
a_3 = Dense(4, name='dense_2')(a)
model = Model(a, [a_2, a_3])
# Loss supplied as a symbolic tensor instead of through compile().
model.add_loss(K.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test fit with no external data at all.
if K.backend() == 'tensorflow':
import tensorflow as tf
# The input is a TF Variable, so the model needs no feed data at all.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_2)
model = Model(a, a_2)
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
# batch_size is meaningless without data: ValueError expected;
# steps_per_epoch is the supported way to size an epoch.
with pytest.raises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with pytest.raises(ValueError):
out = model.fit(None, None,
epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None,
epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with pytest.raises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with pytest.raises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
# 3 steps over a 10-row input tensor => 30 rows of width 4.
assert out.shape == (10 * 3, 4)
# Test multi-output model without external data.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_1 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_1)
model = Model(a, [a_1, a_2])
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with pytest.raises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with pytest.raises(ValueError):
out = model.fit(None, None,
epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None,
epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with pytest.raises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with pytest.raises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
# One array per model output, each 3 steps * 10 rows.
assert len(out) == 2
assert out[0].shape == (10 * 3, 4)
assert out[1].shape == (10 * 3, 4)
# Validate the `target_tensors` argument of compile(): targets supplied as a
# list or as a dict keyed by output-layer name, rejection of invalid
# arguments, and interaction with sample_weight. When targets are tensors,
# train_on_batch is called with y=None.
@keras_test
def test_target_tensors():
# single-output, as list
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = keras.backend.variable(target_val)
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense': target})
model.train_on_batch(input_val, None)
# test invalid arguments
# A set is not an accepted container type.
with pytest.raises(TypeError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=set())
# Two targets for a single-output model.
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target, target])
# Dict key that matches no output layer.
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense2': None})
# Passing y data while a target tensor is set must fail.
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target])
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = keras.backend.variable(target_val_a)
target_b = keras.backend.variable(target_val_b)
inputs = keras.layers.Input(shape=(4,))
output_a = keras.layers.Dense(4, name='dense_a')(inputs)
output_b = keras.layers.Dense(4, name='dense_b')(inputs)
model = keras.models.Model(inputs, [output_a, output_b])
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_a': target_a,
'dense_b': target_b})
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None,
sample_weight={'dense_a': np.random.random((10,))})
# Use user-created backend placeholders as target tensors, feeding their
# values through the third positional argument of train_on_batch as a
# {placeholder: array} mapping. Also checks error cases (too many targets,
# unknown layer-name key) and, on TensorFlow, a raw tf.placeholder target.
@keras_test
def test_model_custom_target_tensors():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
# Placeholders standing in for the two targets; y2 has a deliberately
# mismatched shape and is only used to trigger errors.
y = K.placeholder([10, 4], name='y')
y1 = K.placeholder([10, 3], name='y1')
y2 = K.placeholder([7, 5], name='y2')
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
# test list of target tensors
# Three targets for a two-output model must be rejected.
with pytest.raises(ValueError):
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1, y2])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
# test dictionary of target_tensors
with pytest.raises(ValueError):
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'does_not_exist': y2})
# test dictionary of target_tensors
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'dense_1': y, 'dropout': y1})
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
if K.backend() == 'tensorflow':
import tensorflow as tf
# test with custom TF placeholder as target
pl_target_a = tf.placeholder('float32', shape=(None, 4))
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_1': pl_target_a})
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
# Regression test for keras issue #8121: toggling model.trainable after
# compile() must trigger a 'Discrepancy' UserWarning on summary()/fit(),
# and recompiling must clear the warning.
@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
@keras_test
def test_trainable_weights_count_consistency():
"""Tests the trainable weights consistency check of Model.
This verifies that a warning is shown if model.trainable is modified
and the model is summarized/run without a new call to .compile()
Reproduce issue #8121
"""
a = Input(shape=(3,), name='input_a')
model1 = Model(inputs=a, outputs=Dense(1)(a))
model1.trainable = False
b = Input(shape=(3,), name='input_b')
y = model1(b)
model2 = Model(inputs=b, outputs=Dense(1)(y))
model2.compile(optimizer='adam', loss='mse')
# Flip trainability AFTER compiling the outer model: this is the
# inconsistency the framework is expected to warn about.
model1.trainable = True
# Should warn on .summary()
with pytest.warns(UserWarning) as w:
model2.summary()
warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
assert warning_raised, 'No warning raised when trainable is modified without .compile.'
# And on .fit()
with pytest.warns(UserWarning) as w:
model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
assert warning_raised, 'No warning raised when trainable is modified without .compile.'
# And shouldn't warn if we recompile
model2.compile(optimizer='adam', loss='mse')
with pytest.warns(None) as w:
model2.summary()
assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
# Smoke-test that every training/inference entry point (fit, predict,
# *_on_batch, evaluate) accepts pandas DataFrames — bare, in lists, and in
# dicts keyed by input-layer name — for both single- and multi-input models.
# No return values are asserted; the test passes if nothing raises.
@keras_test
def test_pandas_dataframe():
input_a = Input(shape=(3,), name='input_a')
input_b = Input(shape=(3,), name='input_b')
x = Dense(4, name='dense_1')(input_a)
# NOTE(review): 'desne_2' looks like a typo for 'dense_2'; harmless here
# since the name is never referenced, but worth fixing upstream.
y = Dense(3, name='desne_2')(input_b)
model_1 = Model(inputs=input_a, outputs=x)
model_2 = Model(inputs=[input_a, input_b], outputs=[x, y])
optimizer = 'rmsprop'
loss = 'mse'
model_1.compile(optimizer=optimizer, loss=loss)
model_2.compile(optimizer=optimizer, loss=loss)
input_a_df = pd.DataFrame(np.random.random((10, 3)))
input_b_df = pd.DataFrame(np.random.random((10, 3)))
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.predict(input_a_df)
model_2.predict([input_a_df, input_b_df])
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
model_1.predict_on_batch(input_a_df)
model_2.predict_on_batch([input_a_df, input_b_df])
model_1.predict_on_batch([input_a_df])
model_1.predict_on_batch({'input_a': input_a_df})
model_2.predict_on_batch({'input_a': input_a_df, 'input_b': input_b_df})
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.train_on_batch(input_a_df,
output_a_df)
model_2.train_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.train_on_batch([input_a_df],
[output_a_df])
model_1.train_on_batch({'input_a': input_a_df},
output_a_df)
model_2.train_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.test_on_batch(input_a_df,
output_a_df)
model_2.test_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.test_on_batch([input_a_df],
[output_a_df])
model_1.test_on_batch({'input_a': input_a_df},
output_a_df)
model_2.test_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Allow running this test module directly; delegates to pytest's CLI.
if __name__ == '__main__':
pytest.main([__file__])
| [
"hello.sea@qq.com"
] | hello.sea@qq.com |
2b60eaa82e0cb6e335b0b9ef46b327feb5847ac0 | 5c588f9debc3f9ac5d4fa2f68934072659b54f09 | /wproto/message.py | d52ce468bf20e47711214066214e5f366fbddac1 | [] | no_license | dtomasiewicz/Wombat | cd959b566005845b58b6144a41a477e08d200bb0 | 7afe088d7d3686e566f338e646016919114fee82 | refs/heads/master | 2016-09-10T17:28:06.742181 | 2012-02-22T11:30:21 | 2012-02-22T11:30:21 | 2,759,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | class Message:
def __init__(self, type, data={}, **kwargs):
    """Build a message of the given type; keyword args override `data` entries.

    The shared default dict is safe: it is copied before being updated.
    """
    self.type = type
    payload = dict(data)
    payload.update(kwargs)
    self._data = payload
def istype(self, type):
    """Return True when this message's type equals `type`."""
    expected = type
    return expected == self.type
def get(self, field):
    """Return the payload value stored under `field`, or None when absent.

    Uses dict.get instead of a membership test followed by a second
    subscript: one hash lookup instead of two, identical semantics.
    """
    return self._data.get(field)
def set(self, field, value):
# Create or overwrite the payload entry for `field`.
self._data[field] = value
def __getattr__(self, field):
# Fallback attribute access: `msg.foo` resolves to `msg.get('foo')`
# (None when the field is absent). Only invoked when normal attribute
# lookup fails, so real attributes such as `type` are unaffected.
return self.get(field)
def __str__(self):
    """Render as '<type> <payload dict>' for logging/debugging."""
    parts = (self.type, str(self._data))
    return " ".join(parts)
| [
"dtomasiewicz@gmail.com"
] | dtomasiewicz@gmail.com |
e77eea664a66f01bec65316714e74254e3fdcee4 | e53474b0f25c23db9aef70068346e829a78666b6 | /tests/test_Login.py | e04e3896f4726f595cb93d5a48ed8a268f962fa7 | [] | no_license | theVernon124/trello-api-python-poc | a56c7b5a966933b62a785fa7d9bef5075cfde28d | c5e43e07b1d006dffbe26f7dda7fd39a4e97cba3 | refs/heads/master | 2022-12-22T12:24:03.474997 | 2019-08-21T01:01:14 | 2019-08-21T01:01:14 | 203,478,361 | 0 | 1 | null | 2022-12-11T00:34:20 | 2019-08-21T00:59:09 | Python | UTF-8 | Python | false | false | 1,522 | py | import requests
import pytest
from commands.Utils import Utils
from commands.TestSteps import TestSteps
# HTTP login tests driven by external config/test-data files; `requests`
# sessions carry cookies between the auth and session-creation calls.
class TestLogin(object):
# Class-level fixtures populated once in setup_class.
utils = None
steps = None
config_data = None
login_data = None
@classmethod
def setup_class(cls):
# Load configuration and the 'login' test-data fixture once per class.
cls.utils = Utils()
cls.steps = TestSteps()
cls.config_data = cls.utils.get_config_data()
cls.login_data = cls.utils.get_test_data("login")
@pytest.mark.smoke
def test_valid_login(self):
s = requests.Session()
# The home page sets a 'dsc' cookie that must accompany the session POST.
dsc = s.get(self.config_data["url_home"]).cookies["dsc"]
req = self.steps.perform_auth(self.login_data["input_data"]["login_method"],
self.login_data["input_data"]["email"], self.login_data["input_data"]["password"])
code = req.json()["code"]
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
data = {
"authentication": code,
"dsc": dsc
}
req = s.post(self.config_data["url_session"], headers=headers, data=data)
# 204 No Content indicates the session was created successfully.
assert req.status_code == 204
@pytest.mark.negative
def test_invalid_auth_method(self):
# NOTE(review): this test looks unfinished — perform_auth() is called
# with no arguments, `data` is built but never sent, and s.post() is
# missing its URL (will raise TypeError). Verify intent before relying
# on it.
s = requests.Session()
self.steps.perform_auth()
data = {
"method": self.login_data["input_data"]["invalid_login_method"],
"factors[user]": self.login_data["input_data"]["email"],
"factors[password]": self.login_data["input_data"]["password"]
}
req = s.post()
| [
"paivordev@gmail.com"
] | paivordev@gmail.com |
300298b7b5d8f8eec4023b3e511062e1ac6c6c7d | c78a5a3f231fccadc98d11bf3793fb1db1a6348a | /app/execute.py | fba5050deeb6e40bd95b6903a7a31f5d3f8eca21 | [] | no_license | forgetso/arb | fa819dc1ff4f2df1a4e761672e38116f15c9bfda | c26d27ed9141c4a1ccd0c9f720d18fdd81958530 | refs/heads/master | 2023-06-30T20:19:30.670146 | 2021-07-29T09:10:43 | 2021-07-29T09:10:43 | 171,485,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,580 | py | import threading
import app.settings as settings
from app.lib.jobqueue import Jobqueue, JOB_STATUS_COLLECTION, JOB_DEFINITIONS, JOB_COLLECTION, STATUS_RUNNING, \
STATUS_FAILED, STATUS_CREATING, STATUS_COMPLETE, MAX_STDLOG_SIZE
import logging
import traceback
import datetime
from app.lib.setup import update_fiat_rates
from app.lib.db import remove_api_method_locks
from os import getpid, kill
# Create an instance of the app! Execute a job queue. Begin scraping prices of crypto. Look for jobs to start based on
# the scrapes.
# Orchestrates the arbitrage job queue: registers itself in the job-status
# collection, schedules recurring COMPARE jobs per trade pair via
# call_repeatedly, launches queued jobs on daemon threads, and reaps
# finished threads. All persistence goes through the Jobqueue's MongoDB
# handle (self.jq.db).
class JobQueueExecutor:
def __init__(self):
self.jq = Jobqueue()
self.runningjobs = []
self.compare_trade_pairs_intervals = {}
self.start_jobs_interval = None
self._id = None
self.running = False
self.finishedjobs = []
return
def execute(self):
# this is the job queue monitor that polls the database for new, completed, and failed jobs
# it will periodically run compares for a set list of currency pairs
self.jq.db[JOB_STATUS_COLLECTION].remove()
self._id = self.jq.db[JOB_STATUS_COLLECTION].insert({'running': True, 'pid': getpid()})
# remove any api locks that were stored previously
remove_api_method_locks()
# we are going to constantly check apis for arbitrage opportunities
for trade_pair in settings.TRADE_PAIRS:
logging.debug(trade_pair)
self.compare_trade_pairs_intervals[trade_pair] = call_repeatedly(settings.INTERVAL_COMPARE,
self.compare_trade_pair,
trade_pair)
# we periodically update the fiat rate of BTC to identify potential profit
self.fiat_rate_interval = call_repeatedly(settings.INTERVAL_FIAT_RATE, update_fiat_rates)
# when opportunities are identified they are added as jobs to the db. this next function will be notified of any
# new jobs that are added and it will be called
self.jq.bind_to(self.start_job)
self.check_running_jobs_interval = call_repeatedly(settings.INTERVAL_RUNNINGJOBS, self.check_running_jobs)
# TODO jobs to check balances between exchanges and periodically move large amounts
# job is called REPLENISH
# TODO job to convert BTC (TODO set this as a master currency) to all live currencies in MASTER EXCHANGE (set in settings)
# TODO jobs to move crypto into fiat periodically to protect against price fluctuations
return
def start_job(self, _id):
# Callback invoked by the Jobqueue when a new job document is inserted.
# Validates the job type/args against JOB_DEFINITIONS, then runs it on a
# daemon JobQueueThread and marks the document RUNNING.
job = self.jq.db[JOB_COLLECTION].find_one({'_id': _id})
job_type = job['job_type']
if job_type not in JOB_DEFINITIONS:
raise TypeError('Unknown job type {}'.format(job_type))
if job_type in settings.JOBS_NOT_RUNNING:
return
safecmd = ['app.jobs.{}'.format(job_type.lower())]
for arg_key, arg_value in job['job_args'].items():
type_function = None
try:
type_function = JOB_DEFINITIONS[job_type][arg_key].get('type')
safecmd.append(type_function(arg_value))
# NOTE(review): bare except — any failure (including KeyError on an
# unexpected arg) is re-reported as 'Invalid job argument'.
except:
raise TypeError('Invalid job argument supplied: {} should be {}'.format(arg_value, type_function))
jobthread = JobQueueThread(self.jq, job, safecmd)
jobthread.setDaemon(True)
jobthread.start()
job['job_startat'] = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
job['job_status'] = STATUS_RUNNING
job['job_lock'] = True
job['jobqueue_id'] = self._id
self.jq.update_job(job)
self.runningjobs.append(jobthread)
def check_running_jobs(self):
# Periodic reaper: moves dead threads from runningjobs to finishedjobs,
# marks their job documents COMPLETE, and returns False if any thread
# recorded an internal error.
ok = True
self.finishedjobs = []
for jobthread in self.runningjobs:
if not jobthread.is_alive():
# the thread is /probably/ dead. But it may simply not have quite started yet. Give it a while to join, then check again
jobthread.join(2)
if not jobthread.is_alive():
self.finishedjobs.append(jobthread)
# we had an internal error => immediate quit!
if jobthread.err:
# just print it out normally, cron will nab and email it!
print(jobthread.err)
print(jobthread.job)
logging.debug(jobthread.err)
ok = False
for jobthread in self.finishedjobs:
self.runningjobs.remove(jobthread)
jobthread.job['job_status'] = STATUS_COMPLETE
self.jq.update_job(jobthread.job)
# logging.debug('Job has finished {}'.format(jobthread.job['job_pid']))
return ok
def compare_trade_pair(self, trade_pair):
# Interval callback: enqueue a COMPARE job for `trade_pair`
# (format 'XXX-YYY') unless one is already CREATING/RUNNING under
# this job queue instance.
# stop command may have been issued
self.is_running()
if not self.running:
# cancels the interval
self.compare_trade_pairs_intervals[trade_pair]()
# logging.info('Now adding compare trade pair jobs')
# Always run a compare job for each trade pair
trade_pair_split = trade_pair.split('-')
curr_x = trade_pair_split[0]
curr_y = trade_pair_split[1]
existing_job = self.jq.db[JOB_COLLECTION].find_one({'job_type': 'COMPARE',
'job_args.curr_x': curr_x,
'job_args.curr_y': curr_y,
'job_args.jobqueue_id': str(self._id),
'job_status':
{'$in': [STATUS_CREATING,
STATUS_RUNNING]
}
})
if not existing_job:
# logging.info('Adding comparison job for {}'.format(trade_pair))
# check for an existing job running under **this** job queue. means old dead RUNNING jobs are ignored.
self.jq.add_job(
{
'job_type': 'COMPARE',
'job_args': {'curr_x': curr_x, 'curr_y': curr_y, 'jobqueue_id': str(self._id)},
},
self._id)
else:
logging.debug('Not adding COMPARE {} {} job: Existing job!'.format(curr_x, curr_y))
def job_finished(self, process):
# NOTE(review): recursive retry with no base-case change — each call
# re-waits the same process; a persistently non-zero exit status would
# recurse until RecursionError. Appears unused by the rest of this
# class — confirm before removing.
retcode = process.wait()
while retcode != 0:
self.job_finished(process)
return
def is_running(self):
# Refresh self.running from the status document; a missing document
# (find_one -> None, hence TypeError on .get) means "not running".
try:
self.running = self.jq.db[JOB_STATUS_COLLECTION].find_one({'_id': self._id}).get('running')
except TypeError:
self.running = False
def stop_jobqueue(self):
# Best-effort shutdown: cancel all intervals (bare excepts swallow the
# case where they were never started) and purge RUNNING job documents.
self.running = False
try:
# cancels the interval
self.start_jobs_interval()
except:
pass
try:
# cancel the intervals
for k in self.compare_trade_pairs_intervals:
self.compare_trade_pairs_intervals[k]()
except:
pass
self.jq.db.jobs.remove({'job_status': STATUS_RUNNING}, multi=True)
logging.info('Job queue stopped')
# Worker thread that executes one job via jq.run_command(job, safecmd).
# The thread records context (job, safecmd) so the reaper can report it.
class JobQueueThread(threading.Thread):
def __init__(self, jq, job, safecmd):
super(JobQueueThread, self).__init__(target=jq.run_command, args=(job, safecmd))
self.err = None
self.safecmd = safecmd
self.job = job
self.output = None
def run(self):
try:
super(JobQueueThread, self).run()
# RunCommandException is treated as an expected, already-handled failure.
except RunCommandException:
pass
except Exception as e:
if self.safecmd:
print(self.safecmd)
if self.job:
print(self.job)
raise Exception(e)
# NOTE(review): unreachable — the `raise` above always fires first,
# so self.err is never populated and check_running_jobs' err branch
# cannot trigger. Presumably this line was meant to precede the raise.
self.err = traceback.format_exc()
# finally:
# runningjobslock.acquire()
# runningjobslock.notify()
# runningjobslock.release()
# Marker exception for failures inside jq.run_command; JobQueueThread.run
# swallows it deliberately (presumably already logged at the source —
# confirm in app.lib.jobqueue).
class RunCommandException(Exception):
pass
def call_repeatedly(interval, func, *args):
    """Invoke ``func(*args)`` every ``interval`` seconds on a background thread.

    Returns a zero-argument callable that stops the loop (the Event's
    ``set`` method). The first call happens ``interval`` seconds after this
    function returns, because Event.wait(interval) sleeps before each call.
    The callback's return value is ignored.
    """
    stopped = threading.Event()

    def loop():
        # wait() returns False on timeout (keep looping) and True once the
        # returned stopper has been called.
        while not stopped.wait(interval):
            func(*args)

    # Daemon thread: consistent with the daemonized job threads elsewhere in
    # this module, and a forgotten stopper no longer blocks interpreter exit.
    worker = threading.Thread(target=loop)
    worker.daemon = True
    worker.start()
    return stopped.set
| [
"forgetso86@gmail.com"
] | forgetso86@gmail.com |
1afd6e65c23e6b6155373914250891153d97897f | 8fa6f5dfb571e9e7a733aef9b1714507e5c95d8e | /runme.py | b0c35f77ac74f997c411ee6fa55fad2a82906ce4 | [
"MIT"
] | permissive | mcdougallab/cpp-and-python | d56ae2d3bcefc4005ebfbd5087d4b24b93ba9df2 | f508ebf34fafd8d2ad199fb78a12293b90751094 | refs/heads/master | 2022-09-18T03:58:18.275671 | 2020-06-06T13:03:36 | 2020-06-06T13:03:36 | 269,831,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | import ctypes
import sys
import numpy
import numpy.ctypeslib
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
# Demo driver: calls a native C/C++ routine (simulate.so) through ctypes to
# fill three n-point trajectories, then plots them in 3-D. The three nearby
# initial x values presumably demonstrate sensitive dependence on initial
# conditions (chaotic system) — confirm against the C source.
n = 1000
# ctypes signature helpers: contiguous 1-D double arrays and a C double.
double_p = numpy.ctypeslib.ndpointer(dtype=float, ndim=1, flags='CONTIGUOUS')
double = ctypes.c_double
# connect to the module that does the calculation
try:
libsim = ctypes.cdll.LoadLibrary('simulate.so')
except OSError:
print('Missing simulate.so; aborting.')
print('Did you forget to run "make" first?')
sys.exit()
# C prototype: simulate(n, xs, ys, zs, x0, y0, z0) — fills the arrays in place.
simulate = libsim.simulate
simulate.argtypes = [ctypes.c_int64, double_p, double_p, double_p, double, double, double]
simulate.restype = None
ax = plt.axes(projection='3d')
for x0, color in [(1, 'blue'), (1.01, 'red'), (1.02, 'green')]:
xs = numpy.zeros(n)
ys = numpy.zeros(n)
zs = numpy.zeros(n)
simulate(n, xs, ys, zs, x0, 0, 0)
ax.plot3D(xs, ys, zs, color=color)
plt.show()
| [
"robert.mcdougal@yale.edu"
] | robert.mcdougal@yale.edu |
0d16857d7fd0f668e17201298d880dd834ab42de | d304c27c095a7e897bb9c02e78d34bed4398c8fc | /alex/components/simulator/user_simulator/demos/ptien/ptien_metadata.py | 1d75d96f2c4f6859871f52671389df68aeecb270 | [
"Apache-2.0"
] | permissive | thanhlct/alex | 876630e7cb2a6b1affce5bb646e6bd0489305393 | 9fabefb62572e96d14654d3ec0c8861daf51ffa7 | refs/heads/master | 2020-04-05T18:29:37.300215 | 2016-05-19T08:51:21 | 2016-05-19T08:51:21 | 45,947,050 | 0 | 0 | null | 2015-11-10T23:23:27 | 2015-11-10T23:23:27 | null | UTF-8 | Python | false | false | 39,235 | py | from alex.utils.sample_distribution import sample_from_list
from alex.utils.sample_distribution import sample_a_prob
import alex.utils.matlab_functions as matlab
from infer_place_info import add_place_info
def values_generator1(goal, slot):
    '''Return the full candidate value list for a slot (demo stub).'''
    return list(range(1, 4))
def values_generator2(goal, slot):
    '''Return an alternative candidate value list for a slot (demo stub).'''
    return list(range(7, 10))
def alternative_value_fun():
    '''Sample one "alternative" slot value to use during a conversation.'''
    options = ['next', 'prev', 'last', '1', '2', '3', '4', 'next hour']
    return sample_from_list(options)
# Post-process a (single-element) list of user dialogue acts: with some
# probability, drop inform(from_borough)/inform(to_borough) items so the
# simulated user does not always volunteer borough information alongside a
# stop/street. The drop probability depends on whether a from/to stop was
# also informed, and is zero when the act informs ONLY boroughs.
# NOTE(review): Python 2 syntax (print statements); debug prints and pdb
# remnants left in place.
def post_process_act(das):
#return das
das = das[0]
#print 'in das:', das
#import pdb
da_des = get_dialogue_act_metadata(das)
#FILTER from/to borough out of user act if this turn doesn' include from/to street, stop and also keep inform borough with prob. of 0.5
if 'inform' in da_des and 'from_borough' in da_des['inform']['slots'] and len(da_des['inform']['slots'])>1:
lst = matlab.subtract(['from_stop'], da_des['inform']['slots'])
prob = 0.7
if len(lst)<1:
prob=0.3
if is_only_borough(da_des):
prob = 0.0
if sample_a_prob(prob):
das.dais.remove('inform(from_borough="' + da_des['inform']['slot_value']['from_borough'] + '")')
print 'remove from_borough'
#pdb.set_trace()
if 'inform' in da_des and 'to_borough' in da_des['inform']['slots'] and len(da_des['inform']['slots'])>1:
lst = matlab.subtract(['to_stop'], da_des['inform']['slots'])
prob = 0.7#70% remove borough from inform
if len(lst)<1:#has to_stop, remove with 30%
prob=0.3
if is_only_borough(da_des):#only borough don't remove
prob = 0.0
if sample_a_prob(prob):
das.dais.remove('inform(to_borough="' + da_des['inform']['slot_value']['to_borough'] + '")')
print 'remove to_borough'
#pdb.set_trace()
return [das]
def is_only_borough(des):
    '''True when the inform act mentions nothing but borough slot(s).'''
    slots = des['inform']['slots']
    if len(slots) == 2:
        return bool(matlab.is_equal(['from_borough', 'to_borough'], slots))
    if len(slots) == 1:
        return 'from_borough' in slots or 'to_borough' in slots
    return False
def post_process_final_goal(goal):
    '''Refine a sampled goal by attaching inferred place information.'''
    return add_place_info(goal)
def reward_last_turn(goal, last_da):
# Flat per-turn penalty: every turn costs 1, rewarding shorter dialogues.
# `goal` and `last_da` are accepted for interface uniformity but unused.
return -1
# Final-goal reward: 20 if the system's LAST offer matches every slot of the
# user goal (allowing equivalent slots via get_slot_value), else 0.
# NOTE(review): Python 2 print statement below.
def reward_final_goal(goal, turns):
#Successful dialogue: 20; Unsuccessful: 0
success_reward = 20
failure_reward = 0
last_offer = None
# Scan turns backwards for the most recent system act containing an offer.
for i in range(len(turns)-1, -1, -1):
da = turns[i]['sys_da'][0]
if da.has_dat('offer'):
last_offer = da
break
if last_offer is None:
return failure_reward
reward = success_reward
# Reduce the offer act to its {slot: value} mapping.
last_offer = get_dialogue_act_metadata(last_offer)['offer']['slot_value']
for k, v in goal.items():
if v != get_slot_value(last_offer, k):
print 'WRONG: ', k, '~', v
reward=failure_reward
break
return reward
def get_slot_value(offer, slot):
    """Look up `slot` in an offered {slot: value} dict, honouring equivalences.

    If `slot` itself was not offered, any slot from the same equivalence
    group (e.g. from_stop/from_street/from_city) is accepted instead.
    Returns None when neither the slot nor an equivalent one was offered.
    """
    if slot in offer.keys():
        return offer[slot]
    eq_slots = [('from_borough', 'from_stop', 'from_city', 'from_street'),
                ('to_borough', 'to_stop', 'to_city', 'to_street'),
                ('arrival_time', 'arrival_time_rel'),
                ('departure_time', 'departure_time_rel')]
    for eq in eq_slots:
        if slot in eq:
            break
    else:
        # BUGFIX: previously a slot outside every equivalence group fell
        # through with `eq` still bound to the LAST group, so an unrelated
        # offered value (e.g. departure_time) could be returned for it.
        return None
    for s in eq:
        if s in offer.keys():
            return offer[s]
    return None
def get_dialogue_act_metadata(da):
    '''Summarise a dialogue act.

    Returns:
        A dict mapping each act type to a record of the slots and values it
        carries: {'slots': [...], 'values': [...], 'slot_value': {slot: value}},
        with lists in encounter order (slot_value keeps the last value seen
        for a repeated slot).
    '''
    summary = {}
    for item in da:
        record = summary.setdefault(item.dat,
                                    {'slots': [], 'values': [], 'slot_value': {}})
        record['slots'].append(item.name)
        record['values'].append(item.value)
        record['slot_value'][item.name] = item.value
    return summary
config = {
'user_simulator':{
'SimpleUserSimulator':{
'debug': True,
'patience_level':8,#minimum 1,the number of repeated ask the same thing to get angry and hang up, set to 0 mean never hang up
'patience_levels':{
4: 0.5,
7: 0.2,
3: 0.3,
#2: 0.2,
},
'out_of_patience_act':'hangup()',
'metadata':{
'slots': ['from_stop', 'to_stop', 'from_city', 'to_city', 'from_street', 'to_street',
'departure_time', 'departure_date', 'arrival_time', 'arrival_date',
'vihecle', 'arrival_time_rel', 'depature_time_rel', 'number_transfers', 'duration',' distance',
'street', 'city', 'state',
'alternative', 'date_rel',#How to push it in to the simulator
'slot_fun',#only for test slots have value list generating dyanmically from fun
],#only for easy seeing and imagining, not being used in coding
'goals': [
{'fixed_slots':[('task','find_connection'),],
'changeable_slots':['from_stop', 'to_stop', 'from_city', 'to_city', 'from_street', 'to_street',
'departure_time', 'arrival_time', 'departure_time_rel', 'arrival_time_rel',
'vehicle',
'number_transfer', 'duration', 'distance',#users dont know these slot
],
'one_of_slot_set':[
{('from_stop', 'to_stop'):0.3,#choose only one of these set
('from_city', 'to_city'):0.2,
('from_street', 'to_street'):0.3,
('from_stop', 'to_street'):0.2,
},#end of the fist defination of one_of_slot_set
{():0.3,
('arrival_time',):0.1,
('departure_time',):0.1,
('arrival_time_rel',):0.25,
('departure_time_rel',):0.25,
},
{():0.5,
('vehicle',):0.5,
},
],
'equivalent_slots':[#('from_stop', 'from_street', 'from_borough', 'from_city'), ('to_stop', 'to_street', 'to_borough', 'to_city'),
('from_stop', 'from_street', 'from_city'), ('to_stop', 'to_street', 'to_city'),
('arrival_time', 'arrival_time_rel'), ('departure_time', 'departure_time_rel'),
],
'sys_unaskable_slots':['number_transfer', 'duration', 'distance',],
#'default_slots_values':[('departure_time', 'as soon as possible'), ('vehicle', 'dontcare'), ('arrival_time', 'as soon as possible')],
'default_slots_values':[('departure_time', 'now'), ('vehicle', 'dontcare'), ('arrival_time', 'now')],
#'add_fixed_slot_to_goal': True,
'active_prob':1.0,#probability of observing the task being active
'same_table_slot_keys':[],#defining when serveral slots connected to a row in a table and we would like to get them linked together
'goal_post_process_fun': post_process_final_goal,#post process function to refine the sampled goal, which will be defined for specific semantic relations
'act_post_process_fun': post_process_act,#post process function to refine user act
'goal_slot_relax_fun': None,#support function, relax the value of a slot given curretn goal, e.g. more late arrival, departure sooner, not used yet, for this purpose will be pushed into action handler
'reward_last_da_fun': reward_last_turn,
'reward_final_goal_fun': reward_final_goal,
'end_dialogue_post_process_fun': None,
'slot_used_sequence':{#higher level is only able to used when one of slot at previous level used#TODO not used in the code yet
0:('task',),
1:('from_stop', 'from_city', 'from_street', 'to_stop', 'to_city', 'to_street'),
#1:('from_stop', 'from_city', 'from_street', 'to_stop', 'to_city', 'to_street', 'departure_time', 'arrival_time', 'departure_tiem_rel', 'arrival_time_rel', 'vehicle'),
2:('departure_time', 'arrival_time', 'departure_tiem_rel', 'arrival_time_rel', 'vehicle'),
#only need one of slot in each level informed to get next level
},
},
{'fixed_slots':[('task','find_platform'),],
'changeable_slots':['street', 'city', 'state'],
'one_of_slot_set':[],
'sys_unaskable_slots':[],
'default_slots_values':[],
'active_prob':0.0,
'same_table_slot_keys': ['place'],
'goal_post_process_fun': None,
'goal_slot_relax_fun': None,
},
{'fixed_slots':[('task','weather'),],
'changeable_slots':['city', 'state'],
'one_of_slot_set':[],
'sys_unaskable_slots':[],
'default_slots_values':[],
'active_prob':0.0,
'same_table_slot_keys':['place'],
'goal_post_process_fun': None,
'goal_slot_relax_fun': None,
},
],
'slot_table_field_mapping':{'from_stop':[('stops','stop')],
'to_stop':[('stops', 'stop')],
'from_city':[('cities', 'city')],
'to_city':[('cities', 'city')],
'from_street':[('streets', 'street')],
'to_street':[('streets', 'street')],
'departure_time':[('time', 'time')],
'departure_time_rel':[('time_relative', 'relative')],
'arrival_time': [('time', 'time')],
'arrival_time_rel': [('time_relative', 'relative')],
'vehicle': [('vehicles', 'vehicle')],
'street':[('streets', 'street'), ('places', 'street')],
'city':[('cities', 'city'), ('places', 'city')],
'state':[('states', 'state'), ('places', 'state')],
'slot_fun':[values_generator1, values_generator2]#slot has the list of values being generated dynamically from functions, each function has to return a list of values, the list could includes only one element.
},
'same_table_slots':{'place':{'table': 'places',
'slots': ['street', 'city', 'state'],
},
'from_place':{'table':'places',#just for future when whe have such data.
'slots': ['from_stop', 'from_street', 'from_city', 'from_state'],
},
'to_place':{'table':'places',
'slots': ['to_stop', 'to_street', 'to_city', 'to_state'],
}
},
'status_included': ['correct', 'incorect', 'unmentioned'],#'pending', 'filled', 'all'],# only for imagining
'slot_value_from':['goal', 'sys_da'],#only for imagining
'slot_from': ['sys_da', 'none', 'goal'],
'answer_types':['direct_answer', 'over_answer', 'complete_answer'],#only for easy seeing and imagining
'dialogue_act_definitions': {#dialogue acts which user simulator used for answering
'request':{
'slot_included': True,
'value_included': False,
'combineable_slots': ['duration'],#['number_transfer', 'duration', 'distance']# return confliction after request
},
'inform':{
'slot_included': True,
'value_included': True,
'slot_from': 'sys_da', #in normal case, list of slots will be informed is taken from system dialogue request act, or from goal
'value_from': 'goal', #in normal case, where to get values for selected slots
#'limited_slots': ['from_borough', 'to_borough'], #list of slot cant combine, except syste ask directly
'accept_used_slots': False,
'use_slot_sequence': False,
},
'oog':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'deny':{
'slot_included': True,
'value_included': True,
'slot_from': 'sys_da',
'value_from': 'sys_da',
'status_included': 'incorrect',
},
'repeat':{
'slot_included': False,
'value_included': False,
},
'help':{
'slot_included': False,
'value_included': False,
},
'apology':{
'slot_included': False,
'value_included': False,
},
'confirm':{#make a question to clarify something, ?User may also make this action?? How to make it? only at the end?, since simulator always know exactly what is going on
'slot_included': True,
'value_included': True,
'status_included': 'filled',
},
'canthearyou, notunderstood':{#only available for system, not for user
},
'affirm':{#simply YES #something interesting here, doesn't include slot/value, but slots consider from sys_da and they are correct
'slot_included': False,
'value_included': False,
'slot_from': 'sys_da',
'status_included': 'correct',
'status_in_all_slots': True,
},
'ack':{
'slot_included': False,
'value_included': False,
},
'thankyou':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'silence':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'reqalts':{
'slot_included': True,
'value_included': True,
'combineable_slots': ['alternative'],
'slot_from': 'none',
'value_from': 'function',
'value_fun': alternative_value_fun,
},
'negate':{
'slot_included': False,
'value_included': False,
'slot_from': 'sys_da',
'status_included': 'incorrect',
},
'bye':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'hello':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
#'add_to_da_prob':0.5,
},
'restart':{#TODO how to user this action?
'slot_included': False,
'value_included': False,
},
'hangup':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'help':{#How?
'slot_included': False,
'value_included': False,
},
},
'act_formats':{#not being used
'slot_value_correct':{
'slot_included': True,
'value_included': True,
'correct_slot_included': False,
'incorrect_slot_included': False,
'value_from': 'goal', #or from sys_da
}
},
'reply_system_acts':{#how to combine several act types to respon an actions,list like below is quite ok, but ???
'request':[{'return_acts':['inform'],#return acts canbe multiple act
'inform_answer_types':{
'direct_answer':0.7,
'over_answer':0.2,
'complete_answer':0.1,
},
'inform_overridden_properties':{
#'use_slot_sequence': True,#will be error someday when system ask a slot which is absen in the current goal
},
'active_prob':0.95,
},
{'return_acts':['silence'],
'active_prob':0.00,
},
{'return_acts':['oog'],
'active_prob':0.05,
},
],
'confirm':[{#explict confirm
#only one action in the set or specify explicitly the apply order and stop when first appliable
#can we change to return_acts, what is different to keep booth? should maintain both for short config and clear distuiguish between two cases
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':0.7, #0.5
},
'case2':{'return_acts':['affirm', 'inform'],
'active_prob':0.3,#0.5
'inform_answer_types':{
'over_answer':0.8,
'complete_answer': 0.2,
},
'inform_overridden_properties':{
'slot_from': 'none',#should be none - nowhere, dont take slot form any where
'accept_used_slots': True,
},
},
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.4,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny'],
'active_prob':0.2,
},
'case3':{'return_acts':['deny', 'inform'],
'active_prob':0.4,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of seond priority answer
],
'active_prob':1.0
},#end of the firs way of answer
],
'implconfirm':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the firs priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of seond priority answer
],
},#end of the first way of answer
],
'iconfirm':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the firs priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of seond priority answer
],
},#end of the first way of answer
],
'inform':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the firs priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
},#end of seond priority answer
{ 'case1':{'return_acts':['bye'],
'active_prob':0.5,
'affirm_overridden_properties':{
'add_to_da_prob':1.0,
},
},#end of first way in the firs priority answer
'case2':{'return_acts':['thankyou', 'hangup'],
'active_prob':0.5,
'affirm_overridden_properties':{
'add_to_da_prob':1.0,
},
},#end of first way in the firs priority answer
},
],
},#end of the first way of answer
],
'select':[{'return_acts':['inform'],
'active_prob': 1.0,
},
],
'apology':[{'return_acts':[],
'active_prob':1.0,
},
],
'help':[{'return_acts':['negate'],
'active_prob':1.0,
'negate_overridden_properties':{
'act_without_slot': True,
}
},
],
'silence':[{'return_acts':['inform'],
'active_prob':1.0,
'inform_answer_types':{
'direct_answer':0.0,
'over_answer':0.9,
'complete_answer':0.1,
},
'inform_overridden_properties':{
'slot_from': 'none',
'accept_used_slots': True,
#'atleast_slots': ['task'],
},
},
],
'notunderstood':[
{ 'return_acts':['repeat'],
'active_prob': 0.4,
},
{ 'return_acts':['repeat', 'inform'],
'active_prob': 0.6,
'inform_answer_types':{
'direct_answer': 0.0,
'over_answer': 0.4,
'complete_answer':0.6,
},
'inform_overridden_properties':{
'slot_from': 'none',
'accept_used_slots': True,
},
},
],
'irepeat':[{'return_acts':['oog'],
'active_prob':1.0,
},
],
'reqmore':[{'return_acts':['negate'],
'active_prob':0.7,
'negate_overridden_properties':{
'act_without_slot': True,
}
},
{ 'return_acts':['request'],
'active_prob':0.3,
},
],
'hello':[{'return_acts':['hello'],
'active_prob':0.3,#0.1
},
{'return_acts':['hello', 'inform'],
'active_prob':0.7,#0.9
'inform_answer_types':{
'over_answer': 0.8,#0.4
'complete_answer': 0.2,#0.6
},
'inform_overridden_properties':{
'slot_from': 'none',
'atleast_slots': ['task'],
},
'hello_overridden_properties':{
'add_to_da_prob':0.5,
}
},
],
'cant_apply':[{'return_acts':['hangup'],
#'cant_apply':[{'return_acts':[],
'active_prob':1.0,
},
],
'offer':{
0:[{'active_prob':1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm', 'inform'],
'active_prob':1.0,
'all_act_valid': True,#all acts in return acts mus appliable !new
'affirm_overridden_properties':{
'add_to_da_prob': 0.0,
},
'inform_overridden_properties':{
'slot_from': 'goal',#take all slots from goal as combinable
'status_included': 'unmentioned',#keep only slot which was not mentioned in this turn
#'limited_slots': [],
#NOTE Should whe support multiple status setting such as unmentioned + incorrect (not save that infor now!
},
},
},
{ 'case1':{'return_acts':['affirm', 'bye'],
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of first way in the firs priority answer
'case2':{'return_acts':['affirm', 'thankyou', 'bye'],
'active_prob':0.4,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of second way in the firs priority answer
'case3':{'return_acts':['affirm', 'request'],#NOTE: don't ask at the end since the current DM anser have inform(from_stop..
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of third way in the firs priority answer
'case4':{'return_acts':['affirm', 'reqalts'],
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of fourth way in the firs priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'limited_slots': [],
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'limited_slots': [],
#'use_slot_sequence': True,
},
},
}#end of seond priority answer
],#end of the list of ordered answer
},#end of first way of anser
],
1:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
2:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
},
'offer_old_unconditional':{
0:[{'return_acts':['bye'],#definition for goal_id=0
'active_prob':0.2,
},
{'return_acts':['request'],
'active_prob':0.2,
},
{'return_acts':['reqalts'],
'active_prob':0.2,
},
{'return_acts':['thankyou'],
'active_prob':0.4,
},
],
1:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
2:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
},
'bye':[{'return_acts':['hangup'],
'active_prob':1.0,
}
],
},
'data_observation_probability':{
'time_relative':{
('now',):1.0,#key is row in the table, if table has only one field, need add comma before the end of the tuple
},
'time_relative_full_thanh':{
('as soon as possible',):0.2,#key is row in the table, if table has only one field, need add comma before the end of the tuple
('next hour',):0.1,
('morning',):0.1,
('noon',):0.1,
('afternoon',):0.1,
('night',):0.1,
('midnight',):0.05,
('early morning',):0.05,
('today',):0.1,
('tomorrow',):0.05,
('the day after tomorrow',):0.05,
},
},
},#end of metatdata
},#end of SimpleUserSimulator
},#end of user_simulator
}#end of config
| [
"thanhlct@gmail.com"
] | thanhlct@gmail.com |
456864271f3e01f15b001804253e5dd219e0b0b0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Data2vec_for_PyTorch/fairseq/modules/ema_module.py | 7d3733766779e26a60716e82be4ac0eef6859024 | [
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,436 | py | #!/usr/bin/env python3
# coding:utf-8
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
"""
Used for EMA tracking a given pytorch module. The user is responsible for calling step()
and setting the appropriate decay
"""
import copy
from dataclasses import dataclass, field
import logging
import torch
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
@dataclass
class EMAModuleConfig(FairseqDataclass):
ema_decay: float = field(
default=0.9999, metadata={"help": "decay for exponential moving average model"}
)
ema_fp32: bool = field(
default=False,
metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"},
)
add_missing_params: bool = True
log_norms: bool = False
class EMAModule:
"""Exponential Moving Average of Fairseq Models"""
def __init__(
self,
model,
config: EMAModuleConfig,
copy_model=True,
device=None,
skip_keys=None,
):
"""
@param model model to initialize the EMA with
@param config EMAConfig object with configuration like
ema_decay, ema_update_freq, ema_fp32
@param device If provided, copy EMA to this device (e.g. gpu).
Otherwise EMA is in the same device as the model.
"""
self.config = config
if copy_model:
self.model = copy.deepcopy(model)
self.model.requires_grad_(False)
else:
self.model = model
self.config = config
self.decay = config.ema_decay
self.skip_keys = skip_keys or set()
self.add_missing_params = config.add_missing_params
self.fp32_params = {}
if device is not None:
logging.info(f"Copying EMA model to device {device}")
self.model = self.model.to(device=device)
if self.config.ema_fp32:
self.build_fp32_params()
self.log_norms = config.log_norms and multi_tensor_l2norm_available
self.logs = {}
def build_fp32_params(self, state_dict=None):
"""
Store a copy of the EMA params in fp32.
If state dict is passed, the EMA params is copied from
the provided state dict. Otherwise, it is copied from the
current EMA model parameters.
"""
if not self.config.ema_fp32:
raise RuntimeError(
"build_fp32_params should not be called if ema_fp32=False. "
"Use ema_fp32=True if this is really intended."
)
if state_dict is None:
state_dict = self.model.state_dict()
def _to_float(t):
return t.float() if torch.is_floating_point(t) else t
for param_key in state_dict:
if param_key in self.fp32_params:
if param_key == "__sq_mom":
self.fp32_params[param_key] = state_dict[param_key]
else:
self.fp32_params[param_key].copy_(state_dict[param_key])
else:
self.fp32_params[param_key] = _to_float(state_dict[param_key])
if "__sq_mom" in self.fp32_params:
self.fp32_params["__sq_mom"][param_key] = torch.zeros_like(
self.fp32_params[param_key]
)
def restore(self, state_dict, build_fp32_params=False):
"""Load data from a model spec into EMA model"""
self.model.load_state_dict(state_dict, strict=False)
if build_fp32_params:
self.build_fp32_params(state_dict)
def set_decay(self, decay, weight_decay=None):
self.decay = decay
if weight_decay is not None:
self.weight_decay = weight_decay
def get_decay(self):
return self.decay
def _step_internal(self, new_model):
"""One update of the EMA model based on new model weights"""
decay = self.decay
ema_state_dict = {}
ema_params = (
self.fp32_params if self.config.ema_fp32 else self.model.state_dict()
)
new_p = []
ema_p = []
for key, param in new_model.named_parameters():
if isinstance(param, dict):
continue
if not self.add_missing_params and key not in ema_params:
continue
try:
ema_param = ema_params[key]
except KeyError:
ema_param = (
param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
)
ema_params[key] = ema_param
if param.shape != ema_param.shape:
raise ValueError(
"incompatible tensor shapes between model param and ema param"
+ "{} vs. {}".format(param.shape, ema_param.shape)
)
if "version" in key:
# Do not decay a model.version pytorch param
continue
lr = 1 - decay
if key in self.skip_keys or not param.requires_grad:
ema_params[key].copy_(param.to(dtype=ema_param.dtype).data)
ema_param = ema_params[key]
else:
if self.log_norms:
new_p.append(param)
ema_p.append(ema_param)
ema_param.mul_(1 - lr)
ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=lr)
ema_state_dict[key] = ema_param
for key, param in new_model.named_buffers():
ema_state_dict[key] = param
if self.log_norms:
if "model_norm" in self.logs:
self.prev_model_norm = self.logs["model_norm"]
chunk_size = 2048 * 32
has_inf = torch.zeros(
(1, 1), dtype=torch.int, device=next(new_model.parameters()).device
)
new_norm = multi_tensor_l2norm(chunk_size, has_inf, [new_p], False)
old_norm = multi_tensor_l2norm(chunk_size, has_inf, [ema_p], False)
self.logs["model_norm"] = new_norm[0]
self.logs["ema_norm"] = old_norm[0]
self.restore(ema_state_dict, build_fp32_params=False)
@torch.no_grad()
def step(self, new_model):
self._step_internal(new_model)
def reverse(self, model):
"""
Load the model parameters from EMA model.
Useful for inference or fine-tuning from the EMA model.
"""
d = self.model.state_dict()
if "_ema" in d:
del d["_ema"]
model.load_state_dict(d, strict=False)
return model
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
a26978798c0b897c4e83d5d4870426ae593e1ff7 | 649255f0d9b6d90be3d3f68263680081f893a089 | /swagger_client/api/remediation_api.py | 53d8a3bb84c470a14ec8ad7b083b1ad8a31fc380 | [] | no_license | khantext/r7ivm3 | 611e1bbc988d9eb8fbb53294d3ed488130e46818 | bd9b25f511f9e7479ea7069d71929700bed09e87 | refs/heads/master | 2023-05-01T10:01:16.336656 | 2021-05-03T18:16:12 | 2021-05-03T18:16:12 | 237,514,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,161 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" rel=\"noopener noreferrer\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. 
## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. 
| `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. 
The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. 
If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. 
### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. 
The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. 
| | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": <value>,] [\"lower\": <value>,] [\"upper\": <value>] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Depending on the data type of the operator the value may be a numeric or string format. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-like` `not-like` | | `container-status` | `is` `is-not` | | `containers` | `are` | | `criticality-tag` | `is` `is-not` `is-greater-than` `is-less-than` `is-applied` ` is-not-applied` | | `custom-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `cve` | `is` `is-not` `contains` `does-not-contain` | | `cvss-access-complexity` | `is` `is-not` | | `cvss-authentication-required` | `is` `is-not` | | `cvss-access-vector` | `is` `is-not` | | `cvss-availability-impact` | `is` `is-not` | | `cvss-confidentiality-impact` | `is` `is-not` | | `cvss-integrity-impact` | `is` `is-not` | | `cvss-v3-confidentiality-impact` | `is` `is-not` | | `cvss-v3-integrity-impact` | `is` `is-not` | | `cvss-v3-availability-impact` | `is` `is-not` | | `cvss-v3-attack-vector` | `is` `is-not` | | `cvss-v3-attack-complexity` | 
`is` `is-not` | | `cvss-v3-user-interaction` | `is` `is-not` | | `cvss-v3-privileges-required` | `is` `is-not` | | `host-name` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-empty` `is-not-empty` `is-like` `not-like` | | `host-type` | `in` `not-in` | | `ip-address` | `is` `is-not` `in-range` `not-in-range` `is-like` `not-like` | | `ip-address-type` | `in` `not-in` | | `last-scan-date` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `location-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` `is-earlier-than` | | `open-ports` | `is` `is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` `is-not` `is-greater-than` `is-less-than` `in-range` | | `service-name` | `contains` `does-not-contain` | | `site-id` | `in` `not-in` | | `software` | `contains` `does-not-contain` | | `vAsset-cluster` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-datacenter` | `is` `is-not` | | `vAsset-host-name` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-power-state` | `in` `not-in` | | `vAsset-resource-pool-path` | `contains` `does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `vulnerability-category` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` | | `vulnerability-cvss-v3-score` | `is` `is-not` | | `vulnerability-cvss-score` | `is` `is-not` `in-range` `is-greater-than` `is-less-than` | | `vulnerability-exposures` | `includes` `does-not-include` | | `vulnerability-title` | `contains` `does-not-contain` `is` `is-not` `starts-with` 
`ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|------------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `string` (yyyy-MM-dd) | `numeric` (yyyy-MM-dd) | | `is-earlier-than` | `numeric` (days) | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` (days) | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a 
discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class RemediationApi(object):
    """Remediation endpoints of the generated swagger client.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default client when the caller does not inject one
        # (e.g. for custom host or authentication configuration).
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_asset_vulnerability_solutions(self, id, vulnerability_id, **kwargs):  # noqa: E501
        """Asset Vulnerability Solution  # noqa: E501

        Returns the highest-superceding rollup solutions for a vulnerability
        on an asset. The solution(s) selected will be the most recent and
        cost-effective means by which the vulnerability can be remediated.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_asset_vulnerability_solutions(id, vulnerability_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int id: The identifier of the asset. (required)
        :param str vulnerability_id: The identifier of the vulnerability. (required)
        :return: ResourcesMatchedSolution
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers of this convenience wrapper always want the deserialized
        # payload only, never the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        # The delegate already returns the right thing for both modes: the
        # data when synchronous, the request thread when async_req is set.
        return self.get_asset_vulnerability_solutions_with_http_info(
            id, vulnerability_id, **kwargs)  # noqa: E501

    def get_asset_vulnerability_solutions_with_http_info(self, id, vulnerability_id, **kwargs):  # noqa: E501
        """Asset Vulnerability Solution  # noqa: E501

        Low-level variant of :meth:`get_asset_vulnerability_solutions`; see
        that method for the endpoint semantics.

        :param async_req bool
        :param int id: The identifier of the asset. (required)
        :param str vulnerability_id: The identifier of the vulnerability. (required)
        :return: ResourcesMatchedSolution
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Endpoint parameters plus the generic client control knobs.
        accepted = ['id', 'vulnerability_id', 'async_req',
                    '_return_http_data_only', '_preload_content',
                    '_request_timeout']

        params = {'id': id, 'vulnerability_id': vulnerability_id}
        for key, value in kwargs.items():
            if key not in accepted:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_asset_vulnerability_solutions" % key
                )
            params[key] = value

        # Both path parameters are mandatory.
        if params['id'] is None:
            raise ValueError("Missing the required parameter `id` when calling `get_asset_vulnerability_solutions`")  # noqa: E501
        if params['vulnerability_id'] is None:
            raise ValueError("Missing the required parameter `vulnerability_id` when calling `get_asset_vulnerability_solutions`")  # noqa: E501

        path_params = {
            'id': params['id'],
            'vulnerabilityId': params['vulnerability_id'],
        }

        header_params = {
            # HTTP header `Accept`
            'Accept': self.api_client.select_header_accept(
                ['application/json;charset=UTF-8']),  # noqa: E501
            # HTTP header `Content-Type`
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/api/3/assets/{id}/vulnerabilities/{vulnerabilityId}/solution', 'GET',
            path_params,
            [],  # no query parameters for this endpoint
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='ResourcesMatchedSolution',  # noqa: E501
            auth_settings=[],  # no authentication setting for this endpoint
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})
| [
"justinlute@gmail.com"
] | justinlute@gmail.com |
0d1de0f607e05539a5790ab7322a444301420238 | 754a00ebd4002c3b25465a567df38294f5c9b080 | /string_manipulation/MinString.py | 8525052df3fb5c9a92c5fa5c12357b7020b5f8bd | [] | no_license | coderjelly/pythonscripts | 3a524639a16643dea44b650e493eadde1e96bc1a | 8b7661534e6c726a089ba9cbaaf4d90a3a9f9244 | refs/heads/master | 2022-07-07T08:45:27.845491 | 2022-06-30T23:12:45 | 2022-06-30T23:12:45 | 56,618,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # Given an array with unique characters arr and a string str,
# find the smallest substring of str containing all characters of arr.
# Example:
# arr: [x,y,z], str: xyyzyzyx
# result: zyx
import sys
def findMinStr(arr, str1):
    """Find the smallest substring of ``str1`` containing all characters of ``arr``.

    Classic sliding-window scan: grow the window on the right until every
    required character is covered, then shrink it from the left while it
    remains covering, recording the shortest window seen.

    Fixes the original Python-2-only code: ``sys.maxint`` no longer exists
    on Python 3 and the ``print`` statement is a syntax error there.

    :param arr: unique characters that must all appear in the window.
    :param str1: the string to search.
    :return: the shortest covering substring, or ``None`` when no window
        covers every required character (also when ``arr`` is empty,
        matching the original behaviour).
    """
    required = len(arr)
    counts = {ch: 0 for ch in arr}  # occurrences of each required char in the window
    covered = 0                     # how many required chars currently have count > 0
    best = None
    best_len = float('inf')         # replaces Python-2-only sys.maxint
    left = 0
    for right, ch in enumerate(str1):
        if ch not in counts:
            continue  # irrelevant character: window simply extends past it
        if counts[ch] == 0:
            covered += 1
        counts[ch] += 1
        # Window covers everything: shrink from the left while it stays valid.
        while covered == required:
            if right - left + 1 < best_len:
                best_len = right - left + 1
                best = str1[left:right + 1]
            dropped = str1[left]
            left += 1
            if dropped in counts:
                counts[dropped] -= 1
                if counts[dropped] == 0:
                    covered -= 1
    return best


print(findMinStr(['x', 'y', 'z'], "xyyzyzyx"))
| [
"coderjelly@users.noreply.github.com"
] | coderjelly@users.noreply.github.com |
3d56980bad7b1dff7341dd32096b14076bedd0f5 | 28b383fdaaa53dba2115450fe5456cd0d34dd937 | /manage.py | 886f7f3048549b4cfef8b93c4f9e868ed2a79a8a | [] | no_license | M-Qi/blog | 90d8bb1e76d8e6b1569ebcc6373a483daa0ce788 | 9dd0844720e08e725522976ab8cee70e7b65525d | refs/heads/main | 2022-12-29T02:17:42.041769 | 2020-10-06T13:38:40 | 2020-10-06T13:38:40 | 301,735,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    # Point Django at this project's settings module unless the caller
    # already configured one in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_site_blog.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        # Re-raise with a friendlier hint, keeping the original failure
        # chained as the cause for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from import_error
    # Dispatch the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"925447599@qq.com"
] | 925447599@qq.com |
1941daf3147a52db83a326417991ec09c645959f | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/p10/打印整数.py | 915fda83f4b58c9a3908ce840830515fba53fb09 | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | for i in range(1,5000):
    # Print each integer in [1, 5000) divisible by both 5 and 7 (i.e. by 35).
    # The message text is user-facing Chinese output: "divisible by 5 and 7 is: %d".
    if i % 5 == 0 and i%7==0:
        print("能被5和7整除的是: %d "% i)
| [
"1083027306@qq.com"
] | 1083027306@qq.com |
0dc0021787af9e143e2a27ae9cac30c7cb8736ec | d2bdc77444ac1a383f04242078c097a42e555cf1 | /main/settings.py | 2a66dc37ed343ef893f0d587e9225aa95e0f21f4 | [] | no_license | Jgonzalez3/Django-LikesBooks | d9bb3be11451e8f9c65179166f7f580d3cefec3e | 340ed99d9f8c02991bad978a97472b3ad41c2b53 | refs/heads/master | 2021-04-18T19:57:21.828050 | 2018-03-25T00:09:36 | 2018-03-25T00:09:36 | 126,650,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.11.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Absolute path of the project root (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key is hard-coded; load it from an environment
# variable before deploying.
SECRET_KEY = 'yu#ajfqayr)d6onhn4%=_^d@+##+l8yin#b106k_wxm7^in3m7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'apps.Books_Likes',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: a local SQLite file next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"javig56@gmail.com"
] | javig56@gmail.com |
bee3338d709e1ade3c0679f50b0bbd0d391c438e | 90bfce5633045007a086679425e0269d93dca738 | /threads_basics_02.py | 931325764d269b1f56d21b4ec2b41b9e95893e44 | [] | no_license | patrix498/Python_threads_basics | 8517fa72821192fc76e72711733a12641263d487 | 56f0932bc82acf4bd3f8917a53002eb1f6158adf | refs/heads/master | 2022-12-22T17:58:51.521075 | 2019-06-05T08:44:00 | 2019-06-05T08:44:00 | 190,346,721 | 0 | 1 | null | 2022-12-12T04:59:17 | 2019-06-05T07:26:37 | Python | UTF-8 | Python | false | false | 545 | py | import threading
import time
import logging
def daemon():
    """Log, sleep 0.2s, log again; the sleep keeps this thread alive
    long enough for the main thread to observe it after a short join."""
    logging.debug('Starting')
    time.sleep(0.2)
    logging.debug('Exiting')
def non_daemon():
    """Log start and exit immediately (no sleep), so it finishes quickly."""
    logging.debug('Starting')
    logging.debug('Exiting')
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName)-10s) %(message)s',
)
# daemon=True: the interpreter will not wait for this thread at shutdown.
d = threading.Thread(name='daemon', target=daemon, daemon=True)
t = threading.Thread(name='non-daemon', target=non_daemon)
d.start()
t.start()
# Wait at most 0.1s for the daemon thread; it sleeps 0.2s, so it is
# normally still running when this timeout expires.
d.join(0.1)
# Bug fix: Thread.isAlive() was deprecated and removed in Python 3.9;
# the supported spelling is is_alive().
print('d.is_alive()', d.is_alive())
t.join()
"noreply@github.com"
] | patrix498.noreply@github.com |
2dc3048f1923ef9d88dff80439338ecd98d70ae9 | c84d2f3719e893487763783d7b29a7f9062c22c0 | /fMNIST/fMNIST_Random.py | 6b3b60c47bc294bb37311175562d60f6365a3a87 | [] | no_license | pod3275/Evolutionary-Successive-Halving-Algorithm | 7685ad3e9ada4f392f699d37229fb05b4d3d7a01 | f4bfe0076ac897c07383220629cf4086f29bf4b9 | refs/heads/master | 2020-05-07T14:25:20.441186 | 2019-10-17T10:58:39 | 2019-10-17T10:58:39 | 180,593,732 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,672 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 21:30:58 2019
@author: lawle
"""
import os
import numpy as np
import time
from math import ceil
import concurrent.futures
from hyperopt import hp, STATUS_OK
from hyperopt.pyll.stochastic import sample
from keras.datasets import fashion_mnist
from sklearn import metrics
from sklearn.svm import SVC
from keras import backend as K
init_pop_size = 50   # number of random hyperparameter sets to evaluate
etha = 3             # halving rate (unused in this random-search script)
one_budget = 1000    # training examples per unit of budget
max_budgets = 27     # budget units spent on every configuration
f = open('Random_fMNIST_%spop_%sbudgets.txt' %(init_pop_size, max_budgets), 'w')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Search space: base-10 exponents for the SVM's C and gamma.
space = {
    'C': hp.uniform('C', -3, 2),
    'gamma': hp.uniform('gamma', -4, -1)
}
global total_budgets
total_budgets = 0    # running count of consumed budget units
num_parallel = 10    # configurations trained concurrently per batch
def get_data():
    """Load Fashion-MNIST, reshape images for the active backend layout,
    scale pixels to [0, 1], and return (train_data, validation_data) tuples."""
    rows, cols = 28, 28
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    if K.image_data_format() == 'channels_first':
        train_shape = (x_train.shape[0], 1, rows, cols)
        test_shape = (x_test.shape[0], 1, rows, cols)
    else:
        # flatten each image into a single feature vector
        train_shape = (x_train.shape[0], rows * cols * 1)
        test_shape = (x_test.shape[0], rows * cols * 1)
    x_train = x_train.reshape(train_shape).astype('float32') / 255
    x_test = x_test.reshape(test_shape).astype('float32') / 255
    return (x_train, y_train), (x_test, y_test)
train_data, validation_data = get_data()
def get_accuracy(params):
    """Train an SVM with the sampled hyperparameters and score it on the
    validation set.

    ``params`` holds base-10 exponents for C and gamma; only the first
    ``max_budgets * one_budget`` training examples are used.
    """
    classifier = SVC(C=10 ** params['C'],
                     gamma=10 ** params['gamma'],
                     random_state=1234)
    print("Fitting SVM")
    n_train = max_budgets * one_budget
    classifier.fit(train_data[0][:n_train], train_data[1][:n_train])
    print("Get accuracy of SVM")
    predictions = classifier.predict(validation_data[0])
    accuracy = metrics.accuracy_score(validation_data[1], predictions)
    return {'loss': -accuracy, 'params': params, 'status': STATUS_OK}
def parallel_training(population):
    """Evaluate every configuration in ``population`` in batches of
    ``num_parallel`` threads; returns the list of losses (negative
    accuracies) in population order. Also increments the module-level
    ``total_budgets`` counter by max_budgets per configuration."""
    result_pop_acc = []
    pop_size = len(population)
    for i in range(ceil(pop_size/num_parallel)):
        parallel_param = []
        # size of this batch: the last one may be smaller than num_parallel
        if (i+1)*num_parallel > pop_size:
            sub_loop = pop_size - i*num_parallel
        else:
            sub_loop = num_parallel
        for j in range(sub_loop):
            parallel_param.append(population[i*num_parallel+j])
        print("\nPopulation %sth to %sth parallel training"
              % (i*num_parallel+1, min((i+1)*num_parallel, pop_size)))
        print("Training from budgets %s\n" % max_budgets)
        # train the batch concurrently; map preserves input order
        with concurrent.futures.ThreadPoolExecutor(len(parallel_param)) as executor:
            results = [x for x in executor.map(get_accuracy, parallel_param)]
        global total_budgets
        total_budgets = total_budgets + max_budgets*len(parallel_param)
        for m in range(len(results)):
            result_pop_acc.append(results[m]['loss'])
    return result_pop_acc
def print_population(population, pop_acc):
    """Write every configuration, the best result, and bookkeeping to the
    module-level log file ``f``. ``pop_acc`` holds losses (negative accuracies)."""
    for idx, params in enumerate(population):
        f.write("\n%sth population\n" % (idx + 1))
        f.write("pop: %s\n" % params)
        f.write("pop_acc: %s\n" % -pop_acc[idx])
    best_idx = np.argmin(pop_acc)
    f.write("\nBest accuracy: %s\n" % -min(pop_acc))
    f.write("Best param set: %s\n" % (population[best_idx]))
    f.write("Total spend budgets: %s\n" % total_budgets)
    f.write("Now time: %s\n" % time.strftime("%X", time.localtime()))
if __name__ == "__main__":
    total_start_time = time.strftime("%X",time.localtime())
    f.write("Start time: %s\n" % (total_start_time))
    population = []
    pop_size = init_pop_size
    print("###### RANDOM SAMPLING Training ######")
    start_time = time.time()
    # random initialize
    for i in range(pop_size):
        population.append(sample(space))
    # evaluate every sampled configuration (threads, batched)
    pop_acc = parallel_training(population)
    end_time = time.time()
    f.write("============================================================")
    f.write("\nRandom Sampling %s sets with %s budgets Training time: %s sec\n"
            %(pop_size, max_budgets, round(end_time-start_time, 4)))
    f.write("Pop Accuracy: %s\n" % pop_acc)
    # average accuracy over the population (losses are negative accuracies)
    pop_sum=0
    for i in range(len(pop_acc)):
        pop_sum+=(-pop_acc[i])
    f.write("Average accuracy: %s percent \n" %(pop_sum/len(pop_acc)*100))
    total_end_time = time.strftime("%X",time.localtime())
    f.write("\nEnd time: " + str(total_end_time))
    f.write("\nTotal spend budgets: %s\n" % total_budgets)
    f.close()
| [
"pod3275@gmail.com"
] | pod3275@gmail.com |
49de7e6ce41f348e586e2eefc9b9a5e0127f92ad | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03574/s538402697.py | a100b6d62d5fdc1b9953e127ac04d0761a0d8b81 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | h,w=map(int,input().split())
s=["."*(w+2)]
for i in range(h):
s.append("."+input()+".")
s.append("."*(w+2))
dx=[-1,0,1,1,1,0,-1,-1]
dy=[1,1,1,0,-1,-1,-1,0]
ans=[]
for i in range(1,h+1):
wp=""
for j in range(1,w+1):
if s[i][j]=="#":
wp+="#"
continue
count=0
for k in range(8):
if s[i+dy[k]][j+dx[k]]=="#":
count+=1
wp+=str(count)
ans.append(wp)
print(*ans,sep="\n") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6099e986b2054b690030adc9e7e17a767ae0e2b4 | c6fa248ec5a7e3c67afac98e365cac850c511473 | /generative_adversarial_networks/code/chapter_08/04_train_discriminator.py | c79e832de127b1bae5f94a1889e27d01ecef99ac | [] | no_license | shenjnigxing/deep-learning-material | 44830e07cc2a5bd47b07ca903c1f2b65beef22bb | 24dfee3b9fe1a40303cb2dfe256028d35113babf | refs/heads/master | 2022-12-23T10:08:05.881432 | 2020-09-16T02:24:38 | 2020-09-16T02:24:38 | 295,900,907 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # example of training the discriminator model on real and random cifar10 images
from numpy import ones
from numpy import zeros
from numpy.random import rand
from numpy.random import randint
from keras.datasets.cifar10 import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LeakyReLU
# define the standalone discriminator model
def define_discriminator(in_shape=(32,32,3)):
model = Sequential()
# normal
model.add(Conv2D(64, (3,3), padding='same', input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
# compile model
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# load and prepare cifar10 training images
def load_real_samples():
# load cifar10 dataset
(trainX, _), (_, _) = load_data()
# convert from unsigned ints to floats
X = trainX.astype('float32')
# scale from [0,255] to [-1,1]
X = (X - 127.5) / 127.5
return X
# select real samples
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = ones((n_samples, 1))
return X, y
# generate n fake samples with class labels
def generate_fake_samples(n_samples):
# generate uniform random numbers in [0,1]
X = rand(32 * 32 * 3 * n_samples)
# update to have the range [-1, 1]
X = -1 + X * 2
# reshape into a batch of color images
X = X.reshape((n_samples, 32, 32, 3))
# generate 'fake' class labels (0)
y = zeros((n_samples, 1))
return X, y
# train the discriminator model
def train_discriminator(model, dataset, n_iter=20, n_batch=128):
half_batch = int(n_batch / 2)
# manually enumerate epochs
for i in range(n_iter):
# get randomly selected 'real' samples
X_real, y_real = generate_real_samples(dataset, half_batch)
# update discriminator on real samples
_, real_acc = model.train_on_batch(X_real, y_real)
# generate 'fake' examples
X_fake, y_fake = generate_fake_samples(half_batch)
# update discriminator on fake samples
_, fake_acc = model.train_on_batch(X_fake, y_fake)
# summarize performance
print('>%d real=%.0f%% fake=%.0f%%' % (i+1, real_acc*100, fake_acc*100))
# define the discriminator model
model = define_discriminator()
# load image data
dataset = load_real_samples()
# fit the model (runs 20 real/fake alternating updates)
train_discriminator(model, dataset)
"Shenjx161212@gmail.com"
] | Shenjx161212@gmail.com |
39b9d90cc96fb4f9dbdf65ced59e9aaf5f137b0d | c99b89e8b4d5ebdae4aaaf26c33dd8075e61b5e4 | /AnchorDxLimsApp/migrations/0039_clinicalsampleinfo_review_time.py | e5fb56629475ea5558f0af08a5a440bf6d7a15ae | [] | no_license | ranandrom/Lims | 1afa9f86829b5c09b10bc802501f745c489045c6 | 8a762cad72a334054f4211e46a4b36b403dc06c2 | refs/heads/master | 2020-03-12T00:14:45.192049 | 2018-04-23T09:44:45 | 2018-04-23T09:44:45 | 128,862,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-11-20 03:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AnchorDxLimsApp', '0038_clinicalsamplepretreatment'),
]
operations = [
migrations.AddField(
model_name='clinicalsampleinfo',
name='Review_Time',
field=models.CharField(default=0, max_length=32),
preserve_default=False,
),
]
| [
"ramandrom@139.com"
] | ramandrom@139.com |
9838c0f699bb416bb92272c4a36bf96dc2462b18 | 31ecda059c0bdc359403438e8f82662d32846ea6 | /Modules/Filtering/MathematicalMorphology/wrapping/test/FlatStructuringElementTest.py | 1b046a1597a4289221e7a065888a9ef10926d0c8 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-4.3TAHOE",
"LicenseRef-scancode-other-permissive",
"libtiff",
"LicenseRef-scancode-mit-old-style",
"Spencer-86",
"FSFUL",
"MIT",
"SMLNJ",
"NTP",
"Libpng",
"LicenseRef-scancode-war... | permissive | AndyJMR/ITK | b4000bc42bed81434015a99c19bc2b85f473fb61 | 81c14ce858a530699ee2fbf7fa48b884ad26b984 | refs/heads/master | 2020-07-26T07:25:27.969568 | 2019-09-13T17:26:49 | 2019-09-14T11:51:04 | 208,575,999 | 2 | 0 | Apache-2.0 | 2019-09-15T10:16:49 | 2019-09-15T10:16:49 | null | UTF-8 | Python | false | false | 1,094 | py | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk
from sys import argv, exit
itk.auto_progress(2)
# Build a 2-D flat structuring element whose shape is named in argv[1]
# ("Ball" or "Box") with the radius given in argv[2].
if argv[1] == "Ball":
    print("Ball")
    strel = itk.FlatStructuringElement[2].Ball(int(argv[2]))
elif argv[1] == "Box":
    print("Box")
    strel = itk.FlatStructuringElement[2].Box(int(argv[2]))
else:
    # Bug fix: corrected the typo "arguement" in the error message.
    print("invalid argument: " + argv[1])
    exit(1)
| [
"maggot_fff@hotmail.fr"
] | maggot_fff@hotmail.fr |
e98140b6ab4a78c3e01cdec72713cc4484a5594a | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc118/A/4965258.py | 809490a42b13f7fe626c23248f25ee09f7538d58 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | a, b = map(int, input().split())
if b % a == 0:
print(a + b)
else:
print(b - a) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
77755d9f0e7608712fc29d80c4c9d6de8922f699 | 4bd5900c32977d68f13462ce8ba243ea50c4f357 | /fileOp.py | 35835c82d23cd973084c9f9a4e1b1b05503c559b | [] | no_license | hsq5508210/DPT | d8e2e4ed5a10b367e9c89828cdb4ce48d8599ff3 | 8dc70a709b2dd0e03ef962f3c060fd92da8c1b31 | refs/heads/master | 2020-05-26T11:07:23.451919 | 2019-07-23T12:00:17 | 2019-07-23T12:00:17 | 188,210,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | import IO
import os
import numpy as np
#================================================
# Get file names in a dir 'path'.
def file_name(path):
    """Return one list of file names per directory visited under ``path``.

    NOTE(review): this yields a list of lists (one entry per os.walk
    directory), not a flat list of names -- preserved as-is for callers.
    """
    return [names for _, _, names in os.walk(path)]
#================================================
# Split the dataSet
def splitData(path = None, data = None, num = 10):
    """Split a 2-D array of rows into ``num`` consecutive chunks.

    If ``path`` is given, the data is loaded via ``IO.readData``; otherwise
    ``data`` must be supplied. The first ``num - 1`` chunks hold
    ``len(data) // num`` rows each and the final chunk holds the remainder.
    Returns a list of ``num`` numpy arrays.
    """
    if path is not None:  # idiom fix: identity test for None
        data = IO.readData(path)
    # Feed the np.array data.
    print("Spliting...")
    data = np.array(data)
    retdata = []
    sumLen = data.shape[0]
    print("shape is:", data.shape)
    subLen = int(sumLen / num)
    cnt = 0
    for i in range(num):
        if i == num - 1:
            # Bug fix: the last chunk previously sliced data[cnt:sumLen-1]
            # and silently dropped the final row; take everything remaining.
            retdata.append(np.array(data[cnt:]))
        else:
            retdata.append(np.array(data[cnt:cnt + subLen]))
        cnt += subLen
    return retdata
#================================================
| [
"2468624503@qq.com"
] | 2468624503@qq.com |
612349d0f617d3db6e1a010bf1923480c2aedb3f | b50ad9165e09ecddb1e3376ecd909737178bbfc5 | /Data_Structure/Caculator.py | 8a35bc852eed31488ff4be5b295f440a4dd2bed5 | [] | no_license | joy20182018/-python | 1fdc076c3ae6640c687d48e8b376a524a879be48 | 13f59b359901c34d0cacac8fec7f79507063b835 | refs/heads/master | 2020-04-29T10:56:23.481542 | 2019-03-17T09:18:34 | 2019-03-17T09:18:34 | 176,078,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | '''
栈的应用
'''
from Stack import Stack
def postfixEval(postfixExpr):
    """Evaluate a whitespace-separated postfix expression and return the result.

    Bug fix: the original operand test ``token in "0123456789"`` is a
    substring check, so most multi-digit tokens (e.g. "91") were treated
    as operators and crashed the stack; ``str.isdigit`` classifies any
    non-negative integer token correctly.
    """
    operandStack = Stack()
    for token in postfixExpr.split():
        if token.isdigit():
            operandStack.push(int(token))
        else:
            # operator: pop two operands (note the order) and push the result
            operand2 = operandStack.pop()
            operand1 = operandStack.pop()
            operandStack.push(doMath(token, operand1, operand2))
    return operandStack.pop()
def doMath(op, op1, op2):
    """Apply the binary operator ``op`` to ``op1`` and ``op2``.

    '*', '/' and '+' behave as expected; any other token (including '-')
    falls through to subtraction, matching the original if/elif dispatch.
    """
    dispatch = {
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
        "+": lambda a, b: a + b,
    }
    return dispatch.get(op, lambda a, b: a - b)(op1, op2)
# (7 + 8) / (3 + 2) -> prints 3.0
print(postfixEval('7 8 + 3 2 + /'))
"noreply@github.com"
] | joy20182018.noreply@github.com |
b4432b868275d8cab85ac0720201f49245e9b333 | 00b41bb652a1cac8e1ff48168a79906f9851b5fb | /webapp/bin/epylint | 3904d1fccb85342f961b975e37c19e1b3c690c27 | [] | no_license | JazKarit/learn_languages | 6220311a8d9bab7673a9dd85795c84b2b981fdcd | 91ee138d4daa4f4e6b598350c8cade0df89f8a73 | refs/heads/master | 2021-01-09T19:40:42.583102 | 2020-02-23T18:54:40 | 2020-02-23T18:54:40 | 242,435,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/jaskrit/Documents/hack_cu_vi/webapp/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix so pylint sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
| [
"Jasi5748@colorado.edu"
] | Jasi5748@colorado.edu | |
e394815f54418f7089b6e45fea5433e37e1563bd | bd2f0a21424dda183dcad7794877d1d16bae0098 | /neural_dimensionality_reduction/scaling.py | b4c7769ca9fd5ebf4af75fb4a914301f81bc92db | [] | no_license | trevortknguyen/dimensionality | 6a1ecc7fd9b27de91d8d45ba06477f7d8e17cc48 | 5701d74e7875b0a10d4d72a22f76aa6d76a94ba6 | refs/heads/master | 2023-03-06T08:02:37.775729 | 2021-02-21T22:14:58 | 2021-02-21T22:14:58 | 294,223,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | import os
import netCDF4 as nc
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def scale_identity(spikes):
    """No-op scaler: return the spike amplitudes unchanged."""
    return spikes
def scale_standard_scaler(spikes):
    """Z-score each feature of ``spikes`` (zero mean, unit variance per column)."""
    scaler = StandardScaler()
    return scaler.fit_transform(spikes)
def scale_custom(spikes):
    """Scale each spike by its mean amplitude across channels.

    Divides every row of ``spikes`` (n_spikes, n_channels) by that row's
    mean, expressing each channel amplitude relative to the spike's own
    average. Generalized from the original hard-coded 4-column (tetrode)
    broadcast to any number of channels via ``keepdims``; results are
    identical for 4-channel input.
    """
    row_means = np.mean(spikes, axis=1, keepdims=True)
    return spikes / row_means
def pca_transform(scaled_spikes):
    """Project the scaled spikes onto their first three principal components."""
    return PCA(n_components=3).fit_transform(scaled_spikes)
def transform_spikes_to_marks(spikes):
    """Scale spikes channel-relatively, reduce them to 3-D marks via PCA,
    and sanity-check that the spike count is preserved."""
    marks = pca_transform(scale_custom(spikes))
    assert marks.shape[0] == spikes.shape[0]
    return marks
| [
"trevortknguyen@utexas.edu"
] | trevortknguyen@utexas.edu |
c3ed332fc355a2f0f910961bf6c97eb7223c1e9c | ea2dbf03b2abd11839d5e53a1fc30aae75dd725b | /photobooth2/app.py | 100d7a56e405646843d924a7b23b28dc22fdb757 | [] | no_license | jasminelam1/photobooth | 277b12348262cfc71153066536178a30f92a7c40 | e87eb21908655d332ec91275664065ffbb3bdff3 | refs/heads/main | 2023-05-27T20:26:31.510706 | 2021-06-17T21:18:05 | 2021-06-17T21:18:05 | 377,959,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | from flask import Flask, request, render_template, Response, jsonify
from datetime import datetime
import cv2
import os
from rpi_camera import RPiCamera
from time import sleep
from imutils.video import VideoStream
app = Flask(__name__)
# Shared mutable state between routes: the camera object, the most recent
# encoded frame (starts as the int 0 until /stream runs), and the active effect.
my_camera = None
current_frame = 0
my_effect = None
@app.route('/')
def index():
    """Serve the photobooth landing page."""
    print("welcome")
    return render_template("index.html")
#the generator, a special type of function that yields, instead of returns.
def gen(camera, effect):
    # MJPEG frame generator: applies the currently selected module-level
    # effect to each camera frame, stashes the encoded jpg in current_frame
    # (so /capture can save it), and yields multipart stream chunks forever.
    global current_frame, my_effect
    while True:
        """
        In this version we keep a separte jpg frame to capture before
        we convert to bytes.
        """
        if my_effect == b"gray":
            # decode -> grayscale -> re-encode as jpg
            jpeg = cv2.imdecode(camera.get_frame(), cv2.IMREAD_COLOR)
            gray = cv2.cvtColor(jpeg, cv2.COLOR_BGR2GRAY)
            _,current_frame=cv2.imencode(".jpg", gray)
        elif my_effect == b"blur":
            # decode -> 5x5 box blur -> re-encode as jpg
            jpeg = cv2.imdecode(camera.get_frame(), cv2.IMREAD_COLOR)
            blur = cv2.blur(jpeg,(5,5))
            _,current_frame=cv2.imencode(".jpg", blur)
        else:
            # no effect selected: pass the camera's encoded frame through
            current_frame = camera.get_frame()
        frame_to_stream = current_frame.tobytes()
        # Each frame is set as a jpg content type. Frame data is in bytes.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame_to_stream + b'\r\n')
@app.route('/filter', methods= ['POST', 'GET'])
def filter():
    """Set the active stream effect (e.g. b"gray" or b"blur") from the raw
    request body and acknowledge with a JSON status.

    NOTE: the function name shadows the builtin ``filter``; it is kept
    because Flask derives the endpoint name from it.
    """
    # Cleanup: removed dead commented-out code and the unused
    # ``my_camera`` entry from the global declaration.
    global my_effect
    my_effect = request.data
    print(my_effect)
    return jsonify(result={'status':200})
@app.route('/capture', methods=['POST', 'GET'])
def capture():
    """Save the most recent streamed frame to captured_pics/ and return its path.

    Cleanup: removed the unused ``data = request.data`` assignment.
    """
    now = datetime.now()
    # NOTE(review): ":" in the timestamp makes the filename invalid on
    # Windows; fine on the target Raspberry Pi.
    date_time = now.strftime("%m-%d-%Y-%H:%M:%S")
    file_name = date_time + ".jpg"
    # current_frame was encoded by gen() with cv2.imencode, so decode it back
    # to pixels. NOTE(review): if /stream has never run, current_frame is
    # still the int 0 and imdecode will fail.
    jpeg = cv2.imdecode(current_frame, cv2.IMREAD_COLOR)
    # We will store pics in /captured_pics, found in the root folder.
    full_path = os.path.join(app.root_path, 'captured_pics', file_name)
    # Save the image
    cv2.imwrite(full_path , jpeg)
    # return full_path does nothing yet, but it could be use to display pic.
    return full_path
@app.route('/stream')
def stream():
    # (Re)create the camera and return a multipart MJPEG response fed by gen().
    global my_camera, my_effect
    my_camera = RPiCamera()
    feed = Response(gen(my_camera, my_effect), mimetype='multipart/x-mixed-replace; boundary=frame')
    return feed
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network; disable debug outside a
    # trusted LAN.
    app.run(host='0.0.0.0', debug=True )
"jasmine.lam@csedge.org"
] | jasmine.lam@csedge.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.