id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3304750 | <filename>cstar/remote_paramiko.py<gh_stars>1-10
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paramiko.client
import re
from cstar.output import err, debug, msg
from cstar.exceptions import BadSSHHost, BadEnvironmentVariable, NoHostsSpecified
from cstar.executionresult import ExecutionResult
from pkg_resources import resource_string
PING_COMMAND = "echo ping"

# Matches any character that is NOT alphanumeric, '|' or '_'.  Used both to
# decide whether a shell word needs quoting and to validate env-var names.
_alnum_re = re.compile(r"[^a-zA-Z0-9\|_]")


class RemoteParamiko(object):
    """SSH remote-execution wrapper around paramiko for a single host.

    Connections are created lazily and re-validated with a cheap ping command
    before reuse.  Usable as a context manager; ``close()`` is idempotent.
    """

    def __init__(self, hostname, ssh_username=None, ssh_password=None, ssh_identity_file=None):
        """Remember connection parameters; no connection is opened yet.

        ``hostname`` may be a plain string or an object exposing an ``ip``
        attribute (e.g. a topology node).

        Raises:
            NoHostsSpecified: if the resulting hostname is empty.
        """
        if hasattr(hostname, "ip"):
            self.hostname = hostname.ip
        else:
            self.hostname = hostname
        if not self.hostname:
            raise NoHostsSpecified("No SSH host specified")
        self.ssh_username = ssh_username
        # Fixed: the original line was corrupted to "<PASSWORD>"; store the
        # password argument as-is.
        self.ssh_password = ssh_password
        self.ssh_identity_file = ssh_identity_file
        self.client = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()

    def _connect(self):
        """Ensure ``self.client`` is a live SSH connection, (re)creating it if needed.

        Raises:
            BadSSHHost: if a connection cannot be established.
        """
        if self.client:
            # Ensure underlying client is still a valid open connection
            try:
                stdin, stdout, stderr = self.client.exec_command(PING_COMMAND)
            except (ConnectionResetError, paramiko.ssh_exception.SSHException):
                # ConnectionResetError is raised when a connection was established but then broken
                # paramiko.ssh_exception.SSHException is raised if the connection was known to be broken
                self.client = None
        if not self.client:
            try:
                self.client = paramiko.client.SSHClient()
                pkey = None
                if self.ssh_identity_file is not None:
                    pkey = paramiko.RSAKey.from_private_key_file(self.ssh_identity_file, None)
                debug("Username : ", self.ssh_username)
                debug("Id file: ", self.ssh_identity_file)
                self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
                self.client.connect(self.hostname, compress=True, username=self.ssh_username,
                                    password=self.ssh_password, pkey=pkey)
            except Exception as e:
                # Narrowed from a bare ``except:`` (which also swallowed
                # KeyboardInterrupt); keep the cause chained for diagnostics.
                self.client = None
                raise BadSSHHost("Could not establish an SSH connection to host %s" % (self.hostname,)) from e

    def run_job(self, file, jobid, timeout=None, env=None):
        """Upload *file* and execute it as remote job *jobid*.

        The job runs in ``.cstar/remote-jobs/<jobid>`` through a wrapper
        script that injects ``env``; stdout/stderr/exit status are read back
        from files the wrapper writes.

        Raises:
            BadEnvironmentVariable: if an env-var name contains unsafe characters.
            BadSSHHost: if the SSH connection breaks.
        """
        if env is None:
            # Was a mutable default argument (``env={}``) -- shared between calls.
            env = {}
        try:
            self._connect()
            transport = self.client.get_transport()
            session = transport.open_session()
            paramiko.agent.AgentRequestHandler(session)
            job_dir = ".cstar/remote-jobs/" + jobid
            self.run(("mkdir", "-p", job_dir))
            self.put_command(file, "%s/job" % (job_dir,))
            # Manually insert environment into script, since passing env into exec_command leads to it being
            # ignored on most ssh servers. :-(
            for key in env:
                if _alnum_re.search(key):
                    raise BadEnvironmentVariable(key)
            env_str = " ".join(key + "=" + self.escape(value) for key, value in env.items())
            remote_script = resource_string('cstar.resources', 'scripts/remote_job.sh')
            wrapper = remote_script.decode("utf-8") % (env_str,)
            self.write_command(wrapper, "%s/wrapper" % (job_dir,))
            cmd = """
                cd %s
                nohup ./wrapper
                """ % (self.escape(job_dir),)
            stdin, stdout, stderr = self.client.exec_command(cmd, timeout=timeout)
            # Wait for the wrapper to finish before collecting its output files.
            stdout.channel.recv_exit_status()
            real_output = self.read_file(job_dir + "/stdout")
            real_error = self.read_file(job_dir + "/stderr")
            real_status = int(self.read_file(job_dir + "/status"))
            return ExecutionResult(cmd, real_status, real_output, real_error)
        except (ConnectionResetError, paramiko.ssh_exception.SSHException):
            raise BadSSHHost("SSH connection to host %s was reset" % (self.hostname,))

    def get_job_status(self, jobid):
        """Not implemented yet."""
        pass

    def run(self, argv):
        """Run *argv* (a sequence of command arguments) on the remote host.

        Returns:
            ExecutionResult with the exit status and decoded stdout/stderr.
        Raises:
            BadSSHHost: if the SSH connection is reset.
        """
        try:
            self._connect()
            cmd = " ".join(self.escape(s) for s in argv)
            stdin, stdout, stderr = self.client.exec_command(cmd)
            status = stdout.channel.recv_exit_status()
            out = stdout.read()
            error = stderr.read()
            if status != 0:
                err("Command %s failed with status %d on host %s" % (cmd, status, self.hostname))
            else:
                debug("Command %s succeeded on host %s, output was %s and %s" %
                      (cmd, self.hostname, str(out, 'utf-8'), str(error, 'utf-8')))
            return ExecutionResult(cmd, status, str(out, 'utf-8'), str(error, 'utf-8'))
        except (ConnectionResetError, paramiko.ssh_exception.SSHException):
            self.client = None
            raise BadSSHHost("SSH connection to host %s was reset" % (self.hostname,))

    @staticmethod
    def escape(input):
        """Single-quote *input* for the shell when it contains unsafe characters."""
        if _alnum_re.search(input):
            return "'" + input.replace("'", r"'\''") + "'"
        return input

    def read_file(self, remotepath):
        """Return the UTF-8 decoded contents of *remotepath* on the remote host."""
        self._connect()
        with self.client.open_sftp() as ftp_client:
            with ftp_client.file(remotepath, 'r') as f:
                return str(f.read(), 'utf-8')

    def put_file(self, localpath, remotepath):
        """Copy a local file to the remote host."""
        self._connect()
        with self.client.open_sftp() as ftp_client:
            ftp_client.put(localpath, remotepath)

    def put_command(self, localpath, remotepath):
        """Copy a local file to the remote host and make it executable."""
        self._connect()
        with self.client.open_sftp() as ftp_client:
            ftp_client.put(localpath, remotepath)
            ftp_client.chmod(remotepath, 0o755)

    def write_command(self, definition, remotepath):
        """Write *definition* to *remotepath* on the remote host and make it executable."""
        self._connect()
        with self.client.open_sftp() as ftp_client:
            with ftp_client.open(remotepath, 'w') as f:
                f.write(definition)
            ftp_client.chmod(remotepath, 0o755)

    def mkdir(self, path):
        """Create directory *path* on the remote host."""
        # Fixed: ``run()`` expects a sequence of arguments; passing the bare
        # string made it escape and join every single character.
        self.run(("mkdir", path))

    def close(self):
        """Close the underlying SSH connection, if any."""
        if self.client:
            self.client.close()
            self.client = None
| StarcoderdataPython |
1742270 | from __future__ import absolute_import, division, print_function
import numpy as np
from .weno5m import WENO5M
class HenrickApproximator:
    """Spatial-derivative approximator of Henrick et al. built on WENO5M.

    Approximates flux derivatives for detonation equations written in a
    shock-attached frame: the WENO5M scheme is used on interior points, and
    one-sided Taylor-series formulae (eqs. (31)-(33) of Henrick et al.,
    J. Comput. Phys. 213 (2016) 311-329, doi:10.1016/j.jcp.2005.08.013) are
    used on the three points adjacent to the shock.  The three leftmost grid
    points are ghost points whose derivatives are set to zero.
    """

    def __init__(self, size, dx, weno_eps):
        """Store the grid spacing and build the underlying WENO5M scheme."""
        self._dx = dx
        self._w = WENO5M(weno_eps, size)
        # Number of ghost points at the left edge of the grid.
        self.nghost_points = 3

    def approximate_flux_derivatives(self, v, f, char_speed, to_arr):
        """Write the approximation of d(f)/dx into ``to_arr``.

        Parameters
        ----------
        v : ndarray
            Conservative variable.
        f : ndarray
            Flux of the conservative variable ``v``.
        char_speed : float
            Maximum characteristic speed (Lax-Friedrichs splitting speed).
        to_arr : ndarray
            Output array of the same size as ``f``.
        """
        # Lax-Friedrichs flux splitting into upwind/downwind parts.
        split_plus = f + char_speed * v
        split_minus = f - char_speed * v
        hat_plus = np.empty_like(split_plus)
        hat_minus = np.empty_like(split_minus)
        hat_plus[2:-3], hat_minus[2:-3] = self._w.interpolate(split_plus, split_minus)
        flux_hat = 0.5 * (hat_plus + hat_minus)
        # Ghost points carry no physical information.
        for ghost in range(3):
            to_arr[ghost] = 0.0
        # Interior points: conservative difference of interface fluxes.
        to_arr[3:-3] = (flux_hat[3:-3] - flux_hat[2:-4]) / self._dx
        # Near-shock points use the one-sided biased formulae.
        to_arr[-3] = self._compute_two_points_away_from_shock(f)
        to_arr[-2] = self._compute_one_point_away_from_shock(f)
        to_arr[-1] = self._compute_at_shock(f)

    def _compute_two_points_away_from_shock(self, f):
        """Biased finite-difference formula two points away from the shock."""
        weights = np.array([-2.0, 15.0, -60.0, 20.0, 30.0, -3.0]) / 60.0
        return np.dot(weights, f[-6:]) / self._dx

    def _compute_one_point_away_from_shock(self, f):
        """Biased finite-difference formula one point away from the shock."""
        weights = np.array([-1.0, 6.0, -18.0, 10.0, 3.0]) / 12.0
        return np.dot(weights, f[-5:]) / self._dx

    def _compute_at_shock(self, f):
        """Fully one-sided finite-difference formula at the shock point."""
        weights = np.array([-12.0, 75.0, -200.0, 300.0, -300.0, 137.0]) / 60.0
        return np.dot(weights, f[-6:]) / self._dx
| StarcoderdataPython |
176964 | import csv
import os
def menu_call():
    """Show the main menu and return the user's choice as an int in 1-4.

    Re-prompts until a valid option is entered.  Non-numeric input no longer
    crashes the program, and option 4 is now accepted (the original
    ``range(1, 4)`` made "4: Delete Entry" unreachable).
    """
    user_selection = 0
    while user_selection not in range(1, 5):
        print("Please chose an option from the menu below:")
        print("1: Generate a Password | 2: Create Entry | 3: Edit Entry | 4: Delete Entry.")
        try:
            user_selection = int(input(">:"))
        except ValueError:
            # Not a number: fall through and show the menu again.
            user_selection = 0
    return user_selection
def create_entry():
    """Interactively collect a website/username/password triple and store it.

    The entry is appended to ``PasswordManagerUserFile.csv`` next to this
    script once the user confirms the input.
    """
    user_answer = ""
    while user_answer != 'y':
        entry_user_website_url = input(
            "Please enter the website this is for: ")
        entry_user_name = input(
            "Please enter the username you would like to store: ")
        entry_user_password = input(
            "Please enter the password you would like to store: ")
        # Fixed: the password placeholder in this f-string was corrupted.
        print(
            f"Does the following information look correct? website: {entry_user_website_url} Username:{entry_user_name} password: {entry_user_password}")
        # Fixed: accept 'Y' as well -- the original compared the raw input
        # against lowercase 'y' only, so typing 'Y' looped forever.
        user_answer = input(
            "Please type 'Y' for yes, or any key to redo continue: ").lower()
    save_path = os.path.join(os.path.dirname(__file__), "PasswordManagerUserFile.csv")
    # 'a' (append) so a new entry no longer overwrites all previously saved
    # entries; newline='' is required by the csv module.
    with open(save_path, 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        # Fixed: writerow() needs a sequence of fields; the original passed a
        # concatenated string, emitting every character as its own column.
        csvwriter.writerow([entry_user_website_url,
                            entry_user_name, entry_user_password])
    print(save_path)
| StarcoderdataPython |
3391982 | import time
def bruteForceBarrier(barrierSeconds):
    """Throttle brute-force attempts by blocking the caller.

    Parameters
    ----------
    barrierSeconds : float
        Number of seconds to sleep before returning.
    """
    time.sleep(barrierSeconds)
| StarcoderdataPython |
1797493 | """
>>> from ._microio import *
>>> import time, socket
>>> def foo():
... yield
... raise Return(1)
>>> loop(foo())
1
>>> def bar():
... foo_val = yield foo()
... raise Return(foo_val + 1)
>>> loop(bar())
2
>>> def delayed_print():
... yield time.time() + 0.1 # Delay for 0.1 second
... print("delayed_print")
>>> def main_task():
... print("entering")
... yield delayed_print
... print("exiting")
... raise Return(True)
>>> loop(main_task())
entering
exiting
delayed_print
True
>>> def main_task():
... t1 = time.time()
... yield time.time() + 0.5
... t2 = time.time() - t1
... raise Return(t2 >= 0.5)
>>> loop(main_task())
True
>>> def oneshot_server(sock):
... def _server_task():
... yield sock, POLLREAD
... csock, addr = sock.accept()
... print("Connection")
... yield sock, None
... sock.close()
... yield csock, POLLREAD
... data = csock.recv(1024)
... print("Request: {}".format(data.decode("ascii")))
... yield csock, POLLWRITE
... csock.send(data)
... yield csock, None
... csock.close()
...
... return _server_task
>>> def client(address):
... sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
... sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
... sock.setblocking(False)
... sock.connect_ex(address)
... yield sock, POLLWRITE | POLLERROR
... sock.connect_ex(address)
... yield sock, POLLWRITE | POLLERROR
... sock.send(b"ping")
... yield sock, POLLREAD
... data = sock.recv(1024)
... yield sock, None
... print("Reply: {}".format(data.decode("ascii")))
... sock.close()
>>> def main_task():
... sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
... sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
... sock.bind(('127.0.0.1', 0))
... sock.setblocking(False)
... sock.listen(1)
... yield oneshot_server(sock)
... yield client(sock.getsockname()[:2])
>>> loop(main_task())
Connection
Request: ping
Reply: ping
>>> def failing():
... yield "unknown"
>>> loop(failing()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: ...
>>> def failing():
... yield (None,)
>>> loop(failing()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: ...
>>> def failing():
... yield None, POLLREAD
>>> loop(failing()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: ...
>>> class Error(Exception):
... pass
>>> def failing():
... yield
... raise Error()
>>> loop(failing())
Traceback (most recent call last):
...
Error
>>> def main_task():
... try:
... yield failing()
... except Error:
... print("Error in failing()")
>>> loop(main_task())
Error in failing()
"""
# Execute the doctests embedded in the module docstring above.
# NOTE(review): this runs at import time, not only under __main__ -- presumably
# intentional for this test module; confirm before guarding it.
import doctest
doctest.testmod()
| StarcoderdataPython |
from onegov.ticket.handler import Handler, HandlerRegistry

# NOTE(review): the registry is deliberately created *before* the model imports
# below -- presumably the model modules look ``handlers`` up at import time,
# which is why the import-order lint warning is silenced with ``# noqa``.
# Confirm before reordering these imports.
handlers = HandlerRegistry() # noqa

from onegov.ticket.model import Ticket
from onegov.ticket.model import TicketPermission
from onegov.ticket.collection import TicketCollection

# Public API exported via ``from onegov.ticket import *``.
__all__ = [
    'Handler',
    'handlers',
    'Ticket',
    'TicketCollection',
    'TicketPermission'
]
| StarcoderdataPython |
1673390 | <reponame>xandox/jira_comment
from .image import Image, image_directory
from .table import Row, HeadRow, Table
from .float import FloatValue, is_float_value
from .text import *
from .settings import settings
# Public API of the package: names exported via ``from <package> import *``.
# Fixed: the closing bracket had dataset-marker text fused onto it, which is a
# syntax error.
# NOTE(review): FloatValue, is_float_value and settings are imported above but
# not re-exported here -- confirm that their omission is intentional.
__all__ = [
    "Image",
    "image_directory",
    "Row",
    "HeadRow",
    "Table",
    "Text",
    "Paragraph",
    "H1",
    "H2",
    "H3",
]
1725911 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import os,sys
import urllib.request, urllib.parse, http.cookiejar
import requests
import re
from datetime import datetime
def getcontent(url):
    """Download one Tmall catalogue page and extract its tire listings.

    Returns a list of ``(url, name, price, sales, review_count)`` string
    tuples, one per listing found on the page.
    """
    text_string = requests.get(url).text
    # Regex-match the tire list out of the raw (JSONP-escaped) page source;
    # the \\" sequences match the escaped quotes inside the JSONP payload.
    html_string = re.findall(r'<a class=\\"item-name\\"(.*?)href=\\"//(.*?)target=\\"_blank\\">(.*?)</a>(.*?)class=\\"c-price\\">(.*?) </span>(.*?)class=\\"sale-num\\">(.*?)</span>(.*?)<span>评价:(.*?)</span>(.*?)',text_string)
    tuhupage = []
    # Walk every match, keeping only the capture groups of interest:
    # 1 = url, 2 = name, 4 = price, 6 = sales count, 8 = review count.
    for tuhu in html_string:
        wangzhi = tuhu[1]
        mingcheng = tuhu[2]
        price = tuhu[4]
        xiaoliang = tuhu[6]
        pingjia = tuhu[8]
        tuhupage.append((wangzhi,mingcheng,price,xiaoliang,pingjia))
    # Return the list of tires found on this page.
    return tuhupage
def gethercontent(page):
    """Crawl the Tmall shop catalogue and gather tire data from every page.

    Parameters
    ----------
    page : int
        Total number of catalogue pages to crawl (pages 1..page inclusive).

    Returns
    -------
    list
        One ``getcontent`` result list per crawled page.
    """
    tuhulist = []
    # Example of a complete page URL:
    # https://tuhucn.tmall.com/i/asynSearch.htm?_ksTS=1453466239320_1206&callback=jsonp1207&mid=w-11681573747-0&wid=11681573747&path=/category-753373497.htm&&spm=a1z10.5-b.w4011-11681573747.392.yLWQRY&catName=%C6%B7%C5%C6%C2%D6%CC%A5&catId=753373497&search=y&pageNo=3&scid=753373497
    url = 'https://tuhucn.tmall.com/i/asynSearch.htm?_ksTS=1453466239320_1206&callback=jsonp1207&mid=w-11681573747-0&wid=11681573747&path=/category-753373497.htm&&spm=a1z10.5-b.w4011-11681573747.392.yLWQRY&catName=%C6%B7%C5%C6%C2%D6%CC%A5&catId=753373497&search=y&pageNo='  # base URL, page number appended
    # Fixed off-by-one: range(1, page) silently skipped the last page, so only
    # page-1 pages were ever crawled.
    for page_no in range(1, page + 1):
        page_url = url + str(page_no) + '&scid=753373497'
        print(page_url)
        content = getcontent(page_url)
        tuhulist.append(content)
    return tuhulist
def saveFile(tuhulist):
    """Write the crawled tire data as a tab-separated UTF-8 file.

    The output file is created next to this script and named
    ``tuhu_tianmao<YYYYMMDD>.csv``.  Despite the .csv suffix the columns are
    tab-separated (see the encoding note at the bottom of this script).
    """
    save_path = os.path.join(sys.path[0], "tuhu_tianmao" + datetime.now().strftime('%Y%m%d') + ".csv")
    title = ['网址', '轮胎名称', '价格', '销量', '评价人数']
    rows = ['\t'.join(title)]
    for page in tuhulist:
        for luntai in page:
            rows.append('\t'.join(luntai))
    # ``with`` guarantees the file handle is closed even if a write fails
    # (the original left the file open on exceptions).
    with open(save_path, 'w', encoding='utf-8') as f_obj:
        f_obj.write('\n'.join(rows) + '\n')
if __name__ == "__main__":
    # Run the crawler only when executed as a script, so that importing this
    # module no longer triggers a full crawl as a side effect.
    page = 15  # total number of catalogue pages to crawl
    html_doc = gethercontent(page)
    saveFile(html_doc)
    # The generated .csv is UTF-8 and tab-separated; if Excel expects a
    # different encoding, re-save it from a text editor before importing.
| StarcoderdataPython |
1624999 | """
Checks that the archive library can be successfully built for every
scheme/implementation.
"""
import pqclean
import helpers
def test_compile_lib():
    """Nose-style test generator: yield one compile check per scheme/implementation."""
    for scheme in pqclean.Scheme.all_schemes():
        for implementation in scheme.implementations:
            yield check_compile_lib, implementation
@helpers.filtered_test
def check_compile_lib(implementation):
    """Run ``make clean`` followed by ``make`` in the implementation's directory."""
    helpers.make('clean', working_dir=implementation.path())
    helpers.make(working_dir=implementation.path())
if __name__ == '__main__':
    # Prefer the modern nose2 runner, falling back to legacy nose if nose2 is
    # not installed.
    try:
        import nose2
        nose2.main()
    except ImportError:
        import nose
        nose.runmodule()
583 | <filename>influxdb_service_sdk/model/container/resource_requirements_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2
# NOTE(review): this file is machine-generated by protoc ("DO NOT EDIT");
# the comments below are for review only and will be lost on regeneration.

# File descriptor for resource_requirements.proto (package "container").
DESCRIPTOR = _descriptor.FileDescriptor(
  name='resource_requirements.proto',
  package='container',
  syntax='proto3',
  serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
  serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceList\x42\x45ZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
  ,
  dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])


# Message descriptor for container.ResourceRequirements
# (two ResourceList fields: "limits" and "requests").
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
  name='ResourceRequirements',
  full_name='container.ResourceRequirements',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='limits', full_name='container.ResourceRequirements.limits', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='requests', full_name='container.ResourceRequirements.requests', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=100,
  serialized_end=206,
)

# Resolve the cross-file message type references and register everything with
# the default symbol database.
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
  'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
  '__module__' : 'resource_requirements_pb2'
  # @@protoc_insertion_point(class_scope:container.ResourceRequirements)
  })
_sym_db.RegisterMessage(ResourceRequirements)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
3204962 | import os
import re
import cv2
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
# Interactive labeling tool (Python 2 / OpenCV): shows a random 128x128 patch
# of a random image from ``image/`` and saves it to ``trem/t`` (key "1") or
# ``trem/f`` (key "0"); any other key exits the loop.
src_dir = "image/"
t_dir = "tremed_data/t/"    # NOTE(review): unused -- output actually goes to trem/t and trem/f below
f_dir = "tremed_data/f/"    # NOTE(review): unused
files = os.listdir(src_dir)
# NOTE(review): raises ValueError when no .DS_Store entry exists (non-macOS).
files.remove(".DS_Store")
dataset = np.empty((0, 100*100*3), np.float64)  # NOTE(review): never used afterwards
id = 0  # running index used to name saved patches (shadows the builtin ``id``)
while True:
    file = random.choice(files)
    print file
    img = cv2.imread(src_dir+file)
    #print img
    #img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
    print img.shape
    # Scale so the height becomes 256 px while keeping the aspect ratio.
    resized_y = 256
    resized_x = int(256. / img.shape[0] * img.shape[1])
    resized_img = cv2.resize(img, (resized_x, resized_y))
    resized_img_show = cv2.resize(img, (resized_x, resized_y))
    # Pick a random patch position and outline it on the preview copy only,
    # so the saved patch itself stays unmarked.
    patch_size = 128
    x = random.randint(0, resized_x-patch_size)
    y = random.randint(0, resized_y-patch_size)
    cv2.rectangle(resized_img_show,(x,y),(x+patch_size,y+patch_size),(0,0,255),3)
    #cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    cv2.imshow("result", resized_img_show)
    keycode = cv2.waitKey(0)
    if keycode == ord("1"):
        data = resized_img[y:y+patch_size, x:x+patch_size]
        cv2.imwrite("trem/t/"+str(id)+".bmp", data)
    elif keycode == ord("0"):
        data = resized_img[y:y+patch_size, x:x+patch_size]
        cv2.imwrite("trem/f/"+str(id)+".bmp", data)
    else:
        break
    id += 1
| StarcoderdataPython |
# Submodules re-exported via ``from <package> import *``.
__all__ = ["algos", "job_script", "MDP_funcs", "samplers", "train_agent", "utils"]
| StarcoderdataPython |
133318 | <reponame>uniparthenope/api-uniparthenope<gh_stars>1-10
import json
import sys
import traceback
import base64
import math
import sqlalchemy
from sqlalchemy import exc
from app import api, db
from flask_restplus import Resource, fields
from datetime import datetime, timedelta
from flask import g, request
from app.apis.uniparthenope.v1.login_v1 import token_required_general, token_required
from app.apis.uniparthenope.v1.professor_v1 import getCourses
from app.apis.uniparthenope.v2.students_v2 import MyExams
from app.config import Config
from app.apis.ga_uniparthenope.models import Reservations, ReservableRoom, Room, Area, Entry, UserTemp
from app.apis.access.models import UserAccess
ns = api.namespace('uniparthenope')
# ------------- GLOBAL FUNCTIONS -------------
def createDate(data):
    """Convert an Italian long-date string to ``YYYY/M/D HH:MM``.

    *data* is expected to look like
    ``"HH:MM <..> <..> <day> <italian-month-name> <year>"``; the month name is
    translated to its 1-based number (not zero-padded).
    """
    mesi = ["gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre",
            "ottobre", "novembre", "dicembre"]
    parts = data.split()
    ora, minuti = parts[0][0:2], parts[0][3:5]
    giorno, anno = parts[3], parts[5]
    mese = mesi.index(parts[4]) + 1
    return "{}/{}/{} {}:{}".format(anno, mese, giorno, ora, minuti)
def extractData(data):
    """Parse the leading ``dd/mm/YYYY`` token of *data* into a ``datetime``."""
    first_token = data.split()[0]
    return datetime.strptime(first_token, '%d/%m/%Y')
# ------------- GET TODAY SERVICES -------------
class getTodayServices(Resource):
    """List every reservable service, grouped by area, for today and tomorrow."""

    @ns.doc(security='Basic Auth')
    @token_required_general
    def get(self):
        """Get Today Services"""
        if g.status == 200:
            username = g.response['user']['userId']
            array = []
            try:
                # Window: from midnight today up to two days ahead; entries
                # that already ended are excluded by the end_time > now filter.
                start = datetime.now().date()
                end = start + timedelta(days=2)
                # The group id is wrapped in commas so ``contains`` matches a
                # whole id inside the comma-separated user_access column.
                grpid = "," + str(g.response['user']['grpId']) + ","
                aree = Area.query.all()
                for area in aree:
                    array_area = []  # NOTE(review): unused -- candidate for removal
                    service = []
                    services = db.session.query(Entry, Room).filter(Room.id == Entry.room_id) \
                        .filter(Entry.start_time >= start) \
                        .filter(Entry.end_time <= end) \
                        .filter(Entry.end_time > datetime.now()) \
                        .filter(Room.area_id == area.id) \
                        .filter(Room.user_access.contains(grpid))
                    for s in services:
                        # Look up whether this user already reserved the entry.
                        reserved = False
                        resered_id = None
                        reserved_by = None
                        reservation = Reservations.query.filter_by(id_lezione=s.Entry.id).filter_by(
                            username=username)
                        if reservation.first() is not None:
                            reserved = True
                            resered_id = reservation.first().id
                            reserved_by = reservation.first().reserved_by
                        # Floor/side description; '999' is used as "no floor".
                        if s.Room.piano is None or s.Room.piano == '999':
                            piano = " "
                        else:
                            piano = s.Room.piano
                        if s.Room.lato is None:
                            lato = " "
                        else:
                            lato = s.Room.lato
                        # Only half the room capacity is bookable; availability
                        # is capacity minus current reservation count.
                        service.append({
                            'id': s.Entry.id,
                            'start': str(s.Entry.start_time),
                            'end': str(s.Entry.end_time),
                            'room': {
                                'name': s.Room.room_name,
                                'capacity': math.floor(s.Room.capacity / 2),
                                'description': "Piano " + piano + " Lato " + lato,
                                'availability': math.floor(
                                    s.Room.capacity / 2) - Reservations.query.with_for_update().filter_by(
                                    id_lezione=s.Entry.id).count()
                            },
                            'reservation': {
                                'reserved_id': resered_id,
                                'reserved': reserved,
                                'reserved_by': reserved_by
                            }
                        })
                    array.append({
                        'area': area.area_name,
                        'services': service
                    })
                return array, 200
            except:
                # NOTE(review): bare except returns the full traceback to the
                # client -- consider narrowing and hiding internals.
                db.session.rollback()
                print("Unexpected error:")
                print("Title: " + sys.exc_info()[0].__name__)
                print("Description: " + traceback.format_exc())
                return {
                    'errMsgTitle': sys.exc_info()[0].__name__,
                    'errMsg': traceback.format_exc()
                }, 500
        else:
            return {'errMsg': 'Wrong username/pass'}, g.status
# ------------- GET TODAY LECTURES -------------
parser = api.parser()
parser.add_argument('matId', required=True, help='')


@ns.doc(parser=parser)
class getTodayLecture(Resource):
    """Lectures of the next 24h for the student's attended/planned courses."""

    @ns.doc(security='Basic Auth')
    @token_required
    def get(self, matId):
        """Get Today Lectures"""
        # NOTE(review): a new engine per request is expensive -- consider a
        # shared module-level engine / connection pool.
        con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
        # The basic-auth token is base64("username:password").
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        result = MyExams(Resource).get(matId)
        status = json.loads(json.dumps(result))[1]
        _result = json.loads(json.dumps(result))[0]
        if status == 200:
            codici = []
            codici_res = []  # NOTE(review): unused -- candidate for removal
            # Keep only planned ('P') or attended ('F') courses, then add any
            # course codes the user already has reservations for.
            for i in range(len(_result)):
                if _result[i]['status']['esito'] == 'P' or _result[i]['status']['esito'] == 'F':
                    codici.append(_result[i]['codice'])
            res = Reservations.query.filter_by(username=username)
            for r in res:
                if r.id_corso not in codici:
                    codici.append(r.id_corso)
            array = []
            start = datetime.now().date()
            end = start + timedelta(days=1)
            for i in range(len(codici)):
                codice = codici[i]
                # WARNING(review): SQL built via string interpolation -- use
                # bound parameters; also start/end are dates here while rows
                # are read back with datetime.fromtimestamp (epoch seconds)
                # below: verify this comparison actually matches.
                rs = con.execute(
                    "SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.room_id = R.id AND `id_corso` LIKE '%%" + str(
                        codice) + "%%' AND start_time >= '" + str(start) + "' AND end_time <= '" + str(end) + "'")
                for row in rs:
                    reserved = False
                    resered_id = None
                    reserved_by = None
                    reservation = Reservations.query.filter_by(id_lezione=row[0]).filter_by(
                        username=username)
                    if reservation.first() is not None:
                        reserved = True
                        resered_id = reservation.first().id
                        reserved_by = reservation.first().reserved_by
                    # row indices: 0=id, 1=start epoch, 2=end epoch, 9=course
                    # name, 11=professor, 38=room name, 40=description,
                    # 41=capacity (assumed from usage -- confirm against schema).
                    array.append({
                        'id': row[0],
                        'id_corso': codice,
                        'start': str(datetime.fromtimestamp(row[1])),
                        'end': str(datetime.fromtimestamp(row[2])),
                        'room': {
                            'name': row[38],
                            'capacity': math.floor(row[41] / 2),
                            'description': row[40],
                            'availability': math.floor(
                                int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                                id_lezione=row[0]).count()
                        },
                        'course_name': row[9],
                        'prof': row[11],
                        'reservation': {
                            'reserved_id': resered_id,
                            'reserved': reserved,
                            'reserved_by': reserved_by
                        }
                    })
            return array, 200
        else:
            return {'errMsg': _result['errMsg']}, status
# ------------- GET ALL OWN LECTURES -------------
parser = api.parser()
parser.add_argument('matId', required=True, help='')


@ns.doc(parser=parser)
class getLectures(Resource):
    """All future lectures for the student's courses, plus extra reservations."""

    @ns.doc(security='Basic Auth')
    @token_required
    def get(self, matId):
        """Get All Own Lectures"""
        # The basic-auth token is base64("username:password").
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        result = MyExams(Resource).get(matId)
        status = json.loads(json.dumps(result))[1]
        _result = json.loads(json.dumps(result))[0]
        con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
        # Midnight today as epoch seconds (mrbs stores start/end as epoch).
        start = datetime(datetime.now().year, datetime.now().month, datetime.now().day, 0, 0).timestamp()
        if status == 200:
            array = []
            # Lectures for planned ('P') / attended ('F') courses.
            for i in range(len(_result)):
                if _result[i]['status']['esito'] == 'P' or _result[i]['status']['esito'] == 'F':
                    codice = _result[i]['codice']
                    # WARNING(review): SQL built via string interpolation --
                    # prefer bound parameters.
                    rs = con.execute("SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.id_corso LIKE '%%" + str(
                        codice) + "%%' AND E.start_time >= '" + str(start) + "' AND R.id = E.room_id")
                    for row in rs:
                        reserved = False
                        resered_id = None
                        reserved_by = None
                        reservation = Reservations.query.filter_by(id_lezione=row[0]).filter_by(
                            username=username)
                        if reservation.first() is not None:
                            reserved = True
                            resered_id = reservation.first().id
                            reserved_by = reservation.first().reserved_by
                        array.append({
                            'id': row[0],
                            'id_corso': codice,
                            'start': str(datetime.fromtimestamp(row[1])),
                            'end': str(datetime.fromtimestamp(row[2])),
                            'room': {
                                'name': row[38],
                                'capacity': math.floor(int(row[41]) / 2),
                                'description': row[40],
                                'availability': math.floor(
                                    int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                                    id_lezione=row[0]).count()
                            },
                            'course_name': row[9],
                            'prof': row[11],
                            'reservation': {
                                'reserved_id': resered_id,
                                'reserved': reserved,
                                'reserved_by': reserved_by
                            }
                        })
            # Merge in any future reservations whose lecture was not already
            # included above (e.g. reservations outside the user's courses).
            res = Reservations.query.filter_by(username=username).filter(
                Reservations.start_time >= datetime.fromtimestamp(start)).all()
            if len(array) == 0:
                for r in res:
                    rs = con.execute("SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.id ='" + str(
                        r.id_lezione) + "' AND E.start_time >= '" + str(start) + "' AND R.id = E.room_id")
                    for row in rs:
                        array.append({
                            'id': row[0],
                            'id_corso': r.id_corso,
                            'start': str(datetime.fromtimestamp(row[1])),
                            'end': str(datetime.fromtimestamp(row[2])),
                            'room': {
                                'name': row[38],
                                'capacity': math.floor(int(row[41]) / 2),
                                'description': row[40],
                                'availability': math.floor(
                                    int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                                    id_lezione=row[0]).count()
                            },
                            'course_name': row[9],
                            'prof': row[11],
                            'reservation': {
                                'reserved_id': r.id,
                                'reserved': True,
                                'reserved_by': r.reserved_by
                            }
                        })
            else:
                # De-duplicate against lecture ids already collected above.
                id_lez = []
                for i in range(len(array)):
                    id_lez.append(array[i]['id'])
                print(id_lez)
                for r in res:
                    if r.id_lezione not in id_lez:
                        rs = con.execute("SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.id ='" + str(
                            r.id_lezione) + "' AND E.start_time >= '" + str(start) + "' AND R.id = E.room_id")
                        for row in rs:
                            array.append({
                                'id': row[0],
                                'id_corso': r.id_corso,
                                'start': str(datetime.fromtimestamp(row[1])),
                                'end': str(datetime.fromtimestamp(row[2])),
                                'room': {
                                    'name': row[38],
                                    'capacity': math.floor(int(row[41]) / 2),
                                    'description': row[40],
                                    'availability': math.floor(
                                        int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                                        id_lezione=row[0]).count()
                                },
                                'course_name': row[9],
                                'prof': row[11],
                                'reservation': {
                                    'reserved_id': r.id,
                                    'reserved': True,
                                    'reserved_by': r.reserved_by
                                }
                            })
            return array, 200
        else:
            return {'errMsg': _result['errMsg']}, status
# ------------- GET ALL PROF LECTURES -------------
parser = api.parser()
parser.add_argument('aaId', required=True, help='')


@ns.doc(parser=parser)
class getProfLectures(Resource):
    """All future lectures for the courses taught by the authenticated professor."""

    @ns.doc(security='Basic Auth')
    @token_required
    def get(self, aaId):
        """Get All Prof Lectures"""
        result = getCourses(Resource).get(aaId)
        status = json.loads(json.dumps(result))[1]
        _result = json.loads(json.dumps(result))[0]
        con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
        if status == 200:
            # The basic-auth token is base64("username:password"); the
            # username is currently unused in the query (see note below).
            base64_bytes = g.token.encode('utf-8')
            message_bytes = base64.b64decode(base64_bytes)
            token_string = message_bytes.decode('utf-8')
            username = token_string.split(':')[0]
            array = []
            for i in range(len(_result)):
                codice = _result[i]['adDefAppCod']
                # Midnight today as epoch seconds (mrbs stores epoch).
                start = datetime(datetime.now().year, datetime.now().month, datetime.now().day, 0, 0).timestamp()
                # WARNING(review): SQL built via string interpolation --
                # prefer bound parameters.
                rs = con.execute("SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.id_corso LIKE '%%" + str(
                    codice) + "%%' AND E.start_time >= '" + str(start) + "' AND R.id = E.room_id")
                # Filtering by professor surname was disabled because surnames
                # with spaces and accents broke the match:
                # AND E.description LIKE '%%" + username.split(".")[1] + "%%'")
                courses = []
                for row in rs:
                    courses.append({
                        'id': row[0],
                        'start': str(datetime.fromtimestamp(row[1])),
                        'end': str(datetime.fromtimestamp(row[2])),
                        'room': {
                            'name': row[38],
                            'capacity': math.floor(int(row[41]) / 2),
                            'description': row[40],
                            'availability': math.floor(
                                int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                                id_lezione=row[0]).count()
                        },
                        'course_name': row[9],
                        'prof': row[11]
                    })
                array.append({
                    'nome': _result[i]['adDes'],
                    'courses': courses
                })
            # print(array)
            return array, 200
        else:
            return {'errMsg': _result['errMsg']}, status
# ------------- SERVICES RESERVATIONS -------------
prenotazione_servizi = ns.model("services_reservation", {
"id_entry": fields.String(description="", required=True),
"matricola": fields.String(description="", required=True)
})
def reserve(username, content, rs):
    """Book the service slot *rs* for *username*.

    Args:
        username: account name extracted from the Basic-Auth token.
        content: request payload containing 'id_entry' and 'matricola'.
        rs: SQLAlchemy row joining the mrbs Entry and Room for the slot.

    Returns:
        A ``(payload, http_status)`` tuple in Flask-RESTX style: 200 on
        success, 500 with an Italian user-facing message otherwise.
    """
    try:
        # Distancing rule: only half the nominal room capacity is bookable.
        capacity = math.floor(rs.Room.capacity / 2)
        # Bookings are limited to slots starting within the next 2 days and
        # that have not already ended.
        now = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=2)
        if rs.Entry.start_time > now or rs.Entry.end_time > now or rs.Entry.end_time < datetime.now():
            return {
                'errMsgTitle': 'Attenzione',
                'errMsg': 'Prenotazione non consentita.'
            }, 500
        # Reject slots overlapping another reservation of the same user
        # within the 2-day window.
        start = datetime.now().date()
        end = start + timedelta(days=2)
        today_reservations = Reservations.query.filter_by(username=username).filter(
            Reservations.start_time >= start).filter(
            Reservations.end_time <= end).all()
        for res in today_reservations:
            if res.start_time <= rs.Entry.start_time < res.end_time or res.start_time < rs.Entry.end_time <= res.end_time:
                return {
                    'errMsgTitle': 'Attenzione',
                    'errMsg': 'Già presente una prenotazione in questo lasso di tempo.'
                }, 500
        r = Reservations(id_corso="SERVICE", course_name=rs.Entry.name,
                         start_time=rs.Entry.start_time,
                         end_time=rs.Entry.end_time,
                         username=username, matricola=content['matricola'],
                         time=datetime.now(), id_lezione=content['id_entry'],
                         reserved_by=username)
        db.session.add(r)
        # Optimistic capacity check: insert first, then count with row locks
        # and roll back if the room would be over-subscribed.
        count = Reservations.query.with_for_update().filter_by(
            id_lezione=content['id_entry']).count()
        if count > capacity:
            db.session.rollback()
            return {
                'errMsgTitle': 'Attenzione',
                'errMsg': 'Raggiunta la capacità massima consentita.'
            }, 500
        db.session.commit()
        return {
            "status": "Prenotazione effettuata con successo."
        }, 200
    except exc.IntegrityError:
        # Unique constraint: the user already booked this exact entry.
        db.session.rollback()
        return {
            'errMsgTitle': 'Attenzione',
            'errMsg': 'Prenotazione già effettuata per questo servizio.'
        }, 500
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt. NOTE(review): the traceback is still returned to
        # the client — consider logging it server-side only.
        db.session.rollback()
        print("Unexpected error:")
        print("Title: " + sys.exc_info()[0].__name__)
        print("Description: " + traceback.format_exc())
        return {
            'errMsgTitle': sys.exc_info()[0].__name__,
            'errMsg': traceback.format_exc()
        }, 500
class ServicesReservation(Resource):
    # POST endpoint that books a "service" slot (library seat, lab, ...)
    # by delegating to the module-level ``reserve`` helper.
    @ns.doc(security='Basic Auth')
    @token_required_general
    @ns.expect(prenotazione_servizi)
    def post(self):
        """Set Service Reservation"""
        # Username is the user part of the decoded Basic-Auth token.
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        content = request.json
        if g.status == 200 and 'id_entry' in content and 'matricola' in content:
            # NOTE(review): ``rs`` is None for an unknown id_entry, so
            # ``rs.Room`` below would raise AttributeError — add a check.
            rs = db.session.query(Entry, Room).filter(Room.id == Entry.room_id).filter(
                Entry.id == content['id_entry']).first()
            # Room.user_access is a comma-separated group-id list; wrap the
            # group id in commas to get exact-token matching.
            grpid = "," + str(g.response['user']['grpId']) + ","
            if grpid in rs.Room.user_access:
                # Groups 7 (professors) and 99 skip the Green Pass check.
                if g.response['user']['grpId'] != 7 and g.response['user']['grpId'] != 99:
                    user = UserAccess.query.filter_by(username=username).first()
                    if user is not None and user.greenpass:
                        return reserve(username, content, rs)
                    else:
                        return {'status': 'error',
                                'errMsg': 'Impossibile prenotarsi in mancanza di Green Pass.'}, 500
                else:
                    return reserve(username, content, rs)
            else:
                return {
                    'errMsgTitle': 'Attenzione',
                    'errMsg': 'Utente non autorizzato per questo servizio.'
                }, 500
        else:
            return {'errMsg': 'Errore username/pass!'}, g.status
# ------------- RESERVATIONS -------------
# Request schema for POST /reservation: course id, lecture (mrbs entry) id,
# student number, and matId (presumably the internal student-career id used
# by the MyExams lookup — confirm against that endpoint).
prenotazione = ns.model("reservation", {
    "id_corso": fields.String(description="", required=True),
    "id_lezione": fields.String(description="", required=True),
    "matricola": fields.String(description="", required=True),
    "matId": fields.String(description="", required=True)
})
class Reservation(Resource):
    """Lecture reservations: create (POST), list (GET) and delete (DELETE)."""
    @ns.doc(security='Basic Auth')
    @token_required
    @ns.expect(prenotazione)
    def post(self):
        """Set Reservation

        Book lecture ``id_lezione`` of course ``id_corso`` for the
        authenticated student. Allowed only when the course is in the
        student's study plan (exam esito 'P' or 'F') or the user is a
        temporary user, the user has an autocertification with in-presence
        access, the slot falls within the next day and has not ended, there
        is no overlapping booking, and the halved room capacity holds.
        """
        # Username is the user part of the decoded Basic-Auth token.
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        content = request.json
        if 'id_corso' in content and 'id_lezione' in content and 'matricola' in content and 'matId' in content:
            result = MyExams(Resource).get(content['matId'])
            # MyExams returns a (payload, status) tuple; the json round-trip
            # normalizes it into plain lists/dicts.
            status = json.loads(json.dumps(result))[1]
            _result = json.loads(json.dumps(result))[0]
            codici = []
            if status == 200:
                # Bookable course codes: exams still planned ('P') or failed ('F').
                for i in range(len(_result)):
                    if _result[i]['status']['esito'] == 'P' or _result[i]['status']['esito'] == 'F':
                        codici.append(_result[i]['codice'])
                # (Dead code removed: a ReservableRoom scan built an unused
                # ``codici_res`` list here.)
                try:
                    user_info = UserTemp.query.filter_by(username=username).first()
                    if content['id_corso'] in codici or user_info is not None:
                        user = UserAccess.query.filter_by(username=username).first()
                        if user is not None:
                            if user.autocertification and user.classroom == "presence":
                                con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
                                # NOTE(review): SQL built by concatenating
                                # client input — switch to bound parameters.
                                rows = con.execute(
                                    "SELECT * FROM `mrbs_entry` E JOIN `mrbs_room` R WHERE E.id = '" + content[
                                        'id_lezione'] + "' AND E.room_id = R.id").fetchall()
                                # Distancing rule: only half the nominal room
                                # capacity (positional column 41) is bookable.
                                capacity = int(rows[0][41]) / 2
                                # Bookings only for slots starting within the
                                # next day and not already finished.
                                now = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(
                                    days=1)
                                if datetime.fromtimestamp(rows[0][1]) > now or datetime.fromtimestamp(
                                        rows[0][2]) > now or datetime.fromtimestamp(rows[0][2]) < datetime.now():
                                    return {
                                        'errMsgTitle': 'Attenzione',
                                        'errMsg': 'Prenotazione non consentita.'
                                    }, 500
                                # Reject overlap with the user's other
                                # bookings inside the one-day window.
                                start = datetime.now().date()
                                end = start + timedelta(days=1)
                                today_reservations = Reservations.query.filter_by(username=username).filter(
                                    Reservations.start_time >= start).filter(
                                    Reservations.end_time <= end).all()
                                for res in today_reservations:
                                    if res.start_time <= datetime.fromtimestamp(
                                            rows[0][1]) < res.end_time or res.start_time < datetime.fromtimestamp(
                                            rows[0][2]) <= res.end_time:
                                        return {
                                            'errMsgTitle': 'Attenzione',
                                            'errMsg': 'Già presente una prenotazione in questo lasso di tempo.'
                                        }, 500
                                r = Reservations(id_corso=content['id_corso'], course_name=rows[0][9],
                                                 start_time=datetime.fromtimestamp(rows[0][1]),
                                                 end_time=datetime.fromtimestamp(rows[0][2]),
                                                 username=username, matricola=content['matricola'],
                                                 time=datetime.now(), id_lezione=content['id_lezione'],
                                                 reserved_by=username)
                                db.session.add(r)
                                # Optimistic capacity check: insert, count
                                # with row locks, roll back if over capacity.
                                count = Reservations.query.with_for_update().filter_by(
                                    id_lezione=content['id_lezione']).count()
                                if count > capacity:
                                    db.session.rollback()
                                    return {
                                        'errMsgTitle': 'Attenzione',
                                        'errMsg': 'Raggiunta la capacità massima consentita.'
                                    }, 500
                                db.session.commit()
                                return {
                                    "status": "Prenotazione effettuata con successo."
                                }, 200
                            else:
                                return {'status': 'error',
                                        'errMsg': 'Impossibile prenotarsi in mancanza di autocertificazione/accesso in presenza.'}, 500
                        else:
                            return {'status': 'error',
                                    'errMsg': 'Impossibile prenotarsi in mancanza di autocertificazione/accesso in presenza.'}, 500
                    else:
                        return {
                            'errMsgTitle': 'Attenzione',
                            'errMsg': 'Non è possibile prenotarsi ad una lezione non presente nel proprio piano di studi/già superata.'
                        }, 500
                except exc.IntegrityError:
                    # Unique constraint: student already booked this lecture.
                    db.session.rollback()
                    return {
                        'errMsgTitle': 'Attenzione',
                        'errMsg': 'Prenotazione già effettuata per questa lezione.'
                    }, 500
                except Exception:
                    # BUG FIX: was a bare ``except:`` (also trapped SystemExit
                    # and KeyboardInterrupt). NOTE(review): the traceback is
                    # still sent to the client — log server-side only instead.
                    db.session.rollback()
                    print("Unexpected error:")
                    print("Title: " + sys.exc_info()[0].__name__)
                    print("Description: " + traceback.format_exc())
                    return {
                        'errMsgTitle': sys.exc_info()[0].__name__,
                        'errMsg': traceback.format_exc()
                    }, 500
            else:
                return {'errMsg': _result['errMsg']}, status
        else:
            return {'errMsg': 'Payload error!'}, 500
    @ns.doc(security='Basic Auth')
    @token_required_general
    def get(self):
        """Get Reservations

        List the authenticated student's reservations starting today or
        later. Restricted to users with grpId 6 (students).
        """
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        if g.status == 200:
            if g.response['user']['grpId'] == 6:
                try:
                    start = datetime.now().date()
                    # BUG FIX: the filter used datetime.fromtimestamp(start),
                    # but ``start`` is a date, not a POSIX timestamp, so every
                    # call raised TypeError and fell into the error handler.
                    # Compare the DateTime column to today's date directly.
                    # (Also removed the unused ``end`` local.)
                    reservations = Reservations.query.filter_by(username=username).filter(
                        Reservations.start_time >= start).all()
                    array = []
                    for r in reservations:
                        array.append({
                            "id": r.id,
                            "id_corso": r.id_corso,
                            "course_name": r.course_name,
                            "start_time": str(r.start_time),
                            "end_time": str(r.end_time),
                            'reserved_by': r.reserved_by
                        })
                    return array, 200
                except Exception:
                    # BUG FIX: was a bare ``except:``.
                    db.session.rollback()
                    print("Unexpected error:")
                    print("Title: " + sys.exc_info()[0].__name__)
                    print("Description: " + traceback.format_exc())
                    return {
                        'errMsgTitle': sys.exc_info()[0].__name__,
                        'errMsg': traceback.format_exc()
                    }, 500
            else:
                return {
                    'errMsgTitle': "Attenzione",
                    'errMsg': "Il tipo di user non è di tipo Studente"
                }, 500
        else:
            return {'errMsg': 'Wrong username/pass'}, g.status
    def _delete_own(self, id_prenotazione, username):
        """Delete reservation *id_prenotazione* only if owned by *username*.

        Extracted helper: the non-professor branch and the missing-aaId
        branch of ``delete`` contained two identical copies of this logic.
        """
        try:
            reservation = Reservations.query.filter_by(id=id_prenotazione).filter_by(
                username=username)
            if reservation.first() is not None:
                reservation.delete()
                db.session.commit()
                return {
                    "status": "Cancellazione effettuata con successo."
                }, 200
            else:
                return {
                    'errMsgTitle': "Attenzione",
                    'errMsg': "Operazione non consentita."
                }, 500
        except AttributeError:
            return {
                'errMsgTitle': "Attenzione",
                'errMsg': "Operazione non consentita."
            }, 500
        except Exception:
            # BUG FIX: was a bare ``except:``.
            db.session.rollback()
            print("Unexpected error:")
            print("Title: " + sys.exc_info()[0].__name__)
            print("Description: " + traceback.format_exc())
            return {
                'errMsgTitle': sys.exc_info()[0].__name__,
                'errMsg': traceback.format_exc()
            }, 500
    @ns.doc(security='Basic Auth')
    @token_required_general
    def delete(self, id_prenotazione):
        """Delete Reservation

        Professors (grpId 7) supplying a valid ``aaId`` query argument may
        delete any reservation belonging to one of their courses; all other
        users may delete only their own reservation.
        """
        if g.status == 200:
            base64_bytes = g.token.encode('utf-8')
            message_bytes = base64.b64decode(base64_bytes)
            token_string = message_bytes.decode('utf-8')
            username = token_string.split(':')[0]
            if g.response['user']['grpId'] == 7:
                if request.args.get('aaId') is not None:
                    result = getCourses(Resource).get(request.args.get('aaId'))
                    status = json.loads(json.dumps(result))[1]
                    _result = json.loads(json.dumps(result))[0]
                    if status == 200:
                        codici = []
                        for i in range(len(_result)):
                            codici.append(_result[i]['adDefAppCod'])
                        reservation = Reservations.query.filter_by(id=id_prenotazione)
                        # NOTE(review): ``first()`` may be None for an unknown
                        # id, which would raise AttributeError here (uncaught
                        # in this branch).
                        if reservation.first().id_corso in codici:
                            reservation.delete()
                            db.session.commit()
                            return {
                                "status": "Cancellazione effettuata con successo."
                            }, 200
                        else:
                            return {
                                'errMsgTitle': "Attenzione",
                                'errMsg': "Operazione non consentita."
                            }, 500
                    else:
                        return {
                            'errMsgTitle': "Attenzione",
                            'errMsg': "Anno di corso non valido!"
                        }, 500
                else:
                    return self._delete_own(id_prenotazione, username)
            else:
                return self._delete_own(id_prenotazione, username)
        else:
            return {'errMsg': 'Wrong username/pass'}, g.status
# ------------- RESERVE STUDENT BY PROF -------------
# Request schema for a professor booking on behalf of a student: the lecture
# entry, the student's matricola and username, and the academic-year id used
# to verify the lecture belongs to one of the professor's courses.
prenotazione_prof = ns.model("reservation_prof", {
    "id_lezione": fields.String(description="", required=True),
    "matricola": fields.String(description="", required=True),
    "username": fields.String(description="", required=True),
    "aaId": fields.String(description="", required=True)
})
class ReservationByProf(Resource):
    @ns.doc(security='Basic Auth')
    @token_required
    @ns.expect(prenotazione_prof)
    def post(self):
        """Set Reservation to student

        A professor books lecture ``id_lezione`` on behalf of the student
        identified by ``username``/``matricola``, provided the lecture's
        course is one the professor teaches in academic year ``aaId``.
        (Removed a debug ``print(content)`` that logged request payloads.)
        """
        # Username of the professor, from the decoded Basic-Auth token.
        base64_bytes = g.token.encode('utf-8')
        message_bytes = base64.b64decode(base64_bytes)
        token_string = message_bytes.decode('utf-8')
        username = token_string.split(':')[0]
        content = request.json
        if 'id_lezione' in content and 'matricola' in content and 'username' in content and 'aaId' in content:
            result = getCourses(Resource).get(content['aaId'])
            # (payload, status) tuple normalized via a json round-trip.
            status = json.loads(json.dumps(result))[1]
            _result = json.loads(json.dumps(result))[0]
            if status == 200:
                try:
                    # Course codes taught by this professor in year aaId.
                    codici = []
                    for i in range(len(_result)):
                        codici.append(_result[i]['adDefAppCod'])
                    con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
                    # NOTE(review): SQL built by string concatenation from
                    # client input — switch to bound parameters.
                    rs = con.execute("SELECT * FROM `mrbs_entry` E JOIN mrbs_room R WHERE E.id = '" + str(
                        content['id_lezione']) + "' AND E.room_id = R.id").fetchall()
                    if len(rs) != 0:
                        # BUG FIX: capacity was computed before this emptiness
                        # check, so an unknown lecture id raised IndexError and
                        # returned a traceback instead of "ID lezione errato".
                        # Distancing rule: half the nominal capacity (col 41).
                        capacity = int(rs[0][41]) / 2
                        if rs[0][32] in codici:
                            r = Reservations(id_corso=rs[0][32], course_name=rs[0][9],
                                             start_time=datetime.fromtimestamp(rs[0][1]),
                                             end_time=datetime.fromtimestamp(rs[0][2]),
                                             username=content['username'], matricola=content['matricola'],
                                             time=datetime.now(), id_lezione=content['id_lezione'],
                                             reserved_by=username)
                            db.session.add(r)
                            # Optimistic capacity check with row locks.
                            count = Reservations.query.with_for_update().filter_by(
                                id_lezione=content['id_lezione']).count()
                            if count > capacity:
                                db.session.rollback()
                                return {
                                    'errMsgTitle': 'Attenzione',
                                    'errMsg': 'Raggiunta la capacità massima consentita.'
                                }, 500
                            db.session.commit()
                            return {
                                "status": "Prenotazione effettuata con successo."
                            }, 200
                        else:
                            return {
                                'errMsgTitle': "Attenzione",
                                'errMsg': "Operazione non consentita!"
                            }, 500
                    else:
                        return {
                            'errMsgTitle': "Attenzione",
                            'errMsg': "ID lezione errato"
                        }, 500
                except exc.IntegrityError:
                    # Unique constraint: already booked for this lecture.
                    db.session.rollback()
                    return {
                        'errMsgTitle': 'Attenzione',
                        'errMsg': 'Prenotazione già effettuata per questa lezione.'
                    }, 500
                except Exception:
                    # BUG FIX: was a bare ``except:``.
                    db.session.rollback()
                    print("Unexpected error:")
                    print("Title: " + sys.exc_info()[0].__name__)
                    print("Description: " + traceback.format_exc())
                    return {
                        'errMsgTitle': sys.exc_info()[0].__name__,
                        'errMsg': traceback.format_exc()
                    }, 500
            else:
                return {
                    'errMsgTitle': "Attenzione",
                    'errMsg': "Errore nel caricamento degli esami!!"
                }, 500
        else:
            return {
                'errMsgTitle': "Attenzione",
                'errMsg': "Errore Payload/Studente non immatricolato!"
            }, 500
# ------------- GET STUDENTS LIST -------------
# Query parameter: the mrbs entry id whose attendee list is requested.
parser = api.parser()
parser.add_argument('id_lezione', required=True, help='')
@ns.doc(parser=parser)
class getStudentsList(Resource):
    @ns.doc(security='Basic Auth')
    @token_required_general
    def get(self, id_lezione):
        """Get Students Lists

        Return id/matricola/username for every student booked on lecture
        ``id_lezione``. Restricted to professors (grpId 7).
        """
        if g.status == 200:
            if g.response['user']['grpId'] == 7:
                # TODO: check that the lecture actually belongs to this professor.
                try:
                    mat = Reservations.query.filter_by(id_lezione=id_lezione).all()
                    array = []
                    for m in mat:
                        array.append({
                            'id': m.id,
                            'matricola': m.matricola,
                            'username': m.username
                        })
                    return array, 200
                except Exception:
                    # BUG FIX: was a bare ``except:`` (also trapped SystemExit
                    # and KeyboardInterrupt).
                    db.session.rollback()
                    print("Unexpected error:")
                    print("Title: " + sys.exc_info()[0].__name__)
                    print("Description: " + traceback.format_exc())
                    return {
                        'errMsgTitle': sys.exc_info()[0].__name__,
                        'errMsg': traceback.format_exc()
                    }, 500
            else:
                # NOTE(review): the message says "not a Student" but this
                # branch actually rejects non-professor users.
                return {
                    'errMsgTitle': "Attenzione",
                    'errMsg': "Il tipo di user non è di tipo Studente"
                }, 500
        else:
            return {'errMsg': 'Wrong username/pass'}, g.status
# ------------- GET EVENTS -------------
class getEvents(Resource):
    def get(self):
        """Get Events

        Public endpoint (no auth decorator): returns upcoming mrbs entries
        of the "event" types (t/s/b/a/z/Y/O, matched case-sensitively via
        COLLATE utf8mb4_bin) from midnight today onward, joined with their
        rooms and annotated with residual availability.
        """
        try:
            # Midnight today as a POSIX timestamp (mrbs stores epoch ints).
            start = datetime(datetime.now().year, datetime.now().month, datetime.now().day, 0, 0).timestamp()
            con = sqlalchemy.create_engine(Config.GA_DATABASE, echo=False)
            rs = con.execute(
                "SELECT * FROM `mrbs_entry` E JOIN mrbs_room R WHERE (E.type = 't' COLLATE utf8mb4_bin OR E.type = 's' COLLATE utf8mb4_bin OR E.type = 'b' COLLATE utf8mb4_bin OR E.type = 'a' COLLATE utf8mb4_bin OR E.type = 'z' COLLATE utf8mb4_bin OR E.type = 'Y' COLLATE utf8mb4_bin OR E.type = 'O' COLLATE utf8mb4_bin) AND start_time >= '" + str(
                    start) + "' AND E.room_id = R.id")
            array = []
            for row in rs:
                array.append({
                    'id': row[0],
                    'start': str(datetime.fromtimestamp(row[1])),
                    'end': str(datetime.fromtimestamp(row[2])),
                    'room': {
                        'name': row[38],
                        # CONSISTENCY FIX: use math.floor as getProfLectures
                        # does — bare "/ 2" produced a float (e.g. 7.5) in the
                        # JSON where sibling endpoints return integers.
                        'capacity': math.floor(int(row[41]) / 2),
                        'description': row[40],
                        'availability': math.floor(int(row[41]) / 2) - Reservations.query.with_for_update().filter_by(
                            id_lezione=row[0]).count()
                    },
                    'course_name': row[9],
                    'description': row[11],
                    'type': row[10]
                })
            return array, 200
        except Exception:
            # BUG FIX: was a bare ``except:``. No rollback needed — this
            # endpoint performs no session writes.
            print("Unexpected error:")
            print("Title: " + sys.exc_info()[0].__name__)
            print("Description: " + traceback.format_exc())
            return {
                'errMsgTitle': sys.exc_info()[0].__name__,
                'errMsg': traceback.format_exc()
            }, 500
| StarcoderdataPython |
1738582 | #!/usr/bin/env python3
import db_ops
if __name__ == "__main__":
    # Open the job-tracking database (created on first run).
    db_con = db_ops.openDB('job.db')
    # Table schemas as (column name, SQL type, constraint flag) triples:
    # 'p' = primary key, 'n' = NOT NULL, None = no constraint.
    # (Removed an unused cursor: db_ops.createTable takes the connection.)
    # TODO: Use config file for schema instead of hardcoding
    schema_opening = [('Company', 'VARCHAR', 'n'),
                      ('Position', 'VARCHAR', 'n'),
                      ('Area', 'TEXT', 'n'),
                      ('Date of Posting', 'DATE', 'n'),
                      ('Job Listing URL', 'VARCHAR', None),
                      ('Company Website', 'VARCHAR', None),
                      ('Location', 'VARCHAR', None),
                      ('Date of Entry', 'DATE', 'n'),
                      ('Job ID', 'VARCHAR', 'p')]
    schema_application = [('Job ID', 'VARCHAR', 'p'),
                          ('Date of Application', 'DATE', 'n'),
                          ('Status', 'TEXT', 'n'),
                          ('Referral', 'VARCHAR', None),
                          ('Confidence', 'VARCHAR', None),
                          ('Comment', 'VARCHAR', None),
                          ('Application ID', 'VARCHAR', 'n')]
    # TODO: Create schema for all tables (e.g. 'Job Offer').
    tables = [('Job Opening', schema_opening),
              ('Job Application', schema_application)]
    for name, schema in tables:
        db_ops.createTable(db_con, name, schema)
    # Close database #
    db_ops.closeDB(db_con)
| StarcoderdataPython |
1709956 | <reponame>Jwsonic/air
import serial
# UART where the particulate-matter sensor is attached (Raspberry Pi
# primary serial port); 9600 baud, 2 s read timeout.
port = serial.Serial("/dev/ttyAMA0", baudrate=9600, timeout=2.0)
def read_pm_line(_port):
    """Scan *_port* for a sensor frame and return its 30 bytes.

    Consumes bytes one at a time until the 0x42 0x4D start-of-frame
    marker is seen, then reads the remaining 28 payload bytes and
    returns header + payload as a single bytes object.
    """
    while True:
        first = _port.read()
        if first != b'\x42':
            continue
        second = _port.read()
        if second == b'\x4d':
            return first + second + _port.read(28)
rcv = read_pm_line(port)
# Bytes 6-7 form a big-endian 16-bit value — presumably the PM2.5
# concentration field of the sensor frame; confirm against the datasheet.
print(rcv[6] * 256 + rcv[7])
port.close() | StarcoderdataPython |
3296125 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BrandVettingTestCase(IntegrationTestCase):
    # Auto-generated twilio-python integration tests for the BrandVetting
    # subresource; "holodeck" is the mocked HTTP transport used by the
    # IntegrationTestCase base class.
    def test_create_request(self):
        # Any canned response works here: this test only verifies the HTTP
        # request the client library builds (method, URL, form data).
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                     .brand_vettings.create(vetting_provider="campaign-verify")
        values = {'VettingProvider': "campaign-verify", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://messaging.twilio.com/v1/a2p/BrandRegistrations/BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Vettings',
            data=values,
        ))
    def test_create_response(self):
        # Canned 201 payload: verifies the response deserializes into a
        # non-None instance.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "<KEY>",
                "brand_sid": "BN0044409f7e067e279523808d267e2d85",
                "brand_vetting_sid": "VT12445353",
                "vetting_provider": "CAMPAIGN_VERIFY",
                "vetting_id": "cv|1.0|10DLC|NHDHBD",
                "vetting_class": "POLITICAL",
                "vetting_status": "PENDING",
                "date_created": "2021-01-27T14:18:35Z",
                "date_updated": "2021-01-27T14:18:35Z",
                "url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85/Vettings/VT12445353"
            }
            '''
        ))
        actual = self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .brand_vettings.create(vetting_provider="campaign-verify")
        self.assertIsNotNone(actual)
    def test_list_request(self):
        # Request-shape check for the list operation (GET on the collection).
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                     .brand_vettings.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://messaging.twilio.com/v1/a2p/BrandRegistrations/BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Vettings',
        ))
    def test_read_response(self):
        # Canned paginated list payload: verifies deserialization succeeds.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85/Vettings?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "next_page_url": null,
                    "key": "data",
                    "url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85/Vettings?PageSize=50&Page=0"
                },
                "data": [
                    {
                        "account_sid": "<KEY>",
                        "brand_sid": "BN0044409f7e067e279523808d267e2d85",
                        "brand_vetting_sid": "VT12445353",
                        "vetting_provider": "CAMPAIGN_VERIFY",
                        "vetting_id": "cv|1.0|10DLC|NHDHBD",
                        "vetting_class": "POLITICAL",
                        "vetting_status": "PENDING",
                        "date_created": "2021-01-27T14:18:35Z",
                        "date_updated": "2021-01-27T14:18:35Z",
                        "url": "https://messaging.twilio.com/v1/a2p/BrandRegistrations/BN0044409f7e067e279523808d267e2d85/Vettings/VT12445353"
                    }
                ]
            }
            '''
        ))
        actual = self.client.messaging.v1.brand_registrations("BNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .brand_vettings.list()
        self.assertIsNotNone(actual)
| StarcoderdataPython |
1710067 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Notebook-exported script ("# In[n]:" cell markers retained). It requires
# an IPython kernel: get_ipython() is only defined there. Bare expression
# statements below (e.g. ``type(data)``) are notebook display cells and are
# no-ops when run as a plain script.
get_ipython().run_line_magic('matplotlib', 'inline')
from pyvista import set_plot_theme
set_plot_theme('document')
# In[19]:
import pyvista
from pyvista import examples
import numpy as np
import vtk
# pyvista.rcParams['use_panel'] = False
# # Overview of PyVista
#
# This notebook demos the examples present directly in the PyVista package and helps new users learn how to get started using PyVista.
#
# This notebook is outlined in a manner that builds up an understanding of how PyVista wraps VTK data objects so that we can show how to use the PyVista objects then demonstrate how those PyVista objects are plotted.
# -----
#
# ## Getting Started
#
# Do you have some VTK data objects or VTK data files that you'd like to use in a Pythonic manner? Then go ahead and load your data file with its appropriate class! The classes available in PyVista correspond to VTK classes in the following way:
#
# | PyVista Class | `vtk` Class |
# |--------------|-------------|
# |`pyvista.PolyData`|`vtk.vtkPolyData`|
# |`pyvista.StructuredGrid`|`vtk.vtkStructuredGrid`|
# |`pyvista.UnstructuredGrid`|`vtk.vtkUnstructuredGrid`|
# |`pyvista.UniformGrid`|`vtk.vtkImageData`|
# |`pyvista.RectilinearGrid`|`vtk.vtkRectilinearGrid`|
#
# If you want to load a data file, then go ahead and use PyVista's `read` method to read your file. This function will handle figuring out what kind of data file you have and return the appropriate PyVista object for you:
#
# ```py
# import pyvista
# filename = 'myfile.vtk'
# data = pyvista.read(filename)
# ```
#
# We already have some VTK files under the examples directory so how about we grab that filename and demo this!
# In[2]:
#NBVAL_CHECK_OUTPUT
filename = examples.uniformfile
data = pyvista.read(filename)
type(data)
# Awesome! Now we have a `UniformGrid` ready to use (this is essentially just a wrapped `vtkImageData` object, so you could still pass this on to VTK algorithms.
#
# *So why do I want a PyVista wrapped object?* **First**, these allow you to access attributes of the VTK data object in a more Pythonic manner. For example:
# In[3]:
#NBVAL_CHECK_OUTPUT
data.n_cells
# In[4]:
#NBVAL_CHECK_OUTPUT
data.n_points
# In[5]:
#NBVAL_CHECK_OUTPUT
data.n_arrays
# In[6]:
data.points
# **Second**, PyVista has HTML representation for the VTK data objects that will give you a whole lot more insight into the data object than VTK's typical printing of the class name and memory address:
# In[7]:
data
# **And third**, PyVista has functions to handle plotting these objects right out of the box. You can simply call the `plot()` method on any PyVista object and a rendering will be produced for you!
# You can also pass the scalar name that you'd like to view and PyVista will handle coloring your dataset by that array:
# In[20]:
data.plot(scalars='Spatial Cell Data')
# -----
#
# ## Creating new VTK objects on the fly
#
# Here is a demo on how to quickly create new VTK data objects using PyVista
# ### vtkPolyData
# In[9]:
x = np.random.uniform(0, 10, 100)
y = np.random.uniform(0, 10, 100)
z = np.random.uniform(0, 10, 100)
mesh = pyvista.PolyData(np.c_[x,y,z])
mesh
# ### vtkStructuredGrid
# In[21]:
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 1)
zrng = np.arange(-10, 10, 0.5)
x, y, z = np.meshgrid(xrng, yrng, zrng)
grid = pyvista.StructuredGrid(x, y, z)
grid.plot()
# ### vtkUnstructuredGrid
# In[22]:
# Two hexahedral cells: `offset` gives the start index of each cell in the
# `cells` array; each cell entry is the vertex count (8) followed by the
# eight point indices.
offset = np.array([0, 9])
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
cell1 = np.array([[0, 0, 0],
                  [1, 0, 0],
                  [1, 1, 0],
                  [0, 1, 0],
                  [0, 0, 1],
                  [1, 0, 1],
                  [1, 1, 1],
                  [0, 1, 1]])
cell2 = np.array([[0, 0, 2],
                  [1, 0, 2],
                  [1, 1, 2],
                  [0, 1, 2],
                  [0, 0, 3],
                  [1, 0, 3],
                  [1, 1, 3],
                  [0, 1, 3]])
points = np.vstack((cell1, cell2))
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
grid.plot()
# ### vtkImageData
# In[12]:
dims = (10, 10, 10)
grid = pyvista.UniformGrid(dims) # Using default spacing and origin
spacing = (2, 1, 5)
grid = pyvista.UniformGrid(dims, spacing) # Using default origin
origin = (10, 35, 50)
grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
grid.plot()
# ### vtkRectilinearGrid
# In[14]:
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 5)
zrng = np.arange(-10, 10, 1)
grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
grid.plot()
# In[ ]:
| StarcoderdataPython |
137692 | import datetime
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy import String
from chainerui import database
from chainerui import db
from chainerui.tasks.crawl_result import crawl_result
class Result(database.BASE):
    """Result model: one experiment output directory tracked by ChainerUI."""

    __tablename__ = 'result'

    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey('project.id'))
    # Filesystem path of the result directory; unique across all projects.
    path_name = Column(String(512), unique=True)
    name = Column(String(512))
    is_unregistered = Column(Boolean(), default=False)
    logs = relationship('Log', cascade='all, delete-orphan')
    args = relationship(
        'Argument', uselist=False, cascade='all, delete-orphan'
    )
    commands = relationship('Command', cascade='all, delete-orphan')
    snapshots = relationship('Snapshot', cascade='all, delete-orphan')
    log_modified_at = Column(DateTime, default=None)
    # BUG FIX: pass the callable, not its result. ``datetime.datetime.now()``
    # was evaluated once at import time, so every row was stamped with the
    # process start time instead of its own insertion time.
    created_at = Column(DateTime, default=datetime.datetime.now)
    # NOTE(review): consider adding ``onupdate=datetime.datetime.now`` so
    # this column actually tracks updates.
    updated_at = Column(
        DateTime,
        default=datetime.datetime.now
    )

    def __init__(self, path_name=None, name=None, project_id=None,
                 log_modified_at=None):
        self.path_name = path_name
        self.name = name
        self.project_id = project_id
        self.log_modified_at = log_modified_at

    def __repr__(self):
        return '<Result id: %r, path_name: %r />' % (self.id, self.path_name)

    @classmethod
    def create(cls, path_name=None, name=None, project_id=None,
               log_modified_at=None):
        """Initialize an instance, save it to db, and crawl its logs."""
        result = cls(path_name, name, project_id, log_modified_at)
        db.session.add(result)
        db.session.commit()
        # Populate logs/args/commands immediately (force=True).
        crawl_result(result, True)
        return result

    def sampled_logs(self, logs_limit=-1):
        """Return up to `logs_limit` logs.

        If `logs_limit` is -1, this function will return all logs that belong
        to the result. The first and last log entries are always included
        when sampling.
        """
        logs_count = len(self.logs)
        if logs_limit == -1 or logs_count <= logs_limit:
            return self.logs
        elif logs_limit == 0:
            return []
        elif logs_limit == 1:
            return [self.logs[-1]]
        else:
            def get_sampled_log(idx):
                # always include the first and last element of `self.logs`
                return self.logs[idx * (logs_count - 1) // (logs_limit - 1)]
            return [get_sampled_log(i) for i in range(logs_limit)]

    def serialize_with_sampled_logs(self, logs_limit=-1):
        """serialize a result with up to `logs_limit` logs.

        If `logs_limit` is -1, this function will return a result with all
        its logs.
        """
        return {
            'id': self.id,
            'pathName': self.path_name,
            'name': self.name,
            'isUnregistered': self.is_unregistered,
            'logs': [log.serialize for log in self.sampled_logs(logs_limit)],
            'args': self.args.serialize if self.args is not None else [],
            'commands': [cmd.serialize for cmd in self.commands],
            'snapshots': [cmd.serialize for cmd in self.snapshots],
            'logModifiedAt': self.log_modified_at
        }

    @property
    def serialize(self):
        """serialize with all logs (no sampling)."""
        return self.serialize_with_sampled_logs(-1)
| StarcoderdataPython |
1712693 | <filename>gravityApp/app/GravityMainStatus.py
#!/usr/bin/env python
"""
Written by: <NAME>
Copyright 2021 Creative Collisions Technology, LLC
MainStatus.py
This class keeps track of different variables that we need to keep track of the state of the system.
This allows us to store information in the main thread that can be accessed by other threads for operation
and status reporting.
"""
class GravityMainStatus():
    """Mutable status holder shared between the main thread and workers."""
    def __init__(self):
        # Server identifier for status reporting; empty until the main
        # thread assigns it.
        self.server_name = ""
| StarcoderdataPython |
3394713 | import pytest
import os
import numpy as np
import pandas as pd
import taxcrunch.multi_cruncher as mcr
# Directory containing this test module; used to locate validation fixtures.
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
def test_a18_validation():
    """Baseline 2018 law: Tax-Crunch batch output must agree with TAXSIM."""
    input_path = os.path.join(CURRENT_PATH, "taxsim_validation/taxcrunch_in_a18.csv")
    expected_path = os.path.join(CURRENT_PATH, "taxsim_validation/taxsim_out_a18.csv")
    actual = mcr.Batch(input_path).create_table()
    expected = pd.read_csv(expected_path)
    assert np.allclose(actual["Individual Income Tax"], expected["fiitax"], atol=0.01)
    assert np.allclose(actual["Payroll Tax"], expected["fica"], atol=0.01)
    # TAXSIM reports marginal rates in percent; the table stores a fraction.
    assert np.allclose(actual["Income Tax MTR"] * 100, expected["frate"], atol=0.01)
def test_c18_validation():
    """2018 law with the TAXSIM-emulation reform applied must match TAXSIM."""
    input_path = os.path.join(CURRENT_PATH, "taxsim_validation/taxcrunch_in_c18.csv")
    reform_path = os.path.join(CURRENT_PATH, "taxsim_validation/taxsim_emulation.json")
    expected_path = os.path.join(CURRENT_PATH, "taxsim_validation/taxsim_out_c18.csv")
    actual = mcr.Batch(input_path).create_table(reform_file=reform_path)
    expected = pd.read_csv(expected_path)
    assert np.allclose(actual["Individual Income Tax"], expected["fiitax"], atol=.01)
    assert np.allclose(actual["Payroll Tax"], expected["fica"], atol=0.01)
    # TAXSIM reports marginal rates in percent; the table stores a fraction.
    assert np.allclose(actual["Income Tax MTR"] * 100, expected["frate"], atol=0.01)
| StarcoderdataPython |
185897 | import os
from nltk.tokenize import RegexpTokenizer
# Shared word tokenizer. BUG FIX: the pattern is now a raw string — "\w+"
# relied on an invalid string escape (a DeprecationWarning in Python 3,
# a SyntaxError in future versions).
tokenize = RegexpTokenizer(r"\w+").tokenize
# On-disk artifact locations produced by the pipeline.
PICKLE_PATH = "stuff.pkl"
TFIDF_PATH = "tfidf.npy"
VOCABULARY_PATH = "vocabulary.npy"
NPMI_FOLDER = "npmi_parts"
# Each NPMI matrix shard covers NPMI_PART_SIZE consecutive term ids and is
# named npmi<lo>-<hi>.npy for its inclusive id range.
NPMI_PATH_TEMPLATE = os.path.join(NPMI_FOLDER, "npmi{}-{}.npy")
NPMI_PART_SIZE = 100
def get_terms(text):
    """Lowercase *text* and split it into word tokens."""
    lowered = text.lower()
    return tokenize(lowered)
def make_npmi_dir():
    """Create the NPMI shard folder if it does not already exist.

    BUG FIX: ``os.makedirs(..., exist_ok=True)`` replaces the original
    exists()/mkdir() pair, which raced with concurrent creators (TOCTOU).
    """
    os.makedirs(NPMI_FOLDER, exist_ok=True)
def get_npmi_part_path(term):
    """Return the path of the NPMI shard file that holds *term*.

    Term ids are bucketed into ranges of NPMI_PART_SIZE. BUG FIX: uses
    floor division ``//`` — under Python 3 the original ``/`` produced a
    float and garbled the filename (e.g. ``npmi100.0-199.0.npy``); ``//``
    is also identical to ``/`` on ints under Python 2.
    """
    left_border = term // NPMI_PART_SIZE * NPMI_PART_SIZE
    return NPMI_PATH_TEMPLATE.format(left_border, left_border + NPMI_PART_SIZE - 1)
| StarcoderdataPython |
#!/usr/bin/python3
"""Summarise 13F filings stored in a local SQLite database.

For every fund (cik) found in SEC13F8, prints its reporting window, total
holdings, and the stocks making up more than 2% of the portfolio; then an
overall cross-fund summary of the most widely held stocks.
"""
from os.path import exists
import sqlite3 as sql
import pandas

db_path = './all13f.db'
if exists(db_path):
    conn = sql.connect(db_path)
else:
    print(db_path + ' does not exist. Exiting.')
    exit(1)

# Get funds: map cik -> fund name.
funds_df = pandas.read_sql_query('''SELECT * FROM "FUNDS"''', conn, index_col='cik')
funds_dict = funds_df.to_dict(orient='dict')['name']
top_funds = pandas.DataFrame()
cik_df = pandas.read_sql_query('''SELECT DISTINCT cik FROM "SEC13F8"''', conn)
for cik in cik_df['cik'].tolist():
    assert cik, 'Not a valid cik lookup.'
    print('-----------------------')
    print('Analyzing Fund: ', funds_dict[cik])
    # Parameterised queries (qmark style) instead of string concatenation:
    # avoids SQL injection and quoting issues with odd cik values.
    dates = pandas.read_sql_query('SELECT DISTINCT date from "SEC13F8" WHERE cik=? ORDER BY date ASC',
                                  conn, params=(cik,), parse_dates='date')['date']
    dateMin = min(dates).date()
    dateMax = max(dates).date()
    if dateMin == dateMax:
        print('Reported:', dateMin)
    else:
        print('Reports between:', dateMin, 'and', dateMax)
    fund_df = pandas.read_sql_query('SELECT cusip, issuer, SUM(value) FROM "SEC13F8" WHERE cik=? GROUP BY cusip ORDER BY SUM(value) DESC',
                                    conn, params=(cik,))
    fund_sum = fund_df['SUM(value)'].sum()
    print('Holdings: $%0.2fB' % (fund_sum/1e6))
    fund_pct = fund_df['SUM(value)']/fund_sum
    fund_df['pct'] = fund_pct
    # Stocks that are more than 2% of this fund's portfolio.
    top_df = fund_df[fund_pct > 0.02].copy()
    top_funds = pandas.concat([top_funds, top_df]).groupby('cusip', as_index=False).agg({'issuer': 'first', 'SUM(value)': 'sum', 'pct': 'sum'})
    print('Top stocks for fund:')
    top_df['SUM(value)'] = top_df['SUM(value)']/1000
    print(top_df.rename(columns={'issuer': 'Stock Issuer', 'SUM(value)': 'Value ($M)', 'pct': 'Sum Percentage'}))
top_funds.sort_values('pct', ascending=False, inplace=True)
top_funds.rename(columns={'SUM(value)': 'Value ($k)', 'pct': '% Fund Integrated'}, inplace=True)
print('--------------------------\n---------------------------')
print('Overall top funds, with percentage of portfolio integrated:')
print(top_funds.head(20))
all_df = pandas.read_sql_query('''SELECT cusip, issuer, cik, SUM(value), SUM(shares) FROM "SEC13F8" GROUP BY cusip ORDER BY SUM(value) DESC''', conn)
# Renamed from `sum` so the builtin is not shadowed.
total_value = all_df['SUM(value)'].sum()
pct = all_df['SUM(value)']/total_value
all_df['pct'] = pct
top = all_df[pct > 0.02]
print('----------------------------')
print(top[['cusip', 'issuer', 'pct']].rename(columns={'issuer': 'Stock Issuer', 'pct': '% Total Value'}))
print('Funds: ', all_df.cik.nunique())
print('Total holdings: $%0.2fB' % (total_value/1e6))
print('Number of investments >2% holding: ', len(top))
1623231 | <reponame>hirorin-demon/hirorin-streamlit
# Transliteration lookup page: one pinyin reading (with a trailing space) per
# low byte of a CJK code point; None marks code points with no mapping.
# NOTE(review): appears to be the unidecode-style table for code points
# U+5F00..U+5FFF (0x13-0x16 = Gong/Diao/Yin/Hu match 弓弔引弖) — confirm
# against the table generator before relying on the page assignment.
data = (
'Kai ', # 0x00
'Bian ', # 0x01
'Yi ', # 0x02
'Qi ', # 0x03
'Nong ', # 0x04
'Fen ', # 0x05
'Ju ', # 0x06
'Yan ', # 0x07
'Yi ', # 0x08
'Zang ', # 0x09
'Bi ', # 0x0a
'Yi ', # 0x0b
'Yi ', # 0x0c
'Er ', # 0x0d
'San ', # 0x0e
'Shi ', # 0x0f
'Er ', # 0x10
'Shi ', # 0x11
'Shi ', # 0x12
'Gong ', # 0x13
'Diao ', # 0x14
'Yin ', # 0x15
'Hu ', # 0x16
'Fu ', # 0x17
'Hong ', # 0x18
'Wu ', # 0x19
'Tui ', # 0x1a
'Chi ', # 0x1b
'Jiang ', # 0x1c
'Ba ', # 0x1d
'Shen ', # 0x1e
'Di ', # 0x1f
'Zhang ', # 0x20
'Jue ', # 0x21
'Tao ', # 0x22
'Fu ', # 0x23
'Di ', # 0x24
'Mi ', # 0x25
'Xian ', # 0x26
'Hu ', # 0x27
'Chao ', # 0x28
'Nu ', # 0x29
'Jing ', # 0x2a
'Zhen ', # 0x2b
'Yi ', # 0x2c
'Mi ', # 0x2d
'Quan ', # 0x2e
'Wan ', # 0x2f
'Shao ', # 0x30
'Ruo ', # 0x31
'Xuan ', # 0x32
'Jing ', # 0x33
'Dun ', # 0x34
'Zhang ', # 0x35
'Jiang ', # 0x36
'Qiang ', # 0x37
'Peng ', # 0x38
'Dan ', # 0x39
'Qiang ', # 0x3a
'Bi ', # 0x3b
'Bi ', # 0x3c
'She ', # 0x3d
'Dan ', # 0x3e
'Jian ', # 0x3f
'Gou ', # 0x40
'Sei ', # 0x41
'Fa ', # 0x42
'Bi ', # 0x43
'Kou ', # 0x44
'Nagi ', # 0x45
'Bie ', # 0x46
'Xiao ', # 0x47
'Dan ', # 0x48
'Kuo ', # 0x49
'Qiang ', # 0x4a
'Hong ', # 0x4b
'Mi ', # 0x4c
'Kuo ', # 0x4d
'Wan ', # 0x4e
'Jue ', # 0x4f
'Ji ', # 0x50
'Ji ', # 0x51
'Gui ', # 0x52
'Dang ', # 0x53
'Lu ', # 0x54
'Lu ', # 0x55
'Tuan ', # 0x56
'Hui ', # 0x57
'Zhi ', # 0x58
'Hui ', # 0x59
'Hui ', # 0x5a
'Yi ', # 0x5b
'Yi ', # 0x5c
'Yi ', # 0x5d
'Yi ', # 0x5e
'Huo ', # 0x5f
'Huo ', # 0x60
'Shan ', # 0x61
'Xing ', # 0x62
'Wen ', # 0x63
'Tong ', # 0x64
'Yan ', # 0x65
'Yan ', # 0x66
'Yu ', # 0x67
'Chi ', # 0x68
'Cai ', # 0x69
'Biao ', # 0x6a
'Diao ', # 0x6b
'Bin ', # 0x6c
'Peng ', # 0x6d
'Yong ', # 0x6e
'Piao ', # 0x6f
'Zhang ', # 0x70
'Ying ', # 0x71
'Chi ', # 0x72
'Chi ', # 0x73
'Zhuo ', # 0x74
'Tuo ', # 0x75
'Ji ', # 0x76
'Pang ', # 0x77
'Zhong ', # 0x78
'Yi ', # 0x79
'Wang ', # 0x7a
'Che ', # 0x7b
'Bi ', # 0x7c
'Chi ', # 0x7d
'Ling ', # 0x7e
'Fu ', # 0x7f
'Wang ', # 0x80
'Zheng ', # 0x81
'Cu ', # 0x82
'Wang ', # 0x83
'Jing ', # 0x84
'Dai ', # 0x85
'Xi ', # 0x86
'Xun ', # 0x87
'Hen ', # 0x88
'Yang ', # 0x89
'Huai ', # 0x8a
'Lu ', # 0x8b
'Hou ', # 0x8c
'Wa ', # 0x8d
'Cheng ', # 0x8e
'Zhi ', # 0x8f
'Xu ', # 0x90
'Jing ', # 0x91
'Tu ', # 0x92
'Cong ', # 0x93
None, # 0x94
'Lai ', # 0x95
'Cong ', # 0x96
'De ', # 0x97
'Pai ', # 0x98
'Xi ', # 0x99
None, # 0x9a
'Qi ', # 0x9b
'Chang ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Zhou ', # 0x9f
'Lai ', # 0xa0
'Yu ', # 0xa1
'Xie ', # 0xa2
'Jie ', # 0xa3
'Jian ', # 0xa4
'Chi ', # 0xa5
'Jia ', # 0xa6
'Bian ', # 0xa7
'Huang ', # 0xa8
'Fu ', # 0xa9
'Xun ', # 0xaa
'Wei ', # 0xab
'Pang ', # 0xac
'Yao ', # 0xad
'Wei ', # 0xae
'Xi ', # 0xaf
'Zheng ', # 0xb0
'Piao ', # 0xb1
'Chi ', # 0xb2
'De ', # 0xb3
'Zheng ', # 0xb4
'Zheng ', # 0xb5
'Bie ', # 0xb6
'De ', # 0xb7
'Chong ', # 0xb8
'Che ', # 0xb9
'Jiao ', # 0xba
'Wei ', # 0xbb
'Jiao ', # 0xbc
'Hui ', # 0xbd
'Mei ', # 0xbe
'Long ', # 0xbf
'Xiang ', # 0xc0
'Bao ', # 0xc1
'Qu ', # 0xc2
'Xin ', # 0xc3
'Shu ', # 0xc4
'Bi ', # 0xc5
'Yi ', # 0xc6
'Le ', # 0xc7
'Ren ', # 0xc8
'Dao ', # 0xc9
'Ding ', # 0xca
'Gai ', # 0xcb
'Ji ', # 0xcc
'Ren ', # 0xcd
'Ren ', # 0xce
'Chan ', # 0xcf
'Tan ', # 0xd0
'Te ', # 0xd1
'Te ', # 0xd2
'Gan ', # 0xd3
'Qi ', # 0xd4
'Shi ', # 0xd5
'Cun ', # 0xd6
'Zhi ', # 0xd7
'Wang ', # 0xd8
'Mang ', # 0xd9
'Xi ', # 0xda
'Fan ', # 0xdb
'Ying ', # 0xdc
'Tian ', # 0xdd
'Min ', # 0xde
'Min ', # 0xdf
'Zhong ', # 0xe0
'Chong ', # 0xe1
'Wu ', # 0xe2
'Ji ', # 0xe3
'Wu ', # 0xe4
'Xi ', # 0xe5
'Ye ', # 0xe6
'You ', # 0xe7
'Wan ', # 0xe8
'Cong ', # 0xe9
'Zhong ', # 0xea
'Kuai ', # 0xeb
'Yu ', # 0xec
'Bian ', # 0xed
'Zhi ', # 0xee
'Qi ', # 0xef
'Cui ', # 0xf0
'Chen ', # 0xf1
'Tai ', # 0xf2
'Tun ', # 0xf3
'Qian ', # 0xf4
'Nian ', # 0xf5
'Hun ', # 0xf6
'Xiong ', # 0xf7
'Niu ', # 0xf8
'Wang ', # 0xf9
'Xian ', # 0xfa
'Xin ', # 0xfb
'Kang ', # 0xfc
'Hu ', # 0xfd
'Kai ', # 0xfe
'Fen ', # 0xff
)
| StarcoderdataPython |
import numpy as np

# Simulate day 0 plus 256 further days (Advent-of-Code style lanternfish puzzle).
num_of_days = 257

# fish_numbers[t] counts how many fish currently have an internal timer of t.
fish_numbers = np.zeros(9)
initial_fish = [5,1,1,5,4,2,1,2,1,2,2,1,1,1,4,2,2,4,1,1,1,1,1,4,1,1,1,1,1,5,3,1,4,1,1,1,1,1,4,1,5,1,1,1,4,1,2,2,3,1,5,1,1,5,1,1,5,4,1,1,1,4,3,1,1,1,3,1,5,5,1,1,1,1,5,3,2,1,2,3,1,5,1,1,4,1,1,2,1,5,1,1,1,1,5,4,5,1,3,1,3,3,5,5,1,3,1,5,3,1,1,4,2,3,3,1,2,4,1,1,1,1,1,1,1,2,1,1,4,1,3,2,5,2,1,1,1,4,2,1,1,1,4,2,4,1,1,1,1,4,1,3,5,5,1,2,1,3,1,1,4,1,1,1,1,2,1,1,4,2,3,1,1,1,1,1,1,1,4,5,1,1,3,1,1,2,1,1,1,5,1,1,1,1,1,3,2,1,2,4,5,1,5,4,1,1,3,1,1,5,5,1,3,1,1,1,1,4,4,2,1,2,1,1,5,1,1,4,5,1,1,1,1,1,1,1,1,1,1,3,1,1,1,1,1,4,2,1,1,1,2,5,1,4,1,1,1,4,1,1,5,4,4,3,1,1,4,5,1,1,3,5,3,1,2,5,3,4,1,3,5,4,1,3,1,5,1,4,1,1,4,2,1,1,1,3,2,1,1,4]

# Tally the starting population by timer value.
for timer in initial_fish:
    fish_numbers[timer] += 1

# Each day every timer decrements; fish at 0 reset to 6 and spawn a fish at 8.
for _ in range(1, num_of_days):
    spawners = fish_numbers[0]
    fish_numbers = np.roll(fish_numbers, -1)  # shift timers down; old slot 0 wraps to slot 8
    fish_numbers[6] += spawners

print(sum(fish_numbers))
1622238 | <filename>algorithms/search_insert_position.py
from typing import List
# TODO 暴力法,待完善
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of target in sorted nums, or the index at which
        it would be inserted to keep nums sorted.

        Replaces the previous brute-force O(n) version, which also mutated
        the caller's list (insert/append) as a side effect. This binary
        search is O(log n), never modifies nums, and returns the same index
        for every input (bisect_left semantics: first occurrence on ties,
        len(nums) when target exceeds all elements).
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid
        return lo
| StarcoderdataPython |
34488 | <reponame>pabvald/chatbot<filename>brain/mastermind.py
from app import app, nlp
from brain import ACTIONS, LANGUAGES
from dateparser import parse
from datetime import datetime, date
from services import UserService, IntentService, AppointmentService
from utils import get_content
class MasterMind(object):
    """ MasterMind class

    Orchestrates a single user message: detects the user's intent with the
    spaCy text classifier, runs any action the intent requires, and builds
    the bot's reply in the user's language.
    """

    def __init__(self, user_service, text):
        """ Initializes a MasterMind instance """
        self._text = text
        self._user_service = user_service
        self._lang = self._user_service.get_language()
        # Run the lower-cased message through the language-specific pipeline.
        self._doc = nlp[self._lang](text.lower())
        self._user_service.register_user_msg([self._text])  # Register user's message

    @classmethod
    def from_telegram_msg(cls, tg_user, tg_text):
        """ Creates a MasterMind instance from a Telegram message's user and text """
        user_service = UserService.from_telegram_user(tg_user)
        return cls(user_service, tg_text)

    def get_response_for_telegram(self):
        """ Generates a response for the message with the Telegram format """
        responses = self._get_response()
        telegram_responses = list(map(lambda response: {
            'text': response,
            'parse_mode': 'HTML'
        }, responses))
        return telegram_responses

    def _get_response(self):
        """ Generates an adequate response """
        try:
            if self._user_service.is_new_user:
                responses = [self._welcome_message()]
            else:
                responses = self._intent_driven_message()
        except Exception as e:
            app.logger.error(str(e))
            responses = [self._internal_error_message()]
        finally:
            self._user_service.register_bot_msg(responses)  # Register bot messages
        # Fix: `return` moved out of the finally block (it used to sit inside
        # it, which silently swallows any in-flight exception).
        return responses

    def _welcome_message(self):
        """ Generates a welcome message in the corresponding language """
        return get_content(self._lang, ['welcome'])

    def _internal_error_message(self):
        """ Generates an internal error message in the corresponding language """
        return get_content(self._lang, ['internal_error'])

    def _intent_driven_message(self):
        """ Generates a intent-driven response for the message """
        responses = []
        errors = {}
        # Set intent service
        self._set_intent_service()
        intent_response = self._intent_service.get_response(self._doc)
        # Do action if there is one
        action = self._intent_service.get_action()
        if action:
            intent_params = self._intent_service.get_params()
            errors = self._do_action(action, intent_params)
            if errors:
                # Report each parameter error, clear the offending params and
                # re-ask the user for them.
                responses.extend(list(map(lambda err: err, errors.values())))
                self._intent_service.reset_params(**errors)
                self._invalidate_doc_entities()
                intent_response = self._intent_service.get_response(self._doc)
        # Save intent (if not completed)
        self._intent_service.save_intent()
        responses.append(intent_response)
        return responses

    def _set_intent_service(self):
        """ Identifies the intent of the message and creates an
            IntentService """
        # Intent identification
        all_intents = self._doc.cats
        # Check if there's an active intent
        active_intent = self._user_service.get_active_intent()
        if active_intent:
            # Force the active intent to the top of the ranking.
            all_intents[active_intent.name] = 1
        # Take intents priorities into account
        all_intents = dict(map(lambda kv: (kv[0], kv[1]*IntentService.priority(kv[0])), all_intents.items()))
        # Select the intent with the highest probability
        intent = max(all_intents, key=all_intents.get)
        # Intent service creation
        if active_intent and active_intent.name == intent:
            self._intent_service = IntentService.from_stored_intent(self._lang, active_intent)
        else:
            self._intent_service = IntentService.from_new_intent(self._lang, intent, self._user_service.user_id)

    def _invalidate_doc_entities(self):
        """ Invalidates the entities of the doc """
        self._doc.ents = []

    def _do_action(self, name, params):
        """ Executes the corresponding action.

        Returns a dict of error messages keyed by parameter name (empty on
        success). Raises AttributeError for unknown action names.
        """
        errors = {}
        if name not in ACTIONS:
            raise AttributeError("Action '{}' is not a valid action".format(name))
        if name == 'deactivate_intent':
            errors = self._deactivate_intent(params)
        elif name == 'make_appointment':
            errors = self._make_appointment(params)
        return errors

    def _deactivate_intent(self, params):
        """ Deactivates the user's current intent"""
        errors = {}
        content = get_content(self._lang, ['deactivate_intent'])
        try:
            self._user_service.deactivate_intent()
        except Exception:
            errors['main'] = content['error'].format(**params)
        # Fix: always return the errors dict. The return used to sit in the
        # try/except `else` clause, so the failure path fell through and
        # returned None, which _do_action's caller treated as "no errors" —
        # the error message was silently dropped.
        return errors

    def _make_appointment(self, params):
        """ Makes an appointment if the established time is valid """
        errors = {}
        content = get_content(self._lang, ['make_appointment'])
        # Parse the user-supplied date/time strings in the user's language.
        t_date = parse(params['date'], languages=[self._lang]).date()
        t_time = parse(params['time'], languages=[self._lang]).time()
        t_datetime = datetime.combine(t_date, t_time)
        av_slots = AppointmentService.get_available_slots(t_date.isoformat())

        # Parameters validation (first failing check wins).
        if t_date < date.today():
            errors['date'] = content['past_date'].format(**params)
        elif not AppointmentService.office_is_open_on_date(t_date.isoformat()):
            errors['date'] = content['office_close_date'].format(**params)
        elif not av_slots:
            errors['date'] = content['not_available_date'].format(**params)
        elif t_datetime < datetime.now():
            errors['time'] = content['past_datetime'].format(**params)
        elif not AppointmentService.office_is_open_on_datetime(
                t_datetime.isoformat()):
            errors['time'] = content['office_close_time'].format(**params)
        elif not AppointmentService.is_available(t_datetime.isoformat()):
            errors['time'] = content['not_available_time'].format(**params)

        if not errors:
            # Snap the requested time to the nearest bookable half-hour slot.
            closest_datetime = AppointmentService.closest_half(t_datetime.isoformat())
            t_time = datetime.fromisoformat(closest_datetime).time()
            try:
                self._user_service.make_appointment(t_date, t_time, params['topic'])
            except Exception as e:
                app.logger.error(str(e))
                errors['main'] = content['error'].format(**params)
        return errors
| StarcoderdataPython |
3348364 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 15:44:45 2021
@author: odyss
"""
import json
import pandas as pd
from source.database_classes import connect_to_mongo, Tweet, ProcessedTweet
connect_to_mongo()
def tweets_to_dataframe(queryset):
    """Convert a queryset of documents into a DataFrame indexed by id_str.

    Each element must expose to_json(); the id_str column is kept in the
    frame as well as being used for the index (drop=False).
    """
    records = [json.loads(document.to_json()) for document in queryset]
    frame = pd.DataFrame(records)
    frame.set_index(["id_str"], inplace=True, drop=False)
    return frame
# Materialise every stored ProcessedTweet document as a DataFrame.
tweets_df = tweets_to_dataframe(ProcessedTweet.objects.all())
# Keep at most 150 tweets per emotion label (class-balanced subsample).
sampled_ptdf = tweets_df.groupby(["emotion_label"]).apply(lambda x: x.head(150))
ok_ids = sampled_ptdf["id_str"].tolist()
# NOTE(review): destructive — permanently deletes every ProcessedTweet that
# did not make it into the balanced sample.
ProcessedTweet.objects(id_str__nin=ok_ids).delete()
| StarcoderdataPython |
3320022 | """LIANNtf_algorithmLIANN.py
# Author:
<NAME> - Copyright (c) 2020-2022 Baxter AI (baxterai.<EMAIL>)
# License:
MIT License
# Installation:
see LIANNtf_main.py
# Usage:
see LIANNtf_main.py
# Description:
LIANNtf algorithm LIANN - define local inhibition artificial neural network (force neural independence)
Emulates unsupervised singular value decomposition (SVD/factor analysis) learning for all hidden layers
"""
import tensorflow as tf
import numpy as np
from ANNtf2_operations import * #generateParameterNameSeq, generateParameterName, defineNetworkParameters
import ANNtf2_operations
import ANNtf2_globalDefs
import LIANNtf_algorithmLIANN_math
import copy
#select learningAlgorithm (unsupervised learning algorithm for intermediate/hidden layers):
learningAlgorithmNone = False #create a very large network (eg x10) neurons per layer, and perform final layer backprop only
learningAlgorithmCorrelationReset = False #minimise correlation between layer neurons #create a very large network (eg x10) neurons per layer, remove/reinitialise neurons that are highly correlated (redundant/not necessary to end performance), and perform final layer backprop only #orig mode
learningAlgorithmPCA = False #note layer construction is nonlinear (use ANNtf2_algorithmAEANN/autoencoder for nonlinear dimensionality reduction simulation) #incomplete
learningAlgorithmCorrelationStocasticOptimise = False #stochastic optimise weights based on objective function; minimise the correlation between layer neurons
learningAlgorithmIndependenceReset = False #randomise neuron weights until layer output independence is detected
learningAlgorithmMaximiseAndEvenSignalStochasticOptimise = False #stochastic optimise weights based on objective functions; #1: maximise the signal (ie successfully uninhibited) across multiple batches (entire dataset), #2: ensure that all layer neurons receive even activation across multiple batches (entire dataset)
learningAlgorithmUninhibitedImpermanenceReset = False #increase the permanence of uninhibited neuron weights, and stocastically modify weights based on their impermanence
learningAlgorithmUninhibitedHebbianStrengthen = False #strengthen weights of successfully activated neurons
learningAlgorithmPerformanceInhibitStocasticOptimise = True #learn to inhibit neurons in net for a given task, maximising final layer performance #neurons remain permanently inhibited, not just during training of weights; see Nactive
learningAlgorithmUnnormalisedActivityReset = False #ensure that average layer activation lies between a minimum and maximum level #regularise layer neural activity
#intialise network properties (configurable);
positiveExcitatoryWeights = False #requires testing #required for biological plausibility of most learningAlgorithms
positiveExcitatoryWeightsActivationFunctionOffsetDisable = False
supportSkipLayers = False #fully connected skip layer network #TODO: add support for skip layers #see ANNtf2_algorithmFBANN for template
supportMultipleNetworks = True
#intialise network properties;
largeBatchSize = False
generateLargeNetwork = False #large number of layer neurons is required for learningAlgorithmUninhibitedHebbianStrengthen:useZAcoincidenceMatrix
generateNetworkStatic = False
generateDeepNetwork = False
generateVeryLargeNetwork = False
#debug parameters;
debugFastTrain = False
debugSmallBatchSize = False #small batch size for debugging matrix output
#select learningAlgorithmFinalLayer (supervised learning algorithm for final layer/testing):
learningAlgorithmFinalLayerBackpropHebbian = True #only apply backprop (effective hebbian) learning at final layer
if(learningAlgorithmFinalLayerBackpropHebbian):
positiveExcitatoryWeightsFinalLayer = False #allow negative weights on final layer to emulate standard backprop/hebbian learning
#default sparsity
estNetworkActivationSparsity = 0.5 #50% of neurons are expected to be active during standard propagation (no inhibition)
#intialise algorithm specific parameters;
inhibitionAlgorithmBinary = False #simplified inhibition algorithm implementation - binary on/off
inhibitionAlgorithmArtificial = False #simplified inhibition algorithm implementation
inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = False #inhibit layer if more than x lateral neurons active
inhibitionAlgorithmArtificialSparsity = False #inhibition signal increases with number of simultaneously active neurons
#inhibitionAlgorithmSimulation = True #default inhibition algorithm (propagate signal through simulated inhibitory neurons)
#A thresholding;
Athreshold = False
AthresholdValue = 1.0 #do not allow output signal to exceed 1.0
#LIANN hidden layer vs final layer hebbian execution staging;
supportDimensionalityReductionLimitFrequency = False
supportDimensionalityReductionFirstPhaseOnly = True #perform LIANN in first phase only (x epochs of training), then apply hebbian learning at final layer
if(supportDimensionalityReductionFirstPhaseOnly):
supportDimensionalityReductionLimitFrequency = False
supportDimensionalityReductionFirstPhaseOnlyNumEpochs = 1
else:
supportDimensionalityReductionLimitFrequency = True
if(supportDimensionalityReductionLimitFrequency):
supportDimensionalityReductionLimitFrequencyStep = 1000
#intialise algorithm specific parameters;
enableInhibitionTrainAndInhibitSpecificLayerOnly = True
applyInhibitoryNetworkDuringTest = False
randomlyActivateWeightsDuringTrain = False
#enable shared stocastic optimisation parameters;
learningAlgorithmStochastic = False
if(learningAlgorithmCorrelationStocasticOptimise):
learningAlgorithmStochastic = True
elif(learningAlgorithmMaximiseAndEvenSignalStochasticOptimise):
learningAlgorithmStochastic = True
#learning algorithm customisation;
if(learningAlgorithmNone):
#can pass different task datasets through a shared randomised net
#note learningAlgorithmCorrelationReset requires supportSkipLayers - see LIANNtf_algorithmIndependentInput/AEANNtf_algorithmIndependentInput:learningAlgorithmLIANN for similar implementation
#positiveExcitatoryWeights = True #optional
generateDeepNetwork = True #optional #used for algorithm testing
generateVeryLargeNetwork = True
generateNetworkStatic = True
elif(learningAlgorithmCorrelationReset):
#note learningAlgorithmCorrelationReset requires supportSkipLayers - see LIANNtf_algorithmIndependentInput/AEANNtf_algorithmIndependentInput:learningAlgorithmLIANN for similar implementation
#positiveExcitatoryWeights = True #optional
maxCorrelation = 0.95 #requires tuning
supportDimensionalityReductionRandomise = True #randomise weights of highly correlated neurons, else zero them (effectively eliminating neuron from network, as its weights are no longer able to be trained)
generateDeepNetwork = True #optional #used for algorithm testing
generateVeryLargeNetwork = True
generateNetworkStatic = True
elif(learningAlgorithmPCA):
#positiveExcitatoryWeights = True #optional
largeBatchSize = True #1 PCA is performed across entire dataset [per layer]
elif(learningAlgorithmIndependenceReset):
#positiveExcitatoryWeights = True #optional
inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = True #mandatory
fractionIndependentInstancesAcrossBatchRequired = 0.3 #divide by number of neurons on layer #if found x% of independent instances, then record neuron as independent (solidify weights) #FUTURE: will depend on number of neurons on current layer and previous layer #CHECKTHIS: requires calibration
largeBatchSize = True
elif(learningAlgorithmStochastic):
inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = True #optional
if(learningAlgorithmCorrelationStocasticOptimise):
learningAlgorithmStochasticAlgorithm = "correlation"
#positiveExcitatoryWeights = True #optional
#learning objective function: minimise the correlation between layer neurons
elif(learningAlgorithmMaximiseAndEvenSignalStochasticOptimise):
learningAlgorithmStochasticAlgorithm = "maximiseAndEvenSignal"
#positiveExcitatoryWeights = True #optional?
#learning objective functions:
#1: maximise the signal (ie successfully uninhibited) across multiple batches (entire dataset)
#2: ensure that all layer neurons receive even activation across multiple batches (entire dataset)
metric1Weighting = 1.0
metric2Weighting = 1000.0 #normalise metric2Weighting relative to metric1Weighting; eg metric1 = 0.9575, metric2 = 0.000863842
numberStochasticIterations = 10
updateParameterSubsetSimultaneously = False #current tests indiciate this is not required/beneficial with significantly high batch size
if(updateParameterSubsetSimultaneously):
numberOfSubsetsTrialledPerBaseParameter = 10 #decreases speed, but provides more robust parameter updates
parameterUpdateSubsetSize = 5 #decreases speed, but provides more robust parameter updates
else:
numberOfSubsetsTrialledPerBaseParameter = 1
parameterUpdateSubsetSize = 1
NETWORK_PARAM_INDEX_TYPE = 0
NETWORK_PARAM_INDEX_LAYER = 1
NETWORK_PARAM_INDEX_H_CURRENT_LAYER = 2
NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER = 3
NETWORK_PARAM_INDEX_VARIATION_DIRECTION = 4
elif(learningAlgorithmUninhibitedImpermanenceReset):
inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = True #optional
#positiveExcitatoryWeights = True #optional?
enableInhibitionTrainAndInhibitSpecificLayerOnly = False #always enable inhibition #CHECKTHIS
applyInhibitoryNetworkDuringTest = True #CHECKTHIS (set False)
Wpermanence = {}
Bpermanence = {}
WpermanenceInitial = 0.1
BpermanenceInitial = 0.1
WpermanenceUpdateRate = 0.1
BpermanenceUpdateRate = 0.1
permanenceNumberBatches = 10 #if permanenceUpdateRate=1, average number of batches to reset W to random values
solidificationRate = 0.1
elif(learningAlgorithmUninhibitedHebbianStrengthen):
tuneInhibitionNeurons = False #optional
useZAcoincidenceMatrix = True #reduce connection weights for unassociated neurons
positiveExcitatoryWeights = True #mandatory (requires testing)
positiveExcitatoryWeightsThresholds = True #do not allow weights to exceed 1.0 / fall below 0.0 [CHECKTHIS]
Athreshold = True #prevents incremental increase in signal per layer
alwaysApplyInhibition = False
if(useZAcoincidenceMatrix):
alwaysApplyInhibition = True #inhibition is theoretically allowed at all times with useZAcoincidenceMatrix as it simply biases the network against a correlation between layer k neurons (inhibition is not set up to only allow X/1 neuron to fire)
if(alwaysApplyInhibition):
#TODO: note network sparsity/inhibition must be configured such that at least one neuron fires per layer
positiveExcitatoryWeightsActivationFunctionOffsetDisable = True #activation function will always be applied to Z signal comprising positive+negative components #CHECKTHIS
inhibitionAlgorithmArtificialSparsity = True
generateLargeNetwork = True #large is required because it will be sparsely activated due to constant inhibition
generateNetworkStatic = True #equal number neurons per layer for unsupervised layers/testing
enableInhibitionTrainAndInhibitSpecificLayerOnly = False #always enable inhibition
applyInhibitoryNetworkDuringTest = True
else:
inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = True #optional
enableInhibitionTrainAndInhibitSpecificLayerOnly = True
applyInhibitoryNetworkDuringTest = False
randomlyActivateWeightsDuringTrain = False #randomly activate x weights (simulating input at simulataneous time interval t)
if(randomlyActivateWeightsDuringTrain):
randomlyActivateWeightsProbability = 1.0
WinitialisationFactor = 1.0 #initialise network with relatively low weights #network will be trained (weights will be increased) up until point where activation inhibited
BinitialisationFactor = 1.0 #NOTUSED
weightDecay = False
if(useZAcoincidenceMatrix):
useZAcoincidenceMatrix = True #reduce connection weights for unassociated neurons
if(useZAcoincidenceMatrix):
#inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive = False #useZAcoincidenceMatrix requires real negative weights
normaliseWeightUpdates = False
else:
normaliseWeightUpdates = True #unimplemented: strengthen/update weights up to some maxima as determined by input signal strength (prevent runaway increase in weight strength up to 1.0)
else:
weightDecay = True #constant neural net weight decay, such that network can be continuously trained
weightDecayRate = 0.0 #defined by defineTrainingParametersLIANN
useZAcoincidenceMatrix = False
normaliseWeightUpdates = False
maxWeightUpdateThreshold = False #max threshold weight updates to learningRate
#TODO: ensure learning algorithm does not result in runnaway weight increases
elif(learningAlgorithmPerformanceInhibitStocasticOptimise):
enableInhibitionTrainAndInhibitSpecificLayerOnly = False #always enable inhibition
inhibitionAlgorithmBinary = True
inhibitionAlgorithmBinaryInitialiseRandom = True
supportDimensionalityReductionRandomise = True #randomise weights of highly correlated neurons, else zero them (effectively eliminating neuron from network, as its weights are no longer able to be trained)
generateDeepNetwork = True #optional #used for algorithm testing
generateVeryLargeNetwork = True
generateNetworkStatic = True
elif(learningAlgorithmUnnormalisedActivityReset):
supportDimensionalityReductionRandomise = True #randomise weights of highly correlated neurons, else zero them (effectively eliminating neuron from network, as its weights are no longer able to be trained)
generateDeepNetwork = True #optional #used for algorithm testing
generateVeryLargeNetwork = True
generateNetworkStatic = True
supportDimensionalityReductionRandomise = True
supportDimensionalityReductionRegulariseActivityMinAvg = 0.01 #requires tuning
supportDimensionalityReductionRegulariseActivityMaxAvg = 0.99 #requires tuning
supportDimensionalityReductionRandomise = True
learningRate = 0.0 #defined by defineTrainingParametersLIANN
#network/activation parameters;
#forward excitatory connections;
W = {}
B = {}
if(supportMultipleNetworks):
WallNetworksFinalLayer = None
BallNetworksFinalLayer = None
if(learningAlgorithmStochastic):
Wbackup = {}
Bbackup = {}
useBinaryWeights = False
if(generateVeryLargeNetwork):
generateLargeNetworkRatio = 100 #100 #default: 10
else:
if(generateLargeNetwork):
generateLargeNetworkRatio = 3
else:
generateLargeNetworkRatio = 1
positiveExcitatoryWeightsActivationFunctionOffset = False
if(positiveExcitatoryWeights):
if(positiveExcitatoryWeightsActivationFunctionOffsetDisable):
positiveExcitatoryWeightsActivationFunctionOffset = False
else:
positiveExcitatoryWeightsActivationFunctionOffset = True
normaliseInput = False #TODO: verify that the normalisation operation will not disort the code's capacity to process a new data batch the same as an old data batch
normalisedAverageInput = 1.0 #normalise input signal #arbitrary
if(positiveExcitatoryWeightsActivationFunctionOffset):
positiveExcitatoryThreshold = 0.5 #1.0 #weights are centred around positiveExcitatoryThreshold, from 0.0 to positiveExcitatoryThreshold*2 #arbitrary
Wmean = 0.5 #arbitrary
WstdDev = 0.05 #stddev of weight initialisations #CHECKTHIS
else:
normaliseInput = False
Wmean = 0.0
WstdDev = 0.05 #stddev of weight initialisations
randomUniformMin = 0.0
randomUniformMax = 1.0
randomUniformMid = 0.5
if(inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive or inhibitionAlgorithmArtificialSparsity):
inhibitionAlgorithmArtificial = True
if(inhibitionAlgorithmArtificial):
if(inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive):
inhibitionAlgorithmMoreThanXLateralNeuronActiveFraction = True
if(inhibitionAlgorithmMoreThanXLateralNeuronActiveFraction):
inhibitionAlgorithmMoreThanXLateralNeuronActiveFractionValue = 0.25 #fraction of the layer active allowed before inhibition
else:
inhibitionAlgorithmMoreThanXLateralNeuronActiveValue = 1 #maximum (X) number of neurons activate allowed before inhibition
else:
inhibitionFactor1 = 1.0 #pass through signal #positiveExcitatoryThreshold #CHECKTHIS: requires recalibration for activationFunction:positiveExcitatoryWeights
inhibitionFactor2 = estNetworkActivationSparsity #-(WstdDev) #inhibition contributes a significant (nullifying) effect on layer activation #CHECKTHIS: requires calibration
if(randomlyActivateWeightsDuringTrain):
inhibitionFactor1 = inhibitionFactor1
inhibitionFactor2 = (inhibitionFactor2*randomlyActivateWeightsProbability) #the lower the average activation, the lower the inhibition
#TODO: inhibitionFactor1/inhibitionFactor2 requires recalibration for activationFunction:positiveExcitatoryWeights
singleInhibitoryNeuronPerLayer = False #simplified inhibitory layer
#lateral inhibitory connections (incoming/outgoing);
IWi = {}
IBi = {}
IWo = {}
IWiWeights = inhibitionFactor1 #need at least 1/IWiWeights active neurons per layer for the inhibitory neuron to become activated #CHECKTHIS: requires calibration #WstdDev*2 #0.5 #0.3
IWoWeights = inhibitionFactor2 #will depend on activation sparsity of network (higher the positive activation, higher the inhibition required) #requires calibration such that more than x (e.g. 1) active neuron on a layer will inhibit the layer
In_h = []
if(inhibitionAlgorithmBinary):
Nactive = {} #effective bool [1.0 or 0.0]; whether neuron is active/inhibited
if(learningAlgorithmIndependenceReset):
Bindependent = {} #independent neurons previously identified #effective boolean (0.0 or 1.0) #FUTURE: consider making this a continuous variable, such that the higher the independence the less the variable is randomly shuffled per training iteration
#Network parameters
n_h = []
numberOfLayers = 0
numberOfNetworks = 0
datasetNumClasses = 0
#note high batchSize is required for learningAlgorithmStochastic algorithm objective functions (>= 100)
def defineTrainingParameters(dataset):
    """Return (learningRate, trainingSteps, batchSize, displayStep, numEpochs)
    chosen according to the module's learning-algorithm/debug configuration
    flags.

    Side effects: sets the module global learningRate (and weightDecayRate
    when learningAlgorithmUninhibitedHebbianStrengthen is enabled).
    """
    global learningRate
    global weightDecayRate
    # Stochastic/Hebbian algorithms use a smaller learning rate.
    if(learningAlgorithmStochastic):
        learningRate = 0.001
    elif(learningAlgorithmUninhibitedHebbianStrengthen):
        learningRate = 0.001
        weightDecayRate = learningRate/10.0  #CHECKTHIS #will depend on learningRate
    else:
        learningRate = 0.005
    if(debugSmallBatchSize):
        batchSize = 10
    else:
        if(largeBatchSize):
            batchSize = 1000  #current implementation: batch size should contain all examples in training set
        else:
            batchSize = 100  #3 #100
    if(generateDeepNetwork):
        numEpochs = 100  #higher num epochs required for convergence
    else:
        numEpochs = 10  #100 #10
    if(debugFastTrain):
        trainingSteps = batchSize
    else:
        trainingSteps = 10000  #1000
    displayStep = 100
    return learningRate, trainingSteps, batchSize, displayStep, numEpochs
def defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet):
	"""Derive the layer-size globals (n_h, In_h, numberOfLayers, numberOfNetworks,
	datasetNumClasses) for the dataset and return numberOfLayers."""
	global n_h
	global numberOfLayers
	global numberOfNetworks
	global datasetNumClasses
	global In_h	#note: `global` is a compile-time declaration, so declaring it unconditionally is equivalent to the original conditional declaration
	firstHiddenLayerNumberNeurons = num_input_neurons*generateLargeNetworkRatio
	numberOfLayers = 3 if generateDeepNetwork else 2
	n_h, numberOfLayers, numberOfNetworks, datasetNumClasses = defineNetworkParametersDynamic(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet, numberOfLayers, firstHiddenLayerNumberNeurons, generateNetworkStatic)
	#n_h, numberOfLayers, numberOfNetworks, datasetNumClasses = ANNtf2_operations.defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworksSet, generateLargeNetwork=generateLargeNetwork, generateNetworkStatic=generateNetworkStatic)
	if(not inhibitionAlgorithmArtificial):
		#one inhibitory neuron per layer (simplified) or one per excitatory neuron;
		In_h = ([1] * len(n_h)) if singleInhibitoryNeuronPerLayer else copy.copy(n_h)
	return numberOfLayers
def defineNeuralNetworkParameters():
	"""Initialise all network parameters for every network and layer.

	Populates the module-level parameter dictionaries (W, B, plus the
	algorithm-specific stores Bindependent, Wbackup/Bbackup,
	Wpermanence/Bpermanence, IWi/IBi/IWo, Nactive), each keyed by
	generateParameterNameNetwork(networkIndex, layer, name).
	"""
	print("numberOfNetworks", numberOfNetworks)
	global randomNormal
	global randomUniformIndex
	randomNormal = tf.initializers.RandomNormal(mean=Wmean, stddev=WstdDev)
	#randomNormal = tf.initializers.RandomNormal()
	randomNormalFinalLayer = tf.initializers.RandomNormal()	#default (zero-mean) initialiser, used for the final layer when it is exempt from the positive-weight constraint
	randomUniformIndex = tf.initializers.RandomUniform(minval=randomUniformMin, maxval=randomUniformMax)	#not available: minval=0, maxval=numberOfSharedComputationalUnitsNeurons, dtype=tf.dtypes.int32;
	for networkIndex in range(1, numberOfNetworks+1):
		for l1 in range(1, numberOfLayers+1):
			#forward excitatory connections;
			EWlayer = randomNormal([n_h[l1-1], n_h[l1]])
			EBlayer = tf.zeros(n_h[l1])
			if(positiveExcitatoryWeights):
				EWlayer = tf.abs(EWlayer)	#ensure randomNormal generated weights are positive
				if((l1 == numberOfLayers) and not positiveExcitatoryWeightsFinalLayer):
					EWlayer = randomNormalFinalLayer([n_h[l1-1], n_h[l1]])	#final layer exempt from positivity
			if(learningAlgorithmUninhibitedHebbianStrengthen):
				EWlayer = tf.multiply(EWlayer, WinitialisationFactor)
				EBlayer = tf.multiply(EBlayer, BinitialisationFactor)
			W[generateParameterNameNetwork(networkIndex, l1, "W")] = tf.Variable(EWlayer)
			B[generateParameterNameNetwork(networkIndex, l1, "B")] = tf.Variable(EBlayer)
			if(learningAlgorithmIndependenceReset):
				Bindependent[generateParameterNameNetwork(networkIndex, l1, "Bindependent")] = tf.Variable(EBlayer)	#initialise all neurons to zero (false)
			elif(learningAlgorithmStochastic):
				#restorable copies of W/B for the trial-and-revert stochastic updates;
				Wbackup[generateParameterNameNetwork(networkIndex, l1, "W")] = tf.Variable(W[generateParameterNameNetwork(networkIndex, l1, "W")])
				Bbackup[generateParameterNameNetwork(networkIndex, l1, "B")] = tf.Variable(B[generateParameterNameNetwork(networkIndex, l1, "B")])
			elif(learningAlgorithmUninhibitedImpermanenceReset):
				EWlayerPermanence = tf.multiply(tf.ones([n_h[l1-1], n_h[l1]]), WpermanenceInitial)
				EBlayerPermanence = tf.multiply(tf.ones(n_h[l1]), BpermanenceInitial)
				Wpermanence[generateParameterNameNetwork(networkIndex, l1, "Wpermanence")] = tf.Variable(EWlayerPermanence)
				Bpermanence[generateParameterNameNetwork(networkIndex, l1, "Bpermanence")] = tf.Variable(EBlayerPermanence)
			if(not inhibitionAlgorithmArtificial):
				#lateral inhibitory connections (incoming/outgoing);
				#do not currently train inhibitory weights;
				IWilayer = tf.multiply(tf.ones([n_h[l1], In_h[l1]]), IWiWeights)	#CHECKTHIS: inhibitory neuron firing is a function of current (lateral) layer (not previous layer)
				IBilayer = tf.zeros(In_h[l1])
				if(singleInhibitoryNeuronPerLayer):
					IWoWeightsL = IWoWeights
				else:
					IWoWeightsL = IWoWeights/In_h[l1]	#normalise across number inhibitory neurons
				IWolayer = tf.multiply(tf.ones([In_h[l1], n_h[l1]]), IWoWeightsL)
				IWi[generateParameterNameNetwork(networkIndex, l1, "IWi")] = tf.Variable(IWilayer)
				IBi[generateParameterNameNetwork(networkIndex, l1, "IBi")] = tf.Variable(IBilayer)
				IWo[generateParameterNameNetwork(networkIndex, l1, "IWo")] = tf.Variable(IWolayer)
			if(inhibitionAlgorithmBinary):
				if(inhibitionAlgorithmBinaryInitialiseRandom):
					#randomly initialise the active/inhibited flags (threshold at randomUniformMid);
					Nactivelayer = randomUniformIndex([n_h[l1]])	#tf.cast(), dtype=tf.dtypes.bool)
					Nactivelayer = tf.greater(Nactivelayer, randomUniformMid)
					Nactivelayer = tf.cast(Nactivelayer, dtype=tf.dtypes.float32)
				else:
					Nactivelayer = tf.ones(n_h[l1])	#all neurons start active
				Nactive[generateParameterNameNetwork(networkIndex, l1, "Nactive")] = tf.Variable(Nactivelayer)
	if(supportMultipleNetworks):
		if(numberOfNetworks > 1):
			#shared final layer over the concatenated penultimate-layer outputs of all networks;
			global WallNetworksFinalLayer
			global BallNetworksFinalLayer
			WlayerF = randomNormal([n_h[numberOfLayers-1]*numberOfNetworks, n_h[numberOfLayers]])
			WallNetworksFinalLayer = tf.Variable(WlayerF)
			Blayer = tf.zeros(n_h[numberOfLayers])
			BallNetworksFinalLayer = tf.Variable(Blayer)	#not currently used
def neuralNetworkPropagation(x, networkIndex=1):
	"""Standard propagation entry point; delegates to the LIANN test-time forward pass."""
	return neuralNetworkPropagationLIANNtest(x, networkIndex=networkIndex)
def neuralNetworkPropagationLIANNtest(x, networkIndex=1, l=None):
	"""Test-time forward pass; delegates to the minimal LIANN propagation routine."""
	return neuralNetworkPropagationLIANNminimal(x, networkIndex=networkIndex, l=l)
def neuralNetworkPropagationLayer(x, networkIndex=1, l=None):
	"""Propagate x up to layer l; delegates to the minimal LIANN propagation routine."""
	#return neuralNetworkPropagationLIANN(x, None, networkIndex, trainWeights=False)
	return neuralNetworkPropagationLIANNminimal(x, networkIndex=networkIndex, l=l)
def neuralNetworkPropagationLIANNtrainIntro(x, y=None, networkIndex=1):
	"""Training entry point: dispatch to neuralNetworkPropagationLIANNtrain, either
	layer-by-layer (enableInhibitionTrainAndInhibitSpecificLayerOnly) or all layers at once.

	NOTE(review): the `return` inside the loop body exits on the first iteration, so only
	layerToTrain=1 is ever trained per call (layers 2..numberOfLayers-1 are unreachable).
	Confirm whether the loop was intended to iterate without returning.
	"""
	if(enableInhibitionTrainAndInhibitSpecificLayerOnly):
		for l in range(1, numberOfLayers+1):
			if(l < numberOfLayers):
				return neuralNetworkPropagationLIANNtrain(x, y, networkIndex, layerToTrain=l)
	else:
		return neuralNetworkPropagationLIANNtrain(x, y, networkIndex, layerToTrain=None)
#if(supportMultipleNetworks):
def neuralNetworkPropagationAllNetworksFinalLayer(AprevLayer):
	"""Propagate the concatenated penultimate-layer activations of all networks through
	the shared final layer and return softmax class probabilities."""
	#Z = tf.matmul(AprevLayer, WallNetworksFinalLayer)
	logits = tf.add(tf.matmul(AprevLayer, WallNetworksFinalLayer), BallNetworksFinalLayer)
	return tf.nn.softmax(logits)
#minimal code extracted from neuralNetworkPropagationLIANN;
def neuralNetworkPropagationLIANNminimal(x, networkIndex=1, l=None):
	"""Forward-propagate x up to layer l (the whole network when l is None).

	Returns softmax probabilities when propagation reaches the final layer,
	otherwise the activations of layer l.
	"""
	maxLayer = numberOfLayers if l is None else l
	applyInhibition = not enableInhibitionTrainAndInhibitSpecificLayerOnly	#inhibition permanently on unless trained layer-by-layer
	AprevLayer, ZprevLayer = x, x
	for layerIndex in range(1, maxLayer+1):
		A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, layerIndex, applyInhibition, False)
		if(learningAlgorithmFinalLayerBackpropHebbian):
			A = tf.stop_gradient(A)	#block gradient flow below the final layer
		AprevLayer, ZprevLayer = A, Z
	return tf.nn.softmax(Z) if maxLayer == numberOfLayers else A
def neuralNetworkPropagationLIANNtrain(x, y=None, networkIndex=1, layerToTrain=None):
	"""Forward-propagate x and apply the configured unsupervised learning algorithm
	to each trainable layer (or only to layerToTrain when set).

	Returns softmax over the last computed layer's pre-activations.
	"""
	if(normaliseInput):
		#TODO: verify that the normalisation operation will not disort the code's capacity to process a new data batch the same as an old data batch
		averageTotalInput = tf.math.reduce_mean(x)
		#print("averageTotalInput = ", averageTotalInput)
		x = tf.multiply(x, normalisedAverageInput/averageTotalInput)	#normalise input wrt positiveExcitatoryThreshold
		#averageTotalInput = tf.math.reduce_mean(x)
	if(layerToTrain is None):
		maxLayer = numberOfLayers
	else:	#ie !enableInhibitionTrainAndInhibitSpecificLayerOnly
		maxLayer = layerToTrain
	AprevLayer = x
	ZprevLayer = x
	for l in range(1, maxLayer+1):
		trainLayer = False
		enableInhibition = False
		randomlyActivateWeights = False
		if(enableInhibitionTrainAndInhibitSpecificLayerOnly):
			#only the designated layer is trained (with inhibition);
			if(l == layerToTrain):
				#enableInhibition = False
				enableInhibition = True
				trainLayer = True
		else:
			#all hidden layers are trained (final layer excluded);
			if(l < numberOfLayers):
				enableInhibition = True
				trainLayer = True
		if(randomlyActivateWeightsDuringTrain):
			randomlyActivateWeights = True
		if(trainLayer):
			#CHECKTHIS: verify learning algorithm (how to modify weights to maximise independence between neurons on each layer)
			if(learningAlgorithmNone):
				neuralNetworkPropagationLIANNlearningAlgorithmNone(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmCorrelationReset):
				neuralNetworkPropagationLIANNlearningAlgorithmCorrelationReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmPCA):
				neuralNetworkPropagationLIANNlearningAlgorithmPCA(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmIndependenceReset):
				neuralNetworkPropagationLIANNlearningAlgorithmIndependenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmStochastic):
				neuralNetworkPropagationLIANNlearningAlgorithmStochastic(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmUninhibitedImpermanenceReset):
				neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedImpermanenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmUninhibitedHebbianStrengthen):
				neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedHebbianStrengthen(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			elif(learningAlgorithmPerformanceInhibitStocasticOptimise):
				neuralNetworkPropagationLIANNlearningAlgorithmPerformanceInhibitStocasticOptimise(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights, x, y)
			elif(learningAlgorithmUnnormalisedActivityReset):
				neuralNetworkPropagationLIANNlearningAlgorithmUnnormalisedActivityReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
			#recompute activations with the (possibly) updated weights before moving to the next layer;
			A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition=(not enableInhibitionTrainAndInhibitSpecificLayerOnly), randomlyActivateWeights=False)
		else:
			A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights=False)
		AprevLayer = A
		ZprevLayer = Z
	return tf.nn.softmax(Z)
def calculatePropagationLoss(x, y, networkIndex=1):
	"""Return the cross-entropy loss of the network's prediction for x against target y."""
	pred = neuralNetworkPropagation(x, networkIndex)
	#acc = calculateAccuracy(pred, target)	#only valid for softmax class targets
	return calculateLossCrossEntropy(pred, y, datasetNumClasses, False)	#final positional arg: costCrossEntropyWithLogits
def neuralNetworkPropagationLIANNlearningAlgorithmNone(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights):
	"""No-op learning algorithm: layer weights are left at their initialisation."""
	pass
def neuralNetworkPropagationLIANNlearningAlgorithmCorrelationReset(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights):
	"""Forward-propagate one layer, then hand its activations to the correlation
	minimisation routine (which resets overly correlated neurons)."""
	layerActivations, _, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l1)
	#measure and minimise correlation between layer neurons;
	neuronActivationCorrelationMinimisation(networkIndex, n_h, l1, layerActivations, randomNormal, Wf=W, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=supportSkipLayers, supportDimensionalityReductionRandomise=supportDimensionalityReductionRandomise, maxCorrelation=maxCorrelation)
def neuralNetworkPropagationLIANNlearningAlgorithmPCA(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
	"""Set layer l's weights directly from an SVD of the previous-layer activations
	(PCA-style, no iterative training)."""
	#Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)	#batched
	svdInput = LIANNtf_algorithmLIANN_math.generateSVDinputMatrix(l, n_h, AprevLayer)
	U, Sigma, VT = LIANNtf_algorithmLIANN_math.calculateSVD(M=svdInput, k=n_h[l])
	newWeights = LIANNtf_algorithmLIANN_math.calculateWeights(l, n_h, svdInput, U, Sigma, VT)
	W[generateParameterNameNetwork(networkIndex, l, "W")] = newWeights
	#weights = U -> Sigma -> VT [linear]
	#M_reduced = reduce_to_k_dim(M=spikeCoincidenceMatrix, k=n_h[l])
def neuralNetworkPropagationLIANNlearningAlgorithmIndependenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
	"""Repeatedly reinitialise the weights of 'dependent' neurons on layer l until none remain.

	A neuron is solidified as independent (Bindependent flag -> 1.0) once the number of batch
	instances in which it was the sole active neuron on the layer exceeds
	fractionIndependentInstancesAcrossBatchRequired*n_h[l]. All still-dependent neurons have
	their incoming weights re-randomised each iteration.
	"""
	layerHasDependentNeurons = True
	Bind = Bindependent[generateParameterNameNetwork(networkIndex, l, "Bindependent")]
	if(count_zero(Bind) > 0):	#more than 1 dependent neuron on layer
		layerHasDependentNeurons = True
	else:
		layerHasDependentNeurons = False
	while(layerHasDependentNeurons):
		Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)	#batched
		#identify batch instances where exactly one neuron on the layer fired;
		AnumActive = tf.math.count_nonzero(Afinal, axis=1)	#batched
		Aindependent = tf.equal(AnumActive, 1)	#batched
		Aindependent = tf.dtypes.cast(Aindependent, dtype=tf.dtypes.float32)	#batched
		Aindependent = tf.expand_dims(Aindependent, 1)	#batched
		#print("Afinal = ", Afinal)
		#print("AnumActive = ", AnumActive)
		#print("Aindependent = ", Aindependent)
		Aactive = tf.greater(Afinal, 0)	#2D: batched, for every k neuron
		Aactive = tf.dtypes.cast(Aactive, dtype=tf.dtypes.float32)	#2D: batched, for every k neuron
		#print("Aactive = ", Aactive)
		#ex
		#per-neuron count of instances where the neuron was active AND the sole active neuron;
		AactiveAndIndependent = tf.multiply(Aactive, Aindependent)	#2D: batched, for every k neuron
		AactiveAndIndependent = tf.reduce_sum(AactiveAndIndependent, axis=0)	#for every k neuron
		AactiveAndIndependentPass = tf.greater(AactiveAndIndependent, fractionIndependentInstancesAcrossBatchRequired*n_h[l])	#for every k neuron
		#print("AactiveAndIndependentPass = ", AactiveAndIndependentPass)
		BindBool = tf.dtypes.cast(Bind, dtype=tf.dtypes.bool)
		AactiveAndIndependentPassRequiresSolidifying = tf.logical_and(AactiveAndIndependentPass, tf.logical_not(BindBool))	#newly independent neurons only
		#print("AactiveAndIndependentPass = ", AactiveAndIndependentPass)
		#print("BindBool = ", BindBool)
		print("AactiveAndIndependentPassRequiresSolidifying = ", AactiveAndIndependentPassRequiresSolidifying)
		BindNew = tf.logical_or(BindBool, AactiveAndIndependentPassRequiresSolidifying)
		BdepNew = tf.logical_not(BindNew)
		#update layer weights (reinitialise weights for all dependent neurons);
		BindNew = tf.dtypes.cast(BindNew, dtype=tf.dtypes.float32)
		BdepNew = tf.dtypes.cast(BdepNew, dtype=tf.dtypes.float32)
		EWlayerDep = randomNormal([n_h[l-1], n_h[l]])	#fresh random weights for dependent neurons
		if(positiveExcitatoryWeights):
			EWlayerDep = tf.abs(EWlayerDep)	#ensure randomNormal generated weights are positive
		EBlayerDep = tf.zeros(n_h[l])
		EWlayerDep = tf.multiply(EWlayerDep, BdepNew)	#requires broadcasting
		EBlayerDep = tf.multiply(EBlayerDep, BdepNew)
		EWlayerInd = W[generateParameterNameNetwork(networkIndex, l, "W")]	#keep existing weights for independent neurons
		EBlayerInd = B[generateParameterNameNetwork(networkIndex, l, "B")]
		EWlayerInd = tf.multiply(EWlayerInd, BindNew)	#requires broadcasting
		EBlayerInd = tf.multiply(EBlayerInd, BindNew)
		EWlayerNew = tf.add(EWlayerDep, EWlayerInd)
		EBlayerNew = tf.add(EBlayerDep, EBlayerInd)
		W[generateParameterNameNetwork(networkIndex, l, "W")] = EWlayerNew
		B[generateParameterNameNetwork(networkIndex, l, "B")] = EBlayerNew
		#print("EWlayerNew = ", EWlayerNew)
		#print("BdepNew = ", BdepNew)
		#print("BindNew = ", BindNew)
		Bindependent[generateParameterNameNetwork(networkIndex, l, "Bindependent")] = BindNew	#update independence record
		Bind = BindNew
		if(count_zero(Bind) > 0):	#more than 1 dependent neuron on layer
			layerHasDependentNeurons = True
			#print("layerHasDependentNeurons: count_zero(Bind) = ", count_zero(Bind))
		else:
			layerHasDependentNeurons = False
			#print("!layerHasDependentNeurons")
def neuralNetworkPropagationLIANNlearningAlgorithmStochastic(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
	"""Hill-climbing weight search for layer l: perturb random subsets of W/B parameters
	and keep a perturbation only if it improves the layer metric
	(learningAlgorithmStochasticCalculateMetric); otherwise restore from Wbackup/Bbackup.

	NOTE(review): the loop variable `s` is reused by the inner subset loops, shadowing the
	outer numberStochasticIterations counter. This does not change iteration counts in
	Python, but it is confusing — consider renaming the inner loop variables.
	"""
	if(learningAlgorithmStochastic):
		if(useBinaryWeights):
			variationDirections = 1	#binary weights are flipped, so one direction suffices
		else:
			variationDirections = 2	#trial both +learningRate and -learningRate perturbations
		#code from ANNtf2_algorithmLREANN_expSUANN;
		for s in range(numberStochasticIterations):
			for hIndexCurrentLayer in range(0, n_h[l]):
				for hIndexPreviousLayer in range(0, n_h[l-1]+1):
					if(hIndexPreviousLayer == n_h[l-1]):	#ensure that B parameter updates occur/tested less frequently than W parameter updates
						parameterTypeWorB = 0
					else:
						parameterTypeWorB = 1
					for variationDirectionInt in range(variationDirections):
						networkParameterIndexBase = (parameterTypeWorB, l, hIndexCurrentLayer, hIndexPreviousLayer, variationDirectionInt)
						metricBase = learningAlgorithmStochasticCalculateMetric(networkIndex, AprevLayer, ZprevLayer, l)
						for subsetTrialIndex in range(0, numberOfSubsetsTrialledPerBaseParameter):
							accuracyImprovementDetected = False
							#assemble a random subset of parameters to perturb together (seeded with the base parameter);
							currentSubsetOfParameters = []
							currentSubsetOfParameters.append(networkParameterIndexBase)
							for s in range(1, parameterUpdateSubsetSize):
								networkParameterIndex = getRandomNetworkParameter(networkIndex, currentSubsetOfParameters)
								currentSubsetOfParameters.append(networkParameterIndex)
							for s in range(0, parameterUpdateSubsetSize):
								networkParameterIndex = currentSubsetOfParameters[s]
								if(not useBinaryWeights):
									if(networkParameterIndex[NETWORK_PARAM_INDEX_VARIATION_DIRECTION] == 1):
										variationDiff = learningRate
									else:
										variationDiff = -learningRate
								if(networkParameterIndex[NETWORK_PARAM_INDEX_TYPE] == 1):
									#perturb a W (weight) parameter;
									#Wnp = W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")].numpy()
									#currentVal = Wnp[networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]]
									currentVal = W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")][networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].numpy()
									#print("currentVal = ", currentVal)
									#print("W1 = ", W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")])
									if(useBinaryWeights):
										if(useBinaryWeightsReduceMemoryWithBool):
											newVal = not currentVal
										else:
											newVal = float(not bool(currentVal))
											#print("newVal = ", newVal)
									else:
										newVal = currentVal + variationDiff
										if(positiveExcitatoryWeights):
											newVal = max(newVal, 0)	#do not allow weights fall below zero [CHECKTHIS]
									W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")][networkParameterIndex[NETWORK_PARAM_INDEX_H_PREVIOUS_LAYER], networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].assign(newVal)
									#print("W2 = ", W[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "W")])
								else:
									#perturb a B (bias) parameter;
									#Bnp = B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")].numpy()
									#currentVal = Bnp[networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]]
									currentVal = B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")][networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].numpy()
									if(useBinaryWeights):
										if(useBinaryWeightsReduceMemoryWithBool):
											newVal = not currentVal
										else:
											newVal = float(not bool(currentVal))
									else:
										newVal = currentVal + variationDiff
										if(positiveExcitatoryWeights):
											newVal = max(newVal, 0)	#do not allow weights fall below zero [CHECKTHIS]
									B[generateParameterNameNetwork(networkIndex, networkParameterIndex[NETWORK_PARAM_INDEX_LAYER], "B")][networkParameterIndex[NETWORK_PARAM_INDEX_H_CURRENT_LAYER]].assign(newVal)
							#evaluate the perturbed parameters; keep or revert;
							metricAfterStochasticUpdate = learningAlgorithmStochasticCalculateMetric(networkIndex, AprevLayer, ZprevLayer, l)
							#print("metricBase = ", metricBase)
							#print("metricAfterStochasticUpdate = ", metricAfterStochasticUpdate)
							if(metricAfterStochasticUpdate > metricBase):
								#print("(metricAfterStochasticUpdate > metricBase)")
								accuracyImprovementDetected = True
								metricBase = metricAfterStochasticUpdate
							#else:
								#print("(metricAfterStochasticUpdate < metricBase)")
							if(accuracyImprovementDetected):
								#retain weight update
								Wbackup[generateParameterNameNetwork(networkIndex, l, "W")].assign(W[generateParameterNameNetwork(networkIndex, l, "W")])
								Bbackup[generateParameterNameNetwork(networkIndex, l, "B")].assign(B[generateParameterNameNetwork(networkIndex, l, "B")])
							else:
								#restore weights
								W[generateParameterNameNetwork(networkIndex, l, "W")].assign(Wbackup[generateParameterNameNetwork(networkIndex, l, "W")])
								B[generateParameterNameNetwork(networkIndex, l, "B")].assign(Bbackup[generateParameterNameNetwork(networkIndex, l, "B")])
def neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedImpermanenceReset(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
	"""Permanence-based learning for layer l: weights of neurons that fire gain permanence;
	all weights then receive random perturbations inversely scaled by their permanence
	(high-permanence weights are perturbed less)."""
	Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
	#update W/B permanence;
	Afinal2D = tf.reduce_mean(Afinal, axis=0)	#average across batch
	Afinal2D = tf.expand_dims(Afinal2D, axis=0)	#make compatible shape to W
	WpermanenceUpdate = tf.multiply(Afinal2D, WpermanenceUpdateRate)	#verify that broadcasting works
	WpermanenceNew = tf.add(Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")], WpermanenceUpdate)	#increase the permanence of neuron weights that successfully fired
	Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")] = WpermanenceNew
	print("WpermanenceUpdate = ", WpermanenceUpdate)
	#stochastically modify weights based on permanence values:
	Wupdate = randomNormal([n_h[l-1], n_h[l]])
	Wupdate = tf.divide(Wupdate, Wpermanence[generateParameterNameNetwork(networkIndex, l, "Wpermanence")])	#higher permanence -> smaller random perturbation
	Wupdate = tf.divide(Wupdate, permanenceNumberBatches)
	Wnew = tf.add(W[generateParameterNameNetwork(networkIndex, l, "W")], Wupdate)
	if(positiveExcitatoryWeights):
		Wnew = tf.maximum(Wnew, 0)	#do not allow weights fall below zero [CHECKTHIS]
	W[generateParameterNameNetwork(networkIndex, l, "W")] = Wnew
	#print("Wupdate = ", Wupdate)
def neuralNetworkPropagationLIANNlearningAlgorithmUninhibitedHebbianStrengthen(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights):
	"""Hebbian learning for layer l: strengthen excitatory weights in proportion to the
	coincidence of pre-synaptic (AprevLayer or ZprevLayer) and post-synaptic (Afinal)
	activity, with optional weight decay and weight clipping."""
	AW = W[generateParameterNameNetwork(networkIndex, l, "W")]
	Afinal, Zfinal, EWactive = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
	#print("Zfinal = ", Zfinal)
	if(useZAcoincidenceMatrix):
		AWcontribution = tf.matmul(tf.transpose(ZprevLayer), Afinal)	#increase excitatory weights that contributed to the output signal #hebbian
	else:
		AWcontribution = tf.matmul(tf.transpose(AprevLayer), Afinal)	#increase excitatory weights that contributed to the output signal #hebbian
	if(randomlyActivateWeights):
		#do not apply weight updates to temporarily suppressed weights [CHECKTHIS];
		AWcontribution = tf.multiply(AWcontribution, EWactive)
	if(normaliseWeightUpdates):
		print("LIANNtf_algorithmLIANN:neuralNetworkPropagationLIANN error - normaliseWeightUpdates: normaliseWeightUpdatesReduceConnectionWeightsForUnassociatedNeurons unimplemented")
	else:
		if(maxWeightUpdateThreshold):
			AWcontribution = tf.minimum(AWcontribution, 1.0)	#cap the per-connection update
	AWupdate = tf.multiply(AWcontribution, learningRate)
	#print("AWupdate = ", AWupdate)
	AW = tf.add(AW, AWupdate)	#apply weight updates
	if(weightDecay):
		#apply decay to all weights;
		AWdecay = -weightDecayRate	#scalar, subtracted uniformly from every weight
		#print("AWdecay = ", AWdecay)
		AW = tf.add(AW, AWdecay)
		#print("AWdecay = ", AWdecay)
	if(positiveExcitatoryWeightsThresholds):
		AW = tf.minimum(AW, 1.0)	#do not allow weights to exceed 1.0 [CHECKTHIS]
		AW = tf.maximum(AW, 0)	#do not allow weights fall below zero [CHECKTHIS]
	W[generateParameterNameNetwork(networkIndex, l, "W")] = AW
def neuralNetworkPropagationLIANNlearningAlgorithmPerformanceInhibitStocasticOptimise(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights, x=None, y=None):
	"""Trial-inhibit a randomly chosen neuron on layer l1 and keep the new Nactive mask
	only if it reduces the full-network propagation loss on (x, y); otherwise revert."""
	A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l1)
	#randomly select a neuron k on layer to trial inhibition performance;
	lossCurrent = calculatePropagationLoss(x, y, networkIndex)	#moved 15 Mar 2022
	Nactivelayer = Nactive[generateParameterNameNetwork(networkIndex, l1, "Nactive")]
	NactivelayerBackup = Nactivelayer	#tf.Variable(Nactivelayer)
	layerInhibitionIndex = tf.cast(randomUniformIndex([1])*n_h[l1], tf.int32)[0].numpy()	#uniform-random neuron index on the layer
	#print("layerInhibitionIndex = ", layerInhibitionIndex)
	if(inhibitionAlgorithmBinary):
		#trial value is itself randomised (0.0 or 1.0);
		inhibitionValue = randomUniformIndex([1])
		inhibitionValue = tf.greater(inhibitionValue, randomUniformMid)
		inhibitionValue = tf.cast(inhibitionValue, dtype=tf.dtypes.float32)
		inhibitionValue = inhibitionValue[0].numpy()
		#print("inhibitionValue = ", inhibitionValue)
	else:
		inhibitionValue = 0.0	#always trial full inhibition of the selected neuron
	Nactivelayer = tf.Variable(ANNtf2_operations.modifyTensorRowColumn(Nactivelayer, True, layerInhibitionIndex, inhibitionValue, False))	#tf.Variable added to retain formatting
	#print("NactivelayerBackup = ", NactivelayerBackup)
	#print("Nactivelayer = ", Nactivelayer)
	Nactive[generateParameterNameNetwork(networkIndex, l1, "Nactive")] = Nactivelayer
	loss = calculatePropagationLoss(x, y, networkIndex)
	#acc = calculateAccuracy(pred, target)	#only valid for softmax class targets
	if(loss < lossCurrent):
		#retain the trial inhibition;
		lossCurrent = loss
		#print("loss < lossCurrent; loss = ", loss, ", lossCurrent = ", lossCurrent)
	else:
		#revert to the previous mask;
		Nactive[generateParameterNameNetwork(networkIndex, l1, "Nactive")] = NactivelayerBackup
		#print("loss !< lossCurrent; loss = ", loss, ", lossCurrent = ", lossCurrent)
def neuralNetworkPropagationLIANNlearningAlgorithmUnnormalisedActivityReset(networkIndex, AprevLayer, ZprevLayer, l1, enableInhibition, randomlyActivateWeights):
	"""Forward-propagate one layer, then regularise neurons whose average activity falls
	outside the configured min/max band (they are passed to the randomisation routine)."""
	layerActivations, _, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l1)
	neuronActivationRegularisation(networkIndex, n_h, l1, layerActivations, randomNormal, Wf=W, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=supportSkipLayers, supportDimensionalityReductionRandomise=supportDimensionalityReductionRandomise, supportDimensionalityReductionRegulariseActivityMinAvg=supportDimensionalityReductionRegulariseActivityMinAvg, supportDimensionalityReductionRegulariseActivityMaxAvg=supportDimensionalityReductionRegulariseActivityMaxAvg)
def forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition=False, randomlyActivateWeights=False):
	"""Propagate one layer: excitatory matmul + bias + activation, optionally followed by
	lateral inhibition (forwardIterationInhibition).

	Returns (Afinal, Zfinal, EWactive) where EWactive is the random weight-dropout mask
	(None unless randomlyActivateWeights).
	"""
	#forward excitatory connections;
	EWactive = None
	EW = W[generateParameterNameNetwork(networkIndex, l, "W")]
	if(randomlyActivateWeights):
		#print("EW = ", EW)
		#temporarily suppress a random subset of weights (keep with probability randomlyActivateWeightsProbability);
		EWactive = tf.less(tf.random.uniform(shape=EW.shape), randomlyActivateWeightsProbability)	#initialised from 0.0 to 1.0
		EWactive = tf.dtypes.cast(EWactive, dtype=tf.dtypes.float32)
		#print("EWactive = ", EWactive)
		#EWactive = tf.dtypes.cast(tf.random.uniform(shape=EW.shape, minval=0, maxval=2, dtype=tf.dtypes.int32), dtype=tf.dtypes.float32)
		EW = tf.multiply(EW, EWactive)
	Z = tf.add(tf.matmul(AprevLayer, EW), B[generateParameterNameNetwork(networkIndex, l, "B")])
	A = activationFunction(Z, n_h[l-1])	#activationFunction takes the previous-layer size as its second argument
	#lateral inhibitory connections (incoming/outgoing);
	if(enableInhibition):
		Afinal, Zfinal = forwardIterationInhibition(networkIndex, AprevLayer, ZprevLayer, l, A, Z)
	else:
		Zfinal = Z
		Afinal = A
	return Afinal, Zfinal, EWactive
def forwardIterationInhibition(networkIndex, AprevLayer, ZprevLayer, l, A, Z):
	"""Apply lateral inhibition to layer l's activations (A) and pre-activations (Z).

	Three mechanisms, selected by module flags:
	- inhibitionAlgorithmBinary: mask by the per-neuron Nactive flags (1.0/0.0);
	- inhibitionAlgorithmArtificial: analytic inhibition (mean-based subtraction, or
	  suppress the whole layer when more than X neurons are active);
	- otherwise: explicit inhibitory neurons via the IWi/IWo weight matrices.
	Returns (Afinal, Zfinal).
	"""
	if(inhibitionAlgorithmBinary):
		Afinal = tf.multiply(A, Nactive[generateParameterNameNetwork(networkIndex, l, "Nactive")])
		Zfinal = tf.multiply(Z, Nactive[generateParameterNameNetwork(networkIndex, l, "Nactive")])
	else:
		if(inhibitionAlgorithmArtificial):
			if(inhibitionAlgorithmArtificialSparsity):
				#subtract an inhibition term proportional to mean previous-layer activity;
				prevLayerSize = n_h[l-1]
				inhibitionResult = tf.math.reduce_mean(AprevLayer, axis=1)	#or ZprevLayer? #batched
				#print("inhibitionResult = ", inhibitionResult)
				inhibitionResult = tf.multiply(inhibitionResult, prevLayerSize)	#normalise by prev layer size #batched
				inhibitionResult = tf.multiply(inhibitionResult, Wmean)	#normalise by average weight
				inhibitionResult = tf.expand_dims(inhibitionResult, axis=1)	#batched
				Zfinal = tf.subtract(Z, inhibitionResult)	#requires broadcasting
				#print("Z = ", Z)
				#print("Zfinal = ", Zfinal)
				Afinal = activationFunction(Zfinal, prevLayerSize=prevLayerSize)
			elif(inhibitionAlgorithmArtificialMoreThanXLateralNeuronActive):
				#zero the entire layer (per batch instance) when too many neurons are active;
				layerSize = n_h[l]
				numActiveLateralNeurons = tf.math.count_nonzero(A, axis=1)
				if(inhibitionAlgorithmMoreThanXLateralNeuronActiveFraction):
					numberActiveNeuronsAllowed = inhibitionAlgorithmMoreThanXLateralNeuronActiveFractionValue*layerSize
				else:
					numberActiveNeuronsAllowed = inhibitionAlgorithmMoreThanXLateralNeuronActiveValue
				numberActiveNeuronsAllowed = int(numberActiveNeuronsAllowed)
				#print("numActiveLateralNeurons = ", numActiveLateralNeurons)
				#print("numberActiveNeuronsAllowed = ", numberActiveNeuronsAllowed)
				inhibitionResult = tf.greater(numActiveLateralNeurons, numberActiveNeuronsAllowed)
				inhibitionResult = tf.logical_not(inhibitionResult)	#1.0 = keep the instance's layer activity
				inhibitionResult = tf.dtypes.cast(inhibitionResult, dtype=tf.dtypes.float32)
				inhibitionResult = tf.expand_dims(inhibitionResult, axis=1)
				#print("numActiveLateralNeurons = ", numActiveLateralNeurons)
				#print("inhibitionResult = ", inhibitionResult)
				Zfinal = tf.multiply(Z, inhibitionResult)	#requires broadcasting
				Afinal = tf.multiply(A, inhibitionResult)	#requires broadcasting
		else:
			#explicit inhibitory neurons: excite them from the current layer, add their (negative-weighted) output back;
			#if((l < numberOfLayers) or positiveExcitatoryWeightsFinalLayer):
			#print("AprevLayer = ", AprevLayer)
			#print("Z = ", Z)
			IZi = tf.matmul(A, IWi[generateParameterNameNetwork(networkIndex, l, "IWi")])	#CHECKTHIS: inhibitory neuron firing is a function of current (lateral) layer (not previous layer)
			IAi = activationFunction(IZi, n_h[l-1])
			#print("IZi = ", IZi)
			#print("IAi = ", IAi)
			IZo = tf.matmul(IAi, IWo[generateParameterNameNetwork(networkIndex, l, "IWo")])
			#print("W = ", W[generateParameterNameNetwork(networkIndex, l, "W")])
			#print("IZo = ", IZo)
			#final activations;
			Zfinal = tf.add(Z, IZo)
			#print("Zfinal = ", Zfinal)
			Afinal = activationFunction(Zfinal, n_h[l-1])
	if(Athreshold):
		Afinal = tf.minimum(Afinal, AthresholdValue)	#clip activations at the configured ceiling
		#print("Afinal = ", Afinal)
	return Afinal, Zfinal
#LIANNlearningAlgorithmCorrelation metric:
def neuronActivationCorrelationMinimisation(networkIndex, n_h, l1, A, randomNormal, Wf, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=False, supportDimensionalityReductionRandomise=True, maxCorrelation=0.95):
	"""Compute each neuron's maximum pairwise activation correlation across the batch and
	randomise (via randomiseLayerNeurons) every neuron whose max correlation >= maxCorrelation.

	NOTE(review): the non-correlationMatrix branch (useCorrelationMatrix=False) is explicitly
	marked incomplete — it references an undefined calculateCorrelation and never sets
	kMaxCorrelation, so it would raise at the kPassArray computation if enabled.
	"""
	resetNeuronIfSameValueAcrossBatch = False	#reset neuron if all values of a neuron k being the same value across the batch
	randomlySelectCorrelatedNeuronToReset = False	#randomly select one of each correlated neuron to reset
	useCorrelationMatrix = True	#only implementation currently available
	Atransposed = tf.transpose(A)
	if(useCorrelationMatrix):
		correlationMatrix = LIANNtf_algorithmLIANN_math.calculateOffDiagonalCorrelationMatrix(A, nanReplacementValue=0.0, getOffDiagonalCorrelationMatrix=True)	#off diagonal correlation matrix is required so that do not duplicate k1->k2 and k2->k1 correlations #CHECKTHIS: nanReplacementValue
		#nanReplacementValue=0.0; will set the correlation as 0 if all values of a neuron k being the same value across the batch
		#print("correlationMatrix = ", correlationMatrix)
		#print("correlationMatrix.shape = ", correlationMatrix.shape)
	if(useCorrelationMatrix):
		if(randomlySelectCorrelatedNeuronToReset):
			#for each correlated pair, randomly pick one side's max-correlation value via complementary masks;
			correlationMatrixRotated = np.transpose(correlationMatrix)
			k1MaxCorrelation = correlationMatrix.max(axis=0)
			k2MaxCorrelation = correlationMatrixRotated.max(axis=0)
			#print("k1MaxCorrelation = ", k1MaxCorrelation)
			#print("k2MaxCorrelation = ", k2MaxCorrelation)
			kSelect = np.random.randint(0, 2, size=k1MaxCorrelation.shape)	#coin flip per neuron
			mask1 = kSelect.astype(bool)
			mask2 = np.logical_not(mask1)
			mask1 = mask1.astype(float)
			mask2 = mask2.astype(float)
			k1MaxCorrelation = np.multiply(k1MaxCorrelation, mask1)
			k2MaxCorrelation = np.multiply(k2MaxCorrelation, mask2)
			kMaxCorrelation = np.add(k1MaxCorrelation, k2MaxCorrelation)
			#print("correlationMatrix = ", correlationMatrix)
			#print("correlationMatrixRotated = ", correlationMatrixRotated)
			#print("k1MaxCorrelation = ", k1MaxCorrelation)
			#print("k2MaxCorrelation = ", k2MaxCorrelation)
			#print("mask1 = ", mask1)
			#print("mask2 = ", mask2)
			#print("kMaxCorrelation = ", kMaxCorrelation)
		else:
			#take each neuron's max correlation over both matrix orientations;
			k1MaxCorrelation = correlationMatrix.max(axis=0)
			k2MaxCorrelation = correlationMatrix.max(axis=1)
			#k1MaxCorrelation = np.amax(correlationMatrix, axis=0)	#reduce max
			#k2MaxCorrelation = np.amax(correlationMatrix, axis=1)	#reduce max
			kMaxCorrelation = np.maximum(k1MaxCorrelation, k2MaxCorrelation)
			#kMaxCorrelationIndex = correlationMatrix.argmax(axis=0)	#or axis=1
		kMaxCorrelation = tf.convert_to_tensor(kMaxCorrelation, dtype=tf.dtypes.float32)	#make sure same type as A
		#print("kMaxCorrelation;", kMaxCorrelation)
		if(resetNeuronIfSameValueAcrossBatch):
			#force-reset neurons whose activation sums to zero over the whole batch;
			AbatchAllZero = tf.reduce_sum(A, axis=0)
			AbatchAllZero = tf.equal(AbatchAllZero, 0.0)
			AbatchAllZero = tf.cast(AbatchAllZero, tf.float32)
			kMaxCorrelation = tf.add(kMaxCorrelation, AbatchAllZero)	#set kMaxCorrelation[k]=1.0 if AbatchAllZero[k]=True
			#print("AbatchAllZero;", AbatchAllZero)
	else:
		#incomplete;
		for k1 in range(n_h[l1]):
			#calculate maximum correlation;
			k1MaxCorrelation = 0.0
			for k2 in range(n_h[l1]):
				if(k1 != k2):
					Ak1 = Atransposed[k1]	#Ak: 1d vector of batchsize
					Ak2 = Atransposed[k2]	#Ak: 1d vector of batchsize
					k1k2correlation = calculateCorrelation(Ak1, Ak2)	#undefined
	#generate masks (based on highly correlated k/neurons);
	kPassArray = tf.less(kMaxCorrelation, maxCorrelation)	#True = neuron retained, False = neuron randomised
	randomiseLayerNeurons(networkIndex, n_h, l1, kPassArray, randomNormal, Wf, Wfname, Wb, Wbname, updateAutoencoderBackwardsWeights, supportSkipLayers, supportDimensionalityReductionRandomise)
def neuronActivationRegularisation(networkIndex, n_h, l1, A, randomNormal, Wf, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=False, supportDimensionalityReductionRandomise=True, supportDimensionalityReductionRegulariseActivityMinAvg=0.1, supportDimensionalityReductionRegulariseActivityMaxAvg=0.9):
	"""Reset (randomise/zero) layer-l1 neurons whose batch activation frequency falls outside
	the [MinAvg, MaxAvg] band; any non-zero activation level counts equally (binarised)."""
	activationMask = tf.cast(tf.cast(A, tf.bool), tf.float32)	#binarise: any non-zero activation counts as 1.0
	activationFrequency = tf.reduce_mean(activationMask, axis=0)	#per-neuron firing rate over the batch
	aboveMin = tf.greater(activationFrequency, supportDimensionalityReductionRegulariseActivityMinAvg)
	belowMax = tf.less(activationFrequency, supportDimensionalityReductionRegulariseActivityMaxAvg)
	kPassArray = tf.logical_and(aboveMin, belowMax)	#neurons outside the band are reinitialised
	randomiseLayerNeurons(networkIndex, n_h, l1, kPassArray, randomNormal, Wf, Wfname, Wb, Wbname, updateAutoencoderBackwardsWeights, supportSkipLayers, supportDimensionalityReductionRandomise)
def randomiseLayerNeurons(networkIndex, n_h, l1, kPassArray, randomNormal, Wf, Wfname="W", Wb=None, Wbname=None, updateAutoencoderBackwardsWeights=False, supportSkipLayers=False, supportDimensionalityReductionRandomise=True):
	"""Reinitialise the incoming (and optionally autoencoder backwards) weights of every
	layer-l1 neuron k for which kPassArray[k] is False; passing neurons keep their weights.
	Replacement weights are random normal when supportDimensionalityReductionRandomise, else zero.
	Mutates Wf (and Wb when updateAutoencoderBackwardsWeights) in place."""
	kFailArray = tf.logical_not(kPassArray)
	#forward masks: broadcast across weight rows (shape [1, n_h[l1]])
	kPassArrayF = tf.cast(tf.expand_dims(kPassArray, axis=0), tf.float32)
	kFailArrayF = tf.cast(tf.expand_dims(kFailArray, axis=0), tf.float32)
	if(updateAutoencoderBackwardsWeights):
		#backward masks: broadcast across weight columns (shape [n_h[l1], 1])
		kPassArrayB = tf.cast(tf.expand_dims(kPassArray, axis=1), tf.float32)
		kFailArrayB = tf.cast(tf.expand_dims(kFailArray, axis=1), tf.float32)
	#apply masks to weights (randomise specific k/neurons);
	if(supportSkipLayers):
		for l2 in range(0, l1):
			#cleanup: removed redundant `if(l2 < l1)` guard — always true for l2 in range(0, l1)
			if(supportDimensionalityReductionRandomise):
				WlayerFrand = randomNormal([n_h[l2], n_h[l1]])
			else:
				WlayerFrand = tf.zeros([n_h[l2], n_h[l1]], dtype=tf.dtypes.float32)
			Wf[generateParameterNameNetworkSkipLayers(networkIndex, l2, l1, Wfname)] = applyMaskToWeights(Wf[generateParameterNameNetworkSkipLayers(networkIndex, l2, l1, Wfname)], WlayerFrand, kPassArrayF, kFailArrayF)
			if(updateAutoencoderBackwardsWeights):
				if(supportDimensionalityReductionRandomise):
					WlayerBrand = randomNormal([n_h[l1], n_h[l2]])
				else:
					WlayerBrand = tf.zeros([n_h[l1], n_h[l2]], dtype=tf.dtypes.float32)
				Wb[generateParameterNameNetworkSkipLayers(networkIndex, l2, l1, Wbname)] = applyMaskToWeights(Wb[generateParameterNameNetworkSkipLayers(networkIndex, l2, l1, Wbname)], WlayerBrand, kPassArrayB, kFailArrayB)
	else:
		if(supportDimensionalityReductionRandomise):
			WlayerFrand = randomNormal([n_h[l1-1], n_h[l1]])
		else:
			WlayerFrand = tf.zeros([n_h[l1-1], n_h[l1]], dtype=tf.dtypes.float32)
		Wf[generateParameterNameNetwork(networkIndex, l1, Wfname)] = applyMaskToWeights(Wf[generateParameterNameNetwork(networkIndex, l1, Wfname)], WlayerFrand, kPassArrayF, kFailArrayF)
		if(updateAutoencoderBackwardsWeights):
			if(supportDimensionalityReductionRandomise):
				WlayerBrand = randomNormal([n_h[l1], n_h[l1-1]])
			else:
				WlayerBrand = tf.zeros([n_h[l1], n_h[l1-1]], dtype=tf.dtypes.float32)
			Wb[generateParameterNameNetwork(networkIndex, l1, Wbname)] = applyMaskToWeights(Wb[generateParameterNameNetwork(networkIndex, l1, Wbname)], WlayerBrand, kPassArrayB, kFailArrayB)
def applyMaskToWeights(Wlayer, WlayerRand, kPassArray, kFailArray):
	"""Blend two weight tensors: keep Wlayer where kPassArray is 1.0, substitute WlayerRand
	where kFailArray is 1.0. The two float masks are complementary and broadcast over Wlayer."""
	kept = tf.multiply(Wlayer, kPassArray)
	replaced = tf.multiply(WlayerRand, kFailArray)
	return tf.add(kept, replaced)
#LIANNlearningAlgorithmStochastic metric:
def learningAlgorithmStochasticCalculateMetric(networkIndex, AprevLayer, ZprevLayer, l):
	"""Run one forward iteration of layer l and compute the stochastic-optimisation metric.
	NOTE(review): if neither learningAlgorithm* flag is set, `metric` is unbound at the
	return — confirm one of the two flags is always enabled."""
	randomlyActivateWeights = False
	if(randomlyActivateWeightsDuringTrain):
		randomlyActivateWeights = True	#bug fix: was lowercase `true` (NameError at runtime)
	if(learningAlgorithmCorrelationStocasticOptimise):
		enableInhibition = False
		A, Z, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
		metric = learningAlgorithmStochasticCalculateMetricCorrelation(A)
	elif(learningAlgorithmMaximiseAndEvenSignalStochasticOptimise):
		enableInhibition = True
		Afinal, Zfinal, _ = forwardIteration(networkIndex, AprevLayer, ZprevLayer, l, enableInhibition, randomlyActivateWeights)
		metric = learningAlgorithmStochasticCalculateMetricMaximiseAndEvenSignal(Afinal, metric1Weighting, metric2Weighting)
	return metric
def learningAlgorithmStochasticCalculateMetricCorrelation(A):
	"""Return 1 - mean pairwise correlation of layer activations A
	(higher metric = more decorrelated neurons)."""
	meanCorrelation = LIANNtf_algorithmLIANN_math.calculateCorrelationMean(A)
	print("meanCorrelation = ", meanCorrelation)
	return 1 - meanCorrelation
def learningAlgorithmStochasticCalculateMetricMaximiseAndEvenSignal(Afinal, metric1Weighting, metric2Weighting):
	"""Combined stochastic-search metric.
	Objective 1: maximise the (thresholded) uninhibited signal across the batch.
	Objective 2: ensure all layer neurons receive even activation across the batch.
	Returns weighted metric1 / weighted metric2, or 0.0 when metric2 is zero."""
	firing = tf.dtypes.cast(tf.greater(Afinal, 0.0), dtype=tf.dtypes.float32)	#threshold so higher average weights are not preferenced
	metric1 = tf.reduce_mean(firing).numpy() * metric1Weighting	#average output across batch, across layer
	perNeuronMean = tf.math.reduce_mean(Afinal, axis=0)	#mean activation of each neuron k over the batch
	metric2 = tf.math.reduce_std(perNeuronMean).numpy() * metric2Weighting	#spread of those per-neuron means
	if metric2 != 0:
		return metric1 / metric2
	return 0.0
def getRandomNetworkParameter(networkIndex, currentSubsetOfParameters):
	"""Draw a random network parameter index tuple
	(parameterTypeWorB, layer, hIndexCurrentLayer, hIndexPreviousLayer, variationDirection)
	that is not already present in currentSubsetOfParameters.
	NOTE(review): single-argument random.randint(x) implies `random` is numpy.random here
	(high bound exclusive), not the stdlib module — confirm the module-level import.
	NOTE(review): reads module-global n_h — confirm it is initialised before use."""
	foundNewParameter = False
	while not foundNewParameter:
		variationDirection = random.randint(2)	#0 or 1: direction of the perturbation
		layer = random.randint(1, len(n_h))	#layer whose parameter is perturbed
		parameterTypeWorBtemp = random.randint(n_h[layer-1]+1) #ensure that B parameter updates occur/tested less frequently than W parameter updates #OLD: random.randint(2)
		if(parameterTypeWorBtemp == n_h[layer-1]):
			parameterTypeWorB = 0	#bias (B) parameter: only 1 draw in n_h[layer-1]+1 hits this
		else:
			parameterTypeWorB = 1	#weight (W) parameter
		hIndexCurrentLayer = random.randint(n_h[layer])	#randomNormal(n_h[l])
		hIndexPreviousLayer = random.randint(n_h[layer-1])	#randomNormal(n_h[l-1])
		networkParameterIndex = (parameterTypeWorB, layer, hIndexCurrentLayer, hIndexPreviousLayer, variationDirection)
		#reject candidates already tried in this subset
		matches = [item for item in currentSubsetOfParameters if item == networkParameterIndex]
		if len(matches) == 0:
			foundNewParameter = True
	return networkParameterIndex
def activationFunction(Z, prevLayerSize=None):
	#single dispatch point for the layer activation; currently delegates to reluCustomPositive
	return reluCustomPositive(Z, prevLayerSize)
def reluCustomPositive(Z, prevLayerSize=None):
	"""ReLU variant for positive-only (excitatory) weights.
	When positiveExcitatoryWeightsActivationFunctionOffset is set, Z is first shifted down by the
	expected pre-activation (prevLayerSize*normalisedAverageInput*Wmean) — required because
	negative weights are not used — and the ReLU output slope is doubled to renormalise the
	input:output signal. Otherwise a plain ReLU is applied."""
	if not positiveExcitatoryWeightsActivationFunctionOffset:
		return tf.nn.relu(Z)
	#CHECKTHIS: consider sigmoid instead of relu
	expectedZ = prevLayerSize*normalisedAverageInput*Wmean	#predicted mean pre-activation
	shifted = tf.subtract(Z, expectedZ)
	return tf.multiply(tf.nn.relu(shifted), 2.0)	#double the slope of A to normalise the signal
def count_zero(M, axis=None): #emulates count_nonzero behaviour
	"""Count zero-valued elements of tensor M, optionally along `axis`.
	NOTE(review): the axis branch returns a tensor while the no-axis branch returns a
	numpy scalar (.numpy() called) — confirm callers expect this asymmetry."""
	if axis is not None:
		nonZeroElements = tf.math.count_nonzero(M, axis=axis)
		totalElements = tf.shape(M)[axis]
		zeroElements = tf.subtract(totalElements, nonZeroElements)
	else:
		totalElements = tf.size(M)
		nonZeroElements = tf.math.count_nonzero(M).numpy()
		zeroElements = tf.subtract(totalElements, nonZeroElements)
		zeroElements = zeroElements.numpy()
	return zeroElements
| StarcoderdataPython |
8384 | #!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.
This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def func(l, k):
    """Return True if any two numbers (at distinct indices) in l add up to k.

    Single pass with a set of previously seen values: for each element,
    check whether its complement (k - element) was already seen.
    O(n) time / O(n) space instead of the previous O(n^2) pair enumeration,
    and satisfies the "do it in one pass" bonus. Debug printing removed.
    """
    seen = set()
    for element in l:
        if k - element in seen:
            return True
        seen.add(element)
    return False


if __name__ == '__main__':
    print(func([10, 15, 3, 7], 17))
| StarcoderdataPython |
29696 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import default_proxy_handler, get_content
from ykdl.util.match import match1, matchall
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.compact import install_opener, build_opener, HTTPCookieProcessor
import json
import sys
import base64
import uuid
import time
py3 = sys.version_info[0] == 3
# Python 2/3 compatibility shims for bytes translation tables and bytearray->str decoding.
if py3:
    maketrans = bytes.maketrans
    bytearray2str = bytearray.decode
else:
    from string import maketrans
    bytearray2str = str
# Make base64 output URL/query safe: '+', '/', '=' become '_', '~', '-'.
encode_translation = maketrans(b'+/=', b'_~-')
def generate_did_tk2():
    """Generate a random device id plus its matching 'tk2' token: a reversed,
    URL-safe base64 encoding of 'pno=...|ver=...|did=...|clit=<unix time>'."""
    did = str(uuid.uuid4())
    payload = 'pno=1000|ver=0.3.0001|did={}|clit={}'.format(did, int(time.time()))
    if not isinstance(payload, bytes):
        payload = payload.encode()
    token = bytearray(base64.b64encode(payload).translate(encode_translation))
    token.reverse()
    return did, bytearray2str(token)
class Hunantv(VideoExtractor):
    """Extractor for 芒果TV (mgtv.com): resolves a video id from the page URL,
    queries the pcweb player/source APIs, and exposes one m3u8 stream per
    quality profile."""
    name = u"芒果TV (HunanTV)"
    # Quality profiles as reported by the API, best first, and their short codes.
    supported_stream_profile = [ u'蓝光', u'超清', u'高清', u'标清' ]
    supported_stream_types = [ 'BD', 'TD', 'HD', 'SD' ]
    profile_2_types = { u'蓝光': 'BD', u'超清': 'TD', u'高清': 'HD', u'标清': 'SD' }
    def prepare(self):
        """Fetch metadata for self.url/self.vid and return a populated VideoInfo."""
        # Cookies must persist across the two API calls; keep any configured proxy.
        handlers = [HTTPCookieProcessor()]
        if default_proxy_handler:
            handlers += default_proxy_handler
        install_opener(build_opener(*handlers))
        info = VideoInfo(self.name)
        if self.url and not self.vid:
            # Try the canonical URL form first, then fall back to scraping the page.
            self.vid = match1(self.url, 'https?://www.mgtv.com/b/\d+/(\d+).html')
            if self.vid is None:
                html = get_content(self.url)
                self.vid = match1(html, 'vid=(\d+)', 'vid=\"(\d+)', 'vid: (\d+)')
        # did/tk2 are anti-leech tokens required by both API endpoints.
        did, tk2 = generate_did_tk2()
        api_info_url = 'https://pcweb.api.mgtv.com/player/video?video_id={}&did={}&tk2={}'.format(self.vid, did, tk2)
        meta = json.loads(get_content(api_info_url))
        assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
        assert meta['data'], '[Failed] Video info not found.'
        # pm2 from the first call authorises the getSource call.
        pm2 = meta['data']['atc']['pm2']
        info.title = meta['data']['info']['title']
        api_source_url = 'https://pcweb.api.mgtv.com/player/getSource?video_id={}&did={}&pm2={}&tk2={}'.format(self.vid, did, pm2, tk2)
        meta = json.loads(get_content(api_source_url))
        assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
        assert meta['data'], '[Failed] Video source not found.'
        data = meta['data']
        domain = data['stream_domain'][0]
        # One stream entry per quality; each resolves to a single m3u8 URL.
        for lstream in data['stream']:
            if lstream['url']:
                url = json.loads(get_content(domain + lstream['url']))['info']
                info.streams[self.profile_2_types[lstream['name']]] = {'container': 'm3u8', 'video_profile': lstream['name'], 'src' : [url]}
                info.stream_types.append(self.profile_2_types[lstream['name']])
        # Keep stream types ordered best-to-worst per supported_stream_types.
        info.stream_types= sorted(info.stream_types, key = self.supported_stream_types.index)
        return info
    def prepare_list(self):
        """Return the list of episode page URLs found on a playlist page."""
        html = get_content(self.url, headers={})
        return matchall(html, ['"a-pic-play" href="([^"]+)"'])
site = Hunantv()  # module-level instance; presumably picked up by ykdl's extractor dispatch — TODO confirm
| StarcoderdataPython |
3231138 | # Donut Damage Skin
# `sm` is the script manager injected by the game-server script runtime — TODO confirm.
success = sm.addDamageSkin(2435161)  # 2435161: presumably the Donut damage-skin item id — verify
if success:
    sm.chat("The Donut Damage Skin has been added to your account's damage skin collection.")
| StarcoderdataPython |
3387405 | # Generated by Django 3.2.5 on 2021-08-07 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds a blank-able many-to-many
    # 'students' relation from courses.Course to accounts.User (reverse accessor
    # 'courses_joined'). Do not edit the operations by hand.
    dependencies = [
        ('accounts', '0002_auto_20210724_1307'),
        ('courses', '0006_auto_20210729_1232'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='students',
            field=models.ManyToManyField(blank=True, related_name='courses_joined', to='accounts.User'),
        ),
    ]
| StarcoderdataPython |
# Desafio 045 -> crie um programa que faça o computador jogar jokenpo com vc (pedra papel ou tesoura)
from random import choice
from time import sleep
from sys import exit

# Fixes: the original `elif jkp != ...` chain was always true (redundant) and a
# leftover debug print revealed the computer's choice before/after the result.
lista = ['pedra', 'papel', 'tesoura']
jk = str(input('Pedra, papel ou tesoura?'))
jkp = jk.strip().lower()
jkppc = choice(lista)  # computer's move
if jkp in lista:
    print('Tudo bem, vamos lá!')
else:
    print('Acho que estamos jogando jogos diferentes, quer tentar novamente?')
    exit()
# Countdown before revealing the result.
sleep(1)
print('\033[31;1mJo\033[m...')
sleep(1)
print('...\033[31;1mKen\033[m...')
sleep(1)
print(' ...\033[31;1mPô\033[m!')
sleep(1)
# Winning pairs from the player's perspective: (player, computer).
if jkp == jkppc:
    res = ('Deu empate')
elif (jkp, jkppc) in (('pedra', 'tesoura'), ('papel', 'pedra'), ('tesoura', 'papel')):
    res = ('Você ganhou')
else:
    res = ('Você perdeu')
print('{}, pois você escolheu \033[1;32m{}\033[m e o \033[1mPython\033[m escolheu \033[33;1m{}\033[m!'.format(res, jkp, jkppc))
| StarcoderdataPython |
4802165 | <gh_stars>1-10
from django.test import TestCase
from django.urls import reverse
from http import HTTPStatus
class SignUpViewTests(TestCase):
    """
    (view) Signup_view tests.
    A class that perform the following tests:
    1 - Url by name
    """
    def test_signup_view_url_by_name(self):
        # Resolving 'signup_view' by name and requesting it must return 200 OK.
        response = self.client.get(reverse('signup_view'))
        self.assertEqual(response.status_code, HTTPStatus.OK)
| StarcoderdataPython |
1627503 | <reponame>DavidLlorens/algoritmia<filename>src/demos/dynamicprogramming/coinchange6.py
#coding: latin1
#< full
from algoritmia.problems.generalizedcoinchange.dynamicprogramming6 import \
RecursiveDynamicCoinChanger
# Demo: minimum-weight coin change for amount 7 with coin values [1, 2, 5] and weights [1, 1, 4].
print(RecursiveDynamicCoinChanger([1, 2, 5], [1, 1, 4]).weight(7))
#> full | StarcoderdataPython |
3364346 | <gh_stars>0
from logging import info
from typing import Union
from borsh_construct import U64
from django.conf import settings
from eth_account.messages import encode_defunct
from eth_utils import remove_0x_prefix
from solana.publickey import PublicKey
from web3 import Web3
from web3.datastructures import AttributeDict
from web3.types import (
HexBytes,
Wei,
)
from backend.consts import (
CONTRACT_ERROR,
NETWORK_NAMES,
SIGNER_INFO,
)
from networks.models import (
Transaction,
)
from networks.types import HASH_LIKE
from ..exceptions import (
ContractPaused,
ContractTransactionAlreadyProcessed,
ContractTransactionAlreadyReverted,
)
from ..models import Contract
# Blockchain ids whose transit token uses 6 decimals (vs the default 18);
# read by _transform_params to rescale amounts between chains.
CONTRACT_BLOCKCHAIN_IDS_TOKEN_WITH_SIX_DECIMALS = settings \
    .CONTRACT_BLOCKCHAIN_IDS_TOKEN_WITH_SIX_DECIMALS
def _get_params_to_transfer_to_other_blockchain(
    network,
    event: AttributeDict,
    tx_hash: str = '',
) -> AttributeDict:
    """
    Build the parameter set used for the trade in the target network from a
    source-network event plus the stored source Transaction record.
    :param network: source network model instance (its id keys the Transaction lookup)
    :param event: event data of the source transaction
    :param tx_hash: optional explicit hash of the source transaction; when empty,
        the hash is taken from the event itself
    """
    # Shape of a typical event (for reference):
    # AttributeDict(
    #     {
    #         'RBCAmountIn': <transit_token_amount_in:int>,
    #         'amountSpent': <amount_spent:int>,
    #     }
    # ),
    # 'event': <event_name:str>,
    # 'logIndex': <log_index:int>,
    # 'transactionIndex': <txn_index:int>,
    # 'transactionHash': HexBytes(
    #     <txn_hash:str>
    # ),
    # 'address': <user_address:str>,
    # 'blockHash': HexBytes(
    #     <block_hash:str>
    # ),
    # 'blockNumber': <block_number:int>
    # Normalise plain dicts (e.g. from JSON) into web3 AttributeDicts.
    if not isinstance(event, AttributeDict):
        event['args'] = AttributeDict(event['args'])
        event = AttributeDict(event)
    original_txn_hash = event.transactionHash
    if tx_hash:
        original_txn = Transaction.get_transaction(
            network_id=network.id,
            txn_hash=tx_hash,
        )
    else:
        original_txn = Transaction.get_transaction(
            network_id=network.id,
            txn_hash=original_txn_hash,
        )
    # Positional unpacking of the stored contract-call params.
    # NOTE(review): indices (0, 3, 5, 6, 7, 8, 9, -1) mirror the source contract's
    # call signature — confirm against the contract ABI before changing.
    blockchain_id = original_txn.data.get('params')[0]
    amount_spent = original_txn.event_data \
        .get('args') \
        .get('amountSpent')
    transit_token_amount_in = original_txn.event_data \
        .get('args') \
        .get('RBCAmountIn')
    token_out_min = original_txn.data.get('params')[5]
    new_address = original_txn.data.get('params')[6]
    second_path = original_txn.data.get('params')[3]
    provider_address = original_txn.data.get('params')[7]
    swap_to_crypto = original_txn.data.get('params')[8]
    swap_exact_for = original_txn.data.get('params')[9]
    contract_function = original_txn.data.get('params')[-1]
    return AttributeDict(
        {
            'original_txn_hash': original_txn_hash,
            'blockchain_id': blockchain_id,
            'token_out_min': token_out_min,
            'second_path': second_path,
            'new_address': new_address,
            'transit_token_amount_in': transit_token_amount_in,
            'amount_spent': amount_spent,
            'provider_address': provider_address,
            'swap_to_crypto': swap_to_crypto,
            'swap_exact_for': swap_exact_for,
            'contract_function': contract_function,
        }
    )
def _sign_hash(hash: HASH_LIKE) -> str:
    """Sign a hash (hex string or bytes-like) with the validator's private key,
    log the result, and return the signature as a hex string."""
    hex_hash = hash if isinstance(hash, str) else hash.hex()
    signed = Web3().eth.account.sign_message(
        encode_defunct(hexstr=hex_hash),
        settings.VALIDATOR_PRIVATE_KEY,
    )
    signature = signed.signature.hex()
    info(
        SIGNER_INFO.format(
            f'Hash \"{hex_hash}\" was signed.'
            f' Signature: \"{signature}\".'
        )
    )
    return signature
def _check_contract_is_paused(
    contract: Contract,
    original_txn_hash: HASH_LIKE,
    hashed_params: HexBytes,
):
    """Return True when the contract is active; raise ContractPaused otherwise.
    :param contract: Contract instance whose 'paused' state is read
    :param original_txn_hash: source-transaction hash, attached for logging
    :param hashed_params: hashed params, attached for logging
    """
    if not contract.is_paused:
        return True
    raise ContractPaused(
        CONTRACT_ERROR.format(
            f'Contract with the \"{contract.address}\" address is paused.'
        ),
        {
            'contract': contract,
            'original_txn_hash': original_txn_hash,
            'hashed_params': hashed_params,
        }
    )
def _check_is_processed_transaction(
    contract: Contract,
    original_txn_hash: HASH_LIKE,
    hashed_params: HexBytes = '',
):
    """
    Checks if transaction was already completed in target network or not
    using contract read method 'processedTransactions'.
    Status 0 (falsy) is returned unchanged; status 1 raises
    ContractTransactionAlreadyProcessed; status 2 raises
    ContractTransactionAlreadyReverted.
    NOTE(review): any other status silently falls through and returns None —
    confirm the contract can only return 0/1/2.
    :param contract: Contract instance which will be checked
    :param original_txn_hash: hash of transaction in source network, for logging
    :param hashed_params: hashed params, for logging
    """
    contract_status = contract.is_processed_transaction(original_txn_hash)
    # Normalise the hash to a hex string for readable error messages.
    if isinstance(original_txn_hash, (bytes, HexBytes)):
        original_txn_hash = original_txn_hash.hex()
    # The {action} placeholder is filled per status branch below.
    exception_error_messsage = (
        f'Transaction with the \"{original_txn_hash}\" original'
        ' hash is already {action} '
        f'at the \"{contract.address}\" contract address.'
    )
    if not contract_status:
        return contract_status
    elif contract_status == 1:
        raise ContractTransactionAlreadyProcessed(
            exception_error_messsage.format(action='processed'),
            {
                'contract': contract,
                'original_txn_hash': original_txn_hash,
                'hashed_params': hashed_params,
            }
        )
    elif contract_status == 2:
        raise ContractTransactionAlreadyReverted(
            exception_error_messsage.format(action='reverted'),
            {
                'contract': contract,
                'original_txn_hash': original_txn_hash,
                'hashed_params': hashed_params,
            }
        )
def _transform_params(params: AttributeDict, from_contract: Contract):
    """
    Rescale transit_token_amount_in when source and target chains use different
    transit-token decimals (6 vs 18); returns a new AttributeDict, leaving the
    input untouched.
    """
    to_contract = Contract.get_contract_by_blockchain_id(
        blockchain_id=params.blockchain_id,
    )
    new_params = dict(params)
    # TODO: hack — rescales the transit token amount between 18- and 6-decimal chains.
    # NOTE(review): `/ 10 ** 12` is true division, so very large ints go through a
    # float before int() — confirm precision is acceptable or switch to //.
    if to_contract.blockchain_id in CONTRACT_BLOCKCHAIN_IDS_TOKEN_WITH_SIX_DECIMALS:
        new_params['transit_token_amount_in'] = int(
            new_params['transit_token_amount_in'] / 10 ** 12
        )
    elif from_contract.blockchain_id in CONTRACT_BLOCKCHAIN_IDS_TOKEN_WITH_SIX_DECIMALS:
        new_params['transit_token_amount_in'] = int(
            new_params['transit_token_amount_in'] * 10 ** 12
        )
    return AttributeDict(new_params)
def get_hash_packed_solana(
    new_address: str,
    rbc_amount_in: int,
    original_txn_hash: HASH_LIKE,
    blockchain_id: int,
) -> str:
    """
    Hashes parameters for Solana blockchain (keccak over the packed bytes).
    NOTE(review): integers are packed with borsh U64 while the address is the raw
    32-byte Solana public key — must match the on-chain program's packing exactly.
    :param new_address: wallet address in target network (base58 Solana pubkey)
    :param rbc_amount_in: amount of transit token which will be used in target network
    :param original_txn_hash: hash of the source transaction
    :param blockchain_id: number of target network
    :return: keccak hash as a hex string
    """
    if not isinstance(original_txn_hash, str):
        original_txn_hash = original_txn_hash.hex()
    keccak_hex = Web3.solidityKeccak(
        ['bytes32', 'bytes8', 'bytes32', 'bytes8'],
        [
            bytes(PublicKey(new_address)),
            U64.build(rbc_amount_in),
            Web3.toBytes(hexstr=original_txn_hash),
            U64.build(blockchain_id)
        ]
    ).hex()
    return keccak_hex
def _get_signature(
    original_txn_hash: HASH_LIKE,
    blockchain_id: int,
    new_address: str,
    transit_token_amount_in: Union[int, Wei],
) -> str:
    """
    Returns the validator's signature (hex, without 0x prefix) over the packed
    transfer parameters; the packing scheme depends on the target network type.
    :param original_txn_hash: hash of the source transaction
    :param blockchain_id: number of target network
    :param new_address: wallet address in target network
    :param transit_token_amount_in: amount of transit token which will be used in target network
    :raises Exception: if transit_token_amount_in is zero/falsy
    """
    if not all((transit_token_amount_in,)):
        raise Exception(
            f'Field \"transit_token_amount_in\" ({transit_token_amount_in=}) '
            'not be equal by 0.'
        )
    contract = Contract.get_contract_by_blockchain_id(blockchain_id)
    if contract.network.title == NETWORK_NAMES.get('solana'):
        # Solana: borsh/keccak packing implemented locally.
        hashed_params = get_hash_packed_solana(
            new_address=new_address,
            rbc_amount_in=transit_token_amount_in,
            original_txn_hash=original_txn_hash,
            blockchain_id=blockchain_id,
        )
    elif contract.network.title == NETWORK_NAMES.get('near'):
        # NEAR: delegates packing to the contract with blockchain_id 1.
        # NOTE(review): the magic id 1 presumably refers to the Ethereum contract — confirm.
        hashed_params = Contract.get_contract_by_blockchain_id(1).get_hash_packed(
            new_address,
            transit_token_amount_in,
            original_txn_hash,
            blockchain_id,
        )
    else:
        # EVM networks: the target contract packs its own parameters.
        hashed_params = contract.get_hash_packed(
            new_address,
            transit_token_amount_in,
            original_txn_hash,
            blockchain_id,
        )
    return remove_0x_prefix(
        _sign_hash(
            hash=hashed_params,
        )
    )
| StarcoderdataPython |
118483 | import numpy as np
from methods_project_old import MaximumIterationError
from robot_arm import RobotArm
def test_point_outside_outer_circle(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Run the solver with a destination placed beyond the arm's maximum reach."""
    print('---- Test with destination outside configuration space ----')
    analytical_tests('ooc', lengths, n, plot_initial, plot_minimizer, animate)
def test_point_innside_inner_circle(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Run the solver with a destination inside the unreachable inner hole of an
    annulus-shaped configuration space; skipped when the workspace is a disk."""
    longest_idx = np.argmax(lengths)
    others_sum = np.sum(np.delete(lengths, longest_idx))
    if others_sum > lengths[longest_idx]:
        # Precondition violated: workspace is a full disk, no inner hole exists.
        print("---- Configuration space not an annulus. Can't run test ----")
        return
    print("---- Test with destination innside inner circle of configuration space ----")
    analytical_tests('iic', lengths, n, plot_initial, plot_minimizer, animate)
def analytical_tests(test, lengths, n, plot_initial, plot_minimizer, animate):
    """Place the destination analytically and run the solver.

    test: 'iic' puts the point halfway into the inner hole of an annulus
    workspace; 'ooc' puts it just beyond the arm's total reach.
    """
    if test == 'iic':
        longest = np.argmax(lengths)
        # Bug fix: the inner radius is the longest link minus the sum of the
        # OTHER links (previously np.sum(lengths) was subtracted, which always
        # yields a negative radius; cf. the correct formula in bfgs_tests).
        inner_radius = lengths[longest] - np.sum(np.delete(lengths, longest))
        p_distance_from_origin = inner_radius / 2
    elif test == 'ooc':
        reach = np.sum(lengths)
        p_distance_from_origin = reach + 1
    else:
        print("Test not implemented.")
        raise NotImplementedError
    # Random direction for the target point, random initial joint angles.
    angle = 2 * np.pi * np.random.random()
    p = p_distance_from_origin * np.array([np.cos(angle), np.sin(angle)])
    theta0 = 2 * np.pi * np.random.random(n)
    run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def test_bfgs_local_max(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start the solver from a boundary configuration that is a local (not global) maximum."""
    print('---- Test with a boundary point that is a \n'
          'local (not global) maximum as starting point ----')
    bfgs_tests('lm', lengths, n, plot_initial, plot_minimizer, animate)
def test_bfgs_global_max(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start the solver from the global maximum of the distance to the destination."""
    print('---- Test with global maximum as starting point ----')
    bfgs_tests('gm', lengths, n, plot_initial, plot_minimizer, animate)
def test_bfgs_saddle_point(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start the solver from an interior saddle point / local maximum configuration."""
    print('---- Test with an interior point that is either a\n'
          ' saddle point or local maximum as starting point ----')
    bfgs_tests('sp', lengths, n, plot_initial, plot_minimizer, animate)
def bfgs_tests(test, lengths, n, plot_initial, plot_minimizer, animate):
    """Construct a degenerate starting configuration for the given test case and run the solver.

    test: 'lm' (local max: fully extended toward p), 'gm' (global max: fully
    extended away from p), or 'sp' (saddle/local max: one link folded back).
    """
    first_angle = 2 * np.pi * np.random.random()
    if test == 'sp': last_angle = np.pi
    # Fully extended arm along first_angle; all relative joint angles zero.
    theta0 = np.zeros(n)
    theta0[0] = first_angle
    longest = np.argmax(lengths)
    annulus = np.sum(np.delete(lengths, longest)) < lengths[longest]
    if annulus:
        if test == 'sp':
            # Fold one terminal link back (pi) to create the saddle configuration;
            # avoid folding the longest link itself.
            if longest != len(lengths) - 1: theta0[-1] = last_angle
            else: theta0[-2] = last_angle
        # Destination sampled uniformly within the reachable annulus.
        inner_radius = lengths[longest] - np.sum(np.delete(lengths, longest))
        outer_radius = np.sum(lengths)
        p_distance_from_origin = inner_radius + (outer_radius - inner_radius) * np.random.random()
    else:
        p_distance_from_origin = np.sum(lengths) * np.random.random()
    if test == 'lm' or test == 'sp':
        # Destination along the arm's initial direction.
        p = p_distance_from_origin * np.array([np.cos(first_angle), np.sin(first_angle)])
    elif test == 'gm':
        # Destination diametrically opposite the arm's initial direction.
        p = p_distance_from_origin * np.array([-np.cos(first_angle), -np.sin(first_angle)])
    else:
        print ("Test not implemented.")
        raise NotImplementedError
    run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def test_random(m, plot_initial=False, plot_minimizer=False, animate=False):
    """Run m solver tests with random arm sizes, link lengths, start angles and targets.

    NOTE(review): p_distance_to_origin is drawn from [-2*reach, 2*reach]; a negative
    distance mirrors the target through the origin — presumably intentional for
    extra coverage, confirm.
    """
    print('---- Test with m random configurations ----')
    n_cap = 100   # max number of links
    l_cap = 1000  # max link length
    for i in range(0, m):
        n = int(n_cap * np.random.random()) + 1
        lengths = l_cap * np.random.random(n)
        theta0 = 2 * np.pi * np.random.random(n)
        p_distance_to_origin = 2 * np.sum(lengths) * np.random.uniform(low=-1, high=1)
        p_angle = 2 * np.pi * np.random.random()
        p = p_distance_to_origin * np.array([np.cos(p_angle), np.sin(p_angle)])
        run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate):
    """Drive one RobotArm solve; on failure, persist the initial values and the
    animation so the case can be reproduced, then re-raise.

    NOTE(review): reads module-global `epsilon`, which is only assigned under the
    __main__ guard — calling run_test from an importing module would raise
    NameError; confirm intended usage.
    """
    WALL_E = RobotArm(lengths, p, theta0, precision=epsilon)
    if plot_initial:
        WALL_E.plot()
    try:
        WALL_E.move_to_destination()
    except (MaximumIterationError, AssertionError):
        # Two previously duplicated handlers merged: dump repro data,
        # save the animation, and propagate the failure.
        np.save('initial_values_bug', (lengths, theta0, p))
        WALL_E.save_animation()
        raise
    if plot_minimizer:
        WALL_E.plot()
    if animate:
        WALL_E.save_animation()
if __name__ == '__main__':
    # Two 3-link test arms: one whose workspace is a disk, one an annulus.
    arms = []
    arms.append(np.array([3, 2, 2]))  # disk-shaped configuration space
    arms.append(np.array([1, 4, 1]))  # annulus-shaped configuration space
    epsilon = 1e-3  # solver precision; read as a module global by run_test
    # (cleanup: removed unused `k = len(arms)`)
    for lengths in arms:
        n = len(lengths)
        test_point_outside_outer_circle(lengths, n)
        test_point_innside_inner_circle(lengths, n)
        test_bfgs_local_max(lengths, n)
        test_bfgs_global_max(lengths, n)
        test_bfgs_saddle_point(lengths, n)
    test_random(100)
| StarcoderdataPython |
1775230 | <filename>geoportal/tests/functional/test_xsd.py
# Copyright (c) 2018-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# pylint: disable=missing-docstring,attribute-defined-outside-init,protected-access,attribute-defined-outside-init
from unittest import TestCase
from unittest.mock import Mock, patch
from tests.functional import setup_common as setup_module
from tests.functional import teardown_common as teardown_module # noqa
class TestXSDGenerator(TestCase):
_tables = None
    def setup_method(self, _):
        """Create a two-table mapping (parent with two association-proxied FKs to
        child) against the test database and seed three Child rows."""
        setup_module()
        import transaction
        from sqlalchemy import Column, ForeignKey, types
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import relationship
        from c2cgeoportal_commons.models import DBSession
        from c2cgeoportal_geoportal.lib.dbreflection import _AssociationProxy
        # Always see the diff
        # https://docs.python.org/2/library/unittest.html#unittest.TestCase.maxDiff
        self.maxDiff = None
        Base = declarative_base(bind=DBSession.c2c_rw_bind)  # noqa
        class Child(Base):  # type: ignore
            __tablename__ = "child"
            id = Column(types.Integer, primary_key=True)
            name = Column(types.Unicode)
            custom_order = Column(types.Integer)  # used to exercise order_by on the proxy
            def __init__(self, name, custom_order):
                self.name = name
                self.custom_order = custom_order
        class Parent(Base):  # type: ignore
            __tablename__ = "parent"
            id = Column(types.Integer, primary_key=True)
            child1_id = Column(types.Integer, ForeignKey("child.id"))
            child2_id = Column(types.Integer, ForeignKey("child.id"))
            other = Column(types.String)
            # readonly column: exercised by test_add_column_readonly
            readonly = Column(types.String, info={"readonly": True})
            # child1: nullable proxy; child2: non-nullable with custom ordering
            child1_ = relationship(Child, primaryjoin=(child1_id == Child.id))
            child1 = _AssociationProxy("child1_", "name")
            child1_id.info["association_proxy"] = "child1"
            child2_ = relationship(Child, primaryjoin=(child2_id == Child.id))
            child2 = _AssociationProxy("child2_", "name", nullable=False, order_by="custom_order")
            child2_id.info["association_proxy"] = "child2"
        Child.__table__.create()
        Parent.__table__.create()
        # Remember created tables so teardown_method can drop them.
        self._tables = [Parent.__table__, Child.__table__]
        DBSession.add_all([Child("foo", 2), Child("zad", 1), Child("bar", 2)])
        transaction.commit()
        self.metadata = Base.metadata
        self.cls = Parent
def teardown_method(self, _):
import transaction
transaction.commit()
if self._tables is not None:
for table in self._tables:
table.drop()
@patch("c2cgeoportal_geoportal.lib.xsd.XSDGenerator.add_column_property_xsd")
def test_add_class_properties_xsd_column_order(self, column_mock):
from c2cgeoportal_geoportal.lib.xsd import XSDGenerator
tb = Mock()
self.cls.__attributes_order__ = ["child1_id", "other"]
gen = XSDGenerator(include_foreign_keys=True)
gen.add_class_properties_xsd(tb, self.cls)
called_properties = [kall[0][1].class_attribute.name for kall in column_mock.call_args_list]
assert len(called_properties) == 5
assert self.cls.__attributes_order__ == called_properties[: len(self.cls.__attributes_order__)]
@patch("c2cgeoportal_geoportal.lib.xsd.XSDGenerator.add_association_proxy_xsd")
@patch("c2cgeoportal_geoportal.lib.xsd.PapyrusXSDGenerator.add_column_property_xsd")
def test_add_column_property_xsd(self, column_mock, proxy_mock):
from sqlalchemy.orm.util import class_mapper
from c2cgeoportal_geoportal.lib.xsd import XSDGenerator
gen = XSDGenerator(include_foreign_keys=True)
tb = Mock()
mapper = class_mapper(self.cls)
p = mapper.attrs["child1_id"]
gen.add_column_property_xsd(tb, p)
proxy_mock.assert_called_once_with(tb, p)
p = mapper.attrs["other"]
gen.add_column_property_xsd(tb, p)
column_mock.assert_called_once_with(tb, p)
def test_add_column_readonly(self):
from xml.etree.ElementTree import TreeBuilder, tostring
from sqlalchemy.orm.util import class_mapper
from c2cgeoportal_geoportal.lib.xsd import XSDGenerator
gen = XSDGenerator(include_foreign_keys=True)
mapper = class_mapper(self.cls)
tb = TreeBuilder()
p = mapper.attrs["readonly"]
gen.add_column_property_xsd(tb, p)
e = tb.close()
self.assertEqual(
'<xsd:element name="readonly" minOccurs="0" nillable="true" type="xsd:string">'
"<xsd:annotation>"
"<xsd:appinfo>"
'<readonly value="true" />'
"</xsd:appinfo>"
"</xsd:annotation>"
"</xsd:element>",
tostring(e).decode("utf-8"),
)
def test_add_association_proxy_xsd(self):
from xml.etree.ElementTree import TreeBuilder, tostring
from sqlalchemy.orm.util import class_mapper
from c2cgeoportal_geoportal.lib.xsd import XSDGenerator
gen = XSDGenerator(include_foreign_keys=True)
mapper = class_mapper(self.cls)
tb = TreeBuilder()
gen.add_association_proxy_xsd(tb, mapper.attrs["child1_id"])
e = tb.close()
self.assertEqual(
'<xsd:element minOccurs="0" nillable="true" name="child1">'
"<xsd:simpleType>"
'<xsd:restriction base="xsd:string">'
'<xsd:enumeration value="foo" />'
'<xsd:enumeration value="zad" />'
'<xsd:enumeration value="bar" />'
"</xsd:restriction>"
"</xsd:simpleType>"
"</xsd:element>",
tostring(e).decode("utf-8"),
)
# Test child2 enumeration is ordered by Child.custom_order
tb = TreeBuilder()
gen.add_association_proxy_xsd(tb, mapper.attrs["child2_id"])
e = tb.close()
self.assertEqual(
'<xsd:element name="child2">'
"<xsd:simpleType>"
'<xsd:restriction base="xsd:string">'
'<xsd:enumeration value="zad" />'
'<xsd:enumeration value="foo" />'
'<xsd:enumeration value="bar" />'
"</xsd:restriction>"
"</xsd:simpleType>"
"</xsd:element>",
tostring(e).decode("utf-8"),
)
| StarcoderdataPython |
85134 | <gh_stars>1-10
#!/usr/bin/env python3
"""Packaging script for the blockfrost-python SDK."""
import pathlib
from setuptools import setup, find_packages

HERE = pathlib.Path(__file__).parent

# The long description shown on PyPI comes straight from the README.
long_description = (HERE / 'README.md').read_text(encoding='utf-8')

# Credit: https://github.com/mathiasfrohlich
# (Previously passed as a bogus `ghostwriter=` keyword, which is not a valid
# setup() option and made setuptools emit an "Unknown distribution option"
# warning.)
setup(
    name='blockfrost-python',
    version='0.3.0',
    description='The official Python SDK for Blockfrost API v0.1.30',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/blockfrost/blockfrost-python',
    # Author details
    author='<EMAIL>',
    author_email='<EMAIL>',
    license='Apache-2.0',
    keywords='blockfrost blockchain cardano ipfs',
    packages=find_packages(exclude=['tests', 'tests.*']),
    python_requires='>=3.7, <4',
    install_requires=[
        "requests",
    ],
    # NOTE(review): tests_require is deprecated in modern setuptools; kept
    # here for backwards compatibility with existing tooling.
    tests_require=[
        "pytest",
        "mock",
        'requests-mock',
    ],
    classifiers=[  # Optional
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3 :: Only',
    ],
)
| StarcoderdataPython |
97906 | <gh_stars>1-10
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from slimmermeten.models import ElektricityReading, GasReading, PowerConsumption
from django.db.models.aggregates import Count
from django.db.models import Avg
import colorsys
from datetime import datetime, date, time, timedelta
from random import random
import math
# Create your views here.
def get_random_color(colors=1):
    """Return ``colors`` distinct CSS hex colour strings (e.g. ``'#f3a821'``).

    Walks the hue wheel in large fixed steps from a random starting hue and
    every fifth colour bumps saturation / darkens value, so successive
    colours stay visually distinct.  Uses the module-level ``random()``, so
    output is only reproducible after seeding the ``random`` module.
    """
    picked = set()
    h, s, v = random() * 6, .5, 243.2
    step = 0
    while len(picked) < colors:
        h += 3.708
        # Channel triple for the current (h, s, v).  Cast to int because
        # Python 3's '%02x' refuses float arguments (this code was written
        # for Python 2, where floats were truncated).
        channels = (int(v), int(v - v * s * abs(1 - h % 2)), int(v - v * s))
        # Rotate/stride the tripled channel tuple to permute the RGB order,
        # then keep the first three entries.  BUG FIX: the slice start used
        # Python-2 '/' which produces a float (invalid index) on Python 3;
        # '//' restores the original integer semantics.
        start = 5 ** int(h) // 3 % 3
        stride = int(h) % 2 + 1
        picked.add('#' + '%02x' * 3 % ((channels * 3)[start::stride][:3]))
        # Every fifth colour: more saturation, darker value.  BUG FIX:
        # 'step % 5 / 4' is truthy for step % 5 in {1, 2, 3, 4} under
        # Python 3; '//' keeps the Python-2 behaviour (only step % 5 == 4).
        if step % 5 // 4:
            s += .1
            v -= 51.2
        step += 1
    return list(picked)
def all_power(request):
    """Render a chart of hourly average power consumption, one series per day.

    Aggregates every ``PowerConsumption`` row into per-hour averages using
    ``date_trunc('hour', ...)`` (PostgreSQL-specific SQL), then pivots the
    flat, chronologically ordered result into one 24-slot list per calendar
    day for the ``chart.html`` template.
    """
    # Unfiltered
    consumptions = PowerConsumption.objects.all()
    consumptions = consumptions.extra({"hour": "date_trunc('hour', date)"}).values("hour").order_by('hour').annotate(power=Avg("power"))
    series = []
    cur_date = None
    cur_row = [0, ] * 24  # one average-power slot per hour of the day
    for cons in consumptions:
        hour_date = cons.get('hour')
        power = cons.get('power')
        if not cur_date:
            # Initial
            cur_date = hour_date.date()
        elif cur_date != hour_date.date():
            # next day
            series.append((cur_date, cur_row))
            cur_row = [0, ] * 24
            cur_date = hour_date.date()
        cur_row[hour_date.hour] = int(power)
    # Flush the last (possibly partial) day.
    # NOTE(review): with an empty table this appends (None, all-zero row);
    # confirm the template copes with that.
    series.append((cur_date, cur_row))
    labels = [u"{0:02d}:00".format(x) for x in range(0, 24)]
    context = {'series': series, 'labels': labels, 'yaxis_label': 'Power consumption (Watt)', 'graph_title': "Power Consumption", "value_suffix": " Watt"}
    return render(request, 'chart.html', context)
def power_date(request, day=0, month=0, year=0, hour=0, dayname=None):
    """Render power consumption for a single day (optionally zoomed to an hour).

    The view is reached either with ``dayname`` ('today'/'yesterday') or with
    an explicit ``day``/``month``/``year``.  A non-zero ``hour`` narrows the
    chart to roughly that hour (hh-1:30 .. hh:31) at full sample resolution;
    otherwise samples are grouped into per-hour averages.

    Raises:
        ValueError: if ``dayname`` is given but is not 'today'/'yesterday'.
    """
    if dayname and dayname in ['today', 'yesterday']:
        thedate = datetime.now()
        name = "Today"
        if dayname == 'yesterday':
            thedate = thedate - timedelta(hours=24)
            name = "Yesterday"
    elif dayname:
        # BUG FIX: this used `raise "No such day"`; raising a string is a
        # TypeError under Python 3 (exceptions must derive from BaseException).
        raise ValueError("No such day")
    if day and month and year:
        thedate = datetime(int(year), int(month), int(day))
        name = "{0}-{1}-{2}".format(day, month, year)
    if hour:
        # NOTE(review): assumes 1 <= hour <= 23 when provided — confirm the
        # URL configuration guarantees this.
        hour = int(hour)
        name = "{0} {1:02d}:00".format(name, hour)
        today_min = datetime.combine(thedate.date(), time(hour - 1, 30))
        today_max = datetime.combine(thedate.date(), time(hour, 31))
    else:
        today_min = datetime.combine(thedate.date(), time.min)
        today_max = datetime.combine(thedate.date(), time.max)
    consumptions = PowerConsumption.objects.filter(date__range=(today_min, today_max))
    if not hour:
        # When not zoomed on an hour, group per hour
        consumptions = consumptions.extra({"date": "date_trunc('hour', date)"}).values("date").order_by('date').annotate(power=Avg("power"))
        values = [cons.get('power') for cons in consumptions]
        labels = ["{0:02d}:{1:02d}".format(cons.get('date').hour, cons.get('date').minute) for cons in consumptions]
    else:
        values = [cons.power for cons in consumptions]
        labels = ["{0:02d}:{1:02d}".format(cons.date.hour, cons.date.minute) for cons in consumptions]
    series = [(name, values)]
    context = {'series': series, 'labels': labels, 'yaxis_label': 'Power consumption (Watt)', 'graph_title': "Power Consumption", "value_suffix": " Watt"}
    return render(request, 'chart.html', context)
4814882 | # coding = 'utf-8'
import pandas as pd
import numpy as np
import sklearn
class TargetMeanEncoderConfig:
    """Hyper-parameters for target-mean encoding.

    Attributes:
        fold: number of cross-validation folds used while encoding.
        smooth_parameter: smoothing weight applied to the category means.
    """

    def __init__(self, fold=5, smooth_parameter=0.9):
        self.fold = fold
        self.smooth_parameter = smooth_parameter
def encode_one_column(dfs, y, target_var, config):
    """Target-mean encode a single column.

    NOTE(review): not implemented — the body is only this docstring, so the
    function implicitly returns None.

    :param dfs: input data frame(s) containing the column to encode.
    :param y: target variable used to compute the per-category means.
    :param target_var: name of the column to encode.
    :param config: a TargetMeanEncoderConfig with fold/smoothing settings.
    :return: None (stub).
    """
def target_mean_encoder(df, y, target_vars, config):
    """Target-mean encode several columns of a data frame.

    NOTE(review): not implemented — the body is only this docstring, so the
    function implicitly returns None.

    :param df: input data frame.
    :param y: target variable used to compute the per-category means.
    :param target_vars: names of the columns to encode.
    :param config: a TargetMeanEncoderConfig with fold/smoothing settings.
    :return: None (stub).
    """
| StarcoderdataPython |
184421 | <reponame>JakobGM/robotarm-optimization
import numpy as np
from functools import partial
from problem import (
generate_objective_function,
generate_objective_gradient_function,
)
from constraints import (
generate_constraints_function,
generate_constraint_gradients_function,
)
from methods import BFGS, MaximumIterationError, gradient_descent
from plotting import path_figure
def generate_quadratically_penalized_objective(robot_arm):
    '''
    Given a RobotArm object with valid joint lengths and destinations,
    this function returns a quadratically penalized objective function
    taking in two parameters: thetas and mu.

    Function: R^(ns) x R --> R, i.e.
    Q(theta; mu) = f(theta) + (mu/2) * sum_i c_i(theta)^2
    '''
    n = robot_arm.n
    s = robot_arm.s
    # Build the unpenalized objective and the constraint evaluator once;
    # the returned closure reuses them on every call.
    objective = generate_objective_function(robot_arm)
    constraints_func = generate_constraints_function(robot_arm)

    def quadratically_penalized_objective(thetas, mu):
        # thetas must be the flattened (n*s,) joint-angle vector.
        if not thetas.shape == (n * s,):
            raise ValueError('Thetas is not given as 1D-vector, but as: ' + \
                str(thetas.shape))
        return objective(thetas) + \
            0.5 * mu * np.sum(constraints_func(thetas) ** 2)

    return quadratically_penalized_objective
def generate_quadratically_penalized_objective_gradient(robot_arm):
    """Return grad Q(theta; mu) for Q(theta; mu) = f(theta) + (mu/2)*sum_i c_i(theta)^2.

    The returned closure maps a flat theta vector of shape (n*s,) and a
    penalty weight mu to a gradient vector of shape (n*s,).
    """
    n = robot_arm.n
    s = robot_arm.s
    objective_gradient = generate_objective_gradient_function(robot_arm)
    constraints_func = generate_constraints_function(robot_arm)
    constraint_gradients_func = generate_constraint_gradients_function(robot_arm)

    def quadratic_constraint_gradients(thetas):
        """Columns c_i(theta) * grad c_i(theta), shape (n*s, 2*s)."""
        if not thetas.shape == (n * s,):
            raise ValueError('Thetas is not given as 1D-vector, but as: ' + \
                str(thetas.shape))
        constraint_gradients = constraint_gradients_func(thetas)
        constraints = constraints_func(thetas)
        assert constraint_gradients.shape == (n * s, 2 * s,)
        assert constraints.shape == (2 * s,)
        return constraints.reshape((1, 2 * s,)) * constraint_gradients

    def quadratically_penalized_objective_gradient(thetas, mu):
        if not thetas.shape == (n * s,):
            raise ValueError('Thetas is not given as 1D-vector, but as: ' + \
                str(thetas.shape))
        grad = objective_gradient(thetas)
        # BUG FIX: the gradient of (mu/2) * sum_i c_i^2 is mu * sum_i c_i * grad c_i.
        # The previous code scaled the per-constraint products by 0.5*mu and
        # then multiplied by 0.5*mu again when summing, yielding (mu/2)^2
        # times the correct penalty gradient.
        return grad + mu * np.sum(quadratic_constraint_gradients(thetas), axis=1)

    return quadratically_penalized_objective_gradient
def quadratic_penalty_method(robot_arm):
    """Minimize the penalized objective with increasing mu / shrinking tolerance.

    Repeatedly runs BFGS on Q(theta; mu), growing mu by 1.5x and halving the
    convergence tolerance tau after each successful round.  The loop stops
    when BFGS raises MaximumIterationError (treated as the stopping
    criterion) or after 1000 rounds.  Returns the final flat theta vector.
    """
    n = robot_arm.n
    s = robot_arm.s
    Q = generate_quadratically_penalized_objective(robot_arm)
    grad_Q = generate_quadratically_penalized_objective_gradient(robot_arm)
    thetas = robot_arm.generate_initial_guess(show=False)
    mu = 1
    tau = 1e-4

    def get_f(mu):
        # Freeze the current mu into a single-argument objective for BFGS.
        return partial(Q, mu=mu)

    def get_grad_f(mu):
        return partial(grad_Q, mu=mu)

    print('Starting quadratic penalty method')
    for yolo in range(1000):
        f = get_f(mu)
        grad_f = get_grad_f(mu)
        try:
            thetas = BFGS(thetas, f, grad_f, tau)
        except MaximumIterationError:
            print('Reached MaximumIterationError in loop number {}'.format(yolo))
            break
        mu = 1.5 * mu
        tau = 0.5 * tau
    return thetas
| StarcoderdataPython |
1756650 | <reponame>TheCarvalho/atividades-wikipython
'''
8. Write a program that reads 5 numbers and reports their sum and average.
(Original: Faça um programa que leia 5 números e informe a soma e a média
dos números.)
'''
total = 0
for _ in range(5):
    total += int(input('Insira o número: '))
print('-' * 10)
print(f'A soma é {total}')
print(f'A média é {total/5}')
| StarcoderdataPython |
113550 | <gh_stars>0
import numpy as np
from numba import njit
@njit
def sigmoid(x: np.ndarray):
    # Element-wise logistic function 1 / (1 + e^(-x)), JIT-compiled by numba.
    return 1 / (1 + np.exp(-x))
class SufficientStats:
    """Bundle several statistic functions into one callable.

    Calling the instance evaluates every function on the given variable
    values and returns all results flattened into a single 1-D array.
    """

    def __init__(self, funcs):
        """
        :param funcs: list of callable functions
        """
        self.funcs = funcs

    def __call__(self, variable_values):
        evaluated = [fn(variable_values) for fn in self.funcs]
        # reshape(-1) flattens even when individual funcs return arrays.
        return np.array(evaluated).reshape(-1)
class FactorGraph:
    r"""
    Binary Random Variables have IDs \in {0, 1, 2, .., #Vars - 1} and possible values in { +1, polarities[i] }
    """
    def __init__(self, n_vars, potentials, polarities, priors=None, seed=77):
        """
        :param n_vars: #Random Variables
        :param potentials: list of tuples of (callable function, Image cardinality)
                            Image cardinality just means the number of values the function returns (>= 1)
        :param polarities: a list of n_vars tuples, where the i-th entry corresponds to the possible values
                            that variable i can take.
        :param priors: a list of n_vars tuples, where the i-th entry corresponds to the prior probability for variable i
                        taking the values in the order given in polarities.
        """
        assert all([len(pol) <= 2 for pol in polarities]), "Only binary variables supported"
        assert len(polarities) == n_vars
        self.n_variables = n_vars
        self.sufficient_statistics = SufficientStats([func for func, _ in potentials])
        # Total parameter dimension = sum of the image cardinalities.
        self.n_params = sum([img_card for _, img_card in potentials])
        self.parameter = np.zeros(self.n_params, dtype=np.float32)
        # Initialise the first n_vars weights (one per variable) to 0.7.
        self.parameter[:n_vars] = 0.7
        self.polarities = polarities
        # Uniform priors unless the caller supplies them.
        self.priors = [[0.5, 0.5] for _ in range(n_vars)] if priors is None else priors
        self.sampler = None
        self.rng = np.random.default_rng(seed=seed)
        # self.parameter[:] = 0.7

    def conditional(self, target_varID, values):
        r"""
        :param values: of shape (#Vars,).
                Note that the values corresponding to the target variable is not used, i.e.
                values[target_varID] can be set to any arbitrary value.
        :param target_varID: ID \in {0, .., #Vars-1} of the variable to be inferred
        :return:The conditional P(Var_ID = 1 | Vars_-ID) of the given variable given all the others.
        """
        # Evaluate the sufficient statistics with the target variable set to
        # each of its two polarities; the conditional follows from their
        # difference under the current parameter vector.
        vals_copy = values.copy()
        vals_copy[target_varID] = self.polarities[target_varID][0]  # e.g. +1
        pos = self.sufficient_statistics(vals_copy)
        vals_copy[target_varID] = self.polarities[target_varID][1]  # e.g. -1
        neg = self.sufficient_statistics(vals_copy)
        return sigmoid(
            self.parameter @ (pos - neg)
        )

    def predict(self, target_varID, values):
        r"""
        :param values: of shape (#Vars,).
                Note that the values corresponding to the target variable is not used, i.e.
                values[target_varID] can be set to any arbitrary value.
        :param target_varID: ID \in {0, .., #Vars-1} of the variable to be inferred
        :return: 1 if P(target_var = 1| Vars_{-target_var}) > 0.5, and the opposite polarity otherwise
        """
        return self.polarities[target_varID][0] if self.conditional(target_varID, values) > 0.5 \
            else self.polarities[target_varID][1]

    def predict_proba(self, data, target_varID=0):
        r"""
        :param data: of shape (#samples, #Vars).
                Note that the values corresponding to the target variable are not used, i.e.
                data[:, target_varID] can be set to any arbitrary value.
        :param target_varID: ID \in {0, .., #Vars-1} of the variable to be inferred
        :return: the soft labels computed from the learned posterior of shape (#samples,)
        """
        Y_soft = [
            self.conditional(target_varID, observed_vals)
            for observed_vals in data.astype(np.int32)
        ]
        return np.array(Y_soft)

    def fit(self, observations, lr=0.01, decay=1.0, burn_ins=10, n_epochs=25, gibbs_steps_per_sample=20, batch_size=32,
            n_gibbs_samples=50, evaluate_func=None,
            verbose=True, persistive_sampling=True, eval_args=(), eval_kwargs=dict()):
        """
        This method will fit the Factor Graph/MRF parameter to the given data/observation using:
        Stochastic maximum likelihood for fitting an Markov Random Field (MRF) algorithm from
        Machine Learning: A Probabilistic Perspective, <NAME> (page 680)

        :param observations: The data to learn from, a (#samples, #variables) array
        :param lr: learning rate
        :param burn_ins: number of Gibbs steps discarded before sampling
        :param n_epochs: Epochs of SGD
        :param batch_size: Batch size to compute the gradient on
        :param n_gibbs_samples: How many Gibbs chain samples to use for approximating the expectation of the sufficient statistics
        :return: dict of per-epoch metric histories (filled when evaluate_func is given)

        NOTE(review): the ``gibbs_steps_per_sample`` parameter is never used
        in this body — confirm whether it was meant to drive the sampler.
        """
        assert set(np.unique(observations)) == {-1, 0, 1}, "Unsupported labels!"
        assert observations.shape[1] == self.n_variables, f"Observations should have {self.n_variables} columns!"
        n_samples = observations.shape[0]
        observations = np.array(observations, dtype=np.int32)
        self.sampler = GibbsSampler(self.polarities, self.priors, rng=self.rng)
        history = {"accuracy": [], "f1": [], "auc": [], "epochs": range(1, n_epochs + 1)}
        for epoch in range(n_epochs):
            if persistive_sampling:
                # Persistent chain: keep the sampler across batches, re-mix it
                # once per epoch.
                self.sampler.burn_in(self.predict, burn_ins=burn_ins)
            permutation = self.rng.permutation(n_samples)  # shuffle the training set/observations
            for i in range(0, n_samples, batch_size):
                if not persistive_sampling:
                    # Fresh chain per mini-batch.
                    self.sampler = GibbsSampler(self.polarities, self.priors, rng=self.rng)
                    # print("---"*20, "\n", self.sampler.varID_to_sample)
                    self.sampler.burn_in(self.predict, burn_ins=burn_ins)
                indices = permutation[i:i + batch_size]
                batch = observations[indices, :]
                """ Get estimate of the expectation of the sufficient statistics by repeated MCMC sampling"""
                approx_Expectation, observed = np.zeros(self.n_params), np.zeros(self.n_params)
                for _ in range(n_gibbs_samples):
                    approx_Expectation += self.sufficient_statistics(
                        self.sampler.sample(self.predict)
                    )  # single sample of the random variables
                approx_Expectation /= n_gibbs_samples
                """ Observed value for the sufficient statistics """
                for observation in batch:
                    observed += self.sufficient_statistics(observation)
                observed /= len(batch)
                """ Compute the gradient over the mini-batch """
                gradient = observed - approx_Expectation
                """ Stochastic gradient descent step"""
                # NOTE(review): for maximum likelihood, (observed - expected)
                # is the ASCENT direction; subtracting it here moves the
                # parameters the other way — confirm the intended sign.
                self.parameter -= lr * gradient
            # NOTE(review): original indentation was lost; the decay and
            # evaluation below are reconstructed at per-epoch level (the
            # per-epoch print supports this) — confirm against the repo.
            lr *= decay  # decay stepsize
            if evaluate_func is not None:
                stats = evaluate_func(*eval_args, **eval_kwargs)
                [history[metric].append(stats[metric]) for metric in ["accuracy", "f1", "auc"]]
                print(f"Epoch {epoch}: Acc: {stats['accuracy']} | F1: {stats['f1']} | AUC: {stats['auc']}")
            elif verbose and epoch % (n_epochs / 5) == 0:
                print(f"Epoch {epoch}...")
        return history

    # Commented-out legacy sampler kept verbatim from the original source.
    '''def gibbs_sample(self, n_steps=10):
        if n_steps < 1:
            return self.sampler.copy()
        k = 0
        while True:
            for varID in range(self.n_variables): # chain
                r"""
                Set x_i = argmax_{x_i \in values(X_i)} P_\theta (x_i | x_{-i})
                I.e. set the new x_i to the most probable state, given all the other variables.
                """
                self.sampler[varID] = self.predict(varID, self.sampler)
                k += 1
                if k == n_steps:
                    return self.sampler.copy()'''
class GibbsSampler:
    """Cyclic Gibbs sampling over a vector of binary variables.

    Holds the current joint state; each ``sample`` call resamples exactly
    one variable (the next one in round-robin order) from the conditional
    supplied by the caller and returns a copy of the full state.
    """

    def __init__(self, polarities, priors, seed=77, rng=None):
        self.rng = np.random.default_rng(seed=seed) if rng is None else rng
        self.n_variables = len(polarities)
        self.polarities = polarities
        self.priors = priors
        # Initial state: draw every variable independently from its prior.
        initial_state = [
            self.rng.choice(values, p=prior)
            for values, prior in zip(self.polarities, self.priors)
        ]
        self.samples = np.array(initial_state, dtype=np.int32)
        # Round-robin pointer; the cycle starts at a random variable.
        self.varID_to_sample = self.rng.choice(self.n_variables)

    def burn_in(self, conditional_func, burn_ins=20):
        """Discard ``burn_ins`` sampling steps so the chain can mix."""
        for _ in range(burn_ins):
            self.sample(conditional_func)

    def sample(self, conditional_func):
        r"""Resample one variable from its conditional; return a state copy.

        Sets x_i = argmax over values(X_i) of P(x_i | x_{-i}) as computed by
        ``conditional_func(varID, state)``, then advances the pointer.
        """
        target = self.varID_to_sample
        self.samples[target] = conditional_func(target, self.samples)
        self.varID_to_sample = (target + 1) % self.n_variables
        return self.samples.copy()
| StarcoderdataPython |
141158 | <reponame>ektai/frappe3<filename>frappe/tests/test_formatter.py<gh_stars>0
# -*- coding: utf-8 -*-
import frappe
from frappe import format
import unittest
class TestFormatter(unittest.TestCase):
    """Checks ``frappe.format`` currency rendering for Currency docfields."""

    def test_currency_formatting(self):
        """The currency symbol must come from doc.currency, else the default."""
        df = frappe._dict({
            'fieldname': 'amount',
            'fieldtype': 'Currency',
            'options': 'currency'  # symbol comes from the doc's `currency` field
        })
        doc = frappe._dict({
            'amount': 5
        })
        frappe.db.set_default("currency", 'INR')
        # if currency field is not passed then default currency should be used.
        self.assertEqual(format(100, df, doc), '100.00 ₹')
        doc.currency = 'USD'
        self.assertEqual(format(100, df, doc), "100.00 $")
        # Reset the global default so other tests are unaffected.
        frappe.db.set_default("currency", None)
3280202 | # Copyright 2020 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pika
from json import dumps
from uuid import uuid4
from time import sleep
from arbiter.config import Config
from arbiter.event.process import ProcessEventHandler
class ProcessWatcher:
    """Tracks task state of a remote arbiter process over RabbitMQ.

    Broadcasts ``task_state`` / ``clear_state`` requests on the shared
    ``all_queue`` exchange and reads the replies that a background
    ``ProcessEventHandler`` thread accumulates into ``self.state``.
    """

    def __init__(self, process_id, host, port, user, password, vhost="carrier", all_queue="arbiterAll",
                 wait_time=2.0):
        self.config = Config(host, port, user, password, vhost, None, all_queue)
        self.connection = self._get_connection()
        self.process_id = process_id
        # Filled in asynchronously by the handler thread, keyed by arbiter id.
        self.state = {}
        self.subscriptions = dict()
        self.arbiter_id = str(uuid4())
        self.handler = ProcessEventHandler(self.config, self.subscriptions, self.state, self.process_id)
        self.handler.start()
        self.handler.wait_running()
        # How long to wait for replies after broadcasting a request.
        self.wait_time = wait_time

    def _get_connection(self):  # This code duplication needed to avoid thread safeness problem of pika
        """Open a fresh BlockingConnection and return a channel on it.

        NOTE(review): despite the name this returns a *channel*, and callers
        never close the underlying connection — each call appears to leak
        one connection; confirm whether that is acceptable here.
        """
        _connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=self.config.host,
                port=self.config.port,
                virtual_host=self.config.vhost,
                credentials=pika.PlainCredentials(
                    self.config.user,
                    self.config.password
                )
            )
        )
        channel = _connection.channel()
        return channel

    def send_message(self, msg, queue="", exchange=""):
        """JSON-encode ``msg`` and publish it persistently (delivery_mode=2)."""
        self._get_connection().basic_publish(
            exchange=exchange, routing_key=queue,
            body=dumps(msg).encode("utf-8"),
            properties=pika.BasicProperties(
                delivery_mode=2
            )
        )

    def collect_state(self, tasks):
        """Broadcast a state request for ``tasks`` and return the collected state.

        Waits ``self.wait_time`` seconds for arbiters to reply before
        reading the (handler-populated) state dict.
        """
        if self.process_id not in self.state:
            self.state[self.process_id] = {
                "running": [],
                "done": []
            }
        message = {
            "type": "task_state",
            "tasks": tasks,
            "arbiter": self.process_id
        }
        self.send_message(message, exchange=self.config.all)
        sleep(self.wait_time)
        return self.state.get(self.process_id, {})

    def clear_state(self, tasks):
        """Ask all arbiters to forget state for ``tasks``; waits for delivery."""
        message = {
            "type": "clear_state",
            "tasks": tasks,
            "arbiter": self.process_id
        }
        self.send_message(message, exchange=self.config.all)
        sleep(self.wait_time)

    def close(self):
        """Stop the handler thread and delete this watcher's private queue."""
        self.handler.stop()
        self._get_connection().queue_delete(queue=self.process_id)
        self.handler.join()
| StarcoderdataPython |
135546 | <gh_stars>0
#!flask/bin/python
from app import app, port

# Development entry point: listen on all interfaces with the auto-reloader.
# NOTE(review): debug=True must not be used in production deployments.
app.run(host='0.0.0.0', port=port, debug=True, use_reloader=True)
103599 | <filename>linear_binning/test/test_linear_binning.py
from linear_binning import linear_binning
import numpy as np
import logging
from timeit import default_timer as timer
logging.basicConfig(level=logging.INFO)
def generate_data(n_samples=100000, D=2):
    """Draw random D-dimensional weighted sample points plus a grid spec.

    Returns ``(coords, weights, extents, sizes)``: coords is
    ``(n_samples, D)`` uniform in [0, 1), weights is ``(n_samples,)``,
    extents is a ``(D, 2)`` per-axis range, and sizes gives 51 bins per axis.
    """
    coords = np.random.random(size=(n_samples, D))
    weights = np.random.random(size=n_samples)
    # The axis range is deliberately narrower than [0, 1] so some samples
    # land outside the grid and exercise the over-/underflow bins.
    per_axis_range = np.tile([0.02, 0.8999], D).reshape((D, 2))
    bin_counts = np.full(D, 51)
    return coords, weights, per_axis_range, bin_counts
def test_sum_of_weights():
    """Linear binning must conserve total weight and hit exact grid nodes."""
    # tests that the sum of weights in the binned grid is preserved
    sample_coords, sample_weights, extents, sizes = generate_data(1000000)

    start = timer()
    coords, weights = linear_binning(sample_coords, sample_weights,
                                     extents, sizes)
    end = timer()
    logging.info('\n')
    logging.info('One million 2D points binned with linear_binning in {}s'.format(end - start))
    # Linear binning distributes each sample's weight over neighbouring
    # grid points, so the total weight must be conserved exactly.
    assert np.allclose(weights.sum(), sample_weights.sum())

    # Timing comparison only; the histogram result itself is discarded.
    x = np.ascontiguousarray(sample_coords[:, 0])
    y = np.ascontiguousarray(sample_coords[:, 1])
    start = timer()
    np.histogram2d(x, y,
                   weights=sample_weights,
                   bins=sizes, range=extents)
    end = timer()
    logging.info('For comparison, np.histogram2d finished in {}s'.format(end - start))

    # tests specific values on the grid
    # Three hand-picked samples: one exactly on a node, one above the grid
    # (overflow) and one below it (underflow).
    sample_coords = np.array([[0.2, 0.9], [0.5, 1.1], [-0.1, 0.7]])
    sample_weights = np.array([25, 50, 25])
    extents = np.array([[0.0, 1.0], [0.0, 1.0]])
    sizes = np.array([11, 11])
    coords, weights = linear_binning(sample_coords, sample_weights,
                                     extents, sizes)
    pass_value_test = True
    value_tests = 0
    for i in range(coords.shape[0]):
        # The out-of-range samples must be clamped onto the boundary nodes;
        # every other node must carry zero weight.
        if np.allclose(coords[i, 0], 0.0) and np.allclose(coords[i, 1], 0.7):
            pass_value_test &= np.allclose(weights[i], 25.0)
            value_tests += 1
        elif np.allclose(coords[i, 0], 0.2) and np.allclose(coords[i, 1], 0.9):
            pass_value_test &= np.allclose(weights[i], 25.0)
            value_tests += 1
        elif np.allclose(coords[i, 0], 0.5) and np.allclose(coords[i, 1], 1.0):
            pass_value_test &= np.allclose(weights[i], 50.0)
            value_tests += 1
        else:
            pass_value_test &= np.allclose(weights[i], 0.0)
    assert pass_value_test and value_tests == 3
| StarcoderdataPython |
64948 | <gh_stars>0
"""Plot Milky Way spiral arms."""
import numpy as np
import matplotlib.pyplot as plt
from astropy.units import Quantity
from gammapy.astro.population import simulate
from gammapy.astro.population import FaucherSpiral
from gammapy.utils.coordinates import polar, cartesian
# Simulate a Galactic source catalogue and overlay the FaucherSpiral
# arm model on top of the source positions.
catalog = simulate.make_base_catalog_galactic(
    n_sources=int(1e4), rad_dis="YK04", vel_dis="H05", max_age=Quantity(1e6, "yr")
)
spiral = FaucherSpiral()

fig = plt.figure(figsize=(6, 6))
rect = [0.12, 0.12, 0.85, 0.85]
ax_cartesian = fig.add_axes(rect)
ax_cartesian.set_aspect("equal")

# A frameless polar axes stacked on the cartesian one acts purely as a
# polar-grid backdrop; its tick labels are hidden.
ax_polar = fig.add_axes(rect, polar=True, frameon=False)
ax_polar.axes.get_xaxis().set_ticklabels([])
ax_polar.axes.get_yaxis().set_ticklabels([])

# Scatter the simulated source positions (x/y in kpc).
ax_cartesian.plot(
    catalog["x"],
    catalog["y"],
    marker=".",
    linestyle="none",
    markersize=5,
    alpha=0.3,
    fillstyle="full",
)
ax_cartesian.set_xlim(-20, 20)
ax_cartesian.set_ylim(-20, 20)
ax_cartesian.set_xlabel("x [kpc]", labelpad=2)
ax_cartesian.set_ylabel("y [kpc]", labelpad=-4)

# Mark and label the position of the Sun with a star marker at (0, 8).
ax_cartesian.plot(
    0, 8, color="k", markersize=10, fillstyle="none", marker="*", linewidth=2
)
ax_cartesian.annotate(
    "Sun",
    xy=(0, 8),
    xycoords="data",
    xytext=(-15, 15),
    arrowprops=dict(arrowstyle="->", color="k"),
    weight=400,
)
plt.grid(True)

# TODO: document what these magic numbers are or simplify the code
# `other_idx = [95, 90, 80, 80]` and below `theta_idx = int(other_idx * 0.97)`
for spiral_idx, other_idx in zip(range(4), [95, 90, 80, 80]):
    spiralarm_name = spiral.spiralarms[spiral_idx]
    theta_0 = spiral.theta_0[spiral_idx].value
    theta = Quantity(np.linspace(theta_0, theta_0 + 2 * np.pi, 100), "rad")
    x, y = spiral.xy_position(theta=theta, spiralarm_index=spiral_idx)
    ax_cartesian.plot(x.value, y.value, color="k")
    # Place the arm's name 1 kpc outside the curve, rotated to follow the
    # local arm direction at a slightly earlier point on the curve.
    rad, phi = polar(x[other_idx], y[other_idx])
    x_pos, y_pos = cartesian(rad + Quantity(1, "kpc"), phi)
    theta_idx = int(other_idx * 0.97)
    rotation = theta[theta_idx].to("deg").value
    ax_cartesian.text(
        x_pos.value,
        y_pos.value,
        spiralarm_name,
        ha="center",
        va="center",
        rotation=rotation - 90,
        weight=400,
    )
plt.show()
| StarcoderdataPython |
3202856 | <filename>examples/starkex-cairo/starkware/cairo/lang/compiler/ast/imports.py
import dataclasses
from typing import Optional, Sequence
from starkware.cairo.lang.compiler.ast.expr import ExprIdentifier
from starkware.cairo.lang.compiler.ast.formatting_utils import LocationField
from starkware.cairo.lang.compiler.ast.node import AstNode
from starkware.cairo.lang.compiler.error_handling import Location
@dataclasses.dataclass
class ImportItem(AstNode):
    """AST node for one item of an import statement: ``orig [as local]``."""
    orig_identifier: ExprIdentifier
    local_name: Optional[ExprIdentifier]  # None when no 'as' alias was given
    location: Optional[Location] = LocationField

    def format(self):
        """Return the source form, e.g. 'foo' or 'foo as bar'."""
        return f'{self.orig_identifier.format()}' + \
            (f' as {self.local_name.format()}' if self.local_name else '')

    @property
    def identifier(self):
        """The name this import binds: the alias if present, else the original."""
        return self.local_name if self.local_name is not None else self.orig_identifier

    def get_children(self) -> Sequence[Optional[AstNode]]:
        return [self.orig_identifier, self.local_name]
| StarcoderdataPython |
1705823 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 16:58:02 2018
@author: jack.lingheng.meng
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import keras.backend as K
from Environment.LASEnv import LASEnv
from LASAgent.ExtrinsicallyMotivatedLASAgent import ExtrinsicallyMotivatedLASAgent
from LASAgent.RandomLASAgent import RandomLASAgent
def plot_cumulative_reward(cumulativeReward):
    """Plot a cumulative-reward curve in interactive mode without blocking."""
    line, = plt.plot(cumulativeReward)
    plt.ion()
    #plt.ylim([0,10])
    plt.show()
    plt.pause(0.0001)  # brief pause so the figure actually renders
if __name__ == '__main__':
    """
    This script is for interaction between extrinsically motivated LASAgent and
    Environment.

    Note
    ----
    You should instantiate Environment first, because LASAgent need using Environment
    object as parameter to instantiate.
    """
    sess = tf.Session()
    K.set_session(sess)
    # Instantiate LASEnv (V-REP remote API server at 127.0.0.1:19997)
    envLAS = LASEnv('127.0.0.1', 19997)
    # Instantiate Extrinsically Motivated LAS-agent
    Ext_Mot_LASAgent = ExtrinsicallyMotivatedLASAgent(envLAS,
                                                      sess,
                                                      learnFromScratch = True)
    # Instantiate Random Action LAS-agent (baseline for comparison)
    Random_LASAgent = RandomLASAgent(envLAS)
    # Step counter
    i = 1
    # First run: the extrinsically motivated agent for 10000 steps.
    observationForLAS, rewardLAS, done, info = envLAS.reset()
    #while not done:
    for temp in range(10000):
        actionLAS = Ext_Mot_LASAgent.perceive_and_act(observationForLAS, rewardLAS, done)
        observationForLAS, rewardLAS, done, info = envLAS.step_LAS(actionLAS)
        print("Ext_Mot_LASAgent Step: {}, reward: {}".format(i, rewardLAS))
        i += 1
    # Second run: the random baseline agent for another 10000 steps.
    observationForLAS, rewardLAS, done, info = envLAS.reset()
    for temp in range(10000):
        actionLAS = Random_LASAgent.perceive_and_act(observationForLAS, rewardLAS, done)
        observationForLAS, rewardLAS, done, info = envLAS.step_LAS(actionLAS)
        print("Random_LASAgent Step: {}, reward: {}".format(i, rewardLAS))
        i += 1
    envLAS.destroy()
    # Compare the two agents' cumulative reward curves.
    plot_cumulative_reward(Ext_Mot_LASAgent._cumulativeRewardMemory)
    plot_cumulative_reward(Random_LASAgent._cumulativeRewardMemory)
82239 | import os
from django.conf.urls import url
from django.forms.forms import pretty_name
from django.http import HttpResponse, HttpResponseForbidden, JsonResponse
from django.template import Context
from django.template.loader import get_template
from glitter.assets.forms import ImageForm
from glitter.assets.models import Image
from glitter.assets.widgets import LIMIT_IMAGES_TO, ImageRelatedFieldWidgetWrapper, ImageSelect
from glitter.blockadmin import blocks
from .forms import ImageBlockForm
from .models import ImageBlock
class ImageBlockAdmin(blocks.BlockAdmin):
    """Block admin for ImageBlock: image-picker widget plus AJAX endpoints."""

    form = ImageBlockForm

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.

        If kwargs are given, they're passed to the form Field's constructor.
        """
        formfield = super().formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == 'image':
            # Swap the default FK widget for the thumbnail-based picker.
            # NOTE(review): `db_field.rel` was removed in Django 2.0
            # (renamed `remote_field`) — confirm the targeted Django version.
            formfield.widget = ImageRelatedFieldWidgetWrapper(
                ImageSelect(), db_field.rel, self.admin_site, can_add_related=True,
                can_change_related=True,
            )
        return formfield

    def get_urls(self):
        """Prepend the lazy-image and drag-and-drop upload endpoints."""
        urls = super().get_urls()
        app_label, model_name = self.model._meta.app_label, self.model._meta.model_name
        image_block_urls = [
            url(r'^get-lazy-images/$', self.get_lazy_images, name='get-lazy-images'),
            url(
                r'^drop-image/$',
                self.drop_image,
                name='{app_label}_{model_name}_drop_image'.format(
                    app_label=app_label, model_name=model_name
                )
            ),
        ]
        return image_block_urls + urls

    def get_lazy_images(self, request):
        """Return the next page of images (rendered HTML) for lazy loading.

        Pages backwards from ``last_image_id``; responds 400 when the
        parameter is missing or not a positive integer string.
        """
        last_image_id = request.GET.get('last_image_id', None)
        if last_image_id and last_image_id.isdigit():
            images = Image.objects.filter(
                id__lt=last_image_id
            ).order_by(
                '-created_at', 'modified_at', 'title'
            )[:LIMIT_IMAGES_TO]
            template = get_template('glitter/blocks/includes/lazy_images.html')
            context = Context({'images': images})
            html = template.render(context)
            return JsonResponse({'html': html, 'last_image_id': last_image_id})
        else:
            response = JsonResponse({'error': 'No last image id passed'})
            response.status_code = 400
            return response

    def drop_image(self, request):
        """Create an Image from a drag-and-dropped upload, titled by filename.

        Requires the app's edit permission; on validation failure returns a
        403 whose body carries the last field error message.
        """
        permission_name = '{}.edit_{}'.format(self.opts.app_label, self.opts.model_name)
        if not request.user.has_perm(permission_name):
            return HttpResponseForbidden('403 Forbidden', content_type='text/html')
        form = ImageForm(request.POST, request.FILES or None)
        if form.is_valid():
            # Use the prettified filename (without extension) as the title.
            filename, ext = os.path.splitext(form.files['file'].name)
            filename = pretty_name(filename)
            image = form.save(commit=False)
            image.title = filename
            image.save()
            return JsonResponse({'image_id': image.id, 'filename': filename})
        else:
            # NOTE(review): only the last field's error survives this loop;
            # confirm whether all errors should be joined instead.
            message = ''
            for field_name, error_list in form.errors.items():
                message = '{field} - {message}'.format(
                    field=field_name.capitalize(), message=''.join(error_list)
                )
            return HttpResponse(message, status=403)


blocks.site.register(ImageBlock, ImageBlockAdmin)
blocks.site.register_block(ImageBlock, 'Common')
1784506 | <filename>DailyChallenge/LC_986.py
class Solution:
    def intervalIntersection(self, firstList: List[List[int]], secondList: List[List[int]]) -> List[List[int]]:
        """Return the intersections of two sorted lists of disjoint intervals.

        Two-pointer sweep: intersect the current pair, then advance past
        whichever interval ends first, since it cannot overlap anything else.
        """
        i, j = 0, 0
        overlaps = []
        while i < len(firstList) and j < len(secondList):
            lo = max(firstList[i][0], secondList[j][0])
            hi = min(firstList[i][1], secondList[j][1])
            if lo <= hi:
                overlaps.append([lo, hi])
            if firstList[i][1] < secondList[j][1]:
                i += 1
            else:
                j += 1
        return overlaps
| StarcoderdataPython |
3336807 | # coding: utf-8
"""
Utility Functions for training, test and prediction of model.
Required: Python 3.6
TensorFlow 1.10.1
Copyright (c) 2018 <NAME>
"""
import tensorflow as tf
import tensorflow.contrib.eager as tfe
def loss(model, x, y, training=False):
    """Per-example softmax cross-entropy between model logits and labels y."""
    logits = model(x, training)
    xent = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)
    return xent
def grad(model, x, y, training=False):
    """Gradients of the loss w.r.t. every model variable (eager autodiff)."""
    with tf.GradientTape() as tape:
        loss_value = loss(model, x, y, training)
    # A non-persistent tape may be queried once after the context exits.
    return tape.gradient(loss_value, model.variables)
def train(model, optimizer, train_ds, val_ds, epochs, device="cpu:0"):
    """Train `model` on `train_ds` for `epochs` epochs (TF 1.x eager mode),
    printing validation accuracy every 200 steps and a summary per epoch.

    Inputs:
    - model: callable model; model(x, training) returns logits
    - optimizer: a tf.train optimizer
    - train_ds / val_ds: iterables yielding (x, one_hot_y) batches
    - epochs: number of passes over train_ds
    - device: TensorFlow device string, e.g. "cpu:0" or "gpu:0"
    """
    with tf.device(device):
        for e in range(epochs):
            # NOTE(review): epoch_loss_avg is never fed with per-batch
            # updates; epoch_loss_avg(loss(...)) below averages a single
            # fresh batch loss rather than a running epoch mean -- confirm.
            epoch_loss_avg = tfe.metrics.Mean()
            train_accuracy = tfe.metrics.Accuracy()
            val_accuracy = tfe.metrics.Accuracy()
            # NOTE(review): .next() is the tfe.Iterator / Py2 iterator API;
            # a plain Python iterator needs next(...). This also fetches a
            # throwaway batch that the loop below immediately rebinds.
            x, y = iter(train_ds).next()
            for (i, (x, y)) in enumerate(train_ds):
                grads = grad(model, x, y, training=True)
                optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step())
                train_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32),
                               tf.argmax(y, axis=1, output_type=tf.int32))
                if i % 200 == 0:
                    # Evaluate on the full validation set.
                    for (x_val, y_val) in val_ds:
                        val_accuracy(tf.argmax(model(x_val), axis=1, output_type=tf.int32),
                                     tf.argmax(y_val, axis=1, output_type=tf.int32))
                    print("Loss: {:.4f} - Acc: {:.4f} | Val Acc: {:.4f}".format(
                        epoch_loss_avg(loss(model, x, y)), train_accuracy.result(), val_accuracy.result()
                    ))
            print("-"*50)
            print("Epochs {} / {} | Loss: {:.4f} - Accuracy: {:.3%}".format(
                e + 1, epochs, epoch_loss_avg(loss(model, x, y)), train_accuracy.result()
            ))
def test(model, dataset, device="cpu:0"):
    """Print the classification accuracy of `model` over `dataset`."""
    with tf.device(device):
        accuracy = tfe.metrics.Accuracy()
        for (x, y) in dataset:
            predicted = tf.argmax(model(x), axis=1, output_type=tf.int32)
            expected = tf.argmax(y, axis=1, output_type=tf.int32)
            accuracy(predicted, expected)
        print("Test set accuracy: {:.3%}".format(accuracy.result()))
def predict(model, x):
    """Return the predicted class index for every example in `x`."""
    return [tf.argmax(p).numpy() for p in model(x)]
| StarcoderdataPython |
3393953 | <filename>batchnormlstm/layers.py
import numpy as np
""" basics """
def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    return np.clip(x, 0, None)
def tanh(x):
    """Element-wise hyperbolic tangent (thin wrapper around np.tanh)."""
    return np.tanh(x)
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
""" layers """
def affine_forward(x, w, b):
    """Forward pass for a fully-connected (affine) layer.

    Each example of x (N, d_1, ..., d_k) is flattened to a row of length
    D = d_1 * ... * d_k, then mapped to an output of dimension M.

    Inputs:
    - x: input data of shape (N, d_1, ..., d_k)
    - w: weights of shape (D, M)
    - b: biases of shape (M,)

    Returns:
    - out: scores of shape (N, M)
    - cache: (x, w, b) for the backward pass
    """
    rows = x.reshape(x.shape[0], -1)   # (N, D)
    out = rows.dot(w) + b              # (N, M)
    return out, (x, w, b)
def affine_backward(dout, cache):
    """Backward pass for a fully-connected (affine) layer.

    Inputs:
    - dout: upstream derivative, shape (N, M)
    - cache: (x, w, b) saved by affine_forward

    Returns:
    - dx: gradient w.r.t. x, shape (N, d_1, ..., d_k)
    - dw: gradient w.r.t. w, shape (D, M)
    - db: gradient w.r.t. b, shape (M,)
    Gradients are summed over the N examples in the batch.
    """
    x, w, b = cache
    rows = x.reshape(x.shape[0], -1)      # (N, D)
    dx = dout.dot(w.T).reshape(x.shape)   # back to the input shape
    dw = rows.T.dot(dout)                 # (D, M)
    db = dout.sum(axis=0)                 # (M,)
    return dx, dw, db
def relu_forward(x):
    """Forward pass for rectified linear units.

    Returns:
    - out: max(x, 0), same shape as x
    - cache: the raw input x, needed by relu_backward
    """
    out = np.maximum(x, 0)
    return out, x
def relu_backward(dout, cache):
    """Backward pass for ReLU: the gradient flows only where x > 0.

    Inputs:
    - dout: upstream derivatives, any shape
    - cache: input x of the forward pass, same shape as dout
    Returns dx.
    """
    x = cache
    return dout * (x > 0)
def batchnorm_forward(x, gamma, beta, bn_param):
    """Forward pass for batch normalization.

    In 'train' mode the (biased) minibatch mean/variance normalize x and the
    exponentially-decaying running averages are updated in place inside
    bn_param; in 'test' mode the stored running averages are used instead.

    Inputs:
    - x: data of shape (N, D)
    - gamma: scale parameter of shape (D,)
    - beta: shift parameter of shape (D,)
    - bn_param: dict with 'mode' ('train' | 'test'; required) and optional
      'eps', 'momentum', 'running_mean', 'running_var'

    Returns:
    - out: normalized, scaled and shifted data of shape (N, D)
    - cache: values needed by batchnorm_backward (None in 'test' mode)
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)
    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
    out, cache = None, None
    if mode == 'train':
        # Normalize with the minibatch statistics (np.var is the biased,
        # uncorrected variance).
        sample_mean = np.mean(x, axis=0)  # (D,)
        sample_var = np.var(x, axis=0)  # (D,)
        x_norm = (x - sample_mean) / np.sqrt(sample_var + eps)  # (N, D)
        out = gamma * x_norm + beta  # (N, D)
        cache = (x_norm, gamma, beta, sample_mean, sample_var, x, eps)
        # Exponential moving averages used at test time.
        running_mean = momentum * running_mean + (1 - momentum) * sample_mean
        running_var = momentum * running_var + (1 - momentum) * sample_var
    elif mode == 'test':
        # Normalize with the stored running statistics.
        x_norm = (x - running_mean) / np.sqrt(running_var + eps)
        out = gamma * x_norm + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var
    return out, cache
def batchnorm_backward(dout, cache):
    """Backward pass for batch normalization (computation-graph derivation).

    Inputs:
    - dout: upstream derivatives, shape (N, D)
    - cache: tuple saved by batchnorm_forward in 'train' mode

    Returns:
    - dx: gradient w.r.t. x, shape (N, D)
    - dgamma: gradient w.r.t. gamma, shape (D,)
    - dbeta: gradient w.r.t. beta, shape (D,)
    """
    dx, dgamma, dbeta = None, None, None
    x_norm, gamma, beta, sample_mean, sample_var, x, eps = cache
    x_mu = x - sample_mean
    std_mod = (sample_var + eps) ** -0.5  # 1 / std per feature
    N, _ = x.shape
    dx_norm = dout * gamma  # (N, D)
    # Gradients w.r.t. the intermediate batch statistics.
    dsample_var = -0.5 * np.sum(dx_norm * x_mu, axis=0) * std_mod ** 3  # (D,)
    dsample_mean = -1. * np.sum(dx_norm * std_mod, axis=0) - 2./N * dsample_var * np.sum(x_mu, axis=0)  # (D,)
    dx = dx_norm * std_mod + 2./N * dsample_var * x_mu + 1./N * dsample_mean
    dgamma = np.sum(dout * x_norm, axis=0)
    dbeta = np.sum(dout, axis=0)
    return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
    """Simplified backward pass for batch normalization.

    Same inputs and outputs as batchnorm_backward, but dx is computed from a
    single closed-form expression derived on paper; accepts the same cache
    tuple (some entries unused).
    """
    dx, dgamma, dbeta = None, None, None
    x_norm, gamma, beta, sample_mean, sample_var, x, eps = cache
    x_mu = x - sample_mean
    std_mod = (sample_var + eps) ** -0.5  # 1 / std per feature
    N, _ = x.shape
    dx_norm = dout * gamma  # (N, D)
    dx = std_mod * (dx_norm - 1./N * (x_mu * np.sum(dx_norm * x_mu * std_mod ** 2, axis=0) + np.sum(dx_norm, axis=0)))
    dgamma = np.sum(dout * x_norm, axis=0)
    dbeta = np.sum(dout, axis=0)
    return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
    """Forward pass for (inverted) dropout.

    Note: as implemented, ``p`` acts as the probability of *keeping* a unit
    (the mask keeps entries where rand() < p), and kept activations are
    scaled by 1/p so no rescaling is needed at test time.

    Inputs:
    - x: input data, any shape
    - dropout_param: dict with 'p', 'mode' ('train' | 'test') and an
      optional 'seed' to make the mask deterministic

    Returns (out, cache) where cache is (dropout_param, mask); mask is None
    in test mode.
    """
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])
    mask = None
    out = None
    if mode == 'train':
        # Keep with probability p, scale survivors by 1/p.
        mask = (np.random.rand(*x.shape) < p) / p
        out = x * mask
    elif mode == 'test':
        out = x
    out = out.astype(x.dtype, copy=False)
    return out, (dropout_param, mask)
def dropout_backward(dout, cache):
    """Backward pass for (inverted) dropout.

    Inputs:
    - dout: upstream derivatives, any shape
    - cache: (dropout_param, mask) saved by dropout_forward
    Returns dx.
    """
    dropout_param, mask = cache
    mode = dropout_param['mode']
    if mode == 'train':
        # Gradient flows only through the kept (and rescaled) units.
        return dout * mask
    if mode == 'test':
        return dout
    return None
def conv_forward_naive(x, w, b, conv_param):
    """Naive forward pass for a convolutional layer.

    Inputs:
    - x: input data of shape (N, C, H, W)
    - w: filter weights of shape (F, C, HH, WW)
    - b: biases of shape (F,)
    - conv_param: dict with 'stride' and 'pad' (zero padding on H and W)

    Returns:
    - out: shape (N, F, H', W') with H' = 1 + (H + 2*pad - HH) / stride and
      W' = 1 + (W + 2*pad - WW) / stride
    - cache: (x, w, b, conv_param)
    """
    stride, pad = conv_param['stride'], conv_param['pad']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    x_padded = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    out_h = int(1 + (H + 2 * pad - HH) / stride)
    out_w = int(1 + (W + 2 * pad - WW) / stride)
    out = np.zeros((N, F, out_h, out_w))
    for n in range(N):                       # each image
        for f in range(F):                   # each filter
            for r in range(out_h):
                for c in range(out_w):
                    top, left = r * stride, c * stride
                    window = x_padded[n, :, top:top + HH, left:left + WW]
                    out[n, f, r, c] = np.sum(window * w[f]) + b[f]
    return out, (x, w, b, conv_param)
def conv_backward_naive(dout, cache):
    """Naive backward pass for a convolutional layer.

    Inputs:
    - dout: upstream derivatives of shape (N, F, H', W')
    - cache: (x, w, b, conv_param) saved by conv_forward_naive

    Returns:
    - dx: gradient w.r.t. x, shape (N, C, H, W)
    - dw: gradient w.r.t. w, shape (F, C, HH, WW)
    - db: gradient w.r.t. b, shape (F,)
    """
    x, w, b, conv_param = cache
    pad = conv_param['pad']
    stride = conv_param['stride']
    F, C, HH, WW = w.shape
    N, C, H, W = x.shape
    # Bug fix: output extents must be ints -- true division produced floats
    # here, so the range() calls below raised TypeError in Python 3.
    H_new = int(1 + (H + 2 * pad - HH) / stride)
    W_new = int(1 + (W + 2 * pad - WW) / stride)
    dx = np.zeros_like(x)
    dw = np.zeros_like(w)
    db = np.zeros_like(b)
    s = stride
    x_padded = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
    dx_padded = np.pad(dx, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
    for i in range(N):  # ith image
        for f in range(F):  # fth filter
            for j in range(H_new):
                for k in range(W_new):
                    window = x_padded[i, :, j*s:HH+j*s, k*s:WW+k*s]
                    db[f] += dout[i, f, j, k]
                    dw[f] += window * dout[i, f, j, k]
                    dx_padded[i, :, j*s:HH+j*s, k*s:WW+k*s] += w[f] * dout[i, f, j, k]
    # Strip the padding to recover gradients w.r.t. the original input.
    dx = dx_padded[:, :, pad:pad+H, pad:pad+W]
    return dx, dw, db
def max_pool_forward_naive(x, pool_param):
    """Naive forward pass for a max-pooling layer.

    Inputs:
    - x: input data of shape (N, C, H, W)
    - pool_param: dict with 'pool_height', 'pool_width', 'stride'

    Returns:
    - out: pooled data of shape (N, C, H', W')
    - cache: (x, pool_param)
    """
    ph, pw = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    out_h = int(1 + (H - ph) / stride)
    out_w = int(1 + (W - pw) / stride)
    out = np.zeros((N, C, out_h, out_w))
    for n in range(N):
        for c in range(C):
            for r in range(out_h):
                for q in range(out_w):
                    top, left = r * stride, q * stride
                    out[n, c, r, q] = np.max(x[n, c, top:top + ph, left:left + pw])
    return out, (x, pool_param)
def max_pool_backward_naive(dout, cache):
    """Naive backward pass for a max-pooling layer.

    Routes each upstream gradient to the location(s) of the maximum inside
    its pooling window (ties all receive the gradient).

    Inputs:
    - dout: upstream derivatives of shape (N, C, H', W')
    - cache: (x, pool_param) saved by max_pool_forward_naive
    Returns dx with the same shape as x.
    """
    x, pool_param = cache
    HH, WW = pool_param['pool_height'], pool_param['pool_width']
    s = pool_param['stride']
    N, C, H, W = x.shape
    H_new = int(1 + (H - HH) / s)
    W_new = int(1 + (W - WW) / s)
    dx = np.zeros_like(x)
    for i in range(N):
        for j in range(C):
            for k in range(H_new):
                for l in range(W_new):
                    window = x[i, j, k*s:HH+k*s, l*s:WW+l*s]
                    m = np.max(window)
                    # Bug fix: accumulate with += so gradients from
                    # overlapping windows (stride < pool size) are summed
                    # instead of overwriting one another.
                    dx[i, j, k*s:HH+k*s, l*s:WW+l*s] += (window == m) * dout[i, j, k, l]
    return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """Spatial batch normalization: normalize each channel over N, H and W.

    Inputs:
    - x: data of shape (N, C, H, W)
    - gamma, beta: per-channel scale/shift of shape (C,)
    - bn_param: same dict as for batchnorm_forward
    Returns (out, cache) with out of shape (N, C, H, W).
    """
    N, C, H, W = x.shape
    # Move channels last and flatten so each channel is one feature column.
    flat = x.transpose(0, 2, 3, 1).reshape(N * H * W, C)
    normed, cache = batchnorm_forward(flat, gamma, beta, bn_param)
    out = normed.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return out, cache
def spatial_batchnorm_backward(dout, cache):
    """Backward pass for spatial batch normalization.

    Inputs:
    - dout: upstream derivatives of shape (N, C, H, W)
    - cache: values from spatial_batchnorm_forward
    Returns (dx, dgamma, dbeta); dx has shape (N, C, H, W), the rest (C,).
    """
    N, C, H, W = dout.shape
    # Undo the same channels-last flattening used in the forward pass.
    flat = dout.transpose(0, 2, 3, 1).reshape(N * H * W, C)
    dflat, dgamma, dbeta = batchnorm_backward(flat, cache)
    dx = dflat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta
# new layers
def tanh_forward(x):
    """Forward pass for tanh units.

    Returns (out, cache); cache is tanh(x) itself, which is all the
    backward pass needs.
    """
    out = np.tanh(x)
    return out, out
def tanh_backward(dout, cache):
    """Backward pass for tanh: dx = (1 - tanh(x)^2) * dout.

    Inputs:
    - dout: upstream derivatives, any shape
    - cache: tanh(x) saved by tanh_forward, same shape as dout
    """
    tanh_x = cache
    return dout * (1 - tanh_x * tanh_x)
def sigmoid_forward(x):
    """Forward pass for sigmoid units.

    Returns (out, cache); cache is sigmoid(x) itself, which is all the
    backward pass needs.
    """
    out = 1 / (1 + np.exp(-x))
    return out, out
def sigmoid_backward(dout, cache):
    """Backward pass for sigmoid: dx = s * (1 - s) * dout, s = sigmoid(x).

    Inputs:
    - dout: upstream derivatives, any shape
    - cache: sigmoid(x) saved by sigmoid_forward, same shape as dout
    """
    s = cache
    return dout * s * (1 - s)
def lstm_forward_unit(x, w, u, b, h_prev=None, c_prev=None):
    """One time step of a long short-term memory layer.

    Inputs:
    - x: input data of shape (N, d_1, ..., d_k), flattened to (N, D)
    - w: input-to-gate weights of shape (D, 4M)
    - u: state-to-gate weights of shape (M, 4M)
    - b: gate biases of shape (4M,)
    - h_prev, c_prev: previous hidden / cell state of shape (N, M); default
      to zeros on the first step

    Returns:
    - h: new hidden state, shape (N, M)
    - c: new cell state, shape (N, M)
    - cache: (x, h_prev, c_prev, tanh_c, g, w, u) for lstm_backward_unit,
      where g concatenates the gate activations (a, i, f, o) along axis 1
    """
    N = np.shape(x)[0]
    M = np.shape(u)[0]
    h_prev = np.zeros((N, M)) if h_prev is None else h_prev
    c_prev = np.zeros((N, M)) if c_prev is None else c_prev
    h, c = None, None
    z = x.reshape(np.shape(x)[0], -1)  # (N, D)
    mid = np.dot(z, w) + np.dot(h_prev, u) + b  # gate pre-activations, (N, 4M)
    # Gate order along the 4M axis: candidate a, input i, forget f, output o.
    a = tanh(mid[:, :M])  # N*M
    i = sigmoid(mid[:, M:(2*M)])  # N*M
    f = sigmoid(mid[:, (2*M):(3*M)])  # N*M
    o = sigmoid(mid[:, (3*M):(4*M)])  # N*M
    g = np.concatenate((a, i, f, o), axis=1)  # N*4M
    c = a * i + f * c_prev  # N*M
    tanh_c = tanh(c)
    h = tanh_c * o  # N*M
    cache = (x, h_prev, c_prev, tanh_c, g, w, u)
    return h, c, cache
def lstm_backward_unit(dout, cache, dh=None, dc_next=None, f_next=None):
    """Backward pass for one LSTM time step.

    Inputs:
    - dout: upstream derivative w.r.t. this step's hidden state, (N, M)
    - cache: (x, h_prev, c_prev, tanh_c, g, w, u) from lstm_forward_unit
    - dh: hidden-state gradient flowing back from the next step, (N, M)
    - dc_next: cell-state gradient of the next step, (N, M)
    - f_next: forget-gate values of the next step, (N, M)
    (the three time inputs default to zeros at the last time step)

    Returns (dx, dw, du, db, dh_prev, dc, f); the last three form the "time
    cache" to pass to the previous step as (dh, dc_next, f_next).
    """
    x, h_prev, c_prev, tanh_c, g, w, u = cache
    N, M = np.shape(h_prev)
    # Unpack gate activations in the order saved by the forward pass.
    a = g[:, :M]
    i = g[:, M:(2*M)]
    f = g[:, (2*M):(3*M)]
    o = g[:, (3*M):(4*M)]
    dh = np.zeros((N, M)) if dh is None else dh
    dc_next = np.zeros((N, M)) if dc_next is None else dc_next
    f_next = np.zeros((N, M)) if f_next is None else f_next
    dx, dw, du, db = None, None, None, None
    dy = dout + dh  # total hidden-state gradient, (N, M)
    dc = dy * o * (1 - tanh_c ** 2) + dc_next * f_next  # N*M
    # Gate pre-activation gradients (tanh' for a; sigmoid' for i, f, o).
    da = dc * i * (1 - a ** 2)
    di = dc * a * i * (1 - i)
    df = dc * c_prev * f * (1 - f)
    do = dy * tanh_c * o * (1 - o)
    dg = np.concatenate((da, di, df, do), axis=1)  # N*4M
    shapes = np.shape(x)
    z = x.reshape(N, -1)
    dx = np.dot(dg, w.T).reshape(shapes)  # N*d1*...*dk
    dh_prev = np.dot(dg, u.T)  # N*M
    dw = np.dot(z.T, dg)  # D*4M
    du = np.dot(h_prev.T, dg)  # M*4M
    db = np.dot(np.ones(N), dg)  # 4M
    # Gradients are summed over the N examples in the batch.
    # update: dw, du, db | time cache: dh_prev, dc, f | space cache: dx
    return dx, dw, du, db, dh_prev, dc, f
""" loss """
def svm_loss(x, y):
    """Multiclass SVM (hinge) loss and gradient.

    Inputs:
    - x: scores of shape (N, C)
    - y: labels of shape (N,), with 0 <= y[i] < C
    Returns (loss, dx).
    """
    N = x.shape[0]
    correct = x[np.arange(N), y]
    margins = np.maximum(0, x - correct[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    # Each violated margin pushes down on the correct class score.
    dx[np.arange(N), y] -= np.sum(margins > 0, axis=1)
    dx /= N
    return loss, dx
def softmax_loss(x, y):
    """Softmax (cross-entropy) loss and its gradient.

    Inputs:
    - x: Scores, of shape (N, C); x[i, j] is the score of class j for input i.
    - y: Labels, of shape (N,), with 0 <= y[i] < C.

    Returns a tuple of:
    - loss: Scalar cross-entropy loss averaged over the N inputs.
    - dx: Gradient of the loss with respect to x, same shape as x.
    """
    num_train = x.shape[0]
    rows = np.arange(num_train)
    # Shift scores so the largest is 0 before exponentiating (numerical stability).
    shifted = x - x.max(axis=1, keepdims=True)
    probs = np.exp(shifted)
    probs /= probs.sum(axis=1, keepdims=True)
    loss = -np.log(probs[rows, y]).sum() / num_train
    # d(loss)/d(score) = softmax - one_hot(y), averaged over the batch.
    dx = probs.copy()
    dx[rows, y] -= 1
    dx /= num_train
    return loss, dx
# new loss
def mse_loss(x, y, w=None):
    """Weighted mean-squared-error loss and its gradient.

    loss = mean_i( sum_j w[j] * (x[i, j] - y[i, j])**2 / F )

    Inputs:
    - x: Predictions, of shape (N, F).
    - y: True outputs, of shape (N, F).
    - w: Optional per-feature weights, of shape (F,). Defaults to all ones
      (plain MSE).

    Returns a tuple of:
    - loss: Scalar giving the loss.
    - dx: Gradient of the loss with respect to x, of shape (N, F).
    """
    N, F = np.shape(x)
    if w is None:
        w = np.ones(F)
    err = x - y  # N*F residuals
    loss = np.mean(np.dot(err ** 2, w / F))
    # d(loss)/dx = 2 * w * err / (F * N). The previous code used ``w ** 0.5``
    # here, which made the gradient inconsistent with the loss whenever a
    # non-uniform weight vector was supplied (identical for the default w=1).
    dx = 2. / F * w * err
    dx /= N
    return loss, dx
| StarcoderdataPython |
1682161 | <reponame>BlueCapacitor/Tree-Of-Life
'''
Created on Mar 1, 2019
@author: gosha
'''
from math import cos, pi
import turtle
import tkinter as tk
canvasPosition = [0, 0]
zoom = 1
class Tree(object):
    """Builds and renders a progressively-joined tree of organisms.

    ``branches`` starts as a flat list of the table's rows (organism names)
    and is nested by successive :meth:`link` calls, so the final structure is
    a list of nested lists whose leaves are organism names.
    """

    def __init__(self, table):
        # table: score-table object; must expose ``rows`` and ``getBest``.
        self.branches = list(table.rows)
        self.table = table

    def link(self, orgs):
        """Merge the top-level branches containing each organism in ``orgs`` into one new branch."""
        branch = []
        for org in orgs:
            # Locate the top-level branch that (recursively) contains this organism.
            tempBranch = self.branches[recFind(self.branches, org)[0]]
            if(type(tempBranch) == str):
                # Bare leaf: wrap it so every merged element is a list.
                tempBranch = [tempBranch]
            else:
                tempBranch = list(tempBranch)
            # Remove the old top-level entry before collecting the copy.
            del self.branches[recFind(self.branches, org)[0]]
            branch.append(tempBranch)
        self.branches.append(branch)

    def generate(self, combine = True, colors = None):
        """Repeatedly link the best-scoring group until everything shares one branch.

        ``combine`` and ``colors`` are accepted but unused here.
        """
        def score(v, r, c):
            # Lower table values are better, so negate for "best = max".
            return(-v)
        def condition(v, r, c):
            # Only consider pairs that are not already in the same top-level branch.
            return(recFind(self.branches, r)[0] != recFind(self.branches, c)[0])
        nextOrgs = self.table.getBest(score, condition = condition, multiple = True)
        while(nextOrgs != None):
            self.link(nextOrgs)
            nextOrgs = self.table.getBest(score, condition = condition, multiple = True)

    def draw(self, master, colors, row = 0, column = 0, width = 1440, height = 800, size = 3):
        """Render the tree with turtle graphics inside a Tk frame.

        Side effects: creates module-level ``window``, ``canvas`` and
        ``speed``, resets ``canvasPosition``, and binds arrow/p/r keys on
        ``master``. ``size`` selects one of three layout densities.
        """
        global defaultScreenSize, window, canvasPosition, speed, canvas
        window = tk.Frame(width = width, height = height, relief = tk.RAISED, borderwidth = 3)
        # window.grid(row = row, column = column)
        window.pack()
        # window.geometry("1440x800")
        # window.resizable(True, True)
        speed = 16
        # NOTE(review): relies on ``turtle.Canvas`` being available on the
        # turtle module -- confirm; ``tk.Canvas``/``turtle.ScrolledCanvas``
        # are the documented spellings.
        canvas = turtle.Canvas(window, width = 4096, height = 2048)
        canvasPosition = [0, height / 2 - 1024]
        move(0, 0)
        t = turtle.RawTurtle(canvas)
        t.speed(0)
        # NOTE(review): Up is bound to down() and Down to up() -- looks like
        # deliberate scroll inversion; confirm.
        master.bind("<KeyPress-Up>", down)
        master.bind("<KeyPress-Down>", up)
        master.bind("<KeyPress-Right>", right)
        master.bind("<KeyPress-Left>", left)
        master.bind("<KeyPress-p>", printCenter)
        master.bind("<KeyPress-r>", resetPosition)
        print(self.branches)
        t.hideturtle()
        # Smaller ``size`` values produce a more compact layout.
        if(size == 1):
            drawBranch(t, self.branches[0], -2048, 0, 16, 32, 1, deepestLevel(self.branches) - 2, colors = colors)
        elif(size == 2):
            drawBranch(t, self.branches[0], -2048, 0, 24, 64, 1, deepestLevel(self.branches) - 2, colors = colors)
        else:
            drawBranch(t, self.branches[0], -2048, 0, 32, 64, 2, deepestLevel(self.branches) - 2, colors = colors)
def drawBranch(t, b, x, y, height, length, step, distanceFromDL, size = 2, fontSize = 16, colors = None):
    """Recursively draw branch ``b`` with turtle ``t`` starting at (x, y).

    ``height`` is the vertical space reserved per leaf, ``length`` the
    horizontal run per tree level, ``step`` the curve-sampling step, and
    ``distanceFromDL`` the remaining depth to the deepest level (leaf lines
    are stretched by it so all labels line up on the right).
    """
    t.width(size)
    # Total vertical space this branch occupies, centred on y.
    space = height * recCount(b)
    s = y - space / 2
    for branchNumber in range(len(b)):
        # Centre each child inside its own share of the vertical space.
        branchY = s + height * recCount(b[branchNumber]) / 2
        s += height * recCount(b[branchNumber])
        branchX = x + length
        t.up()
        t.goto(x, y)
        t.down()
        if(colors == None):
            t.color("Black")
        else:
            t.color(branchColor(b[branchNumber], colors))
        if((type(b[branchNumber]) in [list, tuple, map, range]) and not(len(b[branchNumber]) == 0)):
            # Inner node: curve to the child's anchor point and recurse.
            sCurve(t, x, branchX, y, branchY, step)
            drawBranch(t, b[branchNumber], branchX, branchY, height, length, step, distanceFromDL - 1, size, fontSize, colors)
        elif(type(b[branchNumber]) in [list, tuple, map, range]):
            # NOTE(review): this branch only runs for zero-length containers,
            # where ``[0]`` below would raise IndexError -- confirm intent.
            t.fd(distanceFromDL * length)
            t.up()
            t.setx(t.position()[0] + fontSize / 2)
            t.sety(t.position()[1] - fontSize / 2)
            t.write(str(b[branchNumber][0]), font = ("Arial", fontSize, "normal"))
        else:
            # Leaf: straight line plus the organism's label.
            t.fd(distanceFromDL * length)
            t.up()
            t.setx(t.position()[0] + fontSize / 2)
            t.sety(t.position()[1] - fontSize / 2)
            t.write(str(b[branchNumber]), font = ("Arial", fontSize, "normal"))
def sCurve(t, x0, x1, y0, y1, step):
    """Move turtle ``t`` along a cosine-eased S-curve from (x0, y0) to (x1, y1).

    Samples x every ``step`` pixels; the cosine ramp gives zero slope at both
    endpoints.
    """
    half_drop = (y0 - y1) / 2
    for x in range(round(x0), round(x1), step):
        phase = (x - x0) * pi / (x1 - x0)
        t.goto(x, (cos(phase) - 1) * half_drop + y0)
def recFind(iterable, o):
    """Depth-first search ``iterable`` for the value ``o``.

    Returns the path of indices leading to the first occurrence (e.g.
    ``[1, 0]`` means ``iterable[1][0] == o``), or ``None`` when ``o`` is
    absent. Fixes over the previous version: idiomatic ``enumerate`` instead
    of ``range(len(...))``, ``is not None`` instead of ``!= None``, an
    explicit ``return None``, and no redundant ``list()`` copy.
    """
    for index, item in enumerate(iterable):
        if item == o:
            return [index]
        if type(item) in (list, tuple, map, range):
            result = recFind(item, o)
            if result is not None:
                return [index] + result
    return None
def recCount(item):
    """Return the number of leaves in a (possibly nested) branch structure."""
    if type(item) not in (list, tuple, map, range):
        return 1
    return sum(recCount(child) for child in item)
def branchColor(b, colors):
    """Resolve the display colour of a branch.

    A leaf maps through ``colors`` (defaulting to "Black"); a nested branch
    keeps its children's common colour, or "Black" when they disagree.
    """
    if type(b) not in (list, tuple, map, range):
        return colors.get(b, "Black")
    first = branchColor(b[0], colors)
    for child in b[1:]:
        if branchColor(child, colors) != first:
            return "Black"
    return first
def deepestLevel(item):
    """Return the nesting depth of a branch structure (a bare leaf is 0)."""
    if type(item) not in (list, tuple, map, range):
        return 0
    return 1 + max(deepestLevel(child) for child in item)
def move(x, y):
    """Scroll the drawing canvas by (x, y) key-press units.

    Subtracts the requested direction times ``speed`` (pixels per press) from
    the stored offset and re-places the canvas. Relies on the module-level
    ``canvas``/``canvasPosition``/``speed`` set up by :meth:`Tree.draw`.
    """
    global canvasPosition, canvas, t, speed
    canvasPosition[0] -= x * speed
    canvasPosition[1] -= y * speed
    canvas.place(x = canvasPosition[0], y = canvasPosition[1])
def up(_):
    # Key handler (event arg unused): scroll one step along +y.
    # Bound to the Down key in Tree.draw.
    move(0, 1)
def down(_):
    # Key handler (event arg unused): scroll one step along -y.
    # Bound to the Up key in Tree.draw.
    move(0, -1)
def right(_):
    # Key handler (event arg unused): scroll one step along +x.
    # Bound to the Right key in Tree.draw.
    move(1, 0)
def left(_):
    # Key handler (event arg unused): scroll one step along -x.
    # Bound to the Left key in Tree.draw.
    move(-1, 0)
def resetPosition(_):
    """Reset the canvas back to its initial offset (bound to the 'r' key).

    NOTE(review): hard-codes the default window height of 800; drifts if
    Tree.draw was given a different ``height`` -- confirm.
    """
    global canvasPosition
    canvasPosition = [0, 800 / 2 - 1024]
    move(0, 0)
    print("Reset")
def printCenter(_):
    """Debug helper: print the current canvas offset (bound to the 'p' key)."""
    global canvasPosition
    print(canvasPosition)
| StarcoderdataPython |
# Single-object DeepIM config for the LineMOD "camera" object: inherits every
# training setting from the "ape" base config and overrides only the output
# directory and the train/test dataset splits.
_base_ = "./FlowNet512_1.5AugCosyAAEGray_Aggressive_Flat_Pbr_01_ape.py"
OUTPUT_DIR = "output/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_Aggressive_Flat_lmPbr_SO/camera"
DATASETS = dict(TRAIN=("lm_pbr_camera_train",), TEST=("lm_real_camera_test",))
# bbnc7
# objects camera Avg(1)
# ad_2 30.20 30.20
# ad_5 86.57 86.57
# ad_10 99.02 99.02
# rete_2 41.67 41.67
# rete_5 97.55 97.55
# rete_10 100.00 100.00
# re_2 41.67 41.67
# re_5 97.55 97.55
# re_10 100.00 100.00
# te_2 99.51 99.51
# te_5 100.00 100.00
# te_10 100.00 100.00
# proj_2 81.96 81.96
# proj_5 99.22 99.22
# proj_10 100.00 100.00
# re 2.35 2.35
# te 0.00 0.00
| StarcoderdataPython |
3219006 | """CrowdStrike S3 Bucket Protection with QuickScan.
Creation date: 09.01.21 - <EMAIL>
Modification: 12.21.21 - <EMAIL>
"""
import io
import os
import time
import logging
import urllib.parse
import json
import boto3
from botocore.exceptions import ClientError
# FalconPy SDK - Auth, Sample Uploads and Quick Scan
from falconpy import OAuth2, SampleUploads, QuickScan # pylint: disable=E0401
from functions import generate_manifest, send_to_security_hub
# Maximum file size for scan (35mb)
MAX_FILE_SIZE = 36700160
# Log config
log = logging.getLogger()
log.setLevel(logging.INFO)
# Boto handlers
s3 = boto3.resource('s3')
ssm = boto3.client('ssm')
# Current region
region = os.environ.get('AWS_REGION')
# Mitigate threats? json.loads("true"/"false") turns the env string into a real bool.
MITIGATE = bool(json.loads(os.environ.get("MITIGATE_THREATS", "TRUE").lower()))
# Base URL for the CrowdStrike API (defaults to api.crowdstrike.com).
try:
    BASE_URL = os.environ["BASE_URL"]
except KeyError:
    BASE_URL = "https://api.crowdstrike.com"
# Grab our SSM parameter store variable names from the environment if they exist
try:
    CLIENT_ID_PARAM_NAME = os.environ["CLIENT_ID_PARAM"]
except KeyError:
    CLIENT_ID_PARAM_NAME = "BUCKET_SCAN_CLIENT_ID"
try:
    CLIENT_SEC_PARAM_NAME = os.environ["CLIENT_SECRET_PARAM"]
except KeyError:
    CLIENT_SEC_PARAM_NAME = "BUCKET_SCAN_CLIENT_SECRET"
# Grab our Falcon API credentials from SSM Parameter Store
try:
    ssm_response = ssm.get_parameters(Names=[CLIENT_ID_PARAM_NAME, CLIENT_SEC_PARAM_NAME],
                                      WithDecryption=True
                                      )
    # NOTE(review): assumes get_parameters returns the two parameters in the
    # requested order -- confirm; a name-keyed lookup would be safer.
    client_id = ssm_response['Parameters'][0]['Value']
    client_secret = ssm_response['Parameters'][1]['Value']
except IndexError as no_creds:
    raise SystemExit("Unable to retrieve CrowdStrike Falcon API credentials.") from no_creds
except KeyError as bad_creds:
    raise SystemExit("Unable to retrieve CrowdStrike Falcon API credentials.") from bad_creds
# Authenticate to the CrowdStrike Falcon API
auth = OAuth2(creds={
    "client_id": client_id,
    "client_secret": client_secret
    }, base_url=BASE_URL)
# Connect to the Samples Sandbox API
Samples = SampleUploads(auth_object=auth)
# Connect to the Quick Scan API
Scanner = QuickScan(auth_object=auth)
# Main routine
def lambda_handler(event, _):  # pylint: disable=R0912,R0914,R0915
    """Lambda execution entry point.

    Triggered by an S3 ObjectCreated event: uploads the new object to the
    CrowdStrike QuickScan sandbox, polls for a verdict, and (when MITIGATE is
    enabled) deletes the object from the bucket if malware is found, reporting
    the detection to Security Hub. Returns a human-readable status string.
    """
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    bucket = s3.Bucket(bucket_name)
    # S3 event keys are URL-encoded (e.g. spaces arrive as '+').
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    upload_file_size = int(
        bucket.Object(key=key).get()["ResponseMetadata"]["HTTPHeaders"]["content-length"]
    )
    if upload_file_size < MAX_FILE_SIZE:  # pylint: disable=R1702 # (6 is fine)
        try:
            filename = os.path.basename(key)
            response = Samples.upload_sample(file_name=filename,
                                             file_data=io.BytesIO(
                                                 bucket.Object(key=key).get()["Body"].read()
                                             )
                                             )
        except Exception as err:
            print(f"Error uploading object {key} from bucket {bucket_name} to Falcon X Sandbox. "
                  "Make sure your API key has the Sample Uploads permission.")
            raise err
        try:
            # Uploaded file unique identifier
            upload_sha = response["body"]["resources"][0]["sha256"]
            # Scan request ID, generated when the request for the scan is made
            scan_id = Scanner.scan_samples(body={"samples": [upload_sha]})["body"]["resources"][0]
            scanning = True
            # Loop until we get a result or the lambda times out
            while scanning:
                # Retrieve our scan using our scan ID
                scan_results = Scanner.get_scans(ids=scan_id)
                try:
                    if scan_results["body"]["resources"][0]["status"] == "done":
                        # Scan is complete, retrieve our results (there will be only one)
                        result = scan_results["body"]["resources"][0]["samples"][0]
                        # and break out of the loop
                        scanning = False
                    else:
                        # Not done yet, sleep for a bit
                        time.sleep(3)
                except IndexError:
                    # Results aren't populated yet, skip
                    pass
            if result["sha256"] == upload_sha:
                if "no specific threat" in result["verdict"]:
                    # File is clean
                    scan_msg = f"No threat found in {key}"
                    log.info(scan_msg)
                elif "unknown" in result["verdict"]:
                    if "error" in result:
                        # Error occurred
                        scan_msg = f"Scan error for {key}: {result['error']}"
                        log.info(scan_msg)
                    else:
                        # Undertermined scan failure
                        scan_msg = f"Unable to scan {key}"
                        log.info(scan_msg)
                elif "malware" in result["verdict"]:
                    # Mitigation would trigger from here
                    scan_msg = f"Verdict for {key}: {result['verdict']}"
                    detection = {}
                    detection["sha"] = upload_sha
                    detection["bucket"] = bucket_name
                    detection["file"] = key
                    log.warning(scan_msg)
                    threat_removed = False
                    if MITIGATE:
                        # Remove the threat
                        try:
                            threat = s3.Object(bucket_name, key)
                            threat.delete()
                            threat_removed = True
                        except ClientError as err:
                            log.warning("Unable to remove threat %s from bucket %s", key, bucket_name)
                            print(f"{err}")
                    else:
                        # Mitigation is disabled. Complain about this in the log.
                        log.warning("Threat discovered (%s). Mitigation disabled, threat persists in %s bucket.",
                                    key,
                                    bucket_name
                                    )
                    # Inform Security Hub of the threat and our mitigation status
                    manifest = generate_manifest(detection, region, threat_removed)
                    _ = send_to_security_hub(manifest, region)
                else:
                    # Unrecognized response
                    scan_msg = f"Unrecognized response ({result['verdict']}) received from API for {key}."
                    log.info(scan_msg)
            # Clean up the artifact in the sandbox
            response = Samples.delete_sample(ids=upload_sha)
            if response["status_code"] > 201:
                log.warning("Could not remove sample (%s) from sandbox.", key)
            # NOTE(review): scan_msg is only assigned inside the sha256-match
            # branch above -- a mismatch would raise NameError here (caught by
            # the broad except below); confirm that case cannot occur.
            return scan_msg
        except Exception as err:
            print(err)
            print(f"Error getting object {key} from bucket {bucket_name}. "
                  "Make sure they exist and your bucket is in the same region as this function.")
            raise err
    else:
        msg = f"File ({key}) exceeds maximum file scan size ({MAX_FILE_SIZE} bytes), skipped."
        log.warning(msg)
        return msg
# ██████
# ██ ██
# ██ ████
# ██ ██▓▓████░░
# ████▓▓░░██ ██░░░░
# ██▓▓░░░░██░░░░██░░░░░░
# ██▓▓░░░░██░░██▒▒▓▓██░░░░░░ Let's pour out
# ██▓▓░░░░ ░░██▒▒▓▓▓▓████░░░░ the spoiled bits.
# ██▓▓░░░░ ░░░░▒▒▓▓▓▓████ ░░░░░░
# ██░░░░ ░░░░▒▒▓▓▓▓████ ░░░░░░
# ██░░ ░░░░▒▒▒▒▓▓████ ░░░░
# ██░░░░▒▒▓▓▓▓████ ░░░░░░
# ██▒▒▓▓▓▓████ ░░░░
# ██▓▓████ ░░░░
# ████ ░░
| StarcoderdataPython |
176485 | <reponame>Ryearwood/Python-Chatbot
#!/usr/bin/python
# Import Tools Libraries
import numpy as np
import random
import pickle
import json
# Import Deep Learning Libraries
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
# Import Language Libraries
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# Load Data from saved file
# NOTE(review): the file handle is never closed; a ``with`` block would be tidier.
intents_file = open('intents.json').read()
intents = json.loads(intents_file)
# Preprocess the Loaded Data from json format
# words = all vocab
words = []
# classes = intent behind words
classes = []
# documents = combination between patterns and intents
documents = []
# Punctuation tokens excluded from the vocabulary.
ignore_letters = ['!','?',',','.']
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # Tokenize Words
        word = nltk.word_tokenize(pattern)
        words.extend(word)
        # Add to Corpus: (token list, intent tag) pair
        documents.append((word, intent['tag']))
        # Add to Class list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
print(documents)
# Lemmatize, create and save vocabulary to be used (lower-cased, deduplicated, sorted)
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_letters]
words = sorted(list(set(words)))
# Sort classes
classes = sorted(list(set(classes)))
print(len(documents),'Documents')
print(len(classes),'Classes', classes)
print(len(words),'Unique Lemmatized Words', words)
# Save data as pickle Files (reloaded later by the chat runtime)
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))
# Create training and output data storage
training = []
output_empty = [0]*len(classes)
# bag of words for each sentence - training
for doc in documents:
    bag = []
    word_patterns = doc[0]
    word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    # 1 if the vocabulary word appears in this pattern, else 0.
    for word in words:
        bag.append(1 if word in word_patterns else 0)
    # One-hot target: '1' for the current tag and '0' for every other tag.
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
random.shuffle(training)
# dtype=object is required: each row pairs a len(words) bag with a
# len(classes) one-hot row (ragged), and NumPy >= 1.24 raises on such input
# without it (older versions silently produced an object array anyway).
training = np.array(training, dtype=object)
train_x = list(training[:,0])
train_y = list(training[:,1])
print('Training Data Created')
# Neural network Model Layers: two hidden ReLU layers with dropout, softmax output
# sized to the number of intent classes.
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Compile Model: SGD with Nesterov
# NOTE(review): the ``lr`` argument was renamed ``learning_rate`` in newer
# Keras releases -- confirm the pinned version still accepts ``lr``.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# Train Model
history = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
# NOTE(review): the second positional argument of ``model.save`` is
# ``overwrite``; passing ``history`` here only works because it is truthy --
# confirm intent.
model.save('chatbot_model.h5', history)
print("model is trained")
147109 | <gh_stars>0
from rest_framework.serializers import ModelSerializer, PrimaryKeyRelatedField
from db_api.models import Item
from db_api.serializers.user import UserSerializer
from db_api.serializers.asset_bundle import AssetBundleSerializer
class ItemSerializer(ModelSerializer):
    """List/write serializer for ``Item``: related objects are exposed as primary keys."""
    # Owner is set server-side, so it is read-only for API clients.
    owner = PrimaryKeyRelatedField(read_only=True)

    class Meta:
        model = Item
        fields = (
            'id',
            'asset_bundle',
            'owner',
            'created_at'
        )
        read_only_fields = ('id',)
class ItemDetailSerializer(ModelSerializer):
    """Detail serializer for ``Item``: nests the full owner and asset bundle
    and includes like/comment data alongside timestamps."""
    owner = UserSerializer(read_only=True)
    asset_bundle = AssetBundleSerializer(read_only=True)

    class Meta:
        model = Item
        fields = (
            'id',
            'asset_bundle',
            'owner',
            'likes_count',
            'likes',
            'comments_count',
            'comments',
            'created_at',
            'updated_at',
        )
        read_only_fields = ('id',)
101223 | from django.contrib import admin
from .models import *
# Expose the visualization models in the Django admin with default ModelAdmin options.
admin.site.register(Visualization)
admin.site.register(Type)
admin.site.register(TypeToVisualization)
1676652 | import logging
from helpers import assert_redirect
import pytest
def test_login_flow(auth, client):
    """Anonymous users are redirected to login; logging in grants access to
    the index and logging out revokes it again."""
    assert_redirect(client.get('/'), '/auth/login')
    assert b"Sign In" in client.get('/auth/login').data
    assert_redirect(auth.login(), '/')
    assert client.get('/').status_code == 200
    assert_redirect(auth.logout(), '/auth/login')
    assert_redirect(client.get('/'), '/auth/login')
def test_registration_flow(auth, client):
    """A new account can be registered via the form and then used to log in."""
    assert_redirect(client.get('/'), '/auth/login')
    response = client.get('/auth/register')
    assert response.status_code == 200
    assert b'Register' in response.data
    # Successful registration redirects to the login page.
    response = client.post(
        '/auth/register',
        data={
            'username': 'other',
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>'
        })
    assert_redirect(response, '/auth/login')
    response = auth.login(email='<EMAIL>', password='<PASSWORD>')
    assert_redirect(response, '/')
    assert client.get('/').status_code == 200
def assert_flashes(client, expected_message, expected_category='message'):
    """Assert that the first flashed message contains ``expected_message``
    and carries ``expected_category``.

    Raises:
        AssertionError: when nothing has been flashed, or the message or
            category does not match.
    """
    with client.session_transaction() as session:
        try:
            category, message = session['_flashes'][0]
        except (KeyError, IndexError):
            # KeyError: nothing was ever flashed; IndexError: the flash list
            # exists but is empty (previously leaked an unhandled IndexError).
            raise AssertionError('nothing flashed')
        assert expected_message in message
        assert expected_category == category
@pytest.mark.parametrize(
    'data, error', [({
        'username': 'other',
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
        'password2': '<PASSWORD>'
    }, 'tried to register already exists'), ({
        'username': 'test',
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
        'password2': '<PASSWORD>'
    }, "tried to register, username already exist"), ({
        'username': 'bogus',
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
        'password2': '<PASSWORD>'
    }, 'tried to register but does not exist')],
    ids=['email-exists', 'username-exists', 'missing'])
def test_registration_fail(caplog, client, data, error):
    """Duplicate or invalid registrations re-render the form with an error
    and log an explanation matching ``error``."""
    caplog.set_level(logging.INFO)
    assert_redirect(client.get('/'), '/auth/login')
    response = client.get('/auth/register')
    assert response.status_code == 200
    assert b'Register' in response.data
    # The failed POST renders the form again (200) instead of redirecting.
    response = client.post('/auth/register', data=data)
    assert response.status_code == 200
    assert b'Please use a different ' in response.data
    # The expected explanation must appear in at least one captured log line.
    for record in caplog.records:
        if error in record.message:
            break
    else:
        raise AssertionError(f"Message {error!r} was never raised")
def test_login_fail(auth):
    """A wrong password bounces the user back to the login page."""
    response = auth.login(email='<EMAIL>', password='<PASSWORD>')
    assert_redirect(response, '/auth/login')
def test_login_unregistered(auth):
    """Logging in with an unknown account bounces back to the login page."""
    response = auth.login(email='<EMAIL>', password='<PASSWORD>')
    assert_redirect(response, '/auth/login')
def test_login_redirect(auth, client):
    """An authenticated user visiting login/register is sent home instead."""
    auth.login()
    assert_redirect(client.get('/auth/login'), '/')
    assert_redirect(client.get('/auth/register'), '/')
| StarcoderdataPython |
3393183 | from django.shortcuts import render
def home(request):
    """Render the static landing page with an empty template context."""
    return render(request, "principal/home.html", {})
| StarcoderdataPython |
81619 | <reponame>fuhrerguxez/telack
#!/usr/bin/env python
# pylint: disable=R0902,R0912,R0913
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Message."""
import sys
from datetime import datetime
from time import mktime
from telegram import (Audio, Contact, Document, Chat, Location, PhotoSize, Sticker, TelegramObject,
User, Video, Voice, Venue, MessageEntity, Game)
class Message(TelegramObject):
"""This object represents a Telegram Message.
Note:
* In Python `from` is a reserved word, use `from_user` instead.
Attributes:
message_id (int):
from_user (:class:`telegram.User`):
date (:class:`datetime.datetime`):
forward_from (:class:`telegram.User`):
forward_from_chat (:class:`telegram.Chat`):
forward_date (:class:`datetime.datetime`):
reply_to_message (:class:`telegram.Message`):
edit_date (:class:`datetime.datetime`):
text (str):
audio (:class:`telegram.Audio`):
document (:class:`telegram.Document`):
game (:class:`telegram.Game`):
photo (List[:class:`telegram.PhotoSize`]):
sticker (:class:`telegram.Sticker`):
video (:class:`telegram.Video`):
voice (:class:`telegram.Voice`):
caption (str):
contact (:class:`telegram.Contact`):
location (:class:`telegram.Location`):
new_chat_member (:class:`telegram.User`):
left_chat_member (:class:`telegram.User`):
new_chat_title (str):
new_chat_photo (List[:class:`telegram.PhotoSize`]):
delete_chat_photo (bool):
group_chat_created (bool):
supergroup_chat_created (bool):
migrate_to_chat_id (int):
migrate_from_chat_id (int):
channel_chat_created (bool):
Deprecated: 4.0
new_chat_participant (:class:`telegram.User`): Use `new_chat_member`
instead.
left_chat_participant (:class:`telegram.User`): Use `left_chat_member`
instead.
Args:
message_id (int):
from_user (:class:`telegram.User`):
date (:class:`datetime.datetime`):
chat (:class:`telegram.Chat`):
**kwargs: Arbitrary keyword arguments.
Keyword Args:
forward_from (Optional[:class:`telegram.User`]):
forward_from_chat (:class:`telegram.Chat`):
forward_date (Optional[:class:`datetime.datetime`]):
reply_to_message (Optional[:class:`telegram.Message`]):
edit_date (Optional[:class:`datetime.datetime`]):
text (Optional[str]):
audio (Optional[:class:`telegram.Audio`]):
document (Optional[:class:`telegram.Document`]):
game (Optional[:class:`telegram.Game`]):
photo (Optional[List[:class:`telegram.PhotoSize`]]):
sticker (Optional[:class:`telegram.Sticker`]):
video (Optional[:class:`telegram.Video`]):
voice (Optional[:class:`telegram.Voice`]):
caption (Optional[str]):
contact (Optional[:class:`telegram.Contact`]):
location (Optional[:class:`telegram.Location`]):
new_chat_member (Optional[:class:`telegram.User`]):
left_chat_member (Optional[:class:`telegram.User`]):
new_chat_title (Optional[str]):
new_chat_photo (Optional[List[:class:`telegram.PhotoSize`]):
delete_chat_photo (Optional[bool]):
group_chat_created (Optional[bool]):
supergroup_chat_created (Optional[bool]):
migrate_to_chat_id (Optional[int]):
migrate_from_chat_id (Optional[int]):
channel_chat_created (Optional[bool]):
bot (Optional[Bot]): The Bot to use for instance methods
"""
    def __init__(self,
                 message_id,
                 from_user,
                 date,
                 chat,
                 forward_from=None,
                 forward_from_chat=None,
                 forward_date=None,
                 reply_to_message=None,
                 edit_date=None,
                 text='',
                 entities=None,
                 audio=None,
                 document=None,
                 photo=None,
                 sticker=None,
                 video=None,
                 voice=None,
                 caption='',
                 contact=None,
                 location=None,
                 venue=None,
                 new_chat_member=None,
                 left_chat_member=None,
                 new_chat_title='',
                 new_chat_photo=None,
                 delete_chat_photo=False,
                 group_chat_created=False,
                 supergroup_chat_created=False,
                 migrate_to_chat_id=0,
                 migrate_from_chat_id=0,
                 channel_chat_created=False,
                 pinned_message=None,
                 bot=None,
                 **kwargs):
        """Store the message fields; see the class docstring for their meaning.

        ``game`` may only be supplied through ``**kwargs``.
        """
        # Required
        self.message_id = int(message_id)
        self.from_user = from_user
        self.date = date
        self.chat = chat
        # Optionals
        self.forward_from = forward_from
        self.forward_from_chat = forward_from_chat
        self.forward_date = forward_date
        self.reply_to_message = reply_to_message
        self.edit_date = edit_date
        self.text = text
        # Fresh list when no entities are given (avoids a shared mutable default).
        self.entities = entities or list()
        self.audio = audio
        # ``game`` is not a named parameter; it only arrives via kwargs.
        self.game = kwargs.get('game')
        self.document = document
        self.photo = photo
        self.sticker = sticker
        self.video = video
        self.voice = voice
        self.caption = caption
        self.contact = contact
        self.location = location
        self.venue = venue
        self.new_chat_member = new_chat_member
        self.left_chat_member = left_chat_member
        self.new_chat_title = new_chat_title
        self.new_chat_photo = new_chat_photo
        # Normalize flags and chat-migration ids to plain bool/int.
        self.delete_chat_photo = bool(delete_chat_photo)
        self.group_chat_created = bool(group_chat_created)
        self.supergroup_chat_created = bool(supergroup_chat_created)
        self.migrate_to_chat_id = int(migrate_to_chat_id)
        self.migrate_from_chat_id = int(migrate_from_chat_id)
        self.channel_chat_created = bool(channel_chat_created)
        self.pinned_message = pinned_message
        self.bot = bot
    @property
    def chat_id(self):
        """int: Short for :attr:`Message.chat.id` (the id of the chat this message belongs to)."""
        return self.chat.id
    @staticmethod
    def de_json(data, bot):
        """Deserialize a Bot API dict into a :class:`Message`.

        Args:
            data (dict): JSON-decoded message payload; falsy input returns ``None``.
            bot (telegram.Bot): Bot instance attached to the new object.

        Returns:
            telegram.Message:

        Note:
            Mutates ``data`` in place while converting nested payloads.
        """
        if not data:
            return None
        # Rename 'from' (a Python keyword) to 'from_user' and convert every
        # nested payload into its telegram object; timestamps become datetimes.
        data['from_user'] = User.de_json(data.get('from'), bot)
        data['date'] = datetime.fromtimestamp(data['date'])
        data['chat'] = Chat.de_json(data.get('chat'), bot)
        data['entities'] = MessageEntity.de_list(data.get('entities'), bot)
        data['forward_from'] = User.de_json(data.get('forward_from'), bot)
        data['forward_from_chat'] = Chat.de_json(data.get('forward_from_chat'), bot)
        data['forward_date'] = Message._fromtimestamp(data.get('forward_date'))
        data['reply_to_message'] = Message.de_json(data.get('reply_to_message'), bot)
        data['edit_date'] = Message._fromtimestamp(data.get('edit_date'))
        data['audio'] = Audio.de_json(data.get('audio'), bot)
        data['document'] = Document.de_json(data.get('document'), bot)
        data['game'] = Game.de_json(data.get('game'), bot)
        data['photo'] = PhotoSize.de_list(data.get('photo'), bot)
        data['sticker'] = Sticker.de_json(data.get('sticker'), bot)
        data['video'] = Video.de_json(data.get('video'), bot)
        data['voice'] = Voice.de_json(data.get('voice'), bot)
        data['contact'] = Contact.de_json(data.get('contact'), bot)
        data['location'] = Location.de_json(data.get('location'), bot)
        data['venue'] = Venue.de_json(data.get('venue'), bot)
        data['new_chat_member'] = User.de_json(data.get('new_chat_member'), bot)
        data['left_chat_member'] = User.de_json(data.get('left_chat_member'), bot)
        data['new_chat_photo'] = PhotoSize.de_list(data.get('new_chat_photo'), bot)
        data['pinned_message'] = Message.de_json(data.get('pinned_message'), bot)
        return Message(bot=bot, **data)
def __getitem__(self, item):
if item in self.__dict__.keys():
return self.__dict__[item]
elif item == 'chat_id':
return self.chat.id
    def to_dict(self):
        """Serialize this message back into a Bot API style dict.

        Returns:
            dict: datetimes become unix timestamps and nested photo/entity
            lists become lists of dicts.
        """
        data = super(Message, self).to_dict()
        # Required
        # The wire format names the sender 'from', not 'from_user'.
        data['from'] = data.pop('from_user', None)
        data['date'] = self._totimestamp(self.date)
        # Optionals
        if self.forward_date:
            data['forward_date'] = self._totimestamp(self.forward_date)
        if self.edit_date:
            data['edit_date'] = self._totimestamp(self.edit_date)
        if self.photo:
            data['photo'] = [p.to_dict() for p in self.photo]
        if self.entities:
            data['entities'] = [e.to_dict() for e in self.entities]
        if self.new_chat_photo:
            data['new_chat_photo'] = [p.to_dict() for p in self.new_chat_photo]
        return data
@staticmethod
def _fromtimestamp(unixtime):
"""
Args:
unixtime (int):
Returns:
datetime.datetime:
"""
if not unixtime:
return None
return datetime.fromtimestamp(unixtime)
@staticmethod
def _totimestamp(dt_obj):
"""
Args:
dt_obj (:class:`datetime.datetime`):
Returns:
int:
"""
if not dt_obj:
return None
try:
# Python 3.3+
return int(dt_obj.timestamp())
except AttributeError:
# Python 3 (< 3.3) and Python 2
return int(mktime(dt_obj.timetuple()))
    def _quote(self, kwargs):
        """Modify kwargs for replying with or without quoting.

        Mutates ``kwargs`` in place: an explicit ``reply_to_message_id``
        always wins over ``quote``; otherwise ``quote=True`` (or the implicit
        default in non-private chats) points the reply at this message.
        """
        if 'reply_to_message_id' in kwargs:
            if 'quote' in kwargs:
                del kwargs['quote']
        elif 'quote' in kwargs:
            if kwargs['quote']:
                kwargs['reply_to_message_id'] = self.message_id
            del kwargs['quote']
        else:
            if self.chat.type != Chat.PRIVATE:
                kwargs['reply_to_message_id'] = self.message_id
    def reply_text(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendMessage(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the message is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendMessage(self.chat_id, *args, **kwargs)
    def reply_photo(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendPhoto(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the photo is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendPhoto(self.chat_id, *args, **kwargs)
    def reply_audio(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendAudio(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the audio is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendAudio(self.chat_id, *args, **kwargs)
    def reply_document(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendDocument(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the document is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendDocument(self.chat_id, *args, **kwargs)
    def reply_sticker(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendSticker(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the sticker is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendSticker(self.chat_id, *args, **kwargs)
    def reply_video(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendVideo(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the video is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendVideo(self.chat_id, *args, **kwargs)
    def reply_voice(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendVoice(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the voice is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendVoice(self.chat_id, *args, **kwargs)
    def reply_location(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendLocation(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the location is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendLocation(self.chat_id, *args, **kwargs)
    def reply_venue(self, *args, **kwargs):
        """
        Shortcut for ``bot.sendVenue(update.message.chat_id, *args, **kwargs)``

        Keyword Args:
            quote (Optional[bool]): If set to ``True``, the venue is sent as an actual reply to
                this message. If ``reply_to_message_id`` is passed in ``kwargs``, this parameter
                will be ignored. Default: ``True`` in group chats and ``False`` in private chats.
        """
        # Resolve quote/reply_to_message_id before delegating to the bot.
        self._quote(kwargs)
        return self.bot.sendVenue(self.chat_id, *args, **kwargs)
def reply_contact(self, *args, **kwargs):
    """
    Shortcut for ``bot.sendContact(update.message.chat_id, *args, **kwargs)``

    Keyword Args:
        quote (Optional[bool]): When ``True``, the contact is sent as an actual reply
            to this message. Ignored if ``reply_to_message_id`` is already present in
            ``kwargs``. Default: ``True`` in group chats and ``False`` in private chats.
    """
    # Translate the convenience ``quote`` kwarg into ``reply_to_message_id``.
    self._quote(kwargs)
    target_chat = self.chat_id
    return self.bot.sendContact(target_chat, *args, **kwargs)
def forward(self, chat_id, disable_notification=False):
    """Shortcut for
       bot.forwardMessage(chat_id=chat_id,
                          from_chat_id=update.message.chat_id,
                          disable_notification=disable_notification,
                          message_id=update.message.message_id)
    """
    forward_kwargs = {
        'chat_id': chat_id,
        'from_chat_id': self.chat_id,
        'disable_notification': disable_notification,
        'message_id': self.message_id,
    }
    return self.bot.forwardMessage(**forward_kwargs)
def edit_text(self, *args, **kwargs):
    """
    Shortcut for ``bot.editMessageText(chat_id=message.chat_id,
                                       message_id=message.message_id,
                                       *args, **kwargs)``

    Note:
        Only messages that the bot sent itself can be edited, so this is
        only useful on the return value of the bot.send_* family of methods.
    """
    return self.bot.edit_message_text(
        *args, chat_id=self.chat_id, message_id=self.message_id, **kwargs)
def edit_caption(self, *args, **kwargs):
    """
    Shortcut for ``bot.editMessageCaption(chat_id=message.chat_id,
                                          message_id=message.message_id,
                                          *args, **kwargs)``

    Note:
        Only messages that the bot sent itself can be edited, so this is
        only useful on the return value of the bot.send_* family of methods.
    """
    return self.bot.edit_message_caption(
        *args, chat_id=self.chat_id, message_id=self.message_id, **kwargs)
def edit_reply_markup(self, *args, **kwargs):
    """
    Shortcut for ``bot.editMessageReplyMarkup(chat_id=message.chat_id,
                                              message_id=message.message_id,
                                              *args, **kwargs)``
    Note:
        You can only edit messages that the bot sent itself,
        therefore this method can only be used on the
        return value of the bot.send_* family of methods.
    """
    # BUG FIX: this previously delegated to ``edit_message_caption`` (a
    # copy-paste error from edit_caption); per the documented contract it
    # must call ``edit_message_reply_markup``.
    return self.bot.edit_message_reply_markup(
        chat_id=self.chat_id, message_id=self.message_id, *args, **kwargs)
def parse_entity(self, entity):
    """
    Returns the text from a given :class:`telegram.MessageEntity`.

    Note:
        Telegram measures entity ``offset``/``length`` in UTF-16 code units,
        so slicing ``Message.text`` directly is only correct on narrow Python
        builds, where ``str`` is already stored as UTF-16 code units.

    Args:
        entity (MessageEntity): The entity to extract the text from. It must be an
            entity that belongs to this message.

    Returns:
        str: The text of the given entity
    """
    if sys.maxunicode == 0xffff:
        # Narrow build: indices already count UTF-16 units, slice directly.
        return self.text[entity.offset:entity.offset + entity.length]
    # Wide build: re-encode so each UTF-16 code unit occupies exactly 2 bytes.
    utf16_text = self.text.encode('utf-16-le')
    start = entity.offset * 2
    end = start + entity.length * 2
    return utf16_text[start:end].decode('utf-16-le')
def parse_entities(self, types=None):
    """
    Returns a ``dict`` that maps :class:`telegram.MessageEntity` to ``str``.

    The keys are this message's entities filtered by their ``type`` attribute;
    each value is the substring of the message text the entity covers.

    Note:
        Always prefer this over the raw ``entities`` attribute: it computes the
        correct substring from UTF-16 codepoints (see ``parse_entity``).

    Args:
        types (Optional[list]): List of ``MessageEntity`` types as strings. Only
            entities whose ``type`` is contained in this list are returned.
            Defaults to all types (constants on :class:`telegram.MessageEntity`).

    Returns:
        dict[:class:`telegram.MessageEntity`, ``str``]: entities mapped to the
        text that belongs to them, calculated based on UTF-16 codepoints.
    """
    wanted_types = MessageEntity.ALL_TYPES if types is None else types
    selected = {}
    for entity in self.entities:
        if entity.type in wanted_types:
            selected[entity] = self.parse_entity(entity)
    return selected
| StarcoderdataPython |
55972 |
class ConnectedSIPMessage(object):
    """Pairs a parsed SIP message with the transport connection it arrived on."""

    def __init__(self, a_sip_transport_connection, a_sip_message):
        """Remember the transport connection and the SIP message (may be None)."""
        self.connection = a_sip_transport_connection
        self.sip_message = a_sip_message

    @property
    def raw_string(self):
        """Raw text of the wrapped SIP message, or ``None`` when there is no message."""
        message = self.sip_message
        return message.raw_string if message else None
| StarcoderdataPython |
97544 | <gh_stars>0
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Profile,Image,Comments
from .forms import NewImageForm,NewProfileForm,NewCommentForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """Render the static landing page."""
    return render(request,'home.html')
def user_profile(request):
    """Render the logged-in user's profile page with their picture and posts.

    The ``[0:1]`` slice keeps an (at most one element) queryset rather than a
    single Profile instance — presumably the template iterates over it; verify.
    """
    current_user = request.user
    profiles = Profile.objects.filter(user_id=current_user.id)[0:1]
    posts = Image.objects.filter(user_id=current_user.id)
    return render(request,'insta.html',{"profile_pic":profiles,"posts":posts})
def feeds(request):
    """Render the global feed: every post, every profile and every comment."""
    comments= Comments.objects.all()
    profiles = Profile.objects.all()
    posts = Image.objects.all()
    return render(request,'feeds.html',{"posts":posts,"profiles":profiles,"comments":comments})
def search(request):
    """Search for users by name via the ``user`` GET parameter.

    Renders ``search.html`` with the matching users when a non-empty search
    term is supplied, otherwise with a message asking for a user name.
    """
    # GET.get() collapses the original "'user' in request.GET and
    # request.GET['user']" double lookup into a single call; an absent or
    # empty parameter both yield a falsy value, preserving the old behavior.
    s_term = request.GET.get('user')
    if s_term:
        found = Image.search_users(s_term)
        # The original wrapped s_term in a redundant f-string; it is already a str.
        return render(request,'search.html',{'message':s_term,'founds':found,"term":s_term})
    else:
        message="You did not search any user please input a user name"
        return render(request,"search.html",{"message":message})
@login_required(login_url='/accounts/login')
def comments(request):
    """Handle the new-comment form.

    On a valid POST the comment is saved with the current user as author and
    the user is redirected to the feed; otherwise (GET, or invalid POST with
    the bound form and its errors) the form is rendered.
    """
    current_user=request.user
    if request.method == 'POST':
        form = NewCommentForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # Attach the author before persisting.
            post.user = current_user
            post.save()
            return redirect('feed')
    else:
        form = NewCommentForm()
    return render(request,'comments.html',{"form":form})
@login_required(login_url='/accounts/login')
def new_post(request):
    """Handle the new-image-post form.

    On a valid POST the image is saved with the current user as owner and the
    user is redirected to their profile; otherwise the (possibly bound) form
    is rendered.
    """
    current_user = request.user
    if request.method == 'POST':
        # request.FILES carries the uploaded image data.
        form = NewImageForm(request.POST,request.FILES)
        if form.is_valid():
            post = form.save(commit=False)
            # Attach the owner before persisting.
            post.user = current_user
            post.save()
            return redirect('userProfile')
    else:
        form = NewImageForm()
    return render(request,'new_post.html',{"form":form})
@login_required(login_url='/accounts/login')
def profile(request):
    """Create/update the current user's profile from the profile form.

    NOTE(review): on a valid POST this both updates the existing Profile row
    via ``Profile.objects.filter(...).update(...)`` AND calls ``save()`` on a
    new form-built instance — the latter looks like it could insert a
    duplicate Profile row; verify against the Profile model/form.
    """
    current_user = request.user
    if request.method == 'POST':
        form = NewProfileForm(request.POST,request.FILES)
        if form.is_valid():
            profile = form.save(commit=False)
            profile.user = current_user
            profile_pic = form.cleaned_data['profile_pic']
            bio = form.cleaned_data['bio']
            # Bulk-update the already-existing profile row for this user.
            Profile.objects.filter(user=current_user).update(bio=bio,profile_pic=profile_pic)
            profile.save()
            return redirect('userProfile')
    else:
        form = NewProfileForm()
    return render(request,'profile.html',{"form":form})
| StarcoderdataPython |
123040 | <reponame>kuanpern/jupyterlab-snippets-multimenus
exptrigsimp(exp(z) + exp(-z)) | StarcoderdataPython |
1684135 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch as T
import torch.nn as nn
class DNINetwork(nn.Module):
    """Abstract base class for DNI (decoupled neural interface) synthesizer networks.

    Subclasses map an input of width ``input_size`` through ``hidden_size``
    units to an output of width ``output_size`` by overriding :meth:`forward`.
    """

    def __init__(
            self,
            input_size,
            hidden_size,
            output_size
    ):
        """Record the layer dimensions shared by all DNI synthesizers.

        :param int input_size: width of the input features.
        :param int hidden_size: width of the hidden layer(s).
        :param int output_size: width of the produced output.
        """
        super(DNINetwork, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

    def forward(self, input, hidden):
        """Abstract forward pass; concrete subclasses must override.

        :raises NotImplementedError: always, on this base class.
        """
        # NotImplementedError is the idiomatic marker for an abstract method
        # (was: raise Exception('Not implemented')). It is still an Exception
        # subclass, so existing broad handlers keep working.
        raise NotImplementedError('Not implemented')
# default_class_dnis = {
# 'Linear': LinearDNI
# 'Conv2d':
# 'LSTM':
# 'LSTMCell':
# 'GRU':
# 'GRUCell':
# 'RNN':
# 'RNNCell':
# }
# class Network_Dependent_DNI(DNINetwork):
# def __init__(
# self,
# input_size,
# hidden_size,
# output_size,
# **kwargs
# ):
# super(Network_Dependent_DNI, self).__init__(input_size, hidden_size, output_size)
# assert 'module' in kwargs, 'module: parameter not provided'
# assert 'class_dnis' in kwargs, 'class_dnis: parameter not provided'
# self.module = kwargs.pop('module')
# self.class_dnis = kwargs.pop('class_dnis')
# self.network_params = kwargs
# class_name = module.__class__.__name__
# if class_name in self.class_dnis:
# self.net = self.class_dnis[class_name](
# input_size=self.input_size,
# hidden_size=self.hidden_size,
# output_size=self.output_size,
# **kwargs
# )
# else:
# self.net = LinearDNI(
# input_size=self.input_size,
# hidden_size=self.hidden_size,
# output_size=self.output_size
# )
# def forward(self, input, hidden):
# return self.net(input, hidden)
| StarcoderdataPython |
1744648 | from __future__ import print_function, with_statement
import logging
import os
from contextlib import contextmanager
from file_utils import temporary_file
from subprocess import Popen, PIPE
from urllib2 import urlopen
logger = logging.getLogger(__name__)
class CommandError(Exception):
"""Problem executing or initializing a binary command."""
class GitError(CommandError):
"""Problem invoking git."""
class BinaryUtils(object):
    """Helpers for downloading files, prompting the user, and invoking the
    ./pants wrapper script in the Java repo. (Python 2 module: urllib2/raw_input.)"""

    @classmethod
    def download(cls, url, destination):
        """Downloads a file.

        Best-effort: returns True on success, False on any failure.

        :param str url: url to download.
        :param str destination: destination file.
        """
        try:
            if os.path.exists(destination):
                os.remove(destination)
            # NOTE(review): the urlopen response is never closed, and the whole
            # payload is buffered in memory before writing — fine for small
            # files; consider contextlib.closing() and chunked copy.
            response = urlopen(url)
            with open(destination, 'w+') as output:
                output.write(response.read())
            return os.path.exists(destination)
        except Exception as e:
            # Deliberately broad: any network/filesystem failure is logged
            # and reported as False rather than raised.
            logger.debug('Failed to download {} to {} ({}).'.format(url, destination, e))
            return False

    @classmethod
    def pause(cls, message):
        """Prints out the message, waiting for user input to continue."""
        return raw_input('\n{}\nPress Return to continue, Ctrl+C to abort.'.format(message))

    @classmethod
    def run_dev_pants(cls, args, cwd=None):
        """Run PANTS_DEV=1 ./pants with the given arguments.

        NOTE(review): returns the subprocess return code (0 — falsy — on
        success) but False on failure to launch, so callers must not rely on
        simple truthiness.

        :param list args: list of arguments to ./pants.
        :param str cwd: path to the repo directory (where ./pants lives);
          defaults to the Java repo.
        """
        cwd = cwd or BinaryUtils.find_java_dir()
        args = [os.path.join(cwd, 'pants')] + [str(a) for a in args]
        try:
            logger.info('PANTS_DEV=1 {}'.format(' '.join(args)))
            env = os.environ.copy()
            env['PANTS_DEV'] = '1'
            p = Popen(args, env=env, cwd=cwd)
            p.wait()
            return p.returncode
        except Exception as e:
            logger.warning('Could not run pants: {}'.format(e))
            return False

    @classmethod
    def squarepants_binary(self, name, java_repo=None):
        """Returns the path to a binary in squarepants/bin.

        NOTE(review): declared @classmethod but the first parameter is named
        ``self`` — it receives the class object; rename to ``cls``.

        :param str name: name of the binary.
        :param str java_repo: path to the java repo directory, if not the cwd.
        """
        java_repo = java_repo or os.path.abspath('.')
        return os.path.join(java_repo, 'squarepants', 'bin', name)

    @classmethod
    def is_java_dir(cls, cwd=None):
        """Checks whether the given directory is the java directory.

        A directory qualifies when it is a working git repo AND contains
        squarepants/bin.

        :param str cwd: the directory to check (defaults to current directory).
        """
        if cwd is None:
            cwd = os.path.abspath('.')
        git = Git(cwd)
        return git.status() and os.path.exists(cls.squarepants_binary('', cwd))

    @classmethod
    def find_java_dir(cls, cwd=None):
        # Walk upward from cwd to the filesystem root, returning the first
        # ancestor that qualifies as the java dir, or None if none does.
        cwd = cwd or os.path.abspath('.')
        if not os.path.exists(cwd):
            return None
        last_path = None
        while cwd != last_path:
            if cls.is_java_dir(cwd):
                return cwd
            # dirname() of the root is the root itself, which terminates the loop.
            last_path = cwd
            cwd = os.path.dirname(cwd)
        return None
class Command(object):
    """Wraps up the configuration for running a command.
    Commands are callable, allowing the convenient syntax:
       echo = Command('echo')
       echo('Hello, world!')  # Executes: echo 'Hello, world!'
    """

    def __init__(self, name, args=None, cwd=None, env=None, pipe=True):
        """Creates a new command.

        :param str name: name of the command (eg, 'git').
        :param list args: list of arguments which form the beginning of the command invocation (the rest
          of it being supplied when __call__ is invoked). Defaults to [name,].
        :param str cwd: working directory to run the command in (defaults to current working directory).
        :param env: environment variables to run the command with (defaults to os.environ.copy()).
        :param bool pipe: whether to pipe the output to a returned variable, or just dump it into
          stdout.
        """
        self.name = name
        self.args = args or [name]
        self.cwd = cwd or os.path.abspath('.')
        self.env = env or os.environ.copy()
        self.pipe = pipe
        if not os.path.exists(self.cwd):
            raise CommandError('{}: working directory {} does not exist.'.format(self.name, cwd))

    def __call__(self, *vargs, **kwargs):
        """Invokes the system command with the given *vargs.

        If the command needs to query the user via stdin (eg, for a password prompt), pipe=False
        should be passed as a kwarg.

        Returns captured stdout (or True when stdout is empty) on success when
        piping, True/False when not piping, and False on failure.
        """
        pipe = kwargs.get('pipe', self.pipe)
        args = list(self.args)
        current_dir = os.path.abspath('.')
        # NOTE(review): the process-wide chdir looks redundant given cwd= is
        # also passed to Popen — presumably kept for side effects in callers;
        # confirm before removing. It is restored in the finally block.
        os.chdir(self.cwd)
        try:
            args.extend(vargs)
            logger.info('{} > {}'.format(self.cwd, ' '.join(args)))
            if pipe:
                p = Popen(args, cwd=self.cwd, stdout=PIPE, stderr=PIPE, env=self.env)
                out, err = p.communicate()
                if p.returncode == 0:
                    # 'out or True' keeps the return value truthy even when
                    # the command printed nothing.
                    return out or True
                logger.warning('Subprocess error: {}\n{}'.format(err, out))
                return False
            # Unpiped: stream directly to the terminal (interactive commands).
            p = Popen(args, cwd=self.cwd, env=self.env)
            p.wait()
            return p.returncode == 0
        finally:
            os.chdir(current_dir)

    def __str__(self):
        return "'{}' in {}".format(' '.join(self.args), self.cwd)
class Git(Command):
    """A Command specialized for invoking git in a particular repo directory."""

    def __init__(self, cwd, **kwargs):
        super(Git, self).__init__('git', ['git'], cwd, None, **kwargs)

    def status(self):
        """Result of git status."""
        return self('status')

    def is_clean(self):
        """Returns whether the git directory is clean.

        NOTE(review): matches the literal ', working directory clean'; newer
        git versions print 'working tree clean', which would make this always
        return False — verify against the git version in use.
        """
        status = self.status()
        return status and ', working directory clean' in str(status)

    def branch(self):
        """Returns the name of the current branch."""
        return self('rev-parse', '--abbrev-ref', 'HEAD').strip()

    def branch_exists(self, branch):
        """Returns true if the branch exists in the local repo."""
        return self('rev-parse', '--verify', branch)

    def commit(self, message):
        """Commits changes to the repo, with the supplied commit message."""
        return self('commit', '-am', message)

    def remotes(self):
        """Returns a list of the names of the remote repos."""
        try:
            return self('remote').split('\n')
        # NOTE(review): bare except — narrows poorly; at minimum except Exception.
        except:
            return []

    def _apply_patch(self, patch_url, patch_name, commit=False):
        # Two patch flavors: a raw diff file (downloaded and git-apply'd) or a
        # branch/ref (git-merged). Raises GitError on any failed step.
        if patch_url.endswith('.diff') or patch_url.endswith('.patch'):
            logger.debug('Patch identified as a raw diff, attempting git apply.')
            with temporary_file() as patch_dest:
                if not BinaryUtils.download(patch_url, patch_dest):
                    raise GitError('Failed to download {} ({})'.format(patch_url, patch_name))
                if not self('apply', patch_dest):
                    raise GitError('Failed to apply {} from {}'.format(patch_name, patch_url))
                # Stage any files the patch created so a commit captures them.
                if not self('add', '-A'):
                    raise GitError('Failed to add untracked files.')
                if commit and not self.commit('{}\n\nPatched in from:\n{}'.format(patch_name, patch_url)):
                    raise GitError('Failed to commit {}'.format(patch_name))
        else:
            logger.debug('Patch identified as a branch, attempting git merge.')
            if not self('merge', '--no-edit', patch_url):
                raise GitError('Failed to merge {} from {}'.format(patch_name, patch_url))

    @contextmanager
    def apply_patches(self, patches, on_branch=None, commit=False):
        """Applies the given sequence of patches to the git repo, yields, then reverts all patches.

        This requires the local repo to be clean, and will error otherwise. This will abort cleanly and
        raise an error if anything goes wrong.

        :param list patches: list of patches of the form (patch_url, patch_name). The url can either be
          a literal url to a .diff or .patch file, in which case the url is used to download the diff
          (which is then git apply'd). Otherwise, it is assumed that the url is actually a branch, and
          git merge is run on it. The name is used for logging and for the commit message (if commit is
          True).
        :param str on_branch: name of the temporary branch to use for patching; defaults to a temporary
          branch name. This branch WILL BE DELETED and created freshly in the local repo.
        :param bool commit: whether to commit after patching.
        """
        if not self.is_clean():
            raise GitError('{} is not clean; please commit or stash changes.'.format(self))
        branch = self.branch()
        if not branch:
            # NOTE(review): message reads 'Could determine' — likely meant
            # "Could not determine"; left as-is (runtime string).
            raise GitError('Could determine current branch.')
        logger.debug('Current branch: {}'.format(branch))
        temp_branch_name = on_branch or 'temp/temporary-patching-branch'
        # Recreate the scratch branch from scratch every time.
        if self.branch_exists(temp_branch_name):
            self('branch', '-D', temp_branch_name)
        if not self('checkout', '-b', temp_branch_name):
            raise GitError('Could not create temporary patching branch.')
        try:
            for patch_url, patch_name in patches:
                self._apply_patch(patch_url, patch_name, commit=commit)
            yield
        finally:
            # Always restore the original branch, discarding leftover changes.
            logger.debug('\nCleaning up repo ...')
            if not self.is_clean():
                self('reset', '--hard')
            self('checkout', branch)
class PantsGit(Git):
    """Creates a new Git command, automatically setting the cwd to the location of the pants repo."""

    def __init__(self, cwd=None, **kwargs):
        if not cwd:
            cwd = PantsGit.find_pants_src()
        if not cwd:
            raise GitError('Could not find pants source directory (try setting PANTS_SRC).')
        if not os.path.exists(cwd):
            raise GitError('Pants source directory set to "{}", but does not exist.'.format(cwd))
        super(PantsGit, self).__init__(cwd, **kwargs)

    def commit(self, message):
        # Run the isort formatter (best-effort, output discarded) before
        # delegating to the normal git commit.
        p = Popen(['build-support/bin/isort.sh', '-f'], stdout=PIPE, stderr=PIPE, cwd=self.cwd)
        p.communicate()
        return super(PantsGit, self).commit(message)

    @classmethod
    def find_pants_src(cls):
        """Tries to find the pants source code.

        Checks $PANTS_SRC first, then looks for ~/src*/pants-like directories
        that are working git repos.

        Returns the path to the source directory if it finds it, otherwise returns None.

        NOTE(review): relies on Python 2 ``filter`` returning a list
        (``.pop()`` below); under Python 3 this would break — consistent with
        this module's other py2-isms (urllib2, raw_input).
        """
        if 'PANTS_SRC' in os.environ:
            return os.environ['PANTS_SRC']
        home = os.path.expanduser('~')
        srcs = [os.path.join(home, name) for name in os.listdir(home) if name.lower()=='src']
        srcs = filter(os.path.isdir, srcs)
        pants_srcs = []
        for source_dir in srcs:
            pants_srcs.extend(os.path.join(source_dir, name) for name in os.listdir(source_dir) if name.lower()=='pants')
        pants_srcs = filter(os.path.isdir, pants_srcs)
        # Keep only candidates that respond to `git status` (real checkouts).
        pants_srcs = [src for src in pants_srcs if Git(src).status()]
        return pants_srcs.pop() if pants_srcs else None
| StarcoderdataPython |
3205812 | <gh_stars>0
# exc. 7.3.1 (Rolling Mission)
def show_hidden_word(secret_word, old_letters_guessed):
    """
    Return the secret word with unguessed letters masked.

    Each guessed letter appears as "<letter> " in its exact position; each
    unguessed letter appears as " _ ".

    :param secret_word: the secret word the user needs to guess
    :param old_letters_guessed: list of letters the user guessed
    :type secret_word: str
    :type old_letters_guessed: list
    :return: new word that contains letters and underlines, e.g. "c  _ t "
    :rtype: str
    """
    # Iterate the letters directly instead of range(len(...)); the two cases
    # are mutually exclusive, so the redundant `elif not in` check is gone,
    # and join() avoids quadratic string concatenation.
    return ''.join(
        letter + ' ' if letter in old_letters_guessed else ' _ '
        for letter in secret_word
    )
def main():
    """Demo: print the partially-revealed word for a sample set of guesses."""
    word_to_guess = "mammalas"
    guesses_so_far = ['s', 'p', 'j', 'i', 'm', 'k']
    print(show_hidden_word(word_to_guess, guesses_so_far))


if __name__ == "__main__":
    main()
1753965 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.iot.hub.devicesdk.auth.sas_authentication_provider import (
SharedAccessSignatureAuthenticationProvider,
)
sas_device_token_format = "SharedAccessSignature sr={}&sig={}&se={}"
sas_device_skn_token_format = "SharedAccessSignature sr={}&sig={}&se={}&skn={}"

# Fixture values shared by every test below.
shared_access_key_name = "alohomora"
hostname = "beauxbatons.academy-net"
device_id = "MyPensieve"
module_id = "Divination"
signature = "IsolemnlySwearThatIamuUptoNogood"
expiry = "1539043658"


def create_sas_token_string_device(is_module=False, is_key_name=False):
    """Compose a device- or module-scoped SAS token string from the fixtures above."""
    resource = "{}/devices/{}".format(hostname, device_id)
    if is_module:
        resource = "{}/modules/{}".format(resource, module_id)
    if is_key_name:
        return sas_device_skn_token_format.format(resource, signature, expiry, shared_access_key_name)
    return sas_device_token_format.format(resource, signature, expiry)
def test_sas_auth_provider_is_created_from_device_sas_token_string():
    """Device-scoped token: hostname and device id are parsed and embedded in the token string."""
    sas_string = create_sas_token_string_device()
    sas_auth_provider = SharedAccessSignatureAuthenticationProvider.parse(sas_string)
    assert sas_auth_provider.hostname == hostname
    assert sas_auth_provider.device_id == device_id
    assert hostname in sas_auth_provider.sas_token_str
    assert device_id in sas_auth_provider.sas_token_str


def test_sas_auth_provider_is_created_from_module_sas_token_string():
    """Module-scoped token: the module id is parsed in addition to hostname and device id."""
    sas_string = create_sas_token_string_device(True)
    sas_auth_provider = SharedAccessSignatureAuthenticationProvider.parse(sas_string)
    assert sas_auth_provider.hostname == hostname
    assert sas_auth_provider.device_id == device_id
    assert hostname in sas_auth_provider.sas_token_str
    assert device_id in sas_auth_provider.sas_token_str
    assert sas_auth_provider.module_id == module_id
    assert hostname in sas_auth_provider.sas_token_str
    assert device_id in sas_auth_provider.sas_token_str
    assert module_id in sas_auth_provider.sas_token_str


def test_sas_auth_provider_is_created_from_device_sas_token_string_with_keyname():
    """Token with the optional skn= part: the key name survives into the token string."""
    sas_string = create_sas_token_string_device(False, True)
    sas_auth_provider = SharedAccessSignatureAuthenticationProvider.parse(sas_string)
    assert sas_auth_provider.hostname == hostname
    assert sas_auth_provider.device_id == device_id
    assert hostname in sas_auth_provider.sas_token_str
    assert device_id in sas_auth_provider.sas_token_str
    assert shared_access_key_name in sas_auth_provider.sas_token_str


def test_sas_auth_provider_is_created_from_device_sas_token_string_quoted():
    """URL-quoted resource uri (%2F separators) is decoded during parsing."""
    sas_string_quoted = "SharedAccessSignature sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658&skn=alohomora"
    sas_auth_provider = SharedAccessSignatureAuthenticationProvider.parse(sas_string_quoted)
    assert sas_auth_provider.hostname == hostname
    assert sas_auth_provider.device_id == device_id
    assert hostname in sas_auth_provider.sas_token_str
    assert device_id in sas_auth_provider.sas_token_str
def test_raises_when_auth_provider_created_from_empty_shared_access_signature_string():
    """Empty string is rejected with the generic two-part-format error."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse("")


def test_raises_when_auth_provider_created_from_none_shared_access_signature_string():
    """None is rejected with the generic two-part-format error."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(None)


def test_raises_when_auth_provider_created_from_blank_shared_access_signature_string():
    """Whitespace-only string is rejected with the generic two-part-format error."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(" ")


def test_raises_when_auth_provider_created_from_numeric_shared_access_signature_string():
    """A non-string (int) input is rejected with the generic two-part-format error."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(873915)


def test_raises_when_auth_provider_created_from_object_shared_access_signature_string():
    """A non-string (arbitrary object) input is rejected with the generic two-part-format error."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(object)


def test_raises_when_auth_provider_created_from_shared_access_signature_string_blank_second_part():
    """'SharedAccessSignature ' with nothing after the keyword is rejected."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse("SharedAccessSignature ")


def test_raises_when_auth_provider_created_from_shared_access_signature_string_numeric_second_part():
    """A second part that is not name=value pairs is rejected."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        SharedAccessSignatureAuthenticationProvider.parse("SharedAccessSignature 67998311999")


def test_raises_when_auth_provider_created_from_shared_access_signature_string_numeric_value_second_part():
    """An sr= value that is not a hostname/device uri is rejected."""
    with pytest.raises(
        ValueError,
        match="One of the name value pair of the Shared Access Signature string should be a proper resource uri",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(
            "SharedAccessSignature sr=67998311999&sig=24234234&se=1539043658&skn=25245245"
        )


def test_raises_when_auth_provider_created_from_shared_access_signature_string_with_incomplete_sr():
    """An sr= value missing the hostname portion is rejected."""
    with pytest.raises(
        ValueError,
        match="One of the name value pair of the Shared Access Signature string should be a proper resource uri",
    ):
        SharedAccessSignatureAuthenticationProvider.parse(
            "SharedAccessSignature sr=MyPensieve&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658&skn=alohomora"
        )


def test_raises_auth_provider_created_from_missing_part_shared_access_signature_string():
    """A token missing the leading 'SharedAccessSignature' keyword is rejected."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature is required and should not be empty or blank and must be supplied as a string consisting of two parts in the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' with an optional skn=<keyname>",
    ):
        one_part_sas_str = "sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658&skn=alohomora"
        SharedAccessSignatureAuthenticationProvider.parse(one_part_sas_str)


def test_raises_auth_provider_created_from_more_parts_shared_access_signature_string():
    """A token with trailing extra segments after the name=value part is rejected."""
    with pytest.raises(
        ValueError,
        match="The Shared Access Signature must be of the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' or/and it can additionally contain an optional skn=<keyname> name=value pair.",
    ):
        more_part_sas_str = "SharedAccessSignature sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658&skn=alohomora SharedAccessSignature"
        SharedAccessSignatureAuthenticationProvider.parse(more_part_sas_str)


def test_raises_auth_provider_created_from_shared_access_signature_string_duplicate_keys():
    """A token repeating the sr= key is rejected as unparseable."""
    with pytest.raises(ValueError, match="Invalid Shared Access Signature - Unable to parse"):
        duplicate_sas_str = "SharedAccessSignature sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&sig=IsolemnlySwearThatIamuUptoNogood&se=1539043658&sr=alohomora"
        SharedAccessSignatureAuthenticationProvider.parse(duplicate_sas_str)


def test_raises_auth_provider_created_from_shared_access_signature_string_bad_keys():
    """A token using an unknown key name (signature= instead of sig=) is rejected."""
    with pytest.raises(
        ValueError,
        match="Invalid keys in Shared Access Signature. The valid keys are sr, sig, se and an optional skn.",
    ):
        bad_key_sas_str = "SharedAccessSignature sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&signature=IsolemnlySwearThatIamuUptoNogood&se=1539043658&skn=alohomora"
        SharedAccessSignatureAuthenticationProvider.parse(bad_key_sas_str)


def test_raises_auth_provider_created_from_incomplete_shared_access_signature_string():
    """A token missing a mandatory key (sig=) is rejected."""
    with pytest.raises(
        ValueError,
        match="Invalid Shared Access Signature. It must be of the format 'SharedAccessSignature sr=<resource_uri>&sig=<signature>&se=<expiry>' or/and it can additionally contain an optional skn=<keyname> name=value pair.",
    ):
        incomplete_sas_str = "SharedAccessSignature sr=beauxbatons.academy-net%2Fdevices%2FMyPensieve&se=1539043658&skn=alohomora"
        SharedAccessSignatureAuthenticationProvider.parse(incomplete_sas_str)
| StarcoderdataPython |
53410 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from typing import Union, Tuple, List, Optional, Type
import oef.query_pb2 as query_pb2
from oef.schema import ATTRIBUTE_TYPES, AttributeSchema, DataModel, ProtobufSerializable, Description, Location
RANGE_TYPES = Union[Tuple[str, str], Tuple[int, int], Tuple[float, float], Tuple[Location, Location]]
ORDERED_TYPES = Union[int, str, float]
SET_TYPES = Union[List[float], List[str], List[bool], List[int], List[Location]]
Query = None
class ConstraintExpr(ProtobufSerializable, ABC):
"""
This class is used to represent a constraint expression.
"""
@abstractmethod
def check(self, description: Description) -> bool:
"""
Check if a description satisfies the constraint expression.
:param description: the description to check.
:return: ``True`` if the description satisfy the constraint expression, ``False`` otherwise.
"""
@abstractmethod
def is_valid(self, data_model: DataModel) -> bool:
"""
Check whether a constraint expression is valid wrt a data model. Specifically, check the following conditions:
- If all the attributes referenced by the constraints are correctly associated with the Data Model attributes.
:param data_model: the data model used to check the validity of the constraint expression.
:return: ``True`` if the constraint expression is valid wrt the data model, ``False`` otherwise.
"""
def _check_validity(self) -> None:
"""Check whether a Constraint Expression satisfies some basic requirements.
E.g. an :class:`~oef.query.And` expression must have at least 2 subexpressions.
:return ``None``
:raises ValueError: if the object does not satisfy some requirements."""
return
@staticmethod
def _to_pb(expression):
constraint_expr_pb = query_pb2.Query.ConstraintExpr()
expression_pb = expression.to_pb()
if isinstance(expression, And):
constraint_expr_pb.and_.CopyFrom(expression_pb)
elif isinstance(expression, Or):
constraint_expr_pb.or_.CopyFrom(expression_pb)
elif isinstance(expression, Not):
constraint_expr_pb.not_.CopyFrom(expression_pb)
elif isinstance(expression, Constraint):
constraint_expr_pb.constraint.CopyFrom(expression_pb)
return constraint_expr_pb
@staticmethod
def _from_pb(expression_pb):
expression = expression_pb.WhichOneof("expression")
if expression == "and_":
return And.from_pb(expression_pb.and_)
elif expression == "or_":
return Or.from_pb(expression_pb.or_)
elif expression == "not_":
return Not.from_pb(expression_pb.not_)
elif expression == "constraint":
return Constraint.from_pb(expression_pb.constraint)
class And(ConstraintExpr):
    """
    A conjunction of constraint expressions.

    An :class:`~oef.query.And` expression is satisfied exactly when every one
    of its subexpressions is satisfied.

    Examples:
        All the books whose title is between 'I' and 'J' (alphanumeric order) but not equal to 'It'

        >>> c = And([Constraint("title", Range(("I", "J"))), Constraint("title", NotEq("It"))])
        >>> c.check(Description({"title": "I, Robot"}))
        True
        >>> c.check(Description({"title": "It"}))
        False
        >>> c.check(Description({"title": "1984"}))
        False
    """

    def __init__(self, constraints: List[ConstraintExpr]) -> None:
        """
        Initialize an :class:`~oef.query.And` constraint.

        :param constraints: the subexpressions to be interpreted in conjunction.
        :raises ValueError: if fewer than two subexpressions are provided.
        """
        self.constraints = constraints
        self._check_validity()

    def to_pb(self):
        """
        From an instance of :class:`~oef.query.And` to its associated Protobuf object.

        :return: the ``And`` Protobuf object equivalent to this expression.
        """
        and_pb = query_pb2.Query.ConstraintExpr.And()
        and_pb.expr.extend(ConstraintExpr._to_pb(subexpr) for subexpr in self.constraints)
        return and_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.And):
        """
        From the ``And`` Protobuf object to the associated instance of :class:`~oef.query.And`.

        :param constraint_pb: the Protobuf object that represents the ``And`` constraint.
        :return: an instance of :class:`~oef.query.And` equivalent to the Protobuf object.
        """
        return cls([ConstraintExpr._from_pb(subexpr_pb) for subexpr_pb in constraint_pb.expr])

    def check(self, description: Description) -> bool:
        """
        Check whether *description* satisfies every subexpression.

        :param description: the description to check.
        :return: ``True`` if all subexpressions are satisfied, ``False`` otherwise.
        """
        for subexpr in self.constraints:
            if not subexpr.check(description):
                return False
        return True

    def is_valid(self, data_model: DataModel) -> bool:
        """
        Check that every subexpression is valid with respect to *data_model*.

        :param data_model: the data model used for the validity check.
        :return: ``True`` if all subexpressions are valid, ``False`` otherwise.
        """
        for subexpr in self.constraints:
            if not subexpr.is_valid(data_model):
                return False
        return True

    def _check_validity(self):
        """Raise ``ValueError`` unless there are at least two subexpressions, each itself valid."""
        if len(self.constraints) < 2:
            raise ValueError("Invalid input value for type '{}': number of "
                             "subexpression must be at least 2.".format(type(self).__name__))
        for subexpr in self.constraints:
            subexpr._check_validity()

    def __eq__(self, other):
        return type(other) == And and self.constraints == other.constraints
class Or(ConstraintExpr):
    """
    A disjunction of constraint expressions.

    An :class:`~oef.query.Or` expression is satisfied whenever at least one
    of its subexpressions is satisfied.

    Examples:
        All the books that have been published either before the year 1960 or after the year 1970

        >>> c = Or([Constraint("year", Lt(1960)), Constraint("year", Gt(1970))])
        >>> c.check(Description({"year": 1950}))
        True
        >>> c.check(Description({"year": 1975}))
        True
        >>> c.check(Description({"year": 1960}))
        False
        >>> c.check(Description({"year": 1970}))
        False
    """

    def __init__(self, constraints: List[ConstraintExpr]) -> None:
        """
        Initialize an :class:`~oef.query.Or` constraint.

        :param constraints: the subexpressions to be interpreted in disjunction.
        :raises ValueError: if fewer than two subexpressions are provided.
        """
        self.constraints = constraints
        self._check_validity()

    def to_pb(self):
        """
        From an instance of :class:`~oef.query.Or` to its associated Protobuf object.

        :return: the ``Or`` Protobuf object equivalent to this expression.
        """
        or_pb = query_pb2.Query.ConstraintExpr.Or()
        or_pb.expr.extend(ConstraintExpr._to_pb(subexpr) for subexpr in self.constraints)
        return or_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Or):
        """
        From the ``Or`` Protobuf object to the associated instance of :class:`~oef.query.Or`.

        :param constraint_pb: the Protobuf object that represents the ``Or`` constraint.
        :return: an instance of :class:`~oef.query.Or` equivalent to the Protobuf object.
        """
        return cls([ConstraintExpr._from_pb(subexpr_pb) for subexpr_pb in constraint_pb.expr])

    def check(self, description: Description) -> bool:
        """
        Check whether *description* satisfies at least one subexpression.

        :param description: the description to check.
        :return: ``True`` if any subexpression is satisfied, ``False`` otherwise.
        """
        for subexpr in self.constraints:
            if subexpr.check(description):
                return True
        return False

    def is_valid(self, data_model: DataModel) -> bool:
        """
        Check that every subexpression is valid with respect to *data_model*.

        :param data_model: the data model used for the validity check.
        :return: ``True`` if all subexpressions are valid, ``False`` otherwise.
        """
        for subexpr in self.constraints:
            if not subexpr.is_valid(data_model):
                return False
        return True

    def _check_validity(self):
        """Raise ``ValueError`` unless there are at least two subexpressions, each itself valid."""
        if len(self.constraints) < 2:
            raise ValueError("Invalid input value for type '{}': number of "
                             "subexpression must be at least 2.".format(type(self).__name__))
        for subexpr in self.constraints:
            subexpr._check_validity()

    def __eq__(self, other):
        return type(other) == Or and self.constraints == other.constraints
class Not(ConstraintExpr):
    """
    The negation of a constraint expression.

    A :class:`~oef.query.Not` is satisfied exactly when the wrapped
    expression is not satisfied.

    Examples:
        All the books whose genre is science fiction, but the year is not between 1990 and 2000

        >>> c = And([Constraint("genre", Eq("science-fiction")), Not(Constraint("year", Range((1990, 2000))))])
        >>> c.check(Description({"genre": "science-fiction", "year": 1995}))
        False
        >>> c.check(Description({"genre": "science-fiction", "year": 2001}))
        True
    """

    def __init__(self, constraint: ConstraintExpr) -> None:
        """
        Initialize a :class:`~oef.query.Not` expression.

        :param constraint: the constraint expression to negate.
        """
        self.constraint = constraint

    def check(self, description: Description) -> bool:
        """
        Check if a value satisfies the :class:`~oef.query.Not` constraint expression.

        :param description: the description to check.
        :return: ``True`` if the wrapped expression is NOT satisfied, ``False`` otherwise.
        """
        return not self.constraint.check(description)

    def to_pb(self):
        """
        From an instance of :class:`~oef.query.Not` to its associated Protobuf object.

        :return: the ``Not`` Protobuf object equivalent to this expression.
        """
        not_pb = query_pb2.Query.ConstraintExpr.Not()
        not_pb.expr.CopyFrom(ConstraintExpr._to_pb(self.constraint))
        return not_pb

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Not):
        """
        From the ``Not`` Protobuf object to the associated instance of :class:`~oef.query.Not`.

        :param constraint_pb: the Protobuf object that represents the ``Not`` constraint.
        :return: an instance of :class:`~oef.query.Not` equivalent to the Protobuf object.
        """
        return cls(ConstraintExpr._from_pb(constraint_pb.expr))

    def is_valid(self, data_model: DataModel) -> bool:
        """
        Delegate the validity check to the wrapped expression.

        :param data_model: the data model used for the validity check.
        :return: ``True`` if the wrapped expression is valid, ``False`` otherwise.
        """
        return self.constraint.is_valid(data_model)

    def __eq__(self, other):
        return type(other) == Not and self.constraint == other.constraint
class ConstraintType(ProtobufSerializable, ABC):
    """
    Abstract base class of all constraint types (relations, ranges, sets, distances).
    """

    @abstractmethod
    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if an attribute value satisfies the constraint.
        The implementation depends on the concrete constraint type.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """

    def is_valid(self, attribute: AttributeSchema) -> bool:
        """
        Check if the constraint type is valid wrt a given attribute.

        :param attribute: the attribute schema used to check the validity of the constraint type.
        :return: ``True`` if the constraint type is valid wrt the attribute, ``False`` otherwise.
        """
        expected_type = self._get_type()
        return expected_type is None or expected_type == attribute.type

    @abstractmethod
    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        """
        Get the type of attribute values that can be compared with this constraint.

        :return: the type of this constraint type, or ``None`` if it can't be determined.
        """
class Relation(ConstraintType, ABC):
    """
    A constraint type that compares an attribute value against a fixed value.

    The specific comparison operator is supplied by the subclasses
    that extend this class.
    """

    def __init__(self, value: ATTRIBUTE_TYPES) -> None:
        """
        Initialize a Relation object.

        :param value: the right-hand value of the relation.
        """
        self.value = value

    @property
    @abstractmethod
    def _operator(self) -> query_pb2.Query.Relation:
        """The operator of the relation."""

    @classmethod
    def from_pb(cls, relation: query_pb2.Query.Relation):
        """
        From the Relation Protobuf object to the associated
        instance of a subclass of Relation.

        :param relation: the Protobuf object that represents the relation constraint.
        :return: an instance of one of the subclasses of Relation.
        """
        operator_to_class = {
            query_pb2.Query.Relation.GTEQ: GtEq,
            query_pb2.Query.Relation.GT: Gt,
            query_pb2.Query.Relation.LTEQ: LtEq,
            query_pb2.Query.Relation.LT: Lt,
            query_pb2.Query.Relation.NOTEQ: NotEq,
            query_pb2.Query.Relation.EQ: Eq,
        }
        relation_class = operator_to_class[relation.op]
        value_case = relation.val.WhichOneof("value")
        if value_case == "l":
            # Locations need an explicit conversion from their Protobuf form.
            return relation_class(Location.from_pb(relation.val.l))
        elif value_case in ("s", "b", "i", "d"):
            # Scalar fields can be read straight off the Value message.
            return relation_class(getattr(relation.val, value_case))

    def to_pb(self) -> query_pb2.Query.Relation:
        """
        From an instance of Relation to its associated Protobuf object.

        :return: the Protobuf object that contains the relation.
        """
        relation = query_pb2.Query.Relation()
        relation.op = self._operator()
        query_value = query_pb2.Query.Value()
        if isinstance(self.value, Location):
            query_value.l.CopyFrom(self.value.to_pb())
        else:
            # bool must be tested before int, since bool is a subclass of int.
            for candidate_type, field_name in ((bool, "b"), (int, "i"), (float, "d"), (str, "s")):
                if isinstance(self.value, candidate_type):
                    setattr(query_value, field_name, self.value)
                    break
        relation.val.CopyFrom(query_value)
        return relation

    def _get_type(self) -> Type[ATTRIBUTE_TYPES]:
        # The comparable type is simply the type of the stored value.
        return type(self.value)

    def __eq__(self, other):
        return type(other) == type(self) and self.value == other.value
class OrderingRelation(Relation, ABC):
    """A specialization of :class:`~oef.query.Relation` for order comparisons (e.g. greater-than)."""

    def __init__(self, value: ORDERED_TYPES):
        """
        Initialize an ordering relation.

        :param value: the right-hand value; must support ordering comparisons.
        """
        super().__init__(value)

    def _get_type(self) -> Type[ORDERED_TYPES]:
        # Same logic as Relation._get_type, restated with a narrower annotation.
        return type(self.value)
class Eq(Relation):
    """
    The equality relation: the :class:`~oef.query.Constraint` is satisfied when
    the attribute value is equal to the value specified.

    Examples:
        All the books whose author is Stephen King

        >>> c = Constraint("author", Eq("Stephen King"))
        >>> c.check(Description({"author": "Stephen King"}))
        True
        >>> c.check(Description({"author": "George Orwell"}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.EQ

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if *value* is equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return self.value == value
class NotEq(Relation):
    """
    The non-equality relation: the :class:`~oef.query.Constraint` is satisfied
    when the attribute value differs from the value specified.

    Examples:
        All the books that are not of the genre Horror

        >>> c = Constraint("genre", NotEq("horror"))
        >>> c.check(Description({"genre": "non-fiction"}))
        True
        >>> c.check(Description({"genre": "horror"}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.NOTEQ

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if *value* is not equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return self.value != value
class Lt(OrderingRelation):
    """
    The less-than relation: the :class:`~oef.query.Constraint` is satisfied
    when the attribute value is strictly less than the value specified.

    Examples:
        All the books published before 1990

        >>> c = Constraint("year", Lt(1990))
        >>> c.check(Description({"year": 1985}))
        True
        >>> c.check(Description({"year": 2000}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.LT

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if *value* is strictly less than the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value < self.value
class LtEq(OrderingRelation):
    """
    The less-than-or-equal relation: the :class:`~oef.query.Constraint` is
    satisfied when the attribute value is at most the value specified.

    Examples:
        All the books published before 1990, 1990 included

        >>> c = Constraint("year", LtEq(1990))
        >>> c.check(Description({"year": 1990}))
        True
        >>> c.check(Description({"year": 1991}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.LTEQ

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if *value* is less than or equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value <= self.value
class Gt(OrderingRelation):
    """
    The greater-than relation: the :class:`~oef.query.Constraint` is satisfied
    when the attribute value is strictly greater than the value specified.

    Examples:
        All the books with rating greater than 4.0

        >>> c = Constraint("average_rating", Gt(4.0))
        >>> c.check(Description({"average_rating": 4.5}))
        True
        >>> c.check(Description({"average_rating": 3.0}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.GT

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if *value* is strictly greater than the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value > self.value
class GtEq(OrderingRelation):
    """
    The greater-than-or-equal relation: the :class:`~oef.query.Constraint` is
    satisfied when the attribute value is at least the value specified.

    Examples:
        All the books published after 2000, included

        >>> c = Constraint("year", GtEq(2000))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 1990}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.GTEQ

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if *value* is greater than or equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value >= self.value
class Range(ConstraintType):
    """
    A constraint type that restricts the values of the attribute to a given (closed) range.

    Examples:
        All the books published between 2000 and 2005, both endpoints included

        >>> c = Constraint("year", Range((2000, 2005)))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 2005}))
        True
        >>> c.check(Description({"year": 1990}))
        False
        >>> c.check(Description({"year": 2010}))
        False
    """

    def __init__(self, values: RANGE_TYPES) -> None:
        """
        Initialize a range constraint type.

        :param values: a pair of ``int``, a pair of ``str``, a pair of ``float`` or
                     | a pair of :class:`~oef.schema.Location`.
        """
        self.values = values

    def to_pb(self) -> query_pb2.Query.Range:
        # NOTE: the return annotation previously said ``query_pb2.Query``, but this
        # method has always returned a ``query_pb2.Query.Range``.
        """
        From an instance of Range to its associated Protobuf object.

        :return: the ``Range`` Protobuf object that contains the range.
        """
        range_ = query_pb2.Query.Range()
        first, second = self.values
        # type() (not isinstance) is used deliberately: e.g. a bool value must
        # not match the int branch.
        if type(first) == str:
            pair = query_pb2.Query.StringPair()
            pair.first = first
            pair.second = second
            range_.s.CopyFrom(pair)
        elif type(first) == int:
            pair = query_pb2.Query.IntPair()
            pair.first = first
            pair.second = second
            range_.i.CopyFrom(pair)
        elif type(first) == float:
            pair = query_pb2.Query.DoublePair()
            pair.first = first
            pair.second = second
            range_.d.CopyFrom(pair)
        elif type(first) == Location:
            pair = query_pb2.Query.LocationPair()
            pair.first.CopyFrom(first.to_pb())
            pair.second.CopyFrom(second.to_pb())
            range_.l.CopyFrom(pair)
        return range_

    @classmethod
    def from_pb(cls, range_pb: query_pb2.Query.Range):
        """
        From the Range Protobuf object to the associated instance of ``Range``.

        :param range_pb: the Protobuf object that represents the range.
        :return: an instance of ``Range`` equivalent to the Protobuf object provided as input.
        """
        range_case = range_pb.WhichOneof("pair")
        if range_case == "s":
            return cls((range_pb.s.first, range_pb.s.second))
        elif range_case == "i":
            return cls((range_pb.i.first, range_pb.i.second))
        elif range_case == "d":
            return cls((range_pb.d.first, range_pb.d.second))
        elif range_case == "l":
            return cls((Location.from_pb(range_pb.l.first), Location.from_pb(range_pb.l.second)))

    def check(self, value: RANGE_TYPES) -> bool:
        """
        Check if a value is in the range specified by the constraint.

        :param value: the value to check.
        :return: ``True`` if the value lies within the inclusive bounds, ``False`` otherwise.
        """
        left, right = self.values
        return left <= value <= right

    def _get_type(self) -> Type[Union[int, str, float, Location]]:
        # The comparable type is the type of the lower bound.
        return type(self.values[0])

    def __eq__(self, other):
        if type(other) != Range:
            return False
        return self.values == other.values
class Set(ConstraintType, ABC):
    """
    A constraint type that restricts an attribute value with respect to a set of values.

    The specific set operator (membership / non-membership) is defined by the
    subclasses that extend this class.
    """

    def __init__(self, values: SET_TYPES) -> None:
        """
        Initialize a :class:`~oef.query.Set` constraint.

        :param values: the list of values defining the set.
        """
        self.values = values

    @property
    @abstractmethod
    def _operator(self) -> query_pb2.Query.Set:
        """The operator over the set."""

    def to_pb(self):
        """
        From an instance of one of the subclasses of :class:`~oef.query.Set` to its
        associated Protobuf object.

        :return: the Protobuf object that contains the set constraint.
        """
        set_ = query_pb2.Query.Set()
        set_.op = self._operator()
        # An empty value list is serialized as an (empty) list of strings.
        value_type = type(self.values[0]) if len(self.values) > 0 else str
        if value_type == Location:
            # Locations need an explicit conversion to their Protobuf form.
            values = query_pb2.Query.Set.Values.Locations()
            values.vals.extend(location.to_pb() for location in self.values)
            set_.vals.l.CopyFrom(values)
        else:
            scalar_wrappers = {
                str: ("s", query_pb2.Query.Set.Values.Strings),
                bool: ("b", query_pb2.Query.Set.Values.Bools),
                int: ("i", query_pb2.Query.Set.Values.Ints),
                float: ("d", query_pb2.Query.Set.Values.Doubles),
            }
            if value_type in scalar_wrappers:
                field_name, values_cls = scalar_wrappers[value_type]
                values = values_cls()
                values.vals.extend(self.values)
                getattr(set_.vals, field_name).CopyFrom(values)
        return set_

    @classmethod
    def from_pb(cls, set_pb: query_pb2.Query.Set):
        """
        From the Set Protobuf object to the associated instance of a subclass of :class:`~oef.query.Set`.

        :param set_pb: the Protobuf object that represents the set constraint.
        :return: the object of one of the subclasses of :class:`~oef.query.Set`.
        """
        op_from_pb = {
            query_pb2.Query.Set.IN: In,
            query_pb2.Query.Set.NOTIN: NotIn
        }
        set_class = op_from_pb[set_pb.op]
        value_case = set_pb.vals.WhichOneof("values")
        if value_case == "l":
            return set_class([Location.from_pb(loc_pb) for loc_pb in set_pb.vals.l.vals])
        elif value_case in ("s", "b", "i", "d"):
            return set_class(getattr(set_pb.vals, value_case).vals)

    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        # The comparable type is the type of the first element; unknown when empty.
        if len(self.values) == 0:
            return None
        return type(next(iter(self.values)))

    def __eq__(self, other):
        return type(other) == type(self) and self.values == other.values
class In(Set):
    """
    The 'in set' constraint type: the value of the attribute over which the
    constraint is defined must belong to the set of values provided.

    Examples:
        All the books whose genre is one of the following: `Horror`, `Science fiction`, `Non-fiction`

        >>> c = Constraint("genre", In(["horror", "science fiction", "non-fiction"]))
        >>> c.check(Description({"genre": "horror"}))
        True
        >>> c.check(Description({"genre": "thriller"}))
        False
    """

    def __init__(self, values: SET_TYPES):
        """
        Initialize the 'in set' constraint.

        :param values: the set of admissible values.
        """
        super().__init__(values)

    def _operator(self):
        return query_pb2.Query.Set.IN

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if *value* belongs to the set of values specified by the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value in self.values
class NotIn(Set):
    """
    The 'not in set' constraint type: the value of the attribute over which the
    constraint is defined must NOT belong to the set of values provided.

    Examples:
        All the books that have not been published neither in 1990, nor in 1995, nor in 2000

        >>> c = Constraint("year", NotIn([1990, 1995, 2000]))
        >>> c.check(Description({"year": 1991}))
        True
        >>> c.check(Description({"year": 2000}))
        False
    """

    def __init__(self, values: SET_TYPES):
        """
        Initialize the 'not in set' constraint.

        :param values: the set of excluded values.
        """
        super().__init__(values)

    def _operator(self):
        return query_pb2.Query.Set.NOTIN

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if *value* is absent from the set of values specified by the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value not in self.values
class Distance(ConstraintType):
    """
    The 'distance' constraint type: the locations we are looking for must lie
    within a given radius (in km) of a given center location.

    Examples:
        Define a location of interest, e.g. the Tour Eiffel

        >>> tour_eiffel = Location(48.8581064, 2.29447)

        Find all the locations close to the Tour Eiffel within 1 km

        >>> close_to_tour_eiffel = Distance(tour_eiffel, 1.0)

        Le Jules Verne, a famous restaurant close to the Tour Eiffel, satisfies the constraint.

        >>> le_jules_verne_restaurant = Location(48.8579675, 2.2951849)
        >>> close_to_tour_eiffel.check(le_jules_verne_restaurant)
        True

        The Colosseum does not satisfy the constraint (farther than 1 km from the Tour Eiffel).

        >>> colosseum = Location(41.8902102, 12.4922309)
        >>> close_to_tour_eiffel.check(colosseum)
        False
    """

    def __init__(self, center: Location, distance: float) -> None:
        """
        Instantiate the ``Distance`` constraint.

        :param center: the center from which the distance is computed.
        :param distance: the maximum distance from the center, in km.
        """
        self.center = center
        self.distance = distance

    def check(self, value: Location) -> bool:
        """
        Check whether *value* lies within ``self.distance`` km of the center.

        :param value: the location to check.
        :return: ``True`` if the location satisfies the constraint, ``False`` otherwise.
        """
        return self.center.distance(value) <= self.distance

    def to_pb(self) -> query_pb2.Query.Distance:
        """
        From an instance of :class:`~oef.query.Distance` to its associated Protobuf object.

        :return: the Protobuf object that contains the :class:`~oef.query.Distance` constraint.
        """
        distance_pb = query_pb2.Query.Distance()
        distance_pb.distance = self.distance
        distance_pb.center.CopyFrom(self.center.to_pb())
        return distance_pb

    @classmethod
    def from_pb(cls, distance_pb: query_pb2.Query.Distance):
        """
        From the ``Distance`` Protobuf object to the associated instance of :class:`~oef.query.Distance`.

        :param distance_pb: the Protobuf object that represents the ``~oef.query.Distance`` constraint.
        :return: an instance of ``~oef.query.Distance``.
        """
        return cls(Location.from_pb(distance_pb.center), distance_pb.distance)

    def _get_type(self) -> Optional[Type[ATTRIBUTE_TYPES]]:
        # A Distance constraint only applies to Location-valued attributes.
        return Location

    def __eq__(self, other):
        return (type(other) == Distance
                and self.center == other.center
                and self.distance == other.distance)
class Constraint(ConstraintExpr):
    """
    A constraint over a single attribute of a description.
    """

    def __init__(self,
                 attribute_name: str,
                 constraint: ConstraintType) -> None:
        """
        Initialize a constraint.

        :param attribute_name: the name of the attribute the constraint applies to.
        :param constraint: the constraint type to evaluate on the attribute value.
        """
        self.attribute_name = attribute_name
        self.constraint = constraint

    def to_pb(self):
        """
        Return the associated Protobuf object.

        :return: a Protobuf object equivalent to the caller object.
        :raises ValueError: if the wrapped constraint type is not supported.
        """
        constraint = query_pb2.Query.ConstraintExpr.Constraint()
        constraint.attribute_name = self.attribute_name
        # Dispatch on the constraint-type hierarchy to pick the oneof field.
        for constraint_cls, field_name in ((Relation, "relation"), (Range, "range_"),
                                           (Set, "set_"), (Distance, "distance")):
            if isinstance(self.constraint, constraint_cls):
                getattr(constraint, field_name).CopyFrom(self.constraint.to_pb())
                return constraint
        raise ValueError("The constraint type is not valid: {}".format(self.constraint))

    @classmethod
    def from_pb(cls, constraint_pb: query_pb2.Query.ConstraintExpr.Constraint):
        """
        From the ``Constraint`` Protobuf object to the associated instance of ``Constraint``.

        :param constraint_pb: the Protobuf object that represents the ``Constraint`` object.
        :return: an instance of ``Constraint`` equivalent to the Protobuf object provided in input.
        """
        decoders = {
            "relation": Relation.from_pb,
            "set_": Set.from_pb,
            "range_": Range.from_pb,
            "distance": Distance.from_pb,
        }
        constraint_case = constraint_pb.WhichOneof("constraint")
        constraint_type = None
        if constraint_case is not None:
            constraint_type = decoders[constraint_case](getattr(constraint_pb, constraint_case))
        return cls(constraint_pb.attribute_name, constraint_type)

    def check(self, description: Description) -> bool:
        """
        Check if a description satisfies the constraint. The implementation depends on the type of the constraint.

        :param description: the description to check.
        :return: ``True`` if the description satisfies the constraint, ``False`` otherwise.

        Examples:
            >>> attr_author = AttributeSchema("author", str, True, "The author of the book.")
            >>> attr_year = AttributeSchema("year", int, True, "The year of publication of the book.")
            >>> c1 = Constraint("author", Eq("Stephen King"))
            >>> c2 = Constraint("year", Gt(1990))
            >>> book_1 = Description({"author": "Stephen King", "year": 1991})
            >>> book_2 = Description({"author": "George Orwell", "year": 1948})

            The ``"author"`` attribute instantiation satisfies the constraint, so the result is ``True``.

            >>> c1.check(book_1)
            True

            Here, the ``"author"`` does not satisfy the constraints. Hence, the result is ``False``.

            >>> c1.check(book_2)
            False

            In this case, there is a missing field specified by the query, that is ``"year"``
            So the result is ``False``, even in the case it is not required by the schema:

            >>> c2.check(Description({"author": "Stephen King"}))
            False

            If the type of some attribute of the description is not correct, the result is ``False``.
            In this case, the field ``"year"`` has a string instead of an integer:

            >>> c2.check(Description({"author": "Stephen King", "year": "1991"}))
            False

            >>> Constraint("position", Distance(Location(0.0, 0.0), 1.0)).check(Description({"position": "1.0,1.0"}))
            False
        """
        # If the attribute is absent from the description, the constraint fails.
        if self.attribute_name not in description.values:
            return False
        # If the value's type does not match the constraint's expected type, fail.
        attribute_value = description.values[self.attribute_name]
        if type(attribute_value) != self.constraint._get_type():
            return False
        # Delegate the check to the concrete constraint type.
        return self.constraint.check(attribute_value)

    def is_valid(self, data_model: DataModel) -> bool:
        """
        Check whether this constraint is valid wrt *data_model*.

        :param data_model: the data model used for the validity check.
        :return: ``True`` if the attribute exists in the model and the constraint
            type is compatible with it, ``False`` otherwise.
        """
        attribute = data_model.attributes_by_name.get(self.attribute_name)
        if attribute is None:
            return False
        return self.constraint.is_valid(attribute)

    def __eq__(self, other):
        return (type(other) == Constraint
                and self.attribute_name == other.attribute_name
                and self.constraint == other.constraint)
class Query(ProtobufSerializable):
    """
    Representation of a search that is to be performed. Currently a search is represented as a
    set of key value pairs that must be contained in the description of the service/ agent.

    Examples:
        Return all the books written by Stephen King published after 1990, and available as an e-book:

        >>> attr_author = AttributeSchema("author", str, True, "The author of the book.")
        >>> attr_year = AttributeSchema("year", int, True, "The year of publication of the book.")
        >>> attr_ebook = AttributeSchema("ebook_available", bool, False, "If the book can be sold as an e-book.")
        >>> q = Query([
        ...     Constraint("author", Eq("Stephen King")),
        ...     Constraint("year", Gt(1990)),
        ...     Constraint("ebook_available", Eq(True))
        ... ])

        With a query, you can check that a `~oef.schema.Description` object satisfies the constraints.

        >>> q.check(Description({"author": "Stephen King", "year": 1991, "ebook_available": True}))
        True
        >>> q.check(Description({"author": "George Orwell", "year": 1948, "ebook_available": False}))
        False
    """

    def __init__(self,
                 constraints: List[ConstraintExpr],
                 model: Optional[DataModel] = None) -> None:
        """
        Initialize a query.

        :param constraints: a list of ``Constraint``.
        :param model: the data model where the query is defined.
        :raises ValueError: if the query is empty or not valid for the data model.
        """
        self.constraints = constraints
        self.model = model
        self._check_validity()

    def to_pb(self) -> query_pb2.Query.Model:
        """
        Return the associated Protobuf object.

        :return: a Protobuf object equivalent to the caller object.
        """
        query_pb = query_pb2.Query.Model()
        query_pb.constraints.extend(ConstraintExpr._to_pb(subexpr) for subexpr in self.constraints)
        if self.model is not None:
            query_pb.model.CopyFrom(self.model.to_pb())
        return query_pb

    @classmethod
    def from_pb(cls, query: query_pb2.Query.Model):
        """
        From the ``Query`` Protobuf object to the associated instance of :class:`~oef.query.Query`.

        :param query: the Protobuf object that represents the :class:`~oef.query.Query` object.
        :return: an instance of :class:`~oef.query.Query` equivalent to the Protobuf object provided in input.
        """
        model = DataModel.from_pb(query.model) if query.HasField("model") else None
        return cls([ConstraintExpr._from_pb(subexpr_pb) for subexpr_pb in query.constraints], model)

    def check(self, description: Description) -> bool:
        """
        Check if a description satisfies the constraints of the query.
        The constraints are interpreted as conjunction.

        :param description: the description to check.
        :return: ``True`` if the description satisfies all the constraints, ``False`` otherwise.
        """
        for subexpr in self.constraints:
            if not subexpr.check(description):
                return False
        return True

    def is_valid(self, data_model: DataModel) -> bool:
        """
        Given a data model, check whether the query is valid for that data model.

        :param data_model: the data model to validate against; ``None`` skips the check.
        :return: ``True`` if the query is compliant with the data model, ``False`` otherwise.
        """
        if data_model is None:
            return True
        for subexpr in self.constraints:
            if not subexpr.is_valid(data_model):
                return False
        return True

    def _check_validity(self):
        """Check whether the :class:`~oef.query.Query` object is valid.

        :return ``None``
        :raises ValueError: if the query does not satisfy some sanity requirements."""
        if len(self.constraints) < 1:
            raise ValueError("Invalid input value for type '{}': empty list of constraints. The number of "
                             "constraints must be at least 1.".format(type(self).__name__))
        if not self.is_valid(self.model):
            raise ValueError("Invalid input value for type '{}': the query is not valid "
                             "for the given data model.".format(type(self).__name__))

    def __eq__(self, other):
        if type(other) != Query:
            return False
        return self.constraints == other.constraints and self.model == other.model
| StarcoderdataPython |
3209801 | <reponame>SDRAST/Math<filename>geometry.py
# -*- coding: utf-8 -*-
"""
Classes for properties of geometrical objects.
This isn't exactly difficult math but it illustrates object-oriented
programming pretty well.
For simple calculations it isn't necessary to create instances of the objects.
For example::
In [1]: from Math.geometry import *
In [2]: Sphere(6000).area
Out[2]: 452389342.11693019
In [3]: Sphere(6000).horizon(0.01)
Out[3]: 10.95443745708116
Sometimes, an instance is useful though::
In [4]: s = Sphere(6000)
In [5]: r = s.horizon(300)
In [6]: visible_fraction = Circle(r).area/s.area
In [7]: visible_fraction
Out[7]: 0.023242630385487552
"""
import math
class Circular(object):
  """
  Superclass for circular objects, like Circle() and Sphere()
  """
  def __init__(self, radius):
    """
    Create an instance and define radius and circumference

    @param radius : radius of the circular object
    """
    self.radius = radius
    self.circumference = self._circumference()

  def _circumference(self):
    # circumference of a circle of radius r is 2*pi*r
    return 2*math.pi*self.radius

  def tangent_distance(self, height):
    """
    Distance to the tangent point, seen from a point a radial distance
    'height' above the circle.

    @param height : radial outward distance from circle, in same units as radius

    @return : distance to the tangent (horizon) point, in the same units
    """
    assert height >= 0, "Height cannot be negative"
    # the line of sight to the tangent point is perpendicular to the radius
    # at that point, so cos(theta) = r/(r+h)
    cos_theta = float(self.radius)/(self.radius + height)
    theta = math.acos(cos_theta)
    return self.radius*math.sin(theta)

  def horizon(self, height):
    """
    Distance to the horizon from a point 'height' above the surface.

    Alias for tangent_distance(); the module docstring examples use this name,
    which previously raised AttributeError because no such method existed.

    @param height : radial outward distance from circle, in same units as radius

    @return : distance to the horizon, in the same units as the radius
    """
    return self.tangent_distance(height)
class Circle(Circular):
    """
    A two-dimensional circle; extends Circular with an enclosed area.
    """
    def __init__(self, radius):
        """
        Create an instance, then compute and store the enclosed area.
        """
        super(Circle, self).__init__(radius)
        self.area = self._area()

    def _area(self):
        # enclosed area: pi * r^2
        return math.pi * self.radius**2
class Sphere(Circular):
    """
    A three-dimensional sphere; extends Circular with surface area and volume.
    """
    def __init__(self, radius):
        """
        Create an instance, then compute and store surface area and volume.
        """
        super(Sphere, self).__init__(radius)
        self.area = self._area()
        self.volume = self._volume()

    def _area(self):
        # surface area: 4 * pi * r^2
        return 4 * math.pi * self.radius**2

    def _volume(self):
        # volume: 4 * pi * r^3 / 3
        return 4 * math.pi * self.radius**3 / 3
3316169 | <gh_stars>1-10
# Copyright (C) <NAME> 2020.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import argparse
import dataset
import numpy as np
def included_queries(data_split, squashed_clicks, rerank):
    """
    Return a boolean mask over queries marking those with at least one click.

    :param data_split: dataset split providing ``doclist_ranges`` (cumulative
        per-query document offsets) and ``num_queries()``.
    :param squashed_clicks: flat array of click counts; in the rerank layout
        each query owns a block of ``n_docs**2`` (clicked doc, displayed doc)
        counters, otherwise one counter per document.
    :param rerank: whether ``squashed_clicks`` uses the rerank layout.
    :return: boolean numpy array of shape ``(num_queries,)``.
    """
    if rerank:
        doc_per_q = (data_split.doclist_ranges[1:]
                     - data_split.doclist_ranges[:-1])
        # Each query owns a square block of n_docs**2 counters.
        rerank_weight_ranges = np.concatenate([[0], doc_per_q])
        rerank_weight_ranges = np.cumsum(rerank_weight_ranges**2)
        num_queries = data_split.num_queries()
        # NOTE: np.bool was removed in NumPy 1.24; the builtin bool dtype is
        # the supported spelling.
        q_mask = np.zeros(num_queries, dtype=bool)
        for qid in range(num_queries):
            s_i, e_i = rerank_weight_ranges[qid:qid+2]
            q_mask[qid] = np.sum(squashed_clicks[s_i:e_i]) > 0
    else:
        # Cumulative sums allow per-query totals to be read at the range ends.
        summed_weights = np.cumsum(squashed_clicks)
        q_sum = summed_weights[data_split.doclist_ranges[1:]-1]
        q_mask = np.greater(np.diff(q_sum), 0)
        # np.diff drops the first query; handle it from its own running total.
        first_n = summed_weights[data_split.doclist_ranges[1]-1]
        q_mask = np.concatenate(([first_n > 0], q_mask))
    return q_mask
def read_model(model_file_path, data, scale=1.0):
    """
    Read an SVMrank-style linear model file into a dense weight vector.

    The last line of the file holds the weights as ``index:value`` pairs: the
    first token is skipped and anything from '#' onward is ignored.  Feature
    indices are remapped through ``data.inverse_feature_map``.  A non-zero
    vector is L2-normalized to length ``scale``.
    """
    weights = np.zeros(data.num_features)
    with open(model_file_path, 'r') as model_file:
        last_line = model_file.readlines()[-1]
        last_line = last_line[:last_line.find('#')]
        for token in last_line.split()[1:]:
            raw_index, raw_value = token.split(':')
            weights[data.inverse_feature_map[int(raw_index)]] = float(raw_value)
    norm = np.linalg.norm(weights)
    if norm > 0:
        weights /= norm/scale
    return weights
def sample_from_click_probs(click_probs):
    """
    Sample clicks by flipping one biased coin per document.

    :param click_probs: array of per-document click probabilities.
    :return: indices of the documents that were clicked.
    """
    draws = np.random.uniform(size=click_probs.shape)
    return np.where(draws < click_probs)[0]
def rank_and_invert(scores):
    """
    Rank documents by descending score.

    :param scores: 1-D array of document scores.
    :return: ``(ranking, inverted)`` where ``ranking[k]`` is the document at
        rank ``k`` and ``inverted[i]`` is the rank of document ``i``.
    """
    num_docs = scores.shape[0]
    ranking = np.argsort(scores)[::-1]
    inverted = np.empty(num_docs, dtype=ranking.dtype)
    inverted[ranking] = np.arange(num_docs)
    return ranking, inverted
def generate_clicks(data_split,
                    ranking_model,
                    click_model,
                    n_clicks,
                    cutoff,
                    eta):
    """
    Simulate user clicks on ranked documents under several logging policies.

    Clicks for four policies are gathered in a single pass over randomly
    sampled queries: a full (no-cutoff) deterministic ranking, a deterministic
    top-``cutoff`` ranking, and a "replace last" ranking (the last displayed
    slot is filled by a uniformly random document from rank ``cutoff-1``
    onward) logged with both an oblivious and a policy-aware propensity.

    :param data_split: dataset split with ``doclist_ranges``, ``feature_matrix``,
        ``query_labels(qid)``, ``num_queries()`` and ``name``.
    :param ranking_model: linear weight vector used to score documents.
    :param click_model: one of 'binarized', 'noisybinarized', 'linear'.
    :param n_clicks: number of clicks to collect per policy.
    :param cutoff: number of displayed ranks for the cutoff policies.
    :param eta: position-bias severity; observation probability is
        ``(1/(rank+1))**eta``.
    :return: dict mapping policy name to its logged clicks and propensities.
    """
    def inverse_rank_prop(inv_ranking, cutoff):
        # Observation propensity per document; zero beyond the cutoff.
        result = (1./(inv_ranking+1.))**eta
        if cutoff > 0:
            result[inv_ranking>=cutoff] = 0.
        return result
    # Relevance-conditioned click probability, chosen by click model.
    if click_model == 'binarized':
        def relevance_click_prob(labels):
            n_docs = labels.shape[0]
            rel_prob = np.full(n_docs, 0.1)
            rel_prob[labels>2] = 1.
            return rel_prob
    elif click_model == 'noisybinarized':
        def relevance_click_prob(labels):
            n_docs = labels.shape[0]
            rel_prob = np.full(n_docs, 0.1)
            rel_prob[labels>2] = .15
            return rel_prob
    elif click_model == 'linear':
        def relevance_click_prob(labels):
            n_docs = labels.shape[0]
            max_click_prob = 1.
            min_click_prob = 0.1
            click_prob_step = (max_click_prob-min_click_prob)/4.
            rel_prob = np.full(n_docs, min_click_prob)
            rel_prob += click_prob_step*labels
            return rel_prob
    else:
        raise ValueError('Unknown click model: %s' % click_model)
    # NOTE(review): max_len is computed but never used below.
    max_len = np.amax(data_split.doclist_ranges[1:]
                      - data_split.doclist_ranges[:-1])
    # Per-policy click logs; preallocated to exactly n_clicks entries each.
    no_cutoff_i = 0
    no_cutoff_result = {
        'qid': np.empty(n_clicks, dtype=np.int64),
        'clicked': np.empty(n_clicks, dtype=np.int64),
        'prop': np.empty(n_clicks),
    }
    cutoff_det_i = 0
    cutoff_det_result = {
        'qid': np.empty(n_clicks, dtype=np.int64),
        'clicked': np.empty(n_clicks, dtype=np.int64),
        'prop': np.empty(n_clicks),
        'included': np.empty((n_clicks, cutoff), dtype=np.int64),
    }
    # The oblivious and policy-aware replace-last logs share one counter:
    # they record the same clicks, only with different propensities.
    replace_last_i = 0
    cutoff_obs_result = {
        'qid': np.empty(n_clicks, dtype=np.int64),
        'clicked': np.empty(n_clicks, dtype=np.int64),
        'prop': np.empty(n_clicks),
        'included': np.empty((n_clicks, cutoff), dtype=np.int64),
    }
    cutoff_rep_result = {
        'qid': np.empty(n_clicks, dtype=np.int64),
        'clicked': np.empty(n_clicks, dtype=np.int64),
        'prop': np.empty(n_clicks),
    }
    all_docs = data_split.feature_matrix
    all_scores = np.dot(all_docs, ranking_model)
    num_queries_sampled = 0
    # Keep sampling queries until every policy has n_clicks clicks.
    while min(no_cutoff_i,
              cutoff_det_i,
              replace_last_i) < n_clicks:
        num_queries_sampled += 1
        qid = np.random.choice(data_split.num_queries())
        s_i, e_i = data_split.doclist_ranges[qid:qid+2]
        n_docs = e_i - s_i
        q_scores = all_scores[s_i:e_i]
        q_labels = data_split.query_labels(qid)
        all_rel = relevance_click_prob(q_labels)
        full_rank, full_inv = rank_and_invert(q_scores)
        # Policy 1: deterministic ranking without a cutoff.
        if no_cutoff_i < n_clicks:
            prop = inverse_rank_prop(full_inv, 0)
            for c_i in sample_from_click_probs(all_rel*prop):
                no_cutoff_result['qid'][no_cutoff_i] = qid
                no_cutoff_result['clicked'][no_cutoff_i] = c_i
                no_cutoff_result['prop'][no_cutoff_i] = prop[c_i]
                no_cutoff_i += 1
                if no_cutoff_i >= n_clicks:
                    break
        # Policy 2: deterministic ranking truncated at the cutoff.
        if cutoff_det_i < n_clicks:
            prop = inverse_rank_prop(full_inv, cutoff)
            cur_included = np.where(np.greater(prop, 0))[0]
            cutoff_diff = cutoff - cur_included.shape[0]
            # Pad short doclists so 'included' rows stay fixed-width.
            if cutoff_diff > 0:
                cur_included = np.concatenate(
                    (cur_included,
                     np.repeat(cur_included[0],
                               cutoff_diff)),
                    axis=0)
            for c_i in sample_from_click_probs(all_rel*prop):
                cutoff_det_result['qid'][cutoff_det_i] = qid
                cutoff_det_result['clicked'][cutoff_det_i] = c_i
                cutoff_det_result['prop'][cutoff_det_i] = prop[c_i]
                cutoff_det_result['included'][cutoff_det_i, :] = cur_included
                cutoff_det_i += 1
                if cutoff_det_i >= n_clicks:
                    break
        # Policies 3 & 4: replace the last displayed slot with a uniformly
        # random document from rank cutoff-1 onward.
        if replace_last_i < n_clicks:
            cut_rank = full_rank.copy()
            cut_inv = full_inv.copy()
            if cutoff < n_docs:
                inc_doc = np.random.choice(cut_rank[cutoff-1:])
                swp_doc = cut_rank[cutoff-1]
                # Swap the chosen document into the last displayed position.
                cut_inv[swp_doc] = cut_inv[inc_doc]
                cut_inv[inc_doc] = cutoff-1
            prop = inverse_rank_prop(cut_inv, cutoff)
            cur_included = np.where(np.greater(prop, 0))[0]
            cutoff_diff = cutoff - cur_included.shape[0]
            if cutoff_diff > 0:
                cur_included = np.concatenate(
                    (cur_included,
                     np.repeat(cur_included[0],
                               cutoff_diff)),
                    axis=0)
            for c_i in sample_from_click_probs(all_rel*prop):
                # Oblivious log: display propensity as if deterministic.
                cutoff_obs_result['qid'][replace_last_i] = qid
                cutoff_obs_result['clicked'][replace_last_i] = c_i
                cutoff_obs_result['prop'][replace_last_i] = prop[c_i]
                cutoff_obs_result['included'][replace_last_i, :] = cur_included
                # Policy-aware log: divide by the chance the swapped-in
                # document was selected (uniform over the tail candidates).
                n_outside = max(n_docs-cutoff+1,1)
                cutoff_rep_result['qid'][replace_last_i] = qid
                cutoff_rep_result['clicked'][replace_last_i] = c_i
                if cutoff < n_docs and c_i == inc_doc:
                    cutoff_rep_result['prop'][replace_last_i] = prop[c_i]/float(n_outside)
                else:
                    cutoff_rep_result['prop'][replace_last_i] = prop[c_i]
                replace_last_i += 1
                if replace_last_i >= n_clicks:
                    break
    return {
        'deterministic': {
            'num_queries_sampled': num_queries_sampled,
            'data_split_name': data_split.name,
            'qid': no_cutoff_result['qid'],
            'clicked': no_cutoff_result['clicked'],
            'prop': no_cutoff_result['prop'],
            'cutoff': 0,
        },
        'deterministic_cutoff': {
            'num_queries_sampled': num_queries_sampled,
            'data_split_name': data_split.name,
            'qid': cutoff_det_result['qid'],
            'clicked': cutoff_det_result['clicked'],
            'prop': cutoff_det_result['prop'],
            'included': cutoff_det_result['included'],
            'cutoff': cutoff,
        },
        'replacelast_oblivious': {
            'num_queries_sampled': num_queries_sampled,
            'data_split_name': data_split.name,
            'qid': cutoff_obs_result['qid'],
            'clicked': cutoff_obs_result['clicked'],
            'prop': cutoff_obs_result['prop'],
            'included': cutoff_obs_result['included'],
            'cutoff': cutoff,
        },
        'replacelast_policyaware': {
            'num_queries_sampled': num_queries_sampled,
            'data_split_name': data_split.name,
            'qid': cutoff_rep_result['qid'],
            'clicked': cutoff_rep_result['clicked'],
            'prop': cutoff_rep_result['prop'],
            'cutoff': cutoff,
        },
    }
def generate_squashed_clicks(logging_policy,
                             data_split,
                             ranking_model,
                             click_model,
                             n_clicks,
                             cutoff,
                             eta,
                             clipping_thres):
    """
    Simulate clicks under one logging policy and aggregate ("squash") them
    into per-document click counts and average inverse-propensity weights.

    :param logging_policy: policy name; substrings select behavior
        ('deterministic' vs 'replacelast', optional 'rerank', 'oblivious').
    :param data_split: dataset split with ``doclist_ranges``, ``num_docs()``,
        ``num_queries()``, ``feature_matrix``, ``query_labels(qid)``, ``name``.
    :param ranking_model: linear weight vector used to score documents.
    :param click_model: only 'binarized' is supported.
    :param n_clicks: minimum number of clicks to generate.
    :param cutoff: number of displayed ranks.
    :param eta: position-bias severity for the observation propensities.
    :param clipping_thres: if > 0, clip weights to this maximum afterwards.
    :return: dict with the squashed click counts, weights and metadata.
    """
    def inverse_rank_prop(inv_ranking, cutoff):
        # Observation propensity per document; zero beyond the cutoff.
        result = (1./(inv_ranking+1.))**eta
        if cutoff > 0:
            result[inv_ranking>=cutoff] = 0.
        return result
    if click_model == 'binarized':
        def relevance_click_prob(labels):
            n_docs = labels.shape[0]
            rel_prob = np.full(n_docs, 0.1)
            rel_prob[labels>2] = 1.
            return rel_prob
    else:
        raise ValueError('Unknown click model: %s' % click_model)
    rerank = 'rerank' in logging_policy
    if rerank:
        # NOTE(review): these three assignments are dead — max_len and the
        # 2-D buffers are immediately overwritten by the flat layout below.
        max_len = np.amax(data_split.doclist_ranges[1:]
                          - data_split.doclist_ranges[:-1])
        ave_weights = np.zeros((data_split.num_docs(), max_len))
        clicks_per_doc = np.zeros((data_split.num_docs(), max_len),
                                  dtype=np.int64)
        # Flat layout: n_docs**2 (clicked doc, displayed doc) counters per
        # query, with cumulative block offsets in rerank_weight_ranges.
        doc_per_q = (data_split.doclist_ranges[1:]
                     - data_split.doclist_ranges[:-1])
        n_weights = np.sum(doc_per_q**2)
        ave_weights = np.zeros(n_weights)
        clicks_per_doc = np.zeros(n_weights, dtype=np.int64)
        rerank_weight_ranges = np.concatenate([[0], doc_per_q])
        rerank_weight_ranges = np.cumsum(rerank_weight_ranges**2)
    else:
        # One counter / weight per document.
        ave_weights = np.zeros(data_split.num_docs())
        clicks_per_doc = np.zeros(data_split.num_docs(),
                                  dtype=np.int64)
    all_docs = data_split.feature_matrix
    all_scores = np.dot(all_docs, ranking_model)
    # Precompute rankings and click probabilities once per query.
    normal_ranking = np.zeros(data_split.num_docs(),
                              dtype=np.int64)
    inverted_ranking = np.zeros(data_split.num_docs(),
                                dtype=np.int64)
    rel_click_prob = np.zeros(data_split.num_docs(),
                              dtype=np.float64)
    for qid in np.arange(data_split.num_queries()):
        s_i, e_i = data_split.doclist_ranges[qid:qid+2]
        n_docs = e_i - s_i
        q_scores = all_scores[s_i:e_i]
        (normal_ranking[s_i:e_i],
         inverted_ranking[s_i:e_i]) = rank_and_invert(q_scores)
        q_labels = data_split.query_labels(qid)
        rel_click_prob[s_i:e_i] = relevance_click_prob(q_labels)
    clip_after = clipping_thres > 0
    clicks_generated = 0
    num_queries_sampled = 0
    # Sample queries until enough clicks have been generated.
    while clicks_generated < n_clicks:
        num_queries_sampled += 1
        qid = np.random.choice(data_split.num_queries())
        s_i, e_i = data_split.doclist_ranges[qid:qid+2]
        n_docs = e_i - s_i
        rel_prob = rel_click_prob[s_i:e_i]
        norm_rank = normal_ranking[s_i:e_i]
        inv_rank = inverted_ranking[s_i:e_i]
        if 'deterministic' in logging_policy:
            prop = inverse_rank_prop(inv_rank, cutoff)
            # c_i is an array of clicked (query-local) document indices.
            c_i = sample_from_click_probs(rel_prob*prop)
            d_i = c_i + s_i
            if not rerank:
                clicks_per_doc[d_i] += 1
                ave_weights[d_i] = 1./prop[c_i]
            elif c_i.size > 0:
                # Credit every (clicked, displayed) pair in the query block.
                inc = norm_rank[:cutoff]
                s_j, e_j = rerank_weight_ranges[qid:qid+2]
                cur_weights = np.reshape(ave_weights[s_j:e_j], (n_docs, n_docs))
                cur_clicks = np.reshape(clicks_per_doc[s_j:e_j], (n_docs, n_docs))
                cur_clicks[c_i[:, None], inc[None, :]] += 1
                cur_weights[c_i[:, None], inc[None, :]] = 1./prop[c_i, None]
            clicks_generated += c_i.size
        elif 'replacelast' in logging_policy:
            if cutoff < n_docs:
                # Swap a uniformly random tail document into the last slot.
                inc_doc = np.random.choice(norm_rank[cutoff-1:])
                swp_doc = norm_rank[cutoff-1]
                inv_rank[swp_doc] = inv_rank[inc_doc]
                inv_rank[inc_doc] = cutoff-1
                norm_rank[cutoff-1] = inc_doc
                norm_rank[inv_rank[swp_doc]] = swp_doc
            prop = inverse_rank_prop(inv_rank, cutoff)
            c_i = sample_from_click_probs(rel_prob*prop)
            d_i = c_i + s_i
            if not rerank:
                clicks_per_doc[d_i] += 1
            else:
                inc = norm_rank[:cutoff]
                s_j, e_j = rerank_weight_ranges[qid:qid+2]
                cur_weights = np.reshape(ave_weights[s_j:e_j], (n_docs, n_docs))
                cur_clicks = np.reshape(clicks_per_doc[s_j:e_j], (n_docs, n_docs))
                cur_clicks[c_i[:, None], inc[None, :]] += 1
            clicks_generated += c_i.size
            # Policy-aware correction: clicks at/after the swapped slot are
            # divided by the selection probability 1/n_outside.
            n_outside = max(n_docs-cutoff+1,1)
            if cutoff < n_docs and 'oblivious' not in logging_policy:
                denom = np.ones(c_i.shape)
                denom[np.greater_equal(inv_rank[c_i], cutoff-1)] = n_outside
                # NOTE(review): in the rerank case ave_weights is the flat
                # pair array, yet d_i indexes it by document id — confirm
                # this branch is only reached for non-rerank policies.
                ave_weights[d_i] = denom/prop[c_i]
            else:
                if not rerank:
                    ave_weights[d_i] = 1./prop[c_i]
                else:
                    cur_weights[c_i[:, None], inc[None, :]] = 1./prop[c_i, None]
    if clip_after:
        ave_weights = np.minimum(ave_weights, clipping_thres)
    # Mask of queries that actually received clicks.
    query_mask = included_queries(data_split, clicks_per_doc, rerank)
    queries = np.arange(data_split.num_queries())[query_mask]
    result = {
        'rerank': rerank,
        'num_queries_sampled': num_queries_sampled,
        'data_split_name': data_split.name,
        'average_weights': ave_weights,
        'clicks_per_doc': clicks_per_doc,
        'num_clicks': clicks_generated,
        'cutoff': cutoff,
        'queries': queries,
    }
    if rerank:
        result.update({
            'rerank_ranges': rerank_weight_ranges,
            'inverted_ranking': inverted_ranking,
        })
    return result
| StarcoderdataPython |
156681 | <reponame>danielnbalasoiu/ws-nexus-integration<gh_stars>10-100
#!/usr/bin/env python3
import base64
import json
import logging
import os
import re
import sys
from configparser import ConfigParser
from distutils.util import strtobool
from multiprocessing import Pool, Manager
from typing import Union
from urllib.parse import urlparse, urljoin
from ws_nexus_integration._version import __version__, __tool_name__
import requests
from ws_sdk import WSClient, ws_constants
# Repository formats the integration knows how to download and scan.
SUPPORTED_FORMATS = {'maven2', 'npm', 'pypi', 'rubygems', 'nuget', 'raw', 'docker'}
# Timeout (seconds) for Docker client operations such as image pulls.
DOCKER_TIMEOUT = 600
# Minimum Nexus version exposing the stable repositorySettings endpoint.
VER_3_26 = ["3", "26"]
# Global Configuration instance, populated in main().
config = None
# Log everything to stdout; DEBUG level is opt-in via the DEBUG env var.
logging.basicConfig(level=logging.DEBUG if os.environ.get("DEBUG") else logging.INFO,
                    handlers=[logging.StreamHandler(stream=sys.stdout)],
                    format='%(levelname)s %(asctime)s %(thread)d %(name)s: %(message)s',
                    datefmt='%y-%m-%d %H:%M:%S')
# Quiet noisy third-party loggers.
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('docker').setLevel(logging.WARNING)
class Configuration:
    """Holds all runtime settings, read from a params.config INI file.

    Exposes Nexus connection details, WhiteSource scan options and general
    settings as instance attributes, plus a ready-to-use WSClient.
    """
    # @dataclass
    # class Config:
    #     nexus_base_url: str
    #     nexus_alt_docker_registry_address: str
    #     nexus_user: str
    #     nexus_password: str
    #     nexus_auth_token: str
    #     nexus_repos: str
    #     nexus_ip: str
    #     headers: dict
    def __init__(self) -> None:
        def convert_to_basic_string(user_name: str, password: str):
            """
            Encode username and password per RFC 7617
            :param user_name:
            :param password:
            :return:
            """
            auth_string_plain = f"{user_name}:{password}"
            basic_bytes = base64.b64encode(bytes(auth_string_plain, "utf-8"))
            # str(bytes) yields "b'...'"; slice off the b'' wrapper.
            basic_string = str(basic_bytes)[2:-1]
            return basic_string

        def get_nexus_auth_token(nexus_user: str, nexus_password: str) -> str:
            # Prefer an explicit token from the config file; otherwise derive
            # a basic-auth token from the username and password.
            nexus_auth_token = conf.get('Nexus Settings', 'NexusAuthToken', fallback=None)
            if nexus_auth_token:
                logging.debug(f"Using Nexus authentication token")
            else:
                logging.debug('Converting user and password to basic string')
                try:
                    nexus_auth_token = convert_to_basic_string(nexus_user, nexus_password)
                except KeyError:
                    logging.error("Nexus username or password are missing from the configuration file")
                    sys.exit(1)
            return nexus_auth_token

        def generate_dirs():
            # Create every *_dir attribute that does not exist on disk yet.
            for k, v in self.__dict__.items():
                if k.endswith("_dir") and not os.path.exists(v):
                    logging.debug(f"Directory {v} does not exist and will be created")
                    os.mkdir(v)

        def set_lang_include(includes: str):
            # Translate comma-separated language names into Unified Agent
            # file-suffix include filters.
            inc_l = includes.split(',') if len(includes) else None
            if inc_l:
                ret_l = []
                for i in inc_l:
                    ret_l += ws_constants.LibMetaData.LangSuffix.__dict__[i]
                self.ws_conn.ua_conf.set_include_suffices_to_scan(ret_l)

        def read_conf_file():
            # Config path defaults to ./params.config, overridable via argv[1].
            conf_file = 'params.config'
            if len(sys.argv) > 1:
                conf_file = sys.argv[1]
            if os.path.isfile(conf_file):
                logging.debug(f"Using configuration file: '{conf_file}'")
                c = ConfigParser()
                c.optionxform = str
                c.read(conf_file)
                return c
            else:
                print("""Missing configuration file. Be sure to create params.config file with the following values:\"
[Nexus Settings]
NexusBaseUrl=
NexusAuthToken=
NexusUser=
NexusPassword=
NexusRepositories=
NexusAltDockerRegistryAddress=
[WhiteSource Settings]
WSUserKey=
WSApiKey=
WSProductName=Nexus
WSCheckPolicies=False
WSUrl=
WSLang=
[General Settings]
ThreadCount=1
WorkDir=
JavaBin=
""")
                exit(-1)
        conf = read_conf_file()
        # Nexus Settings
        self.nexus_base_url = conf.get('Nexus Settings', 'NexusBaseUrl', fallback='http://localhost:8081').strip('/')
        self.nexus_alt_docker_registry_address = conf.get('Nexus Settings', 'NexusAltDockerRegistryAddress', fallback=None)
        self.nexus_user = conf.get('Nexus Settings', 'NexusUser', fallback=None)
        self.nexus_password = conf['Nexus Settings']['NexusPassword']
        self.nexus_auth_token = get_nexus_auth_token(self.nexus_user, self.nexus_password)
        self.nexus_repos = conf.get('Nexus Settings', 'NexusRepositories')
        # Host part of the base URL (scheme and port stripped).
        self.nexus_ip = self.nexus_base_url.split('//')[1].split(':')[0]
        self.headers = {'Authorization': f'Basic {self.nexus_auth_token}',
                        'accept': 'application/json'}
        # WhiteSource Settings
        self.product_name = conf.get('WhiteSource Settings', 'WSProductName', fallback='Nexus')
        self.check_policies = conf.getboolean('WhiteSource Settings', 'WSCheckPolicies', fallback=False)
        self.policies = 'true' if self.check_policies else 'false'
        ws_name = f"ws-{__tool_name__.replace('_', '-')}"
        base_dir = conf.get('General Settings', 'WorkDir')
        if not base_dir:
            base_dir = f"c:/tmp/ws-{ws_name}" if sys.platform == "win32" else f"/tmp/{ws_name}"
        self.base_dir = base_dir
        self.is_docker_scan = False
        # Downloaded components are staged here before scanning.
        self.scan_dir = os.path.join(self.base_dir, '_wstemp')
        java_bin = conf.get('General Settings', 'JavaBin', fallback="java")
        self.ws_conn = WSClient(user_key=conf['WhiteSource Settings']['WSUserKey'],
                                token=conf['WhiteSource Settings']['WSApiKey'],
                                url=conf.get('WhiteSource Settings', 'WSUrl'),
                                java_bin=java_bin if java_bin else "java",
                                ua_path=self.base_dir,
                                tool_details=(f"ps-{__tool_name__.replace('_', '-')}", __version__))
        set_lang_include(conf.get('WhiteSource Settings', 'WSLang').replace(" ", ""))
        # General Settings
        self.threads_number = conf.getint('General Settings', 'ThreadCount', fallback=5)
        generate_dirs()
def set_nexus_resources_url(full_version: str):
    """
    Select the Nexus repository-settings REST endpoint based on server version.

    Nexus versions below 3.26 only offer the beta endpoint; 3.26 and later
    provide the stable v1 endpoint.  The chosen path is stored on the global
    ``config``.

    :param full_version: server version string, e.g. ``"Nexus/3.25.1 (OSS)"``.
    """
    # Extract "major.minor" digits with a regex instead of str.strip(), which
    # removes a *character set* (fragile), and compare as integers instead of
    # strings (lexicographic comparison mis-classified e.g. "3.9" vs "26").
    match = re.search(r"(\d+)\.(\d+)", full_version)
    if match:
        major, minor = int(match.group(1)), int(match.group(2))
    else:
        # Unparseable version: fall back to the older (beta) endpoint.
        major, minor = 0, 0
    if (major, minor) < (int(VER_3_26[0]), int(VER_3_26[1])):
        config.resources_url = "/service/rest/beta/repositories"
    else:
        config.resources_url = "/service/rest/v1/repositorySettings"
    logging.debug(f"Using repository: {config.resources_url}")
def retrieve_nexus_repositories():
    """
    Fetch the Nexus repository list and keep repositories whose format is
    supported, recording the reported server version on ``config``.

    :return: list of repository names WhiteSource can scan.
    """
    def resolve_version(reported):
        # Fall back to a modern version when the server header is absent.
        if reported:
            logging.info(f"Nexus Version: {reported}")
            return reported
        logging.warning("Server headers does not contain Nexus version. Assuming >=3.26")
        return "3.26"

    def supported_repo_names(repos):
        names = []
        for repo in repos:
            repo_format = repo.get("format")
            if repo_format not in SUPPORTED_FORMATS:
                logging.warning(f"Repository: {repo['name']} is unsupported format: {repo_format}. Skipping")
                continue
            names.append(repo["name"])
        return names

    logging.debug("Sending request for retrieving Nexus repository list")
    repositories, resp_headers = call_nexus_api("/service/rest/v1/repositories", include_resp_headers=True)
    config.nexus_version = resolve_version(resp_headers.get('Server'))
    return supported_repo_names(repositories)
def validate_selected_repositories(nexus_input_repositories, existing_nexus_repository_list):
    """
    Map user-selected repository indices to repository names.

    Exits the process when the selection resolves to an empty list.

    :param nexus_input_repositories: iterable of numeric indices (ints or strings).
    :param existing_nexus_repository_list: repository names, index-aligned.
    :return: the selected repository names.
    """
    selected_repositories = []
    for index in nexus_input_repositories:
        selected_repositories.append(existing_nexus_repository_list[int(index)])
    if not selected_repositories:
        logging.error("No repositories were found to be scanned")
        sys.exit(1)
    return selected_repositories
def download_components_from_repositories(selected_repos):
    """Download every component of the given repositories into the scan dir.

    Pages through the Nexus components API per repository, downloads the
    artifacts with a worker pool, and collects any Docker images pulled by
    the workers onto ``config`` for the later Docker scan.

    :param selected_repos: iterable of repository names to process.
    """
    for repo_name in selected_repos:
        logging.info(f'Repository: {repo_name}')
        repo_comp_url = f'{config.nexus_base_url}/service/rest/v1/components?repository={repo_name}'
        # Nexus pages results via continuationToken; 'init' marks first page.
        continuation_token = "init"
        all_repo_items = []
        logging.info('Validate artifact list')
        while continuation_token:
            if continuation_token != 'init':
                cur_repo_comp_url = f'{repo_comp_url}&continuationToken={continuation_token}'
            else:
                cur_repo_comp_url = repo_comp_url
            cur_comp_response = call_nexus_api(cur_repo_comp_url)
            for item in cur_comp_response['items']:
                all_repo_items.append(item)
            continuation_token = cur_comp_response['continuationToken']
        if not all_repo_items:
            logging.debug(f'No artifacts found in {repo_name}')
        else:
            cur_dest_folder = os.path.join(config.scan_dir, repo_name)
            os.makedirs(cur_dest_folder, exist_ok=True)
            logging.info('Retrieving artifacts...')
            # Workers push pulled Docker image names into this shared queue.
            manager = Manager()
            docker_images_q = manager.Queue()
            with Pool(config.threads_number) as pool:
                pool.starmap(repo_worker, [(comp, repo_name, cur_dest_folder, config.headers, config, docker_images_q)
                                           for i, comp in enumerate(all_repo_items)])
            # Updating UA env vars to include Docker images from Nexus
            docker_images = set()
            while not docker_images_q.empty():
                docker_images.add(docker_images_q.get(block=True, timeout=0.05))
            if docker_images:
                config.is_docker_scan = True
                logging.info(f"Found total {len(docker_images)} docker images")
                config.docker_images = docker_images
def call_nexus_api(url: str, headers: dict = None, include_resp_headers: bool = False) -> Union[dict, bytes]:
    """
    Perform a GET against the Nexus REST API.

    Relative paths are resolved against the configured base URL and the
    configured auth headers are used unless explicit ones are given.

    :param url: absolute URL or path relative to the Nexus base URL.
    :param headers: optional HTTP headers; defaults to ``config.headers``.
    :param include_resp_headers: when True, successful calls return a
        ``(body, response_headers)`` tuple instead of just the body.
    :return: parsed JSON, raw bytes for non-JSON bodies, or ``None`` on error.
    """
    request_headers = config.headers if headers is None else headers
    target_url = url if url.startswith("http") else urljoin(config.nexus_base_url, url)
    logging.debug(f"Calling Nexus URL: {target_url}")
    ret = None
    try:
        resp = requests.get(target_url, headers=request_headers)
    except requests.RequestException:
        logging.exception(f"Received Error on endpoint: {target_url}")
        return ret
    if resp.status_code != 200:
        logging.error(f"Error calling API return code {resp.status_code} Error: {resp.reason}")
        return ret
    # Non-JSON responses (e.g. artifact downloads) are returned as raw bytes.
    try:
        ret = json.loads(resp.text)
    except json.decoder.JSONDecodeError:
        ret = resp.content
    if include_resp_headers:
        ret = ret, resp.headers
    return ret
def handle_docker_repo(component: dict, conf) -> str:
    """
    Locally pull a Docker image described by a Nexus component.

    :param component: Nexus component dict; its first asset's downloadUrl
        points at the image manifest.
    :param conf: global config (Nexus URL, credentials, repository settings).
    :return: the pulled image name (registry/name, without tag) so the UA
        scans only images downloaded from Nexus, or ``None`` on failure.
    """
    def get_repos_as_dict(c) -> dict:
        """
        Convert repository data into dictionary
        :returns name -> repo dictionary
        :rtype: dict
        """
        repos_list = call_nexus_api(conf.nexus_base_url + conf.resources_url, c.headers)
        logging.debug(f"found {len(repos_list)} repositories")
        repo_dict = {}
        for r in repos_list:
            repo_dict[r['name']] = r
        return repo_dict

    def get_docker_repo_url(repository: dict) -> str:
        """
        Retrieves Repository URL with port
        :param repository: repository settings dict from the Nexus API.
        :return: ``host:port`` string for the Docker registry.
        """
        # Prefer the plain HTTP connector port, then HTTPS, then the port of
        # the repository URL itself as a last resort.
        https_port = repository['docker'].get('httpsPort')
        http_port = repository['docker']['httpPort']
        parsed_url = urlparse(repository['url'])
        if http_port:
            r_url = f"{parsed_url.hostname}:{http_port}"
        elif https_port:
            r_url = f"{parsed_url.hostname}:{https_port}"
        else:
            logging.error("Unable to get repository port. Using default URL")
            r_url = f"{parsed_url.hostname}:{parsed_url.port}"
        logging.debug(f"Returned docker repo URL: {r_url}")
        return r_url
    ret = None
    dl_url = component['assets'][0]["downloadUrl"]
    logging.debug(f"Component repository: {component['repository']}")
    logging.debug(f"Getting manifest file from: {dl_url}")
    manifest = call_nexus_api(dl_url, conf.headers)
    repos = get_repos_as_dict(conf)
    # The docker SDK is an optional dependency; bail out gracefully if absent.
    try:
        import docker
    except ImportError:
        logging.error("Found Docker repository but Docker package is not installed.")
        return ret
    repo = repos.get(component['repository'])
    ret = None
    # An explicitly configured registry address overrides auto-discovery.
    if conf.nexus_alt_docker_registry_address:
        docker_repo_url = conf.nexus_alt_docker_registry_address
        logging.info(f"Using user-defined docker registry URL: {docker_repo_url}")
    elif repo:
        logging.debug(f"Repository data: {repo}")
        docker_repo_url = get_docker_repo_url(repo)
    if docker_repo_url:
        image_name = f"{docker_repo_url}/{manifest['name']}"
        image_full_name = f"{image_name}:{manifest['tag']}"
        logging.info(f"Pulling Docker image: {image_name}")
        try:
            docker_client = docker.from_env(timeout=DOCKER_TIMEOUT)
            # Configuring Nexus user and password are mandatory for non-anonymous Docker repositories
            docker_client.login(username=conf.nexus_user, password=conf.nexus_password, registry=docker_repo_url)
            pull_res = docker_client.images.pull(image_full_name)
            logging.debug(f"Image ID: {image_full_name} successfully pulled")
            ret = image_name
        except docker.errors.DockerException:
            logging.exception(f"Error loading image: {image_name}")
    else:
        logging.warning(f"Repository was not found for {component['repository']}. Skipping")
    return ret
def repo_worker(comp, repo_name, cur_dest_folder, headers, conf, d_images_q):
    """Resolve a single Nexus component into downloadable artifact names.

    Format-specific handling: nuget names are rebuilt from name+version,
    maven keeps only .jar assets, docker components are pulled as images
    (their names pushed onto ``d_images_q``), anything else downloads its
    first asset.  Each resolved artifact is then downloaded via comp_worker.

    :param d_images_q: shared queue collecting pulled Docker image names.
    :param conf: global config passed through to handle_docker_repo.
    :param comp: Nexus component dict (id, repository, format, assets, ...).
    :param repo_name: name of the repository being processed.
    :param cur_dest_folder: directory to store downloaded artifacts in.
    :param headers: HTTP headers for Nexus API calls.
    """
    all_components = []
    component_assets = comp['assets']
    logging.debug(f"Handling component ID: {comp['id']} on repository: {comp['repository']} Format: {comp['format']}")
    if comp['format'] == 'nuget':
        comp_name = '{}.{}.nupkg'.format(comp['name'], comp['version'])
        all_components.append(comp_name)
    elif re.match('(maven).*', comp['format']):
        # Maven components may carry several assets; keep only the jars.
        component_assets_size = len(component_assets)
        for asset in range(0, component_assets_size):
            comp_name = component_assets[asset]['path'].rpartition('/')[-1]
            if comp_name.split(".")[-1] == "jar":
                all_components.append(comp_name)
    elif comp['format'] == 'docker':
        image_id = handle_docker_repo(comp, conf)
        if image_id:
            d_images_q.put(image_id)
    else:
        # Generic formats: download the first asset under its file name.
        comp_name = component_assets[0]['path'].rpartition('/')[-1]
        all_components.append(comp_name)
    for comp_name in all_components:
        comp_worker(repo_name, component_assets, cur_dest_folder, headers, comp_name)
def comp_worker(repo_name, component_assets, cur_dest_folder, headers, comp_name):
    """
    Download a single component asset and write it to the destination folder.

    :param repo_name: repository the component belongs to (for logging only).
    :param component_assets: component 'assets' list; the first entry's
        downloadUrl is fetched.
    :param cur_dest_folder: directory to write the downloaded file into.
    :param headers: HTTP headers for the Nexus API call.
    :param comp_name: file name to store the component under.
    """
    logging.info(f'Downloading {comp_name} component from {repo_name}')
    download_url = component_assets[0]["downloadUrl"]
    payload = call_nexus_api(download_url, headers)
    logging.debug(f"Download URL: {download_url}")
    os.makedirs(cur_dest_folder, exist_ok=True)
    target_path = os.path.join(cur_dest_folder, comp_name)
    with open(target_path, 'wb') as out_file:
        out_file.write(payload)
    logging.info(f'Component {comp_name} has successfully downloaded')
def execute_scan():
    """Configure the WhiteSource Unified Agent and run the scan.

    Runs a Docker-image scan when Docker images were pulled, otherwise a
    project-per-folder filesystem scan of the staged download directory.

    :return: the Unified Agent process return code.
    """
    config.ws_conn.ua_conf.productName = config.product_name
    # NOTE(review): distutils.util.strtobool is deprecated (distutils removed
    # in Python 3.12) — consider replacing; config.policies is 'true'/'false'.
    config.ws_conn.ua_conf.checkPolicies = strtobool(config.policies)
    config.ws_conn.ua_conf.forceCheckAllDependencies = strtobool(config.policies)
    # Offline mode is opt-in via the OFFLINE environment variable.
    config.ws_conn.ua_conf.offline = True if os.environ.get("OFFLINE", "").lower() == "true" else False
    if config.is_docker_scan:
        # Docker scans need full dependency resolution and deep archive
        # extraction to inspect image layers.
        config.ws_conn.ua_conf.resolveAllDependencies = True
        config.ws_conn.ua_conf.archiveExtractionDepth = ws_constants.UAArchiveFiles.ARCHIVE_EXTRACTION_DEPTH_MAX
        config.ws_conn.ua_conf.archiveIncludes = ws_constants.UAArchiveFiles.ALL_ARCHIVE_FILES
        ret = config.ws_conn.scan_docker(product_name=config.product_name, docker_images=config.docker_images)
    else:
        config.ws_conn.ua_conf.projectPerFolder = True
        ret = config.ws_conn.scan(scan_dir=config.scan_dir, product_name=config.product_name)
    logging.debug(f"Unified Agent standard output:\n {ret[1]}")
    return ret[0]
def get_repos_to_scan():
    """
    Determine which Nexus repositories should be scanned.

    Repositories named in the configuration are validated against the ones
    that actually exist (exiting on any mismatch); with no explicit selection,
    every supported repository is scanned.

    :return: list of repository names to scan.
    """
    def validate_selected_repos_from_config(nexus_input_repositories, existing_nexus_repository_list):
        """
        Validate selected repositories when running in configMode=True (production mode)
        :param nexus_input_repositories: comma-separated repository names from the config.
        :param existing_nexus_repository_list: repository names found on the server.
        :return: the validated repository name list.
        """
        requested_repos = list(nexus_input_repositories.split(","))
        missing_repos = set(requested_repos) - set(existing_nexus_repository_list)
        if missing_repos:
            logging.error(f'Could not find the following repositories: {",".join(missing_repos)}')
            logging.error(
                "Specified repositories not found or their format is not supported, check params.config and try again")
            sys.exit(1)
        logging.info('Getting region parameters has finished')
        return requested_repos

    all_repos = retrieve_nexus_repositories()
    if not config.nexus_repos:
        logging.info('No specific repositories specified, all repositories will be scanned')
        return all_repos
    logging.info('Validate specified repositories')
    return validate_selected_repos_from_config(config.nexus_repos, all_repos)
def main():
    """Entry point: load configuration, pick repositories, download, scan.

    :return: the Unified Agent scan return code.
    """
    global config
    config = Configuration()
    selected_repositories = get_repos_to_scan()
    # Endpoint selection depends on the version learned while listing repos.
    set_nexus_resources_url(config.nexus_version)
    download_components_from_repositories(selected_repositories)
    return_code = execute_scan()
    return return_code
# Run the integration when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3268317 | <reponame>woodrow/pyoac
from pypy.module.marshal import interp_marshal
from pypy.interpreter.error import OperationError
import sys
class AppTestMarshalMore:
    """App-level tests for PyPy's marshal module edge cases.

    NOTE: the method bodies use Python 2 syntax (the ``0L`` long literal)
    because PyPy executes app-level tests in the interpreted (py2) language.
    """
    def test_long_0(self):
        # A zero long must round-trip through dumps/loads unchanged.
        import marshal
        z = 0L
        z1 = marshal.loads(marshal.dumps(z))
        assert z == z1
    def test_unmarshal_int64(self):
        # test that we can unmarshal 64-bit ints on 32-bit platforms
        # (of course we only test that if we're running on such a
        # platform :-)
        import marshal
        z = marshal.loads('I\x00\xe4\x0bT\x02\x00\x00\x00')
        assert z == 10000000000
        z = marshal.loads('I\x00\x1c\xf4\xab\xfd\xff\xff\xff')
        assert z == -10000000000
    def test_buffer(self):
        # loads() accepts buffer objects, not just plain strings; trailing
        # bytes after the marshalled value are ignored.
        import marshal
        z = marshal.loads(buffer('i\x02\x00\x00\x00???'))
        assert z == 2
    def test_marshal_buffer_object(self):
        # A buffer marshals as, and unmarshals to, a plain string.
        import marshal
        s = marshal.dumps(buffer('foobar'))
        t = marshal.loads(s)
        assert type(t) is str and t == 'foobar'
    def test_marshal_bufferlike_object(self):
        # Objects exposing the buffer interface (e.g. array) behave the same.
        import marshal, array
        s = marshal.dumps(array.array('c', 'asd'))
        t = marshal.loads(s)
        assert type(t) is str and t == 'asd'
101573 | <gh_stars>10-100
"""Contains a main function for training and/or evaluating a model."""
import os
import numpy as np
from pycrayon import CrayonClient
from slackclient import SlackClient
from slackclient.exceptions import SlackClientError
from parse_args import interpret_args
import atis_data
from interaction_model import InteractionATISModel
from logger import Logger
from model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
from visualize_attention import AttentionGraph
# Metrics computed on the validation set during training.
VALID_EVAL_METRICS = [
    Metrics.LOSS,
    Metrics.TOKEN_ACCURACY,
    Metrics.STRING_ACCURACY]
# Metrics computed on the training sample during training.
TRAIN_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY]
# Metrics reported in the final end-of-training evaluation.
FINAL_EVAL_METRICS = [
    Metrics.STRING_ACCURACY,
    Metrics.TOKEN_ACCURACY,
    Metrics.CORRECT_TABLES,
    Metrics.STRICT_CORRECT_TABLES,
    Metrics.SYNTACTIC_QUERIES,
    Metrics.SEMANTIC_QUERIES]
def send_slack_message(username, message, channel):
    """Post a message to a Slack channel, printing (not raising) on failure.

    Input:
        username (str): Username to send from.
        message (str): The message to send.
        channel (str): Channel to send the message to.
    """
    token = ''  # TODO: put your Slack token here.
    try:
        slack = SlackClient(token)
        slack.api_call('chat.postMessage',
                       channel=channel,
                       text=message,
                       username=username,
                       icon_emoji=':robot_face:')
    except SlackClientError as error:
        print("Couldn't send slack message with exception " + str(error))
def train(model, data, params):
    """ Trains a model.

    Inputs:
        model (ATISModel): The model to train.
        data (ATISData): The data that is used to train.
        params (namespace): Training parameters.

    Returns:
        str: Path of the last checkpoint written ("" if no epoch improved).
    """
    # Get the training batches.
    log = Logger(os.path.join(params.logdir, params.logfile), "w")
    num_train_original = atis_data.num_utterances(data.train_data)
    log.put("Original number of training utterances:\t"
            + str(num_train_original))

    # Default to utterance-level training; switch everything to the
    # interaction-level variants below when requested.
    eval_fn = evaluate_utterance_sample
    trainbatch_fn = data.get_utterance_batches
    trainsample_fn = data.get_random_utterances
    validsample_fn = data.get_all_utterances
    batch_size = params.batch_size
    if params.interaction_level:
        batch_size = 1
        eval_fn = evaluate_interaction_sample
        trainbatch_fn = data.get_interaction_batches
        trainsample_fn = data.get_random_interactions
        validsample_fn = data.get_all_interactions

    maximum_output_length = params.train_maximum_sql_length
    train_batches = trainbatch_fn(batch_size,
                                  max_output_length=maximum_output_length,
                                  randomize=not params.deterministic)
    if params.num_train >= 0:
        train_batches = train_batches[:params.num_train]
    training_sample = trainsample_fn(params.train_evaluation_size,
                                     max_output_length=maximum_output_length)
    valid_examples = validsample_fn(data.valid_data,
                                    max_output_length=maximum_output_length)

    num_train_examples = sum([len(batch) for batch in train_batches])
    num_steps_per_epoch = len(train_batches)
    log.put(
        "Actual number of used training examples:\t" +
        str(num_train_examples))
    log.put("(Shortened by output limit of " +
            str(maximum_output_length) +
            ")")
    log.put("Number of steps per epoch:\t" + str(num_steps_per_epoch))
    log.put("Batch size:\t" + str(batch_size))
    print(
        "Kept " +
        str(num_train_examples) +
        "/" +
        str(num_train_original) +
        " examples")
    print(
        "Batch size of " +
        str(batch_size) +
        " gives " +
        str(num_steps_per_epoch) +
        " steps per epoch")

    # Keeping track of things during training.
    epochs = 0
    patience = params.initial_patience
    learning_rate_coefficient = 1.
    previous_epoch_loss = float('inf')
    maximum_validation_accuracy = 0.
    maximum_string_accuracy = 0.
    # Initialize so the final `return` never raises NameError when no epoch
    # ever improves enough to write a checkpoint.
    last_save_file = ""
    crayon = CrayonClient(hostname="localhost")
    experiment = crayon.create_experiment(params.logdir)
    countdown = int(patience)
    keep_training = True
    while keep_training:
        log.put("Epoch:\t" + str(epochs))
        model.set_dropout(params.dropout_amount)
        model.set_learning_rate(
            learning_rate_coefficient *
            params.initial_learning_rate)

        # Run a training step.
        if params.interaction_level:
            epoch_loss = train_epoch_with_interactions(
                train_batches,
                params,
                model,
                randomize=not params.deterministic)
        else:
            epoch_loss = train_epoch_with_utterances(
                train_batches,
                model,
                randomize=not params.deterministic)
        log.put("train epoch loss:\t" + str(epoch_loss))
        experiment.add_scalar_value("train_loss", epoch_loss, step=epochs)

        model.set_dropout(0.)

        # Run an evaluation step on a sample of the training data.
        train_eval_results = eval_fn(training_sample,
                                     model,
                                     params.train_maximum_sql_length,
                                     "train-eval",
                                     gold_forcing=True,
                                     metrics=TRAIN_EVAL_METRICS)[0]
        for name, value in train_eval_results.items():
            log.put(
                "train final gold-passing " +
                name.name +
                ":\t" +
                "%.2f" %
                value)
            experiment.add_scalar_value(
                "train_gold_" + name.name, value, step=epochs)

        # Run an evaluation step on the validation set.
        # NOTE(review): unlike the train-eval call above, no maximum output
        # length is passed before the name here — confirm eval_fn's signature
        # makes that argument optional or "valid-eval" lands in its slot.
        valid_eval_results = eval_fn(valid_examples,
                                     model,
                                     "valid-eval",
                                     gold_forcing=True,
                                     metrics=VALID_EVAL_METRICS)[0]
        for name, value in valid_eval_results.items():
            log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
            experiment.add_scalar_value(
                "valid_gold_" + name.name, value, step=epochs)

        valid_loss = valid_eval_results[Metrics.LOSS]
        valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
        string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]

        # Decay the learning rate whenever validation loss got worse.
        if valid_loss > previous_epoch_loss:
            learning_rate_coefficient *= params.learning_rate_ratio
            log.put(
                "learning rate coefficient:\t" +
                str(learning_rate_coefficient))
            experiment.add_scalar_value(
                "learning_rate",
                learning_rate_coefficient,
                step=epochs)
        previous_epoch_loss = valid_loss

        # Checkpoint on improved token accuracy (primary) or, failing that,
        # on improved string accuracy (secondary).
        saved = False
        if valid_token_accuracy > maximum_validation_accuracy:
            saved = True
            maximum_validation_accuracy = valid_token_accuracy
            patience = patience * params.patience_ratio
            countdown = int(patience)
            last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
            model.save(last_save_file)
            log.put("maximum accuracy:\t" + str(maximum_validation_accuracy))
            log.put("patience:\t" + str(patience))
            log.put("save file:\t" + str(last_save_file))
        if not saved and string_accuracy > maximum_string_accuracy:
            maximum_string_accuracy = string_accuracy
            log.put(
                "maximum string accuracy:\t" +
                str(maximum_string_accuracy))
            last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
            model.save(last_save_file)

        send_slack_message(
            username=params.logdir,
            message="Epoch " +
            str(epochs) +
            ": " +
            str(string_accuracy) +
            " validation accuracy; countdown is " +
            str(countdown),
            channel="models")

        # Early stopping: countdown is refreshed only when accuracy improves.
        if countdown <= 0:
            keep_training = False

        countdown -= 1
        log.put("countdown:\t" + str(countdown))
        experiment.add_scalar_value("countdown", countdown, step=epochs)
        log.put("")

        epochs += 1

    log.put("Finished training!")
    send_slack_message(username=params.logdir,
                       message="Done training!!",
                       channel="@alsuhr")
    log.close()
    return last_save_file
def evaluate(model, data, params, last_save_file):
    """Evaluates a pretrained model on a dataset.

    Inputs:
        model (ATISModel): Model class.
        data (ATISData): All of the data.
        params (namespace): Parameters for the model.
        last_save_file (str): Location where the model save file is.
    """
    if last_save_file:
        model.load(last_save_file)
    else:
        if not params.save_file:
            raise ValueError(
                "Must provide a save file name if not training first.")
        model.load(params.save_file)

    # Select the requested data split.
    split = None
    if params.evaluate_split == 'dev':
        split = data.dev_data
    elif params.evaluate_split == 'train':
        split = data.train_data
    elif params.evaluate_split == 'test':
        split = data.test_data
    elif params.evaluate_split == 'valid':
        split = data.valid_data
    else:
        raise ValueError("Split not recognized: " + str(params.evaluate_split))

    # Results file name encodes split and query source.
    filename = params.evaluate_split
    if params.use_predicted_queries:
        filename += "predicted"
    else:
        filename += "gold"

    full_name = os.path.join(params.logdir, filename) + params.results_note

    if params.interaction_level or params.use_predicted_queries:
        examples = data.get_all_interactions(split)
        if params.interaction_level:
            evaluate_interaction_sample(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                database_password=params.database_password,
                database_timeout=params.database_timeout,
                use_predicted_queries=params.use_predicted_queries,
                max_generation_length=params.eval_maximum_sql_length,
                write_results=True,
                use_gpu=True)
        else:
            evaluate_using_predicted_queries(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                # Fixed: placeholder left by source anonymization; use the
                # configured credential like the sibling calls do.
                database_password=params.database_password,
                database_timeout=params.database_timeout)
    else:
        examples = data.get_all_utterances(split)
        evaluate_utterance_sample(
            examples,
            model,
            name=full_name,
            gold_forcing=False,
            metrics=FINAL_EVAL_METRICS,
            total_num=atis_data.num_utterances(split),
            max_generation_length=params.eval_maximum_sql_length,
            database_username=params.database_username,
            database_password=params.database_password,
            database_timeout=params.database_timeout,
            write_results=True)
def evaluate_attention(model, data, params, last_save_file):
    """Evaluates attention distributions during generation.

    Inputs:
        model (ATISModel): The model.
        data (ATISData): Data to evaluate.
        params (namespace): Parameters for the run.
        last_save_file (str): The save file to load from.
    """
    # NOTE(review): the guard checks params.save_file but the load uses
    # last_save_file; main() passes params.save_file in, so they coincide
    # today — confirm before changing either call site.
    if not params.save_file:
        raise ValueError(
            "Must provide a save file name for evaluating attention.")
    model.load(last_save_file)

    all_data = data.get_all_interactions(data.dev_data)

    # Find the single interaction named by params.reference_results.
    found_one = None
    for interaction in all_data:
        if interaction.identifier.replace("/", "") == params.reference_results:
            found_one = interaction
            break
    data = [found_one]

    # Do analysis on the random example.
    # Read the ignore list with a context manager so the handle is closed
    # (the original open(...).readlines() leaked the file object).
    with open("cpu_full_interactions.txt") as ignore_file:
        ignore_with_gpu = [line.strip() for line in ignore_file.readlines()]

    for interaction in data:
        if interaction.identifier in ignore_with_gpu:
            continue
        identifier = interaction.identifier.replace("/", "")

        # Skip interactions whose output directory already exists.
        full_path = os.path.join(params.logdir, identifier)
        if os.path.exists(full_path):
            continue

        if params.use_predicted_queries:
            predictions = model.predict_with_predicted_queries(
                interaction, params.eval_maximum_sql_length, syntax_restrict=True)
        else:
            predictions = model.predict_with_gold_queries(
                interaction, params.eval_maximum_sql_length)

        for i, prediction in enumerate(predictions):
            item = interaction.gold_utterances()[i]
            # Flatten the utterance history plus the current utterance into
            # one input token sequence for the attention graph.
            input_sequence = [token for utterance in item.histories(
                params.maximum_utterances - 1) + [item.input_sequence()] for token in utterance]
            attention_graph = AttentionGraph(input_sequence)
            if params.use_predicted_queries:
                item = interaction.processed_utterances[i]
            output_sequence = prediction[0]
            attentions = [
                result.attention_results for result in prediction[-1].predictions]
            for token, attention in zip(output_sequence, attentions):
                attention_graph.add_attention(
                    token, np.transpose(
                        attention.distribution.value())[0])

            suffix = identifier + "_attention_" + str(i) + ".tex"
            filename = os.path.join(full_path, suffix)
            if not os.path.exists(full_path):
                os.mkdir(full_path)
            attention_graph.render_as_latex(filename)
            # Render the LaTeX to PDF and remove auxiliary files.
            os.system(
                "cd " +
                str(full_path) +
                "; pdflatex " +
                suffix +
                "; rm *.log; rm *.aux")
            print("rendered " + str(filename))
def interact(model, params, anonymizer, last_save_file=""):
    """Interactive command line tool.

    Inputs:
        model (ATISModel): The model to interact with.
        params (namespace): Parameters for the run.
        anonymizer (Anonymizer): Class for anonymizing user input.
        last_save_file (str): The save file to load from.
    """
    # Pick the checkpoint to restore: prefer the one produced by training
    # in this run, otherwise fall back to the configured save file.
    if last_save_file:
        save_path = last_save_file
    else:
        if not params.save_file:
            raise ValueError(
                "Must provide a save file name if not training first.")
        save_path = params.save_file
    model.load(save_path)
    model.interactive_prediction(anonymizer)
def main():
    """Main function that trains and/or evaluates a model.

    Reads command-line parameters, builds the dataset and model, then runs
    whichever of train / evaluate / interact / attention-analysis the flags
    request.
    """
    params = interpret_args()

    # Prepare the dataset into the proper form.
    data = atis_data.ATISDataset(params)

    # Construct the model object.
    model_type = InteractionATISModel if params.interaction_level else ATISModel
    model = model_type(
        params,
        data.input_vocabulary,
        data.output_vocabulary,
        data.anonymizer if params.anonymize and params.anonymization_scoring else None)

    last_save_file = ""

    if params.train:
        last_save_file = train(model, data, params)
    if params.evaluate:
        evaluate(model, data, params, last_save_file)
    if params.interactive:
        interact(model, params, data.anonymizer, last_save_file)
    if params.attention:
        # Attention analysis always uses the configured save file, not the
        # checkpoint produced by training in this run.
        evaluate_attention(model, data, params, params.save_file)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
66869 | <gh_stars>0
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
from floodsystem.analysis import polyfit
import datetime
import matplotlib
import numpy as np
def run():
    # Build the station list and attach the latest water-level readings.
    stations = build_station_list()
    update_water_levels(stations)
    dt = 10  # days of level history to fetch per station
    # Threshold of -999 keeps every station; each entry becomes a
    # (station, relative level) tuple.
    stations = stations_level_over_threshold(stations, -999)
    stations_risk_level=[]
    for station_tuple in stations:
        # Low relative level: keep the level itself as the severity score
        # and skip the expensive history fetch / curve fit.
        if station_tuple[1] <= 1.2:
            stations_risk_level.append(station_tuple)
            continue
        station = station_tuple[0]
        skip = False
        try:
            print("Fetching levels for " + station.name)
            results = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
        except KeyError:
            print("Levels for " + station.name + " not found.")
            skip = True
        if skip:
            continue
        try:
            # NOTE(review): `x` is never used afterwards; this call appears to
            # serve only to raise TypeError on malformed date data — confirm
            # before removing it.
            x=matplotlib.dates.date2num(results[0])
            # Degree-4 polynomial fit of level against time.
            poly, _ = polyfit(results[0],results[1],4)
        except TypeError:
            print("Cannot process retrieved data.")
            skip = True
        if skip:
            continue
        # Severity: current relative level, added again for each pessimistic
        # forecast (tomorrow above 1.5x range, in two days above 2x range).
        severity = 0
        levels_tomorrow = poly(1)
        levels_in_two_days = poly(2)
        rel_level_tomorrow = levels_tomorrow - station.typical_range[0]
        rel_level_tomorrow /= station.typical_range[1] - station.typical_range[0]
        rel_level_in_two_days = levels_in_two_days - station.typical_range[0]
        rel_level_in_two_days /= station.typical_range[1] - station.typical_range[0]
        severity += station_tuple[1]
        if rel_level_tomorrow > 1.5:
            severity += station_tuple[1]
        if rel_level_in_two_days > 2:
            severity += station_tuple[1]
        stations_risk_level.append((station, severity))
    # Bucket stations into four risk bands by severity score.
    low_risk = []
    moderate_risk = []
    high_risk = []
    severe_risk = []
    for station_tuple in stations_risk_level:
        if station_tuple[1] < 1:
            low_risk.append(station_tuple[0])
        elif station_tuple[1] < 3:
            moderate_risk.append(station_tuple[0])
        elif station_tuple[1] < 5:
            high_risk.append(station_tuple[0])
        else:
            severe_risk.append(station_tuple[0])
    # now taking from stations to towns
    towns=list(set([station_tuple[0].town for station_tuple in stations]))
    severe_risk_towns=[]
    high_risk_towns=[]
    moderate_risk_towns=[]
    low_risk_towns=[]
    # Assign each town the highest band of any of its stations: the nested
    # membership checks ensure severe beats high beats moderate beats low.
    for town in towns:
        for station in severe_risk:
            if station.town == town:
                if town not in severe_risk_towns:
                    severe_risk_towns.append(town)
        for station in high_risk:
            if station.town == town:
                if town not in severe_risk_towns:
                    if town not in high_risk_towns:
                        high_risk_towns.append(station.town)
        for station in moderate_risk:
            if station.town == town:
                if town not in high_risk_towns:
                    if town not in moderate_risk_towns:
                        if town not in severe_risk_towns:
                            moderate_risk_towns.append(station.town)
        for station in low_risk:
            if station.town == town:
                if town not in severe_risk_towns:
                    if town not in high_risk_towns:
                        if town not in moderate_risk_towns:
                            if town not in low_risk_towns:
                                low_risk_towns.append(station.town)
    # Stations without a town produce None entries, filtered out on print.
    print("towns at low risk-----------------------------")
    for town in low_risk_towns:
        if town is not None:
            print(town)
    print("towns at severe risk----------------------------")
    for town in severe_risk_towns:
        if town is not None:
            print(town)
    print("towns at high risk-----------------------------")
    for town in high_risk_towns:
        if town is not None:
            print(town)
    print("towns at moderate risk-------------------------")
    for town in moderate_risk_towns:
        if town is not None:
            print(town)

if __name__=="__main__":
    print("*** Task 2G: CUED IA Flood Warning Project ***")
    run()
51621 | <gh_stars>10-100
import sublime
from ui.read import regions as read_regions
from structs.highlight_list import *
# Helpers for adding and removing named highlight regions in a Sublime view.
def highlight(view, regions, info):
    """Add `regions` to `view` under `info.name`, or clear them when None.

    Uses an identity check: the original `regions != None` invokes `__ne__`,
    which can misbehave for region-container types with element-wise
    comparison semantics; `is not None` is the correct idiom.
    """
    if regions is not None:
        view.add_regions(info.name, regions, info.format, info.icon, info.mode)
    else:
        remove_highlight(view, info)
def remove_highlight(view, x):
    """Erase the single named highlight `x` from `view`."""
    name = x.name
    view.erase_regions(name)
def remove_highlights(view, xs):
    """Erase every named highlight in `xs` from `view`.

    A plain loop replaces the original `map(...)` call: under Python 3 `map`
    is a lazy iterator, so the side effects would silently never run.
    Behavior on Python 2 is identical.
    """
    for x in xs:
        view.erase_regions(x.name)
def regions(view, highlights):
    """Convert a HighlightList of spot/plasma records into sublime Regions.

    List comprehensions replace the Python 2 `filter`/`map` calls so the
    results are real lists on Python 3 as well (where filter/map are lazy);
    behavior on Python 2 is unchanged.
    """
    missing_list = [spot for spot in highlights.missing
                    if is_valid_spot(view, spot)]
    missing_regions = [sublime.Region(spot.begin, spot.end)
                       for spot in missing_list]
    incorrect_regions = dep_region(view, highlights.incorrect)
    unused_regions = dep_region(view, highlights.unused)
    return HighlightList(incorrect_regions, missing_regions, unused_regions)
def dep_region(view, plasmas):
    """Map each plasma's dependency to its view region.

    Returns a real list (the original `map(...)` is lazy on Python 3).
    """
    return [read_regions.dep(view, plasma.dep) for plasma in plasmas]
def is_valid_spot(view, spot):
    """True when the spot spans a non-empty region whose current buffer text
    still matches the spot's recorded token."""
    if spot.begin >= spot.end:
        return False
    current_text = view.substr(sublime.Region(spot.begin, spot.end))
    return current_text == spot.token
| StarcoderdataPython |
3378713 | # python
# from datetime import datetime
# TODAY = datetime.today()
# TODAY = str(TODAY.year)+"-"+str(TODAY.month)+"-"+str(TODAY.day)
# steem
from steem import Steem
# 3.
import discord
class Keys:
    """Steem account records plus the per-account lists derived from them.

    NOTE(review): posting keys are hard-coded in source control; they should
    be loaded from configuration or the environment instead.
    """

    accounts = [
        {"username": "hakancelik", "weight": 100, "posting_key": "<KEY>"},
        {"username": "coogger", "weight": 100, "posting_key": "<KEY>"},
    ]
    keys = [entry["posting_key"] for entry in accounts]
    users = [entry["username"] for entry in accounts]
    weight = [entry["weight"] for entry in accounts]
# Shared Steem client authenticated with every posting key from Keys.
STEEM = Steem(nodes=['https://api.steemit.com'],keys = Keys.keys)
CLIENT = discord.Client()
# NOTE(review): bot token/ID hard-coded in source control — this credential
# should be revoked and loaded from the environment instead.
CLIENT_ID = "NDI2ODY4ODM3Nzk1NzU4MDgx.DZcQxQ.1uEAYsScuQCtFSAq9JmzZXlvk1c"
BOT_ID = "426868837795758081"
COMMANDS = ["follow","post","sp","account","balance","price","payout","transfer","help","calculate"] # commands for the coogger channel
POSTSHARE = ["coogger","dmania","dlive","dtube","utopian-io","dsound","steepshot"] # for the post-share channels
# Accounts allowed to trigger upvotes (Steem username + Discord user id).
UP_PERMISSION = [
    dict(
        username = "hakancelik",discord_id = "403671940507631618",
    ),
]
HELP_MS = """
\nMerhaba <@{}> sana yapabildiğim bir kaç özellikten bahsetmeme izin ver
\n- **$account steemit_kullanıcı_adın** Hesabın saygınlık puanını, steem power miktarını, yüzde kaç steem power'ın kaldığını, cüzdan bilgilerini gösterir.
\n- **$sp steemit_kullanıcı_adın** Hesabındaki steem power ve ne kadar gücünün kaldıgını görebilirsin.
\n- **$follow steemit_kullanıcı_adın** hesap takip bilgilerini gösterir.
\n- **$post steemit_post_adresi** post bilgilerini gösterir.
\n- **$balance steemit_kullanıcı_adı** hesabındaki sbd, steem, steem power miktarını gösterir.
\n- **$price** coinlerin dolar cinsinden değerlerini gösterir.
\n- **$payout steemit_kullanıcı_adı** ödeme bekleyen gönderi bilgilerini gösterir.
\n- **$transfer steemit_kullanıcı_adı** eğer coinlerini bloctras aracılığı ile bitcoine ve koinim aracılığı ile tl ye dönültüreceksen şeklinde yazdığında sana kesintiler ile birlikte kaç tl alacağını gösterebilirim.
\n- **$calculate 40** şeklinde yazarsan sana $40 değerin %50/%50 olarak ayarlandığını varsayarak ödeme sonunda kaç sbd kaç sp
\n alacagınızı gösterebilirim.
\n- cooggerup kanalında **$cooggerup steemit_post_adresi** ile upvote atılıyor fakat şuan için bu özellik sadece hakancelik ve sedatcelik tarafından
\n belirli postlara yönelik kullanılmakta.
\n made by @hakancelik
"""
WELCOME_MS = """
\nMerhaba <@{}> seni aramızda görmekten büyük mutluluk duyduğumuzu söylemek istiyorum
\n- coogger projesi, coogger discord kanalı, cooggerup botu hakkında öğrenmen gereken bütün bilgileri
\n<#421086987571822592> kanalında bulabilirsin,
\nlütfen ilk ziyaret edeceğin yer burası olsun daha sonra coogger topluluğunda ki
\nkullanıcılar ile takipleşmek için <#421390753881653248> kanalını ziyaret edebilirsin
\nbenim yani cooggerup botunun diğer hünerlerini
\ngörmek istersen seni mutlaka <#424979369820160012> kanalına bekliyorum $help yazman yeterli
\nsponsor olan kişilerin kanalı hemen burası <#421085667653713930> sende sponsor olabilirsin
\nbunun için <@403671940507631618>'e özelden mesaj at.
\ndeveloperların arge, tartışma vb kanalları vardır eğer sende developer isen ve coogger'da rol olmak istersen yine hakancelik'e
\nözelden yazabilirsin veya developer olan biri ile konuşabilirsin.
\nve son olarak sohbet muhabbet aşkına <#419852543368101891> kanalına geçebilirsin
\ntekrar görüşmek üzere.
\nBütün kanalların ne için açıldığı vb bilgiler kanal başlığında yazar.
"""
COOGGERUP_REPLY = """
#### Tebrikler içeriğiniz coogger projesi tarafından seçildi.
> Bu oluşum hakkında detaylı bilgileri aşağıdaki adreslere tıklayarak öğrenebilirsiniz.
----
#### Coogger projesi ile ilgili detaylı bilgi.
- http://www.coogger.com/@coogger/version/v130/
#### Diğer hizmetimiz olan steemitapp
- [steemitapp nedir nasil kullanilir](https://steemit.com/tr/@hakancelik/steemitapp-nedir-nasil-kullanilir)
- [coogger.com/apps/steemitapp](http://www.coogger.com/apps/steemitapp/)
---
- Bizimle [discord](https://discord.gg/q2rRY8Q) üzerinden iletişime geçebilir ve **cooggerup** botunun yararlı özelliklerini burada kullanabilirsiniz.
- Bir sonraki paylaşımınızı [www.coogger.com](http://www.coogger.com) üzerinden paylaşarak daha fazla destek alabilir, bizlere daha fazla destek verebilirsiniz.
<center>**Siz bizlere bizler ise sizlere destek olmalıyız.**</center>
<center>Sende bu oluşumun bir parçası olabilir, destek verebilir veya alabilirsin, discord kanalımıza bekleriz. </center>
----
"""
| StarcoderdataPython |
157866 | <filename>yoti_python_sdk/doc_scan/session/create/filter/__init__.py<gh_stars>1-10
from .document_restrictions_filter import (
DocumentRestrictionBuilder,
DocumentRestrictionsFilterBuilder,
)
from .orthogonal_restrictions_filter import OrthogonalRestrictionsFilterBuilder
from .required_id_document import RequiredIdDocumentBuilder
from .required_supplementary_document import RequiredSupplementaryDocumentBuilder
__all__ = [
"DocumentRestrictionsFilterBuilder",
"DocumentRestrictionBuilder",
"OrthogonalRestrictionsFilterBuilder",
"RequiredIdDocumentBuilder",
"RequiredSupplementaryDocumentBuilder",
]
| StarcoderdataPython |
3262681 | from ruledxml import destination, source, foreach
@foreach("/xml/element", "/doc/message")
@source("/xml/element/child")
@destination("/doc/message/text")
def rule34(child_text):
    """For each source element, write its child text suffixed with "2" to
    the destination path."""
    suffix = "2"
    return child_text + suffix
| StarcoderdataPython |
3238749 | <filename>skpalm/permutations/utils/nextperm.py
import numpy as np
def nextperm(a):
    # Advance the rows of 2-D array `a` to the next permutation in
    # lexicographic order, keyed on column 0 (classic next-permutation
    # algorithm written with 1-based indices). Rows are swapped in place;
    # when `a` is already the last permutation the order wraps around to
    # the first (reversed) permutation instead.
    n = a.shape[0]
    # Find the rightmost pivot j where key[j-1] < key[j].
    j = n - 1
    while j > 0 and a[j - 1, 0] >= a[j, 0]:
        j = j - 1
    if j > 0:
        # Find the rightmost row l whose key exceeds the pivot's key.
        l = n
        while a[j - 1, 0] >= a[l - 1, 0]:
            l = l - 1
        # Swap the pivot row with its successor.
        tmp = a[j - 1, :].copy()
        a[j - 1, :] = a[l - 1, :].copy()
        a[l - 1, :] = tmp.copy()
        # Reverse the suffix after the pivot to make it the smallest tail.
        k = j + 1
        l = n
        while k < l:
            tmp = a[k - 1, :].copy()
            a[k - 1, :] = a[l - 1, :].copy()
            a[l - 1, :] = tmp.copy()
            k = k + 1
            l = l - 1
    else:
        # Already the last permutation: wrap around.
        # NOTE(review): np.flipud returns a reversed view and only rebinds
        # the local name, so on this branch the caller's array is NOT
        # modified in place — the returned value must be used.
        a = np.flipud(a)
    return a
| StarcoderdataPython |
4830175 | <reponame>PingjunChen/pytorch-study<gh_stars>1-10
# -*- coding: utf-8 -*-
import os, sys, pdb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import shutil, time
from loader import train_imagenet_loader
from loader import val_imagenet_loader
from utils import adjust_learning_rate
from utils import accuracy, AverageMeter
def train_imagenet(model, args):
    """Full ImageNet training loop: train each epoch, validate, and write a
    checkpoint whenever top-1 validation accuracy improves."""
    optimizer = optim.SGD(model.parameters(), weight_decay=args.weight_decay,
                          lr=args.lr, momentum=args.momentum)
    criterion =nn.CrossEntropyLoss()
    train_loader = train_imagenet_loader(args)
    val_loader = val_imagenet_loader(args)
    # NOTE(review): any existing model directory — including previous
    # checkpoints — is deleted before training starts.
    if os.path.exists(args.model_dir):
        shutil.rmtree(args.model_dir)
    os.makedirs(args.model_dir)
    best_prec = 0.0
    for epoch in range(1, args.epochs+1):
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        cur_prec, _ = validate(val_loader, model, criterion, args)
        # remember best prec@1 and save checkpoint
        is_best = cur_prec > best_prec
        if is_best == True:
            best_prec = cur_prec
            cur_model_name = args.model_name + "-" + str(epoch).zfill(2) + "-{:.3f}.pth".format(best_prec)
            torch.save(model.state_dict(), os.path.join(args.model_dir, cur_model_name))
            print('Save weights at {}/{}'.format(args.model_dir, cur_model_name))
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch, tracking timing, loss and top-1/top-5
    accuracy with running AverageMeters."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.cuda:
            inputs, targets = inputs.cuda(args.device_id), targets.cuda(args.device_id)
        inputs, targets = Variable(inputs), Variable(targets)
        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1[0], inputs.size(0))
        top5.update(prec5[0], inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.log_interval == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, args):
    """Evaluate on the validation set (no gradients) and return the average
    top-1 and top-5 accuracies as plain Python floats."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (inputs, targets) in enumerate(val_loader):
            if args.cuda:
                inputs, targets = inputs.cuda(args.device_id), targets.cuda(args.device_id)
            inputs, targets = Variable(inputs), Variable(targets)
            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1[0], inputs.size(0))
            top5.update(prec5[0], inputs.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.log_interval == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg.item(), top5.avg.item()
| StarcoderdataPython |
3220441 | <reponame>Anilkumar95/python-75-hackathon
#creating a file if doesn't exist in the directory
exist = open("cmr.txt", "w")
| StarcoderdataPython |
1670606 | import pygame
from .chars import Enemy
from .shells import *
import os
# Working file paths
BASE_PATH = os.path.dirname(__file__)
IMAGES_PATH = os.path.join(BASE_PATH, 'resources/Images/')
# virus_1
class Virus1(Enemy):
    # Basic enemy: smallest sprite, weakest projectile and health pool.
    ammo = Virus1shell        # projectile class this enemy fires
    height = 78               # sprite height in pixels
    width = 78                # sprite width in pixels
    hp = 100                  # current hit points
    health_max = 100          # maximum hit points (for health-bar scaling)
# Virus_2
class Virus2(Enemy):
    # Mid-tier enemy: same sprite size as Virus1 with double the health.
    ammo = Virus2shell
    height = 78
    width = 78
    hp = 200
    health_max = 200
# Virus3
class Virus3(Enemy):
    # Large enemy: double-size sprite, 300 hit points.
    ammo = Virus3shell
    height = 156
    width = 156
    hp = 300
    health_max = 300
# Virus4
class Virus4(Enemy):
    # Large enemy: same sprite size as Virus3 with more health.
    ammo = Virus4shell
    height = 156
    width = 156
    hp = 350
    health_max = 350
# virus_boss
class VirusBoss(Enemy):
    """Boss enemy: periodically spawns Virus2 minions and only takes damage
    while no minions are alive."""
    ammo = Virus1shell
    height = 320
    width = 320
    hp = 500
    health_max = 500
    spawn_cooldown = 70
    # NOTE(review): class-level list is shared by every VirusBoss instance —
    # fine for a single boss, a bug if more than one is ever created.
    enemy_list = []
    enemy = Virus2

    # Loading the boss health bar (runs once, at import time).
    health_bar = pygame.image.load(IMAGES_PATH + "HUD/bossbar.png")

    # Function to spawn enemies
    def spawn_enemies(self, player):
        # checking if there is room to spawn enemies
        if self.spawn_cooldown <= 0 and len(self.enemy_list) < 11:
            self.spawn_cooldown = 70
            # Spawn only when the player is within 700 px. The original
            # `700 > player.x - self.x or 700 > self.x - player.x` was always
            # true, because one of the two differences is never positive.
            if abs(player.x - self.x) < 700:
                self.enemy_list.append(self.enemy(self.x - 50, 500))
                self.enemy_list[-1].load_anim(IMAGES_PATH + "/Characters/Virus/Virus_2/idle.png",
                                              IMAGES_PATH + "Projectiles/virus_1_")
                self.enemy_list[-1].Tracking = True
        else:
            self.spawn_cooldown -= 1

    # Checking for damage by player
    def check_hurt(self, player):
        # Damage applies only while no minions are alive; weapon slot 0 is
        # assumed to fire no projectiles.
        if player.current_weapon != 0:
            if not self.enemy_list:
                ammo_list = player.weapons[player.weapon_list[player.current_weapon]].ammo_list
                # Collect survivors instead of popping while iterating: the
                # original mutated the list during iteration, which skips
                # elements and could miss hits.
                remaining = []
                for ammo in ammo_list:
                    if self.x + self.width > ammo.x > self.x and self.y + self.height > ammo.y > self.y:
                        self.hp -= ammo.damage
                    else:
                        remaining.append(ammo)
                # Mutate in place so the weapon keeps the same list object.
                ammo_list[:] = remaining

    # Update function for boss health-bar
    def update_health_bar(self, win):
        if self.hp > 0:
            win.blit(self.health_bar, (430, 22), (0, 0, (self.hp/self.health_max) * 500, 20))

    # function to kill player if in the virus
    def kill_on_contact(self, player):
        if self.x + self.width > player.x > self.x and self.y + self.height > player.y > self.y:
            player.hp = 0
| StarcoderdataPython |
1745028 | <gh_stars>0
#encoding:utf-8
subreddit = 'bangladesh'            # subreddit to pull submissions from
t_channel = '@r_bangladesh'         # Telegram channel that receives posts
submissions_ranking = 'new'         # which submission listing/ranking to read
def send_post(submission, r2t):
    """Send the submission using the r2t helper's simple sender and return
    its result."""
    sender = r2t.send_simple
    return sender(submission)
| StarcoderdataPython |
3236863 | <gh_stars>0
'''
Uses PyMOL to convert cms structure into pdb
Supports renaming of lipid atoms (currently DPPC and DMPC only) to their amber/charmm counter parts
Also supports renaming:
> non-standard residues (i.e. GLH) to standard (i.e. GLU)
> solvent (i.e. SPC) to HOH
> ions (i.e. Na) to charmm names (i.e. SOD)
> histidines to HIS or charmm names
-2020- <EMAIL>
'''
from __future__ import print_function
import sys
import argparse
from os import path
import logging
from pymol import cmd
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
parser = argparse.ArgumentParser()
parser.add_argument('structure',help='Desmond input file')
parser.add_argument('output', help='Output file name')
parser.add_argument('--rename_water', dest='solvent', type=bool, help='Renames solvent atoms to HOH', default=True)
parser.add_argument('--rename_lipids', dest='lipid', type=bool, help='Rename lipids from OPLS naming to AMBER (only POPC is supported currently)', default=False)
parser.add_argument('--rename_residues', dest='poly', type=bool, help='Rename non-standard residues like ASH and GLH to their non-charged equivalents', default=False)
parser.add_argument('--rename_ions_charmm', dest='ions', type=bool, help='Renames ions from OPLS', default=False)
parser.add_argument('--rename_his', dest='his', choices=('HIS','CHARMM'), help='Rename histidines from OPLS i.e. HIP, HIE and HID to HIS or CHARMM format (HSP, HSE, HSD)')
args = parser.parse_args()
# Desmond/OPLS -> AMBER/CHARMM atom-name mapping for lipid oxygens
# (applied only to DPPC/DMPC residues below).
lipid_dict = {
    'O1':'O11',
    'O2':'O12',
    'O3':'O13',
    'O4':'O14'
}
# Non-standard (protonation-variant) residue names -> standard names.
residue_dict = {
    'ASH':'ASP',
    'GLH':'GLU',
    'LYN':'LYS',
    'CYX':'CYS'
}
# Ion names -> CHARMM ion names.
ion_dict = {
    'Na':'SOD',
    'Cl':'CLA',
    'K':'POT',
    'CA':'CAL'
}
# OPLS histidine protonation-state names -> CHARMM equivalents.
histidine_dict = {
    'HIE':'HSE',
    'HID':'HSD',
    'HIP':'HSP'
}
# --- Apply the requested renamings with PyMOL and write the result. ---
# `cmd.alter` applies an expression to every atom matched by the selection;
# the value it returns is logged as the affected-atom count below.

# load structure
cmd.load(args.structure,'structure')

# change lipid atom names
if args.lipid:
    logging.info('---Renaming lipid atoms---')
    if cmd.count_atoms('resn popc+pope') > 0:
        logging.warning('This script does not support converting POPC or POPE atom names!')
    for key, val in lipid_dict.items():
        out = cmd.alter('structure and resn DPPC+DMPC and name "%s"' % key, 'name = "%s"' % val)
        logging.info('Renamed %s atoms from %s to %s' % (out, key, val))

# Change Solvent name
if args.solvent:
    logging.info('---Renaming water---')
    out = cmd.alter('solvent', 'resn = "HOH"')
    logging.info('Renamed %s water residue atoms to HOH' % (out))

# Change non-standard residue names
if args.poly:
    logging.info('---Renaming non-standard residues---')
    for key, val in residue_dict.items():
        out = cmd.alter('structure and resn "%s"' % key, 'resn = "%s"' % val)
        logging.info('Renamed %s residue atom from %s to %s' % (out, key, val))

# Change ion names
if args.ions:
    logging.info('---Renaming ions---')
    for key, val in ion_dict.items():
        out = cmd.alter('structure and resn "%s"' % key, 'resn = "%s"' % val)
        logging.info('Renamed %s ions from %s to %s' % (out, key, val))

# Change histidine names
if args.his:
    logging.info('---Renaming histidines to %s naming---' % (args.his))
    if args.his == 'CHARMM':
        # Map each OPLS protonation state to its CHARMM equivalent.
        for key, val in histidine_dict.items():
            out = cmd.alter('structure and resn "%s"' % key, 'resn = "%s"' % val)
            logging.info('Renamed %s residue atoms from %s to %s' % (out, key, val))
    elif args.his == 'HIS':
        # Collapse all protonation states to plain HIS.
        out = cmd.alter('structure and resn HIP+HIE+HID', 'resn = "HIS"')
        logging.info('Renamed %s histidine residue atoms to HIS' % (out))

# Write the converted structure to the requested output file.
cmd.save(args.output, 'structure')
3221067 | # -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 9/13/17
class ComplexitySpec(object):
    """
    Base class of complexity specification

    :cvar environment: configs for environmental noise
    :cvar proposition: configs for propositional noise
    :cvar interaction: configs for interactional noise
    :cvar social: configs for social noise
    """
    # Subclasses override these with dicts of noise parameters.
    environment = None
    proposition = None
    interaction = None
    social = None
class Complexity(object):
    """
    Complexity object used to decide the task difficulties

    :ivar asr_acc: the mean value of asr confidence
    :ivar asr_std: the std of asr confidence distribution
    :ivar yn_question: the chance the user will ask yn_question
    :ivar reject_style: the distribution over different rejection styles
    :ivar multi_slots: the distribution over how many slots in an inform
    :ivar multi_goals: the distribution over how many goals in a dialog
    :ivar dont_care: the chance that the user doesn't care about a slot
    :ivar hesitation: the chance that the user will hesitate in an utterance
    :ivar self_restart: the chance that the user will restart
    :ivar self_correct: the chance that the user will correct itself in an utterance.
    :ivar self_disclosure: the chance that the system will do self disclosure
    :ivar ref_shared: the chance that the system will do reference
    :ivar violation_sn: the chance that the system will do VSN
    """

    def __init__(self, complexity_spec):
        """Copy each noise parameter out of a ComplexitySpec-style object."""
        # environment
        self.asr_acc = complexity_spec.environment['asr_acc']
        self.asr_std = complexity_spec.environment['asr_std']
        # propositional
        self.yn_question = complexity_spec.proposition['yn_question']
        self.reject_style = complexity_spec.proposition['reject_style']
        self.multi_slots = complexity_spec.proposition['multi_slots']
        self.multi_goals = complexity_spec.proposition['multi_goals']
        self.dont_care = complexity_spec.proposition['dont_care']
        # interactional
        self.hesitation = complexity_spec.interaction['hesitation']
        self.self_restart = complexity_spec.interaction['self_restart']
        self.self_correct = complexity_spec.interaction['self_correct']
        # social
        self.self_disclosure = complexity_spec.social['self_disclosure']
        self.ref_shared = complexity_spec.social['ref_shared']
        self.violation_sn = complexity_spec.social['violation_sn']

    def get_name(self):
        """Return the class name (identifies the complexity type)."""
        return self.__class__.__name__
class MixSpec(ComplexitySpec):
    """
    An example spec mixing environmental, propositional and interactional
    noise (social noise disabled).
    """
    environment = {'asr_acc': 0.7,
                   'asr_std': 0.15}
    proposition = {'yn_question': 0.4,
                   'reject_style': {'reject': 0.5, 'reject+inform': 0.5},
                   'multi_slots': {1: 0.7, 2: 0.3},
                   'dont_care': 0.1,
                   'multi_goals': {1: 0.6, 2: 0.4},
                   }
    interaction = {'hesitation': 0.4,
                   'self_restart': 0.1,
                   'self_correct': 0.2}
    social = {'self_disclosure': None,
              'ref_shared': None,
              'violation_sn': None}
class PropSpec(ComplexitySpec):
    """
    An example spec with propositional noise only (perfect ASR, no
    interactional or social noise).
    """
    environment = {'asr_acc': 1.0,
                   'asr_std': 0.0}
    proposition = {'yn_question': 0.4,
                   'reject_style': {'reject': 0.5, 'reject+inform': 0.5},
                   'multi_slots': {1: 0.7, 2: 0.3},
                   'dont_care': 0.1,
                   'multi_goals': {1: 0.7, 2: 0.3},
                   }
    interaction = {'hesitation': 0.0,
                   'self_restart': 0.0,
                   'self_correct': 0.0}
    social = {'self_disclosure': None,
              'ref_shared': None,
              'violation_sn': None}
class EnvSpec(ComplexitySpec):
    """
    An example spec with environmental (ASR) noise only; all propositional,
    interactional and social noise disabled.
    """
    environment = {'asr_acc': 0.7,
                   'asr_std': 0.2}
    proposition = {'yn_question': 0.0,
                   'reject_style': {'reject': 1.0, 'reject+inform': 0.0},
                   'multi_slots': {1: 1.0, 2: 0.0},
                   'dont_care': 0.0,
                   'multi_goals': {1: 1.0, 2: 0.0},
                   }
    interaction = {'hesitation': 0.0,
                   'self_restart': 0.0,
                   'self_correct': 0.0}
    social = {'self_disclosure': None,
              'ref_shared': None,
              'violation_sn': None}
class InteractSpec(ComplexitySpec):
    """
    An example spec with interactional noise only (perfect ASR, no
    propositional or social noise).
    """
    environment = {'asr_acc': 1.0,
                   'asr_std': 0.0}
    proposition = {'yn_question': 0.0,
                   'reject_style': {'reject': 1.0, 'reject+inform': 0.0},
                   'multi_slots': {1: 1.0, 2: 0.0},
                   'dont_care': 0.0,
                   'multi_goals': {1: 1.0, 2: 0.0},
                   }
    interaction = {'hesitation': 0.4,
                   'self_restart': 0.1,
                   'self_correct': 0.2}
    social = {'self_disclosure': None,
              'ref_shared': None,
              'violation_sn': None}
class CleanSpec(ComplexitySpec):
    """
    An example spec for the easy setting: no noise of any kind.
    """
    environment = {'asr_acc': 1.0,
                   'asr_std': 0.0}
    proposition = {'yn_question': 0.0,
                   'reject_style': {'reject': 1.0, 'reject+inform': 0.0},
                   'multi_slots': {1: 1.0, 2: 0.0},
                   'dont_care': 0.0,
                   'multi_goals': {1: 1.0, 2: 0.0},
                   }
    interaction = {'hesitation': 0.0,
                   'self_restart': 0.0,
                   'self_correct': 0.0}
    social = {'self_disclosure': None,
              'ref_shared': None,
              'violation_sn': None}
1796422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Integration with `theHarvester <https://code.google.com/p/theharvester/>`_.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.config import Config
from golismero.api.data import discard_data
from golismero.api.data.resource.domain import Domain
from golismero.api.data.resource.email import Email
from golismero.api.data.resource.ip import IP
from golismero.api.external import get_tools_folder
from golismero.api.logger import Logger
from golismero.api.plugin import TestingPlugin
import os, os.path
import socket
import StringIO
import sys
import traceback
import warnings
# Import theHarvester as a library.
cwd = os.path.abspath(get_tools_folder())
cwd = os.path.join(cwd, "theHarvester")
sys.path.insert(0, cwd)
try:
import discovery
from discovery import * #noqa
finally:
sys.path.remove(cwd)
del cwd
#------------------------------------------------------------------------------
class HarvesterPlugin(TestingPlugin):
    """
    Integration with
    `theHarvester <https://github.com/MarioVilas/theHarvester/>`_.

    Receives a Domain, searches its hostname as a keyword on the supported
    engines, and returns the in-scope Email, Domain and IP resources found.
    """

    # Supported theHarvester modules (each maps to a ``discovery`` submodule).
    SUPPORTED = (
        "google", "bing", "linkedin", "dogpile",  #"pgp"
    )

    #--------------------------------------------------------------------------
    def get_accepted_types(self):
        # This plugin is triggered by Domain resources only.
        return [Domain]

    #--------------------------------------------------------------------------
    def run(self, info):
        """
        Search ``info.hostname`` on every supported engine and convert the
        raw theHarvester results into in-scope Email/Domain/IP resources.

        :param info: Domain resource to use as the search keyword.
        :returns: list of newly discovered resources.
        """

        # Get the search parameters.
        word = info.hostname
        limit = 100
        try:
            # Plugin config may override the per-engine result limit
            # (base 0 allows hex/octal literals in the config value).
            limit = int(Config.plugin_config.get("limit", str(limit)), 0)
        except ValueError:
            pass

        # Search every supported engine; a failure in one engine is logged
        # and skipped so the others still run.
        total = float(len(self.SUPPORTED))
        all_emails, all_hosts = set(), set()
        for step, engine in enumerate(self.SUPPORTED):
            try:
                Logger.log_verbose("Searching keyword %r in %s" % (word, engine))
                self.update_status(progress=float(step * 80) / total)
                emails, hosts = self.search(engine, word, limit)
            except Exception, e:
                t = traceback.format_exc()
                Logger.log_error(str(e))
                Logger.log_error_more_verbose(t)
                continue
            # Normalize to lowercase and drop empty entries.
            all_emails.update(address.lower() for address in emails if address)
            all_hosts.update(name.lower() for name in hosts if name)
        self.update_status(progress=80)
        Logger.log_more_verbose("Search complete for keyword %r" % word)

        # Adapt the data into our model.
        results = []

        # Email addresses: sanitize known theHarvester artifacts, dedupe,
        # and keep only addresses inside the audit scope.
        emails_found = set()
        emails_count = 0
        for address in all_emails:
            if "..." in address:  # known bug in theHarvester
                continue
            # Strip non-alphanumeric junk from both ends.
            while address and not address[0].isalnum():
                address = address[1:]  # known bug in theHarvester
            while address and not address[-1].isalnum():
                address = address[:-1]
            if not address:
                continue
            if not "@" in address:
                continue
            if address in emails_found:
                continue
            emails_found.add(address)
            try:
                data = Email(address)
            except Exception, e:
                warnings.warn("Cannot parse email address: %r" % address)
                continue
            # Scope check may emit warnings; silence them.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                in_scope = data.is_in_scope()
            if in_scope:
                data.add_resource(info)
                results.append(data)
                # The email's domain is a candidate hostname too.
                all_hosts.add(data.hostname)
                emails_count += 1
            else:
                Logger.log_more_verbose(
                    "Email address out of scope: %s" % address)
                discard_data(data)

        # Hostnames: sanitize, scope-check, then resolve via DNS and collect
        # the canonical name, aliases and IP addresses.
        visited = set()
        total = float(len(all_hosts))
        hosts_count = 0
        ips_count = 0
        for step, name in enumerate(all_hosts):
            while name and not name[0].isalnum():  # known bug in theHarvester
                name = name[1:]
            while name and not name[-1].isalnum():
                name = name[:-1]
            if not name:
                continue
            visited.add(name)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                in_scope = name in Config.audit_scope
            if not in_scope:
                Logger.log_more_verbose("Hostname out of scope: %s" % name)
                continue
            try:
                self.update_status(progress=(float(step * 20) / total) + 80.0)
                Logger.log_more_verbose("Checking hostname: %s" % name)
                real_name, aliaslist, addresslist = \
                    socket.gethostbyname_ex(name)
            except socket.error:
                # Unresolvable hostname: skip it.
                continue
            all_names = set()
            all_names.add(name)
            all_names.add(real_name)
            all_names.update(aliaslist)
            # NOTE(review): the loop variable shadows the outer ``name``;
            # harmless here since the outer value is not reused afterwards.
            for name in all_names:
                if name and name not in visited:
                    visited.add(name)
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")
                        in_scope = name in Config.audit_scope
                    if not in_scope:
                        Logger.log_more_verbose(
                            "Hostname out of scope: %s" % name)
                        continue
                    data = Domain(name)
                    data.add_resource(info)
                    results.append(data)
                    hosts_count += 1
                    # Attach the resolved IPs (in scope only) to this domain.
                    # NOTE(review): indentation reconstructed — the IP loop is
                    # assumed to run once per newly accepted name; confirm
                    # against upstream GoLismero source.
                    for ip in addresslist:
                        with warnings.catch_warnings():
                            warnings.filterwarnings("ignore")
                            in_scope = ip in Config.audit_scope
                        if not in_scope:
                            Logger.log_more_verbose(
                                "IP address out of scope: %s" % ip)
                            continue
                        d = IP(ip)
                        data.add_resource(d)
                        results.append(d)
                        ips_count += 1
        self.update_status(progress=100.0)
        text = "Found %d emails, %d hostnames and %d IP addresses " \
               "for keyword %r" % (emails_count, hosts_count, ips_count, word)
        if len(results) > 0:
            Logger.log(text)
        else:
            Logger.log_more_verbose(text)

        # Return the data.
        return results

    #--------------------------------------------------------------------------
    @staticmethod
    def search(engine, word, limit = 100):
        """
        Run a theHarvester search on the given engine.

        :param engine: Search engine.
        :type engine: str

        :param word: Word to search for.
        :type word: str

        :param limit: Maximum number of results.
            Its exact meaning may depend on the search engine.
        :type limit: int

        :returns: All email addresses and hostnames collected.
        :rtype: tuple(list(str), list(str))
        """
        Logger.log_more_verbose("Searching on: %s" % engine)

        # Get the search class, e.g. discovery.googlesearch.search_google.
        search_mod = getattr(discovery, "%ssearch" % engine)
        search_fn = getattr(search_mod, "search_%s" % engine)

        # Run the search, hiding all the prints theHarvester emits by
        # redirecting stdout/stderr into a throwaway buffer.
        fd = StringIO.StringIO()
        old_out, old_err = sys.stdout, sys.stderr
        try:
            sys.stdout, sys.stderr = fd, fd
            # Minimal stand-in for theHarvester's option object.
            class Options:
                pass
            options = Options()
            options.word = word
            options.limit = limit
            options.start = 0
            search = search_fn(word, options)
            search.process()
        finally:
            # Always restore the real streams, even on error.
            sys.stdout, sys.stderr = old_out, old_err

        # Extract the results; result attributes may be missing or raise
        # depending on the engine, so each access is guarded.
        emails, hosts = [], []
        results = search.get_results()
        if hasattr(results, "emails"):
            try:
                emails = results.emails
            except Exception, e:
                t = traceback.format_exc()
                Logger.log_error(str(e))
                Logger.log_error_more_verbose(t)
        if hasattr(results, "hostnames"):
            try:
                hosts = results.hostnames
            except Exception, e:
                t = traceback.format_exc()
                Logger.log_error(str(e))
                Logger.log_error_more_verbose(t)
        Logger.log_verbose(
            "Found %d emails and %d hostnames on %s for domain %s" %
            (len(emails), len(hosts), engine, word)
        )

        # Return the results.
        return emails, hosts
| StarcoderdataPython |
3338264 | import argparse
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils import data
from utils.mpnet import MPNet
from utils.plan_class import plan_dataset
def main(args):
    """Train MPNet end-to-end on the plan dataset and report test accuracy.

    Expects ``point_cloud.npy`` plus per-sample plan files under ``args.path``;
    saves the trained weights to ``end_to_end_weights.pt`` in the current
    directory.

    :param args: parsed command-line arguments (path, num_epochs, batch_size,
        learning_rate, num_files, num_workers, cuda).
    """
    cudnn.benchmark = True
    # DataLoader parameters
    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': args.num_workers}
    test_params = {'batch_size': args.batch_size,
                   'shuffle': False,
                   'num_workers': args.num_workers}
    # 90/10 train/test split over the 1-based file indices.
    partition = {'train': [i + 1 for i in range(int(0.9 * args.num_files))],
                 'test': [i + 1 for i in range(int(0.9 * args.num_files), args.num_files)]}

    # Shared environment point cloud, replicated per batch below.
    point_cloud = np.load(os.path.join(args.path, 'point_cloud.npy'))
    point_cloud = torch.from_numpy(point_cloud)
    if (args.cuda == 'cuda'):
        point_cloud = point_cloud.cuda().float()

    training_set = plan_dataset(partition['train'], args.path)
    train_loader = data.DataLoader(training_set, **train_params)
    test_set = plan_dataset(partition['test'], args.path)
    test_loader = data.DataLoader(test_set, **test_params)

    mse = nn.MSELoss()
    planner = MPNet(88920, 14, 7)
    if (args.cuda == 'cuda'):
        planner = planner.cuda()
    optimizer = torch.optim.Adam(planner.parameters(), lr=args.learning_rate)

    # --- training loop ---
    n_total_steps = len(train_loader)
    for epoch in range(args.num_epochs):
        for i, (states, plan) in enumerate(train_loader):
            states = Variable(states.float())
            plan = plan.float()
            ones = torch.ones(states.shape[0], 1)
            if (args.cuda == 'cuda'):
                states = states.cuda()
                plan = plan.cuda()
                ones = ones.cuda()
            # Broadcast the point cloud across the batch dimension.
            pc = point_cloud * ones
            prediction = planner(pc, states)
            loss = mse(plan, prediction)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # (Original guarded this with `(i + 1) % 1 == 0`, which is
            # always true — print every step.)
            print('epoch {0}/{1}, step {2}/{3}, loss = {4:4f}'.format(
                epoch + 1, args.num_epochs, i + 1, n_total_steps, loss.item()))
    torch.save(planner.state_dict(), os.path.join(os.path.curdir, 'end_to_end_weights.pt'))

    # --- evaluation ---
    with torch.no_grad():
        n_correct = 0
        n_samples = 0
        for (states, plan) in test_loader:
            states = states.float()
            plan = plan.float()
            ones = torch.ones(states.shape[0], 1)
            if (args.cuda == 'cuda'):
                states = states.cuda()
                plan = plan.cuda()
                ones = ones.cuda()
            pc = point_cloud * ones
            prediction = planner(pc, states)
            print(prediction[0], plan[0])
            n_samples += plan.shape[0]
            # BUG FIX: accumulate across batches. The original used
            # `n_correct = ...`, which silently discarded every batch
            # except the last while n_samples kept accumulating.
            n_correct += (abs(prediction - plan) <= 0.01).sum().item()
        # NOTE(review): n_correct counts matching tensor *elements* while
        # n_samples counts *rows*, so acc can exceed 100 — confirm the
        # intended metric.
        acc = 100.0 * n_correct / n_samples
        print('accuracy = {0}'.format(acc))
if __name__ == '__main__':
    # Command-line interface for the end-to-end training run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, default='/scratch/rkm350/mpnet/dataset',
                        help='location of dataset directory')
    parser.add_argument('--num_epochs', type=int, default=100, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--num_files', type=int, default=10000, help='num of files')
    parser.add_argument('--num_workers', type=int, default=6, help='number of sub processes for loading data')
    # NOTE(review): --lam is parsed but never read by main() in this file.
    parser.add_argument('--lam', type=float, default=0.001, help='lambda value for the CAE network')
    parser.add_argument('--cuda', type=str, default='cuda', help='Cuda for processing the network')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
3275488 | <reponame>bio-phys/capriqorn
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Capriqorn --- CAlculation of P(R) and I(Q) Of macRomolcules in solutioN
#
# Copyright (c) <NAME>, <NAME>, and contributors.
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the GNU Public Licence, v2 or any higher version, see the file LICENSE.txt.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
import os
import sys
import math
import numpy as np
from . import rdf
from . import formFactor as ff
import cadishi.util
# --- use fast Cython functions for time critical parts, if available
try:
from capriqorn.kernel import c_pddf
have_c_pddf = True
except:
have_c_pddf = False
print(" Note: capriqorn.lib.pddf: could not import c_pddf")
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2015 <NAME>"
__license__ = "license_tba"
def j0(x):
    """Spherical Bessel function of the first kind for n=0 (the sinc function).

    Returns sin(x)/x for nonzero *x* and the limit value 1.0 at x == 0.
    """
    return 1. if x == 0 else math.sin(x) / x
def binning(data, n, dr, distQ=False):
    """Collect n adjacent bins in a single bin. The result is a histogram.
    The x-value of the new bin is given by the average of the n x-values.
    Add a line to data at the beginning, which contains differences in
    particle numbers, and set x-value to zero.

    :param data: 2D array; column 0 holds x-values, remaining columns counts.
        Row 0 is a special header row and is preserved (with x set to 0).
    :param n: number of adjacent bins to merge.
    :param dr: original bin width; the new width dr*n is returned alongside.
    :param distQ: if True, averaged (not summed) bin values and the merge
        window is shifted by one row (skipping the header row).
    :returns: tuple (new_histogram, dr * n).
    """
    # Stash the special header row; it is restored at the end.
    tmp = data[0, :].copy()
    data[0, :] = 0
    new = np.zeros((old_div(len(data), n) + 1, len(data[0])))
    if distQ:
        fac = 1. / float(n)
        iadd = 1
    else:
        fac = 1.
        iadd = 0
    for i in range(old_div(len(data), n)):
        total = np.zeros(len(data[0]))
        for j in range(n):
            # Sum the n source rows belonging to coarse bin i.
            total[:] += data[i * n + j + iadd, :]
        # average/=float(n) #DANGER
        new[i + 1, 1:] = total[1:] * fac
        # x-value of the merged bin = mean of the n x-values.
        new[i + 1, 0] = total[0] / float(n)
    # Restore the header row with x forced to zero.
    new[0, :] = tmp[:]
    new[0, 0] = 0
    return new, dr * n
def coarsen_histogram(data, n, dr, distQ=False):
    """Re-implementation of the `binning()` routine,
    compatible w/ capriqorn data structures.

    :param data: dict mapping 'radii' and element-pair keys to 1D arrays.
    :param n: number of adjacent bins to merge.
    :param dr: original bin width; dr * n is returned alongside.
    :param distQ: if True, merged bin values are averaged instead of summed.
    :returns: tuple (coarsened_dict, dr * n).
    """
    data_new = {}
    if distQ:
        fac = 1. / float(n)
        iadd = 1
    else:
        fac = 1.
        iadd = 0
    # NOTE(review): unlike binning(), iadd is computed here but never used.
    # --- coarsen the radii axis: each coarse radius is the mean of n radii
    radii = data['radii']
    radii_new = []  # Python list, to be converted to NumPy below
    count = 0
    r_avg = 0.0
    for rad in radii:
        count += 1
        r_avg += rad
        if (count % n == 0):
            radii_new.append(r_avg / float(n))
            r_avg = 0.0
    if (count % n > 0):
        # remainder: average over however many radii are left over
        radii_new.append(r_avg / float(count % n))
    data_new['radii'] = np.array(radii_new)
    # --- coarsen each histogram: each coarse bin is the (scaled) sum of n bins
    for key in data:
        if (key == 'radii'):
            continue
        histo_new = []  # Python list, to be converted to NumPy below
        count = 0
        bin_new = 0
        for bin_old in data[key]:
            count += 1
            bin_new += bin_old
            if (count % n == 0):
                histo_new.append(fac * bin_new)
                bin_new = 0
        if (count % n > 0):
            histo_new.append(fac * bin_new)
        data_new[key] = np.array(histo_new)
        # --- first line special entry: copy the original header value verbatim
        (data_new[key])[0] = (data[key])[0]
    # --- first line special entry: radius of the header row is forced to zero
    (data_new['radii'])[0] = 0.0
    return data_new, dr * n
def getPartCharFunc1_Python(partDHisto, key, paramProd, rArray, drPrime, delta):
    """Calculates intra-atom contributions to the EPDDF (pure-Python version).

    :param partDHisto: partial distance histograms; row 0 column 1 of
        ``partDHisto[key]`` holds the mean particle number NMean.
    :param key: "el1,el2" string naming the particle-species pair.
    :param paramProd: parameter dict for products of form factors.
    :param rArray: r-values at which the characteristic function is evaluated.
    :param drPrime: histogram bin width in r'.
    :param delta: sub-interval size (unused in the active code path below).
    :returns: 2-column array (r, contribution).
    """
    partCF = np.zeros((len(rArray), 2))
    partCF[:, 0] = rArray[:]
    param = paramProd[key]
    elements = key.split(",")
    NMean = partDHisto[key][0, 1]
    # print "NMean("+key+") =", NMean
    # r = 0 entry: constant form-factor product term.
    partCF[0, 1] += NMean * param[0][0]
    # print "param[0][0]=", param[0][0]
    dr = drPrime  # DANGER
    for i, [r, valCF] in enumerate(partCF[1:]):
        # if (i % 100 == 0): print " r =", r
        rSqr = r * r
        # DANGER
        tmp = 0.
        # Sub-bin integration disabled; midpoint evaluation used instead:
        # for rr in np.arange(r-0.5*(dr-delta), r+0.5*dr, delta):
        #     rrSqr=rr*rr
        #     tmp+=ff.FT1(param, rrSqr)*delta/dr
        tmp = ff.FT1(param, rSqr)
        partCF[i + 1, 1] += NMean * tmp
    return partCF
def getPartCharFunc1(partDHisto, key, paramProd, rArray, drPrime, delta):
    """Calculate the intra-atom contributions to the EPDDF.

    Thin dispatcher: uses the compiled Cython kernel when it imported
    successfully, otherwise the pure-Python reference implementation.
    """
    impl = c_pddf.getPartCharFunc1 if have_c_pddf else getPartCharFunc1_Python
    return impl(partDHisto, key, paramProd, rArray, drPrime, delta)
def getPartCharFunc2Org(partDHisto, key, paramProd, rArray, drPrime, delta):
    """
    Calculates inter-atom contributions to the EPDDF for a partial distance histogram.

    (Legacy/original variant that calls ff.FT2 per sub-interval; superseded by
    getPartCharFunc2_Python.)

    @param partDHisto Partial distance histogram.
    @param key string Representing pair of particle species.
    @param paramProd Parameter dictionary for products of form factors.
    @param rArray Array of r-values at which characteristic function is evaluated.
    @param drPrime Interval size in r'. At r' the partial histtogram is evaluated.
    @param delta Interval size for integration in r'. The \f$c_{ij}(r,r')\f$ are
        evaluated within the bin at r' at sub-intevals delta.
    """
    partCF = np.zeros((len(rArray), 2))
    partCF[:, 0] = rArray[:]
    param = paramProd[key]
    elements = key.split(",")
    NMean = partDHisto[key][0, 1]
    print("NMean(" + key + ") =", NMean)
    # Gaussian form-factor coefficients (first entry of each list is skipped).
    a = np.asarray(param[0][1:])
    b = np.asarray(param[1][1:])
    dr = drPrime  # DANGER
    for i, [r, valCF] in enumerate(partCF[1:]):
        if (i % 100 == 0):
            print(" r =", r)
        tmp = 0.
        for [rPrime, val] in partDHisto[key][1:]:
            if val != 0:
                # Sub-bin integration over the histogram bin centered at rPrime.
                for rr in np.arange(rPrime - 0.5 * (drPrime - delta), rPrime + 0.5 * drPrime, delta):
                    # NOTE(review): the old_div grouping puts FT2, delta and
                    # 1/drPrime into the *denominator*; this looks like a
                    # futurize artifact (cf. the fixed grouping used in
                    # getPartCharFunc2_Python's pre-conversion comments) —
                    # confirm against the Python 2 history before relying on
                    # this legacy function.
                    tmp += old_div(val * r, rr * ff.FT2(a, b, r, rr) * delta / drPrime)
        partCF[i + 1, 1] += tmp
    return partCF
# @do_profile(follow=[])
def getPartCharFunc2_Python(partDHisto, key, paramProd, rArray, drPrime, delta):
    """
    Calculates inter-atom contributions to the EPDDF for a partial distance
    histogram (pure-Python version).

    @param partDHisto Partial distance histogram; row 0 is a header row,
        data rows are (r', value) pairs.
    @param key string Representing pair of particle species ("el1,el2").
    @param paramProd Parameter dictionary for products of form factors;
        paramProd[key][0][1:] are the Gaussian amplitudes a, [1][1:] the
        widths b.
    @param rArray Array of r-values at which characteristic function is evaluated.
    @param drPrime Interval size in r'. At r' the partial histogram is evaluated.
    @param delta Interval size for integration in r'. The \f$c_{ij}(r,r')\f$ are
        evaluated within the bin at r' at sub-intervals delta.
    @returns 2-column array (r, contribution).
    """
    partCF = np.zeros((len(rArray), 2))
    partCF[:, 0] = rArray[:]
    param = paramProd[key]
    # Gaussian form-factor coefficients (leading constant entry skipped).
    a = np.asarray(param[0][1:])
    b = np.asarray(param[1][1:])
    fac = a / (2. * np.sqrt(b * np.pi))
    facEx = 1. / (4. * b)
    for i, [r, valCF] in enumerate(partCF[1:]):
        tmp = 0.
        for [rPrime, val] in partDHisto[key][1:]:
            # Gaussians decay fast: only histogram bins within 3 length
            # units of r contribute noticeably.
            if (rPrime - r) ** 2 < 9:
                if val != 0:
                    val *= r * delta / drPrime
                    # Sub-bin integration over the bin centered at rPrime.
                    for rr in np.arange(rPrime - 0.5 * (drPrime - delta), rPrime + 0.5 * drPrime, delta):
                        # BUG FIX: the futurize conversion produced
                        # old_div(val, rr * (...).sum()) == val / (rr * sum),
                        # but the original Python 2 code (preserved in the
                        # comment block below this function) computes
                        # (val / rr) * sum. Restore the original grouping.
                        tmp += val / rr * (fac * (np.exp(-(r - rr) ** 2 * facEx) -
                                                  np.exp(-(r + rr) ** 2 * facEx))).sum()
        partCF[i + 1, 1] += tmp
    return partCF
def getPartCharFunc2(partDHisto, key, paramProd, rArray, drPrime, delta):
    """
    Calculates inter-atom contributions to the EPDDF for a partial distance
    histogram.

    Thin dispatcher: uses the compiled Cython kernel when it imported
    successfully, otherwise the pure-Python reference implementation.

    @param partDHisto Partial distance histogram.
    @param key string Representing pair of particle species.
    @param paramProd Parameter dictionary for products of form factors.
    @param rArray Array of r-values at which characteristic function is evaluated.
    @param drPrime Interval size in r'. At r' the partial histogram is evaluated.
    @param delta Interval size for integration in r'.
    """
    impl = c_pddf.getPartCharFunc2 if have_c_pddf else getPartCharFunc2_Python
    return impl(partDHisto, key, paramProd, rArray, drPrime, delta)
# --- original function before any Cython work
# def getPartCharFunc2(partDHisto, key, paramProd, rArray, drPrime, delta):
# """
# Calculates inter-atom contributions to the EPDDF for a partial distance histogram.
#
# @param partDHisto Partial distance histogram.
# @param key string Representing pair of particle species.
# @param paramProd Parameter dictionary for products of form factors.
# @param rArray Array of r-values at which characteristic function is evaluated.
# @param drPrime Interval size in r'. At r' the partial histtogram is evaluated.
# @param delta Interval size for integration in r'. The \f$c_{ij}(r,r')\f$ are
# evaluated within the bin at r' at sub-intevals delta.
# """
# partCF=np.zeros((len(rArray), 2))
# partCF[:,0]=rArray[:]
# param=paramProd[key]
# elements=key.split(",")
# NMean=partDHisto[key][0,1]
# print "NMean("+key+") =", NMean
#
# a=np.asarray(param[0][1:])
# b=np.asarray(param[1][1:])
# fac=a/(2.*np.sqrt(b*np.pi))
# facEx=1./(4.*b)
# dr=drPrime #DANGER
# for i, [r, valCF] in enumerate(partCF[1:]):
# if (i % 100 == 0): print " r =", r
# tmp=0.
# for [rPrime, val] in partDHisto[key][1:]:
# if (rPrime-r)**2<9: #MODI
# if val!=0:
# val*=r*delta/drPrime
# for rr in np.arange(rPrime-0.5*(drPrime-delta), rPrime+0.5*drPrime, delta):
# tmp+=val/rr*(fac*(np.exp(-(r-rr)**2*facEx)-np.exp(-(r+rr)**2*facEx))).sum()
# partCF[i+1,1]+=tmp
# return partCF
def partCharFuncAdd_Python(partCF, partDHisto, key, paramProd, rArray, drPrime):
    """
    Adds contributions \f$ \int H(r') \delta(r-r') a_{i\nu}a_{j\mu} d r'\f$ to
    the inter-particle EPDDF (pure-Python version).

    If (r=='r) then the corresponding value of the partial histogram weighted by
    the atom electron number square is added.

    @param partCF Partial EPDDF (modified in place and returned).
    @param partDHisto Partial distance histogram.
    @param key string Representing pair of particle species.
    @param paramProd Parameter dictionary for products of form factors.
    @param rArray Array of r-values at which characteristic function is evaluated.
    @param drPrime Interval size in r'. At r' the partial histtogram is evaluated.
    """
    param = paramProd[key]
    elements = key.split(",")
    # if elements[0]==elements[1]:
    NMean = partDHisto[key][0, 1]
    # else:
    #     NMean=0.
    # print "NMean("+key+") =", NMean
    # partCF[0,1]+=NMean*param[0][0]
    # print "param[0][0]=", param[0][0]
    # exit(-1)
    for i, [r, valCF] in enumerate(partCF[1:]):
        tmp = 0.
        for [rPrime, val] in partDHisto[key][1:]:
            # print rPrime, val
            # if val!=0:
            # Match r against r' after rounding to 1e-5 to sidestep float
            # comparison issues.
            if (round(r * 100000) == round(rPrime * 100000)):  # DANGER
                # if (i % 100 == 0): print r, val*param[0][0]
                tmp += old_div(val * param[0][0], drPrime)
                break
        partCF[i + 1, 1] += tmp
    return partCF
def partCharFuncAdd(partCF, partDHisto, key, paramProd, rArray, drPrime):
    """
    Adds contributions \f$ \int H(r') \delta(r-r') a_{i\nu}a_{j\mu} d r'\f$ to
    the inter-particle EPDDF.

    Thin dispatcher: uses the compiled Cython kernel when it imported
    successfully, otherwise the pure-Python reference implementation.

    @param partCF Partial EPDDF (modified in place and returned).
    @param partDHisto Partial distance histogram.
    @param key string Representing pair of particle species.
    @param paramProd Parameter dictionary for products of form factors.
    @param rArray Array of r-values at which characteristic function is evaluated.
    @param drPrime Interval size in r'.
    """
    impl = c_pddf.partCharFuncAdd if have_c_pddf else partCharFuncAdd_Python
    return impl(partCF, partDHisto, key, paramProd, rArray, drPrime)
# --- original function before any Cython work
# def partCharFuncAdd(partCF, partDHisto, key, paramProd, rArray, drPrime):
# """
# Adds contributions \f$ \int H(r') \delta(r-r') a_{i\nu}a_{j\mu} d r'\f$ to
# the inter-particle EPDDF.
#
# If (r=='r) then the corresponding value of the partial histgram weighted by
# the atom electron number square is added.
#
# @param partCF Partial EPDDF.
# @param partDHisto Partial distance histogram.
# @param key string Representing pair of particle species.
# @param paramProd Parameter dictionary for products of form factors.
# @param rArray Array of r-values at which characteristic function is evaluated.
# @param drPrime Interval size in r'. At r' the partial histtogram is evaluated.
# """
# param=paramProd[key]
# elements=key.split(",")
# #if elements[0]==elements[1]:
# NMean=partDHisto[key][0,1]
# #else:
# # NMean=0.
# print "NMean("+key+") =", NMean
#
# #partCF[0,1]+=NMean*param[0][0]
# #print "param[0][0]=", param[0][0]
# #exit(-1)
# for i, [r, valCF] in enumerate(partCF[1:]):
# tmp=0.
# for [rPrime, val] in partDHisto[key][1:]:
# #print rPrime, val
# #if val!=0:
# if (round(r*100000) == round(rPrime*100000)): #DANGER
# if (i % 100 == 0): print r, val*param[0][0]
# tmp+=val*param[0][0]/drPrime
# break
# partCF[i+1,1]+=tmp
# return partCF
def FTCharFunc(partCFArray, dr, qList):
    """
    Fourier transform of the characteristic function.

    @param partCFArray Partial EPDDFs; column 0 holds r-values, the remaining
        columns the partial contributions. The r == 0 row is the self-term
        and is added without the dr integration weight.
    @param dr Distance interval size of the partial EPDDFs.
    @param qList List of q-values.
    @returns tuple (intensity, partInt): total I(q) and the per-partial
        transforms.
    """
    intensity = np.zeros((len(qList), 2))
    partInt = np.zeros((len(qList), len(partCFArray[0])))
    intensity[:, 0] = qList[:]
    partInt[:, 0] = qList[:]
    for j, q in enumerate(qList):
        for i, r in enumerate(partCFArray[:, 0]):
            # Spherically averaged FT kernel: j0(qr) = sin(qr)/(qr).
            sinc = j0(q * r)
            for k in range(1, len(partCFArray[i])):
                if r > 0:
                    partInt[j, k] += partCFArray[i, k] * sinc * dr
                else:
                    # r == 0 (self-term) row enters without the dr weight.
                    partInt[j, k] += partCFArray[i, k] * sinc
    # partInt[:,1:]*=dr
    # Total intensity: sum of all partial contributions per q.
    for j, q in enumerate(intensity[:, 0]):
        intensity[j, 1] = partInt[j, 1:].sum()
    return intensity, partInt
def bulkSolvHistogram(g, dr):
    """
    Generate partial pair-distance histograms of bulk solvent from partial radial distribution functions.

    :param g: 2D array; column 0 holds r-values, columns 1..nProd the partial
        RDFs. Row 0 holds the particle densities per element column.
        Modified in place and returned.
    :param dr: radial bin width.
    """
    nProd = len(g[0, 1:])
    # Map product columns back to element pairs via the rdf helper module.
    nrEl = rdf.getNrElements(nProd)
    elCol = rdf.getElementColumns(nrEl)
    indexPairs = rdf.getIndexPairs(nrEl)
    print(" indexPairs =", indexPairs)
    elPairs = rdf.getElementPairs(indexPairs, elCol)
    print(" elPairs =", elPairs)
    # Spherical shell volume element 4*pi*r^2*dr per radial bin.
    tmp = 4. * math.pi * g[1:, 0] * g[1:, 0] * dr
    for i in range(1, nProd + 1):
        print(indexPairs[i])
        i0 = elPairs[i][0]
        i1 = elPairs[i][1]
        # Product of the two element densities (taken from the header row).
        rhoProd = g[0, i0] * g[0, i1]
        # print i0, i1, rhoProd, g[0,i0], g[0, i1]
        # Convert g(r) to a histogram, subtracting the large-r tail value.
        g[1:, i] = rhoProd * tmp * (g[1:, i] - g[-1, i])
    return g
def bulkSolvHistogram_dict(g, dr):
    """Generate partial pair-distance histograms of bulk solvent from partial
    radial distribution functions.

    Dictionary-based re-implementation of bulkSolvHistogram(g, dr), compatible
    with Capriqorn data structures: *g* maps 'radii' and "el1,el2" pair keys
    to 1D arrays (index 0 is a header entry and is left untouched).
    Arrays are modified in place; *g* is returned.
    """
    # Spherical shell volume element 4*pi*r^2*dr per radial bin.
    shell = 4. * math.pi * g['radii'][1:] * g['radii'][1:] * dr
    for pair, histo in g.items():
        if pair == 'radii':
            continue
        first, second = pair.split(',')
        # Density product, read from the header entries of the like-pairs.
        density_product = g[first + ',' + first][0] * g[second + ',' + second][0]
        # Convert g(r) to a histogram, subtracting the large-r tail value.
        histo[1:] = density_product * shell * (histo[1:] - histo[-1])
    return g
| StarcoderdataPython |
from . import pyhandy

# Distribution / package metadata for the pyhandy package.
name = "pyhandy"
__author__ = "callmexss"
__email__ = "<EMAIL>"
__copyright__ = "(c) 2019 callmexss"
__version__ = "0.0.5"
__license__ = "MIT"
__title__ = 'pyhandy'
__description__ = "A collection of python tools to make my life easier."
# Read a whitespace-separated list of integers from stdin and print the sum
# of the values at even indices, then the sum at odd indices.
# Generalized from the original hard-coded indices (0,2,4,6,8 and 1,3,5,7,
# which crashed on inputs shorter than 9 numbers) to inputs of any length.
arr = list(map(int, input().split()))
print(sum(arr[0::2]))
print(sum(arr[1::2]))
126745 | <gh_stars>1-10
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import warnings
from ..representation import BrainTumorSegmentationAnnotation
from ..utils import get_path, read_txt, read_pickle
from ..config import StringField, PathField
from .format_converter import DirectoryBasedAnnotationConverter
from ..representation.segmentation_representation import GTMaskLoader
class BratsConverter(DirectoryBasedAnnotationConverter):
    """Converts a BraTS-style directory (image files plus same-named mask
    files) into BrainTumorSegmentationAnnotation objects."""

    __provider__ = 'brats'
    annotation_types = (BrainTumorSegmentationAnnotation, )

    @classmethod
    def parameters(cls):
        """Extend the base converter parameters with the image/mask subfolders."""
        parameters = super().parameters()
        parameters.update({
            'image_folder': StringField(optional=True, default='imagesTr', description="Image folder."),
            'mask_folder': StringField(optional=True, default='labelsTr', description="Mask folder.")
        })
        return parameters

    def configure(self):
        """Read the configured data directory and subfolder names."""
        self.data_dir = self.get_value_from_config('data_dir')
        self.image_folder = self.get_value_from_config('image_folder')
        self.mask_folder = self.get_value_from_config('mask_folder')

    def convert(self):
        """Pair each image file with its identically named mask file.

        Images without a matching mask are skipped with a warning.
        :returns: list of BrainTumorSegmentationAnnotation.
        """
        mask_folder = Path(self.mask_folder)
        image_folder = Path(self.image_folder)
        # Validate that both directories exist before iterating.
        image_dir = get_path(self.data_dir / image_folder, is_directory=True)
        mask_dir = get_path(self.data_dir / mask_folder, is_directory=True)
        annotations = []
        for file_in_dir in image_dir.iterdir():
            file_name = file_in_dir.parts[-1]
            mask = mask_dir / file_name
            if not mask.exists():
                warnings.warn('Annotation mask for {} does not exists. File will be ignored.'.format(file_name))
                continue
            # Paths are stored relative to data_dir (subfolder/filename).
            annotation = BrainTumorSegmentationAnnotation(
                str(image_folder / file_name),
                str(mask_folder / file_name),
            )
            annotations.append(annotation)
        return annotations
class BratsNumpyConverter(DirectoryBasedAnnotationConverter):
    """Converts pre-cropped BraTS data stored as per-sample .npy files
    (plus pickled id/box files) into BrainTumorSegmentationAnnotation
    objects."""

    __provider__ = 'brats_numpy'

    @classmethod
    def parameters(cls):
        """Extend the base converter parameters with the numpy-layout options."""
        parameters = super().parameters()
        parameters.update({
            'ids_file': PathField(description="Path to file, which contains names of images in dataset"),
            'labels_file': PathField(
                optional=True, default=None,
                description='Path to file, which contains labels (if omitted no labels will be shown)'
            ),
            'boxes_file': PathField(optional=True, default=None, description='Path to file with brain boxes'),
            'data_suffix': StringField(
                optional=True, default='_data_cropped', description='Suffix for files with data'
            ),
            'label_suffix': StringField(
                optional=True, default='_label_cropped', description='Suffix for files with ground truth data'
            )
        })
        return parameters

    def configure(self):
        """Read all configured paths and filename suffixes."""
        self.data_dir = self.get_value_from_config('data_dir')
        self.ids_file = self.get_value_from_config('ids_file')
        self.labels_file = self.get_value_from_config('labels_file')
        self.boxes_file = self.get_value_from_config('boxes_file')
        self.data_suffix = self.get_value_from_config('data_suffix')
        self.label_suffix = self.get_value_from_config('label_suffix')

    def convert(self):
        """Build one annotation per sample id, attaching the matching brain
        box row when a boxes file is configured.

        Samples whose data or label file is missing are skipped with a warning.
        :returns: tuple (annotations, meta).
        """
        # latin1 encoding keeps Python-2-era pickles readable.
        ids = read_pickle(get_path(self.ids_file), encoding='latin1')
        boxes = read_pickle(get_path(self.boxes_file), encoding='latin1') if self.boxes_file else None
        annotations = []
        for i, name in enumerate(ids):
            data = name + self.data_suffix + '.npy'
            label = name + self.label_suffix + '.npy'
            if not self._check_files(data, label):
                warnings.warn('One of files {} or {} is not exist. Files will be ignored'.format(data, label))
                continue
            # Box row i corresponds to sample i in the ids list.
            box = boxes[i, :, :] if self.boxes_file else None
            annotation = BrainTumorSegmentationAnnotation(
                data,
                label,
                GTMaskLoader.NUMPY,
                box
            )
            annotations.append(annotation)
        return annotations, self._get_meta()

    def _get_meta(self):
        """Return the label map read from labels_file, or None if unset."""
        if not self.labels_file:
            return None
        return {'label_map': [line for line in read_txt(self.labels_file)]}

    def _check_files(self, data, label):
        """Return True when both the data and label files exist under data_dir."""
        try:
            get_path(self.data_dir / data)
            get_path(self.data_dir / label)
            return True
        except (FileNotFoundError, IsADirectoryError):
            return False
| StarcoderdataPython |
4827286 | <reponame>uzakotim/programming_for_engineers_ctu
# Created on iPad (Timur).
print('Hello World!')
3226685 | <reponame>nerevu/riko
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.rssitembuilder
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functions for creating a single-item RSS data source
Can be used to create a single new RSS item from scratch, or reformat and
restructure an existing item into an RSS structure.
Examples:
basic usage::
>>> from riko.modules.rssitembuilder import pipe
>>>
>>> conf = {'title': 'the title', 'description': 'description'}
>>> next(pipe(conf=conf))['y:title'] == 'the title'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
from datetime import datetime as dt
from . import processor
import pygogo as gogo
from riko.dotdict import DotDict
# Pipe options: emit items directly instead of assigning to a field.
OPTS = {"emit": True}
# NOTE(review): pubDate is evaluated once at import time, so every item built
# by this module defaults to the module-load timestamp, not creation time —
# confirm this is intended upstream.
DEFAULTS = {"pubDate": dt.now().isoformat()}
logger = gogo.Gogo(__name__, monolog=True).logger

# yahoo style rss items (dots are for sub-levels)
# Maps friendly conf keys to their yahoo/media RSS field names; keys absent
# from this table pass through unchanged (see parser()).
RSS = {
    "title": "y:title",
    "guid": "y:id",
    "mediaThumbURL": "media:thumbnail.url",
    "mediaThumbHeight": "media:thumbnail.height",
    "mediaThumbWidth": "media:thumbnail.width",
    "mediaContentType": "media:content.type",
    "mediaContentURL": "media:content.url",
    "mediaContentHeight": "media:content.height",
    "mediaContentWidth": "media:content.width",
}
def parser(item, objconf, skip=False, **kwargs):
    """Parses the pipe content

    Args:
        item (obj): The entry to process (a DotDict instance)
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        stream (dict): The original item

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from riko.dotdict import DotDict
        >>> from meza.fntools import Objectify
        >>>
        >>> item = DotDict()
        >>> conf = {'guid': 'a1', 'mediaThumbURL': 'image.png'}
        >>> objconf = Objectify(conf)
        >>> kwargs = {'stream': item}
        >>> result = parser(item, objconf, **kwargs)
        >>> result == {'media:thumbnail': {'url': 'image.png'}, 'y:id': 'a1'}
        True
    """
    # When skipping, hand the caller's original item back untouched.
    if skip:
        return kwargs["stream"]

    # Translate each configured key to its yahoo-style RSS name (falling back
    # to the key itself) and resolve each value against the incoming item.
    translated = (
        (RSS.get(conf_key, conf_key), item.get(conf_value, conf_value, **kwargs))
        for conf_key, conf_value in objconf.items()
    )
    return DotDict(translated)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A source that asynchronously builds an rss item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. All keys are optional.

            title (str): The item title
            description (str): The item description
            author (str): The item author
            guid (str): The item guid
            pubdate (str): The item publication date
            link (str): The item url
            mediaContentType (str): The item media content type
            mediaContentURL (str): The item media content url
            mediaContentHeight (str): The item media content height
            mediaContentWidth (str): The item media content width
            mediaThumbURL (str): The item media thumbnail url
            mediaThumbHeight (str): The item media thumbnail height
            mediaThumbWidth (str): The item media thumbnail width

    Returns:
        dict: twisted.internet.defer.Deferred an iterator of items

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     resp = {'url': 'image.png'}
        ...     callback = lambda x: print(next(x)['media:thumbnail'] == resp)
        ...     conf = {
        ...         'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
        ...     d = async_pipe(conf=conf)
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ...     pass
        ... except SystemExit:
        ...     pass
        ...
        True
    """
    # The @processor decorator supplies conf parsing, emit handling and the
    # async (Deferred) plumbing; the shared parser() does the field mapping.
    # NOTE(review): exact merge semantics of DEFAULTS/OPTS live in riko's
    # `processor` helper — confirm there.
    return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
    """A source that builds an rss item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. All keys are optional.

            title (str): The item title
            description (str): The item description
            author (str): The item author
            guid (str): The item guid
            pubdate (str): The item publication date
            link (str): The item url
            mediaContentType (str): The item media content type
            mediaContentURL (str): The item media content url
            mediaContentHeight (str): The item media content height
            mediaContentWidth (str): The item media content width
            mediaThumbURL (str): The item media thumbnail url
            mediaThumbHeight (str): The item media thumbnail height
            mediaThumbWidth (str): The item media thumbnail width

    Yields:
        dict: an rss item

    Examples:
        >>> # conf based
        >>> conf = {'title': 'Hi', 'guid': 'a1', 'mediaThumbURL': 'image.png'}
        >>> rss = next(pipe(conf=conf))
        >>> rss['media:thumbnail'] == {'url': 'image.png'}
        True
        >>> sorted(rss.keys()) == [
        ...     'media:thumbnail', 'pubDate', 'y:id', 'y:title']
        True
        >>>
        >>> # source based
        >>> # TODO: look into subkey
        >>> item = {'heading': 'Hi', 'id': 'a1', 'thumbnail': 'image.png'}
        >>> conf = {
        ...     'title': 'heading', 'guid': 'id', 'mediaThumbURL': 'thumbnail'}
        >>> next(pipe(item, conf=conf)) == rss
        True
    """
    # Synchronous twin of async_pipe: the @processor decorator handles conf
    # parsing and emit behavior, delegating field mapping to parser().
    return parser(*args, **kwargs)
| StarcoderdataPython |
3323615 | """
Comparing models using Hierarchical modelling. Toy Model.
"""
from __future__ import division
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt

plt.style.use('seaborn-darkgrid')

# THE DATA.
N = 30  # number of Bernoulli trials
z = 8   # number of successes
y = np.repeat([1, 0], [z, N - z])

# THE MODEL.
with pm.Model() as model:
    # Hyperprior on model index: 0 selects the logistic(nu) model (M1),
    # 1 selects the exp(-eta) model (M2).
    model_index = pm.DiscreteUniform('model_index', lower=0, upper=1)
    # Prior (it is possible to use tau or sd)
    nu = pm.Normal('nu', mu=0, tau=0.1)
    eta = pm.Gamma('eta', .1, .1)
    theta0 = 1 / (1 + pm.math.exp(-nu))  # theta from model index 0
    theta1 = pm.math.exp(-eta)  # theta from model index 1
    theta = pm.math.switch(pm.math.eq(model_index, 0), theta0, theta1)
    # Likelihood
    y = pm.Bernoulli('y', p=theta, observed=y)
    # Sampling
    trace = pm.sample(1000)

# EXAMINE THE RESULTS.
## Print summary for each trace
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace, vars =[nu, eta])

## Plot KDE and sampled values for each parameter.
#pm.traceplot(trace)

# Posterior model probabilities: fraction of samples spent in each model.
model_idx_sample = trace['model_index']
pM1 = sum(model_idx_sample == 0) / len(model_idx_sample)
pM2 = 1 - pM1

# Parameter samples conditional on the model that generated them.
nu_sample_M1 = trace['nu'][model_idx_sample == 0]
eta_sample_M2 = trace['eta'][model_idx_sample == 1]

plt.figure()
plt.subplot(2, 1, 1)
pm.plot_posterior(nu_sample_M1)
plt.xlabel(r'$\nu$')
plt.ylabel('frequency')
# BUG FIX: original title labeled this panel M2 with a malformed format spec
# '{:.3}f' (which printed a trailing literal 'f'); the nu samples belong to
# model 1, whose posterior probability is pM1.
plt.title(r'p($\nu$|D,M1), with p(M1|D)={:.3f}'.format(pM1), fontsize=14)
plt.xlim(-8, 8)

plt.subplot(2, 1, 2)
pm.plot_posterior(eta_sample_M2)
plt.xlabel(r'$\eta$')
plt.ylabel('frequency')
plt.title(r'p($\eta$|D,M2), with p(M2|D)={:.3f}'.format(pM2), fontsize=14)
plt.xlim(0, 8)

plt.savefig('figure_ex_10.2_a.png')
plt.show()
| StarcoderdataPython |
1759109 | <filename>backend/class_list.py
"""
class_list.py
==============
Endpoints for retrieving a list of all classes. All routes start with
/api/classList
All incoming request parameters are wrapped in a JSON body.
All outgoing response returns are wrapped in a JSON entry with key 'payload',
like this:
.. code-block::
{
"error": "false",
"error-msg": None,
"payload": {
"return-1": "true"
}
}
Note that method documentation assumes you are using jsonResponse/errorResponse
to generate the response, and only shows the actual returns within payload.
Ditto for request parameters.
"""
import csv
import os
import flask
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, current_app
)
bp = Blueprint('class_list', __name__, url_prefix='/api/classList')
from form_response import jsonResponse, errorResponse
@bp.route('/get', methods=('GET', 'POST'))
def class_list():
    """ POST Get the class list for a given subject.

    Parameters
    ------
    subjectArea: str

    Returns
    ------
    uid: list[str]
        list of all classes associated with the given subject area.

    Raises
    ------
    BadRequest
        Some part of the required parameters is missing.
    """
    # Guard clauses replace the original nested structure; GET still gets the
    # same hint response as before.
    if request.method != 'POST':
        return errorResponse('POST to this endpoint')
    data = request.get_json()
    if not data:
        return errorResponse('Data Body Required')
    subjectArea = data.get('subjectArea')
    if not subjectArea:
        return errorResponse('Subject Area Required')

    this_folder = os.path.dirname(os.path.abspath(__file__))
    # BUG FIX: original opened the file without a with-statement, leaking the
    # handle if csv parsing or row access raised before close().
    with open(os.path.join(this_folder, 'resources/classes.csv'), mode='r') as class_file:
        reader = csv.reader(class_file)
        # Column 0 is the subject area, column 2 the course number.
        classes = [row[0] + ' ' + row[2] for row in reader if row[0] == subjectArea]
    return jsonResponse({"classList": classes})
| StarcoderdataPython |
145076 | <gh_stars>1-10
from tkinter import Entry
from helperobjects.EntryCell import EntryCell
from typing import List
class EntryCellRow:
    """One row of EntryCell widgets belonging to a single file, plus an
    optional free-text comment Entry.

    Note: rows are mutable (cells can be added), so the hash changes as the
    row is edited — do not mutate a row while it is held in a set/dict key.
    """

    def __init__(self):
        # Name of the file this row represents.
        self.file_name: "str" = None
        # Cells in display order.
        self.entry_cell_list: "List[EntryCell]" = []
        # Optional tkinter Entry holding a user comment.
        self.comment_entry: "Entry" = None

    def add_cell(self, entry_cell: "EntryCell"):
        """Append a cell to this row."""
        self.entry_cell_list.append(entry_cell)

    def get_all(self):
        """Return the row's cell list (the live list, not a copy)."""
        return self.entry_cell_list

    def __eq__(self, other):
        # Defined alongside __hash__ so equal rows hash equally.
        if not isinstance(other, EntryCellRow):
            return NotImplemented
        return self.__key() == other.__key()

    def __hash__(self):
        # BUG FIX: the original returned hash(self.__key) — hashing the bound
        # method object, which in CPython hashes its __self__ and therefore
        # recursed back into __hash__ (RecursionError). Call the method.
        return hash(self.__key())

    def __key(self):
        # BUG FIX: the original keyed on .__hash__ attributes (method objects;
        # list.__hash__ is None) instead of values. Use the actual contents.
        return (self.file_name, tuple(self.entry_cell_list))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.