id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1770182 | import abc
import numpy as np
class HamiltonianDynamicsContext(metaclass=abc.ABCMeta):
    """Abstract interface for a Hamiltonian dynamical system.

    Subclasses supply the Hamiltonian H and its partial derivatives on
    phase-space coordinates qp of shape (2, N): row 0 holds the
    configuration q, row 1 holds the conjugate momentum p.
    """

    @classmethod
    @abc.abstractmethod
    def configuration_space_dimension (cls):
        """Returns N, the dimension of the configuration space."""
        pass

    @classmethod
    @abc.abstractmethod
    def H (cls, qp):
        """Evaluates the Hamiltonian on the (2,N)-shaped (q,p) coordinate."""
        pass

    @classmethod
    @abc.abstractmethod
    def dH_dq (cls, q, p):
        """Evaluates the partial of H with respect to q on the (2,N)-shaped (q,p) coordinate. Returns a (N,)-vector."""
        pass

    @classmethod
    @abc.abstractmethod
    def dH_dp (cls, q, p):
        # BUG FIX (doc): this is the partial with respect to p, not q.
        """Evaluates the partial of H with respect to p on the (2,N)-shaped (q,p) coordinate. Returns a (N,)-vector."""
        pass

    @classmethod
    def X_H (cls, qp):
        """
        Computes the Hamiltonian vector field on coordinates qp (with shape (2,N)), returning the same shape.
        \\omega^-1 * dH (i.e. the symplectic gradient of H) is the hamiltonian vector field for this system.
        If the tautological one-form on the cotangent bundle is
            tau := p dq
        then the symplectic form is
            omega := -dtau = -dq wedge dp
        which, e.g. in the coordinates (q_0, q_1, p_0, p_1), has the matrix
            [  0  0 -1  0 ]
            [  0  0  0 -1 ]
            [  1  0  0  0 ]
            [  0  1  0  0 ],
        or in matrix notation, with I denoting the 2x2 identity matrix,
            [ 0 -I ]
            [ I  0 ],
        having inverse
            [  0 I ]
            [ -I 0 ].
        With dH:
            dH = dH/dq * dq + dH/dp * dp,   (here, dH/dq denotes the partial of H w.r.t. q)
        or expressed in coordinates as
            [ dH/dq ]
            [ dH/dp ]
        it follows that the sympletic gradient of H is
            dH/dp * dq - dH/dq * dp
        or expressed in coordinates as
            [  dH/dp ]
            [ -dH/dq ].
        The equation defining the flow for this vector field is
            dq/dt =  dH/dp
            dp/dt = -dH/dq,
        which is Hamilton's equations.
        """
        # BUG FIX: the original referenced an undefined name `coordinates`;
        # the parameter is `qp`.
        q = qp[0,:]
        p = qp[1,:]
        # This is the symplectic gradient of H.
        retval = np.vstack((cls.dH_dp(q,p), -cls.dH_dq(q,p)))
        assert retval.shape[0] == 2
        return retval

    @classmethod
    def phase_space_dimension (cls):
        """Returns 2*N, the dimension of phase space (the cotangent bundle)."""
        return 2*cls.configuration_space_dimension()
| StarcoderdataPython |
6692779 | <reponame>hushaohan/aqua
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Code inside the test is the chemistry sample from the readme.
If this test fails and code changes are needed here to resolve
the issue then ensure changes are made to readme too.
"""
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry import QiskitChemistryError
class TestReadmeSample(QiskitChemistryTestCase):
    """Test sample code from readme.

    The body of test_readme_sample must stay an exact copy of the readme's
    chemistry sample; if it needs changes, update the readme as well.
    """

    def setUp(self):
        super().setUp()
        # Skip (rather than fail) when the optional backends required by the
        # readme sample are not installed locally.
        try:
            # pylint: disable=import-outside-toplevel
            from qiskit.chemistry.drivers import PySCFDriver
            PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6')
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed')
        try:
            # pylint: disable=import-outside-toplevel
            # pylint: disable=unused-import
            from qiskit import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return

    def test_readme_sample(self):
        """ readme sample test """
        # pylint: disable=import-outside-toplevel,redefined-builtin

        def print(*args):
            """ overloads print to log values """
            # Routes the sample's print() output into the test log
            # instead of stdout.
            if args:
                self.log.debug(args[0], *args[1:])

        # --- Exact copy of sample code ----------------------------------------
        from qiskit.chemistry import FermionicOperator
        from qiskit.chemistry.drivers import PySCFDriver, UnitsType
        from qiskit.aqua.operators import Z2Symmetries
        # Use PySCF, a classical computational chemistry software
        # package, to compute the one-body and two-body integrals in
        # molecular-orbital basis, necessary to form the Fermionic operator
        driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735',
                             unit=UnitsType.ANGSTROM,
                             basis='sto3g')
        molecule = driver.run()
        num_particles = molecule.num_alpha + molecule.num_beta
        num_spin_orbitals = molecule.num_orbitals * 2
        # Build the qubit operator, which is the input to the VQE algorithm in Aqua
        ferm_op = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals)
        map_type = 'PARITY'
        qubit_op = ferm_op.mapping(map_type)
        qubit_op = Z2Symmetries.two_qubit_reduction(qubit_op, num_particles)
        num_qubits = qubit_op.num_qubits
        # setup a classical optimizer for VQE
        from qiskit.aqua.components.optimizers import L_BFGS_B
        optimizer = L_BFGS_B()
        # setup the initial state for the variational form
        from qiskit.chemistry.components.initial_states import HartreeFock
        init_state = HartreeFock(num_spin_orbitals, num_particles)
        # setup the variational form for VQE
        from qiskit.circuit.library import TwoLocal
        var_form = TwoLocal(num_qubits, ['ry', 'rz'], 'cz', initial_state=init_state)
        # setup and run VQE
        from qiskit.aqua.algorithms import VQE
        algorithm = VQE(qubit_op, var_form, optimizer)
        # set the backend for the quantum computation
        from qiskit import Aer
        backend = Aer.get_backend('statevector_simulator')
        result = algorithm.run(backend)
        print(result.eigenvalue.real)
        # ----------------------------------------------------------------------

        # Reference value: H2 ground-state energy used by the readme sample.
        self.assertAlmostEqual(result.eigenvalue.real, -1.8572750301938803, places=6)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8049398 | <filename>src/main.py
from PyQt5.QtWidgets import QFileDialog
import numpy as np
import cv2 as cv
from PyQt5 import QtGui
from PyQt5.QtWidgets import (
QApplication,
QLabel,
QVBoxLayout,
QHBoxLayout,
QWidget,
QPushButton,
)
from Algorithms import RGB2BGR, RGB2GRAY
class ImageTemp:
    """In-memory holder for the image currently being edited.

    Pixels are stored as an (H, W, 3) uint8 RGB numpy array and exposed
    either as a defensive copy or wrapped in a ``QtGui.QImage`` for display.
    """

    def __init__(self):
        """Start with a black 512x512 RGB canvas."""
        blank = np.zeros([512, 512, 3])
        self._image_data = blank.astype(np.uint8)

    def set_image_data(self, image_data: np.ndarray):
        """Replace the stored pixels, coercing them to uint8."""
        self._image_data = image_data.astype(np.uint8)

    def get_image_data(self) -> np.ndarray:
        """Return a copy of the stored pixels (callers may mutate freely)."""
        return self._image_data.copy()

    def get_QImage(self):
        """Return the stored pixels wrapped in a ``QtGui.QImage``."""
        height = self._image_data.shape[0]
        width = self._image_data.shape[1]
        raw_bytes = self._image_data.data.tobytes()
        # Passing the explicit stride (width * 3 bytes per row) keeps the
        # image from skewing when the row length is not a multiple of 4.
        return QtGui.QImage(
            raw_bytes,
            width,
            height,
            width * 3,
            QtGui.QImage.Format_RGB888,
        )
class MainWindow:
    """Main application window: image preview on the left, a column of tool
    buttons on the right. Owns the in-memory ImageTemp being edited."""

    def __init__(self):
        """
        init all the widgets
        """
        self.image_temp = ImageTemp()
        # Initial directory shown by the open/save dialogs.
        self.start_path = "C:/"
        self.button_openfile = QPushButton("Open File")
        self.button_openfile.clicked.connect(self.load_image)
        self.button_saveas = QPushButton("Save As")
        self.button_saveas.clicked.connect(self.save_image)
        self.button_rgb2bgr = QPushButton("RGB To BGR")
        self.button_rgb2bgr.clicked.connect(self.rgb2bgr)
        self.button_rgb2gray = QPushButton("RGB To Gray")
        self.button_rgb2gray.clicked.connect(self.rgb2gray)
        self.image_label = QLabel("this is an image")
        # Build composite widgets last, after all child widgets exist.
        self.tool_box = self.get_tool_box()
        self.window = self.get_main_window()

    def get_tool_box(self):
        """
        layout of tool box: the four action buttons stacked vertically
        """
        tool_box = QWidget()
        layout = QVBoxLayout()
        layout.addWidget(self.button_openfile)
        layout.addWidget(self.button_saveas)
        layout.addWidget(self.button_rgb2bgr)
        layout.addWidget(self.button_rgb2gray)
        tool_box.setLayout(layout)
        return tool_box

    def get_main_window(self):
        """
        window and layout of main window: preview label next to the tool box
        """
        window = QWidget()
        layout = QHBoxLayout()
        layout.addWidget(self.image_label)
        self.update_image_label()
        layout.addWidget(self.tool_box)
        window.setLayout(layout)
        return window

    def update_image_label(self):
        """
        update image show: refresh the preview from the current ImageTemp
        """
        self.image_label.setPixmap(QtGui.QPixmap(self.image_temp.get_QImage()))

    def load_image(self):
        """
        load a image chosen via a file dialog into the image temp
        """
        data_path, _ = QFileDialog.getOpenFileName(
            None, "Select File", self.start_path, "*.png;*.jpg;*.jpeg"
        )
        # OpenCV loads BGR; convert to the RGB layout ImageTemp expects.
        image = cv.imread(data_path, cv.IMREAD_COLOR)
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        self.image_temp.set_image_data(image)
        self.update_image_label()

    def save_image(self):
        """
        save image temp as a image on disk
        """
        save_path, _ = QFileDialog.getSaveFileName(
            None, "Save As", self.start_path, "*.png;*.jpg;*.jpeg"
        )
        image = self.image_temp.get_image_data()
        # Convert back to BGR so cv.imwrite stores correct colors.
        image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
        cv.imwrite(save_path, image)

    def rgb2bgr(self):
        """
        rgb image show as bgr image
        """
        image = self.image_temp.get_image_data()
        image = RGB2BGR(image)
        self.image_temp.set_image_data(image)
        self.update_image_label()

    def rgb2gray(self):
        """
        rgb image to gray image, replicated across three channels so the
        preview still receives RGB888 data
        """
        image = self.image_temp.get_image_data()
        image = RGB2GRAY(image)
        # Stack the gray plane into (3, H, W), then reorder to (H, W, 3).
        image = np.array([image, image, image])
        image = np.transpose(image, (1, 2, 0))
        self.image_temp.set_image_data(image)
        self.update_image_label()
# Application entry point: build the Qt application, construct the main
# window, show it, and start the event loop.
# NOTE(review): this runs at import time; consider an
# `if __name__ == "__main__":` guard if this module is ever imported.
# create app
app = QApplication([])
main_window = MainWindow()
main_window.window.show()
# run app
app.exec()
| StarcoderdataPython |
1690697 | <reponame>GuilloteauQ/nbpreview
"""Render execution results from Jupyter Notebooks."""
from pathlib import Path
from typing import Dict, Iterator, Union
from nbformat import NotebookNode
from nbpreview.component.content.output.result import display_data, link
from nbpreview.component.content.output.result.display_data import DisplayData
from nbpreview.component.content.output.result.drawing import Drawing, ImageDrawing
from nbpreview.component.content.output.result.execution_indicator import Execution
from nbpreview.component.content.output.result.link import FileLink
Result = Union[FileLink, DisplayData, Drawing]
def render_result(
    output: NotebookNode,
    plain: bool,
    unicode: bool,
    execution: Union[Execution, None],
    hyperlinks: bool,
    nerd_font: bool,
    files: bool,
    hide_hyperlink_hints: bool,
    theme: str,
    images: bool,
    image_drawing: ImageDrawing,
    color: bool,
    negative_space: bool,
    relative_dir: Path,
) -> Iterator[Result]:
    """Render executed result outputs.

    Yields the rendered file link (if any) followed by the rendered
    display data (if any); ``None`` results are dropped.
    """
    output_data: Dict[str, Union[str, NotebookNode]] = output.get("data", {})
    rendered = (
        link.render_link(
            output_data,
            unicode=unicode,
            hyperlinks=hyperlinks,
            execution=execution,
            nerd_font=nerd_font,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
        ),
        display_data.render_display_data(
            output_data,
            unicode=unicode,
            plain=plain,
            nerd_font=nerd_font,
            theme=theme,
            images=images,
            image_drawing=image_drawing,
            color=color,
            negative_space=negative_space,
            hyperlinks=hyperlinks,
            files=files,
            hide_hyperlink_hints=hide_hyperlink_hints,
            relative_dir=relative_dir,
        ),
    )
    yield from (item for item in rendered if item is not None)
| StarcoderdataPython |
6609853 | """Module containing the logic for our debugging logic."""
import platform
from typing import Any
from typing import Dict
from flake8.plugins.finder import Plugins
def information(version: str, plugins: Plugins) -> Dict[str, Any]:
    """Generate the information to be printed for the bug report."""
    # Third-party plugins only: flake8's own checks and local plugins are
    # not interesting in a bug report.
    excluded_packages = {"flake8", "local"}
    package_versions = {
        (loaded.plugin.package, loaded.plugin.version)
        for loaded in plugins.all_plugins()
        if loaded.plugin.package not in excluded_packages
    }
    plugin_entries = [
        {"plugin": package, "version": package_version}
        for package, package_version in sorted(package_versions)
    ]
    platform_info = {
        "python_implementation": platform.python_implementation(),
        "python_version": platform.python_version(),
        "system": platform.system(),
    }
    return {
        "version": version,
        "plugins": plugin_entries,
        "platform": platform_info,
    }
| StarcoderdataPython |
1779577 | import numpy as np
import time
import my_sgf
# Board cell values.
B = -1 # black stone
N = 0 # empty point
W = 1 # white stone
# The exact values of B and W do not matter when calling the function,
# because this is only a test file.
# But make sure empty points are set to 0.
if __name__ == "__main__":
    """
    ------------------------------------------
    move(board, pos, turn, check_flag = False)
    ------------------------------------------
    :param board: 19x19 np.array
    :param pos: 2D np.array (e.g. [10,15])
    :param turn: whose turn to move; -1 or 1 make sure set the blank(empty) as 0
    :param check_flag: True or False. Whether to check the validity of the move. If set as False, the move is
                assumed to be valid.
    :return: return a np.array.
                Warning: If check_flag is set as True and the move is invalid, this function changes the content
                of the original pointer. Thus, make sure you don't use this function directly in a tree structure.
    """
    print("start")
    # Build a small corner position with a capturable cluster around (1,1).
    brd = np.zeros((19, 19), np.int64)
    white_pos = np.array([1, 1], dtype=np.int64)
    brd[0, 1] = -1
    brd[0, 2] = 1
    brd[0, 3] = 1
    brd[0, 4] = 1
    brd[1, 0] = -1
    brd[1, 2] = -1
    brd[1, 3] = -1
    brd[1, 4] = 1
    brd[2, 0] = 1
    brd[2, 1] = -1
    brd[2, 2] = -1
    brd[2, 3] = -1
    brd[2, 4] = 1
    brd[3, 1] = 1
    brd[3, 2] = 1
    brd[3, 3] = 1
    print("-" * 20)
    print(" Before:")
    print("-" * 20)
    print(brd)
    print("Calculating 10000 times...\nPlease wait.")
    st = time.time()
    brd0 = None
    # Benchmark: copy the board and apply the move 10000 times.
    for i in range(10000):
        brd0 = np.copy(brd)
        ret = my_sgf.move(brd0, white_pos, W, check_flag=False)
    print("-" * 20)
    print(" After:")
    print("-" * 20)
    print(ret)
    ed = time.time()
    print("\nTime usage: %f s per 10000times" % (ed - st))
    time.sleep(2)
    print("\nExtreme test:")
    time.sleep(1)
    st = time.time()
    print("-" * 20)
    print(" Before:")
    print("-" * 20)
    # NOTE(review): chained assignment makes brd2 and brd aliases of the
    # same array, so the brd[9, 9] write below also updates brd2.
    brd2 = brd = np.zeros((19, 19), np.int64)
    # Fill the whole board with black except the single empty point (9,9).
    brd2 += -1
    brd[9, 9] = 0
    white_pos = np.array([9, 9], dtype=np.int64)
    print(brd2)
    print("Calculating 10000 times...\nPlease wait.")
    for i in range(10000):
        my_sgf.move(brd2, white_pos, W)
    print("-" * 20)
    print(" After:")
    print("-" * 20)
    print(brd2)
    ed = time.time()
    print("\nTime usage: %f s per 10000 times" % (ed - st))
| StarcoderdataPython |
1899245 | <reponame>rajeshmajumdar/TheFatCat<gh_stars>1-10
# License for freakymonk
# Copyright 2017-18 by <NAME> <@freakym0nk>
# Web : www.rajeshmajumdar.github.io
# Contact : @freakym0nk
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
# Project Name : freakymonk
# Project Date : 17-12-2016
# Author : <NAME>
# Contact : @freakym0nk
#!/usr/bin/env python
#-*- coding:utf-8-*-
from Tkinter import *
from tkMessageBox import *
# Root window for the demo dialog (Python 2 / Tkinter).
MainWindow = Tk()
MainWindow.geometry("155x300+150+100")  # width x height + x-offset + y-offset
MainWindow.title("License")
def Button1Click():
    # Click handler for the single demo button (Python 2 print statement).
    print "Hello freakymonk "
Button1 = Button(text = "Click Me", command = Button1Click)
Button1 .place(relx = 0.5, rely = 0.5, relheight = 0.20)
# Enter the Tk event loop; blocks until the window is closed.
mainloop()
| StarcoderdataPython |
5022478 | # Copyright 2020 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from ....utilities import cosd, sind, tand
from ..base_velocity_deficit import VelocityDeficit
class GaussianModel(VelocityDeficit):
    """
    This is the super-class for all Gaussian-type wake models. It includes
    implementations of functions that subclasses should use to perform
    Gaussian-related calculations (see :cite:`gmb-King2019Controls`).

    Subclasses get helpers for yaw-added recovery, spanwise/vertical (V/W)
    velocity computation, and the shared Gaussian wake-shape functions.

    References:
        .. bibliography:: /source/zrefs.bib
            :style: unsrt
            :filter: docname in docnames
            :keyprefix: gmb-
    """
def __init__(self, parameter_dictionary):
    """
    See super-class for initialization details.

    Args:
        parameter_dictionary (dict): Model-specific parameters.
    """
    # All parameter parsing/validation is handled by VelocityDeficit.
    super().__init__(parameter_dictionary)
def correction_steps(
    self, U_local, U, V, W, x_locations, y_locations, turbine, turbine_coord
):
    """
    Applies the yaw-added-recovery correction to the U-component velocity
    deficits when that feature is enabled; otherwise returns the deficits
    unchanged.

    Args:
        U_local (np.array): U-component velocities across the flow field.
        U (np.array): U-component velocity deficits across the flow field.
        V (np.array): V-component velocity deficits across the flow field.
        W (np.array): W-component velocity deficits across the flow field.
        x_locations (np.array): Streamwise locations in wake.
        y_locations (np.array): Spanwise locations in wake.
        turbine (:py:class:`floris.simulation.turbine.Turbine`):
            Turbine object.
        turbine_coord (:py:obj:`floris.simulation.turbine_map.TurbineMap.coords`):
            Spatial coordinates of wind turbine.

    Returns:
        np.array: U-component velocity deficits across the flow field.
    """
    # Guard clause: nothing to correct unless yaw-added recovery is on.
    if not self.use_yaw_added_recovery:
        return U
    return self.yaw_added_recovery_correction(
        U_local, U, W, x_locations, y_locations, turbine, turbine_coord
    )
def calculate_VW(
    self, V, W, coord, turbine, flow_field, x_locations, y_locations, z_locations
):
    """
    Computes the V- and W-component velocities when enabled, after
    validating that the model flags are a consistent combination
    (yaw-added recovery requires the V/W calculation to be on).

    Args:
        V (np.array): V-component velocity deficits across the flow field.
        W (np.array): W-component velocity deficits across the flow field.
        coord (:py:obj:`floris.simulation.turbine_map.TurbineMap.coords`):
            Spatial coordinates of wind turbine.
        turbine (:py:class:`floris.simulation.turbine.Turbine`):
            Turbine object.
        flow_field: Flow field object describing the ambient flow.
        x_locations (np.array): Streamwise locations in wake.
        y_locations (np.array): Spanwise locations in wake.
        z_locations (np.array): Vertical locations in wake.

    Raises:
        ValueError: If 'use_yaw_added_recovery' is True while
            'calculate_VW_velocities' is False.

    Returns:
        np.array, np.array:
            - V-component velocity deficits across the flow field.
            - W-component velocity deficits across the flow field.
    """
    yaw_recovery_on = self.use_yaw_added_recovery
    compute_vw = self.calculate_VW_velocities
    if yaw_recovery_on and not compute_vw:
        err_msg = (
            "It appears that 'use_yaw_added_recovery' is set "
            + "to True and 'calculate_VW_velocities' is set to False. "
            + "This configuration is not valid. Please set "
            + "'calculate_VW_velocities' to True if you wish to use "
            + "yaw-added recovery."
        )
        self.logger.error(err_msg, stack_info=True)
        raise ValueError(err_msg)
    if compute_vw:
        V, W = self.calc_VW(
            coord, turbine, flow_field, x_locations, y_locations, z_locations
        )
    return V, W
def yaw_added_recovery_correction(
    self, U_local, U, W, x_locations, y_locations, turbine, turbine_coord
):
    """
    Corrects the U-component velocity deficits for yaw-added recovery by
    adding back a wake-recovery term proportional to the mean vertical
    velocity and the downstream distance from the rotor.

    Args:
        U_local (np.array): U-component velocities across the flow field.
        U (np.array): U-component velocity deficits across the flow field.
        W (np.array): W-component velocity deficits across the flow field.
        x_locations (np.array): Streamwise locations in wake.
        y_locations (np.array): Spanwise locations in wake.
        turbine (:py:class:`floris.simulation.turbine.Turbine`):
            Turbine object.
        turbine_coord (:py:obj:`floris.simulation.turbine_map.TurbineMap.coords`):
            Spatial coordinates of wind turbine.

    Returns:
        np.array: U-component velocity deficits across the flow field.
    """
    # Velocity field before any correction is applied.
    u_uncorrected = U_local - U
    rotor_diameter = turbine.rotor_diameter
    downstream_dist = x_locations - turbine_coord.x1
    # Wake-expansion coefficient from the local turbulence intensity.
    ky = self.ka * turbine.current_turbulence_intensity + self.kb
    recovery_term = (np.mean(W) * downstream_dist) / (
        (ky * downstream_dist + rotor_diameter / 2)
    )
    u_corrected = u_uncorrected + np.nan_to_num(recovery_term)
    # Convert the corrected velocity back into a deficit.
    corrected_deficit = U_local - u_corrected
    # No correction upstream of the rotor plane.
    corrected_deficit[x_locations < turbine_coord.x1] = 0
    return corrected_deficit
def calc_VW(
    self, coord, turbine, flow_field, x_locations, y_locations, z_locations
):
    """
    This method calculates the V- and W-component velocities using
    methods developed in [1].
    # TODO add reference to 1

    The field is modeled as the superposition of six smoothed point
    vortices: the top and bottom vortices shed by the yawed rotor, the
    wake-rotation vortex at hub height, and a ground-mirror image of
    each of the three to enforce no flow through the ground plane.

    Args:
        coord (:py:obj:`floris.simulation.turbine_map.TurbineMap.coords`):
            Spatial coordinates of wind turbine.
        turbine (:py:class:`floris.simulation.turbine.Turbine`):
            Turbine object.
        flow_field: Flow field object; this method reads its wind_map,
            specified_wind_height, wind_shear, and u_initial attributes.
        x_locations (np.array): Streamwise locations in wake.
        y_locations (np.array): Spanwise locations in wake.
        z_locations (np.array): Vertical locations in wake.

    Returns:
        np.array, np.array:
            - V-component velocity deficits across the flow field.
            - W-component velocity deficits across the flow field.
    """
    # turbine parameters
    D = turbine.rotor_diameter
    HH = turbine.hub_height
    yaw = turbine.yaw_angle
    Ct = turbine.Ct
    TSR = turbine.tsr
    aI = turbine.aI
    # flow parameters
    Uinf = np.mean(flow_field.wind_map.grid_wind_speed)
    scale = 1.0
    # Normalized velocities at the rotor top/bottom from the power-law
    # shear profile.
    vel_top = (
        Uinf
        * ((HH + D / 2) / flow_field.specified_wind_height) ** flow_field.wind_shear
    ) / Uinf
    vel_bottom = (
        Uinf
        * ((HH - D / 2) / flow_field.specified_wind_height) ** flow_field.wind_shear
    ) / Uinf
    # Circulation strengths of the top/bottom yaw-shed vortices and the
    # wake-rotation vortex.
    Gamma_top = (
        scale * (np.pi / 8) * D * vel_top * Uinf * Ct * sind(yaw) * cosd(yaw)
    )
    Gamma_bottom = (
        -scale * (np.pi / 8) * D * vel_bottom * Uinf * Ct * sind(yaw) * cosd(yaw)
    )
    Gamma_wake_rotation = (
        0.25 * 2 * np.pi * D * (aI - aI ** 2) * turbine.average_velocity / TSR
    )
    # compute the spanwise and vertical velocities induced by yaw
    eps = self.eps_gain * D  # Use set value
    # decay the vortices as they move downstream - using mixing length
    lmda = D / 8
    kappa = 0.41
    lm = kappa * z_locations / (1 + kappa * z_locations / lmda)
    z = np.linspace(
        np.min(z_locations), np.max(z_locations), np.shape(flow_field.u_initial)[2]
    )
    dudz_initial = np.gradient(flow_field.u_initial, z, axis=2)
    # Eddy viscosity from the mixing length and the initial vertical shear.
    nu = lm ** 2 * np.abs(dudz_initial[0, :, :])
    # Each vortex below induces a smoothed (finite-core) velocity; the
    # trailing factor eps**2 / (4*nu*x/Uinf + eps**2) decays it downstream.
    # top vortex
    yLocs = y_locations + 0.01 - (coord.x2)
    zT = z_locations + 0.01 - (HH + D / 2)
    rT = yLocs ** 2 + zT ** 2
    V1 = (
        (zT * Gamma_top)
        / (2 * np.pi * rT)
        * (1 - np.exp(-rT / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W1 = (
        (-yLocs * Gamma_top)
        / (2 * np.pi * rT)
        * (1 - np.exp(-rT / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # bottom vortex
    zB = z_locations + 0.01 - (HH - D / 2)
    rB = yLocs ** 2 + zB ** 2
    V2 = (
        (zB * Gamma_bottom)
        / (2 * np.pi * rB)
        * (1 - np.exp(-rB / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W2 = (
        ((-yLocs * Gamma_bottom) / (2 * np.pi * rB))
        * (1 - np.exp(-rB / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # top vortex - ground (mirror image with opposite circulation)
    yLocs = y_locations + 0.01 - (coord.x2)
    zLocs = z_locations + 0.01 + (HH + D / 2)
    V3 = (
        (
            ((zLocs * -Gamma_top) / (2 * np.pi * (yLocs ** 2 + zLocs ** 2)))
            * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
            + 0.0
        )
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W3 = (
        ((-yLocs * -Gamma_top) / (2 * np.pi * (yLocs ** 2 + zLocs ** 2)))
        * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # bottom vortex - ground
    yLocs = y_locations + 0.01 - (coord.x2)
    zLocs = z_locations + 0.01 + (HH - D / 2)
    V4 = (
        (
            ((zLocs * -Gamma_bottom) / (2 * np.pi * (yLocs ** 2 + zLocs ** 2)))
            * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
            + 0.0
        )
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W4 = (
        ((-yLocs * -Gamma_bottom) / (2 * np.pi * (yLocs ** 2 + zLocs ** 2)))
        * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # wake rotation vortex (centered at hub height)
    zC = z_locations + 0.01 - (HH)
    rC = yLocs ** 2 + zC ** 2
    V5 = (
        (zC * Gamma_wake_rotation)
        / (2 * np.pi * rC)
        * (1 - np.exp(-rC / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W5 = (
        (-yLocs * Gamma_wake_rotation)
        / (2 * np.pi * rC)
        * (1 - np.exp(-rC / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # wake rotation vortex - ground effect
    yLocs = y_locations + 0.01 - coord.x2
    zLocs = z_locations + 0.01 + HH
    V6 = (
        (
            (
                (zLocs * -Gamma_wake_rotation)
                / (2 * np.pi * (yLocs ** 2 + zLocs ** 2))
            )
            * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
            + 0.0
        )
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    W6 = (
        ((-yLocs * -Gamma_wake_rotation) / (2 * np.pi * (yLocs ** 2 + zLocs ** 2)))
        * (1 - np.exp(-(yLocs ** 2 + zLocs ** 2) / (eps ** 2)))
        * eps ** 2
        / (4 * nu * (x_locations - coord.x1) / Uinf + eps ** 2)
    )
    # print(Gamma_wake_rotation, np.mean(W5), np.mean(W6))
    # total spanwise velocity
    V = V1 + V2 + V3 + V4 + V5 + V6
    W = W1 + W2 + W3 + W4 + W5 + W6
    # no spanwise and vertical velocity upstream of the turbine
    V[x_locations < coord.x1 + 10] = 0.0
    W[x_locations < coord.x1 + 10] = 0.0
    # Negative (downward) vertical velocities are clipped to zero.
    W[W < 0] = 0
    return V, W
@property
def calculate_VW_velocities(self):
    """
    Flag to enable the calculation of V- and W-component velocities using
    methods developed in [1].
    **Note:** This is a virtual property used to "get" or "set" a value.

    Args:
        value (bool): Value to set.

    Returns:
        bool: Value currently set.

    Raises:
        ValueError: Invalid value.
    """
    return self._calculate_VW_velocities

@calculate_VW_velocities.setter
def calculate_VW_velocities(self, value):
    if type(value) is not bool:
        # BUG FIX: the message previously claimed the value must be type
        # float even though the check requires a bool.
        err_msg = (
            "Value of calculate_VW_velocities must be type "
            + "bool; {} given.".format(type(value))
        )
        self.logger.error(err_msg, stack_info=True)
        raise ValueError(err_msg)
    self._calculate_VW_velocities = value
@property
def use_yaw_added_recovery(self):
    """
    Flag to use yaw added recovery on the wake velocity using methods
    developed in [1].
    **Note:** This is a virtual property used to "get" or "set" a value.

    Args:
        value (bool): Value to set.

    Returns:
        bool: Value currently set.

    Raises:
        ValueError: Invalid value.
    """
    return self._use_yaw_added_recovery

@use_yaw_added_recovery.setter
def use_yaw_added_recovery(self, value):
    if type(value) is not bool:
        # BUG FIX: the message previously claimed the value must be type
        # float even though the check requires a bool (resolving the old
        # inline TODO questioning this).
        err_msg = (
            "Value of use_yaw_added_recovery must be type "
            + "bool; {} given.".format(type(value))
        )
        self.logger.error(err_msg, stack_info=True)
        raise ValueError(err_msg)
    self._use_yaw_added_recovery = value
@property
def eps_gain(self):
    """
    Tuning value for calculating the V- and W-component velocities using
    methods developed in [1].
    **Note:** This is a virtual property used to "get" or "set" a value.

    Args:
        value (float): Value to set.

    Returns:
        float: Value currently set.

    Raises:
        ValueError: Invalid value.
    """
    return self._eps_gain

@eps_gain.setter
def eps_gain(self, value):
    # Strict validation: only a genuine float is accepted (ints rejected).
    if type(value) is not float:
        err_msg = "Value of eps_gain must be type float; {} given.".format(
            type(value)
        )
        self.logger.error(err_msg, stack_info=True)
        raise ValueError(err_msg)
    self._eps_gain = value
@staticmethod
def mask_upstream_wake(y_locations, turbine_coord, yaw):
    """
    Calculates values to be used for masking the upstream wake relative to
    the current turbine.

    Args:
        y_locations (np.array): Spanwise locations in wake.
        turbine_coord (:py:obj:`floris.simulation.turbine_map.TurbineMap.coords`):
            Spatial coordinates of wind turbine.
        yaw (float): The turbine yaw angle.

    Returns:
        tuple: tuple containing:
            - xR (np.array): X locations to mask upstream wake.
            - yR (np.array): Y locations to mask upstream wake.
    """
    spanwise_offset = y_locations - turbine_coord.x2
    # Project the yawed rotor plane onto the streamwise axis.
    streamwise_edge = spanwise_offset * tand(yaw) + turbine_coord.x1
    return streamwise_edge, spanwise_offset
@staticmethod
def initial_velocity_deficits(U_local, Ct):
    """
    Calculates the initial velocity deficits used in determining the wake
    expansion in a Gaussian wake velocity model.

    Args:
        U_local (np.array): U-component velocities across the flow field.
        Ct (float): The thrust coefficient of a turbine at the current
            operating conditions.

    Returns:
        tuple: tuple containing:
            - uR (np.array): Initial velocity deficit used in calculation
              of wake expansion.
            - u0 (np.array): Initial velocity deficit used in calculation
              of wake expansion.
    """
    # Shared factor from 1-D momentum theory.
    thrust_factor = np.sqrt(1 - Ct)
    uR = U_local * Ct / (2.0 * (1 - thrust_factor))
    u0 = U_local * thrust_factor
    return uR, u0
@staticmethod
def initial_wake_expansion(turbine, U_local, veer, uR, u0):
    """
    Calculates the initial wake widths associated with wake expansion.

    Args:
        turbine (:py:class:`floris.simulation.turbine.Turbine`):
            Turbine object.
        U_local (np.array): U-component velocities across the flow field.
        veer (float): The amount of veer across the rotor.
        uR (np.array): Initial velocity deficit used in calculation of wake
            expansion.
        u0 (np.array): Initial velocity deficit used in calculation of wake
            expansion.

    Returns:
        tuple: tuple containing:
            - sigma_y0 (np.array): Initial wake width in the spanwise
              direction.
            - sigma_z0 (np.array): Initial wake width in the vertical
              direction.
    """
    opposite_yaw = -1 * turbine.yaw_angle
    # Vertical width from the rotor radius and the deficit ratio.
    sigma_z0 = 0.5 * turbine.rotor_diameter * np.sqrt(uR / (U_local + u0))
    # Spanwise width shrinks with yaw and veer.
    sigma_y0 = sigma_z0 * cosd(opposite_yaw) * cosd(veer)
    return sigma_y0, sigma_z0
@staticmethod
def gaussian_function(U, C, r, n, sigma):
    """
    A general form of the Gaussian function used in the Gaussian wake
    models.

    Args:
        U (np.array): U-component velocities across the flow field.
        C (np.array): Velocity deficit at the wake center normalized by the
            incoming wake velocity.
        r (float): Radial distance from the wake center.
        n (float): Exponent of radial distance from the wake center.
        sigma (np.array): Standard deviation of the wake.

    Returns:
        np.array: U-component velocity deficits across the flow field.
    """
    exponent = -1 * r ** n / (2 * sigma ** 2)
    return U * C * np.exp(exponent)
| StarcoderdataPython |
9626146 | """
Escriba un programa que imprima todos los enteros positivos impares menores que 100 omitiéndose aquellos que sean divisibles por 7.
"""
# Collect the odd positive integers below 100, skipping multiples of 7.
lista = [n for n in range(1, 101) if n % 2 != 0 and n % 7 != 0]
print(lista)
5108927 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import TemplateView, FormView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.core.exceptions import ImproperlyConfigured
from interactions.functions import (
save_self_answers_to_db,
save_relation_answers_to_db,
find_similar_usernames,
find_answer_groups_counts,
get_data_fn,
return_questions,
form_json_data, send_relation_email
)
from interactions.forms import (
RelationSelectorForm, ReferralCodeForm,
AnswerFormset
)
from core.mixins import CustomLoginRequiredMixin, GoogleRecaptchaMixin
class HowtoView(TemplateView):
    """Renders the self-test how-to page.

    Anonymous visitors may still view the page; they only receive a
    warning message that protected actions will redirect to login.
    """

    template_name = 'interactions/howto_self.html'

    def dispatch(self, request, *args, **kwargs):
        user_is_anonymous = not request.user.is_authenticated
        if user_is_anonymous:
            messages.add_message(
                request, messages.WARNING,
                'You are not logged in, you will be redirected to login'
            )
        return super().dispatch(request, *args, **kwargs)
class View(PermissionRequiredMixin, TemplateView):
    """Data page restricted to users holding the special-access permission."""

    template_name = 'interactions/view.html'
    raise_exception = True  # respond 403 instead of redirecting to login
    permission_required = ('users.special_access',)
    permission_denied_message = (
        'You do not have the required permissions to access that page'
    )
    # NOTE: evaluated once at import time, so this data snapshot is shared
    # by every request until the process restarts.
    extra_context = {'data': get_data_fn()}
class BaseQuestionView(
    CustomLoginRequiredMixin, GoogleRecaptchaMixin,
    SuccessMessageMixin, FormView
):
    """Shared plumbing for the self- and relation-questionnaire views.

    Subclasses must set ``answer_group_model`` to 'SelfAnswerGroup' or
    'RelationAnswerGroup'; that choice selects both the question fixture
    and the answer-saving function used in ``form_valid``.
    """

    form_class = AnswerFormset
    # Must be overridden by subclasses; validated in dispatch().
    answer_group_model = None
    # Populated per request in dispatch() from the question fixture.
    questions = None
    answer_saver = None
    success_message = 'Your answers were saved successfully'
    permission_denied_message = ('You have to be logged in to '
                                 'attempt the tests')

    def dispatch(self, request, *args, **kwargs):
        # Fail fast on a misconfigured subclass before any form handling.
        if not self.answer_group_model:
            raise ImproperlyConfigured(
                'Attribute answer_group_model set incorrectly. Valid '
                'options are SelfAnswerGroup and RelationsAnswerGroup.'
            )
        self.questions = return_questions(self.answer_group_model)
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Expose the loaded questions to the template.
        context = super().get_context_data(**kwargs)
        context['questions'] = self.questions
        return context

    def form_valid(self, formset):
        """Persist the submitted answers and route to the results page."""
        json_data = form_json_data(formset, self.questions)
        try:
            profile_pk = self.kwargs['profile_pk']
            against = self.kwargs['against']
        except KeyError:
            # Self tests carry no target profile in the URL.
            profile_pk = None
            against = None
        finally:
            request = self.request
            if self.answer_group_model == 'SelfAnswerGroup':
                answer_saver = save_self_answers_to_db
            elif self.answer_group_model == 'RelationAnswerGroup':
                answer_saver = save_relation_answers_to_db
            returned_data = answer_saver(
                json_data, request, profile_pk, against
            )
            try:
                # Relation saves return (answer_group_pk, attempted_against);
                # self saves return a bare pk, for which len() raises
                # TypeError and falls through to the except branch below.
                if len(returned_data) == 2:
                    primary_key, attempted_against = returned_data
                    self.request.session['rel_ans_gp'] = primary_key
                    self.success_url = reverse_lazy(
                        'graphs:comparison_view',
                        kwargs={'self_pk': attempted_against,
                                'relation_pk': primary_key}
                    )
            except TypeError:
                self.request.session['self_ans_gp'] = returned_data
                self.success_url = reverse_lazy(
                    'graphs:single_result', kwargs={'pk': returned_data}
                )
        return super().form_valid(formset)
class SelfQuestionView(BaseQuestionView):
    """ Displays the ``form`` which eventually saves all the answers in the
    database after necessary validation. The function ``return_questions``
    yields a list of questions which is present as a json file present in
    ``interactions/static/data/self_questions.json``. After a test is
    successfully attempted, a ``request.session['self_ans_gp']`` is set
    to the Test ID of the newly created test. This can be used for
    notification purposes on the results page. """

    template_name = 'interactions/questions.html'
    # Selects question loading and answer saving in BaseQuestionView.
    answer_group_model = 'SelfAnswerGroup'
class RelationQuestionView(BaseQuestionView):
    """ Same as ``SelfQuestionView`` with the difference that the questions
    sent are loaded from ``interactions/static/data/relation_questions.json``.
    A ``request.session['rel_ans_gp']`` set in this case. """

    template_name = 'interactions/questions.html'
    answer_group_model = 'RelationAnswerGroup'

    def get_success_url(self):
        """Notify the test owner by email, then redirect as usual.

        ``send_relation_email`` is called purely for its side effect; its
        return value was previously bound to an unused variable.
        """
        primary_key = self.request.session['rel_ans_gp']
        send_relation_email(primary_key, self.request)
        return super().get_success_url()
class ReferralView(CustomLoginRequiredMixin, FormView):
    """Accepts a referral code and redirects to the matching relation test.

    NOTE: the original no-op ``get_initial`` override (it only returned
    ``super().get_initial()``) has been removed; behaviour is unchanged.
    """

    form_class = ReferralCodeForm
    template_name = 'interactions/howto_referral.html'

    def form_valid(self, form):
        """Resolve the referral code into a profile pair and redirect."""
        profile_pk, against = form.get_form_contents()
        self.success_url = reverse_lazy(
            'interactions:taketest-relations',
            kwargs={'profile_pk': profile_pk, 'against': against}
        )
        return super().form_valid(form)
@login_required
def howto_relations_view(request):
    """Search for a profile to attempt a relation test against.

    On a valid POST, renders the search results with a message chosen by
    the number of matching profiles; on an invalid POST or a GET, renders
    the bare search form. The three previously duplicated render calls
    for the valid-POST case are consolidated into one.
    """
    if request.method == 'POST':
        form = RelationSelectorForm(request.POST)
        if form.is_valid():
            queryset = find_similar_usernames(form, request)
            answer_groups_counts = find_answer_groups_counts(queryset)
            context = {
                'form': form,
                'queryset': list(zip(queryset, answer_groups_counts))
            }
            # Exactly one of these branches fires (0, >1 or ==1 matches).
            if not queryset:
                messages.info(request, 'No such profile exists')
            elif len(queryset) > 1:
                messages.info(
                    request, 'There are multiple profiles with that username')
            else:
                messages.success(request, 'The requested profile was found!')
            return render(request, 'interactions/howto_relations.html',
                          context)
        # Invalid form: warn and fall through to re-render the bound form.
        messages.info(request, 'Please correct the errors below ')
    else:
        form = RelationSelectorForm(request.GET or None)
    return render(request, 'interactions/howto_relations.html', {'form': form})
| StarcoderdataPython |
8004927 | <gh_stars>0
import math
from .ffx import Feistel_cipher
from Crypto.Cipher import AES
class FF3(Feistel_cipher):
    """NIST-standardised FF3 format-preserving (Feistel) cipher.

    Implements the FF3 round structure following NIST SP 800-38G and its
    published example vectors:
    https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-38g.pdf
    https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/FF3samples.pdf
    """

    # Default 64-bit all-zero tweak used when the caller supplies none.
    tweak = bytes.fromhex('0000000000000000')

    def __init__(self, key, radix):
        super().__init__(key, radix)
        # FF3 applies AES to byte-reversed blocks; reversing the key once
        # here lets a single ECB context serve the whole cipher.
        key = bytes(reversed(list(key)))
        self.cipherECB = AES.new(key, AES.MODE_ECB)

    @staticmethod
    def byte_xor(v1, v2):
        """Byte-wise XOR of two equal-length byte strings."""
        return bytes([_a ^ _b for _a, _b in zip(v1, v2)])

    @staticmethod
    def reverse_mask_64(by):
        """Reverse the bit order of an 8-byte big-endian value."""
        x = int.from_bytes(by, 'big')
        x = ((x & 0x5555555555555555) << 1) | ((x & 0xAAAAAAAAAAAAAAAA) >> 1)
        x = ((x & 0x3333333333333333) << 2) | ((x & 0xCCCCCCCCCCCCCCCC) >> 2)
        x = ((x & 0x0F0F0F0F0F0F0F0F) << 4) | ((x & 0xF0F0F0F0F0F0F0F0) >> 4)
        x = ((x & 0x00FF00FF00FF00FF) << 8) | ((x & 0xFF00FF00FF00FF00) >> 8)
        x = ((x & 0x0000FFFF0000FFFF) << 16) | ((x & 0xFFFF0000FFFF0000) >> 16)
        x = ((x & 0x00000000FFFFFFFF) << 32) | ((x & 0xFFFFFFFF00000000) >> 32)
        return x.to_bytes(8, 'big')

    @staticmethod
    def reverse(by):
        """Return the bytes in reversed (byte) order."""
        by = list(by)
        by.reverse()
        return bytes(by)

    def _numRaxixX(self, X, reverse=False):
        """Interpret the digit list X as an integer in the cipher radix.

        With ``reverse=True`` the digits are read last-to-first, as FF3
        requires for the B half.
        """
        x = 0
        if reverse:
            for i in range(len(X)):
                x = x*self.radix + X[len(X)-i-1]
        else:
            for i in range(len(X)):
                x = x*self.radix + X[i]
        return x

    def splitN(self, n, length):
        """Decompose integer n into ``length`` radix digits (most
        significant first)."""
        ret = list()
        for _ in range(length):
            ret.append(n % self.radix)
            n = n//self.radix
        ret.reverse()
        return ret

    def split(self, v):
        # Splits the list into two halves at ceil(len(v)/2).
        # FF1 uses floor, FF3 uses ceiling.
        s = int((len(v)+1) //2)
        return v[:s], v[s:]

    def setP(self, W, i, B):
        """Build the 16-byte round-function input block P from the tweak
        half W, the round number i and the digit half B."""
        p = [0]*16
        p[0:4] = list(W)
        p[3] = p[3] ^ i
        numRadixB = self._numRaxixX(B, reverse=True)
        p[4:16] = list(numRadixB.to_bytes(12, byteorder='big'))
        return p

    def setS(self, p):
        # AES over byte-reversed input/output, per the FF3 spec.
        return bytes(reversed(list(self.cipherECB.encrypt(bytes(reversed(list(p)))))))

    def encrypt(self, v, tweak=None):
        """Encrypt the digit list ``v`` under the (64-bit) ``tweak``."""
        if not tweak:
            tweak = self.tweak
        if len(tweak) != 8:
            raise ValueError('Tweak not 64-bit')
        T = [tweak[0:4], tweak[4:8]]
        A, B = self.split(v)
        # Eight Feistel rounds, alternating tweak halves.
        for i in range(8):
            P = self.setP(T[(i+1)%2], i, B)
            S = self.setS(bytes(P))
            y = int.from_bytes(bytes(S), byteorder='big')
            m = (len(v)+(i+1)%2)//2
            c = (self._numRaxixX(A, True) + y)%(self.radix ** m)
            C = self.splitN(c, m)
            C.reverse()
            A, B = B, C
        return A + B

    def decrypt(self, v, tweak=None):
        """Invert :meth:`encrypt`.

        The default was previously the mutable ``tweak=[]``; since an empty
        list is falsy the behaviour is identical, but ``None`` is the safe
        idiom and matches :meth:`encrypt`.
        """
        if not tweak:
            tweak = self.tweak
        if len(tweak) != 8:
            raise ValueError('Tweak not 64-bit')
        T = [tweak[0:4], tweak[4:8]]
        A, B = self.split(v)
        # Rounds run in reverse order relative to encrypt.
        for i in range(7,-1,-1):
            P = self.setP(T[(i+1)%2], i, A)
            S = self.setS(bytes(P))
            y = int.from_bytes(bytes(S), byteorder='big')
            m = (len(v)+(i+1)%2)//2
            c = (self._numRaxixX(B, True) - y)%(self.radix ** m)
            C = self.splitN(c, m)
            C.reverse()
            A, B = C, A
        return A + B

    def add(self, a, b):
        """Not used by FF3."""
        raise NotImplementedError()

    def sub(self, a, b):
        """Not used by FF3."""
        raise NotImplementedError()

    def round(self, i, s, msg_length, tweak):
        # Subclasses must implement the round function and handle keys.
        # i is the round number, s is the round input string, msg_length is
        # the length of the message, tweak is the tweak.
        raise NotImplementedError()
| StarcoderdataPython |
5176106 | <filename>blog/views.py
from django.shortcuts import render, get_object_or_404
from blog.models import Category, Article
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url="login")
def list(request):
    """Render a paginated listing of all articles (three per page)."""
    all_articles = Article.objects.all()
    paginator = Paginator(all_articles, 3)
    requested_page = request.GET.get('page')
    context = {
        'title': 'Artículos',
        'articles': paginator.get_page(requested_page),
    }
    return render(request, 'articles/list.html', context)
@login_required(login_url="login")
def category(request, category_id):
    """Render the detail page of one category (404 when it does not exist)."""
    requested_category = get_object_or_404(Category, id=category_id)
    context = {'category': requested_category}
    return render(request, 'categories/category.html', context)
@login_required(login_url="login")
def article(request, article_id):
    """Render the detail page of one article (404 when it does not exist)."""
    requested_article = get_object_or_404(Article, id=article_id)
    return render(request, 'articles/detail.html', {'article': requested_article})
3327731 | <reponame>mmphego/misc-py-utils
import pathlib
from collections import OrderedDict,defaultdict, namedtuple
from functools import partial, wraps
from pprint import PrettyPrinter as pprint
pprint = PrettyPrinter(indent=4).pprint
def iterate_recursive_dict(dictionary, keys=()):
    """Generator walking a nested dict structure depth-first.

    Yields ``(compound_key, value)`` for every leaf, where compound_key is
    the tuple of keys leading to that leaf.

    :example:
    >>> eg = {
        'key_1': 'value_1',
        'key_2': {'key_21': 21,
                  'key_22': {'key_221': 221, 'key_222': 222}}}
    >>> for compound_key, val in iterate_recursive_dict(eg):
            print('{}: {}'.format(compound_key, val))
    # should produce output:
    ::
        ('key_1', ): value_1
        ('key_2', 'key_21'): 21
        ('key_2', 'key_22', 'key_221'): 221
        ('key_2', 'key_22', 'key_222'): 222
    """
    if not isinstance(dictionary, dict):
        # Reached a leaf: emit the accumulated key path and the value.
        yield (keys, dictionary)
        return
    for key, value in dictionary.items():
        yield from iterate_recursive_dict(value, keys + (key,))
class DictToNamespace(object):
    """Recursively convert a (possibly nested) dictionary into an object
    whose keys become attributes.

    Nested dicts become nested ``DictToNamespace`` instances, including
    dicts found inside lists and tuples (tuples come back as lists).

    Fix: the original recursed through an undefined name ``DictObject``,
    which raised NameError for any nested dict; recursion now uses this
    class itself.

    :example:
    >>> ns = DictToNamespace({'x': 1, 'y': 'foo'})
    >>> ns.x
    1
    >>> ns.y
    'foo'
    >>> ns = DictToNamespace({'a': 1, 'b': {'c': 2}, 'd': ['hi', {'foo': 'bar'}]})
    >>> ns.b.c
    2
    >>> ns.d[1].foo
    'bar'
    """

    def __init__(self, _dict):
        for key, value in _dict.items():
            if isinstance(value, (list, tuple)):
                # Convert dicts found inside sequences as well.
                setattr(
                    self,
                    key,
                    [DictToNamespace(x) if isinstance(x, dict) else x
                     for x in value],
                )
            else:
                setattr(
                    self, key,
                    DictToNamespace(value) if isinstance(value, dict)
                    else value
                )
class Structure:
    """Minimal positional-record base class.

    Subclasses declare ``_fields``; the constructor assigns positional
    arguments to those attribute names in order. Keyword arguments are
    not supported.

    :example:
    >>> class Address(Structure):
    ...     _fields = ["hostname", "port"]
    >>> a = Address("localhost", 8080)
    >>> a.hostname
    'localhost'
    >>> a.port
    8080
    """

    _fields = []

    def __init__(self, *args):
        # zip truncates, so extra field names simply stay unset.
        for attr_name, attr_value in zip(self._fields, args):
            setattr(self, attr_name, attr_value)
def list_flatten(lst):
    """Flatten arbitrarily nested lists into a single flat list.

    Fix: the original implementation round-tripped the list through
    ``str``/``eval`` — which broke (or worse, executed data) whenever an
    element was a string containing brackets or commas. This version
    recurses structurally and never evaluates element contents. Only
    ``list`` elements are flattened; tuples and other sequences are kept
    as-is, matching the bracket-only behaviour of the original.

    :type lst: list
    :example:
    >>> list_flatten([1, [2, [3, 4]], 5])
    [1, 2, 3, 4, 5]
    """
    flat = []
    for item in lst:
        if isinstance(item, list):
            flat.extend(list_flatten(item))
        else:
            flat.append(item)
    return flat
def dict_search_value(d, kname):
    """Search recursively for key ``kname`` inside dict ``d`` (including
    dicts nested in lists) and return the first value found.

    Returns None when the key is absent. Note: a stored value of None is
    indistinguishable from "not found".

    Fix: the original tested ``if value:`` on recursive results, silently
    discarding legitimately-found falsy values (0, '', [], False); the
    check is now ``is not None``. ``type(x) == T`` checks were replaced
    with ``isinstance``. The original traversal order (descend into a
    value before comparing its key) is preserved.

    d: dictionary to search into
    kname: key name to search for
    """
    if not isinstance(d, dict):
        return None
    for k, v in d.items():
        if isinstance(v, dict):
            found = dict_search_value(v, kname)
            if found is not None:
                return found
        elif isinstance(v, list):
            for element in v:
                if isinstance(element, dict):
                    found = dict_search_value(element, kname)
                    if found is not None:
                        return found
        if k == kname:
            return v
    return None
def compare_assert_dicts(self, fields, dict_1, dict_2):
    """Given a list of fields/keys and two dictionaries, search for each key
    in both dictionaries (recursively, via ``dict_search_value``) and assert
    the found values equal. List values are compared order-insensitively.

    Fix: the original wrote ``value = list(value).sort()`` — ``list.sort()``
    returns None, so *both* sides became None and every list comparison
    vacuously passed. ``sorted()`` is used instead.

    fields: a list of fields/keys to verify
    dict_1: first dictionary
    dict_2: second dictionary
    """
    for f in fields:
        value_dict1 = dict_search_value(dict_1, f)
        value_dict2 = dict_search_value(dict_2, f)
        if isinstance(value_dict1, list):
            value_dict1 = sorted(value_dict1)
        if isinstance(value_dict2, list):
            value_dict2 = sorted(value_dict2)
        self.assertEqual(
            value_dict1,
            value_dict2,
            "Returned value: %s, expected value: %s"
            % (str(value_dict1), str(value_dict2)),
        )
def config_to_dict(
    config_file: str, preserve_case: bool = True, pretty_print: bool = False
) -> dict:
    """Parse an INI file into ``{section: {option: value}}``.

    :param config_file: path of the INI file; FileNotFoundError is raised
        with the message "Config file not parsed!" when it does not exist.
    :param preserve_case: keep option names exactly as written
        (ConfigParser lower-cases them by default).
    :param pretty_print: additionally pretty-print the result to stdout.
    :return: dictionary keyed by section name.

    Example:
        >>> config_to_dict("config.ini")
        {'SectionOne': {'Status': 'Single', ...}, ...}
    """
    import configparser

    if not pathlib.Path(config_file).exists():
        raise FileNotFoundError("Config file not parsed!")

    parser = configparser.ConfigParser()
    if preserve_case:
        # Identity transform disables ConfigParser's default lower-casing.
        parser.optionxform = lambda option: option
    parser.read(config_file)

    result = {
        section: dict(parser.items(section)) for section in parser.sections()
    }
    if pretty_print:
        pprint(result)
    return result
| StarcoderdataPython |
6434936 | <reponame>MadManSC2/sharpy-sc2
import os
import shutil
import subprocess
import argparse
import zipfile
from typing import Tuple, List, Optional
from version import update_version_txt
# Directory containing this script; all relative paths resolve from here.
root_dir = os.path.dirname(os.path.abspath(__file__))

# Files or folders common to all bots.
# Each entry is (source, destination-in-zip); destination None keeps the
# source path unchanged inside the archive.
common = [
    ("jsonpickle", None),
    (os.path.join("python-sc2", "sc2"), "sc2"),
    ("sc2pathlibp", None),
    ("requirements.txt", None),
    ("version.txt", None),
    ("config.py", None),
    ("config.ini", None),
    ("ladder.py", None),
    ("ladderbots.json", None),
]

# ladderbots.json template for Python (source) bots.
# NOTE: the name shadows the stdlib json module (not imported in this file).
json = """{
"Bots": {
"[NAME]": {
"Race": "[RACE]",
"Type": "Python",
"RootPath": "./",
"FileName": "run.py",
"Debug": false
}
}
}"""

# ladderbots.json template for compiled (executable) bots.
json_exe = """{
"Bots": {
"[NAME]": {
"Race": "Protoss",
"Type": "cppwin32",
"RootPath": "./",
"FileName": "[NAME].exe",
"Debug": false
}
}
}"""
class LadderZip:
    """Describes how one bot is packaged into a ladder-ready zip archive."""

    archive: str
    files: List[Tuple[str, Optional[str]]]

    def __init__(self, archive_name: str, race: str,
                 files: List[Tuple[str, Optional[str]]],
                 common_files: List[Tuple[str, Optional[str]]] = None):
        """
        :param archive_name: bot name, also used as the zip base name.
        :param race: race string written into ladderbots.json.
        :param files: (source, destination) pairs; destination None keeps
            the source path as-is inside the archive.
        :param common_files: when given, replaces the module-level
            ``common`` file list.
        """
        self.name = archive_name
        self.race = race
        self.archive = archive_name + ".zip"
        self.files = files

        if common_files:
            self.files.extend(common_files)
        else:
            self.files.extend(common)

        # Executable
        # --specpath /opt/bk/spec --distpath /opt/bk/dist --workpath /opt/bk/build
        # pyinstaller command template; the [FOLDER]/[NAME]/[OUTPUTFOLDER]
        # placeholders are substituted in package_executable().
        self.pyinstaller = 'pyinstaller -y --add-data "[FOLDER]/sc2pathlibp' \
                           '";"sc2pathlibp/" --add-data "[FOLDER]/sc2";"sc2/" ' \
                           '--add-data "[FOLDER]/config.ini";"." --add-data ' \
                           '"[FOLDER]/version.txt";"." ' \
                           '"[FOLDER]/run.py" ' \
                           '-n "[NAME]" ' \
                           '--distpath "[OUTPUTFOLDER]"'

    def create_json(self):
        """Return ladderbots.json content for the Python (source) build."""
        return json.replace("[NAME]", self.name).replace("[RACE]", self.race)

    def create_bin_json(self):
        """Return ladderbots.json content for the executable build.

        NOTE(review): the json_exe template hard-codes Race "Protoss";
        self.race is substituted only for [RACE], which json_exe lacks —
        confirm this is intended for non-Protoss bots.
        """
        return json_exe.replace("[NAME]", self.name).replace("[RACE]", self.race)

    def pre_zip(self):
        """ Override this as needed, actions to do before creating the zip"""
        pass

    def post_zip(self):
        """ Override this as needed, actions to do after creating the zip"""
        pass

    def package_executable(self, output_dir: str):
        """Unzip the source archive, build a pyinstaller executable from it,
        re-zip it as ``<name>_bin.zip`` and clean up the work folders."""
        zip_name = f'{self.name}_bin.zip'

        print()
        print("unzip")
        zip_path = os.path.join(output_dir, self.archive)
        source_path = os.path.join(output_dir, self.name + "_source")
        bin_path = os.path.join(output_dir, self.name)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(source_path)

        print("run pyinstaller")
        # Substitute the template placeholders for this build.
        self.pyinstaller = self.pyinstaller.replace('[FOLDER]', source_path)\
            .replace('[OUTPUTFOLDER]', bin_path)\
            .replace("[NAME]", self.name)
        print(self.pyinstaller)
        subprocess.run(self.pyinstaller)

        # Reset bin path as pyinstaller likes to make a new run folder
        run_path = os.path.join(bin_path, self.name)

        # remove PIL and cv2 (pulled in by pyinstaller but not needed)
        print("removing PIL and cv2")
        shutil.rmtree(os.path.join(run_path, "cv2"))
        shutil.rmtree(os.path.join(run_path, "PIL"))

        # Create new ladderbots.json pointing at the executable.
        f = open(os.path.join(run_path, "ladderbots.json"), "w+")
        f.write(self.create_bin_json())
        f.close()

        print("Zip executable version")
        zipf = zipfile.ZipFile(os.path.join(output_dir, zip_name), 'w', zipfile.ZIP_DEFLATED)
        zipdir(run_path, zipf, run_path)
        zipf.close()

        shutil.rmtree(bin_path)
        shutil.rmtree(source_path)
class DummyZip(LadderZip):
    """LadderZip for the bundled dummy bots.

    Before zipping, the selected dummy script is copied to
    ``dummy/dummy.py`` (imported by ``dummies/run.py``) and, optionally, a
    build-override line is appended to ``config.ini``; both changes are
    undone in :meth:`post_zip`.
    """

    def __init__(self, archive_name: str, race: str, file: str, build: str = None):
        # Path of the dummy implementation to package.
        self.dummy_file = file
        self.new_dummy_file = os.path.join(root_dir, "dummy", "dummy.py")
        # Optional "key = value" line appended to config.ini for this build.
        self.build = build

        files = [
            ("sharpy", None),
            ("dummy", None),
            (os.path.join("dummies", "run.py"), "run.py"),
        ]
        super().__init__(archive_name, race, files)

    def pre_zip(self):
        # Stage the config override and the dummy file.
        if self.build:
            # NOTE(review): writelines() adds no newline after self.build
            # (the "" element adds nothing) — confirm config.ini always
            # ends with a newline before this append.
            with open("config.ini", 'a', newline='\n') as handle:
                handle.writelines([self.build, ""])
        shutil.copy(self.dummy_file, self.new_dummy_file)

    def post_zip(self):
        # Undo the staging done in pre_zip.
        if self.build:
            with open("config.ini", "r") as f:
                lines = f.readlines()
            with open("config.ini", "w") as f:
                for line in lines:
                    # NOTE(review): substring match — any line *containing*
                    # the build string is dropped, not only the one that
                    # was appended.
                    if self.build not in line:
                        f.write(line)
        os.remove(self.new_dummy_file)
# Registry of packageable dummy bots: CLI key -> LadderZip configuration.
# The special key "all" (value None) is handled in main().
zip_types = {
    # Protoss dummies
    "zealot": DummyZip("SharpKnives", "Protoss", os.path.join("dummies", "protoss", "proxy_zealot_rush.py")),
    "cannonrush_all": DummyZip("SharpCannonAll", "Protoss", os.path.join("dummies", "protoss", "cannon_rush.py")),
    "cannonrush_1": DummyZip("SharpCannonRush", "Protoss", os.path.join("dummies", "protoss", "cannon_rush.py"), "cannon_rush = 0"),
    "cannonrush_2": DummyZip("SharpCannonContain", "Protoss", os.path.join("dummies", "protoss", "cannon_rush.py"), "cannon_rush = 1"),
    "cannonrush_3": DummyZip("SharpCannonExpand", "Protoss", os.path.join("dummies", "protoss", "cannon_rush.py"), "cannon_rush = 2"),
    "dt": DummyZip("SharpShadows", "Protoss", os.path.join("dummies", "protoss", "dark_templar_rush.py")),
    "4gate": DummyZip("SharpRush", "Protoss", os.path.join("dummies", "protoss", "gate4.py")),
    "stalker": DummyZip("SharpSpiders", "Protoss", os.path.join("dummies", "protoss", "macro_stalkers.py")),
    "madai": DummyZip("MadAI", "Protoss", os.path.join("dummies", "protoss", "MadAI.py")),
    "robo": DummyZip("SharpRobots", "Protoss", os.path.join("dummies", "protoss", "robo.py")),
    "voidray": DummyZip("SharpRays", "Protoss", os.path.join("dummies", "protoss", "voidray.py")),

    # Terran dummies
    "cyclone": DummyZip("RustyLocks", "Terran", os.path.join("dummies", "terran", "cyclones.py")),
    "oldrusty": DummyZip("OldRusty", "Terran", os.path.join("dummies", "terran", "rusty.py")),
    "bc": DummyZip("FlyingRust", "Terran", os.path.join("dummies", "terran", "battle_cruisers.py")),
    "marine_all": DummyZip("RustyMarinesAll", "Terran", os.path.join("dummies", "terran", "marine_rush.py")),
    "marine_1": DummyZip("RustyMarines1", "Terran", os.path.join("dummies", "terran", "marine_rush.py"), "marine = 0"),
    "marine_2": DummyZip("RustyMarines2", "Terran", os.path.join("dummies", "terran", "marine_rush.py"), "marine = 1"),
    "marine_3": DummyZip("RustyMarines3", "Terran", os.path.join("dummies", "terran", "marine_rush.py"), "marine = 2"),
    "tank": DummyZip("RustyTanks", "Terran", os.path.join("dummies", "terran", "two_base_tanks.py")),
    "bio": DummyZip("RustyInfantry", "Terran", os.path.join("dummies", "terran", "bio.py")),
    "banshee": DummyZip("RustyScreams", "Terran", os.path.join("dummies", "terran", "banshees.py")),

    # Zerg dummies
    "lings": DummyZip("BluntTeeth", "Zerg", os.path.join("dummies", "zerg", "lings.py")),
    "200roach": DummyZip("BluntRoach", "Zerg", os.path.join("dummies", "zerg", "macro_roach.py")),
    "macro": DummyZip("BluntMacro", "Zerg", os.path.join("dummies", "zerg", "macro_zerg_v2.py")),
    "mutalisk": DummyZip("BluntFlies", "Zerg", os.path.join("dummies", "zerg", "mutalisk.py")),
    "hydra": DummyZip("BluntSpit", "Zerg", os.path.join("dummies", "zerg", "roach_hydra.py")),
    # "spine": DummyZip("BluntDefender", "Zerg", os.path.join("dummies", "debug", "spine_defender.py")),
    "12pool": DummyZip("BluntCheese", "Zerg", os.path.join("dummies", "zerg", "twelve_pool.py")),
    "workerrush": DummyZip("BluntyWorkers", "Zerg", os.path.join("dummies", "zerg", "worker_rush.py")),

    # All
    "all": None
}
def zipdir(path: str, ziph: zipfile.ZipFile, remove_path: Optional[str] = None):
    """Recursively add every file under *path* to the open ZipFile *ziph*.

    Files in any ``__pycache__`` directory are skipped. When *remove_path*
    is given, it is stripped from each stored archive name.
    """
    # ziph is zipfile handle
    for root, _dirs, file_names in os.walk(path):
        if "__pycache__" in root:
            continue
        for file_name in file_names:
            full_path = os.path.join(root, file_name)
            if remove_path:
                ziph.write(full_path, full_path.replace(remove_path, ""))
            else:
                ziph.write(full_path)
def create_ladder_zip(archive_zip: LadderZip, exe: bool):
    """Build the ladder-ready zip archive described by *archive_zip*.

    Stages/renames the bot-specific files, writes ladderbots.json, zips
    everything into ``publish/<archive>.zip`` and removes the temporary
    copies again. When *exe* is True, an executable build is additionally
    produced via LadderZip.package_executable().
    """
    update_version_txt()
    print()

    archive_name = archive_zip.archive
    bot_specific_paths = archive_zip.files

    # Remove previous archive
    if os.path.isfile(archive_name):
        print(f"Deleting {archive_name}")
        os.remove(archive_name)

    files_to_zip = []
    directories_to_zip = []
    # Temporary copies created below; removed again after zipping.
    files_to_delete = []

    f = open("ladderbots.json", "w+")
    f.write(archive_zip.create_json())
    f.close()

    archive_zip.pre_zip()

    for src, dest in bot_specific_paths:
        if not os.path.exists(src):
            raise ValueError(f"'{src}' does not exist.")

        if dest is None:
            # the file or folder can be used as is.
            if os.path.isdir(src):
                directories_to_zip.append(src)
            else:
                files_to_zip.append(src)
        else:  # need to move the file or folder.
            if os.path.isdir(src):
                shutil.copytree(src, dest)
                directories_to_zip.append(dest)
                files_to_delete.append(dest)
            else:  # src is a file.
                src_file = os.path.basename(src)

                if os.path.isdir(dest):
                    # Join directory with filename
                    dest_path = os.path.join(dest, src_file)
                else:
                    # Rename into another file
                    dest_path = dest

                files_to_zip.append(dest_path)

                print(f"Copying {src} ... {dest_path}")
                files_to_delete.append(dest_path)
                shutil.copy(src, dest_path)

    print()
    print(f"Zipping {archive_name}")
    zipf = zipfile.ZipFile(archive_name, 'w', zipfile.ZIP_DEFLATED)
    for file in files_to_zip:
        zipf.write(file)
    for directory in directories_to_zip:
        zipdir(directory, zipf)
    zipf.close()

    print()
    # Clean up the temporary copies made above.
    for file in files_to_delete:
        if os.path.isdir(file):
            print(f"Deleting directory {file}")
            # os.rmdir(file)
            shutil.rmtree(file)
        else:
            print(f"Deleting file {file}")
            os.remove(file)

    os.remove("ladderbots.json")

    if not os.path.exists('publish'):
        os.mkdir('publish')

    shutil.move(archive_name, os.path.join("publish", archive_name))

    archive_zip.post_zip()
    print(f"\nSuccessfully created {os.path.join('publish', archive_name)}")

    if exe:
        archive_zip.package_executable("publish")
def get_archive(bot_name: str) -> LadderZip:
    """Look up the LadderZip configuration for *bot_name* (case-insensitive).

    Returns None for unknown names, mirroring dict.get().
    """
    return zip_types.get(bot_name.lower())
def main():
    """CLI entry point: package one dummy bot (or all of them) as ladder zips."""
    zip_keys = list(zip_types.keys())
    parser = argparse.ArgumentParser(
        description="Create a Ladder Manager ready zip archive for SC2 AI, AI Arena, Probots, ..."
    )
    parser.add_argument("-n", "--name", help=f"Bot name: {zip_keys}.")
    parser.add_argument("-e", "--exe", help="Also make executable (Requires pyinstaller)", action="store_true")
    args = parser.parse_args()

    bot_name = args.name

    # DummyZip.pre_zip copies the selected dummy into ./dummy/dummy.py.
    if not os.path.exists('dummy'):
        os.mkdir('dummy')

    # Omitting --name (or passing "all") packages every registered bot.
    if bot_name == "all" or not bot_name:
        zip_keys.remove("all")
        for key in zip_keys:
            create_ladder_zip(get_archive(key), args.exe)
    else:
        if bot_name not in zip_keys:
            raise ValueError(f'Unknown bot: {bot_name}, allowed values are: {zip_keys}')
        create_ladder_zip(get_archive(bot_name), args.exe)
if __name__ == "__main__":
main()
| StarcoderdataPython |
300843 | <filename>Variado_GeekUniversity/guppe/doctests.py
"""
Doctests
Doctests são testes que colocamos na docstring das funções/métodos Python.
def soma(a, b):
# soma os números a e b
#>>> soma(1, 2)
#3
#>>> soma(4, 6)
#10
#
return a + b
Para rodar um teste do doctest:
python -m doctest -v nome_do_modulo.py
# Saída
Trying:
soma(1, 2)
Expecting:
3
ok
1 items had no tests:
doctests
1 items passed all tests:
1 tests in doctests.soma
1 tests in 2 items.
1 passed and 0 failed.
Test passed.
# Outro Exemplo, Aplicando o TDD
def duplicar(valores):
#duplica os valores em uma lista
#>>> duplicar([1, 2, 3, 4])
#[2, 4, 6, 8]
#>>> duplicar([])
#[]
#>>> duplicar(['a', 'b', 'c'])
#['aa', 'bb', 'cc']
#>>> duplicar([True, None])
#Traceback (most recent call last):
# ...
#TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'
#
#return [2 * elemento for elemento in valores]
# Erro inesperado...
OBS: Dentro do doctest, o Python não reconhece string com aspas duplas. Precisa ser aspas simples.
def fala_oi():
#Fala oi
#>>> fala_oi()
#'oi'
#
#return "oi"
"""
# Um último caso estranho...
def verdade():
    """Return True (doctest demonstration).

    >>> verdade()
    True
    """
    return True
| StarcoderdataPython |
9716380 | #!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
main_process_trt_data.py
================================================
This program processes TRT data obtaining plots of time series of
parameters over the TRT cell trajectory
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import argparse
import atexit
import glob
import os
from shutil import copy
from warnings import warn
import numpy as np
from pyrad.io import read_trt_traj_data, write_trt_cell_scores
from pyrad.io import write_trt_cell_lightning
from pyrad.util import belongs_roi_indices
from pyrad.graph import plot_timeseries, plot_scatter_comp, plot_pos
print(__doc__)
def main():
    """Process TRT cell trajectory files for the requested days.

    For each ``*.trt`` file under ``<trtbase>/<day>/TRTC_cell/``: keep only
    cells inside the lat/lon region of interest, copy the raw file next to
    the plots, produce per-cell time-series and position plots, and finally
    write two CSV summaries (cell scores and cell/lightning data) plus a
    flash-density-vs-rank scatter plot under ``trtbase``.
    """
    # parse the arguments
    parser = argparse.ArgumentParser(
        description='Entry to Pyrad processing framework')

    # positional arguments
    parser.add_argument(
        'days', nargs='+', type=str,
        help='Dates to process. Format YYYY-MM-DD')

    # keyword arguments
    parser.add_argument(
        '--trtbase', type=str,
        default='/store/msrad/radar/trt/',
        help='name of folder containing the TRT cell data')

    parser.add_argument(
        '--lon', type=str,
        default='8.9000010,9.2000000,9.4999970,9.4999970,8.9000010',
        help=('longitude of the points defining the perimeter of the area ' +
              'of interest'))

    parser.add_argument(
        '--lat', type=str,
        default='47.0000030,47.0000030,47.0000030,47.5999930,47.5999930',
        help=('latitude of the points defining the perimeter of the area ' +
              'of interest'))

    args = parser.parse_args()

    print("====== TRT cell processing started: %s" %
          datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    atexit.register(_print_end_msg,
                    "====== comparison finished: ")

    time_dir_list = args.days

    # Build the region-of-interest polygon from the CLI lat/lon lists.
    lons = args.lon.split(',')
    lats = args.lat.split(',')
    if np.size(lons) != np.size(lats):
        warn(
            str(np.size(lons))+' longitudes but '+str(np.size(lats)) +
            ' latitudes. Their number must be equal')
        return

    lon_list = []
    lat_list = []
    for i, lon in enumerate(lons):
        lon_list.append(float(lon))
        lat_list.append(float(lats[i]))

    roi = {
        'lon': lon_list,
        'lat': lat_list
    }

    # List for collection of max data (one entry per accepted cell)
    cell_ID_max_list = []
    nflashes_max_list = []
    area_flash_max_list = []
    flash_density_max_list = []
    time_flash_density_max_list = []
    flash_density_max_rank_list = []
    rank_max_list = []
    time_rank_max_list = []

    # List for collection of flashes data (one entry per cell time step)
    cell_ID_list = np.ma.asarray([], dtype=int)
    time_list = np.ma.asarray([], dtype=datetime.datetime)
    lon_list = np.ma.asarray([], dtype=float)
    lat_list = np.ma.asarray([], dtype=float)
    flash_density_list = np.ma.asarray([], dtype=float)
    rank_flash_density_list = np.ma.asarray([], dtype=float)
    area_list = np.ma.asarray([], dtype=float)
    nflash_list = np.ma.asarray([], dtype=int)

    for i, time_dir in enumerate(time_dir_list):
        data_input_path = args.trtbase+time_dir+'/TRTC_cell/'
        data_output_base = args.trtbase+time_dir+'/TRTC_cell_plots/'

        flist = glob.glob(data_input_path+'*.trt')

        for fname in flist:
            print('Reading TRT trajectory file '+fname)
            (traj_ID, yyyymmddHHMM, lon, lat, _, _, _, area, vel_x, vel_y,
             det, RANKr, CG_n, CG_p, CG, _, ET45, ET45m, ET15, ET15m, VIL,
             maxH, maxHm, POH, _, _, _, _) = read_trt_traj_data(fname)

            # Skip cells entirely outside the ROI, and cells only briefly
            # inside it (fewer than 3 time steps).
            inds, is_roi = belongs_roi_indices(lat, lon, roi)

            if is_roi == 'None':
                continue
            elif is_roi == 'Some' and len(lat[inds]) < 3:
                continue

            data_output_path = data_output_base+is_roi+'/'
            if not os.path.isdir(data_output_path):
                os.makedirs(data_output_path)

            # copy file next to the plots for reference
            copy(fname, data_output_path)

            # general caracteristics: flashes per unit cell area
            flash_density = CG/area

            cell_ID_max_list.append(traj_ID[0])
            flash_density_max_list.append(np.max(flash_density))
            nflashes_max_list.append(CG[np.argmax(flash_density)])
            area_flash_max_list.append(area[np.argmax(flash_density)])
            time_flash_density_max_list.append(
                yyyymmddHHMM[np.argmax(flash_density)])
            flash_density_max_rank_list.append(
                RANKr[np.argmax(flash_density)])
            rank_max_list.append(np.max(RANKr))
            time_rank_max_list.append(yyyymmddHHMM[np.argmax(RANKr)])

            cell_ID_list = np.append(cell_ID_list, traj_ID)
            time_list = np.append(time_list, yyyymmddHHMM)
            lon_list = np.append(lon_list, lon)
            lat_list = np.append(lat_list, lat)
            flash_density_list = np.append(flash_density_list, flash_density)
            rank_flash_density_list = np.append(
                rank_flash_density_list, RANKr)
            area_list = np.append(area_list, area)
            nflash_list = np.append(nflash_list, CG)

            # Time series plots (one figure per parameter per cell)
            figfname = data_output_path+str(traj_ID[0])+'_flash_density.png'
            plot_timeseries(
                yyyymmddHHMM, [flash_density], [figfname], labelx='Time UTC',
                labely='Flash density [flashes/km2]',
                title=str(traj_ID[0])+' flash density')

            figfname = data_output_path+str(traj_ID[0])+'_area.png'
            plot_timeseries(
                yyyymmddHHMM, [area], [figfname], labelx='Time UTC',
                labely='area [km2]', title=str(traj_ID[0])+' cell area')

            figfname = data_output_path+str(traj_ID[0])+'_vel.png'
            plot_timeseries(
                yyyymmddHHMM, [vel_x, vel_y], [figfname], labelx='Time UTC',
                labely='Velocity [km/h]', labels=['x speed', 'y speed'],
                title=str(traj_ID[0])+' cell velocity')

            figfname = data_output_path+str(traj_ID[0])+'_det.png'
            plot_timeseries(
                yyyymmddHHMM, [det], [figfname], labelx='Time UTC',
                labely='Detection threshold [dBZ]',
                title=str(traj_ID[0])+' cell detection threshold')

            figfname = data_output_path+str(traj_ID[0])+'_rank.png'
            plot_timeseries(
                yyyymmddHHMM, [RANKr], [figfname], labelx='Time UTC',
                labely='Rank [-]', title=str(traj_ID[0])+' cell rank')

            figfname = data_output_path+str(traj_ID[0])+'_lightning.png'
            plot_timeseries(
                yyyymmddHHMM, [CG_n, CG_p, CG], [figfname], labelx='Time UTC',
                labely='N flash [-]', labels=['CG-', 'CG+', 'CG'],
                title=str(traj_ID[0])+' flashes in cell')

            figfname = data_output_path+str(traj_ID[0])+'_ET.png'
            plot_timeseries(
                yyyymmddHHMM, [ET45, ET45m, ET15, ET15m], [figfname],
                labelx='Time UTC', labely='Echo Top [km]',
                labels=['ET45', 'ET45m', 'ET15', 'ET15m'],
                title=str(traj_ID[0])+' Echo top')

            figfname = data_output_path+str(traj_ID[0])+'_VIL.png'
            plot_timeseries(
                yyyymmddHHMM, [VIL], [figfname], labelx='Time UTC',
                labely='VIL [Kg/m2]', labels=['VIL'],
                title=str(traj_ID[0])+' VIL')

            figfname = data_output_path+str(traj_ID[0])+'_maxH.png'
            plot_timeseries(
                yyyymmddHHMM, [maxH, maxHm], [figfname], labelx='Time UTC',
                labely='Max. Echo Height [Km]', labels=['maxH', 'maxHm'],
                title=str(traj_ID[0])+' Height of Max. Reflectivity')

            figfname = data_output_path+str(traj_ID[0])+'_POH.png'
            plot_timeseries(
                yyyymmddHHMM, [POH], [figfname], labelx='Time UTC',
                labely='POH [%]', labels=['POH'],
                title=str(traj_ID[0])+' Probability of Hail')

            # plot position
            # get time since start of cell in s
            td_vec = yyyymmddHHMM-yyyymmddHHMM[0]
            tt_s = np.empty(td_vec.size, dtype=float)
            for j, td in enumerate(td_vec):
                tt_s[j] = td.total_seconds()

            cb_label = (
                'Time since '+yyyymmddHHMM[0].strftime('%Y-%m-%d %H:%M') +
                ' [s]')

            figfname = data_output_path+str(traj_ID[0])+'_pos.png'
            figfname = plot_pos(
                lat, lon, tt_s, [figfname], cb_label=cb_label,
                titl=str(traj_ID[0])+' Cell Position')
            print('Plotted '+' '.join(figfname))

    # CSV summaries across all days/cells
    fname = args.trtbase+'Santis_cell_scores.csv'
    write_trt_cell_scores(
        cell_ID_max_list, time_flash_density_max_list,
        flash_density_max_rank_list, nflashes_max_list, area_flash_max_list,
        flash_density_max_list, time_rank_max_list, rank_max_list, fname)

    fname = args.trtbase+'Santis_cell_euclid_lightning.csv'
    write_trt_cell_lightning(
        cell_ID_list, time_list, lon_list, lat_list, area_list,
        rank_flash_density_list, nflash_list, flash_density_list, fname)

    plot_scatter_comp(
        flash_density_list, rank_flash_density_list/10.,
        [args.trtbase+'hist_flash_density_rank'],
        labelx='flash density [flashes/km2]', labely='rank',
        titl='Flash density vs Rank', axis=None, metadata=None, dpi=72)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
| StarcoderdataPython |
1931968 | # -*- coding: utf-8 -*-
"""
Defines the typeset class and a number of builtin type sets.
"""
from __future__ import print_function, division, absolute_import
import collections
from functools import reduce
from itertools import starmap
from itertools import izip
from numba import typesystem
from numba.typesystem import types
__all__ = [ 'typeset', 'numeric', 'integral', 'floating', 'complextypes' ]
#----------------------------------------------------------------------------
# Signature matching
#----------------------------------------------------------------------------
def _match_argtype(type1, type2):
return (type1.is_typeset and type2 in type1.types) or type1 == type2
def _build_position_table(signature):
table = collections.defaultdict(list)
for i, argtype in enumerate(signature.args):
if argtype.is_typeset:
table[argtype].append(i)
return table
def get_effective_argtypes(promote, signature, argtypes):
    """
    Get promoted argtypes for typeset arguments, e.g.

        signature = floating(floating, floating)
        argtypes  = [float, double]
        =>
        [double, double]

    Returns a (promotion_table, argtypes) pair, where promotion_table maps
    each typeset (occurring more than once) to its promoted result type.
    """
    effective = list(argtypes)
    promotion_table = {}
    for positions in _build_position_table(signature).values():
        if len(positions) <= 1:
            continue
        # Promote the actual types that landed on every position of this
        # typeset parameter, so they agree on one concrete type.
        promoted = reduce(promote, [effective[i] for i in positions])
        promotion_table[signature.args[positions[-1]]] = promoted
        # Rewrite the argument list coherently with the promoted type.
        for i in positions:
            effective[i] = promoted
    return promotion_table, effective
def match(promote, signature, argtypes):
    """
    See whether a specialization matches the given function signature.

    Returns the concrete signature ``restype(*argtypes)`` on a match, or
    None when the arity or the argument types are incompatible.
    """
    # Guard clause: arity must agree before anything else.
    if len(signature.args) != len(argtypes):
        return None
    promotion_table, args = get_effective_argtypes(
        promote, signature, argtypes)
    # NOTE: builtin zip replaces itertools.izip here -- izip does not exist
    # on Python 3, so the module-level ``from itertools import izip`` should
    # be removed as well once no other code relies on it.
    if all(_match_argtype(formal, actual)
           for formal, actual in zip(signature.args, args)):
        restype = signature.return_type
        # A typeset return type is resolved to its promoted concrete type.
        restype = promotion_table.get(restype, restype)
        return restype(*args)
    return None
#----------------------------------------------------------------------------
# Type sets
#----------------------------------------------------------------------------
class typeset(types.NumbaType):
    """
    Holds a set of types that can be used to specify signatures for
    type inference.
    """

    # NumbaType construction metadata: type name, positional fields and
    # their defaults (consumed by the numba typesystem base class).
    typename = "typeset"
    argnames = ["types", "name"]
    defaults = {"name": None}
    flags = ["object"]

    def __init__(self, types, name):
        # The base class stores the types as a frozenset; keep the first
        # (insertion-order) member around for __repr__.
        super(typeset, self).__init__(frozenset(types), name)
        self.first_type = types[0]
        # Fast path: map exact argument-type tuples of any member function
        # types directly to their signature.
        self._from_argtypes = {}
        for type in types:
            if type.is_function:
                self._from_argtypes[type.args] = type

    def find_match(self, promote, argtypes):
        """Return the member signature matching *argtypes*, or None."""
        argtypes = tuple(argtypes)
        # Exact match against a known function signature first...
        if argtypes in self._from_argtypes:
            return self._from_argtypes[argtypes]
        # ...otherwise try each member type with promotion-based matching.
        for type in self.types:
            signature = match(promote, type, argtypes)
            if signature:
                return signature
        return None

    def __iter__(self):
        return iter(self.types)

    def __repr__(self):
        return "typeset(%s, ...)" % (self.first_type,)

    def __hash__(self):
        # Identity-based hash: typesets are used as dict keys (see the
        # promotion table in match()) and must stay hashable even though
        # they hold a set of types.
        return hash(id(self))
# Builtin type sets exported via __all__, built from the typesystem's
# standard type groupings.
numeric = typeset(typesystem.numeric)
integral = typeset(typesystem.integral)
floating = typeset(typesystem.floating)
complextypes = typeset(typesystem.complextypes)
| StarcoderdataPython |
11311703 | <filename>browseragents/core.py
import useragentdata as data
from random import choice
def random(browser=None, os=None):
    """Return a random user-agent string, optionally filtered by browser
    name and/or operating-system name (case-insensitive substring match)."""
    agents = data.agents
    if browser and os:
        wanted_browser = browser.lower()
        wanted_os = os.lower()
        agents = [a for a in agents
                  if wanted_browser in a['browserName'].lower()
                  and wanted_os in a['osName'].lower()]
    elif browser:
        wanted_browser = browser.lower()
        agents = [a for a in agents if wanted_browser in a['browserName'].lower()]
    elif os:
        wanted_os = os.lower()
        agents = [a for a in agents if wanted_os in a['osName'].lower()]
    return choice(agents)['userAgent']
| StarcoderdataPython |
3464397 | <gh_stars>0
from logging import getLogger

import anndata
import numpy as np
import pandas as pd

# Module-level logger. Use the directly-imported getLogger: the 'logging'
# module itself was never imported, so the original
# ``logging.getLogger(__name__)`` raised NameError at import time.
logger = getLogger(__name__)
387002 | import os
import sys
from typing import List
from .model import ExecutedChecksResultList
from .runner import Runner
from .repository import Repository
from .config import ConfigLoader
from .scheduler import Scheduler
from .versioning import get_version
from rkd.api.inputoutput import IO
class Controller(object):
    """
    Constructs application context and passes actions to given services that are taking care about the processing
    """

    # Service singletons wired together in __init__.
    project_dirs: list
    runner: Runner
    repository: Repository
    config_loader: ConfigLoader
    io: IO

    def __init__(self, project_dir: str, server_port: int, server_path_prefix: str,
                 db_path: str, wait_time: int, timeout: int, log_level: str):
        # NOTE(review): server_port and server_path_prefix are accepted but
        # unused here -- presumably consumed by another layer; confirm before
        # removing them from the signature.
        self.io = IO()
        self.io.set_log_level(log_level)
        self.project_dirs = self._combine_project_dirs(project_dir)
        self.config_loader = ConfigLoader(self.project_dirs, self.io)
        self.repository = Repository(self.project_dirs, db_path)
        self.runner = Runner(dirs=self.project_dirs, config_loader=self.config_loader,
                             repository=self.repository, timeout=timeout, wait_time=wait_time, io=self.io)
        self.scheduler = Scheduler(self.runner, self.repository, self.io)

    def list_enabled_configs(self) -> List[str]:
        # Names of checks that are configured and not disabled.
        return self.repository.get_configured_checks(with_disabled=False)

    def list_available_checks(self) -> List[str]:
        # Names of all checks that could be configured.
        return self.repository.get_available_checks()

    def list_all_configs(self) -> List[str]:
        # Configured checks, including disabled ones.
        return self.repository.get_configured_checks(with_disabled=True)

    def spawn_threaded_application(self, refresh_time: int) -> None:
        """
        Spawns a background worker
        """
        self.scheduler.schedule_jobs_in_background(every_seconds=refresh_time)

    @staticmethod
    def get_version() -> dict:
        """
        Gets Infracheck version
        """
        return {
            "version": get_version(),
            "python": sys.version
        }

    def retrieve_checks(self) -> ExecutedChecksResultList:
        """
        Only retrieves results of last checking
        """
        return self.runner.get_checks_results(self.list_enabled_configs())

    def perform_checks(self) -> ExecutedChecksResultList:
        """
        Runs and returns results synchronously
        """
        configs = self.list_enabled_configs()
        self.runner.run_checks(configs)
        return self.runner.get_checks_results(configs)

    @staticmethod
    def _combine_project_dirs(project_dir: str) -> list:
        # Candidate roots in priority order; only those containing a
        # 'configured/' subdirectory are kept.
        paths = [
            # directory specified by eg. the "--directory" commandline parameter
            project_dir,

            # standalone application running from cloned repository
            os.path.dirname(os.path.realpath(__file__)) + '/../',

            # official docker container
            '/app',
            '/data',

            # current directory
            os.getcwd(),
        ]
        return list(filter(lambda path: os.path.isdir(path + '/configured'), paths))
| StarcoderdataPython |
3360202 | # Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm import AlgorithmImpl
# Implements the "None" algorith, where every client gets whatever
# it asks for.
class NoneAlgorithm(AlgorithmImpl):
    """Trivial "None" algorithm: every client is granted exactly what it
    asks for, and the server aggregates its clients' wants."""

    def __init__(self, algo, server_level):
        # Only the algorithm's default parameters are needed; server_level
        # is accepted for interface compatibility but unused.
        self._get_default_parameters(algo)

    def run_client(self, resource, cr):
        # Grant the client request in full.
        self.create_lease(resource, cr, cr.wants)

    def run_server(self, resource, sr):
        # Grant the server the sum of everything its clients want.
        total_wanted = sum(w.wants for w in sr.wants)
        self.create_lease(resource, sr, total_wanted)
| StarcoderdataPython |
5145622 | import logging
from .base_plugin import BasePlugin
logger = logging.getLogger(__name__)
class VolumePlotter(BasePlugin, plugin_name='volume_plot'):
    """Pass-through plugin that logs the configured input taken from the
    ``other`` mapping and returns the dataframe unchanged."""

    def process(self, df, meta, other):
        input_key = self.config['input']
        logger.critical(other[input_key])
        return df

    @staticmethod
    def lint(plugin_dict):
        # No lint rules for this plugin.
        pass
| StarcoderdataPython |
3426400 | chicken_soup = [
"爱迪生说,天才是百分之一的勤奋加百分之九十九的汗水。",
"查尔斯·史说,一个人几乎可以在任何他怀有无限热忱的事情上成功。",
"培根说过,深窥自己的心,而后发觉一切的奇迹在你自己。",
"歌德曾经说,流水在碰到底处时才会释放活力。",
"莎士比亚说,那脑袋里的智慧,就像打火石里的火花一样,不去打它是不肯出来的。",
"戴尔·卡耐基说,多数人都拥有自己不了解的能力和机会,都有可能做到未曾梦想的事情。",
"白哲特说,坚强的信念能赢得强者的心,并使他们变得更坚强。",
"伏尔泰说, 不经巨大的困难,不会有伟大的事业。",
"富勒曾经说, 苦难磨炼一些人,也毁灭另一些人。",
"文森特·皮尔说, 改变你的想法,你就改变了自己的世界。",
"拿破仑·希尔说, 不要等待,时机永远不会恰到好处。",
"塞涅卡说, 生命如同寓言,其价值不在与长短,而在与内容。",
"奥普拉·温弗瑞说, 你相信什么,你就成为什么样的人。",
"吕凯特说, 生命不可能有两次,但许多人连一次也不善于度过。",
"莎士比亚说, 人的一生是短的,但如果卑劣地过这一生,就太长了。",
"笛卡儿说, 我的努力求学没有得到别的好处,只不过是愈来愈发觉自己的无知。",
"左拉说, 生活的道路一旦选定,就要勇敢地走到底,决不回头。",
"米歇潘说, 生命是一条艰险的峡谷,只有勇敢的人才能通过。",
"吉姆·罗恩说, 要么你主宰生活,要么你被生活主宰。",
"日本谚语说, 不幸可能成为通向幸福的桥梁。",
"海贝尔说, 人生就是学校。在那里,与其说好的教师是幸福,不如说好的教师是不幸。",
"杰纳勒尔·乔治·S·巴顿说, 接受挑战,就可以享受胜利的喜悦。",
"德谟克利特说, 节制使快乐增加并使享受加强。",
"裴斯泰洛齐说, 今天应做的事没有做,明天再早也是耽误了。",
"歌德说, 决定一个人的一生,以及整个命运的,只是一瞬之间。",
"卡耐基说, 一个不注意小事情的人,永远不会成就大事业。",
"卢梭说, 浪费时间是一桩大罪过。",
"康德说, 既然我已经踏上这条道路,那么,任何东西都不应妨碍我沿着这条路走下去。",
"克劳斯·莫瑟爵士说, 教育需要花费钱,而无知也是一样。",
"伏尔泰说, 坚持意志伟大的事业需要始终不渝的精神。",
"亚伯拉罕·林肯说, 你活了多少岁不算什么,重要的是你是如何度过这些岁月的。",
"韩非说, 内外相应,言行相称。",
"富兰克林说, 你热爱生命吗?那么别浪费时间,因为时间是组成生命的材料。",
"马尔顿说, 坚强的信心,能使平凡的人做出惊人的事业。",
"笛卡儿说, 读一切好书,就是和许多高尚的人谈话。",
"塞涅卡说, 真正的人生,只有在经过艰难卓绝的斗争之后才能实现。",
"易卜生说, 伟大的事业,需要决心,能力,组织和责任感。",
"歌德说, 没有人事先了解自己到底有多大的力量,直到他试过以后才知道。",
"达尔文说, 敢于浪费哪怕一个钟头时间的人,说明他还不懂得珍惜生命的全部价值。",
"佚名说, 感激每一个新的挑战,因为它会锻造你的意志和品格。",
"奥斯特洛夫斯基说, 共同的事业,共同的斗争,可以使人们产生忍受一切的力量。 ",
"苏轼说, 古之立大事者,不惟有超世之才,亦必有坚忍不拔之志。",
"王阳明说, 故立志者,为学之心也;为学者,立志之事也。",
"歌德说, 读一本好书,就如同和一个高尚的人在交谈。",
"乌申斯基说, 学习是劳动,是充满思想的劳动。",
"别林斯基说, 好的书籍是最贵重的珍宝。",
"富兰克林说, 读书是易事,思索是难事,但两者缺一,便全无用处。",
"鲁巴金说, 读书是在别人思想的帮助下,建立起自己的思想。",
"培根说, 合理安排时间,就等于节约时间。",
"屠格涅夫说, 你想成为幸福的人吗?但愿你首先学会吃得起苦。",
"莎士比亚说, 抛弃时间的人,时间也抛弃他。",
"叔本华说, 普通人只想到如何度过时间,有才能的人设法利用时间。",
"博说, 一次失败,只是证明我们成功的决心还够坚强。 维",
"拉罗什夫科说, 取得成就时坚持不懈,要比遭到失败时顽强不屈更重要。",
"莎士比亚说, 人的一生是短的,但如果卑劣地过这一生,就太长了。",
"俾斯麦说, 失败是坚忍的最后考验。",
"池田大作说, 不要回避苦恼和困难,挺起身来向它挑战,进而克服它。",
"莎士比亚说, 那脑袋里的智慧,就像打火石里的火花一样,不去打它是不肯出来的。",
"希腊说, 最困难的事情就是认识自己。",
"黑塞说, 有勇气承担命运这才是英雄好汉。",
"非洲说, 最灵繁的人也看不见自己的背脊。",
"培根说, 阅读使人充实,会谈使人敏捷,写作使人精确。",
"斯宾诺莎说, 最大的骄傲于最大的自卑都表示心灵的最软弱无力。",
"西班牙说, 自知之明是最难得的知识。",
"塞内加说, 勇气通往天堂,怯懦通往地狱。",
"赫尔普斯说, 有时候读书是一种巧妙地避开思考的方法。",
"笛卡儿说, 阅读一切好书如同和过去最杰出的人谈话。",
"邓拓说, 越是没有本领的就越加自命不凡。",
"爱尔兰说, 越是无能的人,越喜欢挑剔别人的错儿。",
"老子说, 知人者智,自知者明。胜人者有力,自胜者强。",
"歌德说, 意志坚强的人能把世界放在手中像泥块一样任意揉捏。",
"迈克尔·F·斯特利说, 最具挑战性的挑战莫过于提升自我。",
"爱迪生说, 失败也是我需要的,它和成功对我一样有价值。",
"罗素·贝克说, 一个人即使已登上顶峰,也仍要自强不息。",
"马云说, 最大的挑战和突破在于用人,而用人最大的突破在于信任人。",
"雷锋说, 自己活着,就是为了使别人过得更美好。",
"布尔沃说, 要掌握书,莫被书掌握;要为生而读,莫为读而生。",
"培根说, 要知道对好事的称颂过于夸大,也会招来人们的反感轻蔑和嫉妒。",
"莫扎特说, 谁和我一样用功,谁就会和我一样成功。",
"马克思说, 一切节省,归根到底都归结为时间的节省。",
"莎士比亚说, 意志命运往往背道而驰,决心到最后会全部推倒。",
"卡莱尔说, 过去一切时代的精华尽在书中。",
"培根说, 深窥自己的心,而后发觉一切的奇迹在你自己。",
"罗曼·罗兰说, 只有把抱怨环境的心情,化为上进的力量,才是成功的保证。",
"孔子说, 知之者不如好之者,好之者不如乐之者。",
"达·芬奇说, 大胆和坚定的决心能够抵得上武器的精良。",
"叔本华说, 意志是一个强壮的盲人,倚靠在明眼的跛子肩上。",
"黑格尔说, 只有永远躺在泥坑里的人,才不会再掉进坑里。",
"普列姆昌德说, 希望的灯一旦熄灭,生活刹那间变成了一片黑暗。",
"维龙说, 要成功不需要什么特别的才能,只要把你能做的小事做得好就行了。",
"郭沫若说, 形成天才的决定因素应该是勤奋。",
"洛克说, 学到很多东西的诀窍,就是一下子不要学很多。",
"西班牙说, 自己的鞋子,自己知道紧在哪里。",
"拉罗什福科说, 我们唯一不会改正的缺点是软弱。",
"亚伯拉罕·林肯说, 我这个人走得很慢,但是我从不后退。",
"美华纳说, 勿问成功的秘诀为何,且尽全力做你应该做的事吧。",
"俾斯麦说, 对于不屈不挠的人来说,没有失败这回事。",
"阿卜·日·法拉兹说, 学问是异常珍贵的东西,从任何源泉吸收都不可耻。",
"白哲特说, 坚强的信念能赢得强者的心,并使他们变得更坚强。 ",
"查尔斯·史考伯说, 一个人几乎可以在任何他怀有无限热忱的事情上成功。 ",
"贝多芬说, 卓越的人一大优点是:在不利与艰难的遭遇里百折不饶。",
"莎士比亚说, 本来无望的事,大胆尝试,往往能成功。",
"卡耐基说, 我们若已接受最坏的,就再没有什么损失。",
"德国说, 只有在人群中间,才能认识自己。",
"史美尔斯说, 书籍把我们引入最美好的社会,使我们认识各个时代的伟大智者。",
"冯学峰说, 当一个人用工作去迎接光明,光明很快就会来照耀着他。",
"吉格·金克拉说, 如果你能做梦,你就能实现它。"
] | StarcoderdataPython |
370358 | <filename>startup/41-ESM_motion.py
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import scipy.optimize as opt
import os
from bluesky.plans import scan, adaptive_scan, spiral_fermat, spiral,scan_nd
from bluesky.plan_stubs import abs_set, mv
from bluesky.preprocessors import baseline_decorator, subs_decorator
# from bluesky.callbacks import LiveTable,LivePlot, CallbackBase
from pyOlog.SimpleOlogClient import SimpleOlogClient
from esm import ss_csv
from cycler import cycler
from collections import ChainMap
import math
import re
from builtins import input as pyinput
from boltons.iterutils import chunked
import sys
ip=IPython.get_ipython()
###MOVING MOTORS
### The following set of code are used to move motors to pre-determined locations. For instance
### moving the Low Temperature manipulator from the Low Temperature chamber to the Analysis chamber
### and vice-versa. They can involve running through a series of steps to ensure that it is safe, and
### then following a safe motion path to reach the desired location.
### The Locations for the various locations and steps are to be stored in .csv files that are read by the program
### prior to performing the move.
### ESM motion device definitions
## Definition of the motion device class located at ESM.
class ESM_motion_device:
def __init__(self,definition_file_path,name):
'''
Move a series of motors to one of a series of predefined locations.
This plan allows the user to move a series of motors to various locations as described in a
.csv setup file. There are a number of attribute functions involved that allow the motors to
be moved to the various locations from the current location. As described below, optionally
the motion can occur via a transfer location
Attribute Parameters and Functions
---------
definition_file_path_str: str
A string containing the filepath to the .csv file that defines the instance.
name: str
The name of the instance for use in later code.
data_dict: dict
A dictionary that holds the information from the definition file with each sample position
having it's own item callable by the position name keyword.
read_location_position_data: function
Extracts the data from the .csv definition file to data_dict
axes: function
Extracts the list of motion axes from data_dict.
axes_dict: function
Extracts the dictionary of motion axes an postions from data_dict for the inputted location.
chambers: function
Extracts the list of chambers.
chambers_dict: function
Extracts the dictionary of information about each chamber using the transfer locations defined
by the suffix '_Tr' in the .csv file.
current_chamber: function
Determines which chamber the manipualtor is currently in.
locations: function
Extracts a list of loctations from data_dict.
ask_user_continue: function
provides a user prompt in the command line asking them to confirm the move.
move_to: function
moves the series of motors to the location defined by "location".
DEFINITION FILE DESCRIPTION
---------------------------
The definition file is a .csv file which has a column for each motor axis to be defined in the instance. The first
column should be labelled 'position_info'. Additional columns for each motion axis associated with this "device"
should be included and lablled with the 'Bluesky' axis name. For each defined "location" the name should be written
in the 'position_info' column and the position for each of the motor axes written in the corresponding column, if any
motor axis column is left blank then this axis is not moved for this location.
If the optional parameters (see below) associated with a transfer axis are included then motion between the 2, or more
defined "sections" occurs by first moving to the defined 'transfer location' in the current section and then moving
the transfer axis to the new section then finally moving inside the new section to the required position.
This is useful for manipulators when they should be "centered" before moving through a gate-valve, for example.
When using this method, 1 transfer location for each section (or "chamber") should be defined, and given the
prefix '_Tr'. In addition to the motion axes these locations should also include the 'optional' columns:
chamber_info - To give a name to the chamber (all transfer locations should include a chamber).
gate_valve_list_info - The list of the gate-valves to open and close during motion (i.e. the gate valve
that seperates 2 chambers (can be "None").
gate_valve_open_list - Should be "Yes", "No" or "Manual" to indicate if the gate valve should be opened,
closed or if the gatevale should be opened manually prior to moving to this chamber
transfer_axis_name - should be the name of the motor axis that transfers the manipulator from one chamber to
another
transfer_axis_high_limit_info - should be the max value for the transfer axis while in this chamber.
transfer_axis_low_limit_info - should be the min value for the transfer axis while in this chamber.
For the non transfer locations all of these optional cloumns, with the exception of the chamber_info column, can
be left blank.
'''
#define the inputted values when defining an instance.
self.definition_file_path=definition_file_path
self.name=name
#define the information dictionaries for the instance.
self.data_dict=self.read_location_position_data
# Define the class properties here
@property
#Define the information functions here
def read_location_position_data(self):
'''
Reads the definition csv file and record the info in data_dict.
This function reads the definition .csv file and writes the information to data_dict in order to
be used in the future motion.
PARAMETERS
----------
self.definition_file_path : str
The path to the definiton .csv file for this instance.
data_dict_temp : dict
The output dictionary that contains the inforamtion form the .csv definition file.
'''
#define the dictionary to store the setup info.
data_dict_temp = {}
# extract the information formthe file and write it to the dictionary
f=pd.read_csv(self.definition_file_path)
f=f.set_index('position_info')
for row_name in f.index:
data_dict_temp[row_name]=dict(f.loc[row_name])
return data_dict_temp
def locations(self):
'''
Reads through data_dict file and generates a list of 'locations'.
This function sorts through data_dict and generates a list of location names. This is done assuming
that the locations is the keyword list from data_dict.
PARAMETERS
----------
location_list :
The output list that contains the locations names.
'''
#define the locations list
locations_list=list( self.data_dict.keys() )
return locations_list
def detectors(self):
'''
Reads through data_dict file and generates a list of 'detector channels'.
This function sorts through data_dict and generates a list of detector channel names. This is done assuming
that only the detectors in data_dict do not have the suffixare in list form in the column
'detector_list_info'.
PARAMETERS
----------
detector_list :
The output list that contains the detector channel names.
'''
#define the output list
detector_list = (self.data_dict[list(self.data_dict.keys())[0]]['detector_list_info'] ).split(',')
return detector_list
def axes(self):
'''
Reads through data_dict file and generates a list of 'axes names'.
This function sorts through data_dict and generates a list of axes names. This is done assuming
that only the motion axes keywords in data_dict do not have the suffix '_info'.
PARAMETERS
----------
axes_list :
The output list that contains the axes names.
'''
#define the output list
axes_list = list(key for key in list(self.data_dict[ list(self.data_dict.keys())[0] ].keys()) if not
key.endswith('_info') )
return axes_list
def axes_dict(self, to_location ):
'''
Reads through data_dict file and generates a dictionary with the axes information.
This function sorts through data_dict and generates a dictionary, with the keywords being the
possible locations and the entries being a dictionary of the axes information.
PARAMETERS
----------
axis_dict : dict
The output dictionary that contains the chamber names and information.
'''
#define the input parameter
self.to_location=to_location
#define the output dictionary
axis_dict={}
#define the list of axes
axis_list=self.axes()
#define the dicionary of chamber info
for axis in axis_list:
axis_dict[axis]=self.data_dict[to_location][axis]
return axis_dict
def chambers(self):
'''
Reads through data_dict file and generates a list of chambers.
This function sorts through data_dict and generates a list of chambers using the 'chambers' column.
PARAMETERS
----------
chamber_list : list
The list of chamber information.
'''
chamber_list=list(s for s in list(self.data_dict.keys()) if s.endswith('_Tr'))
if len(chamber_list) <= 0:
chamber_list=['No chamber']
return chamber_list
def chambers_dict(self):
'''
Reads through data_dict file and generates a dictionary with the chamber information.
This function sorts through data_dict and generates a dictionary, with the keywords being the
possible chambers and the entries being a dictionary of the important information. This is done
assuming that only transfer postions for each chamber have the suffix '_Tr'.
PARAMETERS
----------
chamber_dict : dict
The output dictionary that contains the chamber names and information.
'''
#define the output dictionary
chamber_dict={}
#define the list of chambers
chamber_list=self.chambers()
if chamber_list[0] == 'No chamber':
chamber_dict['No chamber'] = chamber_list
else:
#define the dicionary of chamber info
for pos in chamber_list:
chamber_dict[self.data_dict[pos]['chamber_info']]=self.data_dict[pos]
return chamber_dict
def current_chamber(self):
    """Return the name of the chamber currently holding the manipulator.

    The transfer-axis motor position is compared against each chamber's
    configured low/high limits; returns 'No chamber' for single-chamber
    devices and 'error' when the position falls outside every range.
    """
    chamber_dict = self.chambers_dict()
    if 'No chamber' in chamber_dict:
        return 'No chamber'
    # Resolve the transfer-axis motor object from its 'device_axis' name
    # (any chamber record carries the same transfer axis name).
    any_chamber = list(chamber_dict)[0]
    obj, _, attr = chamber_dict[any_chamber]['transfer_axis_name_info'].partition('_')
    axis_pos = getattr(ip.user_ns[obj], attr).position
    chamber_name = 'error'
    for chamber, info in chamber_dict.items():
        if (float(info['transfer_axis_low_limit_info']) <= axis_pos
                <= float(info['transfer_axis_high_limit_info'])):
            chamber_name = chamber
    return chamber_name
def status(self, output='string'):
    """
    Report the value of every detector channel and the position of every
    motor axis defined for this device.

    Parameters
    ----------
    output : str, optional
        What to do with the result (default 'string'):
          - 'string'          : print a formatted report.
          - 'string_and_file' : print the report and append it to the
                                device status file.
          - 'dict'            : return a dict mapping each motor device
                                name to its list of axis names.
    """
    # stdlib import done locally: 'time' was never imported at module
    # level, so the original call to time.strftime raised NameError.
    import time

    def _group_by_device(names):
        # Group 'device_channel' style names by their device prefix (the
        # part before the first underscore).  The guard counter mirrors
        # the original code's protection against runaway loops.
        grouped = {}
        remaining = list(names)
        guard = 0
        while remaining and guard <= 20:
            device, _, _ = remaining[0].partition('_')
            grouped[device] = [n for n in remaining if n.startswith(device)]
            remaining = [n for n in remaining if not n.startswith(device)]
            guard += 1
        return grouped

    det_status_dict = _group_by_device(self.detectors())
    status_dict = _group_by_device(self.axes())

    f_string = '************************************************************\n'
    f_string += self.name + ' STATUS: ' + time.strftime("%c") + '\n'
    f_string += '************************************************************\n\n'

    # Detector (EPICS signal) readbacks.
    f_string += 'EPICS SIGNAL COMPONENTS\n'
    f_string += '-----------------------\n'
    for device in sorted(det_status_dict):
        f_string += ' ' + device + ':\n'
        for det in det_status_dict[device]:
            obj, _, attr = det.partition('_')
            f_string += '\t ' + det.ljust(25) + ' --> %f\n' % getattr(ip.user_ns[obj], attr).get()
        f_string += '\n'

    # Motor positions.
    f_string += 'EPICS MOTOR COMPONENTS\n'
    f_string += '-----------------------\n'
    for device in sorted(status_dict):
        f_string += ' ' + device + ':\n'
        for axis in status_dict[device]:
            obj, _, attr = axis.partition('_')
            f_string += '\t ' + axis.ljust(25) + ' --> %f\n' % getattr(ip.user_ns[obj], attr).position
        f_string += '\n'

    if output.startswith('string'):
        print(f_string)
    if output.endswith('file'):
        # Append to the per-device status log; 'with' guarantees the file
        # is closed even on write errors (the original left it open).
        fl = "/direct/XF21ID1/status_files/" + self.name + '_status'
        with open(fl, "a") as f:
            f.write(f_string)
    if output == 'dict':
        return status_dict
def ask_user_continue(self, request_str):
    """
    Prompt the user (prefixed with *request_str*) until they answer
    'yes' or 'no'; return 1 for 'yes' and 0 for 'no'.
    """
    answers = {'yes': 1, 'no': 0}
    while True:
        sys.stdout.write(request_str + ', continue (yes or no):')
        reply = pyinput().lower()
        if reply in answers:
            return answers[reply]
#Define the motion functions here
def move_to(self, location):
    '''
    Moves the manipulator to the position given by "location".
    This is a bluesky plan (generator): motion happens via ``yield from
    mv(...)``.  For multi-chamber devices the motion is routed through the
    current chamber's transfer position, then along the transfer axis to
    the destination chamber, then to the final position.  The user is
    prompted before any motion; answering 'no' raises RuntimeError.
    PARAMETERS
    ----------
    location : str
        The position to move the manipulator to.
    '''
    #define the axes that need to be moved in the transfer.
    axis_dict=self.axes_dict(location)
    # Axes with a blank (NaN) entry in the definition file are not moved.
    axis_list = list(axis for axis in list(axis_dict.keys()) if not np.isnan(axis_dict[axis]) )
    #check if the transfer goes between chambers
    if self.current_chamber() == 'No chamber':
        # Single-chamber device: move each axis straight to its target.
        if self.ask_user_continue('This will move the manipulator, unless print_summary was used to call it') ==0:
            raise RuntimeError('user quit move')
        else:
            for axis in axis_list:
                #define the motor record and axis attribute for the transfer axis
                obj,_,attr = axis.partition('_')
                #define the transfer axis object
                move_axis=getattr(ip.user_ns[obj],attr)
                #move the axis to the new location
                yield from mv(move_axis, axis_dict[axis] )
    #if the transfer has multiple chambers.
    else:
        from_chamber=self.current_chamber()
        to_chamber=self.data_dict[location]['chamber_info']
        chamber_dict=self.chambers_dict()
        if from_chamber == 'error':
            raise RuntimeError('current manipulator position is outside all "chamber" ranges')
        elif from_chamber == to_chamber :
            if self.ask_user_continue('This will move the manipulator, unless print_summary was used to call it') ==0:
                raise RuntimeError('user quit move')
            else:
                #MOVE DIRECTLY TO THE NEW POSITION
                for axis in axis_list:
                    #define the motor record and axis attribute for the transfer axis
                    obj,_,attr = axis.partition('_')
                    #define the transfer axis object
                    move_axis=getattr(ip.user_ns[obj],attr)
                    #move the axis to the new location
                    yield from mv(move_axis, axis_dict[axis] )
        elif chamber_dict[to_chamber]['gate_valve_open_info'] in ('Yes','Manual') :
            if self.ask_user_continue('This will move the manipulator and open or close gate valves,'+
                                      ' unless print_summary was used to call it') ==0:
                raise RuntimeError('user quit move')
            elif chamber_dict[to_chamber]['gate_valve_open_info'] == 'Manual' :
                # NOTE(review): after the user confirms the valves are open,
                # this branch performs no motion -- confirm whether the
                # transfer sequence below was meant to run here as well.
                if self.ask_user_continue('one or more gate valves must be opened or closed manually. "ARE GATE VALVES OPEN"') ==0:
                    raise RuntimeError('user quit move')
            else:
                from_axes_list= list(key for key in chamber_dict[from_chamber].keys() if not key.endswith('_info') )
                from_axis_list=list(axis for axis in from_axes_list if not np.isnan(chamber_dict[from_chamber][axis]) )
                ####MOVE TO 'FROM CHAMBER' TRANSFER POSITION####
                for axis in from_axis_list:
                    if not axis.endswith('_info'):
                        #define the motor record and axis attribute for the transfer axis
                        obj,_,attr = axis.partition('_')
                        #define the transfer axis object
                        move_axis=getattr(ip.user_ns[obj],attr)
                        #move the axis to the new location
                        yield from mv(move_axis, chamber_dict[from_chamber][axis] )
                ####MOVE TO 'TO CHAMBER' TRANSFER POSITION ALONG 'TRANSFER AXIS'####
                #define the motor record and axis attribute for the transfer axis
                axis=chamber_dict[to_chamber]['transfer_axis_name_info']
                obj,_,attr = axis.partition('_')
                #define the transfer axis object
                move_axis=getattr(ip.user_ns[obj],attr)
                #move the axis to the new location
                yield from mv(move_axis, chamber_dict[to_chamber][axis] )
                ####MOVE TO POSITION IN 'TO CHAMBER'####
                for axis in axis_list:
                    #define the motor record and axis attribute for the transfer axis
                    obj,_,attr = axis.partition('_')
                    #define the transfer axis object
                    transfer_axis=getattr(ip.user_ns[obj],attr)
                    #move the axis to the new location
                    yield from mv(transfer_axis, axis_dict[axis] )
        else:
            if self.ask_user_continue('This will move the manipulator to the other chamber'+
                                      ', unless print_summary was used to call it') ==0:
                raise RuntimeError('user quit move')
            else:
                ####MOVE TO 'FROM CHAMBER' TRANSFER POSITION####
                from_axes_list= list(key for key in chamber_dict[from_chamber].keys() if not key.endswith('_info') )
                from_axis_list=list(axis for axis in from_axes_list if not np.isnan(chamber_dict[from_chamber][axis]) )
                for axis in from_axis_list:
                    if not axis.endswith('_info'):
                        #define the motor record and axis attribute for the transfer axis
                        obj,_,attr = axis.partition('_')
                        #define the transfer axis object
                        move_axis=getattr(ip.user_ns[obj],attr)
                        #move the axis to the new location
                        yield from mv(move_axis, chamber_dict[from_chamber][axis] )
                ####MOVE TO 'TO CHAMBER' TRANSFER POSITION ALONG 'TRANSFER AXIS'####
                #define the motor record and axis attribute for the transfer axis
                axis=chamber_dict[to_chamber]['transfer_axis_name_info']
                obj,_,attr = axis.partition('_')
                #define the transfer axis object
                move_axis=getattr(ip.user_ns[obj],attr)
                #move the axis to the new location
                yield from mv(move_axis, chamber_dict[to_chamber][axis] )
                ####MOVE TO POSITION IN 'TO CHAMBER'####
                for axis in axis_list:
                    #define the motor record and axis attribute for the transfer axis
                    obj,_,attr = axis.partition('_')
                    #define the transfer axis object
                    transfer_axis=getattr(ip.user_ns[obj],attr)
                    #move the axis to the new location
                    yield from mv(transfer_axis, axis_dict[axis] )
## Define the instances of the ESM_device class
#The low temperature manipulator
# LT_manip=ESM_motion_device(os.environ['HOME']+
#                     '/.ipython/profile_collection/startup/motion_definition_files/LT_manip_definition.csv',
#                     'LOW TEMPERATURE MANIPULATOR')
#The beamline as a whole (swap branches, etc).
# NOTE(review): instantiation reads the definition csv at import time; the
# startup file will fail to load if the csv is missing.
Beamline=ESM_motion_device(os.path.join(get_ipython().profile_dir.startup_dir, 'motion_definition_files/Beamline_definition.csv'),
                           'BEAMLINE')
| StarcoderdataPython |
1715545 | # Valid Mountain Array
from typing import List
class Solution:
    def validMountainArray(self, arr: List[int]) -> bool:
        """Return True iff arr strictly increases to a single peak and then
        strictly decreases (plateaus are not allowed, and the peak may not
        be the first or last element)."""
        n = len(arr)
        if n < 3:
            return False
        # Climb the strictly increasing slope.
        i = 0
        while i + 1 < n and arr[i] < arr[i + 1]:
            i += 1
        # The peak cannot be at either end of the array.
        if i == 0 or i == n - 1:
            return False
        # Descend the strictly decreasing slope.
        while i + 1 < n and arr[i] > arr[i + 1]:
            i += 1
        # Valid only if the descent reached the final element.
        return i == n - 1
if __name__ == "__main__":
    # Ad-hoc manual check; each reassignment below overrides the previous
    # test case, so only the last array is actually evaluated.
    sol = Solution()
    arr = [2,1]
    arr = [3,5,5]
    arr = [0,3,3,1]
    arr = [9,8,7,6,5]
    print(sol.validMountainArray(arr))
| StarcoderdataPython |
3475359 | <reponame>asthajn6/archive<gh_stars>1-10
def minimum_spanning_tree(edges):
    '''Solves the minimum spanning tree (MST) problem using Prim's algorithm.

    This algorithm "grows" the tree by starting with the smallest edge and then
    iterativly adding the smallest edge that extends the tree without creating
    a cycle.

    Our version simply sorts the edges by weight and linearly searches the list
    for the first edge it can add. Since we're searching the list every time,
    this algorithm has a time complexity in ``O(e^2)`` where ``e`` is the
    number of edges. In practice, this is fine for small graphs (a few hundred
    edges), but it can be a problem for large graphs.

    You can improve the time complexity by using a min-heap of unvisited nodes
    ordered by the smallest edge that connects it to the tree (or infinity if
    no such edge exists). As you add nodes/edges to the tree, you update the
    heap. (You'd also want a map from nodes to the set of edges involving that
    node). This can bring your time complexity down to ``O(e * log(n))`` where
    ``n`` is the number of nodes. This version is left as an exercise to the
    reader.

    The input list is not modified.

    Arguments:
        edges (list of edges):
            The list of edges forming the graph. Each edge is a tripple
            ``(A, B, W)`` where ``A`` and ``B`` are labels for the nodes
            connected by the edge and ``W`` is the weight of the edge.

    Returns:
        tree (list of edges):
            The minimum spanning tree of the input graph. It is an edge list
            like the input, but only containg the edges of the MST.
    '''
    # Work on a sorted copy: the previous version sorted `edges` in place and
    # deleted entries from it, silently mutating the caller's list.
    remaining = sorted(edges, key=lambda edge: edge[2])

    # We "grow" the tree by appending to this list.
    tree = []

    # We also keep up with the nodes visited by the tree as a hash-set.
    # This lets us check if adding an edge creates a cycle in constant time.
    visited = set()

    # Extend the tree by adding the smallest edge that extends the tree without
    # creating a cycle. If the tree is currently empty, this adds the edge with
    # the lowest weight.
    #
    # This returns False if no acceptable edge is found and True otherwise.
    def grow_tree():
        for i, edge in enumerate(remaining):
            (a, b, w) = edge

            # One of the nodes must touch the existing tree and the other
            # node must be new, or the tree must be empty.
            valid = (a in visited and b not in visited) or \
                    (a not in visited and b in visited) or \
                    len(tree) == 0

            # As soon as we find a valid edge, we add it to the graph and exit.
            # This is why the algorithm is called "greedy".
            if valid:
                del remaining[i]
                tree.append(edge)
                visited.add(a)
                visited.add(b)
                return True
        return False

    # Call `grow_tree` until nothing new can be added.
    while grow_tree():
        pass

    # This creates the minimum spanning tree!
    return tree
def test_mst():
    """Check ``minimum_spanning_tree`` against the Wikipedia example graph.

    Graph and solution taken from:
    https://en.wikipedia.org/wiki/Minimum_spanning_tree
    https://upload.wikimedia.org/wikipedia/commons/d/d2/Minimum_spanning_tree.svg
    """
    graph = [
        (0, 1, 6), (0, 2, 3), (0, 3, 9),
        (1, 2, 4), (1, 4, 2), (1, 5, 9),
        (2, 4, 2), (2, 3, 9), (2, 6, 9),
        (3, 6, 8), (3, 7, 18), (4, 5, 9),
        (4, 6, 8), (5, 6, 7), (5, 8, 4),
        (5, 9, 5), (6, 7, 10), (6, 9, 9),
        (7, 8, 4), (7, 9, 3), (8, 9, 1)
    ]
    expected = {
        (0, 2, 3),
        (1, 4, 2),
        (2, 4, 2),
        (3, 6, 8),
        (4, 6, 8),
        (5, 6, 7),
        (5, 8, 4),
        (7, 9, 3),
        (8, 9, 1)
    }
    computed = minimum_spanning_tree(graph)
    # Edge ordering is irrelevant, so compare as sets.
    assert expected == set(computed)
| StarcoderdataPython |
4908797 | <filename>startup/SST/RSoXS/Functions/acquisitions.py
import datetime
from .alignment import load_sample, load_configuration, avg_scan_time, move_to_location
from .configurations import all_out
from .sample_spreadsheets import save_samplesxls
from operator import itemgetter
from ..HW.slackbot import rsoxs_bot
from ...CommonFunctions.functions import boxed_text, run_report, colored
from ..startup import db
from ..Functions import rsoxs_queue_plans
run_report(__file__)
def run_sample(sam_dict):
    """Move the given sample into place, then run every acquisition queued on it."""
    yield from load_sample(sam_dict)
    queued = sam_dict["acquisitions"]
    yield from do_acquisitions(queued)
def do_acquisitions(acq_list):
    """Run each acquisition dict in order and collect the resulting uids.

    Returns the list of uids (one per acquisition) via StopIteration.
    """
    uids = []
    for acquisition in acq_list:
        uids.append((yield from run_acquisition(acquisition)))
    return uids
def run_acquisition(acq):
    """Run a single acquisition dict the old way (from run_bar or from the bar
    directly).

    Loads the acquisition's configuration, looks up the plan by name on
    ``rsoxs_queue_plans``, and runs it with the stored args/kwargs.
    Returns the plan's uid, or -1 if the plan name is unknown.
    """
    yield from load_configuration(acq["configuration"])
    try:
        plan = getattr(rsoxs_queue_plans, acq["plan_name"])
    except AttributeError:
        # getattr only raises AttributeError for an unknown plan name.  The
        # previous blanket `except Exception` also hid unrelated failures
        # (e.g. a KeyError from a malformed acquisition dict), which now
        # propagate to the caller instead of silently returning -1.
        print("Invalid Plan Name")
        return -1
    uid = yield from plan(*acq["args"], **acq["kwargs"])
    return uid
def run_queue_plan(
    acquisition_plan_name, configuration, sample_md, simulation=False, **kwargs
):
    """Run (or, with simulation=True, estimate) a single queued acquisition.

    With ``simulation=True`` no hardware is touched; the average historical
    scan time for the plan is returned instead.  The special plan name
    "all_out" retracts everything and returns a uid of 0.
    """
    if simulation:
        return avg_scan_time(acquisition_plan_name)
    if acquisition_plan_name == "all_out":
        yield from all_out()
        return 0
    # Normal path: configure, move to the sample, then run the named plan.
    yield from load_configuration(configuration)
    yield from move_to_location(sample_md["location"])
    planref = getattr(rsoxs_queue_plans, acquisition_plan_name)
    uid = yield from planref(md=sample_md, **kwargs)
    return uid
def run_bar(
    bar,
    sort_by=["sample_num"],
    dryrun=0,
    rev=[False],
    delete_as_complete=True,
    retract_when_done=False,
    save_as_complete="",
):
    """
    run all sample dictionaries stored in the list bar
    @param bar: a list of sample dictionaries
    @param sort_by: list of strings determining the sorting of scans
        strings include project, configuration, sample_id, plan, plan_args, spriority, apriority
        within which all of one acquisition, etc
    @param dryrun: Print out the list of plans instead of actually doing anything - safe to do during setup
    @param rev: list the same length of sort_by, of booleans, whether to reverse that sort
    @param delete_as_complete: remove the acquisitions from the bar as we go, so we can automatically start back up
    @param retract_when_done: go to throughstation mode at the end of all runs.
    @param save_as_complete: if a valid path, will save the running bar to this position in case of failure
    @return:
    """
    config_change_time = 120  # time to change between configurations, in seconds.
    save_to_file = False
    try:
        # Probe that save_as_complete is a writable path up front.  Use a
        # context manager so the probe handle is closed instead of leaked
        # (the previous `open(...)` never closed it).  Note this truncates
        # an existing file, matching the original behavior.
        with open(save_as_complete, "w"):
            pass
    except OSError:
        save_to_file = False
    else:
        save_to_file = True
    # Flatten the bar into one row per (sample, acquisition) pair so it can
    # be sorted by any of the keys in `switcher` below.
    list_out = []
    for samp_num, s in enumerate(bar):
        sample = s
        sample_id = s["sample_id"]
        sample_project = s["project_name"]
        for acq_num, a in enumerate(s["acquisitions"]):
            if "priority" not in a.keys():
                a["priority"] = 50
            list_out.append(
                [
                    sample_id,  # 0 X
                    sample_project,  # 1 X
                    a["configuration"],  # 2 X
                    a["plan_name"],  # 3
                    avg_scan_time(a["plan_name"], 2),  # 4 calculated plan time
                    sample,  # 5 full sample dict
                    a,  # 6 full acquisition dict
                    samp_num,  # 7 sample index
                    acq_num,  # 8 acq index
                    a["args"],  # 9 X
                    s["density"],  # 10
                    s["proposal_id"],  # 11 X
                    s["sample_priority"],  # 12 X
                    a["priority"],
                ]
            )  # 13 X
    switcher = {
        "sample_id": 0,
        "project": 1,
        "config": 2,
        "plan": 3,
        "plan_args": 9,
        "proposal": 11,
        "spriority": 12,
        "apriority": 13,
        "sample_num": 7,
    }
    # add anything to the above list, and make a key in the above dictionary,
    # using that element to sort by something else
    if isinstance(sort_by, str):
        # Allow a single key/flag instead of lists.
        sort_by = [sort_by]
        rev = [rev]
    try:
        # Sort keys are applied least-significant first, so reverse the
        # user-supplied order.  Build reversed *copies*: the previous
        # `.reverse()` calls mutated the caller's lists (and the mutable
        # default arguments) in place.
        sort_by = list(reversed(sort_by))
        rev = list(reversed(rev))
    except TypeError:
        print(
            "sort_by needs to be a list of strings\n"
            "such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
        )
        return
    try:
        # Python's sort is stable, so sorting repeatedly from least- to
        # most-significant key yields the combined ordering.
        for k, r in zip(sort_by, rev):
            list_out = sorted(list_out, key=itemgetter(switcher[k]), reverse=r)
    except KeyError:
        print(
            "sort_by needs to be a list of strings\n"
            "such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
        )
        return
    if dryrun:
        text = ""
        total_time = 0
        for i, step in enumerate(list_out):
            # check configuration
            # check sample position
            # check acquisition
            text += "load {} from {}, config {}, run {} (p {} a {}), starts @ {} takes {}\n".format(
                step[5]["sample_name"],
                step[1],
                step[2],
                step[3],
                step[12],
                step[13],
                time_sec(total_time),
                time_sec(step[4]),
            )
            total_time += step[4]
            # The first scan always incurs a configuration load.  (Previously
            # i == 0 accidentally compared against list_out[-1], the *last*
            # entry, and could skip the initial configuration change.)
            if i == 0 or step[2] != list_out[i - 1][2]:
                total_time += config_change_time
        text += (
            f"\n\nTotal estimated time including config changes {time_sec(total_time)}"
        )
        boxed_text("Dry Run", text, "lightblue", width=120, shrink=True)
    else:
        run_start_time = datetime.datetime.now()
        for i, step in enumerate(list_out):
            # Estimate remaining time from the historical average of each plan.
            time_remaining = sum(avg_scan_time(row[3], nscans=2) for row in list_out[i:])
            this_step_time = avg_scan_time(step[3], nscans=2)
            start_time = datetime.datetime.now()
            total_time = datetime.datetime.now() - run_start_time
            boxed_text(
                "Scan Status",
                "\nTime so far: {}".format(str(total_time))
                + "\nStarting scan {} out of {}".format(
                    colored(f"#{i + 1}", "blue"), len(list_out)
                )
                + "{} of {} in project {} Proposal # {}\n which should take {}\n".format(
                    colored(step[3], "blue"),  # plan
                    colored(step[0], "blue"),  # sample_id
                    colored(step[1], "blue"),  # project
                    colored(step[11], "blue"),  # proposal
                    time_sec(this_step_time),
                )
                + f"time remaining approx {time_sec(time_remaining)} \n\n",
                "red",
                width=120,
                shrink=True,
            )
            rsoxs_bot.send_message(
                f"Starting scan {i + 1} out of {len(list_out)}\n"
                + f"{step[3]} of {step[0]} in project {step[1]} Proposal # {step[11]}"
                f"\nwhich should take {time_sec(this_step_time)}"
                + f"\nTime so far: {str(total_time)}"
                f"time remaining approx {time_sec(time_remaining)}"
            )
            yield from load_configuration(step[2])  # move to configuration
            yield from load_sample(step[5])  # move to sample / load sample metadata
            yield from do_acquisitions(
                [step[6]]
            )  # run acquisition (will load configuration again)
            uid = db[-1].uid
            print(f"acq uid = {uid}")
            scan_id = db[uid].start["scan_id"]
            timestamp = db[uid].start["time"]
            success = db[uid].stop["exit_status"]
            # Record what ran (and whether it succeeded) on the sample itself.
            bar[step[7]].setdefault("acq_history", []).append(
                {
                    "uid": uid,
                    "scan_id": scan_id,
                    "acq": step[6],
                    "time": timestamp,
                    "status": success,
                }
            )
            if delete_as_complete:
                bar[step[7]]["acquisitions"].remove(step[6])
            if save_to_file:
                # Checkpoint the bar so a crash can resume where it left off.
                save_samplesxls(bar, save_as_complete)
            elapsed_time = datetime.datetime.now() - start_time
            rsoxs_bot.send_message(
                f"Acquisition {scan_id} complete. Actual time : {str(elapsed_time)},"
            )
        rsoxs_bot.send_message("All scans complete!")
        if retract_when_done:
            yield from all_out()
def time_sec(seconds):
    """Format a duration given in seconds as H:MM:SS, dropping microseconds."""
    formatted = str(datetime.timedelta(seconds=seconds))
    # str(timedelta) appends ".ffffff" for fractional seconds; strip it.
    return formatted.partition(".")[0]
# function to take in excel sheet and perhaps some other sorting options, and produce a list of dictionaries with
# plan_name, arguments
# include in this list all of the metadata about the sample.
| StarcoderdataPython |
9732361 | <filename>DifferentialCalc/Laplacian.py
from manimlib.imports import *
class Opening_Quote(Scene):
    """Intro scene: shows a quote, then introduces Poisson's and Laplace's
    equations and outlines the two-part plan for the video."""

    def construct(self):
        # Opening quote with attribution.
        quote1 = TextMobject("Imagination is more important than knowledge")
        quote1.set_color(RED)
        quote1.to_edge(UP)
        author = TextMobject("-<NAME>")
        author.set_color(GOLD_B)
        author.scale(0.75)
        author.next_to(quote1.get_corner(DOWN+RIGHT),DOWN)
        self.play(Write(quote1),Write(author))
        # Introduce Laplace's equation as a second-order PDE.
        LINE1 = TextMobject("What is Laplace's equation?\\\\ It is a second-order P.D.E of form $$\\nabla^2f=0$$")
        LINE1.to_edge(UP,buff=2)
        LINE1.set_color(BLUE)
        self.play(FadeIn(LINE1))
        self.wait()
        LINE2 = TextMobject("Where $\\nabla^2 = \\vec \\nabla \\cdot \\vec \\nabla $ and f is scalar valued function.")
        LINE2.next_to(LINE1,DOWN)
        self.play(FadeIn(LINE2))
        self.wait(2)
        # Replace the quote with a title and show the Poisson equation.
        Poission = TexMobject("\\nabla^2V=-\\frac{\\rho}{\\epsilon}")
        Poission.set_color(GREEN)
        title = Title("Laplace's Equation")
        title.set_color(RED)
        title.to_edge(UP)
        self.play(Transform(quote1,title),FadeOut(author))
        self.remove(LINE2)
        self.wait()
        Poission.next_to(title,DOWN)
        Laplace = TexMobject("\\nabla^2V=0")
        Laplace.set_color(GREEN)
        rect = SurroundingRectangle(Poission)
        rect.set_color(GOLD_B)
        rect1 = SurroundingRectangle(Laplace)
        # NOTE(review): this recolors `rect` a second time; it likely was
        # meant to be `rect1.set_color(GOLD_B)` — confirm against the render.
        rect.set_color(GOLD_B)
        Poission_T = TextMobject("Poisson Equation")
        Poission_T.next_to(rect, RIGHT)
        Poission_T.scale(0.75)
        self.play(Transform(LINE1,Poission),ShowCreation(rect),Write(Poission_T))
        self.wait(2)
        LINE3 = TextMobject("When $\\rho =0$ Poisson's eqn Transform to Laplace eqn.")
        LINE3.next_to(Poission.get_corner(DOWN+RIGHT),DOWN)
        self.play(Write(LINE3))
        self.wait()
        # Morph Poisson -> Laplace (equation, rectangle, and label together).
        Laplace_T = TextMobject("Laplace Equation")
        Laplace_T.scale(0.75)
        Laplace_T.next_to(rect1, RIGHT)
        self.play(Transform(Poission,Laplace),Transform(rect,rect1),Transform(Poission_T,Laplace_T))
        self.wait(2)
        LINE4 = TextMobject("The Solution of Laplace eqn are called $\\textbf{Harmonic Functions}$.")
        LINE4.set_color(ORANGE)
        LINE4.next_to(Laplace,DOWN)
        self.play(Write(LINE4))
        self.wait(2)
        LINE5 = TextMobject("Laplacian is used in various fields of maths and physics but\\\\We will examine its's value in electrostatics.")
        LINE5.next_to(LINE4,DOWN)
        self.play(GrowFromCenter(LINE5))
        self.wait(2)
        # Clear everything and present the two-step outline for the series.
        self.remove(*self.mobjects)
        Laplace.next_to(title.get_corner(DOWN+LEFT),DOWN)
        self.play(Write(title),FadeIn(Laplace))
        self.wait(2)
        TYPE = TextMobject("Here We will learn the topic in 2 steps:")
        TYPE.set_color(BLUE)
        TYPE.next_to(title,DOWN)
        TYPE1 = TextMobject("1.General Intution of Laplacian","2.Laplace's Equation in many dimension and their Solution")
        TYPE1[0].set_color(GOLD_B)
        TYPE1[1].set_color(GOLD_B)
        TYPE1[0].next_to(TYPE,DOWN)
        TYPE1[1].next_to(TYPE1[0],DOWN)
        self.play(Write(TYPE),GrowFromCenter(TYPE1[0]),GrowFromCenter(TYPE1[1]))
        self.wait(2)
        LINE6 = TextMobject("We will see the Intution and significance.. \\\\ and also what it means to be the Laplacian in next video.")
        LINE6.set_color(ORANGE)
        LINE6.to_edge(DOWN)
        self.play(GrowFromCenter(LINE6))
        self.wait(2)
class Introduction_Intution(Scene):
    """Recap scene: restates Laplace's equation and frames the Laplacian as
    divergence-of-gradient before the 3D visualizations."""

    def construct(self):
        # Four-part recap text, stacked top-right and colored individually.
        INTEXT1 = TextMobject("In the previous video we have seen that"," $\\nabla^2V=0$ is called Laplace's Equation","But have does it signify?," ,"Here we will learn that:")
        INTEXT1[0].set_color(RED)
        INTEXT1[1].set_color(BLUE)
        INTEXT1[2].set_color(GOLD_B)
        INTEXT1[3].set_color(GREEN)
        INTEXT1[0].to_edge(UP+RIGHT)
        INTEXT1[1].next_to(INTEXT1[0],DOWN)
        INTEXT1[2].next_to(INTEXT1[1],DOWN)
        INTEXT1[3].next_to(INTEXT1[2],DOWN)
        self.play(Write(INTEXT1[0]),Write(INTEXT1[1]),Write(INTEXT1[2]),Write(INTEXT1[3]))
        self.wait(3)
        self.play(FadeOut(INTEXT1[0]),FadeOut(INTEXT1[1]),FadeOut(INTEXT1[2]),FadeOut(INTEXT1[3]))
        self.wait()
        INtitle = Title("Laplace's Equation")
        INtitle.set_color(RED)
        INtitle.to_edge(UP)
        self.play(GrowFromCenter(INtitle))
        # Break the Laplacian down as (divergence)(gradient).
        INTEXT2 =TextMobject("Now to understand the equation we have to understand","$\\nabla^2 \\simeq \\vec{\\nabla}\\cdot\\vec{\\nabla}$ itself.","$\\nabla^2 V \\simeq ((divergence)(gradient)(V)$")
        INTEXT2[0].set_color(BLUE)
        INTEXT2[1].set_color(GOLD_B)
        INTEXT2[2].set_color(GREEN)
        INTEXT2[0].next_to(INtitle,DOWN)
        INTEXT2[1].next_to(INTEXT2[0],DOWN)
        INTEXT2[2].next_to(INTEXT2[1],DOWN)
        INTEXT2[2].scale(0.7)
        # Corner labels shown during the (long) voice-over pause below.
        INTEXT3 = TextMobject("Divergence","Gradient")
        INTEXT3[0].set_color(RED)
        INTEXT3[1].set_color(RED)
        INTEXT3[0].to_edge(DOWN+RIGHT)
        INTEXT3[1].to_edge(DOWN+LEFT)
        self.play(FadeIn(INTEXT2[0]),Write(INTEXT2[1]))
        self.wait(2)
        # NOTE(review): keyword is spelled `runtime` here, but manim's play()
        # expects `run_time` — as written this kwarg is presumably ignored
        # (or rejected); confirm the intended animation duration.
        self.play(Write(INTEXT2[2]),runtime=4)
        self.play(FadeIn(INTEXT3[0]),FadeIn(INTEXT3[1]))
        self.wait(74)
        self.remove(INTEXT2[0],INTEXT2[1],INTEXT2[2],INTEXT3[0],INTEXT3[1])
        # NOTE(review): `self.remove(7)` passes the integer 7 rather than a
        # mobject — this looks like leftover debugging; verify it is harmless.
        self.remove(7)
class LaplacianSig1(ThreeDScene):
    """3D scene: plots the saddle surface z = 7xy * exp(-(x^2+y^2)) and marks
    neighboring points/arrows to motivate the Laplacian as a comparison of a
    point with its neighbors."""

    def construct(self):
        axes = ThreeDAxes(
            number_line_config={
                "color": RED,
                "include_tip": False,
                "exclude_zero_from_default_numbers": True,
            }
        )
        # The demonstration surface z = 7uv / e^(u^2 + v^2), scaled up 2x.
        parabola = ParametricSurface(
            lambda u,v: np.array([
                u,
                v,
                7*u*v/np.exp((u**2 + v**2))]),u_max=2,u_min=-2,v_max=2,v_min=-2,
            checkerboard_colors=[PURPLE_D,PURPLE_E],
            resolution=(10,32)).scale(2)
        self.set_camera_orientation(phi=75 * DEGREES)
        self.begin_ambient_camera_rotation(0.3)
        self.play(Write(parabola),Write(axes))
        self.wait()
        # Four marker dots at symmetric points of the (scaled) surface;
        # z = +/- 2*1.2876 matches the surface height at (±√2, ±√2)/scale.
        circle1 = Circle(radius=0.1,fill_opacity=1.0)
        circle2 = Circle(radius=0.1,fill_opacity=1.0)
        circle7 = Circle(radius=0.1,fill_opacity=1.0)
        circle8 = Circle(radius=0.1,fill_opacity=1.0)
        vect1=np.array([np.sqrt(2),-np.sqrt(2),-2*1.2876])
        vect2=np.array([-np.sqrt(2),np.sqrt(2),-2*1.2876])
        vect7=np.array([np.sqrt(2),np.sqrt(2),2*1.2876])
        vect8=np.array([-np.sqrt(2),-np.sqrt(2),2*1.2876])
        circle1.move_to(vect1)
        circle2.move_to(vect2)
        circle7.move_to(vect7)
        circle8.move_to(vect8)
        # Arrows connecting the marked points across the saddle.
        line0 = Arrow(np.array([np.sqrt(2),np.sqrt(2),2*1.2876]),np.array([-np.sqrt(2),np.sqrt(2),-2*1.2876]))
        line1 = Arrow(np.array([np.sqrt(2),-np.sqrt(2),-2*1.2876]),np.array([np.sqrt(2),np.sqrt(2),2*1.2876]))
        line2 = Arrow(np.array([np.sqrt(2),-np.sqrt(2),2*1.2876]),np.array([-np.sqrt(2),-np.sqrt(2),2*1.2876]))
        line2.set_color(BLUE)
        line1.set_color(BLUE)
        line0.set_color(GREEN)
        line3 = Arrow(np.array([-np.sqrt(2),np.sqrt(2),-2*1.2876]),np.array([-np.sqrt(2),-np.sqrt(2),2*1.2876]))
        line4 = Arrow(np.array([-np.sqrt(2),np.sqrt(2),-2*1.2876]),np.array([np.sqrt(2),np.sqrt(2),2*1.2876]))
        line3.set_color(GOLD_B)
        line4.set_color(GOLD_B)
        circle7.set_color(GREEN)
        circle8.set_color(GREEN)
        # Look straight down for the 2D-style view of the markers.
        self.set_camera_orientation(phi=0 * DEGREES)
        self.play(FadeIn(circle1),FadeIn(circle2),FadeIn(circle7),FadeIn(circle8))
        self.wait(21)
        self.play(Write(line2),Write(line1),Write(line3),Write(line0))
        self.wait(12)
        self.play(Transform(line0,line4))
        self.begin_ambient_camera_rotation(0.0)
        self.wait()
        #INtitle1 = Title("Laplacian significance")
        #INtitle1.set_color(RED)
        #INtitle1.to_edge(UP)
        #self.add_fixed_in_frame_mobjects(INtitle1)
        #self.play(Write(INtitle1))
        self.wait(5)
        self.remove(line1,line2,line3,line4,circle2,circle1,circle7,circle8,line0)
        self.play(FadeOut(parabola),FadeOut(axes))
        #Text100=TextMobject("Now Drawing the Gradient of the surface \\\\ for all possible points in XY plane")
        self.wait()
        #Text100.set_color(RED)
        #self.play(GrowFromCenter(Text100))
        #self.wait(4)
        #field = VGroup(*[self.calc_field(u*RIGHT+v*UP)])
class Field(Scene):
    """2D scene: draws the gradient vector field of the saddle surface, marks
    source/sink circles, and transitions from E = -grad(V) to the Poisson and
    Laplace equations."""

    CONFIG = {
        # Colors index by vector magnitude bucket in calc_field_color.
        "color_list": ['#2be22b', '#e88e10', '#eae600', '#88ea00',
                       '#00eae2', '#0094ea', "#2700ea", '#bf00ea', '#ea0078'],
        "prop": 0
    }

    def construct(self):
        axes_config = {"x_min": -5,
                       "x_max": 5,
                       "y_min": -5,
                       "y_max": 5,
                       "z_axis_config": {},
                       "z_min": -1,
                       "z_max": 1,
                       "z_normal": DOWN,
                       "num_axis_pieces": 20,
                       "light_source": 9 * DOWN + 7 * LEFT + 10 * OUT,
                       "number_line_config": {
                           "include_tip": False,
                       },
                       }
        axes = Axes(**axes_config)
        axes.set_color(PURPLE_D)
        # One colored arrow per integer grid point on [-5, 5) x [-5, 5).
        f = VGroup(
            *[self.calc_field_color(x * RIGHT + y * UP, self.vect, prop=0)
              for x in np.arange(-5, 5, 1)
              for y in np.arange(-5, 5, 1)
              ]
        )
        field = VGroup(axes, f)
        # Circles marking the field's sources (+) and sinks (-) at (±1/√2, ±1/√2).
        circle3=Circle(radius=0.5)
        circle3.set_color(BLUE)
        vect3=np.array([1/np.sqrt(2),1/np.sqrt(2),0])
        circle3.move_to(vect3)
        circle4=Circle(radius=0.5)
        circle4.set_color(BLUE)
        vect4=np.array([-1/np.sqrt(2),-1/np.sqrt(2),0])
        circle4.move_to(vect4)
        circle5=Circle(radius=0.5)
        circle5.set_color(GOLD_B)
        vect5=np.array([-1/np.sqrt(2),1/np.sqrt(2),0])
        circle5.move_to(vect5)
        circle6=Circle(radius=0.5)
        circle6.set_color(GOLD_B)
        vect6=np.array([1/np.sqrt(2),-1/np.sqrt(2),0])
        circle6.move_to(vect6)
        Source1=TextMobject("-SOURCE")
        Source2=TextMobject("-SOURCE")
        Source1.next_to(circle3)
        Source2.next_to(circle4,LEFT)
        Source3=TextMobject("+SOURCE")
        Source4=TextMobject("+SOURCE")
        Source3.next_to(circle5,LEFT)
        Source4.next_to(circle6)
        # Electrostatics captions shown along the bottom edge.
        Gauss=TexMobject("\\vec E =-\\nabla V")
        Gauss.set_color(BLUE)
        Gauss.to_edge(DOWN)
        Poision2 = TexMobject("\\nabla^2V=-\\frac{\\rho}{\\epsilon}","\\vec \\nabla\\cdot\\vec E=-\\frac{\\rho}{\\epsilon}")
        Poision2[0].set_color(RED)
        Poision2[1].set_color(RED)
        Poision2[0].to_edge(DOWN)
        Poision2[1].to_edge(DOWN)
        # Long waits are timed to the narration.
        self.wait(2)
        self.play(ShowCreation(field))
        self.wait(17)
        self.play(Write(circle3),Write(circle4))
        self.wait(5)
        self.play(Write(circle5),Write(circle6))
        self.wait(16)
        self.play(Write(Source1),Write(Source2))
        self.wait(8)
        self.play(Write(Source3),Write(Source4))
        self.wait(55)
        self.play(FadeIn(Gauss))
        self.wait(2)
        self.wait(34)
        self.play(FadeOut(Gauss))
        self.wait(20)
        self.play(Write(Poision2[1]))
        self.play(ReplacementTransform(Poision2[1],Poision2[0]))
        self.wait(37)

    def calc_field_color(self, point, f, prop=0.0, opacity=None):
        """Build one arrow of the field: normalized direction of f at `point`,
        colored by a magnitude bucket from color_list."""
        x, y = point[:2]
        func = f(x, y)
        magnitude = math.sqrt(func[0] ** 2 + func[1] ** 2)
        # Normalize (guarding the zero vector), then shrink for display.
        func = func / magnitude if magnitude != 0 else np.array([0, 0])
        func = func / 1.7
        v = int(magnitude / 10 ** prop)
        # Clamp the bucket index into color_list's range.
        index = len(self.color_list) - 1 if v > len(self.color_list) - 1 else v
        c = self.color_list[index]
        v = Vector(func, color=c).shift(point)
        if opacity:
            v.set_fill(opacity=opacity)
        return v

    @staticmethod
    def vect(x, y):
        # Gradient of z = 7xy * exp(-(x^2+y^2)) (z-component zero for 2D plot).
        return np.array([
            7*y*(1-2*x**2)/np.exp(x**2+y**2),
            7*x*(1-2*y**2)/np.exp(x**2+y**2),
            0
        ])
class Source_term(ThreeDScene):
    """3D scene: morphs the saddle potential into a flat plane and then a
    tilted plane, illustrating how the source term flattens/tilts V."""

    def construct(self):
        axes = ThreeDAxes(
            number_line_config={
                "color": RED,
                "include_tip": False,
                "exclude_zero_from_default_numbers": True,
            }
        )
        # Saddle potential raised by 0.25 so the flat plane below sits at z=0.25.
        potential = ParametricSurface(
            lambda u,v: np.array([
                u,
                v,
                0.25+7*u*v/np.exp((u**2 + v**2))]),u_max=2,u_min=-2,v_max=2,v_min=-2,
            checkerboard_colors=[PURPLE_D,PURPLE_E],
            resolution=(10,32)).scale(2)
        plane = ParametricSurface(
            lambda u,v: np.array([
                u,
                v,
                0.25]),u_max=2,u_min=-2,v_max=2,v_min=-2,
            checkerboard_colors=[PURPLE_D,PURPLE_E],
            resolution=(10,32)).scale(2)
        self.set_camera_orientation(phi=75 * DEGREES)
        self.begin_ambient_camera_rotation(0.2)
        self.play(Write(potential),Write(axes))
        self.wait()
        self.wait(10)
        # Flatten the potential to a constant plane...
        self.play(ReplacementTransform(potential,plane),run_time=5)
        self.wait(5)
        self.wait(2)
        # ...then tilt it slightly (linear potential, zero Laplacian).
        plane_tilt = ParametricSurface(
            lambda u,v: np.array([
                u,
                v,
                0.6*(u+0.01*v)]),u_max=2,u_min=-2,v_max=2,v_min=-2,
            checkerboard_colors=[PURPLE_D,PURPLE_E],
            resolution=(10,32)).scale(2)
        self.play(ReplacementTransform(plane,plane_tilt),run_time=4)
        self.wait(3)
        self.wait(2)
class They_See_Me_Rolling(ThreeDScene):
    """3D scene: animates a small two-colored ball on the saddle surface by
    regenerating its pose each frame via UpdateFromAlphaFunc."""

    CONFIG = {
        # t: animation run time; r: ball radius; R: path scale; laps: unused here?
        "t":4,
        "r": 0.2,
        "R": 4,
        "laps":2*2*np.pi
    }

    def they_hating(self,da):
        """Rebuild the ball (two hemispheres) and trajectory for animation
        parameter `da` in [0, 1]; called once per frame by `update` below."""
        #try removing da
        ball_1 = ParametricSurface(
            lambda u, v: np.array([
                self.r * np.cos(u) * np.cos(v),
                self.r * np.sin(u) * np.cos(v),
                self.r * np.sin(v)
            ]), v_min=0, v_max=TAU / 2, u_min=0, u_max=TAU / 2, checkerboard_colors=[BLUE_D, BLUE_D],
            resolution=(20, 20))
        ball_2 = ParametricSurface(
            lambda u, v: np.array([
                self.r * np.cos(u) * np.cos(v),
                self.r * np.sin(u) * np.cos(v),
                self.r * np.sin(v)
            ]), v_min=TAU / 2, v_max=TAU, u_min=TAU / 2, u_max=TAU, checkerboard_colors=[RED_D, RED_D],
            resolution=(20, 20))
        ball = VGroup(ball_1, ball_2)
        # NOTE(review): ParametricFunction is parameterized by t_min/t_max in
        # manim; the j_min/j_max keywords here are presumably ignored — confirm.
        trajectory = ParametricFunction(
            lambda j: np.array([
                (1/np.sqrt(2)),
                j,
                (7*j/(np.sqrt(2)*np.exp(0.5+(j)**2)))
            ]), j_min=-1/(4*np.sqrt(2)), j_max=1/(4*np.sqrt(2)), color=PURPLE_D
        )
        # Spin the ball about the surface-tangent axis, proportional to da.
        ball.rotate(self.t*self.R/self.r*da,[1/np.sqrt(2),1/np.sqrt(2),(7)/(2*np.exp(1))])
        position_ball = np.array([
            (1/np.sqrt(2)),
            (1/np.sqrt(2)),
            (7/(np.sqrt(2)*np.exp(0.5+0.5)))*da+self.r ])
        ball.move_to(position_ball)
        group=VGroup(ball,trajectory)
        return group

    def construct(self):
        # Initial (static) ball, identical to the one they_hating builds.
        ball_1 = ParametricSurface(
            lambda u, v: np.array([
                self.r * np.cos(u) * np.cos(v),
                self.r * np.sin(u) * np.cos(v),
                self.r * np.sin(v)
            ]), v_min=0, v_max=TAU / 2, u_min=0, u_max=TAU / 2, checkerboard_colors=[BLUE_D, BLUE_D],
            resolution=(20, 20))
        ball_2 = ParametricSurface(
            lambda u, v: np.array([
                self.r * np.cos(u) * np.cos(v),
                self.r * np.sin(u) * np.cos(v),
                self.r * np.sin(v)
            ]), v_min=TAU / 2, v_max=TAU, u_min=TAU / 2, u_max=TAU, checkerboard_colors=[RED_D, RED_D],
            resolution=(20, 20))
        ball=VGroup(ball_1,ball_2)
        #parabola = ParametricSurface(
        #lambda u,v: np.array([
        # u,
        #v,
        #7*u*v/np.exp((u**2 + v**2))]),u_max=2,u_min=-2,v_max=2,v_min=-2,
        #checkerboard_colors=[PURPLE_D,PURPLE_E])
        # The saddle surface the ball rolls on.
        Random_Surface = ParametricSurface(
            lambda u, v: np.array([
                u,
                v,
                7*u*v/np.exp((u**2 + v**2))
            ]), v_min=-1.8*TAU, v_max=TAU, u_min=-1.8*TAU, u_max=TAU, checkerboard_colors=[PURPLE_D, PURPLE_E],
            resolution=(40, 40))
        # Rest the ball on the surface at (1/√2, 1/√2).
        ball.move_to([1/np.sqrt(2),1/np.sqrt(2),(7)/(2*np.exp(1))+self.r])
        # Placeholder trajectory; replaced each frame by they_hating's output.
        trajectory = ParametricFunction(
            lambda da: np.array([
                0,0,0
            ]), t_min=0, t_max=1, color=RED
        )
        group=VGroup(ball,trajectory)
        def update(iteration,da):
            # Swap in a freshly posed ball/trajectory for this alpha value.
            new_it = self.they_hating(da)
            iteration.become(new_it)
            return iteration
        self.set_camera_orientation(phi=50 * DEGREES, theta=50 * DEGREES, distance=8)
        self.begin_ambient_camera_rotation(rate=0.1)
        self.play(GrowFromCenter(ball),ShowCreation(Random_Surface))
        self.wait(5)
        self.play(UpdateFromAlphaFunc(group,update),run_time=self.t, rate_func=linear)
        self.wait(5)
class Ending(Scene):
    """Closing scene: lists the two key properties of solutions to Laplace's
    equation and teases the next video."""

    def construct(self):
        title = Title("Laplace's Equation Solution Properties")
        title.set_color(RED)
        title.to_edge(UP)
        # Property 1: no interior extrema; Property 2: averaging behavior.
        PROPERTY1 = TextMobject("1.The Solution tolerates no local maxima or minima.\\\\If is does, then it will become poisson's solution.")
        PROPERTY1.set_color(BLUE)
        PROPERTY2 = TextMobject("2.It represent average around the point of interest.\\\\ i.e., It is a averaging function.")
        PROPERTY2.set_color(BLUE)
        PROPERTY1.next_to(title.get_corner(DOWN),DOWN)
        note1 = TextMobject("The maxima and minima can only exist at the boundaries.")
        note1.set_color(GOLD_B)
        note1.scale(0.70)
        note1.next_to(PROPERTY1.get_corner(DOWN),DOWN)
        note2 = TextMobject("The first points is explained the second one will \\\\ be explaned seperately for different dimensions.")
        note2.set_color(GOLD_B)
        note2.next_to(PROPERTY2.get_corner(DOWN),DOWN)
        note2.scale(0.70)
        PROPERTY2.next_to(note1.get_corner(DOWN),DOWN)
        self.play(Write(title))
        self.wait(3)
        self.play(Write(PROPERTY1),run_time=5)
        self.wait()
        self.play(Write(note1))
        self.wait(3)
        self.play(Write(PROPERTY2),run_time=4)
        self.wait()
        self.play(Write(note2))
        self.wait(3)
        # Clear the slide and show the outro message.
        self.remove(title,PROPERTY1,PROPERTY2,note1,note2)
        next1 = TextMobject("In the next video we will see Laplacian in 1D.\\\\ See you next time.")
        next1.set_color(GREEN)
        self.play(Write(next1))
        self.wait(2)
| StarcoderdataPython |
4967254 | <reponame>boeddeker/sphinxcontrib-matlabdomain
# -*- coding: utf-8 -*-
"""
sphinxcontrib.adadomain
~~~~~~~~~~~~~~~~~~~~~~~~~~
Ada domain.
:copyright: Copyright 2010 by <NAME>
:license: BSD, see LICENSE for details.
Some parts of the code copied from erlangdomain by <NAME>.
"""
import re
import string
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType, Index
from sphinx.util.compat import Directive
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, TypedField
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
# REs for Ada function signatures
ada_func_sig_re = re.compile(
r'''^function\s+([\w.]*\.)?
(\w+)\s*
(\((.*)\))?\s*
return\s+(\w+)\s*
(is\s+abstract)?;$
''', re.VERBOSE)
ada_proc_sig_re = re.compile(
r'''^procedure\s+([\w.]*\.)?
(\w+)\s*
(\((.*)\))?\s*
(is\s+abstract)?\s*;$
''', re.VERBOSE)
ada_type_sig_re = re.compile(
r'''^type\s+(\w+)\s+is(.*);$''', re.VERBOSE)
ada_paramlist_re = re.compile(r'(;)')
class AdaObject(ObjectDescription):
    """
    Description of an Ada language object.

    Handles the ``function``, ``procedure`` and ``type`` object types:
    parses the Ada signature, renders it as docutils nodes, and registers
    the object in the domain's inventories for cross-referencing.
    (This module targets Python 2 — note the print statements.)
    """
    doc_field_types = [
        TypedField('parameter', label=l_('Parameters'),
                   names=('param', 'parameter', 'arg', 'argument'),
                   typerolename='type', typenames=('type',)),
        Field('returnvalue', label=l_('Returns'), has_arg=False,
              names=('returns', 'return')),
        Field('returntype', label=l_('Return type'), has_arg=False,
              names=('rtype',)),
    ]

    def needs_arglist(self):
        """Only functions and procedures render a parameter list."""
        return self.objtype == 'function' or self.objtype == 'procedure'

    def _resolve_module_name(self, signode, modname, name):
        """Prefix `name` with its module (explicit or the current ada:module),
        record module/fullname on the signature node, and emit the name nodes.
        Returns the fully qualified name."""
        env_modname = self.options.get(
            'module', self.env.temp_data.get('ada:module', 'ada'))
        if modname:
            # `modname` comes from the signature regex and keeps its trailing dot.
            fullname = modname + name
            signode['module'] = modname[:-1]
        else:
            fullname = env_modname + '.' + name
            signode['module'] = env_modname
        signode['fullname'] = fullname
        name_prefix = signode['module'] + '.'
        signode += addnodes.desc_addname(name_prefix, name_prefix)
        signode += addnodes.desc_name(name, name)
        return fullname

    def _handle_function_signature(self, sig, signode):
        """Parse an Ada ``function`` signature into `signode`.
        Returns "fullname/arity" (arity is the parameter count, or a
        "first..last" range when defaulted parameters are counted)."""
        m = ada_func_sig_re.match(sig)
        if m is None:
            print "m did not match the function"
            raise ValueError
        modname, name, dummy, arglist, returntype, abstract = m.groups()
        fullname = self._resolve_module_name(signode, modname, name)
        if not arglist:
            if self.needs_arglist():
                # for functions and procedures, add an empty parameter list
                new_node = addnodes.desc_parameterlist()
                new_node.child_text_separator = '; '
                signode += new_node
            if returntype:
                signode += addnodes.desc_returns(returntype, returntype)
            return fullname
        signode += nodes.Text(' ')
        new_node = addnodes.desc_parameterlist()
        new_node.child_text_separator = '; '
        signode += new_node
        stack = [signode[-1]]
        # counters[0]: required parameters, counters[1]: optional ones
        # (the stack never grows here, so counters[1] stays 0).
        counters = [0, 0]
        # Ada separates parameters with ';' and "name : type" with ':'.
        for token in string.split(arglist, ';'):
            pieces = string.split(token, ':')
            name = pieces[0].strip()
            stack[-1] += addnodes.desc_parameter(name, name + " : " + pieces[1].strip())
            if len(stack) == 1:
                counters[0] += 1
            else:
                counters[1] += 1
        if len(stack) != 1:
            raise ValueError
        if not counters[1]:
            fullname = '%s/%d' % (fullname, counters[0])
        else:
            fullname = '%s/%d..%d' % (fullname, counters[0], sum(counters))
        if returntype:
            signode += addnodes.desc_returns(returntype,returntype)
        return fullname

    def _handle_procedure_signature(self, sig, signode):
        """Parse an Ada ``procedure`` signature into `signode`.
        Same as _handle_function_signature but without a return type."""
        m = ada_proc_sig_re.match(sig)
        if m is None:
            print "m did not match"
            raise ValueError
        modname, name, dummy, arglist, abstract = m.groups()
        fullname = self._resolve_module_name(signode, modname, name)
        if not arglist:
            if self.needs_arglist():
                # for functions and procedures, add an empty parameter list
                newnode = addnodes.desc_parameterlist()
                newnode.child_text_separator = '; '
                signode += newnode
        signode += nodes.Text(' ')
        newnode = addnodes.desc_parameterlist()
        newnode.child_text_separator = '; '
        signode += newnode
        stack = [signode[-1]]
        counters = [0, 0]
        for token in string.split(arglist, ';'):
            pieces = string.split(token, ':')
            name = pieces[0].strip()
            stack[-1] += addnodes.desc_parameter(name, name + " : " + pieces[1].strip())
            if len(stack) == 1:
                counters[0] += 1
            else:
                counters[1] += 1
        if len(stack) != 1:
            raise ValueError
        if not counters[1]:
            fullname = '%s/%d' % (fullname, counters[0])
        else:
            fullname = '%s/%d..%d' % (fullname, counters[0], sum(counters))
        return fullname

    def _handle_type_signature(self, sig, signode):
        """Parse an Ada ``type`` declaration; only the type name is rendered."""
        m = ada_type_sig_re.match(sig)
        if m is None:
            print "m did not match"
            raise ValueError
        name, value = m.groups()
        fullname = self._resolve_module_name(signode, '', name)
        # signode += addnodes.desc_parameterlist()
        # stack = [signode[-1]]
        # signode += addnodes.desc_type(name, name + " is " + value)
        signode += addnodes.desc_type(name, '')
        return fullname

    def handle_signature(self, sig, signode):
        """Dispatch on the signature's leading keyword."""
        if sig.startswith('function'):
            return self._handle_function_signature (sig, signode)
        elif sig.startswith('type'):
            return self._handle_type_signature (sig, signode)
        else:  # sig.startswith('procedure'):
            return self._handle_procedure_signature (sig, signode)

    def _get_index_text(self, name):
        """Index entry text for the given object, by objtype."""
        if self.objtype == 'function':
            return _('%s (Ada function)') % name
        elif self.objtype == 'procedure':
            return _('%s (Ada procedure)') % name
        elif self.objtype == 'type':
            return _('%s (Ada type)') % name
        else:
            return ''

    def add_target_and_index(self, name, sig, signode):
        """Register `name` as a link target, store it in the domain data
        (functions/procedures keyed by arity), and add index entries both
        for the qualified and the unqualified name."""
        pieces = string.split(name, '.')
        if name not in self.state.document.ids:
            signode['names'].append(name)
            signode['ids'].append(name)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            if self.objtype =='function':
                finv = self.env.domaindata['ada']['functions']
                fname, arity = name.split('/')
                if '..' in arity:
                    first, last = map(int, arity.split('..'))
                else:
                    first = last = int(arity)
                # Register one inventory entry per accepted arity, warning
                # about duplicates.
                for arity_index in range(first, last+1):
                    if fname in finv and arity_index in finv[fname]:
                        self.env.warn(
                            self.env.docname,
                            ('duplicate Ada function description'
                             'of %s, ') % name +
                            'other instance in ' +
                            self.env.doc2path(finv[fname][arity_index][0]),
                            self.lineno)
                    arities = finv.setdefault(fname, {})
                    arities[arity_index] = (self.env.docname, name)
            # NOTE(review): this is `if`, not `elif` — for objtype 'function'
            # the condition is False, so the `else` branch below ALSO records
            # the function in the generic objects inventory. Confirm whether
            # that double registration is intended.
            if self.objtype == 'procedure':
                finv = self.env.domaindata['ada']['procedures']
                fname, arity = name.split('/')
                if '..' in arity:
                    first, last = map(int, arity.split('..'))
                else:
                    first = last = int(arity)
                for arity_index in range(first, last+1):
                    if fname in finv and arity_index in finv[fname]:
                        self.env.warn(
                            self.env.docname,
                            ('duplicate Ada procedure description'
                             'of %s, ') % name +
                            'other instance in ' +
                            self.env.doc2path(finv[fname][arity_index][0]),
                            self.lineno)
                    arities = finv.setdefault(fname, {})
                    arities[arity_index] = (self.env.docname, name)
            else:
                oinv = self.env.domaindata['ada']['objects']
                if name in oinv:
                    self.env.warn(
                        self.env.docname,
                        'duplicate Ada object description of %s, ' % name +
                        'other instance in ' + self.env.doc2path(oinv[name][0]),
                        self.lineno)
                oinv[name] = (self.env.docname, self.objtype)
        indextext = self._get_index_text(name)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, name, name))
        # Also index the bare (unqualified) name.
        plain_name = pieces[-1]
        indextext = self._get_index_text(plain_name)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, name, plain_name))
class AdaModule(Directive):
    """
    Directive to mark description of a new module.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'platform': lambda x: x,
        'synopsis': lambda x: x,
        'noindex': directives.flag,
        'deprecated': directives.flag,
    }

    def run(self):
        """Register the module in the domain data and emit target/index nodes."""
        env = self.state.document.settings.env
        modname = self.arguments[0].strip()
        should_index = 'noindex' not in self.options
        env.temp_data['ada:module'] = modname
        env.domaindata['ada']['modules'][modname] = (
            env.docname,
            self.options.get('synopsis', ''),
            self.options.get('platform', ''),
            'deprecated' in self.options,
        )
        target = nodes.target('', '', ids=['module-' + modname], ismod=True)
        self.state.document.note_explicit_target(target)
        result = [target]
        # XXX this behavior of the module directive is a mess...
        if 'platform' in self.options:
            platform = self.options['platform']
            paragraph = nodes.paragraph()
            paragraph += nodes.emphasis('', _('Platforms: '))
            paragraph += nodes.Text(platform, platform)
            result.append(paragraph)
        # the synopsis isn't printed; in fact, it is only used in the
        # modindex currently
        if should_index:
            indextext = _('%s (module)') % modname
            result.append(addnodes.index(entries=[
                ('single', indextext, 'module-' + modname, modname)]))
        return result
class AdaCurrentModule(Directive):
    """
    This directive is just to tell Sphinx that we're documenting
    stuff in module foo, but links to module foo won't lead here.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        """Switch the current module context without creating a link target."""
        env = self.state.document.settings.env
        modname = self.arguments[0].strip()
        # The literal argument 'None' clears the current-module context.
        env.temp_data['ada:module'] = None if modname == 'None' else modname
        return []
class AdaXRefRole(XRefRole):
    """Cross-reference role that records the current Ada module context."""

    def process_link(self, env, refnode, has_explicit_title, title, target):
        """Normalize title/target, honoring the leading ':' and '~' markers."""
        refnode['ada:module'] = env.temp_data.get('ada:module')
        if not has_explicit_title:
            title = title.lstrip(':')    # only has a meaning for the target
            target = target.lstrip('~')  # only has a meaning for the title
            # a leading tilde hides the module/class parts of the link text:
            # keep only what follows the last colon (rpartition leaves the
            # whole string untouched when there is no colon at all)
            if title.startswith('~'):
                title = title[1:].rpartition(':')[2]
        return title, target
class AdaModuleIndex(Index):
    """
    Index subclass to provide the Ada module index.
    """
    name = 'modindex'
    localname = l_('Ada Module Index')
    shortname = l_('Ada modules')

    def generate(self, docnames=None):
        """Build the ``(letter -> entries, collapse)`` content of the index.

        Uses ``dict.items()`` instead of the Python-2-only ``iteritems()``
        so the domain also works under Python 3.
        """
        content = {}
        # list of prefixes to ignore, longest first so the most specific wins
        ignores = self.domain.env.config['modindex_common_prefix']
        ignores = sorted(ignores, key=len, reverse=True)
        # list of all modules, sorted by module name
        modules = sorted(self.domain.data['modules'].items(),
                         key=lambda x: x[0].lower())
        # sort out collapsable modules
        prev_modname = ''
        num_toplevels = 0
        for modname, (docname, synopsis, platforms, deprecated) in modules:
            if docnames and docname not in docnames:
                continue
            for ignore in ignores:
                if modname.startswith(ignore):
                    modname = modname[len(ignore):]
                    stripped = ignore
                    break
            else:
                stripped = ''
            # we stripped the whole module name?
            if not modname:
                modname, stripped = stripped, ''
            entries = content.setdefault(modname[0].lower(), [])
            package = modname.split(':')[0]
            if package != modname:
                # it's a submodule
                if prev_modname == package:
                    # first submodule - make parent a group head
                    entries[-1][1] = 1
                elif not prev_modname.startswith(package):
                    # submodule without parent in list, add dummy entry
                    entries.append([stripped + package, 1, '', '', '', '', ''])
                subtype = 2
            else:
                num_toplevels += 1
                subtype = 0
            qualifier = deprecated and _('Deprecated') or ''
            entries.append([stripped + modname, subtype, docname,
                            'module-' + stripped + modname, platforms,
                            qualifier, synopsis])
            prev_modname = modname
        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels
        # sort by first letter
        content = sorted(content.items())
        return content, collapse
class AdaDomain(Domain):
    """Ada language domain."""
    name = 'ada'
    label = 'Ada'
    object_types = {
        'function': ObjType(l_('function'), 'func'),
        'procedure': ObjType(l_('procedure'), 'proc'),
        'type': ObjType(l_('type'), 'type'),
        'module': ObjType(l_('module'), 'mod'),
    }

    directives = {
        'function': AdaObject,
        'procedure': AdaObject,
        'type': AdaObject,
        'module': AdaModule,
        'currentmodule': AdaCurrentModule,
    }
    roles = {
        'func': AdaXRefRole(),
        'proc': AdaXRefRole(),
        'type': AdaXRefRole(),
        'mod': AdaXRefRole(),
    }
    initial_data = {
        'objects': {},     # fullname -> docname, objtype
        'functions': {},   # fullname -> arity -> (targetname, docname)
        'procedures': {},  # fullname -> arity -> (targetname, docname)
        'modules': {},     # modname -> docname, synopsis, platform, deprecated
    }
    indices = [
        AdaModuleIndex,
    ]

    def clear_doc(self, docname):
        """Forget everything that was documented in *docname*.

        Iterates over list() copies of the inventories because entries are
        deleted during the walk -- mutating a dict while iterating its view
        raises RuntimeError on Python 3.
        """
        for fullname, (fn, _objtype) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][fullname]
        for modname, (fn, _syn, _plat, _depr) in list(self.data['modules'].items()):
            if fn == docname:
                del self.data['modules'][modname]
        for fullname, funcs in list(self.data['functions'].items()):
            for arity, (fn, _target) in list(funcs.items()):
                if fn == docname:
                    del self.data['functions'][fullname][arity]
                    if not self.data['functions'][fullname]:
                        del self.data['functions'][fullname]
        for fullname, funcs in list(self.data['procedures'].items()):
            for arity, (fn, _target) in list(funcs.items()):
                if fn == docname:
                    del self.data['procedures'][fullname][arity]
                    if not self.data['procedures'][fullname]:
                        del self.data['procedures'][fullname]

    def _find_obj(self, env, modname, name, objtype, searchorder=0):
        """
        Find a Ada object for "name", perhaps using the given module and/or
        classname.  Returns ``(target name, docname)`` or ``(None, None)``.
        """
        if not name:
            return None, None
        if ":" not in name:
            name = "%s:%s" % (modname, name)
        if name in self.data['objects']:
            return name, self.data['objects'][name][0]
        if '/' in name:
            fname, arity = name.split('/')
            arity = int(arity)
        else:
            fname = name
            arity = -1
        if fname in self.data['functions']:
            arities = self.data['functions'][fname]
        elif fname in self.data['procedures']:
            arities = self.data['procedures'][fname]
        else:
            return None, None
        if arity == -1:
            # no explicit arity requested: fall back to the smallest one
            arity = min(arities)
        if arity in arities:
            docname, targetname = arities[arity]
            return targetname, docname
        return None, None

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve a cross-reference to a module or any other Ada object."""
        if typ == 'mod' and target in self.data['modules']:
            docname, synopsis, platform, deprecated = \
                self.data['modules'].get(target, ('', '', '', ''))
            if not docname:
                return None
            else:
                title = '%s%s%s' % ((platform and '(%s) ' % platform),
                                    synopsis,
                                    (deprecated and ' (deprecated)' or ''))
                return make_refnode(builder, fromdocname, docname,
                                    'module-' + target, contnode, title)
        else:
            modname = node.get('ada:module')
            searchorder = node.hasattr('refspecific') and 1 or 0
            name, obj = self._find_obj(env, modname, target, typ, searchorder)
            if not obj:
                return None
            else:
                return make_refnode(builder, fromdocname, obj, name,
                                    contnode, name)

    def get_objects(self):
        """Yield ``(name, dispname, type, docname, anchor, priority)`` tuples."""
        for refname, (docname, objtype) in self.data['objects'].items():
            yield (refname, refname, objtype, docname, refname, 1)
def setup(app):
    """Sphinx extension entry point: register the Ada domain with the app."""
    app.add_domain(AdaDomain)
| StarcoderdataPython |
3246038 | # Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
class OracleToOracleOperator(BaseOperator):
    """Vendored signature stub of Airflow's OracleToOracleOperator.

    Auto-generated by 'inv collect-airflow': attribute names with string
    type annotations only -- no operator behavior lives here, and edits
    will be overwritten on the next regeneration.
    """
    oracle_destination_conn_id: "str"
    destination_table: "str"
    oracle_source_conn_id: "str"
    source_sql: "str"
    source_sql_params: "typing.Union[dict, NoneType]"
    rows_chunk: "int"
| StarcoderdataPython |
11238149 | <reponame>Meso272/PyTorch-VAE<filename>do_cesm768_sz.py
import os
import numpy as np
import sys
# Benchmark driver: compress every slice of one CESM-768 field with the SZ
# compressor at a sweep of relative error bounds, recording compression
# ratio (cr) and PSNR per (error bound, slice) cell.
fieldname=sys.argv[1]
datafolder="/home/jliu447/lossycompression/cesm-768"
# 28 relative error bounds: 1e-4..9e-4, 1e-3..9e-3, 1e-2..1e-1.
ebs=[i*1e-4 for i in range(1,10)]+[i*1e-3 for i in range(1,10)]+[i*1e-2 for i in range(1,11)]
# Row 0 / column 0 hold the slice indices / error bounds as headers,
# hence the (len(ebs)+1) x (16+1) shape.
cr=np.zeros((29,17),dtype=np.float32)
psnr=np.zeros((29,17),dtype=np.float32)
for i,eb in enumerate(ebs):
    cr[i+1][0]=eb
    psnr[i+1][0]=eb
    for j in range(0,16):
        cr[0][j+1]=j
        psnr[0][j+1]=j
        filename="%s_%d.dat" % (fieldname,j)
        filepath=os.path.join(datafolder,filename)
        # Compress (-z) the 1152x768 float32 slice at relative bound eb.
        comm="sz -z -f -i %s -M REL -R %f -2 1152 768" % (filepath,eb)
        os.system(comm)
        szpath=filepath+".sz"
        # Decompress (-x) with -a to print the accuracy report, captured
        # into a scratch file that is parsed just below.
        comm="sz -x -f -i %s -s %s -2 1152 768 -a>temp_sz_cesm_768.txt" % (filepath,szpath)
        os.system(comm)
        with open("temp_sz_cesm_768.txt","r") as f:
            lines=f.read().splitlines()
            # NOTE(review): eval() on tool output is fragile/unsafe if the
            # report format changes; float() on the split fields would do.
            # Line 4 carries 'PSNR=...'; line 7 carries the ratio.
            p=eval(lines[4].split(',')[0].split('=')[1])
            r=eval(lines[7].split('=')[1])
            cr[i+1][j+1]=r
            psnr[i+1][j+1]=p
        # Clean up the compressed and reconstructed slice files.
        comm="rm -f %s" % szpath
        os.system(comm)
        # %-formatting binds before +, so this expands to 'rm -f <szpath>.out'.
        comm="rm -f %s" % szpath+".out"
        os.system(comm)
os.system("rm -f temp_sz_cesm_768.txt")
np.savetxt("768-sz_%s_cr.txt" % fieldname,cr,delimiter='\t')
np.savetxt("768-sz_%s_psnr.txt" % fieldname,psnr,delimiter='\t') | StarcoderdataPython |
6619851 | <filename>manager/views/employee.py
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account import models as amod
from . import templater
from base_app.user_util import user_check, my_account
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@my_account
def process_request(request):
    """Show and update an employee's account, address and employment details.

    ``request.urlparams[0]`` is the target user's id.  When
    ``request.urlparams[1] == 'delete'`` the account is soft-deleted
    (marked inactive) and the caller is redirected to the index.
    """
    if request.urlparams[1] == 'delete':
        u = amod.User.objects.get(id=request.urlparams[0])
        u.is_active = False
        u.save()
        return HttpResponseRedirect('/index/')
    # Resolve the user and its employee record; any lookup failure (bad id,
    # missing user, user without an employee row) sends us back to the index.
    try:
        u = amod.User.objects.get(id=request.urlparams[0])
        e = amod.Employee.objects.get(user=u)
    except Exception:
        return HttpResponseRedirect('/index/')
    user = u
    if not u.is_active:
        # Http404 is an exception class: it must be raised, not returned.
        raise Http404()
    form = UserForm(initial={
        'username': u.username,
        'first_name': u.first_name,
        'last_name': u.last_name,
        'email': u.email,
        'phone': u.phone,
        'is_staff': u.is_staff,
        'street1': u.street1,
        'street2': u.street2,
        'city': u.city,
        'state': u.state,
        'zipCode': u.zipCode,
        'hireDate': e.hireDate,
        'salary': e.salary,
    })
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            _save_user_details(u, e, form)
    passwordForm = UserPasswordForm()
    if request.method == 'POST':
        passwordForm = UserPasswordForm(request.POST)
        if passwordForm.is_valid():
            _change_password(u, passwordForm)
    tvars = {
        'user': user,
        'form': form,
        'passwordForm': passwordForm,
    }
    return templater.render_to_response(request, 'employee.html', tvars)


def _save_user_details(u, e, form):
    """Copy validated UserForm data onto the user and employee records."""
    u.username = form.cleaned_data['username']
    u.first_name = form.cleaned_data['first_name']
    u.last_name = form.cleaned_data['last_name']
    u.email = form.cleaned_data['email']
    u.phone = form.cleaned_data['phone']
    u.is_staff = form.cleaned_data['is_staff']
    u.street1 = form.cleaned_data['street1']
    u.street2 = form.cleaned_data['street2']
    u.city = form.cleaned_data['city']
    u.state = form.cleaned_data['state']
    u.zipCode = form.cleaned_data['zipCode']
    u.save()
    e.hireDate = form.cleaned_data['hireDate']
    e.salary = form.cleaned_data['salary']
    e.save()


def _change_password(u, passwordForm):
    """Set a new password after verifying the current one.

    clean() already ensures the two new passwords match; the explicit
    comparison is kept as defense in depth.  The previous code looked up
    cleaned_data['<PASSWORD>'] (an anonymization artifact), which raised
    KeyError -- the real field is 'newpassword1'.
    """
    if u.check_password(passwordForm.cleaned_data['password']):
        if passwordForm.cleaned_data['newpassword1'] == passwordForm.cleaned_data['newpassword2']:
            u.set_password(passwordForm.cleaned_data['newpassword1'])
            u.save()
class UserForm(forms.Form):
    """Account + address + employment details form rendered on employee.html."""
    # NOTE(review): several placeholder strings ('<NAME>', '<EMAIL>') look
    # like dataset-anonymization artifacts rather than intended UI text --
    # confirm the original placeholders before shipping.
    username = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username',}))
    first_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
    last_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
    email = forms.CharField(max_length=50, widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': '<EMAIL>',}))
    phone = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '801-555-1234',}))
    is_staff = forms.BooleanField(label='Employee?', widget=forms.CheckboxInput(), required=False, )
    street1 = forms.CharField(label = "Street 1", widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '123 Center St.',}))
    street2 = forms.CharField(label = "Street 2", required = False, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '#242',}))
    city = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Provo',}))
    state = forms.CharField(max_length=2, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'UT',}))
    zipCode = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '84601',}))
    hireDate = forms.DateField(label='Hire Date', widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': 'Hire Date',}))
    salary= forms.DecimalField(widget=forms.NumberInput(attrs={'class':'form-control', 'placeholder': 'Salary',}))
class UserPasswordForm(forms.Form):
    """Password-change form: the current password plus the new one, twice."""
    password = forms.CharField(label='Current Password', max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
    newpassword1 = forms.CharField(label='New Password', max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
    newpassword2 = forms.CharField(label='Repeat Password', max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))

    def clean(self):
        """Reject submissions where the two new passwords differ."""
        if (self.cleaned_data.get('newpassword1') !=
                self.cleaned_data.get('newpassword2')):
            raise forms.ValidationError("New password does not match.")
        return self.cleaned_data
9787314 |
# Module-level cache: url -> data previously "fetched" for it.
urls = {}


def check_url(url):
    """Return data for *url*, serving from the cache on repeat calls.

    A membership test is used instead of the previous ``urls.get(url)``
    truthiness check, which would mis-handle falsy cached values.
    """
    if url in urls:
        return f"{urls[url]} - cached"
    data = f"some data obtained from url {url}"
    urls[url] = data
    return data
def main():
    """Fetch a fixed list of pages, demonstrating cache hits on repeats."""
    pages = ["www.google.com", "www.elsitio.com", "www.twitter.com", "www.google.com", "www.elsitio.com"]
    for page in pages:
        print(check_url(page))


if __name__ == "__main__":
    main()
265616 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
extraction of information using MIT-LL MITIE
"""
import re
import sys, os
import extractor
# import MITIE
if 'MITIE_HOME' not in os.environ:
raise ValueError('MITIE_HOME not set.')
MITIE_HOME = os.environ['MITIE_HOME']
sys.path.append(MITIE_HOME+'/mitielib')
from mitie import *
from collections import defaultdict
from bs4 import BeautifulSoup
class ExtractInfo(extractor.Extractor):
    """Named-entity extractor backed by MIT-LL MITIE's English NER model."""

    myName = "info"

    def human_name(self):
        """Human-readable extractor name."""
        return "mitieinfo"

    def name(self):
        """Machine name used to key this extractor's output."""
        return "info"

    def version(self):
        return "0.0"

    def __init__(self):
        # Loading the NER model from disk is expensive; do it once per instance.
        self.ner = named_entity_extractor(
            MITIE_HOME + '/MITIE-models/english/ner_model.dat')

    def extract(self, url, status, headers, flags, body, timestamp, source):
        """Extract unique '<TAG> -> <entity text>' features from an HTML body.

        The previous version counted occurrences in a dict but only ever
        used the keys, and shadowed the builtin ``range``; a plain set of
        feature strings is equivalent and clearer.
        """
        soup = BeautifulSoup(body)
        tokens = tokenize(soup.get_text().encode('ascii', 'ignore'))
        entities = self.ner.extract_entities(tokens)
        seen = set()
        for entity in entities:
            token_range, tag = entity[0], entity[1]
            entity_text = " ".join(tokens[i] for i in token_range)
            seen.add(tag + ' -> ' + entity_text)
        return map(lambda x: self.create_attribute(x), list(seen))
6690337 | <filename>froide/foirequest/tasks.py
import os
from django.conf import settings
from django.utils import translation
from django.db import transaction
from django.core.files import File
from froide.celery import app as celery_app
from froide.publicbody.models import PublicBody
from froide.helper.document import convert_to_pdf
from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject
from .foi_mail import _process_mail, _fetch_mail
@celery_app.task(acks_late=True, time_limit=60)
def process_mail(*args, **kwargs):
    """Process one raw mail payload inside a single database transaction."""
    translation.activate(settings.LANGUAGE_CODE)
    with transaction.atomic():
        _process_mail(*args, **kwargs)
@celery_app.task(expires=60)
def fetch_mail():
    """Poll the FOI mailbox and queue each raw message for process_mail."""
    for rfc_data in _fetch_mail():
        process_mail.delay(rfc_data)
@celery_app.task
def detect_overdue():
    """Flag every FOI request whose response deadline has passed as overdue."""
    translation.activate(settings.LANGUAGE_CODE)
    for overdue_request in FoiRequest.objects.get_to_be_overdue():
        overdue_request.set_overdue()
@celery_app.task
def detect_asleep():
    """Move FOI requests without recent activity into the 'asleep' state."""
    translation.activate(settings.LANGUAGE_CODE)
    for stale_request in FoiRequest.objects.get_to_be_asleep():
        stale_request.set_asleep()
@celery_app.task
def classification_reminder():
    """Send a reminder for every request whose replies are still unclassified."""
    translation.activate(settings.LANGUAGE_CODE)
    for unclassified_request in FoiRequest.objects.get_unclassified():
        unclassified_request.send_classification_reminder()
@celery_app.task
def count_same_foirequests(instance_id):
    """Recount requests marked 'same as' *instance_id* and store the total."""
    translation.activate(settings.LANGUAGE_CODE)
    try:
        count = FoiRequest.objects.filter(same_as_id=instance_id).count()
        FoiRequest.objects.filter(id=instance_id).update(same_as_count=count)
    except FoiRequest.DoesNotExist:
        # NOTE(review): filter()/count()/update() do not raise DoesNotExist,
        # so this handler looks unreachable -- confirm before removing.
        pass
@celery_app.task
def check_delivery_status(message_id, count=None):
    """Trigger a delivery-status check for one message.

    Silently ignores ids of messages deleted since the task was queued.
    """
    try:
        message = FoiMessage.objects.get(id=message_id)
    except FoiMessage.DoesNotExist:
        return
    message.check_delivery_status(count=count)
@celery_app.task
def create_project_requests(project_id, publicbody_ids):
    """Fan out one request-creation task per public body, preserving order."""
    for sequence, publicbody_id in enumerate(publicbody_ids):
        create_project_request.delay(project_id, publicbody_id, sequence=sequence)
@celery_app.task
def create_project_request(project_id, publicbody_id, sequence=0):
    """Create one FOI request of a project for a single public body.

    Returns the new request's pk, or None if the project or public body
    has vanished in the meantime.  Marks the project as ready once all of
    its requests exist.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .services import CreateRequestFromProjectService
    try:
        project = FoiProject.objects.get(id=project_id)
    except FoiProject.DoesNotExist:
        # project does not exist anymore?
        return
    try:
        pb = PublicBody.objects.get(id=publicbody_id)
    except PublicBody.DoesNotExist:
        # pb was deleted?
        return
    service = CreateRequestFromProjectService({
        'project': project,
        'publicbody': pb,
        'subject': project.title,
        'user': project.user,
        'body': project.description,
        'public': project.public,
        'reference': project.reference,
        'tags': [t.name for t in project.tags.all()],
        'project_order': sequence
    })
    foirequest = service.execute()
    # Flip the project to READY when the last request has been created.
    if project.request_count == project.foirequest_set.all().count():
        project.status = FoiProject.STATUS_READY
        project.save()
    return foirequest.pk
@celery_app.task(time_limit=60)
def convert_attachment_task(instance_id):
    """Convert one attachment to PDF, skipping ids that no longer exist."""
    try:
        attachment = FoiAttachment.objects.get(pk=instance_id)
    except FoiAttachment.DoesNotExist:
        return
    return convert_attachment(attachment)
def convert_attachment(att):
    """Convert *att* to PDF and store the result as a linked FoiAttachment.

    Reuses the existing converted attachment record when one is already
    linked; otherwise creates a new, unapproved one, renaming on filename
    collisions.  Returns None if the external conversion produced no file.
    """
    result_file = convert_to_pdf(
        att.file.path,
        binary_name=settings.FROIDE_CONFIG.get(
            'doc_conversion_binary'
        ),
        construct_call=settings.FROIDE_CONFIG.get(
            'doc_conversion_call_func'
        )
    )
    if result_file is None:
        return
    path, filename = os.path.split(result_file)
    if att.converted:
        new_att = att.converted
    else:
        # Avoid clashing with an existing attachment of the same message.
        if FoiAttachment.objects.filter(
                belongs_to=att.belongs_to,
                name=filename).exists():
            name, extension = filename.rsplit('.', 1)
            filename = '%s_converted.%s' % (name, extension)
        new_att = FoiAttachment(
            belongs_to=att.belongs_to,
            approved=False,
            filetype='application/pdf',
            is_converted=True
        )
    new_att.name = filename
    with open(result_file, 'rb') as f:
        new_file = File(f)
        new_att.size = new_file.size
        new_att.file.save(filename, new_file)
    new_att.save()
    # Link the conversion back to the source attachment.
    att.converted = new_att
    att.save()
| StarcoderdataPython |
1887227 | <filename>tika-example.py<gh_stars>1-10
import tika
from tika import parser
# Parse a local WAV file with Apache Tika and dump its metadata and content.
# NOTE(review): requires a reachable/startable Tika server and a 'temp.wav'
# file in the working directory -- confirm before running.
parsed = parser.from_file('temp.wav')
print(parsed["metadata"])
print(parsed["content"])
exit() | StarcoderdataPython |
5012993 | <reponame>duliodenis/mit-cs-courses
#
# MIT 6.01 (Week 1)
# Design Lab 1. Problem 1.3.1
#
# Created by <NAME> on 1/4/15.
# Copyright (c) 2015 ddApps. All rights reserved.
# ------------------------------------------------
# http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-01sc-introduction-to-electrical-engineering-and-computer-science-i-spring-2011/unit-1-software-engineering/object-oriented-programming/MIT6_01SCS11_designLab01.pdf
#
# Objective: Write the definition of a Python procedure fib, such that fib(n) returns
# the nth Fibonacci number.
def fib(n):
    """Return the nth Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: O(n) time and O(1) space, replacing the
    naive doubly-recursive version which is exponential in n.  Raises
    ValueError for negative n (the old code recursed forever).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Test the Fibonacci procedure against known values.
firstTest = 6   # fib(6) -> 8
secondTest = 7  # fib(7) -> 13
thirdTest = 8   # fib(8) -> 21

firstResult = fib(firstTest)
secondResult = fib(secondTest)
thirdResult = fib(thirdTest)

# Parenthesized print of a single value is valid on both Python 2 and 3
# (the original used Python-2-only print statements).
print("fib1(%r) = %r" % (firstTest, firstResult))
print("fib2(%r) = %r" % (secondTest, secondResult))
print("fib3(%r) = %r" % (thirdTest, thirdResult))
6649896 | <reponame>pmorerio/deep-learning-tutorials<gh_stars>0
__author__ = 'm.bashari'
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
class Config:
    """Hyper-parameters shared by the toy 3-layer network and its training loop."""
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    # Gradient descent parameters (I picked these by hand)
    epsilon = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01  # regularization strength
def generate_data():
    """Return the fixed two-moons toy dataset (deterministic via seed 0)."""
    np.random.seed(0)
    return datasets.make_moons(200, noise=0.20)
def visualize(X, y, model):
    """Plot the model's decision boundary over the (X, y) scatter."""
    # plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
    # plt.show()
    plot_decision_boundary(lambda x:predict(model,x), X, y)
    # NOTE(review): plot_decision_boundary() already calls plt.show(), so
    # this title is applied after the figure is displayed -- confirm intended.
    plt.title("Logistic Regression")
def plot_decision_boundary(pred_func, X, y):
    """Contour-plot pred_func over a padded grid around X, plus the data points."""
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole gid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    plt.show()
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model, X, y):
    """Mean cross-entropy loss (plus L2 penalty) of the 3-layer net on (X, y)."""
    n_samples = len(X)
    W1, b1 = model['W1'], model['b1']
    W2, b2 = model['W2'], model['b2']
    W3, b3 = model['W3'], model['b3']
    # Forward pass: tanh -> tanh -> softmax.
    hidden1 = np.tanh(X.dot(W1) + b1)
    hidden2 = np.tanh(hidden1.dot(W2) + b2)
    scores = np.exp(hidden2.dot(W3) + b3)
    probs = scores / np.sum(scores, axis=1, keepdims=True)
    # Total negative log-likelihood of the correct classes.
    loss = np.sum(-np.log(probs[range(n_samples), y]))
    # L2 regularization on the weight matrices (biases excluded).
    loss += Config.reg_lambda / 2 * (
        np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
    return 1. / n_samples * loss
def predict(model, x):
    """Return the argmax class index for every row of *x* under *model*."""
    W1, b1 = model['W1'], model['b1']
    W2, b2 = model['W2'], model['b2']
    W3, b3 = model['W3'], model['b3']
    # Forward pass: tanh -> tanh -> softmax, then pick the likeliest class.
    hidden1 = np.tanh(x.dot(W1) + b1)
    hidden2 = np.tanh(hidden1.dot(W2) + b2)
    scores = np.exp(hidden2.dot(W3) + b3)
    probs = scores / np.sum(scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)
# This function learns parameters for the neural network and returns the model.
# - nn_hdim1: Number of nodes in the first hidden layer
# - nn_hdim2: Number of nodes in the second hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(X, y, nn_hdim1, nn_hdim2, num_passes=20000, print_loss=False):
    """Train a 2-hidden-layer tanh/softmax net by full-batch gradient descent.

    Returns a dict with keys W1, b1, W2, b2, W3, b3 (numpy arrays).
    Training is deterministic because the RNG is reseeded with 0.
    """
    # Initialize the parameters to random values. We need to learn these.
    num_examples = len(X)
    np.random.seed(0)
    W1 = np.random.randn(Config.nn_input_dim, nn_hdim1) / np.sqrt(Config.nn_input_dim)
    b1 = np.zeros((1, nn_hdim1))
    W2 = np.random.randn(nn_hdim1, nn_hdim2) / np.sqrt(nn_hdim1)
    b2 = np.zeros((1, nn_hdim2))
    W3 = np.random.randn(nn_hdim2, Config.nn_output_dim) / np.sqrt(nn_hdim2)
    b3 = np.zeros((1, Config.nn_output_dim))
    # This is what we return at the end
    model = {}
    # Gradient descent. For each batch...
    for i in range(0, num_passes):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        a2 = np.tanh(z2)
        z3 = a2.dot(W3) + b3
        exp_scores = np.exp(z3)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        # Backpropagation (softmax + cross-entropy gradient is probs - onehot)
        delta3 = probs
        delta3[range(num_examples), y] -= 1
        delta2 = delta3.dot(W3.T) * (1 - np.power(a2, 2))
        delta1 = delta2.dot(W2.T) * (1 - np.power(a1, 2))
        dW3 = (a2.T).dot(delta3)
        db3 = np.sum(delta3, axis=0, keepdims=True)
        dW2 = np.dot(a1.T, delta2)
        db2 = np.sum(delta2, axis=0)
        dW1 = np.dot(X.T, delta1)
        db1 = np.sum(delta1, axis=0)
        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += Config.reg_lambda * W2
        dW1 += Config.reg_lambda * W1
        dW3 += Config.reg_lambda * W3
        # Gradient descent parameter update
        W1 += -Config.epsilon * dW1
        b1 += -Config.epsilon * db1
        W2 += -Config.epsilon * dW2
        b2 += -Config.epsilon * db2
        W3 += -Config.epsilon * dW3
        b3 += -Config.epsilon * db3
        # Assign new parameters to the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}
        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model, X, y)))
    return model
def classify(X, y):
    """Placeholder for a logistic-regression baseline; currently disabled."""
    # clf = linear_model.LogisticRegressionCV()
    # clf.fit(X, y)
    # return clf
    pass
def main():
    """Train the two-hidden-layer toy network on the moons data and plot it."""
    X, y = generate_data()
    trained_model = build_model(X, y, 8, 8, print_loss=True)
    visualize(X, y, trained_model)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
397767 | import os
import time
from PySide2.QtWidgets import QMainWindow, QFileDialog, QMessageBox
from PySide2.QtGui import QDoubleValidator, QIntValidator
from PySide2 import QtCore
from genexpa.uis.uis.MainWindowUI import Ui_MainWindow
from genexpa.uis.WaitPopup import WaitPopup
from genexpa.uis.TableDisplayDialog import TableDisplayDialog
from genexpa.core.actions import Actions
from genexpa.core.system import System
from genexpa.core import helpers, explib
class CoherenceScoreModel(QtCore.QAbstractListModel):
    """Read-only list model showing a per-gene coherence value plus an average row."""
    def __init__(self):
        super().__init__()
        self.coherence_list = [] # gene, value
        self.coherence_df = None
    def data(self, index, role):
        """Render '<gene>  <value>' for the display role (None for other roles)."""
        if role == QtCore.Qt.DisplayRole:
            gene, value = self.coherence_list[index.row()]
            gene_str = "{:<14} {:.2f}".format(gene, value)
            return gene_str
    def rowCount(self, index):
        return len(self.coherence_list)
    def clearModel(self):
        """Drop all rows (with the proper begin/endRemoveRows bracketing)."""
        if len(self.coherence_list) == 0:
            return None
        self.beginRemoveRows(QtCore.QModelIndex(), 0, len(self.coherence_list)-1)
        self.coherence_list.clear()
        self.coherence_df = None
        self.endRemoveRows()
    def setCoherence(self, df):
        """Rebuild the rows from *df*: one mean per top-level gene, then 'Average'.

        No-op when the exact same DataFrame object is set again.
        """
        if self.coherence_df is df:
            return
        self.clearModel()
        self.coherence_df = df
        genes = self.coherence_df.columns.get_level_values(0).unique()
        # len(genes)+1 rows are inserted (genes plus the trailing Average row),
        # so the last inserted index is len(genes).
        self.beginInsertRows(QtCore.QModelIndex(), 0, len(genes))
        values_sum = 0.0
        for gene in genes:
            value = df[gene].values.mean()
            self.coherence_list.append([
                gene,
                value
            ])
            values_sum += value
        mean = values_sum/len(genes)
        self.coherence_list.append(["Average", mean])
        self.endInsertRows()
class MainWindowApp(QMainWindow):
    def __init__(self, app):
        """Build the main window: wire core objects, load the UI, seed widget state."""
        super().__init__()
        self.actions = Actions()
        self.system = System(self, app, self.actions)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.coherence_score_model = CoherenceScoreModel()
        self.ui.listCoherence.setModel(self.coherence_score_model)
        self.ui.listCoherence.setSelectionRectVisible(False)
        # Constrain the remove-repetition field to 0..100.
        self.remove_validator = QIntValidator(0,100)
        self.ui.lineEditRemoveRep.setValidator(self.remove_validator)
        #self.ui.lineEditSampleRepetition.setValidator(self.remove_validator)
        # Seed the widgets from the persisted parameter values.
        self.ui.lineEditConfidence.setText(str(self.system.parameters_data.confidence))
        self.ui.lineEditRemoveRep.setText(str(self.system.parameters_data.remove_rep))
        #self.ui.lineEditSampleRepetition.setText(str(self.system.parameters_data.sample_rep))
        self.ui.checkSelectBest.setChecked(self.system.parameters_data.select_best)
        self.ui.comboBoxNormalizationAlgorithm.setModel(self.system.parameters_data.normalization_algorithms_model)
        self.ui.comboBoxStatModel.setModel(self.system.parameters_data.stat_algorithms_model)
        self.input_data = None
        self.init_connections()
        self.update_gui()
    def init_connections(self):
        """Wire all menu actions, data-model change signals and button clicks."""
        self.ui.actionRead_from_combined_input.triggered.connect(self.actions.read_from_combined_input)
        self.ui.actionLoad_Quantified.triggered.connect(self.actions.load_quantified)
        self.ui.actionActivate_license.triggered.connect(self.actions.open_licence_popup)
        self.system.reference_genes_data.changed.connect(self.reference_genes_data_changed)
        self.system.target_genes_data.changed.connect(self.target_genes_data_changed)
        self.system.line_models_data.changed.connect(self.line_models_data_changed)
        self.system.quantitive_data.changed.connect(self.update_gui)
        self.system.results_data.changed.connect(self.update_gui)
        self.ui.buttonShowRef.clicked.connect(self.show_ref_table)
        self.ui.buttonShowTarget.clicked.connect(self.show_target_table)
        self.ui.buttonShowModels.clicked.connect(self.show_model_list)
        self.ui.buttonShowQuantitive.clicked.connect(self.show_quantitivie)
        self.ui.buttonRemoveQuantitive.clicked.connect(self.system.quantitive_data.clear)
        self.ui.buttonRunCalculations.clicked.connect(self.run_calculations)
        self.ui.lineEditConfidence.textChanged.connect(
            lambda x: self.parameters_changed(x, "confidence"))
        self.ui.lineEditRemoveRep.textChanged.connect(
            lambda x:self.parameters_changed(x, "remove_rep"))
        # The triple-quoted string below is commented-out wiring for the
        # (disabled) sample-repetition field; it is a no-op expression.
        """
        self.ui.lineEditSampleRepetition.textChanged.connect(
            lambda x: self.parameters_changed(x, "sample_rep"))
        """
        self.ui.checkSelectBest.stateChanged.connect(
            lambda x: self.parameters_changed(x, "select_best"))
        self.ui.buttonShowBestRef.clicked.connect(self.show_best_ref)
        self.ui.buttonShowRQ.clicked.connect(self.show_RQ)
        self.ui.buttonShowStatistic.clicked.connect(self.show_statistic)
        #self.ui.buttonShowCoherence.clicked.connect(self.show_coherence_score)
        self.ui.buttonExportResults.clicked.connect(self.export_results)
        self.ui.buttonExportGraphs.clicked.connect(self.export_graphs)
    def parameters_changed(self, value, parameter_str):
        """Push an edited parameter (named by *parameter_str*) into the model, then refresh."""
        self.system.parameters_data.update(value, parameter_str)
        self.update_gui()
def reference_genes_data_changed(self):
self.ui.labelRefCnt.setText(str(self.system.reference_genes_data.get_genes_count()))
self.update_gui()
def target_genes_data_changed(self):
self.ui.labelTargetCnt.setText(str(self.system.target_genes_data.get_genes_count()))
self.update_gui()
def line_models_data_changed(self):
self.ui.labelModelsCnt.setText(str(self.system.line_models_data.get_models_cout()))
self.update_gui()
    def show_ref_table(self):
        """Open a dialog showing the reference-genes DataFrame."""
        self.ref_table = TableDisplayDialog(self.system.mainWindowApp,
                                   self.system.reference_genes_data.df,
                                   "Reference genes data")
        self.ref_table.show()
    def show_target_table(self):
        """Open a dialog showing the target-genes DataFrame."""
        self.target_table = TableDisplayDialog(self.system.mainWindowApp,
                                   self.system.target_genes_data.df,
                                   "Target genes data")
        self.target_table.show()
    def show_model_list(self):
        """Open a dialog showing the cell-line models (column 0 kept as strings)."""
        self.models_table =TableDisplayDialog(self.system.mainWindowApp,
                                   self.system.line_models_data.as_df(),
                                   "Lines models data",
                                   [0])
        self.models_table.show()
    def show_quantitivie(self):
        """Open a dialog showing the quantitive gene DataFrame."""
        # NOTE: the method name typo ('quantitivie') is part of the public
        # API (signal wiring uses it), so it is kept as-is.
        self.quantitive_table = TableDisplayDialog(self.system.mainWindowApp,
                                   self.system.quantitive_data.df,
                                   "Quantitive gene data")
        self.quantitive_table.show()
def show_best_ref(self):
self.best_table = TableDisplayDialog(self.system.mainWindowApp,
self.system.results_data.df_best_ref,
"Best reference gene/pair results data",
[0]
)
self.best_table.show()
def show_RQ(self):
self.RQ_table = TableDisplayDialog(self.system.mainWindowApp,
self.system.results_data.df_RQ,
"RQ results data"
)
self.RQ_table.show()
def show_statistic(self):
self.stat_table = TableDisplayDialog(self.system.mainWindowApp,
self.system.results_data.df_stat,
"Statistic results data"
)
self.stat_table.show()
def show_coherence_score(self):
self.coherence_table = TableDisplayDialog(self.system.mainWindowApp,
self.system.results_data.df_coherence.T,
"Coherence score results data",
str_col = list(range(len(self.system.results_data.df_coherence.columns)))
)
self.coherence_table.show()
def run_calculations(self):
if not (self.system.input_valid() and self.system.parameters_data.is_valid()):
return None
df_ref = explib.append_group_index(self.system.reference_genes_data.df)
df_target = self.system.target_genes_data.df
df_ref_norm = self.system.quantitive_data.df
models = self.system.line_models_data.models
remove_rep = self.system.parameters_data.remove_rep
sample_rep = self.system.parameters_data.sample_rep
confidence = self.system.parameters_data.confidence
stat_mode = self.ui.comboBoxStatModel.currentIndex()
ind = self.ui.comboBoxNormalizationAlgorithm.currentIndex()
norm_alog = self.system.parameters_data.normalization_algorithms[ind]
apply_remove_mask = self.ui.checkSelectBest.isChecked()
wp = WaitPopup(self.system)
try:
wp.start()
res = explib.full_analyze(df_ref, df_target, models, remove_rep, sample_rep, stat_mode, confidence, df_ref_norm, norm_alog)
except:
import traceback
print(traceback.format_exc())
res = None
helpers.popup("Run ERROR", "Error occured during analyze step,\npleas double check you input data", "critical")
wp.calculation_finished()
if res is not None:
self.system.results_data.set_results(res, apply_remove_mask)
else:
self.system.results_data.clear()
def export_results(self):
export_dfs = self.system.results_data.get_all_df()
if export_dfs is None:
return None
filename, filter = QFileDialog.getSaveFileName(
parent=self, caption='Select result file', dir='.',
filter='Microsoft Excel 2007-2013 XML (*.xlsx)')
if filename == "":
return None
if filename.split(".")[-1] != "xlsx":
filename = filename + ".xlsx"
explib.full_excel_export(filename, export_dfs)
def export_graphs(self):
export_dfs = self.system.results_data.get_all_df()
if export_dfs is None:
return
dir_name = QFileDialog.getExistingDirectory(
parent=self.system.mainWindowApp,
caption='Select directory to save generated graphs',
dir='.')
if dir_name == "":
return None
RQ_df = export_dfs[2]
stat_df = export_dfs[3]
wp = WaitPopup(self.system)
wp.start()
explib.create_boxplots(RQ_df, stat_df, dir_name)
try:
pass
except Exception as e:
helpers.popup("Graph export ERROR", "Error occured during graph export", 'critical')
print(e)
wp.calculation_finished()
    def update_gui(self):
        """Synchronize widget enabled states and labels with the current data model.

        Called after any data or parameter change; the run button is enabled
        only when both input data and parameters are valid.
        """
        input_valid = self.system.input_valid()
        if input_valid:
            self.ui.buttonShowRef.setEnabled(True)
            self.ui.buttonShowTarget.setEnabled(True)
            self.ui.buttonShowModels.setEnabled(True)
        else:
            self.ui.buttonShowRef.setEnabled(False)
            self.ui.buttonShowTarget.setEnabled(False)
            self.ui.buttonShowModels.setEnabled(False)
            # NOTE: the quantitive button/label reset here may be overridden by
            # the quanta_valid branch just below -- the statement order matters.
            self.ui.buttonShowQuantitive.setEnabled(False)
            self.ui.labelQantitive.setText("Not loaded")
        quanta_valid = self.system.quantitive_data.is_valid()
        if quanta_valid:
            self.ui.buttonShowQuantitive.setEnabled(True)
            self.ui.buttonRemoveQuantitive.setEnabled(True)
            self.ui.labelQantitive.setText("Loaded!")
        else:
            self.ui.buttonRemoveQuantitive.setEnabled(False)
        parameters_valid = self.system.parameters_data.is_valid()
        if (input_valid and parameters_valid):
            self.ui.buttonRunCalculations.setEnabled(True)
        else:
            self.ui.buttonRunCalculations.setEnabled(False)
        results_valid = self.system.results_data.is_valid()
        if results_valid:
            self.ui.groupBoxResultsSummary.setEnabled(True)
            #self.ui.labelCoherenceScore.setText("{0:.3f}".format(self.system.results_data.get_summary_coherence()))
            self.coherence_score_model.setCoherence(self.system.results_data.df_coherence)
        else:
            self.ui.groupBoxResultsSummary.setEnabled(False)
| StarcoderdataPython |
# tests/testflows/rbac/tests/syntax/alter_user.py
import hashlib
from contextlib import contextmanager
from testflows.core import *
import rbac.helper.errors as errors
from rbac.requirements import *
@TestFeature
@Name("alter user")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
    """Check alter user query syntax.

    ```sql
    ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
        [RENAME TO new_name]
        [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]
        [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
        [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
        [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
    ```
    """
    node = self.context.cluster.node(node)

    @contextmanager
    def setup(user):
        # Create a throwaway user for one scenario and always drop it afterwards.
        try:
            with Given("I have a user"):
                node.query(f"CREATE USER OR REPLACE {user}")
            yield
        finally:
            with Finally("I drop the user", flags=TE):
                node.query(f"DROP USER IF EXISTS {user}")

    with Scenario("I alter user, base command", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter("1.0")]):
        with setup("user0"):
            with When("I alter user"):
                node.query("ALTER USER user0")

    with Scenario("I alter user that does not exist without if exists, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter("1.0")]):
        with When("I run alter user command, expecting error 192"):
            exitcode, message = errors.user_not_found_in_disk(name="user0")
            node.query(f"ALTER USER user0", exitcode=exitcode, message=message)

    with Scenario("I alter user with if exists", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]):
        with setup("user0"):
            with When(f"I alter user with if exists"):
                node.query(f"ALTER USER IF EXISTS user0")

    with Scenario("I alter user that does not exist with if exists", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]):
        user = "user0"
        with Given("I don't have a user"):
            node.query(f"DROP USER IF EXISTS {user}")
        with When(f"I alter user {user} with if exists"):
            node.query(f"ALTER USER IF EXISTS {user}")
        del user

    with Scenario("I alter user on a cluster", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]):
        with Given("I have a user on a cluster"):
            node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster")
        with When("I alter user on a cluster"):
            node.query("ALTER USER user0 ON CLUSTER sharded_cluster")
        with Finally("I drop user from cluster"):
            node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster")

    with Scenario("I alter user on a fake cluster, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]):
        with When("I alter user on a fake cluster"):
            exitcode, message = errors.cluster_not_found("fake_cluster")
            node.query("ALTER USER user0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message)

    with Scenario("I alter user to rename, target available", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]):
        with setup("user15"):
            with When("I alter user name"):
                node.query("ALTER USER user15 RENAME TO user15")

    with Scenario("I alter user to rename, target unavailable", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]):
        with setup("user15"):
            new_user = "user16"
            try:
                with Given(f"Ensure target name {new_user} is NOT available"):
                    node.query(f"CREATE USER IF NOT EXISTS {new_user}")
                with When(f"I try to rename to {new_user}"):
                    exitcode, message = errors.cannot_rename_user(name="user15", name_new=new_user)
                    node.query(f"ALTER USER user15 RENAME TO {new_user}", exitcode=exitcode, message=message)
            finally:
                with Finally(f"I cleanup target name {new_user}"):
                    node.query(f"DROP USER IF EXISTS {new_user}")
            del new_user

    with Scenario("I alter user password plaintext password", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Password_PlainText("1.0")]):
        with setup("user1"):
            with When("I alter user with plaintext password"):
                node.query("ALTER USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY '<PASSWORD>'", step=When)

    with Scenario("I alter user password to sha256", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password("1.0")]):
        with setup("user2"):
            with When("I alter user with sha256_password"):
                # FIX: this line was corrupted in the source (redaction residue,
                # not valid Python); reconstructed to hash the password with
                # SHA-256, mirroring the DOUBLE_SHA1 scenario below.
                password = hashlib.sha256("password".encode("utf-8")).hexdigest()
                node.query(f"ALTER USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'", step=When)

    with Scenario("I alter user password to double_sha1_password", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password("1.0")]):
        with setup("user3"):
            with When("I alter user with double_sha1_password"):
                def hash(password):
                    return hashlib.sha1(password.encode("utf-8")).hexdigest()
                password = hash(hash("<PASSWORD>"))
                node.query(f"ALTER USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY '{password}'", step=When)

    with Scenario("I alter user host local", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Local("1.0")]):
        with setup("user4"):
            with When("I alter user with host local"):
                node.query("ALTER USER user4 HOST LOCAL")

    with Scenario("I alter user host name", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Name("1.0")]):
        with setup("user5"):
            with When("I alter user with host name"):
                node.query("ALTER USER user5 HOST NAME 'localhost', NAME 'clickhouse.com'")

    with Scenario("I alter user host regexp", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Regexp("1.0")]):
        with setup("user6"):
            with When("I alter user with host regexp"):
                node.query("ALTER USER user6 HOST REGEXP 'lo..*host', 'lo*host'")

    with Scenario("I alter user host ip", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_IP("1.0")]):
        with setup("user7"):
            with When("I alter user with host ip"):
                node.query("ALTER USER user7 HOST IP '127.0.0.1', IP '127.0.0.2'")

    with Scenario("I alter user host like", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]):
        with setup("user8"):
            with When("I alter user with host like"):
                node.query("ALTER USER user8 HOST LIKE '%.clickhouse.com'")

    with Scenario("I alter user host any", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Any("1.0")]):
        with setup("user9"):
            with When("I alter user with host any"):
                node.query("ALTER USER user9 HOST ANY")

    with Scenario("I alter user host many hosts", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]):
        with setup("user11"):
            with When("I alter user with multiple hosts"):
                node.query("ALTER USER user11 HOST LIKE '%.clickhouse.com', \
                    IP '127.0.0.2', NAME 'localhost', REGEXP 'lo*host'")

    with Scenario("I alter user default role set to none", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_None("1.0")]):
        with setup("user12"):
            with When("I alter user with default role none"):
                node.query("ALTER USER user12 DEFAULT ROLE NONE")

    with Scenario("I alter user default role set to all", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole_All("1.0")]):
        with setup("user13"):
            with When("I alter user with all roles set to default"):
                node.query("ALTER USER user13 DEFAULT ROLE ALL")

    @contextmanager
    def setup_role(role):
        # Create a throwaway role for one scenario and always drop it afterwards.
        try:
            with Given(f"I have a role {role}"):
                node.query(f"CREATE ROLE OR REPLACE {role}")
            yield
        finally:
            with Finally(f"I drop the role {role}", flags=TE):
                node.query(f"DROP ROLE IF EXISTS {role}")

    with Scenario("I alter user default role", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]):
        with setup("user14"), setup_role("role2"):
            with Given("I have a user with a role"):
                node.query("GRANT role2 TO user14")
            with When("I alter user default role"):
                node.query("ALTER USER user14 DEFAULT ROLE role2")

    with Scenario("I alter user default role, setting default role", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]):
        with setup("user14a"), setup_role("default"):
            with Given("I grant default role to the user"):
                node.query("GRANT default TO user14a")
            with When("I alter user default role"):
                node.query("ALTER USER user14a DEFAULT ROLE default")

    with Scenario("I alter user default role, role doesn't exist, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]):
        with setup("user12"):
            role = "role0"
            with Given(f"I ensure that role {role} does not exist"):
                node.query(f"DROP ROLE IF EXISTS {role}")
            with When(f"I alter user with default role {role}"):
                exitcode, message = errors.role_not_found_in_disk(role)
                node.query(f"ALTER USER user12 DEFAULT ROLE {role}", exitcode=exitcode, message=message)
            del role

    with Scenario("I alter user default role, all except role doesn't exist, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]):
        with setup("user12"):
            role = "role0"
            with Given(f"I ensure that role {role} does not exist"):
                node.query(f"DROP ROLE IF EXISTS {role}")
            with When(f"I alter user with default role {role}"):
                exitcode, message = errors.role_not_found_in_disk(role)
                node.query(f"ALTER USER user12 DEFAULT ROLE ALL EXCEPT {role}", exitcode=exitcode, message=message)
            del role

    with Scenario("I alter user default role multiple", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]):
        with setup("user15"), setup_role("second"), setup_role("third"):
            with Given("I have a user with multiple roles"):
                node.query("GRANT second,third TO user15")
            with When("I alter user default role to second, third"):
                node.query("ALTER USER user15 DEFAULT ROLE second, third")

    with Scenario("I alter user default role set to all except", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]):
        with setup("user16"), setup_role("second"):
            with Given("I have a user with a role"):
                node.query("GRANT second TO user16")
            with When("I alter user default role"):
                node.query("ALTER USER user16 DEFAULT ROLE ALL EXCEPT second")

    with Scenario("I alter user default role multiple all except", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]):
        with setup("user17"), setup_role("second"), setup_role("third"):
            with Given("I have a user with multiple roles"):
                node.query("GRANT second,third TO user17")
            with When("I alter user default role to all except second"):
                node.query("ALTER USER user17 DEFAULT ROLE ALL EXCEPT second")

    with Scenario("I alter user settings profile", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), \
            RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]):
        with setup("user18"):
            try:
                with Given("I have a profile"):
                    node.query(f"CREATE SETTINGS PROFILE profile10")
                with When("I alter user with settings and set profile to profile1"):
                    node.query("ALTER USER user18 SETTINGS PROFILE profile10, max_memory_usage = 100 MIN 0 MAX 1000 READONLY")
            finally:
                with Finally("I drop the profile"):
                    node.query(f"DROP SETTINGS PROFILE profile10")

    with Scenario("I alter user settings profile, fake profile, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Settings("1.0"),
            RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]):
        with setup("user18a"):
            profile = "profile0"
            with Given(f"I ensure that profile {profile} does not exist"):
                node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}")
            with When(f"I alter user with Settings and set profile to fake profile {profile}"):
                exitcode, message = errors.settings_profile_not_found_in_disk(profile)
                node.query("ALTER USER user18a SETTINGS PROFILE profile0", exitcode=exitcode, message=message)
            del profile

    with Scenario("I alter user settings with a fake setting, throws exception", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Settings("1.0")]):
        with setup("user18b"):
            with When("I alter settings profile using settings and nonexistent value"):
                exitcode, message = errors.unknown_setting("fake_setting")
                node.query("ALTER USER user18b SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message)

    with Scenario("I alter user settings without profile (no equals)", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Settings("1.0"),
            RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"),
            RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]):
        with setup("user19"):
            with When("I alter user with settings without profile using no equals"):
                node.query("ALTER USER user19 SETTINGS max_memory_usage=10000000 MIN 100000 MAX 1000000000 READONLY")

    # equals sign (=) syntax verify
    with Scenario("I alter user settings without profile (yes equals)", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Settings("1.0"),
            RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"),
            RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]):
        with setup("user20"):
            with When("I alter user with settings without profile using equals"):
                node.query("ALTER USER user20 SETTINGS max_memory_usage=10000000 MIN=100000 MAX=1000000000 READONLY")

    # Add requirement to host: add/drop
    with Scenario("I alter user to add host", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]):
        with setup("user21"):
            with When("I alter user by adding local host"):
                node.query("ALTER USER user21 ADD HOST LOCAL")
            with And("I alter user by adding no host"):
                node.query("ALTER USER user21 ADD HOST NONE")
            with And("I alter user by adding host like"):
                node.query("ALTER USER user21 ADD HOST LIKE 'local%'")
            with And("I alter user by adding host ip"):
                node.query("ALTER USER user21 ADD HOST IP '127.0.0.1'")
            with And("I alter user by adding host name"):
                node.query("ALTER USER user21 ADD HOST NAME 'localhost'")

    with Scenario("I alter user to remove host", flags=TE, requirements=[
            RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]):
        with setup("user22"):
            with When("I alter user by removing local host"):
                node.query("ALTER USER user22 DROP HOST LOCAL")
            with And("I alter user by removing no host"):
                node.query("ALTER USER user22 DROP HOST NONE")
            with And("I alter user by removing like host"):
                node.query("ALTER USER user22 DROP HOST LIKE 'local%'")
            with And("I alter user by removing host ip"):
                node.query("ALTER USER user22 DROP HOST IP '127.0.0.1'")
            with And("I alter user by removing host name"):
                node.query("ALTER USER user22 DROP HOST NAME 'localhost'")
| StarcoderdataPython |
# gh_stars: 100-1000
# python 2/3 interoperability
from __future__ import division
from __future__ import print_function
import os
import sys
import ctypes
# Getting memory info, based on
# http://stackoverflow.com/a/2017659/6621667 and
# https://doeidoei.wordpress.com/2009/03/22/python-tip-3-checking-available-ram-with-python/
class MemoryStatus(ctypes.Structure):
    """ctypes mirror of the Win32 MEMORYSTATUSEX structure.

    Passed by reference to kernel32.GlobalMemoryStatusEx; field order and
    types must match the C layout exactly, so do not reorder `_fields_`.
    """
    _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
    ]
    def __init__(self):
        # MEMORYSTATUSEX requires dwLength to hold the structure size before
        # GlobalMemoryStatusEx is called; set it up front.
        self.dwLength = ctypes.sizeof(self)
        super(MemoryStatus, self).__init__()
def platform_free_memory():
    """Return free/available physical memory, in gigabytes.

    Uses kernel32.GlobalMemoryStatusEx on Windows, psutil on macOS, and
    parses ``free -m`` output elsewhere.

    Raises:
        ImportError: on macOS when psutil is not installed.
        ValueError: when the ``free -m`` output has an unrecognized header.
    """
    if sys.platform == 'win32':
        memory_status = MemoryStatus()
        ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(memory_status))
        mem = memory_status.ullAvailPhys / (1024**3)  # return in gigabytes
    elif sys.platform == 'darwin':
        try:
            import psutil
        except ImportError:
            # BUG FIX: the old code only printed a hint and then crashed with a
            # NameError on the next line; raise a meaningful error instead.
            raise ImportError('Please, install psutil.')
        # BUG FIX: psutil.virtual_memory is a function and must be called; the
        # old code read `.free` off the function object itself.
        memory_status = psutil.virtual_memory()
        mem = memory_status.free / (1024**3)  # return in gigabytes
    else:
        memory_status = os.popen("free -m").readlines()
        if memory_status[0].split()[2].lower() == 'free':
            # Column 3 of the "Mem:" row is the free amount in megabytes.
            mem = int(memory_status[1].split()[3]) / 1024  # return in gigabytes
        else:
            raise ValueError('Unrecognized memory info')
    return mem
def get_available_memory():
    """Return the amount of available memory, preferring psutil when installed.

    NOTE(review): the psutil path returns *bytes* while the
    platform_free_memory fallback returns *gigabytes* -- callers receive
    inconsistent units depending on whether psutil is installed; confirm
    which unit is intended.
    """
    try:
        import psutil  # preferred way
        res = psutil.virtual_memory().available
    except ImportError:
        res = platform_free_memory()
    return res
| StarcoderdataPython |
# repo: dynalz/coredis
import pytest
import coredis
@pytest.fixture
def s(redis_cluster_server):
    """Yield a fresh cluster client whose node/slot caches start empty."""
    client = coredis.StrictRedisCluster(
        startup_nodes=[{"host": "localhost", "port": 7000}]
    )
    # A brand-new client must not have discovered any topology yet.
    assert client.connection_pool.nodes.slots == {}
    assert client.connection_pool.nodes.nodes == {}
    yield client
    client.connection_pool.disconnect()
@pytest.fixture
def sr(redis_cluster_server):
    """Yield a cluster client that re-initializes its node table every step."""
    client = coredis.StrictRedisCluster(
        startup_nodes=[{"host": "localhost", "port": 7000}], reinitialize_steps=1
    )
    yield client
    client.connection_pool.disconnect()
@pytest.fixture
def ro(redis_cluster_server):
    """Yield a read-only cluster client."""
    client = coredis.StrictRedisCluster(
        startup_nodes=[{"host": "localhost", "port": 7000}], readonly=True
    )
    yield client
    client.connection_pool.disconnect()
| StarcoderdataPython |
# repo: lapaniku/GAS (gh_stars: 10-100)
# This program was generated by "Generative Art Synthesizer"
# Generation date: 2021-11-28 09:21:40 UTC
# GAS change date: 2021-11-28 09:20:21 UTC
# GAS md5 hash: ad55481e87ca5a7e9a8e92cd336d1cad
# Python version: 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# For more information visit: https://github.com/volotat/GAS
#import python libraries
import os #OS version: default
import numpy as np #Numpy version: 1.19.5
from PIL import Image #PIL version: 8.1.2
#set initial params
SIZE = 768
GRID_CHANNELS = 16
def test_values(arr):
if np.isnan(arr).any():
raise Exception('Array has None elements!')
if np.amin(arr) < -1 or np.amax(arr) > 1:
raise Exception('Values went to far! [ %.2f : %.2f ]'%(np.amin(arr), np.amax(arr)) )
return arr
#define grid transformation methods
def transit(x, t_indx, s_indx, alphas):
    """Write a weighted sum of the s_indx channels (weights: alphas) into t_indx.

    The whole result is clipped to [-1, 1] before validation.
    """
    out = x.copy()
    blended = np.sum(x[:,:,s_indx] * alphas, axis = -1)
    out[:,:,t_indx] = blended
    return test_values(out.clip(-1,1))
def sin(x, t_indx, s_indx, scale = 1, shift = 0):
    """Set channel t_indx to sin(source * pi/2 * scale + shift).

    The output is naturally bounded in [-1, 1] by the sine.
    """
    out = x.copy()
    out[:,:,t_indx] = np.sin(x[:,:,s_indx] * 0.5 * np.pi * scale + shift)
    return test_values(out)
def magnitude(x, t_indx, s_indx, ord = 2):
    """Set channel t_indx to the per-pixel norm of the s_indx channels.

    Divided by sqrt(len(s_indx)) so the result stays within [-1, 1]
    (for the default 2-norm on inputs in that range).
    """
    out = x.copy()
    out[:,:,t_indx] = np.linalg.norm(x[:,:,s_indx], axis = -1, ord = ord) / np.sqrt(len(s_indx))
    return test_values(out)
def shift(x, t_indx, s_indx, shift):
    """Remap channel s_indx into t_indx with a sign-dependent power curve.

    Values are mapped from [-1, 1] onto [0, 1], bent by a power curve whose
    exponent depends on the sign of `shift`, then mapped back to [-1, 1].
    A shift of exactly 0 leaves the array unchanged (neither branch runs).
    """
    out = x.copy()
    if shift > 0:
        out[:,:,t_indx] = (-np.abs(((x[:,:,s_indx] + 1) / 2) ** (1 + shift) - 1) ** (1 / (1 + shift)) + 1) * 2 - 1
    elif shift < 0:
        out[:,:,t_indx] = np.abs((1 - (x[:,:,s_indx] + 1) / 2) ** (1 - shift) - 1) ** (1 / (1 - shift)) * 2 - 1
    return test_values(out)
def inverse(x, t_indx, s_indx):
    """Store the negation of channel s_indx into channel t_indx."""
    out = x.copy()
    out[:,:,t_indx] = -x[:,:,s_indx]
    return test_values(out)
def smooth_max(x, t_indx, s1_indx, s2_indx, p = 10):
    """Soft (log-sum-exp style) maximum of channels s1_indx and s2_indx.

    Larger p sharpens the approximation; the 1.07 divisor keeps the
    result within [-1, 1].
    """
    out = x.copy()
    out[:,:,t_indx] = np.log((np.exp(x[:,:,s1_indx] * p) + np.exp(x[:,:,s2_indx] * p)) ** (1/p)) / 1.07
    return test_values(out)
def smooth_min(x, t_indx, s1_indx, s2_indx, p = 10):
    """Soft (log-sum-exp style) minimum of channels s1_indx and s2_indx.

    Mirror image of smooth_max: negate, soft-max, negate back.
    """
    out = x.copy()
    out[:,:,t_indx] = -np.log((np.exp(-x[:,:,s1_indx] * p) + np.exp(-x[:,:,s2_indx] * p)) ** (1/p)) / 1.07
    return test_values(out)
def prod(x, t_indx, s_indx):
    """Store the elementwise product of the s_indx channels into t_indx."""
    out = x.copy()
    out[:,:,t_indx] = np.prod(x[:,:,s_indx], -1)
    return test_values(out)
def power(x, t_indx, s_indx, p = 1):
    """Sign-preserving power map: t_indx = sign(s) * |s| ** p."""
    out = x.copy()
    out[:,:,t_indx] = np.sign(x[:,:,s_indx]) * np.abs(x[:,:,s_indx]) ** p
    return test_values(out)
#set initial grid
grid = np.zeros((SIZE, SIZE, GRID_CHANNELS))
x = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((1, SIZE)).repeat(SIZE, 0)
y = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((SIZE, 1)).repeat(SIZE, 1)
grid[:,:,0] = (x * 0.9386329219527516 + y * -0.45147169454413794) / 2
grid[:,:,1] = (x * 0.8090860808441245 + y * 0.2914526739617249) / 2
grid[:,:,2] = (x * 0.9804797761207309 + y * -0.5063344373124843) / 2
grid[:,:,3] = (x * -0.8484277738516293 + y * -0.5155435342135386) / 2
grid[:,:,4] = (x * -0.6644350461377522 + y * 0.1739322518414499) / 2
grid[:,:,5] = (x * -0.5986715486203882 + y * 0.9515468928881716) / 2
grid[:,:,6] = (x * 0.2265055481768512 + y * 0.4365452266748293) / 2
grid[:,:,7] = (x * 0.5049774961793401 + y * 0.05113255120007798) / 2
grid[:,:,8] = (x * -0.3391983246964396 + y * -0.5135707069423852) / 2
grid[:,:,9] = (x * -0.4075423366723827 + y * 0.5388833863473126) / 2
grid[:,:,10] = (x * -0.4262457935185371 + y * -0.6817079327248272) / 2
grid[:,:,11] = (x * 0.8435706697714382 + y * 0.7746597063144072) / 2
grid[:,:,12] = (x * -0.5303146721156469 + y * -0.41048419195488317) / 2
grid[:,:,13] = (x * -0.5864100240508576 + y * -0.9425245660964123) / 2
grid[:,:,14] = (x * -0.7665883618456049 + y * -0.3867357840809138) / 2
grid[:,:,15] = (x * 0.49037959172682255 + y * -0.7671554143072785) / 2
#apply transformations to the grid
grid = transit(grid, 4, [7, 6, 12, 8, 9, 0, 1], [0.05863158300898051, 0.3467981515651057, 0.262107802795733, 0.038001653167336905, 0.2112967596903696, 0.002128256606899112, 0.08103579316557531])
grid = shift(grid, 3, 3, 2.4622222565241207)
grid = sin(grid, 10, 0, 0.5112825397666086, 37.95950546335726)
grid = sin(grid, 12, 13, 3.6938747278005737, 76.37702042567852)
grid = magnitude(grid, 15, [5, 3, 8, 0, 15], 2)
grid = prod(grid, 2, [3, 11, 1])
grid = smooth_min(grid, 3, 2, 7)
grid = smooth_max(grid, 8, 10, 6)
grid = prod(grid, 3, [2, 6, 10, 7, 4])
grid = smooth_min(grid, 7, 12, 0)
grid = transit(grid, 2, [1, 2], [0.9078557995211777, 0.09214420047882232])
grid = smooth_max(grid, 1, 0, 1)
grid = sin(grid, 9, 4, 3.0281102269529683, 11.185401112275173)
grid = sin(grid, 10, 4, 1.2844464834351186, -45.836492724169695)
grid = sin(grid, 1, 2, -1.5301674594368837, -60.29431568717391)
grid = transit(grid, 2, [13, 11, 5], [0.421270391024163, 0.5054038923567993, 0.07332571661903758])
grid = transit(grid, 11, [1, 15, 5, 0, 6, 12, 2, 7, 4], [0.03047869593495055, 0.024092687676923453, 0.02665655056773558, 0.17667886361751853, 0.15211061797378253, 0.016462544099609754, 0.0072484377164178625, 0.4477791048998878, 0.11849249751317383])
grid = transit(grid, 10, [5, 11, 15, 8, 2, 13, 12, 3, 6], [0.1020239434902293, 0.05405846145210329, 0.11525379082942891, 0.11556721863292163, 0.12372657123165616, 0.1356897031789931, 0.20047556686480725, 0.09921434949484752, 0.05399039482501285])
grid = transit(grid, 9, [5], [1.0])
grid = transit(grid, 15, [12, 0, 1, 11], [0.01847979792505241, 0.33442336387003857, 0.15192425697494277, 0.4951725812299663])
grid = sin(grid, 4, 8, 3.386521226555936, 60.95572898751007)
grid = shift(grid, 14, 2, 2.55681173849493)
grid = sin(grid, 10, 14, 0.8649185298731181, 3.1973516320924773)
grid = sin(grid, 9, 7, -2.4657577404884132, 72.95418196004374)
grid = transit(grid, 12, [7, 4, 10, 5], [0.5076634403621766, 0.003404332378773421, 0.04142944289977586, 0.4475027843592742])
grid = inverse(grid, 4, 5)
grid = transit(grid, 1, [4, 14, 0, 13], [0.2785496566747933, 0.004915230889640017, 0.30146401859790545, 0.4150710938376613])
grid = sin(grid, 3, 11, -6.496603906160505, -73.75617586359363)
grid = transit(grid, 6, [6, 14], [0.7201753385758813, 0.2798246614241187])
grid = prod(grid, 4, [10, 0, 2, 4, 8, 5, 6, 7])
grid = transit(grid, 8, [3], [1.0])
grid = inverse(grid, 8, 5)
grid = smooth_max(grid, 10, 5, 13)
grid = sin(grid, 9, 10, -1.8565532127479274, -54.75186223635349)
grid = transit(grid, 10, [14], [1.0])
grid = transit(grid, 15, [11, 4, 10], [0.6926745567135898, 0.1831142410590532, 0.12421120222735695])
grid = magnitude(grid, 7, [6, 12, 7, 13, 8], 2)
grid = transit(grid, 8, [3, 15, 9, 6, 11], [0.036102265915692405, 0.1224495166624379, 0.2384660328868578, 0.3357862916746864, 0.2671958928603256])
grid = smooth_min(grid, 1, 1, 11)
grid = transit(grid, 5, [11, 4, 2, 1, 13, 12, 0, 8], [0.08486049729383285, 0.15069099224942706, 0.024923245737924458, 0.07191051851248272, 0.25942601829807205, 0.16834508849259286, 0.14540219911263502, 0.094441440303033])
grid = transit(grid, 11, [12], [1.0])
grid = power(grid, 3, 5, 0.10200689258338674)
grid = transit(grid, 2, [10, 11, 4, 15, 0, 6], [0.24973877983541862, 0.3378766591098989, 0.15974656746239488, 0.027776085211312595, 0.02330072841260748, 0.20156117996836745])
grid = smooth_min(grid, 0, 5, 1)
grid = magnitude(grid, 0, [5, 0], 2)
grid = transit(grid, 6, [15, 8], [0.5303803951305812, 0.4696196048694189])
grid = inverse(grid, 0, 0)
grid = magnitude(grid, 13, [8], 2)
grid = transit(grid, 13, [15, 5, 9, 4, 6, 12], [0.18067242214638962, 0.12939497982917472, 0.08164480089591167, 0.24583958083442445, 0.2244518823086713, 0.13799633398542827])
grid = transit(grid, 11, [0], [1.0])
grid = magnitude(grid, 0, [4, 13], 2)
grid = transit(grid, 8, [5, 4, 15, 6, 14, 0, 3, 11], [0.13835365002720226, 0.008781149737259792, 0.24627334258742545, 0.04870190081124998, 0.049950480577274, 0.15123046752435387, 0.31255198044446264, 0.04415702829077187])
grid = transit(grid, 1, [3], [1.0])
grid = magnitude(grid, 14, [4], 2)
grid = sin(grid, 1, 5, 8.18216846853571, -6.729427492311089)
grid = magnitude(grid, 11, [8, 2], 2)
grid = transit(grid, 7, [12, 11, 13, 4], [0.1713900685471786, 0.14082681623065177, 0.19859698568682838, 0.4891861295353413])
grid = transit(grid, 13, [12, 15, 9, 2, 0, 1, 5], [0.18796556626817826, 0.19260744772691155, 0.11226112831146452, 0.08161640805634696, 0.08706050582840198, 0.2243337708440404, 0.11415517296465624])
grid = sin(grid, 11, 13, -6.909579361872105, 70.84834564082374)
grid = transit(grid, 2, [11, 7, 13], [0.3629247592109436, 0.10073172896374764, 0.5363435118253088])
grid = sin(grid, 1, 5, 0.6814927249849106, 30.75954926767548)
grid = inverse(grid, 8, 7)
grid = prod(grid, 10, [5, 2])
grid = transit(grid, 15, [0, 3], [0.29345909580747953, 0.7065409041925205])
grid = sin(grid, 12, 4, -1.6398586072056767, 84.51374680259704)
grid = sin(grid, 1, 1, -0.183401440709518, -88.40242580975152)
grid = transit(grid, 12, [3, 13, 2, 9, 0], [0.24803411847529433, 0.2425397323068922, 0.0904752958055755, 0.11683555248582808, 0.30211530092641004])
grid = sin(grid, 5, 2, -2.2972705471452146, -12.522748365129786)
grid = smooth_min(grid, 12, 9, 11)
grid = sin(grid, 4, 15, -1.9527829039221054, 20.537776250912316)
grid = transit(grid, 7, [11, 9, 2], [0.5001532946669459, 0.42070604285213226, 0.07914066248092186])
grid = inverse(grid, 5, 12)
grid = sin(grid, 10, 2, 0.9155140652310594, -34.1653400637653)
grid = transit(grid, 8, [14], [1.0])
grid = transit(grid, 4, [1, 12, 15, 13, 3], [0.32356965941479515, 0.022696478437764827, 0.2132573540073865, 0.11957266769813353, 0.3209038404419199])
grid = transit(grid, 6, [1, 7, 0, 2, 9, 4, 8], [0.06904450551777742, 0.12680650314665426, 0.1756104206123629, 0.013987480750913602, 0.1337935702206657, 0.39097327478734406, 0.08978424496428203])
grid = smooth_min(grid, 9, 9, 10)
grid = shift(grid, 8, 1, -0.2952350240798842)
grid = sin(grid, 11, 6, 1.576100090732909, -21.508000199215132)
grid = shift(grid, 11, 5, 1.0526879494498724)
grid = transit(grid, 1, [14], [1.0])
grid = transit(grid, 8, [9, 10, 2, 15, 13], [0.3265190472987195, 0.21568397721657098, 0.06226802479442838, 0.0028158122366541832, 0.39271313845362693])
grid = magnitude(grid, 11, [13, 10, 12, 2, 11, 14], 2)
grid = transit(grid, 12, [8, 11, 3], [0.2717231795161624, 0.38648847983305307, 0.3417883406507845])
grid = transit(grid, 15, [7, 3], [0.9172074355564371, 0.08279256444356292])
grid = transit(grid, 13, [1, 2, 7, 5, 8, 9, 15], [0.085742434722219, 0.4119764535375412, 0.08377067725345017, 0.13045782410775286, 0.02917564277599849, 0.12489006625007311, 0.13398690135296518])
grid = transit(grid, 2, [2, 0, 11, 10, 5, 4, 15, 13], [0.1869735689344564, 0.06343641920215143, 0.038951322931441136, 0.04613309733662021, 0.19750663742298355, 0.16072124228620793, 0.15869932715876592, 0.14757838472737334])
grid = transit(grid, 2, [1, 7], [0.18247956114317448, 0.8175204388568255])
grid = transit(grid, 8, [11, 15, 0], [0.08195235243098883, 0.6796005904358621, 0.23844705713314918])
grid = power(grid, 14, 0, 0.10854801586669052)
grid = shift(grid, 8, 9, 2.766857264282361)
grid = transit(grid, 3, [6, 14, 0, 3, 15, 4, 2, 11, 13], [0.03597236183123865, 0.04938629068404894, 0.08457069101219464, 0.014801187461296406, 0.3649334871683411, 0.28062233683539095, 0.08637063851194285, 0.06076815802338077, 0.022574848472165728])
grid = transit(grid, 4, [11, 4, 15, 10, 8, 5, 2, 3], [0.23701292672659616, 0.08316792464084911, 0.017867439461611043, 0.36417402420248035, 0.02841485585755143, 0.19916101840344472, 0.03422984110049058, 0.03597196960697647])
grid = magnitude(grid, 13, [11, 7], 2)
grid = sin(grid, 4, 8, 4.28026157040775, -75.14180284322572)
grid = prod(grid, 3, [14, 15])
grid = inverse(grid, 5, 5)
grid = transit(grid, 4, [8, 4, 15, 9, 10], [0.10267794314653868, 0.019022820046952493, 0.061606568183823145, 0.4832751235896067, 0.33341754503307897])
grid = transit(grid, 13, [10, 8, 9, 12, 2], [0.031587088727564654, 0.024264739611302585, 0.0306940545567164, 0.19611241111174804, 0.7173417059926683])
grid = transit(grid, 0, [7, 1, 11, 0, 15], [0.036901331671075975, 0.5054281720479712, 0.13288430351514774, 0.10820806749406277, 0.21657812527174225])
grid = transit(grid, 3, [7, 3, 12, 9], [0.13643904772292245, 0.38438336340747, 0.15936221296996333, 0.31981537589964426])
grid = sin(grid, 10, 3, -2.5681840787633137, -30.256455817944243)
grid = sin(grid, 8, 2, 3.501615294498545, -75.50049353340206)
grid = prod(grid, 9, [1, 4, 0, 6])
grid = transit(grid, 8, [9, 3], [0.30088974760959275, 0.6991102523904072])
grid = transit(grid, 8, [2, 11, 15, 4, 1, 0, 14], [0.29712982335534416, 0.2526657169525107, 0.08415696601637544, 0.18541009701166816, 0.011062110917544764, 0.017334502896306194, 0.1522407828502505])
grid = prod(grid, 2, [8, 7, 11, 10, 15, 0, 5])
grid = transit(grid, 11, [7, 2, 3, 9, 5], [0.24039798004748805, 0.2886075990223525, 0.18742374307846998, 0.11615833154358073, 0.16741234630810867])
grid = prod(grid, 0, [0, 1, 2, 14])
grid = prod(grid, 9, [10, 11, 8, 15, 0, 12, 3])
grid = transit(grid, 13, [5, 15, 10], [0.13237609957996088, 0.22944646977966682, 0.6381774306403722])
grid = transit(grid, 6, [15], [1.0])
grid = sin(grid, 15, 0, -0.033265790773207085, 51.94880270063618)
grid = smooth_min(grid, 13, 10, 15)
grid = transit(grid, 1, [12, 8, 10, 4, 2], [0.43102537693091664, 0.25433300797798253, 0.21618454566402304, 0.046743011673522995, 0.05171405775355483])
grid = sin(grid, 10, 10, 0.9558311639914843, -47.618914508652054)
grid = shift(grid, 9, 8, -1.1449289879251126)
grid = transit(grid, 7, [4, 10, 1, 13, 5, 0, 7, 8, 9, 12, 6, 11, 14], [0.10006330804326793, 0.03891760159161208, 0.005474465860804227, 0.12962618248625338, 0.03090992138168193, 0.016043163973997736, 0.13259375374543056, 0.09920705802758992, 0.1415090600653345, 0.09597789664069131, 0.06106766497801195, 0.14032187015082653, 0.008288053054498123])
grid = prod(grid, 15, [12, 15])
grid = prod(grid, 8, [11, 7, 4, 12])
grid = transit(grid, 7, [15, 6, 2, 7], [0.45073658968521574, 0.16060948991238613, 0.12949271785123345, 0.2591612025511646])
grid = transit(grid, 10, [11, 4, 2, 8, 14], [0.3705316303566195, 0.1755951969700656, 0.043989590834687294, 0.22866693087969006, 0.1812166509589377])
grid = sin(grid, 4, 2, -3.329894296119046, -76.41676919069447)
grid = smooth_min(grid, 11, 8, 12)
grid = transit(grid, 1, [1, 14, 8], [0.38986786543390084, 0.40057743619803005, 0.20955469836806906])
grid = transit(grid, 9, [5], [1.0])
grid = shift(grid, 9, 13, -5.367438086043798)
grid = magnitude(grid, 13, [2, 0], 2)
grid = transit(grid, 13, [6, 2, 3, 15, 5, 7], [0.06492287400539203, 0.21223490901058306, 0.36311130408652753, 0.09994467226348329, 0.12833432959710458, 0.1314519110369097])
grid = transit(grid, 8, [6, 2], [0.6857167761482571, 0.31428322385174284])
grid = shift(grid, 6, 15, 4.115946851379848)
grid = transit(grid, 15, [13, 3], [0.5897775709748927, 0.41022242902510725])
grid = sin(grid, 12, 14, 1.097917736937588, 58.87772371184383)
grid = transit(grid, 11, [9, 11], [0.37033495928182997, 0.6296650407181701])
grid = smooth_min(grid, 4, 1, 8)
grid = sin(grid, 4, 4, 3.47544933993972, -37.11795195118333)
grid = sin(grid, 11, 7, -0.3409112713023047, 75.93313567333723)
grid = transit(grid, 11, [5, 10, 7], [0.22694849313985146, 0.5162695719847235, 0.25678193487542517])
grid = sin(grid, 9, 9, -4.261918262131112, 18.680580924548693)
grid = smooth_max(grid, 2, 2, 11)
grid = sin(grid, 13, 13, 7.718114740496995, 55.242200715207815)
grid = sin(grid, 12, 10, -3.1151555334821888, 17.571856948335267)
grid = prod(grid, 6, [2, 4, 13])
grid = transit(grid, 5, [1, 9, 3, 10, 4], [0.24075568684771534, 0.02527375632067568, 0.4828116495090197, 0.09546712897709621, 0.15569177834549294])
grid = sin(grid, 6, 3, -0.1377650382373763, -96.34412250071645)
grid = sin(grid, 7, 3, 1.6405444007982959, -37.09230830685477)
grid = transit(grid, 9, [8], [1.0])
grid = sin(grid, 5, 10, -1.5052434957207308, 24.900059771988836)
grid = sin(grid, 8, 10, 2.5947698108630664, -90.74050288622541)
grid = sin(grid, 9, 8, -0.8743741598911887, 15.92872484723533)
grid = transit(grid, 4, [3, 13, 9, 8, 5, 2, 12], [0.05731677054419865, 0.08527765171582982, 0.33929504571762287, 0.1932983536368378, 0.0036374435750729187, 0.12289545051895708, 0.19827928429148084])
grid = transit(grid, 8, [13, 9, 5, 7, 14], [0.05801706264076675, 0.341923243761946, 0.0494872820880747, 0.29583940098242745, 0.2547330105267852])
grid = inverse(grid, 11, 5)
grid = magnitude(grid, 14, [4, 6, 1, 0], 2)
grid = transit(grid, 13, [11, 0], [0.6569516962992897, 0.3430483037007103])
grid = sin(grid, 14, 5, 0.053526366336325744, 4.147364704932215)
grid = transit(grid, 4, [3], [1.0])
grid = sin(grid, 3, 12, -4.078686662791614, 24.459526349523884)
grid = inverse(grid, 15, 10)
grid = shift(grid, 6, 1, -1.115193397983063)
grid = smooth_max(grid, 13, 3, 8)
grid = transit(grid, 13, [13, 0, 5, 14], [0.09662806703796267, 0.1621478194912538, 0.21548762580464817, 0.5257364876661353])
grid = inverse(grid, 1, 0)
grid = smooth_max(grid, 1, 15, 12)
grid = prod(grid, 11, [3])
grid = smooth_max(grid, 8, 11, 15)
grid = sin(grid, 12, 6, -3.621533174445339, 24.02414911462421)
grid = sin(grid, 1, 11, 0.5071121900678415, 10.950101187785563)
grid = shift(grid, 13, 3, 5.677279514103952)
grid = transit(grid, 3, [15, 11, 2, 8, 0], [0.28772794692354614, 0.1935939805514465, 0.06024872230823076, 0.13457223936247906, 0.32385711085429764])
grid = transit(grid, 1, [7, 2, 6, 1, 4, 0], [0.2070905138265326, 0.06562120796792839, 0.17355051228662716, 0.05514926535269553, 0.0829726599151083, 0.41561584065110807])
grid = transit(grid, 2, [0, 4, 2], [0.010597803396528332, 0.7371576932264431, 0.25224450337702853])
grid = sin(grid, 11, 8, 4.303514875116891, -67.11152580467314)
grid = prod(grid, 5, [3, 9, 2])
grid = sin(grid, 5, 1, 2.0751861425380627, 63.37681521624819)
grid = smooth_min(grid, 11, 10, 9)
grid = sin(grid, 13, 2, 4.295107938126156, 57.378601701270014)
grid = sin(grid, 10, 2, -0.010214061334835559, 20.43114218394348)
grid = transit(grid, 8, [1], [1.0])
grid = sin(grid, 4, 9, 0.2366252211469413, -40.63773874328931)
grid = sin(grid, 9, 15, -2.507870105026106, -89.43842740853354)
grid = transit(grid, 0, [12, 6, 4, 9, 1, 0, 14], [0.36336761526831185, 0.17372789204937897, 0.08036453739500136, 0.09747098994785518, 0.040818441056887325, 0.16796111771248814, 0.07628940657007711])
grid = transit(grid, 3, [11, 1, 12, 9, 0, 8, 15, 2, 10, 14], [0.20381942291270427, 0.07753380798970702, 0.11445683149439734, 0.08475226158626031, 0.1416941580568898, 0.020968563089492034, 0.0847896752697893, 0.0921589665387646, 0.008240731277180186, 0.17158558178481512])
grid = transit(grid, 5, [11, 10], [0.9817011300708863, 0.018298869929113594])
grid = sin(grid, 14, 8, -0.4693746108213766, -98.17810769380118)
grid = sin(grid, 12, 10, 3.6427863324838423, 99.297524709649)
grid = sin(grid, 5, 14, -1.45141083652418, -99.85812912291547)
grid = transit(grid, 0, [4, 3, 8], [0.23275058190778222, 0.49901982570530873, 0.2682295923869092])
grid = magnitude(grid, 8, [10, 9, 12, 4, 7, 15], 2)
grid = sin(grid, 12, 7, 1.439019575760617, 13.126437741104823)
grid = transit(grid, 10, [15, 8, 13, 2], [0.32464063956303774, 0.20922781529873477, 0.16179927966914437, 0.30433226546908315])
grid = magnitude(grid, 6, [14, 5, 13, 11, 2, 9], 2)
grid = sin(grid, 9, 5, -5.606152225672729, -35.928477282758536)
grid = transit(grid, 0, [7, 11, 15, 8, 12, 0, 4, 14, 3, 5], [0.11084510086381213, 0.003439701966452383, 0.10819642722960272, 0.15371289739415475, 0.25812192912399506, 0.005727171643985687, 0.14633649245899077, 0.033890406689391105, 0.05550396325806974, 0.1242259093715456])
grid = smooth_max(grid, 10, 15, 10)
grid = transit(grid, 11, [9, 0, 11, 7, 3, 8], [0.03500911832175082, 0.03265868671024263, 0.3248025339288217, 0.4234363710484886, 0.13338109758306646, 0.050712192407629864])
grid = transit(grid, 7, [14, 2, 13, 1, 11, 3, 8, 7], [0.207462236904601, 0.11516125867317799, 0.12240760599022518, 0.05066197369764289, 0.13869178538077429, 0.09948828746526778, 0.16686217850764798, 0.09926467338066268])
grid = transit(grid, 6, [6, 13, 7], [0.16813621041531998, 0.42150135317124293, 0.410362436413437])
grid = inverse(grid, 6, 6)
grid = sin(grid, 7, 15, -4.9164570678736865, 86.15931416043557)
grid = sin(grid, 1, 7, 1.6265187305620117, -97.13150019385894)
grid = transit(grid, 11, [0, 9], [0.1290607634325389, 0.8709392365674611])
grid = transit(grid, 14, [14, 13, 15], [0.530662002197574, 0.1082014600047566, 0.36113653779766947])
grid = transit(grid, 14, [10, 14, 4, 9, 13, 6], [0.3199750359220948, 0.07376266150860299, 0.03622483092076182, 0.09070212266434277, 0.4030414045204916, 0.07629394446370606])
grid = magnitude(grid, 13, [7, 4, 15], 2)
grid = transit(grid, 13, [6, 15, 11, 9, 12], [0.21908823570589997, 0.1636179110868493, 0.03797238284324163, 0.29532957711092916, 0.2839918932530799])
grid = sin(grid, 4, 3, 2.634465399239887, 62.07538440217337)
grid = sin(grid, 7, 2, 3.41043792019894, 65.36615977552518)
grid = transit(grid, 0, [14, 3, 11, 10, 7], [0.5203714128788618, 0.068511863728177, 0.10141059844877331, 0.2728285912351676, 0.036877533709020166])
grid = transit(grid, 7, [11], [1.0])
grid = transit(grid, 5, [9, 13, 3, 14], [0.28064413535886806, 0.5181512474389621, 0.1504742947642479, 0.050730322437922])
grid = prod(grid, 1, [12, 13])
grid = sin(grid, 6, 14, -1.927951619591129, -65.3028706482776)
grid = prod(grid, 14, [13])
grid = sin(grid, 1, 12, -0.5111321725063378, 18.261359970959475)
grid = power(grid, 6, 5, 0.9223892145169746)
grid = transit(grid, 2, [9, 11, 10], [0.2662646690994658, 0.2460545507972383, 0.4876807801032959])
grid = transit(grid, 2, [7], [1.0])
grid = sin(grid, 10, 9, 6.219381309190064, -71.03631884776823)
grid = sin(grid, 9, 6, 1.6821417847846682, -64.12547446801875)
grid = sin(grid, 13, 3, -0.15800274281797377, 90.63950889076133)
grid = sin(grid, 14, 14, -1.842523240371888, 74.23947694195837)
grid = inverse(grid, 7, 8)
grid = smooth_max(grid, 10, 3, 15)
grid = magnitude(grid, 9, [15, 7], 2)
grid = transit(grid, 4, [4, 12, 14, 15, 7, 1], [0.20378471182464508, 0.038241020379710625, 0.16903312106740406, 0.3387613981701764, 0.11303295854369695, 0.13714679001436697])
grid = transit(grid, 4, [14, 11, 12, 13, 4, 7], [0.23221079251346607, 0.3307147367708056, 0.26199556841553734, 0.018127231672754242, 0.13788777275073352, 0.01906389787670339])
grid = sin(grid, 4, 7, 3.7705302330112063, 56.91558505626969)
grid = sin(grid, 3, 9, 1.4275963527158242, -76.78247379244436)
grid = sin(grid, 2, 5, -5.225820110717917, 57.71107021356826)
grid = transit(grid, 2, [12], [1.0])
grid = prod(grid, 14, [11, 10])
grid = transit(grid, 2, [0, 15, 10], [0.005204838856346087, 0.5116602651328436, 0.48313489601081044])
grid = transit(grid, 10, [10], [1.0])
grid = transit(grid, 1, [8, 10, 15, 14, 9], [0.33493798319460544, 0.14040206011900094, 0.3010385316537353, 0.07412413198773361, 0.14949729304492473])
grid = magnitude(grid, 10, [11, 0, 5], 2)
grid = magnitude(grid, 9, [15, 3, 11, 0, 14], 2)
grid = sin(grid, 4, 5, -1.8457292172108153, -53.43885199947502)
grid = sin(grid, 10, 0, 7.741409383532979, -12.082110529508299)
grid = prod(grid, 11, [9])
grid = sin(grid, 4, 3, 0.10154488887533689, 12.479110491961137)
grid = magnitude(grid, 1, [7], 2)
grid = smooth_min(grid, 7, 4, 13)
grid = magnitude(grid, 5, [7], 2)
grid = transit(grid, 6, [9, 11, 2, 13], [0.381505247910628, 0.12073241493361198, 0.3454992433435407, 0.15226309381221942])
grid = magnitude(grid, 10, [7, 15, 5], 2)
grid = magnitude(grid, 9, [12, 14, 4], 2)
grid = shift(grid, 3, 9, 3.0393348894939773)
grid = shift(grid, 2, 4, 2.1961962516242517)
grid = prod(grid, 15, [3, 5, 0, 1])
grid = sin(grid, 6, 11, -0.7697482296056479, 23.55348445076298)
grid = sin(grid, 7, 7, 0.5492744322205282, 35.873568370773654)
grid = transit(grid, 7, [13], [1.0])
grid = sin(grid, 3, 12, 6.470760426148978, -53.62090724330151)
grid = sin(grid, 10, 10, 0.7827958631857042, -90.82177259964699)
grid = transit(grid, 6, [8, 6, 5, 7, 4, 2], [0.39579476392315127, 0.3200094081197146, 0.06439062651950353, 0.03284446726347166, 0.04732779189481446, 0.13963294227934445])
grid = smooth_min(grid, 0, 13, 15)
grid = smooth_max(grid, 5, 8, 4)
grid = transit(grid, 10, [1], [1.0])
grid = transit(grid, 15, [15], [1.0])
grid = prod(grid, 13, [6, 3, 7])
grid = sin(grid, 0, 3, -3.561651028660104, 11.539889679902203)
grid = power(grid, 10, 5, 0.12539493928522222)
grid = power(grid, 0, 12, 2.5526439221510495)
grid = sin(grid, 4, 10, -3.680544885171134, 30.633332441673872)
grid = transit(grid, 11, [12, 6, 9], [0.1597221050818672, 0.523275926379751, 0.31700196853838186])
grid = sin(grid, 14, 7, 5.409920766787869, -58.09956716630187)
grid = sin(grid, 2, 15, -2.5319898824657017, -45.01904701883333)
grid = shift(grid, 5, 5, 3.1584260780059252)
grid = transit(grid, 10, [9, 8], [0.7777441717493406, 0.22225582825065934])
grid = transit(grid, 3, [9], [1.0])
grid = transit(grid, 11, [2], [1.0])
#create color space
def shift_colors(x, shift):
    """Apply a per-channel gamma-like tone shift to an array in [-1, 1].

    For each channel i, values are mapped to [0, 1], raised to a power
    derived from shift[i], and mapped back to [-1, 1]:
      shift[i] > 0 darkens/compresses via exponent (1 + shift[i]);
      shift[i] < 0 applies the mirrored curve with exponent (1 - shift[i]);
      shift[i] == 0 leaves the channel unchanged.
    The abs() wrappers guard against tiny negative intermediates before the
    fractional-power step. Returns the result passed through test_values.
    """
    res = x.copy()
    for i in range(x.shape[-1]):
        # Positive shift: invert, power-compress, invert back.
        if shift[i] > 0: res[:,:,i] = (-np.abs(((x[:,:,i] + 1) / 2) ** (1 + shift[i]) - 1) ** (1 / (1 + shift[i])) + 1) * 2 - 1
        # Negative shift: the mirrored curve on the complemented value.
        if shift[i] < 0: res[:,:,i] = np.abs((1 - (x [:,:,i]+ 1) / 2) ** (1 - shift[i]) - 1) ** (1 / (1 - shift[i])) * 2 - 1
    return test_values(res)
# Build the final RGB image from channel 0 (repeated to 3 planes) with a
# per-plane tone shift, then rescale from [-1, 1] to [0, 255].
res = np.zeros((SIZE, SIZE, 3))
res += shift_colors(grid[:,:,0:1].repeat(3, -1), [1.9355805467383669, 1.4677093499726706, 1.2451388311186942])
res = res / 1  # no-op; the divisor is presumably a generated normalization constant
res = ((res + 1) / 2 * 255).clip(0,255)
#save results
im = Image.fromarray(np.uint8(res))
im.save(os.path.basename(__file__) + '.png')
#save layers
# Tile all GRID_CHANNELS channels into a 4x4 grayscale contact sheet
# (channel j goes to tile row j % 4, tile column j // 4).
img = np.zeros((SIZE * 4, SIZE * 4))
for j in range(GRID_CHANNELS):
    x = j % 4
    y = j // 4
    img[x*SIZE:(x + 1)*SIZE, y*SIZE:(y+1)*SIZE] = grid[:,:,j]
img = (img + 1) * 127.5  # map [-1, 1] -> [0, 255]
im = Image.fromarray(np.uint8(img))
im.save(os.path.basename(__file__) + '_layers.png')
| StarcoderdataPython |
1858448 | <filename>sandbox/old/train_quadrotor_12d.py
import torch
import numpy as np
import time
from constraint import Constraint
from quadrotor_12d import Quadrotor12D
from reinforce import Reinforce
from feedback_linearization import FeedbackLinearization
from logger import Logger
# Seed everything.
# A random seed in [0, 1000) is drawn, then used for both torch and numpy
# so a run can be reproduced from the seed recorded in the log filename.
seed = np.random.choice(1000)
torch.manual_seed(seed)
np.random.seed(seed)
# Create a quadrotor.
# "dyn" is the true 12D quadrotor dynamics used for rollouts.
mass = 1.0
Ix = 1.0
Iy = 1.0
Iz = 1.0
time_step = 0.01
dyn = Quadrotor12D(mass, Ix, Iy, Iz, time_step)
# "bad_dyn" is a deliberately mis-specified model (scaled mass/inertias)
# that the learned feedback linearization must compensate for.
mass_scaling = 0.75
Ix_scaling = 0.5
Iy_scaling = 0.5
Iz_scaling = 0.5
bad_dyn = Quadrotor12D(
    mass_scaling * mass, Ix_scaling * Ix,
    Iy_scaling * Iy, Iz_scaling * Iz, time_step)
# Create a feedback linearization object.
# Small tanh MLP correction terms with Gaussian exploration noise.
num_layers = 2
num_hidden_units = 32
activation = torch.nn.Tanh()
noise_std = 0.25
fb = FeedbackLinearization(
    bad_dyn, num_layers, num_hidden_units, activation, noise_std)
# Choose Algorithm (flags, exactly one expected to be set).
do_PPO=0
do_Reinforce=1
# Create an initial state sampler for the double pendulum.
# NOTE(review): the comment above says "double pendulum" but the bounds
# below are for the 12D quadrotor state -- likely copied from another
# script; confirm and update.
def initial_state_sampler(num):
    """Sample a random initial 12D quadrotor state, uniform within bounds.

    num: iteration counter -- originally meant to anneal from the tight
    bounds (lower0) to the wide bounds (lower1), but the annealing is
    currently disabled (frac pinned to 1.0), so only lower1 is used.
    State index 8 is the thrust acceleration minus g; its bounds are
    re-centered below so the sampled thrust straddles hover.
    Returns a (12, 1) numpy array.
    """
    lower0 = np.array([[-0.25, -0.25, -0.25,
                        -0.1, -0.1,
                        -0.1, -0.1, -0.1,
                        -1.0,  # This is the thrust acceleration - g.
                        -0.1, -0.1, -0.1]]).T
    lower1 = np.array([[-2.5, -2.5, -2.5,
                        -np.pi / 4.0, -np.pi / 4.0,
                        -0.3, -0.3, -0.3,
                        -3.0,  # This is the thrust acceleration - g.
                        -0.3, -0.3, -0.3]]).T
    frac = 1.0  # min(float(num) / 1500.0, 1.0) -- curriculum annealing disabled
    lower = frac * lower1 + (1.0 - frac) * lower0
    upper = -lower  # bounds are symmetric about zero (before re-centering)
    # Re-center the thrust-acceleration entry around hover (g) and divide
    # by mass; uses the module-level ``mass`` defined above.
    lower[8, 0] = (lower[8, 0] + 9.81) / mass
    upper[8, 0] = (upper[8, 0] + 9.81) / mass
    return np.random.uniform(lower, upper)
# Create REINFORCE.
# Training hyperparameters shared by both solver branches below.
num_iters = 3000
learning_rate = 1e-4
desired_kl = -1.0  # negative value presumably disables KL-based step adaptation -- confirm in Reinforce
discount_factor = 1.0  # undiscounted returns over the fixed-length rollout
num_rollouts = 25
num_steps_per_rollout = 100
# Constraint on state so that we don't go nuts.
# Constraint on state so that we don't go nuts.
class Quadrotor12DConstraint(Constraint):
    """Box-style validity check on the 12D quadrotor state.

    A state is acceptable when positions, velocities, and angular rates are
    bounded, roll/pitch stay well below pi/2, and the thrust acceleration
    stays away from zero.
    """

    def contains(self, x):
        """Return whether the (12, 1) state ``x`` lies inside the safe set."""
        BIG = 100.0
        SMALL = 0.01
        # Roll (index 3) and pitch (index 4) must stay clear of the
        # attitude singularity at pi/2.
        attitude_ok = abs(x[3, 0]) < np.pi / 2.5 and abs(x[4, 0]) < np.pi / 2.5
        # Thrust acceleration (index 8) must stay away from zero.
        thrust_ok = abs(x[8, 0]) > SMALL
        # Every other component just has to stay bounded.
        bounded_indices = (0, 1, 2, 5, 6, 7, 9, 10, 11)
        within_box = all(abs(x[i, 0]) < BIG for i in bounded_indices)
        return within_box and attitude_ok and thrust_ok
state_constraint = Quadrotor12DConstraint()
#Algorithm Params ** Only for Reinforce:
## Train for zero (no bad dynamics)
from_zero=False
# Rewards scaling - default is 10.0
scale_rewards=10.0
# norm to use
norm=2
if from_zero:
    # Learn from scratch: zero out the model-based feedback-linearization
    # terms instead of warm-starting from the mis-specified model.
    fb._M1= lambda x : np.zeros((4,4))
    fb._f1= lambda x : np.zeros((4,1))
if do_PPO:
    # NOTE(review): PPO is referenced here but never imported in this file;
    # enabling do_PPO would raise NameError. Dead code while do_PPO = 0
    # above -- confirm the intended import before enabling this branch.
    logger = Logger(
        "logs/quadrotor_12d_PPO_%dx%d_std%f_lr%f_kl%f_%d_%d_dyn_%f_%f_%f_%f_seed_%d.pkl" %
        (num_layers, num_hidden_units, noise_std, learning_rate, desired_kl,
         num_rollouts, num_steps_per_rollout,
         mass_scaling, Ix_scaling, Iy_scaling, Iz_scaling,
         seed))
    solver = PPO(num_iters,
                 learning_rate,
                 desired_kl,
                 discount_factor,
                 num_rollouts,
                 num_steps_per_rollout,
                 dyn,
                 initial_state_sampler,
                 fb,
                 logger)
if do_Reinforce:
    # Log filename encodes the full hyperparameter configuration so runs
    # can be identified and reproduced later.
    logger = Logger(
        "logs/quadrotor_12d_Reinforce_%dx%d_std%f_lr%f_kl%f_%d_%d_fromzero_%s_dyn_%f_%f_%f_%f_seed_%d_norm_%d_smallweights_tanh.pkl" %
        (num_layers, num_hidden_units, noise_std, learning_rate, desired_kl,
         num_rollouts, num_steps_per_rollout, str(from_zero),
         mass_scaling, Ix_scaling, Iy_scaling, Iz_scaling,
         seed, norm))
    solver = Reinforce(num_iters,
                       learning_rate,
                       desired_kl,
                       discount_factor,
                       num_rollouts,
                       num_steps_per_rollout,
                       dyn,
                       initial_state_sampler,
                       fb,
                       logger,
                       norm,
                       scale_rewards,
                       state_constraint)
# Set number of threads.
torch.set_num_threads(1)
# Run this guy.
solver.run(plot=False, show_diff=False)
# Dump the log.
logger.dump()
| StarcoderdataPython |
389840 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import numpy as np
import cv2 as cv
from boundingbox_art.cvdrawtext import CvDrawText
def bba_translucent_rectangle(
    image,
    p1,
    p2,
    color=(0, 64, 0),
    thickness=5,
    font=None,  # unused
    text=None,  # unused
    fps=None,  # unused
    animation_count=None,  # unused
):
    """Return a copy of ``image`` with a rectangle blended in additively.

    The rectangle outline is drawn on an all-black canvas the same size as
    the frame, and the canvas is combined with cv.add (saturating at 255),
    which brightens only the covered pixels -- the translucent-highlight
    look shared by the other bba_* drawers.
    """
    frame = copy.deepcopy(image)
    height, width = frame.shape[0], frame.shape[1]
    overlay = np.zeros((height, width, 3), np.uint8)
    top_left = (p1[0], p1[1])
    bottom_right = (p2[0], p2[1])
    cv.rectangle(overlay, top_left, bottom_right, color, thickness=thickness)
    return cv.add(frame, overlay)
def bba_translucent_rectangle_fill1(
    image,
    p1,
    p2,
    color=(0, 64, 0),
    thickness=None, # unused
    font='boundingbox_art/cvdrawtext/font/x12y20pxScanLine.ttf',
    text=None,
    fps=None, # unused
    animation_count=None, # unused
):
    """Draw a filled, additively-blended bounding box with centered text.

    A thick outer outline (fixed thickness 10 -- the ``thickness`` parameter
    is intentionally unused here) plus a filled inner rectangle inset by
    1/20 of the box size are rendered on a black canvas; optional text is
    placed near the box center, and the canvas is blended onto a copy of
    ``image`` with cv.add (saturating at 255).
    """
    draw_image = copy.deepcopy(image)
    image_width, image_height = draw_image.shape[1], draw_image.shape[0]
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    rectangle_width = x2 - x1
    rectangle_height = y2 - y1
    # Text size scales with the box height (2/9 of it).
    font_size = int((y2 - y1) * (2 / 9))
    draw_add_image = np.zeros((image_height, image_width, 3), np.uint8)
    cv.rectangle(draw_add_image, (x1, y1), (x2, y2), color, thickness=10)
    # Filled inner rectangle, inset by 5% of each box dimension.
    cv.rectangle(
        draw_add_image,
        (x1 + int(rectangle_width / 20), y1 + int(rectangle_height / 20)),
        (x2 - int(rectangle_width / 20), y2 - int(rectangle_height / 20)),
        color,
        thickness=-1)
    if (font is not None) and (text is not None):
        # Center the label; the /0.9 and /2 offsets look hand-tuned for
        # this font -- presumably adjusted by eye, verify before changing.
        draw_add_image = CvDrawText.puttext(
            draw_add_image, text, (int(((x1 + x2) / 2) - (font_size / 0.9)),
                                   int(((y1 + y2) / 2) - (font_size / 2))),
            font, font_size, (0, 0, 0))
    draw_image = cv.add(draw_image, draw_add_image)
    return draw_image
def bba_translucent_circle(
    image,
    p1,
    p2,
    color=(128, 0, 0),
    thickness=5,
    font=None,  # unused
    text=None,  # unused
    fps=None,  # unused
    animation_count=None,  # unused
):
    """Return a copy of ``image`` with a circle blended in additively.

    The circle is centered on the (p1, p2) bounding box, its radius is half
    the box height, and it is drawn on a black canvas that is combined with
    the frame via cv.add (saturating at 255) -- same translucent-highlight
    scheme as the other bba_* drawers.
    """
    frame = copy.deepcopy(image)
    height, width = frame.shape[0], frame.shape[1]
    center = (int((p1[0] + p2[0]) / 2), int((p1[1] + p2[1]) / 2))
    radius = int((p2[1] - p1[1]) * (1 / 2))
    overlay = np.zeros((height, width, 3), np.uint8)
    cv.circle(overlay, center, radius, color, thickness=thickness)
    return cv.add(frame, overlay)
| StarcoderdataPython |
83728 | # -*- coding: utf-8 -*-
"""common/tests/__init__.py
By <NAME>, thePortus.com, <EMAIL>
The init file for the common module tests
"""
| StarcoderdataPython |
1665065 | from .base_symbol_analyzer import SymbolAnalyzer
class Prices(SymbolAnalyzer):
    """Extracts the latest OHLCV bar (plus previous close) from a price frame."""

    @classmethod
    def run(cls, df):
        """Return a dict of the most recent Open/High/Low/Close/Volume values.

        ``prev_close`` is the close of the bar before the latest one, or
        None when the frame holds fewer than two rows.  Fixed off-by-one:
        the original guarded with ``len(df) > 2``, which wrongly reported
        None for a two-row frame even though ``iloc[-2]`` is valid there.
        """
        return dict(
            open=df.Open.iloc[-1],
            high=df.High.iloc[-1],
            low=df.Low.iloc[-1],
            close=df.Close.iloc[-1],
            volume=df.Volume.iloc[-1],
            prev_close=df.Close.iloc[-2] if len(df) >= 2 else None,
        )
| StarcoderdataPython |
9790581 | <filename>jishaku/shim/paginator_base.py
# -*- coding: utf-8 -*-
"""
jishaku.paginators (non-shim dependencies)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paginator-related tools and interfaces for Jishaku.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import collections
# Emoji settings: declares which emoji the PaginatorInterface uses for its
# navigation controls, in display order.
EmojiSettings = collections.namedtuple("EmojiSettings", "start back forward end close")

# Default control set for the paginator.
EMOJI_DEFAULT = EmojiSettings(
    start="\u23ee",  # skip to first page (left double triangle with bar)
    back="\u25c0",  # previous page (left-pointing triangle)
    forward="\u25b6",  # next page (right-pointing triangle)
    end="\u23ed",  # skip to last page (right double triangle with bar)
    close="\N{HEAVY MULTIPLICATION X}",  # close the paginator
)
| StarcoderdataPython |
3505098 | <reponame>shammellee/pendulum
import pickle
import pendulum
from datetime import timedelta
def test_pickle():
    """Periods must survive a pickle round-trip in every orientation.

    The original repeated the same dump/load/compare sequence three times;
    the round-trip check is extracted into a local helper (DRY) with the
    exact same assertions in the same order.
    """

    def assert_roundtrip(p):
        # Serialize and restore, then compare the attributes that define
        # a period's identity.
        p2 = pickle.loads(pickle.dumps(p))
        assert p.start == p2.start
        assert p.end == p2.end
        assert p.invert == p2.invert

    dt1 = pendulum.datetime(2016, 11, 18)
    dt2 = pendulum.datetime(2016, 11, 20)
    assert_roundtrip(pendulum.period(dt1, dt2))        # forward period
    assert_roundtrip(pendulum.period(dt2, dt1))        # backward period
    assert_roundtrip(pendulum.period(dt2, dt1, True))  # explicitly inverted
def test_comparison_to_timedelta():
    """A two-day period compares less-than a four-day timedelta."""
    start = pendulum.datetime(2016, 11, 18)
    end = pendulum.datetime(2016, 11, 20)
    assert end - start < timedelta(days=4)
| StarcoderdataPython |
3515168 | from ._validate_data import check_query, check_response, check_query_response
from ._validate_size import check_size
from ._data_format import data_format, check_format
from ._data_format import QueryFormat, ResponseFormat, Format
from ._typing import Query, Response | StarcoderdataPython |
5184083 | <filename>rom/generate_reduced_basis_five_param.py
import time
import numpy as np
from mshr import Rectangle, generate_mesh
from dolfin import *
from forward_solve import Fin
from error_optimization import optimize_five_param
from model_constr_adaptive_sampling import sample
set_log_level(40)  # only show FEniCS errors, suppress info/warning chatter

# Create a fin geometry: a vertical central post with four horizontal
# sub-fins attached on each side (a heat-sink "fin" benchmark domain).
geometry = Rectangle(Point(2.5, 0.0), Point(3.5, 4.0)) \
    + Rectangle(Point(0.0, 0.75), Point(2.5, 1.0)) \
    + Rectangle(Point(0.0, 1.75), Point(2.5, 2.0)) \
    + Rectangle(Point(0.0, 2.75), Point(2.5, 3.0)) \
    + Rectangle(Point(0.0, 3.75), Point(2.5, 4.0)) \
    + Rectangle(Point(3.5, 0.75), Point(6.0, 1.0)) \
    + Rectangle(Point(3.5, 1.75), Point(6.0, 2.0)) \
    + Rectangle(Point(3.5, 2.75), Point(6.0, 3.0)) \
    + Rectangle(Point(3.5, 3.75), Point(6.0, 4.0))

mesh = generate_mesh(geometry, 40)
V = FunctionSpace(mesh, 'CG', 1)  # piecewise-linear Lagrange elements
dofs = len(V.dofmap().dofs())  # full-order model dimension
solver = Fin(V)
# Basis initialization with dummy solves and POD
##########################################################3
samples = 10
Y = np.zeros((samples, dofs))
for i in range(0,samples):
k = np.random.uniform(0.1, 1.0, 5)
w = solver.forward_five_param(k)[0]
Y[i,:] = w.vector()[:]
K = np.dot(Y, Y.T)
# Initial basis vectors computed using proper orthogonal decomposition
e,v = np.linalg.eig(K)
basis_size = 5
U = np.zeros((basis_size, dofs))
for i in range(basis_size):
e_i = v[:,i].real
U[i,:] = np.sum(np.dot(np.diag(e_i), Y),0)
basis = U.T
def random_initial_five_param():
    """Draw a random 5-vector of parameters, each uniform on [0.1, 1.0).

    Currently a simple random initialization; eventually to be replaced by
    a more sophisticated function with Bayesian prior sampling.
    """
    return np.random.uniform(low=0.1, high=1.0, size=5)
##########################################################3
# Create reduced basis with adaptive sampling
##########################################################3
# Greedy model-constrained sampling: grow the POD basis where the
# reduced-order error (found by optimize_five_param) is largest.
t_i = time.time()
basis = sample(basis, random_initial_five_param, optimize_five_param, solver)
t_f = time.time()
print("Sampling time taken: {}".format(t_f - t_i))
print("Computed basis with shape {}".format(basis.shape))
# Persist the basis (one column per mode) for later ROM solves.
np.savetxt("data/basis_five_param.txt", basis, delimiter=",")
| StarcoderdataPython |
3457916 | """
Embedded Python Blocks:
Each time this file is saved, GRC will instantiate the first class it finds
to get ports and parameters of your block. The arguments to __init__ will
be the parameters. All of them are required to have default values!
"""
import numpy as np
from gnuradio import gr
from polarcodes import *
from polarcodes import Sim_utils
import pprint
import time
import pmt
class blk(gr.basic_block): # other base classes are basic_block, decim_block, interp_block
    """GNU Radio source block that emits one random binary message per work call.

    Despite the original template docstring, this is not a multiplier: each
    call generates a random message of ``msg_len`` bits, logs it (with the
    mothercode/codeword sizes) to ``sim6msg.txt``, and writes the message,
    zero-padded up to the rate-matched codeword length, to the output stream.
    """
    def __init__(self,msg_len=1,rate=1,use_RM=True,channel='ucl'): # only default arguments here
        """arguments to this function show up as parameters in GRC"""
        gr.basic_block.__init__(
            self,
            name='Message generator', # will show up in GRC
            # in_sig=[np.int8],
            in_sig=None,
            out_sig=[np.int8]
        )
        # if an attribute with the same name as a parameter is found,
        # a callback is registered (properties work, too).
        self.msg_len=msg_len    # number of information bits per message
        self.rate=rate          # target code rate R = A/E
        self.useRM= use_RM      # whether rate matching is applied
        self.channel=channel    # channel name used for CRC/mothercode selection
    def general_work(self, input_items, output_items):
        """Generate one random message, log it, and emit its padded codeword."""
        # print(len(input_items[0]))
        # print((input_items[0].dtype))
        # pprint.pprint((input_items[0]))
        # print(self.msg_len)
        # NOTE(review): throttles output to roughly one message per 5 s — confirm intended.
        time.sleep(5)
        msg_len=[self.msg_len] # message length
        crc_n=Sim_utils.crc_selector(msg_len,self.channel) # crc polynomial generator length
        k=list(crc_n+np.array(msg_len))  # info bits + CRC bits per message
        M=(np.array(msg_len)/np.array(self.rate)) #codeword E = A/R
        self.M=list(M.astype(int))
        N = Sim_utils.mothercode(self.channel,k,M)  # mothercode length(s)
        if self.useRM == False:
            self.M=N # no rate matching M == N
        for i in range(0,len(msg_len)):
            my_message = np.random.randint(2, size=msg_len[i],dtype=np.int8)
            zobo = np.zeros(self.M[i])  # zero padding up to the codeword length
            with open('sim6msg.txt','a') as f:
                f.write(f'{list(my_message)},{N[i]},{self.M[i]},{self.rate}.\n')
            f.close()  # redundant: the with-statement already closed f
            # First clear the codeword span, then overwrite the leading bits
            # with the actual message.
            output_items[0][:self.M[i]] = zobo
            output_items[0][:msg_len[i]] = my_message.astype(np.int8)
        print((output_items[0][:msg_len[0]]))
        # print((self.M[0]))
        # print((output_items[0][:msg_len]))
        # self.produce(0, msg_len) #consume port 0 input
        return len(output_items[0][:self.M[0]])
        # return
| StarcoderdataPython |
8155300 | <gh_stars>0
from urllib.request import urlopen, Request
import config
def get_html(source_url):
    """Fetch *source_url* and return the raw response body as bytes.

    The request carries the crawler's User-Agent from ``config``. The HTTP
    response is closed deterministically via a context manager (the original
    left it to the garbage collector — a resource leak under load).
    """
    request = Request(
        source_url,
        headers={'User-Agent': config.crawl_user_agent})
    with urlopen(request) as response:
        return response.read()
| StarcoderdataPython |
260413 | <reponame>marcelosalles/pyidf
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.economics import CurrencyType
log = logging.getLogger(__name__)
class TestCurrencyType(unittest.TestCase):
    """Round-trip test for the pyidf CurrencyType economics object."""
    def setUp(self):
        # Temporary IDF file path; removed again in tearDown.
        self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        os.remove(self.path)
    def test_create_currencytype(self):
        pyidf.validation_level = ValidationLevel.error
        obj = CurrencyType()
        # alpha
        var_monetary_unit = "USD"
        obj.monetary_unit = var_monetary_unit
        # Save the object into a fresh IDF file (check=False skips validation
        # of other required objects), then read it back.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        # The reloaded object must preserve the field value exactly.
        self.assertEqual(idf2.currencytypes[0].monetary_unit, var_monetary_unit)
1909053 | <reponame>recruit-communications/rcl-contest-2020
import argparse
import subprocess as sp
import sys
N = 500
M = 5000
class XorShift:
    """64-bit xorshift PRNG (13/7/17 shift variant) with a fixed default seed."""
    mask = (1 << 64) - 1

    def __init__(self, seed=None):
        # A falsy seed (None or 0) falls back to the reference constant.
        self.x = seed or 88172645463325252

    def next(self):
        """Advance the generator state and return the next 64-bit value."""
        state = self.x
        state = (state ^ (state << 13)) & self.mask
        state ^= state >> 7
        state = (state ^ (state << 17)) & self.mask
        self.x = state
        return state

    def next_int(self, n):
        """Return an unbiased uniform integer in [0, n) via rejection sampling."""
        limit = self.mask // n * n
        value = self.next()
        while value >= limit:
            value = self.next()
        return value % n
class Tester:
    """Local judge: builds the board from a seed and scores a solver process."""
    def __init__(self, seed, info):
        # Board generation is XorShift-driven so it matches the official judge.
        self.rnd = XorShift(seed)
        self.v = [0] * (N + 1)
        self.v[N] = 5000
        # 100 random "bumps": add `add` to a run of `size` consecutive squares.
        for _ in range(100):
            size = self.rnd.next_int(10) + 1
            add = self.rnd.next_int(200) + 1
            pos = self.rnd.next_int(N - 5 - size) + 1
            for i in range(size):
                self.v[pos + i] += add
        if info:
            print("各マスの値", file=sys.stderr)
            for i in range(0, N + 1, 20):
                v_str = map(lambda x: "%3d" % x, self.v[i:i+20])
                print("%3d: %s" % (i, " ".join(v_str)), file=sys.stderr)
    def judge(self, command, info, debug):
        """Run *command* as the interactive solver and return (score, goal count).

        Protocol: send "N M", then per turn read six die faces from the solver
        (at most one face may change between turns), roll one face at random,
        and reply "move value pos". Exits the tester on any protocol violation.
        """
        score = 0
        goal = 0
        with sp.Popen(command, shell=True, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sys.stderr,
                      universal_newlines=True) as proc:
            proc.stdin.write("%d %d\n" % (N, M))
            proc.stdin.flush()
            prev_dice = [1, 2, 3, 4, 5, 6]
            prev_pos = 0
            pos = 0
            # Squares whose value the solver has not yet observed (debug view).
            hide = [False] + [True] * (N - 6) + [False] * 5
            pos_count = [0] * (N + 1)
            for t in range(M):
                row = proc.stdout.readline()
                try:
                    dice = list(map(int, row.split()))
                except Exception:
                    print("[ERROR] 出力が不正です : %s" % row)
                    proc.kill()
                    sys.exit(1)
                if len(dice) != 6:
                    print("[ERROR] 値が6個ではありません : %s" % dice)
                    proc.kill()
                    sys.exit(1)
                # Validate face values and count how many faces were changed.
                change_count = 0
                for i in range(6):
                    if dice[i] < 1 or 6 < dice[i]:
                        print("[ERROR] 値が範囲外です : %d" % dice[i])
                        proc.kill()
                        sys.exit(1)
                    if dice[i] != prev_dice[i]:
                        change_count += 1
                if change_count > 1:
                    print("[ERROR] 2つ以上のサイコロの面を変更しました : %s => %s" %
                          (prev_dice, dice))
                    proc.kill()
                    sys.exit(1)
                dice_index = self.rnd.next_int(6)
                move = dice[dice_index]
                pos += move
                if pos > N:
                    pos = N - (pos - N)  # overshoot bounces back from the goal
                move_pos = pos
                score += self.v[pos]
                pos_count[pos] += 1
                proc.stdin.write("%d %d %d\n" % (move, self.v[pos], pos))
                proc.stdin.flush()
                if pos == N:
                    pos = 0  # reaching the goal restarts from square 0
                    goal += 1
                hide[pos] = False
                if debug:
                    dice_str = []
                    for j in range(6):
                        if j == dice_index:
                            dice_str.append("[%d]" % dice[j])
                        else:
                            dice_str.append(" %d " % dice[j])
                    print("%4d, pos: %3d => %3d, dice: %s score: %d goal: %d" %
                          (t, prev_pos, move_pos, "".join(dice_str), score, goal),
                          file=sys.stderr)
                    values = []
                    indexes = []
                    for j in range(pos, pos + 20):
                        if hide[j % N]:
                            values.append("  ?")
                        else:
                            values.append("%3d" % self.v[j % N])
                        indexes.append("%3d" % (j % N))
                    print("value: %s\nindex: %s\n" % (" ".join(values), " ".join(indexes)),
                          file=sys.stderr)
                prev_dice = dice
                prev_pos = pos
        if info:
            print("各マスに到達した回数", file=sys.stderr)
            for i in range(0, N + 1, 20):
                count_str = map(lambda x: "%3d" % x, pos_count[i:i+20])
                print("%3d: %s" % (i, " ".join(count_str)), file=sys.stderr)
        return (score, goal)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-seed", type=int, required=True)   # PRNG seed for board generation
    parser.add_argument("-command", required=True)          # solver command line to judge
    parser.add_argument("-info", action='store_true')       # print board / visit statistics
    parser.add_argument("-debug", action='store_true')      # per-turn trace output
    args = parser.parse_args()
    tester = Tester(args.seed, args.info)
    result = tester.judge(args.command, args.info, args.debug)
    print("score: %d goal: %d" % result)
| StarcoderdataPython |
6592695 | import csv
import cv2
import numpy as np
#import sklearn
from math import ceil
from sklearn.utils import shuffle
# Read every row of the simulator driving log (each row: center/left/right
# image paths, steering, throttle, brake, speed).
lines = []
with open('/home/workspace/CarND-Behavioral-Cloning-P3/run1/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
from sklearn.model_selection import train_test_split
# Hold out 20% of the log rows for validation.
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
def generator(samples, batch_size=32):
    """Yield shuffled (images, steering) batches indefinitely for Keras.

    Each CSV row contributes the center/left/right camera frames (with a
    +/-0.1 steering correction for the side cameras) plus a horizontally
    flipped copy of every frame with negated steering, so one row yields
    6 training examples.
    """
    num_samples=len(samples)
    while 1:  # loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples=samples[offset:offset+batch_size]
            images =[]
            measurements = []
            for batch_sample in batch_samples:
                for i in range(3):  # 0=center, 1=left, 2=right camera
                    source_path = batch_sample[i]
                    filename=source_path.split('/')[-1]
                    current_path = '/home/workspace/CarND-Behavioral-Cloning-P3/run1/IMG/' + filename
                    image =cv2.imread(current_path)
                    image =cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; model expects RGB
                    images.append(image)
                    measurement = float(batch_sample[3])
                    if i==0:
                        measurements.append(measurement)
                    elif i==1:
                        measurements.append(measurement+0.1)  # left camera: steer right
                    else:
                        measurements.append(measurement-0.1)  # right camera: steer left
            # Augmentation: mirror every frame to balance left/right turns.
            augmented_images, augmented_measurements =[],[]
            for image,measurement in zip(images,measurements):
                augmented_images.append(image)
                augmented_measurements.append(measurement)
                augmented_images.append(cv2.flip(image,1))
                augmented_measurements.append(measurement*-1.0)
            X_train = np.array(augmented_images)
            y_train = np.array(augmented_measurements)
            yield shuffle(X_train, y_train)
batch_size = 32
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
# BUG FIX: pyplot was used below without ever being imported.
import matplotlib
matplotlib.use('Agg')  # headless backend: we only save the figure to disk
import matplotlib.pyplot as plt

# NVIDIA end-to-end self-driving architecture.
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))  # normalize to [-0.5, 0.5]
model.add(Cropping2D(cropping=((70, 25), (0, 0))))  # drop sky and hood rows
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))  # single steering-angle output (regression)

model.compile(loss='mse', optimizer='adam')
# BUG FIX: the original called model.fit(X_train, y_train, ...) although
# X_train / y_train only exist inside generator() (NameError), and it never
# assigned history_object used by the plotting below. Train with the
# generators and keep the returned History.
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=ceil(len(train_samples) / batch_size),
    validation_data=validation_generator,
    validation_steps=ceil(len(validation_samples) / batch_size),
    epochs=6,
    verbose=1,
)
model.save('model2.h5')

# Plot training vs validation loss per epoch.
print(history_object.history.keys())
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('./result_image.png')
| StarcoderdataPython |
6438580 | <reponame>zxdavb/evohome_rf<filename>ramses_rf/protocol/packet.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - a RAMSES-II protocol decoder & analyser.
Decode/process a packet (packet that was received).
"""
import logging
from datetime import datetime as dt
from datetime import timedelta as td
from typing import ByteString, Optional, Tuple
from .address import pkt_addrs
from .exceptions import InvalidPacketError
from .frame import PacketBase
from .logger import getLogger
from .ramses import EXPIRES, RAMSES_CODES
from .const import ( # noqa: F401, isort: skip, pylint: disable=unused-import
I_,
MESSAGE_REGEX,
RP,
RQ,
W_,
__dev_mode__,
)
from .const import ( # noqa: F401, isort: skip, pylint: disable=unused-import
_0001,
_0002,
_0004,
_0005,
_0006,
_0008,
_0009,
_000A,
_000C,
_000E,
_0016,
_0100,
_01D0,
_01E9,
_0404,
_0418,
_042F,
_0B04,
_1030,
_1060,
_1090,
_10A0,
_10E0,
_1100,
_1260,
_1280,
_1290,
_1298,
_12A0,
_12B0,
_12C0,
_12C8,
_1F09,
_1F41,
_1FC9,
_1FD4,
_2249,
_22C9,
_22D0,
_22D9,
_22F1,
_22F3,
_2309,
_2349,
_2D49,
_2E04,
_30C9,
_3120,
_313F,
_3150,
_31D9,
_31DA,
_31E0,
_3220,
_3B00,
_3EF0,
_3EF1,
_PUZZ,
)
DEV_MODE = __dev_mode__ and False  # force-disabled; flip the `False` to enable dev logging
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
    _LOGGER.setLevel(logging.DEBUG)
# Dedicated packet logger: writes the raw packet log (see .logger.getLogger).
_PKT_LOGGER = getLogger(f"{__name__}_log", pkt_log=True)
class Packet(PacketBase):
    """The packet class; should trap/log all invalid PKTs appropriately."""

    def __init__(self, gwy, dtm: dt, frame: str, **kwargs) -> None:
        """Create a packet from a valid frame.

        Will raise InvalidPacketError (or InvalidAddrSetError) if it is invalid.
        """
        super().__init__()

        self._gwy = gwy
        self._dtm = dtm
        self._frame = frame
        self.comment = kwargs.get("comment")
        self.error_text = kwargs.get("err_msg")
        self.raw_frame = kwargs.get("raw_frame")

        # Parses/validates the address set; raises InvalidPacketError on failure.
        self._src, self._dst, self._addrs, self._len = self._validate(
            self._frame[11:40]
        )  # ? raise InvalidPacketError

        # Fixed-position fields of a RAMSES frame.
        self._rssi = frame[0:3]
        self._verb = frame[4:6]
        self._seqn = frame[7:10]
        self._code = frame[41:45]
        self._payload = frame[50:]

        self._timeout = None  # lazily computed by _expired

    def __repr__(self) -> str:
        """Return an unambiguous string representation of this object."""
        hdr = f' # {self._hdr}{f" ({self._ctx})" if self._ctx else ""}'
        # BUG FIX: the original swallowed AttributeError with a bare print()
        # and fell off the end, so __repr__ returned None (a TypeError when
        # repr() is actually invoked). Fall back to str(dtm) instead.
        try:
            dtm = self.dtm.isoformat(timespec="microseconds")
        except AttributeError:
            dtm = str(self.dtm)
        return f"{dtm} ... {self}{hdr}"

    @property
    def dtm(self) -> dt:
        """Return the datetime at which the packet was received."""
        return self._dtm

    def __eq__(self, other) -> bool:
        if not hasattr(other, "_frame"):
            return NotImplemented
        # Compare frames excluding the RSSI field (first 3 chars).
        return self._frame[4:] == other._frame[4:]

    @staticmethod
    def _partition(pkt_line: str) -> Tuple[str, str, str]:
        """Partition a packet line into its three parts.

        Format: packet[ < parser-hint: ...][ * evofw3-err_msg][ # evofw3-comment]
        """
        fragment, _, comment = pkt_line.partition("#")
        fragment, _, err_msg = fragment.partition("*")
        pkt_str, _, _ = fragment.partition("<")  # discard any parser hints
        return map(str.strip, (pkt_str, err_msg, comment))

    @property
    def _expired(self) -> float:
        """Return fraction used of the normal lifetime of packet.

        A packet is 'expired' when >1.0, and should be tombstoned when >2.0. Returns
        False if the packet does not expire (or None is N/A?).

        NB: this is only the fact if the packet has expired, or not. Any opinion to
        whether it *matters* that the packet has expired, is up to higher layers of the
        stack.
        """
        if self._timeout is None:
            self._timeout = pkt_timeout(self) or False  # cached on first access
        if self._timeout is False:
            return False
        return (self._gwy._dt_now() - self.dtm) / self._timeout

    def _validate(self, addr_frag):
        """Validate the packet, and parse the addresses if so (will log all packets).

        Returns (src, dst, addrs, payload_length). Raises an exception
        (InvalidPacketError, InvalidAddrSetError) if the packet is not valid.
        """
        try:
            if self.error_text:
                raise InvalidPacketError(self.error_text)

            if not self._frame and self.comment:  # log null pkts only if has a comment
                raise InvalidPacketError("Null packet")

            if not MESSAGE_REGEX.match(self._frame):
                raise InvalidPacketError("Invalid packet structure")

            length = int(self._frame[46:49])
            if len(self._frame[50:]) != length * 2:  # payload is hex: 2 chars/byte
                raise InvalidPacketError("Invalid payload length")

            src, dst, addrs = pkt_addrs(addr_frag)  # self._frame[11:40]

        except InvalidPacketError as exc:  # incl. InvalidAddrSetError
            if self._frame or self.error_text:
                _PKT_LOGGER.warning("%s", exc, extra=self.__dict__)
            raise

        _PKT_LOGGER.info("", extra=self.__dict__)
        return src, dst, addrs, length

    @classmethod
    def from_dict(cls, gwy, dtm: str, pkt_line: str):
        """Constructor to create a packet from a saved state (a curated dict)."""
        frame, _, comment = cls._partition(pkt_line)
        return cls(gwy, dt.fromisoformat(dtm), frame, comment=comment)

    @classmethod
    def from_file(cls, gwy, dtm: str, pkt_line: str):
        """Constructor to create a packet from a log file line."""
        frame, err_msg, comment = cls._partition(pkt_line)
        return cls(gwy, dt.fromisoformat(dtm), frame, err_msg=err_msg, comment=comment)

    @classmethod
    def from_port(cls, gwy, dtm: dt, pkt_line: str, raw_line: ByteString = None):
        """Constructor to create a packet from a usb port (HGI80, evofw3)."""
        frame, err_msg, comment = cls._partition(pkt_line)
        return cls(
            gwy, dtm, frame, err_msg=err_msg, comment=comment, raw_frame=raw_line
        )
def pkt_timeout(pkt) -> Optional[td]:  # NOTE: import OtbGateway ??
    """Return the pkt lifetime, or None if the packet does not expire (e.g. 10E0).

    Some codes require a valid payload to best determine lifetime (e.g. 1F09).
    RQ/W packets always get a short lifetime; otherwise the lifetime depends
    on the command code (and sometimes the payload).
    """
    timeout = None

    if pkt.verb in (RQ, W_):
        timeout = td(seconds=3)

    elif pkt.code in (_0005, _000C, _10E0):
        return  # TODO: exclude/remove devices caused by corrupt ADDRs?

    elif pkt.code == _1FC9 and pkt.verb == RP:
        return  # TODO: check other verbs, they seem variable

    elif pkt.code == _1F09:
        # sync-cycle heartbeat: I packets are broadcast every ~5 min
        timeout = td(seconds=300) if pkt.verb == I_ else td(seconds=3)

    elif pkt.code == _000A and pkt._has_array:
        timeout = td(minutes=60)  # sends I /1h

    elif pkt.code in (_2309, _30C9) and pkt._has_array:
        timeout = td(minutes=15)  # sends I /sync_cycle

    elif pkt.code == _3220:
        # OpenTherm: lifetime depends on which message id is carried.
        from ..devices import OtbGateway  # to prevent circular references

        if pkt.payload[4:6] in OtbGateway.SCHEMA_MSG_IDS:
            timeout = None  # i.e. never expires
        elif pkt.payload[4:6] in OtbGateway.PARAMS_MSG_IDS:
            timeout = td(minutes=60)
        # elif pkt.payload[4:6] in OtbGateway.STATUS_MSG_IDS:
        #     timeout = td(minutes=5)
        else:
            timeout = td(minutes=5)

    # elif pkt.code in (_3B00, _3EF0, ):  # TODO: 0008, 3EF0, 3EF1
    #     timeout = td(minutes=6.7)  # TODO: WIP

    elif pkt.code in RAMSES_CODES:
        timeout = RAMSES_CODES[pkt.code].get(EXPIRES)

    # Fallback lifetime for anything without an explicit rule.
    return timeout or td(minutes=60)
| StarcoderdataPython |
388677 | <reponame>ryfi/Chiron<gh_stars>0
#!/usr/bin/env python3
"""
Invoke a CWL Squeezemeta workflow
Code adapted from work originally done by <NAME> (<EMAIL>)
"""
import logging
import os
import subprocess
import sys
from argparse import ArgumentParser
import yaml
CWL_WORKFLOW = os.path.dirname(os.path.realpath(sys.path[0])) + "/pipelines/squeezemeta/squeezemeta_all_args_coassembly.cwl"
CWL_WORKFLOW_JOIN = os.path.dirname(os.path.realpath(sys.path[0])) + "/pipelines/squeezemeta/squeezemeta_all_args_coassembly.cwl"
########
# Main #
########
def main():
    """Parse CLI arguments, assemble the CWL job YAML and invoke the workflow."""
    # Set up options parser and help statement
    parser = ArgumentParser(description="Invoke a Squeezemeta pipeline using the Common Workflow Language")
    parser.add_argument("--input_file_list", "-i",
                        help="List file containing paths to input samples",
                        metavar="/path/to/test.samples",
                        required=True)
    parser.add_argument("--config_file", "-c",
                        help="YAML-based config file of parameters to add",
                        metavar="/path/to/config.yml",
                        required=True)
    parser.add_argument("--out_dir",
                        "-o",
                        help="Optional. Directory to stage output files. Default is ./cwl_output",
                        metavar="/path/to/outdir",
                        default="./cwl_output")
    parser.add_argument("--debug", "-d",
                        help="Set the debug level",
                        default="ERROR",
                        metavar="DEBUG/INFO/WARNING/ERROR/CRITICAL")
    args = parser.parse_args()
    check_args(args)
    # Start from the user config, then merge in the sample file paths.
    yaml_dict = read_config(args.config_file)
    inputs = read_in_list(args.input_file_list)
    add_input_files_to_yaml(yaml_dict, inputs)
    # Path for data output by Squeezemeta
    yaml_dict['input_dir'] = {'class': 'Directory', 'path': args.out_dir}
    # Write out YAML
    final_yaml = create_final_yaml_name(args.config_file)
    write_final_yaml(yaml_dict, final_yaml)
    # Invoke CWL with new YAML job input
    run_cwl_command(args.out_dir, final_yaml, CWL_WORKFLOW)
def check_args(args):
    """Validate the parsed CLI arguments and configure the root logger."""
    level_name = args.debug.upper()
    level_value = getattr(logging, level_name)
    # Only integer attributes of `logging` are real levels; names such as
    # handler classes would resolve to non-int objects.
    if not isinstance(level_value, int):
        raise ValueError('Invalid log level: %s' % level_name)
    # Create the logger at the requested level.
    logging.basicConfig(level=level_value)
def read_config(yaml_in):
    """Load the YAML config file at *yaml_in* and return its contents
    (typically a dict of CWL job parameters)."""
    with open(yaml_in) as f:
        return yaml.safe_load(f)
def read_in_list(list_in):
    """Read *list_in* and return its lines with trailing newlines stripped.

    Uses a context manager so the file handle is closed deterministically
    (the original opened the file without ever closing it).
    """
    with open(list_in) as list_file:
        return [line.rstrip('\n') for line in list_file]
def add_input_files_to_yaml(yaml_dict, inputs):
    """Record each input path as a CWL File entry under the 'input_file' key.

    Any existing 'input_file' entry in *yaml_dict* is replaced.
    """
    yaml_dict['input_file'] = [
        {'class': 'File', 'path': input_path} for input_path in inputs
    ]
def create_final_yaml_name(orig_yaml_name):
    """Derive the '<stem>.final.yml' filename from the original YAML path."""
    stem = os.path.splitext(os.path.basename(orig_yaml_name))[0]
    # NOTE: the returned name is relative to the current working directory.
    return stem + ".final.yml"
def write_final_yaml(yaml_dict, out_yaml):
    """Serialize *yaml_dict* as YAML to the file path *out_yaml* (overwrites)."""
    with open(out_yaml, "w") as f:
        yaml.dump(yaml_dict, f)
def run_cwl_command(outdir, job_input, workflow):
    """Build and run the cwl-runner job, streaming its output as it runs.

    Raises subprocess.CalledProcessError if the runner exits non-zero.
    """
    tmp_dir = get_tmpdir()
    cwl_cmd = "cwl-runner --tmp-outdir-prefix=tmp_out --tmpdir-prefix={3} --outdir={0} {1} {2}".format(outdir, workflow, job_input, tmp_dir)
    # https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
    # BUG FIX: the module imports `subprocess` with no `sp` alias, so the
    # original `sp.Popen(...)` raised NameError at runtime.
    with subprocess.Popen(cwl_cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          shell=True) as proc:
        for line in proc.stdout:
            print(line.decode(), end='')
    # BUG FIX: returncode was checked inside the `with` body, before the
    # context manager's implicit wait() — it was still None there, so
    # failures were never reported. Check it after the process has exited.
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, proc.args)
def get_tmpdir():
    """Choose where tmp files go: /opt/tmp when /opt is writable, else ./tmp."""
    if os.path.exists("/opt") and os.access('/opt', os.W_OK):
        return "/opt" + "/tmp"
    return "." + "/tmp"
if __name__ == '__main__':
    main()
    sys.exit(0)  # explicit success status
| StarcoderdataPython |
86697 | from abc import ABC, abstractmethod
from PIL import Image, ImageTk
from riem.debug import Debug, DebugChannel
from riem.graphics import Align, Graphics, ImageLoader, Menu
from riem.input import Action, Controller, Keyboard
from riem.library import ArrayList, Dimensions, Point
from riem.version import __version__
from tkinter import Canvas, Tk
from typing import Any, Callable, Dict
import importlib, inspect, os, re, sys, time
class Application:
	"""Tk-based game application shell.

	Owns the Tk root window, canvas/graphics context, controller and the
	active State, and drives the tick/render loop every ``tick_ms`` ms.
	"""
	def __init__(self, title: str, state_initial: str, state_directory: str, default_text: Dict[str, str] = None, icon: str = None, size: Dimensions = Dimensions(960, 720), tick_ms: int = 250, **kwargs) -> None:
		# Default Properties
		maximise: bool = False
		# Parse Kwargs
		for k, v in kwargs.items():
			# Option: Debug
			if k == "debug": Application._debug_parse(v, title, size, tick_ms)
			# Option: Maximise
			if k == "fullscreen" and v == True:
				maximise = True
		# Public Properties
		self.size: Dimensions = size
		# State Logic
		def state_load(directory: str) -> Dict[str, State]:
			"""Import every module in *directory* and collect its State subclasses."""
			# Debug
			Debug.print("Loading states from directory %s" % directory, DebugChannel.STATE)
			# Directory Path
			directory_path = os.path.join(os.getcwd(), directory)
			# List Files
			file_list = ArrayList(os.listdir(directory_path)).reject(lambda it: it.startswith("_")).map(lambda it: it.split(".")[0])
			# NOTE: current reject is not going to ignore directories
			# Module Logic
			def load_module(module):
				"""Return (name, class) pairs for the State subclasses in *module*."""
				# List Attributes
				result = ArrayList(list(module.__dict__.keys())).reject(lambda it: it == "State")
				# Map Classes
				result = result.map(lambda it: (it, getattr(module, it)))
				# Return States
				return result.filter(lambda _, v: inspect.isclass(v) and issubclass(v, State))
			# Return States
			result = {}
			for module in file_list.map(lambda it: load_module(importlib.import_module("%s.%s" % (directory.split("/")[-1], it)))):
				for name, state in module:
					result[name] = state
					Debug.print(" - %s" % name, DebugChannel.STATE)
			return result
		# State Management
		self.state_active = None
		self.state_stored = None
		self.state_loaded = state_load(state_directory)
		self.state_bind = lambda: self.app.bind("<Key>", self.on_key_pressed)
		# NOTE: these shouldn't be public
		# Create Application
		Debug.print("Creating application", DebugChannel.RIEM)
		self.app = Tk()
		self.app.title(title)
		self.app.geometry("%dx%d" % (self.size.width, self.size.height))
		self.app.resizable(False, False)
		if maximise is True:
			self.app.attributes("-fullscreen", True)
		if icon is not None:
			Debug.print(" - locating custom icon %s" % icon, DebugChannel.RIEM)
			self.app.iconbitmap(r'%s' % os.path.join(os.getcwd(), "resources", "icons", "%s.ico" % icon))
		# NOTE: self.app shouldn't be public
		# Create Canvas
		canvas = Canvas(self.app, bg = "black", width = self.size.width, height = self.size.height, highlightthickness = 0)
		canvas.pack()
		# Create Graphics
		gfx: Graphics = Graphics(canvas, default_text)
		# Intro State
		self.state_active = StateIntro(self, state_initial)
		# Initialise Controller
		Debug.print("Initialising controller", DebugChannel.INPUT)
		self.controller = Controller(self)
		# Application Status
		self.running = True
		# Create Loop
		def loop() -> None:
			"""One frame: poll input, tick the state, render, reschedule."""
			# Not Running
			if self.running is not True:
				return
			# Timer Start
			loop_time: int = (time.time() * 1000)
			# Controller Actions
			self.controller.get_actions().each(lambda it: self.action(it))
			# Application Tick
			self.state_active.tick()
			self.state_active.tick_event()
			# Application Render
			gfx.draw_rect(Point(0, 0), self.size, "black", True)
			self.state_active.render(gfx)
			# Schedule Loop (subtract this frame's duration from the tick budget)
			loop_time = (time.time() * 1000) - loop_time
			loop_wait: int = 0
			if loop_time < tick_ms:
				loop_wait = tick_ms - loop_time
			self.app.after(int(loop_wait), loop)
		# Invoke Loop
		loop()
		# Start Application
		Debug.print("Initialising application loop", DebugChannel.RIEM)
		self.app.mainloop()
	@staticmethod
	def _debug_parse(value: str, title: str, size: Dimensions, tick_ms: int) -> None:
		"""Parse a '+ABC' / '-ABC' debug string and configure debug channels."""
		# Invalid Value
		# BUG FIX: re.match returns a Match object or None — never False — so
		# the original `is False` comparison could not reject invalid strings.
		if not isinstance(value, str) or re.match(r"^[\+\-][A-Z]*$", value) is None:
			raise Exception("Invalid debug string!")
		# Disable Channels
		if value[0] == "-":
			# Disable All
			if len(value) == 1:
				return
			# Disable Specific (channels NOT listed after '-' stay enabled)
			if "R" not in value: Debug.debug_channels[DebugChannel.RIEM] = True
			if "S" not in value: Debug.debug_channels[DebugChannel.STATE] = True
			if "A" not in value: Debug.debug_channels[DebugChannel.AUDIO] = True
			if "G" not in value: Debug.debug_channels[DebugChannel.GRAPHICS] = True
			if "I" not in value: Debug.debug_channels[DebugChannel.INPUT] = True
		# Enable Channels
		if value[0] == "+":
			# Enable All
			if len(value) == 1:
				Debug.debug_channels = {
					DebugChannel.RIEM: True,
					DebugChannel.STATE: True,
					DebugChannel.AUDIO: True,
					DebugChannel.GRAPHICS: True,
					DebugChannel.INPUT: True
				}
			# Enable Specific
			if "R" in value: Debug.debug_channels[DebugChannel.RIEM] = True
			if "S" in value: Debug.debug_channels[DebugChannel.STATE] = True
			if "A" in value: Debug.debug_channels[DebugChannel.AUDIO] = True
			if "G" in value: Debug.debug_channels[DebugChannel.GRAPHICS] = True
			if "I" in value: Debug.debug_channels[DebugChannel.INPUT] = True
		# Print Info
		print("")
		Debug.print("Application Debug Mode")
		Debug.print("======================")
		Debug.print("Version: %s" % __version__)
		Debug.print("Project: %s" % title)
		Debug.print("Window: %d x %d" % (size.width, size.height))
		Debug.print("Tick: %d ms" % tick_ms)
		Debug.print()
	def action(self, action: Action) -> None:
		"""Forward an input action to the active state."""
		Debug.print(action, DebugChannel.STATE)
		self.state_active.on_action(action)
	def get_dimensions(self) -> Dimensions:
		"""Return the fixed window dimensions."""
		return self.size
	def get_version(self) -> str:
		"""Return the riem framework version string."""
		return __version__
	def on_key_pressed(self, event: Any) -> None:
		"""Translate a Tk key event into an Action, if the keycode is mapped."""
		# NOTE: event should be specifically typed here
		if event.keycode in Keyboard.action:
			Debug.print(event, DebugChannel.INPUT)
			self.action(Keyboard.action[event.keycode])
	def state_revert(self, data: Dict = None) -> None:
		"""Terminate the active state and resume the previously stored one."""
		# Debug
		Debug.print("Reverting to stored state", DebugChannel.STATE)
		# Purge Images
		ImageLoader.purge()
		# Nothing Stored
		if self.state_stored is None:
			raise Exception("No stored state to revert to!")
		# Terminate Existing
		self.state_active.on_terminate()
		# Revert State
		self.state_active = self.state_stored
		self.state_active.on_revert(data)
		self.state_stored = None
		# Bind Events
		self.state_bind()
	def state_update(self, state: str, store: bool = False, data: Dict = None) -> None:
		"""Switch to state *state*, optionally storing the current one for revert."""
		# Debug
		Debug.print("Updating to %s state" % state, DebugChannel.STATE)
		# Purge Images
		ImageLoader.purge()
		# Existing State
		if self.state_active is not None:
			# Store Existing
			if store is True:
				self.state_active.on_store()
				self.state_stored = self.state_active
			# Terminate Existing
			else:
				self.state_active.on_terminate()
		# Initialise State
		self.state_active = self.state_loaded[state](self)
		# NOTE: put the above into a method to gracefully handle this
		self.state_active.on_start(data)
		# Bind Events
		self.state_bind()
	def terminate(self) -> None:
		"""Stop the loop, shut down the controller and exit the process."""
		# Already Terminating
		if self.running is False:
			return
		# Debug
		Debug.print("Terminating application")
		# Application Status
		self.running = False
		# Terminate Controller
		self.controller.terminate()
		# System Exit
		sys.exit()
class State(ABC):
	"""Base class for application states (screens).

	Subclasses must implement render() and tick(); the on_* hooks are
	optional lifecycle callbacks invoked by Application.
	"""
	def __init__(self, app: Application) -> None:
		self.app = app
		self.event = ArrayList()  # pending timed events ({"logic", "timer"} dicts)
	def add_event(self, time_ms: int, logic: Callable) -> None:
		"""Schedule *logic* to run once, *time_ms* milliseconds from now."""
		# Create Event
		event_new = {
			"logic": logic,
			"timer": (time.time() * 1000) + time_ms
		}
		# Debug
		Debug.print("Creating new event %d to fire in %d ms" % (id(event_new), time_ms), DebugChannel.STATE)
		# Append Event
		self.event = self.event.add(event_new)
	def on_action(self, action: Action) -> None:
		"""Hook: an input action occurred. Default: ignore."""
		pass
	def on_revert(self, data: Dict) -> None:
		"""Hook: this state was resumed after being stored. Default: ignore."""
		pass
	def on_start(self, data: Dict) -> None:
		"""Hook: this state has just become active. Default: ignore."""
		pass
	def on_store(self) -> None:
		"""Hook: this state is about to be stored for later revert. Default: ignore."""
		pass
	def on_terminate(self) -> None:
		"""Hook: this state is being discarded. Default: ignore."""
		pass
	@abstractmethod
	def render(self, gfx: Graphics) -> None:
		pass
	def render_hint(self, gfx: Graphics, value: str) -> None:
		"""Draw a small hint line in the bottom-left corner of the window."""
		gfx.draw_text(value, Point(10, self.app.get_dimensions().height - 25), Align.LEFT, "Inconsolata 12")
		# NOTE: maybe make this game specific (move to a helper class or graphics library of styles?)
	def render_title(self, gfx: Graphics, value: str) -> None:
		"""Draw the screen title in the top-left corner of the window."""
		gfx.draw_text(value, Point(25, 25), Align.LEFT, "Inconsolata 22", "#E62959", "#801731")
		# NOTE: maybe make this game specific (move to a helper class or graphics library of styles?)
	@abstractmethod
	def tick(self) -> None:
		pass
	def tick_event(self) -> None:
		"""Fire (and remove) every scheduled event whose timer has elapsed."""
		# Check Events
		time_ms: int = time.time() * 1000
		for event in self.event.filter(lambda it: time_ms >= it["timer"]):
			# Debug
			Debug.print("Invoking event %d" % id(event), DebugChannel.STATE)
			# Invoke Logic
			event["logic"]()
			# Remove Event
			self.event = self.event.remove(event)
class StateIntro(State):
	"""Built-in splash state: shows the riem logo for 2 s, then switches to
	the application's configured initial state."""
	def __init__(self, app: Application, state_initial: str) -> None:
		super().__init__(app)
		# Create Event
		self.add_event(2000, lambda: self.app.state_update(state_initial))
	def render(self, gfx: Graphics) -> None:
		# Load Logo (kept as an attribute so Tk does not garbage-collect it)
		self.logo = ImageTk.PhotoImage(Image.open("resources/images/brand/riem_logo.png"))
		# NOTE: this is currently assuming the file exists in project
		# change this later to use a predefined byte array
		# Render Logo
		gfx.draw_image(self.logo, Point(self.app.get_dimensions().width / 2, self.app.get_dimensions().height / 2), Align.CENTER)
		# Render Loading
		# NOTE: when resources are preloaded, there should be an object that fires off these tasks
		# and provides a completion percentage to the a progress bar object that renders
	def tick(self) -> None:
		pass
3236595 | <reponame>Meet088/Scrapping-images-from-instagram<filename>eval_captions_prex.py<gh_stars>0
from utils.coco.coco import COCO
from utils.coco.pycocoevalcap.eval import COCOEvalCap
eval_gt_coco = COCO('./val/captions_val2014.json')
eval_result_coco = eval_gt_coco.loadRes('./val/results.json')
scorer = COCOEvalCap(eval_gt_coco, eval_result_coco)
scorer.evaluate()
print('complete')
| StarcoderdataPython |
5174014 | #!/usr/bin/env python3
import argparse
import os
import sys

from xcanalyzer.argparse import parse_ignored_folders
from xcanalyzer.xcodeproject.parsers import XcProjectParser
from xcanalyzer.xcodeproject.generators import XcProjReporter
from xcanalyzer.xcodeproject.exceptions import XcodeProjectReadException
# --- Arguments ---
argument_parser = argparse.ArgumentParser(description="List files from the folder not referenced in the Xcode project. Ignore folders named `.git` and `DerivedData`.")
# Project folder argument
argument_parser.add_argument('path',
help='Path of the folder containing your `.xcodeproj` folder.')
# Ignore folders argument
argument_parser.add_argument('-d', '--ignore-dir',
action='append',
dest='ignored_folders',
metavar='<dirpath>',
help='Path or name of a folder to ignore.')
# Mode
argument_parser.add_argument('-m', '--mode',
choices=['all', 'project', 'target', 'unreferenced', 'referenced'],
dest='orphan_mode',
default='all',
help="Orphan mode:\
'referenced' means all *Info.plist and *.h files referenced by at least one target. \
'unreferenced' means all *Info.plist and *.h files in the project not referenced by any target. \
'project' means all files in the folder but not referenced in the project.\
'target' means all files in the project but not referenced by any target (excluding *Info.plist and *.h files).\
'all' (default) means all files in the folder but not referenced by any target (neither the project).")
# --- Parse arguments ---
args = argument_parser.parse_args()

# Argument: path => normalize by dropping any trailing path separators
path = args.path.rstrip(os.path.sep)

# Folders that are always ignored, merged with any the user supplied
ignored_folders = {'DerivedData/', '.git/'} | set(args.ignored_folders or [])

ignored_dirpaths, ignored_dirs = parse_ignored_folders(ignored_folders)
# Xcode code project reader
xcode_project_reader = XcProjectParser(path)

# Loading the project
try:
    xcode_project_reader.load()
except XcodeProjectReadException as e:
    print("An error occurred when loading Xcode project: {}".format(e.message))
    # Exit with a non-zero status so shell scripts/CI can detect the failure.
    # (The previous bare `exit()` reported success via status 0.)
    sys.exit(1)

# Reporter: prints the orphan files according to the selected mode
reporter = XcProjReporter(xcode_project_reader.xc_project)
reporter.print_orphan_files(ignored_dirpaths,
                            ignored_dirs,
                            mode=args.orphan_mode)
| StarcoderdataPython |
11244820 | import logging
import warnings
from datetime import datetime, timedelta
from django.contrib.auth.backends import ModelBackend
from django.core.cache import cache
from .exceptions import RateLimitException
logger = logging.getLogger('ratelimitbackend')


class RateLimitMixin(object):
    """
    A mixin to enable rate-limiting in an existing authentication backend.

    Failed login attempts are counted per client IP in one-minute cache
    buckets; once `requests` failures accumulate over the last `minutes`
    minutes, authenticate() raises RateLimitException before checking
    credentials.
    """
    cache_prefix = 'ratelimitbackend-'  # namespace prefix for all cache keys
    minutes = 5  # length of the sliding window, in minutes
    requests = 30  # max failed attempts allowed within the window

    def authenticate(self, username=None, password=None, request=None):
        """Authenticate like the parent backend, enforcing the per-IP limit
        first and counting this attempt if it fails.

        Raises RateLimitException when the limit has been reached.
        """
        if request is not None:
            counts = self.get_counters(request)
            if sum(counts.values()) >= self.requests:
                logger.warning(
                    u"Login rate-limit reached: username '{0}', IP {1}".format(
                        username, request.META['REMOTE_ADDR']
                    )
                )
                raise RateLimitException('Rate-limit reached', counts)
        else:
            # Without a request we cannot identify the client IP, so the
            # attempt cannot be rate-limited at all.
            warnings.warn(u"No request passed to the backend, unable to "
                          u"rate-limit. Username was '%s'" % username,
                          stacklevel=2)
        user = super(RateLimitMixin, self).authenticate(username, password)
        if user is None and request is not None:
            # Only *failed* attempts are counted against the limit.
            logger.info(
                u"Login failed: username '{0}', IP {1}".format(
                    username,
                    request.META['REMOTE_ADDR'],
                )
            )
            cache_key = self.get_cache_key(request)
            self.cache_incr(cache_key)
        return user

    def get_counters(self, request):
        # Fetch all per-minute counters for this IP in a single cache call.
        return cache.get_many(self.keys_to_check(request))

    def keys_to_check(self, request):
        # Keys for the current minute plus each of the previous `minutes`
        # minutes (hence the +1).
        now = datetime.now()
        return [
            self.key(
                request,
                now - timedelta(minutes=minute),
            ) for minute in range(self.minutes + 1)
        ]

    def get_cache_key(self, request):
        # Key of the bucket for the current minute.
        return self.key(request, datetime.now())

    def key(self, request, dt):
        # One bucket per (client IP, minute): '<prefix><ip>-YYYYMMDDHHMM'.
        return '%s%s-%s' % (
            self.cache_prefix,
            request.META.get('REMOTE_ADDR', ''),
            dt.strftime('%Y%m%d%H%M'),
        )

    def cache_incr(self, key):
        """
        Non-atomic cache increment operation. Not optimal but
        consistent across different cache backends.
        """
        # NOTE(review): concurrent failed logins may race here and
        # under-count; acceptable for a best-effort throttle.
        cache.set(key, cache.get(key, 0) + 1, self.expire_after())

    def expire_after(self):
        """Cache expiry delay"""
        # Buckets live one minute longer than the window so boundary minutes
        # are still readable by keys_to_check().
        return (self.minutes + 1) * 60
class RateLimitModelBackend(RateLimitMixin, ModelBackend):
    """Django ModelBackend with per-IP login rate limiting applied."""
    pass
| StarcoderdataPython |
1824872 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.handlers import base
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators import plugin
from nailgun import objects
class PluginHandler(base.SingleHandler):
    """REST handler for a single plugin object."""
    validator = plugin.PluginValidator
    single = objects.Plugin
class PluginCollectionHandler(base.CollectionHandler):
    """REST handler for the plugin collection."""

    collection = objects.PluginCollection
    validator = plugin.PluginValidator

    @content
    def POST(self):
        """:returns: JSONized REST object.
        :http: * 201 (object successfully created)
               * 400 (invalid object data specified)
               * 409 (object with such parameters already exists)
        """
        data = self.checked_data(self.validator.validate)
        # A plugin is uniquely identified by (name, version); answer 409 with
        # the existing object instead of creating a duplicate.
        obj = self.collection.single.get_by_name_version(
            data['name'], data['version'])
        if obj:
            raise self.http(409, self.collection.single.to_json(obj))
        return super(PluginCollectionHandler, self).POST()
| StarcoderdataPython |
11339372 | <filename>rlberry/rendering/tests/test_rendering_interface.py
import os
import pytest
from pyvirtualdisplay import Display
from rlberry.envs.classic_control import MountainCar
from rlberry.envs.classic_control import Acrobot
from rlberry.envs.classic_control import Pendulum
from rlberry.envs.finite import Chain
from rlberry.envs.finite import GridWorld
from rlberry.envs.benchmarks.grid_exploration.four_room import FourRoom
from rlberry.envs.benchmarks.grid_exploration.six_room import SixRoom
from rlberry.envs.benchmarks.grid_exploration.apple_gold import AppleGold
from rlberry.envs.benchmarks.ball_exploration import PBall2D, SimplePBallND
from rlberry.rendering import RenderInterface
from rlberry.rendering import RenderInterface2D
from rlberry.envs import Wrapper
# Start a headless X display so the rendering tests can run on CI machines
# without a real screen; deliberately best-effort — fall through silently if
# Xvfb is unavailable (e.g. on a developer machine with a real display).
try:
    display = Display(visible=0, size=(1400, 900))
    display.start()
except Exception:
    pass
# Every environment class exercised by the parametrized tests below.
classes = [
    Acrobot,
    Pendulum,
    MountainCar,
    GridWorld,
    Chain,
    PBall2D,
    SimplePBallND,
    FourRoom,
    SixRoom,
    AppleGold
]
@pytest.mark.parametrize("ModelClass", classes)
def test_instantiation(ModelClass):
    """Rendering can be toggled off and back on for envs that support it."""
    env = ModelClass()
    if isinstance(env, RenderInterface):
        env.disable_rendering()
        assert not env.is_render_enabled()
        env.enable_rendering()
        assert env.is_render_enabled()
@pytest.mark.parametrize("ModelClass", classes)
def test_render2d_interface(ModelClass):
    """Run short episodes with rendering enabled, save a video, clean up."""
    env = ModelClass()
    if isinstance(env, RenderInterface2D):
        env.enable_rendering()
        if env.is_online():
            # Two short episodes of five random steps each.
            for _ in range(2):
                state = env.reset()
                for _ in range(5):
                    assert env.observation_space.contains(state)
                    action = env.action_space.sample()
                    next_s, _, _, _ = env.step(action)
                    state = next_s
                env.render(loop=False)
            env.save_video('test_video.mp4')
            env.clear_render_buffer()
            # Best-effort cleanup; the file may not exist if saving failed.
            try:
                os.remove('test_video.mp4')
            except Exception:
                pass
@pytest.mark.parametrize("ModelClass", classes)
def test_render2d_interface_wrapped(ModelClass):
    """Same as test_render2d_interface, but through a Wrapper indirection."""
    env = Wrapper(ModelClass())
    if isinstance(env.env, RenderInterface2D):
        env.enable_rendering()
        if env.is_online():
            # Two short episodes of five random steps each.
            for _ in range(2):
                state = env.reset()
                for _ in range(5):
                    assert env.observation_space.contains(state)
                    action = env.action_space.sample()
                    next_s, _, _, _ = env.step(action)
                    state = next_s
                env.render(loop=False)
            env.save_video('test_video.mp4')
            env.clear_render_buffer()
            # Best-effort cleanup; the file may not exist if saving failed.
            try:
                os.remove('test_video.mp4')
            except Exception:
                pass
| StarcoderdataPython |
8155094 | from PyQt5 import QtWidgets, uic
import sys
import numpy as np
class MainWindow(QtWidgets.QMainWindow):
    """Main window: loads the Qt Designer UI and plots a demo sine wave."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)

        # Load the UI Page
        self.ui = uic.loadUi('mainwindow.ui', self)

        # Demo signal: sin(t), sampled every 0.1 s over [0, 100)
        t = np.arange(0, 100, 0.1)
        amplitude = np.sin(t)

        plot = self.widgetSignal
        plot.clear()
        plot.setLabel('left', 'Signal Sin Wave', units='(V)')
        plot.setLabel('bottom', 'Time', units='(sec)')
        plot.plot(t, amplitude, clear=True)

        self.ui.show()
def main():
    """Create the Qt application, show the main window, and run the loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()  # kept alive by this reference until exec_ returns
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
85498 | <filename>pirates/piratesgui/PirateButtonChain.py
# File: P (Python 2.4)
from direct.showbase.ShowBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.fsm import StateData
from otp.otpbase import OTPGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import SocialPage
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from otp.otpbase import OTPGlobals
from otp.friends.FriendInfo import FriendInfo
from pirates.piratesbase import Freebooter
import GuiButton
class PirateButtonChain:
    """A vertical chain of rope-styled buttons (Pirates avatar-chooser art).

    Usage: premakeButton() once per entry (queues them), then makeButtons()
    to lay the queued buttons out top-to-bottom (or bottom-up when
    fromBottom is True).
    """

    def __init__(self, width, parent, fromBottom = False):
        self.fromBottom = fromBottom
        self.width = width
        self.baseFrame = DirectFrame(parent = parent, relief = None)
        self.load()

    def load(self):
        """Load the rope art and precompute layout constants."""
        self.buttonCount = 0
        self.buttonIndex = 0
        self.buttonList = []
        gui = loader.loadModel('models/gui/avatar_chooser_rope')
        # Each button geom is a 4-state tuple: (normal, click, rollover, disabled).
        topPanel = gui.find('**/avatar_c_A_top')
        topPanelOver = gui.find('**/avatar_c_A_top_over')
        self.topButton = (topPanel, topPanel, topPanelOver, topPanel)
        middlePanel = gui.find('**/avatar_c_A_middle')
        middlePanelOver = gui.find('**/avatar_c_A_middle_over')
        self.middleButton = (middlePanel, middlePanel, middlePanelOver, middlePanel)
        bottomPanel = gui.find('**/avatar_c_A_bottom')
        bottomPanelOver = gui.find('**/avatar_c_A_bottom_over')
        self.bottomButton = (bottomPanel, bottomPanel, bottomPanelOver, bottomPanel)
        # Layout constants (tuned by hand; decompiled float literals kept as-is).
        self.iScale = 0.25
        self.gScale = (self.width * 0.65000000000000002, 0.0, 0.28000000000000003)
        self.tPos = (0.0, -0.014999999999999999, 0.0)    # text offset, top/middle buttons
        self.tBPos = (0.0, 0.025000000000000001, 0.0)    # text offset, bottom button
        self.iPos = (0.10000000000000001, 0, -0.0)
        self.offX = self.width * 0.5                     # horizontal center of the chain
        self.topZ = 0.080000000000000002                 # height of the top segment
        self.midZ = 0.074999999999999997                 # height of each middle segment
        self.endZ = 0.11                                 # height of the bottom segment
        self.startZ = -0.029999999999999999              # z of the first (top) button

    def show(self):
        self.baseFrame.show()

    def hide(self):
        self.baseFrame.hide()

    def destroy(self):
        # Drop references first; destroying the base frame destroys children.
        self.buttonList = []
        self.baseFrame.destroy()

    def setPos(self, x, y, z):
        self.baseFrame.setPos(x, y, z)

    def premakeButton(self, inText, inCommand, extra = None, textPos = None):
        """Create a button widget and queue it for later layout.

        textPos, if given, is an (x, y, z) offset added to the default text
        position.  Returns the created DirectButton.
        """
        if not hasattr(self, 'buttonQueue'):
            self.buttonQueue = []
        if not textPos:
            buttonTextPos = self.tPos
        else:
            xLoc = self.tPos[0] + textPos[0]
            yLoc = self.tPos[1] + textPos[1]
            zLoc = self.tPos[2] + textPos[2]
            buttonTextPos = (xLoc, yLoc, zLoc)
        preformButton = DirectButton(parent = self.baseFrame, relief = None, text = inText, text_scale = PiratesGuiGlobals.TextScaleLarge, text_pos = buttonTextPos, text_align = TextNode.ACenter, text0_fg = PiratesGuiGlobals.TextFG2, text1_fg = PiratesGuiGlobals.TextFG3, text2_fg = PiratesGuiGlobals.TextFG1, text3_fg = PiratesGuiGlobals.TextFG9, text_shadow = PiratesGuiGlobals.TextShadow, textMayChange = 1, command = inCommand, geom = self.middleButton, geom_scale = self.gScale, geom0_color = (1, 1, 1, 1), geom1_color = (1, 1, 1, 1), geom2_color = (1, 1, 1, 1), geom3_color = (0.5, 0.5, 0.5, 1))
        self.buttonList.append(preformButton)
        self.buttonQueue.append((inText, inCommand))
        self.buttonIndex += 1
        return preformButton

    def makeButtons(self):
        """Lay out every queued button and clear the queue."""
        if self.fromBottom:
            # Shift the whole chain up so it grows from the bottom edge.
            self.startZ += self.midZ * len(self.buttonQueue)
        for index in range(0, len(self.buttonQueue)):
            isLast = False
            if index == len(self.buttonQueue) - 1:
                isLast = True
            self.createButtons(self.buttonQueue[index][0], self.buttonQueue[index][1], isLast)
        self.buttonQueue = []

    def getButton(self, index):
        return self.buttonList[index]

    def createButtons(self, inText, inCommand, inLast = False):
        """Position the next queued button and assign its rope segment art."""
        formingButton = self.buttonList[self.buttonCount]
        if self.buttonCount == 0:
            # First button gets the top rope segment.
            formingButton.setPos(self.offX, 0, self.startZ)
            formingButton['geom'] = self.topButton
        elif inLast and not (self.fromBottom):
            # Last button (top-down layout) gets the bottom segment and its
            # own text offset.
            formingButton.setPos(self.offX, 0, self.startZ - self.topZ + self.midZ * (self.buttonCount - 2) + self.endZ)
            formingButton['geom'] = self.bottomButton
            formingButton['text_pos'] = self.tBPos
        else:
            formingButton.setPos(self.offX, 0, self.startZ - self.topZ + self.midZ * (self.buttonCount - 1))
            formingButton['geom'] = self.middleButton
        formingButton.resetFrameSize()
        self.buttonCount += 1

    def getWidth(self):
        return self.width

    def getHeight(self):
        # Total height: top segment + middle segments + bottom segment.
        return self.topZ + self.midZ * (self.buttonCount - 2) + self.endZ
| StarcoderdataPython |
296288 | import boto3
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Attr
import uuid
class DynamodbHandler:
    """Thin wrapper around two DynamoDB tables: one for raw chat messages,
    one for user ratings of bot responses."""

    def __init__(self, MESSAGE_TABLE, RATING_TABLE):
        # Table names are injected so the handler can target any deployment.
        self.dynamodb = boto3.resource('dynamodb')
        self.messages_table = self.dynamodb.Table(MESSAGE_TABLE)
        self.rating_table = self.dynamodb.Table(RATING_TABLE)

    def put_message(self, user_id, time, message_txt, bot_given_response):
        """Store one user message together with the bot's reply; returns the
        raw DynamoDB response."""
        response = self.messages_table.put_item(
            Item={
                'interaction_id': str(uuid.uuid4()),  # random primary key
                'user_id': user_id,
                'time': time,
                'message_txt': message_txt,
                'bot_given_response': bot_given_response,
            }
        )
        return response

    def put_rating(self, user_id, time, rating, message_user, bot_given_response):
        """Store a user's rating for a bot response.

        Returns the DynamoDB response, or None if `rating` cannot be coerced
        to int or the write fails (errors are only printed).
        """
        try:
            rating = int(rating)
            response = self.rating_table.put_item(
                Item={
                    'interaction_id': str(uuid.uuid4()),
                    'user_id': user_id,
                    'time': time,
                    'message_user': message_user,
                    'bot_given_response': bot_given_response,
                    'rating': rating,
                }
            )
            return response
        except Exception as e:
            print(e)
            return None

    def get_last_interaction(self, user_id):
        """Return (message, bot_response, time) of the user's most recent
        message, or (None, None, None) if there is none or the scan fails.

        NOTE(review): scan() reads the whole table and only the first result
        page is inspected (LastEvaluatedKey is ignored) — fine for small
        tables, revisit for large ones.
        """
        try:
            response = self.messages_table.scan(FilterExpression=Attr('user_id').eq(user_id))
            items = response['Items']
            if len(items) < 1:
                return None, None, None
            # Linear search for the newest item; assumes 'time' values are
            # mutually comparable and greater than -999 — TODO confirm.
            last_time = -999
            last_interaction = ""
            last_bot_response = ""
            for item in items:
                if item['time'] > last_time:
                    last_time = item['time']
                    last_interaction = item['message_txt']
                    last_bot_response = item['bot_given_response']
            return last_interaction, last_bot_response, last_time
        except ClientError as e:
            print(e.response['Error']['Message'])
            return None, None, None
| StarcoderdataPython |
4866774 | <gh_stars>0
# class Solution:
# def movesToStamp(self, stamp: str, target: str) -> List[int]:
# n = len(target)
# m = len(stamp)
# t = list(target)
# s = list(stamp)
# ans = []
# def check(ix):
# found = False
# for jx in range(m):
# if t[ix + jx] == '?':
# continue
# if t[ix + jx] != s[jx]:
# return False
# found = True
# if found:
# t[ix:ix+m] = ['?'] * m
# ans.append(ix)
# return found
# found = True
# while found:
# found = False
# for i in range(n - m + 1):
# found = found | check(i)
# print(ans, t)
# if t == ['?'] * n:
# return ans[::-1]
# else:
# return []
class Solution:
    def movesToStamp(self, stamp: str, target: str) -> List[int]:
        """Return stamp positions that turn '?'*len(target) into *target*,
        or [] if impossible (LeetCode 936, "Stamping the Sequence").

        Works backwards: repeatedly find a window of *target* that could
        have been the LAST stamp applied (every non-wildcard char matches
        the stamp), wipe it to '?', and record the position.  Reversing the
        recorded positions gives a valid forward stamping order.
        """
        ls = len(stamp)
        lt = len(target)
        s = list(stamp)
        t = list(target)
        ans = []

        def check(ix):
            # A window can be "unstamped" when every char is already '?' or
            # matches the stamp; it is only useful (flg = 1) if at least one
            # real, non-wildcard char matches.
            flg = 0
            for jx in range(ls):
                if t[ix + jx] == '?':
                    continue
                if t[ix + jx] != s[jx]:
                    return 0
                flg = 1
            if flg:
                t[ix:ix + ls] = ['?'] * ls
                ans.append(ix)
            return flg

        # Keep sweeping until a full pass makes no progress.
        found = 1
        while found:
            found = 0
            for i in range(lt - ls + 1):
                found = found | check(i)
        # (Leftover debug print of `ans` removed.)
        if t == ['?'] * lt:
            return ans[::-1]
        return []
| StarcoderdataPython |
235864 | <gh_stars>1-10
import numpy as np
import numpy.random as rng
from abc import ABC, abstractmethod
from copy import deepcopy
import os
import logging
import math
class GibbsChain(ABC):
    """
    A chain that could sample from a gibbs distribution
    """

    def __init__(self, beta=0, startpoint=None):
        """Set beta and startpoint; if startpoint is not given, draw one."""
        self.current = startpoint
        self.beta = beta
        self.offset = self.get_offset()
        if self.current is None:  # identity check (was `== None`)
            self.set_startpoint()

    def restart_and_sample(self, tvd=None, steps=None):
        """Restart from a fresh start point, then run the chain either for a
        mixing time guaranteeing total-variation distance *tvd*, or for an
        explicit number of *steps*.

        Raises:
            ValueError: if neither tvd nor steps is given.
        """
        if tvd is None and steps is None:
            # Previously this only printed a message and then crashed on
            # int(None); fail fast with a meaningful error instead.
            raise ValueError("restart_and_sample requires either tvd or steps")
        self.set_startpoint()
        if tvd is not None:
            mixtime = self.compute_mixingtime(tvd)
        else:
            mixtime = steps
        for _ in range(int(mixtime)):
            self.step()

    def get_uniform_mixing(self):
        """ return uniform mixing time """
        Lambda = self.get_Lambda()
        pai_min = self.get_lower_paimin()
        return int(np.ceil(np.log(1/pai_min)/np.log(1/Lambda)))

    def compute_mixingtime(self, tvd):
        """ compute mixing time given tvd """
        Lambda = self.get_Lambda()
        spectral_gap = 1-Lambda
        pai_min = self.get_lower_paimin()
        return int(np.ceil((np.log(1/pai_min)/2 - np.log(2 * tvd)) / spectral_gap))

    def get_upper_Q(self):
        """ return an upper bound on Q """
        Hbar = self.get_Hbar()
        return np.exp(Hbar)

    def get_lower_paimin(self):
        """ return a lower bound on pai_min """
        Hbar = self.get_Hbar()
        Hmin = self.get_Hmin()
        Hmax = self.get_Hmax()
        # Interpolation weight of Hbar between the energy extremes.
        rho = (Hbar - Hmin) / (Hmax - Hmin)
        upper_invQ = (1 - rho) * np.exp(-self.beta * Hmax) + rho * np.exp(-self.beta * Hmin)
        Zmin = self.get_Zmin()
        return np.exp(-self.beta * Hmax) / Zmin / upper_invQ

    @abstractmethod
    def set_startpoint(self):
        """ helper function to set the startpoint """
        pass

    @abstractmethod
    def get_Hmax(self):
        pass

    @abstractmethod
    def get_Hmin(self):
        pass

    @abstractmethod
    def get_Hbar(self):
        pass

    @abstractmethod
    def get_Lambda(self):
        pass

    @abstractmethod
    def get_Zmin(self):
        pass

    @abstractmethod
    def step(self):
        """ take a single step of the chain, update the value for *current* """
        pass

    @abstractmethod
    def get_Hamiltonian(self):
        """ compute the hamiltonian value of current sample """
        pass

    @abstractmethod
    def get_offset(self):
        return 0
class VotingChain(GibbsChain):
    """Gibbs chain over a voting model: one decision spin Q in {-1, +1},
    n "true" voters Ts and n "false" voters Fs in {0, 1}."""

    def __init__(self, beta = 0, startpoint = None, n = 10, w = None, wt = None, wf = None):
        # w: coupling between Q and the vote balance; wt/wf: per-voter weights.
        self.n = n
        self.w = w
        self.wt = wt
        self.wf = wf
        super().__init__(beta, startpoint)

    def get_Lambda(self):
        # Upper bound on the second-largest eigenvalue of the transition matrix.
        n_variable = 2*self.n+1
        hw = 2*n_variable+1
        return 1 - np.exp(-3*hw*self.beta*2)/n_variable

    def get_Hmax(self):
        # Largest achievable energy: all positive weights active, |coupling| maxed.
        return self.offset + sum([x for x in self.wt if x>0]) + sum([x for x in self.wf if x>0]) + np.abs(self.w)*self.n

    def get_Hmin(self):
        return self.offset + sum([x for x in self.wt if x<0]) + sum([x for x in self.wf if x<0]) - np.abs(self.w)*self.n

    def get_Hbar(self):
        # Mean energy under the uniform distribution (each bit on half the time).
        return sum(self.wf)/2 + sum(self.wt)/2 + self.offset

    def get_Zmin(self):
        # Number of configurations: 2^(2n+1).
        return 2**(2*self.n+1)

    def set_startpoint(self):
        Q = rng.randint(0, 2)*2-1 # {-1, 1}
        Ts = rng.randint(0, 2, self.n)
        Fs = rng.randint(0, 2, self.n)
        self.current = {"Q": Q, "Ts": Ts, "Fs": Fs}

    def step(self):
        """One Glauber update: pick a variable uniformly, flip with
        probability ratio/(1+ratio)."""
        Q = self.current["Q"]
        Ts = self.current["Ts"]
        Fs = self.current["Fs"]
        rv = rng.randint(0, self.n * 2 + 1) # pick the changed variable uniformly
        if rv == 0:
            ratio = np.exp(-self.beta * (-Q-Q) * (sum(Ts) - sum(Fs)))
        elif rv <= self.n:
            # NOTE(review): the T/F flip ratios use energy change ±1 and
            # ignore both wt/wf and the w*Q coupling term, which does not
            # match get_Hamiltonian — confirm this is intentional (cf.
            # VotingChainLogical, which recomputes the full Hamiltonian).
            ratio = np.exp(-self.beta * (-Ts[rv-1]+0.5)*2)
        else:
            ratio = np.exp(-self.beta * (-Fs[rv-self.n-1]+0.5)*2)
        if rng.random() < ratio/(1+ratio):
            if rv == 0:
                self.current["Q"] = - self.current["Q"]
            elif rv <= self.n:
                self.current["Ts"][rv-1] = 1 - self.current["Ts"][rv-1]
            else:
                self.current["Fs"][rv-self.n-1] = 1 - self.current["Fs"][rv-self.n-1]

    def get_Hamiltonian(self, X):
        """Energy of configuration X = {"Q", "Ts", "Fs"} (offset keeps it >= 1)."""
        Q = X["Q"]
        Ts = X["Ts"]
        Fs = X["Fs"]
        Hx = self.w * Q * (sum(Ts) - sum(Fs))
        for i in range(self.n):
            Hx += self.wt[i]*Ts[i] + self.wf[i]*Fs[i]
        Hx += self.offset # this is to make sure Hx >= 1
        return Hx

    def get_offset(self):
        # Offset cancels the most negative achievable raw energy, plus 1.
        return sum([-x for x in self.wt if x<0]) + sum([-x for x in self.wf if x<0]) + np.abs(self.w)*self.n + 1
class VotingChainLogical(GibbsChain):
    """Variant of VotingChain where Q couples to the logical OR of the
    voters (max(Ts) - max(Fs)) instead of the vote sums."""

    def __init__(self, beta = 0, startpoint = None, n = 10, w = None, wt = None, wf = None):
        self.n = n
        self.w = w
        self.wt = wt
        self.wf = wf
        super().__init__(beta, startpoint)

    def get_Lambda(self):
        # hw = 3: coupling term is bounded by |w| regardless of n here.
        n_variable = 2*self.n+1
        hw = 3
        return 1 - np.exp(-3*hw*self.beta*2)/n_variable

    def get_Hmax(self):
        return self.offset+ np.abs(self.w)+ sum([x for x in self.wt if x>0]) + sum([x for x in self.wf if x>0])

    def get_Hmin(self):
        return self.offset- np.abs(self.w)+ sum([x for x in self.wt if x<0]) + sum([x for x in self.wf if x<0])

    def get_Hbar(self):
        # Mean energy under the uniform distribution (each bit on half the time).
        return sum(self.wf)/2 + sum(self.wt)/2 + self.offset

    def get_Zmin(self):
        # Number of configurations: 2^(2n+1).
        return 2**(2*self.n+1)

    def set_startpoint(self):
        Q = rng.randint(0, 2)*2-1 # {-1, 1}
        Ts = rng.randint(0, 2, self.n)
        Fs = rng.randint(0, 2, self.n)
        self.current = {"Q": Q, "Ts": Ts, "Fs": Fs}

    def step(self):
        """Metropolis-style update: tentatively flip a uniformly chosen
        variable, accept with probability ratio/(1+ratio), else revert."""
        Ham1 = self.get_Hamiltonian(self.current)
        rv = rng.randint(0, self.n * 2 + 1) # pick the changed variable uniformly
        if rv == 0:
            self.current["Q"] = -self.current["Q"]
        elif rv <= self.n:
            self.current["Ts"][rv-1] = 1 - self.current["Ts"][rv-1]
        else:
            self.current["Fs"][rv-self.n-1] = 1 - self.current["Fs"][rv-self.n-1]
        ratio = np.exp(-self.beta * (self.get_Hamiltonian(self.current) - Ham1))
        if rng.uniform() >= ratio/(1+ratio):
            # change back (proposal rejected)
            if rv == 0:
                self.current["Q"] = -self.current["Q"]
            elif rv <= self.n:
                self.current["Ts"][rv-1] = 1 - self.current["Ts"][rv-1]
            else:
                self.current["Fs"][rv-self.n-1] = 1 - self.current["Fs"][rv-self.n-1]

    def get_Hamiltonian(self, X):
        """Energy of X; Q couples to max(Ts) - max(Fs) (logical OR of voters)."""
        Q = X["Q"]
        Ts = X["Ts"]
        Fs = X["Fs"]
        Hx = self.w * Q * (max(Ts) - max(Fs))
        for i in range(self.n):
            Hx += self.wt[i]*Ts[i] + self.wf[i]*Fs[i]
        Hx += self.offset # this is to make sure Hx >= 1
        return Hx

    def get_offset(self):
        # Offset cancels the most negative achievable raw energy, plus 1.
        return sum([-x for x in self.wt if x<0]) + sum([-x for x in self.wf if x<0]) + np.abs(self.w) + 1
class IsingChainLattice(GibbsChain):
    """Gibbs chain on an n x n Ising-like lattice stored as a flat array of
    n^2 binary spins; energy counts matching neighbor pairs."""

    def __init__(self, beta = 0, startpoint = None, n = 10):
        self.n = n
        super().__init__(beta, startpoint)

    def get_Lambda(self):
        return 1-1/(10*self.n**2*2*np.log(self.n))

    def get_Hmax(self):
        # All pairs matching: 2*n*(n-1) neighbor pairs.
        return self.offset + (self.n-1)*self.n*2

    def get_Hmin(self):
        return self.offset

    def get_Hbar(self):
        # Under the uniform distribution, each pair matches half the time.
        return self.offset + (self.n-1)*self.n

    def get_offset(self):
        return 1

    def set_startpoint(self):
        self.current = rng.randint(0,2,size=self.n**2)

    def step(self):
        """Glauber update of a uniformly chosen spin.

        NOTE(review): neighbors rv-1 / rv+1 are only bounds-checked against
        [0, n^2), so a spin at a row edge couples to the opposite edge of the
        adjacent row (flat-index wrap) — confirm this is intended rather than
        excluding cross-row pairs.
        """
        rv = rng.randint(0, self.n**2)
        neighbors = [rv-1, rv+1, rv-self.n, rv+self.n]
        match = 0
        for neighbor in neighbors:
            if 0<=neighbor<self.n**2:
                match += int(self.current[rv] == self.current[neighbor])
        # Energy change of flipping spin rv: matches become mismatches.
        diff = 4-match*2
        ratio = np.exp(-self.beta*diff)
        if rng.uniform() < ratio/(1+ratio):
            self.current[rv] = 1 - self.current[rv]

    def get_Hamiltonian(self, X):
        """Count matching (right, down) neighbor pairs over the flat lattice
        X (same cross-row caveat as step())."""
        match = 0
        for p in range(self.n**2):
            neighbors = [p+1, p+self.n]
            for neighbor in neighbors:
                if 0<=neighbor<self.n**2 and X[p] == X[neighbor]:
                    match += 1
                # print(X, p, neighbor, match)
        return self.offset + match

    def get_upper_Q(self):
        # Tighter bound than the base class: interpolate between extremes.
        Hbar = self.get_Hbar()
        Hmin = self.get_Hmin()
        Hmax = self.get_Hmax()
        rho = (Hbar - Hmin) / (Hmax - Hmin)
        return (1 - rho) * np.exp(-self.beta * Hmax) + rho * np.exp(-self.beta * Hmin)

    def get_lower_paimin(self):
        # Uses Zmax = 2^(n^2) directly instead of the base-class Zmin route.
        Hmin = self.get_Hmin()
        upper_Q = self.get_upper_Q()
        Zmax = 2**(self.n**2)
        return np.exp(-self.beta*Hmin)/Zmax/upper_Q

    def get_Zmin(self):
        # this should not be called (get_lower_paimin is overridden above)
        pass
class ProductGibbsChain():
    """Product of independent copies of a Gibbs chain, one per inverse
    temperature in *betas*; each step advances one copy chosen uniformly."""

    def __init__(self, gibbsChain, betas=(0, 1), startpoint=None):
        """ set beta and startpoint , if startpoint is not given, set it """
        # (default changed from a mutable list [0, 1] to an equivalent tuple)
        self.current = startpoint
        # One independent deep copy of the template chain per temperature.
        self.chains = [deepcopy(gibbsChain) for b in betas]
        for i in range(len(betas)):
            self.chains[i].beta = betas[i]
        if self.current is None:  # identity check (was `== None`)
            self.set_startpoint()

    def get_Lambda(self):
        # Spectral gap of the product is the slowest chain's gap divided by
        # the number of chains (only one chain moves per step).
        return 1 - (1-self.chains[-1].get_Lambda())/ len(self.chains)

    def get_lower_paimin(self):
        # Minimum stationary probability of the product = product of minima.
        paimins = [chain.get_lower_paimin() for chain in self.chains]
        paimin = 1.0
        for p in paimins:
            paimin *= p
        return paimin

    def get_uniform_mixing(self):
        """Return the uniform mixing time of the product chain."""
        Lambda = self.get_Lambda()
        pai_min = self.get_lower_paimin()
        return int(np.ceil(np.log(1/pai_min)/np.log(1/Lambda)))

    def compute_mixingtime(self, tvd):
        """Return the mixing time guaranteeing total-variation distance tvd."""
        Lambda = self.get_Lambda()
        spectral_gap = 1-Lambda
        pai_min = self.get_lower_paimin()
        return int(np.ceil((np.log(1/pai_min)/2 - np.log(2 * tvd)) / spectral_gap))

    def set_startpoint(self):
        """ helper function to set the startpoint """
        for i in range(len(self.chains)):
            self.chains[i].set_startpoint()
        self.current = [chain.current for chain in self.chains]

    def step(self):
        """ take a single step of the chain, update the value for *current* """
        n = len(self.chains)
        i = rng.randint(0, n)
        self.chains[i].step()
        self.current[i] = self.chains[i].current

    def restart_and_sample(self, tvd=None, steps=None):
        """Restart every component chain, then run for a mixing time derived
        from *tvd* or for an explicit number of *steps*.

        Raises:
            ValueError: if neither tvd nor steps is given.
        """
        if tvd is None and steps is None:
            # Previously this only printed a message and then crashed on
            # int(None); fail fast with a meaningful error instead.
            raise ValueError("restart_and_sample requires either tvd or steps")
        self.set_startpoint()
        if tvd is not None:
            mixtime = self.compute_mixingtime(tvd)
        else:
            mixtime = steps
        for _ in range(int(mixtime)):
            self.step()
| StarcoderdataPython |
3500335 | <reponame>rarguelloF/remote-testing
#!/usr/bin/python
from docx import Document
from docx.shared import Pt, RGBColor
from docx.enum.style import WD_STYLE_TYPE
# RGB triples accepted by the optional 'color' option of write().
COLOURS = {
    'red': (255, 0, 0),
    'green': (0, 255, 0),
    'blue': (0, 0, 255)
}

# Document title, also used to derive the output filename '<DOC_NAME>.docx'.
# Overwritten by create().
DOC_NAME = 'DEFAULT_NAME'
def create(title):
    """Create a new .docx document headed *title* and save it as '<title>.docx'.

    Side effect: sets the module-level DOC_NAME, which end_doc() and write()
    use to derive the output filename.  Returns the Document object.
    """
    global DOC_NAME
    DOC_NAME = title
    document = Document()
    document.add_heading(DOC_NAME, 0)
    document.save('%s.docx' % DOC_NAME)
    return document
def end_doc(document):
    """Append a final page break and save the document one last time."""
    document.add_page_break()
    document.save('%s.docx' % DOC_NAME)
def write(document, text, style_name, font_name, font_size, opts=None):
    """Append *text* (one paragraph per line) to *document* using the named
    paragraph style, creating and configuring the style if needed.

    opts may contain: 'color' (a COLOURS key), and the booleans 'bold',
    'underline', 'all_caps'.  This module targets Python 2 (iteritems).
    """
    doc_styles = document.styles
    try:
        # Look the style up first to see whether it already exists
        doc_charstyle = doc_styles[style_name]
    except:
        # If it did not exist, add it
        # NOTE(review): bare except also hides unrelated errors; catching
        # KeyError would be sufficient here.
        doc_charstyle = doc_styles.add_style(style_name, WD_STYLE_TYPE.PARAGRAPH)
    doc_font = doc_charstyle.font
    doc_font.size = Pt(font_size)
    doc_font.name = font_name
    if opts:
        for opt, value in opts.iteritems():
            if opt == 'color':
                r,g,b = COLOURS[value]
                doc_font.color.rgb = RGBColor(r,g,b)
            if opt == 'bold' and value == True:
                doc_font.bold = True
            if opt == 'underline' and value == True:
                doc_font.underline = True
            if opt == 'all_caps' and value == True:
                doc_font.all_caps = True
    lines = text.split('\n')
    for line in lines:
        document.add_paragraph(line, style = style_name)
    document.save('%s.docx' % DOC_NAME)
    return document
9642913 | <reponame>ThomasKoscheck/Telegrambots
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
import telepot
from telepot.delegate import per_chat_id, create_open, pave_event_space
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove, ForceReply
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from telepot.namedtuple import InlineQueryResultArticle, InlineQueryResultPhoto, InputTextMessageContent
from time import strftime as strftime
from codecs import register as register
from codecs import lookup as lookup
from os import system as system
from os import path as path
from random import randint as randint
import sys
import re
# own sripts (must be in the same folder)
import imagescraper # returns photos
import giphy # returns gif's
import translate # translates into german
import video # returns videos
import faceanalyze # return face details
import googleanswers # returns asnwers form google to userquestions
# Make MySQL's 'utf8mb4' charset alias resolve to Python's utf8 codec.
register(lambda name: lookup('utf8') if name == 'utf8mb4' else None) # utf8mb4 activate

# path and name of the disclaimer (one registered chat_id per line)
disclaimer = 'disclaimer.txt'
# path and name of the errorlog
errorfile = 'error.log'
# insert valid database URI here
db_uri="mongodb://your_mongodatabase_uri_here"
# insert database name here
db_name="your_mongodatabase_name_here"

# used for special actions: German trigger words for "show/send" verbs,
# media nouns, and translation cues (matched against the lowercased input)
Zeige = ['zeige', 'zeig', 'schicke', 'schick','sende', 'schreibe', 'schreib']
Bilder = ['bilder','bild', 'foto', 'photo']
Gif = ['gif', 'gifs']
Translate = ['\xc3\x9cbersetze', 'bedeutet', 'heißt']
Video = ['video', 'videos', 'clip', 'clips', 'film', 'filme', 'youtubevideo', 'youtubevideos']

# remove markup (keyboard reset sent back to Telegram clients)
removemarkup = ReplyKeyboardRemove()

# Uncomment the following line to enable verbose logging
# logging.basicConfig(level=logging.INFO)
# create a new instance of a ChatBot
# Susan persists learned statements in MongoDB and is (re)trained on the
# German corpus at every startup.
susan = ChatBot("Susan",
    storage_adapter='chatterbot.storage.MongoDatabaseAdapter',
    logic_adapters=[
        "chatterbot.logic.MathematicalEvaluation",
        # "chatterbot.logic.TimeLogicAdapter",
        {
            'import_path': 'chatterbot.logic.BestMatch'
        }
    ],
    filters=["chatterbot.filters.RepetitiveResponseFilter"],
    database_uri=db_uri,
    database=db_name,
    trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)
susan.train("chatterbot.corpus.german")
# checks if user is already registered (i.e. accepted the disclaimer)
def userCheck(chat_id):
    """Return True if *chat_id* appears on any line of the disclaimer file.

    Fix: the previous version used the Python 2 `file()` builtin and never
    closed the handle; `with open(...)` closes it deterministically.
    """
    with open(disclaimer) as datafile:
        for line in datafile:
            if str(chat_id) in line:
                return True
    return False
# finding a word in a string
def findWholeWord(w):
    """Return a callable that searches a string for *w* as a whole word,
    case-insensitively (re.Match on hit, None otherwise)."""
    pattern = re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE)
    return pattern.search
# function to save log messages to specified log file/print it to terminal
def log(message, path, terminal=False):
    """Append *message* with a timestamp to the log file at *path*; echo to
    stdout when *terminal* is True (for debugging).

    Fixes: the old code named the handle `file` (shadowing the builtin) and
    wrote `file.close` without parentheses, so the handle was never closed;
    `with open(...)` closes and flushes it reliably.
    """
    with open(path, "a") as logfile:
        # strftime gets actual time, format: 06.02.2014 20:49:56
        logfile.write(message + ' at ' + str(strftime("%d.%m.%Y %H-%M-%S")) + "\n")
    # output in terminal (for debugging)
    if terminal:
        print(message)
# finding and returning Pronomen
def findePronomen(input):
    """Locate a German pronoun ('einem'/'einer'/'von'/'vom') in *input*.

    Returns (index, pronoun_text) where index points just past the pronoun,
    i.e. at the start of the actual search term.

    NOTE(review): if none of the pronouns occurs, the function falls through
    and implicitly returns None — callers that unpack the result will then
    raise TypeError.  Confirm all call sites guard against this.
    """
    if findWholeWord('einem')(input.lower()): # checks for 'Zeige mir Bilder von einem Hund'
        index = input.find('einem') + 6
        return index, 'von einem '
    elif findWholeWord('einer')(input.lower()): # "checks for 'Bilder von einer Katze'
        index = input.find('einer') + 6
        return index, 'von einer '
    elif findWholeWord('von')(input.lower()): # "checks for 'Bilder von Katzen'
        index = input.find('von') + 4
        return index, 'von '
    elif findWholeWord('vom')(input.lower()): # checks for 'Bilder vom Matterhorn'
        index = input.find('vom') + 4
        return index, 'vom '
# Main loop
class ChatHandler(telepot.helper.ChatHandler):
def __init__(self, *args, **kwargs):
    """Forward all arguments to telepot's per-chat handler base class."""
    super(ChatHandler, self).__init__(*args, **kwargs)
# sending images and deleting the folder afterwards
def sendMedia(self, filetype):
    """Send the downloaded media file 'tmp/<chat_id>/image.<filetype>' to the
    chat, then delete that temp folder.  Returns True on success; on any
    failure sends a consolation cat picture and returns False.

    NOTE(review): relies on the module-global `chat_id` set in
    on_chat_message; file handles passed to open() are left for the GC to
    close, and the bare `except:` also swallows unrelated errors — verify
    both are acceptable.
    """
    if filetype == 'gif':
        try:
            # GIFs must go out as documents so Telegram keeps the animation.
            self.sender.sendDocument(open('tmp/' + str(chat_id) + '/image.gif'))
            system('rm -r tmp/' + str(chat_id))
            return True
        except:
            self.sender.sendMessage('Hmm, da habe ich nichts gefunden. Zum Trost ein Bild von einer Katze.')
            self.sender.sendPhoto(open('tmp/katze.jpg'))
            return False # error sending a gif
    else:
        try:
            self.sender.sendPhoto(open('tmp/' + str(chat_id) + '/image.' + str(filetype)))
            system('rm -r tmp/' + str(chat_id))
            return True
        except:
            self.sender.sendMessage('Hmm, da habe ich nichts gefunden. Zum Trost ein Bild von einer Katze.')
            self.sender.sendPhoto(open('tmp/katze.jpg'))
            return False # error sending an image
# finding actionword
def findActionWord(self, input, username):
    """Scan *input* for command patterns (send picture / GIF / video,
    translate) and perform the matching action.

    Returns True if an action was handled, False otherwise so the caller can
    fall back to the chatterbot reply.
    """
    count = 0
    while count < len(Zeige):
        if Zeige[count] in str(input.lower()): # checks if input contains any words from Zeige[]
            # sending images
            count2 = 0
            while count2 < len(Bilder):
                if Bilder[count2] in str(input.lower()): # checks if input contains any words from Bilder[]
                    # sending this because downloading and sending the image takes a moment
                    self.sender.sendMessage(str("Einen Moment, ich suche das beste Bild für dich heraus... ⏱"))
                    index, pronomen = findePronomen(input)
                    imagescraper.search((input[index:]).encode('utf-8'), chat_id) # calling imagescraper.py
                    success = self.sendMedia('jpg')
                    if success: # images was sent successful
                        self.sender.sendMessage(str("So, hier ein Bild " + pronomen + "'" + input[index:].encode('utf-8') + "'"))
                        return True
                count2 += 1
            # sending gifs
            count2 = 0
            while count2 < len(Gif):
                if Gif[count2] in str(input.lower()): # checks if input contains any words from Gif[]
                    index, pronomen = findePronomen(input)
                    giphy.downloadGif(input[index:].encode('utf-8'), chat_id)
                    success = self.sendMedia('gif')
                    if success: # gif was sent successful
                        self.sender.sendMessage(str("Hier, ein GIF " + pronomen + "'" + input[index:].encode('utf-8') + "'"))
                        return True
                count2 += 1
            # sending videos
            count2 = 0
            # BUG FIX: this loop was bounded by len(Gif) (2 entries), so most
            # Video keywords were never checked; bound it by len(Video).
            while count2 < len(Video):
                if Video[count2] in str(input.lower()): # checks if input contains any words from Video[]
                    index, pronomen = findePronomen(input)
                    result = video.fetch_youtube_url(input[index:].encode('utf-8')) # hands over the searchterm to video.py, getting link to video back
                    self.sender.sendMessage(result, str("Hier, ein Video mit den Stichworten " + "'" + input[index:].encode('utf-8') + "'"))
                    return True
                count2 += 1
        count = count + 1
    count = 0
    count2 = 0
    # translating
    # NOTE(review): Translate[0] is an upper-case 'Übersetze' byte sequence
    # matched against input.lower() — verify this match can ever succeed.
    if Translate[0] in str(input.lower()): # checks if input contains any words from Translate[]
        index = input.find('\xc3\x9cbersetze') + 11
        translation = translate.translate(input[index:].encode('utf-8'))
        if translation == input[index:].encode('utf-8'): # translation was from german to german -> no translation needed
            return False
        else:
            self.sender.sendMessage(str("'" + input[index:].encode('utf-8') + "' bedeutet: " + translation))
            return True
    if Translate[1] in str(input.lower()): # checks if input contains any words from Translate[]
        index = input.find('bedeutet') + 9
        translation = translate.translate(input[index:].encode('utf-8'))
        if translation == input[index:].encode('utf-8'): # translation was from german to german -> no translation needed
            return False
        else:
            self.sender.sendMessage(str("'" + input[index:].encode('utf-8') + "' bedeutet: " + translation))
            return True
    return False
# any input from telepot.DelegatorBot comes here
    def on_chat_message(self, msg):
        """Dispatch every incoming Telegram update for this chat.

        Photos are routed to the Face++ analysis pipeline, slash-commands to
        their handlers, and free text to the special-action matcher, Google
        featured-snippet answers, or the conversational engine.  Any other
        content type gets a polite "not supported" reply.
        (Python 2 code: note the reload(sys)/setdefaultencoding hack and the
        byte-string .encode() calls throughout.)
        """
        global chat_id
        content_type, chat_type, chat_id = telepot.glance(msg)
        # Python-2-only trick to force UTF-8 as the process default codec.
        reload(sys)
        sys.setdefaultencoding("utf-8")
        try:
            chat_id = msg['chat']['id']
            firstname = msg['from']['first_name'].encode('utf8')
            # group_name = msg['chat']['title']
            try: username = msg['from']['username'].encode('utf8')
            except:
                # fall back to the first name if the user has no username
                username = firstname
            if content_type == 'photo':
                try:
                    # download the highest-resolution photo variant ([-1])
                    bot.download_file(msg['photo'][-1]['file_id'], 'tmp/%s.jpg' % chat_id)
                    imagepath = 'tmp/' + str(chat_id) + '.jpg'
                    # hand the image over to the face-analysis service
                    details = faceanalyze.getdetails(chat_id, imagepath)
                    # evaluate the analysis result
                    # # gender = str(details['faces'][0]['attributes']['gender']['value']) # Male, Female
                    # # age = str(details['faces'][0]['attributes']['age']['value'])
                    # # ethnie = str(details['faces'][0]['attributes']['ethnicity']['value']) # White, Black, Asian
                    glass = str(details['faces'][0]['attributes']['glass']['value']) # None, Normal, Dark
                    facequality = str(details['faces'][0]['attributes']['facequality']['value']) # >70.1
                    smile = details['faces'][0]['attributes']['smile']['value'] # threshold 30.1
                    # emotion scores
                    neutral = str(details['faces'][0]['attributes']['emotion']['neutral'])
                    sadness = str(details['faces'][0]['attributes']['emotion']['sadness'])
                    disgust = str(details['faces'][0]['attributes']['emotion']['disgust'])
                    anger = str(details['faces'][0]['attributes']['emotion']['anger'])
                    surprise = str(details['faces'][0]['attributes']['emotion']['surprise'])
                    fear = str(details['faces'][0]['attributes']['emotion']['fear'])
                    happiness = str(details['faces'][0]['attributes']['emotion']['happiness'])
                    # collect all emotion scores and sort descending -> first entry is the most likely emotion
                    # NOTE(review): the scores were str()-converted above, so this sorts
                    # lexicographically, not numerically (e.g. "9.5" > "11.2") -- the
                    # "most likely" emotion can be wrong; verify and compare as floats.
                    emotions =[neutral, sadness, disgust, anger, surprise, fear, happiness]
                    emotions.sort(reverse=True)
                    # map the top score back to its emotion label (replies are German)
                    if emotions[0] == neutral:
                        semotion = "Neutral"
                    elif emotions[0] == sadness:
                        semotion = "Traurig"
                    elif emotions[0] == disgust:
                        semotion = "Empört"
                    elif emotions[0] == anger:
                        semotion = "wütend"
                    elif emotions[0] == surprise:
                        semotion = "überrascht"
                    elif emotions[0] == fear:
                        semotion = "ängstlich"
                    elif emotions[0] == happiness:
                        semotion = "glücklich"
                    # translate the glasses attribute for the reply
                    if glass == "None":
                        sglass = "Keine Brille"
                    elif glass == "Normal":
                        sglass = "Normale Brille"
                    elif glass == "Dark":
                        sglass = "Sonnenbrille"
                    self.sender.sendMessage(str("Die Analyse hat folgendes ergeben: 📄\n\n" +
                        "Geschlecht: " + str(translate.translate(str(details['faces'][0]['attributes']['gender']['value']).encode('utf-8'))) + '\n' +
                        "Alter: " + str(details['faces'][0]['attributes']['age']['value']) + '\n' +
                        "Emotion: " + str(semotion) + '\n' +
                        "Brille: " + str(sglass) + '\n' +
                        "Ethnie: " + str(translate.translate(str(details['faces'][0]['attributes']['ethnicity']['value']).encode('utf-8'))) + '\n\n' +
                        "Ich bin mir mit meiner Analyse zu " + str(details['faces'][0]['attributes']['facequality']['value']) +'% sicher'))
                    # NOTE(review): the comment on `smile` above says threshold 30.1,
                    # but 40.1 is used here -- confirm which value is intended.
                    if smile > 40.1:
                        self.sender.sendMessage(str("Schön, dass du auf dem Bild lächelst! Das gefällt mir 😊 "))
                except Exception as e:
                    # if any error accured in the try-block
                    self.sender.sendMessage(str("Es ist nicht deine Schuld. \nAber ich konnte kein Gesicht erkennen 😔\nVielleicht ist auch die Datei zu groß? (2 MB)"))
                    log(message='Error: ' + unicode(e).encode("utf-8"), path=errorfile, terminal=True)
            elif content_type == 'text':
                input = msg['text'].encode("utf-8").split('@')[0] # .split returns a list ->[0] for the first element of the list (essential for chatgroups)
                # command: /start (shown to all users if they start a new conversation)
                if input == '/help' or input == '/start':
                    self.sender.sendMessage(str('Hi, ich bin Susan. Ich bin nicht so ganz ein Mensch wie du, aber ich versuche, so menschlich wie möglich zu sein. Dazu verwende ich Machine-Learnig.' +
                        ' Ich werde anfangs sicher ein paar Fehler machen, bitte verzeihe mir 😁'))
                    self.sender.sendMessage(str('Du kannst mir aber dabei helfen besser zu werden, indem du mit mir schreibst und dich nicht über meine Fehler ärgerst. \nDankeschön 😘' ))
                    self.sender.sendMessage(str('Dir gefällt dieser Bot? Dann bewerte mich doch bitte hier mit 5 Sternen: https://telegram.me/storebot?start=suusanbot'))
                # command: /credits -- inline keyboard linking to every library used
                elif input == '/credits':
                    markup = InlineKeyboardMarkup(inline_keyboard=[
                        [dict(text='Conversational engine:\nChatterbot', url='https://github.com/gunthercox/ChatterBot')],
                        [dict(text='Telegrambot API:\nTelepot', url='https://github.com/nickoala/telepot')],
                        [dict(text="GIF's:\ngiphypop", url='https://github.com/shaunduncan/giphypop')],
                        [dict(text='Translation:\nGoogle Translate', url='https://github.com/MrS0m30n3/google-translate')],
                        [dict(text='Bilder:\nGoogle Bilder', url='https://github.com/hardikvasa/google-images-download/blob/master/google-images-download.py')],
                        [dict(text='Gesichtsanalyse:\nFace++', url='https://faceplusplus.com')],
                        [dict(text='Wissensfrage:\nGoogle Featured Snippets', url='https://github.com/Areeb-M/GoogleAnswers')],
                        [dict(text='Integration, Tools, Anpassungen und der Rest: @ThomasKoscheck', url='https://github.com/ThomasKoscheck/Telegrambots')],
                        ])
                    self.sender.sendMessage('Nur mit Hilfe verschiedene fantastische Teile von freier und offener Software konnte ich zu dem werden, was ich heute bin. Hier ist die vollständige Auflistung.', reply_markup=markup)
                # command: /knowledge -- describe the bot's capabilities
                elif input == '/knowledge':
                    self.sender.sendMessage(str("Konversation: Ich kann ausgehend von Deinem Input eine (meist) sinnvolle Antwort geben."))
                    self.sender.sendMessage(str("Bilder: Wenn du mich nach Bilder fragst, kann ich dir ausgehend von der Google-Bildersuche ein Bild schicken. (z.B: 'Zeige mir ein Bild von Ziegen')"))
                    self.sender.sendMessage(str("Videos: Wenn du mich nach Videos fragst, kann ich dir ausgehend von der YouTubesuche ein Video schicken. (z.B: 'Zeige mir ein Video von <NAME>')"))
                    self.sender.sendMessage(str("GIF's: Wenn du mich nach GIF'S fragst, kann ich dir ausgehend von der Datenbank giphy.com ein GIF schicken. (z.B: 'Zeige mir ein GIF von Star Wars')"))
                    self.sender.sendMessage(str("Übersetzungen: Ich kann dir jede Sprache nach Deutsch übersetzen (z.B: Was bedeutet Hi, I am a cat)"))
                    self.sender.sendMessage(str("Gesichtsanalyse: Ich kann Daten wie Geschlecht, Emotion oder Alter anhand der Bilder, die du mir schickst erraten"))
                    self.sender.sendMessage(str("Wissensfragen: Ich kann auf Wissensfragen sinnvoll antworten. (z.B: 'Wie lange geht ein Marathon?')"))
                # command: /tools -- reply keyboard listing the small utilities
                elif input == '/tools':
                    markup = ReplyKeyboardMarkup(keyboard=[
                        [KeyboardButton(text='/würfeln')],
                        # [KeyboardButton(text='/zeit'), KeyboardButton(text='/würfeln')],
                        ])
                    self.sender.sendMessage('Auflistung an kleinen Features, dich ich beherrsche', reply_markup=markup)
                # command: /würfeln (roll a six-sided die)
                elif input == '/würfeln':
                    self.sender.sendMessage(str(randint(1,6)), reply_markup=removemarkup)
                # command: /akzeptieren (accept the disclaimer == register the user)
                elif input == '/akzeptieren':
                    if userCheck(chat_id): # user already in database
                        self.sender.sendMessage(str('Du hast den Haftungsausschluss bereits akzeptiert 👌'))
                    else:
                        self.sender.sendMessage(str('Du hast den Haftungsausschluss akzeptiert. Hier kannst Du ihn dir in Ruhe durchlesen: https://www.thomaskoscheck.de/projekte/telegrambot/haftungsausschluss.php'))
                        log(str(chat_id) + ', ' + firstname + ', ' + username, disclaimer)
                # chatfunction
                elif not input.startswith('/') and userCheck(chat_id): # and is_chatting: no /command, user is already in database and is_chatting is true (/chat was executed)
                    action = self.findActionWord(input, username) # checks if specialaction was accomplished
                    googleResult = googleanswers.ask(input)
                    if action == False and len(googleResult) == 0: # no special action was accomplished, telegrambot should answer conversational now
                        response = susan.get_response(input)
                        # sending conversational response in telegram
                        self.sender.sendMessage(unicode(response).encode("utf-8"), reply_markup=removemarkup)
                    elif len(googleResult) > 0:
                        self.sender.sendMessage(unicode(googleResult).encode("utf-8"), reply_markup=removemarkup)
                # user not registered
                elif userCheck(chat_id) == False:
                    self.sender.sendMessage(str('Du bist leider kein registrierter Benutzer! 😔'))
                    self.sender.sendMessage(str('Registrieren kannst du dich, in dem du den Haftungsausschluss mit /akzeptieren annimmst.'))
            # user sent something that isnt text
            else:
                self.sender.sendMessage(str('Bisher verstehe ich nur Textnachrichten und Bilder 😔'))
                self.sender.sendMessage(str('Das wird sich in Zukunft aber sicher ändern!'))
        except Exception as e:
            # if any error accured in the try-block
            self.sender.sendMessage(str("Es ist nicht deine Schuld. \nAber bei mir ist etwas schief gelaufen. 😔 "))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            log(message=unicode(exc_type).encode("utf-8") + ': ' + unicode(e).encode("utf-8") + ' : ' + username + ' : ' + 'Input:' + input + ' : ' + strftime("%d.%m.%Y %H:%M") + ' : ' + 'Line :' + unicode(exc_tb.tb_lineno).encode("utf-8"), path=errorfile, terminal=True)
# Telegram bot token (placeholder).  NOTE(review): keep real tokens out of
# source control; load from an environment variable or config file instead.
TOKEN = 'your-bot-token'
# creating the bot: one ChatHandler per chat id, torn down after 30 s idle
bot = telepot.DelegatorBot(TOKEN, [
    pave_event_space()(
        per_chat_id(), create_open, ChatHandler, timeout=30
    ),
])
# run the loop forever (blocks here, dispatching updates to the handlers)
bot.message_loop(run_forever='Listening ...')
8172527 | <filename>google/cloud/monitoring_metrics_scope_v1/types/metrics_scopes.py
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
from google.cloud.monitoring_metrics_scope_v1.types import metrics_scope
# proto-plus module declaration: registers this file's messages under the
# given proto package.  ``manifest`` must name every message class defined
# below; it also drives the ``__all__`` export list at the end of the file.
__protobuf__ = proto.module(
    package="google.monitoring.metricsscope.v1",
    manifest={
        "GetMetricsScopeRequest",
        "ListMetricsScopesByMonitoredProjectRequest",
        "ListMetricsScopesByMonitoredProjectResponse",
        "CreateMonitoredProjectRequest",
        "DeleteMonitoredProjectRequest",
        "OperationMetadata",
    },
)
class GetMetricsScopeRequest(proto.Message):
    r"""Request message for the ``GetMetricsScope`` RPC.

    Attributes:
        name (str):
            Required. The resource name of the ``Metrics Scope``, e.g.
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}``.
    """

    # Field 1: fully qualified Metrics Scope resource name.
    name = proto.Field(proto.STRING, number=1)
class ListMetricsScopesByMonitoredProjectRequest(proto.Message):
    r"""Request message for the ``ListMetricsScopesByMonitoredProject`` RPC.

    Attributes:
        monitored_resource_container (str):
            Required. The resource name of the queried
            ``Monitored Project``, e.g.
            ``projects/{MONITORED_PROJECT_ID_OR_NUMBER}``.
    """

    # Field 1: monitored project resource name being looked up.
    monitored_resource_container = proto.Field(proto.STRING, number=1)
class ListMetricsScopesByMonitoredProjectResponse(proto.Message):
    r"""Response message for the ``ListMetricsScopesByMonitoredProject`` RPC.

    Attributes:
        metrics_scopes (Sequence[google.cloud.monitoring_metrics_scope_v1.types.MetricsScope]):
            Every metrics scope that the queried monitored project has
            been added to.
    """

    # Field 1: repeated MetricsScope messages.
    metrics_scopes = proto.RepeatedField(proto.MESSAGE, number=1, message=metrics_scope.MetricsScope)
class CreateMonitoredProjectRequest(proto.Message):
    r"""Request message for the ``CreateMonitoredProject`` RPC.

    Attributes:
        parent (str):
            Required. Resource name of the existing ``Metrics Scope``
            that will monitor the project, e.g.
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}``.
        monitored_project (google.cloud.monitoring_metrics_scope_v1.types.MonitoredProject):
            Required. Initial ``MonitoredProject`` configuration. Only
            the ``monitored_project.name`` field is honoured (all other
            fields are ignored); it must be in the format
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}``.
    """

    # Field 1: parent Metrics Scope resource name.
    parent = proto.Field(proto.STRING, number=1)
    # Field 2: the MonitoredProject to create (only .name is used).
    monitored_project = proto.Field(proto.MESSAGE, number=2, message=metrics_scope.MonitoredProject)
class DeleteMonitoredProjectRequest(proto.Message):
    r"""Request message for the ``DeleteMonitoredProject`` RPC.

    Attributes:
        name (str):
            Required. Resource name of the ``MonitoredProject`` to
            delete, e.g.
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}``.

            Authorization requires the
            ``monitoring.metricsScopes.link`` `Google
            IAM <https://cloud.google.com/iam>`__ permission on both the
            ``Metrics Scope`` and the ``MonitoredProject``.
    """

    # Field 1: monitored project resource name to remove from the scope.
    name = proto.Field(proto.STRING, number=1)
class OperationMetadata(proto.Message):
    r"""Metadata attached to the long-running operations returned by the
    Metrics Scope mutation endpoints.

    Attributes:
        state (google.cloud.monitoring_metrics_scope_v1.types.OperationMetadata.State):
            Current state of the batch operation.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            When the batch request was received.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            When the operation result was last updated.
    """

    class State(proto.Enum):
        r"""Batch operation states."""
        STATE_UNSPECIFIED = 0
        CREATED = 1
        RUNNING = 2
        DONE = 3
        CANCELLED = 4

    # Field 1: current batch-operation state.
    state = proto.Field(proto.ENUM, number=1, enum=State)
    # Fields 5/6: request-received and last-updated timestamps.
    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp)
    update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp)
__all__ = tuple(sorted(__protobuf__.manifest))
| StarcoderdataPython |
9784739 | <reponame>schinmayee/nimbus
# This is a rewrite of setup_boostbook.sh in Python
# It will work on Posix and Windows systems
# The rewrite is not finished yet, so please don't use it
# right now it is used only be release scripts
# User configuration
DOCBOOK_XSL_VERSION = "1.67.2"
DOCBOOK_DTD_VERSION = "4.2"
FOP_VERSION = "0.20.5"
FOP_MIRROR = "http://mirrors.ibiblio.org/pub/mirrors/apache/xml/fop/"
SOURCEFORGE_MIRROR = "http://puzzle.dl.sourceforge.net"
# No user configuration below this point-------------------------------------
import os
import re
import sys
import optparse
import shutil
sys.path.append( os.path.join( os.path.dirname( sys.modules[ __name__ ].__file__ )
, "../regression/xsl_reports/utils" ) )
import checked_system
import urllib2
import tarfile
import zipfile
def accept_args( args ):
    """Parse command-line *args* and return the tools installation directory.

    Falls back to the current working directory when ``-t/--tools`` is not
    given.
    """
    option_parser = optparse.OptionParser()
    option_parser.usage = "setup_boostbook [options]"
    option_parser.add_option(
        "-t", "--tools", dest="tools",
        help="directory downloaded tools will be installed into. Optional. Used by release scripts to put the tools separately from the tree to be archived." )
    options, remaining = option_parser.parse_args( args )
    if options.tools is not None:
        return options.tools
    return os.getcwd()
def to_posix( path ):
    """Convert Windows-style backslashes in *path* to forward slashes."""
    return "/".join( path.split( "\\" ) )
def unzip( archive_path, result_dir ):
    """Extract the .zip archive at *archive_path* into *result_dir*.

    Creates intermediate directories as needed and prints each member name
    as it is written (Python 2 print statement).
    """
    z = zipfile.ZipFile( archive_path, 'r', zipfile.ZIP_DEFLATED )
    for f in z.infolist():
        print f.filename
        # make sure the member's target directory exists before writing it
        if not os.path.exists( os.path.join( result_dir, os.path.dirname( f.filename ) ) ):
            os.makedirs( os.path.join( result_dir, os.path.dirname( f.filename ) ) )
        result = open( os.path.join( result_dir, f.filename ), 'wb' )
        result.write( z.read( f.filename ) )
        result.close()
    z.close()
def gunzip( archive_path, result_dir ):
    """Extract the gzip-compressed tarball at *archive_path* into *result_dir*.

    The archive handle is now closed in a ``finally`` block, so it is no
    longer leaked when extraction of a member raises.
    """
    tar = tarfile.open( archive_path, 'r:gz' )
    try:
        for tarinfo in tar:
            tar.extract( tarinfo, result_dir )
    finally:
        tar.close()
def http_get( file, url ):
    """Download *url* and write the response body to the path *file*.

    NOTE(review): uses the Python 2-only ``urllib2`` module, and buffers the
    entire download in memory before writing it out.
    """
    f = open( file, "wb" )
    f.write( urllib2.urlopen( url ).read() )
    f.close()
def find_executable( executable_name, env_variable, test_args, error_message ):
    """Locate a tool, preferring the path stored in *env_variable*.

    Falls back to probing ``executable_name test_args`` on the PATH.
    Returns a forward-slashed executable path on success, or None (after
    printing *error_message*) when the tool cannot be found.
    """
    print "Looking for %s ..." % executable_name
    if os.environ.has_key( env_variable ):
        specified = os.environ[ env_variable ]
        print " Trying %s specified in env. variable %s" % ( specified, env_variable )
        if os.path.exists( specified ):
            return specified.replace( "\\", "/" )
        else:
            print "Cannot find %s specified in env. variable %s" % ( specified, env_variable )
    # probe the PATH by running the tool with its version/test arguments
    rc = checked_system.system( [ "%s %s" % ( executable_name, test_args ) ] )
    print ""
    if rc != 0:
        print error_message
        return None
    else:
        return executable_name.replace( "\\", "/" )
def adjust_user_config( config_file
                        , docbook_xsl_dir
                        , docbook_dtd_dir
                        , xsltproc
                        , doxygen
                        , fop
                        , java
                        ):
    """Rewrite *config_file* (a Boost.Jam user-config.jam) in place.

    Strips any existing ``using boostbook/xsltproc/doxygen/fop`` stanzas and
    appends fresh ones built from the supplied tool paths.  *doxygen* and
    *fop* may be None, in which case their stanzas are omitted.
    """
    print "Modifying user-config.jam ..."
    r = []
    using_boostbook = 0
    eaten=0
    lines = open( config_file, "r" ).readlines()
    for line in lines:
        if re.match( "^\s*using boostbook", line ):
            # replace the old boostbook stanza with the new directories
            using_boostbook = 1
            r.append( "using boostbook\n" )
            r.append( " : %s\n" % docbook_xsl_dir )
            r.append( " : %s\n" % docbook_dtd_dir )
            r.append( " ; \n" )
            eaten = 1
        elif using_boostbook == 1 and re.match( ";", line ):
            using_boostbook = 2
        elif using_boostbook == 1:
            eaten=1
        elif re.match( "^\s*using xsltproc.*$", line ):
            eaten=1
        elif re.match( "^\s*using doxygen.*$", line ):
            eaten=1
        elif re.match( "^\s*using fop.*$", line ):
            eaten=1
        else:
            # NOTE(review): `eaten` is only reset here, one iteration late, so
            # the first ordinary line after an eaten stanza is also dropped.
            # Looks like a latent bug -- verify against real config files.
            if eaten == 0:
                r.append( line )
            eaten=0
    if using_boostbook==0:
        # no stanza existed; append one at the end
        r.append( "using boostbook\n" )
        r.append( " : %s\n" % docbook_xsl_dir )
        r.append( " : %s\n" % docbook_dtd_dir )
        r.append( " ;\n" )
    r.append( "using xsltproc : %s ;\n" % xsltproc )
    if doxygen is not None:
        r.append( "using doxygen : %s ;\n" % doxygen )
    if fop is not None:
        # NOTE(review): list.append returns None, so this prints "None";
        # the print statement is probably debug leftover.
        print r.append( "using fop : %s : : %s ;\n" % ( fop, java ) )
    # write to a temp file first, then swap it into place
    open( config_file + ".tmp", "w" ).writelines( r )
    try:
        os.rename( config_file + ".tmp", config_file )
    except OSError, e:
        # Windows cannot rename over an existing file; remove it first
        os.unlink( config_file )
        os.rename( config_file + ".tmp", config_file )
def setup_docbook_xsl( tools_directory ):
    """Download (if needed) and unpack the DocBook XSLT stylesheets.

    Returns the posix-style path of the expanded stylesheet directory.
    """
    print "DocBook XSLT Stylesheets ..."
    DOCBOOK_XSL_TARBALL = os.path.join( tools_directory, "docbook-xsl-%s.tar.gz" % DOCBOOK_XSL_VERSION )
    DOCBOOK_XSL_URL = "%s/sourceforge/docbook/%s" % ( SOURCEFORGE_MIRROR, os.path.basename( DOCBOOK_XSL_TARBALL ) )
    if os.path.exists( DOCBOOK_XSL_TARBALL ):
        print " Using existing DocBook XSLT Stylesheets (version %s)." % DOCBOOK_XSL_VERSION
    else:
        print " Downloading DocBook XSLT Stylesheets version %s..." % DOCBOOK_XSL_VERSION
        print " from %s" % DOCBOOK_XSL_URL
        http_get( DOCBOOK_XSL_TARBALL, DOCBOOK_XSL_URL )
    DOCBOOK_XSL_DIR = to_posix( os.path.join( tools_directory, "docbook-xsl-%s" % DOCBOOK_XSL_VERSION ) )
    # only expand when the target directory is not already present
    if not os.path.exists( DOCBOOK_XSL_DIR ):
        print " Expanding DocBook XSLT Stylesheets into %s..." % DOCBOOK_XSL_DIR
        gunzip( DOCBOOK_XSL_TARBALL, tools_directory )
        print " done."
    return DOCBOOK_XSL_DIR
def setup_docbook_dtd( tools_directory ):
    """Download (if needed) and unpack the DocBook XML DTD.

    Returns the posix-style path of the expanded DTD directory.
    """
    print "DocBook DTD ..."
    DOCBOOK_DTD_ZIP = to_posix( os.path.join( tools_directory, "docbook-xml-%s.zip" % DOCBOOK_DTD_VERSION ) )
    DOCBOOK_DTD_URL = "http://www.oasis-open.org/docbook/xml/%s/%s" % ( DOCBOOK_DTD_VERSION, os.path.basename( DOCBOOK_DTD_ZIP ) )
    if os.path.exists( DOCBOOK_DTD_ZIP ):
        print " Using existing DocBook XML DTD (version %s)." % DOCBOOK_DTD_VERSION
    else:
        print " Downloading DocBook XML DTD version %s..." % DOCBOOK_DTD_VERSION
        http_get( DOCBOOK_DTD_ZIP, DOCBOOK_DTD_URL )
    DOCBOOK_DTD_DIR = to_posix( os.path.join( tools_directory, "docbook-dtd-%s" % DOCBOOK_DTD_VERSION ) )
    # only expand when the target directory is not already present
    if not os.path.exists( DOCBOOK_DTD_DIR ):
        print "Expanding DocBook XML DTD into %s... " % DOCBOOK_DTD_DIR
        unzip( DOCBOOK_DTD_ZIP, DOCBOOK_DTD_DIR )
        print "done."
    return DOCBOOK_DTD_DIR
def find_xsltproc():
    """Return a posix-style path to xsltproc, or None if it cannot be found."""
    missing_message = ( "If you have already installed xsltproc, please set the environment\n"
                        "variable XSLTPROC to the xsltproc executable. If you do not have\n"
                        "xsltproc, you may download it from http://xmlsoft.org/XSLT/." )
    located = find_executable( "xsltproc", "XSLTPROC", "--version", missing_message )
    return to_posix( located )
def find_doxygen():
    """Return a posix-style path to doxygen, or None (with a warning)."""
    warning = ( "Warning: unable to find Doxygen executable. You will not be able to\n"
                " use Doxygen to generate BoostBook documentation. If you have Doxygen,\n"
                " please set the DOXYGEN environment variable to the path of the doxygen\n"
                " executable." )
    located = find_executable( "doxygen", "DOXYGEN", "--version", warning )
    return to_posix( located )
def find_java():
    """Return a posix-style path to java, or None (with a warning)."""
    warning = ( "Warning: unable to find Java executable. You will not be able to\n"
                " generate PDF documentation. If you have Java, please set the JAVA\n"
                " environment variable to the path of the java executable." )
    located = find_executable( "java", "JAVA", "-version", warning )
    return to_posix( located )
def setup_fop( tools_directory ):
    """Download (if needed) and unpack the Apache FOP distribution.

    Returns the posix-style path of the platform-specific FOP driver script.
    """
    print "FOP ..."
    FOP_TARBALL = os.path.join( tools_directory, "fop-%s-bin.tar.gz" % FOP_VERSION )
    FOP_URL = "%s/%s" % ( FOP_MIRROR, os.path.basename( FOP_TARBALL ) )
    FOP_DIR = to_posix( "%s/fop-%s" % ( tools_directory, FOP_VERSION ) )
    # FOP ships a .bat driver for Windows and a .sh driver elsewhere
    if sys.platform == 'win32':
        fop_driver = "fop.bat"
    else:
        fop_driver = "fop.sh"
    FOP = to_posix( os.path.join( FOP_DIR, fop_driver ) )
    if os.path.exists( FOP_TARBALL ) :
        print " Using existing FOP distribution (version %s)." % FOP_VERSION
    else:
        print " Downloading FOP distribution version %s..." % FOP_VERSION
        http_get( FOP_TARBALL, FOP_URL )
    # only expand when the target directory is not already present
    if not os.path.exists( FOP_DIR ):
        print " Expanding FOP distribution into %s... " % FOP_DIR
        gunzip( FOP_TARBALL, tools_directory )
        print " done."
    return FOP
def find_user_config():
    """Locate the Boost.Jam user-config.jam to rewrite.

    Prefers $HOME/user-config.jam (backing it up first); otherwise falls
    back to $BOOST_ROOT/tools/build/v2/user-config.jam.  Returns None when
    neither exists.
    """
    print "Looking for user-config.jam ..."
    JAM_CONFIG_OUT = os.path.join( os.environ[ "HOME" ], "user-config.jam" )
    if os.path.exists( JAM_CONFIG_OUT ):
        JAM_CONFIG_IN ="user-config-backup.jam"
        print " Found user-config.jam in HOME directory (%s)" % JAM_CONFIG_IN
        # keep a backup copy before the file is modified in place
        shutil.copyfile( JAM_CONFIG_OUT, os.path.join( os.environ[ "HOME" ], "user-config-backup.jam" ) )
        JAM_CONFIG_IN_TEMP="yes"
        print " Updating Boost.Jam configuration in %s... " % JAM_CONFIG_OUT
        return JAM_CONFIG_OUT
    elif os.environ.has_key( "BOOST_ROOT" ) and os.path.exists( os.path.join( os.environ[ "BOOST_ROOT" ], "tools/build/v2/user-config.jam" ) ):
        JAM_CONFIG_IN=os.path.join( os.environ[ "BOOST_ROOT" ], "tools/build/v2/user-config.jam" )
        print " Found user-config.jam in BOOST_ROOT directory (%s)" % JAM_CONFIG_IN
        JAM_CONFIG_IN_TEMP="no"
        print " Writing Boost.Jam configuration to %s... " % JAM_CONFIG_OUT
        return JAM_CONFIG_IN
    return None
def setup_boostbook( tools_directory ):
    """Top-level driver: fetch every BoostBook prerequisite into
    *tools_directory* and point user-config.jam at the resulting tools."""
    print "Setting up boostbook tools..."
    print "-----------------------------"
    print ""
    DOCBOOK_XSL_DIR = setup_docbook_xsl( tools_directory )
    DOCBOOK_DTD_DIR = setup_docbook_dtd( tools_directory )
    XSLTPROC = find_xsltproc()
    DOXYGEN = find_doxygen()
    JAVA = find_java()
    FOP = None
    # PDF generation via FOP is only possible when Java is available
    if JAVA is not None:
        print "Java is present."
        FOP = setup_fop( tools_directory )
    user_config = find_user_config()
    # Find the input jamfile to configure
    if user_config is None:
        print "ERROR: Please set the BOOST_ROOT environment variable to refer to your"
        print "Boost installation or copy user-config.jam into your home directory."
        sys.exit()
    adjust_user_config( config_file = user_config
                      , docbook_xsl_dir = DOCBOOK_XSL_DIR
                      , docbook_dtd_dir = DOCBOOK_DTD_DIR
                      , xsltproc = XSLTPROC
                      , doxygen = DOXYGEN
                      , fop = FOP
                      , java = JAVA
                      )
    print "done."
    print "Done! Execute \"bjam --v2\" in a documentation directory to generate"
    print "documentation with BoostBook. If you have not already, you will need"
    print "to compile Boost.Jam."
    print ""
    print "WARNING FOR WIN32: please obtain a patched version of xsltproc "
    print "from http://engineering.meta-comm.com/xsltproc-win32.zip if you"
    print "are running BoostBook build on Win32 system (this patched version"
    print "solves the long-standing xsltproc problem with "
    print "creating directories for output files)."
def main():
    """Entry point: parse CLI arguments and run the BoostBook setup."""
    tools_directory = accept_args( sys.argv[ 1: ] )
    setup_boostbook( tools_directory )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
8188901 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:pyy
# datetime:2018/12/28 15:17
import redis
from tornado.web import RequestHandler
class IndexHandler(RequestHandler):
    """Landing-page handler that returns a static greeting."""

    def get(self, *args, **kwargs):
        """Respond to GET with the plain-text greeting."""
        greeting = "Hello Tornado!!!"
        self.write(greeting)
class BaseHandler(RequestHandler):
    """Handler base class that attaches permissive CORS / JSON headers and
    answers preflight OPTIONS requests."""

    def set_default_headers(self):
        """Apply the shared response headers to every response."""
        # Same headers, same order as before; later duplicates override.
        default_headers = (
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', '*'),
            ('Access-Control-Max-Age', 1000),
            ('Content-type', 'application/json'),
            ('Access-Control-Allow-Methods', 'POST, GET, DELETE, PUT, PATCH, OPTIONS'),
            ('Access-Control-Allow-Headers',
             'Content-Type, tsessionid, Access-Control-Allow-Origin, Access-Control-Allow-Headers, X-Requested-By, Access-Control-Allow-Methods'),
        )
        for header_name, header_value in default_headers:
            self.set_header(header_name, header_value)

    def options(self, *args, **kwargs):
        """Empty 200 response for CORS preflight requests."""
        pass
class RedisHandler(BaseHandler):
    """Handler base class that opens a Redis connection per request,
    configured from ``settings['redis']``."""

    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        redis_settings = self.settings["redis"]
        self.redis_conn = redis.StrictRedis(**redis_settings)
| StarcoderdataPython |
9701638 | import numpy as np
import cv2
from scipy import signal
import matplotlib.pyplot as plt
import os
from absl import app, flags, logging
from absl.flags import FLAGS
import shutil
from tqdm import tqdm
# Command-line flags (absl): where to read the box annotation list from and
# where to write the brightness-augmented output images.
flags.DEFINE_string('annotation', "/mnt/data0/Garmin/datasets/aug_data/anno/03_scale_aware.txt",
                    'path to annotation file')
flags.DEFINE_string('output', "/mnt/data0/Garmin/datasets/aug_data/03_images_sw/",
                    'path to output file')
def gkern(kernlen=21, std=3):
    """Return a (kernlen, kernlen) 2-D Gaussian kernel with peak value 1.

    Args:
        kernlen: side length of the square kernel, in samples.
        std: standard deviation of the Gaussian, in samples.

    Computes the window directly with NumPy instead of the deprecated
    ``scipy.signal.gaussian`` (removed from the ``scipy.signal`` namespace
    in recent SciPy releases); the sampled values are identical.
    """
    # Symmetric sample offsets centred on the kernel midpoint, matching
    # scipy.signal.gaussian(kernlen, std).
    offsets = np.arange(kernlen) - (kernlen - 1) / 2.0
    gkern1d = np.exp(-0.5 * (offsets / std) ** 2).reshape(kernlen, 1)
    gkern2d = np.outer(gkern1d, gkern1d)
    # Rescale so the maximum sampled value is exactly 1 (only matters for
    # even kernlen, where the continuous peak falls between samples).
    gkern2d *= (1 / gkern2d.max())
    return gkern2d
# def draw_kernel(img, x=0, y=0, kernlen=21, std=3):
# kernel = gkern(kernlen, std)
"""def main(_argv):
files = os.listdir(FLAGS.annotation)
os.makedirs(FLAGS.output, exist_ok=True)
os.makedirs(FLAGS.output+'/test_blur', exist_ok=True)
os.makedirs(FLAGS.output+'/test_image', exist_ok=True)
for i in tqdm(files):
anno_data = open(FLAGS.annotation+'/'+i, 'r')
image = cv2.imread(FLAGS.image+'/'+i[:-4]+'.jpg') #read image shape
shutil.copy2(FLAGS.image+'/'+i[:-4]+'.jpg', FLAGS.output+'/test_image') #copy image to output path
image_filter = np.zeros((image.shape[0], image.shape[1]), dtype=np.float64)
for j in anno_data:
anno = j.split(',')
x = int(int(anno[0]) + int(anno[2])/2)
y = int(int(anno[1]) + int(anno[3])/2)
kernel = gkern(61, 12)
for x1 in range(kernel.shape[0]):
for y1 in range(kernel.shape[1]):
print(int(kernel.shape[0]))
if (x - int(kernel.shape[0]/2) + x1) < 0 or (x - int(kernel.shape[0]/2) + x1) >= image.shape[1]:
continue
elif (y - int(kernel.shape[1]/2) + y1) < 0 or (y - int(kernel.shape[1]/2) + y1) >= image.shape[0]:
continue
else :
image_filter[y - int(kernel.shape[1]/2) + y1][x - int(kernel.shape[0]/2) + x1] = max(image_filter[y - int(kernel.shape[1]/2) + y1][x - int(kernel.shape[0]/2) + x1], kernel[x1][y1])
# print(image_filter.max())
# cv2.imwrite(FLAGS.output+'/test_blur/'+i[:-4]+'.jpg', image_filter*255)
cv2.imwrite(FLAGS.output+'/'+i[:-4]+'.jpg', image_filter*255)
# print(image_filter.max())
break
# print(image.shape)
# print(image.shape[1])
# cv2.imwrite('test.jpg', gkern(61, 12)*255)
# plt.imshow(gkern(21), interpolation='none')
# plt.imsave('test.png',gkern(61, 12))
"""
def _stamp_kernel(heatmap, x, y, kernel):
    """Max-blend *kernel* into *heatmap* centred on pixel (x, y), clipping at
    the image borders.  Faithful to the original per-pixel loops, including
    the kernel[kx][ky] indexing order."""
    half_w = int(kernel.shape[0] / 2)
    half_h = int(kernel.shape[1] / 2)
    for kx in range(kernel.shape[0]):
        for ky in range(kernel.shape[1]):
            px = x - half_w + kx
            py = y - half_h + ky
            if 0 <= px < heatmap.shape[1] and 0 <= py < heatmap.shape[0]:
                heatmap[py][px] = max(heatmap[py][px], kernel[kx][ky])


def main(_argv):
    """Brighten each annotated image along its bounding-box edges.

    For every line of FLAGS.annotation (``<image path> x1,y1,x2,y2[,...]``...)
    a Gaussian "edge heatmap" is stamped along all four edges of every box,
    the image's HLS lightness channel is raised proportionally (capped at 1),
    and the result plus a rewritten annotation line is saved under
    FLAGS.output.

    Changes vs. the original: the four copy-pasted edge-stamping loops are
    collapsed into ``_stamp_kernel``; the constant ``gkern(61, 12)`` stamp is
    built once instead of once per edge pixel (it was recomputed inside the
    innermost loops); the output annotation file name uses the computed
    ``filename`` stem (the literal placeholder previously written there was a
    redaction artifact); files are closed via ``with``.
    """
    basename = os.path.basename(FLAGS.annotation)
    dirname = os.path.dirname(FLAGS.annotation)
    filename, ext = basename.split('.')
    os.makedirs(FLAGS.output, exist_ok=True)
    # The Gaussian stamp never changes -- build it once up front.
    kernel = gkern(61, 12)
    with open(FLAGS.annotation) as anno_data, \
         open(os.path.join(dirname, f'{filename}_sa.txt'), 'w') as f, \
         tqdm() as pbar:
        for idx, line in enumerate(anno_data):
            info = line.split()
            info[-1] = info[-1].replace("\n", "")
            pic_path = info[0].split('/')
            title, _ = pic_path[-1].split('.')
            boxnum = len(info) - 1
            image = cv2.imread(info[0])  # read image for its shape / pixels
            image_filter = np.zeros((image.shape[0], image.shape[1]), dtype=np.float64)
            for j in range(1, boxnum + 1):
                data = info[j].split(',')
                x1, y1, x2, y2 = int(data[0]), int(data[1]), int(data[2]), int(data[3])
                # Stamp the Gaussian along all four box edges.  End
                # coordinates are excluded, exactly as in the original
                # range() loops; max-blending makes the order irrelevant.
                for x in range(x1, x2):
                    _stamp_kernel(image_filter, x, y1, kernel)  # top edge
                    _stamp_kernel(image_filter, x, y2, kernel)  # bottom edge
                for y in range(y1, y2):
                    _stamp_kernel(image_filter, x1, y, kernel)  # left edge
                    _stamp_kernel(image_filter, x2, y, kernel)  # right edge
            # Convert to float HLS, raise lightness proportionally to the
            # edge heatmap (up to +30%), and clamp to the valid range.
            fimage = image.astype(np.float32) / 255.0
            hls = cv2.cvtColor(fimage, cv2.COLOR_BGR2HLS)
            hls[:, :, 1] = (1 + image_filter * 0.3) * hls[:, :, 1]
            hls[:, :, 1][hls[:, :, 1] > 1] = 1
            result_img = (cv2.cvtColor(hls, cv2.COLOR_HLS2BGR) * 255).astype(np.uint8)
            out_path = FLAGS.output + '/' + f'{idx}_{title}' + '.jpg'
            cv2.imwrite(out_path, result_img)
            # Re-emit the annotation line pointing at the augmented image.
            f.write(out_path + ' ' + ' '.join(info[1:]) + '\n')
            pbar.set_postfix({
                'line': pic_path
            })
            pbar.update(1)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass | StarcoderdataPython |
279521 | """
Lab 8
"""
#3.1
# Sample string from the lab hand-out (unused by the functions below).
demo = 'hello world!'


def cal_words(input_str):
    """Return the number of whitespace-separated words in *input_str*."""
    words = input_str.split()
    return len(words)
#3.2
# Demo: count the words in a two-word sentence (prints 2).
demo_str='Hello world!'
print(cal_words(demo_str))
#3.3
def find_min(inpu_list):
    """Return the smallest non-string item in *inpu_list*.

    String entries are skipped.  Returns None when the list contains no
    non-string items (including the empty list, which previously raised
    IndexError).  The original seeded the running minimum with
    ``inpu_list[0]`` even when that element was a string, which raised
    TypeError in Python 3 as soon as it was compared against a number.
    """
    min_item = None
    for num in inpu_list:
        if type(num) is not str:
            # keep the original <= comparison semantics
            if min_item is None or num <= min_item:
                min_item = num
    return min_item
#3.4
# Demo: minimum of an all-numeric list (prints 1).
demo_list = [1,2,3,4,5,6]
print(find_min(demo_list))
#3.5
# Demo: string entries are skipped, so the numeric minimum 1 is printed.
mix_list = [1,2,3,4,'a',6]
print(find_min(mix_list))
1953354 | <reponame>mpeven/Pytorch_Datasets
'''
Simulated "Around The World" needle passing videos using the da Vinci Surgical Systemself.
Dataset collected by Anand from 07/23/15 - 07/31/15 during his internship at Intuitive surgical.
Around the World
----------------
Person controls 2 needle drivers.
8 pairs of targets, starts at 3:00 and goes clockwise.
Enter through the flashing target, exit through the solid.
Dataset locations
-----------------
LCSR Server -- lcsr-cirl$/Language Of Surgery Data/ISI-SG-Sim-NP/data/raw/
Titan -- /hdd/Datasets/Intuitive
Dataset info/annotations
------------------------
191 videos
32 different users
Phase annotations (ProgressLog.txt)
Insertion target locations (MetaData.txt/Targets.txt)
Events (SimEvents.txt)
Instrument motion (USM<instrument#>.txt)
Needle motion (EnvMotion.txt )
Important
---------
Timestamps do not line up with actual video time.
For the correct conversion from timestamp in log to frame # in video:
Get the timestamp from the log file
Get the closest timestamp from "left_avi_ts.txt"
Get the corresponding frame number in "left_avi_ts.txt"
That frame number is the frame in the video
'''
import os
import glob
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
BASE_PATH = '/hdd/Datasets/Intuitive'
class IntuitiveSimulated(Dataset):
    """Dataset of simulated "Around The World" needle-passing video clips.

    Crawls BASE_PATH for recorded videos and converts each video's phase
    log into (clip, phase-label) samples.  Log timestamps do not match
    video time; the left_avi_ts.txt table maps them to frame numbers
    (see the module docstring).
    """

    def __init__(self, split):
        """Args:
            split: one of 'train', 'val' or 'test'.
        """
        if split not in ["train", "val", "test"]:
            raise ValueError("Split must be one of 'train', 'val', 'test'. Get {}".format(split))
        self.split = split
        self.dataset = self.create_dataset()

    def create_dataset(self):
        """Crawl the dataset directory and build the list of samples.

        Returns:
            list of dicts with keys video_file/start_frame/end_frame/phase.
        """
        # Crawl through dataset to get paths
        all_paths = []
        all_videos = sorted(glob.glob('{}/Recorded/*/video_files/*/left.avi'.format(BASE_PATH)))
        for vid in all_videos:
            all_paths.append({
                'video_path': vid,
                'ts_path': vid.replace("left.avi", "left_avi_ts.txt"),
                'phase_path': vid.replace("video_files", "log_files").replace("left.avi", "ProgressLog.txt"),
                'events_path': vid.replace("video_files", "log_files").replace("left.avi", "SimEvents.txt"),
            })

        def get_video_phases(phase_path, ts_path):
            """Read a ProgressLog and map its timestamps to video frame numbers."""
            # Get {timestamp, phase} data
            phase_df = pd.read_csv(phase_path, sep=" ", names=["start_time", "event_id", "event_desc"])
            # Get {timestamp, frame} data
            tsp_array = np.loadtxt(ts_path, usecols=1)
            # Convert timestamps to frame numbers: the closest timestamp in
            # left_avi_ts.txt wins (see module docstring).
            phase_df["start_frame"] = phase_df["start_time"].apply(lambda ts: np.argmin(np.abs(tsp_array - ts)))
            # TODO(review): expansion of {start_frame, phase} rows into
            # per-frame labels (over len(tsp_array) frames) is unimplemented.
            return phase_df

        # Crawl through paths to read in data.
        # Bug fixes vs. original: removed the debug exit() that aborted the
        # process after the first video, removed leftover debug prints, and
        # added the missing `return dataset` (the original returned None,
        # leaving self.dataset == None).
        dataset = []
        for path_dict in all_paths:
            # Get log events
            phase_df = get_video_phases(path_dict["phase_path"], path_dict["ts_path"])
            # TODO(review): derive one entry per phase segment from phase_df;
            # the frame bounds and phase label are still placeholders.
            dataset.append({
                'video_file': path_dict["video_path"],
                'start_frame': None,
                'end_frame': None,
                'phase': None,
            })
        return dataset

    def __len__(self):
        # Bug fix: the original referenced self.df_dataset, which is never
        # created (__init__ stores the samples in self.dataset).
        return len(self.dataset)

    def image_transforms(self, numpy_images):
        """Transformations on a list of images.

        NOTE(review): relies on `transforms` (torchvision), `random` and a
        `self.train` flag, none of which are defined in this module --
        confirm the missing imports/attributes before use.
        """
        # Get random parameters once so the same transformation is applied
        # to every image in the clip.
        color_jitter = transforms.ColorJitter.get_params(.25, .25, .25, .25)
        rotation_param = transforms.RandomRotation.get_params((-15, 15))
        flip_param = random.random()
        # Apply transformations
        images = []
        for numpy_image in numpy_images:
            i = transforms.functional.to_pil_image(numpy_image)
            i = transforms.functional.resize(i, (224, 224))
            if self.train:
                i = color_jitter(i)
                i = transforms.functional.rotate(i, rotation_param)
                if flip_param < 0.5:
                    i = transforms.functional.hflip(i)
            i = transforms.functional.to_tensor(i)
            i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            images.append(i)
        return torch.stack(images)

    def pad_or_trim(self, torch_array, bound):
        """Zero-pad or truncate a (T, 3, 224, 224) tensor to exactly `bound` frames."""
        if torch_array.size(0) <= bound:
            return torch.cat([torch_array, torch.zeros([bound - torch_array.size(0), 3, 224, 224])])
        return torch_array[:bound]

    def __getitem__(self, idx):
        """Load, transform and pad the frames for sample `idx`.

        NOTE(review): this method still targets an older dataframe-based
        layout (self.df_dataset, DATASET_DIR, cv2, user_id/video_id columns)
        that does not match create_dataset() above -- it must be reconciled
        before the class is usable end to end.
        """
        # Max num images
        bound = 500
        # Get path of video
        row = self.df_dataset.iloc[idx].to_dict()
        image_directory = "{}/rgb/{}_{}".format(DATASET_DIR, row['user_id'], row['video_id'])
        op_flow_directory = "{}/op_flow/{}_{}".format(DATASET_DIR, row['user_id'], row['video_id'])
        # Load images
        raw_images = []
        for x in range(row['frame_first'], min(row['frame_last'], row['frame_first'] + bound)):
            im_path = "{}/frame{:08}.jpg".format(image_directory, x)
            try:
                raw_images.append(cv2.imread(im_path)[:, :, ::-1])
            except TypeError:
                # cv2.imread returns None for unreadable files; report and skip.
                print(im_path)
                print(os.path.isfile(im_path))
        # Transform images
        try:
            transformed_images = self.image_transforms(raw_images)
        except ValueError:
            print("ERROR")
            print(row)
            transformed_images = []
        # Pad or trim to length "bound"
        padded_images = self.pad_or_trim(transformed_images, bound)
        # Load optical flow
        # raw_optical_flow = []
        # for x in range(row['frame_first'], row['frame_last']+1):
        #     optical_flow_path = "{}/frame{:08}.png".format(op_flow_directory, x)
        #     optical_flow = cv2.imread(optical_flow_path, -1)[:,:,:2]
        #     # Un-quantize and un-bound (bound=1000 -- from jupyter notebook)
        #     optical_flow = (optical_flow * 2000. / 65534.) - 1000.
        # Get label
        label = row['event_id']
        return padded_images, label
if __name__ == '__main__':
    # Bug fix: the class defined above is IntuitiveSimulated; the original
    # called IntuitiveDataset, which raised NameError.
    IntuitiveSimulated("train")
| StarcoderdataPython |
3451375 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
import win32evtlog
from . import common
pytestmark = [pytest.mark.integration]
@pytest.mark.parametrize('server', ['localhost', '127.0.0.1'])
def test_expected(aggregator, dd_run_check, new_check, instance, report_event, server):
    """A reported event is collected with the expected default metadata,
    for both spellings of the local server."""
    instance['server'] = server
    check = new_check(instance)
    report_event('message')
    dd_run_check(check)
    aggregator.assert_event(
        'message',
        alert_type='info',
        priority='normal',
        host=check.hostname,
        source_type_name=check.SOURCE_TYPE_NAME,
        aggregation_key=common.EVENT_SOURCE,
        msg_title='Application/{}'.format(common.EVENT_SOURCE),
        tags=[],
    )
@pytest.mark.parametrize(
    'event_type, level',
    [
        pytest.param(win32evtlog.EVENTLOG_INFORMATION_TYPE, 'info', id='INFORMATION_TYPE'),
        pytest.param(win32evtlog.EVENTLOG_WARNING_TYPE, 'warning', id='WARNING_TYPE'),
        pytest.param(win32evtlog.EVENTLOG_ERROR_TYPE, 'error', id='ERROR_TYPE'),
    ],
)
def test_levels(aggregator, dd_run_check, new_check, instance, report_event, event_type, level):
    """Each Windows event log level maps to the matching Datadog alert type."""
    check = new_check(instance)
    report_event('foo', event_type=event_type)
    dd_run_check(check)
    aggregator.assert_event('foo', alert_type=level)
def test_event_priority(aggregator, dd_run_check, new_check, instance, report_event):
    """The `event_priority` option is propagated to emitted events."""
    instance['event_priority'] = 'low'
    check = new_check(instance)
    report_event('foo')
    dd_run_check(check)
    aggregator.assert_event('foo', priority='low')
def test_event_id(aggregator, dd_run_check, new_check, instance, report_event):
    """`tag_event_id` adds an `event_id:<id>` tag to emitted events."""
    instance['tag_event_id'] = True
    check = new_check(instance)
    report_event('foo')
    dd_run_check(check)
    aggregator.assert_event('foo', tags=['event_id:{}'.format(common.EVENT_ID)])
def test_included_messages(aggregator, dd_run_check, new_check, instance, report_event):
    """Only events matching an `included_messages` pattern are collected."""
    instance['included_messages'] = ['bar']
    check = new_check(instance)
    report_event('foo')
    report_event('bar')
    report_event('baz')
    dd_run_check(check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('bar')
def test_excluded_messages(aggregator, dd_run_check, new_check, instance, report_event):
    """Events matching an `excluded_messages` pattern are dropped."""
    instance['excluded_messages'] = ['bar']
    check = new_check(instance)
    report_event('foo')
    report_event('bar')
    report_event('baz')
    dd_run_check(check)
    assert len(aggregator.events) == 2
    aggregator.assert_event('foo')
    aggregator.assert_event('baz')
def test_excluded_messages_override(aggregator, dd_run_check, new_check, instance, report_event):
    """Exclusion patterns take precedence over inclusion patterns."""
    instance['included_messages'] = ['bar']
    instance['excluded_messages'] = ['bar']
    check = new_check(instance)
    report_event('foo')
    report_event('bar')
    report_event('baz')
    dd_run_check(check)
    assert len(aggregator.events) == 0
def test_custom_query(aggregator, dd_run_check, new_check, instance, report_event):
    """A custom XPath `query` filters events (here: levels 1-2 only, so the
    error event is kept and the default-level one is dropped)."""
    instance['query'] = "*[System[Provider[@Name='{}']] and System[(Level=1 or Level=2)]]".format(common.EVENT_SOURCE)
    check = new_check(instance)
    report_event('foo', level='error')
    report_event('bar')
    dd_run_check(check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('foo')
def test_bookmark(aggregator, dd_run_check, new_check, instance, report_event):
    """The check bookmarks its log position: a freshly constructed check with
    the same config resumes after already-collected events."""
    instance['start'] = 'oldest'
    check = new_check(instance)
    report_event('foo')
    report_event('bar')
    dd_run_check(check)
    assert len(aggregator.events) > 1
    aggregator.reset()
    # A new check instance starts from the persisted bookmark, not 'oldest'.
    check = new_check(instance)
    dd_run_check(check)
    assert len(aggregator.events) == 0
    report_event('foo')
    dd_run_check(check)
    assert len(aggregator.events) == 1
    aggregator.assert_event('foo')
def test_query_override(aggregator, dd_run_check, new_check, instance, report_event):
    """`query` overrides `filters`; a non-matching provider collects nothing."""
    instance['query'] = "*[System[Provider[@Name='foo']]]"
    check = new_check(instance)
    report_event('message')
    dd_run_check(check)
    assert len(aggregator.events) == 0
def test_sid(aggregator, dd_run_check, new_check, instance):
    """`tag_sid` adds the event SID tag (read from the System log's
    kernel-boot provider, which always reports as NT AUTHORITY\\SYSTEM)."""
    instance['tag_sid'] = True
    instance['start'] = 'oldest'
    instance['path'] = 'System'
    instance['query'] = "*[System[Provider[@Name='Microsoft-Windows-Kernel-Boot']]]"
    del instance['filters']
    check = new_check(instance)
    dd_run_check(check)
    assert any(
        'sid:NT AUTHORITY\\SYSTEM' in event['tags'] for event in aggregator.events
    ), 'Unable to find any expected `sid` tags'  # no cov
| StarcoderdataPython |
5064616 | <gh_stars>1-10
"""Imports."""
from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, UpdateView, TemplateView, ListView
from .models import StaffProfile
from .forms import ProfileEditForm
class ProfileView(LoginRequiredMixin, TemplateView):
    """Render a staff member's profile page.

    When the URL supplies no kwargs, the logged-in user's own profile
    is shown.
    """

    template_name = 'profile.html'
    login_url = reverse_lazy('auth_login')
    model = StaffProfile
    context_object_name = 'profile'

    def get(self, *args, **kwargs):
        """Default the ``username`` kwarg to the requesting user."""
        if not kwargs:
            kwargs['username'] = self.request.user.username
        return super().get(*args, **kwargs)

    def get_context_data(self, **kwargs):
        """Attach the StaffProfile for ``username`` to the template context."""
        context = super().get_context_data(**kwargs)
        context['profile'] = get_object_or_404(
            StaffProfile, user__username=context['username'])
        return context
class ProfileEditView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile."""

    template_name = 'profile_edit.html'
    model = StaffProfile
    form_class = ProfileEditForm
    login_url = reverse_lazy('auth_login')
    success_url = reverse_lazy('profile')
    slug_url_kwarg = 'username'
    slug_field = 'user__username'

    def get(self, *args, **kwargs):
        """Force the edited profile to be the requesting user's own."""
        self.kwargs['username'] = self.request.user.get_username()
        return super().get(*args, **kwargs)

    def post(self, *args, **kwargs):
        """Force the edited profile to be the requesting user's own."""
        self.kwargs['username'] = self.request.user.get_username()
        return super().post(*args, **kwargs)

    def get_form_kwargs(self):
        """Pass the requesting user's username through to the form."""
        form_kwargs = super().get_form_kwargs()
        form_kwargs['username'] = self.request.user.get_username()
        return form_kwargs

    def form_valid(self, form):
        """Copy the User fields out of the submitted data before saving."""
        user = form.instance.user
        user.email = form.data['email']
        user.first_name = form.data['first_name']
        user.last_name = form.data['last_name']
        user.save()
        return super().form_valid(form)
class StaffListView(LoginRequiredMixin, ListView):
    """List all staff profiles."""

    template_name = 'staff_list.html'
    login_url = reverse_lazy('auth_login')
    context_object_name = 'profile'
    model = StaffProfile

    def get(self, *args, **kwargs):
        """Default the ``username`` kwarg to the requesting user."""
        if not kwargs:
            kwargs['username'] = self.request.user.username
        return super().get(*args, **kwargs)

    # NOTE: the original defined a get_context_data() that only called
    # super() and returned its result; that pure passthrough was removed.
| StarcoderdataPython |
import os, sys
# Make the parent directory importable so that `config` can be found.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import torch
import torch.nn as nn
from config import batch_size_train, device
class ModelLSTMShakespeare(nn.Module):
    """Character-level 2-layer LSTM for the Shakespeare next-character task.

    Takes integer-encoded sequences of length 80 and produces logits over
    the 80-symbol vocabulary from the final time step's hidden state.
    """

    def __init__(self):
        super(ModelLSTMShakespeare, self).__init__()
        self.embedding_len = 8   # embedding dimension per character
        self.seq_len = 80        # input sequence length
        self.num_classes = 80    # vocabulary size
        self.n_hidden = 256      # LSTM hidden units per layer
        self.batch_size = batch_size_train
        self.embeds = nn.Embedding(self.seq_len, self.embedding_len)
        self.multi_lstm = nn.LSTM(input_size=self.embedding_len,
                                  hidden_size=self.n_hidden,
                                  num_layers=2, batch_first=True, dropout=0.5)
        self.fc = nn.Linear(self.n_hidden, self.num_classes)

    def forward(self, x, out_activation=False):
        embedded = self.embeds(x.to(torch.int64))
        # NOTE(review): the initial hidden/cell states are drawn from
        # torch.rand on every forward pass rather than zeros -- presumably
        # intentional; confirm.
        h0 = torch.rand(2, embedded.size(0), self.n_hidden).to(device)
        c0 = torch.rand(2, embedded.size(0), self.n_hidden).to(device)
        activation, (h_n, c_n) = self.multi_lstm(embedded, (h0, c0))
        # Classify from the hidden state at the last time step.
        output = self.fc(activation[:, -1, :])
        if out_activation:
            return output, activation
        return output
| StarcoderdataPython |
1678037 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the CX Direction pass"""
import unittest
from math import pi
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit.transpiler import TranspilerError
from qiskit.transpiler import CouplingMap
from qiskit.transpiler.passes import GateDirection
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
class TestGateDirection(QiskitTestCase):
    """Tests the GateDirection pass.

    NOTE: DAG equality used below is sensitive to the exact order in which
    gates (and their conditions) are added, so expected circuits are built
    gate-by-gate in a deliberate order.
    """

    def test_no_cnots(self):
        """Trivial map in a circuit without entanglement
        qr0:---[H]---
        qr1:---[H]---
        qr2:---[H]---
        CouplingMap map: None
        """
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.h(qr)
        coupling = CouplingMap()
        dag = circuit_to_dag(circuit)

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        # With no two-qubit gates the pass must leave the DAG untouched.
        self.assertEqual(dag, after)

    def test_direction_error(self):
        """The mapping cannot be fixed by direction mapper
        qr0:---------
        qr1:---(+)---
        |
        qr2:----.----
        CouplingMap map: [2] <- [0] -> [1]
        """
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[2])
        coupling = CouplingMap([[0, 1], [0, 2]])
        dag = circuit_to_dag(circuit)

        pass_ = GateDirection(coupling)

        # Qubits 1 and 2 are not coupled in either direction, so flipping
        # the CX cannot help and the pass must raise.
        with self.assertRaises(TranspilerError):
            pass_.run(dag)

    def test_direction_correct(self):
        """The CX is in the right direction
        qr0:---(+)---
        |
        qr1:----.----
        CouplingMap map: [0] -> [1]
        """
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        coupling = CouplingMap([[0, 1]])
        dag = circuit_to_dag(circuit)

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        # Correctly directed CX: the DAG passes through unchanged.
        self.assertEqual(dag, after)

    def test_direction_flip(self):
        """Flip a CX
        qr0:----.----
        |
        qr1:---(+)---
        CouplingMap map: [0] -> [1]
        qr0:-[H]-(+)-[H]--
        |
        qr1:-[H]--.--[H]--
        """
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])
        coupling = CouplingMap([[0, 1]])
        dag = circuit_to_dag(circuit)

        # A reversed CX is rewritten as H-sandwiched CX in the allowed
        # direction (H on both qubits before and after).
        expected = QuantumCircuit(qr)
        expected.h(qr[0])
        expected.h(qr[1])
        expected.cx(qr[0], qr[1])
        expected.h(qr[0])
        expected.h(qr[1])

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        self.assertEqual(circuit_to_dag(expected), after)

    def test_ecr_flip(self):
        """Flip a ECR gate.
        ┌──────┐
        q_0: ┤1      ├
        │  ECR │
        q_1: ┤0      ├
        └──────┘
        CouplingMap map: [0, 1]
        """
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.ecr(qr[1], qr[0])
        coupling = CouplingMap([[0, 1]])
        dag = circuit_to_dag(circuit)

        # ECR reversal uses RY pre-rotations and H post-rotations.
        expected = QuantumCircuit(qr)
        expected.ry(pi / 2, qr[0])
        expected.ry(-pi / 2, qr[1])
        expected.ecr(qr[0], qr[1])
        expected.h(qr[0])
        expected.h(qr[1])

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        self.assertEqual(circuit_to_dag(expected), after)

    def test_flip_with_measure(self):
        """
        qr0: -(+)-[m]-
        |    |
        qr1: --.---|--
        |
        cr0: ------.--
        CouplingMap map: [0] -> [1]
        qr0: -[H]--.--[H]-[m]-
        |         |
        qr1: -[H]-(+)-[H]--|--
        |
        cr0: --------------.--
        """
        qr = QuantumRegister(2, "qr")
        cr = ClassicalRegister(1, "cr")

        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[1], qr[0])
        circuit.measure(qr[0], cr[0])
        coupling = CouplingMap([[0, 1]])
        dag = circuit_to_dag(circuit)

        # The measurement that follows the flipped CX must be preserved.
        expected = QuantumCircuit(qr, cr)
        expected.h(qr[0])
        expected.h(qr[1])
        expected.cx(qr[0], qr[1])
        expected.h(qr[0])
        expected.h(qr[1])
        expected.measure(qr[0], cr[0])

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        self.assertEqual(circuit_to_dag(expected), after)

    def test_preserves_conditions(self):
        """Verify GateDirection preserves conditional on CX gates.
        ┌───┐      ┌───┐
        q_0: |0>───■────┤ X ├───■──┤ X ├
        ┌─┴─┐  └─┬─┘ ┌─┴─┐└─┬─┘
        q_1: |0>─┤ X ├────■───┤ X ├──■──
        └─┬─┘    │   └───┘
        ┌──┴──┐┌──┴──┐
        c_0: 0 ╡ = 0 ╞╡ = 0 ╞══════════
        └─────┘└─────┘
        """
        qr = QuantumRegister(2, "q")
        cr = ClassicalRegister(1, "c")
        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[0], qr[1]).c_if(cr, 0)
        circuit.cx(qr[1], qr[0]).c_if(cr, 0)

        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[1], qr[0])

        coupling = CouplingMap([[0, 1]])
        dag = circuit_to_dag(circuit)

        expected = QuantumCircuit(qr, cr)
        expected.cx(qr[0], qr[1]).c_if(cr, 0)

        # Order of H gates is important because DAG comparison will consider
        # different conditional order on a creg to be a different circuit.
        # See https://github.com/Qiskit/qiskit-terra/issues/3164
        expected.h(qr[1]).c_if(cr, 0)
        expected.h(qr[0]).c_if(cr, 0)
        expected.cx(qr[0], qr[1]).c_if(cr, 0)
        expected.h(qr[1]).c_if(cr, 0)
        expected.h(qr[0]).c_if(cr, 0)

        expected.cx(qr[0], qr[1])
        expected.h(qr[1])
        expected.h(qr[0])
        expected.cx(qr[0], qr[1])
        expected.h(qr[1])
        expected.h(qr[0])

        pass_ = GateDirection(coupling)
        after = pass_.run(dag)

        self.assertEqual(circuit_to_dag(expected), after)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
280545 | <reponame>iserko/lookml-tools<gh_stars>0
"""
a drill down rule
Authors:
<NAME> (<EMAIL>)
"""
from lkmltools.linter.field_rule import FieldRule
from lkmltools.lookml_field import LookMLField
class DrillDownRule(FieldRule):
    """Rule: a measure should define a non-empty ``drill_fields`` list."""

    def run(self, lookml_field):
        """Check whether this field has drilldowns.

        Args:
            lookml_field (LookMLField): instance of LookMLField

        Returns:
            (tuple): tuple containing:

                relevant (bool): is this rule relevant for this JSON chunk?

                passed (bool): did the rule pass? (None when not relevant)
        """
        # The rule only applies to measures.
        if not lookml_field.is_measure():
            return False, None
        has_drills = bool(lookml_field.has_key("drill_fields")
                          and lookml_field.drill_fields != [])
        return True, has_drills
| StarcoderdataPython |
1753027 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cf_reservations(cli_ctx, **_):
    """Build an AzureReservationAPI client that is not bound to a subscription."""
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.reservations.azure_reservation_api import AzureReservationAPI
    client = get_mgmt_service_client(cli_ctx, AzureReservationAPI, subscription_bound=False)
    return client
def reservation_mgmt_client_factory(cli_ctx, kwargs):
    """Return the reservation operations group of the reservations client."""
    client = cf_reservations(cli_ctx, **kwargs)
    return client.reservation
def reservation_order_mgmt_client_factory(cli_ctx, kwargs):
    """Return the reservation-order operations group of the reservations client."""
    client = cf_reservations(cli_ctx, **kwargs)
    return client.reservation_order
| StarcoderdataPython |
12807895 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
def _mimport(name, level=1):
    # Import *name* relative to this package; if that fails (e.g. when the
    # modules are used outside the package context), fall back to an
    # absolute import.  The broad except is deliberate: any failure of the
    # relative form triggers the fallback.
    try:
        return __import__(name, globals(), level=level)
    except Exception:
        return __import__(name, globals())
import numpy as _N
import ctypes as _C
_dsc = _mimport('descriptor')
_dat = _mimport('mdsdata')
_scr = _mimport('mdsscalar')
_arr = _mimport('mdsarray')
_ver = _mimport('version')
class Apd(_dat.TreeRefX, _arr.Array):
    """The Apd class represents the Array of Pointers to Descriptors structure.
    This structure provides a mechanism for storing an array of non-primitive items.
    """
    mdsclass = 196     # MDSplus descriptor class code for APD
    dtype_id = 24      # MDSplus dtype code for a generic APD
    maxdesc = 1 << 31  # maximum number of descriptors

    @property
    def _descriptor(self):
        # Serialize this Apd as a DescriptorAPD: a 1-D array whose elements
        # are C pointers, one per contained descriptor.
        descs = self.descs
        d = _dsc.DescriptorAPD()
        d.scale = 0
        d.digits = 0
        d.aflags = 0
        d.dtype = self.dtype_id
        d.dimct = 1                        # always serialized as 1-D
        d.length = _C.sizeof(_C.c_void_p)  # element size: one pointer
        ndesc = len(descs)
        # Keep Python references alive in d.array so the C pointers below
        # remain valid for the lifetime of the descriptor.
        d.array = [None]*ndesc
        if ndesc:
            d.arsize = d.length*ndesc
            descs_ptrs = (_dsc.Descriptor.PTR*ndesc)()
            for idx, desc in enumerate(descs):
                if desc is None:
                    descs_ptrs[idx] = None
                else:  # keys in dicts have to be python types
                    if isinstance(desc, _dsc.Descriptor):
                        d.array[idx] = desc
                    else:
                        # Wrap non-descriptor items as Data and take their
                        # descriptor representation.
                        d.array[idx] = _dat.Data(desc)._descriptor
                    descs_ptrs[idx] = d.array[idx].ptr_
            d.pointer = _C.cast(_C.pointer(descs_ptrs), _C.c_void_p)
        d.a0 = d.pointer
        return _cmp.Compound._descriptorWithProps(self, d)

    @classmethod
    def fromDescriptor(cls, d):
        # Inverse of _descriptor: rebuild the contained objects from the
        # pointer array of a DescriptorAPD.
        num = d.arsize//d.length
        dptrs = _C.cast(d.pointer, _C.POINTER(_C.c_void_p*num)).contents
        descs = [_dsc.pointerToObject(dptr, d.tree) for dptr in dptrs]
        return cls(descs)._setTree(d.tree)

    def __init__(self, value=None, dtype=0):
        """Initializes a Apd instance
        """
        if value is self:
            return
        self.dtype_id = dtype
        self._descs = []
        if value is not None:
            if isinstance(value, _ver.listlike):
                for val in value:
                    self.append(_dat.Data(val))
            else:
                raise TypeError(
                    "must provide tuple of items when creating ApdData: %s" % (type(value),))

    def __len__(self):
        """Return the number of descriptors in the apd"""
        return self.getNumDescs()

    def append(self, value):
        """Append a value to apd"""
        # Implemented via item assignment at index len(self); relies on the
        # subclass/base __setitem__ semantics.
        self[len(self)] = _dat.Data(value)
        return self

    @property
    def value(self):
        # Object ndarray of the contained descriptors themselves.
        return _N.array(self.descs, object)

    @property
    def _value(self):
        """Returns native representation of the contained items"""
        return _N.asarray(tuple(d.value for d in self._descs), 'object')
_ver.listlike = tuple(set(_ver.listlike).union(set((Apd,))))
class Dictionary(dict, Apd):
    """dictionary class"""

    class dict_np(_N.ndarray):
        # Object ndarray of (key, value) pairs; tolist() reassembles them
        # back into a plain python dict.
        def __new__(cls, items):
            return _N.asarray(tuple(d for d in items), 'object').view(Dictionary.dict_np)

        def tolist(self):
            return dict(kv for kv in self)

    _key_value_exception = Exception(
        'A dictionary requires an even number of elements read as key-value pairs.')
    dtype_id = 216  # MDSplus dtype code for dictionary APDs

    def __init__(self, value=None):
        # Accept a dict, a flat alternating key/value sequence, or a
        # generator yielding alternating keys and values.
        if value is self:
            return
        if value is not None:
            if isinstance(value, dict):
                for key, val in value.items():
                    self.setdefault(key, val)
            elif isinstance(value, (Apd, tuple, list, _ver.mapclass, _N.ndarray)):
                if isinstance(value, (_ver.mapclass,)) and not isinstance(value, (tuple,)):
                    value = tuple(value)
                if len(value) & 1:
                    raise Dictionary._key_value_exception
                for idx in range(0, len(value), 2):
                    self.setdefault(value[idx], value[idx+1])
            elif isinstance(value, (_ver.generator)):
                for key in value:
                    self.setdefault(key, next(value))
            else:
                raise TypeError(
                    'Cannot create Dictionary from type: '+str(type(value)))

    @staticmethod
    def toKey(key):
        # Normalize a key to a plain python type so hashing/equality is
        # consistent (MDSplus Scalars and numpy scalars are unwrapped).
        if isinstance(key, (_scr.Scalar,)):
            key = key.value
        if isinstance(key, (_ver.npbytes, _ver.npunicode)):
            return _ver.tostr(key)
        if isinstance(key, (_N.int32,)):
            return int(key)
        if isinstance(key, (_N.float32, _N.float64)):
            return float(key)
        return _dat.Data(key).data().tolist()

    def setdefault(self, key, val):
        """check keys and converts values to instances of Data"""
        key = Dictionary.toKey(key)
        if not isinstance(val, _dat.Data):
            val = _dat.Data(val)
        super(Dictionary, self).setdefault(key, val)

    def remove(self, key):
        """remove pair with key"""
        del(self[Dictionary.toKey(key)])

    def __setitem__(self, name, value):
        """sets values as instances of Data"""
        # NOTE(review): delegates to setdefault, so assigning to an existing
        # key does NOT overwrite its value (dict.setdefault keeps the old
        # one) -- confirm this is intentional.
        self.setdefault(name, value)

    def __getitem__(self, name):
        """gets values as instances of Data"""
        return super(Dictionary, self).__getitem__(Dictionary.toKey(name))

    @property
    def value(self):
        """Return native representation of data item"""
        return Dictionary.dict_np(self.items())

    def toApd(self):
        # View this mapping as a flat Apd of alternating keys and values.
        return Apd(self.descs, self.dtype_id)

    @property
    def descs(self):
        """Returns the descs of the Apd.
        @rtype: tuple
        """
        return self._descs

    @property
    def _descs(self): return sum(self.items(), ())  # flatten (k, v) pairs
class List(list, Apd):
    """list class"""
    dtype_id = 214  # MDSplus dtype code for list APDs

    def __init__(self, value=None):
        if value is self:
            return
        if value is not None:
            if isinstance(value, (Apd, tuple, list, _ver.mapclass, _ver.generator, _N.ndarray)):
                for val in value:
                    # Explicit List.append resolves through the MRO to
                    # list.append (List defines none), bypassing Apd.append.
                    List.append(self, _dat.Data(val))
            else:
                raise TypeError(
                    'Cannot create List from type: '+str(type(value)))

    @property
    def descs(self):
        """Returns the descs of the Apd.
        @rtype: tuple
        """
        return tuple(self)

    @property
    def _descs(self): return self  # the list itself holds the descriptors
descriptor = _mimport('descriptor')
# Register the classes in the descriptor dispatch tables so incoming
# descriptors with these dtype codes deserialize into Apd/List/Dictionary
# (both the scalar and the array tables).
descriptor.dtypeToClass[Apd.dtype_id] = Apd
descriptor.dtypeToClass[List.dtype_id] = List
descriptor.dtypeToClass[Dictionary.dtype_id] = Dictionary
descriptor.dtypeToArrayClass[Apd.dtype_id] = Apd
descriptor.dtypeToArrayClass[List.dtype_id] = List
descriptor.dtypeToArrayClass[Dictionary.dtype_id] = Dictionary
# Imported last -- presumably to avoid circular imports (tree/compound
# depend on this module); confirm against package layout.
_tre = _mimport('tree')
_cmp = _mimport('compound')
| StarcoderdataPython |
4981369 | from datetime import timedelta
import pendulum
import pytest
from ..conftest import assert_datetime
def test_sub_years_positive():
    """Subtracting a positive year count moves the date back."""
    start = pendulum.datetime(1975, 1, 1)
    assert start.subtract(years=1).year == 1974
def test_sub_years_zero():
    """Subtracting zero years is a no-op."""
    start = pendulum.datetime(1975, 1, 1)
    assert start.subtract(years=0).year == 1975
def test_sub_years_negative():
    """Subtracting a negative year count moves the date forward."""
    start = pendulum.datetime(1975, 1, 1)
    assert start.subtract(years=-1).year == 1976
def test_sub_months_positive():
    """Subtracting a positive month count moves the date back."""
    start = pendulum.datetime(1975, 12, 1)
    assert start.subtract(months=1).month == 11
def test_sub_months_zero():
    """Subtracting zero months is a no-op."""
    start = pendulum.datetime(1975, 12, 1)
    assert start.subtract(months=0).month == 12
def test_sub_months_negative():
    """Subtracting a negative month count rolls over into the next year."""
    start = pendulum.datetime(1975, 12, 1)
    assert start.subtract(months=-1).month == 1
def test_sub_days_positive():
    """Subtracting a positive day count moves the date back."""
    start = pendulum.datetime(1975, 5, 31)
    assert start.subtract(days=1).day == 30
def test_sub_days_zero():
    """Subtracting zero days is a no-op."""
    start = pendulum.datetime(1975, 5, 31)
    assert start.subtract(days=0).day == 31
def test_sub_days_negative():
    """Subtracting a negative day count rolls over into the next month."""
    start = pendulum.datetime(1975, 5, 31)
    assert start.subtract(days=-1).day == 1
def test_sub_weeks_positive():
    """Subtracting a positive week count moves the date back 7 days."""
    start = pendulum.datetime(1975, 5, 21)
    assert start.subtract(weeks=1).day == 14
def test_sub_weeks_zero():
    """Subtracting zero weeks is a no-op."""
    start = pendulum.datetime(1975, 5, 21)
    assert start.subtract(weeks=0).day == 21
def test_sub_weeks_negative():
    """Subtracting a negative week count moves the date forward 7 days."""
    start = pendulum.datetime(1975, 5, 21)
    assert start.subtract(weeks=-1).day == 28
def test_sub_hours_positive():
    """Subtracting one hour from midnight wraps to 23:00 of the previous day."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(hours=1).hour == 23
def test_sub_hours_zero():
    """Subtracting zero hours is a no-op."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(hours=0).hour == 0
def test_sub_hours_negative():
    """Subtracting a negative hour count moves the time forward."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(hours=-1).hour == 1
def test_sub_minutes_positive():
    """Subtracting one minute from midnight wraps to :59 of the previous hour."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(minutes=1).minute == 59
def test_sub_minutes_zero():
    """Subtracting zero minutes is a no-op."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(minutes=0).minute == 0
def test_sub_minutes_negative():
    """Subtracting a negative minute count moves the time forward."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(minutes=-1).minute == 1
def test_sub_seconds_positive():
    """Subtracting one second from midnight wraps to :59 of the previous minute."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(seconds=1).second == 59
def test_sub_seconds_zero():
    """Subtracting zero seconds is a no-op."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(seconds=0).second == 0
def test_sub_seconds_negative():
    """Subtracting a negative second count moves the time forward."""
    start = pendulum.datetime(1975, 5, 21, 0, 0, 0)
    assert start.subtract(seconds=-1).second == 1
def test_subtract_timedelta():
    """Stdlib timedelta subtraction applies days/seconds/microseconds exactly."""
    start = pendulum.datetime(2015, 3, 14, 3, 12, 15, 777777)
    shifted = start - timedelta(days=6, seconds=16, microseconds=654321)
    actual = (shifted.day, shifted.minute, shifted.second, shifted.microsecond)
    assert actual == (8, 11, 59, 123456)
def test_subtract_duration():
    """Duration subtraction applies calendar and clock components exactly."""
    start = pendulum.datetime(2015, 3, 14, 3, 12, 15, 777777)
    shifted = start - pendulum.duration(
        years=2, months=3, days=6, seconds=16, microseconds=654321
    )
    actual = (shifted.year, shifted.month, shifted.day, shifted.hour,
              shifted.minute, shifted.second, shifted.microsecond)
    assert actual == (2012, 12, 8, 3, 11, 59, 123456)
def test_subtract_time_to_new_transition_skipped():
    """Subtracting one microsecond at a spring-forward boundary lands just
    before the skipped hour, with the pre-transition offset/DST flag."""
    # Europe/Paris: 2013-03-31, clocks jump 02:00 -> 03:00 (CET -> CEST).
    dt = pendulum.datetime(2013, 3, 31, 3, 0, 0, 0, tz="Europe/Paris")

    assert_datetime(dt, 2013, 3, 31, 3, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 7200
    assert dt.is_dst()

    dt = dt.subtract(microseconds=1)

    assert_datetime(dt, 2013, 3, 31, 1, 59, 59, 999999)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 3600
    assert not dt.is_dst()

    # America/New_York: 2013-03-10, clocks jump 02:00 -> 03:00 (EST -> EDT).
    dt = pendulum.datetime(2013, 3, 10, 3, 0, 0, 0, tz="America/New_York")

    assert_datetime(dt, 2013, 3, 10, 3, 0, 0, 0)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -4 * 3600
    assert dt.is_dst()

    dt = dt.subtract(microseconds=1)

    assert_datetime(dt, 2013, 3, 10, 1, 59, 59, 999999)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -5 * 3600
    assert not dt.is_dst()

    # Historical transition: America/New_York, 1957-04-28.
    dt = pendulum.datetime(1957, 4, 28, 3, 0, 0, 0, tz="America/New_York")

    assert_datetime(dt, 1957, 4, 28, 3, 0, 0, 0)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -4 * 3600
    assert dt.is_dst()

    dt = dt.subtract(microseconds=1)

    assert_datetime(dt, 1957, 4, 28, 1, 59, 59, 999999)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -5 * 3600
    assert not dt.is_dst()
def test_subtract_time_to_new_transition_skipped_big():
    """Subtracting a whole day across a spring-forward boundary keeps the
    wall-clock time and picks up the pre-transition offset/DST flag."""
    dt = pendulum.datetime(2013, 3, 31, 3, 0, 0, 0, tz="Europe/Paris")

    assert_datetime(dt, 2013, 3, 31, 3, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 7200
    assert dt.is_dst()

    dt = dt.subtract(days=1)

    assert_datetime(dt, 2013, 3, 30, 3, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 3600
    assert not dt.is_dst()
def test_subtract_time_to_new_transition_repeated():
    """Subtracting one microsecond at a fall-back boundary lands in the
    repeated hour, with the pre-transition (DST) offset."""
    # Europe/Paris: 2013-10-27, clocks fall back 03:00 -> 02:00 (CEST -> CET).
    dt = pendulum.datetime(2013, 10, 27, 2, 0, 0, 0, tz="Europe/Paris")

    assert_datetime(dt, 2013, 10, 27, 2, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 3600
    assert not dt.is_dst()

    dt = dt.subtract(microseconds=1)

    assert_datetime(dt, 2013, 10, 27, 2, 59, 59, 999999)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 7200
    assert dt.is_dst()

    # America/New_York: 2013-11-03, clocks fall back 02:00 -> 01:00 (EDT -> EST).
    dt = pendulum.datetime(2013, 11, 3, 1, 0, 0, 0, tz="America/New_York")

    assert_datetime(dt, 2013, 11, 3, 1, 0, 0, 0)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -5 * 3600
    assert not dt.is_dst()

    dt = dt.subtract(microseconds=1)

    assert_datetime(dt, 2013, 11, 3, 1, 59, 59, 999999)
    assert dt.timezone_name == "America/New_York"
    assert dt.offset == -4 * 3600
    assert dt.is_dst()
def test_subtract_time_to_new_transition_repeated_big():
    """Subtracting a whole day across a fall-back boundary keeps the
    wall-clock time and picks up the pre-transition (DST) offset."""
    dt = pendulum.datetime(2013, 10, 27, 2, 0, 0, 0, tz="Europe/Paris")

    assert_datetime(dt, 2013, 10, 27, 2, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 3600
    assert not dt.is_dst()

    dt = dt.subtract(days=1)

    assert_datetime(dt, 2013, 10, 26, 2, 0, 0, 0)
    assert dt.timezone_name == "Europe/Paris"
    assert dt.offset == 7200
    assert dt.is_dst()
def test_subtract_invalid_type():
    """Subtracting a string from a datetime raises TypeError either way round."""
    moment = pendulum.datetime(1975, 5, 21, 0, 0, 0)

    with pytest.raises(TypeError):
        moment - "ab"

    with pytest.raises(TypeError):
        "ab" - moment
| StarcoderdataPython |
174367 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#move this notebook to folder above syndef to run
from syndef import synfits #import synestia snapshot (impact database)
import numpy as np
import matplotlib.pyplot as plt
# Radial (rxy) and vertical (z) sample grids for the outer-disk region, in m.
test_rxy=np.linspace(7e6,60e6,100) #m
test_z=np.linspace(0.001e6,30e6,50) #m
rxy=np.log10(test_rxy/1e6) #Mm log10
z=np.log10(test_z/1e6) #Mm log10
TESTRXY,TESTZ=np.meshgrid(test_rxy,test_z) #2-D grid of rxy, z for color plot
#y=np.zeros(np.shape(rxy)) #array of zeros for residual fit
#rho1=synfits.resfuncspl(synfits.SNAP_Canup.rhomidfit[1],rxy,y)
#rho2=synfits.resfuncspl(synfits.SNAP_CukStewart.rhomidfit[1],rxy,y)
#rho3=synfits.resfuncspl(synfits.SNAP_Quintana.rhomidfit[1],rxy,y)
# log10 particle densities for the outer disk of each impact snapshot
# (1 = Canup, 2 = Cuk & Stewart, 3 = Quintana); *_mid variants select the
# midplane subset of particles.
snaprho1=np.log10(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer])
snaprho2=np.log10(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer])
snaprho3=np.log10(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer])
snaprhomid1=np.log10(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer_mid])
snaprhomid2=np.log10(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer_mid])
snaprhomid3=np.log10(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer_mid])
# Corresponding cylindrical radii (rxy) and heights (z), converted to log10 Mm.
snaprxy1=np.log10(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer]/1e6)
snaprxy2=np.log10(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer]/1e6)
snaprxy3=np.log10(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer]/1e6)
snaprxymid1=np.log10(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer_mid]/1e6)
snaprxymid2=np.log10(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer_mid]/1e6)
snaprxymid3=np.log10(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer_mid]/1e6)
snapz1=np.log10(synfits.SNAP_Canup.z[synfits.SNAP_Canup.ind_outer]/1e6)
snapz2=np.log10(synfits.SNAP_CukStewart.z[synfits.SNAP_CukStewart.ind_outer]/1e6)
snapz3=np.log10(synfits.SNAP_Quintana.z[synfits.SNAP_Quintana.ind_outer]/1e6)
# Hand-tuned constants for the analytic disk model:
#   scale height  z_s     = const1 * rxy^const2
#   midplane rho  rho_mid = const3 * rxy^const4
# Alternative values the author experimented with are noted inline.
const1=10.5#10 to 11; 10.55 (fiducial)
const2=0.86#0.85 to 0.9; 0.86 (fiducial)
const3=1e38 #0.9e35 (fiducial) / 1.5e33 (underestimate) / 1.1e37 (cross) / 1e38 (overestimate)
const4=-5.1 #-4.7 (fiducial) / -4.5 (underestimate) / -5 (cross) / -5.1 (overestimate)
# Gaussian vertical density profile about the midplane with scale height z_s.
test_z_s=const1*np.power(TESTRXY,const2) #scale height fit in m
test_rho_g=const3*np.power(TESTRXY,const4)*np.exp(-np.power(TESTZ/test_z_s,2))
test_rho_gmid=const3*np.power(test_rxy,const4)
# Figure 1: midplane density vs radius (snapshot dots vs analytic curve),
# one panel per impact scenario.
plt.figure(figsize=(16,5))
plt.subplot(131)
#plt.plot(rxy,rho1,'b')
plt.plot(snaprxymid1,snaprhomid1,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Canup')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.subplot(132)
#plt.plot(rxy,rho2,'b')
plt.plot(snaprxymid2,snaprhomid2,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Cuk and Stewart')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.subplot(133)
#plt.plot(rxy,rho3,'b')
plt.plot(snaprxymid3,snaprhomid3,'r.')
plt.plot(np.log10(test_rxy/1e6),np.log10(test_rho_gmid),'k')
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log midplane density (kg/m$^3$)')
plt.title('Quintana')
plt.xlim([.8,2])
plt.ylim([-2,3])
plt.show()
plt.close()
# Figure 2: 2-D (rxy, z) density maps of the analytic model with the
# snapshot particles scattered on top, one panel per scenario.
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy1,snapz1,c=snaprho1)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.subplot(132)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy2,snapz2,c=snaprho2)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.subplot(133)
plt.pcolor(np.log10(TESTRXY/1e6),np.log10(TESTZ/1e6),np.log10(test_rho_g))
plt.scatter(snaprxy3,snapz3,c=snaprho3)
plt.xlabel('log r$_{xy}$ (Mm)')
plt.ylabel('log z (Mm)')
plt.colorbar(label='log density (kg/m$^3$)')
plt.xlim([.8,2])
plt.show()
plt.close()
# In[ ]:
| StarcoderdataPython |
1950919 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2018 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from __future__ import division
from builtins import str
from resources.lib.py_utils import old_div
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib.labels import LABELS
from resources.lib import web_utils
from resources.lib.kodi_utils import get_kodi_version
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
from kodi_six import xbmc
from kodi_six import xbmcgui
# TO DO
URL_ROOT = 'https://www.questod.co.uk'
URL_SHOWS = URL_ROOT + '/api/shows/%s'
# mode
URL_SHOWS_AZ = URL_ROOT + '/api/shows%s'
# mode
URL_VIDEOS = URL_ROOT + '/api/show-detail/%s'
# showId
URL_STREAM = URL_ROOT + '/api/video-playback/%s'
# path
URL_LIVE = 'https://www.questod.co.uk/channel/%s'
URL_LICENCE_KEY = 'https://lic.caas.conax.com/nep/wv/license|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&PreAuthorization=%s&Host=lic.caas.conax.com|R{SSM}|'
# videoId
CATEGORIES_MODE = {
'FEATURED': 'featured',
'MOST POPULAR': 'most-popular',
'NEW': 'new',
'LEAVING SOON': 'last-chance'
}
CATEGORIES_MODE_AZ = {'A-Z': '-az'}
def replay_entry(plugin, item_id, **kwargs):
    """
    First executed function after replay_bridge
    """
    # Delegate straight to the category listing for this channel.
    return list_categories(plugin, item_id)
@Route.register
def list_categories(plugin, item_id, **kwargs):
    """Build the root category listing.

    Yields one folder item per browsing mode (featured, most popular, new,
    leaving soon) followed by the alphabetical A-Z mode, each wired to the
    matching program-listing callback.
    """
    category_sets = (
        (CATEGORIES_MODE, list_programs_mode),
        (CATEGORIES_MODE_AZ, list_programs_mode_az),
    )
    for categories, callback in category_sets:
        for title, value in categories.items():
            item = Listitem()
            item.label = title
            item.set_callback(callback,
                              item_id=item_id,
                              category_name_value=value)
            item_post_treatment(item)
            yield item
@Route.register
def list_programs_mode(plugin, item_id, category_name_value, **kwargs):
    """List every show of one browsing mode (featured, new, ...).

    Yields one folder item per show, linking to its season listing.
    """
    shows = json.loads(urlquick.get(URL_SHOWS % category_name_value).text)
    for show in shows["items"]:
        artwork = show["image"]["src"] if "image" in show else ''
        item = Listitem()
        item.label = show["title"]
        item.art['thumb'] = item.art['landscape'] = artwork
        item.set_callback(list_program_seasons,
                          item_id=item_id,
                          program_id=show["id"])
        item_post_treatment(item)
        yield item
@Route.register
def list_programs_mode_az(plugin, item_id, category_name_value, **kwargs):
    """List every show of the alphabetical (A-Z) mode.

    The API groups shows by first letter; flatten all letter groups and
    yield one folder item per show, linking to its season listing.
    """
    resp = urlquick.get(URL_SHOWS_AZ % category_name_value)
    letter_groups = json.loads(resp.text)["items"]
    all_shows = (show for group in letter_groups for show in group["items"])
    for show in all_shows:
        item = Listitem()
        item.label = show["title"]
        item.set_callback(list_program_seasons,
                          item_id=item_id,
                          program_id=show["id"])
        item_post_treatment(item)
        yield item
@Route.register
def list_program_seasons(plugin, item_id, program_id, **kwargs):
    """List the seasons of one show.

    Yields one 'Season - N' folder per season number reported by the API,
    linking to that season's episode listing.
    """
    show_detail = json.loads(urlquick.get(URL_VIDEOS % program_id).text)
    for season_number in show_detail["show"]["seasonNumbers"]:
        item = Listitem()
        item.label = 'Season - ' + str(season_number)
        item.set_callback(list_videos,
                          item_id=item_id,
                          program_id=program_id,
                          program_season_number=season_number)
        item_post_treatment(item)
        yield item
@Route.register
def list_videos(plugin, item_id, program_id, program_season_number, **kwargs):
    """Build the playable episode listing for one season of a show.

    Yields one playable item per episode; if the season holds no episodes,
    notifies the user and yields False (codequick's empty-folder signal).
    """
    resp = urlquick.get(URL_VIDEOS % program_id)
    json_parser = json.loads(resp.text)
    at_least_one_item = False
    # Episodes are grouped under videos/episode, keyed by the season number
    # rendered as a string.
    if 'episode' in json_parser["videos"]:
        if str(program_season_number) in json_parser["videos"]["episode"]:
            for video_datas in json_parser["videos"]["episode"][str(
                    program_season_number)]:
                at_least_one_item = True
                video_title = video_datas["title"]
                # The API reports duration in milliseconds; convert to seconds.
                video_duration = old_div(int(video_datas["videoDuration"]), 1000)
                video_plot = video_datas["description"]
                video_image = video_datas["image"]["src"]
                # The episode "path" doubles as the stream lookup id.
                video_id = video_datas["path"]
                item = Listitem()
                item.label = video_title
                item.art['thumb'] = item.art['landscape'] = video_image
                item.art['fanart'] = video_image
                item.info["plot"] = video_plot
                item.info["duration"] = video_duration
                item.set_callback(get_video_url,
                                  item_id=item_id,
                                  video_id=video_id)
                item_post_treatment(item,
                                    is_playable=True,
                                    is_downloadable=True)
                yield item
    if not at_least_one_item:
        plugin.notify(plugin.localize(LABELS['No videos found']), '')
        yield False
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_id,
                  download_mode=False,
                  **kwargs):
    """Resolve the playable URL for one episode.

    DRM-protected episodes play through inputstream.adaptive with a Widevine
    licence (Kodi 18+ only, never downloadable); unprotected ones resolve to
    a plain HLS URL that can also be downloaded.
    """
    resp = urlquick.get(URL_STREAM % video_id, max_age=-1)
    json_parser = json.loads(resp.text)
    # The API reports availability problems in an "error" object; 403 gets a
    # dedicated (presumably geo-blocking) message.
    if 'error' in json_parser:
        if json_parser["error"] is not None:
            if json_parser["error"]["status"] == '403':
                plugin.notify('ERROR', plugin.localize(30713))
            else:
                plugin.notify('ERROR', plugin.localize(30716))
            return False
    if 'drmToken' in json_parser["playback"]:
        # Widevine-protected stream: needs Kodi >= 18 plus inputstream.adaptive.
        if get_kodi_version() < 18:
            xbmcgui.Dialog().ok('Info', plugin.localize(30602))
            return False
        is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
        if not is_helper.check_inputstream():
            return False
        if download_mode:
            # DRM streams cannot be downloaded.
            xbmcgui.Dialog().ok('Info', plugin.localize(30603))
            return False
        token = json_parser["playback"]["drmToken"]
        item = Listitem()
        item.path = json_parser["playback"]["streamUrlDash"]
        item.label = get_selected_item_label()
        item.art.update(get_selected_item_art())
        item.info.update(get_selected_item_info())
        item.property['inputstreamaddon'] = 'inputstream.adaptive'
        item.property['inputstream.adaptive.manifest_type'] = 'mpd'
        item.property[
            'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
        item.property[
            'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token
        return item
    else:
        # Clear (non-DRM) stream: hand back the HLS URL, or download it.
        final_video_url = json_parser["playback"]["streamUrlHls"]
        if download_mode:
            return download.download_video(final_video_url)
        return final_video_url
def live_entry(plugin, item_id, **kwargs):
    # Entry point for live TV: the channel-to-URL mapping happens inside
    # get_live_url; the upper-cased id is only passed along as the video id.
    return get_live_url(plugin, item_id, item_id.upper())
@Resolver.register
def get_live_url(plugin, item_id, video_id, **kwargs):
    """Resolve the live DRM stream for a Quest channel.

    Scrapes the channel page for the Widevine token and DASH manifest URL
    and returns a Listitem configured for inputstream.adaptive.  Returns
    False (after notifying the user) on any failure.
    """
    # Live streams are Widevine-protected: needs Kodi >= 18 plus
    # inputstream.adaptive.
    if get_kodi_version() < 18:
        xbmcgui.Dialog().ok('Info', plugin.localize(30602))
        return False
    is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
    if not is_helper.check_inputstream():
        return False
    # Map the plugin channel id to the website channel slug.
    if item_id == 'questtv':
        channel_slug = 'quest'
    elif item_id == 'questred':
        channel_slug = 'quest-red'
    else:
        # Fix: the original left `resp` unbound for unknown channel ids,
        # raising NameError instead of failing gracefully.
        plugin.notify('ERROR', plugin.localize(30713))
        return False
    resp = urlquick.get(URL_LIVE % channel_slug, max_age=-1)
    # Scrape token and manifest from the embedded page JSON; each pattern is
    # now evaluated once instead of twice (the original ran findall() for the
    # length check and again for the value).
    token_match = re.search(r'drmToken\"\:\"(.*?)\"', resp.text)
    if token_match is not None:
        stream_match = re.search(r'streamUrlDash\"\:\"(.*?)\"', resp.text)
        if stream_match is not None:
            item = Listitem()
            item.path = stream_match.group(1)
            item.label = get_selected_item_label()
            item.art.update(get_selected_item_art())
            item.info.update(get_selected_item_info())
            item.property['inputstreamaddon'] = 'inputstream.adaptive'
            item.property['inputstream.adaptive.manifest_type'] = 'mpd'
            item.property[
                'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
            item.property[
                'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token_match.group(1)
            return item
    plugin.notify('ERROR', plugin.localize(30713))
    return False
| StarcoderdataPython |
4864846 | <gh_stars>0
"""
Revision ID: 0365_add_nhs_branding
Revises: 0364_drop_old_column
Create Date: 2022-02-17 16:31:21.415065
"""
import os
from alembic import op
revision = '0365_add_nhs_branding'
down_revision = '0364_drop_old_column'
environment = os.environ['NOTIFY_ENVIRONMENT']
def upgrade():
    # NOTE(review): leading whitespace was lost when this file was extracted;
    # the nesting below is reconstructed.  All statements are placed under the
    # environment guard, consistent with the downgrade docstring's claim that
    # this migration "won't be run in production" — confirm against the
    # original repository.
    if environment not in ["live", "production"]:
        # Detach the old NHS branding from any services referencing it.
        op.execute("""
            DELETE FROM service_email_branding
            WHERE email_branding_id in (
                SELECT id
                FROM email_branding
                WHERE name = 'NHS'
            )
        """)
        # Detach it from organisations as well.
        op.execute("""
            UPDATE organisation SET email_branding_id = null
            WHERE email_branding_id in(
                SELECT id
                FROM email_branding
                WHERE name = 'NHS'
            )
        """)
        # Remove the old NHS branding row itself ...
        op.execute("""
            DELETE FROM email_branding WHERE name = 'NHS'
        """)
        # ... and insert the canonical NHS branding with a fixed id and logo.
        op.execute("""
            INSERT INTO email_branding (
                id, logo, name, brand_type
            )
            VALUES (
                'a7dc4e56-660b-4db7-8cff-12c37b12b5ea',
                '1ac6f483-3105-4c9e-9017-dd7fb2752c44-nhs-blue_x2.png',
                'NHS',
                'org'
            )
        """)
def downgrade():
    """
    No downgrade step since this is not fully reversible, but won't be run in production.
    """
    # Intentionally a no-op: the rows deleted by upgrade() cannot be restored.
| StarcoderdataPython |
1845103 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tesseract import evaluation, temporal, metrics, viz
import pickle
# Shared matplotlib styling for the (currently disabled) F1-curve plots below.
line_kwargs = {'linewidth': 1, 'markersize': 5}
# def plot_f1(ax, l, data, alpha=1.0, neg=False, label=None, color='dodgerblue',
# marker='o'):
# if label is None:
# label = 'F1 (gw)' if neg else 'F1 (mw)'
# color = '#BCDEFE' if neg else color
# # series = data['f1_n'] if neg else data['f1']
# # print(data.index)
# print(data[l])
# ax.plot([i for i in range(0,len(data[l]))], data[l], label=label, alpha=alpha, marker=marker,
# c=color, markeredgewidth=1, **line_kwargs)
#
# viz.set_style()
# The seven Drebin feature groups ablated one at a time in the experiments.
features = ["api_calls", "app_permissions", "api_permissions", "interesting_calls", "urls", "intents", "activities"]
data = {}
# Per-feature AUT(F1, 24): area under the per-period F1 curve, normalised by
# the number of testing periods.
AUTs = {}
for feature in features:
    # Fix: use a context manager so the pickle handle is closed even if
    # loading raises (the original paired open()/close() manually).
    with open("../data/drebin_split_" + feature + ".pickle", "rb") as f:
        results = pickle.load(f)
    out = results['f1']
    AUTs[feature] = np.trapz(out) / (len(out) - 1)
# Split the results into Drebin manifest vs bytecode feature sets and render
# them as two tables on a single axis-less figure.
plotdata1 = []
plotdata2 = []
manifest = ["app_permissions", "intents", "activities"]
plt.xticks([])
plt.yticks([])
plotdata1.append(["$\\bf{Drebin\\ manifest\\ feature\\ set}$", "$\\bf{AUT(F1, 24)}$"])
plotdata2.append(["$\\bf{Drebin\\ bytecode\\ feature\\ set}$", "$\\bf{AUT(F1, 24)}$"])
avg1 = 0
avg2 = 0
for a in AUTs:
    if a in manifest:
        plotdata1.append([a, AUTs[a]])
        avg1 += AUTs[a]
    else:
        plotdata2.append([a, AUTs[a]])
        avg2 += AUTs[a]
# Averages over the 3 manifest and 4 bytecode feature sets respectively.
plotdata1.append(["$\\bf{Average}$", str(avg1/3)])
# Fix: the bytecode average must use avg2 — the original reused avg1 (and
# therefore reported the manifest sum divided by 4); avg2 was accumulated
# but never read.
plotdata2.append(["$\\bf{Average}$", str(avg2/4)])
plt.tight_layout()
plt.axis("off")
plt.autoscale()
plt.table(cellText=plotdata1, loc="best", bbox=None)
plt.table(cellText=plotdata2, loc="center", bbox=None)
plt.show()
1600467 | import math
import sys
from PyQt5 import QtGui, QtWidgets
from code_wheel.resources import resources_rc
def wheel_radius(pixmap_item):
    """
    Determine the radius of the given wheel part.

    :param pixmap_item: The wheel part.
    :type pixmap_item: QtWidgets.QPixmapGraphicsItem
    :returns: The radius of the given wheel.
    :rtype: float
    """
    diameter = pixmap_item.boundingRect().width()
    return diameter / 2
def rotation(position, wheel):
    """
    Calculate the rotation angle (in degrees) of the given position on the given wheel.

    :param position: The position os the mouse.
    :type position: QtGui.QPosition
    :param wheel: The wheel part.
    :type wheel: QtWidgets.QPixmapGraphicsItem
    :returns: The rotation angle of the mouse in degrees.
    :rtype: float
    """
    # Vector from the wheel's center to the mouse position; atan2 gives its
    # angle in (-180, 180], shifted into [0, 360).
    offset = position - wheel.boundingRect().center()
    angle = math.degrees(math.atan2(offset.y(), offset.x()))
    return angle + 180
class Scene(QtWidgets.QGraphicsScene):
    """A graphics scene containing the code wheel from bard's tale 4."""

    def __init__(self):
        """Add the parts of the wheel to the scene."""
        super().__init__()
        # Drag state: the wheel part currently grabbed, whether the mouse is
        # held down, where the drag started, and the grabbed wheel's rotation
        # at the moment of the press.
        self.current_wheel = None
        self.pressed = False
        self.click_position = None
        self.starting_rotation = 0
        # Set the background to be a comfortable black.
        self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor('#252525')))
        # Set up all the wheels and make them rotate around their centers.
        self.bottom_wheel = self.addPixmap(QtGui.QPixmap(':/bottom_wheel.png'))
        self.bottom_wheel.setTransformOriginPoint(wheel_radius(self.bottom_wheel), wheel_radius(self.bottom_wheel))
        self.middle_wheel = self.addPixmap(QtGui.QPixmap(':/middle_wheel.png'))
        self.middle_wheel.setTransformOriginPoint(wheel_radius(self.middle_wheel), wheel_radius(self.middle_wheel))
        # Center the smaller wheels on the bottom wheel by offsetting by the
        # difference in radii.
        self.middle_wheel.moveBy(
            wheel_radius(self.bottom_wheel) - wheel_radius(self.middle_wheel),
            wheel_radius(self.bottom_wheel) - wheel_radius(self.middle_wheel),
        )
        self.top_wheel = self.addPixmap(QtGui.QPixmap(':/top_wheel.png'))
        self.top_wheel.setTransformOriginPoint(wheel_radius(self.top_wheel), wheel_radius(self.top_wheel))
        self.top_wheel.moveBy(
            wheel_radius(self.bottom_wheel) - wheel_radius(self.top_wheel),
            wheel_radius(self.bottom_wheel) - wheel_radius(self.top_wheel),
        )

    def mousePressEvent(self, event):
        """
        Called when the mouse is pressed.

        Determine which part of the wheel was clicked.

        :param event: The mouse press event.
        :type event: QtWidgets.QGraphicsSceneMouseEvent
        """
        clicked_item = self.itemAt(event.scenePos().x(), event.scenePos().y(), self.views()[0].transform())
        if clicked_item is None:
            return
        # Record the grab so mouseMoveEvent can rotate relative to this point.
        self.pressed = True
        self.click_position = event.scenePos()
        self.current_wheel = clicked_item
        self.starting_rotation = self.current_wheel.rotation()

    def mouseReleaseEvent(self, event):
        """
        Called when the mouse is released.

        Reset all of the cached values about what was clicked.

        :param event: The mouse press event.
        :type event: QtWidgets.QGraphicsSceneMouseEvent
        """
        self.click_position = None
        self.current_wheel = None
        # NOTE(review): `original_rotation` is never read or written anywhere
        # else in this class — it looks like leftover state; confirm and
        # consider removing.
        self.original_rotation = None
        self.pressed = False
        self.starting_rotation = 0

    def mouseMoveEvent(self, event):
        """
        Called when the mouse is moved.

        If any part of the wheel was selected and the mouse is still pressed, determine
        how much we need to rotate the selected part.

        :param event: The mouse press event.
        :type event: QtWidgets.QGraphicsSceneMouseEvent
        """
        if not self.pressed or self.current_wheel is None:
            return
        # Rotate the grabbed wheel by the angular distance the mouse has
        # travelled around the wheel's center since the press.
        click_rotation = rotation(self.click_position, self.current_wheel)
        current_rotation = rotation(event.scenePos(), self.current_wheel)
        self.current_wheel.setRotation(self.starting_rotation + (current_rotation - click_rotation))
def main():
    """Run the program."""
    app = QtWidgets.QApplication([])
    app.setApplicationName('Bard\'s Tale IV Code Wheel')
    app.setWindowIcon(QtGui.QIcon(':icon.ico'))
    scene = Scene()
    view = QtWidgets.QGraphicsView(scene)
    view.show()
    # Enter the Qt event loop and exit the process with Qt's return code.
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6537573 | <reponame>zjzh/vega
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report."""
import json
import logging
import os
import traceback
from pickle import HIGHEST_PROTOCOL
from vega.common import FileOps, TaskOps, JsonEncoder, Status
logger = logging.getLogger(__name__)
class ReportPersistence(object):
    """Persist pipeline reports: JSON snapshots (reports.json) and pickles (.reports)."""

    def __init__(self):
        """Initialize object."""
        self.step_names = []   # ordered names of all pipeline steps
        self.steps = {}        # step name -> dict of step info fields

    def set_step_names(self, step_names):
        """Set name of steps."""
        self.step_names = step_names

    def update_step_info(self, **kwargs):
        """Update step info.

        Only a fixed whitelist of keys is stored; ``step_name`` is required
        to locate the step record, and unknown keys are logged and ignored.
        """
        if "step_name" in kwargs:
            step_name = kwargs["step_name"]
            if step_name not in self.steps:
                self.steps[step_name] = {}
            for key in kwargs:
                if key in ["step_name", "start_time", "end_time", "status", "message", "num_epochs", "num_models"]:
                    self.steps[step_name][key] = kwargs[key]
                else:
                    # Fix: logger.warn is a deprecated alias of warning, and
                    # the original message misspelled "Invalid".
                    logger.warning("Invalid step info {}:{}".format(key, kwargs[key]))
        else:
            logger.warning("Invalid step info: {}.".format(kwargs))

    def save_report(self, records):
        """Save report to `reports.json`.

        Failures are logged but never raised, so reporting cannot break the
        surrounding pipeline.
        """
        try:
            _file = FileOps.join_path(TaskOps().local_output_path, "reports.json")
            FileOps.make_base_dir(_file)
            data = self.get_report(records)
            with open(_file, "w") as f:
                json.dump(data, f, indent=4, cls=JsonEncoder)
        except Exception as e:
            logging.warning(f"Failed to save report, message: {e}")
            logging.debug(traceback.format_exc())

    def get_report(self, records):
        """Build the report dict: step summaries under "_steps_" plus one
        list of record dicts per step name.  Returns None on failure."""
        try:
            data = {"_steps_": []}
            # Summarise every declared step, marking the ones that never ran.
            for step in self.step_names:
                if step in self.steps:
                    data["_steps_"].append(self.steps[step])
                else:
                    data["_steps_"].append({
                        "step_name": step,
                        "status": Status.unstarted
                    })
            # Group the individual records by their step name.
            for record in records:
                if record.step_name in data:
                    data[record.step_name].append(record.to_dict())
                else:
                    data[record.step_name] = [record.to_dict()]
            return data
        except Exception as e:
            logging.warning(f"Failed to get report, message: {e}")
            logging.debug(traceback.format_exc())

    def pickle_report(self, records, report_instance):
        """Pickle report to `.reports` (records plus the report object)."""
        try:
            _file = os.path.join(TaskOps().step_path, ".reports")
            _dump_data = [records, report_instance]
            FileOps.dump_pickle(_dump_data, _file, protocol=HIGHEST_PROTOCOL)
        except Exception as e:
            logging.warning(f"Failed to pickle report, message: {e}")
            logging.debug(traceback.format_exc())
| StarcoderdataPython |
3370444 | astring = "Hello World!"
print(astring[::-1])
| StarcoderdataPython |
8135409 | <reponame>24TangC/gpa-calculator<filename>classes.py
class classes:
    """Interactively collect a student's class list with per-class level,
    letter grade and credit value (used by the GPA calculator)."""

    def __init__(self, name):
        # NOTE(review): `name` is accepted but never used in this chunk —
        # confirm whether it should be stored or dropped from the signature.
        print('How many classes are you taking?')
        self.num_classes = int(input())
        self.grades = {}   # class name -> letter grade (raw input string)
        self.credits = {}  # class name -> credit value (out of 5)
        self.level = {}    # class name -> 1 (AP), 2 (Honors), 3 (CP)
        for x in range(self.num_classes):
            print(f'What is your #{x+1} input class?')
            temp = input()
            print(f'What is the level of {temp}? AP - 1, Honors - 2, CP - 3')
            self.level[temp] = int(input())
            print(f'What is your letter grade in {temp}?')
            self.grades[temp] = input()
            print(f'How many credits is this class? (Out of 5)')
            self.credits[temp] = float(input())
11308788 | # Generated by Django 2.1.3 on 2020-01-05 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.1.3): refresh the `created` default date on
    # btutor/downloadformat and relax the qsubject mark fields to nullable
    # floats defaulting to 0.

    dependencies = [
        ('result', '0071_auto_20191229_0935'),
    ]

    operations = [
        migrations.AlterField(
            model_name='btutor',
            name='created',
            field=models.DateTimeField(default='2020-01-05', max_length=200),
        ),
        migrations.AlterField(
            model_name='downloadformat',
            name='created',
            field=models.DateTimeField(default='2020-01-05', max_length=200),
        ),
        migrations.AlterField(
            model_name='qsubject',
            name='agn',
            field=models.FloatField(blank=True, default=0, max_length=2, null=True),
        ),
        migrations.AlterField(
            model_name='qsubject',
            name='atd',
            field=models.FloatField(blank=True, default=0, max_length=2, null=True),
        ),
        migrations.AlterField(
            model_name='qsubject',
            name='exam',
            field=models.FloatField(blank=True, default=0, max_length=2, null=True),
        ),
        migrations.AlterField(
            model_name='qsubject',
            name='test',
            field=models.FloatField(blank=True, default=0, max_length=2, null=True),
        ),
        migrations.AlterField(
            model_name='qsubject',
            name='total',
            field=models.FloatField(blank=True, default=0, max_length=4, null=True),
        ),
    ]
| StarcoderdataPython |
3399941 | #/usr/bin/env python
# Author: <NAME>
# Histroy:
# 2021-03-14 Define plot_fft function
#
import numpy as np
import matplotlib.pyplot as plt
def plot_fft(freq, amp, ylabel=None):
    '''
    Make three plots of an amplitude spectrum:
        1. Amplitude - Frequency
        2. Amplitude - Log-frequency
        3. Amplitude - Period (1/frequency)

    freq must not contain zero (the period panel would divide by zero).

    :param freq: frequency samples in Hz (array-like, non-zero)
    :param amp: amplitude spectrum values, same length as freq
    :param ylabel: optional y-axis label; defaults to "Amplitude spectrum"
        (parameter previously accepted but ignored)
    '''
    # Work on a float copy: np.reciprocal on an integer array truncates to 0,
    # which silently broke the period panel for integer frequency inputs.
    freq = np.asarray(freq, dtype=float)
    label = ylabel if ylabel is not None else "Amplitude spectrum"
    plt.figure(1, figsize=(10, 4))
    plt.rcParams['xtick.bottom'] = True
    plt.rcParams['xtick.labelbottom'] = True
    plt.rcParams['xtick.top'] = False
    plt.rcParams['xtick.labeltop'] = False
    # Panel 1: linear frequency axis.
    plt.subplot(1, 3, 1)
    plt.xlabel("Frequency (Hz)")
    plt.ylabel(label)
    plt.plot(freq, amp)
    # Panel 2: logarithmic frequency axis.
    plt.subplot(1, 3, 2)
    plt.xlabel("Frequency (Hz)")
    plt.ylabel(label)
    plt.plot(freq, amp)
    plt.xscale("log")
    # Panel 3: period axis (1/f), now safe for integer input.
    plt.subplot(1, 3, 3)
    plt.xlabel("Period (s)")
    plt.ylabel(label)
    plt.plot(np.reciprocal(freq), amp)
    plt.tight_layout()
    plt.show()
| StarcoderdataPython |
50885 | import Foundation
from PyObjCTest.testhelper import PyObjC_TestClass3
from PyObjCTools.TestSupport import TestCase
class TestNSHost(TestCase):
    """Exercise NSHost creation by address and by name (PyObjC bridge)."""

    def testCreation(self):
        #
        # This gives an exception on GNUstep:
        #   Foundation.NSRecursiveLockException - unlock: failed to unlock mutex
        #
        # testIndirectCreation performs the same operation from Objective-C
        # and doesn't fail. I don't see any significant differences in the
        # implementation of -hostWithAddress: and -hostWithName: yet the latter
        # does not have the problem we're seeing here.
        #
        o = Foundation.NSHost.hostWithAddress_(b"127.0.0.1".decode("ascii"))
        self.assertEqual(o.addresses(), (b"127.0.0.1".decode("ascii"),))
        self.assertEqual(o.address(), b"127.0.0.1".decode("ascii"))

    def testCreation2(self):
        # Resolving "localhost" may yield IPv4 only or IPv4+IPv6 depending on
        # the system configuration; accept either.
        o = Foundation.NSHost.hostWithName_(b"localhost".decode("ascii"))
        lst = sorted(o.addresses())
        self.assertIn(
            lst,
            (
                [b"127.0.0.1".decode("ascii"), b"::1".decode("ascii")],
                [b"127.0.0.1".decode("ascii")],
            ),
        )
        # address() must be the first entry of addresses().
        self.assertEqual(o.address(), o.addresses()[0])

    def testIndirectCreation(self):
        # Same creation path driven from Objective-C via the test helper class.
        o = PyObjC_TestClass3.createAHostWithAddress_(b"127.0.0.1".decode("ascii"))
        self.assertEqual(o.address(), b"127.0.0.1".decode("ascii"))

    def testMethods(self):
        # Verify the bridge metadata: BOOL arguments/results are mapped.
        self.assertArgIsBOOL(Foundation.NSHost.setHostCacheEnabled_, 0)
        self.assertResultIsBOOL(Foundation.NSHost.isHostCacheEnabled)
        self.assertResultIsBOOL(Foundation.NSHost.isEqualToHost_)
| StarcoderdataPython |
3388583 |
def read_sensor():
    """Read the DHT sensor and return a b'temp,hum' message, or an error string.

    Side effect: updates the module-level globals `temp` and `hum` that
    web_page() later reads.
    """
    global temp, hum
    temp = hum = 0
    try:
        # `sensor` is created elsewhere in this file (MicroPython DHT driver
        # — TODO confirm; the setup code is outside this chunk).
        sensor.measure()
        temp = sensor.temperature()
        hum = sensor.humidity()
        if (isinstance(temp, float) and isinstance(hum, float)) or (isinstance(temp, int) and isinstance(hum, int)):
            # NOTE(review): a mixed int/float pair fails this check and is
            # reported as invalid — confirm that is intended.
            # NOTE(review): bytes.format only exists on MicroPython; this
            # line would fail on CPython.
            msg = (b'{0:3.1f},{1:3.1f}'.format(temp, hum))
            # temperature to Fahrenheit: temp = temp * (9/5) + 32.0
            # calibrate temperature
            temp = temp - 4.0
            # NOTE(review): the -4.0 calibration is applied after `msg` was
            # formatted, so msg carries the uncalibrated reading while the
            # globals carry the calibrated one — confirm which is wanted.
            hum = round(hum, 2)
            return(msg)
        else:
            return('Invalid sensor readings.')
    except OSError as e:
        return('Failed to read sensor.')
def web_page():
    """Build the JSON response body from the global temp/hum readings.

    The payload is assembled by string concatenation (presumably to avoid
    depending on a json module on the target board — TODO confirm).
    NOTE(review): the literal's internal whitespace below may differ from the
    original file; leading indentation was stripped during extraction.
    """
    html = """{
"sensor1":
{
"temperature":"""+str(temp)+""",
"humidity":"""+str(hum)+"""
}
}"""
    return html
# Minimal blocking HTTP server: accept one connection at a time on port 80
# and always answer with the JSON sensor payload, regardless of the request.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(5)
while True:
    conn, addr = s.accept()
    print('Got a connection from %s' % str(addr))
    # Read (and log) the request; its content is never parsed.
    request = conn.recv(1024)
    print('Content = %s' % str(request))
    # Refresh the global temp/hum readings before rendering the response.
    sensor_readings = read_sensor()
    print(sensor_readings)
    response = web_page()
    # NOTE(review): sending `str` on a socket works on MicroPython but would
    # require bytes on CPython — confirm the target platform.
    conn.send('HTTP/1.1 200 OK\n')
    conn.send('Content-Type: application/json\n')
    conn.send('Connection: close\n\n')
    conn.sendall(response)
    conn.close()
| StarcoderdataPython |
9656037 | <gh_stars>0
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.conf import settings
class User(AbstractUser):
    # Custom user model; no extra fields yet, but declared up front so the
    # project can extend it later without swapping AUTH_USER_MODEL mid-life.
    pass
class Post(models.Model):
    # A short text post written by one user; deleted with its author.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField(max_length=255)
    date_posted = models.DateTimeField(auto_now_add=True)  # set once at creation

    def __str__(self):
        return f"ID: {self.pk} Author: {self.author} Posted at: {self.date_posted}."
class Follow(models.Model):
    # Directed follow edge: `user` follows `following_user`.
    # user.following -> Follow rows where this user is the follower;
    # user.followers -> Follow rows where this user is being followed.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')
    following_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followers')

    def __str__(self):
        return f"{self.user}"
class Like(models.Model):
    # One user's like of one post; removed with either endpoint.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)

    def __str__(self):
        return f"{self.user} liked {self.post}"
3365953 | import pytest
from unittest import mock
from aio_message_handler.consumer import Consumer
@pytest.fixture
def create_mock_coro(monkeypatch):
    # Factory fixture: returns a builder that produces (mock, coroutine)
    # pairs — awaiting the coroutine records the call on the Mock.  If
    # `to_patch` (a "module.attr" dotted target) is given, the coroutine is
    # monkeypatched in at that location for the duration of the test.
    def _create_mock_patch_coro(to_patch=None):
        m = mock.Mock()

        async def _coro(*args, **kwargs):
            return m(*args, **kwargs)

        if to_patch:
            monkeypatch.setattr(to_patch, _coro)
        return m, _coro

    return _create_mock_patch_coro
@pytest.fixture
def consumer():
    # A Consumer wired with placeholder AMQP settings; no connection is made
    # at construction time.
    return Consumer(
        amqp_url="url",
        queue="queue",
        exchange="exchange",
        binding_key="key"
    )
@pytest.fixture
def stopped_consumer():
    # Same placeholder Consumer, but with the private `_stopped` flag forced
    # on so tests can exercise already-stopped behaviour.
    c = Consumer(
        amqp_url="url",
        queue="queue",
        exchange="exchange",
        binding_key="key"
    )
    c._stopped = True
    return c
@pytest.fixture
def mock_conn():
    # Plain Mock standing in for an AMQP connection object.
    mock_connection = mock.Mock()
    return mock_connection
| StarcoderdataPython |
3455313 | <filename>models/SyncNetModelFBank.py
#! /usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torchaudio
class SyncNetModel(nn.Module):
    """Two-stream SyncNet: embeds audio (log-mel filterbank) and lip-region
    video into nOut-dimensional spaces, with one head per stream for
    synchronisation and one for identity (speaker / face)."""

    def __init__(self, nOut = 1024, stride = 1):
        super(SyncNetModel, self).__init__();
        # Audio front-end: 2-D CNN over the 40-band log-mel spectrogram
        # (frequency x time); `stride` controls temporal down-sampling of the
        # final conv layer.
        self.netcnnaud = nn.Sequential(
            nn.Conv2d(1, 96, kernel_size=(5,7), stride=(1,1), padding=(2,2)),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(1,3), stride=(1,2)),

            nn.Conv2d(96, 256, kernel_size=(5,5), stride=(2,1), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,3), stride=(2,1)),

            nn.Conv2d(256, 384, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True),

            nn.Conv2d(384, 256, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),

            nn.Conv2d(256, 256, kernel_size=(3,3), padding=(1,1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(3,3), stride=(2,2)),

            nn.Conv2d(256, 512, kernel_size=(4,1), padding=(0,0), stride=(1,stride)),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        );
        # 1x1-conv heads over the 512-channel audio features: sync embedding
        # (netfcaud) and speaker-identity embedding (netfcspk).
        self.netfcaud = nn.Sequential(
            nn.Conv1d(512, 512, kernel_size=1),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Conv1d(512, nOut, kernel_size=1),
        );
        # 1x1-conv heads over the video features: lip-sync embedding
        # (netfclip) and face-identity embedding (netfcface).
        self.netfclip = nn.Sequential(
            nn.Conv1d(512, 512, kernel_size=1),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Conv1d(512, nOut, kernel_size=1),
        );
        self.netfcspk = nn.Sequential(
            nn.Conv1d(512, 512, kernel_size=1),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Conv1d(512, nOut, kernel_size=1),
        );
        self.netfcface = nn.Sequential(
            nn.Conv1d(512, 512, kernel_size=1),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Conv1d(512, nOut, kernel_size=1),
        );
        # Video front-end: 3-D CNN over RGB frame stacks; `stride` controls
        # temporal down-sampling of the first conv layer.
        self.netcnnlip = nn.Sequential(
            nn.Conv3d(3, 96, kernel_size=(5,7,7), stride=(stride,2,2), padding=0),
            nn.BatchNorm3d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2)),

            nn.Conv3d(96, 256, kernel_size=(1,5,5), stride=(1,2,2), padding=(0,1,1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1)),

            nn.Conv3d(256, 256, kernel_size=(1,3,3), padding=(0,1,1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),

            nn.Conv3d(256, 256, kernel_size=(1,3,3), padding=(0,1,1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),

            nn.Conv3d(256, 256, kernel_size=(1,3,3), padding=(0,1,1)),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2)),

            nn.Conv3d(256, 512, kernel_size=(1,6,6), padding=0),
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True),
        );
        # Per-utterance normalisation of the 40-band log-mel features.
        self.instancenorm = nn.InstanceNorm1d(40)
        # Waveform -> 40-band mel spectrogram: 16 kHz input, 400-sample
        # (25 ms) window, 160-sample (10 ms) hop.
        self.torchfb = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=512, win_length=400, hop_length=160, f_min=0.0, f_max=8000, pad=0, n_mels=40)

    def forward_vid(self, x):
        ## Image stream
        # x: batch of RGB clips — presumably (N, 3, T, H, W); TODO confirm
        # against the data loader.
        mid = self.netcnnlip(x);
        mid = mid.view((mid.size()[0], mid.size()[1], -1)); # N x (ch x 24)
        out1 = self.netfclip(mid);   # lip-sync embedding
        out2 = self.netfcface(mid);  # face-identity embedding
        return out1, out2

    def forward_aud(self, x):
        ## Audio stream
        # x: raw waveform batch — presumably (N, samples) at 16 kHz; TODO
        # confirm against the data loader.
        x = self.torchfb(x)+1e-6          # mel power; +1e-6 guards log(0)
        x = self.instancenorm(x.log())    # normalised log-mel features
        # Trim the first/last frame and stop gradients flowing into the
        # (non-learned) filterbank features.
        x = x[:,:,1:-1].detach()
        mid = self.netcnnaud(x.unsqueeze(1)); # N x ch x 24 x M
        mid = mid.view((mid.size()[0], mid.size()[1], -1)); # N x (ch x 24)
        out1 = self.netfcaud(mid);   # audio-sync embedding
        out2 = self.netfcspk(mid);   # speaker-identity embedding
        return out1, out2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.