id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1 value |
|---|---|---|
3383001 | <gh_stars>0
import pandas as pd
from tqdm import tqdm

tqdm.pandas()  # enables Series/DataFrame .progress_apply with a tqdm progress bar
# Load concat file SRS: one row per (sample, transcript) with kallisto-style
# pipe-separated "target_id" annotations and a "tpm" expression column.
df = pd.read_csv("/gstock/biolo_datasets/ENCODE/ENCODE_SRS_concat.tsv.gz", compression="gzip", sep="\t")
df[["ENST", "ENSG", "VEGAT", "VEGAG", "transcript_id", "GeneID", "transcript_length", "transcript_biotype", ""]] = df[
"target_id"
].str.split("|", expand=True)
df = df.loc[df["ENSG"].isna() != True]
df["ENST"] = df["ENST"].apply(lambda r: r.split(".")[0])
df["ENSG"] = df["ENSG"].apply(lambda r: r.split(".")[0])
def filter_tpm(tpm, threshold=0.1):
    """Return the fraction of values in *tpm* strictly above *threshold*.

    :param tpm: pandas Series of TPM expression values for one transcript
    :param threshold: expression cutoff; defaults to 0.1 (the original
        hard-coded value, kept for backward compatibility)
    :return: ratio of expressed samples, in [0, 1]
    """
    return tpm.loc[tpm > threshold].shape[0] / tpm.shape[0]
# For each transcript, the fraction of samples where it is expressed (TPM > 0.1).
tpm_transcripts_ratio = df.groupby("ENST")["tpm"].progress_apply(filter_tpm)
print(tpm_transcripts_ratio)
tpm_transcripts_ratio = tpm_transcripts_ratio.rename("TPM_ratio").reset_index()
# Write the per-transcript summary next to the input data.
tpm_transcripts_ratio.to_csv(
    "/gstock/biolo_datasets/ENCODE/ENCODE_SRS_TPM_summary.tsv.gz", compression="gzip", sep="\t", index=False
)
# df = pd.merge(df, tpm_transcripts_ratio.rename('TPM_ratio').reset_index(), on='ENST')
# df
3369331 | <gh_stars>10-100
import logging
from abc import ABC, abstractmethod
from typing import Optional, Dict
import copy
import pandas as pd
import xarray as xr
from pywatts.core.computation_mode import ComputationMode
from pywatts.core.filemanager import FileManager
from pywatts.core.run_setting import RunSetting
from pywatts.utils._xarray_time_series_utils import _get_time_indexes
from pywatts.core.summary_object import SummaryObjectList, SummaryCategory
logger = logging.getLogger(__name__)
class BaseStep(ABC):
    """
    The base class of all steps.

    A step wraps a module in the pipeline graph: it pulls data from its
    input/target steps, triggers computation, caches/buffers the result,
    and serves time-windowed slices of it to downstream steps.

    :param input_steps: The input steps
    :type input_steps: Optional[Dict[str, BaseStep]]
    :param targets: The target steps
    :type targets: Optional[Dict[str, BaseStep]]
    :param condition: A function which evaluates to False or True for detecting if the module should be executed.
    :type condition: Callable
    :param computation_mode: The computation mode for this module
    :type computation_mode: ComputationMode
    """

    def __init__(self, input_steps: Optional[Dict[str, "BaseStep"]] = None,
                 targets: Optional[Dict[str, "BaseStep"]] = None, condition=None,
                 computation_mode=ComputationMode.Default, name="BaseStep"):
        self.default_run_setting = RunSetting(computation_mode=computation_mode)
        self.current_run_setting = self.default_run_setting.clone()
        self.input_steps: Dict[str, "BaseStep"] = dict() if input_steps is None else input_steps
        self.targets: Dict[str, "BaseStep"] = dict() if targets is None else targets
        self.condition = condition
        # Cache of the most recent _compute result together with the request
        # window it answers, so repeated get_result calls for the same
        # (start, end) range avoid recomputation.
        self.cached_result = {"cached": None, "start": None, "end": None}
        self.name = name
        self.id = -1  # pipeline-assigned id; -1 until registered
        self.finished = False
        self.last = True
        self._current_end = None  # end timestamp of the range computed so far
        # Buffer of already computed results, one DataArray per output name.
        self.buffer: Dict[str, xr.DataArray] = {}
        self.training_time = SummaryObjectList(self.name + " Training Time", category=SummaryCategory.FitTime)
        self.transform_time = SummaryObjectList(self.name + " Transform Time", category=SummaryCategory.TransformTime)

    def get_result(self, start: pd.Timestamp, end: Optional[pd.Timestamp], buffer_element: str = None,
                   return_all=False):
        """
        This method is responsible for providing the result of this step.
        Therefore,
        this method triggers the get_input and get_target data methods.
        Additionally, it triggers the computations and checks if all data are processed.

        :param start: The start date of the requested results of the step
        :type start: pd.Timestamp
        :param end: The end date of the requested results of the step (exclusive)
        :type end: Optional[pd.Timestamp]
        :param buffer_element: if the buffer of the step contains multiple results, this determines the result which is
                               returned.
        :type buffer_element: str
        :param return_all: Flag that indicates if all results in the buffer should be returned.
        :type return_all: bool
        :return: The resulting data or None if no data are calculated
        """
        # Check if step should be executed.
        if self._should_stop(start, end):
            return None
        # Only execute the module if the step is not finished and the results are not yet calculated
        if not self.finished and not (end is not None and self._current_end is not None and end <= self._current_end):
            # Recompute only if nothing is buffered yet or the request extends
            # past what was computed so far.
            if not self.buffer or not self._current_end or end > self._current_end:
                self.cached_result["cached"] = self._compute(start, end)
                self.cached_result["start"] = start
                self.cached_result["end"] = end
                self._current_end = end
            if not end:
                # No end given: a single one-shot computation finishes the step.
                self.finished = True
            else:
                self.finished = not self.further_elements(end)
            # Only call callbacks if the step is finished
            if self.finished:
                self._callbacks()
        # Check if the cached results fits to the request, if yes return it.
        # Deep copies protect the cache from mutation by downstream steps.
        if self.cached_result["cached"] is not None and self.cached_result["start"] == start and self.cached_result[
                "end"] == end:
            return copy.deepcopy(self.cached_result["cached"]) if return_all else copy.deepcopy(
                self.cached_result["cached"][buffer_element]) if buffer_element is not None else copy.deepcopy(
                list(self.cached_result["cached"].values())[0])
        # Otherwise slice the requested window out of the buffer.
        return self._pack_data(start, end, buffer_element, return_all=return_all)

    def _compute(self, start, end) -> Dict[str, xr.DataArray]:
        # Hook: subclasses perform the actual fit/transform work here.
        pass

    def further_elements(self, counter: pd.Timestamp) -> bool:
        """
        Checks if there exist at least one data for the time after counter.

        :param counter: The timestamp for which it should be tested if there exist further data after it.
        :type counter: pd.Timestamp
        :return: True if there exist further data
        :rtype: bool
        """
        # Data in the own buffer past `counter` is sufficient.
        if not self.buffer or all(
                [counter < b.indexes[_get_time_indexes(self.buffer)[0]][-1] for b in self.buffer.values()]):
            return True
        # Otherwise all inputs and targets must still be able to deliver.
        for input_step in self.input_steps.values():
            if not input_step.further_elements(counter):
                return False
        for target_step in self.targets.values():
            if not target_step.further_elements(counter):
                return False
        return True

    def _pack_data(self, start, end, buffer_element=None, return_all=False):
        # Provide requested data: slice [start, end) out of the buffer, or
        # return the whole buffer when no valid window is given.
        time_index = _get_time_indexes(self.buffer)
        if end and start and end > start:
            index = list(self.buffer.values())[0].indexes[time_index[0]]
            # Clamp start to the first available timestamp.
            start = max(index[0], start.to_numpy())
            # After sel copy is not needed, since it returns a new array.
            if buffer_element is not None:
                return self.buffer[buffer_element].sel(
                    **{time_index[0]: index[(index >= start) & (index < end.to_numpy())]})
            elif return_all:
                return {key: b.sel(**{time_index[0]: index[(index >= start) & (index < end.to_numpy())]}) for
                        key, b in self.buffer.items()}
            else:
                return list(self.buffer.values())[0].sel(
                    **{time_index[0]: index[(index >= start) & (index < end.to_numpy())]})
        else:
            # No (valid) window requested: hand out everything and finish.
            self.finished = True
            if buffer_element is not None:
                return self.buffer[buffer_element].copy()
            elif return_all:
                return copy.deepcopy(self.buffer)
            else:
                return list(self.buffer.values())[0].copy()

    def _transform(self, input_step):
        # Hook: subclasses apply the wrapped module's transform here.
        pass

    def _fit(self, input_step, target_step):
        # Hook: subclasses fit the wrapped module here.
        pass

    def _callbacks(self):
        # Hook: called once when the step finishes (e.g. for plotting/saving).
        pass

    def _post_transform(self, result):
        # Normalize the module's result to a {name: DataArray} dict and append
        # it to the buffer along the time dimension.
        if isinstance(result, dict) and len(result) <= 1:
            result = {self.name: list(result.values())[0]}
        elif not isinstance(result, dict):
            result = {self.name: result}
        if not self.buffer:
            self.buffer = result
        else:
            # Time dimension is mandatory, consequently there dim has to exist
            dim = _get_time_indexes(result)[0]
            for key in self.buffer.keys():
                self.buffer[key] = xr.concat([self.buffer[key], result[key]], dim=dim)
        return result

    def get_json(self, fm: FileManager) -> Dict:
        """
        Returns a dictionary containing all information needed for restoring the step.

        :param fm: The filemanager which can be used by the step for storing the state of the step.
        :type fm: FileManager
        :return: A dictionary containing all information needed for restoring the step.
        :rtype: Dict
        """
        return {
            "target_ids": {step.id: key for key, step in self.targets.items()},
            "input_ids": {step.id: key for key, step in self.input_steps.items()},
            "id": self.id,
            "module": self.__module__,
            "class": self.__class__.__name__,
            "name": self.name,
            "last": self.last,
            "default_run_setting": self.default_run_setting.save()
        }

    @classmethod
    @abstractmethod
    def load(cls, stored_step: dict, inputs, targets, module, file_manager):
        """
        Restores the step.

        :param stored_step: Information about the stored step
        :param inputs: The input steps of the step which should be restored
        :param targets: The target steps of the step which should be restored
        :param module: The module which is contained by this step
        :param file_manager: The filemanager of the step
        :return: The restored step.
        """

    def _get_input(self, start, batch):
        # Hook: subclasses fetch the input data for the given window.
        return None

    def _get_target(self, start, batch):
        # Hook: subclasses fetch the target data for the given window.
        return None

    def _should_stop(self, start, end) -> bool:
        # Fetch input and target data
        input_result = self._get_input(start, end)
        target_result = self._get_target(start, end)
        # Check if either the condition is True or some of the previous steps stopped (return_value is None)
        return (self.condition is not None and not self.condition(input_result, target_result)) or \
               self._input_stopped(input_result) or self._input_stopped(target_result)

    @staticmethod
    def _input_stopped(input_data):
        # A non-empty dict containing any None value means an upstream step stopped.
        return (input_data is not None and len(input_data) > 0 and any(map(lambda x: x is None, input_data.values())))

    def reset(self):
        """
        Resets all information of the step concerning a specific run.
        """
        self.buffer = {}
        self.finished = False
        self.current_run_setting = self.default_run_setting.clone()

    def set_run_setting(self, run_setting: RunSetting):
        """
        Sets the run setting of the step for the current run. Note that after reset the default setting is restored.
        Moreover, setting the computation_mode is only possible if the computation_mode is not set explicitly while
        adding the corresponding module to the pipeline.

        :param run_setting: The run setting which should be merged into the default setting.
        :type run_setting: RunSetting
        """
        self.current_run_setting = self.default_run_setting.update(run_setting)
| StarcoderdataPython |
1660973 | from astrodash.preprocessing import ReadSpectrumFile
from astrodash.helpers import temp_list
import pickle
import os
import gzip
class SaveTemplateSpectra(object):
    """Reads SN and galaxy template spectra and pickles them to one gzipped file."""

    def __init__(self, parameterFile):
        """Load wavelength-grid parameters (w0, w1, nw) from a pickled parameter file."""
        with open(parameterFile, 'rb') as f:
            pars = pickle.load(f)
        self.w0, self.w1, self.nw = pars['w0'], pars['w1'], pars['nw']

    def read_template_file(self, filename):
        """Read one template spectrum via ReadSpectrumFile (format chosen by extension)."""
        readSpectrumFile = ReadSpectrumFile(filename, self.w0, self.w1, self.nw)
        spectrum = readSpectrumFile.file_extension()
        return spectrum

    def template_spectra_to_list(self, tempFileList, templateDirectory):
        """Read every file named in *tempFileList* (relative to *templateDirectory*).

        Returns the spectra as a list, printing each filename as progress output.
        """
        tempList = temp_list(tempFileList)
        templates = []
        for filename in tempList:
            spectrum = self.read_template_file(templateDirectory + filename)
            templates.append(spectrum)
            print(filename)
        return templates

    def save_templates(self, snTempFileList, snTemplateDirectory, galTempFileList, galTemplateDirectory, saveFilename):
        """Read SN and galaxy template lists and pickle {'sn': ..., 'gal': ...} to *saveFilename*."""
        snTemplates = self.template_spectra_to_list(snTempFileList, snTemplateDirectory)
        galTemplates = self.template_spectra_to_list(galTempFileList, galTemplateDirectory)
        templates = {'sn': snTemplates, 'gal': galTemplates}
        # Saving the objects (gzip-compressed; protocol 2 keeps Python 2 compatibility)
        with gzip.open(saveFilename, 'wb') as f:
            pickle.dump(templates, f, protocol=2)
        print("Saved templates to %s" % saveFilename)
def save_templates():
    """Build the combined SN + galaxy template file and return its filename.

    Template lists are resolved relative to this script's directory; the
    parameter file and output path are relative to the working directory.
    """
    scriptDirectory = os.path.dirname(os.path.abspath(__file__))
    snidTemplateDirectory = os.path.join(scriptDirectory, "../templates/snid_templates_Modjaz_BSNIP/")
    snidTempFileList = snidTemplateDirectory + 'templist.txt'
    galTemplateDirectory = os.path.join(scriptDirectory, "../templates/superfit_templates/gal/")
    galTempFileList = galTemplateDirectory + 'gal.list'
    saveTemplateSpectra = SaveTemplateSpectra('data_files/training_params.pickle')
    saveFilename = 'data_files/sn_and_gal_templates.pklz'
    saveTemplateSpectra.save_templates(snidTempFileList, snidTemplateDirectory, galTempFileList, galTemplateDirectory,
                                       saveFilename)
    return saveFilename
# Script entry point: regenerate the template archive when run directly.
if __name__ == '__main__':
    templatesFilename1 = save_templates()
| StarcoderdataPython |
30954 | <filename>test/test_commands/test_tail.py<gh_stars>10-100
from pypsi.shell import Shell
from pypsi.commands.tail import TailCommand
class CmdShell(Shell):
    """Minimal pypsi shell exposing only the `tail` command, for testing."""
    tail = TailCommand()
class TestTail:
    """Test fixture around TailCommand hosted in a pypsi Shell."""

    def setup(self):
        # Fresh shell per test.
        self.shell = CmdShell()

    def teardown(self):
        # restore() presumably undoes the stdio changes Shell made -- confirm in pypsi docs.
        self.shell.restore()
| StarcoderdataPython |
4819445 | <reponame>TheTrafficNetwork/Automation
"""
Takes a list of devices from NetBox with a LibreNMS tag and checks to see if
they are programmed in LibreNMS. If they are missing, they are then added.
"""
import json
import os
import pynetbox
import requests
from dotenv import find_dotenv, load_dotenv
from sty import fg
from urllib3.exceptions import InsecureRequestWarning
# Disable warnings for self signed certificates (LibreNMS/NetBox often run with them)
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# Load environment variables from the nearest .env file
load_dotenv(find_dotenv())
def create_missing_devices() -> None:
    """Parse NetBox for a list of all devices with the "librenms" tag and
    create an instance in LibreNMS for each device that is not there yet.

    Credentials and URLs are read from environment variables (.env).
    Devices without a primary IP are reported in red and skipped.
    """
    NETBOX_API_KEY = os.getenv("NETBOX_API_KEY")
    NETBOX_URL = os.getenv("NETBOX_URL")
    LIBRENMS_URL = os.getenv("LIBRENMS_URL")
    LIBRENMS_API_KEY = os.getenv("LIBRENMS_API_KEY")
    nb = pynetbox.api(NETBOX_URL, token=NETBOX_API_KEY)
    nb.http_session.verify = False  # NetBox may use a self-signed certificate
    device_list = nb.dcim.devices.filter(tag="librenms")
    for device in device_list:
        # primary_ip renders as "a.b.c.d/prefix"; keep only the address part.
        device_ip = (str(device.primary_ip).split("/"))[0]
        if device_ip == "None":
            color = fg.red
            print(f"{color}{device.name} missing primary ip address.{fg.rs}")
            continue
        check = check_in_librenms(device_ip, LIBRENMS_API_KEY, LIBRENMS_URL)
        if check is False:
            create_librenms_device(device_ip, LIBRENMS_API_KEY, LIBRENMS_URL)
def check_in_librenms(ip: str, api: str, url: str) -> bool:
    """Checks LibreNMS for the existence of a device.

    Args:
        ip (str): IP address of the device to look up.
        api (str): API Key to access LibreNMS.
        url (str): API URL for LibreNMS device lookup (the IP is appended).

    Returns:
        bool: Whether the device exists in LibreNMS ("status" == "ok").
    """
    get_device_url = url + ip
    headers = {"X-Auth-Token": api}
    response = requests.request("GET", get_device_url, headers=headers).json()
    # TODO raise an error for message in response
    # Direct boolean expression instead of `True if ... else False`.
    return response["status"] == "ok"
def create_librenms_device(ip: str, api: str, url: str) -> None:
    """Creates a device instance in LibreNMS via its REST API.

    Args:
        ip (str): IP address of the device to add to LibreNMS.
        api (str): API Key to access LibreNMS.
        url (str): API URL for LibreNMS device creation.

    The HTTP response text is printed color-coded: green on 200 (created),
    yellow on 500, red otherwise.
    """
    version = "v2c"  # SNMP protocol version used for polling
    SNMP_COMMUNITY = os.getenv("SNMP_COMMUNITY")
    payload = {"hostname": ip, "version": version, "community": SNMP_COMMUNITY}
    data = json.dumps(payload)
    headers = {"X-Auth-Token": api, "Content-Type": "text/plain"}
    response = requests.request("POST", url, headers=headers, data=data)
    if response.status_code == 200:
        color = fg.green
    elif response.status_code == 500:
        color = fg.yellow
    else:
        color = fg.red
    print(f"{color}{response.text}{fg.rs}")
if __name__ == "__main__":
create_missing_devices()
| StarcoderdataPython |
56224 | <reponame>youlei202/tensorforce-lei<filename>tensorforce/execution/runner.py
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
from six.moves import xrange
class Runner(object):
    """
    Simple runner for non-realtime single-process execution.
    """

    def __init__(self, agent, environment, repeat_actions=1, history=None):
        """
        Initialize a single Runner object (one Agent/one Environment).

        Args:
            agent (Agent): Agent object to use for the run.
            environment (Environment): Environment object to use for the run.
            repeat_actions (int): How many times the same given action will be repeated in subsequent calls to
                Environment's `execute` method. Rewards collected in these calls are accumulated and reported
                as a sum in the following call to Agent's `observe` method.
            history (dict): A dictionary containing an already run experiment's results. Keys should be:
                episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
        """
        self.agent = agent
        self.environment = environment
        self.repeat_actions = repeat_actions
        self.episode = None  # the Agent's episode number
        self.timestep = None  # the Agent's timestep
        self.episode_timestep = None  # the timestep in the current episode
        # lists of episode data (rewards, wall-times/timesteps)
        self.start_time = None  # the time when an episode was started
        self.episode_rewards = None  # list of accumulated episode rewards
        self.episode_timesteps = None  # list of total timesteps taken in the episodes
        self.episode_times = None  # list of durations for the episodes
        self.reset(history)

    def reset(self, history=None):
        # If history is empty, use default values in history.get().
        if not history:
            history = dict()
        self.episode_rewards = history.get('episode_rewards', list())
        self.episode_timesteps = history.get('episode_timesteps', list())
        self.episode_times = history.get('episode_times', list())

    def run(
            self,
            timesteps=None,
            episodes=None,
            max_episode_timesteps=None,
            deterministic=False,
            episode_finished=None
    ):
        """
        Runs the agent on the environment.

        Args:
            timesteps (int): Max. number of total timesteps to run (across episodes).
            episodes (int): Max. number of episodes to run.
            max_episode_timesteps (int): Max. number of timesteps per episode.
            deterministic (bool): If true, pick actions from model without exploration/sampling.
            episode_finished (callable): Function handler taking a `Runner` argument and returning a boolean indicating
                whether to continue execution. For instance, useful for reporting intermediate performance or
                integrating termination conditions.
        """
        # Keep track of episode reward and episode length for statistics.
        self.start_time = time.time()
        self.agent.reset()
        self.episode = self.agent.episode
        # Limits are relative to the agent's current counters, which may be
        # non-zero when resuming a previous run.
        if episodes is not None:
            episodes += self.agent.episode
        self.timestep = self.agent.timestep
        if timesteps is not None:
            timesteps += self.agent.timestep
        # episode loop
        while True:
            episode_start_time = time.time()
            self.agent.reset()
            state = self.environment.reset()
            episode_reward = 0
            self.episode_timestep = 0
            # timestep (within episode) loop
            while True:
                action = self.agent.act(states=state, deterministic=deterministic)
                if self.repeat_actions > 1:
                    # Repeat the chosen action, summing the rewards; stop
                    # early if the environment terminates mid-repeat.
                    reward = 0
                    for repeat in xrange(self.repeat_actions):
                        state, terminal, step_reward = self.environment.execute(actions=action)
                        reward += step_reward
                        if terminal:
                            break
                else:
                    state, terminal, reward = self.environment.execute(actions=action)
                # Enforce the per-episode timestep cap by forcing a terminal.
                if max_episode_timesteps is not None and self.episode_timestep >= max_episode_timesteps:
                    terminal = True
                self.agent.observe(terminal=terminal, reward=reward)
                self.episode_timestep += 1
                self.timestep += 1
                episode_reward += reward
                if terminal or self.agent.should_stop():  # TODO: should_stop also terminate?
                    break
            # Update our episode stats.
            time_passed = time.time() - episode_start_time
            self.episode_rewards.append(episode_reward)
            self.episode_timesteps.append(self.episode_timestep)
            self.episode_times.append(time_passed)
            self.episode += 1
            # Check, whether we should stop this run.
            if (episode_finished is not None and not episode_finished(self)) or \
                    (episodes is not None and self.agent.episode >= episodes) or \
                    (timesteps is not None and self.agent.timestep >= timesteps) or \
                    self.agent.should_stop():
                # agent.episode / agent.timestep are globally updated
                break
        self.agent.close()
        self.environment.close()
| StarcoderdataPython |
100864 | <reponame>EfficientDL/codelab_utils
import setuptools

# Long description for PyPI comes straight from the README.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name='codelab_utils',
    version='0.2',
    author='Efficient Deep Learning Book',
    author_email='<EMAIL>',
    description='Some util methods related to the EDL book codelabs.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/EfficientDL/codelab_utils',
    project_urls={
        "Bug Tracker": 'https://github.com/EfficientDL/codelab_utils/issues'
    },
    license='MIT',
    packages=['codelab_utils'],
    install_requires=['matplotlib'],
)
136748 | import os
import random
import pathlib
import shutil
import glob
import cv2
import numpy as np
def load_name_images(image_path_pattern):
    """Load every image whose path matches *image_path_pattern* (glob).

    Returns a list of (filename, image) tuples where image is an OpenCV
    (numpy BGR) array; unreadable files are reported and skipped.
    """
    name_images = []
    # Collect the files matching the given path pattern.
    image_paths = glob.glob(image_path_pattern)
    # Read each file.
    for image_path in image_paths:
        path = pathlib.Path(image_path)
        fullpath = str(path.resolve())
        print(f"image file (absolute path): {fullpath}")
        filename = path.name
        print(f"image file (name): {filename}")
        # Read the image with OpenCV. cv2.imread returns None (rather than
        # raising) on failure, which the check below relies on; the original
        # called PIL's Image.open without importing it (NameError) and PIL
        # raises instead of returning None.
        image = cv2.imread(fullpath)
        if image is None:
            print(f"could not read image file ({fullpath}).")
            continue
        name_images.append((filename, image))
    return name_images
def scratch_image(image, use_flip=True, use_threshold=True, use_filter=True):
    """Augment *image* by flipping / thresholding / blurring.

    Each enabled method doubles the image list, so with all three enabled the
    result holds 2**3 = 8 variants (the original included).
    """
    # Which augmentation rules to apply (flip, brightness threshold, blur).
    methods = [use_flip, use_threshold, use_filter]
    # Blur filter kernel (unused; GaussianBlur is used below instead)
    # filter1 = np.ones((3, 3))
    # Start from the original image.
    images = [image]
    # Augmentation functions; `scratch[methods]` below uses numpy boolean
    # indexing to keep only the enabled ones.
    scratch = np.array([
        # flip (horizontal)
        lambda x:cv2.flip(x,1),
        # brightness threshold
        lambda x: cv2.threshold(x, 100, 255,cv2.THRESH_TOZERO)[1],
        # blur
        lambda x: cv2.GaussianBlur(x,(5,5), 0),
    ])
    # Augment: apply each function to every image accumulated so far.
    created_images = lambda f, img: np.r_[img, [f(i) for i in img]]
    for func in scratch[methods]:
        images = created_images(func, images)
        print(len(images))  # grows 2 -> 4 -> 8 as each method doubles the set
    return images
def delete_dir(dir_path, is_delete_top_dir=True):
    """Recursively delete everything inside *dir_path*.

    Walks bottom-up, removing files first and then the emptied directories.
    The top-level directory itself is removed only when *is_delete_top_dir*
    is True.
    """
    for current_root, sub_dirs, file_names in os.walk(dir_path, topdown=False):
        for file_name in file_names:
            os.remove(os.path.join(current_root, file_name))
        for sub_dir in sub_dirs:
            os.rmdir(os.path.join(current_root, sub_dir))
    if is_delete_top_dir:
        os.rmdir(dir_path)
# Exit codes returned by main().
RETURN_SUCCESS = 0
RETURN_FAILURE = -1
# Test Image Directory (hold-out images are moved here before augmentation)
TEST_IMAGE_PATH = "./dataset/grass_output"
# Input image glob pattern (NOTE(review): original comments said "face" but
# the paths point at grass data -- confirm)
IMAGE_PATH_PATTERN = "./dataset/grass_output/*"
# Output Directory for the augmented images
OUTPUT_IMAGE_DIR = "./dataset/grass_scratch"
def main():
    """Augment the image dataset ~8x with OpenCV (flip, threshold, blur).

    About a fifth of the inputs are first moved aside as a test set; the
    remaining images are augmented and written to OUTPUT_IMAGE_DIR.
    (The originally Korean banner/comments were reconstructed in English
    because this copy of the source had them line-broken and garbled.)
    """
    print("===================================================================")
    print("Image augmentation using OpenCV")
    print("Multiplies the given image files (flip, threshold, etc. -> 8x)")
    print("===================================================================")
    # Create the output directory and empty it.
    if not os.path.isdir(OUTPUT_IMAGE_DIR):
        os.mkdir(OUTPUT_IMAGE_DIR)
    delete_dir(OUTPUT_IMAGE_DIR, False)
    # Create the test directory and empty it.
    if not os.path.isdir(TEST_IMAGE_PATH):
        os.mkdir(TEST_IMAGE_PATH)
    delete_dir(TEST_IMAGE_PATH, False)
    # Set aside a random sample of the inputs as test files.
    # NOTE(review): the original comment said ~25% but len//5 moves 20%.
    image_files = glob.glob(IMAGE_PATH_PATTERN)
    random.shuffle(image_files)
    for i in range(len(image_files) // 5):
        shutil.move(str(image_files[i]), TEST_IMAGE_PATH)
    # Read the remaining target images.
    name_images = load_name_images(IMAGE_PATH_PATTERN)
    # Augment each target image and save every variant.
    for name_image in name_images:
        # name_image[0] is the file name, name_image[1] the image data.
        filename, extension = os.path.splitext(name_image[0])
        image = name_image[1]
        scratch_face_image = scratch_image(image)
        # Renamed loop variable (was `image`, shadowing the input image).
        for (idx, variant) in enumerate(scratch_face_image):
            # NOTE(review): this copy of the source had the name template
            # replaced by "(unknown)"; the input file's stem is used here.
            output_path = os.path.join(OUTPUT_IMAGE_DIR, f"{filename}_{str(idx)}.jpg")
            print(f"output file (absolute path): {output_path}")
            cv2.imwrite(output_path, variant)
    return RETURN_SUCCESS


if __name__ == "__main__":
    main()
import ast
import operator
import re
from collections import namedtuple
# Column types a table schema may declare.
SCHEMA_TYPES = {'str', 'int', 'bool'}
# Reserved key under which each row's internal id is stored.
ROWID_KEY = '_rowid'
class Literal(namedtuple('Literal', 'value')):
    """A typed literal parsed from query text.

    The raw token (always a str) is coerced to bool, int, or str at
    construction time, so `.value` always carries the concrete value.
    """

    @classmethod
    def eval_value(cls, value):
        """Coerce the raw query token *value* into a bool, int, or str.

        :raises ValueError: when *value* is not a str or cannot be parsed.
        """
        if not isinstance(value, str):
            raise ValueError(f"Parameter {value} must be a str")
        # Explicit bool mapping instead of eval()ing 'True'/'False'.
        if value == 'True':
            return True
        if value == 'False':
            return False
        try:
            return int(value)
        except ValueError:
            pass
        try:
            # ast.literal_eval safely parses quoted string literals; unlike
            # eval() it cannot execute arbitrary code from query text.
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            pass
        raise ValueError(f'Parameter {value} is not valid')

    def __new__(cls, value):
        evaled_value = cls.eval_value(value)
        return super().__new__(cls, evaled_value)
class Column(namedtuple('Column', 'name')):
    """Reference to a table column by name, as parsed from a query."""
    pass
class Comparison(namedtuple('Comparison', 'left, op, right')):
    """A single binary comparison whose operands are Columns or Literals."""

    ops = {
        '=': operator.eq,
        '!=': operator.ne,
        '>': operator.gt,
        '<': operator.lt,
        '<=': operator.le,
        '>=': operator.ge
    }

    def _operand_value(self, operand, side, row):
        """Resolve *operand* (a Column or Literal) to a concrete value.

        Column values are stored as raw strings in the row, so they are
        re-typed through Literal before comparison.
        """
        if type(operand) is Column:
            return Literal(row[operand.name]).value
        if type(operand) is Literal:
            return operand.value
        # Bug fix: the original right-operand branch reported self.left here.
        raise ValueError(f'Invalid {side} value type; {operand}')

    def match(self, row):
        """Return True when *row* satisfies this comparison."""
        left = self._operand_value(self.left, 'left', row)
        right = self._operand_value(self.right, 'right', row)
        return self.ops[self.op](left, right)
class ConditionList(namedtuple('ConditionList', 'comp_type, comparisons')):
    """A boolean combination ('and'/'or') of Comparison objects."""

    types = {'or': any, 'and': all}

    def match(self, row):
        """Return True when *row* satisfies the combined conditions.

        With no combinator set, every row matches unconditionally.
        """
        if not self.comp_type:
            return True
        combine = self.types[self.comp_type]
        return combine(condition.match(row) for condition in self.comparisons)
class CreateDbCmd(namedtuple('CreateDbCmd', 'name')):
    """Command: create database *name*."""

    def execute(self, db_manager):
        db_manager.create_db(self.name)
class UseDbCmd(namedtuple('UseDbCmd', 'name')):
    """Command: switch the current database to *name*."""

    def execute(self, db_manager):
        db_manager.use_db(self.name)
class DeleteDbCmd(namedtuple('DeleteDbCmd', 'name')):
    """Command: delete database *name*."""

    def execute(self, db_manager):
        db_manager.delete_db(self.name)
class CreateTableCmd(namedtuple('CreateTableCmd', 'name, schema')):
    """Command: create table *name* with a {column: type_name} *schema*."""

    def validate(self):
        """Reject schemas declaring column types outside SCHEMA_TYPES."""
        unknown_types = set(self.schema.values()) - SCHEMA_TYPES
        if unknown_types:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        self.validate()
        db_manager.create_table(name=self.name, schema=self.schema)
class DeleteTableCmd(namedtuple('DeleteTableCmd', 'name')):
    """Command: drop table *name*."""

    def execute(self, db_manager):
        db_manager.delete_table(name=self.name)
class AddColumnCmd(namedtuple('AddColumnCmd', 'name, col_type, col_name')):
    """Command: add column *col_name* of type *col_type* to table *name*."""

    def validate(self, db_manager):
        """Raise CommandError if the column already exists or the type is unsupported."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name in schema:
            raise CommandError(f'{self.col_name} col is already existing')
        if self.col_type not in SCHEMA_TYPES:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.add_column(name=self.name,
                              col_name=self.col_name, col_type=self.col_type)
class DelColumnCmd(namedtuple('DelColumnCmd', 'name, col_name')):
    """Command: drop column *col_name* from table *name*."""

    def validate(self, db_manager):
        """Raise CommandError if the column does not exist in the table schema."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name not in schema:
            raise CommandError(f'Col {self.col_name} does not exist')

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.del_column(name=self.name, col_name=self.col_name)
# Map schema type names to actual python types (replaces eval() of the name).
_ROW_TYPE_MAP = {'str': str, 'int': int, 'bool': bool}


def validate_cmd_row_values(schema={}, row={}):
    """Validate every value of *row* against the column types in *schema*.

    :param schema: {column_name: type_name} where type_name is in SCHEMA_TYPES
    :param row: {column_name: raw_value} with raw values as query-text strings
    :raises CommandError: when a value does not parse to the declared type
    NOTE: the mutable default arguments are kept for interface compatibility;
    both dicts are only read here, never mutated.
    """
    for col_name, col_val in row.items():
        lit_val = Literal(col_val)
        # Look the type up instead of eval()ing schema text, which could
        # execute arbitrary expressions.
        needed_col_type = _ROW_TYPE_MAP[schema[col_name]]
        if not isinstance(lit_val.value, needed_col_type):
            raise CommandError(f'Col\'s {col_name} value {col_val} has to be {schema[col_name]}')
class InsertCmd(namedtuple('InsertCmd', 'table, row')):
    """Command: insert *row* (a {column: raw_value} dict) into *table*."""

    def validate(self, db_manager):
        """Require the row to name exactly the schema's columns, with valid values."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.row.keys() != schema.keys():
            raise CommandError(f'Schema {schema.keys()} is mandatory')
        validate_cmd_row_values(schema=schema, row=self.row)

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.insert_row(table=self.table, row=self.row)
def validate_cmd_conditions_list(schema={}, conditions_list=[]):
    """Validate each comparison in *conditions_list* against *schema*.

    Every comparison's left side must be an existing column and its right
    side a literal of that column's declared type.
    :raises CommandError: on an unknown column or a type mismatch
    """
    for comparison in conditions_list.comparisons:
        col = comparison.left
        lit = comparison.right
        # Bug fix: check existence FIRST. The original resolved
        # schema[col.name] before this check, so an unknown column raised
        # KeyError instead of the intended CommandError.
        if col.name not in schema:
            raise CommandError(f'Col {col.name} in conditions does not exist in schema')
        # Look the type up instead of eval()ing the schema's type name.
        needed_col_type = {'str': str, 'int': int, 'bool': bool}[schema[col.name]]
        if not isinstance(lit.value, needed_col_type):
            raise CommandError(f'Col\'s {col.name} value {lit.value} has to be {schema[col.name]}')
class QueryCmd(namedtuple('QueryCmd', 'table, projection, conditions_list')):
    """Command: select *projection* columns from *table* rows matching *conditions_list*.

    projection is either ['*'] or a list of column names.
    """

    def validate(self, db_manager):
        """Raise CommandError when the projection names columns outside the schema."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.projection[0] != '*':
            if set(self.projection) - set(schema.keys()):
                raise CommandError(f'Query projection is enforced by schema; Only {schema.keys()} or * are allowed')
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        """Yield matching rows, re-typed via Literal; _rowid is always included."""
        self.validate(db_manager)
        star_proj = len(self.projection) == 1 and self.projection[0] == '*'
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                result_row = {ROWID_KEY: row[ROWID_KEY]}
                del row[ROWID_KEY]
                for key, val in row.items():
                    if not star_proj:
                        if key in self.projection:
                            result_row[key] = Literal(val).value
                    else:
                        result_row[key] = Literal(val).value
                yield result_row
class DeleteCmd(namedtuple('DeleteCmd', 'table, conditions_list')):
    """Command: delete the rows of *table* that match *conditions_list*."""

    def validate(self, db_manager):
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_conditions_list(schema, self.conditions_list)

    def execute(self, db_manager):
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                db_manager.delete_row(table=self.table, rowid=row['_rowid'])
class UpdateCmd(namedtuple('UpdateCmd', 'table, values, conditions_list')):
    """Command: set *values* on the rows of *table* matching *conditions_list*."""

    def validate(self, db_manager):
        """Validate both the new values and the condition list against the schema."""
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_row_values(schema=schema, row=self.values)
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                db_manager.update_row(table=self.table,
                                      rowid=row['_rowid'], new_row=self.values)
class FromCsvCmd(namedtuple('FromCsvCmd', 'csv_path')):
    """Command: import data from the CSV file at *csv_path*."""

    def execute(self, db_manager):
        db_manager.from_csv(csv_path=self.csv_path)
class ToCsvCmd(namedtuple('ToCsvCmd', 'csv_path')):
    """Command: export data to the CSV file at *csv_path*."""

    def execute(self, db_manager):
        db_manager.to_csv(csv_path=self.csv_path)
class SchemaCmd(namedtuple('SchemaCmd', 'table_name')):
    """Command: return the schema of *table_name*.

    Bug fix: the namedtuple typename was 'FromCsvCmd' (copy-paste error),
    which made reprs and pickling report the wrong class name.
    """

    def execute(self, db_manager):
        schema = db_manager.get_table_schema(self.table_name)
        return schema
class TablesCmd(namedtuple('TablesCmd', 'db_name')):
    """Command: yield the table names of database *db_name*."""

    def execute(self, db_manager):
        yield from db_manager.get_tables(db_name=self.db_name)
class DbCmd(namedtuple('DbCmd', '')):
    """Command: report the currently selected database."""

    def validate(self, db_manager):
        # Nothing to validate; kept for interface parity with other commands.
        pass

    def execute(self, db_manager):
        self.validate(db_manager)
        current_db = db_manager.get_current_db()
        return current_db
class CommandError(Exception):
    """Generic command error raised for invalid or failing user commands."""

    def __init__(self, message):
        # Python-3 style zero-argument super(); identical behavior to the
        # explicit super(CommandError, self) form.
        super().__init__(message)
class QueryParser(object):
re_db_create = re.compile(r'^create\s+sdb\s+(?P<name>\w+);$')
re_db_use = re.compile(r'^use\s+sdb\s+(?P<name>\w+);$')
re_db_delete = re.compile(r'^delete\s+sdb\s+(?P<name>\w+);$')
re_table_create_main = re.compile(r'^create\s+table\s+(?P<name>\w+)\s+columns\s+(?P<columns>((int|str|bool):(\w+)\s?)+);$')
re_table_create_col = re.compile(r'(int|str|bool):(\w+)')
re_table_delete = re.compile(r'^delete\s+table\s+(?P<name>\w+);$')
re_table_add_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+add\s+column\s+(?P<col_type>int|str|bool):(?P<col_name>\w+);$')
re_table_del_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+del\s+column\s+(?P<col_name>\w+);$')
re_table_insert_main = re.compile(r'^insert\s+into\s+(?P<table_name>\w+)\s+values\s+(?P<values>(\w+=(True|False|\d+?|\"(\w|[\/\<\>:`~.,?!@;\'#$%\^&*\-_+=\[\{\]\}\\\|()\ ])*?\")\s?)+?);$')
re_table_values = re.compile(r'(\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
re_where_conditions = re.compile(r'(?P<col_name>\w+?)(?P<op>=|!=|<|>|<=|>=)(?P<value>(\d+)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
re_table_scan_rows = re.compile(r'^query\s+(?P<projection>\*|(\w+\,?)+?)\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_table_update_rows = re.compile(r'^update\s+(?P<table_name>\w+)\s+set\s+(?P<setters>(((\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\"))\s?)+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_table_delete_rows = re.compile(r'^delete\s+in\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
re_from_csv = re.compile(r'^from\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
re_to_csv = re.compile(r'^to\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
re_schema = re.compile(r'^schema\s+(?P<table_name>\w+)\s*?;$')
re_tables = re.compile(r'^tables\s+(?P<db_name>\w+)\s*?;$')
re_db = re.compile(r'^db\s*?;$')
def __init__(self):
    """The parser is stateless; nothing to initialize."""
def _get_parse_methods(self):
    """Yield every callable attribute of the class whose name starts with '_parse'."""
    cls = self.__class__
    for attr_name in dir(cls):
        candidate = getattr(cls, attr_name)
        if callable(candidate) and attr_name.startswith('_parse'):
            yield candidate
def parse(self, query):
    """Return the command object from the first _parse* method that matches."""
    for parser in self._get_parse_methods():
        command = parser(self, query)
        if command is not None:
            return command
    # No pattern matched the query text.
    raise CommandError('No command matches; fix or retry (another) query')
def _parse_db_create(self, query):
    """Match 'create db <name>;' and build the command, else None."""
    match = self.re_db_create.fullmatch(query)
    if match is None:
        return None
    return CreateDbCmd(name=match.group('name'))
def _parse_db_use(self, query):
    """Match 'use db <name>;' and build the command, else None."""
    match = self.re_db_use.fullmatch(query)
    if match is None:
        return None
    return UseDbCmd(name=match.group('name'))
def _parse_db_delete(self, query):
    """Match a database deletion statement and build the command, else None."""
    match = self.re_db_delete.fullmatch(query)
    if match is None:
        return None
    return DeleteDbCmd(name=match.group('name'))
def _parse_table_create(self, query):
    """Match a table-creation statement and extract its column schema, else None."""
    match = self.re_table_create_main.fullmatch(query)
    if match is None:
        return None
    # Each column comes back as a (type, name) pair from the column sub-pattern.
    columns = self.re_table_create_col.findall(match.group('columns'))
    if not columns:
        return None
    schema = dict((col_name, col_type) for col_type, col_name in columns)
    return CreateTableCmd(name=match.group('name'), schema=schema)
def _parse_table_delete(self, query):
    """Match a table deletion statement and build the command, else None."""
    match = self.re_table_delete.fullmatch(query)
    if match is None:
        return None
    return DeleteTableCmd(name=match.group('name'))
def _parse_add_column(self, query):
    """Match 'change table <t> add column <type>:<name>;' and build the command."""
    match = self.re_table_add_column.fullmatch(query)
    if match is None:
        return None
    return AddColumnCmd(name=match.group('name'),
                        col_type=match.group('col_type'),
                        col_name=match.group('col_name'))
def _parse_del_column(self, query):
    """Match 'change table <t> del column <name>;' and build the command."""
    match = self.re_table_del_column.fullmatch(query)
    if match is None:
        return None
    return DelColumnCmd(name=match.group('name'),
                        col_name=match.group('col_name'))
def _parse_insert_row(self, query):
    """Match an INSERT statement and collect its column=value pairs, else None."""
    match = self.re_table_insert_main.fullmatch(query)
    if match is None:
        return None
    # re_table_values yields 4-tuples; only the first two entries matter here.
    pairs = self.re_table_values.findall(match.group('values'))
    if not pairs:
        return None
    row = dict((col_name, col_value) for col_name, col_value, _, _ in pairs)
    return InsertCmd(table=match.group('table_name'), row=row)
def _parse_scan_rows(self, query):
    """Match a 'query <projection> <table> [where ...]' statement, else None."""
    match = self.re_table_scan_rows.fullmatch(query)
    if match is None:
        return None
    projection = match.group('projection').split(',')
    # The WHERE clause is optional: default to an empty condition list.
    condition_list = ConditionList('', [])
    raw_conditions = match.group('conditions')
    if raw_conditions:
        parsed = self.re_where_conditions.findall(raw_conditions)
        comparisons = [Comparison(Column(lhs), operator, Literal(rhs))
                       for lhs, operator, rhs, _, _, _ in parsed]
        condition_list = ConditionList(match.group('op'), comparisons)
    return QueryCmd(table=match.group('table_name'), projection=projection,
                    conditions_list=condition_list)
def _parse_table_update_rows(self, query):
    """Match an UPDATE statement with setters and an optional WHERE, else None."""
    match = self.re_table_update_rows.fullmatch(query)
    if match is None:
        return None
    setters = self.re_table_values.findall(match.group('setters'))
    if not setters:
        return None
    # Optional WHERE clause: same shape as in _parse_scan_rows.
    condition_list = ConditionList('', [])
    raw_conditions = match.group('conditions')
    if raw_conditions:
        parsed = self.re_where_conditions.findall(raw_conditions)
        comparisons = [Comparison(Column(lhs), operator, Literal(rhs))
                       for lhs, operator, rhs, _, _, _ in parsed]
        condition_list = ConditionList(match.group('op'), comparisons)
    new_values = dict((col_name, col_value) for col_name, col_value, _, _ in setters)
    return UpdateCmd(table=match.group('table_name'), values=new_values,
                     conditions_list=condition_list)
def _parse_table_delete_rows(self, query):
    """Match a row-deletion statement with an optional WHERE clause, else None."""
    match = self.re_table_delete_rows.fullmatch(query)
    if match is None:
        return None
    condition_list = ConditionList('', [])
    raw_conditions = match.group('conditions')
    if raw_conditions:
        parsed = self.re_where_conditions.findall(raw_conditions)
        comparisons = [Comparison(Column(lhs), operator, Literal(rhs))
                       for lhs, operator, rhs, _, _, _ in parsed]
        condition_list = ConditionList(match.group('op'), comparisons)
    return DeleteCmd(table=match.group('table_name'),
                     conditions_list=condition_list)
def _parse_tables(self, query):
    """Match 'tables <db>;' and build the listing command, else None."""
    match = self.re_tables.fullmatch(query)
    if match is None:
        return None
    return TablesCmd(db_name=match.group('db_name'))
def _parse_db(self, query):
    """Match the bare 'db;' statement, else None."""
    match = self.re_db.fullmatch(query)
    if match is None:
        return None
    return DbCmd()
def _parse_from_csv(self, query):
    """Match 'from csv <path>.csv;' and build the import command, else None."""
    match = self.re_from_csv.fullmatch(query)
    if match is None:
        return None
    return FromCsvCmd(csv_path=match.group('csv_path'))
def _parse_to_csv(self, query):
    """Match 'to csv <path>.csv;' and build the export command, else None."""
    match = self.re_to_csv.fullmatch(query)
    if match is None:
        return None
    return ToCsvCmd(csv_path=match.group('csv_path'))
def _parse_schema(self, query):
    """Match 'schema <table>;' and build the introspection command, else None."""
    match = self.re_schema.fullmatch(query)
    if match is None:
        return None
    return SchemaCmd(table_name=match.group('table_name'))
4825662 | <reponame>sherlockliu/pythonic
import pytest
@pytest.fixture(scope="function")
def before_1():
    # Function-scoped fixture: runs once before every test that requests it.
    print('\nbefore each test')
def test_1(before_1):
    # Requests the before_1 fixture purely for its setup side effect.
    print('test_1()')
def test_2(before_1):
    # Same fixture; demonstrates that setup runs again for each test function.
    print('test_2()')
| StarcoderdataPython |
198486 | <reponame>robertwenquan/nyu-course-assignment<filename>design-and-analysi-of-algorithms/class5/quick-sort.py
#!/usr/bin/python
import random
__author__ = 'Wen'
def partition(AA, start, end):
    """Partition AA[start..end] (inclusive) in place around a random pivot.

    A randomly chosen element is swapped to `start`, then walked to its final
    sorted position by alternating scans from the right (pointB) and the left
    (pointA), swapping the pivot across out-of-place elements.  Returns the
    pivot's final index.
    """
    pivot = random.randint(start, end)
    AA[start], AA[pivot] = AA[pivot], AA[start]
    pivot = start
    pointA = start+1
    pointB = end
    while True:
        # Scan right-to-left past elements greater than the pivot; the pivot
        # itself terminates the scan, so pointB never runs off the range.
        while (AA[pointB] > AA[pivot]):
            pointB -= 1
        if (pointB == pivot):
            break
        # Swap the pivot rightward; it now sits at pointB.
        AA[pivot], AA[pointB] = AA[pointB], AA[pivot]
        pivot = pointB
        pointB -= 1
        # Scan left-to-right past elements smaller than the pivot.
        while (AA[pointA] < AA[pivot]):
            pointA += 1
        if (pointA == pivot):
            break
        # Swap the pivot leftward; it now sits at pointA.
        AA[pivot], AA[pointA] = AA[pointA], AA[pivot]
        pivot = pointA
        pointA += 1
    return pivot
def quick_sort(AA, start, end):
    """Sort AA[start..end] (inclusive bounds) in place via quicksort.

    The `start < end` guard is essential: a one-element range (start == end)
    must return immediately, otherwise partition() would be called on it.
    """
    if start < end:
        split = partition(AA, start, end)
        quick_sort(AA, start, split - 1)
        quick_sort(AA, split + 1, end)
# Demo: sort a sample list (with duplicates) in place and print the result.
AA = [3, 4, 3, 2, 6, 44, 22, 3, 1, 90]
quick_sort(AA, 0, len(AA)-1)
print (AA)
| StarcoderdataPython |
1781608 | <gh_stars>0
from contextlib import contextmanager
from functools import wraps
from io import TextIOWrapper
import djclick as click
from django.utils import timezone
def show_command_time(fn):
    """Decorator: run the wrapped command and echo its wall-clock duration."""
    @wraps(fn)
    def timed(*args, **kwargs):
        started = timezone.now()
        outcome = fn(*args, **kwargs)
        elapsed = timezone.now() - started
        click.secho(f"Done in {elapsed.total_seconds()}s")
        return outcome
    return timed
@contextmanager
def read_stream_with_progress(
    stream: TextIOWrapper, progress_label: str, length: int = None, reader=None
):
    """Yield a click progress-bar iterator over *reader* (default: the stream).

    When *length* is not supplied it is computed by consuming the stream once
    to count lines; the stream is rewound afterwards.  Pass e.g. a csv.reader
    bound to the stream as *reader* to iterate parsed rows instead of raw
    lines.
    """
    # NOTE(review): `length or ...` re-counts when an explicit length of 0 is
    # passed; presumably callers never pass 0 -- confirm if that changes.
    length = length or sum(1 for _ in stream)
    reader = reader or stream
    # Rewind unconditionally: the counting pass above may have consumed it.
    stream.seek(0)
    click.secho(f"Found {length} lines")
    with click.progressbar(
        reader, length=length, label=progress_label
    ) as progress_reader:
        yield progress_reader
| StarcoderdataPython |
1705328 | from xml.sax.saxutils import escape
class Node:
    """A tagged span [start, end) of a document, forming a tree via children."""

    def __init__(self, start, end, tag, parent, attrs):
        self.start = start
        self.end = end
        self.tag = tag
        self.parent = parent
        self.children = []
        # Normalize a falsy attrs argument to an empty attribute list.
        self.attrs = attrs if attrs else []

    def __str__(self):
        parts = ['(%d, %d) tag: %s' % (self.start, self.end, self.tag)]
        if self.attrs:
            parts.append(' attrs: %s' % self.attrs)
        if self.children:
            parts.append(' children: %d' % len(self.children))
        return '{%s}' % ''.join(parts)

    def __repr__(self):
        return self.__str__()

    def add_child(self, child):
        """Append a child node (assumed to be contained within this span)."""
        #assert child.start >= self.start and child.end <= self.end
        self.children.append(child)
def get_id(tag, idmap):
    """Increment and return the per-tag counter stored in idmap (1-based)."""
    idmap[tag] = idmap.get(tag, 0) + 1
    return idmap[tag]
def annotate_doc(doc, nodelist):
    """Render `doc` with tags from `nodelist`; also return a tag->positions map."""
    markers = []
    idmap = {}
    segmentmap = {}
    flatten_nodes(nodelist, markers, idmap, segmentmap)
    #print len(segmentmap.keys())
    return insert_markers(doc, markers), segmentmap
def insert_markers(text, inserts):
    """Interleave XML-escaped slices of `text` with markers at given offsets.

    `inserts` is a position-sorted list of (offset, marker) pairs; offsets
    must be non-decreasing, otherwise the input is invalid.
    """
    pieces = []
    cursor = 0
    for offset, marker in inserts:
        if offset > cursor:
            pieces.append(escape(text[cursor:offset]))
        elif offset < cursor:
            print ("INVALID MARKER", offset, marker, inserts)
            assert 0
        pieces.append(marker)
        cursor = offset
    # Flush any trailing text after the last marker.
    if cursor < len(text):
        pieces.append(escape(text[cursor:]))
    return u''.join(pieces)
def flatten_nodes(nodelist, inserts, idmap, segmentmap):
    """Depth-first flatten of node trees into (position, marker) pairs.

    For each node an opening tag (with its attributes) is appended at
    node.start, children are recursed, and a closing tag plus newline is
    appended at node.end.  `segmentmap` collects start offsets per tag;
    `idmap` is currently unused (per-tag ids are disabled).
    """
    for node in nodelist:
        attrlist = node.attrs[:]
        #id1 = '%s_%d' % (node.tag, get_id(node.tag, idmap))
        #attrlist.append(('id', id1))
        if attrlist:
            rendered = ' '.join('%s="%s"' % (attr[0], attr[1]) for attr in attrlist)
            inserts.append((node.start, '<%s %s>' % (node.tag, rendered)))
        else:
            inserts.append((node.start, '<%s>' % node.tag))
        segmentmap.setdefault(node.tag, []).append(node.start)
        flatten_nodes(node.children, inserts, idmap, segmentmap)
        inserts.append((node.end, '</%s>\n' % node.tag))
3208008 | <reponame>muhammadrazaali-RAZA/AI---Snake-Game
import random
from display import display_base
from itertools import product
class Apple(display_base):
    """Tracks the apple's grid position for the snake game."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Grid cell currently occupied by the apple; None until first refresh().
        self.location = None

    def refresh(self, snake):
        """
        Move the apple to a random cell not occupied by the snake.

        When no free cell remains (the snake fills the board), the apple is
        parked at (-1, -1) so it is not drawn.
        """
        available_positions = set(product(range(self.cell_width - 1), range(self.cell_height - 1))) - set(snake.body)
        # BUG FIX: random.sample() on a set is deprecated since Python 3.9 and
        # raises TypeError since 3.11; pick from an ordered sequence instead
        # (same uniform distribution over the free cells).
        self.location = random.choice(sorted(available_positions)) if available_positions else (-1, -1)
| StarcoderdataPython |
1613583 | <reponame>kattni/Adafruit_CircuitPython_seesaw<filename>examples/seesaw_analogin_test.py
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Simple seesaw test reading analog value
# on SAMD09, analog in can be pins 2, 3, or 4
# on Attiny8x7, analog in can be pins 0, 1, 2, 3, 6, 7, 18, 19, 20
#
# See the seesaw Learn Guide for wiring details:
# https://learn.adafruit.com/adafruit-seesaw-atsamd09-breakout?view=all#circuitpython-wiring-and-test
import time
import board
from adafruit_seesaw.seesaw import Seesaw
from adafruit_seesaw.analoginput import AnalogInput
# Seesaw breakout on the board's default I2C bus.
i2c_bus = board.I2C()
ss = Seesaw(i2c_bus)
# Analog input pin number on the seesaw (valid pins listed in the header above).
analogin_pin = 2
analog_in = AnalogInput(ss, analogin_pin)
# Poll and print the raw ADC reading ten times per second, forever.
while True:
    print(analog_in.value)
    time.sleep(0.1)
| StarcoderdataPython |
3217328 | <reponame>common-config-bot/prjuray
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from csv import DictReader
MAX_GLOBAL_CLOCKS = 24
MAX_COLUMN_CLOCKS = 12
def gen_rclk_int(grid):
    """Yield grid locations of RCLK_INT_L / RCLK_INT_R tiles, ordered by tile name."""
    for name in sorted(grid.tiles()):
        loc = grid.loc_of_tilename(name)
        if grid.gridinfo_at_loc(loc).tile_type in ("RCLK_INT_L", "RCLK_INT_R"):
            yield loc
def walk_tile(grid, start_loc, dy, clocks):
    """Walk vertically from an RCLK tile, collecting adjacent SLICE sites.

    Starting one step from `start_loc` and moving by `dy` (+1 or -1 in grid
    rows), each consecutive INT tile contributes the SLICEL/SLICEM sites of
    its immediate left and right neighbour tiles to clocks[(start_loc, dy)].
    The walk stops at the first non-INT tile.  The key must not already be
    present in `clocks`.
    """
    key = (start_loc, dy)
    assert key not in clocks
    clocks[key] = set()
    x, y = start_loc
    while True:
        y += dy
        loc = (x, y)
        gridinfo = grid.gridinfo_at_loc(loc)
        # A leaf clock row only spans contiguous INT tiles.
        if gridinfo.tile_type != 'INT':
            break
        # Collect slice sites from the tile to the left of the INT column...
        left_gridinfo = grid.gridinfo_at_loc((x - 1, y))
        for site, site_type in left_gridinfo.sites.items():
            if site_type in ['SLICEL', 'SLICEM']:
                clocks[key].add(site)
        # ...and from the tile to the right.
        right_gridinfo = grid.gridinfo_at_loc((x + 1, y))
        for site, site_type in right_gridinfo.sites.items():
            if site_type in ['SLICEL', 'SLICEM']:
                clocks[key].add(site)
def populate_leafs(grid):
    """Map each (RCLK location, direction) pair to its set of leaf SLICE sites."""
    clocks = {}
    for rclk_loc in gen_rclk_int(grid):
        for direction in (1, -1):
            walk_tile(grid, rclk_loc, direction, clocks)
    return clocks
class ClockColumns():
    """Tracks clock usage per clock column, enforcing per-column and global caps."""

    def __init__(self, grid):
        # site name -> column key; column key -> set of clocks routed there.
        self.sites = {}
        self.clocks_active = {}
        self.global_clocks = set()

        for column_key, column_sites in populate_leafs(grid).items():
            self.clocks_active[column_key] = set()
            for site in column_sites:
                self.sites[site] = column_key

    def columns(self):
        """Return the known column keys."""
        return self.clocks_active.keys()

    def remove_column(self, disabled_columns):
        """Forget the given columns and every site belonging to them."""
        for column_key in disabled_columns:
            del self.clocks_active[column_key]

        dead_sites = [s for s, key in self.sites.items() if key in disabled_columns]
        for site in dead_sites:
            del self.sites[site]

    def add_clock(self, site, clock):
        """Route `clock` to `site`'s column; return False when a cap is exceeded."""
        column_key = self.sites[site]
        active = self.clocks_active[column_key]
        if clock in active:
            # Clock already routed to this column.
            return True
        if len(active) >= MAX_COLUMN_CLOCKS:
            # Column is at its clock capacity.
            return False
        if clock not in self.global_clocks:
            if len(self.global_clocks) >= MAX_GLOBAL_CLOCKS:
                # No global clock resources left.
                return False
            self.global_clocks.add(clock)
        active.add(clock)
        return True
class GlobalClockBuffers():
    """Candidate BUFG sites per HROUTE index, loaded from a CSV description."""

    def __init__(self, bufg_outputs_file):
        # hroute index -> sorted list of BUFG sites that can drive it.
        self.bufgs = {idx: [] for idx in range(MAX_GLOBAL_CLOCKS)}
        self.unused_bufgs = set(range(MAX_GLOBAL_CLOCKS))

        with open(bufg_outputs_file) as f:
            for row in DictReader(f):
                if row['hroute_output'] == 'all':
                    # Site can drive any HROUTE: register it everywhere.
                    for idx in range(MAX_GLOBAL_CLOCKS):
                        self.bufgs[idx].append(row['site'])
                else:
                    self.bufgs[int(row['hroute_output'])].append(row['site'])

        for idx in range(MAX_GLOBAL_CLOCKS):
            self.bufgs[idx].sort()

    def random_bufg_for_hroute(self, hroute_idx, random_choice):
        """Consume `hroute_idx` and pick one of its candidate BUFG sites."""
        self.unused_bufgs.remove(hroute_idx)
        return random_choice(self.bufgs[hroute_idx]), hroute_idx

    def random_bufg(self, random_choice):
        """Pick a still-unused HROUTE index, then a BUFG site for it."""
        hroute_idx = random_choice(sorted(self.unused_bufgs))
        return self.random_bufg_for_hroute(hroute_idx, random_choice)
def make_bufg(site, site_type, idx, ce_inputs, randlib):
    """Emit Verilog instantiating a randomized clock buffer pinned to `site`.

    Returns (verilog_text, output_wire_name).  `ce_inputs` is a list of signal
    names (or constant '0'/'1' strings) to draw control inputs from.
    NOTE(review): `randlib` appears to follow the numpy RandomState API
    (randint(2) -> 0 or 1, exclusive upper bound), not stdlib random.randint
    -- confirm with the caller.
    """
    if site_type in ['BUFGCE', 'BUFGCE_HDIO']:
        # Plain clock-enable buffer with randomized CE polarity and type.
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}")
) bufg_{idx} (
.CE({ce}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            invert_ce=randlib.randint(2),
            ce_type=randlib.choice(["SYNC", "ASYNC"]),
            ce=randlib.choice(ce_inputs))
    elif site_type == 'BUFGCE_DIV':
        # Dividing buffer: adds a CLR input and a divide ratio of 1..8.
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE_DIV #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}"),
.BUFGCE_DIVIDE({bufce_divide})
) bufg_{idx} (
.CE({ce}),
.CLR({clr}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            invert_ce=randlib.randint(2),
            ce_type=randlib.choice(["SYNC", "ASYNC"]),
            ce=randlib.choice(ce_inputs),
            clr=randlib.choice(ce_inputs),
            bufce_divide=randlib.choice(range(1, 9)))
    elif site_type == 'BUFG_PS':
        # PS-side buffer: no configurable parameters or control pins.
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFG_PS #(
) bufg_{idx} (
.O(bufg_o_{idx})
);""".format(
            loc=site, idx=idx)
    elif site_type == 'BUFGCTRL':
        # At most one preselect may be asserted: PRESELECT_I1 is only
        # randomized when PRESELECT_I0 came out 0.
        preselect_i0 = randlib.randint(2)
        if not preselect_i0:
            preselect_i1 = randlib.randint(2)
        else:
            preselect_i1 = 0
        s0 = randlib.choice(ce_inputs)
        s1 = randlib.choice(ce_inputs)
        # Avoid tying both select inputs to constant 0.
        if s0 == '0':
            while s1 == '0':
                s1 = randlib.choice(ce_inputs)
        # Constrain the select inversions so the effective select values stay
        # legal for each combination of constant-tied S0/S1 inputs.
        if s0 == '0' and s1 == '1':
            invert_s0 = randlib.randint(2)
            invert_s1 = 0
        elif s0 == '1' and s1 == '0':
            invert_s1 = randlib.randint(2)
            invert_s0 = 0
        elif s0 == '1' and s1 == '1':
            invert_s0 = randlib.randint(2)
            if invert_s0:
                invert_s1 = 0
            else:
                invert_s1 = randlib.randint(2)
        else:
            # At least one select is a real signal: both inversions are free.
            invert_s0 = randlib.randint(2)
            invert_s1 = randlib.randint(2)
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCTRL #(
.INIT_OUT({init_out}),
.IS_CE0_INVERTED({invert_ce0}),
.IS_CE1_INVERTED({invert_ce1}),
.IS_S0_INVERTED({invert_s0}),
.IS_S1_INVERTED({invert_s1}),
.IS_IGNORE0_INVERTED({invert_ignore0}),
.IS_IGNORE1_INVERTED({invert_ignore1}),
.PRESELECT_I0({preselect_i0}),
.PRESELECT_I1({preselect_i1})
) bufg_{idx} (
.IGNORE0({ignore0}),
.IGNORE1({ignore1}),
.S0({s0}),
.S1({s1}),
.CE0({ce0}),
.CE1({ce1}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            init_out=randlib.randint(2),
            s0=s0,
            s1=s1,
            ce0=randlib.choice(ce_inputs),
            ce1=randlib.choice(ce_inputs),
            ignore0=randlib.choice(ce_inputs),
            ignore1=randlib.choice(ce_inputs),
            invert_ce0=randlib.randint(2),
            invert_ce1=randlib.randint(2),
            invert_s0=invert_s0,
            invert_s1=invert_s1,
            invert_ignore0=randlib.randint(2),
            invert_ignore1=randlib.randint(2),
            preselect_i0=preselect_i0,
            preselect_i1=preselect_i1,
        )
    else:
        # Unknown buffer site type: fail loudly.
        assert False, site_type
    return s, 'bufg_o_{idx}'.format(idx=idx)
| StarcoderdataPython |
4807683 | # -*- coding: utf-8 -*-
# @File : tryEverything/beam_search.py
# @Info : @ TSMC-SIGGRAPH, 2018/6/19
# @Desc : refer to google/im2txt
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import heapq
import math
import numpy as np
class Caption(object):
    """A complete or partial caption produced during beam search."""

    def __init__(self, sentence, state, logprob, score):
        """Create a caption.

        Args:
          sentence: List of word ids in the caption so far.
          state: Model state after generating the previous word.
          logprob: Log-probability of the caption.
          score: Score used for ranking (here equal to logprob).
        """
        self.sentence = sentence
        self.state = state
        self.logprob = logprob
        self.score = score

    def __cmp__(self, other):
        """Three-way comparison by score (Python 2 only; ignored by Python 3)."""
        assert isinstance(other, Caption)
        if self.score < other.score:
            return -1
        if self.score > other.score:
            return 1
        return 0

    # Rich comparisons for Python 3 compatibility (__cmp__ is deprecated).
    def __lt__(self, other):
        assert isinstance(other, Caption)
        return self.score < other.score

    def __eq__(self, other):
        assert isinstance(other, Caption)
        return self.score == other.score
# ็ปดๆคไธไธชๅ
็ด ไธชๆฐไธบn็ๅ ,่ฆไนๅ
็ด ๅ
ฅๅ ,่ฆไนๆธ
็ฉบๅ ่พๅบๅ
จ้จๅ
็ด (ๅฎๅ
จ่ตท่ง,ๆญคๆถๅ้ขๅกๅฟ
็ซๅปๆฅreset()ๆนๆณ)
class TopN(object):
    """Keeps the n largest elements of an incrementally provided set."""

    def __init__(self, n):
        self._n = n
        self._data = []  # min-heap of the current best elements; None after extract()

    def size(self):
        assert self._data is not None
        return len(self._data)

    def push(self, x):
        """Insert x, evicting the current minimum once at capacity."""
        assert self._data is not None
        if len(self._data) >= self._n:
            # Equivalent to push followed by pop, but cheaper.
            heapq.heappushpop(self._data, x)
        else:
            heapq.heappush(self._data, x)

    def extract(self, sort=False):
        """Destructively return all elements (descending order if sort=True).

        The only method that may be called afterwards is reset().
        """
        assert self._data is not None
        items = self._data
        self._data = None
        if sort:
            items.sort(reverse=True)
        return items

    def reset(self):
        """Return the TopN to an empty, usable state."""
        self._data = []
class CaptionGenerator(object):
    """Generates captions from an image-to-text model via beam search."""

    def __init__(self,
                 model,
                 vocab,
                 new_state,
                 beam_size=3,
                 max_caption_length=50):
        """Initializes the generator.

        Args:
          model: Object encapsulating a trained image-to-text model. Must have
            methods feed_image() and inference_step(). For example, an instance of
            InferenceWrapperBase.
          vocab: A Vocabulary object.
          new_state: Initial RNN state the decode starts from (presumably the
            state produced after feeding the image -- confirm with the caller).
          beam_size: Beam size to use when generating captions.
          max_caption_length: The maximum caption length before stopping the search.
        """
        self.vocab = vocab
        self.model = model
        self.new_state = new_state
        self.beam_size = beam_size
        self.max_caption_length = max_caption_length

    def get_initial_beam_state(self, sess, start_words):
        """Feed `start_words` one id at a time; return (logprob, state, last_id)."""
        # new_state = sess.run(self.model.initial_state)
        new_state = self.new_state
        start_ids = [self.vocab.word_to_id(word) for word in start_words]
        x = np.zeros((1, 1))  # because we have set sampling=True, means batch_size,n_seqs=1,1
        logprob = 0.0
        for ids in start_ids:
            x[0, 0] = ids  # input one word
            word_probs, new_state = self.inference(sess, x, new_state)
            if word_probs[ids] < 1e-12:
                continue  # Avoid log(0).
            logprob = logprob + math.log(word_probs[ids])
        return logprob, new_state, start_ids[-1]

    def inference(self, sess, input_feed, state_feed):
        """Single decode step: return (vocab probability vector, next RNN state)."""
        feed = {self.model.inputs: input_feed, self.model.keep_prob: 1., self.model.initial_state: state_feed}
        word_probs, new_state = sess.run([self.model.prediction, self.model.final_state], feed_dict=feed)
        return word_probs[0], new_state

    def beam_search(self, sess, start_words):
        """Run beam search seeded with `start_words`; return captions, best first."""
        init_logprob, init_state, init_ids = self.get_initial_beam_state(sess, start_words)
        partial_captions = TopN(self.beam_size)
        initial_beam = Caption(sentence=[init_ids], state=init_state, logprob=init_logprob, score=init_logprob)
        partial_captions.push(initial_beam)
        complete_captions = TopN(self.beam_size)
        # Run beam search: each round extends every surviving beam by one word.
        for _ in range(self.max_caption_length - 1):
            partial_captions_list = partial_captions.extract()
            partial_captions.reset()
            # Candidate pool: (beam index, next word id, accumulated logprob).
            wp_list = list()
            new_states = list()
            for cur_seq_tag, partial_caption in enumerate(partial_captions_list):
                input_feed = np.array([partial_caption.sentence[-1]]).reshape((1, 1))
                state_feed = partial_caption.state[-1]
                # hint: inference_step
                word_probs, new_state = self.inference(sess, input_feed, state_feed)
                new_states.append(new_state)
                words_and_probs = list(enumerate(word_probs))
                words_and_probs.sort(key=lambda x: -x[1])  # In probability, in descending order
                words_and_probs = words_and_probs[0:self.beam_size]
                for next_idx, prob in words_and_probs:
                    if prob < 1e-12:
                        # Near-zero probability: keep the old logprob to avoid log(0).
                        wp_list.append(
                            (cur_seq_tag, next_idx, partial_caption.logprob))  # [cur_seq_tag, next_idx,logprob]
                    else:
                        wp_list.append(
                            (cur_seq_tag, next_idx, partial_caption.logprob + math.log(prob)))
            # Prune the pooled candidates back down to the beam width.
            wp_list.sort(key=lambda x: -x[2])
            wp_list = wp_list[0:self.beam_size]
            for cur_seq_tag, w, p in wp_list:
                sentence = partial_captions_list[cur_seq_tag].sentence + [w]
                logprob = p
                score = logprob
                beam = Caption(sentence, new_states[cur_seq_tag], logprob, score)
                partial_captions.push(beam)
            if partial_captions.size() == 0:
                # We have run out of partial candidates; happens when beam_size = 1.
                break
        # NOTE(review): nothing ever pushes into complete_captions inside the
        # loop (no end-of-sentence handling is visible here), so this fallback
        # always triggers -- confirm that is intended.
        # If we have no complete captions then fall back to the partial captions.
        # But never output a mixture of complete and partial captions because a
        # partial caption could have a higher score than all the complete captions.
        if not complete_captions.size():
            complete_captions = partial_captions
        return complete_captions.extract(sort=True)
| StarcoderdataPython |
1664809 | <filename>d3m/primitive_interfaces/distance.py
import abc
import typing
from d3m import types
from d3m.primitive_interfaces.base import *
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
__all__ = ('PairwiseDistanceLearnerPrimitiveBase', 'PairwiseDistanceTransformerPrimitiveBase', 'InputLabels')
InputLabels = typing.TypeVar('InputLabels', bound=typing.Union[types.Container]) # type: ignore
# Defining Generic with all type variables allows us to specify the order and an additional type variable.
class PairwiseDistanceLearnerPrimitiveBase(PrimitiveBase[Inputs, Outputs, Params, Hyperparams], typing.Generic[Inputs, InputLabels, Outputs, Params, Hyperparams]):
    """
    A base class for primitives which learn distances (however defined) between two
    different sets of instances.

    Class is parameterized using five type variables, ``Inputs``, ``InputLabels``, ``Outputs``, ``Params``, and ``Hyperparams``.
    """

    @abc.abstractmethod
    def produce(self, *, inputs: Inputs, second_inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:  # type: ignore
        """
        Computes distance matrix between two sets of data.

        Implementations of this method should use ``inputs_across_samples`` decorator to mark ``inputs``
        and ``second_inputs`` as being computed across samples.

        Parameters
        ----------
        inputs:
            The first set of collections of instances.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to produce outputs during this method call, in seconds.
        iterations:
            How many of internal iterations should the primitive do.

        Returns
        ---------
        A n by m distance matrix describing the relationship between each instance in inputs[0] and each instance
        in inputs[1] (n and m are the number of instances in inputs[0] and inputs[1], respectively),
        wrapped inside ``CallResult``.
        """

    @abc.abstractmethod
    def set_training_data(self, *, inputs: Inputs, input_labels: InputLabels) -> None:  # type: ignore
        """
        Sets training data of this primitive.

        Parameters
        ----------
        inputs:
            The inputs.
        input_labels:
            A set of class labels for the inputs.
        """

    def multi_produce(self, *, produce_methods: typing.Sequence[str], inputs: Inputs, second_inputs: Inputs, timeout: float = None, iterations: int = None) -> MultiCallResult:  # type: ignore
        """
        A method calling multiple produce methods at once.

        Parameters
        ----------
        produce_methods:
            A list of names of produce methods to call.
        inputs:
            The first set of collections of instances.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to produce outputs for all produce methods
            listed in ``produce_methods`` argument, in seconds.
        iterations:
            How many of internal iterations should the primitive do.

        Returns
        -------
        A dict of values for each produce method wrapped inside ``MultiCallResult``.
        """

        # Thin forwarding wrapper: the shared base-class implementation calls
        # each listed produce method once with these keyword arguments.
        return self._multi_produce(produce_methods=produce_methods, timeout=timeout, iterations=iterations, inputs=inputs, second_inputs=second_inputs)

    def fit_multi_produce(self, *, produce_methods: typing.Sequence[str], inputs: Inputs, input_labels: InputLabels,
                          second_inputs: Inputs, timeout: float = None, iterations: int = None) -> MultiCallResult:  # type: ignore
        """
        A method calling ``fit`` and after that multiple produce methods at once.

        Parameters
        ----------
        produce_methods:
            A list of names of produce methods to call.
        inputs:
            The first set of collections of instances.
        input_labels:
            A set of class labels for the inputs.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to both fit the primitive and produce outputs
            for all produce methods listed in ``produce_methods`` argument, in seconds.
        iterations:
            How many of internal iterations should the primitive do for both fitting and producing
            outputs of all produce methods.

        Returns
        -------
        A dict of values for each produce method wrapped inside ``MultiCallResult``.
        """

        # Forward to the base-class helper, which fits first and then calls
        # every requested produce method.
        return self._fit_multi_produce(produce_methods=produce_methods, timeout=timeout, iterations=iterations, inputs=inputs, input_labels=input_labels, second_inputs=second_inputs)
class PairwiseDistanceTransformerPrimitiveBase(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """
    A base class for primitives which compute distances (however defined) between two
    different sets of instances without learning any sort of model.
    """

    @abc.abstractmethod
    def produce(self, *, inputs: Inputs, second_inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:  # type: ignore
        """
        Computes distance matrix between two sets of data.

        Implementations of this method should use ``inputs_across_samples`` decorator to mark ``inputs``
        and ``second_inputs`` as being computed across samples.

        Parameters
        ----------
        inputs:
            The first set of collections of instances.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to produce outputs during this method call, in seconds.
        iterations:
            How many of internal iterations should the primitive do.

        Returns
        ---------
        A n by m distance matrix describing the relationship between each instance in inputs[0] and each instance
        in inputs[1] (n and m are the number of instances in inputs[0] and inputs[1], respectively),
        wrapped inside ``CallResult``.
        """

    def multi_produce(self, *, produce_methods: typing.Sequence[str], inputs: Inputs, second_inputs: Inputs, timeout: float = None, iterations: int = None) -> MultiCallResult:  # type: ignore
        """
        A method calling multiple produce methods at once.

        Parameters
        ----------
        produce_methods:
            A list of names of produce methods to call.
        inputs:
            The first set of collections of instances.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to produce outputs for all produce methods
            listed in ``produce_methods`` argument, in seconds.
        iterations:
            How many of internal iterations should the primitive do.

        Returns
        -------
        A dict of values for each produce method wrapped inside ``MultiCallResult``.
        """

        # Thin forwarding wrapper over the shared base-class implementation.
        return self._multi_produce(produce_methods=produce_methods, timeout=timeout, iterations=iterations, inputs=inputs, second_inputs=second_inputs)

    def fit_multi_produce(self, *, produce_methods: typing.Sequence[str], inputs: Inputs, second_inputs: Inputs, timeout: float = None, iterations: int = None) -> MultiCallResult:  # type: ignore
        """
        A method calling ``fit`` and after that multiple produce methods at once.

        Parameters
        ----------
        produce_methods:
            A list of names of produce methods to call.
        inputs:
            The first set of collections of instances.
        second_inputs:
            The second set of collections of instances.
        timeout:
            A maximum time this primitive should take to both fit the primitive and produce outputs
            for all produce methods listed in ``produce_methods`` argument, in seconds.
        iterations:
            How many of internal iterations should the primitive do for both fitting and producing
            outputs of all produce methods.

        Returns
        -------
        A dict of values for each produce method wrapped inside ``MultiCallResult``.
        """

        # Transformers have no training data, so fitting is a no-op delegated
        # to the base-class helper before producing.
        return self._fit_multi_produce(produce_methods=produce_methods, timeout=timeout, iterations=iterations, inputs=inputs, second_inputs=second_inputs)
| StarcoderdataPython |
3322704 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
class AdalineGD(object):
    """ADAptive LInear NEuron classifier trained with batch gradient descent."""

    def __init__(self, eta=0.01, n_iter=50):
        # eta: learning rate; n_iter: passes over the training set.
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, x, y):
        """Fit weights by minimizing the sum-of-squared-errors cost; returns self.

        After fitting, w_ holds [bias, weights...] and cost_ the per-epoch cost.
        """
        self.w_ = np.zeros(1 + x.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            errors = y - self.net_input(x)
            # Batch gradient step on weights and bias.
            self.w_[1:] += self.eta * x.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            self.cost_.append((errors ** 2).sum() / 2.0)
        return self

    def net_input(self, x):
        """Weighted sum of inputs plus bias."""
        return np.dot(x, self.w_[1:]) + self.w_[0]

    def activation(self, x):
        """Identity (linear) activation."""
        return self.net_input(x)

    def predict(self, x):
        """Class label: 1 where activation >= 0, else -1."""
        return np.where(self.activation(x) >= 0.0, 1, -1)
def plot_decision_regions(x, y, classifier, resolution=0.02):
    """Plot the 2-D decision regions of `classifier` plus the training samples.

    x is an (n_samples, 2) feature matrix, y the class labels, and
    `resolution` the mesh step used for the prediction grid.
    """
    # marker gen and color map (one marker/color per class, up to five classes)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # decision surface: predict over a dense mesh covering the data +/- 1 unit
    x1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    x2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
    z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    z = z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot class samples: one scatter call per class label
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=x[y == cl, 0], y=x[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label =cl)
# Binary subset of Iris: first 100 samples (setosa vs. versicolor), using
# sepal length (column 0) and petal length (column 2) as the two features.
# NOTE(review): y keeps the raw 0/1 Iris labels while AdalineGD predicts
# -1/+1; Adaline demos usually remap y to -1/+1 first -- confirm intended.
iris = datasets.load_iris()
x = iris.data[:100, [0, 2]]
y = iris.target[:100]
# Compare two learning rates side by side (cost per epoch, log scale on left).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(x, y)
ax[0].plot(range(1, len(ada1.cost_)+1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squarred-error)')
ax[0].set_title('Adaline - Learning rate 0.01')
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(x, y)
ax[1].plot(range(1, len(ada2.cost_)+1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Sum-squarred-error)')
ax[1].set_title('Adaline - Learning rate 0.0001')
plt.show()
# standardization
# Feature scaling (zero mean, unit variance) lets the larger 0.01 rate converge.
x_std = np.copy(x)
x_std[:, 0] = (x[:, 0] - x[:, 0].mean()) / x[:, 0].std()
x_std[:, 1] = (x[:, 1] - x[:, 1].mean()) / x[:, 1].std()
ada3 = AdalineGD(n_iter=15, eta=0.01)
ada3.fit(x_std, y)
plot_decision_regions(x_std, y, classifier=ada3)
plt.title('Adaline -Gradient Descent')
plt.xlabel('sepal len[standardized]')
plt.ylabel('petal len[standardized]')
plt.legend(loc='upper left')
plt.show()
# Cost curve for the standardized fit.
plt.plot(range(1, len(ada3.cost_)+1), ada3.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squarred-error')
plt.show()
3343980 | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
import numpy as np
class Min(Instruccion):
    """AST node for the SQL aggregate function MIN(expr).

    Evaluates its operand expression (expected to yield a matrix of rows)
    and returns a 1x1 matrix containing the minimum numeric value, or an
    Excepcion on failure.
    """

    def __init__(self, valor, tipo, strGram, linea, columna):
        Instruccion.__init__(self,tipo,linea,columna, strGram)
        # Operand expression whose ejecutar() produces the rows to aggregate.
        self.valor = valor

    def ejecutar(self, tabla, arbol):
        """Evaluate the operand and return np.array([[min]]) or an Excepcion."""
        super().ejecutar(tabla,arbol)
        resultado = self.valor.ejecutar(tabla, arbol)
        if isinstance(resultado , Excepcion):
            return resultado
        listaejemplo = []
        for fila in resultado:
            numero = self._to_number(fila[0])
            if numero is None:
                # Non-numeric cell: report SQLSTATE 22023 (invalid parameter value).
                error = Excepcion("22023", "Semantico", "Parametro de evaluacion invalido", self.linea, self.columna)
                arbol.excepciones.append(error)
                arbol.consola.append(error.toString())
                return error
            listaejemplo.append(numero)
        minimo = np.amin(np.array(listaejemplo))
        return np.array([[minimo]])

    @staticmethod
    def _to_number(valor):
        """Parse *valor* as int, then float; return None if not numeric.

        BUGFIX: the original used str.isnumeric()/str.isdecimal() checks, but
        isdecimal() implies isnumeric(), so the float branch was unreachable
        and decimal strings like "3.5" (and negatives like "-2") were wrongly
        rejected.  Conversion via int()/float() handles all of these.
        """
        texto = str(valor)
        try:
            return int(texto)
        except ValueError:
            try:
                return float(texto)
            except ValueError:
                return None

    def analizar(self, tabla, arbol):
        return super().analizar(tabla, arbol)

    def traducir(self, tabla, arbol):
        """Return the SQL text form of this node, e.g. 'MIN(<operand>)'."""
        super().traducir(tabla, arbol)
        cadena = "MIN("
        cadena += self.valor.concatenar(tabla,arbol)
        cadena += ")"
        return cadena
| StarcoderdataPython |
25886 | <reponame>gwtnb/jubatus-python-client<filename>test/jubatus_test/classifier/test.py
#!/usr/bin/env python
import unittest
import json
import msgpackrpc
from jubatus.classifier.client import Classifier
from jubatus.classifier.types import *
from jubatus_test.test_util import TestUtil
from jubatus.common import Datum
# Connection parameters for the locally forked jubatus classifier server.
host = "127.0.0.1"
port = 21001
timeout = 10

class ClassifierTest(unittest.TestCase):
    """Integration tests for the jubatus Classifier RPC client.

    setUp forks a real `classifier` server process configured from a JSON
    file written to disk, then connects a client to it; tearDown kills the
    forked process.  Code is kept byte-identical: test_get_config compares
    the exact config structure round-tripped through the server.
    """

    def setUp(self):
        # Learning configuration sent to the server; get_config() must
        # return an equivalent JSON document (compared with sort_keys below).
        self.config = {
            "method": "AROW",
            "converter": {
                "string_filter_types": {},
                "string_filter_rules": [],
                "num_filter_types": {},
                "num_filter_rules": [],
                "string_types": {},
                "string_rules": [{"key": "*", "type": "str", "sample_weight": "bin", "global_weight": "bin"}],
                "num_types": {},
                "num_rules": [{"key": "*", "type": "num"}]
            },
            "parameter": {
                "regularization_weight": 1.001
            }
        }
        TestUtil.write_file('config_classifier.json', json.dumps(self.config))
        # Fork the jubatus classifier server listening on `port`.
        self.srv = TestUtil.fork_process('classifier', port, 'config_classifier.json')
        try:
            self.cli = Classifier(host, port, "name")
        except:
            # Client connection failed: reap the server before re-raising so
            # the process does not leak across test runs.
            TestUtil.kill_process(self.srv)
            raise

    def tearDown(self):
        # Always terminate the forked server started in setUp.
        TestUtil.kill_process(self.srv)

    def test_get_client(self):
        # The wrapped transport should be an msgpack-rpc client.
        self.assertTrue(isinstance(self.cli.get_client(), msgpackrpc.client.Client))

    def test_get_config(self):
        # Server must echo back the exact configuration written in setUp
        # (order-insensitive comparison via sorted-key JSON dumps).
        config = self.cli.get_config()
        self.assertEqual(json.dumps(json.loads(config), sort_keys=True), json.dumps(self.config, sort_keys=True))

    def test_train(self):
        # Training with a single labeled datum reports 1 record processed.
        d = Datum({"skey1": "val1", "skey2": "val2", "nkey1": 1.0, "nkey2": 2.0})
        data = [["label", d]]
        self.assertEqual(self.cli.train(data), 1)

    def test_classify(self):
        # Smoke test: classify must accept a list of Datum without raising.
        d = Datum({"skey1": "val1", "skey2": "val2", "nkey1": 1.0, "nkey2": 2.0})
        data = [d]
        result = self.cli.classify(data)

    def test_save(self):
        self.assertEqual(self.cli.save("classifier.save_test.model"), True)

    def test_load(self):
        # Save first so there is a model file to load back.
        model_name = "classifier.load_test.model"
        self.cli.save(model_name)
        self.assertEqual(self.cli.load(model_name), True)

    def test_get_status(self):
        # Smoke test: status RPC must not raise.
        self.cli.get_status()

    def test_str(self):
        # EstimateResult's string form is stable and human-readable.
        self.assertEqual("estimate_result{label: label, score: 1.0}",
                         str(EstimateResult("label", 1.0)))
if __name__ == '__main__':
    # Build the suite for this module's test case and run it with the
    # default text runner.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner()
    runner.run(loader.loadTestsFromTestCase(ClassifierTest))
| StarcoderdataPython |
3292356 | <reponame>Instagram/LibCST<filename>libcst/_typed_visitor.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file was generated by libcst.codegen.gen_matcher_classes
from typing import Optional, TYPE_CHECKING, Union
from libcst._flatten_sentinel import FlattenSentinel
from libcst._maybe_sentinel import MaybeSentinel
from libcst._removal_sentinel import RemovalSentinel
from libcst._typed_visitor_base import mark_no_op
if TYPE_CHECKING:
from libcst._nodes.expression import ( # noqa: F401
Annotation,
Arg,
Asynchronous,
Attribute,
Await,
BaseDictElement,
BaseElement,
BaseExpression,
BaseFormattedStringContent,
BaseSlice,
BinaryOperation,
BooleanOperation,
Call,
Comparison,
ComparisonTarget,
CompFor,
CompIf,
ConcatenatedString,
Dict,
DictComp,
DictElement,
Element,
Ellipsis,
Float,
FormattedString,
FormattedStringExpression,
FormattedStringText,
From,
GeneratorExp,
IfExp,
Imaginary,
Index,
Integer,
Lambda,
LeftCurlyBrace,
LeftParen,
LeftSquareBracket,
List,
ListComp,
Name,
NamedExpr,
Param,
Parameters,
ParamSlash,
ParamStar,
RightCurlyBrace,
RightParen,
RightSquareBracket,
Set,
SetComp,
SimpleString,
Slice,
StarredDictElement,
StarredElement,
Subscript,
SubscriptElement,
Tuple,
UnaryOperation,
Yield,
)
from libcst._nodes.module import Module # noqa: F401
from libcst._nodes.op import ( # noqa: F401
Add,
AddAssign,
And,
AssignEqual,
BaseAugOp,
BaseBinaryOp,
BaseBooleanOp,
BaseCompOp,
BaseUnaryOp,
BitAnd,
BitAndAssign,
BitInvert,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Colon,
Comma,
Divide,
DivideAssign,
Dot,
Equal,
FloorDivide,
FloorDivideAssign,
GreaterThan,
GreaterThanEqual,
ImportStar,
In,
Is,
IsNot,
LeftShift,
LeftShiftAssign,
LessThan,
LessThanEqual,
MatrixMultiply,
MatrixMultiplyAssign,
Minus,
Modulo,
ModuloAssign,
Multiply,
MultiplyAssign,
Not,
NotEqual,
NotIn,
Or,
Plus,
Power,
PowerAssign,
RightShift,
RightShiftAssign,
Semicolon,
Subtract,
SubtractAssign,
)
from libcst._nodes.statement import ( # noqa: F401
AnnAssign,
AsName,
Assert,
Assign,
AssignTarget,
AugAssign,
BaseSmallStatement,
BaseStatement,
BaseSuite,
Break,
ClassDef,
Continue,
Decorator,
Del,
Else,
ExceptHandler,
Expr,
Finally,
For,
FunctionDef,
Global,
If,
Import,
ImportAlias,
ImportFrom,
IndentedBlock,
NameItem,
Nonlocal,
Pass,
Raise,
Return,
SimpleStatementLine,
SimpleStatementSuite,
Try,
While,
With,
WithItem,
)
from libcst._nodes.whitespace import ( # noqa: F401
BaseParenthesizableWhitespace,
Comment,
EmptyLine,
Newline,
ParenthesizedWhitespace,
SimpleWhitespace,
TrailingWhitespace,
)
class CSTTypedBaseFunctions:
@mark_no_op
def visit_Add(self, node: "Add") -> Optional[bool]:
pass
@mark_no_op
def visit_Add_whitespace_before(self, node: "Add") -> None:
pass
@mark_no_op
def leave_Add_whitespace_before(self, node: "Add") -> None:
pass
@mark_no_op
def visit_Add_whitespace_after(self, node: "Add") -> None:
pass
@mark_no_op
def leave_Add_whitespace_after(self, node: "Add") -> None:
pass
@mark_no_op
def visit_AddAssign(self, node: "AddAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_AddAssign_whitespace_before(self, node: "AddAssign") -> None:
pass
@mark_no_op
def leave_AddAssign_whitespace_before(self, node: "AddAssign") -> None:
pass
@mark_no_op
def visit_AddAssign_whitespace_after(self, node: "AddAssign") -> None:
pass
@mark_no_op
def leave_AddAssign_whitespace_after(self, node: "AddAssign") -> None:
pass
@mark_no_op
def visit_And(self, node: "And") -> Optional[bool]:
pass
@mark_no_op
def visit_And_whitespace_before(self, node: "And") -> None:
pass
@mark_no_op
def leave_And_whitespace_before(self, node: "And") -> None:
pass
@mark_no_op
def visit_And_whitespace_after(self, node: "And") -> None:
pass
@mark_no_op
def leave_And_whitespace_after(self, node: "And") -> None:
pass
@mark_no_op
def visit_AnnAssign(self, node: "AnnAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_AnnAssign_target(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def leave_AnnAssign_target(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def visit_AnnAssign_annotation(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def leave_AnnAssign_annotation(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def visit_AnnAssign_value(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def leave_AnnAssign_value(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def visit_AnnAssign_equal(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def leave_AnnAssign_equal(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def visit_AnnAssign_semicolon(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def leave_AnnAssign_semicolon(self, node: "AnnAssign") -> None:
pass
@mark_no_op
def visit_Annotation(self, node: "Annotation") -> Optional[bool]:
pass
@mark_no_op
def visit_Annotation_annotation(self, node: "Annotation") -> None:
pass
@mark_no_op
def leave_Annotation_annotation(self, node: "Annotation") -> None:
pass
@mark_no_op
def visit_Annotation_whitespace_before_indicator(self, node: "Annotation") -> None:
pass
@mark_no_op
def leave_Annotation_whitespace_before_indicator(self, node: "Annotation") -> None:
pass
@mark_no_op
def visit_Annotation_whitespace_after_indicator(self, node: "Annotation") -> None:
pass
@mark_no_op
def leave_Annotation_whitespace_after_indicator(self, node: "Annotation") -> None:
pass
@mark_no_op
def visit_Arg(self, node: "Arg") -> Optional[bool]:
pass
@mark_no_op
def visit_Arg_value(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_value(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_keyword(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_keyword(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_equal(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_equal(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_comma(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_comma(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_star(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_star(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_whitespace_after_star(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_whitespace_after_star(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_Arg_whitespace_after_arg(self, node: "Arg") -> None:
pass
@mark_no_op
def leave_Arg_whitespace_after_arg(self, node: "Arg") -> None:
pass
@mark_no_op
def visit_AsName(self, node: "AsName") -> Optional[bool]:
pass
@mark_no_op
def visit_AsName_name(self, node: "AsName") -> None:
pass
@mark_no_op
def leave_AsName_name(self, node: "AsName") -> None:
pass
@mark_no_op
def visit_AsName_whitespace_before_as(self, node: "AsName") -> None:
pass
@mark_no_op
def leave_AsName_whitespace_before_as(self, node: "AsName") -> None:
pass
@mark_no_op
def visit_AsName_whitespace_after_as(self, node: "AsName") -> None:
pass
@mark_no_op
def leave_AsName_whitespace_after_as(self, node: "AsName") -> None:
pass
@mark_no_op
def visit_Assert(self, node: "Assert") -> Optional[bool]:
pass
@mark_no_op
def visit_Assert_test(self, node: "Assert") -> None:
pass
@mark_no_op
def leave_Assert_test(self, node: "Assert") -> None:
pass
@mark_no_op
def visit_Assert_msg(self, node: "Assert") -> None:
pass
@mark_no_op
def leave_Assert_msg(self, node: "Assert") -> None:
pass
@mark_no_op
def visit_Assert_comma(self, node: "Assert") -> None:
pass
@mark_no_op
def leave_Assert_comma(self, node: "Assert") -> None:
pass
@mark_no_op
def visit_Assert_whitespace_after_assert(self, node: "Assert") -> None:
pass
@mark_no_op
def leave_Assert_whitespace_after_assert(self, node: "Assert") -> None:
pass
@mark_no_op
def visit_Assert_semicolon(self, node: "Assert") -> None:
pass
@mark_no_op
def leave_Assert_semicolon(self, node: "Assert") -> None:
pass
@mark_no_op
def visit_Assign(self, node: "Assign") -> Optional[bool]:
pass
@mark_no_op
def visit_Assign_targets(self, node: "Assign") -> None:
pass
@mark_no_op
def leave_Assign_targets(self, node: "Assign") -> None:
pass
@mark_no_op
def visit_Assign_value(self, node: "Assign") -> None:
pass
@mark_no_op
def leave_Assign_value(self, node: "Assign") -> None:
pass
@mark_no_op
def visit_Assign_semicolon(self, node: "Assign") -> None:
pass
@mark_no_op
def leave_Assign_semicolon(self, node: "Assign") -> None:
pass
@mark_no_op
def visit_AssignEqual(self, node: "AssignEqual") -> Optional[bool]:
pass
@mark_no_op
def visit_AssignEqual_whitespace_before(self, node: "AssignEqual") -> None:
pass
@mark_no_op
def leave_AssignEqual_whitespace_before(self, node: "AssignEqual") -> None:
pass
@mark_no_op
def visit_AssignEqual_whitespace_after(self, node: "AssignEqual") -> None:
pass
@mark_no_op
def leave_AssignEqual_whitespace_after(self, node: "AssignEqual") -> None:
pass
@mark_no_op
def visit_AssignTarget(self, node: "AssignTarget") -> Optional[bool]:
pass
@mark_no_op
def visit_AssignTarget_target(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def leave_AssignTarget_target(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def visit_AssignTarget_whitespace_before_equal(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def leave_AssignTarget_whitespace_before_equal(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def visit_AssignTarget_whitespace_after_equal(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def leave_AssignTarget_whitespace_after_equal(self, node: "AssignTarget") -> None:
pass
@mark_no_op
def visit_Asynchronous(self, node: "Asynchronous") -> Optional[bool]:
pass
@mark_no_op
def visit_Asynchronous_whitespace_after(self, node: "Asynchronous") -> None:
pass
@mark_no_op
def leave_Asynchronous_whitespace_after(self, node: "Asynchronous") -> None:
pass
@mark_no_op
def visit_Attribute(self, node: "Attribute") -> Optional[bool]:
pass
@mark_no_op
def visit_Attribute_value(self, node: "Attribute") -> None:
pass
@mark_no_op
def leave_Attribute_value(self, node: "Attribute") -> None:
pass
@mark_no_op
def visit_Attribute_attr(self, node: "Attribute") -> None:
pass
@mark_no_op
def leave_Attribute_attr(self, node: "Attribute") -> None:
pass
@mark_no_op
def visit_Attribute_dot(self, node: "Attribute") -> None:
pass
@mark_no_op
def leave_Attribute_dot(self, node: "Attribute") -> None:
pass
@mark_no_op
def visit_Attribute_lpar(self, node: "Attribute") -> None:
pass
@mark_no_op
def leave_Attribute_lpar(self, node: "Attribute") -> None:
pass
@mark_no_op
def visit_Attribute_rpar(self, node: "Attribute") -> None:
pass
@mark_no_op
def leave_Attribute_rpar(self, node: "Attribute") -> None:
pass
@mark_no_op
def visit_AugAssign(self, node: "AugAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_AugAssign_target(self, node: "AugAssign") -> None:
pass
@mark_no_op
def leave_AugAssign_target(self, node: "AugAssign") -> None:
pass
@mark_no_op
def visit_AugAssign_operator(self, node: "AugAssign") -> None:
pass
@mark_no_op
def leave_AugAssign_operator(self, node: "AugAssign") -> None:
pass
@mark_no_op
def visit_AugAssign_value(self, node: "AugAssign") -> None:
pass
@mark_no_op
def leave_AugAssign_value(self, node: "AugAssign") -> None:
pass
@mark_no_op
def visit_AugAssign_semicolon(self, node: "AugAssign") -> None:
pass
@mark_no_op
def leave_AugAssign_semicolon(self, node: "AugAssign") -> None:
pass
@mark_no_op
def visit_Await(self, node: "Await") -> Optional[bool]:
pass
@mark_no_op
def visit_Await_expression(self, node: "Await") -> None:
pass
@mark_no_op
def leave_Await_expression(self, node: "Await") -> None:
pass
@mark_no_op
def visit_Await_lpar(self, node: "Await") -> None:
pass
@mark_no_op
def leave_Await_lpar(self, node: "Await") -> None:
pass
@mark_no_op
def visit_Await_rpar(self, node: "Await") -> None:
pass
@mark_no_op
def leave_Await_rpar(self, node: "Await") -> None:
pass
@mark_no_op
def visit_Await_whitespace_after_await(self, node: "Await") -> None:
pass
@mark_no_op
def leave_Await_whitespace_after_await(self, node: "Await") -> None:
pass
@mark_no_op
def visit_BinaryOperation(self, node: "BinaryOperation") -> Optional[bool]:
pass
@mark_no_op
def visit_BinaryOperation_left(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def leave_BinaryOperation_left(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def visit_BinaryOperation_operator(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def leave_BinaryOperation_operator(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def visit_BinaryOperation_right(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def leave_BinaryOperation_right(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def visit_BinaryOperation_lpar(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def leave_BinaryOperation_lpar(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def visit_BinaryOperation_rpar(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def leave_BinaryOperation_rpar(self, node: "BinaryOperation") -> None:
pass
@mark_no_op
def visit_BitAnd(self, node: "BitAnd") -> Optional[bool]:
pass
@mark_no_op
def visit_BitAnd_whitespace_before(self, node: "BitAnd") -> None:
pass
@mark_no_op
def leave_BitAnd_whitespace_before(self, node: "BitAnd") -> None:
pass
@mark_no_op
def visit_BitAnd_whitespace_after(self, node: "BitAnd") -> None:
pass
@mark_no_op
def leave_BitAnd_whitespace_after(self, node: "BitAnd") -> None:
pass
@mark_no_op
def visit_BitAndAssign(self, node: "BitAndAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_BitAndAssign_whitespace_before(self, node: "BitAndAssign") -> None:
pass
@mark_no_op
def leave_BitAndAssign_whitespace_before(self, node: "BitAndAssign") -> None:
pass
@mark_no_op
def visit_BitAndAssign_whitespace_after(self, node: "BitAndAssign") -> None:
pass
@mark_no_op
def leave_BitAndAssign_whitespace_after(self, node: "BitAndAssign") -> None:
pass
@mark_no_op
def visit_BitInvert(self, node: "BitInvert") -> Optional[bool]:
pass
@mark_no_op
def visit_BitInvert_whitespace_after(self, node: "BitInvert") -> None:
pass
@mark_no_op
def leave_BitInvert_whitespace_after(self, node: "BitInvert") -> None:
pass
@mark_no_op
def visit_BitOr(self, node: "BitOr") -> Optional[bool]:
pass
@mark_no_op
def visit_BitOr_whitespace_before(self, node: "BitOr") -> None:
pass
@mark_no_op
def leave_BitOr_whitespace_before(self, node: "BitOr") -> None:
pass
@mark_no_op
def visit_BitOr_whitespace_after(self, node: "BitOr") -> None:
pass
@mark_no_op
def leave_BitOr_whitespace_after(self, node: "BitOr") -> None:
pass
@mark_no_op
def visit_BitOrAssign(self, node: "BitOrAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_BitOrAssign_whitespace_before(self, node: "BitOrAssign") -> None:
pass
@mark_no_op
def leave_BitOrAssign_whitespace_before(self, node: "BitOrAssign") -> None:
pass
@mark_no_op
def visit_BitOrAssign_whitespace_after(self, node: "BitOrAssign") -> None:
pass
@mark_no_op
def leave_BitOrAssign_whitespace_after(self, node: "BitOrAssign") -> None:
pass
@mark_no_op
def visit_BitXor(self, node: "BitXor") -> Optional[bool]:
pass
@mark_no_op
def visit_BitXor_whitespace_before(self, node: "BitXor") -> None:
pass
@mark_no_op
def leave_BitXor_whitespace_before(self, node: "BitXor") -> None:
pass
@mark_no_op
def visit_BitXor_whitespace_after(self, node: "BitXor") -> None:
pass
@mark_no_op
def leave_BitXor_whitespace_after(self, node: "BitXor") -> None:
pass
@mark_no_op
def visit_BitXorAssign(self, node: "BitXorAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_BitXorAssign_whitespace_before(self, node: "BitXorAssign") -> None:
pass
@mark_no_op
def leave_BitXorAssign_whitespace_before(self, node: "BitXorAssign") -> None:
pass
@mark_no_op
def visit_BitXorAssign_whitespace_after(self, node: "BitXorAssign") -> None:
pass
@mark_no_op
def leave_BitXorAssign_whitespace_after(self, node: "BitXorAssign") -> None:
pass
@mark_no_op
def visit_BooleanOperation(self, node: "BooleanOperation") -> Optional[bool]:
pass
@mark_no_op
def visit_BooleanOperation_left(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def leave_BooleanOperation_left(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def visit_BooleanOperation_operator(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def leave_BooleanOperation_operator(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def visit_BooleanOperation_right(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def leave_BooleanOperation_right(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def visit_BooleanOperation_lpar(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def leave_BooleanOperation_lpar(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def visit_BooleanOperation_rpar(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def leave_BooleanOperation_rpar(self, node: "BooleanOperation") -> None:
pass
@mark_no_op
def visit_Break(self, node: "Break") -> Optional[bool]:
pass
@mark_no_op
def visit_Break_semicolon(self, node: "Break") -> None:
pass
@mark_no_op
def leave_Break_semicolon(self, node: "Break") -> None:
pass
@mark_no_op
def visit_Call(self, node: "Call") -> Optional[bool]:
pass
@mark_no_op
def visit_Call_func(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_func(self, node: "Call") -> None:
pass
@mark_no_op
def visit_Call_args(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_args(self, node: "Call") -> None:
pass
@mark_no_op
def visit_Call_lpar(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_lpar(self, node: "Call") -> None:
pass
@mark_no_op
def visit_Call_rpar(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_rpar(self, node: "Call") -> None:
pass
@mark_no_op
def visit_Call_whitespace_after_func(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_whitespace_after_func(self, node: "Call") -> None:
pass
@mark_no_op
def visit_Call_whitespace_before_args(self, node: "Call") -> None:
pass
@mark_no_op
def leave_Call_whitespace_before_args(self, node: "Call") -> None:
pass
@mark_no_op
def visit_ClassDef(self, node: "ClassDef") -> Optional[bool]:
pass
@mark_no_op
def visit_ClassDef_name(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_name(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_body(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_body(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_bases(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_bases(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_keywords(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_keywords(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_decorators(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_decorators(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_lpar(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_lpar(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_rpar(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_rpar(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_leading_lines(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_leading_lines(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_lines_after_decorators(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_lines_after_decorators(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_whitespace_after_class(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_whitespace_after_class(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_whitespace_after_name(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_whitespace_after_name(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_ClassDef_whitespace_before_colon(self, node: "ClassDef") -> None:
pass
@mark_no_op
def leave_ClassDef_whitespace_before_colon(self, node: "ClassDef") -> None:
pass
@mark_no_op
def visit_Colon(self, node: "Colon") -> Optional[bool]:
pass
@mark_no_op
def visit_Colon_whitespace_before(self, node: "Colon") -> None:
pass
@mark_no_op
def leave_Colon_whitespace_before(self, node: "Colon") -> None:
pass
@mark_no_op
def visit_Colon_whitespace_after(self, node: "Colon") -> None:
pass
@mark_no_op
def leave_Colon_whitespace_after(self, node: "Colon") -> None:
pass
@mark_no_op
def visit_Comma(self, node: "Comma") -> Optional[bool]:
pass
@mark_no_op
def visit_Comma_whitespace_before(self, node: "Comma") -> None:
pass
@mark_no_op
def leave_Comma_whitespace_before(self, node: "Comma") -> None:
pass
@mark_no_op
def visit_Comma_whitespace_after(self, node: "Comma") -> None:
pass
@mark_no_op
def leave_Comma_whitespace_after(self, node: "Comma") -> None:
pass
@mark_no_op
def visit_Comment(self, node: "Comment") -> Optional[bool]:
pass
@mark_no_op
def visit_Comment_value(self, node: "Comment") -> None:
pass
@mark_no_op
def leave_Comment_value(self, node: "Comment") -> None:
pass
@mark_no_op
def visit_CompFor(self, node: "CompFor") -> Optional[bool]:
pass
@mark_no_op
def visit_CompFor_target(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_target(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_iter(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_iter(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_ifs(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_ifs(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_inner_for_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_inner_for_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_asynchronous(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_asynchronous(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_whitespace_before(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_whitespace_before(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_whitespace_after_for(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_whitespace_after_for(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_whitespace_before_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_whitespace_before_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompFor_whitespace_after_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def leave_CompFor_whitespace_after_in(self, node: "CompFor") -> None:
pass
@mark_no_op
def visit_CompIf(self, node: "CompIf") -> Optional[bool]:
pass
@mark_no_op
def visit_CompIf_test(self, node: "CompIf") -> None:
pass
@mark_no_op
def leave_CompIf_test(self, node: "CompIf") -> None:
pass
@mark_no_op
def visit_CompIf_whitespace_before(self, node: "CompIf") -> None:
pass
@mark_no_op
def leave_CompIf_whitespace_before(self, node: "CompIf") -> None:
pass
@mark_no_op
def visit_CompIf_whitespace_before_test(self, node: "CompIf") -> None:
pass
@mark_no_op
def leave_CompIf_whitespace_before_test(self, node: "CompIf") -> None:
pass
@mark_no_op
def visit_Comparison(self, node: "Comparison") -> Optional[bool]:
pass
@mark_no_op
def visit_Comparison_left(self, node: "Comparison") -> None:
pass
@mark_no_op
def leave_Comparison_left(self, node: "Comparison") -> None:
pass
@mark_no_op
def visit_Comparison_comparisons(self, node: "Comparison") -> None:
pass
@mark_no_op
def leave_Comparison_comparisons(self, node: "Comparison") -> None:
pass
@mark_no_op
def visit_Comparison_lpar(self, node: "Comparison") -> None:
pass
@mark_no_op
def leave_Comparison_lpar(self, node: "Comparison") -> None:
pass
@mark_no_op
def visit_Comparison_rpar(self, node: "Comparison") -> None:
pass
@mark_no_op
def leave_Comparison_rpar(self, node: "Comparison") -> None:
pass
@mark_no_op
def visit_ComparisonTarget(self, node: "ComparisonTarget") -> Optional[bool]:
pass
@mark_no_op
def visit_ComparisonTarget_operator(self, node: "ComparisonTarget") -> None:
pass
@mark_no_op
def leave_ComparisonTarget_operator(self, node: "ComparisonTarget") -> None:
pass
@mark_no_op
def visit_ComparisonTarget_comparator(self, node: "ComparisonTarget") -> None:
pass
@mark_no_op
def leave_ComparisonTarget_comparator(self, node: "ComparisonTarget") -> None:
pass
@mark_no_op
def visit_ConcatenatedString(self, node: "ConcatenatedString") -> Optional[bool]:
pass
@mark_no_op
def visit_ConcatenatedString_left(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def leave_ConcatenatedString_left(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def visit_ConcatenatedString_right(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def leave_ConcatenatedString_right(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def visit_ConcatenatedString_lpar(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def leave_ConcatenatedString_lpar(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def visit_ConcatenatedString_rpar(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def leave_ConcatenatedString_rpar(self, node: "ConcatenatedString") -> None:
pass
@mark_no_op
def visit_ConcatenatedString_whitespace_between(
self, node: "ConcatenatedString"
) -> None:
pass
@mark_no_op
def leave_ConcatenatedString_whitespace_between(
self, node: "ConcatenatedString"
) -> None:
pass
@mark_no_op
def visit_Continue(self, node: "Continue") -> Optional[bool]:
pass
@mark_no_op
def visit_Continue_semicolon(self, node: "Continue") -> None:
pass
@mark_no_op
def leave_Continue_semicolon(self, node: "Continue") -> None:
pass
@mark_no_op
def visit_Decorator(self, node: "Decorator") -> Optional[bool]:
pass
@mark_no_op
def visit_Decorator_decorator(self, node: "Decorator") -> None:
pass
@mark_no_op
def leave_Decorator_decorator(self, node: "Decorator") -> None:
pass
@mark_no_op
def visit_Decorator_leading_lines(self, node: "Decorator") -> None:
pass
@mark_no_op
def leave_Decorator_leading_lines(self, node: "Decorator") -> None:
pass
@mark_no_op
def visit_Decorator_whitespace_after_at(self, node: "Decorator") -> None:
pass
@mark_no_op
def leave_Decorator_whitespace_after_at(self, node: "Decorator") -> None:
pass
@mark_no_op
def visit_Decorator_trailing_whitespace(self, node: "Decorator") -> None:
pass
@mark_no_op
def leave_Decorator_trailing_whitespace(self, node: "Decorator") -> None:
pass
@mark_no_op
def visit_Del(self, node: "Del") -> Optional[bool]:
pass
@mark_no_op
def visit_Del_target(self, node: "Del") -> None:
pass
@mark_no_op
def leave_Del_target(self, node: "Del") -> None:
pass
@mark_no_op
def visit_Del_whitespace_after_del(self, node: "Del") -> None:
pass
    # No-op typed visit/leave hooks for ``Del``, ``Dict``, ``DictComp`` and
    # ``DictElement`` nodes and their syntactic sub-attributes (braces, parens,
    # commas, colon whitespace). Generated boilerplate: subclasses override only
    # the hooks they care about. NOTE(review): @mark_no_op presumably tags these
    # defaults so the visitor dispatcher can skip them — confirm against
    # mark_no_op's definition.
    @mark_no_op
    def leave_Del_whitespace_after_del(self, node: "Del") -> None:
        pass
    @mark_no_op
    def visit_Del_semicolon(self, node: "Del") -> None:
        pass
    @mark_no_op
    def leave_Del_semicolon(self, node: "Del") -> None:
        pass
    # Node-level visit hooks return Optional[bool]: by visitor convention a
    # false-y return can prune traversal into the node's children.
    @mark_no_op
    def visit_Dict(self, node: "Dict") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Dict_elements(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def leave_Dict_elements(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def visit_Dict_lbrace(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def leave_Dict_lbrace(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def visit_Dict_rbrace(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def leave_Dict_rbrace(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def visit_Dict_lpar(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def leave_Dict_lpar(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def visit_Dict_rpar(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def leave_Dict_rpar(self, node: "Dict") -> None:
        pass
    @mark_no_op
    def visit_DictComp(self, node: "DictComp") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_DictComp_key(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_key(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_value(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_value(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_for_in(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_for_in(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_lbrace(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_lbrace(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_rbrace(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_rbrace(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_lpar(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_lpar(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_rpar(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_rpar(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_whitespace_before_colon(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_whitespace_before_colon(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictComp_whitespace_after_colon(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def leave_DictComp_whitespace_after_colon(self, node: "DictComp") -> None:
        pass
    @mark_no_op
    def visit_DictElement(self, node: "DictElement") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_DictElement_key(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def leave_DictElement_key(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def visit_DictElement_value(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def leave_DictElement_value(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def visit_DictElement_comma(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def leave_DictElement_comma(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def visit_DictElement_whitespace_before_colon(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def leave_DictElement_whitespace_before_colon(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def visit_DictElement_whitespace_after_colon(self, node: "DictElement") -> None:
        pass
    @mark_no_op
    def leave_DictElement_whitespace_after_colon(self, node: "DictElement") -> None:
        pass
    # No-op typed visit/leave hooks for operator and statement nodes
    # ``Divide`` through ``Finally`` (operators, ``Element``, ``Ellipsis``,
    # ``Else``, ``EmptyLine``, ``Equal``, ``ExceptHandler``, ``Expr``,
    # ``Finally``) and their whitespace/syntax attributes. Generated
    # boilerplate intended to be selectively overridden by subclasses.
    @mark_no_op
    def visit_Divide(self, node: "Divide") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Divide_whitespace_before(self, node: "Divide") -> None:
        pass
    @mark_no_op
    def leave_Divide_whitespace_before(self, node: "Divide") -> None:
        pass
    @mark_no_op
    def visit_Divide_whitespace_after(self, node: "Divide") -> None:
        pass
    @mark_no_op
    def leave_Divide_whitespace_after(self, node: "Divide") -> None:
        pass
    @mark_no_op
    def visit_DivideAssign(self, node: "DivideAssign") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_DivideAssign_whitespace_before(self, node: "DivideAssign") -> None:
        pass
    @mark_no_op
    def leave_DivideAssign_whitespace_before(self, node: "DivideAssign") -> None:
        pass
    @mark_no_op
    def visit_DivideAssign_whitespace_after(self, node: "DivideAssign") -> None:
        pass
    @mark_no_op
    def leave_DivideAssign_whitespace_after(self, node: "DivideAssign") -> None:
        pass
    @mark_no_op
    def visit_Dot(self, node: "Dot") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Dot_whitespace_before(self, node: "Dot") -> None:
        pass
    @mark_no_op
    def leave_Dot_whitespace_before(self, node: "Dot") -> None:
        pass
    @mark_no_op
    def visit_Dot_whitespace_after(self, node: "Dot") -> None:
        pass
    @mark_no_op
    def leave_Dot_whitespace_after(self, node: "Dot") -> None:
        pass
    @mark_no_op
    def visit_Element(self, node: "Element") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Element_value(self, node: "Element") -> None:
        pass
    @mark_no_op
    def leave_Element_value(self, node: "Element") -> None:
        pass
    @mark_no_op
    def visit_Element_comma(self, node: "Element") -> None:
        pass
    @mark_no_op
    def leave_Element_comma(self, node: "Element") -> None:
        pass
    @mark_no_op
    def visit_Ellipsis(self, node: "Ellipsis") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Ellipsis_lpar(self, node: "Ellipsis") -> None:
        pass
    @mark_no_op
    def leave_Ellipsis_lpar(self, node: "Ellipsis") -> None:
        pass
    @mark_no_op
    def visit_Ellipsis_rpar(self, node: "Ellipsis") -> None:
        pass
    @mark_no_op
    def leave_Ellipsis_rpar(self, node: "Ellipsis") -> None:
        pass
    @mark_no_op
    def visit_Else(self, node: "Else") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Else_body(self, node: "Else") -> None:
        pass
    @mark_no_op
    def leave_Else_body(self, node: "Else") -> None:
        pass
    @mark_no_op
    def visit_Else_leading_lines(self, node: "Else") -> None:
        pass
    @mark_no_op
    def leave_Else_leading_lines(self, node: "Else") -> None:
        pass
    @mark_no_op
    def visit_Else_whitespace_before_colon(self, node: "Else") -> None:
        pass
    @mark_no_op
    def leave_Else_whitespace_before_colon(self, node: "Else") -> None:
        pass
    @mark_no_op
    def visit_EmptyLine(self, node: "EmptyLine") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_EmptyLine_indent(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def leave_EmptyLine_indent(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def visit_EmptyLine_whitespace(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def leave_EmptyLine_whitespace(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def visit_EmptyLine_comment(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def leave_EmptyLine_comment(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def visit_EmptyLine_newline(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def leave_EmptyLine_newline(self, node: "EmptyLine") -> None:
        pass
    @mark_no_op
    def visit_Equal(self, node: "Equal") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Equal_whitespace_before(self, node: "Equal") -> None:
        pass
    @mark_no_op
    def leave_Equal_whitespace_before(self, node: "Equal") -> None:
        pass
    @mark_no_op
    def visit_Equal_whitespace_after(self, node: "Equal") -> None:
        pass
    @mark_no_op
    def leave_Equal_whitespace_after(self, node: "Equal") -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler(self, node: "ExceptHandler") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_ExceptHandler_body(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_body(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler_type(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_type(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler_name(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_name(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler_leading_lines(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_leading_lines(self, node: "ExceptHandler") -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler_whitespace_after_except(
        self, node: "ExceptHandler"
    ) -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_whitespace_after_except(
        self, node: "ExceptHandler"
    ) -> None:
        pass
    @mark_no_op
    def visit_ExceptHandler_whitespace_before_colon(
        self, node: "ExceptHandler"
    ) -> None:
        pass
    @mark_no_op
    def leave_ExceptHandler_whitespace_before_colon(
        self, node: "ExceptHandler"
    ) -> None:
        pass
    @mark_no_op
    def visit_Expr(self, node: "Expr") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Expr_value(self, node: "Expr") -> None:
        pass
    @mark_no_op
    def leave_Expr_value(self, node: "Expr") -> None:
        pass
    @mark_no_op
    def visit_Expr_semicolon(self, node: "Expr") -> None:
        pass
    @mark_no_op
    def leave_Expr_semicolon(self, node: "Expr") -> None:
        pass
    @mark_no_op
    def visit_Finally(self, node: "Finally") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Finally_body(self, node: "Finally") -> None:
        pass
    @mark_no_op
    def leave_Finally_body(self, node: "Finally") -> None:
        pass
    @mark_no_op
    def visit_Finally_leading_lines(self, node: "Finally") -> None:
        pass
    @mark_no_op
    def leave_Finally_leading_lines(self, node: "Finally") -> None:
        pass
    @mark_no_op
    def visit_Finally_whitespace_before_colon(self, node: "Finally") -> None:
        pass
    @mark_no_op
    def leave_Finally_whitespace_before_colon(self, node: "Finally") -> None:
        pass
    # No-op typed visit/leave hooks for ``Float`` through ``From``: numeric
    # literals, floor-division operators, ``For`` loops, f-string nodes
    # (``FormattedString*``), and the ``from`` keyword node, plus their
    # whitespace/paren attributes. Generated boilerplate for selective override.
    @mark_no_op
    def visit_Float(self, node: "Float") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Float_value(self, node: "Float") -> None:
        pass
    @mark_no_op
    def leave_Float_value(self, node: "Float") -> None:
        pass
    @mark_no_op
    def visit_Float_lpar(self, node: "Float") -> None:
        pass
    @mark_no_op
    def leave_Float_lpar(self, node: "Float") -> None:
        pass
    @mark_no_op
    def visit_Float_rpar(self, node: "Float") -> None:
        pass
    @mark_no_op
    def leave_Float_rpar(self, node: "Float") -> None:
        pass
    @mark_no_op
    def visit_FloorDivide(self, node: "FloorDivide") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FloorDivide_whitespace_before(self, node: "FloorDivide") -> None:
        pass
    @mark_no_op
    def leave_FloorDivide_whitespace_before(self, node: "FloorDivide") -> None:
        pass
    @mark_no_op
    def visit_FloorDivide_whitespace_after(self, node: "FloorDivide") -> None:
        pass
    @mark_no_op
    def leave_FloorDivide_whitespace_after(self, node: "FloorDivide") -> None:
        pass
    @mark_no_op
    def visit_FloorDivideAssign(self, node: "FloorDivideAssign") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FloorDivideAssign_whitespace_before(
        self, node: "FloorDivideAssign"
    ) -> None:
        pass
    @mark_no_op
    def leave_FloorDivideAssign_whitespace_before(
        self, node: "FloorDivideAssign"
    ) -> None:
        pass
    @mark_no_op
    def visit_FloorDivideAssign_whitespace_after(
        self, node: "FloorDivideAssign"
    ) -> None:
        pass
    @mark_no_op
    def leave_FloorDivideAssign_whitespace_after(
        self, node: "FloorDivideAssign"
    ) -> None:
        pass
    @mark_no_op
    def visit_For(self, node: "For") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_For_target(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_target(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_iter(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_iter(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_body(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_body(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_orelse(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_orelse(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_asynchronous(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_asynchronous(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_leading_lines(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_leading_lines(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_whitespace_after_for(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_whitespace_after_for(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_whitespace_before_in(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_whitespace_before_in(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_whitespace_after_in(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_whitespace_after_in(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_For_whitespace_before_colon(self, node: "For") -> None:
        pass
    @mark_no_op
    def leave_For_whitespace_before_colon(self, node: "For") -> None:
        pass
    @mark_no_op
    def visit_FormattedString(self, node: "FormattedString") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FormattedString_parts(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def leave_FormattedString_parts(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def visit_FormattedString_start(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def leave_FormattedString_start(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def visit_FormattedString_end(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def leave_FormattedString_end(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def visit_FormattedString_lpar(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def leave_FormattedString_lpar(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def visit_FormattedString_rpar(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def leave_FormattedString_rpar(self, node: "FormattedString") -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression(
        self, node: "FormattedStringExpression"
    ) -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_conversion(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_conversion(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_format_spec(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_format_spec(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_whitespace_before_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_whitespace_before_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_whitespace_after_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_whitespace_after_expression(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringExpression_equal(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def leave_FormattedStringExpression_equal(
        self, node: "FormattedStringExpression"
    ) -> None:
        pass
    @mark_no_op
    def visit_FormattedStringText(self, node: "FormattedStringText") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FormattedStringText_value(self, node: "FormattedStringText") -> None:
        pass
    @mark_no_op
    def leave_FormattedStringText_value(self, node: "FormattedStringText") -> None:
        pass
    @mark_no_op
    def visit_From(self, node: "From") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_From_item(self, node: "From") -> None:
        pass
    @mark_no_op
    def leave_From_item(self, node: "From") -> None:
        pass
    @mark_no_op
    def visit_From_whitespace_before_from(self, node: "From") -> None:
        pass
    @mark_no_op
    def leave_From_whitespace_before_from(self, node: "From") -> None:
        pass
    @mark_no_op
    def visit_From_whitespace_after_from(self, node: "From") -> None:
        pass
    @mark_no_op
    def leave_From_whitespace_after_from(self, node: "From") -> None:
        pass
    # No-op typed visit/leave hooks for ``FunctionDef``, ``GeneratorExp``,
    # ``Global``, ``GreaterThan`` and ``GreaterThanEqual`` nodes and their
    # attributes (decorators, params, whitespace slots, semicolons).
    # Generated boilerplate for selective override by subclasses.
    @mark_no_op
    def visit_FunctionDef(self, node: "FunctionDef") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_FunctionDef_name(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_name(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_params(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_params(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_body(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_body(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_decorators(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_decorators(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_returns(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_returns(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_asynchronous(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_asynchronous(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_leading_lines(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_leading_lines(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_lines_after_decorators(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_lines_after_decorators(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_whitespace_after_def(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_whitespace_after_def(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_whitespace_after_name(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_whitespace_after_name(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_whitespace_before_params(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_whitespace_before_params(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_FunctionDef_whitespace_before_colon(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def leave_FunctionDef_whitespace_before_colon(self, node: "FunctionDef") -> None:
        pass
    @mark_no_op
    def visit_GeneratorExp(self, node: "GeneratorExp") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_GeneratorExp_elt(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def leave_GeneratorExp_elt(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def visit_GeneratorExp_for_in(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def leave_GeneratorExp_for_in(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def visit_GeneratorExp_lpar(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def leave_GeneratorExp_lpar(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def visit_GeneratorExp_rpar(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def leave_GeneratorExp_rpar(self, node: "GeneratorExp") -> None:
        pass
    @mark_no_op
    def visit_Global(self, node: "Global") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Global_names(self, node: "Global") -> None:
        pass
    @mark_no_op
    def leave_Global_names(self, node: "Global") -> None:
        pass
    @mark_no_op
    def visit_Global_whitespace_after_global(self, node: "Global") -> None:
        pass
    @mark_no_op
    def leave_Global_whitespace_after_global(self, node: "Global") -> None:
        pass
    @mark_no_op
    def visit_Global_semicolon(self, node: "Global") -> None:
        pass
    @mark_no_op
    def leave_Global_semicolon(self, node: "Global") -> None:
        pass
    @mark_no_op
    def visit_GreaterThan(self, node: "GreaterThan") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_GreaterThan_whitespace_before(self, node: "GreaterThan") -> None:
        pass
    @mark_no_op
    def leave_GreaterThan_whitespace_before(self, node: "GreaterThan") -> None:
        pass
    @mark_no_op
    def visit_GreaterThan_whitespace_after(self, node: "GreaterThan") -> None:
        pass
    @mark_no_op
    def leave_GreaterThan_whitespace_after(self, node: "GreaterThan") -> None:
        pass
    @mark_no_op
    def visit_GreaterThanEqual(self, node: "GreaterThanEqual") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_GreaterThanEqual_whitespace_before(
        self, node: "GreaterThanEqual"
    ) -> None:
        pass
    @mark_no_op
    def leave_GreaterThanEqual_whitespace_before(
        self, node: "GreaterThanEqual"
    ) -> None:
        pass
    @mark_no_op
    def visit_GreaterThanEqual_whitespace_after(self, node: "GreaterThanEqual") -> None:
        pass
    @mark_no_op
    def leave_GreaterThanEqual_whitespace_after(self, node: "GreaterThanEqual") -> None:
        pass
    # No-op typed visit/leave hooks for ``If``/``IfExp``, ``Imaginary``,
    # the import statement family (``Import``, ``ImportAlias``,
    # ``ImportFrom``, ``ImportStar``), ``In``, ``IndentedBlock``, ``Index``,
    # ``Integer``, ``Is`` and ``IsNot`` nodes and their attributes.
    # Generated boilerplate for selective override by subclasses.
    @mark_no_op
    def visit_If(self, node: "If") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_If_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_If_body(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_body(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_If_orelse(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_orelse(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_If_leading_lines(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_leading_lines(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_If_whitespace_before_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_whitespace_before_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_If_whitespace_after_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def leave_If_whitespace_after_test(self, node: "If") -> None:
        pass
    @mark_no_op
    def visit_IfExp(self, node: "IfExp") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_IfExp_test(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_test(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_body(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_body(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_orelse(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_orelse(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_lpar(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_lpar(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_rpar(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_rpar(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_whitespace_before_if(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_whitespace_before_if(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_whitespace_after_if(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_whitespace_after_if(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_whitespace_before_else(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_whitespace_before_else(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_IfExp_whitespace_after_else(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def leave_IfExp_whitespace_after_else(self, node: "IfExp") -> None:
        pass
    @mark_no_op
    def visit_Imaginary(self, node: "Imaginary") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Imaginary_value(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def leave_Imaginary_value(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def visit_Imaginary_lpar(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def leave_Imaginary_lpar(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def visit_Imaginary_rpar(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def leave_Imaginary_rpar(self, node: "Imaginary") -> None:
        pass
    @mark_no_op
    def visit_Import(self, node: "Import") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Import_names(self, node: "Import") -> None:
        pass
    @mark_no_op
    def leave_Import_names(self, node: "Import") -> None:
        pass
    @mark_no_op
    def visit_Import_semicolon(self, node: "Import") -> None:
        pass
    @mark_no_op
    def leave_Import_semicolon(self, node: "Import") -> None:
        pass
    @mark_no_op
    def visit_Import_whitespace_after_import(self, node: "Import") -> None:
        pass
    @mark_no_op
    def leave_Import_whitespace_after_import(self, node: "Import") -> None:
        pass
    @mark_no_op
    def visit_ImportAlias(self, node: "ImportAlias") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_ImportAlias_name(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def leave_ImportAlias_name(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def visit_ImportAlias_asname(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def leave_ImportAlias_asname(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def visit_ImportAlias_comma(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def leave_ImportAlias_comma(self, node: "ImportAlias") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom(self, node: "ImportFrom") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_ImportFrom_module(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_module(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_names(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_names(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_relative(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_relative(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_lpar(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_lpar(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_rpar(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_rpar(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_semicolon(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_semicolon(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_whitespace_after_from(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_whitespace_after_from(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_whitespace_before_import(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_whitespace_before_import(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def visit_ImportFrom_whitespace_after_import(self, node: "ImportFrom") -> None:
        pass
    @mark_no_op
    def leave_ImportFrom_whitespace_after_import(self, node: "ImportFrom") -> None:
        pass
    # ImportStar is a leaf with no attribute hooks, hence visit-only.
    @mark_no_op
    def visit_ImportStar(self, node: "ImportStar") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_In(self, node: "In") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_In_whitespace_before(self, node: "In") -> None:
        pass
    @mark_no_op
    def leave_In_whitespace_before(self, node: "In") -> None:
        pass
    @mark_no_op
    def visit_In_whitespace_after(self, node: "In") -> None:
        pass
    @mark_no_op
    def leave_In_whitespace_after(self, node: "In") -> None:
        pass
    @mark_no_op
    def visit_IndentedBlock(self, node: "IndentedBlock") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_IndentedBlock_body(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def leave_IndentedBlock_body(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def visit_IndentedBlock_header(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def leave_IndentedBlock_header(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def visit_IndentedBlock_indent(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def leave_IndentedBlock_indent(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def visit_IndentedBlock_footer(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def leave_IndentedBlock_footer(self, node: "IndentedBlock") -> None:
        pass
    @mark_no_op
    def visit_Index(self, node: "Index") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Index_value(self, node: "Index") -> None:
        pass
    @mark_no_op
    def leave_Index_value(self, node: "Index") -> None:
        pass
    @mark_no_op
    def visit_Integer(self, node: "Integer") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Integer_value(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def leave_Integer_value(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def visit_Integer_lpar(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def leave_Integer_lpar(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def visit_Integer_rpar(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def leave_Integer_rpar(self, node: "Integer") -> None:
        pass
    @mark_no_op
    def visit_Is(self, node: "Is") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_Is_whitespace_before(self, node: "Is") -> None:
        pass
    @mark_no_op
    def leave_Is_whitespace_before(self, node: "Is") -> None:
        pass
    @mark_no_op
    def visit_Is_whitespace_after(self, node: "Is") -> None:
        pass
    @mark_no_op
    def leave_Is_whitespace_after(self, node: "Is") -> None:
        pass
    @mark_no_op
    def visit_IsNot(self, node: "IsNot") -> Optional[bool]:
        pass
    @mark_no_op
    def visit_IsNot_whitespace_before(self, node: "IsNot") -> None:
        pass
    @mark_no_op
    def leave_IsNot_whitespace_before(self, node: "IsNot") -> None:
        pass
    # ``is not`` is a two-token operator, hence the extra "between" slot.
    @mark_no_op
    def visit_IsNot_whitespace_between(self, node: "IsNot") -> None:
        pass
    @mark_no_op
    def leave_IsNot_whitespace_between(self, node: "IsNot") -> None:
        pass
    @mark_no_op
    def visit_IsNot_whitespace_after(self, node: "IsNot") -> None:
        pass
    @mark_no_op
    def leave_IsNot_whitespace_after(self, node: "IsNot") -> None:
        pass
@mark_no_op
def visit_Lambda(self, node: "Lambda") -> Optional[bool]:
pass
@mark_no_op
def visit_Lambda_params(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_params(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_Lambda_body(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_body(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_Lambda_colon(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_colon(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_Lambda_lpar(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_lpar(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_Lambda_rpar(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_rpar(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_Lambda_whitespace_after_lambda(self, node: "Lambda") -> None:
pass
@mark_no_op
def leave_Lambda_whitespace_after_lambda(self, node: "Lambda") -> None:
pass
@mark_no_op
def visit_LeftCurlyBrace(self, node: "LeftCurlyBrace") -> Optional[bool]:
pass
@mark_no_op
def visit_LeftCurlyBrace_whitespace_after(self, node: "LeftCurlyBrace") -> None:
pass
@mark_no_op
def leave_LeftCurlyBrace_whitespace_after(self, node: "LeftCurlyBrace") -> None:
pass
@mark_no_op
def visit_LeftParen(self, node: "LeftParen") -> Optional[bool]:
pass
@mark_no_op
def visit_LeftParen_whitespace_after(self, node: "LeftParen") -> None:
pass
@mark_no_op
def leave_LeftParen_whitespace_after(self, node: "LeftParen") -> None:
pass
@mark_no_op
def visit_LeftShift(self, node: "LeftShift") -> Optional[bool]:
pass
@mark_no_op
def visit_LeftShift_whitespace_before(self, node: "LeftShift") -> None:
pass
@mark_no_op
def leave_LeftShift_whitespace_before(self, node: "LeftShift") -> None:
pass
@mark_no_op
def visit_LeftShift_whitespace_after(self, node: "LeftShift") -> None:
pass
@mark_no_op
def leave_LeftShift_whitespace_after(self, node: "LeftShift") -> None:
pass
@mark_no_op
def visit_LeftShiftAssign(self, node: "LeftShiftAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_LeftShiftAssign_whitespace_before(self, node: "LeftShiftAssign") -> None:
pass
@mark_no_op
def leave_LeftShiftAssign_whitespace_before(self, node: "LeftShiftAssign") -> None:
pass
@mark_no_op
def visit_LeftShiftAssign_whitespace_after(self, node: "LeftShiftAssign") -> None:
pass
@mark_no_op
def leave_LeftShiftAssign_whitespace_after(self, node: "LeftShiftAssign") -> None:
pass
@mark_no_op
def visit_LeftSquareBracket(self, node: "LeftSquareBracket") -> Optional[bool]:
pass
@mark_no_op
def visit_LeftSquareBracket_whitespace_after(
self, node: "LeftSquareBracket"
) -> None:
pass
@mark_no_op
def leave_LeftSquareBracket_whitespace_after(
self, node: "LeftSquareBracket"
) -> None:
pass
@mark_no_op
def visit_LessThan(self, node: "LessThan") -> Optional[bool]:
pass
@mark_no_op
def visit_LessThan_whitespace_before(self, node: "LessThan") -> None:
pass
@mark_no_op
def leave_LessThan_whitespace_before(self, node: "LessThan") -> None:
pass
@mark_no_op
def visit_LessThan_whitespace_after(self, node: "LessThan") -> None:
pass
@mark_no_op
def leave_LessThan_whitespace_after(self, node: "LessThan") -> None:
pass
@mark_no_op
def visit_LessThanEqual(self, node: "LessThanEqual") -> Optional[bool]:
pass
@mark_no_op
def visit_LessThanEqual_whitespace_before(self, node: "LessThanEqual") -> None:
pass
@mark_no_op
def leave_LessThanEqual_whitespace_before(self, node: "LessThanEqual") -> None:
pass
@mark_no_op
def visit_LessThanEqual_whitespace_after(self, node: "LessThanEqual") -> None:
pass
@mark_no_op
def leave_LessThanEqual_whitespace_after(self, node: "LessThanEqual") -> None:
pass
@mark_no_op
def visit_List(self, node: "List") -> Optional[bool]:
pass
@mark_no_op
def visit_List_elements(self, node: "List") -> None:
pass
@mark_no_op
def leave_List_elements(self, node: "List") -> None:
pass
@mark_no_op
def visit_List_lbracket(self, node: "List") -> None:
pass
@mark_no_op
def leave_List_lbracket(self, node: "List") -> None:
pass
@mark_no_op
def visit_List_rbracket(self, node: "List") -> None:
pass
@mark_no_op
def leave_List_rbracket(self, node: "List") -> None:
pass
@mark_no_op
def visit_List_lpar(self, node: "List") -> None:
pass
@mark_no_op
def leave_List_lpar(self, node: "List") -> None:
pass
@mark_no_op
def visit_List_rpar(self, node: "List") -> None:
pass
@mark_no_op
def leave_List_rpar(self, node: "List") -> None:
pass
@mark_no_op
def visit_ListComp(self, node: "ListComp") -> Optional[bool]:
pass
@mark_no_op
def visit_ListComp_elt(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_elt(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_ListComp_for_in(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_for_in(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_ListComp_lbracket(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_lbracket(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_ListComp_rbracket(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_rbracket(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_ListComp_lpar(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_lpar(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_ListComp_rpar(self, node: "ListComp") -> None:
pass
@mark_no_op
def leave_ListComp_rpar(self, node: "ListComp") -> None:
pass
@mark_no_op
def visit_MatrixMultiply(self, node: "MatrixMultiply") -> Optional[bool]:
pass
@mark_no_op
def visit_MatrixMultiply_whitespace_before(self, node: "MatrixMultiply") -> None:
pass
@mark_no_op
def leave_MatrixMultiply_whitespace_before(self, node: "MatrixMultiply") -> None:
pass
@mark_no_op
def visit_MatrixMultiply_whitespace_after(self, node: "MatrixMultiply") -> None:
pass
@mark_no_op
def leave_MatrixMultiply_whitespace_after(self, node: "MatrixMultiply") -> None:
pass
@mark_no_op
def visit_MatrixMultiplyAssign(
self, node: "MatrixMultiplyAssign"
) -> Optional[bool]:
pass
@mark_no_op
def visit_MatrixMultiplyAssign_whitespace_before(
self, node: "MatrixMultiplyAssign"
) -> None:
pass
@mark_no_op
def leave_MatrixMultiplyAssign_whitespace_before(
self, node: "MatrixMultiplyAssign"
) -> None:
pass
@mark_no_op
def visit_MatrixMultiplyAssign_whitespace_after(
self, node: "MatrixMultiplyAssign"
) -> None:
pass
@mark_no_op
def leave_MatrixMultiplyAssign_whitespace_after(
self, node: "MatrixMultiplyAssign"
) -> None:
pass
@mark_no_op
def visit_Minus(self, node: "Minus") -> Optional[bool]:
pass
@mark_no_op
def visit_Minus_whitespace_after(self, node: "Minus") -> None:
pass
@mark_no_op
def leave_Minus_whitespace_after(self, node: "Minus") -> None:
pass
    @mark_no_op
    def visit_Module(self, node: "Module") -> Optional[bool]:
        """No-op visit hook for ``Module`` (the tree root) nodes.

        Default implementation does nothing and returns ``None``; override in
        a subclass to act on the module node.
        """
        pass
    @mark_no_op
    def visit_Module_body(self, node: "Module") -> None:
        """No-op attribute-slot hook for the ``body`` attribute of ``Module``.

        Naming convention ``visit_<Node>_<attribute>`` suggests this fires
        around traversal of that specific child attribute (paired with
        ``leave_Module_body``) — confirm against the traversal driver.
        """
        pass
@mark_no_op
def leave_Module_body(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_header(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_header(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_footer(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_footer(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_encoding(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_encoding(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_default_indent(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_default_indent(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_default_newline(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_default_newline(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Module_has_trailing_newline(self, node: "Module") -> None:
pass
@mark_no_op
def leave_Module_has_trailing_newline(self, node: "Module") -> None:
pass
@mark_no_op
def visit_Modulo(self, node: "Modulo") -> Optional[bool]:
pass
@mark_no_op
def visit_Modulo_whitespace_before(self, node: "Modulo") -> None:
pass
@mark_no_op
def leave_Modulo_whitespace_before(self, node: "Modulo") -> None:
pass
@mark_no_op
def visit_Modulo_whitespace_after(self, node: "Modulo") -> None:
pass
@mark_no_op
def leave_Modulo_whitespace_after(self, node: "Modulo") -> None:
pass
@mark_no_op
def visit_ModuloAssign(self, node: "ModuloAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_ModuloAssign_whitespace_before(self, node: "ModuloAssign") -> None:
pass
@mark_no_op
def leave_ModuloAssign_whitespace_before(self, node: "ModuloAssign") -> None:
pass
@mark_no_op
def visit_ModuloAssign_whitespace_after(self, node: "ModuloAssign") -> None:
pass
@mark_no_op
def leave_ModuloAssign_whitespace_after(self, node: "ModuloAssign") -> None:
pass
@mark_no_op
def visit_Multiply(self, node: "Multiply") -> Optional[bool]:
pass
@mark_no_op
def visit_Multiply_whitespace_before(self, node: "Multiply") -> None:
pass
@mark_no_op
def leave_Multiply_whitespace_before(self, node: "Multiply") -> None:
pass
@mark_no_op
def visit_Multiply_whitespace_after(self, node: "Multiply") -> None:
pass
@mark_no_op
def leave_Multiply_whitespace_after(self, node: "Multiply") -> None:
pass
@mark_no_op
def visit_MultiplyAssign(self, node: "MultiplyAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_MultiplyAssign_whitespace_before(self, node: "MultiplyAssign") -> None:
pass
@mark_no_op
def leave_MultiplyAssign_whitespace_before(self, node: "MultiplyAssign") -> None:
pass
@mark_no_op
def visit_MultiplyAssign_whitespace_after(self, node: "MultiplyAssign") -> None:
pass
@mark_no_op
def leave_MultiplyAssign_whitespace_after(self, node: "MultiplyAssign") -> None:
pass
    @mark_no_op
    def visit_Name(self, node: "Name") -> Optional[bool]:
        """No-op visit hook for ``Name`` (identifier) nodes.

        Does nothing and returns ``None``; subclasses override to react to
        ``Name`` nodes.
        """
        pass
@mark_no_op
def visit_Name_value(self, node: "Name") -> None:
pass
@mark_no_op
def leave_Name_value(self, node: "Name") -> None:
pass
@mark_no_op
def visit_Name_lpar(self, node: "Name") -> None:
pass
@mark_no_op
def leave_Name_lpar(self, node: "Name") -> None:
pass
@mark_no_op
def visit_Name_rpar(self, node: "Name") -> None:
pass
@mark_no_op
def leave_Name_rpar(self, node: "Name") -> None:
pass
@mark_no_op
def visit_NameItem(self, node: "NameItem") -> Optional[bool]:
pass
@mark_no_op
def visit_NameItem_name(self, node: "NameItem") -> None:
pass
@mark_no_op
def leave_NameItem_name(self, node: "NameItem") -> None:
pass
@mark_no_op
def visit_NameItem_comma(self, node: "NameItem") -> None:
pass
@mark_no_op
def leave_NameItem_comma(self, node: "NameItem") -> None:
pass
@mark_no_op
def visit_NamedExpr(self, node: "NamedExpr") -> Optional[bool]:
pass
@mark_no_op
def visit_NamedExpr_target(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_target(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_NamedExpr_value(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_value(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_NamedExpr_lpar(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_lpar(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_NamedExpr_rpar(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_rpar(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_NamedExpr_whitespace_before_walrus(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_whitespace_before_walrus(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_NamedExpr_whitespace_after_walrus(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def leave_NamedExpr_whitespace_after_walrus(self, node: "NamedExpr") -> None:
pass
@mark_no_op
def visit_Newline(self, node: "Newline") -> Optional[bool]:
pass
@mark_no_op
def visit_Newline_value(self, node: "Newline") -> None:
pass
@mark_no_op
def leave_Newline_value(self, node: "Newline") -> None:
pass
@mark_no_op
def visit_Nonlocal(self, node: "Nonlocal") -> Optional[bool]:
pass
@mark_no_op
def visit_Nonlocal_names(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def leave_Nonlocal_names(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def visit_Nonlocal_whitespace_after_nonlocal(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def leave_Nonlocal_whitespace_after_nonlocal(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def visit_Nonlocal_semicolon(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def leave_Nonlocal_semicolon(self, node: "Nonlocal") -> None:
pass
@mark_no_op
def visit_Not(self, node: "Not") -> Optional[bool]:
pass
@mark_no_op
def visit_Not_whitespace_after(self, node: "Not") -> None:
pass
@mark_no_op
def leave_Not_whitespace_after(self, node: "Not") -> None:
pass
@mark_no_op
def visit_NotEqual(self, node: "NotEqual") -> Optional[bool]:
pass
@mark_no_op
def visit_NotEqual_value(self, node: "NotEqual") -> None:
pass
@mark_no_op
def leave_NotEqual_value(self, node: "NotEqual") -> None:
pass
@mark_no_op
def visit_NotEqual_whitespace_before(self, node: "NotEqual") -> None:
pass
@mark_no_op
def leave_NotEqual_whitespace_before(self, node: "NotEqual") -> None:
pass
@mark_no_op
def visit_NotEqual_whitespace_after(self, node: "NotEqual") -> None:
pass
@mark_no_op
def leave_NotEqual_whitespace_after(self, node: "NotEqual") -> None:
pass
@mark_no_op
def visit_NotIn(self, node: "NotIn") -> Optional[bool]:
pass
@mark_no_op
def visit_NotIn_whitespace_before(self, node: "NotIn") -> None:
pass
@mark_no_op
def leave_NotIn_whitespace_before(self, node: "NotIn") -> None:
pass
@mark_no_op
def visit_NotIn_whitespace_between(self, node: "NotIn") -> None:
pass
@mark_no_op
def leave_NotIn_whitespace_between(self, node: "NotIn") -> None:
pass
@mark_no_op
def visit_NotIn_whitespace_after(self, node: "NotIn") -> None:
pass
@mark_no_op
def leave_NotIn_whitespace_after(self, node: "NotIn") -> None:
pass
@mark_no_op
def visit_Or(self, node: "Or") -> Optional[bool]:
pass
@mark_no_op
def visit_Or_whitespace_before(self, node: "Or") -> None:
pass
@mark_no_op
def leave_Or_whitespace_before(self, node: "Or") -> None:
pass
@mark_no_op
def visit_Or_whitespace_after(self, node: "Or") -> None:
pass
@mark_no_op
def leave_Or_whitespace_after(self, node: "Or") -> None:
pass
    @mark_no_op
    def visit_Param(self, node: "Param") -> Optional[bool]:
        """No-op visit hook for ``Param`` (function-parameter) nodes.

        Does nothing and returns ``None``; subclasses override to react to
        ``Param`` nodes.
        """
        pass
@mark_no_op
def visit_Param_name(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_name(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_annotation(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_annotation(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_equal(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_equal(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_default(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_default(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_comma(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_comma(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_star(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_star(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_whitespace_after_star(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_whitespace_after_star(self, node: "Param") -> None:
pass
@mark_no_op
def visit_Param_whitespace_after_param(self, node: "Param") -> None:
pass
@mark_no_op
def leave_Param_whitespace_after_param(self, node: "Param") -> None:
pass
@mark_no_op
def visit_ParamSlash(self, node: "ParamSlash") -> Optional[bool]:
pass
@mark_no_op
def visit_ParamSlash_comma(self, node: "ParamSlash") -> None:
pass
@mark_no_op
def leave_ParamSlash_comma(self, node: "ParamSlash") -> None:
pass
@mark_no_op
def visit_ParamStar(self, node: "ParamStar") -> Optional[bool]:
pass
@mark_no_op
def visit_ParamStar_comma(self, node: "ParamStar") -> None:
pass
@mark_no_op
def leave_ParamStar_comma(self, node: "ParamStar") -> None:
pass
@mark_no_op
def visit_Parameters(self, node: "Parameters") -> Optional[bool]:
pass
@mark_no_op
def visit_Parameters_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def visit_Parameters_star_arg(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_star_arg(self, node: "Parameters") -> None:
pass
@mark_no_op
def visit_Parameters_kwonly_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_kwonly_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def visit_Parameters_star_kwarg(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_star_kwarg(self, node: "Parameters") -> None:
pass
@mark_no_op
def visit_Parameters_posonly_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_posonly_params(self, node: "Parameters") -> None:
pass
@mark_no_op
def visit_Parameters_posonly_ind(self, node: "Parameters") -> None:
pass
@mark_no_op
def leave_Parameters_posonly_ind(self, node: "Parameters") -> None:
pass
    @mark_no_op
    def visit_ParenthesizedWhitespace(
        self, node: "ParenthesizedWhitespace"
    ) -> Optional[bool]:
        """No-op visit hook for ``ParenthesizedWhitespace`` nodes.

        Does nothing and returns ``None``; subclasses override to react to
        these whitespace nodes.
        """
        pass
@mark_no_op
def visit_ParenthesizedWhitespace_first_line(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def leave_ParenthesizedWhitespace_first_line(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def visit_ParenthesizedWhitespace_empty_lines(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def leave_ParenthesizedWhitespace_empty_lines(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def visit_ParenthesizedWhitespace_indent(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def leave_ParenthesizedWhitespace_indent(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def visit_ParenthesizedWhitespace_last_line(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def leave_ParenthesizedWhitespace_last_line(
self, node: "ParenthesizedWhitespace"
) -> None:
pass
@mark_no_op
def visit_Pass(self, node: "Pass") -> Optional[bool]:
pass
@mark_no_op
def visit_Pass_semicolon(self, node: "Pass") -> None:
pass
@mark_no_op
def leave_Pass_semicolon(self, node: "Pass") -> None:
pass
@mark_no_op
def visit_Plus(self, node: "Plus") -> Optional[bool]:
pass
@mark_no_op
def visit_Plus_whitespace_after(self, node: "Plus") -> None:
pass
@mark_no_op
def leave_Plus_whitespace_after(self, node: "Plus") -> None:
pass
@mark_no_op
def visit_Power(self, node: "Power") -> Optional[bool]:
pass
@mark_no_op
def visit_Power_whitespace_before(self, node: "Power") -> None:
pass
@mark_no_op
def leave_Power_whitespace_before(self, node: "Power") -> None:
pass
@mark_no_op
def visit_Power_whitespace_after(self, node: "Power") -> None:
pass
@mark_no_op
def leave_Power_whitespace_after(self, node: "Power") -> None:
pass
@mark_no_op
def visit_PowerAssign(self, node: "PowerAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_PowerAssign_whitespace_before(self, node: "PowerAssign") -> None:
pass
@mark_no_op
def leave_PowerAssign_whitespace_before(self, node: "PowerAssign") -> None:
pass
@mark_no_op
def visit_PowerAssign_whitespace_after(self, node: "PowerAssign") -> None:
pass
@mark_no_op
def leave_PowerAssign_whitespace_after(self, node: "PowerAssign") -> None:
pass
@mark_no_op
def visit_Raise(self, node: "Raise") -> Optional[bool]:
pass
@mark_no_op
def visit_Raise_exc(self, node: "Raise") -> None:
pass
@mark_no_op
def leave_Raise_exc(self, node: "Raise") -> None:
pass
@mark_no_op
def visit_Raise_cause(self, node: "Raise") -> None:
pass
@mark_no_op
def leave_Raise_cause(self, node: "Raise") -> None:
pass
@mark_no_op
def visit_Raise_whitespace_after_raise(self, node: "Raise") -> None:
pass
@mark_no_op
def leave_Raise_whitespace_after_raise(self, node: "Raise") -> None:
pass
@mark_no_op
def visit_Raise_semicolon(self, node: "Raise") -> None:
pass
@mark_no_op
def leave_Raise_semicolon(self, node: "Raise") -> None:
pass
@mark_no_op
def visit_Return(self, node: "Return") -> Optional[bool]:
pass
@mark_no_op
def visit_Return_value(self, node: "Return") -> None:
pass
@mark_no_op
def leave_Return_value(self, node: "Return") -> None:
pass
@mark_no_op
def visit_Return_whitespace_after_return(self, node: "Return") -> None:
pass
@mark_no_op
def leave_Return_whitespace_after_return(self, node: "Return") -> None:
pass
@mark_no_op
def visit_Return_semicolon(self, node: "Return") -> None:
pass
@mark_no_op
def leave_Return_semicolon(self, node: "Return") -> None:
pass
@mark_no_op
def visit_RightCurlyBrace(self, node: "RightCurlyBrace") -> Optional[bool]:
pass
@mark_no_op
def visit_RightCurlyBrace_whitespace_before(self, node: "RightCurlyBrace") -> None:
pass
@mark_no_op
def leave_RightCurlyBrace_whitespace_before(self, node: "RightCurlyBrace") -> None:
pass
@mark_no_op
def visit_RightParen(self, node: "RightParen") -> Optional[bool]:
pass
@mark_no_op
def visit_RightParen_whitespace_before(self, node: "RightParen") -> None:
pass
@mark_no_op
def leave_RightParen_whitespace_before(self, node: "RightParen") -> None:
pass
@mark_no_op
def visit_RightShift(self, node: "RightShift") -> Optional[bool]:
pass
@mark_no_op
def visit_RightShift_whitespace_before(self, node: "RightShift") -> None:
pass
@mark_no_op
def leave_RightShift_whitespace_before(self, node: "RightShift") -> None:
pass
@mark_no_op
def visit_RightShift_whitespace_after(self, node: "RightShift") -> None:
pass
@mark_no_op
def leave_RightShift_whitespace_after(self, node: "RightShift") -> None:
pass
@mark_no_op
def visit_RightShiftAssign(self, node: "RightShiftAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_RightShiftAssign_whitespace_before(
self, node: "RightShiftAssign"
) -> None:
pass
@mark_no_op
def leave_RightShiftAssign_whitespace_before(
self, node: "RightShiftAssign"
) -> None:
pass
@mark_no_op
def visit_RightShiftAssign_whitespace_after(self, node: "RightShiftAssign") -> None:
pass
@mark_no_op
def leave_RightShiftAssign_whitespace_after(self, node: "RightShiftAssign") -> None:
pass
@mark_no_op
def visit_RightSquareBracket(self, node: "RightSquareBracket") -> Optional[bool]:
pass
@mark_no_op
def visit_RightSquareBracket_whitespace_before(
self, node: "RightSquareBracket"
) -> None:
pass
@mark_no_op
def leave_RightSquareBracket_whitespace_before(
self, node: "RightSquareBracket"
) -> None:
pass
@mark_no_op
def visit_Semicolon(self, node: "Semicolon") -> Optional[bool]:
pass
@mark_no_op
def visit_Semicolon_whitespace_before(self, node: "Semicolon") -> None:
pass
@mark_no_op
def leave_Semicolon_whitespace_before(self, node: "Semicolon") -> None:
pass
@mark_no_op
def visit_Semicolon_whitespace_after(self, node: "Semicolon") -> None:
pass
@mark_no_op
def leave_Semicolon_whitespace_after(self, node: "Semicolon") -> None:
pass
@mark_no_op
def visit_Set(self, node: "Set") -> Optional[bool]:
pass
@mark_no_op
def visit_Set_elements(self, node: "Set") -> None:
pass
@mark_no_op
def leave_Set_elements(self, node: "Set") -> None:
pass
@mark_no_op
def visit_Set_lbrace(self, node: "Set") -> None:
pass
@mark_no_op
def leave_Set_lbrace(self, node: "Set") -> None:
pass
@mark_no_op
def visit_Set_rbrace(self, node: "Set") -> None:
pass
@mark_no_op
def leave_Set_rbrace(self, node: "Set") -> None:
pass
@mark_no_op
def visit_Set_lpar(self, node: "Set") -> None:
pass
@mark_no_op
def leave_Set_lpar(self, node: "Set") -> None:
pass
@mark_no_op
def visit_Set_rpar(self, node: "Set") -> None:
pass
@mark_no_op
def leave_Set_rpar(self, node: "Set") -> None:
pass
@mark_no_op
def visit_SetComp(self, node: "SetComp") -> Optional[bool]:
pass
@mark_no_op
def visit_SetComp_elt(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_elt(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SetComp_for_in(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_for_in(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SetComp_lbrace(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_lbrace(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SetComp_rbrace(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_rbrace(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SetComp_lpar(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_lpar(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SetComp_rpar(self, node: "SetComp") -> None:
pass
@mark_no_op
def leave_SetComp_rpar(self, node: "SetComp") -> None:
pass
@mark_no_op
def visit_SimpleStatementLine(self, node: "SimpleStatementLine") -> Optional[bool]:
pass
@mark_no_op
def visit_SimpleStatementLine_body(self, node: "SimpleStatementLine") -> None:
pass
@mark_no_op
def leave_SimpleStatementLine_body(self, node: "SimpleStatementLine") -> None:
pass
@mark_no_op
def visit_SimpleStatementLine_leading_lines(
self, node: "SimpleStatementLine"
) -> None:
pass
@mark_no_op
def leave_SimpleStatementLine_leading_lines(
self, node: "SimpleStatementLine"
) -> None:
pass
@mark_no_op
def visit_SimpleStatementLine_trailing_whitespace(
self, node: "SimpleStatementLine"
) -> None:
pass
@mark_no_op
def leave_SimpleStatementLine_trailing_whitespace(
self, node: "SimpleStatementLine"
) -> None:
pass
@mark_no_op
def visit_SimpleStatementSuite(
self, node: "SimpleStatementSuite"
) -> Optional[bool]:
pass
@mark_no_op
def visit_SimpleStatementSuite_body(self, node: "SimpleStatementSuite") -> None:
pass
@mark_no_op
def leave_SimpleStatementSuite_body(self, node: "SimpleStatementSuite") -> None:
pass
@mark_no_op
def visit_SimpleStatementSuite_leading_whitespace(
self, node: "SimpleStatementSuite"
) -> None:
pass
@mark_no_op
def leave_SimpleStatementSuite_leading_whitespace(
self, node: "SimpleStatementSuite"
) -> None:
pass
@mark_no_op
def visit_SimpleStatementSuite_trailing_whitespace(
self, node: "SimpleStatementSuite"
) -> None:
pass
@mark_no_op
def leave_SimpleStatementSuite_trailing_whitespace(
self, node: "SimpleStatementSuite"
) -> None:
pass
@mark_no_op
def visit_SimpleString(self, node: "SimpleString") -> Optional[bool]:
pass
@mark_no_op
def visit_SimpleString_value(self, node: "SimpleString") -> None:
pass
@mark_no_op
def leave_SimpleString_value(self, node: "SimpleString") -> None:
pass
@mark_no_op
def visit_SimpleString_lpar(self, node: "SimpleString") -> None:
pass
@mark_no_op
def leave_SimpleString_lpar(self, node: "SimpleString") -> None:
pass
@mark_no_op
def visit_SimpleString_rpar(self, node: "SimpleString") -> None:
pass
@mark_no_op
def leave_SimpleString_rpar(self, node: "SimpleString") -> None:
pass
@mark_no_op
def visit_SimpleWhitespace(self, node: "SimpleWhitespace") -> Optional[bool]:
pass
@mark_no_op
def visit_SimpleWhitespace_value(self, node: "SimpleWhitespace") -> None:
pass
@mark_no_op
def leave_SimpleWhitespace_value(self, node: "SimpleWhitespace") -> None:
pass
@mark_no_op
def visit_Slice(self, node: "Slice") -> Optional[bool]:
pass
@mark_no_op
def visit_Slice_lower(self, node: "Slice") -> None:
pass
@mark_no_op
def leave_Slice_lower(self, node: "Slice") -> None:
pass
@mark_no_op
def visit_Slice_upper(self, node: "Slice") -> None:
pass
@mark_no_op
def leave_Slice_upper(self, node: "Slice") -> None:
pass
@mark_no_op
def visit_Slice_step(self, node: "Slice") -> None:
pass
@mark_no_op
def leave_Slice_step(self, node: "Slice") -> None:
pass
@mark_no_op
def visit_Slice_first_colon(self, node: "Slice") -> None:
pass
@mark_no_op
def leave_Slice_first_colon(self, node: "Slice") -> None:
pass
@mark_no_op
def visit_Slice_second_colon(self, node: "Slice") -> None:
pass
@mark_no_op
def leave_Slice_second_colon(self, node: "Slice") -> None:
pass
@mark_no_op
def visit_StarredDictElement(self, node: "StarredDictElement") -> Optional[bool]:
pass
@mark_no_op
def visit_StarredDictElement_value(self, node: "StarredDictElement") -> None:
pass
@mark_no_op
def leave_StarredDictElement_value(self, node: "StarredDictElement") -> None:
pass
@mark_no_op
def visit_StarredDictElement_comma(self, node: "StarredDictElement") -> None:
pass
@mark_no_op
def leave_StarredDictElement_comma(self, node: "StarredDictElement") -> None:
pass
@mark_no_op
def visit_StarredDictElement_whitespace_before_value(
self, node: "StarredDictElement"
) -> None:
pass
@mark_no_op
def leave_StarredDictElement_whitespace_before_value(
self, node: "StarredDictElement"
) -> None:
pass
@mark_no_op
def visit_StarredElement(self, node: "StarredElement") -> Optional[bool]:
pass
@mark_no_op
def visit_StarredElement_value(self, node: "StarredElement") -> None:
pass
@mark_no_op
def leave_StarredElement_value(self, node: "StarredElement") -> None:
pass
@mark_no_op
def visit_StarredElement_comma(self, node: "StarredElement") -> None:
pass
@mark_no_op
def leave_StarredElement_comma(self, node: "StarredElement") -> None:
pass
@mark_no_op
def visit_StarredElement_lpar(self, node: "StarredElement") -> None:
pass
@mark_no_op
def leave_StarredElement_lpar(self, node: "StarredElement") -> None:
pass
@mark_no_op
def visit_StarredElement_rpar(self, node: "StarredElement") -> None:
pass
@mark_no_op
def leave_StarredElement_rpar(self, node: "StarredElement") -> None:
pass
@mark_no_op
def visit_StarredElement_whitespace_before_value(
self, node: "StarredElement"
) -> None:
pass
@mark_no_op
def leave_StarredElement_whitespace_before_value(
self, node: "StarredElement"
) -> None:
pass
@mark_no_op
def visit_Subscript(self, node: "Subscript") -> Optional[bool]:
pass
@mark_no_op
def visit_Subscript_value(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_value(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_slice(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_slice(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_lbracket(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_lbracket(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_rbracket(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_rbracket(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_lpar(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_lpar(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_rpar(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_rpar(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_Subscript_whitespace_after_value(self, node: "Subscript") -> None:
pass
@mark_no_op
def leave_Subscript_whitespace_after_value(self, node: "Subscript") -> None:
pass
@mark_no_op
def visit_SubscriptElement(self, node: "SubscriptElement") -> Optional[bool]:
pass
@mark_no_op
def visit_SubscriptElement_slice(self, node: "SubscriptElement") -> None:
pass
@mark_no_op
def leave_SubscriptElement_slice(self, node: "SubscriptElement") -> None:
pass
@mark_no_op
def visit_SubscriptElement_comma(self, node: "SubscriptElement") -> None:
pass
@mark_no_op
def leave_SubscriptElement_comma(self, node: "SubscriptElement") -> None:
pass
@mark_no_op
def visit_Subtract(self, node: "Subtract") -> Optional[bool]:
pass
@mark_no_op
def visit_Subtract_whitespace_before(self, node: "Subtract") -> None:
pass
@mark_no_op
def leave_Subtract_whitespace_before(self, node: "Subtract") -> None:
pass
@mark_no_op
def visit_Subtract_whitespace_after(self, node: "Subtract") -> None:
pass
@mark_no_op
def leave_Subtract_whitespace_after(self, node: "Subtract") -> None:
pass
@mark_no_op
def visit_SubtractAssign(self, node: "SubtractAssign") -> Optional[bool]:
pass
@mark_no_op
def visit_SubtractAssign_whitespace_before(self, node: "SubtractAssign") -> None:
pass
@mark_no_op
def leave_SubtractAssign_whitespace_before(self, node: "SubtractAssign") -> None:
pass
@mark_no_op
def visit_SubtractAssign_whitespace_after(self, node: "SubtractAssign") -> None:
pass
@mark_no_op
def leave_SubtractAssign_whitespace_after(self, node: "SubtractAssign") -> None:
pass
@mark_no_op
def visit_TrailingWhitespace(self, node: "TrailingWhitespace") -> Optional[bool]:
pass
@mark_no_op
def visit_TrailingWhitespace_whitespace(self, node: "TrailingWhitespace") -> None:
pass
@mark_no_op
def leave_TrailingWhitespace_whitespace(self, node: "TrailingWhitespace") -> None:
pass
@mark_no_op
def visit_TrailingWhitespace_comment(self, node: "TrailingWhitespace") -> None:
pass
@mark_no_op
def leave_TrailingWhitespace_comment(self, node: "TrailingWhitespace") -> None:
pass
@mark_no_op
def visit_TrailingWhitespace_newline(self, node: "TrailingWhitespace") -> None:
pass
@mark_no_op
def leave_TrailingWhitespace_newline(self, node: "TrailingWhitespace") -> None:
pass
    @mark_no_op
    def visit_Try(self, node: "Try") -> Optional[bool]:
        """No-op visit hook for ``Try`` statement nodes.

        Does nothing and returns ``None``; subclasses override to react to
        ``Try`` nodes.
        """
        pass
@mark_no_op
def visit_Try_body(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_body(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Try_handlers(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_handlers(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Try_orelse(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_orelse(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Try_finalbody(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_finalbody(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Try_leading_lines(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_leading_lines(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Try_whitespace_before_colon(self, node: "Try") -> None:
pass
@mark_no_op
def leave_Try_whitespace_before_colon(self, node: "Try") -> None:
pass
@mark_no_op
def visit_Tuple(self, node: "Tuple") -> Optional[bool]:
pass
@mark_no_op
def visit_Tuple_elements(self, node: "Tuple") -> None:
pass
@mark_no_op
def leave_Tuple_elements(self, node: "Tuple") -> None:
pass
@mark_no_op
def visit_Tuple_lpar(self, node: "Tuple") -> None:
pass
@mark_no_op
def leave_Tuple_lpar(self, node: "Tuple") -> None:
pass
@mark_no_op
def visit_Tuple_rpar(self, node: "Tuple") -> None:
pass
@mark_no_op
def leave_Tuple_rpar(self, node: "Tuple") -> None:
pass
@mark_no_op
def visit_UnaryOperation(self, node: "UnaryOperation") -> Optional[bool]:
pass
@mark_no_op
def visit_UnaryOperation_operator(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def leave_UnaryOperation_operator(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def visit_UnaryOperation_expression(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def leave_UnaryOperation_expression(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def visit_UnaryOperation_lpar(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def leave_UnaryOperation_lpar(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def visit_UnaryOperation_rpar(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def leave_UnaryOperation_rpar(self, node: "UnaryOperation") -> None:
pass
@mark_no_op
def visit_While(self, node: "While") -> Optional[bool]:
pass
@mark_no_op
def visit_While_test(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_test(self, node: "While") -> None:
pass
@mark_no_op
def visit_While_body(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_body(self, node: "While") -> None:
pass
@mark_no_op
def visit_While_orelse(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_orelse(self, node: "While") -> None:
pass
@mark_no_op
def visit_While_leading_lines(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_leading_lines(self, node: "While") -> None:
pass
@mark_no_op
def visit_While_whitespace_after_while(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_whitespace_after_while(self, node: "While") -> None:
pass
@mark_no_op
def visit_While_whitespace_before_colon(self, node: "While") -> None:
pass
@mark_no_op
def leave_While_whitespace_before_colon(self, node: "While") -> None:
pass
@mark_no_op
def visit_With(self, node: "With") -> Optional[bool]:
pass
@mark_no_op
def visit_With_items(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_items(self, node: "With") -> None:
pass
@mark_no_op
def visit_With_body(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_body(self, node: "With") -> None:
pass
@mark_no_op
def visit_With_asynchronous(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_asynchronous(self, node: "With") -> None:
pass
@mark_no_op
def visit_With_leading_lines(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_leading_lines(self, node: "With") -> None:
pass
@mark_no_op
def visit_With_whitespace_after_with(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_whitespace_after_with(self, node: "With") -> None:
pass
@mark_no_op
def visit_With_whitespace_before_colon(self, node: "With") -> None:
pass
@mark_no_op
def leave_With_whitespace_before_colon(self, node: "With") -> None:
pass
@mark_no_op
def visit_WithItem(self, node: "WithItem") -> Optional[bool]:
pass
@mark_no_op
def visit_WithItem_item(self, node: "WithItem") -> None:
pass
@mark_no_op
def leave_WithItem_item(self, node: "WithItem") -> None:
pass
@mark_no_op
def visit_WithItem_asname(self, node: "WithItem") -> None:
pass
@mark_no_op
def leave_WithItem_asname(self, node: "WithItem") -> None:
pass
@mark_no_op
def visit_WithItem_comma(self, node: "WithItem") -> None:
pass
@mark_no_op
def leave_WithItem_comma(self, node: "WithItem") -> None:
pass
@mark_no_op
def visit_Yield(self, node: "Yield") -> Optional[bool]:
pass
@mark_no_op
def visit_Yield_value(self, node: "Yield") -> None:
pass
@mark_no_op
def leave_Yield_value(self, node: "Yield") -> None:
pass
@mark_no_op
def visit_Yield_lpar(self, node: "Yield") -> None:
pass
@mark_no_op
def leave_Yield_lpar(self, node: "Yield") -> None:
pass
@mark_no_op
def visit_Yield_rpar(self, node: "Yield") -> None:
pass
@mark_no_op
def leave_Yield_rpar(self, node: "Yield") -> None:
pass
@mark_no_op
def visit_Yield_whitespace_after_yield(self, node: "Yield") -> None:
pass
@mark_no_op
def leave_Yield_whitespace_after_yield(self, node: "Yield") -> None:
pass
class CSTTypedVisitorFunctions(CSTTypedBaseFunctions):
    """Typed ``leave_<Node>`` no-op hooks for a read-only CST visitor.

    One stub per concrete CST node type.  Every stub is decorated with
    ``@mark_no_op`` so the dispatch machinery can detect that a hook was not
    overridden and skip calling it.  Subclasses override individual hooks to
    observe nodes of that type; visitors do not return replacement nodes.
    This class is code-generated -- do not edit the stubs by hand.
    """

    @mark_no_op
    def leave_Add(self, original_node: "Add") -> None:
        pass

    @mark_no_op
    def leave_AddAssign(self, original_node: "AddAssign") -> None:
        pass

    @mark_no_op
    def leave_And(self, original_node: "And") -> None:
        pass

    @mark_no_op
    def leave_AnnAssign(self, original_node: "AnnAssign") -> None:
        pass

    @mark_no_op
    def leave_Annotation(self, original_node: "Annotation") -> None:
        pass

    @mark_no_op
    def leave_Arg(self, original_node: "Arg") -> None:
        pass

    @mark_no_op
    def leave_AsName(self, original_node: "AsName") -> None:
        pass

    @mark_no_op
    def leave_Assert(self, original_node: "Assert") -> None:
        pass

    @mark_no_op
    def leave_Assign(self, original_node: "Assign") -> None:
        pass

    @mark_no_op
    def leave_AssignEqual(self, original_node: "AssignEqual") -> None:
        pass

    @mark_no_op
    def leave_AssignTarget(self, original_node: "AssignTarget") -> None:
        pass

    @mark_no_op
    def leave_Asynchronous(self, original_node: "Asynchronous") -> None:
        pass

    @mark_no_op
    def leave_Attribute(self, original_node: "Attribute") -> None:
        pass

    @mark_no_op
    def leave_AugAssign(self, original_node: "AugAssign") -> None:
        pass

    @mark_no_op
    def leave_Await(self, original_node: "Await") -> None:
        pass

    @mark_no_op
    def leave_BinaryOperation(self, original_node: "BinaryOperation") -> None:
        pass

    @mark_no_op
    def leave_BitAnd(self, original_node: "BitAnd") -> None:
        pass

    @mark_no_op
    def leave_BitAndAssign(self, original_node: "BitAndAssign") -> None:
        pass

    @mark_no_op
    def leave_BitInvert(self, original_node: "BitInvert") -> None:
        pass

    @mark_no_op
    def leave_BitOr(self, original_node: "BitOr") -> None:
        pass

    @mark_no_op
    def leave_BitOrAssign(self, original_node: "BitOrAssign") -> None:
        pass

    @mark_no_op
    def leave_BitXor(self, original_node: "BitXor") -> None:
        pass

    @mark_no_op
    def leave_BitXorAssign(self, original_node: "BitXorAssign") -> None:
        pass

    @mark_no_op
    def leave_BooleanOperation(self, original_node: "BooleanOperation") -> None:
        pass

    @mark_no_op
    def leave_Break(self, original_node: "Break") -> None:
        pass

    @mark_no_op
    def leave_Call(self, original_node: "Call") -> None:
        pass

    @mark_no_op
    def leave_ClassDef(self, original_node: "ClassDef") -> None:
        pass

    @mark_no_op
    def leave_Colon(self, original_node: "Colon") -> None:
        pass

    @mark_no_op
    def leave_Comma(self, original_node: "Comma") -> None:
        pass

    @mark_no_op
    def leave_Comment(self, original_node: "Comment") -> None:
        pass

    @mark_no_op
    def leave_CompFor(self, original_node: "CompFor") -> None:
        pass

    @mark_no_op
    def leave_CompIf(self, original_node: "CompIf") -> None:
        pass

    @mark_no_op
    def leave_Comparison(self, original_node: "Comparison") -> None:
        pass

    @mark_no_op
    def leave_ComparisonTarget(self, original_node: "ComparisonTarget") -> None:
        pass

    @mark_no_op
    def leave_ConcatenatedString(self, original_node: "ConcatenatedString") -> None:
        pass

    @mark_no_op
    def leave_Continue(self, original_node: "Continue") -> None:
        pass

    @mark_no_op
    def leave_Decorator(self, original_node: "Decorator") -> None:
        pass

    @mark_no_op
    def leave_Del(self, original_node: "Del") -> None:
        pass

    @mark_no_op
    def leave_Dict(self, original_node: "Dict") -> None:
        pass

    @mark_no_op
    def leave_DictComp(self, original_node: "DictComp") -> None:
        pass

    @mark_no_op
    def leave_DictElement(self, original_node: "DictElement") -> None:
        pass

    @mark_no_op
    def leave_Divide(self, original_node: "Divide") -> None:
        pass

    @mark_no_op
    def leave_DivideAssign(self, original_node: "DivideAssign") -> None:
        pass

    @mark_no_op
    def leave_Dot(self, original_node: "Dot") -> None:
        pass

    @mark_no_op
    def leave_Element(self, original_node: "Element") -> None:
        pass

    @mark_no_op
    def leave_Ellipsis(self, original_node: "Ellipsis") -> None:
        pass

    @mark_no_op
    def leave_Else(self, original_node: "Else") -> None:
        pass

    @mark_no_op
    def leave_EmptyLine(self, original_node: "EmptyLine") -> None:
        pass

    @mark_no_op
    def leave_Equal(self, original_node: "Equal") -> None:
        pass

    @mark_no_op
    def leave_ExceptHandler(self, original_node: "ExceptHandler") -> None:
        pass

    @mark_no_op
    def leave_Expr(self, original_node: "Expr") -> None:
        pass

    @mark_no_op
    def leave_Finally(self, original_node: "Finally") -> None:
        pass

    @mark_no_op
    def leave_Float(self, original_node: "Float") -> None:
        pass

    @mark_no_op
    def leave_FloorDivide(self, original_node: "FloorDivide") -> None:
        pass

    @mark_no_op
    def leave_FloorDivideAssign(self, original_node: "FloorDivideAssign") -> None:
        pass

    @mark_no_op
    def leave_For(self, original_node: "For") -> None:
        pass

    @mark_no_op
    def leave_FormattedString(self, original_node: "FormattedString") -> None:
        pass

    @mark_no_op
    def leave_FormattedStringExpression(
        self, original_node: "FormattedStringExpression"
    ) -> None:
        pass

    @mark_no_op
    def leave_FormattedStringText(self, original_node: "FormattedStringText") -> None:
        pass

    @mark_no_op
    def leave_From(self, original_node: "From") -> None:
        pass

    @mark_no_op
    def leave_FunctionDef(self, original_node: "FunctionDef") -> None:
        pass

    @mark_no_op
    def leave_GeneratorExp(self, original_node: "GeneratorExp") -> None:
        pass

    @mark_no_op
    def leave_Global(self, original_node: "Global") -> None:
        pass

    @mark_no_op
    def leave_GreaterThan(self, original_node: "GreaterThan") -> None:
        pass

    @mark_no_op
    def leave_GreaterThanEqual(self, original_node: "GreaterThanEqual") -> None:
        pass

    @mark_no_op
    def leave_If(self, original_node: "If") -> None:
        pass

    @mark_no_op
    def leave_IfExp(self, original_node: "IfExp") -> None:
        pass

    @mark_no_op
    def leave_Imaginary(self, original_node: "Imaginary") -> None:
        pass

    @mark_no_op
    def leave_Import(self, original_node: "Import") -> None:
        pass

    @mark_no_op
    def leave_ImportAlias(self, original_node: "ImportAlias") -> None:
        pass

    @mark_no_op
    def leave_ImportFrom(self, original_node: "ImportFrom") -> None:
        pass

    @mark_no_op
    def leave_ImportStar(self, original_node: "ImportStar") -> None:
        pass

    @mark_no_op
    def leave_In(self, original_node: "In") -> None:
        pass

    @mark_no_op
    def leave_IndentedBlock(self, original_node: "IndentedBlock") -> None:
        pass

    @mark_no_op
    def leave_Index(self, original_node: "Index") -> None:
        pass

    @mark_no_op
    def leave_Integer(self, original_node: "Integer") -> None:
        pass

    @mark_no_op
    def leave_Is(self, original_node: "Is") -> None:
        pass

    @mark_no_op
    def leave_IsNot(self, original_node: "IsNot") -> None:
        pass

    @mark_no_op
    def leave_Lambda(self, original_node: "Lambda") -> None:
        pass

    @mark_no_op
    def leave_LeftCurlyBrace(self, original_node: "LeftCurlyBrace") -> None:
        pass

    @mark_no_op
    def leave_LeftParen(self, original_node: "LeftParen") -> None:
        pass

    @mark_no_op
    def leave_LeftShift(self, original_node: "LeftShift") -> None:
        pass

    @mark_no_op
    def leave_LeftShiftAssign(self, original_node: "LeftShiftAssign") -> None:
        pass

    @mark_no_op
    def leave_LeftSquareBracket(self, original_node: "LeftSquareBracket") -> None:
        pass

    @mark_no_op
    def leave_LessThan(self, original_node: "LessThan") -> None:
        pass

    @mark_no_op
    def leave_LessThanEqual(self, original_node: "LessThanEqual") -> None:
        pass

    @mark_no_op
    def leave_List(self, original_node: "List") -> None:
        pass

    @mark_no_op
    def leave_ListComp(self, original_node: "ListComp") -> None:
        pass

    @mark_no_op
    def leave_MatrixMultiply(self, original_node: "MatrixMultiply") -> None:
        pass

    @mark_no_op
    def leave_MatrixMultiplyAssign(self, original_node: "MatrixMultiplyAssign") -> None:
        pass

    @mark_no_op
    def leave_Minus(self, original_node: "Minus") -> None:
        pass

    @mark_no_op
    def leave_Module(self, original_node: "Module") -> None:
        pass

    @mark_no_op
    def leave_Modulo(self, original_node: "Modulo") -> None:
        pass

    @mark_no_op
    def leave_ModuloAssign(self, original_node: "ModuloAssign") -> None:
        pass

    @mark_no_op
    def leave_Multiply(self, original_node: "Multiply") -> None:
        pass

    @mark_no_op
    def leave_MultiplyAssign(self, original_node: "MultiplyAssign") -> None:
        pass

    @mark_no_op
    def leave_Name(self, original_node: "Name") -> None:
        pass

    @mark_no_op
    def leave_NameItem(self, original_node: "NameItem") -> None:
        pass

    @mark_no_op
    def leave_NamedExpr(self, original_node: "NamedExpr") -> None:
        pass

    @mark_no_op
    def leave_Newline(self, original_node: "Newline") -> None:
        pass

    @mark_no_op
    def leave_Nonlocal(self, original_node: "Nonlocal") -> None:
        pass

    @mark_no_op
    def leave_Not(self, original_node: "Not") -> None:
        pass

    @mark_no_op
    def leave_NotEqual(self, original_node: "NotEqual") -> None:
        pass

    @mark_no_op
    def leave_NotIn(self, original_node: "NotIn") -> None:
        pass

    @mark_no_op
    def leave_Or(self, original_node: "Or") -> None:
        pass

    @mark_no_op
    def leave_Param(self, original_node: "Param") -> None:
        pass

    @mark_no_op
    def leave_ParamSlash(self, original_node: "ParamSlash") -> None:
        pass

    @mark_no_op
    def leave_ParamStar(self, original_node: "ParamStar") -> None:
        pass

    @mark_no_op
    def leave_Parameters(self, original_node: "Parameters") -> None:
        pass

    @mark_no_op
    def leave_ParenthesizedWhitespace(
        self, original_node: "ParenthesizedWhitespace"
    ) -> None:
        pass

    @mark_no_op
    def leave_Pass(self, original_node: "Pass") -> None:
        pass

    @mark_no_op
    def leave_Plus(self, original_node: "Plus") -> None:
        pass

    @mark_no_op
    def leave_Power(self, original_node: "Power") -> None:
        pass

    @mark_no_op
    def leave_PowerAssign(self, original_node: "PowerAssign") -> None:
        pass

    @mark_no_op
    def leave_Raise(self, original_node: "Raise") -> None:
        pass

    @mark_no_op
    def leave_Return(self, original_node: "Return") -> None:
        pass

    @mark_no_op
    def leave_RightCurlyBrace(self, original_node: "RightCurlyBrace") -> None:
        pass

    @mark_no_op
    def leave_RightParen(self, original_node: "RightParen") -> None:
        pass

    @mark_no_op
    def leave_RightShift(self, original_node: "RightShift") -> None:
        pass

    @mark_no_op
    def leave_RightShiftAssign(self, original_node: "RightShiftAssign") -> None:
        pass

    @mark_no_op
    def leave_RightSquareBracket(self, original_node: "RightSquareBracket") -> None:
        pass

    @mark_no_op
    def leave_Semicolon(self, original_node: "Semicolon") -> None:
        pass

    @mark_no_op
    def leave_Set(self, original_node: "Set") -> None:
        pass

    @mark_no_op
    def leave_SetComp(self, original_node: "SetComp") -> None:
        pass

    @mark_no_op
    def leave_SimpleStatementLine(self, original_node: "SimpleStatementLine") -> None:
        pass

    @mark_no_op
    def leave_SimpleStatementSuite(self, original_node: "SimpleStatementSuite") -> None:
        pass

    @mark_no_op
    def leave_SimpleString(self, original_node: "SimpleString") -> None:
        pass

    @mark_no_op
    def leave_SimpleWhitespace(self, original_node: "SimpleWhitespace") -> None:
        pass

    @mark_no_op
    def leave_Slice(self, original_node: "Slice") -> None:
        pass

    @mark_no_op
    def leave_StarredDictElement(self, original_node: "StarredDictElement") -> None:
        pass

    @mark_no_op
    def leave_StarredElement(self, original_node: "StarredElement") -> None:
        pass

    @mark_no_op
    def leave_Subscript(self, original_node: "Subscript") -> None:
        pass

    @mark_no_op
    def leave_SubscriptElement(self, original_node: "SubscriptElement") -> None:
        pass

    @mark_no_op
    def leave_Subtract(self, original_node: "Subtract") -> None:
        pass

    @mark_no_op
    def leave_SubtractAssign(self, original_node: "SubtractAssign") -> None:
        pass

    @mark_no_op
    def leave_TrailingWhitespace(self, original_node: "TrailingWhitespace") -> None:
        pass

    @mark_no_op
    def leave_Try(self, original_node: "Try") -> None:
        pass

    @mark_no_op
    def leave_Tuple(self, original_node: "Tuple") -> None:
        pass

    @mark_no_op
    def leave_UnaryOperation(self, original_node: "UnaryOperation") -> None:
        pass

    @mark_no_op
    def leave_While(self, original_node: "While") -> None:
        pass

    @mark_no_op
    def leave_With(self, original_node: "With") -> None:
        pass

    @mark_no_op
    def leave_WithItem(self, original_node: "WithItem") -> None:
        pass

    @mark_no_op
    def leave_Yield(self, original_node: "Yield") -> None:
        pass
class CSTTypedTransformerFunctions(CSTTypedBaseFunctions):
pass
@mark_no_op
def leave_Add(self, original_node: "Add", updated_node: "Add") -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_AddAssign(
self, original_node: "AddAssign", updated_node: "AddAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_And(self, original_node: "And", updated_node: "And") -> "BaseBooleanOp":
return updated_node
@mark_no_op
def leave_AnnAssign(
self, original_node: "AnnAssign", updated_node: "AnnAssign"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Annotation(
self, original_node: "Annotation", updated_node: "Annotation"
) -> "Annotation":
return updated_node
@mark_no_op
def leave_Arg(
self, original_node: "Arg", updated_node: "Arg"
) -> Union["Arg", FlattenSentinel["Arg"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_AsName(self, original_node: "AsName", updated_node: "AsName") -> "AsName":
return updated_node
@mark_no_op
def leave_Assert(
self, original_node: "Assert", updated_node: "Assert"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Assign(
self, original_node: "Assign", updated_node: "Assign"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_AssignEqual(
self, original_node: "AssignEqual", updated_node: "AssignEqual"
) -> Union["AssignEqual", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_AssignTarget(
self, original_node: "AssignTarget", updated_node: "AssignTarget"
) -> Union["AssignTarget", FlattenSentinel["AssignTarget"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Asynchronous(
self, original_node: "Asynchronous", updated_node: "Asynchronous"
) -> "Asynchronous":
return updated_node
@mark_no_op
def leave_Attribute(
self, original_node: "Attribute", updated_node: "Attribute"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_AugAssign(
self, original_node: "AugAssign", updated_node: "AugAssign"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Await(
self, original_node: "Await", updated_node: "Await"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_BinaryOperation(
self, original_node: "BinaryOperation", updated_node: "BinaryOperation"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_BitAnd(
self, original_node: "BitAnd", updated_node: "BitAnd"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_BitAndAssign(
self, original_node: "BitAndAssign", updated_node: "BitAndAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_BitInvert(
self, original_node: "BitInvert", updated_node: "BitInvert"
) -> "BaseUnaryOp":
return updated_node
@mark_no_op
def leave_BitOr(
self, original_node: "BitOr", updated_node: "BitOr"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_BitOrAssign(
self, original_node: "BitOrAssign", updated_node: "BitOrAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_BitXor(
self, original_node: "BitXor", updated_node: "BitXor"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_BitXorAssign(
self, original_node: "BitXorAssign", updated_node: "BitXorAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_BooleanOperation(
self, original_node: "BooleanOperation", updated_node: "BooleanOperation"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Break(
self, original_node: "Break", updated_node: "Break"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Call(
self, original_node: "Call", updated_node: "Call"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_ClassDef(
self, original_node: "ClassDef", updated_node: "ClassDef"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Colon(
self, original_node: "Colon", updated_node: "Colon"
) -> Union["Colon", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Comma(
self, original_node: "Comma", updated_node: "Comma"
) -> Union["Comma", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Comment(
self, original_node: "Comment", updated_node: "Comment"
) -> "Comment":
return updated_node
@mark_no_op
def leave_CompFor(
self, original_node: "CompFor", updated_node: "CompFor"
) -> "CompFor":
return updated_node
@mark_no_op
def leave_CompIf(self, original_node: "CompIf", updated_node: "CompIf") -> "CompIf":
return updated_node
@mark_no_op
def leave_Comparison(
self, original_node: "Comparison", updated_node: "Comparison"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_ComparisonTarget(
self, original_node: "ComparisonTarget", updated_node: "ComparisonTarget"
) -> Union[
"ComparisonTarget", FlattenSentinel["ComparisonTarget"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_ConcatenatedString(
self, original_node: "ConcatenatedString", updated_node: "ConcatenatedString"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Continue(
self, original_node: "Continue", updated_node: "Continue"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Decorator(
self, original_node: "Decorator", updated_node: "Decorator"
) -> Union["Decorator", FlattenSentinel["Decorator"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Del(
self, original_node: "Del", updated_node: "Del"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Dict(
self, original_node: "Dict", updated_node: "Dict"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_DictComp(
self, original_node: "DictComp", updated_node: "DictComp"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_DictElement(
self, original_node: "DictElement", updated_node: "DictElement"
) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Divide(
self, original_node: "Divide", updated_node: "Divide"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_DivideAssign(
self, original_node: "DivideAssign", updated_node: "DivideAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_Dot(
self, original_node: "Dot", updated_node: "Dot"
) -> Union["Dot", FlattenSentinel["Dot"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Element(
self, original_node: "Element", updated_node: "Element"
) -> Union["BaseElement", FlattenSentinel["BaseElement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Ellipsis(
self, original_node: "Ellipsis", updated_node: "Ellipsis"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Else(self, original_node: "Else", updated_node: "Else") -> "Else":
return updated_node
@mark_no_op
def leave_EmptyLine(
self, original_node: "EmptyLine", updated_node: "EmptyLine"
) -> Union["EmptyLine", FlattenSentinel["EmptyLine"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Equal(
self, original_node: "Equal", updated_node: "Equal"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_ExceptHandler(
self, original_node: "ExceptHandler", updated_node: "ExceptHandler"
) -> Union["ExceptHandler", FlattenSentinel["ExceptHandler"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Expr(
self, original_node: "Expr", updated_node: "Expr"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Finally(
self, original_node: "Finally", updated_node: "Finally"
) -> "Finally":
return updated_node
@mark_no_op
def leave_Float(
self, original_node: "Float", updated_node: "Float"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_FloorDivide(
self, original_node: "FloorDivide", updated_node: "FloorDivide"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_FloorDivideAssign(
self, original_node: "FloorDivideAssign", updated_node: "FloorDivideAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_For(
self, original_node: "For", updated_node: "For"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_FormattedString(
self, original_node: "FormattedString", updated_node: "FormattedString"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_FormattedStringExpression(
self,
original_node: "FormattedStringExpression",
updated_node: "FormattedStringExpression",
) -> Union[
"BaseFormattedStringContent",
FlattenSentinel["BaseFormattedStringContent"],
RemovalSentinel,
]:
return updated_node
@mark_no_op
def leave_FormattedStringText(
self, original_node: "FormattedStringText", updated_node: "FormattedStringText"
) -> Union[
"BaseFormattedStringContent",
FlattenSentinel["BaseFormattedStringContent"],
RemovalSentinel,
]:
return updated_node
@mark_no_op
def leave_From(self, original_node: "From", updated_node: "From") -> "From":
return updated_node
@mark_no_op
def leave_FunctionDef(
self, original_node: "FunctionDef", updated_node: "FunctionDef"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_GeneratorExp(
self, original_node: "GeneratorExp", updated_node: "GeneratorExp"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Global(
self, original_node: "Global", updated_node: "Global"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_GreaterThan(
self, original_node: "GreaterThan", updated_node: "GreaterThan"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_GreaterThanEqual(
self, original_node: "GreaterThanEqual", updated_node: "GreaterThanEqual"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_If(
self, original_node: "If", updated_node: "If"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_IfExp(
self, original_node: "IfExp", updated_node: "IfExp"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Imaginary(
self, original_node: "Imaginary", updated_node: "Imaginary"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Import(
self, original_node: "Import", updated_node: "Import"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_ImportAlias(
self, original_node: "ImportAlias", updated_node: "ImportAlias"
) -> Union["ImportAlias", FlattenSentinel["ImportAlias"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_ImportFrom(
self, original_node: "ImportFrom", updated_node: "ImportFrom"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_ImportStar(
self, original_node: "ImportStar", updated_node: "ImportStar"
) -> "ImportStar":
return updated_node
@mark_no_op
def leave_In(self, original_node: "In", updated_node: "In") -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_IndentedBlock(
self, original_node: "IndentedBlock", updated_node: "IndentedBlock"
) -> "BaseSuite":
return updated_node
@mark_no_op
def leave_Index(self, original_node: "Index", updated_node: "Index") -> "BaseSlice":
return updated_node
@mark_no_op
def leave_Integer(
self, original_node: "Integer", updated_node: "Integer"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Is(self, original_node: "Is", updated_node: "Is") -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_IsNot(
self, original_node: "IsNot", updated_node: "IsNot"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_Lambda(
self, original_node: "Lambda", updated_node: "Lambda"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_LeftCurlyBrace(
self, original_node: "LeftCurlyBrace", updated_node: "LeftCurlyBrace"
) -> "LeftCurlyBrace":
return updated_node
@mark_no_op
def leave_LeftParen(
self, original_node: "LeftParen", updated_node: "LeftParen"
) -> Union[
"LeftParen", MaybeSentinel, FlattenSentinel["LeftParen"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_LeftShift(
self, original_node: "LeftShift", updated_node: "LeftShift"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_LeftShiftAssign(
self, original_node: "LeftShiftAssign", updated_node: "LeftShiftAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_LeftSquareBracket(
self, original_node: "LeftSquareBracket", updated_node: "LeftSquareBracket"
) -> "LeftSquareBracket":
return updated_node
@mark_no_op
def leave_LessThan(
self, original_node: "LessThan", updated_node: "LessThan"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_LessThanEqual(
self, original_node: "LessThanEqual", updated_node: "LessThanEqual"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_List(
self, original_node: "List", updated_node: "List"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_ListComp(
self, original_node: "ListComp", updated_node: "ListComp"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_MatrixMultiply(
self, original_node: "MatrixMultiply", updated_node: "MatrixMultiply"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_MatrixMultiplyAssign(
self,
original_node: "MatrixMultiplyAssign",
updated_node: "MatrixMultiplyAssign",
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_Minus(
self, original_node: "Minus", updated_node: "Minus"
) -> "BaseUnaryOp":
return updated_node
@mark_no_op
def leave_Module(self, original_node: "Module", updated_node: "Module") -> "Module":
return updated_node
@mark_no_op
def leave_Modulo(
self, original_node: "Modulo", updated_node: "Modulo"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_ModuloAssign(
self, original_node: "ModuloAssign", updated_node: "ModuloAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_Multiply(
self, original_node: "Multiply", updated_node: "Multiply"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_MultiplyAssign(
self, original_node: "MultiplyAssign", updated_node: "MultiplyAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_Name(
self, original_node: "Name", updated_node: "Name"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_NameItem(
self, original_node: "NameItem", updated_node: "NameItem"
) -> Union["NameItem", FlattenSentinel["NameItem"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_NamedExpr(
self, original_node: "NamedExpr", updated_node: "NamedExpr"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_Newline(
self, original_node: "Newline", updated_node: "Newline"
) -> "Newline":
return updated_node
@mark_no_op
def leave_Nonlocal(
self, original_node: "Nonlocal", updated_node: "Nonlocal"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Not(self, original_node: "Not", updated_node: "Not") -> "BaseUnaryOp":
return updated_node
@mark_no_op
def leave_NotEqual(
self, original_node: "NotEqual", updated_node: "NotEqual"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_NotIn(
self, original_node: "NotIn", updated_node: "NotIn"
) -> "BaseCompOp":
return updated_node
@mark_no_op
def leave_Or(self, original_node: "Or", updated_node: "Or") -> "BaseBooleanOp":
return updated_node
@mark_no_op
def leave_Param(
self, original_node: "Param", updated_node: "Param"
) -> Union["Param", MaybeSentinel, FlattenSentinel["Param"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_ParamSlash(
self, original_node: "ParamSlash", updated_node: "ParamSlash"
) -> Union["ParamSlash", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_ParamStar(
self, original_node: "ParamStar", updated_node: "ParamStar"
) -> Union["ParamStar", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Parameters(
self, original_node: "Parameters", updated_node: "Parameters"
) -> "Parameters":
return updated_node
@mark_no_op
def leave_ParenthesizedWhitespace(
self,
original_node: "ParenthesizedWhitespace",
updated_node: "ParenthesizedWhitespace",
) -> Union["BaseParenthesizableWhitespace", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Pass(
self, original_node: "Pass", updated_node: "Pass"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Plus(self, original_node: "Plus", updated_node: "Plus") -> "BaseUnaryOp":
return updated_node
@mark_no_op
def leave_Power(
self, original_node: "Power", updated_node: "Power"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_PowerAssign(
self, original_node: "PowerAssign", updated_node: "PowerAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_Raise(
self, original_node: "Raise", updated_node: "Raise"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_Return(
self, original_node: "Return", updated_node: "Return"
) -> Union[
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_RightCurlyBrace(
self, original_node: "RightCurlyBrace", updated_node: "RightCurlyBrace"
) -> "RightCurlyBrace":
return updated_node
@mark_no_op
def leave_RightParen(
self, original_node: "RightParen", updated_node: "RightParen"
) -> Union[
"RightParen", MaybeSentinel, FlattenSentinel["RightParen"], RemovalSentinel
]:
return updated_node
@mark_no_op
def leave_RightShift(
self, original_node: "RightShift", updated_node: "RightShift"
) -> "BaseBinaryOp":
return updated_node
@mark_no_op
def leave_RightShiftAssign(
self, original_node: "RightShiftAssign", updated_node: "RightShiftAssign"
) -> "BaseAugOp":
return updated_node
@mark_no_op
def leave_RightSquareBracket(
self, original_node: "RightSquareBracket", updated_node: "RightSquareBracket"
) -> "RightSquareBracket":
return updated_node
@mark_no_op
def leave_Semicolon(
self, original_node: "Semicolon", updated_node: "Semicolon"
) -> Union["Semicolon", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Set(self, original_node: "Set", updated_node: "Set") -> "BaseExpression":
return updated_node
@mark_no_op
def leave_SetComp(
self, original_node: "SetComp", updated_node: "SetComp"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_SimpleStatementLine(
self, original_node: "SimpleStatementLine", updated_node: "SimpleStatementLine"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_SimpleStatementSuite(
self,
original_node: "SimpleStatementSuite",
updated_node: "SimpleStatementSuite",
) -> "BaseSuite":
return updated_node
@mark_no_op
def leave_SimpleString(
self, original_node: "SimpleString", updated_node: "SimpleString"
) -> "BaseExpression":
return updated_node
@mark_no_op
def leave_SimpleWhitespace(
self, original_node: "SimpleWhitespace", updated_node: "SimpleWhitespace"
) -> Union["BaseParenthesizableWhitespace", MaybeSentinel]:
return updated_node
@mark_no_op
def leave_Slice(self, original_node: "Slice", updated_node: "Slice") -> "BaseSlice":
return updated_node
@mark_no_op
def leave_StarredDictElement(
self, original_node: "StarredDictElement", updated_node: "StarredDictElement"
) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_StarredElement(
self, original_node: "StarredElement", updated_node: "StarredElement"
) -> Union["BaseElement", FlattenSentinel["BaseElement"], RemovalSentinel]:
return updated_node
@mark_no_op
def leave_Subscript(
    self, original_node: "Subscript", updated_node: "Subscript"
) -> "BaseExpression":
    """No-op leave hook for ``Subscript`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_SubscriptElement(
    self, original_node: "SubscriptElement", updated_node: "SubscriptElement"
) -> Union[
    "SubscriptElement", FlattenSentinel["SubscriptElement"], RemovalSentinel
]:
    """No-op leave hook for ``SubscriptElement`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_Subtract(
    self, original_node: "Subtract", updated_node: "Subtract"
) -> "BaseBinaryOp":
    """No-op leave hook for ``Subtract`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_SubtractAssign(
    self, original_node: "SubtractAssign", updated_node: "SubtractAssign"
) -> "BaseAugOp":
    """No-op leave hook for ``SubtractAssign`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_TrailingWhitespace(
    self, original_node: "TrailingWhitespace", updated_node: "TrailingWhitespace"
) -> "TrailingWhitespace":
    """No-op leave hook for ``TrailingWhitespace`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_Try(
    self, original_node: "Try", updated_node: "Try"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
    """No-op leave hook for ``Try`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_Tuple(
    self, original_node: "Tuple", updated_node: "Tuple"
) -> "BaseExpression":
    """No-op leave hook for ``Tuple`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_UnaryOperation(
    self, original_node: "UnaryOperation", updated_node: "UnaryOperation"
) -> "BaseExpression":
    """No-op leave hook for ``UnaryOperation`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_While(
    self, original_node: "While", updated_node: "While"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
    """No-op leave hook for ``While`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_With(
    self, original_node: "With", updated_node: "With"
) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]:
    """No-op leave hook for ``With`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_WithItem(
    self, original_node: "WithItem", updated_node: "WithItem"
) -> Union["WithItem", FlattenSentinel["WithItem"], RemovalSentinel]:
    """No-op leave hook for ``WithItem`` nodes; returns the node unchanged."""
    return updated_node
@mark_no_op
def leave_Yield(
    self, original_node: "Yield", updated_node: "Yield"
) -> "BaseExpression":
    """No-op leave hook for ``Yield`` nodes; returns the node unchanged."""
    return updated_node
| StarcoderdataPython |
3211806 | """
Module about Pauli (and I) matrices.
"""
import numpy as np
from numpy import zeros_like
from scipy.linalg import svd
# The 2x2 identity and Pauli spin matrices.
s0 = np.array([[1, 0], [0, 1]])     # identity
s1 = np.array([[0, 1], [1, 0]])     # sigma_x
s2 = np.array([[0, -1j], [1j, 0]])  # sigma_y
s3 = np.array([[1, 0], [0, -1]])    # sigma_z

# Transposed copies, used by the elementwise projection in pauli_decomp2().
s0T = s0.T
s1T = s1.T
s2T = s2.T
s3T = s3.T

# Index 0/1/2/3 -> I / sigma_x / sigma_y / sigma_z.
pauli_dict = {0: s0, 1: s1, 2: s2, 3: s3}


def pauli_mat(nbasis, i):
    """
    Build the ``nbasis`` x ``nbasis`` matrix whose 2x2 spin blocks are all-ones
    matrices weighted by the entries of the i-th Pauli matrix, i.e.
    kron(sigma_i, ones(N, N)).

    nbasis: size of the matrix. should be multiple of 2.
    i: index of pauli dictionary.
    """
    half = nbasis // 2
    assert (half * 2 == nbasis)
    ones_block = np.ones((half, half), dtype='complex')
    # kron(sigma, M) == block([[M*s00, M*s01], [M*s10, M*s11]])
    return np.kron(pauli_dict[i], ones_block)
def pauli_decomp(M):
    """Given a 2*2 matrix, get the I, x, y, z components via trace projection
    c_i = Tr(sigma_i . M) / 2.

    :param M: 2*2 matrix
    :returns: (I, x, y, z) are four scalars.
    :rtype: same as dtype of M
    """
    return tuple(np.trace(sigma.dot(M)) / 2 for sigma in (s0, s1, s2, s3))
def pauli_decomp2(M):
    """Given a 2*2 matrix, get the I, x, y, z components (method 2):
    elementwise product with the transposed Pauli matrices, summed and halved
    (equivalent to the trace projection of :func:`pauli_decomp`).

    :param M: 2*2 matrix
    :returns: (I, x, y, z) are four scalars.
    :rtype: same as dtype of M
    """
    return tuple(np.sum(M * sigmaT) / 2 for sigmaT in (s0T, s1T, s2T, s3T))
def pauli_sigma_norm(M):
    """Euclidean norm of the (x, y, z) Pauli components of a 2*2 matrix M
    (the identity component is discarded)."""
    _identity, x_comp, y_comp, z_comp = pauli_decomp2(M)
    return np.linalg.norm([x_comp, y_comp, z_comp])
def pauli_block_I(M, norb):
    """
    I component of a matrix, see pauli_block.

    Returns the full-size matrix with the identity component (up-up + dn-dn)/2
    written into both diagonal spin blocks; off-diagonal blocks stay zero.
    """
    avg = (M[::2, ::2] + M[1::2, 1::2]) / 2
    out = zeros_like(M)
    out[::2, ::2] = avg
    out[1::2, 1::2] = avg
    return out
def pauli_block_x(M, norb):
    """
    x component of a matrix, see pauli_block.

    Returns the full-size matrix with the x component (up-dn + dn-up)/2
    written into both off-diagonal spin blocks; diagonal blocks stay zero.
    """
    off = (M[::2, 1::2] + M[1::2, ::2]) / 2
    out = zeros_like(M)
    out[::2, 1::2] = off
    out[1::2, ::2] = off
    return out
def pauli_block_y(M, norb):
    """
    y component of a matrix, see pauli_block.

    Returns (tmp, ret): ``tmp`` is the y-component block (half size) and
    ``ret`` is its embedding tmp * sigma_y at full size (same two-value
    convention as pauli_block_z; note pauli_block_I/pauli_block_x return only
    the full-size matrix).

    Bug fix: the projection previously used
    ``(M_updn * (-i) + M_dnup * (+i)) / 2``, the opposite sign of the
    convention used everywhere else in this module (pauli_decomp,
    pauli_block(idim=2) and pauli_block_all all compute
    y = Tr(sigma_y . M) / 2 = (i*M_updn - i*M_dnup) / 2).
    """
    ret = zeros_like(M)
    tmp = (M[::2, 1::2] * (1j) + M[1::2, ::2] * (-1j)) / 2
    # Embed tmp * sigma_y: sigma_y = [[0, -i], [i, 0]].
    ret[::2, 1::2] = tmp * (-1j)
    ret[1::2, ::2] = tmp * 1j
    return tmp, ret
def pauli_block_z(M, norb):
    """z component of a matrix, see pauli_block.

    :param M: full-size spin matrix
    :param norb: unused (kept for API symmetry with the other pauli_block_* helpers)
    :returns: (tmp, ret) -- tmp is the half-size z component (up-up - dn-dn)/2,
        ret is tmp * sigma_z embedded at full size.
    """
    diff = (M[::2, ::2] - M[1::2, 1::2]) / 2
    out = zeros_like(M)
    out[::2, ::2] = diff
    out[1::2, 1::2] = -diff
    return diff, out
def pauli_block(M, idim):
    """Get the I, x, y or z component of a matrix.

    :param M: The input matrix, arranged in four blocks:
        [[upup, updn], [dnup, dndn]]. Let norb be the number of orbitals in
        each block (so M has dim 2*norb1 x 2*norb2).
    :param idim: 0, 1, 2, 3 for I, x, y, z.
    :returns: the idim-th component of M
    :rtype: a matrix with shape of M.shape // 2
    :raises NotImplementedError: if idim is not in {0, 1, 2, 3}
    """
    up_up, up_dn = M[::2, ::2], M[::2, 1::2]
    dn_up, dn_dn = M[1::2, ::2], M[1::2, 1::2]
    if idim == 0:
        return (up_up + dn_dn) / 2.0
    if idim == 1:
        return (up_dn + dn_up) / 2.0
    if idim == 2:
        # Trace projection onto sigma_y (a dot product, not an elementwise
        # product): y = (i*M_updn - i*M_dnup) / 2.
        return (up_dn * (1.0j) + dn_up * (-1.0j)) / 2.0
    if idim == 3:
        return (up_up - dn_dn) / 2.0
    raise NotImplementedError()
def pauli_block_all(M):
    """Return all four Pauli components (MI, Mx, My, Mz) of M at once,
    each of shape M.shape // 2 (same conventions as pauli_block)."""
    up_up, dn_dn = M[::2, ::2], M[1::2, 1::2]
    up_dn, dn_up = M[::2, 1::2], M[1::2, ::2]
    # My is a trace projection onto sigma_y (dot product, not elementwise).
    return ((up_up + dn_dn) / 2.0,
            (up_dn + dn_up) / 2.0,
            (up_dn - dn_up) * 0.5j,
            (up_up - dn_dn) / 2.0)
def op_norm(M):
    """Operator (spectral) norm of M: its largest singular value."""
    singular_values = svd(M)[1]
    return singular_values.max()
def pauli_block_sigma_norm(M):
    r"""
    M = MI * I + \vec{P} . \vec{sigma} = MI * I + p * \vec{e} . \vec{sigma},
    where p is the norm of P. Returns the projection of the (x, y, z)
    components onto the unit direction \vec{e} defined by their traces.

    NOTE(review): if all three component traces vanish, the normalization
    divides by zero -- confirm callers never pass a pure-identity M.
    """
    _identity, Mx, My, Mz = pauli_block_all(M)
    direction = np.array((np.trace(Mx), np.trace(My), np.trace(Mz)))
    # ex, ey, ez = op_norm(Mx), op_norm(My), op_norm(Mz)  # alternative weighting
    direction = direction / np.linalg.norm(direction)
    return Mx * direction[0] + My * direction[1] + Mz * direction[2]
| StarcoderdataPython |
3350469 | # -*- coding:utf-8 -*-
# Created Time: 2018/03/12 10:48:38
# Author: <NAME> <<EMAIL>>
from dataset import config, MultiCelebADataset
from nets import Encoder, Decoder, Discriminator
import os
import argparse
import torch
from torchvision import transforms
from PIL import Image
import numpy as np
from tensorboardX import SummaryWriter
from itertools import chain
class ELEGANT(object):
    """ELEGANT: Exchanging Latent Encodings with GAN for facial attribute transfer.

    Wraps an encoder/decoder pair plus two multi-scale discriminators, and
    provides training plus several test-time interpolation modes (swap,
    linear, matrix). Written against PyTorch 0.3-era APIs
    (``Variable``, ``volatile``, 1-element loss tensors) -- TODO confirm the
    target environment before modernizing.

    Bug fix: ``set_mode_and_gpu`` raised ``NotImplementationError`` (not a
    Python builtin, so it was a NameError at runtime); it now raises
    ``NotImplementedError``.
    """

    def __init__(self, args,
                 config=config, dataset=MultiCelebADataset,
                 encoder=Encoder, decoder=Decoder, discriminator=Discriminator):
        self.args = args
        self.attributes = args.attributes
        self.n_attributes = len(self.attributes)
        self.gpu = args.gpu
        self.mode = args.mode
        self.restore = args.restore

        # init dataset and networks
        self.config = config
        self.dataset = dataset(self.attributes)
        self.Enc = encoder()
        self.Dec = decoder()
        # D1 scores full-resolution images, D2 half-resolution ones.
        self.D1 = discriminator(self.n_attributes, self.config.nchw[-1])
        self.D2 = discriminator(self.n_attributes, self.config.nchw[-1]//2)

        self.adv_criterion = torch.nn.BCELoss()
        self.recon_criterion = torch.nn.MSELoss()

        self.restore_from_file()
        self.set_mode_and_gpu()

    def restore_from_file(self):
        """Load checkpoint weights for iteration ``self.restore`` (if set) and
        compute the step to resume from."""
        if self.restore is not None:
            ckpt_file_enc = os.path.join(self.config.model_dir, 'Enc_iter_{:06d}.pth'.format(self.restore))
            assert os.path.exists(ckpt_file_enc)
            ckpt_file_dec = os.path.join(self.config.model_dir, 'Dec_iter_{:06d}.pth'.format(self.restore))
            assert os.path.exists(ckpt_file_dec)
            if self.gpu:
                self.Enc.load_state_dict(torch.load(ckpt_file_enc), strict=False)
                self.Dec.load_state_dict(torch.load(ckpt_file_dec), strict=False)
            else:
                self.Enc.load_state_dict(torch.load(ckpt_file_enc, map_location='cpu'), strict=False)
                self.Dec.load_state_dict(torch.load(ckpt_file_dec, map_location='cpu'), strict=False)

            # Discriminators are only needed (and only checkpointed) for training.
            if self.mode == 'train':
                ckpt_file_d1 = os.path.join(self.config.model_dir, 'D1_iter_{:06d}.pth'.format(self.restore))
                assert os.path.exists(ckpt_file_d1)
                ckpt_file_d2 = os.path.join(self.config.model_dir, 'D2_iter_{:06d}.pth'.format(self.restore))
                assert os.path.exists(ckpt_file_d2)
                if self.gpu:
                    self.D1.load_state_dict(torch.load(ckpt_file_d1), strict=False)
                    self.D2.load_state_dict(torch.load(ckpt_file_d2), strict=False)
                else:
                    self.D1.load_state_dict(torch.load(ckpt_file_d1, map_location='cpu'), strict=False)
                    self.D2.load_state_dict(torch.load(ckpt_file_d2, map_location='cpu'), strict=False)

            self.start_step = self.restore + 1
        else:
            self.start_step = 1

    def set_mode_and_gpu(self):
        """Put networks in train/eval mode, build optimizers and schedulers
        (train mode only), and move everything to GPU when requested."""
        if self.mode == 'train':
            self.Enc.train()
            self.Dec.train()
            self.D1.train()
            self.D2.train()

            self.writer = SummaryWriter(self.config.log_dir)

            self.optimizer_G = torch.optim.Adam(chain(self.Enc.parameters(), self.Dec.parameters()),
                                                lr=self.config.G_lr, betas=(0.5, 0.999),
                                                weight_decay=self.config.weight_decay)

            self.optimizer_D = torch.optim.Adam(chain(self.D1.parameters(), self.D2.parameters()),
                                                lr=self.config.D_lr, betas=(0.5, 0.999),
                                                weight_decay=self.config.weight_decay)

            self.G_lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer_G, step_size=self.config.step_size, gamma=self.config.gamma)
            self.D_lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer_D, step_size=self.config.step_size, gamma=self.config.gamma)

            if self.restore is not None:
                # Fast-forward the schedulers so the LR matches the restored step.
                for _ in range(self.restore):
                    self.G_lr_scheduler.step()
                    self.D_lr_scheduler.step()

            if self.gpu:
                with torch.cuda.device(0):
                    self.Enc.cuda()
                    self.Dec.cuda()
                    self.D1.cuda()
                    self.D2.cuda()
                    self.adv_criterion.cuda()
                    self.recon_criterion.cuda()

            if len(self.gpu) > 1:
                self.Enc = torch.nn.DataParallel(self.Enc, device_ids=list(range(len(self.gpu))))
                self.Dec = torch.nn.DataParallel(self.Dec, device_ids=list(range(len(self.gpu))))
                self.D1 = torch.nn.DataParallel(self.D1, device_ids=list(range(len(self.gpu))))
                self.D2 = torch.nn.DataParallel(self.D2, device_ids=list(range(len(self.gpu))))

        elif self.mode == 'test':
            self.Enc.eval()
            self.Dec.eval()

            if self.gpu:
                with torch.cuda.device(0):
                    self.Enc.cuda()
                    self.Dec.cuda()

            if len(self.gpu) > 1:
                self.Enc = torch.nn.DataParallel(self.Enc, device_ids=list(range(len(self.gpu))))
                self.Dec = torch.nn.DataParallel(self.Dec, device_ids=list(range(len(self.gpu))))

        else:
            # Bug fix: was ``raise NotImplementationError()`` (undefined name).
            raise NotImplementedError()

    def tensor2var(self, tensors, volatile=False):
        """Wrap one tensor or a list of tensors in Variables (moving them to
        GPU 0 when enabled); a single input comes back unwrapped."""
        if not hasattr(tensors, '__iter__'):
            tensors = [tensors]
        out = []
        for tensor in tensors:
            if len(self.gpu):
                tensor = tensor.cuda(0)
            var = torch.autograd.Variable(tensor, volatile=volatile)
            out.append(var)
        if len(out) == 1:
            return out[0]
        else:
            return out

    def get_attr_chs(self, encodings, attribute_id):
        """Slice the channel band of ``encodings`` that belongs to one
        attribute (channels are split evenly across attributes)."""
        num_chs = encodings.size(1)
        per_chs = float(num_chs) / self.n_attributes
        start = int(np.rint(per_chs * attribute_id))
        end = int(np.rint(per_chs * (attribute_id + 1)))
        # narrow() keeps this a view on the same storage.
        return encodings.narrow(1, start, end-start)

    def forward_G(self):
        """Encode A/B, swap the current attribute's channel band to get z_C/z_D,
        and decode residuals into reconstructions (A1, B1) and swaps (C, D)."""
        self.z_A, self.A_skip = self.Enc(self.A, return_skip=True)
        self.z_B, self.B_skip = self.Enc(self.B, return_skip=True)

        self.z_C = torch.cat([self.get_attr_chs(self.z_A, i) if i != self.attribute_id
                              else self.get_attr_chs(self.z_B, i) for i in range(self.n_attributes)], 1)
        self.z_D = torch.cat([self.get_attr_chs(self.z_B, i) if i != self.attribute_id
                              else self.get_attr_chs(self.z_A, i) for i in range(self.n_attributes)], 1)

        # Decoder outputs residual images, added to the originals below.
        self.R_A = self.Dec(self.z_A, self.z_A, skip=self.A_skip)
        self.R_B = self.Dec(self.z_B, self.z_B, skip=self.B_skip)
        self.R_C = self.Dec(self.z_C, self.z_A, skip=self.A_skip)
        self.R_D = self.Dec(self.z_D, self.z_B, skip=self.B_skip)

        self.A1 = torch.clamp(self.A + self.R_A, -1, 1)
        self.B1 = torch.clamp(self.B + self.R_B, -1, 1)
        self.C = torch.clamp(self.A + self.R_C, -1, 1)
        self.D = torch.clamp(self.B + self.R_D, -1, 1)

    def forward_D_real_sample(self):
        """Score the real images A and B with both discriminators."""
        self.d1_A = self.D1(self.A, self.y_A)
        self.d1_B = self.D1(self.B, self.y_B)
        self.d2_A = self.D2(self.A, self.y_A)
        self.d2_B = self.D2(self.B, self.y_B)

    def forward_D_fake_sample(self, detach):
        """Score the generated images C/D under the swapped attribute labels.

        ``detach=True`` blocks gradients into the generator (used for the D
        update); ``detach=False`` lets them flow (used for the G update).
        """
        self.y_C, self.y_D = self.y_A.clone(), self.y_B.clone()
        self.y_C.data[:, self.attribute_id] = self.y_B.data[:, self.attribute_id]
        self.y_D.data[:, self.attribute_id] = self.y_A.data[:, self.attribute_id]

        if detach:
            self.d1_C = self.D1(self.C.detach(), self.y_C)
            self.d1_D = self.D1(self.D.detach(), self.y_D)
            self.d2_C = self.D2(self.C.detach(), self.y_C)
            self.d2_D = self.D2(self.D.detach(), self.y_D)
        else:
            self.d1_C = self.D1(self.C, self.y_C)
            self.d1_D = self.D1(self.D, self.y_D)
            self.d2_C = self.D2(self.C, self.y_C)
            self.d2_D = self.D2(self.D, self.y_D)

    def compute_loss_D(self):
        """Discriminator loss: real pairs toward 1, generated pairs toward 0,
        with the half-resolution discriminator weighted by 0.5."""
        self.D_loss = {
            'D1': self.adv_criterion(self.d1_A, torch.ones_like(self.d1_A)) +
                  self.adv_criterion(self.d1_B, torch.ones_like(self.d1_B)) +
                  self.adv_criterion(self.d1_C, torch.zeros_like(self.d1_C)) +
                  self.adv_criterion(self.d1_D, torch.zeros_like(self.d1_D)),

            'D2': self.adv_criterion(self.d2_A, torch.ones_like(self.d2_A)) +
                  self.adv_criterion(self.d2_B, torch.ones_like(self.d2_B)) +
                  self.adv_criterion(self.d2_C, torch.zeros_like(self.d2_C)) +
                  self.adv_criterion(self.d2_D, torch.zeros_like(self.d2_D)),
        }
        self.loss_D = (self.D_loss['D1'] + 0.5 * self.D_loss['D2']) / 4

    def compute_loss_G(self):
        """Generator loss: 5x reconstruction (A1~A, B1~B) plus adversarial
        terms pushing C/D toward 'real' at both scales."""
        self.G_loss = {
            'reconstruction': self.recon_criterion(self.A1, self.A) + self.recon_criterion(self.B1, self.B),
            'adv1': self.adv_criterion(self.d1_C, torch.ones_like(self.d1_C)) +
                    self.adv_criterion(self.d1_D, torch.ones_like(self.d1_D)),
            'adv2': self.adv_criterion(self.d2_C, torch.ones_like(self.d2_C)) +
                    self.adv_criterion(self.d2_D, torch.ones_like(self.d2_D)),
        }
        self.loss_G = 5 * self.G_loss['reconstruction'] + self.G_loss['adv1'] + 0.5 * self.G_loss['adv2']

    def backward_D(self):
        """Backpropagate the discriminator loss and step its optimizer."""
        self.loss_D.backward()
        self.optimizer_D.step()

    def backward_G(self):
        """Backpropagate the generator loss and step its optimizer."""
        self.loss_G.backward()
        self.optimizer_G.step()

    def img_denorm(self, img, scale=255):
        """Map images from [-1, 1] back to [0, scale]."""
        return (img + 1) * scale / 2.

    def save_image_log(self, save_num=20):
        """Write up to ``save_num`` of each intermediate image to TensorBoard."""
        image_info = {
            'A/img': self.img_denorm(self.A.data.cpu(), 1)[:save_num],
            'B/img': self.img_denorm(self.B.data.cpu(), 1)[:save_num],
            'C/img': self.img_denorm(self.C.data.cpu(), 1)[:save_num],
            'D/img': self.img_denorm(self.D.data.cpu(), 1)[:save_num],
            'A1/img': self.img_denorm(self.A1.data.cpu(), 1)[:save_num],
            'B1/img': self.img_denorm(self.B1.data.cpu(), 1)[:save_num],
            'R_A/img': self.img_denorm(self.R_A.data.cpu(), 1)[:save_num],
            'R_B/img': self.img_denorm(self.R_B.data.cpu(), 1)[:save_num],
            'R_C/img': self.img_denorm(self.R_C.data.cpu(), 1)[:save_num],
            'R_D/img': self.img_denorm(self.R_D.data.cpu(), 1)[:save_num],
        }
        for tag, images in image_info.items():
            for idx, image in enumerate(images):
                self.writer.add_image(tag+'/{}_{:02d}'.format(self.attribute_id, idx), image, self.step)

    def save_sample_images(self, save_num=5):
        """Save side-by-side (A, B, C, D, A1, B1) strips as JPEG files."""
        canvas = torch.cat((self.A, self.B, self.C, self.D, self.A1, self.B1), -1)
        img_array = np.transpose(self.img_denorm(canvas.data.cpu().numpy()), (0, 2, 3, 1)).astype(np.uint8)
        for i in range(save_num):
            Image.fromarray(img_array[i]).save(os.path.join(self.config.img_dir, 'step_{:06d}_attr_{}_{:02d}.jpg'.format(self.step, self.attribute_id, i)))

    def save_scalar_log(self):
        """Write losses and learning rates to TensorBoard.

        NOTE(review): ``.numpy()[0]`` / ``.data[0]`` assume torch<=0.3-style
        1-element loss tensors; on modern PyTorch losses are 0-dim -- confirm
        the environment before upgrading.
        """
        scalar_info = {
            'loss_D': self.loss_D.data.cpu().numpy()[0],
            'loss_G': self.loss_G.data.cpu().numpy()[0],
            'G_lr': self.G_lr_scheduler.get_lr()[0],
            'D_lr': self.D_lr_scheduler.get_lr()[0],
        }
        for key, value in self.G_loss.items():
            scalar_info['G_loss/' + key] = value.data[0]

        for key, value in self.D_loss.items():
            scalar_info['D_loss/' + key] = value.data[0]

        for tag, value in scalar_info.items():
            self.writer.add_scalar(tag, value, self.step)

    def save_model(self):
        """Checkpoint all four networks on CPU, stripping DataParallel's
        ``module.`` prefix so single-GPU restores also work."""
        reduced = lambda key: key[7:] if key.startswith('module.') else key
        torch.save({reduced(key): val.cpu() for key, val in self.Enc.state_dict().items()}, os.path.join(self.config.model_dir, 'Enc_iter_{:06d}.pth'.format(self.step)))
        torch.save({reduced(key): val.cpu() for key, val in self.Dec.state_dict().items()}, os.path.join(self.config.model_dir, 'Dec_iter_{:06d}.pth'.format(self.step)))
        torch.save({reduced(key): val.cpu() for key, val in self.D1.state_dict().items()}, os.path.join(self.config.model_dir, 'D1_iter_{:06d}.pth'.format(self.step)))
        torch.save({reduced(key): val.cpu() for key, val in self.D2.state_dict().items()}, os.path.join(self.config.model_dir, 'D2_iter_{:06d}.pth'.format(self.step)))

    def train(self):
        """Main training loop: per step, alternate a D update and a G update
        for every attribute, with periodic logging and checkpointing."""
        for self.step in range(self.start_step, 1 + self.config.max_iter):
            self.G_lr_scheduler.step()
            self.D_lr_scheduler.step()

            for self.attribute_id in range(self.n_attributes):
                # A has the attribute, B does not.
                A, y_A = next(self.dataset.gen(self.attribute_id, True))
                B, y_B = next(self.dataset.gen(self.attribute_id, False))
                self.A, self.y_A, self.B, self.y_B = self.tensor2var([A, y_A, B, y_B])

                # forward
                self.forward_G()

                # update D
                self.forward_D_real_sample()
                self.forward_D_fake_sample(detach=True)
                self.compute_loss_D()
                self.optimizer_D.zero_grad()
                self.backward_D()

                # update G
                self.forward_D_fake_sample(detach=False)
                self.compute_loss_G()
                self.optimizer_G.zero_grad()
                self.backward_G()

                if self.step % 100 == 0:
                    self.save_image_log()

                if self.step % 2000 == 0:
                    self.save_sample_images()

            print('step: %06d, loss D: %.6f, loss G: %.6f' % (self.step, self.loss_D.data.cpu().numpy(), self.loss_G.data.cpu().numpy()))

            if self.step % 100 == 0:
                self.save_scalar_log()

            if self.step % 2000 == 0:
                self.save_model()

        print('Finished Training!')
        self.writer.close()

    def transform(self, *images):
        """Resize PIL images to the configured size and map them to
        [-1, 1] tensors with a leading batch dimension."""
        transform1 = transforms.Compose([
            transforms.Resize(self.config.nchw[-2:]),
            transforms.ToTensor(),
        ])
        transform2 = lambda x: x.view(1, *x.size()) * 2 - 1
        out = [transform2(transform1(image)) for image in images]
        return out

    def swap(self):
        """
        swap attributes of two images; writes ``swap.jpg``.
        """
        self.attribute_id = self.args.swap_list[0]
        self.B, self.A = self.tensor2var(self.transform(Image.open(self.args.input), Image.open(self.args.target[0])), volatile=True)

        self.forward_G()
        img = torch.cat((self.B, self.A, self.D, self.C), -1)
        img = np.transpose(self.img_denorm(img.data.cpu().numpy()), (0, 2, 3, 1)).astype(np.uint8)[0]
        Image.fromarray(img).save('swap.jpg')

    def linear(self):
        """
        linear interpolation of two images; writes ``linear_interpolation.jpg``.
        """
        self.attribute_id = self.args.swap_list[0]
        self.B, self.A = self.tensor2var(self.transform(Image.open(self.args.input), Image.open(self.args.target[0])), volatile=True)

        self.z_A = self.Enc(self.A, return_skip=False)
        self.z_B, self.B_skip = self.Enc(self.B, return_skip=True)
        self.z_D = torch.cat([self.get_attr_chs(self.z_B, i) if i != self.attribute_id
                              else self.get_attr_chs(self.z_A, i) for i in range(self.n_attributes)], 1)

        m = self.args.size[0]
        out = [self.B]
        for i in range(1, 1+m):
            # Interpolate in latent space between z_B and the swapped z_D.
            z_i = float(i) / m * (self.z_D - self.z_B) + self.z_B
            R_i = self.Dec(z_i, self.z_B, skip=self.B_skip)
            D_i = torch.clamp(self.B + R_i, -1, 1)
            out.append(D_i)
        out.append(self.A)
        out = torch.cat(out, -1)
        img = np.transpose(self.img_denorm(out.data.cpu().numpy()), (0, 2, 3, 1)).astype(np.uint8)[0]
        Image.fromarray(img).save('linear_interpolation.jpg')

    def matrix1(self):
        """
        matrix interpolation with respect to one attribute, taken from three
        target images; writes ``matrix_interpolation1.jpg``.
        """
        self.attribute_id = self.args.swap_list[0]
        self.B = self.tensor2var(self.transform(Image.open(self.args.input)), volatile=True)
        self.As = [self.tensor2var(self.transform(Image.open(self.args.target[i])), volatile=True) for i in range(3)]

        self.z_B, self.B_skip = self.Enc(self.B, return_skip=True)
        self.z_As = [self.Enc(self.As[i], return_skip=False) for i in range(3)]
        self.z_Ds = [torch.cat([self.get_attr_chs(self.z_B, i) if i != self.attribute_id
                                else self.get_attr_chs(self.z_As[j], i) for i in range(self.n_attributes)], 1)
                     for j in range(3)]

        m, n = self.args.size
        h, w = self.config.nchw[-2:]
        out = torch.ones(1, 3, m * h, n * w)
        for i in range(m):
            for j in range(n):
                # Bilinear weights over the four corner encodings.
                a = i / float(m - 1)
                b = j / float(n - 1)
                four = [(1-a) * (1-b), (1-a) * b, a * (1-b), a * b]
                z_ij = four[0] * self.z_B + four[1] * self.z_Ds[0] + four[2] * self.z_Ds[1] + four[3] * self.z_Ds[2]
                R_ij = self.Dec(z_ij, self.z_B, skip=self.B_skip)
                D_ij = torch.clamp(self.B + R_ij, -1, 1)
                out[:, :, i*h:(i+1)*h, j*w:(j+1)*w] = D_ij.data.cpu()

        first_col = torch.cat((self.B.data.cpu(), torch.ones(1, 3, (m-2)*h, w), self.As[1].data.cpu()), -2)
        last_col = torch.cat((self.As[0].data.cpu(), torch.ones(1, 3, (m-2)*h, w), self.As[2].data.cpu()), -2)
        canvas = torch.cat((first_col, out, last_col), -1)
        img = np.transpose(self.img_denorm(canvas.numpy()), (0, 2, 3, 1)).astype(np.uint8)[0]
        Image.fromarray(img).save('matrix_interpolation1.jpg')

    def matrix2(self):
        """
        matrix interpolation with respect to two attributes simultaneously;
        writes ``matrix_interpolation2.jpg``.
        """
        self.attribute_ids = self.args.swap_list
        self.B, self.A1, self.A2 = self.tensor2var(self.transform(Image.open(self.args.input), Image.open(self.args.target[0]), Image.open(self.args.target[1])), volatile=True)

        self.z_B, self.B_skip = self.Enc(self.B, return_skip=True)
        self.z_A1, self.z_A2 = self.Enc(self.A1, return_skip=False), self.Enc(self.A2, return_skip=False)
        self.z_D1 = torch.cat([self.get_attr_chs(self.z_B, i) if i != self.attribute_ids[0]
                               else self.get_attr_chs(self.z_A1, i) for i in range(self.n_attributes)], 1)
        self.z_D2 = torch.cat([self.get_attr_chs(self.z_B, i) if i != self.attribute_ids[1]
                               else self.get_attr_chs(self.z_A2, i) for i in range(self.n_attributes)], 1)

        m, n = self.args.size
        h, w = self.config.nchw[-2:]
        out = torch.ones(1, 3, m * h, n * w)
        for i in range(m):
            for j in range(n):
                # Barycentric blend of the base encoding and the two swaps.
                a = i / float(m - 1)
                b = j / float(n - 1)
                z_ij = a * self.z_D1 + b * self.z_D2 + (1 - a - b) * self.z_B
                R_ij = self.Dec(z_ij, self.z_B, skip=self.B_skip)
                D_ij = torch.clamp(self.B + R_ij, -1, 1)
                out[:, :, i*h:(i+1)*h, j*w:(j+1)*w] = D_ij.data.cpu()

        first_col = torch.cat((self.B.data.cpu(), torch.ones(1, 3, (m-2)*h, w), self.A1.data.cpu()), -2)
        last_col = torch.cat((self.A2.data.cpu(), torch.ones(1, 3, (m-1)*h, w)), -2)
        canvas = torch.cat((first_col, out, last_col), -1)
        img = np.transpose(self.img_denorm(canvas.numpy()), (0, 2, 3, 1)).astype(np.uint8)[0]
        Image.fromarray(img).save('matrix_interpolation2.jpg')
def main():
    """Parse CLI arguments and dispatch to training or one of the test modes.

    Bug fixes: the fallthrough branch raised ``NotImplementationError`` (an
    undefined name, i.e. a NameError at runtime) instead of the builtin
    ``NotImplementedError``; the ``--matrix`` help text had a typo
    ('Matraix').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--attributes', nargs='+', type=str, help='Specify attribute names.')
    parser.add_argument('-g', '--gpu', default=[], nargs='+', type=str, help='Specify GPU ids.')
    parser.add_argument('-m', '--mode', default='train', type=str, choices=['train', 'test'])
    parser.add_argument('-r', '--restore', default=None, action='store', type=int, help='Specify checkpoint id to restore')

    # test parameters
    parser.add_argument('--swap', action='store_true', help='Swap attributes.')
    parser.add_argument('--linear', action='store_true', help='Linear interpolation.')
    parser.add_argument('--matrix', action='store_true', help='Matrix interpolation with respect to one attribute.')
    parser.add_argument('--swap_list', default=[], nargs='+', type=int, help='Specify the attributes ids for swapping.')
    parser.add_argument('-i', '--input', type=str, help='Specify the input image.')
    parser.add_argument('-t', '--target', nargs='+', type=str, help='Specify target images.')
    parser.add_argument('-s', '--size', nargs='+', type=int, help='Specify the interpolation size.')

    args = parser.parse_args()
    print(args)

    # Restrict visible GPUs before any CUDA initialization.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.gpu)
    if args.mode == 'test':
        # Exactly one test sub-mode, and a checkpoint to load, are required.
        assert args.swap + args.linear + args.matrix == 1
        assert args.restore is not None

    model = ELEGANT(args)
    if args.mode == 'train':
        model.train()
    elif args.mode == 'test' and args.swap:
        assert len(args.swap_list) == 1 and args.input and len(args.target) == 1
        model.swap()
    elif args.mode == 'test' and args.linear:
        assert len(args.swap_list) == 1 and len(args.size) == 1
        model.linear()
    elif args.mode == 'test' and args.matrix:
        assert len(args.swap_list) in [1, 2]
        if len(args.swap_list) == 1:
            assert len(args.target) == 3 and len(args.size) == 2
            model.matrix1()
        elif len(args.swap_list) == 2:
            assert len(args.target) == 2 and len(args.size) == 2
            model.matrix2()
    else:
        # Bug fix: was ``raise NotImplementationError()`` (undefined name).
        raise NotImplementedError()
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3214586 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright 2020, <NAME>
# Licensed under the terms of the MIT license. See LICENSE file in project root for terms.
# Ensure the device is at the "Enter PIN:" prompt
from serial_port_util import CTFSerial
import sys
# The serial port device path is the sole required CLI argument.
if len(sys.argv) < 2:
    print("Usage: {} serport".format(sys.argv[0]))
    sys.exit(1)

in_serport = sys.argv[1]
ser = CTFSerial(in_serport)

# Exhaustively try every 4-digit PIN (0000-9999); the device must already be
# sitting at its "Enter PIN:" prompt (see header comment).
for pin in range(10000):
    print("Trying {:04d}".format(pin))
    pinstr = "{:04d}\n".format(pin).encode('ascii')
    ok = ser.write_and_check(pinstr)
    assert ok
    bytes1 = ser.read(1)
    if bytes1 == b'I':
        # First byte of the failure reply. Drain the remaining 26 bytes
        # (presumably the rest of "Incorrect PIN!\r\nEnter PIN: " -- TODO
        # confirm against the device firmware) so the stream stays aligned
        # for the next attempt.
        reply = ser.read(26)
        assert len(reply) == 26
    else:
        # Any other first byte is treated as the success banner.
        print("That was the correct PIN!")
        break
1719283 | from beam import ViewSet
from beam.contrib.autocomplete_light import AutocompleteMixin
from beam.registry import RegistryType
from django.core.exceptions import PermissionDenied
from django.test import RequestFactory, TestCase
from test_views import user_with_perms
from testapp.models import Dragonfly
# Module-local viewset registry so these tests don't pollute the global one.
registry: RegistryType = {}
class AutocompleteDragonflyViewSet(AutocompleteMixin, ViewSet):
    """ViewSet under test: exposes an autocomplete endpoint for Dragonfly,
    searching on the ``name`` field."""

    registry = registry
    model = Dragonfly
    fields = ["name", "age"]
    autocomplete_search_fields = ["name"]
class AutocompleteTest(TestCase):
    """Tests for the AutocompleteMixin endpoint: listing, search filtering,
    and permission enforcement."""

    def test_autocomplete(self):
        """Without a search term, the endpoint lists all objects."""
        request = RequestFactory().get("/", {})
        request.user = user_with_perms(["testapp.view_dragonfly"])

        Dragonfly.objects.create(name="alpha", age=12)
        Dragonfly.objects.create(name="omega", age=99)

        view = AutocompleteDragonflyViewSet()._get_view(
            AutocompleteDragonflyViewSet().components["autocomplete"]
        )
        response = view(request)
        self.assertContains(response, "alpha")
        self.assertContains(response, "omega")

    def test_autocomplete_search(self):
        """The ``q`` parameter filters on ``name`` ("Al" matches "alpha", so
        the lookup is presumably case-insensitive -- confirm in the mixin)."""
        request = RequestFactory().get("/", {"q": "Al"})
        request.user = user_with_perms(["testapp.view_dragonfly"])

        Dragonfly.objects.create(name="alpha", age=12)
        Dragonfly.objects.create(name="omega", age=99)

        view = AutocompleteDragonflyViewSet()._get_view(
            AutocompleteDragonflyViewSet().components["autocomplete"]
        )
        response = view(request)
        self.assertContains(response, "alpha")
        self.assertNotContains(response, "omega")

    def test_autocomplete_requires_permission(self):
        """A user lacking the view permission gets PermissionDenied."""
        Dragonfly.objects.create(name="alpha", age=47)
        request = RequestFactory().get("/", {})
        request.user = user_with_perms([])

        view = AutocompleteDragonflyViewSet()._get_view(
            AutocompleteDragonflyViewSet().components["autocomplete"]
        )
        with self.assertRaises(PermissionDenied):
            view(request)
| StarcoderdataPython |
3344570 | """Add address fields to user
Revision ID: 4d1a5fb71db
Revises: <PASSWORD>
Create Date: 2015-10-10 16:56:57.670618
"""
# revision identifiers, used by Alembic.
# NOTE(review): '<KEY>' and '<PASSWORD>' look like redacted placeholders from
# an anonymization pass; the module docstring says the revision ID is
# 4d1a5fb71db -- restore the real ids from the migration history before use.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Apply the migration: add four nullable address-related columns to the
    ``user`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('address', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('city', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('country', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('zip', sa.String(length=8), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the four address-related columns from the
    ``user`` table (reverse order of upgrade())."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'zip')
    op.drop_column('user', 'country')
    op.drop_column('user', 'city')
    op.drop_column('user', 'address')
    ### end Alembic commands ###
| StarcoderdataPython |
1757448 | #!/usr/bin/env python3
import curses
import curses.textpad
import threading
import time
import textwrap
import collections
import logging
class HCHandler(logging.Handler):
    """Logging handler that routes formatted records into the console's
    log window (``con.log``) instead of a stream."""

    def __init__(self, con):
        super().__init__()
        self.con = con
        formatter = logging.Formatter(fmt="{levelname}:{name}:{message}", style="{")
        self.setFormatter(formatter)

    def emit(self, record):
        """Format the record and append it to the console's log buffer."""
        self.con.log.append(self.format(record))
class ConsoleWindow():
    """Base class for one boxed sub-window of the console.

    Visible windows share the terminal width equally: each occupies slot
    ``pos`` out of ``num`` columns, spanning the full height minus the
    two-line input area at the bottom.
    """

    def __init__(self, con, name):
        self.con = con      # owning Console (provides stdscr)
        self.name = name    # title drawn on the top border
        self.win = None     # curses subwindow; created lazily by redraw()
        self.pos = -1       # column slot index, set via setpos()
        self.num = -1       # total number of visible windows

    def refresh(self):
        """Draw the border and title, then flush this window to the screen."""
        self.win.box()
        self.win.addstr(0, 2, self.name)
        self.win.refresh()

    def setpos(self, pos, num):
        """Record that this window is slot *pos* out of *num* visible windows."""
        self.pos, self.num = pos, num

    def redraw(self):
        """Recreate the curses subwindow at the current slot and repaint it."""
        if self.win:
            del self.win
            self.win = None
        origin = self.get_win_start()
        dims = self.get_win_size()
        self.win = self.con.stdscr.subwin(*dims, *origin)
        self.win.clear()
        self.refresh()

    def get_win_start(self):
        """(y, x) of this window's top-left corner on the screen."""
        slot_width = (curses.COLS - 1) // self.num
        return (0, self.pos * slot_width)

    def get_win_size(self):
        """(height, width) of this window, border included."""
        slot_width = (curses.COLS - 1) // self.num
        return (curses.LINES - 2, slot_width - 1)

    def get_draw_start(self):
        """Top-left corner of the drawable interior (one cell inside the box)."""
        y, x = self.get_win_start()
        return (y + 1, x + 1)

    def get_draw_size(self):
        """Size of the drawable interior (box border excluded)."""
        height, width = self.get_win_size()
        return (height - 2, width - 2)
class LogWindow(ConsoleWindow):
    """A ConsoleWindow that renders a scrollback buffer of text lines,
    newest at the bottom, wrapping long lines to the window width."""

    def __init__(self, con, name):
        super().__init__(con, name)
        # Newest message first; refresh() walks this until the window is full.
        self.buffer = []

    def append(self, msg):
        """Add a (possibly multi-line) message and repaint the window."""
        # Reversed split keeps per-message line order once each piece is
        # inserted at the front of the newest-first buffer.
        for m in msg.split("\n")[::-1]:
            self.buffer.insert(0, m)  # TODO: optimize this (O(n) per insert)
        self.refresh()

    def refresh(self):
        """Repaint the visible tail of the buffer, bottom-up, then the border."""
        # Nothing to draw into until redraw() has created the subwindow.
        if not self.win:
            return
        if not self.buffer:
            super().refresh()
            return

        max_outbuflen = self.get_draw_size()[0]  # How many lines can we show
        max_linelen = self.get_draw_size()[1]  # How many characters per line

        # Buffer of everything to be printed to screen (newest first,
        # wrapped fragments reversed so they render in reading order).
        outbuf = []
        count = 0
        for i in self.buffer:
            tw = textwrap.wrap(i, max_linelen)[::-1]
            d = len(tw) + count - max_outbuflen
            # This should be non-negative if we exceed max_outbuflen (how much we exceed by)
            if d > 0:
                tw = tw[:-d]  # Truncate wrap list
            outbuf += tw
            count += len(tw)
            # We went over, bail
            if d > 0:
                break

        # Draw from the bottom row upward, clearing each row first.
        y = len(outbuf)
        x = 1
        for o in outbuf:
            self.win.addstr(y, x, " " * max_linelen)  # TODO: fix this line clear hack
            self.win.addstr(y, x, o)
            y -= 1
        super().refresh()
# Custom input handler because curses.textpad.Textbox kind of sucks.
class ConsoleInput():
    """Single-line input field with cursor movement and command history."""

    def __init__(self, con):
        self.con = con        # owning Console; completed lines go to con.send()
        self.buffer = []      # current line, one character per entry
        self.history = []     # previously sent lines, oldest first
        self.cur = 0          # cursor position within self.buffer
        self.histcur = -1     # history index counted from the end (-1 = newest)

    def redraw(self):
        """(Re)create the one-line input subwindow on the bottom row."""
        self.win = self.con.stdscr.subwin(1, curses.COLS - 4, curses.LINES - 2, 4)

    def handle(self, c):
        """Dispatch one keypress ``c`` (a curses keycode) and repaint."""
        # Remove one character at current location
        if c in (curses.KEY_BACKSPACE, 127):  # 127 = DEL, sent by some terminals
            if self.cur == 0:  # Bail if at beginning
                return
            self.cur -= 1
            self.buffer.pop(self.cur)
            # Trailing space overwrites the character freed at the line end.
            self.win.addstr(0, 0, "".join(self.buffer) + " ")
            self.win.move(0, len(self.buffer))
        # Send out the line
        elif c in (curses.KEY_ENTER, 10):  # 10 = '\n'
            if len(self.buffer) == 0:
                return  # don't need to do anything if nothing was typed
            self.history.append("".join(self.buffer))
            self.con.send("".join(self.buffer))
            self.buffer = []
            # Blank the whole input row.
            self.win.addstr(0, 0, " " * (self.win.getmaxyx()[1]-1))
            self.cur = 0
            self.histcur = -1
        elif c == curses.KEY_LEFT:
            self.cur = max(self.cur - 1, 0)
        elif c == curses.KEY_RIGHT:
            self.cur = min(self.cur + 1, len(self.buffer))
        elif c == curses.KEY_UP:
            # Recall the previous history entry (histcur indexes from the end).
            if len(self.history) == 0:
                return
            tmp = len(self.buffer)
            self.buffer = list(self.history[self.histcur])
            # Walk back until the oldest entry, then stay there.
            self.histcur -= 1 if self.histcur > (-(len(self.history))) else 0
            # ljust pads over leftovers of a longer previous line.
            self.win.addstr(0, 0, ("".join(self.buffer)).ljust(tmp))
            self.cur = len(self.buffer)
        elif c == curses.KEY_DOWN:
            # Clear input if down is pressed at the bottom
            if self.histcur == -1:
                self.win.addstr(0, 0, " " * len(self.buffer))
                self.buffer = []
                self.cur = 0
            else:
                tmp = len(self.buffer)
                self.buffer = list(self.history[self.histcur])
                self.histcur += 1
                self.win.addstr(0, 0, ("".join(self.buffer)).ljust(tmp))
                self.cur = len(self.buffer)
        else:
            # Printable (or any other) key: advance the cursor, then insert
            # before it -- i.e. at the pre-advance cursor position.
            # NOTE(review): chr(c) on non-printable keycodes may render
            # garbage; there is no filtering here -- confirm intended.
            self.cur += 1
            self.buffer.insert(self.cur-1, chr(c))
            self.win.addstr(0, 0, "".join(self.buffer))

        # Common tail for every key: place the cursor and flush.
        self.win.move(0, self.cur)
        self.win.refresh()
class Console():
    """Curses chat console: side-by-side Chat and Log windows over a
    one-line input field, with F1/F2 toggling window visibility."""

    def __init__(self):
        self.logger = logging.getLogger("Console")
        self.chat = LogWindow(self, "Chat")
        self.log = LogWindow(self, "Log")
        self.inp = ConsoleInput(self)
        # Don't use .enable() for this, no point (windows aren't drawn yet).
        self.enabled = [self.chat, self.log]

    def toggle(self, wincls):
        """Show/hide the given window, never hiding the last visible one."""
        if wincls in self.enabled:
            # Disallow disabling all windows
            if len(self.enabled) == 1:
                return
            self.disable(wincls)
        else:
            self.enable(wincls)

    def enable(self, wincls):
        """Make the window visible and re-lay-out the screen."""
        self.enabled.append(wincls)
        self.redraw()

    def disable(self, wincls):
        """Hide the window and re-lay-out the screen."""
        self.enabled.remove(wincls)
        self.redraw()

    # This should only be called when either resizing the whole window or a window is toggled
    def redraw(self):
        """Clear the screen and rebuild every visible window plus the prompt."""
        self.stdscr.clear()
        self.inp.redraw()
        for i in range(len(self.enabled)):
            self.enabled[i].setpos(i, len(self.enabled))
            self.enabled[i].redraw()
        self.stdscr.addstr(curses.LINES - 2, 1, ">>")

    # Deliver message to chat from user
    def send(self, msg):
        self.chat.append("user: " + msg)

    # TODO: implement this for CLI (incoming messages)
    def recv(self, msg):
        pass

    def handle_input(self):
        """Blocking key loop; runs until self.stop is set (END key)."""
        self.stop = False
        while not self.stop:
            self.input(self.stdscr.getch())

    def input(self, c):
        """Route one keypress: resize/quit/toggles here, the rest to the input field."""
        if c == curses.KEY_RESIZE:
            curses.update_lines_cols()
            self.logger.debug("resizing")
            self.redraw()
        elif c == curses.KEY_END:
            self.stop = True
        elif c == curses.KEY_F1:
            self.toggle(self.chat)
        elif c == curses.KEY_F2:
            self.toggle(self.log)
        else:
            self.inp.handle(c)

    # Call this to initialize the menu
    def run(self):
        """Run the UI inside curses.wrapper (restores the terminal on exit)."""
        curses.wrapper(self._main)

    def _main(self, stdscr):
        """curses.wrapper target: wire up logging, draw, and pump input."""
        self.stdscr = stdscr
        stdscr.clear()
        stdscr.refresh()
        # Mirror root-logger output into the Log window while running.
        hdlr = HCHandler(self)
        logging.getLogger().addHandler(hdlr)
        self.redraw()
        self.handle_input()
        logging.getLogger().removeHandler(hdlr)
# Script entry point: start the interactive curses console.
if __name__ == "__main__":
    Console().run()
| StarcoderdataPython |
3286392 | from random import randint
# Scheduling puzzle (inferred from the code - confirm against the original
# problem statement): out of n days where "o" marks a workable day, choose
# exactly k workdays with at least c rest days between consecutive ones;
# print the days that appear in every schedule this program finds.
n, k, c = map(int, input().split())
s = input()
# Greedy left-to-right: earliest possible schedule (1-based day numbers).
cur = 0
workday = 0
worked_left = set()
while cur < n:
    if s[cur] == "o":
        workday += 1
        worked_left.add(cur + 1)
        cur += c + 1  # skip the mandatory c rest days after working
    else:
        cur += 1
    if workday == k:
        break
else:
    # while/else: loop exhausted all days without reaching k workdays,
    # so no valid schedule exists - print nothing and quit.
    exit()
# Greedy right-to-left: latest possible schedule.
cur = n - 1
workday = 0
worked_right = set()
while cur >= 0:
    if s[cur] == "o":
        workday += 1
        worked_right.add(cur + 1)
        cur -= c + 1
    else:
        cur -= 1
    if workday == k:
        break
# Candidate "forced" days: chosen by both the earliest and latest schedules.
ans = worked_left & worked_right
# Randomized refinement (heuristic, not exhaustive): build up to 10 more
# schedules by randomly skipping available days; any run that still reaches
# k workdays further prunes the candidate set.
for i in range(10):
    cur = 0
    workday = 0
    worked_another = set()
    while cur < n:
        if s[cur] == "o":
            if randint(0, 2):  # work this day with probability 2/3
                workday += 1
                worked_another.add(cur + 1)
                cur += c + 1
            else:
                cur += 1
        else:
            cur += 1
        if workday == k:
            # Only schedules that actually reach k workdays count.
            ans &= worked_another
            break
print(*sorted(ans), sep="\n")
74498 | <filename>App/fbpage.py
# coding: utf-8
import os
# Make the project root (parent of this App/ directory) importable so the
# top-level `fbmq` and `config` modules resolve when run as a script.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
from fbmq import Page
from config import CONFIG
# Shared Facebook Messenger page client, authenticated with the app token.
page = Page(CONFIG['FACEBOOK_TOKEN'])
@page.after_send
def after_send(payload, response):
    """Debug hook run after every send: dump the payload and the API reply."""
    print('AFTER_SEND : ' + payload.to_json())
    print('RESPONSE : ' + response.text)
| StarcoderdataPython |
1736353 | <filename>ex072_tuplas_num_por_extenso.py
extenso = 'zero', 'um', 'dois', 'trรชs', 'quatro', 'cinco', 'seis', 'sete',\
'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'catorze', 'quinze',\
'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte'
for num in range(0, len(extenso)):
num = int(input('Digite um nรบmero de 0 a 20: '))
while num < 0 or num > 20:
num = int(input('Nรบmero invรกlido! Digite um nรบmero entre 0 e 20: '))
print(f'Vocรช digitou {extenso[num]}')
opcao = str(input('Deseja continuar? [S/N]: ')).strip().upper()
if opcao not in 'SN':
opcao = str(input('Deseja continuar? [S/N]: ')).strip().upper()
else:
if opcao == 'N':
print('PROGRAMA ENCERRADO')
break
#VERSรO DO GUANABARA
'''cont = 'zero', 'um', 'dois', 'trรชs', 'quatro', 'cinco', 'seis', 'sete',\
'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'catorze', 'quinze',\
'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte'
while True:
nรบm = int(input('Digite um nรบmero entre 0 e 20: '))
if 0 <= nรบm <= 20:
break
print('Tente novamente. ', end='')
print(f'Vocรช digitou o nรบmero {cont[nรบm]}')'''
| StarcoderdataPython |
1772819 | from PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo
from PyQt5.QtCore import QThread
import pyttsx3
import voice
voice = voice.voice  # rebind the module name to the engine object it exposes
# Last value reported per sensor, indexed by sensor id.
# 15 fixed slots; '' means "no reading received yet" (onRead stores ints).
sensors = [''] * 15
class SerialReadThread(QThread):
    """Listens on a QSerialPort and announces sensor state changes via TTS."""
    def __init__(self, mainwindow, serial, parent=None):
        # NOTE(review): `parent` is accepted but never forwarded to QThread;
        # confirm whether it should be passed to super().__init__().
        super().__init__()
        self.mainwindow = mainwindow
        self.serial = serial
        # Fire onRead whenever the port has buffered data available.
        self.serial.readyRead.connect(self.onRead)
    def say(self, text):
        """Blocking text-to-speech announcement."""
        voice.say(text)
        voice.runAndWait()
    def onRead(self):
        # Expected line format: "<sensor_id>,<value>[,...]" with integer fields
        # (inferred from the int() parsing below - confirm against the firmware).
        rx = self.serial.readLine()
        rxs = str(rx, 'utf-8').strip()
        data = rxs.split(',')
        print(data)
        isError = False
        if len(data) >= 2:
            # Reject lines with any empty field (trailing comma, line noise).
            for item in data:
                if item == '':
                    isError = True
                    break
            if not isError:
                key = int(data[0])
                info = int(data[1])
                # React only when the stored value actually changed; slots start
                # as '' so the first report for a sensor always differs.
                if sensors[key] != info:
                    if key == 0 and info == 1:
                        self.say('ะะฝะธะผะฐะฝะธะต! ะะฑะฝะฐััะถะตะฝะพ ะดะฒะธะถะตะฝะธะต ะฝะฐ ะดะฐััะธะบะต ะดะฒะธะถะตะฝะธั ะฝะพะผะตั 0.')
                    sensors[key] = info
1742036 | # SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Blue Cheetah Analog Design Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from bag.util.immutable import Param
from bag.layout.template import TemplateDB
from xbase.layout.mos.base import MOSBasePlaceInfo, MOSBase
class TilePatternTest(MOSBase):
    """A MOSBase of only rows of transistors, no connection specs.
    """
    def __init__(self, temp_db: TemplateDB, params: Param, **kwargs: Any) -> None:
        MOSBase.__init__(self, temp_db, params, **kwargs)
    @classmethod
    def get_params_info(cls) -> Dict[str, str]:
        # Parameter names exposed to the BAG generator framework.
        return dict(
            pinfo='the tile specification object.',
            ncol='number of columns.',
            ntile='number of tiles.',
        )
    @classmethod
    def get_default_param_values(cls) -> Dict[str, Any]:
        # NOTE(review): min_ntr/w_list do not appear in get_params_info();
        # these defaults look like leftovers from another generator - confirm.
        return dict(min_ntr=0, w_list=None)
    def draw_layout(self) -> None:
        """Fill every row of every tile with a full-width device or tap."""
        pinfo = MOSBasePlaceInfo.make_place_info(self.grid, self.params['pinfo'])
        self.draw_base(pinfo)
        ncol: int = self.params['ncol']
        ntile: int = self.params['ntile']
        self.set_mos_size(ncol, ntile)
        for tile_idx in range(ntile):
            pinfo = self.get_tile_pinfo(tile_idx)
            for row_idx in range(pinfo.num_rows):
                rinfo = pinfo.get_row_place_info(row_idx).row_info
                if rinfo.row_type.is_substrate:
                    # Substrate rows get a tap spanning the full column count.
                    self.add_substrate_contact(row_idx, 0, tile_idx=tile_idx, seg=ncol)
                else:
                    self.add_mos(row_idx, 0, ncol, tile_idx=tile_idx)
| StarcoderdataPython |
1788950 | <filename>libtaxii/taxii_default_query.py
# Copyright (c) 2017, The MITRE Corporation
# For license information, see the LICENSE.txt file
"""
Creating, handling, and parsing TAXII Default Queries.
"""
import numbers
import datetime
from operator import attrgetter
import os
import dateutil.parser
from lxml import etree
import libtaxii.messages_11 as tm11
from .common import TAXIIBase
from .validation import (do_check, uri_regex, targeting_expression_regex)
from .constants import *
import six
class CapabilityModule(object):
    """
    A Capability Module has valid relationships
    Each relationship has 0-n valid parameters

    :param string capability_module_id: ID of this capability module
    :param relationships: list of Relationship objects this module supports
    """
    def __init__(self, capability_module_id, relationships):
        self.capability_module_id = capability_module_id
        self.relationships = relationships
    @property
    def capability_module_id(self):
        return self._capability_module_id
    @capability_module_id.setter
    def capability_module_id(self, value):
        do_check(value, 'capability_module_id', type=six.string_types)
        self._capability_module_id = value
    @property
    def relationships(self):
        return self._relationships
    @relationships.setter
    def relationships(self, value):
        do_check(value, 'relationships', type=Relationship)
        # Accepts a list of Relationship objects but stores a name->object
        # dict so Test.validate() can look relationships up by name.
        self._relationships = {}
        for item in value:
            self._relationships[item.name] = item
class Relationship(object):
    """A named relationship (e.g. 'equals') with its allowed parameters.

    :param string name: the relationship name
    :param parameters: list of Parameter objects valid for this relationship
    """
    def __init__(self, name, parameters=None):
        self.name = name
        self.parameters = parameters or []
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        do_check(value, 'name', type=six.string_types)
        self._name = value
    @property
    def parameters(self):
        return self._parameters
    @parameters.setter
    def parameters(self, value):
        do_check(value, 'parameters', type=Parameter)
        # Accepts a list of Parameter objects but stores a name->object dict
        # for lookup during Test.validate() / Test.from_etree().
        self._parameters = {}
        for item in value:
            self._parameters[item.name] = item
class Parameter(object):
    """A named relationship parameter: its expected type and, optionally,
    the tuple of values it may take."""

    def __init__(self, name, type, value_tuple=None):
        # Plain attribute storage; validation of actual values happens in verify().
        self.name = name
        self.type = type
        self.value_tuple = value_tuple

    def verify(self, value):
        """Validate ``value`` against this parameter's constraints.

        Delegates to do_check (which raises on failure) and reports success
        as a (True, 'OK') pair.
        """
        constraints = dict(type=self.type, value_tuple=self.value_tuple)
        do_check(value, 'value', **constraints)
        return True, 'OK'
# params - Define parameters for the Core/Regex/Timestamp capability modules
param_str_value = Parameter(P_VALUE, six.string_types)
param_float_value = Parameter(P_VALUE, float)
param_ts_value = Parameter(P_VALUE, datetime.datetime)
param_match_type = Parameter(P_MATCH_TYPE, six.string_types, ('case_sensitive_string', 'case_insensitive_string', 'number'))
param_case_sensitive = Parameter(P_CASE_SENSITIVE, bool, (True, False))
# CORE Relationships - Define relationships for the core capability module
rel_equals = Relationship(R_EQUALS, [param_str_value, param_match_type])
rel_not_equals = Relationship(R_NOT_EQUALS, [param_str_value, param_match_type])
rel_greater_than = Relationship(R_GREATER_THAN, [param_float_value])
rel_greater_than_or_equal = Relationship(R_GREATER_THAN_OR_EQUAL, [param_float_value])
rel_less_than = Relationship(R_LESS_THAN, [param_float_value])
rel_less_than_or_equal = Relationship(R_LESS_THAN_OR_EQUAL, [param_float_value])
rel_dne = Relationship(R_DOES_NOT_EXIST)
rel_ex = Relationship(R_EXISTS)
rel_begins_with = Relationship(R_BEGINS_WITH, [param_case_sensitive, param_str_value])
rel_ends_with = Relationship(R_ENDS_WITH, [param_case_sensitive, param_str_value])
rel_contains = Relationship(R_CONTAINS, [param_case_sensitive, param_str_value])
# REGEX relationships
rel_matches = Relationship(R_MATCHES, [param_case_sensitive, param_str_value])
# TIMESTAMP relationships
rel_ts_eq = Relationship(R_EQUALS, [param_ts_value])
rel_ts_gt = Relationship(R_GREATER_THAN, [param_ts_value])
rel_ts_gte = Relationship(R_GREATER_THAN_OR_EQUAL, [param_ts_value])
rel_ts_lt = Relationship(R_LESS_THAN, [param_ts_value])
rel_ts_lte = Relationship(R_LESS_THAN_OR_EQUAL, [param_ts_value])
# CORE - Define the Core Capability Module
cm_core = CapabilityModule(CM_CORE,
                           [rel_equals, rel_not_equals, rel_greater_than,
                            rel_greater_than_or_equal, rel_less_than,
                            rel_less_than_or_equal, rel_dne, rel_ex,
                            rel_begins_with, rel_contains, rel_ends_with]
                           )
# REGEX - Define the RegEx Capability Module
cm_regex = CapabilityModule(CM_REGEX, [rel_matches])
# TIMESTAMP - Define the timestamp Capability Module
cm_timestamp = CapabilityModule(CM_TIMESTAMP, [rel_ts_eq, rel_ts_gt, rel_ts_gte, rel_ts_lt, rel_ts_lte])
# Registry of the known capability modules keyed by ID; consulted by
# Test.validate() and Test.from_etree() for parameter validation/coercion.
capability_modules = {CM_CORE: cm_core, CM_REGEX: cm_regex, CM_TIMESTAMP: cm_timestamp}
class DefaultQueryInfo(tm11.SupportedQuery):
    """ Used to describe the TAXII Default Queries that are supported.
    :param targeting_expression_infos: Describe the supported targeting expressions
    :type targeting_expression_infos: :class:`list` of :class:`TargetingExpressionInfo` objects
    :param capability_modules: Indicate the supported capability modules
    :type capability_modules: :class:`list` of :class:`str`
    """
    def __init__(self, targeting_expression_infos, capability_modules):
        super(DefaultQueryInfo, self).__init__(FID_TAXII_DEFAULT_QUERY_10)
        self.targeting_expression_infos = targeting_expression_infos
        self.capability_modules = capability_modules
    @property
    def targeting_expression_infos(self):
        return self._targeting_expression_infos
    @targeting_expression_infos.setter
    def targeting_expression_infos(self, value):
        do_check(value, 'targeting_expression_infos', type=DefaultQueryInfo.TargetingExpressionInfo)
        self._targeting_expression_infos = value
    @property
    def capability_modules(self):
        return self._capability_modules
    @capability_modules.setter
    def capability_modules(self, value):
        do_check(value, 'capability_modules', regex_tuple=uri_regex)
        self._capability_modules = value
    def to_etree(self):
        """Serialize to lxml, nesting under the parent SupportedQuery element."""
        q = super(DefaultQueryInfo, self).to_etree()
        dqi = etree.SubElement(q, '{%s}Default_Query_Info' % ns_map['tdq'])
        for expression_info in self.targeting_expression_infos:
            dqi.append(expression_info.to_etree())
        for cmod in self.capability_modules:
            cm = etree.SubElement(dqi, '{%s}Capability_Module' % ns_map['tdq'], nsmap=ns_map)
            cm.text = cmod
        return q
    def to_dict(self):
        d = super(DefaultQueryInfo, self).to_dict()
        d['targeting_expression_infos'] = []
        for expression_info in self.targeting_expression_infos:
            d['targeting_expression_infos'].append(expression_info.to_dict())
        # TODO: This looks like a serialization bug
        # (stores the live list rather than a copy).
        d['capability_modules'] = self.capability_modules
        return d
    def to_text(self, line_prepend=''):
        """Human-readable rendering; children are indented one extra level."""
        s = super(DefaultQueryInfo, self).to_text(line_prepend)
        for expression_info in self.targeting_expression_infos:
            s += expression_info.to_text(line_prepend + STD_INDENT)
        for capability_module in self.capability_modules:
            s += line_prepend + " Capability Module: %s\n" % capability_module
        return s
    def __hash__(self):
        # Hash the dict form so equal-content objects hash alike.
        return hash(str(self.to_dict()))
    @staticmethod
    def from_etree(etree_xml):
        """Parse a DefaultQueryInfo out of a SupportedQuery etree element."""
        texpr_infos = etree_xml.xpath('./tdq:Default_Query_Info/tdq:Targeting_Expression_Info', namespaces=ns_map)
        texpr_info_list = []
        for texpr_info in texpr_infos:
            texpr_info_list.append(DefaultQueryInfo.TargetingExpressionInfo.from_etree(texpr_info))
        cms = etree_xml.xpath('./tdq:Default_Query_Info/tdq:Capability_Module', namespaces=ns_map)
        cms_list = []
        for cm in cms:
            cms_list.append(cm.text)
        return DefaultQueryInfo(texpr_info_list, cms_list)
    @staticmethod
    def from_dict(d):
        kwargs = {}
        kwargs['targeting_expression_infos'] = []
        for expression_info in d['targeting_expression_infos']:
            kwargs['targeting_expression_infos'].append(DefaultQueryInfo.TargetingExpressionInfo.from_dict(expression_info))
        kwargs['capability_modules'] = d['capability_modules']
        return DefaultQueryInfo(**kwargs)
class TargetingExpressionInfo(TAXIIBase):
    """This class describes supported Targeting Expressions
    :param string targeting_expression_id: The supported targeting expression ID
    :param preferred_scope: Indicates the preferred scope of queries
    :type preferred_scope: :class:`list` of :class:`string`
    :param allowed_scope: Indicates the allowed scope of queries
    :type allowed_scope: :class:`list` of :class:`string`
    """
    def __init__(self, targeting_expression_id, preferred_scope=None, allowed_scope=None):
        self.targeting_expression_id = targeting_expression_id
        self.preferred_scope = preferred_scope or []
        self.allowed_scope = allowed_scope or []
    @property
    def sort_key(self):
        # Used for stable ordering when comparing/serializing collections.
        return self.targeting_expression_id
    @property
    def targeting_expression_id(self):
        return self._targeting_expression_id
    @targeting_expression_id.setter
    def targeting_expression_id(self, value):
        do_check(value, 'targeting_expression_id', regex_tuple=uri_regex)
        self._targeting_expression_id = value
    @property
    def preferred_scope(self):
        return self._preferred_scope
    @preferred_scope.setter
    def preferred_scope(self, value):
        do_check(value, 'preferred_scope', type=six.string_types, regex_tuple=targeting_expression_regex)
        self._preferred_scope = value
    @property
    def allowed_scope(self):
        return self._allowed_scope
    @allowed_scope.setter
    def allowed_scope(self, value):
        do_check(value, 'allowed_scope', type=six.string_types, regex_tuple=targeting_expression_regex)
        self._allowed_scope = value
    def to_etree(self):
        tei = etree.Element('{%s}Targeting_Expression_Info' % ns_map['tdq'])
        tei.attrib['targeting_expression_id'] = self.targeting_expression_id
        for scope in self.preferred_scope:
            preferred = etree.SubElement(tei, '{%s}Preferred_Scope' % ns_map['tdq'])
            preferred.text = scope
        for scope in self.allowed_scope:
            allowed = etree.SubElement(tei, '{%s}Allowed_Scope' % ns_map['tdq'])
            allowed.text = scope
        return tei
    def to_dict(self):
        d = {}
        d['targeting_expression_id'] = self.targeting_expression_id
        # TODO: Preferred / Allowed scope look like serialization bugs
        # (the live lists are stored rather than copies).
        d['preferred_scope'] = self.preferred_scope
        d['allowed_scope'] = self.allowed_scope
        return d
    def to_text(self, line_prepend=''):
        s = line_prepend + "=== Targeting Expression Info ===\n"
        s += line_prepend + " Targeting Expression ID: %s\n" % self.targeting_expression_id
        for scope in self.preferred_scope:
            s += line_prepend + " Preferred Scope: %s\n" % scope
        for scope in self.allowed_scope:
            s += line_prepend + " Allowed Scope: %s\n" % scope
        return s
    def __hash__(self):
        return hash(str(self.to_dict()))
    @staticmethod
    def from_etree(etree_xml):
        kwargs = {}
        kwargs['targeting_expression_id'] = etree_xml.xpath('./@targeting_expression_id', namespaces=ns_map)[0]
        kwargs['preferred_scope'] = []
        preferred_scope_set = etree_xml.xpath('./tdq:Preferred_Scope', namespaces=ns_map)
        for preferred in preferred_scope_set:
            kwargs['preferred_scope'].append(preferred.text)
        kwargs['allowed_scope'] = []
        allowed_scope_set = etree_xml.xpath('./tdq:Allowed_Scope', namespaces=ns_map)
        for allowed in allowed_scope_set:
            kwargs['allowed_scope'].append(allowed.text)
        return DefaultQueryInfo.TargetingExpressionInfo(**kwargs)
    @staticmethod
    def from_dict(d):
        return DefaultQueryInfo.TargetingExpressionInfo(**d)
class DefaultQuery(tm11.Query):
    """Conveys a TAXII Default Query.
    :param string targeting_expression_id: The targeting_expression used in the query
    :param criteria: The criteria of the query
    :type criteria: :class:`DefaultQuery.Criteria`
    """
    def __init__(self, targeting_expression_id, criteria):
        super(DefaultQuery, self).__init__(FID_TAXII_DEFAULT_QUERY_10)
        self.targeting_expression_id = targeting_expression_id
        self.criteria = criteria
    @property
    def targeting_expression_id(self):
        return self._targeting_expression_id
    @targeting_expression_id.setter
    def targeting_expression_id(self, value):
        do_check(value, 'targeting_expression_id', regex_tuple=uri_regex)
        self._targeting_expression_id = value
    @property
    def criteria(self):
        return self._criteria
    @criteria.setter
    def criteria(self, value):
        do_check(value, 'criteria', type=DefaultQuery.Criteria)
        self._criteria = value
    def to_etree(self):
        """Serialize, nesting a Default_Query element under the Query element."""
        q = super(DefaultQuery, self).to_etree()
        dq = etree.SubElement(q, '{%s}Default_Query' % ns_map['tdq'], nsmap=ns_map)
        dq.attrib['targeting_expression_id'] = self.targeting_expression_id
        dq.append(self.criteria.to_etree())
        return q
    def to_dict(self):
        d = super(DefaultQuery, self).to_dict()
        d['targeting_expression_id'] = self.targeting_expression_id
        d['criteria'] = self.criteria.to_dict()
        return d
    def to_text(self, line_prepend=''):
        s = super(DefaultQuery, self).to_text(line_prepend)
        s += line_prepend + " Targeting Expression ID: %s\n" % self.targeting_expression_id
        s += self.criteria.to_text(line_prepend)
        return s
    @staticmethod
    def from_etree(etree_xml):
        """Parse a DefaultQuery from a tm11 Query etree element."""
        tei = etree_xml.xpath('./tdq:Default_Query/@targeting_expression_id', namespaces=ns_map)[0] # attrib['targeting_expression_id']
        criteria = DefaultQuery.Criteria.from_etree(etree_xml.xpath('./tdq:Default_Query/tdq:Criteria', namespaces=ns_map)[0])
        return DefaultQuery(tei, criteria)
    @staticmethod
    def from_dict(d):
        tei = d['targeting_expression_id']
        criteria = DefaultQuery.Criteria.from_dict(d['criteria'])
        return DefaultQuery(tei, criteria)
class Criteria(TAXIIBase):
    """Represents criteria for a :class:`DefaultQuery`. **Note**: At least one criterion OR criteria MUST be present
    :param str operator: The logical operator (should be one of `OP_AND` or `OP_OR`)
    :param criteria: The criteria for the query
    :type criteria: :class:`DefaultQuery.Criteria`
    :param criterion: The criterion for the query
    :type criterion: :class:`DefaultQuery.Criterion`
    """
    def __init__(self, operator, criteria=None, criterion=None):
        self.operator = operator
        self.criteria = criteria or []
        self.criterion = criterion or []
    @property
    def sort_key(self):
        # Stable, content-derived key: concatenation of the sorted child keys.
        key_list = []
        ia = sorted(self.criteria, key=attrgetter('sort_key'))
        ion = sorted(self.criterion, key=attrgetter('sort_key'))
        for i in ia:
            key_list.append(i.sort_key)
        for i in ion:
            key_list.append(i.sort_key)
        return ''.join(key_list)
    @property
    def operator(self):
        return self._operator
    @operator.setter
    def operator(self, value):
        do_check(value, 'operator', value_tuple=OP_TYPES)
        self._operator = value
    @property
    def criteria(self):
        return self._criteria
    @criteria.setter
    def criteria(self, value):
        # Fixed the misspelled validation label ('critiera' -> 'criteria') so
        # do_check error messages name the actual field.
        do_check(value, 'criteria', type=DefaultQuery.Criteria)
        self._criteria = value
    @property
    def criterion(self):
        return self._criterion
    @criterion.setter
    def criterion(self, value):
        do_check(value, 'criterion', type=DefaultQuery.Criterion)
        self._criterion = value
    def to_etree(self):
        """Serialize; child Criteria are emitted before child Criterion."""
        cr = etree.Element('{%s}Criteria' % ns_map['tdq'], nsmap=ns_map)
        cr.attrib['operator'] = self.operator
        for criteria in self.criteria:
            cr.append(criteria.to_etree())
        for criterion in self.criterion:
            cr.append(criterion.to_etree())
        return cr
    def to_dict(self):
        d = {}
        d['operator'] = self.operator
        d['criteria'] = []
        for criteria in self.criteria:
            d['criteria'].append(criteria.to_dict())
        d['criterion'] = []
        for criterion in self.criterion:
            d['criterion'].append(criterion.to_dict())
        return d
    def to_text(self, line_prepend=''):
        s = line_prepend + "=== Criteria ===\n"
        s += line_prepend + " Operator: %s\n" % self.operator
        for criteria in self.criteria:
            s += criteria.to_text(line_prepend + STD_INDENT)
        for criterion in self.criterion:
            s += criterion.to_text(line_prepend + STD_INDENT)
        return s
    @staticmethod
    def from_etree(etree_xml):
        """Recursively parse a Criteria tree from its etree element."""
        kwargs = {}
        kwargs['operator'] = etree_xml.attrib['operator']
        kwargs['criteria'] = []
        criteria_set = etree_xml.xpath('./tdq:Criteria', namespaces=ns_map)
        for criteria in criteria_set:
            kwargs['criteria'].append(DefaultQuery.Criteria.from_etree(criteria))
        kwargs['criterion'] = []
        criterion_set = etree_xml.xpath('./tdq:Criterion', namespaces=ns_map)
        for criterion in criterion_set:
            kwargs['criterion'].append(DefaultQuery.Criterion.from_etree(criterion))
        return DefaultQuery.Criteria(**kwargs)
    @staticmethod
    def from_dict(d):
        kwargs = {}
        kwargs['operator'] = d['operator']
        kwargs['criteria'] = []
        criteria_set = d.get('criteria', [])
        for criteria in criteria_set:
            kwargs['criteria'].append(DefaultQuery.Criteria.from_dict(criteria))
        kwargs['criterion'] = []
        criterion_set = d.get('criterion', [])
        for criterion in criterion_set:
            kwargs['criterion'].append(DefaultQuery.Criterion.from_dict(criterion))
        return DefaultQuery.Criteria(**kwargs)
class Criterion(TAXIIBase):
    """Represents criterion for a :class:`DefaultQuery.Criteria`
    :param string target: A targeting expression identifying the target
    :param test: The test to be applied to the target
    :type test: :class:`DefaultQuery.Criterion.Test`
    :param bool negate: Whether the result of applying the test to the target should be negated
    """
    def __init__(self, target, test, negate=False):
        self.negate = negate
        self.target = target
        self.test = test
    @property
    def sort_key(self):
        return self.target
    @property
    def negate(self):
        return self._negate
    @negate.setter
    def negate(self, value):
        # negate may legitimately be None (meaning "unspecified"/False).
        do_check(value, 'negate', value_tuple=(True, False), can_be_none=True)
        self._negate = value
    @property
    def target(self):
        return self._target
    @target.setter
    def target(self, value):
        do_check(value, 'target', type=six.string_types)
        self._target = value
    @property
    def test(self):
        return self._test
    @test.setter
    def test(self, value):
        # Fixed: the field-name label passed to do_check was the value itself
        # (do_check(value, value, ...)); it is now the field name 'test'.
        do_check(value, 'test', type=DefaultQuery.Criterion.Test)
        self._test = value
    def to_etree(self):
        cr = etree.Element('{%s}Criterion' % ns_map['tdq'], nsmap=ns_map)
        if self.negate is not None:
            # XML schema expects lowercase 'true'/'false'.
            cr.attrib['negate'] = str(self.negate).lower()
        target = etree.SubElement(cr, '{%s}Target' % ns_map['tdq'], nsmap=ns_map)
        target.text = self.target
        cr.append(self.test.to_etree())
        return cr
    def to_dict(self):
        d = {}
        d['negate'] = None
        if self.negate is not None:
            d['negate'] = self.negate
        d['target'] = self.target
        d['test'] = self.test.to_dict()
        return d
    def to_text(self, line_prepend=''):
        s = line_prepend + "=== Criterion ===\n"
        # An unspecified negate (None) is displayed as False.
        s += line_prepend + " Negate: %s\n" % (self.negate if (self.negate is not None) else False)
        s += line_prepend + " Target: %s\n" % self.target
        s += self.test.to_text(line_prepend + STD_INDENT)
        return s
    @staticmethod
    def from_etree(etree_xml):
        """Parse a Criterion; a missing negate attribute is kept as None."""
        negate_set = etree_xml.xpath('./@negate')
        negate = None
        if len(negate_set) > 0:
            negate = negate_set[0] == 'true'
        target = etree_xml.xpath('./tdq:Target', namespaces=ns_map)[0].text
        test = DefaultQuery.Criterion.Test.from_etree(etree_xml.xpath('./tdq:Test', namespaces=ns_map)[0])
        return DefaultQuery.Criterion(target, test, negate)
    @staticmethod
    def from_dict(d):
        negate = d.get('negate', None)
        target = d['target']
        test = DefaultQuery.Criterion.Test.from_dict(d['test'])
        return DefaultQuery.Criterion(target, test, negate)
class Test(TAXIIBase):
    """
    :param string capability_id: The ID of the capability module that defines the relationship & parameters
    :param string relationship: The relationship (e.g., equals)
    :param parameters: The parameters for the relationship.
    :type parameters: :class:`dict` of key/value pairs
    """
    def __init__(self, capability_id, relationship, parameters=None):
        self.capability_id = capability_id
        self.relationship = relationship
        self.parameters = parameters or {}
        # Validate against the known capability modules at construction time.
        self.validate()
    @property
    def capability_id(self):
        return self._capability_id
    @capability_id.setter
    def capability_id(self, value):
        do_check(value, 'capability_id', regex_tuple=uri_regex)
        self._capability_id = value
    @property
    def relationship(self):
        return self._relationship
    @relationship.setter
    def relationship(self, value):
        # TODO: For known capability IDs, check that the relationship is valid
        # TODO: provide a way to register other capability IDs
        do_check(value, 'relationship', type=six.string_types)
        self._relationship = value
    @property
    def parameters(self):
        return self._parameters
    @parameters.setter
    def parameters(self, value):
        do_check(list(value.keys()), 'parameters.keys()', regex_tuple=uri_regex)
        self._parameters = value
    # TODO: Can this be done better?
    def validate(self):
        """Check relationship + parameters against the capability module.

        Unknown capability IDs are accepted as-is (no definition to
        validate against); unknown relationships or parameter names raise.
        """
        capability_module = capability_modules.get(self.capability_id)
        if capability_module is None: # Nothing is defined for this, validation not possible
            return True
        relationship = capability_module.relationships.get(self.relationship)
        if relationship is None:
            raise Exception('relationship not in defined relationships. %s not in %s' % (self.relationship, list(capability_module.relationships.keys())))
        for name, value in list(self.parameters.items()):
            param = relationship.parameters.get(name)
            if param is None:
                raise Exception('name not valid. %s not in %s' % (name, list(relationship.parameters.keys())))
            param.verify(value)
    def to_etree(self):
        t = etree.Element('{%s}Test' % ns_map['tdq'], nsmap=ns_map)
        t.attrib['capability_id'] = self.capability_id
        t.attrib['relationship'] = self.relationship
        for k, v in list(self.parameters.items()):
            p = etree.SubElement(t, '{%s}Parameter' % ns_map['tdq'])
            p.attrib['name'] = k
            # Serialize parameter values to schema-friendly text:
            # bools as lowercase, datetimes as ISO 8601, numbers via str().
            if isinstance(v, bool):
                p.text = str(v).lower()
            elif isinstance(v, datetime.datetime):
                p.text = v.isoformat()
            elif isinstance(v, numbers.Number):
                p.text = str(v)
            else:
                p.text = v
        return t
    def to_dict(self):
        d = {}
        d['capability_id'] = self.capability_id
        d['relationship'] = self.relationship
        d['parameters'] = self.parameters
        return d
    def to_text(self, line_prepend=''):
        # NOTE(review): header reads "=== Test ==" (unbalanced '='); likely a
        # typo for "=== Test ===" - confirm before changing user-visible output.
        s = line_prepend + "=== Test ==\n"
        s += line_prepend + " Capability ID: %s\n" % self.capability_id
        s += line_prepend + " Relationship: %s\n" % self.relationship
        for k, v in six.iteritems(self.parameters):
            s += line_prepend + " Parameter: %s = %s\n" % (k, v)
        return s
    @staticmethod
    def from_etree(etree_xml):
        """Parse a Test, coercing parameter text back to Python values
        when the capability module declares the parameter's type."""
        capability_id = etree_xml.attrib['capability_id']
        relationship = etree_xml.attrib['relationship']
        parameters = {}
        cm = capability_modules.get(capability_id, None)
        if cm is not None:
            r = cm.relationships.get(relationship, None)
            if r is None:
                raise ValueError('Relationship (%s) not in CM (%s).' % (r, capability_id))
        else:
            r = None
        for parameter in etree_xml.xpath('./tdq:Parameter', namespaces=ns_map):
            k = parameter.attrib['name']
            v = parameter.text
            if v in ('true', 'false'): # bool is a special case
                parameters[k] = v == 'true'
            elif r is not None:
                type_ = r.parameters[k].type
                if type_ == six.string_types: # basestring can't be instantiated, but str can be
                    type_ = str
                elif type_ == datetime.datetime:
                    # We can use this function to parse datetime strings.
                    type_ = dateutil.parser.parse
                parameters[k] = type_(v)
            else:
                # Unknown capability module: keep the raw string value.
                parameters[k] = v
        return DefaultQuery.Criterion.Test(capability_id, relationship, parameters)
    @staticmethod
    def from_dict(d):
        return DefaultQuery.Criterion.Test(**d)
# Attach the helper classes as nested attributes so they can be referenced
# as DefaultQueryInfo.TargetingExpressionInfo, DefaultQuery.Criteria, etc.
# (matching how the docstrings and from_* methods above address them).
DefaultQueryInfo.TargetingExpressionInfo = TargetingExpressionInfo
DefaultQuery.Criterion = Criterion
DefaultQuery.Criteria = Criteria
DefaultQuery.Criterion.Test = Test
# Locate the bundled XSD next to this module and register the default query
# format with the TAXII 1.1 message layer.
package_dir, package_filename = os.path.split(__file__)
schema_file = os.path.join(package_dir, "xsd", "TAXII_DefaultQuery_Schema.xsd")
tm11.register_query_format(
    format_id=FID_TAXII_DEFAULT_QUERY_10,
    query=DefaultQuery,
    query_info=DefaultQueryInfo,
    schema=schema_file)
| StarcoderdataPython |
79913 | <reponame>Miguel-J/pineboo-buscar<filename>pineboolib/kugar/mreportdetail.py
from pineboolib import decorators
from pineboolib.flcontrols import ProjectClass
from pineboolib.kugar.mreportsection import MReportSection
class MReportDetail(ProjectClass, MReportSection):
@decorators.BetaImplementation
def __init__(self, *args):
super(MReportDetail, self).__init__(*args)
@decorators.NotImplementedWarn
# def operator=(self, mrd): #FIXME
def operator(self, mrd):
return self
| StarcoderdataPython |
3280089 | <reponame>wk8/elle<gh_stars>100-1000
# Copyright (C) 2013-2016, Quentin "mefyl" Hocquet
#
# This software is provided "as is" without warranty of any kind,
# either expressed or implied, including but not limited to the
# implied warranties of fitness for a particular purpose.
#
# See the LICENSE file for more information.
class Enumeration(type):
    """Metaclass turning a class into an enumeration container.

    Each name in ``values`` is instantiated eagerly (the class must accept
    a ``name`` keyword and self-register via ``__register``); members become
    class attributes and iterate in registration order. With
    ``orderable=True`` every member also gets a creation ``index`` that
    backs ordering comparisons.
    """
    def __new__(cls, name, bases, dct,
                values=(), orderable=False):
        # values/orderable are consumed by this metaclass; only the plain
        # (name, bases, dct) triple is forwarded to type.__new__.
        # Defaults are immutable tuples (never use a mutable [] default).
        return super(Enumeration, cls).__new__(cls, name, bases, dct)
    def __init__(cls, name, bases, dct,
                 values=(), orderable=False):
        super(Enumeration, cls).__init__(name, bases, dct)
        cls.__instances = {}
        cls.__orderable = orderable
        for value in values:
            # Each instantiation self-registers through __register.
            cls(name=value)
    def __register(cls, instance):
        if cls.__orderable:
            # index records registration order; used by __lt__/__le__.
            instance.index = len(cls.__instances)
        cls.__instances[instance.name] = instance
        setattr(cls, instance.name, instance)
    def __iter__(cls):
        """Iterate over the registered members in registration order."""
        return iter(cls.__instances.values())
class Enumerated(metaclass=Enumeration):
    """Base class for members managed by the Enumeration metaclass."""
    def __init__(self, name):
        self.__name = name
        # Register this instance on its class (metaclass-private hook).
        self.__class__._Enumeration__register(self)
    def __str__(self):
        return '{}.{}'.format(type(self).__name__, self.name)
    @property
    def name(self):
        """The member's symbolic name."""
        return self.__name
    def __eq__(self, other):
        # Members are singletons, so equality is identity.
        return self is other
    def __lt__(self, other):
        # Only meaningful for orderable enumerations (which set .index).
        return self.index < other.index
    def __le__(self, other):
        return self.index <= other.index
    def __hash__(self):
        return hash(self.name)
| StarcoderdataPython |
87984 | <gh_stars>0
from paa191t1.dijkstra.datastructs.vector import Vector
from paa191t1.tests.dijkstra.datastructs.test_dijkstra_structs import TestStructsBase
class TestStructsVector(TestStructsBase):
    """Runs the shared Dijkstra data-structure test suite against Vector."""
    struct = Vector()
| StarcoderdataPython |
3276388 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ... import units as u
from ..distances import Distance
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, \
Supergalactic, Galactocentric, CIRS, GCRS, AltAz, \
ITRS, PrecessedGeocentric, HeliocentricTrueEcliptic, \
BarycentricTrueEcliptic
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import (pytest, quantity_allclose as allclose,
assert_quantity_allclose as assert_allclose)
from .utils import randomly_sample_sphere
from ...time import Time
#Coordinates just for these tests.
class TCoo1(ICRS):
    """ICRS-like dummy frame used as a transform endpoint in these tests."""
    pass
class TCoo2(ICRS):
    """Second ICRS-like dummy frame; transform target for TCoo1 in tests."""
    pass
def test_transform_classes():
    """
    Tests the class-based/OO syntax for creating transforms
    """
    # FunctionTransform: arbitrary callable mapping one frame to another.
    tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
    trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
                                 register_graph=frame_transform_graph)
    c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
    c2 = c1.transform_to(TCoo2)
    assert_allclose(c2.ra.radian, 1)
    assert_allclose(c2.dec.radian, 0.5)
    # DynamicMatrixTransform: matrix computed per-coordinate at transform time.
    def matfunc(coo, fr):
        return [[1, 0, 0],
                [0, coo.ra.degree, 0],
                [0, 0, 1]]
    trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
    trans2.register(frame_transform_graph)
    c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
    c4 = c3.transform_to(TCoo2)
    assert_allclose(c4.ra.degree, 1)
    # NOTE(review): the next line repeats the ra check verbatim; it was
    # presumably meant to verify c4.dec - confirm and fix upstream.
    assert_allclose(c4.ra.degree, 1)
    # be sure to unregister the second one - no need for trans1 because it
    # already got unregistered when trans2 was created.
    trans2.unregister(frame_transform_graph)
def test_transform_decos():
    """
    Tests the decorator syntax for creating transforms
    """
    c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
    # Decorator form of FunctionTransform registration.
    @frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
    def trans(coo1, f):
        return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
    c2 = c1.transform_to(TCoo2)
    assert_allclose(c2.ra.degree, 1)
    assert_allclose(c2.dec.degree, 4)
    c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
    # Decorator form of StaticMatrixTransform: fixed matrix, no arguments.
    @frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
    def matrix():
        return [[2, 0, 0],
                [0, 1, 0],
                [0, 0, 1]]
    c4 = c3.transform_to(TCoo2)
    # Matrix doubles x and leaves y/z untouched.
    assert_allclose(c4.cartesian.x, 2*u.pc)
    assert_allclose(c4.cartesian.y, 1*u.pc)
    assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
    """Exercises TransformGraph.find_shortest_path on a hand-built integer graph."""
    class FakeTransform(object):
        # Stand-in carrying only the `priority` attribute the search reads.
        def __init__(self, pri):
            self.priority = pri
    g = t.TransformGraph()
    #cheating by adding graph elements directly that are not classes - the
    #graphing algorithm still works fine with integers - it just isn't a valid
    #TransformGraph
    #the graph looks is a down-going diamond graph with the lower-right slightly
    #heavier and a cycle from the bottom to the top
    #also, a pair of nodes isolated from 1
    g._graph[1][2] = FakeTransform(1)
    g._graph[1][3] = FakeTransform(1)
    g._graph[2][4] = FakeTransform(1)
    g._graph[3][4] = FakeTransform(2)
    g._graph[4][1] = FakeTransform(5)
    g._graph[5][6] = FakeTransform(1)
    path, d = g.find_shortest_path(1, 2)
    assert path == [1, 2]
    assert d == 1
    path, d = g.find_shortest_path(1, 3)
    assert path == [1, 3]
    assert d == 1
    # The lighter branch (via node 2) must win over the heavier one (via 3).
    path, d = g.find_shortest_path(1, 4)
    print('Cached paths:', g._shortestpaths)
    assert path == [1, 2, 4]
    assert d == 2
    #unreachable
    path, d = g.find_shortest_path(1, 5)
    assert path is None
    assert d == float('inf')
    path, d = g.find_shortest_path(5, 6)
    assert path == [5, 6]
    assert d == 1
def test_sphere_cart():
    """
    Tests the spherical <-> cartesian transform functions
    """
    from ...utils import NumpyRNGContext
    from .. import spherical_to_cartesian, cartesian_to_spherical
    x, y, z = spherical_to_cartesian(1, 0, 0)
    assert_allclose(x, 1)
    assert_allclose(y, 0)
    assert_allclose(z, 0)
    x, y, z = spherical_to_cartesian(0, 1, 1)
    assert_allclose(x, 0)
    assert_allclose(y, 0)
    assert_allclose(z, 0)
    # 3-4-5 triangle in the equatorial plane.
    x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
    assert_allclose(x, 3)
    assert_allclose(y, 4)
    assert_allclose(z, 0)
    r, lat, lon = cartesian_to_spherical(0, 1, 0)
    assert_allclose(r, 1)
    assert_allclose(lat, 0 * u.deg)
    assert_allclose(lon, np.pi / 2 * u.rad)
    #test round-tripping
    # Fixed RNG seed so the round-trip sample is reproducible.
    with NumpyRNGContext(13579):
        x, y, z = np.random.randn(3, 5)
    r, lat, lon = cartesian_to_spherical(x, y, z)
    x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
    assert_allclose(x, x2)
    assert_allclose(y, y2)
    assert_allclose(z, z2)
def test_transform_path_pri():
    """
    This checks that the transformation path prioritization works by
    making sure the ICRS -> Gal transformation always goes through FK5
    and not FK4.
    """
    # Clear cached shortest paths so the search actually runs.
    frame_transform_graph.invalidate_cache()
    tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
    assert tpath == [ICRS, FK5, Galactic]
    assert td == 2
    #but direct from FK4 to Galactic should still be possible
    tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
    assert tpath == [FK4, FK4NoETerms, Galactic]
    assert td == 2
def test_obstime():
    """
    Checks to make sure observation time is
    accounted for at least in FK4 <-> ICRS transformations
    """
    b1950 = Time('B1950', scale='utc')
    j1975 = Time('J1975', scale='utc')
    # Same sky position, two different observation epochs.
    fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
    fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
    icrs_50 = fk4_50.transform_to(ICRS)
    icrs_75 = fk4_75.transform_to(ICRS)
    # now check that the resulting coordinates are *different* - they should be,
    # because the obstime is different
    assert icrs_50.ra.degree != icrs_75.ra.degree
    assert icrs_50.dec.degree != icrs_75.dec.degree
| StarcoderdataPython |
36135 | <reponame>Omar-Gonzalez/echangarro-demo
# Generated by Django 2.2.2 on 2020-03-07 03:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Orden.guia_de_envio and widen the estado / preferencia_de_pago choice sets."""
    dependencies = [
        ('ordenes', '0002_auto_20200305_0056'),
    ]
    operations = [
        # New optional shipping-tracking number.
        migrations.AddField(
            model_name='orden',
            name='guia_de_envio',
            field=models.CharField(blank=True, max_length=640, null=True),
        ),
        # NOTE(review): the defaults 'INICIADO' and 'MERCADO LIBRE' below are not
        # members of their respective choices lists — confirm this is intended.
        migrations.AlterField(
            model_name='orden',
            name='estado',
            field=models.CharField(choices=[('TENTATIVA', 'TENTATIVA'), ('PENDIENTE PAGO', 'PENDIENTE PAGO'), ('PAGADO', 'PAGADO'), ('ENVIADO', 'ENVIADO'), ('ENTREGADO', 'ENTREGADO'), ('CANCELADO', 'CANCELADO'), ('DEVUELTO', 'DEVUELTO')], default='INICIADO', max_length=110),
        ),
        migrations.AlterField(
            model_name='orden',
            name='preferencia_de_pago',
            field=models.CharField(choices=[('MERCADO PAGO', 'MERCADO PAGO'), ('PAYPAL', 'PAYPAL'), ('TRANSFERENCIA BANCARIA', 'TRANSFERENCIA BANCARIA')], default='MERCADO LIBRE', max_length=110),
        ),
    ]
| StarcoderdataPython |
1657828 | <filename>env/lib/python3.6/site-packages/tqdm/_tqdm_pandas.py
import sys
__author__ = "github.com/casperdcl"
__all__ = ['tqdm_pandas']
def tqdm_pandas(tclass, *targs, **tkwargs):
    """
    Registers the given `tqdm` instance with
    `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
    It will even close() the `tqdm` instance upon completion.
    Parameters
    ----------
    tclass  : tqdm class you want to use (eg, tqdm, tqdm_notebook, etc)
    targs and tkwargs  : arguments for the tqdm instance
    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> from tqdm import tqdm, tqdm_pandas
    >>>
    >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
    >>> tqdm_pandas(tqdm, leave=True)  # can use tqdm_gui, optional kwargs, etc
    >>> # Now you can use `progress_apply` instead of `apply`
    >>> df.groupby(0).progress_apply(lambda x: x**2)
    References
    ----------
    https://stackoverflow.com/questions/18603270/
    progress-indicator-during-pandas-operations-python
    """
    from tqdm import TqdmDeprecationWarning
    # Deprecated shim: callers may pass either the tqdm *class* (delayed
    # adapter) or an already-constructed tqdm *instance*; each path forwards
    # to the modern `tqdm.pandas(...)` registration.
    if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith(
            'tqdm_')):  # delayed adapter case
        TqdmDeprecationWarning("""\
Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.
""", fp_write=getattr(tkwargs.get('file', None), 'write', sys.stderr.write))
        tclass.pandas(*targs, **tkwargs)
    else:
        # Instance case: re-register via the instance's class, keeping the
        # instance alive as `deprecated_t` so it is reused (and closed) later.
        TqdmDeprecationWarning("""\
Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.
""", fp_write=getattr(tclass.fp, 'write', sys.stderr.write))
        type(tclass).pandas(deprecated_t=tclass)
| StarcoderdataPython |
1708275 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# TextFilterResult.py
# 2021, <NAME>, https://github.com/toolgood/ToolGood.TextFilter.Api
# MIT Licensed
__all__ = ['ToolGood.TextFilter.TextFilterResult']
class TextFilterResult():
    """Result payload returned by the ToolGood TextFilter service.

    The original attribute documentation was mojibake-garbled Chinese
    bare-string statements; it is restored here as English comments.
    All attribute names and default values are unchanged.
    """
    # Return code: 0) success, 1) failure.
    code = 0
    # Human-readable description of the return code.
    message = ""
    # Identifier of the originating request.
    requestId = 0
    # Risk level: PASS (normal content, release directly), REVIEW (suspect
    # content, recommend human review), REJECT (violating content, block).
    riskLevel = ""
    # Risk category, e.g. Char (abnormal characters), Politics, Terrorism,
    # Porn, Gamble, Drug, Contraband (illegal trading), Abuse, Other
    # (promotion / fraud), Custom (user-defined sensitive words).
    riskCode = ""
    # Sentiment score: > 0 positive, < 0 negative; populated when riskLevel
    # is REVIEW (suspect content).
    sentimentScore = 0.0
    # Risk details (list of entries).
    # NOTE(review): class-level mutable default — shared across instances
    # unless reassigned per instance; confirm callers always reassign.
    details = []
    # Contact-information details (list of entries); same sharing caveat.
    contacts = []
1651297 | <reponame>JamesDownsLab/AdventOfCode2019<filename>Day6/day6code.py
# Universal Orbit Map
# Whenever A orbits B and B orbits C, then A INDIRECTLY ORBITS C
# Read the orbit-map puzzle input; drop the empty string produced by the
# trailing newline after split.
with open('input.txt', 'r') as f:
    test_input = f.read()
test_input = test_input.split('\n')[:-1]
class orbiter:
    """A node in the orbit map: an object ``id`` orbiting a ``parent`` node."""

    def __init__(self, id, parent):
        """Store the object's identifier and its direct parent node (None for COM)."""
        self.id = id
        self.parent = parent

    def grandparents(self):
        """Return the *ids* of every ancestor above the direct parent, up to 'COM'.

        Assumes ``self.parent`` is not None (parents() guards the root case).
        """
        if self.parent.id == 'COM':
            return []
        ancestor_ids = []
        node = self.parent
        while True:
            node = node.parent
            ancestor_ids.append(node.id)
            if node.id == 'COM':
                return ancestor_ids

    def parents(self):
        """Return [direct parent *node*] + grandparent *ids*, or [] for the root.

        Note the deliberate mix: the first element is an orbiter object, the
        rest are id strings — callers in this script rely on that shape.
        """
        return [] if self.parent is None else [self.parent] + self.grandparents()
# Build the orbit graph: each "PARENT)CHILD" line links child -> parent.
# Parents may appear before they are defined as children, so a placeholder
# with parent=None is created and patched up when its own line arrives.
objects = {}
for relationship in test_input:
    parent, child = relationship.split(')')
    if parent not in objects.keys():
        objects[parent] = orbiter(parent, None)
    if child not in objects.keys():
        objects[child] = orbiter(child, objects[parent])
    else:
        if objects[child].parent == None:
            objects[child].parent = objects[parent]
# Part 1: total direct + indirect orbits = sum of ancestor-chain lengths.
print('Total orbits: ', sum([len(o.parents()) for o in objects.values()]))
my_gp = objects['YOU'].parents()
santa_gp = objects['SAN'].parents()
def first_common_parent(gp1, gp2):
    """Return the first element of ``gp1`` that also appears in ``gp2``.

    Used to locate the closest shared ancestor of two orbit chains.
    Returns None when the chains share no element (the original fell off
    the end and returned None implicitly).
    """
    # Hash gp2 once so each membership test is O(1); the original rescanned
    # gp2 for every candidate (O(len(gp1) * len(gp2))).
    candidates = set(gp2)
    for item in gp1:
        if item in candidates:
            return item
    return None
# Part 2: number of orbital transfers = distance from YOU's parent chain to
# the common ancestor plus distance from SAN's parent chain to it.
common = first_common_parent(my_gp, santa_gp)
dist1 = my_gp.index(common)
dist2 = santa_gp.index(common)
print('Number of transfers: ', dist1 + dist2)
| StarcoderdataPython |
3289591 | _base_ = "./ss_mlBCE_MaskFull_PredDouble_PBR05_woCenter_edgeLower_refinePM10_01_02MasterChefCan.py"
OUTPUT_DIR = "output/self6dpp/ssYCBV/ss_mlBCE_MaskFull_PredDouble_PBR05_woCenter_edgeLower_refinePM10/15_35PowerDrill"
DATASETS = dict(
TRAIN=("ycbv_035_power_drill_train_real_aligned_Kuw",),
TRAIN2=("ycbv_035_power_drill_train_pbr",),
)
MODEL = dict(
WEIGHTS="output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/15_35PowerDrill/model_final_wo_optim-0769bee7.pth"
)
| StarcoderdataPython |
1606199 | <gh_stars>0
import os
from scalesim.scale_config import scale_config
from scalesim.topology_utils import topologies
from scalesim.simulator import simulator as sim
class scalesim:
    """Top-level SCALE-Sim driver: parses config + topology and runs the simulator."""
    def __init__(self,
                 save_disk_space=False,
                 verbose=True,
                 config='',
                 topology=''):
        """Create the config/topology holders and parse the given files.

        save_disk_space: when True, trace files are not written.
        config/topology: file paths; a CLI topology overrides the config's.
        """
        # Data structures
        self.config = scale_config()
        self.topo = topologies()
        # File paths
        self.config_file = ''
        self.topology_file = ''
        # Member objects
        #self.runner = r.run_nets()
        self.runner = sim()
        # Flags
        self.save_space = save_disk_space
        self.verbose_flag = verbose
        self.run_done_flag = False
        self.logs_generated_flag = False
        self.set_params(config_filename=config, topology_filename=topology)
    #
    def set_params(self,
                   config_filename='',
                   topology_filename='' ):
        """Validate and parse the config file and topology CSV.

        Exits the process when either file path does not exist.
        """
        # First check if the user provided a valid topology file
        if not topology_filename == '':
            if not os.path.exists(topology_filename):
                print("ERROR: scalesim.scale.py: Topology file not found")
                print("Input file:" + topology_filename)
                print('Exiting')
                exit()
            else:
                self.topology_file = topology_filename
        if not os.path.exists(config_filename):
            print("ERROR: scalesim.scale.py: Config file not found")
            print("Input file:" + config_filename)
            print('Exiting')
            exit()
        else:
            self.config_file = config_filename
        # Parse config first
        self.config.read_conf_file(self.config_file)
        # Take the CLI topology over the one in config
        # If topology is not passed from CLI take the one from config
        if self.topology_file == '':
            self.topology_file = self.config.get_topology_path()
        else:
            self.config.set_topology_file(self.topology_file)
        # Parse the topology
        self.topo.load_arrays(topofile=self.topology_file)
        #num_layers = self.topo.get_num_layers()
        #self.config.scale_memory_maps(num_layers=num_layers)
    #
    def run_scale(self, top_path='.'):
        """Configure the simulator runner with this object's state and run once."""
        self.top_path = top_path
        save_trace = not self.save_space
        self.runner.set_params(
            config_obj=self.config,
            topo_obj=self.topo,
            top_path=self.top_path,
            verbosity=self.verbose_flag,
            save_trace=save_trace
        )
        self.run_once()
    def run_once(self):
        """Execute one simulation run and mark run/log flags as done."""
        if self.verbose_flag:
            self.print_run_configs()
        #save_trace = not self.save_space
        # TODO: Anand
        # TODO: This release
        # TODO: Call the class member functions
        #self.runner.run_net(
        #    config=self.config,
        #    topo=self.topo,
        #    top_path=self.top_path,
        #    save_trace=save_trace,
        #    verbosity=self.verbose_flag
        #)
        self.runner.run()
        self.run_done_flag = True
        #self.runner.generate_all_logs()
        self.logs_generated_flag = True
        if self.verbose_flag:
            print("************ SCALE SIM Run Complete ****************")
    #
    def print_run_configs(self):
        """Pretty-print the array/SRAM/dataflow configuration banner."""
        df_string = "Output Stationary"
        df = self.config.get_dataflow()
        if df == 'ws':
            df_string = "Weight Stationary"
        elif df == 'is':
            df_string = "Input Stationary"
        print("====================================================")
        print("******************* SCALE SIM **********************")
        print("====================================================")
        arr_h, arr_w = self.config.get_array_dims()
        print("Array Size: \t" + str(arr_h) + "x" + str(arr_w))
        ifmap_kb, filter_kb, ofmap_kb = self.config.get_mem_sizes()
        print("SRAM IFMAP (kB): \t" + str(ifmap_kb))
        print("SRAM Filter (kB): \t" + str(filter_kb))
        print("SRAM OFMAP (kB): \t" + str(ofmap_kb))
        print("Dataflow: \t" + df_string)
        print("CSV file path: \t" + self.config.get_topology_path())
        print("Number of Remote Memory Banks: \t" + str(self.config.get_mem_banks()))
        if self.config.use_user_dram_bandwidth():
            print("Bandwidth: \t" + self.config.get_bandwidths_as_string())
            print('Working in USE USER BANDWIDTH mode.')
        else:
            print('Working in ESTIMATE BANDWIDTH mode.')
        print("====================================================")
    #
    def get_total_cycles(self):
        """Return total simulated cycles, or None (with an error message) before a run."""
        me = 'scale.' + 'get_total_cycles()'
        if not self.run_done_flag:
            message = 'ERROR: ' + me
            message += ' : Cannot determine cycles. Run the simulation first'
            print(message)
            return
        return self.runner.get_total_cycles()
| StarcoderdataPython |
1731273 | <reponame>lipovsek/oslo<filename>oslo/torch/nn/parallel/pipeline_parallel/__init__.py
from oslo.torch.nn.parallel.pipeline_parallel.pipeline_parallel import (
PipelineParallel,
)
# NOTE(review): this name is non-standard — Python's export convention is
# `__all__` (lowercase) with *string* entries, e.g. __all__ = ["PipelineParallel"].
# Confirm nothing imports `__ALL__` before renaming.
__ALL__ = [PipelineParallel]
| StarcoderdataPython |
1750102 | <reponame>rahulremanan/HIMA
import os
import sys
import random
import warnings
import types
import time
import gc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import chain
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
try:
import warnings
warnings.filterwarnings('ignore')
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import SGD, RMSprop, Adagrad, Adam
from keras import backend as K
from keras.metrics import binary_crossentropy
from keras.models import model_from_json
except:
print ("Install Keras 2 (cmd: $sudo pip3 install keras) to run this notebook.")
import tensorflow as tf
# Input geometry and base network hyper-parameters.
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNELS = 3
DEFAULT_UNIT_SIZE = 128
DEFAULT_DROPOUT = 0.55
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
# Seed the RNGs for reproducibility. The original code *assigned* to the seed
# functions (`random.seed = seed`, `np.random.seed = seed`), which seeded
# nothing and clobbered the functions themselves; they must be called.
seed = 1024
random.seed(seed)
np.random.seed(seed)
args = types.SimpleNamespace()
args.data_path = ['./data/']
args.config_file = ['./model/trained_2018_03_21-13_26_55_config_UNet.json']
args.weights_file = ['./model/trained_2018_03_21-13_26_55_weights_UNet.model']
args.output_dir = ['./model/']
# Checkpoint filename encodes the input size and the base filter count.
checkpointer_savepath = os.path.join(args.output_dir[0] +
                                     'checkpoint/UNet_I' +
                                     str(IMG_WIDTH) + '_' +
                                     str(IMG_HEIGHT) + '_' +
                                     'U' + str(DEFAULT_UNIT_SIZE) +
                                     '.h5')
TEST_PATH = os.path.join(args.data_path[0] + '/test/')
print(TEST_PATH)
TRAIN_PATH = os.path.join(args.data_path[0] + '/train_aug/')
print(TRAIN_PATH)
# First-level subdirectory names are the per-sample ids.
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
# Select which pre-saved .npz training archives to load: augmented,
# pre-processed, or the concatenation of both.
use_pre_proc_images = True
merge_pre_proc_images = True
if use_pre_proc_images == True and merge_pre_proc_images == False:
    train_data = np.load(os.path.join(args.data_path[0]+'/train_pre_proc_256.npz'))
elif use_pre_proc_images == False and merge_pre_proc_images == False:
    train_data = np.load(os.path.join(args.data_path[0]+'/train_aug_256.npz'))
elif use_pre_proc_images == True and merge_pre_proc_images == True:
    # NOTE(review): the variable names look swapped here — `train_data_pre_proc`
    # loads the *aug* archive and `train_data_aug` loads the *pre_proc* one.
    # The merged result is the same either way; confirm intent.
    train_data_pre_proc = np.load(os.path.join(args.data_path[0]+'/train_aug_256.npz'))
    train_data_aug = np.load(os.path.join(args.data_path[0]+'/train_pre_proc_256.npz'))
else:
    train_data = np.load(os.path.join(args.data_path[0]+'/train_aug_256.npz'))
if merge_pre_proc_images == True:
    # Concatenate both datasets along the sample axis, then free the parts.
    X_train_aug = train_data_aug['xtrain']
    Y_train_aug = train_data_aug['ytrain']
    X_train_pre_proc = train_data_pre_proc['xtrain']
    Y_train_pre_proc = train_data_pre_proc['ytrain']
    X_train = np.concatenate((X_train_aug , X_train_pre_proc), axis =0)
    Y_train = np.concatenate((Y_train_aug , Y_train_pre_proc), axis =0)
    del train_data_aug
    del train_data_pre_proc
    del X_train_aug
    del Y_train_aug
    del X_train_pre_proc
    del Y_train_pre_proc
    gc.collect()
else:
    X_train = train_data['xtrain']
    Y_train = train_data['ytrain']
    del train_data
    gc.collect()
if len(X_train) != len(Y_train):
    print ("Mismatched images and prediction masks for training data ...")
    sys.exit(1)
# Optional random contiguous sub-sampling of the training set.
split_data = False
split_factor = 4
if split_data == True:
    sample_size = len(X_train)
    split_size = sample_size//split_factor
    # NOTE(review): `randint` is not imported anywhere in this file — enabling
    # split_data would raise NameError; presumably `random.randint` was meant.
    n =(randint(0, split_size))
    try:
        X_train = X_train[n:((sample_size - split_size) + n)]
        Y_train = Y_train[n:((sample_size - split_size) + n)]
    except:
        print ("Failed to split training data ...")
        X_train = X_train
        Y_train = Y_train
else:
    X_train = X_train
    Y_train = Y_train
def mean_iou(y_true, y_pred):
    """Mean IoU metric averaged over prediction thresholds 0.5 .. 0.95 (step 0.05)."""
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize predictions at this threshold; 2 = number of classes.
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # tf.metrics keeps running totals in local variables; reset them so
        # each threshold is scored independently.
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
# Binary segmentation: foreground vs background.
NUM_CLASSES = 2
def mean_iou_tf(y_true, y_pred):
    """Single-shot TF mean-IoU metric over NUM_CLASSES classes (no thresholding)."""
    score, up_opt = tf.metrics.mean_iou(y_true, y_pred, NUM_CLASSES)
    # Reset the metric's running-total local variables before evaluation.
    K.get_session().run(tf.local_variables_initializer())
    with tf.control_dependencies([up_opt]):
        score = tf.identity(score)
    return score
# Laplace smoothing term: avoids division by zero on empty masks.
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between flattened ground-truth and predicted masks."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def bce_dice(y_true, y_pred):
    """Combined loss: binary cross-entropy minus log of the Dice coefficient."""
    return binary_crossentropy(y_true, y_pred)-K.log(dice_coef(y_true, y_pred))
def dice_coef_loss(y_true, y_pred):
    """Negative Dice coefficient, usable directly as a minimization loss."""
    return -dice_coef(y_true, y_pred)
# Model construction parameters (consumed by build_UNet below).
use_dice = True
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
DEFAULT_ACTIVATION = 'elu' # 'relu', 'elu'
unit_size = DEFAULT_UNIT_SIZE
dropout = DEFAULT_DROPOUT
final_max_pooling = True
def build_UNet(unit_size = None,
               final_max_pooling = None):
    """Build a 2D U-Net over the module-level ``inputs`` tensor.

    The encoder halves the spatial resolution four times while doubling the
    filter count (unit_size .. unit_size*16); the decoder mirrors it with
    transposed convolutions plus skip connections, ending in a 1x1 sigmoid
    convolution for per-pixel binary segmentation. Layer sequence is
    identical to the original hand-unrolled version; the repeated
    conv-dropout-conv stage is factored into a local helper.

    Parameters
    ----------
    unit_size : int
        Number of filters in the first encoder stage.
    final_max_pooling : bool
        Currently unused; kept for backward compatibility with callers.

    Returns
    -------
    keras.models.Model
    """
    def _conv_pair(filters, tensor):
        # Two 3x3 same-padded convolutions with a dropout in between — the
        # repeated building block of every encoder/decoder stage.
        x = Conv2D(filters, (3, 3),
                   activation=DEFAULT_ACTIVATION,
                   kernel_initializer='he_normal',
                   padding='same') (tensor)
        x = Dropout(dropout) (x)
        x = Conv2D(filters, (3, 3),
                   activation=DEFAULT_ACTIVATION,
                   kernel_initializer='he_normal',
                   padding='same') (x)
        return x
    # Normalize 8-bit pixel values into [0, 1].
    s = Lambda(lambda x: x / 255) (inputs)
    # Encoder (contracting path).
    c1 = _conv_pair(unit_size, s)
    p1 = MaxPooling2D((2, 2)) (c1)
    c2 = _conv_pair(unit_size*2, p1)
    p2 = MaxPooling2D((2, 2)) (c2)
    c3 = _conv_pair(unit_size*4, p2)
    p3 = MaxPooling2D((2, 2)) (c3)
    c4 = _conv_pair(unit_size*8, p3)
    p4 = MaxPooling2D((2, 2)) (c4)
    # Bottleneck: a conv pair plus one extra dropout+conv (three convolutions
    # in total, matching the original architecture).
    c5 = _conv_pair(unit_size*16, p4)
    c5 = Dropout(dropout) (c5)
    c5 = Conv2D(unit_size*16, (3, 3),
                activation=DEFAULT_ACTIVATION,
                kernel_initializer='he_normal',
                padding='same') (c5)
    # Decoder (expanding path) with skip connections to the encoder stages.
    u6 = Conv2DTranspose(unit_size*8, (2, 2), strides=(2, 2), padding='same') (c5)
    u6 = concatenate([u6, c4])
    c6 = _conv_pair(unit_size*8, u6)
    u7 = Conv2DTranspose(unit_size*4, (2, 2), strides=(2, 2), padding='same') (c6)
    u7 = concatenate([u7, c3])
    c7 = _conv_pair(unit_size*4, u7)
    u8 = Conv2DTranspose(unit_size*2, (2, 2), strides=(2, 2), padding='same') (c7)
    u8 = concatenate([u8, c2])
    c8 = _conv_pair(unit_size*2, u8)
    u9 = Conv2DTranspose(unit_size, (2, 2), strides=(2, 2), padding='same') (c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = _conv_pair(unit_size, u9)
    # 1x1 convolution producing the per-pixel foreground probability.
    outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
    model = Model(inputs=[inputs], outputs=[outputs])
    return model
# Instantiate the U-Net with the module-level hyper-parameters.
model = build_UNet(unit_size = unit_size,
                   final_max_pooling = final_max_pooling)
def load_prediction_model(args):
    """Load a Keras model architecture from the JSON config file named in ``args``.

    Exits the process with status 1 when the file cannot be read or parsed
    (original behavior), but now reports the underlying error instead of
    silently swallowing it with a bare ``except:``.
    """
    try:
        print (args.config_file[0])
        with open(args.config_file[0]) as json_file:
            model_json = json_file.read()
        return model_from_json(model_json)
    except Exception as err:
        print ("Please specify a model configuration file ...")
        print ("Error: " + str(err))
        sys.exit(1)
def load_prediction_model_weights(args):
    """Load trained weights from ``args.weights_file[0]`` into the global ``model``.

    Exits the process with status 1 on failure (original behavior), but now
    reports the underlying error instead of hiding it behind a bare ``except:``.
    """
    try:
        model.load_weights(args.weights_file[0])
        print ("Loaded model weights from: "
               + str(args.weights_file[0]))
        return model
    except Exception as err:
        print ("Error loading model weights ...")
        print ("Error: " + str(err))
        sys.exit(1)
# Model-loading strategy flags consumed by load_saved_model (checked in order).
load_from_checkpoint = True
load_from_config = False
load_model_weights = False
def load_saved_model(checkpointer_savepath = None,
                     args = None,
                     mean_iou = None,
                     mean_iou_tf = None,
                     dice_coef = None,
                     bce_dice = None,
                     dice_coef_loss = None,
                     load_from_checkpoint = None,
                     load_from_config = None,
                     load_model_weights = None):
    """Load a model via one of three strategies (checkpoint / config JSON / weights).

    Custom metric/loss callables must be passed so Keras can deserialize a
    checkpoint that was compiled with them.
    """
    if load_from_checkpoint == True:
        # `use_dice` is read from module scope.
        if use_dice == True:
            model = load_model(checkpointer_savepath, \
                               custom_objects={'mean_iou': mean_iou, \
                                               'mean_iou_tf': mean_iou_tf, \
                                               'dice_coef': dice_coef, \
                                               'bce_dice': bce_dice, \
                                               'dice_coef_loss': dice_coef_loss})
        else:
            model = load_model(checkpointer_savepath, \
                               custom_objects={'mean_iou': mean_iou})
    elif load_from_config == True:
        model = load_prediction_model(args)
        model = load_prediction_model_weights(args)
    elif load_model_weights == True:
        try:
            model = load_prediction_model_weights(args)
        except:
            # NOTE(review): if this branch fires, `model` is never assigned and
            # the final `return model` raises UnboundLocalError — confirm.
            print ("An exception has occurred, while loading model weights ...")
    else:
        # NOTE(review): `model` is local here (assigned elsewhere in this
        # function), so `model = model` raises UnboundLocalError rather than
        # reading the module-level model — confirm this branch is unreachable.
        model = model
    return model
# Try to resume from a saved model; fall back to the freshly built one.
# NOTE(review): the bare `except: model = model` silently discards any
# loading error (including typos) — consider logging the exception.
try:
    model = load_saved_model(checkpointer_savepath = checkpointer_savepath,
                             args = args,
                             mean_iou = mean_iou,
                             mean_iou_tf = mean_iou_tf,
                             dice_coef = dice_coef,
                             bce_dice = bce_dice,
                             dice_coef_loss = dice_coef_loss,
                             load_from_checkpoint = load_from_checkpoint,
                             load_from_config = load_from_config,
                             load_model_weights = load_model_weights)
except:
    model = model
# Candidate optimizers; only DEFAULT_OPTIMIZER is actually used below.
sgd = SGD(lr=1e-7,
          decay=0.5,
          momentum=1,
          nesterov=True)
rms = RMSprop(lr=1e-7,
              rho=0.9,
              epsilon=1e-08,
              decay=0.0)
ada = Adagrad(lr=1e-7,
              epsilon=1e-08,
              decay=0.0)
adam = Adam(lr=1e-4,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=None,
            decay=0.0)
DEFAULT_OPTIMIZER = adam
# Loss/metric selection. With the defaults below (all three True) the second
# branch wins: Dice loss with Dice/IoU/accuracy/MSE metrics.
use_dice = True
use_dice_loss = True
use_custom_iou = True
if use_dice == True and use_dice_loss == False:
    model.compile(optimizer = DEFAULT_OPTIMIZER,
                  loss = bce_dice,
                  metrics = ['binary_crossentropy',
                             dice_coef,
                             mean_iou,
                             mean_iou_tf])
elif use_dice_loss == True and use_dice == True :
    model.compile(optimizer = DEFAULT_OPTIMIZER,
                  loss = dice_coef_loss,
                  metrics = [dice_coef,
                             mean_iou,
                             mean_iou_tf,
                             'acc',
                             'mse'])
elif use_custom_iou == True:
    model.compile(optimizer = DEFAULT_OPTIMIZER,
                  loss = 'binary_crossentropy',
                  metrics = [mean_iou,
                             mean_iou_tf,
                             'acc',
                             'mse'])
else:
    model.compile(optimizer=DEFAULT_OPTIMIZER,
                  loss='binary_crossentropy',
                  metrics=[mean_iou_tf,
                           'acc',
                           'mse'])
model_summary = False
if model_summary == True:
    model.summary()
# Stop after 5 epochs without improvement; keep only the best checkpoint.
earlystopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint(checkpointer_savepath,
                               verbose=1,
                               save_best_only=True)
results = model.fit(X_train,
                    Y_train,
                    validation_split=0.2,
                    batch_size=8,
                    epochs=1,
                    callbacks=[earlystopper,
                               checkpointer])
def generate_timestamp():
    """Return the current local time formatted as YYYY_MM_DD-HH_MM_SS (also printed)."""
    stamp = time.strftime("%Y_%m_%d-%H_%M_%S")
    print("Time stamp generated: " + stamp)
    return stamp
# Module-level run timestamp, embedded into saved-model filenames by save_model.
timestr = generate_timestamp()
def save_model(args, name, model):
    """Save the model's weights (.model) and architecture JSON (.json).

    Files are written under args.output_dir[0] with the module-level run
    timestamp `timestr` and the given `name` suffix embedded in the filename.
    """
    file_loc = args.output_dir[0]
    file_pointer = os.path.join(file_loc+"//trained_"+ timestr)
    model.save_weights(os.path.join(file_pointer
                                    + "_weights"
                                    +str(name)
                                    +".model"))
    # Architecture only; weights are stored separately above.
    model_json = model.to_json()
    with open(os.path.join(file_pointer
                           +"_config"
                           +str(name)
                           +".json"), "w") as json_file:
        json_file.write(model_json)
    print ("Saved the trained model weights to: " +
           str(os.path.join(file_pointer
                            + "_weights"+str(name)
                            + ".model")))
    print ("Saved the trained model configuration as a json file to: " +
           str(os.path.join(file_pointer
                            + "_config"+str(name)
                            + ".json")))
save_model(args, '_UNet', model) | StarcoderdataPython |
1653113 | #!/usr/bin/env python
from flask import Flask, render_template, Response
import cv2
import sys
import numpy
app = Flask(__name__)
@app.route('/')
def index():
    # Serve the page that embeds the /calc MJPEG stream.
    return render_template('index.html')
def get_frame():
    """Yield JPEG-encoded webcam frames as multipart chunks for an MJPEG stream.

    Fixes over the original:
    - each part is declared as ``image/jpeg`` (was ``text/plain``, which
      browsers will not render as a video stream);
    - the camera is released in a ``finally`` block (the original
      ``del(camera)`` after the infinite loop was unreachable);
    - a failed read ends the stream instead of encoding a bad frame;
    - ``tobytes()`` replaces the deprecated ``tostring()``.
    """
    camera_port = 0
    camera = cv2.VideoCapture(camera_port)  # this makes a web cam object
    try:
        while True:
            retval, im = camera.read()
            if not retval:
                # Camera unavailable or read failure: stop the stream cleanly.
                break
            imgencode = cv2.imencode('.jpg', im)[1]
            stringData = imgencode.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
    finally:
        camera.release()
@app.route('/calc')
def calc():
    # Stream webcam frames; each multipart part replaces the previous image.
    return Response(get_frame(),mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger — dev use only.
    app.run(host='localhost', debug=True, threaded=True)
| StarcoderdataPython |
3264149 | # Copyright 2021 Zuva Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
from ..api.apicall import ApiCall
from ..models.field import Field
class FieldAPI(object):
    """
    FieldAPI contains the functionality accepted by the Fields Microservice
    """
    def __init__(self, token: str, url: str):
        # ApiCall factory bound to this service's auth token and base URL.
        self._call = ApiCall(token, url)
    def get(self) -> Tuple[List[Field], ApiCall]:
        """
        Gets the list of fields that exist in the ZDAI, which
        the API token has access to.
        :return: the parsed Field list and the ApiCall used (for inspection).
        """
        caller = self._call.new(method = 'GET', path = 'fields')
        caller.send()
        fields = []
        # NOTE(review): float()/int() on field.get(...) will raise TypeError
        # if the service omits a key (get returns None) — confirm the schema
        # guarantees all keys are present.
        for field in caller.response.json():
            fields.append(Field(
                id = str(field.get('field_id')),
                name = str(field.get('name')),
                description = str(field.get('description')),
                bias = float(field.get('bias')),
                f_score = float(field.get('f_score')),
                precision = float(field.get('precision')),
                recall = float(field.get('recall')),
                document_count = int(field.get('document_count')),
                is_custom = bool(field.get('is_custom'))
            ))
        return fields, caller
| StarcoderdataPython |
3289945 | <gh_stars>1-10
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def insertionSortList(self, head):
        """Sort a singly linked list in ascending order using insertion sort.

        Walks the list once; whenever the next node is out of order it is
        unlinked and re-inserted after the last smaller node, scanning from
        a sentinel head. Returns the head of the sorted list.
        """
        if not head:
            return head
        sentinel = ListNode(0)
        sentinel.next = head
        node = head
        while node.next:
            if node.next.val >= node.val:
                # Already in order relative to the sorted prefix; advance.
                node = node.next
                continue
            # Find the insertion point for node.next, scanning from the front.
            scan = sentinel
            while scan.next.val < node.next.val:
                scan = scan.next
            moved = node.next
            node.next = moved.next
            moved.next = scan.next
            scan.next = moved
        return sentinel.next
| StarcoderdataPython |
1780035 | <gh_stars>1-10
from .coco_evaluator import COCOEvaluator
from .voc_evaluator import VOCEvaluator
| StarcoderdataPython |
def is_even(n):
    """Return True when ``n`` leaves no remainder modulo two."""
    return not n % 2
def test_product_of_even_numbers_is_even():
    """Nose-style generator test: the product of two even numbers is even."""
    evens = [(18, 8), (14, 12), (0, 4), (6, 2), (16, 10)]
    for e1, e2 in evens:
        # Per-case description displayed by the nose test runner.
        check_even.description = "for even numbers %d and %d their product is even as well" % (e1, e2)
        yield check_even, e1, e2
def check_even(e1, e2):
    # Check helper yielded by the generator test above.
    assert is_even(e1 * e2)
| StarcoderdataPython |
1763779 | from django.conf.urls import *
from django.contrib import admin
import AuShadha.settings
from patient.views import *
from patient.dijit_widgets.pane import render_patient_pane
from patient.dijit_widgets.tree import render_patient_tree
admin.autodiscover()
# Patient-app URL routes (CRUD, JSON feed, summary/info views, and Dijit
# pane/tree widgets). Several routes come in with-id / without-id pairs.
# NOTE(review): `patterns('', ...)` and string view references were removed
# in Django 1.10 — this file targets an older Django; confirm before upgrading.
urlpatterns = patterns('',
                       ################################ PATIENT CRUD ##################################
                       url(r'new/add/(?P<clinic_id>\d+)/$' ,
                           'patient.views.patient_detail_add',
                           name='patient_detail_add'
                           ),
                       url(r'new/add/$' ,
                           'patient.views.patient_detail_add',
                           name='patient_detail_add_without_id'
                           ),
                       url(r'patient/edit/(?P<id>\d+)/$',
                           'patient.views.patient_detail_edit',
                           name='patient_detail_edit'
                           ),
                       url(r'patient/del/(?P<id>\d+)/$',
                           'patient.views.patient_detail_del',
                           name='patient_detail_del'
                           ),
                       ################################ PATIENT JSON ##################################
                       url(r'patient/json/$',
                           'patient.views.render_patient_json',
                           name='render_patient_json'
                           ),
                       ################################ PATIENT SUMMARY ###############################
                       url(r'patient/summary/$',
                           'patient.views.render_patient_summary',
                           name='render_patient_summary_without_id'
                           ),
                       url(r'patient/summary/(?P<patient_id>\d+)/$',
                           'patient.views.render_patient_summary',
                           name='render_patient_summary_with_id'
                           ),
                       ################################ PATIENT INFO #################################
                       url(r'patient/info/(?P<patient_id>\d+)/$',
                           'patient.views.render_patient_info',
                           name='render_patient_info'
                           ),
                       ################################ PATIENT PANE ##################################
                       url(r'patient/pane/(?P<patient_id>\d+)/$',
                           'patient.dijit_widgets.pane.render_patient_pane',
                           name='render_patient_pane_with_id'
                           ),
                       url(r'patient/pane/$',
                           'patient.dijit_widgets.pane.render_patient_pane',
                           name='render_patient_pane_without_id'
                           ),
                       ################################ PATIENT TREE ##################################
                       url(r'patient/tree/(?P<patient_id>\d+)/$',
                           'patient.dijit_widgets.tree.render_patient_tree',
                           name='render_patient_tree_with_id'
                           ),
                       url(r'patient/tree/$',
                           'patient.dijit_widgets.tree.render_patient_tree',
                           name='render_patient_tree_without_id'
                           ),
                       ############################ PATIENT INDEX ######################################
                       #url(r'patient/index/$',
                       #'patient.views.patient_index',
                       #name='patient_index'
                       #),
                       ############################ PATIENT LIST ######################################
                       #url(r'patient/list/$',
                       #'patient.views.render_patient_list' ,
                       #name='render_patient_list'
                       #),
                       # url(r'patient/list/(?P<id>\d+)/$',
                       #     'patient.views.patient_detail_list',
                       #     name = 'patient_detail_list'
                       #     ),
                       )
| StarcoderdataPython |
3295251 | <filename>src/config.py<gh_stars>0
from transformers import (BertConfig, RobertaConfig, XLNetConfig, AlbertConfig, LongformerConfig,
BertTokenizer, RobertaTokenizer, XLNetTokenizer, AlbertTokenizer, LongformerTokenizer,
DebertaConfig, DebertaTokenizer, ElectraConfig, ElectraTokenizer, GPT2Config, GPT2Tokenizer)
from models import (BertForRelationIdentification, RoBERTaForRelationIdentification,
XLNetForRelationIdentification, AlbertForRelationIdentification,
LongFormerForRelationIdentification, DebertaForRelationIdentification,
ElectraForRelationIdentification, GPT2ForRelationIdentification)
# Marker tokens inserted around the two entities of a relation-classification
# example (entity 1 start/end, entity 2 start/end).
EN1_START = "[s1]"
EN1_END = "[e1]"
EN2_START = "[s2]"
EN2_END = "[e2]"
# keep the seq order
SPEC_TAGS = [EN1_START, EN1_END, EN2_START, EN2_END]
# Model families that take segment ids (token_type_ids) -- presumably consumed
# by the corresponding models; confirm against models.py.
MODEL_REQUIRE_SEGMENT_ID = {'bert', 'xlnet', 'albert', 'deberta'}
# Maps a model-type key to its (model class, config class, tokenizer class) triple.
MODEL_DICT = {
    "bert": (BertForRelationIdentification, BertConfig, BertTokenizer),
    "roberta": (RoBERTaForRelationIdentification, RobertaConfig, RobertaTokenizer),
    "xlnet": (XLNetForRelationIdentification, XLNetConfig, XLNetTokenizer),
    "albert": (AlbertForRelationIdentification, AlbertConfig, AlbertTokenizer),
    "longformer": (LongFormerForRelationIdentification, LongformerConfig, LongformerTokenizer),
    "deberta": (DebertaForRelationIdentification, DebertaConfig, DebertaTokenizer),
    "electra": (ElectraForRelationIdentification, ElectraConfig, ElectraTokenizer),
    "gpt2": (GPT2ForRelationIdentification, GPT2Config, GPT2Tokenizer)
}
# Tokenizer families singled out for the four special entity tags above --
# NOTE(review): exact handling lives elsewhere; confirm before relying on this.
TOKENIZER_USE_FOUR_SPECIAL_TOKs = {'roberta', 'longformer', 'gpt2'}
# change VERSION if any major updates
VERSION = "0.1"
CONFIG_VERSION_NAME = "REModelVersion"
# add new args associated to version
NEW_ARGS = {"use_focal_loss": False,
            "focal_loss_gamma": 2,
            "use_binary_classification_mode": False,
            "balance_sample_weights": False}
| StarcoderdataPython |
41729 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 13:43:01 2016
@author: fergal
A series of metrics to quantify the noise in a lightcurve:
Includes:
x sgCdpp
x Marshall's noise estimate
o An FT based estimate of 6 hour artifact strength.
o A per thruster firing estimate of 6 hour artifact strength.
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
from scipy.signal import savgol_filter
import matplotlib.pyplot as mp
import numpy as np
import fft
keplerLongCadence_s = 1765.4679
keplerLongCadence_days = keplerLongCadence_s / float(86400)
def computeRollTweakAmplitude(y, nHarmonics=3, tweakPeriod_days=.25,
                              expTime_days=None, plot=False):
    """Measure the strength of the K2 roll-tweak artifact with an FT approach.

    The Fourier transform of the lightcurve is sampled at the first
    ``nHarmonics`` harmonics of the thruster-firing frequency
    (1 / ``tweakPeriod_days`` cycles per day), and each sampled amplitude
    is normalised by the median amplitude of its local neighbourhood.

    Optional Inputs:
    -----------------
    plot
        Show a diagnostic plot

    Returns:
    --------
    float indicating strength of correction. A value of 1 means the
    amplitude of the tweak is approx equal to the strength of all other
    signals in the FT.
    """
    if expTime_days is None:
        expTime_days = keplerLongCadence_days

    # FT with frequencies in cycles per day.
    ft = fft.computeFft(y, expTime_days)

    # Thruster firings repeat every tweakPeriod_days (6 hours by default).
    artifactFreq_cd = 1 / tweakPeriod_days

    if plot:
        mp.clf()
        mp.plot(ft[:, 0], 1e6 * ft[:, 1], 'b-')

    nPtsForMed = 50
    metric = 0
    for harmonic in range(1, nHarmonics + 1):
        freq = harmonic * artifactFreq_cd
        nearest = np.argmin(np.fabs(ft[:, 0] - freq))
        localMed = np.median(ft[nearest - nPtsForMed:nearest + nPtsForMed, 1])
        metric += ft[nearest, 1] / localMed

        if plot:
            mp.axvline(freq, color='m')

    return metric / float(nHarmonics)
def computeSgCdpp_ppm(y, transitDuration_cadences=13, plot=False):
    """Estimate 6-hr CDPP with the Savitzky-Golay (SG-CDPP) technique.

    The lightcurve is detrended with a Savitzky-Golay filter, the residuals
    are smoothed with a boxcar one transit-duration wide, and a robust
    standard deviation of the smoothed residuals is scaled to CDPP.

    Inputs:
    ----------
    y
        (1d numpy array) normalised flux with zero mean, in units of
        fractional amplitude. Bad data skews the result; Nans or Infs
        are not caught.

    Optional Inputs:
    -----------------
    transitDuration_cadences
        (int) Assumed transit width in cadences. The default of 13
        corresponds to a 6.5 hour transit in K2.
    plot
        Show a diagnostic plot

    Returns:
    ------------
    Estimated noise in parts per million.
    """
    # Fixed tuning values inherited from the original Matlab implementation.
    window = 101
    polyorder = 2
    noiseNorm = 1.40

    cadencesPerTransit = transitDuration_cadences
    if cadencesPerTransit < 4:
        raise ValueError("Cadences per transit must be >= 4")

    if len(y) < window:
        raise ValueError("Can't compute CDPP for timeseries with fewer points than defined window (%i points)" %(window))

    trend = savgol_filter(y, window_length=window, polyorder=polyorder)
    residuals = y - trend

    # Boxcar smoothing over one transit duration.
    boxcar = np.ones(cadencesPerTransit) / float(cadencesPerTransit)
    smoothed = np.convolve(residuals, boxcar, mode='same')

    if plot:
        mp.clf()
        mp.plot(y, 'ko')
        mp.plot(trend, 'r-')
        mp.plot(smoothed, 'g.')

    return noiseNorm * robustStd(smoothed, 1) * 1e6
def estimateScatterWithMarshallMethod(flux, plot=False):
    """Estimate the typical point-to-point scatter of a lightcurve.

    Works on the first differences of ``flux``: gross outliers in the
    difference series are sigma-clipped away, then a robust (MAD-based)
    standard deviation of the remaining differences is converted to a
    per-point scatter.

    Inputs:
    ----------
    flux
        (np 1d array). Flux to measure scatter of. Need not have
        zero mean.

    Optional Inputs:
    -----------------
    plot
        Show a diagnostic plot

    Returns:
    ------------
    (float) scatter of the data in the same units as the input ``flux``.

    Notes:
    ----------
    Reasonably sensitive to outliers; for best results apply outlier
    rejection to the lightcurve first. Nans and Infs propagate to the
    return value.
    """
    differences = np.diff(flux)

    # Drop egregious outliers first; shouldn't make much difference.
    badMask = sigmaClip(differences, 5)
    differences = differences[~badMask]

    # Robust standard deviation via the median absolute deviation.
    centre = np.mean(differences)
    mad = np.median(np.fabs(differences - centre))
    std = 1.4826 * mad

    if plot:
        mp.clf()
        mp.plot(flux, 'ko')
        mp.plot(differences, 'r.')
        mp.figure(2)
        mp.clf()
        bins = np.linspace(-3000, 3000, 61)
        mp.hist(1e6 * differences, bins=bins, ec="none")
        mp.xlim(-3000, 3000)
        mp.axvline(-1e6 * float(std / np.sqrt(2)), color='r')
        mp.axvline(1e6 * float(std / np.sqrt(2)), color='r')

    # std measures the rms of the difference series; a single point's
    # scatter is 1/sqrt(2) of that.
    return float(std / np.sqrt(2))
def singlePointDifferenceSigmaClip(a, nSigma=4, maxIter=1e4, initialClip=None):
    """Iteratively find and remove outliers in the first derivative.

    If a dataset can be modeled as a constant offset + noise + outliers,
    those outliers can be found and rejected with a sigma-clipping approach.

    If the data contains some time-varying signal, this signal must be removed
    before applying a sigma clip. This function removes the signal by applying
    a single point difference: it computes a[i+1] - a[i] and sigma clips the
    result. Slowly varying trends have single point differences dominated by
    noise, but outliers have strong first derivatives and stand out.

    Inputs:
    ----------
    a
        (1d numpy array) Array to be cleaned
    nSigma
        (float) Threshold to cut at. 5 is typically a good value for
        most arrays found in practice.

    Optional Inputs:
    -------------------
    maxIter
        (int) Maximum number of iterations
    initialClip
        (1d boolean array) If an element of initialClip is set to True,
        that value is treated as a bad value in the first iteration, and
        not included in the computation of the mean and std.

    Returns:
    ------------
    1d numpy array. Where set to True, the corresponding element of a
    is an outlier.
    """
    # Scatter in the single-point difference series is sqrt(2) times larger
    # than in the initial lightcurve, so scale the clipping threshold down
    # accordingly.
    # BUG FIX: `threshold` was previously computed but never used -- the
    # unscaled nSigma was passed to sigmaClip, making the clip too loose.
    threshold = nSigma / np.sqrt(2)

    diff1 = np.roll(a, -1) - a
    diff1[-1] = 0  # last difference wraps around to a[0]; don't trust it
    idx1 = sigmaClip(diff1, threshold, maxIter, initialClip)

    diff2 = np.roll(a, 1) - a
    diff2[0] = 0  # first difference wraps around to a[-1]; don't trust it
    idx2 = sigmaClip(diff2, threshold, maxIter, initialClip)

    flags = idx1 & idx2

    # Only mark single-point outliers, not strong trends: the previous point
    # in the difference series must be an outlier in the opposite direction,
    # otherwise the point is left unflagged. This prevents marking transits
    # as bad data.
    outlierIdx = flags
    outlierIdx &= np.roll(idx1, 1)
    outlierIdx &= (np.roll(diff1, 1) * diff1 < 0)
    return outlierIdx
def sigmaClip(y, nSigma, maxIter=1e4, initialClip=None):
    """Iteratively find and remove outliers.

    Find outliers by identifying all points more than **nSigma** from
    the mean value. Then recalculate the mean and std and repeat until
    no more outliers are found.

    Inputs:
    ----------
    y
        (1d numpy array) Array to be cleaned
    nSigma
        (float) Threshold to cut at. 5 is typically a good value for
        most arrays found in practice.

    Optional Inputs:
    -------------------
    maxIter
        (int) Maximum number of iterations
    initialClip
        (1d boolean array) If an element of initialClip is set to True,
        that value is treated as a bad value in the first iteration, and
        not included in the computation of the mean and std.

    Returns:
    ------------
    1d numpy array. Where set to True, the corresponding element of y
    is an outlier.
    """
    idx = initialClip
    if initialClip is None:
        idx = np.zeros(len(y), dtype=bool)

    assert len(idx) == len(y)

    oldNumClipped = np.sum(idx)

    for i in range(int(maxIter)):
        # nan-aware statistics over the currently-unclipped points.
        mean = np.nanmean(y[~idx])
        std = np.nanstd(y[~idx])

        newIdx = np.fabs(y - mean) > nSigma * std
        newIdx = np.logical_or(idx, newIdx)
        newNumClipped = np.sum(newIdx)

        # Converged: no new points were clipped this iteration.
        if newNumClipped == oldNumClipped:
            return newIdx

        oldNumClipped = newNumClipped
        idx = newIdx
    # maxIter exhausted without convergence; return the latest mask.
    # (Removed dead code: a pointless `i += 1` on the loop variable and
    # commented-out debug plotting/printing.)
    return idx
def robustMean(y, percent):
    """Mean of y after trimming the lowest and highest *percent* of points.

    A fast and typically good-enough estimate of the mean in the presence
    of outliers. Non-finite values are discarded before trimming.
    """
    finite = np.sort(y[np.isfinite(y)])
    n = len(finite)
    lo = int(percent / 100. * n)
    hi = int((100 - percent) / 100. * n)
    return np.mean(finite[lo:hi])
def robustStd(y, percent):
    """Standard deviation of y after trimming the extreme percentiles.

    Discards the lowest and highest *percent* of the (finite) points and
    takes np.std over the remainder, matching the behaviour of the original
    Matlab ``robust_std``. Used by computeSgCdpp_ppm().
    """
    finite = np.sort(y[np.isfinite(y)])
    n = len(finite)
    lo = int(percent / 100. * n)
    hi = int((100 - percent) / 100. * n)
    return np.std(finite[lo:hi])
| StarcoderdataPython |
1618498 | # -*- coding: utf-8 -*-
""" ใทใใฅใฌใผใทใงใณๅถๅพกใขใธใฅใผใซ
"""
# python lib
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import multiprocessing as mp
# utils
from controllers.sml.model import *
from controllers.sml.extension.visualization.canvas_grid_visualization_extension import CanvasGrid_3d
from controllers.sml.inc.define import *
class SimulationControl():
    """Controls execution of the agent-based heat simulation.

    Attributes:
        simulation_step [Int] : number of simulation steps to run
        models_floor_dic [dict]: mapping of floor id -> HeatModel instance
        dataset [dict]: simulation input data in dictionary form
        output_folder [str] : name of the output folder
    """
    def __init__(self,post_data,dataclass):
        self.simulation_step = int(post_data["simulation_step"])
        self.models_floor_dic = {}
        self.dataset = post_data["simulation_data"]
        self.output_folder = post_data["output_folder"]
        self.dataclass = dataclass
        # Build one HeatModel per floor found in the input dataset.
        for data in self.dataset:
            model = HeatModel(
                data["init_bems_data"]["floor"],
                self.simulation_step,
                data["init_bems_data"],
                data["control_data"],
                data["layout_data"],
                data["source_data"],
                self.dataclass.simulation_start_time,
                self.dataclass.simulation_end_time
            )
            self.models_floor_dic[data["init_bems_data"]["floor"]] = model
    def _str_simulation_state(self):
        """Print a short banner describing the simulation run to the console."""
        print("Simulation starts")
        time.sleep(0.1)
        print("Simulation Calculation Steps: {}".format(self.simulation_step))
        time.sleep(0.1)
        print("Simulation Results Folder: {}".format(self.output_folder))
    def run_simulation(self,key,model,i):
        """Run a single floor's model to completion (multiprocessing worker).

        Args:
            key : floor identifier for this model
            model [agent model]: agent simulation model to step
            i [Int] : worker process number (used as the tqdm bar position)

        Returns:
            result [tuple]: (key, list of per-step agent states)
        """
        # Label shown on this worker's tqdm progress bar.
        # (The f-string text is Japanese for "process"; it is a runtime
        # string and is left untranslated.)
        info = f'ใƒ—ใƒญใ‚»ใ‚น#{i:>2} '
        for _ in tqdm(range(self.simulation_step), desc=info,position=i):
            if model.terminate:
                break
            else:
                model.step()
        result = (key,model.spaces_agents_list)
        return result
    def run_all_simulations(self):
        """Run every floor's simulation sequentially (single-process mode).

        Returns:
            result_arr [array]: list of (key, agent-state list) tuples;
            populated only when batch mode (dataclass.bach) is enabled.
        """
        start = time.time()
        self._str_simulation_state()
        result_arr = []
        for key,model in self.models_floor_dic.items():
            for i in tqdm(range(self.simulation_step+1)):
                if model.terminate:
                    break
                else:
                    model.step()
                # In non-batch mode, persist intermediate output every 60 steps.
                if (self.dataclass.bach == False) and (i%60 == 0):
                    self.dataclass.per_output_data(key,model.spaces_agents_list[-1],i)
            if self.dataclass.bach == True:
                result_arr.append((key,model.spaces_agents_list))
        elapsed_time = time.time() - start
        print("Simulation finished!")
        print("Simulation time:{}".format(int(elapsed_time)) + "[sec]")
        return result_arr
    def run_all_simulations_multi_process(self) -> dict:
        """Run all floor simulations in parallel using multiprocessing.

        Returns:
            output_data [dict]: results gathered from the worker processes.
        """
        # Record the wall-clock start time.
        start = time.time()
        # Print the run banner.
        self._str_simulation_state()
        # Build (key, model, worker-index) argument tuples for the workers.
        # NOTE(review): range(3) caps the zipped argument list at three
        # floors -- confirm this limit is intentional.
        args = list(zip(self.models_floor_dic.keys(),self.models_floor_dic.values(),range(3)))
        # Number of argument tuples (used to pad past the tqdm bars below).
        L = len(args)
        # Execute the workers in a process pool.
        with mp.Pool() as pool:
            self.output_data = pool.starmap(self.run_simulation, args)
        print("\n" * L)
        # Compute elapsed wall-clock time.
        elapsed_time = time.time() - start
        print("Simulation finished!")
        print("Simulation time:{}".format(int(elapsed_time)) + "[sec]")
        return self.output_data
3204305 | <reponame>JustinTStanley/docdb-rest<gh_stars>1-10
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import os
import json
import base64
def lambda_handler(event, context):
    """API Gateway Lambda authorizer validating HTTP Basic credentials.

    Credentials are compared against the USERNAME/PASSWORD environment
    variables; the returned IAM policy allows or denies invocation of the
    whole API stage (see buildPolicy).
    """
    print(event)
    headers = event["headers"]
    authorization = None
    # Header names may arrive in either capitalisation depending on the
    # API Gateway payload; the lower-case variant wins if both are present
    # (preserves the original precedence).
    if "Authorization" in headers:
        authorization = headers["Authorization"]
    if "authorization" in headers:
        authorization = headers["authorization"]
    if authorization is None:
        # BUG FIX: the original fell through and crashed with an
        # AttributeError on None.split(); deny cleanly instead.
        return buildPolicy(event, "anonymous", "Deny")
    # "Basic <base64(user:pass)>" -> take the token, decode it, and split on
    # the FIRST colon only so passwords containing ':' survive intact.
    # (BUG FIX: the original split on every colon and took splits[1].)
    decoded = base64.b64decode(authorization.split(" ")[-1]).decode("utf-8")
    username, _, password = decoded.partition(":")
    action = "Allow"
    if (username != os.environ["USERNAME"] or password != os.environ["PASSWORD"]):
        print("Invalid username or password")
        action = "Deny"
    return buildPolicy(event, username, action)
def buildPolicy(event, principalId, action):
    """Build the IAM policy document an API Gateway authorizer must return.

    The resource ARN is widened to every verb and path of the calling API
    stage ("<restApiId>/<stage>/*/*") so the decision applies to the whole
    stage. The assembled policy is printed for CloudWatch logging.
    """
    arn_parts = event["methodArn"].split(":")
    aws_region = arn_parts[3]
    aws_account_id = arn_parts[4]
    # arn_parts[5] looks like "<restApiId>/<stage>/<verb>/<path...>".
    api_path_parts = arn_parts[5].split("/")
    rest_api_id = api_path_parts[0]
    api_stage = api_path_parts[1]
    api_arn = "arn:aws:execute-api:{0}:{1}:{2}/{3}/*/*".format(
        aws_region, aws_account_id, rest_api_id, api_stage
    )
    policy = {
        "principalId": principalId,
        "policyDocument": {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "execute-api:Invoke",
                    "Effect": action,
                    "Resource": [api_arn],
                }
            ],
        },
    }
    print(policy)
    return policy
| StarcoderdataPython |
119695 | import os
from glob import glob
import subprocess
import sys
import pathlib
import json
import yaml
import nbformat
import nbterm
import traceback
import asyncio
import config
import click
def get_toc_files(notebooks_only=True):
    """Return the files listed in ``_toc.yml``.

    Recursively walks the Jupyter-Book table of contents, collecting every
    ``root``/``file`` entry found under ``chapters``/``sections``/``parts``.

    Parameters
    ----------
    notebooks_only : bool
        If True (default), keep only entries with a matching ``.ipynb`` on
        disk and append the extension to the returned names.

    Returns
    -------
    list of str
    """
    with open("_toc.yml") as fid:
        toc_dict = yaml.safe_load(fid)

    def _toc_files(toc_dict, file_list=None):
        # None-sentinel instead of a mutable default argument (anti-pattern
        # in the original; behaviour unchanged since the inner def was
        # re-evaluated on every outer call anyway).
        if file_list is None:
            file_list = []
        for key, value in toc_dict.items():
            if key in ["root", "file"]:
                if notebooks_only and not os.path.exists(value + ".ipynb"):
                    continue
                if notebooks_only:
                    file_list.append(f"{value}.ipynb")
                else:
                    file_list.append(value)
            elif key in ["chapters", "sections", "parts"]:
                # Recurse into each sub-section, accumulating its files.
                file_list_ext = []
                for sub in value:
                    file_list_ext = _toc_files(sub, file_list_ext)
                file_list.extend(file_list_ext)
        return file_list

    return _toc_files(toc_dict)
def get_conda_kernel_cwd(name: str):
    """Return the directory of the conda environment called *name*, or None."""
    raw = subprocess.check_output(["conda", "env", "list", "--json"]).decode("ascii")
    for env_dir in json.loads(raw)["envs"]:
        env_dir = pathlib.Path(env_dir)
        if env_dir.stem == name:
            return env_dir
    return None
def nb_set_kernelname(file_in, kernel_name, file_out=None):
    """Rewrite a notebook's kernelspec name (in place unless file_out given)."""
    target = file_in if file_out is None else file_out
    notebook = nbformat.read(file_in, as_version=nbformat.NO_CONVERT)
    notebook["metadata"]["kernelspec"]["name"] = kernel_name
    nbformat.write(notebook, target)
def nb_get_kernelname(file_in):
    """Return the kernelspec name recorded in a notebook file."""
    notebook = nbformat.read(file_in, as_version=nbformat.NO_CONVERT)
    return notebook["metadata"]["kernelspec"]["name"]
def nb_clear_outputs(file_in, file_out=None):
    """Strip outputs and execution counts from every code cell of a notebook."""
    target = file_in if file_out is None else file_out
    notebook = nbformat.read(file_in, as_version=nbformat.NO_CONVERT)
    assert isinstance(notebook["cells"], list), "cells is not a list"
    # Mutate the cells in place; non-code cells pass through untouched.
    for cell in notebook["cells"]:
        if cell["cell_type"] == "code":
            cell["execution_count"] = None
            cell["outputs"] = []
    nbformat.write(notebook, target)
def nb_execute_nbterm(notebook_path: str, kernel_cwd=None, output_dir=None):
    """Execute a notebook with nbterm; return True on success, False on error."""
    try:
        nb_path = pathlib.Path(notebook_path)
        out_dir = output_dir if output_dir else nb_path.parent
        save_path = pathlib.Path(out_dir) / nb_path.name
        nb = nbterm.Notebook(
            nb_path=nb_path, save_path=save_path
        )  # kernel_cwd=kernel_cwd,
        asyncio.run(nb.run_all())
        nb.save(save_path)
        print(f"Executed notebook has been saved to: {save_path}")
        return True

    except Exception:
        msg = f'Error executing the notebook "{notebook_path}".\n'
        msg += f'See notebook "{notebook_path}" for the traceback.\n'
        print(f"{traceback.format_exc()}\n{msg}")
        return False
def nb_execute(notebook_filename, output_dir=".", kernel_name="python3"):
    """
    Execute a notebook.
    see http://nbconvert.readthedocs.io/en/latest/execute_api.html
    """
    import io
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    from nbconvert.preprocessors import CellExecutionError

    with io.open(notebook_filename, encoding="utf-8") as f:
        nb = nbformat.read(f, as_version=nbformat.NO_CONVERT)

    ep = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)

    # None signals a failed (or aborted) execution to the caller.
    out = None
    try:
        out = ep.preprocess(nb, {"metadata": {"path": "./"}})
    except CellExecutionError:
        msg = f'Error executing the notebook "{notebook_filename}".\n'
        msg += f'See notebook "{notebook_filename}" for the traceback.\n'
        print(msg)
    finally:
        # Always write the (possibly partially executed) notebook out.
        nb_out = os.path.join(output_dir, os.path.basename(notebook_filename))
        with io.open(nb_out, mode="w", encoding="utf-8") as f:
            nbformat.write(nb, f)
        print(f"wrote: {nb_out}")
    return out
def kernel_munge(kernel_name):
    """Return the kernel name as it is rendered in notebook metadata."""
    return "conda-env-miniconda3-{}-py".format(kernel_name)
# Click CLI wrapper around _main(); one option per _main() keyword argument.
@click.command()
@click.option(
    "--notebook",
    default=None,
    help="Optionally select a particular notebook to run. If omitted, then all notebooks are run.",
)
@click.option(
    "--start-after-notebook",
    default=None,
    help="Run all notebooks occurring after a specified notebook.",
)
@click.option(
    "--run-pre",
    is_flag=True,
    help="Run the 'pre-processing' notebooks; these notebooks are designated in `_config_calc.yml` and are omitted by default.",
)
@click.option(
    "--clear-cache",
    is_flag=True,
    help="Delete all previously cached data prior to running the computation.",
)
@click.option(
    "--clear-cache-ec-only",
    is_flag=True,
    help="Delete cached data associated with emergent constraint fits.",
)
@click.option(
    "--list-notebooks",
    is_flag=True,
    help="List all notebooks and return.",
)
@click.option(
    "--skip-notebooks",
    default="",
    help="List notebooks to skip.",
)
def main(
    run_pre,
    notebook,
    start_after_notebook,
    clear_cache,
    clear_cache_ec_only,
    list_notebooks,
    skip_notebooks,
):
    """Command line tool to run all the notebooks comprising this calculation.

    Thin wrapper: all work happens in _main(); the process exits with
    status 1 if any notebook failed, 0 otherwise.
    """
    failed_list = _main(
        run_pre=run_pre,
        notebook=notebook,
        start_after_notebook=start_after_notebook,
        clear_cache=clear_cache,
        clear_cache_ec_only=clear_cache_ec_only,
        list_notebooks=list_notebooks,
        skip_notebooks=skip_notebooks,
    )
    # A non-empty failed_list means at least one notebook raised during execution.
    if failed_list:
        print("failed list")
        print(failed_list)
        sys.exit(1)
    sys.exit(0)
def _main(
    run_pre=False,
    notebook=None,
    start_after_notebook=None,
    clear_cache=False,
    clear_cache_ec_only=False,
    list_notebooks=False,
    skip_notebooks="",
):
    """Run the project notebooks in _toc.yml order.

    Returns the list of notebooks that failed to execute (empty on full
    success). Note: with stop_on_fail hard-coded to True, the process
    exits immediately on the first failure instead of collecting it.
    """
    stop_on_fail = True

    project_kernel = config.get("project_kernel")
    # Guard against running under the wrong conda environment.
    assert (
        os.environ["CONDA_DEFAULT_ENV"] == project_kernel
    ), f'activate "{project_kernel}" conda environment before running'

    # Build the list of notebooks to run.
    if notebook is None:
        notebook_list = config.get("pre_notebooks") if run_pre else []
        notebook_list = notebook_list + get_toc_files()

        # if the "pre" notebooks are in the _toc, remove them here
        if not run_pre:
            notebook_list = [
                nb for nb in notebook_list if nb not in config.get("pre_notebooks")
            ]
    else:
        notebook_list = [notebook]

    # `skip_notebooks` arrives as a comma-separated string from the CLI.
    if skip_notebooks:
        skip_notebooks = skip_notebooks.split(",")
    else:
        skip_notebooks = []
    skip_notebooks += config.get("R_notebooks")

    notebook_list = [f for f in notebook_list if f not in skip_notebooks]

    if start_after_notebook is not None:
        assert (
            start_after_notebook in notebook_list
        ), f"{start_after_notebook} not found."

        ndx = notebook_list.index(start_after_notebook)
        notebook_list = notebook_list[ndx + 1 :]

    # Record each notebook's kernel name.
    # (A strict kernel-name assertion used to live here but was permanently
    # disabled behind `if False:`; removed as dead code.)
    kernels = {}
    for nb in notebook_list:
        kernels[nb] = nb_get_kernelname(nb)

    if list_notebooks:
        for nb in notebook_list:
            print(nb)
        return []

    if clear_cache:
        cache_dirs = config.get("cache_dirs")
        for d in cache_dirs:
            subprocess.check_call(f"rm -fvr {d}/*", shell=True)

    if clear_cache_ec_only:
        cache_dirs = config.get("cache_dirs_ec")
        for d in cache_dirs:
            print("-" * 80)
            print(f"clearing {d}")
            subprocess.check_call(f"rm -fvr {d}/*", shell=True)
            print()

    cwd = os.getcwd()
    failed_list = []
    for nb in notebook_list:
        print("-" * 80)
        print(f"executing: {nb}")

        # set the kernel name to fool nbterm into running this
        nb_set_kernelname(nb, kernel_name="python3")

        # clear output
        nb_clear_outputs(nb)

        # run the notebook
        ok = nb_execute(nb, output_dir=cwd)

        if not ok:
            print("failed")
            if stop_on_fail:
                sys.exit(1)
            failed_list.append(nb)

        # set the kernel back
        nb_set_kernelname(nb, kernel_name=kernel_munge(project_kernel))
        print()

    return failed_list
# Script entry point: dispatch to the click CLI.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3323709 | <filename>Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/ClassTypeInfo.py
# ----------------------------------------------------------------------
# |
# | ClassTypeInfo.py
# |
# | <NAME> <<EMAIL>>
# | 2016-09-04 20:29:59
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2016-18.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import inspect
import os
import sys
from CommonEnvironment import Interface
from . import TypeInfo
from .Impl.ObjectLikeTypeInfo import ObjectLikeTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class _MethodTypeInfo(TypeInfo):
    """Abstract base for TypeInfo classes that validate method attributes.

    Subclasses provide ``Desc`` and ``ExpectedType``; the per-item
    validation itself is a no-op because matching ``ExpectedType`` is the
    whole constraint (hence the empty ``ConstraintsDesc``).
    """
    ConstraintsDesc = ''
    # ----------------------------------------------------------------------
    def __init__(self):
        super(_MethodTypeInfo, self).__init__()
    # ----------------------------------------------------------------------
    @property
    def PythonDefinitionString(self):
        # e.g. "MethodTypeInfo(<contents>)"; _PythonDefinitionStringContents
        # is supplied by the TypeInfo base class machinery.
        return "{}({})" \
                    .format( self.__class__.__name__,
                             self._PythonDefinitionStringContents,
                           )
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    _ExpectedTypeIsCallable = True
    # ----------------------------------------------------------------------
    def _ValidateItemNoThrowImpl(self, item, **custom_args):
        # No additional constraints beyond ExpectedType; always accept.
        return
# ----------------------------------------------------------------------
class MethodTypeInfo(_MethodTypeInfo):
    """Matches standard (instance) methods, per Interface.IsStandardMethod."""
    Desc = "Method"
    ExpectedType = staticmethod(Interface.IsStandardMethod)
# ----------------------------------------------------------------------
class ClassMethodTypeInfo(_MethodTypeInfo):
    """Matches class methods, per Interface.IsClassMethod."""
    Desc = "Class Method"
    ExpectedType = staticmethod(Interface.IsClassMethod)
# ----------------------------------------------------------------------
class StaticMethodTypeInfo(_MethodTypeInfo):
    """Matches static methods, per Interface.IsStaticMethod."""
    Desc = "Static Method"
    ExpectedType = staticmethod(Interface.IsStaticMethod)
# ----------------------------------------------------------------------
@Interface.staticderived
class ClassTypeInfo(ObjectLikeTypeInfo):
    """TypeInfo for arbitrary Python objects.

    Accepts any item (ExpectedType always returns True); attribute values
    are resolved via getattr, with method attributes looked up on the
    item's type rather than the instance.
    """
    Desc = "Class"
    ExpectedType = staticmethod(lambda item: True) # Everything is an object in Python
    # ----------------------------------------------------------------------
    @classmethod
    def _GetAttributeValue(cls, type_info, item, attribute_name):
        # For method-type attributes, look the attribute up on the class --
        # presumably so unbound callables are inspected; confirm against
        # the Interface.Is*Method predicates.
        if isinstance(type_info, _MethodTypeInfo):
            item = type(item)
        return getattr(item, attribute_name)
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    _ExpectedTypeIsCallable = True
| StarcoderdataPython |
class Pizza:
    """A pizza: a name, a dough object, and a capacity-limited topping set.

    Toppings are stored as {topping_type: accumulated weight}; the capacity
    bounds the number of distinct topping types, not their total weight.
    """
    def __init__(self, name, dough, toppings_capacity):
        self.__name = name
        self.__dough = dough
        self.__toppings_capacity = toppings_capacity
        self.__toppings = {}

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, value):
        self.__name = value

    @property
    def dough(self):
        return self.__dough

    @dough.setter
    def dough(self, value):
        self.__dough = value

    @property
    def toppings(self):
        return self.__toppings

    @toppings.setter
    def toppings(self, value):
        self.__toppings = value

    @property
    def toppings_capacity(self):
        return self.__toppings_capacity

    @toppings_capacity.setter
    def toppings_capacity(self, value):
        self.__toppings_capacity = value

    def add_topping(self, topping):
        """Add a topping; weight accumulates when the type already exists."""
        if len(self.toppings) == self.toppings_capacity:
            raise ValueError("Not enough space for another topping")
        current_weight = self.toppings.get(topping.topping_type, 0)
        self.toppings[topping.topping_type] = current_weight + topping.weight

    def calculate_total_weight(self):
        """Return the dough weight plus the weight of every topping."""
        return self.__dough.weight + sum(self.toppings.values())
| StarcoderdataPython |
class Card:
    """A playing card identified by rank and suit; gameRank starts at 0."""
    def __init__(self, rank, suit):
        self.rank, self.suit = rank, suit
        self.gameRank = 0
| StarcoderdataPython |
3324270 | <reponame>thanhndv212/pinocchio
import pinocchio as pin
import numpy as np
from os.path import *
# Goal: Build a reduced model from an existing URDF model by fixing the desired joints at a specified position.
# Load the UR robot arm.
# This path refers to Pinocchio source code but you can define your own directory here.
pinocchio_model_dir = join(dirname(dirname(str(abspath(__file__)))), "models")
model_path = pinocchio_model_dir + '/others/robots'
mesh_dir = model_path
# You should change here to set up your own URDF file.
urdf_filename = model_path + '/ur_description/urdf/ur5_robot.urdf'
model, collision_model, visual_model = pin.buildModelsFromUrdf(urdf_filename, mesh_dir)
# Check dimensions of the original model.
print('standard model: dim=' + str(len(model.joints)))
for jn in model.joints:
    print(jn)
# Create the list of joints to lock (they will be fixed at initialJointConfig).
jointsToLock = ['wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
# Look up the ID of each joint to lock; warn about unknown names instead of failing.
jointsToLockIDs = []
for jn in jointsToLock:
    if model.existJointName(jn):
        jointsToLockIDs.append(model.getJointId(jn))
    else:
        print('Warning: joint ' + str(jn) + ' does not belong to the model!')
# Set the initial position of both the fixed and revolute joints.
initialJointConfig = np.array([0,0,0, # shoulder and elbow
                            1,1,1]) # gripper
# Option 1: Build the reduced model including the geometric model for proper displaying of the robot.
model_reduced, visual_model_reduced = pin.buildReducedModel(model, visual_model, jointsToLockIDs, initialJointConfig)
# Option 2: Only build the reduced model in case no display needed:
# model_reduced = pin.buildReducedModel(model, jointsToLockIDs, initialJointConfig)
# Check dimensions of the reduced model.
print('reduced model: dim=' + str(len(model_reduced.joints)))
for jn in model_reduced.joints:
    print(jn)
| StarcoderdataPython |
97882 |
def intersection(right=(), left=()):
    """Return the elements present in both *right* and *left* (order unspecified)."""
    return list(set(right).intersection(set(left)))


def union(right=(), left=()):
    """Return the elements present in either *right* or *left* (order unspecified)."""
    return list(set(right).union(set(left)))


def difference(right=(), left=()):
    """Return the elements of *right* that are not in *left* (order unspecified)."""
    # BUG FIX: this was previously defined as a second `union`, which both
    # computed the wrong operation for its name and silently shadowed the
    # real union above.
    return list(set(right).difference(set(left)))
| StarcoderdataPython |
7049 | import pathlib
# Show the directory containing this script.
print(pathlib.Path(__file__).parent.resolve())
# Minimal read-eval-print loop.
# WARNING: eval() executes arbitrary Python entered by the user -- never run
# this on untrusted input. The loop exits only via Ctrl-C/EOF (EOFError and
# eval() errors are uncaught and terminate the script).
while True:
    next_cmd = input("> ")
    print(eval(next_cmd))
| StarcoderdataPython |
1661088 | <filename>Flask_01/news/comments.py
from . import comments_bp
@comments_bp.route('/comments')
def comments_project():
    """Placeholder view for the comments blueprint; returns a static string."""
    return 'comments_project'
| StarcoderdataPython |
1710986 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GEO Secretariat.
#
# geo-knowledge-hub is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""GEO Knowledge Hub Identifiers helpers."""
import idutils
import posixpath
from pydash import py_
from typing import Dict, List
from flask import current_app
def related_identifiers_url_by_scheme(related_identifiers: List[Dict]) -> List[Dict]:
    """Resolve a URL for each related identifier based on its scheme.

    Args:
        related_identifiers (List[Dict]): List of record related identifiers

    Returns:
        List[Dict]: List of record related identifiers (with URL resolved).
        Entries without an ``identifier`` value are skipped.
    """
    new_related_identifiers = []
    for related_identifier in related_identifiers:
        # BUG FIX: this guard previously read `if ...: pass`, which did
        # nothing; entries lacking an identifier then crashed with KeyError.
        if not related_identifier.get("identifier"):
            continue

        scheme = related_identifier["scheme"]
        identifier = related_identifier["identifier"]

        # Work on a deep copy so the caller's dicts are not mutated.
        related_identifier_obj = py_.set_(py_.clone_deep(related_identifier), "url", "")

        try:
            if idutils.is_url(identifier):
                related_identifier_obj["url"] = identifier
            else:
                # checking if the doi is internal
                if idutils.is_doi(identifier):
                    identifier_split = identifier.split("/")

                    doi_prefix = current_app.config.get("RDM_RECORDS_DOI_DATACITE_PREFIX", None)
                    if doi_prefix and identifier_split[0] == doi_prefix:
                        related_identifier_obj["url"] = posixpath.join("/records", identifier_split[1])

                if not related_identifier_obj["url"]:
                    related_identifier_obj["url"] = idutils.to_url(identifier, scheme, "https")
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Fall back to the raw identifier.
        except Exception:
            related_identifier_obj["url"] = identifier

        new_related_identifiers.append(related_identifier_obj)
    return new_related_identifiers
__all__ = (
"related_identifiers_url_by_scheme"
)
| StarcoderdataPython |
3282340 | """The regular expressions to be used in various modules."""
import re
CELL_START_PATTERN = re.compile(r"^\s{0,5}\d+\s+0")
"""Line starts with number followed with 0."""
CELLS_END_PATTERN = re.compile(r"^\s*$")
"""Empty line.
Separates sections in MCNP file.
"""
MATERIAL_PATTERN = re.compile(r"^\s{0,4}[mM](?P<material>\d+)")
"""Start of MCNP with material card title."""
CARD_PATTERN = re.compile(r"^\s{0,4}(?:(?P<comment>[cC]\s)|(?P<card>\w+))")
"""Start of MCNP line with comment or any card."""
MCNP_SECTIONS_SEPARATOR_PATTERN = re.compile(r"^\s*$", re.MULTILINE)
#
# Advise: check regular expressions on: https://pythex.org/
#
| StarcoderdataPython |
1799127 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import numpy as np
def gen_non_linearity(A, non_linearity):
    '''
    Returns the required activation applied to tensor `A`.

    non_linearity is either a callable or a value in
    ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']

    Raises:
        ValueError: if `non_linearity` is neither callable nor one of the
            supported string names.
    '''
    if non_linearity == "tanh":
        return torch.tanh(A)
    elif non_linearity == "sigmoid":
        return torch.sigmoid(A)
    elif non_linearity == "relu":
        # torch.relu() takes a single tensor argument; the previous
        # torch.relu(A, 0.0) raised TypeError at call time.
        return torch.relu(A)
    elif non_linearity == "quantTanh":
        # Hard tanh: clamp to [-1, 1].
        return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.ones_like(A))
    elif non_linearity == "quantSigm":
        # Hard sigmoid: (A + 1) / 2 clamped to [0, 1].
        A = (A + 1.0) / 2.0
        return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
    elif non_linearity == "quantSigm4":
        # Wider hard sigmoid: (A + 2) / 4 clamped to [0, 1].
        A = (A + 2.0) / 4.0
        return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
    else:
        # non_linearity is a user specified function
        if not callable(non_linearity):
            # The original message was assembled with a stray unary '+' on a
            # str ("..." + + "..."), which itself raised TypeError instead of
            # this ValueError; it also omitted 'quantSigm4'.
            raise ValueError("non_linearity is either a callable or a value in "
                             "['tanh', 'sigmoid', 'relu', 'quantTanh', "
                             "'quantSigm', 'quantSigm4']")
        return non_linearity(A)
class BaseRNN(nn.Module):
    '''
    Generic equivalent of static_rnn in tf
    Used to unroll all the cell written in this file
    We assume data to be batch_first by default ie.,
    [batchSize, timeSteps, inputDims] else
    [timeSteps, batchSize, inputDims]

    Returns the per-step hidden states (and per-step cell states as a second
    tensor when the wrapped cell is an LSTMLR cell).
    '''

    def __init__(self, RNNCell, batch_first=True):
        super(BaseRNN, self).__init__()
        self.RNNCell = RNNCell  # cell instance applied at every time step
        self.batch_first = batch_first  # input layout flag (see class docstring)

    def forward(self, input, hiddenState=None,
                cellState=None):
        # hiddenState / cellState are the optional initial states; zeros are
        # used when they are not supplied.
        if self.batch_first is True:
            self.device = input.device
            # Output buffer: one hidden state per (batch, time step).
            hiddenStates = torch.zeros(
                [input.shape[0], input.shape[1],
                 self.RNNCell.output_size]).to(self.device)
            if hiddenState is None:
                hiddenState = torch.zeros([input.shape[0],
                                           self.RNNCell.output_size]).to(self.device)
            if self.RNNCell.cellType == "LSTMLR":
                # LSTM cells carry an extra cell state alongside the hidden state.
                cellStates = torch.zeros(
                    [input.shape[0], input.shape[1],
                     self.RNNCell.output_size]).to(self.device)
                if cellState is None:
                    cellState = torch.zeros(
                        [input.shape[0], self.RNNCell.output_size]).to(self.device)
                for i in range(0, input.shape[1]):
                    # Unroll along the time axis (dim 1 for batch_first).
                    hiddenState, cellState = self.RNNCell(
                        input[:, i, :], (hiddenState, cellState))
                    hiddenStates[:, i, :] = hiddenState
                    cellStates[:, i, :] = cellState
                return hiddenStates, cellStates
            else:
                for i in range(0, input.shape[1]):
                    hiddenState = self.RNNCell(input[:, i, :], hiddenState)
                    hiddenStates[:, i, :] = hiddenState
                return hiddenStates
        else:
            # Time-major layout: [timeSteps, batchSize, inputDims].
            self.device = input.device
            hiddenStates = torch.zeros(
                [input.shape[0], input.shape[1],
                 self.RNNCell.output_size]).to(self.device)
            if hiddenState is None:
                hiddenState = torch.zeros([input.shape[1],
                                           self.RNNCell.output_size]).to(self.device)
            if self.RNNCell.cellType == "LSTMLR":
                cellStates = torch.zeros(
                    [input.shape[0], input.shape[1],
                     self.RNNCell.output_size]).to(self.device)
                if cellState is None:
                    cellState = torch.zeros(
                        [input.shape[1], self.RNNCell.output_size]).to(self.device)
                for i in range(0, input.shape[0]):
                    # Unroll along the time axis (dim 0 for time-major).
                    hiddenState, cellState = self.RNNCell(
                        input[i, :, :], (hiddenState, cellState))
                    hiddenStates[i, :, :] = hiddenState
                    cellStates[i, :, :] = cellState
                return hiddenStates, cellStates
            else:
                for i in range(0, input.shape[0]):
                    hiddenState = self.RNNCell(input[i, :, :], hiddenState)
                    hiddenStates[i, :, :] = hiddenState
                return hiddenStates
class FastGRNNCell(nn.Module):
    '''
    FastGRNN Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units
    gate_non_linearity = nonlinearity for the gate can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    wRank = rank of W matrix (creates two matrices if not None)
    uRank = rank of U matrix (creates two matrices if not None)
    zetaInit = init for zeta, the scale param
    nuInit = init for nu, the translation param
    FastGRNN architecture and compression techniques are found in
    FastGRNN(LINK) paper
    Basic architecture is like:
    z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
    h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
    h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^
    W and U can further parameterised into low rank version by
    W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
    '''

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 zetaInit=1.0, nuInit=-4.0, name="FastGRNN"):
        super(FastGRNNCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        # [#W factors, #U factors]: 1 = full-rank matrix, 2 = low-rank pair.
        self._num_weight_matrices = [1, 1]
        self._wRank = wRank
        self._uRank = uRank
        self._zetaInit = zetaInit
        self._nuInit = nuInit
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name
        if wRank is None:
            # Full-rank input-to-hidden matrix.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Low-rank factorization W = W1 @ W2.
            self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            # Full-rank hidden-to-hidden matrix.
            self.U = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Low-rank factorization U = U1 @ U2.
            self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        # Scalar mixing parameters; squashed through sigmoid in forward().
        self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
        self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))

    # --- read-only introspection accessors ---

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_non_linearity(self):
        return self._gate_non_linearity

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        # Consumed by BaseRNN to decide whether a cell state is threaded through.
        return "FastGRNN"

    def forward(self, input, state):
        # W x_t (full-rank or factored).
        if self._wRank is None:
            wComp = torch.matmul(input, self.W)
        else:
            wComp = torch.matmul(
                torch.matmul(input, self.W1), self.W2)
        # U h_{t-1} (full-rank or factored).
        if self._uRank is None:
            uComp = torch.matmul(state, self.U)
        else:
            uComp = torch.matmul(
                torch.matmul(state, self.U1), self.U2)
        # The same pre-activation feeds both the gate z and the candidate c;
        # only the bias differs (see class docstring).
        pre_comp = wComp + uComp
        z = gen_non_linearity(pre_comp + self.bias_gate,
                              self._gate_non_linearity)
        c = gen_non_linearity(pre_comp + self.bias_update,
                              self._update_non_linearity)
        # Gated update with learned sigmoid(zeta)/sigmoid(nu) scale/shift.
        new_h = z * state + (torch.sigmoid(self.zeta) *
                             (1.0 - z) + torch.sigmoid(self.nu)) * c
        return new_h

    def getVars(self):
        # Parameters in a fixed order: W factor(s), U factor(s), biases, zeta/nu.
        Vars = []
        if self._num_weight_matrices[0] == 1:
            Vars.append(self.W)
        else:
            Vars.extend([self.W1, self.W2])
        if self._num_weight_matrices[1] == 1:
            Vars.append(self.U)
        else:
            Vars.extend([self.U1, self.U2])
        Vars.extend([self.bias_gate, self.bias_update])
        Vars.extend([self.zeta, self.nu])
        return Vars
class FastRNNCell(nn.Module):
    '''
    FastRNN Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    wRank = rank of W matrix (creates two matrices if not None)
    uRank = rank of U matrix (creates two matrices if not None)
    alphaInit = init for alpha, the update scalar
    betaInit = init for beta, the weight for previous state
    FastRNN architecture and compression techniques are found in
    FastGRNN(LINK) paper
    Basic architecture is like:
    h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
    h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^
    W and U can further parameterised into low rank version by
    W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
    '''

    def __init__(self, input_size, hidden_size,
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 alphaInit=-3.0, betaInit=3.0, name="FastRNN"):
        super(FastRNNCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._update_non_linearity = update_non_linearity
        # [#W factors, #U factors]: 1 = full-rank matrix, 2 = low-rank pair.
        self._num_weight_matrices = [1, 1]
        self._wRank = wRank
        self._uRank = uRank
        self._alphaInit = alphaInit
        self._betaInit = betaInit
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name
        if wRank is None:
            # Full-rank input-to-hidden matrix.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Low-rank factorization W = W1 @ W2.
            self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            # Full-rank hidden-to-hidden matrix.
            self.U = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Low-rank factorization U = U1 @ U2.
            self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        # Scalar residual-mixing parameters; squashed through sigmoid in forward().
        self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1]))
        self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1]))

    # --- read-only introspection accessors ---

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        # Consumed by BaseRNN to decide whether a cell state is threaded through.
        return "FastRNN"

    def forward(self, input, state):
        # W x_t (full-rank or factored).
        if self._wRank is None:
            wComp = torch.matmul(input, self.W)
        else:
            wComp = torch.matmul(
                torch.matmul(input, self.W1), self.W2)
        # U h_{t-1} (full-rank or factored).
        if self._uRank is None:
            uComp = torch.matmul(state, self.U)
        else:
            uComp = torch.matmul(
                torch.matmul(state, self.U1), self.U2)
        pre_comp = wComp + uComp
        c = gen_non_linearity(pre_comp + self.bias_update,
                              self._update_non_linearity)
        # Weighted residual update: sigmoid(beta) keeps the old state,
        # sigmoid(alpha) admits the candidate (see class docstring).
        new_h = torch.sigmoid(self.beta) * state + \
            torch.sigmoid(self.alpha) * c
        return new_h

    def getVars(self):
        # Parameters in a fixed order: W factor(s), U factor(s), bias, alpha/beta.
        Vars = []
        if self._num_weight_matrices[0] == 1:
            Vars.append(self.W)
        else:
            Vars.extend([self.W1, self.W2])
        if self._num_weight_matrices[1] == 1:
            Vars.append(self.U)
        else:
            Vars.extend([self.U1, self.U2])
        Vars.extend([self.bias_update])
        Vars.extend([self.alpha, self.beta])
        return Vars
class LSTMLRCell(nn.Module):
    '''
    LR - Low Rank
    LSTM LR Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units
    gate_non_linearity = nonlinearity for the gate can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    wRank = rank of all W matrices
    (creates 5 matrices if not None else creates 4 matrices)
    uRank = rank of all U matrices
    (creates 5 matrices if not None else creates 4 matrices)
    LSTM architecture and compression techniques are found in
    LSTM paper
    Basic architecture is like:
    f_t = gate_nl(W1x_t + U1h_{t-1} + B_f)
    i_t = gate_nl(W2x_t + U2h_{t-1} + B_i)
    C_t^ = update_nl(W3x_t + U3h_{t-1} + B_c)
    o_t = gate_nl(W4x_t + U4h_{t-1} + B_o)
    C_t = f_t*C_{t-1} + i_t*C_t^
    h_t = o_t*update_nl(C_t)
    Wi and Ui can further parameterised into low rank version by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
    '''

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 name="LSTMLR"):
        super(LSTMLRCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        # [#W matrices, #U matrices]: 4 per side, +1 shared projection when low-rank.
        self._num_weight_matrices = [4, 4]
        self._wRank = wRank
        self._uRank = uRank
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name
        if wRank is None:
            # Four full-rank input projections, one per gate.
            self.W1 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W2 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W3 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W4 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Shared low-rank projection W followed by per-gate factors Wi.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W4 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            # Four full-rank recurrent projections, one per gate.
            self.U1 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U2 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U3 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U4 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Shared low-rank projection U followed by per-gate factors Ui.
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U4 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_f = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_i = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_c = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_o = nn.Parameter(torch.ones([1, hidden_size]))

    # --- read-only introspection accessors ---

    @property
    def state_size(self):
        # Hidden state and cell state concatenated conceptually.
        return 2 * self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_non_linearity(self):
        return self._gate_non_linearity

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        # BaseRNN checks this to thread a cell state through the unroll.
        return "LSTMLR"

    def forward(self, input, hiddenStates):
        (h, c) = hiddenStates
        # Input projections for the four gates.
        if self._wRank is None:
            wComp1 = torch.matmul(input, self.W1)
            wComp2 = torch.matmul(input, self.W2)
            wComp3 = torch.matmul(input, self.W3)
            wComp4 = torch.matmul(input, self.W4)
        else:
            wComp1 = torch.matmul(
                torch.matmul(input, self.W), self.W1)
            wComp2 = torch.matmul(
                torch.matmul(input, self.W), self.W2)
            wComp3 = torch.matmul(
                torch.matmul(input, self.W), self.W3)
            wComp4 = torch.matmul(
                torch.matmul(input, self.W), self.W4)
        # Recurrent projections for the four gates.
        if self._uRank is None:
            uComp1 = torch.matmul(h, self.U1)
            uComp2 = torch.matmul(h, self.U2)
            uComp3 = torch.matmul(h, self.U3)
            uComp4 = torch.matmul(h, self.U4)
        else:
            uComp1 = torch.matmul(
                torch.matmul(h, self.U), self.U1)
            uComp2 = torch.matmul(
                torch.matmul(h, self.U), self.U2)
            uComp3 = torch.matmul(
                torch.matmul(h, self.U), self.U3)
            uComp4 = torch.matmul(
                torch.matmul(h, self.U), self.U4)
        pre_comp1 = wComp1 + uComp1
        pre_comp2 = wComp2 + uComp2
        pre_comp3 = wComp3 + uComp3
        pre_comp4 = wComp4 + uComp4
        # NOTE(review): pre_comp1 (W1/U1) drives the *input* gate and
        # pre_comp2 (W2/U2) the *forget* gate, the reverse of the class
        # docstring's naming. Functionally symmetric since all factors share
        # the same initialization -- confirm before relying on the docstring.
        i = gen_non_linearity(pre_comp1 + self.bias_i,
                              self._gate_non_linearity)
        f = gen_non_linearity(pre_comp2 + self.bias_f,
                              self._gate_non_linearity)
        o = gen_non_linearity(pre_comp4 + self.bias_o,
                              self._gate_non_linearity)
        c_ = gen_non_linearity(pre_comp3 + self.bias_c,
                               self._update_non_linearity)
        # Standard LSTM state update.
        new_c = f * c + i * c_
        new_h = o * gen_non_linearity(new_c, self._update_non_linearity)
        return new_h, new_c

    def getVars(self):
        # Parameters in a fixed order: W matrices, U matrices, biases (f, i, c, o).
        Vars = []
        if self._num_weight_matrices[0] == 4:
            Vars.extend([self.W1, self.W2, self.W3, self.W4])
        else:
            Vars.extend([self.W, self.W1, self.W2, self.W3, self.W4])
        if self._num_weight_matrices[1] == 4:
            Vars.extend([self.U1, self.U2, self.U3, self.U4])
        else:
            Vars.extend([self.U, self.U1, self.U2, self.U3, self.U4])
        Vars.extend([self.bias_f, self.bias_i, self.bias_c, self.bias_o])
        return Vars
class GRULRCell(nn.Module):
    '''
    GRU LR Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units
    gate_non_linearity = nonlinearity for the gate can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    wRank = rank of W matrix
    (creates 4 matrices if not None else creates 3 matrices)
    uRank = rank of U matrix
    (creates 4 matrices if not None else creates 3 matrices)
    GRU architecture and compression techniques are found in
    GRU(LINK) paper
    Basic architecture is like:
    r_t = gate_nl(W1x_t + U1h_{t-1} + B_r)
    z_t = gate_nl(W2x_t + U2h_{t-1} + B_g)
    h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h)
    h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can further parameterised into low rank version by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
    '''

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 name="GRULR"):
        super(GRULRCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        # [#W matrices, #U matrices]: 3 per side, +1 shared projection when low-rank.
        self._num_weight_matrices = [3, 3]
        self._wRank = wRank
        self._uRank = uRank
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name
        if wRank is None:
            # Three full-rank input projections: reset, update, candidate.
            self.W1 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W2 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W3 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Shared low-rank projection W followed by per-gate factors Wi.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            # Three full-rank recurrent projections: reset, update, candidate.
            self.U1 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U2 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U3 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Shared low-rank projection U followed by per-gate factors Ui.
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_r = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        # NOTE(review): captured at construction time; goes stale if the
        # module is later moved to another device. Appears unused within
        # this class.
        self._device = self.bias_update.device

    # --- read-only introspection accessors ---

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_non_linearity(self):
        return self._gate_non_linearity

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        # Consumed by BaseRNN to decide whether a cell state is threaded through.
        return "GRULR"

    def forward(self, input, state):
        # Input projections for reset (1), update (2) and candidate (3).
        if self._wRank is None:
            wComp1 = torch.matmul(input, self.W1)
            wComp2 = torch.matmul(input, self.W2)
            wComp3 = torch.matmul(input, self.W3)
        else:
            wComp1 = torch.matmul(
                torch.matmul(input, self.W), self.W1)
            wComp2 = torch.matmul(
                torch.matmul(input, self.W), self.W2)
            wComp3 = torch.matmul(
                torch.matmul(input, self.W), self.W3)
        if self._uRank is None:
            uComp1 = torch.matmul(state, self.U1)
            uComp2 = torch.matmul(state, self.U2)
        else:
            uComp1 = torch.matmul(
                torch.matmul(state, self.U), self.U1)
            uComp2 = torch.matmul(
                torch.matmul(state, self.U), self.U2)
        pre_comp1 = wComp1 + uComp1
        pre_comp2 = wComp2 + uComp2
        r = gen_non_linearity(pre_comp1 + self.bias_r,
                              self._gate_non_linearity)
        z = gen_non_linearity(pre_comp2 + self.bias_gate,
                              self._gate_non_linearity)
        # The U3 projection is applied to the *reset-gated* state (r * h),
        # so it cannot be precomputed with uComp1/uComp2 above.
        if self._uRank is None:
            pre_comp3 = wComp3 + torch.matmul(r * state, self.U3)
        else:
            pre_comp3 = wComp3 + \
                torch.matmul(torch.matmul(r * state, self.U), self.U3)
        c = gen_non_linearity(pre_comp3 + self.bias_update,
                              self._update_non_linearity)
        # Convex combination of old state and candidate.
        new_h = z * state + (1.0 - z) * c
        return new_h

    def getVars(self):
        # Parameters in a fixed order: W matrices, U matrices, biases (r, gate, update).
        Vars = []
        if self._num_weight_matrices[0] == 3:
            Vars.extend([self.W1, self.W2, self.W3])
        else:
            Vars.extend([self.W, self.W1, self.W2, self.W3])
        if self._num_weight_matrices[1] == 3:
            Vars.extend([self.U1, self.U2, self.U3])
        else:
            Vars.extend([self.U, self.U1, self.U2, self.U3])
        Vars.extend([self.bias_r, self.bias_gate, self.bias_update])
        return Vars
class UGRNNLRCell(nn.Module):
    '''
    UGRNN LR Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units
    gate_non_linearity = nonlinearity for the gate can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_non_linearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    wRank = rank of W matrix
    (creates 3 matrices if not None else creates 2 matrices)
    uRank = rank of U matrix
    (creates 3 matrices if not None else creates 2 matrices)
    UGRNN architecture and compression techniques are found in
    UGRNN(LINK) paper
    Basic architecture is like:
    z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
    h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
    h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can further parameterised into low rank version by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
    '''

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 name="UGRNNLR"):
        super(UGRNNLRCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        # [#W matrices, #U matrices]: 2 per side, +1 shared projection when low-rank.
        self._num_weight_matrices = [2, 2]
        self._wRank = wRank
        self._uRank = uRank
        if wRank is not None:
            self._num_weight_matrices[0] += 1
        if uRank is not None:
            self._num_weight_matrices[1] += 1
        self._name = name
        if wRank is None:
            # Two full-rank input projections: gate and candidate.
            self.W1 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
            self.W2 = nn.Parameter(
                0.1 * torch.randn([input_size, hidden_size]))
        else:
            # Shared low-rank projection W followed by per-gate factors Wi.
            self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            # Two full-rank recurrent projections: gate and candidate.
            self.U1 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
            self.U2 = nn.Parameter(
                0.1 * torch.randn([hidden_size, hidden_size]))
        else:
            # Shared low-rank projection U followed by per-gate factors Ui.
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        # NOTE(review): captured at construction time; goes stale if the
        # module is later moved to another device. Appears unused within
        # this class.
        self._device = self.bias_update.device

    # --- read-only introspection accessors ---

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_non_linearity(self):
        return self._gate_non_linearity

    @property
    def update_non_linearity(self):
        return self._update_non_linearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        # Consumed by BaseRNN to decide whether a cell state is threaded through.
        return "UGRNNLR"

    def forward(self, input, state):
        # Input projections for gate (1) and candidate (2).
        if self._wRank is None:
            wComp1 = torch.matmul(input, self.W1)
            wComp2 = torch.matmul(input, self.W2)
        else:
            wComp1 = torch.matmul(
                torch.matmul(input, self.W), self.W1)
            wComp2 = torch.matmul(
                torch.matmul(input, self.W), self.W2)
        # Recurrent projections for gate (1) and candidate (2).
        if self._uRank is None:
            uComp1 = torch.matmul(state, self.U1)
            uComp2 = torch.matmul(state, self.U2)
        else:
            uComp1 = torch.matmul(
                torch.matmul(state, self.U), self.U1)
            uComp2 = torch.matmul(
                torch.matmul(state, self.U), self.U2)
        pre_comp1 = wComp1 + uComp1
        pre_comp2 = wComp2 + uComp2
        z = gen_non_linearity(pre_comp1 + self.bias_gate,
                              self._gate_non_linearity)
        c = gen_non_linearity(pre_comp2 + self.bias_update,
                              self._update_non_linearity)
        # Convex combination of old state and candidate.
        new_h = z * state + (1.0 - z) * c
        return new_h

    def getVars(self):
        # Parameters in a fixed order: W matrices, U matrices, biases (gate, update).
        Vars = []
        if self._num_weight_matrices[0] == 2:
            Vars.extend([self.W1, self.W2])
        else:
            Vars.extend([self.W, self.W1, self.W2])
        if self._num_weight_matrices[1] == 2:
            Vars.extend([self.U1, self.U2])
        else:
            Vars.extend([self.U, self.U1, self.U2])
        Vars.extend([self.bias_gate, self.bias_update])
        return Vars
class LSTM(nn.Module):
    """Equivalent to nn.LSTM using LSTMLRCell (optionally low-rank)."""

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None, batch_first=True):
        super(LSTM, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        self._wRank = wRank
        self._uRank = uRank
        self.batch_first = batch_first
        # Single LSTMLR cell, unrolled over the time axis by BaseRNN.
        self.cell = LSTMLRCell(input_size, hidden_size,
                               gate_non_linearity=gate_non_linearity,
                               update_non_linearity=update_non_linearity,
                               wRank=wRank, uRank=uRank)
        self.unrollRNN = BaseRNN(self.cell, batch_first=self.batch_first)

    def forward(self, input, hiddenState=None, cellState=None):
        # Returns (hiddenStates, cellStates) for every time step.
        return self.unrollRNN(input, hiddenState, cellState)
class GRU(nn.Module):
    """Equivalent to nn.GRU using GRULRCell (optionally low-rank)."""

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None, batch_first=True):
        super(GRU, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        self._wRank = wRank
        self._uRank = uRank
        self.batch_first = batch_first
        # Single GRULR cell, unrolled over the time axis by BaseRNN.
        self.cell = GRULRCell(input_size, hidden_size,
                              gate_non_linearity=gate_non_linearity,
                              update_non_linearity=update_non_linearity,
                              wRank=wRank, uRank=uRank)
        self.unrollRNN = BaseRNN(self.cell, batch_first=self.batch_first)

    def forward(self, input, hiddenState=None, cellState=None):
        # cellState is accepted for API symmetry with LSTM; BaseRNN ignores
        # it for non-LSTM cell types.
        return self.unrollRNN(input, hiddenState, cellState)
class UGRNN(nn.Module):
    """Equivalent to nn.UGRNN using UGRNNLRCell (optionally low-rank)."""

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None, batch_first=True):
        super(UGRNN, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        self._wRank = wRank
        self._uRank = uRank
        self.batch_first = batch_first
        # Single UGRNNLR cell, unrolled over the time axis by BaseRNN.
        self.cell = UGRNNLRCell(input_size, hidden_size,
                                gate_non_linearity=gate_non_linearity,
                                update_non_linearity=update_non_linearity,
                                wRank=wRank, uRank=uRank)
        self.unrollRNN = BaseRNN(self.cell, batch_first=self.batch_first)

    def forward(self, input, hiddenState=None, cellState=None):
        # cellState is accepted for API symmetry with LSTM; BaseRNN ignores
        # it for non-LSTM cell types.
        return self.unrollRNN(input, hiddenState, cellState)
class FastRNN(nn.Module):
    """Unrolled FastRNNCell with an nn.RNN-like interface."""

    def __init__(self, input_size, hidden_size,
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 alphaInit=-3.0, betaInit=3.0, batch_first=True):
        super(FastRNN, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._update_non_linearity = update_non_linearity
        self._wRank = wRank
        self._uRank = uRank
        self.batch_first = batch_first
        # Single FastRNN cell, unrolled over the time axis by BaseRNN.
        self.cell = FastRNNCell(input_size, hidden_size,
                                update_non_linearity=update_non_linearity,
                                wRank=wRank, uRank=uRank,
                                alphaInit=alphaInit, betaInit=betaInit)
        self.unrollRNN = BaseRNN(self.cell, batch_first=self.batch_first)

    def forward(self, input, hiddenState=None, cellState=None):
        # cellState is accepted for API symmetry with LSTM; BaseRNN ignores
        # it for non-LSTM cell types.
        return self.unrollRNN(input, hiddenState, cellState)
class FastGRNN(nn.Module):
    """Unrolled FastGRNNCell with an nn.GRU-like interface."""

    def __init__(self, input_size, hidden_size, gate_non_linearity="sigmoid",
                 update_non_linearity="tanh", wRank=None, uRank=None,
                 zetaInit=1.0, nuInit=-4.0, batch_first=True):
        super(FastGRNN, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_non_linearity = gate_non_linearity
        self._update_non_linearity = update_non_linearity
        self._wRank = wRank
        self._uRank = uRank
        self.batch_first = batch_first
        # Single FastGRNN cell, unrolled over the time axis by BaseRNN.
        self.cell = FastGRNNCell(input_size, hidden_size,
                                 gate_non_linearity=gate_non_linearity,
                                 update_non_linearity=update_non_linearity,
                                 wRank=wRank, uRank=uRank,
                                 zetaInit=zetaInit, nuInit=nuInit)
        self.unrollRNN = BaseRNN(self.cell, batch_first=self.batch_first)

    def forward(self, input, hiddenState=None, cellState=None):
        # cellState is accepted for API symmetry with LSTM; BaseRNN ignores
        # it for non-LSTM cell types.
        return self.unrollRNN(input, hiddenState, cellState)
class SRNN2(nn.Module):
def __init__(self, inputDim, outputDim, hiddenDim0, hiddenDim1, cellType,
dropoutProbability0 = None, dropoutProbability1 = None,
**cellArgs):
'''
A 2 Layer Shallow RNN.
inputDim: Input data's feature dimension.
hiddenDim0: Hidden state dimension of the lower layer RNN cell.
hiddenDim1: Hidden state dimension of the second layer RNN cell.
cellType: The type of RNN cell to use. Options are ['LSTM', 'FastRNNCell',
'FastGRNNCell', 'GRULRCell']
'''
super(SRNN2, self).__init__()
# Create two RNN Cells
self.inputDim = inputDim
self.hiddenDim0 = hiddenDim0
self.hiddenDim1 = hiddenDim1
self.outputDim = outputDim
self.dropoutProbability0 = dropoutProbability0
self.dropoutProbability1 = dropoutProbability1
if dropoutProbability0 != None:
assert 0 < dropoutProbability0 <= 1.0
if dropoutProbability1 != None:
assert 0 < dropoutProbability1 <= 1.0
self.cellArgs = {}
self.cellArgs.update(cellArgs)
supportedCells = ['LSTM', 'FastRNNCell', 'FastGRNNCell', 'GRULRCell']
assert cellType in supportedCells, 'Currently supported cells: %r' % supportedCells
self.cellType = cellType
if self.cellType == 'LSTM':
self.rnnClass = nn.LSTM
elif self.cellType == 'FastRNNCell':
self.rnnClass = FastRNN
elif self.cellType == 'FastGRNNCell':
self.rnnClass = FastGRNN
else:
self.rnnClass = GRU
self.rnn0 = self.rnnClass(input_size=inputDim, hidden_size=hiddenDim0, **self.cellArgs)
self.rnn1 = self.rnnClass(input_size=hiddenDim0, hidden_size=hiddenDim1, **self.cellArgs)
self.W = torch.randn([self.hiddenDim1, self.outputDim])
self.W = nn.Parameter(self.W)
self.B = torch.randn([self.outputDim])
self.B = nn.Parameter(self.B)
def getBrickedData(self, x, brickSize):
'''
Takes x of shape [timeSteps, batchSize, featureDim] and returns bricked
x of shape [numBricks, brickSize, batchSize, featureDim] by chunking
along 0-th axes.
'''
timeSteps = list(x.size())[0]
numSplits = int(timeSteps / brickSize)
batchSize = list(x.size())[1]
featureDim = list(x.size())[2]
numBricks = int(timeSteps/brickSize)
eqlen = numSplits * brickSize
x = x[:eqlen]
x_bricked = torch.split(x, numSplits, dim = 0)
x_bricked_batched = torch.cat(x_bricked)
x_bricked_batched = torch.reshape(x_bricked_batched, (numBricks,brickSize,batchSize,featureDim))
return x_bricked_batched
    def forward(self, x, brickSize):
        '''
        Run the two-layer shallow RNN over bricked input.

        x: Input tensor of shape [timeStep, batchSize, featureDim]. Note that
            this is different from the convention followed in the TF codebase.
        brickSize: The brick size for the lower dimension. The input data will
            be divided into bricks along the timeStep axis (axis=0) internally
            and fed into the lowest layer RNN. Note that if the last brick has
            fewer than 'brickSize' steps, it will be ignored (no internal
            padding is done).

        Returns the linear readout of the upper RNN's final state, shape
        [batchSize, outputDim].  NOTE(review): torch.squeeze is applied to
        the final states, so any size-1 dimension (e.g. batchSize == 1)
        collapses as well -- confirm callers never rely on that case.
        '''
        assert x.ndimension() == 3
        assert list(x.size())[2] == self.inputDim
        x_bricks = self.getBrickedData(x, brickSize)
        # x bricks: [numBricks, brickSize, batchSize, featureDim]
        x_bricks = x_bricks.permute(1,0,2,3)
        # x bricks: [brickSize, numBricks, batchSize, featureDim]
        oldShape = list(x_bricks.size())
        # Fold the brick axis into the batch axis so the lower RNN processes
        # all bricks in parallel.
        x_bricks = torch.reshape(x_bricks, [oldShape[0], oldShape[1] * oldShape[2], oldShape[3]])
        # x bricks: [brickSize, numBricks * batchSize, featureDim]
        # x_bricks = torch.Tensor(x_bricks)
        self.dropoutLayer0 = None
        self.dropoutLayer1 = None
        # nn.LSTM returns (output, state); the custom cells return a single
        # hidden-state sequence.
        if self.cellType == 'LSTM':
            hidd0, out0 = self.rnn0(x_bricks)
        else:
            hidd0 = self.rnn0(x_bricks)
        if self.dropoutProbability0 != None:
            # NOTE(review): a fresh nn.Dropout is constructed on every forward
            # pass, so it stays in training mode even under model.eval() --
            # confirm this is intended.
            self.dropoutLayer0 = nn.Dropout(p=self.dropoutProbability0)
            hidd0 = self.dropoutLayer0(hidd0)
        hidd0 = torch.squeeze(hidd0[-1])
        # [numBricks * batchSize, hiddenDim0]
        inp1 = hidd0.view(oldShape[1], oldShape[2], self.hiddenDim0)
        # [numBricks, batchSize, hiddenDim0]
        if self.cellType == 'LSTM':
            hidd1, out1 = self.rnn1(inp1)
        else:
            hidd1 = self.rnn1(inp1)
        if self.dropoutProbability1 != None:
            self.dropoutLayer1 = nn.Dropout(p=self.dropoutProbability1)
            hidd1 = self.dropoutLayer1(hidd1)
        hidd1 = torch.squeeze(hidd1[-1])
        out = torch.matmul(hidd1, self.W) + self.B
        return out
| StarcoderdataPython |
1699696 | <filename>backend/apps/acl/urls.py<gh_stars>1-10
from backend.util.utils import url_join
class CommonUrlDispatcher(object):
    """RESTCONF URL templates shared by ACL dispatchers; '{}' is the node id."""
    # Mount point of a netconf node's configuration datastore.
    BASE_CONFIG_URL = '/restconf/config/network-topology:network-topology/topology/topology-netconf/node/{}/yang-ext:mount'
    # Cisco IOS-XR interface configuration subtree.
    GET_INTERFACES_URL = url_join(BASE_CONFIG_URL, '/Cisco-IOS-XR-ifmgr-cfg:interface-configurations')
    # Cisco IOS-XR IPv4 ACL / prefix-list subtree.
    GET_ACL_URL = url_join(BASE_CONFIG_URL, '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list')
class StandardAclUrlDispatcher(CommonUrlDispatcher):
    """URLs for standard-ACL CRUD; trailing '{}' placeholders are filled by callers."""
    POST_ACL_URL = url_join(CommonUrlDispatcher.BASE_CONFIG_URL)
    # '{}' = ACL name.
    PUT_ACL_URL = url_join(POST_ACL_URL, '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/accesses/access/{}')
    # '{}' = interface name ('act' = active interface configuration).
    POST_INTERFACE_URL = url_join(CommonUrlDispatcher.GET_INTERFACES_URL, '/interface-configuration/act/{}')
    # '{}' = packet-filter direction subtree.
    PUT_INTERFACE_URL = url_join(POST_INTERFACE_URL, '/Cisco-IOS-XR-ip-pfilter-cfg:ipv4-packet-filter/{}')
    # Delete uses the same resource path as update.
    DELETE_INTERFACE_URL = PUT_INTERFACE_URL
| StarcoderdataPython |
30889 | <filename>zhaquirks/xiaomi/aqara/plug.py
"""Xiaomi lumi.plug plug."""
import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import (
AnalogInput,
Basic,
BinaryOutput,
DeviceTemperature,
Groups,
Identify,
OnOff,
Ota,
PowerConfiguration,
Scenes,
Time,
)
from zhaquirks.xiaomi import (
LUMI,
AnalogInputCluster,
BasicCluster,
ElectricalMeasurementCluster,
XiaomiCustomDevice,
)
from zhaquirks import Bus
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
SKIP_CONFIGURATION,
)
_LOGGER = logging.getLogger(__name__)
class Plug(XiaomiCustomDevice):
    """lumi.plug plug."""
    def __init__(self, *args, **kwargs):
        """Init."""
        # Event buses the quirked measurement clusters publish readings on.
        self.voltage_bus = Bus()
        self.consumption_bus = Bus()
        self.power_bus = Bus()
        super().__init__(*args, **kwargs)
    # Raw descriptor signature as reported by the device; used to match
    # this quirk to the joining device.
    signature = {
        MODELS_INFO: [(LUMI, "lumi.plug")],
        ENDPOINTS: {
            # <SimpleDescriptor endpoint=1 profile=260 device_type=81
            # device_version=1
            # input_clusters=[0, 4, 3, 6, 16, 5, 10, 1, 2]
            # output_clusters=[25, 10]>
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    PowerConfiguration.cluster_id,
                    DeviceTemperature.cluster_id,
                    Groups.cluster_id,
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    Scenes.cluster_id,
                    BinaryOutput.cluster_id,
                    Time.cluster_id,
                ],
                OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],
            },
            # <SimpleDescriptor endpoint=2 profile=260 device_type=9
            # device_version=1
            # input_clusters=[12]
            # output_clusters=[12, 4]>
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.MAIN_POWER_OUTLET,
                INPUT_CLUSTERS: [AnalogInput.cluster_id],
                OUTPUT_CLUSTERS: [AnalogInput.cluster_id, Groups.cluster_id],
            },
            # <SimpleDescriptor endpoint=3 profile=260 device_type=83
            # device_version=1
            # input_clusters=[12]
            # output_clusters=[12]>
            3: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.METER_INTERFACE,
                INPUT_CLUSTERS: [AnalogInput.cluster_id],
                OUTPUT_CLUSTERS: [AnalogInput.cluster_id],
            },
        },
    }
    # Quirked replacement: swaps in the Xiaomi-aware Basic/ElectricalMeasurement
    # and AnalogInput clusters; reporting configuration is skipped.
    replacement = {
        SKIP_CONFIGURATION: True,
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
                INPUT_CLUSTERS: [
                    BasicCluster,
                    PowerConfiguration.cluster_id,
                    DeviceTemperature.cluster_id,
                    Groups.cluster_id,
                    Identify.cluster_id,
                    OnOff.cluster_id,
                    Scenes.cluster_id,
                    BinaryOutput.cluster_id,
                    Time.cluster_id,
                    ElectricalMeasurementCluster,
                ],
                OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],
            },
            2: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.MAIN_POWER_OUTLET,
                INPUT_CLUSTERS: [AnalogInputCluster],
                OUTPUT_CLUSTERS: [AnalogInput.cluster_id, Groups.cluster_id],
            },
            3: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.METER_INTERFACE,
                INPUT_CLUSTERS: [AnalogInput.cluster_id],
                OUTPUT_CLUSTERS: [AnalogInput.cluster_id],
            },
        },
    }
| StarcoderdataPython |
3253410 | import os
from datetime import date, datetime
from aiogram import Bot, types
from aiogram import Dispatcher
from aiogram.types.message import ContentType
from aiogram.utils import executor
import dotenv
from utils_db import (check_have_member, check_active, create_roll, create_user, activated_user,
deactivated_user, count_active_members, get_ids_active_members,
make_purpose_rec, get_last_name, get_first_name,
get_ids_advisor_active_roll, get_id_watcher_from_advisor_last_roll,
get_ids_watchers_active_roll, get_id_advisor_from_wathcer_last_roll,
set_movie_for_purpose, set_state, get_state, reset_state, check_all_movie_assigned,
get_id_chanel_from_id_member, get_all_purposes, set_status_roll)
from utils import shuffle_members, generate_markup_keybord, generate_qr
from templates import MESSAGES
dotenv.load_dotenv()  # pull TOKEN_BOT etc. from a local .env file
TOKEN = os.getenv('TOKEN_BOT')
# Chat id that receives error reports from the handlers.
ID_FOR_REPORT = 681108032
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
# Pending movie proposals: str(advisor_id) -> proposed title.
movies_temp = dict()
# Users currently expected to send a photo for QR generation.
wait_img_qr = dict()
@dp.message_handler(commands=['join'])
async def registration_user(message: types.Message):
    """Register the sender as a game member (/join)."""
    user = message.from_user
    try:
        if check_have_member(user.id):
            # Already known: report whether the membership is active.
            if check_active(user.id):
                await message.reply(MESSAGES['is_reg_and_active'])
            else:
                await message.reply(MESSAGES['is_reg_and_noactive'])
        else:
            # Both first and last name are required to create a record.
            if user.first_name and user.last_name:
                create_user(id_member=user.id,
                            first_name=user.first_name,
                            last_name=user.last_name,
                            id_chat=message.chat.id)
                await message.reply(MESSAGES['reg_is_done'])
            else:
                await message.reply(MESSAGES['not_first_or_last_name'])
    except Exception as e:
        # Forward unexpected failures to the maintainer chat.
        await bot.send_message(ID_FOR_REPORT, MESSAGES['report_error'].format(e))
@dp.message_handler(commands=['activate'])
async def activate_user(message: types.Message):
    """Re-activate a previously registered member (/activate)."""
    user = message.from_user
    try:
        if check_have_member(user.id):
            if check_active(user.id):
                await message.reply(MESSAGES['is_activate_was_active'])
            else:
                activated_user(id_member=user.id)
                await message.reply(MESSAGES['activate_is_done'])
        else:
            # Must /join first.
            await message.reply(MESSAGES['not_reg'])
    except Exception as e:
        await bot.send_message(ID_FOR_REPORT, MESSAGES['report_error'].format(e))
@dp.message_handler(commands=['deactivate'])
async def deactivate_user(message: types.Message):
    """Deactivate a registered member (/deactivate)."""
    user = message.from_user
    try:
        if check_have_member(user.id):
            if check_active(user.id):
                deactivated_user(user.id)
                await message.reply(MESSAGES['is_deactivated_is_done'])
            else:
                await message.reply(MESSAGES['is_deactivated_was_deactive'])
        else:
            # Must /join first.
            await message.reply(MESSAGES['not_reg'])
    except Exception as e:
        await bot.send_message(ID_FOR_REPORT, MESSAGES['report_error'].format(e))
@dp.message_handler(commands=['roll'])
async def roll_members(message: types.Message):
    """Start a new roll: pair every active member with a random other member
    (/roll).  Each pair gets an advisor (picks a movie) and a watcher."""
    count_member = count_active_members()
    if count_member > 1:
        id_roll = create_roll()
        # links[0][i] advises links[1][i] -- presumably a derangement so no
        # one advises themselves; TODO confirm shuffle_members contract.
        links = shuffle_members(get_ids_active_members())
        for advisor_id, watcher_id in zip(links[0], links[1]):
            make_purpose_rec(id_advisor=int(advisor_id), id_watcher=int(watcher_id), id_roll=id_roll)
            set_state(id_member=advisor_id, status='find_movie')
            text_for_advisor = (f'ะั ะฝะฐะทะฝะฐัะฐะตัะต ัะธะปัะผ ะดะปั {get_first_name(watcher_id)} {get_last_name(watcher_id)}!'
                                f'ะะฐะฟะธัะธัะต ัะธะปัะผ, ะบะพัะพััะน ะฒั ะฟะพัะพะฒะตััะตัะต ะดะปั ะฟัะพัะผะพััะฐ!')
            await bot.send_message(chat_id=advisor_id, text=text_for_advisor)
            await message.reply(f'{get_first_name(advisor_id)} {get_last_name(advisor_id)} '
                                f'ะทะฐะดะฐะตั ัะธะปัะผ ะดะปั {get_first_name(watcher_id)} {get_last_name(watcher_id)}')
    else:
        # Need at least two active members to roll.
        await message.reply(MESSAGES['roll_not_active'].format(count_member))
@dp.message_handler(lambda message: message.chat.id in get_ids_watchers_active_roll(), commands=['accept'])
async def accept_movie(message: types.Message):
    """Watcher accepts the proposed movie (/accept): persist the assignment,
    reset the advisor's state and possibly close the roll."""
    id_advisor = get_id_advisor_from_wathcer_last_roll(message.chat.id)
    if get_state(id_member=id_advisor) == 'send_movie':
        # Consume the pending proposal.
        movie = movies_temp.pop(str(id_advisor))
        id_roll = set_movie_for_purpose(id_watcher=message.from_user.id,
                                        title_movie=movie)
        reset_state(id_member=id_advisor)
        await message.reply('ะัะธััะฝะพะณะพ ะฟัะพัะผะพััะฐ')
        await bot.send_message(chat_id=id_advisor,
                               text='ะะฐั ัะธะปัะผ ะฟัะธะฝัั!')
        await check_all_assign(message=message, id_roll=id_roll)
    else:
        # No confirmed proposal is pending for this watcher yet.
        await message.reply('ะะปั ะฒะฐั ัะธะปัะผ ะตัะต ะฝะต ะฒัะฑัะฐะฝ!')
@dp.message_handler(lambda message: message.chat.id in get_ids_watchers_active_roll(), commands=['decline'])
async def decline_movie(message: types.Message):
    """Watcher rejects the proposed movie (/decline): discard the proposal
    and send the advisor back to picking one."""
    id_advisor = get_id_advisor_from_wathcer_last_roll(message.chat.id)
    if get_state(id_member=id_advisor) == 'send_movie':
        # Drop the rejected proposal; the popped title itself is not needed
        # (the previous binding to an unused local has been removed).
        movies_temp.pop(str(id_advisor))
        set_state(id_member=id_advisor, status='find_movie')
        await message.reply('ะฅะพัะพัะพ, ั ัะบะฐะถั ััะพะฑ ะฒัะฑัะฐะปะธ ะดััะณะพะน! :)')
        await bot.send_message(chat_id=id_advisor,
                               text='ะะฐั ัะธะปัะผ ะฝะต ััะฒะตัะดะธะปะธ:( ะัะฑะตัะธัะต ะดััะณะพะน.')
    else:
        await message.reply('ะะปั ะฒะฐั ัะธะปัะผ ะตัะต ะฝะต ะฒัะฑัะฐะฝ!')
@dp.message_handler(lambda message: message.chat.id in get_ids_advisor_active_roll(),
                    lambda message: get_state(message.chat.id) == 'find_movie')
async def question(message):
    """Advisor sent a movie title: stash it and ask for confirmation."""
    movie = message.text
    movies_temp[str(message.from_user.id)] = movie
    set_state(id_member=message.from_user.id, status='check_movie')
    await message.reply(f'ะขะฒะพะน ัะธะปัะผ: {movie}.\n ะะฐ ะธะปะธ ะะตั')
@dp.message_handler(lambda message: message.chat.id in get_ids_advisor_active_roll(),
                    lambda message: get_state(message.chat.id) == 'check_movie')
async def agree(message: types.Message):
    """Advisor confirms the stashed title ('ะดะฐ') or retracts it ('ะฝะตั')."""
    result = message.text
    if result.lower() == 'ะดะฐ':
        # Forward the confirmed proposal to the paired watcher.
        movie = movies_temp[str(message.from_user.id)]
        set_state(id_member=message.from_user.id, status='send_movie')
        await message.reply(f'ะะฐั ัะธะปัะผ ะพัะฟัะฐะฒะปะตะฝ ะดะปั ัะพะณะปะฐัะพะฒะฐะฝะธั')
        id_watcher = get_id_watcher_from_advisor_last_roll(id_advisor=message.from_user.id)
        text_for_watcher = (
            f'{get_first_name(message.from_user.id)} {get_last_name(message.from_user.id)} '
            f'ะกะพะฒะตััะตั ะฒะฐะผ ัะธะปัะผ {movie}.\n ะั ะฟัะธะฝะธะผะฐะตัะต ัะธะปัะผ?\n'
            f'ะัะธะฝััั /accept \n ะัะบะฐะทะฐัััั /decline'
        )
        await bot.send_message(
            chat_id=id_watcher,
            text=text_for_watcher
        )
    elif result.lower() == 'ะฝะตั':
        # Advisor changed their mind: back to picking a title.
        set_state(id_member=message.from_user.id, status='find_movie')
        await message.reply(f'ะะดั ัะธะปัะผ')
async def check_all_assign(message: types.Message, id_roll: int):
    """If every participant of the roll has an accepted movie, announce all
    assignments to the group channel and mark the roll completed.

    Does nothing while assignments are still pending (the dead
    ``else: pass`` branch has been removed).
    """
    if check_all_movie_assigned():
        id_chanel = get_id_chanel_from_id_member(message.from_user.id)
        await bot.send_message(id_chanel, MESSAGES['roll_done'])
        all_assigns = get_all_purposes(id_roll=id_roll)
        for assign in all_assigns:
            # assign layout: two name fields, two name fields, movie title --
            # presumably (advisor first/last, watcher first/last, title);
            # TODO confirm against get_all_purposes.
            await bot.send_message(id_chanel, MESSAGES['roll_done_item'].format(
                ' '.join(assign[:2]),
                ' '.join(assign[2:4]),
                assign[4]
                )
            )
        # id_status=1 marks the roll as completed.
        set_status_roll(id_roll=id_roll, id_status=1)
@dp.message_handler(commands=['generate_qr'])
async def set_wait_img(message: types.Message):
    """Mark the sender as waiting to upload a captioned photo for QR
    generation (/generate_qr)."""
    if not wait_img_qr.get(str(message.from_user.id)):
        wait_img_qr[str(message.from_user.id)] = True
        await message.reply(MESSAGES['wait_image_generate_qr'])
@dp.message_handler(lambda message: wait_img_qr.get(str(message.from_user.id)),
                    content_types=ContentType.PHOTO)
async def generate_qr_code_and_send(message: types.Message):
    """Receive the captioned photo, generate a QR code from it and reply
    with the result image.

    Only fires for users previously registered via /generate_qr (see
    wait_img_qr).  The caption text is embedded in the QR code.
    """
    if message.caption:
        # Timestamped file name so successive uploads do not overwrite each
        # other (the computed name was previously unused and every upload
        # collided on one fixed file name).
        now = datetime.now()
        filename = f'{now.day}_{now.month}_{now.year}_{now.microsecond}'
        path_to_img = os.getcwd() + os.sep + 'files' + os.sep + str(message.from_user.id) + os.sep + f'{filename}.jpg'
        await message.photo[-1].download(path_to_img)
        path_qr_code = generate_qr(path_to_img=path_to_img, words=message.caption, id=message.from_user.id)
        # Context manager closes the handle (it was previously leaked).
        with open(path_qr_code, 'rb') as image_qr:
            await message.reply_photo(image_qr)
        wait_img_qr.pop(str(message.from_user.id))
    else:
        await message.reply(MESSAGES['not_text_for_qr'])
if __name__ == '__main__':
    # Start long polling; blocks until the process is stopped.
    executor.start_polling(dp)
| StarcoderdataPython |
30790 |
from pulp import *
# Feasibility model: place the digits 1..9 on a 3x3 grid so that every row
# and every column sums to 15 (diagonals are NOT constrained here).
prob = LpProblem("PULPTEST", LpMinimize)
# model variables
XCOORD = [0, 1, 2]
YCOORD = [0, 1, 2]
NUMBERS = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# allocation[x][y][n] == 1 iff digit n is placed at cell (x, y):
# a 3 x 3 x 9 matrix of binary values
allocation = LpVariable.dicts("square", (XCOORD, YCOORD, NUMBERS), 0, 1, LpInteger)
# constant objective: this is a pure feasibility problem
prob += 0, "Arbitrary Objective Function"
# constraint: sum over rows
for x in XCOORD:
    prob += lpSum([n * allocation[x][y][n] for y in YCOORD for n in NUMBERS]) == 15
# constraint: sum over columns
for y in YCOORD:
    prob += lpSum([n * allocation[x][y][n] for x in XCOORD for n in NUMBERS]) == 15
# constraint: each number only once
for n in NUMBERS:
    prob += lpSum([allocation[x][y][n] for x in XCOORD for y in YCOORD]) == 1
# constraint: three numbers per column
for x in XCOORD:
    prob += lpSum([allocation[x][y][n] for y in YCOORD for n in NUMBERS]) == 3
# constraint: three numbers per row
for y in YCOORD:
    prob += lpSum([allocation[x][y][n] for x in XCOORD for n in NUMBERS]) == 3
# constraint: 9 numbers set in total
prob += lpSum([allocation[x][y][n] for x in XCOORD for y in YCOORD for n in NUMBERS]) == 9
# run the solver
prob.solve()
print("Status:", LpStatus[prob.status])
# print the numbers that have been found, one grid row per line
for y in YCOORD:
    for x in XCOORD:
        for n in NUMBERS:
            if value(allocation[x][y][n]) == 1:
                print(n, end=' ')
                #print(x, y, n)
    print()
| StarcoderdataPython |
1650035 | <gh_stars>0
from eth_account import Account
import secrets
def create_account():
    """Generate a throwaway Ethereum key pair and print it.

    Returns (private_key_hex_with_0x_prefix, checksummed_address).
    WARNING(review): the private key is printed to stdout in the clear --
    acceptable for a disposable game wallet, never for real funds.
    """
    priv = secrets.token_hex(32)  # 32 cryptographically random bytes as hex
    private_key = "0x" + priv
    print("Game specific private key... this is for an optional wallet for you not to worry on exposing your assets from other wallets\n")
    print(f"pk: {private_key}")
    acct = Account.from_key(private_key)
    print("Address:", acct.address)
    return (private_key, acct.address)
create_account() | StarcoderdataPython |
3366753 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
    # Month number -> name; spellings and capitalization are kept exactly
    # as in the original branch chain.
    month_names = {
        1: "ะฏะฝะฒะฐัั",
        2: "ัะตะฒัะฐะปั",
        3: "ะะฐัั",
        4: "ะะฟัะตะปั",
        5: "ะผะฐะน",
        6: "ะัะฝั",
        7: "ะัะปั",
        8: "ะฐะฒะณััั",
        9: "ัะตะฝััะฑัั",
        10: "ะะบััะฑัั",
        11: "ะฝะพัะฑัั",
        12: "ะะตะบะฐะฑัั",
    }
    a = int(input("a = "))
    # Out-of-range input prints 'Error', as the original else-branch did.
    print(month_names.get(a, 'Error'))
| StarcoderdataPython |
1630242 | import unittest
def next_permutation(arr):
    """Advance *arr* in place to its next lexicographic permutation.

    Returns True on success; returns False (leaving *arr* unchanged) when
    *arr* is already the last, fully descending permutation.
    """
    # Locate the rightmost ascent: arr[pivot - 1] < arr[pivot].
    pivot = len(arr) - 1
    while pivot > 0 and arr[pivot - 1] >= arr[pivot]:
        pivot -= 1
    if pivot <= 0:
        return False
    # Rightmost element strictly greater than the pivot predecessor.
    successor = len(arr) - 1
    while arr[successor] <= arr[pivot - 1]:
        successor -= 1
    arr[pivot - 1], arr[successor] = arr[successor], arr[pivot - 1]
    # The suffix is descending; reversing it yields the smallest tail.
    arr[pivot:] = reversed(arr[pivot:])
    return True
def is_matched_parentheses(ray):
    """Return False iff a ')' appears without an earlier unmatched '('.

    Note: leftover unmatched '(' do NOT invalidate the input here; callers
    always feed sequences with equal counts of each character.
    """
    depth = 0
    for ch in ray:
        if ch == "(":
            depth += 1
        elif ch == ")":
            if depth == 0:
                return False
            depth -= 1
    return True
def generate_parentheses_permutations_brute_force(number_of_pairs):
    """Enumerate every permutation of n '(' + n ')' and keep the balanced ones."""
    starting_list = (["("] * number_of_pairs) + [")"] * number_of_pairs
    # The initial arrangement "(((...)))" is always balanced, so include it
    # before iterating the remaining permutations.
    possibilities = ["".join(starting_list)]
    while next_permutation(starting_list):
        if is_matched_parentheses(starting_list):
            possibilities.append("".join(starting_list))
    return possibilities
def generate_parentheses_permutations_recursive_1(n):
    """Generate all balanced strings of *n* parenthesis pairs by growing a
    prefix one character at a time."""
    found = []
    def grow(opens_left, closes_left, prefix):
        # A full-length prefix is necessarily balanced by construction.
        if len(prefix) == n * 2:
            found.append(prefix)
        if opens_left > 0:
            grow(opens_left - 1, closes_left, prefix + "(")
        # A ')' is legal only when it closes a pending '('.
        if closes_left > opens_left:
            grow(opens_left, closes_left - 1, prefix + ")")
    grow(n, n, "")
    return found
def add_paren(arr, left_rem, right_rem, string_arr, idx):
    """Backtracking helper: fill string_arr[idx:] with every valid mix of
    the remaining '(' (left_rem) and ')' (right_rem); completed strings are
    appended to *arr*."""
    # Prune invalid states: negative opens, or more opens than closes left.
    if left_rem < 0 or right_rem < left_rem:
        return
    if left_rem == 0 and right_rem == 0:
        # Buffer is completely filled with a valid string.
        arr.append("".join(string_arr))
        return
    # Try an opening parenthesis at this slot, then a closing one; the
    # shared buffer slot is overwritten, so no copy is needed.
    string_arr[idx] = "("
    add_paren(arr, left_rem - 1, right_rem, string_arr, idx + 1)
    string_arr[idx] = ")"
    add_paren(arr, left_rem, right_rem - 1, string_arr, idx + 1)
def generate_parentheses_permutations_recursive_2(n):
    """Return all valid parenthesizations of *n* pairs via the add_paren
    backtracking helper (shared character buffer, no string copies)."""
    found = []
    add_paren(found, n, n, ["*"] * (2 * n), 0)
    return found
# All implementations share one contract and are exercised together below.
testable_functions = [
    generate_parentheses_permutations_brute_force,
    generate_parentheses_permutations_recursive_1,
    generate_parentheses_permutations_recursive_2,
]
# (input n, expected output sorted lexicographically)
test_cases = [
    (0, [""]),
    (1, ["()"]),
    (2, sorted(["()()", "(())"])),
    (3, sorted(["((()))", "(()())", "(())()", "()(())", "()()()"])),
]
class TestSuite(unittest.TestCase):
    """Checks every implementation against the shared test_cases table."""
    def test_generate_parentheses_permutations(self):
        for f in testable_functions:
            for num, expected in test_cases:
                # Sort because implementations emit results in different orders.
                assert sorted(f(num)) == expected, f"{f.__name__} {num} failed"
def example():
    """Quick demo of the three implementations."""
    print(generate_parentheses_permutations_recursive_1(2))
    print(generate_parentheses_permutations_brute_force(3))
    print(generate_parentheses_permutations_recursive_2(3))
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    example()
| StarcoderdataPython |
79695 | #!/usr/bin/env python3
# Copyright (C) 2020-2020 <NAME>. All rights reserved.
#
# This file is subject to the terms and conditions defined in file 'LICENSE',
# which is part of this source code package.
from typing import Optional
import aioserial
import asyncio
import click
import functools
from perso import PTE, PTESerialPort, PersoData, PersoDataV1
from rtlib import Eui
class PhysicalPTESerialPort(PTESerialPort):
    """PTESerialPort backed by a real serial device via aioserial."""
    def __init__(self, port:str, baudrate:int) -> None:
        self.serial = aioserial.AioSerial(port=port, baudrate=baudrate)
    def send(self, data:bytes) -> None:
        # Synchronous write; PTE frames are small.
        self.serial.write(data)
    async def recv(self) -> bytes:
        # Frames are NUL-terminated on the wire.
        return await self.serial.read_until_async(b'\0')
class BasedIntParamType(click.ParamType):
    """click parameter type: integer in any base (0x.., 0o.., 0b.., decimal)."""
    name = "integer"
    def convert(self, value, param, ctx):
        if isinstance(value, int):
            # Already an int (e.g. an option default) -- pass through.
            return value
        try:
            return int(value, 0)  # base 0: infer the base from the prefix
        except ValueError:
            self.fail(f"{value!r} is not a valid integer", param, ctx)
class EuiParamType(click.ParamType):
    """click parameter type wrapping the project's Eui parser."""
    name = "eui"
    def convert(self, value, param, ctx):
        try:
            return Eui(value)
        except ValueError:
            self.fail(f"{value!r} is not a valid EUI", param, ctx)
class AESKeyType(click.ParamType):
    """click parameter type: 32 hex characters parsed into a 16-byte AES key."""
    name = "aeskey"
    def convert(self, value, param, ctx):
        try:
            key = bytes.fromhex(value)
        except ValueError:
            self.fail(f"{value!r} is not a valid AES key", param, ctx)
        if len(key) != 16:
            # AES-128 key: exactly 16 bytes.
            self.fail(f"AES key must have a length of 16 bytes", param, ctx)
        return key
# Singleton instances used by the click option/argument declarations below.
BASED_INT = BasedIntParamType()
EUI = EuiParamType()
AESKEY = AESKeyType()
def coro(f):
    """Bridge an async command handler to click's synchronous call protocol.

    The returned wrapper drives coroutine function *f* to completion with
    asyncio.run and hands back its result; metadata is copied from *f*.
    """
    def runner(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))
    return functools.wraps(f)(runner)
@click.group()
@click.option('-p', '--port', default='/dev/ttyACM0',
              help='serial port')
@click.option('-b', '--baud', type=int, default=115200,
              help='baud rate')
@click.pass_context
def cli(ctx:click.Context, port:str, baud:int) -> None:
    # Shared PTE handle for every subcommand.  (Deliberately no docstring:
    # click would surface it as the group's --help text.)
    ctx.obj['pte'] = PTE(PhysicalPTESerialPort(port, baud))
@cli.command(help='Read personalization data from EEPROM')
@click.option('-o', '--offset', type=BASED_INT, default=0x0060,
              help='Offset of personalization data structure in EEPROM')
@click.option('-k', '--show-keys', is_flag=True,
              help='Show network and application keys')
@click.pass_context
@coro
async def pdread(ctx:click.Context, offset:int, show_keys:bool):
    """Read and pretty-print the personalization structure from EEPROM."""
    pte = ctx.obj['pte']
    pd = PersoData.unpack(await pte.ee_read(offset, PersoData.V1_SIZE))
    print(f'Hardware ID: 0x{pd.hwid:08x}')
    print(f'Region ID: 0x{pd.region:08x} ({pd.region})')
    print(f'Serial No: {pd.serial}')
    print(f'Device EUI: {pd.deveui}')
    print(f'Join EUI: {pd.joineui}')
    if show_keys:
        # Secrets are only revealed on explicit request (-k).
        print(f'Network key: {pd.nwkkey.hex()}')
        print(f'App key: {pd.appkey.hex()}')
@cli.command(help='Clear personalization data in EEPROM')
@click.option('-o', '--offset', type=BASED_INT, default=0x0060,
              help='Offset of personalization data structure in EEPROM')
@click.pass_context
@coro
async def pdclear(ctx:click.Context, offset:int):
    """Zero out the personalization data structure in EEPROM.

    The previous read-and-unpack of the existing data was removed: its
    result was never used, and corrupt EEPROM content would have made
    PersoData.unpack raise, preventing the clear from ever happening.
    """
    pte = ctx.obj['pte']
    await pte.ee_write(offset, bytes(PersoData.V1_SIZE))
@cli.command(help='Write personalization data to EEPROM')
@click.option('-o', '--offset', type=BASED_INT, default=0x0060,
              help='Offset of personalization data structure in EEPROM')
@click.option('--hwid', type=BASED_INT, default=0,
              help='Hardware ID')
@click.option('--region', type=BASED_INT, default=0,
              help='Region ID')
@click.argument('serialno', type=str)
@click.argument('deveui', type=EUI)
@click.argument('joineui', type=EUI)
@click.argument('nwkkey', type=AESKEY)
@click.argument('appkey', type=AESKEY, required=False)
@click.pass_context
@coro
async def pdwrite(ctx:click.Context, offset:int, hwid:int, region:int, serialno:str, deveui:Eui, joineui:Eui, nwkkey:bytes, appkey:Optional[bytes]):
    """Pack a V1 personalization structure and write it to EEPROM."""
    pte = ctx.obj['pte']
    if appkey is None:
        # LoRaWAN 1.0-style devices use a single root key for both roles.
        appkey = nwkkey
    await pte.ee_write(offset, PersoDataV1(hwid, region, serialno, deveui, joineui, nwkkey, appkey).pack())
if __name__ == '__main__':
    # obj={} gives the click context a dict for the shared PTE handle.
    cli(obj={})
| StarcoderdataPython |
199588 | """
coding: utf-8
Created on 30/10/2020
@author: github.com/edrmonteiro
From: Codility Lessons
"""
# DivCount
# Write a function:
# def solution(A, B, K)
# that, given three integers A, B and K, returns the number of integers within the range [A..B] that are divisible by K, i.e.:
# { i : A โค i โค B, i mod K = 0 }
# For example, for A = 6, B = 11 and K = 2, your function should return 3, because there are three numbers divisible by 2 within the range [6..11], namely 6, 8 and 10.
# Write an efficient algorithm for the following assumptions:
# A and B are integers within the range [0..2,000,000,000];
# K is an integer within the range [1..2,000,000,000];
# A โค B.
from math import ceil, floor
def solution(A, B, K):
    """Return how many integers in [A, B] are divisible by K.

    Computes floor(B/K) - ceil(A/K) + 1 entirely in integer arithmetic;
    the previous math.ceil/math.floor on float quotients is avoided so
    results stay exact for arbitrarily large inputs.
    """
    first = -(-A // K)  # ceil(A / K) without floats
    last = B // K       # floor(B / K)
    return last - first + 1
# Ad-hoc smoke tests; the expected answer is in the trailing comment.
A = 6
B = 11
K = 2
print(solution(A, B, K)) #3
A = 11
B = 345
K = 17
print(solution(A, B, K)) #20
A = 10
B = 10
K = 5
print(solution(A, B, K)) #1
A = 10
B = 10
K = 7
print(solution(A, B, K)) #0
A = 10
B = 10
K = 20
print(solution(A, B, K)) #0
A = 0
B = 0
K = 11
print(solution(A, B, K)) #1
stop = True  # convenient breakpoint anchor for debugging
# Original O(B - A) attempt, kept for reference (scored 50% on performance):
# def solution(A, B, K):
#     count = 0
#     for i in range(A, B + 1):
#         if i % K == 0:
#             count += 1
# return count | StarcoderdataPython |
51425 | # !/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2021-08-18
@LastEditTime: 2022-03-19
"""
import zipfile
import pickle
import gzip
import json
import re
from pathlib import Path
from typing import Union, Optional, Sequence
from ..strings import normalize_language, LANGUAGE
# Directory containing the bundled dictionary archives.
_DIR_PATH = Path(__file__).absolute().parent
__all__ = [
    "fetch",
]
def fetch(name: str, **kwargs):
    """Load the bundled resource *name* by dispatching to ``_fetch_<key>``.

    *name* is lower-cased, any ``_dict``/``-dict`` suffix is stripped and
    hyphens become underscores, e.g. ``"cilin-dict"`` -> ``_fetch_cilin``.
    """
    key = re.sub("[_-]+dict", "", name.lower()).replace("-", "_")
    func_name = f"_fetch_{key}"
    # Explicit globals() lookup instead of eval(): identical dispatch, but
    # no possibility of executing arbitrary code smuggled in via *name*.
    try:
        loader = globals()[func_name]
    except KeyError:
        # Preserve the NameError the old eval-based dispatch raised.
        raise NameError(f"name '{func_name}' is not defined") from None
    return loader(**kwargs)
def _fetch_cilin() -> dict:
    """Load the Cilin thesaurus dictionary pickled inside cilin_dict.zip."""
    cilin_path = _DIR_PATH / "cilin_dict.zip"
    with zipfile.ZipFile(cilin_path, "r") as archive:
        cilin_dict = pickle.loads(archive.read("cilin_dict.pkl"))
    return cilin_dict
def _fetch_fyh() -> tuple:
    """Load the three FYH pickles from fyh_dict.zip.

    Returns (tra_dict, var_dict, hot_dict) -- presumably traditional-form,
    variant-form and hot-word tables; confirm against the archive contents.
    """
    fyh_path = _DIR_PATH / "fyh_dict.zip"
    with zipfile.ZipFile(fyh_path, "r") as archive:
        tra_dict = pickle.loads(archive.read("tra_dict.pkl"))
        var_dict = pickle.loads(archive.read("var_dict.pkl"))
        hot_dict = pickle.loads(archive.read("hot_dict.pkl"))
    return tra_dict, var_dict, hot_dict
def _fetch_stopwords(language: str) -> list:
    """Return the stopword list for *language* from stopwords.json.gz.

    The language code is canonicalized through normalize_language before
    being used as the JSON key.
    """
    stopwords_path = _DIR_PATH / "stopwords.json.gz"
    with gzip.open(stopwords_path, "rt") as gz_file:
        stopwords = json.load(gz_file)
    stopwords = stopwords[normalize_language(language).value]
    return stopwords
def _fetch_stopwords_zh() -> list:
    """Chinese stopword list (name-dispatch convenience wrapper)."""
    return _fetch_stopwords("zh")
def _fetch_stopwords_en() -> list:
    """English stopword list (name-dispatch convenience wrapper)."""
    return _fetch_stopwords("en")
def _fetch_sim() -> dict:
    """Load the similarity dictionary pickled in sim_dict.pkl."""
    sd_path = _DIR_PATH / "sim_dict.pkl"
    return pickle.loads(sd_path.read_bytes())
def _fetch_hownet_en() -> dict:
    """Load the English HowNet candidate table from hownet_en.zip."""
    hc_path = _DIR_PATH / "hownet_en.zip"
    with zipfile.ZipFile(hc_path, "r") as archive:
        hc = pickle.loads(archive.read("hownet_candidate/hownet_candidate.pkl"))
    return hc
def _fetch_hownet_zh() -> dict:
    """Load the Chinese HowNet table from hownet_zh.json.gz."""
    hc_path = _DIR_PATH / "hownet_zh.json.gz"
    with gzip.open(hc_path, "rt", encoding="utf-8") as f:
        hc = json.load(f)
    return hc
def _fetch_hownet(language: Union[str, LANGUAGE]) -> dict:
    """Dispatch to the English or Chinese HowNet loader.

    NOTE(review): implicitly returns None for any other language --
    confirm callers only ever pass en/zh.
    """
    _lang = normalize_language(language)
    if _lang == LANGUAGE.ENGLISH:
        return _fetch_hownet_en()
    elif _lang == LANGUAGE.CHINESE:
        return _fetch_hownet_zh()
def _fetch_checklist(keys: Optional[Union[Sequence[str], str]] = None) -> dict:
    """Load the CheckList substitution tables, optionally filtered by key(s).

    Keys are upper-cased before lookup.  A single string key returns that
    table directly; a sequence returns a key->table dict; None returns the
    whole mapping.
    """
    checklist_path = _DIR_PATH / "checklist_subs.json.gz"
    with gzip.open(checklist_path, "rt", encoding="utf-8") as f:
        checklist_subs = json.load(f)
    if keys is not None:
        if isinstance(keys, str):
            return checklist_subs[keys.upper()]
        _keys = [k.upper() for k in keys]
        return {k: v for k, v in checklist_subs.items() if k in _keys}
    return checklist_subs
def _fetch_checklist_subs(keys: Optional[Union[Sequence[str], str]] = None) -> dict:
    """Alias of _fetch_checklist, kept so name-based dispatch finds it."""
    return _fetch_checklist(keys)
def _fetch_dces() -> dict:
    """Load the DCES archive: a descriptions table, a pickled sklearn
    NearestNeighbors model and the embedding column names.

    If the pickled model cannot be loaded (ModuleNotFoundError, e.g. an
    sklearn version/layout mismatch), a fresh UNFITTED NearestNeighbors
    with the same hyper-parameters is substituted instead.
    """
    dces_path = _DIR_PATH / "DCES.zip"
    with zipfile.ZipFile(dces_path, "r") as archive:
        descs = pickle.loads(archive.read("descs.pkl"))
        try:
            neigh = pickle.loads(archive.read("neigh.pkl"))
        except ModuleNotFoundError:
            print("failed to load DCES neighbor. Init from sklearn.")
            from sklearn.neighbors import NearestNeighbors
            neigh = NearestNeighbors(
                **{
                    "algorithm": "auto",
                    "leaf_size": 30,
                    "metric": "euclidean",
                    "metric_params": None,
                    "n_jobs": None,
                    "n_neighbors": 5,
                    "p": 2,
                    "radius": 1.0,
                }
            )
        vec_colnames = pickle.loads(archive.read("vec_colnames.pkl"))
    ret = {
        "descs": descs,
        "neigh": neigh,
        "vec_colnames": vec_colnames,
    }
    return ret
| StarcoderdataPython |
23299 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import enum
from typing import Optional
from ossdbtoolsservice.serialization import Serializable
class MetadataType(enum.Enum):
    """Contract enum for representing metadata types"""
    TABLE = 0
    VIEW = 1
    SPROC = 2     # stored procedure
    FUNCTION = 3
class ObjectMetadata(Serializable):
    """Database object metadata (name, schema, type and URN)."""
    @classmethod
    def get_child_serializable_types(cls):
        # Tells the deserializer to decode 'metadata_type' as MetadataType.
        return {'metadata_type': MetadataType}
    def __init__(self, urn: str = None, metadata_type: MetadataType = None, metadata_type_name: str = None, name: str = None, schema: Optional[str] = None):
        self.metadata_type: MetadataType = metadata_type
        # Display name of the type (string form alongside the enum).
        self.metadata_type_name: str = metadata_type_name
        self.name: str = name
        self.schema: str = schema
        # Unique resource name identifying the object.
        self.urn: str = urn
| StarcoderdataPython |
3318646 | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from perfmetrics import metricmethod
from relstorage.adapters.interfaces import IReplicaSelector
from zope.interface import implements
import os
import time
class ReplicaSelector(object):
    """Chooses database replicas from a config file, failing over in order.

    The replica list is re-read whenever the config file's mtime changes
    (checked at most once per second); a non-primary selection expires
    after ``replica_timeout`` seconds so traffic drifts back to the
    primary (index 0).
    """
    implements(IReplicaSelector)
    def __init__(self, fn, replica_timeout):
        self.replica_conf = fn
        self.replica_timeout = replica_timeout
        self._read_config()
        self._select(0)
        self._iterating = False
        self._skip_index = None
    def _read_config(self):
        """(Re)load the replica list and remember the file's mtime."""
        self._config_modified = os.path.getmtime(self.replica_conf)
        self._config_checked = time.time()
        f = open(self.replica_conf, 'r')
        try:
            lines = f.readlines()
        finally:
            f.close()
        replicas = []
        for line in lines:
            line = line.strip()
            # Skip blank lines and '#' comments.
            if not line or line.startswith('#'):
                continue
            replicas.append(line)
        if not replicas:
            raise IndexError(
                "No replicas specified in %s" % self.replica_conf)
        self._replicas = replicas
    def _is_config_modified(self):
        """True when the config file's mtime changed since the last load."""
        now = time.time()
        if now < self._config_checked + 1:
            # don't check the last mod time more often than once per second
            return False
        self._config_checked = now
        t = os.path.getmtime(self.replica_conf)
        return t != self._config_modified
    def _select(self, index):
        """Make replicas[index] current; arm the expiry timer for fallbacks."""
        self._current_replica = self._replicas[index]
        self._current_index = index
        if index > 0 and self.replica_timeout:
            self._expiration = time.time() + self.replica_timeout
        else:
            self._expiration = None
    def current(self):
        """Get the current replica."""
        self._iterating = False
        if self._is_config_modified():
            self._read_config()
            self._select(0)
        elif self._expiration is not None and time.time() >= self._expiration:
            # Fallback lease expired: go back to the primary.
            self._select(0)
        return self._current_replica
    @metricmethod
    def next(self):
        """Return the next replica to try.
        Return None if there are no more replicas defined.
        """
        if self._is_config_modified():
            # Start over even if iteration was already in progress.
            self._read_config()
            self._select(0)
            self._skip_index = None
            self._iterating = True
        elif not self._iterating:
            # Start iterating.
            self._skip_index = self._current_index
            i = 0
            if i == self._skip_index:
                i = 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
            self._iterating = True
        else:
            # Continue iterating.
            i = self._current_index + 1
            if i == self._skip_index:
                i += 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
        return self._current_replica
| StarcoderdataPython |
3379746 | # Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import sys
from toscaparser import functions
# Default no-op relationship handlers ("module:function" import paths) used
# when a relationship type does not define its own Configure operations.
RELATIONSHIP_STABS = {
    'link': 'aiorchestra.core.noop:link',
    'unlink': 'aiorchestra.core.noop:unlink',
}
def check_for_event_definition(action):
    """Decorator that skips a Standard lifecycle event with no implementation.

    When the node type does not implement the requested event, a debug
    message is logged and ``(None, None)`` is returned instead of calling
    the wrapped resolver.
    """
    def wraps(*args, **kwargs):
        self, node, event = args
        if self.check_event_availability(event, 'Standard'):
            return action(*args, **kwargs)
        msg = ('Lifecycle event "{0}" was not implemented '
               'for node "{1}". Skipping.'
               .format(event, node.name))
        self.context.logger.debug(msg)
        return None, None
    return wraps
def lifecycle_event_handler(action):
    """Decorator for OrchestraNode lifecycle coroutines.

    Logs the event start/finish, flips a provisioned flag depending on
    whether the event is constructive (create/configure/start) or not,
    and logs + re-raises any failure.
    """
    async def wraps(*args, **kwargs):
        self = list(args)[0]
        self.context.logger.debug('Attempting to run {0} event for '
                                  'node {1}.'
                                  .format(action.__name__, self.name))
        try:
            # NOTE(review): name mangling only applies inside class bodies,
            # so these writes set a literal "__provisioned" attribute, NOT
            # the node's private _OrchestraNode__provisioned flag read by
            # the is_provisioned property -- they are effectively dead.
            # Routing them through self.is_provisioned would also affect
            # link/unlink targets, so confirm intent before fixing.
            if action.__name__ in ['create', 'configure', 'start']:
                self.__provisioned = True
            else:
                self.__provisioned = False
            result = action(*args, **kwargs)
            # Logged before awaiting: "finished successfully" is emitted
            # even if the awaited coroutine subsequently fails.
            self.context.logger.debug('Event {0} finished successfully for '
                                      'node {1}.'
                                      .format(action.__name__, self.name))
            await result
        except Exception as ex:
            self.__provisioned = False
            self.context.logger.error(str(ex))
            raise ex
    return wraps
class InterfaceOperations(object):
    """Resolver and runner for a TOSCA node's interface operations.

    Wraps the node type's interface definitions so that Standard lifecycle
    events (create/configure/start/...) and relationship events
    (link/unlink) can be resolved to "module:function" implementations
    and awaited.
    """

    def __init__(self, context, node):
        self.context = context
        self.node_type = node.type_definition
        self.interface_implementations = node.type_definition.interfaces
        self.check_required_lifecycle_events(node, 'Standard')

    def check_event_availability(self, check_event, lifecycle_type):
        # True when the event name appears among the implemented events of
        # the given lifecycle (e.g. 'Standard') for this node type.
        return check_event in [
            event for event in
            self.interface_implementations.get(lifecycle_type)]

    def check_required_lifecycle_events(self, node, lifecycle_type):
        # 'create' is mandatory; a missing 'delete' only triggers a warning
        # because the node can still be provisioned (just never torn down).
        __current_events = node.type_definition.interfaces.get(lifecycle_type)
        if 'create' not in __current_events:
            msg = ('{0} lifecycle event "{1}" is required'
                   .format(lifecycle_type, 'create'))
            self.context.logger.error(msg)
            raise Exception(msg)
        if 'delete' not in __current_events:
            msg = ('{0} lifecycle event "delete" was not defined. '
                   'No way to delete provisioned node "{1}" instance.'
                   .format(lifecycle_type, node.name))
            self.context.logger.warn(msg)

    @check_for_event_definition
    def __get_standard_event(self, node, event):
        """Return (implementation, inputs) for a Standard lifecycle event."""
        __current_events = node.type_definition.interfaces.get(
            'Standard')
        implementation = __current_events[event]['implementation']
        inputs = __current_events[event].get('inputs', {})
        return implementation, inputs

    def __get_relationship_event(self, target, source, event):
        """Return (implementation, inputs) for a relationship event.

        Searches *source*'s requirements for a custom relationship type
        towards *target*; otherwise falls back to the no-op stubs.
        """
        reqs = source.type_definition.requirements
        custom_defs = source.custom_defs
        for req in reqs:
            for req_node_name, req_def in req.items():
                _type = req_def['relationship']
                if req_node_name == target.name and _type in custom_defs:
                    impl_def = custom_defs[_type]['interfaces']['Configure']
                    event_def = impl_def[event]
                    return event_def['implementation'], event_def.get('inputs')
                else:
                    # NOTE(review): this returns the stub on the FIRST
                    # non-matching requirement, so later requirements are
                    # never inspected -- looks unintentional; confirm.
                    return RELATIONSHIP_STABS[event], {}
        return RELATIONSHIP_STABS[event], {}

    def import_task_method(self, impl, event, node):
        """Import and return the callable behind a "module:method" path.

        Returns None (after a debug log) when no implementation is given.
        """
        if impl:
            module, method = impl.split(":")
            m = importlib.import_module(module)
            try:
                return getattr(m, method)
            except Exception as ex:
                # NOTE(review): format args place str(ex) into the "{1}"
                # (event) slot and the event into "{2}" (reason) -- the
                # message fields look swapped; confirm before relying on it.
                self.context.logger.error(
                    'Unable to get node "{0}" lifecycle event "{1}" '
                    'implementation. Reason: {2}.'
                    .format(node.name, str(ex), event))
                raise ex
        else:
            msg = ('Missing node "{1}" lifecycle event "{0}" '
                   'implementation.'.format(event, node.name))
            self.context.logger.debug(msg)

    async def run_standard_event(self, node, event):
        """Resolve and await a Standard lifecycle event for *node*."""
        impl, inputs = self.__get_standard_event(node, event)
        task = self.import_task_method(impl, event, node)
        if task:
            await task(node, inputs)

    # TODO(denismakogon): re-write it
    async def run_relationship_event(self, target, source, event):
        """Resolve and await a relationship event between two nodes."""
        impl, inputs = self.__get_relationship_event(target, source, event)
        task = self.import_task_method(impl, event, source)
        if task:
            await task(source, target, inputs)
class OrchestraNode(object):
    """Runtime wrapper around a parsed TOSCA node template.

    Adds lifecycle-event execution (via InterfaceOperations), lazy
    resolution of TOSCA intrinsic functions (get_input / get_property /
    get_attribute) into concrete property values, and runtime attribute
    bookkeeping on top of the template node.
    """

    def __init__(self, context, node):
        self.context = context
        self.node = node
        self.operations = InterfaceOperations(context, node)
        self.__name = node.name
        self.__properties = {}
        self.__attributes = {}
        self.__provisioned = False
        self.__runtime_properties = {}
        self.__type_defs = node.type_definition
        # NOTE(review): reaches into toscaparser's private _properties.
        self.__prop_def = node._properties
        self.__node_type = node.type
        self.__node_type_def = self.__type_defs.custom_def[self.node.type]
        self.__custom_defs = self.__type_defs.custom_def

    @property
    def custom_defs(self):
        return self.__custom_defs

    @property
    def node_type_definition(self):
        return self.__node_type_def

    @property
    def node_type(self):
        return self.__node_type

    @property
    def type_definition(self):
        return self.__type_defs

    @property
    def property_definishion(self):
        # NOTE(review): "definishion" is a typo preserved for interface
        # compatibility with existing callers.
        return self.__prop_def

    # TODO(denismakogon): define OrchestraNodeProperties class
    def __setup_properties(self):
        # Resolve every declared property value, evaluating TOSCA intrinsic
        # functions against template inputs and other nodes.
        self.context.logger.debug('Initializing node {0} properties.'
                                  .format(self.name))
        for input_ref in self.property_definishion:
            if input_ref.value is not None:
                self.context.logger.debug('Attempting to resolve node {0} '
                                          'properties for TOSCA functions.'
                                          .format(self.name))
                value = None
                if isinstance(input_ref.value, functions.GetInput):
                    if (input_ref.value.input_name in
                            self.context.template_inputs):
                        self.context.logger.debug(
                            'Property {0} for node {1} '
                            'was resolved by TOSCA get_input function.'
                            .format(input_ref.value.input_name, self.name))
                        value = self.context.template_inputs[
                            input_ref.value.input_name]
                    else:
                        if input_ref.required:
                            msg = 'Input {0} is required.'.format(
                                input_ref.value.input_name)
                            self.context.logger.error(msg)
                            raise Exception(msg)
                        else:
                            if input_ref.value.input_name in [
                                    i.name for i
                                    in self.context.inputs_definitions]:
                                self.context.logger.debug(
                                    'Attempting to look-up for default '
                                    'value for node "{0}" property "{1}" '
                                    'in TOSCA template input definitions'
                                    .format(self.name,
                                            input_ref.value.input_name))
                                for i in self.context.inputs_definitions:
                                    if i.name == input_ref.value.input_name:
                                        self.context.logger.debug(
                                            'Default value for node "{0}" '
                                            'property "{1}" in TOSCA template '
                                            'input definitions was found'
                                            ' - {2}.'.format(
                                                self.name,
                                                input_ref.value.input_name,
                                                str(i.default)))
                                        value = i.default
                            else:
                                msg = ('Node {0} non-required property "{1}" '
                                       'default value is None. Attempting to '
                                       'create value from input type "{2}".'
                                       .format(self.name,
                                               input_ref.value.input_name,
                                               input_ref.type))
                                self.context.logger.warn(msg)
                                try:
                                    _type = (
                                        'str' if input_ref.type == 'string'
                                        else input_ref.type)
                                    # NOTE(review): getattr on a module does
                                    # not fall back to builtins, so looking
                                    # up 'str'/'int' on this module likely
                                    # raises and lands in the except branch
                                    # below -- confirm.
                                    value = getattr(sys.modules[__name__],
                                                    _type)()
                                except Exception as e:
                                    msg = (
                                        'Unable to create instance of input '
                                        'type {0} for node {1}. It may appear '
                                        'that custom type was used. '
                                        'Falling back to None'
                                        .format(input_ref.type, self.name))
                                    self.context.logger.warn(msg)
                                    self.context.logger.error(str(e))
                                    value = input_ref.default
                elif isinstance(input_ref.value, (str, dict, int,
                                                  float, list, bool)):
                    self.context.logger.debug(
                        'Property {0} for node {1} '
                        'was resolved by assigned value in its definition.'
                        .format(input_ref.name, self.name))
                    value = input_ref.value
                elif isinstance(input_ref.value, functions.GetProperty):
                    ref_node = self.context.node_from_name(
                        input_ref.value.node_template_name)
                    if input_ref.value.property_name in ref_node.properties:
                        value = ref_node.properties[
                            input_ref.value.property_name]
                        self.context.logger.debug(
                            'Property {0} for node {1} was resolved by '
                            'assigned value in its definition.'
                            .format(input_ref.name, self.name))
                    else:
                        msg = ('Node {0} does not have referenced property.'
                               .format(ref_node.name))
                        self.context.logger.error(msg)
                        raise Exception(msg)
                elif isinstance(input_ref.value, functions.GetAttribute):
                    ref_node = self.context.node_from_name(
                        input_ref.value.node_template_name)
                    if ref_node.is_provisioned:
                        if (input_ref.value.attribute_name in
                                ref_node.attributes):
                            value = ref_node.attributes[
                                input_ref.value.attribute_name]
                            self.context.logger.debug(
                                'Property {0} for node {1} was resolved '
                                'by TOSCA get_attribute function.'
                                .format(input_ref.name, self.name))
                    else:
                        msg = (
                            'Unable to get node "{0}" attribute "{1}" '
                            'because node is not provisioned. Pre-deployment '
                            'validation failed because node "{2}" has TOSCA '
                            'get_attribute function usage that can be '
                            'resolved only at deployment time.'.format(
                                input_ref.value.node_template_name,
                                input_ref.value.attribute_name,
                                self.name))
                        self.context.logger.debug(msg)
                self.__properties.update(
                    {input_ref.name: value})
        self.context.logger.debug('Node "{0}" properties: {1}.'.format(
            self.name, str(self.__properties)))

    def process_output(self, node_output_definition):
        """Resolve a template output definition against this node."""
        if not self.is_provisioned:
            msg = 'Node "{0}" was not provisioned.'.format(self.name)
            self.context.logger.error(msg)
            raise Exception(msg)
        # NOTE(review): attribute_name is read unconditionally and reused
        # for the GetProperty branch below -- for get_property outputs one
        # would expect property_name instead; confirm.
        name = node_output_definition.value.attribute_name
        if isinstance(node_output_definition.value, functions.GetAttribute):
            if name not in self.attributes:
                msg = ('No such attribute "{0}" for node "{1}".'
                       .format(name, self.name))
                self.context.logger.error(msg)
                raise Exception(msg)
            return self.get_attribute(name)
        if isinstance(node_output_definition.value, functions.GetProperty):
            if name not in self.properties:
                msg = ('No such property "{0}" for node "{1}".'
                       .format(name, self.name))
                self.context.logger.error(msg)
                raise Exception(msg)
            return self.properties[name]

    # TODO(denismakogon): define OrchestraNodeAttributes class
    # TODO(denismakogon): define OrchestraNodeRuntimeProperties class
    def __setup_attributes_definition_for_node_instance(self):
        # Populate declared attributes from runtime properties collected
        # during provisioning; missing ones fall back to None.
        __attributes = list(
            self.node_type_definition.get('attributes', {}).keys())
        if not self.is_provisioned:
            msg = ('Can not validate attributes for node "{0}" '
                   'because it was not provisioned.'.format(self.name))
            self.context.logger.debug(msg)
        else:
            self.context.logger.debug(
                'Attempting to process node "{0}" '
                'attributes: {1}.'.format(
                    self.name, str(__attributes)))
            for attr in __attributes:
                if attr not in self.__runtime_properties:
                    msg = ('Node "{0}" attribute "{1}" was not '
                           'initialized during provisioning, '
                           'falling backe to None'.format(self.name, attr))
                    self.context.logger.debug(msg)
                value = self.__runtime_properties.get(attr)
                self.context.logger.debug('Node "{0}" attribute "{1}" was '
                                          'initialized with value "{2}".'
                                          .format(self.name, attr, value))
                self.__attributes.update({
                    attr: value
                })

    def attempt_to_validate(self):
        """Validate the underlying template node and force property and
        attribute resolution, re-raising any failure after logging it."""
        try:
            self.context.logger.info(
                "Validating properties for node {0}.".format(self.name))
            self.node.validate()
            self.properties
            self.attributes
        except Exception as ex:
            self.context.logger.error(
                "Unable to validate node {0}. Reason: {1}"
                .format(self.name, str(ex)))
            raise ex

    @property
    def name(self):
        return self.__name

    @property
    def is_provisioned(self):
        return self.__provisioned

    @is_provisioned.setter
    def is_provisioned(self, provisioned):
        self.__provisioned = provisioned

    @property
    def properties(self):
        # Properties are (re-)resolved on every access.
        self.context.logger.debug('Retrieving node {0} properties.'
                                  .format(self.name))
        self.__setup_properties()
        return self.__properties

    @properties.setter
    def properties(self, other):
        raise Exception('Node "properties" are immutable.')

    def update_runtime_properties(self, attr, value):
        self.__runtime_properties.update({attr: value})

    def batch_update_runtime_properties(self, **kwargs):
        for k, v in kwargs.items():
            self.update_runtime_properties(k, v)

    @property
    def attributes(self):
        self.__setup_attributes_definition_for_node_instance()
        return self.__attributes

    def get_attribute(self, attr):
        """Return a declared attribute's runtime value or raise."""
        if attr in self.attributes:
            return self.runtime_properties.get(attr)
        else:
            raise AttributeError('Unknown attribute "{0}" of node '
                                 '"{1}".'.format(attr, self.name))

    @attributes.setter
    def attributes(self, other):
        raise Exception('Node attributes are immutable.')

    @property
    def runtime_properties(self):
        return self.__runtime_properties

    @property
    def has_parents(self):
        # NOTE(review): consults child_nodes while has_children consults
        # parent_nodes -- the pair looks swapped; confirm intent.
        return True if len(self.child_nodes) > 0 else False

    @property
    def parent_nodes(self):
        return [node.name for node in list(self.node.related_nodes)]

    @property
    def has_children(self):
        return True if len(self.parent_nodes) > 0 else False

    @property
    def child_nodes(self):
        return [list(req.values())[0] for req in self.node.requirements]

    @lifecycle_event_handler
    async def link(self, source):
        await self.operations.run_relationship_event(self, source, 'link')

    @lifecycle_event_handler
    async def unlink(self, source):
        await self.operations.run_relationship_event(self, source, 'unlink')

    @lifecycle_event_handler
    async def create(self):
        # Link to deployment-plan targets before running the create event.
        for target in self.context.deployment_plan[self]:
            if target.name != self.name:
                await target.link(self)
        await self.operations.run_standard_event(self, 'create')
        self.__provisioned = True

    @lifecycle_event_handler
    async def configure(self):
        await self.operations.run_standard_event(self, 'configure')
        self.__provisioned = True

    @lifecycle_event_handler
    async def start(self):
        await self.operations.run_standard_event(self, 'start')
        self.__provisioned = True

    @lifecycle_event_handler
    async def stop(self):
        await self.operations.run_standard_event(self, 'stop')

    @lifecycle_event_handler
    async def delete(self):
        await self.operations.run_standard_event(self, 'delete')
        for target in self.context.deployment_plan[self]:
            if target.name != self.name:
                await target.unlink(self)
        self.__provisioned = False

    def __repr__(self):
        return 'Node {0}'.format(self.name)

    def serialize(self):
        # NOTE(review): keys like '__name' do not match the mangled private
        # attribute names (_OrchestraNode__name), so load() below restores
        # them as literal '__name' attributes; confirm round-trip intent.
        return {
            '__name': self.name,
            'is_provisioned': self.__provisioned,
            '__properties': self.__properties,
            '__attributes': self.__attributes,
            '__runtime_properties': self.__runtime_properties,
        }

    @classmethod
    def load(cls, context, tosca_node, **kwargs):
        """Alternate constructor restoring serialized state via setattr."""
        orchestra_node = cls(context, tosca_node)
        for k, v in kwargs.items():
            setattr(orchestra_node, k, v)
        return orchestra_node
| StarcoderdataPython |
4821398 | # coding=utf-8
import pywikibot
import re
from pywikibot import pagegenerators
from time import sleep
# Parent categories holding the per-year "flags" sub-categories.
CATEGORY_EN = 'Flags by year of introduction'
CATEGORY_RU = 'ะคะปะฐะณะธ ะฟะพ ะณะพะดะฐะผ'
# Title templates for the per-year sub-categories ("%s" is the year).
# NOTE(review): the Russian literals appear mojibake-garbled in this copy;
# they should be the actual Cyrillic category titles -- confirm on-wiki.
PATTERN_EN = 'Category:Flags introduced in %s'
PATTERN_RU = 'ะะฐัะตะณะพัะธั:ะคะปะฐะณะธ %s ะณะพะดะฐ'
PATTERN_YEAR = '([0-9]+)'

# NOTE: Site construction at import time performs config/network side
# effects.
site_en = pywikibot.Site('en', 'wikipedia')
site_ru = pywikibot.Site('ru', 'wikipedia')
repo = pywikibot.Site('wikidata', 'wikidata')
def merge(target_item, redirect_item):
    """Merge *redirect_item* into *target_item* and redirect the leftover.

    After the merge, the losing item's descriptions are blanked (required
    before redirecting) and the item is turned into a redirect pointing at
    the surviving target.
    """
    print('MERGE: %s <- %s' % (target_item.getID(), redirect_item.getID()))
    redirect_item.mergeInto(target_item, ignore_conflicts='description')
    if redirect_item.isRedirectPage():
        # Already redirected by the merge itself; nothing left to clean up.
        return
    descriptions = redirect_item.get(force=True)['descriptions']
    blanked = {code: '' for code in descriptions}
    redirect_item.editDescriptions(blanked, summary='Clearing item to prepare for redirect')
    redirect_item.set_redirect_target(target_item, force=True)
def iterate_items():
    """Walk every per-year flags category pair (ru/en) and merge their
    Wikidata items when the two wikis point at different entities.

    The item whose ID sorts lower is kept as the merge target. The module
    runs this pass at import time via the trailing call below.
    """
    search_pattern_ru = PATTERN_RU % PATTERN_YEAR
    cat_ru = pywikibot.Category(site_ru, CATEGORY_RU)
    subcats_ru = cat_ru.subcategories()
    subcats_generator = pagegenerators.PreloadingGenerator(subcats_ru, 500)
    for subcat_ru in subcats_generator:
        matches = re.fullmatch(search_pattern_ru, subcat_ru.title())
        year = matches[1]
        title_en = PATTERN_EN % year
        subcat_en = pywikibot.Category(site_en, title_en)
        try:
            item_en = subcat_en.data_item()
        except pywikibot.exceptions.NoPage:
            print('NO PAGE: %s' % title_en)
            continue
        item_ru = subcat_ru.data_item()
        id_en = item_en.getID()
        id_ru = item_ru.getID()
        if id_en == id_ru:
            print('SKIP: %s = %s' % (id_en, id_ru))
        elif id_en < id_ru:
            # NOTE(review): Q-ids compare lexicographically here ("Q9" >
            # "Q10"), not numerically -- confirm this ordering is intended.
            merge(item_en, item_ru)
            # Throttle between consecutive write operations.
            sleep(5)
        else:
            merge(item_ru, item_en)
            sleep(5)


iterate_items()
| StarcoderdataPython |
1618282 | <filename>tools/assetlib_release.py
#! /usr/bin/env python3
"""
Build system is designed to created a build targetting a single platform.
This script aims at bundle together multiple builds to generate a final
multi-platform release.
"""
import json
from pathlib import Path
from urllib.request import urlopen
import argparse
from datetime import datetime
import os
import shutil
from urllib.request import urlretrieve
from zipfile import ZipFile
from concurrent.futures import ThreadPoolExecutor
# GitHub API endpoint listing published godot-python releases.
API_REPO_URL = "https://api.github.com/repos/touilleMan/godot-python/releases"
# Build platforms expected to each have one published release asset.
PLATFORMS = ("x11-32", "x11-64", "osx-64", "windows-32", "windows-64")
# Directory holding the static files bundled into every release.
MISC_DIR = Path(__file__).parent / "../misc"
def get_release_info(version=None):
    """Fetch release metadata from the GitHub releases API.

    Args:
        version: release version to look up, with or without the leading
            "v". The latest release is used when omitted.

    Returns:
        dict with "tag_name", "version" and a "platforms" mapping of
        platform name -> {"name", "url"} for each published asset.

    Raises:
        ValueError: if the requested version has no published release.
    """
    # Close the HTTP response deterministically instead of leaking it.
    with urlopen(API_REPO_URL) as response:
        data = json.loads(response.read())
    if not version:
        release_info = data[0]
    else:
        tag_name = version if version.startswith("v") else f"v{version}"
        release_info = next((x for x in data if x["tag_name"] == tag_name), None)
        if release_info is None:
            # Explicit error instead of a bare StopIteration from next().
            raise ValueError(f"No release found for tag {tag_name}")
    info = {
        "tag_name": release_info["tag_name"],
        "version": release_info["tag_name"][1:],
        "platforms": {},
    }
    for platform in PLATFORMS:
        asset = next((asset for asset in release_info["assets"] if platform in asset["name"]), None)
        if asset:
            info["platforms"][platform] = {
                "name": asset["name"],
                "url": asset["browser_download_url"],
            }
        else:
            print(f"Warning: release info for platform {platform} not found")
    return info
def pipeline_executor(dirs, release_info, platform_name):
    """Download (if needed) and extract one platform's release archive.

    Both steps are idempotent: the download is skipped when the archive is
    already in the build directory, and extraction is skipped when the
    platform folder already exists under the pythonscript addon directory.

    Args:
        dirs: mapping of working directories ("build", "pythonscript", ...)
            as pathlib.Path objects.
        release_info: metadata dict as returned by get_release_info().
        platform_name: one of PLATFORMS.
    """
    platform_info = release_info["platforms"][platform_name]
    assert platform_info["name"].endswith(".zip")
    release_archive = dirs["build"] / platform_info["name"]
    if not release_archive.exists():
        print(f"{platform_name} - Downloading release")
        with urlopen(platform_info["url"]) as f:
            release_archive.write_bytes(f.read())
    if not (dirs["pythonscript"] / platform_name).exists():
        print(f"{platform_name} - Extracting release")
        # Close the archive deterministically instead of leaking the handle.
        with ZipFile(release_archive) as zipobj:
            # Only extract platform-specific stuff
            members = [x for x in zipobj.namelist() if x.startswith(f"pythonscript/{platform_name}/")]
            zipobj.extractall(path=dirs["pythonscript"].parent, members=members)
def orchestrator(dirs, release_info):
    """Run the per-platform download/extract pipelines concurrently, then
    add the platform-independent bonus files to the distribution.

    Args:
        dirs: mapping with "build", "dist", "addons" and "pythonscript"
            directories (as produced by main()).
        release_info: metadata dict as returned by get_release_info().
    """
    futures = []
    with ThreadPoolExecutor() as executor:
        for platform_name in release_info["platforms"].keys():
            futures.append(executor.submit(pipeline_executor, dirs, release_info, platform_name))
    for future in futures:
        if not future.cancelled():
            future.result()  # Raise exception if any
    print("Add bonuses...")
    # Keep Godot from importing the addon's internals as resources.
    (dirs["pythonscript"] / ".gdignore").touch()
    license_txt = (MISC_DIR / "release_LICENSE.txt").read_text()
    for entry in ["dist", "pythonscript"]:
        (dirs[entry] / "LICENSE.txt").write_text(license_txt)
    # Rewrite resource paths for the addons/ layout of the asset library.
    (dirs["dist"] / "pythonscript.gdnlib").write_text(
        (MISC_DIR / "release_pythonscript.gdnlib").read_text().replace("res://", "res://addons")
    )
    # NOTE(review): datetime.utcnow() is naive and deprecated in recent
    # Pythons; datetime.now(timezone.utc) is the modern equivalent.
    (dirs["dist"] / "README.txt").write_text(
        (MISC_DIR / "release_README.txt")
        .read_text()
        .format(version=release_info["version"], date=datetime.utcnow().strftime("%Y-%m-%d"))
    )
def main():
    """CLI entry point: assemble a multi-platform asset-library release."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", default=None)
    args = parser.parse_args()

    release_info = get_release_info(args.version)
    print(f"Release version: {release_info['version']}")

    build_dir = Path(f"pythonscript-assetlib-release-{release_info['version']}")
    dist_dir = build_dir / f"pythonscript-{release_info['version']}"
    addons_dir = dist_dir / "addons"
    pythonscript_dir = addons_dir / "pythonscript"
    # Parents are created before children; every mkdir tolerates reruns.
    for directory in (build_dir, dist_dir, addons_dir, pythonscript_dir):
        directory.mkdir(exist_ok=True)

    dirs = {
        "build": build_dir,
        "dist": dist_dir,
        "addons": addons_dir,
        "pythonscript": pythonscript_dir,
    }
    orchestrator(dirs, release_info)
    print(f"{dist_dir} is ready !")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3241641 | <gh_stars>100-1000
import os
from conans import ConanFile, CMake, tools
class EABaseConan(ConanFile):
name = "eabase"
description = "EABase is a small set of header files that define platform-independent data types and platform feature macros. "
topics = ("conan", "eabase", "config",)
license = "BSD-3-Clause"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/electronicarts/EABase"
no_copy_source = True
settings = "os", "compiler", "build_type", "arch"
@property
def _source_subfolder(self):
return "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
folder_name = "EABase-{}".format(self.version)
os.rename(folder_name, self._source_subfolder)
def package_id(self):
self.info.header_only()
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "EABase"
self.cpp_info.names["cmake_find_package_multi"] = "EABase"
self.cpp_info.includedirs.extend([os.path.join("include", "Common"),
os.path.join("include", "Common", "EABase")])
| StarcoderdataPython |
63688 | from __future__ import print_function
import os.path
import tempfile
import shutil
from pype9.cmd import convert
import ninemlcatalog
from nineml import read
from lxml import etree
import yaml
# Use the project's dummy TestCase when the module is run directly (so it
# can be exercised as a script) and the real unittest.TestCase otherwise.
if __name__ == '__main__':
    from pype9.utils.testing import DummyTestCase as TestCase  # @UnusedImport
else:
    from unittest import TestCase  # @Reimport
class TestConvert(TestCase):
    """End-to-end tests for the ``pype9 convert`` command-line tool."""

    def setUp(self):
        # Fresh scratch directory for each test's converted output.
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_convert_version(self):
        """Converting to NineML v2 keeps the document semantically equal."""
        in_path = './' + os.path.join(os.path.relpath(ninemlcatalog.root),
                                      'neuron', 'Izhikevich.xml')
        out_path = os.path.join(self.tmpdir, 'Izhikevich.xml')
        args = '--nineml_version 2 {} {}'.format(in_path, out_path)
        convert.run(args.split())
        # Check the document has been written in version 2 format
        with open(out_path) as f:
            xml = etree.parse(f)
        root = xml.getroot()
        self.assertEqual(root.tag, '{http://nineml.net/9ML/2.0}NineML')
        # Check the converted document is equivalent
        in_doc = read(in_path)
        out_doc = read(out_path)
        # The two documents have different source URLs; blank them so the
        # equality check compares content only.
        in_doc._url = None
        out_doc._url = None
        self.assertEqual(in_doc, out_doc)

    def test_convert_format(self):
        """Converting .xml to .yml produces an equivalent YAML document."""
        in_path = './' + os.path.join(os.path.relpath(ninemlcatalog.root),
                                      'neuron', 'Izhikevich.xml')
        out_path = os.path.join(self.tmpdir, 'Izhikevich.yml')
        print(out_path)
        args = '{} {}'.format(in_path, out_path)
        convert.run(args.split())
        # Check the output file is yaml
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; yaml.safe_load would be preferable
        # but might change the (bytes) key type asserted below -- confirm.
        with open(out_path) as f:
            contents = yaml.load(f)
        self.assertEqual(list(contents.keys()), [b'NineML'])
        # Check the converted document is equivalent
        in_doc = read(in_path)
        out_doc = read(out_path)
        in_doc._url = None
        out_doc._url = None
        self.assertEqual(in_doc, out_doc)
| StarcoderdataPython |
3203662 | <reponame>enthought/etsproxy<gh_stars>1-10
# proxy module
from __future__ import absolute_import
from apptools.naming.context_adapter_factory import *
| StarcoderdataPython |
1715290 | from natch.abstract import Registry as AbstractRegistry
from natch.hashers import QualnameHasher
class Registry(AbstractRegistry):
    """Concrete rule registry keyed by hashes of functions.

    Each function hash maps to a list of (function, rule) pairs; lookup
    returns the first registered function whose rule matches the call
    arguments.
    """

    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(*args, **kwargs)

    def set_hasher(self, hasher):
        # Fall back to hashing by qualified name when no hasher is given.
        super(Registry, self).set_hasher(
            QualnameHasher() if hasher is None else hasher)

    def register(self, func, rule):
        """Associate *rule* with *func* under the function's hash."""
        key = self.hasher.hash(func)
        self.index.setdefault(key, []).append((func, rule))

    def unregister(self, func, rule):
        """Remove the first registration of *rule* for *func*, if any."""
        paths = self.index.get(self.hasher.hash(func))
        if paths is None:
            return
        for pair in paths:
            if pair[1] == rule:
                paths.remove(pair)
                return

    def lookup(self, func, *args, **kwargs):
        """Return the first registered function whose rule matches, else None."""
        candidates = self.index.get(self.hasher.hash(func), ())
        for handler, rule in candidates:
            if rule.does_match(*args, **kwargs):
                return handler
        return None
189914 | import json
def load_dictionary_from_file(file_path):
    """Load a dictionary from a JSON file.

    Parameters
    ----------
    file_path : string
        The JSON file path to load the dictionary from.

    Returns
    -------
    dictionary : dict
        The dictionary loaded from a JSON file.
    """
    # JSON is defined as UTF-8 text; pin the encoding instead of relying on
    # the platform default (which breaks on non-ASCII content on some OSes).
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)
| StarcoderdataPython |
3345494 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Runtime symbols compilation.
"""
import collections
import functools
import itertools
import logging
import typing
import uuid
from forml import flow
from forml.runtime import asset
from .. import _exception
from . import _target
LOGGER = logging.getLogger(__name__)
class Table(flow.Visitor, typing.Iterable):
"""Dynamic builder of the runtime symbols. Table uses node UIDs and GIDs where possible as instruction keys."""
class Linkage:
"""Structure for registering instruction dependency tree as relations between target (receiving) instruction
and its upstream dependency instructions representing its positional arguments.
"""
def __init__(self):
self._absolute: dict[uuid.UUID, list[typing.Optional[uuid.UUID]]] = collections.defaultdict(list)
self._prefixed: dict[uuid.UUID, list[typing.Optional[uuid.UUID]]] = collections.defaultdict(list)
def __getitem__(self, instruction: uuid.UUID) -> typing.Sequence[uuid.UUID]:
return tuple(itertools.chain(reversed(self._prefixed[instruction]), self._absolute[instruction]))
@property
def leaves(self) -> typing.AbstractSet[uuid.UUID]:
"""Return the leaf nodes that are anyone's dependency.
Returns:
leaf nodes.
"""
parents = {i for a in itertools.chain(self._absolute.values(), self._prefixed.values()) for i in a}
children = set(self._absolute).union(self._prefixed).difference(parents)
assert children, 'Not acyclic'
return children
def insert(self, instruction: uuid.UUID, argument: uuid.UUID, index: typing.Optional[int] = None) -> None:
"""Store given argument as a positional parameter of given instruction at absolute offset given by index.
Index can be omitted for single-argument instructions.
Args:
instruction: Target (receiver) instruction.
argument: Positional argument to be stored.
index: Position offset of given argument.
"""
args = self._absolute[instruction]
argcnt = len(args)
if index is None:
assert argcnt <= 1, f'Index required for multiarg ({argcnt}) instruction'
index = 0
assert index >= 0, 'Invalid positional index'
if argcnt <= index:
args.extend([None] * (index - argcnt + 1))
assert not args[index], 'Link collision'
args[index] = argument
def update(self, node: flow.Worker, getter: typing.Callable[[int], uuid.UUID]) -> None:
"""Register given node (its eventual functor) as an absolute positional argument of all of its subscribers.
For multi-output nodes the output needs to be passed through Getter instructions that are extracting
individual items.
Args:
node: Worker node (representing its actual functor) as an positional argument of its subscribers.
getter: Callback for creating a Getter instruction for given positional index and returning its key.
"""
if node.szout == 1:
for subscriber in node.output[0]:
self.insert(subscriber.node.uid, node.uid, subscriber.port)
else:
for index, output in enumerate(node.output):
source = getter(index)
self.insert(source, node.uid)
for subscriber in output:
self.insert(subscriber.node.uid, source, subscriber.port)
def prepend(self, instruction: uuid.UUID, argument: uuid.UUID) -> None:
"""In contrast to the absolute positional arguments we can potentially prepend these with various system
arguments that should eventually prefix the absolute ones.
Here we just append these to a list but during iteration we read them in reverse to reflect the prepend
order.
Args:
instruction: Key of the target (receiver) instruction.
argument: Argument (instruction key) to be prepended to the list of the absolute arguments.
"""
self._prefixed[instruction].append(argument)
class Index:
"""Mapping of the stored instructions. Same instruction might be stored under multiple keys."""
def __init__(self):
self._instructions: dict[uuid.UUID, _target.Instruction] = {}
def __contains__(self, key: uuid.UUID) -> bool:
return key in self._instructions
def __getitem__(self, key: uuid.UUID):
return self._instructions[key]
@property
def instructions(self) -> typing.Iterator[tuple[_target.Instruction, typing.Iterator[uuid.UUID]]]:
"""Iterator over tuples of instructions plus iterator of its keys.
Returns:
Instruction-keys tuples iterator.
"""
return itertools.groupby(self._instructions.keys(), self._instructions.__getitem__)
def set(self, instruction: _target.Instruction, key: typing.Optional[uuid.UUID] = None) -> uuid.UUID:
"""Store given instruction by provided or generated key.
It is an error to store instruction with existing key (to avoid, use the reset method).
Args:
instruction: Runtime instruction to be stored.
key: Optional key to be used as instruction reference.
Returns:
Key associated with the instruction.
"""
if not key:
key = uuid.uuid4()
assert key not in self, 'Instruction collision'
self._instructions[key] = instruction
return key
def reset(self, orig: uuid.UUID, new: typing.Optional[uuid.UUID] = None) -> uuid.UUID:
"""Re-register instruction under given key to a new key (provided or generate).
Args:
orig: Original key of the instruction to be re-registered.
new: Optional new key to re-register the instruction with.
Returns:
New key associated with the instruction.
"""
instruction = self._instructions[orig]
del self._instructions[orig]
return self.set(instruction, new)
def __init__(self, assets: typing.Optional[asset.State]):
self._assets: typing.Optional[asset.State] = assets
self._linkage: Table.Linkage = self.Linkage()
self._index: Table.Index = self.Index()
self._committer: typing.Optional[uuid.UUID] = None
def __iter__(self) -> _target.Symbol:
def merge(
value: typing.Iterable[typing.Optional[uuid.UUID]], element: typing.Iterable[typing.Optional[uuid.UUID]]
) -> typing.Iterable[uuid.UUID]:
"""Merge two iterables with at most one of them having non-null value on each offset into single iterable
with this non-null values picked.
Args:
value: Left iterable.
element: Right iterable.
Returns:
Merged iterable.
"""
def pick(left: typing.Optional[uuid.UUID], right: typing.Optional[uuid.UUID]) -> typing.Optional[uuid.UUID]:
"""Pick the non-null value from the two arguments.
Args:
left: Left input argument to pick from.
right: Right input argument to pick from.
Returns:
The non-null value of the two (if any).
"""
assert not (left and right), 'Expecting at most one non-null value'
return left if left else right
return (pick(a, b) for a, b in itertools.zip_longest(value, element))
stubs = {s for s in (self._index[n] for n in self._linkage.leaves) if isinstance(s, _target.Getter)}
for instruction, keys in self._index.instructions:
if instruction in stubs:
LOGGER.debug('Pruning stub getter %s', instruction)
continue
try:
arguments = tuple(self._index[a] for a in functools.reduce(merge, (self._linkage[k] for k in keys)))
except KeyError as err:
raise _exception.AssemblyError(f'Argument mismatch for instruction {instruction}') from err
yield _target.Symbol(instruction, arguments)
def add(self, node: flow.Worker) -> None:
"""Populate the symbol table to implement the logical flow of given node.
Args:
node: Node to be added - compiled into symbols.
"""
assert node.uid not in self._index, f'Node collision ({node})'
assert isinstance(node, flow.Worker), f'Not a worker node ({node})'
LOGGER.debug('Adding node %s into the symbol table', node)
functor = _target.Mapper(node.spec)
aliases = [node.uid]
if node.stateful:
state = node.gid
persistent = self._assets and state in self._assets
if not persistent and not any(n.trained for n in node.group):
raise _exception.AssemblyError(f'Stateful node {node} neither persisted nor trained')
if persistent and state not in self._index:
self._index.set(_target.Loader(self._assets, state), state)
if node.trained:
functor = _target.Consumer(node.spec)
aliases.append(state)
if persistent:
if not self._committer:
self._committer = self._index.set(_target.Committer(self._assets))
dumper = self._index.set(_target.Dumper(self._assets))
self._linkage.insert(dumper, node.uid)
self._linkage.insert(self._committer, dumper, self._assets.offset(state))
state = self._index.reset(state) # re-register loader under it's own id
if persistent or not node.trained:
functor = functor.shiftby(_target.Functor.Shifting.state)
self._linkage.prepend(node.uid, state)
for key in aliases:
self._index.set(functor, key)
if not node.trained:
self._linkage.update(node, lambda index: self._index.set(_target.Getter(index)))
    def visit_node(self, node: flow.Worker) -> None:
        """Visitor entrypoint delegating to :meth:`add`.

        Args:
            node: Worker node to be visited and compiled into symbols.
        """
        self.add(node)
def generate(path: flow.Path, assets: typing.Optional[asset.State] = None) -> typing.Sequence[_target.Symbol]:
    """Compile the given flow path into its sequence of symbol code.

    Args:
        path: Flow path to generate the symbols for.
        assets: Runtime assets dependencies.

    Returns:
        Sequence of symbol code.
    """
    symbols = Table(assets)
    path.accept(symbols)
    return tuple(entry for entry in symbols)
| StarcoderdataPython |
3370005 | # -*- coding: utf-8 -*-
import logging
import copy
import json
import os
# from rest_framework.decorators import api_view
# from rest_framework.response import Response
from django.template.response import TemplateResponse
from django.shortcuts import redirect
from utils.shell_runner import get_cmd_stdout
from proj_configs import DATA_DUMP_DIR
logger = logging.getLogger(__name__)
# In-memory state of the web shell (lost on restart except for what
# load_dumps() restores from the latest JSON snapshot on disk).
cmd_history = []   # ordered list of executed command ids
cmd_details = {}   # cmd_id -> full record of one executed command

global_vars = {
    'cmd_id': 0,        # monotonically increasing command counter
    'cmd_tmpl_id': 0,   # monotonically increasing template counter
    'tmpl_name2id': {}, # template name -> template id
    'tmpls_meta': {
        'version': 0,   # bumped on every template change; names the dump dir
    }
}
cmd_tmpls = {}  # template id -> {key, name, cmd, cmd_dir}

# Form field definitions rendered by shell_home; 'value' holds the last
# submitted value so the page re-displays it.
shell_args_data = {
    'cmd_dir': {
        'name': 'cmd_dir',
        'value': '/mnt/data/jkyang',
        'help': '',
    },
    'cmd': {
        'name': 'cmd',
        'value': '',
        'help': '',
    },
    'template_name': {
        'name': 'cmd_tmpl_name',
        'value': '',
        'help': '',
    },
    'output': {
        'name': 'output',
        'value': '',
        'help': 'hide',
    },
}
# Display order of the form fields on the page.
shell_args_order = [
    'cmd_dir',
    'cmd',
    'template_name',
]


def load_dumps():
    """Restore global_vars and cmd_tmpls from the newest dump directory.

    Silently does nothing when DATA_DUMP_DIR does not exist or holds no
    versions yet.  Relies on the zero-padded 'version-NNNNNN' naming so a
    plain lexicographic sort yields chronological order.
    """
    global global_vars, cmd_tmpls
    if not os.path.exists(DATA_DUMP_DIR):
        return
    versions = sorted(os.listdir(DATA_DUMP_DIR))
    if len(versions) < 1:
        return
    version_dir = os.path.join(DATA_DUMP_DIR, versions[-1])
    with open(os.path.join(version_dir, 'global_vars.json'), 'r') as fr:
        global_vars = json.load(fr)
    with open(os.path.join(version_dir, 'cmd_tmpls.json'), 'r') as fr:
        cmd_tmpls = json.load(fr)


# Restore persisted state once at import time.
load_dumps()
# print(json.dumps(global_vars, indent=4))
# print(json.dumps(cmd_tmpls, indent=4))
def shell_home(request,
               template_name='shell-home.html'):
    """Render the web-shell landing page: form state, history and templates."""
    context = {
        'shell_args': [copy.copy(shell_args_data[key]) for key in shell_args_order],
        'shell_output': shell_args_data['output']['value'],
        'cmds': [copy.copy(cmd_details[cid]) for cid in cmd_history],
        'cmd_tmpls': list(cmd_tmpls.values()),
    }
    return TemplateResponse(request, template_name, context)
def load_tmpl(request, tmpl_id):
    """Copy a saved template's command and directory into the shell form."""
    key = str(tmpl_id)
    tmpl = cmd_tmpls.get(key)
    if tmpl is not None:
        shell_args_data['cmd_dir']['value'] = tmpl['cmd_dir']
        shell_args_data['cmd']['value'] = tmpl['cmd']
        shell_args_data['output']['value'] = ''
    return redirect('shell_home')
def on_tmpls_updated():
    """Persist the template state as a new immutable version directory.

    Bumps the template version counter, then dumps both ``global_vars``
    and ``cmd_tmpls`` as JSON under ``DATA_DUMP_DIR/version-NNNNNN`` so
    that ``load_dumps`` can restore the latest snapshot on restart.
    """
    global_vars['tmpls_meta']['version'] += 1
    version_id = global_vars['tmpls_meta']['version']
    version_dir = os.path.join(DATA_DUMP_DIR, 'version-%06d' % version_id)
    # exist_ok avoids crashing when a directory for this version already
    # exists (e.g. left over from a previous partially-written dump).
    os.makedirs(version_dir, exist_ok=True)
    with open(os.path.join(version_dir, 'global_vars.json'), 'w') as fw:
        json.dump(global_vars, fw, indent=4)
    with open(os.path.join(version_dir, 'cmd_tmpls.json'), 'w') as fw:
        json.dump(cmd_tmpls, fw, indent=4)
def gen_cmd_id():
    """Return the next unique command id, advancing the global counter."""
    new_id = global_vars['cmd_id'] + 1
    global_vars['cmd_id'] = new_id
    return new_id
def gen_cmd_tmpl_id(cmd_tmpl_name):
    """Return the existing template id for *cmd_tmpl_name* or allocate one."""
    name2id = global_vars['tmpl_name2id']
    if cmd_tmpl_name in name2id:
        return name2id[cmd_tmpl_name]
    global_vars['cmd_tmpl_id'] += 1
    tid = str(global_vars['cmd_tmpl_id'])
    name2id[cmd_tmpl_name] = tid
    return tid
def brief_output(cmd_id, output):
    """Return an HTML preview of the first three output lines.

    When the output is longer, a "read more" link to the command's detail
    page is appended.
    """
    lines = output.split('\n')
    preview = '<br/>'.join(lines[:3])
    if len(lines) <= 3:
        return preview
    return preview + '... <a href="/webshell/cmds/%s"> read more </a>' % cmd_id
def shell_run(request):
    """Execute the submitted shell command and record its output.

    Only POST is accepted: any other method is redirected back to the
    home page.  (The original guard was ``pass``, which fell through and
    raised MultiValueDictKeyError on the missing form fields.)  When the
    "save as template" checkbox is ticked, the command is also stored as
    a named template and persisted to disk.
    """
    if request.method != 'POST':
        return redirect('shell_home')
    qd = request.POST
    cmd = qd['cmd']
    cmd_dir = qd['cmd_dir']
    # Persist as a named template when requested by the form.
    if qd.get('is_save_template') == 'true':
        cmd_tmpl_name = qd['cmd_tmpl_name']
        key = gen_cmd_tmpl_id(cmd_tmpl_name)
        cmd_tmpls[key] = {
            'key': key,
            'name': cmd_tmpl_name,
            'cmd': cmd,
            'cmd_dir': cmd_dir,
        }
        on_tmpls_updated()
    # Remember the submitted values so the home page re-renders them.
    shell_args_data['cmd_dir']['value'] = cmd_dir
    shell_args_data['cmd']['value'] = cmd
    cmd_id = gen_cmd_id()
    output = get_cmd_stdout(cmd, check=False, cmd_dir=cmd_dir)
    shell_args_data['output']['value'] = output
    cmd_details[cmd_id] = {
        'cmd': cmd,
        'cmd_args': copy.deepcopy(qd),
        'output_brief': brief_output(cmd_id, output),
        'output_raw': output,
        'output': output.replace('\n', '<br/>'),
        'notes': '',
        'mark': '',
    }
    cmd_history.append(cmd_id)
    return redirect('shell_home')
def cmd_detail(request, cmd_id,
               template_name='cmd-detail.html'):
    """Render the full-output detail page for a previously executed command."""
    return TemplateResponse(request, template_name, {'cmd': cmd_details[cmd_id]})
| StarcoderdataPython |
3221274 | from molsysmt._private.digestion import digest_item, digest_atom_indices, digest_structure_indices
def to_openmm_Modeller(item, atom_indices='all', structure_indices='all', check=True):
    """Convert a .gro file item into an openmm.Modeller via GromacsGroFile."""
    if check:
        digest_item(item, 'file:gro')
        atom_indices = digest_atom_indices(atom_indices)
        structure_indices = digest_structure_indices(structure_indices)

    from . import to_openmm_GromacsGroFile
    from ..openmm_GromacsGroFile import to_openmm_Modeller as gro_to_modeller

    # First convert to a GromacsGroFile, then to the Modeller with the
    # requested atom/structure selection (digestion already done above).
    gro_file = to_openmm_GromacsGroFile(item, check=False)
    return gro_to_modeller(gro_file, atom_indices=atom_indices,
                           structure_indices=structure_indices, check=False)
| StarcoderdataPython |
4805498 | from django.contrib import admin
# Register your models here.
from .models import Project, Position, UserCompletedProject
# Expose the project models in the Django admin site.
for model in (Project, Position, UserCompletedProject):
    admin.site.register(model)
| StarcoderdataPython |
3335421 | <filename>picture/migrations/0007_auto_20161013_2250.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-13 22:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: renames PaymentNote.url -> PaymentNote.name
    # and adds a required FK from PaymentNote to Picture.  The '' default
    # only satisfies the non-null constraint while migrating existing rows;
    # preserve_default=False drops it afterwards.

    dependencies = [
        ('picture', '0006_remove_paymentnote_type'),
    ]

    operations = [
        migrations.RenameField(
            model_name='paymentnote',
            old_name='url',
            new_name='name',
        ),
        migrations.AddField(
            model_name='paymentnote',
            name='picture',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='note', to='picture.Picture'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3353922 | <filename>PDF/utils.py
import random
from collections import defaultdict
import numpy as np
from keras import backend as K
from keras.models import Model
from configs import feature_constraints
def normalize(x):
    """Scale tensor *x* to (approximately) unit L2 norm, epsilon-stabilised."""
    l2 = K.sqrt(K.mean(K.square(x)))
    return x / (l2 + 1e-5)
def features_changed(gen_pdf, orig_pdf, feat_names):
    """Describe which features differ between a generated and original PDF.

    Both inputs are 2-D arrays whose first row holds the feature vector;
    returns a string of "name: delta " fragments for every changed feature.
    """
    deltas = gen_pdf[0] - orig_pdf[0]
    changed_idx = np.nonzero(deltas)
    names = np.array(feat_names)
    fragments = ['{}: {} '.format(name, delta)
                 for name, delta in zip(names[changed_idx], deltas[changed_idx])]
    return ''.join(fragments)
def init_feature_constraints(feat_names):
    """Map configured constraint feature names to their column indices.

    Returns (increment-only indices, increment-or-decrement indices).
    """
    incre_idx = [feat_names.index(name) for name in feature_constraints.increment]
    incre_decre_idx = [feat_names.index(name) for name in feature_constraints.incre_decre]
    return incre_idx, incre_decre_idx
def constraint(gradients, incre_idx, incre_decre_idx):
    """Zero out gradient components that violate the feature constraints.

    Features in *incre_decre_idx* keep their full gradient; features in
    *incre_idx* keep only the positive part (they may only be increased);
    every other component becomes zero.
    """
    constrained = np.zeros_like(gradients)
    constrained[..., incre_decre_idx] = gradients[:, incre_decre_idx]
    # clip negatives to zero: increment-only features cannot decrease
    clipped = np.clip(gradients, 0, None)
    constrained[..., incre_idx] = clipped[:, incre_idx]
    return constrained
def init_coverage_tables(model1, model2, model3):
    """Build one (layer_name, neuron_index) -> covered? table per model."""
    tables = []
    for model in (model1, model2, model3):
        table = defaultdict(bool)
        init_dict(model, table)
        tables.append(table)
    return tuple(tables)
def init_dict(model, model_layer_dict):
    """Register every coverage-relevant neuron of *model* as uncovered."""
    for layer in model.layers:
        name = layer.name
        # flatten/input layers carry no activations worth tracking
        if 'flatten' in name or 'input' in name:
            continue
        for idx in range(num_neurons(layer.output_shape)):
            model_layer_dict[(name, idx)] = False
def neuron_to_cover(model_layer_dict):
    """Pick a (layer_name, neuron_index) key to target next.

    Prefers a uniformly random not-yet-covered neuron; when everything is
    covered, falls back to any neuron.  ``random.choice`` requires a
    sequence, so the keys are materialised into a list first (the original
    passed ``dict.keys()`` directly, which raises TypeError on Python 3).
    """
    not_covered = [key for key, covered in model_layer_dict.items() if not covered]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(list(model_layer_dict))
    return layer_name, index
def neuron_covered(model_layer_dict):
    """Return (covered count, total count, covered ratio) for a coverage table."""
    covered = sum(1 for flag in model_layer_dict.values() if flag)
    total = len(model_layer_dict)
    return covered, total, covered / float(total)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max rescale an activation array into the range [rmin, rmax]."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    normed = (intermediate_layer_output - lo) / (hi - lo)
    return normed * (rmax - rmin) + rmin
def update_coverage(input_data, model, model_layer_dict, model_layer_hl_dict, test_only=False, threshold=0):
    """Update SNAC and neuron-coverage bookkeeping for one input batch.

    Runs *model* on *input_data*, inspects the first sample's activations of
    every non-flatten/non-input layer, and flips the corresponding entries in
    the selected coverage sub-dicts of *model_layer_dict* to True:

    - SNAC: activation exceeds the neuron's recorded high boundary taken
      from ``model_layer_hl_dict[(layer, neuron)]`` (a (low, high) pair;
      low is unused here).
    - NC: activation exceeds the fixed *threshold*.

    ``test_only`` selects the "*_test" sub-dicts instead of the training ones.
    """
    if test_only:
        snac_dict = model_layer_dict["snac_test"]
        nc_dict = model_layer_dict["nc_test"]
    else:
        snac_dict = model_layer_dict["snac"]
        nc_dict = model_layer_dict["nc"]

    layer_names = [layer.name for layer in model.layers
                   if 'flatten' not in layer.name and 'input' not in layer.name]

    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)

    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        layer = intermediate_layer_output[0]
        # range (not the Python-2-only xrange) keeps this working on Python 3
        for neuron in range(num_neurons(layer.shape)):
            _, high = model_layer_hl_dict[(layer_names[i], neuron)]
            value = layer[np.unravel_index(neuron, layer.shape)]
            # SNAC: activation above the neuron's observed high boundary
            if value > high and not snac_dict[(layer_names[i], neuron)]:
                snac_dict[(layer_names[i], neuron)] = True
            # plain neuron coverage: activation above the fixed threshold
            if value > threshold and not nc_dict[(layer_names[i], neuron)]:
                nc_dict[(layer_names[i], neuron)] = True
def num_neurons(shape):
    """Return the number of scalar neurons in a layer output shape.

    ``None`` entries (the batch dimension) are ignored.  Unlike the original
    ``reduce``/``filter`` version this needs no Python-2 ``reduce`` builtin
    (a NameError on Python 3) and returns 1 for a shape with no concrete
    dimensions instead of raising TypeError.
    """
    total = 1
    for dim in shape:
        if dim is not None:
            total *= dim
    return total
def full_coverage(model_layer_dict):
    """Return True when no neuron in the coverage table is still uncovered."""
    return False not in model_layer_dict.values()
def fired(model, layer_name, index, input_data, threshold=0):
    """Return True when neuron *index* of *layer_name* activates above *threshold*.

    The first sample's activations are min-max scaled before thresholding.
    """
    probe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    activations = probe.predict(input_data)[0]
    scaled = scale(activations)
    return bool(np.mean(scaled[..., index]) > threshold)
def diverged(predictions1, predictions2, predictions3, target):
    """Return True when the three model predictions disagree.

    *target* is accepted for interface compatibility but not used (see the
    commented-out stricter criterion in the original source).
    """
    return not (predictions1 == predictions2 == predictions3)
| StarcoderdataPython |
3366294 | import threading
import time
from nephelae_base.types import NavigationRef
from nephelae_base.types import Position
from nephelae_base.types import SensorSample
from nephelae_base.types import MultiObserverSubject
from .SpatializedDatabase import SpatializedDatabase
from .SpatializedDatabase import SpbEntry
# from nephelae_base.types import ObserverSubject
class NephelaeDataServer(SpatializedDatabase):
    """Spatialized database specialized for the Nephelae project.

    Stores UAV GPS fixes and sensor samples as tagged entries (for later
    spatial/temporal retrieval) and notifies registered observers on each
    insertion.  /!\ Find better name ?
    """

    def __init__(self):
        super().__init__()
        # navigation reference frame; positions are stored relative to it.
        # It can also be replaced via set_navigation_frame (possibly with
        # None before a frame is known — the add_* methods guard on this).
        self.navFrame = NavigationRef()
        # dispatches add_gps / add_sample notifications to observers
        self.observerSet = MultiObserverSubject(['add_gps', 'add_sample'])
        self.uavIds = []          # distinct UAV ids seen so far
        self.variableNames = []   # distinct sensor variable names seen so far

        # For debug, to be removed
        self.gps = []
        self.samples = []

    def set_navigation_frame(self, navFrame):
        """Set the navigation reference frame used to localize entries."""
        self.navFrame = navFrame

    def add_gps(self, gps):
        """Notify observers of a GPS fix and store it tagged [uavId, 'GPS']."""
        self.observerSet.add_gps(gps)
        # without a navigation frame the position cannot be computed yet
        if self.navFrame is None:
            return
        uavId = str(gps.uavId)
        if uavId not in self.uavIds:
            self.uavIds.append(uavId)
        self.gps.append(gps)
        tags=[uavId, 'GPS']
        # gps - navFrame yields the position relative to the frame
        self.insert(SpbEntry(gps, gps - self.navFrame, tags))

    def add_sample(self, sample):
        """Notify observers of a sensor sample and store it tagged 'SAMPLE'."""
        # sample assumed to comply with nephelae_base.types.sensor_sample
        self.observerSet.add_sample(sample)
        if self.navFrame is None:
            return
        self.samples.append(sample)
        tags=[str(sample.producer),
              str(sample.variableName),
              'SAMPLE']
        self.insert(SpbEntry(sample, sample.position, tags))
        if str(sample.variableName) not in self.variableNames:
            self.variableNames.append(str(sample.variableName))

    def add_gps_observer(self, observer):
        """Subscribe *observer* to GPS notifications."""
        self.observerSet.attach_observer(observer, 'add_gps')

    def add_sensor_observer(self, observer):
        """Subscribe *observer* to sensor-sample notifications."""
        self.observerSet.attach_observer(observer, 'add_sample')

    def remove_gps_observer(self, observer):
        """Unsubscribe *observer* from GPS notifications."""
        self.observerSet.detach_observer(observer, 'add_gps')

    def remove_sensor_observer(self, observer):
        """Unsubscribe *observer* from sensor-sample notifications."""
        self.observerSet.detach_observer(observer, 'add_sample')

    def __getstate__(self):
        # serialize own metadata plus the parent database payload
        serializedItems = {}
        serializedItems['navFrame'] = self.navFrame
        serializedItems['uavIds'] = self.uavIds
        serializedItems['variableNames'] = self.variableNames
        serializedItems['data'] = super().__getstate__()
        return serializedItems

    def __setstate__(self, data):
        try:
            self.navFrame = data['navFrame']
            # uavIds / variableNames may be absent in dumps from older versions
            if 'uavIds' in data.keys():
                self.uavIds = data['uavIds']
            else:
                self.uavIds = []
            if 'variableNames' in data.keys():
                self.variableNames = data['variableNames']
            else:
                self.variableNames = []
            super().__setstate__(data['data'])
        except Exception as e:
            print("Exception happenned during database load."
                  "File is probably corrupted or is of an older version.")
            raise e
class DatabasePlayer(NephelaeDataServer):
    """Replay messages stored in a NephelaeDataServer save.

    As a subclass of NephelaeDataServer it can be used as a data server
    for mapping and interface testing: every stored GPS fix and sensor
    sample is re-emitted through add_gps / add_sample when its timestamp
    (scaled by ``timeFactor``) is reached.
    """

    def __init__(self, databasePath, timeFactor=1.0, granularity=0.005):
        """
        databasePath : path of a database dump readable by SpatializedDatabase.load
        timeFactor   : replay speed multiplier (2.0 -> twice as fast)
        granularity  : polling period of the replay loop, in seconds
        """
        super().__init__()
        self.origin = SpatializedDatabase.load(databasePath)
        self.timeFactor = timeFactor
        self.granularity = granularity
        self.running = False
        self.currentTime = 0.0
        self.replayData = []
        self.replayThread = None
        self.replayLock = threading.Lock()
        self.looped = False
        self.set_navigation_frame(self.origin.navFrame)

    def play(self, looped=False):
        """Start the replay thread (no-op with a hint if already running)."""
        if not self.running:
            self.looped = looped
            self.restart()
        else:
            print("Replay already running.",
                  "Call 'restart' if you want to start it again")

    def stop(self):
        """Stop the replay thread and wait for it to terminate."""
        if self.running and self.replayThread is not None:
            print("Stopping replay... ", end='')
            self.looped = False
            self.running = False
            self.replayThread.join()
            print("Done.")

    def restart(self):
        """Restart replay from the beginning, spawning the thread if needed."""
        if self.running:
            self.init_replay()
        else:
            self.replayThread = threading.Thread(target=self.run)
            self.replayThread.start()

    def init_replay(self):
        """Reset the replay clock and reload the time-sorted entry list."""
        with self.replayLock:
            self.currentTime = 0.0
            sourceList = self.origin.taggedData['ALL'].tSorted
            self.replayData = [entry for entry in sourceList]

    def run(self):
        """Replay loop: emit each stored entry once its (scaled) time is reached."""
        self.init_replay()
        lastTime = time.time()
        self.running = True
        while self.running and self.replayData:
            with self.replayLock:
                ellapsed = time.time() - lastTime
                self.currentTime = self.currentTime + self.timeFactor*ellapsed
                while self.replayData[0].index <= self.currentTime:
                    self.process_replayed_entry(self.replayData[0].data)
                    self.replayData.pop(0)
                    if not self.replayData:
                        break
                lastTime = lastTime + ellapsed
            time.sleep(self.granularity)
        self.running = False
        if self.looped:
            # Bug fix: the original called self.init_data(), a method that
            # does not exist, so looping crashed the replay thread with
            # AttributeError.  restart() (running is False here) spawns a
            # fresh thread whose run() re-initializes via init_replay().
            self.restart()

    def process_replayed_entry(self, entry):
        """Dispatch one stored entry to add_gps or add_sample based on its tags."""
        if 'GPS' in entry.tags:
            self.add_gps(entry.data)
        elif 'SAMPLE' in entry.tags:
            self.add_sample(entry.data)
        else:
            raise ValueError("No GPS or SAMPLE tag found in entry."+
                             "Are you using a valid database ?")
| StarcoderdataPython |
115712 | import itertools
import logging
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import tqdm
from scipy.spatial import KDTree
from shapely.geometry import LineString, Point, Polygon
from delft3dfmpy.converters import hydamo_to_dflowrr
from delft3dfmpy.core import checks, geometry
from delft3dfmpy.datamodels.common import ExtendedGeoDataFrame
from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim
from delft3dfmpy.io import drrreader
logger = logging.getLogger(__name__)
class DFlowRRModel:
    """Main data structure for RR-model in DflowFM. Contains subclasses
    for unpaved, paved, greenhouse and open water nodes and external
    forcings (seepage, precipitation, evaporation).
    """

    def __init__(self):
        # D3B (Sobek-RR engine) run parameters — presumably filled by a reader
        self.d3b_parameters = {}
        # node containers; each keeps a back-reference to this model
        self.unpaved = Unpaved(self)
        self.paved = Paved(self)
        self.greenhouse = Greenhouse(self)
        self.openwater = Openwater(self)
        self.external_forcings = ExternalForcings(self)
        # path to the DIMR runner (set externally before writing the model)
        self.dimr_path = ''
class ExternalForcings:
    """
    Class for external forcings, which contains the boundary
    conditions and the initial conditions.
    """

    def __init__(self, dflowrrmodel):
        # Point to relevant attributes from parent
        self.dflowrrmodel = dflowrrmodel
        self.io = drrreader.ExternalForcingsIO(self)
        # registries keyed by node / meteo-area id
        self.boundary_nodes = {}
        self.seepage = {}
        self.precip = {}
        self.evap = {}

    def add_precip(self, id, series):
        """Register a precipitation time series under meteo-area *id*."""
        self.precip[id] = {
            'precip' : series
        }

    def add_evap(self, id, series):
        """Register an evaporation time series under meteo-area *id*."""
        self.evap[id] = {
            'evap' : series
        }

    def add_seepage(self, id, series):
        """Register a seepage time series under node *id*."""
        self.seepage[id] = {
            'seepage' : series
        }

    def add_boundary_node(self, id, px, py):
        """Register a boundary node *id* at coordinates (px, py)."""
        self.boundary_nodes[id] = {
            'id' : id,
            'px' : px,
            'py' : py
        }
class Unpaved:
    """
    Class for unpaved nodes
    """

    def __init__(self, dflowrrmodel):
        # Point to relevant attributes from parent
        self.dflowrrmodel = dflowrrmodel
        # initialize a dataframe for every type of nodes related to 'unpaved'
        self.unp_nodes = {}
        self.ernst_defs = {}
        self.io = drrreader.UnpavedIO(self)

    def add_unpaved(self,id, total_area, lu_areas, surface_level, soiltype, surface_storage, infiltration_capacity, initial_gwd, meteo_area, px, py, boundary_node):
        """Register one unpaved node as a Sobek-RR style keyed record.

        The short keys are the RR input-file field codes (e.g. ar=land-use
        areas, ga=total area, lv=surface level, ic=infiltration capacity,
        ms=meteo station, px/py=coordinates) — presumably consumed by
        drrreader.UnpavedIO; confirm against the RR file format docs.
        """
        self.unp_nodes[id] = {
            'id' : 'unp_'+id,
            'na' : '16',
            'ar' : lu_areas,
            'ga' : total_area,
            'lv' : surface_level,
            'co' : '3',
            'su' : '0',
            'sd' : surface_storage,
            'sp' : 'sep_'+id,
            'ic' : infiltration_capacity,
            'ed' : 'ernst_'+id,
            'bt' : soiltype,
            'ig' : initial_gwd,
            'mg' : surface_level,
            'gl' : '1.5',
            'is' : '0',
            'ms' : 'ms_'+meteo_area,
            'px': px,
            'py': py,
            'boundary_node': boundary_node
        }

    def add_ernst_def(self,id, cvo, lv, cvi, cvs):
        """Register an Ernst drainage-resistance definition for node *id*."""
        self.ernst_defs[id] = {
            'id' : 'ernst_'+id,
            'cvi' : cvi,
            'cvs' : cvs,
            'cvo' : cvo,
            'lv' : lv
        }
class Paved:
    """
    Class for paved nodes.
    """

    def __init__(self, dflowrrmodel):
        # Point to relevant attributes from parent
        self.dflowrrmodel = dflowrrmodel
        self.pav_nodes = {}
        self.io = drrreader.PavedIO(self)
        self.node_geom = {}
        self.link_geom = {}

    #PAVE id 'pav_Nde_n003' ar 16200 lv 1 sd '1' ss 0 qc 0 1.94E-05 0 qo 2 2 ms 'Station1' aaf 1 is 0 np 0 dw '1' ro 0 ru 0 qh '' pave#
    def add_paved(self,id, area, surface_level, street_storage, sewer_storage, pump_capacity, meteo_area, px, py, boundary_node):
        """Register one paved node (keys follow the RR PAVE record above)."""
        self.pav_nodes[id] = {
            'id' : 'pav_'+id,
            'ar' : area,
            'lv' : surface_level,
            'qc' : pump_capacity,
            'strs' : street_storage,
            'sews' : sewer_storage,
            'ms' : 'ms_'+meteo_area,
            'is' : '0',
            'np' : '0',
            'ro' : '0',
            'ru' : '0',
            'px': px,
            'py': py,
            'boundary_node': boundary_node
        }
class Greenhouse:
    """
    Class for greenhouse nodes
    """

    def __init__(self, dflowrrmodel):
        self.dflowrrmodel = dflowrrmodel
        self.gh_nodes = {}
        # Create the io class
        self.io = drrreader.GreenhouseIO(self)

    # GRHS id '1' na 10 ar 1000. 0. 0. 3000. 0. 0. 0. 0. 0. 0. sl 1.0 as 0. sd 'roofstor 1mm' si
    # 'silo typ1' ms 'meteostat1' is 50.0 grhs
    def add_greenhouse(self, id, area, surface_level, roof_storage, meteo_area, px, py, boundary_node):
        """Register one greenhouse node (keys follow the RR GRHS record above)."""
        self.gh_nodes[id] = {
            'id': 'gh_'+id,
            'ar' : area,
            'sl': surface_level,
            'sd': roof_storage,
            'ms' : 'ms_'+meteo_area,
            'is' : '0',
            'px': px,
            'py': py,
            'boundary_node': boundary_node
        }
class Openwater:
    """
    Class for open water nodes
    """

    def __init__(self, dflowrrmodel):
        self.dflowrrmodel = dflowrrmodel
        self.ow_nodes = {}
        # Create the io class
        self.io = drrreader.OpenwaterIO(self)

    def add_openwater(self, id, area, meteo_area, px, py, boundary_node):
        """Register one open-water node (ar=area, ms=meteo station, px/py=coords)."""
        self.ow_nodes[id] = {
            'id': 'ow_'+id,
            'ar' : area,
            'ms' : 'ms_'+meteo_area,
            'px': px,
            'py': py,
            'boundary_node': boundary_node
        }
| StarcoderdataPython |
3269808 | <reponame>PavriLab/classifyIS-nf
#!/usr/bin/env python
import pandas as pd
import argparse as ap
import numpy as np
import logging
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

    parser = ap.ArgumentParser(description='''converts readcounts of IS to log2(RPM) and assigns
                                              each peak a class according to the respective signal
                                              intensities in each condition''')
    parser.add_argument('-ct', '--countTable', required = True,
                        help = '''tab-separated table holding the readquantification information
                                  as generated by deeptools multiBamSummary''')
    parser.add_argument('-b', '--bed', required = True,
                        help = 'BEDfile used to generate counttable')
    parser.add_argument('-o', '--outputFile', required = True,
                        help = 'name of the output file')
    parser.add_argument('-FC', '--foldChange', default = 0.585, type = float,
                        help = 'foldchange threshold for differential regulation determination in log2(RPM)')
    parser.add_argument('-wt', '--wtcolumn', required = True,
                        help = 'name of the column in --counttable holding the WT counts ')
    parser.add_argument('-kd', '--kdcolumn', required = True,
                        help = 'name of the column in --counttable holding the KD counts ')
    parser.add_argument('-t', '--thresholdDA', default = 2, type = float,
                        help = 'cutoff to use to define dormant/absent origins in log2(RPM)')
    args = parser.parse_args()

    logging.info('reading counts %s' % args.countTable)
    counts = pd.read_csv(args.countTable, sep = '\t', header = None, skiprows = 1,
                         names = ['chr', 'start', 'end', args.wtcolumn, args.kdcolumn])
    counts.sort_values(by=['chr', 'start'], inplace = True)
    counts.reset_index(drop = True, inplace = True)

    # attach the interval names from the BED file to the count rows
    bed = pd.read_csv(args.bed, sep = '\t', header = None, usecols = [0, 1, 2, 3],
                      names = ['chr', 'start', 'end', 'name'])
    counts = counts.merge(bed, on = ['chr', 'start', 'end'], how = 'left')

    logging.info('computing log2(RPM) values')
    # zero counts are set to 1 first so log2 is defined (pseudo-count)
    for col in [args.wtcolumn, args.kdcolumn]:
        counts.loc[counts[col] == 0, col] = 1
        counts.loc[:, col] = np.log2((counts[col]*1000000)/counts[col].sum())

    # class codes: 1=dormant, 2=upregulated, 3=unchanged, 4=downregulated, 5=absent
    logging.info('assigning regulation class')
    counts['class'] = 0
    fc = args.foldChange
    # dormant
    counts.loc[counts[args.wtcolumn] < counts[counts[args.kdcolumn] < args.thresholdDA][args.wtcolumn].min(), 'class'] = 1
    # absent
    counts.loc[counts[args.kdcolumn] < counts[counts[args.wtcolumn] < args.thresholdDA][args.kdcolumn].min(), 'class'] = 5
    # upregulated
    counts.loc[(counts['class'] == 0) & (counts[args.kdcolumn] > fc + counts[args.wtcolumn]), 'class'] = 2
    # downregulated
    counts.loc[(counts['class'] == 0) & (counts[args.wtcolumn] > fc + counts[args.kdcolumn]), 'class'] = 4
    # unchanged
    counts.loc[counts['class'] == 0, 'class'] = 3

    counts = counts.loc[:, ['chr', 'start', 'end', 'name', args.wtcolumn, args.kdcolumn, 'class']]
    counts.to_csv(args.outputFile, sep = '\t', index = False)
| StarcoderdataPython |
3201471 | <filename>spotify_dashboard/spotify/migrations/0007_auto_20200322_1350.py
# Generated by Django 2.2.11 on 2020-03-22 13:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration introducing the Artist -> Album -> Track
    # hierarchy (each level a cascading FK to its parent).

    dependencies = [
        ('spotify', '0006_update'),
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='Track',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=500)),
                ('duration_ms', models.IntegerField()),
                ('track_number', models.IntegerField()),
                ('played_at', models.DateTimeField()),
                ('image', models.URLField()),
                ('href', models.URLField()),
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spotify.Album')),
            ],
        ),
        # Album.artist is added after Album exists because Artist is created later
        migrations.AddField(
            model_name='album',
            name='artist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spotify.Artist'),
        ),
    ]
| StarcoderdataPython |
1752892 | import asyncio
from threading import Thread
import pytest
from slack_sdk.web.async_client import AsyncWebClient
from slack_bolt.adapter.socket_mode.websockets import AsyncSocketModeHandler
from slack_bolt.app.async_app import AsyncApp
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
from ...adapter_tests.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
)
class TestSocketModeWebsockets:
    """End-to-end check that Socket Mode over websockets delivers shortcut
    and slash-command payloads to an AsyncApp's registered handlers."""

    valid_token = "<PASSWORD>"
    # all Web API calls are pointed at the local mock server, not slack.com
    mock_api_server_base_url = "http://localhost:8888"
    web_client = AsyncWebClient(
        token=valid_token,
        base_url=mock_api_server_base_url,
    )

    @pytest.fixture
    def event_loop(self):
        # strip Slack-related env vars so they cannot leak into the app under test
        old_os_env = remove_os_env_temporarily()
        try:
            setup_mock_web_api_server(self)
            loop = asyncio.get_event_loop()
            yield loop
            loop.close()
            cleanup_mock_web_api_server(self)
        finally:
            restore_os_env(old_os_env)

    @pytest.mark.asyncio
    async def test_events(self):
        # NOTE(review): start_socket_mode_server(self, 3022) presumably returns
        # the server's run callable (it is passed as the Thread target) — confirm
        t = Thread(target=start_socket_mode_server(self, 3022))
        t.daemon = True
        t.start()
        await asyncio.sleep(1)  # wait for the server

        app = AsyncApp(client=self.web_client)
        # flags flipped by the handlers so we can assert delivery below
        result = {"shortcut": False, "command": False}

        @app.shortcut("do-something")
        async def shortcut_handler(ack):
            result["shortcut"] = True
            await ack()

        @app.command("/hello-socket-mode")
        async def command_handler(ack):
            result["command"] = True
            await ack()

        handler = AsyncSocketModeHandler(
            app_token="<PASSWORD>",
            app=app,
        )
        try:
            # point the Socket Mode client at the local mock websocket server
            handler.client.wss_uri = "ws://localhost:3022/link"
            await handler.connect_async()
            await asyncio.sleep(2)  # wait for the message receiver
            await handler.client.send_message("foo")
            await asyncio.sleep(2)
            assert result["shortcut"] is True
            assert result["command"] is True
        finally:
            await handler.client.close()
            self.server.stop()
            self.server.close()
| StarcoderdataPython |
3264112 | import tensorflow as tf
from . import custom_layers
class Discriminator(object):
"""Discriminator that takes image input and outputs logits.
Attributes:
name: str, name of `Discriminator`.
kernel_regularizer: `l1_l2_regularizer` object, regularizar for
kernel variables.
bias_regularizer: `l1_l2_regularizer` object, regularizar for bias
variables.
params: dict, user passed parameters.
alpha_var: variable, alpha for weighted sum of fade-in of layers.
input_layers: list, `Input` layers for each resolution of image.
from_rgb_conv_layers: list, `Conv2D` fromRGB layers.
from_rgb_leaky_relu_layers: list, leaky relu layers that follow
`Conv2D` fromRGB layers.
conv_layers: list, `Conv2D` layers.
leaky_relu_layers: list, leaky relu layers that follow `Conv2D`
layers.
growing_downsample_layers: list, `AveragePooling2D` layers for growing
branch.
shrinking_downsample_layers: list, `AveragePooling2D` layers for
shrinking branch.
minibatch_stddev_layer: `MiniBatchStdDev` layer, applies minibatch
stddev to image to add an additional feature channel based on the
sample.
flatten_layer: `Flatten` layer, flattens image for logits layer.
logits_layer: `Dense` layer, used for calculating logits.
models: list, instances of discriminator `Model`s for each growth.
"""
def __init__(
self,
kernel_regularizer,
bias_regularizer,
name,
params,
alpha_var,
num_growths
):
"""Instantiates and builds discriminator network.
Args:
kernel_regularizer: `l1_l2_regularizer` object, regularizar for
kernel variables.
bias_regularizer: `l1_l2_regularizer` object, regularizar for bias
variables.
name: str, name of discriminator.
params: dict, user passed parameters.
alpha_var: variable, alpha for weighted sum of fade-in of layers.
num_growths: int, number of growth phases for model.
"""
# Set name of discriminator.
self.name = name
# Store regularizers.
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
# Store parameters.
self.params = params
# Store reference to alpha variable.
self.alpha_var = alpha_var
# Store lists of layers.
self.input_layers = []
self.from_rgb_conv_layers = []
self.from_rgb_leaky_relu_layers = []
self.conv_layers = []
self.leaky_relu_layers = []
self.growing_downsample_layers = []
self.shrinking_downsample_layers = []
self.minibatch_stddev_layer = None
self.flatten_layer = None
self.logits_layer = None
# Instantiate discriminator layers.
self._create_discriminator_layers()
# Store list of discriminator models.
self.models = self._create_models(num_growths)
##########################################################################
##########################################################################
##########################################################################
def _create_input_layers(self):
"""Creates discriminator input layers for each image resolution.
Returns:
List of `Input` layers.
"""
height, width = self.params["generator_projection_dims"][0:2]
# Create list to hold `Input` layers.
input_layers = [
tf.keras.Input(
shape=(height * 2 ** i, width * 2 ** i, self.params["depth"]),
name="{}_{}x{}_inputs".format(
self.name, height * 2 ** i, width * 2 ** i
)
)
for i in range(len(self.params["discriminator_from_rgb_layers"]))
]
return input_layers
def _create_from_rgb_layers(self):
"""Creates discriminator fromRGB layers of 1x1 convs.
Returns:
List of fromRGB 1x1 conv layers and leaky relu layers.
"""
# Get fromRGB layer properties.
from_rgb = [
self.params["discriminator_from_rgb_layers"][i][0][:]
for i in range(
len(self.params["discriminator_from_rgb_layers"])
)
]
# Create list to hold toRGB 1x1 convs.
from_rgb_conv_layers = [
custom_layers.WeightScaledConv2D(
filters=from_rgb[i][3],
kernel_size=from_rgb[i][0:2],
strides=from_rgb[i][4:6],
padding="same",
activation=None,
kernel_initializer=(
tf.random_normal_initializer(mean=0., stddev=1.0)
if self.params["use_equalized_learning_rate"]
else "he_normal"
),
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
use_equalized_learning_rate=(
self.params["use_equalized_learning_rate"]
),
name="{}_from_rgb_layers_conv2d_{}_{}x{}_{}_{}".format(
self.name,
i,
from_rgb[i][0],
from_rgb[i][1],
from_rgb[i][2],
from_rgb[i][3]
)
)
for i in range(len(from_rgb))
]
from_rgb_leaky_relu_layers = [
tf.keras.layers.LeakyReLU(
alpha=self.params["discriminator_leaky_relu_alpha"],
name="{}_from_rgb_layers_leaky_relu_{}".format(self.name, i)
)
for i in range(len(from_rgb))
]
return from_rgb_conv_layers, from_rgb_leaky_relu_layers
def _create_base_conv_layer_block(self):
"""Creates discriminator base conv layer block.
Returns:
List of base block conv layers and list of leaky relu layers.
"""
# Get conv block layer properties.
conv_block = self.params["discriminator_base_conv_blocks"][0]
# Create list of base conv layers.
base_conv_layers = [
custom_layers.WeightScaledConv2D(
filters=conv_block[i][3],
kernel_size=conv_block[i][0:2],
strides=conv_block[i][4:6],
padding="same",
activation=None,
kernel_initializer=(
tf.random_normal_initializer(mean=0., stddev=1.0)
if self.params["use_equalized_learning_rate"]
else "he_normal"
),
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
use_equalized_learning_rate=(
self.params["use_equalized_learning_rate"]
),
name="{}_base_layers_conv2d_{}_{}x{}_{}_{}".format(
self.name,
i,
conv_block[i][0],
conv_block[i][1],
conv_block[i][2],
conv_block[i][3]
)
)
for i in range(len(conv_block) - 1)
]
# Have valid padding for layer just before flatten and logits.
base_conv_layers.append(
custom_layers.WeightScaledConv2D(
filters=conv_block[-1][3],
kernel_size=conv_block[-1][0:2],
strides=conv_block[-1][4:6],
padding="valid",
activation=None,
kernel_initializer=(
tf.random_normal_initializer(mean=0., stddev=1.0)
if self.params["use_equalized_learning_rate"]
else "he_normal"
),
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
use_equalized_learning_rate=(
self.params["use_equalized_learning_rate"]
),
name="{}_base_layers_conv2d_{}_{}x{}_{}_{}".format(
self.name,
len(conv_block) - 1,
conv_block[-1][0],
conv_block[-1][1],
conv_block[-1][2],
conv_block[-1][3]
)
)
)
base_leaky_relu_layers = [
tf.keras.layers.LeakyReLU(
alpha=self.params["discriminator_leaky_relu_alpha"],
name="{}_base_layers_leaky_relu_{}".format(self.name, i)
)
for i in range(len(conv_block))
]
return base_conv_layers, base_leaky_relu_layers
def _create_growth_conv_layer_block(self, block_idx):
"""Creates discriminator growth conv layer block.
Args:
block_idx: int, the current growth block's index.
Returns:
List of growth block's conv layers and list of growth block's
leaky relu layers.
"""
# Get conv block layer properties.
conv_block = (
self.params["discriminator_growth_conv_blocks"][block_idx]
)
# Create new growth convolutional layers.
growth_conv_layers = [
custom_layers.WeightScaledConv2D(
filters=conv_block[i][3],
kernel_size=conv_block[i][0:2],
strides=conv_block[i][4:6],
padding="same",
activation=None,
kernel_initializer=(
tf.random_normal_initializer(mean=0., stddev=1.0)
if self.params["use_equalized_learning_rate"]
else "he_normal"
),
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
use_equalized_learning_rate=(
self.params["use_equalized_learning_rate"]
),
name="{}_growth_layers_conv2d_{}_{}_{}x{}_{}_{}".format(
self.name,
block_idx,
i,
conv_block[i][0],
conv_block[i][1],
conv_block[i][2],
conv_block[i][3]
)
)
for i in range(len(conv_block))
]
growth_leaky_relu_layers = [
tf.keras.layers.LeakyReLU(
alpha=self.params["discriminator_leaky_relu_alpha"],
name="{}_growth_layers_leaky_relu_{}_{}".format(
self.name, block_idx, i
)
)
for i in range(len(conv_block))
]
return growth_conv_layers, growth_leaky_relu_layers
def _create_downsample_layers(self):
"""Creates discriminator downsample layers.
Returns:
Lists of AveragePooling2D layers for growing and shrinking
branches.
"""
# Create list to hold growing branch's downsampling layers.
growing_downsample_layers = [
tf.keras.layers.AveragePooling2D(
pool_size=(2, 2),
strides=(2, 2),
name="{}_growing_average_pooling_2d_{}".format(
self.name, i - 1
)
)
for i in range(
1, len(self.params["discriminator_from_rgb_layers"])
)
]
# Create list to hold shrinking branch's downsampling layers.
shrinking_downsample_layers = [
tf.keras.layers.AveragePooling2D(
pool_size=(2, 2),
strides=(2, 2),
name="{}_shrinking_average_pooling_2d_{}".format(
self.name, i - 1
)
)
for i in range(
1, len(self.params["discriminator_from_rgb_layers"])
)
]
return growing_downsample_layers, shrinking_downsample_layers
    def _create_discriminator_layers(self):
        """Creates discriminator layers.

        Builds and stores on `self` every layer the discriminator needs:
        per-resolution input layers, fromRGB conv/LeakyReLU pairs, the base
        conv block, one growth conv block per progressive-growing phase,
        the downsample branches, and the minibatch-stddev/flatten/logits
        head. Takes no arguments; all configuration comes from
        `self.params`.
        """
        # Create input layers for each image resolution.
        self.input_layers = self._create_input_layers()

        # One fromRGB conv + activation pair per resolution.
        (self.from_rgb_conv_layers,
         self.from_rgb_leaky_relu_layers) = self._create_from_rgb_layers()

        # The base conv block is always present and sits at index 0 of
        # self.conv_layers / self.leaky_relu_layers.
        (base_conv_layers,
         base_leaky_relu_layers) = self._create_base_conv_layer_block()
        self.conv_layers.append(base_conv_layers)
        self.leaky_relu_layers.append(base_leaky_relu_layers)

        # Growth blocks follow the base block, one per growth phase.
        for block_idx in range(
            len(self.params["discriminator_growth_conv_blocks"])
        ):
            (growth_conv_layers,
             growth_leaky_relu_layers
             ) = self._create_growth_conv_layer_block(block_idx)
            self.conv_layers.append(growth_conv_layers)
            self.leaky_relu_layers.append(growth_leaky_relu_layers)

        # 2x average-pooling downsamplers for the growing and shrinking
        # input branches used during growth transitions.
        (self.growing_downsample_layers,
         self.shrinking_downsample_layers) = self._create_downsample_layers()

        # Minibatch-stddev layer runs just before the base conv block
        # (see _create_base_block_and_logits).
        self.minibatch_stddev_layer = custom_layers.MiniBatchStdDev(
            params={
                "use_minibatch_stddev": self.params["discriminator_use_minibatch_stddev"],
                "group_size": self.params["discriminator_minibatch_stddev_group_size"],
                "use_averaging": self.params["discriminator_minibatch_stddev_use_averaging"]
            }
        )

        # Flatten final conv features and project to a single logit.
        self.flatten_layer = tf.keras.layers.Flatten()
        self.logits_layer = custom_layers.WeightScaledDense(
            units=1,
            activation=None,
            kernel_initializer=(
                tf.random_normal_initializer(mean=0., stddev=1.0)
                if self.params["use_equalized_learning_rate"]
                else "he_normal"
            ),
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            use_equalized_learning_rate=(
                self.params["use_equalized_learning_rate"]
            ),
            name="{}_layers_dense_logits".format(self.name)
        )
##########################################################################
##########################################################################
##########################################################################
def _use_logits_layer(self, inputs):
"""Uses flatten and logits layers to get logits tensor.
Args:
inputs: tensor, output of last conv layer of discriminator.
Returns:
Final logits tensor of discriminator.
"""
# Set shape to remove ambiguity for dense layer.
height, width = self.params["generator_projection_dims"][0:2]
valid_kernel_size = (
self.params["discriminator_base_conv_blocks"][0][-1][0]
)
inputs.set_shape(
[
inputs.get_shape()[0],
height - valid_kernel_size + 1,
width - valid_kernel_size + 1,
inputs.get_shape()[-1]]
)
# Flatten final block conv tensor.
flat_inputs = self.flatten_layer(inputs=inputs)
# Final linear layer for logits.
logits = self.logits_layer(inputs=flat_inputs)
return logits
def _create_base_block_and_logits(self, inputs):
"""Creates base discriminator block and logits.
Args:
block_conv: tensor, output of previous `Conv2D` block's layer.
Returns:
Final logits tensor of discriminator.
"""
# Only need the first conv layer block for base network.
base_conv_layers = self.conv_layers[0]
base_leaky_relu_layers = self.leaky_relu_layers[0]
network = self.minibatch_stddev_layer(inputs=inputs)
for i in range(len(base_conv_layers)):
network = base_conv_layers[i](inputs=network)
network = base_leaky_relu_layers[i](inputs=network)
# Get logits now.
logits = self._use_logits_layer(inputs=network)
return logits
def _create_growth_transition_weighted_sum(self, inputs, block_idx):
"""Creates growth transition img_to_vec weighted_sum.
Args:
inputs: tensor, input image to discriminator.
block_idx: int, current block index of model progression.
Returns:
Tensor of weighted sum between shrinking and growing block paths.
"""
# Growing side chain.
growing_from_rgb_conv_layer = self.from_rgb_conv_layers[block_idx]
growing_from_rgb_leaky_relu_layer = (
self.from_rgb_leaky_relu_layers[block_idx]
)
growing_downsample_layer = (
self.growing_downsample_layers[block_idx - 1]
)
growing_conv_layers = self.conv_layers[block_idx]
growing_leaky_relu_layers = self.leaky_relu_layers[block_idx]
# Pass inputs through layer chain.
network = growing_from_rgb_conv_layer(inputs=inputs)
network = growing_from_rgb_leaky_relu_layer(inputs=network)
for i in range(len(growing_conv_layers)):
network = growing_conv_layers[i](inputs=network)
network = growing_leaky_relu_layers[i](inputs=network)
# Down sample from 2s X 2s to s X s image.
growing_network = growing_downsample_layer(inputs=network)
# Shrinking side chain.
shrinking_from_rgb_conv_layer = (
self.from_rgb_conv_layers[block_idx - 1]
)
shrinking_from_rgb_leaky_relu_layer = (
self.from_rgb_leaky_relu_layers[block_idx - 1]
)
shrinking_downsample_layer = (
self.shrinking_downsample_layers[block_idx - 1]
)
# Pass inputs through layer chain.
# Down sample from 2s X 2s to s X s image.
network = shrinking_downsample_layer(inputs=inputs)
network = shrinking_from_rgb_conv_layer(inputs=network)
shrinking_network = shrinking_from_rgb_leaky_relu_layer(
inputs=network
)
# Weighted sum.
weighted_sum = tf.add(
x=growing_network * self.alpha_var,
y=shrinking_network * (1.0 - self.alpha_var),
name="{}_growth_transition_weighted_sum_{}".format(
self.name, block_idx
)
)
return weighted_sum
def _create_perm_growth_block_network(self, inputs, block_idx):
"""Creates discriminator permanent block network.
Args:
inputs: tensor, output of previous block's layer.
block_idx: int, current block index of model progression.
Returns:
Tensor from final permanent block `Conv2D` layer.
"""
# Get permanent growth blocks, so skip the base block.
permanent_conv_layers = self.conv_layers[1:block_idx]
permanent_leaky_relu_layers = self.leaky_relu_layers[1:block_idx]
permanent_downsample_layers = self.growing_downsample_layers[0:block_idx - 1]
# Reverse order of blocks.
permanent_conv_layers = permanent_conv_layers[::-1]
permanent_leaky_relu_layers = permanent_leaky_relu_layers[::-1]
permanent_downsample_layers = permanent_downsample_layers[::-1]
# Pass inputs through layer chain.
network = inputs
# Loop through the permanent growth blocks.
for i in range(len(permanent_conv_layers)):
# Get layers from ith permanent block.
conv_layers = permanent_conv_layers[i]
leaky_relu_layers = permanent_leaky_relu_layers[i]
permanent_downsample_layer = permanent_downsample_layers[i]
# Loop through layers of ith permanent block.
for j in range(len(conv_layers)):
network = conv_layers[j](inputs=network)
network = leaky_relu_layers[j](inputs=network)
# Down sample from 2s X 2s to s X s image.
network = permanent_downsample_layer(inputs=network)
return network
##########################################################################
##########################################################################
##########################################################################
def _build_base_model(self, input_shape):
"""Builds discriminator base model.
Args:
input_shape: tuple, shape of image vector input of shape
[batch_size, height, width, depth].
Returns:
Instance of `Model` object.
"""
# Create the input layer to discriminator.
# shape = (batch_size, height, width, depth)
inputs = self.input_layers[0]
# Only need the first fromRGB conv layer & block for base network.
base_from_rgb_conv_layer = self.from_rgb_conv_layers[0]
base_from_rgb_leaky_relu_layer = self.from_rgb_leaky_relu_layers[0]
base_conv_layers = self.conv_layers[0]
base_leaky_relu_layers = self.leaky_relu_layers[0]
# Pass inputs through layer chain.
network = base_from_rgb_conv_layer(inputs=inputs)
network = base_from_rgb_leaky_relu_layer(inputs=network)
# Get logits after continuing through base conv block.
logits = self._create_base_block_and_logits(inputs=network)
# Define model.
model = tf.keras.Model(
inputs=inputs,
outputs=logits,
name="{}_base".format(self.name)
)
return model
def _build_growth_transition_model(
self, input_shape, block_idx
):
"""Builds discriminator growth transition model.
Args:
input_shape: tuple, shape of latent vector input of shape
[batch_size, height, width, depth].
block_idx: int, current block index of model progression.
Returns:
Instance of `Model` object.
"""
# Create the input layer to discriminator.
# shape = (batch_size, height, width, depth)
inputs = self.input_layers[block_idx]
# Get weighted sum between shrinking and growing block paths.
weighted_sum = self._create_growth_transition_weighted_sum(
inputs=inputs, block_idx=block_idx
)
# Get output of final permanent growth block's last `Conv2D` layer.
network = self._create_perm_growth_block_network(
inputs=weighted_sum, block_idx=block_idx
)
# Get logits after continuing through base conv block.
logits = self._create_base_block_and_logits(inputs=network)
# Define model.
model = tf.keras.Model(
inputs=inputs,
outputs=logits,
name="{}_growth_transition_{}".format(self.name, block_idx)
)
return model
    def _build_growth_stable_model(self, input_shape, block_idx):
        """Builds discriminator growth stable model.

        (Original docstring said "generator" and "latent vector" — this is
        the discriminator and it consumes image tensors.)

        Args:
            input_shape: tuple, shape of image input of shape
                [batch_size, height, width, depth]. Unused here; the
                pre-built input layer already encodes the shape.
            block_idx: int, current block index of model progression.

        Returns:
            Instance of `Model` object.
        """
        # Create the input layer to discriminator at this resolution.
        inputs = self.input_layers[block_idx]

        # Get fromRGB layers.
        from_rgb_conv_layer = self.from_rgb_conv_layers[block_idx]
        from_rgb_leaky_relu_layer = self.from_rgb_leaky_relu_layers[block_idx]

        # Pass inputs through layer chain.
        network = from_rgb_conv_layer(inputs=inputs)
        network = from_rgb_leaky_relu_layer(inputs=network)

        # Get output of final permanent growth block's last `Conv2D` layer.
        # block_idx + 1 here: in the stable phase the current block is
        # itself permanent, so it is included in the chain.
        network = self._create_perm_growth_block_network(
            inputs=network, block_idx=block_idx + 1
        )

        # Get logits after continuing through base conv block.
        logits = self._create_base_block_and_logits(inputs=network)

        # Define model.
        model = tf.keras.Model(
            inputs=inputs,
            outputs=logits,
            name="{}_growth_stable_{}".format(self.name, block_idx)
        )

        return model
def _create_models(self, num_growths):
"""Creates list of discriminator's `Model` objects for each growth.
Args:
num_growths: int, number of growth phases for model.
Returns:
List of `Discriminator` `Model` objects.
"""
models = []
for growth_idx in range(num_growths):
block_idx = (growth_idx + 1) // 2
image_multiplier = 2 ** block_idx
height = (
self.params["generator_projection_dims"][0] * image_multiplier
)
width = (
self.params["generator_projection_dims"][1] * image_multiplier
)
input_shape = (height, width, self.params["depth"])
if growth_idx == 0:
model = self._build_base_model(input_shape)
elif growth_idx % 2 == 1:
model = self._build_growth_transition_model(
input_shape=input_shape, block_idx=block_idx
)
elif growth_idx % 2 == 0:
model = self._build_growth_stable_model(
input_shape=input_shape, block_idx=block_idx
)
models.append(model)
return models
##########################################################################
##########################################################################
##########################################################################
    def _get_gradient_penalty_loss(self, fake_images, real_images, growth_idx):
        """Gets discriminator gradient penalty loss.

        Computes a penalty on the L2 norm of the discriminator's gradient
        at random interpolations between real and fake images, pushing the
        norm toward 1, scaled by
        `discriminator_gradient_penalty_coefficient`.

        Args:
            fake_images: tensor, images generated by the generator from random
                noise of shape [batch_size, image_size, image_size, 3].
            real_images: tensor, real images from input of shape
                [batch_size, image_height, image_width, 3].
            growth_idx: int, current growth index model has progressed to.

        Returns:
            Discriminator's gradient penalty loss of shape [].
        """
        batch_size = real_images.shape[0]

        # One interpolation coefficient per example, broadcast over
        # height/width/channels.
        random_uniform_num = tf.random.uniform(
            shape=[batch_size, 1, 1, 1],
            minval=0., maxval=1.,
            dtype=tf.float32,
            name="gp_random_uniform_num"
        )

        # Find the element-wise difference between images.
        image_difference = fake_images - real_images

        # mixed = real + u * (fake - real): a random point on the line
        # between each real/fake pair.
        mixed_images = random_uniform_num * image_difference
        mixed_images += real_images

        # Get loss from interpolated mixed images and watch for gradients.
        with tf.GradientTape() as gp_tape:
            # Watch interpolated mixed images (they are not variables, so
            # the tape would not track them otherwise).
            gp_tape.watch(tensor=mixed_images)

            # Send to the discriminator to get logits.
            mixed_logits = self.models[growth_idx](
                inputs=mixed_images, training=True
            )

            # Summing logits gives per-example gradients in one backward
            # pass.
            mixed_loss = tf.reduce_sum(
                input_tensor=mixed_logits,
                name="gp_mixed_loss"
            )

        # Get gradient from returned list of length 1.
        mixed_gradients = gp_tape.gradient(
            target=mixed_loss, sources=[mixed_images]
        )[0]

        # Per-example gradient L2 norm; 1e-8 guards the sqrt at zero.
        mixed_norms = tf.sqrt(
            x=tf.reduce_sum(
                input_tensor=tf.square(
                    x=mixed_gradients,
                    name="gp_squared_grads"
                ),
                axis=[1, 2, 3]
            ) + 1e-8
        )

        # Get squared difference from target of 1.0.
        squared_difference = tf.square(
            x=mixed_norms - 1.0, name="gp_squared_difference"
        )

        # Mean over the batch gives the scalar penalty.
        gradient_penalty = tf.reduce_mean(
            input_tensor=squared_difference, name="gp_gradient_penalty"
        )

        # Multiply with lambda to get gradient penalty loss.
        gradient_penalty_loss = tf.multiply(
            x=self.params["discriminator_gradient_penalty_coefficient"],
            y=gradient_penalty,
            name="gp_gradient_penalty_loss"
        )

        return gradient_penalty_loss
    def get_discriminator_loss(
        self,
        global_batch_size,
        fake_images,
        real_images,
        fake_logits,
        real_logits,
        global_step,
        summary_file_writer,
        growth_idx
    ):
        """Gets discriminator loss.

        Total loss = (mean fake logits - mean real logits)
                     + gradient penalty
                     + epsilon drift penalty on squared real logits
                     + regularization losses of the current growth model.

        Args:
            global_batch_size: int, global batch size for distribution.
            fake_images: tensor, images generated by the generator from random
                noise of shape [batch_size, image_size, image_size, 3].
            real_images: tensor, real images from input of shape
                [batch_size, image_height, image_width, 3].
            fake_logits: tensor, output of discriminator using fake images
                with shape [batch_size, 1].
            real_logits: tensor, output of discriminator using real images
                with shape [batch_size, 1].
            global_step: int, current global step for training.
            summary_file_writer: summary file writer.
            growth_idx: int, current growth index model has progressed to.

        Returns:
            Tensor of discriminator's total loss of shape [].
        """
        if self.params["distribution_strategy"]:
            # Under a distribution strategy, scale per-example losses by
            # the *global* batch size so replica losses sum correctly.
            discriminator_fake_loss = tf.nn.compute_average_loss(
                per_example_loss=fake_logits,
                global_batch_size=global_batch_size
            )
            discriminator_real_loss = tf.nn.compute_average_loss(
                per_example_loss=real_logits,
                global_batch_size=global_batch_size
            )
        else:
            # Single-device path: plain means over the local batch.
            discriminator_fake_loss = tf.reduce_mean(
                input_tensor=fake_logits,
                name="{}_fake_loss".format(self.name)
            )
            discriminator_real_loss = tf.reduce_mean(
                input_tensor=real_logits,
                name="{}_real_loss".format(self.name)
            )

        # Critic loss: E[D(fake)] - E[D(real)].
        discriminator_loss = tf.subtract(
            x=discriminator_fake_loss,
            y=discriminator_real_loss,
            name="{}_loss".format(self.name)
        )

        # Get discriminator gradient penalty loss.
        discriminator_gradient_penalty = self._get_gradient_penalty_loss(
            fake_images, real_images, growth_idx
        )

        # Drift penalty on squared real logits, scaled by
        # discriminator_epsilon_drift.
        epsilon_drift_penalty = tf.multiply(
            x=self.params["discriminator_epsilon_drift"],
            y=tf.reduce_mean(input_tensor=tf.square(x=real_logits)),
            name="epsilon_drift_penalty"
        )

        # Get discriminator Wasserstein GP loss.
        discriminator_wasserstein_gp_loss = tf.add_n(
            inputs=[
                discriminator_loss,
                discriminator_gradient_penalty,
                epsilon_drift_penalty
            ],
            name="{}_wasserstein_gp_loss".format(self.name)
        )

        if self.params["distribution_strategy"]:
            # Scale regularization for multi-replica training.
            discriminator_reg_loss = tf.nn.scale_regularization_loss(
                regularization_loss=sum(self.models[growth_idx].losses)
            )
        else:
            # Get regularization losses.
            discriminator_reg_loss = sum(self.models[growth_idx].losses)

        # Combine losses for total loss.
        discriminator_total_loss = tf.math.add(
            x=discriminator_wasserstein_gp_loss,
            y=discriminator_reg_loss,
            name="discriminator_total_loss"
        )

        if self.params["write_summaries"]:
            # Record scalar summaries every `save_summary_steps` steps
            # (the floormod condition gates recording).
            with summary_file_writer.as_default():
                with tf.summary.record_if(
                    condition=tf.equal(
                        x=tf.math.floormod(
                            x=global_step,
                            y=self.params["save_summary_steps"]
                        ), y=0
                    )
                ):
                    tf.summary.scalar(
                        name="losses/discriminator_real_loss",
                        data=discriminator_real_loss,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/discriminator_fake_loss",
                        data=discriminator_fake_loss,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/discriminator_loss",
                        data=discriminator_loss,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/discriminator_gradient_penalty",
                        data=discriminator_gradient_penalty,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/epsilon_drift_penalty",
                        data=epsilon_drift_penalty,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/discriminator_wasserstein_gp_loss",
                        data=discriminator_wasserstein_gp_loss,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="losses/discriminator_reg_loss",
                        data=discriminator_reg_loss,
                        step=global_step
                    )
                    tf.summary.scalar(
                        name="optimized_losses/discriminator_total_loss",
                        data=discriminator_total_loss,
                        step=global_step
                    )
                summary_file_writer.flush()

        return discriminator_total_loss
| StarcoderdataPython |
import os

# Required environment variables: a missing one raises KeyError at import
# time, failing fast before the app starts.
TOKEN = os.environ['TOKEN']  # bot/API token — presumably secret; verify source
PARKING_CHAT_ID = os.environ['PARKING_CHAT_ID']  # target chat identifier

# Directory containing this module; used to locate the bundled db file.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DBPATH = os.path.join(BASE_DIR, "db1e3bfkg2bidc")

# Optional: None when DATABASE_URL is unset (uses .get, unlike the
# required variables above).
DATABASE_URL = os.environ.get("DATABASE_URL")

HOME_URL = os.environ['HOME_URL']
SALT = os.environ['SALT']  # presumably used for hashing — TODO confirm at call sites
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.