max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
gutils/mock.py | giussepi/gutils | 0 | 6619351 | # -*- coding: utf-8 -*-
""" gutils/mock """
def notqdm(iterable, *args, **kwargs):
"""
Replacement for tqdm that just passes back the iterable
useful to silence `tqdm` in tests
Use it along with mock.patch decorator. E.g.:
@patch('Data.Prepare_patches.CRLM.tqdm', notqdm)
def myfunc(*args, **kwars):
Source: https://stackoverflow.com/questions/37091673/silence-tqdms-output-while-running-tests-or-running-the-code-via-cron#answer-46689485
"""
return iterable
| # -*- coding: utf-8 -*-
""" gutils/mock """
def notqdm(iterable, *args, **kwargs):
"""
Replacement for tqdm that just passes back the iterable
useful to silence `tqdm` in tests
Use it along with mock.patch decorator. E.g.:
@patch('Data.Prepare_patches.CRLM.tqdm', notqdm)
def myfunc(*args, **kwars):
Source: https://stackoverflow.com/questions/37091673/silence-tqdms-output-while-running-tests-or-running-the-code-via-cron#answer-46689485
"""
return iterable
| en | 0.583998 | # -*- coding: utf-8 -*- gutils/mock Replacement for tqdm that just passes back the iterable useful to silence `tqdm` in tests Use it along with mock.patch decorator. E.g.: @patch('Data.Prepare_patches.CRLM.tqdm', notqdm) def myfunc(*args, **kwars): Source: https://stackoverflow.com/questions/37091673/silence-tqdms-output-while-running-tests-or-running-the-code-via-cron#answer-46689485 | 2.386065 | 2 |
nbsafety/kernel/__main__.py | runtime-jupyter-safety/runtime-jupyter-safety | 0 | 6619352 | <gh_stars>0
# -*- coding: utf-8 -*-
import sys
# Remove the CWD from sys.path while we load stuff.
# This is added back by InteractiveShellApp.init_path()
# TODO: probably need to make this separate from nbsafety package so that we can
# completely avoid imports until after removing cwd from sys.path
if sys.path[0] == "":
del sys.path[0]
from ipykernel import kernelapp as app
from nbsafety.kernel import SafeKernel
app.launch_new_instance(kernel_class=SafeKernel)
| # -*- coding: utf-8 -*-
import sys
# Remove the CWD from sys.path while we load stuff.
# This is added back by InteractiveShellApp.init_path()
# TODO: probably need to make this separate from nbsafety package so that we can
# completely avoid imports until after removing cwd from sys.path
if sys.path[0] == "":
del sys.path[0]
from ipykernel import kernelapp as app
from nbsafety.kernel import SafeKernel
app.launch_new_instance(kernel_class=SafeKernel) | en | 0.898023 | # -*- coding: utf-8 -*- # Remove the CWD from sys.path while we load stuff. # This is added back by InteractiveShellApp.init_path() # TODO: probably need to make this separate from nbsafety package so that we can # completely avoid imports until after removing cwd from sys.path | 1.668153 | 2 |
Python3-ThirdPartyLibrary/Chapter04_paramiko-02.py | anliven/Reading-Code-Learning-Python | 0 | 6619353 | <reponame>anliven/Reading-Code-Learning-Python
# -*- coding: utf-8 -*-
import paramiko
def ssh2_trans(ip, username, passwd, cmd):
# paramiko.util.log_to_file('ssh_log') # 设置日志,记录交互信息
try:
trans = paramiko.Transport((ip, 22))
trans.connect(username=username, password=<PASSWORD>)
s = paramiko.SSHClient()
s._transport = trans # 将sshclient对象的transport指定为trans
stdin, stdout, stderr = s.exec_command(cmd)
print("### %s is OK." % ip)
# print(stdout.read().decode()) # 输出内容比较少时,直接使用read读取出所有的输出
for line in stdout.readlines(): # 输出内容比较多时,按行读取进行处理
print('... ' + line.strip('\n')) # 使用strip()处理结尾换行符
except Exception:
print("### %s is Error." % ip)
finally:
trans.close()
ssh2_trans("10.91.48.171", "root", "arthur", "w") # 注意:实参均为字符串类型
ssh2_trans("10.91.48.172", "root", "arthur", "hostname;uptime") # 通过分号分割多个命令
# ### paramiko示例
# 实现SSH登录并执行命令;
| # -*- coding: utf-8 -*-
import paramiko
def ssh2_trans(ip, username, passwd, cmd):
# paramiko.util.log_to_file('ssh_log') # 设置日志,记录交互信息
try:
trans = paramiko.Transport((ip, 22))
trans.connect(username=username, password=<PASSWORD>)
s = paramiko.SSHClient()
s._transport = trans # 将sshclient对象的transport指定为trans
stdin, stdout, stderr = s.exec_command(cmd)
print("### %s is OK." % ip)
# print(stdout.read().decode()) # 输出内容比较少时,直接使用read读取出所有的输出
for line in stdout.readlines(): # 输出内容比较多时,按行读取进行处理
print('... ' + line.strip('\n')) # 使用strip()处理结尾换行符
except Exception:
print("### %s is Error." % ip)
finally:
trans.close()
ssh2_trans("10.91.48.171", "root", "arthur", "w") # 注意:实参均为字符串类型
ssh2_trans("10.91.48.172", "root", "arthur", "hostname;uptime") # 通过分号分割多个命令
# ### paramiko示例
# 实现SSH登录并执行命令; | zh | 0.758053 | # -*- coding: utf-8 -*- # paramiko.util.log_to_file('ssh_log') # 设置日志,记录交互信息 # 将sshclient对象的transport指定为trans ## %s is OK." % ip) # print(stdout.read().decode()) # 输出内容比较少时,直接使用read读取出所有的输出 # 输出内容比较多时,按行读取进行处理 # 使用strip()处理结尾换行符 ## %s is Error." % ip) # 注意:实参均为字符串类型 # 通过分号分割多个命令 # ### paramiko示例 # 实现SSH登录并执行命令; | 2.897266 | 3 |
profract/core.py | rotaliator/profract | 0 | 6619354 | from __future__ import print_function
import sys
import time
import png
from palette import pypngpalettes
IMAGE_WIDTH = 800
IMAGE_HEIGHT = 600
class Timer:
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
return False
self.interval = time.time() - self.start
return self
def save_array_as_png(array, filename, width, height, palette=None):
if palette:
writer = png.Writer(width=width, height=height, palette=palette)
else:
writer = png.Writer(width=width, height=height, greyscale=True)
with open(filename, "wb") as f:
writer.write_array(f, array)
def main():
viridis256 = pypngpalettes['PAL_Viridis256']
from mandel.pure_python import mandel_classic
with Timer() as t:
m = mandel_classic(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Single proc calculations took: {:.2f} sec".format( t.interval))
# Saving images for all palettes in pypngpalettes map
for palname, palette in pypngpalettes.items():
save_array_as_png(
m,
"mandel_classic_{}.png".format(palname.lower()),
IMAGE_WIDTH, IMAGE_HEIGHT,
palette=palette,
)
try:
from mandel.pure_python_multiproc import mandel as mandel_multiproc
with Timer() as t:
m = mandel_multiproc(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Multi proc calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_multiproc.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
try:
from mandel.mandel_cython import mandel_cython
with Timer() as t:
m = mandel_cython(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Cython calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_cython.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
try:
from mandel.mandel_cython import mandel_cython_multiproc
with Timer() as t:
m = mandel_cython_multiproc(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Cython multiproc calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_cython_multiproc.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
if __name__ == '__main__':
main()
| from __future__ import print_function
import sys
import time
import png
from palette import pypngpalettes
IMAGE_WIDTH = 800
IMAGE_HEIGHT = 600
class Timer:
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
return False
self.interval = time.time() - self.start
return self
def save_array_as_png(array, filename, width, height, palette=None):
if palette:
writer = png.Writer(width=width, height=height, palette=palette)
else:
writer = png.Writer(width=width, height=height, greyscale=True)
with open(filename, "wb") as f:
writer.write_array(f, array)
def main():
viridis256 = pypngpalettes['PAL_Viridis256']
from mandel.pure_python import mandel_classic
with Timer() as t:
m = mandel_classic(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Single proc calculations took: {:.2f} sec".format( t.interval))
# Saving images for all palettes in pypngpalettes map
for palname, palette in pypngpalettes.items():
save_array_as_png(
m,
"mandel_classic_{}.png".format(palname.lower()),
IMAGE_WIDTH, IMAGE_HEIGHT,
palette=palette,
)
try:
from mandel.pure_python_multiproc import mandel as mandel_multiproc
with Timer() as t:
m = mandel_multiproc(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Multi proc calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_multiproc.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
try:
from mandel.mandel_cython import mandel_cython
with Timer() as t:
m = mandel_cython(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Cython calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_cython.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
try:
from mandel.mandel_cython import mandel_cython_multiproc
with Timer() as t:
m = mandel_cython_multiproc(-2.0, -1.0, 1.0, 1.0, IMAGE_WIDTH, IMAGE_HEIGHT)
print("Cython multiproc calculations took: {:.2f} sec".format( t.interval))
save_array_as_png(m, "mandel_cython_multiproc.png", IMAGE_WIDTH, IMAGE_HEIGHT, palette=viridis256)
except ImportError as e:
print(e)
if __name__ == '__main__':
main()
| en | 0.38017 | # Saving images for all palettes in pypngpalettes map | 2.742733 | 3 |
xstate/python/tests/classifier/test_main_case_classifier.py | uwescience/new_xstate | 3 | 6619355 | <filename>xstate/python/tests/classifier/test_main_case_classifier.py
# TODO: Create CSV matrix files for existing data
import common.constants as cn
import common_python.constants as ccn
from common_python.testing import helpers
from common_python.classifier import feature_analyzer
import classifier.main_case_classifier as main
import numpy as np
import os
import pandas as pd
import shutil
import unittest
IGNORE_TEST = True
IS_PLOT = True
DIR = os.path.dirname(os.path.abspath(__file__))
TEST_OUT_PATH = os.path.join(DIR,
"test_main_case_classifier.csv")
TEST_IN_PATH = os.path.join(cn.TRINARY_SAMPLES_DIR,
"AM_MDM.csv")
STATE = 1
class TestFunctions(unittest.TestCase):
def _remove(self):
for path in [TEST_OUT_PATH]:
if os.path.isfile(path):
os.remove(path)
def setUp(self):
self._remove()
def tearDown(self):
self._remove()
def testRunState(self):
if IGNORE_TEST:
return
df_instance = pd.read_csv(TEST_IN_PATH)
arguments = main.Arguments(
state=STATE, df=df_instance, num_fset=5)
df = main._runState(arguments)
columns = expected_columns=[ccn.FEATURE_VECTOR,
ccn.SIGLVL, cn.STATE, main.INSTANCE,
ccn.FRAC, ccn.COUNT]
self.assertTrue(helpers.isValidDataFrame(df,
expected_columns=columns,
nan_columns=columns))
def testRun(self):
# TESTING
#
with open(TEST_IN_PATH, "r") as in_fd:
with open(TEST_OUT_PATH, "w") as out_fd:
main.run(in_fd, out_fd, num_fset=2)
self.assertTrue(os.path.isfile(TEST_OUT_PATH))
self.assertTrue(os.path.isfile(TEST_OUT_PATH))
if __name__ == '__main__':
unittest.main()
| <filename>xstate/python/tests/classifier/test_main_case_classifier.py
# TODO: Create CSV matrix files for existing data
import common.constants as cn
import common_python.constants as ccn
from common_python.testing import helpers
from common_python.classifier import feature_analyzer
import classifier.main_case_classifier as main
import numpy as np
import os
import pandas as pd
import shutil
import unittest
IGNORE_TEST = True
IS_PLOT = True
DIR = os.path.dirname(os.path.abspath(__file__))
TEST_OUT_PATH = os.path.join(DIR,
"test_main_case_classifier.csv")
TEST_IN_PATH = os.path.join(cn.TRINARY_SAMPLES_DIR,
"AM_MDM.csv")
STATE = 1
class TestFunctions(unittest.TestCase):
def _remove(self):
for path in [TEST_OUT_PATH]:
if os.path.isfile(path):
os.remove(path)
def setUp(self):
self._remove()
def tearDown(self):
self._remove()
def testRunState(self):
if IGNORE_TEST:
return
df_instance = pd.read_csv(TEST_IN_PATH)
arguments = main.Arguments(
state=STATE, df=df_instance, num_fset=5)
df = main._runState(arguments)
columns = expected_columns=[ccn.FEATURE_VECTOR,
ccn.SIGLVL, cn.STATE, main.INSTANCE,
ccn.FRAC, ccn.COUNT]
self.assertTrue(helpers.isValidDataFrame(df,
expected_columns=columns,
nan_columns=columns))
def testRun(self):
# TESTING
#
with open(TEST_IN_PATH, "r") as in_fd:
with open(TEST_OUT_PATH, "w") as out_fd:
main.run(in_fd, out_fd, num_fset=2)
self.assertTrue(os.path.isfile(TEST_OUT_PATH))
self.assertTrue(os.path.isfile(TEST_OUT_PATH))
if __name__ == '__main__':
unittest.main()
| en | 0.426604 | # TODO: Create CSV matrix files for existing data # TESTING # | 2.540727 | 3 |
motorTest2.py | DdOtzen/espCarStuff | 0 | 6619356 | from machine import Pin
from time import sleep_ms, sleep
vf = Pin(15, Pin.OUT)
vb = Pin(4, Pin.OUT)
hb = Pin(5, Pin.OUT)
hf = Pin(18, Pin.OUT)
led = Pin(2, Pin.OUT)
def coast() :
hf.off()
vf.off()
hb.off()
vb.off()
def frem() :
coast()
vf.on()
hf.on()
def bak() :
coast()
vb.on()
hb.on()
def drejH() :
coast()
vf.on()
def drejV() :
coast()
hf.on()
def roterH() :
coast()
vf.on()
hb.on()
def roterV() :
coast()
hf.on()
vb.on()
led.on()
sleep_ms(300)
led.off()
sleep_ms(300)
while True :
frem()
sleep_ms(500)
bak()
sleep_ms(500)
drejH()
sleep_ms(500)
drejV()
sleep_ms(500)
roterH()
sleep_ms(500)
roterV()
sleep_ms(500)
coast()
| from machine import Pin
from time import sleep_ms, sleep
vf = Pin(15, Pin.OUT)
vb = Pin(4, Pin.OUT)
hb = Pin(5, Pin.OUT)
hf = Pin(18, Pin.OUT)
led = Pin(2, Pin.OUT)
def coast() :
hf.off()
vf.off()
hb.off()
vb.off()
def frem() :
coast()
vf.on()
hf.on()
def bak() :
coast()
vb.on()
hb.on()
def drejH() :
coast()
vf.on()
def drejV() :
coast()
hf.on()
def roterH() :
coast()
vf.on()
hb.on()
def roterV() :
coast()
hf.on()
vb.on()
led.on()
sleep_ms(300)
led.off()
sleep_ms(300)
while True :
frem()
sleep_ms(500)
bak()
sleep_ms(500)
drejH()
sleep_ms(500)
drejV()
sleep_ms(500)
roterH()
sleep_ms(500)
roterV()
sleep_ms(500)
coast()
| none | 1 | 2.556058 | 3 | |
setup.py | awesome-archive/pybingwallpaper | 0 | 6619357 | from cx_Freeze import setup, Executable
import sys
sys.path.append('src')
from main import REV
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = {'packages': ['urllib', 'PIL'],
'includes': ['win32.win32gui', 'log', 'record',
'webutil', 'setter', 'bingwallpaper'],
'excludes': ['tkinter'],
'compressed':1,
'include_files': [('src/winsetter.py','')],
'bin_includes': ['pywintypes34.dll'],
'optimize': 2,
}
executables = [
Executable('./src/main.py', base='Win32GUI', targetName='BingWallpaper.exe'),
Executable('./src/main.py', base='Console', targetName='BingWallpaper-cli.exe')
]
setup(name='PyBingWallpaper.exe',
version = REV,
description = 'Bing.com Wallpaper Downloader',
options = {'build_exe': buildOptions},
executables = executables)
| from cx_Freeze import setup, Executable
import sys
sys.path.append('src')
from main import REV
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = {'packages': ['urllib', 'PIL'],
'includes': ['win32.win32gui', 'log', 'record',
'webutil', 'setter', 'bingwallpaper'],
'excludes': ['tkinter'],
'compressed':1,
'include_files': [('src/winsetter.py','')],
'bin_includes': ['pywintypes34.dll'],
'optimize': 2,
}
executables = [
Executable('./src/main.py', base='Win32GUI', targetName='BingWallpaper.exe'),
Executable('./src/main.py', base='Console', targetName='BingWallpaper-cli.exe')
]
setup(name='PyBingWallpaper.exe',
version = REV,
description = 'Bing.com Wallpaper Downloader',
options = {'build_exe': buildOptions},
executables = executables)
| en | 0.880612 | # Dependencies are automatically detected, but it might need # fine tuning. | 1.885606 | 2 |
commercialoperator/migrations/0005_auto_20190808_0037.py | sharpeez/ledger | 0 | 6619358 | <filename>commercialoperator/migrations/0005_auto_20190808_0037.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-07 16:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('commercialoperator', '0004_merge_20190807_1117'),
]
operations = [
migrations.CreateModel(
name='PreviewTempApproval',
fields=[
('approval_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='commercialoperator.Approval')),
],
bases=('commercialoperator.approval',),
),
migrations.AlterField(
model_name='proposaltype',
name='name',
field=models.CharField(choices=[('T Class', 'T Class'), ('Filming', 'Filming'), ('Event', 'Event')], default='T Class', max_length=64, verbose_name='Application name (eg. T Class, Filming, Event, E Class)'),
),
]
| <filename>commercialoperator/migrations/0005_auto_20190808_0037.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-08-07 16:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('commercialoperator', '0004_merge_20190807_1117'),
]
operations = [
migrations.CreateModel(
name='PreviewTempApproval',
fields=[
('approval_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='commercialoperator.Approval')),
],
bases=('commercialoperator.approval',),
),
migrations.AlterField(
model_name='proposaltype',
name='name',
field=models.CharField(choices=[('T Class', 'T Class'), ('Filming', 'Filming'), ('Event', 'Event')], default='T Class', max_length=64, verbose_name='Application name (eg. T Class, Filming, Event, E Class)'),
),
]
| en | 0.707392 | # -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2019-08-07 16:37 | 1.365293 | 1 |
phial/scheduler.py | fossabot/phial | 0 | 6619359 | from typing import Callable, Optional, List, TypeVar # noqa: F401
from datetime import timedelta, datetime
from collections import namedtuple
Time = namedtuple("Time", ['hour', 'minute', 'second'])
class Schedule:
'''
A schedule stores the relative time for something to happen.
It can be used to compute when the next event of an event
should occur.
'''
def __init__(self) -> None:
self._days = 0
self._at = None # type: Optional[Time]
self._hours = 0
self._minutes = 0
self._seconds = 0
def every(self):
# type: () -> Schedule
'''
Syntatic sugar to allow the declaration of schedules to be more
like English.
::
schedule = Schedule().every().day()
'''
return self
def day(self):
# type: () -> Schedule
'''
Adds a day to the relative time till the next event
::
schedule = Schedule().every().day()
'''
return self.days(1)
def days(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of days to the relative time till the next
event.
::
schedule = Schedule().every().days(2)
Args:
value(int): The number of days to wait between events
'''
self._days = value
return self
def at(self, hour, minute, second=0):
# type: (int, int, int) -> Schedule
'''
Specifies the time of day the next occurnce will happen.
NOTE: 'at' can only be used with :meth:`day`.
::
schedule = Schedule().every().day().at(12,00)
Args:
hour(int): The hour of day the next event should happen,
when combined with the minute
minute(int): The minute of day the next event should happen,
when combined with the hour
second(int, optional): The second of day the next event should
happen, when combined with the hour and
minute.
Defaults to 0
'''
if self._hours or self._minutes:
raise Exception("'at' can only be used on day(s)")
if not self._days:
raise Exception("'at' can only be used on day(s)")
if self._at:
raise Exception("'at' can only be set once")
self._at = Time(hour, minute, second)
return self
def hour(self):
# type: () -> Schedule
'''
Adds an hour to the relative time till the next event.
::
schedule = Schedule().every().hour()
'''
return self.hours(1)
def hours(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of hours to the relative time till the next
event.
::
schedule = Schedule().every().hours(2)
Args:
value(int): The number of hours to wait between events
'''
self._hours = value
return self
def minute(self):
# type: () -> Schedule
'''
Adds a minute to the relative time till the next event
::
schedule = Schedule().every().minute()
'''
return self.minutes(1)
def minutes(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of minutes to the relative time till the next
event.
::
schedule = Schedule().every().minutes(2)
Args:
value(int): The number of minutes to wait between events
'''
self._minutes = value
return self
def second(self):
# type: () -> Schedule
'''
Adds a second to the relative time till the next event
::
schedule = Schedule().every().second()
'''
return self.seconds(1)
def seconds(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of seconds to the relative time till the next
event.
::
schedule = Schedule().every().seconds(2)
Args:
value(int): The number of seconds to wait between events
'''
self._seconds = value
return self
def get_next_run_time(self, last_run: datetime) -> datetime:
'''
Calculates the next time to run, based on the last time the
event was run.
Args:
last_run(datetime): The last time the event happened
Returns:
A :obj:`datetime` of when the event should next happen
'''
if self._at:
next_run = last_run.replace(hour=self._at.hour,
minute=self._at.minute,
second=self._at.second,
microsecond=0)
if next_run <= datetime.now():
next_run += timedelta(days=self._days)
return next_run
return last_run + timedelta(days=self._days,
hours=self._hours,
minutes=self._minutes,
seconds=self._seconds)
class ScheduledJob:
'''
A function with a schedule
'''
def __init__(self, schedule: Schedule, func: Callable) -> None:
self.func = func
self.schedule = schedule
self.func = func
self.next_run = self.schedule.get_next_run_time(datetime.now())
def should_run(self) -> bool:
'''
Checks whether the function needs to be run based on the schedule.
Returns:
A :obj:`bool` of whether or not to run
'''
return self.next_run <= datetime.now()
def run(self) -> None:
'''
Runs the function and calculates + stores the next run time
'''
self.func()
self.next_run = self.schedule.get_next_run_time(datetime.now())
class Scheduler:
'''
A store for Scheduled Jobs
'''
def __init__(self) -> None:
self.jobs = [] # type: List[ScheduledJob]
def add_job(self, job: ScheduledJob) -> None:
'''
Adds a scheuled job to the scheduler
Args:
job(ScheduledJob): The job to be added to the scheduler
'''
self.jobs.append(job)
def run_pending(self) -> None:
'''
Runs any ScheduledJobs in the store, where job.should_run() returns
true
'''
jobs_to_run = [job for job in self.jobs
if job.should_run()] # type: List[ScheduledJob]
for job in jobs_to_run:
job.run()
| from typing import Callable, Optional, List, TypeVar # noqa: F401
from datetime import timedelta, datetime
from collections import namedtuple
Time = namedtuple("Time", ['hour', 'minute', 'second'])
class Schedule:
'''
A schedule stores the relative time for something to happen.
It can be used to compute when the next event of an event
should occur.
'''
def __init__(self) -> None:
self._days = 0
self._at = None # type: Optional[Time]
self._hours = 0
self._minutes = 0
self._seconds = 0
def every(self):
# type: () -> Schedule
'''
Syntatic sugar to allow the declaration of schedules to be more
like English.
::
schedule = Schedule().every().day()
'''
return self
def day(self):
# type: () -> Schedule
'''
Adds a day to the relative time till the next event
::
schedule = Schedule().every().day()
'''
return self.days(1)
def days(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of days to the relative time till the next
event.
::
schedule = Schedule().every().days(2)
Args:
value(int): The number of days to wait between events
'''
self._days = value
return self
def at(self, hour, minute, second=0):
# type: (int, int, int) -> Schedule
'''
Specifies the time of day the next occurnce will happen.
NOTE: 'at' can only be used with :meth:`day`.
::
schedule = Schedule().every().day().at(12,00)
Args:
hour(int): The hour of day the next event should happen,
when combined with the minute
minute(int): The minute of day the next event should happen,
when combined with the hour
second(int, optional): The second of day the next event should
happen, when combined with the hour and
minute.
Defaults to 0
'''
if self._hours or self._minutes:
raise Exception("'at' can only be used on day(s)")
if not self._days:
raise Exception("'at' can only be used on day(s)")
if self._at:
raise Exception("'at' can only be set once")
self._at = Time(hour, minute, second)
return self
def hour(self):
# type: () -> Schedule
'''
Adds an hour to the relative time till the next event.
::
schedule = Schedule().every().hour()
'''
return self.hours(1)
def hours(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of hours to the relative time till the next
event.
::
schedule = Schedule().every().hours(2)
Args:
value(int): The number of hours to wait between events
'''
self._hours = value
return self
def minute(self):
# type: () -> Schedule
'''
Adds a minute to the relative time till the next event
::
schedule = Schedule().every().minute()
'''
return self.minutes(1)
def minutes(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of minutes to the relative time till the next
event.
::
schedule = Schedule().every().minutes(2)
Args:
value(int): The number of minutes to wait between events
'''
self._minutes = value
return self
def second(self):
# type: () -> Schedule
'''
Adds a second to the relative time till the next event
::
schedule = Schedule().every().second()
'''
return self.seconds(1)
def seconds(self, value):
# type: (int) -> Schedule
'''
Adds the specified number of seconds to the relative time till the next
event.
::
schedule = Schedule().every().seconds(2)
Args:
value(int): The number of seconds to wait between events
'''
self._seconds = value
return self
def get_next_run_time(self, last_run: datetime) -> datetime:
'''
Calculates the next time to run, based on the last time the
event was run.
Args:
last_run(datetime): The last time the event happened
Returns:
A :obj:`datetime` of when the event should next happen
'''
if self._at:
next_run = last_run.replace(hour=self._at.hour,
minute=self._at.minute,
second=self._at.second,
microsecond=0)
if next_run <= datetime.now():
next_run += timedelta(days=self._days)
return next_run
return last_run + timedelta(days=self._days,
hours=self._hours,
minutes=self._minutes,
seconds=self._seconds)
class ScheduledJob:
'''
A function with a schedule
'''
def __init__(self, schedule: Schedule, func: Callable) -> None:
self.func = func
self.schedule = schedule
self.func = func
self.next_run = self.schedule.get_next_run_time(datetime.now())
def should_run(self) -> bool:
'''
Checks whether the function needs to be run based on the schedule.
Returns:
A :obj:`bool` of whether or not to run
'''
return self.next_run <= datetime.now()
def run(self) -> None:
'''
Runs the function and calculates + stores the next run time
'''
self.func()
self.next_run = self.schedule.get_next_run_time(datetime.now())
class Scheduler:
'''
A store for Scheduled Jobs
'''
def __init__(self) -> None:
self.jobs = [] # type: List[ScheduledJob]
def add_job(self, job: ScheduledJob) -> None:
'''
Adds a scheuled job to the scheduler
Args:
job(ScheduledJob): The job to be added to the scheduler
'''
self.jobs.append(job)
def run_pending(self) -> None:
'''
Runs any ScheduledJobs in the store, where job.should_run() returns
true
'''
jobs_to_run = [job for job in self.jobs
if job.should_run()] # type: List[ScheduledJob]
for job in jobs_to_run:
job.run()
| en | 0.726363 | # noqa: F401 A schedule stores the relative time for something to happen. It can be used to compute when the next event of an event should occur. # type: Optional[Time] # type: () -> Schedule Syntatic sugar to allow the declaration of schedules to be more like English. :: schedule = Schedule().every().day() # type: () -> Schedule Adds a day to the relative time till the next event :: schedule = Schedule().every().day() # type: (int) -> Schedule Adds the specified number of days to the relative time till the next event. :: schedule = Schedule().every().days(2) Args: value(int): The number of days to wait between events # type: (int, int, int) -> Schedule Specifies the time of day the next occurnce will happen. NOTE: 'at' can only be used with :meth:`day`. :: schedule = Schedule().every().day().at(12,00) Args: hour(int): The hour of day the next event should happen, when combined with the minute minute(int): The minute of day the next event should happen, when combined with the hour second(int, optional): The second of day the next event should happen, when combined with the hour and minute. Defaults to 0 # type: () -> Schedule Adds an hour to the relative time till the next event. :: schedule = Schedule().every().hour() # type: (int) -> Schedule Adds the specified number of hours to the relative time till the next event. :: schedule = Schedule().every().hours(2) Args: value(int): The number of hours to wait between events # type: () -> Schedule Adds a minute to the relative time till the next event :: schedule = Schedule().every().minute() # type: (int) -> Schedule Adds the specified number of minutes to the relative time till the next event. 
:: schedule = Schedule().every().minutes(2) Args: value(int): The number of minutes to wait between events # type: () -> Schedule Adds a second to the relative time till the next event :: schedule = Schedule().every().second() # type: (int) -> Schedule Adds the specified number of seconds to the relative time till the next event. :: schedule = Schedule().every().seconds(2) Args: value(int): The number of seconds to wait between events Calculates the next time to run, based on the last time the event was run. Args: last_run(datetime): The last time the event happened Returns: A :obj:`datetime` of when the event should next happen A function with a schedule Checks whether the function needs to be run based on the schedule. Returns: A :obj:`bool` of whether or not to run Runs the function and calculates + stores the next run time A store for Scheduled Jobs # type: List[ScheduledJob] Adds a scheuled job to the scheduler Args: job(ScheduledJob): The job to be added to the scheduler Runs any ScheduledJobs in the store, where job.should_run() returns true # type: List[ScheduledJob] | 4.110179 | 4 |
probability-theory/p12114.py | sajjadt/competitive-programming | 10 | 6619360 |
case = 1
while True:
B, S = list(map(int, input().split()))
if B == 0 and S == 0:
break
if B == 1:
print("Case " + str(case) +": :-\\")
elif S >= B:
print("Case " + str(case) +": :-|")
else:
print("Case " + str(case) +": :-(")
case += 1 |
case = 1
while True:
B, S = list(map(int, input().split()))
if B == 0 and S == 0:
break
if B == 1:
print("Case " + str(case) +": :-\\")
elif S >= B:
print("Case " + str(case) +": :-|")
else:
print("Case " + str(case) +": :-(")
case += 1 | none | 1 | 3.369924 | 3 | |
django_saltapi/urls.py | holmboe/django-saltapi | 7 | 6619361 | # -*- coding: utf-8 -*-
from django_saltapi.utils import REGEX_JID, REGEX_HOSTNAME
from django.conf.urls import patterns, url
urlpatterns = patterns('django_saltapi.views',
url(r'^$', 'apiwrapper'),
url(r'^minions/$', 'minions_list'),
url(r'^minions/(?P<tgt>' + REGEX_HOSTNAME + ')/$', 'minions_details'),
url(r'^jobs/$', 'jobs_list'),
url(r'^jobs/(?P<jid>' + REGEX_JID + ')/$', 'jobs_details'),
url(r'^ping/(?P<tgt>' + REGEX_HOSTNAME + ')/$', 'ping'),
url(r'^echo/(?P<tgt>' + REGEX_HOSTNAME + ')/(?P<arg>\w+)/$', 'echo'),
)
| # -*- coding: utf-8 -*-
from django_saltapi.utils import REGEX_JID, REGEX_HOSTNAME
from django.conf.urls import patterns, url
# Route table for the Salt API wrapper views.  Uses the legacy
# `patterns()` prefix form (deprecated in Django 1.8, removed in 1.10);
# keep only while the project pins an older Django.
urlpatterns = patterns('django_saltapi.views',
    url(r'^$', 'apiwrapper'),
    url(r'^minions/$', 'minions_list'),
    url(r'^minions/(?P<tgt>' + REGEX_HOSTNAME + ')/$', 'minions_details'),
    url(r'^jobs/$', 'jobs_list'),
    url(r'^jobs/(?P<jid>' + REGEX_JID + ')/$', 'jobs_details'),
    url(r'^ping/(?P<tgt>' + REGEX_HOSTNAME + ')/$', 'ping'),
    url(r'^echo/(?P<tgt>' + REGEX_HOSTNAME + ')/(?P<arg>\w+)/$', 'echo'),
)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.940245 | 2 |
test.py | aunghoo/insighter | 0 | 6619362 | import sys
# Takes first name and last name via command
# line arguments and then display them
print("Song " + sys.argv[1]) | import sys
# Takes first name and last name via command
# line arguments and then display them
print("Song " + sys.argv[1]) | en | 0.603246 | # Takes first name and last name via command # line arguments and then display them | 2.870342 | 3 |
darwin19/parse.py | timkphd/examples | 5 | 6619363 | #!/usr/bin/python
import numpy as np
sfile=open("wing.dat","r")
# in pyplot set sym=['.','.','.','.','#','X','0']
sin=sfile.readlines()
x=np.empty(799)
y=np.empty(799)
i=0
for s in sin:
s=s.split()
x[i]=float(s[0])
y[i]=float(s[1])
i=i+1
m0=open("m0","w")
m1=open("m1","w")
m2=open("m2","w")
m3=open("m3","w")
i=0
matfile=open("output","r")
dat=matfile.readlines()
for d in dat:
d=int(d)
print(d,x[i],y[i])
if( d == 0):myfile=m0
if (d == 1):myfile=m1
if (d == 2):myfile=m2
if (d == 3):myfile=m3
myfile.write("%g %g\n" %(x[i],y[i]))
i=i+1
#!/usr/bin/python
# Split the wing.dat coordinate list into per-material files m0..m3,
# using the integer material label on each line of "output".
import numpy as np
sfile=open("wing.dat","r")
# in pyplot set sym=['.','.','.','.','#','X','0']
sin=sfile.readlines()
# Coordinate buffers; wing.dat is expected to hold exactly 799 "x y" rows.
x=np.empty(799)
y=np.empty(799)
i=0
for s in sin:
    s=s.split()
    x[i]=float(s[0])
    y[i]=float(s[1])
    i=i+1
# One output file per material label (0-3).
# NOTE(review): none of these handles (nor sfile/matfile) is ever closed;
# output may be lost if the interpreter exits abnormally.
m0=open("m0","w")
m1=open("m1","w")
m2=open("m2","w")
m3=open("m3","w")
i=0
matfile=open("output","r")
dat=matfile.readlines()
for d in dat:
    d=int(d)
    print(d,x[i],y[i])
    # Route the point to the file matching its material label.
    # NOTE(review): a label outside 0-3 silently reuses the previous
    # handle -- confirm "output" only ever contains 0..3.
    if( d == 0):myfile=m0
    if (d == 1):myfile=m1
    if (d == 2):myfile=m2
    if (d == 3):myfile=m3
    myfile.write("%g %g\n" %(x[i],y[i]))
    i=i+1
test/test_construction_k_resolutions_of_n.py | SebastianoF/counting_sub_multisets | 1 | 6619364 | <gh_stars>1-10
from numpy.testing import assert_array_equal, assert_equal, assert_raises
from k_resolutions.construction_k_resolutions_of_n import k_resolutions_list
def test_k_resolutions_list_simple():
    """All 15 weak compositions of 4 into 3 non-negative parts are produced."""
    expected = [[0, 0, 4], [0, 1, 3], [0, 2, 2], [0, 3, 1],
                [0, 4, 0], [1, 0, 3], [1, 1, 2], [1, 2, 1],
                [1, 3, 0], [2, 0, 2], [2, 1, 1], [2, 2, 0],
                [3, 0, 1], [3, 1, 0], [4, 0, 0]]
    result = k_resolutions_list(4, 3)
    # Same count and same members, in any order.
    assert_equal(len(result), len(expected))
    for row in result:
        assert row in expected
def test_k_resolutions_list_extreme():
    """Zero parts is invalid input and must raise IOError."""
    assert_raises(IOError, k_resolutions_list, 5, 0)
def test_k_resolutions_list_extreme_1():
    """n = 1 split into a single part has exactly one resolution: [1]."""
    assert_array_equal(k_resolutions_list(1, 1), [[1]])
| from numpy.testing import assert_array_equal, assert_equal, assert_raises
from k_resolutions.construction_k_resolutions_of_n import k_resolutions_list
def test_k_resolutions_list_simple():
    """All 15 weak compositions of 4 into 3 non-negative parts are produced."""
    ans_ground = [[0, 0, 4], [0, 1, 3], [0, 2, 2], [0, 3, 1],
                  [0, 4, 0], [1, 0, 3], [1, 1, 2], [1, 2, 1],
                  [1, 3, 0], [2, 0, 2], [2, 1, 1], [2, 2, 0],
                  [3, 0, 1], [3, 1, 0], [4, 0, 0]]
    ans = k_resolutions_list(4, 3)
    # Same count and same members, in any order.
    assert_equal(len(ans), len(ans_ground))
    for a in ans:
        assert a in ans_ground
def test_k_resolutions_list_extreme():
    """Zero parts is invalid input and must raise IOError."""
    with assert_raises(IOError):
        k_resolutions_list(5, 0)
def test_k_resolutions_list_extreme_1():
    """n = 1 split into a single part has exactly one resolution: [1]."""
    ans_ground = [[1]]
    ans = k_resolutions_list(1, 1)
    assert_array_equal(ans, ans_ground)
pyxtal/miscellaneous/get_molecule_from_pubchem.py | ubikpt/PyXtal | 127 | 6619365 | <gh_stars>100-1000
import pubchempy as pcp
import numpy as np
import json
from pyxtal.database.element import Element
from rdkit import Chem
from rdkit.Chem import AllChem
import pymatgen as mg
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy ndarrays as nested lists."""

    def default(self, obj):
        # Convert ndarrays to plain Python lists; defer everything else
        # to the base class, which raises TypeError for unknown types.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def read_molecule(mol, name):
    """Convert a pubchempy Compound 3-D record into a plain molecule dict.

    Args:
        mol: pubchempy Compound carrying a 3-D conformer record.
        name: label stored under the "name" key of the result.

    Returns:
        dict with keys "name", "elements" (symbol strings),
        "xyz" ((N, 3) float array), "volume" and "pubchem id".
    """
    conformer = mol.record["coords"][0]["conformers"][0]
    # Stack the per-axis coordinate lists into a single (N, 3) array.
    xyz = np.column_stack((conformer["x"], conformer["y"], conformer["z"]))
    # Map atomic numbers to element symbols via pyxtal's Element table.
    symbols = [Element(number).short_name for number in mol.record["atoms"]["element"]]
    return {
        "name": name,
        "elements": symbols,
        "xyz": xyz,
        "volume": mol.volume_3d,
        "pubchem id": mol.cid,
    }
# Compound names to fetch from PubChem by name (3-D conformer records).
names = [
    "H2O",
    "CH4",
    "NH3",
    "benzene",
    "naphthalene",
    "anthracene",
    "tetracene",
    "Pentacene",
    "coumarin",
    "resorcinol",
    "benzamide",
    "aspirin",
    "ddt",
    "lindane",
    "Glycine",
    "Glucose",
    "ROY",
]
# Accumulates every molecule dict; dumped to molecules.json at the end.
molecules = []
molecule = {
"name": "C60",
"elements": ["C"] * 60,
"xyz": np.array(
[
[2.2101953, 0.5866631, 2.6669504],
[3.1076393, 0.1577008, 1.6300286],
[1.3284430, -0.3158939, 3.2363232],
[3.0908709, -1.1585005, 1.2014240],
[3.1879245, -1.4574599, -0.1997005],
[3.2214623, 1.2230966, 0.6739440],
[3.3161210, 0.9351586, -0.6765151],
[3.2984981, -0.4301142, -1.1204138],
[-0.4480842, 1.3591484, 3.2081020],
[0.4672056, 2.2949830, 2.6175264],
[-0.0256575, 0.0764219, 3.5086259],
[1.7727917, 1.9176584, 2.3529691],
[2.3954623, 2.3095689, 1.1189539],
[-0.2610195, 3.0820935, 1.6623117],
[0.3407726, 3.4592388, 0.4745968],
[1.6951171, 3.0692446, 0.1976623],
[-2.1258394, -0.8458853, 2.6700963],
[-2.5620990, 0.4855202, 2.3531715],
[-0.8781521, -1.0461985, 3.2367302],
[-1.7415096, 1.5679963, 2.6197333],
[-1.6262468, 2.6357030, 1.6641811],
[-3.2984810, 0.4301871, 1.1204208],
[-3.1879469, 1.4573895, 0.1996030],
[-2.3360261, 2.5813627, 0.4760912],
[-0.5005210, -2.9797771, 1.7940308],
[-1.7944338, -2.7729087, 1.2047891],
[-0.0514245, -2.1328841, 2.7938830],
[-2.5891471, -1.7225828, 1.6329715],
[-3.3160705, -0.9350636, 0.6765268],
[-1.6951919, -3.0692581, -0.1976564],
[-2.3954901, -2.3096853, -1.1189862],
[-3.2214182, -1.2231835, -0.6739581],
[2.1758234, -2.0946263, 1.7922529],
[1.7118619, -2.9749681, 0.7557198],
[1.3130656, -1.6829416, 2.7943892],
[0.3959024, -3.4051395, 0.7557638],
[-0.3408219, -3.4591883, -0.4745610],
[2.3360057, -2.5814499, -0.4761050],
[1.6263757, -2.6357349, -1.6642309],
[0.2611352, -3.0821271, -1.6622618],
[-2.2100844, -0.5868636, -2.6670300],
[-1.7726970, -1.9178969, -2.3530466],
[-0.4670723, -2.2950509, -2.6175105],
[-1.3283500, 0.3157683, -3.2362375],
[-2.1759882, 2.0945383, -1.7923294],
[-3.0909663, 1.1583472, -1.2015749],
[-3.1076090, -0.1578453, -1.6301627],
[-1.3131365, 1.6828292, -2.7943639],
[0.5003224, 2.9799637, -1.7940203],
[-0.3961148, 3.4052817, -0.7557272],
[-1.7120629, 2.9749122, -0.7557988],
[0.0512824, 2.1329478, -2.7937450],
[2.1258630, 0.8460809, -2.6700534],
[2.5891853, 1.7227742, -1.6329562],
[1.7943010, 2.7730684, -1.2048262],
[0.8781323, 1.0463514, -3.2365313],
[0.4482452, -1.3591061, -3.2080510],
[1.7416948, -1.5679557, -2.6197714],
[2.5621724, -0.4853529, -2.3532026],
[0.0257904, -0.0763567, -3.5084446],
]
),
"volume": None,
"pubchem id": 123591,
}
molecules.append(molecule)
molecule = {
"name": "Glycine-z",
"elements": ["H", "N", "H", "C", "H", "H", "H", "C", "O", "O"],
"xyz": np.array(
[
[3.090064, 3.564361, -0.325567],
[2.538732, 3.591476, -1.036692],
[2.097666, 2.810077, -1.104272],
[1.560226, 4.699895, -0.864107],
[3.019736, 3.730336, -1.784084],
[0.843929, 4.596366, -1.524923],
[1.157363, 4.630876, 0.026367],
[2.190568, 6.104112, -1.022811],
[1.309305, 6.980823, -0.972406],
[3.437359, 6.189565, -1.153186],
]
),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "xxvi",
"elements": ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'N', 'C', 'O', 'C', 'C', 'C', 'C', 'C', 'C', 'Cl', 'N', 'C', 'O', 'C', 'C', 'C', 'C', 'C', 'C', 'Cl', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'],
"xyz": np.array(
[
[ 3.13073867, -3.36491150, -2.64721385],
[ 1.82477880, -3.71896813, -2.32546087],
[ 0.94261928, -2.80596909, -1.82568763],
[ 1.33731746, -1.45528852, -1.62963296],
[ 2.66874173, -1.09519586, -1.98228771],
[ 3.53907849, -2.08759686, -2.49307291],
[ 3.06616823, 0.24489274, -1.81773666],
[ 2.21038255, 1.17722323, -1.34625977],
[ 0.87895828, 0.83753665, -1.01123254],
[ 0.44151629, -0.46570153, -1.12746036],
[-0.94993120, -0.85053187, -0.72466066],
[-1.16819747, -1.49926227, 0.47582205],
[-2.47506679, -1.91279650, 0.84257981],
[-3.52001657, -1.65697309, 0.0221523 ],
[-3.35904520, -0.99094614, -1.2040898 ],
[-4.44946712, -0.73078858, -2.06144647],
[-4.26939744, -0.08842722, -3.23777638],
[-2.99799640, 0.32962609, -3.6132389 ],
[-1.90848392, 0.10128131, -2.81797595],
[-2.05490421, -0.57541197, -1.58133938],
[-0.05140171, -1.75619845, 1.29870195],
[-0.02048065, -2.08495563, 2.61339307],
[-1.02814328, -2.27640038, 3.26779866],
[ 1.32913247, -2.27627206, 3.23718665],
[ 1.39188403, -3.28135647, 4.20405182],
[ 2.55142361, -3.57714301, 4.86588481],
[ 3.69004600, -2.87488273, 4.6081055 ],
[ 3.66640049, -1.85872590, 3.68219674],
[ 2.50413258, -1.57167947, 2.99828407],
[ 2.57998011, -0.25813695, 1.85536291],
[ 0.01862539, 1.84408465, -0.51985822],
[-0.06322446, 3.08119149, -1.05782811],
[ 0.50335840, 3.39619442, -2.09912528],
[-0.93447067, 4.06888913, -0.35196746],
[-1.91485002, 4.70053843, -1.12064886],
[-2.71788802, 5.64575569, -0.54703666],
[-2.55145998, 5.99309950, 0.76811263],
[-1.59017894, 5.42377994, 1.53293698],
[-0.77349930, 4.45245293, 0.95626051],
[ 0.52282048, 3.81797397, 1.91420694],
[ 3.72278594, -4.00725445, -2.96593944],
[ 1.54648930, -4.59649704, -2.45399439],
[ 0.07682973, -3.06919038, -1.61225759],
[ 4.40941526, -1.85455891, -2.72715538],
[ 3.93468611, 0.49322605, -2.03843703],
[ 2.50140425, 2.05508514, -1.24004422],
[-2.61330209, -2.36028263, 1.64664034],
[-4.37125504, -1.93086070, 0.27844672],
[-5.30252447, -1.00549091, -1.81425621],
[-4.99604225, 0.07369089, -3.79595391],
[-2.88613494, 0.77496485, -4.4226979 ],
[-1.06724932, 0.39216806, -3.0909586 ],
[ 0.70434526, -1.62389193, 0.92485864],
[ 0.62249541, -3.76345476, 4.40295656],
[ 2.56324636, -4.26135813, 5.49534309],
[ 4.47853291, -3.08125872, 5.05488365],
[ 4.43578910, -1.36373956, 3.51750179],
[-0.50521367, 1.64703600, 0.10159614],
[-2.02034538, 4.47929608, -2.01835861],
[-3.38269071, 6.05574511, -1.05183405],
[-3.11167675, 6.63358995, 1.14172011],
[-1.47740803, 5.67620528, 2.4211357 ]
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "xxv",
"elements": ['O', 'H', 'O', 'O', 'O', 'O', 'O', 'N', 'N', 'C', 'C', 'C', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'N', 'N', 'C', 'C', 'H', 'C', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'H', 'C', 'H', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'C', 'H', 'C', 'C', 'H', 'H', 'C', 'H', 'H', 'H', 'C', 'H', 'H', 'H'],
"xyz": np.array(
[
[ 0.109856, 2.583241, 3.821450],
[ 0.868664, 3.006013, 3.759431],
[ 0.441683, 1.937362, 1.737301],
[-4.137107, 1.865107, 5.939288],
[-5.729484, 0.890497, 4.905636],
[-4.813431, -1.371183, 0.773325],
[-2.969797, -1.063412, -0.208262],
[-4.585626, 1.285967, 4.986797],
[-3.721930, -0.864022, 0.696758],
[-0.236548, 2.001343, 2.718888],
[-1.612853, 1.397382, 2.765593],
[-2.031292, 0.603409, 1.712034],
[-1.476709, 0.449523, 0.981587],
[-3.291929, 0.044125, 1.771756],
[-4.155857, 0.261440, 2.815362],
[-5.009223, -0.107555, 2.826081],
[-3.700827, 1.047140, 3.837529],
[-2.445325, 1.616352, 3.842123],
[-2.166030, 2.140061, 4.558023],
[ 2.309524, 3.948632, 3.675973],
[ 4.084307, 4.909178, 5.015892],
[ 4.985582, 4.706479, 3.921752],
[ 6.248705, 5.279551, 3.977646],
[ 6.494420, 5.785885, 4.718813],
[ 7.141056, 5.104706, 2.943228],
[ 7.977095, 5.510104, 2.992997],
[ 6.825061, 4.343827, 1.832244],
[ 5.561876, 3.762756, 1.790898],
[ 5.330553, 3.237669, 1.059685],
[ 4.634979, 3.945599, 2.810768],
[ 3.238411, 3.354600, 2.693621],
[ 2.899014, 3.510692, 1.797789],
[ 3.283203, 2.396537, 2.834504],
[ 3.498132, 6.251128, 5.025080],
[ 4.195172, 6.902798, 4.855867],
[ 3.129199, 6.431213, 5.904833],
[ 2.412692, 6.409702, 3.993725],
[ 1.908136, 7.665609, 3.676739],
[ 2.251805, 8.411321, 4.114701],
[ 0.910951, 7.847900, 2.731138],
[ 0.443740, 6.728229, 2.068835],
[-0.208340, 6.828338, 1.413423],
[ 0.919690, 5.471495, 2.357492],
[ 0.588580, 4.732402, 1.898857],
[ 1.894045, 5.302441, 3.334485],
[ 2.995113, 3.950011, 4.994453],
[ 2.356103, 4.172567, 5.689680],
[ 3.342599, 3.063927, 5.178214],
[ 7.798674, 4.183322, 0.696758],
[ 7.734765, 4.941996, 0.111022],
[ 7.592569, 3.383833, 0.209027],
[ 8.690665, 4.122926, 1.046668],
[ 0.345940, 9.213843, 2.455498],
[-0.525730, 9.285546, 2.852114],
[ 0.278621, 9.346218, 1.506835],
[ 0.924298, 9.881233, 2.831441],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "BIPHEN",
"elements": ['C']*12 + ['H']*10,
"xyz": np.array(
[
[ 3.522287, 0.022171, 0.016457],
[ 2.842472, 1.196407, 0.046472],
[ 1.414764, 1.185531, 0.030851],
[ 0.738955, -0.016045, -0.014385],
[ 1.486857, -1.160752, -0.046285],
[ 2.876353, -1.189890, -0.034198],
[-0.730003, 0.001127, -0.011393],
[-1.433617, -1.183684, 0.030781],
[-2.825556, -1.174273, 0.047609],
[-3.562975, -0.014379, 0.015227],
[-2.873925, 1.173271, -0.035666],
[-1.455614, 1.160516, -0.045470],
[ 4.640687, 0.037313, 0.023516],
[ 3.383973, 2.122626, 0.090707],
[ 0.962204, 2.134564, 0.062104],
[ 1.007704, -2.125016, -0.073566],
[ 3.459566, -2.083218, -0.066584],
[-0.890536, -2.122135, 0.054618],
[-3.366681, -2.110257, 0.113713],
[-4.660789, -0.042737, 0.026237],
[-3.424430, 2.086622, -0.058475],
[-0.951057, 2.106547, -0.094314],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "ANULEN",
"elements": ['C']*18 + ['H']*18,
"xyz": np.array(
[
[ -1.869782, -2.195520, 0.409199],
[ -2.270978, -2.442720, 1.719589],
[ -1.782410, -1.740960, 2.843459],
[ -0.868955, -0.711840, 2.762571],
[ -0.260498, -0.039360, 3.801747],
[ 0.704539, 0.996960, 3.598099],
[ 1.176140, 1.380960, 2.354323],
[ 1.995002, 2.440320, 2.049803],
[ -2.346725, -2.824320, -0.732752],
[ 1.869782, 2.195520, -0.409199],
[ 2.270978, 2.442720, -1.719589],
[ 1.782410, 1.740960, -2.843459],
[ 0.868955, 0.711840, -2.762571],
[ 0.260498, 0.039360, -3.801747],
[ -0.704539, -0.996960, -3.598099],
[ -1.176140, -1.380960, -2.354323],
[ -1.995002, -2.440320, -2.049803],
[ 2.346725, 2.824320, 0.732752],
[ -1.188808, -1.334400, 0.295004],
[ -2.871375, -3.206400, 1.874704],
[ -2.109651, -2.049600, 3.711342],
[ -0.645378, -0.374400, 1.855671],
[ -0.465655, -0.259200, 4.748615],
[ 1.036005, 1.536000, 4.415546],
[ 0.937534, 0.806400, 1.655830],
[ 2.260780, 3.004800, 2.826330],
[ -2.915514, -3.595200, -0.570976],
[ 1.188808, 1.334400, -0.295004],
[ 2.871375, 3.206400, -1.874704],
[ 2.109651, 2.049600, -3.711342],
[ 0.645378, 0.374400, -1.855671],
[ 0.465655, 0.259200, -4.748615],
[ -1.036005, -1.536000, -4.415546],
[ -0.937534, -0.806400, -1.655830],
[ -2.260780, -3.004800, -2.826330],
[ 2.915514, 3.595200, 0.570976],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "QUPHEN",
"elements": ['C']*24 + ['H']*18,
"xyz": np.array(
[
[ -0.237192, -0.001402, 0.712733],
[ -2.361455, -0.040280, 7.580801],
[ -2.807282, -1.022591, 6.689173],
[ -2.372134, -1.008061, 5.351374],
[ 0.170869, 1.009744, 1.604895],
[ -0.253537, 1.016588, 2.942338],
[ -1.085084, 0.014137, 3.446418],
[ -1.478465, -1.012493, 2.572608],
[ -1.058264, -1.012549, 1.227860],
[ -1.532259, 0.001515, 4.863509],
[ -1.114307, 1.002507, 5.762086],
[ -1.535804, 0.980235, 7.103627],
[ 0.237192, 0.001402, -0.712733],
[ 2.361455, 0.040280, -7.580801],
[ 2.807282, 1.022591, -6.689173],
[ 2.372134, 1.008061, -5.351374],
[ -0.170869, -1.009744, -1.604895],
[ 0.253537, -1.016588, -2.942338],
[ 1.085084, -0.014137, -3.446418],
[ 1.478465, 1.012493, -2.572608],
[ 1.058264, 1.012549, -1.227860],
[ 1.532259, -0.001515, -4.863509],
[ 1.114307, -1.002507, -5.762086],
[ 1.535804, -0.980235, -7.103627],
[ -2.742286, 0.016830, 8.609809],
[ -3.531354, -1.709367, 7.068525],
[ -2.716147, -1.783980, 4.839454],
[ 0.751149, 1.706562, 1.291828],
[ -0.008889, 1.923669, 3.472789],
[ -2.055778, -1.804176, 2.881221],
[ -1.405779, -1.739100, 0.522077],
[ -0.575572, 1.925913, 5.498732],
[ -1.234116, 1.740222, 7.822240],
[ 2.742286, -0.016830, -8.609809],
[ 3.531354, 1.709367, -7.068525],
[ 2.716147, 1.783980, -4.839454],
[ -0.751149, -1.706562, -1.291828],
[ 0.008889, -1.923669, -3.472789],
[ 2.055778, 1.804176, -2.881221],
[ 1.405779, 1.739100, -0.522077],
[ 0.575572, -1.925913, -5.498732],
[ 1.234116, -1.740222, -7.822240],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "DBPERY",
"elements": ['C']*28 + ['H']*16,
"xyz": np.array(
[
[ -2.63477, 2.22938, 5.17206],
[ -2.19688, 1.31535, 6.15920],
[ -2.00579, 2.22233, 3.89378],
[ -2.47768, 3.15252, 2.93683],
[ -3.50840, 4.04097, 3.20108],
[ -4.10838, 4.03584, 4.44721],
[ -3.70215, 3.15136, 5.46502],
[ -4.32955, 3.16659, 6.75645],
[ -5.37879, 4.04390, 7.15239],
[ -5.94660, 4.01197, 8.42979],
[ -5.48787, 3.09743, 9.36150],
[ -4.46449, 2.22335, 9.00935],
[ -3.88541, 2.25248, 7.72568],
[ -2.85263, 1.36213, 7.41318],
[ -0.50166, 0.38562, 4.59679],
[ -0.93955, 1.29965, 3.60965],
[ -1.13064, 0.39267, 5.87507],
[ -0.65875, -0.53752, 6.83202],
[ 0.37197, -1.42597, 6.56777],
[ 0.97195, -1.42084, 5.32164],
[ 0.56572, -0.53636, 4.30382],
[ 1.19311, -0.55159, 3.01239],
[ 2.24236, -1.42889, 2.61646],
[ 2.81017, -1.39697, 1.33906],
[ 2.35143, -0.48243, 0.40735],
[ 1.32806, 0.39165, 0.75950],
[ 0.74898, 0.36252, 2.04317],
[ -0.28381, 1.25287, 2.35567],
[ -1.08469, -0.59865, 7.82813],
[ 0.70545, -2.12121, 7.33346],
[ 1.77455, -2.13524, 5.16895],
[ 2.64219, -2.16872, 3.30346],
[ 3.60729, -2.08896, 1.08081],
[ 2.78363, -0.44760, -0.58861],
[ 0.97836, 1.10381, 0.01433],
[ -0.58003, 1.93503, 1.56415],
[ -2.05174, 3.21365, 1.94072],
[ -3.84189, 4.73621, 2.43539],
[ -4.91098, 4.75024, 4.59990],
[ -5.77862, 4.78372, 6.46539],
[ -6.74372, 4.70396, 8.68804],
[ -5.92006, 3.06260, 10.35746],
[ -4.11479, 1.51119, 9.75452],
[ -2.55640, 0.67997, 8.20470],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "TBZPER",
"elements": ['C']*34 + ['H']*18,
"xyz": np.array(
[
[ 5.623156, 2.615557, 2.025778],
[ 0.115983, 1.289134, 2.629836],
[ -0.709893, 2.176080, 3.254566],
[ -0.235965, 3.393299, 3.671818],
[ 1.055842, 3.784834, 3.356390],
[ 1.899715, 2.916533, 2.642851],
[ 3.279508, 3.308067, 2.307518],
[ 4.231365, 2.309255, 2.031137],
[ 3.771434, 0.974841, 1.700398],
[ 3.683447, 4.653135, 2.205694],
[ 2.749588, 5.729189, 1.932374],
[ 6.523021, 1.595437, 1.586323],
[ 3.171524, 7.007669, 1.725662],
[ 4.539319, 7.356587, 1.789207],
[ 4.961256, 8.675019, 1.577136],
[ 6.279058, 8.997303, 1.656758],
[ 7.236914, 8.033116, 1.894094],
[ 6.874969, 6.685385, 2.092385],
[ 7.834825, 5.659937, 2.442264],
[ 9.190621, 5.966240, 2.733958],
[ 10.046493, 5.023361, 3.204036],
[ 9.606559, 3.747545, 3.449794],
[ 6.049092, 0.348919, 1.211179],
[ 8.310753, 3.379982, 3.150444],
[ 7.420887, 4.325524, 2.593853],
[ 6.031095, 3.947307, 2.279191],
[ 5.061241, 4.978082, 2.197272],
[ 5.497175, 6.357775, 2.050277],
[ 4.689296, 0.002664, 1.261709],
[ 4.239364, -1.283807, 0.839863],
[ 2.927561, -1.563475, 0.907236],
[ 1.981703, -0.631249, 1.329847],
[ 2.395641, 0.647231, 1.759349],
[ 1.467780, 1.616745, 2.324362],
[ -0.159976, 0.585970, 2.312112],
[ -1.679748, 1.784545, 3.529416],
[ -0.899865, 4.155060, 4.004088],
[ 1.479778, 4.794300, 3.529416],
[ 1.639754, 5.433540, 1.768536],
[ 7.498875, 1.864450, 1.554168],
[ 2.419637, 7.777420, 1.133088],
[ 4.299355, 9.348885, 1.401048],
[ 6.579013, 9.854950, 1.347456],
[ 8.238764, 8.256850, 1.936968],
[ 9.498575, 6.898465, 2.710224],
[ 10.958356, 5.433540, 3.537072],
[ 10.158476, 3.036390, 3.881592],
[ 6.718992, -0.346255, 0.826848],
[ 7.958806, 2.343880, 3.246144],
[ 5.079238, -1.837815, 0.436392],
[ 2.479628, -2.477055, 0.474672],
[ 0.839874, -0.905590, 1.454640],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "TBZPYR",
"elements": ['C']*28 + ['H']*16,
"xyz": np.array(
[
[ 0.648186, 1.775038, 9.169800],
[ 0.680394, 2.972654, 8.479600],
[ 0.728706, 4.223735, 9.150080],
[ 0.607926, 5.485509, 8.400720],
[ 0.088572, 6.565502, 9.130360],
[ -0.370392, 7.741732, 8.459880],
[ -0.954162, 8.811032, 9.189520],
[ -0.269742, 7.795197, 7.099200],
[ 0.402600, 6.800748, 6.369560],
[ 0.938058, 5.624518, 7.000600],
[ 0.644160, 6.961143, 4.969440],
[ 1.372866, 6.073624, 4.279240],
[ 1.932480, 4.972245, 4.930000],
[ 1.743258, 4.747692, 6.251240],
[ 0.648186, 1.775038, 10.550200],
[ 0.680394, 2.972654, 11.240400],
[ 0.728706, 4.223735, 10.569920],
[ 0.607926, 5.485509, 11.319280],
[ 0.088572, 6.565502, 10.589640],
[ -0.370392, 7.741732, 11.260120],
[ -0.954162, 8.811032, 10.530480],
[ -0.269742, 7.795197, 12.620800],
[ 0.402600, 6.800748, 13.350440],
[ 0.938058, 5.624518, 12.719400],
[ 0.644160, 6.961143, 14.750560],
[ 1.372866, 6.073624, 15.440760],
[ 1.932480, 4.972245, 14.790000],
[ 1.743258, 4.747692, 13.468760],
[ 0.628056, 0.908905, 8.676800],
[ 0.668316, 2.961961, 7.493600],
[ -1.368840, 9.570235, 8.696520],
[ -0.688446, 8.554400, 6.606200],
[ 0.261690, 7.763118, 4.496160],
[ 1.513776, 6.191247, 3.293240],
[ 2.492094, 4.330665, 4.397560],
[ 2.174040, 3.956410, 6.685080],
[ 0.628056, 0.908905, 11.043200],
[ 0.668316, 2.961961, 12.226400],
[ -1.368840, 9.570235, 11.023480],
[ -0.688446, 8.554400, 13.113800],
[ 0.261690, 7.763118, 15.223840],
[ 1.513776, 6.191247, 16.426760],
[ 2.492094, 4.330665, 15.322440],
[ 2.174040, 3.956410, 13.034920],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
# Molecules only available as SMILES: generate a 3-D conformer with RDKit
# and convert it to a pymatgen Molecule to extract symbols and coordinates.
data = {}
data["YICMOP"] = "s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC"
data["MERQIM"] = "s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2"
for name in data.keys():
    smi = data[name]
    m = Chem.MolFromSmiles(smi)
    m2 = Chem.AddHs(m)  # add explicit hydrogens before embedding
    AllChem.EmbedMolecule(m2)
    # NOTE(review): EmbedMultipleConfs re-embeds m2 and `cids` is unused;
    # the EmbedMolecule call above may be redundant -- confirm intent.
    cids = AllChem.EmbedMultipleConfs(m2, numConfs=1)
    xyz = Chem.rdmolfiles.MolToXYZBlock(m2, 0)
    mol = mg.core.Molecule.from_str(xyz, fmt="xyz")
    molecule = {
        "name": name,
        "elements": [site.specie.name for site in mol],
        "xyz": mol.cart_coords,
        "volume": None,
        "pubchem id": None,
    }
    molecules.append(molecule)
# Fetch the named compounds from PubChem (network access required).
for name in names:
    print(name)
    mol = pcp.get_compounds(name, "name", record_type="3d")[0]
    molecule = read_molecule(mol,name)
    molecules.append(molecule)
# Refcode -> PubChem CID for compounds fetched by CID rather than name.
dicts = {"LEFCIK": 812440,
         "OFIXUX": 102393188,
         "HAHCOI": 10910901,
         "JAPWIH": 11449344,
         "WEXBOS": 12232323,
         "LAGNAL": 139087974,
         "LUFHAW": 102382626,
         "PAHYON01": 10006,
         "AXOSOW01": 7847,
        }
for key in dicts.keys():
    mol = pcp.get_compounds(dicts[key], "cid", record_type="3d")[0]
    molecule = read_molecule(mol,key)
    molecules.append(molecule)
# Serialize everything; NumpyEncoder converts the ndarray xyz to lists.
dumped = json.dumps(molecules, cls=NumpyEncoder, indent=2)
with open("molecules.json", "w") as f:
    f.write(dumped)
| import pubchempy as pcp
import numpy as np
import json
from pyxtal.database.element import Element
from rdkit import Chem
from rdkit.Chem import AllChem
import pymatgen as mg
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy ndarrays as nested lists."""
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            # tolist() recursively converts to plain Python numbers/lists.
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)
def read_molecule(mol, name):
    """Convert a pubchempy Compound 3-D record into a plain molecule dict.

    Args:
        mol: pubchempy Compound carrying a 3-D conformer record.
        name: label stored under the "name" key of the result.

    Returns:
        dict with keys "name", "elements" (symbol strings),
        "xyz" ((N, 3) float array), "volume" and "pubchem id".
    """
    # Per-axis coordinate lists, transposed to (N, 1) columns and stacked
    # into an (N, 3) array.
    x = np.transpose([mol.record["coords"][0]["conformers"][0]["x"]])
    y = np.transpose([mol.record["coords"][0]["conformers"][0]["y"]])
    z = np.transpose([mol.record["coords"][0]["conformers"][0]["z"]])
    xyz = np.concatenate((x, y, z), axis=1)
    numbers = mol.record["atoms"]["element"]
    # Map atomic numbers to element symbols via pyxtal's Element table.
    elements = [Element(i).short_name for i in numbers]
    volume = mol.volume_3d
    pubchemid = mol.cid
    molecule = {
        "name": name,
        "elements": elements,
        "xyz": xyz,
        "volume": volume,
        "pubchem id": pubchemid,
    }
    return molecule
names = [
"H2O",
"CH4",
"NH3",
"benzene",
"naphthalene",
"anthracene",
"tetracene",
"Pentacene",
"coumarin",
"resorcinol",
"benzamide",
"aspirin",
"ddt",
"lindane",
"Glycine",
"Glucose",
"ROY",
]
molecules = []
molecule = {
"name": "C60",
"elements": ["C"] * 60,
"xyz": np.array(
[
[2.2101953, 0.5866631, 2.6669504],
[3.1076393, 0.1577008, 1.6300286],
[1.3284430, -0.3158939, 3.2363232],
[3.0908709, -1.1585005, 1.2014240],
[3.1879245, -1.4574599, -0.1997005],
[3.2214623, 1.2230966, 0.6739440],
[3.3161210, 0.9351586, -0.6765151],
[3.2984981, -0.4301142, -1.1204138],
[-0.4480842, 1.3591484, 3.2081020],
[0.4672056, 2.2949830, 2.6175264],
[-0.0256575, 0.0764219, 3.5086259],
[1.7727917, 1.9176584, 2.3529691],
[2.3954623, 2.3095689, 1.1189539],
[-0.2610195, 3.0820935, 1.6623117],
[0.3407726, 3.4592388, 0.4745968],
[1.6951171, 3.0692446, 0.1976623],
[-2.1258394, -0.8458853, 2.6700963],
[-2.5620990, 0.4855202, 2.3531715],
[-0.8781521, -1.0461985, 3.2367302],
[-1.7415096, 1.5679963, 2.6197333],
[-1.6262468, 2.6357030, 1.6641811],
[-3.2984810, 0.4301871, 1.1204208],
[-3.1879469, 1.4573895, 0.1996030],
[-2.3360261, 2.5813627, 0.4760912],
[-0.5005210, -2.9797771, 1.7940308],
[-1.7944338, -2.7729087, 1.2047891],
[-0.0514245, -2.1328841, 2.7938830],
[-2.5891471, -1.7225828, 1.6329715],
[-3.3160705, -0.9350636, 0.6765268],
[-1.6951919, -3.0692581, -0.1976564],
[-2.3954901, -2.3096853, -1.1189862],
[-3.2214182, -1.2231835, -0.6739581],
[2.1758234, -2.0946263, 1.7922529],
[1.7118619, -2.9749681, 0.7557198],
[1.3130656, -1.6829416, 2.7943892],
[0.3959024, -3.4051395, 0.7557638],
[-0.3408219, -3.4591883, -0.4745610],
[2.3360057, -2.5814499, -0.4761050],
[1.6263757, -2.6357349, -1.6642309],
[0.2611352, -3.0821271, -1.6622618],
[-2.2100844, -0.5868636, -2.6670300],
[-1.7726970, -1.9178969, -2.3530466],
[-0.4670723, -2.2950509, -2.6175105],
[-1.3283500, 0.3157683, -3.2362375],
[-2.1759882, 2.0945383, -1.7923294],
[-3.0909663, 1.1583472, -1.2015749],
[-3.1076090, -0.1578453, -1.6301627],
[-1.3131365, 1.6828292, -2.7943639],
[0.5003224, 2.9799637, -1.7940203],
[-0.3961148, 3.4052817, -0.7557272],
[-1.7120629, 2.9749122, -0.7557988],
[0.0512824, 2.1329478, -2.7937450],
[2.1258630, 0.8460809, -2.6700534],
[2.5891853, 1.7227742, -1.6329562],
[1.7943010, 2.7730684, -1.2048262],
[0.8781323, 1.0463514, -3.2365313],
[0.4482452, -1.3591061, -3.2080510],
[1.7416948, -1.5679557, -2.6197714],
[2.5621724, -0.4853529, -2.3532026],
[0.0257904, -0.0763567, -3.5084446],
]
),
"volume": None,
"pubchem id": 123591,
}
molecules.append(molecule)
molecule = {
"name": "Glycine-z",
"elements": ["H", "N", "H", "C", "H", "H", "H", "C", "O", "O"],
"xyz": np.array(
[
[3.090064, 3.564361, -0.325567],
[2.538732, 3.591476, -1.036692],
[2.097666, 2.810077, -1.104272],
[1.560226, 4.699895, -0.864107],
[3.019736, 3.730336, -1.784084],
[0.843929, 4.596366, -1.524923],
[1.157363, 4.630876, 0.026367],
[2.190568, 6.104112, -1.022811],
[1.309305, 6.980823, -0.972406],
[3.437359, 6.189565, -1.153186],
]
),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "xxvi",
"elements": ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'N', 'C', 'O', 'C', 'C', 'C', 'C', 'C', 'C', 'Cl', 'N', 'C', 'O', 'C', 'C', 'C', 'C', 'C', 'C', 'Cl', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'],
"xyz": np.array(
[
[ 3.13073867, -3.36491150, -2.64721385],
[ 1.82477880, -3.71896813, -2.32546087],
[ 0.94261928, -2.80596909, -1.82568763],
[ 1.33731746, -1.45528852, -1.62963296],
[ 2.66874173, -1.09519586, -1.98228771],
[ 3.53907849, -2.08759686, -2.49307291],
[ 3.06616823, 0.24489274, -1.81773666],
[ 2.21038255, 1.17722323, -1.34625977],
[ 0.87895828, 0.83753665, -1.01123254],
[ 0.44151629, -0.46570153, -1.12746036],
[-0.94993120, -0.85053187, -0.72466066],
[-1.16819747, -1.49926227, 0.47582205],
[-2.47506679, -1.91279650, 0.84257981],
[-3.52001657, -1.65697309, 0.0221523 ],
[-3.35904520, -0.99094614, -1.2040898 ],
[-4.44946712, -0.73078858, -2.06144647],
[-4.26939744, -0.08842722, -3.23777638],
[-2.99799640, 0.32962609, -3.6132389 ],
[-1.90848392, 0.10128131, -2.81797595],
[-2.05490421, -0.57541197, -1.58133938],
[-0.05140171, -1.75619845, 1.29870195],
[-0.02048065, -2.08495563, 2.61339307],
[-1.02814328, -2.27640038, 3.26779866],
[ 1.32913247, -2.27627206, 3.23718665],
[ 1.39188403, -3.28135647, 4.20405182],
[ 2.55142361, -3.57714301, 4.86588481],
[ 3.69004600, -2.87488273, 4.6081055 ],
[ 3.66640049, -1.85872590, 3.68219674],
[ 2.50413258, -1.57167947, 2.99828407],
[ 2.57998011, -0.25813695, 1.85536291],
[ 0.01862539, 1.84408465, -0.51985822],
[-0.06322446, 3.08119149, -1.05782811],
[ 0.50335840, 3.39619442, -2.09912528],
[-0.93447067, 4.06888913, -0.35196746],
[-1.91485002, 4.70053843, -1.12064886],
[-2.71788802, 5.64575569, -0.54703666],
[-2.55145998, 5.99309950, 0.76811263],
[-1.59017894, 5.42377994, 1.53293698],
[-0.77349930, 4.45245293, 0.95626051],
[ 0.52282048, 3.81797397, 1.91420694],
[ 3.72278594, -4.00725445, -2.96593944],
[ 1.54648930, -4.59649704, -2.45399439],
[ 0.07682973, -3.06919038, -1.61225759],
[ 4.40941526, -1.85455891, -2.72715538],
[ 3.93468611, 0.49322605, -2.03843703],
[ 2.50140425, 2.05508514, -1.24004422],
[-2.61330209, -2.36028263, 1.64664034],
[-4.37125504, -1.93086070, 0.27844672],
[-5.30252447, -1.00549091, -1.81425621],
[-4.99604225, 0.07369089, -3.79595391],
[-2.88613494, 0.77496485, -4.4226979 ],
[-1.06724932, 0.39216806, -3.0909586 ],
[ 0.70434526, -1.62389193, 0.92485864],
[ 0.62249541, -3.76345476, 4.40295656],
[ 2.56324636, -4.26135813, 5.49534309],
[ 4.47853291, -3.08125872, 5.05488365],
[ 4.43578910, -1.36373956, 3.51750179],
[-0.50521367, 1.64703600, 0.10159614],
[-2.02034538, 4.47929608, -2.01835861],
[-3.38269071, 6.05574511, -1.05183405],
[-3.11167675, 6.63358995, 1.14172011],
[-1.47740803, 5.67620528, 2.4211357 ]
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "xxv",
"elements": ['O', 'H', 'O', 'O', 'O', 'O', 'O', 'N', 'N', 'C', 'C', 'C', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'N', 'N', 'C', 'C', 'H', 'C', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'H', 'C', 'H', 'H', 'C', 'C', 'H', 'C', 'C', 'H', 'C', 'H', 'C', 'C', 'H', 'H', 'C', 'H', 'H', 'H', 'C', 'H', 'H', 'H'],
"xyz": np.array(
[
[ 0.109856, 2.583241, 3.821450],
[ 0.868664, 3.006013, 3.759431],
[ 0.441683, 1.937362, 1.737301],
[-4.137107, 1.865107, 5.939288],
[-5.729484, 0.890497, 4.905636],
[-4.813431, -1.371183, 0.773325],
[-2.969797, -1.063412, -0.208262],
[-4.585626, 1.285967, 4.986797],
[-3.721930, -0.864022, 0.696758],
[-0.236548, 2.001343, 2.718888],
[-1.612853, 1.397382, 2.765593],
[-2.031292, 0.603409, 1.712034],
[-1.476709, 0.449523, 0.981587],
[-3.291929, 0.044125, 1.771756],
[-4.155857, 0.261440, 2.815362],
[-5.009223, -0.107555, 2.826081],
[-3.700827, 1.047140, 3.837529],
[-2.445325, 1.616352, 3.842123],
[-2.166030, 2.140061, 4.558023],
[ 2.309524, 3.948632, 3.675973],
[ 4.084307, 4.909178, 5.015892],
[ 4.985582, 4.706479, 3.921752],
[ 6.248705, 5.279551, 3.977646],
[ 6.494420, 5.785885, 4.718813],
[ 7.141056, 5.104706, 2.943228],
[ 7.977095, 5.510104, 2.992997],
[ 6.825061, 4.343827, 1.832244],
[ 5.561876, 3.762756, 1.790898],
[ 5.330553, 3.237669, 1.059685],
[ 4.634979, 3.945599, 2.810768],
[ 3.238411, 3.354600, 2.693621],
[ 2.899014, 3.510692, 1.797789],
[ 3.283203, 2.396537, 2.834504],
[ 3.498132, 6.251128, 5.025080],
[ 4.195172, 6.902798, 4.855867],
[ 3.129199, 6.431213, 5.904833],
[ 2.412692, 6.409702, 3.993725],
[ 1.908136, 7.665609, 3.676739],
[ 2.251805, 8.411321, 4.114701],
[ 0.910951, 7.847900, 2.731138],
[ 0.443740, 6.728229, 2.068835],
[-0.208340, 6.828338, 1.413423],
[ 0.919690, 5.471495, 2.357492],
[ 0.588580, 4.732402, 1.898857],
[ 1.894045, 5.302441, 3.334485],
[ 2.995113, 3.950011, 4.994453],
[ 2.356103, 4.172567, 5.689680],
[ 3.342599, 3.063927, 5.178214],
[ 7.798674, 4.183322, 0.696758],
[ 7.734765, 4.941996, 0.111022],
[ 7.592569, 3.383833, 0.209027],
[ 8.690665, 4.122926, 1.046668],
[ 0.345940, 9.213843, 2.455498],
[-0.525730, 9.285546, 2.852114],
[ 0.278621, 9.346218, 1.506835],
[ 0.924298, 9.881233, 2.831441],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "BIPHEN",
"elements": ['C']*12 + ['H']*10,
"xyz": np.array(
[
[ 3.522287, 0.022171, 0.016457],
[ 2.842472, 1.196407, 0.046472],
[ 1.414764, 1.185531, 0.030851],
[ 0.738955, -0.016045, -0.014385],
[ 1.486857, -1.160752, -0.046285],
[ 2.876353, -1.189890, -0.034198],
[-0.730003, 0.001127, -0.011393],
[-1.433617, -1.183684, 0.030781],
[-2.825556, -1.174273, 0.047609],
[-3.562975, -0.014379, 0.015227],
[-2.873925, 1.173271, -0.035666],
[-1.455614, 1.160516, -0.045470],
[ 4.640687, 0.037313, 0.023516],
[ 3.383973, 2.122626, 0.090707],
[ 0.962204, 2.134564, 0.062104],
[ 1.007704, -2.125016, -0.073566],
[ 3.459566, -2.083218, -0.066584],
[-0.890536, -2.122135, 0.054618],
[-3.366681, -2.110257, 0.113713],
[-4.660789, -0.042737, 0.026237],
[-3.424430, 2.086622, -0.058475],
[-0.951057, 2.106547, -0.094314],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "ANULEN",
"elements": ['C']*18 + ['H']*18,
"xyz": np.array(
[
[ -1.869782, -2.195520, 0.409199],
[ -2.270978, -2.442720, 1.719589],
[ -1.782410, -1.740960, 2.843459],
[ -0.868955, -0.711840, 2.762571],
[ -0.260498, -0.039360, 3.801747],
[ 0.704539, 0.996960, 3.598099],
[ 1.176140, 1.380960, 2.354323],
[ 1.995002, 2.440320, 2.049803],
[ -2.346725, -2.824320, -0.732752],
[ 1.869782, 2.195520, -0.409199],
[ 2.270978, 2.442720, -1.719589],
[ 1.782410, 1.740960, -2.843459],
[ 0.868955, 0.711840, -2.762571],
[ 0.260498, 0.039360, -3.801747],
[ -0.704539, -0.996960, -3.598099],
[ -1.176140, -1.380960, -2.354323],
[ -1.995002, -2.440320, -2.049803],
[ 2.346725, 2.824320, 0.732752],
[ -1.188808, -1.334400, 0.295004],
[ -2.871375, -3.206400, 1.874704],
[ -2.109651, -2.049600, 3.711342],
[ -0.645378, -0.374400, 1.855671],
[ -0.465655, -0.259200, 4.748615],
[ 1.036005, 1.536000, 4.415546],
[ 0.937534, 0.806400, 1.655830],
[ 2.260780, 3.004800, 2.826330],
[ -2.915514, -3.595200, -0.570976],
[ 1.188808, 1.334400, -0.295004],
[ 2.871375, 3.206400, -1.874704],
[ 2.109651, 2.049600, -3.711342],
[ 0.645378, 0.374400, -1.855671],
[ 0.465655, 0.259200, -4.748615],
[ -1.036005, -1.536000, -4.415546],
[ -0.937534, -0.806400, -1.655830],
[ -2.260780, -3.004800, -2.826330],
[ 2.915514, 3.595200, 0.570976],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "QUPHEN",
"elements": ['C']*24 + ['H']*18,
"xyz": np.array(
[
[ -0.237192, -0.001402, 0.712733],
[ -2.361455, -0.040280, 7.580801],
[ -2.807282, -1.022591, 6.689173],
[ -2.372134, -1.008061, 5.351374],
[ 0.170869, 1.009744, 1.604895],
[ -0.253537, 1.016588, 2.942338],
[ -1.085084, 0.014137, 3.446418],
[ -1.478465, -1.012493, 2.572608],
[ -1.058264, -1.012549, 1.227860],
[ -1.532259, 0.001515, 4.863509],
[ -1.114307, 1.002507, 5.762086],
[ -1.535804, 0.980235, 7.103627],
[ 0.237192, 0.001402, -0.712733],
[ 2.361455, 0.040280, -7.580801],
[ 2.807282, 1.022591, -6.689173],
[ 2.372134, 1.008061, -5.351374],
[ -0.170869, -1.009744, -1.604895],
[ 0.253537, -1.016588, -2.942338],
[ 1.085084, -0.014137, -3.446418],
[ 1.478465, 1.012493, -2.572608],
[ 1.058264, 1.012549, -1.227860],
[ 1.532259, -0.001515, -4.863509],
[ 1.114307, -1.002507, -5.762086],
[ 1.535804, -0.980235, -7.103627],
[ -2.742286, 0.016830, 8.609809],
[ -3.531354, -1.709367, 7.068525],
[ -2.716147, -1.783980, 4.839454],
[ 0.751149, 1.706562, 1.291828],
[ -0.008889, 1.923669, 3.472789],
[ -2.055778, -1.804176, 2.881221],
[ -1.405779, -1.739100, 0.522077],
[ -0.575572, 1.925913, 5.498732],
[ -1.234116, 1.740222, 7.822240],
[ 2.742286, -0.016830, -8.609809],
[ 3.531354, 1.709367, -7.068525],
[ 2.716147, 1.783980, -4.839454],
[ -0.751149, -1.706562, -1.291828],
[ 0.008889, -1.923669, -3.472789],
[ 2.055778, 1.804176, -2.881221],
[ 1.405779, 1.739100, -0.522077],
[ 0.575572, -1.925913, -5.498732],
[ 1.234116, -1.740222, -7.822240],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "DBPERY",
"elements": ['C']*28 + ['H']*16,
"xyz": np.array(
[
[ -2.63477, 2.22938, 5.17206],
[ -2.19688, 1.31535, 6.15920],
[ -2.00579, 2.22233, 3.89378],
[ -2.47768, 3.15252, 2.93683],
[ -3.50840, 4.04097, 3.20108],
[ -4.10838, 4.03584, 4.44721],
[ -3.70215, 3.15136, 5.46502],
[ -4.32955, 3.16659, 6.75645],
[ -5.37879, 4.04390, 7.15239],
[ -5.94660, 4.01197, 8.42979],
[ -5.48787, 3.09743, 9.36150],
[ -4.46449, 2.22335, 9.00935],
[ -3.88541, 2.25248, 7.72568],
[ -2.85263, 1.36213, 7.41318],
[ -0.50166, 0.38562, 4.59679],
[ -0.93955, 1.29965, 3.60965],
[ -1.13064, 0.39267, 5.87507],
[ -0.65875, -0.53752, 6.83202],
[ 0.37197, -1.42597, 6.56777],
[ 0.97195, -1.42084, 5.32164],
[ 0.56572, -0.53636, 4.30382],
[ 1.19311, -0.55159, 3.01239],
[ 2.24236, -1.42889, 2.61646],
[ 2.81017, -1.39697, 1.33906],
[ 2.35143, -0.48243, 0.40735],
[ 1.32806, 0.39165, 0.75950],
[ 0.74898, 0.36252, 2.04317],
[ -0.28381, 1.25287, 2.35567],
[ -1.08469, -0.59865, 7.82813],
[ 0.70545, -2.12121, 7.33346],
[ 1.77455, -2.13524, 5.16895],
[ 2.64219, -2.16872, 3.30346],
[ 3.60729, -2.08896, 1.08081],
[ 2.78363, -0.44760, -0.58861],
[ 0.97836, 1.10381, 0.01433],
[ -0.58003, 1.93503, 1.56415],
[ -2.05174, 3.21365, 1.94072],
[ -3.84189, 4.73621, 2.43539],
[ -4.91098, 4.75024, 4.59990],
[ -5.77862, 4.78372, 6.46539],
[ -6.74372, 4.70396, 8.68804],
[ -5.92006, 3.06260, 10.35746],
[ -4.11479, 1.51119, 9.75452],
[ -2.55640, 0.67997, 8.20470],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "TBZPER",
"elements": ['C']*34 + ['H']*18,
"xyz": np.array(
[
[ 5.623156, 2.615557, 2.025778],
[ 0.115983, 1.289134, 2.629836],
[ -0.709893, 2.176080, 3.254566],
[ -0.235965, 3.393299, 3.671818],
[ 1.055842, 3.784834, 3.356390],
[ 1.899715, 2.916533, 2.642851],
[ 3.279508, 3.308067, 2.307518],
[ 4.231365, 2.309255, 2.031137],
[ 3.771434, 0.974841, 1.700398],
[ 3.683447, 4.653135, 2.205694],
[ 2.749588, 5.729189, 1.932374],
[ 6.523021, 1.595437, 1.586323],
[ 3.171524, 7.007669, 1.725662],
[ 4.539319, 7.356587, 1.789207],
[ 4.961256, 8.675019, 1.577136],
[ 6.279058, 8.997303, 1.656758],
[ 7.236914, 8.033116, 1.894094],
[ 6.874969, 6.685385, 2.092385],
[ 7.834825, 5.659937, 2.442264],
[ 9.190621, 5.966240, 2.733958],
[ 10.046493, 5.023361, 3.204036],
[ 9.606559, 3.747545, 3.449794],
[ 6.049092, 0.348919, 1.211179],
[ 8.310753, 3.379982, 3.150444],
[ 7.420887, 4.325524, 2.593853],
[ 6.031095, 3.947307, 2.279191],
[ 5.061241, 4.978082, 2.197272],
[ 5.497175, 6.357775, 2.050277],
[ 4.689296, 0.002664, 1.261709],
[ 4.239364, -1.283807, 0.839863],
[ 2.927561, -1.563475, 0.907236],
[ 1.981703, -0.631249, 1.329847],
[ 2.395641, 0.647231, 1.759349],
[ 1.467780, 1.616745, 2.324362],
[ -0.159976, 0.585970, 2.312112],
[ -1.679748, 1.784545, 3.529416],
[ -0.899865, 4.155060, 4.004088],
[ 1.479778, 4.794300, 3.529416],
[ 1.639754, 5.433540, 1.768536],
[ 7.498875, 1.864450, 1.554168],
[ 2.419637, 7.777420, 1.133088],
[ 4.299355, 9.348885, 1.401048],
[ 6.579013, 9.854950, 1.347456],
[ 8.238764, 8.256850, 1.936968],
[ 9.498575, 6.898465, 2.710224],
[ 10.958356, 5.433540, 3.537072],
[ 10.158476, 3.036390, 3.881592],
[ 6.718992, -0.346255, 0.826848],
[ 7.958806, 2.343880, 3.246144],
[ 5.079238, -1.837815, 0.436392],
[ 2.479628, -2.477055, 0.474672],
[ 0.839874, -0.905590, 1.454640],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
molecule = {
"name": "TBZPYR",
"elements": ['C']*28 + ['H']*16,
"xyz": np.array(
[
[ 0.648186, 1.775038, 9.169800],
[ 0.680394, 2.972654, 8.479600],
[ 0.728706, 4.223735, 9.150080],
[ 0.607926, 5.485509, 8.400720],
[ 0.088572, 6.565502, 9.130360],
[ -0.370392, 7.741732, 8.459880],
[ -0.954162, 8.811032, 9.189520],
[ -0.269742, 7.795197, 7.099200],
[ 0.402600, 6.800748, 6.369560],
[ 0.938058, 5.624518, 7.000600],
[ 0.644160, 6.961143, 4.969440],
[ 1.372866, 6.073624, 4.279240],
[ 1.932480, 4.972245, 4.930000],
[ 1.743258, 4.747692, 6.251240],
[ 0.648186, 1.775038, 10.550200],
[ 0.680394, 2.972654, 11.240400],
[ 0.728706, 4.223735, 10.569920],
[ 0.607926, 5.485509, 11.319280],
[ 0.088572, 6.565502, 10.589640],
[ -0.370392, 7.741732, 11.260120],
[ -0.954162, 8.811032, 10.530480],
[ -0.269742, 7.795197, 12.620800],
[ 0.402600, 6.800748, 13.350440],
[ 0.938058, 5.624518, 12.719400],
[ 0.644160, 6.961143, 14.750560],
[ 1.372866, 6.073624, 15.440760],
[ 1.932480, 4.972245, 14.790000],
[ 1.743258, 4.747692, 13.468760],
[ 0.628056, 0.908905, 8.676800],
[ 0.668316, 2.961961, 7.493600],
[ -1.368840, 9.570235, 8.696520],
[ -0.688446, 8.554400, 6.606200],
[ 0.261690, 7.763118, 4.496160],
[ 1.513776, 6.191247, 3.293240],
[ 2.492094, 4.330665, 4.397560],
[ 2.174040, 3.956410, 6.685080],
[ 0.628056, 0.908905, 11.043200],
[ 0.668316, 2.961961, 12.226400],
[ -1.368840, 9.570235, 11.023480],
[ -0.688446, 8.554400, 13.113800],
[ 0.261690, 7.763118, 15.223840],
[ 1.513776, 6.191247, 16.426760],
[ 2.492094, 4.330665, 15.322440],
[ 2.174040, 3.956410, 13.034920],
]),
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
data = {}
data["YICMOP"] = "s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC"
data["MERQIM"] = "s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2"
for name in data.keys():
smi = data[name]
m = Chem.MolFromSmiles(smi)
m2 = Chem.AddHs(m)
AllChem.EmbedMolecule(m2)
cids = AllChem.EmbedMultipleConfs(m2, numConfs=1)
xyz = Chem.rdmolfiles.MolToXYZBlock(m2, 0)
mol = mg.core.Molecule.from_str(xyz, fmt="xyz")
molecule = {
"name": name,
"elements": [site.specie.name for site in mol],
"xyz": mol.cart_coords,
"volume": None,
"pubchem id": None,
}
molecules.append(molecule)
for name in names:
print(name)
mol = pcp.get_compounds(name, "name", record_type="3d")[0]
molecule = read_molecule(mol,name)
molecules.append(molecule)
dicts = {"LEFCIK": 812440,
"OFIXUX": 102393188,
"HAHCOI": 10910901,
"JAPWIH": 11449344,
"WEXBOS": 12232323,
"LAGNAL": 139087974,
"LUFHAW": 102382626,
"PAHYON01": 10006,
"AXOSOW01": 7847,
}
for key in dicts.keys():
mol = pcp.get_compounds(dicts[key], "cid", record_type="3d")[0]
molecule = read_molecule(mol,key)
molecules.append(molecule)
#print(molecules)
dumped = json.dumps(molecules, cls=NumpyEncoder, indent=2)
with open("molecules.json", "w") as f:
f.write(dumped) | bn | 0.200532 | #print(molecules) | 2.399235 | 2 |
plugins/__init__.py | CMSC35100-JET/FRESH | 30 | 6619366 | from plugins.rationale_extractor import RationalePredict
from plugins.saliency_scorer import SaliencyPredict | from plugins.rationale_extractor import RationalePredict
from plugins.saliency_scorer import SaliencyPredict | none | 1 | 1.113311 | 1 | |
hard-gists/347596/snippet.py | jjhenkel/dockerizeme | 21 | 6619367 | <reponame>jjhenkel/dockerizeme<gh_stars>10-100
import time
from django.utils.http import http_date
AJAX_NEGATIVE_CHECK_EXPIRES = 60 # object is still available
AJAX_POSITIVE_CHECK_EXPIRES = 60*10 # if object is not available (or taken)
def check_ajax(request):
# do stuff here
timeout = AJAX_NEGATIVE_CHECK_EXPIRES if avail else AJAX_POSITIVE_CHECK_EXPIRES
response = HttpResponse(json_result, mimetype='application/json')
response['Expires'] = http_date(time.time() + timeout)
return response
| import time
from django.utils.http import http_date
AJAX_NEGATIVE_CHECK_EXPIRES = 60 # object is still available
AJAX_POSITIVE_CHECK_EXPIRES = 60*10 # if object is not available (or taken)
def check_ajax(request):
# do stuff here
timeout = AJAX_NEGATIVE_CHECK_EXPIRES if avail else AJAX_POSITIVE_CHECK_EXPIRES
response = HttpResponse(json_result, mimetype='application/json')
response['Expires'] = http_date(time.time() + timeout)
return response | en | 0.839338 | # object is still available # if object is not available (or taken) # do stuff here | 2.425373 | 2 |
tests/test_templates.py | tricoder42/django-forme | 5 | 6619368 | <gh_stars>1-10
# coding: utf-8
from __future__ import unicode_literals
import glob
import os
import os.path
import sys
import timeit
import pytest
from bs4 import BeautifulSoup
from django import template
test_dir = os.path.join(os.path.dirname(__file__), 'test_templates')
sys.path.insert(0, test_dir)
def get_cases():
return [dir_ for dir_ in os.listdir(test_dir)
if os.path.isdir(os.path.join(test_dir, dir_))]
def get_templates(case):
template_dir = os.path.join(test_dir, case, 'templates')
return glob.glob('{0}/test_*.html'.format(template_dir))
def pytest_generate_tests(metafunc):
args, ids = [], []
for case in get_cases():
for template_name in get_templates(case):
args.append((case, template_name))
ids.append('/'.join([case, os.path.basename(template_name)]))
metafunc.parametrize('case,template_name', args, ids=ids)
class TestTemplates:
def load_context(self, case):
temp_ = __import__('{0}.context'.format(case),
fromlist=['skip', 'context'], level=0)
ctx = template.Context(temp_.context)
skip = getattr(temp_, 'skip', False)
if skip:
pytest.skip()
return ctx
def load_template(self, template_name):
with open(template_name, 'r') as file_:
return template.Template(file_.read())
def test_template(self, case, template_name):
"""
Render template blocks "template" and "expected" and compare them.
"""
ctx = self.load_context(case)
tmpl = self.load_template(template_name)
from django.template.loader_tags import BlockNode
nodes = tmpl.nodelist.get_nodes_by_type(BlockNode)
params = dict([(node.name, node.nodelist.render(ctx))
for node in nodes])
if 'skip' in params:
raise pytest.skip(params['skip'])
template = BeautifulSoup(params['template']).findAll()
expected = BeautifulSoup(params['expected']).findAll()
for given, should_be in zip(template, expected):
assert given.tag == should_be.tag
assert given.attrib == should_be.attrib
assert given.text == should_be.text
@pytest.mark.profiling
def test_profiling(self, case, template_name):
ctx = self.load_context(case)
n = 1000
django_parse = lambda: template.Template('{{ form }}')
forme_parse = lambda: self.load_template(template_name)
times_parse = {
'django': timeit.Timer(django_parse).timeit(n),
'forme': timeit.Timer(forme_parse).timeit(n),
}
django_tmpl = django_parse()
tmpl = forme_parse()
from django.template.loader_tags import BlockNode
nodes = tmpl.nodelist.get_nodes_by_type(BlockNode)
params = dict([(node.name, node.nodelist)
for node in nodes])
forme_tmpl = params['template']
django_render = lambda: django_tmpl.render(ctx)
forme_render = lambda: forme_tmpl.render(ctx)
times_render = {
'django': timeit.Timer(django_render).timeit(n),
'forme': timeit.Timer(forme_render).timeit(n),
}
slower = lambda d: d['forme'] / d['django']
print('-' * 40)
print('Template: {0}/{1}'.format(case, os.path.basename(template_name)))
print('--- Parsing (Slower {0:.1f}x)'.format(slower(times_parse)))
for key, value in times_parse.items():
print('{0:^8} {1:.3f} ms'.format(key, value))
print('--- Rendering (Slower {0:.1f}x)'.format(slower(times_render)))
for key, value in times_render.items():
print('{0:^8} {1:.3f} ms'.format(key, value))
| # coding: utf-8
from __future__ import unicode_literals
import glob
import os
import os.path
import sys
import timeit
import pytest
from bs4 import BeautifulSoup
from django import template
test_dir = os.path.join(os.path.dirname(__file__), 'test_templates')
sys.path.insert(0, test_dir)
def get_cases():
return [dir_ for dir_ in os.listdir(test_dir)
if os.path.isdir(os.path.join(test_dir, dir_))]
def get_templates(case):
template_dir = os.path.join(test_dir, case, 'templates')
return glob.glob('{0}/test_*.html'.format(template_dir))
def pytest_generate_tests(metafunc):
args, ids = [], []
for case in get_cases():
for template_name in get_templates(case):
args.append((case, template_name))
ids.append('/'.join([case, os.path.basename(template_name)]))
metafunc.parametrize('case,template_name', args, ids=ids)
class TestTemplates:
def load_context(self, case):
temp_ = __import__('{0}.context'.format(case),
fromlist=['skip', 'context'], level=0)
ctx = template.Context(temp_.context)
skip = getattr(temp_, 'skip', False)
if skip:
pytest.skip()
return ctx
def load_template(self, template_name):
with open(template_name, 'r') as file_:
return template.Template(file_.read())
def test_template(self, case, template_name):
"""
Render template blocks "template" and "expected" and compare them.
"""
ctx = self.load_context(case)
tmpl = self.load_template(template_name)
from django.template.loader_tags import BlockNode
nodes = tmpl.nodelist.get_nodes_by_type(BlockNode)
params = dict([(node.name, node.nodelist.render(ctx))
for node in nodes])
if 'skip' in params:
raise pytest.skip(params['skip'])
template = BeautifulSoup(params['template']).findAll()
expected = BeautifulSoup(params['expected']).findAll()
for given, should_be in zip(template, expected):
assert given.tag == should_be.tag
assert given.attrib == should_be.attrib
assert given.text == should_be.text
@pytest.mark.profiling
def test_profiling(self, case, template_name):
ctx = self.load_context(case)
n = 1000
django_parse = lambda: template.Template('{{ form }}')
forme_parse = lambda: self.load_template(template_name)
times_parse = {
'django': timeit.Timer(django_parse).timeit(n),
'forme': timeit.Timer(forme_parse).timeit(n),
}
django_tmpl = django_parse()
tmpl = forme_parse()
from django.template.loader_tags import BlockNode
nodes = tmpl.nodelist.get_nodes_by_type(BlockNode)
params = dict([(node.name, node.nodelist)
for node in nodes])
forme_tmpl = params['template']
django_render = lambda: django_tmpl.render(ctx)
forme_render = lambda: forme_tmpl.render(ctx)
times_render = {
'django': timeit.Timer(django_render).timeit(n),
'forme': timeit.Timer(forme_render).timeit(n),
}
slower = lambda d: d['forme'] / d['django']
print('-' * 40)
print('Template: {0}/{1}'.format(case, os.path.basename(template_name)))
print('--- Parsing (Slower {0:.1f}x)'.format(slower(times_parse)))
for key, value in times_parse.items():
print('{0:^8} {1:.3f} ms'.format(key, value))
print('--- Rendering (Slower {0:.1f}x)'.format(slower(times_render)))
for key, value in times_render.items():
print('{0:^8} {1:.3f} ms'.format(key, value)) | en | 0.818055 | # coding: utf-8 Render template blocks "template" and "expected" and compare them. | 2.310346 | 2 |
tests/utils_tests/test_sparse_utils.py | pfnet/chainerchem | 184 | 6619369 | <filename>tests/utils_tests/test_sparse_utils.py
import numpy
import pytest
from chainer_chemistry.utils.sparse_utils import convert_sparse_with_edge_type
from chainer_chemistry.utils.sparse_utils import sparse_utils_available
if not sparse_utils_available():
pytest.skip('sparse_utils is available if chainer>=5 and numpy>=1.16',
allow_module_level=True)
def naive_convert(data, row, col, edge_type, num_edge_type):
mb, length = data.shape
new_mb = mb * num_edge_type
new_data = [[] for _ in range(new_mb)]
new_row = [[] for _ in range(new_mb)]
new_col = [[] for _ in range(new_mb)]
for i in range(mb):
for j in range(length):
k = i * num_edge_type + edge_type[i, j]
new_data[k].append(data[i, j])
new_row[k].append(row[i, j])
new_col[k].append(col[i, j])
new_length = max(len(arr) for arr in new_data)
def pad(arr_2d, dtype=numpy.int32):
for arr in arr_2d:
arr.extend([0] * (new_length - len(arr)))
return numpy.array(arr_2d)
ret = []
for d, r, c in zip(pad(new_data, data.dtype),
pad(new_row), pad(new_col)):
ret.append(list(sorted(zip(d, r, c))))
return ret
@pytest.mark.parametrize('in_shape,num_edge_type', [
((2, 4), 4),
((5, 10), 2),
((1, 1), 1),
((10, 1), 10),
((10, 10), 10),
])
def test_convert_sparse_with_edge_type(in_shape, num_edge_type):
num_nodes = 10
data = numpy.random.uniform(size=in_shape).astype(numpy.float32)
row = numpy.random.randint(size=in_shape, low=0, high=num_nodes)
col = numpy.random.randint(size=in_shape, low=0, high=num_nodes)
edge_type = numpy.random.randint(size=in_shape, low=0, high=num_edge_type)
received = convert_sparse_with_edge_type(data, row, col, num_nodes,
edge_type, num_edge_type)
expected = naive_convert(data, row, col, edge_type, num_edge_type)
# check by minibatch-wise
for i, expected_batch in enumerate(expected):
d = received.data.data[i, :].tolist()
r = received.row[i, :].tolist()
c = received.col[i, :].tolist()
received_batch = list(sorted(zip(d, r, c)))
assert expected_batch == received_batch
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| <filename>tests/utils_tests/test_sparse_utils.py
import numpy
import pytest
from chainer_chemistry.utils.sparse_utils import convert_sparse_with_edge_type
from chainer_chemistry.utils.sparse_utils import sparse_utils_available
if not sparse_utils_available():
pytest.skip('sparse_utils is available if chainer>=5 and numpy>=1.16',
allow_module_level=True)
def naive_convert(data, row, col, edge_type, num_edge_type):
mb, length = data.shape
new_mb = mb * num_edge_type
new_data = [[] for _ in range(new_mb)]
new_row = [[] for _ in range(new_mb)]
new_col = [[] for _ in range(new_mb)]
for i in range(mb):
for j in range(length):
k = i * num_edge_type + edge_type[i, j]
new_data[k].append(data[i, j])
new_row[k].append(row[i, j])
new_col[k].append(col[i, j])
new_length = max(len(arr) for arr in new_data)
def pad(arr_2d, dtype=numpy.int32):
for arr in arr_2d:
arr.extend([0] * (new_length - len(arr)))
return numpy.array(arr_2d)
ret = []
for d, r, c in zip(pad(new_data, data.dtype),
pad(new_row), pad(new_col)):
ret.append(list(sorted(zip(d, r, c))))
return ret
@pytest.mark.parametrize('in_shape,num_edge_type', [
((2, 4), 4),
((5, 10), 2),
((1, 1), 1),
((10, 1), 10),
((10, 10), 10),
])
def test_convert_sparse_with_edge_type(in_shape, num_edge_type):
num_nodes = 10
data = numpy.random.uniform(size=in_shape).astype(numpy.float32)
row = numpy.random.randint(size=in_shape, low=0, high=num_nodes)
col = numpy.random.randint(size=in_shape, low=0, high=num_nodes)
edge_type = numpy.random.randint(size=in_shape, low=0, high=num_edge_type)
received = convert_sparse_with_edge_type(data, row, col, num_nodes,
edge_type, num_edge_type)
expected = naive_convert(data, row, col, edge_type, num_edge_type)
# check by minibatch-wise
for i, expected_batch in enumerate(expected):
d = received.data.data[i, :].tolist()
r = received.row[i, :].tolist()
c = received.col[i, :].tolist()
received_batch = list(sorted(zip(d, r, c)))
assert expected_batch == received_batch
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| en | 0.930814 | # check by minibatch-wise | 2.313504 | 2 |
src/data/time_convertor.py | senovr/finance | 0 | 6619370 | <reponame>senovr/finance<filename>src/data/time_convertor.py
import datetime
import time
def utc2date(UTC):
date1 = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(UTC))
return date1
def date2utc(date):
convert = datetime.datetime.strptime(date, "%Y:%m:%d %H:%M:%S").timetuple()
utc = time.mktime(convert)
return utc
def date2seconds(date):
try:
convert = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timetuple()
seconds = time.mktime(convert)
except Exception:
seconds = 12345678900
return seconds
def seconds2date(seconds):
date = datetime.fromtimestamp(seconds).strftime("%A, %B %d, %Y %I:%M:%S")
# 'Sunday, January 29, 2017 08:30:00'
return date
def TimeStamp2seconds(TimeStamp):
seconds = TimeStamp.dt.total_seconds()
return seconds
##-------------------------------------------------------------------------------------------------
# https://www.geeksforgeeks.org/python-program-to-convert-seconds-into-hours-minutes-and-seconds/
##-------------------------------------------------------------------------------------------------
def convert_via_naive(seconds):
""" Convert seconds into hours, minutes and seconds (naive algorithm).
Parameters
----------
seconds : int (float?)
Returns
-------
string
n = 12345
print(convert(n))
>> 3:25:45
"""
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
time_string = "%d:%02d:%02d" % (hour, minutes, seconds)
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_naive_divmod(seconds):
min, sec = divmod(seconds, 60)
hour, min = divmod(min, 60)
time_string = "%d:%02d:%02d" % (hour, min, sec)
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_datetime(n):
time_string = str(datetime.timedelta(seconds=n))
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_time(seconds):
time_string = time.strftime("%H:%M:%S", time.gmtime(seconds))
return time_string
| import datetime
import time
def utc2date(UTC):
date1 = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(UTC))
return date1
def date2utc(date):
convert = datetime.datetime.strptime(date, "%Y:%m:%d %H:%M:%S").timetuple()
utc = time.mktime(convert)
return utc
def date2seconds(date):
try:
convert = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timetuple()
seconds = time.mktime(convert)
except Exception:
seconds = 12345678900
return seconds
def seconds2date(seconds):
date = datetime.fromtimestamp(seconds).strftime("%A, %B %d, %Y %I:%M:%S")
# 'Sunday, January 29, 2017 08:30:00'
return date
def TimeStamp2seconds(TimeStamp):
seconds = TimeStamp.dt.total_seconds()
return seconds
##-------------------------------------------------------------------------------------------------
# https://www.geeksforgeeks.org/python-program-to-convert-seconds-into-hours-minutes-and-seconds/
##-------------------------------------------------------------------------------------------------
def convert_via_naive(seconds):
""" Convert seconds into hours, minutes and seconds (naive algorithm).
Parameters
----------
seconds : int (float?)
Returns
-------
string
n = 12345
print(convert(n))
>> 3:25:45
"""
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
time_string = "%d:%02d:%02d" % (hour, minutes, seconds)
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_naive_divmod(seconds):
min, sec = divmod(seconds, 60)
hour, min = divmod(min, 60)
time_string = "%d:%02d:%02d" % (hour, min, sec)
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_datetime(n):
time_string = str(datetime.timedelta(seconds=n))
return time_string
##-------------------------------------------------------------------------------------------------
def convert_via_time(seconds):
time_string = time.strftime("%H:%M:%S", time.gmtime(seconds))
return time_string | en | 0.205967 | # 'Sunday, January 29, 2017 08:30:00' ##------------------------------------------------------------------------------------------------- # https://www.geeksforgeeks.org/python-program-to-convert-seconds-into-hours-minutes-and-seconds/ ##------------------------------------------------------------------------------------------------- Convert seconds into hours, minutes and seconds (naive algorithm). Parameters ---------- seconds : int (float?) Returns ------- string n = 12345 print(convert(n)) >> 3:25:45 ##------------------------------------------------------------------------------------------------- ##------------------------------------------------------------------------------------------------- ##------------------------------------------------------------------------------------------------- | 3.713442 | 4 |
gourd/mqtt_log_handler.py | clueboard/gourd | 0 | 6619371 | import logging
from paho.mqtt.client import mqtt_cs_connected, mqtt_cs_connect_async
MQTT_CONNECTED = (mqtt_cs_connected, mqtt_cs_connect_async)
class MQTTLogHandler(logging.Handler):
def __init__(self, mqtt_client, topic, qos=0, retain=False):
super().__init__()
self.mqtt = mqtt_client
self.topic = topic
self.qos = qos
self.retain = retain
def emit(self, record):
if self.mqtt._state in MQTT_CONNECTED: # Only emit logs when MQTT is connected
try:
msg = self.format(record)
if self.topic not in msg and 'Received PUBACK' not in msg:
# Avoid loops by skipping log messages possibly triggered by us
self.mqtt.publish(topic=self.topic, payload=msg, qos=self.qos, retain=self.retain)
except Exception:
self.handleError(record)
| import logging
from paho.mqtt.client import mqtt_cs_connected, mqtt_cs_connect_async
MQTT_CONNECTED = (mqtt_cs_connected, mqtt_cs_connect_async)
class MQTTLogHandler(logging.Handler):
def __init__(self, mqtt_client, topic, qos=0, retain=False):
super().__init__()
self.mqtt = mqtt_client
self.topic = topic
self.qos = qos
self.retain = retain
def emit(self, record):
if self.mqtt._state in MQTT_CONNECTED: # Only emit logs when MQTT is connected
try:
msg = self.format(record)
if self.topic not in msg and 'Received PUBACK' not in msg:
# Avoid loops by skipping log messages possibly triggered by us
self.mqtt.publish(topic=self.topic, payload=msg, qos=self.qos, retain=self.retain)
except Exception:
self.handleError(record)
| en | 0.952965 | # Only emit logs when MQTT is connected # Avoid loops by skipping log messages possibly triggered by us | 2.646544 | 3 |
train.py | nknshmsk/pytorch-lightning-minimal | 0 | 6619372 | import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
class Classifier(pl.LightningModule):
def __init__(self):
super().__init__()
self.classifier = nn.Sequential(
nn.Linear(28 * 28, 10),
nn.ReLU())
def forward(self, x):
return self.classifier(x.view(x.size(0), -1))
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.classifier(x.view(x.size(0), -1))
return nn.functional.cross_entropy(y_hat, y)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.classifier(x.view(x.size(0), -1))
self.log_dict({'accuracy': FM.accuracy(torch.argmax(y_hat, dim=1), y)})
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=0.02)
train_loader = DataLoader(MNIST('./', train=True, download=True, transform=transforms.ToTensor()), batch_size=1024, num_workers=12)
test_loader = DataLoader(MNIST('./', train=False, download=True, transform=transforms.ToTensor()), batch_size=1024, num_workers=12)
trainer = pl.Trainer(max_epochs=10, gpus=1)
model = Classifier()
trainer.fit(model, train_loader)
trainer.test(model, test_dataloaders=test_loader)
| import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
class Classifier(pl.LightningModule):
def __init__(self):
super().__init__()
self.classifier = nn.Sequential(
nn.Linear(28 * 28, 10),
nn.ReLU())
def forward(self, x):
return self.classifier(x.view(x.size(0), -1))
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.classifier(x.view(x.size(0), -1))
return nn.functional.cross_entropy(y_hat, y)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.classifier(x.view(x.size(0), -1))
self.log_dict({'accuracy': FM.accuracy(torch.argmax(y_hat, dim=1), y)})
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=0.02)
train_loader = DataLoader(MNIST('./', train=True, download=True, transform=transforms.ToTensor()), batch_size=1024, num_workers=12)
test_loader = DataLoader(MNIST('./', train=False, download=True, transform=transforms.ToTensor()), batch_size=1024, num_workers=12)
trainer = pl.Trainer(max_epochs=10, gpus=1)
model = Classifier()
trainer.fit(model, train_loader)
trainer.test(model, test_dataloaders=test_loader)
| none | 1 | 2.542203 | 3 | |
app.py | Vonamugan/Longeron | 2 | 6619373 | from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Item(db.Model):
    """Blog post model: one row per post."""
    id = db.Column(db.Integer, primary_key=True)
    # post title, required
    title = db.Column(db.String(200), nullable=False)
    subtitle = db.Column(db.String)
    author = db.Column(db.String(50))
    # set by the /create handler at submission time
    date_posted = db.Column(db.DateTime)
    # post body text, required
    body = db.Column(db.Text, nullable=False)
    isActive = db.Column(db.Boolean, default=True)
    # text = db.Column(db.Text, nullable=False)
    def __repr__(self):
        # NOTE(review): repr is just the title (assumed str), not the
        # conventional Item(...) form -- confirm this is intentional
        return self.title
@app.route('/')
def index():
    """Home page: every post, newest first."""
    posts = Item.query.order_by(Item.date_posted.desc()).all()
    return render_template('index.html', posts=posts)
@app.route('/post/<int:post_id>')
def post(post_id):
    """Single-post page; responds 404 for an unknown post id.

    The previous `.one()` call raised NoResultFound (an HTTP 500) when
    the id did not exist; get_or_404 looks up by primary key and aborts
    with a proper 404 instead.
    """
    post = Item.query.get_or_404(post_id)
    return render_template('post.html', post=post)
@app.route('/login')
def login():
    """Render the static login page."""
    return render_template('login.html')
@app.route('/subscribe')
def subscribe():
    """Render the static subscribe page."""
    return render_template('subscribe.html')
@app.route('/create', methods=['POST', 'GET'])
def create():
    """Show the new-post form (GET) or persist a submitted post (POST)."""
    if request.method == 'POST':
        # required form fields; a missing key raises BadRequest (400)
        title = request.form['title']
        body = request.form['body']
        subtitle = request.form['subtitle']
        author = request.form['author']
        post = Item(title=title, body=body, subtitle=subtitle,
                    author=author, date_posted=datetime.now())
        try:
            db.session.add(post)
            db.session.commit()
        except Exception:
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; roll back so the session stays usable
            db.session.rollback()
            # NOTE(review): "Error(" looks like a truncated message --
            # confirm the intended user-facing text
            return "Error("
        return redirect('/')
    else:
        return render_template('create.html')
if __name__ == "__main__":
    # development server only; use a WSGI server in production
    app.run(debug=True)
| from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
# SQLite database file in the working directory
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db'
# modification tracking is unused here; disabling it avoids overhead
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Item(db.Model):
    """Blog post model: one row per post."""
    id = db.Column(db.Integer, primary_key=True)
    # post title, required
    title = db.Column(db.String(200), nullable=False)
    subtitle = db.Column(db.String)
    author = db.Column(db.String(50))
    # set by the /create handler at submission time
    date_posted = db.Column(db.DateTime)
    # post body text, required
    body = db.Column(db.Text, nullable=False)
    isActive = db.Column(db.Boolean, default=True)
    # text = db.Column(db.Text, nullable=False)
    def __repr__(self):
        # NOTE(review): repr is just the title (assumed str) -- confirm
        return self.title
@app.route('/')
def index():
    """Home page: every post, newest first."""
    items = Item.query.order_by(Item.date_posted.desc()).all()
    return render_template('index.html', posts=items)
@app.route('/post/<int:post_id>')
def post(post_id):
    """Single-post page.

    NOTE(review): .one() raises NoResultFound (an HTTP 500) for an
    unknown id; Item.query.get_or_404 would return 404 -- confirm which
    behavior is wanted.
    """
    post = Item.query.filter_by(id=post_id).one()
    return render_template('post.html', post=post)
@app.route('/login')
def login():
    """Render the static login page."""
    return render_template('login.html')
@app.route('/subscribe')
def subscribe():
    """Render the static subscribe page."""
    return render_template('subscribe.html')
@app.route('/create', methods=['POST', 'GET'])
def create():
    """Show the new-post form (GET) or persist a submitted post (POST)."""
    if request.method == 'POST':
        # required form fields; a missing key raises BadRequest (400)
        title = request.form['title']
        body = request.form['body']
        subtitle = request.form['subtitle']
        author = request.form['author']
        post = Item(title=title, body=body, subtitle=subtitle,
                    author=author, date_posted=datetime.now())
        try:
            db.session.add(post)
            db.session.commit()
        except Exception:
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; roll back so the session stays usable
            db.session.rollback()
            # NOTE(review): "Error(" looks like a truncated message --
            # confirm the intended user-facing text
            return "Error("
        return redirect('/')
    else:
        return render_template('create.html')
if __name__ == "__main__":
    # development server only; use a WSGI server in production
    app.run(debug=True)
| en | 0.131173 | # text = db.Column(db.Text, nullable=False) | 2.727164 | 3 |
icesat2_toolkit/spatial.py | outlk/read-ICESat-2 | 0 | 6619374 | <reponame>outlk/read-ICESat-2
#!/usr/bin/env python
u"""
spatial.py
Written by <NAME> (11/2021)
Utilities for reading and operating on spatial data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format
https://www.h5py.org/
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL
UPDATE HISTORY:
Written 11/2021
"""
import os
import re
import io
import gzip
import uuid
import h5py
import logging
import netCDF4
import warnings
import numpy as np
try:
import osgeo.gdal, osgeo.osr
except ModuleNotFoundError:
warnings.filterwarnings("always")
warnings.warn("GDAL not available")
def case_insensitive_filename(filename):
    """
    Searches a directory for a filename without case dependence

    Inputs: path of the file to locate
    Returns: expanded path of the existing file
    Raises: IOError if no case-insensitive match exists

    The basename was previously used as a raw regular expression, so
    filenames containing regex metacharacters (e.g. ``data(1).nc``)
    could never match, and prefix matching could pick a longer file;
    the pattern is now escaped and anchored to the full name.
    """
    #-- check if file presently exists with input case
    if not os.access(os.path.expanduser(filename),os.F_OK):
        #-- search for filename without case dependence
        basename = os.path.basename(filename)
        directory = os.path.dirname(os.path.expanduser(filename))
        pattern = re.escape(basename) + r'$'
        f = [f for f in os.listdir(directory) if re.match(pattern,f,re.I)]
        if not f:
            raise IOError('{0} not found in file system'.format(filename))
        filename = os.path.join(directory,f.pop())
    return os.path.expanduser(filename)
def from_file(filename, format, **kwargs):
    """
    Wrapper function for reading data from an input format

    Dispatches to the format-specific reader; raises ValueError for an
    unrecognized format string.
    """
    #-- dispatch on the requested format (guard-clause style)
    if format == 'netCDF4':
        return from_netCDF4(filename, **kwargs)
    if format == 'HDF5':
        return from_HDF5(filename, **kwargs)
    if format == 'geotiff':
        return from_geotiff(filename, **kwargs)
    raise ValueError('Invalid format {0}'.format(format))
def from_netCDF4(filename, **kwargs):
    """
    Read gridded spatial data from a netCDF4 file

    Inputs: full path of input netCDF4 file (or a file-like object when
        using the 'bytes' compression option)
    Options:
        compression: None, 'gzip' or 'bytes' (compressed file or stream)
        xname, yname, varname: netCDF4 variable names of x, y, and data
    Returns: dictionary with 'x', 'y', 'data' and an 'attributes'
        dictionary (per-variable attributes plus 'extent' and 'spacing')
    """
    #-- set default keyword arguments
    kwargs.setdefault('compression',None)
    kwargs.setdefault('xname','x')
    kwargs.setdefault('yname','y')
    kwargs.setdefault('varname','data')
    #-- Open the NetCDF4 file for reading
    if (kwargs['compression'] == 'gzip'):
        #-- read as in-memory (diskless) netCDF4 dataset
        with gzip.open(case_insensitive_filename(filename),'r') as f:
            fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=f.read())
    elif (kwargs['compression'] == 'bytes'):
        #-- read as in-memory (diskless) netCDF4 dataset
        fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=filename.read())
    else:
        #-- read netCDF4 dataset
        fileID = netCDF4.Dataset(case_insensitive_filename(filename), 'r')
    #-- Output NetCDF file information
    logging.info(fileID.filepath())
    logging.info(list(fileID.variables.keys()))
    #-- create python dictionary for output variables and attributes
    dinput = {}
    dinput['attributes'] = {}
    #-- get attributes for the file
    for attr in ['title','description','projection']:
        #-- try getting the attribute
        try:
            ncattr, = [s for s in fileID.ncattrs() if re.match(attr,s,re.I)]
            dinput['attributes'][attr] = fileID.getncattr(ncattr)
        except (ValueError,AttributeError):
            pass
    #-- list of attributes to attempt to retrieve from included variables
    attributes_list = ['description','units','long_name','calendar',
        'standard_name','grid_mapping','_FillValue']
    #-- mapping between netCDF4 variable names and output names
    variable_mapping = dict(x=kwargs['xname'],y=kwargs['yname'],
        data=kwargs['varname'])
    #-- for each variable
    for key,nc in variable_mapping.items():
        #-- Getting the data from each NetCDF variable
        dinput[key] = fileID.variables[nc][:]
        #-- get attributes for the included variables
        dinput['attributes'][key] = {}
        for attr in attributes_list:
            #-- try getting the attribute
            try:
                ncattr, = [s for s in fileID.variables[nc].ncattrs()
                    if re.match(attr,s,re.I)]
                dinput['attributes'][key][attr] = \
                    fileID.variables[nc].getncattr(ncattr)
            except (ValueError,AttributeError):
                pass
    #-- get projection information if there is a grid_mapping attribute
    if 'grid_mapping' in dinput['attributes']['data'].keys():
        grid_mapping = dinput['attributes']['data']['grid_mapping']
        #-- BUGFIX: the 'crs' sub-dictionary was never initialized, so
        #-- the assignment below raised KeyError whenever a grid_mapping
        #-- attribute was present
        dinput['attributes']['crs'] = {}
        for att_name in fileID[grid_mapping].ncattrs():
            dinput['attributes']['crs'][att_name] = \
                fileID.variables[grid_mapping].getncattr(att_name)
        #-- get the spatial projection reference information from wkt
        #-- and overwrite the file-level projection attribute (if existing)
        srs = osgeo.osr.SpatialReference()
        srs.ImportFromWkt(dinput['attributes']['crs']['crs_wkt'])
        dinput['attributes']['projection'] = srs.ExportToProj4()
    #-- convert to masked array if fill values
    if '_FillValue' in dinput['attributes']['data'].keys():
        dinput['data'] = np.ma.asarray(dinput['data'])
        dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
        dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
    #-- add extent and spacing attributes
    xmin,xmax = np.min(dinput['x']),np.max(dinput['x'])
    ymin,ymax = np.min(dinput['y']),np.max(dinput['y'])
    dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
    dx = dinput['x'][1] - dinput['x'][0]
    dy = dinput['y'][1] - dinput['y'][0]
    dinput['attributes']['spacing'] = (dx,dy)
    #-- Closing the NetCDF file
    fileID.close()
    #-- return the spatial variables
    return dinput
def from_HDF5(filename, **kwargs):
    """
    Read gridded spatial data from a HDF5 file

    Inputs: full path of input HDF5 file (or a file-like object when
        using the 'bytes' compression option)
    Options:
        compression: None, 'gzip' or 'bytes' (compressed file or stream)
        xname, yname, varname: HDF5 variable names of x, y, and data
    Returns: dictionary with 'x', 'y', 'data' and an 'attributes'
        dictionary (per-variable attributes plus 'extent' and 'spacing')
    """
    #-- set default keyword arguments
    kwargs.setdefault('compression',None)
    kwargs.setdefault('xname','x')
    kwargs.setdefault('yname','y')
    kwargs.setdefault('varname','data')
    #-- Open the HDF5 file for reading
    if (kwargs['compression'] == 'gzip'):
        #-- read gzip compressed file and extract into in-memory file object
        with gzip.open(case_insensitive_filename(filename),'r') as f:
            fid = io.BytesIO(f.read())
        #-- set filename of BytesIO object
        fid.filename = os.path.basename(filename)
        #-- rewind to start of file
        fid.seek(0)
        #-- read as in-memory (diskless) HDF5 dataset from BytesIO object
        fileID = h5py.File(fid, 'r')
    elif (kwargs['compression'] == 'bytes'):
        #-- read as in-memory (diskless) HDF5 dataset
        fileID = h5py.File(filename, 'r')
    else:
        #-- read HDF5 dataset
        fileID = h5py.File(case_insensitive_filename(filename), 'r')
    #-- Output HDF5 file information
    logging.info(fileID.filename)
    logging.info(list(fileID.keys()))
    #-- create python dictionary for output variables and attributes
    dinput = {}
    dinput['attributes'] = {}
    #-- get attributes for the file
    for attr in ['title','description','projection']:
        #-- try getting the attribute
        try:
            dinput['attributes'][attr] = fileID.attrs[attr]
        except (KeyError,AttributeError):
            pass
    #-- list of attributes to attempt to retrieve from included variables
    attributes_list = ['description','units','long_name','calendar',
        'standard_name','grid_mapping','_FillValue']
    #-- mapping between HDF5 variable names and output names
    variable_mapping = dict(x=kwargs['xname'],y=kwargs['yname'],
        data=kwargs['varname'])
    #-- for each variable
    for key,h5 in variable_mapping.items():
        #-- Getting the data from each HDF5 variable
        dinput[key] = np.copy(fileID[h5][:])
        #-- get attributes for the included variables
        dinput['attributes'][key] = {}
        for attr in attributes_list:
            #-- try getting the attribute
            try:
                dinput['attributes'][key][attr] = fileID[h5].attrs[attr]
            except (KeyError,AttributeError):
                pass
    #-- get projection information if there is a grid_mapping attribute
    if 'grid_mapping' in dinput['attributes']['data'].keys():
        grid_mapping = dinput['attributes']['data']['grid_mapping']
        #-- BUGFIX: the 'crs' sub-dictionary was never initialized, so
        #-- the assignment below raised KeyError whenever a grid_mapping
        #-- attribute was present
        dinput['attributes']['crs'] = {}
        for att_name,att_val in fileID[grid_mapping].attrs.items():
            dinput['attributes']['crs'][att_name] = att_val
        #-- get the spatial projection reference information from wkt
        #-- and overwrite the file-level projection attribute (if existing)
        srs = osgeo.osr.SpatialReference()
        srs.ImportFromWkt(dinput['attributes']['crs']['crs_wkt'])
        dinput['attributes']['projection'] = srs.ExportToProj4()
    #-- convert to masked array if fill values
    if '_FillValue' in dinput['attributes']['data'].keys():
        dinput['data'] = np.ma.asarray(dinput['data'])
        dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
        dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
    #-- add extent and spacing attributes
    xmin,xmax = np.min(dinput['x']),np.max(dinput['x'])
    ymin,ymax = np.min(dinput['y']),np.max(dinput['y'])
    dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
    dx = dinput['x'][1] - dinput['x'][0]
    dy = dinput['y'][1] - dinput['y'][0]
    dinput['attributes']['spacing'] = (dx,dy)
    #-- Closing the HDF5 file
    fileID.close()
    #-- return the spatial variables
    return dinput
def from_geotiff(filename, **kwargs):
    """
    Read data from a geotiff file

    Inputs: full path of input geotiff file (or a file-like object when
        using the 'bytes' compression option)
    Options:
        compression: None, 'gzip' or 'bytes' (compressed file or stream)
    Returns: dictionary with 'x', 'y', 'data' (masked array) and an
        'attributes' dictionary ('projection', 'wkt', 'spacing', 'extent')
    """
    #-- set default keyword arguments
    kwargs.setdefault('compression',None)
    #-- Open the geotiff file for reading
    if (kwargs['compression'] == 'gzip'):
        #-- read gzip compressed file and extract into memory-mapped object
        mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
        with gzip.open(case_insensitive_filename(filename),'r') as f:
            osgeo.gdal.FileFromMemBuffer(mmap_name, f.read())
        #-- read as GDAL memory-mapped (diskless) geotiff dataset
        ds = osgeo.gdal.Open(mmap_name)
    elif (kwargs['compression'] == 'bytes'):
        #-- read as GDAL memory-mapped (diskless) geotiff dataset
        mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
        osgeo.gdal.FileFromMemBuffer(mmap_name, filename.read())
        ds = osgeo.gdal.Open(mmap_name)
    else:
        #-- read geotiff dataset
        ds = osgeo.gdal.Open(case_insensitive_filename(filename))
    #-- print geotiff file if verbose
    logging.info(filename)
    #-- create python dictionary for output variables and attributes
    dinput = {}
    dinput['attributes'] = {c:dict() for c in ['x','y','data']}
    #-- get the spatial projection reference information
    srs = ds.GetSpatialRef()
    dinput['attributes']['projection'] = srs.ExportToProj4()
    dinput['attributes']['wkt'] = srs.ExportToWkt()
    #-- get dimensions
    xsize = ds.RasterXSize
    ysize = ds.RasterYSize
    #-- get geotiff info (affine geotransform: origin and pixel sizes)
    info_geotiff = ds.GetGeoTransform()
    dinput['attributes']['spacing'] = (info_geotiff[1],info_geotiff[5])
    #-- calculate image extents (info_geotiff[5] is negative for
    #-- north-up rasters, so ymin falls below ymax)
    xmin = info_geotiff[0]
    ymax = info_geotiff[3]
    xmax = xmin + (xsize-1)*info_geotiff[1]
    ymin = ymax + (ysize-1)*info_geotiff[5]
    dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
    #-- x and y pixel center coordinates (converted from upper left corner)
    dinput['x'] = xmin + info_geotiff[1]/2.0 + np.arange(xsize)*info_geotiff[1]
    dinput['y'] = ymax + info_geotiff[5]/2.0 + np.arange(ysize)*info_geotiff[5]
    #-- read full image with GDAL
    dinput['data'] = ds.ReadAsArray()
    #-- check if image has fill values
    dinput['data'] = np.ma.asarray(dinput['data'])
    dinput['data'].mask = np.zeros_like(dinput['data'],dtype=bool)
    #-- NOTE(review): truthiness test skips a legitimate nodata value of
    #-- 0 (and None) -- confirm a 0 nodata is never expected here
    if ds.GetRasterBand(1).GetNoDataValue():
        #-- mask invalid values
        dinput['data'].fill_value = ds.GetRasterBand(1).GetNoDataValue()
        #-- create mask array for bad values
        dinput['data'].mask[:] = (dinput['data'].data == dinput['data'].fill_value)
        #-- set attribute for fill value
        dinput['attributes']['data']['_FillValue'] = dinput['data'].fill_value
    #-- close the dataset (GDAL closes on dereference)
    ds = None
    #-- return the spatial variables
    return dinput
def convert_ellipsoid(phi1, h1, a1, f1, a2, f2, eps=1e-12, itmax=10):
    """
    Convert latitudes and heights to a different ellipsoid using Newton-Raphson

    Inputs:
        phi1: latitude of input ellipsoid in degrees
        h1: height above input ellipsoid in meters
        a1: semi-major axis of input ellipsoid
        f1: flattening of input ellipsoid
        a2: semi-major axis of output ellipsoid
        f2: flattening of output ellipsoid
    Options:
        eps: tolerance to prevent division by small numbers
            and to determine convergence
        itmax: maximum number of iterations to use in Newton-Raphson
    Returns:
        phi2: latitude of output ellipsoid in degrees
        h2: height above output ellipsoid in meters

    Note: latitudes beyond +/- 90 degrees are clamped in phi1 in place.

    BUGFIX: the original convergence/zero-derivative exits assigned to
    the loop variable (``i = np.copy(itmax)``), which does not terminate
    a Python ``for`` loop; both exits now use ``break``, so iteration
    stops once converged and never divides by a near-zero derivative.

    References:
        Astronomical Algorithms, Jean Meeus, 1991, Willmann-Bell, Inc.
        pp. 77-82
    """
    if (len(phi1) != len(h1)):
        raise ValueError('phi and h have incompatable dimensions')
    #-- semiminor axis of input and output ellipsoid
    b1 = (1.0 - f1)*a1
    b2 = (1.0 - f2)*a2
    #-- initialize output arrays
    npts = len(phi1)
    phi2 = np.zeros((npts))
    h2 = np.zeros((npts))
    #-- for each point
    for N in range(npts):
        #-- force phi1 into range -90 <= phi1 <= 90
        if (np.abs(phi1[N]) > 90.0):
            phi1[N] = np.sign(phi1[N])*90.0
        #-- handle special case near the equator
        #-- phi2 = phi1 (latitudes congruent)
        #-- h2 = h1 + a1 - a2
        if (np.abs(phi1[N]) < eps):
            phi2[N] = np.copy(phi1[N])
            h2[N] = h1[N] + a1 - a2
        #-- handle special case near the poles
        #-- phi2 = phi1 (latitudes congruent)
        #-- h2 = h1 + b1 - b2
        elif ((90.0 - np.abs(phi1[N])) < eps):
            phi2[N] = np.copy(phi1[N])
            h2[N] = h1[N] + b1 - b2
        #-- handle case if latitude is within 45 degrees of equator
        elif (np.abs(phi1[N]) <= 45):
            #-- convert phi1 to radians
            phi1r = phi1[N] * np.pi/180.0
            sinphi1 = np.sin(phi1r)
            cosphi1 = np.cos(phi1r)
            #-- prevent division by very small numbers
            cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
            #-- calculate tangent
            tanphi1 = sinphi1 / cosphi1
            u1 = np.arctan(b1 / a1 * tanphi1)
            hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
            hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
            #-- set initial value for u2
            u2 = np.copy(u1)
            #-- setup constants
            k0 = b2 * b2 - a2 * a2
            k1 = a2 * hpr1cos
            k2 = b2 * hpr1sin
            #-- perform newton-raphson iteration to solve for u2
            #-- cos(u2) will not be close to zero since abs(phi1) <= 45
            for i in range(0, itmax+1):
                cosu2 = np.cos(u2)
                fu2 = k0 * np.sin(u2) + k1 * np.tan(u2) - k2
                fu2p = k0 * cosu2 + k1 / (cosu2 * cosu2)
                if (np.abs(fu2p) < eps):
                    #-- derivative too small: stop iterating
                    break
                else:
                    delta = fu2 / fu2p
                    u2 -= delta
                    if (np.abs(delta) < eps):
                        #-- converged
                        break
            #-- convert latitude to degrees and verify values between +/- 90
            phi2r = np.arctan(a2 / b2 * np.tan(u2))
            phi2[N] = phi2r*180.0/np.pi
            if (np.abs(phi2[N]) > 90.0):
                phi2[N] = np.sign(phi2[N])*90.0
            #-- calculate height
            h2[N] = (hpr1cos - a2 * np.cos(u2)) / np.cos(phi2r)
        #-- handle final case where latitudes are between 45 degrees and pole
        else:
            #-- convert phi1 to radians
            phi1r = phi1[N] * np.pi/180.0
            sinphi1 = np.sin(phi1r)
            cosphi1 = np.cos(phi1r)
            #-- prevent division by very small numbers
            cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
            #-- calculate tangent
            tanphi1 = sinphi1 / cosphi1
            u1 = np.arctan(b1 / a1 * tanphi1)
            hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
            hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
            #-- set initial value for u2
            u2 = np.copy(u1)
            #-- setup constants
            k0 = a2 * a2 - b2 * b2
            k1 = b2 * hpr1sin
            k2 = a2 * hpr1cos
            #-- perform newton-raphson iteration to solve for u2
            #-- sin(u2) will not be close to zero since abs(phi1) > 45
            for i in range(0, itmax+1):
                sinu2 = np.sin(u2)
                fu2 = k0 * np.cos(u2) + k1 / np.tan(u2) - k2
                fu2p = -1 * (k0 * sinu2 + k1 / (sinu2 * sinu2))
                if (np.abs(fu2p) < eps):
                    #-- derivative too small: stop iterating
                    break
                else:
                    delta = fu2 / fu2p
                    u2 -= delta
                    if (np.abs(delta) < eps):
                        #-- converged
                        break
            #-- convert latitude to degrees and verify values between +/- 90
            phi2r = np.arctan(a2 / b2 * np.tan(u2))
            phi2[N] = phi2r*180.0/np.pi
            if (np.abs(phi2[N]) > 90.0):
                phi2[N] = np.sign(phi2[N])*90.0
            #-- calculate height
            h2[N] = (hpr1sin - b2 * np.sin(u2)) / np.sin(phi2r)
    #-- return the latitude and height
    return (phi2, h2)
def compute_delta_h(a1, f1, a2, f2, lat):
    """
    Compute difference in elevation for two ellipsoids at a given
    latitude using a simplified empirical equation

    Inputs:
        a1: semi-major axis of input ellipsoid
        f1: flattening of input ellipsoid
        a2: semi-major axis of output ellipsoid
        f2: flattening of output ellipsoid
        lat: array of latitudes in degrees (clamped in place to +/- 90)
    Returns:
        delta_h: difference in elevation for the two ellipsoids

    Reference:
        Jean Meeus, Astronomical Algorithms, pp. 77-82 (1991)
    """
    #-- clamp any latitudes outside +/- 90 degrees (modifies lat in place)
    out_of_range, = np.nonzero((lat < -90.0) | (lat > 90.0))
    lat[out_of_range] = np.sign(lat[out_of_range]) * 90.0
    #-- semiminor axes of the input and output ellipsoids
    semiminor1 = (1.0 - f1) * a1
    semiminor2 = (1.0 - f2) * a2
    #-- axis differences between the two ellipsoids
    da = a2 - a1
    db = semiminor2 - semiminor1
    #-- delta_h = -(delta_a * cos(phi)^2 + delta_b * sin(phi)^2)
    phi = lat * np.pi / 180.0
    return -(da * np.cos(phi)**2 + db * np.sin(phi)**2)
def wrap_longitudes(lon):
    """
    Wraps longitudes to range from -180 to +180

    Inputs:
        lon: longitude (degrees east)
    """
    #-- wrap via the sine/cosine projection and arctangent
    rad = lon * np.pi / 180.0
    wrapped = np.arctan2(np.sin(rad), np.cos(rad))
    #-- convert back from radians to degrees
    return wrapped * 180.0 / np.pi
def to_cartesian(lon, lat, h=0.0, a_axis=6378137.0, flat=1.0/298.257223563):
    """
    Converts geodetic coordinates to Cartesian coordinates

    Inputs:
        lon: longitude (degrees east)
        lat: latitude (degrees north)
    Options:
        h: height above ellipsoid (or sphere)
        a_axis: semimajor axis of the ellipsoid (default: WGS84)
            * for spherical coordinates set to radius of the Earth
        flat: ellipsoidal flattening (default: WGS84)
            * for spherical coordinates set to 0
    """
    #-- promote scalars to 1D arrays
    lon = np.atleast_1d(lon)
    lat = np.atleast_1d(lat)
    #-- fix longitudes to be 0:360 (modified in place)
    negative, = np.nonzero(lon < 0)
    if negative.size:
        lon[negative] += 360.0
    #-- first numerical eccentricity (linear eccentricity / semimajor axis)
    ecc1 = np.sqrt((2.0*flat - flat**2)*a_axis**2) / a_axis
    #-- degrees to radians
    dtr = np.pi/180.0
    #-- geodetic latitude in radians
    phi = lat * dtr
    sinphi = np.sin(phi)
    cosphi = np.cos(phi)
    #-- prime vertical radius of curvature
    N = a_axis / np.sqrt(1.0 - ecc1**2.0*sinphi**2.0)
    #-- calculate X, Y and Z from geodetic latitude and longitude
    X = (N + h) * cosphi * np.cos(lon*dtr)
    Y = (N + h) * cosphi * np.sin(lon*dtr)
    Z = (N * (1.0 - ecc1**2.0) + h) * sinphi
    #-- return the cartesian coordinates
    return (X, Y, Z)
def to_sphere(x, y, z):
    """
    Convert from cartesian coordinates to spherical coordinates

    Inputs:
        x,y,z in cartesian coordinates
    Returns:
        (lon, lat, rad): longitude 0:360, latitude -90:90, radius
    """
    #-- radial distance from the origin
    radius = np.sqrt(x**2.0 + y**2.0 + z**2.0)
    #-- azimuthal angle
    azimuth = np.arctan2(y, x)
    #-- polar angle (measured from +z)
    polar = np.arccos(z/radius)
    #-- azimuth to degrees, shifted into 0:360
    lon = 180.0*azimuth/np.pi
    if np.count_nonzero(lon < 0):
        negative = np.nonzero(lon < 0)
        lon[negative] = lon[negative] + 360.0
    #-- polar angle to latitude in degrees (-90:90)
    lat = 90.0 - (180.0*polar/np.pi)
    #-- return longitude, latitude and radius
    return (lon, lat, radius)
def to_geodetic(x,y,z,a_axis=6378137.0,flat=1.0/298.257223563):
    """
    Convert from cartesian coordinates to geodetic coordinates
    using a closed form solution

    Inputs:
        x,y,z in cartesian coordinates
    Options:
        a_axis: semimajor axis of the ellipsoid (default: WGS84)
        flat: ellipsoidal flattening (default: WGS84)
    Returns:
        (lon, lat, h): longitude and latitude in degrees, height in meters

    NOTE(review): the ``if (w == 0)`` comparison is only valid for
    scalar (or size-1) inputs; multi-element arrays would raise an
    ambiguous-truth-value error -- confirm intended usage.

    References:
        K.M. Borkowski, "Exact conversion of Earth-centered, Earth-fixed
        coordinates to geodetic coordinates"
        Journal of Guidance, Control, and Dynamics,
        16(2), 389--391, 1993
        https://arc.aiaa.org/doi/abs/10.2514/3.21016
    """
    #-- semiminor axis of the WGS84 ellipsoid [m]
    b_axis = (1.0 - flat)*a_axis
    #-- Linear eccentricity and first numerical eccentricity
    lin_ecc = np.sqrt((2.0*flat - flat**2)*a_axis**2)
    ecc1 = lin_ecc/a_axis
    #-- square of first numerical eccentricity
    e12 = ecc1**2
    #-- degrees to radians
    dtr = np.pi/180.0
    #-- calculate distance from the rotation axis
    w = np.sqrt(x**2 + y**2)
    #-- calculate longitude
    lon = np.arctan2(y,x)/dtr
    lat = np.zeros_like(lon)
    h = np.zeros_like(lon)
    if (w == 0):
        #-- special case where w == 0 (exact polar solution)
        h = np.sign(z)*z - b_axis
        lat = 90.0*np.sign(z)
    else:
        #-- all other cases: Borkowski (1993) closed-form solution
        l = e12/2.0
        m = (w/a_axis)**2.0
        n = ((1.0-e12)*z/b_axis)**2.0
        i = -(2.0*l**2 + m + n)/2.0
        k = (l**2.0 - m - n)*l**2.0
        q = (1.0/216.0)*(m + n - 4.0*l**2)**3.0 + m*n*l**2.0
        D = np.sqrt((2.0*q - m*n*l**2)*m*n*l**2)
        B = i/3.0 - (q+D)**(1.0/3.0) - (q-D)**(1.0/3.0)
        t = np.sqrt(np.sqrt(B**2-k) - (B+i)/2.0)-np.sign(m-n)*np.sqrt((B-i)/2.0)
        wi = w/(t+l)
        zi = (1.0-e12)*z/(t-l)
        #-- calculate latitude and height
        lat = np.arctan2(zi,((1.0-e12)*wi))/dtr
        h = np.sign(t-1.0+l)*np.sqrt((w-wi)**2.0 + (z-zi)**2.0)
    #-- return latitude, longitude and height
    return (lon,lat,h)
def scale_areas(lat, flat=1.0/298.257223563, ref=70.0):
    """
    Calculates area scaling factors for a polar stereographic projection
    including special case of at the exact pole

    Inputs:
        lat: latitude (degrees north)
    Options:
        flat: ellipsoidal flattening (default: WGS84)
        ref: reference latitude (true scale latitude)
    Returns:
        scale: area scaling factors at input latitudes

    References:
        Snyder (1982) Map Projections used by the U.S. Geological Survey
        Forward formulas for the ellipsoid. Geological Survey Bulletin
        1532, U.S. Government Printing Office.
        JPL Technical Memorandum 3349-85-101
    """
    #-- squared and first eccentricity of the ellipsoid
    #-- ecc2 = (1-b**2/a**2) = 2.0*flat - flat^2
    ecc2 = 2.0*flat - flat**2
    ecc = np.sqrt(ecc2)

    def _ratios(theta):
        #-- Snyder m and t factors at a (positive, radian) latitude
        sintheta = np.sin(theta)
        m = np.cos(theta)/np.sqrt(1.0 - ecc2*sintheta**2)
        t = np.tan(np.pi/4.0 - theta/2.0) / \
            ((1.0 - ecc*sintheta)/(1.0 + ecc*sintheta))**(ecc/2.0)
        return (m, t)

    #-- input and reference latitudes as positive radians
    theta = np.abs(lat)*np.pi/180.0
    theta_ref = np.abs(ref)*np.pi/180.0
    m, t = _ratios(theta)
    mref, tref = _ratios(theta_ref)
    #-- distance scaling at the input latitudes and at the exact pole
    k = (mref/m)*(t/tref)
    kp = 0.5*mref*np.sqrt(((1.0+ecc)**(1.0+ecc))*((1.0-ecc)**(1.0-ecc)))/tref
    #-- area scaling (pole handled separately since m -> 0 there)
    return np.where(np.isclose(theta, np.pi/2.0), 1.0/(kp**2), 1.0/(k**2))
#-- PURPOSE: check a specified 2D point is inside a specified 2D polygon
def inside_polygon(x, y, xpts, ypts, threshold=0.01):
    """
    Indicates whether a specified 2D point is inside a specified 2D polygon
    using the summed-angle (winding) test

    Inputs:
        x: x coordinates of the 2D point(s) to check.
        y: y coordinates of the 2D point(s) to check.
        xpts: The x coordinates of the 2D polygon.
        ypts: The y coordinates of the 2D polygon.
    Options:
        threshold: minimum angle for checking if inside polygon
    Returns:
        flag: True for points within polygon, False for points outside

    BUGFIX: the polygon was previously treated as closed when EITHER
    coordinate of the last vertex matched the first (the negated
    conditions were combined with ``and``); that dropped the closing
    edge for polygons sharing only one endpoint coordinate, corrupting
    the winding sum.  A polygon is closed only when BOTH coordinates
    match.
    """
    #-- create numpy arrays for 2D points
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    nn = len(x)
    #-- create numpy arrays for polygon points
    xpts = np.array(xpts)
    ypts = np.array(ypts)
    #-- check dimensions of polygon points
    if (xpts.ndim != 1):
        raise ValueError('X coordinates of polygon not a vector.')
    if (ypts.ndim != 1):
        raise ValueError('Y coordinates of polygon not a vector.')
    if (len(xpts) != len(ypts)):
        raise ValueError('Incompatable vector dimensions.')
    #-- maximum possible number of vertices in polygon
    N = len(xpts)
    #-- Close the polygon if not already closed
    if np.isclose(xpts[-1],xpts[0]) and np.isclose(ypts[-1],ypts[0]):
        #-- already closed: last vertex duplicates the first
        N -= 1
    else:
        xpts = np.concatenate((xpts,[xpts[0]]),axis=0)
        ypts = np.concatenate((ypts,[ypts[0]]),axis=0)
    #-- Calculate dot and cross products of points to neighboring polygon points
    i = np.arange(N)
    X1 = np.dot(xpts[i][:,np.newaxis],np.ones((1,nn))) - \
        np.dot(np.ones((N,1)),x[np.newaxis,:])
    Y1 = np.dot(ypts[i][:,np.newaxis],np.ones((1,nn))) - \
        np.dot(np.ones((N,1)),y[np.newaxis,:])
    X2 = np.dot(xpts[i+1][:,np.newaxis],np.ones((1,nn))) - \
        np.dot(np.ones((N,1)),x[np.newaxis,:])
    Y2 = np.dot(ypts[i+1][:,np.newaxis],np.ones((1,nn))) - \
        np.dot(np.ones((N,1)),y[np.newaxis,:])
    #-- Dot-product
    dp = X1*X2 + Y1*Y2
    #-- Cross-product
    cp = X1*Y2 - Y1*X2
    #-- Calculate tangent of the angle between the two nearest adjacent points
    theta = np.arctan2(cp,dp)
    #-- If point is outside polygon then summation over all possible
    #-- angles will equal a small number (e.g. 0.01)
    flag = np.where(np.abs(np.sum(theta,axis=0)) > threshold, True, False)
    # Make a scalar value if there was only one input value
    if (nn == 1):
        return flag[0]
    else:
        return flag
| #!/usr/bin/env python
u"""
spatial.py
Written by <NAME> (11/2021)
Utilities for reading and operating on spatial data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format
https://www.h5py.org/
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL
UPDATE HISTORY:
Written 11/2021
"""
import os
import re
import io
import gzip
import uuid
import h5py
import logging
import netCDF4
import warnings
import numpy as np
try:
import osgeo.gdal, osgeo.osr
except ModuleNotFoundError:
warnings.filterwarnings("always")
warnings.warn("GDAL not available")
def case_insensitive_filename(filename):
    """
    Searches a directory for a filename without case dependence

    Inputs: path of the file to locate
    Returns: expanded path of the existing file
    Raises: IOError if no case-insensitive match exists

    The basename was previously used as a raw regular expression, so
    filenames containing regex metacharacters (e.g. ``data(1).nc``)
    could never match, and prefix matching could pick a longer file;
    the pattern is now escaped and anchored to the full name.
    """
    #-- check if file presently exists with input case
    if not os.access(os.path.expanduser(filename),os.F_OK):
        #-- search for filename without case dependence
        basename = os.path.basename(filename)
        directory = os.path.dirname(os.path.expanduser(filename))
        pattern = re.escape(basename) + r'$'
        f = [f for f in os.listdir(directory) if re.match(pattern,f,re.I)]
        if not f:
            raise IOError('{0} not found in file system'.format(filename))
        filename = os.path.join(directory,f.pop())
    return os.path.expanduser(filename)
def from_file(filename, format, **kwargs):
    """
    Wrapper function for reading data from an input format

    Dispatches to the format-specific reader; raises ValueError for an
    unrecognized format string.
    """
    #-- dispatch on the requested format (guard-clause style)
    if format == 'netCDF4':
        return from_netCDF4(filename, **kwargs)
    if format == 'HDF5':
        return from_HDF5(filename, **kwargs)
    if format == 'geotiff':
        return from_geotiff(filename, **kwargs)
    raise ValueError('Invalid format {0}'.format(format))
def from_netCDF4(filename, **kwargs):
    """
    Read gridded spatial data from a netCDF4 file

    Inputs: full path of input netCDF4 file (or a file-like object when
        using the 'bytes' compression option)
    Options:
        compression: None, 'gzip' or 'bytes' (compressed file or stream)
        xname, yname, varname: netCDF4 variable names of x, y, and data
    Returns: dictionary with 'x', 'y', 'data' and an 'attributes'
        dictionary (per-variable attributes plus 'extent' and 'spacing')
    """
    #-- set default keyword arguments
    kwargs.setdefault('compression',None)
    kwargs.setdefault('xname','x')
    kwargs.setdefault('yname','y')
    kwargs.setdefault('varname','data')
    #-- Open the NetCDF4 file for reading
    if (kwargs['compression'] == 'gzip'):
        #-- read as in-memory (diskless) netCDF4 dataset
        with gzip.open(case_insensitive_filename(filename),'r') as f:
            fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=f.read())
    elif (kwargs['compression'] == 'bytes'):
        #-- read as in-memory (diskless) netCDF4 dataset
        fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=filename.read())
    else:
        #-- read netCDF4 dataset
        fileID = netCDF4.Dataset(case_insensitive_filename(filename), 'r')
    #-- Output NetCDF file information
    logging.info(fileID.filepath())
    logging.info(list(fileID.variables.keys()))
    #-- create python dictionary for output variables and attributes
    dinput = {}
    dinput['attributes'] = {}
    #-- get attributes for the file
    for attr in ['title','description','projection']:
        #-- try getting the attribute
        try:
            ncattr, = [s for s in fileID.ncattrs() if re.match(attr,s,re.I)]
            dinput['attributes'][attr] = fileID.getncattr(ncattr)
        except (ValueError,AttributeError):
            pass
    #-- list of attributes to attempt to retrieve from included variables
    attributes_list = ['description','units','long_name','calendar',
        'standard_name','grid_mapping','_FillValue']
    #-- mapping between netCDF4 variable names and output names
    variable_mapping = dict(x=kwargs['xname'],y=kwargs['yname'],
        data=kwargs['varname'])
    #-- for each variable
    for key,nc in variable_mapping.items():
        #-- Getting the data from each NetCDF variable
        dinput[key] = fileID.variables[nc][:]
        #-- get attributes for the included variables
        dinput['attributes'][key] = {}
        for attr in attributes_list:
            #-- try getting the attribute
            try:
                ncattr, = [s for s in fileID.variables[nc].ncattrs()
                    if re.match(attr,s,re.I)]
                dinput['attributes'][key][attr] = \
                    fileID.variables[nc].getncattr(ncattr)
            except (ValueError,AttributeError):
                pass
    #-- get projection information if there is a grid_mapping attribute
    if 'grid_mapping' in dinput['attributes']['data'].keys():
        grid_mapping = dinput['attributes']['data']['grid_mapping']
        #-- BUGFIX: the 'crs' sub-dictionary was never initialized, so
        #-- the assignment below raised KeyError whenever a grid_mapping
        #-- attribute was present
        dinput['attributes']['crs'] = {}
        for att_name in fileID[grid_mapping].ncattrs():
            dinput['attributes']['crs'][att_name] = \
                fileID.variables[grid_mapping].getncattr(att_name)
        #-- get the spatial projection reference information from wkt
        #-- and overwrite the file-level projection attribute (if existing)
        srs = osgeo.osr.SpatialReference()
        srs.ImportFromWkt(dinput['attributes']['crs']['crs_wkt'])
        dinput['attributes']['projection'] = srs.ExportToProj4()
    #-- convert to masked array if fill values
    if '_FillValue' in dinput['attributes']['data'].keys():
        dinput['data'] = np.ma.asarray(dinput['data'])
        dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
        dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
    #-- add extent and spacing attributes
    xmin,xmax = np.min(dinput['x']),np.max(dinput['x'])
    ymin,ymax = np.min(dinput['y']),np.max(dinput['y'])
    dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
    dx = dinput['x'][1] - dinput['x'][0]
    dy = dinput['y'][1] - dinput['y'][0]
    dinput['attributes']['spacing'] = (dx,dy)
    #-- Closing the NetCDF file
    fileID.close()
    #-- return the spatial variables
    return dinput
def from_HDF5(filename, **kwargs):
"""
Read data from a HDF5 file
Inputs: full path of input HDF5 file
Options:
HDF5 file is compressed or streamed from memory
HDF5 variable names of x, y, and data
"""
#-- set default keyword arguments
kwargs.setdefault('compression',None)
kwargs.setdefault('xname','x')
kwargs.setdefault('yname','y')
kwargs.setdefault('varname','data')
#-- read data from HDF5 file
#-- Open the HDF5 file for reading
if (kwargs['compression'] == 'gzip'):
#-- read gzip compressed file and extract into in-memory file object
with gzip.open(case_insensitive_filename(filename),'r') as f:
fid = io.BytesIO(f.read())
#-- set filename of BytesIO object
fid.filename = os.path.basename(filename)
#-- rewind to start of file
fid.seek(0)
#-- read as in-memory (diskless) HDF5 dataset from BytesIO object
fileID = h5py.File(fid, 'r')
elif (kwargs['compression'] == 'bytes'):
#-- read as in-memory (diskless) HDF5 dataset
fileID = h5py.File(filename, 'r')
else:
#-- read HDF5 dataset
fileID = h5py.File(case_insensitive_filename(filename), 'r')
#-- Output HDF5 file information
logging.info(fileID.filename)
logging.info(list(fileID.keys()))
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {}
#-- get attributes for the file
for attr in ['title','description','projection']:
#-- try getting the attribute
try:
dinput['attributes'][attr] = fileID.attrs[attr]
except (KeyError,AttributeError):
pass
#-- list of attributes to attempt to retrieve from included variables
attributes_list = ['description','units','long_name','calendar',
'standard_name','grid_mapping','_FillValue']
#-- mapping between HDF5 variable names and output names
variable_mapping = dict(x=kwargs['xname'],y=kwargs['yname'],
data=kwargs['varname'])
#-- for each variable
for key,h5 in variable_mapping.items():
#-- Getting the data from each HDF5 variable
dinput[key] = np.copy(fileID[h5][:])
#-- get attributes for the included variables
dinput['attributes'][key] = {}
for attr in attributes_list:
#-- try getting the attribute
try:
dinput['attributes'][key][attr] = fileID[h5].attrs[attr]
except (KeyError,AttributeError):
pass
#-- get projection information if there is a grid_mapping attribute
if 'grid_mapping' in dinput['attributes']['data'].keys():
#-- try getting the attribute
grid_mapping = dinput['attributes']['data']['grid_mapping']
for att_name,att_val in fileID[grid_mapping].attrs.items():
dinput['attributes']['crs'][att_name] = att_val
#-- get the spatial projection reference information from wkt
#-- and overwrite the file-level projection attribute (if existing)
srs = osgeo.osr.SpatialReference()
srs.ImportFromWkt(dinput['attributes']['crs']['crs_wkt'])
dinput['attributes']['projection'] = srs.ExportToProj4()
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- add extent and spacing attributes
xmin,xmax = np.min(dinput['x']),np.max(dinput['x'])
ymin,ymax = np.min(dinput['y']),np.max(dinput['y'])
dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
dx = dinput['x'][1] - dinput['x'][0]
dy = dinput['y'][1] - dinput['y'][0]
dinput['attributes']['spacing'] = (dx,dy)
#-- Closing the HDF5 file
fileID.close()
#-- return the spatial variables
return dinput
def from_geotiff(filename, **kwargs):
"""
Read data from a geotiff file
Inputs: full path of input geotiff file
Options:
geotiff file is compressed or streamed from memory
"""
#-- set default keyword arguments
kwargs.setdefault('compression',None)
#-- Open the geotiff file for reading
if (kwargs['compression'] == 'gzip'):
#-- read gzip compressed file and extract into memory-mapped object
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
with gzip.open(case_insensitive_filename(filename),'r') as f:
osgeo.gdal.FileFromMemBuffer(mmap_name, f.read())
#-- read as GDAL memory-mapped (diskless) geotiff dataset
ds = osgeo.gdal.Open(mmap_name)
elif (kwargs['compression'] == 'bytes'):
#-- read as GDAL memory-mapped (diskless) geotiff dataset
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
osgeo.gdal.FileFromMemBuffer(mmap_name, filename.read())
ds = osgeo.gdal.Open(mmap_name)
else:
#-- read geotiff dataset
ds = osgeo.gdal.Open(case_insensitive_filename(filename))
#-- print geotiff file if verbose
logging.info(filename)
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {c:dict() for c in ['x','y','data']}
#-- get the spatial projection reference information
srs = ds.GetSpatialRef()
dinput['attributes']['projection'] = srs.ExportToProj4()
dinput['attributes']['wkt'] = srs.ExportToWkt()
#-- get dimensions
xsize = ds.RasterXSize
ysize = ds.RasterYSize
#-- get geotiff info
info_geotiff = ds.GetGeoTransform()
dinput['attributes']['spacing'] = (info_geotiff[1],info_geotiff[5])
#-- calculate image extents
xmin = info_geotiff[0]
ymax = info_geotiff[3]
xmax = xmin + (xsize-1)*info_geotiff[1]
ymin = ymax + (ysize-1)*info_geotiff[5]
dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
#-- x and y pixel center coordinates (converted from upper left)
dinput['x'] = xmin + info_geotiff[1]/2.0 + np.arange(xsize)*info_geotiff[1]
dinput['y'] = ymax + info_geotiff[5]/2.0 + np.arange(ysize)*info_geotiff[5]
#-- read full image with GDAL
dinput['data'] = ds.ReadAsArray()
#-- check if image has fill values
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].mask = np.zeros_like(dinput['data'],dtype=bool)
if ds.GetRasterBand(1).GetNoDataValue():
#-- mask invalid values
dinput['data'].fill_value = ds.GetRasterBand(1).GetNoDataValue()
#-- create mask array for bad values
dinput['data'].mask[:] = (dinput['data'].data == dinput['data'].fill_value)
#-- set attribute for fill value
dinput['attributes']['data']['_FillValue'] = dinput['data'].fill_value
#-- close the dataset
ds = None
#-- return the spatial variables
return dinput
def convert_ellipsoid(phi1, h1, a1, f1, a2, f2, eps=1e-12, itmax=10):
"""
Convert latitudes and heights to a different ellipsoid using Newton-Raphson
Inputs:
phi1: latitude of input ellipsoid in degrees
h1: height above input ellipsoid in meters
a1: semi-major axis of input ellipsoid
f1: flattening of input ellipsoid
a2: semi-major axis of output ellipsoid
f2: flattening of output ellipsoid
Options:
eps: tolerance to prevent division by small numbers
and to determine convergence
itmax: maximum number of iterations to use in Newton-Raphson
Returns:
phi2: latitude of output ellipsoid in degrees
h2: height above output ellipsoid in meters
References:
Astronomical Algorithms, <NAME>, 1991, Willmann-Bell, Inc.
pp. 77-82
"""
if (len(phi1) != len(h1)):
raise ValueError('phi and h have incompatable dimensions')
#-- semiminor axis of input and output ellipsoid
b1 = (1.0 - f1)*a1
b2 = (1.0 - f2)*a2
#-- initialize output arrays
npts = len(phi1)
phi2 = np.zeros((npts))
h2 = np.zeros((npts))
#-- for each point
for N in range(npts):
#-- force phi1 into range -90 <= phi1 <= 90
if (np.abs(phi1[N]) > 90.0):
phi1[N] = np.sign(phi1[N])*90.0
#-- handle special case near the equator
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + a1 - a2
if (np.abs(phi1[N]) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + a1 - a2
#-- handle special case near the poles
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + b1 - b2
elif ((90.0 - np.abs(phi1[N])) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + b1 - b2
#-- handle case if latitude is within 45 degrees of equator
elif (np.abs(phi1[N]) <= 45):
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = b2 * b2 - a2 * a2
k1 = a2 * hpr1cos
k2 = b2 * hpr1sin
#-- perform newton-raphson iteration to solve for u2
#-- cos(u2) will not be close to zero since abs(phi1) <= 45
for i in range(0, itmax+1):
cosu2 = np.cos(u2)
fu2 = k0 * np.sin(u2) + k1 * np.tan(u2) - k2
fu2p = k0 * cosu2 + k1 / (cosu2 * cosu2)
if (np.abs(fu2p) < eps):
i = np.copy(itmax)
else:
delta = fu2 / fu2p
u2 -= delta
if (np.abs(delta) < eps):
i = np.copy(itmax)
#-- convert latitude to degrees and verify values between +/- 90
phi2r = np.arctan(a2 / b2 * np.tan(u2))
phi2[N] = phi2r*180.0/np.pi
if (np.abs(phi2[N]) > 90.0):
phi2[N] = np.sign(phi2[N])*90.0
#-- calculate height
h2[N] = (hpr1cos - a2 * np.cos(u2)) / np.cos(phi2r)
#-- handle final case where latitudes are between 45 degrees and pole
else:
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = a2 * a2 - b2 * b2
k1 = b2 * hpr1sin
k2 = a2 * hpr1cos
#-- perform newton-raphson iteration to solve for u2
#-- sin(u2) will not be close to zero since abs(phi1) > 45
for i in range(0, itmax+1):
sinu2 = np.sin(u2)
fu2 = k0 * np.cos(u2) + k1 / np.tan(u2) - k2
fu2p = -1 * (k0 * sinu2 + k1 / (sinu2 * sinu2))
if (np.abs(fu2p) < eps):
i = np.copy(itmax)
else:
delta = fu2 / fu2p
u2 -= delta
if (np.abs(delta) < eps):
i = np.copy(itmax)
#-- convert latitude to degrees and verify values between +/- 90
phi2r = np.arctan(a2 / b2 * np.tan(u2))
phi2[N] = phi2r*180.0/np.pi
if (np.abs(phi2[N]) > 90.0):
phi2[N] = np.sign(phi2[N])*90.0
#-- calculate height
h2[N] = (hpr1sin - b2 * np.sin(u2)) / np.sin(phi2r)
#-- return the latitude and height
return (phi2, h2)
def compute_delta_h(a1, f1, a2, f2, lat):
"""
Compute difference in elevation for two ellipsoids at a given
latitude using a simplified empirical equation
Inputs:
a1: semi-major axis of input ellipsoid
f1: flattening of input ellipsoid
a2: semi-major axis of output ellipsoid
f2: flattening of output ellipsoid
lat: array of latitudes in degrees
Returns:
delta_h: difference in elevation for two ellipsoids
Reference:
<NAME>, Astronomical Algorithms, pp. 77-82 (1991)
"""
#-- force phi into range -90 <= phi <= 90
gt90, = np.nonzero((lat < -90.0) | (lat > 90.0))
lat[gt90] = np.sign(lat[gt90])*90.0
#-- semiminor axis of input and output ellipsoid
b1 = (1.0 - f1)*a1
b2 = (1.0 - f2)*a2
#-- compute delta_a and delta_b coefficients
delta_a = a2 - a1
delta_b = b2 - b1
#-- compute differences between ellipsoids
#-- delta_h = -(delta_a * cos(phi)^2 + delta_b * sin(phi)^2)
phi = lat * np.pi/180.0
delta_h = -(delta_a*np.cos(phi)**2 + delta_b*np.sin(phi)**2)
return delta_h
def wrap_longitudes(lon):
"""
Wraps longitudes to range from -180 to +180
Inputs:
lon: longitude (degrees east)
"""
phi = np.arctan2(np.sin(lon*np.pi/180.0),np.cos(lon*np.pi/180.0))
#-- convert phi from radians to degrees
return phi*180.0/np.pi
def to_cartesian(lon,lat,h=0.0,a_axis=6378137.0,flat=1.0/298.257223563):
"""
Converts geodetic coordinates to Cartesian coordinates
Inputs:
lon: longitude (degrees east)
lat: latitude (degrees north)
Options:
h: height above ellipsoid (or sphere)
a_axis: semimajor axis of the ellipsoid (default: WGS84)
* for spherical coordinates set to radius of the Earth
flat: ellipsoidal flattening (default: WGS84)
* for spherical coordinates set to 0
"""
#-- verify axes
lon = np.atleast_1d(lon)
lat = np.atleast_1d(lat)
#-- fix coordinates to be 0:360
count = np.count_nonzero(lon < 0)
if (count != 0):
lt0, = np.nonzero(lon < 0)
lon[lt0] += 360.0
#-- Linear eccentricity and first numerical eccentricity
lin_ecc = np.sqrt((2.0*flat - flat**2)*a_axis**2)
ecc1 = lin_ecc/a_axis
#-- convert from geodetic latitude to geocentric latitude
dtr = np.pi/180.0
#-- geodetic latitude in radians
latitude_geodetic_rad = lat*dtr
#-- prime vertical radius of curvature
N = a_axis/np.sqrt(1.0 - ecc1**2.0*np.sin(latitude_geodetic_rad)**2.0)
#-- calculate X, Y and Z from geodetic latitude and longitude
X = (N + h) * np.cos(latitude_geodetic_rad) * np.cos(lon*dtr)
Y = (N + h) * np.cos(latitude_geodetic_rad) * np.sin(lon*dtr)
Z = (N * (1.0 - ecc1**2.0) + h) * np.sin(latitude_geodetic_rad)
#-- return the cartesian coordinates
return (X,Y,Z)
def to_sphere(x,y,z):
"""
Convert from cartesian coordinates to spherical coordinates
Inputs:
x,y,z in cartesian coordinates
"""
#-- calculate radius
rad = np.sqrt(x**2.0 + y**2.0 + z**2.0)
#-- calculate angular coordinates
#-- phi: azimuthal angle
phi = np.arctan2(y,x)
#-- th: polar angle
th = np.arccos(z/rad)
#-- convert to degrees and fix to 0:360
lon = 180.0*phi/np.pi
count = np.count_nonzero(lon < 0)
if (count != 0):
lt0 = np.nonzero(lon < 0)
lon[lt0] = lon[lt0]+360.0
#-- convert to degrees and fix to -90:90
lat = 90.0 - (180.0*th/np.pi)
#-- return latitude, longitude and radius
return (lon,lat,rad)
def to_geodetic(x,y,z,a_axis=6378137.0,flat=1.0/298.257223563):
"""
Convert from cartesian coordinates to geodetic coordinates
using a closed form solution
Inputs:
x,y,z in cartesian coordinates
Options:
a_axis: semimajor axis of the ellipsoid (default: WGS84)
flat: ellipsoidal flattening (default: WGS84)
References:
<NAME> "Exact conversion of Earth-centered, Earth-fixed
coordinates to geodetic coordinates"
Journal of Guidance, Control, and Dynamics,
16(2), 389--391, 1993
https://arc.aiaa.org/doi/abs/10.2514/3.21016
"""
#-- semiminor axis of the WGS84 ellipsoid [m]
b_axis = (1.0 - flat)*a_axis
#-- Linear eccentricity and first numerical eccentricity
lin_ecc = np.sqrt((2.0*flat - flat**2)*a_axis**2)
ecc1 = lin_ecc/a_axis
#-- square of first numerical eccentricity
e12 = ecc1**2
#-- degrees to radians
dtr = np.pi/180.0
#-- calculate distance
w = np.sqrt(x**2 + y**2)
#-- calculate longitude
lon = np.arctan2(y,x)/dtr
lat = np.zeros_like(lon)
h = np.zeros_like(lon)
if (w == 0):
#-- special case where w == 0 (exact polar solution)
h = np.sign(z)*z - b_axis
lat = 90.0*np.sign(z)
else:
#-- all other cases
l = e12/2.0
m = (w/a_axis)**2.0
n = ((1.0-e12)*z/b_axis)**2.0
i = -(2.0*l**2 + m + n)/2.0
k = (l**2.0 - m - n)*l**2.0
q = (1.0/216.0)*(m + n - 4.0*l**2)**3.0 + m*n*l**2.0
D = np.sqrt((2.0*q - m*n*l**2)*m*n*l**2)
B = i/3.0 - (q+D)**(1.0/3.0) - (q-D)**(1.0/3.0)
t = np.sqrt(np.sqrt(B**2-k) - (B+i)/2.0)-np.sign(m-n)*np.sqrt((B-i)/2.0)
wi = w/(t+l)
zi = (1.0-e12)*z/(t-l)
#-- calculate latitude and height
lat = np.arctan2(zi,((1.0-e12)*wi))/dtr
h = np.sign(t-1.0+l)*np.sqrt((w-wi)**2.0 + (z-zi)**2.0)
#-- return latitude, longitude and height
return (lon,lat,h)
def scale_areas(lat, flat=1.0/298.257223563, ref=70.0):
"""
Calculates area scaling factors for a polar stereographic projection
including special case of at the exact pole
Inputs:
lat: latitude (degrees north)
Options:
flat: ellipsoidal flattening (default: WGS84)
ref: reference latitude (true scale latitude)
Returns:
scale: area scaling factors at input latitudes
References:
<NAME> (1982) Map Projections used by the U.S. Geological Survey
Forward formulas for the ellipsoid. Geological Survey Bulletin
1532, U.S. Government Printing Office.
JPL Technical Memorandum 3349-85-101
"""
#-- convert latitude from degrees to positive radians
theta = np.abs(lat)*np.pi/180.0
#-- convert reference latitude from degrees to positive radians
theta_ref = np.abs(ref)*np.pi/180.0
#-- square of the eccentricity of the ellipsoid
#-- ecc2 = (1-b**2/a**2) = 2.0*flat - flat^2
ecc2 = 2.0*flat - flat**2
#-- eccentricity of the ellipsoid
ecc = np.sqrt(ecc2)
#-- calculate ratio at input latitudes
m = np.cos(theta)/np.sqrt(1.0 - ecc2*np.sin(theta)**2)
t = np.tan(np.pi/4.0 - theta/2.0)/((1.0 - ecc*np.sin(theta)) / \
(1.0 + ecc*np.sin(theta)))**(ecc/2.0)
#-- calculate ratio at reference latitude
mref = np.cos(theta_ref)/np.sqrt(1.0 - ecc2*np.sin(theta_ref)**2)
tref = np.tan(np.pi/4.0 - theta_ref/2.0)/((1.0 - ecc*np.sin(theta_ref)) / \
(1.0 + ecc*np.sin(theta_ref)))**(ecc/2.0)
#-- distance scaling
k = (mref/m)*(t/tref)
kp = 0.5*mref*np.sqrt(((1.0+ecc)**(1.0+ecc))*((1.0-ecc)**(1.0-ecc)))/tref
#-- area scaling
scale = np.where(np.isclose(theta,np.pi/2.0),1.0/(kp**2),1.0/(k**2))
return scale
#-- PURPOSE: check a specified 2D point is inside a specified 2D polygon
def inside_polygon(x, y, xpts, ypts, threshold=0.01):
"""
Indicates whether a specified 2D point is inside a specified 2D polygon
Inputs:
x: x coordinates of the 2D point(s) to check.
y: y coordinates of the 2D point(s) to check.
xpts: The x coordinates of the 2D polygon.
ypts: The y coordinates of the 2D polygon.
Options:
threshold: minimum angle for checking if inside polygon
Returns:
flag: True for points within polygon, False for points outside polygon
"""
#-- create numpy arrays for 2D points
x = np.atleast_1d(x)
y = np.atleast_1d(y)
nn = len(x)
#-- create numpy arrays for polygon points
xpts = np.array(xpts)
ypts = np.array(ypts)
#-- check dimensions of polygon points
if (xpts.ndim != 1):
raise ValueError('X coordinates of polygon not a vector.')
if (ypts.ndim != 1):
raise ValueError('Y coordinates of polygon not a vector.')
if (len(xpts) != len(ypts)):
raise ValueError('Incompatable vector dimensions.')
#-- maximum possible number of vertices in polygon
N = len(xpts)
#-- Close the polygon if not already closed
if not np.isclose(xpts[-1],xpts[0]) and not np.isclose(ypts[-1],ypts[0]):
xpts = np.concatenate((xpts,[xpts[0]]),axis=0)
ypts = np.concatenate((ypts,[ypts[0]]),axis=0)
else:
#-- remove 1 from number of vertices
N -= 1
#-- Calculate dot and cross products of points to neighboring polygon points
i = np.arange(N)
X1 = np.dot(xpts[i][:,np.newaxis],np.ones((1,nn))) - \
np.dot(np.ones((N,1)),x[np.newaxis,:])
Y1 = np.dot(ypts[i][:,np.newaxis],np.ones((1,nn))) - \
np.dot(np.ones((N,1)),y[np.newaxis,:])
X2 = np.dot(xpts[i+1][:,np.newaxis],np.ones((1,nn))) - \
np.dot(np.ones((N,1)),x[np.newaxis,:])
Y2 = np.dot(ypts[i+1][:,np.newaxis],np.ones((1,nn))) - \
np.dot(np.ones((N,1)),y[np.newaxis,:])
#-- Dot-product
dp = X1*X2 + Y1*Y2
#-- Cross-product
cp = X1*Y2 - Y1*X2
#-- Calculate tangent of the angle between the two nearest adjacent points
theta = np.arctan2(cp,dp)
#-- If point is outside polygon then summation over all possible
#-- angles will equal a small number (e.g. 0.01)
flag = np.where(np.abs(np.sum(theta,axis=0)) > threshold, True, False)
# Make a scalar value if there was only one input value
if (nn == 1):
return flag[0]
else:
return flag | en | 0.410455 | #!/usr/bin/env python spatial.py Written by <NAME> (11/2021) Utilities for reading and operating on spatial data PYTHON DEPENDENCIES: numpy: Scientific Computing Tools For Python https://numpy.org https://numpy.org/doc/stable/user/numpy-for-matlab-users.html netCDF4: Python interface to the netCDF C library https://unidata.github.io/netcdf4-python/netCDF4/index.html h5py: Pythonic interface to the HDF5 binary data format https://www.h5py.org/ gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL) https://pypi.python.org/pypi/GDAL UPDATE HISTORY: Written 11/2021 Searches a directory for a filename without case dependence #-- check if file presently exists with input case #-- search for filename without case dependence Wrapper function for reading data from an input format #-- read input file to extract spatial coordinates and data Read data from a netCDF4 file Inputs: full path of input netCDF4 file Options: netCDF4 file is compressed or streamed from memory netCDF4 variable names of x, y, and data #-- set default keyword arguments #-- read data from netCDF4 file #-- Open the NetCDF4 file for reading #-- read as in-memory (diskless) netCDF4 dataset #-- read as in-memory (diskless) netCDF4 dataset #-- read netCDF4 dataset #-- Output NetCDF file information #-- create python dictionary for output variables and attributes #-- get attributes for the file #-- try getting the attribute #-- list of attributes to attempt to retrieve from included variables #-- mapping between netCDF4 variable names and output names #-- for each variable #-- Getting the data from each NetCDF variable #-- get attributes for the included variables #-- try getting the attribute #-- get projection information if there is a grid_mapping attribute #-- try getting the attribute #-- get the spatial projection reference information from wkt #-- and overwrite the file-level projection attribute (if existing) #-- convert to masked array if fill values 
#-- add extent and spacing attributes #-- Closing the NetCDF file #-- return the spatial variables Read data from a HDF5 file Inputs: full path of input HDF5 file Options: HDF5 file is compressed or streamed from memory HDF5 variable names of x, y, and data #-- set default keyword arguments #-- read data from HDF5 file #-- Open the HDF5 file for reading #-- read gzip compressed file and extract into in-memory file object #-- set filename of BytesIO object #-- rewind to start of file #-- read as in-memory (diskless) HDF5 dataset from BytesIO object #-- read as in-memory (diskless) HDF5 dataset #-- read HDF5 dataset #-- Output HDF5 file information #-- create python dictionary for output variables and attributes #-- get attributes for the file #-- try getting the attribute #-- list of attributes to attempt to retrieve from included variables #-- mapping between HDF5 variable names and output names #-- for each variable #-- Getting the data from each HDF5 variable #-- get attributes for the included variables #-- try getting the attribute #-- get projection information if there is a grid_mapping attribute #-- try getting the attribute #-- get the spatial projection reference information from wkt #-- and overwrite the file-level projection attribute (if existing) #-- convert to masked array if fill values #-- add extent and spacing attributes #-- Closing the HDF5 file #-- return the spatial variables Read data from a geotiff file Inputs: full path of input geotiff file Options: geotiff file is compressed or streamed from memory #-- set default keyword arguments #-- Open the geotiff file for reading #-- read gzip compressed file and extract into memory-mapped object #-- read as GDAL memory-mapped (diskless) geotiff dataset #-- read as GDAL memory-mapped (diskless) geotiff dataset #-- read geotiff dataset #-- print geotiff file if verbose #-- create python dictionary for output variables and attributes #-- get the spatial projection reference information #-- get 
dimensions #-- get geotiff info #-- calculate image extents #-- x and y pixel center coordinates (converted from upper left) #-- read full image with GDAL #-- check if image has fill values #-- mask invalid values #-- create mask array for bad values #-- set attribute for fill value #-- close the dataset #-- return the spatial variables Convert latitudes and heights to a different ellipsoid using Newton-Raphson Inputs: phi1: latitude of input ellipsoid in degrees h1: height above input ellipsoid in meters a1: semi-major axis of input ellipsoid f1: flattening of input ellipsoid a2: semi-major axis of output ellipsoid f2: flattening of output ellipsoid Options: eps: tolerance to prevent division by small numbers and to determine convergence itmax: maximum number of iterations to use in Newton-Raphson Returns: phi2: latitude of output ellipsoid in degrees h2: height above output ellipsoid in meters References: Astronomical Algorithms, <NAME>, 1991, Willmann-Bell, Inc. pp. 77-82 #-- semiminor axis of input and output ellipsoid #-- initialize output arrays #-- for each point #-- force phi1 into range -90 <= phi1 <= 90 #-- handle special case near the equator #-- phi2 = phi1 (latitudes congruent) #-- h2 = h1 + a1 - a2 #-- handle special case near the poles #-- phi2 = phi1 (latitudes congruent) #-- h2 = h1 + b1 - b2 #-- handle case if latitude is within 45 degrees of equator #-- convert phi1 to radians #-- prevent division by very small numbers #-- calculate tangent #-- set initial value for u2 #-- setup constants #-- perform newton-raphson iteration to solve for u2 #-- cos(u2) will not be close to zero since abs(phi1) <= 45 #-- convert latitude to degrees and verify values between +/- 90 #-- calculate height #-- handle final case where latitudes are between 45 degrees and pole #-- convert phi1 to radians #-- prevent division by very small numbers #-- calculate tangent #-- set initial value for u2 #-- setup constants #-- perform newton-raphson iteration to solve for u2 
#-- sin(u2) will not be close to zero since abs(phi1) > 45 #-- convert latitude to degrees and verify values between +/- 90 #-- calculate height #-- return the latitude and height Compute difference in elevation for two ellipsoids at a given latitude using a simplified empirical equation Inputs: a1: semi-major axis of input ellipsoid f1: flattening of input ellipsoid a2: semi-major axis of output ellipsoid f2: flattening of output ellipsoid lat: array of latitudes in degrees Returns: delta_h: difference in elevation for two ellipsoids Reference: <NAME>, Astronomical Algorithms, pp. 77-82 (1991) #-- force phi into range -90 <= phi <= 90 #-- semiminor axis of input and output ellipsoid #-- compute delta_a and delta_b coefficients #-- compute differences between ellipsoids #-- delta_h = -(delta_a * cos(phi)^2 + delta_b * sin(phi)^2) Wraps longitudes to range from -180 to +180 Inputs: lon: longitude (degrees east) #-- convert phi from radians to degrees Converts geodetic coordinates to Cartesian coordinates Inputs: lon: longitude (degrees east) lat: latitude (degrees north) Options: h: height above ellipsoid (or sphere) a_axis: semimajor axis of the ellipsoid (default: WGS84) * for spherical coordinates set to radius of the Earth flat: ellipsoidal flattening (default: WGS84) * for spherical coordinates set to 0 #-- verify axes #-- fix coordinates to be 0:360 #-- Linear eccentricity and first numerical eccentricity #-- convert from geodetic latitude to geocentric latitude #-- geodetic latitude in radians #-- prime vertical radius of curvature #-- calculate X, Y and Z from geodetic latitude and longitude #-- return the cartesian coordinates Convert from cartesian coordinates to spherical coordinates Inputs: x,y,z in cartesian coordinates #-- calculate radius #-- calculate angular coordinates #-- phi: azimuthal angle #-- th: polar angle #-- convert to degrees and fix to 0:360 #-- convert to degrees and fix to -90:90 #-- return latitude, longitude and radius Convert from 
cartesian coordinates to geodetic coordinates using a closed form solution Inputs: x,y,z in cartesian coordinates Options: a_axis: semimajor axis of the ellipsoid (default: WGS84) flat: ellipsoidal flattening (default: WGS84) References: <NAME> "Exact conversion of Earth-centered, Earth-fixed coordinates to geodetic coordinates" Journal of Guidance, Control, and Dynamics, 16(2), 389--391, 1993 https://arc.aiaa.org/doi/abs/10.2514/3.21016 #-- semiminor axis of the WGS84 ellipsoid [m] #-- Linear eccentricity and first numerical eccentricity #-- square of first numerical eccentricity #-- degrees to radians #-- calculate distance #-- calculate longitude #-- special case where w == 0 (exact polar solution) #-- all other cases #-- calculate latitude and height #-- return latitude, longitude and height Calculates area scaling factors for a polar stereographic projection including special case of at the exact pole Inputs: lat: latitude (degrees north) Options: flat: ellipsoidal flattening (default: WGS84) ref: reference latitude (true scale latitude) Returns: scale: area scaling factors at input latitudes References: <NAME> (1982) Map Projections used by the U.S. Geological Survey Forward formulas for the ellipsoid. Geological Survey Bulletin 1532, U.S. Government Printing Office. JPL Technical Memorandum 3349-85-101 #-- convert latitude from degrees to positive radians #-- convert reference latitude from degrees to positive radians #-- square of the eccentricity of the ellipsoid #-- ecc2 = (1-b**2/a**2) = 2.0*flat - flat^2 #-- eccentricity of the ellipsoid #-- calculate ratio at input latitudes #-- calculate ratio at reference latitude #-- distance scaling #-- area scaling #-- PURPOSE: check a specified 2D point is inside a specified 2D polygon Indicates whether a specified 2D point is inside a specified 2D polygon Inputs: x: x coordinates of the 2D point(s) to check. y: y coordinates of the 2D point(s) to check. xpts: The x coordinates of the 2D polygon. 
ypts: The y coordinates of the 2D polygon. Options: threshold: minimum angle for checking if inside polygon Returns: flag: True for points within polygon, False for points outside polygon #-- create numpy arrays for 2D points #-- create numpy arrays for polygon points #-- check dimensions of polygon points #-- maximum possible number of vertices in polygon #-- Close the polygon if not already closed #-- remove 1 from number of vertices #-- Calculate dot and cross products of points to neighboring polygon points #-- Dot-product #-- Cross-product #-- Calculate tangent of the angle between the two nearest adjacent points #-- If point is outside polygon then summation over all possible #-- angles will equal a small number (e.g. 0.01) # Make a scalar value if there was only one input value | 2.384941 | 2 |
Proj029Pipelines/pipeline_proj029_enrichment.py | CGATOxford/proj029 | 3 | 6619375 | <filename>Proj029Pipelines/pipeline_proj029_enrichment.py
"""
=======================================
Perform functional enrichment testing
=======================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import os
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import CGAT.IOTools as IOTools
from rpy2.robjects import r as R
import pandas
import sys
import CGATPipelines.Pipeline as P
import collections
#######################
# parameters
#######################
# Load the pipeline configuration from pipeline.ini in the working
# directory; PARAMS is the resulting global option dictionary read by
# the tasks below (e.g. "group_status", "pathways_geneset").
P.getParameters(
    ["pipeline.ini"])
PARAMS = P.PARAMS
#########################################
#########################################
#########################################
@follows(mkdir("pathways.dir"))
@transform("ratio_genes.annotated.outsidepi.tsv", regex("(\S+).tsv"), r"pathways.dir/\1.foreground.tsv.gz")
def buildForegroundSet(infile, outfile):
'''
build foreground set of COGs
'''
status = PARAMS.get("group_status")
statement = '''cat %(infile)s | grep %(status)s | cut -f1 | gzip > %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
@split([buildForegroundSet,
        "common_genes.tsv",
        PARAMS.get("pathways_geneset")],
       "pathways.dir/*.overall")
def runPathwaysAnalysis(infiles, outfiles):
    '''
    Run pathway enrichment analysis with runGO.py.

    infiles: (foreground COG list, background gene list,
              gene-to-pathway map).
    outfiles: one ".overall" results file per set/ontology
              combination under pathways.dir.
    '''
    genes, background, gene2pathway = infiles
    # remove General function prediction only
    # and Function unknown
    # (uninformative categories are dropped from the map before testing)
    temp = P.getTempFilename(".")
    statement = '''cat %(gene2pathway)s | grep -v "General function"
                   | grep -v "Function unknown"
                   > %(temp)s'''
    P.run()
    # Run the enrichment test with FDR correction (-q BH =
    # Benjamini-Hochberg).  The %% escapes survive P.run()'s local
    # variable interpolation, so runGO.py itself receives the
    # %(set)s.%(go)s.%(section)s output-filename pattern.
    statement = '''python %(scriptsdir)s/runGO.py \
                   --background=%(background)s
                   --genes=%(genes)s \
                   --filename-input=%(temp)s \
                   --fdr \
                   -q BH \
                   --output-filename-pattern="pathways.dir/%%(set)s.%%(go)s.%%(section)s" \
                   > pathways.dir/pathways.log \
                   ; rm -rf %(temp)s
                '''
    P.run()
#########################################
#########################################
#########################################
@merge([buildForegroundSet, PARAMS.get("pathways_geneset")],
       "pathways.dir/cogs_pathways.tsv")
def buildDiffCogsAndPathways(infiles, outfile):
    '''
    Annotate the foreground (differential) COGs with their pathway
    assignments via a left outer join in R.

    infiles: (foreground COG list, gene-to-pathway map); the map is
             read without a header, so column V2 is taken as the COG
             identifier -- TODO confirm against the geneset file.
    outfile: tab-separated table, one row per foreground COG with its
             pathway columns (NA where unmapped).
    '''
    # one-column foreground COG list
    R('''cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[0])
    R('''colnames(cogs) <- "gene"''')
    # gene-to-pathway map (no header; columns named V1, V2, ...)
    R('''pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[1])
    # left outer join: keep every foreground COG, attach pathway columns
    R('''dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F)''')
    R('''write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile)
#########################################
#########################################
@merge(["common_genes.tsv", PARAMS.get("pathways_geneset")],
"pathways.dir/background_cogs_pathways.tsv")
def buildBackgroundCogsAndPathways(infiles, outfile):
'''
merge diff COGs and pathways
'''
R('''cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[0])
R('''colnames(cogs) <- "gene"''')
R('''pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[1])
R('''dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F)''')
R('''write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile)
#########################################
#########################################
#########################################
@transform(runPathwaysAnalysis, suffix(".overall"), ".bar.pdf")
def plotPathways(infile, outfile):
'''
plot pathways associated with clusters
'''
R('''library(ggplot2)''')
R('''dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infile)
R('''dat <- dat[order(dat$ratio),]''')
R('''plot1 <- ggplot(dat, aes(x = factor(description, levels=description), y=ratio, stat="identity"))''')
R('''plot1 + geom_bar(stat="identity") + coord_flip()''')
R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("heatmaps.dir"))
@split(buildDiffCogsAndPathways, "heatmaps.dir/*.tsv")
def splitPathways(infile, outfiles):
'''
map cogs to pathways in separate files
'''
inf = IOTools.openFile(infile)
inf.readline()
pathway2nogs = collections.defaultdict(set)
for line in inf.readlines():
data = line[:-1].split("\t")
nog, pathway = data[0], data[3]
pathway = pathway.replace(" ", "_").replace("/", "_")
pathway2nogs[pathway].add(nog)
for pathway, nogs in pathway2nogs.iteritems():
outname = os.path.join("heatmaps.dir", pathway + ".tsv")
outf = IOTools.openFile(outname, "w")
outf.write("NOG\tpathway\n")
for nog in nogs:
outf.write("%s\t%s\n"% (nog, pathway))
outf.close()
#########################################
#########################################
#########################################
@follows(mkdir("annotations.dir"))
@transform(splitPathways,
regex("(\S+)/(\S+).tsv"),
add_inputs(PARAMS.get("annotations_eggnog")),
r"annotations.dir/\2.annotated.tsv")
def annotateNogs(infiles, outfile):
'''
annotate the NOGs with their descriptions
'''
pathways, annotations = infiles
anno = {}
# read annotations
for line in open(annotations).readlines():
data = line[:-1].split("\t")
nog, description = data
anno[nog] = description
# write out annotations
p = IOTools.openFile(pathways)
p.readline()
outf = IOTools.openFile(outfile, "w")
outf.write("NOG\tpathway\tdescription\n")
for line in p.readlines():
data = line[:-1].split("\t")
nog, pathway = data
try:
outf.write("%s\t%s\t%s\n" % (nog, pathway, anno[nog]))
except KeyError:
outf.write("%s\t%s\t%s\n" % (nog, pathway, "NA"))
outf.close()
#########################################
#########################################
#########################################
@jobs_limit(1,"R")
@transform(splitPathways,
suffix(".tsv"),
add_inputs(PARAMS.get("matrix_file")),
".heatmap.pdf")
def heatmapNogs(infiles, outfile):
'''
heatmap nogs per functional category
'''
# check files is compatible with heatmaps
# i.e. >=2 NOGs
nogs, matrix = infiles
inf = IOTools.openFile(nogs)
# header
inf.readline()
# check lines
if len(inf.readlines()) == 1:
P.touch(outfile)
else:
R('''library(gtools)''')
R('''library(gplots)''')
# read in matrix
R('''mat <- read.csv("%s", header=T,stringsAsFactors=F, sep="\t")''' % matrix)
R('''rownames(mat) <- mat$taxa''')
R('''mat <- mat[,1:ncol(mat)-1]''')
# read in NOGs
R('''nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % nogs)
# subset matrix
R('''mat <- mat[nogs$NOG,]''')
# scale
R('''mat.s <- data.frame(t(apply(mat, 1, scale)))''')
R('''colnames(mat.s) <- colnames(mat)''')
R('''mat.s <- mat.s[, mixedsort(colnames(mat.s))]''')
R('''mat.s <- mat.s[order(rownames(mat.s)),]''')
# heatmap
R('''pdf("%s")''' % outfile)
R('''cols <- colorRampPalette(c("blue", "white", "red"))(75)''')
R('''heatmap.2(as.matrix(mat.s),
trace="none",
Colv=F,
Rowv=F,
col=cols,
margins=c(15,15))''')
R["dev.off"]()
#########################################
#########################################
#########################################
@jobs_limit(1,"R")
@transform(splitPathways,
suffix(".tsv"),
add_inputs(PARAMS.get("matrix_taxa")),
".taxa.heatmap.pdf")
def heatmapTaxaAssociatedWithNogs(infiles, outfile):
'''
heatmap nogs per functional category
'''
# check files is compatible with heatmaps
# i.e. >=2 NOGs
nogs, matrix = infiles
inf = IOTools.openFile(nogs)
# header
inf.readline()
# check lines
if len(inf.readlines()) == 1:
P.touch(outfile)
else:
R('''library(gtools)''')
R('''library(gplots)''')
R('''library(pheatmap)''')
# read in matrix
R('''mat <- t(read.csv("%s", header=T,stringsAsFactors=F, sep="\t", row.names=1))''' % matrix)
# read in NOGs
R('''nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % nogs)
# subset matrix
R('''mat <- mat[nogs$NOG,]''')
R('''mat <- mat[, colSums(mat > 5) >= 1]''')
R('''mat <- mat[order(rownames(mat)),]''')
# heatmap
R('''pdf("%s")''' % outfile)
R('''cols <- colorRampPalette(c("white", "blue"))(75)''')
R('''pheatmap(as.matrix(mat),
color=cols,
cluster_cols=F,
cluster_rows=F)''')
R["dev.off"]()
@follows(heatmapNogs,
heatmapTaxaAssociatedWithNogs)
def heatmaps():
pass
@follows(runPathwaysAnalysis,annotateNogs,heatmaps)
def full():
pass
#########################################
#########################################
#########################################
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| <filename>Proj029Pipelines/pipeline_proj029_enrichment.py
"""
=======================================
Perform functional enrichment testing
=======================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import os
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import CGAT.IOTools as IOTools
from rpy2.robjects import r as R
import pandas
import sys
import CGATPipelines.Pipeline as P
import collections
#######################
# parameters
#######################
P.getParameters(
["pipeline.ini"])
PARAMS = P.PARAMS
#########################################
#########################################
#########################################
@follows(mkdir("pathways.dir"))
@transform("ratio_genes.annotated.outsidepi.tsv", regex("(\S+).tsv"), r"pathways.dir/\1.foreground.tsv.gz")
def buildForegroundSet(infile, outfile):
'''
build foreground set of COGs
'''
status = PARAMS.get("group_status")
statement = '''cat %(infile)s | grep %(status)s | cut -f1 | gzip > %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
@split([buildForegroundSet,
"common_genes.tsv",
PARAMS.get("pathways_geneset")],
"pathways.dir/*.overall")
def runPathwaysAnalysis(infiles, outfiles):
'''
run pathways analysis
'''
genes, background, gene2pathway = infiles
# remove General function prediction only
# and Function unknown
temp = P.getTempFilename(".")
statement = '''cat %(gene2pathway)s | grep -v "General function"
| grep -v "Function unknown"
> %(temp)s'''
P.run()
statement = '''python %(scriptsdir)s/runGO.py \
--background=%(background)s
--genes=%(genes)s \
--filename-input=%(temp)s \
--fdr \
-q BH \
--output-filename-pattern="pathways.dir/%%(set)s.%%(go)s.%%(section)s" \
> pathways.dir/pathways.log \
; rm -rf %(temp)s
'''
P.run()
#########################################
#########################################
#########################################
@merge([buildForegroundSet, PARAMS.get("pathways_geneset")],
"pathways.dir/cogs_pathways.tsv")
def buildDiffCogsAndPathways(infiles, outfile):
'''
merge diff COGs and pathways
'''
R('''cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[0])
R('''colnames(cogs) <- "gene"''')
R('''pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[1])
R('''dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F)''')
R('''write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile)
#########################################
#########################################
#########################################
@merge(["common_genes.tsv", PARAMS.get("pathways_geneset")],
"pathways.dir/background_cogs_pathways.tsv")
def buildBackgroundCogsAndPathways(infiles, outfile):
'''
merge diff COGs and pathways
'''
R('''cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[0])
R('''colnames(cogs) <- "gene"''')
R('''pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t")''' % infiles[1])
R('''dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F)''')
R('''write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile)
#########################################
#########################################
#########################################
@transform(runPathwaysAnalysis, suffix(".overall"), ".bar.pdf")
def plotPathways(infile, outfile):
'''
plot pathways associated with clusters
'''
R('''library(ggplot2)''')
R('''dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infile)
R('''dat <- dat[order(dat$ratio),]''')
R('''plot1 <- ggplot(dat, aes(x = factor(description, levels=description), y=ratio, stat="identity"))''')
R('''plot1 + geom_bar(stat="identity") + coord_flip()''')
R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("heatmaps.dir"))
@split(buildDiffCogsAndPathways, "heatmaps.dir/*.tsv")
def splitPathways(infile, outfiles):
'''
map cogs to pathways in separate files
'''
inf = IOTools.openFile(infile)
inf.readline()
pathway2nogs = collections.defaultdict(set)
for line in inf.readlines():
data = line[:-1].split("\t")
nog, pathway = data[0], data[3]
pathway = pathway.replace(" ", "_").replace("/", "_")
pathway2nogs[pathway].add(nog)
for pathway, nogs in pathway2nogs.iteritems():
outname = os.path.join("heatmaps.dir", pathway + ".tsv")
outf = IOTools.openFile(outname, "w")
outf.write("NOG\tpathway\n")
for nog in nogs:
outf.write("%s\t%s\n"% (nog, pathway))
outf.close()
#########################################
#########################################
#########################################
@follows(mkdir("annotations.dir"))
@transform(splitPathways,
regex("(\S+)/(\S+).tsv"),
add_inputs(PARAMS.get("annotations_eggnog")),
r"annotations.dir/\2.annotated.tsv")
def annotateNogs(infiles, outfile):
'''
annotate the NOGs with their descriptions
'''
pathways, annotations = infiles
anno = {}
# read annotations
for line in open(annotations).readlines():
data = line[:-1].split("\t")
nog, description = data
anno[nog] = description
# write out annotations
p = IOTools.openFile(pathways)
p.readline()
outf = IOTools.openFile(outfile, "w")
outf.write("NOG\tpathway\tdescription\n")
for line in p.readlines():
data = line[:-1].split("\t")
nog, pathway = data
try:
outf.write("%s\t%s\t%s\n" % (nog, pathway, anno[nog]))
except KeyError:
outf.write("%s\t%s\t%s\n" % (nog, pathway, "NA"))
outf.close()
#########################################
#########################################
#########################################
@jobs_limit(1,"R")
@transform(splitPathways,
suffix(".tsv"),
add_inputs(PARAMS.get("matrix_file")),
".heatmap.pdf")
def heatmapNogs(infiles, outfile):
'''
heatmap nogs per functional category
'''
# check files is compatible with heatmaps
# i.e. >=2 NOGs
nogs, matrix = infiles
inf = IOTools.openFile(nogs)
# header
inf.readline()
# check lines
if len(inf.readlines()) == 1:
P.touch(outfile)
else:
R('''library(gtools)''')
R('''library(gplots)''')
# read in matrix
R('''mat <- read.csv("%s", header=T,stringsAsFactors=F, sep="\t")''' % matrix)
R('''rownames(mat) <- mat$taxa''')
R('''mat <- mat[,1:ncol(mat)-1]''')
# read in NOGs
R('''nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % nogs)
# subset matrix
R('''mat <- mat[nogs$NOG,]''')
# scale
R('''mat.s <- data.frame(t(apply(mat, 1, scale)))''')
R('''colnames(mat.s) <- colnames(mat)''')
R('''mat.s <- mat.s[, mixedsort(colnames(mat.s))]''')
R('''mat.s <- mat.s[order(rownames(mat.s)),]''')
# heatmap
R('''pdf("%s")''' % outfile)
R('''cols <- colorRampPalette(c("blue", "white", "red"))(75)''')
R('''heatmap.2(as.matrix(mat.s),
trace="none",
Colv=F,
Rowv=F,
col=cols,
margins=c(15,15))''')
R["dev.off"]()
#########################################
#########################################
#########################################
@jobs_limit(1,"R")
@transform(splitPathways,
suffix(".tsv"),
add_inputs(PARAMS.get("matrix_taxa")),
".taxa.heatmap.pdf")
def heatmapTaxaAssociatedWithNogs(infiles, outfile):
'''
heatmap nogs per functional category
'''
# check files is compatible with heatmaps
# i.e. >=2 NOGs
nogs, matrix = infiles
inf = IOTools.openFile(nogs)
# header
inf.readline()
# check lines
if len(inf.readlines()) == 1:
P.touch(outfile)
else:
R('''library(gtools)''')
R('''library(gplots)''')
R('''library(pheatmap)''')
# read in matrix
R('''mat <- t(read.csv("%s", header=T,stringsAsFactors=F, sep="\t", row.names=1))''' % matrix)
# read in NOGs
R('''nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % nogs)
# subset matrix
R('''mat <- mat[nogs$NOG,]''')
R('''mat <- mat[, colSums(mat > 5) >= 1]''')
R('''mat <- mat[order(rownames(mat)),]''')
# heatmap
R('''pdf("%s")''' % outfile)
R('''cols <- colorRampPalette(c("white", "blue"))(75)''')
R('''pheatmap(as.matrix(mat),
color=cols,
cluster_cols=F,
cluster_rows=F)''')
R["dev.off"]()
@follows(heatmapNogs,
heatmapTaxaAssociatedWithNogs)
def heatmaps():
pass
@follows(runPathwaysAnalysis,annotateNogs,heatmaps)
def full():
pass
#########################################
#########################################
#########################################
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| de | 0.254002 | ======================================= Perform functional enrichment testing ======================================= :Author: <NAME> :Release: $Id$ :Date: |today| :Tags: Python # load modules ####################### # parameters ####################### ######################################### ######################################### ######################################### build foreground set of COGs cat %(infile)s | grep %(status)s | cut -f1 | gzip > %(outfile)s ######################################### ######################################### ######################################### run pathways analysis # remove General function prediction only # and Function unknown cat %(gene2pathway)s | grep -v "General function" | grep -v "Function unknown" > %(temp)s python %(scriptsdir)s/runGO.py \ --background=%(background)s --genes=%(genes)s \ --filename-input=%(temp)s \ --fdr \ -q BH \ --output-filename-pattern="pathways.dir/%%(set)s.%%(go)s.%%(section)s" \ > pathways.dir/pathways.log \ ; rm -rf %(temp)s ######################################### ######################################### ######################################### merge diff COGs and pathways cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t") colnames(cogs) <- "gene" pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t") dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F) write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F) ######################################### ######################################### ######################################### merge diff COGs and pathways cogs <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t") colnames(cogs) <- "gene" pathways <- read.csv("%s", header = F, stringsAsFactors = F, sep = "\t") dat <- merge(cogs, pathways, by.x = "gene", by.y = "V2", all.x = T, all.y = F) write.table(dat, file = "%s", sep = "\t", row.names = F, quote = F) 
######################################### ######################################### ######################################### plot pathways associated with clusters library(ggplot2) dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t") dat <- dat[order(dat$ratio),] plot1 <- ggplot(dat, aes(x = factor(description, levels=description), y=ratio, stat="identity")) plot1 + geom_bar(stat="identity") + coord_flip() ggsave("%s") ######################################### ######################################### ######################################### map cogs to pathways in separate files ######################################### ######################################### ######################################### annotate the NOGs with their descriptions # read annotations # write out annotations ######################################### ######################################### ######################################### heatmap nogs per functional category # check files is compatible with heatmaps # i.e. >=2 NOGs # header # check lines library(gtools) library(gplots) # read in matrix mat <- read.csv("%s", header=T,stringsAsFactors=F, sep="\t") rownames(mat) <- mat$taxa mat <- mat[,1:ncol(mat)-1] # read in NOGs nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t") # subset matrix mat <- mat[nogs$NOG,] # scale mat.s <- data.frame(t(apply(mat, 1, scale))) colnames(mat.s) <- colnames(mat) mat.s <- mat.s[, mixedsort(colnames(mat.s))] mat.s <- mat.s[order(rownames(mat.s)),] # heatmap pdf("%s") cols <- colorRampPalette(c("blue", "white", "red"))(75) heatmap.2(as.matrix(mat.s), trace="none", Colv=F, Rowv=F, col=cols, margins=c(15,15)) ######################################### ######################################### ######################################### heatmap nogs per functional category # check files is compatible with heatmaps # i.e. 
>=2 NOGs # header # check lines library(gtools) library(gplots) library(pheatmap) # read in matrix mat <- t(read.csv("%s", header=T,stringsAsFactors=F, sep="\t", row.names=1)) # read in NOGs nogs <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t") # subset matrix mat <- mat[nogs$NOG,] mat <- mat[, colSums(mat > 5) >= 1] mat <- mat[order(rownames(mat)),] # heatmap pdf("%s") cols <- colorRampPalette(c("white", "blue"))(75) pheatmap(as.matrix(mat), color=cols, cluster_cols=F, cluster_rows=F) ######################################### ######################################### ######################################### | 2.137165 | 2 |
users/views.py | Frank1963-mpoyi/Fast-Food-Web-App | 0 | 6619376 | <reponame>Frank1963-mpoyi/Fast-Food-Web-App
from django.shortcuts import render, redirect
from .forms import NewUserForm
#from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
def signup(request):
template_name = "users/sign-up.html"
form = NewUserForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('users:login')
context = dict( form =NewUserForm())
return render(request, template_name, context)
def login_n(request): # i change login
template_name ='users/login.html'
if request.POST:
username = request.POST.get('username')
pwd = request.POST.get('password')
user = authenticate(request, username=username, password=<PASSWORD>)
if user is not None: # which means is authenticate
login(request, user)# this login is a django function
return redirect('food:index')
else:
messages.info(request, 'username and/or password are incorrect')
#messages.info(request, 'Three credits remain in your account.')
context ={'active_link': 'login'}
return render (request, template_name, context)
def logout_n(request):
logout(request)
return redirect('food:index') | from django.shortcuts import render, redirect
from .forms import NewUserForm
#from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
def signup(request):
template_name = "users/sign-up.html"
form = NewUserForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('users:login')
context = dict( form =NewUserForm())
return render(request, template_name, context)
def login_n(request): # i change login
template_name ='users/login.html'
if request.POST:
username = request.POST.get('username')
pwd = request.POST.get('password')
user = authenticate(request, username=username, password=<PASSWORD>)
if user is not None: # which means is authenticate
login(request, user)# this login is a django function
return redirect('food:index')
else:
messages.info(request, 'username and/or password are incorrect')
#messages.info(request, 'Three credits remain in your account.')
context ={'active_link': 'login'}
return render (request, template_name, context)
def logout_n(request):
logout(request)
return redirect('food:index') | en | 0.779322 | #from django.contrib.auth.forms import UserCreationForm # i change login # which means is authenticate # this login is a django function #messages.info(request, 'Three credits remain in your account.') | 2.488664 | 2 |
test.py | StefanDuan/IntroToComSci | 0 | 6619377 | <filename>test.py<gh_stars>0
# - coding: utf-8 -
#def putchar(s):
# index = len(s) - 1
# while index >= 0:
# print s[index]
# index -= 1
#
#fruit = "abcdef"
#putchar(fruit)
##prefixes = "JKLMNOPQ"
##suffix_1 = "ack"
##suffix_2 = "uack"
##
##for letter in prefixes:
## if letter in "OQ":
## print letter + suffix_2
## else:
## print letter + suffix_1
# find the square root of perfect square
##x = 16
##ans = 0
##while ans*ans <= x:
## ans += 1
##print ans
##fin = open('wordlist.txt', 'r')
##for line in fin:
## word = line.strip()
## if len(word)>20:
## print word
##fin.close()
##def has_no_e(word):
## return not('e' in word)
def has_no_e(word):
for letter in word:
if letter == 'e':
return False
return True
##fin = open('wordlist.txt', 'r')
##total = 0
##no_e = 0
##for line in fin:
## word = line.strip()
## total += 1
## if has_no_e(word):
## no_e += 1
## print word, ' ',
##fin.close()
##print float(no_e)/float(total)
def avoids(word, forbidden):
for letter in word:
if letter in forbidden:
return False
return True
##fin = open('wordlist.txt', 'r')
##f_word = raw_input('Enter a string with forbidden letters:')
##no_forbidden = 0
##for line in fin:
## word = line.strip()
## if avoids(word, f_word):
## no_forbidden += 1
##fin.close()
##print no_forbidden
def use_only(word, string):
for letter in string:
if letter in word:
return True
return False
| <filename>test.py<gh_stars>0
# - coding: utf-8 -
#def putchar(s):
# index = len(s) - 1
# while index >= 0:
# print s[index]
# index -= 1
#
#fruit = "abcdef"
#putchar(fruit)
##prefixes = "JKLMNOPQ"
##suffix_1 = "ack"
##suffix_2 = "uack"
##
##for letter in prefixes:
## if letter in "OQ":
## print letter + suffix_2
## else:
## print letter + suffix_1
# find the square root of perfect square
##x = 16
##ans = 0
##while ans*ans <= x:
## ans += 1
##print ans
##fin = open('wordlist.txt', 'r')
##for line in fin:
## word = line.strip()
## if len(word)>20:
## print word
##fin.close()
##def has_no_e(word):
## return not('e' in word)
def has_no_e(word):
for letter in word:
if letter == 'e':
return False
return True
##fin = open('wordlist.txt', 'r')
##total = 0
##no_e = 0
##for line in fin:
## word = line.strip()
## total += 1
## if has_no_e(word):
## no_e += 1
## print word, ' ',
##fin.close()
##print float(no_e)/float(total)
def avoids(word, forbidden):
for letter in word:
if letter in forbidden:
return False
return True
##fin = open('wordlist.txt', 'r')
##f_word = raw_input('Enter a string with forbidden letters:')
##no_forbidden = 0
##for line in fin:
## word = line.strip()
## if avoids(word, f_word):
## no_forbidden += 1
##fin.close()
##print no_forbidden
def use_only(word, string):
for letter in string:
if letter in word:
return True
return False
| en | 0.292153 | # - coding: utf-8 - #def putchar(s): # index = len(s) - 1 # while index >= 0: # print s[index] # index -= 1 # #fruit = "abcdef" #putchar(fruit) ##prefixes = "JKLMNOPQ" ##suffix_1 = "ack" ##suffix_2 = "uack" ## ##for letter in prefixes: ## if letter in "OQ": ## print letter + suffix_2 ## else: ## print letter + suffix_1 # find the square root of perfect square ##x = 16 ##ans = 0 ##while ans*ans <= x: ## ans += 1 ##print ans ##fin = open('wordlist.txt', 'r') ##for line in fin: ## word = line.strip() ## if len(word)>20: ## print word ##fin.close() ##def has_no_e(word): ## return not('e' in word) ##fin = open('wordlist.txt', 'r') ##total = 0 ##no_e = 0 ##for line in fin: ## word = line.strip() ## total += 1 ## if has_no_e(word): ## no_e += 1 ## print word, ' ', ##fin.close() ##print float(no_e)/float(total) ##fin = open('wordlist.txt', 'r') ##f_word = raw_input('Enter a string with forbidden letters:') ##no_forbidden = 0 ##for line in fin: ## word = line.strip() ## if avoids(word, f_word): ## no_forbidden += 1 ##fin.close() ##print no_forbidden | 3.598507 | 4 |
test/core/test_world_available_target_actions.py | PMatthaei/multiagent-particle-envs | 0 | 6619378 | <filename>test/core/test_world_available_target_actions.py
import unittest
import numpy as np
from maenv.core import World
N_AGENTS = 6
class WorldAvailableTargetActionsTestCases(unittest.TestCase):
def setUp(self):
self.world = World(grid_size=10, n_teams=2, n_agents=4)
self.world.attack_target_mask = np.array([
[0, 0, 1, 1], # attacker
[0, 0, 0, 0], # healer
[0, 0, 1, 1], # attacker
[0, 0, 0, 0] # healer
])
self.world.heal_target_mask = np.array([
[0, 0, 0, 0], # attacker
[1, 1, 0, 0], # healer
[0, 0, 0, 0], # attacker
[1, 1, 0, 0] # healer
])
self.world.reachability = np.array([
[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]
])
self.world.alive = np.array([1, 1, 1, 1])
def test_no_target_action_available_if_alive_and_no_enemy_reachable_and_attacker(self):
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 0])
def test_no_target_action_available_if_alive_and_only_enemy_reachable_and_healer(self):
self.world.reachability[1] = [0, 0, 1, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[1], [0, 0, 0, 0])
def test_mate_target_action_available_if_alive_and_mate_reachable_and_healer(self):
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[1], [1, 0, 0, 0])
def test_no_target_action_if_dead(self):
self.world.alive = np.array([0, 1, 1, 1])
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 0])
def test_only_enemy_targets_available_if_alive_and_all_enemy_reachable_and_attacker(self):
self.world.reachability[0] = [0, 0, 1, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 1, 1])
def test_only_enemy_targets_available_if_alive_and_some_enemy_reachable_and_attacker(self):
self.world.reachability[0] = [0, 0, 0, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 1])
if __name__ == '__main__':
unittest.main()
| <filename>test/core/test_world_available_target_actions.py
import unittest
import numpy as np
from maenv.core import World
N_AGENTS = 6
class WorldAvailableTargetActionsTestCases(unittest.TestCase):
def setUp(self):
self.world = World(grid_size=10, n_teams=2, n_agents=4)
self.world.attack_target_mask = np.array([
[0, 0, 1, 1], # attacker
[0, 0, 0, 0], # healer
[0, 0, 1, 1], # attacker
[0, 0, 0, 0] # healer
])
self.world.heal_target_mask = np.array([
[0, 0, 0, 0], # attacker
[1, 1, 0, 0], # healer
[0, 0, 0, 0], # attacker
[1, 1, 0, 0] # healer
])
self.world.reachability = np.array([
[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]
])
self.world.alive = np.array([1, 1, 1, 1])
def test_no_target_action_available_if_alive_and_no_enemy_reachable_and_attacker(self):
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 0])
def test_no_target_action_available_if_alive_and_only_enemy_reachable_and_healer(self):
self.world.reachability[1] = [0, 0, 1, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[1], [0, 0, 0, 0])
def test_mate_target_action_available_if_alive_and_mate_reachable_and_healer(self):
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[1], [1, 0, 0, 0])
def test_no_target_action_if_dead(self):
self.world.alive = np.array([0, 1, 1, 1])
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 0])
def test_only_enemy_targets_available_if_alive_and_all_enemy_reachable_and_attacker(self):
self.world.reachability[0] = [0, 0, 1, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 1, 1])
def test_only_enemy_targets_available_if_alive_and_some_enemy_reachable_and_attacker(self):
self.world.reachability[0] = [0, 0, 0, 1]
self.world.calculate_avail_target_actions()
np.testing.assert_array_equal(self.world.avail_target_actions[0], [0, 0, 0, 1])
if __name__ == '__main__':
unittest.main()
| en | 0.757438 | # attacker # healer # attacker # healer # attacker # healer # attacker # healer | 2.749474 | 3 |
apps/common/base_test.py | DrMartiner/kaptilo_back | 3 | 6619379 | <reponame>DrMartiner/kaptilo_back<filename>apps/common/base_test.py
from django.test import TestCase
__all__ = ["BaseTest"]
class BaseTest(TestCase):
maxDiff = 5000
| from django.test import TestCase
__all__ = ["BaseTest"]
class BaseTest(TestCase):
maxDiff = 5000 | none | 1 | 1.312406 | 1 | |
test2_07.py | yoojunwoong/python_review01 | 0 | 6619380 | # %와 //, and와 or을 사용하여 조건 맞추기
# 한개의 숫자를 입력 받아
# 3의 배수이고 짝수이고 양수이면 출력,그렇지 않으면 FAIL을 출력하시오.
num = int(input('input Num.....'));
if num > 0 and num%3 == 0 and num%2 ==0:
print('OK');
else:
print('FAIL');
# 두자리 숫자만 입력을 받는다.
# 단 두개의 숫자는 모두 한자리로 입력되어야 한다.
# 한자리가 아니고 음수이면 프로그램을 종료시킨다.
# exit(0); __ 프로그램 종료
num1 = int(input('input Num1.....'))
num2 = int(input('input Num2.....'))
if not(((num1 // 10) < 1 and (num2 // 10)<1) or (num1 < 0 and num2 < 0)):
print('not in range Num');
exit(0);
| # %와 //, and와 or을 사용하여 조건 맞추기
# 한개의 숫자를 입력 받아
# 3의 배수이고 짝수이고 양수이면 출력,그렇지 않으면 FAIL을 출력하시오.
num = int(input('input Num.....'));
if num > 0 and num%3 == 0 and num%2 ==0:
print('OK');
else:
print('FAIL');
# 두자리 숫자만 입력을 받는다.
# 단 두개의 숫자는 모두 한자리로 입력되어야 한다.
# 한자리가 아니고 음수이면 프로그램을 종료시킨다.
# exit(0); __ 프로그램 종료
num1 = int(input('input Num1.....'))
num2 = int(input('input Num2.....'))
if not(((num1 // 10) < 1 and (num2 // 10)<1) or (num1 < 0 and num2 < 0)):
print('not in range Num');
exit(0);
| ko | 1.000055 | # %와 //, and와 or을 사용하여 조건 맞추기 # 한개의 숫자를 입력 받아 # 3의 배수이고 짝수이고 양수이면 출력,그렇지 않으면 FAIL을 출력하시오. # 두자리 숫자만 입력을 받는다. # 단 두개의 숫자는 모두 한자리로 입력되어야 한다. # 한자리가 아니고 음수이면 프로그램을 종료시킨다. # exit(0); __ 프로그램 종료 | 3.832384 | 4 |
api/setup.py | tuxiqae/minidetector | 0 | 6619381 | <filename>api/setup.py
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
print(long_description)
setuptools.setup(
name="minidetector-api",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="minidetector API",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[
"sqlalchemy>=1.3,<1.4",
"psycopg2-binary",
"fastapi",
"uvicorn[standard]",
"pyfiglet",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
| <filename>api/setup.py
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
print(long_description)
setuptools.setup(
name="minidetector-api",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="minidetector API",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[
"sqlalchemy>=1.3,<1.4",
"psycopg2-binary",
"fastapi",
"uvicorn[standard]",
"pyfiglet",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
| none | 1 | 1.664749 | 2 | |
src/evengsdk/cli/common.py | aopdal/evengsdk | 0 | 6619382 | <gh_stars>0
import click
output_option = click.option(
"--output",
type=click.Choice(["json", "text", "table"]),
default="table",
)
def list_sub_command(subcommand):
    """Turn *subcommand* into a ``list`` click command with the shared
    ``--output`` option attached."""
    # Equivalent to stacking the decorators in source order:
    #   @click.command(name="list")
    #   @output_option
    with_output = output_option(subcommand)
    return click.command(name="list")(with_output)
def list_command(command):
    """Attach the shared ``--output`` option to *command*."""
    return output_option(command)
| import click
output_option = click.option(
"--output",
type=click.Choice(["json", "text", "table"]),
default="table",
)
def list_sub_command(subcommand):
for decorator in reversed(
(
click.command(name="list"),
output_option,
)
):
subcommand = decorator(subcommand)
return subcommand
def list_command(command):
for decorator in reversed((output_option,)):
command = decorator(command)
return command | none | 1 | 2.933664 | 3 | |
app/utils/regex.py | jiazifa/sky_main | 1 | 6619383 | # -*- coding: utf-8 -*-
import re
from typing import Optional
def is_emoji(content: str) -> bool:
    """ judge str is emoji

    Args: str type
    Return : Bool type , return True if is Emoji , else False
    """
    if not content:
        return False
    # Lexicographic range checks, exactly as before -- meaningful for
    # single-character strings.
    emoji_ranges = (
        (u"\U0001F600", u"\U0001F64F"),  # emoticons
        (u"\U0001F300", u"\U0001F5FF"),  # misc symbols & pictographs
        (u"\U0001F680", u"\U0001F6FF"),  # transport & map symbols
        (u"\U0001F1E0", u"\U0001F1FF"),  # regional indicators
    )
    return any(low <= content <= high for low, high in emoji_ranges)
def is_link(url: Optional[str]) -> bool:
    """ 验证是否是一个链接 (validate that the string starts with an http(s) URL)

    Args:
        url: candidate string; ``None`` or "" is treated as "not a link"
    Return: True when the string begins with a well-formed http/https URL
    """
    if not url:
        # The annotation allows None, but re.match(pattern, None) raises
        # TypeError -- treat missing/empty input as "not a link".
        return False
    regex = r'(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]'
    # re.match anchors at the start only, preserving the original
    # prefix-match semantics.
    return re.match(regex, url) is not None
def is_phone(content: str) -> bool:
    """ 验证是否是一个手机号 (validate an 11-digit CN mobile number)

    Accepts numbers starting with 13/14/15/17/18 followed by 9 digits.

    Args:
        content: candidate number string
    Return: True when the whole string is a valid mobile number
    """
    # Fixes two defects in the original pattern:
    #  * `[3|4|5|7|8]` also matched a literal '|' as the second character;
    #  * the pattern was unanchored at the end, so any string with a valid
    #    11-digit prefix (e.g. 12 digits) was accepted.
    regex = r'1[34578][0-9]{9}'
    return re.fullmatch(regex, content) is not None
def is_email(content: str) -> bool:
    """ 验证是否是一个邮箱 (validate an e-mail address)

    Args:
        content: candidate address
    Return: True when the whole string is a syntactically valid address
    """
    pattern = re.compile(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)')
    return pattern.match(content) is not None
import re
from typing import Optional
def is_emoji(content: str) -> bool:
""" judge str is emoji
Args: str type
Return : Bool type , return True if is Emoji , else False
"""
if not content:
return False
if u"\U0001F600" <= content and content <= u"\U0001F64F":
return True
elif u"\U0001F300" <= content and content <= u"\U0001F5FF":
return True
elif u"\U0001F680" <= content and content <= u"\U0001F6FF":
return True
elif u"\U0001F1E0" <= content and content <= u"\U0001F1FF":
return True
else:
return False
def is_link(url: Optional[str]) -> bool:
""" 验证是否是一个链接
Args:
url: 需要验证的字符
Return: 如果是合法的链接,返回 True ,否则返回 False
"""
regex = r'(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]'
result: Optional[re.Match] = re.match(regex, url)
return False if not result else True
def is_phone(content: str) -> bool:
""" 验证是否是一个手机号
Args:
url: 需要验证的号码
Return: 如果是合法的,返回 True ,否则返回 False
"""
regex = r'1[3|4|5|7|8][0-9]{9}'
result: Optional[re.Match] = re.match(regex, content)
return False if not result else True
def is_email(content: str) -> bool:
""" 验证是否是一个邮箱
Args:
url: 需要验证的邮箱
Return: 如果是合法的,返回 True ,否则返回 False
"""
regex = r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)'
result: Optional[re.Match] = re.match(regex, content)
return False if not result else True | zh | 0.804944 | # -*- coding: utf-8 -*- judge str is emoji Args: str type Return : Bool type , return True if is Emoji , else False 验证是否是一个链接 Args: url: 需要验证的字符 Return: 如果是合法的链接,返回 True ,否则返回 False #/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]' 验证是否是一个手机号 Args: url: 需要验证的号码 Return: 如果是合法的,返回 True ,否则返回 False 验证是否是一个邮箱 Args: url: 需要验证的邮箱 Return: 如果是合法的,返回 True ,否则返回 False | 3.17076 | 3 |
chrome/test/pyautolib/chrome_driver_factory.py | nagineni/chromium-crosswalk | 231 | 6619384 | <reponame>nagineni/chromium-crosswalk<filename>chrome/test/pyautolib/chrome_driver_factory.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Factory that creates ChromeDriver instances for pyauto."""
import os
import random
import tempfile
import pyauto_paths
from selenium import webdriver
from selenium.webdriver.chrome import service
class ChromeDriverFactory(object):
  """Factory that creates ChromeDriver instances for pyauto.

  Starts a single ChromeDriver server lazily, on first use. Users should call
  'Stop' when no longer using the factory.
  """

  def __init__(self, port=0):
    """Initialize ChromeDriverFactory.

    Args:
      port: The port for WebDriver to use; by default the service will select a
            free port.
    """
    self._chromedriver_port = port
    # service.Service instance; created on demand by _StartServerIfNecessary.
    self._chromedriver_server = None

  def NewChromeDriver(self, pyauto):
    """Creates a new remote WebDriver instance.

    This instance will connect to a new automation provider of an already
    running Chrome.

    Args:
      pyauto: pyauto.PyUITest instance

    Returns:
      selenium.webdriver.remote.webdriver.WebDriver instance.
    """
    if pyauto.IsChromeOS():
      # ChromeOS: point at the default X display owned by the chronos user.
      os.putenv('DISPLAY', ':0.0')
      os.putenv('XAUTHORITY', '/home/chronos/.Xauthority')
    self._StartServerIfNecessary()
    # Random 80-bit hex token for the automation channel; [2:-1] strips the
    # "0x" prefix and the trailing "L" of Python 2 long literals.
    channel_id = 'testing' + hex(random.getrandbits(20 * 4))[2:-1]
    if not pyauto.IsWin():
      # Non-Windows channels are filesystem paths.
      channel_id = os.path.join(tempfile.gettempdir(), channel_id)
    pyauto.CreateNewAutomationProvider(channel_id)
    return webdriver.Remote(self._chromedriver_server.service_url,
                            {'chrome.channel': channel_id,
                             'chrome.noWebsiteTestingDefaults': True})

  def _StartServerIfNecessary(self):
    """Starts the ChromeDriver server, if not already started."""
    if self._chromedriver_server is None:
      exe = pyauto_paths.GetChromeDriverExe()
      assert exe, 'Cannot find chromedriver exe. Did you build it?'
      self._chromedriver_server = service.Service(exe, self._chromedriver_port)
      self._chromedriver_server.start()

  def Stop(self):
    """Stops the ChromeDriver server, if running."""
    if self._chromedriver_server is not None:
      self._chromedriver_server.stop()
      self._chromedriver_server = None

  def GetPort(self):
    """Gets the port ChromeDriver is set to use.

    Returns:
      The port all ChromeDriver instances returned from NewChromeDriver() will
      be listening on. A return value of 0 indicates the ChromeDriver service
      will select a free port.
    """
    return self._chromedriver_port

  def __del__(self):
    # Best-effort cleanup so the server process does not outlive the factory.
    self.Stop()
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Factory that creates ChromeDriver instances for pyauto."""
import os
import random
import tempfile
import pyauto_paths
from selenium import webdriver
from selenium.webdriver.chrome import service
class ChromeDriverFactory(object):
""""Factory that creates ChromeDriver instances for pyauto.
Starts a single ChromeDriver server when necessary. Users should call 'Stop'
when no longer using the factory.
"""
def __init__(self, port=0):
"""Initialize ChromeDriverFactory.
Args:
port: The port for WebDriver to use; by default the service will select a
free port.
"""
self._chromedriver_port = port
self._chromedriver_server = None
def NewChromeDriver(self, pyauto):
"""Creates a new remote WebDriver instance.
This instance will connect to a new automation provider of an already
running Chrome.
Args:
pyauto: pyauto.PyUITest instance
Returns:
selenium.webdriver.remote.webdriver.WebDriver instance.
"""
if pyauto.IsChromeOS():
os.putenv('DISPLAY', ':0.0')
os.putenv('XAUTHORITY', '/home/chronos/.Xauthority')
self._StartServerIfNecessary()
channel_id = 'testing' + hex(random.getrandbits(20 * 4))[2:-1]
if not pyauto.IsWin():
channel_id = os.path.join(tempfile.gettempdir(), channel_id)
pyauto.CreateNewAutomationProvider(channel_id)
return webdriver.Remote(self._chromedriver_server.service_url,
{'chrome.channel': channel_id,
'chrome.noWebsiteTestingDefaults': True})
def _StartServerIfNecessary(self):
"""Starts the ChromeDriver server, if not already started."""
if self._chromedriver_server is None:
exe = pyauto_paths.GetChromeDriverExe()
assert exe, 'Cannot find chromedriver exe. Did you build it?'
self._chromedriver_server = service.Service(exe, self._chromedriver_port)
self._chromedriver_server.start()
def Stop(self):
"""Stops the ChromeDriver server, if running."""
if self._chromedriver_server is not None:
self._chromedriver_server.stop()
self._chromedriver_server = None
def GetPort(self):
"""Gets the port ChromeDriver is set to use.
Returns:
The port all ChromeDriver instances returned from NewChromeDriver() will
be listening on. A return value of 0 indicates the ChromeDriver service
will select a free port.
"""
return self._chromedriver_port
def __del__(self):
self.Stop() | en | 0.71637 | # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Factory that creates ChromeDriver instances for pyauto. "Factory that creates ChromeDriver instances for pyauto. Starts a single ChromeDriver server when necessary. Users should call 'Stop' when no longer using the factory. Initialize ChromeDriverFactory. Args: port: The port for WebDriver to use; by default the service will select a free port. Creates a new remote WebDriver instance. This instance will connect to a new automation provider of an already running Chrome. Args: pyauto: pyauto.PyUITest instance Returns: selenium.webdriver.remote.webdriver.WebDriver instance. Starts the ChromeDriver server, if not already started. Stops the ChromeDriver server, if running. Gets the port ChromeDriver is set to use. Returns: The port all ChromeDriver instances returned from NewChromeDriver() will be listening on. A return value of 0 indicates the ChromeDriver service will select a free port. | 3.055883 | 3 |
pikuli/uia/adapter/dotnet/value_converters.py | NVoronchev/pikuli | 0 | 6619385 | <filename>pikuli/uia/adapter/dotnet/value_converters.py<gh_stars>0
# -*- coding: utf-8 -*-
from pikuli.uia.adapter.property_value_types import Rectangle
class DotNetPropertyValueConverter(object):
    """Converts raw .NET UIA property values into pikuli value types."""

    @classmethod
    def convert_BoundingRectangle(cls, val):
        # `val` is assumed to expose Left/Top/Width/Height attributes
        # (a .NET System.Windows.Rect-like object) -- TODO confirm against
        # the UIA adapter that produces it.
        return Rectangle(val.Left, val.Top, val.Width, val.Height)
| <filename>pikuli/uia/adapter/dotnet/value_converters.py<gh_stars>0
# -*- coding: utf-8 -*-
from pikuli.uia.adapter.property_value_types import Rectangle
class DotNetPropertyValueConverter(object):
@classmethod
def convert_BoundingRectangle(cls, val):
return Rectangle(val.Left, val.Top, val.Width, val.Height)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.430954 | 1 |
Year_Lived.py | souvikroys/hacktoberfest2021 | 0 | 6619386 | <filename>Year_Lived.py
#Modules Required
#1. datetime
#2. tkinter
#3. calender
import datetime
from tkinter import *
from PIL import ImageTk,Image
from tkinter import font as tkFont
import calendar
from datetime import date
root=Tk()
widths=root.winfo_screenwidth()
heights=root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (widths,heights))
root.config(bg="#081923")
helv36 = tkFont.Font(family='Helvetica',size=29)
now = datetime.datetime.now()
year=str(now.strftime("%Y"))
date=now.strftime("%d")
month=calendar.month_name[int(now.strftime("%m"))]
def clock():
    """Refresh the hour/minute/second/AM-PM labels and reschedule itself."""
    now = datetime.datetime.now()
    hour = now.strftime("%H")
    minute = now.strftime("%M")
    second = now.strftime("%S")
    if(int(hour)>12):
        # 24h -> 12h display.
        # NOTE(review): midnight is shown as "00" rather than "12", and noon
        # stays "12" -- confirm that is the intended display.
        hour = str(int(hour)-12)
    label1.config(text=hour)
    label2.config(text=minute)
    label3.config(text=second)
    label4.config(text=str(now.strftime("%p")))
    # after() keeps the update loop alive without blocking tkinter's mainloop.
    label3.after(200, clock)
#Current dates and times
lable=Label(root,text="Age and Time",font=("times new roman",20,"bold"),fg="white",bg="#081923").place(x=680,y=40)
label1=Label(root,font=("times new roman",30,"bold"),bg="#0047AB",fg="white")
label1.place(x=500,y=100,width=120,height=130)
label2=Label(root,font=("times new roman",30,"bold"),bg="#0096FF",fg="white")
label2.place(x=635,y=100,width=120,height=130)
label3=Label(root,font=("times new roman",30,"bold"),bg="#5F9EA0",fg="white")
label3.place(x=770,y=100,width=120,heigh=130)
label4=Label(root,font=("times new roman",30,"bold"),bg="#6F8FAF",fg="white")
label4.place(x=905,y=100,width=120,height=130)
label5=Label(root,text="HOUR",font=("times new roman",15,"bold"),bg="#0047AB",fg="white").place(x=500,y=240,width=120,height=30)
label6=Label(root,text="MINUTE",font=("times new roman",15,"bold"),bg="#0096FF",fg="white").place(x=635,y=240,width=120,height=30)
label7=Label(root,text="SECOND",font=("times new roman",15,"bold"),bg="#5F9EA0",fg="white").place(x=770,y=240,width=120,height=30)
label8=Label(root,text="NOON",font=("times new roman",15,"bold"),bg="#6F8FAF",fg="white").place(x=905,y=240,width=120,height=30)
label9=Label(root,text=date,font=("times new roman",14,"bold"),bg="#0047AB",fg="white").place(x=500,y=280,width=120,height=30)
label10=Label(root,text=month,font=("times new roman",14,"bold"),bg="#0096FF",fg="white").place(x=635,y=280,width=120,height=30)
label11=Label(root,text=year,font=("times new roman",14,"bold"),bg="#5F9EA0",fg="white").place(x=770,y=280,width=120,height=30)
label12=Label(root,text="Date",font=("times new roman",14,"bold"),bg="#6F8FAF",fg="white").place(x=905,y=280,width=120,height=30)
#frame
frame1=Frame(root,height=200,width=530,bg="#6F8FAF").place(x=500,y=350)
label_d=Label(root,text="Date",font=("times new roman",14,"bold"),bg="#0047AB",fg="white").place(x=635,y=380,width=120,height=30)
label_m=Label(root,text="Month",font=("times new roman",14,"bold"),bg="#0096FF",fg="white").place(x=635,y=420,width=120,height=30)
label_y=Label(root,text="Year",font=("times new roman",14,"bold"),bg="#5F9EA0",fg="white").place(x=635,y=460,width=120,height=30)
dates=StringVar()
months=StringVar()
years=StringVar()
input_d=Entry(frame1)
input_d.place(x=770,y=385)
input_m=Entry(frame1)
input_m.place(x=770,y=425)
input_y=Entry(frame1)
input_y.place(x=770,y=465)
from datetime import date
today=str(date.today()) #getting current date using datetime module
list_today=today.split("-")
def _age_components(b_year, b_month, b_date, c_year, c_month, c_date):
    """Return (years, months, days) elapsed between two calendar dates.

    Leap-year aware: when days go negative, the borrow comes from the month
    *preceding* the current date (standard age arithmetic), using
    calendar.monthrange for the real month length.
    """
    years = c_year - b_year
    months = c_month - b_month
    days = c_date - b_date
    if days < 0:
        months -= 1
        prev_month = c_month - 1 if c_month > 1 else 12
        prev_year = c_year if c_month > 1 else c_year - 1
        days += calendar.monthrange(prev_year, prev_month)[1]
    if months < 0:
        years -= 1
        months += 12
    return years, months, days


def click():
    """Submit-button callback: read the birth-date fields and show the age."""
    from datetime import date
    today = date.today()
    b_year = int(input_y.get())
    b_date = int(input_d.get())
    b_month = int(input_m.get())
    # The previous version used a fixed 28-day February (wrong in leap years)
    # and borrowed days from the *birth* month rather than the month before
    # today, mis-computing ages around month boundaries.
    ry, rm, rd = _age_components(b_year, b_month, b_date,
                                 today.year, today.month, today.day)
    years.set("Years " + str(ry))
    months.set("Months " + str(rm))
    dates.set("Days " + str(rd))
# Result labels bound to the StringVars that click() updates.
label13=Label(root,textvariable=dates,font=("times new roman",14,"bold"),fg="white",bg="#0047AB").place(x=635,y=600,width=200,height=30)
label14=Label(root,textvariable=months,font=("times new roman",14,"bold"),fg="white",bg="#0096FF").place(x=635,y=650,width=200,height=30)
label15=Label(root,textvariable=years,font=("times new roman",14,"bold"),fg="white",bg="#5F9EA0").place(x=635,y=700,width=200,height=30)
submit=Button(root,text="submit",command=click,bg="white").place(x=730,y=510)
# Start the clock update loop, then hand control to tkinter.
clock()
root.mainloop()
| <filename>Year_Lived.py
#Modules Required
#1. datetime
#2. tkinter
#3. calender
import datetime
from tkinter import *
from PIL import ImageTk,Image
from tkinter import font as tkFont
import calendar
from datetime import date
root=Tk()
widths=root.winfo_screenwidth()
heights=root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (widths,heights))
root.config(bg="#081923")
helv36 = tkFont.Font(family='Helvetica',size=29)
now = datetime.datetime.now()
year=str(now.strftime("%Y"))
date=now.strftime("%d")
month=calendar.month_name[int(now.strftime("%m"))]
def clock():
now = datetime.datetime.now()
hour=now.strftime("%H")
minute=now.strftime("%M")
second=now.strftime("%S")
if(int(hour)>12):
hour=str(int(hour)-12)
label1.config(text=hour)
label2.config(text=minute)
label3.config(text=second)
label4.config(text=str(now.strftime("%p")))
label3.after(200,clock)
#Current dates and times
lable=Label(root,text="Age and Time",font=("times new roman",20,"bold"),fg="white",bg="#081923").place(x=680,y=40)
label1=Label(root,font=("times new roman",30,"bold"),bg="#0047AB",fg="white")
label1.place(x=500,y=100,width=120,height=130)
label2=Label(root,font=("times new roman",30,"bold"),bg="#0096FF",fg="white")
label2.place(x=635,y=100,width=120,height=130)
label3=Label(root,font=("times new roman",30,"bold"),bg="#5F9EA0",fg="white")
label3.place(x=770,y=100,width=120,heigh=130)
label4=Label(root,font=("times new roman",30,"bold"),bg="#6F8FAF",fg="white")
label4.place(x=905,y=100,width=120,height=130)
label5=Label(root,text="HOUR",font=("times new roman",15,"bold"),bg="#0047AB",fg="white").place(x=500,y=240,width=120,height=30)
label6=Label(root,text="MINUTE",font=("times new roman",15,"bold"),bg="#0096FF",fg="white").place(x=635,y=240,width=120,height=30)
label7=Label(root,text="SECOND",font=("times new roman",15,"bold"),bg="#5F9EA0",fg="white").place(x=770,y=240,width=120,height=30)
label8=Label(root,text="NOON",font=("times new roman",15,"bold"),bg="#6F8FAF",fg="white").place(x=905,y=240,width=120,height=30)
label9=Label(root,text=date,font=("times new roman",14,"bold"),bg="#0047AB",fg="white").place(x=500,y=280,width=120,height=30)
label10=Label(root,text=month,font=("times new roman",14,"bold"),bg="#0096FF",fg="white").place(x=635,y=280,width=120,height=30)
label11=Label(root,text=year,font=("times new roman",14,"bold"),bg="#5F9EA0",fg="white").place(x=770,y=280,width=120,height=30)
label12=Label(root,text="Date",font=("times new roman",14,"bold"),bg="#6F8FAF",fg="white").place(x=905,y=280,width=120,height=30)
#frame
frame1=Frame(root,height=200,width=530,bg="#6F8FAF").place(x=500,y=350)
label_d=Label(root,text="Date",font=("times new roman",14,"bold"),bg="#0047AB",fg="white").place(x=635,y=380,width=120,height=30)
label_m=Label(root,text="Month",font=("times new roman",14,"bold"),bg="#0096FF",fg="white").place(x=635,y=420,width=120,height=30)
label_y=Label(root,text="Year",font=("times new roman",14,"bold"),bg="#5F9EA0",fg="white").place(x=635,y=460,width=120,height=30)
dates=StringVar()
months=StringVar()
years=StringVar()
input_d=Entry(frame1)
input_d.place(x=770,y=385)
input_m=Entry(frame1)
input_m.place(x=770,y=425)
input_y=Entry(frame1)
input_y.place(x=770,y=465)
from datetime import date
today=str(date.today()) #getting current date using datetime module
list_today=today.split("-")
def click():
from datetime import date
global today
global new
b_year=int(input_y.get())
b_date=int(input_d.get())
b_month=int(input_m.get())
c_date=int(list_today[2])
c_month=int(list_today[1])
c_year=int(list_today[0])
month =[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if(b_date>c_date):
c_month=c_month-1
c_date=c_date+month[b_month-1]
if (b_month>c_month):
c_year=c_year-1
c_month=c_month+12
resultd=str(c_date-b_date)
resultm=str(c_month-b_month)
resulty=str(c_year-b_year)
years.set("Years "+str(resulty))
months.set("Months "+str(resultm))
dates.set("Days "+str(resultd))
label13=Label(root,textvariable=dates,font=("times new roman",14,"bold"),fg="white",bg="#0047AB").place(x=635,y=600,width=200,height=30)
label14=Label(root,textvariable=months,font=("times new roman",14,"bold"),fg="white",bg="#0096FF").place(x=635,y=650,width=200,height=30)
label15=Label(root,textvariable=years,font=("times new roman",14,"bold"),fg="white",bg="#5F9EA0").place(x=635,y=700,width=200,height=30)
submit=Button(root,text="submit",command=click,bg="white").place(x=730,y=510)
clock()
root.mainloop()
| en | 0.348376 | #Modules Required #1. datetime #2. tkinter #3. calender #Current dates and times #frame #getting current date using datetime module | 3.412385 | 3 |
scripts/inspect_delays.py | keelder/hera_cal | 0 | 6619387 | <gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
import aipy as a
import numpy as np
import optparse
import sys
import pyuvdata
import glob
import pylab as p

# Options -- expects one or more *.firstcal.fits calibration files.
o = optparse.OptionParser()
o.set_usage('inspect_delays.py [options] *firstcal.fits')
o.set_description(__doc__)
a.scripting.add_standard_options(o, pol=True)
opts, args = o.parse_args(sys.argv[1:])

# Collect per-antenna delay time series across all input files.
delays = {}
for f in args:
    cal = pyuvdata.UVCal()
    cal.read_calfits(f)
    print " Reading calibration: {0}".format(f)  # Python 2 print statement
    if cal.cal_type != 'delay':
        print "Not a file with delays, exiting..."
        exit()
    for i, ant in enumerate(cal.ant_array):
        if ant not in delays:
            delays[ant] = []
        # Index [i, 0, :, 0]: antenna i, first spectral window / jones term
        # -- presumably UVCal's (Nants, Nspws, Ntimes, Njones) layout; confirm
        # against the pyuvdata version in use.
        delays[ant].append(cal.delay_array[i, 0, :, 0])

# Plot delays in nanoseconds (UVCal stores delays in seconds, hence 1e9).
for ant in cal.ant_array:
    p.plot(1e9 * np.concatenate(delays[ant]).flatten(), '.', label=str(ant))
p.xlabel('time bins')
p.ylabel('delays (ns)')
p.legend(loc='best')
p.show()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
import aipy as a
import numpy as np
import optparse
import sys
import pyuvdata
import glob
import pylab as p
# Options
o = optparse.OptionParser()
o.set_usage('inspect_delays.py [options] *firstcal.fits')
o.set_description(__doc__)
a.scripting.add_standard_options(o, pol=True)
opts, args = o.parse_args(sys.argv[1:])
delays = {}
for f in args:
cal = pyuvdata.UVCal()
cal.read_calfits(f)
print " Reading calibration: {0}".format(f)
if cal.cal_type != 'delay':
print "Not a file with delays, exiting..."
exit()
for i, ant in enumerate(cal.ant_array):
if ant not in delays:
delays[ant] = []
delays[ant].append(cal.delay_array[i, 0, :, 0])
for ant in cal.ant_array:
p.plot(1e9 * np.concatenate(delays[ant]).flatten(), '.', label=str(ant))
p.xlabel('time bins')
p.ylabel('delays (ns)')
p.legend(loc='best')
p.show() | en | 0.605628 | #! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2018 the HERA Project # Licensed under the MIT License # Options | 2.278797 | 2 |
IMLearn/metrics/loss_functions.py | noamkari/IML.HUJI | 0 | 6619388 | import numpy as np
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """
    Calculate MSE loss

    Parameters
    ----------
    y_true: ndarray of shape (n_samples, )
        True response values

    y_pred: ndarray of shape (n_samples, )
        Predicted response values

    Returns
    -------
    MSE of given predictions
    """
    # Vectorized mean of squared residuals -- replaces the element-by-element
    # Python loop with a single NumPy expression.
    return float(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray,
                            normalize: bool = True) -> float:
    """
    Calculate misclassification loss

    Parameters
    ----------
    y_true: ndarray of shape (n_samples, )
        True response values

    y_pred: ndarray of shape (n_samples, )
        Predicted response values

    normalize: bool, default = True
        Normalize by number of samples or not

    Returns
    -------
    Misclassification of given predictions
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # Vectorized count of mismatches (replaces the Python loop).
    errors = int(np.count_nonzero(y_true != y_pred))
    return errors / y_pred.size if normalize else errors
def accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """
    Calculate accuracy of given predictions

    Parameters
    ----------
    y_true: ndarray of shape (n_samples, )
        True response values

    y_pred: ndarray of shape (n_samples, )
        Predicted response values

    Returns
    -------
    Accuracy of given predictions
    """
    # Mean of the boolean equality mask == fraction of correct predictions.
    return float(np.mean(np.asarray(y_true) == np.asarray(y_pred)))
def cross_entropy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """
    Calculate the cross entropy of given predictions

    Parameters
    ----------
    y_true: ndarray of shape (n_samples, )
        True response values

    y_pred: ndarray of shape (n_samples, )
        Predicted response values

    Returns
    -------
    Cross entropy of given predictions
    """
    # TODO: deliberately unimplemented -- callers must not rely on this yet.
    raise NotImplementedError()
if __name__ == '__main__':
    # Ad-hoc manual check of mean_square_error on sample response data.
    y_true = np.array([279000, 432000, 326000, 333000, 437400, 555950])
    y_pred = np.array(
        [199000.37562541, 452589.25533196, 345267.48129011, 345856.57131275,
         563867.1347574, 395102.94362135])
    print(mean_square_error(y_true, y_pred))
| import numpy as np
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate MSE loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
MSE of given predictions
"""
los_sum = 0
for i in range(len(y_true)):
los_sum += ((y_true[i] - y_pred[i]) ** 2)
return los_sum / len(y_true)
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray,
normalize: bool = True) -> float:
"""
Calculate misclassification loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
normalize: bool, default = True
Normalize by number of samples or not
Returns
-------
Misclassification of given predictions
"""
size = y_pred.size
error_sum = 0
for i in range(size):
if y_pred[i] != y_true[i]:
error_sum += 1
return error_sum / size if normalize else error_sum
def accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate accuracy of given predictions
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
Accuracy of given predictions
"""
accurate_sum = 0
for i in range(y_true.size):
if y_true[i] == y_pred[i]:
accurate_sum += 1
return accurate_sum / y_true.size
def cross_entropy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the cross entropy of given predictions
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
Cross entropy of given predictions
"""
raise NotImplementedError()
if __name__ == '__main__':
y_true = np.array([279000, 432000, 326000, 333000, 437400, 555950])
y_pred = np.array(
[199000.37562541, 452589.25533196, 345267.48129011, 345856.57131275,
563867.1347574, 395102.94362135])
print(mean_square_error(y_true, y_pred))
| en | 0.569704 | Calculate MSE loss Parameters ---------- y_true: ndarray of shape (n_samples, ) True response values y_pred: ndarray of shape (n_samples, ) Predicted response values Returns ------- MSE of given predictions Calculate misclassification loss Parameters ---------- y_true: ndarray of shape (n_samples, ) True response values y_pred: ndarray of shape (n_samples, ) Predicted response values normalize: bool, default = True Normalize by number of samples or not Returns ------- Misclassification of given predictions Calculate accuracy of given predictions Parameters ---------- y_true: ndarray of shape (n_samples, ) True response values y_pred: ndarray of shape (n_samples, ) Predicted response values Returns ------- Accuracy of given predictions Calculate the cross entropy of given predictions Parameters ---------- y_true: ndarray of shape (n_samples, ) True response values y_pred: ndarray of shape (n_samples, ) Predicted response values Returns ------- Cross entropy of given predictions | 3.178163 | 3 |
insomnia_keeper_main/migrations/0002_adminsettings_fee_percent.py | gh0st-work/insomnia_keeper | 2 | 6619389 | # Generated by Django 4.0.2 on 2022-03-11 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insomnia_keeper_main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='adminsettings',
name='fee_percent',
field=models.DecimalField(decimal_places=2, default=1, max_digits=5, verbose_name='Комиссия %'),
),
]
| # Generated by Django 4.0.2 on 2022-03-11 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insomnia_keeper_main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='adminsettings',
name='fee_percent',
field=models.DecimalField(decimal_places=2, default=1, max_digits=5, verbose_name='Комиссия %'),
),
]
| en | 0.844241 | # Generated by Django 4.0.2 on 2022-03-11 22:19 | 1.338847 | 1 |
caixiya/20180424/bullet.py | python20180319howmework/homework | 0 | 6619390 | <reponame>python20180319howmework/homework<filename>caixiya/20180424/bullet.py
import pygame
import random
class Bullet(pygame.sprite.Sprite):
    """Player bullet that travels straight up and is pooled via reset()."""

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image1 = pygame.image.load('../images/bullet1.png').convert_alpha()
        self.image2 = pygame.image.load('../images/bullet2.png').convert_alpha()
        self.rect = self.image1.get_rect()
        self.rect.left, self.rect.top = pos
        self.speed = 12
        self.alive = True
        # Fix: the original assigned a mask from image1 and immediately
        # overwrote it with the image2 mask (dead store). Only the image2
        # mask was ever used for collisions; keep that behaviour explicitly.
        # NOTE(review): if image1/image2 differ in silhouette, per-image
        # masks may have been intended -- confirm.
        self.mask = pygame.mask.from_surface(self.image2)

    def move(self):
        """Advance upward; mark dead once past the top edge."""
        if self.rect.top < 0:
            self.alive = False
        else:
            self.rect.top -= self.speed

    def reset(self, pos):
        """Re-arm the pooled bullet at *pos*."""
        self.rect.left, self.rect.top = pos
        self.alive = True
class Bomb(pygame.sprite.Sprite):
    """Bomb supply sprite spawned off-screen at a random position."""

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('../images/bomb_supply.png').convert_alpha()
        self.rect = self.image.get_rect()
        # 位置 -- spawn at a random x, 5-10 screen heights above the top edge.
        self.width = bg_size.width
        self.height = bg_size.height
        self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \
                                         random.randint(-10 * self.height, -5 * self.height))
        self.speed = 5
        self.alive = True
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        # NOTE(review): `top -= speed` moves the sprite *up*, and the sprite
        # respawns as soon as top < 0 (true right after spawning) -- a falling
        # supply would normally use `top += speed` and respawn below the
        # screen; confirm intended behaviour.
        if self.rect.top < 0:
            self.alive = False
            self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \
                                             random.randint(-10 * self.height, -2 * self.height))
        else:
            self.rect.top -= self.speed
import random
class Bullet(pygame.sprite.Sprite):
def __init__(self,pos):
pygame.sprite.Sprite.__init__(self)
self.image1 = pygame.image.load('../images/bullet1.png').convert_alpha()
self.image2 = pygame.image.load('../images/bullet2.png').convert_alpha()
self.rect = self.image1.get_rect()
self.rect.left, self.rect.top = pos
self.speed =12
self.alive=True
self.mask = pygame.mask.from_surface(self.image1)
self.mask = pygame.mask.from_surface(self.image2)
def move(self):
if self.rect.top < 0:
self.alive=False
else:
self.rect.top -= self.speed
def reset(self,pos):
self.rect.left, self.rect.top = pos
self.alive=True
class Bomb(pygame.sprite.Sprite):
def __init__(self, bg_size):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('../images/bomb_supply.png').convert_alpha()
self.rect = self.image.get_rect()
# 位置
self.width = bg_size.width
self.height = bg_size.height
self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \
random.randint(-10 * self.height, -5 * self.height))
self.speed = 5
self.alive = True
self.mask = pygame.mask.from_surface(self.image)
def move(self):
if self.rect.top < 0:
self.alive = False
self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \
random.randint(-10 * self.height, -2 * self.height))
else:
self.rect.top -= self.speed | none | 1 | 3.014525 | 3 | |
learning/tests/views/test_resource_views.py | dbcaturra/django-koala-azure | 0 | 6619391 | <reponame>dbcaturra/django-koala-azure
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# Copyright (C) 2020 <NAME> <<EMAIL>>
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# This file is part of Koala LMS (Learning Management system)
# Koala LMS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# We make an extensive use of the Django framework, https://www.djangoproject.com/
#
import os
import tempfile
from datetime import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import filesizeformat
from django.test import TestCase, override_settings
from django.urls import reverse
from learning.models import Resource, ResourceType, Licences, ResourceAccess, ResourceReuse, Duration, Activity
from learning.tests.views.helpers import ClientFactory
def get_temporary_file(file_size=2 ** 20):
    """Create a temporary file of *file_size* random bytes and return it opened for reading.

    :param file_size: number of random bytes to write (default 1 MiB).
    :return: a binary file object opened in ``"rb"`` mode, positioned at the start.
             The caller is responsible for closing it; the file itself is not
             removed automatically (it lives in the system temp directory).
    """
    # BUG FIX: mkstemp() returns an *open* OS-level descriptor alongside the path;
    # the original `tempfile.mkstemp()[1]` discarded it, leaking one fd per call.
    fd, file_path = tempfile.mkstemp()
    os.close(fd)
    with open(file_path, mode="wb") as file:
        # os.getrandom is Linux-only; GRND_NONBLOCK avoids blocking on the entropy pool.
        file.write(os.getrandom(file_size, os.GRND_NONBLOCK))
    return open(file_path, mode="rb")
class ResourceViews(TestCase):
    """
    Integration tests for the learning Resource views: detail, create, update,
    delete, usage and similar-resource pages. Each view is exercised from both
    the author's and a non-author's perspective, for public and private
    resources, checking status codes, template usage, context objects and the
    presence/absence of permission-gated page elements.
    """
    def setUp(self):
        """Create three users (ws, acd, lt), one resource per user, and tag ws's resource."""
        for initials in ["ws", "acd", "lt"]:
            setattr(self, initials, get_user_model().objects.create_user(username=initials, password="<PASSWORD>"))
        self.ws_resource = Resource.objects.create(
            id=1,
            name="A sample resource",
            description="A sample description",
            type=ResourceType.AUDIO,
            access=ResourceAccess.PUBLIC.name,
            reuse=ResourceReuse.NO_RESTRICTION.name,
            duration=Duration.NOT_SPECIFIED.name,
            licence=Licences.CC_0.name,
            author=self.ws,
            language='fr',
        )
        self.ws_resource.tags.add("A")
        self.ws_resource.tags.add("B")
        self.acd_resource = Resource.objects.create(author=self.acd, name="resource1", language="en")
        self.lt_resource = Resource.objects.create(author=self.lt, name="resource2", language="en")
    """
    ResourceDetailView
    """
    def test_get_resource_view(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        the_object = response.context.get('object')
        resource = response.context.get('resource')
        self.assertEqual(the_object, self.ws_resource)
        self.assertEqual(resource, self.ws_resource)
        self.assertTemplateUsed(response, "learning/resource/detail.html")
    def common_contains_resource_detail_view(self, response):
        """Assertions shared by every detail-page test: tags, language, description and context objects."""
        self.assertContains(response, "object-tags", count=1)
        self.assertContains(response, "object-language", count=1)
        self.assertContains(response, "resource-description", count=1)
        the_object = response.context.get('object')
        resource = response.context.get('resource')
        self.assertIsNotNone(the_object)
        self.assertIsNotNone(resource)
    def test_post_detail_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_resource_view_as_author_private_resource(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        # The author sees all metadata badges and management buttons.
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    # Redirect uploaded attachments to the system temp dir so tests do not pollute MEDIA_ROOT.
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_get_detail_resource_view_as_author_public_resource(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertContains(response, "media-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertIsNotNone(resource.attachment.name)
        # Attachments are stored under resources/<resource_id>/<filename>.
        self.assertEqual(os.path.join("resources", str(self.ws_resource.id), filename), resource.attachment.name)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_get_detail_resource_view_as_author_public_resource_no_attachment(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    def test_get_detail_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_resource_view_user_public_resource(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(200, response.status_code)
        # A non-author viewer must not see metadata badges or management buttons.
        self.assertNotContains(response, "access-badge")
        self.assertNotContains(response, "reuse-badge")
        self.assertNotContains(response, "licence-badge")
        self.assertNotContains(response, "duration-badge")
        self.assertNotContains(response, "btn-edit-resource")
        self.assertNotContains(response, "btn-delete-resource")
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertNotContains(response, "link-resource-usage")
        self.assertNotContains(response, "link-resource-similar")
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    """
    ResourceCreateView
    """
    def test_get_create_resource_view(self):
        response = ClientFactory.get_client_for_user("ws").get(reverse("learning:resource/add"))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        self.assertContains(response, """id="resource_add_form" enctype=\"multipart/form-data\"""")
    def test_post_create_resource_error_missing_tags_name_description_language(self):
        form_data = {
            'type': ResourceType.FILE.name,
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
        }
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"), form_data)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        # Four missing required fields: tags, name, description, language.
        self.assertContains(response, "is-invalid", count=4)
    def test_post_create_resource_error_missing_all_fields(self):
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        self.assertContains(response, "is-invalid", count=9)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_post_create_resource(self):
        temp_file = get_temporary_file()
        form_data = {
            'name': "A sample name",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            'tags': "A",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"), form_data)
        # Check redirection after resource creation
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name"})
        )
        # The author is the request sender
        # NOTE(review): pk=4 assumes ids 1-3 were taken by the three setUp resources — fragile if fixtures change.
        resource = Resource.objects.get(pk=4)
        self.assertEqual(self.ws, resource.author)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        self.assertTrue(os.path.isfile(resource.attachment.path))
    """
    ResourceUpdateView
    """
    def test_update_get_resource_as_author(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        self.assertContains(response, """id="resource_update_form" enctype=\"multipart/form-data\"""")
    def test_update_get_resource_form_without_attachment(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        # With no attachment there is nothing to clear or download.
        self.assertNotContains(response, "column-clear-attachment")
        self.assertNotContains(response, "column-download-attachment")
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_get_resource_form_with_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        self.assertContains(response, "column-clear-attachment")
        self.assertContains(response, "column-download-attachment")
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author(self):
        self.assertIsNone(self.ws_resource.attachment.name)
        temp_file = get_temporary_file()
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        # Renaming the resource changes its slug, hence the new redirect target.
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        self.assertTrue(os.path.isfile(resource.attachment.path))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_replace_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        temp_file = get_temporary_file(file_size=2 ** 5)
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        # Current file exists and previous has been removed
        self.assertTrue(os.path.isfile(resource.attachment.path))
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_delete_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            # Django's ClearableFileInput checkbox: request removal of the current file.
            "attachment-clear": "on"
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertEqual("", resource.attachment.name)
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_too_big_resource(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn("sample_update", resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            # 2 MiB attachment: presumably above the configured upload limit — TODO confirm against settings.
            "attachment": get_temporary_file(file_size=2 ** 21)
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(response.context.get('form').errors)
        self.assertContains(response, "sample_update", count=2) # link and title
        self.assertContains(response, filesizeformat(2 ** 21), count=1)
        # The rejected upload must not have replaced the existing attachment.
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn("sample_update", resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
    def test_update_get_resource_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    def test_update_post_resource_without_being_author_forbidden(self):
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
        }
        response = ClientFactory.get_client_for_user("acd").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertEqual(403, response.status_code)
    """
    ResourceDeleteView
    """
    def test_delete_resource_get_as_author(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/delete.html")
    def test_delete_resource_post_as_author(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/my")
        )
        # The resource must be gone from the database.
        with self.assertRaises(ObjectDoesNotExist):
            Resource.objects.get(pk=self.ws_resource.id)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_delete_resource_post_as_author_with_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/my")
        )
        # Deleting the resource must also remove the attachment file from disk.
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    def test_delete_resource_get_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    def test_delete_resource_post_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    """
    ResourceDetailUsageView
    """
    def test_post_detail_usage_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_usage_resource_view_as_author_private_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "table-resource-usage")
        self.assertContains(response, "alert-not-used")
    def test_get_detail_usage_resource_view_as_author_public_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "table-resource-usage")
        self.assertContains(response, "alert-not-used")
    def test_get_detail_usage_resource_view_as_author_public_resource_used_twice(self):
        # Attach the resource to two activities owned by different users.
        a1 = Activity.objects.create(author=self.ws, name="test1")
        a2 = Activity.objects.create(author=self.acd, name="test2")
        a1.resources.add(self.ws_resource)
        a2.resources.add(self.ws_resource)
        self.assertEqual(2, self.ws_resource.activities.count())
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertContains(response, "table-resource-usage")
        self.assertContains(response, "usage-activity-row", count=2)
        self.assertNotContains(response, "alert-not-used")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(2, len(page_obj.object_list))
    def test_get_detail_usage_resource_view_as_author_private_resource_used_three_times(self):
        a1 = Activity.objects.create(author=self.ws, name="test1")
        a2 = Activity.objects.create(author=self.acd, name="test2")
        a3 = Activity.objects.create(author=self.lt, name="test3")
        a1.resources.add(self.ws_resource)
        a2.resources.add(self.ws_resource)
        a3.resources.add(self.ws_resource)
        self.assertEqual(3, self.ws_resource.activities.count())
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertIsNotNone(resource)
        self.assertEqual(resource, self.ws_resource)
        self.assertContains(response, "table-resource-usage")
        self.assertContains(response, "usage-activity-row", count=3)
        self.assertNotContains(response, "alert-not-used")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(3, len(page_obj.object_list))
    def test_get_detail_usage_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_usage_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_usage_resource_view_user_public_resource(self):
        # Usage stats stay author-only even when the resource itself is public.
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_usage_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    """
    ResourceDetailSimilarView
    """
    def test_post_detail_similar_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_similar_resource_view_as_author_private_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "similar-resources")
        self.assertContains(response, "alert-no-similar-resource")
    def test_get_detail_similar_resource_view_as_author_public_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "similar-resources")
        self.assertContains(response, "alert-no-similar-resource")
    def test_get_detail_similar_resource_view_as_author_public_resource_used_twice(self):
        # Similarity appears to be tag-based: copy ws's tags onto the other two resources.
        for tag in self.ws_resource.tags.all():
            self.acd_resource.tags.add(tag)
            self.lt_resource.tags.add(tag)
        self.acd_resource.save()
        self.lt_resource.save()
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "alert-no-similar-resource")
        self.assertContains(response, "similar-resources")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(2, len(page_obj.object_list))
    def test_get_detail_similar_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_similar_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_similar_resource_view_user_public_resource(self):
        # Like usage, similar-resource listing is restricted to the author.
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_similar_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
| #
# Copyright (C) 2019 <NAME> <<EMAIL>>
# Copyright (C) 2020 <NAME> <<EMAIL>>
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# This file is part of Koala LMS (Learning Management system)
# Koala LMS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# We make an extensive use of the Django framework, https://www.djangoproject.com/
#
import os
import tempfile
from datetime import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import filesizeformat
from django.test import TestCase, override_settings
from django.urls import reverse
from learning.models import Resource, ResourceType, Licences, ResourceAccess, ResourceReuse, Duration, Activity
from learning.tests.views.helpers import ClientFactory
def get_temporary_file(file_size=2 ** 20):
    """Create a temporary file of *file_size* random bytes and return it opened for reading.

    :param file_size: number of random bytes to write (default 1 MiB).
    :return: a binary file object opened in ``"rb"`` mode; the caller closes it.
    """
    # BUG FIX: mkstemp() returns an *open* OS-level descriptor alongside the path;
    # indexing `[1]` discarded it, leaking one file descriptor per call.
    fd, file_path = tempfile.mkstemp()
    os.close(fd)
    with open(file_path, mode="wb") as file:
        # os.getrandom is Linux-only; GRND_NONBLOCK avoids blocking on the entropy pool.
        file.write(os.getrandom(file_size, os.GRND_NONBLOCK))
    return open(file_path, mode="rb")
class ResourceViews(TestCase):
    """Integration tests for the Resource views: detail, create, update, delete,
    usage and similar-resources pages, exercised as the author and as other users.
    """
    def setUp(self):
        # Create three users ("ws", "acd", "lt"); each is stored as an attribute
        # of the test case (self.ws, self.acd, self.lt).
        for initials in ["ws", "acd", "lt"]:
            setattr(self, initials, get_user_model().objects.create_user(username=initials, password="<PASSWORD>"))
        # A fully populated resource owned by "ws"; most tests flip its .access
        # between PUBLIC and PRIVATE before issuing requests.
        self.ws_resource = Resource.objects.create(
            id=1,
            name="A sample resource",
            description="A sample description",
            # NOTE(review): every other enum field below passes ``.name`` but
            # ``type`` passes the enum member itself — confirm this asymmetry
            # is intentional for the Resource.type field.
            type=ResourceType.AUDIO,
            access=ResourceAccess.PUBLIC.name,
            reuse=ResourceReuse.NO_RESTRICTION.name,
            duration=Duration.NOT_SPECIFIED.name,
            licence=Licences.CC_0.name,
            author=self.ws,
            language='fr',
        )
        self.ws_resource.tags.add("A")
        self.ws_resource.tags.add("B")
        # Minimal resources for the two other users, used by the similar-resources tests.
        self.acd_resource = Resource.objects.create(author=self.acd, name="resource1", language="en")
        self.lt_resource = Resource.objects.create(author=self.lt, name="resource2", language="en")
    """
    ResourceDetailView
    """
    def test_get_resource_view(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        the_object = response.context.get('object')
        resource = response.context.get('resource')
        self.assertEqual(the_object, self.ws_resource)
        self.assertEqual(resource, self.ws_resource)
        self.assertTemplateUsed(response, "learning/resource/detail.html")
    def common_contains_resource_detail_view(self, response):
        # Shared assertions for the detail page markup that every viewer —
        # author or not — should see.
        self.assertContains(response, "object-tags", count=1)
        self.assertContains(response, "object-language", count=1)
        self.assertContains(response, "resource-description", count=1)
        the_object = response.context.get('object')
        resource = response.context.get('resource')
        self.assertIsNotNone(the_object)
        self.assertIsNotNone(resource)
    def test_post_detail_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_resource_view_as_author_private_resource(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        # The author sees the management badges, buttons and tab links.
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_get_detail_resource_view_as_author_public_resource(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertContains(response, "media-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertIsNotNone(resource.attachment.name)
        # Attachments are stored under MEDIA_ROOT/resources/<resource id>/.
        self.assertEqual(os.path.join("resources", str(self.ws_resource.id), filename), resource.attachment.name)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_get_detail_resource_view_as_author_public_resource_no_attachment(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        self.assertContains(response, "access-badge", count=1)
        self.assertContains(response, "reuse-badge", count=1)
        self.assertContains(response, "licence-badge", count=1)
        self.assertContains(response, "duration-badge", count=1)
        self.assertContains(response, "btn-edit-resource", count=1)
        self.assertContains(response, "btn-delete-resource", count=1)
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertContains(response, "link-resource-usage", count=1)
        self.assertContains(response, "link-resource-similar", count=1)
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    def test_get_detail_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_resource_view_user_public_resource(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(200, response.status_code)
        # A non-author viewer must not see the management badges or buttons.
        self.assertNotContains(response, "access-badge")
        self.assertNotContains(response, "reuse-badge")
        self.assertNotContains(response, "licence-badge")
        self.assertNotContains(response, "duration-badge")
        self.assertNotContains(response, "btn-edit-resource")
        self.assertNotContains(response, "btn-delete-resource")
        self.assertContains(response, "link-resource-detail", count=1)
        self.assertNotContains(response, "link-resource-usage")
        self.assertNotContains(response, "link-resource-similar")
        self.assertNotContains(response, "attachment-description")
        self.common_contains_resource_detail_view(response)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
    """
    ResourceCreateView
    """
    def test_get_create_resource_view(self):
        response = ClientFactory.get_client_for_user("ws").get(reverse("learning:resource/add"))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        self.assertContains(response, """id="resource_add_form" enctype=\"multipart/form-data\"""")
    def test_post_create_resource_error_missing_tags_name_description_language(self):
        form_data = {
            'type': ResourceType.FILE.name,
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
        }
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"), form_data)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        # Four required fields are missing: tags, name, description, language.
        self.assertContains(response, "is-invalid", count=4)
    def test_post_create_resource_error_missing_all_fields(self):
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/add.html")
        self.assertContains(response, "is-invalid", count=9)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_post_create_resource(self):
        temp_file = get_temporary_file()
        form_data = {
            'name': "A sample name",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            'tags': "A",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(reverse("learning:resource/add"), form_data)
        # Check redirection after resource creation
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name"})
        )
        # The author is the request sender
        # NOTE(review): pk=4 relies on setUp creating exactly three resources
        # first — brittle if fixtures change; confirm before extending setUp.
        resource = Resource.objects.get(pk=4)
        self.assertEqual(self.ws, resource.author)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        self.assertTrue(os.path.isfile(resource.attachment.path))
    """
    ResourceUpdateView
    """
    def test_update_get_resource_as_author(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        self.assertContains(response, """id="resource_update_form" enctype=\"multipart/form-data\"""")
    def test_update_get_resource_form_without_attachment(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        # Without an attachment, the clear/download widgets must be absent.
        self.assertNotContains(response, "column-clear-attachment")
        self.assertNotContains(response, "column-download-attachment")
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_get_resource_form_with_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/details/change.html")
        self.assertContains(response, "column-clear-attachment")
        self.assertContains(response, "column-download-attachment")
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author(self):
        self.assertIsNone(self.ws_resource.attachment.name)
        temp_file = get_temporary_file()
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        # Renaming the resource changes its slug; the redirect must follow it.
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        self.assertTrue(os.path.isfile(resource.attachment.path))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_replace_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        temp_file = get_temporary_file(file_size=2 ** 5)
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            "attachment": temp_file
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn(os.path.basename(temp_file.name), resource.attachment.name)
        # Current file exists and previous has been removed
        self.assertTrue(os.path.isfile(resource.attachment.path))
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_delete_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            # Django's ClearableFileInput uses the "<field>-clear" checkbox to
            # request removal of the stored file.
            "attachment-clear": "on"
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertRedirects(
            response, status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/detail", kwargs={'slug': "a-sample-name-that-changed"})
        )
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertEqual("", resource.attachment.name)
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_update_post_resource_as_author_too_big_resource(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn("sample_update", resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
            "tags": "B",
            # 2 MiB upload — presumably above the configured size limit; the
            # form must reject it and keep the previous attachment intact.
            "attachment": get_temporary_file(file_size=2 ** 21)
        }
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(response.context.get('form').errors)
        self.assertContains(response, "sample_update", count=2)  # link and title
        self.assertContains(response, filesizeformat(2 ** 21), count=1)
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        resource = Resource.objects.get(pk=self.ws_resource.id)
        self.assertIsNotNone(resource.attachment.name)
        self.assertIn("sample_update", resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
    def test_update_get_resource_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    def test_update_post_resource_without_being_author_forbidden(self):
        form_data = {
            'name': "A sample name that changed",
            'description': "A short description",
            'type': ResourceType.FILE.name,
            'language': 'fr',
            'licence': Licences.CC_BY.name,
            'access': ResourceAccess.PUBLIC.name,
            'reuse': ResourceReuse.ONLY_AUTHOR.name,
            'duration': Duration.NOT_SPECIFIED.name,
        }
        response = ClientFactory.get_client_for_user("acd").post(
            reverse("learning:resource/update", kwargs={'slug': self.ws_resource.slug}), form_data
        )
        self.assertEqual(403, response.status_code)
    """
    ResourceDeleteView
    """
    def test_delete_resource_get_as_author(self):
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, "learning/resource/delete.html")
    def test_delete_resource_post_as_author(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/my")
        )
        with self.assertRaises(ObjectDoesNotExist):
            Resource.objects.get(pk=self.ws_resource.id)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_delete_resource_post_as_author_with_attachment(self):
        filename = "sample_update_{date}.txt".format(date=datetime.now().timestamp())
        self.ws_resource.attachment.save(filename, get_temporary_file(), save=True)
        self.ws_resource.save()
        self.assertIsNotNone(self.ws_resource.attachment.name)
        self.assertIn("sample_update", self.ws_resource.attachment.name)
        self.assertTrue(os.path.isfile(os.path.join(settings.MEDIA_ROOT, "resources", str(self.ws_resource.id), filename)))
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertRedirects(
            response,
            status_code=302, target_status_code=200,
            expected_url=reverse("learning:resource/my")
        )
        # Deleting the resource must also remove its attachment from disk.
        self.assertFalse(os.path.isfile(os.path.join(settings.MEDIA_ROOT, filename)))
    def test_delete_resource_get_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    def test_delete_resource_post_without_being_author_forbidden(self):
        response = ClientFactory.get_client_for_user("acd").post(
            reverse("learning:resource/delete", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(403, response.status_code)
    """
    ResourceDetailUsageView
    """
    def test_post_detail_usage_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_usage_resource_view_as_author_private_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "table-resource-usage")
        self.assertContains(response, "alert-not-used")
    def test_get_detail_usage_resource_view_as_author_public_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "table-resource-usage")
        self.assertContains(response, "alert-not-used")
    def test_get_detail_usage_resource_view_as_author_public_resource_used_twice(self):
        # Attach the resource to two activities owned by different users.
        a1 = Activity.objects.create(author=self.ws, name="test1")
        a2 = Activity.objects.create(author=self.acd, name="test2")
        a1.resources.add(self.ws_resource)
        a2.resources.add(self.ws_resource)
        self.assertEqual(2, self.ws_resource.activities.count())
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertContains(response, "table-resource-usage")
        self.assertContains(response, "usage-activity-row", count=2)
        self.assertNotContains(response, "alert-not-used")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(2, len(page_obj.object_list))
    def test_get_detail_usage_resource_view_as_author_private_resource_used_three_times(self):
        a1 = Activity.objects.create(author=self.ws, name="test1")
        a2 = Activity.objects.create(author=self.acd, name="test2")
        a3 = Activity.objects.create(author=self.lt, name="test3")
        a1.resources.add(self.ws_resource)
        a2.resources.add(self.ws_resource)
        a3.resources.add(self.ws_resource)
        self.assertEqual(3, self.ws_resource.activities.count())
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_usage_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertIsNotNone(resource)
        self.assertEqual(resource, self.ws_resource)
        self.assertContains(response, "table-resource-usage")
        self.assertContains(response, "usage-activity-row", count=3)
        self.assertNotContains(response, "alert-not-used")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(3, len(page_obj.object_list))
    def test_get_detail_usage_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_usage_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_usage_resource_view_user_public_resource(self):
        # Usage stats are author-only, even on a public resource.
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/usage", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_usage_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    """
    ResourceDetailSimilarView
    """
    def test_post_detail_similar_resource_view_method_not_allowed(self):
        response = ClientFactory.get_client_for_user("ws").post(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertEqual(405, response.status_code)
    def test_get_detail_similar_resource_view_as_author_private_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "similar-resources")
        self.assertContains(response, "alert-no-similar-resource")
    def test_get_detail_similar_resource_view_as_author_public_resource_empty(self):
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "similar-resources")
        self.assertContains(response, "alert-no-similar-resource")
    def test_get_detail_similar_resource_view_as_author_public_resource_used_twice(self):
        # Give the two other resources the same tags so they count as similar.
        for tag in self.ws_resource.tags.all():
            self.acd_resource.tags.add(tag)
            self.lt_resource.tags.add(tag)
        self.acd_resource.save()
        self.lt_resource.save()
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("ws").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertIn("view_similar_resource", self.ws_resource.get_user_perms(self.ws))
        self.assertEqual(200, response.status_code)
        resource = response.context.get('resource')
        self.assertEqual(resource, self.ws_resource)
        self.assertNotContains(response, "alert-no-similar-resource")
        self.assertContains(response, "similar-resources")
        page_obj = response.context.get('page_obj')
        self.assertIsNotNone(page_obj)
        self.assertEqual(2, len(page_obj.object_list))
    def test_get_detail_similar_resource_view_user_private_resource_forbidden(self):
        self.ws_resource.access = ResourceAccess.PRIVATE.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_similar_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
    def test_get_detail_similar_resource_view_user_public_resource(self):
        # Similar-resources listing is author-only, even on a public resource.
        self.ws_resource.access = ResourceAccess.PUBLIC.name
        self.ws_resource.save()
        response = ClientFactory.get_client_for_user("acd").get(
            reverse("learning:resource/detail/similar", kwargs={'slug': self.ws_resource.slug})
        )
        self.assertNotIn("view_similar_resource", self.ws_resource.get_user_perms(self.acd))
        self.assertEqual(403, response.status_code)
Pyrado/scripts/deployment/run_policy_quanser.py | jacarvalho/SimuRLacra | 0 | 6619392 | """
Load and run a policy on the associated real-world Quanser environment.
"""
import pyrado
from pyrado.environments.quanser.quanser_ball_balancer import QBallBalancerReal
from pyrado.environments.quanser.quanser_cartpole import QCartPoleReal
from pyrado.environments.quanser.quanser_qube import QQubeReal
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.environments.pysim.quanser_cartpole import QCartPoleSim
from pyrado.environments.pysim.quanser_qube import QQubeSim
from pyrado.environment_wrappers.utils import inner_env
from pyrado.logger.experiment import ask_for_experiment
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.data_types import RenderMode
from pyrado.utils.experiments import wrap_like_other_env, load_experiment
from pyrado.utils.input_output import print_cbt
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Get the experiment's directory to load from (interactive prompt)
    ex_dir = ask_for_experiment()
    # Load the policy (trained in simulation) and the environment (for constructing the real-world counterpart)
    env_sim, policy, _ = load_experiment(ex_dir)
    # Detect the correct real-world counterpart and create it.
    # NOTE: keep this isinstance chain order as-is in case the sim classes share a hierarchy.
    if isinstance(inner_env(env_sim), QBallBalancerSim):
        env_real = QBallBalancerReal(dt=args.dt, max_steps=args.max_steps)
    elif isinstance(inner_env(env_sim), QCartPoleSim):
        env_real = QCartPoleReal(dt=args.dt, max_steps=args.max_steps)
    elif isinstance(inner_env(env_sim), QQubeSim):
        env_real = QQubeReal(dt=args.dt, max_steps=args.max_steps)
    else:
        raise pyrado.TypeErr(given=env_sim, expected_type=[QBallBalancerSim, QCartPoleSim, QQubeSim])
    print_cbt(f'Set up env {env_real.name}.', 'c')
    # Finally wrap the env in the same as done during training
    env_real = wrap_like_other_env(env_real, env_sim)
    # Run on device, repeating rollouts until the user declines the after-rollout prompt
    done = False
    print_cbt('Running loaded policy ...', 'c', bright=True)
    while not done:
        ro = rollout(env_real, policy, eval=True, render_mode=RenderMode(text=False, video=args.animation))
        print_cbt(f'Return: {ro.undiscounted_return()}', 'g', bright=True)
        done, _, _ = after_rollout_query(env_real, policy, ro)
| """
Load and run a policy on the associated real-world Quanser environment.
"""
import pyrado
from pyrado.environments.quanser.quanser_ball_balancer import QBallBalancerReal
from pyrado.environments.quanser.quanser_cartpole import QCartPoleReal
from pyrado.environments.quanser.quanser_qube import QQubeReal
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.environments.pysim.quanser_cartpole import QCartPoleSim
from pyrado.environments.pysim.quanser_qube import QQubeSim
from pyrado.environment_wrappers.utils import inner_env
from pyrado.logger.experiment import ask_for_experiment
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.data_types import RenderMode
from pyrado.utils.experiments import wrap_like_other_env, load_experiment
from pyrado.utils.input_output import print_cbt
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Get the experiment's directory to load from (interactive prompt)
    ex_dir = ask_for_experiment()
    # Load the policy (trained in simulation) and the environment (for constructing the real-world counterpart)
    env_sim, policy, _ = load_experiment(ex_dir)
    # Detect the correct real-world counterpart and create it.
    # NOTE: keep this isinstance chain order as-is in case the sim classes share a hierarchy.
    if isinstance(inner_env(env_sim), QBallBalancerSim):
        env_real = QBallBalancerReal(dt=args.dt, max_steps=args.max_steps)
    elif isinstance(inner_env(env_sim), QCartPoleSim):
        env_real = QCartPoleReal(dt=args.dt, max_steps=args.max_steps)
    elif isinstance(inner_env(env_sim), QQubeSim):
        env_real = QQubeReal(dt=args.dt, max_steps=args.max_steps)
    else:
        raise pyrado.TypeErr(given=env_sim, expected_type=[QBallBalancerSim, QCartPoleSim, QQubeSim])
    print_cbt(f'Set up env {env_real.name}.', 'c')
    # Finally wrap the env in the same as done during training
    env_real = wrap_like_other_env(env_real, env_sim)
    # Run on device, repeating rollouts until the user declines the after-rollout prompt
    done = False
    print_cbt('Running loaded policy ...', 'c', bright=True)
    while not done:
        ro = rollout(env_real, policy, eval=True, render_mode=RenderMode(text=False, video=args.animation))
        print_cbt(f'Return: {ro.undiscounted_return()}', 'g', bright=True)
        done, _, _ = after_rollout_query(env_real, policy, ro)
| en | 0.923456 | Load and run a policy on the associated real-world Quanser environment. # Parse command line arguments # Get the experiment's directory to load from # Load the policy (trained in simulation) and the environment (for constructing the real-world counterpart) # Detect the correct real-world counterpart and create it # Finally wrap the env in the same as done during training # Run on device | 2.160259 | 2 |
project/lib/models/settings.py | efulet/python-project | 0 | 6619393 | <reponame>efulet/python-project
"""
@created_at 2015-05-11
@author <NAME> <<EMAIL>>
"""
# Connection settings for the project's PostgreSQL database.
# Key names follow SQLAlchemy's URL components (drivername/host/port/...).
# NOTE(review): the password is a template placeholder — fill in before use.
DATABASE = {
    'drivername': 'postgres',
    'host': 'localhost',
    'port': '5432',
    'username': 'projectuser',
    'password': '<PASSWORD>',
    'database': 'project'
}
| """
@created_at 2015-05-11
@author <NAME> <<EMAIL>>
"""
DATABASE = {
'drivername': 'postgres',
'host': 'localhost',
'port': '5432',
'username': 'projectuser',
'password': '<PASSWORD>',
'database': 'project'
} | en | 0.164241 | @created_at 2015-05-11 @author <NAME> <<EMAIL>> | 1.353168 | 1 |
stv/models/synchronous/simple_voting_2_model.py | wp777/stv-compute | 2 | 6619394 | <gh_stars>1-10
from stv.models.model_generator import ModelGenerator
from stv.tools.list_tools import ListTools
from typing import List
import itertools
class SimpleVoting2Model(ModelGenerator):
    """Synchronous state-transition model of a simple coercible voting game.

    Participants (action slots, in order): an election authority (EA) that
    first fixes a protection level, a coercer that may punish or not punish
    each voter, and ``no_voters`` voters who each cast a vote for one of
    ``no_candidates`` candidates and then decide whether to give ('give') or
    not give ('ng') their vote proof to the coercer.

    A state is a dict with per-voter lists:
      'vote'         chosen candidate id, or -1 while not voted,
      'voter_action' '' / 'give' / 'ng' (proof decision),
      'pun'          None while undecided, else True/False (punished or not),
      'finish'       True once the coercer has reacted to this voter,
      'ea_action'    '' / 'low' / 'high' (EA protection level).
    """

    def __init__(self, no_voters: int, no_candidates: int):
        # NOTE(review): agents_count is voters + candidates here, while the
        # action slots below are EA + coercer + voters — confirm intended.
        super().__init__(agents_count=no_voters + no_candidates)
        self._no_voters = no_voters
        self._no_candidates = no_candidates

    def generate(self):
        """Build the full model: seed the initial state, then expand."""
        self._generate_initial_states()
        self._generate_model()

    def _generate_initial_states(self):
        # Everything undecided: no votes cast, no proofs, no punishments,
        # EA protection level not chosen yet.
        first_state = {'vote': [-1 for _ in range(self._no_voters)],
                       'voter_action': ['' for _ in range(self._no_voters)],
                       'pun': [None for _ in range(self._no_voters)],
                       'finish': [False for _ in range(self._no_voters)],
                       'ea_action': ''}
        self._add_state(first_state)

    def _generate_model(self):
        """Breadth-first expansion of the reachable state space.

        Iterates ``self.states`` while ``_add_state`` appends to it, so newly
        discovered states are expanded in the same loop.
        """
        current_state_id = -1
        for state in self.states:
            current_state_id += 1
            if self._is_final_state(state):
                continue
            # The EA must pick a protection level before anything else happens.
            if state['ea_action'] == '':
                self._generate_ea_action(state, current_state_id)
                continue
            # Cartesian product of one coercer action with one action per voter.
            actions_product_array = [self._get_coercer_possible_actions(state)]
            for voter_id in range(0, self._no_voters):
                actions_product_array.append(self._get_voter_possible_actions(state, voter_id))
            for possibility in itertools.product(*actions_product_array):
                # Skip the no-op combination where every participant waits.
                all_wait = True
                for act in possibility:
                    if act != 'Wait':
                        all_wait = False
                        break
                if all_wait:
                    continue
                coercer_action = possibility[0]
                voter_action = possibility[1:]
                # Copy-on-write successor state (per-voter lists are cloned).
                new_state = {'vote': state['vote'][:],
                             'voter_action': state['voter_action'][:],
                             'pun': state['pun'][:],
                             'finish': state['finish'][:],
                             'ea_action': state['ea_action']}
                # Action vector layout: [EA, coercer, voter_0, ..., voter_n-1].
                actions = ['Wait' for _ in range(self._no_voters + 2)]
                if coercer_action != 'Wait':
                    voter_id = coercer_action[1]
                    if coercer_action[0] == 'pun':
                        # High EA protection shields a voter who refused to
                        # hand over the proof: the punishment has no effect.
                        if state['ea_action'] == 'high' and state['voter_action'][voter_id] == 'ng':
                            new_state['pun'][voter_id] = False
                        else:
                            new_state['pun'][voter_id] = True
                        actions[1] = f'pun{voter_id}'
                        new_state['finish'][voter_id] = True
                    else:
                        new_state['pun'][voter_id] = False
                        actions[1] = f'np{voter_id}'
                        new_state['finish'][voter_id] = True
                for voter_id in range(0, self._no_voters):
                    if voter_action[voter_id] == 'Wait':
                        continue
                    if voter_action[voter_id] == 'give':
                        new_state['voter_action'][voter_id] = 'give'
                        actions[voter_id + 2] = 'give'
                    elif voter_action[voter_id] == 'ng':
                        new_state['voter_action'][voter_id] = 'ng'
                        actions[voter_id + 2] = 'ng'
                    else:
                        # Tuple action ('vote', candidate_id).
                        candidate_id = voter_action[voter_id][1]
                        new_state['vote'][voter_id] = candidate_id
                        actions[voter_id + 2] = f'Vote{candidate_id}'
                new_state_id = self._add_state(new_state)
                self.model.add_transition(current_state_id, new_state_id, actions)

    def _get_coercer_possible_actions(self, state):
        """Return the coercer's available actions in *state*.

        The coercer may (not) punish any voter whose proof decision is known
        but whose punishment is still undecided; otherwise it can only Wait.
        """
        coercer_actions = []
        for voter_id in range(0, self._no_voters):
            if state['pun'][voter_id] is None and state['voter_action'][voter_id] != '':
                coercer_actions.append(('pun', voter_id))
                coercer_actions.append(('np', voter_id))
        if len(coercer_actions) == 0:
            return ['Wait']
        return coercer_actions

    def _get_voter_possible_actions(self, state, voter_id):
        """Return voter *voter_id*'s available actions in *state*.

        First vote for some candidate; once voted, decide give/ng; Wait is
        always available.
        """
        voter_actions = ['Wait']
        if state['vote'][voter_id] == -1:
            for candidate_id in range(0, self._no_candidates):
                voter_actions.append(('vote', candidate_id))
        elif state['voter_action'][voter_id] == '':
            voter_actions.append('give')
            voter_actions.append('ng')
        return voter_actions

    def _generate_ea_action(self, state, current_state_id):
        """Branch on the EA choosing a 'low' or 'high' protection level."""
        for level in ['low', 'high']:
            new_state = {'vote': state['vote'][:],
                         'voter_action': state['voter_action'][:],
                         'pun': state['pun'][:],
                         'finish': state['finish'][:],
                         'ea_action': f'{level}'}
            new_state_id = self._add_state(new_state)
            actions = ['Wait' for _ in range(self._no_voters + 2)]
            actions[0] = f'{level} protection'
            self.model.add_transition(current_state_id, new_state_id, actions)

    def _is_final_state(self, state):
        # Final once the coercer has reacted (pun/np) to every voter.
        for val in state['finish']:
            if not val:
                return False
        return True

    def _get_epistemic_state(self, state: dict, agent_id: int):
        # Perfect information: every agent observes the full state.
        return state

    def get_actions(self) -> list:
        """Return the action alphabet per agent slot: [EA, coercer, voters...]."""
        actions = [['low protection', 'high protection', 'Wait'], ['Wait']]
        for voter_id in range(0, self._no_voters):
            actions[-1].append(f'pun{voter_id}')
            actions[-1].append(f'np{voter_id}')
        for voter_id in range(0, self._no_voters):
            actions.append(['Wait', 'give', 'ng'])
            for candidate_id in range(0, self._no_candidates):
                actions[-1].append(f'Vote{candidate_id}')
        return actions

    def _get_props_for_state(self, state: dict) -> List[str]:
        # Not implemented for this model.
        pass

    def get_props_list(self) -> List[str]:
        # Not implemented for this model.
        pass

    def get_winning_states(self, prop: str) -> List[int]:
        # Not implemented for this model.
        pass
if __name__ == "__main__":
    # Smoke test: build and expand the model for 2 voters and 2 candidates.
    model = SimpleVoting2Model(no_voters=2, no_candidates=2)
    model.generate()
| from stv.models.model_generator import ModelGenerator
from stv.tools.list_tools import ListTools
from typing import List
import itertools
class SimpleVoting2Model(ModelGenerator):
def __init__(self, no_voters: int, no_candidates: int):
super().__init__(agents_count=no_voters + no_candidates)
self._no_voters = no_voters
self._no_candidates = no_candidates
def generate(self):
self._generate_initial_states()
self._generate_model()
def _generate_initial_states(self):
first_state = {'vote': [-1 for _ in range(self._no_voters)],
'voter_action': ['' for _ in range(self._no_voters)],
'pun': [None for _ in range(self._no_voters)],
'finish': [False for _ in range(self._no_voters)],
'ea_action': ''}
self._add_state(first_state)
def _generate_model(self):
current_state_id = -1
for state in self.states:
current_state_id += 1
if self._is_final_state(state):
continue
if state['ea_action'] == '':
self._generate_ea_action(state, current_state_id)
continue
actions_product_array = [self._get_coercer_possible_actions(state)]
for voter_id in range(0, self._no_voters):
actions_product_array.append(self._get_voter_possible_actions(state, voter_id))
for possibility in itertools.product(*actions_product_array):
all_wait = True
for act in possibility:
if act != 'Wait':
all_wait = False
break
if all_wait:
continue
coercer_action = possibility[0]
voter_action = possibility[1:]
new_state = {'vote': state['vote'][:],
'voter_action': state['voter_action'][:],
'pun': state['pun'][:],
'finish': state['finish'][:],
'ea_action': state['ea_action']}
actions = ['Wait' for _ in range(self._no_voters + 2)]
if coercer_action != 'Wait':
voter_id = coercer_action[1]
if coercer_action[0] == 'pun':
if state['ea_action'] == 'high' and state['voter_action'][voter_id] == 'ng':
new_state['pun'][voter_id] = False
else:
new_state['pun'][voter_id] = True
actions[1] = f'pun{voter_id}'
new_state['finish'][voter_id] = True
else:
new_state['pun'][voter_id] = False
actions[1] = f'np{voter_id}'
new_state['finish'][voter_id] = True
for voter_id in range(0, self._no_voters):
if voter_action[voter_id] == 'Wait':
continue
if voter_action[voter_id] == 'give':
new_state['voter_action'][voter_id] = 'give'
actions[voter_id + 2] = 'give'
elif voter_action[voter_id] == 'ng':
new_state['voter_action'][voter_id] = 'ng'
actions[voter_id + 2] = 'ng'
else:
candidate_id = voter_action[voter_id][1]
new_state['vote'][voter_id] = candidate_id
actions[voter_id + 2] = f'Vote{candidate_id}'
new_state_id = self._add_state(new_state)
self.model.add_transition(current_state_id, new_state_id, actions)
def _get_coercer_possible_actions(self, state):
coercer_actions = []
for voter_id in range(0, self._no_voters):
if state['pun'][voter_id] is None and state['voter_action'][voter_id] != '':
coercer_actions.append(('pun', voter_id))
coercer_actions.append(('np', voter_id))
if len(coercer_actions) == 0:
return ['Wait']
return coercer_actions
def _get_voter_possible_actions(self, state, voter_id):
voter_actions = ['Wait']
if state['vote'][voter_id] == -1:
for candidate_id in range(0, self._no_candidates):
voter_actions.append(('vote', candidate_id))
elif state['voter_action'][voter_id] == '':
voter_actions.append('give')
voter_actions.append('ng')
return voter_actions
def _generate_ea_action(self, state, current_state_id):
for level in ['low', 'high']:
new_state = {'vote': state['vote'][:],
'voter_action': state['voter_action'][:],
'pun': state['pun'][:],
'finish': state['finish'][:],
'ea_action': f'{level}'}
new_state_id = self._add_state(new_state)
actions = ['Wait' for _ in range(self._no_voters + 2)]
actions[0] = f'{level} protection'
self.model.add_transition(current_state_id, new_state_id, actions)
def _is_final_state(self, state):
for val in state['finish']:
if not val:
return False
return True
def _get_epistemic_state(self, state: hash, agent_id: int):
return state
def get_actions(self) -> list:
actions = [['low protection', 'high protection', 'Wait'], ['Wait']]
for voter_id in range(0, self._no_voters):
actions[-1].append(f'pun{voter_id}')
actions[-1].append(f'np{voter_id}')
for voter_id in range(0, self._no_voters):
actions.append(['Wait', 'give', 'ng'])
for candidate_id in range(0, self._no_candidates):
actions[-1].append(f'Vote{candidate_id}')
return actions
def _get_props_for_state(self, state: hash) -> List[str]:
pass
def get_props_list(self) -> List[str]:
pass
def get_winning_states(self, prop: str) -> List[int]:
pass
if __name__ == "__main__":
model = SimpleVoting2Model(no_voters=2, no_candidates=2)
model.generate() | none | 1 | 2.691308 | 3 | |
notebooks/Ch04_Feature_Engineering_and_Selection/feature_engineering_text.py | baoqt2/practical-machine-learning-with-python | 1,989 | 6619395 | <reponame>baoqt2/practical-machine-learning-with-python
# coding: utf-8
"""
Created on Mon May 17 00:00:00 2017
@author: DIP
"""
# # Import necessary dependencies and settings
# In[1]:
import pandas as pd
import numpy as np
import re
import nltk
# # Sample corpus of text documents
# In[2]:
corpus = ['The sky is blue and beautiful.',
'Love this blue and beautiful sky!',
'The quick brown fox jumps over the lazy dog.',
'The brown fox is quick and the blue dog is lazy!',
'The sky is very blue and the sky is very beautiful today',
'The dog is lazy but the brown fox is quick!'
]
labels = ['weather', 'weather', 'animals', 'animals', 'weather', 'animals']
corpus = np.array(corpus)
corpus_df = pd.DataFrame({'Document': corpus,
'Category': labels})
corpus_df = corpus_df[['Document', 'Category']]
corpus_df
# # Simple text pre-processing
# In[3]:
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')
def normalize_document(doc):
    """Lower-case *doc*, strip non-alphanumeric characters and stopwords.

    Returns the normalized document as a single whitespace-joined string.
    Relies on the module-level ``wpt`` tokenizer and ``stop_words`` list.
    """
    # Remove every character that is not alphanumeric or whitespace.
    # BUG FIX: the original passed ``re.I`` as the positional ``count``
    # argument of re.sub (re.I == 2), so at most two characters were ever
    # stripped; flags must be passed via the ``flags`` keyword.
    doc = re.sub(r'[^a-zA-Z0-9\s]', '', doc, flags=re.I)
    doc = doc.lower()
    doc = doc.strip()
    # Tokenize, drop stopwords, then rebuild the document string.
    tokens = wpt.tokenize(doc)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    doc = ' '.join(filtered_tokens)
    return doc
normalize_corpus = np.vectorize(normalize_document)
# In[4]:
norm_corpus = normalize_corpus(corpus)
norm_corpus
# # Bag of Words Model
# In[5]:
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(min_df=0., max_df=1.)
cv_matrix = cv.fit_transform(norm_corpus)
cv_matrix = cv_matrix.toarray()
cv_matrix
# In[6]:
vocab = cv.get_feature_names()
pd.DataFrame(cv_matrix, columns=vocab)
# # Bag of N-Grams Model
# In[7]:
bv = CountVectorizer(ngram_range=(2,2))
bv_matrix = bv.fit_transform(norm_corpus)
bv_matrix = bv_matrix.toarray()
vocab = bv.get_feature_names()
pd.DataFrame(bv_matrix, columns=vocab)
# # TF-IDF Model
# In[8]:
from sklearn.feature_extraction.text import TfidfVectorizer
tv = TfidfVectorizer(min_df=0., max_df=1., use_idf=True)
tv_matrix = tv.fit_transform(norm_corpus)
tv_matrix = tv_matrix.toarray()
vocab = tv.get_feature_names()
pd.DataFrame(np.round(tv_matrix, 2), columns=vocab)
# # Document Similarity
# In[9]:
from sklearn.metrics.pairwise import cosine_similarity
similarity_matrix = cosine_similarity(tv_matrix)
similarity_df = pd.DataFrame(similarity_matrix)
similarity_df
# ## Clustering documents using similarity features
# In[10]:
from sklearn.cluster import KMeans
km = KMeans(n_clusters=2)
km.fit_transform(similarity_df)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
# # Topic models
# In[11]:
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_topics=2, max_iter=100, random_state=42)
dt_matrix = lda.fit_transform(tv_matrix)
features = pd.DataFrame(dt_matrix, columns=['T1', 'T2'])
features
# ## Show topics and their weights
# In[12]:
tt_matrix = lda.components_
for topic_weights in tt_matrix:
topic = [(token, weight) for token, weight in zip(vocab, topic_weights)]
topic = sorted(topic, key=lambda x: -x[1])
topic = [item for item in topic if item[1] > 0.6]
print(topic)
print()
# ## Clustering documents using topic model features
# In[13]:
km = KMeans(n_clusters=2)
km.fit_transform(features)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
# # Word Embeddings
# In[14]:
from gensim.models import word2vec
wpt = nltk.WordPunctTokenizer()
tokenized_corpus = [wpt.tokenize(document) for document in norm_corpus]
# Set values for various parameters
feature_size = 10 # Word vector dimensionality
window_context = 10 # Context window size
min_word_count = 1 # Minimum word count
sample = 1e-3 # Downsample setting for frequent words
w2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size,
window=window_context, min_count = min_word_count,
sample=sample)
# In[15]:
w2v_model.wv['sky']
# In[16]:
def average_word_vectors(words, model, vocabulary, num_features):
    """Return the mean embedding of the in-vocabulary tokens of *words*.

    Tokens absent from *vocabulary* are ignored; if no token matches, a zero
    vector of length *num_features* is returned.
    """
    accumulated = np.zeros((num_features,), dtype="float64")
    matched = 0.
    for token in words:
        if token in vocabulary:
            matched = matched + 1.
            accumulated = np.add(accumulated, model[token])
    # Guard against division by zero when nothing matched the vocabulary.
    return np.divide(accumulated, matched) if matched else accumulated
def averaged_word_vectorizer(corpus, model, num_features):
    """Return a (len(corpus), num_features) array of document embeddings.

    Each document (a list of tokens) is embedded as the average of its
    in-vocabulary word vectors via ``average_word_vectors``.
    """
    vocabulary = set(model.wv.index2word)
    doc_vectors = []
    for tokenized_sentence in corpus:
        doc_vectors.append(
            average_word_vectors(tokenized_sentence, model, vocabulary, num_features))
    return np.array(doc_vectors)
# In[17]:
w2v_feature_array = averaged_word_vectorizer(corpus=tokenized_corpus, model=w2v_model,
num_features=feature_size)
pd.DataFrame(w2v_feature_array)
# In[18]:
from sklearn.cluster import AffinityPropagation
ap = AffinityPropagation()
ap.fit(w2v_feature_array)
cluster_labels = ap.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
| # coding: utf-8
"""
Created on Mon May 17 00:00:00 2017
@author: DIP
"""
# # Import necessary dependencies and settings
# In[1]:
import pandas as pd
import numpy as np
import re
import nltk
# # Sample corpus of text documents
# In[2]:
corpus = ['The sky is blue and beautiful.',
'Love this blue and beautiful sky!',
'The quick brown fox jumps over the lazy dog.',
'The brown fox is quick and the blue dog is lazy!',
'The sky is very blue and the sky is very beautiful today',
'The dog is lazy but the brown fox is quick!'
]
labels = ['weather', 'weather', 'animals', 'animals', 'weather', 'animals']
corpus = np.array(corpus)
corpus_df = pd.DataFrame({'Document': corpus,
'Category': labels})
corpus_df = corpus_df[['Document', 'Category']]
corpus_df
# # Simple text pre-processing
# In[3]:
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')
def normalize_document(doc):
# lower case and remove special characters\whitespaces
doc = re.sub(r'[^a-zA-Z0-9\s]', '', doc, re.I)
doc = doc.lower()
doc = doc.strip()
# tokenize document
tokens = wpt.tokenize(doc)
# filter stopwords out of document
filtered_tokens = [token for token in tokens if token not in stop_words]
# re-create document from filtered tokens
doc = ' '.join(filtered_tokens)
return doc
normalize_corpus = np.vectorize(normalize_document)
# In[4]:
norm_corpus = normalize_corpus(corpus)
norm_corpus
# # Bag of Words Model
# In[5]:
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(min_df=0., max_df=1.)
cv_matrix = cv.fit_transform(norm_corpus)
cv_matrix = cv_matrix.toarray()
cv_matrix
# In[6]:
vocab = cv.get_feature_names()
pd.DataFrame(cv_matrix, columns=vocab)
# # Bag of N-Grams Model
# In[7]:
bv = CountVectorizer(ngram_range=(2,2))
bv_matrix = bv.fit_transform(norm_corpus)
bv_matrix = bv_matrix.toarray()
vocab = bv.get_feature_names()
pd.DataFrame(bv_matrix, columns=vocab)
# # TF-IDF Model
# In[8]:
from sklearn.feature_extraction.text import TfidfVectorizer
tv = TfidfVectorizer(min_df=0., max_df=1., use_idf=True)
tv_matrix = tv.fit_transform(norm_corpus)
tv_matrix = tv_matrix.toarray()
vocab = tv.get_feature_names()
pd.DataFrame(np.round(tv_matrix, 2), columns=vocab)
# # Document Similarity
# In[9]:
from sklearn.metrics.pairwise import cosine_similarity
similarity_matrix = cosine_similarity(tv_matrix)
similarity_df = pd.DataFrame(similarity_matrix)
similarity_df
# ## Clustering documents using similarity features
# In[10]:
from sklearn.cluster import KMeans
km = KMeans(n_clusters=2)
km.fit_transform(similarity_df)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
# # Topic models
# In[11]:
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_topics=2, max_iter=100, random_state=42)
dt_matrix = lda.fit_transform(tv_matrix)
features = pd.DataFrame(dt_matrix, columns=['T1', 'T2'])
features
# ## Show topics and their weights
# In[12]:
tt_matrix = lda.components_
for topic_weights in tt_matrix:
topic = [(token, weight) for token, weight in zip(vocab, topic_weights)]
topic = sorted(topic, key=lambda x: -x[1])
topic = [item for item in topic if item[1] > 0.6]
print(topic)
print()
# ## Clustering documents using topic model features
# In[13]:
km = KMeans(n_clusters=2)
km.fit_transform(features)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
# # Word Embeddings
# In[14]:
from gensim.models import word2vec
wpt = nltk.WordPunctTokenizer()
tokenized_corpus = [wpt.tokenize(document) for document in norm_corpus]
# Set values for various parameters
feature_size = 10 # Word vector dimensionality
window_context = 10 # Context window size
min_word_count = 1 # Minimum word count
sample = 1e-3 # Downsample setting for frequent words
w2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size,
window=window_context, min_count = min_word_count,
sample=sample)
# In[15]:
w2v_model.wv['sky']
# In[16]:
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = np.zeros((num_features,),dtype="float64")
nwords = 0.
for word in words:
if word in vocabulary:
nwords = nwords + 1.
feature_vector = np.add(feature_vector, model[word])
if nwords:
feature_vector = np.divide(feature_vector, nwords)
return feature_vector
def averaged_word_vectorizer(corpus, model, num_features):
vocabulary = set(model.wv.index2word)
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return np.array(features)
# In[17]:
w2v_feature_array = averaged_word_vectorizer(corpus=tokenized_corpus, model=w2v_model,
num_features=feature_size)
pd.DataFrame(w2v_feature_array)
# In[18]:
from sklearn.cluster import AffinityPropagation
ap = AffinityPropagation()
ap.fit(w2v_feature_array)
cluster_labels = ap.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1) | en | 0.532185 | # coding: utf-8 Created on Mon May 17 00:00:00 2017 @author: DIP # # Import necessary dependencies and settings # In[1]: # # Sample corpus of text documents # In[2]: # # Simple text pre-processing # In[3]: # lower case and remove special characters\whitespaces # tokenize document # filter stopwords out of document # re-create document from filtered tokens # In[4]: # # Bag of Words Model # In[5]: # In[6]: # # Bag of N-Grams Model # In[7]: # # TF-IDF Model # In[8]: # # Document Similarity # In[9]: # ## Clustering documents using similarity features # In[10]: # # Topic models # In[11]: # ## Show topics and their weights # In[12]: # ## Clustering documents using topic model features # In[13]: # # Word Embeddings # In[14]: # Set values for various parameters # Word vector dimensionality # Context window size # Minimum word count # Downsample setting for frequent words # In[15]: # In[16]: # In[17]: # In[18]: | 3.057786 | 3 |
mall/apps/oauth/WeiBotool.py | codedaliu/02meiduo | 0 | 6619396 | <filename>mall/apps/oauth/WeiBotool.py
# from urllib.parse import parse_qs
# import requests
#
#
# class OAuthWeiBo(object):
#
# def get_access_token(self,code):
# access_token_url = "https://api.weibo.com/oauth2/access_token"
# #组织数据
# re_dict = requests.post(access_token_url, data={
# "client_id": 3305669385,
# "client_secret": "<KEY>",
# "grant_type": "authorization_code",
# "code": code,
# "redirect_uri": "http://www.meiduo.site:8080/sina_callback.html",
# })
# try:
# # 提取数据
# datas = re_dict.text
#
# # data获取到的信息未一个字典'{"access_token":"2.<PASSWORD>",
# # "remind_in":"15799","expires_in":15799,"uid":"5675652",
# # "isRealName":"true"}'
#
# # 转化为字典
# data = eval(datas)
# except :
# raise Exception('微博登录错误')
# # 提取access_token
# access_token = data.get('access_token', None)
# print(data)
# if not access_token :
# raise Exception('获取失败')
# print(re_dict)
# return access_token[0]
#
# def get_token_info(self,access_token):
# user_url = 'https://api.weibo.com/oauth2/get_token_info'
# # user_url = "https://api.weibo.com/2/users/show.json"
# uid = self.get_access_token().data['uid']
# get_url = user_url + "?access_token={at}&uid={uid}".format(at=access_token, uid=uid)
# print(get_url)
#
| <filename>mall/apps/oauth/WeiBotool.py
# from urllib.parse import parse_qs
# import requests
#
#
# class OAuthWeiBo(object):
#
# def get_access_token(self,code):
# access_token_url = "https://api.weibo.com/oauth2/access_token"
# #组织数据
# re_dict = requests.post(access_token_url, data={
# "client_id": 3305669385,
# "client_secret": "<KEY>",
# "grant_type": "authorization_code",
# "code": code,
# "redirect_uri": "http://www.meiduo.site:8080/sina_callback.html",
# })
# try:
# # 提取数据
# datas = re_dict.text
#
# # data获取到的信息未一个字典'{"access_token":"2.<PASSWORD>",
# # "remind_in":"15799","expires_in":15799,"uid":"5675652",
# # "isRealName":"true"}'
#
# # 转化为字典
# data = eval(datas)
# except :
# raise Exception('微博登录错误')
# # 提取access_token
# access_token = data.get('access_token', None)
# print(data)
# if not access_token :
# raise Exception('获取失败')
# print(re_dict)
# return access_token[0]
#
# def get_token_info(self,access_token):
# user_url = 'https://api.weibo.com/oauth2/get_token_info'
# # user_url = "https://api.weibo.com/2/users/show.json"
# uid = self.get_access_token().data['uid']
# get_url = user_url + "?access_token={at}&uid={uid}".format(at=access_token, uid=uid)
# print(get_url)
#
| en | 0.341007 | # from urllib.parse import parse_qs # import requests # # # class OAuthWeiBo(object): # # def get_access_token(self,code): # access_token_url = "https://api.weibo.com/oauth2/access_token" # #组织数据 # re_dict = requests.post(access_token_url, data={ # "client_id": 3305669385, # "client_secret": "<KEY>", # "grant_type": "authorization_code", # "code": code, # "redirect_uri": "http://www.meiduo.site:8080/sina_callback.html", # }) # try: # # 提取数据 # datas = re_dict.text # # # data获取到的信息未一个字典'{"access_token":"2.<PASSWORD>", # # "remind_in":"15799","expires_in":15799,"uid":"5675652", # # "isRealName":"true"}' # # # 转化为字典 # data = eval(datas) # except : # raise Exception('微博登录错误') # # 提取access_token # access_token = data.get('access_token', None) # print(data) # if not access_token : # raise Exception('获取失败') # print(re_dict) # return access_token[0] # # def get_token_info(self,access_token): # user_url = 'https://api.weibo.com/oauth2/get_token_info' # # user_url = "https://api.weibo.com/2/users/show.json" # uid = self.get_access_token().data['uid'] # get_url = user_url + "?access_token={at}&uid={uid}".format(at=access_token, uid=uid) # print(get_url) # | 3.063326 | 3 |
allocation/protocols/int_clause_converter.py | gabrielpereiram10/allocation | 0 | 6619397 | <filename>allocation/protocols/int_clause_converter.py
from typing import Protocol, Set
from abc import abstractmethod
from allocation.protocols.types import ClausesOfFormulas, ClausesOfIntegers
class IntClausesConverter(Protocol):
    """Protocol for converters mapping formula clauses to integer clauses."""

    @abstractmethod
    def to_clauses_of_int(self, clauses: ClausesOfFormulas) -> ClausesOfIntegers:
        """Convert *clauses* over formulas into clauses over integers.

        Raises:
            NotImplementedError: always, on the protocol stub itself.
        """
        # BUG FIX: ``raise NotImplemented`` raises a TypeError, because
        # NotImplemented is a sentinel value, not an exception class; the
        # conventional exception for an abstract stub is NotImplementedError.
        raise NotImplementedError
| <filename>allocation/protocols/int_clause_converter.py
from typing import Protocol, Set
from abc import abstractmethod
from allocation.protocols.types import ClausesOfFormulas, ClausesOfIntegers
class IntClausesConverter(Protocol):
@abstractmethod
def to_clauses_of_int(self, clauses: ClausesOfFormulas) -> ClausesOfIntegers:
raise NotImplemented
| none | 1 | 2.667919 | 3 | |
nnk_benchmark.py | shekkizh/VISSL_NNK_Benchmark | 0 | 6619398 | <reponame>shekkizh/VISSL_NNK_Benchmark
__author__ = "shekkizh"
"""Modified code for feature extraction using VISSL tutorial and tools codes"""
import argparse
import os
import numpy as np
import torch, faiss
from typing import Any, List
from vissl.config import AttrDict
from vissl.utils.hydra_config import convert_to_attrdict, is_hydra_available
from hydra.experimental import compose, initialize_config_module
from vissl.utils.distributed_launcher import launch_distributed
from vissl.hooks import default_hook_generator
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.utils.misc import merge_features
from vissl.utils.checkpoint import get_checkpoint_folder
from vissl.data.dataset_catalog import VisslDatasetCatalog
from utils.non_neg_qpsolver import non_negative_qpsolver
# Command-line interface.  --extract_features / --noextract_features toggle
# whether the VISSL feature-extraction engine runs before benchmarking
# (default: benchmark existing feature dumps only).
parser = argparse.ArgumentParser(description='VISSL extract features')
parser.add_argument('--model_url',
                    default='https://dl.fbaipublicfiles.com/vissl/model_zoo/deepclusterv2_800ep_pretrain.pth.tar',
                    help='Model to download - https://github.com/facebookresearch/vissl/blob/master/MODEL_ZOO.md')
parser.add_argument('--logs_dir', default='/scratch/shekkizh/logs/VISSL')
parser.add_argument("--config", default="imagenet1k_resnet50_trunk_features.yaml",
                    help="config file to extract features")
# NOTE(review): no type=int, so a CLI-supplied --top_k arrives as a string;
# it is only interpolated into a hydra override below, which tolerates that.
parser.add_argument('--top_k', default=50, help="initial no. of neighbors")
parser.add_argument('--extract_features', dest='extract_features', action='store_true')
parser.add_argument('--noextract_features', dest='extract_features', action='store_false')
parser.set_defaults(extract_features=False)
def to_categorical(y, num_classes=None, dtype='float32'):
    """Convert an integer class vector to a one-hot encoded matrix.

    (Adapted from keras.utils.to_categorical.)  Arbitrary input shapes are
    supported; the one-hot axis is appended as the last dimension.
    """
    labels = np.array(y, dtype='int')
    shape = labels.shape
    # A trailing singleton dimension (column vector) is squeezed so the
    # one-hot axis replaces it instead of being appended after it.
    if shape and shape[-1] == 1 and len(shape) > 1:
        shape = tuple(shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    count = flat.shape[0]
    one_hot = np.zeros((count, num_classes), dtype=dtype)
    one_hot[np.arange(count), flat] = 1
    return np.reshape(one_hot, shape + (num_classes,))
@torch.no_grad()
def nnk_classifier(features, labels, queries, targets, topk, num_classes=1000):
    """Non-Negative Kernel (NNK) regression classifier.

    For every query, retrieves ``topk`` cosine-similarity neighbors with a
    faiss inner-product index, solves a non-negative QP for the NNK
    interpolation weights, and predicts with a weighted vote over the
    neighbors' one-hot labels.

    Args:
        features: (n_train, dim) support features.
        labels: (n_train,) integer class labels for *features*.
        queries: (n_test, dim) query features.
        targets: (n_test,) integer ground-truth labels for *queries*.
        topk: number of neighbors retrieved per query.
        num_classes: size of the label space.

    Returns:
        (top1, top5) accuracies in percent.
    """
    dim = features.shape[1]
    target_one_hot = to_categorical(labels, num_classes)
    # Cosine similarity == inner product on L2-normalized vectors.
    normalized_features = features / np.linalg.norm(features, axis=1, keepdims=True)
    index = faiss.IndexFlatIP(dim)
    index = faiss.index_cpu_to_all_gpus(index)
    index.add(normalized_features)
    normalized_queries = queries / np.linalg.norm(queries, axis=1, keepdims=True)
    n_queries = queries.shape[0]
    # BUG FIX: the ``np.float`` alias was deprecated in NumPy 1.20 and removed
    # in 1.24; np.float64 is the explicit equivalent.
    soft_prediction = np.zeros(shape=(n_queries, num_classes), dtype=np.float64)
    distances, indices = index.search(normalized_queries, topk)
    for ii, x_test in enumerate(normalized_queries):
        neighbor_indices = indices[ii, :]
        neighbor_labels = target_one_hot[neighbor_indices, :]
        x_support = normalized_features[neighbor_indices]
        # Map cosine similarities from [-1, 1] into [0, 1] kernel values.
        g_i = 0.5 + np.dot(x_support, x_test) / 2
        G_i = 0.5 + np.dot(x_support, x_support.T) / 2
        # NNK weights: non-negative QP on the local kernel matrix.
        x_opt = non_negative_qpsolver(G_i, g_i, g_i, x_tol=1e-10)
        non_zero_indices = np.nonzero(x_opt)
        # Normalize the surviving weights to sum to 1 before voting.
        x_opt = x_opt[non_zero_indices] / np.sum(x_opt[non_zero_indices])
        soft_prediction[ii, :] = np.dot(x_opt, neighbor_labels[non_zero_indices])
        if ii % 10000 == 0:
            print(f"{ii}/{n_queries} processed...")
    probs = torch.from_numpy(soft_prediction).cuda()
    targets = torch.from_numpy(targets).cuda()
    # Rank classes by descending probability and compare against the targets.
    _, predictions = probs.sort(1, True)
    correct = predictions.eq(targets.data.view(-1, 1))
    top1 = correct.narrow(1, 0, 1).sum().item() * 100.0 / n_queries
    top5 = correct.narrow(1, 0, 5).sum().item() * 100.0 / n_queries
    return top1, top5
def benchmark_layer(cfg: AttrDict, layer_name: str = "heads"):
    """Run the NNK-classifier benchmark on one layer's extracted features.

    Loads the merged train/test feature dumps that a previous VISSL feature
    extraction run wrote into the checkpoint folder, then evaluates the NNK
    classifier on them.

    Args:
        cfg: VISSL config; ``NEAREST_NEIGHBOR.TOPK`` gives the neighbor count.
        layer_name: name of the trunk layer whose features are evaluated.

    Returns:
        (top1, top5) accuracies in percent.
    """
    num_neighbors = cfg.NEAREST_NEIGHBOR.TOPK
    output_dir = get_checkpoint_folder(cfg)
    train_out = merge_features(output_dir, "train", layer_name, cfg)
    train_features, train_labels = train_out["features"], train_out["targets"]
    # Infer the label-space size from the (0-based) training targets.
    num_classes = np.max(train_labels) + 1
    test_out = merge_features(output_dir, "test", layer_name, cfg)
    test_features, test_labels = test_out["features"], test_out["targets"]
    top1, top5 = nnk_classifier(train_features, train_labels, test_features, test_labels, num_neighbors, num_classes)
    return top1, top5
def hydra_main(overrides: List[Any], extract_features=False):
    """Compose the VISSL/hydra config, optionally extract features, benchmark.

    Args:
        overrides: hydra-style ``key=value`` override strings.
        extract_features: when True, launch the (possibly distributed) VISSL
            feature-extraction engine first; otherwise assume the feature
            dumps already exist on disk.
    """
    print(f"####### overrides: {overrides}")
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    args, config = convert_to_attrdict(cfg)
    if extract_features:
        launch_distributed(
            cfg=config,
            node_id=args.node_id,
            engine_name=args.engine_name,
            hook_generator=default_hook_generator,
        )
    # Benchmark every trunk layer features were extracted for; fall back to
    # the model head when no trunk feature names are configured.
    feat_names = get_trunk_output_feature_names(config.MODEL)
    if len(feat_names) == 0:
        feat_names = ["heads"]
    for layer in feat_names:
        top1, top5 = benchmark_layer(config, layer_name=layer)
        print(f"NNK classifier - Layer: {layer}, Top1: {top1}, Top5: {top5}")
if __name__ == "__main__":
    import subprocess  # local import: only needed for the one-off download

    args = parser.parse_args()
    print("Retrieving model weights from VISSL MODEL ZOO")
    basename = os.path.basename(args.model_url)
    weights_file = os.path.join('/scratch/shekkizh/torch_hub/checkpoints/', basename)
    if not os.path.exists(weights_file):
        # SECURITY FIX: the original used os.system with an f-string, which is
        # shell-injectable via --model_url; the argument-list form runs wget
        # without a shell.  check=True surfaces a failed download immediately
        # instead of failing later on a missing/partial weights file.
        subprocess.run(["wget", "-O", weights_file, "-L", args.model_url], check=True)
    # Per-model log/checkpoint directory named after the weights file stem.
    logs_dir = os.path.join(args.logs_dir, basename.split('.')[0])
    # print imagenet path registered with VISSL's dataset catalog
    print(VisslDatasetCatalog.get("imagenet1k_folder"))
    overrides = [f"config={args.config}", f"config.CHECKPOINT.DIR={logs_dir}",
                 f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={weights_file}",
                 f"config.NEAREST_NEIGHBOR.TOPK={args.top_k}"]
    assert is_hydra_available(), "Make sure to install hydra"
    overrides.append("hydra.verbose=true")
    hydra_main(overrides=overrides, extract_features=args.extract_features)
| __author__ = "shekkizh"
"""Modified code for feature extraction using VISSL tutorial and tools codes"""
import argparse
import os
import numpy as np
import torch, faiss
from typing import Any, List
from vissl.config import AttrDict
from vissl.utils.hydra_config import convert_to_attrdict, is_hydra_available
from hydra.experimental import compose, initialize_config_module
from vissl.utils.distributed_launcher import launch_distributed
from vissl.hooks import default_hook_generator
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.utils.misc import merge_features
from vissl.utils.checkpoint import get_checkpoint_folder
from vissl.data.dataset_catalog import VisslDatasetCatalog
from utils.non_neg_qpsolver import non_negative_qpsolver
# Command-line interface: which pretrained model to fetch, where to log, which
# extraction config to use, and whether to (re-)run feature extraction.
parser = argparse.ArgumentParser(description='VISSL extract features')
parser.add_argument('--model_url',
                    default='https://dl.fbaipublicfiles.com/vissl/model_zoo/deepclusterv2_800ep_pretrain.pth.tar',
                    help='Model to download - https://github.com/facebookresearch/vissl/blob/master/MODEL_ZOO.md')
parser.add_argument('--logs_dir', default='/scratch/shekkizh/logs/VISSL')
parser.add_argument("--config", default="imagenet1k_resnet50_trunk_features.yaml",
                    help="config file to extract features")
# NOTE(review): no type=int here, so a CLI-supplied --top_k arrives as str while
# the default is int 50. It is only interpolated into a hydra override string,
# so both work, but type=int would be safer.
parser.add_argument('--top_k', default=50, help="initial no. of neighbors")
parser.add_argument('--extract_features', dest='extract_features', action='store_true')
parser.add_argument('--noextract_features', dest='extract_features', action='store_false')
parser.set_defaults(extract_features=False)
def to_categorical(y, num_classes=None, dtype='float32'):
    """Convert integer class labels to a one-hot matrix (keras-style).

    :param y: array-like of integer labels; a trailing singleton axis is dropped.
    :param num_classes: number of one-hot columns; defaults to max(y) + 1.
    :param dtype: dtype of the returned array.
    :return: array of shape ``y.shape + (num_classes,)`` with one-hot rows.
    """
    labels = np.array(y, dtype='int')
    out_shape = labels.shape
    # Collapse a trailing singleton dimension, e.g. (n, 1) -> (n,).
    if out_shape and len(out_shape) > 1 and out_shape[-1] == 1:
        out_shape = tuple(out_shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    one_hot = np.zeros((flat.shape[0], num_classes), dtype=dtype)
    one_hot[np.arange(flat.shape[0]), flat] = 1
    return np.reshape(one_hot, out_shape + (num_classes,))
@torch.no_grad()
def nnk_classifier(features, labels, queries, targets, topk, num_classes=1000):
    """Non-negative kernel (NNK) nearest-neighbor classifier.

    For every query, retrieves its ``topk`` neighbors by inner product over
    L2-normalized vectors (i.e. cosine similarity, via FAISS on all GPUs),
    solves a non-negative QP for the neighbor weights, and predicts the
    weighted average of the neighbors' one-hot labels.

    :param features: (n_train, dim) support features.
    :param labels: integer class labels for ``features``.
    :param queries: (n_queries, dim) query features.
    :param targets: integer class labels for ``queries``.
    :param topk: number of neighbors retrieved per query.
    :param num_classes: total number of classes.
    :return: (top1, top5) accuracy percentages.
        NOTE(review): the top5 computation assumes num_classes >= 5.
    """
    dim = features.shape[1]
    target_one_hot = to_categorical(labels, num_classes)
    normalized_features = features / np.linalg.norm(features, axis=1, keepdims=True)
    index = faiss.IndexFlatIP(dim)
    index = faiss.index_cpu_to_all_gpus(index)
    index.add(normalized_features)
    normalized_queries = queries / np.linalg.norm(queries, axis=1, keepdims=True)
    n_queries = queries.shape[0]
    # BUG FIX: np.float was a deprecated alias removed in NumPy 1.24; use the
    # concrete float64 dtype (same behavior on older NumPy).
    soft_prediction = np.zeros(shape=(n_queries, num_classes), dtype=np.float64)
    distances, indices = index.search(normalized_queries, topk)
    for ii, x_test in enumerate(normalized_queries):
        neighbor_indices = indices[ii, :]
        neighbor_labels = target_one_hot[neighbor_indices, :]
        x_support = normalized_features[neighbor_indices]
        # Map cosine similarity in [-1, 1] to a non-negative kernel in [0, 1].
        g_i = 0.5 + np.dot(x_support, x_test) / 2
        G_i = 0.5 + np.dot(x_support, x_support.T) / 2
        x_opt = non_negative_qpsolver(G_i, g_i, g_i, x_tol=1e-10)
        # x_opt = g_i  # plain (unoptimized) kernel weights, kept for reference
        non_zero_indices = np.nonzero(x_opt)
        # Normalize the surviving weights so the soft prediction is a distribution.
        x_opt = x_opt[non_zero_indices] / np.sum(x_opt[non_zero_indices])
        soft_prediction[ii, :] = np.dot(x_opt, neighbor_labels[non_zero_indices])
        if ii % 10000 == 0:
            print(f"{ii}/{n_queries} processed...")
    probs = torch.from_numpy(soft_prediction).cuda()
    targets = torch.from_numpy(targets).cuda()
    _, predictions = probs.sort(1, True)
    correct = predictions.eq(targets.data.view(-1, 1))
    top1 = correct.narrow(1, 0, 1).sum().item() * 100.0 / n_queries
    top5 = correct.narrow(1, 0, 5).sum().item() * 100.0 / n_queries
    return top1, top5
def benchmark_layer(cfg: AttrDict, layer_name: str = "heads"):
    """Evaluate the NNK classifier on features extracted for ``layer_name``.

    Loads the train/test feature dumps written into the checkpoint folder by
    the extraction run (see ``hydra_main``) and returns (top1, top5) accuracy.
    """
    num_neighbors = cfg.NEAREST_NEIGHBOR.TOPK
    output_dir = get_checkpoint_folder(cfg)
    # merge_features loads/merges the extracted features for a split — see vissl.utils.misc.
    train_out = merge_features(output_dir, "train", layer_name, cfg)
    train_features, train_labels = train_out["features"], train_out["targets"]
    # Number of classes is inferred from the training labels (assumed 0-based ints).
    num_classes = np.max(train_labels) + 1
    test_out = merge_features(output_dir, "test", layer_name, cfg)
    test_features, test_labels = test_out["features"], test_out["targets"]
    top1, top5 = nnk_classifier(train_features, train_labels, test_features, test_labels, num_neighbors, num_classes)
    return top1, top5
def hydra_main(overrides: List[Any], extract_features=False):
    """Compose the VISSL/hydra config, optionally extract features, then benchmark.

    :param overrides: hydra-style "key=value" override strings.
    :param extract_features: when True, first launch the (possibly distributed)
        feature-extraction engine; otherwise assume features already exist on disk.
    """
    print(f"####### overrides: {overrides}")
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    args, config = convert_to_attrdict(cfg)
    if extract_features:
        launch_distributed(
            cfg=config,
            node_id=args.node_id,
            engine_name=args.engine_name,
            hook_generator=default_hook_generator,
        )
    # Benchmark every trunk output layer; fall back to the model head if none.
    feat_names = get_trunk_output_feature_names(config.MODEL)
    if len(feat_names) == 0:
        feat_names = ["heads"]
    for layer in feat_names:
        top1, top5 = benchmark_layer(config, layer_name=layer)
        print(f"NNK classifier - Layer: {layer}, Top1: {top1}, Top5: {top5}")
if __name__ == "__main__":
    args = parser.parse_args()
    print("Retrieving model weights from VISSL MODEL ZOO")
    basename = os.path.basename(args.model_url)
    weights_file = os.path.join('/scratch/shekkizh/torch_hub/checkpoints/', basename)
    if not os.path.exists(weights_file):
        # SECURITY(review): args.model_url is interpolated straight into a shell
        # command; a URL containing shell metacharacters would be executed.
        # Prefer subprocess.run([...], shell=False) or urllib.request.urlretrieve.
        os.system(f"wget -O {weights_file} -L {args.model_url}")
    # Checkpoints/logs live in a directory named after the weights file (extension stripped).
    logs_dir = os.path.join(args.logs_dir, basename.split('.')[0])
    # print imagenet path
    print(VisslDatasetCatalog.get("imagenet1k_folder"))
    # Hydra-style "key=value" overrides consumed by hydra_main above.
    overrides = [f"config={args.config}", f"config.CHECKPOINT.DIR={logs_dir}",
                 f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={weights_file}", f"config.NEAREST_NEIGHBOR.TOPK={args.top_k}"]
    assert is_hydra_available(), "Make sure to install hydra"
    overrides.append("hydra.verbose=true")
    hydra_main(overrides=overrides, extract_features=args.extract_features)
examples/poisson-line-process/causal_test_poisson.py | CITCOM-project/CausalTestingFramework | 1 | 6619399 | <gh_stars>1-10
from causal_testing.specification.causal_dag import CausalDAG
from causal_testing.specification.scenario import Scenario
from causal_testing.specification.variable import Input, Output
from causal_testing.specification.causal_specification import CausalSpecification
from causal_testing.data_collection.data_collector import ObservationalDataCollector
from causal_testing.testing.causal_test_case import CausalTestCase
from causal_testing.testing.causal_test_outcome import ExactValue, Positive
from causal_testing.testing.causal_test_engine import CausalTestEngine
from causal_testing.testing.estimators import LinearRegressionEstimator, Estimator
import pandas as pd
class EmpiricalMeanEstimator(Estimator):
    """Estimator that compares the empirical means of runs matching the exact
    control and treatment configurations (no model fitting or extrapolation)."""

    def add_modelling_assumptions(self):
        """
        Add modelling assumptions to the estimator. This is a list of strings which list the modelling assumptions that
        must hold if the resulting causal inference is to be considered valid.
        """
        # NOTE(review): "(...)" without a trailing comma is a plain string, not a
        # tuple. If modelling_assumptions is a list, `+=` with a string extends it
        # character-by-character — confirm the intended type (likely `+= [...]`).
        self.modelling_assumptions += (
            "The data must contain runs with the exact configuration of interest."
        )
    def estimate_ate(self) -> tuple:
        """ Estimate the outcomes under control and treatment.
        :return: A pair (empirical average treatment effect, None); the second
            element is a placeholder where other estimators return confidence
            intervals.
        """
        # Rows whose treatment value does not match become NaN and are dropped.
        control_results = self.df.where(
            self.df[self.treatment[0]] == self.control_values
        )[self.outcome].dropna()
        treatment_results = self.df.where(
            self.df[self.treatment[0]] == self.treatment_values
        )[self.outcome].dropna()
        return treatment_results.mean()[0] - control_results.mean()[0], None
    def estimate_risk_ratio(self) -> tuple:
        """ Estimate the risk ratio of treatment over control outcomes.
        :return: A pair (empirical risk ratio, None); the second element is a
            placeholder where other estimators return confidence intervals.
        """
        control_results = self.df.where(
            self.df[self.treatment[0]] == self.control_values
        )[self.outcome].dropna()
        treatment_results = self.df.where(
            self.df[self.treatment[0]] == self.treatment_values
        )[self.outcome].dropna()
        return treatment_results.mean()[0] / control_results.mean()[0], None
# 1. Read in the Causal DAG
causal_dag = CausalDAG("./dag.dot")
# 2. Create variables
width = Input("width", float)
height = Input("height", float)
intensity = Input("intensity", float)
num_lines_abs = Output("num_lines_abs", float)
num_lines_unit = Output("num_lines_unit", float)
num_shapes_abs = Output("num_shapes_abs", float)
num_shapes_unit = Output("num_shapes_unit", float)
# 3. Create scenario by applying constraints over a subset of the input variables
scenario = Scenario(
variables={
width,
height,
intensity,
num_lines_abs,
num_lines_unit,
num_shapes_abs,
num_shapes_unit,
}
)
# 4. Construct a causal specification from the scenario and causal DAG
causal_specification = CausalSpecification(scenario, causal_dag)
def test_intensity_num_shapes(
    observational_data_path,
    causal_test_case,
    square_terms=None,
    inverse_terms=None,
    empirical=False,
):
    """Execute one causal test case against the given data file.

    :param observational_data_path: CSV of recorded runs (observational or RCT-style).
    :param causal_test_case: the CausalTestCase to execute.
    :param square_terms: variable names to add as squared terms to the estimator's
        data frame (defaults to none).
    :param inverse_terms: variable names to add as inverse terms (defaults to none).
    :param empirical: if True, use EmpiricalMeanEstimator (requires runs with the
        exact control/treatment configurations); otherwise fit a linear regression.
    :return: the causal test result produced by the engine.
    """
    # BUG FIX: the defaults used to be mutable lists ([]), which Python shares
    # across calls; use None sentinels instead (behavior-compatible for callers).
    if square_terms is None:
        square_terms = []
    if inverse_terms is None:
        inverse_terms = []
    # 6. Create a data collector
    data_collector = ObservationalDataCollector(scenario, observational_data_path)
    # 7. Create an instance of the causal test engine
    causal_test_engine = CausalTestEngine(
        causal_test_case, causal_specification, data_collector
    )
    # 8. Obtain the minimal adjustment set for the causal test case from the causal DAG
    causal_test_engine.load_data(index_col=0)
    # 9. Set up an estimator
    data = pd.read_csv(observational_data_path)
    treatment = list(causal_test_case.control_input_configuration)[0].name
    outcome = list(causal_test_case.outcome_variables)[0].name
    estimator = None
    if empirical:
        estimator = EmpiricalMeanEstimator(
            treatment=[treatment],
            control_values=list(causal_test_case.control_input_configuration.values())[
                0
            ],
            treatment_values=list(
                causal_test_case.treatment_input_configuration.values()
            )[0],
            adjustment_set=set(),
            outcome=[outcome],
            df=data,
            effect_modifiers=causal_test_case.effect_modifier_configuration,
        )
    else:
        estimator = LinearRegressionEstimator(
            treatment=[treatment],
            control_values=list(causal_test_case.control_input_configuration.values())[
                0
            ],
            treatment_values=list(
                causal_test_case.treatment_input_configuration.values()
            )[0],
            adjustment_set=set(),
            outcome=[outcome],
            df=data,
            intercept=0,
            effect_modifiers=causal_test_case.effect_modifier_configuration,
        )
    # NOTE(review): the term helpers are invoked on both estimator types;
    # confirm EmpiricalMeanEstimator supports add_squared_term_to_df /
    # add_inverse_term_to_df (callers only pass terms with empirical=True too).
    for t in square_terms:
        estimator.add_squared_term_to_df(t)
    for t in inverse_terms:
        estimator.add_inverse_term_to_df(t)
    # 10. Execute the test
    causal_test_result = causal_test_engine.execute_test(
        estimator, causal_test_case.estimate_type
    )
    return causal_test_result
observational_data_path = "data/random/data_random_1000.csv"
intensity_num_shapes_results = []
# Experiment 1: for each square world size wh, test that doubling the intensity
# changes num_shapes_unit by a risk ratio of ~4 (ExactValue(4, tolerance=0.5)),
# comparing the observational estimate against the exact-configuration SMT data.
for wh in range(1, 11):
    smt_data_path = f"data/smt_100/data_smt_wh{wh}_100.csv"
    for control_value, treatment_value in [(1, 2), (2, 4), (4, 8), (8, 16)]:
        print("=" * 33, "CAUSAL TEST", "=" * 33)
        print(f"WIDTH = HEIGHT = {wh}")
        print("Identifying")
        # 5. Create a causal test case
        causal_test_case = CausalTestCase(
            control_input_configuration={intensity: control_value},
            treatment_input_configuration={intensity: treatment_value},
            expected_causal_effect=ExactValue(4, tolerance=0.5),
            outcome_variables={num_shapes_unit},
            estimate_type="risk_ratio",
            # effect_modifier_configuration={width: wh, height: wh}
        )
        obs_causal_test_result = test_intensity_num_shapes(
            observational_data_path,
            causal_test_case,
            square_terms=["intensity"],
            empirical=False,
        )
        print("Observational", end=" ")
        print(obs_causal_test_result)
        # Same test case against the SMT data, using the exact-match estimator.
        smt_causal_test_result = test_intensity_num_shapes(
            smt_data_path, causal_test_case, square_terms=["intensity"], empirical=True
        )
        print("RCT", end=" ")
        print(smt_causal_test_result)
        results = {
            "width": wh,
            "height": wh,
            "control": control_value,
            "treatment": treatment_value,
            "smt_risk_ratio": smt_causal_test_result.ate,
            "obs_risk_ratio": obs_causal_test_result.ate,
        }
        intensity_num_shapes_results.append(results)
intensity_num_shapes_results = pd.DataFrame(intensity_num_shapes_results)
intensity_num_shapes_results.to_csv("intensity_num_shapes_results_random_1000.csv")
print(intensity_num_shapes_results)
width_num_shapes_results = []
# Experiment 2: at each intensity level, increasing the width by 1 is expected
# to have a positive effect on num_shapes_unit (calculated ATE + CIs).
for i in range(17):
    for w in range(1, 10):
        print("=" * 37, "CAUSAL TEST", "=" * 37)
        print("Identifying")
        # 5. Create a causal test case
        control_value = w
        treatment_value = w + 1
        causal_test_case = CausalTestCase(
            control_input_configuration={width: control_value},
            treatment_input_configuration={width: treatment_value},
            expected_causal_effect=Positive(),
            outcome_variables={num_shapes_unit},
            estimate_type="ate_calculated",
            effect_modifier_configuration={intensity: i},
        )
        causal_test_result = test_intensity_num_shapes(
            observational_data_path,
            causal_test_case,
            square_terms=["intensity"],
            inverse_terms=["width"],
        )
        print(causal_test_result)
        results = {
            "control": control_value,
            "treatment": treatment_value,
            "intensity": i,
            "ate": causal_test_result.ate,
            "ci_low": min(causal_test_result.confidence_intervals),
            "ci_high": max(causal_test_result.confidence_intervals),
        }
        width_num_shapes_results.append(results)
width_num_shapes_results = pd.DataFrame(width_num_shapes_results)
width_num_shapes_results.to_csv("width_num_shapes_results_random_1000.csv")
print(width_num_shapes_results)
| from causal_testing.specification.causal_dag import CausalDAG
from causal_testing.specification.scenario import Scenario
from causal_testing.specification.variable import Input, Output
from causal_testing.specification.causal_specification import CausalSpecification
from causal_testing.data_collection.data_collector import ObservationalDataCollector
from causal_testing.testing.causal_test_case import CausalTestCase
from causal_testing.testing.causal_test_outcome import ExactValue, Positive
from causal_testing.testing.causal_test_engine import CausalTestEngine
from causal_testing.testing.estimators import LinearRegressionEstimator, Estimator
import pandas as pd
class EmpiricalMeanEstimator(Estimator):
    """Estimator that compares the empirical means of runs matching the exact
    control and treatment configurations (no model fitting or extrapolation)."""

    def add_modelling_assumptions(self):
        """
        Add modelling assumptions to the estimator. This is a list of strings which list the modelling assumptions that
        must hold if the resulting causal inference is to be considered valid.
        """
        # NOTE(review): "(...)" without a trailing comma is a plain string, not a
        # tuple. If modelling_assumptions is a list, `+=` with a string extends it
        # character-by-character — confirm the intended type (likely `+= [...]`).
        self.modelling_assumptions += (
            "The data must contain runs with the exact configuration of interest."
        )
    def estimate_ate(self) -> tuple:
        """ Estimate the outcomes under control and treatment.
        :return: A pair (empirical average treatment effect, None); the second
            element is a placeholder where other estimators return confidence
            intervals.
        """
        # Rows whose treatment value does not match become NaN and are dropped.
        control_results = self.df.where(
            self.df[self.treatment[0]] == self.control_values
        )[self.outcome].dropna()
        treatment_results = self.df.where(
            self.df[self.treatment[0]] == self.treatment_values
        )[self.outcome].dropna()
        return treatment_results.mean()[0] - control_results.mean()[0], None
    def estimate_risk_ratio(self) -> tuple:
        """ Estimate the risk ratio of treatment over control outcomes.
        :return: A pair (empirical risk ratio, None); the second element is a
            placeholder where other estimators return confidence intervals.
        """
        control_results = self.df.where(
            self.df[self.treatment[0]] == self.control_values
        )[self.outcome].dropna()
        treatment_results = self.df.where(
            self.df[self.treatment[0]] == self.treatment_values
        )[self.outcome].dropna()
        return treatment_results.mean()[0] / control_results.mean()[0], None
# 1. Read in the Causal DAG
causal_dag = CausalDAG("./dag.dot")
# 2. Create variables
width = Input("width", float)
height = Input("height", float)
intensity = Input("intensity", float)
num_lines_abs = Output("num_lines_abs", float)
num_lines_unit = Output("num_lines_unit", float)
num_shapes_abs = Output("num_shapes_abs", float)
num_shapes_unit = Output("num_shapes_unit", float)
# 3. Create scenario by applying constraints over a subset of the input variables
scenario = Scenario(
variables={
width,
height,
intensity,
num_lines_abs,
num_lines_unit,
num_shapes_abs,
num_shapes_unit,
}
)
# 4. Construct a causal specification from the scenario and causal DAG
causal_specification = CausalSpecification(scenario, causal_dag)
def test_intensity_num_shapes(
    observational_data_path,
    causal_test_case,
    square_terms=None,
    inverse_terms=None,
    empirical=False,
):
    """Execute one causal test case against the given data file.

    :param observational_data_path: CSV of recorded runs (observational or RCT-style).
    :param causal_test_case: the CausalTestCase to execute.
    :param square_terms: variable names to add as squared terms to the estimator's
        data frame (defaults to none).
    :param inverse_terms: variable names to add as inverse terms (defaults to none).
    :param empirical: if True, use EmpiricalMeanEstimator (requires runs with the
        exact control/treatment configurations); otherwise fit a linear regression.
    :return: the causal test result produced by the engine.
    """
    # BUG FIX: the defaults used to be mutable lists ([]), which Python shares
    # across calls; use None sentinels instead (behavior-compatible for callers).
    if square_terms is None:
        square_terms = []
    if inverse_terms is None:
        inverse_terms = []
    # 6. Create a data collector
    data_collector = ObservationalDataCollector(scenario, observational_data_path)
    # 7. Create an instance of the causal test engine
    causal_test_engine = CausalTestEngine(
        causal_test_case, causal_specification, data_collector
    )
    # 8. Obtain the minimal adjustment set for the causal test case from the causal DAG
    causal_test_engine.load_data(index_col=0)
    # 9. Set up an estimator
    data = pd.read_csv(observational_data_path)
    treatment = list(causal_test_case.control_input_configuration)[0].name
    outcome = list(causal_test_case.outcome_variables)[0].name
    estimator = None
    if empirical:
        estimator = EmpiricalMeanEstimator(
            treatment=[treatment],
            control_values=list(causal_test_case.control_input_configuration.values())[
                0
            ],
            treatment_values=list(
                causal_test_case.treatment_input_configuration.values()
            )[0],
            adjustment_set=set(),
            outcome=[outcome],
            df=data,
            effect_modifiers=causal_test_case.effect_modifier_configuration,
        )
    else:
        estimator = LinearRegressionEstimator(
            treatment=[treatment],
            control_values=list(causal_test_case.control_input_configuration.values())[
                0
            ],
            treatment_values=list(
                causal_test_case.treatment_input_configuration.values()
            )[0],
            adjustment_set=set(),
            outcome=[outcome],
            df=data,
            intercept=0,
            effect_modifiers=causal_test_case.effect_modifier_configuration,
        )
    # NOTE(review): the term helpers are invoked on both estimator types;
    # confirm EmpiricalMeanEstimator supports add_squared_term_to_df /
    # add_inverse_term_to_df (callers only pass terms with empirical=True too).
    for t in square_terms:
        estimator.add_squared_term_to_df(t)
    for t in inverse_terms:
        estimator.add_inverse_term_to_df(t)
    # 10. Execute the test
    causal_test_result = causal_test_engine.execute_test(
        estimator, causal_test_case.estimate_type
    )
    return causal_test_result
observational_data_path = "data/random/data_random_1000.csv"
intensity_num_shapes_results = []
for wh in range(1, 11):
smt_data_path = f"data/smt_100/data_smt_wh{wh}_100.csv"
for control_value, treatment_value in [(1, 2), (2, 4), (4, 8), (8, 16)]:
print("=" * 33, "CAUSAL TEST", "=" * 33)
print(f"WIDTH = HEIGHT = {wh}")
print("Identifying")
# 5. Create a causal test case
causal_test_case = CausalTestCase(
control_input_configuration={intensity: control_value},
treatment_input_configuration={intensity: treatment_value},
expected_causal_effect=ExactValue(4, tolerance=0.5),
outcome_variables={num_shapes_unit},
estimate_type="risk_ratio",
# effect_modifier_configuration={width: wh, height: wh}
)
obs_causal_test_result = test_intensity_num_shapes(
observational_data_path,
causal_test_case,
square_terms=["intensity"],
empirical=False,
)
print("Observational", end=" ")
print(obs_causal_test_result)
smt_causal_test_result = test_intensity_num_shapes(
smt_data_path, causal_test_case, square_terms=["intensity"], empirical=True
)
print("RCT", end=" ")
print(smt_causal_test_result)
results = {
"width": wh,
"height": wh,
"control": control_value,
"treatment": treatment_value,
"smt_risk_ratio": smt_causal_test_result.ate,
"obs_risk_ratio": obs_causal_test_result.ate,
}
intensity_num_shapes_results.append(results)
intensity_num_shapes_results = pd.DataFrame(intensity_num_shapes_results)
intensity_num_shapes_results.to_csv("intensity_num_shapes_results_random_1000.csv")
print(intensity_num_shapes_results)
width_num_shapes_results = []
for i in range(17):
for w in range(1, 10):
print("=" * 37, "CAUSAL TEST", "=" * 37)
print("Identifying")
# 5. Create a causal test case
control_value = w
treatment_value = w + 1
causal_test_case = CausalTestCase(
control_input_configuration={width: control_value},
treatment_input_configuration={width: treatment_value},
expected_causal_effect=Positive(),
outcome_variables={num_shapes_unit},
estimate_type="ate_calculated",
effect_modifier_configuration={intensity: i},
)
causal_test_result = test_intensity_num_shapes(
observational_data_path,
causal_test_case,
square_terms=["intensity"],
inverse_terms=["width"],
)
print(causal_test_result)
results = {
"control": control_value,
"treatment": treatment_value,
"intensity": i,
"ate": causal_test_result.ate,
"ci_low": min(causal_test_result.confidence_intervals),
"ci_high": max(causal_test_result.confidence_intervals),
}
width_num_shapes_results.append(results)
width_num_shapes_results = pd.DataFrame(width_num_shapes_results)
width_num_shapes_results.to_csv("width_num_shapes_results_random_1000.csv")
print(width_num_shapes_results) | en | 0.774086 | Add modelling assumptions to the estimator. This is a list of strings which list the modelling assumptions that must hold if the resulting causal inference is to be considered valid. Estimate the outcomes under control and treatment. :return: The empirical average treatment effect. Estimate the outcomes under control and treatment. :return: The empirical average treatment effect. # 1. Read in the Causal DAG # 2. Create variables # 3. Create scenario by applying constraints over a subset of the input variables # 4. Construct a causal specification from the scenario and causal DAG # 6. Create a data collector # 7. Create an instance of the causal test engine # 8. Obtain the minimal adjustment set for the causal test case from the causal DAG # 9. Set up an estimator # 10. Execute the test # 5. Create a causal test case # effect_modifier_configuration={width: wh, height: wh} # 5. Create a causal test case | 2.433701 | 2 |
jina/types/sets/match_set.py | mahdinezhadasad/jina | 0 | 6619400 | from typing import Optional
from .document_set import DocumentSet
if False:
from ..document import Document
class MatchSet(DocumentSet):
    """A :class:`DocumentSet` view over the matches of a reference Document."""

    def __init__(self, docs_proto, reference_doc: 'Document'):
        """
        :param docs_proto: the protobuf repeated field holding the matches.
        :param reference_doc: the Document these matches belong to.
        """
        super().__init__(docs_proto)
        self._ref_doc = reference_doc
    def append(self, document: Optional['Document'] = None, **kwargs) -> 'Document':
        """Add a matched document to the current Document

        :param document: an existing Document to copy into the new match;
            when None, an empty match is created.
        :param kwargs: additional attributes set on the new match via ``set_attrs``.
        :return: the newly added sub-document in :class:`Document` view
        """
        c = self._docs_proto.add()
        if document is not None:
            c.CopyFrom(document.as_pb_object)
        # Imported here (not at module level) — presumably to avoid a circular
        # import with ..document; confirm before hoisting.
        from ..document import Document
        m = Document(c)
        # A match keeps the reference doc's granularity but sits one adjacency
        # level deeper.
        m.set_attrs(granularity=self._ref_doc.granularity,
                    adjacency=self._ref_doc.adjacency + 1,
                    **kwargs)
        m.score.ref_id = self._ref_doc.id
        # Fall back to the reference doc's mime type when none was provided.
        if not m.mime_type:
            m.mime_type = self._ref_doc.mime_type
        return m
| from typing import Optional
from .document_set import DocumentSet
if False:
from ..document import Document
class MatchSet(DocumentSet):
    """A :class:`DocumentSet` view over the matches of a reference Document."""

    def __init__(self, docs_proto, reference_doc: 'Document'):
        """
        :param docs_proto: the protobuf repeated field holding the matches.
        :param reference_doc: the Document these matches belong to.
        """
        super().__init__(docs_proto)
        self._ref_doc = reference_doc
    def append(self, document: Optional['Document'] = None, **kwargs) -> 'Document':
        """Add a matched document to the current Document

        :param document: an existing Document to copy into the new match;
            when None, an empty match is created.
        :param kwargs: additional attributes set on the new match via ``set_attrs``.
        :return: the newly added sub-document in :class:`Document` view
        """
        c = self._docs_proto.add()
        if document is not None:
            c.CopyFrom(document.as_pb_object)
        # Imported here (not at module level) — presumably to avoid a circular
        # import with ..document; confirm before hoisting.
        from ..document import Document
        m = Document(c)
        # A match keeps the reference doc's granularity but sits one adjacency
        # level deeper.
        m.set_attrs(granularity=self._ref_doc.granularity,
                    adjacency=self._ref_doc.adjacency + 1,
                    **kwargs)
        m.score.ref_id = self._ref_doc.id
        # Fall back to the reference doc's mime type when none was provided.
        if not m.mime_type:
            m.mime_type = self._ref_doc.mime_type
        return m
| en | 0.761592 | Add a matched document to the current Document :return: the newly added sub-document in :class:`Document` view | 2.423018 | 2 |
airbyte-cdk/python/airbyte_cdk/sources/declarative/checks/connection_checker.py | onaio/airbyte | 22 | 6619401 | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import logging
from abc import ABC, abstractmethod
from typing import Any, Mapping, Tuple
from airbyte_cdk.sources.source import Source
class ConnectionChecker(ABC):
    """
    Abstract base class for checking a connection
    """
    @abstractmethod
    # Annotation fix: the return annotation previously used the builtin `any`
    # (a function), not `typing.Any`, which is already imported above.
    def check_connection(self, source: Source, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
        """
        :param source: source
        :param logger: source logger
        :param config: The user-provided configuration as specified by the source's spec.
          This usually contains information required to check connection e.g. tokens, secrets and keys etc.
        :return: A tuple of (boolean, error). If boolean is true, then the connection check is successful
          and we can connect to the underlying data source using the provided configuration.
          Otherwise, the input config cannot be used to connect to the underlying data source,
          and the "error" object should describe what went wrong.
          The error object will be cast to string to display the problem to the user.
        """
        pass
| #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import logging
from abc import ABC, abstractmethod
from typing import Any, Mapping, Tuple
from airbyte_cdk.sources.source import Source
class ConnectionChecker(ABC):
    """
    Abstract base class for checking a connection
    """
    @abstractmethod
    # Annotation fix: the return annotation previously used the builtin `any`
    # (a function), not `typing.Any`, which is already imported above.
    def check_connection(self, source: Source, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
        """
        :param source: source
        :param logger: source logger
        :param config: The user-provided configuration as specified by the source's spec.
          This usually contains information required to check connection e.g. tokens, secrets and keys etc.
        :return: A tuple of (boolean, error). If boolean is true, then the connection check is successful
          and we can connect to the underlying data source using the provided configuration.
          Otherwise, the input config cannot be used to connect to the underlying data source,
          and the "error" object should describe what went wrong.
          The error object will be cast to string to display the problem to the user.
        """
        pass
| en | 0.792439 | # # Copyright (c) 2022 Airbyte, Inc., all rights reserved. # Abstract base class for checking a connection :param source: source :param logger: source logger :param config: The user-provided configuration as specified by the source's spec. This usually contains information required to check connection e.g. tokens, secrets and keys etc. :return: A tuple of (boolean, error). If boolean is true, then the connection check is successful and we can connect to the underlying data source using the provided configuration. Otherwise, the input config cannot be used to connect to the underlying data source, and the "error" object should describe what went wrong. The error object will be cast to string to display the problem to the user. | 2.798564 | 3 |
test_abm.py | lena-kilian/GEOG5995M_CW1 | 0 | 6619402 | """
tests for abm: agents_framework.py
"""
import pytest
import mock_framework
def test_moveagent():
    """moveagent() steps each axis by +/-1 and wraps out-of-range positions.

    NOTE(review): the expected values imply agents start at (50, 50), positions
    wrap modulo 300, and a store of 200 doubles the step to +/-2 — confirm
    against mock_framework.Agents.
    """
    agents = []
    environment = []
    while len(agents) < 2:
        agents.append(mock_framework.Agents(environment, agents))
    agents[0].store = 0
    agents[0].moveagent()
    assert agents[0].y_position == 49 or agents[0].y_position == 51
    assert agents[0].x_position == 49 or agents[0].x_position == 51
    # Positions outside [0, 300) wrap around: 301 -> ~1, -3 -> ~297.
    agents[0].y_position = 301
    agents[0].x_position = -3
    print(agents[0])
    agents[0].moveagent()
    print(agents[0])
    assert agents[0].y_position == 1 or agents[0].y_position == 3
    assert agents[0].x_position == 295 or agents[0].x_position == 297
    # A large store appears to increase the step size to 2.
    agents[1].store = 200
    agents[1].moveagent()
    assert agents[1].y_position == 48 or agents[1].y_position == 52
    assert agents[1].x_position == 48 or agents[1].x_position == 52
def test_eat():
    """eat() moves grass from the current cell into the agent's store.

    Expected behavior (per the asserts): a full eat takes 10 units from the
    cell; near the 100-unit store cap only part is banked; a cell holding
    less than 10 is emptied completely.
    """
    environment = []
    # NOTE(review): `list` shadows the builtin list; harmless here but worth renaming.
    list = []
    while len(list) < 300:
        list.append(100)
    while len(environment) < 300:
        environment.append(list.copy())
    agents = []
    while len(agents) < 2:
        agents.append(mock_framework.Agents(environment, agents))
    agents[0].eat()
    assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 90 and agents[0].store == 10
    agents[0].store = 90
    agents[0].eat()
    assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 80 and agents[0].store == 95
    # Partial eat: only 3 units left in the cell.
    agents[0].environment[agents[0].y_position][agents[0].x_position] = 3
    agents[0].eat()
    assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 0 and agents[0].store == 98
def test_regurgitate():
    """regurgitate() caps the store at 250 and spreads the excess over cells.

    Per the asserts: a store of 100 is untouched; a store of 300 drops to 250
    and the 50 excess is distributed over the 3x3 neighborhood (+26 on the
    agent's cell, +3 on each of the 8 neighbors = 50 total).
    """
    environment = []
    # NOTE(review): `list` shadows the builtin list; harmless here but worth renaming.
    list = []
    while len(list) < 100:
        list.append(100)
    while len(environment) < 100:
        environment.append(list.copy())
    agents = []
    agents.append(mock_framework.Agents(environment, agents))
    agents[0].store = 100
    agents[0].regurgitate()
    assert agents[0].store == 100 and agents[0].environment[agents[0].y_position][agents[0].x_position] == 100
    agents[0].store = 300
    agents[0].regurgitate()
    assert agents[0].store == 250 and agents[0].environment[agents[0].y_position][agents[0].x_position] == 126
    assert agents[0].environment[agents[0].y_position][agents[0].x_position + 1] == 103
    assert agents[0].environment[agents[0].y_position][agents[0].x_position - 1] == 103
    assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position] == 103
    assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position + 1] == 103
    assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position - 1] == 103
    assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position] == 103
    assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position + 1] == 103
    assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position - 1] == 103
def test_grass_grow():
    """grass_grow() increases each cell by 1 per call, capped at 255.

    Cells already above the cap (600) are left unchanged; a cell at 254 stops
    at 255 after two growth steps while a normal cell goes 100 -> 102.
    """
    environment = []
    # NOTE(review): `list` shadows the builtin list; harmless here but worth renaming.
    list = []
    while len(list) < 100:
        list.append(100)
    while len(environment) < 100:
        environment.append(list.copy())
    environment[5][5] = 600
    environment[2][5] = 254
    agents = []
    agents.append(mock_framework.Agents(environment, agents))
    agents[0].grass_grow()
    agents[0].grass_grow()
    assert environment[1][1] == 102 and environment[5][5] == 600 and environment[2][5] == 255
def test_distance():
    """distance() returns the Euclidean distance between two agents (3-4-5 triangle)."""
    environment = []
    agents = []
    while len(agents) < 2:
        agents.append(mock_framework.Agents(environment, agents))
    agents[0].x_position = 4
    agents[0].y_position = 0
    agents[1].x_position = 0
    agents[1].y_position = 3
    dist = agents[0].distance(agents[1])
    assert dist == 5
def test_min_distance():
    """min_distance() returns the smallest pairwise distance among agents.

    NOTE(review): the second assert (two co-located OTHER agents give 0 when
    called on agents[0]) implies the minimum is taken over ALL agent pairs,
    not just distances from self — confirm against mock_framework.
    """
    environment = []
    agents = []
    while len(agents) < 3:
        agents.append(mock_framework.Agents(environment, agents))
    agents[0].x_position = 4
    agents[0].y_position = 0
    agents[1].x_position = 0
    agents[1].y_position = 3
    assert agents[0].min_distance() == 5
    # Place agents[2] on top of agents[1] -> minimum pairwise distance is 0.
    agents[2].x_position = 0
    agents[2].y_position = 3
    assert agents[0].min_distance() == 0
def test_max_distance():
    """max_distance() returns the largest pairwise distance among agents.

    NOTE(review): the expected values (50 then 100) only work out if the
    maximum is over ALL agent pairs and agents[2] starts at the default
    position (apparently (50, 50)) — confirm against mock_framework.
    """
    environment = []
    agents = []
    while len(agents) < 3:
        agents.append(mock_framework.Agents(environment, agents))
    agents[0].x_position = 37
    agents[0].y_position = 45
    agents[1].x_position = 50
    agents[1].y_position = 0
    assert agents[0].max_distance() == 50
    # Moving agents[2] to (50, 100) makes the agents[1]-agents[2] pair span 100.
    agents[2].x_position = 50
    agents[2].y_position = 100
    assert agents[0].max_distance() == 100
def test_share():
    """share(neighbourhood) averages stores with agents within the given distance.

    agents[0] and agents[1] share 10 units equally (5 each); agents[2] is moved
    far away (90, 90) so it is outside the neighbourhood of 10 and excluded.
    """
    environment = []
    agents = []
    while len(agents) < 3:
        agents.append(mock_framework.Agents(environment, agents))
    agents[2].x_position = 90
    agents[2].y_position = 90
    agents[0].store = 10
    agents[1].store = 0
    agents[0].share(10)
    assert agents[0].store == 5 and agents[1].store == 5
pytest.main() | """
tests for abm: agents_framework.py
"""
import pytest
import mock_framework
def test_moveagent():
agents = []
environment = []
while len(agents) < 2:
agents.append(mock_framework.Agents(environment, agents))
agents[0].store = 0
agents[0].moveagent()
assert agents[0].y_position == 49 or agents[0].y_position == 51
assert agents[0].x_position == 49 or agents[0].x_position == 51
agents[0].y_position = 301
agents[0].x_position = -3
print(agents[0])
agents[0].moveagent()
print(agents[0])
assert agents[0].y_position == 1 or agents[0].y_position == 3
assert agents[0].x_position == 295 or agents[0].x_position == 297
agents[1].store = 200
agents[1].moveagent()
assert agents[1].y_position == 48 or agents[1].y_position == 52
assert agents[1].x_position == 48 or agents[1].x_position == 52
def test_eat():
environment = []
list = []
while len(list) < 300:
list.append(100)
while len(environment) < 300:
environment.append(list.copy())
agents = []
while len(agents) < 2:
agents.append(mock_framework.Agents(environment, agents))
agents[0].eat()
assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 90 and agents[0].store == 10
agents[0].store = 90
agents[0].eat()
assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 80 and agents[0].store == 95
agents[0].environment[agents[0].y_position][agents[0].x_position] = 3
agents[0].eat()
assert agents[0].environment[agents[0].y_position][agents[0].x_position] == 0 and agents[0].store == 98
def test_regurgitate():
environment = []
list = []
while len(list) < 100:
list.append(100)
while len(environment) < 100:
environment.append(list.copy())
agents = []
agents.append(mock_framework.Agents(environment, agents))
agents[0].store = 100
agents[0].regurgitate()
assert agents[0].store == 100 and agents[0].environment[agents[0].y_position][agents[0].x_position] == 100
agents[0].store = 300
agents[0].regurgitate()
assert agents[0].store == 250 and agents[0].environment[agents[0].y_position][agents[0].x_position] == 126
assert agents[0].environment[agents[0].y_position][agents[0].x_position + 1] == 103
assert agents[0].environment[agents[0].y_position][agents[0].x_position - 1] == 103
assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position] == 103
assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position + 1] == 103
assert agents[0].environment[agents[0].y_position + 1][agents[0].x_position - 1] == 103
assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position] == 103
assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position + 1] == 103
assert agents[0].environment[agents[0].y_position - 1][agents[0].x_position - 1] == 103
def test_grass_grow():
environment = []
list = []
while len(list) < 100:
list.append(100)
while len(environment) < 100:
environment.append(list.copy())
environment[5][5] = 600
environment[2][5] = 254
agents = []
agents.append(mock_framework.Agents(environment, agents))
agents[0].grass_grow()
agents[0].grass_grow()
assert environment[1][1] == 102 and environment[5][5] == 600 and environment[2][5] == 255
def test_distance():
environment = []
agents = []
while len(agents) < 2:
agents.append(mock_framework.Agents(environment, agents))
agents[0].x_position = 4
agents[0].y_position = 0
agents[1].x_position = 0
agents[1].y_position = 3
dist = agents[0].distance(agents[1])
assert dist == 5
def test_min_distance():
environment = []
agents = []
while len(agents) < 3:
agents.append(mock_framework.Agents(environment, agents))
agents[0].x_position = 4
agents[0].y_position = 0
agents[1].x_position = 0
agents[1].y_position = 3
assert agents[0].min_distance() == 5
agents[2].x_position = 0
agents[2].y_position = 3
assert agents[0].min_distance() == 0
def test_max_distance():
environment = []
agents = []
while len(agents) < 3:
agents.append(mock_framework.Agents(environment, agents))
agents[0].x_position = 37
agents[0].y_position = 45
agents[1].x_position = 50
agents[1].y_position = 0
assert agents[0].max_distance() == 50
agents[2].x_position = 50
agents[2].y_position = 100
assert agents[0].max_distance() == 100
def test_share():
environment = []
agents = []
while len(agents) < 3:
agents.append(mock_framework.Agents(environment, agents))
agents[2].x_position = 90
agents[2].y_position = 90
agents[0].store = 10
agents[1].store = 0
agents[0].share(10)
assert agents[0].store == 5 and agents[1].store == 5
pytest.main() | en | 0.416384 | tests for abm: agents_framework.py | 2.64038 | 3 |
feder/letters/migrations/0027_auto_20211021_0248.py | dzemeuksis/feder | 0 | 6619403 | <filename>feder/letters/migrations/0027_auto_20211021_0248.py
# Generated by Django 2.2.24 on 2021-10-21 02:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("letters", "0026_auto_20210505_1327"),
]
operations = [
migrations.AddIndex(
model_name="letter",
index=models.Index(
fields=["created"], name="letters_let_created_533a4c_idx"
),
),
]
| <filename>feder/letters/migrations/0027_auto_20211021_0248.py
# Generated by Django 2.2.24 on 2021-10-21 02:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("letters", "0026_auto_20210505_1327"),
]
operations = [
migrations.AddIndex(
model_name="letter",
index=models.Index(
fields=["created"], name="letters_let_created_533a4c_idx"
),
),
]
| en | 0.841686 | # Generated by Django 2.2.24 on 2021-10-21 02:48 | 1.549338 | 2 |
app/api.py | TomStevenson/starter-snake-python | 0 | 6619404 | <reponame>TomStevenson/starter-snake-python
import json
from bottle import HTTPResponse
def ping_response():
return HTTPResponse(
status=200
)
def start_response():
return HTTPResponse(
status=200
)
def get_response():
return HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"apiversion": "1",
"author": "ThomasStevenson",
"color": "#3dcd58",
"head" : "shades",
"tail": "bolt"
})
)
def move_response(move):
assert move in ['up', 'down', 'left', 'right'], \
"Move must be one of [up, down, left, right]"
return HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"move": move
})
)
def end_response():
return HTTPResponse(
status=200
)
| import json
from bottle import HTTPResponse
def ping_response():
return HTTPResponse(
status=200
)
def start_response():
return HTTPResponse(
status=200
)
def get_response():
return HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"apiversion": "1",
"author": "ThomasStevenson",
"color": "#3dcd58",
"head" : "shades",
"tail": "bolt"
})
)
def move_response(move):
assert move in ['up', 'down', 'left', 'right'], \
"Move must be one of [up, down, left, right]"
return HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"move": move
})
)
def end_response():
return HTTPResponse(
status=200
) | none | 1 | 2.540438 | 3 | |
QrDetector/20201009/QrDetector.py | raoyi/QRcode | 0 | 6619405 | <reponame>raoyi/QRcode
#!/usr/bin/env python3
import cv2
import tkinter.messagebox
import configparser
import re
import os
import time
from datetime import datetime
def error(msg):
root = tkinter.Tk()
root.withdraw() # hide main window
tkinter.messagebox.showerror('ERROR',msg)
os._exit(0)
conf = configparser.ConfigParser()
conf.read('QrDetector.ini',encoding='utf-8')
# 检查是否有变量 qrstr1 等,并组成列表
for key in conf['Settings']:
if re.match("qrstr\d+",key):
try:
qrstrx
except NameError:
qrstrx = []
if conf['Settings'][key] != '':
qrstrx.append(conf['Settings'][key].split(conf['Settings']['separator']))
if not 'qrstrx' in locals().keys():
qrstrx = []
# 处理中的qrstrx列表的ID
strindex = 0
# 设置autoexit标记
if 'autoexit' in conf['Settings']:
autoexit = conf['Settings']['autoexit'].upper()
if autoexit != 'Y':
autoexit = 'N'
# 设置保存视频标记
if 'saveavi' in conf['Settings']:
saveavi = conf['Settings']['saveavi'].upper()
if saveavi == 'Y':
if os.path.exists('debug') == False:
os.mkdir('debug')
else:
saveavi = 'N'
else:
saveavi = 'N'
# 设置保存二维码图片标记
if 'qrpic' in conf['Settings']:
qrpic = conf['Settings']['qrpic'].upper()
if qrpic == 'Y':
if os.path.exists('qrpic') == False:
os.mkdir('qrpic')
else:
qrpic = 'N'
else:
qrpic = 'N'
count_experiments = 1
# 0是默认的笔记本摄像头ID
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) # 创建一个 VideoCapture 对象
##############################
if saveavi == 'Y':
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = 24
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter('debug\\'+str(len(os.listdir('debug'))+1)+'.avi', fourcc, fps, size)
##############################
prev_result = ''
# cv2.isOpened()检查是否初始化成功,返回布尔值
while(cap.isOpened()): # 循环读取每一帧
frame = cap.read()[1]
if len(qrstrx) != 0 and strindex < len(qrstrx):
frame = cv2.putText(frame, 'waitQR'+str(qrstrx[strindex]), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
cv2.imshow("QrDetector - 20201009 | Author:RaoYi", frame) # 窗口显示,并设置窗口标题
k = cv2.waitKey(1) & 0xFF # 每帧数据延时 1ms,延时不能为 0,否则读取的结果会是静态帧
#####################
if saveavi == 'Y':
out.write(frame)
#####################
for i in range(count_experiments):
# 检测与识别
try:
result_detection = cv2.QRCodeDetector().detectAndDecode(frame)[0]
except cv2.error:
pass
if result_detection:
if result_detection != prev_result:
if qrpic == 'Y':
cv2.imwrite('qrpic\\'+str(len(os.listdir('qrpic'))+1)+'.jpg', frame)
ff = open('scanlog.txt', 'a')
ff.write(datetime.now().strftime('[%Y/%m/%d-%H:%M:%S.%f]')+' get string : '+result_detection+'\n')
ff.close()
prev_result = result_detection
if qrstrx == [] and autoexit == 'Y':
cap.release() # 释放摄像头
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0)
if strindex < len(qrstrx) and qrstrx[strindex].count(result_detection) != 0:
# 将二维码内容写入文件
f = open('result.txt', 'w')
f.write(result_detection)
f.close()
if strindex < len(qrstrx):
strindex = strindex + 1
if strindex >= len(qrstrx) and autoexit == 'Y':
cap.release() # 释放摄像头
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0)
if k == 27: # 若检测到按键 ‘Esc’,退出
break
cap.release() # 释放摄像头
if saveavi == 'Y':
out.release()
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0)
| #!/usr/bin/env python3
import cv2
import tkinter.messagebox
import configparser
import re
import os
import time
from datetime import datetime
def error(msg):
root = tkinter.Tk()
root.withdraw() # hide main window
tkinter.messagebox.showerror('ERROR',msg)
os._exit(0)
conf = configparser.ConfigParser()
conf.read('QrDetector.ini',encoding='utf-8')
# 检查是否有变量 qrstr1 等,并组成列表
for key in conf['Settings']:
if re.match("qrstr\d+",key):
try:
qrstrx
except NameError:
qrstrx = []
if conf['Settings'][key] != '':
qrstrx.append(conf['Settings'][key].split(conf['Settings']['separator']))
if not 'qrstrx' in locals().keys():
qrstrx = []
# 处理中的qrstrx列表的ID
strindex = 0
# 设置autoexit标记
if 'autoexit' in conf['Settings']:
autoexit = conf['Settings']['autoexit'].upper()
if autoexit != 'Y':
autoexit = 'N'
# 设置保存视频标记
if 'saveavi' in conf['Settings']:
saveavi = conf['Settings']['saveavi'].upper()
if saveavi == 'Y':
if os.path.exists('debug') == False:
os.mkdir('debug')
else:
saveavi = 'N'
else:
saveavi = 'N'
# 设置保存二维码图片标记
if 'qrpic' in conf['Settings']:
qrpic = conf['Settings']['qrpic'].upper()
if qrpic == 'Y':
if os.path.exists('qrpic') == False:
os.mkdir('qrpic')
else:
qrpic = 'N'
else:
qrpic = 'N'
count_experiments = 1
# 0是默认的笔记本摄像头ID
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) # 创建一个 VideoCapture 对象
##############################
if saveavi == 'Y':
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = 24
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter('debug\\'+str(len(os.listdir('debug'))+1)+'.avi', fourcc, fps, size)
##############################
prev_result = ''
# cv2.isOpened()检查是否初始化成功,返回布尔值
while(cap.isOpened()): # 循环读取每一帧
frame = cap.read()[1]
if len(qrstrx) != 0 and strindex < len(qrstrx):
frame = cv2.putText(frame, 'waitQR'+str(qrstrx[strindex]), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
cv2.imshow("QrDetector - 20201009 | Author:RaoYi", frame) # 窗口显示,并设置窗口标题
k = cv2.waitKey(1) & 0xFF # 每帧数据延时 1ms,延时不能为 0,否则读取的结果会是静态帧
#####################
if saveavi == 'Y':
out.write(frame)
#####################
for i in range(count_experiments):
# 检测与识别
try:
result_detection = cv2.QRCodeDetector().detectAndDecode(frame)[0]
except cv2.error:
pass
if result_detection:
if result_detection != prev_result:
if qrpic == 'Y':
cv2.imwrite('qrpic\\'+str(len(os.listdir('qrpic'))+1)+'.jpg', frame)
ff = open('scanlog.txt', 'a')
ff.write(datetime.now().strftime('[%Y/%m/%d-%H:%M:%S.%f]')+' get string : '+result_detection+'\n')
ff.close()
prev_result = result_detection
if qrstrx == [] and autoexit == 'Y':
cap.release() # 释放摄像头
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0)
if strindex < len(qrstrx) and qrstrx[strindex].count(result_detection) != 0:
# 将二维码内容写入文件
f = open('result.txt', 'w')
f.write(result_detection)
f.close()
if strindex < len(qrstrx):
strindex = strindex + 1
if strindex >= len(qrstrx) and autoexit == 'Y':
cap.release() # 释放摄像头
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0)
if k == 27: # 若检测到按键 ‘Esc’,退出
break
cap.release() # 释放摄像头
if saveavi == 'Y':
out.release()
cv2.destroyAllWindows() # 删除建立的全部窗口
os._exit(0) | zh | 0.834168 | #!/usr/bin/env python3 # hide main window # 检查是否有变量 qrstr1 等,并组成列表 # 处理中的qrstrx列表的ID # 设置autoexit标记 # 设置保存视频标记 # 设置保存二维码图片标记 # 0是默认的笔记本摄像头ID # 创建一个 VideoCapture 对象 ############################## ############################## # cv2.isOpened()检查是否初始化成功,返回布尔值 # 循环读取每一帧 # 窗口显示,并设置窗口标题 # 每帧数据延时 1ms,延时不能为 0,否则读取的结果会是静态帧 ##################### ##################### # 检测与识别 # 释放摄像头 # 删除建立的全部窗口 # 将二维码内容写入文件 # 释放摄像头 # 删除建立的全部窗口 # 若检测到按键 ‘Esc’,退出 # 释放摄像头 # 删除建立的全部窗口 | 2.296842 | 2 |
dataxmissionprotocol/field.py | RobinBobin/data-transmission-protocol | 0 | 6619406 | from commonutils import StaticUtils
from struct import calcsize, pack_into, unpack_from
class Field:
__encoding = "utf-8"
__formats = {
1: ["B"],
2: ["H"],
3: ["L1"],
4: ["L"],
5: ["Q3"],
6: ["Q2"],
7: ["Q1"],
8: ["Q"]
}
for v in __formats.values():
v.append(v[0].lower())
def __init__(self, size = None, **kw):
previousField = kw.get("previousField")
signed = kw.get("signed")
value = kw.get("value")
valueIsStr = not size
self.__offset = previousField.nextOffset if previousField else 0
self.__size = len(value) if valueIsStr else size
self.__format = f"{self.__size}s" if valueIsStr else None if signed == None else Field.__formats[self.__size][+signed]
self.__value = value.encode(Field.__encoding) if valueIsStr else value
@property
def nextOffset(self):
return self.__offset + self.__size
@property
def offset(self):
return self.__offset
@offset.setter
def offset(self, offset):
self.__offset = offset
@property
def size(self):
return self.__size
@property
def value(self):
return self.__value
@value.setter
def value(self, value):
if self.__format != None:
self.__value = value
elif len(value) != self.__size:
raise ValueError()
else:
self.__value = value[:]
@staticmethod
def createChain(size = None, signed = None, value = None, fields = None):
chain = fields
if chain:
for i, field in enumerate(chain):
if i:
field.__offset = chain[i - 1].nextOffset
else:
chain = []
args = (size, signed, value)
def count():
for x in args:
if StaticUtils.isIterable(x):
return len(x)
raise ValueError()
for index in range(count()):
params = tuple(v[index] if StaticUtils.isIterable(v) else v for v in args)
chain.append(Field(
params[0],
previousField = chain[index - 1] if index else None,
signed = params[1],
value = params[2]))
return chain
@staticmethod
def setEncoding(encoding):
Field.__encoding = encoding
def _get(self, buf, offset, byteorder):
i = offset + self.__offset
j = i + self.__size
if self.__format == None:
self.__value = buf[i:j]
else:
isStr = self.__format[0].isdigit()
if len(self.__format) != 2 or isStr:
self.__value = unpack_from(f"{byteorder}{self.__format}", buf, i)[0]
if isStr:
self.__value = self.__value.decode(Field.__encoding)
else:
self.__value = buf[i:j]
for _ in range(int(self.__format[1])):
if byteorder == ">":
self.__value[:0] = [0]
else:
self.__value.append(0)
self.__value = unpack_from(f"{byteorder}{self.__format[0]}", self.__value, 0)[0]
return self
def _set(self, buf, offset, byteorder):
i = offset + self.__offset
j = i + self.__size
if self.__format == None:
buf[i:j] = self.__value
elif len(self.__format) != 2 or self.__format[0].isdigit():
pack_into(f"{byteorder}{self.__format}", buf, i, self.__value)
else:
fmt = f"{byteorder}{self.__format[0]}"
tmpbuf = bytearray(calcsize(fmt))
pack_into(fmt, tmpbuf, 0, self.__value)
for _ in range(int(self.__format[1])):
tmpbuf.pop(0 if byteorder == ">" else -1)
buf[i:j] = tmpbuf
class UnsignedField1(Field):
def __init__(self, size = None, **kw):
# = The prototype was invalid, so backward compatibility... = #
if "value" not in kw:
kw["value"] = size
super().__init__(size = 1, signed = False, value = kw["value"])
| from commonutils import StaticUtils
from struct import calcsize, pack_into, unpack_from
class Field:
__encoding = "utf-8"
__formats = {
1: ["B"],
2: ["H"],
3: ["L1"],
4: ["L"],
5: ["Q3"],
6: ["Q2"],
7: ["Q1"],
8: ["Q"]
}
for v in __formats.values():
v.append(v[0].lower())
def __init__(self, size = None, **kw):
previousField = kw.get("previousField")
signed = kw.get("signed")
value = kw.get("value")
valueIsStr = not size
self.__offset = previousField.nextOffset if previousField else 0
self.__size = len(value) if valueIsStr else size
self.__format = f"{self.__size}s" if valueIsStr else None if signed == None else Field.__formats[self.__size][+signed]
self.__value = value.encode(Field.__encoding) if valueIsStr else value
@property
def nextOffset(self):
return self.__offset + self.__size
@property
def offset(self):
return self.__offset
@offset.setter
def offset(self, offset):
self.__offset = offset
@property
def size(self):
return self.__size
@property
def value(self):
return self.__value
@value.setter
def value(self, value):
if self.__format != None:
self.__value = value
elif len(value) != self.__size:
raise ValueError()
else:
self.__value = value[:]
@staticmethod
def createChain(size = None, signed = None, value = None, fields = None):
chain = fields
if chain:
for i, field in enumerate(chain):
if i:
field.__offset = chain[i - 1].nextOffset
else:
chain = []
args = (size, signed, value)
def count():
for x in args:
if StaticUtils.isIterable(x):
return len(x)
raise ValueError()
for index in range(count()):
params = tuple(v[index] if StaticUtils.isIterable(v) else v for v in args)
chain.append(Field(
params[0],
previousField = chain[index - 1] if index else None,
signed = params[1],
value = params[2]))
return chain
@staticmethod
def setEncoding(encoding):
Field.__encoding = encoding
def _get(self, buf, offset, byteorder):
i = offset + self.__offset
j = i + self.__size
if self.__format == None:
self.__value = buf[i:j]
else:
isStr = self.__format[0].isdigit()
if len(self.__format) != 2 or isStr:
self.__value = unpack_from(f"{byteorder}{self.__format}", buf, i)[0]
if isStr:
self.__value = self.__value.decode(Field.__encoding)
else:
self.__value = buf[i:j]
for _ in range(int(self.__format[1])):
if byteorder == ">":
self.__value[:0] = [0]
else:
self.__value.append(0)
self.__value = unpack_from(f"{byteorder}{self.__format[0]}", self.__value, 0)[0]
return self
def _set(self, buf, offset, byteorder):
i = offset + self.__offset
j = i + self.__size
if self.__format == None:
buf[i:j] = self.__value
elif len(self.__format) != 2 or self.__format[0].isdigit():
pack_into(f"{byteorder}{self.__format}", buf, i, self.__value)
else:
fmt = f"{byteorder}{self.__format[0]}"
tmpbuf = bytearray(calcsize(fmt))
pack_into(fmt, tmpbuf, 0, self.__value)
for _ in range(int(self.__format[1])):
tmpbuf.pop(0 if byteorder == ">" else -1)
buf[i:j] = tmpbuf
class UnsignedField1(Field):
def __init__(self, size = None, **kw):
# = The prototype was invalid, so backward compatibility... = #
if "value" not in kw:
kw["value"] = size
super().__init__(size = 1, signed = False, value = kw["value"])
| en | 0.949692 | # = The prototype was invalid, so backward compatibility... = # | 2.21107 | 2 |
arekit/contrib/experiment_rusentrel/exp_sl/opinions.py | nicolay-r/AREk | 18 | 6619407 | <filename>arekit/contrib/experiment_rusentrel/exp_sl/opinions.py
import logging
from arekit.common.experiment.api.ctx_base import DataIO
from arekit.common.experiment.api.io_utils import BaseIOUtils
from arekit.common.experiment.api.ops_opin import OpinionOperations
from arekit.common.experiment.data_type import DataType
from arekit.common.opinions.collection import OpinionCollection
from arekit.contrib.experiment_rusentrel.labels.formatters.neut_label import ExperimentNeutralLabelsFormatter
from arekit.contrib.experiment_rusentrel.labels.formatters.rusentrel import RuSentRelExperimentLabelsFormatter
from arekit.contrib.source.rusentrel.io_utils import RuSentRelVersions
from arekit.contrib.source.rusentrel.opinions.collection import RuSentRelOpinionCollection
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class RuSentrelOpinionOperations(OpinionOperations):
def __init__(self, experiment_data, experiment_io, get_synonyms_func, version):
assert(isinstance(experiment_data, DataIO))
assert(isinstance(version, RuSentRelVersions))
super(RuSentrelOpinionOperations, self).__init__()
self.__get_synonyms_func = get_synonyms_func
self.__version = version
self.__experiment_io = experiment_io
self.__result_labels_fmt = RuSentRelExperimentLabelsFormatter()
self.__neutral_labels_fmt = ExperimentNeutralLabelsFormatter()
@property
def LabelsFormatter(self):
return self.__neutral_labels_fmt
# region CVBasedOperations
def iter_opinions_for_extraction(self, doc_id, data_type):
collections = []
# Reading automatically annotated collection of neutral opinions.
auto_neutral = self.__experiment_io.read_opinion_collection(
target=self.__experiment_io.create_result_opinion_collection_target(
doc_id=doc_id,
data_type=data_type,
check_existance=True),
labels_formatter=self.__neutral_labels_fmt,
create_collection_func=self.__create_collection)
if data_type == DataType.Train:
# Providing neutral and sentiment.
if auto_neutral is not None:
collections.append(auto_neutral)
# Providing sentiment opinions.
etalon = self.get_etalon_opinion_collection(doc_id=doc_id)
collections.append(etalon)
elif data_type == DataType.Test:
# Providing neutrally labeled only
collections.append(auto_neutral)
for collection in collections:
for opinion in collection:
yield opinion
def get_etalon_opinion_collection(self, doc_id):
assert(isinstance(doc_id, int))
opins_iter = RuSentRelOpinionCollection.iter_opinions_from_doc(
doc_id=doc_id,
labels_fmt=self.__result_labels_fmt,
version=self.__version)
return self.__create_collection(opins_iter)
def create_opinion_collection(self, opinions):
return self.__create_collection(opinions)
def get_result_opinion_collection(self, doc_id, data_type, epoch_index):
""" Since evaluation supported only for neural networks,
we need to guarantee the presence of a function that returns filepath
by using isinstance command.
"""
assert(isinstance(self.__experiment_io, BaseIOUtils))
return self.__experiment_io.read_opinion_collection(
target=self.__experiment_io.create_result_opinion_collection_target(
doc_id=doc_id,
data_type=data_type,
epoch_index=epoch_index),
labels_formatter=self.__result_labels_fmt,
create_collection_func=lambda opinions: self.__create_collection(opinions))
# endregion
# region private provider methods
def __create_collection(self, opinions=None):
return OpinionCollection(opinions=[] if opinions is None else opinions,
synonyms=self.__get_synonyms_func(),
error_on_duplicates=True,
error_on_synonym_end_missed=True)
# endregion | <filename>arekit/contrib/experiment_rusentrel/exp_sl/opinions.py
import logging
from arekit.common.experiment.api.ctx_base import DataIO
from arekit.common.experiment.api.io_utils import BaseIOUtils
from arekit.common.experiment.api.ops_opin import OpinionOperations
from arekit.common.experiment.data_type import DataType
from arekit.common.opinions.collection import OpinionCollection
from arekit.contrib.experiment_rusentrel.labels.formatters.neut_label import ExperimentNeutralLabelsFormatter
from arekit.contrib.experiment_rusentrel.labels.formatters.rusentrel import RuSentRelExperimentLabelsFormatter
from arekit.contrib.source.rusentrel.io_utils import RuSentRelVersions
from arekit.contrib.source.rusentrel.opinions.collection import RuSentRelOpinionCollection
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class RuSentrelOpinionOperations(OpinionOperations):
def __init__(self, experiment_data, experiment_io, get_synonyms_func, version):
assert(isinstance(experiment_data, DataIO))
assert(isinstance(version, RuSentRelVersions))
super(RuSentrelOpinionOperations, self).__init__()
self.__get_synonyms_func = get_synonyms_func
self.__version = version
self.__experiment_io = experiment_io
self.__result_labels_fmt = RuSentRelExperimentLabelsFormatter()
self.__neutral_labels_fmt = ExperimentNeutralLabelsFormatter()
@property
def LabelsFormatter(self):
return self.__neutral_labels_fmt
# region CVBasedOperations
def iter_opinions_for_extraction(self, doc_id, data_type):
collections = []
# Reading automatically annotated collection of neutral opinions.
auto_neutral = self.__experiment_io.read_opinion_collection(
target=self.__experiment_io.create_result_opinion_collection_target(
doc_id=doc_id,
data_type=data_type,
check_existance=True),
labels_formatter=self.__neutral_labels_fmt,
create_collection_func=self.__create_collection)
if data_type == DataType.Train:
# Providing neutral and sentiment.
if auto_neutral is not None:
collections.append(auto_neutral)
# Providing sentiment opinions.
etalon = self.get_etalon_opinion_collection(doc_id=doc_id)
collections.append(etalon)
elif data_type == DataType.Test:
# Providing neutrally labeled only
collections.append(auto_neutral)
for collection in collections:
for opinion in collection:
yield opinion
def get_etalon_opinion_collection(self, doc_id):
assert(isinstance(doc_id, int))
opins_iter = RuSentRelOpinionCollection.iter_opinions_from_doc(
doc_id=doc_id,
labels_fmt=self.__result_labels_fmt,
version=self.__version)
return self.__create_collection(opins_iter)
def create_opinion_collection(self, opinions):
return self.__create_collection(opinions)
def get_result_opinion_collection(self, doc_id, data_type, epoch_index):
""" Since evaluation supported only for neural networks,
we need to guarantee the presence of a function that returns filepath
by using isinstance command.
"""
assert(isinstance(self.__experiment_io, BaseIOUtils))
return self.__experiment_io.read_opinion_collection(
target=self.__experiment_io.create_result_opinion_collection_target(
doc_id=doc_id,
data_type=data_type,
epoch_index=epoch_index),
labels_formatter=self.__result_labels_fmt,
create_collection_func=lambda opinions: self.__create_collection(opinions))
# endregion
# region private provider methods
def __create_collection(self, opinions=None):
return OpinionCollection(opinions=[] if opinions is None else opinions,
synonyms=self.__get_synonyms_func(),
error_on_duplicates=True,
error_on_synonym_end_missed=True)
# endregion | en | 0.783107 | # region CVBasedOperations # Reading automatically annotated collection of neutral opinions. # Providing neutral and sentiment. # Providing sentiment opinions. # Providing neutrally labeled only Since evaluation supported only for neural networks, we need to guarantee the presence of a function that returns filepath by using isinstance command. # endregion # region private provider methods # endregion | 1.717668 | 2 |
root/apps/portfolio/admin.py | auzigog/jbrinkerhoff.com | 1 | 6619408 | <filename>root/apps/portfolio/admin.py
from django.contrib import admin
from portfolio import models
admin.site.register(models.TextSnippet)
admin.site.register(models.Quote) | <filename>root/apps/portfolio/admin.py
from django.contrib import admin
from portfolio import models
admin.site.register(models.TextSnippet)
admin.site.register(models.Quote) | none | 1 | 1.285987 | 1 | |
nano/nano/doctype/commission_payment/commission_payment.py | erpcloudsystems/nano | 0 | 6619409 | <reponame>erpcloudsystems/nano
# Copyright (c) 2021, ERP Cloud Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext, json
from frappe.utils import cstr, flt, fmt_money, formatdate, getdate, nowdate, cint, get_link_to_form
from frappe import msgprint, _, scrub
from erpnext.controllers.accounts_controller import AccountsController
from dateutil.relativedelta import relativedelta
from erpnext.accounts.utils import get_balance_on, get_stock_accounts, get_stock_and_account_balance, \
get_account_currency, check_if_stock_and_account_balance_synced
from erpnext.accounts.party import get_party_account
from erpnext.hr.doctype.expense_claim.expense_claim import update_reimbursed_amount
from erpnext.accounts.doctype.invoice_discounting.invoice_discounting \
import get_party_account_based_on_invoice_discounting
from erpnext.accounts.deferred_revenue import get_deferred_booking_accounts
from frappe.model.document import Document
from six import string_types, iteritems
class CommissionPayment(Document):
pass
def validate(self):
if self.total_payable ==0:
self.get_details()
def on_submit(self):
if self.pay_to == "Sales Partner":
self.update_invoice_partner1()
self.make_jv_partner()
elif self.pay_to == "Sales Manager":
self.update_invoice_manager1()
self.make_jv_manager()
def on_cancel(self):
if self.pay_to == "Sales Partner":
self.update_invoice_partner0()
elif self.pay_to == "Sales Manager":
self.update_invoice_manager0()
@frappe.whitelist()
def get_details(self):
if self.pay_to =="Sales Partner":
invoices =frappe.db.sql(""" select name as name ,
customer as customer,
posting_date as posting_date,
net_total as net_total,
outstanding_amount as outstanding,
sales_partner_commission as commissions
from `tabSales Invoice`
where
docstatus = 1
and paid = 0
and sales_partner = %s
and outstanding_amount = 0
and posting_date > '2019-12-31'
and sales_partner_commission != 0""", self.sales_partner, as_dict=True)
for comm in invoices:
row = self.append('commission_details', {})
row.sales_invoice = comm.name
row.customer = comm.customer
row.posting_date = comm.posting_date
row.net_total = comm.net_total
row.outstanding = comm.outstanding
row.commissions = comm.commissions
elif self.pay_to =="Sales Manager":
invoices = frappe.db.sql(""" select name as name ,
customer as customer,
posting_date as posting_date,
net_total as net_total,
outstanding_amount as outstanding,
sales_manager_commission as commissions
from `tabSales Invoice`
where
docstatus=1
and paid2 =0
and sales_manager = %s
and outstanding_amount = 0
and posting_date > '2020-12-31'
and sales_manager_commission != 0""", self.sales_manager, as_dict=True)
for comm in invoices:
row = self.append('commission_details', {})
row.sales_invoice = comm.name
row.customer = comm.customer
row.posting_date = comm.posting_date
row.net_total = comm.net_total
row.outstanding = comm.outstanding
row.commissions = comm.commissions
def update_invoice_partner1(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid = 1 where name = %s """,inv.sales_invoice)
def update_invoice_partner0(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid = 0 where name = %s """,inv.sales_invoice)
def update_invoice_manager1(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid2 = 1 where name = %s """,inv.sales_invoice)
def update_invoice_manager0(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid2 = 0 where name = %s """,inv.sales_invoice)
@frappe.whitelist()
def make_jv_manager(self):
company = frappe.db.get_value("Company", frappe.db.get_value("Global Defaults", None, "default_company"),"company_name")
accounts = [
{
"account": self.sales_manager_account,
"debit_in_account_currency": self.total_payable,
"exchange_rate": "1"
},
{
"account": self.payment_account,
"credit_in_account_currency": self.total_payable,
"exchange_rate": "1"
}
]
doc = frappe.get_doc({
"doctype": "Journal Entry",
"voucher_type": "Journal Entry",
"commission_payment": self.name,
"company": company,
"posting_date": self.posting_date,
"accounts": accounts,
"cheque_no": self.name,
"cheque_date": self.posting_date,
"user_remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner),
"total_debit": self.total_payable,
"total_credit": self.total_payable,
"remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner)
})
doc.insert()
doc.submit()
def make_jv_partner(self):
company = frappe.db.get_value("Company", frappe.db.get_value("Global Defaults", None, "default_company"),"company_name")
accounts = [
{
"account": self.sales_partner_account,
"debit_in_account_currency": self.total_payable,
"exchange_rate": "1"
},
{
"account": self.payment_account,
"credit_in_account_currency": self.total_payable,
"exchange_rate": "1"
}
]
doc = frappe.get_doc({
"doctype": "Journal Entry",
"voucher_type": "Journal Entry",
"commission_payment": self.name,
"company": company,
"posting_date": self.posting_date,
"accounts": accounts,
"cheque_no": self.name,
"cheque_date": self.posting_date,
"user_remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner),
"total_debit": self.total_payable,
"total_credit": self.total_payable,
"remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner)
})
doc.insert()
doc.submit()
| # Copyright (c) 2021, ERP Cloud Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext, json
from frappe.utils import cstr, flt, fmt_money, formatdate, getdate, nowdate, cint, get_link_to_form
from frappe import msgprint, _, scrub
from erpnext.controllers.accounts_controller import AccountsController
from dateutil.relativedelta import relativedelta
from erpnext.accounts.utils import get_balance_on, get_stock_accounts, get_stock_and_account_balance, \
get_account_currency, check_if_stock_and_account_balance_synced
from erpnext.accounts.party import get_party_account
from erpnext.hr.doctype.expense_claim.expense_claim import update_reimbursed_amount
from erpnext.accounts.doctype.invoice_discounting.invoice_discounting \
import get_party_account_based_on_invoice_discounting
from erpnext.accounts.deferred_revenue import get_deferred_booking_accounts
from frappe.model.document import Document
from six import string_types, iteritems
class CommissionPayment(Document):
pass
def validate(self):
if self.total_payable ==0:
self.get_details()
def on_submit(self):
if self.pay_to == "Sales Partner":
self.update_invoice_partner1()
self.make_jv_partner()
elif self.pay_to == "Sales Manager":
self.update_invoice_manager1()
self.make_jv_manager()
def on_cancel(self):
if self.pay_to == "Sales Partner":
self.update_invoice_partner0()
elif self.pay_to == "Sales Manager":
self.update_invoice_manager0()
@frappe.whitelist()
def get_details(self):
if self.pay_to =="Sales Partner":
invoices =frappe.db.sql(""" select name as name ,
customer as customer,
posting_date as posting_date,
net_total as net_total,
outstanding_amount as outstanding,
sales_partner_commission as commissions
from `tabSales Invoice`
where
docstatus = 1
and paid = 0
and sales_partner = %s
and outstanding_amount = 0
and posting_date > '2019-12-31'
and sales_partner_commission != 0""", self.sales_partner, as_dict=True)
for comm in invoices:
row = self.append('commission_details', {})
row.sales_invoice = comm.name
row.customer = comm.customer
row.posting_date = comm.posting_date
row.net_total = comm.net_total
row.outstanding = comm.outstanding
row.commissions = comm.commissions
elif self.pay_to =="Sales Manager":
invoices = frappe.db.sql(""" select name as name ,
customer as customer,
posting_date as posting_date,
net_total as net_total,
outstanding_amount as outstanding,
sales_manager_commission as commissions
from `tabSales Invoice`
where
docstatus=1
and paid2 =0
and sales_manager = %s
and outstanding_amount = 0
and posting_date > '2020-12-31'
and sales_manager_commission != 0""", self.sales_manager, as_dict=True)
for comm in invoices:
row = self.append('commission_details', {})
row.sales_invoice = comm.name
row.customer = comm.customer
row.posting_date = comm.posting_date
row.net_total = comm.net_total
row.outstanding = comm.outstanding
row.commissions = comm.commissions
def update_invoice_partner1(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid = 1 where name = %s """,inv.sales_invoice)
def update_invoice_partner0(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid = 0 where name = %s """,inv.sales_invoice)
def update_invoice_manager1(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid2 = 1 where name = %s """,inv.sales_invoice)
def update_invoice_manager0(self):
for inv in self.commission_details:
frappe.db.sql(""" update `tabSales Invoice` set paid2 = 0 where name = %s """,inv.sales_invoice)
@frappe.whitelist()
def make_jv_manager(self):
company = frappe.db.get_value("Company", frappe.db.get_value("Global Defaults", None, "default_company"),"company_name")
accounts = [
{
"account": self.sales_manager_account,
"debit_in_account_currency": self.total_payable,
"exchange_rate": "1"
},
{
"account": self.payment_account,
"credit_in_account_currency": self.total_payable,
"exchange_rate": "1"
}
]
doc = frappe.get_doc({
"doctype": "Journal Entry",
"voucher_type": "Journal Entry",
"commission_payment": self.name,
"company": company,
"posting_date": self.posting_date,
"accounts": accounts,
"cheque_no": self.name,
"cheque_date": self.posting_date,
"user_remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner),
"total_debit": self.total_payable,
"total_credit": self.total_payable,
"remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner)
})
doc.insert()
doc.submit()
def make_jv_partner(self):
company = frappe.db.get_value("Company", frappe.db.get_value("Global Defaults", None, "default_company"),"company_name")
accounts = [
{
"account": self.sales_partner_account,
"debit_in_account_currency": self.total_payable,
"exchange_rate": "1"
},
{
"account": self.payment_account,
"credit_in_account_currency": self.total_payable,
"exchange_rate": "1"
}
]
doc = frappe.get_doc({
"doctype": "Journal Entry",
"voucher_type": "Journal Entry",
"commission_payment": self.name,
"company": company,
"posting_date": self.posting_date,
"accounts": accounts,
"cheque_no": self.name,
"cheque_date": self.posting_date,
"user_remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner),
"total_debit": self.total_payable,
"total_credit": self.total_payable,
"remark": _('Accrual Journal Entry for Sales Commission for {0}').format(self.sales_partner)
})
doc.insert()
doc.submit() | en | 0.903011 | # Copyright (c) 2021, ERP Cloud Systems and contributors # For license information, please see license.txt select name as name , customer as customer, posting_date as posting_date, net_total as net_total, outstanding_amount as outstanding, sales_partner_commission as commissions from `tabSales Invoice` where docstatus = 1 and paid = 0 and sales_partner = %s and outstanding_amount = 0 and posting_date > '2019-12-31' and sales_partner_commission != 0 select name as name , customer as customer, posting_date as posting_date, net_total as net_total, outstanding_amount as outstanding, sales_manager_commission as commissions from `tabSales Invoice` where docstatus=1 and paid2 =0 and sales_manager = %s and outstanding_amount = 0 and posting_date > '2020-12-31' and sales_manager_commission != 0 update `tabSales Invoice` set paid = 1 where name = %s update `tabSales Invoice` set paid = 0 where name = %s update `tabSales Invoice` set paid2 = 1 where name = %s update `tabSales Invoice` set paid2 = 0 where name = %s | 1.810229 | 2 |
mono/api/mono_user.py | iameo/monopy | 2 | 6619410 | <reponame>iameo/monopy
from .base_api import BaseAPI
class UserMono(BaseAPI):
def transaction(self, **kwargs):
"""
Fetch all transactions done by Account ID.
params:
- id: Account ID
- start: start period of the transactions eg. 01-10-2020
- end: end period of the transactions eg. 07-10-2020
- narrationstringfilters all transactions by narration e.g Uber transactions
- type: filters transactions by debit or credit
- paginate: true or false (If you want to receive the data all at once or you want it paginated)
- limit: limit the number of transactions returned per API call
"""
id = kwargs.pop('id')
url = self._BASE_URL + f'/accounts/{id}/transactions'
status, response = self._make_request('GET', url, params=kwargs)
return status, response
def income(self, **kwargs):
'''
This resource will return income information on the account. (Beta)
params:
- id: Account ID returned from token exchange
response:
Type: INCOME (Regular income) or AVG_INCOME (Irregular income)
Amount: The monthly salary/income
Confidence: Confidence value in the predicted income
'''
id = kwargs.pop('id')
url = self._BASE_URL + f'accounts/{id}/income'
status, response = self._make_request('GET', url)
return status, response
def identity(self, **kwargs):
'''
This resource provides a high level overview of an account identity data.
params:
- id: Account id from token exchange
note:
Not all banks will return the identity information. See here https://docs.mono.co/docs/bvn-coverage
'''
id = kwargs.pop('id')
url = self._BASE_URL + f'/accounts/{id}/identity'
status, response = self._make_request('GET', url)
return status, response
| from .base_api import BaseAPI
class UserMono(BaseAPI):
def transaction(self, **kwargs):
"""
Fetch all transactions done by Account ID.
params:
- id: Account ID
- start: start period of the transactions eg. 01-10-2020
- end: end period of the transactions eg. 07-10-2020
- narrationstringfilters all transactions by narration e.g Uber transactions
- type: filters transactions by debit or credit
- paginate: true or false (If you want to receive the data all at once or you want it paginated)
- limit: limit the number of transactions returned per API call
"""
id = kwargs.pop('id')
url = self._BASE_URL + f'/accounts/{id}/transactions'
status, response = self._make_request('GET', url, params=kwargs)
return status, response
def income(self, **kwargs):
'''
This resource will return income information on the account. (Beta)
params:
- id: Account ID returned from token exchange
response:
Type: INCOME (Regular income) or AVG_INCOME (Irregular income)
Amount: The monthly salary/income
Confidence: Confidence value in the predicted income
'''
id = kwargs.pop('id')
url = self._BASE_URL + f'accounts/{id}/income'
status, response = self._make_request('GET', url)
return status, response
def identity(self, **kwargs):
'''
This resource provides a high level overview of an account identity data.
params:
- id: Account id from token exchange
note:
Not all banks will return the identity information. See here https://docs.mono.co/docs/bvn-coverage
'''
id = kwargs.pop('id')
url = self._BASE_URL + f'/accounts/{id}/identity'
status, response = self._make_request('GET', url)
return status, response | en | 0.71859 | Fetch all transactions done by Account ID. params: - id: Account ID - start: start period of the transactions eg. 01-10-2020 - end: end period of the transactions eg. 07-10-2020 - narrationstringfilters all transactions by narration e.g Uber transactions - type: filters transactions by debit or credit - paginate: true or false (If you want to receive the data all at once or you want it paginated) - limit: limit the number of transactions returned per API call This resource will return income information on the account. (Beta) params: - id: Account ID returned from token exchange response: Type: INCOME (Regular income) or AVG_INCOME (Irregular income) Amount: The monthly salary/income Confidence: Confidence value in the predicted income This resource provides a high level overview of an account identity data. params: - id: Account id from token exchange note: Not all banks will return the identity information. See here https://docs.mono.co/docs/bvn-coverage | 2.710573 | 3 |
archive/LearnTK/activityIndicatorProgressbar.py | UpBeatMan/Abschlussarbeit | 0 | 6619411 | from tkinter.ttk import Progressbar, Style
from tkinter import Tk, Label
from time import sleep
class LoadingSplash:
def __init__(self):
# setting root window:
self.root = Tk()
self.root.title("Progressbar")
self.root.config() # bg="#1F2732"
# self.root.attributes("-fullscreen", True)
self.root.geometry("560x380+300+150")
# progressbar theme:
theme = Style()
theme.theme_use("winnative")
theme.configure("green.Horizontal.TProgressbar", background="green")
# loading text:
txt = Label(self.root, text="Loading...", fg="green") # bg="#1F2732"
txt.place(x=200, y=140)
# txt.place(x=520, y=330)
# progressbar:
self.bar = Progressbar(
self.root,
style="green.Horizontal.TProgressbar",
orient="horizontal",
mode="indeterminate",
length="180",
)
self.bar.place(x=200, y=170)
# self.bar.place(x=500, y=360)
# update root to see animation:
self.root.update()
self.play_animation()
# window in mainloop:
self.root.mainloop()
# progressbar animation:
def play_animation(self):
for i in range(2000):
self.bar["value"] += 1
self.root.update_idletasks()
sleep(0.01)
else:
self.root.destroy()
exit(0)
if __name__ == "__main__":
LoadingSplash()
| from tkinter.ttk import Progressbar, Style
from tkinter import Tk, Label
from time import sleep
class LoadingSplash:
def __init__(self):
# setting root window:
self.root = Tk()
self.root.title("Progressbar")
self.root.config() # bg="#1F2732"
# self.root.attributes("-fullscreen", True)
self.root.geometry("560x380+300+150")
# progressbar theme:
theme = Style()
theme.theme_use("winnative")
theme.configure("green.Horizontal.TProgressbar", background="green")
# loading text:
txt = Label(self.root, text="Loading...", fg="green") # bg="#1F2732"
txt.place(x=200, y=140)
# txt.place(x=520, y=330)
# progressbar:
self.bar = Progressbar(
self.root,
style="green.Horizontal.TProgressbar",
orient="horizontal",
mode="indeterminate",
length="180",
)
self.bar.place(x=200, y=170)
# self.bar.place(x=500, y=360)
# update root to see animation:
self.root.update()
self.play_animation()
# window in mainloop:
self.root.mainloop()
# progressbar animation:
def play_animation(self):
for i in range(2000):
self.bar["value"] += 1
self.root.update_idletasks()
sleep(0.01)
else:
self.root.destroy()
exit(0)
if __name__ == "__main__":
LoadingSplash()
| en | 0.527983 | # setting root window: # bg="#1F2732" # self.root.attributes("-fullscreen", True) # progressbar theme: # loading text: # bg="#1F2732" # txt.place(x=520, y=330) # progressbar: # self.bar.place(x=500, y=360) # update root to see animation: # window in mainloop: # progressbar animation: | 3.505867 | 4 |
cloudscale/lib/region.py | resmo/python-cloudscale | 6 | 6619412 | from . import CloudscaleBase
class Region(CloudscaleBase):
def __init__(self):
super().__init__()
self.resource = 'regions'
| from . import CloudscaleBase
class Region(CloudscaleBase):
def __init__(self):
super().__init__()
self.resource = 'regions'
| none | 1 | 1.692103 | 2 | |
regexs.py | movy-niaj/python-regex | 0 | 6619413 | <gh_stars>0
from colorama import Fore, Style, Back
import os
def regex():
op = input('If you want to attach a file,type "file.open", else press Enter: ')
if op == "file.open":
path=input('Enter file path:')
fl = open(path,"r+")
fl.seek(0,os.SEEK_SET)
l=fl.read()
print("\nString read from the file:")
print(l)
k = input('Enter key:')
p=input('If you want to replace the key in text with something else, press 0:\n')
if p =='0':
r=input('Enter replacement character:')
fl.seek(0, os.SEEK_SET)
l2= map(lambda x: r if(x==k) else x,l)
l2=''.join(l2)
fl.write(l2)
print("File has been written.")
print('Original string:')
highlight(k,l,k)
print('Altered string:')
highlight(k,l,r)
else:
highlight(k,l,k)
else:
k = input('Enter key:')
l=input('Enter text:\n')
p=input('If you want to replace the key in text with something else, press 0:\n')
if p =='0':
r=input('replacement character:')
print('Original String:')
highlight(k,l,k)
print('Altered string:')
highlight(k,l,r)
else:
highlight(k,l,k)
def highlight(k,l,r):
if l.startswith(k) == False and l.endswith(k) == False:
p= l.split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s.join(p))
elif(l.startswith(k)==False):
p= l[0:-1].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s.join(p)+s)
elif(l.endswith(k)==False):
p= l[1:].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s+s.join(p))
else:
p= l[1:-1].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s+s.join(p)+s)
regex()
| from colorama import Fore, Style, Back
import os
def regex():
op = input('If you want to attach a file,type "file.open", else press Enter: ')
if op == "file.open":
path=input('Enter file path:')
fl = open(path,"r+")
fl.seek(0,os.SEEK_SET)
l=fl.read()
print("\nString read from the file:")
print(l)
k = input('Enter key:')
p=input('If you want to replace the key in text with something else, press 0:\n')
if p =='0':
r=input('Enter replacement character:')
fl.seek(0, os.SEEK_SET)
l2= map(lambda x: r if(x==k) else x,l)
l2=''.join(l2)
fl.write(l2)
print("File has been written.")
print('Original string:')
highlight(k,l,k)
print('Altered string:')
highlight(k,l,r)
else:
highlight(k,l,k)
else:
k = input('Enter key:')
l=input('Enter text:\n')
p=input('If you want to replace the key in text with something else, press 0:\n')
if p =='0':
r=input('replacement character:')
print('Original String:')
highlight(k,l,k)
print('Altered string:')
highlight(k,l,r)
else:
highlight(k,l,k)
def highlight(k,l,r):
if l.startswith(k) == False and l.endswith(k) == False:
p= l.split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s.join(p))
elif(l.startswith(k)==False):
p= l[0:-1].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s.join(p)+s)
elif(l.endswith(k)==False):
p= l[1:].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s+s.join(p))
else:
p= l[1:-1].split(k)
s=f'{Back.BLUE}{r}{Style.RESET_ALL}'
print(s+s.join(p)+s)
regex() | none | 1 | 3.468112 | 3 | |
171_excel_sheet_column_number.py | claytonjwong/leetcode-py | 1 | 6619414 | <filename>171_excel_sheet_column_number.py
#
# 171. Excel Sheet Column Number
#
# Q: https://leetcode.com/problems/excel-sheet-column-number/
# A: https://leetcode.com/problems/excel-sheet-column-number/discuss/594372/Javascript-Python3-C%2B%2B-1-Liners
#
class Solution:
def titleToNumber(self, s: str) -> int:
return reduce(lambda a, b: a + b, [26 ** i * (ord(c) - 64) for i, c in enumerate(reversed([c for c in s]))])
| <filename>171_excel_sheet_column_number.py
#
# 171. Excel Sheet Column Number
#
# Q: https://leetcode.com/problems/excel-sheet-column-number/
# A: https://leetcode.com/problems/excel-sheet-column-number/discuss/594372/Javascript-Python3-C%2B%2B-1-Liners
#
class Solution:
def titleToNumber(self, s: str) -> int:
return reduce(lambda a, b: a + b, [26 ** i * (ord(c) - 64) for i, c in enumerate(reversed([c for c in s]))])
| en | 0.512737 | # # 171. Excel Sheet Column Number # # Q: https://leetcode.com/problems/excel-sheet-column-number/ # A: https://leetcode.com/problems/excel-sheet-column-number/discuss/594372/Javascript-Python3-C%2B%2B-1-Liners # | 3.178532 | 3 |
damast/document_fragment.py | UniStuttgart-VISUS/damast | 2 | 6619415 | <reponame>UniStuttgart-VISUS/damast<filename>damast/document_fragment.py<gh_stars>1-10
import html5lib
from xml.dom.minidom import Text
import re
def extract_fragment(content, start, end):
'''
Extract the text between `start` and `end` from the HTML document
`content`, including all tags this range overlaps with, but excluding
others as well as text in those tags not in the range.
'''
document = html5lib.parse(content, treebuilder='dom')
offset, node = _handle(document, document, 0, start, end)
walker = html5lib.getTreeWalker("dom")
stream = walker(node)
s = html5lib.serializer.HTMLSerializer(omit_optional_tags=False)
output = ''.join(s.serialize(stream))
return output
WORD = re.compile('\\b\\w+\\b')
def tokenize_html_document(content):
document = html5lib.parse(content, treebuilder='dom')
_, tokens = _tokenize(document, 0)
return tokens
def tokenize_text_document(content):
tokens = []
for tok in re.finditer(WORD, content):
text = tok.group(0)
start = tok.span()[0] + offset
end = tok.span()[1] + offset
tokens.append((text,start,end))
return tokens
def _tokenize(node, offset):
inner_offset = 0
inner_tokens = []
if type(node) == Text:
for tok in re.finditer(WORD, node.data):
text = tok.group(0)
start = tok.span()[0] + offset
end = tok.span()[1] + offset
inner_tokens.append((text,start,end))
return len(node.data), inner_tokens
else:
for child in node.childNodes:
l,t = _tokenize(child, offset + inner_offset)
inner_offset += l
inner_tokens.extend(t)
return inner_offset, inner_tokens
def _handle(root, node, offset, start, end):
if type(node) == Text:
if offset <= start and offset + len(node.data) <= start:
return len(node.data), None
elif offset <= start and offset + len(node.data) > start:
text = node.data[start-offset:end-offset]
return len(node.data), root.createTextNode(text)
elif offset > start and offset <= end:
text = node.data[:end-offset]
return len(node.data), root.createTextNode(text)
else:
return len(node.data), None
else:
length = 0
startOffset = offset
toRemove = []
for n in node.childNodes:
delta, newNode = _handle(root, n, offset + length, start, end)
length += delta
if newNode is None:
toRemove.append(n)
else:
node.replaceChild(newNode, n)
for r in toRemove:
node.removeChild(r)
endOffset = offset + length
if startOffset > end or endOffset <= start:
return length, None
else:
return length, node
def document_length(content):
'''
Return the character length of an HTML document.
'''
document = html5lib.parse(content, treebuilder='dom')
return _doclen(document)
def _doclen(node):
if type(node) == Text:
return len(node.data)
return sum(map(_doclen, node.childNodes))
def inner_text(content):
'''
Return only concatenated text nodes.
'''
document = html5lib.parse(content, treebuilder='dom')
return _docstr(document)
def _docstr(node):
if type(node) == Text:
return node.data
return ''.join(map(_docstr, node.childNodes))
| import html5lib
from xml.dom.minidom import Text
import re
def extract_fragment(content, start, end):
'''
Extract the text between `start` and `end` from the HTML document
`content`, including all tags this range overlaps with, but excluding
others as well as text in those tags not in the range.
'''
document = html5lib.parse(content, treebuilder='dom')
offset, node = _handle(document, document, 0, start, end)
walker = html5lib.getTreeWalker("dom")
stream = walker(node)
s = html5lib.serializer.HTMLSerializer(omit_optional_tags=False)
output = ''.join(s.serialize(stream))
return output
WORD = re.compile('\\b\\w+\\b')
def tokenize_html_document(content):
document = html5lib.parse(content, treebuilder='dom')
_, tokens = _tokenize(document, 0)
return tokens
def tokenize_text_document(content):
tokens = []
for tok in re.finditer(WORD, content):
text = tok.group(0)
start = tok.span()[0] + offset
end = tok.span()[1] + offset
tokens.append((text,start,end))
return tokens
def _tokenize(node, offset):
inner_offset = 0
inner_tokens = []
if type(node) == Text:
for tok in re.finditer(WORD, node.data):
text = tok.group(0)
start = tok.span()[0] + offset
end = tok.span()[1] + offset
inner_tokens.append((text,start,end))
return len(node.data), inner_tokens
else:
for child in node.childNodes:
l,t = _tokenize(child, offset + inner_offset)
inner_offset += l
inner_tokens.extend(t)
return inner_offset, inner_tokens
def _handle(root, node, offset, start, end):
if type(node) == Text:
if offset <= start and offset + len(node.data) <= start:
return len(node.data), None
elif offset <= start and offset + len(node.data) > start:
text = node.data[start-offset:end-offset]
return len(node.data), root.createTextNode(text)
elif offset > start and offset <= end:
text = node.data[:end-offset]
return len(node.data), root.createTextNode(text)
else:
return len(node.data), None
else:
length = 0
startOffset = offset
toRemove = []
for n in node.childNodes:
delta, newNode = _handle(root, n, offset + length, start, end)
length += delta
if newNode is None:
toRemove.append(n)
else:
node.replaceChild(newNode, n)
for r in toRemove:
node.removeChild(r)
endOffset = offset + length
if startOffset > end or endOffset <= start:
return length, None
else:
return length, node
def document_length(content):
'''
Return the character length of an HTML document.
'''
document = html5lib.parse(content, treebuilder='dom')
return _doclen(document)
def _doclen(node):
if type(node) == Text:
return len(node.data)
return sum(map(_doclen, node.childNodes))
def inner_text(content):
'''
Return only concatenated text nodes.
'''
document = html5lib.parse(content, treebuilder='dom')
return _docstr(document)
def _docstr(node):
if type(node) == Text:
return node.data
return ''.join(map(_docstr, node.childNodes)) | en | 0.921544 | Extract the text between `start` and `end` from the HTML document `content`, including all tags this range overlaps with, but excluding others as well as text in those tags not in the range. Return the character length of an HTML document. Return only concatenated text nodes. | 2.917747 | 3 |
utilities.py | animesh21/covid-vaccine-alerts | 0 | 6619416 | import sqlite3
from datetime import datetime
from constants import DATETIME_FORMAT, DB_NAME
def get_active_users(district_id=None):
"""
Returns list of dict with each dict containing data of an active user
:param district_id: int, district id of the district from which the users have to be returned
:return: list of user dict
"""
# get the db connection
with sqlite3.connect(DB_NAME) as con:
con.row_factory = sqlite3.Row # to get an sql row as a Python dictionary
cur = con.cursor()
if district_id:
sql_query = 'SELECT * FROM users WHERE is_active = ? AND district_id = ?'
active_users = [dict(row) for row in cur.execute(sql_query, (True, district_id))]
else:
sql_query = 'SELECT * FROM users WHERE is_active = ?'
active_users = [dict(row) for row in cur.execute(sql_query, (True, ))]
return active_users
def get_active_district_ids():
"""
Queries unique district ids of active users
:return: list of unique district ids
"""
# get the db connection
with sqlite3.connect(DB_NAME) as con:
con.row_factory = sqlite3.Row # to get an sql row as a Python dictionary
cur = con.cursor()
active_district_ids = [
dict(row)['district_id'] for row in
cur.execute('SELECT DISTINCT district_id FROM users WHERE is_active = ?', (True, )).fetchall()
]
return active_district_ids
def update_last_notified(user_id):
"""
Updates row with id=user_id in users table with current time as last_notified column value
:param user_id: id of the user for which last_notified is to be updated
:return: None
"""
now_str = datetime.now().strftime(DATETIME_FORMAT)
with sqlite3.connect(DB_NAME) as con:
cur = con.cursor()
cur.execute('UPDATE users SET last_notified = ? WHERE id = ?', (now_str, user_id))
| import sqlite3
from datetime import datetime
from constants import DATETIME_FORMAT, DB_NAME
def get_active_users(district_id=None):
"""
Returns list of dict with each dict containing data of an active user
:param district_id: int, district id of the district from which the users have to be returned
:return: list of user dict
"""
# get the db connection
with sqlite3.connect(DB_NAME) as con:
con.row_factory = sqlite3.Row # to get an sql row as a Python dictionary
cur = con.cursor()
if district_id:
sql_query = 'SELECT * FROM users WHERE is_active = ? AND district_id = ?'
active_users = [dict(row) for row in cur.execute(sql_query, (True, district_id))]
else:
sql_query = 'SELECT * FROM users WHERE is_active = ?'
active_users = [dict(row) for row in cur.execute(sql_query, (True, ))]
return active_users
def get_active_district_ids():
"""
Queries unique district ids of active users
:return: list of unique district ids
"""
# get the db connection
with sqlite3.connect(DB_NAME) as con:
con.row_factory = sqlite3.Row # to get an sql row as a Python dictionary
cur = con.cursor()
active_district_ids = [
dict(row)['district_id'] for row in
cur.execute('SELECT DISTINCT district_id FROM users WHERE is_active = ?', (True, )).fetchall()
]
return active_district_ids
def update_last_notified(user_id):
"""
Updates row with id=user_id in users table with current time as last_notified column value
:param user_id: id of the user for which last_notified is to be updated
:return: None
"""
now_str = datetime.now().strftime(DATETIME_FORMAT)
with sqlite3.connect(DB_NAME) as con:
cur = con.cursor()
cur.execute('UPDATE users SET last_notified = ? WHERE id = ?', (now_str, user_id))
| en | 0.8748 | Returns list of dict with each dict containing data of an active user :param district_id: int, district id of the district from which the users have to be returned :return: list of user dict # get the db connection # to get an sql row as a Python dictionary Queries unique district ids of active users :return: list of unique district ids # get the db connection # to get an sql row as a Python dictionary Updates row with id=user_id in users table with current time as last_notified column value :param user_id: id of the user for which last_notified is to be updated :return: None | 3.359689 | 3 |
packaging/github_action_version.py | HEXRD/hexrdgui | 13 | 6619417 | <reponame>HEXRD/hexrdgui<filename>packaging/github_action_version.py
# Script that takes the output of git describe --tag and a version component
# string 'full'|'major'|'minor'|'patch' and append the environment variable to
# the env file to set environment variable for that version component to be
# using within the github action workflow.
import sys
import re
import platform
import os
if len(sys.argv) != 3:
print('Please provide version string and component.')
sys.exit(1)
version = sys.argv[1]
component = sys.argv[2]
version_regex = re.compile(r'v?([0-9]*)\.([0-9]*)\.(.*)')
match = version_regex.match(version)
if match is None:
print('Invalid version string.')
sys.exit(2)
major = match.group(1)
minor = match.group(2)
patch = match.group(3)
version = '%s.%s.%s' % (major, minor, patch)
# Windows only allows 0 to 65534 in version string, we have to parse it further
if platform.system() == 'Windows':
parts = patch.split('-')
# If we are not on a tag
if len(parts) == 3:
patch = parts[0]
build = parts[1]
version = '%s.%s.%s.%s' % (major, minor, patch, build)
# Get the env file
if 'GITHUB_ENV' not in os.environ:
print('GITHUB_ENV not in environment.')
sys.exit(3)
github_env = os.environ['GITHUB_ENV']
with open(github_env, 'a') as fp:
if component == 'full':
fp.write('VERSION=%s\n' % version)
elif component == 'major':
fp.write('VERSION_MAJOR=%s\n' % major)
elif component == 'minor':
fp.write('VERSION_MINOR=%s\n' % minor)
elif component == 'patch':
fp.write('VERSION_PATCH=%s\n' % patch)
else:
print('Invalid version component.')
sys.exit(4)
| # Script that takes the output of git describe --tag and a version component
# string 'full'|'major'|'minor'|'patch' and append the environment variable to
# the env file to set environment variable for that version component to be
# using within the github action workflow.
import sys
import re
import platform
import os
if len(sys.argv) != 3:
print('Please provide version string and component.')
sys.exit(1)
version = sys.argv[1]
component = sys.argv[2]
version_regex = re.compile(r'v?([0-9]*)\.([0-9]*)\.(.*)')
match = version_regex.match(version)
if match is None:
print('Invalid version string.')
sys.exit(2)
major = match.group(1)
minor = match.group(2)
patch = match.group(3)
version = '%s.%s.%s' % (major, minor, patch)
# Windows only allows 0 to 65534 in version string, we have to parse it further
if platform.system() == 'Windows':
parts = patch.split('-')
# If we are not on a tag
if len(parts) == 3:
patch = parts[0]
build = parts[1]
version = '%s.%s.%s.%s' % (major, minor, patch, build)
# Get the env file
if 'GITHUB_ENV' not in os.environ:
print('GITHUB_ENV not in environment.')
sys.exit(3)
github_env = os.environ['GITHUB_ENV']
with open(github_env, 'a') as fp:
if component == 'full':
fp.write('VERSION=%s\n' % version)
elif component == 'major':
fp.write('VERSION_MAJOR=%s\n' % major)
elif component == 'minor':
fp.write('VERSION_MINOR=%s\n' % minor)
elif component == 'patch':
fp.write('VERSION_PATCH=%s\n' % patch)
else:
print('Invalid version component.')
sys.exit(4) | en | 0.796126 | # Script that takes the output of git describe --tag and a version component # string 'full'|'major'|'minor'|'patch' and append the environment variable to # the env file to set environment variable for that version component to be # using within the github action workflow. # Windows only allows 0 to 65534 in version string, we have to parse it further # If we are not on a tag # Get the env file | 2.522769 | 3 |
setup.py | aimms/sphinx-aimms-theme | 0 | 6619418 | <reponame>aimms/sphinx-aimms-theme
from distutils.core import setup
import setuptools
import sys
setup(
name = "sphinx_aimms_theme",
version = '0.1.40',
license = "MIT",
packages= ['sphinx_aimms_theme'],
url = "https://github.com/aimms/sphinx-aimms-theme",
description = 'AIMMS theme for Sphinx',
long_description='Please refer to https://github.com/aimms/sphinx-aimms-theme#readme',
author = "AIM<NAME>",
author_email = "<EMAIL>",
entry_points = {
'sphinx.html_themes': [
'sphinx_aimms_theme = sphinx_aimms_theme',
]
},
install_requires=[
'sphinx',
'sphinx_rtd_theme',
],
package_data={'sphinx_aimms_theme': [
'theme.conf',
'*.html',
'static/aimms_css/*.*',
'static/*.*',
'static/icons/*.*'
]},
include_package_data=True,
)
| from distutils.core import setup
import setuptools
import sys
setup(
name = "sphinx_aimms_theme",
version = '0.1.40',
license = "MIT",
packages= ['sphinx_aimms_theme'],
url = "https://github.com/aimms/sphinx-aimms-theme",
description = 'AIMMS theme for Sphinx',
long_description='Please refer to https://github.com/aimms/sphinx-aimms-theme#readme',
author = "AIM<NAME>",
author_email = "<EMAIL>",
entry_points = {
'sphinx.html_themes': [
'sphinx_aimms_theme = sphinx_aimms_theme',
]
},
install_requires=[
'sphinx',
'sphinx_rtd_theme',
],
package_data={'sphinx_aimms_theme': [
'theme.conf',
'*.html',
'static/aimms_css/*.*',
'static/*.*',
'static/icons/*.*'
]},
include_package_data=True,
) | none | 1 | 1.058692 | 1 | |
mp3_split.py | smutt/mp3_split | 0 | 6619419 | #!/usr/bin/env python
import sys
import signal
import subprocess
import argparse
###########
# GLOBALS #
###########
FFMPEG = "/usr/local/bin/ffmpeg"
###########
# CLASSES #
###########
class Chapter():
  """One chapter marker: a title plus start/end offsets in seconds."""

  def __init__(self, start, end):
    # The title is filled in later, while parsing chapter metadata lines.
    self.title = ""
    self.start = start
    self.end = end

  def __repr__(self):
    return "<title:{} start:{} end:{}>".format(self.title, self.start, self.end)
#############
# FUNCTIONS #
#############
# Count how many of char c begin in s before another char
# deprecated
def charCnt(s, c):
  """Count how many times ``c`` matches at successive offsets 0, 1, 2, ... of ``s``.

  Iterative rewrite of the old recursive helper (still deprecated, see note
  above): behaves like the original for non-empty ``c`` but cannot blow the
  recursion limit on long runs, and returns 0 for an empty ``c`` instead of
  recursing forever.
  """
  if not c:
    # "".find("") == 0, so the old recursion never terminated for empty c.
    return 0
  count = 0
  while s.startswith(c):  # equivalent to s.find(c) == 0 for non-empty c
    count += 1
    s = s[1:]             # original advanced by one char, not len(c)
  return count
# Takes output from ffmpeg info cmd
# Returns parsed info
def parseInfo(ss):
  """Parse ffmpeg's informational output for an input file.

  Returns a dict with three keys:
    "general"  - duration string, start offset (float), bitrate (int)
    "metadata" - top-level key/value metadata pairs
    "chapters" - list of Chapter objects (title filled from chapter metadata)

  Only the lines between the "Input #" and "Output #" markers are examined;
  if either marker is missing a NameError is raised (unchanged from the
  original behaviour -- callers always feed this real ffmpeg output).
  """
  lines = ss.splitlines()
  # Locate the slice of lines describing the input stream.
  # BUGFIX: was ``xrange`` (Python 2 only); enumerate works on 2 and 3.
  for ii, line in enumerate(lines):
    if line.find("Input #") == 0:
      start = ii
    if line.find("Output #") == 0:
      end = ii
  lines = lines[start+1:end]

  rv = {}
  rv["general"] = {}
  rv["metadata"] = {}
  rv["chapters"] = []

  active = ""  # which section the current non-header line belongs to
  for ll in lines:
    if ll.find("  Metadata:") == 0:
      active = "metadata"
      continue
    elif ll.find("  Duration:") == 0:
      # e.g. "  Duration: 01:02:03.00, start: 0.025, bitrate: 128 kb/s"
      vals = ll.strip().split(",")
      rv["general"]["duration"] = vals[0].split("Duration: ")[1]
      rv["general"]["start"] = float(vals[1].split("start: ")[1].strip())
      rv["general"]["bitrate"] = int(vals[2].strip().split(" ")[1])
      continue
    elif ll.find("    Chapter #") == 0:
      # e.g. "    Chapter #0:0: start 0.000000, end 10.000000"
      active = "chapters"
      vals = ll.split(",")
      start = float(vals[0].split("start ")[1].strip())
      end = float(vals[1].split("end ")[1].strip())
      rv["chapters"].append(Chapter(start, end))
      continue

    if active == "metadata":
      # "    key             : value"
      vals = ll.strip().split(":", 1)
      rv["metadata"][vals[0].strip()] = vals[1].strip()
    elif active == "chapters":
      # Chapter metadata: only the title line is interesting.
      if ll.strip().find("title") == 0:
        rv["chapters"][-1].title = ll.split(":", 1)[1].strip()
  return rv
# Call ffmpeg binary and returns output
def ff(cmd):
  """Run the ffmpeg binary with ``cmd`` (one argument string, split on
  whitespace) and return its combined stdout+stderr as a string.

  Raises subprocess.CalledProcessError if ffmpeg exits non-zero.
  NOTE(review): the naive whitespace split means file paths containing
  spaces will break -- confirm callers never pass such paths.
  """
  s = FFMPEG + " " + cmd
  return str(subprocess.check_output(s.split(), stderr=subprocess.STDOUT))
# Kill ourselves
def euthanize(signal, frame):
  """Signal handler: print the received signal number and exit the process."""
  print(str(signal) + " exiting")
  sys.exit(0)
###################
# BEGIN EXECUTION #
###################
# Route common fatal/termination signals through one handler so the
# script always logs which signal ended it and exits cleanly.
signal.signal(signal.SIGINT, euthanize)
signal.signal(signal.SIGTERM, euthanize)
signal.signal(signal.SIGABRT, euthanize)
signal.signal(signal.SIGALRM, euthanize)
signal.signal(signal.SIGSEGV, euthanize)
signal.signal(signal.SIGHUP, euthanize)
# Command-line interface.
ap = argparse.ArgumentParser(description='Split a large mp3 file into smaller files')
ap.add_argument(nargs=1, dest='infile', type=str, default=None,
                help='File to split')
ap.add_argument('-p', '--prefix', default=None, nargs=1, dest='prefix', type=str, required=False,
                help='Prefix for output files')
ap.add_argument('-b', '--begin', default=0, nargs=1, dest='pause', type=int, required=False,
                help='Begin with a pause for each slice in seconds(not implemented)')
ap.add_argument('-s', '--slice', default=None, nargs=1, dest='slice', type=int, required=False,
                help='Size of each slice in minutes(not implemented)')
ap.add_argument('-c', '--chapters', default=False, dest='chapters', action='store_true', required=False,
                help='Use chapter breaks if present. Overrides -s if present')
ap.add_argument('-d', '--dump', default=False, dest='dump', action='store_true', required=False,
                help='Dump info on mp3 and exit.')
ap.add_argument('-v', '--verbose', default=False, dest='verbose', action='store_true', required=False,
                help='Verbose output during processing.')
args = ap.parse_args()

# Reject the flags whose features have not been implemented yet.
if args.pause:
  print("ERROR: -b --begin not yet implemented")
  exit(1)
if args.slice:
  print("ERROR: -s --slice not yet implemented")
  exit(1)
infile = args.infile[0]

# Capture info on our file
info = parseInfo(ff("-i " + infile + " -f null -"))

if args.dump:
  # Print everything we parsed and exit without writing any files.
  # BUGFIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
  print("__general__")
  for k, v in info["general"].items():
    print(k + " : " + str(v))
  print("__metadata__")
  for k, v in info["metadata"].items():
    print(k + " : " + str(v))
  print("__chapters__")
  for chap in info["chapters"]:
    print(repr(chap))
  exit(0)

# Some handy commands
# ffmpeg -i input.ext -c:a copy -ss start_time -t end_time output-ext
# ffmpeg -i in.opus -ss 00:00:30.0 -t 00:03:00 -c copy out.opus
# ffmpeg -loglevel fatal -i test.mp3 -ss 623.907 -to 1187.843 -c:a copy chap3.mp3

if args.chapters and len(info["chapters"]) > 0: # Split by chapters
  if args.verbose:
    print("Splitting by " + str(len(info["chapters"])) + " chapters")
  cnt = 0
  for chap in info["chapters"]:
    cnt += 1
    # Output name: either the user-supplied prefix or the input basename,
    # with a zero-padded track number appended.
    if args.prefix is None:  # was '== None'
      outfile = infile.rsplit(".", 1)[0] + "-" + str(cnt).zfill(3) + ".mp3"
    else:
      outfile = args.prefix[0] + "-" + str(cnt).zfill(3) + ".mp3"
    if args.verbose:
      print("Extracting chapter " + str(cnt) + " to " + outfile + " at " + str(chap.start))
    # This also copies all metadata information into each chapter and sets track number
    ff("-loglevel fatal -i " + infile + " -ss " + str(chap.start) + " -to " + str(chap.end) + \
       " -metadata track=" + str(cnt).zfill(3) + " -c:a copy " + outfile)
else: # Split by slice size
  pass

if args.verbose:
  print("Finished\a\a\a\a")
| #!/usr/bin/env python
import sys
import signal
import subprocess
import argparse
###########
# GLOBALS #
###########
FFMPEG = "/usr/local/bin/ffmpeg"
###########
# CLASSES #
###########
class Chapter():
def __init__(self, start, end):
self.title = ""
self.start = start
self.end = end
def __repr__(self):
return "<title:" + self.title + " start:" + str(self.start) + " end:" + str(self.end) + ">"
#############
# FUNCTIONS #
#############
# Count how many of char c begin in s before another char
# deprecated
def charCnt(s, c):
if s.find(c) == 0:
return 1 + charCnt(s[1:], c)
else:
return 0
# Takes output from ffmpeg info cmd
# Returns parsed info
def parseInfo(ss):
lines = ss.splitlines()
for ii in xrange(len(lines)):
if lines[ii].find("Input #") == 0:
start = ii
if lines[ii].find("Output #") == 0:
end = ii
lines = lines[start+1:end]
rv = {}
rv["general"] = {}
rv["metadata"] = {}
rv["chapters"] = []
active = ""
for ll in lines:
if ll.find(" Metadata:") == 0:
active = "metadata"
continue
elif ll.find(" Duration:") == 0:
vals = ll.strip().split(",")
rv["general"]["duration"] = vals[0].split("Duration: ")[1]
rv["general"]["start"] = float(vals[1].split("start: ")[1].strip())
rv["general"]["bitrate"] = int(vals[2].strip().split(" ")[1])
continue
elif ll.find(" Chapter #") == 0:
active = "chapters"
vals = ll.split(",")
start = float(vals[0].split("start ")[1].strip())
end = float(vals[1].split("end ")[1].strip())
rv["chapters"].append(Chapter(start, end))
continue
if active == "metadata":
vals = ll.strip().split(":", 1)
rv["metadata"][vals[0].strip()] = vals[1].strip()
elif active == "chapters":
if ll.strip().find("title") == 0:
rv["chapters"][-1].title = ll.split(":", 1)[1].strip()
return rv
# Call ffmpeg binary and returns output
def ff(cmd):
s = FFMPEG + " " + cmd
return str(subprocess.check_output(s.split(), stderr=subprocess.STDOUT))
# Kill ourselves
def euthanize(signal, frame):
print(str(signal) + " exiting")
sys.exit(0)
###################
# BEGIN EXECUTION #
###################
signal.signal(signal.SIGINT, euthanize)
signal.signal(signal.SIGTERM, euthanize)
signal.signal(signal.SIGABRT, euthanize)
signal.signal(signal.SIGALRM, euthanize)
signal.signal(signal.SIGSEGV, euthanize)
signal.signal(signal.SIGHUP, euthanize)
ap = argparse.ArgumentParser(description='Split a large mp3 file into smaller files')
ap.add_argument(nargs=1, dest='infile', type=str, default=None,
help='File to split')
ap.add_argument('-p', '--prefix', default=None, nargs=1, dest='prefix', type=str, required=False,
help='Prefix for output files')
ap.add_argument('-b', '--begin', default=0, nargs=1, dest='pause', type=int, required=False,
help='Begin with a pause for each slice in seconds(not implemented)')
ap.add_argument('-s', '--slice', default=None, nargs=1, dest='slice', type=int, required=False,
help='Size of each slice in minutes(not implemented)')
ap.add_argument('-c', '--chapters', default=False, dest='chapters', action='store_true', required=False,
help='Use chapter breaks if present. Overrides -s if present')
ap.add_argument('-d', '--dump', default=False, dest='dump', action='store_true', required=False,
help='Dump info on mp3 and exit.')
ap.add_argument('-v', '--verbose', default=False, dest='verbose', action='store_true', required=False,
help='Verbose output during processing.')
args = ap.parse_args()
if args.pause:
print("ERROR: -b --begin not yet implemented")
exit(1)
if args.slice:
print("ERROR: -s --slice not yet implemented")
exit(1)
infile = args.infile[0]
# Capture info on our file
info = parseInfo(ff("-i " + infile + " -f null -"))
if args.dump:
print("__general__")
for k,v in info["general"].iteritems():
print(k + " : " + str(v))
print("__metadata__")
for k,v in info["metadata"].iteritems():
print(k + " : " + str(v))
print("__chapters__")
for chap in info["chapters"]:
print(repr(chap))
exit(0)
# Some handy commands
# ffmpeg -i input.ext -c:a copy -ss start_time -t end_time output-ext
# ffmpeg -i in.opus -ss 00:00:30.0 -t 00:03:00 -c copy out.opus
# ffmpeg -loglevel fatal -i test.mp3 -ss 623.907 -to 1187.843 -c:a copy chap3.mp3
if args.chapters and len(info["chapters"]) > 0: # Split by chapters
if args.verbose:
print("Splitting by " + str(len(info["chapters"])) + " chapters")
cnt = 0
for chap in info["chapters"]:
cnt += 1
if args.prefix == None:
outfile = infile.rsplit(".", 1)[0] + "-" + str(cnt).zfill(3) + ".mp3"
else:
outfile = args.prefix[0] + "-" + str(cnt).zfill(3) + ".mp3"
if args.verbose:
print("Extracting chapter " + str(cnt) + " to " + outfile + " at " + str(chap.start))
# This also copies all metadata information into each chapter and sets track number
ff("-loglevel fatal -i " + infile + " -ss " + str(chap.start) + " -to " + str(chap.end) + \
" -metadata track=" + str(cnt).zfill(3) + " -c:a copy " + outfile)
else: # Split by slice size
pass
if args.verbose:
print("Finished\a\a\a\a")
| en | 0.393895 | #!/usr/bin/env python ########### # GLOBALS # ########### ########### # CLASSES # ########### ############# # FUNCTIONS # ############# # Count how many of char c begin in s before another char # deprecated # Takes output from ffmpeg info cmd # Returns parsed info #") == 0: #") == 0: #") == 0: # Call ffmpeg binary and returns output # Kill ourselves ################### # BEGIN EXECUTION # ################### # Capture info on our file # Some handy commands # ffmpeg -i input.ext -c:a copy -ss start_time -t end_time output-ext # ffmpeg -i in.opus -ss 00:00:30.0 -t 00:03:00 -c copy out.opus # ffmpeg -loglevel fatal -i test.mp3 -ss 623.907 -to 1187.843 -c:a copy chap3.mp3 # Split by chapters # This also copies all metadata information into each chapter and sets track number # Split by slice size | 2.879178 | 3 |
oo/pessoa.py | gitrodrigo/pythonbirds | 0 | 6619420 | <filename>oo/pessoa.py
class Pessoa:
    """A person with a name, an age and a list of children (OO exercise)."""

    # Class attribute, shared by every instance unless shadowed.
    olhos = 2

    def __init__(self, *filhos, nome=None, idade=34):
        self.nome = nome
        self.idade = idade
        self.filhos = list(filhos)

    def cumprimentar(self):
        """Return a greeting that embeds this instance's id()."""
        return 'Olá {}'.format(id(self))
if __name__ == '__main__':
    # Demo of instance creation, attribute lookup and class attributes.
    joaquim = Pessoa(nome='Joaquim')
    vicente = Pessoa(nome='Vicente')
    rodrigo = Pessoa(joaquim,vicente, nome='Rodrigo')
    # Calling through the class and through the instance are equivalent.
    print(Pessoa.cumprimentar(rodrigo))
    print(id(rodrigo))
    print(rodrigo.cumprimentar())
    print(rodrigo.nome)
    print(rodrigo.idade)
    for filho in rodrigo.filhos:
        print(f'{filho.nome} é filho de {rodrigo.nome}')
    # Instance attributes can be added and removed dynamically.
    rodrigo.sobrenome = 'Pimentel'
    del rodrigo.filhos
    rodrigo.olhos = 1   # shadows the class attribute on this instance only
    del rodrigo.olhos   # removes the shadow; the class attribute shows again
    print(rodrigo.__dict__)
    print(joaquim.__dict__)
    print(vicente.__dict__)
    # Changing the class attribute affects all instances that don't shadow it.
    Pessoa.olhos = 3
    print(Pessoa.olhos)
    print(rodrigo.olhos)
    print(vicente.olhos)
    print(id(Pessoa.olhos),id(joaquim.olhos), id(rodrigo.olhos))
| <filename>oo/pessoa.py
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=34):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
if __name__ == '__main__':
joaquim = Pessoa(nome='Joaquim')
vicente = Pessoa(nome='Vicente')
rodrigo = Pessoa(joaquim,vicente, nome='Rodrigo')
print(Pessoa.cumprimentar(rodrigo))
print(id(rodrigo))
print(rodrigo.cumprimentar())
print(rodrigo.nome)
print(rodrigo.idade)
for filho in rodrigo.filhos:
print(f'{filho.nome} é filho de {rodrigo.nome}')
rodrigo.sobrenome = 'Pimentel'
del rodrigo.filhos
rodrigo.olhos = 1
del rodrigo.olhos
print(rodrigo.__dict__)
print(joaquim.__dict__)
print(vicente.__dict__)
Pessoa.olhos = 3
print(Pessoa.olhos)
print(rodrigo.olhos)
print(vicente.olhos)
print(id(Pessoa.olhos),id(joaquim.olhos), id(rodrigo.olhos))
| none | 1 | 3.79537 | 4 | |
sopel_modules/quiz/quiz.py | sharktamer/sopel-quiz | 1 | 6619421 | #! /usr/bin/env python
import requests
from sopel.module import commands, rule
from sopel.config.types import (StaticSection, ValidatedAttribute,
ChoiceAttribute, ListAttribute)
from sopel.db import SopelDB
from sopel.formatting import colors, color
import re
from threading import Timer
from time import sleep
class QuizSection(StaticSection):
    """Sopel config section [quiz]: win condition and permissions."""
    # 'points': first to N correct answers; 'score': first to N clue-value points.
    win_method = ChoiceAttribute('win_method', ['points', 'score'],
                                 default='points')
    points_to_win = ValidatedAttribute('points_to_win', int, default=10)
    score_to_win = ValidatedAttribute('score_to_win', int, default=7000)
    # Users allowed to start quizzes whose wins are recorded in the bot DB.
    db_users = ListAttribute('db_users')
def setup(bot):
    """Sopel hook: register the [quiz] config section and clear quiz state."""
    bot.config.define_section('quiz', QuizSection)
    bot.memory['quiz'] = None
def configure(config):
    """Sopel interactive-config hook: prompt for the quiz settings."""
    config.define_section('quiz', QuizSection, validate=False)
    config.quiz.configure_setting('win_method', 'Win by points or score?')
    # Only prompt for the threshold matching the chosen win method.
    if config.quiz.win_method == 'points':
        config.quiz.configure_setting('points_to_win',
                                      'How many points are needed to win?')
    else:
        config.quiz.configure_setting('score_to_win',
                                      'What score is needed to win?')
    config.quiz.configure_setting('db_users',
                                  'Which users can start tracked quizzes?')
def shutdown(bot):
    """Sopel hook: cancel any pending question-timeout timer on unload."""
    if bot.memory.contains('qtimer'):
        bot.memory['qtimer'].cancel()
class Question():
    """One trivia clue fetched from the jservice.io Jeopardy API."""

    def __init__(self):
        # Fetch one random clue; jservice returns a one-element JSON list.
        r = requests.get('http://jservice.io/api/random')
        q_json = r.json()[0]
        self.question = q_json['question'].strip()
        self.answer = self.strip_answer(q_json['answer'])
        # Normalised form used when checking players' attempts.
        self.checked_answer = self.parse_answer(self.answer)
        self.category = q_json['category']['title']
        # Some clues carry a null value; fall back to 100 points.
        self.value = q_json['value'] or 100
        self.answered = False
        r.close()

    def get_question(self):
        """Return the clue formatted as "question (category) [value]"."""
        q, c, v = self.question, self.category, self.value
        return '{} ({}) [{}]'.format(q, c, v)

    def strip_answer(self, answer):
        """Return the displayable answer with unwanted markup removed."""
        # strip any crap that should never be printed
        # - html tags
        # - \'
        answer = re.sub(r'\<.*?\>|\\(?=\')', '', answer)
        return answer

    def parse_answer(self, answer):
        """Return a lowercase, normalised answer used for matching attempts."""
        # strip extraneous characters, making the question easier to answer
        # - a, an and the from the beginning
        # - quotes
        # - parenthesised sections
        # - a trailing 's' (the ``s$`` alternative; the final empty
        #   alternative in the pattern matches nothing useful)
        answer = re.sub(r'^"?(the|a|an) |"| ?\(.*\) ?|s$|', '', answer,
                        flags=re.I)
        answer = re.sub(r'&', 'and', answer)
        return answer.lower()

    def attempt(self, attempt):
        """Return True if ``attempt`` contains the normalised answer."""
        return (attempt is not None and self.checked_answer in attempt.lower())
class Quiz():
    """State for one running quiz: scores, question counter, current question."""

    def __init__(self, starter):
        self.scores = {}
        self.qno = 0
        self.next_question()      # fetches question #1 from the API
        self.starter = starter    # nick of the user who started the quiz

    def get_question(self):
        """Return the current question prefixed with its number."""
        return 'Question {}: {}'.format(self.qno, self.question.get_question())

    def award_user(self, user, count):
        """Add ``count`` points to ``user``'s running total."""
        self.scores[user] = self.scores.get(user, 0) + count

    def next_question(self):
        """Advance the counter and fetch a fresh question."""
        self.qno += 1
        self.question = Question()

    def get_scores(self):
        """Return the nick -> points mapping."""
        return self.scores
@commands('quiz')
def quiz(bot, trigger):
    """.quiz -- start a new quiz (only one can run at a time)."""
    if bot.memory['quiz']:
        bot.say('Quiz is already running')
        return
    bot.say('Quiz started by {}'.format(trigger.nick))
    # Announce the win condition configured in the [quiz] section.
    if bot.config.quiz.win_method == 'points':
        win_value = bot.config.quiz.points_to_win
        bot.say('First to answer {} questions wins!'.format(win_value))
    else:
        win_value = bot.config.quiz.score_to_win
        bot.say('First to {} points wins!'.format(win_value))
    bot.memory['quiz'] = Quiz(trigger.nick)
    bot.say(bot.memory['quiz'].get_question())
    # Players get 30 seconds before the answer is revealed and we move on.
    bot.memory['qtimer'] = Timer(30, qtimeout, args=[bot])
    bot.memory['qtimer'].start()
@commands('qstop')
def qstop(bot, trigger):
    """.qstop -- abort the running quiz and cancel the timeout timer."""
    if not bot.memory['quiz']:
        bot.say('No quiz running!')
        return
    bot.say('Quiz stopped by {}'.format(trigger.nick))
    bot.memory['quiz'] = None
    bot.memory['qtimer'].cancel()
@commands('qscores')
def qscores(bot, trigger=None):
    """.qscores -- print the current scoreboard, highest score first.

    ``trigger`` defaults to None so this can also be called internally
    (e.g. every tenth question from next_q()).
    """
    if not bot.memory['quiz']:
        bot.say('No quiz running!')
        return
    if not bot.memory['quiz'].get_scores():
        bot.say('No one has scored any points yet!')
        return
    scores = sorted(bot.memory['quiz'].get_scores().items(),
                    key=lambda x: x[1], reverse=True)
    bot.say('Current scores:')
    for quizzer, score in scores:
        score = int(score)
        bot.say('{}: {} point{}'.format(quizzer, score, 's' * (score != 1)))
@commands('qwins')
def qwins(bot, trigger):
    """.qwins -- print all-time win counts stored in the Sopel DB."""
    db = SopelDB(bot.config)
    # Join Sopel's nick tables to get canonical nicks with a 'quiz_wins' value.
    winners = db.execute(
        'SELECT canonical, value from nicknames JOIN nick_values '
        'ON nicknames.nick_id = nick_values.nick_id '
        'WHERE key = ?',
        ['quiz_wins']).fetchall()
    if winners:
        bot.say('Overall quiz win counts')
        for user, count in sorted(winners, key=lambda x: x[1], reverse=True):
            bot.say('{}: {}'.format(user, count))
    else:
        bot.say('No one has won yet!')
def reset_timer(bot):
    """Restart the 30-second answer-timeout timer for the current question."""
    bot.memory['qtimer'].cancel()
    bot.memory['qtimer'] = Timer(30, qtimeout, args=[bot])
    bot.memory['qtimer'].start()
def next_q(bot):
    """Advance to the next question, announcing scores every 10th question."""
    if not bot.memory['quiz'].qno % 10:
        qscores(bot)
    bot.memory['quiz'].next_question()
    # Short breather before the next question is shown.
    sleep(5)
    bot.say(bot.memory['quiz'].get_question())
    reset_timer(bot)
@commands('qskip')
def qskip(bot, trigger):
    """.qskip -- give up on the current question, reveal it and move on."""
    if not bot.memory['quiz']:
        bot.say('No quiz running!')
        return
    quiz = bot.memory['quiz']
    bot.say('Fine, the answer was {}'.format(quiz.question.answer))
    next_q(bot)
def qtimeout(bot):
    """Timer callback: reveal the answer after 30s and advance the quiz."""
    if not bot.memory['quiz']:
        return
    quiz = bot.memory['quiz']
    answer = quiz.question.answer
    bot.say('No answer within 30 seconds. The answer was {}'.format(answer))
    next_q(bot)
@rule('[^\.].*')
def handle_quiz(bot, trigger):
    """Check every non-command channel message against the current answer."""
    if not bot.memory['quiz']:
        return
    quiz = bot.memory['quiz']
    # 'answered' guards against double-awarding while the next question loads.
    if quiz.question.attempt(trigger.args[1]) and not quiz.question.answered:
        quiz.question.answered = True
        bot.say(color('Correct! The answer was {}'.format(quiz.question.answer),
                      colors.GREEN))
        # 'score' mode awards the clue's value; 'points' mode awards 1.
        quiz.award_user(trigger.nick, quiz.question.value
                        if bot.config.quiz.win_method == 'score' else 1)
        score = bot.memory['quiz'].get_scores()[trigger.nick]
        bot.say('{} has {} point{}!'.format(trigger.nick, score,
                                            's' * (score > 1)))
        if bot.config.quiz.win_method == 'points':
            win_value = bot.config.quiz.points_to_win
        else:
            win_value = bot.config.quiz.score_to_win
        if score >= win_value:
            bot.say('{} is the winner!'.format(trigger.nick))
            qscores(bot)
            db = SopelDB(bot.config)
            db_users = bot.config.quiz.db_users
            # Record the win only if tracked quizzes are unrestricted or the
            # quiz starter is on the allowed list.
            if not db_users or quiz.starter in db_users:
                wins = (db.get_nick_value(trigger.nick, 'quiz_wins') or 0) + 1
                db.set_nick_value(trigger.nick, 'quiz_wins', wins)
                bot.say('{} has won {} time{}'.format(trigger.nick, wins,
                                                      's' * (wins > 1)))
            bot.memory['quiz'] = None
            return
        next_q(bot)
| #! /usr/bin/env python
import requests
from sopel.module import commands, rule
from sopel.config.types import (StaticSection, ValidatedAttribute,
ChoiceAttribute, ListAttribute)
from sopel.db import SopelDB
from sopel.formatting import colors, color
import re
from threading import Timer
from time import sleep
class QuizSection(StaticSection):
win_method = ChoiceAttribute('win_method', ['points', 'score'],
default='points')
points_to_win = ValidatedAttribute('points_to_win', int, default=10)
score_to_win = ValidatedAttribute('score_to_win', int, default=7000)
db_users = ListAttribute('db_users')
def setup(bot):
bot.config.define_section('quiz', QuizSection)
bot.memory['quiz'] = None
def configure(config):
config.define_section('quiz', QuizSection, validate=False)
config.quiz.configure_setting('win_method', 'Win by points or score?')
if config.quiz.win_method == 'points':
config.quiz.configure_setting('points_to_win',
'How many points are needed to win?')
else:
config.quiz.configure_setting('score_to_win',
'What score is needed to win?')
config.quiz.configure_setting('db_users',
'Which users can start tracked quizzes?')
def shutdown(bot):
if bot.memory.contains('qtimer'):
bot.memory['qtimer'].cancel()
class Question():
def __init__(self):
r = requests.get('http://jservice.io/api/random')
q_json = r.json()[0]
self.question = q_json['question'].strip()
self.answer = self.strip_answer(q_json['answer'])
self.checked_answer = self.parse_answer(self.answer)
self.category = q_json['category']['title']
self.value = q_json['value'] or 100
self.answered = False
r.close()
def get_question(self):
q, c, v = self.question, self.category, self.value
return '{} ({}) [{}]'.format(q, c, v)
def strip_answer(self, answer):
# strip any crap that should never be printed
# - html tags
# - \'
answer = re.sub(r'\<.*?\>|\\(?=\')', '', answer)
return answer
def parse_answer(self, answer):
# strip extraneous characters, making the question easier to answer
# - a, an and the from the beginning
# - quotes
# - parenthesised sections
answer = re.sub(r'^"?(the|a|an) |"| ?\(.*\) ?|s$|', '', answer,
flags=re.I)
answer = re.sub(r'&', 'and', answer)
return answer.lower()
def attempt(self, attempt):
return (attempt is not None and self.checked_answer in attempt.lower())
class Quiz():
def __init__(self, starter):
self.scores = {}
self.qno = 0
self.next_question()
self.starter = starter
def get_question(self):
return 'Question {}: {}'.format(self.qno, self.question.get_question())
def award_user(self, user, count):
if user not in self.scores:
self.scores[user] = count
else:
self.scores[user] += count
def next_question(self):
self.qno += 1
self.question = Question()
def get_scores(self):
return self.scores
@commands('quiz')
def quiz(bot, trigger):
if bot.memory['quiz']:
bot.say('Quiz is already running')
return
bot.say('Quiz started by {}'.format(trigger.nick))
if bot.config.quiz.win_method == 'points':
win_value = bot.config.quiz.points_to_win
bot.say('First to answer {} questions wins!'.format(win_value))
else:
win_value = bot.config.quiz.score_to_win
bot.say('First to {} points wins!'.format(win_value))
bot.memory['quiz'] = Quiz(trigger.nick)
bot.say(bot.memory['quiz'].get_question())
bot.memory['qtimer'] = Timer(30, qtimeout, args=[bot])
bot.memory['qtimer'].start()
@commands('qstop')
def qstop(bot, trigger):
if not bot.memory['quiz']:
bot.say('No quiz running!')
return
bot.say('Quiz stopped by {}'.format(trigger.nick))
bot.memory['quiz'] = None
bot.memory['qtimer'].cancel()
@commands('qscores')
def qscores(bot, trigger=None):
if not bot.memory['quiz']:
bot.say('No quiz running!')
return
if not bot.memory['quiz'].get_scores():
bot.say('No one has scored any points yet!')
return
scores = sorted(bot.memory['quiz'].get_scores().items(),
key=lambda x: x[1], reverse=True)
bot.say('Current scores:')
for quizzer, score in scores:
score = int(score)
bot.say('{}: {} point{}'.format(quizzer, score, 's' * (score != 1)))
@commands('qwins')
def qwins(bot, trigger):
db = SopelDB(bot.config)
winners = db.execute(
'SELECT canonical, value from nicknames JOIN nick_values '
'ON nicknames.nick_id = nick_values.nick_id '
'WHERE key = ?',
['quiz_wins']).fetchall()
if winners:
bot.say('Overall quiz win counts')
for user, count in sorted(winners, key=lambda x: x[1], reverse=True):
bot.say('{}: {}'.format(user, count))
else:
bot.say('No one has won yet!')
def reset_timer(bot):
bot.memory['qtimer'].cancel()
bot.memory['qtimer'] = Timer(30, qtimeout, args=[bot])
bot.memory['qtimer'].start()
def next_q(bot):
if not bot.memory['quiz'].qno % 10:
qscores(bot)
bot.memory['quiz'].next_question()
sleep(5)
bot.say(bot.memory['quiz'].get_question())
reset_timer(bot)
@commands('qskip')
def qskip(bot, trigger):
if not bot.memory['quiz']:
bot.say('No quiz running!')
return
quiz = bot.memory['quiz']
bot.say('Fine, the answer was {}'.format(quiz.question.answer))
next_q(bot)
def qtimeout(bot):
if not bot.memory['quiz']:
return
quiz = bot.memory['quiz']
answer = quiz.question.answer
bot.say('No answer within 30 seconds. The answer was {}'.format(answer))
next_q(bot)
@rule('[^\.].*')
def handle_quiz(bot, trigger):
if not bot.memory['quiz']:
return
quiz = bot.memory['quiz']
if quiz.question.attempt(trigger.args[1]) and not quiz.question.answered:
quiz.question.answered = True
bot.say(color('Correct! The answer was {}'.format(quiz.question.answer),
colors.GREEN))
quiz.award_user(trigger.nick, quiz.question.value
if bot.config.quiz.win_method == 'score' else 1)
score = bot.memory['quiz'].get_scores()[trigger.nick]
bot.say('{} has {} point{}!'.format(trigger.nick, score,
's' * (score > 1)))
if bot.config.quiz.win_method == 'points':
win_value = bot.config.quiz.points_to_win
else:
win_value = bot.config.quiz.score_to_win
if score >= win_value:
bot.say('{} is the winner!'.format(trigger.nick))
qscores(bot)
db = SopelDB(bot.config)
db_users = bot.config.quiz.db_users
if not db_users or quiz.starter in db_users:
wins = (db.get_nick_value(trigger.nick, 'quiz_wins') or 0) + 1
db.set_nick_value(trigger.nick, 'quiz_wins', wins)
bot.say('{} has won {} time{}'.format(trigger.nick, wins,
's' * (wins > 1)))
bot.memory['quiz'] = None
return
next_q(bot)
| en | 0.753609 | #! /usr/bin/env python # strip any crap that should never be printed # - html tags # - \' # strip extraneous characters, making the question easier to answer # - a, an and the from the beginning # - quotes # - parenthesised sections | 2.554052 | 3 |
ss3/SE 1.py | DuongVu39/C4E10_Duong | 0 | 6619422 | import time
# Simple interactive CRUD demo over a list of clothing items.
clothes = ["T-Shirt", "Sweater", "Jeans"]

print ("|==============================================|")
print ("|Copy: 'C' |Read: 'R' |Update: 'U' |Delete: 'D'|")
print ("|==============================================|")

# NOTE(review): any input other than C/R/U falls through to the delete
# branch below -- confirm that is intended.
while True:
    action = input ( "Welcome to our shop, what do you want (C, R, U, D)?")
    action = action.upper()
    if action == "C":
        item = (input("Enter new item:")).title()
        clothes.append(item)
        print ("Our items:",clothes)
    elif action == "R":
        print ("Our items:",clothes)
    elif action == "U":
        position = int(input("Update position:"))
        # BUGFIX: positions are 1-based (item N lives at clothes[N-1]), so
        # valid values are 1..len(clothes).  The old check
        # ``position > len(clothes) - 1`` wrongly rejected the last item and
        # accepted 0/negative positions (which indexed from the end).
        if not 1 <= position <= len(clothes):
            print ("There's no item number", position)
            print()
            continue
        item = (input("New item:")).title()
        clothes[position-1] = item
        print ("Our items:",clothes)
    else:
        position = int(input("Delete position:"))
        # BUGFIX: same 1-based bounds check as the update branch.
        if not 1 <= position <= len(clothes):
            print ("There's no item number", position)
            print()
            continue
        del clothes[position-1]
        print ("Our items:",clothes)
    print ()
    time.sleep(3)
| import time
clothes = ["T-Shirt", "Sweater", "Jeans"]
print ("|==============================================|")
print ("|Copy: 'C' |Read: 'R' |Update: 'U' |Delete: 'D'|")
print ("|==============================================|")
while True:
action = input ( "Welcome to our shop, what do you want (C, R, U, D)?")
action = action.upper()
if action == "C":
item = (input("Enter new item:")).title()
clothes.append(item)
print ("Our items:",clothes)
elif action == "R":
print ("Our items:",clothes)
elif action == "U":
position = int(input("Update position:"))
if position > (len (clothes)-1):
print ("There's no item number", position)
print()
continue
item = (input("New item:")).title()
clothes[position-1] = item
print ("Our items:",clothes)
else:
position = int(input("Delete position:"))
if position > (len (clothes) -1):
print ("There's no item number", position)
print()
continue
del clothes[position-1]
print ("Our items:",clothes)
print ()
time.sleep(3)
| none | 1 | 4.291383 | 4 | |
labs/backend/models.py | judaicalink/judaicalink-labs | 3 | 6619423 | from django.db import models
from datetime import datetime
from django.utils import timezone
# Create your models here.
from . import consumers
class ThreadTask(models.Model):
    """A background task whose progress log is stored in the database and
    pushed live to websocket subscribers on channel ``task<id>``."""

    name = models.TextField()
    is_done = models.BooleanField(blank=False, default=False)
    status_ok = models.BooleanField(blank=False, default=True)
    started = models.DateTimeField(default = timezone.now)
    ended = models.DateTimeField(null=True)
    log_text = models.TextField()

    def done(self):
        """Mark the task finished and persist the end time."""
        self.is_done = True
        # BUGFIX: use timezone-aware 'now' to match 'started' (timezone.now);
        # the old datetime.now() stored a naive timestamp.
        self.ended = timezone.now()
        self.save()

    def log(self, message):
        """Append a timestamped line to the log, save it, and push the raw
        message to the task's websocket channel."""
        # Re-read first so concurrent loggers don't clobber each other's lines.
        self.refresh_from_db()
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        self.log_text += '\n' + timestamp + ": " + message
        self.log_text = self.log_text.strip()
        self.save()
        consumers.send_sub_message('task{}'.format(self.id), submessage=message)
        print('Logged: {}'.format(message))

    def last_log(self):
        """Return the most recent non-empty log line, or '' if there is none."""
        msgs = self.log_text.split('\n')
        # BUGFIX: scan down to index 0 inclusive; the old range stopped at 1,
        # so a single-line log always returned ''.
        for i in range(len(msgs) - 1, -1, -1):
            if msgs[i].strip():
                return msgs[i]
        return ""

    def __str__(self):
        return "{}".format(self.name)
| from django.db import models
from datetime import datetime
from django.utils import timezone
# Create your models here.
from . import consumers
class ThreadTask(models.Model):
name = models.TextField()
is_done = models.BooleanField(blank=False, default=False)
status_ok = models.BooleanField(blank=False, default=True)
started = models.DateTimeField(default = timezone.now)
ended = models.DateTimeField(null=True)
log_text = models.TextField()
def done(self):
self.is_done = True
self.ended = datetime.now()
self.save()
def log(self, message):
self.refresh_from_db()
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
self.log_text += '\n' + timestamp + ": " + message
self.log_text = self.log_text.strip()
self.save()
consumers.send_sub_message('task{}'.format(self.id), submessage=message)
print('Logged: {}'.format(message))
def last_log(self):
msgs = self.log_text.split('\n')
for i in range(len(msgs) - 1, 0, -1):
if msgs[i].strip():
return msgs[i]
return ""
def __str__(self):
return "{}".format(self.name)
| en | 0.963489 | # Create your models here. | 2.31011 | 2 |
payu/__init__.py | martn/django-payu | 45 | 6619424 | default_app_config = 'payu.apps.PayuConfig'
# from payu.gateway import *
| default_app_config = 'payu.apps.PayuConfig'
# from payu.gateway import *
| en | 0.381392 | # from payu.gateway import * | 1.130663 | 1 |
tkinter/optionmenu/example-4.py | whitmans-max/python-examples | 140 | 6619425 | import tkinter as tk
#--- functions ---
def on_click():
    """Button callback: print each OptionMenu's index, current selection,
    and the comma-separated option string it was built from."""
    for number, var in enumerate(all_variables):
        print('optionmenu:', number, '| selected:', var.get(), '| all:', data[number])
#--- main ---

# Each string describes one OptionMenu's choices, comma-separated.
data = ['a,b,c', 'x,y,z']

root = tk.Tk()

# One StringVar per OptionMenu so each selection can be read back later.
all_variables = []

for options in data:
    options = options.split(',')
    var = tk.StringVar(value=options[0])  # default to the first option
    all_variables.append(var)
    op = tk.OptionMenu(root, var, *options)
    op.pack()

b = tk.Button(root, text='OK', command=on_click)
b.pack()

root.mainloop()
| import tkinter as tk
#--- functions ---
def on_click():
for number, var in enumerate(all_variables):
print('optionmenu:', number, '| selected:', var.get(), '| all:', data[number])
#--- main ---
data = ['a,b,c', 'x,y,z']
root = tk.Tk()
all_variables = []
for options in data:
options = options.split(',')
var = tk.StringVar(value=options[0])
all_variables.append(var)
op = tk.OptionMenu(root, var, *options)
op.pack()
b = tk.Button(root, text='OK', command=on_click)
b.pack()
root.mainloop()
| en | 0.309386 | #--- functions --- #--- main --- | 3.521791 | 4 |
src/regex.py | helish88/AnimateaBot | 0 | 6619426 | import re
# Public API of this module.
__all__: tuple[str, ...] = ("ANSI_ESCAPE",)

# Matches ANSI terminal escape sequences -- both single-character escapes
# (ESC @ .. ESC _) and CSI sequences (ESC [ params ... final byte) -- so
# they can be stripped from captured terminal output.
ANSI_ESCAPE: re.Pattern[str] = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
| import re
__all__: tuple[str, ...] = ("ANSI_ESCAPE",)
ANSI_ESCAPE: re.Pattern[str] = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
| none | 1 | 2.472244 | 2 | |
src/jgikbase/idmapping/storage/id_mapping_storage.py | jgi-kbase/IDMappingService | 0 | 6619427 | """
Interface for a storage system for ID mappings.
"""
# it'd be nice if you could just pragma: no cover the entire file, but that doesn't seem to work
from abc import abstractmethod as _abstractmethod # pragma: no cover
from abc import ABCMeta as _ABCMeta # pragma: no cover
from jgikbase.idmapping.core.object_id import NamespaceID # pragma: no cover
from jgikbase.idmapping.core.user import User, Username # pragma: no cover
from jgikbase.idmapping.core.tokens import HashedToken # pragma: no cover
from jgikbase.idmapping.core.object_id import Namespace # pragma: no cover
from typing import Iterable, Set, Tuple # pragma: no cover
from jgikbase.idmapping.core.object_id import ObjectID # pragma: no cover
from typing import Dict
class IDMappingStorage(metaclass=_ABCMeta):  # pragma: no cover
    """
    An interface for a storage system for ID mappings. All methods are abstract.
    """
    # NOTE: in Python 3 the metaclass must be passed in the class statement; the
    # old Python 2 style ``__metaclass__ = _ABCMeta`` class attribute is silently
    # ignored, which meant the @_abstractmethod decorators were never enforced
    # and this "interface" could be instantiated without implementing anything.

    @_abstractmethod
    def create_local_user(self, username: Username, token: HashedToken) -> None:
        """
        Create a user.
        Once created, users cannot be removed. The client programmer is responsible for
        ensuring that the token provided does not already exist in the database.

        :param username: the user name.
        :param token: the user's token after applying a hash function.
        :raises ValueError: if the token already exists in the database.
        :raises TypeError: if any of the arguments are None.
        :raises UserExistsError: if the user already exists.
        :raises IDMappingStorageError: if an unexpected error occurs.
        """
        raise NotImplementedError()

    @_abstractmethod
    def set_local_user_as_admin(self, username: Username, admin: bool) -> None:
        '''
        Mark a user as a system admin. Or not.

        :param username: the name of the user to alter.
        :param admin: True to give the user admin privileges, False to remove them. If the user
            is already in the given state, no further action is taken.
        :raises TypeError: if the username is None.
        '''
        raise NotImplementedError()

    @_abstractmethod
    def update_local_user_token(self, username: Username, token: HashedToken) -> None:
        """
        Update an existing user's token.

        :param username: the user name.
        :param token: the user's token after applying a hash function.
        :raises ValueError: if the token already exists in the database.
        :raises TypeError: if any of the arguments are None.
        :raises NoSuchUserError: if the user does not exist.
        :raises IDMappingStorageError: if an unexpected error occurs.
        """
        raise NotImplementedError()

    @_abstractmethod
    def get_user(self, token: HashedToken) -> Tuple[Username, bool]:
        """
        Get the user, if any, associated with a hashed token.

        :param token: the hashed token.
        :raises TypeError: if the token is None.
        :raises InvalidTokenError: if the token does not exist in the storage system.
        :raises IDMappingStorageError: if an unexpected error occurs.
        :returns: a tuple of the username corresponding to the token and a boolean denoting
            whether the user is an admin or not.
        """
        raise NotImplementedError()

    @_abstractmethod
    def get_users(self) -> Dict[Username, bool]:
        """
        Get all the users in the system.

        :raises IDMappingStorageError: if an unexpected error occurs.
        :returns: a mapping of username to a boolean denoting whether the user is an admin or not.
        """
        raise NotImplementedError()

    @_abstractmethod
    def user_exists(self, username: Username) -> bool:
        '''
        Check if a user exists in the system. Returns True if so.

        :param username: the username to check.
        :raises TypeError: if the username is None.
        '''
        raise NotImplementedError()

    @_abstractmethod
    def create_namespace(self, namespace_id: NamespaceID) -> None:
        """
        Create a new namespace. Once created, namespaces cannot be removed.

        :param namespace_id: The namespace to create.
        :raises TypeError: if the namespace ID is None.
        :raises NamespaceExistsError: if the namespace already exists.
        """
        raise NotImplementedError()

    @_abstractmethod
    def add_user_to_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
        """
        Add a user to a namespace, giving them administration rights. A noop occurs if the user
        is already an administrator for the namespace.

        :param namespace_id: the namespace to modify.
        :param admin_user: the user.
        :raises TypeError: if any of the arguments are None.
        :raises NoSuchNamespaceError: if the namespace does not exist.
        :raises UserExistsError: if the user already administrates the namespace.
        """
        raise NotImplementedError()

    @_abstractmethod
    def remove_user_from_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
        """
        Remove a user from a namespace, removing their administration rights.

        :param namespace_id: the namespace to modify.
        :param admin_user: the user.
        :raises TypeError: if any of the arguments are None.
        :raises NoSuchNamespaceError: if the namespace does not exist.
        :raises NoSuchUserError: if the user does not administrate the namespace.
        """
        raise NotImplementedError()

    @_abstractmethod
    def set_namespace_publicly_mappable(self, namespace_id: NamespaceID, publicly_mappable: bool
                                        ) -> None:
        """
        Set the publicly mappable flag on a namespace.

        :param namespace_id: The namespace to alter.
        :param publicly_mappable: True to set the namespace to publicly mappable, False or None
            to prevent public mapping.
        :raises TypeError: if namespace_id is None.
        :raises NoSuchNamespaceError: if the namespace does not exist.
        """
        raise NotImplementedError()

    @_abstractmethod
    def get_namespaces(self, nids: Iterable[NamespaceID]=None) -> Set[Namespace]:
        """
        Get all the namespaces in the system.

        :param nids: specific namespaces to get. By default all namespaces are returned.
        :raises TypeError: if nids contains None.
        :raises NoSuchNamespaceError: if any of the namespaces in the nids parameter do not
            exist
        """
        raise NotImplementedError()

    @_abstractmethod
    def get_namespace(self, namespace_id: NamespaceID) -> Namespace:
        """
        Get a particular namespace.

        :param namespace_id: the id of the namespace to get.
        :raises TypeError: if the namespace ID is None.
        :raises NoSuchNamespaceError: if the namespace does not exist.
        """
        raise NotImplementedError()

    @_abstractmethod
    def add_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> None:
        """
        Create a mapping from one namespace to another.
        Note that this method does NOT check for the existence of the namespaces.
        If the mapping already exists, no further action is taken.

        :param primary_OID: the primary namespace/ID combination.
        :param secondary_OID: the secondary namespace/ID combination.
        :raise TypeError: if any of the arguments are None.
        :raise ValueError: if the namespace IDs are the same.
        """
        raise NotImplementedError()

    @_abstractmethod
    def remove_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> bool:
        """
        Remove a mapping from one namespace to another. Returns true if a mapping was removed,
        false otherwise.

        :param primary_OID: the primary namespace/ID combination.
        :param secondary_OID: the secondary namespace/ID combination.
        :raise TypeError: if any of the arguments are None.
        """
        raise NotImplementedError()

    @_abstractmethod
    def find_mappings(self, oid: ObjectID, ns_filter: Iterable[NamespaceID]=None
                      ) -> Tuple[Set[ObjectID], Set[ObjectID]]:
        """
        Find mappings given a namespace / id combination.
        If the namespace or id does not exist, no results will be returned. The namespaces in the
        filter are ignored if they do not exist.

        :param oid: the namespace / id combination to match against.
        :param ns_filter: a list of namespaces with which to filter the results. Only results in
            these namespaces will be returned.
        :returns: a tuple of sets of object IDs. The first set in the tuple contains mappings
            where the provided object ID is the primary object ID, and the second set contains
            mappings where the provided object ID is the secondary object ID.
        :raise TypeError: if the object ID is None or the filter contains None.
        """
        raise NotImplementedError()
| """
Interface for a storage system for ID mappings.
"""
# it'd be nice if you could just pragma: no cover the entire file, but that doesn't seem to work
from abc import abstractmethod as _abstractmethod # pragma: no cover
from abc import ABCMeta as _ABCMeta # pragma: no cover
from jgikbase.idmapping.core.object_id import NamespaceID # pragma: no cover
from jgikbase.idmapping.core.user import User, Username # pragma: no cover
from jgikbase.idmapping.core.tokens import HashedToken # pragma: no cover
from jgikbase.idmapping.core.object_id import Namespace # pragma: no cover
from typing import Iterable, Set, Tuple # pragma: no cover
from jgikbase.idmapping.core.object_id import ObjectID # pragma: no cover
from typing import Dict
class IDMappingStorage: # pragma: no cover
"""
An interface for a storage system for ID mappings. All methods are abstract.
"""
__metaclass__ = _ABCMeta
@_abstractmethod
def create_local_user(self, username: Username, token: HashedToken) -> None:
"""
Create a user.
Once created, users cannot be removed. The client programmer is responsible for
ensuring that the token provided does not already exist in the database.
:param username: the user name.
:param token: the user's token after applying a hash function.
:raises ValueError: if the token already exists in the database.
:raises TypeError: if any of the arguments are None.
:raises UserExistsError: if the user already exists.
:raises IDMappingStorageError: if an unexpected error occurs.
"""
raise NotImplementedError()
@_abstractmethod
def set_local_user_as_admin(self, username: Username, admin: bool) -> None:
'''
Mark a user as a system admin. Or not.
:param username: the name of the user to alter.
:param admin: True to give the user admin privileges, False to remove them. If the user
is already in the given state, no further action is taken.
:raises TypeError: if the usename is None.
'''
raise NotImplementedError()
@_abstractmethod
def update_local_user_token(self, username: Username, token: HashedToken) -> None:
"""
Update an existing user's token.
:param username: the user name.
:param token: the user's token after applying a hash function.
:raises ValueError: if the token already exists in the database.
:raises TypeError: if any of the arguments are None.
:raises NoSuchUserError: if the user does not exist.
:raises IDMappingStorageError: if an unexpected error occurs.
"""
raise NotImplementedError()
@_abstractmethod
def get_user(self, token: HashedToken) -> Tuple[Username, bool]:
"""
Get the user, if any, associated with a hashed token.
:param token: the hashed token.
:raises TypeError: if the token is None.
:raises InvalidTokenError: if the token does not exist in the storage system.
:raises IDMappingStorageError: if an unexpected error occurs.
:returns: a tuple of the username corresponding to the token and a boolean denoting
whether the user is an admin or not.
"""
raise NotImplementedError()
@_abstractmethod
def get_users(self) -> Dict[Username, bool]:
"""
Get all the users in the system.
:raises IDMappingStorageError: if an unexpected error occurs.
:returns: a mapping of username to a boolean denoting whether the user is an admin or not.
"""
raise NotImplementedError()
@_abstractmethod
def user_exists(self, username: Username) -> bool:
'''
Check if a user exist in the system. Returns True if so.
:param username: the username to check.
:raises TypeError: if the username is None.
'''
raise NotImplementedError()
@_abstractmethod
def create_namespace(self, namespace_id: NamespaceID) -> None:
"""
Create a new namespace. Once created, namespaces cannot be removed.
:param namespace_id: The namespace to create.
:raises TypeError: if the namespace ID is None.
:raises NamespaceExistsError: if the namespace already exists.
"""
raise NotImplementedError()
@_abstractmethod
def add_user_to_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
"""
Add a user to a namespace, giving them administration rights. A noop occurs if the user
is already an administrator for the namespace.
:param namespace_id: the namespace to modify.
:param admin_user: the user.
:raises TypeError: if any of the arguments are None.
:raises NoSuchNamespaceError: if the namespace does not exist.
:raises UserExistsError: if the user already administrates the namespace.
"""
raise NotImplementedError()
@_abstractmethod
def remove_user_from_namespace(self, namespace_id: NamespaceID, admin_user: User) -> None:
"""
Remove a user from a namespace, removing their administration rights.
:param namespace_id: the namespace to modify.
:param admin_user: the user.
:raises TypeError: if any of the arguments are None.
:raises NoSuchNamespaceError: if the namespace does not exist.
:raises NoSuchUserError: if the user does not administrate the namespace.
"""
raise NotImplementedError()
@_abstractmethod
def set_namespace_publicly_mappable(self, namespace_id: NamespaceID, publicly_mappable: bool
) -> None:
"""
Set the publicly mappable flag on a namespace.
:param namespace_id: The namespace to alter.
:param publicly_mappable: True to set the namespace to publicly mappable, False or None
to prevent public mapping.
:raises TypeError: if namespace_id is None.
:raises NoSuchNamespaceError: if the namespace does not exist.
"""
raise NotImplementedError()
@_abstractmethod
def get_namespaces(self, nids: Iterable[NamespaceID]=None) -> Set[Namespace]:
"""
Get all the namespaces in the system.
:param ids: specific namespaces to get. By default all namespaces are returned.
:raises TypeError: if nids contains None.
:raises NoSuchNamespaceError: if any of the namespaces in the nids parameter do not
exist
"""
raise NotImplementedError()
@_abstractmethod
def get_namespace(self, namespace_id: NamespaceID) -> Namespace:
"""
Get a particular namespace.
:param namespace_id: the id of the namespace to get.
:raises TypeError: if the namespace ID is None.
:raises NoSuchNamespaceError: if the namespace does not exist.
"""
raise NotImplementedError()
@_abstractmethod
def add_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> None:
"""
Create a mapping from one namespace to another.
Note that this method does NOT check for the existence of the namespaces.
If the mapping already exists, no further action is taken.
:param primary_OID: the primary namespace/ID combination.
:param secondary_OID: the secondary namespace/ID combination.
:raise TypeError: if any of the arguments are None.
:raise ValueError: if the namespace IDs are the same.
"""
raise NotImplementedError()
@_abstractmethod
def remove_mapping(self, primary_OID: ObjectID, secondary_OID: ObjectID) -> bool:
"""
Remove a mapping from one namespace to another. Returns true if a mapping was removed,
false otherwise.
:param primary_OID: the primary namespace/ID combination.
:param secondary_OID: the secondary namespace/ID combination.
:raise TypeError: if any of the arguments are None.
"""
raise NotImplementedError()
@_abstractmethod
def find_mappings(self, oid: ObjectID, ns_filter: Iterable[NamespaceID]=None
) -> Tuple[Set[ObjectID], Set[ObjectID]]:
"""
Find mappings given a namespace / id combination.
If the namespace or id does not exist, no results will be returned. The namespaces in the
filter are ignored if they do not exist.
:param oid: the namespace / id combination to match against.
:param ns_filter: a list of namespaces with which to filter the results. Only results in
these namespaces will be returned.
:returns: a tuple of sets of object IDs. The first set in the tuple contains mappings
where the provided object ID is the primary object ID, and the second set contains
mappings where the provided object ID is the secondary object ID.
:raise TypeError: if the object ID is None or the filter contains None.
"""
raise NotImplementedError()
| en | 0.6472 | Interface for a storage system for ID mappings. # it'd be nice if you could just pragma: no cover the entire file, but that doesn't seem to work # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover An interface for a storage system for ID mappings. All methods are abstract. Create a user. Once created, users cannot be removed. The client programmer is responsible for ensuring that the token provided does not already exist in the database. :param username: the user name. :param token: the user's token after applying a hash function. :raises ValueError: if the token already exists in the database. :raises TypeError: if any of the arguments are None. :raises UserExistsError: if the user already exists. :raises IDMappingStorageError: if an unexpected error occurs. Mark a user as a system admin. Or not. :param username: the name of the user to alter. :param admin: True to give the user admin privileges, False to remove them. If the user is already in the given state, no further action is taken. :raises TypeError: if the usename is None. Update an existing user's token. :param username: the user name. :param token: the user's token after applying a hash function. :raises ValueError: if the token already exists in the database. :raises TypeError: if any of the arguments are None. :raises NoSuchUserError: if the user does not exist. :raises IDMappingStorageError: if an unexpected error occurs. Get the user, if any, associated with a hashed token. :param token: the hashed token. :raises TypeError: if the token is None. :raises InvalidTokenError: if the token does not exist in the storage system. :raises IDMappingStorageError: if an unexpected error occurs. :returns: a tuple of the username corresponding to the token and a boolean denoting whether the user is an admin or not. Get all the users in the system. 
:raises IDMappingStorageError: if an unexpected error occurs. :returns: a mapping of username to a boolean denoting whether the user is an admin or not. Check if a user exist in the system. Returns True if so. :param username: the username to check. :raises TypeError: if the username is None. Create a new namespace. Once created, namespaces cannot be removed. :param namespace_id: The namespace to create. :raises TypeError: if the namespace ID is None. :raises NamespaceExistsError: if the namespace already exists. Add a user to a namespace, giving them administration rights. A noop occurs if the user is already an administrator for the namespace. :param namespace_id: the namespace to modify. :param admin_user: the user. :raises TypeError: if any of the arguments are None. :raises NoSuchNamespaceError: if the namespace does not exist. :raises UserExistsError: if the user already administrates the namespace. Remove a user from a namespace, removing their administration rights. :param namespace_id: the namespace to modify. :param admin_user: the user. :raises TypeError: if any of the arguments are None. :raises NoSuchNamespaceError: if the namespace does not exist. :raises NoSuchUserError: if the user does not administrate the namespace. Set the publicly mappable flag on a namespace. :param namespace_id: The namespace to alter. :param publicly_mappable: True to set the namespace to publicly mappable, False or None to prevent public mapping. :raises TypeError: if namespace_id is None. :raises NoSuchNamespaceError: if the namespace does not exist. Get all the namespaces in the system. :param ids: specific namespaces to get. By default all namespaces are returned. :raises TypeError: if nids contains None. :raises NoSuchNamespaceError: if any of the namespaces in the nids parameter do not exist Get a particular namespace. :param namespace_id: the id of the namespace to get. :raises TypeError: if the namespace ID is None. 
:raises NoSuchNamespaceError: if the namespace does not exist. Create a mapping from one namespace to another. Note that this method does NOT check for the existence of the namespaces. If the mapping already exists, no further action is taken. :param primary_OID: the primary namespace/ID combination. :param secondary_OID: the secondary namespace/ID combination. :raise TypeError: if any of the arguments are None. :raise ValueError: if the namespace IDs are the same. Remove a mapping from one namespace to another. Returns true if a mapping was removed, false otherwise. :param primary_OID: the primary namespace/ID combination. :param secondary_OID: the secondary namespace/ID combination. :raise TypeError: if any of the arguments are None. Find mappings given a namespace / id combination. If the namespace or id does not exist, no results will be returned. The namespaces in the filter are ignored if they do not exist. :param oid: the namespace / id combination to match against. :param ns_filter: a list of namespaces with which to filter the results. Only results in these namespaces will be returned. :returns: a tuple of sets of object IDs. The first set in the tuple contains mappings where the provided object ID is the primary object ID, and the second set contains mappings where the provided object ID is the secondary object ID. :raise TypeError: if the object ID is None or the filter contains None. | 2.81411 | 3 |
app/controllers/signup_controller.py | alteregoxiv/Task-Handler | 0 | 6619428 | <filename>app/controllers/signup_controller.py
"""
Password generation, hashing and verification
"""
from taskHandler.app.utils.hash import hashed, verify
from taskHandler.app.utils.passwd import genCode
from taskHandler.app.utils.mail import mailSend
from taskHandler.app.models.user_model import get_user_data_by
def email_pwd(email):
    """Generate a one-time password, mail it to *email*, and return its hash.

    NOTE(review): the plaintext code is emailed before being hashed —
    intentional for a signup flow, but worth confirming.
    """
    code = genCode()
    mailSend(email, code)
    return hashed(code)
def verify_pwd(hash_pwd, pwd):
    """Return whether plaintext *pwd* matches the stored hash *hash_pwd*."""
    return verify(pwd, hash_pwd)
def username_avl(username):
    """Return True when *username* is not yet taken by any existing user."""
    # An empty result set means the name is free; truthiness replaces the
    # explicit ``len(...) == 0`` comparison.
    return not get_user_data_by(username=username)
def email_avl(email):
    """Return True when *email* is not yet registered to any existing user."""
    # An empty result set means the address is free; truthiness replaces the
    # explicit ``len(...) == 0`` comparison.
    return not get_user_data_by(email=email)
| <filename>app/controllers/signup_controller.py
"""
Password generation, hashing and verification
"""
from taskHandler.app.utils.hash import hashed, verify
from taskHandler.app.utils.passwd import genCode
from taskHandler.app.utils.mail import mailSend
from taskHandler.app.models.user_model import get_user_data_by
def email_pwd(email):
pwd = genCode()
mailSend(email, pwd)
hash_pwd = hashed(pwd)
return hash_pwd
def verify_pwd(hash_pwd, pwd):
return verify(pwd , hash_pwd)
def username_avl(username):
return len(get_user_data_by(username = username)) == 0
def email_avl(email):
return len(get_user_data_by(email = email)) == 0
| en | 0.840527 | Password generation, hashing and verification | 2.665216 | 3 |
sundries/dataclass/demo2.py | MerleLiuKun/my-python | 1 | 6619429 | """
嵌套字典的数据类
"""
from dataclasses import dataclass, field, fields, is_dataclass
def dicts_to_dataclasses(instance):
    """Inflate every dict-valued, dataclass-typed field of *instance* in place.

    For each field whose declared type is itself a dataclass and whose current
    value is a plain dict, replace the dict with an instance of that dataclass
    built from the dict's items. Fields of other types are left untouched.
    """
    for field_info in fields(type(instance)):
        declared_type = field_info.type
        if is_dataclass(declared_type):
            current = getattr(instance, field_info.name)
            if isinstance(current, dict):
                setattr(instance, field_info.name, declared_type(**current))
@dataclass
class Cover:
    """Cover photo of a page; mirrors the nested ``cover`` object in the demo payload."""
    id: str = None
    cover_id: str = None
    # NOTE(review): the demo payload in __main__ supplies ints (50) for the
    # offsets while these annotations say str; dataclasses do not validate,
    # so both pass — confirm the intended type.
    offset_x: str = field(default=None, repr=False)
    offset_y: str = field(default=None, repr=False)
    source: str = field(default=None, repr=False)  # image URL
@dataclass
class Page:
    """A page record; nested dicts (e.g. ``cover``) are inflated to dataclasses."""
    id: str = None
    about: str = field(default=None, repr=False)
    birthday: str = field(default=None, repr=False)  # e.g. "02/04/2004" in the demo payload
    name: str = None
    username: str = None
    fan_count: int = field(default=None, repr=False)
    cover: Cover = field(default=None, repr=False)
    def __post_init__(self):
        # Inflate any dict-valued dataclass fields (here: ``cover``) into
        # their declared dataclass types right after construction.
        dicts_to_dataclasses(self)
if __name__ == '__main__':
    # Demo: a Graph-API-style payload with a nested ``cover`` dict.
    data = {
        "id": "20531316728",
        "about": "The Facebook Page celebrates how our friends inspire us, support us, and help us discover the world when we connect.",
        "birthday": "02/04/2004",
        "name": "Facebook",
        "username": "facebookapp",
        "fan_count": 214643503,
        "cover": {
            "cover_id": "10158913960541729",
            "offset_x": 50,
            "offset_y": 50,
            "source": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/73087560_10158913960546729_8876113648821469184_o.jpg?_nc_cat=1&_nc_ohc=bAJ1yh0abN4AQkSOGhMpytya2quC_uS0j0BF-XEVlRlgwTfzkL_F0fojQ&_nc_ht=scontent.xx&oh=2964a1a64b6b474e64b06bdb568684da&oe=5E454425",
            "id": "10158913960541729"
        }
    }
    # Load data (the nested cover dict becomes a Cover via __post_init__)
    p = Page(**data)
    print(p.name)
    print(p)
    print(p.cover)
| """
嵌套字典的数据类
"""
from dataclasses import dataclass, field, fields, is_dataclass
def dicts_to_dataclasses(instance):
"""将所有的数据类属性都转化到数据类中"""
cls = type(instance)
for f in fields(cls):
if not is_dataclass(f.type):
continue
value = getattr(instance, f.name)
if not isinstance(value, dict):
continue
new_value = f.type(**value)
setattr(instance, f.name, new_value)
@dataclass
class Cover:
id: str = None
cover_id: str = None
offset_x: str = field(default=None, repr=False)
offset_y: str = field(default=None, repr=False)
source: str = field(default=None, repr=False)
@dataclass
class Page:
id: str = None
about: str = field(default=None, repr=False)
birthday: str = field(default=None, repr=False)
name: str = None
username: str = None
fan_count: int = field(default=None, repr=False)
cover: Cover = field(default=None, repr=False)
def __post_init__(self):
dicts_to_dataclasses(self)
if __name__ == '__main__':
data = {
"id": "20531316728",
"about": "The Facebook Page celebrates how our friends inspire us, support us, and help us discover the world when we connect.",
"birthday": "02/04/2004",
"name": "Facebook",
"username": "facebookapp",
"fan_count": 214643503,
"cover": {
"cover_id": "10158913960541729",
"offset_x": 50,
"offset_y": 50,
"source": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/73087560_10158913960546729_8876113648821469184_o.jpg?_nc_cat=1&_nc_ohc=bAJ1yh0abN4AQkSOGhMpytya2quC_uS0j0BF-XEVlRlgwTfzkL_F0fojQ&_nc_ht=scontent.xx&oh=2964a1a64b6b474e64b06bdb568684da&oe=5E454425",
"id": "10158913960541729"
}
}
# 数据加载
p = Page(**data)
print(p.name)
print(p)
print(p.cover)
| zh | 0.99543 | 嵌套字典的数据类 将所有的数据类属性都转化到数据类中 # 数据加载 | 3.397944 | 3 |
spyke/ecs/components/__init__.py | m4reQ/spyke | 0 | 6619430 | <reponame>m4reQ/spyke
# Re-export all ECS component types; the duplicate second
# ``from .audio import AudioComponent`` line was removed.
from .audio import AudioComponent
from .camera import CameraComponent
from .particleSystem import ParticleSystemComponent
from .sprite import SpriteComponent
from .text import TextComponent
from .transform import TransformComponent
from .tag import TagComponent

# Public API of the components package.
__all__ = (
    'AudioComponent',
    'CameraComponent',
    'ParticleSystemComponent',
    'SpriteComponent',
    'TextComponent',
    'TransformComponent',
    'TagComponent',
)
| from .audio import AudioComponent
from .camera import CameraComponent
from .particleSystem import ParticleSystemComponent
from .sprite import SpriteComponent
from .text import TextComponent
from .transform import TransformComponent
from .tag import TagComponent
from .audio import AudioComponent
__all__ = (
'AudioComponent',
'CameraComponent',
'ParticleSystemComponent',
'SpriteComponent',
'TextComponent',
'TransformComponent',
'TagComponent',
) | none | 1 | 1.288602 | 1 | |
publishers/models.py | Aki-qiu/DATA130039.01-MyBookDB | 3 | 6619431 | from django.db import models
# Create your models here.
class Publishers(models.Model):
    """Publisher directory entry: name, contact details and address.

    The ``verbose_name`` strings are Chinese admin-UI labels and are part of
    runtime behaviour, so they are left untouched (translations in comments).
    """
    name = models.CharField(max_length=100, verbose_name='出版社名')  # publisher name
    phone_number = models.CharField(max_length=20, verbose_name='出版社电话')  # publisher phone
    email = models.EmailField(verbose_name='出版社邮箱')  # publisher email
    contacts = models.CharField(max_length=40, verbose_name='联系人')  # contact person
    address = models.CharField(max_length=60, verbose_name='出版社地址')  # publisher address
    class Meta:
        # 出版社信息 = "publisher information" (admin display name)
        verbose_name = '出版社信息'
        verbose_name_plural = '出版社信息'
    def __str__(self):
        return self.name
| from django.db import models
# Create your models here.
class Publishers(models.Model):
name = models.CharField(max_length=100, verbose_name='出版社名')
phone_number = models.CharField(max_length=20, verbose_name='出版社电话')
email = models.EmailField(verbose_name='出版社邮箱')
contacts = models.CharField(max_length=40, verbose_name='联系人')
address = models.CharField(max_length=60, verbose_name='出版社地址')
class Meta:
verbose_name = '出版社信息'
verbose_name_plural = '出版社信息'
def __str__(self):
return self.name
| en | 0.963489 | # Create your models here. | 2.199747 | 2 |
algo/Experiment_data/7cpu_compare_latest_c.py | allengrr/deadlock_project | 0 | 6619432 | from drawnow import *
from matplotlib import pyplot as plt
import data
import cpu_redo as cp
import cpu6_redo as cp6
import data_for_1cpu as d1cpu
# Four side-by-side panels, one per MEC set-up size (titles set in
# four_mec/five_mec/six_mec/seven_mec below).
fig = plt.figure()
ax1 = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
# One matplotlib format string (colour/line-style/marker) per algorithm.
style = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c--+']
# Maps internal algorithm names to the short LaTeX labels used in legends.
algo_dict = {'RMS+Bankers': r'$ALG_1$',
             'EDF+Bankers': r'$ALG_2$',
             'RMS+wound wait': r'$ALG_3$',
             'RMS+wait die': r'$ALG_4$',
             'EDF+wound wait': r'$ALG_5$',
             'EDF+wait die': r'$ALG_6$'}
def _mov_avg(a1):
ma1 = [] # moving average list
avg1 = 0 # movinf average pointwise
count = 0
for i in range(len(a1)):
count += 1
avg1 = ((count - 1) * avg1 + a1[i]) / count
ma1.append(avg1) # cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return ma1
def x_index(full_list, sel_list):
    """Map each value in *sel_list* to an index into *full_list*.

    The first occurrence of a value resolves to its first index; each repeat
    resolves to the next occurrence after the previously returned one, so
    repeated values walk forward through *full_list*.

    Fixes in this version: the original hard-coded ``499`` as the search stop
    (only correct for 500-sample series) and always re-searched from the
    first occurrence, so a third repeat returned the second index again.

    :param full_list: the list to search in.
    :param sel_list: the values to locate (each occurrence must exist).
    :raises ValueError: if a value (or enough repeats of it) is missing.
    :return: list of indices, parallel to sel_list.
    """
    indices = []
    next_start = {}  # value -> position just past its last returned index
    for value in sel_list:
        idx = full_list.index(value, next_start.get(value, 0))
        indices.append(idx)
        next_start[value] = idx + 1
    return indices
def get_x_y(data, ax, _id, name):
    """Down-sample the running average of *data* to ~20 points and plot on *ax*.

    The legend label embeds the final (overall) average rounded to 3 d.p.;
    the last point is always included even if the stride skips it.
    """
    averages = _mov_avg(data)
    stride = int(len(averages) / 20) + 1
    sampled = averages[::stride]
    if sampled[-1] != averages[-1]:
        sampled.append(averages[-1])
    x_positions = x_index(full_list=averages, sel_list=sampled)
    overall = round(averages[-1], 3)
    return ax.plot(
        x_positions,
        sampled,
        style[_id],
        linewidth=2,
        label=f'{name} (Avg) : {overall}',
    )
def four_mec():
    """Plot the moving CPU utilisation of all six algorithms (4-MEC set-up) on ax1."""
    ax1.grid(True)
    series = [data.cpu_1, data.cpu_3, data.cpu_5, data.cpu_8, data.cpu_11, data.cpu_16]
    labels = list(algo_dict.values())
    # enumerate() gives each series a stable slot; the original _list.index(i)
    # lookup would mislabel duplicate series objects.
    for _id, cpu_data in enumerate(series):
        get_x_y(data=cpu_data, ax=ax1, _id=_id, name=labels[_id])
    ax1.set_title('Moving CPU Utilization for 4 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax1.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax1.set_ylim(top=30)
    ax1.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax1.legend(prop={"size": 14})
    plt.subplot(ax1)
def five_mec():
    """Plot the moving CPU utilisation of the ``_5``-suffixed series on ax3.

    NOTE(review): the data series carry the ``_5`` suffix but the title says
    '6 MEC Set-up' (while six_mec, using ``_6`` data, says '5 MEC'). Either
    the titles or the data lists look swapped — confirm before trusting the
    figure labels. Titles are left unchanged here.
    """
    ax3.grid(True)
    series = [d1cpu.cpu_1_5, data.cpu_3_5, data.cpu_5_5, data.cpu_8_5, data.cpu_11_5, data.cpu_16_5]
    labels = list(algo_dict.values())
    # enumerate() gives each series a stable slot; the original _list.index(i)
    # lookup would mislabel duplicate series objects.
    for _id, cpu_data in enumerate(series):
        get_x_y(data=cpu_data, ax=ax3, _id=_id, name=labels[_id])
    ax3.set_title('Moving CPU Utilization for 6 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax3.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax3.set_ylim(top=30)
    ax3.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax3.legend(prop={"size": 14})
    plt.subplot(ax3)
def six_mec():
    """Plot the moving CPU utilisation of the ``_6``-suffixed series on ax2.

    NOTE(review): the data series carry the ``_6`` suffix but the title says
    '5 MEC Set-up' (while five_mec, using ``_5`` data, says '6 MEC'). Either
    the titles or the data lists look swapped — confirm before trusting the
    figure labels. Titles are left unchanged here.
    """
    ax2.grid(True)
    series = [d1cpu.cpu_1_6, data.cpu_3_6, data.cpu_5_6, data.cpu_8_6, data.cpu_11_6, data.cpu_16_6]
    labels = list(algo_dict.values())
    # enumerate() gives each series a stable slot; the original _list.index(i)
    # lookup would mislabel duplicate series objects.
    for _id, cpu_data in enumerate(series):
        get_x_y(data=cpu_data, ax=ax2, _id=_id, name=labels[_id])
    ax2.set_title('Moving CPU Utilization for 5 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax2.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax2.set_ylim(top=30)
    ax2.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax2.legend(prop={"size": 14})
    plt.subplot(ax2)
def seven_mec():
    """Plot the moving CPU utilisation of all six algorithms (7-MEC set-up) on ax4."""
    ax4.grid(True)
    series = [d1cpu.cpu_1_7, d1cpu.cpu_3_7, d1cpu.cpu_5_7, d1cpu.cpu_8_7, d1cpu.cpu_11_7, d1cpu.cpu_16_7]
    labels = list(algo_dict.values())
    # enumerate() gives each series a stable slot; the original _list.index(i)
    # lookup would mislabel duplicate series objects.
    for _id, cpu_data in enumerate(series):
        get_x_y(data=cpu_data, ax=ax4, _id=_id, name=labels[_id])
    ax4.set_title('Moving CPU Utilization for 7 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax4.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax4.set_ylim(top=30)
    ax4.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax4.legend(prop={"size": 14})
    plt.subplot(ax4)
def plot_graphs():
    """Render all four MEC sub-plots into the shared figure and show it."""
    four_mec()
    five_mec()
    six_mec()
    seven_mec()
    # fig.suptitle('MEC CPU Utilization During Homogeneous Deadlock Experiment')
    plt.show()
def show_graphs():
    """Render the figure by handing plot_graphs to drawnow."""
    drawnow(plot_graphs)
# Draw the figure immediately when the script runs.
show_graphs()
| from drawnow import *
from matplotlib import pyplot as plt
import data
import cpu_redo as cp
import cpu6_redo as cp6
import data_for_1cpu as d1cpu
fig = plt.figure()
ax1 = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
style = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c--+']
algo_dict = {'RMS+Bankers': r'$ALG_1$',
'EDF+Bankers': r'$ALG_2$',
'RMS+wound wait': r'$ALG_3$',
'RMS+wait die': r'$ALG_4$',
'EDF+wound wait': r'$ALG_5$',
'EDF+wait die': r'$ALG_6$'}
def _mov_avg(a1):
ma1 = [] # moving average list
avg1 = 0 # movinf average pointwise
count = 0
for i in range(len(a1)):
count += 1
avg1 = ((count - 1) * avg1 + a1[i]) / count
ma1.append(avg1) # cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return ma1
def x_index(full_list, sel_list):
r_list = []
r_set = set()
for i in sel_list:
if i in r_set:
start = full_list.index(i) + 1
r_list.append(full_list.index(i, start, 499))
else:
r_list.append(full_list.index(i))
r_set.add(i)
return r_list
def get_x_y(data, ax, _id, name):
mv = _mov_avg(data)
pt = mv[0:len(mv):int((len(mv) / 20)) + 1]
if pt[-1] != mv[-1]:
pt.append(mv[-1])
# ptx = [mv.index(i) for i in pt]
ptx = x_index(full_list=mv, sel_list=pt)
return ax.plot(ptx,
pt,
style[_id],
linewidth=2,
label=f'{name} (Avg) : {round(mv[-1], 3)}')
def four_mec():
    """Plot the six algorithms' moving CPU utilization for the 4-MEC set-up on ax1."""
    ax1.grid(True)
    # One data series per algorithm, ordered to match algo_dict's labels.
    _list = [data.cpu_1, data.cpu_3, data.cpu_5, data.cpu_8, data.cpu_11, data.cpu_16]
    labels = list(algo_dict.values())
    for i in _list:
        # _list.index(i) assumes the six series are distinct objects/values.
        get_x_y(data=i, ax=ax1, _id=_list.index(i), name=labels[_list.index(i)])
    ax1.set_title('Moving CPU Utilization for 4 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax1.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax1.set_ylim(top=30)
    ax1.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax1.legend(prop={"size": 14})
    plt.subplot(ax1)
def five_mec():
    """Plot moving CPU utilization on ax3.

    NOTE(review): the data series use the ``_5`` suffix (5-MEC data?) but the
    title says "6 MEC Set-up", and the sibling six_mec() shows the mirror
    mismatch -- confirm whether the titles or the data lists are swapped.
    """
    ax3.grid(True)
    _list = [d1cpu.cpu_1_5, data.cpu_3_5, data.cpu_5_5, data.cpu_8_5, data.cpu_11_5, data.cpu_16_5]
    labels = list(algo_dict.values())
    for i in _list:
        get_x_y(data=i, ax=ax3, _id=_list.index(i), name=labels[_list.index(i)])
    ax3.set_title('Moving CPU Utilization for 6 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax3.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax3.set_ylim(top=30)
    ax3.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax3.legend(prop={"size": 14})
    plt.subplot(ax3)
def six_mec():
    """Plot moving CPU utilization on ax2.

    NOTE(review): the data series use the ``_6`` suffix but the title says
    "5 MEC Set-up" (mirror of the mismatch in five_mec()) -- confirm whether
    the titles or the data lists are swapped.
    """
    ax2.grid(True)
    _list = [d1cpu.cpu_1_6, data.cpu_3_6, data.cpu_5_6, data.cpu_8_6, data.cpu_11_6, data.cpu_16_6]
    labels = list(algo_dict.values())
    for i in _list:
        get_x_y(data=i, ax=ax2, _id=_list.index(i), name=labels[_list.index(i)])
    ax2.set_title('Moving CPU Utilization for 5 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax2.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax2.set_ylim(top=30)
    ax2.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax2.legend(prop={"size": 14})
    plt.subplot(ax2)
def seven_mec():
    """Plot the six algorithms' moving CPU utilization for the 7-MEC set-up on ax4."""
    ax4.grid(True)
    _list = [d1cpu.cpu_1_7, d1cpu.cpu_3_7, d1cpu.cpu_5_7, d1cpu.cpu_8_7, d1cpu.cpu_11_7, d1cpu.cpu_16_7]
    labels = list(algo_dict.values())
    for i in _list:
        get_x_y(data=i, ax=ax4, _id=_list.index(i), name=labels[_list.index(i)])
    ax4.set_title('Moving CPU Utilization for 7 MEC Set-up', fontdict={'weight': 'medium', "size": 14})
    ax4.set_ylabel('Moving CPU %', fontdict={'weight': 'medium', 'size': 13})
    ax4.set_ylim(top=30)
    ax4.set_xlabel('Time Period', fontdict={'weight': 'medium', 'size': 13})
    ax4.legend(prop={"size": 14})
    plt.subplot(ax4)
def plot_graphs():
    """Draw all four MEC subplots (invoked by drawnow on each refresh)."""
    four_mec()
    five_mec()
    six_mec()
    seven_mec()
    # fig.suptitle('MEC CPU Utilization During Homogeneous Deadlock Experiment')
    plt.show()
def show_graphs():
    """Render the figure through drawnow, which redraws via plot_graphs()."""
    drawnow(plot_graphs)
show_graphs()
| en | 0.670281 | # moving average list # movinf average pointwise # cumulative average formula # μ_n=((n-1) μ_(n-1) + x_n)/n # ptx = [mv.index(i) for i in pt] # fig.suptitle('MEC CPU Utilization During Homogeneous Deadlock Experiment') | 2.650143 | 3 |
galvasr2/galvasr_tokenize_words.py | keithachorn-intel/peoples-speech | 0 | 6619433 | <gh_stars>0
import sys
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core.ops import ascii_to_token_id
from lingvo.core.ops import id_to_ascii
from lingvo.core.ops import str_to_vocab_tokens
from lingvo.core.ops import vocab_id_to_token
tf.flags.DEFINE_string('in_words_txt', None,
'Name of input file with each word in vocabulary, new-line delimited.')
tf.flags.DEFINE_string('in_units_txt', None,
'Name of input file with each character in vocabulary, new-line delimited.')
tf.flags.DEFINE_string('out_spelling_txt', None,
'Name of output file. Will be in Kaldi\'s lexicon.txt format')
tf.flags.DEFINE_string('out_spelling_numbers_txt', None,
'Name of output file. Will be in Kaldi\'s lexicon_numbers.txt format')
tf.flags.DEFINE_string('out_units_txt', None,
'Name of output file. Will be in Kaldi\'s units.txt format')
tf.flags.DEFINE_string('space_char', None,
'Space charactr. " " is invalid for openfst.')
FLAGS = tf.flags.FLAGS
UNK_NUMBER = None
def dump_units_txt(in_units_txt: str, out_units_txt: str):
    """Convert a newline-delimited unit (character) vocabulary into Kaldi's
    units.txt format: one ``<token> <id>`` pair per line, ids assigned by
    line position.

    The literal space token is rewritten to FLAGS.space_char (" " is invalid
    for OpenFST), and the line number of "<unk>" is recorded in the module
    global UNK_NUMBER for later use in the lexicon.  Asserts that both
    "<unk>" and a space token were present in the input.
    """
    global UNK_NUMBER
    unk_found = False
    space_found = False
    with open(in_units_txt, "r") as src, open(out_units_txt, "w") as dst:
        for token_id, raw in enumerate(src):
            token = raw.rstrip("\n")
            if token == "<unk>":
                unk_found = True
                UNK_NUMBER = token_id
            elif token == " ":
                token = FLAGS.space_char
                space_found = True
            dst.write(f"{token} {token_id}\n")
    assert unk_found
    assert space_found
def main(unused_argv):
    """Entry point: write the Kaldi units.txt, then the lexicon spelling files."""
    dump_units_txt(FLAGS.in_units_txt, FLAGS.out_units_txt)
    dump_spellings()
def dump_spellings():
    """Write Kaldi lexicon.txt / lexicon_numbers.txt for the word list.

    Reads the word vocabulary (lowercased) and the unit vocabulary, tokenizes
    every word into unit ids with lingvo's str_to_vocab_tokens inside a
    tf.function, then writes one line per word: the word followed by its
    spelling as unit tokens (lexicon.txt) and as unit ids
    (lexicon_numbers.txt).  A final "<unk>" entry uses the UNK_NUMBER set by
    dump_units_txt().
    """
    words = []
    with open(FLAGS.in_words_txt, 'r') as words_fh:
        words = words_fh.read().lower().splitlines()
    # if "<unk>" not in words:
    #     words.append("<unk>")
    # We add 2 to account for <s> and (optional) </s> tokens.
    longest_word_length = max(len(word) for word in words) + 2
    print("GALV:", longest_word_length)
    with open(FLAGS.in_units_txt, 'r') as units_fh:
        vocab_tokens = [line.rstrip("\n") for line in units_fh.readlines()]
    print("GALV:", vocab_tokens)

    @tf.function(input_signature=[tf.TensorSpec(shape=[len(words)], dtype=tf.string)])
    def tokenize_words(words_t):
        # Tokenize every word into padded unit-id rows of fixed length.
        padded_tokenized_t, _, paddings_t = str_to_vocab_tokens(
            labels=words_t,
            maxlen=longest_word_length,
            append_eos=True,
            pad_to_maxlen=True,
            vocab_filepath=FLAGS.in_units_txt,
            load_token_ids_from_vocab=False,
            delimiter=''
        )
        # Either lengths or paddings are incorrect.
        lengths_t = py_utils.LengthsFromPaddings(paddings_t)
        ragged_tokenized_t = tf.RaggedTensor.from_tensor(padded_tokenized_t, lengths=lengths_t)
        # Drop start-of-sentence-token
        ragged_tokenized_t = ragged_tokenized_t[:, 1:]
        lengths_t -= 1
        # Map unit ids back to their string tokens for the human-readable lexicon.
        letters_t = vocab_id_to_token(id=ragged_tokenized_t.flat_values,
                                      vocab=vocab_tokens,
                                      load_token_ids_from_vocab=False)
        ragged_letters_t = tf.RaggedTensor.from_row_lengths(letters_t, lengths_t)
        # Is capatilizationt he problem?
        return ragged_tokenized_t, ragged_letters_t

    with tf.Session() as session:
        spelling_numbers, spelling_letters = session.run(tokenize_words(words))
        spelling_numbers = spelling_numbers.to_list()
        spelling_letters = spelling_letters.to_list()
    with open(FLAGS.out_spelling_txt, "w") as spelling_fh, open(FLAGS.out_spelling_numbers_txt, "w") as spelling_numbers_fh:
        for word, numbers, letters in zip(words, spelling_numbers, spelling_letters):
            if isinstance(letters, list):
                # NOTE(review): this joins the characters of *word*, not the
                # entries of *letters* -- looks like it should iterate
                # ``letters``; confirm intended behavior.
                letters_str = " ".join([str(letter) for letter in word])
            else:
                letters_str = letters
            numbers_str = " ".join([str(number) for number in numbers])
            spelling_fh.write(f"{word} {letters_str}\n")
            spelling_numbers_fh.write(f"{word} {numbers_str}\n")
        spelling_fh.write("<unk> <unk>\n")
        spelling_numbers_fh.write(f"<unk> {UNK_NUMBER}\n")
spelling_numbers_fh.write(f"<unk> {UNK_NUMBER}\n")
if __name__ == '__main__':
    # All six flags are mandatory; FLAGS(sys.argv) parses them before app.run.
    tf.flags.mark_flag_as_required('in_words_txt')
    tf.flags.mark_flag_as_required('in_units_txt')
    tf.flags.mark_flag_as_required('out_spelling_txt')
    tf.flags.mark_flag_as_required('out_spelling_numbers_txt')
    tf.flags.mark_flag_as_required('out_units_txt')
    tf.flags.mark_flag_as_required('space_char')
    FLAGS(sys.argv)
    tf.app.run(main)
| import sys
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core.ops import ascii_to_token_id
from lingvo.core.ops import id_to_ascii
from lingvo.core.ops import str_to_vocab_tokens
from lingvo.core.ops import vocab_id_to_token
tf.flags.DEFINE_string('in_words_txt', None,
'Name of input file with each word in vocabulary, new-line delimited.')
tf.flags.DEFINE_string('in_units_txt', None,
'Name of input file with each character in vocabulary, new-line delimited.')
tf.flags.DEFINE_string('out_spelling_txt', None,
'Name of output file. Will be in Kaldi\'s lexicon.txt format')
tf.flags.DEFINE_string('out_spelling_numbers_txt', None,
'Name of output file. Will be in Kaldi\'s lexicon_numbers.txt format')
tf.flags.DEFINE_string('out_units_txt', None,
'Name of output file. Will be in Kaldi\'s units.txt format')
tf.flags.DEFINE_string('space_char', None,
'Space charactr. " " is invalid for openfst.')
FLAGS = tf.flags.FLAGS
UNK_NUMBER = None
def dump_units_txt(in_units_txt: str, out_units_txt: str):
with open(in_units_txt, "r") as in_fh, open(out_units_txt, "w") as out_fh:
seen_unk = False
seen_space = False
for i, line in enumerate(in_fh):
line = line.rstrip("\n")
if line == "<unk>":
seen_unk = True
global UNK_NUMBER
UNK_NUMBER = i
if line == " ":
line = FLAGS.space_char
seen_space = True
out_fh.write(f"{line} {i}\n")
assert seen_unk
assert seen_space
def main(unused_argv):
dump_units_txt(FLAGS.in_units_txt, FLAGS.out_units_txt)
dump_spellings()
def dump_spellings():
words = []
with open(FLAGS.in_words_txt, 'r') as words_fh:
words = words_fh.read().lower().splitlines()
# if "<unk>" not in words:
# words.append("<unk>")
# We add 2 to account for <s> and (optional) </s> tokens.
longest_word_length = max(len(word) for word in words) + 2
print("GALV:", longest_word_length)
with open(FLAGS.in_units_txt, 'r') as units_fh:
vocab_tokens = [line.rstrip("\n") for line in units_fh.readlines()]
print("GALV:", vocab_tokens)
@tf.function(input_signature=[tf.TensorSpec(shape=[len(words)], dtype=tf.string)])
def tokenize_words(words_t):
padded_tokenized_t, _, paddings_t = str_to_vocab_tokens(
labels=words_t,
maxlen=longest_word_length,
append_eos=True,
pad_to_maxlen=True,
vocab_filepath=FLAGS.in_units_txt,
load_token_ids_from_vocab=False,
delimiter=''
)
# Either lengths or paddings are incorrect.
lengths_t = py_utils.LengthsFromPaddings(paddings_t)
ragged_tokenized_t = tf.RaggedTensor.from_tensor(padded_tokenized_t, lengths=lengths_t)
# Drop start-of-sentence-token
ragged_tokenized_t = ragged_tokenized_t[:, 1:]
lengths_t -= 1
letters_t = vocab_id_to_token(id=ragged_tokenized_t.flat_values,
vocab=vocab_tokens,
load_token_ids_from_vocab=False)
ragged_letters_t = tf.RaggedTensor.from_row_lengths(letters_t, lengths_t)
# Is capatilizationt he problem?
return ragged_tokenized_t, ragged_letters_t
with tf.Session() as session:
spelling_numbers, spelling_letters = session.run(tokenize_words(words))
spelling_numbers = spelling_numbers.to_list()
spelling_letters = spelling_letters.to_list()
with open(FLAGS.out_spelling_txt, "w") as spelling_fh, open(FLAGS.out_spelling_numbers_txt, "w") as spelling_numbers_fh:
for word, numbers, letters in zip(words, spelling_numbers, spelling_letters):
if isinstance(letters, list):
letters_str = " ".join([str(letter) for letter in word])
else:
letters_str = letters
numbers_str = " ".join([str(number) for number in numbers])
spelling_fh.write(f"{word} {letters_str}\n")
spelling_numbers_fh.write(f"{word} {numbers_str}\n")
spelling_fh.write("<unk> <unk>\n")
spelling_numbers_fh.write(f"<unk> {UNK_NUMBER}\n")
if __name__ == '__main__':
tf.flags.mark_flag_as_required('in_words_txt')
tf.flags.mark_flag_as_required('in_units_txt')
tf.flags.mark_flag_as_required('out_spelling_txt')
tf.flags.mark_flag_as_required('out_spelling_numbers_txt')
tf.flags.mark_flag_as_required('out_units_txt')
tf.flags.mark_flag_as_required('space_char')
FLAGS(sys.argv)
tf.app.run(main) | en | 0.599049 | # if "<unk>" not in words: # words.append("<unk>") # We add 2 to account for <s> and (optional) </s> tokens. # Either lengths or paddings are incorrect. # Drop start-of-sentence-token # Is capatilizationt he problem? | 2.399376 | 2 |
setup.py | allenai/beakerstore | 0 | 6619434 | <filename>setup.py
from setuptools import setup
# Load the package version from beakerstore/version.py without importing the
# package itself (avoids pulling in runtime dependencies at build time).
version = {}
with open('beakerstore/version.py') as v:
    exec(v.read(), version)
# TODO: license
setup(
    name='beakerstore',
    version=version['__version__'],
    description='Local store for Beaker datasets and files.',
    packages=['beakerstore'],
    url='https://github.com/allenai/beakerstore',
    author='<NAME>',
    author_email='<EMAIL>',
    python_requires='>=3',
    install_requires=[
        'requests >= 2.22.0'
    ]
)
| <filename>setup.py
from setuptools import setup
version = {}
with open('beakerstore/version.py') as v:
exec(v.read(), version)
# TODO: license
setup(
name='beakerstore',
version=version['__version__'],
description='Local store for Beaker datasets and files.',
packages=['beakerstore'],
url='https://github.com/allenai/beakerstore',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=3',
install_requires=[
'requests >= 2.22.0'
]
)
| en | 0.048283 | # TODO: license | 1.334731 | 1 |
app/models.py | Globe-Eater/OLI_2 | 0 | 6619435 | <filename>app/models.py
from datetime import datetime
from flask import current_app, request, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from . import login_manager
from . import db
class Permission:
    """Bit-flag constants OR-ed together into a Role's ``permissions`` bitmask."""
    SEARCH = 1
    ENTRY = 2
    EDIT = 4
    MODERATE = 8
    ADMIN = 16
class Role(db.Model):
    """A named permission set; every user is assigned one role.

    ``permissions`` is a bitmask of :class:`Permission` flag values.
    """
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Role assigned to newly registered users (exactly one role should have this set).
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __init__(self, **kwargs):
        super(Role, self).__init__(**kwargs)
        if self.permissions is None:
            # BUG FIX: was ``self.premissions = 0`` (typo), which created a
            # stray attribute and left the real ``permissions`` column as
            # None, breaking the bitwise checks in has_permission().
            self.permissions = 0

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles in the database (idempotent)."""
        # Duplicate Permission.EDIT entries removed from the original lists;
        # add_permission() made them no-ops anyway.
        roles = {
            'User': [Permission.SEARCH, Permission.ENTRY],
            'Moderator': [Permission.SEARCH, Permission.ENTRY, Permission.EDIT,
                          Permission.MODERATE],
            'Administrator': [Permission.SEARCH, Permission.ENTRY, Permission.EDIT,
                              Permission.MODERATE, Permission.ADMIN]
        }
        default_role = 'User'
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.reset_permissions()
            for perm in roles[r]:
                role.add_permission(perm)
            role.default = (role.name == default_role)
            db.session.add(role)
        db.session.commit()

    def add_permission(self, perm):
        """Set *perm* in the bitmask if not already present."""
        if not self.has_permission(perm):
            self.permissions += perm

    def remove_permission(self, perm):
        """Clear *perm* from the bitmask if present."""
        if self.has_permission(perm):
            self.permissions -= perm

    def reset_permissions(self):
        """Clear all permissions."""
        self.permissions = 0

    def has_permission(self, perm):
        """Return True if every bit of *perm* is set in the bitmask."""
        return self.permissions & perm == perm

    def __repr__(self):
        return '<Role %r>' % self.name
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the User for the id stored in the session."""
    return User.query.get(int(user_id))
class AnonymousUser(AnonymousUserMixin):
    """Permission-less stand-in used by Flask-Login for unauthenticated visitors."""
    def can(self, permissions):
        return False
    def is_administrator(self):
        return False
class User(UserMixin, db.Model):
    """An authenticated account, linked to a Role and the 'hpr' records it authored.

    NOTE(review): ``Serializer`` is used below but no import for it is
    visible in this module -- presumably itsdangerous'
    TimedJSONWebSignatureSerializer; confirm the import exists at module
    level.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    posts = db.relationship('hpr', backref='user', lazy='dynamic')

    def __init__(self, **kwargs):
        """Assign the Administrator role to the configured ADMIN email, else the default role."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['ADMIN']:
                self.role = Role.query.filter_by(name='Administrator').first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()

    @property
    def password(self):
        """Write-only attribute: reading the plaintext password is an error."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Only the salted hash is ever stored.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confirm(self, token):
        """Validate a confirmation *token* and mark this account confirmed.

        Returns False on any invalid/expired token or id mismatch.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except: don't swallow SystemExit etc.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id}).decode('utf-8')

    @staticmethod
    def reset_password(token, new_password):
        """Reset the password of the user encoded in *token*; return success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except
            return False
        user = User.query.get(data.get('reset'))
        if user is None:
            return False
        # BUG FIX: was ``user.password = <PASSWORD>`` (invalid placeholder,
        # a syntax error); assign the supplied new password so the hash is
        # actually updated.
        user.password = new_password
        db.session.add(user)
        return True

    def change_email(self, token):
        """Apply a signed email-change *token*; return success.

        Rejects id mismatches, missing new addresses, and addresses already
        in use.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # NOTE(review): neither an ``avatar_hash`` column nor ``gravatar_hash()``
        # is defined in the visible part of this class -- confirm they exist.
        self.avatar_hash = self.gravatar_hash()
        db.session.add(self)
        return True

    def can(self, perm):
        """Return True if this user's role grants *perm*."""
        return self.role is not None and self.role.has_permission(perm)

    def is_administrator(self):
        """Return True if this user's role grants ADMIN."""
        return self.can(Permission.ADMIN)

    def ping(self):
        """Refresh ``last_seen`` (called on each authenticated request)."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def to_json(self):
        """Return the public JSON representation served by the API."""
        json_user = {
            'url': url_for('api.get_user', id=self.id),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts_url': url_for('api.get_user_posts', id=self.id),
            'followed_posts_url': url_for('api.get_user_followed_posts',
                                          id=self.id),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Return a signed API auth token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('utf-8')

    @staticmethod
    def verify_auth_token(token):
        """Return the User encoded in an API auth *token*, or None if invalid."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # narrowed from bare except
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class image(db.Model):
    """A picture attached to a historic property record (hpr)."""
    __tablename__ = 'image'
    index = db.Column(db.Integer, primary_key=True)
    # NOTE(review): Text column -- presumably a file path or encoded blob; confirm.
    picture = db.Column(db.Text)
    prop_id = db.Column(db.Integer, db.ForeignKey('hpr.objectid'))
    prop = db.relationship('hpr', foreign_keys=prop_id)
class hpr(db.Model):
    """Historic Property Record: one surveyed property/resource.

    Column names mirror the legacy survey schema, hence the terse names.
    """
    __tablename__ = 'hpr'
    index = db.Column(db.Integer)
    objectid = db.Column(db.Integer, primary_key=True)
    # --- Identification and location ---
    propname = db.Column(db.String())
    resname = db.Column(db.String())
    address = db.Column(db.String())
    city = db.Column(db.String())
    vicinity = db.Column(db.String())
    countycd = db.Column(db.Float())
    lot = db.Column(db.String())
    block = db.Column(db.String())
    platename = db.Column(db.String())
    section = db.Column(db.String())
    township = db.Column(db.String())
    # NOTE: shadows the ``range`` builtin inside this class body (legacy column name).
    range = db.Column(db.String())
    # --- Resource classification ---
    restype = db.Column(db.String())
    hist_func = db.Column(db.String())
    curr_func = db.Column(db.String())
    areasg_1 = db.Column(db.String())
    areasg_2 = db.Column(db.String())
    desc_seg = db.Column(db.String())
    # --- Survey provenance ---
    doc_source = db.Column(db.String())
    name_prep = db.Column(db.String())
    survey_pro = db.Column(db.String())
    projectname = db.Column(db.String())
    date_prep = db.Column(db.String())
    photograph = db.Column(db.String())
    year = db.Column(db.String())
    # --- Construction and architecture ---
    arch_build = db.Column(db.String())
    year_build = db.Column(db.String())
    orig_site = db.Column(db.String())
    datemoved = db.Column(db.String())
    fromwhere = db.Column(db.String())
    accessible = db.Column(db.String())
    arch_style = db.Column(db.String())
    other_arch = db.Column(db.String())
    foun_mat = db.Column(db.Float())
    roof_type = db.Column(db.String())
    roof_mat = db.Column(db.Float())
    wall_mat_1 = db.Column(db.Float())
    wall_mat_2 = db.Column(db.String())
    window_typ = db.Column(db.String())
    window_mat = db.Column(db.Float())
    door_typ = db.Column(db.String())
    door_mat = db.Column(db.Float())
    exter_fea = db.Column(db.String())
    inter_fea = db.Column(db.String())
    dec_detail = db.Column(db.String())
    condition = db.Column(db.Float())
    des_res = db.Column(db.String())
    comments = db.Column(db.String())
    placement = db.Column(db.String())
    lonr = db.Column(db.String())
    continuation = db.Column(db.String())
    nrdata = db.Column(db.String())
    date_updated = db.Column(db.String())
    # --- Geolocation ---
    lat = db.Column(db.Float())
    long = db.Column(db.Float())
    utm_zone = db.Column(db.Float())
    easting = db.Column(db.String())
    northing = db.Column(db.String())
    p_b_c = db.Column(db.String())
    year_closed = db.Column(db.Float())
    # --- Workflow / review bookkeeping ---
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    duplicate_check = db.Column(db.String())
    duplicate_check_date = db.Column(db.String())
    duplicate_check_user = db.Column(db.Float())
    duplicate_check_comments = db.Column(db.String())
    approved_shpo = db.Column(db.Float())
| <filename>app/models.py
from datetime import datetime
from flask import current_app, request, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from . import login_manager
from . import db
class Permission:
SEARCH = 1
ENTRY = 2
EDIT = 4
MODERATE = 8
ADMIN = 16
class Role(db.Model):
    """A named permission set; every user is assigned one role.

    ``permissions`` is a bitmask of :class:`Permission` flag values.
    """
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Role assigned to newly registered users (exactly one role should have this set).
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __init__(self, **kwargs):
        super(Role, self).__init__(**kwargs)
        if self.permissions is None:
            # BUG FIX: was ``self.premissions = 0`` (typo), which created a
            # stray attribute and left the real ``permissions`` column as
            # None, breaking the bitwise checks in has_permission().
            self.permissions = 0

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles in the database (idempotent)."""
        # Duplicate Permission.EDIT entries removed from the original lists;
        # add_permission() made them no-ops anyway.
        roles = {
            'User': [Permission.SEARCH, Permission.ENTRY],
            'Moderator': [Permission.SEARCH, Permission.ENTRY, Permission.EDIT,
                          Permission.MODERATE],
            'Administrator': [Permission.SEARCH, Permission.ENTRY, Permission.EDIT,
                              Permission.MODERATE, Permission.ADMIN]
        }
        default_role = 'User'
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.reset_permissions()
            for perm in roles[r]:
                role.add_permission(perm)
            role.default = (role.name == default_role)
            db.session.add(role)
        db.session.commit()

    def add_permission(self, perm):
        """Set *perm* in the bitmask if not already present."""
        if not self.has_permission(perm):
            self.permissions += perm

    def remove_permission(self, perm):
        """Clear *perm* from the bitmask if present."""
        if self.has_permission(perm):
            self.permissions -= perm

    def reset_permissions(self):
        """Clear all permissions."""
        self.permissions = 0

    def has_permission(self, perm):
        """Return True if every bit of *perm* is set in the bitmask."""
        return self.permissions & perm == perm

    def __repr__(self):
        return '<Role %r>' % self.name
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
class User(UserMixin, db.Model):
    """An authenticated account, linked to a Role and the 'hpr' records it authored.

    NOTE(review): ``Serializer`` is used below but no import for it is
    visible in this module -- presumably itsdangerous'
    TimedJSONWebSignatureSerializer; confirm the import exists at module
    level.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    posts = db.relationship('hpr', backref='user', lazy='dynamic')

    def __init__(self, **kwargs):
        """Assign the Administrator role to the configured ADMIN email, else the default role."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['ADMIN']:
                self.role = Role.query.filter_by(name='Administrator').first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()

    @property
    def password(self):
        """Write-only attribute: reading the plaintext password is an error."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Only the salted hash is ever stored.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confirm(self, token):
        """Validate a confirmation *token* and mark this account confirmed.

        Returns False on any invalid/expired token or id mismatch.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except: don't swallow SystemExit etc.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id}).decode('utf-8')

    @staticmethod
    def reset_password(token, new_password):
        """Reset the password of the user encoded in *token*; return success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except
            return False
        user = User.query.get(data.get('reset'))
        if user is None:
            return False
        # BUG FIX: was ``user.password = <PASSWORD>`` (invalid placeholder,
        # a syntax error); assign the supplied new password so the hash is
        # actually updated.
        user.password = new_password
        db.session.add(user)
        return True

    def change_email(self, token):
        """Apply a signed email-change *token*; return success.

        Rejects id mismatches, missing new addresses, and addresses already
        in use.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # narrowed from bare except
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # NOTE(review): neither an ``avatar_hash`` column nor ``gravatar_hash()``
        # is defined in the visible part of this class -- confirm they exist.
        self.avatar_hash = self.gravatar_hash()
        db.session.add(self)
        return True

    def can(self, perm):
        """Return True if this user's role grants *perm*."""
        return self.role is not None and self.role.has_permission(perm)

    def is_administrator(self):
        """Return True if this user's role grants ADMIN."""
        return self.can(Permission.ADMIN)

    def ping(self):
        """Refresh ``last_seen`` (called on each authenticated request)."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def to_json(self):
        """Return the public JSON representation served by the API."""
        json_user = {
            'url': url_for('api.get_user', id=self.id),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts_url': url_for('api.get_user_posts', id=self.id),
            'followed_posts_url': url_for('api.get_user_followed_posts',
                                          id=self.id),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Return a signed API auth token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('utf-8')

    @staticmethod
    def verify_auth_token(token):
        """Return the User encoded in an API auth *token*, or None if invalid."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # narrowed from bare except
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class image(db.Model):
__tablename__ = 'image'
index = db.Column(db.Integer, primary_key=True)
picture = db.Column(db.Text)
prop_id = db.Column(db.Integer, db.ForeignKey('hpr.objectid'))
prop = db.relationship('hpr', foreign_keys=prop_id)
class hpr(db.Model):
__tablename__ = 'hpr'
index = db.Column(db.Integer)
objectid = db.Column(db.Integer, primary_key=True)
propname = db.Column(db.String())
resname = db.Column(db.String())
address = db.Column(db.String())
city = db.Column(db.String())
vicinity = db.Column(db.String())
countycd = db.Column(db.Float())
lot = db.Column(db.String())
block = db.Column(db.String())
platename = db.Column(db.String())
section = db.Column(db.String())
township = db.Column(db.String())
range = db.Column(db.String())
restype = db.Column(db.String())
hist_func = db.Column(db.String())
curr_func = db.Column(db.String())
areasg_1 = db.Column(db.String())
areasg_2 = db.Column(db.String())
desc_seg = db.Column(db.String())
doc_source = db.Column(db.String())
name_prep = db.Column(db.String())
survey_pro = db.Column(db.String())
projectname = db.Column(db.String())
date_prep = db.Column(db.String())
photograph = db.Column(db.String())
year = db.Column(db.String())
arch_build = db.Column(db.String())
year_build = db.Column(db.String())
orig_site = db.Column(db.String())
datemoved = db.Column(db.String())
fromwhere = db.Column(db.String())
accessible = db.Column(db.String())
arch_style = db.Column(db.String())
other_arch = db.Column(db.String())
foun_mat = db.Column(db.Float())
roof_type = db.Column(db.String())
roof_mat = db.Column(db.Float())
wall_mat_1 = db.Column(db.Float())
wall_mat_2 = db.Column(db.String())
window_typ = db.Column(db.String())
window_mat = db.Column(db.Float())
door_typ = db.Column(db.String())
door_mat = db.Column(db.Float())
exter_fea = db.Column(db.String())
inter_fea = db.Column(db.String())
dec_detail = db.Column(db.String())
condition = db.Column(db.Float())
des_res = db.Column(db.String())
comments = db.Column(db.String())
placement = db.Column(db.String())
lonr = db.Column(db.String())
continuation = db.Column(db.String())
nrdata = db.Column(db.String())
date_updated = db.Column(db.String())
lat = db.Column(db.Float())
long = db.Column(db.Float())
utm_zone = db.Column(db.Float())
easting = db.Column(db.String())
northing = db.Column(db.String())
p_b_c = db.Column(db.String())
year_closed = db.Column(db.Float())
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
duplicate_check = db.Column(db.String())
duplicate_check_date = db.Column(db.String())
duplicate_check_user = db.Column(db.Float())
duplicate_check_comments = db.Column(db.String())
approved_shpo = db.Column(db.Float())
| none | 1 | 2.437051 | 2 | |
lib/modes/mode_phonemes.py | okonomichiyaki/parrot.py | 80 | 6619436 | from lib.detection_strategies import *
import threading
import numpy as np
import pyautogui
from pyautogui import press, hotkey, click, scroll, typewrite, moveRel, moveTo, position, keyUp, keyDown, mouseUp, mouseDown
from time import sleep
from subprocess import call
from lib.system_toggles import toggle_eyetracker, turn_on_sound, mute_sound, toggle_speechrec
from lib.pattern_detector import PatternDetector
from lib.heroes_grammar import *
import os
import pythoncom
from lib.overlay_manipulation import update_overlay_image
class PhonemesMode:
    """Parrot mode that transcribes recognized sound labels into phoneme strings.

    Winning classifier labels (>85% confidence) are accumulated; when
    sustained silence is detected, the sequence is typed out via pyautogui
    as ``->p1/p2/...<-`` followed by Enter, then cleared.
    """
    def __init__(self, modeSwitcher):
        self.mode = "regular"
        self.modeSwitcher = modeSwitcher
        # 'rapid' silence detection: fires when >=70% of recent frames are silence.
        self.detector = PatternDetector({
            'silence': {
                'strategy': 'rapid',
                'sound': 'silence',
                'percentage': 70,
                'intensity': 0
            }
        })
        self.remembered_phonemes = []

    def start( self ):
        """Enter the mode: mute system sound, toggle the eyetracker, reset overlay."""
        mute_sound()
        toggle_eyetracker()
        update_overlay_image( "default" )

    def handle_input( self, dataDicts ):
        """Process one tick of classifier output; returns the detector's tick actions."""
        self.detector.tick( dataDicts )
        # Early escape for performance
        if( self.detector.detect('silence') ):
            # On silence, flush any accumulated phonemes as typed text.
            if( len( self.remembered_phonemes ) > 0 ):
                typewrite("->" + "/".join( self.remembered_phonemes ) + "<-" )
                self.remembered_phonemes = []
                press('enter')
        else:
            # Only the most recent frame's winning label is considered.
            lastDict = dataDicts[ len( dataDicts ) - 1 ]
            for label in lastDict:
                if( lastDict[label]['winner'] == True and lastDict[label]['percent'] > 85 ):
                    self.add_phoneme( label )
        return self.detector.tickActions

    def label_to_phoneme( self, label ):
        """Strip the articulation-category prefix from a classifier label,
        mapping clicks to '*' / '^' and thrills to a '~' prefix."""
        return label.replace( "vowel_", "" ).replace( "approximant_", "" ).replace( "fricative_", "").replace( "semivowel_", "" ).replace( "nasal_", "" ).replace( "stop_", "" ).replace(
            "sibilant_", "" ).replace( "click_alveolar", "*").replace( "click_lateral", "^").replace( "thrill_", "~" )

    def add_phoneme( self, label ):
        """Append the phoneme for *label*, skipping immediate repeats."""
        phoneme = self.label_to_phoneme( label )
        if( len( self.remembered_phonemes ) == 0 or
            self.remembered_phonemes[ len( self.remembered_phonemes ) - 1 ] != phoneme ):
            self.remembered_phonemes.append( phoneme )

    def exit( self ):
        """Leave the mode: restore sound, reset the overlay, toggle the eyetracker."""
        self.mode = "regular"
        turn_on_sound()
        update_overlay_image( "default" )
        toggle_eyetracker()
| from lib.detection_strategies import *
import threading
import numpy as np
import pyautogui
from pyautogui import press, hotkey, click, scroll, typewrite, moveRel, moveTo, position, keyUp, keyDown, mouseUp, mouseDown
from time import sleep
from subprocess import call
from lib.system_toggles import toggle_eyetracker, turn_on_sound, mute_sound, toggle_speechrec
from lib.pattern_detector import PatternDetector
from lib.heroes_grammar import *
import os
import pythoncom
from lib.overlay_manipulation import update_overlay_image
class PhonemesMode:
    """Input mode that transcribes recognized sound labels into phonemes.

    While active, system sound is muted and the eyetracker is toggled.
    Confidently detected sound labels are reduced to short phoneme symbols
    and buffered; a stretch of silence flushes the buffer by typing it out
    (as ``->a/b/c<-``) followed by an enter press.
    """

    def __init__(self, modeSwitcher):
        self.mode = "regular"
        self.modeSwitcher = modeSwitcher
        # Silence must dominate (70%) at near-zero intensity before we flush.
        self.detector = PatternDetector({
            'silence': {
                'strategy': 'rapid',
                'sound': 'silence',
                'percentage': 70,
                'intensity': 0
            }
        })
        # Phonemes collected since the last flush; immediate repeats collapsed.
        self.remembered_phonemes = []

    def start(self):
        """Enter the mode: mute audio, toggle the eyetracker, reset overlay."""
        mute_sound()
        toggle_eyetracker()
        update_overlay_image("default")

    def handle_input(self, dataDicts):
        """Process one batch of detection data; return the detector's actions."""
        self.detector.tick(dataDicts)

        # Early escape for performance
        if self.detector.detect('silence'):
            if len(self.remembered_phonemes) > 0:
                typewrite("->" + "/".join(self.remembered_phonemes) + "<-")
                self.remembered_phonemes = []
                press('enter')
        else:
            # Only trust high-confidence winners of the most recent frame.
            lastDict = dataDicts[-1]
            for label in lastDict:
                # NOTE(review): '== True' kept deliberately in case upstream
                # stores non-bool truthy values in 'winner' — confirm.
                if lastDict[label]['winner'] == True and lastDict[label]['percent'] > 85:
                    self.add_phoneme(label)

        return self.detector.tickActions

    def label_to_phoneme(self, label):
        """Reduce a classifier label such as ``"vowel_a"`` to a phoneme symbol.

        Category prefixes are stripped; clicks map to ``*`` / ``^`` and
        thrills are prefixed with ``~``.
        """
        # Same replace sequence as the original chained-call form, just
        # expressed as data so it is readable and extensible.
        for category in ("vowel_", "approximant_", "fricative_", "semivowel_",
                         "nasal_", "stop_", "sibilant_"):
            label = label.replace(category, "")
        for token, symbol in (("click_alveolar", "*"),
                              ("click_lateral", "^"),
                              ("thrill_", "~")):
            label = label.replace(token, symbol)
        return label

    def add_phoneme(self, label):
        """Buffer the phoneme for *label*, collapsing immediate repeats."""
        phoneme = self.label_to_phoneme(label)
        if not self.remembered_phonemes or self.remembered_phonemes[-1] != phoneme:
            self.remembered_phonemes.append(phoneme)

    def exit(self):
        """Leave the mode and restore sound, overlay and eyetracker state."""
        self.mode = "regular"
        turn_on_sound()
        update_overlay_image("default")
        toggle_eyetracker()
| en | 0.852486 | # Early escape for performance | 2.195819 | 2 |
rest_framework_roles/decorators.py | Pithikos/rest-framework-roles | 19 | 6619437 | from rest_framework_roles import parsing
from rest_framework_roles import exceptions
from rest_framework_roles import patching
DEFAULT_COST = 0
DEFAULT_EXPENSIVE = 50
# ------------------------------------------------------------------------------
def allowed(*roles):
    """
    Allow only given roles to access view. Any other roles will be denied access.
    """
    def decorator(view):
        checkers = parsing.load_roles()
        # Reject any role name that has not been registered.
        for candidate in roles:
            if candidate not in checkers:
                raise exceptions.Misconfigured(f"Invalid role '{candidate}'")
        if hasattr(view, '_view_permissions'):
            raise Exception(f"Unexpected existing '_view_permissions' for '{view}'")
        # Grant each listed role, then fall through to an explicit deny-all.
        permissions = [(True, checkers[role]) for role in roles]
        permissions.append((False, True))
        view._view_permissions = permissions
        # SPECIAL CASE: REST function-based views are wrapped in a generated
        # class; mirror the permissions onto that class so patching finds them.
        if patching.is_callback_rest_function(view):
            view.cls._view_permissions = {view.__name__: view._view_permissions}
        return view
    return decorator
def disallowed(*roles):
    """
    Deny access for given roles. Any other roles will be allowed access.
    """
    def decorator(view):
        checkers = parsing.load_roles()
        # Reject any role name that has not been registered.
        for candidate in roles:
            if candidate not in checkers:
                raise exceptions.Misconfigured(f"Invalid role '{candidate}'")
        if hasattr(view, '_view_permissions'):
            raise Exception(f"Unexpected existing '_view_permissions' for '{view}'")
        # Explicit deny for every listed role; anyone else falls through.
        view._view_permissions = [(False, checkers[role]) for role in roles]
        # SPECIAL CASE: REST function-based views are wrapped in a generated
        # class; mirror the permissions onto that class so patching finds them.
        if patching.is_callback_rest_function(view):
            view.cls._view_permissions = {view.__name__: view._view_permissions}
        return view
    return decorator
# ------------------------------------------------------------------------------
def role_checker(*args, **kwargs):
    """
    Mark a function as a role checker, annotating it with a lookup ``cost``.

    Supports both bare (``@role_checker``) and parameterized
    (``@role_checker(cost=50)``) usage.  Cheaper checkers can then be
    evaluated before expensive ones.
    """
    import functools  # stdlib; local import keeps the module surface unchanged

    cost = kwargs.get('cost', DEFAULT_COST)
    def decorator_role(fn):
        # functools.wraps preserves the checker's __name__/__doc__ so
        # registered checkers remain identifiable during introspection.
        @functools.wraps(fn)
        def wrapped_role(*fn_args, **fn_kwargs):
            return fn(*fn_args, **fn_kwargs)
        wrapped_role.cost = cost
        return wrapped_role
    decorator_role.cost = cost
    # Bare usage: the decorated function arrives as the first positional arg.
    if args and callable(args[0]):
        return decorator_role(*args)
    else:
        return decorator_role
| from rest_framework_roles import parsing
from rest_framework_roles import exceptions
from rest_framework_roles import patching
DEFAULT_COST = 0
DEFAULT_EXPENSIVE = 50
# ------------------------------------------------------------------------------
def allowed(*roles):
    """
    Allow only given roles to access view. Any other roles will be denied access.

    Builds ``_view_permissions`` as (grant_flag, checker) pairs — one grant
    per role — terminated by a catch-all deny.  Unknown role names raise
    ``Misconfigured``.
    """
    def wrapped(fn):
        role_checkers = parsing.load_roles()
        # Check first roles are valid
        for r in roles:
            if r not in role_checkers:
                raise exceptions.Misconfigured(f"Invalid role '{r}'")
        # Refuse to stack permission decorators on the same view.
        if hasattr(fn, '_view_permissions'):
            raise Exception(f"Unexpected existing '_view_permissions' for '{fn}'")
        fn._view_permissions = []
        for role in roles:
            fn._view_permissions.append((True, role_checkers[role]))
        fn._view_permissions.append((False, True))  # disallow anyone else
        # SPECIAL CASE: REST function creates a class with metaprogramming. To adhere to that
        # we need to patch the metaprogrammatically created class
        if patching.is_callback_rest_function(fn):
            fn.cls._view_permissions = {
                fn.__name__: fn._view_permissions
            }
        return fn
    return wrapped
def disallowed(*roles):
    """
    Deny access for given roles. Any other roles will be allowed access.

    Mirror image of ``allowed``: each listed role becomes a (False, checker)
    entry and — unlike ``allowed`` — there is no terminating catch-all, so
    unlisted roles fall through as permitted.
    """
    def wrapped(fn):
        role_checkers = parsing.load_roles()
        # Check first roles are valid
        for r in roles:
            if r not in role_checkers:
                raise exceptions.Misconfigured(f"Invalid role '{r}'")
        # Refuse to stack permission decorators on the same view.
        if hasattr(fn, '_view_permissions'):
            raise Exception(f"Unexpected existing '_view_permissions' for '{fn}'")
        fn._view_permissions = []
        for role in roles:
            fn._view_permissions.append((False, role_checkers[role]))
        # SPECIAL CASE: REST function creates a class with metaprogramming. To adhere to that
        # we need to patch the metaprogrammatically created class
        if patching.is_callback_rest_function(fn):
            fn.cls._view_permissions = {
                fn.__name__: fn._view_permissions
            }
        return fn
    return wrapped
# ------------------------------------------------------------------------------
def role_checker(*args, **kwargs):
    """
    Mark a function as a role checker, annotating it with a lookup ``cost``.

    Supports both bare (``@role_checker``) and parameterized
    (``@role_checker(cost=50)``) usage.  Cheaper checkers can then be
    evaluated before expensive ones.
    """
    import functools  # stdlib; local import keeps the module surface unchanged

    cost = kwargs.get('cost', DEFAULT_COST)
    def decorator_role(fn):
        # functools.wraps preserves the checker's __name__/__doc__ so
        # registered checkers remain identifiable during introspection.
        @functools.wraps(fn)
        def wrapped_role(*fn_args, **fn_kwargs):
            return fn(*fn_args, **fn_kwargs)
        wrapped_role.cost = cost
        return wrapped_role
    decorator_role.cost = cost
    # Bare usage: the decorated function arrives as the first positional arg.
    if args and callable(args[0]):
        return decorator_role(*args)
    else:
        return decorator_role
| en | 0.715039 | # ------------------------------------------------------------------------------ Allow only given roles to access view. Any other roles will be denied access. # Check first roles are valid # disallow anyone else # SPECIAL CASE: REST function creates a class with metaprogramming. To adhere to that # we need to patch the metaprogrammatically created class Deny access for given roles. Any other roles will be allowed access. # Check first roles are valid # SPECIAL CASE: REST function creates a class with metaprogramming. To adhere to that # we need to patch the metaprogrammatically created class # ------------------------------------------------------------------------------ Denote if role checker is cheap | 2.451176 | 2 |
tests/unit/test_kube.py | neuro-inc/platform-monitoring | 0 | 6619438 | import asyncio
from typing import Any
from unittest import mock
import aiohttp
import pytest
from platform_monitoring.kube_client import (
GPUCounter,
GPUCounters,
JobError,
Node,
Pod,
PodContainerStats,
PodPhase,
Resources,
StatsSummary,
)
from platform_monitoring.logs import filter_out_rpc_error
class TestPod:
    """Tests for the ``Pod`` wrapper around raw Kubernetes pod payloads.

    Every test builds a minimal dict shaped like a k8s API response and
    checks one accessor of ``Pod``.
    """

    # --- node placement ---------------------------------------------------

    def test_no_node_name(self) -> None:
        pod = Pod({"spec": {}})
        assert pod.node_name is None

    def test_node_name(self) -> None:
        pod = Pod({"spec": {"nodeName": "testnode"}})
        assert pod.node_name == "testnode"

    # --- container status / id lookup -------------------------------------

    def test_no_status(self) -> None:
        # A payload without a "status" key is treated as invalid input.
        pod = Pod({"spec": {}})
        with pytest.raises(ValueError, match="Missing pod status"):
            pod.get_container_status("testcontainer")

    def test_no_container_status(self) -> None:
        # An unknown container yields an empty status dict, not an error.
        pod = Pod({"spec": {}, "status": {"containerStatuses": []}})
        container_status = pod.get_container_status("testcontainer")
        assert container_status == {}

    def test_container_status(self) -> None:
        pod = Pod(
            {
                "spec": {},
                "status": {
                    "containerStatuses": [{"name": ""}, {"name": "testcontainer"}]
                },
            }
        )
        container_status = pod.get_container_status("testcontainer")
        assert container_status == {"name": "testcontainer"}

    def test_no_container_id(self) -> None:
        pod = Pod(
            {"spec": {}, "status": {"containerStatuses": [{"name": "testcontainer"}]}}
        )
        container_id = pod.get_container_id("testcontainer")
        assert container_id is None

    def test_container_id(self) -> None:
        # The "docker://" runtime prefix is stripped from the raw containerID.
        pod = Pod(
            {
                "spec": {},
                "status": {
                    "containerStatuses": [
                        {
                            "name": "testcontainer",
                            "containerID": "docker://testcontainerid",
                        }
                    ]
                },
            }
        )
        container_id = pod.get_container_id("testcontainer")
        assert container_id == "testcontainerid"

    # --- pod phase ---------------------------------------------------------

    def test_phase(self) -> None:
        pod = Pod({"spec": {}, "status": {"phase": "Running"}})
        assert pod.phase == PodPhase.RUNNING

    def test_is_phase_running_false(self) -> None:
        pod = Pod({"spec": {}, "status": {"phase": "Pending"}})
        assert not pod.is_phase_running

    def test_is_phase_running(self) -> None:
        pod = Pod({"spec": {}, "status": {"phase": "Running"}})
        assert pod.is_phase_running

    # --- resource request aggregation --------------------------------------

    def test_no_resource_requests(self) -> None:
        pod = Pod({"spec": {"containers": [{"resources": {}}]}})
        assert pod.resource_requests == Resources()

    def test_resource_requests_cpu_milicores(self) -> None:
        pod = Pod(
            {"spec": {"containers": [{"resources": {"requests": {"cpu": "100m"}}}]}}
        )
        assert pod.resource_requests == Resources(cpu_m=100)

    def test_resource_requests_cpu_cores(self) -> None:
        # Whole cores are normalized to millicores.
        pod = Pod({"spec": {"containers": [{"resources": {"requests": {"cpu": "1"}}}]}})
        assert pod.resource_requests == Resources(cpu_m=1000)

    def test_resource_requests_memory_mebibytes(self) -> None:
        pod = Pod(
            {
                "spec": {
                    "containers": [{"resources": {"requests": {"memory": "1000Mi"}}}]
                }
            }
        )
        assert pod.resource_requests == Resources(memory_mb=1000)

    def test_resource_requests_memory_gibibytes(self) -> None:
        pod = Pod(
            {"spec": {"containers": [{"resources": {"requests": {"memory": "1Gi"}}}]}}
        )
        assert pod.resource_requests == Resources(memory_mb=1024)

    def test_resource_requests_gpu(self) -> None:
        pod = Pod(
            {
                "spec": {
                    "containers": [{"resources": {"requests": {"nvidia.com/gpu": "1"}}}]
                }
            }
        )
        assert pod.resource_requests == Resources(gpu=1)

    def test_resource_requests_for_multiple_containers(self) -> None:
        # Requests are summed across all containers of the pod.
        pod = Pod(
            {
                "spec": {
                    "containers": [
                        {"resources": {"requests": {"cpu": "0.5", "memory": "512Mi"}}},
                        {
                            "resources": {
                                "requests": {
                                    "cpu": "1",
                                    "memory": "1Gi",
                                    "nvidia.com/gpu": "1",
                                }
                            }
                        },
                    ]
                }
            }
        )
        assert pod.resource_requests == Resources(cpu_m=1500, memory_mb=1536, gpu=1)
class TestPodContainerStats:
    """Tests for ``PodContainerStats.from_primitive`` (kubelet stats dicts)."""

    def test_from_primitive_no_keys(self) -> None:
        # Missing "cpu"/"memory" sections degrade to zeroed stats, never raise.
        payload: dict[str, Any] = {"memory": {}}
        stats = PodContainerStats.from_primitive(payload)
        empty_stats = PodContainerStats(cpu=0.0, memory=0.0)
        assert stats == empty_stats
        payload = {"cpu": {}}
        stats = PodContainerStats.from_primitive(payload)
        assert stats == empty_stats
        payload = {}
        stats = PodContainerStats.from_primitive(payload)
        assert stats == empty_stats

    def test_from_primitive_empty(self) -> None:
        payload: dict[str, Any] = {"cpu": {}, "memory": {}}
        stats = PodContainerStats.from_primitive(payload)
        assert stats == PodContainerStats(cpu=0.0, memory=0.0)

    def test_from_primitive(self) -> None:
        # 1000 nanocores -> 1e-6 cores; 1 MiB workingSet -> memory == 1.0
        # (memory presumably reported in MiB — confirm against implementation).
        payload = {
            "cpu": {"usageNanoCores": 1000},
            "memory": {"workingSetBytes": 1024 * 1024},
        }
        stats = PodContainerStats.from_primitive(payload)
        assert stats == PodContainerStats(cpu=0.000001, memory=1.0)
class TestStatsSummary:
    """Tests for ``StatsSummary`` parsing of kubelet ``/stats/summary`` data."""

    def test_get_pod_container_stats_error_response(self) -> None:
        # A k8s "Status" error document instead of a summary must be rejected
        # at construction time.
        payload: dict[str, Any] = {
            "kind": "Status",
            "apiVersion": "v1",
            "metadata": {},
            "status": "Failure",
            "message": "message",
            "reason": "Forbidden",
            "details": {"name": "default-pool", "kind": "nodes"},
            "code": 403,
        }
        with pytest.raises(JobError, match="Invalid stats summary response"):
            StatsSummary(payload)

    def test_get_pod_container_stats_no_pod(self) -> None:
        # Unknown pod -> None rather than an exception.
        payload: dict[str, Any] = {"pods": []}
        stats = StatsSummary(payload).get_pod_container_stats(
            "namespace", "pod", "container"
        )
        assert stats is None

    def test_get_pod_container_stats_no_containers(self) -> None:
        # Pod present but container missing -> None as well.
        payload = {"pods": [{"podRef": {"namespace": "namespace", "name": "pod"}}]}
        stats = StatsSummary(payload).get_pod_container_stats(
            "namespace", "pod", "container"
        )
        assert stats is None

    def test_get_pod_container_stats(self) -> None:
        payload = {
            "pods": [
                {
                    "podRef": {"namespace": "namespace", "name": "pod"},
                    "containers": [{"name": "container", "cpu": {}, "memory": {}}],
                }
            ]
        }
        stats = StatsSummary(payload).get_pod_container_stats(
            "namespace", "pod", "container"
        )
        assert stats
class TestGPUCounters:
    """Tests for ``GPUCounters``: parsing DCGM Prometheus metrics and
    aggregating per-container GPU stats."""

    def test_parse(self) -> None:
        # Raw DCGM exporter output: HELP/TYPE comment lines must be skipped,
        # metric labels parsed into dicts.
        metrics = """
# HELP DCGM_FI_DEV_GPU_UTIL GPU utilization (in %).
# TYPE DCGM_FI_DEV_GPU_UTIL gauge
# HELP DCGM_FI_DEV_FB_USED Framebuffer memory used (in MiB).
# TYPE DCGM_FI_DEV_FB_USED gauge
DCGM_FI_DEV_GPU_UTIL{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 1
DCGM_FI_DEV_FB_USED{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 10
DCGM_FI_DEV_GPU_UTIL{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 2
DCGM_FI_DEV_FB_USED{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 20
DCGM_FI_DEV_GPU_UTIL{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 3
DCGM_FI_DEV_FB_USED{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 30
"""
        counters = GPUCounters.parse(metrics)
        assert counters == GPUCounters(
            counters=[
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=1,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=10,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=2,
                    labels={
                        "gpu": "1",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=20,
                    labels={
                        "gpu": "1",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=3,
                    labels={
                        "gpu": "2",
                        "namespace": "platform-jobs",
                        "pod": "job-1",
                        "container": "job-1",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=30,
                    labels={
                        "gpu": "2",
                        "namespace": "platform-jobs",
                        "pod": "job-1",
                        "container": "job-1",
                    },
                ),
            ]
        )

    def test_get_pod_container_stats_utilization(self) -> None:
        counters = GPUCounters(
            counters=[
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=1,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=4,
                    labels={
                        "gpu": "1",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=2,
                    labels={
                        "gpu": "2",
                        "namespace": "platform-jobs",
                        "pod": "job-1",
                        "container": "job-1",
                    },
                ),
            ]
        )
        # job-0 owns GPUs with util 1 and 4 -> expected 2, consistent with an
        # integer mean across the container's GPUs (confirm in implementation).
        stats = counters.get_pod_container_stats(
            namespace_name="platform-jobs", pod_name="job-0", container_name="job-0"
        )
        assert stats.utilization == 2
        stats = counters.get_pod_container_stats(
            namespace_name="platform-jobs", pod_name="job-1", container_name="job-1"
        )
        assert stats.utilization == 2

    def test_get_pod_container_stats_memory_used(self) -> None:
        counters = GPUCounters(
            counters=[
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=1,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=2,
                    labels={
                        "gpu": "1",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=3,
                    labels={
                        "gpu": "2",
                        "namespace": "platform-jobs",
                        "pod": "job-1",
                        "container": "job-1",
                    },
                ),
            ]
        )
        # Framebuffer usage is summed across job-0's GPUs (1 + 2); with no
        # utilization counters present, utilization defaults to 0.
        stats = counters.get_pod_container_stats(
            namespace_name="platform-jobs", pod_name="job-0", container_name="job-0"
        )
        assert stats.utilization == 0
        assert stats.memory_used_mb == 3

    def test_get_pod_container_stats_unknown_job(self) -> None:
        counters = GPUCounters(
            counters=[
                GPUCounter(
                    name="DCGM_FI_DEV_GPU_UTIL",
                    value=1,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
                GPUCounter(
                    name="DCGM_FI_DEV_FB_USED",
                    value=1,
                    labels={
                        "gpu": "0",
                        "namespace": "platform-jobs",
                        "pod": "job-0",
                        "container": "job-0",
                    },
                ),
            ]
        )
        # A container with no matching counters gets zeroed stats, not None.
        stats = counters.get_pod_container_stats(
            namespace_name="platform-jobs", pod_name="job-1", container_name="job-1"
        )
        assert stats.utilization == 0
        assert stats.memory_used_mb == 0
class TestFilterOutRPCError:
    """Tests for ``filter_out_rpc_error``.

    The filter strips a trailing kubelet/runtime error message (e.g.
    "rpc error: ...") from a streamed log — but only when that message is
    the final data before EOF; mid-stream occurrences are passed through.
    """

    async def test_iter_eof(self) -> None:
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == []

    async def test_read_two_lines_eof(self) -> None:
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(b"line2")
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n", b"line2"]

    async def test_filtered_single_rpc_error(self) -> None:
        # Trailing "rpc error" before EOF is dropped.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(b"rpc error: code = whatever")
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n"]

    async def test_filtered_single_rpc_error2(self) -> None:
        # Kubelet's "Unable to retrieve container logs" tail is also dropped.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(
            b"Unable to retrieve container logs for docker://0123456789abcdef"
        )
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n"]

    async def test_filtered_single_rpc_error3(self) -> None:
        # The "failed to try resolving symlinks" runtime error is dropped too.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(
            b'failed to try resolving symlinks in path "/var/log/pods/xxx.log": '
            b"lstat /var/log/pods/xxx.log: no such file or directory"
        )
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n"]

    async def test_filtered_two_rpc_errors(self) -> None:
        # Only the *last* error line (the one adjacent to EOF) is filtered;
        # an earlier one is legitimate log content and stays.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(b"rpc error: code = whatever\n")
        reader.feed_data(b"rpc error: code = again\n")
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n", b"rpc error: code = whatever\n"]

    async def test_not_filtered_single_rpc_not_eof(self) -> None:
        # An "rpc error" followed by more data is not at EOF -> kept.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        reader.feed_data(b"line1\n")
        reader.feed_data(b"rpc error: code = whatever\n")
        reader.feed_data(b"line2\n")
        reader.feed_eof()
        it = filter_out_rpc_error(reader)
        chunks = [chunk async for chunk in it]
        assert chunks == [b"line1\n", b"rpc error: code = whatever\n", b"line2\n"]

    async def test_min_line_chunk(self) -> None:
        # Exercises how partial \r-terminated chunks are coalesced while the
        # consumer runs concurrently with the producer.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        it = filter_out_rpc_error(reader)

        async def _read_all() -> list[bytes]:
            return [chunk async for chunk in it]

        async def _feed_raw_chunk(data: bytes) -> None:
            reader.feed_data(data)
            await asyncio.sleep(0.0)  # yield so the consumer task can drain

        task = asyncio.create_task(_read_all())
        await _feed_raw_chunk(b"chunk01\r")
        await _feed_raw_chunk(b"chunk02\r")
        await _feed_raw_chunk(b"chunk03\r")
        await _feed_raw_chunk(b"chunk04\r")
        await _feed_raw_chunk(b"chunk05\r\n")
        await _feed_raw_chunk(b"chunk06\r\n")
        await _feed_raw_chunk(b"chunk07\r")
        await _feed_raw_chunk(b"chunk08\r\n")
        await _feed_raw_chunk(b"rpc error: ")
        await _feed_raw_chunk(b"code =")
        reader.feed_eof()
        chunks = await task
        # The split "rpc error: code =" tail straddling EOF is filtered out.
        assert chunks == [
            b"chunk01\rchunk02\rchunk03\r",
            b"chunk04\r",
            b"chunk05\r\n",
            b"chunk06\r\n",
            b"chunk07\rchunk08\r\n",
        ]
class TestNode:
    """Tests for the ``Node`` wrapper around raw Kubernetes node payloads."""

    def test_name(self) -> None:
        node = Node({"metadata": {"name": "default"}})
        assert node.name == "default"

    def test_get_label(self) -> None:
        node = Node({"metadata": {"labels": {"hello": "world"}}})
        assert node.get_label("hello") == "world"

    def test_get_label_is_none(self) -> None:
        # Missing "labels" section -> None, no KeyError.
        node = Node({"metadata": {}})
        assert node.get_label("hello") is None
class TestResources:
    """Tests for ``Resources`` arithmetic (add / available / count)."""

    def test_add(self) -> None:
        # Component-wise sum.
        resources1 = Resources(cpu_m=1, memory_mb=2, gpu=3)
        resources2 = Resources(cpu_m=4, memory_mb=5, gpu=6)
        assert resources1.add(resources2) == Resources(cpu_m=5, memory_mb=7, gpu=9)

    def test_available(self) -> None:
        # Component-wise remainder: total minus used.
        total = Resources(cpu_m=1000, memory_mb=1024, gpu=2)
        used = Resources(cpu_m=100, memory_mb=256, gpu=1)
        assert total.available(used) == Resources(cpu_m=900, memory_mb=768, gpu=1)

    def test_count(self) -> None:
        # count(unit) == how many whole 'unit' allocations fit into total;
        # the tightest dimension wins, and an oversized request yields 0.
        total = Resources(cpu_m=1000, memory_mb=1024, gpu=2)
        assert total.count(Resources(cpu_m=100, memory_mb=128, gpu=1)) == 2
        assert total.count(Resources(cpu_m=100, memory_mb=128)) == 8
        assert total.count(Resources(cpu_m=100)) == 10
        assert total.count(Resources(cpu_m=1100)) == 0
        # NOTE(review): a zero-requirement unit yields 110 — presumably a
        # large default/sentinel; confirm against Resources.count.
        assert total.count(Resources()) == 110
        assert Resources().count(Resources()) == 0
| import asyncio
from typing import Any
from unittest import mock
import aiohttp
import pytest
from platform_monitoring.kube_client import (
GPUCounter,
GPUCounters,
JobError,
Node,
Pod,
PodContainerStats,
PodPhase,
Resources,
StatsSummary,
)
from platform_monitoring.logs import filter_out_rpc_error
class TestPod:
def test_no_node_name(self) -> None:
pod = Pod({"spec": {}})
assert pod.node_name is None
def test_node_name(self) -> None:
pod = Pod({"spec": {"nodeName": "testnode"}})
assert pod.node_name == "testnode"
def test_no_status(self) -> None:
pod = Pod({"spec": {}})
with pytest.raises(ValueError, match="Missing pod status"):
pod.get_container_status("testcontainer")
def test_no_container_status(self) -> None:
pod = Pod({"spec": {}, "status": {"containerStatuses": []}})
container_status = pod.get_container_status("testcontainer")
assert container_status == {}
def test_container_status(self) -> None:
pod = Pod(
{
"spec": {},
"status": {
"containerStatuses": [{"name": ""}, {"name": "testcontainer"}]
},
}
)
container_status = pod.get_container_status("testcontainer")
assert container_status == {"name": "testcontainer"}
def test_no_container_id(self) -> None:
pod = Pod(
{"spec": {}, "status": {"containerStatuses": [{"name": "testcontainer"}]}}
)
container_id = pod.get_container_id("testcontainer")
assert container_id is None
def test_container_id(self) -> None:
pod = Pod(
{
"spec": {},
"status": {
"containerStatuses": [
{
"name": "testcontainer",
"containerID": "docker://testcontainerid",
}
]
},
}
)
container_id = pod.get_container_id("testcontainer")
assert container_id == "testcontainerid"
def test_phase(self) -> None:
pod = Pod({"spec": {}, "status": {"phase": "Running"}})
assert pod.phase == PodPhase.RUNNING
def test_is_phase_running_false(self) -> None:
pod = Pod({"spec": {}, "status": {"phase": "Pending"}})
assert not pod.is_phase_running
def test_is_phase_running(self) -> None:
pod = Pod({"spec": {}, "status": {"phase": "Running"}})
assert pod.is_phase_running
def test_no_resource_requests(self) -> None:
pod = Pod({"spec": {"containers": [{"resources": {}}]}})
assert pod.resource_requests == Resources()
def test_resource_requests_cpu_milicores(self) -> None:
pod = Pod(
{"spec": {"containers": [{"resources": {"requests": {"cpu": "100m"}}}]}}
)
assert pod.resource_requests == Resources(cpu_m=100)
def test_resource_requests_cpu_cores(self) -> None:
pod = Pod({"spec": {"containers": [{"resources": {"requests": {"cpu": "1"}}}]}})
assert pod.resource_requests == Resources(cpu_m=1000)
def test_resource_requests_memory_mebibytes(self) -> None:
pod = Pod(
{
"spec": {
"containers": [{"resources": {"requests": {"memory": "1000Mi"}}}]
}
}
)
assert pod.resource_requests == Resources(memory_mb=1000)
def test_resource_requests_memory_gibibytes(self) -> None:
pod = Pod(
{"spec": {"containers": [{"resources": {"requests": {"memory": "1Gi"}}}]}}
)
assert pod.resource_requests == Resources(memory_mb=1024)
def test_resource_requests_gpu(self) -> None:
pod = Pod(
{
"spec": {
"containers": [{"resources": {"requests": {"nvidia.com/gpu": "1"}}}]
}
}
)
assert pod.resource_requests == Resources(gpu=1)
def test_resource_requests_for_multiple_containers(self) -> None:
pod = Pod(
{
"spec": {
"containers": [
{"resources": {"requests": {"cpu": "0.5", "memory": "512Mi"}}},
{
"resources": {
"requests": {
"cpu": "1",
"memory": "1Gi",
"nvidia.com/gpu": "1",
}
}
},
]
}
}
)
assert pod.resource_requests == Resources(cpu_m=1500, memory_mb=1536, gpu=1)
class TestPodContainerStats:
def test_from_primitive_no_keys(self) -> None:
payload: dict[str, Any] = {"memory": {}}
stats = PodContainerStats.from_primitive(payload)
empty_stats = PodContainerStats(cpu=0.0, memory=0.0)
assert stats == empty_stats
payload = {"cpu": {}}
stats = PodContainerStats.from_primitive(payload)
assert stats == empty_stats
payload = {}
stats = PodContainerStats.from_primitive(payload)
assert stats == empty_stats
def test_from_primitive_empty(self) -> None:
payload: dict[str, Any] = {"cpu": {}, "memory": {}}
stats = PodContainerStats.from_primitive(payload)
assert stats == PodContainerStats(cpu=0.0, memory=0.0)
def test_from_primitive(self) -> None:
payload = {
"cpu": {"usageNanoCores": 1000},
"memory": {"workingSetBytes": 1024 * 1024},
}
stats = PodContainerStats.from_primitive(payload)
assert stats == PodContainerStats(cpu=0.000001, memory=1.0)
class TestStatsSummary:
def test_get_pod_container_stats_error_response(self) -> None:
payload: dict[str, Any] = {
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "message",
"reason": "Forbidden",
"details": {"name": "default-pool", "kind": "nodes"},
"code": 403,
}
with pytest.raises(JobError, match="Invalid stats summary response"):
StatsSummary(payload)
def test_get_pod_container_stats_no_pod(self) -> None:
payload: dict[str, Any] = {"pods": []}
stats = StatsSummary(payload).get_pod_container_stats(
"namespace", "pod", "container"
)
assert stats is None
def test_get_pod_container_stats_no_containers(self) -> None:
payload = {"pods": [{"podRef": {"namespace": "namespace", "name": "pod"}}]}
stats = StatsSummary(payload).get_pod_container_stats(
"namespace", "pod", "container"
)
assert stats is None
def test_get_pod_container_stats(self) -> None:
payload = {
"pods": [
{
"podRef": {"namespace": "namespace", "name": "pod"},
"containers": [{"name": "container", "cpu": {}, "memory": {}}],
}
]
}
stats = StatsSummary(payload).get_pod_container_stats(
"namespace", "pod", "container"
)
assert stats
class TestGPUCounters:
def test_parse(self) -> None:
metrics = """
# HELP DCGM_FI_DEV_GPU_UTIL GPU utilization (in %).
# TYPE DCGM_FI_DEV_GPU_UTIL gauge
# HELP DCGM_FI_DEV_FB_USED Framebuffer memory used (in MiB).
# TYPE DCGM_FI_DEV_FB_USED gauge
DCGM_FI_DEV_GPU_UTIL{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 1
DCGM_FI_DEV_FB_USED{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 10
DCGM_FI_DEV_GPU_UTIL{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 2
DCGM_FI_DEV_FB_USED{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 20
DCGM_FI_DEV_GPU_UTIL{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 3
DCGM_FI_DEV_FB_USED{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 30
"""
counters = GPUCounters.parse(metrics)
assert counters == GPUCounters(
counters=[
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=1,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=10,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=2,
labels={
"gpu": "1",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=20,
labels={
"gpu": "1",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=3,
labels={
"gpu": "2",
"namespace": "platform-jobs",
"pod": "job-1",
"container": "job-1",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=30,
labels={
"gpu": "2",
"namespace": "platform-jobs",
"pod": "job-1",
"container": "job-1",
},
),
]
)
def test_get_pod_container_stats_utilization(self) -> None:
counters = GPUCounters(
counters=[
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=1,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=4,
labels={
"gpu": "1",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=2,
labels={
"gpu": "2",
"namespace": "platform-jobs",
"pod": "job-1",
"container": "job-1",
},
),
]
)
stats = counters.get_pod_container_stats(
namespace_name="platform-jobs", pod_name="job-0", container_name="job-0"
)
assert stats.utilization == 2
stats = counters.get_pod_container_stats(
namespace_name="platform-jobs", pod_name="job-1", container_name="job-1"
)
assert stats.utilization == 2
def test_get_pod_container_stats_memory_used(self) -> None:
counters = GPUCounters(
counters=[
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=1,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=2,
labels={
"gpu": "1",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=3,
labels={
"gpu": "2",
"namespace": "platform-jobs",
"pod": "job-1",
"container": "job-1",
},
),
]
)
stats = counters.get_pod_container_stats(
namespace_name="platform-jobs", pod_name="job-0", container_name="job-0"
)
assert stats.utilization == 0
assert stats.memory_used_mb == 3
def test_get_pod_container_stats_unknown_job(self) -> None:
counters = GPUCounters(
counters=[
GPUCounter(
name="DCGM_FI_DEV_GPU_UTIL",
value=1,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
GPUCounter(
name="DCGM_FI_DEV_FB_USED",
value=1,
labels={
"gpu": "0",
"namespace": "platform-jobs",
"pod": "job-0",
"container": "job-0",
},
),
]
)
stats = counters.get_pod_container_stats(
namespace_name="platform-jobs", pod_name="job-1", container_name="job-1"
)
assert stats.utilization == 0
assert stats.memory_used_mb == 0
class TestFilterOutRPCError:
async def test_iter_eof(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == []
async def test_read_two_lines_eof(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(b"line2")
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n", b"line2"]
async def test_filtered_single_rpc_error(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(b"rpc error: code = whatever")
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n"]
async def test_filtered_single_rpc_error2(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(
b"Unable to retrieve container logs for docker://0123456789abcdef"
)
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n"]
async def test_filtered_single_rpc_error3(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(
b'failed to try resolving symlinks in path "/var/log/pods/xxx.log": '
b"lstat /var/log/pods/xxx.log: no such file or directory"
)
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n"]
async def test_filtered_two_rpc_errors(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(b"rpc error: code = whatever\n")
reader.feed_data(b"rpc error: code = again\n")
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n", b"rpc error: code = whatever\n"]
async def test_not_filtered_single_rpc_not_eof(self) -> None:
reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
reader.feed_data(b"line1\n")
reader.feed_data(b"rpc error: code = whatever\n")
reader.feed_data(b"line2\n")
reader.feed_eof()
it = filter_out_rpc_error(reader)
chunks = [chunk async for chunk in it]
assert chunks == [b"line1\n", b"rpc error: code = whatever\n", b"line2\n"]
    async def test_min_line_chunk(self) -> None:
        # Exercises how partial "\r"-terminated chunks are grouped by the
        # filter and that a trailing "rpc error: ..." fragment at EOF is
        # dropped from the emitted chunks.
        reader = aiohttp.StreamReader(mock.Mock(_reading_paused=False), 1024)
        it = filter_out_rpc_error(reader)

        async def _read_all() -> list[bytes]:
            # Drain the filtered stream to completion.
            return [chunk async for chunk in it]

        async def _feed_raw_chunk(data: bytes) -> None:
            reader.feed_data(data)
            # Yield to the event loop so the consumer task can run between feeds.
            await asyncio.sleep(0.0)

        task = asyncio.create_task(_read_all())
        await _feed_raw_chunk(b"chunk01\r")
        await _feed_raw_chunk(b"chunk02\r")
        await _feed_raw_chunk(b"chunk03\r")
        await _feed_raw_chunk(b"chunk04\r")
        await _feed_raw_chunk(b"chunk05\r\n")
        await _feed_raw_chunk(b"chunk06\r\n")
        await _feed_raw_chunk(b"chunk07\r")
        await _feed_raw_chunk(b"chunk08\r\n")
        await _feed_raw_chunk(b"rpc error: ")
        await _feed_raw_chunk(b"code =")
        reader.feed_eof()
        chunks = await task
        assert chunks == [
            b"chunk01\rchunk02\rchunk03\r",
            b"chunk04\r",
            b"chunk05\r\n",
            b"chunk06\r\n",
            b"chunk07\rchunk08\r\n",
        ]
class TestNode:
    """Unit tests for the ``Node`` payload wrapper."""

    def test_name(self) -> None:
        # ``name`` is read straight from the payload metadata.
        payload = {"metadata": {"name": "default"}}
        assert Node(payload).name == "default"

    def test_get_label(self) -> None:
        # A label present in the metadata is returned verbatim.
        payload = {"metadata": {"labels": {"hello": "world"}}}
        assert Node(payload).get_label("hello") == "world"

    def test_get_label_is_none(self) -> None:
        # A missing label resolves to ``None`` rather than raising.
        assert Node({"metadata": {}}).get_label("hello") is None
class TestResources:
    """Unit tests for ``Resources`` arithmetic helpers."""

    def test_add(self) -> None:
        # Addition combines cpu, memory and gpu component-wise.
        lhs = Resources(cpu_m=1, memory_mb=2, gpu=3)
        rhs = Resources(cpu_m=4, memory_mb=5, gpu=6)
        assert lhs.add(rhs) == Resources(cpu_m=5, memory_mb=7, gpu=9)

    def test_available(self) -> None:
        # ``available`` subtracts the used amount from the total.
        total = Resources(cpu_m=1000, memory_mb=1024, gpu=2)
        used = Resources(cpu_m=100, memory_mb=256, gpu=1)
        assert total.available(used) == Resources(cpu_m=900, memory_mb=768, gpu=1)

    def test_count(self) -> None:
        # ``count`` reports how many copies of the requested bundle fit
        # into the total.
        total = Resources(cpu_m=1000, memory_mb=1024, gpu=2)
        expectations = [
            (Resources(cpu_m=100, memory_mb=128, gpu=1), 2),
            (Resources(cpu_m=100, memory_mb=128), 8),
            (Resources(cpu_m=100), 10),
            (Resources(cpu_m=1100), 0),
            (Resources(), 110),
        ]
        for requested, expected in expectations:
            assert total.count(requested) == expected
        assert Resources().count(Resources()) == 0
| en | 0.249491 | # HELP DCGM_FI_DEV_GPU_UTIL GPU utilization (in %). # TYPE DCGM_FI_DEV_GPU_UTIL gauge # HELP DCGM_FI_DEV_FB_USED Framebuffer memory used (in MiB). # TYPE DCGM_FI_DEV_FB_USED gauge DCGM_FI_DEV_GPU_UTIL{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 1 DCGM_FI_DEV_FB_USED{gpu="0",container="job-0",namespace="platform-jobs",pod="job-0"} 10 DCGM_FI_DEV_GPU_UTIL{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 2 DCGM_FI_DEV_FB_USED{gpu="1",container="job-0",namespace="platform-jobs",pod="job-0"} 20 DCGM_FI_DEV_GPU_UTIL{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 3 DCGM_FI_DEV_FB_USED{gpu="2",container="job-1",namespace="platform-jobs",pod="job-1"} 30 | 2.130439 | 2 |
pylark/api_service_approval_get_user_task_list.py | chyroc/pylark | 7 | 6619439 | # Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class GetApprovalUserTaskListReq(object):
page_size: int = attr.ib(
default=0, metadata={"req_type": "query", "key": "page_size"}
) # 分页大小, 示例值:100, 最大值:`200`
page_token: str = attr.ib(
default="", metadata={"req_type": "query", "key": "page_token"}
) # 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果, 示例值:"1"
user_id: str = attr.ib(
default="", metadata={"req_type": "query", "key": "user_id"}
) # 需要查询的 User ID, 示例值:"example_user_id"
topic: str = attr.ib(
default="", metadata={"req_type": "query", "key": "topic"}
) # 需要查询的任务分组主题,如「待办」、「已办」等, 示例值:"1", 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会
user_id_type: lark_type.IDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "user_id_type"}
) # 用户 ID 类型, 示例值:"open_id", 可选值有: `open_id`:用户的 open id, `union_id`:用户的 union id, `user_id`:用户的 user id, 默认值: `open_id`,, 当值为 `user_id`, 字段权限要求: 获取用户 user ID
@attr.s
class GetApprovalUserTaskListRespCount(object):
    """Aggregate counters for the task list (returned with the first page)."""

    total: int = attr.ib(
        default=0, metadata={"req_type": "json", "key": "total"}
    )  # Total item count; reported as 999 when there are >= 1000 items
    has_more: bool = attr.ib(
        factory=lambda: bool(), metadata={"req_type": "json", "key": "has_more"}
    )  # True when there are >= 1000 items (i.e. more than ``total`` reports)
@attr.s
class GetApprovalUserTaskListRespTaskURLs(object):
    """Per-task deep-link URLs for the various Feishu clients."""

    helpdesk: str = attr.ib(
        default="", metadata={"req_type": "json", "key": "helpdesk"}
    )  # Helpdesk service URL
    mobile: str = attr.ib(
        default="", metadata={"req_type": "json", "key": "mobile"}
    )  # Mobile client URL
    pc: str = attr.ib(
        default="", metadata={"req_type": "json", "key": "pc"}
    )  # Desktop (PC) client URL
@attr.s
class GetApprovalUserTaskListRespTask(object):
topic: str = attr.ib(
default="", metadata={"req_type": "json", "key": "topic"}
) # 任务所属的任务分组,如「待办」、「已办」等, 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
) # 任务所属的用户 ID
title: str = attr.ib(
default="", metadata={"req_type": "json", "key": "title"}
) # 任务题目
urls: GetApprovalUserTaskListRespTaskURLs = attr.ib(
default=None, metadata={"req_type": "json", "key": "urls"}
) # 任务相关 URL
process_external_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_external_id"}
) # 流程三方 ID,仅第三方流程,需要在当前租户、当前 APP 内唯一
task_external_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "task_external_id"}
) # 任务三方 ID,仅第三方流程,需要在当前流程实例内唯一
status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "status"}
) # 任务状态, 可选值有: `1`:待办, `2`:已办, `17`:未读, `18`:已读, `33`:处理中,标记完成用, `34`:撤回
process_status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_status"}
) # 流程实例状态, 可选值有: `0`:无流程状态,不展示对应标签, `1`:流程实例流转中, `2`:已通过, `3`:已拒绝, `4`:已撤销, `5`:已终止
definition_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_code"}
) # 流程定义 Code
initiators: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "initiators"}
) # 发起人 ID 列表
initiator_names: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "initiator_names"}
) # 发起人姓名列表
task_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "task_id"}
) # 任务 ID,全局唯一
process_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_id"}
) # 流程 ID,全局唯一
process_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_code"}
) # 流程 Code
definition_group_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_group_id"}
) # 流程定义分组 ID
definition_group_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_group_name"}
) # 流程定义分组名称
definition_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_id"}
) # 流程定义 ID
definition_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_name"}
) # 流程定义名称
@attr.s
class GetApprovalUserTaskListResp(object):
tasks: typing.List[GetApprovalUserTaskListRespTask] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "tasks"}
) # 任务列表
page_token: str = attr.ib(
default="", metadata={"req_type": "json", "key": "page_token"}
) # 分页标记,当 has_more 为 true 时,会同时返回新的 page_token,否则不返回 page_token
has_more: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "has_more"}
) # 是否还有更多项
count: GetApprovalUserTaskListRespCount = attr.ib(
default=None, metadata={"req_type": "json", "key": "count"}
) # 列表计数,只在分页第一页返回
def _gen_get_approval_user_task_list_req(request, options) -> RawRequestReq:
    """Build the raw HTTP request for the Feishu "query user task list" API.

    :param request: a ``GetApprovalUserTaskListReq`` whose fields are
        serialized as query parameters per their ``req_type`` metadata.
    :param options: per-call options forwarded to ``_new_method_option``.
    :return: a ``RawRequestReq`` describing a GET to
        ``/open-apis/approval/v4/tasks/query``; the endpoint requires both
        tenant and user access tokens.
    """
    return RawRequestReq(
        dataclass=GetApprovalUserTaskListResp,
        scope="Approval",
        api="GetApprovalUserTaskList",
        method="GET",
        url="https://open.feishu.cn/open-apis/approval/v4/tasks/query",
        body=request,
        method_option=_new_method_option(options),
        need_tenant_access_token=True,
        need_user_access_token=True,
    )
| # Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class GetApprovalUserTaskListReq(object):
page_size: int = attr.ib(
default=0, metadata={"req_type": "query", "key": "page_size"}
) # 分页大小, 示例值:100, 最大值:`200`
page_token: str = attr.ib(
default="", metadata={"req_type": "query", "key": "page_token"}
) # 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果, 示例值:"1"
user_id: str = attr.ib(
default="", metadata={"req_type": "query", "key": "user_id"}
) # 需要查询的 User ID, 示例值:"example_user_id"
topic: str = attr.ib(
default="", metadata={"req_type": "query", "key": "topic"}
) # 需要查询的任务分组主题,如「待办」、「已办」等, 示例值:"1", 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会
user_id_type: lark_type.IDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "user_id_type"}
) # 用户 ID 类型, 示例值:"open_id", 可选值有: `open_id`:用户的 open id, `union_id`:用户的 union id, `user_id`:用户的 user id, 默认值: `open_id`,, 当值为 `user_id`, 字段权限要求: 获取用户 user ID
@attr.s
class GetApprovalUserTaskListRespCount(object):
total: int = attr.ib(
default=0, metadata={"req_type": "json", "key": "total"}
) # 总数,大于等于 1000 个项目时将返回 999
has_more: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "has_more"}
) # 还有更多,当大于等于 1000 时将返回 true
@attr.s
class GetApprovalUserTaskListRespTaskURLs(object):
helpdesk: str = attr.ib(
default="", metadata={"req_type": "json", "key": "helpdesk"}
) # 帮助服务台 URL
mobile: str = attr.ib(
default="", metadata={"req_type": "json", "key": "mobile"}
) # 移动端 URL
pc: str = attr.ib(
default="", metadata={"req_type": "json", "key": "pc"}
) # PC 端 URL
@attr.s
class GetApprovalUserTaskListRespTask(object):
topic: str = attr.ib(
default="", metadata={"req_type": "json", "key": "topic"}
) # 任务所属的任务分组,如「待办」、「已办」等, 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
) # 任务所属的用户 ID
title: str = attr.ib(
default="", metadata={"req_type": "json", "key": "title"}
) # 任务题目
urls: GetApprovalUserTaskListRespTaskURLs = attr.ib(
default=None, metadata={"req_type": "json", "key": "urls"}
) # 任务相关 URL
process_external_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_external_id"}
) # 流程三方 ID,仅第三方流程,需要在当前租户、当前 APP 内唯一
task_external_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "task_external_id"}
) # 任务三方 ID,仅第三方流程,需要在当前流程实例内唯一
status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "status"}
) # 任务状态, 可选值有: `1`:待办, `2`:已办, `17`:未读, `18`:已读, `33`:处理中,标记完成用, `34`:撤回
process_status: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_status"}
) # 流程实例状态, 可选值有: `0`:无流程状态,不展示对应标签, `1`:流程实例流转中, `2`:已通过, `3`:已拒绝, `4`:已撤销, `5`:已终止
definition_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_code"}
) # 流程定义 Code
initiators: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "initiators"}
) # 发起人 ID 列表
initiator_names: typing.List[str] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "initiator_names"}
) # 发起人姓名列表
task_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "task_id"}
) # 任务 ID,全局唯一
process_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_id"}
) # 流程 ID,全局唯一
process_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "process_code"}
) # 流程 Code
definition_group_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_group_id"}
) # 流程定义分组 ID
definition_group_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_group_name"}
) # 流程定义分组名称
definition_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_id"}
) # 流程定义 ID
definition_name: str = attr.ib(
default="", metadata={"req_type": "json", "key": "definition_name"}
) # 流程定义名称
@attr.s
class GetApprovalUserTaskListResp(object):
tasks: typing.List[GetApprovalUserTaskListRespTask] = attr.ib(
factory=lambda: [], metadata={"req_type": "json", "key": "tasks"}
) # 任务列表
page_token: str = attr.ib(
default="", metadata={"req_type": "json", "key": "page_token"}
) # 分页标记,当 has_more 为 true 时,会同时返回新的 page_token,否则不返回 page_token
has_more: bool = attr.ib(
factory=lambda: bool(), metadata={"req_type": "json", "key": "has_more"}
) # 是否还有更多项
count: GetApprovalUserTaskListRespCount = attr.ib(
default=None, metadata={"req_type": "json", "key": "count"}
) # 列表计数,只在分页第一页返回
def _gen_get_approval_user_task_list_req(request, options) -> RawRequestReq:
    """Build the raw HTTP request for the Feishu "query user task list" API.

    :param request: a ``GetApprovalUserTaskListReq`` whose fields are
        serialized as query parameters per their ``req_type`` metadata.
    :param options: per-call options forwarded to ``_new_method_option``.
    :return: a ``RawRequestReq`` describing a GET to
        ``/open-apis/approval/v4/tasks/query``; the endpoint requires both
        tenant and user access tokens.
    """
    return RawRequestReq(
        dataclass=GetApprovalUserTaskListResp,
        scope="Approval",
        api="GetApprovalUserTaskList",
        method="GET",
        url="https://open.feishu.cn/open-apis/approval/v4/tasks/query",
        body=request,
        method_option=_new_method_option(options),
        need_tenant_access_token=True,
        need_user_access_token=True,
    )
| zh | 0.910386 | # Code generated by lark_sdk_gen. DO NOT EDIT. # 分页大小, 示例值:100, 最大值:`200` # 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果, 示例值:"1" # 需要查询的 User ID, 示例值:"example_user_id" # 需要查询的任务分组主题,如「待办」、「已办」等, 示例值:"1", 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会 # 用户 ID 类型, 示例值:"open_id", 可选值有: `open_id`:用户的 open id, `union_id`:用户的 union id, `user_id`:用户的 user id, 默认值: `open_id`,, 当值为 `user_id`, 字段权限要求: 获取用户 user ID # 总数,大于等于 1000 个项目时将返回 999 # 还有更多,当大于等于 1000 时将返回 true # 帮助服务台 URL # 移动端 URL # PC 端 URL # 任务所属的任务分组,如「待办」、「已办」等, 可选值有: `1`:待办审批, `2`:已办审批, `3`:已发起审批, `17`:未读知会, `18`:已读知会 # 任务所属的用户 ID # 任务题目 # 任务相关 URL # 流程三方 ID,仅第三方流程,需要在当前租户、当前 APP 内唯一 # 任务三方 ID,仅第三方流程,需要在当前流程实例内唯一 # 任务状态, 可选值有: `1`:待办, `2`:已办, `17`:未读, `18`:已读, `33`:处理中,标记完成用, `34`:撤回 # 流程实例状态, 可选值有: `0`:无流程状态,不展示对应标签, `1`:流程实例流转中, `2`:已通过, `3`:已拒绝, `4`:已撤销, `5`:已终止 # 流程定义 Code # 发起人 ID 列表 # 发起人姓名列表 # 任务 ID,全局唯一 # 流程 ID,全局唯一 # 流程 Code # 流程定义分组 ID # 流程定义分组名称 # 流程定义 ID # 流程定义名称 # 任务列表 # 分页标记,当 has_more 为 true 时,会同时返回新的 page_token,否则不返回 page_token # 是否还有更多项 # 列表计数,只在分页第一页返回 | 1.817414 | 2 |
gizeh/geometry.py | gouthambs/gizeh | 1 | 6619440 | <filename>gizeh/geometry.py<gh_stars>1-10
import numpy as np
def rotation_matrix(a):
    """Return the 3x3 homogeneous matrix for a rotation of angle ``a`` (radians)."""
    cos_a, sin_a = np.cos(a), np.sin(a)
    return np.array([
        [cos_a, -sin_a, 0.0],
        [sin_a, cos_a, 0.0],
        [0.0, 0.0, 1.0],
    ])
def translation_matrix(xy):
    """Return the 3x3 homogeneous matrix translating by ``xy = (x, y)``."""
    tx, ty = xy[0], xy[1]
    return np.array([
        [1.0, 0.0, tx],
        [0.0, 1.0, ty],
        [0.0, 0.0, 1.0],
    ])
def scaling_matrix(sx, sy):
    """Return the 3x3 homogeneous matrix scaling x by ``sx`` and y by ``sy``."""
    return np.diag([sx, sy, 1])
def polar_polygon(nfaces,radius, npoints):
    """ Returns (r, theta, d) triples for points regularly spaced along a
    regular polygon of `nfaces` faces and given radius: the polar radius,
    the polar angle, and a cumulative normalized distance ``d`` in [0, 1].
    """
    # Sample angles over a full turn; drop the duplicated 2*pi endpoint.
    theta=np.linspace(0,2*np.pi,npoints)[:-1]
    cos, pi, n = np.cos, np.pi, nfaces
    # Polar equation of a regular n-gon with unit circumradius.
    r= cos( pi/n )/cos((theta%(2*pi/n))-pi/n)
    # Cumulative change in radius between consecutive samples, normalized.
    # NOTE(review): this accumulates |dr| only, not true arc length — confirm intended.
    d = np.cumsum(np.sqrt(((r[1:]-r[:-1])**2)))
    d = [0]+list(d/d.max())
    return zip(radius*r, theta, d)
def polar2cart(r, theta):
    """Convert polar coordinates to cartesian (x, y).

    If ``r`` and/or ``theta`` are vectors, an array of ``(x, y)`` rows is
    returned instead of a single pair.
    """
    cartesian = r * np.array([np.cos(theta), np.sin(theta)])
    if len(cartesian.shape) == 1:
        return cartesian
    return cartesian.T
import numpy as np
def rotation_matrix(a):
    """Return the 3x3 homogeneous matrix for a rotation of angle ``a`` (radians)."""
    cos_a, sin_a = np.cos(a), np.sin(a)
    return np.array([
        [cos_a, -sin_a, 0.0],
        [sin_a, cos_a, 0.0],
        [0.0, 0.0, 1.0],
    ])
def translation_matrix(xy):
return np.array([[1.0,0,xy[0]],
[0,1,xy[1]],
[0,0,1]])
def scaling_matrix(sx,sy):
return np.array([[sx,0,0],
[0,sy,0],
[0,0,1]])
def polar_polygon(nfaces,radius, npoints):
""" Returns the (x,y) coordinates of n points regularly spaced
along a regular polygon of `nfaces` faces and given radius.
"""
theta=np.linspace(0,2*np.pi,npoints)[:-1]
cos, pi, n = np.cos, np.pi, nfaces
r= cos( pi/n )/cos((theta%(2*pi/n))-pi/n)
d = np.cumsum(np.sqrt(((r[1:]-r[:-1])**2)))
d = [0]+list(d/d.max())
return zip(radius*r, theta, d)
def polar2cart(r, theta):
    """Convert polar coordinates to cartesian (x, y).

    If ``r`` and/or ``theta`` are vectors, an array of ``(x, y)`` rows is
    returned instead of a single pair.
    """
    cartesian = r * np.array([np.cos(theta), np.sin(theta)])
    if len(cartesian.shape) == 1:
        return cartesian
    return cartesian.T
src/util.py | gto76/wfdl | 11 | 6619441 | import ast
import operator as op
import re
from collections import namedtuple
from math import pi, cos, sin
from numbers import Real
Point = namedtuple('Point', list('xy'))
OPERATORS = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg}
###
## DICT SUB
#
def replace_matched_items(elements, dictionary):
    """Recursively substitute ``dictionary`` values into ``elements``.

    Nested lists are processed recursively, sets and dicts element-wise,
    and any other value is passed through ``get_value_of_exp``.
    """
    if not elements:
        return []
    out = []
    for element in elements:
        element_type = type(element)
        if element_type is set:
            substituted = replace_in_set(element, dictionary)
        elif element_type is list:
            substituted = replace_matched_items(element, dictionary)
        elif element_type is dict:
            substituted = replace_in_dict(element, dictionary)
        else:
            substituted = get_value_of_exp(element, dictionary)
        out.append(substituted)
    return out
def replace_in_set(a_set, dictionary):
    """Apply ``get_value_of_exp`` to every element of ``a_set``."""
    return {get_value_of_exp(element, dictionary) for element in a_set}
def replace_in_dict(a_dict, dictionary):
    """Apply ``get_value_of_exp`` to every value of ``a_dict`` (keys untouched)."""
    return {k: get_value_of_exp(v, dictionary) for k, v in a_dict.items()}
def get_value_of_exp(exp, dictionary):
    """Evaluate the arithmetic expression ``exp`` after substituting names.

    Each token matching a ``dictionary`` key is replaced by its value.  If
    the substituted expression still contains letters (unresolved names),
    it is returned as a string; otherwise it is evaluated to a number.
    Non-string inputs are returned unchanged.
    """
    if not isinstance(exp, str):
        return exp
    # Split on arithmetic operators/parens, keeping the delimiters as tokens.
    tokens = [a for a in re.split('([ +\\-/*()])', exp) if a]
    exp = ''.join(sub_exp(token, dictionary) for token in tokens)
    if re.search('[a-zA-Z]', exp):
        return exp
    return eval_expr(exp)
def sub_exp(exp, dictionary):
    """Return the stringified substitution for ``exp`` if it is a key,
    otherwise ``exp`` unchanged."""
    if exp in dictionary:
        return str(dictionary[exp])
    return exp
def eval_expr(expr):
    """Evaluate an arithmetic expression string via the restricted AST walker."""
    return eval_(ast.parse(expr, mode='eval').body)
def eval_(node):
    """Recursively evaluate a parsed arithmetic AST node.

    Supports numeric literals plus the binary and unary operators listed
    in ``OPERATORS``; any other node raises ``TypeError``.
    """
    # ast.Num is deprecated since Python 3.8; match numeric Constant nodes
    # exactly as ast.Num did (int/float/complex, excluding bool).
    if isinstance(node, ast.Constant) and type(node.value) in (int, float, complex):
        return node.value
    if isinstance(node, ast.BinOp):
        return OPERATORS[type(node.op)](eval_(node.left), eval_(node.right))
    if isinstance(node, ast.UnaryOp):
        return OPERATORS[type(node.op)](eval_(node.operand))
    raise TypeError(node)
###
## UTIL
#
def get_rad(fi):
    """Map a fraction of a full turn ``fi`` to radians, offset so 0 -> -pi/2."""
    return 2 * pi * fi - pi / 2
def get_cent(rad):
    """Inverse of the radian mapping: convert radians back to a fraction of a turn."""
    shifted = rad + pi / 2
    return shifted / (2 * pi)
def get_point(fi, r):
    """Return the cartesian ``Point`` at angle ``fi`` (radians) and radius ``r``."""
    return Point(cos(fi) * r, sin(fi) * r)
def get_point_xy(x, y):
    """Wrap raw cartesian coordinates in a ``Point``."""
    return Point(x, y)
def get_enum(a_enum, enum_name, dbg_context):
    """Look up ``enum_name`` in ``a_enum``, reporting a friendly error if absent."""
    try:
        return a_enum[enum_name]
    except KeyError:
        no_enum_error(a_enum, enum_name, dbg_context)
def no_enum_error(a_enum, name, dbg_context):
    """Raise ``ValueError`` describing an invalid enum member request."""
    # Turn e.g. "ShapeKind" into the human readable "shape kind".
    words = [w.lower() for w in re.split('([A-Z][a-z]*)', a_enum.__name__) if w]
    readable_name = ' '.join(words)
    available = ', '.join(f'"{member.name}"' for member in a_enum)
    raise ValueError(
        f'Invalid {readable_name} "{name}" in subgroup "{dbg_context}". '
        f'Available {readable_name}s: {available}.'
    )
def check_args(prms, dbg_context):
    """Validate the argument count and types of a shape's parameters.

    Shapes that declare no minimum argument count are not checked.
    """
    if not prms.shape.value.min_no_args:
        return
    check_args_no(prms, dbg_context)
    check_args_type(prms, dbg_context)
def check_args_no(prms, dbg_context):
    """Raise ``ValueError`` if the argument count is outside the shape's bounds."""
    shape = prms.shape
    no_args = len(prms.args)
    min_args = shape.value.min_no_args
    # The maximum arity is implied by the number of per-argument maxima.
    max_args = len(shape.value.max_args)
    if no_args < min_args:
        not_enough_args_err(shape, min_args, no_args, dbg_context)
    if no_args > max_args:
        too_much_args_err(shape, max_args, no_args, dbg_context)
def not_enough_args_err(shape, min_args, no_args, subgroup):
    """Raise ``ValueError`` reporting too few arguments for ``shape``."""
    raise ValueError(
        f'Shape "{shape.name}" needs at least {min_args} arguments, but '
        f'{no_args} were provided in subgroup "{subgroup}".'
    )
def too_much_args_err(shape, max_args, no_args, subgroup):
    """Raise ``ValueError`` reporting too many arguments for ``shape``."""
    raise ValueError(
        f'Shape "{shape.name}" can have at most {max_args} arguments, but '
        f'{no_args} were provided in subgroup "{subgroup}".'
    )
def check_args_type(prms, subgroup):
    """Validate the types and ranges of a shape's arguments.

    :param prms: parameters object with ``args`` and ``shape`` attributes;
        ``shape.value[3]`` holds the per-argument maxima.
    :param subgroup: name of the subgroup, used in error messages.
    :raises ValueError: if an argument is not a real number or exceeds its
        maximum allowed value.
    """
    for i, arg in enumerate(prms.args):
        if not isinstance(arg, Real):
            # Fixed message: the original said "is a number" for a
            # non-numeric argument.
            msg = f'Argument {arg} of shape "{prms.shape.name}" is not a ' \
                  f'number. Subgroup "{subgroup}".'
            raise ValueError(msg)
        max_arg = prms.shape.value[3][i]
        if arg > max_arg:
            msg = f'Argument {arg} of shape "{prms.shape.name}" is larger ' \
                  f'than the maximum allowed value ({max_arg}). ' \
                  f'Subgroup "{subgroup}".'
            raise ValueError(msg)
def read_file(filename):
    """Return the lines of a UTF-8 text file, newline characters included."""
    with open(filename, encoding='utf-8') as file:
        return file.readlines()
def write_to_file(filename, text):
    """Overwrite ``filename`` with ``text``, encoded as UTF-8."""
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(text)
def add_defaults(a_list, defaults):
    """Yield ``len(defaults)`` items: values from ``a_list`` where present,
    padded with the corresponding default otherwise."""
    for index, fallback in enumerate(defaults):
        yield a_list[index] if index < len(a_list) else fallback
| import ast
import operator as op
import re
from collections import namedtuple
from math import pi, cos, sin
from numbers import Real
Point = namedtuple('Point', list('xy'))
OPERATORS = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg}
###
## DICT SUB
#
def replace_matched_items(elements, dictionary):
if not elements:
return []
out = []
for element in elements:
if type(element) is set:
out.append(replace_in_set(element, dictionary))
elif type(element) is list:
out.append(replace_matched_items(element, dictionary))
elif type(element) is dict:
out.append(replace_in_dict(element, dictionary))
else:
out.append(get_value_of_exp(element, dictionary))
return out
def replace_in_set(a_set, dictionary):
return {get_value_of_exp(element, dictionary) for element in a_set}
def replace_in_dict(a_dict, dictionary):
return {k: get_value_of_exp(v, dictionary) for k, v in a_dict.items()}
def get_value_of_exp(exp, dictionary):
# if isinstance(exp, Number) or isinstance(exp, list):
if type(exp) != str:
return exp
tokens = [a for a in re.split('([ +\\-/*()])', exp) if a]
tokens_out = []
for token in tokens:
token_out = sub_exp(token, dictionary)
tokens_out.append(token_out)
exp = ''.join(tokens_out)
# for key, value in dictionary.items():
# exp = exp.replace(key, str(value)) #!!!!! more specific
if re.search('[a-zA-Z]', exp):
return exp
return eval_expr(exp)
def sub_exp(exp, dictionary):
for key, value in dictionary.items():
if exp == key:
return str(value)
return exp
def eval_expr(expr):
return eval_(ast.parse(expr, mode='eval').body)
def eval_(node):
    """Recursively evaluate a parsed arithmetic AST node.

    Supports numeric literals plus the binary and unary operators listed
    in ``OPERATORS``; any other node raises ``TypeError``.
    """
    # ast.Num is deprecated since Python 3.8; match numeric Constant nodes
    # exactly as ast.Num did (int/float/complex, excluding bool).
    if isinstance(node, ast.Constant) and type(node.value) in (int, float, complex):
        return node.value
    if isinstance(node, ast.BinOp):
        return OPERATORS[type(node.op)](eval_(node.left), eval_(node.right))
    if isinstance(node, ast.UnaryOp):
        return OPERATORS[type(node.op)](eval_(node.operand))
    raise TypeError(node)
###
## UTIL
#
def get_rad(fi):
return fi * 2 * pi - pi / 2
def get_cent(rad):
return (rad + pi / 2) / (2 * pi)
def get_point(fi, r):
return Point(cos(fi) * r, sin(fi) * r)
def get_point_xy(x, y):
return Point(x, y)
def get_enum(a_enum, enum_name, dbg_context):
try:
out = a_enum[enum_name]
except KeyError:
no_enum_error(a_enum, enum_name, dbg_context)
else:
return out
def no_enum_error(a_enum, name, dbg_context):
enum_name_tokens = re.split('([A-Z][a-z]*)', a_enum.__name__)
enum_name = ' '.join([a.lower() for a in enum_name_tokens if a])
enums = ', '.join([f'"{a.name}"' for a in list(a_enum)])
msg = f'Invalid {enum_name} "{name}" in subgroup "{dbg_context}". ' \
f'Available {enum_name}s: {enums}.'
raise ValueError(msg)
def check_args(prms, dbg_context):
if not prms.shape.value.min_no_args:
return
check_args_no(prms, dbg_context)
check_args_type(prms, dbg_context)
def check_args_no(prms, dbg_context):
shape = prms.shape
no_args = len(prms.args)
min_args = shape.value.min_no_args
max_args = len(shape.value.max_args)
if no_args < min_args:
not_enough_args_err(shape, min_args, no_args, dbg_context)
if no_args > max_args:
too_much_args_err(shape, max_args, no_args, dbg_context)
def not_enough_args_err(shape, min_args, no_args, subgroup):
msg = f'Shape "{shape.name}" needs at least {min_args} arguments, but ' \
f'{no_args} were provided in subgroup "{subgroup}".'
raise ValueError(msg)
def too_much_args_err(shape, max_args, no_args, subgroup):
msg = f'Shape "{shape.name}" can have at most {max_args} arguments, but ' \
f'{no_args} were provided in subgroup "{subgroup}".'
raise ValueError(msg)
def check_args_type(prms, subgroup):
    """Validate the types and ranges of a shape's arguments.

    :param prms: parameters object with ``args`` and ``shape`` attributes;
        ``shape.value[3]`` holds the per-argument maxima.
    :param subgroup: name of the subgroup, used in error messages.
    :raises ValueError: if an argument is not a real number or exceeds its
        maximum allowed value.
    """
    for i, arg in enumerate(prms.args):
        if not isinstance(arg, Real):
            # Fixed message: the original said "is a number" for a
            # non-numeric argument.
            msg = f'Argument {arg} of shape "{prms.shape.name}" is not a ' \
                  f'number. Subgroup "{subgroup}".'
            raise ValueError(msg)
        max_arg = prms.shape.value[3][i]
        if arg > max_arg:
            msg = f'Argument {arg} of shape "{prms.shape.name}" is larger ' \
                  f'than the maximum allowed value ({max_arg}). ' \
                  f'Subgroup "{subgroup}".'
            raise ValueError(msg)
def read_file(filename):
with open(filename, encoding='utf-8') as file:
return file.readlines()
def write_to_file(filename, text):
with open(filename, 'w', encoding='utf-8') as file:
file.write(text)
def add_defaults(a_list, defaults):
for i, default in enumerate(defaults):
yield a_list[i] if i < len(a_list) else default
| en | 0.391919 | ### ## DICT SUB # # if isinstance(exp, Number) or isinstance(exp, list): # for key, value in dictionary.items(): # exp = exp.replace(key, str(value)) #!!!!! more specific ### ## UTIL # | 3.19768 | 3 |
tests/time_zone.py | fossabot/PyFunceble | 0 | 6619442 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides a simple interface to get a custom timezone.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import timedelta, timezone
class TZ:
    """
    Provides a timezone.

    :param str sign:
        The sign to apply. Should be :code:`+` or :code:`-`.
    :param int days:
        The number of days from UTC.
    :param int seconds:
        The number of seconds from UTC.
    :param int microseconds:
        The number of microseconds from UTC.
    :param int milliseconds:
        The number of milliseconds from UTC.
    :param int minutes:
        The number of minutes from UTC.
    :param int hours:
        The number of hours from UTC.
    :param int weeks:
        The number of weeks from UTC.
    """

    def __init__(
        self,
        sign="+",
        days=0,
        seconds=0,
        microseconds=0,
        milliseconds=0,
        minutes=0,
        hours=0,
        weeks=0,
    ):
        # Any sign other than "+" is treated as "-".
        if sign == "+":
            self.sign = 1
        else:  # pragma: no cover
            self.sign = -1

        # NOTE: the attribute name keeps the historical typo ("timedelda")
        # for backward compatibility with any external readers.
        self.timedelda = timedelta(
            days=days,
            seconds=seconds,
            microseconds=microseconds,
            milliseconds=milliseconds,
            minutes=minutes,
            hours=hours,
            weeks=weeks,
        )

    def get(self):
        """
        Provides the timezone itself.

        :rtype: timezone
        """

        return timezone(self.sign * self.timedelda)
| """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides a simple interface to get a custom timezone.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import timedelta, timezone
class TZ:
    """
    Provides a timezone.

    :param str sign:
        The sign to apply. Should be :code:`+` or :code:`-`.
    :param int days:
        The number of days from UTC.
    :param int seconds:
        The number of seconds from UTC.
    :param int microseconds:
        The number of microseconds from UTC.
    :param int milliseconds:
        The number of milliseconds from UTC.
    :param int minutes:
        The number of minutes from UTC.
    :param int hours:
        The number of hours from UTC.
    :param int weeks:
        The number of weeks from UTC.
    """

    def __init__(
        self,
        sign="+",
        days=0,
        seconds=0,
        microseconds=0,
        milliseconds=0,
        minutes=0,
        hours=0,
        weeks=0,
    ):
        # Any sign other than "+" is treated as "-".
        if sign == "+":
            self.sign = 1
        else:  # pragma: no cover
            self.sign = -1

        # NOTE: the attribute name keeps the historical typo ("timedelda")
        # for backward compatibility with any external readers.
        self.timedelda = timedelta(
            days=days,
            seconds=seconds,
            microseconds=microseconds,
            milliseconds=milliseconds,
            minutes=minutes,
            hours=hours,
            weeks=weeks,
        )

    def get(self):
        """
        Provides the timezone itself.

        :rtype: timezone
        """

        return timezone(self.sign * self.timedelda)
| en | 0.578345 | The tool to check the availability or syntax of domain, IP or URL. :: ██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗ ██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝ ██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗ ██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝ ██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝ Provides a simple interface to get a custom timezone. Author: <NAME>, @funilrys, contactTATAfunilrysTODTODcom Special thanks: https://pyfunceble.github.io/special-thanks.html Contributors: https://pyfunceble.github.io/contributors.html Project link: https://github.com/funilrys/PyFunceble Project documentation: https://pyfunceble.readthedocs.io/en/master/ Project homepage: https://pyfunceble.github.io/ License: :: Copyright 2017, 2018, 2019, 2020 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Provides a timezone. :param str sign: The sign to apply. Should be :code:`+` or :code:`-`. :param int days: The number of days from UTC. :param int seconds: The number of seconds from UTC. :param int microseconds: The number of microseconds from UTC. :param int milliseconds: The number of days from UTC. :param int minutes: The number of minutes from UTC. :param int hours: The number of hours from UTC. :param int weeks: The number of weeks from UTC. # pragma: no cover Provides the timezone itself. 
:rtype: timezone | 2.78139 | 3 |
src/main/python/utils/evdevutils.py | werpu/input_pipe | 11 | 6619443 | <reponame>werpu/input_pipe<filename>src/main/python/utils/evdevutils.py
import evdev
class EvDevUtils:
    """Thin helper around python-evdev device discovery."""

    # Externalized producer so test cases can patch in mock devices
    # instead of touching real /dev/input nodes.
    @staticmethod
    def get_available_devices():
        """Return an ``evdev.InputDevice`` for every detected device path."""
        devices = []
        for device_path in evdev.list_devices():
            devices.append(evdev.InputDevice(device_path))
        return devices
| import evdev
class EvDevUtils:
# externalized producer to be replaced in testing cases by mocks
@staticmethod
def get_available_devices():
return [evdev.InputDevice(path) for path in evdev.list_devices()] | en | 0.960428 | # externalized producer to be replaced in testing cases by mocks | 1.926951 | 2 |
features/steps/US_0009.py | av1m/cars | 0 | 6619444 | <reponame>av1m/cars
from behave import *
from cars.motor import Motor
use_step_matcher("parse")
@given("Two motors already installed")
def step_impl(context):
    """Install two motors with different power ratings on the scenario context.

    :type context: behave.runner.Context
    """
    context.motor1 = Motor(100)
    context.motor2 = Motor(200)
@when("I compare the two motors")
def step_impl(context):
    """Compare the two motors and stash the boolean result on the context.

    :type context: behave.runner.Context
    """
    context.compare = context.motor1 < context.motor2
@then("I should see the motor with the best performance")
def step_impl(context):
    """Assert the comparison recorded in the @when step succeeded.

    :type context: behave.runner.Context
    """
    assert context.compare is True
| from behave import *
from cars.motor import Motor
use_step_matcher("parse")
@given("Two motors already installed")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
context.motor1 = Motor(100)
context.motor2 = Motor(200)
@when("I compare the two motors")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
context.compare = context.motor1 < context.motor2
@then("I should see the motor with the best performance")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
assert context.compare is True | ru | 0.262572 | :type context: behave.runner.Context :type context: behave.runner.Context :type context: behave.runner.Context | 2.55535 | 3 |
src/core/domain/generator/entity/generator.py | Spires12/generate-password-api | 0 | 6619445 | import attr
@attr.s(auto_attribs=True)
class Generator():
    """Base password-generator entity: desired length plus character-class toggles."""
    # Desired length of the generated password (0 means "not set").
    length_password: int = 0
    # Character-class toggles controlling which symbols the password may contain.
    include_symbols: bool = False
    include_numbers: bool = False
    include_lowercase_letters: bool = False
    include_uppercase_characters: bool = False
| import attr
@attr.s(auto_attribs=True)
class Generator():
""" Base generator entity """
length_password: int = 0
include_symbols: bool = False
include_numbers: bool = False
include_lowercase_letters: bool = False
include_uppercase_characters: bool = False
| en | 0.220188 | Base generator entity | 2.329762 | 2 |
receiver.py | ciscocms/cdr-receiver-db | 5 | 6619446 | <gh_stars>1-10
from __future__ import print_function
from flask import Flask, request, redirect, jsonify
import xmltodict
import json
import cdrs
from models import Record
app = Flask(__name__)
@app.route('/')
def index():
    # The root URL has no content of its own; send visitors to the admin UI.
    return redirect('/admin/')
@app.route('/cdr', methods=['POST'])
def post():
    """Ingest a CDR XML payload and persist each record it contains.

    Always answers 204 (even on parse failure) so the sending device does
    not retry the same broken payload forever.
    """
    try:
        cdr = xmltodict.parse(request.data)
        records = cdr['records']['record']
        # xmltodict yields a dict for a single <record> element and a list
        # for several; normalize to a list so one code path handles both.
        if not isinstance(records, list):
            records = [records]
        for record in records:
            # '@type' selects the matching CDR class from the cdrs module.
            instance = getattr(cdrs, str(record['@type']))(record)
            Record.create(data=json.dumps(instance.__dict__))
    except Exception as exc:  # was a bare `except:`, which also trapped SystemExit
        print('Parser failure!', exc)
    return ('', 204)
@app.route('/api/v1/records/', methods=['GET'])
@app.route('/api/v1/records/<int:page>', methods=['GET'])
def records_endpoint(page=1):
    """Return one page (20 items) of stored records, newest first."""
    page_size = 20
    rows = Record.select().paginate(page, page_size).order_by(
        Record.created_date.desc())
    serialized = [json.loads(row.serialize) for row in rows]
    if serialized:
        payload = {'records': serialized}
    else:
        # Preserved quirk: an empty page reports the integer 0, not [].
        payload = {'records': 0}
    response = jsonify(payload)
    response.status_code = 200
    return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8444)
| from __future__ import print_function
from flask import Flask, request, redirect, jsonify
import xmltodict
import json
import cdrs
from models import Record
app = Flask(__name__)
@app.route('/')
def index():
return redirect('/admin/')
@app.route('/cdr', methods=['POST'])
def post():
try:
cdr = xmltodict.parse(request.data)
if isinstance(cdr['records']['record'], list):
for record in cdr['records']['record']:
instance = getattr(cdrs, str(record['@type']))(record)
Record.create(data=json.dumps(instance.__dict__))
else:
instance = getattr(cdrs, str(cdr['records']['record']['@type']))(cdr['records']['record'])
Record.create(data=json.dumps(instance.__dict__))
return('', 204)
except:
print('Parser failure!')
return('', 204)
@app.route('/api/v1/records/', methods=['GET'])
@app.route('/api/v1/records/<int:page>', methods=['GET'])
def records_endpoint(page=1):
per_page = 20
query = Record.select().paginate(page, per_page).order_by(Record.created_date.desc())
data = [json.loads(i.serialize) for i in query]
if data:
response = jsonify({'records': data})
response.status_code = 200
else:
output = {
'records': 0
}
response = jsonify(output)
response.status_code = 200
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8444) | none | 1 | 2.640799 | 3 | |
vsbuy_backend/products/views/products.py | Edward-TL/vsbuy_backend | 0 | 6619447 | <gh_stars>0
"""Product views."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
# Serializers
from vsbuy_backend.products.serializers.products import ProductModelSerializer
# Models
from vsbuy_backend.products.models.products import Product
class ProductViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     viewsets.GenericViewSet):
    """Read-only product view set.

    Lists active products and retrieves a single product by its ``name``.
    """

    queryset = Product.objects.filter(is_active=True)
    serializer_class = ProductModelSerializer
    lookup_field = 'name'

    # Filters
    filter_backends = (SearchFilter, OrderingFilter, DjangoFilterBackend)
    # Single-element tuples need the trailing comma: ('name') is just the
    # string 'name', which SearchFilter/OrderingFilter would treat as an
    # iterable of one-character field names.
    search_fields = ('name',)
    ordering = ('id',)
| """Product views."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Filters
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
# Serializers
from vsbuy_backend.products.serializers.products import ProductModelSerializer
# Models
from vsbuy_backend.products.models.products import Product
class ProductViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
"""Product view set."""
queryset = Product.objects.filter(is_active=True)
serializer_class = ProductModelSerializer
lookup_field = 'name'
# Filters
filter_backends = (SearchFilter, OrderingFilter, DjangoFilterBackend)
search_fields = ('name')
ordering = ('id') | en | 0.611191 | Product views. # Django REST Framework # Filters # Serializers # Models Product view set. # Filters | 2.115149 | 2 |
recursion_tree/config.py | IgorZyktin/recursion_tree | 3 | 6619448 | # -*- coding: utf-8 -*-
"""Default configuration."""
DEFAULT_CONFIG = {
}
| # -*- coding: utf-8 -*-
"""Default configuration."""
DEFAULT_CONFIG = {
}
| en | 0.668263 | # -*- coding: utf-8 -*- Default configuration. | 1.107568 | 1 |
users_auth/models.py | sabreensalama/Cloud-Devops-Capstone | 0 | 6619449 | <filename>users_auth/models.py<gh_stars>0
from django.db import models
from django.core.validators import *
# Create your models here.
class Users(models.Model):
    """Self-managed user account record with profile and confirmation state.

    NOTE(review): ``password``/``re_password`` are stored as plain-text
    CharFields — they should be hashed (or this model replaced by Django's
    built-in auth user). Confirm intent before shipping.
    """
    first_name = models.CharField(null=False , max_length=50)
    last_name = models.CharField(null=False , max_length=50)
    email = models.EmailField(null=False,max_length=254)
    password = models.CharField(null=False, max_length=50)
    # Confirmation copy of the password; persisting it duplicates sensitive data.
    re_password = models.CharField(null=False, max_length=50)
    # Boolean toggle between two user categories (semantics defined by callers).
    usertype=models.BooleanField(default=True)
    country=models.CharField(max_length=50,default="")
    us_phone=models.CharField(null=True,max_length=12)
    date_birth =models.DateField(null=True)
    # TODO(review): field name looks like a typo for "facebook_link";
    # renaming requires a schema migration, so it is only flagged here.
    faceboo_link= models.URLField(null=True)
    picture = models.ImageField(upload_to='users', blank=True)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
    # Set once the user follows the confirmation link sent by email.
    email_confirmed = models.BooleanField(default=False)
| <filename>users_auth/models.py<gh_stars>0
from django.db import models
from django.core.validators import *
# Create your models here.
class Users(models.Model):
first_name = models.CharField(null=False , max_length=50)
last_name = models.CharField(null=False , max_length=50)
email = models.EmailField(null=False,max_length=254)
password = models.CharField(null=False, max_length=50)
re_password = models.CharField(null=False, max_length=50)
usertype=models.BooleanField(default=True)
country=models.CharField(max_length=50,default="")
us_phone=models.CharField(null=True,max_length=12)
date_birth =models.DateField(null=True)
faceboo_link= models.URLField(null=True)
picture = models.ImageField(upload_to='users', blank=True)
created_at = models.DateField(auto_now_add=True)
updated_at = models.DateField(auto_now=True)
email_confirmed = models.BooleanField(default=False)
| en | 0.963489 | # Create your models here. | 2.201789 | 2 |
LD3/ld3_script.py | 0x4C4A/SS-2014 | 0 | 6619450 | <reponame>0x4C4A/SS-2014
# -*- coding: utf-8 -*-
# Signāli un sistēmas. 3. Laboratorijas darbs
# == Taisnstūra loga ietekme uz signāla spektru ==
import sys
import numpy as np
import matplotlib.pyplot as plt
from PyQt4 import QtGui, QtCore
from scipy.fftpack import fft
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class Window(QtGui.QDialog):
    """Dialog demonstrating how a rectangular window's width affects a
    signal's spectrum; the width is driven interactively by a slider."""
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        # a figure instance to plot on
        self.figure = plt.figure()
        self.setWindowTitle('Singnala spektra atkariba no taisnstura loga platuma')
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Make a slidebar (1..40 maps to window widths 0.1*T .. 4.0*T)
        sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        sld.setFocusPolicy(QtCore.Qt.StrongFocus)
        sld.setGeometry(30, 40, 200, 30)
        sld.setMaximum(40)
        sld.setMinimum(1)
        sld.setTickInterval(1)
        sld.setTickPosition(2)
        sld.setValue(20)
        sld.valueChanged[int].connect(self.changeValue)
        # Make a Line Edit widget (read-only status line showing the width)
        self.qle = QtGui.QLineEdit(self)
        self.qle.setReadOnly(1)
        #self.qle.insert('Taisnstura loga platums:')
        # set the layout
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        layout.addWidget(sld)
        layout.addWidget(self.qle)
        self.setLayout(layout)
    def changeValue(self, value):
        """Redraw the truncated signal and its spectrum for slider value
        ``value`` (window width T = value/10 periods of the 1 Hz tone)."""
        # Time parameters (``samples`` is a module-level global set in __main__)
        T = value/10.
        sampRate = samples/T
        x = np.linspace(0, T, samples)
        # Windowed (truncated) two-tone signal
        y = np.sin(2*np.pi*x)+np.sin(2*np.pi*x*1.5)
        # Discrete spectrum of the truncated signal
        S = fft(y)/samples
        fs = np.arange(0, sampRate, 1/T)
        # Continuous spectrum (sinc) of the rectangular window
        fx0 = np.arange(-2, 10, 0.001)
        S0 = 0.5*np.sinc(T*fx0)
        # plot
        sign = self.figure.add_subplot(211)
        spectr = self.figure.add_subplot(212)
        # Drop the previous curves
        sign.hold(False)
        spectr.hold(False)
        # Draw the new ones
        sign.plot(x, y, '.-k')
        sign.legend(['Ierobezots signals'], 1)
        spectr.stem(fs, abs(S), linefmt='k', markerfmt='.k'), spectr.hold(True)
        spectr.plot(fx0+1, abs(S0), '-.b')
        spectr.legend(['Signala spektrs'], 1)
        spectr.axis([0., 5., 0, 0.8])#, sign.axis([0, 4., -1, 1])
        spectr.grid(b = True, which='both', linewidth=2), sign.grid(b = True)
        # Update the line-edit widget with the current window width
        t = 'Taisnstura loga platums: {}xT'.format(T)
        self.qle.setSelection(0, len(t))
        self.qle.insert(t)
        # Refresh the canvas
        self.canvas.draw()
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    # Simulation time parameters (read as a global by Window.changeValue)
    samples = 128
    # GUI
    main = Window()
    main.changeValue(20)
    main.show()
    sys.exit(app.exec_())
| # -*- coding: utf-8 -*-
# Signāli un sistēmas. 3. Laboratorijas darbs
# == Taisnstūra loga ietekme uz signāla spektru ==
import sys
import numpy as np
import matplotlib.pyplot as plt
from PyQt4 import QtGui, QtCore
from scipy.fftpack import fft
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
self.setWindowTitle('Singnala spektra atkariba no taisnstura loga platuma')
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# Make a slidebar
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.setFocusPolicy(QtCore.Qt.StrongFocus)
sld.setGeometry(30, 40, 200, 30)
sld.setMaximum(40)
sld.setMinimum(1)
sld.setTickInterval(1)
sld.setTickPosition(2)
sld.setValue(20)
sld.valueChanged[int].connect(self.changeValue)
# Make a Line Edit widget
self.qle = QtGui.QLineEdit(self)
self.qle.setReadOnly(1)
#self.qle.insert('Taisnstura loga platums:')
# set the layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(sld)
layout.addWidget(self.qle)
self.setLayout(layout)
def changeValue(self, value):
''' plot '''
# Laika parametri
T = value/10.
sampRate = samples/T
x = np.linspace(0, T, samples)
# Logots signāls
y = np.sin(2*np.pi*x)+np.sin(2*np.pi*x*1.5)
# Diskrēts spektrs
S = fft(y)/samples
fs = np.arange(0, sampRate, 1/T)
# Vienlaidu spektrs
fx0 = np.arange(-2, 10, 0.001)
S0 = 0.5*np.sinc(T*fx0)
# plot
sign = self.figure.add_subplot(211)
spectr = self.figure.add_subplot(212)
# Atceļ veco
sign.hold(False)
spectr.hold(False)
# Uzliek jauno
sign.plot(x, y, '.-k')
sign.legend(['Ierobezots signals'], 1)
spectr.stem(fs, abs(S), linefmt='k', markerfmt='.k'), spectr.hold(True)
spectr.plot(fx0+1, abs(S0), '-.b')
spectr.legend(['Signala spektrs'], 1)
spectr.axis([0., 5., 0, 0.8])#, sign.axis([0, 4., -1, 1])
spectr.grid(b = True, which='both', linewidth=2), sign.grid(b = True)
# Papildina Line Edit widget ar loga platumu
t = 'Taisnstura loga platums: {}xT'.format(T)
self.qle.setSelection(0, len(t))
self.qle.insert(t)
# Atjauno canvas
self.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
# Siulācijas laika patametri
samples = 128
# GUI
main = Window()
main.changeValue(20)
main.show()
sys.exit(app.exec_()) | en | 0.238156 | # -*- coding: utf-8 -*- # Signāli un sistēmas. 3. Laboratorijas darbs # == Taisnstūra loga ietekme uz signāla spektru == # a figure instance to plot on # this is the Canvas Widget that displays the `figure` # it takes the `figure` instance as a parameter to __init__ # this is the Navigation widget # it takes the Canvas widget and a parent # Make a slidebar # Make a Line Edit widget #self.qle.insert('Taisnstura loga platums:') # set the layout plot # Laika parametri # Logots signāls # Diskrēts spektrs # Vienlaidu spektrs # plot # Atceļ veco # Uzliek jauno #, sign.axis([0, 4., -1, 1]) # Papildina Line Edit widget ar loga platumu # Atjauno canvas # Siulācijas laika patametri # GUI | 2.4721 | 2 |