max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/classes/providers.py | bertini36/profiler | 7 | 6617951 | # -*- coding: UTF-8 -*-
import time
from abc import ABC, abstractmethod, abstractproperty
import tweepy
from loguru import logger
from .decorators import timeit
from .exceptions import UserDoesNotExist
class Provider(ABC):
    """Abstract base class for timeline providers.

    Concrete providers must expose a human-readable ``name``, support the
    context-manager protocol (``with provider as p: ...``) and implement
    ``download_timeline``.
    """

    def __repr__(self):
        # A provider is represented by its human-readable name.
        return self.name

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable provider name."""
        # NOTE: ``abstractproperty`` is deprecated since Python 3.3; stacking
        # @property on top of @abstractmethod is the supported spelling.

    @abstractmethod
    def __enter__(self):
        """Acquire provider resources (e.g. authenticate) and return self."""

    @abstractmethod
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Release provider resources."""

    @abstractmethod
    def download_timeline(
        self, user: str, limit=None, filter_retweets: bool = True
    ) -> dict:
        """Download ``user``'s timeline as ``{'user': ..., 'tweets': [...]}``."""
class TweepyProvider(Provider):
    """Twitter timeline provider backed by the tweepy library."""

    def __init__(
        self,
        public_key: str,
        secret_key: str,
        access_token: str,
        secret_token: str,
    ):
        # OAuth credentials only; the API client is built lazily in
        # __enter__ so construction never touches the network.
        self._public_key = public_key
        self._secret_key = secret_key
        self._access_token = access_token
        self._access_token_secret = secret_token

    @property
    def name(self):
        return 'Tweepy provider'

    @logger.catch
    def __enter__(self):
        # Authenticate and build the tweepy API client.
        auth = tweepy.OAuthHandler(self._public_key, self._secret_key)
        auth.set_access_token(self._access_token, self._access_token_secret)
        self.api = tweepy.API(auth)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release: no persistent connection is held here.
        pass

    @staticmethod
    def is_retweet(tweet):
        # ``retweeted`` only flags retweets by the authenticated user, so the
        # conventional "RT @" text prefix is also checked.
        return tweet.retweeted or 'RT @' in tweet.full_text

    @staticmethod
    def _next_tweet(cursor):
        """Fetch the next tweet from ``cursor``, sleeping through rate limits.

        Returns None when the timeline is exhausted.
        """
        while True:
            try:
                return cursor.next()
            except tweepy.TweepError:
                # Most likely rate limited: back off 15 minutes, then retry.
                logger.warning('TweepError: Delaying...')
                time.sleep(60 * 15)
            except StopIteration:
                return None

    @timeit
    def download_timeline(
        self, user: str, limit: int = None, filter_rts: bool = False
    ) -> dict:
        """
        Download user tweets, optionally ignoring retweets
        :param user: Twitter username
        :param limit: Number of tweets to download (None means no limit)
        :param filter_rts: Filter user retweets
        :raises UserDoesNotExist: if the user is missing or has no tweets
        """
        logger.info(f'Downloading {user} timeline')
        timeline = {'user': user, 'tweets': []}
        cursor = tweepy.Cursor(
            self.api.user_timeline, screen_name=user, tweet_mode='extended'
        ).items()
        try:
            tweet = cursor.next()
        except tweepy.TweepError as e:
            logger.error(e)
            raise UserDoesNotExist(
                f'User {user} does not exist '
                f'or it has not registered tweets. e: {e}'
            )
        # Process the current tweet, then always advance the cursor.  The
        # previous implementation `continue`d on a retweet without fetching
        # the next tweet (spinning forever on the first retweet met) and
        # re-appended the same tweet after a rate-limit retry.
        while tweet is not None:
            if not (filter_rts and self.is_retweet(tweet)):
                timeline['tweets'].append(
                    {
                        'id': tweet.id,
                        'created_at': tweet.created_at,
                        'text': tweet.full_text,
                    }
                )
                if limit and len(timeline['tweets']) >= limit:
                    break
            tweet = self._next_tweet(cursor)
        return timeline
| # -*- coding: UTF-8 -*-
import time
from abc import ABC, abstractmethod, abstractproperty
import tweepy
from loguru import logger
from .decorators import timeit
from .exceptions import UserDoesNotExist
class Provider(ABC):
    """Abstract base class for timeline providers.

    Concrete providers must expose a human-readable ``name``, support the
    context-manager protocol (``with provider as p: ...``) and implement
    ``download_timeline``.
    """

    def __repr__(self):
        # A provider is represented by its human-readable name.
        return self.name

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable provider name."""
        # NOTE: ``abstractproperty`` is deprecated since Python 3.3; stacking
        # @property on top of @abstractmethod is the supported spelling.

    @abstractmethod
    def __enter__(self):
        """Acquire provider resources (e.g. authenticate) and return self."""

    @abstractmethod
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Release provider resources."""

    @abstractmethod
    def download_timeline(
        self, user: str, limit=None, filter_retweets: bool = True
    ) -> dict:
        """Download ``user``'s timeline as ``{'user': ..., 'tweets': [...]}``."""
class TweepyProvider(Provider):
    """Twitter timeline provider backed by the tweepy library."""

    def __init__(
        self,
        public_key: str,
        secret_key: str,
        access_token: str,
        secret_token: str,
    ):
        # OAuth credentials only; the API client is built lazily in
        # __enter__ so construction never touches the network.
        self._public_key = public_key
        self._secret_key = secret_key
        self._access_token = access_token
        self._access_token_secret = secret_token

    @property
    def name(self):
        return 'Tweepy provider'

    @logger.catch
    def __enter__(self):
        # Authenticate and build the tweepy API client.
        auth = tweepy.OAuthHandler(self._public_key, self._secret_key)
        auth.set_access_token(self._access_token, self._access_token_secret)
        self.api = tweepy.API(auth)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release: no persistent connection is held here.
        pass

    @staticmethod
    def is_retweet(tweet):
        # ``retweeted`` only flags retweets by the authenticated user, so the
        # conventional "RT @" text prefix is also checked.
        return tweet.retweeted or 'RT @' in tweet.full_text

    @staticmethod
    def _next_tweet(cursor):
        """Fetch the next tweet from ``cursor``, sleeping through rate limits.

        Returns None when the timeline is exhausted.
        """
        while True:
            try:
                return cursor.next()
            except tweepy.TweepError:
                # Most likely rate limited: back off 15 minutes, then retry.
                logger.warning('TweepError: Delaying...')
                time.sleep(60 * 15)
            except StopIteration:
                return None

    @timeit
    def download_timeline(
        self, user: str, limit: int = None, filter_rts: bool = False
    ) -> dict:
        """
        Download user tweets, optionally ignoring retweets
        :param user: Twitter username
        :param limit: Number of tweets to download (None means no limit)
        :param filter_rts: Filter user retweets
        :raises UserDoesNotExist: if the user is missing or has no tweets
        """
        logger.info(f'Downloading {user} timeline')
        timeline = {'user': user, 'tweets': []}
        cursor = tweepy.Cursor(
            self.api.user_timeline, screen_name=user, tweet_mode='extended'
        ).items()
        try:
            tweet = cursor.next()
        except tweepy.TweepError as e:
            logger.error(e)
            raise UserDoesNotExist(
                f'User {user} does not exist '
                f'or it has not registered tweets. e: {e}'
            )
        # Process the current tweet, then always advance the cursor.  The
        # previous implementation `continue`d on a retweet without fetching
        # the next tweet (spinning forever on the first retweet met) and
        # re-appended the same tweet after a rate-limit retry.
        while tweet is not None:
            if not (filter_rts and self.is_retweet(tweet)):
                timeline['tweets'].append(
                    {
                        'id': tweet.id,
                        'created_at': tweet.created_at,
                        'text': tweet.full_text,
                    }
                )
                if limit and len(timeline['tweets']) >= limit:
                    break
            tweet = self._next_tweet(cursor)
        return timeline
| en | 0.606247 | # -*- coding: UTF-8 -*- Download user tweets ignoring retweets :param user: Twitter username :param limit: Number of tweets to download :param filter_rts: Filter user retweets | 2.778885 | 3 |
django_auth/urls.py | rockychen-dpaw/django-forms | 0 | 6617952 | <reponame>rockychen-dpaw/django-forms<filename>django_auth/urls.py
from .views import (UsersView,UserEditView)
# Application namespace used when reversing URLs (e.g. 'django_auth:...').
app_name = "django_auth"
# Each view class contributes its own routes via a ``urlpatterns()`` factory,
# keeping URL definitions co-located with the views that serve them.
urlpatterns = []
urlpatterns.extend(UsersView.urlpatterns())
urlpatterns.extend(UserEditView.urlpatterns())
from .views import (UsersView,UserEditView)
# Application namespace used when reversing URLs (e.g. 'django_auth:...').
app_name = "django_auth"
# Each view class contributes its own routes via a ``urlpatterns()`` factory,
# keeping URL definitions co-located with the views that serve them.
urlpatterns = []
urlpatterns.extend(UsersView.urlpatterns())
urlpatterns.extend(UserEditView.urlpatterns()) | none | 1 | 1.395312 | 1 | |
terra_notebook_utils/progress.py | mitchac/terra-notebook-utils | 0 | 6617953 | <reponame>mitchac/terra-notebook-utils
import sys
import time
import threading
import sched
from math import floor, ceil
from contextlib import AbstractContextManager
from concurrent.futures import ThreadPoolExecutor, as_completed
class RateLimited:
    """
    Decorator that throttles calls to the wrapped function.

    If the function is invoked more often than ``rate`` times per second, the
    call is silently dropped — or ``exception`` is raised when one was given.
    """

    def __init__(self, rate: float, exception: Exception=None):
        self.period = 1 / rate
        self.exception = exception
        self.reset()

    def reset(self):
        # Pretend the last call happened long ago so the next call goes through.
        self._last_call = time.time() - 10 * self.period

    def __call__(self, func):
        def wrapper(*args, **kwargs):
            current = time.time()
            if current - self._last_call < self.period:
                # Too soon: either raise or silently drop the call.
                if self.exception is not None:
                    raise self.exception("Too soon")
                return None
            self._last_call = current
            return func(*args, **kwargs)
        # Expose reset() on the wrapper so decorated call sites can force the
        # next invocation through.
        wrapper.reset = self.reset
        return wrapper
class ProgressBar(AbstractContextManager):
    """Fixed-width text progress bar for console/notebook output.

    Shows a 40-character bar, the completion percentage and elapsed time;
    when ``size`` is given it also shows the total size and mean throughput.
    Use as a context manager so the bar line is always terminated.
    """

    def __init__(self, number_of_steps, prefix="", units="", size: int=None):
        self.number_of_steps = number_of_steps
        self.steps_completed = 0
        self.prefix = prefix
        self.units = units
        self.size = size
        self._length = 40  # bar width in characters; reasonable for notebooks
        self._lock = threading.Lock()
        self._start_time = time.time()

    def update(self, number_of_steps=1):
        """Advance the bar by ``number_of_steps`` steps and redraw it."""
        self.steps_completed += number_of_steps
        fraction_done = self.steps_completed / self.number_of_steps
        filled_slots = int(self._length * self.steps_completed / self.number_of_steps)
        elapsed = ceil(time.time() - self._start_time)
        pieces = ["#" * filled_slots, " " * (self._length - filled_slots)]
        pieces.append(f" {floor(100 * fraction_done)}%")
        if self.size is not None:
            pieces.append(f" of {self.size}{self.units}")
            pieces.append(f" %.2f{self.units}/s" % (self.size * fraction_done / elapsed))
        self._bar = "".join(pieces)
        self._duration = elapsed
        self._print()

    @RateLimited(2.0)
    def _print(self):
        # Redraws are throttled to 2/s so terminal IO stays cheap.
        with self._lock:
            print("\r", f"{self.prefix} {self._bar} ({self._duration} seconds)", end="")

    def close(self, message=None):
        """Force one final (un-throttled) redraw and end the bar line."""
        self._print.reset()
        self._print()
        with self._lock:
            print()
        if message is not None:
            print(message)

    def __exit__(self, *args, **kwargs):
        self.close()
class ProgressReporter(AbstractContextManager):
    """Throughput reporter for streaming work.

    Call :meth:`checkpoint` with the number of units processed since the last
    call; the reporter keeps the last two checkpoints so it can expose both
    an average and an instantaneous rate.
    """

    def __init__(self, units: str="lines"):
        self.start_time = time.time()
        # Rolling two-element windows: [previous, latest] checkpoint.
        self._checkpoint_time = [self.start_time, self.start_time]
        self._units_processed = [0, 0]
        self._lock = threading.Lock()
        self.units = units

    @property
    def duration(self):
        """Seconds elapsed from construction to the latest checkpoint."""
        return self._checkpoint_time[-1] - self.start_time

    @property
    def average_rate(self):
        """Mean units/second over the whole run."""
        return self._units_processed[-1] / self.duration

    @property
    def instantaneous_rate(self):
        """Units/second between the last two checkpoints."""
        # Bug fix: the previous version wrapped the delta in a one-element
        # list and then divided the *list* by the duration, which raised
        # TypeError whenever this property was accessed.
        units_delta = self._units_processed[-1] - self._units_processed[-2]
        duration = self._checkpoint_time[-1] - self._checkpoint_time[-2]
        return units_delta / duration

    @property
    def units_processed(self):
        """Total units processed so far."""
        return self._units_processed[-1]

    def checkpoint(self, _units_processed: int):
        """Record that ``_units_processed`` more units were handled."""
        self._units_processed = [self._units_processed[-1], self._units_processed[-1] + _units_processed]
        self._checkpoint_time = [self._checkpoint_time[-1], time.time()]
        self._print()

    @RateLimited(2.0)
    def _print(self):
        # Redraws are throttled to 2/s so terminal IO stays cheap.
        with self._lock:
            print("\r",
                  f"%9i {self.units} processed," % self._units_processed[-1],
                  f"%6.0f {self.units}/s" % self.average_rate,
                  "%7.2f s" % self.duration,
                  end="")

    def close(self):
        """Force one final (un-throttled) redraw and end the output line."""
        self._print.reset()
        self._print()
        with self._lock:
            print()

    def __exit__(self, *args, **kwargs):
        self.close()
| import sys
import time
import threading
import sched
from math import floor, ceil
from contextlib import AbstractContextManager
from concurrent.futures import ThreadPoolExecutor, as_completed
class RateLimited:
"""
Decorator to rate limit method calls. Raises `exception` if method is called more quickly than
`rate` times per second, or does nothing if exception is None.
"""
def __init__(self, rate: float, exception: Exception=None):
self.period = 1 / rate
self.exception = exception
self.reset()
def reset(self):
self._last_call = time.time() - 10 * self.period
def __call__(self, func):
def wrapper(*args, **kwargs):
now = time.time()
if now - self._last_call >= self.period:
self._last_call = now
return func(*args, **kwargs)
elif self.exception is not None:
raise self.exception("Too soon")
wrapper.reset = self.reset
return wrapper
class ProgressBar(AbstractContextManager):
def __init__(self, number_of_steps, prefix="", units="", size: int=None):
self.number_of_steps = number_of_steps
self.steps_completed = 0
self.prefix = prefix
self.units = units
self.size = size
self._length = 40 # This seems like a reasonable length for notebook output
self._lock = threading.Lock()
self._start_time = time.time()
def update(self, number_of_steps=1):
self.steps_completed += number_of_steps
slots_filled = int(self._length * self.steps_completed / self.number_of_steps)
duration = ceil(time.time() - self._start_time)
portion_complete = self.steps_completed / self.number_of_steps
bar = "#" * slots_filled
bar += " " * (self._length - slots_filled)
bar += f" {floor(100 * portion_complete)}%"
if self.size is not None:
bar += f" of {self.size}{self.units}"
bar += f" %.2f{self.units}/s" % (self.size * portion_complete / duration)
self._bar = bar
self._duration = duration
self._print()
@RateLimited(2.0)
def _print(self):
with self._lock:
print("\r", f"{self.prefix} {self._bar} ({self._duration} seconds)", end="")
def close(self, message=None):
self._print.reset()
self._print()
with self._lock:
print()
if message is not None:
print(message)
def __exit__(self, *args, **kwargs):
self.close()
class ProgressReporter(AbstractContextManager):
    """Throughput reporter for streaming work.

    Call :meth:`checkpoint` with the number of units processed since the last
    call; the reporter keeps the last two checkpoints so it can expose both
    an average and an instantaneous rate.
    """

    def __init__(self, units: str="lines"):
        self.start_time = time.time()
        # Rolling two-element windows: [previous, latest] checkpoint.
        self._checkpoint_time = [self.start_time, self.start_time]
        self._units_processed = [0, 0]
        self._lock = threading.Lock()
        self.units = units

    @property
    def duration(self):
        """Seconds elapsed from construction to the latest checkpoint."""
        return self._checkpoint_time[-1] - self.start_time

    @property
    def average_rate(self):
        """Mean units/second over the whole run."""
        return self._units_processed[-1] / self.duration

    @property
    def instantaneous_rate(self):
        """Units/second between the last two checkpoints."""
        # Bug fix: the previous version wrapped the delta in a one-element
        # list and then divided the *list* by the duration, which raised
        # TypeError whenever this property was accessed.
        units_delta = self._units_processed[-1] - self._units_processed[-2]
        duration = self._checkpoint_time[-1] - self._checkpoint_time[-2]
        return units_delta / duration

    @property
    def units_processed(self):
        """Total units processed so far."""
        return self._units_processed[-1]

    def checkpoint(self, _units_processed: int):
        """Record that ``_units_processed`` more units were handled."""
        self._units_processed = [self._units_processed[-1], self._units_processed[-1] + _units_processed]
        self._checkpoint_time = [self._checkpoint_time[-1], time.time()]
        self._print()

    @RateLimited(2.0)
    def _print(self):
        # Redraws are throttled to 2/s so terminal IO stays cheap.
        with self._lock:
            print("\r",
                  f"%9i {self.units} processed," % self._units_processed[-1],
                  f"%6.0f {self.units}/s" % self.average_rate,
                  "%7.2f s" % self.duration,
                  end="")

    def close(self):
        """Force one final (un-throttled) redraw and end the output line."""
        self._print.reset()
        self._print()
        with self._lock:
            print()

    def __exit__(self, *args, **kwargs):
        self.close()
users/admin.py | mike2151/Online-OH-queue | 6 | 6617954 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.conf import settings
from .forms import StudentUserCreationForm, StudentUserChangeForm
from .models import StudentUser
import os
class StudentUserAdmin(UserAdmin):
    """Admin configuration for the custom ``StudentUser`` model."""
    # Custom forms so admin add/change pages validate against StudentUser.
    add_form = StudentUserCreationForm
    form = StudentUserChangeForm
    model = StudentUser
    # Columns shown on the user list page.
    list_display = ['email', 'first_name', 'last_name', 'username', 'is_active', 'is_ta', 'is_superuser']
    # Field groups rendered on the change page.
    fieldsets = (
        ('User Information', {'fields': ('first_name', 'last_name', 'email', 'username')}),
        ('Permissions', {'fields': ('is_active', 'is_ta', 'is_superuser')}),
        ('Important Dates', {'fields': ('date_joined',)})
    )
    # Sidebar filters on the user list page.
    list_filter = ('is_superuser', 'is_ta', 'is_active')
# Brand the admin with the course title (falls back to a bare " Office Hours
# Admin" suffix when COURSE_TITLE is unset).
admin.site.site_header = os.environ.get('COURSE_TITLE','') + ' Office Hours Admin'
admin.site.register(StudentUser, StudentUserAdmin)
# Groups are unused by this app, so hide them from the admin index.
admin.site.unregister(Group)
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.conf import settings
from .forms import StudentUserCreationForm, StudentUserChangeForm
from .models import StudentUser
import os
class StudentUserAdmin(UserAdmin):
    """Admin configuration for the custom ``StudentUser`` model."""
    # Custom forms so admin add/change pages validate against StudentUser.
    add_form = StudentUserCreationForm
    form = StudentUserChangeForm
    model = StudentUser
    # Columns shown on the user list page.
    list_display = ['email', 'first_name', 'last_name', 'username', 'is_active', 'is_ta', 'is_superuser']
    # Field groups rendered on the change page.
    fieldsets = (
        ('User Information', {'fields': ('first_name', 'last_name', 'email', 'username')}),
        ('Permissions', {'fields': ('is_active', 'is_ta', 'is_superuser')}),
        ('Important Dates', {'fields': ('date_joined',)})
    )
    # Sidebar filters on the user list page.
    list_filter = ('is_superuser', 'is_ta', 'is_active')
# Brand the admin with the course title (falls back to a bare " Office Hours
# Admin" suffix when COURSE_TITLE is unset).
admin.site.site_header = os.environ.get('COURSE_TITLE','') + ' Office Hours Admin'
admin.site.register(StudentUser, StudentUserAdmin)
admin.site.unregister(Group) | none | 1 | 1.934767 | 2 | |
src/draw_dag.py | D-Stacks/kaspad-py-explorer | 6 | 6617955 |
import os
# import pandas as pd
import pygraphviz as pgv
from store import *
# Output paths for rendered graphs.
# NOTE(review): this module-level name shadows the stdlib ``tempfile`` module
# (unused in this file, but confusing) — consider renaming it.
tempfile = r'C:\temp\temp.png'
temppdf = r'C:\temp\temp.pdf'
def draw_from_pp_up():
    """Draw a chunk of the block DAG bottom-up (loaded-block order) and
    write the rendered graph to ``temppdf``."""
    # store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2')
    store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\datadir2')
    store.load_blocks()
    # df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], []))
    seen = set()
    skip = 0  # 86400*2 + 3600*16
    chunk = 10000  # number of blocks to include in the drawing
    graph = pgv.AGraph(strict=True, directed=True, rankdir='TB', splines=False,
                       label='|G|={}'.format(len(store.blocks)))
    for block_hash, block_relations in store.traverse_loaded_blocks():
        # Optionally skip a prefix of the traversal before drawing starts.
        if skip > 0:
            skip -= 1
            if skip % 20000 == 0:
                print('skipped 20,000 blocks')
            continue
        graph.add_node(block_hash, label=block_hash.hex()[-8:])
        seen.add(block_hash)
        for parent in block_relations.parents:
            if parent in seen:
                graph.add_edge(block_hash, parent, color='green')
            else:
                print('Missing parent: ', parent.hex())
        if chunk == len(seen):
            break
    store.close()
    graph.draw(temppdf, prog='dot')
def draw_from_tips_down():
    """Draw a chunk of the block DAG top-down (from the tips towards the
    past) and write the rendered graph to ``temppdf``."""
    # store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2')
    store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\datadir2')
    # store.load_blocks()
    # df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], []))
    seen = set()
    skip = 3600*6  # skip this many blocks below the tips before drawing
    chunk = 2000  # number of blocks to include in the drawing
    graph = pgv.AGraph(strict=True, directed=True, rankdir='TB', splines=False,
                       label='|G|={}'.format(len(store.blocks)))
    for block_hash, block_relations in store.traverse_from_tips():
        if skip > 0:
            skip -= 1
            if skip % 2000 == 0:
                print('skipped 2000 blocks')
            continue
        graph.add_node(block_hash, label=block_hash.hex()[-8:])
        seen.add(block_hash)
        for child in block_relations.children:
            if child in seen:
                graph.add_edge(child, block_hash, color='green')
            else:
                print('Missing child: ', child.hex())
        if chunk == len(seen):
            break
    store.close()
    graph.draw(temppdf, prog='dot')
if __name__ == '__main__':
    # Default entry point: render the DAG downwards from the current tips.
    draw_from_tips_down()
    # draw_from_pp_up()
|
import os
# import pandas as pd
import pygraphviz as pgv
from store import *
tempfile = r'C:\temp\temp.png'
temppdf = r'C:\temp\temp.pdf'
def draw_from_pp_up():
# store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2')
store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\datadir2')
store.load_blocks()
# df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], []))
s = set()
skip = 0 # 86400*2 + 3600*16
chunk = 10000
aa = pgv.AGraph(strict=True, directed=True, rankdir='TB', splines=False, label='|G|={}'.format(len(store.blocks)))
for block_hash, block_relations in store.traverse_loaded_blocks():
if skip > 0:
skip -= 1
if skip % 20000 == 0:
print('skipped 20,000 blocks')
continue
aa.add_node(block_hash, label=block_hash.hex()[-8:])
s.add(block_hash)
for p in block_relations.parents:
if p in s:
aa.add_edge(block_hash, p, color='green')
else:
print('Missing parent: ', p.hex())
if chunk == len(s):
break
store.close()
aa.draw(temppdf, prog='dot')
def draw_from_tips_down():
# store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2')
store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\datadir2')
# store.load_blocks()
# df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], []))
s = set()
skip = 3600*6
chunk = 2000
aa = pgv.AGraph(strict=True, directed=True, rankdir='TB', splines=False, label='|G|={}'.format(len(store.blocks)))
for block_hash, block_relations in store.traverse_from_tips():
if skip > 0:
skip -= 1
if skip % 2000 == 0:
print('skipped 2000 blocks')
continue
aa.add_node(block_hash, label=block_hash.hex()[-8:])
s.add(block_hash)
for c in block_relations.children:
if c in s:
aa.add_edge(c, block_hash, color='green')
else:
print('Missing child: ', c.hex())
if chunk == len(s):
break
store.close()
aa.draw(temppdf, prog='dot')
if __name__ == '__main__':
draw_from_tips_down()
# draw_from_pp_up()
| en | 0.188226 | # import pandas as pd # store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2') # df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], [])) # 86400*2 + 3600*16 # store = Store(os.getenv('localappdata') + r'\Kaspad\kaspa-mainnet\fresh\kaspa-mainnet\datadir2') # store.load_blocks() # df = pd.DataFrame(store.load_data(['timeInMilliseconds', 'blueScore', 'daaScore'], [])) # draw_from_pp_up() | 2.494663 | 2 |
upfind.py | Borowiec-B/Python-Shell-Utilities | 0 | 6617956 | <reponame>Borowiec-B/Python-Shell-Utilities<filename>upfind.py
import os, sys;
from typing import Optional
def directory_up(directory: str) -> str:
    """Return the parent directory of ``directory``.

    Thin, readability-oriented wrapper around :func:`os.path.dirname`.
    """
    return os.path.dirname(directory)
def upfind(target_filename: str, last_directory_to_check: str = "/") -> Optional[str]:
    """
    Tries to find absolute path to target_filename in current directory, goes up, and repeats the process.

    Args:
        target_filename: File name to be found.
        last_directory_to_check: If file name is not found there, stop the search.

    Returns:
        Success: Absolute path to target_filename.
        Failure: None.
    """
    # Somewhat arbitrary cap on iterations for when last_directory_to_check
    # is not an ancestor of the cwd and the hierarchy is very deep (after "/"
    # the search always stops regardless of last_directory_to_check).
    max_iterations = 25
    current_search_directory = os.getcwd()
    # The original checked at most max_iterations + 1 directories; preserved.
    for _ in range(max_iterations + 1):
        if target_filename in os.listdir(current_search_directory):
            # "/" + "/" + name would produce a double slash; special-case root.
            if current_search_directory == "/":
                return "/" + target_filename
            return current_search_directory + "/" + target_filename
        if current_search_directory in ("/", last_directory_to_check):
            return None
        current_search_directory = os.path.dirname(current_search_directory)
    return None
def upfind_parent(target_filename: str, last_directory_to_check: str = "/") -> Optional[str]:
    """Locate ``target_filename`` like :func:`upfind`, but return the
    directory that contains it instead of the full path (None if not found).
    """
    found_path = upfind(target_filename, last_directory_to_check)
    if found_path is None:
        # The search failed; propagate the "not found" result.
        return None
    return directory_up(found_path)
def upfind_any(*target_filenames: str, last_directory_to_check: str = "/") -> Optional[str]:
    """
    Calls upfind on each filename, in the order the names were given.

    Args:
        *target_filenames: File names to be found.

    Returns:
        Success: First found file from target_filenames.
        Failure: None.
    """
    # Deduplicate while preserving argument order: the original iterated a
    # set(), whose arbitrary order contradicted the documented "first found"
    # semantics.
    for target_filename in dict.fromkeys(target_filenames):
        result = upfind(target_filename, last_directory_to_check)
        if result is not None:
            return result
    return None
| import os, sys;
from typing import Optional
def directory_up(directory: str) -> str:
"""
Shorthand for returning the parent directory.
Args:
directory: Directory to return the parent of.
Returns:
Parent directory of argument.
"""
return os.path.dirname(directory)
def upfind(target_filename: str, last_directory_to_check: str = "/") -> Optional[str]:
    """
    Tries to find absolute path to target_filename in current directory, goes up, and repeats the process.

    Args:
        target_filename: File name to be found.
        last_directory_to_check: If file name is not found there, stop the search.

    Returns:
        Success: Absolute path to target_filename.
        Failure: None.
    """
    # Somewhat arbitrary cap on iterations for when last_directory_to_check
    # is not an ancestor of the cwd and the hierarchy is very deep (after "/"
    # the search always stops regardless of last_directory_to_check).
    max_iterations = 25
    current_search_directory = os.getcwd()
    # The original checked at most max_iterations + 1 directories; preserved.
    for _ in range(max_iterations + 1):
        if target_filename in os.listdir(current_search_directory):
            # "/" + "/" + name would produce a double slash; special-case root.
            if current_search_directory == "/":
                return "/" + target_filename
            return current_search_directory + "/" + target_filename
        if current_search_directory in ("/", last_directory_to_check):
            return None
        current_search_directory = os.path.dirname(current_search_directory)
    return None
def upfind_parent(target_filename: str, last_directory_to_check: str = "/") -> Optional[str]:
""" Same as upfind(), but returns parent directory of its result.
"""
# If upfind() returns None on unsuccessful search, directory_up is gonna throw.
try:
return directory_up(upfind(target_filename, last_directory_to_check))
except TypeError:
return None
def upfind_any(*target_filenames: str, last_directory_to_check: str = "/") -> Optional[str]:
    """
    Calls upfind on each filename.

    Args:
        *target_filenames: File names to be found.

    Returns:
        Success: First found file from target_filenames.
        Failure: None.
    """
    # NOTE(review): set() iteration order is arbitrary, so "first found" is
    # not guaranteed to respect the argument order — consider an
    # order-preserving dedup such as dict.fromkeys().
    for target_filename in set(target_filenames):
        result = upfind(target_filename, last_directory_to_check)
        if (result != None):
            return result
return None; | en | 0.833301 | Shorthand for returning the parent directory. Args: directory: Directory to return the parent of. Returns: Parent directory of argument. Tries to find absolute path to target_filename in current directory, goes up, and repeats the process. Args: target_filename: File name to be found. last_directory_to_check: If file name is not found there, stop the search. Returns: Success: Absolute path to target_filename. Failure: None. # Somewhat arbitrary number to limit iterations when last_directory_to_check is not cwd's parent, # and the hierarchy is really deep (after "/", search always stops regardless of last_directory_to_check). # Join directory containing target_filename with target_filename, separating them with '/' to form the final found path. # If directory containing target_filename is "/", above statement results in "//target_filename". # In this case, remove the unnecessary leading slash. Same as upfind(), but returns parent directory of its result. # If upfind() returns None on unsuccessful search, directory_up is gonna throw. Calls upfind on each filename. Args: *target_filenames: File names to be found. Returns: Success: First found file from target_filenames. Failure: None. | 3.750273 | 4 |
No_0812_Largest Triangle Area/largest_triagle_area_by_determinant.py | coderMaruf/leetcode-1 | 32 | 6617957 | '''
Description:
You have a list of points in the plane. Return the area of the largest triangle that can be formed by any 3 of the points.
Example:
Input: points = [[0,0],[0,1],[1,0],[0,2],[2,0]]
Output: 2
Explanation:
The five points are show in the figure below. The red triangle is the largest.
Notes:
3 <= points.length <= 50.
No points will be duplicated.
-50 <= points[i][j] <= 50.
Answers within 10^-6 of the true value will be accepted as correct.
'''
from typing import List
class Solution:
    def largestTriangleArea(self, points: List[List[int]]) -> float:
        """Return the area of the largest triangle formed by any 3 points.

        Uses the shoelace (determinant) formula: for points A, B, C the value
        |x_A(y_B - y_C) + x_B(y_C - y_A) + x_C(y_A - y_B)| is twice the
        triangle's area (the parallelogram spanned by AB and AC), so we
        maximize it over all triples and halve once at the end.
        """
        from itertools import combinations

        max_twice_area = 0
        # Brute force over all C(n, 3) triples; n <= 50 so this is cheap.
        for (ax, ay), (bx, by), (cx, cy) in combinations(points, 3):
            twice_area = abs(ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
            max_twice_area = max(max_twice_area, twice_area)
        # Triangle area is half the parallelogram area.
        return max_twice_area / 2
# n : the length of input array, points.
## Time Complexity: O( n ^ 3 )
#
# The overhead in time is the cost of triple nested loops, iterating up to n, which is of O( n^3 ).
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for looping index, and temporary area computing variable, which is of O( 1 ).
def test_bench():
    """Run largestTriangleArea over the sample inputs and print each result."""
    cases = [
        [[0,0],[0,1],[1,0],[0,2],[2,0]]
    ]
    for points in cases:
        print(Solution().largestTriangleArea(points))
    return
if __name__ == '__main__':
    # Manual smoke test.
    test_bench()
Description:
You have a list of points in the plane. Return the area of the largest triangle that can be formed by any 3 of the points.
Example:
Input: points = [[0,0],[0,1],[1,0],[0,2],[2,0]]
Output: 2
Explanation:
The five points are show in the figure below. The red triangle is the largest.
Notes:
3 <= points.length <= 50.
No points will be duplicated.
-50 <= points[i][j] <= 50.
Answers within 10^-6 of the true value will be accepted as correct.
'''
from typing import List
class Solution:
    def largestTriangleArea(self, points: List[List[int]]) -> float:
        """Return the area of the largest triangle formed by any 3 points.

        Uses the shoelace (determinant) formula: for points A, B, C the value
        |x_A(y_B - y_C) + x_B(y_C - y_A) + x_C(y_A - y_B)| is twice the
        triangle's area (the parallelogram spanned by AB and AC), so we
        maximize it over all triples and halve once at the end.
        """
        from itertools import combinations

        max_twice_area = 0
        # Brute force over all C(n, 3) triples; n <= 50 so this is cheap.
        for (ax, ay), (bx, by), (cx, cy) in combinations(points, 3):
            twice_area = abs(ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
            max_twice_area = max(max_twice_area, twice_area)
        # Triangle area is half the parallelogram area.
        return max_twice_area / 2
# n : the length of input array, points.
## Time Complexity: O( n ^ 3 )
#
# The overhead in time is the cost of triple nested loops, iterating up to n, which is of O( n^3 ).
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for looping index, and temporary area computing variable, which is of O( 1 ).
def test_bench():
    """Run largestTriangleArea over the sample inputs and print each result."""
    cases = [
        [[0,0],[0,1],[1,0],[0,2],[2,0]]
    ]
    for points in cases:
        print(Solution().largestTriangleArea(points))
    return
if __name__ == '__main__':
    # Manual smoke test.
    test_bench()
torchvision/edgeailite/xnn/layers/normalization.py | TexasInstruments/vision | 21 | 6617958 | <filename>torchvision/edgeailite/xnn/layers/normalization.py
#################################################################################
# Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
import torch
###############################################################
# SlowBatchNorm2d is basically same as BatchNorm2d, but with a slower update of batch norm statistics.
# When the statistics are updated slowly, they tend to be similar across GPUs.
# This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes.
class SlowBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.01, affine=True, track_running_stats=True):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
###############################################################
# Experimental - group norm with fixed number of channels
class Group4Norm(torch.nn.GroupNorm):
def __init__(self, num_channels):
super().__init__(num_groups=4, num_channels=num_channels)
class Group8Norm(torch.nn.GroupNorm):
def __init__(self, num_channels):
super().__init__(num_groups=8, num_channels=num_channels)
###############################################################
# Experimental - a trick to increase the batch size in batch norm by using lower number of channels internally
# This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes.
class GroupBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_groups, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
assert num_features % num_groups == 0, 'num_features={} is not divisible by num_groups={}'.format(num_features, num_groups)
super().__init__(num_features//num_groups, eps, momentum, affine, track_running_stats)
self.num_groups = num_groups
def forward(self, x):
b,c,h,w = x.size()
x_grouped = x.view(-1,c//self.num_groups,h,w).contiguous()
y_gropued = super().forward(x_grouped)
y = y_gropued.view(b,c,h,w).contiguous()
return y
class Group4BatchNorm2d(GroupBatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super().__init__(4, num_features, eps, momentum, affine, track_running_stats)
class Group8BatchNorm2d(GroupBatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super().__init__(8, num_features, eps, momentum, affine, track_running_stats)
| <filename>torchvision/edgeailite/xnn/layers/normalization.py
#################################################################################
# Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
import torch
###############################################################
# SlowBatchNorm2d is basically same as BatchNorm2d, but with a slower update of batch norm statistics.
# When the statistics are updated slowly, they tend to be similar across GPUs.
# This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes.
class SlowBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.01, affine=True, track_running_stats=True):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
###############################################################
# Experimental - group norm with fixed number of channels
class Group4Norm(torch.nn.GroupNorm):
def __init__(self, num_channels):
super().__init__(num_groups=4, num_channels=num_channels)
class Group8Norm(torch.nn.GroupNorm):
def __init__(self, num_channels):
super().__init__(num_groups=8, num_channels=num_channels)
###############################################################
# Experimental - a trick to increase the batch size in batch norm by using lower number of channels internally
# This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes.
class GroupBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_groups, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
assert num_features % num_groups == 0, 'num_features={} is not divisible by num_groups={}'.format(num_features, num_groups)
super().__init__(num_features//num_groups, eps, momentum, affine, track_running_stats)
self.num_groups = num_groups
def forward(self, x):
b,c,h,w = x.size()
x_grouped = x.view(-1,c//self.num_groups,h,w).contiguous()
y_gropued = super().forward(x_grouped)
y = y_gropued.view(b,c,h,w).contiguous()
return y
class Group4BatchNorm2d(GroupBatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super().__init__(4, num_features, eps, momentum, affine, track_running_stats)
class Group8BatchNorm2d(GroupBatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super().__init__(8, num_features, eps, momentum, affine, track_running_stats)
| en | 0.614295 | ################################################################################# # Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################# ############################################################### # SlowBatchNorm2d is basically same as BatchNorm2d, but with a slower update of batch norm statistics. 
# When the statistics are updated slowly, they tend to be similar across GPUs. # This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes. ############################################################### # Experimental - group norm with fixed number of channels ############################################################### # Experimental - a trick to increase the batch size in batch norm by using lower number of channels internally # This may help to minimize the problem of lower of inference accuracy when training with smaller batch sizes. | 1.274723 | 1 |
sensor_stick/scripts/RANSAC.py | Vi-Ku/Perception-Project | 1 | 6617959 | <filename>sensor_stick/scripts/RANSAC.py<gh_stars>1-10
# Import PCL module
import pcl
from pcl_helper import *
# Load Point Cloud file
cloud = pcl.load_XYZRGB('tabletop.pcd')
def pcl_callback(pcl_msg):
# Voxel Grid filter
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) is a poor choice of leaf size
# Experiment and find the appropriate size!
LEAF_SIZE = 1
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
filename = 'voxel_downsampled.pcd'
pcl.save(cloud_filtered, filename)
# PassThrough filter
# RANSAC plane segmentation
# Extract inliers
# Save pcd for table
# pcl.save(cloud, filename)
# Extract outliers
# Save pcd for tabletop objects
| <filename>sensor_stick/scripts/RANSAC.py<gh_stars>1-10
# Import PCL module
import pcl
from pcl_helper import *
# Load Point Cloud file
cloud = pcl.load_XYZRGB('tabletop.pcd')
def pcl_callback(pcl_msg):
# Voxel Grid filter
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) is a poor choice of leaf size
# Experiment and find the appropriate size!
LEAF_SIZE = 1
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
filename = 'voxel_downsampled.pcd'
pcl.save(cloud_filtered, filename)
# PassThrough filter
# RANSAC plane segmentation
# Extract inliers
# Save pcd for table
# pcl.save(cloud, filename)
# Extract outliers
# Save pcd for tabletop objects
| en | 0.756703 | # Import PCL module # Load Point Cloud file # Voxel Grid filter # Create a VoxelGrid filter object for our input point cloud # Choose a voxel (also known as leaf) size # Note: this (1) is a poor choice of leaf size # Experiment and find the appropriate size! # Set the voxel (or leaf) size # Call the filter function to obtain the resultant downsampled point cloud # PassThrough filter # RANSAC plane segmentation # Extract inliers # Save pcd for table # pcl.save(cloud, filename) # Extract outliers # Save pcd for tabletop objects | 2.705377 | 3 |
lab.py | GluKhovKirill/Pygame-project | 0 | 6617960 | import random
class Transform():
def __init__(self):
self.dim_x = 5
self.dim_y = 5
self.c = []
self.grid = [[0 for i in range(self.dim_x)] for j in range(self.dim_y)]
self.visited = [1]
def valid(self,nb):
if nb >= 1 and nb <= self.dim_x * self.dim_y:
return True
return False
def list_moves(self,nb):
moves = []
nb = int(nb)
if self.valid(nb + self.dim_y) and self.visited.count(nb + self.dim_y) < 1:
moves.append(nb + self.dim_y)
if self.valid(nb - self.dim_y) and self.visited.count(nb - self.dim_y) < 1:
moves.append(nb - self.dim_y)
if self.valid(nb + 1) and self.visited.count(nb + 1) < 1 and nb % self.dim_x != 0:
moves.append(nb + 1)
if self.valid(nb - 1) and self.visited.count(nb - 1) < 1 and nb % self.dim_x != 1:
moves.append(nb - 1)
return moves
def gen(self):
pos = len(self.visited) - 1
while len(self.list_moves(self.visited[pos])) < 1:
pos -= 1
next_visit = random.choice(self.list_moves(self.visited[pos]))
self.visited.append(next_visit)
def generator(self):
while len(self.visited) != self.dim_x * self.dim_y:
self.gen()
return self.visited
def Transformer(self):
visit = self.generator()
for i in self.visited:
if not i%5:
self.c.append([750, i // 5 * (-750)])
else:
self.c.append([(i%5)*750,i//5*(-750)])
return self.c
def trans(self, c):
c_append = []
total = []
total1 = []
v = 0
Flag = True
for n in c:
x = n[0]
y = n[1]
for i in range(16):
for k in range(16):
if y == n[1] or y == n[1] - 700 or (x == n[0]) or (x == n[0] + 700):
if (x == n[0]+700 and y == n[1] - 350 or x == n[0]+700 and y == n[1] - 300 or x == n[0]+700 and y == n[1] - 400) or y == n[1]-700 and x == n[0] + 350 or y == n[1]-700 and x == n[0] + 300 or y == n[1]-700 and x == n[0] + 400:
c_append.append([x, y, "grass"])
elif v > 0:
print(n[0] > c[v-1][0] and (x == n[0] and y == n[1] - 350), x, y, 1, n[0], c[v-1][0])
print(n[1] < c[v-1][1] and (y == n[1] and x == n[0] + 350), x, y, 2, n[1], c[v-1][1])
if n[0] > c[v-1][0] and (x == n[0] and y == n[1] - 350 or x == n[0] and y == n[1] - 300 or x == n[0] and y == n[1] - 400) or (n[1] < c[v-1][1] or n[1] > c[v-1][1]) and (y == n[1] and (x == n[0] + 350 or x == n[0] + 300 or x == n[0] + 400)):
c_append.append([x, y, "grass"])
else:
c_append.append([x, y, "wall"])
else:
c_append.append([x, y, "wall"])
else:
if Flag:
c_append.append([x, y, "player"])
Flag = False
else:
c_append.append([x, y, random.choice(["grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","hole","wall","treasure"])])
if x != n[0]+700:
x += 50
else:
x = n[0]
total.append(c_append)
c_append = []
if y != n[1]-700:
y -= 50
v += 1
total1.append(total)
total = []
return total1
| import random
class Transform():
def __init__(self):
self.dim_x = 5
self.dim_y = 5
self.c = []
self.grid = [[0 for i in range(self.dim_x)] for j in range(self.dim_y)]
self.visited = [1]
def valid(self,nb):
if nb >= 1 and nb <= self.dim_x * self.dim_y:
return True
return False
def list_moves(self,nb):
moves = []
nb = int(nb)
if self.valid(nb + self.dim_y) and self.visited.count(nb + self.dim_y) < 1:
moves.append(nb + self.dim_y)
if self.valid(nb - self.dim_y) and self.visited.count(nb - self.dim_y) < 1:
moves.append(nb - self.dim_y)
if self.valid(nb + 1) and self.visited.count(nb + 1) < 1 and nb % self.dim_x != 0:
moves.append(nb + 1)
if self.valid(nb - 1) and self.visited.count(nb - 1) < 1 and nb % self.dim_x != 1:
moves.append(nb - 1)
return moves
def gen(self):
pos = len(self.visited) - 1
while len(self.list_moves(self.visited[pos])) < 1:
pos -= 1
next_visit = random.choice(self.list_moves(self.visited[pos]))
self.visited.append(next_visit)
def generator(self):
while len(self.visited) != self.dim_x * self.dim_y:
self.gen()
return self.visited
def Transformer(self):
visit = self.generator()
for i in self.visited:
if not i%5:
self.c.append([750, i // 5 * (-750)])
else:
self.c.append([(i%5)*750,i//5*(-750)])
return self.c
def trans(self, c):
c_append = []
total = []
total1 = []
v = 0
Flag = True
for n in c:
x = n[0]
y = n[1]
for i in range(16):
for k in range(16):
if y == n[1] or y == n[1] - 700 or (x == n[0]) or (x == n[0] + 700):
if (x == n[0]+700 and y == n[1] - 350 or x == n[0]+700 and y == n[1] - 300 or x == n[0]+700 and y == n[1] - 400) or y == n[1]-700 and x == n[0] + 350 or y == n[1]-700 and x == n[0] + 300 or y == n[1]-700 and x == n[0] + 400:
c_append.append([x, y, "grass"])
elif v > 0:
print(n[0] > c[v-1][0] and (x == n[0] and y == n[1] - 350), x, y, 1, n[0], c[v-1][0])
print(n[1] < c[v-1][1] and (y == n[1] and x == n[0] + 350), x, y, 2, n[1], c[v-1][1])
if n[0] > c[v-1][0] and (x == n[0] and y == n[1] - 350 or x == n[0] and y == n[1] - 300 or x == n[0] and y == n[1] - 400) or (n[1] < c[v-1][1] or n[1] > c[v-1][1]) and (y == n[1] and (x == n[0] + 350 or x == n[0] + 300 or x == n[0] + 400)):
c_append.append([x, y, "grass"])
else:
c_append.append([x, y, "wall"])
else:
c_append.append([x, y, "wall"])
else:
if Flag:
c_append.append([x, y, "player"])
Flag = False
else:
c_append.append([x, y, random.choice(["grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","grass","hole","wall","treasure"])])
if x != n[0]+700:
x += 50
else:
x = n[0]
total.append(c_append)
c_append = []
if y != n[1]-700:
y -= 50
v += 1
total1.append(total)
total = []
return total1
| none | 1 | 3.072045 | 3 | |
src/investmentstk/utils/dataframe.py | fernandobrito/investments-toolkit | 3 | 6617961 | import pandas as pd
from pandas.tseries.frequencies import to_offset
RESAMPLE_LOGIC = {"open": "first", "high": "max", "low": "min", "close": "last"}
def convert_to_pct_change(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Iterates over all columns of a dataframe and converts them to
percentage changes between the current and a prior element.
Useful to convert asset prices to % changes before calculating
the correlation between different assets.
:param dataframe: input dataframe
:return: a copy of the dataframe
"""
dataframe = dataframe.copy()
dataframe = dataframe.sort_index()
for column in dataframe:
dataframe[column] = dataframe[column].pct_change(fill_method=None)
return dataframe
def merge_dataframes(dataframes: list[pd.DataFrame], join: str = "outer") -> pd.DataFrame:
"""
Performs a join of a list of dataframes.
:param dataframes: a list of dataframes
:param join: the type of join
:return:
"""
dataframe = pd.concat(dataframes, axis="columns", join=join)
return dataframe
def convert_daily_ohlc_to_weekly(dataframe: pd.DataFrame) -> pd.DataFrame:
# From:
# https://stackoverflow.com/questions/34597926/converting-daily-stock-data-to-weekly-based-via-pandas-in-python
dataframe = dataframe.resample("W").apply(RESAMPLE_LOGIC)
# Convert date labels so weeks start on Monday
offset = pd.Timedelta(days=-6)
dataframe.index = dataframe.index + to_offset(offset)
return dataframe
def convert_daily_ohlc_to_monthly(dataframe: pd.DataFrame) -> pd.DataFrame:
dataframe = dataframe.resample("M").apply(RESAMPLE_LOGIC)
# Convert date labels so months start on day 1
offset = pd.tseries.offsets.MonthBegin(n=-1)
dataframe.index = dataframe.index + to_offset(offset)
return dataframe
| import pandas as pd
from pandas.tseries.frequencies import to_offset
RESAMPLE_LOGIC = {"open": "first", "high": "max", "low": "min", "close": "last"}
def convert_to_pct_change(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Iterates over all columns of a dataframe and converts them to
percentage changes between the current and a prior element.
Useful to convert asset prices to % changes before calculating
the correlation between different assets.
:param dataframe: input dataframe
:return: a copy of the dataframe
"""
dataframe = dataframe.copy()
dataframe = dataframe.sort_index()
for column in dataframe:
dataframe[column] = dataframe[column].pct_change(fill_method=None)
return dataframe
def merge_dataframes(dataframes: list[pd.DataFrame], join: str = "outer") -> pd.DataFrame:
"""
Performs a join of a list of dataframes.
:param dataframes: a list of dataframes
:param join: the type of join
:return:
"""
dataframe = pd.concat(dataframes, axis="columns", join=join)
return dataframe
def convert_daily_ohlc_to_weekly(dataframe: pd.DataFrame) -> pd.DataFrame:
# From:
# https://stackoverflow.com/questions/34597926/converting-daily-stock-data-to-weekly-based-via-pandas-in-python
dataframe = dataframe.resample("W").apply(RESAMPLE_LOGIC)
# Convert date labels so weeks start on Monday
offset = pd.Timedelta(days=-6)
dataframe.index = dataframe.index + to_offset(offset)
return dataframe
def convert_daily_ohlc_to_monthly(dataframe: pd.DataFrame) -> pd.DataFrame:
dataframe = dataframe.resample("M").apply(RESAMPLE_LOGIC)
# Convert date labels so months start on day 1
offset = pd.tseries.offsets.MonthBegin(n=-1)
dataframe.index = dataframe.index + to_offset(offset)
return dataframe
| en | 0.713772 | Iterates over all columns of a dataframe and converts them to percentage changes between the current and a prior element. Useful to convert asset prices to % changes before calculating the correlation between different assets. :param dataframe: input dataframe :return: a copy of the dataframe Performs a join of a list of dataframes. :param dataframes: a list of dataframes :param join: the type of join :return: # From: # https://stackoverflow.com/questions/34597926/converting-daily-stock-data-to-weekly-based-via-pandas-in-python # Convert date labels so weeks start on Monday # Convert date labels so months start on day 1 | 3.5187 | 4 |
tests/test_activation.py | johanere/qflow | 5 | 6617962 | import numpy as np
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from qflow.wavefunctions.nn.activations import (
exponential,
identity,
relu,
sigmoid,
tanh,
)
from .testutils import array_strat, assert_close
@given(array_strat(max_size=100))
def test_identity(x):
np.testing.assert_array_equal(x, identity.evaluate(x))
np.testing.assert_array_equal(np.ones_like(x), identity.derivative(x))
np.testing.assert_array_equal(np.zeros_like(x), identity.dbl_derivative(x))
@given(array_strat(max_size=100))
def test_relu(x):
np.testing.assert_array_equal(np.where(x > 0, x, 0), relu.evaluate(x))
np.testing.assert_array_equal(
np.where(x > 0, 1, 0), relu.derivative(relu.evaluate(x))
)
np.testing.assert_array_equal(
np.zeros_like(x), relu.dbl_derivative(relu.evaluate(x))
)
@given(array_strat(max_size=100))
def test_sigmoid(x):
sig = 1 / (1 + np.exp(-x))
np.testing.assert_array_equal(sig, sigmoid.evaluate(x))
np.testing.assert_array_equal(sig * (1 - sig), sigmoid.derivative(sig))
np.testing.assert_array_equal(
sig * (1 - sig) * (1 - 2 * sig), sigmoid.dbl_derivative(sig)
)
@given(array_strat(max_size=50))
def test_tanh(x):
assume(np.all(np.abs(x) < 15))
ta = np.tanh(x)
assert_close(ta, tanh.evaluate(x))
assert_close(1 - ta ** 2, tanh.derivative(tanh.evaluate(x)))
assert_close(
-2 * np.sinh(x) / np.cosh(x) ** 3, tanh.dbl_derivative(tanh.evaluate(x))
)
@given(array_strat(max_size=100))
def test_exponential(x):
exp = np.exp(x)
np.testing.assert_array_equal(exp, exponential.evaluate(x))
np.testing.assert_array_equal(exp, exponential.derivative(exponential.evaluate(x)))
np.testing.assert_array_equal(
exp, exponential.dbl_derivative(exponential.evaluate(x))
)
| import numpy as np
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from qflow.wavefunctions.nn.activations import (
exponential,
identity,
relu,
sigmoid,
tanh,
)
from .testutils import array_strat, assert_close
@given(array_strat(max_size=100))
def test_identity(x):
np.testing.assert_array_equal(x, identity.evaluate(x))
np.testing.assert_array_equal(np.ones_like(x), identity.derivative(x))
np.testing.assert_array_equal(np.zeros_like(x), identity.dbl_derivative(x))
@given(array_strat(max_size=100))
def test_relu(x):
np.testing.assert_array_equal(np.where(x > 0, x, 0), relu.evaluate(x))
np.testing.assert_array_equal(
np.where(x > 0, 1, 0), relu.derivative(relu.evaluate(x))
)
np.testing.assert_array_equal(
np.zeros_like(x), relu.dbl_derivative(relu.evaluate(x))
)
@given(array_strat(max_size=100))
def test_sigmoid(x):
sig = 1 / (1 + np.exp(-x))
np.testing.assert_array_equal(sig, sigmoid.evaluate(x))
np.testing.assert_array_equal(sig * (1 - sig), sigmoid.derivative(sig))
np.testing.assert_array_equal(
sig * (1 - sig) * (1 - 2 * sig), sigmoid.dbl_derivative(sig)
)
@given(array_strat(max_size=50))
def test_tanh(x):
assume(np.all(np.abs(x) < 15))
ta = np.tanh(x)
assert_close(ta, tanh.evaluate(x))
assert_close(1 - ta ** 2, tanh.derivative(tanh.evaluate(x)))
assert_close(
-2 * np.sinh(x) / np.cosh(x) ** 3, tanh.dbl_derivative(tanh.evaluate(x))
)
@given(array_strat(max_size=100))
def test_exponential(x):
exp = np.exp(x)
np.testing.assert_array_equal(exp, exponential.evaluate(x))
np.testing.assert_array_equal(exp, exponential.derivative(exponential.evaluate(x)))
np.testing.assert_array_equal(
exp, exponential.dbl_derivative(exponential.evaluate(x))
)
| none | 1 | 2.162329 | 2 | |
mtalg/random/__init__.py | WWakker/mtalg | 10 | 6617963 | <reponame>WWakker/mtalg
from mtalg.random.random_number_generators import MultithreadedRNG
_RNG = MultithreadedRNG()
def beta(*args, **kwargs):
return _RNG.beta(*args, **kwargs)
def binomial(*args, **kwargs):
return _RNG.binomial(*args, **kwargs)
def chisquare(*args, **kwargs):
return _RNG.chisquare(*args, **kwargs)
def exponential(*args, **kwargs):
return _RNG.exponential(*args, **kwargs)
def f(*args, **kwargs):
return _RNG.f(*args, **kwargs)
def gamma(*args, **kwargs):
return _RNG.gamma(*args, **kwargs)
def geometric(*args, **kwargs):
return _RNG.geometric(*args, **kwargs)
def gumbel(*args, **kwargs):
return _RNG.gumbel(*args, **kwargs)
def hypergeometric(*args, **kwargs):
return _RNG.hypergeometric(*args, **kwargs)
def integers(*args, **kwargs):
return _RNG.integers(*args, **kwargs)
def laplace(*args, **kwargs):
return _RNG.laplace(*args, **kwargs)
def logistic(*args, **kwargs):
return _RNG.logistic(*args, **kwargs)
def lognormal(*args, **kwargs):
return _RNG.lognormal(*args, **kwargs)
def logseries(*args, **kwargs):
return _RNG.logseries(*args, **kwargs)
def negative_binomial(*args, **kwargs):
return _RNG.negative_binomial(*args, **kwargs)
def noncentral_chisquare(*args, **kwargs):
return _RNG.noncentral_chisquare(*args, **kwargs)
def noncentral_f(*args, **kwargs):
return _RNG.noncentral_f(*args, **kwargs)
def normal(*args, **kwargs):
return _RNG.normal(*args, **kwargs)
def pareto(*args, **kwargs):
return _RNG.pareto(*args, **kwargs)
def poisson(*args, **kwargs):
return _RNG.poisson(*args, **kwargs)
def power(*args, **kwargs):
return _RNG.power(*args, **kwargs)
def random(*args, **kwargs):
return _RNG.random(*args, **kwargs)
def rayleigh(*args, **kwargs):
return _RNG.rayleigh(*args, **kwargs)
def standard_cauchy(*args, **kwargs):
return _RNG.standard_cauchy(*args, **kwargs)
def standard_exponential(*args, **kwargs):
return _RNG.standard_exponential(*args, **kwargs)
def standard_gamma(*args, **kwargs):
return _RNG.standard_gamma(*args, **kwargs)
def standard_normal(*args, **kwargs):
return _RNG.standard_normal(*args, **kwargs)
def standard_t(*args, **kwargs):
return _RNG.standard_t(*args, **kwargs)
def triangular(*args, **kwargs):
return _RNG.triangular(*args, **kwargs)
def uniform(*args, **kwargs):
return _RNG.uniform(*args, **kwargs)
def vonmises(*args, **kwargs):
return _RNG.vonmises(*args, **kwargs)
def wald(*args, **kwargs):
return _RNG.wald(*args, **kwargs)
def weibull(*args, **kwargs):
return _RNG.weibull(*args, **kwargs)
def zipf(*args, **kwargs):
return _RNG.zipf(*args, **kwargs)
for func in [beta, binomial, chisquare, exponential, f, gamma, geometric, gumbel, hypergeometric, integers, laplace,
logistic, lognormal, logseries, negative_binomial, noncentral_chisquare, noncentral_f, normal, pareto,
poisson, power, random, rayleigh, standard_cauchy, standard_exponential, standard_gamma, standard_normal,
standard_t, triangular, uniform, vonmises, wald, weibull, zipf]:
func.__doc__ = getattr(_RNG, func.__name__).__doc__
| from mtalg.random.random_number_generators import MultithreadedRNG
_RNG = MultithreadedRNG()
def beta(*args, **kwargs):
return _RNG.beta(*args, **kwargs)
def binomial(*args, **kwargs):
return _RNG.binomial(*args, **kwargs)
def chisquare(*args, **kwargs):
return _RNG.chisquare(*args, **kwargs)
def exponential(*args, **kwargs):
return _RNG.exponential(*args, **kwargs)
def f(*args, **kwargs):
return _RNG.f(*args, **kwargs)
def gamma(*args, **kwargs):
return _RNG.gamma(*args, **kwargs)
def geometric(*args, **kwargs):
return _RNG.geometric(*args, **kwargs)
def gumbel(*args, **kwargs):
return _RNG.gumbel(*args, **kwargs)
def hypergeometric(*args, **kwargs):
return _RNG.hypergeometric(*args, **kwargs)
def integers(*args, **kwargs):
return _RNG.integers(*args, **kwargs)
def laplace(*args, **kwargs):
return _RNG.laplace(*args, **kwargs)
def logistic(*args, **kwargs):
return _RNG.logistic(*args, **kwargs)
def lognormal(*args, **kwargs):
return _RNG.lognormal(*args, **kwargs)
def logseries(*args, **kwargs):
return _RNG.logseries(*args, **kwargs)
def negative_binomial(*args, **kwargs):
return _RNG.negative_binomial(*args, **kwargs)
def noncentral_chisquare(*args, **kwargs):
return _RNG.noncentral_chisquare(*args, **kwargs)
def noncentral_f(*args, **kwargs):
return _RNG.noncentral_f(*args, **kwargs)
def normal(*args, **kwargs):
return _RNG.normal(*args, **kwargs)
def pareto(*args, **kwargs):
return _RNG.pareto(*args, **kwargs)
def poisson(*args, **kwargs):
return _RNG.poisson(*args, **kwargs)
def power(*args, **kwargs):
return _RNG.power(*args, **kwargs)
def random(*args, **kwargs):
return _RNG.random(*args, **kwargs)
def rayleigh(*args, **kwargs):
return _RNG.rayleigh(*args, **kwargs)
def standard_cauchy(*args, **kwargs):
return _RNG.standard_cauchy(*args, **kwargs)
def standard_exponential(*args, **kwargs):
return _RNG.standard_exponential(*args, **kwargs)
def standard_gamma(*args, **kwargs):
return _RNG.standard_gamma(*args, **kwargs)
def standard_normal(*args, **kwargs):
return _RNG.standard_normal(*args, **kwargs)
def standard_t(*args, **kwargs):
return _RNG.standard_t(*args, **kwargs)
def triangular(*args, **kwargs):
return _RNG.triangular(*args, **kwargs)
def uniform(*args, **kwargs):
return _RNG.uniform(*args, **kwargs)
def vonmises(*args, **kwargs):
return _RNG.vonmises(*args, **kwargs)
def wald(*args, **kwargs):
return _RNG.wald(*args, **kwargs)
def weibull(*args, **kwargs):
return _RNG.weibull(*args, **kwargs)
def zipf(*args, **kwargs):
return _RNG.zipf(*args, **kwargs)
for func in [beta, binomial, chisquare, exponential, f, gamma, geometric, gumbel, hypergeometric, integers, laplace,
logistic, lognormal, logseries, negative_binomial, noncentral_chisquare, noncentral_f, normal, pareto,
poisson, power, random, rayleigh, standard_cauchy, standard_exponential, standard_gamma, standard_normal,
standard_t, triangular, uniform, vonmises, wald, weibull, zipf]:
func.__doc__ = getattr(_RNG, func.__name__).__doc__ | none | 1 | 2.344999 | 2 | |
apostello/migrations/0019_remove_smsinbound_matched_link.py | LaudateCorpus1/apostello | 69 | 6617964 | <filename>apostello/migrations/0019_remove_smsinbound_matched_link.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 11:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("apostello", "0018_auto_20170808_1045")]
operations = [migrations.RemoveField(model_name="smsinbound", name="matched_link")]
| <filename>apostello/migrations/0019_remove_smsinbound_matched_link.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 11:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("apostello", "0018_auto_20170808_1045")]
operations = [migrations.RemoveField(model_name="smsinbound", name="matched_link")]
| en | 0.727512 | # -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-08-29 11:16 | 1.222147 | 1 |
req_errors.py | VladimirsHisamutdinovs/Advanced_Python_Operations | 0 | 6617965 | <filename>req_errors.py
import requests
from requests import HTTPError, Timeout
def main():
try:
#url = "http://httpbin.org/status/404"
url = "http://httpbin.org/delay/5"
res = requests.get(url, timeout=2)
res.raise_for_status()
print_results(res)
except HTTPError as err:
print(f"Error: {err}")
except Timeout as err:
print(f'Request timed out: {err}')
def print_results(res):
    """Print the HTTP status code and body of a response-like object."""
    for label, value in (('Result', res.status_code), ('Data', res.text)):
        print(f'{label}: {value}')
if __name__ == "__main__":
main() | <filename>req_errors.py
import requests
from requests import HTTPError, Timeout
def main():
try:
#url = "http://httpbin.org/status/404"
url = "http://httpbin.org/delay/5"
res = requests.get(url, timeout=2)
res.raise_for_status()
print_results(res)
except HTTPError as err:
print(f"Error: {err}")
except Timeout as err:
print(f'Request timed out: {err}')
def print_results(res):
print(f'Result: {res.status_code}')
print(f'Data: {res.text}')
if __name__ == "__main__":
main() | en | 0.732524 | #url = "http://httpbin.org/status/404" | 3.17224 | 3 |
apps/layers_dataset/layer_param_pair.py | new-TonyWang/tvm | 0 | 6617966 | <gh_stars>0
import copy
from layers_param_data import global_table
from generate_param_value import *
"""
该文件主要实现了参数组合的枚举并过滤掉不匹配的参数名称,目前该文件的函数运行良好,不需要修改
"""
class LayerParampair(object):
    """Enumerates all parameter combinations for a named layer and filters
    out parameter names that do not apply to it.  (Original author's note:
    the functions in this file currently run well and need no changes.)"""

    class Permu:
        # Parameters related to the layer *input* (always kept regardless
        # of the layer's own parameter table).
        except_param={#input-related parameters
            'input_shape':get_input_shape,
            'batch_size':get_value,
        }
        """
        permu_conf stores (parameter name, parameter configuration) pairs.
        """
        def init_serch_space(self,param_func):
            """
            Initialise the enumeration functions from the configuration and
            return a mapping of (parameter name -> enumeration function).
            """
            funcs={}
            for funcname in self.permu_conf.keys():
                func_config=self.permu_conf.get(funcname)
                if(param_func.get(funcname)!=None):
                    print(funcname)
                    funcs[funcname]=param_func[funcname](**func_config)
                if(self.except_param.get(funcname)!=None):
                    # Input-related parameters override the layer-table entry.
                    funcs[funcname]=self.except_param[funcname](**func_config)
            return funcs
        def get_useful_conf(self,permu_conf:dict,params:dict):
            """
            Drop redundant entries from the configuration using the parameter
            list and ``except_param``: keep only keys that appear in either.
            """
            final_keys={}
            for param_name in params.keys():
                if(permu_conf.get(param_name)!=None):
                    final_keys[param_name]=permu_conf[param_name]
            for param_name in self.except_param.keys():
                if(permu_conf.get(param_name)!=None):
                    final_keys[param_name]=permu_conf[param_name]
            return final_keys
        def get_first_permu(self,funcs={}):
            # Call every enumeration function once and collect the first
            # value (``.rv``) of each parameter.
            # NOTE(review): mutable default argument for ``funcs`` — safe only
            # while callers never mutate it; confirm before reuse.
            final_param_dict={}
            for p in funcs.keys():
                retuen_value=funcs[p]()
                final_param_dict[p]=retuen_value.rv
            return final_param_dict
        def next_permutation(self,previous_config):
            # Delegate to the current state object (state-machine pattern).
            return self.state.next_permutation(previous_config)
        def __init__(self,permu_conf,params_func):
            """
            Fetch the parameter functions from the global table and read the
            parameters from the configuration.
            """
            self.permu_conf=self.get_useful_conf(permu_conf,params_func)
            self.params_func=self.init_serch_space(params_func)
            self.state=LayerParampair.running_state(self,[x for x in self.params_func.keys()])#state machine

    class absstate():
        # Abstract base state of the permutation state machine.
        def __init__(self,env) -> None:
            self.env=env
        def next_permutation(self):
            pass
    class start_state(absstate):
        def __init__(self, env) -> None:
            super().__init__(env)
        def next_permutation(self,previous_config=None):
            # Produce the very first combination, then hand the environment
            # over to the running state for subsequent calls.
            final_param_dict={}
            func=self.env.params_func
            for p in func.keys():
                retuen_value=func[p]()
                final_param_dict[p]=retuen_value.rv
            self.env.state=LayerParampair.running_state(self.env,[x for x in self.env.params_func.keys()])
            return final_param_dict
    class running_state(absstate):
        def __init__(self, env,funclist=[]) -> None:
            # NOTE(review): mutable default argument for ``funclist`` —
            # callers observed here always pass a fresh list; confirm
            # before reusing this class elsewhere.
            self.ptr=0                          # index of the parameter currently being advanced
            self.funclist=funclist              # parameter names, in enumeration order
            self.len=(len(self.funclist))
            self.boollist=[False]*self.len      # per-parameter "exhausted" flags (isEnd)
            super().__init__(env)
        def next_permutation(self,previous_config):
            """
            Similar to a backtracking depth-first tree traversal, except that
            each call to this function produces ONE new parameter combination
            instead of generating all of them at once.
            """
            func=self.env.params_func
            while(self.ptr<self.len-1):
                fname=self.funclist[self.ptr]
                p=func[fname]()
                #print("p={}".format(fname))
                self.boollist[self.ptr]=p.isEnd
                previous_config[fname]=p.rv
                self.ptr=self.ptr+1
            if(self.ptr==self.len-1):
                fname=self.funclist[self.ptr]
                p=func[fname]()
                self.boollist[self.ptr]=p.isEnd
                previous_config[fname]=p.rv
            # Backtrack past every exhausted parameter position.
            while(self.boollist[self.ptr] and self.ptr>=0):
                #print("-------------------------------------------------------{}".format(self.ptr))
                self.ptr=self.ptr-1
            # Second element of the return tuple is True once every
            # combination has been produced.
            if(self.ptr==-1 and self.boollist[0]):
                return previous_config,True
            else:
                return previous_config,False
    def next_permutation(self,previous_config):
        # Public entry point: forward to the Permu helper.
        return self.permutor.next_permutation(previous_config)
    def __init__(self,Lname,permu_conf=None):
        """
        Look up the layer's parameter table by name and build the permutor.
        """
        super().__init__()
        self.Lname = Lname
        params=global_table[self.Lname]
        if(params==None):
            print("no layer {}, skip permu".format(self.Lname))
        self.permutor = LayerParampair.Permu(permu_conf,params)
| import copy
from layers_param_data import global_table
from generate_param_value import *
"""
该文件主要实现了参数组合的枚举并过滤掉不匹配的参数名称,目前该文件的函数运行良好,不需要修改
"""
class LayerParampair(object):
class Permu:
except_param={#和输入有关的参数
'input_shape':get_input_shape,
'batch_size':get_value,
}
"""
permu_conf存的是(参数名称,参数配置)
"""
def init_serch_space(self,param_func):
"""
通过配置初始化枚举函数并返回(参数名称,枚举函数)
"""
funcs={}
for funcname in self.permu_conf.keys():
func_config=self.permu_conf.get(funcname)
if(param_func.get(funcname)!=None):
print(funcname)
funcs[funcname]=param_func[funcname](**func_config)
if(self.except_param.get(funcname)!=None):
funcs[funcname]=self.except_param[funcname](**func_config)
return funcs
def get_useful_conf(self,permu_conf:dict,params:dict):
"""
通过参数列表和except_param来丢掉配置中多余的配置
"""
final_keys={}
for param_name in params.keys():
if(permu_conf.get(param_name)!=None):
final_keys[param_name]=permu_conf[param_name]
for param_name in self.except_param.keys():
if(permu_conf.get(param_name)!=None):
final_keys[param_name]=permu_conf[param_name]
return final_keys
def get_first_permu(self,funcs={}):
final_param_dict={}
for p in funcs.keys():
retuen_value=funcs[p]()
final_param_dict[p]=retuen_value.rv
return final_param_dict
def next_permutation(self,previous_config):
return self.state.next_permutation(previous_config)
def __init__(self,permu_conf,params_func):
"""
从全局表中获取参数函数,并从配置中读取参数
"""
self.permu_conf=self.get_useful_conf(permu_conf,params_func)
self.params_func=self.init_serch_space(params_func)
self.state=LayerParampair.running_state(self,[x for x in self.params_func.keys()])#状态机
class absstate():
def __init__(self,env) -> None:
self.env=env
def next_permutation(self):
pass
class start_state(absstate):
def __init__(self, env) -> None:
super().__init__(env)
def next_permutation(self,previous_config=None):
final_param_dict={}
func=self.env.params_func
for p in func.keys():
retuen_value=func[p]()
final_param_dict[p]=retuen_value.rv
self.env.state=LayerParampair.running_state(self.env,[x for x in self.env.params_func.keys()])
return final_param_dict
class running_state(absstate):
def __init__(self, env,funclist=[]) -> None:
self.ptr=0
self.funclist=funclist
self.len=(len(self.funclist))
self.boollist=[False]*self.len
super().__init__(env)
def next_permutation(self,previous_config):
"""
类似基于回溯的树的深度优先遍历,不同的是每调用该函数一次就会得到一组新的参数,而不是一次获取所有参数
"""
func=self.env.params_func
while(self.ptr<self.len-1):
fname=self.funclist[self.ptr]
p=func[fname]()
#print("p={}".format(fname))
self.boollist[self.ptr]=p.isEnd
previous_config[fname]=p.rv
self.ptr=self.ptr+1
if(self.ptr==self.len-1):
fname=self.funclist[self.ptr]
p=func[fname]()
self.boollist[self.ptr]=p.isEnd
previous_config[fname]=p.rv
while(self.boollist[self.ptr] and self.ptr>=0):
#print("-------------------------------------------------------{}".format(self.ptr))
self.ptr=self.ptr-1
if(self.ptr==-1 and self.boollist[0]):
return previous_config,True
else:
return previous_config,False
def next_permutation(self,previous_config):
return self.permutor.next_permutation(previous_config)
def __init__(self,Lname,permu_conf=None):
super().__init__()
self.Lname = Lname
params=global_table[self.Lname]
if(params==None):
print("no layer {}, skip permu".format(self.Lname))
self.permutor = LayerParampair.Permu(permu_conf,params) | zh | 0.922364 | 该文件主要实现了参数组合的枚举并过滤掉不匹配的参数名称,目前该文件的函数运行良好,不需要修改 #和输入有关的参数 permu_conf存的是(参数名称,参数配置) 通过配置初始化枚举函数并返回(参数名称,枚举函数) 通过参数列表和except_param来丢掉配置中多余的配置 从全局表中获取参数函数,并从配置中读取参数 #状态机 类似基于回溯的树的深度优先遍历,不同的是每调用该函数一次就会得到一组新的参数,而不是一次获取所有参数 #print("p={}".format(fname)) #print("-------------------------------------------------------{}".format(self.ptr)) | 2.265712 | 2 |
utils_nlp/dataset/bbc_hindi.py | cali-li/nlp-recipes | 1 | 6617967 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Utility functions for downloading, extracting, and reading the
BBC Hindi News Corpus.
https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1
"""
import os
import pandas as pd
import logging
import numpy as np
import tarfile
from tempfile import TemporaryDirectory
from utils_nlp.dataset.url_utils import maybe_download
from utils_nlp.models.transformers.common import MAX_SEQ_LEN
from utils_nlp.models.transformers.sequence_classification import Processor
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
URL = (
"https://github.com/NirantK/hindi2vec/releases/"
"download/bbc-hindi-v0.1/bbc-hindiv01.tar.gz"
)
def load_pandas_df(local_cache_path=TemporaryDirectory().name):
    """
    Download and extract the BBC Hindi dataset files.

    Args:
        local_cache_path (str, optional): The local file path to save the raw file.
            Defaults to TemporaryDirectory().name.

    Returns:
        tuple(pd.DataFrame, pd.DataFrame): the training and testing splits,
        loaded as pandas DataFrames with missing fields replaced by "".
    """
    zipped_file = URL.split("/")[-1]
    maybe_download(URL, zipped_file, local_cache_path)

    zipped_file_path = os.path.join(local_cache_path, zipped_file)
    # Context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked the handle on failure).
    with tarfile.open(zipped_file_path, "r:gz") as tar:
        tar.extractall(path=local_cache_path)

    train_csv_file_path = os.path.join(local_cache_path, "hindi-train.csv")
    test_csv_file_path = os.path.join(local_cache_path, "hindi-test.csv")

    # Files are tab-separated with no header row: column 0 = label, column 1 = text.
    train_df = pd.read_csv(
        train_csv_file_path,
        sep="\t",
        encoding='utf-8',
        header=None
    )
    test_df = pd.read_csv(
        test_csv_file_path,
        sep="\t",
        encoding='utf-8',
        header=None
    )

    # Missing text fields become empty strings so downstream tokenizers
    # never see NaN.
    train_df = train_df.fillna("")
    test_df = test_df.fillna("")

    return (train_df, test_df)
def _clip_sample_ratio(ratio, name):
    """Validate a sub-sampling ratio.

    Clips values above 1.0 down to 1.0 (with a warning) and raises
    ValueError for negative values.  ``name`` ("training"/"testing") is
    used only in log/error messages.
    """
    if ratio > 1.0:
        logging.warning("Setting the {} sample ratio to 1.0".format(name))
        return 1.0
    if ratio < 0:
        # "ration" typo in the original messages fixed here.
        msg = "Invalid {} sample ratio: {}".format(name, ratio)
        logging.error(msg)
        raise ValueError(msg)
    return ratio


def load_tc_dataset(
    local_path=TemporaryDirectory().name,
    test_fraction=0.25,
    random_seed=None,
    train_sample_ratio=1.0,
    test_sample_ratio=1.0,
    model_name="bert-base-uncased",
    to_lower=True,
    cache_dir=TemporaryDirectory().name,
    max_len=MAX_SEQ_LEN,
    batch_size=32,
    num_gpus=None
):
    """
    Load the BBC Hindi dataset and split into training and testing datasets.
    The datasets are preprocessed and can be used to train a text-classification
    model or evaluate on the testing dataset.

    Args:
        local_path (str, optional): The local file path to save the raw dataset files.
            Defaults to TemporaryDirectory().name.
        test_fraction (float, optional): The fraction of testing dataset when splitting.
            Defaults to 0.25.
        random_seed (float, optional): Random seed used to shuffle the data.
            Defaults to None.
        train_sample_ratio (float, optional): The ratio that used to sub-sampling for training.
            Defaults to 1.0.
        test_sample_ratio (float, optional): The ratio that used to sub-sampling for testing.
            Defaults to 1.0.
        model_name (str, optional): The pretained model name.
            Defaults to "bert-base-uncased".
        to_lower (bool, optional): Lower case text input.
            Defaults to True.
        cache_dir (str, optional): The default folder for saving cache files.
            Defaults to TemporaryDirectory().name.
        max_len (int, optional): Maximum length of the list of tokens. Lists longer
            than this are truncated and shorter ones are padded with "O"s.
            Default value is BERT_MAX_LEN=512.
        batch_size (int, optional): The batch size for training and testing.
            Defaults to 32.
        num_gpus (int, optional): The number of GPUs.
            Defaults to None.

    Returns:
        tuple. The tuple contains four elements:
        train_dataloader (DataLoader): a PyTorch DataLoader instance for training.
        test_dataloader (DataLoader): a PyTorch DataLoader instance for testing.
        label_encoder (LabelEncoder): a sklearn LabelEncoder instance. The label values
            can be retrieved by calling the `inverse_transform` function.
        test_labels (Series): a Pandas Series of testing label (in label ID format). If
            the labels are in raw label values format, we will need to transform it to
            label IDs by using the label_encoder.transform function.
    """
    # download and load the original dataset
    train, test = load_pandas_df(local_cache_path=local_path)
    all_df = pd.concat([train, test], ignore_index=True)
    all_df.columns = ["label", "text"]
    text_col = "text"
    label_col = "label"

    # Fit the encoder on the union of both splits so every label is known.
    label_encoder = LabelEncoder()
    label_encoder.fit(all_df[label_col])

    if test_fraction < 0 or test_fraction >= 1.0:
        logging.warning("Invalid test fraction value: {}, changed to 0.25".format(test_fraction))
        test_fraction = 0.25

    train_df, test_df = train_test_split(
        all_df,
        train_size=(1.0 - test_fraction),
        random_state=random_seed
    )

    # Shared validation for both sub-sampling ratios (was duplicated inline).
    train_sample_ratio = _clip_sample_ratio(train_sample_ratio, "training")
    test_sample_ratio = _clip_sample_ratio(test_sample_ratio, "testing")

    if train_sample_ratio < 1.0:
        train_df = train_df.sample(frac=train_sample_ratio).reset_index(drop=True)
    if test_sample_ratio < 1.0:
        test_df = test_df.sample(frac=test_sample_ratio).reset_index(drop=True)

    # Replace raw label strings with encoded label IDs in both splits.
    train_labels = label_encoder.transform(train_df[label_col])
    train_df[label_col] = train_labels
    test_labels = label_encoder.transform(test_df[label_col])
    test_df[label_col] = test_labels

    processor = Processor(
        model_name=model_name,
        to_lower=to_lower,
        cache_dir=cache_dir
    )

    train_dataloader = processor.create_dataloader_from_df(
        df=train_df,
        text_col=text_col,
        label_col=label_col,
        max_len=max_len,
        text2_col=None,
        batch_size=batch_size,
        num_gpus=num_gpus,
        shuffle=True,
        distributed=False
    )

    test_dataloader = processor.create_dataloader_from_df(
        df=test_df,
        text_col=text_col,
        label_col=label_col,
        max_len=max_len,
        text2_col=None,
        batch_size=batch_size,
        num_gpus=num_gpus,
        shuffle=False,
        distributed=False
    )

    return (train_dataloader, test_dataloader, label_encoder, test_labels)
def get_label_values(label_encoder, label_ids):
    """
    Map encoded label IDs back to their original label values.

    Args:
        label_encoder (LabelEncoder): a fitted sklearn LabelEncoder instance
        label_ids (Numpy array): a Numpy array of label IDs.

    Returns:
        Numpy array. A Numpy array of label values.
    """
    decoded = label_encoder.inverse_transform(label_ids)
    return decoded
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Utility functions for downloading, extracting, and reading the
BBC Hindi News Corpus.
https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1
"""
import os
import pandas as pd
import logging
import numpy as np
import tarfile
from tempfile import TemporaryDirectory
from utils_nlp.dataset.url_utils import maybe_download
from utils_nlp.models.transformers.common import MAX_SEQ_LEN
from utils_nlp.models.transformers.sequence_classification import Processor
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
URL = (
"https://github.com/NirantK/hindi2vec/releases/"
"download/bbc-hindi-v0.1/bbc-hindiv01.tar.gz"
)
def load_pandas_df(local_cache_path=TemporaryDirectory().name):
"""
Downloads and extracts the dataset files
Args:
local_cache_path (str, optional): The local file path to save the raw file.
Defaults to TemporaryDirectory().name.
Returns:
pd.DataFrame: pandas DataFrame containing the loaded dataset.
"""
zipped_file = URL.split("/")[-1]
maybe_download(URL, zipped_file, local_cache_path)
zipped_file_path = os.path.join(local_cache_path, zipped_file)
tar = tarfile.open(zipped_file_path, "r:gz")
tar.extractall(path=local_cache_path)
tar.close()
train_csv_file_path = os.path.join(local_cache_path, "hindi-train.csv")
test_csv_file_path = os.path.join(local_cache_path, "hindi-test.csv")
train_df = pd.read_csv(
train_csv_file_path,
sep="\t",
encoding='utf-8',
header=None
)
test_df = pd.read_csv(
test_csv_file_path,
sep="\t",
encoding='utf-8',
header=None
)
train_df = train_df.fillna("")
test_df = test_df.fillna("")
return (train_df, test_df)
def load_tc_dataset(
local_path=TemporaryDirectory().name,
test_fraction=0.25,
random_seed=None,
train_sample_ratio=1.0,
test_sample_ratio=1.0,
model_name="bert-base-uncased",
to_lower=True,
cache_dir=TemporaryDirectory().name,
max_len=MAX_SEQ_LEN,
batch_size=32,
num_gpus=None
):
"""
Load the multinli dataset and split into training and testing datasets.
The datasets are preprocessed and can be used to train a NER model or evaluate
on the testing dataset.
Args:
local_path (str, optional): The local file path to save the raw wikigold file.
Defautls to TemporaryDirectory().name.
test_fraction (float, optional): The fraction of testing dataset when splitting.
Defaults to 0.25.
random_seed (float, optional): Random seed used to shuffle the data.
Defaults to None.
train_sample_ratio (float, optional): The ratio that used to sub-sampling for training.
Defaults to 1.0.
test_sample_ratio (float, optional): The ratio that used to sub-sampling for testing.
Defaults to 1.0.
model_name (str, optional): The pretained model name.
Defaults to "bert-base-uncased".
to_lower (bool, optional): Lower case text input.
Defaults to True.
cache_dir (str, optional): The default folder for saving cache files.
Defaults to TemporaryDirectory().name.
max_len (int, optional): Maximum length of the list of tokens. Lists longer
than this are truncated and shorter ones are padded with "O"s.
Default value is BERT_MAX_LEN=512.
batch_size (int, optional): The batch size for training and testing.
Defaults to 32.
num_gpus (int, optional): The number of GPUs.
Defaults to None.
Returns:
tuple. The tuple contains four elements:
train_dataload (DataLoader): a PyTorch DataLoader instance for training.
test_dataload (DataLoader): a PyTorch DataLoader instance for testing.
label_encoder (LabelEncoder): a sklearn LabelEncoder instance. The label values
can be retrieved by calling the `inverse_transform` function.
test_labels (Series): a Pandas Series of testing label (in label ID format). If
the labels are in raw label values format, we will need to transform it to
label IDs by using the label_encoder.transform function.
"""
# download and load the original dataset
train, test = load_pandas_df(local_cache_path=local_path)
all_df = pd.concat([train, test], ignore_index=True)
all_df.columns = ["label", "text"]
text_col = "text"
label_col = "label"
# encode labels, use the "genre" column as the label column
label_encoder = LabelEncoder()
label_encoder.fit(all_df[label_col])
if test_fraction < 0 or test_fraction >= 1.0:
logging.warning("Invalid test fraction value: {}, changed to 0.25".format(test_fraction))
test_fraction = 0.25
train_df, test_df = train_test_split(
all_df,
train_size=(1.0 - test_fraction),
random_state=random_seed
)
if train_sample_ratio > 1.0:
train_sample_ratio = 1.0
logging.warning("Setting the training sample ratio to 1.0")
elif train_sample_ratio < 0:
logging.error("Invalid training sample ration: {}".format(train_sample_ratio))
raise ValueError("Invalid training sample ration: {}".format(train_sample_ratio))
if test_sample_ratio > 1.0:
test_sample_ratio = 1.0
logging.warning("Setting the testing sample ratio to 1.0")
elif test_sample_ratio < 0:
logging.error("Invalid testing sample ration: {}".format(test_sample_ratio))
raise ValueError("Invalid testing sample ration: {}".format(test_sample_ratio))
if train_sample_ratio < 1.0:
train_df = train_df.sample(frac=train_sample_ratio).reset_index(drop=True)
if test_sample_ratio < 1.0:
test_df = test_df.sample(frac=test_sample_ratio).reset_index(drop=True)
train_labels = label_encoder.transform(train_df[label_col])
train_df[label_col] = train_labels
test_labels = label_encoder.transform(test_df[label_col])
test_df[label_col] = test_labels
processor = Processor(
model_name=model_name,
to_lower=to_lower,
cache_dir=cache_dir
)
train_dataloader = processor.create_dataloader_from_df(
df=train_df,
text_col=text_col,
label_col=label_col,
max_len=max_len,
text2_col=None,
batch_size=batch_size,
num_gpus=num_gpus,
shuffle=True,
distributed=False
)
test_dataloader = processor.create_dataloader_from_df(
df=test_df,
text_col=text_col,
label_col=label_col,
max_len=max_len,
text2_col=None,
batch_size=batch_size,
num_gpus=num_gpus,
shuffle=False,
distributed=False
)
return (train_dataloader, test_dataloader, label_encoder, test_labels)
def get_label_values(label_encoder, label_ids):
"""
Get the label values from label IDs.
Args:
label_encoder (LabelEncoder): a fitted sklearn LabelEncoder instance
label_ids (Numpy array): a Numpy array of label IDs.
Returns:
Numpy array. A Numpy array of label values.
"""
return label_encoder.inverse_transform(label_ids)
| en | 0.678774 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. Utility functions for downloading, extracting, and reading the BBC Hindi News Corpus. https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1 Downloads and extracts the dataset files Args: local_cache_path (str, optional): The local file path to save the raw file. Defaults to TemporaryDirectory().name. Returns: pd.DataFrame: pandas DataFrame containing the loaded dataset. Load the multinli dataset and split into training and testing datasets. The datasets are preprocessed and can be used to train a NER model or evaluate on the testing dataset. Args: local_path (str, optional): The local file path to save the raw wikigold file. Defautls to TemporaryDirectory().name. test_fraction (float, optional): The fraction of testing dataset when splitting. Defaults to 0.25. random_seed (float, optional): Random seed used to shuffle the data. Defaults to None. train_sample_ratio (float, optional): The ratio that used to sub-sampling for training. Defaults to 1.0. test_sample_ratio (float, optional): The ratio that used to sub-sampling for testing. Defaults to 1.0. model_name (str, optional): The pretained model name. Defaults to "bert-base-uncased". to_lower (bool, optional): Lower case text input. Defaults to True. cache_dir (str, optional): The default folder for saving cache files. Defaults to TemporaryDirectory().name. max_len (int, optional): Maximum length of the list of tokens. Lists longer than this are truncated and shorter ones are padded with "O"s. Default value is BERT_MAX_LEN=512. batch_size (int, optional): The batch size for training and testing. Defaults to 32. num_gpus (int, optional): The number of GPUs. Defaults to None. Returns: tuple. The tuple contains four elements: train_dataload (DataLoader): a PyTorch DataLoader instance for training. test_dataload (DataLoader): a PyTorch DataLoader instance for testing. 
label_encoder (LabelEncoder): a sklearn LabelEncoder instance. The label values can be retrieved by calling the `inverse_transform` function. test_labels (Series): a Pandas Series of testing label (in label ID format). If the labels are in raw label values format, we will need to transform it to label IDs by using the label_encoder.transform function. # download and load the original dataset # encode labels, use the "genre" column as the label column Get the label values from label IDs. Args: label_encoder (LabelEncoder): a fitted sklearn LabelEncoder instance label_ids (Numpy array): a Numpy array of label IDs. Returns: Numpy array. A Numpy array of label values. | 3.038389 | 3 |
esmvalcore/cmor/_fixes/cmip5/fgoals_s2.py | markelg/ESMValCore | 26 | 6617968 | """Fixes for FGOALS-s2 model."""
import iris
from ..fix import Fix
class AllVars(Fix):
    """Fixes for all variables."""

    def fix_metadata(self, cubes):
        """Fix metadata.

        Fix wrong bounds of latitude coordinate at first and last index.

        Parameters
        ----------
        cubes : iris.cube.CubeList
            Input cubes.

        Returns
        -------
        iris.cube.CubeList
        """
        for cube in cubes:
            try:
                latitude = cube.coord('latitude')
            except iris.exceptions.CoordinateNotFoundError:
                continue
            # Only well-formed 1-D latitude axes with at least three cells
            # carry the faulty edge bounds this fix targets.
            if latitude.ndim != 1 or latitude.shape[0] < 3:
                continue
            bounds = latitude.core_bounds().copy()
            # Rebuild the outermost bounds from the spacing of an interior cell.
            step = bounds[1][1] - bounds[1][0]
            bounds[0][0] = bounds[0][1] - step
            bounds[-1][1] = bounds[-1][0] + step
            latitude.bounds = bounds
        return cubes
| """Fixes for FGOALS-s2 model."""
import iris
from ..fix import Fix
class AllVars(Fix):
"""Fixes for all variables."""
def fix_metadata(self, cubes):
"""Fix metadata.
Fix wrong bounds of latitude coordinate at first and last index.
Parameters
----------
cubes : iris.cube.CubeList
Input cubes.
Returns
-------
iris.cube.CubeList
"""
for cube in cubes:
try:
lat_coord = cube.coord('latitude')
except iris.exceptions.CoordinateNotFoundError:
continue
if lat_coord.ndim != 1:
continue
if lat_coord.shape[0] < 3:
continue
lat_bounds = lat_coord.core_bounds().copy()
lat_diff = lat_bounds[1][1] - lat_bounds[1][0]
lat_bounds[0][0] = lat_bounds[0][1] - lat_diff
lat_bounds[-1][1] = lat_bounds[-1][0] + lat_diff
lat_coord.bounds = lat_bounds
return cubes
| en | 0.239744 | Fixes for FGOALS-s2 model. Fixes for all variables. Fix metadata. Fix wrong bounds of latitude coordinate at first and last index. Parameters ---------- cubes : iris.cube.CubeList Input cubes. Returns ------- iris.cube.CubeList | 2.507697 | 3 |
Leetcode/1000-2000/1847. Closest Room/1847.py | Next-Gen-UI/Code-Dynamics | 0 | 6617969 | from sortedcontainers import SortedList
class Solution:
    # NOTE: annotations use builtin generics (PEP 585, Python 3.9+) because
    # this file never imports typing.List — the original annotations would
    # raise NameError when the class body is evaluated outside LeetCode.
    def closestRoom(self, rooms: list[list[int]], queries: list[list[int]]) -> list[int]:
        """For each query ``(preferred, minSize)`` return the id of a room with
        size >= minSize whose id is closest to ``preferred`` (ties resolve to
        the smaller id), or -1 if no such room exists.

        Offline technique: process queries in decreasing minSize order while
        inserting room ids in decreasing room-size order, so the sorted id set
        always contains exactly the rooms large enough for the current query.
        """
        ans = [0] * len(queries)
        # Attach each query's original index so answers can be written back
        # after sorting the queries.
        qs = [[*q, i] for i, q in enumerate(queries)]
        roomIds = SortedList()

        rooms.sort(key=lambda x: -x[1])
        qs.sort(key=lambda x: -x[1])

        def searchClosestRoomId(roomIds: SortedList, preferred: int):
            """Closest id to `preferred` among admitted rooms (-1 if none)."""
            if not roomIds:
                return -1
            candIds = []
            i = roomIds.bisect_right(preferred)
            if i > 0:
                candIds.append(roomIds[i - 1])   # largest id <= preferred
            if i < len(roomIds):
                candIds.append(roomIds[i])       # smallest id > preferred
            # min() is stable and the <=-preferred candidate comes first,
            # so equal distances resolve to the smaller id as required.
            return min(candIds, key=lambda x: abs(x - preferred))

        i = 0  # rooms' pointer
        for preferred, minSize, index in qs:
            # Admit every room at least as large as this query's minimum.
            while i < len(rooms) and rooms[i][1] >= minSize:
                roomIds.add(rooms[i][0])
                i += 1
            ans[index] = searchClosestRoomId(roomIds, preferred)

        return ans
| from sortedcontainers import SortedList
class Solution:
def closestRoom(self, rooms: List[List[int]], queries: List[List[int]]) -> List[int]:
ans = [0] * len(queries)
qs = [[*q, i] for i, q in enumerate(queries)]
roomIds = SortedList()
rooms.sort(key=lambda x: -x[1])
qs.sort(key=lambda x: -x[1])
def searchClosestRoomId(roomIds: SortedList, preferred: int):
if not roomIds:
return -1
candIds = []
i = roomIds.bisect_right(preferred)
if i > 0:
candIds.append(roomIds[i - 1])
if i < len(roomIds):
candIds.append(roomIds[i])
return min(candIds, key=lambda x: abs(x - preferred))
i = 0 # rooms' pointer
for preferred, minSize, index in qs:
while i < len(rooms) and rooms[i][1] >= minSize:
roomIds.add(rooms[i][0])
i += 1
ans[index] = searchClosestRoomId(roomIds, preferred)
return ans
| en | 0.939555 | # rooms' pointer | 3.114018 | 3 |
muller/inheritance/__init__.py | andreashirley/Lolipop | 6 | 6617970 | from .genotype_lineage import LineageWorkflow
| from .genotype_lineage import LineageWorkflow
| none | 1 | 1.025977 | 1 | |
src/recommendations/src/recommendations-service/experimentation/evidently_feature_resolver.py | ronaks4/retail-demo-store | 0 | 6617971 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import os
import json
import logging
from typing import Dict,List
from expiring_dict import ExpiringDict
from experimentation.features import FEATURE_NAMES
from experimentation.experiment_evidently import EvidentlyExperiment
log = logging.getLogger(__name__)
evidently = boto3.client('evidently')
# Cache feature evals for 30 seconds to balance latency and timeliness of picking up experiments
eval_features_by_user_cache = ExpiringDict(30)
project_name = os.environ['EVIDENTLY_PROJECT_NAME']
class EvidentlyFeatureResolver:
    """
    This class is used by ExperimentManager to determine if an Evidently experiment is active
    for a feature as well as for mapping a correlation ID to an EvidentlyExperiment instance for logging outcomes.
    """

    def evaluate_feature(self, user_id: str, feature: str) -> EvidentlyExperiment:
        """ Evaluates a storefront feature for a user

        An EvidentlyExperiment will be returned if there is an active Evidently experiment for the feature or
        None if an experiment is not active.
        """
        # Batch-evaluated results are cached per user (30s TTL, see module
        # scope) so repeated feature lookups do not re-call Evidently.
        cache_key = user_id
        evaluated = eval_features_by_user_cache.get(cache_key)
        if evaluated is None:
            evaluated = self._call_evidently_evaluate_features(user_id)
            eval_features_by_user_cache[cache_key] = evaluated
            log.debug('Eval feature results for user/feature %s/%s: %s', user_id, feature, evaluated)
        else:
            log.debug('Found cached eval feature result for user/feature %s/%s: %s', user_id, feature, evaluated)

        experiment = None
        feature_found = False

        for eval_feature in evaluated:
            # The response carries the feature as a fully qualified path;
            # compare only on its last segment.
            if eval_feature.get('feature').split('/')[-1] == feature:
                feature_found = True
                log.debug('Found matching feature in evaluated with reason %s', eval_feature.get('reason'))
                # Only EXPERIMENT_RULE_MATCH means the user landed in an
                # active experiment; any other reason yields None.
                if eval_feature.get('reason') == 'EXPERIMENT_RULE_MATCH':
                    # The variation's configuration is serialized JSON in stringValue.
                    variation_config = json.loads(eval_feature['value']['stringValue'])
                    # Config convenience check allowing ARN to be expressed as 'arn' in Evidently feature.
                    if 'inference_arn' not in variation_config and 'arn' in variation_config:
                        variation_config['inference_arn'] = variation_config.pop('arn')

                    details = json.loads(eval_feature['details'])

                    experiment_config = {
                        'id': details['experiment'],
                        'name': details['experiment'],
                        'feature': feature,
                        'project': eval_feature['project'].split('/')[-1],
                        'status': 'ACTIVE',
                        'type': 'evidently',
                        'variations': [ variation_config ],
                        'variation_name': eval_feature['variation']
                    }

                    experiment = EvidentlyExperiment(**experiment_config)

                break

        if not feature_found:
            log.warning('Feature "%s" not found in Evidently for project "%s"', feature, project_name)

        return experiment

    def create_from_correlation_id(self, correlation_id: str) -> EvidentlyExperiment:
        """ Creates an EvidentlyExperiment given a correlation ID

        A correlation ID is created by EvidentlyExperiment for each recommended item that is part of an
        active experiment. This ID is used when logging outcomes/conversions to map back to an experiment.
        """
        # Correlation IDs are '~'-delimited; segment 0 identifies the
        # provider ('evidently') and segment 2 carries the feature name.
        id_bits = correlation_id.split('~')
        if id_bits[0] != 'evidently':
            raise Exception('Correlation ID does not appear to belong to an Evidently experiment')

        feature = id_bits[2]

        # Minimal experiment shell used only for outcome logging; no
        # variations are needed at this point.
        experiment_config = {
            'id': 'evidently',
            'name': 'Evidently Experiment',
            'feature': feature,
            'status': 'ACTIVE',
            'type': 'evidently',
            'variations': [ ],
        }

        return EvidentlyExperiment(**experiment_config)

    def _call_evidently_evaluate_features(self, user_id: str) -> List[Dict]:
        # Evaluate every known storefront feature for this user in a single
        # batched Evidently API call.
        requests = []
        for feature in FEATURE_NAMES:
            requests.append({
                'entityId': user_id,
                'feature': feature
            })

        response = evidently.batch_evaluate_feature(
            project=project_name,
            requests=requests
        )

        return response['results']
# SPDX-License-Identifier: MIT-0
import boto3
import os
import json
import logging
from typing import Dict,List
from expiring_dict import ExpiringDict
from experimentation.features import FEATURE_NAMES
from experimentation.experiment_evidently import EvidentlyExperiment
log = logging.getLogger(__name__)
evidently = boto3.client('evidently')
# Cache feature evals for 30 seconds to balance latency and timeliness of picking up experiments
eval_features_by_user_cache = ExpiringDict(30)
project_name = os.environ['EVIDENTLY_PROJECT_NAME']
class EvidentlyFeatureResolver:
"""
This class is used by ExperimentManager to determine if an Evidently experiment is active
for a feature as well as for mapping a correlation ID to an EvidentlyExperiment instance for logging outcomes.
"""
def evaluate_feature(self, user_id: str, feature: str) -> EvidentlyExperiment:
    """Evaluate a storefront feature for a user.

    Returns an EvidentlyExperiment when an active Evidently experiment covers
    the feature for this user, otherwise None.
    """
    # Feature evaluations are fetched once per user and cached briefly in the
    # module-level ExpiringDict.
    evaluations = eval_features_by_user_cache.get(user_id)
    if evaluations is None:
        evaluations = self._call_evidently_evaluate_features(user_id)
        eval_features_by_user_cache[user_id] = evaluations
        log.debug('Eval feature results for user/feature %s/%s: %s', user_id, feature, evaluations)
    else:
        log.debug('Found cached eval feature result for user/feature %s/%s: %s', user_id, feature, evaluations)

    for evaluation in evaluations:
        # Evidently reports qualified feature names; compare on the last path segment.
        if evaluation.get('feature').split('/')[-1] != feature:
            continue
        log.debug('Found matching feature in evaluated with reason %s', evaluation.get('reason'))
        if evaluation.get('reason') != 'EXPERIMENT_RULE_MATCH':
            # Feature exists but no experiment rule matched for this user.
            return None
        variation_config = json.loads(evaluation['value']['stringValue'])
        # Config convenience check allowing ARN to be expressed as 'arn' in Evidently feature.
        if 'inference_arn' not in variation_config and 'arn' in variation_config:
            variation_config['inference_arn'] = variation_config.pop('arn')
        details = json.loads(evaluation['details'])
        return EvidentlyExperiment(
            id=details['experiment'],
            name=details['experiment'],
            feature=feature,
            project=evaluation['project'].split('/')[-1],
            status='ACTIVE',
            type='evidently',
            variations=[variation_config],
            variation_name=evaluation['variation'],
        )

    log.warning('Feature "%s" not found in Evidently for project "%s"', feature, project_name)
    return None
def create_from_correlation_id(self, correlation_id: str) -> EvidentlyExperiment:
    """Creates an EvidentlyExperiment given a correlation ID.

    A correlation ID is created by EvidentlyExperiment for each recommended item
    that is part of an active experiment. This ID is used when logging
    outcomes/conversions to map back to an experiment. Only segments 0 (the
    'evidently' marker) and 2 (the feature name) of the '~'-separated ID are
    consumed here.

    Raises
    ------
    ValueError
        If the correlation ID does not carry the 'evidently' prefix or has too
        few '~'-separated segments to contain a feature name.
    """
    id_bits = correlation_id.split('~')
    # Guard both the prefix and the segment count so a malformed ID raises a
    # descriptive ValueError instead of an IndexError at `id_bits[2]` below.
    # ValueError subclasses Exception, so existing broad handlers still match.
    if len(id_bits) < 3 or id_bits[0] != 'evidently':
        raise ValueError('Correlation ID does not appear to belong to an Evidently experiment')
    feature = id_bits[2]
    # Minimal stand-in experiment: enough context for outcome logging; the full
    # experiment/variation details are not recoverable from the ID alone.
    experiment_config = {
        'id': 'evidently',
        'name': 'Evidently Experiment',
        'feature': feature,
        'status': 'ACTIVE',
        'type': 'evidently',
        'variations': [],
    }
    return EvidentlyExperiment(**experiment_config)
def _call_evidently_evaluate_features(self, user_id: str) -> List[Dict]:
requests = []
for feature in FEATURE_NAMES:
requests.append({
'entityId': user_id,
'feature': feature
})
response = evidently.batch_evaluate_feature(
project=project_name,
requests=requests
)
return response['results'] | en | 0.893432 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 # Cache feature evals for 30 seconds to balance latency and timeliness of picking up experiments This class is used by ExperimentManager to determine if an Evidently experiment is active for a feature as well as for mapping a correlation ID to an EvidentlyExperiment instance for logging outcomes. Evaluates a storefront feature for a user An EvidentlyExperiment will be returned if there is an active Evidently experiment for the feature or None if an experiment is not active. # Config convenience check allowing ARN to be expressed as 'arn' in Evidently feature. Creates an EvidentlyExperiment given a correlation ID A correlation ID is created by EvidentlyExperiment for each recommended item that is part of an active experiment. This ID is used when logging outcomes/conversions to map back to an experiment. | 2.211317 | 2 |
notebooks/main.py | LHerdy/scientificProjectPizza | 0 | 6617972 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
# Diametros = (cm)
Diametros = [[7], [10], [15], [30], [45]]
# Preço (R$)
Precos = [[8], [11], [16], [38.5], [52]]
# Visualização dos dados
plt.figure()
plt.xlabel('Diâmetro(cm)')
plt.ylabel('Preço (R$)')
plt.title('Diâmetro X Preço')
plt.plot(Diametros, Precos, 'k.')
plt.axis([0, 60, 0, 60])
plt.grid(True)
plt.show()
x = [[7], [10], [15], [30], [45]]
y = [[8], [11], [16], [38.5], [52]]
modelo = LinearRegression()
type(modelo)
modelo.fit(x, y)
# print("Uma pizza de 20 cm de diâmetro deve custar: R$ {.2f}".format(modelo.predict([20], [0])))
plt.scatter(x, y, color='black')
plt.plot(x, modelo.predict(x), color='blue', linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
plt.xticks(())
plt.yticks(())
plt.show()
| import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression

# Diameters (cm)
Diametros = [[7], [10], [15], [30], [45]]
# Price (R$)
Precos = [[8], [11], [16], [38.5], [52]]

# Visualization of the raw data: pizza diameter vs. price scatter plot.
plt.figure()
plt.xlabel('Diâmetro(cm)')
plt.ylabel('Preço (R$)')
plt.title('Diâmetro X Preço')
plt.plot(Diametros, Precos, 'k.')
plt.axis([0, 60, 0, 60])
plt.grid(True)
plt.show()

# Same data again as regression features (x) and targets (y).
x = [[7], [10], [15], [30], [45]]
y = [[8], [11], [16], [38.5], [52]]
# Fit a simple linear regression: price as a linear function of diameter.
modelo = LinearRegression()
type(modelo)  # NOTE(review): no-op expression; result is discarded
modelo.fit(x, y)
# print("Uma pizza de 20 cm de diâmetro deve custar: R$ {.2f}".format(modelo.predict([20], [0])))
# Plot the data points together with the fitted regression line.
plt.scatter(x, y, color='black')
plt.plot(x, modelo.predict(x), color='blue', linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
plt.xticks(())
plt.yticks(())
plt.show()
| pt | 0.931344 | # Diametros = (cm) # Preço (R$) # Visualização dos dados # print("Uma pizza de 20 cm de diâmetro deve custar: R$ {.2f}".format(modelo.predict([20], [0]))) | 3.184598 | 3 |
akernel/akernel.py | jtpio/akernel | 43 | 6617973 | import typer
from .kernel import Kernel
from .kernelspec import write_kernelspec
cli = typer.Typer()
@cli.command()
def install(mode: str = typer.Argument(..., help="Mode of the kernel to install.")):
display_name = f"Python 3 (akernel-{mode})"
write_kernelspec("akernel", mode, display_name)
@cli.command()
def launch(
mode: str = typer.Argument(..., help="Mode of the kernel to launch."),
connection_file: str = typer.Option(..., "-f", help="Path to the connection file."),
):
Kernel(mode, connection_file)
if __name__ == "__main__":
cli()
| import typer
from .kernel import Kernel
from .kernelspec import write_kernelspec
# Single Typer application object; the sub-commands below register on it.
cli = typer.Typer()
@cli.command()
def install(mode: str = typer.Argument(..., help="Mode of the kernel to install.")):
    """Write an akernel kernelspec for *mode* under the display name 'Python 3 (akernel-<mode>)'."""
    display_name = f"Python 3 (akernel-{mode})"
    write_kernelspec("akernel", mode, display_name)
@cli.command()
def launch(
    mode: str = typer.Argument(..., help="Mode of the kernel to launch."),
    connection_file: str = typer.Option(..., "-f", help="Path to the connection file."),
):
    """Launch a kernel in the given mode using the supplied Jupyter connection file."""
    # NOTE(review): the Kernel instance is discarded — presumably constructing it
    # runs the kernel; confirm against the .kernel module.
    Kernel(mode, connection_file)
if __name__ == "__main__":
    # Entry point when the module is executed directly: dispatch to the Typer CLI.
    cli()
| none | 1 | 2.405504 | 2 | |
run_train.py | Dhairya1510/pre_react_hover_net | 0 | 6617974 |
import cv2
cv2.setNumThreads(0)
import inspect
import logging
import os
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from torch.nn import DataParallel
# TODO: switch to DistributedDataParallel
from torch.utils.data import DataLoader
from run_utils.engine import RunEngine
from run_utils.utils import (
check_manual_seed, colored,
convert_pytorch_checkpoint
)
# * must initialize augmentor per worker, else duplicated
# * rng generators may happen
def worker_init_fn(worker_id):
# ! to make the seed chain reproducible, must use the torch random, not numpy
# the torch rng from main thread will regenerate a base seed, which is then
# copied into the dataloader each time it created (i.e start of each epoch)
# then dataloader with this seed will spawn worker,
# now we reseed the worker
worker_info = torch.utils.data.get_worker_info()
# to make it more random, simply switch torch.randint to np.randint
worker_seed = torch.randint(0, 2**32, (1,))[0].cpu().item() + worker_id
# print('Loader Worker %d Uses RNG Seed: %d' % (worker_id, worker_seed))
# retrieve the dataset copied into this worker process
# then set the random seed for each augmentation
worker_info.dataset.setup_augmentor(worker_id, worker_seed)
return
class RunManager(object):
"""
Either used to view the dataset or
to initialise the main training loop.
"""
def __init__(self, **kwargs):
self.phase_idx = 0
for variable, value in kwargs.items():
self.__setattr__(variable, value)
return
####
def _get_datagen(self, batch_size, run_mode, subset_name, nr_procs=0):
nr_procs = nr_procs if not self.debug else 0
input_dataset = self.create_dataset(
run_mode=run_mode,
subset_name=subset_name,
setup_augmentor=nr_procs == 0)
logging.info(
f'Dataset {run_mode} - {subset_name} : {len(input_dataset)}')
dataloader = DataLoader(
input_dataset,
num_workers=nr_procs,
batch_size=batch_size,
shuffle=run_mode == 'train',
drop_last=run_mode == 'train',
worker_init_fn=worker_init_fn,
)
return dataloader
####
def _run_once(self, opt, run_engine_opt, log_dir, prev_log_dir=None):
"""
Simply run the defined run_step of the related method once
"""
check_manual_seed(self.seed)
log_info = {}
if self.logging:
# check_log_dir(log_dir)
# rm_n_mkdir(log_dir)
import joblib
tfwriter = SummaryWriter(log_dir=log_dir)
log_file = log_dir + "/stats.dat"
joblib.dump({}, log_file)
log_info = {
"log_file": log_file,
"tfwriter": tfwriter,
}
# ! create list of data loader
def create_loader_dict(run_mode, loader_name_list):
loader_dict = {}
for loader_name in loader_name_list:
loader_opt = opt['loader'][loader_name]
loader_dict[loader_name] = self._get_datagen(
loader_opt['batch_size'],
run_mode, loader_name,
nr_procs=loader_opt['nr_procs'])
return loader_dict
####
def get_last_chkpt_path(prev_phase_dir, net_name):
info = joblib.load(f'{prev_phase_dir}/stats.dat')
# ! prioritize epoch over step if both exist
epoch_list = [int(v) for v in info.keys()]
last_chkpts_path = (
f"{prev_phase_dir}/"
f"{net_name}_epoch={max(epoch_list):2d}.tar"
)
return last_chkpts_path
# TODO: adding way to load pretrained weight or resume the training
# parsing the network and optimizer information
net_run_info = {}
net_info_opt = opt['run_info']
for net_name, net_info in net_info_opt.items():
assert inspect.isclass(net_info['desc']) \
or inspect.isfunction(net_info['desc']), \
"`desc` must be a Class or Function which instantiate NEW objects !!!"
net_desc = net_info['desc']()
# TODO: customize print-out for each run ?
# summary_string(net_desc, (3, 270, 270), device='cpu')
pretrained_path = net_info["pretrained"]
if pretrained_path is not None:
if pretrained_path == -1:
# * depend on logging format so may be
# * broken if logging format has been changed
pretrained_path = get_last_chkpt_path(prev_log_dir, net_name)
net_state_dict = torch.load(pretrained_path)["desc"]
else:
chkpt_ext = os.path.basename(pretrained_path).split(".")[-1]
if chkpt_ext == "npz":
net_state_dict = dict(np.load(pretrained_path))
net_state_dict = {
k: torch.from_numpy(v)
for k, v in net_state_dict.items()
}
elif chkpt_ext == "tar": # ! assume same saving format we desire
net_state_dict = torch.load(pretrained_path)["desc"]
colored_word = colored(net_name, color="red", attrs=["bold"])
logging.info(
f"Model `{colored_word}` pretrained path: {pretrained_path}"
)
# load_state_dict returns (missing keys, unexpected keys)
net_state_dict = convert_pytorch_checkpoint(net_state_dict)
load_feedback = net_desc.load_state_dict(net_state_dict, strict=False)
# * uncomment for your convenience
logging.info(f"Missing Variables: {load_feedback[0]}")
logging.info(f"Detected Unknown Variables: {load_feedback[1]}")
# net_desc = torch.jit.script(net_desc)
net_desc = DataParallel(net_desc)
net_desc = net_desc.to('cuda')
# print(net_desc) # * dump network definition or not?
optimizer, optimizer_args = net_info['optimizer']
optimizer = optimizer(net_desc.parameters(), **optimizer_args)
# TODO: expand for external aug for scheduler
nr_iter = opt['nr_epochs']
scheduler = net_info['lr_scheduler'](optimizer, nr_iter)
net_run_info[net_name] = {
'desc': net_desc,
'optimizer': optimizer,
'lr_scheduler': scheduler,
# TODO: standardize API for external hooks
'extra_info': net_info['extra_info']
}
# parsing the running engine configuration
assert 'train' in run_engine_opt, \
'No engine for training detected in description file'
# initialize runner and attach callback afterward
# * all engine shared the same network info declaration
runner_dict = {}
for runner_name, runner_opt in run_engine_opt.items():
runner_loader_dict = create_loader_dict(
runner_name, runner_opt['loader'])
runner_dict[runner_name] = RunEngine(
loader_dict=runner_loader_dict,
engine_name=runner_name,
run_step=runner_opt['run_step'],
run_info=net_run_info,
log_info=log_info,
)
for runner_name, runner in runner_dict.items():
callback_info = run_engine_opt[runner_name]['callbacks']
for event, callback_list, in callback_info.items():
for callback in callback_list:
if callback.engine_trigger:
triggered_runner_name = (
callback.triggered_engine_name
)
callback.triggered_engine = (
runner_dict[triggered_runner_name]
)
runner.add_event_handler(event, callback)
# retrieve main runner
main_runner = runner_dict['train']
main_runner.separate_loader_output = False
main_runner.state.logging = self.logging
main_runner.state.log_dir = log_dir
# start the run loop
main_runner.run(opt['nr_epochs'])
logging.info('\n')
logging.info("#" * 16)
logging.info('\n')
return
####
def run(self):
"""
Define multi-stage run or cross-validation or whatever in here
"""
phase_list = self.model_config['phase_list']
engine_opt = self.model_config['run_engine']
prev_save_path = None
for phase_idx, phase_info in enumerate(phase_list):
if len(phase_list) == 1:
save_path = self.log_dir
else:
save_path = self.log_dir + '/%02d' % (phase_idx)
self._run_once(
phase_info, engine_opt, save_path,
prev_log_dir=prev_save_path)
prev_save_path = save_path
self.phase_idx += 1
|
import cv2
cv2.setNumThreads(0)
import inspect
import logging
import os
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from torch.nn import DataParallel
# TODO: switch to DistributedDataParallel
from torch.utils.data import DataLoader
from run_utils.engine import RunEngine
from run_utils.utils import (
check_manual_seed, colored,
convert_pytorch_checkpoint
)
# * must initialize augmentor per worker, else duplicated
# * rng generators may happen
def worker_init_fn(worker_id):
    """Re-seed the augmentor of the dataset copy living in this loader worker.

    Registered as ``worker_init_fn`` on the DataLoader below, so it runs once in
    every freshly spawned worker process (i.e. at the start of each epoch).
    """
    # ! to make the seed chain reproducible, must use the torch random, not numpy
    # the torch rng from main thread will regenerate a base seed, which is then
    # copied into the dataloader each time it created (i.e start of each epoch)
    # then dataloader with this seed will spawn worker,
    # now we reseed the worker
    worker_info = torch.utils.data.get_worker_info()
    # to make it more random, simply switch torch.randint to np.randint
    worker_seed = torch.randint(0, 2**32, (1,))[0].cpu().item() + worker_id
    # print('Loader Worker %d Uses RNG Seed: %d' % (worker_id, worker_seed))
    # retrieve the dataset copied into this worker process
    # then set the random seed for each augmentation
    worker_info.dataset.setup_augmentor(worker_id, worker_seed)
    return
class RunManager(object):
    """
    Either used to view the dataset or
    to initialise the main training loop.
    """

    def __init__(self, **kwargs):
        """Store every keyword argument as an attribute on the manager.

        Later methods rely on (at least) `debug`, `seed`, `logging`, `log_dir`,
        `model_config` and `create_dataset` being supplied here.
        """
        self.phase_idx = 0
        for variable, value in kwargs.items():
            self.__setattr__(variable, value)
        return

    ####
    def _get_datagen(self, batch_size, run_mode, subset_name, nr_procs=0):
        """Build a DataLoader for one named subset.

        Worker processes are disabled in debug mode; with zero workers the
        augmentor is set up synchronously here, otherwise `worker_init_fn`
        seeds it per worker.
        """
        nr_procs = nr_procs if not self.debug else 0

        input_dataset = self.create_dataset(
            run_mode=run_mode,
            subset_name=subset_name,
            setup_augmentor=nr_procs == 0)

        logging.info(
            f'Dataset {run_mode} - {subset_name} : {len(input_dataset)}')

        dataloader = DataLoader(
            input_dataset,
            num_workers=nr_procs,
            batch_size=batch_size,
            shuffle=run_mode == 'train',
            drop_last=run_mode == 'train',
            worker_init_fn=worker_init_fn,
        )
        return dataloader

    ####
    def _run_once(self, opt, run_engine_opt, log_dir, prev_log_dir=None):
        """
        Simply run the defined run_step of the related method once
        """
        check_manual_seed(self.seed)

        log_info = {}
        if self.logging:
            # check_log_dir(log_dir)
            # rm_n_mkdir(log_dir)

            import joblib

            tfwriter = SummaryWriter(log_dir=log_dir)
            log_file = log_dir + "/stats.dat"
            joblib.dump({}, log_file)  # create an empty stats file for this phase
            log_info = {
                "log_file": log_file,
                "tfwriter": tfwriter,
            }

        # ! create list of data loader
        def create_loader_dict(run_mode, loader_name_list):
            # one DataLoader per named subset, configured from opt['loader']
            loader_dict = {}
            for loader_name in loader_name_list:
                loader_opt = opt['loader'][loader_name]
                loader_dict[loader_name] = self._get_datagen(
                    loader_opt['batch_size'],
                    run_mode, loader_name,
                    nr_procs=loader_opt['nr_procs'])
            return loader_dict

        ####
        def get_last_chkpt_path(prev_phase_dir, net_name):
            # resolve the latest-epoch checkpoint written by the previous phase
            info = joblib.load(f'{prev_phase_dir}/stats.dat')
            # ! prioritize epoch over step if both exist
            epoch_list = [int(v) for v in info.keys()]
            last_chkpts_path = (
                f"{prev_phase_dir}/"
                f"{net_name}_epoch={max(epoch_list):2d}.tar"
            )
            return last_chkpts_path

        # TODO: adding way to load pretrained weight or resume the training
        # parsing the network and optimizer information
        net_run_info = {}
        net_info_opt = opt['run_info']
        for net_name, net_info in net_info_opt.items():
            assert inspect.isclass(net_info['desc']) \
                or inspect.isfunction(net_info['desc']), \
                "`desc` must be a Class or Function which instantiate NEW objects !!!"
            net_desc = net_info['desc']()

            # TODO: customize print-out for each run ?
            # summary_string(net_desc, (3, 270, 270), device='cpu')

            pretrained_path = net_info["pretrained"]
            if pretrained_path is not None:
                if pretrained_path == -1:
                    # * depend on logging format so may be
                    # * broken if logging format has been changed
                    pretrained_path = get_last_chkpt_path(prev_log_dir, net_name)
                    net_state_dict = torch.load(pretrained_path)["desc"]
                else:
                    chkpt_ext = os.path.basename(pretrained_path).split(".")[-1]
                    if chkpt_ext == "npz":
                        net_state_dict = dict(np.load(pretrained_path))
                        net_state_dict = {
                            k: torch.from_numpy(v)
                            for k, v in net_state_dict.items()
                        }
                    elif chkpt_ext == "tar":  # ! assume same saving format we desire
                        net_state_dict = torch.load(pretrained_path)["desc"]

                colored_word = colored(net_name, color="red", attrs=["bold"])
                logging.info(
                    f"Model `{colored_word}` pretrained path: {pretrained_path}"
                )

                # load_state_dict returns (missing keys, unexpected keys)
                net_state_dict = convert_pytorch_checkpoint(net_state_dict)
                load_feedback = net_desc.load_state_dict(net_state_dict, strict=False)
                # * uncomment for your convenience
                logging.info(f"Missing Variables: {load_feedback[0]}")
                logging.info(f"Detected Unknown Variables: {load_feedback[1]}")

            # net_desc = torch.jit.script(net_desc)
            net_desc = DataParallel(net_desc)
            net_desc = net_desc.to('cuda')
            # print(net_desc) # * dump network definition or not?
            optimizer, optimizer_args = net_info['optimizer']
            optimizer = optimizer(net_desc.parameters(), **optimizer_args)
            # TODO: expand for external aug for scheduler
            nr_iter = opt['nr_epochs']
            scheduler = net_info['lr_scheduler'](optimizer, nr_iter)
            net_run_info[net_name] = {
                'desc': net_desc,
                'optimizer': optimizer,
                'lr_scheduler': scheduler,
                # TODO: standardize API for external hooks
                'extra_info': net_info['extra_info']
            }

        # parsing the running engine configuration
        assert 'train' in run_engine_opt, \
            'No engine for training detected in description file'

        # initialize runner and attach callback afterward
        # * all engine shared the same network info declaration
        runner_dict = {}
        for runner_name, runner_opt in run_engine_opt.items():
            runner_loader_dict = create_loader_dict(
                runner_name, runner_opt['loader'])
            runner_dict[runner_name] = RunEngine(
                loader_dict=runner_loader_dict,
                engine_name=runner_name,
                run_step=runner_opt['run_step'],
                run_info=net_run_info,
                log_info=log_info,
            )

        # wire per-event callbacks; a callback flagged with `engine_trigger`
        # gets a reference to the engine it is meant to trigger
        for runner_name, runner in runner_dict.items():
            callback_info = run_engine_opt[runner_name]['callbacks']
            for event, callback_list, in callback_info.items():
                for callback in callback_list:
                    if callback.engine_trigger:
                        triggered_runner_name = (
                            callback.triggered_engine_name
                        )
                        callback.triggered_engine = (
                            runner_dict[triggered_runner_name]
                        )
                    runner.add_event_handler(event, callback)

        # retrieve main runner
        main_runner = runner_dict['train']
        main_runner.separate_loader_output = False
        main_runner.state.logging = self.logging
        main_runner.state.log_dir = log_dir
        # start the run loop
        main_runner.run(opt['nr_epochs'])

        logging.info('\n')
        logging.info("#" * 16)
        logging.info('\n')
        return

    ####
    def run(self):
        """
        Define multi-stage run or cross-validation or whatever in here
        """
        phase_list = self.model_config['phase_list']
        engine_opt = self.model_config['run_engine']

        prev_save_path = None
        for phase_idx, phase_info in enumerate(phase_list):
            # single-phase runs log directly into log_dir; multi-phase runs
            # get one zero-padded sub-directory per phase
            if len(phase_list) == 1:
                save_path = self.log_dir
            else:
                save_path = self.log_dir + '/%02d' % (phase_idx)
            self._run_once(
                phase_info, engine_opt, save_path,
                prev_log_dir=prev_save_path)
            prev_save_path = save_path
            self.phase_idx += 1
| en | 0.675884 | # TODO: switch to DistributedDataParallel # * must initialize augmentor per worker, else duplicated # * rng generators may happen # ! to make the seed chain reproducible, must use the torch random, not numpy # the torch rng from main thread will regenerate a base seed, which is then # copied into the dataloader each time it created (i.e start of each epoch) # then dataloader with this seed will spawn worker, # now we reseed the worker # to make it more random, simply switch torch.randint to np.randint # print('Loader Worker %d Uses RNG Seed: %d' % (worker_id, worker_seed)) # retrieve the dataset copied into this worker process # then set the random seed for each augmentation Either used to view the dataset or to initialise the main training loop. #### #### Simply run the defined run_step of the related method once # check_log_dir(log_dir) # rm_n_mkdir(log_dir) # ! create list of data loader #### # ! prioritize epoch over step if both exist # TODO: adding way to load pretrained weight or resume the training # parsing the network and optimizer information # TODO: customize print-out for each run ? # summary_string(net_desc, (3, 270, 270), device='cpu') # * depend on logging format so may be # * broken if logging format has been changed # ! assume same saving format we desire # load_state_dict returns (missing keys, unexpected keys) # * uncomment for your convenience # net_desc = torch.jit.script(net_desc) # print(net_desc) # * dump network definition or not? # TODO: expand for external aug for scheduler # TODO: standardize API for external hooks # parsing the running engine configuration # initialize runner and attach callback afterward # * all engine shared the same network info declaration # retrieve main runner # start the run loop #### Define multi-stage run or cross-validation or whatever in here | 2.667312 | 3 |
src/pylife/strength/helpers.py | alexander-maier/pylife | 57 | 6617975 | <reponame>alexander-maier/pylife
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small helper functions for fatigue analysis
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
import numpy as np
def solidity_haibach(collective, k):
    """Compute solidity according to Haibach

    Refer to:
    Haibach - Betriebsfestigkeit - 3. Auflage (2005) - S.271

    Parameters
    ----------
    collective : np.ndarray
        numpy array of shape (:, 2) where ":" depends on the number of classes
        defined for the rainflow counting

        1. column: class values in ascending order
        2. column: accumulated number of cycles first entry is the total
           number of cycles then in a descending manner till the number of
           cycles of the highest stress class
    k : float
        slope of the S/N curve

    Returns
    -------
    V : np.ndarray (1,)
        Völligkeitswert (solidity)
    """
    S = collective[:, 0]
    # the accumulated number of cycles
    N_acc = collective[:, 1]
    # Per-class cycle counts: difference of consecutive accumulated counts;
    # the last (highest) class keeps its accumulated value since nothing is
    # accumulated above it. Vectorized replacement of the original index loop
    # (hi[i] = N_acc[i] - N_acc[i + 1], hi[-1] = N_acc[-1]).
    hi = np.append(-np.diff(N_acc), N_acc[-1])
    # the selection of S is required so that the highest class
    # with actual counts (hi > 0) is taken as reference for all stress values
    xi = S / S[hi > 0].max()
    V = np.sum((hi * (xi**k)) / hi.sum())
    return V
def solidity_fkm(collective, k):
    """Compute solidity according to the FKM guideline (2012)

    Refer to:
    FKM-Richtlinie - 6. Auflage (2012) - S.58 - Gl. (2.4.55) + Gl. (2.4.55)

    Parameters
    ----------
    collective : np.ndarray
        numpy array of shape (:, 2) where ":" depends on the number of classes
        defined for the rainflow counting

        1. column: class values in ascending order
        2. column: accumulated number of cycles first entry is the total
           number of cycles then in a descending manner till the number of
           cycles of the highest stress class
    k : float
        slope of the S/N curve

    Returns
    -------
    V : np.ndarray
        Völligkeitswert (solidity)
    """
    # The FKM solidity is simply the k-th root of the Haibach solidity value.
    return solidity_haibach(collective, k) ** (1. / k)
class StressRelations:
    """Namespace for simple relations of stress / amplitude / R-ratio

    Refer to:
    Haibach (2006), p. 21
    """

    @staticmethod
    def get_max_stress_from_amplitude(amplitude, R):
        """Maximum stress of a load cycle given its amplitude and stress ratio R."""
        # S_max = 2 * S_a / (1 - R)
        span = 1 - R
        return 2 * amplitude / span

    @staticmethod
    def get_mean_stress_from_amplitude(amplitude, R):
        """Mean stress of a load cycle given its amplitude and stress ratio R."""
        # S_m = S_a * (1 + R) / (1 - R)
        weighted = amplitude * (1 + R)
        return weighted / (1 - R)
def irregularity_factor(rainflow_matrix, residuals=np.empty(0), decision_bin=None):
    """
    Calculate the irregularity factor of a turning point sequence based on a rainflow matrix and its residuals.

    Two sided irregularity factor:

    .. math::
        I = N_{mean crossings} / N_{turning points}

    Parameters
    ----------
    rainflow_matrix: np.ndarray[int, int]
        2D-rainflow matrix (must be square shaped)
    residuals: np.ndarray[int], Optional
        1D array of residuals to consider for accurate calculation. Consecutive duplicates are removed beforehand.
        Residuals must be provided as bin numbers.
        Hint: Transformation from physical to binned values possible via np.digitize.
    decision_bin: int, Optional
        Bin number that equals the mean (two-sided). If not provided the decision_bin is inferred by the matrix entries
        as the mean value based on the turning points and will be broadcasted to int-type.

    Todo
    ----
    Future version may provide the one-sided irregularity factor as a second option. Formula would be:

    One sided irregularity factor:

    .. math::
        I = N_{zero bin upwards crossing} / N_{peaks}

    N_{zero bin upwards crossings} equals positive_mean_bin_crossing if `decision_bin` is set to the bin of physical 0.
    Inferring exact amount of peaks from rainflow-matrix and residuals is left to be done.
    """
    # Ensure input types
    assert isinstance(rainflow_matrix, np.ndarray)
    assert isinstance(residuals, np.ndarray)
    if rainflow_matrix.shape[0] != rainflow_matrix.shape[1]:
        raise ValueError("Rainflow matrix must be square shaped in order to calculate the irregularity factor.")

    # Remove duplicates from residuals: consecutive equal bins carry no
    # crossing information, so only the first of each run is kept.
    diffs = np.diff(residuals)
    if np.any(diffs == 0.0):
        # Remove the duplicates
        duplicates = np.concatenate([diffs == 0, [False]])
        residuals = residuals[~duplicates]

    # Infer decision bin as mean if necessary: weighted mean of the bin indices,
    # weighted by how often each bin occurs as a cycle start (row sum) or
    # end (column sum), with each residual turning point counted once.
    if decision_bin is None:
        row_sum = 0
        col_sum = 0
        total_counts = 0
        for i in range(rainflow_matrix.shape[0]):
            row = rainflow_matrix[i, :].sum()
            col = rainflow_matrix[:, i].sum()
            total_counts += row + col
            row_sum += i * row
            col_sum += i * col
        total_counts += residuals.shape[0]
        res_sum = residuals.sum()
        decision_bin = int((row_sum + col_sum + res_sum) / total_counts)
    else:
        decision_bin = int(decision_bin)

    # Calculate two sided irregularity factor
    # NOTE(review): the `decision_bin:-1` slices exclude the LAST bin entirely,
    # so cycles starting or ending in the highest bin never count as mean
    # crossings — confirm whether the last bin is an intentional overflow bin
    # or whether these slices should read `decision_bin:` instead.
    positive_mean_bin_crossing = rainflow_matrix[0:decision_bin, decision_bin:-1].sum()
    negative_mean_bin_crossing = rainflow_matrix[decision_bin:-1, 0:decision_bin].sum()
    # matrix crossings are counted twice (factor 2)
    total_mean_crossing = 2 * (positive_mean_bin_crossing + negative_mean_bin_crossing)
    # every closed cycle contributes two turning points; each residual one
    amount_of_turning_points = 2 * rainflow_matrix.sum()
    amount_of_turning_points += residuals.shape[0]
    # residual-to-residual transitions that change sign relative to the
    # decision bin add one mean crossing each
    for i in range(residuals.shape[0] - 1):
        if (residuals[i] - decision_bin) * (residuals[i+1] - decision_bin) < 0:
            total_mean_crossing += 1
    return total_mean_crossing / amount_of_turning_points
| # Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small helper functions for fatigue analysis
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
import numpy as np
def solidity_haibach(collective, k):
"""Compute solidity according to Haibach
Refer to:
Haibach - Betriebsfestigkeit - 3. Auflage (2005) - S.271
Parameters
----------
collective : np.ndarray
numpy array of shape (:, 2) where ":" depends on the number of classes
defined for the rainflow counting
1. column: class values in ascending order
2. column: accumulated number of cycles first entry is the total
number of cycles then in a descending manner till the number of
cycles of the highest stress class
k : float
slope of the S/N curve
Returns
-------
V : np.ndarray (1,)
Völligkeitswert (solidity)
"""
S = collective[:, 0]
# the accumulated number of cycles
N_acc = collective[:, 1]
# the number of cycles for each class
hi = np.zeros_like(N_acc)
# get the number of cycles for each class
for i in range(len(hi)):
if i == (len(hi) - 1):
# the last entry is the accumulation of only the last class
# so it is already the number of cycles of the highest class
hi[i] = N_acc[i]
else:
hi[i] = N_acc[i] - N_acc[i + 1]
# the selection of S is required so that the highest class
# with actual counts (hi > 0) is taken as reference for all stress values
xi = S / S[hi > 0].max()
V = np.sum((hi * (xi**k)) / hi.sum())
return V
def solidity_fkm(collective, k):
"""Compute solidity according to the FKM guideline (2012)
Refer to:
FKM-Richtlinie - 6. Auflage (2012) - S.58 - Gl. (2.4.55) + Gl. (2.4.55)
Parameters
----------
collective : np.ndarray
numpy array of shape (:, 2) where ":" depends on the number of classes
defined for the rainflow counting
1. column: class values in ascending order
2. column: accumulated number of cycles first entry is the total
number of cycles then in a descending manner till the number of
cycles of the highest stress class k : float slope of the S/N
curve
Returns
-------
V : np.ndarray
Völligkeitswert (solidity)
"""
V_haibach = solidity_haibach(collective, k)
V = V_haibach**(1./k)
return V
class StressRelations:
"""Namespace for simple relations of stress / amplitude / R-ratio
Refer to:
Haibach (2006), p. 21
"""
@staticmethod
def get_max_stress_from_amplitude(amplitude, R):
return 2 * amplitude / (1 - R)
@staticmethod
def get_mean_stress_from_amplitude(amplitude, R):
return amplitude * (1 + R) / (1 - R)
def irregularity_factor(rainflow_matrix, residuals=np.empty(0), decision_bin=None):
    """
    Calculate the irregularity factor of a turning point sequence based on a
    rainflow matrix and its residuals.

    Two sided irregularity factor:

    .. math::
        I = N_{mean crossings} / N_{turning points}

    Parameters
    ----------
    rainflow_matrix: np.ndarray[int, int]
        2D-rainflow matrix (must be square shaped)
    residuals: np.ndarray[int], Optional
        1D array of residuals to consider for accurate calculation.
        Consecutive duplicates are removed beforehand. Residuals must be
        provided as bin numbers (transformation from physical to binned
        values is possible via np.digitize).
    decision_bin: int, Optional
        Bin number that equals the mean (two-sided). If not provided, the
        decision_bin is inferred from the matrix entries as the mean value
        based on the turning points and is cast to int.

    Returns
    -------
    float
        Ratio of (twice the) mean crossings to the number of turning points.

    Notes
    -----
    The ``residuals=np.empty(0)`` default is a mutable ndarray created once
    at definition time; it is only read and re-bound here, never mutated
    in place, so the shared default is harmless.
    """
    # Ensure input types
    assert isinstance(rainflow_matrix, np.ndarray)
    assert isinstance(residuals, np.ndarray)
    if rainflow_matrix.shape[0] != rainflow_matrix.shape[1]:
        raise ValueError("Rainflow matrix must be square shaped in order to calculate the irregularity factor.")
    # Remove duplicates from residuals
    diffs = np.diff(residuals)
    if np.any(diffs == 0.0):
        # Remove the duplicates (drops the first element of each equal
        # consecutive pair; the trailing False keeps the last element).
        duplicates = np.concatenate([diffs == 0, [False]])
        residuals = residuals[~duplicates]
    # Infer decision bin as mean if necessary
    if decision_bin is None:
        # Weighted mean of bin indices over matrix rows, columns and
        # residuals; each matrix entry represents two turning points
        # (a from-bin and a to-bin), hence rows and columns both count.
        row_sum = 0
        col_sum = 0
        total_counts = 0
        for i in range(rainflow_matrix.shape[0]):
            row = rainflow_matrix[i, :].sum()
            col = rainflow_matrix[:, i].sum()
            total_counts += row + col
            row_sum += i * row
            col_sum += i * col
        total_counts += residuals.shape[0]
        res_sum = residuals.sum()
        decision_bin = int((row_sum + col_sum + res_sum) / total_counts)
    else:
        decision_bin = int(decision_bin)
    # Calculate two sided irregularity factor
    # NOTE(review): the upper bound ``decision_bin:-1`` / ``decision_bin:-1``
    # excludes the last column/row (highest bin) from the crossing count;
    # confirm whether ``decision_bin:`` was intended instead.
    positive_mean_bin_crossing = rainflow_matrix[0:decision_bin, decision_bin:-1].sum()
    negative_mean_bin_crossing = rainflow_matrix[decision_bin:-1, 0:decision_bin].sum()
    total_mean_crossing = 2 * (positive_mean_bin_crossing + negative_mean_bin_crossing)
    amount_of_turning_points = 2 * rainflow_matrix.sum()
    amount_of_turning_points += residuals.shape[0]
    # A sign change of (residual - decision_bin) between neighbours is one
    # additional mean crossing contributed by the residual sequence.
    for i in range(residuals.shape[0] - 1):
        if (residuals[i] - decision_bin) * (residuals[i+1] - decision_bin) < 0:
            total_mean_crossing += 1
    return total_mean_crossing / amount_of_turning_points
(2.4.55) Parameters ---------- collective : np.ndarray numpy array of shape (:, 2) where ":" depends on the number of classes defined for the rainflow counting 1. column: class values in ascending order 2. column: accumulated number of cycles first entry is the total number of cycles then in a descending manner till the number of cycles of the highest stress class k : float slope of the S/N curve Returns ------- V : np.ndarray Völligkeitswert (solidity) Namespace for simple relations of stress / amplitude / R-ratio Refer to: Haibach (2006), p. 21 Calculate the irregularity factor of a turning point sequence based on a rainflow matrix and its residuals. Two sided irregularity factor: ..math:: I = N_{mean crossings} / N_{turning points} Parameters ---------- rainflow_matrix: np.ndarray[int, int] 2D-rainflow matrix (must be square shaped) residuals: np.ndarray[int], Optional 1D array of residuals to consider for accurate calculation. Consecutive duplicates are removed beforehand. Residuals must be provided as bin numbers. Hint: Transformation from physical to binned values possible via np.digitize. decision_bin: int, Optional Bin number that equals the mean (two-sided). If not provided the decision_bin is inferred by the matrix entries as the mean value based on the turning points and will be broadcasted to int-type. Todo ---- Future version may provide the one-sided irregularity factor as a second option. Formula would be: One sided irregularity factor: .. math:: I = N_{zero bin upwards crossing} / N_{peaks} N_{zero bin upwards crossings} equals positive_mean_bin_crossing if `decision_bin` is set to the bin of physical 0. Inferring exact amount of peaks from rainflow-matrix and residuals is left to be done. # Ensure input types # Remove duplicates from residuals # Remove the duplicates # Infer decision bin as mean if necessary # Calculate two sided irregularity factor | 2.215617 | 2 |
src/nr/util/fs/_path.py | NiklasRosenstein/python-nr.util | 0 | 6617976 |
from __future__ import annotations
import sys
from pathlib import Path
def is_relative_to(a: Path | str, b: Path | str) -> bool:
  """ Returns `True` if path *a* is relative to path *b*. A backfill for #Path.is_relative_to() for Python versions
  older than 3.9. """

  if sys.version_info >= (3, 9):
    # Native implementation is available.
    return Path(a).is_relative_to(b)
  # Pre-3.9 backfill: relative_to() raises ValueError when *a* is not
  # located under *b*.
  try:
    Path(a).relative_to(b)
  except ValueError:
    return False
  else:
    return True
|
from __future__ import annotations
import sys
from pathlib import Path
def is_relative_to(a: Path | str, b: Path | str) -> bool:
  """ Returns `True` if path *a* is relative to path *b*. A backfill for #Path.is_relative_to() for Python versions
  older than 3.9. """
  if sys.version_info >= (3, 9):
    return Path(a).is_relative_to(b)
  # Backfill for 3.8 and older: relative_to() succeeds exactly when *a*
  # lives under *b*, raising ValueError otherwise.
  try:
    Path(a).relative_to(b)
    return True
  except ValueError:
    return False
| en | 0.60992 | Returns `True` if path *a* is relative to path *b*. A backfill for #Path.is_relative_to() for Python versions older than 3.9. | 3.080483 | 3 |
examples/without-django-rest-framework/without_django_rest_framework/urls.py | marquicus/django-knockout | 21 | 6617977 | <filename>examples/without-django-rest-framework/without_django_rest_framework/urls.py
from django.conf.urls import include, url
from django.contrib import admin
from app import views
# URL routing table: site index, the "app" application, and the admin site.
urlpatterns = [
    # Landing page served by a class-based view.
    url(r'^$', views.Index.as_view(), name="index"),
    # Delegate /app/... to the app's own urlconf.
    # NOTE(review): the app_name argument to include() only exists in
    # pre-2.0 Django -- confirm the project's Django version.
    url(r'^app/', include('app.urls', namespace='app', app_name='app')),
    # Built-in admin site.
    url(r'^admin/', include(admin.site.urls)),
]
| <filename>examples/without-django-rest-framework/without_django_rest_framework/urls.py
from django.conf.urls import include, url
from django.contrib import admin
from app import views
# URL routing table: site index, the "app" application, and the admin site.
urlpatterns = [
    # Landing page served by a class-based view.
    url(r'^$', views.Index.as_view(), name="index"),
    # Delegate /app/... to the app's own urlconf.
    # NOTE(review): the app_name argument to include() only exists in
    # pre-2.0 Django -- confirm the project's Django version.
    url(r'^app/', include('app.urls', namespace='app', app_name='app')),
    # Built-in admin site.
    url(r'^admin/', include(admin.site.urls)),
]
| none | 1 | 1.730848 | 2 | |
mongo/script/importCsvIntoMongo.py | zhangymPerson/DBMS-note | 0 | 6617978 | <filename>mongo/script/importCsvIntoMongo.py<gh_stars>0
# 导入csv进入mongo
# -*- coding: utf-8 -*-
# @Author:<EMAIL>
import csv
# 导包
from pymongo import MongoClient
# Create a connection to the MongoDB database and return the target
# collection.
def connection():
    # 1: connect to the local MongoDB service
    conn = MongoClient("localhost")
    # 2: select the local database (health_dev); created on demand if absent
    db = conn.health_dev
    # 3: select the collection
    set1 = db.data
    # 4: optionally empty the collection (two alternatives; fall back to the
    #    second when the first does not work)
    # first way: remove directly
    # NOTE(review): Collection.remove() is deprecated in modern pymongo
    # versions -- delete_many({}) is the documented replacement.
    set1.remove(None)
    # second way, when remove is not usable
    # set1.delete_many({})
    return set1
def insertToMongoDB():
    """Read rows from a local CSV file, print the first column and count them.

    NOTE(review): despite its name, this function never calls connection()
    or performs any MongoDB insert -- it only reads and prints. The count
    also includes the CSV header row, if one is present.
    """
    # open the CSV file (original comment referred to guazi.csv)
    with open('C:/Users/Administrator/Desktop/course.csv', 'r', encoding='utf-8')as csvfile:
        # original comment mentions DictReader, but csv.reader is used,
        # which yields plain lists rather than dicts
        reader = csv.reader(csvfile)
        # counter for how many rows were processed
        counts = 0
        for each in reader:
            # every csv field arrives as a string; convert types here if needed
            print(each[0])
            counts += 1
        print('成功添加了' + str(counts) + '条数据 ')
# script entry point
def main():
    insertToMongoDB()
# Run main() only when executed as a script, so importing this module does
# not trigger the CSV processing.
if __name__ == '__main__':
    main()
| <filename>mongo/script/importCsvIntoMongo.py<gh_stars>0
# 导入csv进入mongo
# -*- coding: utf-8 -*-
# @Author:<EMAIL>
import csv
# 导包
from pymongo import MongoClient
# 创建连接MongoDB数据库函数
def connection():
# 1:连接本地MongoDB数据库服务
conn = MongoClient("localhost")
# 2:连接本地数据库(guazidata)。没有时会自动创建
db = conn.health_dev
# 3:创建集合
set1 = db.data
# 4:看情况是否选择清空(两种清空方式,第一种不行的情况下,选择第二种)
# 第一种直接remove
set1.remove(None)
# 第二种remove不好用的时候
# set1.delete_many({})
return set1
def insertToMongoDB():
# 打开文件guazi.csv
with open('C:/Users/Administrator/Desktop/course.csv', 'r', encoding='utf-8')as csvfile:
# 调用csv中的DictReader函数直接获取数据为字典形式
reader = csv.reader(csvfile)
# 创建一个counts计数一下 看自己一共添加了了多少条数据
counts = 0
for each in reader:
# 将数据中需要转换类型的数据转换类型。原本全是字符串(string)。
print(each[0])
counts += 1
print('成功添加了' + str(counts) + '条数据 ')
# 创建主函数
def main():
insertToMongoDB()
# 判断是不是调用的main函数。这样以后调用的时候就可以防止不会多次调用 或者函数调用错误
if __name__ == '__main__':
main()
| zh | 0.97897 | # 导入csv进入mongo # -*- coding: utf-8 -*- # @Author:<EMAIL> # 导包 # 创建连接MongoDB数据库函数 # 1:连接本地MongoDB数据库服务 # 2:连接本地数据库(guazidata)。没有时会自动创建 # 3:创建集合 # 4:看情况是否选择清空(两种清空方式,第一种不行的情况下,选择第二种) # 第一种直接remove # 第二种remove不好用的时候 # set1.delete_many({}) # 打开文件guazi.csv # 调用csv中的DictReader函数直接获取数据为字典形式 # 创建一个counts计数一下 看自己一共添加了了多少条数据 # 将数据中需要转换类型的数据转换类型。原本全是字符串(string)。 # 创建主函数 # 判断是不是调用的main函数。这样以后调用的时候就可以防止不会多次调用 或者函数调用错误 | 3.043435 | 3 |
compass/db/api/host.py | leah03/test-originrepo | 2 | 6617979 | # Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Host database operations."""
import functools
import logging
import netaddr
import re
from compass.db.api import database
from compass.db.api import metadata_holder as metadata_api
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import util
# --- Field whitelists consumed by the decorators on the functions below ---

# Filter keys accepted when listing hosts.
SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac']
# Filter keys accepted when listing machines-or-hosts.
SUPPORTED_MACHINE_HOST_FIELDS = [
    'mac', 'tag', 'location', 'os_name', 'os_id'
]
# Filter keys accepted when listing host networks.
# NOTE(review): "NETOWORK" is a typo for "NETWORK", but the name is
# referenced by functions in this module, so renaming is a wider change.
SUPPORTED_NETOWORK_FIELDS = [
    'interface', 'ip', 'is_mgmt', 'is_promiscuous'
]
# Fields exposed in host responses.
RESP_FIELDS = [
    'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
    'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
    'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
    'created_at', 'updated_at'
]
# Fields exposed when listing the clusters a host belongs to.
RESP_CLUSTER_FIELDS = [
    'id', 'name', 'os_name', 'reinstall_distributed_system',
    'owner', 'adapter_name', 'flavor_name',
    'distributed_system_installed', 'created_at', 'updated_at'
]
# Fields exposed in host-network responses.
RESP_NETWORK_FIELDS = [
    'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
    'created_at', 'updated_at'
]
# Fields exposed in host-config responses.
# NOTE(review): 'config_setp' looks like a typo for 'config_step'; if the
# model attribute is spelled 'config_step', this entry never matches and
# the field is silently dropped from responses -- confirm against
# models.Host before changing.
RESP_CONFIG_FIELDS = [
    'os_config',
    'config_setp',
    'config_validated',
    'networks',
    'created_at',
    'updated_at'
]
# Fields exposed for deployed (rendered) config responses.
RESP_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Fields exposed by deploy-action responses.
RESP_DEPLOY_FIELDS = [
    'status', 'host'
]
# Host attributes a caller may update.
UPDATED_FIELDS = ['name', 'reinstall_os']
# Key used for full (PUT) config updates.
UPDATED_CONFIG_FIELDS = [
    'put_os_config'
]
# Key used for partial (PATCH) config updates.
PATCHED_CONFIG_FIELDS = [
    'patched_os_config'
]
# Key used when the backend reports back deployed config.
UPDATED_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Required / optional keys when adding a host network.
ADDED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id'
]
OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
# Keys a caller may change on an existing host network.
UPDATED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
    'is_promiscuous'
]
# Keys silently stripped from update payloads.
IGNORE_FIELDS = [
    'id', 'created_at', 'updated_at'
]
# Host-state response / update field sets.
RESP_STATE_FIELDS = [
    'id', 'state', 'percentage', 'message', 'severity', 'ready'
]
UPDATED_STATE_FIELDS = [
    'state', 'percentage', 'message', 'severity'
]
# State fields only the backend may set.
UPDATED_STATE_INTERNAL_FIELDS = [
    'ready'
]
# Installation-log progress field sets.
RESP_LOG_FIELDS = [
    'id', 'filename', 'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
ADDED_LOG_FIELDS = [
    'filename'
]
UPDATED_LOG_FIELDS = [
    'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_hosts(user=None, session=None, **filters):
    """List hosts.

    :param user: requesting user (permission-checked by decorator).
    :param session: db session injected by the run_in_session decorator.
    :param filters: optional filter keys from SUPPORTED_FIELDS.
    :returns: host records serialized to dicts limited to RESP_FIELDS.
    """
    return utils.list_db_objects(
        session, models.Host, **filters
    )
@utils.supported_filters(
    optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.output_filters(
    missing_ok=True,
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback,
    os_name=utils.general_filter_callback,
    os_id=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_machines_or_hosts(user=None, session=None, **filters):
    """List machines, substituting the host record where one exists."""
    machines = utils.list_db_objects(
        session, models.Machine, **filters
    )
    # Report each machine as its associated host when it has one,
    # otherwise as the bare machine.
    return [machine.host or machine for machine in machines]
def _get_host(host_id, session=None, **kwargs):
    """Get host by id.

    :param host_id: integer id of the host.
    :param kwargs: forwarded to utils.get_db_object
        (e.g. exception_when_missing).
    :raises exception.InvalidParameter: if host_id is not an integer.
    """
    # `long` exists only on Python 2; this module targets Python 2.
    if isinstance(host_id, (int, long)):
        return utils.get_db_object(
            session, models.Host,
            id=host_id, **kwargs
        )
    else:
        raise exception.InvalidParameter(
            'host id %s type is not int compatible' % host_id
        )
def get_host_internal(host_id, session=None, **kwargs):
    """Helper function to get host.

    Used by other files under db/api. Unlike get_host(), this performs no
    permission check and returns the raw model object.
    """
    return _get_host(host_id, session=session, **kwargs)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get host info.

    :param host_id: integer id of the host.
    :param exception_when_missing: when False, a missing host yields None
        instead of raising.
    :returns: host serialized to a dict limited to RESP_FIELDS.
    """
    # NOTE: extra kwargs are accepted for interface symmetry but ignored.
    return _get_host(
        host_id,
        exception_when_missing=exception_when_missing,
        session=session
    )
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_machine_or_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get machine or host if possible.

    Returns the host associated with the machine identified by host_id,
    or the bare machine when no host exists for it yet.

    :param host_id: machine id.
    :param exception_when_missing: when False, a missing machine yields
        None instead of raising.
    """
    from compass.db.api import machine as machine_api
    machine = machine_api.get_machine_internal(
        host_id,
        exception_when_missing=exception_when_missing,
        session=session
    )
    # Fix: with exception_when_missing=False the machine may be None; the
    # original dereferenced machine.host unconditionally and raised
    # AttributeError instead of returning None.
    if machine is None:
        return None
    if machine.host:
        return machine.host
    return machine
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CLUSTERS
)
@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
def get_host_clusters(host_id, user=None, session=None, **kwargs):
    """Return the clusters the given host participates in."""
    clusterhosts = _get_host(host_id, session=session).clusterhosts
    return [clusterhost.cluster for clusterhost in clusterhosts]
def check_host_validated(host):
    """Raise Forbidden unless the host's config has been validated."""
    if host.config_validated:
        return
    raise exception.Forbidden(
        'host %s is not validated' % host.name
    )
def check_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Check host is editable; raise Forbidden otherwise.

    If we try to set reinstall_os or check the host is not in installing
    state, we should set check_in_installing to True (then only the
    INSTALLING state blocks editing). Otherwise editing is only allowed
    when the host is flagged for OS reinstall.

    We also make sure the user is admin or the owner of the host to avoid
    unauthorized users updating host attributes.

    :raises exception.Forbidden: when any of the checks fails.
    """
    if check_in_installing:
        # only an actively-installing host is locked in this mode
        if host.state.state == 'INSTALLING':
            raise exception.Forbidden(
                'host %s is not editable '
                'when state is in installing' % host.name
            )
    elif not host.reinstall_os:
        # without check_in_installing, a host is only editable when it is
        # marked to be reinstalled
        raise exception.Forbidden(
            'host %s is not editable '
            'when not to be reinstalled' % host.name
        )
    if user and not user.is_admin and host.creator_id != user.id:
        raise exception.Forbidden(
            'host %s is not editable '
            'when user is not admin or the owner of the host' % host.name
        )
def is_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Boolean form of check_host_editable(): True when no Forbidden."""
    try:
        check_host_editable(
            host, user=user,
            check_in_installing=check_in_installing
        )
    except exception.Forbidden:
        return False
    return True
def validate_host(host):
    """Validate host before deployment.

    Ensures the hostname is set, at least one network exists, exactly one
    network is flagged as the management interface, and the management
    interface is not in promiscuous mode.

    :raises exception.InvalidParameter: when any check fails.
    """
    if not host.hostname:
        # Fix: was exception.Invalidparameter (typo) which would raise
        # AttributeError at runtime instead of the intended validation
        # error; InvalidParameter is what the rest of this module raises.
        raise exception.InvalidParameter(
            'host %s does not set hostname' % host.name
        )
    if not host.host_networks:
        raise exception.InvalidParameter(
            'host %s does not have any network' % host.name
        )
    mgmt_interface_set = False
    for host_network in host.host_networks:
        if host_network.is_mgmt:
            if mgmt_interface_set:
                # more than one management interface is ambiguous
                raise exception.InvalidParameter(
                    'host %s multi interfaces set mgmt ' % host.name
                )
            if host_network.is_promiscuous:
                raise exception.InvalidParameter(
                    'host %s interface %s is mgmt but promiscuous' % (
                        host.name, host_network.interface
                    )
                )
            mgmt_interface_set = True
    if not mgmt_interface_set:
        raise exception.InvalidParameter(
            'host %s has no mgmt interface' % host.name
        )
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(name=utils.check_name)
@utils.wrap_to_dict(RESP_FIELDS)
def _update_host(host_id, session=None, user=None, **kwargs):
    """Update a host (internal helper; no permission check here).

    :returns: the updated host dict, or {} when the host already installed
        successfully and is not flagged for reinstall (update is skipped).
    """
    host = _get_host(host_id, session=session)
    # A successfully installed host that is not being reinstalled is
    # immutable; return an empty dict rather than raising.
    if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
        logging.info("ignoring successful host: %s", host_id)
        return {}
    check_host_editable(
        host, user=user,
        # setting reinstall_os only requires the host not be installing
        check_in_installing=kwargs.get('reinstall_os', False)
    )
    return utils.update_db_object(session, host, **kwargs)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_host(host_id, user=None, session=None, **kwargs):
    """Update a host.

    Permission-checked public wrapper around _update_host();
    kwargs are restricted to UPDATED_FIELDS by the inner decorators.
    """
    return _update_host(host_id, session=session, user=user, **kwargs)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_hosts(data=[], user=None, session=None):
    """Update hosts in batch.

    :param data: list of dicts, each containing a host_id plus the fields
        to update. NOTE: the mutable default [] is only iterated, never
        mutated, so the shared default is harmless here.
    """
    # TODO(xicheng): this batch function is not similar as others.
    # try to make it similar output as others and batch update should
    # tolerate partial failure.
    hosts = []
    for host_data in data:
        hosts.append(_update_host(session=session, user=user, **host_data))
    return hosts
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST
)
@utils.wrap_to_dict(
    RESP_FIELDS + ['status', 'host'],
    host=RESP_FIELDS
)
def del_host(
    host_id, force=False, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Delete a host.

    If force, we delete the host anyway.
    If from_database_only, we only delete the host record in database.
    Otherwise we send a del-host task to celery to delete the host
    record in os installer and package installer, clean installation logs
    and at last clean the database record.

    The backend will call this function again after it deletes the record
    in os installer and package installer, with from_database_only set.

    :returns: the deleted host dict, or a {'status': ..., 'host': ...}
        dict when the deletion was delegated to celery.
    """
    from compass.db.api import cluster as cluster_api
    host = _get_host(host_id, session=session)
    # force set host state to ERROR when we want to delete the
    # host anyway even the host is in installing or already
    # installed. It let the api know the deleting is in doing when backend
    # is doing the real deleting. In future we may import a new state like
    # INDELETE to indicate the deleting is processing.
    # We need discuss about if we can delete a host when it is already
    # installed by api.
    if host.state.state != 'UNINITIALIZED' and force:
        host.state.state = 'ERROR'
    check_host_editable(
        host, user=user,
        check_in_installing=True
    )
    cluster_ids = []
    for clusterhost in host.clusterhosts:
        if clusterhost.state.state != 'UNINITIALIZED' and force:
            clusterhost.state.state = 'ERROR'
        # TODO(grace): here we check all clusters which use this host editable.
        # Because in backend we do not have functions to delete host without
        # reference its cluster. After deleting pure host supported in backend,
        # we should change code here to is_cluster_editable.
        # Here delete a host may fail even we set force flag.
        cluster_api.check_cluster_editable(
            clusterhost.cluster, user=user,
            check_in_installing=True
        )
        cluster_ids.append(clusterhost.cluster_id)
    # Delete host record directly if there is no need to delete it
    # in backend or from_database_only is set.
    if host.state.state == 'UNINITIALIZED' or from_database_only:
        return utils.del_db_object(session, host)
    else:
        logging.info(
            'send del host %s task to celery', host_id
        )
        # imported lazily to avoid a hard dependency when only using the db api
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.delete_host',
            (
                user.email, host.id, cluster_ids
            )
        )
        return {
            'status': 'delete action sent',
            'host': host,
        }
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def get_host_config(host_id, user=None, session=None, **kwargs):
"""Get host config."""
return _get_host(host_id, session=session)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
"""Get host deployed config."""
return _get_host(host_id, session=session)
# replace os_config to deployed_os_config in kwargs.
@utils.replace_filters(
os_config='deployed_os_config'
)
@utils.supported_filters(
UPDATED_DEPLOYED_CONFIG_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
"""Update host deployed config."""
host = _get_host(host_id, session=session)
check_host_editable(host, user=user)
check_host_validated(host)
return utils.update_db_object(session, host, **kwargs)
def _host_os_config_validates(
config, host, session=None, user=None, **kwargs
):
"""Check host os config's validation."""
metadata_api.validate_os_config(
config, host.os_id
)
@utils.input_validates_with_args(
put_os_config=_host_os_config_validates
)
@utils.output_validates_with_args(
os_config=_host_os_config_validates
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def _update_host_config(host, session=None, user=None, **kwargs):
"""Update host config."""
check_host_editable(host, user=user)
return utils.update_db_object(session, host, **kwargs)
# replace os_config to put_os_config in kwargs.
# It tells db the os_config will be updated not patched.
@utils.replace_filters(
os_config='put_os_config'
)
@utils.supported_filters(
UPDATED_CONFIG_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_CONFIG
)
def update_host_config(host_id, user=None, session=None, **kwargs):
"""Update host config."""
host = _get_host(host_id, session=session)
return _update_host_config(
host, session=session, user=user, **kwargs
)
# replace os_config to patched_os_config in kwargs.
# It tells db os_config will be patched not be updated.
@utils.replace_filters(
os_config='patched_os_config'
)
@utils.supported_filters(
PATCHED_CONFIG_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_CONFIG
)
def patch_host_config(host_id, user=None, session=None, **kwargs):
"""Patch host config."""
host = _get_host(host_id, session=session)
return _update_host_config(
host, session=session, user=user, **kwargs
)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_DEL_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def del_host_config(host_id, user=None, session=None):
"""delete a host config."""
host = _get_host(host_id, session=session)
check_host_editable(host, user=user)
return utils.update_db_object(
session, host, os_config={}, config_validated=False
)
@utils.supported_filters(
optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_host_networks(host_id, user=None, session=None, **filters):
"""Get host networks for a host."""
host = _get_host(host_id, session=session)
return utils.list_db_objects(
session, models.HostNetwork,
host_id=host.id, **filters
)
@utils.supported_filters(
optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_hostnetworks(user=None, session=None, **filters):
"""Get host networks."""
return utils.list_db_objects(
session, models.HostNetwork, **filters
)
def _get_hostnetwork(host_network_id, session=None, **kwargs):
    """Fetch a HostNetwork row by its integer id.

    :raises exception.InvalidParameter: when the id is not an integer.
    """
    if not isinstance(host_network_id, (int, long)):
        raise exception.InvalidParameter(
            'host network id %s type is not int compatible' % host_network_id
        )
    return utils.get_db_object(
        session, models.HostNetwork,
        id=host_network_id, **kwargs
    )
def _get_host_network(host_id, host_network_id, session=None, **kwargs):
    """Fetch a HostNetwork and verify it belongs to the given host.

    :raises exception.RecordNotExists: when the network exists but is
        owned by a different host.
    """
    owner = _get_host(host_id, session=session)
    network = _get_hostnetwork(host_network_id, session=session, **kwargs)
    if network.host_id != owner.id:
        raise exception.RecordNotExists(
            'host %s does not own host network %s' % (
                owner.id, network.id
            )
        )
    return network
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_host_network(
host_id, host_network_id,
user=None, session=None, **kwargs
):
"""Get host network."""
return _get_host_network(
host_id, host_network_id, session=session
)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
"""Get host network."""
return _get_hostnetwork(host_network_id, session=session)
@utils.supported_filters(
ADDED_NETWORK_FIELDS,
optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _add_host_network(
host_id, exception_when_existing=True,
session=None, user=None, interface=None, ip=None, **kwargs
):
"""Add hostnetwork to a host."""
host = _get_host(host_id, session=session)
check_host_editable(host, user=user)
return utils.add_db_object(
session, models.HostNetwork,
exception_when_existing,
host.id, interface, ip=ip, **kwargs
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_network(
host_id, exception_when_existing=True,
interface=None, user=None, session=None, **kwargs
):
"""Create a hostnetwork to a host."""
return _add_host_network(
host_id,
exception_when_existing,
interface=interface, session=session, user=user, **kwargs
)
def _get_hostnetwork_by_ip(
    ip, session=None, **kwargs
):
    """Fetch a HostNetwork row by its IP address.

    The IP is stored as an integer column (ip_int), so the dotted string
    is converted first. `long` is Python 2 only.
    """
    ip_int = long(netaddr.IPAddress(ip))
    return utils.get_db_object(
        session, models.HostNetwork,
        ip_int=ip_int, **kwargs
    )
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_networks(
exception_when_existing=False,
data=[], user=None, session=None
):
"""Create host networks."""
hosts = []
failed_hosts = []
for host_data in data:
host_id = host_data['host_id']
host = _get_host(host_id, session=session)
networks = host_data['networks']
host_networks = []
failed_host_networks = []
for network in networks:
host_network = _get_hostnetwork_by_ip(
network['ip'], session=session,
exception_when_missing=False
)
if (
host_network and not (
host_network.host_id == host.id and
host_network.interface == network['interface']
)
):
logging.error('ip %s exists in host network %s' % (
network['ip'], host_network.id
))
failed_host_networks.append(network)
else:
host_networks.append(_add_host_network(
host.id, exception_when_existing,
session=session, user=user, **network
))
if host_networks:
hosts.append({'host_id': host.id, 'networks': host_networks})
if failed_host_networks:
failed_hosts.append({
'host_id': host.id, 'networks': failed_host_networks
})
return {
'hosts': hosts,
'failed_hosts': failed_hosts
}
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _update_host_network(
    host_network, session=None, user=None, **kwargs
):
    """Update host network (internal helper).

    The owning host must be editable by the requesting user.
    """
    check_host_editable(host_network.host, user=user)
    return utils.update_db_object(session, host_network, **kwargs)
@utils.supported_filters(
optional_support_keys=UPDATED_NETWORK_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_NETWORK
)
def update_host_network(
host_id, host_network_id, user=None, session=None, **kwargs
):
"""Update a host network by host id and host network id."""
host = _get_host(
host_id, session=session
)
if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
logging.info("ignoring updating request for successful hosts")
return {}
host_network = _get_host_network(
host_id, host_network_id, session=session
)
return _update_host_network(
host_network, session=session, user=user, **kwargs
)
@utils.supported_filters(
optional_support_keys=UPDATED_NETWORK_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_ADD_HOST_NETWORK
)
def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
"""Update a host network by host network id."""
host_network = _get_hostnetwork(
host_network_id, session=session
)
return _update_host_network(
host_network, session=session, user=user, **kwargs
)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_host_network(
host_id, host_network_id, user=None,
session=None, **kwargs
):
"""Delete a host network by host id and host network id."""
host_network = _get_host_network(
host_id, host_network_id, session=session
)
check_host_editable(host_network.host, user=user)
return utils.del_db_object(session, host_network)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
"""Delete a host network by host network id."""
host_network = _get_hostnetwork(
host_network_id, session=session
)
check_host_editable(host_network.host, user=user)
return utils.del_db_object(session, host_network)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_GET_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def get_host_state(host_id, user=None, session=None, **kwargs):
"""Get host state info."""
return _get_host(host_id, session=session).state
@utils.supported_filters(
optional_support_keys=UPDATED_STATE_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def update_host_state(host_id, user=None, session=None, **kwargs):
"""Update a host state."""
host = _get_host(host_id, session=session)
utils.update_db_object(session, host.state, **kwargs)
return host.state
@util.deprecated
@utils.supported_filters(
optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(['status', 'host'])
def update_host_state_internal(
host_id, from_database_only=False,
user=None, session=None, **kwargs
):
"""Update a host state.
This function is called when host os is installed.
If from_database_only, the state is updated in database.
Otherwise a celery task sent to os installer and package installer
to do some future actions.
"""
# TODO(xicheng): should be merged into update_host_state
host = _get_host(host_id, session=session)
if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
ready_triggered = True
else:
ready_triggered = False
clusterhosts_ready = {}
clusters_os_ready = {}
if ready_triggered:
for clusterhost in host.clusterhosts:
cluster = clusterhost.cluster
if cluster.flavor_name:
clusterhosts_ready[cluster.id] = False
else:
clusterhosts_ready[cluster.id] = True
all_os_ready = True
for clusterhost_in_cluster in cluster.clusterhosts:
host_in_cluster = clusterhost_in_cluster.host
if host_in_cluster.id == host.id:
continue
if not host_in_cluster.state.ready:
all_os_ready = False
clusters_os_ready[cluster.id] = all_os_ready
logging.debug('host %s ready: %s', host_id, ready_triggered)
logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
logging.debug("clusters_os_ready is %s", clusters_os_ready)
if not ready_triggered or from_database_only:
logging.debug('%s state is set to %s', host.name, kwargs)
utils.update_db_object(session, host.state, **kwargs)
if not host.state.ready:
for clusterhost in host.clusterhosts:
utils.update_db_object(
session, clusterhost.state, ready=False
)
utils.update_db_object(
session, clusterhost.cluster.state, ready=False
)
status = '%s state is updated' % host.name
else:
from compass.tasks import client as celery_client
celery_client.celery.send_task(
'compass.tasks.os_installed',
(
host.id, clusterhosts_ready,
clusters_os_ready
)
)
status = '%s: clusterhosts ready %s clusters os ready %s' % (
host.name, clusterhosts_ready, clusters_os_ready
)
logging.info('action status: %s', status)
return {
'status': status,
'host': host.state
}
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_histories(host_id, user=None, session=None, **kwargs):
"""Get host log history."""
host = _get_host(host_id, session=session)
return utils.list_db_objects(
session, models.HostLogHistory, id=host.id, **kwargs
)
def _get_host_log_history(host_id, filename, session=None, **kwargs):
host = _get_host(host_id, session=session)
return utils.get_db_object(
session, models.HostLogHistory, id=host.id,
filename=filename, **kwargs
)
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
"""Get host log history."""
return _get_host_log_history(
host_id, filename, session=session
)
@utils.supported_filters(
optional_support_keys=UPDATED_LOG_FIELDS,
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def update_host_log_history(
host_id, filename, user=None,
session=None, **kwargs
):
"""Update a host log history."""
host_log_history = _get_host_log_history(
host_id, filename, session=session
)
return utils.update_db_object(session, host_log_history, **kwargs)
@utils.supported_filters(
    ADDED_LOG_FIELDS,
    optional_support_keys=UPDATED_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def add_host_log_history(
    host_id, exception_when_existing=False,
    filename=None, user=None, session=None, **kwargs
):
    """add a host log history."""
    # Resolve the owning host first so a bad host_id fails early.
    owning_host = _get_host(host_id, session=session)
    new_log = utils.add_db_object(
        session, models.HostLogHistory, exception_when_existing,
        owning_host.id, filename, **kwargs
    )
    return new_log
@utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweron_host(
    host_id, poweron={}, user=None, session=None, **kwargs
):
    """power on host."""
    from compass.tasks import client as celery_client
    target = _get_host(host_id, session=session)
    # Only validated hosts may be powered on.
    check_host_validated(target)
    celery_client.celery.send_task(
        'compass.tasks.poweron_host', (target.id,)
    )
    result = {
        'status': 'poweron %s action sent' % target.name,
        'host': target
    }
    return result
@utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweroff_host(
    host_id, poweroff={}, user=None, session=None, **kwargs
):
    """power off host."""
    from compass.tasks import client as celery_client
    target = _get_host(host_id, session=session)
    # Only validated hosts may be powered off.
    check_host_validated(target)
    celery_client.celery.send_task(
        'compass.tasks.poweroff_host', (target.id,)
    )
    result = {
        'status': 'poweroff %s action sent' % target.name,
        'host': target
    }
    return result
@utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def reset_host(
    host_id, reset={}, user=None, session=None, **kwargs
):
    """reset host."""
    from compass.tasks import client as celery_client
    target = _get_host(host_id, session=session)
    # Only validated hosts may be reset.
    check_host_validated(target)
    celery_client.celery.send_task(
        'compass.tasks.reset_host', (target.id,)
    )
    result = {
        'status': 'reset %s action sent' % target.name,
        'host': target
    }
    return result
| # Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Host database operations."""
import functools
import logging
import netaddr
import re
from compass.db.api import database
from compass.db.api import metadata_holder as metadata_api
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import util
# Filter keys accepted when listing hosts.
SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac']
# Filter keys accepted when listing machines-or-hosts.
SUPPORTED_MACHINE_HOST_FIELDS = [
    'mac', 'tag', 'location', 'os_name', 'os_id'
]
# Filter keys accepted when listing host networks.
# NOTE(review): "NETOWORK" is a long-standing typo; kept because other
# modules may import this name.
SUPPORTED_NETOWORK_FIELDS = [
    'interface', 'ip', 'is_mgmt', 'is_promiscuous'
]
# Fields exposed in host API responses.
RESP_FIELDS = [
    'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
    'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
    'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
    'created_at', 'updated_at'
]
# Fields exposed when listing the clusters a host belongs to.
RESP_CLUSTER_FIELDS = [
    'id', 'name', 'os_name', 'reinstall_distributed_system',
    'owner', 'adapter_name', 'flavor_name',
    'distributed_system_installed', 'created_at', 'updated_at'
]
# Fields exposed for host network responses.
RESP_NETWORK_FIELDS = [
    'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
    'created_at', 'updated_at'
]
# Fields exposed for host config responses.
# NOTE(review): 'config_setp' looks like a typo of 'config_step', but it
# is a response key consumed by clients — confirm before renaming.
RESP_CONFIG_FIELDS = [
    'os_config',
    'config_setp',
    'config_validated',
    'networks',
    'created_at',
    'updated_at'
]
# Fields exposed for deployed-config responses.
RESP_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Fields exposed for deploy-action responses (poweron/poweroff/reset/del).
RESP_DEPLOY_FIELDS = [
    'status', 'host'
]
# Host attributes that may be updated through the API.
UPDATED_FIELDS = ['name', 'reinstall_os']
# Keys used internally to distinguish PUT vs PATCH of os_config.
UPDATED_CONFIG_FIELDS = [
    'put_os_config'
]
PATCHED_CONFIG_FIELDS = [
    'patched_os_config'
]
UPDATED_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Required / optional keys when adding a host network.
ADDED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id'
]
OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
UPDATED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
    'is_promiscuous'
]
# Keys silently dropped from update payloads (server-managed).
IGNORE_FIELDS = [
    'id', 'created_at', 'updated_at'
]
# Host state response / update keys.
RESP_STATE_FIELDS = [
    'id', 'state', 'percentage', 'message', 'severity', 'ready'
]
UPDATED_STATE_FIELDS = [
    'state', 'percentage', 'message', 'severity'
]
# Only the backend may toggle 'ready' (see update_host_state_internal).
UPDATED_STATE_INTERNAL_FIELDS = [
    'ready'
]
# Host installation-log history keys.
RESP_LOG_FIELDS = [
    'id', 'filename', 'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
ADDED_LOG_FIELDS = [
    'filename'
]
UPDATED_LOG_FIELDS = [
    'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_hosts(user=None, session=None, **filters):
    """List hosts."""
    matched_hosts = utils.list_db_objects(
        session, models.Host, **filters
    )
    return matched_hosts
@utils.supported_filters(
    optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.output_filters(
    missing_ok=True,
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback,
    os_name=utils.general_filter_callback,
    os_id=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_machines_or_hosts(user=None, session=None, **filters):
    """List machines or hosts if possible."""
    machines = utils.list_db_objects(
        session, models.Machine, **filters
    )
    # Prefer the host record when the machine already backs a host.
    return [
        machine.host if machine.host else machine
        for machine in machines
    ]
def _get_host(host_id, session=None, **kwargs):
    """Get host by id, rejecting non-integer ids up front."""
    # Guard clause: ids must be integer compatible (Python 2 int/long).
    if not isinstance(host_id, (int, long)):
        raise exception.InvalidParameter(
            'host id %s type is not int compatible' % host_id
        )
    return utils.get_db_object(
        session, models.Host,
        id=host_id, **kwargs
    )
def get_host_internal(host_id, session=None, **kwargs):
    """Fetch a host for other db/api modules.

    Thin public wrapper around _get_host.
    """
    found_host = _get_host(host_id, session=session, **kwargs)
    return found_host
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get host info."""
    return _get_host(
        host_id, session=session,
        exception_when_missing=exception_when_missing
    )
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_machine_or_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get machine or host if possible."""
    from compass.db.api import machine as machine_api
    machine = machine_api.get_machine_internal(
        host_id, session=session,
        exception_when_missing=exception_when_missing
    )
    # Return the richer host record when one backs this machine.
    return machine.host if machine.host else machine
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CLUSTERS
)
@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
def get_host_clusters(host_id, user=None, session=None, **kwargs):
    """get host clusters."""
    target_host = _get_host(host_id, session=session)
    clusters = []
    for membership in target_host.clusterhosts:
        clusters.append(membership.cluster)
    return clusters
def check_host_validated(host):
    """Raise Forbidden unless the host config has been validated."""
    if host.config_validated:
        return
    raise exception.Forbidden(
        'host %s is not validated' % host.name
    )
def check_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Check host is editable.

    If we try to set reinstall_os or check the host is not in installing
    state, we should set check_in_installing to True.
    Otherwise we will check the host is not in installing or installed.
    We also make sure the user is admin or the owner of the host to avoid
    unauthorized user to update host attributes.

    :raises exception.Forbidden: when the host is not editable or the
        user lacks ownership/admin rights.
    """
    if check_in_installing:
        # Caller only requires the host not be mid-installation.
        if host.state.state == 'INSTALLING':
            raise exception.Forbidden(
                'host %s is not editable '
                'when state is in installing' % host.name
            )
    # NOTE: this elif pairs with `if check_in_installing` — when
    # check_in_installing is False, an installed host is only editable
    # if it is flagged for OS reinstall.
    elif not host.reinstall_os:
        raise exception.Forbidden(
            'host %s is not editable '
            'when not to be reinstalled' % host.name
        )
    # Ownership check applies in both modes (skipped when user is None).
    if user and not user.is_admin and host.creator_id != user.id:
        raise exception.Forbidden(
            'host %s is not editable '
            'when user is not admin or the owner of the host' % host.name
        )
def is_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Return True when the host is editable, False otherwise.

    Boolean wrapper over check_host_editable.
    """
    try:
        check_host_editable(
            host, user=user,
            check_in_installing=check_in_installing
        )
    except exception.Forbidden:
        return False
    return True
def validate_host(host):
    """Validate host before deployment.

    Makes sure the hostname is set, the host has at least one network,
    exactly one network is the mgmt interface, and the mgmt interface
    is not in promiscuous mode.

    :raises exception.InvalidParameter: when any check fails.
    """
    if not host.hostname:
        # Bug fix: was exception.Invalidparameter (wrong case), which
        # would raise AttributeError instead of the intended error.
        raise exception.InvalidParameter(
            'host %s does not set hostname' % host.name
        )
    if not host.host_networks:
        raise exception.InvalidParameter(
            'host %s does not have any network' % host.name
        )
    mgmt_interface_set = False
    for host_network in host.host_networks:
        if host_network.is_mgmt:
            if mgmt_interface_set:
                raise exception.InvalidParameter(
                    'host %s multi interfaces set mgmt ' % host.name
                )
            if host_network.is_promiscuous:
                raise exception.InvalidParameter(
                    'host %s interface %s is mgmt but promiscuous' % (
                        host.name, host_network.interface
                    )
                )
            mgmt_interface_set = True
    if not mgmt_interface_set:
        raise exception.InvalidParameter(
            'host %s has no mgmt interface' % host.name
        )
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(name=utils.check_name)
@utils.wrap_to_dict(RESP_FIELDS)
def _update_host(host_id, session=None, user=None, **kwargs):
    """Update a host internal."""
    target_host = _get_host(host_id, session=session)
    # A successfully installed host not marked for reinstall is
    # immutable: silently skip the update.
    install_done = (
        target_host.state.state == "SUCCESSFUL" and
        not target_host.reinstall_os
    )
    if install_done:
        logging.info("ignoring successful host: %s", host_id)
        return {}
    check_host_editable(
        target_host, user=user,
        check_in_installing=kwargs.get('reinstall_os', False)
    )
    return utils.update_db_object(session, target_host, **kwargs)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_host(host_id, user=None, session=None, **kwargs):
    """Update a host (permission-checked wrapper over _update_host)."""
    return _update_host(
        host_id, session=session, user=user, **kwargs
    )
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_hosts(data=None, user=None, session=None):
    """Update hosts in batch.

    :param data: list of dicts, each containing the kwargs accepted by
        _update_host (including host_id).
    :returns: list of updated host dicts.
    """
    # TODO(xicheng): this batch function is not similar as others.
    # try to make it similar output as others and batch update should
    # tolerate partial failure.
    # Fix: avoid the shared mutable default argument ([]) anti-pattern.
    if data is None:
        data = []
    return [
        _update_host(session=session, user=user, **host_data)
        for host_data in data
    ]
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST
)
@utils.wrap_to_dict(
    RESP_FIELDS + ['status', 'host'],
    host=RESP_FIELDS
)
def del_host(
    host_id, force=False, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Delete a host.

    If force, we delete the host anyway.
    If from_database_only, we only delete the host record in databaes.
    Otherwise we send to del host task to celery to delete the host
    record in os installer and package installer, clean installation logs
    and at last clean database record.
    The backend will call this function again after it deletes the record
    in os installer and package installer with from_database_only set.

    :param force: delete even if installing/installed (state forced
        to ERROR so the editable check passes).
    :param from_database_only: skip the celery cleanup task.
    :returns: the deleted host dict, or a {'status', 'host'} dict when
        the deletion was handed off to celery.
    """
    from compass.db.api import cluster as cluster_api
    host = _get_host(host_id, session=session)
    # force set host state to ERROR when we want to delete the
    # host anyway even the host is in installing or already
    # installed. It let the api know the deleting is in doing when backend
    # is doing the real deleting. In future we may import a new state like
    # INDELETE to indicate the deleting is processing.
    # We need discuss about if we can delete a host when it is already
    # installed by api.
    if host.state.state != 'UNINITIALIZED' and force:
        host.state.state = 'ERROR'
    # Raises Forbidden when the host is mid-install (unless forced above)
    # or the user lacks rights.
    check_host_editable(
        host, user=user,
        check_in_installing=True
    )
    cluster_ids = []
    for clusterhost in host.clusterhosts:
        if clusterhost.state.state != 'UNINITIALIZED' and force:
            clusterhost.state.state = 'ERROR'
        # TODO(grace): here we check all clusters which use this host editable.
        # Because in backend we do not have functions to delete host without
        # reference its cluster. After deleting pure host supported in backend,
        # we should change code here to is_cluster_editable.
        # Here delete a host may fail even we set force flag.
        cluster_api.check_cluster_editable(
            clusterhost.cluster, user=user,
            check_in_installing=True
        )
        cluster_ids.append(clusterhost.cluster_id)
    # Delete host record directly if there is no need to delete it
    # in backend or from_database_only is set.
    if host.state.state == 'UNINITIALIZED' or from_database_only:
        return utils.del_db_object(session, host)
    else:
        logging.info(
            'send del host %s task to celery', host_id
        )
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.delete_host',
            (
                user.email, host.id, cluster_ids
            )
        )
        return {
            'status': 'delete action sent',
            'host': host,
        }
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def get_host_config(host_id, user=None, session=None, **kwargs):
    """Get host config (wrap_to_dict extracts the config fields)."""
    config_host = _get_host(host_id, session=session)
    return config_host
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
    """Get host deployed config (wrap_to_dict extracts the fields)."""
    deployed_host = _get_host(host_id, session=session)
    return deployed_host
# replace os_config to deployed_os_config in kwargs.
@utils.replace_filters(
    os_config='deployed_os_config'
)
@utils.supported_filters(
    UPDATED_DEPLOYED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
    """Update host deployed config."""
    target_host = _get_host(host_id, session=session)
    # The host must be editable and already validated before the
    # deployed config may be written back.
    check_host_editable(target_host, user=user)
    check_host_validated(target_host)
    return utils.update_db_object(session, target_host, **kwargs)
def _host_os_config_validates(
    config, host, session=None, user=None, **kwargs
):
    """Check host os config's validation.

    Validates *config* against the metadata schema for the host's OS.
    session/user/kwargs are accepted for decorator-callback signature
    compatibility and are unused here.
    """
    metadata_api.validate_os_config(
        config, host.os_id
    )
@utils.input_validates_with_args(
    put_os_config=_host_os_config_validates
)
@utils.output_validates_with_args(
    os_config=_host_os_config_validates
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def _update_host_config(host, session=None, user=None, **kwargs):
    """Update host config after an editability check.

    Input/output validators above check the os_config both before
    and after the write.
    """
    check_host_editable(host, user=user)
    updated = utils.update_db_object(session, host, **kwargs)
    return updated
# replace os_config to put_os_config in kwargs.
# It tells db the os_config will be updated not patched.
@utils.replace_filters(
    os_config='put_os_config'
)
@utils.supported_filters(
    UPDATED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
def update_host_config(host_id, user=None, session=None, **kwargs):
    """Update (PUT semantics) host os config."""
    config_host = _get_host(host_id, session=session)
    return _update_host_config(
        config_host, session=session, user=user, **kwargs
    )
# replace os_config to patched_os_config in kwargs.
# It tells db os_config will be patched not be updated.
@utils.replace_filters(
    os_config='patched_os_config'
)
@utils.supported_filters(
    PATCHED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
def patch_host_config(host_id, user=None, session=None, **kwargs):
    """Patch (merge semantics) host os config."""
    config_host = _get_host(host_id, session=session)
    return _update_host_config(
        config_host, session=session, user=user, **kwargs
    )
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def del_host_config(host_id, user=None, session=None):
    """delete a host config.

    Deleting here means resetting os_config to {} and clearing the
    validated flag, not removing the host row.
    """
    config_host = _get_host(host_id, session=session)
    check_host_editable(config_host, user=user)
    return utils.update_db_object(
        session, config_host, os_config={}, config_validated=False
    )
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_host_networks(host_id, user=None, session=None, **filters):
    """Get host networks for a host."""
    owning_host = _get_host(host_id, session=session)
    networks = utils.list_db_objects(
        session, models.HostNetwork,
        host_id=owning_host.id, **filters
    )
    return networks
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_hostnetworks(user=None, session=None, **filters):
    """Get host networks across all hosts."""
    networks = utils.list_db_objects(
        session, models.HostNetwork, **filters
    )
    return networks
def _get_hostnetwork(host_network_id, session=None, **kwargs):
    """Get hostnetwork by hostnetwork id."""
    # Guard clause: ids must be integer compatible (Python 2 int/long).
    if not isinstance(host_network_id, (int, long)):
        raise exception.InvalidParameter(
            'host network id %s type is not int compatible' % host_network_id
        )
    return utils.get_db_object(
        session, models.HostNetwork,
        id=host_network_id, **kwargs
    )
def _get_host_network(host_id, host_network_id, session=None, **kwargs):
    """Get hostnetwork by host id and hostnetwork id.

    Raises RecordNotExists when the network exists but belongs
    to a different host.
    """
    owning_host = _get_host(host_id, session=session)
    network = _get_hostnetwork(host_network_id, session=session, **kwargs)
    if network.host_id != owning_host.id:
        raise exception.RecordNotExists(
            'host %s does not own host network %s' % (
                owning_host.id, network.id
            )
        )
    return network
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_host_network(
    host_id, host_network_id,
    user=None, session=None, **kwargs
):
    """Get host network, verifying host ownership."""
    network = _get_host_network(
        host_id, host_network_id, session=session
    )
    return network
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Get host network by its own id only."""
    network = _get_hostnetwork(host_network_id, session=session)
    return network
@utils.supported_filters(
    ADDED_NETWORK_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _add_host_network(
    host_id, exception_when_existing=True,
    session=None, user=None, interface=None, ip=None, **kwargs
):
    """Add hostnetwork to a host.

    :param exception_when_existing: when True, an existing
        (host, interface) record raises instead of being returned.
    :raises exception.Forbidden: when the host is not editable.
    """
    host = _get_host(host_id, session=session)
    check_host_editable(host, user=user)
    return utils.add_db_object(
        session, models.HostNetwork,
        exception_when_existing,
        host.id, interface, ip=ip, **kwargs
    )
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_network(
    host_id, exception_when_existing=True,
    interface=None, user=None, session=None, **kwargs
):
    """Create a hostnetwork to a host."""
    created = _add_host_network(
        host_id,
        exception_when_existing,
        interface=interface, session=session, user=user, **kwargs
    )
    return created
def _get_hostnetwork_by_ip(
    ip, session=None, **kwargs
):
    """Get the HostNetwork record whose ip matches *ip*.

    :param ip: dotted-quad ip string; converted to its integer form
        because the table indexes/stores ip_int.
    """
    ip_int = long(netaddr.IPAddress(ip))
    return utils.get_db_object(
        session, models.HostNetwork,
        ip_int=ip_int, **kwargs
    )
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_networks(
    exception_when_existing=False,
    data=None, user=None, session=None
):
    """Create host networks in batch.

    :param data: list of dicts shaped like
        {'host_id': <id>, 'networks': [<network fields>, ...]}.
    :returns: dict with 'hosts' (networks added per host) and
        'failed_hosts' (networks whose ip is already bound to a
        different host/interface).
    """
    # Fix: avoid the shared mutable default argument ([]) anti-pattern.
    if data is None:
        data = []
    hosts = []
    failed_hosts = []
    for host_data in data:
        host_id = host_data['host_id']
        host = _get_host(host_id, session=session)
        networks = host_data['networks']
        host_networks = []
        failed_host_networks = []
        for network in networks:
            host_network = _get_hostnetwork_by_ip(
                network['ip'], session=session,
                exception_when_missing=False
            )
            # An existing ip only conflicts when it belongs to another
            # host or another interface of the same host.
            if (
                host_network and not (
                    host_network.host_id == host.id and
                    host_network.interface == network['interface']
                )
            ):
                # Fix: lazy %-args instead of eager string formatting.
                logging.error(
                    'ip %s exists in host network %s',
                    network['ip'], host_network.id
                )
                failed_host_networks.append(network)
            else:
                host_networks.append(_add_host_network(
                    host.id, exception_when_existing,
                    session=session, user=user, **network
                ))
        if host_networks:
            hosts.append({'host_id': host.id, 'networks': host_networks})
        if failed_host_networks:
            failed_hosts.append({
                'host_id': host.id, 'networks': failed_host_networks
            })
    return {
        'hosts': hosts,
        'failed_hosts': failed_hosts
    }
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _update_host_network(
    host_network, session=None, user=None, **kwargs
):
    """Update host network after checking the owning host is editable."""
    check_host_editable(host_network.host, user=user)
    updated = utils.update_db_object(session, host_network, **kwargs)
    return updated
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def update_host_network(
    host_id, host_network_id, user=None, session=None, **kwargs
):
    """Update a host network by host id and host network id."""
    owning_host = _get_host(host_id, session=session)
    # Successfully installed hosts not flagged for reinstall are
    # immutable; silently ignore the request.
    if owning_host.state.state == "SUCCESSFUL" and (
        not owning_host.reinstall_os
    ):
        logging.info("ignoring updating request for successful hosts")
        return {}
    target_network = _get_host_network(
        host_id, host_network_id, session=session
    )
    return _update_host_network(
        target_network, session=session, user=user, **kwargs
    )
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Update a host network by host network id."""
    target_network = _get_hostnetwork(
        host_network_id, session=session
    )
    return _update_host_network(
        target_network, session=session, user=user, **kwargs
    )
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_host_network(
    host_id, host_network_id, user=None,
    session=None, **kwargs
):
    """Delete a host network by host id and host network id."""
    target_network = _get_host_network(
        host_id, host_network_id, session=session
    )
    # The owning host must be editable before its network is removed.
    check_host_editable(target_network.host, user=user)
    return utils.del_db_object(session, target_network)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Delete a host network by host network id."""
    target_network = _get_hostnetwork(
        host_network_id, session=session
    )
    # The owning host must be editable before its network is removed.
    check_host_editable(target_network.host, user=user)
    return utils.del_db_object(session, target_network)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def get_host_state(host_id, user=None, session=None, **kwargs):
    """Get host state info."""
    target_host = _get_host(host_id, session=session)
    return target_host.state
@utils.supported_filters(
    optional_support_keys=UPDATED_STATE_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def update_host_state(host_id, user=None, session=None, **kwargs):
    """Update a host state and return the refreshed state object."""
    target_host = _get_host(host_id, session=session)
    utils.update_db_object(session, target_host.state, **kwargs)
    return target_host.state
@util.deprecated
@utils.supported_filters(
    optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(['status', 'host'])
def update_host_state_internal(
    host_id, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a host state.

    This function is called when host os is installed.
    If from_database_only, the state is updated in database.
    Otherwise a celery task sent to os installer and package installer
    to do some future actions.

    :param from_database_only: only persist the state change; do not
        dispatch the os_installed celery task.
    :returns: {'status': <message>, 'host': <host state object>}.
    """
    # TODO(xicheng): should be merged into update_host_state
    host = _get_host(host_id, session=session)
    # 'ready' transitioning False -> True is what triggers downstream
    # cluster-readiness bookkeeping below.
    if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    clusterhosts_ready = {}
    clusters_os_ready = {}
    if ready_triggered:
        for clusterhost in host.clusterhosts:
            cluster = clusterhost.cluster
            # A cluster with a flavor still needs package installation,
            # so its clusterhost is not yet ready.
            if cluster.flavor_name:
                clusterhosts_ready[cluster.id] = False
            else:
                clusterhosts_ready[cluster.id] = True
            # The cluster's os is ready when every *other* host in the
            # cluster is already ready (this host is becoming ready now).
            all_os_ready = True
            for clusterhost_in_cluster in cluster.clusterhosts:
                host_in_cluster = clusterhost_in_cluster.host
                if host_in_cluster.id == host.id:
                    continue
                if not host_in_cluster.state.ready:
                    all_os_ready = False
            clusters_os_ready[cluster.id] = all_os_ready
    logging.debug('host %s ready: %s', host_id, ready_triggered)
    logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
    logging.debug("clusters_os_ready is %s", clusters_os_ready)
    if not ready_triggered or from_database_only:
        # Database-only path: persist the change and, when the host is
        # no longer ready, cascade ready=False to its clusterhosts and
        # their clusters.
        logging.debug('%s state is set to %s', host.name, kwargs)
        utils.update_db_object(session, host.state, **kwargs)
        if not host.state.ready:
            for clusterhost in host.clusterhosts:
                utils.update_db_object(
                    session, clusterhost.state, ready=False
                )
                utils.update_db_object(
                    session, clusterhost.cluster.state, ready=False
                )
        status = '%s state is updated' % host.name
    else:
        # Dispatch path: let the backend task update installers; note
        # the database state itself is NOT updated here.
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.os_installed',
            (
                host.id, clusterhosts_ready,
                clusters_os_ready
            )
        )
        status = '%s: clusterhosts ready %s clusters os ready %s' % (
            host.name, clusterhosts_ready, clusters_os_ready
        )
        logging.info('action status: %s', status)
    return {
        'status': status,
        'host': host.state
    }
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_histories(host_id, user=None, session=None, **kwargs):
    """List all log history records belonging to the given host."""
    target_host = _get_host(host_id, session=session)
    return utils.list_db_objects(
        session, models.HostLogHistory,
        id=target_host.id, **kwargs
    )
def _get_host_log_history(host_id, filename, session=None, **kwargs):
    """Look up one log history record of a host by filename.

    Internal helper shared by the public log-history APIs.
    """
    owning_host = _get_host(host_id, session=session)
    return utils.get_db_object(
        session, models.HostLogHistory,
        id=owning_host.id, filename=filename, **kwargs
    )
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
    """Fetch a single host log history record identified by filename."""
    return _get_host_log_history(host_id, filename, session=session)
@utils.supported_filters(
    optional_support_keys=UPDATED_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def update_host_log_history(
    host_id, filename, user=None,
    session=None, **kwargs
):
    """Apply field updates to an existing host log history record."""
    log_record = _get_host_log_history(host_id, filename, session=session)
    return utils.update_db_object(session, log_record, **kwargs)
@utils.supported_filters(
    ADDED_LOG_FIELDS,
    optional_support_keys=UPDATED_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def add_host_log_history(
    host_id, exception_when_existing=False,
    filename=None, user=None, session=None, **kwargs
):
    """Create a log history record attached to the given host."""
    owner = _get_host(host_id, session=session)
    return utils.add_db_object(
        session, models.HostLogHistory,
        exception_when_existing, owner.id, filename, **kwargs
    )
@utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweron_host(
    host_id, poweron=None, user=None, session=None, **kwargs
):
    """Power on a host.

    Validates the host and dispatches an asynchronous poweron task.

    :param host_id: id of the host to power on.
    :param poweron: optional poweron parameters; currently unused.
        Default changed from a mutable ``{}`` to ``None`` (the value is
        never read, so behavior is unchanged).
    :returns: dict with the action status message and the host object.
    """
    # NOTE(review): imported here rather than at module level,
    # presumably to avoid a circular import with compass.tasks.
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    check_host_validated(host)
    celery_client.celery.send_task(
        'compass.tasks.poweron_host',
        (host.id,)
    )
    return {
        'status': 'poweron %s action sent' % host.name,
        'host': host
    }
@utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweroff_host(
    host_id, poweroff=None, user=None, session=None, **kwargs
):
    """Power off a host.

    Validates the host and dispatches an asynchronous poweroff task.

    :param host_id: id of the host to power off.
    :param poweroff: optional poweroff parameters; currently unused.
        Default changed from a mutable ``{}`` to ``None`` (the value is
        never read, so behavior is unchanged).
    :returns: dict with the action status message and the host object.
    """
    # NOTE(review): imported here rather than at module level,
    # presumably to avoid a circular import with compass.tasks.
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    check_host_validated(host)
    celery_client.celery.send_task(
        'compass.tasks.poweroff_host',
        (host.id,)
    )
    return {
        'status': 'poweroff %s action sent' % host.name,
        'host': host
    }
@utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def reset_host(
    host_id, reset=None, user=None, session=None, **kwargs
):
    """Reset a host.

    Validates the host and dispatches an asynchronous reset task.

    :param host_id: id of the host to reset.
    :param reset: optional reset parameters; currently unused.
        Default changed from a mutable ``{}`` to ``None`` (the value is
        never read, so behavior is unchanged).
    :returns: dict with the action status message and the host object.
    """
    # NOTE(review): imported here rather than at module level,
    # presumably to avoid a circular import with compass.tasks.
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    check_host_validated(host)
    celery_client.celery.send_task(
        'compass.tasks.reset_host',
        (host.id,)
    )
    return {
        'status': 'reset %s action sent' % host.name,
        'host': host
    }
| en | 0.891232 | # Copyright 2014 Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Host database operations. List hosts. List machines or hosts if possible. Get host by id. Helper function to get host. Used by other files under db/api. get host info. get machine or host if possible. get host clusters. Check host is validated. Check host is editable. If we try to set reinstall_os or check the host is not in installing state, we should set check_in_installing to True. Otherwise we will check the host is not in installing or installed. We also make sure the user is admin or the owner of the host to avoid unauthorized user to update host attributes. Get if host is editable. Validate host. Makesure hostname is not empty, there is only one mgmt network, The mgmt network is not in promiscuous mode. Update a host internal. Update a host. Update hosts. # TODO(xicheng): this batch function is not similar as others. # try to make it similar output as others and batch update should # tolerate partial failure. Delete a host. If force, we delete the host anyway. If from_database_only, we only delete the host record in databaes. Otherwise we send to del host task to celery to delete the host record in os installer and package installer, clean installation logs and at last clean database record. The backend will call this function again after it deletes the record in os installer and package installer with from_database_only set. 
# force set host state to ERROR when we want to delete the # host anyway even the host is in installing or already # installed. It let the api know the deleting is in doing when backend # is doing the real deleting. In future we may import a new state like # INDELETE to indicate the deleting is processing. # We need discuss about if we can delete a host when it is already # installed by api. # TODO(grace): here we check all clusters which use this host editable. # Because in backend we do not have functions to delete host without # reference its cluster. After deleting pure host supported in backend, # we should change code here to is_cluster_editable. # Here delete a host may fail even we set force flag. # Delete host record directly if there is no need to delete it # in backend or from_database_only is set. Get host config. Get host deployed config. # replace os_config to deployed_os_config in kwargs. Update host deployed config. Check host os config's validation. Update host config. # replace os_config to put_os_config in kwargs. # It tells db the os_config will be updated not patched. Update host config. # replace os_config to patched_os_config in kwargs. # It tells db os_config will be patched not be updated. Patch host config. delete a host config. Get host networks for a host. Get host networks. Get hostnetwork by hostnetwork id. Get hostnetwork by host id and hostnetwork id. Get host network. Get host network. Add hostnetwork to a host. Create a hostnetwork to a host. Create host networks. Update host network. Update a host network by host id and host network id. Update a host network by host network id. Delete a host network by host id and host network id. Delete a host network by host network id. Get host state info. Update a host state. Update a host state. This function is called when host os is installed. If from_database_only, the state is updated in database. 
Otherwise a celery task sent to os installer and package installer to do some future actions. # TODO(xicheng): should be merged into update_host_state Get host log history. Get host log history. Update a host log history. add a host log history. power on host. power off host. reset host. | 1.474991 | 1 |
tasks/run_monthly_tasks.py | GilHoggarth/ukwa-manage | 1 | 6617980 | <filename>tasks/run_monthly_tasks.py
#!/usr/bin/env python
# encoding: utf-8
"""
This module summarises the tasks that are to be run monthly.
"""
import luigi
from tasks.ingest.external_data_sources import NominetDomainListToHDFS
class MonthlyIngestTasks(luigi.WrapperTask):
"""
Monthly ingest tasks.
"""
def requires(self):
return [NominetDomainListToHDFS()]
if __name__ == '__main__':
# Running from Python, but using the Luigi scheduler:
luigi.run(['MonthlyIngestTasks'])
| <filename>tasks/run_monthly_tasks.py
#!/usr/bin/env python
# encoding: utf-8
"""
This module summarises the tasks that are to be run monthly.
"""
import luigi
from tasks.ingest.external_data_sources import NominetDomainListToHDFS
class MonthlyIngestTasks(luigi.WrapperTask):
"""
Monthly ingest tasks.
"""
def requires(self):
return [NominetDomainListToHDFS()]
if __name__ == '__main__':
# Running from Python, but using the Luigi scheduler:
luigi.run(['MonthlyIngestTasks'])
| en | 0.721399 | #!/usr/bin/env python # encoding: utf-8 This module summarises the tasks that are to be run monthly. Monthly ingest tasks. # Running from Python, but using the Luigi scheduler: | 2.182918 | 2 |
list01/ex14.py | FlavioBrusamolin/py-scripts | 0 | 6617981 | weight = float(input('Enter your weight: '))
height = float(input('Enter your height: '))
imc = weight / (height * height)
if imc < 18.5:
print('Under weight')
elif imc < 25:
print('Ideal weight')
elif imc < 30:
print('Overweight')
elif imc < 40:
print('Obesity')
else:
print('Morbid obesity')
| weight = float(input('Enter your weight: '))
height = float(input('Enter your height: '))
imc = weight / (height * height)
if imc < 18.5:
print('Under weight')
elif imc < 25:
print('Ideal weight')
elif imc < 30:
print('Overweight')
elif imc < 40:
print('Obesity')
else:
print('Morbid obesity')
| none | 1 | 4.118151 | 4 | |
targeted/sitecore.py | t4skforce/zapscripts | 0 | 6617982 | """
Targeted scripts can only be invoked by you, the user, eg via a right-click option on the Sites or History tabs
"""
from org.parosproxy.paros.network import HttpSender
from org.parosproxy.paros.model import Model
from org.parosproxy.paros.extension.history import ExtensionHistory
from org.parosproxy.paros.control import Control
from org.parosproxy.paros.model import HistoryReference
from org.parosproxy.paros.view import View
from java.awt import EventQueue
from org.apache.commons.httpclient import URI
from org.zaproxy.zap.extension.alert import ExtensionAlert
from org.parosproxy.paros.core.scanner import Alert
paths = ["/App_Config","/App_Config/ConnectionStrings.config","/sitecore/","/sitecore/admin","/sitecore/admin/login.aspx","/sitecore/debug","/sitecore/default.aspx","/sitecore/login","/sitecore/login.aspx","/sitecore/login/default.aspx","/sitecore/shell/WebService","/sitecore/shell/webservice/service.asmx","/sitecore/shell/webservice/service2.asmx","/sitecore/shell/sitecore.version.xml","/sitecore/service"]
def addToHistory(msg):
extHistory = Control.getSingleton().getExtensionLoader().getExtension(ExtensionHistory.NAME)
if extHistory:
historyRef = HistoryReference(Model.getSingleton().getSession(), HistoryReference.TYPE_PROXIED, msg);
historyRef.addTag("Sitecore")
if View.isInitialised():
extHistory.addHistory(historyRef);
Model.getSingleton().getSession().getSiteTree().addPath(historyRef, msg);
return historyRef
# risk: 0: info, 1: low, 2: medium, 3: high
# reliability: 0: falsePassitive, 1: suspicious, 2: warning
def raiseAlert(msg, risk=0, confidence=0, name="", description="", param=None, attack="", otherInfo="", solution="", evidence="", reference="", cweId=-1, wascId=-1):
extAlert = Control.getSingleton().getExtensionLoader().getExtension(ExtensionAlert.NAME)
if extAlert and msg:
href = addToHistory(msg)
alert = Alert(1337,risk,confidence,name)
alert.setDescription(description)
alert.setParam(param)
alert.setAttack(attack)
alert.setOtherInfo(otherInfo)
alert.setSolution(solution)
alert.setEvidence(evidence)
alert.setCweId(cweId)
alert.setWascId(wascId)
alert.setReference(reference)
alert.setHistoryRef(href)
alert.setMessage(msg)
alert.setUri(msg.getRequestHeader().getURI().toString())
extAlert.alertFound(alert,href)
def invokeWith(msg):
sender = HttpSender(Model.getSingleton().getOptionsParam().getConnectionParam(), True, 6)
uri = msg.getRequestHeader().getURI()
for path in paths:
reqUri = URI(uri.getScheme(),uri.getAuthority(),path,None,None)
req = msg.cloneRequest()
req.getRequestHeader().setURI(reqUri)
sender.sendAndReceive(req,False)
statusCode = req.getResponseHeader().getStatusCode()
if statusCode in [200, 401, 403, 500]:
raiseAlert(req, 3, 2, 'Sitecore default Page exposure', path+' should not be anonymously reachable. Allows for Information Disclosure.', solution="Follow Sitecore Security Hardening Guide, see references", evidence=req.getResponseHeader().getPrimeHeader(), reference="https://doc.sitecore.com/SdnArchive/upload/sitecore7/75/sitecore_security_hardening_guide-sc75-usletter.pdf")
addToHistory(req)
print(str(statusCode)+" - "+path)
| """
Targeted scripts can only be invoked by you, the user, eg via a right-click option on the Sites or History tabs
"""
from org.parosproxy.paros.network import HttpSender
from org.parosproxy.paros.model import Model
from org.parosproxy.paros.extension.history import ExtensionHistory
from org.parosproxy.paros.control import Control
from org.parosproxy.paros.model import HistoryReference
from org.parosproxy.paros.view import View
from java.awt import EventQueue
from org.apache.commons.httpclient import URI
from org.zaproxy.zap.extension.alert import ExtensionAlert
from org.parosproxy.paros.core.scanner import Alert
paths = ["/App_Config","/App_Config/ConnectionStrings.config","/sitecore/","/sitecore/admin","/sitecore/admin/login.aspx","/sitecore/debug","/sitecore/default.aspx","/sitecore/login","/sitecore/login.aspx","/sitecore/login/default.aspx","/sitecore/shell/WebService","/sitecore/shell/webservice/service.asmx","/sitecore/shell/webservice/service2.asmx","/sitecore/shell/sitecore.version.xml","/sitecore/service"]
def addToHistory(msg):
extHistory = Control.getSingleton().getExtensionLoader().getExtension(ExtensionHistory.NAME)
if extHistory:
historyRef = HistoryReference(Model.getSingleton().getSession(), HistoryReference.TYPE_PROXIED, msg);
historyRef.addTag("Sitecore")
if View.isInitialised():
extHistory.addHistory(historyRef);
Model.getSingleton().getSession().getSiteTree().addPath(historyRef, msg);
return historyRef
# risk: 0: info, 1: low, 2: medium, 3: high
# reliability: 0: falsePassitive, 1: suspicious, 2: warning
def raiseAlert(msg, risk=0, confidence=0, name="", description="", param=None, attack="", otherInfo="", solution="", evidence="", reference="", cweId=-1, wascId=-1):
extAlert = Control.getSingleton().getExtensionLoader().getExtension(ExtensionAlert.NAME)
if extAlert and msg:
href = addToHistory(msg)
alert = Alert(1337,risk,confidence,name)
alert.setDescription(description)
alert.setParam(param)
alert.setAttack(attack)
alert.setOtherInfo(otherInfo)
alert.setSolution(solution)
alert.setEvidence(evidence)
alert.setCweId(cweId)
alert.setWascId(wascId)
alert.setReference(reference)
alert.setHistoryRef(href)
alert.setMessage(msg)
alert.setUri(msg.getRequestHeader().getURI().toString())
extAlert.alertFound(alert,href)
def invokeWith(msg):
sender = HttpSender(Model.getSingleton().getOptionsParam().getConnectionParam(), True, 6)
uri = msg.getRequestHeader().getURI()
for path in paths:
reqUri = URI(uri.getScheme(),uri.getAuthority(),path,None,None)
req = msg.cloneRequest()
req.getRequestHeader().setURI(reqUri)
sender.sendAndReceive(req,False)
statusCode = req.getResponseHeader().getStatusCode()
if statusCode in [200, 401, 403, 500]:
raiseAlert(req, 3, 2, 'Sitecore default Page exposure', path+' should not be anonymously reachable. Allows for Information Disclosure.', solution="Follow Sitecore Security Hardening Guide, see references", evidence=req.getResponseHeader().getPrimeHeader(), reference="https://doc.sitecore.com/SdnArchive/upload/sitecore7/75/sitecore_security_hardening_guide-sc75-usletter.pdf")
addToHistory(req)
print(str(statusCode)+" - "+path)
| en | 0.794807 | Targeted scripts can only be invoked by you, the user, eg via a right-click option on the Sites or History tabs # risk: 0: info, 1: low, 2: medium, 3: high # reliability: 0: falsePassitive, 1: suspicious, 2: warning | 1.86692 | 2 |
python/hello.py | melnig/coding-playground | 0 | 6617983 | # Author: <NAME>, <EMAIL>
# File: hello.py
# Purpose: "Hello, World!" file
if __name__ == "__main__":
print("Hello, World!")
| # Author: <NAME>, <EMAIL>
# File: hello.py
# Purpose: "Hello, World!" file
if __name__ == "__main__":
print("Hello, World!")
| en | 0.581363 | # Author: <NAME>, <EMAIL> # File: hello.py # Purpose: "Hello, World!" file | 1.916822 | 2 |
djangoapp_cloudedbats_base/apps.py | cloudedbats/cloudedbats_web_archive | 0 | 6617984 | <filename>djangoapp_cloudedbats_base/apps.py
from django.apps import AppConfig
class DjangoappCloudedbatsBaseConfig(AppConfig):
name = 'djangoapp_cloudedbats_base'
| <filename>djangoapp_cloudedbats_base/apps.py
from django.apps import AppConfig
class DjangoappCloudedbatsBaseConfig(AppConfig):
name = 'djangoapp_cloudedbats_base'
| none | 1 | 1.227021 | 1 | |
getMatrices.py | KathanKashiparekh/TransE-Practice | 0 | 6617985 | import numpy as np
import tensorflow as tf
sess=tf.Session()
saver=tf.train.Saver()
saver.restore(sess,'./model_full_train.vec')
| import numpy as np
import tensorflow as tf
sess=tf.Session()
saver=tf.train.Saver()
saver.restore(sess,'./model_full_train.vec')
| none | 1 | 1.855605 | 2 | |
stats.py | alexaltair/feature-selection | 1 | 6617986 | <filename>stats.py
import json
from pandas import Series, DataFrame
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift, DBSCAN
from results_routes import result_route
@result_route
def preview(data, n=10):
return data.iloc[:n].to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered', 'table-striped'])
@result_route
def mean(data):
return data.mean().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def median(data):
return data.median().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def variance(data):
return data.var().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def sorted_variance(data):
return data.var().sort_values().to_frame().T.to_html(
index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def covariance(data):
return data.cov().to_html(
classes=['table', 'table-condensed', 'table-bordered', 'table-striped'])
@result_route
def pca(data):
remaining_variance = PCA().fit(data).explained_variance_ratio_
index = range(1, len(remaining_variance)+1)
remaining_variance = DataFrame({
"Component": index,
"Explained variance ratio": remaining_variance,
}, dtype=object).T
return json.dumps({
'html': remaining_variance.to_html(header=False,
classes=['table', 'table-condensed', 'table-bordered']),
'json': remaining_variance.to_dict()
})
@result_route
def mean_shift(data):
labels = set(MeanShift().fit(data.values).labels_)
num_labels = len(labels) - (1 if -1 in labels else 0)
return json.dumps(num_labels)
@result_route
def dbscan(data):
labels = set(DBSCAN().fit(data.values).labels_)
num_labels = len(labels) - (1 if -1 in labels else 0)
return json.dumps(num_labels)
| <filename>stats.py
import json
from pandas import Series, DataFrame
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift, DBSCAN
from results_routes import result_route
@result_route
def preview(data, n=10):
return data.iloc[:n].to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered', 'table-striped'])
@result_route
def mean(data):
return data.mean().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def median(data):
return data.median().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def variance(data):
return data.var().to_frame().T.to_html(index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def sorted_variance(data):
return data.var().sort_values().to_frame().T.to_html(
index=False,
classes=['table', 'table-condensed', 'table-bordered'])
@result_route
def covariance(data):
return data.cov().to_html(
classes=['table', 'table-condensed', 'table-bordered', 'table-striped'])
@result_route
def pca(data):
remaining_variance = PCA().fit(data).explained_variance_ratio_
index = range(1, len(remaining_variance)+1)
remaining_variance = DataFrame({
"Component": index,
"Explained variance ratio": remaining_variance,
}, dtype=object).T
return json.dumps({
'html': remaining_variance.to_html(header=False,
classes=['table', 'table-condensed', 'table-bordered']),
'json': remaining_variance.to_dict()
})
@result_route
def mean_shift(data):
labels = set(MeanShift().fit(data.values).labels_)
num_labels = len(labels) - (1 if -1 in labels else 0)
return json.dumps(num_labels)
@result_route
def dbscan(data):
labels = set(DBSCAN().fit(data.values).labels_)
num_labels = len(labels) - (1 if -1 in labels else 0)
return json.dumps(num_labels)
| none | 1 | 2.636639 | 3 | |
2_estrutura_de_decisao/02_positivo_negativo.py | cecilmalone/lista_de_exercicios_pybr | 0 | 6617987 | """
2. Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo.
"""
n = int(input("Informe um número: "))
if n >= 0:
print('Positivo')
else:
print('Negativo') | """
2. Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo.
"""
n = int(input("Informe um número: "))
if n >= 0:
print('Positivo')
else:
print('Negativo') | pt | 0.94697 | 2. Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo. | 4.099283 | 4 |
1/V00/oneLiner.py | alpha-256/Neural-Matrix-Research | 0 | 6617988 |
from random import randint
from random import choice
from pprint import pprint
"""
I = input Cell
O = Output Cell
G = Green Cell (Increase signal per step from I Cell)
R = Red Cell (Decrease Signal per step from I Cell)
"""
def ayy(layer_count, size):
layer_matrix = [[choice(["G", "R"]) for _ in range(size)] for _ in range(layer_count)]
for arr in layer_matrix:
for _ in range(3):
randomly_select_idx = randint(0, len(arr)-1)
arr[randomly_select_idx] = "O"
randomly_select_idx = randint(0, len(layer_matrix[0])-1)
layer_matrix[0][randomly_select_idx] = "I"
return layer_matrix
def lmao(layer_matrix):
for matrix_idx, curr_layer in enumerate(layer_matrix[1:], start=1):
for char_idx, char in enumerate(layer_matrix[matrix_idx-1]):
if char == "O":
curr_layer[char_idx] = "I"
return
layer_count = int(input("Layer count > "))
size = int(input("Size > "))
arr = ayy(layer_count, size*size)
print("Generated Layer")
for layer in arr:
for idx, ele in enumerate(layer):
if idx % size == 0:
print()
print(ele, end=" ")
print()
print("---")
lmao(arr)
print("Connected Layer")
for layer in arr:
for idx, ele in enumerate(layer):
if idx % size == 0:
print()
print(ele, end=" ")
print()
print("---")
#END
|
from random import randint
from random import choice
from pprint import pprint
"""
I = input Cell
O = Output Cell
G = Green Cell (Increase signal per step from I Cell)
R = Red Cell (Decrease Signal per step from I Cell)
"""
def ayy(layer_count, size):
layer_matrix = [[choice(["G", "R"]) for _ in range(size)] for _ in range(layer_count)]
for arr in layer_matrix:
for _ in range(3):
randomly_select_idx = randint(0, len(arr)-1)
arr[randomly_select_idx] = "O"
randomly_select_idx = randint(0, len(layer_matrix[0])-1)
layer_matrix[0][randomly_select_idx] = "I"
return layer_matrix
def lmao(layer_matrix):
for matrix_idx, curr_layer in enumerate(layer_matrix[1:], start=1):
for char_idx, char in enumerate(layer_matrix[matrix_idx-1]):
if char == "O":
curr_layer[char_idx] = "I"
return
layer_count = int(input("Layer count > "))
size = int(input("Size > "))
arr = ayy(layer_count, size*size)
print("Generated Layer")
for layer in arr:
for idx, ele in enumerate(layer):
if idx % size == 0:
print()
print(ele, end=" ")
print()
print("---")
lmao(arr)
print("Connected Layer")
for layer in arr:
for idx, ele in enumerate(layer):
if idx % size == 0:
print()
print(ele, end=" ")
print()
print("---")
#END
| en | 0.831546 | I = input Cell O = Output Cell G = Green Cell (Increase signal per step from I Cell) R = Red Cell (Decrease Signal per step from I Cell) #END | 3.32821 | 3 |
source/utils/__init__.py | CostaDiego/diabeticRetinopathy-CExIA | 0 | 6617989 | <reponame>CostaDiego/diabeticRetinopathy-CExIA
__all__ = ['structure']
from os import path
import sys
root = path.abspath('../..')
if root not in sys.path:
sys.path.append(root)
from source.utils import structure
from source.utils.structure import exists, make_dir
from source.utils import utils
from source.utils.utils import image_to_array, save_array | __all__ = ['structure']
from os import path
import sys
root = path.abspath('../..')
if root not in sys.path:
sys.path.append(root)
from source.utils import structure
from source.utils.structure import exists, make_dir
from source.utils import utils
from source.utils.utils import image_to_array, save_array | none | 1 | 2.025256 | 2 | |
PyLESA/test_grid.py | andrewlyden/PyLESA | 6 | 6617990 | <gh_stars>1-10
import grid
import inputs
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 22})
name = 'west_whins_combined.xlsx'
subname = 'hp_14_ts_380'
myInputs = inputs.Inputs(name, subname)
grid_inputs = myInputs.grid()
export = grid_inputs['export']
tariff_choice = grid_inputs['tariff_choice']
balancing_mechanism = grid_inputs['balancing_mechanism']
grid_services = grid_inputs['grid_services']
variable_periods_year = grid_inputs['variable_periods_year']
premium = grid_inputs['wm_info']['premium']
maximum = grid_inputs['wm_info']['maximum']
lower_percent = grid_inputs['ppa_info']['lower_percent']
higher_percent = grid_inputs['ppa_info']['higher_percent']
lower_penalty = grid_inputs['ppa_info']['lower_penalty']
higher_discount = grid_inputs['ppa_info']['higher_discount']
def flatrate():
fr = grid_inputs['flat_rates']
myGrid = grid.Grid(
name, subname,
tariff_choice, balancing_mechanism, grid_services,
flat_rate=fr)
return myGrid.flat_rates_series()
def flatrate_wind():
fr = grid_inputs['flat_rates']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
flat_rate=fr, lower_percent=lower_percent,
higher_percent=higher_percent, higher_discount=higher_discount,
lower_penalty=lower_penalty)
return myGrid.flat_rates_wind()
def variableperiods():
vp = grid_inputs['variable_periods']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
variable_periods=vp, variable_periods_year=variable_periods_year)
return myGrid.variable_periods_series()
def variableperiods_wind():
vp = grid_inputs['variable_periods']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
variable_periods=vp, variable_periods_year=variable_periods_year,
lower_percent=lower_percent,
higher_percent=higher_percent, higher_discount=higher_discount,
lower_penalty=lower_penalty)
return myGrid.variable_periods_wind()
def tou_wm():
twm = grid_inputs['wholesale_market']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
wholesale_market=twm, premium=premium,
maximum=maximum)
return myGrid.tou_wm_series()
def tou_wm_wind():
twm = grid_inputs['wholesale_market']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
wholesale_market=twm, premium=premium,
maximum=maximum, lower_percent=lower_percent,
higher_percent=higher_percent, higher_discount=higher_discount,
lower_penalty=lower_penalty)
return myGrid.tou_wm_wind_ppa()
def wind():
twm = grid_inputs['wholesale_market']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
wholesale_market=twm, premium=premium,
maximum=maximum, lower_percent=lower_percent,
higher_percent=higher_percent, higher_discount=higher_discount,
lower_penalty=lower_penalty)
return myGrid.wind_farm_info()
def plot_traditional():
fr = flatrate()
vp = variableperiods()
twm = tou_wm()
timesteps = 72
hour1 = 0
hour2 = hour1 + timesteps
plt.plot(fr[hour1:hour2], LineWidth=2)
# plt.plot(vp[hour1:hour2], LineWidth=2)
plt.plot(twm[hour1:hour2], LineWidth=2)
plt.legend(['Flat rates', 'Time of use'], loc='best')
plt.ylabel('Import costs')
plt.xlabel('Time (h)')
plt.show()
# # Plot solution
# t = 72
# hour1 = 0
# hour2 = hour1 + t
# plt.figure()
# plt.subplot(3, 1, 1)
# plt.plot(t, fr[hour1:hour2], LineWidth=2)
# plt.ylabel('Flat rates')
# plt.subplot(3, 1, 2)
# plt.plot(t, new_tou[hour1:hour2], LineWidth=2)
# plt.legend(['wind_ppa', 'no_wind_ppa'], loc='best')
# plt.ylabel('Prices pound/MWh')
# plt.subplot(3, 1, 3)
# plt.plot(t, new_tou[hour1:hour2], LineWidth=2)
# plt.legend(['wind_ppa', 'no_wind_ppa'], loc='best')
# plt.ylabel('Prices pound/MWh')
# plt.xlabel('Time')
# plt.show()
def plot_future():
frw = flatrate_wind()
vpw = variableperiods_wind()
twmw = tou_wm_wind()
# timesteps = 192
# hour1 = 1932
timesteps = 250
hour1 = 1300
hour2 = hour1 + timesteps
w = wind()
power = w['power']
lb = [w['lower_band']] * timesteps
ub = [w['higher_band']] * timesteps
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(power[hour1:hour2].values, 'r', LineWidth=2)
plt.plot(lb, 'g', LineWidth=2)
plt.plot(ub, 'b', LineWidth=2)
plt.ylabel('Wind farm')
plt.legend(['output', 'lb', 'ub'], loc='best')
plt.subplot(2, 1, 2)
plt.plot(frw[hour1:hour2], LineWidth=2)
# plt.plot(vpw[hour1:hour2], LineWidth=2)
# plt.plot(twmw[hour1:hour2], LineWidth=2)
plt.legend(['Flat rates', 'Variable periods', 'Time of use'], loc='best')
plt.ylabel('Import costs')
plt.xlabel('Time (h)')
plt.show()
def findhorn():
vp = grid_inputs['variable_periods']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
variable_periods=vp, variable_periods_year=variable_periods_year)
day_night = myGrid.findhorn_tariff()
# print day_night
findhorn()
# plot_traditional()
# plot_future()
# flatrate()
# print flatrate_wind()
# variableperiods()
# variableperiods_wind()
# tou_wm()
# tou_wm_wind()
| import grid
import inputs
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 22})
name = 'west_whins_combined.xlsx'
subname = 'hp_14_ts_380'
myInputs = inputs.Inputs(name, subname)
grid_inputs = myInputs.grid()
export = grid_inputs['export']
tariff_choice = grid_inputs['tariff_choice']
balancing_mechanism = grid_inputs['balancing_mechanism']
grid_services = grid_inputs['grid_services']
variable_periods_year = grid_inputs['variable_periods_year']
premium = grid_inputs['wm_info']['premium']
maximum = grid_inputs['wm_info']['maximum']
lower_percent = grid_inputs['ppa_info']['lower_percent']
higher_percent = grid_inputs['ppa_info']['higher_percent']
lower_penalty = grid_inputs['ppa_info']['lower_penalty']
higher_discount = grid_inputs['ppa_info']['higher_discount']
def flatrate():
fr = grid_inputs['flat_rates']
myGrid = grid.Grid(
name, subname,
tariff_choice, balancing_mechanism, grid_services,
flat_rate=fr)
return myGrid.flat_rates_series()
def flatrate_wind():
fr = grid_inputs['flat_rates']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
flat_rate=fr, lower_percent=lower_percent,
higher_percent=higher_percent, higher_discount=higher_discount,
lower_penalty=lower_penalty)
return myGrid.flat_rates_wind()
def variableperiods():
vp = grid_inputs['variable_periods']
myGrid = grid.Grid(
name, subname, export,
tariff_choice, balancing_mechanism, grid_services,
variable_periods=vp, variable_periods_year=variable_periods_year)
return myGrid.variable_periods_series()
def variableperiods_wind():
    """Variable-period tariff combined with the wind PPA terms.

    Returns the result of ``grid.Grid.variable_periods_wind`` for the
    module-level scenario inputs.
    """
    return grid.Grid(
        name, subname, export,
        tariff_choice, balancing_mechanism, grid_services,
        variable_periods=grid_inputs['variable_periods'],
        variable_periods_year=variable_periods_year,
        lower_percent=lower_percent,
        higher_percent=higher_percent,
        higher_discount=higher_discount,
        lower_penalty=lower_penalty,
    ).variable_periods_wind()
def tou_wm():
    """Time-of-use tariff driven by wholesale-market prices (no wind PPA)."""
    return grid.Grid(
        name, subname, export,
        tariff_choice, balancing_mechanism, grid_services,
        wholesale_market=grid_inputs['wholesale_market'],
        premium=premium,
        maximum=maximum,
    ).tou_wm_series()
def tou_wm_wind():
    """Time-of-use wholesale-market tariff combined with the wind PPA terms.

    Returns the result of ``grid.Grid.tou_wm_wind_ppa`` for the
    module-level scenario inputs.
    """
    return grid.Grid(
        name, subname, export,
        tariff_choice, balancing_mechanism, grid_services,
        wholesale_market=grid_inputs['wholesale_market'],
        premium=premium,
        maximum=maximum,
        lower_percent=lower_percent,
        higher_percent=higher_percent,
        higher_discount=higher_discount,
        lower_penalty=lower_penalty,
    ).tou_wm_wind_ppa()
def wind():
    """Wind-farm information from ``grid.Grid.wind_farm_info``.

    Constructed with the same arguments as tou_wm_wind(); only the method
    called on the resulting Grid differs.  plot_future() reads the 'power',
    'lower_band' and 'higher_band' entries of the returned mapping.
    """
    return grid.Grid(
        name, subname, export,
        tariff_choice, balancing_mechanism, grid_services,
        wholesale_market=grid_inputs['wholesale_market'],
        premium=premium,
        maximum=maximum,
        lower_percent=lower_percent,
        higher_percent=higher_percent,
        higher_discount=higher_discount,
        lower_penalty=lower_penalty,
    ).wind_farm_info()
def plot_traditional():
    """Plot import costs for the traditional tariffs over the first 72 hours.

    Draws the flat-rate and time-of-use series; the variable-periods series
    is still computed (kept for easy re-enabling) but its plot is disabled.
    """
    fr = flatrate()
    vp = variableperiods()  # computed but not plotted at present
    twm = tou_wm()
    timesteps = 72
    hour1 = 0
    hour2 = hour1 + timesteps
    # Use lowercase 'linewidth': matplotlib removed case-insensitive
    # MATLAB-style kwargs (e.g. 'LineWidth') in version 3.5.
    plt.plot(fr[hour1:hour2], linewidth=2)
    # plt.plot(vp[hour1:hour2], linewidth=2)
    plt.plot(twm[hour1:hour2], linewidth=2)
    plt.legend(['Flat rates', 'Time of use'], loc='best')
    plt.ylabel('Import costs')
    plt.xlabel('Time (h)')
    plt.show()
def plot_future():
    """Plot wind-farm output against its PPA bands and the wind import costs.

    Top panel: wind power trace with the lower/upper PPA bands.  Bottom
    panel: flat-rate-with-wind import costs.  The variable-period and
    time-of-use wind series are computed but their plots are disabled.

    NOTE(review): the bottom-panel legend lists three labels while only the
    flat-rate series is plotted -- confirm intended once re-enabled.
    """
    frw = flatrate_wind()
    vpw = variableperiods_wind()  # computed but not plotted at present
    twmw = tou_wm_wind()          # computed but not plotted at present
    # timesteps = 192
    # hour1 = 1932
    timesteps = 250
    hour1 = 1300
    hour2 = hour1 + timesteps
    w = wind()
    power = w['power']
    lb = [w['lower_band']] * timesteps
    ub = [w['higher_band']] * timesteps
    plt.figure()
    plt.subplot(2, 1, 1)
    # Use lowercase 'linewidth': matplotlib removed case-insensitive
    # MATLAB-style kwargs (e.g. 'LineWidth') in version 3.5.
    plt.plot(power[hour1:hour2].values, 'r', linewidth=2)
    plt.plot(lb, 'g', linewidth=2)
    plt.plot(ub, 'b', linewidth=2)
    plt.ylabel('Wind farm')
    plt.legend(['output', 'lb', 'ub'], loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(frw[hour1:hour2], linewidth=2)
    # plt.plot(vpw[hour1:hour2], linewidth=2)
    # plt.plot(twmw[hour1:hour2], linewidth=2)
    plt.legend(['Flat rates', 'Variable periods', 'Time of use'], loc='best')
    plt.ylabel('Import costs')
    plt.xlabel('Time (h)')
    plt.show()
def findhorn():
    """Compute the Findhorn day/night tariff; the result is currently unused."""
    tariff_model = grid.Grid(
        name, subname, export,
        tariff_choice, balancing_mechanism, grid_services,
        variable_periods=grid_inputs['variable_periods'],
        variable_periods_year=variable_periods_year)
    day_night = tariff_model.findhorn_tariff()  # kept for debugging; was: print day_night
# Driver: only the Findhorn tariff analysis is enabled here; uncomment one
# of the calls below to run a different tariff comparison instead.
findhorn()
# plot_traditional()
# plot_future()
# flatrate()
# print flatrate_wind()
# variableperiods()
# variableperiods_wind()
# tou_wm()
# tou_wm_wind()
blog/views.py | django-byte/coursera-project | 0 | 6617991 | <gh_stars>0
from django.shortcuts import render, get_object_or_404
from .models import Post
def home(request):
posts = Post.objects.all()
return render(request, 'home.html', {'section': 'home', 'posts': posts})
def detail(request, slug=None):
post = get_object_or_404(Post, slug=slug)
return render(request, 'detail.html', {'section': 'blog_detail', 'post': post, })
| from django.shortcuts import render, get_object_or_404
from .models import Post
def home(request):
posts = Post.objects.all()
return render(request, 'home.html', {'section': 'home', 'posts': posts})
def detail(request, slug=None):
post = get_object_or_404(Post, slug=slug)
return render(request, 'detail.html', {'section': 'blog_detail', 'post': post, }) | none | 1 | 2.031089 | 2 | |
PyBank/main.py | crogar/python-challenge | 1 | 6617992 | import os,csv
from collections import namedtuple
budget_data = os.path.join(os.getcwd(),'Resources/budget_data.csv') #reference to budget_data in ./Resources/budget_data.cs
analysis_output = os.path.join(os.getcwd(),'Analysis/analysis.txt')
#Declaring variables to store the data from the CSV file
data = []
print_out = []
months = namedtuple("months","Date profit_losses") # using namedtuple to keep list organized
def summation(data_set):
''' This function receives a list containing Profit/Losses, it will add them up.
It will return the net total amount using index 1 to locate that data
'''
return sum([month.profit_losses for month in data_set])
def calc_changes(data_,mode='average'):
''' This function receives a list containing Profit/Losses will add them up
it will return the the average, max or min based on the chosen mode "average","GRI", or "GRD"
'''
changes = []
for i in range(0,len(data_)-1):
change = data_[i+1].profit_losses- data_[i].profit_losses
changes.append((data_[i+1].Date,change)) # first Element in tuple is the month and second element is the actual change
if mode == 'average':
average_change = sum([x[1] for x in changes]) / len(changes)
return round(average_change,2)
elif mode == 'GRI':
return max(changes, key = lambda i: i[1]) # using Lambda function to tell MAX to use The second element in tuple as comparison
elif mode == 'GRD':
return min (changes, key = lambda i: i[1]) # using Lambda function to tell MIN to use The second element in tuple as comparison
else:
raise ValueError(f"{mode} is not a supported parameter") # if user doesn't provide a valid mode, we raise a ValueError
def main():
with open(budget_data, newline='') as csvfile: # reading CSV File and storing the rows into a list
reader = csv.DictReader(csvfile)
for row in reader:
month = months(row['Date'], float(row['Profit/Losses'])) # appending month of type months(namedTuple)
data.append(month)
print_out.append("Financial Analysis")
print_out.append("----------------------------")
# obtaining The total number of months included in the dataset
print_out.append(f"Total Months: {len(data)}")
# obtaining The net total amount of "Profit/Losses" over the entire period
net_total = summation(data)
print_out.append(f"Total: ${net_total}")
# Calculates the changes in "Profit/Losses" over the entire period, then returns the average of those changes
average_change = calc_changes(data, mode='average')
print_out.append(f'Average Change: ${average_change}')
# Calculates The greatest increase in profits (date and amount) over the entire period
GRI = calc_changes(data,mode="GRI") # Receiving a tuple containing the month and the GRI change i.e ('Sep-2016', -665765.0)
print_out.append(f'Greatest Increase in Profits: {GRI[0]} (${GRI[1]})')
# calculates The greatest decrease in losses (date and amount) over the entire period
GRD = calc_changes(data,mode="GRD") # Receiving a tuple containing the month and the GRD change i.e ('Sep-2016', -665765.0)
print_out.append(f'Greatest Decrease in Profits: {GRD[0]} (${GRD[1]})')
with open(analysis_output, 'w', newline='') as out_file: # Generating analysis.txt printing out same result as in the console
for line in print_out:
print(line)
out_file.write(line + '\n') # using escape code \n so this way python will add a new line after wrinting a new row
if __name__ == '__main__':
main() | import os,csv
from collections import namedtuple
budget_data = os.path.join(os.getcwd(),'Resources/budget_data.csv') #reference to budget_data in ./Resources/budget_data.cs
analysis_output = os.path.join(os.getcwd(),'Analysis/analysis.txt')
#Declaring variables to store the data from the CSV file
data = []
print_out = []
months = namedtuple("months","Date profit_losses") # using namedtuple to keep list organized
def summation(data_set):
''' This function receives a list containing Profit/Losses, it will add them up.
It will return the net total amount using index 1 to locate that data
'''
return sum([month.profit_losses for month in data_set])
def calc_changes(data_,mode='average'):
''' This function receives a list containing Profit/Losses will add them up
it will return the the average, max or min based on the chosen mode "average","GRI", or "GRD"
'''
changes = []
for i in range(0,len(data_)-1):
change = data_[i+1].profit_losses- data_[i].profit_losses
changes.append((data_[i+1].Date,change)) # first Element in tuple is the month and second element is the actual change
if mode == 'average':
average_change = sum([x[1] for x in changes]) / len(changes)
return round(average_change,2)
elif mode == 'GRI':
return max(changes, key = lambda i: i[1]) # using Lambda function to tell MAX to use The second element in tuple as comparison
elif mode == 'GRD':
return min (changes, key = lambda i: i[1]) # using Lambda function to tell MIN to use The second element in tuple as comparison
else:
raise ValueError(f"{mode} is not a supported parameter") # if user doesn't provide a valid mode, we raise a ValueError
def main():
with open(budget_data, newline='') as csvfile: # reading CSV File and storing the rows into a list
reader = csv.DictReader(csvfile)
for row in reader:
month = months(row['Date'], float(row['Profit/Losses'])) # appending month of type months(namedTuple)
data.append(month)
print_out.append("Financial Analysis")
print_out.append("----------------------------")
# obtaining The total number of months included in the dataset
print_out.append(f"Total Months: {len(data)}")
# obtaining The net total amount of "Profit/Losses" over the entire period
net_total = summation(data)
print_out.append(f"Total: ${net_total}")
# Calculates the changes in "Profit/Losses" over the entire period, then returns the average of those changes
average_change = calc_changes(data, mode='average')
print_out.append(f'Average Change: ${average_change}')
# Calculates The greatest increase in profits (date and amount) over the entire period
GRI = calc_changes(data,mode="GRI") # Receiving a tuple containing the month and the GRI change i.e ('Sep-2016', -665765.0)
print_out.append(f'Greatest Increase in Profits: {GRI[0]} (${GRI[1]})')
# calculates The greatest decrease in losses (date and amount) over the entire period
GRD = calc_changes(data,mode="GRD") # Receiving a tuple containing the month and the GRD change i.e ('Sep-2016', -665765.0)
print_out.append(f'Greatest Decrease in Profits: {GRD[0]} (${GRD[1]})')
with open(analysis_output, 'w', newline='') as out_file: # Generating analysis.txt printing out same result as in the console
for line in print_out:
print(line)
out_file.write(line + '\n') # using escape code \n so this way python will add a new line after wrinting a new row
if __name__ == '__main__':
main() | en | 0.844313 | #reference to budget_data in ./Resources/budget_data.cs #Declaring variables to store the data from the CSV file # using namedtuple to keep list organized This function receives a list containing Profit/Losses, it will add them up. It will return the net total amount using index 1 to locate that data This function receives a list containing Profit/Losses will add them up it will return the the average, max or min based on the chosen mode "average","GRI", or "GRD" # first Element in tuple is the month and second element is the actual change # using Lambda function to tell MAX to use The second element in tuple as comparison # using Lambda function to tell MIN to use The second element in tuple as comparison # if user doesn't provide a valid mode, we raise a ValueError # reading CSV File and storing the rows into a list # appending month of type months(namedTuple) # obtaining The total number of months included in the dataset # obtaining The net total amount of "Profit/Losses" over the entire period # Calculates the changes in "Profit/Losses" over the entire period, then returns the average of those changes # Calculates The greatest increase in profits (date and amount) over the entire period # Receiving a tuple containing the month and the GRI change i.e ('Sep-2016', -665765.0) # calculates The greatest decrease in losses (date and amount) over the entire period # Receiving a tuple containing the month and the GRD change i.e ('Sep-2016', -665765.0) # Generating analysis.txt printing out same result as in the console # using escape code \n so this way python will add a new line after wrinting a new row | 3.746456 | 4 |
pmaapi/model/__init__.py | joeflack4/pma-api-open-model | 0 | 6617993 | """Init for resources."""
| """Init for resources."""
| en | 0.815422 | Init for resources. | 1.159885 | 1 |
src/seqdesign_pt/scripts/run_autoregressive_vae_fr.py | aaronkollasch/seqdesign-pytorch | 7 | 6617994 | <filename>src/seqdesign_pt/scripts/run_autoregressive_vae_fr.py
#!/usr/bin/env python
import sys
import os
import argparse
import time
import json
import numpy as np
import torch
from seqdesign_pt import data_loaders
from seqdesign_pt import autoregressive_model
from seqdesign_pt import autoregressive_train
from seqdesign_pt import model_logging
from seqdesign_pt.utils import get_cuda_version, get_cudnn_version, get_github_head_hash, Tee
working_dir = '/n/groups/marks/users/aaron/autoregressive'
data_dir = '/n/groups/marks/projects/autoregressive'
parser = argparse.ArgumentParser(description="Train an autoregressive model on a collection of sequences.")
parser.add_argument("--channels", type=int, default=48,
help="Number of channels.")
parser.add_argument("--num-iterations", type=int, default=250005,
help="Number of iterations to run the model.")
parser.add_argument("--dataset", type=str, default=None, required=True,
help="Dataset name for fitting model. Alignment weights must be computed beforehand.")
parser.add_argument("--num-data-workers", type=int, default=4,
help="Number of workers to load data")
parser.add_argument("--restore", type=str, default=None,
help="Snapshot path for restoring a model to continue training.")
parser.add_argument("--r-seed", type=int, default=42,
help="Random seed")
parser.add_argument("--no-lag-inf", action='store_true',
help="Disable lagging inference")
parser.add_argument("--lag-inf-max-steps", type=int, default=None,
help="Disable lagging inference")
parser.add_argument("--dropout-p", type=float, default=0.5,
help="Decoder dropout probability (drop rate, not keep rate)")
parser.add_argument("--no-cuda", action='store_true',
help="Disable GPU training")
args = parser.parse_args()
run_name = f"{args.dataset}_VAE_elu_channels-{args.channels}_dropout-{args.dropout_p}_rseed-{args.r_seed}" \
f"_start-{time.strftime('%y%b%d_%H%M', time.localtime())}"
sbatch_executable = f"""#!/bin/bash
#SBATCH -c 4 # Request one core
#SBATCH -N 1 # Request one node (if you request more than one core with -c, also using
# -N 1 means all cores will be on the same node)
#SBATCH -t 2-11:59 # Runtime in D-HH:MM format
#SBATCH -p gpu # Partition to run in
#SBATCH --gres=gpu:1
#SBATCH --mem=30G # Memory total in MB (for all cores)
#SBATCH -o slurm_files/slurm-%j.out # File to which STDOUT + STDERR will be written, including job ID in filename
hostname
pwd
module load gcc/6.2.0 cuda/9.0
srun stdbuf -oL -eL {sys.executable} \\
{sys.argv[0]} \\
--dataset {args.dataset} --num-iterations {args.num_iterations} \\
--channels {args.channels} --dropout-p {args.dropout_p} --r-seed {args.r_seed} \\
--restore {{restore}}
"""
torch.manual_seed(args.r_seed)
torch.cuda.manual_seed_all(args.r_seed)
def _init_fn(worker_id):
np.random.seed(args.r_seed + worker_id)
os.makedirs(f'logs/{args.run_name}', exist_ok=True)
log_f = Tee(f'logs/{args.run_name}/log.txt', 'a')
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("PyTorch: ", torch.__version__)
print("Numpy: ", np.__version__)
USE_CUDA = not args.no_cuda
device = torch.device("cuda:0" if USE_CUDA and torch.cuda.is_available() else "cpu")
print('Using device:', device)
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')
print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3, 1), 'GB')
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())
print("git hash:", str(get_github_head_hash()))
print()
print("Run:", run_name)
dataset = data_loaders.SingleFamilyDataset(
batch_size=args.batch_size,
working_dir=data_dir,
dataset=args.dataset,
matching=True,
unlimited_epoch=True,
output_shape='NCHW',
output_types='decoder',
)
loader = data_loaders.GeneratorDataLoader(
dataset,
num_workers=args.num_data_workers,
pin_memory=True,
worker_init_fn=_init_fn
)
if args.restore is not None:
print("Restoring model from:", args.restore)
checkpoint = torch.load(args.restore, map_location='cpu' if device.type == 'cpu' else None)
dims = checkpoint['model_dims']
hyperparams = checkpoint['model_hyperparams']
trainer_params = checkpoint['train_params']
model = autoregressive_model.AutoregressiveVAEFR(dims=dims, hyperparams=hyperparams, dropout_p=args.dropout_p)
else:
checkpoint = args.restore
trainer_params = None
model = autoregressive_model.AutoregressiveVAEFR(channels=args.channels, dropout_p=args.dropout_p)
model.to(device)
trainer = autoregressive_train.AutoregressiveVAETrainer(
model=model,
data_loader=loader,
params=trainer_params,
snapshot_path=working_dir + '/sess',
snapshot_name=run_name,
snapshot_interval=args.num_iterations // 10,
snapshot_exec_template=sbatch_executable,
device=device,
# logger=model_logging.Logger(validation_interval=None),
logger=model_logging.TensorboardLogger(
log_interval=500,
validation_interval=1000,
generate_interval=5000,
log_dir=working_dir + '/log/' + run_name
)
)
if args.restore is not None:
trainer.load_state(checkpoint)
if args.no_lag_inf:
trainer.params['lagging_inference'] = False
if args.lag_inf_max_steps is not None:
trainer.params['lag_inf_inner_loop_max_steps'] = args.lag_inf_max_steps
print("Hyperparameters:", json.dumps(model.hyperparams, indent=4))
print("Training parameters:", json.dumps(trainer.params, indent=4))
print("Num trainable parameters:", model.parameter_count())
trainer.train(steps=args.num_iterations)
| <filename>src/seqdesign_pt/scripts/run_autoregressive_vae_fr.py
#!/usr/bin/env python
import sys
import os
import argparse
import time
import json
import numpy as np
import torch
from seqdesign_pt import data_loaders
from seqdesign_pt import autoregressive_model
from seqdesign_pt import autoregressive_train
from seqdesign_pt import model_logging
from seqdesign_pt.utils import get_cuda_version, get_cudnn_version, get_github_head_hash, Tee
working_dir = '/n/groups/marks/users/aaron/autoregressive'
data_dir = '/n/groups/marks/projects/autoregressive'
parser = argparse.ArgumentParser(description="Train an autoregressive model on a collection of sequences.")
parser.add_argument("--channels", type=int, default=48,
help="Number of channels.")
parser.add_argument("--num-iterations", type=int, default=250005,
help="Number of iterations to run the model.")
parser.add_argument("--dataset", type=str, default=None, required=True,
help="Dataset name for fitting model. Alignment weights must be computed beforehand.")
parser.add_argument("--num-data-workers", type=int, default=4,
help="Number of workers to load data")
parser.add_argument("--restore", type=str, default=None,
help="Snapshot path for restoring a model to continue training.")
parser.add_argument("--r-seed", type=int, default=42,
help="Random seed")
parser.add_argument("--no-lag-inf", action='store_true',
help="Disable lagging inference")
parser.add_argument("--lag-inf-max-steps", type=int, default=None,
help="Disable lagging inference")
parser.add_argument("--dropout-p", type=float, default=0.5,
help="Decoder dropout probability (drop rate, not keep rate)")
parser.add_argument("--no-cuda", action='store_true',
help="Disable GPU training")
args = parser.parse_args()
run_name = f"{args.dataset}_VAE_elu_channels-{args.channels}_dropout-{args.dropout_p}_rseed-{args.r_seed}" \
f"_start-{time.strftime('%y%b%d_%H%M', time.localtime())}"
sbatch_executable = f"""#!/bin/bash
#SBATCH -c 4 # Request one core
#SBATCH -N 1 # Request one node (if you request more than one core with -c, also using
# -N 1 means all cores will be on the same node)
#SBATCH -t 2-11:59 # Runtime in D-HH:MM format
#SBATCH -p gpu # Partition to run in
#SBATCH --gres=gpu:1
#SBATCH --mem=30G # Memory total in MB (for all cores)
#SBATCH -o slurm_files/slurm-%j.out # File to which STDOUT + STDERR will be written, including job ID in filename
hostname
pwd
module load gcc/6.2.0 cuda/9.0
srun stdbuf -oL -eL {sys.executable} \\
{sys.argv[0]} \\
--dataset {args.dataset} --num-iterations {args.num_iterations} \\
--channels {args.channels} --dropout-p {args.dropout_p} --r-seed {args.r_seed} \\
--restore {{restore}}
"""
torch.manual_seed(args.r_seed)
torch.cuda.manual_seed_all(args.r_seed)
def _init_fn(worker_id):
np.random.seed(args.r_seed + worker_id)
os.makedirs(f'logs/{args.run_name}', exist_ok=True)
log_f = Tee(f'logs/{args.run_name}/log.txt', 'a')
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("PyTorch: ", torch.__version__)
print("Numpy: ", np.__version__)
USE_CUDA = not args.no_cuda
device = torch.device("cuda:0" if USE_CUDA and torch.cuda.is_available() else "cpu")
print('Using device:', device)
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')
print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3, 1), 'GB')
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())
print("git hash:", str(get_github_head_hash()))
print()
print("Run:", run_name)
dataset = data_loaders.SingleFamilyDataset(
batch_size=args.batch_size,
working_dir=data_dir,
dataset=args.dataset,
matching=True,
unlimited_epoch=True,
output_shape='NCHW',
output_types='decoder',
)
loader = data_loaders.GeneratorDataLoader(
dataset,
num_workers=args.num_data_workers,
pin_memory=True,
worker_init_fn=_init_fn
)
if args.restore is not None:
print("Restoring model from:", args.restore)
checkpoint = torch.load(args.restore, map_location='cpu' if device.type == 'cpu' else None)
dims = checkpoint['model_dims']
hyperparams = checkpoint['model_hyperparams']
trainer_params = checkpoint['train_params']
model = autoregressive_model.AutoregressiveVAEFR(dims=dims, hyperparams=hyperparams, dropout_p=args.dropout_p)
else:
checkpoint = args.restore
trainer_params = None
model = autoregressive_model.AutoregressiveVAEFR(channels=args.channels, dropout_p=args.dropout_p)
model.to(device)
trainer = autoregressive_train.AutoregressiveVAETrainer(
model=model,
data_loader=loader,
params=trainer_params,
snapshot_path=working_dir + '/sess',
snapshot_name=run_name,
snapshot_interval=args.num_iterations // 10,
snapshot_exec_template=sbatch_executable,
device=device,
# logger=model_logging.Logger(validation_interval=None),
logger=model_logging.TensorboardLogger(
log_interval=500,
validation_interval=1000,
generate_interval=5000,
log_dir=working_dir + '/log/' + run_name
)
)
if args.restore is not None:
trainer.load_state(checkpoint)
if args.no_lag_inf:
trainer.params['lagging_inference'] = False
if args.lag_inf_max_steps is not None:
trainer.params['lag_inf_inner_loop_max_steps'] = args.lag_inf_max_steps
print("Hyperparameters:", json.dumps(model.hyperparams, indent=4))
print("Training parameters:", json.dumps(trainer.params, indent=4))
print("Num trainable parameters:", model.parameter_count())
trainer.train(steps=args.num_iterations)
| en | 0.464788 | #!/usr/bin/env python #!/bin/bash #SBATCH -c 4 # Request one core #SBATCH -N 1 # Request one node (if you request more than one core with -c, also using # -N 1 means all cores will be on the same node) #SBATCH -t 2-11:59 # Runtime in D-HH:MM format #SBATCH -p gpu # Partition to run in #SBATCH --gres=gpu:1 #SBATCH --mem=30G # Memory total in MB (for all cores) #SBATCH -o slurm_files/slurm-%j.out # File to which STDOUT + STDERR will be written, including job ID in filename hostname pwd module load gcc/6.2.0 cuda/9.0 srun stdbuf -oL -eL {sys.executable} \\ {sys.argv[0]} \\ --dataset {args.dataset} --num-iterations {args.num_iterations} \\ --channels {args.channels} --dropout-p {args.dropout_p} --r-seed {args.r_seed} \\ --restore {{restore}} # logger=model_logging.Logger(validation_interval=None), | 2.058889 | 2 |
searches/jump_search.py | collin-newman/Python | 0 | 6617995 | """
Pure Python implementation of the jump search algorithm.
This algorithm iterates through a sorted collection with a step of n^(1/2),
until the element compared is bigger than the one searched.
It will then perform a linear search until it matches the wanted number.
If not found, it returns -1.
"""
import math
def jump_search(arr: list, x: int) -> int:
"""
Pure Python implementation of the jump search algorithm.
Examples:
>>> jump_search([0, 1, 2, 3, 4, 5], 3)
3
>>> jump_search([-5, -2, -1], -1)
2
>>> jump_search([0, 5, 10, 20], 8)
-1
>>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
10
"""
"""edge cases"""
if len(arr) == 0:
return -1
if arr[0] > x:
return -1
if arr[len(arr) - 1] < x:
return -1
targetIndex = -1
length = len(arr) - 1
lowerBound = 0
upperBound = arr[length]
block = math.floor(math.sqrt(x))
for index in range(0, length, block):
if arr[index] < x:
lowerBound = index
else:
upperBound = index
break
for index in range(lowerBound, upperBound, 1):
if arr[index] == x:
targetIndex = index
break
return targetIndex
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
arr = [int(item) for item in user_input.split(",")]
x = int(input("Enter the number to be searched:\n"))
res = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"Number {x} is at index {res}")
| """
Pure Python implementation of the jump search algorithm.
This algorithm iterates through a sorted collection with a step of n^(1/2),
until the element compared is bigger than the one searched.
It will then perform a linear search until it matches the wanted number.
If not found, it returns -1.
"""
import math
def jump_search(arr: list, x: int) -> int:
"""
Pure Python implementation of the jump search algorithm.
Examples:
>>> jump_search([0, 1, 2, 3, 4, 5], 3)
3
>>> jump_search([-5, -2, -1], -1)
2
>>> jump_search([0, 5, 10, 20], 8)
-1
>>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
10
"""
"""edge cases"""
if len(arr) == 0:
return -1
if arr[0] > x:
return -1
if arr[len(arr) - 1] < x:
return -1
targetIndex = -1
length = len(arr) - 1
lowerBound = 0
upperBound = arr[length]
block = math.floor(math.sqrt(x))
for index in range(0, length, block):
if arr[index] < x:
lowerBound = index
else:
upperBound = index
break
for index in range(lowerBound, upperBound, 1):
if arr[index] == x:
targetIndex = index
break
return targetIndex
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
arr = [int(item) for item in user_input.split(",")]
x = int(input("Enter the number to be searched:\n"))
res = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"Number {x} is at index {res}")
| en | 0.638356 | Pure Python implementation of the jump search algorithm. This algorithm iterates through a sorted collection with a step of n^(1/2), until the element compared is bigger than the one searched. It will then perform a linear search until it matches the wanted number. If not found, it returns -1. Pure Python implementation of the jump search algorithm. Examples: >>> jump_search([0, 1, 2, 3, 4, 5], 3) 3 >>> jump_search([-5, -2, -1], -1) 2 >>> jump_search([0, 5, 10, 20], 8) -1 >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55) 10 edge cases | 4.049971 | 4 |
executor.py | haeinous/a-relational-db | 0 | 6617996 | #!/usr/bin/python3
def execute_query(query_plan):
"""
Given a QueryPlan object, execute the query.
"""
for record in query_plan:
yield record
| #!/usr/bin/python3
def execute_query(query_plan):
"""
Given a QueryPlan object, execute the query.
"""
for record in query_plan:
yield record
| en | 0.520033 | #!/usr/bin/python3 Given a QueryPlan object, execute the query. | 2.375587 | 2 |
migrations/versions/bb185ea22eff_added_user_notes_relationship.py | wastevensv/postdrop | 2 | 6617997 | """Added user-notes relationship.
Revision ID: bb185ea22eff
Revises: e<PASSWORD>
Create Date: 2016-06-08 21:55:39.604973
"""
# revision identifiers, used by Alembic.
revision = 'bb185ea22eff'
down_revision = 'e7428295c<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('notes', schema=None) as batch_op:
batch_op.add_column(sa.Column('owner_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key('owner', 'users', ['owner_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('notes', schema=None) as batch_op:
batch_op.drop_constraint('owner', type_='foreignkey')
batch_op.drop_column('owner_id')
### end Alembic commands ###
| """Added user-notes relationship.
Revision ID: bb185ea22eff
Revises: e<PASSWORD>
Create Date: 2016-06-08 21:55:39.604973
"""
# revision identifiers, used by Alembic.
revision = 'bb185ea22eff'
down_revision = 'e7428295c<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('notes', schema=None) as batch_op:
batch_op.add_column(sa.Column('owner_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key('owner', 'users', ['owner_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('notes', schema=None) as batch_op:
batch_op.drop_constraint('owner', type_='foreignkey')
batch_op.drop_column('owner_id')
### end Alembic commands ###
| en | 0.578532 | Added user-notes relationship. Revision ID: bb185ea22eff Revises: e<PASSWORD> Create Date: 2016-06-08 21:55:39.604973 # revision identifiers, used by Alembic. ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### | 1.447697 | 1 |
graded_classwork.py | cs-fullstack-2019-spring/python-arraycollections-cw-cgarciapieto | 0 | 6617998 | <filename>graded_classwork.py
def main():
problem1()
problem2()
problem3()
problem4()
#created an array and used built functions to remove itmes from the array
def problem1():
arrayForProblem2 = ['Kenn', 'Kevin', 'Erin', 'Meka']
print(arrayForProblem2[2])
print(arrayForProblem2.__len__())
print(arrayForProblem2.pop(1))
print(arrayForProblem2.pop(2))
#Python Program that Creates a function that has a loop that quits with ‘q’. If the user doesn't enter 'q', ask them to input another string.
def problem2():
userInput = ""
while(userInput != 'q'):
userInput = input("Enter something to quit the program")
def problem3():
myPeople = {
"Jonathan": "John",
"Michaek":"Mike",
"William":"Bill",
"Robert":"Rob"
}
print(myPeople)
print(myPeople["William"])
def problem4():
numArray = [1,2,3,4,5]
for i in range( len(numArray) - 1, -1, -1) :
print(i)
def problem5():
totalArray = [1,2,3,4,5,6,7,8,9,10]
playerOne = int(input("how many numbers in an array are higher, lower, or equal to it."))
if __name__ == '__main__':
main()
| <filename>graded_classwork.py
def main():
problem1()
problem2()
problem3()
problem4()
#created an array and used built functions to remove itmes from the array
def problem1():
arrayForProblem2 = ['Kenn', 'Kevin', 'Erin', 'Meka']
print(arrayForProblem2[2])
print(arrayForProblem2.__len__())
print(arrayForProblem2.pop(1))
print(arrayForProblem2.pop(2))
#Python Program that Creates a function that has a loop that quits with ‘q’. If the user doesn't enter 'q', ask them to input another string.
def problem2():
userInput = ""
while(userInput != 'q'):
userInput = input("Enter something to quit the program")
def problem3():
myPeople = {
"Jonathan": "John",
"Michaek":"Mike",
"William":"Bill",
"Robert":"Rob"
}
print(myPeople)
print(myPeople["William"])
def problem4():
numArray = [1,2,3,4,5]
for i in range( len(numArray) - 1, -1, -1) :
print(i)
def problem5():
totalArray = [1,2,3,4,5,6,7,8,9,10]
playerOne = int(input("how many numbers in an array are higher, lower, or equal to it."))
if __name__ == '__main__':
main()
| en | 0.923144 | #created an array and used built functions to remove itmes from the array #Python Program that Creates a function that has a loop that quits with ‘q’. If the user doesn't enter 'q', ask them to input another string. | 4.005948 | 4 |
stun-py/stun/tlv/length.py | moky/WormHole | 5 | 6617999 | # -*- coding: utf-8 -*-
#
# TLV: Tag Length Value
#
# Written in 2020 by Moky <<EMAIL>>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
from abc import abstractmethod
from typing import TypeVar, Generic, Union, Optional
from udp.ba import ByteArray
from udp.ba import IntegerData, UInt8Data, UInt16Data, UInt32Data, VarIntData, Convert
from .tag import Tag, T
"""
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Value (variable) ....
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
class Length(IntegerData):
""" TLV Length """
pass
L = TypeVar('L') # Length
class LengthParser(Generic[T, L]):
""" Length Parser """
@abstractmethod
def parse_length(self, data: ByteArray, tag: T) -> Optional[L]:
""" Parse Length from data with Tag """
raise NotImplemented
"""
Lengths
~~~~~~~
"""
class Length8(UInt8Data, Length):
""" Fixed size Length (8 bits) """
ZERO = None # Length8.parse(data=UInt8Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length8
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt8Data):
data = UInt8Data.from_data(data=data)
if data is not None:
return cls(data=data, value=data.value)
@classmethod
def new(cls, value: int): # -> Length8
data = UInt8Data.from_int(value=value)
return cls(data=data, value=data.value)
class Length16(UInt16Data, Length):
""" Fixed size Length (16 bits) """
ZERO = None # Length16.parse(data=UInt16Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length16
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt16Data):
data = Convert.uint16data_from_data(data=data)
if data is not None:
return cls(data=data, value=data.value, endian=data.endian)
@classmethod
def new(cls, value: int): # -> Length16
data = Convert.uint16data_from_value(value=value)
return cls(data=data, value=data.value, endian=data.endian)
class Length32(UInt16Data, Length):
""" Fixed size Length (32 bits) """
ZERO = None # Length32.parse(data=UInt32Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length32
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt32Data):
data = Convert.uint32data_from_data(data=data)
if data is not None:
return cls(data=data, value=data.value, endian=data.endian)
@classmethod
def new(cls, value: int): # -> Length32
data = Convert.uint32data_from_value(value=value)
return cls(data=data, value=data.value, endian=data.endian)
class VarLength(VarIntData, Length):
""" Variable size Length """
ZERO = None # VarLength.parse(data=VarIntData.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> VarLength
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, VarIntData):
data = VarIntData.from_data(data=data)
if data is not None:
return cls(data=data, value=data.value)
@classmethod
def new(cls, value: int): # -> VarLength
data = VarIntData.from_int(value=value)
return cls(data=data, value=data.value)
Length8.ZERO = Length8.parse(data=UInt8Data.ZERO)
Length16.ZERO = Length16.parse(data=UInt16Data.ZERO)
Length32.ZERO = Length32.parse(data=UInt32Data.ZERO)
VarLength.ZERO = VarLength.parse(data=VarIntData.ZERO)
| # -*- coding: utf-8 -*-
#
# TLV: Tag Length Value
#
# Written in 2020 by Moky <<EMAIL>>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
from abc import abstractmethod
from typing import TypeVar, Generic, Union, Optional
from udp.ba import ByteArray
from udp.ba import IntegerData, UInt8Data, UInt16Data, UInt32Data, VarIntData, Convert
from .tag import Tag, T
"""
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Value (variable) ....
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
class Length(IntegerData):
""" TLV Length """
pass
L = TypeVar('L') # Length
class LengthParser(Generic[T, L]):
""" Length Parser """
@abstractmethod
def parse_length(self, data: ByteArray, tag: T) -> Optional[L]:
""" Parse Length from data with Tag """
raise NotImplemented
"""
Lengths
~~~~~~~
"""
class Length8(UInt8Data, Length):
""" Fixed size Length (8 bits) """
ZERO = None # Length8.parse(data=UInt8Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length8
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt8Data):
data = UInt8Data.from_data(data=data)
if data is not None:
return cls(data=data, value=data.value)
@classmethod
def new(cls, value: int): # -> Length8
data = UInt8Data.from_int(value=value)
return cls(data=data, value=data.value)
class Length16(UInt16Data, Length):
""" Fixed size Length (16 bits) """
ZERO = None # Length16.parse(data=UInt16Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length16
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt16Data):
data = Convert.uint16data_from_data(data=data)
if data is not None:
return cls(data=data, value=data.value, endian=data.endian)
@classmethod
def new(cls, value: int): # -> Length16
data = Convert.uint16data_from_value(value=value)
return cls(data=data, value=data.value, endian=data.endian)
class Length32(UInt16Data, Length):
""" Fixed size Length (32 bits) """
ZERO = None # Length32.parse(data=UInt32Data.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> Length32
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, UInt32Data):
data = Convert.uint32data_from_data(data=data)
if data is not None:
return cls(data=data, value=data.value, endian=data.endian)
@classmethod
def new(cls, value: int): # -> Length32
data = Convert.uint32data_from_value(value=value)
return cls(data=data, value=data.value, endian=data.endian)
class VarLength(VarIntData, Length):
""" Variable size Length """
ZERO = None # VarLength.parse(data=VarIntData.ZERO)
# noinspection PyUnusedLocal
@classmethod
def parse(cls, data: Union[bytes, bytearray, ByteArray], tag: Optional[Tag] = None): # -> VarLength
""" parse Length """
if isinstance(data, cls):
return data
elif not isinstance(data, VarIntData):
data = VarIntData.from_data(data=data)
if data is not None:
return cls(data=data, value=data.value)
@classmethod
def new(cls, value: int): # -> VarLength
data = VarIntData.from_int(value=value)
return cls(data=data, value=data.value)
Length8.ZERO = Length8.parse(data=UInt8Data.ZERO)
Length16.ZERO = Length16.parse(data=UInt16Data.ZERO)
Length32.ZERO = Length32.parse(data=UInt32Data.ZERO)
VarLength.ZERO = VarLength.parse(data=VarIntData.ZERO)
| en | 0.480255 | # -*- coding: utf-8 -*- # # TLV: Tag Length Value # # Written in 2020 by Moky <<EMAIL>> # # ============================================================================== # MIT License # # Copyright (c) 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ============================================================================== 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Value (variable) .... 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ TLV Length # Length Length Parser Parse Length from data with Tag Lengths ~~~~~~~ Fixed size Length (8 bits) # Length8.parse(data=UInt8Data.ZERO) # noinspection PyUnusedLocal # -> Length8 parse Length # -> Length8 Fixed size Length (16 bits) # Length16.parse(data=UInt16Data.ZERO) # noinspection PyUnusedLocal # -> Length16 parse Length # -> Length16 Fixed size Length (32 bits) # Length32.parse(data=UInt32Data.ZERO) # noinspection PyUnusedLocal # -> Length32 parse Length # -> Length32 Variable size Length # VarLength.parse(data=VarIntData.ZERO) # noinspection PyUnusedLocal # -> VarLength parse Length # -> VarLength | 1.56947 | 2 |
src/problema3.py | victoragcosta/Trab2_IPI | 0 | 6618000 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from funcoes import *
img = cv2.imread('img/cookies.tif')
cv2.imshow('Original', cv2.resize(img, None, fx=2, fy=2))
_, binary = cv2.threshold(img, 105, 255, cv2.THRESH_BINARY)
cv2.imshow('Binária', cv2.resize(binary, None, fx=2, fy=2))
# plt.imshow(binary)
# plt.show()
# Usa-se um kernel grande o suficiente para sumir com o cookie mordido
# porém pequeno o suficiente para não destruir completamente o cookie normal
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (118,118))
# Erode-se o cookie mordido completamente
removed = cv2.morphologyEx(binary, cv2.MORPH_ERODE, kernel)
cv2.imshow('Erodido cookie mordido', cv2.resize(removed, None, fx=2, fy=2))
# Dilata-se o cookie normal ao seu tamanho original
restored = cv2.morphologyEx(removed, cv2.MORPH_DILATE, kernel)
cv2.imshow('Dilatado cookie normal', cv2.resize(restored, None, fx=2, fy=2))
# Nota: isso é basicamente uma abertura
# Multiplica-se elemento a elemento a imagem original e a máscara obtida
result = img * restored
cv2.imshow('Obtido cookie normal', cv2.resize(result, None, fx=2, fy=2))
# cv2.imwrite('img/cookies.png', img)
# cv2.imwrite('img/resultado/problema3_binaria.png', binary)
# cv2.imwrite('img/resultado/problema3_removido.png', removed)
# cv2.imwrite('img/resultado/problema3_restaurado.png', restored)
# cv2.imwrite('img/resultado/problema3_resultado.png', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
| import numpy as np
import cv2
import matplotlib.pyplot as plt
from funcoes import *
img = cv2.imread('img/cookies.tif')
cv2.imshow('Original', cv2.resize(img, None, fx=2, fy=2))
_, binary = cv2.threshold(img, 105, 255, cv2.THRESH_BINARY)
cv2.imshow('Binária', cv2.resize(binary, None, fx=2, fy=2))
# plt.imshow(binary)
# plt.show()
# Usa-se um kernel grande o suficiente para sumir com o cookie mordido
# porém pequeno o suficiente para não destruir completamente o cookie normal
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (118,118))
# Erode-se o cookie mordido completamente
removed = cv2.morphologyEx(binary, cv2.MORPH_ERODE, kernel)
cv2.imshow('Erodido cookie mordido', cv2.resize(removed, None, fx=2, fy=2))
# Dilata-se o cookie normal ao seu tamanho original
restored = cv2.morphologyEx(removed, cv2.MORPH_DILATE, kernel)
cv2.imshow('Dilatado cookie normal', cv2.resize(restored, None, fx=2, fy=2))
# Nota: isso é basicamente uma abertura
# Multiplica-se elemento a elemento a imagem original e a máscara obtida
result = img * restored
cv2.imshow('Obtido cookie normal', cv2.resize(result, None, fx=2, fy=2))
# cv2.imwrite('img/cookies.png', img)
# cv2.imwrite('img/resultado/problema3_binaria.png', binary)
# cv2.imwrite('img/resultado/problema3_removido.png', removed)
# cv2.imwrite('img/resultado/problema3_restaurado.png', restored)
# cv2.imwrite('img/resultado/problema3_resultado.png', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
| pt | 0.87278 | # plt.imshow(binary) # plt.show() # Usa-se um kernel grande o suficiente para sumir com o cookie mordido # porém pequeno o suficiente para não destruir completamente o cookie normal # Erode-se o cookie mordido completamente # Dilata-se o cookie normal ao seu tamanho original # Nota: isso é basicamente uma abertura # Multiplica-se elemento a elemento a imagem original e a máscara obtida # cv2.imwrite('img/cookies.png', img) # cv2.imwrite('img/resultado/problema3_binaria.png', binary) # cv2.imwrite('img/resultado/problema3_removido.png', removed) # cv2.imwrite('img/resultado/problema3_restaurado.png', restored) # cv2.imwrite('img/resultado/problema3_resultado.png', result) | 2.716714 | 3 |
corefacility/roi/tests/entity_objects/rectangular_roi_set_object.py | serik1987/corefacility | 0 | 6618001 | from random import randint
from core.test.entity_set.entity_set_objects.entity_set_object import EntitySetObject
from roi.entity import RectangularRoi
class RectangularRoiSetObject(EntitySetObject):
"""
Represents a container where testing ROIs may be represented
"""
_entity_class = RectangularRoi
_map_set_object = None
ROI_NUMBER = 5
def __init__(self, map_set_object, _entity_list=None):
"""
Initializes a set of certain custom entity objects and adds such objects to the database.
Values of the object fields shall be returned by the data_provider function.
:param map_set_object: The parent map set object
:param _entity_list: This is an internal argument. Don't use it.
"""
self._map_set_object = map_set_object
super().__init__(_entity_list)
def data_provider(self):
"""
Defines properties of custom entity objects created in the constructor.
:return: list of field_name => field_value dictionary reflecting properties of a certain user
"""
return [
dict(left=randint(1, 512), right=randint(1, 512), top=randint(1, 512), bottom=randint(1, 512),
map=current_map)
for n in range(self.ROI_NUMBER)
for current_map in self._map_set_object
]
def clone(self):
"""
Returns an exact copy of the entity set. During the copy process the entity list but not entities itself
will be copied
:return: the cloned object
"""
return self.__class__(self._map_set_object.clone(), _entity_list=list(self._entities))
def filter_by_map(self, imaging_map):
"""
Destroys all ROIs within the container that does NOT belong to a given map
:param imaging_map: a given map
:return: nothing
"""
self._entities = list(filter(lambda roi: roi.map.id == imaging_map.id, self._entities))
| from random import randint
from core.test.entity_set.entity_set_objects.entity_set_object import EntitySetObject
from roi.entity import RectangularRoi
class RectangularRoiSetObject(EntitySetObject):
"""
Represents a container where testing ROIs may be represented
"""
_entity_class = RectangularRoi
_map_set_object = None
ROI_NUMBER = 5
def __init__(self, map_set_object, _entity_list=None):
"""
Initializes a set of certain custom entity objects and adds such objects to the database.
Values of the object fields shall be returned by the data_provider function.
:param map_set_object: The parent map set object
:param _entity_list: This is an internal argument. Don't use it.
"""
self._map_set_object = map_set_object
super().__init__(_entity_list)
def data_provider(self):
"""
Defines properties of custom entity objects created in the constructor.
:return: list of field_name => field_value dictionary reflecting properties of a certain user
"""
return [
dict(left=randint(1, 512), right=randint(1, 512), top=randint(1, 512), bottom=randint(1, 512),
map=current_map)
for n in range(self.ROI_NUMBER)
for current_map in self._map_set_object
]
def clone(self):
"""
Returns an exact copy of the entity set. During the copy process the entity list but not entities itself
will be copied
:return: the cloned object
"""
return self.__class__(self._map_set_object.clone(), _entity_list=list(self._entities))
def filter_by_map(self, imaging_map):
"""
Destroys all ROIs within the container that does NOT belong to a given map
:param imaging_map: a given map
:return: nothing
"""
self._entities = list(filter(lambda roi: roi.map.id == imaging_map.id, self._entities))
| en | 0.723691 | Represents a container where testing ROIs may be represented Initializes a set of certain custom entity objects and adds such objects to the database. Values of the object fields shall be returned by the data_provider function. :param map_set_object: The parent map set object :param _entity_list: This is an internal argument. Don't use it. Defines properties of custom entity objects created in the constructor. :return: list of field_name => field_value dictionary reflecting properties of a certain user Returns an exact copy of the entity set. During the copy process the entity list but not entities itself will be copied :return: the cloned object Destroys all ROIs within the container that does NOT belong to a given map :param imaging_map: a given map :return: nothing | 2.889831 | 3 |
minecraft_mod_manager/app/download/download.py | lospejos/minecraft-mod-manager | 56 | 6618002 | <gh_stars>10-100
from typing import List, Sequence
from minecraft_mod_manager.core.utils.latest_version_finder import LatestVersionFinder
from ...core.entities.mod import Mod, ModArg
from ...core.entities.version_info import VersionInfo
from ...core.errors.mod_not_found_exception import ModNotFoundException
from ...utils.logger import LogColors, Logger
from .download_repo import DownloadRepo
class Download:
def __init__(self, repo: DownloadRepo):
self._repo = repo
def find_download_and_install(self, mods: Sequence[Mod]) -> None:
mods_not_found: List[ModNotFoundException] = []
# Find latest version of mod
for mod in mods:
try:
Logger.info(mod.id, LogColors.bold)
mod.sites = self._repo.search_for_mod(mod)
versions = self._repo.get_versions(mod)
latest_version = LatestVersionFinder.find_latest_version(mod, versions, filter=True)
if latest_version:
Logger.verbose("⬇ Downloading...", indent=1)
downloaded_mod = self._download(mod, latest_version)
self._update_mod_from_file(downloaded_mod)
self._repo.update_mod(downloaded_mod)
self.on_version_found(mod, downloaded_mod)
else:
self.on_version_not_found(mod, versions)
except ModNotFoundException as exception:
Logger.info("🔺 Mod not found on any site...", LogColors.red, indent=1)
mods_not_found.append(exception)
# Print errors
if len(mods_not_found) > 0:
Logger.info("🔺 Mods not found", LogColors.bold + LogColors.red)
for error in mods_not_found:
error.print_message()
def on_version_found(self, old: Mod, new: Mod) -> None:
raise NotImplementedError("Not implemented in subclass")
def on_version_not_found(self, mod: Mod, versions: List[VersionInfo]) -> None:
raise NotImplementedError("Not implemented in subclass")
def _update_mod_from_file(self, mod: Mod) -> None:
if mod.file:
installed_mod = self._repo.get_mod_from_file(mod.file)
if installed_mod:
mod.id = installed_mod.id
mod.name = installed_mod.name
mod.version = installed_mod.version
def _download(self, mod: ModArg, latest_version: VersionInfo) -> Mod:
downloaded_file = self._repo.download(latest_version.download_url, latest_version.filename)
sites = mod.sites
if not sites:
sites = {}
add_mod = Mod(
id=mod.id,
name=mod.id,
sites=sites,
file=downloaded_file.name,
upload_time=latest_version.upload_time,
)
return add_mod
| from typing import List, Sequence
from minecraft_mod_manager.core.utils.latest_version_finder import LatestVersionFinder
from ...core.entities.mod import Mod, ModArg
from ...core.entities.version_info import VersionInfo
from ...core.errors.mod_not_found_exception import ModNotFoundException
from ...utils.logger import LogColors, Logger
from .download_repo import DownloadRepo
class Download:
def __init__(self, repo: DownloadRepo):
self._repo = repo
def find_download_and_install(self, mods: Sequence[Mod]) -> None:
mods_not_found: List[ModNotFoundException] = []
# Find latest version of mod
for mod in mods:
try:
Logger.info(mod.id, LogColors.bold)
mod.sites = self._repo.search_for_mod(mod)
versions = self._repo.get_versions(mod)
latest_version = LatestVersionFinder.find_latest_version(mod, versions, filter=True)
if latest_version:
Logger.verbose("⬇ Downloading...", indent=1)
downloaded_mod = self._download(mod, latest_version)
self._update_mod_from_file(downloaded_mod)
self._repo.update_mod(downloaded_mod)
self.on_version_found(mod, downloaded_mod)
else:
self.on_version_not_found(mod, versions)
except ModNotFoundException as exception:
Logger.info("🔺 Mod not found on any site...", LogColors.red, indent=1)
mods_not_found.append(exception)
# Print errors
if len(mods_not_found) > 0:
Logger.info("🔺 Mods not found", LogColors.bold + LogColors.red)
for error in mods_not_found:
error.print_message()
def on_version_found(self, old: Mod, new: Mod) -> None:
raise NotImplementedError("Not implemented in subclass")
def on_version_not_found(self, mod: Mod, versions: List[VersionInfo]) -> None:
raise NotImplementedError("Not implemented in subclass")
def _update_mod_from_file(self, mod: Mod) -> None:
if mod.file:
installed_mod = self._repo.get_mod_from_file(mod.file)
if installed_mod:
mod.id = installed_mod.id
mod.name = installed_mod.name
mod.version = installed_mod.version
def _download(self, mod: ModArg, latest_version: VersionInfo) -> Mod:
downloaded_file = self._repo.download(latest_version.download_url, latest_version.filename)
sites = mod.sites
if not sites:
sites = {}
add_mod = Mod(
id=mod.id,
name=mod.id,
sites=sites,
file=downloaded_file.name,
upload_time=latest_version.upload_time,
)
return add_mod | en | 0.446077 | # Find latest version of mod # Print errors | 2.352501 | 2 |
output/models/saxon_data/wild/wild075_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 6618003 | <gh_stars>1-10
from output.models.saxon_data.wild.wild075_xsd.wild075 import (
A,
B,
Root,
Zing,
)
__all__ = [
"A",
"B",
"Root",
"Zing",
]
| from output.models.saxon_data.wild.wild075_xsd.wild075 import (
A,
B,
Root,
Zing,
)
__all__ = [
"A",
"B",
"Root",
"Zing",
] | none | 1 | 1.400507 | 1 | |
src/logbesselk/sca.py | tk2lab/logbesselk | 0 | 6618004 | import tensorflow as tf
from . import math as tk
from .utils import wrap_log_k
from .series import _log_bessel_ku as log_ku_small_x
from .cfraction import _log_bessel_ku as log_ku_large_x
from .utils import log_bessel_recurrence
from .asymptotic import _log_bessel_k as log_k_large_v
@wrap_log_k
def log_bessel_k(v, x):
return _log_bessel_k(v, x)
def _log_bessel_k(v, x, return_counter=False):
x = tf.convert_to_tensor(x)
v = tf.convert_to_tensor(v, x.dtype)
v = tk.abs(v)
n = tk.round(v)
u = v - n
small_v_ = v < 25.0
small_x_ = x < 1.6 + 0.5 * tk.log(v + 1.)
small_x = small_x_ & small_v_ & (x > 0.)
large_x = ~small_x_ & small_v_
small_v = small_v_ & (x > 0.)
large_v = ~small_v_ & (x > 0.)
lk0s, lk1s, cs = log_ku_small_x(u, x, small_x, return_counter=True)
lk0l, lk1l, cl = log_ku_large_x(u, x, large_x, return_counter=True)
lk0 = tf.where(small_x, lk0s, lk0l)
lk1 = tf.where(small_x, lk1s, lk1l)
out_small_v = log_bessel_recurrence(lk0, lk1, u, n, x, small_v)[0]
out_large_v, cv = log_k_large_v(v, x, large_v, return_counter=True)
out = tf.cast(tk.nan, x.dtype) # x < 0.
out = tf.where(tf.equal(x, 0.), tf.cast(tk.inf, x.dtype), out)
out = tf.where(small_v, out_small_v, out)
out = tf.where(large_v, out_large_v, out)
if return_counter:
return out, (cs, cl, cv)
return out
| import tensorflow as tf
from . import math as tk
from .utils import wrap_log_k
from .series import _log_bessel_ku as log_ku_small_x
from .cfraction import _log_bessel_ku as log_ku_large_x
from .utils import log_bessel_recurrence
from .asymptotic import _log_bessel_k as log_k_large_v
@wrap_log_k
def log_bessel_k(v, x):
return _log_bessel_k(v, x)
def _log_bessel_k(v, x, return_counter=False):
x = tf.convert_to_tensor(x)
v = tf.convert_to_tensor(v, x.dtype)
v = tk.abs(v)
n = tk.round(v)
u = v - n
small_v_ = v < 25.0
small_x_ = x < 1.6 + 0.5 * tk.log(v + 1.)
small_x = small_x_ & small_v_ & (x > 0.)
large_x = ~small_x_ & small_v_
small_v = small_v_ & (x > 0.)
large_v = ~small_v_ & (x > 0.)
lk0s, lk1s, cs = log_ku_small_x(u, x, small_x, return_counter=True)
lk0l, lk1l, cl = log_ku_large_x(u, x, large_x, return_counter=True)
lk0 = tf.where(small_x, lk0s, lk0l)
lk1 = tf.where(small_x, lk1s, lk1l)
out_small_v = log_bessel_recurrence(lk0, lk1, u, n, x, small_v)[0]
out_large_v, cv = log_k_large_v(v, x, large_v, return_counter=True)
out = tf.cast(tk.nan, x.dtype) # x < 0.
out = tf.where(tf.equal(x, 0.), tf.cast(tk.inf, x.dtype), out)
out = tf.where(small_v, out_small_v, out)
out = tf.where(large_v, out_large_v, out)
if return_counter:
return out, (cs, cl, cv)
return out
| none | 1 | 2.244512 | 2 | |
ribosome/scmtools/errors.py | alexandervpetrov/ribosome | 2 | 6618005 |
class ScmError(Exception):
pass
class CommandRunError(Exception):
pass
|
class ScmError(Exception):
pass
class CommandRunError(Exception):
pass
| none | 1 | 1.262867 | 1 | |
gitlab_tweeter/gatapp/apps.py | peijun-dev/gitlab_activities_tweeter | 0 | 6618006 | <gh_stars>0
from django.apps import AppConfig
class GatappConfig(AppConfig):
name = 'gatapp'
| from django.apps import AppConfig
class GatappConfig(AppConfig):
name = 'gatapp' | none | 1 | 1.112819 | 1 | |
tests/test_clamp.py | naturalness/sensibility | 17 | 6618007 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest
from hypothesis import given # type: ignore
from hypothesis.strategies import floats # type: ignore
from sensibility.utils import clamp
def test_clamp_nan() -> None:
"""
Clamp should raise when given NaN.
"""
from math import nan
with pytest.raises(FloatingPointError):
clamp(nan)
@given(floats(allow_nan=False))
def test_clamp(x: float) -> None:
assert 0. <= clamp(x) <= 1.
| #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest
from hypothesis import given # type: ignore
from hypothesis.strategies import floats # type: ignore
from sensibility.utils import clamp
def test_clamp_nan() -> None:
"""
Clamp should raise when given NaN.
"""
from math import nan
with pytest.raises(FloatingPointError):
clamp(nan)
@given(floats(allow_nan=False))
def test_clamp(x: float) -> None:
assert 0. <= clamp(x) <= 1.
| en | 0.290089 | #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # type: ignore # type: ignore Clamp should raise when given NaN. | 2.682336 | 3 |
apiserver/decrypter.py | MadDonkey/DongTai-openapi | 4 | 6618008 | <reponame>MadDonkey/DongTai-openapi
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# datetime:2021/1/12 下午7:49
# software: PyCharm
# project: lingzhi-agent-server
import gzip
import json
def parse_data(stream_data):
"""从http request解析iast agent上报的json数据
步骤:
1.从http request对象读取二进制流
2.gzip解压缩
4.json反序列化
:param stream_data: POST请求的流式对象
:return: iast agent上报的json数据,如果解压缩、解密过程失败,则抛出异常
"""
data = gzip.decompress(stream_data).decode('utf-8')
objs = json.loads(data)
return objs
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# datetime:2021/1/12 下午7:49
# software: PyCharm
# project: lingzhi-agent-server
import gzip
import json
def parse_data(stream_data):
"""从http request解析iast agent上报的json数据
步骤:
1.从http request对象读取二进制流
2.gzip解压缩
4.json反序列化
:param stream_data: POST请求的流式对象
:return: iast agent上报的json数据,如果解压缩、解密过程失败,则抛出异常
"""
data = gzip.decompress(stream_data).decode('utf-8')
objs = json.loads(data)
return objs | zh | 0.388855 | #!/usr/bin/env python # -*- coding:utf-8 -*- # author:owefsad # datetime:2021/1/12 下午7:49 # software: PyCharm # project: lingzhi-agent-server 从http request解析iast agent上报的json数据 步骤: 1.从http request对象读取二进制流 2.gzip解压缩 4.json反序列化 :param stream_data: POST请求的流式对象 :return: iast agent上报的json数据,如果解压缩、解密过程失败,则抛出异常 | 2.402063 | 2 |
python/greedy/1090_largest_values_from_labels.py | linshaoyong/leetcode | 6 | 6618009 | class Solution(object):
def largestValsFromLabels(self, values, labels, num_wanted, use_limit):
"""
:type values: List[int]
:type labels: List[int]
:type num_wanted: int
:type use_limit: int
:rtype: int
"""
res, num, used = 0, 0, {}
for value, label in sorted(list(zip(values, labels)), reverse=True):
if num >= num_wanted:
break
u = used.get(label, 0)
if u >= use_limit:
continue
res += value
num += 1
used[label] = u + 1
return res
def test_largest_vals_from_labels():
    """Table-driven check of the greedy selection against known answers."""
    solver = Solution()
    cases = [
        (([5, 4, 3, 2, 1], [1, 1, 2, 2, 3], 3, 1), 9),
        (([5, 4, 3, 2, 1], [1, 3, 3, 3, 2], 3, 2), 12),
        (([9, 8, 8, 7, 6], [0, 0, 0, 1, 1], 3, 1), 16),
        (([9, 8, 8, 7, 6], [0, 0, 0, 1, 1], 3, 2), 24),
    ]
    for args, expected in cases:
        assert solver.largestValsFromLabels(*args) == expected
| class Solution(object):
def largestValsFromLabels(self, values, labels, num_wanted, use_limit):
"""
:type values: List[int]
:type labels: List[int]
:type num_wanted: int
:type use_limit: int
:rtype: int
"""
res, num, used = 0, 0, {}
for value, label in sorted(list(zip(values, labels)), reverse=True):
if num >= num_wanted:
break
u = used.get(label, 0)
if u >= use_limit:
continue
res += value
num += 1
used[label] = u + 1
return res
def test_largest_vals_from_labels():
s = Solution()
assert 9 == s.largestValsFromLabels([5, 4, 3, 2, 1], [1, 1, 2, 2, 3], 3, 1)
assert 12 == s.largestValsFromLabels(
[5, 4, 3, 2, 1], [1, 3, 3, 3, 2], 3, 2)
assert 16 == s.largestValsFromLabels(
[9, 8, 8, 7, 6], [0, 0, 0, 1, 1], 3, 1)
assert 24 == s.largestValsFromLabels(
[9, 8, 8, 7, 6], [0, 0, 0, 1, 1], 3, 2)
| en | 0.268753 | :type values: List[int] :type labels: List[int] :type num_wanted: int :type use_limit: int :rtype: int | 3.236067 | 3 |
DetectFromVideo/Utils/DebugTimer.py | robdobsn/CatDeterV3 | 0 | 6618010 | import time
class DebugTimer():
    """Accumulates wall-clock timings for a fixed set of labelled counters.

    Call start(i)/end(i) around the code to measure; index i addresses the
    label countLabels[i].
    """

    def __init__(self, countLabels):
        n = len(countLabels)
        self.counts = [0] * n   # completed start/end cycles per counter
        self.starts = [0] * n   # timestamp of the last start() per counter
        self.times = [0] * n    # accumulated elapsed seconds per counter
        self.countLabels = countLabels

    def start(self, i):
        """Record the start timestamp for counter i."""
        self.starts[i] = time.time()

    def end(self, i):
        """Accumulate the time elapsed since the matching start(i)."""
        elapsed = time.time() - self.starts[i]
        self.times[i] += elapsed
        self.counts[i] += 1

    def mean(self, i):
        """Average duration per cycle for counter i; 0 if never completed."""
        return self.times[i] / self.counts[i] if self.counts[i] > 0 else 0

    def getTimings(self):
        """Return one dict per counter: name, total, count, and mean."""
        return [
            {"n": label, "t": self.times[idx], "i": self.counts[idx], "m": self.mean(idx)}
            for idx, label in enumerate(self.countLabels)
        ]

    def printTimings(self):
        """Print a tab-separated summary line per counter."""
        for tim in self.getTimings():
            print("{:20}\t{:.3f}\t{:.3f}\t{}".format(tim['n'], tim['m'], tim['t'], tim['i']))
| import time
class DebugTimer():
def __init__(self, countLabels):
self.counts = [0] * len(countLabels)
self.starts = [0] * len(countLabels)
self.times = [0] * len(countLabels)
self.countLabels = countLabels
def start(self, i):
self.starts[i] = time.time()
def end(self, i):
self.times[i] += time.time() - self.starts[i]
self.counts[i] += 1
def mean(self, i):
if self.counts[i] > 0:
return self.times[i] / self.counts[i]
return 0
def getTimings(self):
timeVals = []
for i in range(len(self.countLabels)):
timeVal = { "n": self.countLabels[i], "t": self.times[i], "i": self.counts[i], "m": self.mean(i)}
timeVals.append(timeVal)
return timeVals
def printTimings(self):
timings = self.getTimings()
for tim in timings:
print("{:20}\t{:.3f}\t{:.3f}\t{}".format(tim['n'],tim['m'],tim['t'],tim['i']))
| none | 1 | 3.52633 | 4 | |
apps/links/urls.py | MeirKriheli/debian.org.il | 0 | 6618011 | <filename>apps/links/urls.py
from django.conf.urls import url
from .views import LinksIndexView, LinkDetailView, LinksByTagView
app_name = 'links'  # URL namespace: routes reverse as 'links:<name>'
urlpatterns = [
    # Landing page listing all links.
    url(
        regex=r'^$',
        view=LinksIndexView.as_view(),
        name='index'),
    # Detail page for a single link, addressed by its slug.
    url(
        regex=r'^(?P<slug>[0-9A-Za-z-_]+)/$',
        view=LinkDetailView.as_view(),
        name='link'),
    # Links filtered by a tag slug.
    url(
        regex=r'^tag/(?P<slug>[0-9A-Za-z-_]+)/$',
        view=LinksByTagView.as_view(),
        name='tag'),
]
| <filename>apps/links/urls.py
from django.conf.urls import url
from .views import LinksIndexView, LinkDetailView, LinksByTagView
app_name = 'links'
urlpatterns = [
url(
regex=r'^$',
view=LinksIndexView.as_view(),
name='index'),
url(
regex=r'^(?P<slug>[0-9A-Za-z-_]+)/$',
view=LinkDetailView.as_view(),
name='link'),
url(
regex=r'^tag/(?P<slug>[0-9A-Za-z-_]+)/$',
view=LinksByTagView.as_view(),
name='tag'),
]
| none | 1 | 1.990751 | 2 | |
kafka_utils/kafka_consumer_manager/commands/rename_group.py | dbgrigsby/kafka-utils | 0 | 6618012 | <reponame>dbgrigsby/kafka-utils
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
from .offset_manager import OffsetManagerBase
from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import get_current_consumer_offsets
from kafka_utils.util.offsets import nullify_offsets
from kafka_utils.util.offsets import set_consumer_offsets
class RenameGroup(OffsetManagerBase):
    """Sub-command that migrates a consumer group's offset metadata in
    Zookeeper from one group ID to another."""

    @classmethod
    def setup_subparser(cls, subparsers):
        """Register the ``rename_group`` sub-command and its arguments."""
        sub = subparsers.add_parser(
            "rename_group",
            description="Rename specified consumer group ID to a new name. "
                        "This tool shall migrate all offset metadata in Zookeeper.",
            add_help=False,
        )
        sub.add_argument(
            "-h", "--help", action="help",
            help="Show this help message and exit.",
        )
        sub.add_argument(
            'old_groupid',
            help="Consumer Group ID to be renamed.",
        )
        sub.add_argument(
            'new_groupid',
            help="New name for the consumer group ID.",
        )
        sub.set_defaults(command=cls.run)

    @classmethod
    def run(cls, args, cluster_config):
        """Entry point: validate arguments, connect to Kafka, migrate offsets."""
        if args.old_groupid == args.new_groupid:
            print(
                "Error: Old group ID and new group ID are the same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        kafka_client = KafkaToolClient(cluster_config.broker_list)
        kafka_client.load_metadata_for_topics()
        topics_dict = cls.preprocess_args(
            groupid=args.old_groupid,
            topic=None,
            partitions=None,
            cluster_config=cluster_config,
            client=kafka_client,
        )
        cls.rename_group(
            kafka_client,
            args.old_groupid,
            args.new_groupid,
            topics_dict,
        )

    @classmethod
    def rename_group(cls, client, old_groupid, new_groupid, topics):
        """Copy the old group's current offsets to the new group, then null
        the offsets on the old group so it no longer holds positions."""
        current_offsets = get_current_consumer_offsets(
            client,
            old_groupid,
            topics,
        )
        set_consumer_offsets(client, new_groupid, current_offsets)
        set_consumer_offsets(
            client,
            old_groupid,
            nullify_offsets(topics),
        )
| # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
from .offset_manager import OffsetManagerBase
from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import get_current_consumer_offsets
from kafka_utils.util.offsets import nullify_offsets
from kafka_utils.util.offsets import set_consumer_offsets
class RenameGroup(OffsetManagerBase):
@classmethod
def setup_subparser(cls, subparsers):
parser_rename_group = subparsers.add_parser(
"rename_group",
description="Rename specified consumer group ID to a new name. "
"This tool shall migrate all offset metadata in Zookeeper.",
add_help=False
)
parser_rename_group.add_argument(
"-h", "--help", action="help",
help="Show this help message and exit."
)
parser_rename_group.add_argument(
'old_groupid',
help="Consumer Group ID to be renamed."
)
parser_rename_group.add_argument(
'new_groupid',
help="New name for the consumer group ID."
)
parser_rename_group.set_defaults(command=cls.run)
@classmethod
def run(cls, args, cluster_config):
if args.old_groupid == args.new_groupid:
print(
"Error: Old group ID and new group ID are the same.",
file=sys.stderr,
)
sys.exit(1)
# Setup the Kafka client
client = KafkaToolClient(cluster_config.broker_list)
client.load_metadata_for_topics()
topics_dict = cls.preprocess_args(
groupid=args.old_groupid,
topic=None,
partitions=None,
cluster_config=cluster_config,
client=client,
)
cls.rename_group(
client,
args.old_groupid,
args.new_groupid,
topics_dict,
)
@classmethod
def rename_group(
cls,
client,
old_groupid,
new_groupid,
topics,
):
copied_offsets = get_current_consumer_offsets(
client,
old_groupid,
topics,
)
set_consumer_offsets(client, new_groupid, copied_offsets)
set_consumer_offsets(
client,
old_groupid,
nullify_offsets(topics),
) | en | 0.847089 | # -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Setup the Kafka client | 1.889567 | 2 |
codableopt/solver/formulation/variable/solver_category_variable.py | recruit-tech/codable-model-optimizer | 0 | 6618013 | # Copyright 2022 Recruit Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import random
import numpy as np
from codableopt.solver.formulation.variable.solver_variable import SolverVariable
from codableopt.solver.optimizer.optimization_state import OptimizationState
from codableopt.solver.optimizer.entity.proposal_to_move import ProposalToMove
class SolverCategoryVariable(SolverVariable):
    """Solver-side categorical variable stored as a one-hot slice of the
    solution array: one array element per category, exactly one set to 1."""

    def __init__(
            self,
            var_no: int,
            var_index: int,
            name: str,
            categories: List[str]):
        # var_index is the start of this variable's one-hot slice in the
        # flat solution array; the slice spans len(categories) elements.
        super(SolverCategoryVariable, self).__init__(var_no, var_index, name)
        self._categories = categories
        self._category_num = len(categories)

    @property
    def categories(self) -> List[str]:
        # Category labels, in the same order as the one-hot slice.
        return self._categories

    def array_size(self) -> int:
        # One array element per category (one-hot encoding).
        return self._category_num

    def propose_low_penalty_move(self, state: OptimizationState) -> List[ProposalToMove]:
        """Propose switching the hot category to whichever alternative yields
        the lowest penalty score; ties are broken uniformly at random."""
        hot_indexes = [index for index
                       in range(self._var_index, self._var_index + self._category_num)
                       if state.var_array[index] == 1]
        if len(hot_indexes) != 1:
            raise ValueError(f'Category Variable is not one hot encoding.')
        else:
            hot_index = hot_indexes[0]

        minimum_penalty_score = None
        best_proposal_list_groups = []
        # Clearing the current hot element is common to every candidate move.
        proposal_to_cold = ProposalToMove(
            var_no=self._var_no,
            var_index=hot_index,
            pre_value=1,
            new_value=0)
        for new_hot_index in range(self._var_index, self._var_index + self._category_num):
            if new_hot_index != hot_index:
                proposal_to_hot = ProposalToMove(
                    var_no=self._var_no,
                    var_index=new_hot_index,
                    pre_value=0,
                    new_value=1)

                # Compare candidates by penalty score.
                penalty_score = state.calculate_penalties([proposal_to_cold, proposal_to_hot])
                proposal_list = [proposal_to_cold, proposal_to_hot]
                if minimum_penalty_score is None or minimum_penalty_score > penalty_score:
                    best_proposal_list_groups = [proposal_list]
                    minimum_penalty_score = penalty_score
                elif minimum_penalty_score == penalty_score:
                    best_proposal_list_groups.append(proposal_list)

        return random.choice(best_proposal_list_groups)

    def propose_random_move_with_range(self, var_value_array: np.array, lower: np.double, upper: np.double) \
            -> List[ProposalToMove]:
        # Ranges are meaningless for categorical variables.
        raise NotImplementedError('propose_random_move_with_range is not implemented!')

    def propose_random_move(self, var_value_array: np.array) -> List[ProposalToMove]:
        """Propose switching the hot category to a uniformly random other one."""
        hot_indexes = \
            [index for index
             in range(self._var_index, self._var_index + self._category_num)
             if var_value_array[index] == 1]
        new_hot_indexes = \
            [index for index
             in range(self._var_index, self._var_index + self._category_num)
             if var_value_array[index] != 1]
        if len(hot_indexes) != 1 or len(new_hot_indexes) == 0:
            raise ValueError(f'Category Variable is not one hot encoding.')
        hot_index = hot_indexes[0]
        new_hot_index = random.choice(new_hot_indexes)

        # Two coupled proposals: clear the old hot element, set the new one.
        return [ProposalToMove(
            var_no=self._var_no,
            var_index=hot_index,
            pre_value=1,
            new_value=0),
            ProposalToMove(
                var_no=self._var_no,
                var_index=new_hot_index,
                pre_value=0,
                new_value=1)]

    def decode(self, var_value_array):
        """Map this variable's one-hot slice back to its category label."""
        array_indexes = var_value_array[self._var_index:(self._var_index + self.array_size())]
        category_index = [index for index, value in enumerate(array_indexes) if value == 1][0]
        return self._categories[category_index]

    def random_values(self):
        """Return a fresh one-hot vector with a uniformly random hot category."""
        var_value = [0] * self._category_num
        var_value[random.randint(0, self._category_num - 1)] = 1
        return var_value

    def encode(self, value: int) -> np.array:
        """One-hot encode a category label; raises if it is not a known category."""
        if value not in self._categories:
            raise ValueError(f'{value} is not in categories of Variable:{self._name}!')
        return np.array([1.0 if category == value else 0 for category in self._categories])
| # Copyright 2022 Recruit Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import random
import numpy as np
from codableopt.solver.formulation.variable.solver_variable import SolverVariable
from codableopt.solver.optimizer.optimization_state import OptimizationState
from codableopt.solver.optimizer.entity.proposal_to_move import ProposalToMove
class SolverCategoryVariable(SolverVariable):
def __init__(
self,
var_no: int,
var_index: int,
name: str,
categories: List[str]):
super(SolverCategoryVariable, self).__init__(var_no, var_index, name)
self._categories = categories
self._category_num = len(categories)
@property
def categories(self) -> List[str]:
return self._categories
def array_size(self) -> int:
return self._category_num
def propose_low_penalty_move(self, state: OptimizationState) -> List[ProposalToMove]:
hot_indexes = [index for index
in range(self._var_index, self._var_index + self._category_num)
if state.var_array[index] == 1]
if len(hot_indexes) != 1:
raise ValueError(f'Category Variable is not one hot encoding.')
else:
hot_index = hot_indexes[0]
minimum_penalty_score = None
best_proposal_list_groups = []
proposal_to_cold = ProposalToMove(
var_no=self._var_no,
var_index=hot_index,
pre_value=1,
new_value=0)
for new_hot_index in range(self._var_index, self._var_index + self._category_num):
if new_hot_index != hot_index:
proposal_to_hot = ProposalToMove(
var_no=self._var_no,
var_index=new_hot_index,
pre_value=0,
new_value=1)
# ペナルティスコアで比較
penalty_score = state.calculate_penalties([proposal_to_cold, proposal_to_hot])
proposal_list = [proposal_to_cold, proposal_to_hot]
if minimum_penalty_score is None or minimum_penalty_score > penalty_score:
best_proposal_list_groups = [proposal_list]
minimum_penalty_score = penalty_score
elif minimum_penalty_score == penalty_score:
best_proposal_list_groups.append(proposal_list)
return random.choice(best_proposal_list_groups)
def propose_random_move_with_range(self, var_value_array: np.array, lower: np.double, upper: np.double) \
-> List[ProposalToMove]:
raise NotImplementedError('propose_random_move_with_range is not implemented!')
def propose_random_move(self, var_value_array: np.array) -> List[ProposalToMove]:
hot_indexes = \
[index for index
in range(self._var_index, self._var_index + self._category_num)
if var_value_array[index] == 1]
new_hot_indexes = \
[index for index
in range(self._var_index, self._var_index + self._category_num)
if var_value_array[index] != 1]
if len(hot_indexes) != 1 or len(new_hot_indexes) == 0:
raise ValueError(f'Category Variable is not one hot encoding.')
hot_index = hot_indexes[0]
new_hot_index = random.choice(new_hot_indexes)
return [ProposalToMove(
var_no=self._var_no,
var_index=hot_index,
pre_value=1,
new_value=0),
ProposalToMove(
var_no=self._var_no,
var_index=new_hot_index,
pre_value=0,
new_value=1)]
def decode(self, var_value_array):
array_indexes = var_value_array[self._var_index:(self._var_index + self.array_size())]
category_index = [index for index, value in enumerate(array_indexes) if value == 1][0]
return self._categories[category_index]
def random_values(self):
var_value = [0] * self._category_num
var_value[random.randint(0, self._category_num - 1)] = 1
return var_value
def encode(self, value: int) -> np.array:
if value not in self._categories:
raise ValueError(f'{value} is not in categories of Variable:{self._name}!')
return np.array([1.0 if category == value else 0 for category in self._categories])
| en | 0.823334 | # Copyright 2022 Recruit Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ペナルティスコアで比較 | 2.194718 | 2 |
backend/projects/api/views.py | requiemofthesouls/erp-chzso_2 | 0 | 6618014 | from rest_framework import viewsets
from projects.api.serializers import ProjectSerializer, TaskSerializer
from projects.models import Project, Task
class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints (list/retrieve/create/update/destroy) for Projects."""
    serializer_class = ProjectSerializer
    queryset = Project.objects.all()
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints (list/retrieve/create/update/destroy) for Tasks."""
    serializer_class = TaskSerializer
    queryset = Task.objects.all()
| from rest_framework import viewsets
from projects.api.serializers import ProjectSerializer, TaskSerializer
from projects.models import Project, Task
class ProjectViewSet(viewsets.ModelViewSet):
serializer_class = ProjectSerializer
queryset = Project.objects.all()
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
queryset = Task.objects.all()
| none | 1 | 1.908253 | 2 | |
checker/main.py | craycrayfish/supermarketchecker | 0 | 6618015 | import os
import telegram
import sqlalchemy as db
from datetime import datetime
# Telegram bot handle; TELEGRAM_TOKEN must be present in the environment.
bot = telegram.Bot(token=os.environ["TELEGRAM_TOKEN"])

# Cloud SQL connection settings, injected via environment variables.
db_user = os.environ.get("DB_USER")
db_pass = os.environ.get("DB_PASS")
db_name = os.environ.get("DB_NAME")
cloud_sql_connection_name = os.environ.get("CLOUD_SQL_CONNECTION_NAME")

# Per-country [metadata table, status table] pairs.
tables = {'UK': ['uk_metadata', 'uk_status']}

engine = db.create_engine(
    # Equivalent URL:
    # postgres+pg8000://<db_user>:<db_pass>@/<db_name>?unix_sock=/cloudsql/<cloud_sql_instance_name>/.s.PGSQL.5432
    db.engine.url.URL(
        drivername='postgres+pg8000',
        username=db_user,
        # BUG FIX: the password was a redacted placeholder (`<PASSWORD>`),
        # which is not valid Python; use the DB_PASS value read above.
        password=db_pass,
        database=db_name,
        query={
            'unix_sock': '/cloudsql/{}/.s.PGSQL.5432'.format(
                cloud_sql_connection_name)
        }
    ),
    # ... Specify additional properties here.
)

# Column order of the rows returned by the supermarket status query; used by
# pretty_print to build a field -> value mapping.
response_format = ['Supermarket', 'Postcode', 'Size', 'Capacity', 'smid',
                   'Postal Area', 'Crowd Size', 'Last Updated', 'orderid', 'smid']
def pretty_print(update, response):
    '''Format each DB result row and send it back as a Telegram message.'''
    for row in response:
        info = dict(zip(response_format, row))
        crowd = info['Crowd Size']
        if crowd <= 1.0:
            # At or below capacity: report a fill percentage.
            info['Status'] = '{}% full with no queue'.format(int(crowd * 100))
        else:
            # Above capacity: the excess fraction translates into queue length.
            queue_len = int((crowd - 1) * int(info['Capacity']))
            info['Status'] = '{} people in queue'.format(queue_len)
        message_lines = [
            '{} @ {}'.format(info['Supermarket'], info['Postcode']),
            'Status: {}'.format(info['Status']),
            'Capacity: {}'.format(info['Capacity']),
            'Last Updated: {}'.format(info['Last Updated'].strftime('%H:%M %d-%b-%y')),
        ]
        update.message.reply_text('\n'.join(message_lines))
    return 'ok'
def load_table(table_name):
    """Reflect an existing database table into a SQLAlchemy Table object."""
    metadata = db.MetaData()
    # autoload pulls column definitions from the live database via `engine`.
    table = db.Table(table_name, metadata, autoload=True, autoload_with=engine)
    return table
def find_supermarkets(post_area, table_name):
    '''Query metadata table to get id and info of nearby supermarkets.

    Returns an unexecuted SELECT restricted to the given postal area.
    '''
    metadata = load_table(table_name)
    nearby_sm = db.select([metadata])\
        .where(metadata.columns.post_area == post_area)
    return nearby_sm
def get_supermarkets_status(supermarkets, table_name):
    '''Query status table to get crowd status and last updated.

    Returns an unexecuted SELECT joining the metadata rows with the status
    table on the supermarket id (smid).

    params:
        supermarkets    select of supermarkets obtained from metadata table
        table_name      status table to join with
    '''
    status = load_table(table_name)
    # Alias so the joined select has unambiguous column sources.
    supermarkets = supermarkets.alias('supermarkets_info')
    crowd_sizes = db.select([supermarkets, status])\
        .select_from(supermarkets.join(status, \
            supermarkets.columns.smid==status.columns.smid))
    return crowd_sizes
def bot_help(update):
    # Reply to /help with usage instructions.
    update.message.reply_text('''
    Please use the /start command to start or restart the bot. \n
    Tell the bot your country and postcode to find out how crowded nearby supermarkets are. Example: /find UK WC1N.
    ''')
    return 'ok'
def bot_start(update):
    '''Activated when /start is entered. Gives instructions for searching crowd size.'''
    update.message.reply_text('''Hi! Please enter /find [country] [post area] to find out how crowded nearby supermarkets are. Example: /find UK WC1N''')
    return 'ok'
def find_crowd_sizes(update, connection):
    '''Main function activated by /find [country] [post area] that queries database.

    NOTE(review): assumes the caller (webhook) has already validated the
    three-token "/find COUNTRY AREA" format; reply[1]/reply[2] are unchecked.
    '''
    reply = update.message.text.split(' ')
    country = reply[1].upper()
    if country not in list(tables):
        update.message.reply_text('Country not supported. We currently support {}'.format(list(tables)))
        return 'ok'
    post_area = str(reply[2]).upper()
    # Build the metadata SELECT, join it with the status table, then execute.
    supermarkets = find_supermarkets(post_area, tables[country][0])
    supermarkets_sizes = get_supermarkets_status(supermarkets, tables[country][1])
    result = connection.execute(supermarkets_sizes).fetchall()
    if len(result) == 0:
        update.message.reply_text('No supermarkets found near you. Try another postarea')
    else:
        pretty_print(update, result)
    return 'ok'
def webhook(request):
    """Telegram webhook entry point: dispatch /start, /help and /find."""
    if request.method == "POST":
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        if update.message:
            # Show a "typing..." indicator while the reply is prepared.
            bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING)
            if update.message.text == '/start':
                bot_start(update)
                return 'ok'
            if update.message.text =='/help':
                bot_help(update)
                return 'ok'
            if '/find' in update.message.text:
                # /find must come with exactly two more tokens: country + area.
                if len(update.message.text.split(' ')) != 3:
                    update.message.reply_text('Unexpected format. Please check.')
                else:
                    connection = engine.connect()
                    find_crowd_sizes(update, connection)
    return 'ok'
| import os
import telegram
import sqlalchemy as db
from datetime import datetime
bot = telegram.Bot(token=os.environ["TELEGRAM_TOKEN"])
db_user = os.environ.get("DB_USER")
db_pass = os.environ.get("DB_PASS")
db_name = os.environ.get("DB_NAME")
cloud_sql_connection_name = os.environ.get("CLOUD_SQL_CONNECTION_NAME")
tables = {'UK': ['uk_metadata', 'uk_status']}
engine = db.create_engine(
# Equivalent URL:
# postgres+pg8000://<db_user>:<db_pass>@/<db_name>?unix_sock=/cloudsql/<cloud_sql_instance_name>/.s.PGSQL.5432
db.engine.url.URL(
drivername='postgres+pg8000',
username=db_user,
password=<PASSWORD>,
database=db_name,
query={
'unix_sock': '/cloudsql/{}/.s.PGSQL.5432'.format(
cloud_sql_connection_name)
}
),
# ... Specify additional properties here.
)
response_format = ['Supermarket', 'Postcode', 'Size', 'Capacity', 'smid', \
'Postal Area', 'Crowd Size', 'Last Updated', 'orderid', 'smid']
def pretty_print(update, response):
'''Formats the response and output each entry as a message'''
for entry in response:
info = {i: j for i, j in zip(response_format, entry)}
if info['Crowd Size'] <= 1.0:
info['Status'] = '{}% full with no queue'\
.format(int(info['Crowd Size'] * 100))
else:
info['Status'] = '{} people in queue'\
.format((int((info['Crowd Size']-1) \
* int(info['Capacity']))))
update.message.reply_text('\n'.join([
'{} @ {}'.format(info['Supermarket'], info['Postcode']),
'Status: {}'.format(info['Status']),
'Capacity: {}'.format(info['Capacity']),
'Last Updated: {}'.format(info['Last Updated'].strftime('%H:%M %d-%b-%y'))
])
)
return 'ok'
def load_table(table_name):
metadata = db.MetaData()
table = db.Table(table_name, metadata, autoload=True, autoload_with=engine)
return table
def find_supermarkets(post_area, table_name):
'''Query metadata table to get id and info of nearby supermarkets.'''
metadata = load_table(table_name)
nearby_sm = db.select([metadata])\
.where(metadata.columns.post_area == post_area)
return nearby_sm
def get_supermarkets_status(supermarkets, table_name):
'''Query status table to get crowd status and last updated.
params:
supermarkets list of supermarkets obtained from metadata table
table_name table to join with
'''
status = load_table(table_name)
supermarkets = supermarkets.alias('supermarkets_info')
crowd_sizes = db.select([supermarkets, status])\
.select_from(supermarkets.join(status, \
supermarkets.columns.smid==status.columns.smid))
return crowd_sizes
def bot_help(update):
update.message.reply_text('''
Please use the /start command to start or restart the bot. \n
Tell the bot your country and postcode to find out how crowded nearby supermarkets are. Example: /find UK WC1N.
''')
return 'ok'
def bot_start(update):
'''Activated when /start is entered. Gives instructions for searching crowd size.'''
update.message.reply_text('''Hi! Please enter /find [country] [post area] to find out how crowded nearby supermarkets are. Example: /find UK WC1N''')
return 'ok'
def find_crowd_sizes(update, connection):
'''Main function activated by /find [country] [post area] that queries database.'''
reply = update.message.text.split(' ')
country = reply[1].upper()
if country not in list(tables):
update.message.reply_text('Country not supported. We currently support {}'.format(list(tables)))
return 'ok'
post_area = str(reply[2]).upper()
supermarkets = find_supermarkets(post_area, tables[country][0])
supermarkets_sizes = get_supermarkets_status(supermarkets, tables[country][1])
result = connection.execute(supermarkets_sizes).fetchall()
if len(result) == 0:
update.message.reply_text('No supermarkets found near you. Try another postarea')
else:
pretty_print(update, result)
return 'ok'
def webhook(request):
if request.method == "POST":
update = telegram.Update.de_json(request.get_json(force=True), bot)
if update.message:
bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING)
if update.message.text == '/start':
bot_start(update)
return 'ok'
if update.message.text =='/help':
bot_help(update)
return 'ok'
if '/find' in update.message.text:
if len(update.message.text.split(' ')) != 3:
update.message.reply_text('Unexpected format. Please check.')
else:
connection = engine.connect()
find_crowd_sizes(update, connection)
return 'ok'
| en | 0.748561 | # Equivalent URL: # postgres+pg8000://<db_user>:<db_pass>@/<db_name>?unix_sock=/cloudsql/<cloud_sql_instance_name>/.s.PGSQL.5432 # ... Specify additional properties here. Formats the response and output each entry as a message Query metadata table to get id and info of nearby supermarkets. Query status table to get crowd status and last updated. params: supermarkets list of supermarkets obtained from metadata table table_name table to join with Please use the /start command to start or restart the bot. \n Tell the bot your country and postcode to find out how crowded nearby supermarkets are. Example: /find UK WC1N. Activated when /start is entered. Gives instructions for searching crowd size. Hi! Please enter /find [country] [post area] to find out how crowded nearby supermarkets are. Example: /find UK WC1N Main function activated by /find [country] [post area] that queries database. | 2.564273 | 3 |
src/spiking_pid/scripts/gazebo_position_controller.py | tudelft/neuro_pid | 1 | 6618016 | #!/usr/bin/env python
import numpy as np
import rospy
import yaml
import rospkg
import os
from geometry_msgs.msg import Point, Quaternion, Vector3
from mav_msgs.msg import Actuators, TorqueThrust
from nav_msgs.msg import Odometry
from std_msgs.msg import Bool
from std_srvs.srv import Empty
from sensor_fusion_comm.srv import InitScale
from body_equations import *
from neuromorphic_altitude_pid import *
from src.adder_numpy import create_csv_from_adder
def odo_callback(data):
    """Odometry subscriber callback: cache the latest pose and twist in
    module-level globals read by the control loop."""
    global pos, att, t, ang_vel, vel
    # t_new = rospy.get_time()
    # # rospy.loginfo(rospy.get_time() - t)
    # t = t_new
    pos = data.pose.pose.position
    att = data.pose.pose.orientation
    vel = data.twist.twist.linear
    ang_vel = data.twist.twist.angular
def waypoint_callback(data):
    """Waypoint subscriber callback: store the latest target position."""
    global waypoint
    waypoint = data
def listener():
    """Main control loop: subscribe to odometry/waypoints and publish rotor
    speed commands at 200 Hz once the first waypoint and fresh odometry arrive."""
    global pos, att, motor_command, pid, pub, att_gt, t, ang_vel, waypoint, vel, bag
    last_time = rospy.get_time()
    t = last_time
    # Large buff_size so a backlog of odometry messages is not queued up.
    rospy.Subscriber("/odometry", Odometry, odo_callback, queue_size=5, buff_size=2**24, tcp_nodelay=True)
    rospy.Subscriber("/waypoints", Point, waypoint_callback, queue_size=1)
    rate = rospy.Rate(200)
    rospy.sleep(3)
    att_last = None
    started = False
    while not rospy.is_shutdown():
        # Arm once a waypoint exists and a new attitude sample has arrived
        # (att != att_last signals fresh odometry — presumably message
        # equality; TODO confirm geometry_msgs comparison semantics).
        if (not started) and (waypoint.x is not None) and (att != att_last):
            started = True
            sim_started.data = True
            status_pub.publish(sim_started)
            rospy.sleep(1)
        if started:
            rtime = rospy.get_time()
            dt = rtime - last_time
            dt = max(dt, 0.002)  # lower-bound dt to avoid numerical blow-ups
            last_time = rtime
            q = att
            eulers = to_euler_angles([q.w, q.x, q.y, q.z])
            pos_d = [waypoint.x, waypoint.y, waypoint.z]
            # Position PID -> per-rotor forces -> rotor angular velocities.
            forces = pid.calculate_rotor_commands(pos_d, [pos.x, pos.y, pos.z], eulers, vel, ang_vel, dt)
            rpm = forces_to_omega(forces, pid.att_pid.m_w_inv)
            motor_command.angular_velocities = rpm
            pub.publish(motor_command)
        att_last = att
        rate.sleep()
    rospy.spin()
if __name__ == '__main__':
    # NOTE(review): `global` at module level is a no-op; these names are
    # module globals anyway.
    global pos, motor_command, att, pid, pub, ang_vel, waypoint, bag
    rospack = rospkg.RosPack()
    pos = Point()
    motor_command = Actuators()
    att = Quaternion()
    ang_vel = Vector3()
    vel = Vector3()
    waypoint = Point()
    waypoint.x, waypoint.y, waypoint.z = None, None, None
    #initial waypoint is None, so it will only start after receiving the first waypoint
    gains_file = rospy.get_param("spiking_pid/gains")
    orientation = rospy.get_param("spiking_pid/orientation")
    log_filename = rospy.get_param("ros_logger/filename")
    spiking_precision = rospy.get_param("spiking_pid/precision")
    control_range = rospy.get_param("spiking_pid/control_range")
    # PID gains are read from a YAML file inside the spiking_pid package.
    with open(os.path.join(rospack.get_path('spiking_pid'), gains_file)) as f:
        gains = yaml.load(f, Loader=yaml.FullLoader)
    # Vehicle constants: mass, gravity, rotor thrust/drag coefficients, arm length.
    M = 0.68
    G = 9.8
    K_F = 8.54858e-06
    L = 0.17
    K_D = K_F * 0.016
    pid = PositionPID(gains, M,
                      L,
                      K_F,
                      K_D,
                      with_delay=True,
                      spiking_precision=spiking_precision,
                      orientation=orientation,
                      use_loihi_weights=True)
    # Dump the spiking-adder internals of the altitude PID to CSV logs.
    create_csv_from_adder('error', pid.att_pid.altitude_pid.error_sub)
    create_csv_from_adder('int', pid.att_pid.altitude_pid.int_adder)
    create_csv_from_adder('control', pid.att_pid.altitude_pid.control_adder)
    rospy.init_node('gazebo_position_controller')
    status_pub = rospy.Publisher('/simulation_started', Bool, queue_size=1)
    sim_started = Bool()
    sim_started.data = False
    pub = rospy.Publisher('/command/motor_speed', Actuators, queue_size=1)
    # Create service handle to unpause Gazebo physics
    rospy.wait_for_service("/gazebo/unpause_physics")
    unpauser = rospy.ServiceProxy("/gazebo/unpause_physics", Empty)
    unpaused = unpauser()
    rospy.sleep(3)
    # Send zero motor speed command to gazebo
    motor_command = Actuators()
    motor_command.angular_velocities = [0, 0, 0, 0]
    pub.publish(motor_command)
    # Create service handle to reset the environment
    rospy.wait_for_service("/gazebo/reset_world")
    reset = rospy.ServiceProxy("/gazebo/reset_world", Empty)
    is_reset = reset()
    rospy.sleep(0.5)
    # Initialize the MSF (multi-sensor fusion) pose sensor with unit scale.
    rospy.wait_for_service("hummingbird/msf/pose_sensor/initialize_msf_scale")
    init_srv = InitScale()
    msf_init = rospy.ServiceProxy("hummingbird/msf/pose_sensor/initialize_msf_scale", InitScale)
    msf_started = msf_init(1)
    rospy.sleep(1)
    try:
        listener()
    except rospy.ROSInterruptException:
        pass
import numpy as np
import rospy
import yaml
import rospkg
import os
from geometry_msgs.msg import Point, Quaternion, Vector3
from mav_msgs.msg import Actuators, TorqueThrust
from nav_msgs.msg import Odometry
from std_msgs.msg import Bool
from std_srvs.srv import Empty
from sensor_fusion_comm.srv import InitScale
from body_equations import *
from neuromorphic_altitude_pid import *
from src.adder_numpy import create_csv_from_adder
def odo_callback(data):
global pos, att, t, ang_vel, vel
# t_new = rospy.get_time()
# # rospy.loginfo(rospy.get_time() - t)
# t = t_new
pos = data.pose.pose.position
att = data.pose.pose.orientation
vel = data.twist.twist.linear
ang_vel = data.twist.twist.angular
def waypoint_callback(data):
global waypoint
waypoint = data
def listener():
global pos, att, motor_command, pid, pub, att_gt, t, ang_vel, waypoint, vel, bag
last_time = rospy.get_time()
t = last_time
rospy.Subscriber("/odometry", Odometry, odo_callback, queue_size=5, buff_size=2**24, tcp_nodelay=True)
rospy.Subscriber("/waypoints", Point, waypoint_callback, queue_size=1)
rate = rospy.Rate(200)
rospy.sleep(3)
att_last = None
started = False
while not rospy.is_shutdown():
if (not started) and (waypoint.x is not None) and (att != att_last):
started = True
sim_started.data = True
status_pub.publish(sim_started)
rospy.sleep(1)
if started:
rtime = rospy.get_time()
dt = rtime - last_time
dt = max(dt, 0.002)
last_time = rtime
q = att
eulers = to_euler_angles([q.w, q.x, q.y, q.z])
pos_d = [waypoint.x, waypoint.y, waypoint.z]
forces = pid.calculate_rotor_commands(pos_d, [pos.x, pos.y, pos.z], eulers, vel, ang_vel, dt)
rpm = forces_to_omega(forces, pid.att_pid.m_w_inv)
motor_command.angular_velocities = rpm
pub.publish(motor_command)
att_last = att
rate.sleep()
rospy.spin()
if __name__ == '__main__':
global pos, motor_command, att, pid, pub, ang_vel, waypoint, bag
rospack = rospkg.RosPack()
pos = Point()
motor_command = Actuators()
att = Quaternion()
ang_vel = Vector3()
vel = Vector3()
waypoint = Point()
waypoint.x, waypoint.y, waypoint.z = None, None, None
#initial waypoint is None, so it will only start after receiving the first waypoint
gains_file = rospy.get_param("spiking_pid/gains")
orientation = rospy.get_param("spiking_pid/orientation")
log_filename = rospy.get_param("ros_logger/filename")
spiking_precision = rospy.get_param("spiking_pid/precision")
control_range = rospy.get_param("spiking_pid/control_range")
with open(os.path.join(rospack.get_path('spiking_pid'), gains_file)) as f:
gains = yaml.load(f, Loader=yaml.FullLoader)
M = 0.68
G = 9.8
K_F = 8.54858e-06
L = 0.17
K_D = K_F * 0.016
pid = PositionPID(gains, M,
L,
K_F,
K_D,
with_delay=True,
spiking_precision=spiking_precision,
orientation=orientation,
use_loihi_weights=True)
create_csv_from_adder('error', pid.att_pid.altitude_pid.error_sub)
create_csv_from_adder('int', pid.att_pid.altitude_pid.int_adder)
create_csv_from_adder('control', pid.att_pid.altitude_pid.control_adder)
rospy.init_node('gazebo_position_controller')
status_pub = rospy.Publisher('/simulation_started', Bool, queue_size=1)
sim_started = Bool()
sim_started.data = False
pub = rospy.Publisher('/command/motor_speed', Actuators, queue_size=1)
# Create service handle to unpause Gazebo physics
rospy.wait_for_service("/gazebo/unpause_physics")
unpauser = rospy.ServiceProxy("/gazebo/unpause_physics", Empty)
unpaused = unpauser()
rospy.sleep(3)
# Send zero motor speed command to gazebo
motor_command = Actuators()
motor_command.angular_velocities = [0, 0, 0, 0]
pub.publish(motor_command)
# Create service handle to reset the environment
rospy.wait_for_service("/gazebo/reset_world")
reset = rospy.ServiceProxy("/gazebo/reset_world", Empty)
is_reset = reset()
rospy.sleep(0.5)
rospy.wait_for_service("hummingbird/msf/pose_sensor/initialize_msf_scale")
init_srv = InitScale()
msf_init = rospy.ServiceProxy("hummingbird/msf/pose_sensor/initialize_msf_scale", InitScale)
msf_started = msf_init(1)
rospy.sleep(1)
try:
listener()
except rospy.ROSInterruptException:
pass | en | 0.665415 | #!/usr/bin/env python # t_new = rospy.get_time() # # rospy.loginfo(rospy.get_time() - t) # t = t_new #initial waypoint is None, so it will only start after receiving the first waypoint # Create service handle to unpause Gazebo physics # Send zero motor speed command to gazebo # Create service handle to reset the environment | 1.961856 | 2 |
achilles/blocks.py | movermeyer/django-achilles | 0 | 6618017 | <filename>achilles/blocks.py
from django.conf import settings
from django.template import Context, RequestContext
from django.template.loader import get_template
from django.utils.log import getLogger
import sys
import six
import traceback
from inspect import isclass
from importlib import import_module
from achilles.common import BaseLibrary
from achilles.actions import Library as ActionsLibrary
logger = getLogger(__name__)
class Library(BaseLibrary):
    """
    Blocks library holds a register of all defined blocks
    Use it to define and register new blocks, grouping them under
    a common namespace. See :func:`block`.
    :param namespace: Unique namespace for this register
    """
    # class-level mapping shared by all Library instances (used by BaseLibrary
    # to look registers up by namespace)
    registers = {}
    def __init__(self, namespace=None):
        BaseLibrary.__init__(self, namespace)
    def register(self, name=None):
        # no name given: defer to the base decorator, which derives the name
        # from the decorated object
        if name is None:
            return BaseLibrary.register(self, name)
        # already a Block subclass: register the class directly
        elif isclass(name) and issubclass(name, Block):
            return self._register(name)
        # a plain callable: wrap it into a generated Block subclass first,
        # copying the original function's name onto the class
        elif callable(name):
            res = self._create_class(name)
            res.__name__ = getattr(name, '_decorated_function', name).__name__
            return self._register(res)
        # anything else (e.g. an explicit string name): base registration
        else:
            return BaseLibrary.register(self, name)
    def block(self, name=None, template_name=None, takes_context=False):
        """
        Block register decorator, register a block on the library.
        When decorating a function, this method will automatically create a
        block. The block will use a dict returned by the function as template
        context::
            from achilles import blocks
            register = blocks.Library('myapp')
            @register.block(template_name='foo.html')
            def foo():
                return {
                    'template_var1' : 42,
                    'template_var2' : True,
                }
        When decorating a Block class it will just register it on the library.
        :param name: Name of the block, if None the decorated function name
                     will be taken
        :param template_name: Path of the block template
        :param takes_context: If True, the decorated function will receive
                              the template context as first parameter
        """
        # without a template the plain register path is sufficient
        if not template_name:
            return self.register(name)
        def dec(name):
            # register first, then attach template configuration to the
            # (possibly generated) block class
            res = self.register(name)
            if template_name:
                res.template_name = template_name
            res.takes_context = takes_context
            return res
        return dec
    def _create_class(self, func):
        # Build a Block subclass whose template context is produced by *func*;
        # the dict it returns is merged into the base context.
        class B(Block):
            def get_context_data(self, *args, **kwargs):
                context = super(B, self).get_context_data(*args, **kwargs)
                if self.takes_context:
                    res = func(self.context, *args, **kwargs)
                else:
                    res = func(*args, **kwargs)
                if res != context:
                    context.update(res)
                return context
        return B
def get(name, context=None):
    """
    Retrieve a block with the given name. Example::
        blocks.get('myapp:foo')
    :param name: Fully namespaced block name
    :param context: Optional template context handed to the block constructor
    """
    # make sure all blocks are loaded: import "<app>.blocks" for every app
    for app in settings.INSTALLED_APPS:
        if six.PY2:
            try:
                import_module(app + '.blocks')
            except ImportError:
                # re-raise only when the import failed deeper than the
                # (optional) "<app>.blocks" module itself
                tb = sys.exc_info()[2]
                stack = traceback.extract_tb(tb, 3)
                if len(stack) > 2:
                    raise
        else:
            # importlib.find_loader() is deprecated since 3.4 and removed in
            # Python 3.12; importlib.util.find_spec() is the supported
            # replacement with the same truthiness semantics
            from importlib.util import find_spec
            if find_spec(app + '.blocks'):
                import_module(app + '.blocks')
    return Library.get_global(name)(context)
class Block(object):
    """
    Blocks are parts of the page that can be dynamically rendered. By calling
    the :func:`update` action you can reload any block asynchronously.
    In most cases blocks are automatically created out of functions decorated
    with :func:`Library.block`. For advanced uses you may need to subclass
    this.
    """
    #: Template file that will be used in :func:`render`
    template_name = None
    def __init__(self, context):
        # fall back to an empty template context when none is given
        self.context = context or Context()
    def render(self, *args, **kwargs):
        """
        Render the block: load :attr:`template_name`, fill it with the data
        from :func:`get_context_data` and return the resulting HTML.
        """
        template = get_template(self.template_name)
        context = self.get_context_data(*args, **kwargs)
        return template.render(context)
    def get_context_data(self, *args, **kwargs):
        """
        Return the context that the template renderer receives in
        :func:`render`.
        """
        return self.context
    def update(self, transport, *args, **kwargs):
        """
        Render this block and queue the result on the given achilles transport.
        """
        payload = {
            'name': self.register_name,
            'args': args,
            'kwargs': kwargs,
            'data': self.render(*args, **kwargs),
        }
        transport.data('blocks', []).append(payload)
register = ActionsLibrary('blocks')
@register.action
def update(transport, name, *args, **kwargs):
    """
    Action name: **blocks:update**
    Update a block, if the block doesn't exist on the page nothing will
    happen.
    Blocks may have arguments, this function will pass any argument to
    the block handler. When using arguments, only blocks matching them
    will be updated.
    :param transport: Achilles transport object that is being served
    :param name: Fully namespaced block name
    """
    # bind the current request into a template context for rendering
    context = RequestContext(transport.request, {})
    block = get(name, context)
    block.update(transport, *args, **kwargs)
def render(transport):
    """Return the list of block update payloads queued on the transport."""
    return transport.data('blocks', [])
| <filename>achilles/blocks.py
from django.conf import settings
from django.template import Context, RequestContext
from django.template.loader import get_template
from django.utils.log import getLogger
import sys
import six
import traceback
from inspect import isclass
from importlib import import_module
from achilles.common import BaseLibrary
from achilles.actions import Library as ActionsLibrary
logger = getLogger(__name__)
class Library(BaseLibrary):
"""
Blocks library holds a register of all defined blocks
Use it to define and register new blocks, grouping them under
a common namespace. See :func:`block`.
:param namespace: Unique namespace for this register
"""
registers = {}
def __init__(self, namespace=None):
BaseLibrary.__init__(self, namespace)
def register(self, name=None):
if name is None:
return BaseLibrary.register(self, name)
elif isclass(name) and issubclass(name, Block):
return self._register(name)
elif callable(name):
res = self._create_class(name)
res.__name__ = getattr(name, '_decorated_function', name).__name__
return self._register(res)
else:
return BaseLibrary.register(self, name)
def block(self, name=None, template_name=None, takes_context=False):
"""
Block register decorator, register a block on the library.
When decorating a function, this method will automatically create a
block. The block will use a dict returned by the function as template
context::
from achilles import blocks
register = blocks.Library('myapp')
@register.block(template_name='foo.html')
def foo():
return {
'template_var1' : 42,
'template_var2' : True,
}
When decorating a Block class it will just register it on the library.
:param name: Name of the block, if None the decorated function name
will be taken
:param template_name: Path of the block template
:param takes_context: If True, the decorated function will receive
the template context as first parameter
"""
if not template_name:
return self.register(name)
def dec(name):
res = self.register(name)
if template_name:
res.template_name = template_name
res.takes_context = takes_context
return res
return dec
def _create_class(self, func):
class B(Block):
def get_context_data(self, *args, **kwargs):
context = super(B, self).get_context_data(*args, **kwargs)
if self.takes_context:
res = func(self.context, *args, **kwargs)
else:
res = func(*args, **kwargs)
if res != context:
context.update(res)
return context
return B
def get(name, context=None):
"""
Retrieve a block with the given name. Example::
blocks.get('myapp:foo')
:param name: Fully namespaced block name
"""
# make sure all blocks are loaded
for app in settings.INSTALLED_APPS:
if six.PY2:
try:
import_module(app + '.blocks')
except ImportError:
tb = sys.exc_info()[2]
stack = traceback.extract_tb(tb, 3)
if len(stack) > 2:
raise
else:
from importlib import find_loader
if find_loader(app + '.blocks'):
import_module(app + '.blocks')
return Library.get_global(name)(context)
class Block(object):
"""
Blocks are parts of the page that can be dinamically rendered. By calling
:func:`update` action you can reload any block asynchronously.
In most cases blocks are automatically created out of functions decorated
with :func:`Library.block`. For advanced uses you may need to sublcass
this.
"""
#: Template file that will be used in :func:`render`
template_name = None
def __init__(self, context):
self.context = context or Context()
def render(self, *args, **kwargs):
"""
Render the block, this method receives block arguments (if any)
and renders HTML result of the block.
"""
t = get_template(self.template_name)
return t.render(self.get_context_data(*args, **kwargs))
def get_context_data(self, *args, **kwargs):
"""
Returns context to be passed to the template renderer in
:func:`render`.
"""
return self.context
def update(self, transport, *args, **kwargs):
"""
Render and send the update of block within the given achilles transport
"""
transport.data('blocks', []).append({
'name': self.register_name,
'args': args,
'kwargs': kwargs,
'data': self.render(*args, **kwargs),
})
register = ActionsLibrary('blocks')
@register.action
def update(transport, name, *args, **kwargs):
"""
Action name: **blocks:update**
Update a block, if the block doesn't exists on the page nothing will
happen.
Blocks may have arguments, this function will pass any argument to
the block handler. When using arguments, only blocks matching them
will be updated.
:param transport: Achilles transport object that is being served
:param name: Fully namespaced block name
"""
context = RequestContext(transport.request, {})
block = get(name, context)
block.update(transport, *args, **kwargs)
def render(transport):
return transport.data('blocks', [])
| en | 0.721034 | Blocks library holds a register of all defined blocks Use it to define and register new blocks, grouping them under a common namespace. See :func:`block`. :param namespace: Unique namespace for this register Block register decorator, register a block on the library. When decorating a function, this method will automatically create a block. The block will use a dict returned by the function as template context:: from achilles import blocks register = blocks.Library('myapp') @register.block(template_name='foo.html') def foo(): return { 'template_var1' : 42, 'template_var2' : True, } When decorating a Block class it will just register it on the library. :param name: Name of the block, if None the decorated function name will be taken :param template_name: Path of the block template :param takes_context: If True, the decorated function will receive the template context as first parameter Retrieve a block with the given name. Example:: blocks.get('myapp:foo') :param name: Fully namespaced block name # make sure all blocks are loaded Blocks are parts of the page that can be dinamically rendered. By calling :func:`update` action you can reload any block asynchronously. In most cases blocks are automatically created out of functions decorated with :func:`Library.block`. For advanced uses you may need to sublcass this. #: Template file that will be used in :func:`render` Render the block, this method receives block arguments (if any) and renders HTML result of the block. Returns context to be passed to the template renderer in :func:`render`. Render and send the update of block within the given achilles transport Action name: **blocks:update** Update a block, if the block doesn't exists on the page nothing will happen. Blocks may have arguments, this function will pass any argument to the block handler. When using arguments, only blocks matching them will be updated. 
:param transport: Achilles transport object that is being served :param name: Fully namespaced block name | 2.477501 | 2 |
app/interface/rmqclient.py | kianby/mail2run | 0 | 6618018 | <reponame>kianby/mail2run
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pika
import json
import logging
import re
import subprocess
from threading import Thread
from conf import config
from util import rabbit
logger = logging.getLogger(__name__)
def get_rabbitmq_connection():
    """Build a rabbit.Connection from the configured RabbitMQ settings."""
    rabbit_cfg = config.rabbitmq
    creds = pika.PlainCredentials(rabbit_cfg['username'],
                                  rabbit_cfg['password'])
    params = pika.ConnectionParameters(
        host=rabbit_cfg['host'],
        port=rabbit_cfg['port'],
        credentials=creds,
        virtual_host=rabbit_cfg['vhost'],
    )
    return rabbit.Connection(params)
def mail(to_email, subject, message):
    """Publish an outgoing e-mail request on the mail.command.send topic.

    :param to_email: recipient address
    :param subject: mail subject line
    :param message: mail body content
    """
    body = {
        'to': to_email,
        'subject': subject,
        'content': message
    }
    connector = get_rabbitmq_connection()
    connection = connector.open()
    try:
        channel = connection.channel()
        channel.basic_publish(exchange=config.rabbitmq['exchange'],
                              routing_key='mail.command.send',
                              body=json.dumps(body, indent=False,
                                              sort_keys=False))
    finally:
        # always release the connection, even when publishing fails
        # (previously a publish error leaked the open connection)
        connector.close()
    # lazy %-style args: the message is only formatted if the level is enabled
    logger.info('Email for %s posted', to_email)
def send_delete_command(content):
    """Publish a mail.command.delete request for an accepted message.

    :param content: original mail payload (dict) identifying the message
    """
    connector = get_rabbitmq_connection()
    connection = connector.open()
    try:
        channel = connection.channel()
        channel.basic_publish(exchange=config.rabbitmq['exchange'],
                              routing_key='mail.command.delete',
                              body=json.dumps(content, indent=False,
                                              sort_keys=False))
    finally:
        # always release the connection, even when publishing fails
        # (previously a publish error leaked the open connection)
        connector.close()
    logger.info('Email accepted. Delete request sent for %s', content)
class MailConsumer(rabbit.Consumer):
    """Consume mail.message events and run the matching configured command.

    Each entry in ``config.runs`` pairs a subject regex with an executable;
    the first entry whose regex matches the mail subject is executed, the
    mail is acknowledged via :func:`send_delete_command` and the command
    result is mailed back to the sender.
    """

    def process(self, channel, method, properties, body):
        """Handle one delivery: dispatch on topic, execute, reply."""
        topic = method.routing_key
        data = json.loads(body)
        if topic == 'mail.message':
            logger.info('new message => {}'.format(data))
            for run in config.runs:
                if re.search(run['regex'], data['subject']):
                    try:
                        result = subprocess.run(
                            [run['exec']], stdout=subprocess.PIPE)
                        message = str(result)
                    except Exception:
                        # narrowed from a bare "except:" so SystemExit and
                        # KeyboardInterrupt are no longer swallowed
                        logger.exception('cannot execute')
                        message = 'cannot execute {}'.format(run['exec'])
                    send_delete_command(data)
                    mail(data['from'], 'RE: ' + data['subject'], message)
                    break
                else:
                    logger.info('no match {} for {}'.format(
                        data['subject'], run))
        else:
            # Logger.warn is a deprecated alias of Logger.warning
            logger.warning('unsupported message [topic={}]'.format(topic))
def start():
    """Open a RabbitMQ connection and consume mail.message events forever."""
    connection = get_rabbitmq_connection()
    c = MailConsumer(connection, config.rabbitmq['exchange'], 'mail.message')
    c.start()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pika
import json
import logging
import re
import subprocess
from threading import Thread
from conf import config
from util import rabbit
logger = logging.getLogger(__name__)
def get_rabbitmq_connection():
credentials = pika.PlainCredentials(
config.rabbitmq['username'], config.rabbitmq['password'])
parameters = pika.ConnectionParameters(
host=config.rabbitmq['host'],
port=config.rabbitmq['port'],
credentials=credentials,
virtual_host=config.rabbitmq['vhost']
)
return rabbit.Connection(parameters)
def mail(to_email, subject, message):
body = {
'to': to_email,
'subject': subject,
'content': message
}
connector = get_rabbitmq_connection()
connection = connector.open()
channel = connection.channel()
channel.basic_publish(exchange=config.rabbitmq['exchange'],
routing_key='mail.command.send',
body=json.dumps(body, indent=False, sort_keys=False))
connector.close()
logger.info('Email for %s posted' % to_email)
def send_delete_command(content):
connector = get_rabbitmq_connection()
connection = connector.open()
channel = connection.channel()
channel.basic_publish(exchange=config.rabbitmq['exchange'],
routing_key='mail.command.delete',
body=json.dumps(content, indent=False, sort_keys=False))
connector.close()
logger.info('Email accepted. Delete request sent for %s' % content)
class MailConsumer(rabbit.Consumer):
def process(self, channel, method, properties, body):
topic = method.routing_key
data = json.loads(body)
if topic == 'mail.message':
logger.info('new message => {}'.format(data))
for run in config.runs:
if re.search(run['regex'], data['subject']):
try:
r = subprocess.run(
[run['exec']], stdout=subprocess.PIPE)
message = str(r)
except:
logger.exception('cannot execute')
message = 'cannot execute {}'.format(run['exec'])
send_delete_command(data)
mail(data['from'], 'RE: ' + data['subject'], message)
break
else:
logger.info('no match {} for {}'.format(
data['subject'], run))
else:
logger.warn('unsupported message [topic={}]'.format(topic))
def start():
connection = get_rabbitmq_connection()
c = MailConsumer(connection, config.rabbitmq['exchange'], 'mail.message')
c.start() | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.264515 | 2 |
drive_downloader/download_contents.py | sandipandutta21/gdrive_downloader_with_unittest | 0 | 6618019 | <gh_stars>0
import requests
import os
import re
from shutil import disk_usage as du
import mimetypes
import pathlib
# https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing
# https://drive.google.com/drive/folders/1g-UCIJBVn6FakY6PqJGk?usp=sharing
class Downloader:
    """Download a publicly shared Google Drive file to the current directory."""

    def __init__(self, url):
        self.output_dir = os.getcwd()
        self.url = url
        # Google Drive direct-download endpoint
        self.d_url = 'https://docs.google.com/uc?export=download'

    def get_id_from_url(self):
        """Extract the file/folder id from a Drive sharing URL, or None.

        Raw strings fix the previous invalid escape sequences ("\\?") that
        raised DeprecationWarnings; "match" avoids shadowing builtin ``id``.
        """
        # for files - https://drive.google.com/file/d/FILE-ID-GOES-HERE/view?usp=sharing
        match = re.search(r'/d/(.*)/view\?usp=sharing', self.url)
        if not match:
            # for folders - https://drive.google.com/drive/folders/FOLDER-ID-GOES-HERE?usp=sharing
            match = re.search(r'/folders/(.*)\?usp=sharing', self.url)
        if match:
            return match[1]
        return None

    def check_system_storage(self, file_size):
        """Raise RuntimeError if *file_size* bytes would not fit on disk."""
        if int(file_size) >= du(os.getcwd()).free:
            raise RuntimeError('Not enough space available')

    def after_download_file_check(self, filename, headers):
        """Verify the downloaded file's size/type against the response headers.

        :param filename: name of the file written into the working directory
        :param headers: HTTP response headers of the download request
        :returns: (properties dict, True) on success
        :raises RuntimeError: when the file on disk does not match the headers
        """
        fullpath = os.path.join(os.getcwd(), filename)
        downloaded = {
            'fullpath': fullpath,
            'name': filename,
            'size': os.path.getsize(fullpath),
            'type': mimetypes.types_map[pathlib.PurePath(filename).suffix],
        }
        expected = {
            'fullpath': fullpath,
            'name': filename,
            'size': int(headers['Content-Length']),
            'type': headers['Content-Type'],
        }
        if downloaded != expected:
            raise RuntimeError("Download did not happen properly")
        return downloaded, True

    def download(self):
        """Download the shared file in chunks.

        :returns: (0, file properties dict, HTTP status) on success, or the
                  legacy ("Wrong Input", message, 404) tuple when the Drive
                  response lacks download headers
        :raises RuntimeError: when no file id can be parsed from the URL
        """
        file_id = self.get_id_from_url()
        if not file_id:
            raise RuntimeError('Not able to retrieve File ID')
        with requests.Session() as sess:
            raw = sess.get(self.d_url, params={'id': file_id},
                           stream=True, allow_redirects=True)
            try:
                temp = re.search(r'filename="(.*)"',
                                 raw.headers['Content-Disposition'])
                size = raw.headers['Content-Length']
                self.check_system_storage(size)
            except KeyError:
                # missing headers: the id did not resolve to a downloadable file
                return ("Wrong Input", "Could not downoad the file", 404)
            filename = temp.groups()[0]
            chunk = 1024 * 256
            with open(os.path.join(self.output_dir, filename), 'wb') as output_file:
                for value in raw.iter_content(chunk):
                    if value:
                        output_file.write(value)
            props, _ = self.after_download_file_check(filename, raw.headers)
            return 0, props, raw.status_code
if __name__ == '__main__':
    #file_id = '1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE'
    # smoke test: download a known shared file and print the result tuple
    url = "https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing"
    obj = Downloader(url)
    sc, filename, rc = obj.download()
    print(sc, filename, rc)
| import requests
import os
import re
from shutil import disk_usage as du
import mimetypes
import pathlib
# https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing
# https://drive.google.com/drive/folders/1g-UCIJBVn6FakY6PqJGk?usp=sharing
class Downloader:
def __init__(self, url):
self.output_dir = os.getcwd()
self.url = url
self.d_url = 'https://docs.google.com/uc?export=download' # Link for dowlnloading form gdrive
def get_id_from_url(self):
# for files - https://drive.google.com/file/d/FILE-ID-GOES-HERE/view?usp=sharing
id = re.search('/d/(.*)/view\?usp=sharing', self.url)
if not id:
# for folders - https://drive.google.com/drive/folders/FOLDER-ID-GOES-HERE?usp=sharing
id = re.search('/folders/(.*)\?usp=sharing', self.url)
if id:
return id[1]
else:
return None
def check_system_storage(self, file_size):
if int(file_size) >= du(os.getcwd()).free:
raise RuntimeError('Not enough space available')
def after_download_file_check(self, filename, headers):
downloaded_file_properties = {}
original_file= {}
downloaded_file_properties['fullpath'] = os.path.join(os.getcwd(), filename)
downloaded_file_properties['name'] = filename
downloaded_file_properties['size'] = os.path.getsize(downloaded_file_properties['fullpath'])
downloaded_file_properties['type'] = mimetypes.types_map[pathlib.PurePath(filename).suffix]
original_file['fullpath'] = os.path.join(os.getcwd(), filename)
original_file['name'] = filename
original_file['size'] = int(headers['Content-Length'])
original_file['type'] = headers['Content-Type']
if downloaded_file_properties == original_file:
result = True
else:
raise RuntimeError("Download did not happen properly")
return downloaded_file_properties, result
def download(self):
file_id = self.get_id_from_url()
if file_id:
with requests.Session() as sess:
raw = sess.get(self.d_url, params = { 'id' : file_id }, stream = True, allow_redirects=True)
try:
temp = re.search('filename="(.*)"', raw.headers['Content-Disposition'])
size = raw.headers['Content-Length']
self.check_system_storage(size)
except KeyError:
return ("Wrong Input", "Could not downoad the file", 404)
filename = temp.groups()[0]
chunk = 1024 * 256
with open( os.path.join(self.output_dir, filename), 'wb') as output_file:
for value in raw.iter_content(chunk):
if value:
output_file.write(value)
downloaded_file_properties, result = self.after_download_file_check(filename, raw.headers)
rc = 0
return rc, downloaded_file_properties, raw.status_code
else:
raise RuntimeError('Not able to retrieve File ID')
if __name__ == '__main__':
#file_id = '1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE'
url = "https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing"
obj = Downloader(url)
sc, filename, rc = obj.download()
print(sc, filename, rc) | en | 0.413245 | # https://drive.google.com/file/d/1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE/view?usp=sharing # https://drive.google.com/drive/folders/1g-UCIJBVn6FakY6PqJGk?usp=sharing # Link for dowlnloading form gdrive # for files - https://drive.google.com/file/d/FILE-ID-GOES-HERE/view?usp=sharing # for folders - https://drive.google.com/drive/folders/FOLDER-ID-GOES-HERE?usp=sharing #file_id = '1IVPq8VODLpLaP_EDhIaz88PflYQqi4dE' | 2.56844 | 3 |
01_factorial.py | mihricankizilyer/Coderbyte-Solutions | 0 | 6618020 | <gh_stars>0
# QUESTION
"""
FIRST FACTORIAL
Have the function FirstFactorial(num) take the num parameter being passed
and return the factorial of it. For example: if num = 4, then your program
should return (4 * 3 * 2 * 1) = 24. For the test cases, the range will be
between 1 and 18 and the input will always be an integer.
Use the Parameter Testing feature in the box below to test your code with
different arguments.
"""
# SOLUTION
def factorial(num):
    """Return num! computed iteratively (1 for any num <= 1)."""
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
# read an integer from stdin and print its factorial
print(factorial(int(input())))
| # QUESTION
"""
FIRST FACTORIAL
Have the function FirstFactorial(num) take the num parameter being passed
and return the factorial of it. For example: if num = 4, then your program
should return (4 * 3 * 2 * 1) = 24. For the test cases, the range will be
between 1 and 18 and the input will always be an integer.
Use the Parameter Testing feature in the box below to test your code with
different arguments.
"""
# SOLUTION
def factorial (num):
value = 1
for i in range(num):
value = value * (i + 1)
return value
print(factorial(int(input()))) | en | 0.687677 | # QUESTION FIRST FACTORIAL Have the function FirstFactorial(num) take the num parameter being passed and return the factorial of it. For example: if num = 4, then your program should return (4 * 3 * 2 * 1) = 24. For the test cases, the range will be between 1 and 18 and the input will always be an integer. Use the Parameter Testing feature in the box below to test your code with different arguments. # SOLUTION | 4.280586 | 4 |
pype/hosts/hiero/plugins/_publish/collect_metadata.py | simonebarbieri/pype | 0 | 6618021 | from pyblish import api
class CollectClipMetadata(api.InstancePlugin):
    """Collect Metadata from selected track items."""

    order = api.CollectorOrder + 0.01
    label = "Collect Metadata"
    hosts = ["hiero"]

    def process(self, instance):
        track_item = instance.data["item"]
        # track-item metadata
        instance.data["clipMetadata"] = self.metadata_to_string(
            dict(track_item.metadata()))
        # metadata of the underlying media source
        media_source = track_item.source().mediaSource()
        instance.data["mediaSourceMetadata"] = self.metadata_to_string(
            dict(media_source.metadata()))
        self.log.info(instance.data["clipMetadata"])
        self.log.info(instance.data["mediaSourceMetadata"])

    def metadata_to_string(self, metadata):
        """Return *metadata* with string keys, dropping "-" and empty values."""
        return {
            str(key): value
            for key, value in metadata.items()
            if value not in ["-", ""]
        }
| from pyblish import api
class CollectClipMetadata(api.InstancePlugin):
"""Collect Metadata from selected track items."""
order = api.CollectorOrder + 0.01
label = "Collect Metadata"
hosts = ["hiero"]
def process(self, instance):
item = instance.data["item"]
ti_metadata = self.metadata_to_string(dict(item.metadata()))
ms_metadata = self.metadata_to_string(
dict(item.source().mediaSource().metadata()))
instance.data["clipMetadata"] = ti_metadata
instance.data["mediaSourceMetadata"] = ms_metadata
self.log.info(instance.data["clipMetadata"])
self.log.info(instance.data["mediaSourceMetadata"])
return
def metadata_to_string(self, metadata):
data = dict()
for k, v in metadata.items():
if v not in ["-", ""]:
data[str(k)] = v
return data
| en | 0.82208 | Collect Metadata from selected track items. | 2.628733 | 3 |
sasl/prelude.py | thisismedium/python-sasl | 2 | 6618022 | ## Copyright (c) 2010, Coptix, Inc. All rights reserved.
## See the LICENSE file for license terms and warranty disclaimer.
"""prelude -- extra builtins"""
from __future__ import absolute_import
import os, logging
# Public API: a single shared module-level logger.
__all__ = ('log', )
# Name the logger after the containing package directory (e.g. "sasl").
log = logging.getLogger(os.path.basename(os.path.dirname(__file__)))
# NOTE(review): attaching a StreamHandler at import time makes library log
# output unconditional; logging.NullHandler is the usual library default --
# confirm this is intentional before changing.
log.addHandler(logging.StreamHandler())
| ## Copyright (c) 2010, Coptix, Inc. All rights reserved.
## See the LICENSE file for license terms and warranty disclaimer.
"""prelude -- extra builtins"""
from __future__ import absolute_import
import os, logging
__all__ = ('log', )
log = logging.getLogger(os.path.basename(os.path.dirname(__file__)))
log.addHandler(logging.StreamHandler())
| en | 0.739539 | ## Copyright (c) 2010, Coptix, Inc. All rights reserved. ## See the LICENSE file for license terms and warranty disclaimer. prelude -- extra builtins | 1.535281 | 2 |
beginner_contest/166/B.py | FGtatsuro/myatcoder | 0 | 6618023 | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
# AtCoder ABC166 B: count snukes (numbered 1..n) that received no snack.
n, k = map(int, input().split())
has = set()  # snukes that hold at least one snack
for _ in range(k):
    input()  # d_i (number of holders for this snack) -- value not needed
    has.update(map(int, input().split()))
# Snukes never mentioned in any snack's holder list.
no_has = set(range(1, n+1)) - has
print(len(no_has))
| import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n, k = map(int, input().split())
has = set()
for _ in range(k):
input()
has.update(map(int, input().split()))
no_has = set(range(1, n+1)) - has
print(len(no_has))
| none | 1 | 2.747923 | 3 | |
example/run_shell.py | nickvasko/DLTrainer | 0 | 6618024 | from src.DLTrainer.pytorch import DLTrainer
from src.DLTrainer.pytorch.trainer_utils import BaseTrainerArgs
from example.model import SimpleConfig, SimpleModel
from example.dataset import SimpleDataset
from example.metrics import calculate_metrics
"""MODELS Dictionary
The models dictionary contains the model classes to be used during training.
The base format is designed for NLP tasks, however, can be used for non-NLP
tasks by excluding the TokenizerClass
The following format should be used:
'nlp_model_name': (ModelConfigClass, ModelClass, DatasetClass, TokenizerClass)
'non_nlp_model_name': ((ModelConfigClass, ModelClass, DatasetClass)
If
"""
# Registry consumed by DLTrainer: maps a model name to its component classes
# (config, model, dataset) per the format documented above.
MODELS = {
    'simple': (SimpleConfig, SimpleModel, SimpleDataset),
}
class TrainerArgs(BaseTrainerArgs):
    """Trainer arguments with fixed dimensions for the simple example model."""

    def __init__(self, **kwargs):
        """Forward *kwargs* to the base args, then pin the model dimensions."""
        super().__init__(**kwargs)
        # Layer sizes of the SimpleModel used by this example.
        self.input_size = 2
        self.hidden_size = 10
        self.output_size = 1
# Build args for the 'simple' model and start training immediately on import.
args = TrainerArgs(model='simple', do_train=True, logging_steps=1)
trainer = DLTrainer(MODELS, calculate_metrics, args=args)
| from src.DLTrainer.pytorch import DLTrainer
from src.DLTrainer.pytorch.trainer_utils import BaseTrainerArgs
from example.model import SimpleConfig, SimpleModel
from example.dataset import SimpleDataset
from example.metrics import calculate_metrics
"""MODELS Dictionary
The models dictionary contains the model classes to be used during training.
The base format is designed for NLP tasks, however, can be used for non-NLP
tasks by excluding the TokenizerClass
The following format should be used:
'nlp_model_name': (ModelConfigClass, ModelClass, DatasetClass, TokenizerClass)
'non_nlp_model_name': ((ModelConfigClass, ModelClass, DatasetClass)
If
"""
MODELS = {
'simple': (SimpleConfig, SimpleModel, SimpleDataset),
}
class TrainerArgs(BaseTrainerArgs):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.input_size=2
self.hidden_size=10
self.output_size=1
args = TrainerArgs(model='simple', do_train=True, logging_steps=1)
trainer = DLTrainer(MODELS, calculate_metrics, args=args)
| en | 0.804411 | MODELS Dictionary The models dictionary contains the model classes to be used during training. The base format is designed for NLP tasks, however, can be used for non-NLP tasks by excluding the TokenizerClass The following format should be used: 'nlp_model_name': (ModelConfigClass, ModelClass, DatasetClass, TokenizerClass) 'non_nlp_model_name': ((ModelConfigClass, ModelClass, DatasetClass) If | 3.058472 | 3 |
altair/evaluation.py | lyeeer/altair | 43 | 6618025 | import json
import tqdm
import numpy
from multiprocessing import Pool, Queue
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import issparse
from altair.vectorize01.vectorizers.BowAllVectorizer import BowAllVectorizer
from altair.vectorize01.vectorizers.BowImportVectorizer import BowImportVectorizer
from altair.vectorize01.vectorizers.Doc2VecVectorizer import Doc2VecVectorizer
from altair.vectorize01.vectorizers.LDAVectorizer import LDAVectorizer
from altair.vectorize01.vectorizers.TFIDFVectorizer import TFIDFVectorizer
from altair.util.separate_code_and_comments import separate_code_and_comments
# Module-level shared state: populated by main(), read by score_performance()
# in the worker processes (inherited via fork / Pool initializer).
features = None
raw = None
q = Queue()
def q_init(q):
    # Pool initializer: stash the shared result queue on the worker function
    # so score_performance can reach it inside each worker process.
    score_performance.q = q
def score_performance(t):
    """Score one document against the corpus by cosine similarity.

    *t* is ``(current_idx, v)`` where ``v`` is the document's feature vector.
    Relies on the module globals ``features``, ``raw`` and ``top_n`` set up
    by :func:`main`, and on the queue attached by :func:`q_init`.  Puts a
    ``(top_1, top_n_any, top_n_all)`` 0/1 tuple on that queue.
    """
    current_idx, v = t
    # sklearn throws deprecation warnings for 1d arrays so need to reshape v
    pair_sims = cosine_similarity(v.reshape(1, -1), features)
    # TODO: Set a minimum cosine similarity score for candidates?
    # Most-similar top_n+1 indices, descending (the document itself is among them).
    top_candidates = pair_sims[0].argsort()[-top_n-1:][::-1]
    comp_id = raw[current_idx]["CompetitionId"]
    # Drop the document itself, keep the top_n candidate competition ids.
    candidate_ids = [raw[candidate_idx]["CompetitionId"] for candidate_idx in top_candidates if candidate_idx!=current_idx][:top_n]
    scoring = [candidate_id == comp_id for candidate_id in candidate_ids]
    top_1_score = int(scoring[0])
    top_n_any_score = int(any(scoring))
    top_n_all_score = int(all(scoring))
    score_performance.q.put((top_1_score, top_n_any_score, top_n_all_score))
def main(data_path, num_cores, top_n_param, vectorizer):
    """Vectorize every script and report Top-1 / Top-N retrieval accuracy.

    For each document, the top_n most cosine-similar other documents are
    retrieved; a hit means the candidate shares the document's CompetitionId.

    :param data_path: path to the line-delimited metakaggle JSON file
    :param num_cores: worker-process count for the similarity pool
    :param top_n_param: N used for the Top-N (Any/All) metrics
    :param vectorizer: object exposing vectorize_multi(list_of_code) -> matrix
    :returns: dict with the three accuracies and top_n
    """
    global raw
    global features
    global q
    # Patch for error thrown by score_performance on declaration of top_n
    global top_n
    top_n = top_n_param
    raw = read_data(data_path)
    # Strip comments, keeping only scripts that still contain code.  Build
    # filtered copies instead of calling raw.pop(index) while looping: popping
    # by the original enumerate() index shifts the later elements, so the
    # wrong entries get removed and `raw` falls out of step with `scripts`.
    scripts = []
    kept_raw = []
    for script in raw:
        code, _ = separate_code_and_comments(script["ScriptContent"], script["ScriptTitle"])
        if len(code) > 0:
            scripts.append(code)
            kept_raw.append(script)
    raw = kept_raw
    # Choose vectorizer
    print("Vectorizing documents...")
    features = vectorizer.vectorize_multi(scripts)
    features_dense = features.todense() if issparse(features) else features
    p = Pool(num_cores, q_init, [q])
    print("Calculating pairwise similarities + scores...")
    for _ in tqdm.tqdm(p.imap_unordered(score_performance, list(enumerate(features_dense))), total=len(features_dense)):
        pass
    # Drain the shared queue and accumulate the three per-document 0/1 scores.
    score_top_1 = 0
    score_top_n_any = 0
    score_top_n_all = 0
    while not q.empty():
        top_1, top_n_any, top_n_all = q.get()
        score_top_1 += top_1
        score_top_n_any += top_n_any
        score_top_n_all += top_n_all
    top_1_accuracy = score_top_1 / float(len(raw))
    top_n_any_accuracy = score_top_n_any / float(len(raw))
    top_n_all_accuracy = score_top_n_all / float(len(raw))
    print("Top 1: %s" % top_1_accuracy)
    print("Top N (Any): %s" % top_n_any_accuracy)
    print("Top N (All): %s" % top_n_all_accuracy)
    print("(N = %s)" % top_n)
    return {"top_1_accuracy":top_1_accuracy, "top_n_any_accuracy":top_n_any_accuracy, "top_n_all_accuracy":top_n_all_accuracy, "top_n":top_n}
def read_data(data_path):
    """Load one JSON object per line from *data_path* and return them as a list."""
    print("Reading data from: %s" % data_path)
    with open(data_path, "r") as handle:
        records = [json.loads(line) for line in handle]
    return records
def parse_kwargs(kwargs_str):
    """Parse a "key1=val1;key2=val2" string into a dict of strings.

    Splits each pair on the FIRST '=' only, so values containing '=' no
    longer raise ValueError (the previous unbounded split unpacked into
    exactly two names and crashed on e.g. "expr=x=y").
    """
    kwargs = {}
    for kv_pair in kwargs_str.split(";"):
        k, v = kv_pair.split("=", 1)
        kwargs[k] = v
    return kwargs
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Calculate evaluation metrics (Top 1, Top N Any, Top N All).')
    # Required args
    parser.add_argument("data_path",
                        type=str,
                        help="Location of metakaggle JSON file.")
    # Optional args
    parser.add_argument("--num_cores",
                        type=int,
                        default=1,
                        help="Number cores (for parallelism).")
    parser.add_argument("--top_n",
                        type=int,
                        default=3,
                        help="N for calculating Top N (Any) and Top N (All).")
    # One sub-command per vectorizer; each binds its class via set_defaults.
    subparsers = parser.add_subparsers(help="Subparsers per vectorizer type.")
    ###
    bow_all = subparsers.add_parser("bow_all",
                                    help="Bag of words vectorizer (entire script).")
    bow_all.add_argument("pkl_vocab",
                         type=str,
                         help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
    bow_all.add_argument("--vectorizer_kwargs",
                         type=str,
                         help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
    bow_all.set_defaults(vectorizer_cls = BowAllVectorizer)
    ###
    bow_import = subparsers.add_parser("bow_import",
                                       help="Bag of words vectorizer (libraries only).")
    bow_import.add_argument("pkl_libraries",
                            type=str,
                            help="Path to libraries pickle file. Generated offline by build_imported_libraries_vocabulary.py.")
    bow_import.add_argument("--vectorizer_kwargs",
                            type=str,
                            help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
    bow_import.set_defaults(vectorizer_cls = BowImportVectorizer)
    ###
    doc2vec = subparsers.add_parser("doc2vec",
                                    help="Doc2Vec vectorizer (Le, Mikolov 2014) in gensim.")
    doc2vec.add_argument("pkl_d2v_model",
                         type=str,
                         help="Path to pickled Doc2Vec model. Generated offline.")
    doc2vec.add_argument("--normalizer_kwargs",
                         type=str,
                         help="Keyword arguments (see normalize_text() in utils/ for full list). Format: key1=val;key2=val2.")
    doc2vec.add_argument("--infer_kwargs",
                         type=str,
                         help="Keyword arguments (see Doc2Vec.infer_vector() docs for full list). Format: key1=val;key2=val2.")
    doc2vec.set_defaults(vectorizer_cls = Doc2VecVectorizer)
    ###
    lda = subparsers.add_parser("lda",
                                help="Latent Dirichlet Allocation vectorizer (Blei, Ng, Jordan 2003) in scikit-learn.")
    lda.add_argument("pkl_vocab",
                     type=str,
                     help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
    lda.add_argument("pkl_lda_model",
                     type=str,
                     help="Path to pickled LDA model. Generated offline.")
    lda.add_argument("--vectorizer_kwargs",
                     type=str,
                     help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
    lda.set_defaults(vectorizer_cls = LDAVectorizer)
    ###
    tfidf = subparsers.add_parser("tfidf",
                                  help="TF-IDF (Term Frequency, Inverse Document Frequency) vectorizer.")
    tfidf.add_argument("pkl_vocab",
                       type=str,
                       help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
    tfidf.add_argument("--vectorizer_kwargs",
                       type=str,
                       help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
    tfidf.add_argument("--transformer_kwargs",
                       type=str,
                       help="Keyword arguments (see TfidfTransformer docs for full list). Format: key1=val;key2=val2.")
    tfidf.set_defaults(vectorizer_cls = TFIDFVectorizer)
    args = parser.parse_args()
    # Work on the raw namespace dict; after popping the shared options, the
    # leftovers are exactly the chosen vectorizer's constructor kwargs.
    args = args.__dict__
    data_path = args.pop("data_path")
    num_cores = args.pop("num_cores")
    top_n = args.pop("top_n")
    # Any "*_kwargs" option arrives as "k1=v1;k2=v2" and is parsed to a dict.
    for argname, val in args.items():
        if "kwargs" in argname and val is not None:
            args[argname] = parse_kwargs(val)
    vectorizer_cls = args.pop("vectorizer_cls")
    vectorizer = vectorizer_cls(**args)
    main(data_path, num_cores, top_n, vectorizer)
| import json
import tqdm
import numpy
from multiprocessing import Pool, Queue
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import issparse
from altair.vectorize01.vectorizers.BowAllVectorizer import BowAllVectorizer
from altair.vectorize01.vectorizers.BowImportVectorizer import BowImportVectorizer
from altair.vectorize01.vectorizers.Doc2VecVectorizer import Doc2VecVectorizer
from altair.vectorize01.vectorizers.LDAVectorizer import LDAVectorizer
from altair.vectorize01.vectorizers.TFIDFVectorizer import TFIDFVectorizer
from altair.util.separate_code_and_comments import separate_code_and_comments
features = None
raw = None
q = Queue()
def q_init(q):
score_performance.q = q
def score_performance(t):
current_idx, v = t
# sklearn throws deprecation warnings for 1d arrays so need to reshape v
pair_sims = cosine_similarity(v.reshape(1, -1), features)
# TODO: Set a minimum cosine similarity score for candidates?
top_candidates = pair_sims[0].argsort()[-top_n-1:][::-1]
comp_id = raw[current_idx]["CompetitionId"]
candidate_ids = [raw[candidate_idx]["CompetitionId"] for candidate_idx in top_candidates if candidate_idx!=current_idx][:top_n]
scoring = [candidate_id == comp_id for candidate_id in candidate_ids]
top_1_score = int(scoring[0])
top_n_any_score = int(any(scoring))
top_n_all_score = int(all(scoring))
score_performance.q.put((top_1_score, top_n_any_score, top_n_all_score))
def main(data_path, num_cores, top_n_param, vectorizer):
global raw
global features
global q
# Patch for error thrown by score_performance on declaration of top_n
global top_n
top_n = top_n_param
raw = read_data(data_path)
"""
# Remove items where competition IDs are in:
# PyCon2015 Tutorial (#4353)
# Word2Vec NLP Tutorial (#3971)
filter_comp_ids = ["4353", "3971"]
idxs_to_remove = set()
for idx, r in enumerate(raw):
if r["CompetitionId"] in filter_comp_ids:
idxs_to_remove.add(idx)
raw = [r for idx, r in enumerate(raw) if idx not in idxs_to_remove]
"""
"""
# Take a random sample from raw.
import random
raw = random.sample(raw, 2000)
"""
# Strip out comments and add to scripts if it has code; otherwise remove it from raw list
scripts = list()
for index,script in list(enumerate(raw)):
code, _ = separate_code_and_comments(script["ScriptContent"],script["ScriptTitle"])
if len(code)>0:
scripts.append(code)
else:
raw.pop(index)
#scripts = [script["ScriptContent"] for script in raw]
# Choose vectorizer
print("Vectorizing documents...")
#vectorizer.vectorizer.fit(scripts)
features = vectorizer.vectorize_multi(scripts)
features_dense = features.todense() if issparse(features) else features
p = Pool(num_cores, q_init, [q])
print("Calculating pairwise similarities + scores...")
for _ in tqdm.tqdm(p.imap_unordered(score_performance, list(enumerate(features_dense))), total=len(features_dense)):
pass
score_top_1 = 0
score_top_n_any = 0
score_top_n_all = 0
while not q.empty():
top_1, top_n_any, top_n_all = q.get()
score_top_1 += top_1
score_top_n_any += top_n_any
score_top_n_all += top_n_all
top_1_accuracy = score_top_1 / float(len(raw))
top_n_any_accuracy = score_top_n_any / float(len(raw))
top_n_all_accuracy = score_top_n_all / float(len(raw))
print("Top 1: %s" % top_1_accuracy)
print("Top N (Any): %s" % top_n_any_accuracy)
print("Top N (All): %s" % top_n_all_accuracy)
print("(N = %s)" % top_n)
return {"top_1_accuracy":top_1_accuracy, "top_n_any_accuracy":top_n_any_accuracy, "top_n_all_accuracy":top_n_all_accuracy, "top_n":top_n}
def read_data(data_path):
raw = []
print("Reading data from: %s" % data_path)
with open(data_path, "r") as f:
for line in f:
raw.append(json.loads(line))
return raw
def parse_kwargs(kwargs_str):
kv_pairs = kwargs_str.split(";")
kwargs = {}
for kv_pair in kv_pairs:
k, v = kv_pair.split("=")
kwargs[k] = v
return kwargs
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Calculate evaluation metrics (Top 1, Top N Any, Top N All).')
# Required args
parser.add_argument("data_path",
type=str,
help="Location of metakaggle JSON file.")
# Optional args
parser.add_argument("--num_cores",
type=int,
default=1,
help="Number cores (for parallelism).")
parser.add_argument("--top_n",
type=int,
default=3,
help="N for calculating Top N (Any) and Top N (All).")
subparsers = parser.add_subparsers(help="Subparsers per vectorizer type.")
###
bow_all = subparsers.add_parser("bow_all",
help="Bag of words vectorizer (entire script).")
bow_all.add_argument("pkl_vocab",
type=str,
help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
bow_all.add_argument("--vectorizer_kwargs",
type=str,
help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
bow_all.set_defaults(vectorizer_cls = BowAllVectorizer)
###
bow_import = subparsers.add_parser("bow_import",
help="Bag of words vectorizer (libraries only).")
bow_import.add_argument("pkl_libraries",
type=str,
help="Path to libraries pickle file. Generated offline by build_imported_libraries_vocabulary.py.")
bow_import.add_argument("--vectorizer_kwargs",
type=str,
help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
bow_import.set_defaults(vectorizer_cls = BowImportVectorizer)
###
doc2vec = subparsers.add_parser("doc2vec",
help="Doc2Vec vectorizer (Le, Mikolov 2014) in gensim.")
doc2vec.add_argument("pkl_d2v_model",
type=str,
help="Path to pickled Doc2Vec model. Generated offline.")
doc2vec.add_argument("--normalizer_kwargs",
type=str,
help="Keyword arguments (see normalize_text() in utils/ for full list). Format: key1=val;key2=val2.")
doc2vec.add_argument("--infer_kwargs",
type=str,
help="Keyword arguments (see Doc2Vec.infer_vector() docs for full list). Format: key1=val;key2=val2.")
doc2vec.set_defaults(vectorizer_cls = Doc2VecVectorizer)
###
lda = subparsers.add_parser("lda",
help="Latent Dirichlet Allocation vectorizer (Blei, Ng, Jordan 2003) in scikit-learn.")
lda.add_argument("pkl_vocab",
type=str,
help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
lda.add_argument("pkl_lda_model",
type=str,
help="Path to pickled LDA model. Generated offline.")
lda.add_argument("--vectorizer_kwargs",
type=str,
help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
lda.set_defaults(vectorizer_cls = LDAVectorizer)
###
tfidf = subparsers.add_parser("tfidf",
help="TF-IDF (Term Frequency, Inverse Document Frequency) vectorizer.")
tfidf.add_argument("pkl_vocab",
type=str,
help="Path to vocabulary pickle file. Generated offline by build_bow_script_vocabulary.py.")
tfidf.add_argument("--vectorizer_kwargs",
type=str,
help="Keyword arguments (see CountVectorizer docs for full list). Format: key1=val;key2=val2.")
tfidf.add_argument("--transformer_kwargs",
type=str,
help="Keyword arguments (see TfidfTransformer docs for full list). Format: key1=val;key2=val2.")
tfidf.set_defaults(vectorizer_cls = TFIDFVectorizer)
args = parser.parse_args()
args = args.__dict__
data_path = args.pop("data_path")
num_cores = args.pop("num_cores")
top_n = args.pop("top_n")
for argname, val in args.items():
if "kwargs" in argname and val is not None:
args[argname] = parse_kwargs(val)
vectorizer_cls = args.pop("vectorizer_cls")
vectorizer = vectorizer_cls(**args)
main(data_path, num_cores, top_n, vectorizer)
| en | 0.611887 | # sklearn throws deprecation warnings for 1d arrays so need to reshape v # TODO: Set a minimum cosine similarity score for candidates? # Patch for error thrown by score_performance on declaration of top_n # Remove items where competition IDs are in: # PyCon2015 Tutorial (#4353) # Word2Vec NLP Tutorial (#3971) filter_comp_ids = ["4353", "3971"] idxs_to_remove = set() for idx, r in enumerate(raw): if r["CompetitionId"] in filter_comp_ids: idxs_to_remove.add(idx) raw = [r for idx, r in enumerate(raw) if idx not in idxs_to_remove] # Take a random sample from raw. import random raw = random.sample(raw, 2000) # Strip out comments and add to scripts if it has code; otherwise remove it from raw list #scripts = [script["ScriptContent"] for script in raw] # Choose vectorizer #vectorizer.vectorizer.fit(scripts) # Required args # Optional args ### ### ### ### ### | 2.202153 | 2 |
tests/parser/dictionary/encoder/other/test_ipi_base.py | orenyodfat/CWR-DataApi | 37 | 6618026 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import IPIBaseDictionaryEncoder
"""
Acknowledgement to dictionary encoding tests.
The following cases are tested:
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class TestIPIBaseEncoding(unittest.TestCase):
    """Dictionary encoding of an IPI base number is a pass-through."""

    def setUp(self):
        self._encoder = IPIBaseDictionaryEncoder()

    def test_encoded(self):
        """An IPI base id must be returned unchanged by the encoder."""
        result = self._encoder.encode('T-123456789-1')
        self.assertEqual('T-123456789-1', result)
| # -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import IPIBaseDictionaryEncoder
"""
Acknowledgement to dictionary encoding tests.
The following cases are tested:
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class TestIPIBaseEncoding(unittest.TestCase):
def setUp(self):
self._encoder = IPIBaseDictionaryEncoder()
def test_encoded(self):
encoded = self._encoder.encode('T-123456789-1')
self.assertEqual('T-123456789-1', encoded) | en | 0.868212 | # -*- coding: utf-8 -*- Acknowledgement to dictionary encoding tests. The following cases are tested: | 2.633585 | 3 |
labequipment/picoscope.py | Pollynm/labequipment | 0 | 6618027 | <filename>labequipment/picoscope.py
from picoscope import ps2000
import numpy as np
import time
class Picoscope:
    """Thin wrapper around a PicoScope 2000 for two-channel voltage capture."""

    def __init__(self):
        self.scope = ps2000.PS2000()
        # Target ~200 ms waveforms; observe 3x that so a full cycle fits.
        waveform_desired_duration = 200E-3
        obs_duration = 3 * waveform_desired_duration
        # 4096 samples across the observation window.
        sampling_interval = obs_duration / 4096
        (self.actualSamplingInterval, self.nSamples, maxSamples) = \
            self.scope.setSamplingInterval(sampling_interval, obs_duration)
        print('actual sampling interval = ', self.actualSamplingInterval)
        print('nsamples = ', self.nSamples)
        # Channels A and B: AC coupling, +/-2 V range, falling-edge trigger
        # at 0 V with a 100 ms timeout.
        self.scope.setChannel('A', 'AC', 2.0, 0.0, enabled=True,
                              BWLimited=False)
        self.scope.setSimpleTrigger('A', 0, 'Falling', timeout_ms=100,
                                    enabled=True)
        self.scope.setChannel('B', 'AC', 2.0, 0.0, enabled=True, BWLimited=False)
        self.scope.setSimpleTrigger('B', 0, 'Falling', timeout_ms=100,
                                    enabled=True)

    def get_V(self, refine_range=False, channel='A'):
        """Capture one block from *channel*.

        :param refine_range: take a throwaway capture first and tighten the
            voltage range to 1.5x the observed peak before the real one
        :param channel: 'A' or 'B'
        :returns: ``(times, data, elapsed_seconds)``
        """
        s = time.time()
        if refine_range:
            # Probe at the widest range, then re-arm with a tighter one.
            channelRange = self.scope.setChannel(channel, 'AC', 2.0, 0.0,
                                                 enabled=True, BWLimited=False)
            self.scope.runBlock()
            self.scope.waitReady()
            data = self.scope.getDataV(channel, self.nSamples,
                                       returnOverflow=False)
            vrange = np.max(data) * 1.5
            channelRange = self.scope.setChannel(channel, 'AC', vrange, 0.0,
                                                 enabled=True, BWLimited=False)
        self.scope.runBlock()
        self.scope.waitReady()
        data = self.scope.getDataV(channel, self.nSamples, returnOverflow=False)
        times = np.arange(self.nSamples) * self.actualSamplingInterval
        return times, data, time.time() - s
| <filename>labequipment/picoscope.py
from picoscope import ps2000
import numpy as np
import time
class Picoscope:
def __init__(self):
self.scope = ps2000.PS2000()
waveform_desired_duration = 200E-3
obs_duration = 3 * waveform_desired_duration
sampling_interval = obs_duration / 4096
(self.actualSamplingInterval, self.nSamples, maxSamples) = \
self.scope.setSamplingInterval(sampling_interval, obs_duration)
print('actual sampling interval = ', self.actualSamplingInterval)
print('nsamples = ', self.nSamples)
self.scope.setChannel('A', 'AC', 2.0, 0.0, enabled=True,
BWLimited=False)
self.scope.setSimpleTrigger('A', 0, 'Falling', timeout_ms=100,
enabled=True)
self.scope.setChannel('B', 'AC', 2.0, 0.0, enabled=True, BWLimited=False)
self.scope.setSimpleTrigger('B', 0, 'Falling', timeout_ms=100,
enabled=True)
def get_V(self, refine_range=False, channel='A'):
s = time.time()
if refine_range:
channelRange = self.scope.setChannel(channel, 'AC', 2.0, 0.0,
enabled=True, BWLimited=False)
self.scope.runBlock()
self.scope.waitReady()
data = self.scope.getDataV(channel, self.nSamples,
returnOverflow=False)
vrange = np.max(data) * 1.5
channelRange = self.scope.setChannel(channel, 'AC', vrange, 0.0,
enabled=True, BWLimited=False)
self.scope.runBlock()
self.scope.waitReady()
data = self.scope.getDataV(channel, self.nSamples, returnOverflow=False)
times = np.arange(self.nSamples) * self.actualSamplingInterval
return times, data, time.time() - s
| none | 1 | 2.581073 | 3 | |
server/server.py | bsai-krishna/Remote-Plant-Watering-system | 0 | 6618028 | <reponame>bsai-krishna/Remote-Plant-Watering-system
#!/usr/bin/env python
#
# Based off example code found in the Tornado package, and code from
# the tinaja labs xbee package.
import datetime, json, logging, os, serial, sys, syslog, time, uuid
import tornado.escape
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
import tornado.websocket
from xbee import xbee
define("port", default=8888, help="run on the given port", type=int)
def log_data_file(plant_num):
    """Path of the sensor-reading log for the given plant id."""
    return "".join(["sensor-data/", plant_num, ".log"])
def instructions_data_file(plant_num):
    """Path of the pending-instructions file for the given plant id."""
    return "".join(["instructions-data/", plant_num, ".log"])
def translate_instruction(instruction):
    """Convert a JSON watering instruction into the xbee wire format.

    Moisture values arrive as decimal strings; appending "0" before int()
    multiplies them by ten, and each is then encoded as one ASCII byte.
    Manual mode: "M" + level + 'T'; auto mode: "A" + low + high.
    """
    parsed = json.loads(instruction)
    if 'manual_percent_moisture' in parsed:
        encoded = ("M"
                   + chr(int(parsed['manual_percent_moisture'] + "0"))
                   + chr(0x54))
    else:
        encoded = ("A"
                   + chr(int(parsed['auto_percent_moisture_low'] + "0"))
                   + chr(int(parsed['auto_percent_moisture_high'] + "0")))
    return encoded + '\n'
def touch(fname, times=None):
    """Create *fname* if missing and set its access/modified times.

    Port of the classic recipe (stackoverflow question 1158076), using the
    portable open() builtin instead of the Python-2-only file() builtin.

    :param times: optional ``(atime, mtime)`` tuple for os.utime
    """
    with open(fname, 'a'):
        os.utime(fname, times)
class Application(tornado.web.Application):
    """Tornado app wiring the watering-system URL routes."""

    def __init__(self):
        handlers = [
            # Per-plant websocket for live sensor data / instructions.
            (r"/plant/(.*)", WaterDataSocketHandler),
            # Hit by the sensor poller to push a fresh reading to clients.
            (r"/sensorupdated/(.*)/(.*)", SensorUpdatedHandler),
            (r"/tomatoes", TomatoesHandler),
            (r"/", SplashHandler),
        ]
        settings = dict(
            cookie_secret="it'sarandomcookiesecrethopefullynooneguessesit!",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            autoescape=None,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
class SplashHandler(tornado.web.RequestHandler):
    """Serve the landing page."""

    def get(self):
        self.render("splash.html", messages=[])
class TomatoesHandler(tornado.web.RequestHandler):
    """Serve the tomatoes dashboard page."""

    def get(self):
        self.render("tomatoes.html", messages=[])
class SensorUpdatedHandler(tornado.web.RequestHandler):
    """HTTP hook: relay a fresh sensor reading to the plant's websocket client."""

    def get(self, plant_num, value):
        WaterDataSocketHandler.send_latest_data(plant_num, value)
class WaterDataSocketHandler(tornado.websocket.WebSocketHandler):
    """Per-plant websocket: streams sensor history, accepts watering instructions.

    Class-level ``clients`` maps plant number -> the (single) connected
    handler for that plant.
    """

    clients = {}

    def allow_draft76(self):
        # for iOS 5.0 Safari
        return True

    def open(self, plant_num):
        plant_num = plant_num.strip('?plant=_')
        WaterDataSocketHandler.clients[plant_num] = self
        self.plant_num = plant_num
        logging.info("got client for plant " + plant_num)
        WaterDataSocketHandler.send_all_data(plant_num)

    def on_close(self):
        del WaterDataSocketHandler.clients[self.plant_num]

    @classmethod
    def send_all_data(cls, plant_num):
        """Send the plant's whole reading history to its connected client."""
        data = 'hi shiry'  # placeholder sent as-is if the log file is missing
        try:
            # `with` closes the log file; the original leaked the handle.
            with open(log_data_file(plant_num), 'r') as data_file:
                data = []
                for line in data_file:
                    try:
                        timestamp, reading = line.strip().split()
                    except ValueError:
                        # Skip malformed lines.  (Was Python-2-only
                        # "except ValueError, e" syntax; `e` was unused.)
                        continue
                    data.append({timestamp: reading})
        except IOError:
            pass
        logging.info("sent data")
        try:
            cls.clients[plant_num].write_message(tornado.escape.json_encode(data))
        except Exception:  # narrowed from a bare except:
            logging.error("Error sending message", exc_info=True)

    @classmethod
    def send_latest_data(cls, plant_num, sensor_reading):
        """Push one fresh reading to the plant's client, if one is connected."""
        if not plant_num in cls.clients:
            return
        try:
            data = [{str(time.time()): str(sensor_reading)}]
            cls.clients[plant_num].write_message(tornado.escape.json_encode(data))
        except Exception:  # narrowed from a bare except:
            logging.error("Error sending message", exc_info=True)

    def on_message(self, instruction):
        """Persist a watering instruction for the plant's controller to pick up."""
        logging.info("got message %r", instruction)
        touch(instructions_data_file(self.plant_num))
        instructions_file = open(instructions_data_file(self.plant_num), 'w')
        instructions_file.write(translate_instruction(instruction))
        instructions_file.close()
def main():
    """Parse command-line options and run the Tornado IO loop forever."""
    tornado.options.parse_command_line()
    app = Application()
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
#
# Based off example code found in the Tornado package, and code from
# the tinaja labs xbee package.
import datetime, json, logging, os, serial, sys, syslog, time, uuid
import tornado.escape
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
import tornado.websocket
from xbee import xbee
define("port", default=8888, help="run on the given port", type=int)
def log_data_file(plant_num):
return "sensor-data/" + plant_num + ".log"
def instructions_data_file(plant_num):
return "instructions-data/" + plant_num + ".log"
def translate_instruction(instruction):
translate = json.loads(instruction)
if 'manual_percent_moisture' in translate:
translate = "M" + chr(int(translate['manual_percent_moisture'] + "0")) +\
chr(0x54)
else:
translate = "A" +\
chr(int(translate['auto_percent_moisture_low'] + "0")) +\
chr(int(translate['auto_percent_moisture_high'] + "0"))
return translate + '\n'
def touch(fname, times=None):
# from stackoverflow question 1158076
with file(fname, 'a'):
os.utime(fname, times)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/plant/(.*)", WaterDataSocketHandler),
(r"/sensorupdated/(.*)/(.*)", SensorUpdatedHandler),
(r"/tomatoes", TomatoesHandler),
(r"/", SplashHandler),
]
settings = dict(
cookie_secret="it'sarandomcookiesecrethopefullynooneguessesit!",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
class SplashHandler(tornado.web.RequestHandler):
def get(self):
self.render("splash.html", messages=[])
class TomatoesHandler(tornado.web.RequestHandler):
def get(self):
self.render("tomatoes.html", messages=[])
class SensorUpdatedHandler(tornado.web.RequestHandler):
def get(self, plant_num, value):
WaterDataSocketHandler.send_latest_data(plant_num, value)
class WaterDataSocketHandler(tornado.websocket.WebSocketHandler):
clients = {}
def allow_draft76(self):
# for iOS 5.0 Safari
return True
def open(self, plant_num):
plant_num = plant_num.strip('?plant=_')
WaterDataSocketHandler.clients[plant_num] = self
self.plant_num = plant_num
logging.info("got client for plant " + plant_num)
WaterDataSocketHandler.send_all_data(plant_num)
def on_close(self):
del WaterDataSocketHandler.clients[self.plant_num]
@classmethod
def send_all_data(cls, plant_num):
data = 'hi shiry'
try:
data_file = open(log_data_file(plant_num), 'r')
data = []
for line in data_file:
try:
timestamp, reading = line.strip().split()
except ValueError, e:
continue
data.append({timestamp: reading})
except IOError:
pass
logging.info("sent data")
try:
cls.clients[plant_num].write_message(tornado.escape.json_encode(data))
except:
logging.error("Error sending message", exc_info=True)
@classmethod
def send_latest_data(cls, plant_num, sensor_reading):
if not plant_num in cls.clients:
return
try:
data = [{str(time.time()): str(sensor_reading)}]
cls.clients[plant_num].write_message(tornado.escape.json_encode(data))
except:
logging.error("Error sending message", exc_info=True)
def on_message(self, instruction):
logging.info("got message %r", instruction)
touch(instructions_data_file(self.plant_num))
instructions_file = open(instructions_data_file(self.plant_num), 'w')
instructions_file.write(translate_instruction(instruction))
instructions_file.close()
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main() | en | 0.703037 | #!/usr/bin/env python # # Based off example code found in the Tornado package, and code from # the tinaja labs xbee package. # from stackoverflow question 1158076 # for iOS 5.0 Safari | 2.308197 | 2 |
bin/check_key.py | ifij775/ffprobe-python | 0 | 6618029 | # Print the frame type of all of the video frames of a video
import sys
sys.path.append(".")
from ffprobe import FFProbe
import argparse
from pathlib import Path
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--packets", action="store_true", help="Show whether key packets")
    parser.add_argument("--frames", action="store_true", help="Show whether key frames")
    parser.add_argument("file_path", type=Path)
    args = parser.parse_args()
    # Probe only the first video stream; frame/packet details are opt-in.
    x = FFProbe(args.file_path, select_streams='v:0', show_frames=args.frames, show_packets=args.packets)
    if args.packets:
        print("Packets:")
        packets = x.packets()
        for packet in packets:
            # One digit per packet: 1 = keyframe-flagged, 0 = not.
            print(int(packet.has_key_flag()), end='')
        print('')
    if args.frames:
        print("Frames:")
        frames = x.frames()
        for frame in frames:
            # One digit per frame: 1 = key frame, 0 = not.
            print(int(frame.is_key_frame()), end='')
        print('')
| # Print the frame type of all of the video frames of a video
import sys
sys.path.append(".")
from ffprobe import FFProbe
import argparse
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--packets", action="store_true", help="Show whether key packets")
parser.add_argument("--frames", action="store_true", help="Show whether key frames")
parser.add_argument("file_path", type=Path)
args = parser.parse_args()
x = FFProbe(args.file_path, select_streams='v:0', show_frames=args.frames, show_packets=args.packets)
if args.packets:
print("Packets:")
packets = x.packets()
for packet in packets:
print(int(packet.has_key_flag()), end='')
print('')
if args.frames:
print("Frames:")
frames = x.frames()
for frame in frames:
print(int(frame.is_key_frame()), end='')
print('')
| en | 0.73393 | # Print the frame type of all of the video frames of a video | 3.126481 | 3 |
simulator/workload.py | KarizCache/serverless | 0 | 6618030 | #!/usr/bin/python3
from job import Job
import itertools
import yaml
import json
import os
class Workload:
def __init__(self, env, scheduler, config_fpath):
self.env = env
self.jobs = itertools.cycle(self.build_workload(config_fpath))
self.scheduler = scheduler
generator = env.process(self.job_generator())
def job_generator(self):
while True:
job = next(self.jobs)
print(f'Workload generator submitted job {job} at {self.env.now}')
yield self.scheduler.put(job)
yield self.env.timeout(200000)
def build_workload(self, config_fpath):
jobs = []
try:
data = yaml.load(open(config_fpath, 'r'), Loader=yaml.FullLoader)
workloaddir = data['benchmark']['workloaddir']
if 'workloads' in data['benchmark']:
workloads = [{'name': w, 'path': os.path.join(workloaddir, w)} for w in data['benchmark']['workloads']]
else:
# remember to add the case to read of the
pass
print(workloads)
prefetch = data['cluster']['prefetch']
for w in workloads:
print(w["name"])
jobs = [Job(self.env, prefetch=prefetch).build_job_from_file(f'{workload["path"]}.g', f'{workload["path"]}.json', name=workload['name']) for workload in workloads]
except:
raise
return jobs
| #!/usr/bin/python3
from job import Job
import itertools
import yaml
import json
import os
class Workload:
def __init__(self, env, scheduler, config_fpath):
self.env = env
self.jobs = itertools.cycle(self.build_workload(config_fpath))
self.scheduler = scheduler
generator = env.process(self.job_generator())
def job_generator(self):
while True:
job = next(self.jobs)
print(f'Workload generator submitted job {job} at {self.env.now}')
yield self.scheduler.put(job)
yield self.env.timeout(200000)
def build_workload(self, config_fpath):
jobs = []
try:
data = yaml.load(open(config_fpath, 'r'), Loader=yaml.FullLoader)
workloaddir = data['benchmark']['workloaddir']
if 'workloads' in data['benchmark']:
workloads = [{'name': w, 'path': os.path.join(workloaddir, w)} for w in data['benchmark']['workloads']]
else:
# remember to add the case to read of the
pass
print(workloads)
prefetch = data['cluster']['prefetch']
for w in workloads:
print(w["name"])
jobs = [Job(self.env, prefetch=prefetch).build_job_from_file(f'{workload["path"]}.g', f'{workload["path"]}.json', name=workload['name']) for workload in workloads]
except:
raise
return jobs
| en | 0.752795 | #!/usr/bin/python3 # remember to add the case to read of the | 2.342313 | 2 |
tests/logic/email_test.py | brenns10/love | 4 | 6618031 | <filename>tests/logic/email_test.py
# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = '<EMAIL>'
recipient = '<EMAIL>'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
| <filename>tests/logic/email_test.py
# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = '<EMAIL>'
recipient = '<EMAIL>'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
| en | 0.974268 | # -*- coding: utf-8 -*- We really just want to test that configuration is honored here. | 2.841862 | 3 |
connected.py | acrosman/connected | 0 | 6618032 | """
A very simple Python program to check if you can ping Google every ten seconds.
"""
from platform import system as system_name # Returns the system/OS name.
import subprocess
import time
def ping(host):
"""
Returns response time in ms if server is up, otherwise an error string.
Remember that some hosts may not respond to a ping request even if the host
name is valid.
"""
# Ping parameters as function of OS
parameters = "-n 1" if system_name().lower() == "windows" else "-c 1"
# Pinging
proc = subprocess.Popen(["ping", parameters, host], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Parse response
response = proc.communicate()
if response[0]:
tokens = response[0].split()
else:
return response[1]
if tokens[0].decode("utf-8") == 'PING':
for t in tokens:
sample = t.decode('utf-8')
if sample[:5] == 'time=':
ms = sample.split('=')[1]
return ms
return 'down'
if __name__ == '__main__':
with open('data.txt', 'ba', buffering=0) as outputFile:
while 1:
outputFile.write("{0}\t{1}\n".format(time.asctime(),
ping('www.google.com')).encode("utf-8"))
time.sleep(5)
| """
A very simple Python program to check if you can ping Google every ten seconds.
"""
from platform import system as system_name # Returns the system/OS name.
import subprocess
import time
def ping(host):
"""
Returns response time in ms if server is up, otherwise an error string.
Remember that some hosts may not respond to a ping request even if the host
name is valid.
"""
# Ping parameters as function of OS
parameters = "-n 1" if system_name().lower() == "windows" else "-c 1"
# Pinging
proc = subprocess.Popen(["ping", parameters, host], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Parse response
response = proc.communicate()
if response[0]:
tokens = response[0].split()
else:
return response[1]
if tokens[0].decode("utf-8") == 'PING':
for t in tokens:
sample = t.decode('utf-8')
if sample[:5] == 'time=':
ms = sample.split('=')[1]
return ms
return 'down'
if __name__ == '__main__':
with open('data.txt', 'ba', buffering=0) as outputFile:
while 1:
outputFile.write("{0}\t{1}\n".format(time.asctime(),
ping('www.google.com')).encode("utf-8"))
time.sleep(5)
| en | 0.825761 | A very simple Python program to check if you can ping Google every ten seconds. # Returns the system/OS name. Returns response time in ms if server is up, otherwise an error string. Remember that some hosts may not respond to a ping request even if the host name is valid. # Ping parameters as function of OS # Pinging # Parse response | 3.543715 | 4 |
skimage_widgets/plugin.py | sofroniewn/proto-skimage-widgets | 0 | 6618033 | from napari_plugin_engine import napari_hook_implementation
@napari_hook_implementation
def napari_experimental_provide_function_widget():
from .annotate import annotate_module
from skimage import filters
return list(annotate_module(filters).values())
| from napari_plugin_engine import napari_hook_implementation
@napari_hook_implementation
def napari_experimental_provide_function_widget():
from .annotate import annotate_module
from skimage import filters
return list(annotate_module(filters).values())
| none | 1 | 1.490342 | 1 | |
pages/migrations/0010_populate_news_index.py | JoshZero87/site | 4 | 6618034 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 20:20
from __future__ import unicode_literals
from django.db import migrations
def populate_news_index(apps, schema_editor):
from pages.models import IndexPage, NewsIndex
# Home Page
home_page = IndexPage.objects.get(title='Our Revolution')
# Our Candidates
news_index = NewsIndex(title='News', slug='press')
home_page.add_child(instance=news_index)
def remove_news_index(apps, schema_editor):
from wagtail.wagtailcore.models import Page
news_index = Page.objects.get(title='News')
news_index.delete()
class Migration(migrations.Migration):
dependencies = [
('pages', '0009_auto_20161024_1818'),
]
operations = [
migrations.RunPython(populate_news_index, reverse_code=remove_news_index)
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 20:20
from __future__ import unicode_literals
from django.db import migrations
def populate_news_index(apps, schema_editor):
from pages.models import IndexPage, NewsIndex
# Home Page
home_page = IndexPage.objects.get(title='Our Revolution')
# Our Candidates
news_index = NewsIndex(title='News', slug='press')
home_page.add_child(instance=news_index)
def remove_news_index(apps, schema_editor):
from wagtail.wagtailcore.models import Page
news_index = Page.objects.get(title='News')
news_index.delete()
class Migration(migrations.Migration):
dependencies = [
('pages', '0009_auto_20161024_1818'),
]
operations = [
migrations.RunPython(populate_news_index, reverse_code=remove_news_index)
]
| en | 0.745568 | # -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-10-24 20:20 # Home Page # Our Candidates | 1.864193 | 2 |
pycordia/utils.py | classPythonAddike/pycordia | 23 | 6618035 | import typing
import inspect
import datetime
import enum
import random
class Color:
class BrandColor(enum.Enum):
blurple = 0x5865F2
green = 0x57F287
yellow = 0xFEE75C
fuchsia = 0xEB459E
red = 0xED4245
white = 0xFFFFFF
black = 0x000000
brand = BrandColor
@classmethod
def random(cls):
"""Generate random color as an integer for use with Discord"""
return random.randint(0, 0xFFFFFF)
class BoolEnumKey:
def __init__(self, name: str, value: typing.Any, is_set: bool):
self.name = name
self.value = value
self.is_set = is_set
def __str__(self):
return str(self.value)
def __repr__(self):
return f"<EnumKey{':set'*bool(self.is_set)} name='{self.name}' value={self.value}>"
class BoolEnum:
def __new__(cls, **kwargs):
# Get defined class attributes
attribs = {}
init = object.__new__(cls)
for name, value in inspect.getmembers(cls):
# If member is not a routine (a function) and if it's not a dunder
if not inspect.isroutine(value) and \
not (name.startswith("__") and name.endswith("__")):
attribs[name] = value
for name, value in kwargs.items():
if name not in attribs:
raise ValueError(f"'{name}' is not a valid keyword argument")
if name in attribs and not isinstance(value, bool):
raise ValueError(f"Argument '{name}' value must be of boolean form")
for name, value in attribs.items():
setattr(init, name, BoolEnumKey(name, value, kwargs.get(name, False)))
return init
def __setattr__(self, name, value):
attr = getattr(self, name, None)
if attr and isinstance(attr, BoolEnumKey):
if isinstance(value, bool):
attr.is_set = value
return
raise ValueError(f"'{name}' must have a value of boolean form")
return object.__setattr__(self, name, value)
def __iter__(self) -> typing.Iterator[BoolEnumKey]:
return iter(value for _, value in inspect.getmembers(
self, predicate=lambda member: isinstance(member, BoolEnumKey)
))
def obj_from_dict(data: dict, obj: typing.Callable, alias: dict = None):
"""Convert a dictionary `data` into an `obj`.
Parameters:
data (dict): The data to provide.
obj (typing.Callable): The object created from `data`
alias (dict): A dictionary containing aliases for any parts of the dictionary. \
Follows a (param-alias) pair
"""
sig = inspect.signature(obj)
new_inf = {}
for param in sig.parameters:
if alias and param in alias:
new_inf[param] = data[alias[param]]
elif param in data:
new_inf[param] = data[param]
return obj(**new_inf)
def obj_to_dict(obj: typing.Any, *, alias: dict = None, ignore_fields: list = None):
"""Convert an `obj` into a dictionary.
Parameters:
obj (typing.Any): The object to convert
alias (dict): A dictionary containing aliases for any parts of the object. \
Follows a (attribute-alias) pair
ignore_fields (list): A list of attributes to ignore when converting
"""
new_inf = {}
if ignore_fields is None:
ignore_fields = []
if obj.__dict__:
for key, value in obj.__dict__.items():
if not key.startswith("_"):
if key not in ignore_fields:
if alias and key in alias:
new_inf[alias[key]] = value
else:
new_inf[key] = value
return new_inf
def mutually_exclusive(*argument_names):
def factory(fun):
async def wrapper(*args, **kwargs):
mutuals = []
for kw in kwargs:
if kw in argument_names:
mutuals.append(kw)
if len(mutuals) > 1:
raise Exception(f"Only one of this group {argument_names} can be provided at a time, found {len(mutuals)}.")
return await fun(*args, **kwargs)
return wrapper
return factory
def snowflake_to_date(snowflake: int) -> datetime.datetime:
"""Converts a snowflake to a valid `datetime.datetime` object
Args:
snowflake (int): A valid snowflake
Returns: `datetime.datetime`
"""
# The algorithm applied is the same as documented in
# https://discord.com/developers/docs/reference
# (snowflake >> 22) + Discord Epoch
ms = (snowflake >> 22) + 1420070400000
# Divided by 1000 and then provided to datetime.datetime
return datetime.datetime.utcfromtimestamp(ms / 1000)
def make_optional(callable: typing.Callable, *args, **kwargs) -> typing.Optional[typing.Any]:
"""Return the result of `callable` if `args` or `kwargs` evaluate to True, else None
Args:
callable (typing.Callable): A callable object
"""
if any(bool(arg) for arg in args) or \
any(bool(kwargs[kwarg]) for kwarg in kwargs):
return callable(*args, **kwargs)
def add_ext(hash_: str):
if hash_.startswith("a_"):
return f'{hash_}.gif'
return f'{hash_}.png'
def get_flag_list(enum_flag: enum.EnumMeta, integer: int):
return [flag for flag in enum_flag if integer & flag.value == flag.value] # type: ignore
| import typing
import inspect
import datetime
import enum
import random
class Color:
class BrandColor(enum.Enum):
blurple = 0x5865F2
green = 0x57F287
yellow = 0xFEE75C
fuchsia = 0xEB459E
red = 0xED4245
white = 0xFFFFFF
black = 0x000000
brand = BrandColor
@classmethod
def random(cls):
"""Generate random color as an integer for use with Discord"""
return random.randint(0, 0xFFFFFF)
class BoolEnumKey:
def __init__(self, name: str, value: typing.Any, is_set: bool):
self.name = name
self.value = value
self.is_set = is_set
def __str__(self):
return str(self.value)
def __repr__(self):
return f"<EnumKey{':set'*bool(self.is_set)} name='{self.name}' value={self.value}>"
class BoolEnum:
def __new__(cls, **kwargs):
# Get defined class attributes
attribs = {}
init = object.__new__(cls)
for name, value in inspect.getmembers(cls):
# If member is not a routine (a function) and if it's not a dunder
if not inspect.isroutine(value) and \
not (name.startswith("__") and name.endswith("__")):
attribs[name] = value
for name, value in kwargs.items():
if name not in attribs:
raise ValueError(f"'{name}' is not a valid keyword argument")
if name in attribs and not isinstance(value, bool):
raise ValueError(f"Argument '{name}' value must be of boolean form")
for name, value in attribs.items():
setattr(init, name, BoolEnumKey(name, value, kwargs.get(name, False)))
return init
def __setattr__(self, name, value):
attr = getattr(self, name, None)
if attr and isinstance(attr, BoolEnumKey):
if isinstance(value, bool):
attr.is_set = value
return
raise ValueError(f"'{name}' must have a value of boolean form")
return object.__setattr__(self, name, value)
def __iter__(self) -> typing.Iterator[BoolEnumKey]:
return iter(value for _, value in inspect.getmembers(
self, predicate=lambda member: isinstance(member, BoolEnumKey)
))
def obj_from_dict(data: dict, obj: typing.Callable, alias: dict = None):
"""Convert a dictionary `data` into an `obj`.
Parameters:
data (dict): The data to provide.
obj (typing.Callable): The object created from `data`
alias (dict): A dictionary containing aliases for any parts of the dictionary. \
Follows a (param-alias) pair
"""
sig = inspect.signature(obj)
new_inf = {}
for param in sig.parameters:
if alias and param in alias:
new_inf[param] = data[alias[param]]
elif param in data:
new_inf[param] = data[param]
return obj(**new_inf)
def obj_to_dict(obj: typing.Any, *, alias: dict = None, ignore_fields: list = None):
"""Convert an `obj` into a dictionary.
Parameters:
obj (typing.Any): The object to convert
alias (dict): A dictionary containing aliases for any parts of the object. \
Follows a (attribute-alias) pair
ignore_fields (list): A list of attributes to ignore when converting
"""
new_inf = {}
if ignore_fields is None:
ignore_fields = []
if obj.__dict__:
for key, value in obj.__dict__.items():
if not key.startswith("_"):
if key not in ignore_fields:
if alias and key in alias:
new_inf[alias[key]] = value
else:
new_inf[key] = value
return new_inf
def mutually_exclusive(*argument_names):
def factory(fun):
async def wrapper(*args, **kwargs):
mutuals = []
for kw in kwargs:
if kw in argument_names:
mutuals.append(kw)
if len(mutuals) > 1:
raise Exception(f"Only one of this group {argument_names} can be provided at a time, found {len(mutuals)}.")
return await fun(*args, **kwargs)
return wrapper
return factory
def snowflake_to_date(snowflake: int) -> datetime.datetime:
"""Converts a snowflake to a valid `datetime.datetime` object
Args:
snowflake (int): A valid snowflake
Returns: `datetime.datetime`
"""
# The algorithm applied is the same as documented in
# https://discord.com/developers/docs/reference
# (snowflake >> 22) + Discord Epoch
ms = (snowflake >> 22) + 1420070400000
# Divided by 1000 and then provided to datetime.datetime
return datetime.datetime.utcfromtimestamp(ms / 1000)
def make_optional(callable: typing.Callable, *args, **kwargs) -> typing.Optional[typing.Any]:
"""Return the result of `callable` if `args` or `kwargs` evaluate to True, else None
Args:
callable (typing.Callable): A callable object
"""
if any(bool(arg) for arg in args) or \
any(bool(kwargs[kwarg]) for kwarg in kwargs):
return callable(*args, **kwargs)
def add_ext(hash_: str):
if hash_.startswith("a_"):
return f'{hash_}.gif'
return f'{hash_}.png'
def get_flag_list(enum_flag: enum.EnumMeta, integer: int):
return [flag for flag in enum_flag if integer & flag.value == flag.value] # type: ignore
| en | 0.659865 | Generate random color as an integer for use with Discord # Get defined class attributes # If member is not a routine (a function) and if it's not a dunder Convert a dictionary `data` into an `obj`. Parameters: data (dict): The data to provide. obj (typing.Callable): The object created from `data` alias (dict): A dictionary containing aliases for any parts of the dictionary. \ Follows a (param-alias) pair Convert an `obj` into a dictionary. Parameters: obj (typing.Any): The object to convert alias (dict): A dictionary containing aliases for any parts of the object. \ Follows a (attribute-alias) pair ignore_fields (list): A list of attributes to ignore when converting Converts a snowflake to a valid `datetime.datetime` object Args: snowflake (int): A valid snowflake Returns: `datetime.datetime` # The algorithm applied is the same as documented in # https://discord.com/developers/docs/reference # (snowflake >> 22) + Discord Epoch # Divided by 1000 and then provided to datetime.datetime Return the result of `callable` if `args` or `kwargs` evaluate to True, else None Args: callable (typing.Callable): A callable object # type: ignore | 3.114112 | 3 |
domaintools/build_data_file.py | dieselmachine/domaintools | 42 | 6618036 | import os.path
import pprint
import requests
PUBLIC_SUFFIX_URL = 'https://publicsuffix.org/list/effective_tld_names.dat'
def parse_tld_data(data):
""" Parses raw data from the suffix list into a python dict """
private = False
tlds = {}
for line in data.split('\n'):
line = line.strip().decode('utf8')
if line.startswith('// ===BEGIN PRIVATE DOMAINS==='):
# set the private flag, all future values are private
private = True
continue
if not line or line.startswith('//'):
continue
line = line.encode('idna')
frags = line.split('.')
if frags[-1] not in tlds:
tlds[frags[-1]] = {}
tlds[frags[-1]][line] = private
return tlds
def update_data_file():
""" grabs the latest public suffix list, parses it, and saves to data.py"""
rsp = requests.get(PUBLIC_SUFFIX_URL)
data = rsp.content
tlds = parse_tld_data(data)
out = 'TLDS = %s' % pprint.pformat(tlds)
frags = __file__.rsplit('/',1)
if len(frags) == 1:
filename = 'data.py'
else:
filename = os.path.join(frags[0], 'data.py')
f = open(filename, 'w')
f.write(out)
f.close()
if __name__ == '__main__':
update_data_file()
| import os.path
import pprint
import requests
PUBLIC_SUFFIX_URL = 'https://publicsuffix.org/list/effective_tld_names.dat'
def parse_tld_data(data):
""" Parses raw data from the suffix list into a python dict """
private = False
tlds = {}
for line in data.split('\n'):
line = line.strip().decode('utf8')
if line.startswith('// ===BEGIN PRIVATE DOMAINS==='):
# set the private flag, all future values are private
private = True
continue
if not line or line.startswith('//'):
continue
line = line.encode('idna')
frags = line.split('.')
if frags[-1] not in tlds:
tlds[frags[-1]] = {}
tlds[frags[-1]][line] = private
return tlds
def update_data_file():
""" grabs the latest public suffix list, parses it, and saves to data.py"""
rsp = requests.get(PUBLIC_SUFFIX_URL)
data = rsp.content
tlds = parse_tld_data(data)
out = 'TLDS = %s' % pprint.pformat(tlds)
frags = __file__.rsplit('/',1)
if len(frags) == 1:
filename = 'data.py'
else:
filename = os.path.join(frags[0], 'data.py')
f = open(filename, 'w')
f.write(out)
f.close()
if __name__ == '__main__':
update_data_file()
| en | 0.662791 | Parses raw data from the suffix list into a python dict # set the private flag, all future values are private grabs the latest public suffix list, parses it, and saves to data.py | 3.278282 | 3 |
py/audio_decoder.py | MattDietz/lightshowpi | 0 | 6618037 | import aifc
import os
import struct
import subprocess
import wave
LAME_BIN = "lame"
FAAD_BIN = "faad"
FLAC_BIN = "flac"
FFMPEG_BIN = "ffmpeg"
OGGDEC_BIN = "oggdec"
class PCMProxy(object):
def __init__(self, input_proc, filename):
self._input_proc = input_proc
self._filename = filename
self._read_header()
def __del__(self):
self.close()
def close(self):
self._input_proc.stdout.close()
def _read_header(self):
self._nframes = 0
self._soundpos = 0
input_stream = self._input_proc.stdout
# Read in all data
header = input_stream.read(44)
# Verify that the correct identifiers are present
if (header[0:4] != "RIFF") or (header[12:16] != "fmt "):
raise Exception("file does not start with RIFF id or fmt chunk"
"missing")
self._chunksize = struct.unpack('<L', header[4:8])[0]
self._format = header[8:12]
self._nchannels = struct.unpack('<H', header[22:24])[0]
self._framerate = struct.unpack('<L', header[24:28])[0]
self._bitspersample = struct.unpack('<H', header[34:36])[0]
self._sampwidth = (self._bitspersample + 7) // 8
self._framesize = self._nchannels * self._sampwidth
def readframes(self, nframes):
r = self._input_proc.stdout.read(nframes * self._framesize)
if not r and self._soundpos + nframes <= self._nframes:
r = (nframes * self._framesize) * "\x00"
if r:
self._soundpos += nframes
return r
def getnchannels(self):
return self._nchannels
def getframerate(self):
return self._framerate
def getsampwidth(self):
return self._sampwidth
def open(file_name):
name = os.path.abspath(file_name)
if not os.path.exists(file_name):
raise IOError("No such file or directory: '%s'" % file_name)
_, file_ext = os.path.splitext(name)
file_ext = file_ext[1:]
proc_args = []
audio_file = None
if file_ext in ("mp4", "m4a", "m4b", "aac"):
proc_args = [FAAD_BIN, "-q", "-f", "2", "-w", name]
elif file_ext == "ogg":
proc_args = [OGG_BIN, "-q", "-o", "-", name]
elif file_ext in ("wav", "wave"):
audio_file = wave.open(name, "r")
elif file_ext in ("aiff", "aif"):
audio_file = aifc.open(name, "r")
elif file_ext in ("mp1", "mp2", "mp3"):
proc_args = [LAME_BIN, "--quiet", "--decode", name, "-"]
elif file_ext in ("flac", "oga"):
proc_args = [FLAC_BIN, "--silent", "--stdout", "-d", name]
elif file_ext == "wma":
proc_args = [FFMPEG_BIN, "-i", name, "-f", "wav", "-"]
if proc_args:
proc = subprocess.Popen(proc_args, stdout=subprocess.PIPE)
audio_file = PCMProxy(proc, name)
return audio_file
| import aifc
import os
import struct
import subprocess
import wave
LAME_BIN = "lame"
FAAD_BIN = "faad"
FLAC_BIN = "flac"
FFMPEG_BIN = "ffmpeg"
OGGDEC_BIN = "oggdec"
class PCMProxy(object):
def __init__(self, input_proc, filename):
self._input_proc = input_proc
self._filename = filename
self._read_header()
def __del__(self):
self.close()
def close(self):
self._input_proc.stdout.close()
def _read_header(self):
self._nframes = 0
self._soundpos = 0
input_stream = self._input_proc.stdout
# Read in all data
header = input_stream.read(44)
# Verify that the correct identifiers are present
if (header[0:4] != "RIFF") or (header[12:16] != "fmt "):
raise Exception("file does not start with RIFF id or fmt chunk"
"missing")
self._chunksize = struct.unpack('<L', header[4:8])[0]
self._format = header[8:12]
self._nchannels = struct.unpack('<H', header[22:24])[0]
self._framerate = struct.unpack('<L', header[24:28])[0]
self._bitspersample = struct.unpack('<H', header[34:36])[0]
self._sampwidth = (self._bitspersample + 7) // 8
self._framesize = self._nchannels * self._sampwidth
def readframes(self, nframes):
r = self._input_proc.stdout.read(nframes * self._framesize)
if not r and self._soundpos + nframes <= self._nframes:
r = (nframes * self._framesize) * "\x00"
if r:
self._soundpos += nframes
return r
def getnchannels(self):
return self._nchannels
def getframerate(self):
return self._framerate
def getsampwidth(self):
return self._sampwidth
def open(file_name):
name = os.path.abspath(file_name)
if not os.path.exists(file_name):
raise IOError("No such file or directory: '%s'" % file_name)
_, file_ext = os.path.splitext(name)
file_ext = file_ext[1:]
proc_args = []
audio_file = None
if file_ext in ("mp4", "m4a", "m4b", "aac"):
proc_args = [FAAD_BIN, "-q", "-f", "2", "-w", name]
elif file_ext == "ogg":
proc_args = [OGG_BIN, "-q", "-o", "-", name]
elif file_ext in ("wav", "wave"):
audio_file = wave.open(name, "r")
elif file_ext in ("aiff", "aif"):
audio_file = aifc.open(name, "r")
elif file_ext in ("mp1", "mp2", "mp3"):
proc_args = [LAME_BIN, "--quiet", "--decode", name, "-"]
elif file_ext in ("flac", "oga"):
proc_args = [FLAC_BIN, "--silent", "--stdout", "-d", name]
elif file_ext == "wma":
proc_args = [FFMPEG_BIN, "-i", name, "-f", "wav", "-"]
if proc_args:
proc = subprocess.Popen(proc_args, stdout=subprocess.PIPE)
audio_file = PCMProxy(proc, name)
return audio_file
| en | 0.904236 | # Read in all data # Verify that the correct identifiers are present | 2.42472 | 2 |
sympy/physics/tests/test_pring.py | msgoff/sympy | 0 | 6618038 | <reponame>msgoff/sympy
from sympy.physics.pring import wavefunction, energy
from sympy import pi, integrate, sqrt, exp, simplify, I
from sympy.abc import m, x, r
from sympy.physics.quantum.constants import hbar
def test_wavefunction():
Psi = {
0: (1 / sqrt(2 * pi)),
1: (1 / sqrt(2 * pi)) * exp(I * x),
2: (1 / sqrt(2 * pi)) * exp(2 * I * x),
3: (1 / sqrt(2 * pi)) * exp(3 * I * x),
}
for n in Psi:
assert simplify(wavefunction(n, x) - Psi[n]) == 0
def test_norm(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert integrate(wavefunction(i, x) * wavefunction(-i, x), (x, 0, 2 * pi)) == 1
def test_orthogonality(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
for j in range(i + 1, n + 1):
assert (
integrate(wavefunction(i, x) * wavefunction(j, x), (x, 0, 2 * pi)) == 0
)
def test_energy(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert (
simplify(energy(i, m, r) - ((i ** 2 * hbar ** 2) / (2 * m * r ** 2))) == 0
)
| from sympy.physics.pring import wavefunction, energy
from sympy import pi, integrate, sqrt, exp, simplify, I
from sympy.abc import m, x, r
from sympy.physics.quantum.constants import hbar
def test_wavefunction():
Psi = {
0: (1 / sqrt(2 * pi)),
1: (1 / sqrt(2 * pi)) * exp(I * x),
2: (1 / sqrt(2 * pi)) * exp(2 * I * x),
3: (1 / sqrt(2 * pi)) * exp(3 * I * x),
}
for n in Psi:
assert simplify(wavefunction(n, x) - Psi[n]) == 0
def test_norm(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert integrate(wavefunction(i, x) * wavefunction(-i, x), (x, 0, 2 * pi)) == 1
def test_orthogonality(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
for j in range(i + 1, n + 1):
assert (
integrate(wavefunction(i, x) * wavefunction(j, x), (x, 0, 2 * pi)) == 0
)
def test_energy(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert (
simplify(energy(i, m, r) - ((i ** 2 * hbar ** 2) / (2 * m * r ** 2))) == 0
) | en | 0.981187 | # Maximum "n" which is tested: # Maximum "n" which is tested: # Maximum "n" which is tested: | 2.956163 | 3 |
models/layers/CRF.py | JRC1995/SocialMediaNER | 0 | 6618039 |
import torch
import torch.nn as nn
import numpy as np
from models.layers.crf_function import crf_loss_func
from collections import OrderedDict
class CRF(nn.Module):
def __init__(self, embed_dim, num_labels, device):
super(CRF, self).__init__()
self.embed_dim = embed_dim
self.num_labels = num_labels
self.device = device
self.W = nn.Parameter(torch.rand(self.embed_dim, self.num_labels)).to(device)
self.T = nn.Parameter(torch.rand(self.num_labels, self.num_labels)).to(device)
self.init_params()
def init_params(self):
"""
Initialize trainable parameters of CRF here
"""
for n, p in self.named_parameters():
nn.init.xavier_uniform_(p)
def decode(self, X, pad_mask):
"""
Pre-condition: node_potentials tensor of size N x S x C
where N is the batch size
S is the sequence length
C is the number of labels
pad_mask tensor of size N x S
Post-condition: Returns partition function in log space log Z for each sample in batch. Shape: N.
"""
features = X
N, S, _ = features.size()
l = torch.zeros(N, S, self.num_labels).float().to(self.device)
node_potentials = self.node_potential_linear(features)
# make sure the node_potential at step -1 is the potential for the last non padded node
# easier to use for computing max score
for t in range(S):
node_potentials[:, t, :] = (1-pad_mask[:, t].unsqueeze(-1))*node_potentials[:, t-1, :] \
+ pad_mask[:, t].unsqueeze(-1)*node_potentials[:, t, :]
for t in range(1, S):
for c in range(self.num_labels):
new = torch.max(node_potentials[:, t-1, :] +
self.T[:, c].unsqueeze(0) + l[:, t-1, :], dim=-1)[0]
old = l[:, t-1, c]
# only update if the timestep is not padded
l[:, t, c] = pad_mask[:, t]*new + (1-pad_mask[:, t])*old
score, prev_c = torch.max(node_potentials[:, S-1, :] + l[:, S-1, :], dim=-1)
# prev_c = pad_mask[:, S-1]*prev_c + (1-pad_mask[:, S-1])*(-1)
# prev_c = prev_c.long()
# Backtracking
path_c = pad_mask[:, S-1]*prev_c + (1-pad_mask[:, S-1])*(-1) # use -1 for pad positions
path = [path_c.unsqueeze(1)]
for t in range(S-2, -1, -1):
prev_c_ = torch.argmax(node_potentials[:, t, :] +
self.T[:, prev_c].permute(1, 0).contiguous() + l[:, t, :], dim=-1)
# prev_c_ only means something if position t+1 was not a pad
prev_c = (pad_mask[:, t+1]*prev_c_ + (1-pad_mask[:, t+1])*prev_c).long()
path_c = pad_mask[:, t]*prev_c + (1-pad_mask[:, t])*(-1)
path = [path_c.unsqueeze(1)]+path
prediction = torch.cat(path, dim=1)
return prediction, score
def node_potential_linear(self, features):
return torch.matmul(features, self.W)
def loss(self, X, labels, pad_mask):
"""
Compute the negative conditional log-likelihood of a labelling given a sequence.
"""
"""
Pre-condition: X tensor of size N x S x D
where N is the batch size
S is the sequence length
D is the embedding dimension
labels tensor of size N x S x C
where N is the batch size
S is the sequence length
C is the number of labels
(each label must be one-hot encoded)
pad_mask is a tensor of size N x S
Post-condition: Returns objective function (negative logliklihood averaged over batch)
"""
self.features = X
nll = crf_loss_func(pad_mask, self.device).apply(self.features,
self.W,
self.T,
labels)
return nll
|
import torch
import torch.nn as nn
import numpy as np
from models.layers.crf_function import crf_loss_func
from collections import OrderedDict
class CRF(nn.Module):
def __init__(self, embed_dim, num_labels, device):
super(CRF, self).__init__()
self.embed_dim = embed_dim
self.num_labels = num_labels
self.device = device
self.W = nn.Parameter(torch.rand(self.embed_dim, self.num_labels)).to(device)
self.T = nn.Parameter(torch.rand(self.num_labels, self.num_labels)).to(device)
self.init_params()
def init_params(self):
"""
Initialize trainable parameters of CRF here
"""
for n, p in self.named_parameters():
nn.init.xavier_uniform_(p)
def decode(self, X, pad_mask):
"""
Pre-condition: node_potentials tensor of size N x S x C
where N is the batch size
S is the sequence length
C is the number of labels
pad_mask tensor of size N x S
Post-condition: Returns partition function in log space log Z for each sample in batch. Shape: N.
"""
features = X
N, S, _ = features.size()
l = torch.zeros(N, S, self.num_labels).float().to(self.device)
node_potentials = self.node_potential_linear(features)
# make sure the node_potential at step -1 is the potential for the last non padded node
# easier to use for computing max score
for t in range(S):
node_potentials[:, t, :] = (1-pad_mask[:, t].unsqueeze(-1))*node_potentials[:, t-1, :] \
+ pad_mask[:, t].unsqueeze(-1)*node_potentials[:, t, :]
for t in range(1, S):
for c in range(self.num_labels):
new = torch.max(node_potentials[:, t-1, :] +
self.T[:, c].unsqueeze(0) + l[:, t-1, :], dim=-1)[0]
old = l[:, t-1, c]
# only update if the timestep is not padded
l[:, t, c] = pad_mask[:, t]*new + (1-pad_mask[:, t])*old
score, prev_c = torch.max(node_potentials[:, S-1, :] + l[:, S-1, :], dim=-1)
# prev_c = pad_mask[:, S-1]*prev_c + (1-pad_mask[:, S-1])*(-1)
# prev_c = prev_c.long()
# Backtracking
path_c = pad_mask[:, S-1]*prev_c + (1-pad_mask[:, S-1])*(-1) # use -1 for pad positions
path = [path_c.unsqueeze(1)]
for t in range(S-2, -1, -1):
prev_c_ = torch.argmax(node_potentials[:, t, :] +
self.T[:, prev_c].permute(1, 0).contiguous() + l[:, t, :], dim=-1)
# prev_c_ only means something if position t+1 was not a pad
prev_c = (pad_mask[:, t+1]*prev_c_ + (1-pad_mask[:, t+1])*prev_c).long()
path_c = pad_mask[:, t]*prev_c + (1-pad_mask[:, t])*(-1)
path = [path_c.unsqueeze(1)]+path
prediction = torch.cat(path, dim=1)
return prediction, score
def node_potential_linear(self, features):
return torch.matmul(features, self.W)
def loss(self, X, labels, pad_mask):
"""
Compute the negative conditional log-likelihood of a labelling given a sequence.
"""
"""
Pre-condition: X tensor of size N x S x D
where N is the batch size
S is the sequence length
D is the embedding dimension
labels tensor of size N x S x C
where N is the batch size
S is the sequence length
C is the number of labels
(each label must be one-hot encoded)
pad_mask is a tensor of size N x S
Post-condition: Returns objective function (negative logliklihood averaged over batch)
"""
self.features = X
nll = crf_loss_func(pad_mask, self.device).apply(self.features,
self.W,
self.T,
labels)
return nll
| en | 0.833012 | Initialize trainable parameters of CRF here Pre-condition: node_potentials tensor of size N x S x C where N is the batch size S is the sequence length C is the number of labels pad_mask tensor of size N x S Post-condition: Returns partition function in log space log Z for each sample in batch. Shape: N. # make sure the node_potential at step -1 is the potential for the last non padded node # easier to use for computing max score # only update if the timestep is not padded # prev_c = pad_mask[:, S-1]*prev_c + (1-pad_mask[:, S-1])*(-1) # prev_c = prev_c.long() # Backtracking # use -1 for pad positions # prev_c_ only means something if position t+1 was not a pad Compute the negative conditional log-likelihood of a labelling given a sequence. Pre-condition: X tensor of size N x S x D where N is the batch size S is the sequence length D is the embedding dimension labels tensor of size N x S x C where N is the batch size S is the sequence length C is the number of labels (each label must be one-hot encoded) pad_mask is a tensor of size N x S Post-condition: Returns objective function (negative logliklihood averaged over batch) | 2.463295 | 2 |
WEB21-1-12/WEB2/commoninterface/zvlbase.py | coderdq/vuetest | 0 | 6618040 | <gh_stars>0
#! encoding = utf-8
import logging
import time
import visa
from .instrument import InstBase
logger = logging.getLogger('ghost')
class ZVLBase(InstBase):
def __init__(self):
InstBase.__init__(self, 'ZVL')
def reset_zvl(self):
zvlhandle = self.handle
if zvlhandle:
zvlhandle.timeout = 2000
zvlhandle.set_visa_attribute(visa.constants.VI_ATTR_TERMCHAR, 10)
zvlhandle.set_visa_attribute(visa.constants.VI_ATTR_TERMCHAR_EN, True)
zvlhandle.write_termination = '\n'
# zvlhandle.write('*RST')
zvlhandle.write('*CLS;*OPC?')
self.ext_error_checking()
zvlhandle.write('INIT:CONT ON')
zvlhandle.write('INST:SEL NWA')
zvlhandle.write('SYST:DISP:UPD ON')
zvlhandle.write('SYST:ERR:DISP ON')
self.set_pwr(-10)
def new_setup(self, setupn):
try:
setupname = 'set{}'.format(setupn)
zvlhandle = self.handle
zvlhandle.write("MEM:DEF '{}'".format(setupname))
zvlhandle.write("MEM:SEL '{}'".format(setupname)) # 选择新的setup
except:
errors = 'new_setup error'
logger.error(errors)
def query_name(self):
try:
zvlhandle = self.handle
text = zvlhandle.query('*IDN?')
return text.strip()
except:
return 'N.A.'
def set_pwr(self, value):
try:
zvlhandle = self.handle
zvlhandle.write('SOUR:POW {}'.format(value))
except:
errors = 'set_pwr error'
logger.error(errors)
def set_freq(self, start, stop):
'''
默认单位是HZ
:param zvlhandle:
:param start: str,'1GHz'
:param stop: str,'3GHz'
:return:
'''
try:
# zvlhandle.write('FREQ:STAR {}'.format(start))
# zvlhandle.write('FREQ:STOP {}'.format(stop))
zvlhandle = self.handle
zvlhandle.write('FREQ:STAR {};STOP {}'.format(start, stop))
# zvlhandle.ext_error_checking()
except:
errors = 'set_freq error'
logger.error(errors)
def set_freq_span(self, center, span):
'''
:param zvlhandle:
:param center: str
:param span: str 1GHz
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write('FREQ:CENT {}'.format(center))
zvlhandle.write('FREQ:SPAN {}'.format(span))
except:
errors = 'set_freq_span error'
logger.error(errors)
def add_trace(self, n, means, form):
'''
增加trace
:param zvlhandle:
:param n: 1,2,3
:param means: str,S11,S21,S12,S22
form:str,SWR,MLOG
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SDEF 'Trc{}','{}'".format(n, means))
zvlhandle.write("CALC:FORM {}".format(form))
zvlhandle.write("DISP:WIND:TRAC{}:FEED 'Trc{}'".format(n, n)) # 显示到界面
except:
errors = 'add_trace error'
logger.error(errors)
def delete_trace(self, n):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:DEL 'Trc{}'".format(n))
except:
errors = 'delete_trace error'
logger.error(errors)
def change_trace_meas(self, tracen, meas):
'''
:param zvlhandle:
:param tracen:
:param meas: str,'S11','S12','S22','S21'
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write("CALC:PAR:MEAS 'Trc{}','{}'".format(tracen, meas))
except:
errors = 'change_trace_meas error'
logger.error(errors)
def set_ref_value(self, tracen, value):
'''
设置参考值
:param zvlhandle:
:param tracen: 1,2,3
:param value:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('DISP:WIND:TRAC{}:Y:RLEV {}'.format(tracen, value))
except:
errors = 'set_ref_value error'
logger.error(errors)
def set_div_value(self, tracen, value):
'''
设置每一格大小
:param zvlhandle:
:param tracen: 1,2,3
:param value:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('DISP:WIND:TRAC{}:Y:PDIV {}'.format(tracen, value))
except:
errors = 'set_div_value error'
logger.error(errors)
def set_trace_form(self, tracen, value):
'''
设置trace的显示形式
:param zvlhandle:
:param tracen: 1,2,3
:param value: str,SWR,MLIN,MLOG,PHAS,UPH,POL,SMITH
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:FORM {}'.format(value))
except:
errors = 'set_trace_form error'
logger.error(errors)
def set_trace_marker(self, tracen, markern, x):
'''
设置trace的标记
:param zvlhandle:
tracen:1,2,3
:param markern: 1,2,3
:param x:str,1GHz
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern)) # create marker
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:X {}'.format(markern, x))
except:
errors = 'set_trace_marker error'
logger.error(errors)
def create_max_marker(self, tracen, markern):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern))
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:FUNC:EXEC MAX'.format(markern))
self.ext_error_checking()
except:
errors = 'create_max_marker error'
logger.error(errors)
def create_min_marker(self, tracen, markern):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern))
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:FUNC:EXEC MIN'.format(markern))
self.ext_error_checking()
zvlhandle.query('*OPC?')
except:
errors = 'create_min_marker error'
logger.error(errors)
def query_marker(self, tracen, markern):
'''
查询marker位置
:param zvlhandle:
:param tracen:
:param markern:
:return:x:float 单位Hz
y:float
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
# markerX = float(zvlhandle.query('CALC:MARK{}:X?'.format(markern)))
# markerY = float(zvlhandle.query('CALC:MARK{}:Y?'.format(markern)))
strxy = zvlhandle.query('CALC:MARK{}:FUNC:RES?'.format(markern))
# zvlhandle.ext_error_checking()
markerX, markerY = strxy.split(',')
return float(markerX), float(markerY)
except:
errors = 'query_marker error'
logger.error(errors)
def remove_allmarker(self, tracen):
'''
没有单独的删除某个marker的命令,只有全删
:param zvlhandle:
:param tracen:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK:AOFF')
time.sleep(0.1)
self.ext_error_checking()
except:
errors = 'remove_allmarker error'
logger.error(errors)
def save_screenshot(self, src, dest):
'''
:param zvlhandle:
:param path:str
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write('HCOP:DEV:LANG PNG')
zvlhandle.write("MMEM:NAME '{}'".format(src))
zvlhandle.write("HCOP:DEST 'MMEM'")
zvlhandle.write('HCOP:ITEM:ALL')
zvlhandle.write('HCOP')
zvlhandle.query('*OPC?')
# print('screeshot ing...')
self.ext_error_checking()
self.ext_query_bin_data_to_file("MMEM:DATA? '{}'".format(src), dest)
zvlhandle.query('*OPC?')
except:
errors = 'save_screenshot error'
logger.error(errors)
def sel_color(self):
try:
zvlhandler = self.handle
zvlhandler.write('SYST:DISP:COL DBAC') # DARK background
zvlhandler.write('SYST:DISP:COL BWS') # black and white solid
zvlhandler.write('DISP:CMAP13:RGB 1,0,0,SOLid,2') # red
zvlhandler.write('DISP:CMAP14:RGB 0,0,1,SOLid,2') # blue
zvlhandler.write('DISP:CMAP:MARK OFF')
zvlhandler.write('DISP:WIND:TITL ON')
zvlhandler.write("DISP:WIND:TITL:DATA 'S11&S21'") # 显示标题
except:
errors = 'sel_color error'
logger.error(errors)
def auto_scale(self, tracen):
try:
zvlhandler = self.handle
zvlhandler.write("DISP:WIND:TRAC:Y:SCAL:AUTO ONCE,'Trc{}'".format(tracen))
self.set_div_value(tracen, 10)
except:
errors = 'auto scale error'
logger.error(errors)
| #! encoding = utf-8
import logging
import time
import visa
from .instrument import InstBase
logger = logging.getLogger('ghost')
class ZVLBase(InstBase):
def __init__(self):
InstBase.__init__(self, 'ZVL')
def reset_zvl(self):
zvlhandle = self.handle
if zvlhandle:
zvlhandle.timeout = 2000
zvlhandle.set_visa_attribute(visa.constants.VI_ATTR_TERMCHAR, 10)
zvlhandle.set_visa_attribute(visa.constants.VI_ATTR_TERMCHAR_EN, True)
zvlhandle.write_termination = '\n'
# zvlhandle.write('*RST')
zvlhandle.write('*CLS;*OPC?')
self.ext_error_checking()
zvlhandle.write('INIT:CONT ON')
zvlhandle.write('INST:SEL NWA')
zvlhandle.write('SYST:DISP:UPD ON')
zvlhandle.write('SYST:ERR:DISP ON')
self.set_pwr(-10)
def new_setup(self, setupn):
try:
setupname = 'set{}'.format(setupn)
zvlhandle = self.handle
zvlhandle.write("MEM:DEF '{}'".format(setupname))
zvlhandle.write("MEM:SEL '{}'".format(setupname)) # 选择新的setup
except:
errors = 'new_setup error'
logger.error(errors)
def query_name(self):
try:
zvlhandle = self.handle
text = zvlhandle.query('*IDN?')
return text.strip()
except:
return 'N.A.'
def set_pwr(self, value):
try:
zvlhandle = self.handle
zvlhandle.write('SOUR:POW {}'.format(value))
except:
errors = 'set_pwr error'
logger.error(errors)
def set_freq(self, start, stop):
'''
默认单位是HZ
:param zvlhandle:
:param start: str,'1GHz'
:param stop: str,'3GHz'
:return:
'''
try:
# zvlhandle.write('FREQ:STAR {}'.format(start))
# zvlhandle.write('FREQ:STOP {}'.format(stop))
zvlhandle = self.handle
zvlhandle.write('FREQ:STAR {};STOP {}'.format(start, stop))
# zvlhandle.ext_error_checking()
except:
errors = 'set_freq error'
logger.error(errors)
def set_freq_span(self, center, span):
'''
:param zvlhandle:
:param center: str
:param span: str 1GHz
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write('FREQ:CENT {}'.format(center))
zvlhandle.write('FREQ:SPAN {}'.format(span))
except:
errors = 'set_freq_span error'
logger.error(errors)
def add_trace(self, n, means, form):
'''
增加trace
:param zvlhandle:
:param n: 1,2,3
:param means: str,S11,S21,S12,S22
form:str,SWR,MLOG
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SDEF 'Trc{}','{}'".format(n, means))
zvlhandle.write("CALC:FORM {}".format(form))
zvlhandle.write("DISP:WIND:TRAC{}:FEED 'Trc{}'".format(n, n)) # 显示到界面
except:
errors = 'add_trace error'
logger.error(errors)
def delete_trace(self, n):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:DEL 'Trc{}'".format(n))
except:
errors = 'delete_trace error'
logger.error(errors)
def change_trace_meas(self, tracen, meas):
'''
:param zvlhandle:
:param tracen:
:param meas: str,'S11','S12','S22','S21'
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write("CALC:PAR:MEAS 'Trc{}','{}'".format(tracen, meas))
except:
errors = 'change_trace_meas error'
logger.error(errors)
def set_ref_value(self, tracen, value):
'''
设置参考值
:param zvlhandle:
:param tracen: 1,2,3
:param value:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('DISP:WIND:TRAC{}:Y:RLEV {}'.format(tracen, value))
except:
errors = 'set_ref_value error'
logger.error(errors)
def set_div_value(self, tracen, value):
'''
设置每一格大小
:param zvlhandle:
:param tracen: 1,2,3
:param value:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('DISP:WIND:TRAC{}:Y:PDIV {}'.format(tracen, value))
except:
errors = 'set_div_value error'
logger.error(errors)
def set_trace_form(self, tracen, value):
'''
设置trace的显示形式
:param zvlhandle:
:param tracen: 1,2,3
:param value: str,SWR,MLIN,MLOG,PHAS,UPH,POL,SMITH
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:FORM {}'.format(value))
except:
errors = 'set_trace_form error'
logger.error(errors)
def set_trace_marker(self, tracen, markern, x):
'''
设置trace的标记
:param zvlhandle:
tracen:1,2,3
:param markern: 1,2,3
:param x:str,1GHz
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern)) # create marker
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:X {}'.format(markern, x))
except:
errors = 'set_trace_marker error'
logger.error(errors)
def create_max_marker(self, tracen, markern):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern))
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:FUNC:EXEC MAX'.format(markern))
self.ext_error_checking()
except:
errors = 'create_max_marker error'
logger.error(errors)
def create_min_marker(self, tracen, markern):
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK{} ON'.format(markern))
time.sleep(0.1)
zvlhandle.write('CALC:MARK{}:FUNC:EXEC MIN'.format(markern))
self.ext_error_checking()
zvlhandle.query('*OPC?')
except:
errors = 'create_min_marker error'
logger.error(errors)
def query_marker(self, tracen, markern):
'''
查询marker位置
:param zvlhandle:
:param tracen:
:param markern:
:return:x:float 单位Hz
y:float
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
# markerX = float(zvlhandle.query('CALC:MARK{}:X?'.format(markern)))
# markerY = float(zvlhandle.query('CALC:MARK{}:Y?'.format(markern)))
strxy = zvlhandle.query('CALC:MARK{}:FUNC:RES?'.format(markern))
# zvlhandle.ext_error_checking()
markerX, markerY = strxy.split(',')
return float(markerX), float(markerY)
except:
errors = 'query_marker error'
logger.error(errors)
def remove_allmarker(self, tracen):
'''
没有单独的删除某个marker的命令,只有全删
:param zvlhandle:
:param tracen:
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write("CALC:PAR:SEL 'Trc{}'".format(tracen))
zvlhandle.write('CALC:MARK:AOFF')
time.sleep(0.1)
self.ext_error_checking()
except:
errors = 'remove_allmarker error'
logger.error(errors)
def save_screenshot(self, src, dest):
'''
:param zvlhandle:
:param path:str
:return:
'''
try:
zvlhandle = self.handle
zvlhandle.write('HCOP:DEV:LANG PNG')
zvlhandle.write("MMEM:NAME '{}'".format(src))
zvlhandle.write("HCOP:DEST 'MMEM'")
zvlhandle.write('HCOP:ITEM:ALL')
zvlhandle.write('HCOP')
zvlhandle.query('*OPC?')
# print('screeshot ing...')
self.ext_error_checking()
self.ext_query_bin_data_to_file("MMEM:DATA? '{}'".format(src), dest)
zvlhandle.query('*OPC?')
except:
errors = 'save_screenshot error'
logger.error(errors)
def sel_color(self):
try:
zvlhandler = self.handle
zvlhandler.write('SYST:DISP:COL DBAC') # DARK background
zvlhandler.write('SYST:DISP:COL BWS') # black and white solid
zvlhandler.write('DISP:CMAP13:RGB 1,0,0,SOLid,2') # red
zvlhandler.write('DISP:CMAP14:RGB 0,0,1,SOLid,2') # blue
zvlhandler.write('DISP:CMAP:MARK OFF')
zvlhandler.write('DISP:WIND:TITL ON')
zvlhandler.write("DISP:WIND:TITL:DATA 'S11&S21'") # 显示标题
except:
errors = 'sel_color error'
logger.error(errors)
def auto_scale(self, tracen):
try:
zvlhandler = self.handle
zvlhandler.write("DISP:WIND:TRAC:Y:SCAL:AUTO ONCE,'Trc{}'".format(tracen))
self.set_div_value(tracen, 10)
except:
errors = 'auto scale error'
logger.error(errors) | en | 0.152232 | #! encoding = utf-8 # zvlhandle.write('*RST') # 选择新的setup 默认单位是HZ :param zvlhandle: :param start: str,'1GHz' :param stop: str,'3GHz' :return: # zvlhandle.write('FREQ:STAR {}'.format(start)) # zvlhandle.write('FREQ:STOP {}'.format(stop)) # zvlhandle.ext_error_checking() :param zvlhandle: :param center: str :param span: str 1GHz :return: 增加trace :param zvlhandle: :param n: 1,2,3 :param means: str,S11,S21,S12,S22 form:str,SWR,MLOG :return: # 显示到界面 :param zvlhandle: :param tracen: :param meas: str,'S11','S12','S22','S21' :return: 设置参考值 :param zvlhandle: :param tracen: 1,2,3 :param value: :return: 设置每一格大小 :param zvlhandle: :param tracen: 1,2,3 :param value: :return: 设置trace的显示形式 :param zvlhandle: :param tracen: 1,2,3 :param value: str,SWR,MLIN,MLOG,PHAS,UPH,POL,SMITH :return: 设置trace的标记 :param zvlhandle: tracen:1,2,3 :param markern: 1,2,3 :param x:str,1GHz :return: # create marker 查询marker位置 :param zvlhandle: :param tracen: :param markern: :return:x:float 单位Hz y:float # markerX = float(zvlhandle.query('CALC:MARK{}:X?'.format(markern))) # markerY = float(zvlhandle.query('CALC:MARK{}:Y?'.format(markern))) # zvlhandle.ext_error_checking() 没有单独的删除某个marker的命令,只有全删 :param zvlhandle: :param tracen: :return: :param zvlhandle: :param path:str :return: # print('screeshot ing...') # DARK background # black and white solid # red # blue # 显示标题 | 2.093195 | 2 |
scripting/plots/plot-base-heat-NDE.py | znes/OSeEM-DE | 1 | 6618041 | #Update the directory paths accordingly
import plotly.offline as offline
from oemof.tabular.tools.plots_heat_NDE import hourly_plot
name = "base-heat-NDE"
offline.plot(
hourly_plot(
name,
"heat_Bus_NDE",
'/home/dozeummam/Insync/<EMAIL>/Google Drive/projects/models/OSeEM-DE/results/scenarios/base-NDE-SDE',
plot_filling_levels=False
),
filename= '/home/dozeummam/Insync/<EMAIL>/Google Drive/projects/models/OSeEM-DE/results/plots/base-heat-NDE ' 'hourly-plot.html'
)
| #Update the directory paths accordingly
import plotly.offline as offline
from oemof.tabular.tools.plots_heat_NDE import hourly_plot
name = "base-heat-NDE"
offline.plot(
hourly_plot(
name,
"heat_Bus_NDE",
'/home/dozeummam/Insync/<EMAIL>/Google Drive/projects/models/OSeEM-DE/results/scenarios/base-NDE-SDE',
plot_filling_levels=False
),
filename= '/home/dozeummam/Insync/<EMAIL>/Google Drive/projects/models/OSeEM-DE/results/plots/base-heat-NDE ' 'hourly-plot.html'
)
| en | 0.796189 | #Update the directory paths accordingly | 1.968444 | 2 |
SC/demo_data.py | lfr4704/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 6618042 | import os
import sqlite3
import psycopg2
import numpy as np
from psycopg2.extras import execute_values
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
#Part 1
# construct a path to wherever your database exists
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "SC", "demo_data.sqlite3")
connection = sqlite3.connect(DB_FILEPATH)
print("CONNECTION:", connection)
#connection.row_factory = sqlite3.Row
cursor = connection.cursor()
print("CURSOR", cursor)
# create a table to store the passenger
drop_existing_sql_table = """
DROP TABLE IF EXISTS demo_data;
"""
table_creation_sql = """
CREATE TABLE IF NOT EXISTS demo_data (
id SERIAL PRIMARY KEY,
"s" text,
"x" int4,
"y" int4);
"""
insertion_query = '''
INSERT INTO demo_data (s, x, y)
VALUES
('g', 3, 9),
('v', 5, 7),
('f', 8, 7);
'''
cursor.execute(drop_existing_sql_table)
cursor.execute(table_creation_sql)
cursor.execute(insertion_query)
#- Count how many rows you have - it should be 3!
query = """
SELECT count(*)
FROM demo_data
"""
#- How many rows are there where both `x` and `y` are at least 5?
query2 = """
SELECT *
FROM demo_data
WHERE x = 5 AND y = 5
"""
#- How many unique values of `y` are there (hint - `COUNT()` can accept a keyword `DISTINCT`)?
query3 = """
SELECT COUNT(DISTINCT y)
FROM demo_data
"""
result = cursor.execute(query).fetchall()
result2 = cursor.execute(query2).fetchall()
result3 = cursor.execute(query3).fetchall()
print("RESULT 1:", result)
print("RESULT 2:", result2)
print("RESULT 3:", result3)
# ACTUALLY SAVE THE TRANSACTIONS
connection.commit()
cursor.close()
connection.close()
| import os
import sqlite3
import psycopg2
import numpy as np
from psycopg2.extras import execute_values
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
#Part 1
# construct a path to wherever your database exists
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "SC", "demo_data.sqlite3")
connection = sqlite3.connect(DB_FILEPATH)
print("CONNECTION:", connection)
#connection.row_factory = sqlite3.Row
cursor = connection.cursor()
print("CURSOR", cursor)
# create a table to store the passenger
drop_existing_sql_table = """
DROP TABLE IF EXISTS demo_data;
"""
table_creation_sql = """
CREATE TABLE IF NOT EXISTS demo_data (
id SERIAL PRIMARY KEY,
"s" text,
"x" int4,
"y" int4);
"""
insertion_query = '''
INSERT INTO demo_data (s, x, y)
VALUES
('g', 3, 9),
('v', 5, 7),
('f', 8, 7);
'''
cursor.execute(drop_existing_sql_table)
cursor.execute(table_creation_sql)
cursor.execute(insertion_query)
#- Count how many rows you have - it should be 3!
query = """
SELECT count(*)
FROM demo_data
"""
#- How many rows are there where both `x` and `y` are at least 5?
query2 = """
SELECT *
FROM demo_data
WHERE x = 5 AND y = 5
"""
#- How many unique values of `y` are there (hint - `COUNT()` can accept a keyword `DISTINCT`)?
query3 = """
SELECT COUNT(DISTINCT y)
FROM demo_data
"""
result = cursor.execute(query).fetchall()
result2 = cursor.execute(query2).fetchall()
result3 = cursor.execute(query3).fetchall()
print("RESULT 1:", result)
print("RESULT 2:", result2)
print("RESULT 3:", result3)
# ACTUALLY SAVE THE TRANSACTIONS
connection.commit()
cursor.close()
connection.close()
| en | 0.664727 | #Part 1 # construct a path to wherever your database exists #connection.row_factory = sqlite3.Row # create a table to store the passenger DROP TABLE IF EXISTS demo_data; CREATE TABLE IF NOT EXISTS demo_data ( id SERIAL PRIMARY KEY, "s" text, "x" int4, "y" int4); INSERT INTO demo_data (s, x, y) VALUES ('g', 3, 9), ('v', 5, 7), ('f', 8, 7); #- Count how many rows you have - it should be 3! SELECT count(*) FROM demo_data #- How many rows are there where both `x` and `y` are at least 5? SELECT * FROM demo_data WHERE x = 5 AND y = 5 #- How many unique values of `y` are there (hint - `COUNT()` can accept a keyword `DISTINCT`)? SELECT COUNT(DISTINCT y) FROM demo_data # ACTUALLY SAVE THE TRANSACTIONS | 3.402727 | 3 |
FastAPI/model.py | med-cab-bw-team/DS | 0 | 6618043 | <filename>FastAPI/model.py
# Imports
import pandas as pd
import numpy as np
import pickle
from spacy.lang.en import English
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
class TheStash:
strains = pd.read_csv('./DS_MedCab_Unit4/lemmatized_strains.csv')
KNN = "./DS_MedCab_Unit4/KNN_Model.pkl"
CNN = "./DS_MedCab_Unit4/CNN_Model.pkl"
# Loading pickled model from file
loaded_knn = pickle.load(open(KNN, 'rb'))
# Loading pickled model from file
loaded_cnn = pickle.load(open(CNN, 'rb'))
def predict(self, request_text):
transformer = TfidfVectorizer(stop_words="english", min_df=0.025, max_df=0.98, ngram_range=(1,3))
dtm = transformer.fit_transform(self.strains['lemmas'])
transformed = transformer.transform([request_text])
dense = transformed.todense()
recommendations = self.loaded_knn.kneighbors(dense)[1][0]
output_array = []
for recommendation in recommendations:
strain = self.strains.iloc[recommendation]
output = strain.drop(['Unnamed: 0', 'name', 'ailment', 'all_text', 'lemmas']).to_dict()
output_array.append(output)
return output_array
# if __name__ == "__main__":
# text = "help me i need some stinky sweet indica"
# pred = predict(text)
# print(pred) | <filename>FastAPI/model.py
# Imports
import pandas as pd
import numpy as np
import pickle
from spacy.lang.en import English
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
class TheStash:
strains = pd.read_csv('./DS_MedCab_Unit4/lemmatized_strains.csv')
KNN = "./DS_MedCab_Unit4/KNN_Model.pkl"
CNN = "./DS_MedCab_Unit4/CNN_Model.pkl"
# Loading pickled model from file
loaded_knn = pickle.load(open(KNN, 'rb'))
# Loading pickled model from file
loaded_cnn = pickle.load(open(CNN, 'rb'))
def predict(self, request_text):
transformer = TfidfVectorizer(stop_words="english", min_df=0.025, max_df=0.98, ngram_range=(1,3))
dtm = transformer.fit_transform(self.strains['lemmas'])
transformed = transformer.transform([request_text])
dense = transformed.todense()
recommendations = self.loaded_knn.kneighbors(dense)[1][0]
output_array = []
for recommendation in recommendations:
strain = self.strains.iloc[recommendation]
output = strain.drop(['Unnamed: 0', 'name', 'ailment', 'all_text', 'lemmas']).to_dict()
output_array.append(output)
return output_array
# if __name__ == "__main__":
# text = "help me i need some stinky sweet indica"
# pred = predict(text)
# print(pred) | en | 0.801514 | # Imports # Loading pickled model from file # Loading pickled model from file # if __name__ == "__main__": # text = "help me i need some stinky sweet indica" # pred = predict(text) # print(pred) | 2.845086 | 3 |
searchguard/exceptions.py | ByteInternet/searchguard-python | 3 | 6618044 | class SearchGuardException(Exception):
pass
class CreateUserException(SearchGuardException):
pass
class UserAlreadyExistsException(CreateUserException):
pass
class DeleteUserException(SearchGuardException):
pass
class CreateRoleException(SearchGuardException):
pass
class RoleAlreadyExistsException(CreateRoleException):
pass
class DeleteRoleException(SearchGuardException):
pass
class ViewRoleException(SearchGuardException):
pass
class ViewUserException(SearchGuardException):
pass
class ModifyUserException(SearchGuardException):
pass
class ModifyRoleException(SearchGuardException):
pass
class ListUsersException(SearchGuardException):
pass
class CheckUserExistsException(SearchGuardException):
pass
class CheckRoleExistsException(SearchGuardException):
pass
class RoleMappingException(SearchGuardException):
pass
class CreateRoleMappingException(SearchGuardException):
pass
class CheckRoleMappingExistsException(SearchGuardException):
pass
class ModifyRoleMappingException(SearchGuardException):
pass
class ViewAllRoleMappingException(SearchGuardException):
pass
class ViewRoleMappingException(SearchGuardException):
pass
class DeleteRoleMappingException(SearchGuardException):
pass
| class SearchGuardException(Exception):
pass
class CreateUserException(SearchGuardException):
pass
class UserAlreadyExistsException(CreateUserException):
pass
class DeleteUserException(SearchGuardException):
pass
class CreateRoleException(SearchGuardException):
pass
class RoleAlreadyExistsException(CreateRoleException):
pass
class DeleteRoleException(SearchGuardException):
pass
class ViewRoleException(SearchGuardException):
pass
class ViewUserException(SearchGuardException):
pass
class ModifyUserException(SearchGuardException):
pass
class ModifyRoleException(SearchGuardException):
pass
class ListUsersException(SearchGuardException):
pass
class CheckUserExistsException(SearchGuardException):
pass
class CheckRoleExistsException(SearchGuardException):
pass
class RoleMappingException(SearchGuardException):
pass
class CreateRoleMappingException(SearchGuardException):
pass
class CheckRoleMappingExistsException(SearchGuardException):
pass
class ModifyRoleMappingException(SearchGuardException):
pass
class ViewAllRoleMappingException(SearchGuardException):
pass
class ViewRoleMappingException(SearchGuardException):
pass
class DeleteRoleMappingException(SearchGuardException):
pass
| none | 1 | 1.714093 | 2 | |
co2_diag/operations/datasetdict.py | dkauf42/gdess | 2 | 6618045 | <filename>co2_diag/operations/datasetdict.py<gh_stars>1-10
import xarray as xr
from dask.diagnostics import ProgressBar
import pickle, logging
_datasetdict_logger = logging.getLogger("{0}.{1}".format(__name__, "loader"))
class DatasetDict(dict):
    """A dict of xarray Datasets that can be operated on as one unit.

    Extends ``dict`` so common xarray operations (selection, mean, load)
    can be queued lazily across every member Dataset at once.
    """

    def __init__(self, *args, **kwargs):
        super(DatasetDict, self).__init__(*args, **kwargs)

    def queue_selection(self, **selection_dict):
        """Queue an xarray ``.sel()`` (or ``.isel()``) on every dataset.

        Pass ``isel=True`` to use index-based selection instead of label
        selection; pass ``inplace=True`` to modify this instance rather
        than a copy.

        Returns
        -------
        A DatasetDict with the selection lazily queued, or None when
        ``inplace=True``.
        """
        _datasetdict_logger.debug("Queueing selection operation. keyword args = %s", selection_dict)
        # 'isel' is consumed here so only real coordinates reach xarray.
        use_index = selection_dict.pop('isel', False)
        selector = xr.Dataset.isel if use_index else xr.Dataset.sel
        queued = self.apply_function_to_all(selector, **selection_dict)
        _datasetdict_logger.info("selection(s) queued, but not yet executed. Ready for .execute_all()")
        return queued

    def queue_mean(self, dim, **kwargs):
        """Queue an xarray ``.mean(dim=...)`` on every dataset.

        Parameters
        ----------
        dim
            dimension(s) passed through to ``xr.Dataset.mean``.
        kwargs
            extra keyword arguments for the mean (``inplace`` is honored).

        Returns
        -------
        A DatasetDict with the mean lazily queued, or None when
        ``inplace=True``.
        """
        _datasetdict_logger.debug("Queueing mean operation. keyword args = %s", kwargs)
        queued = self.apply_function_to_all(xr.Dataset.mean, dim=dim, **kwargs)
        _datasetdict_logger.info("mean calculation queued for all, but not yet executed. Ready for .execute_all()")
        return queued

    def apply_function_to_all(self, fnc, *args, **kwargs):
        """Pipe ``fnc`` through every dataset in the dictionary.

        Parameters
        ----------
        fnc
            callable applied via ``Dataset.pipe``.
        kwargs
            forwarded to ``fnc``; the special key ``inplace`` (default
            False) selects whether this instance or a deep copy is
            modified.

        Returns
        -------
        None when ``inplace=True``, otherwise the modified copy.
        """
        _datasetdict_logger.debug("Processing datasets operation <%s>. keyword args = %s", fnc, kwargs)
        # Consume 'inplace' so it is never forwarded to fnc.
        inplace = kwargs.pop('inplace', False)
        target = self if inplace else self.copy()

        total = len(target)
        if total >= 1:
            for position, name in enumerate(target.keys()):
                _datasetdict_logger.debug("-- %d/%d - %s/.. ", position + 1, total, name)
                target[name] = target[name].pipe(fnc, *args, **kwargs)
            _datasetdict_logger.debug("Operation processed on all datasets.")
        else:
            _datasetdict_logger.debug("Nothing done. No datasets are ready for execution.")

        return None if inplace else target

    def execute_all(self,
                    progressbar: bool = True,
                    inplace: bool = True):
        """Execute every lazily queued operation via ``xr.Dataset.load``.

        Parameters
        ----------
        progressbar : bool, default True
        inplace : bool, default True

        Returns
        -------
        None when ``inplace=True``, otherwise the executed copy.
        """
        if progressbar:
            ProgressBar().register()
        _datasetdict_logger.debug("Executing all queued functions.")
        outcome = self.apply_function_to_all(xr.Dataset.load, inplace=inplace)
        _datasetdict_logger.info("done.")
        return outcome

    def copy(self) -> 'DatasetDict':
        """Return a new DatasetDict holding a deep copy of each dataset.

        Prevents later operations from mutating the originals.
        """
        duplicate = DatasetDict()
        for name, dataset in self.items():
            duplicate[name] = dataset.copy(deep=True)
        return duplicate

    def to_pickle(self, filename: str = 'datasetdict.pickle') -> None:
        """Serialize this DatasetDict with the highest pickle protocol.

        Parameters
        ----------
        filename : str, default 'datasetdict.pickle'
        """
        with open(filename, 'wb') as handle:
            pickle.dump(self, handle, pickle.HIGHEST_PROTOCOL)

    def from_pickle(self,
                    filename: str = 'cmip_collection.latest_executed_datasets.pickle',
                    replace: bool = False) -> 'DatasetDict':
        """Load a DatasetDict from a pickle file.

        When ``replace`` is True the loaded entries overwrite this
        instance's entries (and nothing is returned); otherwise the
        loaded DatasetDict is returned.

        Parameters
        ----------
        filename : str, default 'cmip_collection.latest_executed_datasets.pickle'
        replace : bool, default False
        """
        with open(filename, 'rb') as handle:
            # Pickle detects the protocol version automatically.
            loaded = pickle.load(handle)
        if not replace:
            return loaded
        for name, dataset in loaded.items():
            self[name] = dataset
| <filename>co2_diag/operations/datasetdict.py<gh_stars>1-10
import xarray as xr
from dask.diagnostics import ProgressBar
import pickle, logging
_datasetdict_logger = logging.getLogger("{0}.{1}".format(__name__, "loader"))
class DatasetDict(dict):
"""A dict wrapper for working simultaneously with multiple, consistent xArray Datasets.
Extends the 'dict' class to make it easy to apply selections and calculations
to each and every Dataset in the dictionary. Currently, the following procedures are supported:
- selections
- means
- load
"""
def __init__(self, *args, **kwargs):
super(DatasetDict, self).__init__(*args, **kwargs)
def queue_selection(self, **selection_dict):
"""Select from datasets. Wrapper for xarray's .sel().
Can also use xarray's .isel() with an additional argument.
Example
-------
dsd = DatasetDict()
One can pass slices or individual values:
dsd.queue_selection(time=slice("1960", None), inplace=True)
dsd.queue_selection(plev=100000, inplace=True)
Selections can also be given as a dictionary by using the double splat operator:
selection_dict = {'time': slice("1960", None),
'plev': 100000}
new_dsd = dsd.queue_selection(**selection_dict, inplace=False)
Parameters
----------
selection_dict
include <isel=True> to use index selection instead of keyword selection.
Returns
-------
A DatasetDict with selections lazily queued, but not executed. Or None if inplace==True.
"""
_datasetdict_logger.debug("Queueing selection operation. keyword args = %s", selection_dict)
if selection_dict.pop('isel', False):
returndict = self.apply_function_to_all(xr.Dataset.isel, **selection_dict)
else: # Use the standard selection method if 'isel' key exists & is false, or if key does not exist.
returndict = self.apply_function_to_all(xr.Dataset.sel, **selection_dict)
_datasetdict_logger.info("selection(s) queued, but not yet executed. Ready for .execute_all()")
return returndict
def queue_mean(self, dim, **kwargs):
"""Wrapper for calculating the mean for Xarray Datasets.
Parameters
----------
dim : dict
kwargs
keyword arguments to pass to the mean functions
Returns
-------
A DatasetDict with selections lazily queued, but not executed. Or None if inplace==True.
"""
_datasetdict_logger.debug("Queueing mean operation. keyword args = %s", kwargs)
returndict = self.apply_function_to_all(xr.Dataset.mean, dim=dim, **kwargs)
_datasetdict_logger.info("mean calculation queued for all, but not yet executed. Ready for .execute_all()")
return returndict
def apply_function_to_all(self, fnc, *args, **kwargs):
"""Helper for applying functions to multiple datasets.
The specified function is queued lazily (unless executing=True) for execution on datasets
of an origin dictionary, which will be copied to a destination dictionary.
Hopefully with this, there shouldn't be a need to writing additional looping code.
Parameters
----------
fnc
args
kwargs
keyword arguments, e.g.
inplace : bool
whether the functions should be applied to this DatasetDict or
whether a copy should be returned with the operations applied.
Returns
-------
A DatasetDict if inplace==False, or None if inplace==True
"""
_datasetdict_logger.debug("Processing datasets operation <%s>. keyword args = %s", fnc, kwargs)
# The destination is either this instance or a copy (as determined by the 'inplace' keyword).
# Default is to create a copy.
inplace = kwargs.pop('inplace', False) # Key is removed once no longer needed.
if inplace:
destination_dict = self
else: # A copy is used if the 'inplace' key exists & is false, or if the key does not exist.
destination_dict = self.copy()
# The function is applied to each dataset.
number_of_datasets = len(destination_dict)
if number_of_datasets >= 1:
for i, k in enumerate(destination_dict.keys()):
_datasetdict_logger.debug("-- %d/%d - %s/.. ", i+1, number_of_datasets, k)
destination_dict[k] = destination_dict[k].pipe(fnc, *args, **kwargs)
_datasetdict_logger.debug("Operation processed on all datasets.")
else:
_datasetdict_logger.debug("Nothing done. No datasets are ready for execution.")
if inplace:
return None
else:
return destination_dict
def execute_all(self,
progressbar: bool = True,
inplace: bool = True):
"""Process any lazily loaded selections and computations
Parameters
----------
progressbar : bool, default True
inplace : bool, default True
Returns
-------
A DatasetDict if inplace==False, or None if inplace==True
"""
if progressbar:
ProgressBar().register()
_datasetdict_logger.debug("Executing all queued functions.")
returndict = self.apply_function_to_all(xr.Dataset.load, inplace=inplace)
_datasetdict_logger.info("done.")
return returndict
def copy(self) -> 'DatasetDict':
"""Generate a new Datasetdict with each dataset copied
Useful for preventing further operations from modifying the original.
"""
new_datasetdict = DatasetDict()
for k, v in self.items():
new_datasetdict[k] = v.copy(deep=True)
return new_datasetdict
def to_pickle(self, filename: str = 'datasetdict.pickle') -> None:
"""Pickle this DatasetDict using the highest protocol available.
Parameters
----------
filename : str, default 'datasetdict.pickle'
"""
with open(filename, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
def from_pickle(self,
filename: str = 'cmip_collection.latest_executed_datasets.pickle',
replace: bool = False) -> 'DatasetDict':
"""Load a DatasetDict from a saved pickle file.
Parameters
----------
filename : str, default 'cmip_collection.latest_executed_datasets.pickle'
replace : bool, default False
Returns
-------
DatasetDict
"""
with open(filename, 'rb') as f:
# The protocol version used is detected automatically, so we do not have to specify it.
le_datasets = pickle.load(f)
if replace:
for k, v in le_datasets.items():
self[k] = v
else:
return le_datasets
| en | 0.690457 | A dict wrapper for working simultaneously with multiple, consistent xArray Datasets. Extends the 'dict' class to make it easy to apply selections and calculations to each and every Dataset in the dictionary. Currently, the following procedures are supported: - selections - means - load Select from datasets. Wrapper for xarray's .sel(). Can also use xarray's .isel() with an additional argument. Example ------- dsd = DatasetDict() One can pass slices or individual values: dsd.queue_selection(time=slice("1960", None), inplace=True) dsd.queue_selection(plev=100000, inplace=True) Selections can also be given as a dictionary by using the double splat operator: selection_dict = {'time': slice("1960", None), 'plev': 100000} new_dsd = dsd.queue_selection(**selection_dict, inplace=False) Parameters ---------- selection_dict include <isel=True> to use index selection instead of keyword selection. Returns ------- A DatasetDict with selections lazily queued, but not executed. Or None if inplace==True. # Use the standard selection method if 'isel' key exists & is false, or if key does not exist. Wrapper for calculating the mean for Xarray Datasets. Parameters ---------- dim : dict kwargs keyword arguments to pass to the mean functions Returns ------- A DatasetDict with selections lazily queued, but not executed. Or None if inplace==True. Helper for applying functions to multiple datasets. The specified function is queued lazily (unless executing=True) for execution on datasets of an origin dictionary, which will be copied to a destination dictionary. Hopefully with this, there shouldn't be a need to writing additional looping code. Parameters ---------- fnc args kwargs keyword arguments, e.g. inplace : bool whether the functions should be applied to this DatasetDict or whether a copy should be returned with the operations applied. 
Returns ------- A DatasetDict if inplace==False, or None if inplace==True # The destination is either this instance or a copy (as determined by the 'inplace' keyword). # Default is to create a copy. # Key is removed once no longer needed. # A copy is used if the 'inplace' key exists & is false, or if the key does not exist. # The function is applied to each dataset. Process any lazily loaded selections and computations Parameters ---------- progressbar : bool, default True inplace : bool, default True Returns ------- A DatasetDict if inplace==False, or None if inplace==True Generate a new Datasetdict with each dataset copied Useful for preventing further operations from modifying the original. Pickle this DatasetDict using the highest protocol available. Parameters ---------- filename : str, default 'datasetdict.pickle' Load a DatasetDict from a saved pickle file. Parameters ---------- filename : str, default 'cmip_collection.latest_executed_datasets.pickle' replace : bool, default False Returns ------- DatasetDict # The protocol version used is detected automatically, so we do not have to specify it. | 2.404584 | 2 |
utool/_internal/meta_util_cache.py | Erotemic/utool | 8 | 6618046 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import shelve
from .meta_util_cplat import get_app_resource_dir
from .meta_util_path import ensuredir
from . import meta_util_arg
from .meta_util_constants import global_cache_fname, global_cache_dname, default_appname
from os.path import join
def global_cache_read(key, appname=None, **kwargs):
    """Read a value from the persistent per-app shelve cache.

    Args:
        key: cache key (coerced to ``str`` under Python 2, since shelve
            keys must be non-unicode there).
        appname: application resource directory name; defaults to
            ``default_appname``.
        **kwargs: optional ``default`` value returned when the key is
            missing or the shelf is unreadable.

    Returns:
        The cached value, or ``kwargs['default']`` when provided and the
        key is absent / the shelf cannot be read.

    Raises:
        The underlying read error when no ``default`` was supplied, or
        always when ``meta_util_arg.SUPER_STRICT`` is set.
    """
    if appname is None:
        appname = default_appname
    global_cache_dir = get_app_resource_dir(appname, global_cache_dname)
    ensuredir(global_cache_dir)
    shelf_fpath = join(global_cache_dir, global_cache_fname)
    import six
    if six.PY2:
        # key must be non-unicode in python2
        key = str(key)
    try:
        shelf = shelve.open(shelf_fpath)
        try:
            if 'default' in kwargs:
                return shelf.get(key, kwargs['default'])
            return shelf[key]
        finally:
            # BUG FIX: the original called close() *after* return, so the
            # shelf handle was never closed.
            shelf.close()
    except Exception as ex:
        print('[meta_util_cache] WARNING')
        print(ex)
        print('[meta_util_cache] Error reading: shelf_fpath=%r' % shelf_fpath)
        if meta_util_arg.SUPER_STRICT:
            raise
        if 'default' in kwargs:
            return kwargs['default']
        # BUG FIX: the original unconditionally evaluated kwargs['default']
        # here, masking the real error with KeyError('default').
        raise
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import shelve
from .meta_util_cplat import get_app_resource_dir
from .meta_util_path import ensuredir
from . import meta_util_arg
from .meta_util_constants import global_cache_fname, global_cache_dname, default_appname
from os.path import join
def global_cache_read(key, appname=None, **kwargs):
if appname is None:
appname = default_appname
global_cache_dir = get_app_resource_dir(appname, global_cache_dname)
ensuredir(global_cache_dir)
shelf_fpath = join(global_cache_dir, global_cache_fname)
import six
if six.PY2:
# key must be non-unicode in python2
key = str(key)
try:
shelf = shelve.open(shelf_fpath)
if 'default' in kwargs:
return shelf.get(key, kwargs['default'])
else:
return shelf[key]
shelf.close()
except Exception as ex:
print('[meta_util_cache] WARNING')
print(ex)
print('[meta_util_cache] Error reading: shelf_fpath=%r' % shelf_fpath)
if meta_util_arg.SUPER_STRICT:
raise
return kwargs['default']
#raise
| en | 0.746056 | # -*- coding: utf-8 -*- # key must be non-unicode in python2 #raise | 2.098182 | 2 |
py_sandbox/plotting.py | kjgonzalez/codefiles | 0 | 6618047 | import matplotlib.pyplot as plt
# 1. import matplotlib and numpy
# 2. plot stuff
# 3. see how fast python plotting is
# 4. see how to make a serial object
# in python, read data from USB
# 5. plot incoming data from USB?
# ^^^ will allow data viz on both OS's
a=[1,2,3,4]
plt.plot(a)
plt.ylabel('some numbers')
plt.show() | import matplotlib.pyplot as plt
# 1. import matplotlib and numpy
# 2. plot stuff
# 3. see how fast python plotting is
# 4. see how to make a serial object
# in python, read data from USB
# 5. plot incoming data from USB?
# ^^^ will allow data viz on both OS's
a=[1,2,3,4]
plt.plot(a)
plt.ylabel('some numbers')
plt.show() | en | 0.774378 | # 1. import matplotlib and numpy # 2. plot stuff # 3. see how fast python plotting is # 4. see how to make a serial object # in python, read data from USB # 5. plot incoming data from USB? # ^^^ will allow data viz on both OS's | 3.416111 | 3 |
PyTorch Benchmarks/train_imagenet.py | geochri/Mish | 3 | 6618048 | import argparse
import os
import random
import shutil
import time
from datetime import datetime
import wandb
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from resnet import ResidualNet
from PIL import ImageFile
from ptflops import get_model_complexity_info
# Tolerate partially downloaded / truncated image files instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Lowercase, public, callable attributes of torchvision.models; used only
# to build the --arch help text in parse_args().
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def parse_args():
    """Parse the command line arguments for ImageNet training.

    Returns:
        argparse.Namespace with dataset path, architecture, optimization
        hyper-parameters, and logging/checkpoint settings.
    """
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    parser.add_argument('data', metavar='DIR',
                        help='path to dataset')
    parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
                        help='model architecture: ' +
                             ' | '.join(model_names) +
                             ' (default: resnet18)')
    parser.add_argument('--depth', default=50, type=int, metavar='D',
                        help='model depth')
    parser.add_argument('--ngpu', default=4, type=int, metavar='G',
                        help='number of gpus to use')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', default=90, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('-b', '--batch-size', default=256, type=int,
                        metavar='N', help='mini-batch size (default: 256)')
    parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)')
    parser.add_argument('--print-freq', '-p', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--log-freq', '-l', default=500, type=int,
                        metavar='L', help='log frequency (default: 500)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    # BUG FIX: the help text previously described the batch size.
    parser.add_argument("--seed", type=int, default=1234, metavar='BS',
                        help='random seed (default: 1234)')
    parser.add_argument("--prefix", type=str, required=True, metavar='PFX',
                        help='prefix for logging & checkpoint saving')
    parser.add_argument('--evaluate', dest='evaluate', action='store_true',
                        help='evaluation only')
    # BUG FIX: 'type=bool' parses any non-empty string (even "False") as
    # True; a store_true flag gives the intended on/off semantics.
    # args.dryrun now defaults to False instead of None (both falsy).
    parser.add_argument('--dryrun', action='store_true',
                        help='only run forward/backward on random tensors')
    return parser.parse_args()
def init_wandb(entity, project, model):
    """Start a wandb run, record CLI args as config, and watch the model.

    Relies on the module-level ``args`` namespace.
    """
    wandb.init(entity=entity, project=project, allow_val_change=True)
    wandb.config.update(args)
    wandb.watch(model)
def set_seed(seed):
    """Seed the Python and torch RNGs and force deterministic cuDNN kernels."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def get_model(arch, depth):
    """Build a 1000-class ImageNet ResidualNet of the given depth.

    Only 'resnet' is supported; the model is printed for logging purposes.
    """
    assert arch == "resnet"
    net = ResidualNet('ImageNet', depth, 1000)
    print(net)
    return net
def get_model_stats(model, device, verbose=True):
    """Compute MACs and parameter count for a 3x224x224 input via ptflops.

    Returns:
        (macs, params) as numbers (``as_strings=False``).
    """
    with torch.cuda.device(device):
        macs, params = get_model_complexity_info(
            model, (3, 224, 224), as_strings=False, print_per_layer_stat=False)
    if verbose:
        print('{:<30} {:<8}'.format('Computational complexity: ', int(macs)))
        print('{:<30} {:<8}'.format('Number of parameters: ', params))
    return macs, params
def get_loss_optim(model, device, lr, momentum, weight_decay):
    """Return (CrossEntropyLoss moved to ``device``, SGD over model params)."""
    loss_fn = nn.CrossEntropyLoss().to(device)
    sgd = torch.optim.SGD(model.parameters(), lr,
                          momentum=momentum,
                          weight_decay=weight_decay)
    return loss_fn, sgd
def get_model_checkpoint(path, model, optimizer):
    """Restore model (and optimizer, when saved) state from a checkpoint.

    Returns the epoch recorded in the checkpoint.  When ``path`` does not
    exist, prints a message and exits the process (original behavior).
    """
    if not os.path.isfile(path):
        print("=> no checkpoint found at '{}'".format(path))
        exit()
    print("=> loading checkpoint '{}'".format(path))
    checkpoint = torch.load(path)
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    if 'optimizer' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(path, checkpoint['epoch']))
    return start_epoch
def get_dataloader(root_dir, is_train, batch_size, workers):
    """Build an ImageFolder DataLoader for the 'train' or 'val' split.

    Training uses random-resized-crop + horizontal-flip augmentation with
    shuffling; validation uses a deterministic resize/center-crop pipeline.
    Both normalize with the standard ImageNet mean/std.
    """
    split = "train" if is_train else "val"
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if is_train:
        pipeline = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        pipeline = [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]
    dataset = datasets.ImageFolder(os.path.join(root_dir, split),
                                   transforms.Compose(pipeline))
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=bool(is_train),
        num_workers=workers, pin_memory=True)
# Best top-1 validation accuracy seen so far; updated inside main().
best_prec1 = 0
# CLI arguments are parsed once at import time and shared as a global.
args = parse_args()
# Let cuDNN benchmark/auto-tune kernels for the fixed 224x224 input size.
torch.backends.cudnn.benchmark = True
def main():
    """Entry point: build the model and data pipeline, then train/validate.

    Reads the module-level ``args`` namespace and updates the global
    ``best_prec1``.
    """
    global args, best_prec1
    print("args", args)
    set_seed(args.seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = get_model("resnet", int(args.depth))
    macs, params = get_model_stats(model, device)
    model = model.to(device)
    criterion, optimizer = get_loss_optim(model, device, args.lr, args.momentum, args.weight_decay)
    model = torch.nn.DataParallel(model, device_ids=list(range(int(args.ngpu))))

    if args.resume:
        start_epoch = get_model_checkpoint(args.resume, model, optimizer)
        args.start_epoch = start_epoch

    if args.dryrun:
        # Benchmark-only mode: forward/backward on random tensors, then exit.
        for epoch in range(args.start_epoch, 100):
            print(epoch)
            x = torch.randn(args.batch_size, 3, 224, 224).to(device)
            model.zero_grad()
            y = model(x)
            y.mean().backward()
        exit()

    val_loader = get_dataloader(args.data, False, args.batch_size, args.workers)
    if args.evaluate:
        validate(val_loader, model, criterion, 0)
        return

    train_loader = get_dataloader(args.data, True, args.batch_size, args.workers)
    init_wandb("landskape", "SAN-ImageNet", model)
    wandb.config.update({"Parameters": params, "FLOPs": macs})
    print(f"Parameters: {params}, FLOPs: {macs}")
    print(args)
    print(model)

    for epoch in range(args.start_epoch, args.epochs):
        start_time = datetime.now()
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.prefix)
        end_time = datetime.now()
        # Whole minutes per epoch (floor division).
        delta = (end_time - start_time).total_seconds() // 60.
        wandb.log({'epoch': epoch, "best_prec1": best_prec1, "Time (min.)": delta})
        print(f"Epoch {epoch} Prec {prec1:.3f} Best {best_prec1:.3f} Time {delta} m")
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch, updating timing/loss/accuracy meters.

    Prints progress every ``args.print_freq`` batches and logs per-batch
    statistics to wandb every ``args.log_freq`` batches.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(non_blocking=True)
        # NOTE(review): torch.autograd.Variable is a legacy no-op wrapper on
        # modern torch; plain tensors would suffice — confirm torch version.
        input_var = torch.autograd.Variable(images)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), images.size(0))
        top1.update(prec1.item(), images.size(0))
        top5.update(prec5.item(), images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time (includes data loading for this batch)
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time,
                                                                  data_time=data_time, loss=losses, top1=top1,
                                                                  top5=top5))
        if i % args.log_freq == 0:
            wandb.log(
                {"Batch": epoch * len(train_loader) + i, "Batch Training time (ms)": batch_time.val*10, "Batch Data time (ms)": data_time.val*10,
                 "Batch Training loss": losses.val, "Batch Training Top-1 accuracy": top1.val,
                 "Batch Training Top-5 accuracy": top5.val})
def validate(val_loader, model, criterion, epoch):
    """Run one full evaluation pass and return the average top-1 accuracy.

    Prints progress every ``args.print_freq`` batches and logs the epoch
    summary (loss, top-1, top-5) to wandb.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (images, target) in enumerate(val_loader):
        with torch.no_grad():
            target = target.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(images)
            target_var = torch.autograd.Variable(target)

            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), images.size(0))
            top1.update(prec1.item(), images.size(0))
            top5.update(prec5.item(), images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    # log stats to wandb
    wandb.log({
        'epoch': epoch,
        'Top-1 accuracy': top1.avg,
        'Top-5 accuracy': top5.avg,
        'loss': losses.avg,
    })
    return top1.avg
def save_checkpoint(state, is_best, prefix):
    """Save the training state under ./checkpoints and mirror the best model.

    The checkpoint file is also uploaded to the active wandb run.
    """
    checkpoint_dir = './checkpoints'
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    filename = './checkpoints/%s_checkpoint.pth.tar' % prefix
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, './checkpoints/%s_model_best.pth.tar' % prefix)
    wandb.save(filename)
class AverageMeter(object):
    """Tracks the latest value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Set this epoch's learning rate according to the architecture schedule.

    mobilenet: exponential decay (0.98 ** epoch); resnet: step decay
    (divide by 10 every 30 epochs); any other architecture keeps the base
    ``args.lr``.  The rate is written into every param group and logged
    to wandb.
    """
    if args.arch == 'mobilenet':
        lr = args.lr * (0.98 ** epoch)
    elif args.arch == 'resnet':
        lr = args.lr * (0.1 ** (epoch // 30))
    else:
        # BUG FIX: `lr` was unbound (NameError) for any other architecture;
        # fall back to the base learning rate.
        lr = args.lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    wandb.log({'lr': lr, 'epoch': epoch})
def accuracy(output, target, topk=(1,)):
    """Compute the top-k precision (in percent) for the given scores.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, each holding the percentage
        of samples whose true class is among the k highest scores.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # (batch, maxk) top indices, transposed to (maxk, batch) so that
        # correct[:k] selects the best-k rows.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # BUG FIX: use reshape() instead of view(); the sliced mask is
            # not guaranteed contiguous on newer PyTorch versions and
            # view() raises there.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Launch training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| import argparse
import os
import random
import shutil
import time
from datetime import datetime
import wandb
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from resnet import ResidualNet
from PIL import ImageFile
from ptflops import get_model_complexity_info
ImageFile.LOAD_TRUNCATED_IMAGES = True
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--depth', default=50, type=int, metavar='D',
help='model depth')
parser.add_argument('--ngpu', default=4, type=int, metavar='G',
help='number of gpus to use')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
metavar='N', help='print frequency (default: 100)')
parser.add_argument('--log-freq', '-l', default=500, type=int,
metavar='L', help='log frequency (default: 500)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument("--seed", type=int, default=1234, metavar='BS',
help='input batch size for training (default: 64)')
parser.add_argument("--prefix", type=str, required=True, metavar='PFX',
help='prefix for logging & checkpoint saving')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluation only')
# parser.add_argument('--att-type', type=str, choices=['BAM', 'CBAM', 'PCAM'], default=None)
# parser.add_argument('--conv', required=False, type=bool)
# parser.add_argument('--settings', nargs="+", required=True, type=list)
# parser.add_argument('--name', required=True, type=str)
# parser.add_argument('--san', required=False, type=bool)
parser.add_argument('--dryrun', required=False, type=bool)
args = parser.parse_args()
return args
def init_wandb(entity, project, model):
wandb.init(entity=entity, project=project, allow_val_change=True)
wandb.config.update(args)
wandb.watch(model)
def set_seed(seed):
    """Seed every RNG source used here so runs are reproducible."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.deterministic = True
def get_model(arch, depth):
    """Build the ResidualNet classifier for ImageNet (1000 classes).

    Raises ValueError for unsupported architectures; the original used
    `assert`, which is silently stripped when Python runs with -O.
    """
    if arch != "resnet":
        raise ValueError(f"unsupported architecture: {arch!r} (only 'resnet')")
    model = ResidualNet('ImageNet', depth, 1000)
    print(model)
    return model
def get_model_stats(model, device, verbose=True):
    """Return (macs, params) for `model` via ptflops' complexity counter.

    Input resolution is fixed at 3x224x224 (ImageNet).
    NOTE(review): `torch.cuda.device(device)` assumes a CUDA device string;
    when `device` is 'cpu' this context manager may fail — confirm.
    """
    with torch.cuda.device(device):
        macs, params = get_model_complexity_info(model, (3, 224, 224), as_strings=False,
                                                 print_per_layer_stat=False)
    if verbose:
        print('{:<30} {:<8}'.format('Computational complexity: ', int(macs)))
        print('{:<30} {:<8}'.format('Number of parameters: ', params))
    return macs, params
def get_loss_optim(model, device, lr, momentum, weight_decay):
    """Create the cross-entropy criterion and an SGD optimizer for `model`."""
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=lr,
        momentum=momentum,
        weight_decay=weight_decay,
    )
    return criterion, optimizer
def get_model_checkpoint(path, model, optimizer):
    """Restore model/optimizer state from `path`; return the saved epoch.

    The checkpoint dict must contain 'epoch' and 'state_dict'; 'optimizer'
    is restored when present. Raises FileNotFoundError when no checkpoint
    exists — the original called bare exit(), which hid the failure reason
    from callers and exit codes.
    """
    if not os.path.isfile(path):
        raise FileNotFoundError("no checkpoint found at '{}'".format(path))
    print("=> loading checkpoint '{}'".format(path))
    checkpoint = torch.load(path)
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    if 'optimizer' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(path, checkpoint['epoch']))
    return start_epoch
def get_dataloader(root_dir, is_train, batch_size, workers):
    """Build an ImageNet-style DataLoader for the train or val split.

    Train: RandomResizedCrop(224) + horizontal flip. Val: Resize(256) +
    CenterCrop(224). Both convert to tensor and normalize with standard
    ImageNet channel statistics. Shuffling is enabled only while training.
    """
    dir_name = "train" if is_train else "val"
    data_dir = os.path.join(root_dir, dir_name)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transformations = [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ] if is_train else [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]
    dataset = datasets.ImageFolder(data_dir, transforms.Compose(transformations))
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=is_train,  # was `True if is_train else False`
        num_workers=workers, pin_memory=True)
    return loader
# Module-level training state: best top-1 accuracy seen so far, parsed CLI
# args, and cuDNN autotuning for the fixed 224x224 input size.
best_prec1 = 0
# NOTE(review): parsing at import time means importing this module requires
# valid CLI arguments — confirm the file is only ever run as a script.
args = parse_args()
torch.backends.cudnn.benchmark = True
def main():
    """Entry point: build model, optionally resume/evaluate, then train.

    Reads the module-level `args` and updates the module-level `best_prec1`.
    NOTE(review): args.depth / args.ngpu / args.arch / args.data are not
    declared in the visible argument parser — confirm they exist upstream.
    """
    global args, best_prec1
    print("args", args)
    set_seed(args.seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = get_model("resnet", int(args.depth))
    macs, params = get_model_stats(model, device)
    model = model.to(device)
    criterion, optimizer = get_loss_optim(model, device, args.lr, args.momentum, args.weight_decay)
    model = torch.nn.DataParallel(model, device_ids=list(range(int(args.ngpu))))
    if args.resume:
        start_epoch = get_model_checkpoint(args.resume, model, optimizer)
        args.start_epoch = start_epoch
    if args.dryrun:
        # Dry run: forward/backward passes on random data only, then quit.
        for epoch in range(args.start_epoch, 100):
            print(epoch)
            x = torch.randn(args.batch_size, 3, 224, 224).to(device)
            model.zero_grad()
            y = model(x)
            y.mean().backward()
        exit()
    val_loader = get_dataloader(args.data, False, args.batch_size, args.workers)
    if args.evaluate:
        # Evaluation-only mode: single validation pass, no training.
        validate(val_loader, model, criterion, 0)
        return
    train_loader = get_dataloader(args.data, True, args.batch_size, args.workers)
    init_wandb("landskape", "SAN-ImageNet", model)
    wandb.config.update({"Parameters": params, "FLOPs": macs})
    print(f"Parameters: {params}, FLOPs: {macs}")
    print(args)
    print(model)
    for epoch in range(args.start_epoch, args.epochs):
        start_time = datetime.now()
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.prefix)
        end_time = datetime.now()
        # Whole-epoch wall time, floored to integer minutes.
        delta = (end_time - start_time).total_seconds() // 60.
        wandb.log({'epoch': epoch, "best_prec1": best_prec1, "Time (min.)": delta})
        print(f"Epoch {epoch} Prec {prec1:.3f} Best {best_prec1:.3f} Time {delta} m")
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    Side effects: updates model parameters in place; prints progress every
    `args.print_freq` batches and logs to wandb every `args.log_freq`.
    Relies on module-level `args`, `AverageMeter`, `accuracy`, and `wandb`.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    # torch.autograd.Variable is a deprecated no-op wrapper on modern
    # PyTorch, so tensors are used directly; the loop variable was also
    # renamed so it no longer shadows the `input` builtin.
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(prec1.item(), images.size(0))
        top5.update(prec5.item(), images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1,
                      top5=top5))
        if i % args.log_freq == 0:
            # NOTE(review): seconds * 10 is labeled "(ms)" but ms would be
            # * 1000 — values kept as-is to preserve dashboard continuity;
            # confirm intent.
            wandb.log(
                {"Batch": epoch * len(train_loader) + i,
                 "Batch Training time (ms)": batch_time.val * 10,
                 "Batch Data time (ms)": data_time.val * 10,
                 "Batch Training loss": losses.val,
                 "Batch Training Top-1 accuracy": top1.val,
                 "Batch Training Top-5 accuracy": top5.val})
def validate(val_loader, model, criterion, epoch):
    """Evaluate on `val_loader`; returns average top-1 accuracy.

    Prints progress every `args.print_freq` batches and logs epoch-level
    stats to wandb. Relies on module-level `args`, `AverageMeter`,
    `accuracy`, and `wandb`.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    # no_grad is hoisted around the whole loop (the original re-entered the
    # context every batch); Variable wrappers are removed — they are a
    # deprecated no-op on modern PyTorch.
    with torch.no_grad():
        for i, (images, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(prec1.item(), images.size(0))
            top5.update(prec5.item(), images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    # log stats to wandb
    wandb.log({
        'epoch': epoch,
        'Top-1 accuracy': top1.avg,
        'Top-5 accuracy': top5.avg,
        'loss': losses.avg,
    })
    return top1.avg
def save_checkpoint(state, is_best, prefix):
    """Persist `state` under ./checkpoints and upload it to wandb.

    When `is_best`, also copies the checkpoint to <prefix>_model_best.
    Uses makedirs(exist_ok=True), removing the check-then-mkdir race in
    the original.
    """
    os.makedirs('./checkpoints', exist_ok=True)
    filename = './checkpoints/%s_checkpoint.pth.tar' % prefix
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, './checkpoints/%s_model_best.pth.tar' % prefix)
    wandb.save(filename)
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Set the LR for `epoch` per the architecture's schedule and log it.

    mobilenet: smooth exponential decay (0.98^epoch); resnet: initial LR
    decayed by 10 every 30 epochs. Reads module-level `args`.
    """
    if args.arch == 'mobilenet':
        lr = args.lr * (0.98 ** epoch)
    elif args.arch == 'resnet':
        lr = args.lr * (0.1 ** (epoch // 30))
    else:
        # The original fell through with `lr` unbound, crashing with a
        # confusing NameError below; fail loudly with the real cause.
        raise ValueError("unknown architecture for LR schedule: %r" % args.arch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    wandb.log({'lr': lr, 'epoch': epoch})
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in `topk`.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to report.
    Returns: list of 1-element tensors, each the percentage of samples
    whose true class is within the top-k predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): correct[:k] is non-contiguous
            # after t(), so view raises on modern PyTorch for k > 1.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| en | 0.423586 | # parser.add_argument('--att-type', type=str, choices=['BAM', 'CBAM', 'PCAM'], default=None) # parser.add_argument('--conv', required=False, type=bool) # parser.add_argument('--settings', nargs="+", required=True, type=list) # parser.add_argument('--name', required=True, type=str) # parser.add_argument('--san', required=False, type=bool) # train for one epoch # evaluate on validation set # remember best prec@1 and save checkpoint # switch to train mode # measure data loading time # compute output # measure accuracy and record loss # compute gradient and do SGD step # measure elapsed time # switch to evaluate mode # compute output # measure accuracy and record loss # measure elapsed time # log stats to wandb Computes and stores the average and current value Sets the learning rate to the initial LR decayed by 10 every 30 epochs Computes the precision@k for the specified values of k | 2.063294 | 2 |
components/vulcanize.bzl | guildai/_guild-python-legacy | 0 | 6618049 | # TODO: note TensorFlow copyright
load("@org_tensorflow_tensorboard//tensorboard/defs:defs.bzl", "legacy_js")
load("@io_bazel_rules_closure//closure/private:defs.bzl", "collect_js", "unfurl", "long_path")
load("@org_tensorflow_tensorboard//tensorboard/defs:web.bzl", "web_aspect")
def _guild_html_binary(ctx):
    """Vulcanizes web-component deps into one HTML file plus a dev server.

    NOTE(review): relies on legacy Bazel APIs (set(), depset addition,
    ctx.action, ctx.new_file, ctx.file_action) that were removed in later
    Bazel releases — confirm the Bazel version this repo pins.
    """
    deps = unfurl(ctx.attr.deps, provider="webfiles")
    # Collect manifests, runfiles, and served webpaths from all web deps.
    manifests = set(order="topological")
    files = set()
    webpaths = set()
    for dep in deps:
        manifests += dep.webfiles.manifests
        webpaths += dep.webfiles.webpaths
        files += dep.data_runfiles.files
    webpaths += [ctx.attr.output_path]
    closure_js_library=collect_js(
        ctx, unfurl(ctx.attr.deps, provider="closure_js_library"))
    # vulcanize: run the Vulcanize tool to inline everything into one HTML.
    jslibs = depset(ctx.files._jslibs) + closure_js_library.srcs
    # "NO_REGEXS" is the tool's sentinel for "no noinline patterns".
    path_regexs_str = ",".join(ctx.attr.path_regexs_for_noinline) or "NO_REGEXS"
    ctx.action(
        inputs=list(manifests | files | jslibs),
        outputs=[ctx.outputs.html],
        executable=ctx.executable._Vulcanize,
        arguments=([ctx.attr.compilation_level,
                    "true" if ctx.attr.testonly else "false",
                    ctx.attr.input_path,
                    ctx.attr.output_path,
                    ctx.outputs.html.path,
                    path_regexs_str] +
                   [f.path for f in jslibs] +
                   [f.path for f in manifests]),
        progress_message="Vulcanizing %s" % ctx.attr.input_path)
    # webfiles manifest: describe the generated HTML for downstream rules.
    manifest_srcs = [struct(path=ctx.outputs.html.path,
                            longpath=long_path(ctx, ctx.outputs.html),
                            webpath=ctx.attr.output_path)]
    manifest = ctx.new_file(ctx.configuration.bin_dir,
                            "%s.pbtxt" % ctx.label.name)
    ctx.file_action(
        output=manifest,
        content=struct(
            label=str(ctx.label),
            src=manifest_srcs).to_proto())
    manifests += [manifest]
    # webfiles server: emit a params proto plus a launcher shell script.
    params = struct(
        label=str(ctx.label),
        bind="[::]:6006",  # listen on all interfaces, port 6006
        manifest=[long_path(ctx, man) for man in manifests],
        external_asset=[struct(webpath=k, path=v)
                        for k, v in ctx.attr.external_assets.items()])
    params_file = ctx.new_file(ctx.configuration.bin_dir,
                               "%s_server_params.pbtxt" % ctx.label.name)
    ctx.file_action(output=params_file, content=params.to_proto())
    ctx.file_action(
        executable=True,
        output=ctx.outputs.executable,
        content="#!/bin/sh\nexec %s %s" % (
            ctx.executable._WebfilesServer.short_path,
            long_path(ctx, params_file)))
    # Runfiles must carry everything the server needs at runtime.
    transitive_runfiles = depset()
    transitive_runfiles += ctx.attr._WebfilesServer.data_runfiles.files
    for dep in deps:
        transitive_runfiles += dep.data_runfiles.files
    return struct(
        files=depset([ctx.outputs.html]),
        webfiles=struct(
            manifest=manifest,
            manifests=manifests,
            webpaths=webpaths,
            dummy=ctx.outputs.html),
        runfiles=ctx.runfiles(
            files=ctx.files.data + [manifest,
                                    params_file,
                                    ctx.outputs.html,
                                    ctx.outputs.executable],
            transitive_files=transitive_runfiles))
# Public rule: produces <name>.html (the vulcanized page) and an executable
# launcher script that serves it via WebfilesServer.
guild_html_binary = rule(
    implementation=_guild_html_binary,
    executable=True,
    attrs={
        # Closure compiler level passed to the Vulcanize tool.
        "compilation_level": attr.string(default="ADVANCED"),
        # Webpath of the HTML entry point and of the emitted output.
        "input_path": attr.string(mandatory=True),
        "output_path": attr.string(mandatory=True),
        "data": attr.label_list(cfg="data", allow_files=True),
        "deps": attr.label_list(
            aspects=[
                web_aspect,
                legacy_js,
            ],
            mandatory=True),
        # Extra directories the dev server exposes at fixed webpaths.
        "external_assets": attr.string_dict(default={"/_/runfiles": "."}),
        # Regex patterns whose matches Vulcanize leaves un-inlined.
        "path_regexs_for_noinline": attr.string_list(),
        # Implicit tool/library dependencies (leading underscore = hidden).
        "_jslibs": attr.label(
            default=Label("//java/org/guildai/vulcanize:jslibs"),
            allow_files=True),
        "_Vulcanize": attr.label(
            default=Label("//java/org/guildai/vulcanize:Vulcanize"),
            executable=True,
            cfg="host"),
        "_WebfilesServer": attr.label(
            default=Label(
                "@io_bazel_rules_closure//java/io/bazel/rules/closure/webfiles/server:WebfilesServer"),
            executable=True,
            cfg="host"),
    },
    outputs={
        "html": "%{name}.html",
    })
| # TODO: note TensorFlow copyright
load("@org_tensorflow_tensorboard//tensorboard/defs:defs.bzl", "legacy_js")
load("@io_bazel_rules_closure//closure/private:defs.bzl", "collect_js", "unfurl", "long_path")
load("@org_tensorflow_tensorboard//tensorboard/defs:web.bzl", "web_aspect")
def _guild_html_binary(ctx):
    """Vulcanizes web-component deps into one HTML file plus a dev server.

    NOTE(review): relies on legacy Bazel APIs (set(), depset addition,
    ctx.action, ctx.new_file, ctx.file_action) that were removed in later
    Bazel releases — confirm the Bazel version this repo pins.
    """
    deps = unfurl(ctx.attr.deps, provider="webfiles")
    # Collect manifests, runfiles, and served webpaths from all web deps.
    manifests = set(order="topological")
    files = set()
    webpaths = set()
    for dep in deps:
        manifests += dep.webfiles.manifests
        webpaths += dep.webfiles.webpaths
        files += dep.data_runfiles.files
    webpaths += [ctx.attr.output_path]
    closure_js_library=collect_js(
        ctx, unfurl(ctx.attr.deps, provider="closure_js_library"))
    # vulcanize: run the Vulcanize tool to inline everything into one HTML.
    jslibs = depset(ctx.files._jslibs) + closure_js_library.srcs
    # "NO_REGEXS" is the tool's sentinel for "no noinline patterns".
    path_regexs_str = ",".join(ctx.attr.path_regexs_for_noinline) or "NO_REGEXS"
    ctx.action(
        inputs=list(manifests | files | jslibs),
        outputs=[ctx.outputs.html],
        executable=ctx.executable._Vulcanize,
        arguments=([ctx.attr.compilation_level,
                    "true" if ctx.attr.testonly else "false",
                    ctx.attr.input_path,
                    ctx.attr.output_path,
                    ctx.outputs.html.path,
                    path_regexs_str] +
                   [f.path for f in jslibs] +
                   [f.path for f in manifests]),
        progress_message="Vulcanizing %s" % ctx.attr.input_path)
    # webfiles manifest: describe the generated HTML for downstream rules.
    manifest_srcs = [struct(path=ctx.outputs.html.path,
                            longpath=long_path(ctx, ctx.outputs.html),
                            webpath=ctx.attr.output_path)]
    manifest = ctx.new_file(ctx.configuration.bin_dir,
                            "%s.pbtxt" % ctx.label.name)
    ctx.file_action(
        output=manifest,
        content=struct(
            label=str(ctx.label),
            src=manifest_srcs).to_proto())
    manifests += [manifest]
    # webfiles server: emit a params proto plus a launcher shell script.
    params = struct(
        label=str(ctx.label),
        bind="[::]:6006",  # listen on all interfaces, port 6006
        manifest=[long_path(ctx, man) for man in manifests],
        external_asset=[struct(webpath=k, path=v)
                        for k, v in ctx.attr.external_assets.items()])
    params_file = ctx.new_file(ctx.configuration.bin_dir,
                               "%s_server_params.pbtxt" % ctx.label.name)
    ctx.file_action(output=params_file, content=params.to_proto())
    ctx.file_action(
        executable=True,
        output=ctx.outputs.executable,
        content="#!/bin/sh\nexec %s %s" % (
            ctx.executable._WebfilesServer.short_path,
            long_path(ctx, params_file)))
    # Runfiles must carry everything the server needs at runtime.
    transitive_runfiles = depset()
    transitive_runfiles += ctx.attr._WebfilesServer.data_runfiles.files
    for dep in deps:
        transitive_runfiles += dep.data_runfiles.files
    return struct(
        files=depset([ctx.outputs.html]),
        webfiles=struct(
            manifest=manifest,
            manifests=manifests,
            webpaths=webpaths,
            dummy=ctx.outputs.html),
        runfiles=ctx.runfiles(
            files=ctx.files.data + [manifest,
                                    params_file,
                                    ctx.outputs.html,
                                    ctx.outputs.executable],
            transitive_files=transitive_runfiles))
# Public rule: produces <name>.html (the vulcanized page) and an executable
# launcher script that serves it via WebfilesServer.
guild_html_binary = rule(
    implementation=_guild_html_binary,
    executable=True,
    attrs={
        # Closure compiler level passed to the Vulcanize tool.
        "compilation_level": attr.string(default="ADVANCED"),
        # Webpath of the HTML entry point and of the emitted output.
        "input_path": attr.string(mandatory=True),
        "output_path": attr.string(mandatory=True),
        "data": attr.label_list(cfg="data", allow_files=True),
        "deps": attr.label_list(
            aspects=[
                web_aspect,
                legacy_js,
            ],
            mandatory=True),
        # Extra directories the dev server exposes at fixed webpaths.
        "external_assets": attr.string_dict(default={"/_/runfiles": "."}),
        # Regex patterns whose matches Vulcanize leaves un-inlined.
        "path_regexs_for_noinline": attr.string_list(),
        # Implicit tool/library dependencies (leading underscore = hidden).
        "_jslibs": attr.label(
            default=Label("//java/org/guildai/vulcanize:jslibs"),
            allow_files=True),
        "_Vulcanize": attr.label(
            default=Label("//java/org/guildai/vulcanize:Vulcanize"),
            executable=True,
            cfg="host"),
        "_WebfilesServer": attr.label(
            default=Label(
                "@io_bazel_rules_closure//java/io/bazel/rules/closure/webfiles/server:WebfilesServer"),
            executable=True,
            cfg="host"),
    },
    outputs={
        "html": "%{name}.html",
    })
| en | 0.283538 | # TODO: note TensorFlow copyright # vulcanize # webfiles manifest # webfiles server | 1.877961 | 2 |
main.py | starcreep48/HearthStoneBGLite | 2 | 6618050 | <filename>main.py<gh_stars>1-10
import logging
import copy
import json
import pickle
from sys import exit
from random import randint
from game.tier1 import AllCards
from game.warband import Warband
from game.simulator import Simulator
from utils.warbandlogic import remove_card, select_attacking_card, select_defending_card
from utils.cardlogic import inflict_damage_to_card
# player1Warband = Warband('Player', [Alleycat(), DragonspawnLieutenant(), AcolyteOfCThun(), Alleycat(), DragonspawnLieutenant()])
# player2Warband = Warband('Computer', [Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun()])
# Build two random 4-card warbands by sampling from the card pool.
randomWarband1 = []
randomWarband2 = []
print(len(AllCards[5]))  # NOTE(review): debug output — confirm intentional
for i in range(0, 4):
    # presumably AllCards is indexed [tier][card]; randint is inclusive, so
    # both indices range over 0..5 — TODO confirm pool dimensions match
    randomWarband1.append(AllCards[randint(0, 5)][randint(0, 5)].copy())
    randomWarband2.append(AllCards[randint(0, 5)][randint(0, 5)].copy())
player1Warband = Warband('Player', randomWarband1)
player2Warband = Warband('Computer', randomWarband2)
# Snapshot both warbands to disk for later inspection/replay.
pickle.dump( player1Warband, open('player1.pickle', 'wb') )
pickle.dump( player2Warband, open('player2.pickle', 'wb') )
nRounds = 1
# Verbose logging only for short runs; WARNING level for large batches.
if nRounds > 10:
    logging.basicConfig(filename='main.log', level=logging.WARNING)
else:
    logging.basicConfig(filename='main.log', level=logging.DEBUG)
player1Wins = 0
player2Wins = 0
ties = 0
for i in range(0, nRounds):
    # Each round fights copies of the warbands, leaving the originals
    # untouched for the next round.
    simulator = Simulator(player1Warband.copy(), player2Warband.copy())
    simulator.simulate()
    if simulator.winner == player1Warband.name:
        player1Wins += 1
    elif simulator.winner == player2Warband.name:
        player2Wins += 1
    else:
        ties += 1
print(f'{player1Warband.name} wins: {player1Wins}, win%: {player1Wins/nRounds}')
print(f'{player2Warband.name} wins: {player2Wins}, win%: {player2Wins/nRounds}')
print(f'Ties: {ties}')
import logging
import copy
import json
import pickle
from sys import exit
from random import randint
from game.tier1 import AllCards
from game.warband import Warband
from game.simulator import Simulator
from utils.warbandlogic import remove_card, select_attacking_card, select_defending_card
from utils.cardlogic import inflict_damage_to_card
# player1Warband = Warband('Player', [Alleycat(), DragonspawnLieutenant(), AcolyteOfCThun(), Alleycat(), DragonspawnLieutenant()])
# player2Warband = Warband('Computer', [Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun()])
# Build two random 4-card warbands by sampling from the card pool.
randomWarband1 = []
randomWarband2 = []
print(len(AllCards[5]))  # NOTE(review): debug output — confirm intentional
for i in range(0, 4):
    # presumably AllCards is indexed [tier][card]; randint is inclusive, so
    # both indices range over 0..5 — TODO confirm pool dimensions match
    randomWarband1.append(AllCards[randint(0, 5)][randint(0, 5)].copy())
    randomWarband2.append(AllCards[randint(0, 5)][randint(0, 5)].copy())
player1Warband = Warband('Player', randomWarband1)
player2Warband = Warband('Computer', randomWarband2)
# Snapshot both warbands to disk for later inspection/replay.
pickle.dump( player1Warband, open('player1.pickle', 'wb') )
pickle.dump( player2Warband, open('player2.pickle', 'wb') )
nRounds = 1
# Verbose logging only for short runs; WARNING level for large batches.
if nRounds > 10:
    logging.basicConfig(filename='main.log', level=logging.WARNING)
else:
    logging.basicConfig(filename='main.log', level=logging.DEBUG)
player1Wins = 0
player2Wins = 0
ties = 0
for i in range(0, nRounds):
    # Each round fights copies of the warbands, leaving the originals
    # untouched for the next round.
    simulator = Simulator(player1Warband.copy(), player2Warband.copy())
    simulator.simulate()
    if simulator.winner == player1Warband.name:
        player1Wins += 1
    elif simulator.winner == player2Warband.name:
        player2Wins += 1
    else:
        ties += 1
print(f'{player1Warband.name} wins: {player1Wins}, win%: {player1Wins/nRounds}')
print(f'{player2Warband.name} wins: {player2Wins}, win%: {player2Wins/nRounds}')
print(f'Ties: {ties}') | zh | 0.153792 | # player1Warband = Warband('Player', [Alleycat(), DragonspawnLieutenant(), AcolyteOfCThun(), Alleycat(), DragonspawnLieutenant()]) # player2Warband = Warband('Computer', [Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun(), Alleycat(), AcolyteOfCThun()]) | 2.534948 | 3 |