max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
scott_bot/util/constants.py | TestyYay/scottbot | 1 | 6618651 | <gh_stars>1-10
import os

import yaml
from dotenv import load_dotenv, find_dotenv

# Load the bot's YAML configuration, which lives one directory above this
# module (scott_bot/config.yml).  NOTE: the original dump lost the indent on
# the assignment below; it must be inside the `with` block.
with open(os.path.join(os.path.dirname(__file__), "../config.yml"), encoding="UTF-8") as f:
    _CONFIG_YAML = yaml.safe_load(f)

# Pull environment variables (tokens, passwords, ...) from a .env file if one
# exists anywhere up the tree.
load_dotenv(find_dotenv())
class YAMLGetter(type):
    """
    ** STOLEN STRAIGHT FROM python-discord's BOT https://github.com/python-discord/bot **

    Implements a custom metaclass used for accessing
    configuration data by simply accessing class attributes.
    Supports getting configuration from up to two levels
    of nested configuration through `section` and `subsection`.

    `section` specifies the YAML configuration section (or "key")
    in which the configuration lives, and must be set.

    `subsection` is an optional attribute specifying the section
    within the section from which configuration should be loaded.

    Example Usage:
        # config.yml
        scott_bot:
            prefixes:
                direct_message: ''
                guild: '!'

        # admin_cog.py
        class Prefixes(metaclass=YAMLGetter):
            section = "scott_bot"
            subsection = "prefixes"

        # Usage in Python code
        from config import Prefixes

        def get_prefix(scott_bot, message):
            if isinstance(message.channel, PrivateChannel):
                return Prefixes.direct_message
            return Prefixes.guild
    """

    subsection = None

    def __getattr__(cls, name):
        # Only called when normal attribute lookup fails, so explicit class
        # attributes (e.g. `section`) are never shadowed by config values.
        name = name.lower()
        try:
            if cls.subsection is not None:
                return _CONFIG_YAML[cls.section][cls.subsection][name]
            return _CONFIG_YAML[cls.section][name]
        except KeyError:
            # `dotted_path` is kept for the (currently disabled) log call
            # below so the failing key is easy to report.
            dotted_path = '.'.join(
                (cls.section, cls.subsection, name)
                if cls.subsection is not None else (cls.section, name)
            )
            # log.critical(f"Tried accessing configuration variable at `{dotted_path}`, but it could not be found.")
            raise

    def __getitem__(cls, name):
        # Allow dict-style access: SomeConfig['key'] == SomeConfig.key
        return cls.__getattr__(name)

    def __iter__(cls):
        """Return generator of key: value pairs of current constants class' config values."""
        for name in cls.__annotations__:
            yield name, getattr(cls, name)
class Bot(metaclass=YAMLGetter):
    section = "bot"

    default_prefix: str
    token: str
    color: int


class DataBase(metaclass=YAMLGetter):
    section = "database"

    db_url: str
    password: str
    main_tablename: str
    nickname_tablename: str
    suggestions_tablename: str


# Allow a DB_URL environment variable to override the YAML value; setting the
# class attribute bypasses the metaclass lookup from here on.
DataBase.db_url = os.getenv("DB_URL", DataBase.db_url)


class Config(metaclass=YAMLGetter):
    section = "config"

    bad: list
    channels: list


class ConfigHelp(metaclass=YAMLGetter):
    section = "config"
    subsection = "help"

    prefix: str
    join_leave: str
    dad_name: str
    admin_channel: str
    swearing: str


class Logging(metaclass=YAMLGetter):
    section = "logging"

    enabled: bool
    guild_id: int


class Channels(metaclass=YAMLGetter):
    section = "logging"
    subsection = "channels"

    errors: int
    guild_join: int
    guild_leave: int
    bot_start: int


class Defaults(metaclass=YAMLGetter):
    section = "config"
    subsection = "defaults"

    prefix: str
    dad_name: str
    swearing: bool


class Emojis(metaclass=YAMLGetter):
    section = "messages"
    subsection = "emojis"

    first: str
    last: str
    left: str
    right: str
    delete: str


class JoinMessages(metaclass=YAMLGetter):
    section = "messages"
    subsection = "join_messages"

    general: list
    swearing: list
    it_mems: list


class IFTTT(metaclass=YAMLGetter):
    section = "ifttt"

    token: str
    suggestion: str


class UwU(metaclass=YAMLGetter):
    section = "messages"
    subsection = "uwu"

    faces: list
    replaces: dict


class Reddit(metaclass=YAMLGetter):
    section = "reddit"

    client_id: str
    client_secret: str
    user_agent: str
    username: str
    password: str


# Repository root (two levels above this module) and its absolute form.
HOME_DIR = os.path.join(os.path.dirname(__file__), "../..")
BOT_DIR = os.path.abspath(HOME_DIR)
# NOTE(review): `x` looks like leftover debugging; kept for compatibility.
x = os.path.dirname(__file__)
| import os
import yaml
from dotenv import load_dotenv, find_dotenv
with open(os.path.join(os.path.dirname(__file__), "../config.yml"), encoding="UTF-8") as f:
_CONFIG_YAML = yaml.safe_load(f)
load_dotenv(find_dotenv())
class YAMLGetter(type):
"""
** STOLEN STRAIGHT FROM python-discord's BOT https://github.com/python-discord/bot **
Implements a custom metaclass used for accessing
configuration data by simply accessing class attributes.
Supports getting configuration from up to two levels
of nested configuration through `section` and `subsection`.
`section` specifies the YAML configuration section (or "key")
in which the configuration lives, and must be set.
`subsection` is an optional attribute specifying the section
within the section from which configuration should be loaded.
Example Usage:
# config.yml
scott_bot:
prefixes:
direct_message: ''
guild: '!'
# admin_cog.py
class Prefixes(metaclass=YAMLGetter):
section = "scott_bot"
subsection = "prefixes"
# Usage in Python code
from config import Prefixes
def get_prefix(scott_bot, message):
if isinstance(message.channel, PrivateChannel):
return Prefixes.direct_message
return Prefixes.guild
"""
subsection = None
def __getattr__(cls, name):
name = name.lower()
try:
if cls.subsection is not None:
return _CONFIG_YAML[cls.section][cls.subsection][name]
return _CONFIG_YAML[cls.section][name]
except KeyError:
dotted_path = '.'.join(
(cls.section, cls.subsection, name)
if cls.subsection is not None else (cls.section, name)
)
# log.critical(f"Tried accessing configuration variable at `{dotted_path}`, but it could not be found.")
raise
def __getitem__(cls, name):
return cls.__getattr__(name)
def __iter__(cls):
"""Return generator of key: value pairs of current constants class' config values."""
for name in cls.__annotations__:
yield name, getattr(cls, name)
class Bot(metaclass=YAMLGetter):
section = "bot"
default_prefix: str
token: str
color: int
class DataBase(metaclass=YAMLGetter):
section = "database"
db_url: str
password: str
main_tablename: str
nickname_tablename: str
suggestions_tablename: str
DataBase.db_url = os.getenv("DB_URL", DataBase.db_url)
class Config(metaclass=YAMLGetter):
section = "config"
bad: list
channels: list
class ConfigHelp(metaclass=YAMLGetter):
section = "config"
subsection = "help"
prefix: str
join_leave: str
dad_name: str
admin_channel: str
swearing: str
class Logging(metaclass=YAMLGetter):
section = "logging"
enabled: bool
guild_id: int
class Channels(metaclass=YAMLGetter):
section = "logging"
subsection = "channels"
errors: int
guild_join: int
guild_leave: int
bot_start: int
class Defaults(metaclass=YAMLGetter):
section = "config"
subsection = "defaults"
prefix: str
dad_name: str
swearing: bool
class Emojis(metaclass=YAMLGetter):
section = "messages"
subsection = "emojis"
first: str
last: str
left: str
right: str
delete: str
class JoinMessages(metaclass=YAMLGetter):
section = "messages"
subsection = "join_messages"
general: list
swearing: list
it_mems: list
class IFTTT(metaclass=YAMLGetter):
section = "ifttt"
token: str
suggestion: str
class UwU(metaclass=YAMLGetter):
section = "messages"
subsection = "uwu"
faces: list
replaces: dict
class Reddit(metaclass=YAMLGetter):
section = "reddit"
client_id: str
client_secret: str
user_agent: str
username: str
password: str
HOME_DIR = os.path.join(os.path.dirname(__file__), "../..")
BOT_DIR = os.path.abspath(HOME_DIR)
x = os.path.dirname(__file__) | en | 0.639322 | ** STOLEN STRAIGHT FROM python-discord's BOT https://github.com/python-discord/bot ** Implements a custom metaclass used for accessing configuration data by simply accessing class attributes. Supports getting configuration from up to two levels of nested configuration through `section` and `subsection`. `section` specifies the YAML configuration section (or "key") in which the configuration lives, and must be set. `subsection` is an optional attribute specifying the section within the section from which configuration should be loaded. Example Usage: # config.yml scott_bot: prefixes: direct_message: '' guild: '!' # admin_cog.py class Prefixes(metaclass=YAMLGetter): section = "scott_bot" subsection = "prefixes" # Usage in Python code from config import Prefixes def get_prefix(scott_bot, message): if isinstance(message.channel, PrivateChannel): return Prefixes.direct_message return Prefixes.guild # log.critical(f"Tried accessing configuration variable at `{dotted_path}`, but it could not be found.") Return generator of key: value pairs of current constants class' config values. | 2.673963 | 3 |
# obsplan/base.py (from zachariahmilby/keck-aurora-observation-planning)
import datetime
from pathlib import Path

import astropy.units as u
import matplotlib.dates as dates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astroplan import Observer
from astropy.coordinates import SkyCoord
from astropy.time import Time
from numpy.typing import ArrayLike

from obsplan.ephemeris import _get_ephemeris, _get_eclipse_indices
from obsplan.graphics import color_dict, _keck_one_alt_az_axis, \
    _format_axis_date_labels
from obsplan.time import _convert_string_to_datetime, \
    _convert_datetime_to_string, _convert_ephemeris_date_to_string, \
    _convert_to_california_time, _calculate_duration

# set graphics style
plt.style.use(Path(Path(__file__).resolve().parent, 'anc/rcparams.mplstyle'))

# JPL Horizons body IDs and plot colors for each supported target
target_info = {'Io': {'ID': '501', 'color': color_dict['red']},
               'Europa': {'ID': '502', 'color': color_dict['blue']},
               'Ganymede': {'ID': '503', 'color': color_dict['grey']},
               'Callisto': {'ID': '504', 'color': color_dict['violet']},
               'Jupiter': {'ID': '599', 'color': color_dict['orange']}}
class _AngularSeparation:
    """
    This class takes a pre-calculated ephemeris table, generates the
    corresponding ephemeris table for a second target, then calculates the
    angular separation between the two targets over the duration of the table.
    """

    def __init__(self, target1_ephemeris: dict, target2_name: str):
        """
        Parameters
        ----------
        target1_ephemeris : dict
            Ephemeris table for the primary object.
        target2_name : str
            The comparison body (Io, Europa, Ganymede, Callisto or Jupiter).
        """
        self._target1_ephemeris = target1_ephemeris
        self._target2_name = target2_name
        self._target2_ephemeris = None  # filled in by the calculation below
        self._angular_separation = self._calculate_angular_separation()

    def _calculate_angular_separation(self) -> u.Quantity:
        """
        Calculate the angular separation between targets over the span of the
        primary target's ephemeris table.
        """
        target2_ephemeris = _get_ephemeris(
            self._target1_ephemeris['datetime_str'][0],
            self._target1_ephemeris['datetime_str'][-1],
            target=self._target2_name, airmass_lessthan=3)
        self._target2_ephemeris = target2_ephemeris
        target_1_positions = SkyCoord(ra=self._target1_ephemeris['RA'],
                                      dec=self._target1_ephemeris['DEC'])
        target_2_positions = SkyCoord(ra=target2_ephemeris['RA'],
                                      dec=target2_ephemeris['DEC'])
        return target_1_positions.separation(target_2_positions).to(u.arcsec)

    @property
    def target_2_ephemeris(self) -> dict:
        return self._target2_ephemeris

    @property
    def values(self) -> u.Quantity:
        return self._angular_separation

    @property
    def angular_radius(self) -> u.Quantity:
        # half of Horizons' angular-width column, returned as a Quantity
        return self._target2_ephemeris['ang_width'].value / 2 * u.arcsec
class EclipsePrediction:
    """
    This class finds all instances of a target (Io, Europa, Ganymede or
    Callisto) being eclipsed by Jupiter over a given timeframe visible from
    Mauna Kea at night.

    Parameters
    ----------
    starting_datetime : str
        The date you want to begin the search. Format (time optional):
        YYYY-MM-DD [HH:MM:SS].
    ending_datetime : str
        The date you want to end the search. Format (time optional):
        YYYY-MM-DD [HH:MM:SS].
    target : str
        The Galilean moon for which you want to find eclipses. Io, Europa,
        Ganymede or Callisto. If it fails, you might have to put in a body
        number like 501 for Io, 502 for Europa, 503 for Ganymede or 504 for
        Callisto.
    """

    def __init__(self, starting_datetime: str, ending_datetime: str,
                 target: str):
        self._target = target
        # replaced with Horizons' canonical name once an ephemeris is fetched
        self._target_name = target
        self._starting_datetime = starting_datetime
        self._ending_datetime = ending_datetime
        self._eclipses = self._find_eclipses()
        # print a summary table on construction
        print(self._print_string())

    def __str__(self):
        return self._print_string()
@staticmethod
def _consecutive_integers(
data: np.ndarray, stepsize: int = 1) -> np.ndarray:
"""
Find sets of consecutive integers (find independent events in an
ephemeris table).
"""
return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)
def _find_eclipses(self) -> list[dict]:
"""
Find the eclipses by first querying the JPL Horizons System in 1-hour
intervals, finding the eclipse events, then performs a more refined
search around those events in 1-minute intervals.
"""
data = []
initial_ephemeris = _get_ephemeris(self._starting_datetime,
self._ending_datetime,
target=self._target, step='1h')
eclipses = self._consecutive_integers(
_get_eclipse_indices(initial_ephemeris))
if len(eclipses[-1]) == 0:
raise Exception('Sorry, no eclipses found!')
for eclipse in eclipses:
self._target_name = \
initial_ephemeris['targetname'][0].split(' ')[0]
starting_time = _convert_string_to_datetime(
initial_ephemeris[eclipse[0]]['datetime_str'])
starting_time -= datetime.timedelta(days=1)
starting_time = _convert_datetime_to_string(starting_time)
ending_time = _convert_string_to_datetime(
initial_ephemeris[eclipse[-1]]['datetime_str'])
ending_time += datetime.timedelta(days=1)
ending_time = _convert_datetime_to_string(ending_time)
ephemeris = _get_ephemeris(starting_time, ending_time,
target=self._target, step='1m')
indices = _get_eclipse_indices(ephemeris)
refined_ephemeris = _get_ephemeris(
ephemeris['datetime_str'][indices[0]],
ephemeris['datetime_str'][indices[-1]], target=self._target)
data.append(refined_ephemeris)
return data
def _print_string(self) -> str:
"""
Format a terminal-printable summary table of the identified eclipses
along with starting/ending times in both UTC and local California time,
the duration of the eclipse, the range in airmass and the satellite's
relative velocity.
"""
print(f'\n{len(self._eclipses)} {self._target_name} eclipse(s) '
f'identified between {self._starting_datetime} and '
f'{self._ending_datetime}.\n')
df = pd.DataFrame(
columns=['Starting Time (Keck/UTC)', 'Ending Time (Keck/UTC)',
'Starting Time (California)', 'Ending Time (California)',
'Duration', 'Airmass Range', 'Relative Velocity'])
for eclipse in range(len(self._eclipses)):
times = self._eclipses[eclipse]['datetime_str']
airmass = self._eclipses[eclipse]['airmass']
relative_velocity = np.mean(self._eclipses[eclipse]['delta_rate'])
starting_time_utc = times[0]
ending_time_utc = times[-1]
data = {
'Starting Time (Keck/UTC)':
_convert_ephemeris_date_to_string(starting_time_utc),
'Ending Time (Keck/UTC)':
_convert_ephemeris_date_to_string(ending_time_utc),
'Starting Time (California)': _convert_datetime_to_string(
_convert_to_california_time(starting_time_utc)),
'Ending Time (California)': _convert_datetime_to_string(
_convert_to_california_time(ending_time_utc)),
'Duration':
_calculate_duration(starting_time_utc, ending_time_utc),
'Airmass Range':
f"{np.min(airmass):.3f} to {np.max(airmass):.3f}",
'Relative Velocity': f"{relative_velocity:.3f} km/s"
}
df = pd.concat([df, pd.DataFrame(data, index=[0])])
return pd.DataFrame(df).to_string(index=False, justify='left')
@staticmethod
def _plot_line_with_initial_position(
axis: plt.Axes, x: ArrayLike, y: u.Quantity, color: str,
label: str = None, radius: u.Quantity = None) -> None:
"""
Plot a line with a scatterplot point at the starting position. Useful
so I know on different plots which point corresponds to the beginning
of the eclipse.
Update 2022-05-11: now includes Jupiter's angular diameter.
"""
axis.plot(x, y, color=color, linewidth=1)
if radius is not None:
axis.fill_between(x, y.value+radius.value, y.value-radius.value,
color=color, linewidth=0, alpha=0.25)
axis.scatter(x[0], y[0], color=color, edgecolors='none', s=9)
if label is not None:
axis.annotate(label, xy=(x[0], y[0].value), va='center',
ha='right', xytext=(-3, 0), fontsize=6,
textcoords='offset pixels', color=color)
def save_summary_graphics(self, save_directory: str = Path.cwd()) -> None:
"""
Save a summary graphic of each identified eclipse to a specified
directory.
"""
for eclipse in range(len(self._eclipses)):
# get relevant quantities
times = self._eclipses[eclipse]['datetime_str']
starting_time = times[0]
ending_time = times[-1]
duration = _calculate_duration(starting_time, ending_time)
times = dates.datestr2num(times)
polar_angle = 'unknown'
observer = Observer.at_site('Keck')
sunset = observer.sun_set_time(
Time(_convert_string_to_datetime(starting_time)),
which='nearest')
sunset = _convert_datetime_to_string(sunset.datetime)
sunrise = observer.sun_rise_time(
Time(_convert_string_to_datetime(ending_time)),
which='nearest')
sunrise = _convert_datetime_to_string(sunrise.datetime)
# make figure and place axes
fig = plt.figure(figsize=(5, 4), constrained_layout=True)
gs = gridspec.GridSpec(nrows=2, ncols=2, width_ratios=[1, 1.5],
figure=fig)
info_axis = fig.add_subplot(gs[0, 0])
info_axis.set_frame_on(False)
info_axis.set_xticks([])
info_axis.set_yticks([])
alt_az_polar_axis = _keck_one_alt_az_axis(
fig.add_subplot(gs[1, 0], projection='polar'))
airmass_axis_utc = fig.add_subplot(gs[0, 1])
airmass_axis_utc.set_ylabel('Airmass', fontweight='bold')
primary_sep_axis_utc = fig.add_subplot(gs[1, 1])
primary_sep_axis_utc.set_ylabel('Angular Separation [arcsec]',
fontweight='bold')
# plot data
self._plot_line_with_initial_position(
alt_az_polar_axis, np.radians(self._eclipses[eclipse]['AZ']),
self._eclipses[eclipse]['EL'], color='k')
self._plot_line_with_initial_position(
airmass_axis_utc, times, self._eclipses[eclipse]['airmass'],
color='k')
airmass_axis_california = _format_axis_date_labels(
airmass_axis_utc)
for ind, target in enumerate(
[target_info[key]['ID'] for key in target_info.keys()]):
angular_separation = _AngularSeparation(
self._eclipses[eclipse], target)
# get Jupiter's average polar angle rotation when calculating
# it's ephemerides
radius = 0 * u.arcsec
if target == '599':
polar_angle = np.mean(
angular_separation.target_2_ephemeris['NPole_ang'])
radius = angular_separation.angular_radius
if np.sum(angular_separation.values != 0):
self._plot_line_with_initial_position(
primary_sep_axis_utc, times, angular_separation.values,
color=target_info[
list(target_info.keys())[ind]]['color'],
label=angular_separation.target_2_ephemeris[
'targetname'][0][0], radius=radius)
primary_sep_axis_california = _format_axis_date_labels(
primary_sep_axis_utc)
# information string, it's beastly but I don't know a better way of
# doing it...
info_string = 'California Start:' + '\n'
info_string += 'California End:' + '\n' * 2
info_string += 'Keck Start:' + '\n'
info_string += 'Keck End:' + '\n' * 2
info_string += 'Keck Sunset:' + '\n'
info_string += 'Keck Sunrise:' + '\n' * 2
info_string += f'Duration: {duration}' + '\n'
info_string += 'Jupiter North Pole Angle: '
info_string += fr"{polar_angle:.1f}$\degree$" + '\n'
info_string += f'{self._target_name} Relative Velocity: '
info_string += \
fr"${np.mean(self._eclipses[eclipse]['delta_rate']):.3f}$ km/s"
times_string = _convert_datetime_to_string(
_convert_to_california_time(starting_time))
times_string += '\n'
times_string += _convert_datetime_to_string(
_convert_to_california_time(ending_time))
times_string += '\n' * 2
times_string += f'{starting_time} UTC' + '\n'
times_string += f'{ending_time} UTC' + '\n'
times_string += '\n'
times_string += f'{sunset} UTC' + '\n'
times_string += f'{sunrise} UTC'
info_axis.text(0.05, 0.95, info_string, linespacing=1.67,
ha='left', va='top', fontsize=6)
info_axis.text(0.4, 0.95, times_string, linespacing=1.67,
ha='left', va='top',
transform=info_axis.transAxes, fontsize=6)
info_axis.set_title('Eclipse Information', fontweight='bold')
# set axis labels, limits and other parameters
airmass_axis_california.set_xlabel('Time (California)',
fontweight='bold')
airmass_axis_utc.set_xticklabels([])
primary_sep_axis_utc.set_xlabel('Time (UTC)', fontweight='bold')
primary_sep_axis_california.set_xticklabels([])
alt_az_polar_axis.set_rmin(90)
alt_az_polar_axis.set_rmax(0)
airmass_axis_utc.set_ylim(1, 2)
primary_sep_axis_utc.set_ylim(bottom=0)
# save the figure
filename_date_str = datetime.datetime.strftime(
_convert_string_to_datetime(starting_time), '%Y-%m-%d')
filepath = Path(save_directory,
f'{self._target_name.lower()}_'
f'{filename_date_str.lower()}.pdf')
if not filepath.parent.exists():
filepath.mkdir(parents=True)
plt.savefig(filepath)
plt.close(fig)
if __name__ == "__main__":
    # Example usage: find and plot all Europa eclipses in November 2022.
    eclipse_prediction = EclipsePrediction(starting_datetime='2022-11-01',
                                           ending_datetime='2022-11-30',
                                           target='Europa')
    eclipse_prediction.save_summary_graphics('/Users/zachariahmilby/Downloads')
| import datetime
from pathlib import Path
import astropy.units as u
import matplotlib.dates as dates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astroplan import Observer
from astropy.coordinates import SkyCoord
from astropy.time import Time
from numpy.typing import ArrayLike
from obsplan.ephemeris import _get_ephemeris, _get_eclipse_indices
from obsplan.graphics import color_dict, _keck_one_alt_az_axis, \
_format_axis_date_labels
from obsplan.time import _convert_string_to_datetime, \
_convert_datetime_to_string, _convert_ephemeris_date_to_string, \
_convert_to_california_time, _calculate_duration
# set graphics style
plt.style.use(Path(Path(__file__).resolve().parent, 'anc/rcparams.mplstyle'))
target_info = {'Io': {'ID': '501', 'color': color_dict['red']},
'Europa': {'ID': '502', 'color': color_dict['blue']},
'Ganymede': {'ID': '503', 'color': color_dict['grey']},
'Callisto': {'ID': '504', 'color': color_dict['violet']},
'Jupiter': {'ID': '599', 'color': color_dict['orange']}}
class _AngularSeparation:
"""
This class takes a pre-calculated ephemeris table, generates the
corresponding ephemeris table for a second target, then calculates the
angular separation between the two targets over the duration of the table.
"""
def __init__(self, target1_ephemeris: dict, target2_name: str):
"""
Parameters
----------
target1_ephemeris : dict
Ephemeris table for the primary object.
target2_name : str
The comparison body (Io, Europa, Ganymede, Callisto or Jupiter).
"""
self._target1_ephemeris = target1_ephemeris
self._target2_name = target2_name
self._target2_ephemeris = None
self._angular_separation = self._calculate_angular_separation()
def _calculate_angular_separation(self) -> u.Quantity:
"""
Calculate the angular separation between targets.
"""
target2_ephemeris = _get_ephemeris(
self._target1_ephemeris['datetime_str'][0],
self._target1_ephemeris['datetime_str'][-1],
target=self._target2_name, airmass_lessthan=3)
self._target2_ephemeris = target2_ephemeris
target_1_positions = SkyCoord(ra=self._target1_ephemeris['RA'],
dec=self._target1_ephemeris['DEC'])
target_2_positions = SkyCoord(ra=target2_ephemeris['RA'],
dec=target2_ephemeris['DEC'])
return target_1_positions.separation(target_2_positions).to(u.arcsec)
@property
def target_2_ephemeris(self) -> dict:
return self._target2_ephemeris
@property
def values(self) -> u.Quantity:
return self._angular_separation
@property
def angular_radius(self) -> u.Quantity:
return self._target2_ephemeris['ang_width'].value/2 * u.arcsec
class EclipsePrediction:
"""
This class finds all instances of a target (Io, Europa, Ganymede or
Callisto) being eclipsed by Jupiter over a given timeframe visible from
Mauna Kea at night.
Parameters
----------
starting_datetime : str
The date you want to begin the search. Format (time optional):
YYYY-MM-DD [HH:MM:SS].
ending_datetime
The date you want to end the search. Format (time optional):
YYYY-MM-DD [HH:MM:SS].
target : str
The Galilean moon for which you want to find eclipses. Io, Europa,
Ganymede or Callisto. If it fails, you might have to put in a body
number like 501 for Io, 502 for Europa, 503 for Ganymede or 504 for
Callisto.
"""
def __init__(self, starting_datetime: str, ending_datetime: str,
target: str):
self._target = target
self._target_name = target
self._starting_datetime = starting_datetime
self._ending_datetime = ending_datetime
self._eclipses = self._find_eclipses()
print(self._print_string())
def __str__(self):
return self._print_string()
@staticmethod
def _consecutive_integers(
data: np.ndarray, stepsize: int = 1) -> np.ndarray:
"""
Find sets of consecutive integers (find independent events in an
ephemeris table).
"""
return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)
def _find_eclipses(self) -> list[dict]:
"""
Find the eclipses by first querying the JPL Horizons System in 1-hour
intervals, finding the eclipse events, then performs a more refined
search around those events in 1-minute intervals.
"""
data = []
initial_ephemeris = _get_ephemeris(self._starting_datetime,
self._ending_datetime,
target=self._target, step='1h')
eclipses = self._consecutive_integers(
_get_eclipse_indices(initial_ephemeris))
if len(eclipses[-1]) == 0:
raise Exception('Sorry, no eclipses found!')
for eclipse in eclipses:
self._target_name = \
initial_ephemeris['targetname'][0].split(' ')[0]
starting_time = _convert_string_to_datetime(
initial_ephemeris[eclipse[0]]['datetime_str'])
starting_time -= datetime.timedelta(days=1)
starting_time = _convert_datetime_to_string(starting_time)
ending_time = _convert_string_to_datetime(
initial_ephemeris[eclipse[-1]]['datetime_str'])
ending_time += datetime.timedelta(days=1)
ending_time = _convert_datetime_to_string(ending_time)
ephemeris = _get_ephemeris(starting_time, ending_time,
target=self._target, step='1m')
indices = _get_eclipse_indices(ephemeris)
refined_ephemeris = _get_ephemeris(
ephemeris['datetime_str'][indices[0]],
ephemeris['datetime_str'][indices[-1]], target=self._target)
data.append(refined_ephemeris)
return data
def _print_string(self) -> str:
"""
Format a terminal-printable summary table of the identified eclipses
along with starting/ending times in both UTC and local California time,
the duration of the eclipse, the range in airmass and the satellite's
relative velocity.
"""
print(f'\n{len(self._eclipses)} {self._target_name} eclipse(s) '
f'identified between {self._starting_datetime} and '
f'{self._ending_datetime}.\n')
df = pd.DataFrame(
columns=['Starting Time (Keck/UTC)', 'Ending Time (Keck/UTC)',
'Starting Time (California)', 'Ending Time (California)',
'Duration', 'Airmass Range', 'Relative Velocity'])
for eclipse in range(len(self._eclipses)):
times = self._eclipses[eclipse]['datetime_str']
airmass = self._eclipses[eclipse]['airmass']
relative_velocity = np.mean(self._eclipses[eclipse]['delta_rate'])
starting_time_utc = times[0]
ending_time_utc = times[-1]
data = {
'Starting Time (Keck/UTC)':
_convert_ephemeris_date_to_string(starting_time_utc),
'Ending Time (Keck/UTC)':
_convert_ephemeris_date_to_string(ending_time_utc),
'Starting Time (California)': _convert_datetime_to_string(
_convert_to_california_time(starting_time_utc)),
'Ending Time (California)': _convert_datetime_to_string(
_convert_to_california_time(ending_time_utc)),
'Duration':
_calculate_duration(starting_time_utc, ending_time_utc),
'Airmass Range':
f"{np.min(airmass):.3f} to {np.max(airmass):.3f}",
'Relative Velocity': f"{relative_velocity:.3f} km/s"
}
df = pd.concat([df, pd.DataFrame(data, index=[0])])
return pd.DataFrame(df).to_string(index=False, justify='left')
@staticmethod
def _plot_line_with_initial_position(
axis: plt.Axes, x: ArrayLike, y: u.Quantity, color: str,
label: str = None, radius: u.Quantity = None) -> None:
"""
Plot a line with a scatterplot point at the starting position. Useful
so I know on different plots which point corresponds to the beginning
of the eclipse.
Update 2022-05-11: now includes Jupiter's angular diameter.
"""
axis.plot(x, y, color=color, linewidth=1)
if radius is not None:
axis.fill_between(x, y.value+radius.value, y.value-radius.value,
color=color, linewidth=0, alpha=0.25)
axis.scatter(x[0], y[0], color=color, edgecolors='none', s=9)
if label is not None:
axis.annotate(label, xy=(x[0], y[0].value), va='center',
ha='right', xytext=(-3, 0), fontsize=6,
textcoords='offset pixels', color=color)
def save_summary_graphics(self, save_directory: str = Path.cwd()) -> None:
"""
Save a summary graphic of each identified eclipse to a specified
directory.
"""
for eclipse in range(len(self._eclipses)):
# get relevant quantities
times = self._eclipses[eclipse]['datetime_str']
starting_time = times[0]
ending_time = times[-1]
duration = _calculate_duration(starting_time, ending_time)
times = dates.datestr2num(times)
polar_angle = 'unknown'
observer = Observer.at_site('Keck')
sunset = observer.sun_set_time(
Time(_convert_string_to_datetime(starting_time)),
which='nearest')
sunset = _convert_datetime_to_string(sunset.datetime)
sunrise = observer.sun_rise_time(
Time(_convert_string_to_datetime(ending_time)),
which='nearest')
sunrise = _convert_datetime_to_string(sunrise.datetime)
# make figure and place axes
fig = plt.figure(figsize=(5, 4), constrained_layout=True)
gs = gridspec.GridSpec(nrows=2, ncols=2, width_ratios=[1, 1.5],
figure=fig)
info_axis = fig.add_subplot(gs[0, 0])
info_axis.set_frame_on(False)
info_axis.set_xticks([])
info_axis.set_yticks([])
alt_az_polar_axis = _keck_one_alt_az_axis(
fig.add_subplot(gs[1, 0], projection='polar'))
airmass_axis_utc = fig.add_subplot(gs[0, 1])
airmass_axis_utc.set_ylabel('Airmass', fontweight='bold')
primary_sep_axis_utc = fig.add_subplot(gs[1, 1])
primary_sep_axis_utc.set_ylabel('Angular Separation [arcsec]',
fontweight='bold')
# plot data
self._plot_line_with_initial_position(
alt_az_polar_axis, np.radians(self._eclipses[eclipse]['AZ']),
self._eclipses[eclipse]['EL'], color='k')
self._plot_line_with_initial_position(
airmass_axis_utc, times, self._eclipses[eclipse]['airmass'],
color='k')
airmass_axis_california = _format_axis_date_labels(
airmass_axis_utc)
for ind, target in enumerate(
[target_info[key]['ID'] for key in target_info.keys()]):
angular_separation = _AngularSeparation(
self._eclipses[eclipse], target)
# get Jupiter's average polar angle rotation when calculating
# it's ephemerides
radius = 0 * u.arcsec
if target == '599':
polar_angle = np.mean(
angular_separation.target_2_ephemeris['NPole_ang'])
radius = angular_separation.angular_radius
if np.sum(angular_separation.values != 0):
self._plot_line_with_initial_position(
primary_sep_axis_utc, times, angular_separation.values,
color=target_info[
list(target_info.keys())[ind]]['color'],
label=angular_separation.target_2_ephemeris[
'targetname'][0][0], radius=radius)
primary_sep_axis_california = _format_axis_date_labels(
primary_sep_axis_utc)
# information string, it's beastly but I don't know a better way of
# doing it...
info_string = 'California Start:' + '\n'
info_string += 'California End:' + '\n' * 2
info_string += 'Keck Start:' + '\n'
info_string += 'Keck End:' + '\n' * 2
info_string += 'Keck Sunset:' + '\n'
info_string += 'Keck Sunrise:' + '\n' * 2
info_string += f'Duration: {duration}' + '\n'
info_string += 'Jupiter North Pole Angle: '
info_string += fr"{polar_angle:.1f}$\degree$" + '\n'
info_string += f'{self._target_name} Relative Velocity: '
info_string += \
fr"${np.mean(self._eclipses[eclipse]['delta_rate']):.3f}$ km/s"
times_string = _convert_datetime_to_string(
_convert_to_california_time(starting_time))
times_string += '\n'
times_string += _convert_datetime_to_string(
_convert_to_california_time(ending_time))
times_string += '\n' * 2
times_string += f'{starting_time} UTC' + '\n'
times_string += f'{ending_time} UTC' + '\n'
times_string += '\n'
times_string += f'{sunset} UTC' + '\n'
times_string += f'{sunrise} UTC'
info_axis.text(0.05, 0.95, info_string, linespacing=1.67,
ha='left', va='top', fontsize=6)
info_axis.text(0.4, 0.95, times_string, linespacing=1.67,
ha='left', va='top',
transform=info_axis.transAxes, fontsize=6)
info_axis.set_title('Eclipse Information', fontweight='bold')
# set axis labels, limits and other parameters
airmass_axis_california.set_xlabel('Time (California)',
fontweight='bold')
airmass_axis_utc.set_xticklabels([])
primary_sep_axis_utc.set_xlabel('Time (UTC)', fontweight='bold')
primary_sep_axis_california.set_xticklabels([])
alt_az_polar_axis.set_rmin(90)
alt_az_polar_axis.set_rmax(0)
airmass_axis_utc.set_ylim(1, 2)
primary_sep_axis_utc.set_ylim(bottom=0)
# save the figure
filename_date_str = datetime.datetime.strftime(
_convert_string_to_datetime(starting_time), '%Y-%m-%d')
filepath = Path(save_directory,
f'{self._target_name.lower()}_'
f'{filename_date_str.lower()}.pdf')
if not filepath.parent.exists():
filepath.mkdir(parents=True)
plt.savefig(filepath)
plt.close(fig)
if __name__ == "__main__":
eclipse_prediction = EclipsePrediction(starting_datetime='2022-11-01',
ending_datetime='2022-11-30',
target='Europa')
eclipse_prediction.save_summary_graphics('/Users/zachariahmilby/Downloads')
| en | 0.775818 | # set graphics style This class takes a pre-calculated ephemeris table, generates the corresponding ephemeris table for a second target, then calculates the angular separation between the two targets over the duration of the table. Parameters ---------- target1_ephemeris : dict Ephemeris table for the primary object. target2_name : str The comparison body (Io, Europa, Ganymede, Callisto or Jupiter). Calculate the angular separation between targets. This class finds all instances of a target (Io, Europa, Ganymede or Callisto) being eclipsed by Jupiter over a given timeframe visible from Mauna Kea at night. Parameters ---------- starting_datetime : str The date you want to begin the search. Format (time optional): YYYY-MM-DD [HH:MM:SS]. ending_datetime The date you want to end the search. Format (time optional): YYYY-MM-DD [HH:MM:SS]. target : str The Galilean moon for which you want to find eclipses. Io, Europa, Ganymede or Callisto. If it fails, you might have to put in a body number like 501 for Io, 502 for Europa, 503 for Ganymede or 504 for Callisto. Find sets of consecutive integers (find independent events in an ephemeris table). Find the eclipses by first querying the JPL Horizons System in 1-hour intervals, finding the eclipse events, then performs a more refined search around those events in 1-minute intervals. Format a terminal-printable summary table of the identified eclipses along with starting/ending times in both UTC and local California time, the duration of the eclipse, the range in airmass and the satellite's relative velocity. Plot a line with a scatterplot point at the starting position. Useful so I know on different plots which point corresponds to the beginning of the eclipse. Update 2022-05-11: now includes Jupiter's angular diameter. Save a summary graphic of each identified eclipse to a specified directory. 
# get relevant quantities # make figure and place axes # plot data # get Jupiter's average polar angle rotation when calculating # it's ephemerides # information string, it's beastly but I don't know a better way of # doing it... # set axis labels, limits and other parameters # save the figure | 2.367845 | 2 |
registry-image-check.py | cmorty/docker-image-check | 0 | 6618653 | <reponame>cmorty/docker-image-check
#!/usr/bin/env python
# -*- coding:utf-8 -*-
""" docker image check: small CLI around a Docker registry's HTTP API. """
import argparse
import sys
import json
from registry import RegistryApi
class ApiProxy(object):
""" user RegistryApi """
def __init__(self, registry, args):
self.registry = registry
self.args = args
self.callbacks = dict()
self.register_callback("repo", "list", self.list_repo)
self.register_callback("tag", "list", self.list_tag)
self.register_callback("tag", "delete", self.delete_tag)
self.register_callback("manifest", "list", self.list_manifest)
self.register_callback("manifest", "delete", self.delete_manifest)
self.register_callback("manifest", "get", self.get_manifest)
def register_callback(self, target, action, func):
""" register real actions """
if not target in self.callbacks.keys():
self.callbacks[target] = {action: func}
return
self.callbacks[target][action] = func
def execute(self, target, action):
""" execute """
print json.dumps(self.callbacks[target][action](), indent=4, sort_keys=True)
def list_repo(self):
""" list repo """
return self.registry.getRepositoryList(self.args.num)
def list_tag(self):
""" list tag """
return self.registry.getTagList(self.args.repo)
def delete_tag(self):
""" delete tag """
(_, ref) = self.registry.existManifest(self.args.repo, self.args.tag)
if ref is not None:
return self.registry.deleteManifest(self.args.repo, ref)
return False
def list_manifest(self):
""" list manifest """
tags = self.registry.getTagList(self.args.repo)["tags"]
manifests = list()
if tags is None:
return None
for i in tags:
content = self.registry.getManifestWithConf(self.args.repo, i)
manifests.append({i: content})
return manifests
def delete_manifest(self):
""" delete manifest """
return self.registry.deleteManifest(self.args.repo, self.args.ref)
def get_manifest(self):
""" get manifest """
return self.registry.getManifestWithConf(self.args.repo, self.args.tag)
def get_parser():
""" return a parser """
parser = argparse.ArgumentParser("cli")
parser.add_argument('registryimage', help="registry/image:tag - tag is optional")
# Username and password come last to make them optional later
parser.add_argument('username', help='username')
parser.add_argument('password', help='password')
return parser
def main():
""" main entrance """
parser = get_parser()
options = parser.parse_args()
registryimage = options.registryimage.split('/', 1)
registry = RegistryApi(options.username, options.password, "https://" + registryimage[0] + '/')
tags = registry.getTagList(registryimage[1])
print(json.dumps(tags))
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
""" docker image check """
import argparse
import sys
import json
from registry import RegistryApi
class ApiProxy(object):
""" user RegistryApi """
def __init__(self, registry, args):
self.registry = registry
self.args = args
self.callbacks = dict()
self.register_callback("repo", "list", self.list_repo)
self.register_callback("tag", "list", self.list_tag)
self.register_callback("tag", "delete", self.delete_tag)
self.register_callback("manifest", "list", self.list_manifest)
self.register_callback("manifest", "delete", self.delete_manifest)
self.register_callback("manifest", "get", self.get_manifest)
def register_callback(self, target, action, func):
""" register real actions """
if not target in self.callbacks.keys():
self.callbacks[target] = {action: func}
return
self.callbacks[target][action] = func
def execute(self, target, action):
""" execute """
print json.dumps(self.callbacks[target][action](), indent=4, sort_keys=True)
def list_repo(self):
""" list repo """
return self.registry.getRepositoryList(self.args.num)
def list_tag(self):
""" list tag """
return self.registry.getTagList(self.args.repo)
def delete_tag(self):
""" delete tag """
(_, ref) = self.registry.existManifest(self.args.repo, self.args.tag)
if ref is not None:
return self.registry.deleteManifest(self.args.repo, ref)
return False
def list_manifest(self):
""" list manifest """
tags = self.registry.getTagList(self.args.repo)["tags"]
manifests = list()
if tags is None:
return None
for i in tags:
content = self.registry.getManifestWithConf(self.args.repo, i)
manifests.append({i: content})
return manifests
def delete_manifest(self):
""" delete manifest """
return self.registry.deleteManifest(self.args.repo, self.args.ref)
def get_manifest(self):
""" get manifest """
return self.registry.getManifestWithConf(self.args.repo, self.args.tag)
def get_parser():
""" return a parser """
parser = argparse.ArgumentParser("cli")
parser.add_argument('registryimage', help="registry/image:tag - tag is optional")
# Username and password come last to make them optional later
parser.add_argument('username', help='username')
parser.add_argument('password', help='password')
return parser
def main():
""" main entrance """
parser = get_parser()
options = parser.parse_args()
registryimage = options.registryimage.split('/', 1)
registry = RegistryApi(options.username, options.password, "https://" + registryimage[0] + '/')
tags = registry.getTagList(registryimage[1])
print(json.dumps(tags))
if __name__ == '__main__':
main() | en | 0.827308 | #!/usr/bin/env python # -*- coding:utf-8 -*- docker image check user RegistryApi register real actions execute list repo list tag delete tag list manifest delete manifest get manifest return a parser # Username and password come last to make them optional later main entrance | 2.3603 | 2 |
Python_MiniGame_Fighter/venv/Lib/site-packages/pygetwindow/_pygetwindow_macos.py | JE-Chen/je_old_repo | 12 | 6618654 | import Quartz
import pygetwindow
def getAllTitles():
"""Returns a list of strings of window titles for all visible windows.
"""
# Source: https://stackoverflow.com/questions/53237278/obtain-list-of-all-window-titles-on-macos-from-a-python-script/53985082#53985082
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
return ['%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) for win in windows]
def getActiveWindow():
"""Returns a Window object of the currently active Window."""
# Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if win['kCGWindowLayer'] == 0:
return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) # Temporary. For now, we'll just return the title of the active window.
raise Exception('Could not find an active window.') # Temporary hack.
def getWindowsAt(x, y):
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
matches = []
for win in windows:
w = win['kCGWindowBounds']
if pygetwindow.pointInRect(x, y, w['X'], w['Y'], w['Width'], w['Height']):
matches.append('%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')))
return matches
def activate():
# TEMP - this is not a real api, I'm just using this name to store these notes for now.
# Source: https://stackoverflow.com/questions/7460092/nswindow-makekeyandorderfront-makes-window-appear-but-not-key-or-front?rq=1
# Source: https://stackoverflow.com/questions/4905024/is-it-possible-to-bring-window-to-front-without-taking-focus?rq=1
pass
def getWindowGeometry(title):
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')):
w = win['kCGWindowBounds']
return (w['X'], w['Y'], w['Width'], w['Height'])
def isVisible(title):
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')):
return win['kCGWindowAlpha'] != 0.0
def isMinimized():
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
# Source: https://stackoverflow.com/questions/10258676/how-to-know-whether-a-window-is-minimised-or-not
# Use the kCGWindowIsOnscreen to check this. Minimized windows are considered to not be on the screen. (But I'm not sure if there are other situations where a window is "off screen".)
# I'm not sure how kCGWindowListOptionOnScreenOnly interferes with this.
pass
# TODO: This class doesn't work yet. I've copied the Win32Window class and will make adjustments as needed here.
class MacOSWindow():
def __init__(self, hWnd):
self._hWnd = hWnd # TODO fix this, this is a LP_c_long insead of an int.
def _onRead(attrName):
r = self._getWindowRect(_hWnd)
self._rect._left = r.left # Setting _left directly to skip the onRead.
self._rect._top = r.top # Setting _top directly to skip the onRead.
self._rect._width = r.right - r.left # Setting _width directly to skip the onRead.
self._rect._height = r.bottom - r.top # Setting _height directly to skip the onRead.
def _onChange(oldBox, newBox):
self.moveTo(newBox.left, newBox.top)
self.resizeTo(newBox.width, newBox.height)
r = self._getWindowRect(_hWnd)
self._rect = pyrect.Rect(r.left, r.top, r.right - r.left, r.bottom - r.top, onChange=_onChange, onRead=_onRead)
def __str__(self):
r = self._getWindowRect(_hWnd)
width = r.right - r.left
height = r.bottom - r.top
return '<%s left="%s", top="%s", width="%s", height="%s", title="%s">' % (self.__class__.__name__, r.left, r.top, width, height, self.title)
def __repr__(self):
return '%s(hWnd=%s)' % (self.__class__.__name__, self._hWnd)
def __eq__(self, other):
return isinstance(other, MacOSWindow) and self._hWnd == other._hWnd
def close(self):
"""Closes this window. This may trigger "Are you sure you want to
quit?" dialogs or other actions that prevent the window from
actually closing. This is identical to clicking the X button on the
window."""
raise NotImplementedError
def minimize(self):
"""Minimizes this window."""
raise NotImplementedError
def maximize(self):
"""Maximizes this window."""
raise NotImplementedError
def restore(self):
"""If maximized or minimized, restores the window to it's normal size."""
raise NotImplementedError
def activate(self):
"""Activate this window and make it the foreground window."""
raise NotImplementedError
def resizeRel(self, widthOffset, heightOffset):
"""Resizes the window relative to its current size."""
raise NotImplementedError
def resizeTo(self, newWidth, newHeight):
"""Resizes the window to a new width and height."""
raise NotImplementedError
def moveRel(self, xOffset, yOffset):
"""Moves the window relative to its current position."""
raise NotImplementedError
def moveTo(self, newLeft, newTop):
"""Moves the window to new coordinates on the screen."""
raise NotImplementedError
@property
def isMinimized(self):
"""Returns True if the window is currently minimized."""
raise NotImplementedError
@property
def isMaximized(self):
"""Returns True if the window is currently maximized."""
raise NotImplementedError
@property
def isActive(self):
"""Returns True if the window is currently the active, foreground window."""
raise NotImplementedError
@property
def title(self):
"""Returns the window title as a string."""
raise NotImplementedError
@property
def visible(self):
raise NotImplementedError
| import Quartz
import pygetwindow
def getAllTitles():
"""Returns a list of strings of window titles for all visible windows.
"""
# Source: https://stackoverflow.com/questions/53237278/obtain-list-of-all-window-titles-on-macos-from-a-python-script/53985082#53985082
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
return ['%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) for win in windows]
def getActiveWindow():
"""Returns a Window object of the currently active Window."""
# Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if win['kCGWindowLayer'] == 0:
return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) # Temporary. For now, we'll just return the title of the active window.
raise Exception('Could not find an active window.') # Temporary hack.
def getWindowsAt(x, y):
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
matches = []
for win in windows:
w = win['kCGWindowBounds']
if pygetwindow.pointInRect(x, y, w['X'], w['Y'], w['Width'], w['Height']):
matches.append('%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')))
return matches
def activate():
# TEMP - this is not a real api, I'm just using this name to store these notes for now.
# Source: https://stackoverflow.com/questions/7460092/nswindow-makekeyandorderfront-makes-window-appear-but-not-key-or-front?rq=1
# Source: https://stackoverflow.com/questions/4905024/is-it-possible-to-bring-window-to-front-without-taking-focus?rq=1
pass
def getWindowGeometry(title):
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')):
w = win['kCGWindowBounds']
return (w['X'], w['Y'], w['Width'], w['Height'])
def isVisible(title):
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)
for win in windows:
if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')):
return win['kCGWindowAlpha'] != 0.0
def isMinimized():
# TEMP - this is not a real api, I'm just using this name to stoe these notes for now.
# Source: https://stackoverflow.com/questions/10258676/how-to-know-whether-a-window-is-minimised-or-not
# Use the kCGWindowIsOnscreen to check this. Minimized windows are considered to not be on the screen. (But I'm not sure if there are other situations where a window is "off screen".)
# I'm not sure how kCGWindowListOptionOnScreenOnly interferes with this.
pass
# TODO: This class doesn't work yet. I've copied the Win32Window class and will make adjustments as needed here.
class MacOSWindow():
def __init__(self, hWnd):
self._hWnd = hWnd # TODO fix this, this is a LP_c_long insead of an int.
def _onRead(attrName):
r = self._getWindowRect(_hWnd)
self._rect._left = r.left # Setting _left directly to skip the onRead.
self._rect._top = r.top # Setting _top directly to skip the onRead.
self._rect._width = r.right - r.left # Setting _width directly to skip the onRead.
self._rect._height = r.bottom - r.top # Setting _height directly to skip the onRead.
def _onChange(oldBox, newBox):
self.moveTo(newBox.left, newBox.top)
self.resizeTo(newBox.width, newBox.height)
r = self._getWindowRect(_hWnd)
self._rect = pyrect.Rect(r.left, r.top, r.right - r.left, r.bottom - r.top, onChange=_onChange, onRead=_onRead)
def __str__(self):
r = self._getWindowRect(_hWnd)
width = r.right - r.left
height = r.bottom - r.top
return '<%s left="%s", top="%s", width="%s", height="%s", title="%s">' % (self.__class__.__name__, r.left, r.top, width, height, self.title)
def __repr__(self):
return '%s(hWnd=%s)' % (self.__class__.__name__, self._hWnd)
def __eq__(self, other):
return isinstance(other, MacOSWindow) and self._hWnd == other._hWnd
def close(self):
"""Closes this window. This may trigger "Are you sure you want to
quit?" dialogs or other actions that prevent the window from
actually closing. This is identical to clicking the X button on the
window."""
raise NotImplementedError
def minimize(self):
"""Minimizes this window."""
raise NotImplementedError
def maximize(self):
"""Maximizes this window."""
raise NotImplementedError
def restore(self):
"""If maximized or minimized, restores the window to it's normal size."""
raise NotImplementedError
def activate(self):
"""Activate this window and make it the foreground window."""
raise NotImplementedError
def resizeRel(self, widthOffset, heightOffset):
"""Resizes the window relative to its current size."""
raise NotImplementedError
def resizeTo(self, newWidth, newHeight):
"""Resizes the window to a new width and height."""
raise NotImplementedError
def moveRel(self, xOffset, yOffset):
"""Moves the window relative to its current position."""
raise NotImplementedError
def moveTo(self, newLeft, newTop):
"""Moves the window to new coordinates on the screen."""
raise NotImplementedError
@property
def isMinimized(self):
"""Returns True if the window is currently minimized."""
raise NotImplementedError
@property
def isMaximized(self):
"""Returns True if the window is currently maximized."""
raise NotImplementedError
@property
def isActive(self):
"""Returns True if the window is currently the active, foreground window."""
raise NotImplementedError
@property
def title(self):
"""Returns the window title as a string."""
raise NotImplementedError
@property
def visible(self):
raise NotImplementedError
| en | 0.889871 | Returns a list of strings of window titles for all visible windows. # Source: https://stackoverflow.com/questions/53237278/obtain-list-of-all-window-titles-on-macos-from-a-python-script/53985082#53985082 Returns a Window object of the currently active Window. # Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo # Temporary. For now, we'll just return the title of the active window. # Temporary hack. # TEMP - this is not a real api, I'm just using this name to store these notes for now. # Source: https://stackoverflow.com/questions/7460092/nswindow-makekeyandorderfront-makes-window-appear-but-not-key-or-front?rq=1 # Source: https://stackoverflow.com/questions/4905024/is-it-possible-to-bring-window-to-front-without-taking-focus?rq=1 # TEMP - this is not a real api, I'm just using this name to stoe these notes for now. # TEMP - this is not a real api, I'm just using this name to stoe these notes for now. # TEMP - this is not a real api, I'm just using this name to stoe these notes for now. # Source: https://stackoverflow.com/questions/10258676/how-to-know-whether-a-window-is-minimised-or-not # Use the kCGWindowIsOnscreen to check this. Minimized windows are considered to not be on the screen. (But I'm not sure if there are other situations where a window is "off screen".) # I'm not sure how kCGWindowListOptionOnScreenOnly interferes with this. # TODO: This class doesn't work yet. I've copied the Win32Window class and will make adjustments as needed here. # TODO fix this, this is a LP_c_long insead of an int. # Setting _left directly to skip the onRead. # Setting _top directly to skip the onRead. # Setting _width directly to skip the onRead. # Setting _height directly to skip the onRead. Closes this window. This may trigger "Are you sure you want to
quit?" dialogs or other actions that prevent the window from
actually closing. This is identical to clicking the X button on the
window. Minimizes this window. Maximizes this window. If maximized or minimized, restores the window to it's normal size. Activate this window and make it the foreground window. Resizes the window relative to its current size. Resizes the window to a new width and height. Moves the window relative to its current position. Moves the window to new coordinates on the screen. Returns True if the window is currently minimized. Returns True if the window is currently maximized. Returns True if the window is currently the active, foreground window. Returns the window title as a string. | 3.144625 | 3 |
picasso/server/helper.py | ajasja/picasso | 0 | 6618655 | import picasso.io
import picasso.postprocess
import os
import numpy as np
from sqlalchemy import create_engine
import pandas as pd
import streamlit as st
import time
def _db_filename():
home = os.path.expanduser("~")
return os.path.abspath(os.path.join(home, ".picasso", "app.db"))
def fetch_db():
try:
engine = create_engine("sqlite:///" + _db_filename(), echo=False)
df = pd.read_sql_table("files", con=engine)
df = df.sort_values("file_created")
except ValueError:
df = pd.DataFrame()
return df
def fetch_watcher():
try:
engine = create_engine("sqlite:///" + _db_filename(), echo=False)
df = pd.read_sql_table("watcher", con=engine)
except ValueError:
df = pd.DataFrame()
return df
def refresh(to_wait: int):
"""
Utility function that waits for a given amount and then restarts streamlit.
"""
ref = st.empty()
for i in range(to_wait):
ref.write(f"Refreshing in {to_wait-i} s")
time.sleep(1)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
| import picasso.io
import picasso.postprocess
import os
import numpy as np
from sqlalchemy import create_engine
import pandas as pd
import streamlit as st
import time
def _db_filename():
home = os.path.expanduser("~")
return os.path.abspath(os.path.join(home, ".picasso", "app.db"))
def fetch_db():
try:
engine = create_engine("sqlite:///" + _db_filename(), echo=False)
df = pd.read_sql_table("files", con=engine)
df = df.sort_values("file_created")
except ValueError:
df = pd.DataFrame()
return df
def fetch_watcher():
try:
engine = create_engine("sqlite:///" + _db_filename(), echo=False)
df = pd.read_sql_table("watcher", con=engine)
except ValueError:
df = pd.DataFrame()
return df
def refresh(to_wait: int):
"""
Utility function that waits for a given amount and then restarts streamlit.
"""
ref = st.empty()
for i in range(to_wait):
ref.write(f"Refreshing in {to_wait-i} s")
time.sleep(1)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
| en | 0.757187 | Utility function that waits for a given amount and then restarts streamlit. | 2.697062 | 3 |
templates/namespace/namespace_test.py | threefoldtech/0-templates | 1 | 6618656 | <gh_stars>1-10
from unittest.mock import MagicMock
import os
import pytest
from namespace import Namespace
from zerorobot.template.state import StateCheckError
from zerorobot.service_collection import ServiceNotFoundError
from JumpscaleZrobot.test.utils import ZrobotBaseTest, task_mock
class TestNamespaceTemplate(ZrobotBaseTest):
@classmethod
def setUpClass(cls):
super().preTest(os.path.dirname(__file__), Namespace)
cls.valid_data = {
'diskType': 'HDD',
'mode': 'user',
'password': '<PASSWORD>',
'public': False,
'size': 20,
'nsName': '',
}
def test_invalid_data(self):
with pytest.raises(ValueError, message='template should fail to instantiate if data dict is missing the size'):
data = self.valid_data.copy()
data.pop('size')
ns = Namespace(name='namespace', data=data)
ns.api.services.get = MagicMock()
ns.validate()
def test_no_node_installed(self):
with pytest.raises(RuntimeError, message='template should fail to install if no service node is installed'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock(side_effect=ServiceNotFoundError)
ns.validate()
with pytest.raises(RuntimeError, message='template should fail to install if no service node is installed'):
ns = Namespace(name='namespace', data=self.valid_data)
node = MagicMock()
node.state.check = MagicMock(side_effect=StateCheckError)
ns.api.services.get = MagicMock(return_value=node)
ns.validate()
def test_valid_data(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock()
ns.validate()
data = self.valid_data.copy()
data['zerodb'] = ''
data['nsName'] = ''
assert ns.data == data
def test_zerodb_property(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock(return_value='zerodb')
assert ns._zerodb == 'zerodb'
def test_install(self):
ns = Namespace(name='namespace', data=self.valid_data)
node = MagicMock()
node.schedule_action = MagicMock(return_value=task_mock(('instance', 'nsName')))
ns.api = MagicMock()
ns.api.services.get = MagicMock(return_value=node)
args = {
'disktype': ns.data['diskType'].upper(),
'mode': ns.data['mode'],
'password': ns.data['password'],
'public': ns.data['public'],
'ns_size': ns.data['size'],
'name': ns.data['nsName']
}
ns.install()
node.schedule_action.assert_called_once_with('create_zdb_namespace', args)
ns.state.check('actions', 'install', 'ok')
assert ns.data['nsName'] == 'nsName'
assert ns.data['zerodb'] == 'instance'
def test_info_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.info()
def test_info(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
task = task_mock('info')
ns._zerodb.schedule_action = MagicMock(return_value=task)
assert ns.info() == 'info'
ns._zerodb.schedule_action.assert_called_once_with('namespace_info', args={'name': ns.data['nsName']})
def test_uninstall_without_install(self):
with pytest.raises(StateCheckError, message='Executing uninstall action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.uninstall()
def test_uninstall(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns.uninstall()
ns._zerodb.schedule_action.assert_called_once_with('namespace_delete', args={'name': 'nsName'})
def test_connection_info_without_install(self):
with pytest.raises(StateCheckError, message='Executing connection_info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.connection_info()
def test_connection_info(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.state.set('actions', 'install', 'ok')
ns.state.set('status', 'running', 'ok')
ns.api = MagicMock()
result = {'ip': '127.0.0.1', 'port': 9900}
task = task_mock(result)
ns._zerodb.schedule_action = MagicMock(return_value=task)
assert ns.connection_info() == result
ns._zerodb.schedule_action.assert_called_once_with('connection_info')
def test_url_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.url()
def test_url(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns._zerodb.schedule_action = MagicMock(return_value=task_mock('url'))
assert ns.url() == 'url'
ns._zerodb.schedule_action.assert_called_once_with('namespace_url', args={'name': 'nsName'})
def test_private_url_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.url()
def test_private_url(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns._zerodb.schedule_action = MagicMock(return_value=task_mock('url'))
assert ns.private_url() == 'url'
ns._zerodb.schedule_action.assert_called_once_with('namespace_private_url', args={'name': 'nsName'})
| from unittest.mock import MagicMock
import os
import pytest
from namespace import Namespace
from zerorobot.template.state import StateCheckError
from zerorobot.service_collection import ServiceNotFoundError
from JumpscaleZrobot.test.utils import ZrobotBaseTest, task_mock
class TestNamespaceTemplate(ZrobotBaseTest):
@classmethod
def setUpClass(cls):
super().preTest(os.path.dirname(__file__), Namespace)
cls.valid_data = {
'diskType': 'HDD',
'mode': 'user',
'password': '<PASSWORD>',
'public': False,
'size': 20,
'nsName': '',
}
def test_invalid_data(self):
with pytest.raises(ValueError, message='template should fail to instantiate if data dict is missing the size'):
data = self.valid_data.copy()
data.pop('size')
ns = Namespace(name='namespace', data=data)
ns.api.services.get = MagicMock()
ns.validate()
def test_no_node_installed(self):
with pytest.raises(RuntimeError, message='template should fail to install if no service node is installed'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock(side_effect=ServiceNotFoundError)
ns.validate()
with pytest.raises(RuntimeError, message='template should fail to install if no service node is installed'):
ns = Namespace(name='namespace', data=self.valid_data)
node = MagicMock()
node.state.check = MagicMock(side_effect=StateCheckError)
ns.api.services.get = MagicMock(return_value=node)
ns.validate()
def test_valid_data(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock()
ns.validate()
data = self.valid_data.copy()
data['zerodb'] = ''
data['nsName'] = ''
assert ns.data == data
def test_zerodb_property(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.api.services.get = MagicMock(return_value='zerodb')
assert ns._zerodb == 'zerodb'
def test_install(self):
ns = Namespace(name='namespace', data=self.valid_data)
node = MagicMock()
node.schedule_action = MagicMock(return_value=task_mock(('instance', 'nsName')))
ns.api = MagicMock()
ns.api.services.get = MagicMock(return_value=node)
args = {
'disktype': ns.data['diskType'].upper(),
'mode': ns.data['mode'],
'password': ns.data['password'],
'public': ns.data['public'],
'ns_size': ns.data['size'],
'name': ns.data['nsName']
}
ns.install()
node.schedule_action.assert_called_once_with('create_zdb_namespace', args)
ns.state.check('actions', 'install', 'ok')
assert ns.data['nsName'] == 'nsName'
assert ns.data['zerodb'] == 'instance'
def test_info_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.info()
def test_info(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
task = task_mock('info')
ns._zerodb.schedule_action = MagicMock(return_value=task)
assert ns.info() == 'info'
ns._zerodb.schedule_action.assert_called_once_with('namespace_info', args={'name': ns.data['nsName']})
def test_uninstall_without_install(self):
with pytest.raises(StateCheckError, message='Executing uninstall action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.uninstall()
def test_uninstall(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns.uninstall()
ns._zerodb.schedule_action.assert_called_once_with('namespace_delete', args={'name': 'nsName'})
def test_connection_info_without_install(self):
with pytest.raises(StateCheckError, message='Executing connection_info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.connection_info()
def test_connection_info(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.state.set('actions', 'install', 'ok')
ns.state.set('status', 'running', 'ok')
ns.api = MagicMock()
result = {'ip': '127.0.0.1', 'port': 9900}
task = task_mock(result)
ns._zerodb.schedule_action = MagicMock(return_value=task)
assert ns.connection_info() == result
ns._zerodb.schedule_action.assert_called_once_with('connection_info')
def test_url_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.url()
def test_url(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns._zerodb.schedule_action = MagicMock(return_value=task_mock('url'))
assert ns.url() == 'url'
ns._zerodb.schedule_action.assert_called_once_with('namespace_url', args={'name': 'nsName'})
def test_private_url_without_install(self):
with pytest.raises(StateCheckError, message='Executing info action without install should raise an error'):
ns = Namespace(name='namespace', data=self.valid_data)
ns.url()
def test_private_url(self):
ns = Namespace(name='namespace', data=self.valid_data)
ns.data['nsName'] = 'nsName'
ns.state.set('actions', 'install', 'ok')
ns.api = MagicMock()
ns._zerodb.schedule_action = MagicMock(return_value=task_mock('url'))
assert ns.private_url() == 'url'
ns._zerodb.schedule_action.assert_called_once_with('namespace_private_url', args={'name': 'nsName'}) | none | 1 | 2.116381 | 2 | |
tests/dsp/conftest.py | drzraf/pulseviz.py | 25 | 6618657 | <filename>tests/dsp/conftest.py
import pytest
class FakeLibPulseSimple(object):
# TODO: Add function that allows to inject some data that will be returned by pa_simple_read.
def pa_usec_to_bytes(a, b):
return 1337
def pa_simple_new(a, b, c, d, e, f, g, h, i):
return 1337
def pa_simple_read(a, data, size, error):
data[:] = (type(data))(0) # Yes, this actually works!
return 0
def pa_simple_flush(a, b):
return 0
def pa_simple_free(a):
return 0
@pytest.fixture
def fixture_fake_simple_client(monkeypatch):
"""
Patches the simple_client module so that it does not require a real PulseAudio server to work.
"""
import pulseviz.pulseaudio.simple_client
monkeypatch.setattr(pulseviz.pulseaudio.simple_client, '_libpulse_simple', FakeLibPulseSimple)
| <filename>tests/dsp/conftest.py
import pytest
class FakeLibPulseSimple(object):
# TODO: Add function that allows to inject some data that will be returned by pa_simple_read.
def pa_usec_to_bytes(a, b):
return 1337
def pa_simple_new(a, b, c, d, e, f, g, h, i):
return 1337
def pa_simple_read(a, data, size, error):
data[:] = (type(data))(0) # Yes, this actually works!
return 0
def pa_simple_flush(a, b):
return 0
def pa_simple_free(a):
return 0
@pytest.fixture
def fixture_fake_simple_client(monkeypatch):
"""
Patches the simple_client module so that it does not require a real PulseAudio server to work.
"""
import pulseviz.pulseaudio.simple_client
monkeypatch.setattr(pulseviz.pulseaudio.simple_client, '_libpulse_simple', FakeLibPulseSimple)
| en | 0.849216 | # TODO: Add function that allows to inject some data that will be returned by pa_simple_read. # Yes, this actually works! Patches the simple_client module so that it does not require a real PulseAudio server to work. | 2.153762 | 2 |
flask_login_template/app.py | sonalimahajan12/Automation-scripts | 496 | 6618658 | <filename>flask_login_template/app.py<gh_stars>100-1000
from flask import Flask, render_template, flash, redirect, session
from functools import wraps
import setup.config as config
from user import User
app = Flask(__name__, template_folder="templates", static_folder='static')
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized. Please login.', 'danger')
return redirect('/')
return wrap
@app.route('/', methods=['POST', 'GET'])
def register():
return User().register()
@app.route('/logout')
def logout():
return User().logout()
@app.route('/dashboard')
@login_required
def dashboard():
return render_template('dashboard.html')
if __name__ == "__main__":
app.secret_key = config.secret_key
app.run()
| <filename>flask_login_template/app.py<gh_stars>100-1000
from flask import Flask, render_template, flash, redirect, session
from functools import wraps
import setup.config as config
from user import User
app = Flask(__name__, template_folder="templates", static_folder='static')
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized. Please login.', 'danger')
return redirect('/')
return wrap
@app.route('/', methods=['POST', 'GET'])
def register():
return User().register()
@app.route('/logout')
def logout():
return User().logout()
@app.route('/dashboard')
@login_required
def dashboard():
return render_template('dashboard.html')
if __name__ == "__main__":
app.secret_key = config.secret_key
app.run()
| none | 1 | 2.428944 | 2 | |
bash/containers.py | oonray/PalenightEverything | 0 | 6618659 | #!python3
import os, system, subprocess
containers="/opt/Docker/"
plex=$containers+"Plex/"
minecraft=$containers+"Minecraft/"
def start_containers(path):
subprocess.Popen()
if __name__ == "__name__":
pass
| #!python3
import os, system, subprocess
containers="/opt/Docker/"
plex=$containers+"Plex/"
minecraft=$containers+"Minecraft/"
def start_containers(path):
subprocess.Popen()
if __name__ == "__name__":
pass
| none | 1 | 2.12799 | 2 | |
pyelong/api/ihotel/detail.py | DeanThompson/pyelong | 1 | 6618660 | # -*- coding: utf-8 -*-
from ..base import ApiBase
class Detail(ApiBase):
_category = 'ihotel'
def avail(self, **kwargs):
"""国际酒店详情,方法名:ihotel.detail.avail
文档
~~~~
- http://open.elong.com/wiki/Ihotel.detail
NOTE
~~~~
- 请求的入出参数和ihotel.detail一样
- 属于实时数据接口,将不使用缓存
- 实时接口访问控制频次更严格(每秒5次)
"""
return self._request('avail', raw=True, **kwargs)
def __call__(self, **kwargs):
"""国际酒店详情,方法名:ihotel.detail
文档
~~~~
- http://open.elong.com/wiki/Ihotel.detail
NOTE
~~~~
本接口是基于缓存的响应速度快,适用于本地缓存数据或快速展示一个粗略的价格,入参有限制:
- 只能一个房间
- 房间大人个数是1或者2,不能带小孩
- 传入 productId 参数,将不受上面的限制并且结果不使用缓存
"""
use_detail_avail = False
room_group = kwargs.get('roomGroup', [])
if room_group:
if len(room_group) > 1:
use_detail_avail = True
else:
room = room_group[0]
if room['childAges'] or room['numberOfAdults'] > 2:
use_detail_avail = True
if use_detail_avail:
return self._request('avail', raw=True, **kwargs)
return self._request('', raw=True, **kwargs)
| # -*- coding: utf-8 -*-
from ..base import ApiBase
class Detail(ApiBase):
_category = 'ihotel'
def avail(self, **kwargs):
"""国际酒店详情,方法名:ihotel.detail.avail
文档
~~~~
- http://open.elong.com/wiki/Ihotel.detail
NOTE
~~~~
- 请求的入出参数和ihotel.detail一样
- 属于实时数据接口,将不使用缓存
- 实时接口访问控制频次更严格(每秒5次)
"""
return self._request('avail', raw=True, **kwargs)
def __call__(self, **kwargs):
"""国际酒店详情,方法名:ihotel.detail
文档
~~~~
- http://open.elong.com/wiki/Ihotel.detail
NOTE
~~~~
本接口是基于缓存的响应速度快,适用于本地缓存数据或快速展示一个粗略的价格,入参有限制:
- 只能一个房间
- 房间大人个数是1或者2,不能带小孩
- 传入 productId 参数,将不受上面的限制并且结果不使用缓存
"""
use_detail_avail = False
room_group = kwargs.get('roomGroup', [])
if room_group:
if len(room_group) > 1:
use_detail_avail = True
else:
room = room_group[0]
if room['childAges'] or room['numberOfAdults'] > 2:
use_detail_avail = True
if use_detail_avail:
return self._request('avail', raw=True, **kwargs)
return self._request('', raw=True, **kwargs)
| zh | 0.876156 | # -*- coding: utf-8 -*- 国际酒店详情,方法名:ihotel.detail.avail 文档 ~~~~ - http://open.elong.com/wiki/Ihotel.detail NOTE ~~~~ - 请求的入出参数和ihotel.detail一样 - 属于实时数据接口,将不使用缓存 - 实时接口访问控制频次更严格(每秒5次) 国际酒店详情,方法名:ihotel.detail 文档 ~~~~ - http://open.elong.com/wiki/Ihotel.detail NOTE ~~~~ 本接口是基于缓存的响应速度快,适用于本地缓存数据或快速展示一个粗略的价格,入参有限制: - 只能一个房间 - 房间大人个数是1或者2,不能带小孩 - 传入 productId 参数,将不受上面的限制并且结果不使用缓存 | 2.432672 | 2 |
controller.py | ploiu/PyGame-Controller | 1 | 6618661 | <reponame>ploiu/PyGame-Controller
import pygame
class Controller:
"""
Abstract class to make using pygame's joysticks easier.
The goal of this class is to be pretty generic, so as to allow for specific controller types to be created from this class.
"""
def __init__(self, joystickId):
"""
initializes this controller with the passed joystickId
:param joystickId: the number of the joystick you want to initialize this controller with. must be at least 0 and less than the number of joysticks obtained by `pygame.joystick.get_count()`
"""
# create a joystick from that passed joystickId and initialize that joystick
self.__joystick = pygame.joystick.Joystick(joystickId)
self.__joystick.init()
# create a dict for the button mappings
self.__buttonMappings = {}
"""
a dict of integer -> dict values.
The integer key is the number associated with the button on our joystick,
and the dict value contains a string of either 'press' or 'release',
the value of that key being a 0-argument function to be called when that button is either pressed or released.
for example, and entry may look like this::
{0: {
'press': lambda: print('pressed'),
'release': lambda: print('released')
}
}
"""
# create a dict for the directional mappings
self.__directionalMappings = {}
"""
a dict of integer -> dict values.
The integer key is the number associated with the id of the corresponding axis on our joystick,
and the dict value contains a string of either 'positive', 'negative', or 'release'.
For each of those keys, the value is a 0-argument function that gets called when that axis is pushed in that direction.
for example, and entry may look like this::
{0: {
'positive': lambda: print('moved in the positive direction'),
'negative': lambda: print('moved in the negative direction'),
'release': lambda: print('released axis')
}
}
"""
# set the id of this controller to the one passed in
self.__joystickId = joystickId
# an empty function to default to for button commands
self.__UNMAPPED_COMMAND = (lambda: None)
def get_controllerNumber(self):
return self.__joystickId
def get_buttonState(self, buttonId):
"""
Wrapper method for `pygame.joystick.get_button()`
:param buttonId: the number assigned to the button we are getting the state of
:return: the value returned by `pygame.joystick.get_button()`
"""
return self.__joystick.get_button(buttonId)
def map_button(self, buttonId, pressCommand = None, releaseCommand = None):
"""
Used to bind a function to the pressing and releasing of a button with the passed buttonId.
:param buttonId: the number associated with the button on the controller we are mapping the functions to
:param pressCommand: a 0-length function to be called when the button is pressed down. If `None` is passed in, then the button's press command will be unmapped
:param releaseCommand: a 0-length function to be called when the button is released. If `None` is passed in, then the button's release command will be unmapped
"""
# delete the existing mapping if it exists, as no matter how this function is called, the mappings would be overwritten
if buttonId in self.__buttonMappings:
del self.__buttonMappings[buttonId]
# if press command is None, turn it into a lambda that does nothing
pressCommand = self.__UNMAPPED_COMMAND if pressCommand is None else pressCommand
# ... same with release command
releaseCommand = self.__UNMAPPED_COMMAND if releaseCommand is None else releaseCommand
# now add the mapping
self.__buttonMappings[buttonId] = {'press': pressCommand, 'release': releaseCommand}
def map_directionalButton(self, axisId, positiveCommand = None, negativeCommand = None, releaseCommand = None):
"""
Used to bind a function to the change in state of one of the controller's axes (e.g. a directional pad).
Each axis has 2 directions, positive and negative. In pygame, they are represented with a positive number and a negative number (approximately 1/-1).
Pygame also has a state for when an axis is released, denoted as 0
:param axisId: the number associated with the axis on the controller we are mapping functions to
:param positiveCommand: a 0-length function to be called when the axis's positive end is pushed. If `None` is passed in, then the positive command for that action will be unmapped
:param negativeCommand: a 0-length function to be called when the axis's negative end is pushed. If `None` is passed in, then the negative command for that action will be unmapped
:param releaseCommand: a 0-length function to be called when one of the axis's ends was released. If `None` is passed in, then the release command for that action will be unmapped
"""
# delete the existing mappings for the axis if they already exist, as no matter what they will be overwritten
if axisId in self.__directionalMappings:
del self.__directionalMappings[axisId]
# for each of the commands that are None, assign an empty lambda to them
positiveCommand = self.__UNMAPPED_COMMAND if positiveCommand is None else positiveCommand
negativeCommand = self.__UNMAPPED_COMMAND if negativeCommand is None else negativeCommand
releaseCommand = self.__UNMAPPED_COMMAND if releaseCommand is None else releaseCommand
# now add the mappings for the axis
self.__directionalMappings[axisId] = {'positive': positiveCommand, 'negative': negativeCommand, 'release': releaseCommand}
def press_button(self, buttonId):
"""
Calls the function mapped to when the button with the associated buttonId is pressed. If the button has nothing mapped to it being pressed, nothing will happen
:param buttonId: the id of the button being pressed
"""
# first check if the buttonId is in our mappings before we try to access it
if buttonId in self.__buttonMappings:
# call the associated function
self.__buttonMappings[buttonId]['press']()
def release_button(self, buttonId):
"""
Calls the function mapped to when the button with the associated buttonId is released. If the button has nothing mapped to it being released, nothing will happen
:param buttonId: the id of the button being released
"""
# first check if the buttonId is in our mappings before we try to access it
if buttonId in self.__buttonMappings:
# call the associated function
self.__buttonMappings[buttonId]['release']()
def press_directionalButton(self, axisId, direction):
"""
Executes the function bound to the passed direction on the axis with the associated axisId. If there is no function bound to that direction or axis, nothing is called.
:param axisId: the id of the axis being pushed in a direction
:param direction: a positive number, negative number, or 0. This number represents the state of the axis, with a positive number for the positive end, a negative number for the negative end, and 0 for one of the ends being released
"""
if axisId in self.__directionalMappings:
# determine which function to call based on the state of the axis (negative, 0, or positive)
if direction < 0:
self.__directionalMappings[axisId]['negative']()
elif direction == 0:
self.__directionalMappings[axisId]['release']()
else:
self.__directionalMappings[axisId]['positive']()
| import pygame
class Controller:
"""
Abstract class to make using pygame's joysticks easier.
The goal of this class is to be pretty generic, so as to allow for specific controller types to be created from this class.
"""
def __init__(self, joystickId):
"""
initializes this controller with the passed joystickId
:param joystickId: the number of the joystick you want to initialize this controller with. must be at least 0 and less than the number of joysticks obtained by `pygame.joystick.get_count()`
"""
# create a joystick from that passed joystickId and initialize that joystick
self.__joystick = pygame.joystick.Joystick(joystickId)
self.__joystick.init()
# create a dict for the button mappings
self.__buttonMappings = {}
"""
a dict of integer -> dict values.
The integer key is the number associated with the button on our joystick,
and the dict value contains a string of either 'press' or 'release',
the value of that key being a 0-argument function to be called when that button is either pressed or released.
for example, and entry may look like this::
{0: {
'press': lambda: print('pressed'),
'release': lambda: print('released')
}
}
"""
# create a dict for the directional mappings
self.__directionalMappings = {}
"""
a dict of integer -> dict values.
The integer key is the number associated with the id of the corresponding axis on our joystick,
and the dict value contains a string of either 'positive', 'negative', or 'release'.
For each of those keys, the value is a 0-argument function that gets called when that axis is pushed in that direction.
for example, and entry may look like this::
{0: {
'positive': lambda: print('moved in the positive direction'),
'negative': lambda: print('moved in the negative direction'),
'release': lambda: print('released axis')
}
}
"""
# set the id of this controller to the one passed in
self.__joystickId = joystickId
# an empty function to default to for button commands
self.__UNMAPPED_COMMAND = (lambda: None)
def get_controllerNumber(self):
return self.__joystickId
def get_buttonState(self, buttonId):
"""
Wrapper method for `pygame.joystick.get_button()`
:param buttonId: the number assigned to the button we are getting the state of
:return: the value returned by `pygame.joystick.get_button()`
"""
return self.__joystick.get_button(buttonId)
def map_button(self, buttonId, pressCommand = None, releaseCommand = None):
"""
Used to bind a function to the pressing and releasing of a button with the passed buttonId.
:param buttonId: the number associated with the button on the controller we are mapping the functions to
:param pressCommand: a 0-length function to be called when the button is pressed down. If `None` is passed in, then the button's press command will be unmapped
:param releaseCommand: a 0-length function to be called when the button is released. If `None` is passed in, then the button's release command will be unmapped
"""
# delete the existing mapping if it exists, as no matter how this function is called, the mappings would be overwritten
if buttonId in self.__buttonMappings:
del self.__buttonMappings[buttonId]
# if press command is None, turn it into a lambda that does nothing
pressCommand = self.__UNMAPPED_COMMAND if pressCommand is None else pressCommand
# ... same with release command
releaseCommand = self.__UNMAPPED_COMMAND if releaseCommand is None else releaseCommand
# now add the mapping
self.__buttonMappings[buttonId] = {'press': pressCommand, 'release': releaseCommand}
def map_directionalButton(self, axisId, positiveCommand = None, negativeCommand = None, releaseCommand = None):
"""
Used to bind a function to the change in state of one of the controller's axes (e.g. a directional pad).
Each axis has 2 directions, positive and negative. In pygame, they are represented with a positive number and a negative number (approximately 1/-1).
Pygame also has a state for when an axis is released, denoted as 0
:param axisId: the number associated with the axis on the controller we are mapping functions to
:param positiveCommand: a 0-length function to be called when the axis's positive end is pushed. If `None` is passed in, then the positive command for that action will be unmapped
:param negativeCommand: a 0-length function to be called when the axis's negative end is pushed. If `None` is passed in, then the negative command for that action will be unmapped
:param releaseCommand: a 0-length function to be called when one of the axis's ends was released. If `None` is passed in, then the release command for that action will be unmapped
"""
# delete the existing mappings for the axis if they already exist, as no matter what they will be overwritten
if axisId in self.__directionalMappings:
del self.__directionalMappings[axisId]
# for each of the commands that are None, assign an empty lambda to them
positiveCommand = self.__UNMAPPED_COMMAND if positiveCommand is None else positiveCommand
negativeCommand = self.__UNMAPPED_COMMAND if negativeCommand is None else negativeCommand
releaseCommand = self.__UNMAPPED_COMMAND if releaseCommand is None else releaseCommand
# now add the mappings for the axis
self.__directionalMappings[axisId] = {'positive': positiveCommand, 'negative': negativeCommand, 'release': releaseCommand}
def press_button(self, buttonId):
"""
Calls the function mapped to when the button with the associated buttonId is pressed. If the button has nothing mapped to it being pressed, nothing will happen
:param buttonId: the id of the button being pressed
"""
# first check if the buttonId is in our mappings before we try to access it
if buttonId in self.__buttonMappings:
# call the associated function
self.__buttonMappings[buttonId]['press']()
def release_button(self, buttonId):
"""
Calls the function mapped to when the button with the associated buttonId is released. If the button has nothing mapped to it being released, nothing will happen
:param buttonId: the id of the button being released
"""
# first check if the buttonId is in our mappings before we try to access it
if buttonId in self.__buttonMappings:
# call the associated function
self.__buttonMappings[buttonId]['release']()
def press_directionalButton(self, axisId, direction):
"""
Executes the function bound to the passed direction on the axis with the associated axisId. If there is no function bound to that direction or axis, nothing is called.
:param axisId: the id of the axis being pushed in a direction
:param direction: a positive number, negative number, or 0. This number represents the state of the axis, with a positive number for the positive end, a negative number for the negative end, and 0 for one of the ends being released
"""
if axisId in self.__directionalMappings:
# determine which function to call based on the state of the axis (negative, 0, or positive)
if direction < 0:
self.__directionalMappings[axisId]['negative']()
elif direction == 0:
self.__directionalMappings[axisId]['release']()
else:
self.__directionalMappings[axisId]['positive']() | en | 0.887948 | Abstract class to make using pygame's joysticks easier. The goal of this class is to be pretty generic, so as to allow for specific controller types to be created from this class. initializes this controller with the passed joystickId :param joystickId: the number of the joystick you want to initialize this controller with. must be at least 0 and less than the number of joysticks obtained by `pygame.joystick.get_count()` # create a joystick from that passed joystickId and initialize that joystick # create a dict for the button mappings a dict of integer -> dict values. The integer key is the number associated with the button on our joystick, and the dict value contains a string of either 'press' or 'release', the value of that key being a 0-argument function to be called when that button is either pressed or released. for example, and entry may look like this:: {0: { 'press': lambda: print('pressed'), 'release': lambda: print('released') } } # create a dict for the directional mappings a dict of integer -> dict values. The integer key is the number associated with the id of the corresponding axis on our joystick, and the dict value contains a string of either 'positive', 'negative', or 'release'. For each of those keys, the value is a 0-argument function that gets called when that axis is pushed in that direction. 
for example, and entry may look like this:: {0: { 'positive': lambda: print('moved in the positive direction'), 'negative': lambda: print('moved in the negative direction'), 'release': lambda: print('released axis') } } # set the id of this controller to the one passed in # an empty function to default to for button commands Wrapper method for `pygame.joystick.get_button()` :param buttonId: the number assigned to the button we are getting the state of :return: the value returned by `pygame.joystick.get_button()` Used to bind a function to the pressing and releasing of a button with the passed buttonId. :param buttonId: the number associated with the button on the controller we are mapping the functions to :param pressCommand: a 0-length function to be called when the button is pressed down. If `None` is passed in, then the button's press command will be unmapped :param releaseCommand: a 0-length function to be called when the button is released. If `None` is passed in, then the button's release command will be unmapped # delete the existing mapping if it exists, as no matter how this function is called, the mappings would be overwritten # if press command is None, turn it into a lambda that does nothing # ... same with release command # now add the mapping Used to bind a function to the change in state of one of the controller's axes (e.g. a directional pad). Each axis has 2 directions, positive and negative. In pygame, they are represented with a positive number and a negative number (approximately 1/-1). Pygame also has a state for when an axis is released, denoted as 0 :param axisId: the number associated with the axis on the controller we are mapping functions to :param positiveCommand: a 0-length function to be called when the axis's positive end is pushed. If `None` is passed in, then the positive command for that action will be unmapped :param negativeCommand: a 0-length function to be called when the axis's negative end is pushed. 
If `None` is passed in, then the negative command for that action will be unmapped :param releaseCommand: a 0-length function to be called when one of the axis's ends was released. If `None` is passed in, then the release command for that action will be unmapped # delete the existing mappings for the axis if they already exist, as no matter what they will be overwritten # for each of the commands that are None, assign an empty lambda to them # now add the mappings for the axis Calls the function mapped to when the button with the associated buttonId is pressed. If the button has nothing mapped to it being pressed, nothing will happen :param buttonId: the id of the button being pressed # first check if the buttonId is in our mappings before we try to access it # call the associated function Calls the function mapped to when the button with the associated buttonId is released. If the button has nothing mapped to it being released, nothing will happen :param buttonId: the id of the button being released # first check if the buttonId is in our mappings before we try to access it # call the associated function Executes the function bound to the passed direction on the axis with the associated axisId. If there is no function bound to that direction or axis, nothing is called. :param axisId: the id of the axis being pushed in a direction :param direction: a positive number, negative number, or 0. This number represents the state of the axis, with a positive number for the positive end, a negative number for the negative end, and 0 for one of the ends being released # determine which function to call based on the state of the axis (negative, 0, or positive) | 4.003372 | 4 |
src/crud.py | LeoPFreitas/projeto-crud-ap1 | 0 | 6618662 | <reponame>LeoPFreitas/projeto-crud-ap1
import os
# =====================================================================
# MENUS
def mostrarMenu():
print('=' * 60)
print("CRUD Academia: \n")
print("1 - Submenu de Alunos.")
print("2 - Submenu de Modalidades.")
print("3 - Submenu de Práticas.")
print("4 - Submenu de Relatórios.")
print("5 - Sair.")
def submenuOps():
print("1. Listar todos")
print("2. Listar um")
print("3. Incluir")
print("4. Alterar")
print("5. Excluir")
def submenuAlunos():
print("\nSubmenu Alunos: \n")
submenuOps()
def submenuModalidades():
print("\nSubmenu Modalidades: \n")
submenuOps()
def submenuPraticas():
print("\nSubmenu Práticas: \n")
subOpsPratica()
def submenurRelatorios():
print("\nSubmenu Relatórios: \n")
print("1. Mostrar todos os dados de todas as modalidades que são oferecidas mais do que 2 vezes na semana\n")
print("2. Mostrar todos os dias-horários-professores de uma dada modalidade fornecida pelo usuário\n")
print("3. Mostrar todos os dados de todos os alunos que praticam mais do que X modalidades diferentes na academia, onde X deve ser fornecido pelo usuário. Mostrar também quais modalidades o aluno pratica.\n")
def subOpsPratica():
print("1. Listar todos alunos\n")
print("2. Listar aluno\n")
print("3. Incluir aluno em modalidade\n")
print("4. Alterar dia-hora de prática\n")
print("5. Excluir aluno de 1 prática\n")
print("6. Excluir aluno de todas práticas\n")
# =====================================================================
# ARQUIVO
def salvaAlunos(dic):
ref = open('alunos.txt', 'w')
if len(dic):
for chave in dic:
cpf = str(chave)
valor = list(dic[chave])
nome = valor[0]
data = valor[1]
sexo = valor[2]
peso = valor[3]
altura = valor[4]
email = valor[5] # lista
tel = valor[6] # lista
nome = nome.split()
nome = '-'.join(nome)
email = '-'.join(email)
tel = '-'.join(tel)
linha = str(cpf) + '\t' + str(nome) + '\t' + str(data) + '\t' + str(sexo) + \
'\t' + str(peso) + '\t' + str(altura) + \
'\t' + str(email) + '\t' + str(tel) + '\n'
ref.write(linha)
ref.close()
def lerAlunos(dic):
ref = open('alunos.txt', 'r')
for linha in ref:
linha = linha.split()
cpf = linha[0]
nome = linha[1]
nasc = linha[2]
sexo = linha[3]
peso = linha[4]
altura = linha[5]
email = linha[6]
tel = linha[7]
email = email.split('-')
tel = tel.split('-')
chave = cpf
valor = (nome, nasc, sexo, peso, altura, email, tel)
x = {chave: valor}
dic.update(x)
ref.close()
def salvaModalidades(dic):
ref1 = open('modalidades.txt', 'w')
if len(dic):
for chave in dic:
cod = str(chave)
valor = list(dic[chave])
# receber descrição
desc = valor[0]
desc = desc.split()
desc = '-'.join(desc)
# receber duração
dura = valor[1]
# receber dias
dias = valor[2]
dias = '-'.join(dias)
# receber horas
horas = valor[3]
horas = '-'.join(horas)
# receber profs
profs = valor[4]
for i in range(len(profs)):
profs[i] = profs[i].split()
profs[i] = '+'.join(profs[i])
profs = '-'.join(profs)
# valores
val = valor[5]
# linha
linha = str(cod) + '\t' + str(desc) + '\t' + str(dura) + '\t' + \
str(dias) + '\t' + str(horas) + \
'\t' + str(profs) + '\t' + str(val) + '\n'
ref1.write(linha)
ref1.close()
def lerModalidades(dic):
ref1 = open('modalidades.txt', 'r')
for linha in ref1:
linha = linha.split()
cod = linha[0]
desc = linha[1]
dura = linha[2]
dias = linha[3]
horas = linha[4]
profs = linha[5]
val = linha[6]
desc = desc.split('-')
desc = ' '.join(desc)
dias = dias.split('-')
horas = horas.split('-')
# profs
profs = profs.split('-')
for i in range(len(profs)):
profs[i] = profs[i].split('+')
profs[i] = ' '.join(profs[i])
# update dic
chave = str(cod)
valor = [desc, dura, dias, horas, profs, val]
x = {chave: valor}
dic.update(x)
ref1.close()
def salvaPratica(lista):
ref = open('praticas.txt', 'w')
if len(lista):
for cpf in lista:
# Escreve o cpf do aluno
cpfAluno = cpf[0]
ref.write(cpfAluno + '\t')
# quantidade de modalidades que o cpf pratica
i = len(cpf) - 1
# Escrever cada modalidade
while i > 0:
string = '-'.join(cpf[i])
ref.write(string + '\t')
i -= 1
# mudou de cpf
ref.write('\n')
ref.close()
def lerPraticas(lista):
ref = open('praticas.txt', 'r')
for linha in ref:
aluno = []
# Receber cpf
listAluno = linha.split()
cpf = listAluno[0]
aluno.append(cpf)
# Separar modalidades em lista
mods = listAluno[1:]
for mod in mods:
mod = mod.split('-')
aluno.append(mod)
# carregar na lista principal
lista.append(aluno)
ref.close()
# =====================================================================
# FUNÇÕES ALUNOS
def isAluno(dic, cpf):
for chave in dic:
if cpf == chave:
return True
return False
def cadAluno(dic):
cpf = input("Digite o cpf: [xxxxxxxxxxx] ")
if isAluno(dic, cpf):
print("Aluno ja cadastrado.")
else:
# receber dados
nome = input("Digite o nome: ")
nasc = input("Digite o nascimento: [xx/xx/xxxx] ")
sexo = input("Digite o sexo: [M/F] ")
peso = input("Digite o peso: ")
altura = input("Digite a altura: ")
# lista de email
emails = []
email = input("Digite o primeiro e-mail: ")
while email != '':
emails.append(email)
email = input("Digite o próximo e-mail: ")
# lista de tel
tels = []
tel = input("Digite o telefone (sem espaço): ")
while tel != '':
tels.append(tel)
tel = input("Digite o próximo telefone (sem espaço): ")
# criar dic
chave = cpf
valor = (nome, nasc, sexo, peso, altura, emails, tels)
# gerar dic
x = {chave: valor}
dic.update(x)
print("Aluno cadastrado com sucesso!! \n\n")
def listarAlunoFormatado(dic):
    """Print every registered student with all stored fields on one line."""
    for cpf, dados in dic.items():
        nome, data, sexo, peso, altura, emails, tels = dados
        print('CPF:', cpf, 'NOME:', nome, 'DATA DE NASCIMENTO:', data, 'SEXO:', sexo,
              'PESO:', peso, 'ALTURA:', altura, 'EMAILS: ', ', '.join(emails),
              'TELEFONES:', ', '.join(tels))
def listarUmAluno(dic):
    """Prompt for a CPF and print that student's full record, or an error."""
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Segue dados do aluno: ")
        # Unpack the stored tuple field by field.
        string = dic[cpf]
        nome = string[0]
        data = string[1]
        sexo = string[2]
        peso = string[3]
        altura = string[4]
        emails = string[5]
        emails = ', '.join(emails)
        tels = string[6]
        tels = ', '.join(tels)
        print('CPF:', cpf, 'NOME:', nome, 'DATA DE NASCIMENTO:', data, 'SEXO:', sexo,
              'PESO:', peso, 'ALTURA:', altura, 'EMAILS: ', emails, 'TELEFONES:', tels)
    else:
        print("Aluno não existente.")
        print('=' * 60)
def alterarAluno(dic):
    """Prompt for a CPF and, when found, re-ask every field and overwrite the record.

    All fields are replaced wholesale; there is no way to keep an old value.
    """
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Vamos alterar o aluno.")
        # Collect the replacement data.
        nome = input("Digite o nome: ")
        nasc = input("Digite o nascimento: ")
        sexo = input("Digite o sexo: ")
        peso = input("Digite o peso: ")
        altura = input("Digite a altura: ")
        emails = []
        email = input("Digite o primeiro e-mail: ")
        while email != '':
            emails.append(email)
            email = input("Digite o próximo e-mail: ")
        # Phone list: stop on empty input.
        tels = []
        tel = input("Digite o telefone (sem espaço): ")
        while tel != '':
            tels.append(tel)
            tel = input("Digite o próximo telefone (sem espaço): ")
        # Overwrite the whole record under the same key.
        chave = cpf
        valor = (nome, nasc, sexo, peso, altura, emails, tels)
        x = {chave: valor}
        dic.update(x)
        print("Aluno atualizado com sucesso!!")
        print('=' * 60)
    else:
        print("Aluno inexistente.")
def excluirAluno(dic):
    """Prompt for a CPF and delete that student from the registry.

    Prints an error when the CPF is unknown; otherwise removes the
    entry in place and confirms. Fixes the user-facing typos
    "escluir"/"escluido" -> "excluir"/"excluido".
    """
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Vamos excluir o aluno do cadastro.")
        dic.pop(cpf)
        print("Aluno excluido! ")
        print('=' * 60)
    else:
        print("Aluno não existente.")
# =====================================================================
# FUNÇÕES MODALIDADE
def isModalidade(dic, cod):
    """Return True when *cod* is a registered modality key in *dic*.

    Idiom fix: a dict membership test replaces the manual key loop.
    """
    return cod in dic
def cadModalidades(dic):
    """Interactively register a new modality keyed by its code.

    Collects description, class length, open-ended lists of days, times
    and professors (an empty answer ends each list) and the price.
    Refuses duplicate codes; mutates *dic* in place.
    """
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        print("Modalidade já cadastrada. ")
    else:
        # Fixed fields (kept as raw strings).
        desc = input("Digite a descrição: ")
        dura = input("digite a duração da aula (minutos): ")
        # Days offered: show the accepted spellings, stop on empty input.
        dias = []
        print("Para dos dias de oferecimento utilize as seguintes opções")
        opDias = ['segunda', 'terça', 'quarta',
                  'quinta', 'sexta', 'sabado', 'domingo']
        for value in opDias:
            print(value)
        dia = input("\nDigite o primeiro dia: ")
        while dia != '':
            dias.append(dia)
            dia = input("Digite o proximo dia: ")
        # Time slots: stop on empty input.
        horas = []
        hora = input("\nDigite o primeiro horário [xx:xx]")
        while hora != '':
            horas.append(hora)
            hora = input("Digite o próximo horário [xx:xx]")
        # Professors: stop on empty input.
        profs = []
        prof = input("Digite o nome do professor responsável: ")
        while prof != '':
            profs.append(prof)
            prof = input("Digite o nome do próximo professor: ")
        # Price (raw string).
        val = input("Digite o valor da modalidade: ")
        # Store under the code.
        chave = cod
        valor = [desc, dura, dias, horas, profs, val]
        x = {chave: valor}
        dic.update(x)
        print("\n\nModalidade adicionada com sucesso!!")
def listarModalidades(dic):
    """Print every modality with its schedule, staff and price on one line."""
    for cod, dados in dic.items():
        desc, dura, dias, horas, profs, val = dados
        print('CODIGO:', cod, 'DESCRIÇÃO:', desc, 'DURAÇÃO:', dura,
              'DIAS OFERECIDOS:', ','.join(dias),
              'HORARIOS OFERECIDOS:', ','.join(horas),
              'PROFESSORES RESPONSÁVEIS:', ','.join(profs), 'VALOR:', val)
def listarUmaModalidade(dic):
    """Prompt for a code and print that modality's full record, or an error."""
    cod = input("Digite o código: ")
    if isModalidade(dic, cod):
        # Unpack the stored list field by field.
        string = dic[cod]
        desc = string[0]
        dura = string[1]
        dias = string[2]
        dias = ','.join(dias)
        horas = string[3]
        horas = ','.join(horas)
        profs = string[4]
        profs = ','.join(profs)
        val = string[5]
        print('CODIGO:', cod, 'DESCRIÇÃO:', desc, 'DURAÇÃO:', dura, 'DIAS OFERECIDOS:', dias,
              'HORARIOS OFERECIDOS:', horas, 'PROFESSORES RESPONSÁVEIS:', profs, 'VALOR:', val)
    else:
        print("Modalidade inexistente. ")
def alterarModalidade(dic):
    """Prompt for a code and, when found, re-ask every field and overwrite the modality."""
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        desc = input("Digite a descrição: ")
        dura = input("digite a duração da aula (minutos): ")
        # Days offered: show the accepted spellings, stop on empty input.
        dias = []
        print("Para dos dias de oferecimento utilize as seguintes opções")
        opDias = ['segunda', 'terça', 'quarta',
                  'quinta', 'sexta', 'sabado', 'domingo']
        for value in opDias:
            print(value)
        dia = input("\nDigite o primeiro dia: ")
        while dia != '':
            dias.append(dia)
            dia = input("Digite o proximo dia: ")
        # Time slots: stop on empty input.
        horas = []
        hora = input("\nDigite o primeiro horário [xx:xx]")
        while hora != '':
            horas.append(hora)
            hora = input("Digite o próximo horário [xx:xx]")
        # Professors: stop on empty input.
        profs = []
        prof = input("Digite o nome do professor responsável: ")
        while prof != '':
            profs.append(prof)
            prof = input("Digite o nome do próximo professor: ")
        # Price (raw string).
        val = input("Digite o valor da modalidade: ")
        # Overwrite the record under the same code.
        chave = cod
        valor = [desc, dura, dias, horas, profs, val]
        x = {chave: valor}
        dic.update(x)
        print("\n\nModalidade atualizada com sucesso!!")
    else:
        print("Modalidade inexistente.")
def excluirModalidade(dic):
    """Prompt for a modality code and delete it from the registry.

    Prints an error when the code is unknown; otherwise removes the
    entry in place and confirms. Fixes the user-facing typo
    "escluir" -> "excluir".
    """
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        print("Vamos excluir essa modalidade")
        dic.pop(cod)
        print("Modalidade excluida!")
        print('=' * 60)
    else:
        print("Modalidade não existe.")
# =====================================================================
# FUNÇÕES PRATICAS
def listarPraticas(lista):
    """Print, for every student, the CPF followed by each enrolled practice."""
    print("\nLista de práticas:\n")
    for aluno in lista:
        print('CPF aluno:', aluno[0], end=' --MODALIDADES-- ')
        # Everything after the CPF is one [cod, dia, hora] practice.
        for pratica in aluno[1:]:
            print('COD: ', pratica[0], 'DIA: ', pratica[1],
                  'HORA:', pratica[2], end=' --- ')
        print()
def listarUmaPratica(lista):
    """Prompt for a CPF and print that student's practices.

    Bug fix: the printing used to sit inside the search loop, so it ran
    once per list entry and raised NameError when the first entry did
    not match the requested CPF. The student is now located first and
    printed exactly once.
    """
    cpf = input("Digite o cpf do aluno: ")
    if isPratica(cpf, lista):
        # Locate the requested student's entry.
        for aluno in lista:
            if aluno[0] == cpf:
                alunoPedido = aluno
                break
        print('CPF aluno:', alunoPedido[0], end=' --MODALIDADES-- ')
        # Everything after the CPF is one [cod, dia, hora] practice.
        for pratica in alunoPedido[1:]:
            print('COD: ', pratica[0], 'DIA: ', pratica[1],
                  'HORA:', pratica[2], end=' --- ')
        print()
    else:
        print('Aluno não pratica nenhuma modalidade! \n')
def isPratica(cpf, lista):
    """Return True when *cpf* owns an entry in the practice list.

    Idiom fix: any() over a generator replaces the manual search loop.
    """
    return any(aluno[0] == cpf for aluno in lista)
def returnAluno(cpf, lista):
    """Return the practice entry whose CPF equals *cpf*, or None.

    The original fell off the end implicitly when no entry matched;
    next() with an explicit None default makes that contract visible.
    """
    return next((aluno for aluno in lista if aluno[0] == cpf), None)
def cadPratica(lista, dic_modalidade, dic_academia):
    """Interactively enrol a student (CPF) into a modality (code).

    Validates that both exist, that the student is not already enrolled
    in that modality, and that the chosen day and time are actually
    offered. Appends either a new [cod, dia, hora] to the student's
    existing entry or a whole new [cpf, [cod, dia, hora]] entry.
    """
    # Both the modality and the student must already be registered.
    cod = input("Digite o código da modalidade: ")
    cpf = input("Digite o cpf do aluno: ")
    if isModalidade(dic_modalidade, cod) and isAluno(dic_academia, cpf):
        # Does the student already appear in the practice list?
        if isPratica(cpf, lista):
            # Check whether he already practises this very modality.
            alunoPedido = returnAluno(cpf, lista)
            alunoPedido = alunoPedido[1:]
            mods = []
            for mod in alunoPedido:
                mods.append(mod[0])
            if cod in mods:
                # Already enrolled in this modality.
                return print("Aluno já pratica essa modalidade")
            else:
                # Not enrolled in THIS modality: validate day and time.
                valores = dic_modalidade[cod]
                dias = ', '.join(valores[2])
                print("Os dias disponíveis são: ", end=' ')
                print(dias)
                # Pick a day among the offered ones.
                dia = input("Digite o dia desejado: ")
                if dia not in valores[2]:
                    return print("\nDia indisponível ou digitado errado, tente outra vez.")
                print("\nAs horas disponíveis são: ", end=' ')
                horas = ', '.join(valores[3])
                print(horas)
                hora = input("Digite a hora desejada: ")
                if hora not in valores[3]:
                    return print("\nHora indisponível ou digitada errado, tente outra vez!")
                # All checks passed: append the practice to the entry.
                alunoCadastrar1 = [cod, dia, hora]
                for aux in lista:
                    if aux[0] == cpf:
                        aux.append(alunoCadastrar1)
                return print("Aluno cadastrado nessa modalidade! ")
        else:
            # Student practises no modality yet: create a fresh entry.
            print(
                "Esse aluno ainda não esta matriculado em nenhuma modalidade, borá matriculado ae!!\n")
            valores = dic_modalidade[cod]
            print("Os dias disponíveis são: ", end=' ')
            print(valores[2])
            # Pick a day among the offered ones.
            dia = input("Digite o dia desejado: ")
            if dia not in valores[2]:
                return print("\nDia indisponível ou digitado errado, tente outra vez.")
            print("\nAs horas disponíveis são: ", end=' ')
            print(valores[3])
            hora = input("Digite a hora desejada: ")
            if hora not in valores[3]:
                return print("\nHora indisponível ou digitada errado, tente outra vez!")
            # All checks passed: append a brand-new student entry.
            alunoCadastrar2 = [cpf, [cod, dia, hora]]
            lista.append(alunoCadastrar2)
            return print('Aluno cadastrado na modalidade!\n')
    else:
        return print("Aluno ou modalidade não existente!")
def alterDiaHoraPratica(lista, dic_modalidade):
    """Interactively change the day/time of a student's practice.

    NOTE(review): only the FIRST practice (aluno[1]) is compared against
    the requested code, so later practices can never be altered — looks
    like a bug; confirm intended behaviour before relying on it.
    """
    cpf = input("Digite o cpf do aluno: ")
    cod = input("Digite o código da modalidade a ser alterada: ")
    # The student must appear in the practice list at all.
    if isPratica(cpf, lista):
        for aluno in lista:
            if aluno[0] == cpf:
                if aluno[1][0] == cod:
                    print("Ok, vamos alterar\n")
                    # Pick a new day among the offered ones.
                    valores = dic_modalidade[cod]
                    print("Os dias disponíveis são: ", end=' ')
                    print(valores[2])
                    dia = input("Digite o dia desejado: ")
                    if dia not in valores[2]:
                        return print("\nDia indisponível ou digitado errado, tente outra vez.")
                    # Pick a new time among the offered ones.
                    print("\nAs horas disponíveis são: ", end=' ')
                    print(valores[3])
                    hora = input("Digite a hora desejada: ")
                    if hora not in valores[3]:
                        return print("\nHora indisponível ou digitada errado, tente outra vez!")
                    # Apply the change in place.
                    aluno[1][0] = cod
                    aluno[1][1] = dia
                    aluno[1][2] = hora
    else:
        return print("Aluno não está cadastrado em nenhuma pratica ou algum dado foi inserido errado.")
def removerAlunoPratica(lista):
    """Interactively remove one practice from a student's entry.

    Deletes the whole entry when it was the student's only practice.
    NOTE(review): the final removal loop iterates the entry INCLUDING
    the CPF string (mod[0] is then the CPF's first character), so a
    single-character code equal to that character would delete the CPF
    itself — confirm codes are always multi-character.
    """
    cpf = input("Digite o cpf do aluno: ")
    cod = input("Digite o código da modalidade: ")
    # The student must appear in the practice list at all.
    if isPratica(cpf, lista):
        for aluno in lista:
            if aluno[0] == cpf:
                # Check that he actually practises this modality.
                for i in range(1, len(aluno)):
                    if aluno[i][0] == cod:
                        print("\n\nVamos remover o aluno dessa pratica")
                        # Is this his only practice?
                        if len(aluno) == 2:
                            # Only practice: drop the whole entry.
                            idx = lista.index(aluno)
                            del lista[idx]
                            return print("Aluno removido dessa pratica e da lista de praticas")
                        else:
                            # Remove just the requested practice.
                            idx = 0
                            for mod in aluno:
                                if mod[0] == cod:
                                    del aluno[idx]
                                    return print("Aluno removido da pratica\n")
                                idx += 1
def remoerAlunoTodasPraticas(lista):
    """Ask for a CPF and drop that student's whole entry from the practice list."""
    cpf = input("Digite o cpf do aluno: ")
    # Only act when the student appears in the practice list at all.
    if isPratica(cpf, lista):
        for idx, aluno in enumerate(lista):
            if aluno[0] == cpf:
                del lista[idx]
                return print("Aluno removido de todas as práticas.")
# =====================================================================
# FUNÇÕES RELTÓRIOS
def relatorio1(dic_modalidade):
    """Report every modality offered on more than two days per week."""
    print("As modalidades que são ofertadas mais de 2 vezes na semana sao: \n")
    for chave, valores in dic_modalidade.items():
        if len(valores[2]) > 2:
            print('COD:', chave)
            print('\t DESCRIÇÃO:', valores[0])
            print('\t DURAÇÃO DA AULA:', valores[1])
            print('\t DIAS OFERTADOS:', ' , '.join(valores[2]))
            print('\t HORARIOS:', ' , '.join(valores[3]))
            print('\t PROFS RESPONSÁVEIS:', ' , '.join(valores[4]))
            print('\t VALOR:', valores[5])
            print('\t', '===')
    print('=' * 60)
def relatorio2(dic_modalidade):
    """Prompt for a modality code and print its days, times and professors."""
    cod = input("Digite o código da modalidade a ser consultada: ")
    # Validate the code before reporting.
    if isModalidade(dic_modalidade, cod):
        print("A disponibilidade da modalidade é:\n")
        for chave in dic_modalidade:
            if chave == cod:
                valores = dic_modalidade[chave]
                dias = ', '.join(valores[2])
                horas = ', '.join(valores[3])
                profs = ', '.join(valores[4])
                print('\tDIAS:', dias)
                print('\tHORAS:', horas)
                print('\tPROFESSORES:', profs)
    else:
        return print("Esse código de modalidade é inválido.")
def relatorio3(praticas, dic_modalidade, dic_academia):
    """Report every student enrolled in more than X modalities (X read from the user).

    *dic_modalidade* is accepted for interface symmetry but not used here.
    """
    x = int(input("Digite o número de modalidade: "))
    for aluno in praticas:
        # Number of practices is the entry length minus the CPF element.
        tam = len(aluno) - 1
        if tam > x:
            # Print the student's registry data plus his practices.
            cpf = str(aluno[0])
            for chave in dic_academia:
                if chave == cpf:
                    valores = dic_academia[chave]
                    modalidades = aluno[1:]
                    print('\nDados do aluno:')
                    emails = ', '.join(valores[5])
                    tels = ', '.join(valores[6])
                    print('CPF:', chave, 'NOME:', valores[0], 'DATA DE NASCIMENTO:', valores[1], 'SEXO:', valores[2],
                          'PESO:', valores[3], 'ALTURA:', valores[4], 'EMAILS: ', emails, 'TELEFONES:', tels)
                    print('Modalidades que ele pratica:')
                    for mod in modalidades:
                        print("MODALIDADE:", mod[0], 'DIA:',
                              mod[1], 'HORARIO:', mod[2])
                    print('='*10)
    return print("Fim dos alunos nessa categoria.")
# =====================================================================
# MAIN
def main():
    """Entry point: load persisted data, run the menu loop, save on exit."""
    # The three in-memory stores.
    dic_academia = {}
    dic_modalidade = {}
    praticas = []
    # Load whatever data files already exist on disk.
    if os.path.exists('alunos.txt'):
        lerAlunos(dic_academia)
        print("Dicionário alunos carregado!")
    if os.path.exists('modalidades.txt'):
        lerModalidades(dic_modalidade)
        print("Dicionario modalidades carregado!")
    if os.path.exists('praticas.txt'):
        lerPraticas(praticas)
        print("Lista de praticas carregada!")
    menu = True
    while menu:
        mostrarMenu()
        op1 = input("\n Escolha uma opção: ")
        if op1 == '1':
            # Students submenu.
            print("="*50)
            submenuAlunos()
            op2 = input("Digite sua opção: ")
            if op2 == '1':
                print('Segue a lista de todos os aluno.\n\n')
                listarAlunoFormatado(dic_academia)
            elif op2 == '2':
                listarUmAluno(dic_academia)
            elif op2 == '3':
                cadAluno(dic_academia)
            elif op2 == '4':
                alterarAluno(dic_academia)
            elif op2 == '5':
                excluirAluno(dic_academia)
        elif op1 == '2':
            # Modalities submenu.
            print("="*50)
            submenuModalidades()
            op2 = input("Digite sua opção: ")
            if op2 == '1':
                listarModalidades(dic_modalidade)
            elif op2 == '2':
                listarUmaModalidade(dic_modalidade)
            elif op2 == '3':
                cadModalidades(dic_modalidade)
            elif op2 == '4':
                alterarModalidade(dic_modalidade)
            elif op2 == '5':
                excluirModalidade(dic_modalidade)
        elif op1 == '3':
            # Practices submenu.
            print("="*50)
            submenuPraticas()
            op2 = input("Digite sua opção: ")
            if op2 == '1':
                listarPraticas(praticas)
            elif op2 == '2':
                listarUmaPratica(praticas)
            elif op2 == '3':
                cadPratica(praticas, dic_modalidade, dic_academia)
            elif op2 == '4':
                alterDiaHoraPratica(praticas, dic_modalidade)
            elif op2 == '5':
                removerAlunoPratica(praticas)
            elif op2 == '6':
                remoerAlunoTodasPraticas(praticas)
        elif op1 == '4':
            # Reports submenu.
            print("="*50)
            submenurRelatorios()
            op2 = input("Digite sua opção: ")
            if op2 == '1':
                relatorio1(dic_modalidade)
            elif op2 == '2':
                relatorio2(dic_modalidade)
            elif op2 == '3':
                relatorio3(praticas, dic_modalidade, dic_academia)
        else:
            # Any other option: persist everything and leave the loop.
            salvaAlunos(dic_academia)
            salvaModalidades(dic_modalidade)
            salvaPratica(praticas)
            print("\nTerminando a execução do programa!!!")
            menu = False
#### MAIN ####
if __name__ == "__main__":
    # Guard the entry point so importing this module does not launch the UI.
    main()
import os
# =====================================================================
# MENUS
def mostrarMenu():
    """Print the top-level menu of the academy CRUD."""
    print('=' * 60)
    for linha in ("CRUD Academia: \n",
                  "1 - Submenu de Alunos.",
                  "2 - Submenu de Modalidades.",
                  "3 - Submenu de Práticas.",
                  "4 - Submenu de Relatórios.",
                  "5 - Sair."):
        print(linha)
def submenuOps():
    """Print the five CRUD options shared by the Alunos/Modalidades submenus."""
    for opcao in ("1. Listar todos", "2. Listar um", "3. Incluir",
                  "4. Alterar", "5. Excluir"):
        print(opcao)
def submenuAlunos():
    """Print the students submenu header plus the shared CRUD options."""
    print("\nSubmenu Alunos: \n")
    submenuOps()
def submenuModalidades():
    """Print the modalities submenu header plus the shared CRUD options."""
    print("\nSubmenu Modalidades: \n")
    submenuOps()
def submenuPraticas():
    """Print the practices submenu header plus its option list."""
    print("\nSubmenu Práticas: \n")
    subOpsPratica()
def submenurRelatorios():
    """Print the reports submenu (the typo in this function's name is part of its public API)."""
    print("\nSubmenu Relatórios: \n")
    print("1. Mostrar todos os dados de todas as modalidades que são oferecidas mais do que 2 vezes na semana\n")
    print("2. Mostrar todos os dias-horários-professores de uma dada modalidade fornecida pelo usuário\n")
    print("3. Mostrar todos os dados de todos os alunos que praticam mais do que X modalidades diferentes na academia, onde X deve ser fornecido pelo usuário. Mostrar também quais modalidades o aluno pratica.\n")
def subOpsPratica():
    """Print the practice-submenu options (each followed by a blank line)."""
    for opcao in ("1. Listar todos alunos\n",
                  "2. Listar aluno\n",
                  "3. Incluir aluno em modalidade\n",
                  "4. Alterar dia-hora de prática\n",
                  "5. Excluir aluno de 1 prática\n",
                  "6. Excluir aluno de todas práticas\n"):
        print(opcao)
# =====================================================================
# ARQUIVO
def salvaAlunos(dic):
    """Persist the student registry to 'alunos.txt'.

    One tab-separated line per student. Spaces inside the name and the
    items of the e-mail/phone lists are flattened with '-' so every
    record keeps a fixed number of whitespace-separated fields. Uses a
    context manager (the original leaked the handle on error).
    """
    with open('alunos.txt', 'w') as ref:
        for cpf, valor in dic.items():
            nome, data, sexo, peso, altura, emails, tels = valor
            campos = [str(cpf), '-'.join(str(nome).split()), str(data),
                      str(sexo), str(peso), str(altura),
                      '-'.join(emails), '-'.join(tels)]
            ref.write('\t'.join(campos) + '\n')
def lerAlunos(dic):
    """Load 'alunos.txt' into *dic*, keyed by CPF.

    Bug fix: salvaAlunos replaces spaces in the name with '-', but the
    original reader never restored them, so names stayed hyphenated
    after one save/load cycle. The spaces are now restored, mirroring
    what lerModalidades already does for the description. Also uses a
    context manager and skips blank lines.
    """
    with open('alunos.txt', 'r') as ref:
        for linha in ref:
            campos = linha.split()
            if not campos:
                continue
            cpf, nome, nasc, sexo, peso, altura, email, tel = campos
            # Undo the '-' encoding applied by salvaAlunos.
            dic[cpf] = (' '.join(nome.split('-')), nasc, sexo, peso, altura,
                        email.split('-'), tel.split('-'))
def salvaModalidades(dic):
    """Persist the modality registry to 'modalidades.txt'.

    Encoding: '-' joins list items; spaces inside one professor's name
    become '+'; spaces in the description become '-'. Bug fix: the
    original rewrote the professor names IN PLACE (spaces -> '+'),
    corrupting the in-memory dict every time it was saved; a new list
    is built instead. Also uses a context manager.
    """
    with open('modalidades.txt', 'w') as ref1:
        for cod, valor in dic.items():
            desc = '-'.join(valor[0].split())
            dias = '-'.join(valor[2])
            horas = '-'.join(valor[3])
            # Build a NEW encoded professor list; never mutate valor[4].
            profs = '-'.join('+'.join(p.split()) for p in valor[4])
            linha = '\t'.join([str(cod), desc, str(valor[1]), dias, horas,
                               profs, str(valor[5])]) + '\n'
            ref1.write(linha)
def lerModalidades(dic):
    """Load 'modalidades.txt' into *dic*, keyed by modality code.

    Reverses the salvaModalidades encoding: '-' separates list items,
    '+' stands for a space inside one professor's name, and the
    description's '-' become spaces again. Uses a context manager and
    skips blank lines.
    """
    with open('modalidades.txt', 'r') as ref1:
        for linha in ref1:
            campos = linha.split()
            if not campos:
                continue
            cod, desc, dura, dias, horas, profs, val = campos
            dic[str(cod)] = [' '.join(desc.split('-')), dura,
                             dias.split('-'), horas.split('-'),
                             [' '.join(p.split('+')) for p in profs.split('-')],
                             val]
def salvaPratica(lista):
    """Persist the practice list to 'praticas.txt'.

    One line per student: the CPF, then each practice encoded as
    'cod-dia-hora', all tab-separated. Practices are written from last
    to first, matching the original file layout. Uses a context manager
    so the file is closed even if a write fails (the original leaked
    the handle on error).
    """
    with open('praticas.txt', 'w') as ref:
        for aluno in lista:
            # CPF first, then every enrolled practice (reversed, as before).
            ref.write(aluno[0] + '\t')
            for pratica in reversed(aluno[1:]):
                ref.write('-'.join(pratica) + '\t')
            ref.write('\n')
def lerPraticas(lista):
    """Load 'praticas.txt' into *lista* (appending, not replacing).

    Each line becomes [cpf, [cod, dia, hora], ...]; the 'cod-dia-hora'
    fields are split on '-'. Uses a context manager (the original left
    the file open on error) and skips blank lines for robustness.
    """
    with open('praticas.txt', 'r') as ref:
        for linha in ref:
            campos = linha.split()
            if not campos:
                continue
            # First field is the CPF; the rest are 'cod-dia-hora' triples.
            aluno = [campos[0]] + [mod.split('-') for mod in campos[1:]]
            lista.append(aluno)
# =====================================================================
# FUNÇÕES ALUNOS
def isAluno(dic, cpf):
    """Return True when *cpf* is a registered student key in *dic*.

    Idiom fix: a dict membership test replaces the manual key loop.
    """
    return cpf in dic
def cadAluno(dic):
    """Interactively register a new student keyed by CPF.

    Prompts for the personal fields plus open-ended lists of e-mails and
    phone numbers (an empty answer ends each list). Refuses to overwrite
    an existing CPF. Mutates *dic* in place; all values are kept as raw
    strings.
    """
    cpf = input("Digite o cpf: [xxxxxxxxxxx] ")
    if isAluno(dic, cpf):
        print("Aluno ja cadastrado.")
    else:
        # Collect the fixed personal fields.
        nome = input("Digite o nome: ")
        nasc = input("Digite o nascimento: [xx/xx/xxxx] ")
        sexo = input("Digite o sexo: [M/F] ")
        peso = input("Digite o peso: ")
        altura = input("Digite a altura: ")
        # E-mail list: stop on empty input.
        emails = []
        email = input("Digite o primeiro e-mail: ")
        while email != '':
            emails.append(email)
            email = input("Digite o próximo e-mail: ")
        # Phone list: stop on empty input.
        tels = []
        tel = input("Digite o telefone (sem espaço): ")
        while tel != '':
            tels.append(tel)
            tel = input("Digite o próximo telefone (sem espaço): ")
        # Store the record as a tuple under the CPF key.
        chave = cpf
        valor = (nome, nasc, sexo, peso, altura, emails, tels)
        # Merge into the registry.
        x = {chave: valor}
        dic.update(x)
        print("Aluno cadastrado com sucesso!! \n\n")
def listarAlunoFormatado(dic):
    """Print every registered student with all stored fields on one line."""
    for cpf, dados in dic.items():
        nome, data, sexo, peso, altura, emails, tels = dados
        print('CPF:', cpf, 'NOME:', nome, 'DATA DE NASCIMENTO:', data, 'SEXO:', sexo,
              'PESO:', peso, 'ALTURA:', altura, 'EMAILS: ', ', '.join(emails),
              'TELEFONES:', ', '.join(tels))
def listarUmAluno(dic):
    """Prompt for a CPF and print that student's full record, or an error."""
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Segue dados do aluno: ")
        # Unpack the stored tuple field by field.
        string = dic[cpf]
        nome = string[0]
        data = string[1]
        sexo = string[2]
        peso = string[3]
        altura = string[4]
        emails = string[5]
        emails = ', '.join(emails)
        tels = string[6]
        tels = ', '.join(tels)
        print('CPF:', cpf, 'NOME:', nome, 'DATA DE NASCIMENTO:', data, 'SEXO:', sexo,
              'PESO:', peso, 'ALTURA:', altura, 'EMAILS: ', emails, 'TELEFONES:', tels)
    else:
        print("Aluno não existente.")
        print('=' * 60)
def alterarAluno(dic):
    """Prompt for a CPF and, when found, re-ask every field and overwrite the record.

    All fields are replaced wholesale; there is no way to keep an old value.
    """
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Vamos alterar o aluno.")
        # Collect the replacement data.
        nome = input("Digite o nome: ")
        nasc = input("Digite o nascimento: ")
        sexo = input("Digite o sexo: ")
        peso = input("Digite o peso: ")
        altura = input("Digite a altura: ")
        emails = []
        email = input("Digite o primeiro e-mail: ")
        while email != '':
            emails.append(email)
            email = input("Digite o próximo e-mail: ")
        # Phone list: stop on empty input.
        tels = []
        tel = input("Digite o telefone (sem espaço): ")
        while tel != '':
            tels.append(tel)
            tel = input("Digite o próximo telefone (sem espaço): ")
        # Overwrite the whole record under the same key.
        chave = cpf
        valor = (nome, nasc, sexo, peso, altura, emails, tels)
        x = {chave: valor}
        dic.update(x)
        print("Aluno atualizado com sucesso!!")
        print('=' * 60)
    else:
        print("Aluno inexistente.")
def excluirAluno(dic):
    """Prompt for a CPF and delete that student from the registry.

    Prints an error when the CPF is unknown; otherwise removes the
    entry in place and confirms. Fixes the user-facing typos
    "escluir"/"escluido" -> "excluir"/"excluido".
    """
    cpf = input("Digite o cpf do aluno: ")
    if isAluno(dic, cpf):
        print("Vamos excluir o aluno do cadastro.")
        dic.pop(cpf)
        print("Aluno excluido! ")
        print('=' * 60)
    else:
        print("Aluno não existente.")
# =====================================================================
# FUNÇÕES MODALIDADE
def isModalidade(dic, cod):
    """Return True when *cod* is a registered modality key in *dic*.

    Idiom fix: a dict membership test replaces the manual key loop.
    """
    return cod in dic
def cadModalidades(dic):
    """Interactively register a new modality keyed by its code.

    Collects description, class length, open-ended lists of days, times
    and professors (an empty answer ends each list) and the price.
    Refuses duplicate codes; mutates *dic* in place.
    """
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        print("Modalidade já cadastrada. ")
    else:
        # Fixed fields (kept as raw strings).
        desc = input("Digite a descrição: ")
        dura = input("digite a duração da aula (minutos): ")
        # Days offered: show the accepted spellings, stop on empty input.
        dias = []
        print("Para dos dias de oferecimento utilize as seguintes opções")
        opDias = ['segunda', 'terça', 'quarta',
                  'quinta', 'sexta', 'sabado', 'domingo']
        for value in opDias:
            print(value)
        dia = input("\nDigite o primeiro dia: ")
        while dia != '':
            dias.append(dia)
            dia = input("Digite o proximo dia: ")
        # Time slots: stop on empty input.
        horas = []
        hora = input("\nDigite o primeiro horário [xx:xx]")
        while hora != '':
            horas.append(hora)
            hora = input("Digite o próximo horário [xx:xx]")
        # Professors: stop on empty input.
        profs = []
        prof = input("Digite o nome do professor responsável: ")
        while prof != '':
            profs.append(prof)
            prof = input("Digite o nome do próximo professor: ")
        # Price (raw string).
        val = input("Digite o valor da modalidade: ")
        # Store under the code.
        chave = cod
        valor = [desc, dura, dias, horas, profs, val]
        x = {chave: valor}
        dic.update(x)
        print("\n\nModalidade adicionada com sucesso!!")
def listarModalidades(dic):
    """Print every modality with its schedule, staff and price on one line."""
    for cod, dados in dic.items():
        desc, dura, dias, horas, profs, val = dados
        print('CODIGO:', cod, 'DESCRIÇÃO:', desc, 'DURAÇÃO:', dura,
              'DIAS OFERECIDOS:', ','.join(dias),
              'HORARIOS OFERECIDOS:', ','.join(horas),
              'PROFESSORES RESPONSÁVEIS:', ','.join(profs), 'VALOR:', val)
def listarUmaModalidade(dic):
    """Prompt for a code and print that modality's full record, or an error."""
    cod = input("Digite o código: ")
    if isModalidade(dic, cod):
        # Unpack the stored list field by field.
        string = dic[cod]
        desc = string[0]
        dura = string[1]
        dias = string[2]
        dias = ','.join(dias)
        horas = string[3]
        horas = ','.join(horas)
        profs = string[4]
        profs = ','.join(profs)
        val = string[5]
        print('CODIGO:', cod, 'DESCRIÇÃO:', desc, 'DURAÇÃO:', dura, 'DIAS OFERECIDOS:', dias,
              'HORARIOS OFERECIDOS:', horas, 'PROFESSORES RESPONSÁVEIS:', profs, 'VALOR:', val)
    else:
        print("Modalidade inexistente. ")
def alterarModalidade(dic):
    """Prompt for a code and, when found, re-ask every field and overwrite the modality."""
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        desc = input("Digite a descrição: ")
        dura = input("digite a duração da aula (minutos): ")
        # Days offered: show the accepted spellings, stop on empty input.
        dias = []
        print("Para dos dias de oferecimento utilize as seguintes opções")
        opDias = ['segunda', 'terça', 'quarta',
                  'quinta', 'sexta', 'sabado', 'domingo']
        for value in opDias:
            print(value)
        dia = input("\nDigite o primeiro dia: ")
        while dia != '':
            dias.append(dia)
            dia = input("Digite o proximo dia: ")
        # Time slots: stop on empty input.
        horas = []
        hora = input("\nDigite o primeiro horário [xx:xx]")
        while hora != '':
            horas.append(hora)
            hora = input("Digite o próximo horário [xx:xx]")
        # Professors: stop on empty input.
        profs = []
        prof = input("Digite o nome do professor responsável: ")
        while prof != '':
            profs.append(prof)
            prof = input("Digite o nome do próximo professor: ")
        # Price (raw string).
        val = input("Digite o valor da modalidade: ")
        # Overwrite the record under the same code.
        chave = cod
        valor = [desc, dura, dias, horas, profs, val]
        x = {chave: valor}
        dic.update(x)
        print("\n\nModalidade atualizada com sucesso!!")
    else:
        print("Modalidade inexistente.")
def excluirModalidade(dic):
    """Prompt for a modality code and delete it from the registry.

    Prints an error when the code is unknown; otherwise removes the
    entry in place and confirms. Fixes the user-facing typo
    "escluir" -> "excluir".
    """
    cod = input("Digite o código da modalidade: ")
    if isModalidade(dic, cod):
        print("Vamos excluir essa modalidade")
        dic.pop(cod)
        print("Modalidade excluida!")
        print('=' * 60)
    else:
        print("Modalidade não existe.")
# =====================================================================
# FUNÇÕES PRATICAS
def listarPraticas(lista):
    """Print, for every student, the CPF followed by each enrolled practice."""
    print("\nLista de práticas:\n")
    for aluno in lista:
        print('CPF aluno:', aluno[0], end=' --MODALIDADES-- ')
        # Everything after the CPF is one [cod, dia, hora] practice.
        for pratica in aluno[1:]:
            print('COD: ', pratica[0], 'DIA: ', pratica[1],
                  'HORA:', pratica[2], end=' --- ')
        print()
def listarUmaPratica(lista):
    """Prompt for a CPF and print that student's practices.

    Bug fix: the printing used to sit inside the search loop, so it ran
    once per list entry and raised NameError when the first entry did
    not match the requested CPF. The student is now located first and
    printed exactly once.
    """
    cpf = input("Digite o cpf do aluno: ")
    if isPratica(cpf, lista):
        # Locate the requested student's entry.
        for aluno in lista:
            if aluno[0] == cpf:
                alunoPedido = aluno
                break
        print('CPF aluno:', alunoPedido[0], end=' --MODALIDADES-- ')
        # Everything after the CPF is one [cod, dia, hora] practice.
        for pratica in alunoPedido[1:]:
            print('COD: ', pratica[0], 'DIA: ', pratica[1],
                  'HORA:', pratica[2], end=' --- ')
        print()
    else:
        print('Aluno não pratica nenhuma modalidade! \n')
def isPratica(cpf, lista):
    """Return True when *cpf* owns an entry in the practice list.

    Idiom fix: any() over a generator replaces the manual search loop.
    """
    return any(aluno[0] == cpf for aluno in lista)
def returnAluno(cpf, lista):
    """Return the practice entry whose CPF equals *cpf*, or None.

    The original fell off the end implicitly when no entry matched;
    next() with an explicit None default makes that contract visible.
    """
    return next((aluno for aluno in lista if aluno[0] == cpf), None)
def cadPratica(lista, dic_modalidade, dic_academia):
    """Interactively enrol a student (CPF) into a modality (code).

    Validates that both exist, that the student is not already enrolled
    in that modality, and that the chosen day and time are actually
    offered. Appends either a new [cod, dia, hora] to the student's
    existing entry or a whole new [cpf, [cod, dia, hora]] entry.
    """
    # Both the modality and the student must already be registered.
    cod = input("Digite o código da modalidade: ")
    cpf = input("Digite o cpf do aluno: ")
    if isModalidade(dic_modalidade, cod) and isAluno(dic_academia, cpf):
        # Does the student already appear in the practice list?
        if isPratica(cpf, lista):
            # Check whether he already practises this very modality.
            alunoPedido = returnAluno(cpf, lista)
            alunoPedido = alunoPedido[1:]
            mods = []
            for mod in alunoPedido:
                mods.append(mod[0])
            if cod in mods:
                # Already enrolled in this modality.
                return print("Aluno já pratica essa modalidade")
            else:
                # Not enrolled in THIS modality: validate day and time.
                valores = dic_modalidade[cod]
                dias = ', '.join(valores[2])
                print("Os dias disponíveis são: ", end=' ')
                print(dias)
                # Pick a day among the offered ones.
                dia = input("Digite o dia desejado: ")
                if dia not in valores[2]:
                    return print("\nDia indisponível ou digitado errado, tente outra vez.")
                print("\nAs horas disponíveis são: ", end=' ')
                horas = ', '.join(valores[3])
                print(horas)
                hora = input("Digite a hora desejada: ")
                if hora not in valores[3]:
                    return print("\nHora indisponível ou digitada errado, tente outra vez!")
                # All checks passed: append the practice to the entry.
                alunoCadastrar1 = [cod, dia, hora]
                for aux in lista:
                    if aux[0] == cpf:
                        aux.append(alunoCadastrar1)
                return print("Aluno cadastrado nessa modalidade! ")
        else:
            # Student practises no modality yet: create a fresh entry.
            print(
                "Esse aluno ainda não esta matriculado em nenhuma modalidade, borá matriculado ae!!\n")
            valores = dic_modalidade[cod]
            print("Os dias disponíveis são: ", end=' ')
            print(valores[2])
            # Pick a day among the offered ones.
            dia = input("Digite o dia desejado: ")
            if dia not in valores[2]:
                return print("\nDia indisponível ou digitado errado, tente outra vez.")
            print("\nAs horas disponíveis são: ", end=' ')
            print(valores[3])
            hora = input("Digite a hora desejada: ")
            if hora not in valores[3]:
                return print("\nHora indisponível ou digitada errado, tente outra vez!")
            # All checks passed: append a brand-new student entry.
            alunoCadastrar2 = [cpf, [cod, dia, hora]]
            lista.append(alunoCadastrar2)
            return print('Aluno cadastrado na modalidade!\n')
    else:
        return print("Aluno ou modalidade não existente!")
def alterDiaHoraPratica(lista, dic_modalidade):
    """Interactively change the day/time of a student's practice.

    NOTE(review): only the FIRST practice (aluno[1]) is compared against
    the requested code, so later practices can never be altered — looks
    like a bug; confirm intended behaviour before relying on it.
    """
    cpf = input("Digite o cpf do aluno: ")
    cod = input("Digite o código da modalidade a ser alterada: ")
    # The student must appear in the practice list at all.
    if isPratica(cpf, lista):
        for aluno in lista:
            if aluno[0] == cpf:
                if aluno[1][0] == cod:
                    print("Ok, vamos alterar\n")
                    # Pick a new day among the offered ones.
                    valores = dic_modalidade[cod]
                    print("Os dias disponíveis são: ", end=' ')
                    print(valores[2])
                    dia = input("Digite o dia desejado: ")
                    if dia not in valores[2]:
                        return print("\nDia indisponível ou digitado errado, tente outra vez.")
                    # Pick a new time among the offered ones.
                    print("\nAs horas disponíveis são: ", end=' ')
                    print(valores[3])
                    hora = input("Digite a hora desejada: ")
                    if hora not in valores[3]:
                        return print("\nHora indisponível ou digitada errado, tente outra vez!")
                    # Apply the change in place.
                    aluno[1][0] = cod
                    aluno[1][1] = dia
                    aluno[1][2] = hora
    else:
        return print("Aluno não está cadastrado em nenhuma pratica ou algum dado foi inserido errado.")
def removerAlunoPratica(lista):
    """Interactively remove one practice from a student's entry.

    Deletes the whole entry when it was the student's only practice.
    NOTE(review): the final removal loop iterates the entry INCLUDING
    the CPF string (mod[0] is then the CPF's first character), so a
    single-character code equal to that character would delete the CPF
    itself — confirm codes are always multi-character.
    """
    cpf = input("Digite o cpf do aluno: ")
    cod = input("Digite o código da modalidade: ")
    # The student must appear in the practice list at all.
    if isPratica(cpf, lista):
        for aluno in lista:
            if aluno[0] == cpf:
                # Check that he actually practises this modality.
                for i in range(1, len(aluno)):
                    if aluno[i][0] == cod:
                        print("\n\nVamos remover o aluno dessa pratica")
                        # Is this his only practice?
                        if len(aluno) == 2:
                            # Only practice: drop the whole entry.
                            idx = lista.index(aluno)
                            del lista[idx]
                            return print("Aluno removido dessa pratica e da lista de praticas")
                        else:
                            # Remove just the requested practice.
                            idx = 0
                            for mod in aluno:
                                if mod[0] == cod:
                                    del aluno[idx]
                                    return print("Aluno removido da pratica\n")
                                idx += 1
def remoerAlunoTodasPraticas(lista):
    """Ask for a CPF and drop that student's whole entry from the practice list."""
    cpf = input("Digite o cpf do aluno: ")
    # Only act when the student appears in the practice list at all.
    if isPratica(cpf, lista):
        for idx, aluno in enumerate(lista):
            if aluno[0] == cpf:
                del lista[idx]
                return print("Aluno removido de todas as práticas.")
# =====================================================================
# FUNÇÕES RELTÓRIOS
def relatorio1(dic_modalidade):
    """Report every modality offered on more than two days per week."""
    print("As modalidades que são ofertadas mais de 2 vezes na semana sao: \n")
    for chave, valores in dic_modalidade.items():
        if len(valores[2]) > 2:
            print('COD:', chave)
            print('\t DESCRIÇÃO:', valores[0])
            print('\t DURAÇÃO DA AULA:', valores[1])
            print('\t DIAS OFERTADOS:', ' , '.join(valores[2]))
            print('\t HORARIOS:', ' , '.join(valores[3]))
            print('\t PROFS RESPONSÁVEIS:', ' , '.join(valores[4]))
            print('\t VALOR:', valores[5])
            print('\t', '===')
    print('=' * 60)
def relatorio2(dic_modalidade):
cod = input("Digite o código da modalidade a ser consultada: ")
# verificar se é modalidade valida
if isModalidade(dic_modalidade, cod):
print("A disponibilidade da modalidade é:\n")
for chave in dic_modalidade:
if chave == cod:
valores = dic_modalidade[chave]
dias = ', '.join(valores[2])
horas = ', '.join(valores[3])
profs = ', '.join(valores[4])
print('\tDIAS:', dias)
print('\tHORAS:', horas)
print('\tPROFESSORES:', profs)
else:
return print("Esse código de modalidade é inválido.")
def relatorio3(praticas, dic_modalidade, dic_academia):
x = int(input("Digite o número de modalidade: "))
for aluno in praticas:
# verificar se é maior que x
tam = len(aluno) - 1
if tam > x:
# printar dados do aluno
cpf = str(aluno[0])
for chave in dic_academia:
if chave == cpf:
valores = dic_academia[chave]
modalidades = aluno[1:]
print('\nDados do aluno:')
emails = ', '.join(valores[5])
tels = ', '.join(valores[6])
print('CPF:', chave, 'NOME:', valores[0], 'DATA DE NASCIMENTO:', valores[1], 'SEXO:', valores[2],
'PESO:', valores[3], 'ALTURA:', valores[4], 'EMAILS: ', emails, 'TELEFONES:', tels)
print('Modalidades que ele pratica:')
for mod in modalidades:
print("MODALIDADE:", mod[0], 'DIA:',
mod[1], 'HORARIO:', mod[2])
print('='*10)
return print("Fim dos alunos nessa categoria.")
# =====================================================================
# MAIN
def main():
# declarar dicionarios
dic_academia = {}
dic_modalidade = {}
praticas = []
# Verificar existencia
if os.path.exists('alunos.txt'):
lerAlunos(dic_academia)
print("Dicionário alunos carregado!")
if os.path.exists('modalidades.txt'):
lerModalidades(dic_modalidade)
print("Dicionario modalidades carregado!")
if os.path.exists('praticas.txt'):
lerPraticas(praticas)
print("Lista de praticas carregada!")
menu = True
while menu:
mostrarMenu()
op1 = input("\n Escolha uma opção: ")
if op1 == '1':
print("="*50)
submenuAlunos()
op2 = input("Digite sua opção: ")
# submenu
if op2 == '1':
print('Segue a lista de todos os aluno.\n\n')
listarAlunoFormatado(dic_academia)
elif op2 == '2':
listarUmAluno(dic_academia)
elif op2 == '3':
cadAluno(dic_academia)
elif op2 == '4':
alterarAluno(dic_academia)
elif op2 == '5':
excluirAluno(dic_academia)
elif op1 == '2':
print("="*50)
submenuModalidades()
op2 = input("Digite sua opção: ")
# submenu
if op2 == '1':
listarModalidades(dic_modalidade)
elif op2 == '2':
listarUmaModalidade(dic_modalidade)
elif op2 == '3':
cadModalidades(dic_modalidade)
elif op2 == '4':
alterarModalidade(dic_modalidade)
elif op2 == '5':
excluirModalidade(dic_modalidade)
elif op1 == '3':
print("="*50)
submenuPraticas()
op2 = input("Digite sua opção: ")
# submenu
if op2 == '1':
listarPraticas(praticas)
elif op2 == '2':
listarUmaPratica(praticas)
elif op2 == '3':
cadPratica(praticas, dic_modalidade, dic_academia)
elif op2 == '4':
alterDiaHoraPratica(praticas, dic_modalidade)
elif op2 == '5':
removerAlunoPratica(praticas)
elif op2 == '6':
remoerAlunoTodasPraticas(praticas)
elif op1 == '4':
print("="*50)
submenurRelatorios()
op2 = input("Digite sua opção: ")
# submenu
if op2 == '1':
relatorio1(dic_modalidade)
elif op2 == '2':
relatorio2(dic_modalidade)
elif op2 == '3':
relatorio3(praticas, dic_modalidade, dic_academia)
else:
salvaAlunos(dic_academia)
salvaModalidades(dic_modalidade)
salvaPratica(praticas)
print("\nTerminando a execução do programa!!!")
menu = False
#### MAIN ####
main() | pt | 0.894194 | # ===================================================================== # MENUS # ===================================================================== # ARQUIVO # lista # lista # receber descrição # receber duração # receber dias # receber horas # receber profs # valores # linha # profs # update dic # Escreve o cpf do aluno # quantidade de modalidades que o cpf pratica # Escrever cada modalidade # mudou de cpf # Receber cpf # Separar modalidades em lista # carregar na lista principal # ===================================================================== # FUNÇÕES ALUNOS # receber dados # lista de email # lista de tel # criar dic # gerar dic # receber dados # lista de tel # criar dic # gerar dic # ===================================================================== # FUNÇÕES MODALIDADE # receber dados # Receber dias # Receber horarios # receber professores # receber valor # gerar dic # Receber dias # Receber horarios # receber professores # receber valor # gerar dic # ===================================================================== # FUNÇÕES PRATICAS # printar cpf do aluno # Varrer as práticas do aluno # buscar o aluno pedido # Encotnrou o aluno # varrer modalidades do aluno # ver se a modalidade existe no sistema # aluno existe e modalidade existe # aluno esta cadastrado aqui em alguma coisa? # verificar se ele ja pratica a modalidade passada # aluno ja pratica essa modalidade # aluno não pratica ESSA modalidade # Verificar o horario e data desejado # receber dia # se voce chegou até aqui, bora cadastrar pois! # ele com ctz nao pratica NENHUMA MODALIDADE # receber dia # Se voce chegou até aqui, bora cadastra # Verificar se esse esta na lista de modalidades # aluno esta matriculado em alguma pratica # receber novo dia # Receber hora # alterar # verificar se ele está matriculado em alguma prática # aluno esta matriculado em alguma pratica # Verificar se ele pratica ess modalidade # verificar se essa é a unica pratica dele. 
# Ele só pratica isso, logo esxcluir tudo # remover somente a pratica pedida # verificar se ele esta matriculado em alguma prática # ===================================================================== # FUNÇÕES RELTÓRIOS # verificar se é modalidade valida # verificar se é maior que x # printar dados do aluno # ===================================================================== # MAIN # declarar dicionarios # Verificar existencia # submenu # submenu # submenu # submenu #### MAIN #### | 3.291108 | 3 |
stylize.py | mixuala/fast-neural-style-pytorch | 1 | 6618663 | import torch
import os
from torchvision import transforms
import time
import cv2
import fast_neural_style_pytorch.transformer as transformer
import fast_neural_style_pytorch.utils as utils
STYLE_TRANSFORM_PATH = "transforms/udnie_aggressive.pth"
PRESERVE_COLOR = False
def stylize():
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(STYLE_TRANSFORM_PATH))
net = net.to(device)
with torch.no_grad():
while(1):
torch.cuda.empty_cache()
print("Stylize Image~ Press Ctrl+C and Enter to close the program")
content_image_path = input("Enter the image path: ")
content_image = utils.load_image(content_image_path)
starttime = time.time()
content_tensor = utils.itot(content_image).to(device)
generated_tensor = net(content_tensor)
generated_image = utils.ttoi(generated_tensor.detach())
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
print("Transfer Time: {}".format(time.time() - starttime))
utils.show(generated_image)
utils.saveimg(generated_image, "helloworld.jpg")
def stylize_folder_single(style_path, content_folder, save_folder):
"""
Reads frames/pictures as follows:
content_folder
pic1.ext
pic2.ext
pic3.ext
...
and saves as the styled images in save_folder as follow:
save_folder
pic1.ext
pic2.ext
pic3.ext
...
"""
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(style_path))
net = net.to(device)
# Stylize every frame
images = [img for img in os.listdir(content_folder) if img.endswith(".jpg")]
with torch.no_grad():
for image_name in images:
# Free-up unneeded cuda memory
torch.cuda.empty_cache()
# Load content image
content_image = utils.load_image(content_folder + image_name)
content_tensor = utils.itot(content_image).to(device)
# Generate image
generated_tensor = net(content_tensor)
generated_image = utils.ttoi(generated_tensor.detach())
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
# Save image
utils.saveimg(generated_image, save_folder + image_name)
def stylize_folder(style_path, folder_containing_the_content_folder, save_folder, batch_size=1):
"""Stylizes images in a folder by batch
If the images are of different dimensions, use transform.resize() or use a batch size of 1
IMPORTANT: Put content_folder inside another folder folder_containing_the_content_folder
folder_containing_the_content_folder
content_folder
pic1.ext
pic2.ext
pic3.ext
...
and saves as the styled images in save_folder as follow:
save_folder
pic1.ext
pic2.ext
pic3.ext
...
"""
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Image loader
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
image_dataset = utils.ImageFolderWithPaths(folder_containing_the_content_folder, transform=transform)
image_loader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size)
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(style_path))
net = net.to(device)
# Stylize batches of images
with torch.no_grad():
for content_batch, _, path in image_loader:
# Free-up unneeded cuda memory
torch.cuda.empty_cache()
# Generate image
generated_tensor = net(content_batch.to(device)).detach()
# Save images
for i in range(len(path)):
generated_image = utils.ttoi(generated_tensor[i])
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
image_name = os.path.basename(path[i])
utils.saveimg(generated_image, save_folder + image_name)
#stylize() | import torch
import os
from torchvision import transforms
import time
import cv2
import fast_neural_style_pytorch.transformer as transformer
import fast_neural_style_pytorch.utils as utils
STYLE_TRANSFORM_PATH = "transforms/udnie_aggressive.pth"
PRESERVE_COLOR = False
def stylize():
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(STYLE_TRANSFORM_PATH))
net = net.to(device)
with torch.no_grad():
while(1):
torch.cuda.empty_cache()
print("Stylize Image~ Press Ctrl+C and Enter to close the program")
content_image_path = input("Enter the image path: ")
content_image = utils.load_image(content_image_path)
starttime = time.time()
content_tensor = utils.itot(content_image).to(device)
generated_tensor = net(content_tensor)
generated_image = utils.ttoi(generated_tensor.detach())
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
print("Transfer Time: {}".format(time.time() - starttime))
utils.show(generated_image)
utils.saveimg(generated_image, "helloworld.jpg")
def stylize_folder_single(style_path, content_folder, save_folder):
"""
Reads frames/pictures as follows:
content_folder
pic1.ext
pic2.ext
pic3.ext
...
and saves as the styled images in save_folder as follow:
save_folder
pic1.ext
pic2.ext
pic3.ext
...
"""
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(style_path))
net = net.to(device)
# Stylize every frame
images = [img for img in os.listdir(content_folder) if img.endswith(".jpg")]
with torch.no_grad():
for image_name in images:
# Free-up unneeded cuda memory
torch.cuda.empty_cache()
# Load content image
content_image = utils.load_image(content_folder + image_name)
content_tensor = utils.itot(content_image).to(device)
# Generate image
generated_tensor = net(content_tensor)
generated_image = utils.ttoi(generated_tensor.detach())
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
# Save image
utils.saveimg(generated_image, save_folder + image_name)
def stylize_folder(style_path, folder_containing_the_content_folder, save_folder, batch_size=1):
"""Stylizes images in a folder by batch
If the images are of different dimensions, use transform.resize() or use a batch size of 1
IMPORTANT: Put content_folder inside another folder folder_containing_the_content_folder
folder_containing_the_content_folder
content_folder
pic1.ext
pic2.ext
pic3.ext
...
and saves as the styled images in save_folder as follow:
save_folder
pic1.ext
pic2.ext
pic3.ext
...
"""
# Device
device = ("cuda" if torch.cuda.is_available() else "cpu")
# Image loader
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
image_dataset = utils.ImageFolderWithPaths(folder_containing_the_content_folder, transform=transform)
image_loader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size)
# Load Transformer Network
net = transformer.TransformerNetwork()
net.load_state_dict(torch.load(style_path))
net = net.to(device)
# Stylize batches of images
with torch.no_grad():
for content_batch, _, path in image_loader:
# Free-up unneeded cuda memory
torch.cuda.empty_cache()
# Generate image
generated_tensor = net(content_batch.to(device)).detach()
# Save images
for i in range(len(path)):
generated_image = utils.ttoi(generated_tensor[i])
if (PRESERVE_COLOR):
generated_image = utils.transfer_color(content_image, generated_image)
image_name = os.path.basename(path[i])
utils.saveimg(generated_image, save_folder + image_name)
#stylize() | en | 0.747071 | # Device # Load Transformer Network Reads frames/pictures as follows: content_folder pic1.ext pic2.ext pic3.ext ... and saves as the styled images in save_folder as follow: save_folder pic1.ext pic2.ext pic3.ext ... # Device # Load Transformer Network # Stylize every frame # Free-up unneeded cuda memory # Load content image # Generate image # Save image Stylizes images in a folder by batch If the images are of different dimensions, use transform.resize() or use a batch size of 1 IMPORTANT: Put content_folder inside another folder folder_containing_the_content_folder folder_containing_the_content_folder content_folder pic1.ext pic2.ext pic3.ext ... and saves as the styled images in save_folder as follow: save_folder pic1.ext pic2.ext pic3.ext ... # Device # Image loader # Load Transformer Network # Stylize batches of images # Free-up unneeded cuda memory # Generate image # Save images #stylize() | 2.469786 | 2 |
drl_implementation/agent/utils/exploration_strategy.py | IanYangChina/DRL_Implementation | 11 | 6618664 | <reponame>IanYangChina/DRL_Implementation<gh_stars>10-100
import math as M
import numpy as np
class ExpDecayGreedy(object):
# e-greedy exploration with exponential decay
def __init__(self, start=1, end=0.05, decay=50000, decay_start=None, rng=None):
self.start = start
self.end = end
self.decay = decay
self.decay_start = decay_start
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, count):
if self.decay_start is not None:
count -= self.decay_start
if count < 0:
count = 0
epsilon = self.end + (self.start - self.end) * M.exp(-1. * count / self.decay)
prob = self.rng.uniform(0, 1)
if prob < epsilon:
return True
else:
return False
class LinearDecayGreedy(object):
# e-greedy exploration with linear decay
def __init__(self, start=1.0, end=0.1, decay=1000000, decay_start=None, rng=None):
self.start = start
self.end = end
self.decay = decay
self.decay_start = decay_start
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, count):
if self.decay_start is not None:
count -= self.decay_start
if count < 0:
count = 0
if count > self.decay:
count = self.dacay
epsilon = self.start - count * (self.start - self.end) / self.decay
prob = self.rng.uniform(0, 1)
if prob < epsilon:
return True
else:
return False
class OUNoise(object):
# https://github.com/rll/rllab/blob/master/rllab/exploration_strategies/ou_strategy.py
def __init__(self, action_dim, action_max, mu=0, theta=0.2, sigma=1.0, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def __call__(self, action):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * self.rng.standard_normal(len(x))
self.state = x + dx
return np.clip(action + self.state, -self.action_max, self.action_max)
class GaussianNoise(object):
# the one used in the TD3 paper: http://proceedings.mlr.press/v80/fujimoto18a/fujimoto18a.pdf
def __init__(self, action_dim, action_max, scale=1, mu=0, sigma=0.1, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.sigma = sigma
def __call__(self, action):
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma, size=(self.action_dim,))
return np.clip(action + noise, -self.action_max, self.action_max)
class EGreedyGaussian(object):
# the one used in the HER paper: https://arxiv.org/abs/1707.01495
def __init__(self, action_dim, action_max, chance=0.2, scale=1, mu=0, sigma=0.1, rng=None):
self.chance = chance
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.sigma = sigma
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, action):
chance = self.rng.uniform(0, 1)
if chance < self.chance:
return self.rng.uniform(-self.action_max, self.action_max, size=(self.action_dim,))
else:
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma, size=(self.action_dim,))
return np.clip(action + noise, -self.action_max, self.action_max)
class AutoAdjustingEGreedyGaussian(object):
"""
https://ieeexplore.ieee.org/document/9366328
This exploration class is a goal-success-rate-based auto-adjusting exploration strategy.
It modifies the original constant chance exploration strategy by reducing exploration probabilities and noise deviations
w.r.t. the testing success rate of each goal.
"""
def __init__(self, goal_num, action_dim, action_max, tau=0.05, chance=0.2, scale=1, mu=0, sigma=0.2, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.base_sigma = sigma
self.sigma = np.ones(self.goal_num) * sigma
self.base_chance = chance
self.goal_num = goal_num
self.tau = tau
self.success_rates = np.zeros(self.goal_num)
self.chance = np.ones(self.goal_num) * chance
def update_success_rates(self, new_tet_suc_rate):
old_tet_suc_rate = self.success_rates.copy()
self.success_rates = (1-self.tau)*old_tet_suc_rate + self.tau*new_tet_suc_rate
self.chance = self.base_chance*(1-self.success_rates)
self.sigma = self.base_sigma*(1-self.success_rates)
def __call__(self, goal_ind, action):
# return a random action or a noisy action
prob = self.rng.uniform(0, 1)
if prob < self.chance[goal_ind]:
return self.rng.uniform(-self.action_max, self.action_max, size=(self.action_dim,))
else:
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma[goal_ind], size=(self.action_dim,))
return action + noise
| import math as M
import numpy as np
class ExpDecayGreedy(object):
# e-greedy exploration with exponential decay
def __init__(self, start=1, end=0.05, decay=50000, decay_start=None, rng=None):
self.start = start
self.end = end
self.decay = decay
self.decay_start = decay_start
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, count):
if self.decay_start is not None:
count -= self.decay_start
if count < 0:
count = 0
epsilon = self.end + (self.start - self.end) * M.exp(-1. * count / self.decay)
prob = self.rng.uniform(0, 1)
if prob < epsilon:
return True
else:
return False
class LinearDecayGreedy(object):
# e-greedy exploration with linear decay
def __init__(self, start=1.0, end=0.1, decay=1000000, decay_start=None, rng=None):
self.start = start
self.end = end
self.decay = decay
self.decay_start = decay_start
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, count):
if self.decay_start is not None:
count -= self.decay_start
if count < 0:
count = 0
if count > self.decay:
count = self.dacay
epsilon = self.start - count * (self.start - self.end) / self.decay
prob = self.rng.uniform(0, 1)
if prob < epsilon:
return True
else:
return False
class OUNoise(object):
# https://github.com/rll/rllab/blob/master/rllab/exploration_strategies/ou_strategy.py
def __init__(self, action_dim, action_max, mu=0, theta=0.2, sigma=1.0, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def __call__(self, action):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * self.rng.standard_normal(len(x))
self.state = x + dx
return np.clip(action + self.state, -self.action_max, self.action_max)
class GaussianNoise(object):
# the one used in the TD3 paper: http://proceedings.mlr.press/v80/fujimoto18a/fujimoto18a.pdf
def __init__(self, action_dim, action_max, scale=1, mu=0, sigma=0.1, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.sigma = sigma
def __call__(self, action):
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma, size=(self.action_dim,))
return np.clip(action + noise, -self.action_max, self.action_max)
class EGreedyGaussian(object):
# the one used in the HER paper: https://arxiv.org/abs/1707.01495
def __init__(self, action_dim, action_max, chance=0.2, scale=1, mu=0, sigma=0.1, rng=None):
self.chance = chance
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.sigma = sigma
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
def __call__(self, action):
chance = self.rng.uniform(0, 1)
if chance < self.chance:
return self.rng.uniform(-self.action_max, self.action_max, size=(self.action_dim,))
else:
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma, size=(self.action_dim,))
return np.clip(action + noise, -self.action_max, self.action_max)
class AutoAdjustingEGreedyGaussian(object):
"""
https://ieeexplore.ieee.org/document/9366328
This exploration class is a goal-success-rate-based auto-adjusting exploration strategy.
It modifies the original constant chance exploration strategy by reducing exploration probabilities and noise deviations
w.r.t. the testing success rate of each goal.
"""
def __init__(self, goal_num, action_dim, action_max, tau=0.05, chance=0.2, scale=1, mu=0, sigma=0.2, rng=None):
if rng is None:
self.rng = np.random.default_rng(seed=0)
else:
self.rng = rng
self.scale = scale
self.action_dim = action_dim
self.action_max = action_max
self.mu = mu
self.base_sigma = sigma
self.sigma = np.ones(self.goal_num) * sigma
self.base_chance = chance
self.goal_num = goal_num
self.tau = tau
self.success_rates = np.zeros(self.goal_num)
self.chance = np.ones(self.goal_num) * chance
def update_success_rates(self, new_tet_suc_rate):
old_tet_suc_rate = self.success_rates.copy()
self.success_rates = (1-self.tau)*old_tet_suc_rate + self.tau*new_tet_suc_rate
self.chance = self.base_chance*(1-self.success_rates)
self.sigma = self.base_sigma*(1-self.success_rates)
def __call__(self, goal_ind, action):
# return a random action or a noisy action
prob = self.rng.uniform(0, 1)
if prob < self.chance[goal_ind]:
return self.rng.uniform(-self.action_max, self.action_max, size=(self.action_dim,))
else:
noise = self.scale*self.rng.normal(loc=self.mu, scale=self.sigma[goal_ind], size=(self.action_dim,))
return action + noise | en | 0.677347 | # e-greedy exploration with exponential decay # e-greedy exploration with linear decay # https://github.com/rll/rllab/blob/master/rllab/exploration_strategies/ou_strategy.py # the one used in the TD3 paper: http://proceedings.mlr.press/v80/fujimoto18a/fujimoto18a.pdf # the one used in the HER paper: https://arxiv.org/abs/1707.01495 https://ieeexplore.ieee.org/document/9366328 This exploration class is a goal-success-rate-based auto-adjusting exploration strategy. It modifies the original constant chance exploration strategy by reducing exploration probabilities and noise deviations w.r.t. the testing success rate of each goal. # return a random action or a noisy action | 2.635807 | 3 |
outd2aclog/gui.py | thedeltaflyer/OUTD_2_ACLOG | 0 | 6618665 | <reponame>thedeltaflyer/OUTD_2_ACLOG<gh_stars>0
# OUTD 2 ACLOG
# ------------
# By: N6ARA
# Converts OutD ADIF log files to ACLOG (N3FJP) ADIF parseable log files
from argparse import ArgumentParser
from tkinter import (Tk, PhotoImage, StringVar, Label, Button, Entry, OptionMenu, E, W)
from tkinter import filedialog
from .static import static_file
from .version import __version__
def browse_files(local):
filename = filedialog.askopenfilename(initialdir="/",
filetypes=[("ADIF files", "*.adif")],
title="Select a File",
)
local['filename'] = filename
local['label_file_explorer'].configure(text=".." + filename[-25:])
def open_file(local, input_filename):
fin = open(input_filename, 'rt')
try:
input_filename.index("OutdLog-")
except ValueError:
local['label_file_explorer'].configure(text=" Invalid File! ")
else:
output_filename = input_filename.replace("OutdLog-", "ACLOG-")
fout = open(output_filename, 'w+')
for line in fin:
if len(local['comment'].get()) == 0:
new_line = line.replace('QSPMSG', 'COMMENT')
new_line = new_line.replace('<EOR>',
'<OTHER:4>' + local['other'].get() + '<MY_GRIDSQUARE:4>' +
local['grid'].get() + '<EOR>')
else:
new_line = line.replace('<EOR>',
'<OTHER:4>' + local['other'].get() + '<MY_GRIDSQUARE:4>' + local['grid'].get() +
'<COMMENT:' + str(len(local['comment'].get())) + '>' + local['comment'].get() +
'<EOR>')
fout.write(new_line)
fout.close()
local['label_file_explorer'].configure(text=" Converted successfully! ")
fin.close()
def main():
parser = ArgumentParser(description='Converts OutD ADIF log files to ACLOG (N3FJP) ADIF parsable log files')
parser.add_argument('--version',
help='Print version and exit.',
action='version',
version=__version__)
_ = parser.parse_args() # This could have more args in the future
window = Tk()
window.title('OUTD 2 ACLOG')
window.resizable(0, 0)
icon = PhotoImage(file=static_file('outd2aclogicon.png'))
window.iconphoto(False, icon)
# local stores mutable data that is used by different parts of the GUI
local = {
'filename': "",
'grid': StringVar(),
'other': StringVar(),
'comment': StringVar(),
'label_file_explorer': Label(window, text="Select OutD ADIF file:"),
}
local['other'].set("SOTA")
button_explore = Button(window,
text="Browse Files",
command=lambda: browse_files(local))
grid_label = Label(window, text='My 4-Digit Gridsqure: ')
grid_entry = Entry(window, textvariable=local['grid'])
otherfield_label = Label(window, text='Type: ', )
otherfield_drop = OptionMenu(window, local['other'], "SOTA", "CHASE")
comment_label = Label(window, text='Comment: ')
comment_entry = Entry(window, textvariable=local['comment'])
button_convert = Button(window,
text="Convert",
command=lambda: open_file(local, local['filename']))
local['label_file_explorer'].grid(row=1, column=0, sticky=E)
button_explore.grid(row=1, column=1, sticky=W)
grid_label.grid(row=3, column=0, sticky=E)
grid_entry.grid(row=3, column=1, sticky=W)
otherfield_label.grid(row=4, column=0, sticky=E)
otherfield_drop.grid(row=4, column=1, sticky=W)
comment_label.grid(row=5, column=0, sticky=E)
comment_entry.grid(row=5, column=1, sticky=W)
button_convert.grid(row=6, column=1)
window.mainloop()
| # OUTD 2 ACLOG
# ------------
# By: N6ARA
# Converts OutD ADIF log files to ACLOG (N3FJP) ADIF parseable log files
from argparse import ArgumentParser
from tkinter import (Tk, PhotoImage, StringVar, Label, Button, Entry, OptionMenu, E, W)
from tkinter import filedialog
from .static import static_file
from .version import __version__
def browse_files(local):
filename = filedialog.askopenfilename(initialdir="/",
filetypes=[("ADIF files", "*.adif")],
title="Select a File",
)
local['filename'] = filename
local['label_file_explorer'].configure(text=".." + filename[-25:])
def open_file(local, input_filename):
fin = open(input_filename, 'rt')
try:
input_filename.index("OutdLog-")
except ValueError:
local['label_file_explorer'].configure(text=" Invalid File! ")
else:
output_filename = input_filename.replace("OutdLog-", "ACLOG-")
fout = open(output_filename, 'w+')
for line in fin:
if len(local['comment'].get()) == 0:
new_line = line.replace('QSPMSG', 'COMMENT')
new_line = new_line.replace('<EOR>',
'<OTHER:4>' + local['other'].get() + '<MY_GRIDSQUARE:4>' +
local['grid'].get() + '<EOR>')
else:
new_line = line.replace('<EOR>',
'<OTHER:4>' + local['other'].get() + '<MY_GRIDSQUARE:4>' + local['grid'].get() +
'<COMMENT:' + str(len(local['comment'].get())) + '>' + local['comment'].get() +
'<EOR>')
fout.write(new_line)
fout.close()
local['label_file_explorer'].configure(text=" Converted successfully! ")
fin.close()
def main():
parser = ArgumentParser(description='Converts OutD ADIF log files to ACLOG (N3FJP) ADIF parsable log files')
parser.add_argument('--version',
help='Print version and exit.',
action='version',
version=__version__)
_ = parser.parse_args() # This could have more args in the future
window = Tk()
window.title('OUTD 2 ACLOG')
window.resizable(0, 0)
icon = PhotoImage(file=static_file('outd2aclogicon.png'))
window.iconphoto(False, icon)
# local stores mutable data that is used by different parts of the GUI
local = {
'filename': "",
'grid': StringVar(),
'other': StringVar(),
'comment': StringVar(),
'label_file_explorer': Label(window, text="Select OutD ADIF file:"),
}
local['other'].set("SOTA")
button_explore = Button(window,
text="Browse Files",
command=lambda: browse_files(local))
grid_label = Label(window, text='My 4-Digit Gridsqure: ')
grid_entry = Entry(window, textvariable=local['grid'])
otherfield_label = Label(window, text='Type: ', )
otherfield_drop = OptionMenu(window, local['other'], "SOTA", "CHASE")
comment_label = Label(window, text='Comment: ')
comment_entry = Entry(window, textvariable=local['comment'])
button_convert = Button(window,
text="Convert",
command=lambda: open_file(local, local['filename']))
local['label_file_explorer'].grid(row=1, column=0, sticky=E)
button_explore.grid(row=1, column=1, sticky=W)
grid_label.grid(row=3, column=0, sticky=E)
grid_entry.grid(row=3, column=1, sticky=W)
otherfield_label.grid(row=4, column=0, sticky=E)
otherfield_drop.grid(row=4, column=1, sticky=W)
comment_label.grid(row=5, column=0, sticky=E)
comment_entry.grid(row=5, column=1, sticky=W)
button_convert.grid(row=6, column=1)
window.mainloop() | en | 0.831226 | # OUTD 2 ACLOG # ------------ # By: N6ARA # Converts OutD ADIF log files to ACLOG (N3FJP) ADIF parseable log files # This could have more args in the future # local stores mutable data that is used by different parts of the GUI | 2.742477 | 3 |
datasets/ucr_uWaveGes.py | alirezaghods/PIPNet | 0 | 6618666 | <reponame>alirezaghods/PIPNet
"""
UCR-UWaveGesture dataset
"""
import os
import numpy as np
import pandas as pd
from scipy.io import arff
import cv2
from tensorflow.keras.utils import to_categorical
def __get_pic(y, module_path):
if y == 0:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/1.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 1:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/2.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 2:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/3.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 3:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/4.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 4:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/5.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 5:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/6.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 6:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/7.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 7:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/8.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
def __class_to_pic(Y, module_path):
pics = []
for y in Y:
pics.append(__get_pic(y, module_path))
return np.expand_dims(np.array(pics),3)
def load_data():
"""
Load and return the UCR-FordA dataset.
============== ==============
Training Samples total 120
Testing Samples total 320
Number of time steps 315
Dimensionality 3
Number of targets 8
============== ==============
# Returns
Tuple of Numpy arrays: (x_train, y_train, pic_train), (x_test, y_test, pic_test)
"""
module_path = os.getcwd()
print(module_path)
train_dim1 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension1_TRAIN.arff')[0])
train_dim2 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension2_TRAIN.arff')[0])
train_dim3 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension3_TRAIN.arff')[0])
test_dim1 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension1_TEST.arff')[0])
test_dim2 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension2_TEST.arff')[0])
test_dim3 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension3_TEST.arff')[0])
X_train = np.stack([train_dim1[train_dim1.columns[:315]].to_numpy(),train_dim2[train_dim2.columns[:315]].to_numpy(),train_dim3[train_dim3.columns[:315]].to_numpy()],axis=2)
X_test = np.stack([test_dim1[test_dim1.columns[:315]].to_numpy(),test_dim2[test_dim2.columns[:315]].to_numpy(),test_dim3[test_dim3.columns[:315]].to_numpy()],axis=2)
y_train = np.array([int(float(y))-1 for y in list(train_dim1.classAttribute)])
y_test = np.array([int(float(y))-1 for y in list(test_dim1.classAttribute)])
pic_train = __class_to_pic(y_train, module_path)
pic_test = __class_to_pic(y_test, module_path)
# y_train = to_categorical(y_train, num_classes=8)
# y_test = to_categorical(y_test, num_classes=8)
return (X_train, y_train, pic_train), (X_test, y_test, pic_test)
| """
UCR-UWaveGesture dataset
"""
import os
import numpy as np
import pandas as pd
from scipy.io import arff
import cv2
from tensorflow.keras.utils import to_categorical
def __get_pic(y, module_path):
if y == 0:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/1.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 1:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/2.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 2:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/3.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 3:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/4.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 4:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/5.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 5:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/6.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 6:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/7.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
elif y == 7:
return cv2.imread(module_path+'/datasets/pics/UWaveGesture/8.png', cv2.IMREAD_GRAYSCALE).astype('float32') / 255.
def __class_to_pic(Y, module_path):
pics = []
for y in Y:
pics.append(__get_pic(y, module_path))
return np.expand_dims(np.array(pics),3)
def load_data():
"""
Load and return the UCR-FordA dataset.
============== ==============
Training Samples total 120
Testing Samples total 320
Number of time steps 315
Dimensionality 3
Number of targets 8
============== ==============
# Returns
Tuple of Numpy arrays: (x_train, y_train, pic_train), (x_test, y_test, pic_test)
"""
module_path = os.getcwd()
print(module_path)
train_dim1 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension1_TRAIN.arff')[0])
train_dim2 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension2_TRAIN.arff')[0])
train_dim3 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension3_TRAIN.arff')[0])
test_dim1 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension1_TEST.arff')[0])
test_dim2 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension2_TEST.arff')[0])
test_dim3 = pd.DataFrame(arff.loadarff(module_path + '/datasets/data/UWaveGestureLibrary/UWaveGestureLibraryDimension3_TEST.arff')[0])
X_train = np.stack([train_dim1[train_dim1.columns[:315]].to_numpy(),train_dim2[train_dim2.columns[:315]].to_numpy(),train_dim3[train_dim3.columns[:315]].to_numpy()],axis=2)
X_test = np.stack([test_dim1[test_dim1.columns[:315]].to_numpy(),test_dim2[test_dim2.columns[:315]].to_numpy(),test_dim3[test_dim3.columns[:315]].to_numpy()],axis=2)
y_train = np.array([int(float(y))-1 for y in list(train_dim1.classAttribute)])
y_test = np.array([int(float(y))-1 for y in list(test_dim1.classAttribute)])
pic_train = __class_to_pic(y_train, module_path)
pic_test = __class_to_pic(y_test, module_path)
# y_train = to_categorical(y_train, num_classes=8)
# y_test = to_categorical(y_test, num_classes=8)
return (X_train, y_train, pic_train), (X_test, y_test, pic_test) | en | 0.572336 | UCR-UWaveGesture dataset Load and return the UCR-FordA dataset. ============== ============== Training Samples total 120 Testing Samples total 320 Number of time steps 315 Dimensionality 3 Number of targets 8 ============== ============== # Returns Tuple of Numpy arrays: (x_train, y_train, pic_train), (x_test, y_test, pic_test) # y_train = to_categorical(y_train, num_classes=8) # y_test = to_categorical(y_test, num_classes=8) | 2.803566 | 3 |
fpga/script/mmi_gen.py | scarv/xdivinsa | 2 | 6618667 | <reponame>scarv/xdivinsa
import sys
import re
# check arguments
if len(sys.argv) != 6:
print("Wrong arguments\nmmi_gen in out bus-width mem-size part")
exit()
fin = sys.argv[1]
fout = sys.argv[2]
bwidth= int(sys.argv[3])
msize = int(sys.argv[4])
dpart = sys.argv[5]
# read the ramb search result
f = open(fin, "r")
lines = f.readlines()
f.close()
rams = []
n=0
for i, line in enumerate(lines):
ram_match = re.match(r"ram_reg_(\d+)", line)
if ram_match:
loc_match = re.match(r"LOC[\w\s]+RAMB(\d+)_X(\d+)Y(\d+)", lines[i+2])
if loc_match:
rams.append((n, loc_match.group(2), loc_match.group(3)))
n = n + 1
# get the bit-width of each
if bwidth % len(rams) != 0:
print("Cannot divide memory bus evenly into BRAMs!")
exit()
DW = bwidth / len(rams)
MS = "%#010x"%(msize - 1)
BS = (msize*8)/bwidth
rams = sorted(rams, key=lambda r: r[0], reverse=True)
f = open(fout, "w")
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write('<MemInfo Version="1" Minor="0">\n')
f.write('\t<Processor Endianness="Little" InstPath="dummy">\n')
f.write('\t\t<AddressSpace Name="ram" Begin="0" End="{0}">\n'.format(msize-1))
f.write('\t\t\t<BusBlock>\n')
#f.write('ADDRESS_SPACE BOOTRAM RAMB32 [0x00000000:{0}]\n'.format(MS))
#f.write(" BUS_BLOCK\n")
for r in rams:
#f.write(' ram_reg_{0} [{1}:{2}] LOC = X{3}Y{4};\n'.format(r[0], r[0]*DW+DW-1, r[0]*DW, r[1], r[2]))
f.write('\t\t\t\t<BitLane MemType="RAMB32" Placement="X{0}Y{1}">\n'.format(r[1], r[2]))
f.write('\t\t\t\t\t<DataWidth MSB="{0}" LSB="{1}"/>\n'.format(int(r[0]*DW+DW-1), int(r[0]*DW)))
f.write('\t\t\t\t\t<AddressRange Begin="0" End="{0}"/>\n'.format(int(BS-1)))
f.write('\t\t\t\t\t<Parity ON="false" NumBits="0"/>\n')
f.write('\t\t\t\t</BitLane>\n')
f.write('\t\t\t</BusBlock>\n')
f.write('\t\t</AddressSpace>\n')
f.write('\t</Processor>\n')
f.write('<Config>\n')
f.write('\t<Option Name="Part" Val="{0}"/>\n'.format(dpart))
f.write('</Config>\n')
f.write('</MemInfo>\n')
#f.write(" END_BUS_BLOCK;\n")
#f.write("END_ADDRESS_SPACE;\n")
f.close()
| import sys
import re
# check arguments
if len(sys.argv) != 6:
print("Wrong arguments\nmmi_gen in out bus-width mem-size part")
exit()
fin = sys.argv[1]
fout = sys.argv[2]
bwidth= int(sys.argv[3])
msize = int(sys.argv[4])
dpart = sys.argv[5]
# read the ramb search result
f = open(fin, "r")
lines = f.readlines()
f.close()
rams = []
n=0
for i, line in enumerate(lines):
ram_match = re.match(r"ram_reg_(\d+)", line)
if ram_match:
loc_match = re.match(r"LOC[\w\s]+RAMB(\d+)_X(\d+)Y(\d+)", lines[i+2])
if loc_match:
rams.append((n, loc_match.group(2), loc_match.group(3)))
n = n + 1
# get the bit-width of each
if bwidth % len(rams) != 0:
print("Cannot divide memory bus evenly into BRAMs!")
exit()
DW = bwidth / len(rams)
MS = "%#010x"%(msize - 1)
BS = (msize*8)/bwidth
rams = sorted(rams, key=lambda r: r[0], reverse=True)
f = open(fout, "w")
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write('<MemInfo Version="1" Minor="0">\n')
f.write('\t<Processor Endianness="Little" InstPath="dummy">\n')
f.write('\t\t<AddressSpace Name="ram" Begin="0" End="{0}">\n'.format(msize-1))
f.write('\t\t\t<BusBlock>\n')
#f.write('ADDRESS_SPACE BOOTRAM RAMB32 [0x00000000:{0}]\n'.format(MS))
#f.write(" BUS_BLOCK\n")
for r in rams:
#f.write(' ram_reg_{0} [{1}:{2}] LOC = X{3}Y{4};\n'.format(r[0], r[0]*DW+DW-1, r[0]*DW, r[1], r[2]))
f.write('\t\t\t\t<BitLane MemType="RAMB32" Placement="X{0}Y{1}">\n'.format(r[1], r[2]))
f.write('\t\t\t\t\t<DataWidth MSB="{0}" LSB="{1}"/>\n'.format(int(r[0]*DW+DW-1), int(r[0]*DW)))
f.write('\t\t\t\t\t<AddressRange Begin="0" End="{0}"/>\n'.format(int(BS-1)))
f.write('\t\t\t\t\t<Parity ON="false" NumBits="0"/>\n')
f.write('\t\t\t\t</BitLane>\n')
f.write('\t\t\t</BusBlock>\n')
f.write('\t\t</AddressSpace>\n')
f.write('\t</Processor>\n')
f.write('<Config>\n')
f.write('\t<Option Name="Part" Val="{0}"/>\n'.format(dpart))
f.write('</Config>\n')
f.write('</MemInfo>\n')
#f.write(" END_BUS_BLOCK;\n")
#f.write("END_ADDRESS_SPACE;\n")
f.close() | en | 0.270052 | # check arguments # read the ramb search result # get the bit-width of each #010x"%(msize - 1) #f.write('ADDRESS_SPACE BOOTRAM RAMB32 [0x00000000:{0}]\n'.format(MS)) #f.write(" BUS_BLOCK\n") #f.write(' ram_reg_{0} [{1}:{2}] LOC = X{3}Y{4};\n'.format(r[0], r[0]*DW+DW-1, r[0]*DW, r[1], r[2])) #f.write(" END_BUS_BLOCK;\n") #f.write("END_ADDRESS_SPACE;\n") | 2.595028 | 3 |
src/scenes/ui_element.py | codingblocks/gotta-get-out | 0 | 6618668 | from typing import Callable
import pygame
from pygame.sprite import Sprite
from src.config import MAIN_FONT_FILE, UI_ICON_FONT_SIZE, UI_ICON_TEXT_BUFFER, UI_ICON_TEXT_HEIGHT_BUFFER
class UiElement(Sprite):
def __init__(self, image:pygame.Surface, pos: tuple, get_text: Callable[[], str]):
pygame.sprite.Sprite.__init__(self)
self.font = pygame.font.Font(MAIN_FONT_FILE, UI_ICON_FONT_SIZE)
self.image = image
self.rect = image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.get_text = get_text
def render_text(self, surface):
img = self.font.render(self.get_text(), True, 'white')
surface.blit(img, (self.rect[0] + UI_ICON_TEXT_BUFFER, self.rect[1] + UI_ICON_TEXT_HEIGHT_BUFFER)) | from typing import Callable
import pygame
from pygame.sprite import Sprite
from src.config import MAIN_FONT_FILE, UI_ICON_FONT_SIZE, UI_ICON_TEXT_BUFFER, UI_ICON_TEXT_HEIGHT_BUFFER
class UiElement(Sprite):
def __init__(self, image:pygame.Surface, pos: tuple, get_text: Callable[[], str]):
pygame.sprite.Sprite.__init__(self)
self.font = pygame.font.Font(MAIN_FONT_FILE, UI_ICON_FONT_SIZE)
self.image = image
self.rect = image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.get_text = get_text
def render_text(self, surface):
img = self.font.render(self.get_text(), True, 'white')
surface.blit(img, (self.rect[0] + UI_ICON_TEXT_BUFFER, self.rect[1] + UI_ICON_TEXT_HEIGHT_BUFFER)) | none | 1 | 2.866689 | 3 | |
backend/appengine/routes/courses/home.py | SamaraCardoso27/eMakeup | 0 | 6618669 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from student.student_model import Course
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from routes.courses import edit
from routes.courses.new import salvar
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def index():
query = Course.query_order_by_name()
edit_path_base = to_path(edit)
deletar_path_base = to_path(deletar)
courses = query.fetch()
for cat in courses:
key = cat.key
key_id = key.id()
cat.edit_path = to_path(edit_path_base, key_id)
cat.deletar_path = to_path(deletar_path_base, key_id)
ctx = {'salvar_path': to_path(salvar),
'courses': courses}
return TemplateResponse(ctx, 'courses/courses_home.html')
@login_not_required
@no_csrf
def deletar(course_id):
key = ndb.Key(Course, int(course_id))
key.delete()
return RedirectResponse(index) | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from student.student_model import Course
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from routes.courses import edit
from routes.courses.new import salvar
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def index():
query = Course.query_order_by_name()
edit_path_base = to_path(edit)
deletar_path_base = to_path(deletar)
courses = query.fetch()
for cat in courses:
key = cat.key
key_id = key.id()
cat.edit_path = to_path(edit_path_base, key_id)
cat.deletar_path = to_path(deletar_path_base, key_id)
ctx = {'salvar_path': to_path(salvar),
'courses': courses}
return TemplateResponse(ctx, 'courses/courses_home.html')
@login_not_required
@no_csrf
def deletar(course_id):
key = ndb.Key(Course, int(course_id))
key.delete()
return RedirectResponse(index) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.906303 | 2 |
adv/summer_verica.py | LorentzB/dl | 0 | 6618670 | from core.advbase import *
class Summer_Verica(Adv):
def prerun(self):
self.s2.autocharge_init(self.s2_autocharge).on()
def s2_autocharge(self, t):
if self.s1.sp > self.s1.charged:
log('s2', 1578)
self.s2.charge(1578)
variants = {None: Summer_Verica}
| from core.advbase import *
class Summer_Verica(Adv):
def prerun(self):
self.s2.autocharge_init(self.s2_autocharge).on()
def s2_autocharge(self, t):
if self.s1.sp > self.s1.charged:
log('s2', 1578)
self.s2.charge(1578)
variants = {None: Summer_Verica}
| none | 1 | 2.199195 | 2 | |
leetcode/142.py | Cannizza-zzk/python_review | 0 | 6618671 | <filename>leetcode/142.py
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
node_dict = {}
index = 0
while True:
if head == None:
return None
if node_dict.get(head) == None:
node_dict[head] = index
index += 1
head = head.next
else:
return head
| <filename>leetcode/142.py
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
node_dict = {}
index = 0
while True:
if head == None:
return None
if node_dict.get(head) == None:
node_dict[head] = index
index += 1
head = head.next
else:
return head
| en | 0.60389 | # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None | 3.589548 | 4 |
blog/admin.py | Alan-CQU/MyBlog | 0 | 6618672 |
# Register your models here.
from django.contrib import admin
from .models import BlogType,Blog
@admin.register(Blog)
class BlogTypeAdmin(admin.ModelAdmin):
list_display = ("id","title","author","get_read_num","blog_type","create_time","last_update_time")
ordering = ("id",) # id positive order -id,negetive
@admin.register(BlogType)
class BlogTypeAdmin(admin.ModelAdmin):
list_display = ("id", "type_name")
ordering = ("id",) # id positive order -id,negetive
|
# Register your models here.
from django.contrib import admin
from .models import BlogType,Blog
@admin.register(Blog)
class BlogTypeAdmin(admin.ModelAdmin):
list_display = ("id","title","author","get_read_num","blog_type","create_time","last_update_time")
ordering = ("id",) # id positive order -id,negetive
@admin.register(BlogType)
class BlogTypeAdmin(admin.ModelAdmin):
list_display = ("id", "type_name")
ordering = ("id",) # id positive order -id,negetive
| en | 0.820753 | # Register your models here. # id positive order -id,negetive # id positive order -id,negetive | 2.089342 | 2 |
utils.py | platonic-realm/UM-Dissertation | 0 | 6618673 | <filename>utils.py
import io
import itertools
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
base_path = "C:\\Users\\Arash\\Desktop\\UM-Benchmark\\"
def plot_to_image(figure):
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def plot_confusion_matrix(cm, class_names, fig_size):
figure = plt.figure(figsize=(fig_size, fig_size))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
labels = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, labels[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
def plot_filter_layer(model, ds_test):
layer_names = [layer.name for layer in model.layers]
layer_outputs = [layer.output for layer in model.layers]
feature_map_model = tf.keras.models.Model(model.input, layer_outputs[1])
image = None
i = 0
for x, y in ds_test:
if i == 0:
image = x[0]
i = i + 1
else:
pass
image_dim = image.shape[0]
image_size = image_dim / 3
plt.figure(figsize=(image_size, image_size))
plt.title("Original")
plt.grid(False)
plt.imshow(image, aspect='auto')
image = tf.reshape(image, (1, image_dim, image_dim, 3))
feature_maps = feature_map_model.predict(image)
max_rows = 16
max_cols = 3
fig, axes = plt.subplots(nrows=max_rows, ncols=max_cols, figsize=(10, 60))
for layer_name, feature_map in zip(layer_names, feature_maps):
if len(feature_map.shape) == 3:
k = feature_map.shape[-1]
for i in range(k):
feature_image = feature_map[:, :, i]
feature_image -= feature_image.mean()
feature_image /= feature_image.std()
feature_image *= 64
feature_image += 128
feature_image = np.clip(feature_image, 0, 255).astype('uint8')
row = i // max_cols
col = i % max_cols
axes[row, col].axis("off")
axes[row, col].imshow(feature_image, cmap="gray")
plt.subplots_adjust(wspace=0.98, hspace=0.1, left=0.1, bottom=0.0, top=1.0, right=0.9)
plt.show() | <filename>utils.py
import io
import itertools
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
base_path = "C:\\Users\\Arash\\Desktop\\UM-Benchmark\\"
def plot_to_image(figure):
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def plot_confusion_matrix(cm, class_names, fig_size):
figure = plt.figure(figsize=(fig_size, fig_size))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
labels = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)
threshold = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, labels[i, j], horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
def plot_filter_layer(model, ds_test):
layer_names = [layer.name for layer in model.layers]
layer_outputs = [layer.output for layer in model.layers]
feature_map_model = tf.keras.models.Model(model.input, layer_outputs[1])
image = None
i = 0
for x, y in ds_test:
if i == 0:
image = x[0]
i = i + 1
else:
pass
image_dim = image.shape[0]
image_size = image_dim / 3
plt.figure(figsize=(image_size, image_size))
plt.title("Original")
plt.grid(False)
plt.imshow(image, aspect='auto')
image = tf.reshape(image, (1, image_dim, image_dim, 3))
feature_maps = feature_map_model.predict(image)
max_rows = 16
max_cols = 3
fig, axes = plt.subplots(nrows=max_rows, ncols=max_cols, figsize=(10, 60))
for layer_name, feature_map in zip(layer_names, feature_maps):
if len(feature_map.shape) == 3:
k = feature_map.shape[-1]
for i in range(k):
feature_image = feature_map[:, :, i]
feature_image -= feature_image.mean()
feature_image /= feature_image.std()
feature_image *= 64
feature_image += 128
feature_image = np.clip(feature_image, 0, 255).astype('uint8')
row = i // max_cols
col = i % max_cols
axes[row, col].axis("off")
axes[row, col].imshow(feature_image, cmap="gray")
plt.subplots_adjust(wspace=0.98, hspace=0.1, left=0.1, bottom=0.0, top=1.0, right=0.9)
plt.show() | en | 0.760624 | # Closing the figure prevents it from being displayed directly inside # the notebook. # Convert PNG buffer to TF image # Add the batch dimension | 2.739084 | 3 |
LeetCode/257_Binary_Tree_Paths/main.py | sungmen/Acmicpc_Solve | 1 | 6618674 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def __init__(self):
self.res = []
def dfs(self, root: TreeNode, write: str) -> None:
if root.left != None:
tmp = write + ('->' + str(root.left.val))
self.dfs(root.left, tmp)
if root.right != None:
tmp = write + ('->' + str(root.right.val))
self.dfs(root.right, tmp)
if root.left == None and root.right == None:
self.res.append(write)
def binaryTreePaths(self, root: TreeNode) -> List[str]:
w = str(root.val)
self.dfs(root, w)
return self.res | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def __init__(self):
self.res = []
def dfs(self, root: TreeNode, write: str) -> None:
if root.left != None:
tmp = write + ('->' + str(root.left.val))
self.dfs(root.left, tmp)
if root.right != None:
tmp = write + ('->' + str(root.right.val))
self.dfs(root.right, tmp)
if root.left == None and root.right == None:
self.res.append(write)
def binaryTreePaths(self, root: TreeNode) -> List[str]:
w = str(root.val)
self.dfs(root, w)
return self.res | en | 0.53741 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right | 3.703278 | 4 |
realworldrl_suite/utils/wrappers.py | isabella232/realworldrl_suite | 284 | 6618675 | <gh_stars>100-1000
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RealWorld RL env logging wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from dm_control.rl import control
import dm_env
from dm_env import specs
from realworldrl_suite.utils import accumulators
import six
class LoggingEnv(control.Environment):
"""Subclass of control.Environment which adds logging."""
def __init__(self,
physics,
task,
logger=None,
log_safety_vars=False,
time_limit=float('inf'),
control_timestep=None,
n_sub_steps=None,
log_every=100,
flat_observation=False):
"""A subclass of `Environment` with logging hooks.
Args:
physics: Instance of `Physics`.
task: Instance of `Task`.
logger: Instance of 'realworldrl.utils.loggers.LoggerEnv', if specified
will be used to log necessary data for realworld eval.
log_safety_vars: If we should also log vars in self._task.safety_vars(),
generally used for debugging or to find pertinent values for vars, will
increase size of files on disk
time_limit: Optional `int`, maximum time for each episode in seconds. By
default this is set to infinite.
control_timestep: Optional control time-step, in seconds.
n_sub_steps: Optional number of physical time-steps in one control
time-step, aka "action repeats". Can only be supplied if
`control_timestep` is not specified.
log_every: How many episodes between each log write.
flat_observation: If True, observations will be flattened and concatenated
into a single numpy array.
Raises:
ValueError: If both `n_sub_steps` and `control_timestep` are supplied.
"""
super(LoggingEnv, self).__init__(
physics,
task,
time_limit,
control_timestep,
n_sub_steps,
flat_observation=False)
self._flat_observation_ = flat_observation
self._logger = logger
self._buffer = []
self._counter = 0
self._log_every = log_every
self._ep_counter = 0
self._log_safety_vars = self._task.safety_enabled and log_safety_vars
if self._logger:
meta_dict = dict(task_name=type(self._task).__name__)
if self._task.safety_enabled:
meta_dict['safety_constraints'] = list(self._task.constraints.keys())
if self._log_safety_vars:
meta_dict['safety_vars'] = list(
list(self._task.safety_vars(self._physics).keys()))
self._logger.set_meta(meta_dict)
self._stats_acc = accumulators.StatisticsAccumulator(
acc_safety=self._task.safety_enabled,
acc_safety_vars=self._log_safety_vars,
acc_multiobj=self._task.multiobj_enabled)
else:
self._stats_acc = None
def reset(self):
"""Starts a new episode and returns the first `TimeStep`."""
if self._stats_acc:
self._stats_acc.clear_buffer()
if self._task.perturb_enabled:
if self._counter % self._task.perturb_period == 0:
self._physics = self._task.update_physics()
self._counter += 1
timestep = super(LoggingEnv, self).reset()
self._track(timestep)
if self._flat_observation_:
timestep = dm_env.TimeStep(
step_type=timestep.step_type,
reward=None,
discount=None,
observation=control.flatten_observation(
timestep.observation)['observations'])
return timestep
def observation_spec(self):
"""Returns the observation specification for this environment.
Infers the spec from the observation, unless the Task implements the
`observation_spec` method.
Returns:
An dict mapping observation name to `ArraySpec` containing observation
shape and dtype.
"""
self._flat_observation = self._flat_observation_
obs_spec = super(LoggingEnv, self).observation_spec()
self._flat_observation = False
if self._flat_observation_:
return obs_spec['observations']
return obs_spec
def step(self, action):
"""Updates the environment using the action and returns a `TimeStep`."""
do_track = not self._reset_next_step
timestep = super(LoggingEnv, self).step(action)
if do_track:
self._track(timestep)
if timestep.last():
self._ep_counter += 1
if self._ep_counter % self._log_every == 0:
self.write_logs()
# Only flatten observation if we're not forwarding one from a reset(),
# as it will already be flattened.
if self._flat_observation_ and not timestep.first():
timestep = dm_env.TimeStep(
step_type=timestep.step_type,
reward=timestep.reward,
discount=timestep.discount,
observation=control.flatten_observation(
timestep.observation)['observations'])
return timestep
def _track(self, timestep):
if self._logger is None:
return
ts = copy.deepcopy(timestep)
# Augment the timestep with unobserved variables for logging purposes.
# Add safety-related observations.
if self._task.safety_enabled and 'constraints' not in ts.observation:
ts.observation['constraints'] = copy.copy(self._task.constraints_obs)
if self._log_safety_vars:
ts.observation['safety_vars'] = copy.deepcopy(
self._task.safety_vars(self._physics))
if self._task.multiobj_enabled and 'multiobj' not in ts.observation:
ts.observation['multiobj'] = self._task.get_multiobj_obs(self._physics)
self._stats_acc.push(ts)
def get_logs(self):
return self._logger.logs
def write_logs(self):
if self._logger is None:
return
self._logger.save(data=self._stats_acc.to_ndarray_dict())
@property
def stats_acc(self):
return self._stats_acc
@property
def logs_path(self):
if self._logger is None:
return None
return self._logger.logs_path
def _spec_from_observation(observation):
result = collections.OrderedDict()
for key, value in six.iteritems(observation):
result[key] = specs.Array(value.shape, value.dtype, name=key)
return result
| # coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RealWorld RL env logging wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from dm_control.rl import control
import dm_env
from dm_env import specs
from realworldrl_suite.utils import accumulators
import six
class LoggingEnv(control.Environment):
"""Subclass of control.Environment which adds logging."""
def __init__(self,
physics,
task,
logger=None,
log_safety_vars=False,
time_limit=float('inf'),
control_timestep=None,
n_sub_steps=None,
log_every=100,
flat_observation=False):
"""A subclass of `Environment` with logging hooks.
Args:
physics: Instance of `Physics`.
task: Instance of `Task`.
logger: Instance of 'realworldrl.utils.loggers.LoggerEnv', if specified
will be used to log necessary data for realworld eval.
log_safety_vars: If we should also log vars in self._task.safety_vars(),
generally used for debugging or to find pertinent values for vars, will
increase size of files on disk
time_limit: Optional `int`, maximum time for each episode in seconds. By
default this is set to infinite.
control_timestep: Optional control time-step, in seconds.
n_sub_steps: Optional number of physical time-steps in one control
time-step, aka "action repeats". Can only be supplied if
`control_timestep` is not specified.
log_every: How many episodes between each log write.
flat_observation: If True, observations will be flattened and concatenated
into a single numpy array.
Raises:
ValueError: If both `n_sub_steps` and `control_timestep` are supplied.
"""
super(LoggingEnv, self).__init__(
physics,
task,
time_limit,
control_timestep,
n_sub_steps,
flat_observation=False)
self._flat_observation_ = flat_observation
self._logger = logger
self._buffer = []
self._counter = 0
self._log_every = log_every
self._ep_counter = 0
self._log_safety_vars = self._task.safety_enabled and log_safety_vars
if self._logger:
meta_dict = dict(task_name=type(self._task).__name__)
if self._task.safety_enabled:
meta_dict['safety_constraints'] = list(self._task.constraints.keys())
if self._log_safety_vars:
meta_dict['safety_vars'] = list(
list(self._task.safety_vars(self._physics).keys()))
self._logger.set_meta(meta_dict)
self._stats_acc = accumulators.StatisticsAccumulator(
acc_safety=self._task.safety_enabled,
acc_safety_vars=self._log_safety_vars,
acc_multiobj=self._task.multiobj_enabled)
else:
self._stats_acc = None
def reset(self):
  """Begins a fresh episode and returns its first `TimeStep`."""
  if self._stats_acc:
    self._stats_acc.clear_buffer()
  if self._task.perturb_enabled:
    # Rebuild the physics every `perturb_period` resets so scheduled
    # perturbations take effect.
    if self._counter % self._task.perturb_period == 0:
      self._physics = self._task.update_physics()
    self._counter += 1
  first_step = super(LoggingEnv, self).reset()
  self._track(first_step)
  if not self._flat_observation_:
    return first_step
  # Flatten here (rather than in the base class) so _track() above saw the
  # structured observation.
  flat_obs = control.flatten_observation(
      first_step.observation)['observations']
  return dm_env.TimeStep(
      step_type=first_step.step_type,
      reward=None,
      discount=None,
      observation=flat_obs)
def observation_spec(self):
  """Returns the observation specification for this environment.

  Infers the spec from the observation, unless the Task implements the
  `observation_spec` method.

  Returns:
    An dict mapping observation name to `ArraySpec` containing observation
    shape and dtype.
  """
  # Temporarily expose the requested flattening mode to the base class so it
  # builds the matching spec, then restore the always-False default.
  self._flat_observation = self._flat_observation_
  spec = super(LoggingEnv, self).observation_spec()
  self._flat_observation = False
  if not self._flat_observation_:
    return spec
  return spec['observations']
def step(self, action):
  """Updates the environment using the action and returns a `TimeStep`."""
  # When a reset is pending, the base class forwards a reset() timestep that
  # reset() has already tracked, so don't track it a second time.
  should_track = not self._reset_next_step
  ts = super(LoggingEnv, self).step(action)
  if should_track:
    self._track(ts)
  if ts.last():
    self._ep_counter += 1
    if self._ep_counter % self._log_every == 0:
      self.write_logs()
  # Only flatten observation if we're not forwarding one from a reset(),
  # as it will already be flattened.
  if self._flat_observation_ and not ts.first():
    flat_obs = control.flatten_observation(ts.observation)['observations']
    ts = dm_env.TimeStep(
        step_type=ts.step_type,
        reward=ts.reward,
        discount=ts.discount,
        observation=flat_obs)
  return ts
def _track(self, timestep):
  """Pushes a deep copy of `timestep`, augmented for logging, to the stats
  accumulator. No-op when no logger is configured."""
  if self._logger is None:
    return
  tracked = copy.deepcopy(timestep)
  obs = tracked.observation
  # Augment the timestep with unobserved variables for logging purposes.
  if self._task.safety_enabled and 'constraints' not in obs:
    # Safety-related observations the agent may not have seen.
    obs['constraints'] = copy.copy(self._task.constraints_obs)
  if self._log_safety_vars:
    obs['safety_vars'] = copy.deepcopy(self._task.safety_vars(self._physics))
  if self._task.multiobj_enabled and 'multiobj' not in obs:
    obs['multiobj'] = self._task.get_multiobj_obs(self._physics)
  self._stats_acc.push(tracked)
def get_logs(self):
  """Returns the logger's accumulated logs.

  Returns:
    The logger's `logs`, or None when no logger was supplied at construction
    time (previously this raised AttributeError; returning None matches
    write_logs() and the logs_path property).
  """
  if self._logger is None:
    return None
  return self._logger.logs
def write_logs(self):
  """Persists the accumulated episode statistics via the logger.

  No-op when logging was not enabled at construction time.
  """
  logger = self._logger
  if logger is not None:
    logger.save(data=self._stats_acc.to_ndarray_dict())
@property
def stats_acc(self):
  """The StatisticsAccumulator collecting per-episode data.

  None when no logger was supplied at construction time.
  """
  return self._stats_acc
@property
def logs_path(self):
  """Path the logger writes to, or None when logging is disabled."""
  return None if self._logger is None else self._logger.logs_path
def _spec_from_observation(observation):
  """Builds an ordered name -> `specs.Array` mapping mirroring `observation`."""
  return collections.OrderedDict(
      (key, specs.Array(value.shape, value.dtype, name=key))
      for key, value in six.iteritems(observation))
Returns: An dict mapping observation name to `ArraySpec` containing observation shape and dtype. Updates the environment using the action and returns a `TimeStep`. # Only flatten observation if we're not forwarding one from a reset(), # as it will already be flattened. # Augment the timestep with unobserved variables for logging purposes. # Add safety-related observations. | 2.17187 | 2 |
Data Structures/dictionary.py | marinaoliveira96/python-exercises | 0 | 6618676 | point= {"x": 1, "y": 2}
point = dict(x=1, y=2)
# temo que buscar epla key
point["x"] = 10
point["z"]=12
print(point)
if "a" in point:
print(point["a"])
print(point.get("a", 0)
)
del point["x"]
print(point)
for key, value in point.items():
print(key, value) | point= {"x": 1, "y": 2}
point = dict(x=1, y=2)
# temo que buscar epla key
point["x"] = 10
point["z"]=12
print(point)
if "a" in point:
print(point["a"])
print(point.get("a", 0)
)
del point["x"]
print(point)
for key, value in point.items():
print(key, value) | es | 0.484564 | # temo que buscar epla key | 3.868127 | 4 |
dlapp/apps/hierarchy/forms.py | edv862/dlapp | 0 | 6618677 | <reponame>edv862/dlapp
from django import forms
from .models import SearchValues
class FileUploadHierarchyForm(forms.Form):
file = forms.FileField(
widget=forms.ClearableFileInput(
attrs={
'multiple': True,
}
)
)
class Meta:
verbose_name = 'File upload form'
class ConsultUsagePartIdHierarchyForm(forms.ModelForm):
class Meta:
model = SearchValues
fields = (
'usage', 'part',
)
| from django import forms
from .models import SearchValues
class FileUploadHierarchyForm(forms.Form):
file = forms.FileField(
widget=forms.ClearableFileInput(
attrs={
'multiple': True,
}
)
)
class Meta:
verbose_name = 'File upload form'
class ConsultUsagePartIdHierarchyForm(forms.ModelForm):
class Meta:
model = SearchValues
fields = (
'usage', 'part',
) | none | 1 | 1.862933 | 2 | |
dl_util/get_yaml_dict.py | llien30/dl-util | 0 | 6618678 | """
Copyright (c) 2021 <NAME>
This software is released under the MIT License, see LICENSE.
"""
import yaml
from addict import Dict
def get_yaml_dict(path: str) -> Dict:
with open(path, "r") as f:
config_dict = yaml.safe_load(f)
config = Dict(config_dict)
return config
| """
Copyright (c) 2021 <NAME>
This software is released under the MIT License, see LICENSE.
"""
import yaml
from addict import Dict
def get_yaml_dict(path: str) -> Dict:
with open(path, "r") as f:
config_dict = yaml.safe_load(f)
config = Dict(config_dict)
return config
| en | 0.89971 | Copyright (c) 2021 <NAME> This software is released under the MIT License, see LICENSE. | 2.38397 | 2 |
components.py | AidanOB/Thesis | 4 | 6618679 | __author__ = "<NAME>"
"""
A module which converts the csv files into pandas data frames, for easy retrieval.
"""
import pandas as pd
import numpy as np
from fuzzy_values import create_value_array
structures = pd.DataFrame.from_csv(path='./Component Files/structures.csv', sep=';', encoding='iso-8859-1')
components = pd.DataFrame.from_csv(path='./Component Files/components.csv', sep=',', encoding='iso-8859-1')
systems = pd.DataFrame.from_csv(path='./Component Files/systems.csv', sep=';', encoding='iso-8859-1')
panels = pd.DataFrame.from_csv(path='./Component Files/panels.csv', sep=',', encoding='iso-8859-1')
comp_hidden = pd.DataFrame.from_csv(path='./Component Files/components_hidden.csv', sep=';', encoding='iso-8859-1')
comp_pre_built = pd.concat([comp_hidden, components], ignore_index=True)
def create_system_metrics(system):
"""
This function takes a fully parsed system of raw data values and calculates the system's metric scores.
:param system: A numpy array representing the system
:return: A numpy array that contains metrics formulated for both the FAM algorithm and the genetic algorithm
"""
pass
def calculate_cpu_metric(data, code, ram):
"""
This function calculates the cpu's data and general capability based upon the memory and RAM available to it. It
doesn't consider the speed of the chip. It calculates based on the following equation:
metric = (data/max_data + code/max_code + ram/max_ram) / 3
This normalises each value against the maximum value in the database. Then all are weighted equally
Then the total is normalised into the range [0, 1]
:param data: The dedicated data storage for the system
:param code: The memory available to code or additional storage space
:param ram: The memory in ram, important for more complicated processes onboard the satellite
:return: A numerical value that contains the the calculated metric for the system
"""
# max_data = 15000 # Matching an ideal state
# max_code = 100 # Near enough to the maximum value to be an ideal state
# max_ram = 128 # Less than the maximum, but reaches an ideal state
#
# data_met = (data / max_data).clip(min=0, max=1)
# code_met = (code / max_code).clip(min=0, max=1)
# ram_met = (ram / max_ram).clip(min=0, max=1)
#
# return np.abs((data_met + code_met + ram_met) / 3).clip(min=0, max=1)
"""
The above code was the old CPU metric in an attempt to calculate performance. As it is no longer utilised, and is
simply a binary check for the presence of a flightboard.
Totals is used to find if there is a positive amount of memory, which is present on all flightboards.
It is simply the sum of any of the categories of memory.
If the value is greater than 0, then it returns 1, else returns 0
"""
totals = data + code + ram
if totals > 0:
return 1
else:
return 0
def calculate_br_down_metric(br_down):
"""
This function calculates the down bit rate with a maximum of 100,000 kbps.
:param br_down: A numerical value for the bit rate in kbps
:return: A normalised value in the range [0, 1]
"""
if br_down < 1:
br_down = 1
min_baud = 1200
max_baud = 38400
num = np.log(br_down) - np.log(min_baud)
den = np.log(max_baud) - np.log(min_baud)
return (num / den + 0.1).clip(min=0, max=1)
def calculate_br_up_metric(br_up):
"""
This function calculates the up bit rate metric. Normalised around logarithmic values. Where the 'average' speed is
considered 4800bps.
The formula for this is based upon the common values for transmitters/receivers. Values are often doubled from 1200
baud. It scales to within [0, 1] once clipping is taken into account. It returns values close to the given fuzzy
values
:param br_up: A numerical value for the bit rate in bps
:return: A normalised value in the range [0, 1]
"""
if br_up < 1:
br_up = 1
min_baud = 1200
max_baud = 38400
num = np.log(br_up) - np.log(min_baud)
den = np.log(max_baud) - np.log(min_baud)
return (num / den + 0.1).clip(min=0, max=1)
def calculate_wavelength_metric(wavelength_min, wavelength_max):
"""
This function uses the given wavelength to determine where on the electromagnetic or ionised to EM converted
wavelength the system is capable of detecting.
The wavelength is generated from the median point between the minimum and maximum able to be detected. A logarithmic
value for the wavelength is then taken, prior to normalising into the range of 0 and 1.
:param wavelength_min: Minimum wavelength detectable by the system
:param wavelength_max: Maximum wavelength detectable by the system
:return: normalised value of the
"""
length_max = np.log(550) * 2
wavelength = np.abs(wavelength_max + wavelength_min) / 2
log_wl = np.log(wavelength)
default_met = np.array(log_wl / length_max)
scaled_met = 1.75 * (default_met - 0.5) + 0.5
if wavelength == 0:
return 0
else:
return scaled_met.clip(min=10e-11, max=1)
def calculate_attitude_metric(moment, mass, knowledge, axis):
"""
This function calculates the moment compared to the total mass of the satellite. This is worth 40% of the metric,
with the attitude determination being utilised for another 40% of the metric. The number of axis that can be
controlled accounts for the final 20% of the metric
:param moment:
:param mass:
:param knowledge:
:param axis:
:return:
"""
# print(moment, mass)
moment_met = np.asarray(moment / mass).clip(min=0, max=1)
# print(moment_met)
know_met = np.asarray(10 - knowledge).clip(min=0, max=10) / 10
# print(know_met)
axis_met = np.asarray(axis / 3).clip(min=0, max=1)
# print(axis_met)
# print('Att Met:' + str(((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1)))
return ((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1)
def create_system(sys_structure):
"""
This function creates a system from the dict structure given. It combines the components listed and then returns
the product specifications.
:param sys_structure: A dict that lists all the components that are a part of the system
:return: A dictionary the lists the product specifications as generated
"""
pass
def parse_system(system, comps):
"""
This function parses the data frame row for an individual system, converting it into a dictionary for creating the
product specifications
:param system: A row from a Pandas Data Frame.
:param comps: Defines which components database to search
:return: Two arrays, the first are the customer requirements features returned as fuzzy logic values, the second is
the product specification values. These will not be normalised.
"""
# Load the structure from the data frame given
# print(system)
struct = structures.loc[structures['Name'].isin([system['Structure']])].reset_index()
internal_slots = struct['Internal Slots'].values
external_slots = struct['External Slots'].values
internal_vol = struct.X[0] * struct.Y[0] * struct.Z[0]
total_vol = 0
# Extract the fuzzy values for the system and place them into a numpy array of features for FAM algorithm
cust_reqs = create_value_array(system['Size'], system['Size Imp'], system['Mass Imp'], system['Down Sp'],
system['Up Sp'], system['Alt Req'], system['Att Ctrl'], system['Remote'],
system['RS Wave'], system['RS Accuracy'])
comp_totals = system.to_dict()
comp_list = list()
ext_list = list()
for heading in comp_totals:
if "Comp" in heading:
comp_list.append(heading)
elif "Ext" in heading:
ext_list.append(heading)
parts_sum_matrix = np.zeros((0, 0))
parts_max_matrix = np.zeros((0, 0))
metric_matrix = np.zeros((0, 0))
metric_min_matrix = np.zeros((0, 0))
metric_max_matrix = np.zeros((0, 0))
# This is horrible, work on making it better once it's proved to get the right outputs
for part in comp_list:
idx = comps['Name'] == system[part]
metrics_sums, metrics_mins, metrics_max, sum_vals, max_vals = parse_component(comps.loc[idx])
if parts_sum_matrix.shape == (0, 0):
parts_sum_matrix = sum_vals
else:
parts_sum_matrix = np.concatenate((parts_sum_matrix, sum_vals), 1)
if parts_max_matrix.shape == (0, 0):
parts_max_matrix = max_vals
else:
parts_max_matrix = np.concatenate((parts_max_matrix, max_vals), 1)
if metric_matrix.shape == (0, 0):
metric_matrix = metrics_sums
else:
metric_matrix = np.concatenate((metric_matrix, metrics_sums), 1)
if metric_min_matrix.shape == (0, 0):
metric_min_matrix = metrics_mins
else:
metric_min_matrix = np.concatenate((metric_min_matrix, metrics_mins), 1)
if metric_max_matrix.shape == (0, 0):
metric_max_matrix = metrics_max
else:
metric_max_matrix = np.concatenate((metric_max_matrix, metrics_max), 1)
for part in ext_list:
idx = comps['Name'] == system[part]
metrics_sums, metrics_mins, metrics_max, sum_vals, max_vals = parse_component(comps.loc[idx])
# print(part)
cube_size = 1
if part == "Ext Sides":
metrics_sums *= (4 * cube_size) # * metrics_sums
if parts_sum_matrix.shape == (0, 0):
parts_sum_matrix = sum_vals
else:
parts_sum_matrix = np.concatenate((parts_sum_matrix, sum_vals), 1)
if parts_max_matrix.shape == (0, 0):
parts_max_matrix = max_vals
else:
parts_max_matrix = np.concatenate((parts_max_matrix, max_vals), 1)
if metric_matrix.shape == (0, 0):
metric_matrix = metrics_sums
else:
metric_matrix = np.concatenate((metric_matrix, metrics_sums), 1)
if metric_min_matrix.shape == (0, 0):
metric_min_matrix = metrics_mins
else:
metric_min_matrix = np.concatenate((metric_min_matrix, metrics_mins), 1)
if metric_max_matrix.shape == (0, 0):
metric_max_matrix = metrics_max
else:
metric_max_matrix = np.concatenate((metric_max_matrix, metrics_max), 1)
parts_sum_matrix = parts_sum_matrix.astype(np.float)
parts_max_matrix = parts_max_matrix.astype(np.float)
min_parts = parts_max_matrix
min_parts[min_parts == 0] = None
# print(min_parts)
min_parts = min_parts[~np.isnan(min_parts)]
# print(min_parts)
# print(parts_max_matrix)
# print(metric_matrix)
# print(metric_matrix.sum(axis=1))
metric_min_matrix[metric_min_matrix == 0] = None
metric_min_matrix = metric_min_matrix[~np.isnan(metric_min_matrix)]
# print(metric_min_matrix.min())
# print(metric_max_matrix.max(axis=1))
# print(parts_sum_matrix.sum(axis=1))
# print(parts_max_matrix.shape)
# Todo calculate all components in the system and provide system outputs that can be converted into metrics
metrics = np.concatenate((metric_matrix.sum(axis=1), np.array([metric_min_matrix.min()]),
metric_max_matrix.max(axis=1)), 0)
# cpu_met = calculate_cpu_metric(metrics[4], metrics[5], metrics[6])
att_met = calculate_attitude_metric(metrics[8], metrics[0], metric_min_matrix[0], metrics[10])
down_met = calculate_br_down_metric(metrics[2])
up_met = calculate_br_up_metric(metrics[3])
wl_met = calculate_wavelength_metric(metrics[16], metrics[17])
# print(metrics[4], metrics[5], metrics[6])
# print(metrics[8], metrics[0], metric_min_matrix[0], metrics[10])
return np.array([[att_met], [down_met], [up_met], [wl_met]]), cust_reqs
def parse_component(component):
"""
This function calculates the various dimensions and metrics that can be utilised for the features, then returns them
for summation in the parse system function
:param component: The single line dataframe or series to be parsed
:return:
"""
# Volume of the component in m^3
volume = (component.X * component.Y * component.Z).values[0]
# How many slots the component takes up
internal_slots = component['Internal Slots'].values[0]
if not component['External Slots'].values == 0:
external = True
external_slots = component['External Slots'].values[0]
else:
external = False
external_slots = 0
min_temp = component['Min Temp'].values[0]
max_temp = component['Max Temp'].values[0]
mass = component['Mass'].values[0]
max_voltage = component['Voltage'].values[0]
nom_power = component['Nom Power'].values[0]
max_power = component['Power (W)'].values[0] - nom_power # This returns the difference when activated
discharge_time = component['Discharge Time (Wh)'].values[0]
pixel_resolution = component['Resolution (m)'].values[0]
wavelength_resolution = component['Resolution(nm)'].values[0]
min_wavelength = component['Min Wavelength (nm)'].values[0]
max_wavelength = component['Max Wavelength (nm)'].values[0]
field_of_view = component['Field of View (deg)'].values[0]
rx_min = component['Receiver Min (MHz)'].values[0]
rx_max = component['Receiver Max'].values[0]
tx_min = component['Transmitter Min'].values[0]
tx_max = component['Transmitter Max'].values[0]
duplex = component['Duplex'].values[0] + 1
br_down = component['Bit Rate Down'].values[0]
br_up = component['Bit Rate Up'].values[0]
data = component['Data Storage (MB)'].values[0]
code = component['Code Storage (MB)'].values[0]
ram = component['RAM'].values[0]
att_know = component['Attitude Know (deg)'].values[0]
att_view = component['Attitude View'].values[0]
att_mom = component['Attitude Control moment'].values[0]
max_prop = component['Max Propulsion (mN)'].values[0]
att_type = component['Attitude Type'].values[0]
axis = component['Axis control'].values[0]
ctrl_area = component['Control Area (m^2)'].values[0]
disposal = component['Disposal time(km/day)'].values[0]
int_comms = component['Internal Comms'].values[0]
comm_conn = component['IntCommConn'].values[0]
price = component['Price ($US)'].values[0]
metric_sums = np.array([[mass, duplex, br_down, br_up, data, code, ram, att_view, att_mom, max_prop, axis,
ctrl_area, disposal, price, pixel_resolution, wavelength_resolution, min_wavelength,
max_wavelength]]).T.astype(np.float)
metric_mins = np.array([[att_know]]).T.astype(np.float)
metric_maxs = np.array([[]]).T.astype(np.float)
summation_values = np.array([[volume, mass, internal_slots, external_slots, nom_power, discharge_time, duplex,
br_down, br_up, data, code, ram, att_know, att_view, att_mom, max_prop, att_type,
axis, ctrl_area, disposal, price]]).T
min_max_values = np.array([[max_voltage, max_power, pixel_resolution, wavelength_resolution, min_temp, max_temp,
min_wavelength, max_wavelength, field_of_view, rx_min, rx_max, tx_min, tx_max]]).T
#Todo, figure out a way to deal with the comms issue. possibly a later problem
# print(summation_values)
# Todo create matrix from arrays then sum each feature on the correct axis
# Todo This will create the correct feature set
# Other features will be made from summation of available slots/connects vs used
return metric_sums, metric_mins, metric_maxs, summation_values, min_max_values | __author__ = "<NAME>"
"""
A module which converts the csv files into pandas data frames, for easy retrieval.
"""
import pandas as pd
import numpy as np
from fuzzy_values import create_value_array
structures = pd.DataFrame.from_csv(path='./Component Files/structures.csv', sep=';', encoding='iso-8859-1')
components = pd.DataFrame.from_csv(path='./Component Files/components.csv', sep=',', encoding='iso-8859-1')
systems = pd.DataFrame.from_csv(path='./Component Files/systems.csv', sep=';', encoding='iso-8859-1')
panels = pd.DataFrame.from_csv(path='./Component Files/panels.csv', sep=',', encoding='iso-8859-1')
comp_hidden = pd.DataFrame.from_csv(path='./Component Files/components_hidden.csv', sep=';', encoding='iso-8859-1')
comp_pre_built = pd.concat([comp_hidden, components], ignore_index=True)
def create_system_metrics(system):
"""
This function takes a fully parsed system of raw data values and calculates the system's metric scores.
:param system: A numpy array representing the system
:return: A numpy array that contains metrics formulated for both the FAM algorithm and the genetic algorithm
"""
pass
def calculate_cpu_metric(data, code, ram):
"""
This function calculates the cpu's data and general capability based upon the memory and RAM available to it. It
doesn't consider the speed of the chip. It calculates based on the following equation:
metric = (data/max_data + code/max_code + ram/max_ram) / 3
This normalises each value against the maximum value in the database. Then all are weighted equally
Then the total is normalised into the range [0, 1]
:param data: The dedicated data storage for the system
:param code: The memory available to code or additional storage space
:param ram: The memory in ram, important for more complicated processes onboard the satellite
:return: A numerical value that contains the the calculated metric for the system
"""
# max_data = 15000 # Matching an ideal state
# max_code = 100 # Near enough to the maximum value to be an ideal state
# max_ram = 128 # Less than the maximum, but reaches an ideal state
#
# data_met = (data / max_data).clip(min=0, max=1)
# code_met = (code / max_code).clip(min=0, max=1)
# ram_met = (ram / max_ram).clip(min=0, max=1)
#
# return np.abs((data_met + code_met + ram_met) / 3).clip(min=0, max=1)
"""
The above code was the old CPU metric in an attempt to calculate performance. As it is no longer utilised, and is
simply a binary check for the presence of a flightboard.
Totals is used to find if there is a positive amount of memory, which is present on all flightboards.
It is simply the sum of any of the categories of memory.
If the value is greater than 0, then it returns 1, else returns 0
"""
totals = data + code + ram
if totals > 0:
return 1
else:
return 0
def calculate_br_down_metric(br_down):
"""
This function calculates the down bit rate with a maximum of 100,000 kbps.
:param br_down: A numerical value for the bit rate in kbps
:return: A normalised value in the range [0, 1]
"""
if br_down < 1:
br_down = 1
min_baud = 1200
max_baud = 38400
num = np.log(br_down) - np.log(min_baud)
den = np.log(max_baud) - np.log(min_baud)
return (num / den + 0.1).clip(min=0, max=1)
def calculate_br_up_metric(br_up):
"""
This function calculates the up bit rate metric. Normalised around logarithmic values. Where the 'average' speed is
considered 4800bps.
The formula for this is based upon the common values for transmitters/receivers. Values are often doubled from 1200
baud. It scales to within [0, 1] once clipping is taken into account. It returns values close to the given fuzzy
values
:param br_up: A numerical value for the bit rate in bps
:return: A normalised value in the range [0, 1]
"""
if br_up < 1:
br_up = 1
min_baud = 1200
max_baud = 38400
num = np.log(br_up) - np.log(min_baud)
den = np.log(max_baud) - np.log(min_baud)
return (num / den + 0.1).clip(min=0, max=1)
def calculate_wavelength_metric(wavelength_min, wavelength_max):
"""
This function uses the given wavelength to determine where on the electromagnetic or ionised to EM converted
wavelength the system is capable of detecting.
The wavelength is generated from the median point between the minimum and maximum able to be detected. A logarithmic
value for the wavelength is then taken, prior to normalising into the range of 0 and 1.
:param wavelength_min: Minimum wavelength detectable by the system
:param wavelength_max: Maximum wavelength detectable by the system
:return: normalised value of the
"""
length_max = np.log(550) * 2
wavelength = np.abs(wavelength_max + wavelength_min) / 2
log_wl = np.log(wavelength)
default_met = np.array(log_wl / length_max)
scaled_met = 1.75 * (default_met - 0.5) + 0.5
if wavelength == 0:
return 0
else:
return scaled_met.clip(min=10e-11, max=1)
def calculate_attitude_metric(moment, mass, knowledge, axis):
"""
This function calculates the moment compared to the total mass of the satellite. This is worth 40% of the metric,
with the attitude determination being utilised for another 40% of the metric. The number of axis that can be
controlled accounts for the final 20% of the metric
:param moment:
:param mass:
:param knowledge:
:param axis:
:return:
"""
# print(moment, mass)
moment_met = np.asarray(moment / mass).clip(min=0, max=1)
# print(moment_met)
know_met = np.asarray(10 - knowledge).clip(min=0, max=10) / 10
# print(know_met)
axis_met = np.asarray(axis / 3).clip(min=0, max=1)
# print(axis_met)
# print('Att Met:' + str(((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1)))
return ((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1)
def create_system(sys_structure):
"""
This function creates a system from the dict structure given. It combines the components listed and then returns
the product specifications.
:param sys_structure: A dict that lists all the components that are a part of the system
:return: A dictionary the lists the product specifications as generated
"""
pass
def parse_system(system, comps):
"""
This function parses the data frame row for an individual system, converting it into a dictionary for creating the
product specifications
:param system: A row from a Pandas Data Frame.
:param comps: Defines which components database to search
:return: Two arrays, the first are the customer requirements features returned as fuzzy logic values, the second is
the product specification values. These will not be normalised.
"""
# Load the structure from the data frame given
# print(system)
struct = structures.loc[structures['Name'].isin([system['Structure']])].reset_index()
internal_slots = struct['Internal Slots'].values
external_slots = struct['External Slots'].values
internal_vol = struct.X[0] * struct.Y[0] * struct.Z[0]
total_vol = 0
# Extract the fuzzy values for the system and place them into a numpy array of features for FAM algorithm
cust_reqs = create_value_array(system['Size'], system['Size Imp'], system['Mass Imp'], system['Down Sp'],
system['Up Sp'], system['Alt Req'], system['Att Ctrl'], system['Remote'],
system['RS Wave'], system['RS Accuracy'])
comp_totals = system.to_dict()
comp_list = list()
ext_list = list()
for heading in comp_totals:
if "Comp" in heading:
comp_list.append(heading)
elif "Ext" in heading:
ext_list.append(heading)
parts_sum_matrix = np.zeros((0, 0))
parts_max_matrix = np.zeros((0, 0))
metric_matrix = np.zeros((0, 0))
metric_min_matrix = np.zeros((0, 0))
metric_max_matrix = np.zeros((0, 0))
# This is horrible, work on making it better once it's proved to get the right outputs
for part in comp_list:
idx = comps['Name'] == system[part]
metrics_sums, metrics_mins, metrics_max, sum_vals, max_vals = parse_component(comps.loc[idx])
if parts_sum_matrix.shape == (0, 0):
parts_sum_matrix = sum_vals
else:
parts_sum_matrix = np.concatenate((parts_sum_matrix, sum_vals), 1)
if parts_max_matrix.shape == (0, 0):
parts_max_matrix = max_vals
else:
parts_max_matrix = np.concatenate((parts_max_matrix, max_vals), 1)
if metric_matrix.shape == (0, 0):
metric_matrix = metrics_sums
else:
metric_matrix = np.concatenate((metric_matrix, metrics_sums), 1)
if metric_min_matrix.shape == (0, 0):
metric_min_matrix = metrics_mins
else:
metric_min_matrix = np.concatenate((metric_min_matrix, metrics_mins), 1)
if metric_max_matrix.shape == (0, 0):
metric_max_matrix = metrics_max
else:
metric_max_matrix = np.concatenate((metric_max_matrix, metrics_max), 1)
for part in ext_list:
idx = comps['Name'] == system[part]
metrics_sums, metrics_mins, metrics_max, sum_vals, max_vals = parse_component(comps.loc[idx])
# print(part)
cube_size = 1
if part == "Ext Sides":
metrics_sums *= (4 * cube_size) # * metrics_sums
if parts_sum_matrix.shape == (0, 0):
parts_sum_matrix = sum_vals
else:
parts_sum_matrix = np.concatenate((parts_sum_matrix, sum_vals), 1)
if parts_max_matrix.shape == (0, 0):
parts_max_matrix = max_vals
else:
parts_max_matrix = np.concatenate((parts_max_matrix, max_vals), 1)
if metric_matrix.shape == (0, 0):
metric_matrix = metrics_sums
else:
metric_matrix = np.concatenate((metric_matrix, metrics_sums), 1)
if metric_min_matrix.shape == (0, 0):
metric_min_matrix = metrics_mins
else:
metric_min_matrix = np.concatenate((metric_min_matrix, metrics_mins), 1)
if metric_max_matrix.shape == (0, 0):
metric_max_matrix = metrics_max
else:
metric_max_matrix = np.concatenate((metric_max_matrix, metrics_max), 1)
parts_sum_matrix = parts_sum_matrix.astype(np.float)
parts_max_matrix = parts_max_matrix.astype(np.float)
min_parts = parts_max_matrix
min_parts[min_parts == 0] = None
# print(min_parts)
min_parts = min_parts[~np.isnan(min_parts)]
# print(min_parts)
# print(parts_max_matrix)
# print(metric_matrix)
# print(metric_matrix.sum(axis=1))
metric_min_matrix[metric_min_matrix == 0] = None
metric_min_matrix = metric_min_matrix[~np.isnan(metric_min_matrix)]
# print(metric_min_matrix.min())
# print(metric_max_matrix.max(axis=1))
# print(parts_sum_matrix.sum(axis=1))
# print(parts_max_matrix.shape)
# Todo calculate all components in the system and provide system outputs that can be converted into metrics
metrics = np.concatenate((metric_matrix.sum(axis=1), np.array([metric_min_matrix.min()]),
metric_max_matrix.max(axis=1)), 0)
# cpu_met = calculate_cpu_metric(metrics[4], metrics[5], metrics[6])
att_met = calculate_attitude_metric(metrics[8], metrics[0], metric_min_matrix[0], metrics[10])
down_met = calculate_br_down_metric(metrics[2])
up_met = calculate_br_up_metric(metrics[3])
wl_met = calculate_wavelength_metric(metrics[16], metrics[17])
# print(metrics[4], metrics[5], metrics[6])
# print(metrics[8], metrics[0], metric_min_matrix[0], metrics[10])
return np.array([[att_met], [down_met], [up_met], [wl_met]]), cust_reqs
def parse_component(component):
    """
    Extract the physical dimensions and metric inputs for a single component.

    :param component: A single-row DataFrame describing one component from
        the parts database (accessed via ``.values[0]`` per column).
    :return: Five numpy column vectors:
        metric_sums (18x1), metric_mins (1x1), metric_maxs (0x1),
        summation_values (21x1), min_max_values (13x1).
    """
    # Volume of the component in m^3
    volume = (component.X * component.Y * component.Z).values[0]
    # How many internal slots the component takes up
    internal_slots = component['Internal Slots'].values[0]
    # External slot count; zero simply means the part mounts internally.
    # (Replaces an ambiguous `not array == 0` truthiness test on the raw
    # numpy array with a direct scalar read -- a zero stays zero either way.)
    external_slots = component['External Slots'].values[0]
    min_temp = component['Min Temp'].values[0]
    max_temp = component['Max Temp'].values[0]
    mass = component['Mass'].values[0]
    max_voltage = component['Voltage'].values[0]
    nom_power = component['Nom Power'].values[0]
    max_power = component['Power (W)'].values[0] - nom_power  # This returns the difference when activated
    discharge_time = component['Discharge Time (Wh)'].values[0]
    pixel_resolution = component['Resolution (m)'].values[0]
    wavelength_resolution = component['Resolution(nm)'].values[0]
    min_wavelength = component['Min Wavelength (nm)'].values[0]
    max_wavelength = component['Max Wavelength (nm)'].values[0]
    field_of_view = component['Field of View (deg)'].values[0]
    rx_min = component['Receiver Min (MHz)'].values[0]
    rx_max = component['Receiver Max'].values[0]
    tx_min = component['Transmitter Min'].values[0]
    tx_max = component['Transmitter Max'].values[0]
    duplex = component['Duplex'].values[0] + 1
    br_down = component['Bit Rate Down'].values[0]
    br_up = component['Bit Rate Up'].values[0]
    data = component['Data Storage (MB)'].values[0]
    code = component['Code Storage (MB)'].values[0]
    ram = component['RAM'].values[0]
    att_know = component['Attitude Know (deg)'].values[0]
    att_view = component['Attitude View'].values[0]
    att_mom = component['Attitude Control moment'].values[0]
    max_prop = component['Max Propulsion (mN)'].values[0]
    att_type = component['Attitude Type'].values[0]
    axis = component['Axis control'].values[0]
    ctrl_area = component['Control Area (m^2)'].values[0]
    disposal = component['Disposal time(km/day)'].values[0]
    int_comms = component['Internal Comms'].values[0]
    comm_conn = component['IntCommConn'].values[0]
    price = component['Price ($US)'].values[0]
    # Bug fix: np.float was a deprecated alias removed in NumPy 1.20 -- use
    # the builtin float, which is what the alias always resolved to.
    metric_sums = np.array([[mass, duplex, br_down, br_up, data, code, ram, att_view, att_mom, max_prop, axis,
                             ctrl_area, disposal, price, pixel_resolution, wavelength_resolution, min_wavelength,
                             max_wavelength]]).T.astype(float)
    metric_mins = np.array([[att_know]]).T.astype(float)
    metric_maxs = np.array([[]]).T.astype(float)
    summation_values = np.array([[volume, mass, internal_slots, external_slots, nom_power, discharge_time, duplex,
                                  br_down, br_up, data, code, ram, att_know, att_view, att_mom, max_prop, att_type,
                                  axis, ctrl_area, disposal, price]]).T
    min_max_values = np.array([[max_voltage, max_power, pixel_resolution, wavelength_resolution, min_temp, max_temp,
                                min_wavelength, max_wavelength, field_of_view, rx_min, rx_max, tx_min, tx_max]]).T
    #Todo, figure out a way to deal with the comms issue. possibly a later problem
    # Todo create matrix from arrays then sum each feature on the correct axis
    # Todo This will create the correct feature set
    # Other features will be made from summation of available slots/connects vs used
    return metric_sums, metric_mins, metric_maxs, summation_values, min_max_values
If the value is greater than 0, then it returns 1, else returns 0 This function calculates the down bit rate with a maximum of 100,000 kbps. :param br_down: A numerical value for the bit rate in kbps :return: A normalised value in the range [0, 1] This function calculates the up bit rate metric. Normalised around logarithmic values. Where the 'average' speed is considered 4800bps. The formula for this is based upon the common values for transmitters/receivers. Values are often doubled from 1200 baud. It scales to within [0, 1] once clipping is taken into account. It returns values close to the given fuzzy values :param br_up: A numerical value for the bit rate in bps :return: A normalised value in the range [0, 1] This function uses the given wavelength to determine where on the electromagnetic or ionised to EM converted wavelength the system is capable of detecting. The wavelength is generated from the median point between the minimum and maximum able to be detected. A logarithmic value for the wavelength is then taken, prior to normalising into the range of 0 and 1. :param wavelength_min: Minimum wavelength detectable by the system :param wavelength_max: Maximum wavelength detectable by the system :return: normalised value of the This function calculates the moment compared to the total mass of the satellite. This is worth 40% of the metric, with the attitude determination being utilised for another 40% of the metric. The number of axis that can be controlled accounts for the final 20% of the metric :param moment: :param mass: :param knowledge: :param axis: :return: # print(moment, mass) # print(moment_met) # print(know_met) # print(axis_met) # print('Att Met:' + str(((2 * moment_met + 2 * know_met + axis_met) / 5).clip(min=0, max=1))) This function creates a system from the dict structure given. It combines the components listed and then returns the product specifications. 
:param sys_structure: A dict that lists all the components that are a part of the system :return: A dictionary the lists the product specifications as generated This function parses the data frame row for an individual system, converting it into a dictionary for creating the product specifications :param system: A row from a Pandas Data Frame. :param comps: Defines which components database to search :return: Two arrays, the first are the customer requirements features returned as fuzzy logic values, the second is the product specification values. These will not be normalised. # Load the structure from the data frame given # print(system) # Extract the fuzzy values for the system and place them into a numpy array of features for FAM algorithm # This is horrible, work on making it better once it's proved to get the right outputs # print(part) # * metrics_sums # print(min_parts) # print(min_parts) # print(parts_max_matrix) # print(metric_matrix) # print(metric_matrix.sum(axis=1)) # print(metric_min_matrix.min()) # print(metric_max_matrix.max(axis=1)) # print(parts_sum_matrix.sum(axis=1)) # print(parts_max_matrix.shape) # Todo calculate all components in the system and provide system outputs that can be converted into metrics # cpu_met = calculate_cpu_metric(metrics[4], metrics[5], metrics[6]) # print(metrics[4], metrics[5], metrics[6]) # print(metrics[8], metrics[0], metric_min_matrix[0], metrics[10]) This function calculates the various dimensions and metrics that can be utilised for the features, then returns them for summation in the parse system function :param component: The single line dataframe or series to be parsed :return: # Volume of the component in m^3 # How many slots the component takes up # This returns the difference when activated #Todo, figure out a way to deal with the comms issue. 
possibly a later problem # print(summation_values) # Todo create matrix from arrays then sum each feature on the correct axis # Todo This will create the correct feature set # Other features will be made from summation of available slots/connects vs used | 3.296775 | 3 |
Menus.py | CStaich/Repository01 | 0 | 6618680 | <gh_stars>0
from __future__ import division #for correct integer division
import pickle #for saving, loading files
import subprocess as sp #for screen clearing
import pprint #Pretty Printing
import msvcrt as m #for wait() function
import time
from functools import partial #for dictionary functions w/ specific parameters
#This program is for recording games over time, using the Elo rating system developed for chess tournaments to rank players.
#It is able to generate likelihood of winning, it keeps records of all operations in the menus, it allows individual scores
#management in case a score needs fixing or handicap, and it is able to reset, add, or remove players at any time.
#Code written by <NAME>
#DONE
#track ratings
#calculate new ratings
#record games
#player manager
#ability to check scores
#ability to view past games
#better menus
#TO-DO
#confirmation prompt for high-risk menu options
#ability to edit most recent game
#ratings-over-time display
#MENU FUNCTIONS
def main_menu():
    """Top-level menu: dispatches to game recording, roster, records and tools."""
    clearscreen()
    selection = menu(['Record a Game', 'Roster', 'Records', 'Tools'], 'main')
    actions = (
        partial(return_to, 'main'),  # 0: Return
        play_game,
        roster_menu,
        records_menu,
        tools_menu,
    )
    actions[selection]()
    return_to('main')
    return
def roster_menu():
    """Roster management menu: add/remove players and manage ratings."""
    display_roster()
    selection = menu(['Add a player', 'Remove a player', 'Modify a rating',
                      'Reset all ratings', 'Delete Roster'], 'roster')
    actions = (
        partial(return_to, 'main'),  # 0: Return
        add_player,
        delete_player,
        modify_rating,
        reset_ratings,
        delete_roster,
    )
    actions[selection]()
    return_to('roster')
    return
def records_menu():
    """Records menu: view or wipe the stored history of games and changes."""
    selection = menu(['Display Records', 'Delete All'], 'records')
    actions = (
        partial(return_to, 'main'),  # 0: Return
        display_records,
        delete_records,
    )
    actions[selection]()
    return_to('records')
    return
def tools_menu():
    """Tools menu: utilities such as the pre-game odds calculator."""
    selection = menu(['Calculate Odds', 'Track Ratings'], 'tools')
    handlers = {0: partial(return_to, 'main'), 1: calculate_odds}
    # 2: track_ratings is shown in the menu but not implemented yet
    handlers[selection]()
    return_to('tools')
    return
def menu(options, return_menu = 'main'):
opt_dict = {0: 'Return'}
count = 1
for item in options:
opt_dict[count] = item
count = count + 1
key_list = opt_dict.keys()
key_list.sort()
for item in key_list:
print "{}: {}".format(item, opt_dict[item])
entry = prompt_number("Enter Selection: ", key_list)
clearscreen()
return entry
def return_to(return_menu, type = 0):
if type == 1: #incorrect input
print "Invalid Entry, returning to {} menu".format(return_menu)
wait()
clearscreen()
menu_list[return_menu]()
return
def play_game():
    """
    Record one game: prompt for both players and their scores, update both
    Elo ratings on the roster, and append the game to the records history.
    Uses the standard Elo update with a K-factor of 32.
    """
    roster = load_obj('roster')
    #Initialize player dictionaries
    p1 = {}
    p2 = {}
    display_roster()
    # min_length=2: a game needs at least two players on the roster
    p1['name'] = prompt_name('Name of Player 1: ', 2)
    p2['name'] = prompt_name('Name of Player 2: ', 2)
    p1['rating'] = roster[p1['name']]
    p2['rating'] = roster[p2['name']]
    #Calculate adjusted logarithmic ratings
    p1['log'] = 10 ** (p1['rating']/ 400.00)
    p2['log'] = 10 ** (p2['rating'] / 400.00)
    #Calculate relative EVs (each player's Elo expected score, in [0, 1])
    p1['EV'] = p1['log'] / (p1['log'] + p2['log'])
    p2['EV'] = p2['log'] / (p1['log'] + p2['log'])
    #Display ratings and relative EVs
    print "{}: {:.2f} ({:.2f}% to win)".format(p1['name'], p1['rating'], p1['EV']*100)
    print "{}: {:.2f} ({:.2f}% to win)".format(p2['name'], p2['rating'], p2['EV']*100)
    p1['score'] = prompt_number("Enter {}'s score: ".format(p1['name']))
    p2['score'] = prompt_number("Enter {}'s score: ".format(p2['name']))
    #Outcome logic (used to generate new rankings): 1 = win, 0 = loss, 0.5 = draw
    if p1['score'] > p2['score']:
        p1['outcome'] = 1
        p2['outcome'] = 0
    elif p1['score'] < p2['score']:
        p1['outcome'] = 0
        p2['outcome'] = 1
    elif p1['score'] == p2['score']:
        p1['outcome'] = 0.5
        p2['outcome'] = 0.5
    #Calculate new ratings; multiplier is the Elo K-factor
    multiplier = 32
    p1['new_rating'] = p1['rating'] + multiplier * (p1['outcome'] - p1['EV'])
    p2['new_rating'] = p2['rating'] + multiplier * (p2['outcome'] - p2['EV'])
    p1['difference'] = p1['new_rating'] - p1['rating']
    p2['difference'] = p2['new_rating'] - p2['rating']
    #Display new ratings
    print "{}: {:.2f} ---> {:.2f} [{:.2f}]".format(p1['name'], p1['rating'], p1['new_rating'], p1['difference'])
    print "{}: {:.2f} ---> {:.2f} [{:.2f}]".format(p2['name'], p2['rating'], p2['new_rating'], p2['difference'])
    #Record new ratings on the roster
    roster[p1['name']] = p1['new_rating']
    roster[p2['name']] = p2['new_rating']
    save_obj(roster, 'roster')
    #Record game for records (appends to the module-level records list)
    metadata = {}
    metadata['time'] = time.time()
    records.append({
        'type': 'game',
        'metadata': metadata,
        'p1': p1,
        'p2': p2,
    })
    save_obj(records, 'records')
    wait()
    return_to('main')
    return
#ROSTER MENU FUNCTIONS
def display_roster():
roster = load_obj('roster')
print "=== Current Roster ==="
pp.pprint(roster)
print "\n"
return
def add_player():
roster = load_obj('roster')
player = raw_input("Enter player: ").lower()
if player in roster.keys():
print "{} is already on the roster.".format(player)
wait()
return_to('roster')
roster[player] = 400
save_obj(roster, 'roster')
metadata = {}
metadata['time'] = time.time()
metadata['description'] = "Added {} to roster with score of {}".format(player, roster[player])
records.append({
'type': 'records',
'metadata': metadata
})
save_obj(records, 'records')
return
def delete_player():
    """Remove a player from the roster and log the deletion in the records."""
    roster = load_obj('roster')
    player = prompt_name()
    old_rating = roster[player]
    del roster[player]
    save_obj(roster, 'roster')
    change = {
        'time': time.time(),
        'description': "Deleted {} from roster with score of {}".format(player, old_rating),
    }
    records.append({'type': 'records', 'metadata': change})
    save_obj(records, 'records')
    return
def modify_rating():
    """Manually overwrite a player's rating (corrections, handicaps) and log it."""
    roster = load_obj('roster')
    player = prompt_name()
    previous = roster[player]
    updated = prompt_number("Enter new rating: ")
    roster[player] = updated
    save_obj(roster, 'roster')
    change = {
        'time': time.time(),
        'description': "Changed {}'s rating from {} to {}".format(player, previous, updated),
    }
    records.append({'type': 'records', 'metadata': change})
    save_obj(records, 'records')
    return
def reset_ratings():
    """Set every player on the roster back to the starting rating of 400."""
    roster = load_obj('roster')
    roster = dict.fromkeys(roster, 400)
    save_obj(roster, 'roster')
    return
def delete_roster():
    """Wipe the roster entirely, leaving an empty player mapping on disk."""
    save_obj({}, 'roster')
    roster = load_obj('roster')  # re-read so the local reflects the saved state
    return
#RECORDS MENU FUNCTIONS
def display_records(max_records = -1):
clearscreen()
records = load_obj('records')
count = 1
for entry in records:
metadata = entry['metadata']
if count == max_records:
break
if entry['type'] == 'game':
p1 = entry['p1']
p2 = entry['p2']
data = []
data.append([" ", p1['name'], p2['name']])
data.append(["Score", p1['score'], p2['score']])
data.append(['EV', p1['EV'], p2['EV']])
data.append(['Rating Change', p1['difference'], p2['difference']])
data.append(['New Rating', p1['new_rating'], p2['new_rating']])
#printing
print "\n=== Game at {} ===\n".format(metadata['time'])
col_width = max(len(str(item)) for line in data for item in line) + 2
for line in data:
print "\t" + "".join(str(item).ljust(col_width) for item in line)
else:
print "\n=== {} change at {} ===\n".format(entry['type'], metadata['time'])
print "\t" + metadata['description']
count = count + 1
print "\n"
wait()
return
def delete_records():
    """Replace the entire records history with a single 'wiped' marker entry."""
    # Bug fix: rebind the MODULE-LEVEL records list, not a shadowing local.
    # Previously other functions kept appending to the stale old list and
    # re-saving it, resurrecting the supposedly wiped history.
    global records
    metadata = {}
    metadata['time'] = time.time()
    metadata['description'] = "Wiped All Records"
    records = [{
        'type': 'records',
        'metadata': metadata
    }]
    save_obj(records, 'records')
#TOOLS MENU FUNCTIONS
def calculate_odds():
#Initialize player dictionaries
p1 = {}
p2 = {}
#Poll user for participants
p1['name'] = raw_input('Name of Player 1: ').lower()
p2['name'] = raw_input('Name of Player 2: ').lower()
#Get records for participants
roster = load_obj('roster')
p1['rating'] = roster[p1['name']]
p2['rating'] = roster[p2['name']]
#Calculate adjusted logarithmic ratings
p1['log'] = 10 ** (p1['rating']/ 400.00)
p2['log'] = 10 ** (p2['rating'] / 400.00)
#Calculate relative EVs
p1['EV'] = p1['log'] / (p1['log'] + p2['log'])
p2['EV'] = p2['log'] / (p1['log'] + p2['log'])
#Display ratings and relative EVs
print "{}: {:.2f} ({:.2f}% to win)".format(p1['name'], p1['rating'], p1['EV']*100)
print "{}: {:.2f} ({:.2f}% to win)".format(p2['name'], p2['rating'], p2['EV']*100)
wait()
return
#COMMON FUNCTIONS
def save_obj(obj, name):
    """Pickle *obj* to obj/<name>.pkl, overwriting any existing file."""
    path = 'obj/' + name + '.pkl'
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at obj/<name>.pkl."""
    path = 'obj/' + name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def wait(): #waits for keystrike before continuing
    # msvcrt is Windows-only; getch() blocks until one key is pressed,
    # without echoing it and without requiring Enter.
    m.getch()
    return
def prompt_name(message = "Enter player: ", min_length = 1):
roster = load_obj('roster')
if len(roster) < min_length:
print "The roster is not long enough!"
wait()
return_to('main')2
player = raw_input(message).lower()
try:
roster[player]
except:
print "That name is not on the roster. "
return prompt_name(message, min_length)
return player
def prompt_number(message = "Enter number: ", values = -1):
number = raw_input(message)
try:
number = int(number)
except ValueError:
print "Invalid entry."
return prompt_number(message, values)
if values != -1:
for value in values:
value = int(value)
if number not in values:
print "Out of range."
return prompt_number(message, values)
return number
def clearscreen():
    sp.call('cls',shell=True) #clears the screen
    # NOTE(review): 'cls' is Windows-only (matching the msvcrt use above);
    # on POSIX this prints a shell error instead of clearing -- confirm
    # the target platform is Windows.
    return
#COMMON VARIABLES
# Module-level state shared by the menu functions: the persisted roster and
# records history, a pretty-printer, and the menu-name -> function dispatch
# table used by return_to().
roster = load_obj('roster')
records = load_obj('records')
pp = pprint.PrettyPrinter()
menu_list = {
    'main': main_menu,
    'roster': roster_menu,
    'records': records_menu,
    'tools': tools_menu,
}
#BODY
# Entry point: the menus recurse into each other from here until the user quits.
main_menu()
| from __future__ import division #for correct integer division
import pickle #for saving, loading files
import subprocess as sp #for screen clearing
import pprint #Pretty Printing
import msvcrt as m #for wait() function
import time
from functools import partial #for dictionary functions w/ specific parameters
#This program is for recording games over time, using the Elo rating system developed for chess tournaments to rank players.
#It is able to generate likelihood of winning, it keeps records of all operations in the menus, it allows individual scores
#management in case a score needs fixing or handicap, and it is able to reset, add, or remove players at any time.
#Code written by <NAME>
#DONE
#track ratings
#calculate new ratings
#record games
#player manager
#ability to check scores
#ability to view past games
#better menus
#TO-DO
#confirmation prompt for high-risk menu options
#ability to edit most recent game
#ratings-over-time display
#MENU FUNCTIONS
def main_menu():
clearscreen()
entry = menu(['Record a Game', 'Roster', 'Records', 'Tools'], 'main')
choices = {
0: partial(return_to,'main'),
1: play_game,
2: roster_menu,
3: records_menu,
4: tools_menu,
}
choices[entry]()
return_to('main')
return
def roster_menu():
display_roster()
entry = menu(['Add a player', 'Remove a player', 'Modify a rating', 'Reset all ratings', 'Delete Roster'], 'roster')
choices = {
0: partial(return_to,'main'),
1: add_player,
2: delete_player,
3: modify_rating,
4: reset_ratings,
5: delete_roster,
}
choices[entry]()
return_to('roster')
return
def records_menu():
entry = menu(['Display Records', 'Delete All'], 'records')
choices = {
0: partial(return_to,'main'),
1: display_records,
2: delete_records,
}
choices[entry]()
return_to('records')
return
def tools_menu():
entry = menu(['Calculate Odds', 'Track Ratings'], 'tools')
choices = {
0: partial(return_to,'main'),
1: calculate_odds,
#2: track_ratings, #not added
}
choices[entry]()
return_to('tools')
return
def menu(options, return_menu = 'main'):
opt_dict = {0: 'Return'}
count = 1
for item in options:
opt_dict[count] = item
count = count + 1
key_list = opt_dict.keys()
key_list.sort()
for item in key_list:
print "{}: {}".format(item, opt_dict[item])
entry = prompt_number("Enter Selection: ", key_list)
clearscreen()
return entry
def return_to(return_menu, type = 0):
if type == 1: #incorrect input
print "Invalid Entry, returning to {} menu".format(return_menu)
wait()
clearscreen()
menu_list[return_menu]()
return
def play_game():
roster = load_obj('roster')
#Initialize player dictionaries
p1 = {}
p2 = {}
display_roster()
p1['name'] = prompt_name('Name of Player 1: ', 2)
p2['name'] = prompt_name('Name of Player 2: ', 2)
p1['rating'] = roster[p1['name']]
p2['rating'] = roster[p2['name']]
#Calculate adjusted logarithmic ratings
p1['log'] = 10 ** (p1['rating']/ 400.00)
p2['log'] = 10 ** (p2['rating'] / 400.00)
#Calculate relative EVs
p1['EV'] = p1['log'] / (p1['log'] + p2['log'])
p2['EV'] = p2['log'] / (p1['log'] + p2['log'])
#Display ratings and relative EVs
print "{}: {:.2f} ({:.2f}% to win)".format(p1['name'], p1['rating'], p1['EV']*100)
print "{}: {:.2f} ({:.2f}% to win)".format(p2['name'], p2['rating'], p2['EV']*100)
p1['score'] = prompt_number("Enter {}'s score: ".format(p1['name']))
p2['score'] = prompt_number("Enter {}'s score: ".format(p2['name']))
#Outcome logic (used to generate new rankings)
if p1['score'] > p2['score']:
p1['outcome'] = 1
p2['outcome'] = 0
elif p1['score'] < p2['score']:
p1['outcome'] = 0
p2['outcome'] = 1
elif p1['score'] == p2['score']:
p1['outcome'] = 0.5
p2['outcome'] = 0.5
#Calculate new ratings
multiplier = 32
p1['new_rating'] = p1['rating'] + multiplier * (p1['outcome'] - p1['EV'])
p2['new_rating'] = p2['rating'] + multiplier * (p2['outcome'] - p2['EV'])
p1['difference'] = p1['new_rating'] - p1['rating']
p2['difference'] = p2['new_rating'] - p2['rating']
#Display new ratings
print "{}: {:.2f} ---> {:.2f} [{:.2f}]".format(p1['name'], p1['rating'], p1['new_rating'], p1['difference'])
print "{}: {:.2f} ---> {:.2f} [{:.2f}]".format(p2['name'], p2['rating'], p2['new_rating'], p2['difference'])
#Record new ratings on the roster
roster[p1['name']] = p1['new_rating']
roster[p2['name']] = p2['new_rating']
save_obj(roster, 'roster')
#Record game for records
metadata = {}
metadata['time'] = time.time()
records.append({
'type': 'game',
'metadata': metadata,
'p1': p1,
'p2': p2,
})
save_obj(records, 'records')
wait()
return_to('main')
return
#ROSTER MENU FUNCTIONS
def display_roster():
roster = load_obj('roster')
print "=== Current Roster ==="
pp.pprint(roster)
print "\n"
return
def add_player():
roster = load_obj('roster')
player = raw_input("Enter player: ").lower()
if player in roster.keys():
print "{} is already on the roster.".format(player)
wait()
return_to('roster')
roster[player] = 400
save_obj(roster, 'roster')
metadata = {}
metadata['time'] = time.time()
metadata['description'] = "Added {} to roster with score of {}".format(player, roster[player])
records.append({
'type': 'records',
'metadata': metadata
})
save_obj(records, 'records')
return
def delete_player():
roster = load_obj('roster')
player = prompt_name()
old_rating = roster[player]
roster.pop(player, None)
save_obj(roster, 'roster')
metadata = {}
metadata['time'] = time.time()
metadata['description'] = "Deleted {} from roster with score of {}".format(player, old_rating)
records.append({
'type': 'records',
'metadata': metadata
})
save_obj(records, 'records')
return
def modify_rating():
roster = load_obj('roster')
player = prompt_name()
old_rating = roster[player]
new_rating = prompt_number("Enter new rating: ")
roster[player] = new_rating
save_obj(roster, 'roster')
metadata = {}
metadata['time'] = time.time()
metadata['description'] = "Changed {}'s rating from {} to {}".format(player, old_rating, new_rating)
records.append({
'type': 'records',
'metadata': metadata
})
save_obj(records, 'records')
return
def reset_ratings():
roster = load_obj('roster')
for player in roster:
roster[player] = 400
save_obj(roster, 'roster')
return
def delete_roster():
roster = {}
save_obj(roster, 'roster')
roster = load_obj('roster')
return
#RECORDS MENU FUNCTIONS
def display_records(max_records = -1):
clearscreen()
records = load_obj('records')
count = 1
for entry in records:
metadata = entry['metadata']
if count == max_records:
break
if entry['type'] == 'game':
p1 = entry['p1']
p2 = entry['p2']
data = []
data.append([" ", p1['name'], p2['name']])
data.append(["Score", p1['score'], p2['score']])
data.append(['EV', p1['EV'], p2['EV']])
data.append(['Rating Change', p1['difference'], p2['difference']])
data.append(['New Rating', p1['new_rating'], p2['new_rating']])
#printing
print "\n=== Game at {} ===\n".format(metadata['time'])
col_width = max(len(str(item)) for line in data for item in line) + 2
for line in data:
print "\t" + "".join(str(item).ljust(col_width) for item in line)
else:
print "\n=== {} change at {} ===\n".format(entry['type'], metadata['time'])
print "\t" + metadata['description']
count = count + 1
print "\n"
wait()
return
def delete_records():
metadata = {}
metadata['time'] = time.time()
metadata['description'] = "Wiped All Records".format()
records = [{
'type': 'records',
'metadata': metadata
}]
save_obj(records, 'records')
#TOOLS MENU FUNCTIONS
def calculate_odds():
#Initialize player dictionaries
p1 = {}
p2 = {}
#Poll user for participants
p1['name'] = raw_input('Name of Player 1: ').lower()
p2['name'] = raw_input('Name of Player 2: ').lower()
#Get records for participants
roster = load_obj('roster')
p1['rating'] = roster[p1['name']]
p2['rating'] = roster[p2['name']]
#Calculate adjusted logarithmic ratings
p1['log'] = 10 ** (p1['rating']/ 400.00)
p2['log'] = 10 ** (p2['rating'] / 400.00)
#Calculate relative EVs
p1['EV'] = p1['log'] / (p1['log'] + p2['log'])
p2['EV'] = p2['log'] / (p1['log'] + p2['log'])
#Display ratings and relative EVs
print "{}: {:.2f} ({:.2f}% to win)".format(p1['name'], p1['rating'], p1['EV']*100)
print "{}: {:.2f} ({:.2f}% to win)".format(p2['name'], p2['rating'], p2['EV']*100)
wait()
return
#COMMON FUNCTIONS
def save_obj(obj, name):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def wait(): #waits for keystrike before continuing
m.getch()
return
def prompt_name(message = "Enter player: ", min_length = 1):
roster = load_obj('roster')
if len(roster) < min_length:
print "The roster is not long enough!"
wait()
return_to('main')2
player = raw_input(message).lower()
try:
roster[player]
except:
print "That name is not on the roster. "
return prompt_name(message, min_length)
return player
def prompt_number(message = "Enter number: ", values = -1):
number = raw_input(message)
try:
number = int(number)
except ValueError:
print "Invalid entry."
return prompt_number(message, values)
if values != -1:
for value in values:
value = int(value)
if number not in values:
print "Out of range."
return prompt_number(message, values)
return number
def clearscreen():
sp.call('cls',shell=True) #clears the screen
return
#COMMON VARIABLES
roster = load_obj('roster')
records = load_obj('records')
pp = pprint.PrettyPrinter()
menu_list = {
'main': main_menu,
'roster': roster_menu,
'records': records_menu,
'tools': tools_menu,
}
#BODY
main_menu() | en | 0.785647 | #for correct integer division #for saving, loading files #for screen clearing #Pretty Printing #for wait() function #for dictionary functions w/ specific parameters #This program is for recording games over time, using the Elo rating system developed for chess tournaments to rank players. #It is able to generate likelihood of winning, it keeps records of all operations in the menus, it allows individual scores #management in case a score needs fixing or handicap, and it is able to reset, add, or remove players at any time. #Code written by <NAME> #DONE #track ratings #calculate new ratings #record games #player manager #ability to check scores #ability to view past games #better menus #TO-DO #confirmation prompt for high-risk menu options #ability to edit most recent game #ratings-over-time display #MENU FUNCTIONS #2: track_ratings, #not added #incorrect input #Initialize player dictionaries #Calculate adjusted logarithmic ratings #Calculate relative EVs #Display ratings and relative EVs #Outcome logic (used to generate new rankings) #Calculate new ratings #Display new ratings #Record new ratings on the roster #Record game for records #ROSTER MENU FUNCTIONS #RECORDS MENU FUNCTIONS #printing #TOOLS MENU FUNCTIONS #Initialize player dictionaries #Poll user for participants #Get records for participants #Calculate adjusted logarithmic ratings #Calculate relative EVs #Display ratings and relative EVs #COMMON FUNCTIONS #waits for keystrike before continuing #clears the screen #COMMON VARIABLES #BODY | 2.735651 | 3 |
Algorithms/Clustering/K_Means-K_Medians/k_means.py | Nishaghoul/Machine-Learning | 58 | 6618681 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import style
from collections import defaultdict
from sklearn import preprocessing
class K_Means:
    """Flat k-means clustering (Lloyd's algorithm) over a numpy feature array."""

    def __init__(self, k=2, tol=0.001, max_iter=300):
        """
        :param k: number of clusters
        :param tol: convergence tolerance, as percent centroid movement
        :param max_iter: hard cap on optimization passes
        """
        self.k = k
        self.tol = tol
        self.max_iter = max_iter

    def fit(self, data):
        """Learn self.centroids and self.classifications from data (n_samples x n_features)."""
        self.centroids = {}
        # Seed the centroids with the first k points
        # (random points would work as well)
        for i in range(self.k):
            self.centroids[i] = data[i]
        for i in range(self.max_iter):
            self.classifications = defaultdict(list)
            # Assign each sample to its nearest centroid (Euclidean distance)
            for features in data:
                distances = [np.linalg.norm(features - self.centroids[c]) for c in self.centroids]
                classification = distances.index(min(distances))
                self.classifications[classification].append(features)
            prev_centroids = dict(self.centroids)
            # Move each centroid to the mean of its assigned samples
            for classification in self.classifications:
                self.centroids[classification] = np.average(self.classifications[classification], axis=0)
            # Converged when no centroid moved more than tol percent.
            # Bug fix: use the absolute relative movement -- the original
            # summed SIGNED per-feature changes, which can cancel out and
            # report convergence while centroids are still moving.
            optimized = True
            for c in self.centroids:
                original_centroid = prev_centroids[c]
                new_centroid = self.centroids[c]
                if np.sum(np.abs((new_centroid - original_centroid) / original_centroid)) * 100.0 > self.tol:
                    optimized = False
            if optimized:
                break

    # used for predicting in which classification a new data point lies
    def predict(self, data):
        """Return the index of the learned centroid nearest to *data*."""
        distances = [np.linalg.norm(data - self.centroids[c]) for c in self.centroids]
        classification = distances.index(min(distances))
        return classification
# Load the Titanic passenger dataset used to exercise the clusterer
df = pd.read_excel('../../datasets/titanic.xls')
# Drop identifier-like columns, then replace missing values with 0
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def convert_non_numeric_data(df):
    """
    Re-encode every non-numeric column of *df* as integer category codes.

    Bug fix: the original test used `or` ("dtype != int64 or dtype != float64"),
    which is always true, so numeric columns were also scrambled through the
    text-to-int mapping.

    :param df: DataFrame whose text/object columns should become ints
    :return: the same DataFrame with non-numeric columns re-encoded in place
    """
    for col in df.columns.values:
        if df[col].dtype != np.int64 and df[col].dtype != np.float64:
            # Assign each distinct value the next unused integer code.
            # NOTE(review): iteration over a set makes the code assignment
            # order run-dependent for strings -- confirm callers only need
            # a consistent encoding within one run.
            text_digits = {}
            next_code = 0
            for unique in set(df[col].values.tolist()):
                if unique not in text_digits:
                    text_digits[unique] = next_code
                    next_code += 1
            df[col] = [text_digits[val] for val in df[col]]
    return df
df = convert_non_numeric_data(df)
X = np.array(df.drop(['survived'], 1)).astype(float)
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = K_Means()
clf.fit(X)
correct = 0
for i in range(len(X)):
feature = np.array(X[i].astype(float))
prediction = clf.predict(feature)
if prediction == y[i]:
correct += 1
print(correct/len(X))
'''
style.use('ggplot')
X = np.array([[1, 2],
[1.5, 1.8],
[5, 8],
[8, 8],
[1, 0.6],
[9, 11]])
colors = ['r', 'g', 'b']*10
clf = K_Means()
clf.fit(X)
for c in clf.centroids:
plt.scatter(clf.centroids[c][0], clf.centroids[c][1], marker='o', color='k', s=150, linewidths=5)
for classification in clf.classifications:
color = colors[classification]
for features in clf.classifications[classification]:
plt.scatter(features[0], features[1], marker='x', s=150, color=color, linewidths=5)
unknowns = np.array([[1, 3],
[3, 5],
[3, 7],
[-3, -1],
[0, 0],
[8, 9]])
for u in unknowns:
classification = clf.predict(u)
plt.scatter(u[0], u[1], marker='*', color=colors[classification], s=150, linewidths=5)
plt.show()
''' | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import style
from collections import defaultdict
from sklearn import preprocessing
class K_Means:
def __init__(self, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self, data):
self.centroids = {}
# Take first k points in data as centroids
# can take random points as well
for i in range(self.k):
self.centroids[i] = data[i]
for i in range(self.max_iter):
self.classifications = defaultdict(list)
# Find to which centroid is the feature closest to and append it in that centroid's classification
for features in data:
distances = [np.linalg.norm(features - self.centroids[c]) for c in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(features)
prev_centroids = dict(self.centroids)
# Calculate the average centroid point for each classification
# by averaging the features in that classification
for classification in self.classifications:
self.centroids[classification] = np.average(self.classifications[classification], axis=0)
# If the desired tolerance is achieved i.e. the centroids are not changing values
# more than tolerance % then simply break the loop
optimized = True
for c in self.centroids:
original_centroid = prev_centroids[c]
new_centroid = self.centroids[c]
if np.sum((new_centroid - original_centroid)/original_centroid) * 100.0 > self.tol:
optimized = False
if optimized:
break
# used for predicting in which classification does the new data point or feature lie
def predict(self, data):
distances = [np.linalg.norm(data - self.centroids[c]) for c in self.centroids]
classification = distances.index(min(distances))
return classification
df = pd.read_excel('../../datasets/titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def convert_non_numeric_data(df):
columns = df.columns.values
for col in columns:
text_digits = {}
def convert_to_int(val):
return text_digits[val]
if df[col].dtype != np.int64 or df[col].dtype != np.float64:
col_contents = df[col].values.tolist()
unique_elements = set(col_contents)
x = 0
for unique in unique_elements:
if unique not in text_digits:
text_digits[unique] = x
x += 1
df[col] = list(map(convert_to_int, df[col]))
return df
df = convert_non_numeric_data(df)
X = np.array(df.drop(['survived'], 1)).astype(float)
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = K_Means()
clf.fit(X)
correct = 0
for i in range(len(X)):
feature = np.array(X[i].astype(float))
prediction = clf.predict(feature)
if prediction == y[i]:
correct += 1
print(correct/len(X))
'''
style.use('ggplot')
X = np.array([[1, 2],
[1.5, 1.8],
[5, 8],
[8, 8],
[1, 0.6],
[9, 11]])
colors = ['r', 'g', 'b']*10
clf = K_Means()
clf.fit(X)
for c in clf.centroids:
plt.scatter(clf.centroids[c][0], clf.centroids[c][1], marker='o', color='k', s=150, linewidths=5)
for classification in clf.classifications:
color = colors[classification]
for features in clf.classifications[classification]:
plt.scatter(features[0], features[1], marker='x', s=150, color=color, linewidths=5)
unknowns = np.array([[1, 3],
[3, 5],
[3, 7],
[-3, -1],
[0, 0],
[8, 9]])
for u in unknowns:
classification = clf.predict(u)
plt.scatter(u[0], u[1], marker='*', color=colors[classification], s=150, linewidths=5)
plt.show()
''' | en | 0.643253 | # Take first k points in data as centroids # can take random points as well # Find to which centroid is the feature closest to and append it in that centroid's classification # Calculate the average centroid point for each classification # by averaging the features in that classification # If the desired tolerance is achieved i.e. the centroids are not changing values # more than tolerance % then simply break the loop # used for predicting in which classification does the new data point or feature lie style.use('ggplot') X = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]]) colors = ['r', 'g', 'b']*10 clf = K_Means() clf.fit(X) for c in clf.centroids: plt.scatter(clf.centroids[c][0], clf.centroids[c][1], marker='o', color='k', s=150, linewidths=5) for classification in clf.classifications: color = colors[classification] for features in clf.classifications[classification]: plt.scatter(features[0], features[1], marker='x', s=150, color=color, linewidths=5) unknowns = np.array([[1, 3], [3, 5], [3, 7], [-3, -1], [0, 0], [8, 9]]) for u in unknowns: classification = clf.predict(u) plt.scatter(u[0], u[1], marker='*', color=colors[classification], s=150, linewidths=5) plt.show() | 3.342941 | 3 |
tests/fixtures/account_fixtures.py | alex-d-bondarev/learn-flask | 0 | 6618682 | import pytest
from learn_app.test_flow.models.account import Account
def _save_account_to_db_and_return_its_model(db_fixture, none_account):
db_fixture.session.add(none_account)
db_fixture.session.commit()
return none_account
@pytest.fixture(scope="session")
def test_account_data():
"""Get app context for tests
:return:
"""
return {"name": "<NAME>", "number": 42, "role": "none"}
@pytest.fixture(scope="session")
def test_account_api_data():
"""Get app context for tests
:return:
"""
return {"name": "<NAME>", "number": 42, "role": "none"}
@pytest.fixture(scope="session")
@pytest.mark.usefixtures("test_account_data")
def test_account(test_account_data):
return Account(
name=test_account_data["name"],
number=test_account_data["number"],
role=test_account_data["role"],
)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_none_account(db_fixture):
"""Create Account with 'none' role"""
none_account = Account(
name="Mr. None",
number=000,
role="none",
)
return _save_account_to_db_and_return_its_model(db_fixture, none_account)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_user_account(db_fixture):
"""Create Account with 'none' role"""
user_account = Account(
name="<NAME>",
number=444,
role="user",
)
return _save_account_to_db_and_return_its_model(db_fixture, user_account)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_admin_account(db_fixture):
"""Create Account with 'none' role"""
admin_account = Account(
name="<NAME>",
number=777,
role="admin",
)
return _save_account_to_db_and_return_its_model(db_fixture, admin_account)
@pytest.fixture(scope="function", autouse=True)
@pytest.mark.usefixtures("db_fixture")
def cleanup_test_account(db_fixture):
"""Cleanup account table after each test
:param db_fixture:
"""
yield
Account.query.delete()
db_fixture.session.commit()
| import pytest
from learn_app.test_flow.models.account import Account
def _save_account_to_db_and_return_its_model(db_fixture, none_account):
db_fixture.session.add(none_account)
db_fixture.session.commit()
return none_account
@pytest.fixture(scope="session")
def test_account_data():
"""Get app context for tests
:return:
"""
return {"name": "<NAME>", "number": 42, "role": "none"}
@pytest.fixture(scope="session")
def test_account_api_data():
"""Get app context for tests
:return:
"""
return {"name": "<NAME>", "number": 42, "role": "none"}
@pytest.fixture(scope="session")
@pytest.mark.usefixtures("test_account_data")
def test_account(test_account_data):
return Account(
name=test_account_data["name"],
number=test_account_data["number"],
role=test_account_data["role"],
)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_none_account(db_fixture):
"""Create Account with 'none' role"""
none_account = Account(
name="Mr. None",
number=000,
role="none",
)
return _save_account_to_db_and_return_its_model(db_fixture, none_account)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_user_account(db_fixture):
"""Create Account with 'none' role"""
user_account = Account(
name="<NAME>",
number=444,
role="user",
)
return _save_account_to_db_and_return_its_model(db_fixture, user_account)
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("db_fixture")
def create_admin_account(db_fixture):
"""Create Account with 'none' role"""
admin_account = Account(
name="<NAME>",
number=777,
role="admin",
)
return _save_account_to_db_and_return_its_model(db_fixture, admin_account)
@pytest.fixture(scope="function", autouse=True)
@pytest.mark.usefixtures("db_fixture")
def cleanup_test_account(db_fixture):
"""Cleanup account table after each test
:param db_fixture:
"""
yield
Account.query.delete()
db_fixture.session.commit()
| en | 0.758997 | Get app context for tests :return: Get app context for tests :return: Create Account with 'none' role Create Account with 'none' role Create Account with 'none' role Cleanup account table after each test :param db_fixture: | 2.300392 | 2 |
.ignore/old/test.py | souvikdas95/SDN_VideoStreaming | 4 | 6618683 | <filename>.ignore/old/test.py<gh_stars>1-10
#!/usr/bin/python
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
| <filename>.ignore/old/test.py<gh_stars>1-10
#!/usr/bin/python
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
| ru | 0.258958 | #!/usr/bin/python | 1.704075 | 2 |
brainframe/cli/config.py | aotuai/brainframe-cli | 3 | 6618684 | <gh_stars>1-10
import os
from distutils.util import strtobool
from pathlib import Path
from typing import Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
import yaml
from . import frozen_utils, print_utils
T = TypeVar("T")
class Option(Generic[T]):
"""A configuration option.
Option values are determined using the following precedence:
1. From an environment variable with the name "BRAINFRAME_" followed by the option
name in all caps
2. From the defaults file shipped with this distribution
3. The fallback value, which is None
"""
name: str
value: Optional[T] = None
default: Optional[T] = None
def __init__(self, name: str):
self.name = name
@property
def env_var_name(self):
return "BRAINFRAME_" + self.name.upper()
def load(
self, converter: Callable[[str], T], defaults: Dict[str, str]
) -> None:
default = defaults.get(self.name)
value: Optional[str] = os.environ.get(self.env_var_name, default)
self.value = None if value is None else converter(value)
self.default = None if default is None else converter(default)
install_path = Option[Path]("install_path")
data_path = Option[Path]("data_path")
is_staging = Option[bool]("staging")
staging_username = Option[str]("staging_username")
staging_password = Option[str]("<PASSWORD>")
def load() -> None:
"""Initializes configuration options"""
with frozen_utils.DEFAULTS_FILE_PATH.open("r") as defaults_file:
defaults = yaml.load(defaults_file, Loader=yaml.FullLoader)
install_path.load(Path, defaults)
data_path.load(Path, defaults)
is_staging.load(_bool_converter, defaults)
staging_username.load(str, defaults)
staging_password.load(str, defaults)
def staging_credentials() -> Optional[Tuple[str, str]]:
if not is_staging.value:
return None
username = staging_username.value
password = <PASSWORD>.value
if username is None or password is None:
print_utils.fail_translate(
"general.staging-missing-credentials",
username_env_var=staging_username.env_var_name,
password_env_var=staging_password.env_var_name,
)
# Mypy doesn't understand that fail_translate exits this function, so it
# thinks the return type should be Tuple[Optional[str], Optional[str]]
return username, password # type: ignore
def _bool_converter(value: Union[str, bool]) -> bool:
if isinstance(value, bool):
return value
return strtobool(value)
| import os
from distutils.util import strtobool
from pathlib import Path
from typing import Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
import yaml
from . import frozen_utils, print_utils
T = TypeVar("T")
class Option(Generic[T]):
"""A configuration option.
Option values are determined using the following precedence:
1. From an environment variable with the name "BRAINFRAME_" followed by the option
name in all caps
2. From the defaults file shipped with this distribution
3. The fallback value, which is None
"""
name: str
value: Optional[T] = None
default: Optional[T] = None
def __init__(self, name: str):
self.name = name
@property
def env_var_name(self):
return "BRAINFRAME_" + self.name.upper()
def load(
self, converter: Callable[[str], T], defaults: Dict[str, str]
) -> None:
default = defaults.get(self.name)
value: Optional[str] = os.environ.get(self.env_var_name, default)
self.value = None if value is None else converter(value)
self.default = None if default is None else converter(default)
install_path = Option[Path]("install_path")
data_path = Option[Path]("data_path")
is_staging = Option[bool]("staging")
staging_username = Option[str]("staging_username")
staging_password = Option[str]("<PASSWORD>")
def load() -> None:
"""Initializes configuration options"""
with frozen_utils.DEFAULTS_FILE_PATH.open("r") as defaults_file:
defaults = yaml.load(defaults_file, Loader=yaml.FullLoader)
install_path.load(Path, defaults)
data_path.load(Path, defaults)
is_staging.load(_bool_converter, defaults)
staging_username.load(str, defaults)
staging_password.load(str, defaults)
def staging_credentials() -> Optional[Tuple[str, str]]:
if not is_staging.value:
return None
username = staging_username.value
password = <PASSWORD>.value
if username is None or password is None:
print_utils.fail_translate(
"general.staging-missing-credentials",
username_env_var=staging_username.env_var_name,
password_env_var=staging_password.env_var_name,
)
# Mypy doesn't understand that fail_translate exits this function, so it
# thinks the return type should be Tuple[Optional[str], Optional[str]]
return username, password # type: ignore
def _bool_converter(value: Union[str, bool]) -> bool:
if isinstance(value, bool):
return value
return strtobool(value) | en | 0.786055 | A configuration option. Option values are determined using the following precedence: 1. From an environment variable with the name "BRAINFRAME_" followed by the option name in all caps 2. From the defaults file shipped with this distribution 3. The fallback value, which is None Initializes configuration options # Mypy doesn't understand that fail_translate exits this function, so it # thinks the return type should be Tuple[Optional[str], Optional[str]] # type: ignore | 2.291253 | 2 |
client/apiclient/model.py | bopopescu/sentimentally | 0 | 6618685 | #!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = '<EMAIL> (<NAME>)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
'Must use apiclient.model.LoggingJsonModel as '
'the model.'
)
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a deserialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class JsonModel(Model):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
def __init__(self, data_wrapper=False):
"""Construct a JsonModel
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with JSON bodies.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = 'application/json'
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/1.0'
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
if body_value is not None:
headers['content-type'] = 'application/json'
body_value = simplejson.dumps(body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
params.update({'alt': 'json'})
astuples = []
for key, value in params.iteritems():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.urlencode(astuples)
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return simplejson.loads('{}')
body = simplejson.loads(content)
if isinstance(body, dict) and 'data' in body:
body = body['data']
return body
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
class LoggingJsonModel(JsonModel):
"""A printable JsonModel class that supports logging response info."""
def response(self, resp, content):
"""An overloaded response method that will output debug info if requested.
Args:
resp: An httplib2.Response object.
content: A string representing the response body.
Returns:
The body de-serialized as a Python object.
"""
if FLAGS.dump_request_response:
logging.info('--response-start--')
for h, v in resp.iteritems():
logging.info('%s: %s', h, v)
if content:
logging.info(content)
logging.info('--response-end--')
return super(LoggingJsonModel, self).response(
resp, content)
def request(self, headers, path_params, query_params, body_value):
"""An overloaded request method that will output debug info if requested.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
(headers, path_params, query, body) = super(
LoggingJsonModel, self).request(
headers, path_params, query_params, body_value)
if FLAGS.dump_request_response:
logging.info('--request-start--')
logging.info('-headers-start-')
for h, v in headers.iteritems():
logging.info('%s: %s', h, v)
logging.info('-headers-end-')
logging.info('-path-parameters-start-')
for h, v in path_params.iteritems():
logging.info('%s: %s', h, v)
logging.info('-path-parameters-end-')
logging.info('body: %s', body)
logging.info('query: %s', query)
logging.info('--request-end--')
return (headers, path_params, query, body)
| #!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = '<EMAIL> (<NAME>)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
'Must use apiclient.model.LoggingJsonModel as '
'the model.'
)
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a deserialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class JsonModel(Model):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
def __init__(self, data_wrapper=False):
"""Construct a JsonModel
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with JSON bodies.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = 'application/json'
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/1.0'
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
if body_value is not None:
headers['content-type'] = 'application/json'
body_value = simplejson.dumps(body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
params.update({'alt': 'json'})
astuples = []
for key, value in params.iteritems():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.urlencode(astuples)
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return simplejson.loads('{}')
body = simplejson.loads(content)
if isinstance(body, dict) and 'data' in body:
body = body['data']
return body
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
class LoggingJsonModel(JsonModel):
"""A printable JsonModel class that supports logging response info."""
def response(self, resp, content):
"""An overloaded response method that will output debug info if requested.
Args:
resp: An httplib2.Response object.
content: A string representing the response body.
Returns:
The body de-serialized as a Python object.
"""
if FLAGS.dump_request_response:
logging.info('--response-start--')
for h, v in resp.iteritems():
logging.info('%s: %s', h, v)
if content:
logging.info(content)
logging.info('--response-end--')
return super(LoggingJsonModel, self).response(
resp, content)
def request(self, headers, path_params, query_params, body_value):
"""An overloaded request method that will output debug info if requested.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
(headers, path_params, query, body) = super(
LoggingJsonModel, self).request(
headers, path_params, query_params, body_value)
if FLAGS.dump_request_response:
logging.info('--request-start--')
logging.info('-headers-start-')
for h, v in headers.iteritems():
logging.info('%s: %s', h, v)
logging.info('-headers-end-')
logging.info('-path-parameters-start-')
for h, v in path_params.iteritems():
logging.info('%s: %s', h, v)
logging.info('-path-parameters-end-')
logging.info('body: %s', body)
logging.info('query: %s', query)
logging.info('--request-end--')
return (headers, path_params, query, body)
| en | 0.801428 | #!/usr/bin/python2.4 # # Copyright (C) 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Model objects for requests and responses. Each API may support one or more serializations, such as JSON, Atom, etc. The model classes are responsible for converting between the wire format and the Python object representation. Model base class. All Model classes should implement this interface. The Model serializes and de-serializes between a wire format such as JSON and a Python object representation. Updates outgoing requests with a deserialized body. Args: headers: dict, request headers path_params: dict, parameters that appear in the request path query_params: dict, parameters that appear in the query body_value: object, the request body as a Python object, which must be serializable. Returns: A tuple of (headers, path_params, query, body) headers: dict, request headers path_params: dict, parameters that appear in the request path query: string, query part of the request URI body: string, the body serialized in the desired wire format. Convert the response wire format into a Python object. Args: resp: httplib2.Response, the HTTP response headers and status content: string, the body of the HTTP response Returns: The body de-serialized as a Python object. Raises: apiclient.errors.HttpError if a non 2xx response is received. Model class for JSON. Serializes and de-serializes between JSON and the Python object representation of HTTP request and response bodies. 
Construct a JsonModel Args: data_wrapper: boolean, wrap requests and responses in a data wrapper Updates outgoing requests with JSON bodies. Args: headers: dict, request headers path_params: dict, parameters that appear in the request path query_params: dict, parameters that appear in the query body_value: object, the request body as a Python object, which must be serializable by simplejson. Returns: A tuple of (headers, path_params, query, body) headers: dict, request headers path_params: dict, parameters that appear in the request path query: string, query part of the request URI body: string, the body serialized as JSON Builds a query string. Args: params: dict, the query parameters Returns: The query parameters properly encoded into an HTTP URI query string. Convert the response wire format into a Python object. Args: resp: httplib2.Response, the HTTP response headers and status content: string, the body of the HTTP response Returns: The body de-serialized as a Python object. Raises: apiclient.errors.HttpError if a non 2xx response is received. # Error handling is TBD, for example, do we retry # for some operation/error combinations? # A 204: No Content response should be treated differently # to all the other success states A printable JsonModel class that supports logging response info. An overloaded response method that will output debug info if requested. Args: resp: An httplib2.Response object. content: A string representing the response body. Returns: The body de-serialized as a Python object. An overloaded request method that will output debug info if requested. Args: headers: dict, request headers path_params: dict, parameters that appear in the request path query_params: dict, parameters that appear in the query body_value: object, the request body as a Python object, which must be serializable by simplejson. 
Returns: A tuple of (headers, path_params, query, body) headers: dict, request headers path_params: dict, parameters that appear in the request path query: string, query part of the request URI body: string, the body serialized as JSON | 2.445349 | 2 |
borg/storage.py | borg-project/borg | 7 | 6618686 | <reponame>borg-project/borg<gh_stars>1-10
"""@author: <NAME> <<EMAIL>>"""
import os.path
import csv
import itertools
import collections
import numpy
import borg
# Module-level logger; borg.get_logger is a project helper (presumably wrapping
# the stdlib logging module -- TODO confirm against borg/__init__.py).
logger = borg.get_logger(__name__, default_level = "INFO")
class RunRecord(object):
    """Record of a single solver run: solver name, budget, cost, and outcome."""

    def __init__(self, solver, budget, cost, success):
        """Store the run attributes verbatim."""

        self.solver = solver
        self.budget = budget
        self.cost = cost
        self.success = success

    def _fields(self):
        """Return the record fields as a tuple, in canonical order."""

        return (self.solver, self.budget, self.cost, self.success)

    def __str__(self):
        return str(self._fields())

    def __repr__(self):
        return repr(self._fields())
class RunData(object):
    """Load and access portfolio training data.

    Stores, per instance id:
      - a list of RunRecord objects (self.run_lists), and
      - a feature vector mapping feature name -> value (self.feature_vectors).

    All stored runs are expected to share one common budget; the feature
    vectors are expected to share one common set of feature names (the
    "cpu_cost" key, if present, is excluded from that comparison).

    NOTE(review): this class uses Python-2-only constructs (dict.iteritems,
    xrange, csv_reader.next); it targets Python 2.
    """

    def __init__(self, solver_names, common_budget = None):
        """Initialize.

        solver_names: list of solver names covered by these data; may be None
            and filled in later (as done by from_bundle()).
        common_budget: the budget shared by all runs, if already known.
        """

        self.solver_names = solver_names
        self.run_lists = {}          # instance id -> [RunRecord, ...]
        self.feature_vectors = {}    # instance id -> {feature name: value}
        self.common_budget = common_budget
        self.common_features = None  # sorted feature names; set on first vector

    def __len__(self):
        """Number of instances for which data are stored."""

        return len(self.run_lists)

    def add_run(self, id_, run):
        """Add a run to these data.

        The first run added establishes the common budget if none was given;
        subsequent runs must match it (asserted).
        """

        runs = self.run_lists.get(id_)

        if runs is None:
            self.run_lists[id_] = [run]
        else:
            runs.append(run)

        if self.common_budget is None:
            self.common_budget = run.budget
        else:
            assert run.budget == self.common_budget

    def add_runs(self, pairs):
        """Add runs to these data, given as (instance id, run) pairs."""

        for (id_, run) in pairs:
            self.add_run(id_, run)

    def add_feature_vector(self, id_, vector):
        """Add a feature vector to these data.

        Each instance may receive exactly one vector (asserted); its feature
        names (excluding "cpu_cost") must match those of earlier vectors.
        """

        assert id_ not in self.feature_vectors
        assert isinstance(vector, collections.Mapping)

        # "cpu_cost" is bookkeeping, not a feature; exclude it from the
        # common-feature-name check.
        names = [k for k in vector if k != "cpu_cost"]

        if self.common_features is None:
            self.common_features = sorted(names)
        else:
            assert self.common_features == sorted(names)

        self.feature_vectors[id_] = vector

    def filter(self, *ids):
        """Return a filtered set of run data restricted to the given instance ids."""

        data = RunData(self.solver_names)

        for id_ in ids:
            for run in self.run_lists[id_]:
                data.add_run(id_, run)

            data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def filter_features(self, names):
        """Return a set of run data with only the specified features.

        The run lists are shared (not copied) with the new object; only the
        feature vectors are rebuilt.
        """

        data = RunData(self.solver_names, self.common_budget)

        data.run_lists = self.run_lists

        for (id_, old_vector) in self.feature_vectors.iteritems():
            new_vector = dict((k, old_vector[k]) for k in names)

            data.add_feature_vector(id_, new_vector)

        return data

    def masked(self, mask):
        """Return a subset of the instances, keeping those whose mask entry is true.

        NOTE(review): this zips the mask against self.ids, i.e. raw dict key
        order -- callers must supply a mask built from the same ordering;
        confirm call sites.
        """

        return self.filter(*(id_ for (id_, m) in zip(self.ids, mask) if m))

    def only_successful(self):
        """Return only instances on which some solver succeeded."""

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if any(run.success for run in run_list):
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def only_nontrivial(self, threshold = 1.0):
        """Return only instances that are not trivially easy.

        An instance is kept if any of its runs failed or took longer than
        `threshold` (i.e. dropped if every run succeeded within the threshold).
        """

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if any(not run.success or run.cost > threshold for run in run_list):
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def only_nonempty(self):
        """Return only instances with at least one recorded run."""

        data = RunData(self.solver_names)

        for (id_, run_list) in self.run_lists.iteritems():
            if len(run_list) > 0:
                for run in run_list:
                    data.add_run(id_, run)

                data.add_feature_vector(id_, self.feature_vectors[id_])

        data.common_budget = self.common_budget

        return data

    def collect_systematic(self, counts):
        """Get a systematic subset of the data.

        Cycles through `counts` over instances (in random order), taking the
        same number of runs from *every* solver on a given instance.  Asserts
        that each solver has at least that many runs on the instance.
        """

        sampled = RunData(self.solver_names, common_budget = self.common_budget)
        iter_count = itertools.cycle(counts)

        # sorted() with a random key yields a random permutation of the ids
        for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
            count = next(iter_count)

            for solver in self.solver_names:
                runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())

                assert len(runs) >= count

                sampled.add_runs((id_, run) for run in runs[:count])

            sampled.add_feature_vector(id_, self.feature_vectors[id_])

        return sampled

    def collect_independent(self, counts):
        """Get independent subsets of the data.

        Unlike collect_systematic(), the count cycle is restarted per solver,
        so run counts on a given instance may differ between solvers; takes
        at most `count` runs per (instance, solver) without asserting
        availability.
        """

        sampled = RunData(self.solver_names, common_budget = self.common_budget)

        for solver in self.solver_names:
            iter_count = itertools.cycle(counts)

            for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
                count = next(iter_count)
                runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())

                sampled.add_runs((id_, run) for run in runs[:count])

                if id_ not in sampled.feature_vectors:
                    sampled.add_feature_vector(id_, self.feature_vectors[id_])

        return sampled

    def runs_on(self, id_, solver):
        """Yield the runs made by a particular solver on a particular instance."""

        for run in self.run_lists[id_]:
            if run.solver == solver:
                yield run

    def get_feature_vector(self, id_):
        """Retrieve features of a task."""

        return self.feature_vectors[id_]

    def get_feature_vectors(self):
        """Retrieve features of all tasks, as a dict keyed by instance id."""

        return self.feature_vectors

    def get_common_budget(self):
        """Retrieve the common run budget, if any.

        Recomputes from the stored runs (rather than trusting
        self.common_budget) and raises if multiple budgets are present.
        """

        budget = None

        for runs in self.run_lists.values():
            for run in runs:
                if budget is None:
                    budget = run.budget
                elif run.budget != budget:
                    raise Exception("collected runs include multiple run budgets")

        return budget

    def get_run_count(self):
        """Return the total number of runs stored, across all instances."""

        return sum(map(len, self.run_lists.values()))

    def to_features_array(self):
        """Retrieve feature values in an (N instances, F features) array.

        Rows follow sorted instance-id order; columns follow
        self.common_features (sorted feature-name) order.
        """

        assert set(self.feature_vectors) == set(self.run_lists)

        N = len(self.ids)
        F = len(self.common_features)
        feature_values_NF = numpy.empty((N, F), numpy.double)

        for (n, instance_id) in enumerate(sorted(self.ids)):
            features = self.feature_vectors[instance_id]

            for (f, name) in enumerate(self.common_features):
                feature_values_NF[n, f] = features[name]

        return feature_values_NF

    def to_runs_array(self, solver_names):
        """Return run durations as a partially-filled array.

        Returns (successes_NS, failures_NS, durations_NSR) where
        durations_NSR[n, s, :successes_NS[n, s]] holds the costs of the
        successful runs of solver s on instance n, NaN elsewhere; R is the
        maximum success count over all (instance, solver) pairs.
        """

        S = len(solver_names)
        N = len(self.run_lists)

        # accumulate the success and failure counts
        successes_NS = numpy.zeros((N, S), numpy.intc)
        failures_NS = numpy.zeros((N, S), numpy.intc)
        solver_names_S = list(solver_names)
        instance_ids = sorted(self.run_lists)

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_names_S.index(run.solver)

                if run.success:
                    successes_NS[n, s] += 1
                else:
                    failures_NS[n, s] += 1

        R = numpy.max(successes_NS)

        # fill in run durations; successes_NS is reset and rebuilt so it can
        # double as the per-(n, s) write cursor into durations_NSR
        durations_NSR = numpy.ones((N, S, R), numpy.double) * numpy.nan

        successes_NS[...] = 0

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_names_S.index(run.solver)

                if run.success:
                    r = successes_NS[n, s]

                    durations_NSR[n, s, r] = run.cost

                    successes_NS[n, s] = r + 1

        return (successes_NS, failures_NS, durations_NSR)

    def to_times_arrays(self):
        """Return run durations as per-solver arrays.

        Returns (times_arrays, ns_arrays, failures_NS): for each solver s,
        times_arrays[s] holds the costs of its successful runs and
        ns_arrays[s] the corresponding instance row indices; failures_NS
        counts unsuccessful runs per (instance, solver).
        """

        S = len(self.solver_names)
        N = len(self.run_lists)
        times_lists = [[] for _ in xrange(S)]
        ns_lists = [[] for _ in xrange(S)]
        failures_NS = numpy.zeros((N, S), numpy.intc)
        instance_ids = sorted(self.run_lists)

        for (n, instance_id) in enumerate(instance_ids):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = self.solver_names.index(run.solver)

                if run.success:
                    times_lists[s].append(run.cost)
                    ns_lists[s].append(n)
                else:
                    failures_NS[n, s] += 1

        times_arrays = map(numpy.array, times_lists)
        ns_arrays = map(numpy.array, ns_lists)

        return (times_arrays, ns_arrays, failures_NS)

    def to_bins_array(self, solver_names, B, cutoff = None):
        """Return discretized run duration counts.

        Successful runs with cost < cutoff are histogrammed into B equal-width
        bins; all other runs (failures, or cost >= cutoff) land in the final
        (B-th) overflow bin, giving C = B + 1 columns per solver.

        NOTE(review): under Python 2, `cutoff / B` truncates when both are
        ints -- callers appear to pass float budgets, but confirm.
        """

        if cutoff is None:
            cutoff = self.get_common_budget()

        S = len(solver_names)
        N = len(self.run_lists)
        C = B + 1
        solver_name_index = list(solver_names)
        outcomes_NSC = numpy.zeros((N, S, C), numpy.intc)
        interval = cutoff / B

        for (n, instance_id) in enumerate(sorted(self.run_lists)):
            runs = self.run_lists[instance_id]

            for run in runs:
                s = solver_name_index.index(run.solver)

                if run.success and run.cost < cutoff:
                    b = int(run.cost / interval)

                    outcomes_NSC[n, s, b] += 1
                else:
                    outcomes_NSC[n, s, B] += 1

        return outcomes_NSC

    @property
    def ids(self):
        """All associated instance ids (raw dict keys; order is arbitrary)."""

        return self.run_lists.keys()

    @staticmethod
    def from_roots(solver_names, tasks_roots, domain, suffix = ".runs.csv"):
        """Collect run data by scanning the given root directories for tasks."""

        task_paths = []

        for tasks_root in tasks_roots:
            task_paths.extend(borg.util.files_under(tasks_root, domain.extensions))

        return RunData.from_paths(solver_names, task_paths, domain, suffix)

    @staticmethod
    def from_paths(solver_names, task_paths, domain, suffix = ".runs.csv"):
        """Collect run data from task paths.

        For each task path, reads "<path><suffix>" for run records and
        "<path>.features.csv" for the feature vector; the task path itself is
        used as the instance id.
        """

        training = RunData(solver_names)

        for path in task_paths:
            # load run records
            run_data = numpy.recfromcsv(path + suffix, usemask = True)
            rows = run_data.tolist()

            # a single-row CSV yields a 0-d record; normalize to a list
            if run_data.shape == ():
                rows = [rows]

            for (run_solver, run_budget, run_cost, run_succeeded, run_answer) in rows:
                record = RunRecord(run_solver, run_budget, run_cost, run_succeeded)

                training.add_run(path, record)

            # load feature data
            feature_records = numpy.recfromcsv("{0}.features.csv".format(path))
            feature_dict = dict(zip(feature_records.dtype.names, feature_records.tolist()))

            training.add_feature_vector(path, feature_dict)

        return training

    @staticmethod
    def from_bundle(bundle_path):
        """Collect run data from a bundle directory of two gzipped CSV files.

        Expects "all_runs.csv.gz" (columns: instance, solver, budget, cost,
        succeeded) and "all_features.csv.gz" (instance plus one column per
        feature); asserts every instance has both runs and features.
        """

        run_data = RunData(None)

        # load runs
        runs_csv_path = os.path.join(bundle_path, "all_runs.csv.gz")

        logger.info("reading run data from %s", runs_csv_path)

        solver_names = set()

        with borg.util.openz(runs_csv_path) as csv_file:
            csv_reader = csv.reader(csv_file)
            columns = csv_reader.next()

            if columns[:5] != ["instance", "solver", "budget", "cost", "succeeded"]:
                raise Exception("unexpected columns in run data CSV file")

            for (instance, solver, budget_str, cost_str, succeeded_str) in csv_reader:
                run_data.add_run(
                    instance,
                    RunRecord(
                        solver,
                        float(budget_str),
                        float(cost_str),
                        succeeded_str.lower() == "true",
                        ),
                    )

                solver_names.add(solver)

        run_data.solver_names = sorted(solver_names)

        # load features
        features_csv_path = os.path.join(bundle_path, "all_features.csv.gz")

        logger.info("reading feature data from %s", features_csv_path)

        with borg.util.openz(features_csv_path) as csv_file:
            csv_reader = csv.reader(csv_file)

            try:
                columns = csv_reader.next()
            except StopIteration:
                # an empty features file is tolerated (no vectors loaded)
                pass
            else:
                if columns[0] != "instance":
                    raise Exception("unexpected columns in features CSV file")

                for row in csv_reader:
                    feature_dict = dict(zip(columns[1:], map(float, row[1:])))

                    run_data.add_feature_vector(row[0], feature_dict)

        assert set(run_data.run_lists) == set(run_data.feature_vectors)

        return run_data
# Alias -- presumably kept so older code referring to "TrainingData" keeps
# working after the rename to RunData; TODO confirm before removal.
TrainingData = RunData
| """@author: <NAME> <<EMAIL>>"""
import os.path
import csv
import itertools
import collections
import numpy
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class RunRecord(object):
"""Record of a solver run."""
def __init__(self, solver, budget, cost, success):
"""Initialize."""
self.solver = solver
self.budget = budget
self.cost = cost
self.success = success
def __str__(self):
return str((self.solver, self.budget, self.cost, self.success))
def __repr__(self):
return repr((self.solver, self.budget, self.cost, self.success))
class RunData(object):
"""Load and access portfolio training data."""
def __init__(self, solver_names, common_budget = None):
"""Initialize."""
self.solver_names = solver_names
self.run_lists = {}
self.feature_vectors = {}
self.common_budget = common_budget
self.common_features = None
def __len__(self):
"""Number of instances for which data are stored."""
return len(self.run_lists)
def add_run(self, id_, run):
"""Add a run to these data."""
runs = self.run_lists.get(id_)
if runs is None:
self.run_lists[id_] = [run]
else:
runs.append(run)
if self.common_budget is None:
self.common_budget = run.budget
else:
assert run.budget == self.common_budget
def add_runs(self, pairs):
"""Add runs to these data."""
for (id_, run) in pairs:
self.add_run(id_, run)
def add_feature_vector(self, id_, vector):
"""Add a feature vector to these data."""
assert id_ not in self.feature_vectors
assert isinstance(vector, collections.Mapping)
names = [k for k in vector if k != "cpu_cost"]
if self.common_features is None:
self.common_features = sorted(names)
else:
assert self.common_features == sorted(names)
self.feature_vectors[id_] = vector
def filter(self, *ids):
"""Return a filtered set of run data."""
data = RunData(self.solver_names)
for id_ in ids:
for run in self.run_lists[id_]:
data.add_run(id_, run)
data.add_feature_vector(id_, self.feature_vectors[id_])
data.common_budget = self.common_budget
return data
def filter_features(self, names):
"""Return a set of run data with only the specified features."""
data = RunData(self.solver_names, self.common_budget)
data.run_lists = self.run_lists
for (id_, old_vector) in self.feature_vectors.iteritems():
new_vector = dict((k, old_vector[k]) for k in names)
data.add_feature_vector(id_, new_vector)
return data
def masked(self, mask):
"""Return a subset of the instances."""
return self.filter(*(id_ for (id_, m) in zip(self.ids, mask) if m))
def only_successful(self):
"""Return only instances on which some solver succeeded."""
data = RunData(self.solver_names)
for (id_, run_list) in self.run_lists.iteritems():
if any(run.success for run in run_list):
for run in run_list:
data.add_run(id_, run)
data.add_feature_vector(id_, self.feature_vectors[id_])
data.common_budget = self.common_budget
return data
def only_nontrivial(self, threshold = 1.0):
"""Return only instances on which some solver succeeded."""
data = RunData(self.solver_names)
for (id_, run_list) in self.run_lists.iteritems():
if any(not run.success or run.cost > threshold for run in run_list):
for run in run_list:
data.add_run(id_, run)
data.add_feature_vector(id_, self.feature_vectors[id_])
data.common_budget = self.common_budget
return data
def only_nonempty(self):
"""Return only instances on which some solver succeeded."""
data = RunData(self.solver_names)
for (id_, run_list) in self.run_lists.iteritems():
if len(run_list) > 0:
for run in run_list:
data.add_run(id_, run)
data.add_feature_vector(id_, self.feature_vectors[id_])
data.common_budget = self.common_budget
return data
def collect_systematic(self, counts):
"""Get a systematic subset of the data."""
sampled = RunData(self.solver_names, common_budget = self.common_budget)
iter_count = itertools.cycle(counts)
for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
count = next(iter_count)
for solver in self.solver_names:
runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())
assert len(runs) >= count
sampled.add_runs((id_, run) for run in runs[:count])
sampled.add_feature_vector(id_, self.feature_vectors[id_])
return sampled
def collect_independent(self, counts):
"""Get independent subsets of the data."""
sampled = RunData(self.solver_names, common_budget = self.common_budget)
for solver in self.solver_names:
iter_count = itertools.cycle(counts)
for id_ in sorted(self.ids, key = lambda _: numpy.random.rand()):
count = next(iter_count)
runs = sorted(self.runs_on(id_, solver), key = lambda _: numpy.random.rand())
sampled.add_runs((id_, run) for run in runs[:count])
if id_ not in sampled.feature_vectors:
sampled.add_feature_vector(id_, self.feature_vectors[id_])
return sampled
def runs_on(self, id_, solver):
"""Retrieve runs made by a solver on an instance."""
for run in self.run_lists[id_]:
if run.solver == solver:
yield run
def get_feature_vector(self, id_):
"""Retrieve features of a task."""
return self.feature_vectors[id_]
def get_feature_vectors(self):
"""Retrieve features of all tasks."""
return self.feature_vectors
def get_common_budget(self):
"""Retrieve the common run budget, if any."""
budget = None
for runs in self.run_lists.values():
for run in runs:
if budget is None:
budget = run.budget
elif run.budget != budget:
raise Exception("collected runs include multiple run budgets")
return budget
def get_run_count(self):
"""Return the number of runs stored."""
return sum(map(len, self.run_lists.values()))
def to_features_array(self):
"""Retrieve feature values in an array."""
assert set(self.feature_vectors) == set(self.run_lists)
N = len(self.ids)
F = len(self.common_features)
feature_values_NF = numpy.empty((N, F), numpy.double)
for (n, instance_id) in enumerate(sorted(self.ids)):
features = self.feature_vectors[instance_id]
for (f, name) in enumerate(self.common_features):
feature_values_NF[n, f] = features[name]
return feature_values_NF
def to_runs_array(self, solver_names):
"""Return run durations as a partially-filled array."""
S = len(solver_names)
N = len(self.run_lists)
# accumulate the success and failure counts
successes_NS = numpy.zeros((N, S), numpy.intc)
failures_NS = numpy.zeros((N, S), numpy.intc)
solver_names_S = list(solver_names)
instance_ids = sorted(self.run_lists)
for (n, instance_id) in enumerate(instance_ids):
runs = self.run_lists[instance_id]
for run in runs:
s = solver_names_S.index(run.solver)
if run.success:
successes_NS[n, s] += 1
else:
failures_NS[n, s] += 1
R = numpy.max(successes_NS)
# fill in run durations
durations_NSR = numpy.ones((N, S, R), numpy.double) * numpy.nan
successes_NS[...] = 0
for (n, instance_id) in enumerate(instance_ids):
runs = self.run_lists[instance_id]
for run in runs:
s = solver_names_S.index(run.solver)
if run.success:
r = successes_NS[n, s]
durations_NSR[n, s, r] = run.cost
successes_NS[n, s] = r + 1
return (successes_NS, failures_NS, durations_NSR)
def to_times_arrays(self):
"""Return run durations as per-solver arrays."""
S = len(self.solver_names)
N = len(self.run_lists)
times_lists = [[] for _ in xrange(S)]
ns_lists = [[] for _ in xrange(S)]
failures_NS = numpy.zeros((N, S), numpy.intc)
instance_ids = sorted(self.run_lists)
for (n, instance_id) in enumerate(instance_ids):
runs = self.run_lists[instance_id]
for run in runs:
s = self.solver_names.index(run.solver)
if run.success:
times_lists[s].append(run.cost)
ns_lists[s].append(n)
else:
failures_NS[n, s] += 1
times_arrays = map(numpy.array, times_lists)
ns_arrays = map(numpy.array, ns_lists)
return (times_arrays, ns_arrays, failures_NS)
def to_bins_array(self, solver_names, B, cutoff = None):
"""Return discretized run duration counts."""
if cutoff is None:
cutoff = self.get_common_budget()
S = len(solver_names)
N = len(self.run_lists)
C = B + 1
solver_name_index = list(solver_names)
outcomes_NSC = numpy.zeros((N, S, C), numpy.intc)
interval = cutoff / B
for (n, instance_id) in enumerate(sorted(self.run_lists)):
runs = self.run_lists[instance_id]
for run in runs:
s = solver_name_index.index(run.solver)
if run.success and run.cost < cutoff:
b = int(run.cost / interval)
outcomes_NSC[n, s, b] += 1
else:
outcomes_NSC[n, s, B] += 1
return outcomes_NSC
@property
def ids(self):
"""All associated instance ids."""
return self.run_lists.keys()
@staticmethod
def from_roots(solver_names, tasks_roots, domain, suffix = ".runs.csv"):
"""Collect run data by scanning for tasks."""
task_paths = []
for tasks_root in tasks_roots:
task_paths.extend(borg.util.files_under(tasks_root, domain.extensions))
return RunData.from_paths(solver_names, task_paths, domain, suffix)
@staticmethod
def from_paths(solver_names, task_paths, domain, suffix = ".runs.csv"):
"""Collect run data from task paths."""
training = RunData(solver_names)
for path in task_paths:
# load run records
run_data = numpy.recfromcsv(path + suffix, usemask = True)
rows = run_data.tolist()
if run_data.shape == ():
rows = [rows]
for (run_solver, run_budget, run_cost, run_succeeded, run_answer) in rows:
record = RunRecord(run_solver, run_budget, run_cost, run_succeeded)
training.add_run(path, record)
# load feature data
feature_records = numpy.recfromcsv("{0}.features.csv".format(path))
feature_dict = dict(zip(feature_records.dtype.names, feature_records.tolist()))
training.add_feature_vector(path, feature_dict)
return training
@staticmethod
def from_bundle(bundle_path):
"""Collect run data from two CSV files."""
run_data = RunData(None)
# load runs
runs_csv_path = os.path.join(bundle_path, "all_runs.csv.gz")
logger.info("reading run data from %s", runs_csv_path)
solver_names = set()
with borg.util.openz(runs_csv_path) as csv_file:
csv_reader = csv.reader(csv_file)
columns = csv_reader.next()
if columns[:5] != ["instance", "solver", "budget", "cost", "succeeded"]:
raise Exception("unexpected columns in run data CSV file")
for (instance, solver, budget_str, cost_str, succeeded_str) in csv_reader:
run_data.add_run(
instance,
RunRecord(
solver,
float(budget_str),
float(cost_str),
succeeded_str.lower() == "true",
),
)
solver_names.add(solver)
run_data.solver_names = sorted(solver_names)
# load features
features_csv_path = os.path.join(bundle_path, "all_features.csv.gz")
logger.info("reading feature data from %s", features_csv_path)
with borg.util.openz(features_csv_path) as csv_file:
csv_reader = csv.reader(csv_file)
try:
columns = csv_reader.next()
except StopIteration:
pass
else:
if columns[0] != "instance":
raise Exception("unexpected columns in features CSV file")
for row in csv_reader:
feature_dict = dict(zip(columns[1:], map(float, row[1:])))
run_data.add_feature_vector(row[0], feature_dict)
assert set(run_data.run_lists) == set(run_data.feature_vectors)
return run_data
TrainingData = RunData | en | 0.871671 | @author: <NAME> <<EMAIL>> Record of a solver run. Initialize. Load and access portfolio training data. Initialize. Number of instances for which data are stored. Add a run to these data. Add runs to these data. Add a feature vector to these data. Return a filtered set of run data. Return a set of run data with only the specified features. Return a subset of the instances. Return only instances on which some solver succeeded. Return only instances on which some solver succeeded. Return only instances on which some solver succeeded. Get a systematic subset of the data. Get independent subsets of the data. Retrieve runs made by a solver on an instance. Retrieve features of a task. Retrieve features of all tasks. Retrieve the common run budget, if any. Return the number of runs stored. Retrieve feature values in an array. Return run durations as a partially-filled array. # accumulate the success and failure counts # fill in run durations Return run durations as per-solver arrays. Return discretized run duration counts. All associated instance ids. Collect run data by scanning for tasks. Collect run data from task paths. # load run records # load feature data Collect run data from two CSV files. # load runs # load features | 2.908376 | 3 |
tests/test_invalid_sequence_param.py | broHeryk/squall | 27 | 6618687 | from typing import Dict, List, Optional, Tuple
import pytest
from squall import Query, Squall
def test_invalid_sequence():
    """A List of an arbitrary (non-model) class must be rejected as a Query param."""
    with pytest.raises(AssertionError):
        application = Squall()

        class Payload:
            title: str

        @application.get("/items/")
        def read_items(q: List[Payload] = Query(None)):
            pass  # pragma: no cover
def test_invalid_tuple():
    """A Tuple of an arbitrary (non-model) class must be rejected as a Query param."""
    with pytest.raises(AssertionError):
        application = Squall()

        class Payload:
            title: str

        @application.get("/items/")
        def read_items(q: Tuple[Payload, Payload] = Query(None)):
            pass  # pragma: no cover
def test_invalid_dict():
    """A Dict valued with an arbitrary (non-model) class must be rejected as a Query param."""
    with pytest.raises(AssertionError):
        application = Squall()

        class Payload:
            title: str

        @application.get("/items/")
        def read_items(q: Dict[str, Payload] = Query(None)):
            pass  # pragma: no cover
def test_invalid_simple_dict():
    """A bare dict annotation must be rejected as a Query param."""
    with pytest.raises(AssertionError):
        application = Squall()

        class Payload:
            title: str

        @application.get("/items/")
        def read_items(q: Optional[dict] = Query(None)):
            pass  # pragma: no cover
| from typing import Dict, List, Optional, Tuple
import pytest
from squall import Query, Squall
def test_invalid_sequence():
with pytest.raises(AssertionError):
app = Squall()
class Item:
title: str
@app.get("/items/")
def read_items(q: List[Item] = Query(None)):
pass # pragma: no cover
def test_invalid_tuple():
with pytest.raises(AssertionError):
app = Squall()
class Item:
title: str
@app.get("/items/")
def read_items(q: Tuple[Item, Item] = Query(None)):
pass # pragma: no cover
def test_invalid_dict():
with pytest.raises(AssertionError):
app = Squall()
class Item:
title: str
@app.get("/items/")
def read_items(q: Dict[str, Item] = Query(None)):
pass # pragma: no cover
def test_invalid_simple_dict():
with pytest.raises(AssertionError):
app = Squall()
class Item:
title: str
@app.get("/items/")
def read_items(q: Optional[dict] = Query(None)):
pass # pragma: no cover
| en | 0.480456 | # pragma: no cover # pragma: no cover # pragma: no cover # pragma: no cover | 2.613922 | 3 |
src/chainalytic/zones/public-icon/aggregator/transform_registry/funded_wallets.py | yudus-lab/chainalytic-framework | 2 | 6618688 | import json
import time
from typing import Dict, List, Optional, Set, Tuple, Union
import traceback
import plyvel
from iconservice.icon_config import default_icon_config
from iconservice.icon_constant import ConfigKey
from iconservice.iiss.engine import Engine
from chainalytic.aggregator.transform import BaseTransform
from chainalytic.common import rpc_client, trie
class Transform(BaseTransform):
    """Track funded-wallet balances for the public-icon zone, block by block.

    State lives in the transform cache DB (a key/value store): wallet address
    (bytes) -> stringified balance (bytes), plus a LAST_STATE_HEIGHT_KEY entry
    recording the height of the last block applied.
    """

    START_BLOCK_HEIGHT = 1
    LAST_STATE_HEIGHT_KEY = b'last_state_height'
    # NOTE(review): MAX_WALLETS is never read in this class -- possibly
    # consumed by other components of this transform; confirm before removal.
    MAX_WALLETS = 200

    def __init__(self, working_dir: str, zone_id: str, transform_id: str):
        super(Transform, self).__init__(working_dir, zone_id, transform_id)

    async def execute(self, height: int, input_data: dict) -> Optional[Dict]:
        """Apply one block's transfers to the cached wallet-balance state.

        Returns a result dict carrying the updated wallets in 'misc', or None
        when `height` is not the direct successor of the cached state height
        (after asking the warehouse to rewind its last-block marker).
        """
        # Load transform cache to retrive previous staking state
        cache_db = self.transform_cache_db
        cache_db_batch = self.transform_cache_db.write_batch()

        # Make sure input block data represents for the next block of previous state cache
        prev_state_height = cache_db.get(Transform.LAST_STATE_HEIGHT_KEY)
        if prev_state_height:
            prev_state_height = int(prev_state_height)
            if prev_state_height != height - 1:
                # Out-of-order block: rewind the warehouse to the cached
                # height and skip processing this input.
                await rpc_client.call_async(
                    self.warehouse_endpoint,
                    call_id='api_call',
                    api_id='set_last_block_height',
                    api_params={'height': prev_state_height, 'transform_id': self.transform_id},
                )
                return None

        # Create cache and storage data for genesis block 0
        # NOTE(review): this seeds genesis state when processing height 1
        # (the comment says "block 0"); the hard-coded addresses/balance are
        # presumably the ICON genesis allocation -- confirm.
        if height == 1:
            cache_db_batch.put(b'hx54f7853dc6481b670caf69c5a27c7c8fe5be8269', b'800460000')
            cache_db_batch.put(b'hx1000000000000000000000000000000000000000', b'0')
            cache_db_batch.write()
            await rpc_client.call_async(
                self.warehouse_endpoint,
                call_id='api_call',
                api_id='update_funded_wallets',
                api_params={
                    'updated_wallets': {
                        'wallets': {
                            'hx54f7853dc6481b670caf69c5a27c7c8fe5be8269': '800460000',
                            'hx1000000000000000000000000000000000000000': '0',
                        },
                        'height': 0,
                    },
                    'transform_id': 'funded_wallets',
                },
            )

        # #################################################
        txs = input_data['data']

        # Example of `updated_wallets`
        # {
        #     "ADDRESS_1": "100000.0",
        #     "ADDRESS_2": "9999.9999",
        # }
        updated_wallets = {}
        for tx in txs:
            # Balances are stored as stringified numbers; missing wallets
            # default to 0.
            source_balance = cache_db.get(tx['from'].encode())
            dest_balance = cache_db.get(tx['to'].encode())
            value = tx['value']
            source_balance = float(source_balance) if source_balance else 0
            dest_balance = float(dest_balance) if dest_balance else 0
            # NOTE(review): transfers exceeding the known source balance are
            # silently skipped (no update, no log) -- confirm this is intended.
            if source_balance >= value:
                source_balance -= value
                dest_balance += value
                updated_wallets[tx['from']] = str(source_balance)
                updated_wallets[tx['to']] = str(dest_balance)

        # Persist the new balances and advance the state height atomically
        # in one batch write.
        for addr, balance in updated_wallets.items():
            cache_db_batch.put(addr.encode(), balance.encode())
        cache_db_batch.put(Transform.LAST_STATE_HEIGHT_KEY, str(height).encode())
        cache_db_batch.write()

        return {
            'height': height,
            'data': {},
            'misc': {'updated_wallets': {'wallets': updated_wallets, 'height': height}},
        }
| import json
import time
from typing import Dict, List, Optional, Set, Tuple, Union
import traceback
import plyvel
from iconservice.icon_config import default_icon_config
from iconservice.icon_constant import ConfigKey
from iconservice.iiss.engine import Engine
from chainalytic.aggregator.transform import BaseTransform
from chainalytic.common import rpc_client, trie
class Transform(BaseTransform):
START_BLOCK_HEIGHT = 1
LAST_STATE_HEIGHT_KEY = b'last_state_height'
MAX_WALLETS = 200
def __init__(self, working_dir: str, zone_id: str, transform_id: str):
super(Transform, self).__init__(working_dir, zone_id, transform_id)
async def execute(self, height: int, input_data: dict) -> Optional[Dict]:
# Load transform cache to retrive previous staking state
cache_db = self.transform_cache_db
cache_db_batch = self.transform_cache_db.write_batch()
# Make sure input block data represents for the next block of previous state cache
prev_state_height = cache_db.get(Transform.LAST_STATE_HEIGHT_KEY)
if prev_state_height:
prev_state_height = int(prev_state_height)
if prev_state_height != height - 1:
await rpc_client.call_async(
self.warehouse_endpoint,
call_id='api_call',
api_id='set_last_block_height',
api_params={'height': prev_state_height, 'transform_id': self.transform_id},
)
return None
# Create cache and storage data for genesis block 0
if height == 1:
cache_db_batch.put(b'hx54f7853dc6481b670caf69c5a27c7c8fe5be8269', b'800460000')
cache_db_batch.put(b'hx1000000000000000000000000000000000000000', b'0')
cache_db_batch.write()
await rpc_client.call_async(
self.warehouse_endpoint,
call_id='api_call',
api_id='update_funded_wallets',
api_params={
'updated_wallets': {
'wallets': {
'hx54f7853dc6481b670caf69c5a27c7c8fe5be8269': '800460000',
'hx1000000000000000000000000000000000000000': '0',
},
'height': 0,
},
'transform_id': 'funded_wallets',
},
)
# #################################################
txs = input_data['data']
# Example of `updated_wallets`
# {
# "ADDRESS_1": "100000.0",
# "ADDRESS_2": "9999.9999",
# }
updated_wallets = {}
for tx in txs:
source_balance = cache_db.get(tx['from'].encode())
dest_balance = cache_db.get(tx['to'].encode())
value = tx['value']
source_balance = float(source_balance) if source_balance else 0
dest_balance = float(dest_balance) if dest_balance else 0
if source_balance >= value:
source_balance -= value
dest_balance += value
updated_wallets[tx['from']] = str(source_balance)
updated_wallets[tx['to']] = str(dest_balance)
for addr, balance in updated_wallets.items():
cache_db_batch.put(addr.encode(), balance.encode())
cache_db_batch.put(Transform.LAST_STATE_HEIGHT_KEY, str(height).encode())
cache_db_batch.write()
return {
'height': height,
'data': {},
'misc': {'updated_wallets': {'wallets': updated_wallets, 'height': height}},
}
| en | 0.560309 | # Load transform cache to retrive previous staking state # Make sure input block data represents for the next block of previous state cache # Create cache and storage data for genesis block 0 # ################################################# # Example of `updated_wallets` # { # "ADDRESS_1": "100000.0", # "ADDRESS_2": "9999.9999", # } | 1.885515 | 2 |
transfer.py | patcdaniel/ifcb-powerbuoy-retrieval | 0 | 6618689 | <gh_stars>0
#!/bin/python3
"""
Organize IFCB files into a standard structure:
/CA-IFCB-161/2021/D20210815/{FILES} -
"""
import os, glob, re, shutil, subprocess
# GLOBALS
# Root of the flat dump of raw IFCB files retrieved from the instrument.
DATA_PATH = "/opt/ifcb-data/power-buoy-deployment/"
# Destination root for the organized /CA-IFCB-161/<year>/D<yyyymmdd>/ tree.
DES_Path = os.path.join(DATA_PATH,"CA-IFCB-161")
def transfer_files_directories():
    """
    Copy raw IFCB files from DATA_PATH into the organized DES_Path tree.

    Files are placed at DES_Path/<year>/D<yyyymmdd>/<file>, with year and day
    parsed from the filename (pattern "D" followed by 8 digits).  Files whose
    basename already exists anywhere under DES_Path are skipped.
    Assume that there are .hdr, .roi, and .adc files for each filename.
    Example structure /CA-IFCB-161/2021/D20210718/
    """
    # Basenames of files already transferred (a set for O(1) membership tests).
    formatted_data = set(
        os.path.basename(f) for f in glob.glob(DES_Path + "/*/*/*.*")
    )
    # "D" + yyyymmdd; groups: (whole D-date, year, monthday)
    pattern = r"(D(\d\d\d\d)(\d\d\d\d))"

    for fname in sorted(glob.glob(DATA_PATH + "*.*")):
        base_name = os.path.basename(fname)

        # BUG FIX: the original tested the *full source path* against a list
        # of basenames, so the skip check never matched and every file was
        # re-copied on every run.  Compare basenames to basenames.
        if base_name in formatted_data:
            continue

        date_str = re.findall(pattern, base_name)
        if not date_str:
            # Filename carries no parseable D-date; nothing to organize.
            continue

        year = date_str[0][1]
        unique_day = date_str[0][0]
        day_dir = os.path.join(DES_Path, year, unique_day)

        if not os.path.isdir(day_dir):
            # Clear the umask temporarily so the directories actually get the
            # requested group/world permissions; makedirs creates the year
            # level too when it is missing.
            oldmask = os.umask(000)
            os.makedirs(day_dir, 0o775)
            os.umask(oldmask)

        try:
            shutil.copy(fname, os.path.join(day_dir, base_name))
        except OSError:
            # Local file didn't transfer completely; re-fetch it from the
            # instrument (bandwidth-limited scp), then retry the copy.
            # BUG FIX: the remote path must use the basename -- the original
            # appended the full local path to the remote directory.
            subprocess.run(
                ["scp", "-l", "1000",
                 "<EMAIL>:/mnt/data/ifcbdata/" + base_name,
                 DATA_PATH]
            )  # MegaB/Sec
            shutil.copy(os.path.join(DATA_PATH, base_name), os.path.join(day_dir, base_name))
if __name__ == "__main__":
transfer_files_directories() | #!/bin/python3
"""
Organize IFCB files into a standard structure:
/CA-IFCB-161/2021/D20210815/{FILES} -
"""
import os, glob, re, shutil, subprocess
# GLOBALS
DATA_PATH = "/opt/ifcb-data/power-buoy-deployment/"
DES_Path = os.path.join(DATA_PATH,"CA-IFCB-161")
def transfer_files_directories():
"""
Search all file names, create a file directory for each year and a subdirectory of each month,if
either doesn't already exists, then move files into that directory.
Assume that there are .hdr, .roi, and .adc files for each filename
Example structure /CA-IFCB-161/2021/D20210718/
"""
formatted_data = [os.path.basename(f) for f in sorted(glob.glob(DES_Path+"/*/*/*.*"))] # All files already transfered
for fname in sorted(glob.glob(DATA_PATH+"*.*")):
if not fname in formatted_data:
pattern = r"(D(\d\d\d\d)(\d\d\d\d))"
base_name = os.path.basename(fname)
date_str = re.findall(pattern, base_name)
if bool(date_str):
year = date_str[0][1]
unique_day = date_str[0][0]
if not os.path.isdir(os.path.join(DES_Path,year)):
oldmask = os.umask(000)
os.makedirs(os.path.join(DES_Path,year,unique_day), 0o777)
os.umask(oldmask)
else:
if not os.path.isdir(os.path.join(DES_Path,year,unique_day)):
oldmask = os.umask(000)
os.mkdir(os.path.join(DES_Path,year,unique_day), 0o775)
os.umask(oldmask)
try:
shutil.copy(fname, os.path.join(DES_Path,year,unique_day,base_name))
except OSError as e:
# File didn't transfer completely. Try to grab it again.
subprocess.run(["scp",'-l','1000',"<EMAIL>:/mnt/data/ifcbdata/"+fname, DATA_PATH]) #MegaB/Sec
shutil.copy(os.path.join(DATA_PATH ,fname), os.path.join(DES_Path,year,unique_day,fname))
if __name__ == "__main__":
transfer_files_directories() | en | 0.86612 | #!/bin/python3 Organize IFCB files into a standard structure: /CA-IFCB-161/2021/D20210815/{FILES} - # GLOBALS Search all file names, create a file directory for each year and a subdirectory of each month,if either doesn't already exists, then move files into that directory. Assume that there are .hdr, .roi, and .adc files for each filename Example structure /CA-IFCB-161/2021/D20210718/ # All files already transfered # File didn't transfer completely. Try to grab it again. #MegaB/Sec | 3.166377 | 3 |
test_pytest.py | multi-template-matching/mtm-skimage-shapely | 8 | 6618690 | <gh_stars>1-10
from skimage.data import coins
import MTM
from MTM.Detection import plotDetections
print( MTM.__version__ )
import numpy as np
#%% Get image and templates by cropping
image = coins()
smallCoin = image[37:37+38, 80:80+41]
bigCoin = image[14:14+59,302:302+65]
listLabels = ["small", "big"]
listTemplates = [smallCoin, bigCoin]
def test_simplest():
return MTM.matchTemplates(image,
listTemplates)
def test_searchRegion():
return MTM.matchTemplates(image,
listTemplates,
searchBox=(0, 0, 300, 150))
def test_downscaling():
return MTM.matchTemplates(image,
listTemplates,
downscaling_factor=4)
if __name__ == "__main__":
A = test_simplest()
B = test_searchRegion()
C = test_downscaling()
print ("Number of hits:", len(A), len(B), len(C)) | from skimage.data import coins
import MTM
from MTM.Detection import plotDetections
print( MTM.__version__ )
import numpy as np
#%% Get image and templates by cropping
image = coins()
smallCoin = image[37:37+38, 80:80+41]
bigCoin = image[14:14+59,302:302+65]
listLabels = ["small", "big"]
listTemplates = [smallCoin, bigCoin]
def test_simplest():
return MTM.matchTemplates(image,
listTemplates)
def test_searchRegion():
return MTM.matchTemplates(image,
listTemplates,
searchBox=(0, 0, 300, 150))
def test_downscaling():
return MTM.matchTemplates(image,
listTemplates,
downscaling_factor=4)
if __name__ == "__main__":
A = test_simplest()
B = test_searchRegion()
C = test_downscaling()
print ("Number of hits:", len(A), len(B), len(C)) | en | 0.844065 | #%% Get image and templates by cropping | 2.583316 | 3 |
study/chainer_study/chainer_study-1.py | strawsyz/straw | 2 | 6618691 | from chainer import Variable
import numpy as np
a = np.asarray([1, 2, 3], dtype=np.float32)
x = Variable(a)
# print(x)
# print(x.debug_print())
import chainer.links as L
in_size = 3 # input vector's dimension
out_size = 2 # output vector's dimension
linear_layer = L.Linear(in_size, out_size) # L.linear is subclass of `Link`
"""linear_layer has 2 internal parameters `W` and `b`, which are `Variable`"""
# print('W: ', linear_layer.W.data, ', shape: ', linear_layer.W.shape)
# print('b: ', linear_layer.b.data, ', shape: ', linear_layer.b.shape)
# Force update (set) internal parameters
linear_layer.W.data = np.array([[1, 2, 3], [0, 0, 0]], dtype=np.float32)
linear_layer.b.data = np.array([3, 5], dtype=np.float32)
x0 = np.array([1, 0, 0], dtype=np.float32)
x1 = np.array([1, 1, 1], dtype=np.float32)
x = Variable(np.array([x0, x1], dtype=np.float32))
y = linear_layer(x)
print('W: ', linear_layer.W.data)
print('b: ', linear_layer.b.data)
print('x: ', x.data) # input is x0 & x1
print('y: ', y.data) # output is y0 & y1
# W: [[ 0.01068367 0.58748239 -0.16838944]
| from chainer import Variable
import numpy as np
a = np.asarray([1, 2, 3], dtype=np.float32)
x = Variable(a)
# print(x)
# print(x.debug_print())
import chainer.links as L
in_size = 3 # input vector's dimension
out_size = 2 # output vector's dimension
linear_layer = L.Linear(in_size, out_size) # L.linear is subclass of `Link`
"""linear_layer has 2 internal parameters `W` and `b`, which are `Variable`"""
# print('W: ', linear_layer.W.data, ', shape: ', linear_layer.W.shape)
# print('b: ', linear_layer.b.data, ', shape: ', linear_layer.b.shape)
# Force update (set) internal parameters
linear_layer.W.data = np.array([[1, 2, 3], [0, 0, 0]], dtype=np.float32)
linear_layer.b.data = np.array([3, 5], dtype=np.float32)
x0 = np.array([1, 0, 0], dtype=np.float32)
x1 = np.array([1, 1, 1], dtype=np.float32)
x = Variable(np.array([x0, x1], dtype=np.float32))
y = linear_layer(x)
print('W: ', linear_layer.W.data)
print('b: ', linear_layer.b.data)
print('x: ', x.data) # input is x0 & x1
print('y: ', y.data) # output is y0 & y1
# W: [[ 0.01068367 0.58748239 -0.16838944]
| en | 0.522501 | # print(x) # print(x.debug_print()) # input vector's dimension # output vector's dimension # L.linear is subclass of `Link` linear_layer has 2 internal parameters `W` and `b`, which are `Variable` # print('W: ', linear_layer.W.data, ', shape: ', linear_layer.W.shape) # print('b: ', linear_layer.b.data, ', shape: ', linear_layer.b.shape) # Force update (set) internal parameters # input is x0 & x1 # output is y0 & y1 # W: [[ 0.01068367 0.58748239 -0.16838944] | 3.108582 | 3 |
tests/test_devices_create.py | oogeso/oogeso | 2 | 6618692 | from oogeso import dto
from oogeso.core import devices
dev_data_generic = {
"id": "the_id",
"node_id": "the_node",
"name": "the_name",
"include": True,
"profile": "the_profile",
"flow_min": 10,
"flow_max": 20,
"max_ramp_up": None,
"max_ramp_down": None,
"op_cost": 0,
}
# Only Powersource and PowerSink are used in electric-only modelling
def test_powersource():
startstop_data = dto.StartStopData(
is_on_init=False,
penalty_start=1,
penalty_stop=0,
delay_start_minutes=30,
minimum_time_on_minutes=0,
minimum_time_off_minutes=0,
)
dev_data = dto.DevicePowerSourceData(
**dev_data_generic,
start_stop=startstop_data,
penalty_function=([0, 50], [1, 20]),
)
carrier_data_dict = {}
obj = devices.Powersource(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Powersource)
assert obj.dev_data.node_id == "the_node"
assert obj.dev_data.penalty_function == ([0, 50], [1, 20])
def test_powersink():
dev_data = dto.DevicePowerSinkData(**dev_data_generic, reserve_factor=0)
carrier_data_dict = {}
obj = devices.PowerSink(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PowerSink)
assert obj.dev_data.reserve_factor == 0
# The following device models are only used in multi-energy modelling:
def test_gasturbine():
startstop_data = dto.StartStopData(
is_on_init=False,
penalty_start=1,
penalty_stop=0,
delay_start_minutes=30,
minimum_time_on_minutes=0,
minimum_time_off_minutes=0,
)
dev_data = dto.DeviceGasTurbineData(**dev_data_generic, start_stop=startstop_data)
carrier_data_dict = {}
obj = devices.GasTurbine(dev_data, carrier_data_dict)
assert isinstance(obj, devices.GasTurbine)
assert obj.dev_data.reserve_factor == 1 # default value
assert obj.dev_data.start_stop.penalty_start == 1
def test_compressor_el():
dev_data = dto.DeviceCompressorElData(**dev_data_generic, eta=0.6, Q0=0.5, temp_in=300)
carrier_data_dict = {}
obj = devices.CompressorEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.CompressorEl)
assert obj.dev_data.eta == 0.6
def test_compressor_gas():
dev_data = dto.DeviceCompressorGasData(**dev_data_generic, eta=0.6, Q0=0.5, temp_in=300)
carrier_data_dict = {}
obj = devices.CompressorGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.CompressorGas)
assert obj.dev_data.eta == 0.6
def test_pump_oil():
dev_data = dto.DevicePumpOilData(**dev_data_generic, eta=0.6)
carrier_data_dict = {}
obj = devices.PumpOil(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PumpOil)
assert obj.dev_data.eta == 0.6
def test_pump_water():
dev_data = dto.DevicePumpWaterData(**dev_data_generic, eta=0.6)
carrier_data_dict = {}
obj = devices.PumpWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PumpWater)
assert obj.dev_data.eta == 0.6
def test_separator():
dev_data = dto.DeviceSeparatorData(**dev_data_generic, el_demand_factor=0.1, heat_demand_factor=0.5)
carrier_data_dict = {}
obj = devices.Separator(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Separator)
assert obj.dev_data.heat_demand_factor == 0.5
def test_separator2():
dev_data = dto.DeviceSeparator2Data(**dev_data_generic, el_demand_factor=0.1, heat_demand_factor=0.5)
carrier_data_dict = {}
obj = devices.Separator2(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Separator2)
assert obj.dev_data.heat_demand_factor == 0.5
def test_well_production():
dev_data = dto.DeviceWellProductionData(**dev_data_generic, wellhead_pressure=5)
carrier_data_dict = {}
obj = devices.WellProduction(dev_data, carrier_data_dict)
assert isinstance(obj, devices.WellProduction)
assert obj.dev_data.wellhead_pressure == 5
def test_well_gaslift():
dev_data = dto.DeviceWellGasLiftData(
**dev_data_generic,
gas_oil_ratio=500,
water_cut=0.5,
f_inj=0.7,
injection_pressure=25,
separator_pressure=5,
)
carrier_data_dict = {}
obj = devices.WellGasLift(dev_data, carrier_data_dict)
assert isinstance(obj, devices.WellGasLift)
assert obj.dev_data.injection_pressure == 25
def test_sink_gas():
dev_data = dto.DeviceSinkGasData(**dev_data_generic, price={"gas": 100})
carrier_data_dict = {}
obj = devices.SinkGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkGas)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.price == {"gas": 100}
def test_sink_oil():
dev_data = dto.DeviceSinkOilData(**dev_data_generic, price={"oil": 100})
carrier_data_dict = {}
obj = devices.SinkOil(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkOil)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.price == {"oil": 100}
def test_sink_water():
dev_data = dto.DeviceSinkWaterData(**dev_data_generic, flow_avg=None, max_accumulated_deviation=None)
carrier_data_dict = {}
obj = devices.SinkWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkWater)
assert obj.dev_data.flow_max == 20
def test_sink_heat():
dev_data = dto.DeviceSinkHeatData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.SinkHeat(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkHeat)
assert obj.dev_data.flow_max == 20
def test_sink_el():
dev_data = dto.DeviceSinkElData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.SinkEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkEl)
assert obj.dev_data.flow_max == 20
def test_source_gas():
dev_data = dto.DeviceSourceGasData(**dev_data_generic, naturalpressure=15)
carrier_data_dict = {}
obj = devices.SourceGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceGas)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.naturalpressure == 15
def test_source_water():
dev_data = dto.DeviceSourceWaterData(**dev_data_generic, naturalpressure=15)
carrier_data_dict = {}
obj = devices.SourceWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceWater)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.naturalpressure == 15
def test_source_el():
# source_el is identical to powersource
dev_data = dto.DeviceSourceElData(**dev_data_generic, co2em=1.5)
carrier_data_dict = {}
obj = devices.SourceEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceEl)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.co2em == 1.5
assert obj.dev_data.reserve_factor == 1
def test_storage_el():
dev_data = dto.DeviceStorageElData(
**dev_data_generic,
E_max=10,
E_min=0.2,
E_cost=0,
eta=0.9,
target_profile="mytarget",
E_end=5,
E_init=2.5,
)
carrier_data_dict = {}
obj = devices.StorageEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.StorageEl)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.E_end == 5
def test_storage_hydrogen():
dev_data = dto.DeviceStorageHydrogenData(
**dev_data_generic,
E_max=10,
E_min=0.2,
E_cost=0,
eta=0.9,
target_profile="mytarget",
E_init=4,
)
carrier_data_dict = {}
obj = devices.StorageHydrogen(dev_data, carrier_data_dict)
assert isinstance(obj, devices.StorageHydrogen)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.target_profile == "mytarget"
def test_storage_gasheater():
dev_data = dto.DeviceGasHeaterData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.GasHeater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.GasHeater)
assert obj.dev_data.flow_max == 20
def test_storage_heatpump():
dev_data = dto.DeviceHeatPumpData(**dev_data_generic, eta=3)
carrier_data_dict = {}
obj = devices.HeatPump(dev_data, carrier_data_dict)
assert isinstance(obj, devices.HeatPump)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 3
def test_storage_electrolyser():
dev_data = dto.DeviceElectrolyserData(**dev_data_generic, eta=0.5, eta_heat=0.3)
carrier_data_dict = {}
obj = devices.Electrolyser(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Electrolyser)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 0.5
def test_storage_fuelcell():
dev_data = dto.DeviceFuelCellData(**dev_data_generic, eta=0.5, eta_heat=0.3)
carrier_data_dict = {}
obj = devices.FuelCell(dev_data, carrier_data_dict)
assert isinstance(obj, devices.FuelCell)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 0.5
assert obj.carrier_in == ["hydrogen"]
assert obj.carrier_out == ["el", "heat"]
| from oogeso import dto
from oogeso.core import devices
dev_data_generic = {
"id": "the_id",
"node_id": "the_node",
"name": "the_name",
"include": True,
"profile": "the_profile",
"flow_min": 10,
"flow_max": 20,
"max_ramp_up": None,
"max_ramp_down": None,
"op_cost": 0,
}
# Only Powersource and PowerSink are used in electric-only modelling
def test_powersource():
startstop_data = dto.StartStopData(
is_on_init=False,
penalty_start=1,
penalty_stop=0,
delay_start_minutes=30,
minimum_time_on_minutes=0,
minimum_time_off_minutes=0,
)
dev_data = dto.DevicePowerSourceData(
**dev_data_generic,
start_stop=startstop_data,
penalty_function=([0, 50], [1, 20]),
)
carrier_data_dict = {}
obj = devices.Powersource(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Powersource)
assert obj.dev_data.node_id == "the_node"
assert obj.dev_data.penalty_function == ([0, 50], [1, 20])
def test_powersink():
dev_data = dto.DevicePowerSinkData(**dev_data_generic, reserve_factor=0)
carrier_data_dict = {}
obj = devices.PowerSink(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PowerSink)
assert obj.dev_data.reserve_factor == 0
# The following device models are only used in multi-energy modelling:
def test_gasturbine():
startstop_data = dto.StartStopData(
is_on_init=False,
penalty_start=1,
penalty_stop=0,
delay_start_minutes=30,
minimum_time_on_minutes=0,
minimum_time_off_minutes=0,
)
dev_data = dto.DeviceGasTurbineData(**dev_data_generic, start_stop=startstop_data)
carrier_data_dict = {}
obj = devices.GasTurbine(dev_data, carrier_data_dict)
assert isinstance(obj, devices.GasTurbine)
assert obj.dev_data.reserve_factor == 1 # default value
assert obj.dev_data.start_stop.penalty_start == 1
def test_compressor_el():
dev_data = dto.DeviceCompressorElData(**dev_data_generic, eta=0.6, Q0=0.5, temp_in=300)
carrier_data_dict = {}
obj = devices.CompressorEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.CompressorEl)
assert obj.dev_data.eta == 0.6
def test_compressor_gas():
dev_data = dto.DeviceCompressorGasData(**dev_data_generic, eta=0.6, Q0=0.5, temp_in=300)
carrier_data_dict = {}
obj = devices.CompressorGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.CompressorGas)
assert obj.dev_data.eta == 0.6
def test_pump_oil():
dev_data = dto.DevicePumpOilData(**dev_data_generic, eta=0.6)
carrier_data_dict = {}
obj = devices.PumpOil(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PumpOil)
assert obj.dev_data.eta == 0.6
def test_pump_water():
dev_data = dto.DevicePumpWaterData(**dev_data_generic, eta=0.6)
carrier_data_dict = {}
obj = devices.PumpWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.PumpWater)
assert obj.dev_data.eta == 0.6
def test_separator():
dev_data = dto.DeviceSeparatorData(**dev_data_generic, el_demand_factor=0.1, heat_demand_factor=0.5)
carrier_data_dict = {}
obj = devices.Separator(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Separator)
assert obj.dev_data.heat_demand_factor == 0.5
def test_separator2():
dev_data = dto.DeviceSeparator2Data(**dev_data_generic, el_demand_factor=0.1, heat_demand_factor=0.5)
carrier_data_dict = {}
obj = devices.Separator2(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Separator2)
assert obj.dev_data.heat_demand_factor == 0.5
def test_well_production():
dev_data = dto.DeviceWellProductionData(**dev_data_generic, wellhead_pressure=5)
carrier_data_dict = {}
obj = devices.WellProduction(dev_data, carrier_data_dict)
assert isinstance(obj, devices.WellProduction)
assert obj.dev_data.wellhead_pressure == 5
def test_well_gaslift():
dev_data = dto.DeviceWellGasLiftData(
**dev_data_generic,
gas_oil_ratio=500,
water_cut=0.5,
f_inj=0.7,
injection_pressure=25,
separator_pressure=5,
)
carrier_data_dict = {}
obj = devices.WellGasLift(dev_data, carrier_data_dict)
assert isinstance(obj, devices.WellGasLift)
assert obj.dev_data.injection_pressure == 25
def test_sink_gas():
dev_data = dto.DeviceSinkGasData(**dev_data_generic, price={"gas": 100})
carrier_data_dict = {}
obj = devices.SinkGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkGas)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.price == {"gas": 100}
def test_sink_oil():
dev_data = dto.DeviceSinkOilData(**dev_data_generic, price={"oil": 100})
carrier_data_dict = {}
obj = devices.SinkOil(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkOil)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.price == {"oil": 100}
def test_sink_water():
dev_data = dto.DeviceSinkWaterData(**dev_data_generic, flow_avg=None, max_accumulated_deviation=None)
carrier_data_dict = {}
obj = devices.SinkWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkWater)
assert obj.dev_data.flow_max == 20
def test_sink_heat():
dev_data = dto.DeviceSinkHeatData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.SinkHeat(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkHeat)
assert obj.dev_data.flow_max == 20
def test_sink_el():
dev_data = dto.DeviceSinkElData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.SinkEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SinkEl)
assert obj.dev_data.flow_max == 20
def test_source_gas():
dev_data = dto.DeviceSourceGasData(**dev_data_generic, naturalpressure=15)
carrier_data_dict = {}
obj = devices.SourceGas(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceGas)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.naturalpressure == 15
def test_source_water():
dev_data = dto.DeviceSourceWaterData(**dev_data_generic, naturalpressure=15)
carrier_data_dict = {}
obj = devices.SourceWater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceWater)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.naturalpressure == 15
def test_source_el():
# source_el is identical to powersource
dev_data = dto.DeviceSourceElData(**dev_data_generic, co2em=1.5)
carrier_data_dict = {}
obj = devices.SourceEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.SourceEl)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.co2em == 1.5
assert obj.dev_data.reserve_factor == 1
def test_storage_el():
dev_data = dto.DeviceStorageElData(
**dev_data_generic,
E_max=10,
E_min=0.2,
E_cost=0,
eta=0.9,
target_profile="mytarget",
E_end=5,
E_init=2.5,
)
carrier_data_dict = {}
obj = devices.StorageEl(dev_data, carrier_data_dict)
assert isinstance(obj, devices.StorageEl)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.E_end == 5
def test_storage_hydrogen():
dev_data = dto.DeviceStorageHydrogenData(
**dev_data_generic,
E_max=10,
E_min=0.2,
E_cost=0,
eta=0.9,
target_profile="mytarget",
E_init=4,
)
carrier_data_dict = {}
obj = devices.StorageHydrogen(dev_data, carrier_data_dict)
assert isinstance(obj, devices.StorageHydrogen)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.target_profile == "mytarget"
def test_storage_gasheater():
dev_data = dto.DeviceGasHeaterData(**dev_data_generic)
carrier_data_dict = {}
obj = devices.GasHeater(dev_data, carrier_data_dict)
assert isinstance(obj, devices.GasHeater)
assert obj.dev_data.flow_max == 20
def test_storage_heatpump():
dev_data = dto.DeviceHeatPumpData(**dev_data_generic, eta=3)
carrier_data_dict = {}
obj = devices.HeatPump(dev_data, carrier_data_dict)
assert isinstance(obj, devices.HeatPump)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 3
def test_storage_electrolyser():
dev_data = dto.DeviceElectrolyserData(**dev_data_generic, eta=0.5, eta_heat=0.3)
carrier_data_dict = {}
obj = devices.Electrolyser(dev_data, carrier_data_dict)
assert isinstance(obj, devices.Electrolyser)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 0.5
def test_storage_fuelcell():
dev_data = dto.DeviceFuelCellData(**dev_data_generic, eta=0.5, eta_heat=0.3)
carrier_data_dict = {}
obj = devices.FuelCell(dev_data, carrier_data_dict)
assert isinstance(obj, devices.FuelCell)
assert obj.dev_data.flow_max == 20
assert obj.dev_data.eta == 0.5
assert obj.carrier_in == ["hydrogen"]
assert obj.carrier_out == ["el", "heat"]
| en | 0.813237 | # Only Powersource and PowerSink are used in electric-only modelling # The following device models are only used in multi-energy modelling: # default value # source_el is identical to powersource | 2.219301 | 2 |
src/pyflow/node/data_holder_node.py | mozjay0619/pyflow-viz | 5 | 6618693 | from .base_node import BaseNode
import numpy as np
import pandas as pd
class DataHolderNode(BaseNode):
def __init__(self, graph_uid, graph_alias, node_uid, value="__specialPFV__NoneData", verbose=False):
super(DataHolderNode, self).__init__(graph_uid, graph_alias, node_uid, 'data_holder', verbose)
self.value = value
self.dim = None
def get(self):
return self.value
def set_value(self, value):
self.value = value
def has_value(self):
return self.value != "__specialPFV__NoneData"
def get_persisted_data_dim_as_str(self):
"""Currently supports dimensionality from:
numpy ndarray
pandas dataframe
pyspark dataframe
The dimensionality of other types of data defaults to "(0)"
"""
if not self.has_value:
raise ValueError("There is no value!")
if self.dim is not None:
if self.verbose:
print('{} has been persisted'.format(self.node_uid))
return self.dim
if self.verbose:
print('persisting {}'.format(self.node_uid))
try:
is_spark_object = hasattr(self.value, "rdd")
except KeyError:
is_spark_object = False
if is_spark_object:
row_cnt = self.get().persist().count()
col_cnt = len(self.get().columns)
self.dim = (row_cnt, col_cnt)
elif isinstance(self.value, np.ndarray):
self.dim = self.value.shape
elif isinstance(self.value, pd.DataFrame):
self.dim = self.value.shape
else:
self.dim = "(1, )"
return str(self.dim)
def __del__(self):
if self.verbose:
print('{} released!'.format(self.node_uid))
| from .base_node import BaseNode
import numpy as np
import pandas as pd
class DataHolderNode(BaseNode):
def __init__(self, graph_uid, graph_alias, node_uid, value="__specialPFV__NoneData", verbose=False):
super(DataHolderNode, self).__init__(graph_uid, graph_alias, node_uid, 'data_holder', verbose)
self.value = value
self.dim = None
def get(self):
return self.value
def set_value(self, value):
self.value = value
def has_value(self):
return self.value != "__specialPFV__NoneData"
def get_persisted_data_dim_as_str(self):
"""Currently supports dimensionality from:
numpy ndarray
pandas dataframe
pyspark dataframe
The dimensionality of other types of data defaults to "(0)"
"""
if not self.has_value:
raise ValueError("There is no value!")
if self.dim is not None:
if self.verbose:
print('{} has been persisted'.format(self.node_uid))
return self.dim
if self.verbose:
print('persisting {}'.format(self.node_uid))
try:
is_spark_object = hasattr(self.value, "rdd")
except KeyError:
is_spark_object = False
if is_spark_object:
row_cnt = self.get().persist().count()
col_cnt = len(self.get().columns)
self.dim = (row_cnt, col_cnt)
elif isinstance(self.value, np.ndarray):
self.dim = self.value.shape
elif isinstance(self.value, pd.DataFrame):
self.dim = self.value.shape
else:
self.dim = "(1, )"
return str(self.dim)
def __del__(self):
if self.verbose:
print('{} released!'.format(self.node_uid))
| en | 0.705864 | Currently supports dimensionality from: numpy ndarray pandas dataframe pyspark dataframe The dimensionality of other types of data defaults to "(0)" | 2.550782 | 3 |
index.py | ancient-world-citation-analysis/plotly-dash | 0 | 6618694 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app, server
from layouts import layout1
import callbacks
import dash_bootstrap_components as dbc
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
navbar = dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Index", href="/index")),
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("More pages", header=True),
dbc.DropdownMenuItem("Summarization and Paraphrasing", href="/sum_para"),
dbc.DropdownMenuItem("PDF2CSV", href="/ocr"),
],
nav=True,
in_navbar=True,
label="More",
),
],
brand="AWCA",
brand_href="#",
color="Primary",
)
layout_index = html.Div([
dcc.Link('Navigate to "/sum-para"', href='/sum_para'),
html.Br(),
dcc.Link('Navigate to "/ocr"', href='/ocr'),
navbar
])
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
if pathname == '/sum_para':
return layout1
elif pathname == '/ocr':
return layout2
else:
return layout_index
if __name__ == '__main__':
app.run_server(debug=True) | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app, server
from layouts import layout1
import callbacks
import dash_bootstrap_components as dbc
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
navbar = dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Index", href="/index")),
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("More pages", header=True),
dbc.DropdownMenuItem("Summarization and Paraphrasing", href="/sum_para"),
dbc.DropdownMenuItem("PDF2CSV", href="/ocr"),
],
nav=True,
in_navbar=True,
label="More",
),
],
brand="AWCA",
brand_href="#",
color="Primary",
)
layout_index = html.Div([
dcc.Link('Navigate to "/sum-para"', href='/sum_para'),
html.Br(),
dcc.Link('Navigate to "/ocr"', href='/ocr'),
navbar
])
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
if pathname == '/sum_para':
return layout1
elif pathname == '/ocr':
return layout2
else:
return layout_index
if __name__ == '__main__':
app.run_server(debug=True) | none | 1 | 2.374691 | 2 | |
django-rgd/rgd/apps.py | ResonantGeoData/ResonantGeoData | 40 | 6618695 | from django.apps import AppConfig
class RGDConfig(AppConfig):
default_auto_field = 'django.db.models.AutoField'
name = 'rgd'
def ready(self):
import rgd.signals # noqa: F401
| from django.apps import AppConfig
class RGDConfig(AppConfig):
default_auto_field = 'django.db.models.AutoField'
name = 'rgd'
def ready(self):
import rgd.signals # noqa: F401
| uz | 0.465103 | # noqa: F401 | 1.70574 | 2 |
src/config/deanonymind.py | wwwiretap/tor-hsdir-research | 22 | 6618696 | #!/usr/bin/env python
import optparse
import os
import sys
import zipfile
"""
Take a MaxMind GeoLite Country database as input and replace A1 entries
with the country code and name of the preceding entry iff the preceding
(subsequent) entry ends (starts) directly before (after) the A1 entry and
both preceding and subsequent entries contain the same country code.
Then apply manual changes, either replacing A1 entries that could not be
replaced automatically or overriding previously made automatic changes.
"""
def main():
    """Run the full pipeline: read the MaxMind input, apply automatic
    and then manual A1 replacements, and write the three output files."""
    opts = parse_options()
    entries = read_file(opts.in_maxmind)
    # Step 1: merge A1 ranges automatically where both neighbors agree.
    entries = apply_automatic_changes(entries)
    write_file(opts.out_automatic, entries)
    # Step 2: layer manual overrides on top (the overrides file is optional).
    overrides = read_file(opts.in_manual, must_exist=False)
    entries = apply_manual_changes(entries, overrides)
    write_file(opts.out_manual, entries)
    # The short-format output is the geoip file shipped with tor.
    write_file(opts.out_geoip, entries, long_format=False)
def parse_options():
    """Define the command-line interface and return the parsed options.

    Positional arguments are accepted but ignored; only the option
    values are returned.
    """
    opt_parser = optparse.OptionParser()
    opt_parser.add_option(
        '-i', action='store', dest='in_maxmind',
        default='GeoIPCountryCSV.zip', metavar='FILE',
        help='use the specified MaxMind GeoLite Country .zip or .csv '
             'file as input [default: %default]')
    opt_parser.add_option(
        '-g', action='store', dest='in_manual',
        default='geoip-manual', metavar='FILE',
        help='use the specified .csv file for manual changes or to '
             'override automatic changes [default: %default]')
    opt_parser.add_option(
        '-a', action='store', dest='out_automatic',
        default="AutomaticGeoIPCountryWhois.csv", metavar='FILE',
        help='write full input file plus automatic changes to the '
             'specified .csv file [default: %default]')
    opt_parser.add_option(
        '-m', action='store', dest='out_manual',
        default='ManualGeoIPCountryWhois.csv', metavar='FILE',
        help='write full input file plus automatic and manual '
             'changes to the specified .csv file [default: %default]')
    opt_parser.add_option(
        '-o', action='store', dest='out_geoip',
        default='geoip', metavar='FILE',
        help='write full input file plus automatic and manual '
             'changes to the specified .csv file that can be shipped '
             'with tor [default: %default]')
    options = opt_parser.parse_args()[0]
    return options
def read_file(path, must_exist=True):
    """Read assignment lines from a MaxMind .zip or plain .csv file.

    Returns a list of stripped, non-empty, non-comment lines.  Returns
    None if the file does not exist and must_exist is False; exits the
    process if it does not exist and must_exist is True.
    """
    if not os.path.exists(path):
        if must_exist:
            # Equivalent to the former Python-2-only print statement,
            # but portable to Python 3 as well.
            sys.stdout.write('File %s does not exist. Exiting.\n' % (path, ))
            sys.exit(1)
        else:
            return
    if path.endswith('.zip'):
        # Context manager closes the archive even if read() raises.
        with zipfile.ZipFile(path) as zip_file:
            csv_content = zip_file.read('GeoIPCountryWhois.csv')
        if not isinstance(csv_content, str):
            # Python 3: ZipFile.read() returns bytes; decode to text so
            # the split/strip below works on both Python versions.
            csv_content = csv_content.decode('utf-8')
    else:
        # Context manager closes the file even if read() raises.
        with open(path) as csv_file:
            csv_content = csv_file.read()
    assignments = []
    for line in csv_content.split('\n'):
        stripped_line = line.strip()
        # Skip blank lines and '#' comment lines.
        if len(stripped_line) > 0 and not stripped_line.startswith('#'):
            assignments.append(stripped_line)
    return assignments
def apply_automatic_changes(assignments):
    """Replace A1 entries with their neighbors' country where possible.

    Consecutive '"A1"' lines are buffered until the next regular entry
    (or end of input) and then resolved by process_a1_lines() together
    with the preceding and following non-A1 lines.  Returns the full
    list of lines with any replacements applied.
    """
    # Portable replacement for the former Python-2-only print statement.
    sys.stdout.write('\nApplying automatic changes...\n')
    result_lines = []
    prev_line = None
    a1_lines = []
    for line in assignments:
        if '"A1"' in line:
            # Buffer A1 lines until we know their successor entry.
            a1_lines.append(line)
        else:
            if len(a1_lines) > 0:
                result_lines.extend(
                    process_a1_lines(prev_line, a1_lines, line))
                a1_lines = []
            result_lines.append(line)
            prev_line = line
    if len(a1_lines) > 0:
        # Trailing A1 lines have no successor entry; pass None so they
        # are kept unchanged.
        result_lines.extend(process_a1_lines(prev_line, a1_lines, None))
    return result_lines
def process_a1_lines(prev_line, a1_lines, next_line):
    """Try to merge a single A1 entry into its neighbours' country.

    The A1 entry is rewritten with the neighbouring country code/name only
    when it is directly adjacent (in IP-number space) to both the previous
    and next entries and those two entries share the same country code.
    Returns the (possibly rewritten) list of A1 lines.
    """
    if not prev_line or not next_line:
        return a1_lines  # Can't merge first or last line in file.
    if len(a1_lines) > 1:
        return a1_lines  # Can't merge more than 1 line at once.
    a1_line = a1_lines[0].strip()
    prev_entry = parse_line(prev_line)
    a1_entry = parse_line(a1_line)
    next_entry = parse_line(next_line)
    # Adjacency checks: the A1 range must start right after the previous
    # range ends and end right before the next range starts.
    touches_prev_entry = int(prev_entry['end_num']) + 1 == \
        int(a1_entry['start_num'])
    touches_next_entry = int(a1_entry['end_num']) + 1 == \
        int(next_entry['start_num'])
    same_country_code = prev_entry['country_code'] == \
        next_entry['country_code']
    if touches_prev_entry and touches_next_entry and same_country_code:
        new_line = format_line_with_other_country(a1_entry, prev_entry)
        print '-%s\n+%s' % (a1_line, new_line, )
        return [new_line]
    else:
        return a1_lines
def parse_line(line):
    """Split one GeoIP csv line into a field-name -> value dict.

    Double quotes are stripped first; when a line has fewer than six
    fields, the trailing keys are simply absent from the result.
    Returns None for an empty or None input line.
    """
    if not line:
        return None
    field_names = ('start_str', 'end_str', 'start_num', 'end_num',
                   'country_code', 'country_name')
    values = line.replace('"', '').strip().split(',')
    return dict(zip(field_names, values))
def format_line_with_other_country(original_entry, other_entry):
    """Rebuild a quoted csv line from original_entry's range fields,
    borrowing the country code and name from other_entry."""
    fields = (original_entry['start_str'], original_entry['end_str'],
              original_entry['start_num'], original_entry['end_num'],
              other_entry['country_code'], other_entry['country_name'])
    return ','.join('"%s"' % field for field in fields)
def apply_manual_changes(assignments, manual_assignments):
    """Apply manual override lines on top of the automatic changes.

    Manual lines are keyed by their start number.  An override replaces the
    original entry only when the start/end strings and end number match
    exactly; a manual country code that is not 2 characters long removes
    the entry instead of replacing it.  Warnings are printed for duplicate
    manual keys, partial matches, and manual lines that never matched.
    """
    if not manual_assignments:
        return assignments
    print '\nApplying manual changes...'
    manual_dict = {}
    for line in manual_assignments:
        start_num = parse_line(line)['start_num']
        if start_num in manual_dict:
            # A later manual line silently wins over an earlier duplicate.
            print ('Warning: duplicate start number in manual '
                'assignments:\n %s\n %s\nDiscarding first entry.' %
                (manual_dict[start_num], line, ))
        manual_dict[start_num] = line
    result = []
    for line in assignments:
        entry = parse_line(line)
        start_num = entry['start_num']
        if start_num in manual_dict:
            manual_line = manual_dict[start_num]
            manual_entry = parse_line(manual_line)
            # Require a full match on the non-country fields before
            # trusting the manual override.
            if entry['start_str'] == manual_entry['start_str'] and \
                entry['end_str'] == manual_entry['end_str'] and \
                entry['end_num'] == manual_entry['end_num']:
                if len(manual_entry['country_code']) != 2:
                    print '-%s' % (line, )  # only remove, don't replace
                else:
                    new_line = format_line_with_other_country(entry,
                        manual_entry)
                    print '-%s\n+%s' % (line, new_line, )
                    result.append(new_line)
                del manual_dict[start_num]
            else:
                print ('Warning: only partial match between '
                    'original/automatically replaced assignment and '
                    'manual assignment:\n %s\n %s\nNot applying '
                    'manual change.' % (line, manual_line, ))
                result.append(line)
        else:
            result.append(line)
    # Anything left in manual_dict never matched an input entry.
    if len(manual_dict) > 0:
        print ('Warning: could not apply all manual assignments: %s' %
            ('\n '.join(manual_dict.values())), )
    return result
def write_file(path, assignments, long_format=True):
    """Write assignments to path, newline-joined, without trailing newline.

    With long_format=True the lines are written verbatim; otherwise each
    line is shortened to the 'start_num,end_num,country_code' form used by
    the geoip file shipped with tor.
    """
    if long_format:
        output_lines = assignments
    else:
        output_lines = [
            "%s,%s,%s" % (entry['start_num'], entry['end_num'],
                          entry['country_code'])
            for entry in (parse_line(long_line) for long_line in assignments)
        ]
    out_file = open(path, 'w')
    out_file.write('\n'.join(output_lines))
    out_file.close()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import optparse
import os
import sys
import zipfile
"""
Take a MaxMind GeoLite Country database as input and replace A1 entries
with the country code and name of the preceding entry iff the preceding
(subsequent) entry ends (starts) directly before (after) the A1 entry and
both preceding and subsequent entries contain the same country code.
Then apply manual changes, either replacing A1 entries that could not be
replaced automatically or overriding previously made automatic changes.
"""
def main():
options = parse_options()
assignments = read_file(options.in_maxmind)
assignments = apply_automatic_changes(assignments)
write_file(options.out_automatic, assignments)
manual_assignments = read_file(options.in_manual, must_exist=False)
assignments = apply_manual_changes(assignments, manual_assignments)
write_file(options.out_manual, assignments)
write_file(options.out_geoip, assignments, long_format=False)
def parse_options():
parser = optparse.OptionParser()
parser.add_option('-i', action='store', dest='in_maxmind',
default='GeoIPCountryCSV.zip', metavar='FILE',
help='use the specified MaxMind GeoLite Country .zip or .csv '
'file as input [default: %default]')
parser.add_option('-g', action='store', dest='in_manual',
default='geoip-manual', metavar='FILE',
help='use the specified .csv file for manual changes or to '
'override automatic changes [default: %default]')
parser.add_option('-a', action='store', dest='out_automatic',
default="AutomaticGeoIPCountryWhois.csv", metavar='FILE',
help='write full input file plus automatic changes to the '
'specified .csv file [default: %default]')
parser.add_option('-m', action='store', dest='out_manual',
default='ManualGeoIPCountryWhois.csv', metavar='FILE',
help='write full input file plus automatic and manual '
'changes to the specified .csv file [default: %default]')
parser.add_option('-o', action='store', dest='out_geoip',
default='geoip', metavar='FILE',
help='write full input file plus automatic and manual '
'changes to the specified .csv file that can be shipped '
'with tor [default: %default]')
(options, args) = parser.parse_args()
return options
def read_file(path, must_exist=True):
if not os.path.exists(path):
if must_exist:
print 'File %s does not exist. Exiting.' % (path, )
sys.exit(1)
else:
return
if path.endswith('.zip'):
zip_file = zipfile.ZipFile(path)
csv_content = zip_file.read('GeoIPCountryWhois.csv')
zip_file.close()
else:
csv_file = open(path)
csv_content = csv_file.read()
csv_file.close()
assignments = []
for line in csv_content.split('\n'):
stripped_line = line.strip()
if len(stripped_line) > 0 and not stripped_line.startswith('#'):
assignments.append(stripped_line)
return assignments
def apply_automatic_changes(assignments):
print '\nApplying automatic changes...'
result_lines = []
prev_line = None
a1_lines = []
for line in assignments:
if '"A1"' in line:
a1_lines.append(line)
else:
if len(a1_lines) > 0:
new_a1_lines = process_a1_lines(prev_line, a1_lines, line)
for new_a1_line in new_a1_lines:
result_lines.append(new_a1_line)
a1_lines = []
result_lines.append(line)
prev_line = line
if len(a1_lines) > 0:
new_a1_lines = process_a1_lines(prev_line, a1_lines, None)
for new_a1_line in new_a1_lines:
result_lines.append(new_a1_line)
return result_lines
def process_a1_lines(prev_line, a1_lines, next_line):
if not prev_line or not next_line:
return a1_lines # Can't merge first or last line in file.
if len(a1_lines) > 1:
return a1_lines # Can't merge more than 1 line at once.
a1_line = a1_lines[0].strip()
prev_entry = parse_line(prev_line)
a1_entry = parse_line(a1_line)
next_entry = parse_line(next_line)
touches_prev_entry = int(prev_entry['end_num']) + 1 == \
int(a1_entry['start_num'])
touches_next_entry = int(a1_entry['end_num']) + 1 == \
int(next_entry['start_num'])
same_country_code = prev_entry['country_code'] == \
next_entry['country_code']
if touches_prev_entry and touches_next_entry and same_country_code:
new_line = format_line_with_other_country(a1_entry, prev_entry)
print '-%s\n+%s' % (a1_line, new_line, )
return [new_line]
else:
return a1_lines
def parse_line(line):
if not line:
return None
keys = ['start_str', 'end_str', 'start_num', 'end_num',
'country_code', 'country_name']
stripped_line = line.replace('"', '').strip()
parts = stripped_line.split(',')
entry = dict((k, v) for k, v in zip(keys, parts))
return entry
def format_line_with_other_country(original_entry, other_entry):
return '"%s","%s","%s","%s","%s","%s"' % (original_entry['start_str'],
original_entry['end_str'], original_entry['start_num'],
original_entry['end_num'], other_entry['country_code'],
other_entry['country_name'], )
def apply_manual_changes(assignments, manual_assignments):
if not manual_assignments:
return assignments
print '\nApplying manual changes...'
manual_dict = {}
for line in manual_assignments:
start_num = parse_line(line)['start_num']
if start_num in manual_dict:
print ('Warning: duplicate start number in manual '
'assignments:\n %s\n %s\nDiscarding first entry.' %
(manual_dict[start_num], line, ))
manual_dict[start_num] = line
result = []
for line in assignments:
entry = parse_line(line)
start_num = entry['start_num']
if start_num in manual_dict:
manual_line = manual_dict[start_num]
manual_entry = parse_line(manual_line)
if entry['start_str'] == manual_entry['start_str'] and \
entry['end_str'] == manual_entry['end_str'] and \
entry['end_num'] == manual_entry['end_num']:
if len(manual_entry['country_code']) != 2:
print '-%s' % (line, ) # only remove, don't replace
else:
new_line = format_line_with_other_country(entry,
manual_entry)
print '-%s\n+%s' % (line, new_line, )
result.append(new_line)
del manual_dict[start_num]
else:
print ('Warning: only partial match between '
'original/automatically replaced assignment and '
'manual assignment:\n %s\n %s\nNot applying '
'manual change.' % (line, manual_line, ))
result.append(line)
else:
result.append(line)
if len(manual_dict) > 0:
print ('Warning: could not apply all manual assignments: %s' %
('\n '.join(manual_dict.values())), )
return result
def write_file(path, assignments, long_format=True):
if long_format:
output_lines = assignments
else:
output_lines = []
for long_line in assignments:
entry = parse_line(long_line)
short_line = "%s,%s,%s" % (entry['start_num'],
entry['end_num'], entry['country_code'], )
output_lines.append(short_line)
out_file = open(path, 'w')
out_file.write('\n'.join(output_lines))
out_file.close()
if __name__ == '__main__':
main()
| en | 0.832724 | #!/usr/bin/env python Take a MaxMind GeoLite Country database as input and replace A1 entries with the country code and name of the preceding entry iff the preceding (subsequent) entry ends (starts) directly before (after) the A1 entry and both preceding and subsequent entries contain the same country code. Then apply manual changes, either replacing A1 entries that could not be replaced automatically or overriding previously made automatic changes. # Can't merge first or last line in file. # Can't merge more than 1 line at once. # only remove, don't replace | 2.839686 | 3 |
tests/test.py | jaws/JustifiedAWS | 15 | 6618697 | import unittest
import tempfile
import netCDF4
import os.path
import os
import matplotlib
import sys
sys.path.append('../jaws/')
import jaws
import subprocess
def convert(infile, *args):
    """
    Convert sample file.
    This is a helper function used by other tests. It takes in a filename
    and any command line arguments, and uses them to run jaws.py.
    Returns the NamedTemporaryFile object containing the output.
    """
    # change the current working directory so the relative sample-data and
    # ../jaws/jaws.py paths below resolve regardless of where pytest ran from
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # check that the input file exists
    if not os.path.isfile(infile):
        try:
            raise FileNotFoundError
        except NameError:  # python2: FileNotFoundError does not exist there
            raise IOError
    # make a temporary output file (delete=False so the jaws subprocess can
    # reopen and overwrite it by name)
    outfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False)
    # generate command line arguments for jaws
    jawargs = [infile, outfile.name] + list(args)
    # run the jaws command
    subprocess.call(['python', '../jaws/jaws.py'] + jawargs)
    # read the result of the call.
    # if the file was malformed or not converted correctly
    # this will rise an error and the test will fail.
    with open(outfile.name, 'rb') as stream:
        if not stream.read():
            raise RuntimeError('Output File is Empty')
    return outfile
def convert_to_dataset(input_file, *args):
    """Converts the file and loads the results into a netCDF4 Dataset."""
    outfile = convert(input_file, *args)
    # Reopen the converted file so tests can inspect attributes/variables.
    return netCDF4.Dataset(outfile.name)
class TestJaws(unittest.TestCase):
    """
    Test jaws.py
    This class contains methods used for testing the application itself,
    separate from individual format converters. At this time, this mostly
    consists of testing various command line options and if they work correctly.
    """
    def test_format3(self):
        """Test that --format3 option works correctly."""
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-3')
        self.assertEqual(nc.file_format, 'NETCDF3_CLASSIC')
    def test_format4(self):
        """Test that --format4 option works correctly."""
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-4')
        self.assertEqual(nc.file_format, 'NETCDF4')
    def test_format5(self):
        """Test that --format5 (NetCDF3 64-bit offset) option works correctly."""
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-5')
        self.assertEqual(nc.file_format, 'NETCDF3_64BIT_OFFSET')
    def test_station_name(self):
        """Test overriding default station name."""
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-s', 'TestStation')
        # station_name is stored as a character array; join it back to a str.
        station = nc.variables['station_name'][:]
        self.assertEqual(''.join(station), 'TestStation')
    def test_compression(self):
        """Test that compression level works correctly."""
        # Only checks the conversion succeeds; compressed size is not verified.
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-L', '5')
        self.assertTrue(nc)
    def test_all_options(self):
        """Test that all options works correctly simultaneously."""
        nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-3', '--no_drv_tm', '-s', 'TestStation', '-t' ,'America/Los_Angeles', '-D', '1', '-L', '5')
        self.assertTrue(nc)
class TestInputOutputArguments(unittest.TestCase):
    """
    Test command line arguments for input and output files.
    Ensures that both positional and keyword notation is supported,
    and that the output file is constructed correctly if not explicitly
    specified.
    """
    def filetest(self, args, assert_input=None, assert_output=None):
        """Helper method that performs the specified assertions."""
        args = jaws.parse_args(list(args))
        input_file = jaws.get_input_file(args)
        if assert_input:
            self.assertEqual(input_file, assert_input)
        if assert_output:
            # Output-name resolution may consult the station list.
            stations = jaws.get_stations()
            output_file = jaws.get_output_file(args, input_file, stations)
            self.assertEqual(output_file, assert_output)
    def test_input_positional(self):
        self.filetest(['test_input.txt'], assert_input='test_input.txt')
    def test_input_optional(self):
        self.filetest(['--input', 'test_input.txt'],
                      assert_input='test_input.txt')
    def test_output_positional(self):
        self.filetest(['test_input.txt', 'test_output.txt'],
                      assert_output='test_output.txt')
    def test_output_optional(self):
        self.filetest(['test_input.txt', '--fl_out', 'test_output.txt'],
                      assert_output='test_output.txt')
    # The string below is a block of intentionally disabled tests, kept for
    # reference (see the inline note about GCNet vs NSIDC file naming).
    '''
    def test_output_omitted_simple(self):
        """
        Test generation of omitted output file.
        In the simplest case, the output file has the same name as the
        input file, but with the extension changed to .nc.
        """
        self.filetest(['test_file.txt'], assert_output='test_file.nc')
    # Skip following test for GCNet ouput file naming, because NSIDC files can also have similar stating name
    # (e.g 040030_2002.dat). So, now first we read input file and detect if its a GCNet station and then assign the name
    def test_output_omitted_numeral_1(self):
        """
        Test omitted output file with numeric-prefixed input file.
        If the input file is prefixed with number between 1 and 24,
        then the output file's name is taken from the station list.
        """
        self.filetest(['04_test_file.txt'], assert_output='gcnet_gits.nc')
    def test_output_omitted_numeral_2(self):
        self.filetest(['11_test_file.txt'], assert_output='gcnet_dome.nc')
    def test_output_omitted_c_suffix(self):
        self.filetest(['31c.txt'], assert_output='gcnet_lar2.nc')
    '''
class TestConverter(unittest.TestCase):
    """Parent class for Converter testers."""
    def check_output(self, input_file, output_sample):
        """
        Check that output matches known value.
        Converts input_file, and compares the results of the conversion to the
        contents of output_sample, which is a known good conversion of the
        input file.
        """
        output_file = convert(input_file)
        # BUG FIX: the reference must be read from output_sample.  Previously
        # output_file.name was opened twice, so the freshly converted file was
        # compared against itself and the assertion could never fail.
        with open(output_sample, 'rb') as stream:
            reference = stream.read()
        with open(output_file.name, 'rb') as stream:
            data = stream.read()
        self.assertEqual(data, reference)
class TestAAWS(TestConverter):
    """
    Test AAWS.
    This class test the correct conversion of the AAWS format. This involves
    converting multiple sample AAWS input files, and checking that the
    output values are what's expected.
    """
    def test_reference_sample(self):
        """
        Test the first sample input file against its known-good .nc output.
        """
        self.check_output('../sample_data/AAWS_AGO-4_20161130.txt', '../sample_data/converted/AAWS_AGO-4_20161130.nc')
class TestGCNet(TestConverter):
    """
    Test GCNet.
    See the docstring for TestAAWS for details.
    """
    def test_reference_sample(self):
        """
        Test the first sample input file against its known-good .nc output.
        """
        self.check_output('../sample_data/GCNet_Summit_20120817.txt', '../sample_data/converted/GCNet_Summit_20120817.nc')
class TestPROMICE(TestConverter):
    """
    Test PROMICE.
    See the docstring for TestAAWS for details.
    """
    def test_reference_sample(self):
        """
        Test the first sample input file against its known-good .nc output.
        """
        self.check_output('../sample_data/PROMICE_EGP_20160503.txt', '../sample_data/converted/PROMICE_EGP_20160503.nc')
if __name__ == '__main__':
unittest.main(verbosity=3)
| import unittest
import tempfile
import netCDF4
import os.path
import os
import matplotlib
import sys
sys.path.append('../jaws/')
import jaws
import subprocess
def convert(infile, *args):
"""
Convert sample file.
This is a helper function used by other tests. It takes in a filename
and any command line arguments, and uses them to run jaws.py.
Returns the NamedTemporaryFile object containing the output.
"""
# change the current working directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# check that the input file exists
if not os.path.isfile(infile):
try:
raise FileNotFoundError
except NameError: # python2
raise IOError
# make a temporary output file
outfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False)
# generate command line arguments for jaws
jawargs = [infile, outfile.name] + list(args)
# run the jaws command
subprocess.call(['python', '../jaws/jaws.py'] + jawargs)
# read the result of the call.
# if the file was malformed or not converted correctly
# this will rise an error and the test will fail.
with open(outfile.name, 'rb') as stream:
if not stream.read():
raise RuntimeError('Output File is Empty')
return outfile
def convert_to_dataset(input_file, *args):
"""Converts the file and loads the results into a netCDF4 Dataset."""
outfile = convert(input_file, *args)
return netCDF4.Dataset(outfile.name)
class TestJaws(unittest.TestCase):
"""
Test jaws.py
This class contains methods used for testing the application itself,
separate from individual format converters. At this time, this mostly
consists of testing various command line options and if they work correctly.
"""
def test_format3(self):
"""Test that --format3 option works correctly."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-3')
self.assertEqual(nc.file_format, 'NETCDF3_CLASSIC')
def test_format4(self):
"""Test that --format4 option works correctly."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-4')
self.assertEqual(nc.file_format, 'NETCDF4')
def test_format5(self):
"""Test that --format5 option works correctly."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-5')
self.assertEqual(nc.file_format, 'NETCDF3_64BIT_OFFSET')
def test_station_name(self):
"""Test overriding default station name."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-s', 'TestStation')
station = nc.variables['station_name'][:]
self.assertEqual(''.join(station), 'TestStation')
def test_compression(self):
"""Test that compression level works correctly."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-L', '5')
self.assertTrue(nc)
def test_all_options(self):
"""Test that all options works correctly simultaneously."""
nc = convert_to_dataset('../sample_data/AAWS_AGO-4_20161130.txt', '-3', '--no_drv_tm', '-s', 'TestStation', '-t' ,'America/Los_Angeles', '-D', '1', '-L', '5')
self.assertTrue(nc)
class TestInputOutputArguments(unittest.TestCase):
"""
Test command line arguments for input and output files.
Ensures that both positional and keyword notation is supported,
and that the output file is constructed correctly if not explicitly
specified.
"""
def filetest(self, args, assert_input=None, assert_output=None):
"""Helper method that performs the specified assertions."""
args = jaws.parse_args(list(args))
input_file = jaws.get_input_file(args)
if assert_input:
self.assertEqual(input_file, assert_input)
if assert_output:
stations = jaws.get_stations()
output_file = jaws.get_output_file(args, input_file, stations)
self.assertEqual(output_file, assert_output)
def test_input_positional(self):
self.filetest(['test_input.txt'], assert_input='test_input.txt')
def test_input_optional(self):
self.filetest(['--input', 'test_input.txt'],
assert_input='test_input.txt')
def test_output_positional(self):
self.filetest(['test_input.txt', 'test_output.txt'],
assert_output='test_output.txt')
def test_output_optional(self):
self.filetest(['test_input.txt', '--fl_out', 'test_output.txt'],
assert_output='test_output.txt')
'''
def test_output_omitted_simple(self):
"""
Test generation of omitted output file.
In the simplest case, the output file has the same name as the
input file, but with the extension changed to .nc.
"""
self.filetest(['test_file.txt'], assert_output='test_file.nc')
# Skip following test for GCNet ouput file naming, because NSIDC files can also have similar stating name
# (e.g 040030_2002.dat). So, now first we read input file and detect if its a GCNet station and then assign the name
def test_output_omitted_numeral_1(self):
"""
Test omitted output file with numeric-prefixed input file.
If the input file is prefixed with number between 1 and 24,
then the output file's name is taken from the station list.
"""
self.filetest(['04_test_file.txt'], assert_output='gcnet_gits.nc')
def test_output_omitted_numeral_2(self):
self.filetest(['11_test_file.txt'], assert_output='gcnet_dome.nc')
def test_output_omitted_c_suffix(self):
self.filetest(['31c.txt'], assert_output='gcnet_lar2.nc')
'''
class TestConverter(unittest.TestCase):
"""Parent class for Converter testers."""
def check_output(self, input_file, output_sample):
"""
Check that output matches known value.
Converts input_file, and compares the results of the conversion to the
contents of output_sample, which is a known good conversion of the
input file.
"""
output_file = convert(input_file)
with open(output_file.name, 'rb') as stream:
reference = stream.read()
with open(output_file.name, 'rb') as stream:
data = stream.read()
self.assertEqual(data, reference)
class TestAAWS(TestConverter):
"""
Test AAWS.
This class test the correct conversion of the AAWS format. This involves
converting multiple sample AAWS input files, and checking that the
output values are what's expected.
"""
def test_reference_sample(self):
"""
Test the first sample input file.
"""
self.check_output('../sample_data/AAWS_AGO-4_20161130.txt', '../sample_data/converted/AAWS_AGO-4_20161130.nc')
class TestGCNet(TestConverter):
"""
Test GCNet.
See the docstring for TestAAWS for details.
"""
def test_reference_sample(self):
"""
Test the first sample input file.
"""
self.check_output('../sample_data/GCNet_Summit_20120817.txt', '../sample_data/converted/GCNet_Summit_20120817.nc')
class TestPROMICE(TestConverter):
"""
Test PROMICE.
See the docstring for TestAAWS for details.
"""
def test_reference_sample(self):
"""
Test the first sample input file.
"""
self.check_output('../sample_data/PROMICE_EGP_20160503.txt', '../sample_data/converted/PROMICE_EGP_20160503.nc')
if __name__ == '__main__':
unittest.main(verbosity=3)
| en | 0.772333 | Convert sample file. This is a helper function used by other tests. It takes in a filename and any command line arguments, and uses them to run jaws.py. Returns the NamedTemporaryFile object containing the output. # change the current working directory # check that the input file exists # python2 # make a temporary output file # generate command line arguments for jaws # run the jaws command # read the result of the call. # if the file was malformed or not converted correctly # this will rise an error and the test will fail. Converts the file and loads the results into a netCDF4 Dataset. Test jaws.py This class contains methods used for testing the application itself, separate from individual format converters. At this time, this mostly consists of testing various command line options and if they work correctly. Test that --format3 option works correctly. Test that --format4 option works correctly. Test that --format5 option works correctly. Test overriding default station name. Test that compression level works correctly. Test that all options works correctly simultaneously. Test command line arguments for input and output files. Ensures that both positional and keyword notation is supported, and that the output file is constructed correctly if not explicitly specified. Helper method that performs the specified assertions. def test_output_omitted_simple(self): """ Test generation of omitted output file. In the simplest case, the output file has the same name as the input file, but with the extension changed to .nc. """ self.filetest(['test_file.txt'], assert_output='test_file.nc') # Skip following test for GCNet ouput file naming, because NSIDC files can also have similar stating name # (e.g 040030_2002.dat). So, now first we read input file and detect if its a GCNet station and then assign the name def test_output_omitted_numeral_1(self): """ Test omitted output file with numeric-prefixed input file. 
If the input file is prefixed with number between 1 and 24, then the output file's name is taken from the station list. """ self.filetest(['04_test_file.txt'], assert_output='gcnet_gits.nc') def test_output_omitted_numeral_2(self): self.filetest(['11_test_file.txt'], assert_output='gcnet_dome.nc') def test_output_omitted_c_suffix(self): self.filetest(['31c.txt'], assert_output='gcnet_lar2.nc') Parent class for Converter testers. Check that output matches known value. Converts input_file, and compares the results of the conversion to the contents of output_sample, which is a known good conversion of the input file. Test AAWS. This class test the correct conversion of the AAWS format. This involves converting multiple sample AAWS input files, and checking that the output values are what's expected. Test the first sample input file. Test GCNet. See the docstring for TestAAWS for details. Test the first sample input file. Test PROMICE. See the docstring for TestAAWS for details. Test the first sample input file. | 2.866982 | 3 |
migrations/versions/906e9d3b884b_.py | njugunanduati/bookstore | 0 | 6618698 | """empty message
Revision ID: 906e9d3b884b
Revises: fce68db8626d
Create Date: 2021-04-11 19:03:51.386709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '906e9d3b884b'
down_revision = 'fce68db8626d'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Make book_type.custom_pricing mandatory (NOT NULL).
    op.alter_column('book_type', 'custom_pricing',
               existing_type=sa.BOOLEAN(),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert: allow NULL in book_type.custom_pricing again.
    op.alter_column('book_type', 'custom_pricing',
               existing_type=sa.BOOLEAN(),
               nullable=True)
    # ### end Alembic commands ###
| """empty message
Revision ID: 906e9d3b884b
Revises: fce68db8626d
Create Date: 2021-04-11 19:03:51.386709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '906e9d3b884b'
down_revision = 'fce68db8626d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('book_type', 'custom_pricing',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('book_type', 'custom_pricing',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
| en | 0.525841 | empty message Revision ID: 906e9d3b884b Revises: fce68db8626d Create Date: 2021-04-11 19:03:51.386709 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.082144 | 1 |
bathy_smoother/bathy_smoother/__init__.py | dcherian/pyroms | 1 | 6618699 | # encoding: utf-8
'''
bathy_smoother is a toolkit for working with ROMS bathymetry
(ripped from matlab script LP_bathymetry)
'''
import bathy_smoothing
import bathy_tools
import LP_bathy_smoothing
import LP_bathy_tools
import LP_tools
__authors__ = ['<NAME> <<EMAIL>>']
__version__ = '0.1'
| # encoding: utf-8
'''
bathy_smoother is a toolkit for working with ROMS bathymetry
(ripped from matlab script LP_bathymetry)
'''
import bathy_smoothing
import bathy_tools
import LP_bathy_smoothing
import LP_bathy_tools
import LP_tools
__authors__ = ['<NAME> <<EMAIL>>']
__version__ = '0.1'
| en | 0.851732 | # encoding: utf-8 bathy_smoother is a toolkit for working with ROMS bathymetry (ripped from matlab script LP_bathymetry) | 0.977524 | 1 |
packages/py-ab-testing/ABTesting/__init__.py | gotokatsuya/ab-testing | 40 | 6618700 | from .controller import ABTestingController
| from .controller import ABTestingController
| none | 1 | 1.004595 | 1 | |
tests/test_control_flow.py | SDRAST/support | 0 | 6618701 | <reponame>SDRAST/support
import unittest
import datetime
import time
import logging
from support.logs import setup_logging
from support.control_flow import ControlFlowMixin
class MixedIn(ControlFlowMixin):
    """Minimal ControlFlowMixin subclass used as the fixture for the tests."""

    def generator(self):
        """Yield 0..9, sleeping 0.1 s between items."""
        # BUG FIX: xrange is Python 2 only and raises NameError under
        # Python 3; range iterates identically on both versions here.
        for i in range(10):
            time.sleep(0.1)
            yield i

    def generator_func(self):
        """Return a zero-argument generator function yielding 0..2."""
        def generator():
            for i in range(3):
                time.sleep(0.1)
                yield i
        return generator
class TestControlFlowMixin(unittest.TestCase):
    """Exercise the until()/at() scheduling helpers of ControlFlowMixin.

    These tests only check that the scheduled iteration runs and logs;
    they make no assertions about the yielded values.
    """
    @classmethod
    def setUpClass(cls):
        # One shared fixture instance; the tests only read from it.
        cls.logger = logging.getLogger("TestControlFlowMixin")
        cls.obj = MixedIn()
    def test_until_run(self):
        # until(t).run(gen): consume an already-created generator until t.
        now = self.obj.now()
        later = now + datetime.timedelta(seconds=1)
        for e in self.obj.until(later).run(self.obj.generator()):
            self.logger.debug("test_until_run: {}".format(e))
    def test_until_loop(self):
        # until(t).loop(factory): repeatedly re-invoke a generator factory
        # until t.
        now = self.obj.now()
        later = now + datetime.timedelta(seconds=0.8)
        for e in self.obj.until(later).loop(self.obj.generator_func()):
            self.logger.debug("test_until_loop: {}".format(e))
    def test_at_run(self):
        # at(t).run(factory): start running the generator at time t.
        now = self.obj.now()
        later = now + datetime.timedelta(seconds=1)
        for e in self.obj.at(later).run(self.obj.generator):
            self.logger.debug("test_at_run: {}".format(e))
if __name__ == "__main__":
setup_logging(logging.getLogger(""),logLevel=logging.DEBUG)
unittest.main()
| import unittest
import datetime
import time
import logging
from support.logs import setup_logging
from support.control_flow import ControlFlowMixin
class MixedIn(ControlFlowMixin):
def generator(self):
for i in xrange(10):
time.sleep(0.1)
yield i
def generator_func(self):
def generator():
for i in xrange(3):
time.sleep(0.1)
yield i
return generator
class TestControlFlowMixin(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger("TestControlFlowMixin")
cls.obj = MixedIn()
def test_until_run(self):
now = self.obj.now()
later = now + datetime.timedelta(seconds=1)
for e in self.obj.until(later).run(self.obj.generator()):
self.logger.debug("test_until_run: {}".format(e))
def test_until_loop(self):
now = self.obj.now()
later = now + datetime.timedelta(seconds=0.8)
for e in self.obj.until(later).loop(self.obj.generator_func()):
self.logger.debug("test_until_loop: {}".format(e))
def test_at_run(self):
now = self.obj.now()
later = now + datetime.timedelta(seconds=1)
for e in self.obj.at(later).run(self.obj.generator):
self.logger.debug("test_at_run: {}".format(e))
if __name__ == "__main__":
setup_logging(logging.getLogger(""),logLevel=logging.DEBUG)
unittest.main() | none | 1 | 2.692937 | 3 | |
Common/Measures/Portfolio/Timely/PortfolioMonthly.py | enriqueescobar-askida/Kinito.Finance | 2 | 6618702 | from Common.Measures.Portfolio.Timely.AbstractPortfolioTimely import AbstractPortfolioTimely
class PortfolioMonthly(AbstractPortfolioTimely):
    # Monthly-granularity portfolio measures; all behaviour is inherited
    # from AbstractPortfolioTimely, this class only fixes the time bucket.
    pass
| from Common.Measures.Portfolio.Timely.AbstractPortfolioTimely import AbstractPortfolioTimely
class PortfolioMonthly(AbstractPortfolioTimely):
pass
| none | 1 | 1.162907 | 1 | |
talk_figures/multiterm_buildup.py | remram44/multiband_LS | 24 | 6618703 | import numpy as np
import matplotlib.pyplot as plt
# Use seaborn settings for plot styles
import seaborn; seaborn.set()
from gatspy.datasets import RRLyraeGenerated
from gatspy.periodic import LombScargle, LombScargleMultiband
# Choose a Sesar 2010 object to base our fits on
lcid = 1019544
rrlyrae = RRLyraeGenerated(lcid, random_state=0)
print("Extinction A_r = {0:.4f}".format(rrlyrae.obsmeta['rExt']))
# Generate data in a 6-month observing season
Nobs = 60
rng = np.random.RandomState(0)
nights = np.arange(180)
rng.shuffle(nights)
nights = nights[:Nobs]
t = 57000 + nights + 0.05 * rng.randn(Nobs)
dy = 0.06 + 0.01 * rng.randn(Nobs)
mags = np.array([rrlyrae.generated(band, t, err=dy, corrected=False)
for band in 'ugriz'])
filts = np.array([f for f in 'ugriz'])
def plot_data(ax):
    # Phase-fold the observations on the known RR Lyrae period and plot
    # each of the five SDSS bands with its photometric error bars.
    # Relies on the module-level globals t, mags, dy and rrlyrae.
    for i, band in enumerate('ugriz'):
        ax.errorbar((t / rrlyrae.period) % 1, mags[i], dy,
                    fmt='.', label=band)
    ax.set_ylim(18, 14.5)  # inverted y-axis: brighter (smaller) magnitudes on top
    ax.legend(loc='upper left', fontsize=12, ncol=3)
    ax.set_xlabel('phase')
    ax.set_ylabel('magnitude')
# Plot the input data
fig, ax = plt.subplots()
plot_data(ax)
ax.set_title('Input Data')
plt.savefig('buildup_1.png')
# Plot the base model
fig, ax = plt.subplots()
plot_data(ax)
t_all = np.ravel(t * np.ones_like(mags))
mags_all = np.ravel(mags)
dy_all = np.ravel(dy * np.ones_like(mags))
basemodel = LombScargle(Nterms=2).fit(t_all, mags_all, dy_all)
period = rrlyrae.period
tfit = np.linspace(0, period, 1000)
base_fit = basemodel.predict(tfit, period=period)
ax.plot(tfit / period, base_fit, color='black', lw=5, alpha=0.5)
ax.set_title('2-term Base Model')
# Plot the band-by-band augmentation
multimodel = LombScargleMultiband(Nterms_base=2, Nterms_band=1)
t1, y1, dy1, f1 = map(np.ravel,
np.broadcast_arrays(t, mags, dy, filts[:, None]))
multimodel.fit(t1, y1, dy1, f1)
yfits = multimodel.predict(tfit, filts=filts[:, None], period=period)
plt.savefig('buildup_2.png')
fig, ax = plt.subplots()
for i in range(5):
ax.plot(tfit / period, yfits[i] - base_fit)
ax.plot(tfit / period, 0 * tfit, '--k')
ax.set_ylim(1.7, -1.8)
ax.set_xlabel('phase')
ax.set_ylabel('magnitude')
ax.set_title('1-term Band offset')
plt.savefig('buildup_3.png')
# Plot the final model
fig, ax = plt.subplots()
plot_data(ax)
ax.plot(tfit / period, base_fit, color='black', lw=10, alpha=0.2)
for i in range(5):
ax.plot(tfit / period, yfits[i])
ax.set_title('Final Model')
plt.savefig('buildup_4.png')
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
# Use seaborn settings for plot styles
import seaborn; seaborn.set()
from gatspy.datasets import RRLyraeGenerated
from gatspy.periodic import LombScargle, LombScargleMultiband
# Choose a Sesar 2010 object to base our fits on
lcid = 1019544
rrlyrae = RRLyraeGenerated(lcid, random_state=0)
print("Extinction A_r = {0:.4f}".format(rrlyrae.obsmeta['rExt']))
# Generate data in a 6-month observing season
Nobs = 60
rng = np.random.RandomState(0)
nights = np.arange(180)
rng.shuffle(nights)
nights = nights[:Nobs]
t = 57000 + nights + 0.05 * rng.randn(Nobs)
dy = 0.06 + 0.01 * rng.randn(Nobs)
mags = np.array([rrlyrae.generated(band, t, err=dy, corrected=False)
for band in 'ugriz'])
filts = np.array([f for f in 'ugriz'])
def plot_data(ax):
for i, band in enumerate('ugriz'):
ax.errorbar((t / rrlyrae.period) % 1, mags[i], dy,
fmt='.', label=band)
ax.set_ylim(18, 14.5)
ax.legend(loc='upper left', fontsize=12, ncol=3)
ax.set_xlabel('phase')
ax.set_ylabel('magnitude')
# Plot the input data
fig, ax = plt.subplots()
plot_data(ax)
ax.set_title('Input Data')
plt.savefig('buildup_1.png')
# Plot the base model
fig, ax = plt.subplots()
plot_data(ax)
t_all = np.ravel(t * np.ones_like(mags))
mags_all = np.ravel(mags)
dy_all = np.ravel(dy * np.ones_like(mags))
basemodel = LombScargle(Nterms=2).fit(t_all, mags_all, dy_all)
period = rrlyrae.period
tfit = np.linspace(0, period, 1000)
base_fit = basemodel.predict(tfit, period=period)
ax.plot(tfit / period, base_fit, color='black', lw=5, alpha=0.5)
ax.set_title('2-term Base Model')
# Plot the band-by-band augmentation
multimodel = LombScargleMultiband(Nterms_base=2, Nterms_band=1)
t1, y1, dy1, f1 = map(np.ravel,
np.broadcast_arrays(t, mags, dy, filts[:, None]))
multimodel.fit(t1, y1, dy1, f1)
yfits = multimodel.predict(tfit, filts=filts[:, None], period=period)
plt.savefig('buildup_2.png')
fig, ax = plt.subplots()
for i in range(5):
ax.plot(tfit / period, yfits[i] - base_fit)
ax.plot(tfit / period, 0 * tfit, '--k')
ax.set_ylim(1.7, -1.8)
ax.set_xlabel('phase')
ax.set_ylabel('magnitude')
ax.set_title('1-term Band offset')
plt.savefig('buildup_3.png')
# Plot the final model
fig, ax = plt.subplots()
plot_data(ax)
ax.plot(tfit / period, base_fit, color='black', lw=10, alpha=0.2)
for i in range(5):
ax.plot(tfit / period, yfits[i])
ax.set_title('Final Model')
plt.savefig('buildup_4.png')
plt.show()
| en | 0.706481 | # Use seaborn settings for plot styles # Choose a Sesar 2010 object to base our fits on # Generate data in a 6-month observing season # Plot the input data # Plot the base model # Plot the band-by-band augmentation # Plot the final model | 2.399612 | 2 |
2019/2.py | mrhockeymonkey/AdventOfCode | 0 | 6618704 | <filename>2019/2.py<gh_stars>0
import sys
import time
def run_program(noun, verb, program=None):
    """Execute an Intcode program (AoC 2019 day 2) and return position 0.

    Args:
        noun: value written to position 1 before execution.
        verb: value written to position 2 before execution.
        program: optional Intcode memory (list of ints).  Defaults to the
            hard-coded puzzle input.  A copy is executed, so a caller's
            list is never mutated.

    Returns:
        The value left at position 0 when opcode 99 halts the program.

    Raises:
        Exception: on an unknown opcode, or if execution runs off the end
            of memory without halting.
    """
    if program is None:
        program = [1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,10,19,1,19,5,23,2,23,6,27,1,27,5,31,2,6,31,35,1,5,35,39,2,39,9,43,1,43,5,47,1,10,47,51,1,51,6,55,1,55,10,59,1,59,6,63,2,13,63,67,1,9,67,71,2,6,71,75,1,5,75,79,1,9,79,83,2,6,83,87,1,5,87,91,2,6,91,95,2,95,9,99,1,99,6,103,1,103,13,107,2,13,107,111,2,111,10,115,1,115,6,119,1,6,119,123,2,6,123,127,1,127,5,131,2,131,6,135,1,135,2,139,1,139,9,0,99,2,14,0,0]
    else:
        program = list(program)  # defensive copy: Intcode self-modifies
    program[1] = noun
    program[2] = verb
    c = 0  # instruction pointer; each non-halt instruction is 4 cells wide
    while c < len(program):
        opt = program[c]
        if opt == 99:
            # Halt.  Checked *before* reading operands, so a 99 within three
            # cells of the end of memory cannot raise IndexError (the old
            # version read the three operand cells first, and looped
            # `for p in program` merely as a "run long enough" proxy).
            return program[0]
        input_1_pos = program[c + 1]
        input_2_pos = program[c + 2]
        output_pos = program[c + 3]
        if opt == 1:
            # add
            program[output_pos] = program[input_1_pos] + program[input_2_pos]
        elif opt == 2:
            # multiply
            program[output_pos] = program[input_1_pos] * program[input_2_pos]
        else:
            raise Exception("Unknown opt code")
        c += 4
    raise Exception("Program ran off the end of memory without halting")
if __name__ == '__main__':
    # Part 1: run with a fixed noun/verb state.
    # NOTE(review): AoC 2019 day 2 part 1 specifies noun=12, verb=2;
    # confirm that verb=1 here is intentional.
    part_1 = run_program(12, 1)
    print("Part 1: {0}".format(part_1))
    start_time = time.process_time()
    # Part 2: brute-force every (noun, verb) pair in [0, 99] until the
    # program leaves the target value 19690720 at position 0.
    for n in range(100):
        for v in range(100):
            result = run_program(n, v)
            # print("noun: {0}, verb: {1}, result: {2}".format(n, v, result))
            if result == 19690720:
                # Answer is encoded as 100 * noun + verb, per the puzzle.
                # NOTE(review): the *100 scaling of the elapsed time looks
                # arbitrary -- confirm the intended unit.
                process_time = (time.process_time() - start_time)*100
                print("Part 2: {0}, Time: {1}".format(100 * n + v, process_time))
                sys.exit()
import sys
import time
def run_program(noun, verb):
program = [1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,10,19,1,19,5,23,2,23,6,27,1,27,5,31,2,6,31,35,1,5,35,39,2,39,9,43,1,43,5,47,1,10,47,51,1,51,6,55,1,55,10,59,1,59,6,63,2,13,63,67,1,9,67,71,2,6,71,75,1,5,75,79,1,9,79,83,2,6,83,87,1,5,87,91,2,6,91,95,2,95,9,99,1,99,6,103,1,103,13,107,2,13,107,111,2,111,10,115,1,115,6,119,1,6,119,123,2,6,123,127,1,127,5,131,2,131,6,135,1,135,2,139,1,139,9,0,99,2,14,0,0]
program[1] = noun
program[2] = verb
c = 0 # cursor
for p in program:
opt = program[c]
input_1_pos = program[c + 1]
input_2_pos = program[c + 2]
output_pos = program[c + 3]
if opt == 1:
#add
program[output_pos] = program[input_1_pos] + program[input_2_pos]
elif opt == 2:
# multiply
program[output_pos] = program[input_1_pos] * program[input_2_pos]
elif opt == 99:
# halt
return program[0]
else:
raise Exception("Unknown opt code")
c += 4
if __name__ == '__main__':
part_1 = run_program(12, 1)
print("Part 1: {0}".format(part_1))
start_time = time.process_time()
for n in range(100):
for v in range(100):
result = run_program(n, v)
# print("noun: {0}, verb: {1}, result: {2}".format(n, v, result))
if result == 19690720:
process_time = (time.process_time() - start_time)*100
print("Part 2: {0}, Time: {1}".format(100 * n + v, process_time))
sys.exit() | en | 0.268657 | # cursor #add # multiply # halt # print("noun: {0}, verb: {1}, result: {2}".format(n, v, result)) | 3.18168 | 3 |
enums.py | sanaani/django-payment-authorizenet | 1 | 6618705 | from enum import Enum
class EnumTuple(Enum):
    """Enum base class with helpers for exposing members as form choices."""

    @classmethod
    def as_tuple(cls):
        """Return ((name, value), ...) pairs, e.g. for Django form choices."""
        return tuple((x.name, x.value) for x in cls)

    @classmethod
    def as_tuple_with_all(cls):
        """Like as_tuple(), plus a trailing ('all', 'All') catch-all choice."""
        setup = [(x.name, x.value) for x in cls]
        setup.append(('all', 'All'))
        return tuple(setup)

    @classmethod
    def str_list(cls):
        """Return all member values as one comma-separated string.

        Joins the values directly, so values that themselves contain a
        space (e.g. 'Business Checking', 'Test Mode') stay intact.  The
        previous implementation replaced *every* space with ', ', which
        split such values apart ('Business, Checking').
        """
        return ', '.join(x.value for x in cls)
class CustomerType(EnumTuple):
    """Customer categories for a payment customer profile."""
    individual = 'Individual'
    business = 'Business'


class AccountType(EnumTuple):
    """Bank account types used for eCheck payments."""
    businessChecking = 'Business Checking'
    checking = 'Checking'
    savings = 'Saving'  # NOTE(review): display value is 'Saving', not 'Savings' -- confirm intended


class ECheckType(EnumTuple):
    """eCheck transaction types (presumably ACH standard entry class codes -- verify)."""
    PPD = 'PPD'
    WEB = 'WEB'
    CCD = 'CCD'


class PaymentProfileType(EnumTuple):
    """Type of Payment Profile"""
    bankAccount = 'Bank Account'
    creditCard = 'Credit Card'


class ServerMode(EnumTuple):
    """
    Use this enum within your Django settings file
    Create a variable SERVER_MODE in settings
    Example: SERVER_MODE = ServerMode.development.value
    ServerMode is frequently used with the authorizenet SDK to
    configure the posting URL of a controller. See customer_profile
    for examples
    """
    development = 'Development'  # testing on your local machine
    staging = 'Staging'  # a semi-private server intended for testing
    production = 'Production'  # client-facing software


class ValidationMode(EnumTuple):
    """Payment-profile validation modes (test vs. live)."""
    testMode = 'Test Mode'
    liveMode = 'Live Mode'
| from enum import Enum
class EnumTuple(Enum):
@classmethod
def as_tuple(cls):
"""Used for making enums available as choices in Forms"""
return tuple((x.name, x.value) for x in cls)
@classmethod
def as_tuple_with_all(cls):
setup = [(x.name, x.value) for x in cls]
setup.append(('all', 'All'))
return tuple(x for x in setup)
@classmethod
def str_list(cls):
# List all enum values with a space after them, then trim the
# trailing whitespace
spaced_names = ''.join(x.value + ' ' for x in cls).strip()
SPACE = ' '
COMMA_SPACE = ', '
# put commas in the appropriate locations
return spaced_names.replace(SPACE, COMMA_SPACE)
class CustomerType(EnumTuple):
individual = 'Individual'
business = 'Business'
class AccountType(EnumTuple):
businessChecking = 'Business Checking'
checking = 'Checking'
savings = 'Saving'
class ECheckType(EnumTuple):
PPD = 'PPD'
WEB = 'WEB'
CCD = 'CCD'
class PaymentProfileType(EnumTuple):
"""Type of Payment Profile"""
bankAccount = 'Bank Account'
creditCard = 'Credit Card'
class ServerMode(EnumTuple):
"""
Use this enum within your Django settings file
Create a variable SERVER_MODE in settings
Example: SERVER_MODE = ServerMode.development.value
ServerMode is frequently used with the authorizenet SDK to
configure the posting URL of a controller. See customer_profile
for examples
"""
development = 'Development' # testing on your local machine
staging = 'Staging' # a semi-private server intended for testing
production = 'Production' # client facting software
class ValidationMode(EnumTuple):
testMode = 'Test Mode'
liveMode = 'Live Mode'
| en | 0.755733 | Used for making enums available as choices in Forms # List all enum values with a space after them, then trim the # trailing whitespace # put commas in the appropriate locations Type of Payment Profile Use this enum within your Django settings file Create a variable SERVER_MODE in settings Example: SERVER_MODE = ServerMode.development.value ServerMode is frequently used with the authorizenet SDK to configure the posting URL of a controller. See customer_profile for examples # testing on your local machine # a semi-private server intended for testing # client facting software | 3.424817 | 3 |
71-Simplify_Path.py | QuenLo/leecode | 6 | 6618706 | class Solution:
def simplifyPath(self, path: str) -> str:
seps = path.split("/")
queue = []
for sep in seps[1:]:
if len(sep) < 1:
continue
if sep == "..":
if queue:
queue.pop()
elif sep == ".":
continue
else:
queue.append(sep)
return "/"+"/".join(queue)
| class Solution:
def simplifyPath(self, path: str) -> str:
seps = path.split("/")
queue = []
for sep in seps[1:]:
if len(sep) < 1:
continue
if sep == "..":
if queue:
queue.pop()
elif sep == ".":
continue
else:
queue.append(sep)
return "/"+"/".join(queue)
| none | 1 | 3.658018 | 4 | |
pycryptics/puzpy/tests.py | rdeits/cryptics | 23 | 6618707 | import os
import sys
import glob
import unittest
import puz
class PuzzleTests(unittest.TestCase):
def testClueNumbering(self):
p = puz.read('testfiles/washpost.puz')
clues = p.clue_numbering()
self.assertEqual(len(p.clues), len(clues.across) + len(clues.down))
def testExtensions(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(puz.Extensions.Rebus in p.extensions)
self.assertTrue(puz.Extensions.RebusSolutions in p.extensions)
self.assertTrue(puz.Extensions.Markup in p.extensions)
def testRebus(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(p.has_rebus())
r = p.rebus()
self.assertTrue(r.has_rebus())
self.assertEqual(3, len(r.get_rebus_squares()))
self.assertTrue(all(r.is_rebus_square(i) for i in r.get_rebus_squares()))
self.assertTrue(all('STAR' == r.get_rebus_solution(i) for i in r.get_rebus_squares()))
self.assertTrue(None == r.get_rebus_solution(100))
# trigger save
p.tostring()
def testMarkup(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(p.has_markup())
m = p.markup()
self.assertTrue(all(puz.GridMarkup.Circled == m.markup[i] for i in m.get_markup_squares()))
# trigger save
p.tostring()
p = puz.read('testfiles/washpost.puz')
self.assertFalse(p.has_markup())
m = p.markup()
self.assertFalse(m.has_markup())
# trigger save
p.tostring()
def testPuzzleType(self):
self.assertFalse(puz.read('testfiles/washpost.puz').puzzletype == puz.PuzzleType.Diagramless)
self.assertFalse(puz.read('testfiles/nyt_locked.puz').puzzletype == puz.PuzzleType.Diagramless)
self.assertTrue(puz.read('testfiles/nyt_diagramless.puz').puzzletype == puz.PuzzleType.Diagramless)
class LockTests(unittest.TestCase):
def testScrambleFunctions(self):
''' tests some examples from the file format documentation wiki
'''
self.assertEqual('MLOOPKJ', puz.scramble_string('AEBFCDG', 1234))
self.assertEqual('MOP..KLOJ', puz.scramble_solution('ABC..DEFG', 3, 3, 1234))
self.assertEqual('AEBFCDG', puz.unscramble_string('MLOOPKJ', 1234))
self.assertEqual('ABC..DEFG', puz.unscramble_solution('MOP..KLOJ', 3, 3, 1234))
# rectangular example - tricky
a = 'ABCD.EFGH.KHIJKLM.NOPW.XYZ'
self.assertEqual(a, puz.unscramble_solution(puz.scramble_solution(a, 13, 2, 9721), 13, 2, 9721))
def testLockedBit(self):
self.assertFalse(puz.read('testfiles/washpost.puz').is_solution_locked())
self.assertTrue(puz.read('testfiles/nyt_locked.puz').is_solution_locked())
def testUnlock(self):
p = puz.read('testfiles/nyt_locked.puz')
self.assertTrue(p.is_solution_locked())
self.assertFalse(p.unlock_solution(1234))
self.assertTrue(p.is_solution_locked()) # still locked
self.assertTrue(p.unlock_solution(7844))
self.assertFalse(p.is_solution_locked()) # unlocked!
self.assertTrue('LAKEONTARIO' in p.solution)
def testUnlockRelock(self):
orig = file('testfiles/nyt_locked.puz', 'rb').read()
p = puz.read('testfiles/nyt_locked.puz')
self.assertTrue(p.is_solution_locked())
self.assertTrue(p.unlock_solution(7844))
p.lock_solution(7844)
new = p.tostring()
self.assertEqual(orig, new, 'nyt_locked.puz dit not found-trip')
def testCheckAnswersLocked(self):
'''Verify that we can check answers even when the solution is locked
'''
p1 = puz.read('testfiles/nyt_locked.puz')
p2 = puz.read('testfiles/nyt_locked.puz')
p1.unlock_solution(7844)
self.assertTrue(p2.is_solution_locked())
self.assertTrue(p2.check_answers(p1.solution))
class RoundtripPuzfileTests(unittest.TestCase):
    # Parameterised test case: one instance per .puz file on disk.
    # (Python 2 code: uses the file() builtin and Exception .message.)
    def __init__(self, filename):
        unittest.TestCase.__init__(self)
        self.filename = filename  # path of the .puz file to round-trip
    def runTest(self):
        # Read the raw bytes, parse with puz, re-serialise, and require
        # byte-for-byte equality -- proves the parser loses no information.
        try:
            orig = file(self.filename, 'rb').read()
            p = puz.read(self.filename)
            if (p.puzzletype == puz.PuzzleType.Normal):
                clues = p.clue_numbering()
                # smoke test the clue numbering while we're at it
                self.assertEqual(len(p.clues), len(clues.across) + len(clues.down), 'failed in %s' % self.filename)
            # this is the roundtrip
            new = p.tostring()
            self.assertEqual(orig, new, '%s did not round-trip' % self.filename)
        except puz.PuzzleFormatError:
            self.assertTrue(False, '%s threw PuzzleFormatError: %s' % (self.filename, sys.exc_info()[1].message))
def tests_in_dir(dir):
    # Walk `dir` recursively and build one RoundtripPuzfileTests instance
    # per *.puz file, flattened into a single list via sum(..., []).
    # (Python 2: map() returns a list here.)
    return sum((map(RoundtripPuzfileTests, glob.glob(os.path.join(path, '*.puz')))
                for path, dirs, files in os.walk(dir)), [])
def suite():
    # Suite consists of every test* method defined in PuzzleTests and
    # LockTests, plus a round-trip test for each .puz file under ./testfiles/.
    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PuzzleTests))
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(LockTests))
    suite.addTests(tests_in_dir('testfiles'))
    #suite.addTests(tests_in_dir('../xwordapp/data/'))
    return suite
if __name__ == '__main__':
print __file__
unittest.TextTestRunner().run(suite())
| import os
import sys
import glob
import unittest
import puz
class PuzzleTests(unittest.TestCase):
def testClueNumbering(self):
p = puz.read('testfiles/washpost.puz')
clues = p.clue_numbering()
self.assertEqual(len(p.clues), len(clues.across) + len(clues.down))
def testExtensions(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(puz.Extensions.Rebus in p.extensions)
self.assertTrue(puz.Extensions.RebusSolutions in p.extensions)
self.assertTrue(puz.Extensions.Markup in p.extensions)
def testRebus(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(p.has_rebus())
r = p.rebus()
self.assertTrue(r.has_rebus())
self.assertEqual(3, len(r.get_rebus_squares()))
self.assertTrue(all(r.is_rebus_square(i) for i in r.get_rebus_squares()))
self.assertTrue(all('STAR' == r.get_rebus_solution(i) for i in r.get_rebus_squares()))
self.assertTrue(None == r.get_rebus_solution(100))
# trigger save
p.tostring()
def testMarkup(self):
p = puz.read('testfiles/nyt_rebus_with_notes_and_shape.puz')
self.assertTrue(p.has_markup())
m = p.markup()
self.assertTrue(all(puz.GridMarkup.Circled == m.markup[i] for i in m.get_markup_squares()))
# trigger save
p.tostring()
p = puz.read('testfiles/washpost.puz')
self.assertFalse(p.has_markup())
m = p.markup()
self.assertFalse(m.has_markup())
# trigger save
p.tostring()
def testPuzzleType(self):
self.assertFalse(puz.read('testfiles/washpost.puz').puzzletype == puz.PuzzleType.Diagramless)
self.assertFalse(puz.read('testfiles/nyt_locked.puz').puzzletype == puz.PuzzleType.Diagramless)
self.assertTrue(puz.read('testfiles/nyt_diagramless.puz').puzzletype == puz.PuzzleType.Diagramless)
class LockTests(unittest.TestCase):
def testScrambleFunctions(self):
''' tests some examples from the file format documentation wiki
'''
self.assertEqual('MLOOPKJ', puz.scramble_string('AEBFCDG', 1234))
self.assertEqual('MOP..KLOJ', puz.scramble_solution('ABC..DEFG', 3, 3, 1234))
self.assertEqual('AEBFCDG', puz.unscramble_string('MLOOPKJ', 1234))
self.assertEqual('ABC..DEFG', puz.unscramble_solution('MOP..KLOJ', 3, 3, 1234))
# rectangular example - tricky
a = 'ABCD.EFGH.KHIJKLM.NOPW.XYZ'
self.assertEqual(a, puz.unscramble_solution(puz.scramble_solution(a, 13, 2, 9721), 13, 2, 9721))
def testLockedBit(self):
self.assertFalse(puz.read('testfiles/washpost.puz').is_solution_locked())
self.assertTrue(puz.read('testfiles/nyt_locked.puz').is_solution_locked())
def testUnlock(self):
p = puz.read('testfiles/nyt_locked.puz')
self.assertTrue(p.is_solution_locked())
self.assertFalse(p.unlock_solution(1234))
self.assertTrue(p.is_solution_locked()) # still locked
self.assertTrue(p.unlock_solution(7844))
self.assertFalse(p.is_solution_locked()) # unlocked!
self.assertTrue('LAKEONTARIO' in p.solution)
def testUnlockRelock(self):
orig = file('testfiles/nyt_locked.puz', 'rb').read()
p = puz.read('testfiles/nyt_locked.puz')
self.assertTrue(p.is_solution_locked())
self.assertTrue(p.unlock_solution(7844))
p.lock_solution(7844)
new = p.tostring()
self.assertEqual(orig, new, 'nyt_locked.puz dit not found-trip')
def testCheckAnswersLocked(self):
'''Verify that we can check answers even when the solution is locked
'''
p1 = puz.read('testfiles/nyt_locked.puz')
p2 = puz.read('testfiles/nyt_locked.puz')
p1.unlock_solution(7844)
self.assertTrue(p2.is_solution_locked())
self.assertTrue(p2.check_answers(p1.solution))
class RoundtripPuzfileTests(unittest.TestCase):
def __init__(self, filename):
unittest.TestCase.__init__(self)
self.filename = filename
def runTest(self):
try:
orig = file(self.filename, 'rb').read()
p = puz.read(self.filename)
if (p.puzzletype == puz.PuzzleType.Normal):
clues = p.clue_numbering()
# smoke test the clue numbering while we're at it
self.assertEqual(len(p.clues), len(clues.across) + len(clues.down), 'failed in %s' % self.filename)
# this is the roundtrip
new = p.tostring()
self.assertEqual(orig, new, '%s did not round-trip' % self.filename)
except puz.PuzzleFormatError:
self.assertTrue(False, '%s threw PuzzleFormatError: %s' % (self.filename, sys.exc_info()[1].message))
def tests_in_dir(dir):
return sum((map(RoundtripPuzfileTests, glob.glob(os.path.join(path, '*.puz')))
for path, dirs, files in os.walk(dir)), [])
def suite():
# suite consists of any test* method defined in PuzzleTests, plus a round-trip
# test for each .puz file in ./testfiles/
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PuzzleTests))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(LockTests))
suite.addTests(tests_in_dir('testfiles'))
#suite.addTests(tests_in_dir('../xwordapp/data/'))
return suite
if __name__ == '__main__':
print __file__
unittest.TextTestRunner().run(suite())
| en | 0.77976 | # trigger save # trigger save # trigger save tests some examples from the file format documentation wiki # rectangular example - tricky # still locked # unlocked! Verify that we can check answers even when the solution is locked # smoke test the clue numbering while we're at it # this is the roundtrip # suite consists of any test* method defined in PuzzleTests, plus a round-trip # test for each .puz file in ./testfiles/ #suite.addTests(tests_in_dir('../xwordapp/data/')) | 2.643317 | 3 |
train.py | DLWK/SegTrGAN | 0 | 6618708 | <reponame>DLWK/SegTrGAN
import torch
from torch import optim
from losses import *
from data.dataloader2 import XSDataset, XSDatatest
import torch.nn as nn
# from models import ModelBuilder, SegmentationModule, SAUNet, VGG19UNet, VGG19UNet_without_boudary,VGGUNet
from torchvision import transforms
from utils.metric import *
from evaluation import *
# from models.InfNet_Res2Net import Inf_Net
# from models.unet import UNet
# from models.fcn import get_fcn8s
# from models.UNet_2Plus import UNet_2Plus
# from models.AttU_Net_model import AttU_Net
# from models.BaseNet import CPFNet
# from models.cenet import CE_Net
# from models.denseunet_model import DenseUnet
# from models.F3net import F3Net
# from models.LDF import LDF
# from models.LDunet import LDUNet
# from SETR.transformer_seg import SETRModel
# from SETR.transformer_seg_edge import NetS, NetC
from model.transformer_GSdGAN import NetC, NetS
import torch.nn.functional as F
import tqdm
def iou_loss(pred, mask):
    """Boundary-weighted BCE + IoU ("structure") loss, as in F3Net/Inf-Net.

    Pixels near the mask boundary -- where a 31x31 average pool differs
    from the mask itself -- are up-weighted (weight in [1, 6]) in both the
    BCE and the IoU terms.

    Args:
        pred: raw logits, shape (B, 1, H, W).
        mask: binary ground truth of the same shape.

    Returns:
        Scalar loss tensor (mean over the batch).
    """
    # Boundary-aware weight map: 1 everywhere, up to 6 at mask edges.
    weit = 1+5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15)-mask)
    # Per-pixel BCE.  Must be `reduction='none'`: the deprecated boolean
    # kwarg `reduce` given a truthy string silently resolved to
    # reduction='mean', collapsing wbce to a scalar before weighting.
    wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
    wbce = (weit*wbce).sum(dim=(2,3))/weit.sum(dim=(2,3))

    pred = torch.sigmoid(pred)
    inter = ((pred*mask)*weit).sum(dim=(2,3))
    union = ((pred+mask)*weit).sum(dim=(2,3))
    wiou = 1-(inter+1)/(union-inter+1)
    # Mean over the batch.  The previous extra division by wiou.size(0)
    # rescaled the (already batch-averaged) loss relative to the canonical
    # structure loss.
    return (wbce+wiou).mean()
# def iou_loss(pred, mask):
# pred = torch.sigmoid(pred)
# inter = (pred*mask).sum(dim=(2,3))
# union = (pred+mask).sum(dim=(2,3))
# iou = 1-(inter+1)/(union-inter+1)
# return iou.mean()
# def adjust_learning_rate_poly(optimizer, epoch, num_epochs, base_lr, power)
# lr = base_lr * (1-epoch/num_epochs)**power
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def test(testLoader,fold, nets, device):
    """Evaluate the segmentation generator on a test loader.

    Args:
        testLoader: DataLoader yielding (image, label) pairs.
        fold: cross-validation fold index.  NOTE(review): currently unused
            inside this function -- confirm whether it was meant to select
            per-fold outputs.
        nets: generator returning (body, detail, mask) logits.
        device: torch device to run on.

    Returns:
        Tuple (acc, SE, SP, PC, F1, JS, DC, score) averaged over the
        loader, where score = JS + DC is used for model selection.
    """
    nets.to(device)
    sig = torch.nn.Sigmoid()  # NOTE(review): re-created per batch below; this one is dead
    nets.eval()
    with torch.no_grad():
        # when in test stage, no grad
        acc = 0.    # Accuracy
        SE = 0.     # Sensitivity (Recall)
        SP = 0.     # Specificity
        PC = 0.     # Precision
        F1 = 0.     # F1 Score
        JS = 0.     # Jaccard Similarity
        DC = 0.     # Dice Coefficient
        count = 0
        for image, label in tqdm.tqdm(testLoader):
            image = image.to(device=device, dtype=torch.float32)
            label = label.to(device=device, dtype=torch.float32)
            # Generator emits body/detail auxiliaries plus the mask logits;
            # only the mask prediction is scored here.
            b,d,pred =nets(image)
            sig = torch.nn.Sigmoid()
            pred = sig(pred)  # logits -> probabilities before thresholded metrics
            acc += get_accuracy(pred,label)
            SE += get_sensitivity(pred,label)
            SP += get_specificity(pred,label)
            PC += get_precision(pred,label)
            F1 += get_F1(pred,label)
            JS += get_JS(pred,label)
            DC += get_DC(pred,label)
            count+=1
        # Average every metric over the number of batches.
        acc = acc/count
        SE = SE/count
        SP = SP/count
        PC = PC/count
        F1 = F1/count
        JS = JS/count
        DC = DC/count
        score = JS + DC  # combined selection criterion
        return acc, SE, SP, PC, F1, JS, DC, score
def train_net(nets, netc,netc1, device, train_data_path,test_data_path, fold, epochs=100, batch_size=4, lr=0.00001):
    """Adversarial training loop for the SegTrGAN generator.

    Args:
        nets: generator (NetS) producing (body, detail, mask) logits.
        netc: transformer critic for the body branch.
        netc1: transformer critic for the detail branch.
        device: torch device for tensors.
        train_data_path: directory consumed by XSDataset (image/mask/body/detail).
        test_data_path: directory consumed by XSDatatest.
        fold: cross-validation fold index, used in checkpoint/CSV file names.
        epochs: number of training epochs.
        batch_size: training batch size.
        lr: learning rate for both RMSprop optimizers.

    Side effects: saves the best generator weights (by JS+DC score) and
    appends per-epoch metrics to a CSV under /home/wangkun/data/LDFGAN/XS/.
    NOTE(review): reads the module-level global `cuda` (set in __main__) and
    expects `Variable` in scope (presumably via `from losses import *`) --
    confirm before reusing elsewhere.
    """
    isbi_train_dataset = XSDataset(train_data_path)
    train_loader = torch.utils.data.DataLoader(dataset=isbi_train_dataset,
                                               batch_size=batch_size,
                                               drop_last=True,
                                               shuffle=True)
    test_dataset = XSDatatest(test_data_path)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=1,
                                              shuffle=False)
    # Separate optimizers for generator (G) and critic (D); StepLR decays
    # only the generator's learning rate every 10 epochs.
    # NOTE(review): netc1's parameters are in no optimizer -- its weights
    # change only via the clamp below.  Confirm whether that is intended.
    optimizerG = optim.RMSprop(nets.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
    optimizerD = optim.RMSprop(netc.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizerG, step_size=10, gamma=0.1)
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    print('===> Starting training\n')
    best_loss = float('inf')  # NOTE(review): unused; selection uses `result`
    result = 0  # best (JS + DC) validation score seen so far
    for epoch in range(1, epochs+1):
        i=0
        nets.train()
        for image, mask, body, detail in train_loader:
            # ---- critic (D) step ----
            netc.zero_grad()
            netc1.zero_grad()
            image = image.to(device=device, dtype=torch.float32)
            mask = mask.to(device=device, dtype=torch.float32)
            body = body.to(device=device, dtype=torch.float32)
            detail = detail.to(device=device, dtype=torch.float32)
            adversarial_loss = torch.nn.BCELoss()
            # Adversarial ground truths
            valid = Variable(Tensor(image.size(0), 1).fill_(1.0), requires_grad=False)
            fake = Variable(Tensor(image.size(0), 1).fill_(0.0), requires_grad=False)
            b, d, output = nets(image)
            b = F.sigmoid(b)
            b = b.detach()  # detach G from the graph: fake body map
            d = F.sigmoid(d)
            d = d.detach()  # fake detail map
            b = b.cuda()
            d= d.cuda()
            body = body.clone()
            detail = detail.clone()
            body =body.cuda()
            detail =detail.cuda()
            # Each critic returns (feature map, real/fake probability).
            outb1,b1=netc(b)
            tageb1, tb1=netc(body)
            outd1, d1=netc1(d)
            taged1, td1=netc1(detail)
            # Measure discriminator's ability to classify real from generated samples
            r_loss1 = adversarial_loss(tb1, valid)
            r_loss2 = adversarial_loss(td1, valid)
            f_loss1 = adversarial_loss(b1, fake)
            f_loss2 = adversarial_loss(d1, fake)
            G_loss = (r_loss1+f_loss1) /2 + (r_loss2+f_loss2) /2
            # Feature-matching terms, negated so the critic maximises the
            # feature distance between real and fake maps.
            loss_D1 = -torch.mean(torch.abs(outb1 - tageb1))
            loss_D2 = -torch.mean(torch.abs(outd1 - taged1))
            loss_D =loss_D1 +loss_D2 +G_loss
            loss_D.backward()
            optimizerD.step()
            # clip parameters in D (WGAN-style weight clamping)
            for p in netc.parameters():
                p.data.clamp_(-0.05, 0.05)
            for p in netc1.parameters():
                p.data.clamp_(-0.05, 0.05)
            #################################
            ### train Generator/Segmentor ###
            #################################
            nets.zero_grad()
            b, d, output = nets(image)
            # Segmentation loss: boundary-weighted IoU+BCE on the mask plus
            # plain BCE on the auxiliary body and detail branches.
            loss_dice = iou_loss(output,mask) + F.binary_cross_entropy_with_logits(b, body) + F.binary_cross_entropy_with_logits(d, detail)
            b = F.sigmoid(b)
            d = F.sigmoid(d)
            b = b.cuda()
            d = d.cuda()
            body = body.cuda()
            detail = detail.cuda()
            outb1, b1=netc(b)
            tageb1, tb1=netc(body)
            outd1,d1=netc1(d)
            taged1, td1=netc1(detail)
            # Loss measures generator's ability to fool the discriminator
            g_loss1 = adversarial_loss(b1, valid)
            g_loss2 = adversarial_loss(d1, valid)
            loss_G = torch.mean(torch.abs(outb1 - tageb1))+ torch.mean(torch.abs(outd1 - taged1))
            loss_G_joint = loss_G + loss_dice +(g_loss1+g_loss2)
            loss_G_joint.backward()
            optimizerG.step()
            if(i % 4 == 0):
                print("\nEpoch[{}/{}]\tBatch({}/{}):\tBatch Dice_Loss: {:.4f}\tG_Loss: {:.4f}\tD_Loss: {:.4f} \n".format(
                    epoch, epochs, i, len(train_loader), loss_dice.item(), loss_G.item(), loss_D.item()))
            i+=1
        if epoch>0:
            # Validate after every epoch; keep the checkpoint with the best
            # combined Jaccard + Dice score.
            acc, SE, SP, PC, F1, JS, DC, score=test(test_loader,fold, nets, device)
            if result < score:
                result = score
                # best_epoch = epoch
                torch.save(nets.state_dict(), '/home/wangkun/data/LDFGAN/XS/segTrGaNet_best_'+str(fold)+'.pth')
            # Append this epoch's metrics to the fold's CSV log.
            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # dump -- confirm whether this logged every epoch (as here) or only
            # on score improvement.
            with open("/home/wangkun/data/LDFGAN/XS/segTrGaNet_"+str(fold)+".csv", "a") as w:
                w.write("epoch="+str(epoch)+",acc="+str(acc)+", SE="+str(SE)+",SP="+str(SP)+",PC="+str(PC)+",F1="+str(F1)+",JS="+str(JS)+",DC="+str(DC)+",Score="+str(score)+"\n")
        scheduler.step()
if __name__ == "__main__":
    import os
    # Pin training to GPU 3 and fix RNG seeds for reproducibility.
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    seed=1234
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    fold = 3  # cross-validation fold; also used in output file names
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # `cuda` is read as a global inside train_net (Tensor type selection).
    cuda = True
    if cuda and not torch.cuda.is_available():
        raise Exception(' [!] No GPU found, please run without cuda.')
    # Generator: 3-channel input image, single-channel mask output.
    nets = NetS(n_channels=3, n_classes=1)
    # Two identically configured transformer critics: one for the body
    # branch (netc), one for the detail branch (netc1).
    netc = NetC(patch_size=(32, 32),
                in_channels=1,
                out_channels=1,
                hidden_size=1024,
                num_hidden_layers=8,
                num_attention_heads=16,
                decode_features=[512, 256, 128, 64])
    netc1 = NetC(patch_size=(32, 32),
                 in_channels=1,
                 out_channels=1,
                 hidden_size=1024,
                 num_hidden_layers=8,
                 num_attention_heads=16,
                 decode_features=[512, 256, 128, 64])
    nets.to(device=device)
    netc.to(device=device)
    netc1.to(device=device)
    data_path = "/home/wangkun/data/XS/Train/"
    test_data_path = "/home/wangkun/data/XS/val/"
    train_net(nets,netc, netc1, device, data_path,test_data_path, fold)
    # by <NAME> @2021.4.10
# by <NAME> @2021.4.10 | import torch
from torch import optim
from losses import *
from data.dataloader2 import XSDataset, XSDatatest
import torch.nn as nn
# from models import ModelBuilder, SegmentationModule, SAUNet, VGG19UNet, VGG19UNet_without_boudary,VGGUNet
from torchvision import transforms
from utils.metric import *
from evaluation import *
# from models.InfNet_Res2Net import Inf_Net
# from models.unet import UNet
# from models.fcn import get_fcn8s
# from models.UNet_2Plus import UNet_2Plus
# from models.AttU_Net_model import AttU_Net
# from models.BaseNet import CPFNet
# from models.cenet import CE_Net
# from models.denseunet_model import DenseUnet
# from models.F3net import F3Net
# from models.LDF import LDF
# from models.LDunet import LDUNet
# from SETR.transformer_seg import SETRModel
# from SETR.transformer_seg_edge import NetS, NetC
from model.transformer_GSdGAN import NetC, NetS
import torch.nn.functional as F
import tqdm
def iou_loss(pred, mask):
weit = 1+5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15)-mask)
wbce = F.binary_cross_entropy_with_logits(pred, mask, reduce='none')
wbce = (weit*wbce).sum(dim=(2,3))/weit.sum(dim=(2,3))
pred = torch.sigmoid(pred)
inter = ((pred*mask)*weit).sum(dim=(2,3))
union = ((pred+mask)*weit).sum(dim=(2,3))
wiou = 1-(inter+1)/(union-inter+1)
loss_total= (wbce+wiou).mean()/wiou.size(0)
return loss_total
# def iou_loss(pred, mask):
# pred = torch.sigmoid(pred)
# inter = (pred*mask).sum(dim=(2,3))
# union = (pred+mask).sum(dim=(2,3))
# iou = 1-(inter+1)/(union-inter+1)
# return iou.mean()
# def adjust_learning_rate_poly(optimizer, epoch, num_epochs, base_lr, power)
# lr = base_lr * (1-epoch/num_epochs)**power
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def test(testLoader,fold, nets, device):
nets.to(device)
sig = torch.nn.Sigmoid()
nets.eval()
with torch.no_grad():
# when in test stage, no grad
acc = 0. # Accuracy
SE = 0. # Sensitivity (Recall)
SP = 0. # Specificity
PC = 0. # Precision
F1 = 0. # F1 Score
JS = 0. # Jaccard Similarity
DC = 0. # Dice Coefficient
count = 0
for image, label in tqdm.tqdm(testLoader):
image = image.to(device=device, dtype=torch.float32)
label = label.to(device=device, dtype=torch.float32)
# body = body.to(device=device, dtype=torch.float32)
# detail = detail.to(device=device, dtype=torch.float32)
# p1,p2,p3,p4= net(image)
b,d,pred =nets(image)
sig = torch.nn.Sigmoid()
pred = sig(pred)
# print(pred.shape)
acc += get_accuracy(pred,label)
SE += get_sensitivity(pred,label)
SP += get_specificity(pred,label)
PC += get_precision(pred,label)
F1 += get_F1(pred,label)
JS += get_JS(pred,label)
DC += get_DC(pred,label)
count+=1
acc = acc/count
SE = SE/count
SP = SP/count
PC = PC/count
F1 = F1/count
JS = JS/count
DC = DC/count
score = JS + DC
return acc, SE, SP, PC, F1, JS, DC, score
def train_net(nets, netc,netc1, device, train_data_path,test_data_path, fold, epochs=100, batch_size=4, lr=0.00001): #lr=0.00001
isbi_train_dataset = XSDataset(train_data_path)
train_loader = torch.utils.data.DataLoader(dataset=isbi_train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True)
test_dataset = XSDatatest(test_data_path)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=1,
shuffle=False)
# setup optimizer
# beta1 = 0.5
# optimizerG = optim.Adam(nets.parameters(), lr=lr, betas=(beta1, 0.999))
# optimizerD = optim.Adam(netc.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.RMSprop(nets.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
optimizerD = optim.RMSprop(netc.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizerG, step_size=10, gamma=0.1)
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# criterion2 = nn.BCEWithLogitsLoss()
#criterion3 = structure_loss()
# criterion3 = BCEDiceLoss()
# criterion = nn.BCEWithLogitsLoss()
# criterion = BCEDiceLoss()
# criterion2 =LovaszHingeLoss()
print('===> Starting training\n')
best_loss = float('inf')
result = 0
# f = open('./finall_loss_unet'+str(fold)+'.csv', 'w')
# f.write('epoch,loss'+'\n')
for epoch in range(1, epochs+1):
i=0
nets.train()
for image, mask, body, detail in train_loader:
#train C
netc.zero_grad()
netc1.zero_grad()
image = image.to(device=device, dtype=torch.float32)
mask = mask.to(device=device, dtype=torch.float32)
body = body.to(device=device, dtype=torch.float32)
detail = detail.to(device=device, dtype=torch.float32)
# edge = edge.to(device=device, dtype=torch.float32)
# 使用网络参数,输出预测结果
# outb1, outd1, out1, outb2, outd2, out2 = net(image)
# lossb1 = F.binary_cross_entropy_with_logits(outb1, body)
# lossd1 = F.binary_cross_entropy_with_logits(outd1, detail)
# loss1 = F.binary_cross_entropy_with_logits(out1, mask) + iou_loss(out1, mask)
adversarial_loss = torch.nn.BCELoss()
# lossb2 = F.binary_cross_entropy_with_logits(outb2, body)
# lossd2 = F.binary_cross_entropy_with_logits(outd2, detail)
# loss2 = F.binary_cross_entropy_with_logits(out2, mask) + iou_loss(out2, mask)
# loss = (lossb1 + lossd1 + loss1 + lossb2 + lossd2 + loss2)/2
# p1,p2,p3,p4 = net(image)
# loss1= iou_loss(p1,mask)
# loss2= iou_loss(p2,body)
# loss3= iou_loss(p3,detail)
# loss4= iou_loss(p4,mask)
# loss=loss1+loss2+loss3+loss4
# Adversarial ground truths
valid = Variable(Tensor(image.size(0), 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(image.size(0), 1).fill_(0.0), requires_grad=False)
b, d, output = nets(image)
# output = F.sigmoid(output)
# output = output.detach() ### detach G from the network
b = F.sigmoid(b)
b = b.detach() ##body fake image
d = F.sigmoid(d)
d = d.detach() ###detail fake image
b = b.cuda()
d= d.cuda()
body = body.clone()
detail = detail.clone()
body =body.cuda()
detail =detail.cuda()
outb1,b1=netc(b)
tageb1, tb1=netc(body)
outd1, d1=netc1(d)
taged1, td1=netc1(detail)
# Measure discriminator's ability to classify real from generated samples
r_loss1 = adversarial_loss(tb1, valid)
r_loss2 = adversarial_loss(td1, valid)
f_loss1 = adversarial_loss(b1, fake)
f_loss2 = adversarial_loss(d1, fake)
G_loss = (r_loss1+f_loss1) /2 + (r_loss2+f_loss2) /2
loss_D1 = -torch.mean(torch.abs(outb1 - tageb1))
# input_mask = image.clone()
loss_D2 = -torch.mean(torch.abs(outd1 - taged1))
# output_masked = image.clone()
# output_masked = input_mask * output
# output_masked = output
# if cuda:
# output_masked = output_masked.cuda()
# # target_masked = image.clone()
# # target_masked = input_mask * mask
# # target_masked = mask
# if cuda:
# target_masked = target_masked.cuda()
# output_D = netc(output_masked)
# # print(output_D.shape)
# target_D = netc(target_masked)
# # print(target_D.shape)
# loss_D = 1 - torch.mean(torch.abs(output_D - target_D))
loss_D =loss_D1 +loss_D2 +G_loss
loss_D.backward()
# loss_D2.backward()
optimizerD.step()
### clip parameters in D
for p in netc.parameters():
p.data.clamp_(-0.05, 0.05)
for p in netc1.parameters():
p.data.clamp_(-0.05, 0.05)
#################################
### train Generator/Segmentor ###
#################################
nets.zero_grad()
b, d, output = nets(image)
# output = F.sigmoid(output)
loss_dice = iou_loss(output,mask) + F.binary_cross_entropy_with_logits(b, body) + F.binary_cross_entropy_with_logits(d, detail) ####修改
# output_masked = input_mask * output
# if cuda:
# output_masked = output_masked.cuda()
# target_masked = input_mask * mask
# if cuda:
# target_masked = target_masked.cuda()
b = F.sigmoid(b)
d = F.sigmoid(d)
b = b.cuda()
d = d.cuda()
body = body.cuda()
detail = detail.cuda()
outb1, b1=netc(b)
tageb1, tb1=netc(body)
outd1,d1=netc1(d)
taged1, td1=netc1(detail)
# Loss measures generator's ability to fool the discriminator
g_loss1 = adversarial_loss(b1, valid)
g_loss2 = adversarial_loss(d1, valid)
loss_G = torch.mean(torch.abs(outb1 - tageb1))+ torch.mean(torch.abs(outd1 - taged1))
loss_G_joint = loss_G + loss_dice +(g_loss1+g_loss2)
loss_G_joint.backward()
optimizerG.step()
if(i % 4 == 0):
print("\nEpoch[{}/{}]\tBatch({}/{}):\tBatch Dice_Loss: {:.4f}\tG_Loss: {:.4f}\tD_Loss: {:.4f} \n".format(
epoch, epochs, i, len(train_loader), loss_dice.item(), loss_G.item(), loss_D.item()))
i+=1
if epoch>0:
acc, SE, SP, PC, F1, JS, DC, score=test(test_loader,fold, nets, device)
if result < score:
result = score
# best_epoch = epoch
torch.save(nets.state_dict(), '/home/wangkun/data/LDFGAN/XS/segTrGaNet_best_'+str(fold)+'.pth')
with open("/home/wangkun/data/LDFGAN/XS/segTrGaNet_"+str(fold)+".csv", "a") as w:
w.write("epoch="+str(epoch)+",acc="+str(acc)+", SE="+str(SE)+",SP="+str(SP)+",PC="+str(PC)+",F1="+str(F1)+",JS="+str(JS)+",DC="+str(DC)+",Score="+str(score)+"\n")
scheduler.step()
if __name__ == "__main__":
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
seed=1234
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
fold = 3
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cuda = True
if cuda and not torch.cuda.is_available():
raise Exception(' [!] No GPU found, please run without cuda.')
# net = VGG19UNet(n_channels=1, n_classes=1)
# net = UNet(n_channels=1, n_classes=1)
# net =get_fcn8s(n_class=1)
# net = UNet_2Plus(in_channels=1, n_classes=1)
nets = NetS(n_channels=3, n_classes=1)
netc = NetC(patch_size=(32, 32),
in_channels=1,
out_channels=1,
hidden_size=1024,
num_hidden_layers=8,
num_attention_heads=16,
decode_features=[512, 256, 128, 64])
netc1 = NetC(patch_size=(32, 32),
in_channels=1,
out_channels=1,
hidden_size=1024,
num_hidden_layers=8,
num_attention_heads=16,
decode_features=[512, 256, 128, 64])
# net = Inf_Net()
# net = AttU_Net(img_ch=1, output_ch=1)
# net = CE_Net(num_classes=1, num_channels=1)
# net = CPFNet()
# net = VGGUNet(n_channels=1, n_classes=1)
# net = VGG19UNet_without_boudary(n_channels=1, n_classes=1)
# net = DenseUnet(in_ch=1, num_classes=1)
# net = LDUNet(n_channels=1, n_classes=1)
# net = LDF()
nets.to(device=device)
netc.to(device=device)
netc1.to(device=device)
# data_path = "/home/wangkun/shape-attentive-unet/data_5fold/train_96_"+str(fold)
# test_data_path = "/home/wangkun/shape-attentive-unet/data_5fold/test_96_"+str(fold)
# data_path = "/home/wangkun/shape-attentive-unet/data/COVD-19/train_512_"+str(fold)
# test_data_path = "/home/wangkun/shape-attentive-unet/data/COVD-19/test_512_"+str(fold)
data_path = "/home/wangkun/data/XS/Train/"
test_data_path = "/home/wangkun/data/XS/val/"
train_net(nets,netc, netc1, device, data_path,test_data_path, fold)
# by <NAME> @2021.4.10 | en | 0.35868 | # from models import ModelBuilder, SegmentationModule, SAUNet, VGG19UNet, VGG19UNet_without_boudary,VGGUNet # from models.InfNet_Res2Net import Inf_Net # from models.unet import UNet # from models.fcn import get_fcn8s # from models.UNet_2Plus import UNet_2Plus # from models.AttU_Net_model import AttU_Net # from models.BaseNet import CPFNet # from models.cenet import CE_Net # from models.denseunet_model import DenseUnet # from models.F3net import F3Net # from models.LDF import LDF # from models.LDunet import LDUNet # from SETR.transformer_seg import SETRModel # from SETR.transformer_seg_edge import NetS, NetC # def iou_loss(pred, mask): # pred = torch.sigmoid(pred) # inter = (pred*mask).sum(dim=(2,3)) # union = (pred+mask).sum(dim=(2,3)) # iou = 1-(inter+1)/(union-inter+1) # return iou.mean() # def adjust_learning_rate_poly(optimizer, epoch, num_epochs, base_lr, power) # lr = base_lr * (1-epoch/num_epochs)**power # for param_group in optimizer.param_groups: # param_group['lr'] = lr # return lr # when in test stage, no grad # Accuracy # Sensitivity (Recall) # Specificity # Precision # F1 Score # Jaccard Similarity # Dice Coefficient # body = body.to(device=device, dtype=torch.float32) # detail = detail.to(device=device, dtype=torch.float32) # p1,p2,p3,p4= net(image) # print(pred.shape) #lr=0.00001 # setup optimizer # beta1 = 0.5 # optimizerG = optim.Adam(nets.parameters(), lr=lr, betas=(beta1, 0.999)) # optimizerD = optim.Adam(netc.parameters(), lr=lr, betas=(beta1, 0.999)) # criterion2 = nn.BCEWithLogitsLoss() #criterion3 = structure_loss() # criterion3 = BCEDiceLoss() # criterion = nn.BCEWithLogitsLoss() # criterion = BCEDiceLoss() # criterion2 =LovaszHingeLoss() # f = open('./finall_loss_unet'+str(fold)+'.csv', 'w') # f.write('epoch,loss'+'\n') #train C # edge = edge.to(device=device, dtype=torch.float32) # 使用网络参数,输出预测结果 # outb1, outd1, out1, outb2, outd2, out2 = net(image) # lossb1 = 
F.binary_cross_entropy_with_logits(outb1, body) # lossd1 = F.binary_cross_entropy_with_logits(outd1, detail) # loss1 = F.binary_cross_entropy_with_logits(out1, mask) + iou_loss(out1, mask) # lossb2 = F.binary_cross_entropy_with_logits(outb2, body) # lossd2 = F.binary_cross_entropy_with_logits(outd2, detail) # loss2 = F.binary_cross_entropy_with_logits(out2, mask) + iou_loss(out2, mask) # loss = (lossb1 + lossd1 + loss1 + lossb2 + lossd2 + loss2)/2 # p1,p2,p3,p4 = net(image) # loss1= iou_loss(p1,mask) # loss2= iou_loss(p2,body) # loss3= iou_loss(p3,detail) # loss4= iou_loss(p4,mask) # loss=loss1+loss2+loss3+loss4 # Adversarial ground truths # output = F.sigmoid(output) # output = output.detach() ### detach G from the network ##body fake image ###detail fake image # Measure discriminator's ability to classify real from generated samples # input_mask = image.clone() # output_masked = image.clone() # output_masked = input_mask * output # output_masked = output # if cuda: # output_masked = output_masked.cuda() # # target_masked = image.clone() # # target_masked = input_mask * mask # # target_masked = mask # if cuda: # target_masked = target_masked.cuda() # output_D = netc(output_masked) # # print(output_D.shape) # target_D = netc(target_masked) # # print(target_D.shape) # loss_D = 1 - torch.mean(torch.abs(output_D - target_D)) # loss_D2.backward() ### clip parameters in D ################################# ### train Generator/Segmentor ### ################################# # output = F.sigmoid(output) ####修改 # output_masked = input_mask * output # if cuda: # output_masked = output_masked.cuda() # target_masked = input_mask * mask # if cuda: # target_masked = target_masked.cuda() # Loss measures generator's ability to fool the discriminator # best_epoch = epoch # net = VGG19UNet(n_channels=1, n_classes=1) # net = UNet(n_channels=1, n_classes=1) # net =get_fcn8s(n_class=1) # net = UNet_2Plus(in_channels=1, n_classes=1) # net = Inf_Net() # net = AttU_Net(img_ch=1, 
output_ch=1) # net = CE_Net(num_classes=1, num_channels=1) # net = CPFNet() # net = VGGUNet(n_channels=1, n_classes=1) # net = VGG19UNet_without_boudary(n_channels=1, n_classes=1) # net = DenseUnet(in_ch=1, num_classes=1) # net = LDUNet(n_channels=1, n_classes=1) # net = LDF() # data_path = "/home/wangkun/shape-attentive-unet/data_5fold/train_96_"+str(fold) # test_data_path = "/home/wangkun/shape-attentive-unet/data_5fold/test_96_"+str(fold) # data_path = "/home/wangkun/shape-attentive-unet/data/COVD-19/train_512_"+str(fold) # test_data_path = "/home/wangkun/shape-attentive-unet/data/COVD-19/test_512_"+str(fold) # by <NAME> @2021.4.10 | 1.857829 | 2 |
django_facebook/context_processors.py | stochastic-technologies/Django-facebook | 1 | 6618709 |
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils.functional import lazy
def facebook(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
context = {}
from django_facebook import settings as facebook_settings
context['FACEBOOK_API_KEY'] = facebook_settings.FACEBOOK_API_KEY
context['FACEBOOK_APP_ID'] = facebook_settings.FACEBOOK_APP_ID
return context
|
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils.functional import lazy
def facebook(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
context = {}
from django_facebook import settings as facebook_settings
context['FACEBOOK_API_KEY'] = facebook_settings.FACEBOOK_API_KEY
context['FACEBOOK_APP_ID'] = facebook_settings.FACEBOOK_APP_ID
return context
| en | 0.837812 | Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if it has not been provided by either a view decorator or the middleware | 2.039059 | 2 |
week10/lab10/weather.py | taoyichen/CS110-Assignments-Python | 0 | 6618710 | <filename>week10/lab10/weather.py
#This file is completed by partner work for lab 10 part 1 and 2
#Author <NAME>
#Author <NAME>
#partI
#readlines()
def main1():
myFile = open('rainfall.txt','r')
accumulator = 0
myFileList = myFile.readlines()
minimum = 100
maximum = 0
for lst in myFileList:
temperatureList = lst.split()
temperature = float(temperatureList[1])
if temperature < minimum:
minimum = temperature
if temperature > maximum:
maximum = temperature
accumulator += temperature
lstAverage = accumulator / len(myFileList)
print('average =',lstAverage,',minimum =',minimum,',maximum =', maximum)
main1()
#forloop
def main2():
myFile = open('rainfall.txt','r')
accumulator = 0
accu = 0
for lst in myFile:
accu += 1
temperatureList = lst.split()
accumulator += float(temperatureList[1])
lstAverage = accumulator / accu
print(lstAverage)
main2()
#readline()
def main3():
myFile = open('rainfall.txt','r')
accu = 0
tempaccumulator = 0
minimum = 100
maximum = 0
temperature = 0
line = myFile.readline()
while line:
print(line)
line_list = line.split()
print(line_list[1])
temp = float(line_list[1])
if temp < minimum:
minimum = temp
if temp > maximum:
maximum = temp
tempaccumulator += temp
accu += 1
line = myFile.readline()
averageTemp = tempaccumulator / accu
print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum)
#read()
def main4():
accu = 0
tempaccumulator = 0
minimum = 100
maximum = 0
temperature = 0
myFile = open('rainfall.txt','r')
temp_acc = -0
giant_string = myFile.read()
giant_list = giant_string.split("\n")
for line in giant_list:
if line:
line_list = line.split()
temp = float(line_list[1])
if temp < minimum:
minimum = temp
if temp > maximum:
maximum = temp
tempaccumulator += temp
accu += 1
else:
break
averageTemp = tempaccumulator / accu
print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum)
main4()
#partII
def main5():
myFile = open('conversion.txt','w')
myFile.write('Fahrenheit Celcius\n')
for i in range(-300,213):
tempF = i
tempC = (5/9)*(i-32)
myFile.write('%10.2f %10.2f\n' % (tempF, tempC))
myFile.close()
myFile = open('conversion.txt','r')
main5()
| <filename>week10/lab10/weather.py
#This file is completed by partner work for lab 10 part 1 and 2
#Author <NAME>
#Author <NAME>
#partI
#readlines()
def main1():
myFile = open('rainfall.txt','r')
accumulator = 0
myFileList = myFile.readlines()
minimum = 100
maximum = 0
for lst in myFileList:
temperatureList = lst.split()
temperature = float(temperatureList[1])
if temperature < minimum:
minimum = temperature
if temperature > maximum:
maximum = temperature
accumulator += temperature
lstAverage = accumulator / len(myFileList)
print('average =',lstAverage,',minimum =',minimum,',maximum =', maximum)
main1()
#forloop
def main2():
myFile = open('rainfall.txt','r')
accumulator = 0
accu = 0
for lst in myFile:
accu += 1
temperatureList = lst.split()
accumulator += float(temperatureList[1])
lstAverage = accumulator / accu
print(lstAverage)
main2()
#readline()
def main3():
myFile = open('rainfall.txt','r')
accu = 0
tempaccumulator = 0
minimum = 100
maximum = 0
temperature = 0
line = myFile.readline()
while line:
print(line)
line_list = line.split()
print(line_list[1])
temp = float(line_list[1])
if temp < minimum:
minimum = temp
if temp > maximum:
maximum = temp
tempaccumulator += temp
accu += 1
line = myFile.readline()
averageTemp = tempaccumulator / accu
print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum)
#read()
def main4():
accu = 0
tempaccumulator = 0
minimum = 100
maximum = 0
temperature = 0
myFile = open('rainfall.txt','r')
temp_acc = -0
giant_string = myFile.read()
giant_list = giant_string.split("\n")
for line in giant_list:
if line:
line_list = line.split()
temp = float(line_list[1])
if temp < minimum:
minimum = temp
if temp > maximum:
maximum = temp
tempaccumulator += temp
accu += 1
else:
break
averageTemp = tempaccumulator / accu
print('average =',averageTemp,',minimum =',minimum,',maximum =', maximum)
main4()
#partII
def main5():
myFile = open('conversion.txt','w')
myFile.write('Fahrenheit Celcius\n')
for i in range(-300,213):
tempF = i
tempC = (5/9)*(i-32)
myFile.write('%10.2f %10.2f\n' % (tempF, tempC))
myFile.close()
myFile = open('conversion.txt','r')
main5()
| en | 0.862518 | #This file is completed by partner work for lab 10 part 1 and 2 #Author <NAME> #Author <NAME> #partI #readlines() #forloop #readline() #read() #partII | 3.745569 | 4 |
tests/providers/person.py | guinslym/faker | 0 | 6618711 | # coding=utf-8
from __future__ import unicode_literals
import unittest
from faker import Factory
from faker.providers.person.ne_NP import Provider as NeProvider
from .. import string_types
class TestJaJP(unittest.TestCase):
""" Tests person in the ja_JP locale """
def setUp(self):
self.factory = Factory.create('ja')
def test_person(self):
name = self.factory.name()
assert name
assert isinstance(name, string_types)
first_name = self.factory.first_name()
assert first_name
assert isinstance(first_name, string_types)
last_name = self.factory.last_name()
assert last_name
assert isinstance(last_name, string_types)
kana_name = self.factory.kana_name()
assert kana_name
assert isinstance(kana_name, string_types)
first_kana_name = self.factory.first_kana_name()
assert first_kana_name
assert isinstance(first_kana_name, string_types)
first_kana_name_male = self.factory.first_kana_name_male()
assert first_kana_name_male
assert isinstance(first_kana_name_male, string_types)
first_kana_name_female = self.factory.first_kana_name_female()
assert first_kana_name_female
assert isinstance(first_kana_name_female, string_types)
last_kana_name = self.factory.last_kana_name()
assert last_kana_name
assert isinstance(last_kana_name, string_types)
romanized_name = self.factory.romanized_name()
assert romanized_name
assert isinstance(romanized_name, string_types)
first_romanized_name = self.factory.first_romanized_name()
assert first_romanized_name
assert isinstance(first_romanized_name, string_types)
first_romanized_name_male = self.factory.first_romanized_name_male()
assert first_romanized_name_male
assert isinstance(first_romanized_name_male, string_types)
first_romanized_name_female = self.factory.first_romanized_name_female()
assert first_romanized_name_female
assert isinstance(first_romanized_name_female, string_types)
last_romanized_name = self.factory.last_romanized_name()
assert last_romanized_name
assert isinstance(last_romanized_name, string_types)
class TestNeNP(unittest.TestCase):
def setUp(self):
self.factory = Factory.create('ne_NP')
def test_names(self):
name = self.factory.name().split()
assert all(isinstance(n, string_types) for n in name)
# name should always be 2-3 words. If 3, first word
# should be a prefix.
assert name[-2] in NeProvider.first_names
assert name[-1] in NeProvider.last_names
prefixes = NeProvider.prefixes_male + NeProvider.prefixes_female
if len(name) == 3:
assert name[0] in prefixes
| # coding=utf-8
from __future__ import unicode_literals
import unittest
from faker import Factory
from faker.providers.person.ne_NP import Provider as NeProvider
from .. import string_types
class TestJaJP(unittest.TestCase):
""" Tests person in the ja_JP locale """
def setUp(self):
self.factory = Factory.create('ja')
def test_person(self):
name = self.factory.name()
assert name
assert isinstance(name, string_types)
first_name = self.factory.first_name()
assert first_name
assert isinstance(first_name, string_types)
last_name = self.factory.last_name()
assert last_name
assert isinstance(last_name, string_types)
kana_name = self.factory.kana_name()
assert kana_name
assert isinstance(kana_name, string_types)
first_kana_name = self.factory.first_kana_name()
assert first_kana_name
assert isinstance(first_kana_name, string_types)
first_kana_name_male = self.factory.first_kana_name_male()
assert first_kana_name_male
assert isinstance(first_kana_name_male, string_types)
first_kana_name_female = self.factory.first_kana_name_female()
assert first_kana_name_female
assert isinstance(first_kana_name_female, string_types)
last_kana_name = self.factory.last_kana_name()
assert last_kana_name
assert isinstance(last_kana_name, string_types)
romanized_name = self.factory.romanized_name()
assert romanized_name
assert isinstance(romanized_name, string_types)
first_romanized_name = self.factory.first_romanized_name()
assert first_romanized_name
assert isinstance(first_romanized_name, string_types)
first_romanized_name_male = self.factory.first_romanized_name_male()
assert first_romanized_name_male
assert isinstance(first_romanized_name_male, string_types)
first_romanized_name_female = self.factory.first_romanized_name_female()
assert first_romanized_name_female
assert isinstance(first_romanized_name_female, string_types)
last_romanized_name = self.factory.last_romanized_name()
assert last_romanized_name
assert isinstance(last_romanized_name, string_types)
class TestNeNP(unittest.TestCase):
def setUp(self):
self.factory = Factory.create('ne_NP')
def test_names(self):
name = self.factory.name().split()
assert all(isinstance(n, string_types) for n in name)
# name should always be 2-3 words. If 3, first word
# should be a prefix.
assert name[-2] in NeProvider.first_names
assert name[-1] in NeProvider.last_names
prefixes = NeProvider.prefixes_male + NeProvider.prefixes_female
if len(name) == 3:
assert name[0] in prefixes
| en | 0.838644 | # coding=utf-8 Tests person in the ja_JP locale # name should always be 2-3 words. If 3, first word # should be a prefix. | 2.494357 | 2 |
train_classification.py | saic-vul/cloud_transformers | 25 | 6618712 | import sys
import os
import numpy as np
import yaml
from collections import defaultdict
import torch
from torch import nn
import tqdm
import argparse
sys.path.append(os.path.realpath(__file__))
from datasets.scanobjectnn import ScanObjectNN
from utils import train_util
from utils import train_util_distributed
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
torch.set_num_threads(1)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="path to py model", required=True, default='.configs/config_seg.yaml')
parser.add_argument("exp_name", help="name of the exp")
parser.add_argument("--master", required=True)
parser.add_argument("--rank", required=True, type=int)
parser.add_argument("--num_nodes", required=True, type=int)
args = parser.parse_args()
config_path = args.config
dist_backend = 'nccl'
# Url used to setup distributed training
dist_url = "tcp://{}".format(args.master)
dist.init_process_group(backend=dist_backend,
init_method=dist_url,
rank=args.rank,
world_size=args.num_nodes)
with open(config_path, 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
# check if we masked all other devices
assert(torch.cuda.device_count() == 1)
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
gen_model_file, gen_model_name = train_util.check_model_paths(cfg['model']['generator'])[0]
exp_descr = args.exp_name
scan_train = ScanObjectNN(data_dir=cfg['data']['path'],
train=True,
center=cfg['data']['center'],
normalize=cfg['data']['normalize'],
subsample=cfg['data']['subsample'] if 'subsample' in cfg['data'] else None)
scan_val = ScanObjectNN(data_dir=cfg['data']['path_val'],
train=False,
center=cfg['data']['center'],
normalize=cfg['data']['normalize'],
subsample=cfg['data']['subsample'] if 'subsample' in cfg['data'] else None)
train_sampler = torch.utils.data.distributed.DistributedSampler(scan_train)
val_sampler = torch.utils.data.distributed.DistributedSampler(scan_val)
dataloader_train = torch.utils.data.DataLoader(scan_train,
batch_size=cfg['data']['batch_size'],
shuffle=(train_sampler is None),
num_workers=cfg['data']['num_workers'],
pin_memory=False, sampler=train_sampler)
dataloader_val = torch.utils.data.DataLoader(scan_val,
batch_size=cfg['data']['batch_size_val'],
shuffle=False,
num_workers=cfg['data']['num_workers'],
pin_memory=False, sampler=val_sampler)
# model part
del cfg['model']['generator']
params_dict_gen = cfg['model']
torch.manual_seed(42)
if args.rank == 0:
writer, exp_dir, full_desc = train_util.create_experiment(exp_descr,
params_dict={'exp_root': cfg['experiment']['root'],
'writer_root': cfg['experiment']['writer_root'],
'config_path': config_path})
generator = train_util.get_model(gen_model_file, params_dict_gen,
exp_dir=exp_dir).cuda()
else:
generator = train_util.get_model(gen_model_file, params_dict_gen, exp_dir=None).cuda()
generator = torch.nn.parallel.DistributedDataParallel(torch.nn.SyncBatchNorm.convert_sync_batchnorm(generator),
device_ids=[0],
output_device=0)
# print('generator', generator)
print('generator_params', sum(p.numel() for p in generator.parameters()))
if 'scale_lr' in cfg['train']:
opt_params_dict = [{'params': map(lambda p: p[1],
filter(lambda p: not p[0].endswith('scale'),
generator.named_parameters()))},
{'params': map(lambda p: p[1],
filter(lambda p: p[0].endswith('scale'),
generator.named_parameters())), 'lr': cfg['train']['scale_lr']}]
else:
opt_params_dict = generator.parameters()
optimizer = train_util.make_optimizer(opt_params_dict, cfg['train']['optimizer'])
scheduler_adaptive = cfg['train']['scheduler']['type'] == 'ReduceLROnPlateau'
scheduler = train_util.make_scheduler(optimizer, cfg['train']['scheduler'])
# print(scheduler)
# print('Is scheduler adaptive? {}'.format(scheduler_adaptive))
if 'restore' in cfg:
print('restoring')
g_ckpt_path = cfg['restore']['generator']
g_opt_ckpt_path = cfg['restore']['optimizer']
train_util_distributed.restore_exp(objects=[generator, optimizer],
names=[g_ckpt_path, g_opt_ckpt_path])
if 'new_lr' in cfg['restore']:
for param_group in optimizer.param_groups:
param_group['lr'] = cfg['restore']['new_lr']
show_each = cfg['train']['show_each']
data_iters = 0
seg_loss_weight = cfg['train']['seg_weight']
label_smooth = False
if 'label_smooth' in cfg['train']:
label_smooth = cfg['train']['label_smooth']
cross_entropy_loss = nn.CrossEntropyLoss()
bce_loss = nn.BCEWithLogitsLoss()
def gather_results(loss_dict,
class_pred, mask_pred, labels, mask):
loss_all = train_util_distributed.reduce_loss_dict(loss_dict)
with torch.no_grad():
class_pred_this = np.argmax(class_pred.detach().cpu().numpy(), axis=1)
mask_pred_this = (torch.sigmoid(mask_pred[:, 0, 0]) > 0.5).detach().cpu().numpy()
mask_this = mask.detach().cpu().numpy()
labels_this = labels.detach().cpu().numpy()
all_gathered = train_util_distributed.all_gather((class_pred_this,
mask_pred_this,
labels_this,
mask_this))
return loss_all, all_gathered
n_classes = 15
max_val_acc = 0
max_val_macc = 0
for epoch in range(cfg['train']['num_epochs']):
train_sampler.set_epoch(epoch)
generator.train()
total_correct = 0
total_correct_seg = 0
total_seen = 0
total_seen_seg = 0
loss_train_all = []
cls_loss_train_all = []
seg_loss_train_all = []
for i, (pcd, labels, mask) in tqdm.tqdm(enumerate(dataloader_train)):
pcd = pcd.permute(0, 2, 1)[:, :, None].cuda()
mask = mask.float().cuda(non_blocking=True)
labels = labels.long().cuda(non_blocking=True)
class_pred, mask_pred, lattices_sizes = generator(pcd)
seg_loss = bce_loss(mask_pred[:, 0, 0], mask)
cls_loss = cross_entropy_loss(class_pred, labels)
loss = (1-seg_loss_weight) * cls_loss + seg_loss_weight * seg_loss
loss.backward()
if 'grad_stats' in cfg['train']:
with torch.no_grad():
if args.rank == 0 and data_iters % cfg['train']['grad_stats']['iters'] == 0:
for name, param in generator.module.named_parameters():
# writer.add_histogram('weight_' + name, param, global_step=data_iters)
if param.requires_grad:
if param.grad is None:
print(name)
if cfg['train']['grad_stats']['hist']:
writer.add_histogram('stats/grad_' + name,
param.grad,
global_step=data_iters, bins='auto')
writer.add_scalar('stats/grad_n_' + name,
torch.norm(param.grad),
global_step=data_iters)
writer.flush()
optimizer.step()
optimizer.zero_grad()
loss_all, all_gathered = gather_results(loss_dict={'loss': loss, 'loss_cls': cls_loss,
'loss_seg': seg_loss},
class_pred=class_pred,
mask_pred=mask_pred,
labels=labels,
mask=mask)
if args.rank == 0:
loss_train_all.append(loss_all['loss'].item())
cls_loss_train_all.append(loss_all['loss_cls'].item())
seg_loss_train_all.append(loss_all['loss_seg'].item())
for class_pred_np, mask_pred_np, labels_np, mask_np in all_gathered:
total_correct += np.sum(class_pred_np == labels_np)
total_seen += labels_np.shape[0]
total_correct_seg += np.sum(mask_pred_np == mask_np)
total_seen_seg += mask_np.shape[0] * mask_np.shape[-1]
if args.rank == 0:
for key in loss_all.keys():
writer.add_scalar('train/{}'.format(key), loss_all[key].item(), global_step=data_iters)
# lattice stats
for i, value in enumerate(lattices_sizes):
writer.add_scalar('train/lattice_{}'.format(i),
value[0], global_step=data_iters)
writer.add_scalar('train/norm_l_feat_{}'.format(i),
value[1].item(), global_step=data_iters)
writer.add_scalar('train/norm_l_feat_var_{}'.format(i),
value[2].item(), global_step=data_iters)
if data_iters % cfg['train']['save_each'] == 0 and data_iters > 0:
generator.eval()
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=data_iters, epoch_name='iter')
generator.train()
data_iters += 1
if not scheduler_adaptive:
scheduler.step(data_iters)
del pcd, labels
if args.rank == 0:
print('on train cls_acc {}, seg_acc {}'.format(total_correct / float(total_seen),
total_correct_seg / float(total_seen_seg)))
if args.rank == 0 and epoch % cfg['train']['save_each_epoch'] == 0 and epoch > 0:
print('saving ckpt')
train_util.save_exp([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir, epoch=epoch, epoch_name='epoch')
if epoch % cfg['train']['val_step'] == 0:
all_loss = []
all_loss_seg = []
all_loss_cls = []
total_correct = 0
total_correct_seg = 0
total_seen = 0
total_seen_seg = 0
correct_per_label = np.zeros(n_classes)
total_per_label = np.zeros(n_classes)
generator.eval()
val_sampler.set_epoch(epoch)
lattice_info = defaultdict(list)
with torch.no_grad():
for i, (pcd, labels, mask) in tqdm.tqdm(enumerate(dataloader_val)):
pcd = pcd.permute(0, 2, 1)[:, :, None].cuda()
mask = mask.float().cuda(non_blocking=True)
labels = labels.long().cuda(non_blocking=True)
class_pred, mask_pred, lattices_sizes = generator(pcd)
seg_loss = bce_loss(mask_pred[:, 0, 0], mask)
cls_loss = cross_entropy_loss(class_pred, labels)
loss = (1 - seg_loss_weight) * cls_loss + seg_loss_weight * seg_loss
for ind, value in enumerate(lattices_sizes):
lattice_info['lattice_{}'.format(ind)].append(value[0])
lattice_info['norm_l_feat_{}'.format(ind)].append(value[1].item())
lattice_info['norm_l_feat_var_{}'.format(ind)].append(value[2].item())
loss_all, all_gathered = gather_results(loss_dict={'loss': loss, 'loss_cls': cls_loss,
'loss_seg': seg_loss},
class_pred=class_pred,
mask_pred=mask_pred,
labels=labels,
mask=mask)
all_loss.append(loss_all['loss'].item())
all_loss_cls.append(loss_all['loss_cls'].item())
all_loss_seg.append(loss_all['loss_seg'].item())
if args.rank == 0:
for class_pred_np, mask_pred_np, labels_np, mask_np in all_gathered:
total_correct += np.sum(class_pred_np == labels_np)
total_seen += labels_np.shape[0]
for batch_id in range(labels_np.shape[0]):
correct_per_label[labels_np[batch_id]] += class_pred_np[batch_id] == labels_np[batch_id]
total_per_label[labels_np[batch_id]] += 1
total_correct_seg += np.sum(mask_pred_np == mask_np)
total_seen_seg += mask_np.shape[0] * mask_np.shape[-1]
del pcd, labels, mask
if args.rank == 0:
writer.add_scalar('val/cls_acc', total_correct / float(total_seen), global_step=epoch)
writer.add_scalar('val/seg_acc', total_correct_seg / float(total_seen_seg), global_step=epoch)
writer.add_scalar('val/m_acc', np.mean(correct_per_label / total_per_label), global_step=epoch)
writer.add_scalar('val/loss', np.mean(all_loss), global_step=epoch)
writer.add_scalar('val/loss_seg', np.mean(all_loss_seg), global_step=epoch)
writer.add_scalar('val/loss_cls', np.mean(all_loss_cls), global_step=epoch)
writer.add_scalar('train/loss_epoch', np.mean(loss_train_all), global_step=epoch)
writer.add_scalar('train/loss_seg_epoch', np.mean(seg_loss_train_all), global_step=epoch)
writer.add_scalar('train/loss_cls_epoch', np.mean(cls_loss_train_all), global_step=epoch)
if total_correct / float(total_seen) > max_val_acc:
max_val_acc = total_correct / float(total_seen)
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=0, epoch_name='best')
if np.mean(correct_per_label / total_per_label) > max_val_macc:
max_val_macc = np.mean(correct_per_label / total_per_label)
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=0, epoch_name='macc_best')
if args.rank == 0:
writer.flush()
if args.rank == 0:
writer.close()
| import sys
import os
import numpy as np
import yaml
from collections import defaultdict
import torch
from torch import nn
import tqdm
import argparse
sys.path.append(os.path.realpath(__file__))
from datasets.scanobjectnn import ScanObjectNN
from utils import train_util
from utils import train_util_distributed
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
torch.set_num_threads(1)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="path to py model", required=True, default='.configs/config_seg.yaml')
parser.add_argument("exp_name", help="name of the exp")
parser.add_argument("--master", required=True)
parser.add_argument("--rank", required=True, type=int)
parser.add_argument("--num_nodes", required=True, type=int)
args = parser.parse_args()
config_path = args.config
dist_backend = 'nccl'
# Url used to setup distributed training
dist_url = "tcp://{}".format(args.master)
dist.init_process_group(backend=dist_backend,
init_method=dist_url,
rank=args.rank,
world_size=args.num_nodes)
with open(config_path, 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
# check if we masked all other devices
assert(torch.cuda.device_count() == 1)
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
gen_model_file, gen_model_name = train_util.check_model_paths(cfg['model']['generator'])[0]
exp_descr = args.exp_name
scan_train = ScanObjectNN(data_dir=cfg['data']['path'],
train=True,
center=cfg['data']['center'],
normalize=cfg['data']['normalize'],
subsample=cfg['data']['subsample'] if 'subsample' in cfg['data'] else None)
scan_val = ScanObjectNN(data_dir=cfg['data']['path_val'],
train=False,
center=cfg['data']['center'],
normalize=cfg['data']['normalize'],
subsample=cfg['data']['subsample'] if 'subsample' in cfg['data'] else None)
train_sampler = torch.utils.data.distributed.DistributedSampler(scan_train)
val_sampler = torch.utils.data.distributed.DistributedSampler(scan_val)
dataloader_train = torch.utils.data.DataLoader(scan_train,
batch_size=cfg['data']['batch_size'],
shuffle=(train_sampler is None),
num_workers=cfg['data']['num_workers'],
pin_memory=False, sampler=train_sampler)
dataloader_val = torch.utils.data.DataLoader(scan_val,
batch_size=cfg['data']['batch_size_val'],
shuffle=False,
num_workers=cfg['data']['num_workers'],
pin_memory=False, sampler=val_sampler)
# model part
del cfg['model']['generator']
params_dict_gen = cfg['model']
torch.manual_seed(42)
if args.rank == 0:
writer, exp_dir, full_desc = train_util.create_experiment(exp_descr,
params_dict={'exp_root': cfg['experiment']['root'],
'writer_root': cfg['experiment']['writer_root'],
'config_path': config_path})
generator = train_util.get_model(gen_model_file, params_dict_gen,
exp_dir=exp_dir).cuda()
else:
generator = train_util.get_model(gen_model_file, params_dict_gen, exp_dir=None).cuda()
generator = torch.nn.parallel.DistributedDataParallel(torch.nn.SyncBatchNorm.convert_sync_batchnorm(generator),
device_ids=[0],
output_device=0)
# print('generator', generator)
print('generator_params', sum(p.numel() for p in generator.parameters()))
if 'scale_lr' in cfg['train']:
opt_params_dict = [{'params': map(lambda p: p[1],
filter(lambda p: not p[0].endswith('scale'),
generator.named_parameters()))},
{'params': map(lambda p: p[1],
filter(lambda p: p[0].endswith('scale'),
generator.named_parameters())), 'lr': cfg['train']['scale_lr']}]
else:
opt_params_dict = generator.parameters()
optimizer = train_util.make_optimizer(opt_params_dict, cfg['train']['optimizer'])
scheduler_adaptive = cfg['train']['scheduler']['type'] == 'ReduceLROnPlateau'
scheduler = train_util.make_scheduler(optimizer, cfg['train']['scheduler'])
# print(scheduler)
# print('Is scheduler adaptive? {}'.format(scheduler_adaptive))
if 'restore' in cfg:
print('restoring')
g_ckpt_path = cfg['restore']['generator']
g_opt_ckpt_path = cfg['restore']['optimizer']
train_util_distributed.restore_exp(objects=[generator, optimizer],
names=[g_ckpt_path, g_opt_ckpt_path])
if 'new_lr' in cfg['restore']:
for param_group in optimizer.param_groups:
param_group['lr'] = cfg['restore']['new_lr']
show_each = cfg['train']['show_each']
data_iters = 0
seg_loss_weight = cfg['train']['seg_weight']
label_smooth = False
if 'label_smooth' in cfg['train']:
label_smooth = cfg['train']['label_smooth']
cross_entropy_loss = nn.CrossEntropyLoss()
bce_loss = nn.BCEWithLogitsLoss()
def gather_results(loss_dict,
class_pred, mask_pred, labels, mask):
loss_all = train_util_distributed.reduce_loss_dict(loss_dict)
with torch.no_grad():
class_pred_this = np.argmax(class_pred.detach().cpu().numpy(), axis=1)
mask_pred_this = (torch.sigmoid(mask_pred[:, 0, 0]) > 0.5).detach().cpu().numpy()
mask_this = mask.detach().cpu().numpy()
labels_this = labels.detach().cpu().numpy()
all_gathered = train_util_distributed.all_gather((class_pred_this,
mask_pred_this,
labels_this,
mask_this))
return loss_all, all_gathered
n_classes = 15
max_val_acc = 0
max_val_macc = 0
for epoch in range(cfg['train']['num_epochs']):
train_sampler.set_epoch(epoch)
generator.train()
total_correct = 0
total_correct_seg = 0
total_seen = 0
total_seen_seg = 0
loss_train_all = []
cls_loss_train_all = []
seg_loss_train_all = []
for i, (pcd, labels, mask) in tqdm.tqdm(enumerate(dataloader_train)):
pcd = pcd.permute(0, 2, 1)[:, :, None].cuda()
mask = mask.float().cuda(non_blocking=True)
labels = labels.long().cuda(non_blocking=True)
class_pred, mask_pred, lattices_sizes = generator(pcd)
seg_loss = bce_loss(mask_pred[:, 0, 0], mask)
cls_loss = cross_entropy_loss(class_pred, labels)
loss = (1-seg_loss_weight) * cls_loss + seg_loss_weight * seg_loss
loss.backward()
if 'grad_stats' in cfg['train']:
with torch.no_grad():
if args.rank == 0 and data_iters % cfg['train']['grad_stats']['iters'] == 0:
for name, param in generator.module.named_parameters():
# writer.add_histogram('weight_' + name, param, global_step=data_iters)
if param.requires_grad:
if param.grad is None:
print(name)
if cfg['train']['grad_stats']['hist']:
writer.add_histogram('stats/grad_' + name,
param.grad,
global_step=data_iters, bins='auto')
writer.add_scalar('stats/grad_n_' + name,
torch.norm(param.grad),
global_step=data_iters)
writer.flush()
optimizer.step()
optimizer.zero_grad()
loss_all, all_gathered = gather_results(loss_dict={'loss': loss, 'loss_cls': cls_loss,
'loss_seg': seg_loss},
class_pred=class_pred,
mask_pred=mask_pred,
labels=labels,
mask=mask)
if args.rank == 0:
loss_train_all.append(loss_all['loss'].item())
cls_loss_train_all.append(loss_all['loss_cls'].item())
seg_loss_train_all.append(loss_all['loss_seg'].item())
for class_pred_np, mask_pred_np, labels_np, mask_np in all_gathered:
total_correct += np.sum(class_pred_np == labels_np)
total_seen += labels_np.shape[0]
total_correct_seg += np.sum(mask_pred_np == mask_np)
total_seen_seg += mask_np.shape[0] * mask_np.shape[-1]
if args.rank == 0:
for key in loss_all.keys():
writer.add_scalar('train/{}'.format(key), loss_all[key].item(), global_step=data_iters)
# lattice stats
for i, value in enumerate(lattices_sizes):
writer.add_scalar('train/lattice_{}'.format(i),
value[0], global_step=data_iters)
writer.add_scalar('train/norm_l_feat_{}'.format(i),
value[1].item(), global_step=data_iters)
writer.add_scalar('train/norm_l_feat_var_{}'.format(i),
value[2].item(), global_step=data_iters)
if data_iters % cfg['train']['save_each'] == 0 and data_iters > 0:
generator.eval()
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=data_iters, epoch_name='iter')
generator.train()
data_iters += 1
if not scheduler_adaptive:
scheduler.step(data_iters)
del pcd, labels
if args.rank == 0:
print('on train cls_acc {}, seg_acc {}'.format(total_correct / float(total_seen),
total_correct_seg / float(total_seen_seg)))
if args.rank == 0 and epoch % cfg['train']['save_each_epoch'] == 0 and epoch > 0:
print('saving ckpt')
train_util.save_exp([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir, epoch=epoch, epoch_name='epoch')
if epoch % cfg['train']['val_step'] == 0:
all_loss = []
all_loss_seg = []
all_loss_cls = []
total_correct = 0
total_correct_seg = 0
total_seen = 0
total_seen_seg = 0
correct_per_label = np.zeros(n_classes)
total_per_label = np.zeros(n_classes)
generator.eval()
val_sampler.set_epoch(epoch)
lattice_info = defaultdict(list)
with torch.no_grad():
for i, (pcd, labels, mask) in tqdm.tqdm(enumerate(dataloader_val)):
pcd = pcd.permute(0, 2, 1)[:, :, None].cuda()
mask = mask.float().cuda(non_blocking=True)
labels = labels.long().cuda(non_blocking=True)
class_pred, mask_pred, lattices_sizes = generator(pcd)
seg_loss = bce_loss(mask_pred[:, 0, 0], mask)
cls_loss = cross_entropy_loss(class_pred, labels)
loss = (1 - seg_loss_weight) * cls_loss + seg_loss_weight * seg_loss
for ind, value in enumerate(lattices_sizes):
lattice_info['lattice_{}'.format(ind)].append(value[0])
lattice_info['norm_l_feat_{}'.format(ind)].append(value[1].item())
lattice_info['norm_l_feat_var_{}'.format(ind)].append(value[2].item())
loss_all, all_gathered = gather_results(loss_dict={'loss': loss, 'loss_cls': cls_loss,
'loss_seg': seg_loss},
class_pred=class_pred,
mask_pred=mask_pred,
labels=labels,
mask=mask)
all_loss.append(loss_all['loss'].item())
all_loss_cls.append(loss_all['loss_cls'].item())
all_loss_seg.append(loss_all['loss_seg'].item())
if args.rank == 0:
for class_pred_np, mask_pred_np, labels_np, mask_np in all_gathered:
total_correct += np.sum(class_pred_np == labels_np)
total_seen += labels_np.shape[0]
for batch_id in range(labels_np.shape[0]):
correct_per_label[labels_np[batch_id]] += class_pred_np[batch_id] == labels_np[batch_id]
total_per_label[labels_np[batch_id]] += 1
total_correct_seg += np.sum(mask_pred_np == mask_np)
total_seen_seg += mask_np.shape[0] * mask_np.shape[-1]
del pcd, labels, mask
if args.rank == 0:
writer.add_scalar('val/cls_acc', total_correct / float(total_seen), global_step=epoch)
writer.add_scalar('val/seg_acc', total_correct_seg / float(total_seen_seg), global_step=epoch)
writer.add_scalar('val/m_acc', np.mean(correct_per_label / total_per_label), global_step=epoch)
writer.add_scalar('val/loss', np.mean(all_loss), global_step=epoch)
writer.add_scalar('val/loss_seg', np.mean(all_loss_seg), global_step=epoch)
writer.add_scalar('val/loss_cls', np.mean(all_loss_cls), global_step=epoch)
writer.add_scalar('train/loss_epoch', np.mean(loss_train_all), global_step=epoch)
writer.add_scalar('train/loss_seg_epoch', np.mean(seg_loss_train_all), global_step=epoch)
writer.add_scalar('train/loss_cls_epoch', np.mean(cls_loss_train_all), global_step=epoch)
if total_correct / float(total_seen) > max_val_acc:
max_val_acc = total_correct / float(total_seen)
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=0, epoch_name='best')
if np.mean(correct_per_label / total_per_label) > max_val_macc:
max_val_macc = np.mean(correct_per_label / total_per_label)
train_util_distributed.save_exp_parallel([generator, optimizer],
['generator', 'g_opt'], exp_path=exp_dir,
epoch=0, epoch_name='macc_best')
if args.rank == 0:
writer.flush()
if args.rank == 0:
writer.close()
| en | 0.654536 | # Url used to setup distributed training # check if we masked all other devices # model part # print('generator', generator) # print(scheduler) # print('Is scheduler adaptive? {}'.format(scheduler_adaptive)) # writer.add_histogram('weight_' + name, param, global_step=data_iters) # lattice stats | 2.001143 | 2 |
src/testing/migrations/0001_initial.py | DiceNameIsMy/testing_sitev2 | 1 | 6618713 | # Generated by Django 3.2.4 on 2021-06-14 03:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
],
options={
'verbose_name': 'Category of tests',
'verbose_name_plural': 'Categories of tests',
},
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tests', to='testing.category')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=512)),
('test', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions', to='testing.test')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=512)),
('is_correct', models.BooleanField(default=False)),
('question', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='answers', to='testing.question')),
],
),
]
| # Generated by Django 3.2.4 on 2021-06-14 03:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
],
options={
'verbose_name': 'Category of tests',
'verbose_name_plural': 'Categories of tests',
},
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tests', to='testing.category')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=512)),
('test', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions', to='testing.test')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=512)),
('is_correct', models.BooleanField(default=False)),
('question', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='answers', to='testing.question')),
],
),
]
| en | 0.844748 | # Generated by Django 3.2.4 on 2021-06-14 03:43 | 1.889198 | 2 |
Python Codes/kmeans_sample.py | agozdogan/Piece-Of-Programming | 0 | 6618714 | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy as np
df = pd.DataFrame({'x': [39.472778,
32.506993,
27.863137,
55.726274,
74.301699,
78.945555,
18.575425,
48.760490,
23.219281,
23.219281,
9.287712,
39.472778,
27.863137,
25.541209,
30.185065,
30.185065,
44.116634,
11.609640,
23.219281,
39.472778,
3.863137,
4.828921,
6.116634,
21.150850,
7.726274,
2.897353,
3.863137,
10.575425,
20.975797,
1.287712,
13.219281,
25.541209,
5.287712,
11.609640,
4.643856,
16.253497,
6.965784,
11.609640,
32.506993,
15.863137,
3.863137,
4.506993,
18.506993,
7.082418,
2.575425,
6.438562,
13.219281,
17.162015,
0.000000,
2.643856,
9.534453,
15.863137,
13.348234,
20.975797,
20.975797,
9.534453,
17.162015,
27.863137,
19.828921,
3.863137,
9.013987,
31.726274,
7.082418,
9.013987,
4.506993,
27.760490,
20.975797,
1.287712,
5.287712,
32.417140,
17.185065,
7.627562,
36.230921,
15.255125,
3.813781,
22.882687,
55.726274,
25.116634,
4.506993,
9.013987,
43.623627,
13.520980,
8.692059,
5.150850,
21.150850,
7.627562,
0.000000,
14.541209,
34.370130,
21.150850,
10.575425,
58.164836,
18.506993,
7.931569,
21.150850,
74.301699,
21.150850,
4.506993,
7.726274,
10.623627,
12.233268,
7.726274,
6.438562,
18.506993,
19.068906,
1.287712,
17.185065,
47.589411,
19.828921,
37.013987,
63.452549,
37.013987,
26.438562,
37.013987,
78.945555,
31.726274,
7.082418,
7.082418,
13.520980,
50.233268,
5.472778,
8.048202,
17.185065,
28.603359,
2.253497,
23.794706,
32.417140,
26.438562,
32.417140,
55.299827,
32.417140,
19.068906,
38.137812,
18.575425,
11.897353,
2.575425,
9.013987,
8.692059,
31.726274,
5.472778,
4.185065,
23.794706,
3.813781,
0.000000,
6.609640,
43.603359,
14.541209,
2.906891,
49.417140,
23.255125,
20.348234,
23.255125,
48.760490,
15.863137,
6.438562,
4.506993,
5.150850,
26.438562,
8.048202,
4.185065,
22.472778,
9.534453,
0.321928,
6.609640,
18.575425,
19.828921,
20.897353,
41.794706,
18.575425,
20.897353,
25.541209,
23.219281,
10.575425,
3.219281,
6.760490,
5.150850,
18.506993,
4.185065,
5.794706,
5.472778,
1.906891,
0.000000,
3.965784,
11.627562,
11.897353,
0.000000,
37.789578,
5.813781,
11.627562,
29.068906
],
'y': [
23.219281,
14.541209,
2.897353,
3.541209,
1.287712,
13.219281,
4.828921,
0.643856,
1.609640,
1.321928,
4.185065,
25.116634,
31.292830,
9.253497,
37.551396,
6.258566,
37.551396,
37.551396,
43.809962,
9.287712,
5.287712,
0.000000,
1.287712,
0.000000,
5.287712,
2.253497,
0.000000,
0.321928,
0.000000,
24.789578,
15.863137,
1.287712,
6.609640,
3.219281,
0.321928,
1.287712,
5.150850,
5.472778,
39.472778,
13.219281,
0.643856,
1.287712,
3.541209,
17.185065,
5.794706,
1.609640,
1.609640,
3.965784,
36.230921,
3.863137,
30.185065,
17.185065,
27.863137,
23.219281,
20.897353,
34.828921,
39.472778,
27.863137,
14.541209,
1.609640,
5.472778,
4.185065,
23.794706,
5.472778,
4.828921,
2.575425,
5.287712,
19.068906,
1.287712,
17.185065,
14.541209,
9.534453,
28.603359,
15.255125,
3.813781,
17.162015,
25.541209,
5.287712,
3.863137,
4.185065,
5.150850,
19.828921,
6.438562,
3.541209,
4.828921,
11.897353,
13.348234,
1.609640,
17.185065,
20.975797,
34.828921,
53.404346,
9.287712,
13.931569,
44.116634,
30.185065,
6.609640,
2.253497,
1.287712,
1.287712,
18.506993,
5.472778,
0.321928,
2.897353,
0.000000,
22.882687,
3.219281,
15.863137,
9.534453,
19.828921,
15.863137,
18.506993,
10.575425,
31.726274,
30.185065,
2.643856,
3.541209,
6.116634,
7.082418,
31.726274,
9.335915,
5.472778,
5.794706,
17.185065,
3.813781,
0.321928,
13.219281,
28.603359,
30.404346,
15.863137,
18.775698,
12.517132,
62.585660,
44.116634,
9.253497,
3.541209,
2.575425,
2.253497,
18.506993,
5.472778,
2.575425,
2.575425,
2.643856,
22.882687,
1.287712,
11.897353,
15.255125,
5.287712,
18.506993,
18.775698,
13.931569,
27.863137,
11.609640,
3.965784,
1.609640,
0.643856,
0.965784,
13.219281,
3.219281,
2.253497,
2.897353,
5.287712,
22.882687,
5.150850,
19.828921,
3.813781,
7.931569,
10.575425,
12.517132,
13.931569,
50.233268,
23.219281,
6.609640,
2.897353,
3.863137,
2.575425,
18.506993,
6.438562,
2.575425,
3.541209,
13.219281,
26.696468,
5.472778,
22.472778,
17.162015,
25.116634,
31.726274,
62.585660,
27.863137,
50.233268]
})
np.random.seed(200)
k = 2
# centroids[i] = [x, y]
centroids = {
i+1: [np.random.randint(0, 80), np.random.randint(0, 80)]
for i in range(k)
}
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color='k')
colmap = {1: 'r', 2: 'g'}
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
def assignment(df, centroids):
for i in centroids.keys():
# sqrt((x1 - x2)^2 - (y1 - y2)^2)
df['distance_from_{}'.format(i)] = (
np.sqrt(
(float(df['x'][0]) - centroids[i][0]) ** 2
+ (float(df['y'][0]) - centroids[i][1]) ** 2
)
)
centroid_distance_cols = ['distance_from_{}'.format(i) for i in centroids.keys()]
df['closest'] = df.loc[:, centroid_distance_cols].idxmin(axis=1)
df['closest'] = df['closest'].map(lambda x: int(x.lstrip('distance_from_')))
df['color'] = df['closest'].map(lambda x: colmap[x])
return df
df = assignment(df, centroids)
print(df.head())
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
| import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy as np
df = pd.DataFrame({'x': [39.472778,
32.506993,
27.863137,
55.726274,
74.301699,
78.945555,
18.575425,
48.760490,
23.219281,
23.219281,
9.287712,
39.472778,
27.863137,
25.541209,
30.185065,
30.185065,
44.116634,
11.609640,
23.219281,
39.472778,
3.863137,
4.828921,
6.116634,
21.150850,
7.726274,
2.897353,
3.863137,
10.575425,
20.975797,
1.287712,
13.219281,
25.541209,
5.287712,
11.609640,
4.643856,
16.253497,
6.965784,
11.609640,
32.506993,
15.863137,
3.863137,
4.506993,
18.506993,
7.082418,
2.575425,
6.438562,
13.219281,
17.162015,
0.000000,
2.643856,
9.534453,
15.863137,
13.348234,
20.975797,
20.975797,
9.534453,
17.162015,
27.863137,
19.828921,
3.863137,
9.013987,
31.726274,
7.082418,
9.013987,
4.506993,
27.760490,
20.975797,
1.287712,
5.287712,
32.417140,
17.185065,
7.627562,
36.230921,
15.255125,
3.813781,
22.882687,
55.726274,
25.116634,
4.506993,
9.013987,
43.623627,
13.520980,
8.692059,
5.150850,
21.150850,
7.627562,
0.000000,
14.541209,
34.370130,
21.150850,
10.575425,
58.164836,
18.506993,
7.931569,
21.150850,
74.301699,
21.150850,
4.506993,
7.726274,
10.623627,
12.233268,
7.726274,
6.438562,
18.506993,
19.068906,
1.287712,
17.185065,
47.589411,
19.828921,
37.013987,
63.452549,
37.013987,
26.438562,
37.013987,
78.945555,
31.726274,
7.082418,
7.082418,
13.520980,
50.233268,
5.472778,
8.048202,
17.185065,
28.603359,
2.253497,
23.794706,
32.417140,
26.438562,
32.417140,
55.299827,
32.417140,
19.068906,
38.137812,
18.575425,
11.897353,
2.575425,
9.013987,
8.692059,
31.726274,
5.472778,
4.185065,
23.794706,
3.813781,
0.000000,
6.609640,
43.603359,
14.541209,
2.906891,
49.417140,
23.255125,
20.348234,
23.255125,
48.760490,
15.863137,
6.438562,
4.506993,
5.150850,
26.438562,
8.048202,
4.185065,
22.472778,
9.534453,
0.321928,
6.609640,
18.575425,
19.828921,
20.897353,
41.794706,
18.575425,
20.897353,
25.541209,
23.219281,
10.575425,
3.219281,
6.760490,
5.150850,
18.506993,
4.185065,
5.794706,
5.472778,
1.906891,
0.000000,
3.965784,
11.627562,
11.897353,
0.000000,
37.789578,
5.813781,
11.627562,
29.068906
],
'y': [
23.219281,
14.541209,
2.897353,
3.541209,
1.287712,
13.219281,
4.828921,
0.643856,
1.609640,
1.321928,
4.185065,
25.116634,
31.292830,
9.253497,
37.551396,
6.258566,
37.551396,
37.551396,
43.809962,
9.287712,
5.287712,
0.000000,
1.287712,
0.000000,
5.287712,
2.253497,
0.000000,
0.321928,
0.000000,
24.789578,
15.863137,
1.287712,
6.609640,
3.219281,
0.321928,
1.287712,
5.150850,
5.472778,
39.472778,
13.219281,
0.643856,
1.287712,
3.541209,
17.185065,
5.794706,
1.609640,
1.609640,
3.965784,
36.230921,
3.863137,
30.185065,
17.185065,
27.863137,
23.219281,
20.897353,
34.828921,
39.472778,
27.863137,
14.541209,
1.609640,
5.472778,
4.185065,
23.794706,
5.472778,
4.828921,
2.575425,
5.287712,
19.068906,
1.287712,
17.185065,
14.541209,
9.534453,
28.603359,
15.255125,
3.813781,
17.162015,
25.541209,
5.287712,
3.863137,
4.185065,
5.150850,
19.828921,
6.438562,
3.541209,
4.828921,
11.897353,
13.348234,
1.609640,
17.185065,
20.975797,
34.828921,
53.404346,
9.287712,
13.931569,
44.116634,
30.185065,
6.609640,
2.253497,
1.287712,
1.287712,
18.506993,
5.472778,
0.321928,
2.897353,
0.000000,
22.882687,
3.219281,
15.863137,
9.534453,
19.828921,
15.863137,
18.506993,
10.575425,
31.726274,
30.185065,
2.643856,
3.541209,
6.116634,
7.082418,
31.726274,
9.335915,
5.472778,
5.794706,
17.185065,
3.813781,
0.321928,
13.219281,
28.603359,
30.404346,
15.863137,
18.775698,
12.517132,
62.585660,
44.116634,
9.253497,
3.541209,
2.575425,
2.253497,
18.506993,
5.472778,
2.575425,
2.575425,
2.643856,
22.882687,
1.287712,
11.897353,
15.255125,
5.287712,
18.506993,
18.775698,
13.931569,
27.863137,
11.609640,
3.965784,
1.609640,
0.643856,
0.965784,
13.219281,
3.219281,
2.253497,
2.897353,
5.287712,
22.882687,
5.150850,
19.828921,
3.813781,
7.931569,
10.575425,
12.517132,
13.931569,
50.233268,
23.219281,
6.609640,
2.897353,
3.863137,
2.575425,
18.506993,
6.438562,
2.575425,
3.541209,
13.219281,
26.696468,
5.472778,
22.472778,
17.162015,
25.116634,
31.726274,
62.585660,
27.863137,
50.233268]
})
np.random.seed(200)
k = 2
# centroids[i] = [x, y]
centroids = {
i+1: [np.random.randint(0, 80), np.random.randint(0, 80)]
for i in range(k)
}
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color='k')
colmap = {1: 'r', 2: 'g'}
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
def assignment(df, centroids):
for i in centroids.keys():
# sqrt((x1 - x2)^2 - (y1 - y2)^2)
df['distance_from_{}'.format(i)] = (
np.sqrt(
(float(df['x'][0]) - centroids[i][0]) ** 2
+ (float(df['y'][0]) - centroids[i][1]) ** 2
)
)
centroid_distance_cols = ['distance_from_{}'.format(i) for i in centroids.keys()]
df['closest'] = df.loc[:, centroid_distance_cols].idxmin(axis=1)
df['closest'] = df['closest'].map(lambda x: int(x.lstrip('distance_from_')))
df['color'] = df['closest'].map(lambda x: colmap[x])
return df
df = assignment(df, centroids)
print(df.head())
fig = plt.figure(figsize=(5, 5))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k')
for i in centroids.keys():
plt.scatter(*centroids[i], color=colmap[i])
plt.xlim(0, 80)
plt.ylim(0, 80)
plt.show()
| en | 0.401308 | # centroids[i] = [x, y] # sqrt((x1 - x2)^2 - (y1 - y2)^2) | 2.865525 | 3 |
bin/gist.py | clickyotomy/gist-shell | 2 | 6618715 | <filename>bin/gist.py
#! /usr/bin/env python2.7
'''
gist.py: A simple command line interface for creating, fetching, browsing,
updating and deleting Gists on GitHub.
'''
import os
import sys
import json
import socket
import getpass
# Try importing the library during development.
try:
import imp
imp.find_module('gister')
except ImportError:
PATH = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(PATH)))
# Should work, if the library is already installed.
from gister import (authorizations, gists)
# Default path to store credentials locally.
DEAFULT_CREDENTIALS_PATH = '/'.join([os.path.expanduser('~'),
'.gist-shell', 'vault.json'])
# For colorama.
# from colorama import init, Fore, Back, Style
# init(autoreset=True)
def get_external_ip_addr():
    '''
    Return the host's externally visible IP address.

    Opens a UDP socket towards github.com:80 (UDP connect sends no
    packets; it only fixes the local interface) and reads back the local
    address the OS selected.  Falls back to the fully-qualified domain
    name when the socket operation fails.
    '''
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(("github.com", 80))
        local_addr = probe.getsockname()[0]
        probe.close()
    except socket.error:
        # Could not determine an address via the socket; use the FQDN.
        return socket.getfqdn()
    return local_addr
def fetch_credentials(path=DEAFULT_CREDENTIALS_PATH, fetch=None):
    '''
    Fetch a set of credentials from the local vault file.

    :param path: vault file to read (JSON mapping of app-name entries).
    :param fetch: name of the entry to load; None loads the default one.
    :return: the `credentials` dict for the entry, or None when the vault
             is missing/malformed or the entry cannot be found.

    Caveats:
        1. When 'fetch' is None, it loads the default set of credentials.
        2. If a duplicate default exists, return the first one.
        3. If fetch is not set to None and the key doesn't exist,
           the function will return None.
    '''
    if os.path.exists(path):
        try:
            # Context manager guarantees the handle is closed (the
            # previous version leaked it until garbage collection).
            with open(path, 'r') as vault_file:
                vault = json.loads(vault_file.read())
            if fetch is None:
                for _ in vault.keys():
                    if vault[_]['default']:
                        return vault[_]['credentials']
            # Falls through when fetch is None and no default was found;
            # vault[None] then raises KeyError, which maps to None below.
            return vault[fetch]['credentials']
        except (ValueError, KeyError):
            return None
    return None
def update_credentials(data, name, path=DEAFULT_CREDENTIALS_PATH, force=False):
    '''
    Merge the payload `data` ({name: {credentials, default}}) into the vault.

    :param data: payload mapping to merge into the vault.
    :param name: key of the entry being added/updated.
    :param path: vault file to read/write.
    :param force: overwrite an existing entry with the same name.
    :return: the updated vault dict, or None on refusal/failure.

    Caveats:
        1. If the payload has the default flag set, it checks against
           all the stored credentials; if a duplicate exists, it unsets
           the flag on the duplicate.
        2. If the vault has a malformed JSON, the function resets it to
           an empty vault.
        3. If the vault file cannot be written, the function returns None.
        4. If a duplicate credential exists, the function returns None;
           if force is set to True, it updates the existing credential.
    '''
    vault = {}
    if os.path.exists(path):
        try:
            # Context manager keeps the read handle from leaking.
            with open(path, 'r') as vault_file:
                vault = json.loads(vault_file.read())
        except (KeyError, ValueError):
            vault = {}
    # Refuse to clobber an existing entry unless forced.
    if name in vault.keys() and not force:
        return None
    if data[name]['default']:
        # Only one entry may be flagged default; unset any duplicates.
        for _ in vault.keys():
            try:
                if vault[_]['default']:
                    vault[_].update({'default': False})
            except (KeyError, ValueError):
                # NOTE(review): returns None even when `force` is set, so
                # the vault reset above is never persisted -- confirm that
                # this is the intended behavior.
                if force:
                    vault = {}
                return None
    vault.update(data)
    try:
        with open(path, 'w') as vault_file:
            vault_file.write(json.dumps(vault, indent=4, sort_keys=True))
    except IOError:
        return None
    return vault
def login(path=DEAFULT_CREDENTIALS_PATH, api=None, default=False):
    '''
    Create an authorization (access token; scope: 'gist') on GitHub.
    Works with HTTP Basic Authentication (RFC-2617).

    :param path: vault file the new credentials are written to.
    :param api: optional API endpoint override forwarded to the
                `authorizations` helper.
    :param default: store (and force-write) the credentials as the
                    default vault entry.
    :return: True when the authorization was created, False otherwise.

    Caveats:
        1. For username, hit return to use the login username.
        2. For github-2fa-auth, hit return to skip.
        3. 'gist-shell' will be appended to auth-token-note for storing
           the description for the Personal Access Token.
    '''
    # Prompt interactively; an empty username falls back to the OS login.
    username = raw_input('github-username: ').strip()
    username = getpass.getuser() if len(username) < 1 else username
    password = getpass.getpass('github-password: ').strip()
    # Two-factor one-time password is optional.
    auth_2fa = raw_input('github-2fa-auth: ').strip()
    auth_2fa = None if len(auth_2fa) < 1 else auth_2fa
    token_note = raw_input('auth-token-note: ').strip()
    response = authorizations.create_authorization(auth=(username, password),
                                                   note=token_note,
                                                   otp=auth_2fa, api=api)
    # response is (success_flag, payload).
    if response[0]:
        data = response[1]
        print data
        to_write = {
            data['app']['name']: {
                'credentials': {
                    'id': data['id'],
                    'token': data['token'],
                    'username': username,
                    'created-at': data['created_at'],
                },
                'default': default
            }
        }
        # `force` mirrors `default` so a new default entry can replace
        # the previous one in the vault.
        update_credentials(data=to_write, name=data['app']['name'], path=path,
                           force=default)
        return True
    return False
def upload(payload, token, description=None, public=False, update=False):
    '''
    Upload the payload to GitHub.

    NOTE(review): not implemented yet -- the body is a stub.

    Caveats:
        1. The Gists are private by default.
        2. If no description is provided, a default string with the
           login username, hostname, IP address and time (in UTC) will
           be provided.
    '''
    pass
| <filename>bin/gist.py
#! /usr/bin/env python2.7
'''
gist.py: A simple command line interface for creating, fetching, browsing,
updating and deleting Gists on GitHub.
'''
import os
import sys
import json
import socket
import getpass
# Try importing the library during development.
try:
import imp
imp.find_module('gister')
except ImportError:
PATH = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(PATH)))
# Should work, if the library is already installed.
from gister import (authorizations, gists)
# Default path to store credentials locally.
DEAFULT_CREDENTIALS_PATH = '/'.join([os.path.expanduser('~'),
'.gist-shell', 'vault.json'])
# For colorama.
# from colorama import init, Fore, Back, Style
# init(autoreset=True)
def get_external_ip_addr():
    '''
    Return the host's externally visible IP address.

    Opens a UDP socket towards github.com:80 (UDP connect sends no
    packets; it only fixes the local interface) and reads back the local
    address the OS selected.  Falls back to the fully-qualified domain
    name when the socket operation fails.
    '''
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(("github.com", 80))
        local_addr = probe.getsockname()[0]
        probe.close()
    except socket.error:
        # Could not determine an address via the socket; use the FQDN.
        return socket.getfqdn()
    return local_addr
def fetch_credentials(path=DEAFULT_CREDENTIALS_PATH, fetch=None):
    '''
    Fetch a set of credentials from the local vault file.

    :param path: vault file to read (JSON mapping of app-name entries).
    :param fetch: name of the entry to load; None loads the default one.
    :return: the `credentials` dict for the entry, or None when the vault
             is missing/malformed or the entry cannot be found.

    Caveats:
        1. When 'fetch' is None, it loads the default set of credentials.
        2. If a duplicate default exists, return the first one.
        3. If fetch is not set to None and the key doesn't exist,
           the function will return None.
    '''
    if os.path.exists(path):
        try:
            # Context manager guarantees the handle is closed (the
            # previous version leaked it until garbage collection).
            with open(path, 'r') as vault_file:
                vault = json.loads(vault_file.read())
            if fetch is None:
                for _ in vault.keys():
                    if vault[_]['default']:
                        return vault[_]['credentials']
            # Falls through when fetch is None and no default was found;
            # vault[None] then raises KeyError, which maps to None below.
            return vault[fetch]['credentials']
        except (ValueError, KeyError):
            return None
    return None
def update_credentials(data, name, path=DEAFULT_CREDENTIALS_PATH, force=False):
    '''
    Merge the payload `data` ({name: {credentials, default}}) into the vault.

    :param data: payload mapping to merge into the vault.
    :param name: key of the entry being added/updated.
    :param path: vault file to read/write.
    :param force: overwrite an existing entry with the same name.
    :return: the updated vault dict, or None on refusal/failure.

    Caveats:
        1. If the payload has the default flag set, it checks against
           all the stored credentials; if a duplicate exists, it unsets
           the flag on the duplicate.
        2. If the vault has a malformed JSON, the function resets it to
           an empty vault.
        3. If the vault file cannot be written, the function returns None.
        4. If a duplicate credential exists, the function returns None;
           if force is set to True, it updates the existing credential.
    '''
    vault = {}
    if os.path.exists(path):
        try:
            # Context manager keeps the read handle from leaking.
            with open(path, 'r') as vault_file:
                vault = json.loads(vault_file.read())
        except (KeyError, ValueError):
            vault = {}
    # Refuse to clobber an existing entry unless forced.
    if name in vault.keys() and not force:
        return None
    if data[name]['default']:
        # Only one entry may be flagged default; unset any duplicates.
        for _ in vault.keys():
            try:
                if vault[_]['default']:
                    vault[_].update({'default': False})
            except (KeyError, ValueError):
                # NOTE(review): returns None even when `force` is set, so
                # the vault reset above is never persisted -- confirm that
                # this is the intended behavior.
                if force:
                    vault = {}
                return None
    vault.update(data)
    try:
        with open(path, 'w') as vault_file:
            vault_file.write(json.dumps(vault, indent=4, sort_keys=True))
    except IOError:
        return None
    return vault
def login(path=DEAFULT_CREDENTIALS_PATH, api=None, default=False):
    '''
    Create an authorization (access token; scope: 'gist') on GitHub.
    Works with HTTP Basic Authentication (RFC-2617).

    :param path: vault file the new credentials are written to.
    :param api: optional API endpoint override forwarded to the
                `authorizations` helper.
    :param default: store (and force-write) the credentials as the
                    default vault entry.
    :return: True when the authorization was created, False otherwise.

    Caveats:
        1. For username, hit return to use the login username.
        2. For github-2fa-auth, hit return to skip.
        3. 'gist-shell' will be appended to auth-token-note for storing
           the description for the Personal Access Token.
    '''
    # Prompt interactively; an empty username falls back to the OS login.
    username = raw_input('github-username: ').strip()
    username = getpass.getuser() if len(username) < 1 else username
    password = getpass.getpass('github-password: ').strip()
    # Two-factor one-time password is optional.
    auth_2fa = raw_input('github-2fa-auth: ').strip()
    auth_2fa = None if len(auth_2fa) < 1 else auth_2fa
    token_note = raw_input('auth-token-note: ').strip()
    response = authorizations.create_authorization(auth=(username, password),
                                                   note=token_note,
                                                   otp=auth_2fa, api=api)
    # response is (success_flag, payload).
    if response[0]:
        data = response[1]
        print data
        to_write = {
            data['app']['name']: {
                'credentials': {
                    'id': data['id'],
                    'token': data['token'],
                    'username': username,
                    'created-at': data['created_at'],
                },
                'default': default
            }
        }
        # `force` mirrors `default` so a new default entry can replace
        # the previous one in the vault.
        update_credentials(data=to_write, name=data['app']['name'], path=path,
                           force=default)
        return True
    return False
def upload(payload, token, description=None, public=False, update=False):
    '''
    Upload the payload to GitHub.

    NOTE(review): not implemented yet -- the body is a stub.

    Caveats:
        1. The Gists are private by default.
        2. If no description is provided, a default string with the
           login username, hostname, IP address and time (in UTC) will
           be provided.
    '''
    pass
| en | 0.801089 | #! /usr/bin/env python2.7 gist.py: A simple command line interface for creating, fetching, browsing, updating and deleting Gists on GitHub. # Try importing the library during development. # Should work, if the library is already installed. # Default path to store credentials locally. # For colorama. # from colorama import init, Fore, Back, Style # init(autoreset=True) Get the external IP address of the host. # Fallback option. Fetch the credentials from the vault. Caveats: 1. When 'fetch' is None, it loads the default set of credentials. 2. If a duplicate default exists, return the first one. 3. If fetch is not set to None and the key doesn't exist, the function will return None. Update the vault with payload. Caveats: 1. If the payload has the default flag set, it checks against the all the stored credentials, if a duplicate exists, it unsets the flag on the duplicate. 2. If the vault has a malformed JSON, the function will exit, returning None, if force is set to True, it will overwrite the vault with the new credentials. 3. If the vault file has incorrect file permissions, the function will exit with by returning None. 4. If a duplicate credential exists, the function will return a None; if force is set to True, it will update the existing credential. Create an authorization (access token; scope: 'gist') on GitHub. Works with HTTP Basic Authentication (RFC-2617). Caveats: 1. For username, hit return to user the login username. 2. For github-2fa-auth, hit return to skip. 3. 'gist-shell' will appended to auth-token-note for storing the description for the Personal Access Token. Upload the payload to GitHub. Caveats: 1. The Gists are private by default. 2. If no description is provided, a default string with the login username, hostname, IP adderss and time (in UTC) will be provided. | 2.59014 | 3 |
decrypt.py | Ragnyll/ranger-gpg | 0 | 6618716 | import os
import tarfile
from gnupg import GPG
from ranger.api.commands import Command
class decrypt(Command):
    """:decrypt
    Decrypts the selected ``*.gpg`` files with gpg; if a decrypted file
    turns out to be a tar archive it is extracted into the current
    directory and the intermediate archive is removed.
    Passing ``true`` as the first argument keeps the original gpg file
    instead of deleting it after decryption.
    """

    def execute(self):
        # Use the user's default keyring under ~/.gnupg.
        gpg = GPG(gnupghome=os.path.join(os.path.expanduser('~'), '.gnupg'))
        # Basenames of everything currently selected in the ranger tab.
        paths = [os.path.basename(f.path) for f in self.fm.thistab.get_selection()]
        for p in [p for p in paths if p.endswith('gpg')]:
            with open(p, 'rb') as enc:
                dec_b = gpg.decrypt_file(enc)
            # Drop the trailing extension, e.g. foo.tar.gpg -> foo.tar.
            out_fname = os.path.splitext(p)[0]
            with open(out_fname, 'wb+') as dec_f:
                dec_f.write(dec_b.data)
            # Keep the encrypted original only when 'true' was passed.
            if self.arg(1) != 'true':
                os.remove(p)
            # Unpack tar output and discard the intermediate file.
            if tarfile.is_tarfile(out_fname):
                tarfile.open(out_fname).extractall(path='.')
                os.remove(out_fname)
| import os
import tarfile
from gnupg import GPG
from ranger.api.commands import Command
class decrypt(Command):
    """:decrypt
    Decrypts the selected ``*.gpg`` files with gpg; if a decrypted file
    turns out to be a tar archive it is extracted into the current
    directory and the intermediate archive is removed.
    Passing ``true`` as the first argument keeps the original gpg file
    instead of deleting it after decryption.
    """

    def execute(self):
        # Use the user's default keyring under ~/.gnupg.
        gpg = GPG(gnupghome=os.path.join(os.path.expanduser('~'), '.gnupg'))
        # Basenames of everything currently selected in the ranger tab.
        paths = [os.path.basename(f.path) for f in self.fm.thistab.get_selection()]
        for p in [p for p in paths if p.endswith('gpg')]:
            with open(p, 'rb') as enc:
                dec_b = gpg.decrypt_file(enc)
            # Drop the trailing extension, e.g. foo.tar.gpg -> foo.tar.
            out_fname = os.path.splitext(p)[0]
            with open(out_fname, 'wb+') as dec_f:
                dec_f.write(dec_b.data)
            # Keep the encrypted original only when 'true' was passed.
            if self.arg(1) != 'true':
                os.remove(p)
            # Unpack tar output and discard the intermediate file.
            if tarfile.is_tarfile(out_fname):
                tarfile.open(out_fname).extractall(path='.')
                os.remove(out_fname)
| en | 0.953522 | :decrypts Decrypts a file with gpg or a directory by extracting a tar file and decrypting it passing true as the false flag will not delete the origin gpg file | 2.885781 | 3 |
tests/test_processing.py | nejcd/pointcloud | 2 | 6618717 | import unittest
import numpy as np
from shapely.geometry import Polygon
import pointcloud.utils.processing as processing
# Fixture cloud of 8 (x, y, z) points: the first four sit near z == 1,
# the last four near z == 10.
points = np.array([[1, 1, 1],
                   [1, 2, 1],
                   [3, 1, 1],
                   [4, 5, 1],
                   [3, 6, 10],
                   [2, 5, 10],
                   [4, 6, 10],
                   [3, 5, 10]])
# Class label per point: 0 for the low-z group, 1 for the high-z group.
labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
# Per-point feature vectors; the third component differs between groups.
features = np.array([[1, 2, 1],
                     [1, 2, 1],
                     [1, 2, 1],
                     [1, 2, 1],
                     [1, 2, 3],
                     [1, 2, 3],
                     [1, 2, 3],
                     [1, 2, 3]])
class ProjectTests(unittest.TestCase):
    """Tests for pointcloud.utils.processing sampling/clipping helpers."""

    def test_sample_by_target_value(self):
        # Without shuffling, sampling must keep the first 4 rows verbatim.
        sampled_points, sampled_labels, sampled_features = processing.sample_to_target_size(points, 4, shuffle=False,
                                                                                            labels=labels,
                                                                                            features=features)
        target_points = np.array([[1, 1, 1],
                                  [1, 2, 1],
                                  [3, 1, 1],
                                  [4, 5, 1]])
        target_labels = np.array([0, 0, 0, 0])
        target_features = np.array([[1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 1]])
        self.assertTrue((sampled_points == target_points).all())
        self.assertTrue((sampled_labels == target_labels).all())
        self.assertTrue((sampled_features == target_features).all())

    def test_sample_by_target_value_random_shuffle(self):
        # With a fixed seed the shuffled sample is reproducible.
        sampled_points, sampled_labels, sampled_features = processing.sample_to_target_size(points, 4, shuffle=True,
                                                                                            seed=0,
                                                                                            labels=labels,
                                                                                            features=features)
        target_points = np.array([[4, 6, 10],
                                  [3, 1, 1],
                                  [1, 2, 1],
                                  [3, 5, 10]])
        target_labels = np.array([1, 0, 0, 1])
        target_features = np.array([[1, 2, 3],
                                    [1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 3]])
        self.assertTrue((sampled_points == target_points).all())
        self.assertTrue((sampled_labels == target_labels).all())
        self.assertTrue((sampled_features == target_features).all())

    def test_clip_by_bbox(self):
        # Only two of the eight points fall inside the 2x3 box.
        clip = Polygon([(0, 0), (2, 0), (2, 3), (0, 3)])
        c_points, c_labels, c_features = processing.clip_by_bbox(points, clip.bounds, labels=labels, features=features)
        self.assertEqual((2, 3), np.shape(c_points))
        self.assertEqual((2,), np.shape(c_labels))
        self.assertEqual((2, 3), np.shape(c_features))

    def test_classify_close_by(self):
        new_labels = processing.classify_close_by(points, labels, from_label=0, to_label=1, close_to_label=1, radius=10)
        new_labels_same = processing.classify_close_by(points, labels, from_label=0, to_label=1, close_to_label=1, radius=9)
        target_new_labels = np.array([1, 1, 1, 1, 1, 1, 1, 1])
        # assertEqual on numpy arrays raises "the truth value of an array
        # is ambiguous"; compare element-wise like the tests above.
        self.assertTrue((new_labels == target_new_labels).all())
        self.assertTrue((new_labels_same == labels).all())
if __name__ == '__main__':
unittest.main()
| import unittest
import numpy as np
from shapely.geometry import Polygon
import pointcloud.utils.processing as processing
points = np.array([[1, 1, 1],
[1, 2, 1],
[3, 1, 1],
[4, 5, 1],
[3, 6, 10],
[2, 5, 10],
[4, 6, 10],
[3, 5, 10]])
labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
features = np.array([[1, 2, 1],
[1, 2, 1],
[1, 2, 1],
[1, 2, 1],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
class ProjectTests(unittest.TestCase):
    """Tests for pointcloud.utils.processing sampling/clipping helpers."""

    def test_sample_by_target_value(self):
        # Without shuffling, sampling must keep the first 4 rows verbatim.
        sampled_points, sampled_labels, sampled_features = processing.sample_to_target_size(points, 4, shuffle=False,
                                                                                            labels=labels,
                                                                                            features=features)
        target_points = np.array([[1, 1, 1],
                                  [1, 2, 1],
                                  [3, 1, 1],
                                  [4, 5, 1]])
        target_labels = np.array([0, 0, 0, 0])
        target_features = np.array([[1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 1]])
        self.assertTrue((sampled_points == target_points).all())
        self.assertTrue((sampled_labels == target_labels).all())
        self.assertTrue((sampled_features == target_features).all())

    def test_sample_by_target_value_random_shuffle(self):
        # With a fixed seed the shuffled sample is reproducible.
        sampled_points, sampled_labels, sampled_features = processing.sample_to_target_size(points, 4, shuffle=True,
                                                                                            seed=0,
                                                                                            labels=labels,
                                                                                            features=features)
        target_points = np.array([[4, 6, 10],
                                  [3, 1, 1],
                                  [1, 2, 1],
                                  [3, 5, 10]])
        target_labels = np.array([1, 0, 0, 1])
        target_features = np.array([[1, 2, 3],
                                    [1, 2, 1],
                                    [1, 2, 1],
                                    [1, 2, 3]])
        self.assertTrue((sampled_points == target_points).all())
        self.assertTrue((sampled_labels == target_labels).all())
        self.assertTrue((sampled_features == target_features).all())

    def test_clip_by_bbox(self):
        # Only two of the eight points fall inside the 2x3 box.
        clip = Polygon([(0, 0), (2, 0), (2, 3), (0, 3)])
        c_points, c_labels, c_features = processing.clip_by_bbox(points, clip.bounds, labels=labels, features=features)
        self.assertEqual((2, 3), np.shape(c_points))
        self.assertEqual((2,), np.shape(c_labels))
        self.assertEqual((2, 3), np.shape(c_features))

    def test_classify_close_by(self):
        new_labels = processing.classify_close_by(points, labels, from_label=0, to_label=1, close_to_label=1, radius=10)
        new_labels_same = processing.classify_close_by(points, labels, from_label=0, to_label=1, close_to_label=1, radius=9)
        target_new_labels = np.array([1, 1, 1, 1, 1, 1, 1, 1])
        # assertEqual on numpy arrays raises "the truth value of an array
        # is ambiguous"; compare element-wise like the tests above.
        self.assertTrue((new_labels == target_new_labels).all())
        self.assertTrue((new_labels_same == labels).all())
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.634296 | 3 | |
notnews/pred_soft_news_uk.py | notnews/notnews | 8 | 6618718 | <filename>notnews/pred_soft_news_uk.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import joblib
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from .pred_soft_news import SoftNewsModel
from .utils import column_exists, fixup_columns
from .normalizer import clean_text
class UKSoftNewsModel(SoftNewsModel):
    # Package-relative paths of the pickled classifier and vectorizer.
    MODELFN = "data/uk_model/url_uk_classifier.joblib"
    VECTFN = "data/uk_model/url_uk_vectorizer.joblib"

    # Lazily populated on the first prediction call (class-level cache).
    vect = None
    model = None

    @classmethod
    def pred_soft_news_uk(cls, df, col='text', latest=False):
        """Predict Soft News by the text using UK URL Soft News model.

        Using the URL Soft News model to predict the soft news of the input
        DataFrame.

        Args:
            df (:obj:`DataFrame`): Pandas DataFrame containing the text
                column.
            col (str or int): Column's name or location of the text in
                DataFrame. (default: text)
            latest (bool): Download latest model data from the server.
                (default: False)

        Returns:
            DataFrame: Pandas DataFrame with additional columns:
                - `prob_soft_news_uk` is the prediction probability.

        """
        if col not in df.columns:
            print("No column `{0!s}` in the DataFrame".format(col))
            return df

        # Bail out early if the column holds no non-null values.
        nn = df[col].notnull()
        if df[nn].shape[0] == 0:
            return df

        # Normalized working copy of the text column (modifies df in place).
        df['__text'] = df[col].apply(lambda c: clean_text(c))

        # Load and cache the model/vectorizer on first use.
        if cls.model is None:
            cls.model, cls.vect = cls.load_model_data(latest)

        X = cls.vect.transform(df['__text'].astype(str))
        # NOTE(review): the TF-IDF transform is re-fitted on every input
        # batch rather than restored from training -- confirm intended.
        tfidf = TfidfTransformer()
        X = tfidf.fit_transform(X)

        y_prob = cls.model.predict_proba(X)
        df['prob_soft_news_uk'] = y_prob[:, 1]

        # take out temporary working columns
        del df['__text']

        return df
pred_soft_news_uk = UKSoftNewsModel.pred_soft_news_uk
def main(argv=sys.argv[1:]):
    """Command-line entry point: parse args, run prediction, save a CSV."""
    title = 'Predict Soft News by text using UK URL Soft News model'
    parser = argparse.ArgumentParser(description=title)
    parser.add_argument('input', default=None,
                        help='Input file')
    parser.add_argument('-o', '--output', default='pred-soft-news-uk-output.csv',
                        help='Output file with prediction data')
    parser.add_argument('-t', '--text', default='text',
                        help='Name or index location of column contains '
                        'the text (default: text)')
    args = parser.parse_args(argv)

    print(args)

    # A purely numeric column spec means a headerless CSV addressed by
    # column position; otherwise the CSV is read with its header row.
    if args.text.isdigit():
        frame = pd.read_csv(args.input, header=None)
        args.text = int(args.text)
    else:
        frame = pd.read_csv(args.input)

    if not column_exists(frame, args.text):
        return -1

    result = pred_soft_news_uk(frame, args.text)

    print("Saving output to file: `{0:s}`".format(args.output))
    result.columns = fixup_columns(result.columns)
    result.to_csv(args.output, index=False)

    return 0
if __name__ == "__main__":
sys.exit(main())
| <filename>notnews/pred_soft_news_uk.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import joblib
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from .pred_soft_news import SoftNewsModel
from .utils import column_exists, fixup_columns
from .normalizer import clean_text
class UKSoftNewsModel(SoftNewsModel):
    # Package-relative paths of the pickled classifier and vectorizer.
    MODELFN = "data/uk_model/url_uk_classifier.joblib"
    VECTFN = "data/uk_model/url_uk_vectorizer.joblib"

    # Lazily populated on the first prediction call (class-level cache).
    vect = None
    model = None

    @classmethod
    def pred_soft_news_uk(cls, df, col='text', latest=False):
        """Predict Soft News by the text using UK URL Soft News model.

        Using the URL Soft News model to predict the soft news of the input
        DataFrame.

        Args:
            df (:obj:`DataFrame`): Pandas DataFrame containing the text
                column.
            col (str or int): Column's name or location of the text in
                DataFrame. (default: text)
            latest (bool): Download latest model data from the server.
                (default: False)

        Returns:
            DataFrame: Pandas DataFrame with additional columns:
                - `prob_soft_news_uk` is the prediction probability.

        """
        if col not in df.columns:
            print("No column `{0!s}` in the DataFrame".format(col))
            return df

        # Bail out early if the column holds no non-null values.
        nn = df[col].notnull()
        if df[nn].shape[0] == 0:
            return df

        # Normalized working copy of the text column (modifies df in place).
        df['__text'] = df[col].apply(lambda c: clean_text(c))

        # Load and cache the model/vectorizer on first use.
        if cls.model is None:
            cls.model, cls.vect = cls.load_model_data(latest)

        X = cls.vect.transform(df['__text'].astype(str))
        # NOTE(review): the TF-IDF transform is re-fitted on every input
        # batch rather than restored from training -- confirm intended.
        tfidf = TfidfTransformer()
        X = tfidf.fit_transform(X)

        y_prob = cls.model.predict_proba(X)
        df['prob_soft_news_uk'] = y_prob[:, 1]

        # take out temporary working columns
        del df['__text']

        return df
pred_soft_news_uk = UKSoftNewsModel.pred_soft_news_uk
def main(argv=sys.argv[1:]):
    """Command-line entry point: parse args, run prediction, save a CSV."""
    title = 'Predict Soft News by text using UK URL Soft News model'
    parser = argparse.ArgumentParser(description=title)
    parser.add_argument('input', default=None,
                        help='Input file')
    parser.add_argument('-o', '--output', default='pred-soft-news-uk-output.csv',
                        help='Output file with prediction data')
    parser.add_argument('-t', '--text', default='text',
                        help='Name or index location of column contains '
                        'the text (default: text)')
    args = parser.parse_args(argv)

    print(args)

    # A purely numeric column spec means a headerless CSV addressed by
    # column position; otherwise the CSV is read with its header row.
    if args.text.isdigit():
        frame = pd.read_csv(args.input, header=None)
        args.text = int(args.text)
    else:
        frame = pd.read_csv(args.input)

    if not column_exists(frame, args.text):
        return -1

    result = pred_soft_news_uk(frame, args.text)

    print("Saving output to file: `{0:s}`".format(args.output))
    result.columns = fixup_columns(result.columns)
    result.to_csv(args.output, index=False)

    return 0
if __name__ == "__main__":
sys.exit(main())
| en | 0.588314 | #!/usr/bin/env python # -*- coding: utf-8 -*- Predict Soft News by the text using UK URL Soft News model. Using the URL Soft News model to predict the soft news of the input DataFrame. Args: df (:obj:`DataFrame`): Pandas DataFrame containing the text column. col (str or int): Column's name or location of the text in DataFrame. (default: text) latest (bool): Download latest model data from the server. (default: False) Returns: DataFrame: Pandas DataFrame with additional columns: - `prob_soft_news_uk` is the prediction probability. # take out temporary working columns | 2.882161 | 3 |
github_code/Pedagogical examples/1d_diffusion_inv.py | Pang1987/Python-code-PIGP-PINN | 4 | 6618719 | <filename>github_code/Pedagogical examples/1d_diffusion_inv.py
import numpy as np
import matplotlib.pyplot as plt
import models
import time
def u(x, t):
    """Manufactured solution sin(2*pi*x) * exp(-t) of the 1-D diffusion problem."""
    spatial_mode = np.sin(2. * np.pi * x)
    return spatial_mode * np.exp(-t)
def U(x, t, c):
    """Evaluate u_t - c * u_xx for the manufactured solution u(x, t).

    For u = sin(2*pi*x) * exp(-t) this equals
    (-1 + c * (2*pi)**2) * sin(2*pi*x) * exp(-t).
    """
    mode = np.sin(2 * np.pi * x)
    decay = np.exp(-t)
    return -mode * decay + c * 4 * np.pi ** 2 * mode * decay
#noise = 0.0 # noise-free
noise = 0.05 # 5% relative noise (scaled by each snapshot's std below)
dt = 0.01    # time-step separating the two snapshots
N_U = 20     # samples in the first snapshot
c = 0.1      # true diffusion coefficient (to be recovered)
np.random.seed(seed=1234)
# First snapshot: u at t=0.5 plus dt * U at t=0.51
# (NOTE(review): presumably a backward-Euler relation -- confirm).
xU = np.linspace(0,1,N_U).reshape((-1,1))
yU = u(xU, 0.5) + dt * U(xU, 0.51, c)
yU = yU + noise * np.std(yU) * np.random.randn(N_U,1) # add noise
# Second snapshot: the exact solution at t=0.51, perturbed the same way.
N_u = 20
xu = np.linspace(0,1,N_u).reshape((-1,1))
yu = u(xu, 0.51)
yu = yu + noise * np.std(yu) * np.random.randn(N_u,1)
# Plot both noisy snapshots for visual inspection and save to disk.
fig = plt.figure()
plt.plot(xU, yU, 'bo:',label='Snapshot t=0.5')
plt.plot(xu, yu, 'rs:',label='Snapshot t=0.51')
plt.xlabel('x')
plt.ylabel('u')
plt.legend()
plt.savefig('snapshots_noise_'+str(noise*100)+'.png',dpi=300)
plt.close(fig)
# Bundle the training data for the inverse models.
dataset = {'xu_train': xu, 'yu_train': yu, \
           'xU_train': xU, 'yU_train': yU, 'noise': noise
           }
##################### Discrete time GP
GP_model = models.Discrete_time_GP_inverse(dataset)
t_start = time.time()
GP_model.training(num_iter = 10001, learning_rate = 1.0e-3)
t_end = time.time()
print ('D-GP-Computational time (secs): ', t_end-t_start)
#################### Discrete time NN
NN_model = models.Discrete_time_NN_inverse(dataset)
t_start = time.time()
NN_model.training(num_iter = 20001, learning_rate = 1.0e-3)
t_end = time.time()
print ('D-NN-Computational time (secs): ', t_end-t_start)
| <filename>github_code/Pedagogical examples/1d_diffusion_inv.py
import numpy as np
import matplotlib.pyplot as plt
import models
import time
def u(x, t):
    """Manufactured solution sin(2*pi*x) * exp(-t) of the 1-D diffusion problem."""
    spatial_mode = np.sin(2. * np.pi * x)
    return spatial_mode * np.exp(-t)
def U(x, t, c):
    """Evaluate u_t - c * u_xx for the manufactured solution u(x, t).

    For u = sin(2*pi*x) * exp(-t) this equals
    (-1 + c * (2*pi)**2) * sin(2*pi*x) * exp(-t).
    """
    mode = np.sin(2 * np.pi * x)
    decay = np.exp(-t)
    return -mode * decay + c * 4 * np.pi ** 2 * mode * decay
#noise = 0.0 # noise-free
noise = 0.05 # 5% relative noise (scaled by each snapshot's std below)
dt = 0.01    # time-step separating the two snapshots
N_U = 20     # samples in the first snapshot
c = 0.1      # true diffusion coefficient (to be recovered)
np.random.seed(seed=1234)
# First snapshot: u at t=0.5 plus dt * U at t=0.51
# (NOTE(review): presumably a backward-Euler relation -- confirm).
xU = np.linspace(0,1,N_U).reshape((-1,1))
yU = u(xU, 0.5) + dt * U(xU, 0.51, c)
yU = yU + noise * np.std(yU) * np.random.randn(N_U,1) # add noise
# Second snapshot: the exact solution at t=0.51, perturbed the same way.
N_u = 20
xu = np.linspace(0,1,N_u).reshape((-1,1))
yu = u(xu, 0.51)
yu = yu + noise * np.std(yu) * np.random.randn(N_u,1)
# Plot both noisy snapshots for visual inspection and save to disk.
fig = plt.figure()
plt.plot(xU, yU, 'bo:',label='Snapshot t=0.5')
plt.plot(xu, yu, 'rs:',label='Snapshot t=0.51')
plt.xlabel('x')
plt.ylabel('u')
plt.legend()
plt.savefig('snapshots_noise_'+str(noise*100)+'.png',dpi=300)
plt.close(fig)
# Bundle the training data for the inverse models.
dataset = {'xu_train': xu, 'yu_train': yu, \
           'xU_train': xU, 'yU_train': yU, 'noise': noise
           }
##################### Discrete time GP
GP_model = models.Discrete_time_GP_inverse(dataset)
t_start = time.time()
GP_model.training(num_iter = 10001, learning_rate = 1.0e-3)
t_end = time.time()
print ('D-GP-Computational time (secs): ', t_end-t_start)
#################### Discrete time NN
NN_model = models.Discrete_time_NN_inverse(dataset)
t_start = time.time()
NN_model.training(num_iter = 20001, learning_rate = 1.0e-3)
t_end = time.time()
print ('D-NN-Computational time (secs): ', t_end-t_start)
| de | 0.270074 | #noise = 0.0 # noise-free # 1% noise # add noise ##################### Discrete time GP #################### Discrete time NN | 2.967719 | 3 |
common/CeleryDatabases/session.py | wjcgithub/dtq | 1 | 6618720 | # -*- coding: utf-8 -*-
# from __future__ import absolute_import, unicode_literals
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
ResultModelBase = declarative_base()
__all__ = ('SessionManager',)
class SessionManager(object):
    """Manage SQLAlchemy engines and sessions for the result backend."""

    def __init__(self):
        # Tables have not been created yet; prepare_models() flips this
        # flag after the first (lazy) metadata.create_all().  The previous
        # value of True made the create_all() guard below dead code, so
        # the result tables were never created.
        self.prepared = False

    def get_engine(self, dburi, **kwargs):
        """Return a NullPool (non-pooling) engine bound to `dburi`."""
        return create_engine(dburi, encoding='utf-8', poolclass=NullPool, **kwargs)

    def create_session(self, dburi, **kwargs):
        """Return an (engine, sessionmaker) pair for `dburi`."""
        engine = self.get_engine(dburi, **kwargs)
        return engine, sessionmaker(bind=engine)

    def prepare_models(self, engine):
        """Create the result tables once per manager instance (lazy)."""
        if not self.prepared:
            ResultModelBase.metadata.create_all(engine)
            self.prepared = True

    def session_factory(self, dburi, **kwargs):
        """Open a new session for `dburi`, creating tables on first use."""
        engine, session = self.create_session(dburi, **kwargs)
        self.prepare_models(engine)
        return session()
| # -*- coding: utf-8 -*-
# from __future__ import absolute_import, unicode_literals
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
ResultModelBase = declarative_base()
__all__ = ('SessionManager',)
class SessionManager(object):
    """Manage SQLAlchemy engines and sessions for the result backend."""

    def __init__(self):
        # Tables have not been created yet; prepare_models() flips this
        # flag after the first (lazy) metadata.create_all().  The previous
        # value of True made the create_all() guard below dead code, so
        # the result tables were never created.
        self.prepared = False

    def get_engine(self, dburi, **kwargs):
        """Return a NullPool (non-pooling) engine bound to `dburi`."""
        return create_engine(dburi, encoding='utf-8', poolclass=NullPool, **kwargs)

    def create_session(self, dburi, **kwargs):
        """Return an (engine, sessionmaker) pair for `dburi`."""
        engine = self.get_engine(dburi, **kwargs)
        return engine, sessionmaker(bind=engine)

    def prepare_models(self, engine):
        """Create the result tables once per manager instance (lazy)."""
        if not self.prepared:
            ResultModelBase.metadata.create_all(engine)
            self.prepared = True

    def session_factory(self, dburi, **kwargs):
        """Open a new session for `dburi`, creating tables on first use."""
        engine, session = self.create_session(dburi, **kwargs)
        self.prepare_models(engine)
        return session()
| en | 0.47503 | # -*- coding: utf-8 -*- # from __future__ import absolute_import, unicode_literals Manage SQLAlchemy sessions. | 2.607596 | 3 |
adaptive_amortized.py | kojino/audio_denoise | 0 | 6618721 | <filename>adaptive_amortized.py
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import itertools
import math
import random
import scipy.sparse
from sklearn.externals.joblib.parallel import Parallel, delayed
import multiprocessing
import logging
#import boto3, botocore
# Log everything to adaptive.log, overwriting the file on each run.
logging.basicConfig(
    format='%(asctime)s: %(message)s',
    level='INFO',
    datefmt='%m/%d/%Y %I:%M:%S %p',
    filename='adaptive.log',
    filemode='w')
# Record how many CPU cores are available for the parallel code path.
num_cores = multiprocessing.cpu_count()
logging.info('num_cores:')
logging.info(num_cores)
def randomSample(X, num):
    """Return int(num) uniformly sampled elements of X, kept in their
    original order; returns X itself when it has fewer elements."""
    size = int(num)
    if len(X) < size:
        return X
    chosen = sorted(random.sample(range(len(X)), size))
    return [X[i] for i in chosen]
# alg 4
# estimateSet(all_predictors, ['lower'])
def estimateSet(X, S, m=5):
    """Monte-Carlo estimate of the expected value a random size-(k/r)
    subset of X adds on top of S, averaged over m trials.

    Relies on the module-level `k`, `r` and `oracle`.
    """
    fS = oracle(S)
    total = 0
    for _ in range(m):
        batch = randomSample(X, k / r)
        total += oracle(batch + S)
    return (total - m * fS) / m
# alg 5
def estimateMarginal(X, S, a, m=5):
    """Monte-Carlo estimate of element a's marginal contribution on top
    of S union a random size-(k/r) subset of X, averaged over m trials.

    Relies on the module-level `k`, `r` and `oracle`.
    """
    total = 0
    for _ in range(m):
        R = randomSample(X, k / r)
        with_a = oracle(R + S + [a])
        # The original's if/else both evaluated oracle(S + R); only the
        # removal of `a` from R differed, so the duplicate call is merged.
        if a in R:
            R.remove(a)
        without_a = oracle(S + R)
        total += with_a - without_a
    return total / m
def get_class_rate(x_t, y_t):
    """Fit a logistic regression on (x_t, y_t) and return its training
    classification accuracy (fraction of correctly predicted labels)."""
    clf = LogisticRegression()
    clf.fit(x_t, y_t)
    predicted = clf.predict(x_t)
    return np.sum(predicted == y_t) / len(y_t)
# given set of features, return r2
def oracle(cols):
    """Set-function objective: training accuracy of a logistic model
    restricted to feature columns `cols` of the global matrix X1.
    The empty set scores 0.0."""
    if cols == []:
        return 0.0
    return get_class_rate(X1[:, cols], y_cat)
def union(A, B):
    """Return the deduplicated union of two lists (order unspecified)."""
    combined = A + B
    return list(set(combined))
# alg 3/6
def amortizedFilter(k, r, ep, OPT, X, debug=True, parallel=False):
    """Adaptive-sampling greedy maximization with amortized filtering.

    Runs r rounds; each round repeatedly filters the ground set X down to
    elements whose estimated marginal contribution is high enough, then
    adds a random size-(k/r) batch to a working set T, which is finally
    merged into the selected set S.

    :param k: cardinality constraint (number of features to select).
    :param r: number of adaptive rounds.
    :param ep: accuracy parameter epsilon.
    :param OPT: guess of the optimal objective value.
    :param X: ground set (list of feature indices).
    :param debug: emit verbose logging when True.
    :param parallel: estimate marginals with joblib workers when True.
    :return: list of (|S|, f(S)) pairs, one per round.

    NOTE(review): estimateSet/estimateMarginal read the *module-level*
    `k` and `r`, so calling this with values different from the globals
    desynchronizes the sample sizes -- confirm intended.
    """
    m = 10       # samples per Monte-Carlo estimate
    S = []       # selected set so far
    y_adap = []  # per-round (|S|, f(S)) trace
    for i in range(r):
        T = []
        logging.info('r=' + str(i))
        fS = oracle(S)
        fST = oracle(union(S, T))
        # Grow T until it adds enough value or S+T reaches the budget k.
        while ((fST - fS) < (ep / 20) * (OPT - fS)) and (len(union(S, T)) < k):
            # FILTER Step
            # this only changes X
            vs = estimateSet(X, union(S, T), m)
            # Filter X until a random batch is expected to be good enough.
            while (vs < (1 - ep) * (OPT - fST) / r):
                if debug:
                    logging.info('inner while loop')
                # get marginal contribution
                if parallel:
                    marg_a = Parallel(
                        n_jobs=-1, verbose=50)(
                            delayed(estimateMarginal)(X, union(S, T), a, m)
                            for a in X)
                else:
                    marg_a = [
                        estimateMarginal(X, union(S, T), a, m) for a in X
                    ]
                # Filter! Keep only elements whose estimated marginal
                # clears the threshold.
                Xnew = [
                    X[idx] for idx, el in enumerate(marg_a)
                    if el >= (1 + ep / 2) * (1 - ep) * (OPT - fST) / k
                ]
                X = Xnew
                # estimate if filtered set is good enough
                vs = estimateSet(X, union(S, T), m)
                if debug:
                    logging.info('Elements remaining: ' + str(len(X)))
                    logging.info('Check')
                    logging.info(vs < (1 - ep) * (OPT - fST) / r)
            # Add a random batch of the surviving elements to T.
            R = randomSample(X, k / r)
            T = union(T, R)
            # T changes but S doesn't
            fST = oracle(union(S, T))
            if debug:
                logging.info('Outer Loop')
                logging.info(fST)
        S = union(S, T)
        fS = oracle(S)
        y_adap.append((len(S), fS))
    return y_adap
# AN EXAMPLE
logging.info('Fetching Files')
# Sparse design matrix and categorical targets produced upstream.
X1 = scipy.sparse.load_npz('x_data.npz')
y_cat = np.load('y_data.npy')
# Ground set: every column index of the design matrix.
all_predictors = list(range(X1.shape[1]))
logging.info('Num Features: ' + str(len(all_predictors)))
logging.info('Starting Adaptive')
# Select k features in r adaptive rounds with accuracy ep, assuming the
# optimum objective value is roughly OPT.
k = 50
r = 2
ep = 0.01
OPT = 0.5
y_adap = amortizedFilter(k, r, ep, OPT, all_predictors, parallel=True)
logging.info(y_adap)
| <filename>adaptive_amortized.py
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import itertools
import math
import random
import scipy.sparse
from sklearn.externals.joblib.parallel import Parallel, delayed
import multiprocessing
import logging
#import boto3, botocore
# Log to a fresh file each run (filemode='w' truncates adaptive.log).
logging.basicConfig(
    format='%(asctime)s: %(message)s',
    level='INFO',
    datefmt='%m/%d/%Y %I:%M:%S %p',
    filename='adaptive.log',
    filemode='w')
# logging.info num cores
# Record how many CPU cores are available (relevant for the parallel path).
num_cores = multiprocessing.cpu_count()
logging.info('num_cores:')
logging.info(num_cores)
def randomSample(X, num):
    """Sample int(num) elements of X uniformly without replacement.

    The chosen elements keep their original relative order.  If X has
    fewer than int(num) elements, X itself is returned unchanged.
    """
    size = int(num)
    if len(X) < size:
        return X
    picked = sorted(random.sample(range(len(X)), size))
    return [X[i] for i in picked]
# alg 4
# estimateSet(all_predictors, ['lower'])
def estimateSet(X, S, m=5):
    """Monte-Carlo estimate of the average marginal value of a random
    size-(k/r) sample from X on top of the set S.

    Uses the module-level globals k, r, oracle and randomSample.
    Averages oracle(R + S) - oracle(S) over m independent samples R.
    """
    base = oracle(S)
    total = 0
    for _ in range(m):
        sample = randomSample(X, k / r)
        total += oracle(sample + S)
    return (total - m * base) / m
# alg 5
def estimateMarginal(X, S, a, m=5):
    """Monte-Carlo estimate of the marginal contribution of element `a`.

    Averages, over m random size-(k/r) samples R drawn from X, the
    difference oracle(R + S + [a]) - oracle(S + (R without a)).
    Relies on the module-level globals k, r, oracle and randomSample.
    """
    est = 0
    # repeat m times
    for it in range(m):
        R = randomSample(X, k / r)
        with_a = oracle(R + S + [a])
        # Make sure the baseline set never contains `a`; the original
        # if/else duplicated the same oracle(S + R) call in both branches.
        if a in R:
            R.remove(a)
        without_a = oracle(S + R)
        est += with_a - without_a
    return est / m
def get_class_rate(x_t, y_t):
    """Training-set classification accuracy of a logistic regression.

    Fits sklearn's LogisticRegression on (x_t, y_t) and returns the
    fraction of training labels it predicts correctly.
    """
    model = LogisticRegression()
    model.fit(x_t, y_t)
    predicted = model.predict(x_t)
    return np.sum(predicted == y_t) / len(y_t)
# given set of features, return r2
def oracle(cols):
    """Objective value of a feature subset.

    Returns the classification accuracy obtained using only the columns
    `cols` of the global design matrix X1 against labels y_cat; an empty
    subset scores 0.0.
    """
    if cols == []:
        return 0.0
    return get_class_rate(X1[:, cols], y_cat)
def union(A, B):
    """Deduplicated union of two lists (element order unspecified)."""
    combined = A + B
    return list(set(combined))
# alg 3/6
def amortizedFilter(k, r, ep, OPT, X, debug=True, parallel=False):
    """Amortized adaptive-sampling filter (algorithms 3/6 per the header).

    Greedily grows a feature set S toward cardinality k over r rounds,
    repeatedly filtering the candidate pool X down to elements whose
    estimated marginal contribution clears a threshold derived from the
    target value OPT and tolerance ep.  Uses the module-level helpers
    oracle / estimateSet / estimateMarginal / randomSample / union.

    Returns a list of (len(S), oracle(S)) snapshots, one per round.
    """
    m = 10  # samples per Monte-Carlo estimate
    S = []
    y_adap = []
    for i in range(r):
        T = []  # elements accepted this round; merged into S at round end
        logging.info('r=' + str(i))
        fS = oracle(S)
        fST = oracle(union(S, T))
        while ((fST - fS) < (ep / 20) * (OPT - fS)) and (len(union(S, T)) < k):
            # FILTER Step
            # this only changes X
            vs = estimateSet(X, union(S, T), m)
            while (vs < (1 - ep) * (OPT - fST) / r):
                if debug:
                    logging.info('inner while loop')
                # get marginal contribution
                if parallel:
                    marg_a = Parallel(
                        n_jobs=-1, verbose=50)(
                            delayed(estimateMarginal)(X, union(S, T), a, m)
                            for a in X)
                else:
                    marg_a = [
                        estimateMarginal(X, union(S, T), a, m) for a in X
                    ]
                # Filter!  Keep only candidates whose estimated marginal
                # clears the (1 + ep/2)(1 - ep)(OPT - fST)/k threshold.
                Xnew = [
                    X[idx] for idx, el in enumerate(marg_a)
                    if el >= (1 + ep / 2) * (1 - ep) * (OPT - fST) / k
                ]
                X = Xnew
                # estimate if filtered set is good enough
                vs = estimateSet(X, union(S, T), m)
                if debug:
                    logging.info('Elements remaining: ' + str(len(X)))
                    logging.info('Check')
                    logging.info(vs < (1 - ep) * (OPT - fST) / r)
            # Pool is good enough: accept a random k/r-sized sample into T.
            R = randomSample(X, k / r)
            T = union(T, R)
            # T changes but S doesn't
            fST = oracle(union(S, T))
            if debug:
                logging.info('Outer Loop')
                logging.info(fST)
        S = union(S, T)
        fS = oracle(S)
        y_adap.append((len(S), fS))
    return y_adap
# AN EXAMPLE
# Driver: load a sparse design matrix + labels from disk and run the
# adaptive filter over all feature columns.
logging.info('Fetching Files')
X1 = scipy.sparse.load_npz('x_data.npz')
y_cat = np.load('y_data.npy')
all_predictors = list(range(X1.shape[1]))  # candidate feature indices
logging.info('Num Features: ' + str(len(all_predictors)))
logging.info('Starting Adaptive')
k = 50      # target number of selected features
r = 2       # number of rounds
ep = 0.01   # tolerance epsilon
OPT = 0.5   # assumed optimum objective value
y_adap = amortizedFilter(k, r, ep, OPT, all_predictors, parallel=True)
logging.info(y_adap)
| en | 0.64243 | #import boto3, botocore # logging.info num cores # if not enough elements, just return X # alg 4 # estimateSet(all_predictors, ['lower']) # repeat m times # sample size k/r m times # alg 5 # repeat m times # if there are not enough elements # Create logistic regression object # given set of features, return r2 # alg 3/6 # FILTER Step # this only changes X # get marginal contribution # Filter! # estimate if filtered set is good enough # T changes but S doesn't # AN EXAMPLE | 2.549634 | 3 |
data/object_data/converter.py | klejejs/BauskaTour | 0 | 6618722 | import csv
import json
# Converts data/object_data/mvp.csv into a JavaScript data file consumed
# by the front end.  Accumulators below are filled per CSV row, with
# separate Latvian (_lv) and English (_en) variants for each structure.
path = "./data/object_data/"
file_name = path + "mvp.csv"
type_array_lv = []        # distinct object types, in first-seen order
type_array_en = []
data_dict = {}            # title -> full object record
icon_dict_lv = {}         # object type -> icon filename
icon_dict_en = {}
color_dict_lv = {}        # object type -> icon color
color_dict_en = {}
hierarchy_dict_lv = {}    # parent type -> list of child types ("" if leaf root)
hierarchy_dict_en = {}
list_of_parents_lv = []   # top-level (parent) types
list_of_parents_en = []
# Parse the CSV: one row per object; first row is the header.
with open(file_name, "r", encoding="UTF-8") as f:
    reader = csv.reader(f)
    header = True
    for line in reader:
        if header:
            header = False
            continue
        # Fixed column layout (0..25); see mvp.csv header for names.
        title = line[0]
        address = line[1]
        if line[2] != "":
            # Coordinates stored as "lat, lon"; default to [0, 0] if missing.
            coordinates = [float(s) for s in line[2].split(", ")]
        else:
            coordinates = [0, 0]
        description_lv = line[3]
        description_en = line[4]
        references = line[5]
        object_type_lv = line[6]
        object_type_en = line[7]
        icon = line[8]
        iconColor = line[9]
        parent_lv = line[10]
        parent_en = line[11]
        additional_info_lv = line[12]
        additional_info_en = line[13]
        telephone = line[14]
        working_hours_monday = line[15]
        working_hours_tuesday = line[16]
        working_hours_wednesday = line[17]
        working_hours_thursday = line[18]
        working_hours_friday = line[19]
        working_hours_saturday = line[20]
        working_hours_sunday = line[21]
        photos = line[22]
        referencesImagesTitles = line[23]
        referencesImages = line[24]
        referencesTitles = line[25]
        # Empty string when no opening hours at all; otherwise a per-day map.
        if (working_hours_monday == "" and working_hours_tuesday == "" and working_hours_wednesday == "" and working_hours_thursday == "" and working_hours_friday == "" and working_hours_saturday == "" and working_hours_sunday == ""):
            opening_times = ""
        else:
            opening_times = {
                "monday": working_hours_monday,
                "tuesday": working_hours_tuesday,
                "wednesday": working_hours_wednesday,
                "thursday": working_hours_thursday,
                "friday": working_hours_friday,
                "saturday": working_hours_saturday,
                "sunday": working_hours_sunday
            }
        # Full per-object record keyed later by title.
        temp_dict = {
            "type_lv": object_type_lv,
            "type_en": object_type_en,
            "icon": icon + ".svg",
            "description_lv": description_lv,
            "description_en": description_en,
            "address": address,
            "coordinates": coordinates,
            "cellphone": telephone,
            "amenities": "",
            "price": "",
            "url": "",
            "additional_info_lv": additional_info_lv,
            "additional_info_en": additional_info_en,
            "references": references,
            "opening_times" : opening_times,
            "photos": photos,
            "referencesImagesTitles": referencesImagesTitles,
            "referencesImages": referencesImages,
            "referencesTitles": referencesTitles
        }
        # Register first-seen type names, icons and colors for both languages.
        if (object_type_lv not in type_array_lv):
            type_array_lv.append(object_type_lv)
        if (object_type_en not in type_array_en):
            type_array_en.append(object_type_en)
        if (object_type_lv not in icon_dict_lv.keys()):
            icon_dict_lv[object_type_lv] = icon + ".svg"
        if (object_type_en not in icon_dict_en.keys()):
            icon_dict_en[object_type_en] = icon + ".svg"
        if (object_type_lv not in color_dict_lv.keys()):
            color_dict_lv[object_type_lv] = iconColor
        if (object_type_en not in color_dict_en.keys()):
            color_dict_en[object_type_en] = iconColor
        # Hierarchy (LV): a type with no parent is a root (""), otherwise
        # it is appended to its parent's child list; parents also inherit
        # the child's icon color on first sight.
        if (parent_lv == ""):
            if (object_type_lv not in hierarchy_dict_lv.keys()):
                hierarchy_dict_lv[object_type_lv] = ""
                list_of_parents_lv.append(object_type_lv)
        else:
            if (parent_lv not in hierarchy_dict_lv.keys()):
                hierarchy_dict_lv[parent_lv] = [object_type_lv]
                list_of_parents_lv.append(parent_lv)
                color_dict_lv[parent_lv] = iconColor
            else:
                if object_type_lv not in hierarchy_dict_lv[parent_lv]:
                    hierarchy_dict_lv[parent_lv].append(object_type_lv)
        # Hierarchy (EN): mirrors the LV logic above.
        if (parent_en == ""):
            if (object_type_en not in hierarchy_dict_en.keys()):
                hierarchy_dict_en[object_type_en] = ""
                list_of_parents_en.append(object_type_en)
        else:
            if (parent_en not in hierarchy_dict_en.keys()):
                hierarchy_dict_en[parent_en] = [object_type_en]
                list_of_parents_en.append(parent_en)
                color_dict_en[parent_en] = iconColor
            else:
                if object_type_en not in hierarchy_dict_en[parent_en]:
                    hierarchy_dict_en[parent_en].append(object_type_en)
        # NOTE(review): a duplicate title would silently overwrite the
        # earlier record here -- confirm titles are unique in mvp.csv.
        data_dict[title] = temp_dict
# Bundle LV/EN variants together, serialize to JSON, and emit data.js
# as a set of global `let` declarations for the front end.
type_array = {
    "lv": type_array_lv,
    "en": type_array_en
}
icon_array = {
    "lv": icon_dict_lv,
    "en": icon_dict_en
}
color_array = {
    "lv": color_dict_lv,
    "en": color_dict_en
}
hierarchy_dict = {
    "lv": hierarchy_dict_lv,
    "en": hierarchy_dict_en
}
list_of_parents = {
    "lv": list_of_parents_lv,
    "en": list_of_parents_en
}
# ensure_ascii=False keeps Latvian diacritics readable in the output.
type_array = json.dumps(type_array, ensure_ascii=False)
icon_array = json.dumps(icon_array, ensure_ascii=False)
color_array = json.dumps(color_array, ensure_ascii=False)
data_dict = json.dumps(data_dict, ensure_ascii=False)
hierarchy_dict = json.dumps(hierarchy_dict, ensure_ascii=False)
list_of_parents = json.dumps(list_of_parents, ensure_ascii=False)
with open(path + "data.js", "w", encoding="UTF-8") as f:
    f.write("let parentsList = " + list_of_parents + ";\n")
    f.write("let hierarchyDict = " + hierarchy_dict + ";\n")
    f.write("let dataIcons = " + icon_array + ";\n")
    f.write("let dataColor = " + color_array + ";\n")
    f.write("let dataTypes = " + type_array + ";\n")
    f.write("let jsonData = " + data_dict + ";")
print("Done.")
| import csv
import json
path = "./data/object_data/"
file_name = path + "mvp.csv"
type_array_lv = []
type_array_en = []
data_dict = {}
icon_dict_lv = {}
icon_dict_en = {}
color_dict_lv = {}
color_dict_en = {}
hierarchy_dict_lv = {}
hierarchy_dict_en = {}
list_of_parents_lv = []
list_of_parents_en = []
with open(file_name, "r", encoding="UTF-8") as f:
reader = csv.reader(f)
header = True
for line in reader:
if header:
header = False
continue
title = line[0]
address = line[1]
if line[2] != "":
coordinates = [float(s) for s in line[2].split(", ")]
else:
coordinates = [0, 0]
description_lv = line[3]
description_en = line[4]
references = line[5]
object_type_lv = line[6]
object_type_en = line[7]
icon = line[8]
iconColor = line[9]
parent_lv = line[10]
parent_en = line[11]
additional_info_lv = line[12]
additional_info_en = line[13]
telephone = line[14]
working_hours_monday = line[15]
working_hours_tuesday = line[16]
working_hours_wednesday = line[17]
working_hours_thursday = line[18]
working_hours_friday = line[19]
working_hours_saturday = line[20]
working_hours_sunday = line[21]
photos = line[22]
referencesImagesTitles = line[23]
referencesImages = line[24]
referencesTitles = line[25]
if (working_hours_monday == "" and working_hours_tuesday == "" and working_hours_wednesday == "" and working_hours_thursday == "" and working_hours_friday == "" and working_hours_saturday == "" and working_hours_sunday == ""):
opening_times = ""
else:
opening_times = {
"monday": working_hours_monday,
"tuesday": working_hours_tuesday,
"wednesday": working_hours_wednesday,
"thursday": working_hours_thursday,
"friday": working_hours_friday,
"saturday": working_hours_saturday,
"sunday": working_hours_sunday
}
temp_dict = {
"type_lv": object_type_lv,
"type_en": object_type_en,
"icon": icon + ".svg",
"description_lv": description_lv,
"description_en": description_en,
"address": address,
"coordinates": coordinates,
"cellphone": telephone,
"amenities": "",
"price": "",
"url": "",
"additional_info_lv": additional_info_lv,
"additional_info_en": additional_info_en,
"references": references,
"opening_times" : opening_times,
"photos": photos,
"referencesImagesTitles": referencesImagesTitles,
"referencesImages": referencesImages,
"referencesTitles": referencesTitles
}
if (object_type_lv not in type_array_lv):
type_array_lv.append(object_type_lv)
if (object_type_en not in type_array_en):
type_array_en.append(object_type_en)
if (object_type_lv not in icon_dict_lv.keys()):
icon_dict_lv[object_type_lv] = icon + ".svg"
if (object_type_en not in icon_dict_en.keys()):
icon_dict_en[object_type_en] = icon + ".svg"
if (object_type_lv not in color_dict_lv.keys()):
color_dict_lv[object_type_lv] = iconColor
if (object_type_en not in color_dict_en.keys()):
color_dict_en[object_type_en] = iconColor
if (parent_lv == ""):
if (object_type_lv not in hierarchy_dict_lv.keys()):
hierarchy_dict_lv[object_type_lv] = ""
list_of_parents_lv.append(object_type_lv)
else:
if (parent_lv not in hierarchy_dict_lv.keys()):
hierarchy_dict_lv[parent_lv] = [object_type_lv]
list_of_parents_lv.append(parent_lv)
color_dict_lv[parent_lv] = iconColor
else:
if object_type_lv not in hierarchy_dict_lv[parent_lv]:
hierarchy_dict_lv[parent_lv].append(object_type_lv)
if (parent_en == ""):
if (object_type_en not in hierarchy_dict_en.keys()):
hierarchy_dict_en[object_type_en] = ""
list_of_parents_en.append(object_type_en)
else:
if (parent_en not in hierarchy_dict_en.keys()):
hierarchy_dict_en[parent_en] = [object_type_en]
list_of_parents_en.append(parent_en)
color_dict_en[parent_en] = iconColor
else:
if object_type_en not in hierarchy_dict_en[parent_en]:
hierarchy_dict_en[parent_en].append(object_type_en)
data_dict[title] = temp_dict
type_array = {
"lv": type_array_lv,
"en": type_array_en
}
icon_array = {
"lv": icon_dict_lv,
"en": icon_dict_en
}
color_array = {
"lv": color_dict_lv,
"en": color_dict_en
}
hierarchy_dict = {
"lv": hierarchy_dict_lv,
"en": hierarchy_dict_en
}
list_of_parents = {
"lv": list_of_parents_lv,
"en": list_of_parents_en
}
type_array = json.dumps(type_array, ensure_ascii=False)
icon_array = json.dumps(icon_array, ensure_ascii=False)
color_array = json.dumps(color_array, ensure_ascii=False)
data_dict = json.dumps(data_dict, ensure_ascii=False)
hierarchy_dict = json.dumps(hierarchy_dict, ensure_ascii=False)
list_of_parents = json.dumps(list_of_parents, ensure_ascii=False)
with open(path + "data.js", "w", encoding="UTF-8") as f:
f.write("let parentsList = " + list_of_parents + ";\n")
f.write("let hierarchyDict = " + hierarchy_dict + ";\n")
f.write("let dataIcons = " + icon_array + ";\n")
f.write("let dataColor = " + color_array + ";\n")
f.write("let dataTypes = " + type_array + ";\n")
f.write("let jsonData = " + data_dict + ";")
print("Done.")
| none | 1 | 2.689578 | 3 | |
Desafio030.py | GabrielSanchesRosa/Python | 0 | 6618723 | # Crie um programa que leia um número interio e diga se ele é PAR ou ÍMPAR.
# Reads an integer and reports whether it is even ("PAR") or odd ("IMPAR").
# User-facing strings are intentionally in Portuguese.
numero = int(input("Me diga um número qualquer: "))
# Remainder modulo 2: 0 means even.
resultado = numero % 2
if resultado == 0:
    print(f"O número {numero} é PAR.")
else:
    print(f"O número {numero} é ÍMPAR.")
| # Crie um programa que leia um número interio e diga se ele é PAR ou ÍMPAR.
numero = int(input("Me diga um número qualquer: "))
resultado = numero % 2
if resultado == 0:
print(f"O número {numero} é PAR.")
else:
print(f"O número {numero} é ÍMPAR.")
| pt | 0.982182 | # Crie um programa que leia um número interio e diga se ele é PAR ou ÍMPAR. | 3.976351 | 4 |
src/rdml_graph/core/GraphSearch.py | ianran/rdml_graph | 4 | 6618724 | <reponame>ianran/rdml_graph
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# @package AStar.py
# Written <NAME> February 2020 - Modified from code written October 2019
#
# The AStar algorithm written using the SearchState class
from rdml_graph.core import SearchState
from rdml_graph.core import State
import sys
# For the priority queue used by the AStar algorith.
import heapq
# For queue
import numpy as np
import pdb
## defualt huerestic function for AStar.
# it should work for any type of state, but when using the algorithm degrades
# to simply Dijkstra's algorithm rather than A*.
# @param x - the graph input path.
def default_h(x, data = None, goal=None):
    """Trivial heuristic that always returns 0, reducing A* to Dijkstra."""
    return 0.0
## A simple euclidean distance huerestic for the AStar algorithm
# I doubt this does particulary much to speed on computation if any at all.
def h_euclidean(n, data, goal):
    """Straight-line (L2) distance from n's node point to the first goal's point."""
    delta = n.node.pt - goal[0].pt
    return np.linalg.norm(delta)
## graphGoalCheck
# A basic graph checker looking for a particular node to be the same.
# @param n - the node to check
# @param data - some set of input data
def graph_goal_check(n, data, goal):
    """Goal test: the node n equals the goal node; `data` is unused."""
    matched = (n == goal)
    return matched
## pass_all
# goal check that allows all through
def pass_all(n, data, goal):
    """Permissive goal check: accepts every state unconditionally."""
    return True
## AStar
# A generic implementation of the AStar algorithm.
# An optimal graph search algorithm.
# If looking for shortest path to all nodes, see Dijkstra's algorithm.
# REQUIRED
# @param start - the start state of the search
# OPTIONAL
# Functions g and h have input types of (state, data, goal)
# @param g - a goal function to determine if the passed, state is in the goal set.
# @param h - a heuristic function for the AStar search needs type (state, data, goal)
# @param data - a potential set of input data for huerestics and goal states.
# @param goal - a potential set of goal data (REQUIRED by default)
# @param out_tree - if true, output tree, otherwise do not output a tree.
#
# @returns - list, cost
# an optimal list states to the goal state. - if no path return empty list.
# [first state, ---, goal state]
def AStar(start, g=graph_goal_check, h=default_h, data=None, goal=None,
          output_tree=False):
    """A* best-first search from `start` to a state satisfying `g`.

    Args:
        start: initial State.
        g: goal predicate g(state, data, goal).
        h: heuristic h(state, data, goal); default 0 (Dijkstra behavior).
        data, goal: opaque extra arguments forwarded to g and h.
        output_tree: if True, also return the root SearchState.

    Returns:
        (path, cost) -- or (path, cost, root) when output_tree is True.
        path is [] and cost is inf when no goal state is reachable.

    Note: the original left debugging artifacts (`print(i)` and
    `pdb.set_trace()`) inside the search loop, which dropped into the
    debugger on every iteration; they are removed here.
    """
    startState = SearchState(start, hCost=h(start, data, goal), id=0)
    frontier = [startState]
    explored = set()
    while len(frontier) > 0:
        # Pop the lowest f-cost state.
        cur = heapq.heappop(frontier)
        if cur.state in explored:
            continue
        # Goal test on expansion (required for A* optimality).
        if g(cur.state, data, goal):
            if output_tree:
                return cur.getPath(), cur.rCost, startState
            return cur.getPath(), cur.rCost
        explored.add(cur.state)
        # Expand: push each unexplored successor with its heuristic cost.
        for succ in cur.successor():
            if succ.state not in explored:
                succ.hCost = h(succ.state, data, goal)
                heapq.heappush(frontier, succ)
    # Frontier exhausted without reaching a goal.
    if output_tree:
        return [], float('inf'), startState
    return [], float('inf')
## dijkstra's algorithm (All nodes)
# This is dijkstra's algorithm ran to find the shortest path to all reachable
# nodes of a graph from the start location.
# See any algorithms book for description of dijkstra's algorithm.
# Very similar to the above AStar algorithm without being single query, and
# without a huerestic function.
# @param start - the start location for dijkstra's algorithm (must be a State class)
#
# @return - a dictionary of every SearchState in the tree. (key is state)
def dijkstra(start):
    """Single-source shortest paths over all states reachable from `start`.

    Returns a dict mapping each reached State to its SearchState, which
    carries the minimum-cost path back to `start`.
    """
    root = SearchState(start)
    frontier = [root]
    finished = {}
    while frontier:
        # Cheapest unfinished state first.
        current = heapq.heappop(frontier)
        if current.state in finished:
            continue
        finished[current.state] = current
        for nxt in current.successor():
            if nxt.state not in finished:
                heapq.heappush(frontier, nxt)
    return finished
# DFS
# Depth first search
# A depth first search which has a budget which acts as a dynamic iterative
# deepening search (IDS). This is a generic function for performing a search
# of a tree structure.
def DFS():
    """Depth-first search placeholder -- not yet implemented."""
    pass
# Not implemented
## BFS
# Breadth First Search algorithm. This has an optional budget which restricts
# expansion beyond the budget given the cost of the state function returned
# REQUIRED
# @param start - the starting
# OPTIONAL
# @param budget - the budget of the search, if ignored, no cost budget is given.
# @param g - a goal condition that must be met in order to be in the returned states.
# g(n, data, goal)
# @param data - input data for the goal function if required.
# @param goal - any goal data required by g function.
#
# @return - list of structs of [(path, cost),...] or [([n1,n2,n3,...], cost), ...]
def BFS(start, budget=float('inf'), g=pass_all, data=None, goal=None):
    """Breadth-first search collecting end states within a cost budget.

    A state is recorded as an end state when it satisfies g and either
    (a) its cost has reached the budget, or (b) it still had at least one
    unexplored successor when expanded (see NOTE below).

    Returns a list of (path, cost) tuples, one per recorded end state.
    """
    startState = SearchState(start)
    #frontier = queue.Queue()
    #frontier.put(startState)
    # NOTE(review): list.pop(0) is O(n); a collections.deque would be the
    # usual choice for a BFS frontier.
    frontier = [startState]
    explored = set()
    endStates = []
    while len(frontier) > 0:
        cur = frontier.pop(0)
        if cur.state not in explored:
            # add state to set of explored states
            explored.add(cur.state)
            # check for end cases
            if cur.cost() >= budget and g(cur.state, data, goal):
                endStates.append(cur)
            else:
                # get list of successors
                successors = cur.successor()
                # add all successors to frontier that have not been explored
                # NOTE(review): despite its name, anyExplored is True when at
                # least one successor was *not* yet explored -- confirm intent.
                anyExplored = False
                for succ in successors:
                    # check to make sure state hasn't already been explored.
                    if succ.state not in explored:
                        anyExplored = True
                        frontier.append(succ)
                if anyExplored == True and g(cur.state, data, goal):
                    endStates.append(cur)
    return [(s.getPath(), s.rCost) for s in endStates]
| # Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# @package AStar.py
# Written <NAME> February 2020 - Modified from code written October 2019
#
# The AStar algorithm written using the SearchState class
from rdml_graph.core import SearchState
from rdml_graph.core import State
import sys
# For the priority queue used by the AStar algorith.
import heapq
# For queue
import numpy as np
import pdb
## defualt huerestic function for AStar.
# it should work for any type of state, but when using the algorithm degrades
# to simply Dijkstra's algorithm rather than A*.
# @param x - the graph input path.
def default_h(x, data = None, goal=None):
return 0.0
## A simple euclidean distance huerestic for the AStar algorithm
# I doubt this does particulary much to speed on computation if any at all.
def h_euclidean(n, data, goal):
return np.linalg.norm(n.node.pt - goal[0].pt)
## graphGoalCheck
# A basic graph checker looking for a particular node to be the same.
# @param n - the node to check
# @param data - some set of input data
def graph_goal_check(n, data, goal):
return n == goal
## pass_all
# goal check that allows all through
def pass_all(n, data, goal):
return True
## AStar
# A generic implementation of the AStar algorithm.
# An optimal graph search algorithm.
# If looking for shortest path to all nodes, see Dijkstra's algorithm.
# REQUIRED
# @param start - the start state of the search
# OPTIONAL
# Functions g and h have input types of (state, data, goal)
# @param g - a goal function to determine if the passed, state is in the goal set.
# @param h - a heuristic function for the AStar search needs type (state, data, goal)
# @param data - a potential set of input data for huerestics and goal states.
# @param goal - a potential set of goal data (REQUIRED by default)
# @param out_tree - if true, output tree, otherwise do not output a tree.
#
# @returns - list, cost
# an optimal list states to the goal state. - if no path return empty list.
# [first state, ---, goal state]
def AStar(start, g=graph_goal_check, h = default_h, data = None, goal=None, \
output_tree=False):
startState = SearchState(start, hCost=h(start, data, goal), id=0)
frontier = [startState]
explored = set()
i = 0
cur_id = 1
while len(frontier) > 0:
#pdb.set_trace()
i += 1
# get current state to explore
cur = heapq.heappop(frontier)
if cur.state not in explored:
# check if the current state is in the goal state.
if g(cur.state, data, goal):
if output_tree:
return cur.getPath(), cur.rCost, startState
else:
return cur.getPath(), cur.rCost
# add state to set of explored states
explored.add(cur.state)
# get list of successors
successors = cur.successor()
# add all successors to frontier
for succ in successors:
# check to make sure state hasn't already been explored.
if succ.state not in explored:
# run heuristic function.
succ.hCost = h(succ.state, data, goal)
heapq.heappush(frontier, succ)
print(i)
pdb.set_trace()
# End of while, no solution found.
if output_tree:
return [], float('inf'), startState
else:
return [], float('inf')
## dijkstra's algorithm (All nodes)
# This is dijkstra's algorithm ran to find the shortest path to all reachable
# nodes of a graph from the start location.
# See any algorithms book for description of dijkstra's algorithm.
# Very similar to the above AStar algorithm without being single query, and
# without a huerestic function.
# @param start - the start location for dijkstra's algorithm (must be a State class)
#
# @return - a dictionary of every SearchState in the tree. (key is state)
def dijkstra(start):
startState = SearchState(start)
frontier = [startState]
explored = {}
while len(frontier) > 0:
# get current state to explore
cur = heapq.heappop(frontier)
if cur.state not in explored:
# add state to dict of explored states
explored[cur.state] = cur
# get list of successors
successors = cur.successor()
# add all successors to frontier
for succ in successors:
# check to make sure state hasn't already been explored.
if succ.state not in explored:
heapq.heappush(frontier, succ)
# End of while, return all found paths.
return explored
# DFS
# Depth first search
# A depth first search which has a budget which acts as a dynamic iterative
# deepening search (IDS). This is a generic function for performing a search
# of a tree structure.
def DFS():
pass
# Not implemented
## BFS
# Breadth First Search algorithm. This has an optional budget which restricts
# expansion beyond the budget given the cost of the state function returned
# REQUIRED
# @param start - the starting
# OPTIONAL
# @param budget - the budget of the search, if ignored, no cost budget is given.
# @param g - a goal condition that must be met in order to be in the returned states.
# g(n, data, goal)
# @param data - input data for the goal function if required.
# @param goal - any goal data required by g function.
#
# @return - list of structs of [(path, cost),...] or [([n1,n2,n3,...], cost), ...]
def BFS(start, budget=float('inf'), g=pass_all, data=None, goal=None):
startState = SearchState(start)
#frontier = queue.Queue()
#frontier.put(startState)
frontier = [startState]
explored = set()
endStates = []
while len(frontier) > 0:
cur = frontier.pop(0)
if cur.state not in explored:
# add state to set of explored states
explored.add(cur.state)
# check for end cases
if cur.cost() >= budget and g(cur.state, data, goal):
endStates.append(cur)
else:
# get list of successors
successors = cur.successor()
# add all successors to frontier that have not been explored
anyExplored = False
for succ in successors:
# check to make sure state hasn't already been explored.
if succ.state not in explored:
anyExplored = True
frontier.append(succ)
if anyExplored == True and g(cur.state, data, goal):
endStates.append(cur)
return [(s.getPath(), s.rCost) for s in endStates] | en | 0.796489 | # Copyright 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons # to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # @package AStar.py # Written <NAME> February 2020 - Modified from code written October 2019 # # The AStar algorithm written using the SearchState class # For the priority queue used by the AStar algorith. # For queue ## defualt huerestic function for AStar. # it should work for any type of state, but when using the algorithm degrades # to simply Dijkstra's algorithm rather than A*. # @param x - the graph input path. ## A simple euclidean distance huerestic for the AStar algorithm # I doubt this does particulary much to speed on computation if any at all. ## graphGoalCheck # A basic graph checker looking for a particular node to be the same. # @param n - the node to check # @param data - some set of input data ## pass_all # goal check that allows all through ## AStar # A generic implementation of the AStar algorithm. 
# An optimal graph search algorithm. # If looking for shortest path to all nodes, see Dijkstra's algorithm. # REQUIRED # @param start - the start state of the search # OPTIONAL # Functions g and h have input types of (state, data, goal) # @param g - a goal function to determine if the passed, state is in the goal set. # @param h - a heuristic function for the AStar search needs type (state, data, goal) # @param data - a potential set of input data for huerestics and goal states. # @param goal - a potential set of goal data (REQUIRED by default) # @param out_tree - if true, output tree, otherwise do not output a tree. # # @returns - list, cost # an optimal list states to the goal state. - if no path return empty list. # [first state, ---, goal state] #pdb.set_trace() # get current state to explore # check if the current state is in the goal state. # add state to set of explored states # get list of successors # add all successors to frontier # check to make sure state hasn't already been explored. # run heuristic function. # End of while, no solution found. ## dijkstra's algorithm (All nodes) # This is dijkstra's algorithm ran to find the shortest path to all reachable # nodes of a graph from the start location. # See any algorithms book for description of dijkstra's algorithm. # Very similar to the above AStar algorithm without being single query, and # without a huerestic function. # @param start - the start location for dijkstra's algorithm (must be a State class) # # @return - a dictionary of every SearchState in the tree. (key is state) # get current state to explore # add state to dict of explored states # get list of successors # add all successors to frontier # check to make sure state hasn't already been explored. # End of while, return all found paths. # DFS # Depth first search # A depth first search which has a budget which acts as a dynamic iterative # deepening search (IDS). This is a generic function for performing a search # of a tree structure. 
# Not implemented ## BFS # Breadth First Search algorithm. This has an optional budget which restricts # expansion beyond the budget given the cost of the state function returned # REQUIRED # @param start - the starting # OPTIONAL # @param budget - the budget of the search, if ignored, no cost budget is given. # @param g - a goal condition that must be met in order to be in the returned states. # g(n, data, goal) # @param data - input data for the goal function if required. # @param goal - any goal data required by g function. # # @return - list of structs of [(path, cost),...] or [([n1,n2,n3,...], cost), ...] #frontier = queue.Queue() #frontier.put(startState) # add state to set of explored states # check for end cases # get list of successors # add all successors to frontier that have not been explored # check to make sure state hasn't already been explored. | 2.138886 | 2 |
routewatch/DB/client.py | nerdalize/routewatch | 9 | 6618725 | from sqlalchemy.orm import sessionmaker
from routewatch.DB.declarative import *
engine = create_engine('sqlite:///core.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class DB(object):
"""
A lazy wrapper for sqlalchemy that gives it a somewhat REST like interface.
"""
tables = dict(Prefix=Prefix, Recipient=Recipient, Settings=Settings)
def create(self, table, **kwargs):
new = self.tables[table](**kwargs)
session.add(new)
session.commit()
def get(self, table, **kwargs):
objs = session.query(self.tables[table])
for key, value in kwargs.items():
objs = objs.filter(getattr(self.tables[table], key) == value)
objs = objs.all()
return objs
def delete(self, table, **kwargs):
objs = session.query(self.tables[table])
for key, value in kwargs.items():
objs = objs.filter(getattr(self.tables[table], key) == value)
objs = objs.all()
for obj in objs:
session.delete(obj)
def commit(self):
session.commit()
| from sqlalchemy.orm import sessionmaker
from routewatch.DB.declarative import *
engine = create_engine('sqlite:///core.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class DB(object):
"""
A lazy wrapper for sqlalchemy that gives it a somewhat REST like interface.
"""
tables = dict(Prefix=Prefix, Recipient=Recipient, Settings=Settings)
def create(self, table, **kwargs):
new = self.tables[table](**kwargs)
session.add(new)
session.commit()
def get(self, table, **kwargs):
objs = session.query(self.tables[table])
for key, value in kwargs.items():
objs = objs.filter(getattr(self.tables[table], key) == value)
objs = objs.all()
return objs
def delete(self, table, **kwargs):
objs = session.query(self.tables[table])
for key, value in kwargs.items():
objs = objs.filter(getattr(self.tables[table], key) == value)
objs = objs.all()
for obj in objs:
session.delete(obj)
def commit(self):
session.commit()
| en | 0.792332 | A lazy wrapper for sqlalchemy that gives it a somewhat REST like interface. | 2.679671 | 3 |
LeetCode/781 Rabbits in Forest.py | gesuwen/Algorithms | 0 | 6618726 | <filename>LeetCode/781 Rabbits in Forest.py<gh_stars>0
# Hash Table; Maths
# In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other rabbits have the same color as them. Those answers are placed in an array.
#
# Return the minimum number of rabbits that could be in the forest.
#
# Examples:
# Input: answers = [1, 1, 2]
# Output: 5
# Explanation:
# The two rabbits that answered "1" could both be the same color, say red.
# The rabbit than answered "2" can't be red or the answers would be inconsistent.
# Say the rabbit that answered "2" was blue.
# Then there should be 2 other blue rabbits in the forest that didn't answer into the array.
# The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't.
#
# Input: answers = [10, 10, 10]
# Output: 11
#
# Input: answers = []
# Output: 0
# Note:
#
# answers will have length at most 1000.
# Each answers[i] will be an integer in the range [0, 999].
class Solution(object):
def numRabbits(self, answers):
"""
:type answers: List[int]
:rtype: int
"""
ansDict = collections.Counter(answers)
output = 0
while ansDict:
x = ansDict.keys()[0]
if ansDict[x] <= x+1:
output += x+1
ansDict.pop(x, None)
else:
ansDict[x] -= x+1
output += x+1
return output
| <filename>LeetCode/781 Rabbits in Forest.py<gh_stars>0
# Hash Table; Maths
# In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other rabbits have the same color as them. Those answers are placed in an array.
#
# Return the minimum number of rabbits that could be in the forest.
#
# Examples:
# Input: answers = [1, 1, 2]
# Output: 5
# Explanation:
# The two rabbits that answered "1" could both be the same color, say red.
# The rabbit than answered "2" can't be red or the answers would be inconsistent.
# Say the rabbit that answered "2" was blue.
# Then there should be 2 other blue rabbits in the forest that didn't answer into the array.
# The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't.
#
# Input: answers = [10, 10, 10]
# Output: 11
#
# Input: answers = []
# Output: 0
# Note:
#
# answers will have length at most 1000.
# Each answers[i] will be an integer in the range [0, 999].
class Solution(object):
def numRabbits(self, answers):
"""
:type answers: List[int]
:rtype: int
"""
ansDict = collections.Counter(answers)
output = 0
while ansDict:
x = ansDict.keys()[0]
if ansDict[x] <= x+1:
output += x+1
ansDict.pop(x, None)
else:
ansDict[x] -= x+1
output += x+1
return output
| en | 0.954302 | # Hash Table; Maths # In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other rabbits have the same color as them. Those answers are placed in an array. # # Return the minimum number of rabbits that could be in the forest. # # Examples: # Input: answers = [1, 1, 2] # Output: 5 # Explanation: # The two rabbits that answered "1" could both be the same color, say red. # The rabbit than answered "2" can't be red or the answers would be inconsistent. # Say the rabbit that answered "2" was blue. # Then there should be 2 other blue rabbits in the forest that didn't answer into the array. # The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't. # # Input: answers = [10, 10, 10] # Output: 11 # # Input: answers = [] # Output: 0 # Note: # # answers will have length at most 1000. # Each answers[i] will be an integer in the range [0, 999]. :type answers: List[int] :rtype: int | 3.805588 | 4 |
lib/clckwrkbdgr/jobsequence/script.py | umi0451/dotfiles | 2 | 6618727 | <reponame>umi0451/dotfiles
import os, sys, platform, stat
try:
from pathlib2 import Path
except ImportError: # pragma: no cover
from pathlib import Path
import six
class Script(object): # pragma: no cover -- TODO need mocks.
def __init__(self, name, shebang=None, rootdir=None, overwrite=True):
""" Creates script with specified name.
Creates both path (by default is CWD) and script file.
If shebang is specified, puts it in the first line.
Makes script executable (depends on platform).
If overwrite is False, raises an error when file already exists.
Otherwise (by default) completely rewrites the file.
"""
rootdir = Path(rootdir or '.')
rootdir.mkdir(exist_ok=True, parents=True)
rootdir.resolve()
self.filename = rootdir/name
if not overwrite and self.filename.exists():
raise RuntimeError("Script file already exists: {0}".format(self.filename))
with self.filename.open('wb') as f:
if shebang:
f.write(shebang.encode('utf-8', 'replace') + b'\n')
self._make_executable(self.filename)
@staticmethod
def _make_executable(filename):
if platform.system() == 'Windows':
return
mode = filename.stat().st_mode
filename.chmod(mode | stat.S_IXUSR)
def append(self, line):
""" Appends line (string or bytes) to a file.
Automatically puts linebreak if it is not present.
"""
mode = 'a+'
line_ending = '\n'
if isinstance(line, six.binary_type):
mode = 'ab+'
line_ending = b'\n'
with self.filename.open(mode) as f:
f.write(line)
if not line.endswith(line_ending):
f.write(line_ending)
return self
def __iadd__(self, line):
""" Shortcut for append():
script += "line"
"""
return self.append(line)
| import os, sys, platform, stat
try:
from pathlib2 import Path
except ImportError: # pragma: no cover
from pathlib import Path
import six
class Script(object): # pragma: no cover -- TODO need mocks.
def __init__(self, name, shebang=None, rootdir=None, overwrite=True):
""" Creates script with specified name.
Creates both path (by default is CWD) and script file.
If shebang is specified, puts it in the first line.
Makes script executable (depends on platform).
If overwrite is False, raises an error when file already exists.
Otherwise (by default) completely rewrites the file.
"""
rootdir = Path(rootdir or '.')
rootdir.mkdir(exist_ok=True, parents=True)
rootdir.resolve()
self.filename = rootdir/name
if not overwrite and self.filename.exists():
raise RuntimeError("Script file already exists: {0}".format(self.filename))
with self.filename.open('wb') as f:
if shebang:
f.write(shebang.encode('utf-8', 'replace') + b'\n')
self._make_executable(self.filename)
@staticmethod
def _make_executable(filename):
if platform.system() == 'Windows':
return
mode = filename.stat().st_mode
filename.chmod(mode | stat.S_IXUSR)
def append(self, line):
""" Appends line (string or bytes) to a file.
Automatically puts linebreak if it is not present.
"""
mode = 'a+'
line_ending = '\n'
if isinstance(line, six.binary_type):
mode = 'ab+'
line_ending = b'\n'
with self.filename.open(mode) as f:
f.write(line)
if not line.endswith(line_ending):
f.write(line_ending)
return self
def __iadd__(self, line):
""" Shortcut for append():
script += "line"
"""
return self.append(line) | en | 0.767901 | # pragma: no cover # pragma: no cover -- TODO need mocks. Creates script with specified name. Creates both path (by default is CWD) and script file. If shebang is specified, puts it in the first line. Makes script executable (depends on platform). If overwrite is False, raises an error when file already exists. Otherwise (by default) completely rewrites the file. Appends line (string or bytes) to a file. Automatically puts linebreak if it is not present. Shortcut for append(): script += "line" | 2.682896 | 3 |
tests/cron_job_tests/test_genomic_validation.py | all-of-us/raw-data-repository | 39 | 6618728 | <gh_stars>10-100
import datetime
import mock
from rdr_service import clock
from rdr_service.dao.genomics_dao import GenomicSetDao, GenomicSetMemberDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.genomic.validation import validate_and_update_genomic_set_by_id
from rdr_service.model.genomics import (
GenomicSet,
GenomicSetMember,
)
from rdr_service.model.participant import Participant
from rdr_service.participant_enums import SampleStatus, WithdrawalStatus
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag
from tests.helpers.unittest_base import BaseTestCase
class GenomicSetValidationBaseTestCase(BaseTestCase):
    """Base fixture for genomic set validation tests.

    Supplies factory helpers that insert a participant, participant
    summary, genomic set, and set member with valid defaults; any field
    can be overridden per test through keyword arguments.
    """

    def setUp(self):
        super(GenomicSetValidationBaseTestCase, self).setUp()
        self.participant_dao = ParticipantDao()
        self.summary_dao = ParticipantSummaryDao()
        self.genomic_set_dao = GenomicSetDao()
        self.genomic_member_dao = GenomicSetMemberDao()
        self._participant_i = 0  # monotonically increasing participant/biobank id
        self.setup_data()

    def setup_data(self):
        """Hook for subclasses that need extra fixtures; no-op by default."""
        pass

    def make_participant(self, **kwargs):
        """Insert and return a participant; defaults produce a valid one."""
        next_id = self._participant_i
        self._participant_i += 1
        new_participant = Participant(participantId=next_id, biobankId=next_id, **kwargs)
        self.participant_dao.insert(new_participant)
        return new_participant

    def make_summary(self, participant, **override_kwargs):
        """Insert and return a participant summary; defaults produce a valid one."""
        summary_kwargs = {
            "participantId": participant.participantId,
            "biobankId": participant.biobankId,
            "withdrawalStatus": participant.withdrawalStatus,
            "dateOfBirth": datetime.datetime(2000, 1, 1),
            "firstName": "foo",
            "lastName": "bar",
            "zipCode": "12345",
            "sampleStatus1ED04": SampleStatus.RECEIVED,
            "sampleStatus1SAL2": SampleStatus.RECEIVED,
            "samplesToIsolateDNA": SampleStatus.RECEIVED,
            "consentForStudyEnrollmentTime": datetime.datetime(2019, 1, 1),
            "participantOrigin": "example",
        }
        summary_kwargs.update(override_kwargs)
        summary = self.data_generator._participant_summary_with_defaults(**summary_kwargs)
        self.summary_dao.insert(summary)
        return summary

    def make_genomic_set(self, **override_kwargs):
        """Insert and return a genomic set; defaults produce a valid one."""
        set_kwargs = {
            "genomicSetName": "foo",
            "genomicSetCriteria": "something",
            "genomicSetVersion": 1,
            "genomicSetStatus": GenomicSetStatus.UNSET,
        }
        set_kwargs.update(override_kwargs)
        genomic_set = GenomicSet(**set_kwargs)
        self.genomic_set_dao.insert(genomic_set)
        return genomic_set

    def make_genomic_member(self, genomic_set, participant, **override_kwargs):
        """Insert and return a genomic set member; defaults produce a valid one."""
        member_kwargs = {
            "genomicSetId": genomic_set.id,
            "participantId": participant.participantId,
            "sexAtBirth": "F",
            "biobankId": participant.biobankId,
        }
        member_kwargs.update(override_kwargs)
        member = GenomicSetMember(**member_kwargs)
        self.genomic_member_dao.insert(member)
        return member
# TODO: represent in new test suite
class GenomicSetMemberValidationTestCase(GenomicSetValidationBaseTestCase):
    """Verifies that member-level validation sets per-member status/flags
    and rolls the result up to the genomic set's overall status."""

    def test_test_defaults_are_valid(self):
        """A member built entirely from fixture defaults validates as VALID."""
        participant = self.make_participant()
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.VALID)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.VALID)

    def test_duplicate(self):
        """Membership in an earlier VALID set does not invalidate a new set."""
        participant = self.make_participant()
        self.make_summary(participant)
        genomic_set_a = self.make_genomic_set(genomicSetName="A", genomicSetStatus=GenomicSetStatus.VALID)
        self.make_genomic_member(genomic_set_a, participant)
        genomic_set_b = self.make_genomic_set(genomicSetName="B")
        member_b = self.make_genomic_member(genomic_set_b, participant)
        validate_and_update_genomic_set_by_id(genomic_set_b.id)
        current_member = self.genomic_member_dao.get(member_b.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.VALID)
        current_set = self.genomic_set_dao.get(genomic_set_b.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.VALID)

    def test_consent(self):
        """A too-early consent timestamp flags INVALID_CONSENT."""
        participant = self.make_participant()
        self.make_summary(participant, consentForStudyEnrollmentTime=datetime.datetime(2017, 1, 1))
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_CONSENT, current_member.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_consent_null(self):
        """A missing consent timestamp also flags INVALID_CONSENT."""
        participant = self.make_participant()
        self.make_summary(participant, consentForStudyEnrollmentTime=None)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_CONSENT, current_member.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_withdrawn(self):
        """A withdrawn (NO_USE) participant flags INVALID_WITHDRAW_STATUS."""
        participant = self.make_participant(withdrawalStatus=WithdrawalStatus.NO_USE)
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_WITHDRAW_STATUS, current_member.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_sexatbirth(self):
        """An unrecognized sexAtBirth value flags INVALID_SEX_AT_BIRTH."""
        participant = self.make_participant()
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant, sexAtBirth="foo")
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_SEX_AT_BIRTH, current_member.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_age(self):
        """Participants under 18 at validation time flag INVALID_AGE."""
        now = datetime.datetime(2019, 1, 1)
        valid_date_of_birth = datetime.datetime(now.year - 18, now.month, now.day)
        invalid_date_of_birth = datetime.datetime(now.year - 17, now.month, now.day)
        participant_a = self.make_participant()
        self.make_summary(participant_a, dateOfBirth=valid_date_of_birth)
        participant_b = self.make_participant()
        self.make_summary(participant_b, dateOfBirth=invalid_date_of_birth)
        genomic_set = self.make_genomic_set()
        member_a = self.make_genomic_member(genomic_set, participant_a)
        member_b = self.make_genomic_member(genomic_set, participant_b)
        with clock.FakeClock(datetime.datetime(2019, 1, 1)):
            validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member_a = self.genomic_member_dao.get(member_a.id)
        current_member_b = self.genomic_member_dao.get(member_b.id)
        self.assertEqual(current_member_a.validationStatus, GenomicSetMemberStatus.VALID)
        self.assertEqual(current_member_b.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_AGE, current_member_b.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_biobank_status(self):
        """A member needs a DNA-isolation sample plus at least one of the
        1ED04/1SAL2 samples received; otherwise INVALID_BIOBANK_ORDER."""

        def make_member(genomic_set, **summary_kwargs):
            # Each combination gets its own participant/summary pair.
            participant = self.make_participant()
            self.make_summary(participant, **summary_kwargs)
            return self.make_genomic_member(genomic_set, participant)

        # (summary overrides, expected member status, expected flags)
        kwargs_with_expected_status_and_flags = [
            (
                {
                    "sampleStatus1ED04": SampleStatus.UNSET,
                    "sampleStatus1SAL2": SampleStatus.UNSET,
                    "samplesToIsolateDNA": SampleStatus.UNSET,
                },
                GenomicSetMemberStatus.INVALID,
                [GenomicValidationFlag.INVALID_BIOBANK_ORDER],
            ),
            (
                {
                    "sampleStatus1ED04": SampleStatus.RECEIVED,
                    "sampleStatus1SAL2": SampleStatus.UNSET,
                    "samplesToIsolateDNA": SampleStatus.UNSET,
                },
                GenomicSetMemberStatus.INVALID,
                [GenomicValidationFlag.INVALID_BIOBANK_ORDER],
            ),
            (
                {
                    "sampleStatus1ED04": SampleStatus.UNSET,
                    "sampleStatus1SAL2": SampleStatus.RECEIVED,
                    "samplesToIsolateDNA": SampleStatus.UNSET,
                },
                GenomicSetMemberStatus.INVALID,
                [GenomicValidationFlag.INVALID_BIOBANK_ORDER],
            ),
            (
                {
                    "sampleStatus1ED04": SampleStatus.UNSET,
                    "sampleStatus1SAL2": SampleStatus.UNSET,
                    "samplesToIsolateDNA": SampleStatus.RECEIVED,
                },
                GenomicSetMemberStatus.INVALID,
                [GenomicValidationFlag.INVALID_BIOBANK_ORDER],
            ),
            (
                {
                    "sampleStatus1ED04": SampleStatus.RECEIVED,
                    "sampleStatus1SAL2": SampleStatus.UNSET,
                    "samplesToIsolateDNA": SampleStatus.RECEIVED,
                },
                GenomicSetMemberStatus.VALID,
                [],
            ),
            (
                {
                    "sampleStatus1ED04": SampleStatus.UNSET,
                    "sampleStatus1SAL2": SampleStatus.RECEIVED,
                    "samplesToIsolateDNA": SampleStatus.RECEIVED,
                },
                GenomicSetMemberStatus.VALID,
                [],
            ),
        ]
        genomic_set = self.make_genomic_set()
        runs = [
            (make_member(genomic_set, **kwargs), kwargs, status, flags)
            for kwargs, status, flags in kwargs_with_expected_status_and_flags
        ]
        validate_and_update_genomic_set_by_id(genomic_set.id)
        for member, kwargs, expected_status, expected_flags in runs:
            current_member = self.genomic_member_dao.get(member.id)
            self.assertEqual(current_member.validationStatus, expected_status)
            for flag in expected_flags:
                self.assertIn(flag, current_member.validationFlags)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)

    def test_ny_zip_code(self):
        """A missing or empty zip code flags INVALID_NY_ZIPCODE."""
        participant_a = self.make_participant()
        self.make_summary(participant_a, zipCode=None)
        participant_b = self.make_participant()
        self.make_summary(participant_b, zipCode="")
        participant_c = self.make_participant()
        self.make_summary(participant_c, zipCode="12345")
        genomic_set = self.make_genomic_set()
        member_a = self.make_genomic_member(genomic_set, participant_a)
        member_b = self.make_genomic_member(genomic_set, participant_b)
        member_c = self.make_genomic_member(genomic_set, participant_c)
        with clock.FakeClock(datetime.datetime(2019, 1, 1)):
            validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member_a = self.genomic_member_dao.get(member_a.id)
        current_member_b = self.genomic_member_dao.get(member_b.id)
        current_member_c = self.genomic_member_dao.get(member_c.id)
        self.assertEqual(current_member_a.validationStatus, GenomicSetMemberStatus.INVALID)
        self.assertIn(GenomicValidationFlag.INVALID_NY_ZIPCODE, current_member_a.validationFlags)
        self.assertEqual(current_member_b.validationStatus, GenomicSetMemberStatus.INVALID)
        # Bug fix: this previously re-checked member_a's flags, so member_b's
        # flags were never asserted.
        self.assertIn(GenomicValidationFlag.INVALID_NY_ZIPCODE, current_member_b.validationFlags)
        self.assertEqual(current_member_c.validationStatus, GenomicSetMemberStatus.VALID)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
class GenomicSetValidationSafetyTestCase(GenomicSetValidationBaseTestCase):
    """Safety-net checks: validation runs transactionally and only stamps
    validated times when the set actually validates."""

    def test_transaction(self):
        """A failure mid-validation rolls back all member/set updates."""
        subject = self.make_participant()
        self.make_summary(subject)
        gset = self.make_genomic_set()
        gmember = self.make_genomic_member(gset, subject)
        patch_target = "rdr_service.genomic.validation.GenomicSetDao.update_with_session"
        with mock.patch(patch_target) as mocked_set_update:
            mocked_set_update.side_effect = Exception("baz")
            with clock.FakeClock(datetime.datetime(2019, 1, 1)):
                with self.assertRaises(Exception):
                    validate_and_update_genomic_set_by_id(gset.id)
        # Nothing should have been persisted.
        self.assertIsNone(self.genomic_member_dao.get(gmember.id).validationStatus)
        self.assertIsNone(self.genomic_set_dao.get(gset.id).genomicSetStatus)

    def test_invalid_does_not_update_validated_time(self):
        """An invalid set leaves validatedTime unset on member and set."""
        subject = self.make_participant(withdrawalStatus=WithdrawalStatus.NO_USE)
        self.make_summary(subject)
        gset = self.make_genomic_set()
        gmember = self.make_genomic_member(gset, subject)
        validate_and_update_genomic_set_by_id(gset.id)
        self.assertIsNone(self.genomic_member_dao.get(gmember.id).validatedTime)
        self.assertIsNone(self.genomic_set_dao.get(gset.id).validatedTime)

    def test_valid_does_update_validated_time(self):
        """A valid set stamps validatedTime with the (faked) current time."""
        subject = self.make_participant()
        self.make_summary(subject)
        gset = self.make_genomic_set()
        gmember = self.make_genomic_member(gset, subject)
        frozen_time = datetime.datetime(2019, 1, 1)
        with clock.FakeClock(frozen_time):
            validate_and_update_genomic_set_by_id(gset.id)
        self.assertEqual(self.genomic_member_dao.get(gmember.id).validatedTime, frozen_time)
        self.assertEqual(self.genomic_set_dao.get(gset.id).validatedTime, frozen_time)
| import datetime
import mock
from rdr_service import clock
from rdr_service.dao.genomics_dao import GenomicSetDao, GenomicSetMemberDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.genomic.validation import validate_and_update_genomic_set_by_id
from rdr_service.model.genomics import (
GenomicSet,
GenomicSetMember,
)
from rdr_service.model.participant import Participant
from rdr_service.participant_enums import SampleStatus, WithdrawalStatus
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag
from tests.helpers.unittest_base import BaseTestCase
class GenomicSetValidationBaseTestCase(BaseTestCase):
    """Base fixture for genomic set validation tests.

    Supplies factory helpers that insert a participant, participant
    summary, genomic set, and set member with valid defaults; any field
    can be overridden per test through keyword arguments.
    """

    def setUp(self):
        super(GenomicSetValidationBaseTestCase, self).setUp()
        self.participant_dao = ParticipantDao()
        self.summary_dao = ParticipantSummaryDao()
        self.genomic_set_dao = GenomicSetDao()
        self.genomic_member_dao = GenomicSetMemberDao()
        self._participant_i = 0  # monotonically increasing participant/biobank id
        self.setup_data()

    def setup_data(self):
        """Hook for subclasses that need extra fixtures; no-op by default."""
        pass

    def make_participant(self, **kwargs):
        """Insert and return a participant; defaults produce a valid one."""
        next_id = self._participant_i
        self._participant_i += 1
        new_participant = Participant(participantId=next_id, biobankId=next_id, **kwargs)
        self.participant_dao.insert(new_participant)
        return new_participant

    def make_summary(self, participant, **override_kwargs):
        """Insert and return a participant summary; defaults produce a valid one."""
        summary_kwargs = {
            "participantId": participant.participantId,
            "biobankId": participant.biobankId,
            "withdrawalStatus": participant.withdrawalStatus,
            "dateOfBirth": datetime.datetime(2000, 1, 1),
            "firstName": "foo",
            "lastName": "bar",
            "zipCode": "12345",
            "sampleStatus1ED04": SampleStatus.RECEIVED,
            "sampleStatus1SAL2": SampleStatus.RECEIVED,
            "samplesToIsolateDNA": SampleStatus.RECEIVED,
            "consentForStudyEnrollmentTime": datetime.datetime(2019, 1, 1),
            "participantOrigin": "example",
        }
        summary_kwargs.update(override_kwargs)
        summary = self.data_generator._participant_summary_with_defaults(**summary_kwargs)
        self.summary_dao.insert(summary)
        return summary

    def make_genomic_set(self, **override_kwargs):
        """Insert and return a genomic set; defaults produce a valid one."""
        set_kwargs = {
            "genomicSetName": "foo",
            "genomicSetCriteria": "something",
            "genomicSetVersion": 1,
            "genomicSetStatus": GenomicSetStatus.UNSET,
        }
        set_kwargs.update(override_kwargs)
        genomic_set = GenomicSet(**set_kwargs)
        self.genomic_set_dao.insert(genomic_set)
        return genomic_set

    def make_genomic_member(self, genomic_set, participant, **override_kwargs):
        """Insert and return a genomic set member; defaults produce a valid one."""
        member_kwargs = {
            "genomicSetId": genomic_set.id,
            "participantId": participant.participantId,
            "sexAtBirth": "F",
            "biobankId": participant.biobankId,
        }
        member_kwargs.update(override_kwargs)
        member = GenomicSetMember(**member_kwargs)
        self.genomic_member_dao.insert(member)
        return member
# TODO: represent in new test suite
class GenomicSetMemberValidationTestCase(GenomicSetValidationBaseTestCase):
def test_test_defaults_are_valid(self):
participant = self.make_participant()
self.make_summary(participant)
genomic_set = self.make_genomic_set()
member = self.make_genomic_member(genomic_set, participant)
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.VALID)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.VALID)
def test_duplicate(self):
participant = self.make_participant()
self.make_summary(participant)
genomic_set_a = self.make_genomic_set(genomicSetName="A", genomicSetStatus=GenomicSetStatus.VALID)
self.make_genomic_member(genomic_set_a, participant)
genomic_set_b = self.make_genomic_set(genomicSetName="B")
member_b = self.make_genomic_member(genomic_set_b, participant)
validate_and_update_genomic_set_by_id(genomic_set_b.id)
current_member = self.genomic_member_dao.get(member_b.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.VALID)
current_set = self.genomic_set_dao.get(genomic_set_b.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.VALID)
def test_consent(self):
participant = self.make_participant()
self.make_summary(participant, consentForStudyEnrollmentTime=datetime.datetime(2017, 1, 1))
genomic_set = self.make_genomic_set()
member = self.make_genomic_member(genomic_set, participant)
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
self.assertIn(GenomicValidationFlag.INVALID_CONSENT, current_member.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_consent_null(self):
participant = self.make_participant()
self.make_summary(participant, consentForStudyEnrollmentTime=None)
genomic_set = self.make_genomic_set()
member = self.make_genomic_member(genomic_set, participant)
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
self.assertIn(GenomicValidationFlag.INVALID_CONSENT, current_member.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_withdrawn(self):
participant = self.make_participant(withdrawalStatus=WithdrawalStatus.NO_USE)
self.make_summary(participant)
genomic_set = self.make_genomic_set()
member = self.make_genomic_member(genomic_set, participant)
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
self.assertIn(GenomicValidationFlag.INVALID_WITHDRAW_STATUS, current_member.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_sexatbirth(self):
participant = self.make_participant()
self.make_summary(participant)
genomic_set = self.make_genomic_set()
member = self.make_genomic_member(genomic_set, participant, sexAtBirth="foo")
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, GenomicSetMemberStatus.INVALID)
self.assertIn(GenomicValidationFlag.INVALID_SEX_AT_BIRTH, current_member.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_age(self):
now = datetime.datetime(2019, 1, 1)
valid_date_of_birth = datetime.datetime(now.year - 18, now.month, now.day)
invalid_date_of_birth = datetime.datetime(now.year - 17, now.month, now.day)
participant_a = self.make_participant()
self.make_summary(participant_a, dateOfBirth=valid_date_of_birth)
participant_b = self.make_participant()
self.make_summary(participant_b, dateOfBirth=invalid_date_of_birth)
genomic_set = self.make_genomic_set()
member_a = self.make_genomic_member(genomic_set, participant_a)
member_b = self.make_genomic_member(genomic_set, participant_b)
with clock.FakeClock(datetime.datetime(2019, 1, 1)):
validate_and_update_genomic_set_by_id(genomic_set.id)
current_member_a = self.genomic_member_dao.get(member_a.id)
current_member_b = self.genomic_member_dao.get(member_b.id)
self.assertEqual(current_member_a.validationStatus, GenomicSetMemberStatus.VALID)
self.assertEqual(current_member_b.validationStatus, GenomicSetMemberStatus.INVALID)
self.assertIn(GenomicValidationFlag.INVALID_AGE, current_member_b.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_biobank_status(self):
def make_member(genomic_set, **summary_kwargs):
participant = self.make_participant()
self.make_summary(participant, **summary_kwargs)
return self.make_genomic_member(genomic_set, participant)
kwargs_with_expected_status_and_flags = [
(
{
"sampleStatus1ED04": SampleStatus.UNSET,
"sampleStatus1SAL2": SampleStatus.UNSET,
"samplesToIsolateDNA": SampleStatus.UNSET,
},
GenomicSetMemberStatus.INVALID,
[GenomicValidationFlag.INVALID_BIOBANK_ORDER],
),
(
{
"sampleStatus1ED04": SampleStatus.RECEIVED,
"sampleStatus1SAL2": SampleStatus.UNSET,
"samplesToIsolateDNA": SampleStatus.UNSET,
},
GenomicSetMemberStatus.INVALID,
[GenomicValidationFlag.INVALID_BIOBANK_ORDER],
),
(
{
"sampleStatus1ED04": SampleStatus.UNSET,
"sampleStatus1SAL2": SampleStatus.RECEIVED,
"samplesToIsolateDNA": SampleStatus.UNSET,
},
GenomicSetMemberStatus.INVALID,
[GenomicValidationFlag.INVALID_BIOBANK_ORDER],
),
(
{
"sampleStatus1ED04": SampleStatus.UNSET,
"sampleStatus1SAL2": SampleStatus.UNSET,
"samplesToIsolateDNA": SampleStatus.RECEIVED,
},
GenomicSetMemberStatus.INVALID,
[GenomicValidationFlag.INVALID_BIOBANK_ORDER],
),
(
{
"sampleStatus1ED04": SampleStatus.RECEIVED,
"sampleStatus1SAL2": SampleStatus.UNSET,
"samplesToIsolateDNA": SampleStatus.RECEIVED,
},
GenomicSetMemberStatus.VALID,
[],
),
(
{
"sampleStatus1ED04": SampleStatus.UNSET,
"sampleStatus1SAL2": SampleStatus.RECEIVED,
"samplesToIsolateDNA": SampleStatus.RECEIVED,
},
GenomicSetMemberStatus.VALID,
[],
),
]
genomic_set = self.make_genomic_set()
runs = [
(make_member(genomic_set, **kwargs), kwargs, status, flags)
for kwargs, status, flags in kwargs_with_expected_status_and_flags
]
validate_and_update_genomic_set_by_id(genomic_set.id)
for member, kwargs, expected_status, expected_flags in runs:
current_member = self.genomic_member_dao.get(member.id)
self.assertEqual(current_member.validationStatus, expected_status)
for flag in expected_flags:
self.assertIn(flag, current_member.validationFlags)
current_set = self.genomic_set_dao.get(genomic_set.id)
self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
def test_ny_zip_code(self):
    """Members whose participant summary lacks a zip code are flagged invalid.

    Three participants are set up: missing zip (None), empty zip (""),
    and a populated zip ("12345"); only the last should validate.
    """
    participant_a = self.make_participant()
    self.make_summary(participant_a, zipCode=None)
    participant_b = self.make_participant()
    self.make_summary(participant_b, zipCode="")
    participant_c = self.make_participant()
    self.make_summary(participant_c, zipCode="12345")
    genomic_set = self.make_genomic_set()
    member_a = self.make_genomic_member(genomic_set, participant_a)
    member_b = self.make_genomic_member(genomic_set, participant_b)
    member_c = self.make_genomic_member(genomic_set, participant_c)
    with clock.FakeClock(datetime.datetime(2019, 1, 1)):
        validate_and_update_genomic_set_by_id(genomic_set.id)
    current_member_a = self.genomic_member_dao.get(member_a.id)
    current_member_b = self.genomic_member_dao.get(member_b.id)
    current_member_c = self.genomic_member_dao.get(member_c.id)
    self.assertEqual(current_member_a.validationStatus, GenomicSetMemberStatus.INVALID)
    self.assertIn(GenomicValidationFlag.INVALID_NY_ZIPCODE, current_member_a.validationFlags)
    self.assertEqual(current_member_b.validationStatus, GenomicSetMemberStatus.INVALID)
    # BUG FIX: this assertion previously re-checked member_a's flags
    # instead of member_b's, so member_b's flag was never verified.
    self.assertIn(GenomicValidationFlag.INVALID_NY_ZIPCODE, current_member_b.validationFlags)
    self.assertEqual(current_member_c.validationStatus, GenomicSetMemberStatus.VALID)
    # Any invalid member renders the whole set invalid.
    current_set = self.genomic_set_dao.get(genomic_set.id)
    self.assertEqual(current_set.genomicSetStatus, GenomicSetStatus.INVALID)
class GenomicSetValidationSafetyTestCase(GenomicSetValidationBaseTestCase):
    """Checks that failed validations leave the database untouched and that
    validatedTime is only stamped on successful validations."""

    def test_transaction(self):
        # If persisting the set fails partway through, the whole validation
        # must roll back: neither member nor set may keep a partial status.
        participant = self.make_participant()
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        with mock.patch("rdr_service.genomic.validation.GenomicSetDao.update_with_session") as mocked_set_update:
            mocked_set_update.side_effect = Exception("baz")
            with clock.FakeClock(datetime.datetime(2019, 1, 1)):
                with self.assertRaises(Exception):
                    validate_and_update_genomic_set_by_id(genomic_set.id)
        # Nothing should have been written despite the partial run.
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validationStatus, None)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.genomicSetStatus, None)

    def test_invalid_does_not_update_validated_time(self):
        # A withdrawn participant fails validation, so validatedTime must
        # remain unset on both the member and the set.
        participant = self.make_participant(withdrawalStatus=WithdrawalStatus.NO_USE)
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validatedTime, None)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.validatedTime, None)

    def test_valid_does_update_validated_time(self):
        # A clean participant validates; validatedTime should be stamped
        # with the (faked) current clock on both member and set.
        participant = self.make_participant()
        self.make_summary(participant)
        genomic_set = self.make_genomic_set()
        member = self.make_genomic_member(genomic_set, participant)
        now = datetime.datetime(2019, 1, 1)
        with clock.FakeClock(now):
            validate_and_update_genomic_set_by_id(genomic_set.id)
        current_member = self.genomic_member_dao.get(member.id)
        self.assertEqual(current_member.validatedTime, now)
        current_set = self.genomic_set_dao.get(genomic_set.id)
        self.assertEqual(current_set.validatedTime, now)
python/paddle/v2/master/__init__.py | shenchaohua/Paddle | 3 | 6618729 | from client import *
__all__ = ['client']
| from client import *
__all__ = ['client']
| none | 1 | 1.10565 | 1 | |
test_data/parse/expected/inheritance/inheritance_from_concrete_class/meta_model.py | aas-core-works/aas-core-csharp-codegen | 0 | 6618730 | class Concrete:
pass
class Another_concrete(Concrete):
pass
class Reference:
pass
__book_url__ = "dummy"
__book_version__ = "dummy"
associate_ref_with(Reference)
| class Concrete:
pass
class Another_concrete(Concrete):
pass
class Reference:
pass
__book_url__ = "dummy"
__book_version__ = "dummy"
associate_ref_with(Reference)
| none | 1 | 1.650504 | 2 | |
statutils/mixedmodels.py | ilaine/histwords | 365 | 6618731 | <filename>statutils/mixedmodels.py<gh_stars>100-1000
import collections
import copy
import pandas as pd
import statsmodels.api as sm
import scipy as sp
import numpy as np
def make_data_frame(words, years, feature_dict):
    """
    Makes a pandas dataframe for words, years, and a dict of feature funcs.

    Each feature func should take (word, year) and return a feature value.
    The constructed dataframe has a flat csv-style structure; rows with
    missing (NaN/inf) values are removed.
    """
    # Work on a copy so the caller's dict is not polluted with the implicit
    # "word"/"year" feature funcs (the original code mutated the argument).
    features = dict(feature_dict)
    features["word"] = lambda word, year: word
    features["year"] = lambda word, year: year
    temp = collections.defaultdict(list)
    for word in words:
        for year in years:
            # .items() works on both Python 2 and 3; .iteritems() was
            # Python-2-only and crashes on a modern interpreter.
            for feature, feature_func in features.items():
                temp[feature].append(feature_func(word, year))
    df = pd.DataFrame(temp)
    df = df.replace([np.inf, -np.inf], np.nan)
    return df.dropna()
def run_lmm(formula, df, reml=False, **kwargs):
    """
    Fit a linear mixed model for the given formula and dataframe.

    All inputs follow statsmodels' MixedLM conventions; extra keyword
    arguments are forwarded to ``MixedLM.from_formula``.
    """
    mixed_model = sm.MixedLM.from_formula(formula, df, **kwargs)
    fitted = mixed_model.fit(reml=reml)
    return fitted
def marginal_r2(res):
    """Marginal R^2: variance of the fixed-effects-only predictions as a
    share of that variance plus the residual variance."""
    fixed_only = res.model.predict(res.fe_params)
    var_fixed = np.std(fixed_only) ** 2.0
    residuals = res.fittedvalues - res.model.endog
    var_resid = np.std(residuals) ** 2.0
    return var_fixed / (var_fixed + var_resid)
def like_ratio(null_model, alt_model, df=1):
    """
    Likelihood-ratio statistic and chi-square p value between nested models.

    Really only appropriate for single-parameter tests (df defaults to 1).
    """
    # D = -2*(ll_null - ll_alt), written here as 2*(ll_alt - ll_null).
    statistic = 2 * (alt_model.llf - null_model.llf)
    p_value = 1 - sp.stats.chi2.cdf(statistic, df)
    return {"D": statistic, "p_val": p_value}
def simple_slope_percentiles(res, df, target, varying, percs=[25, 50, 75]):
    """Predictions at the 25th/75th percentile of *target*, with *varying*
    held at each percentile in *percs* and every other main-effect covariate
    held at its sample median.

    Returns an OrderedDict mapping each evaluated value of *varying* to
    {"endog": [predictions], "target": [target values used]}.

    NOTE(review): ``percs`` is a mutable default argument; it is never
    mutated here so it is harmless, but a tuple would be safer.
    """
    exog = {}
    # Fill in main effects only; interaction terms (containing ":") are
    # reconstructed below as the product of their components.
    for param in res.fe_params.index:
        if len(param.split(":")) != 1:
            continue
        if param == "Intercept":
            exog[param] = 1.0
        else:
            exog[param] = np.median(df[param])
    ret_vals = collections.OrderedDict()
    for varying_perc in percs:
        exog[varying] = np.percentile(df[varying], varying_perc)
        ret_vals[exog[varying]] = collections.defaultdict(list)
        for target_perc in [25, 75]:
            exog[target] = np.percentile(df[target], target_perc)
            # Build the design row in the same order as the fitted params.
            exog_arr = np.array([exog[param] if len(param.split(":")) == 1 else exog[param.split(":")[0]] * exog[param.split(":")[1]]
                                 for param in res.fe_params.index])
            ret_vals[exog[varying]]["endog"].append(res.model.predict(res.fe_params, exog=exog_arr))
            ret_vals[exog[varying]]["target"].append(exog[target])
    return ret_vals
def simple_slope_categories(res, df, target, cat, cats):
    """Two (x, y) points for plotting the simple slope of *target* within
    category *cat*: predictions at the 10th and 90th percentile of *target*,
    with all dummies in *cats* zeroed, *cat* (if not None) set to 1, and
    every other main effect held at its sample mean."""
    exog = {}
    for param in res.fe_params.index:
        if len(param.split(":")) != 1:
            continue  # interaction terms are rebuilt from their parts below
        if param == "Intercept":
            exog[param] = 1.0
        elif param in cats:
            exog[param] = 0  # category dummies default to "off"
        else:
            exog[param] = np.mean(df[param])
    if cat != None:
        exog[cat] = 1  # switch on the requested category
    x_points = []
    y_points = []
    for target_perc in [10, 90]:
        exog[target] = np.percentile(df[target], target_perc)
        # exog[target] = target_perc
        exog_arr = np.array([exog[param] if len(param.split(":")) == 1 else exog[param.split(":")[0]] * exog[param.split(":")[1]]
                             for param in res.fe_params.index])
        y_points.append(res.model.predict(res.fe_params, exog=exog_arr))
        x_points.append(exog[target])
    return x_points, y_points
def get_marginal_effects(res, df, targets):
    """Discrete effect of flipping each variable in *targets* from 0 to 1,
    with every other main effect held at its sample mean.

    Returns (effects, stderr_vals). NOTE(review): the standard errors are a
    simple linear accumulation of coefficient standard errors (interaction
    terms scaled by the partner's mean), not a full delta-method computation.
    """
    exog = collections.OrderedDict()
    stderrs = collections.OrderedDict()
    # Fixed-effect standard errors from the covariance diagonal.
    stderr_arr = np.sqrt(np.diag(res.cov_params()[0:res.k_fe]))
    for i, param in enumerate(res.fe_params.index):
        if len(param.split(":")) != 1:
            continue  # main effects only; interactions rebuilt below
        if param == "Intercept":
            exog[param] = 1.0
        else:
            exog[param] = np.mean(df[param])
        stderrs[param] = stderr_arr[i]
    ret_vals = {}
    stderr_vals = collections.defaultdict(float)
    for target in targets:
        exog_temp = copy.deepcopy(exog)
        # Prediction with the target switched off...
        exog_temp[target] = 0
        exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
                             for param in res.fe_params.index])
        at_zero = res.model.predict(res.fe_params, exog=exog_arr)
        # ...and switched on; the difference is the marginal effect.
        exog_temp[target] = 1
        exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
                             for param in res.fe_params.index])
        at_one = res.model.predict(res.fe_params, exog=exog_arr)
        ret_vals[target] = at_one - at_zero
        # Accumulate stderr contributions from every interaction the target
        # participates in, scaled by the partner variable's mean level.
        for param in res.fe_params.index:
            if len(param.split(":")) > 1 and target in param:
                t_params = param.split(":")
                other = t_params[0] if t_params[1] == target else t_params[1]
                stderr_vals[target] += exog[other] * stderrs[other]
        stderr_vals[target] += stderrs[target]
    return ret_vals, stderr_vals
def get_marginal_effect_points(res, df, targets, percentiles=(10, 90)):
    """Near-copy of get_marginal_effects with plain dicts instead of
    OrderedDicts.

    NOTE(review): ``percentiles`` is accepted but never used anywhere in the
    body — confirm intent.
    NOTE(review): unlike get_marginal_effects, ``exog_temp[target]`` is NOT
    reset to 0 before computing ``at_zero``; the baseline prediction uses the
    target at its sample mean. This looks like an omitted line — verify.
    """
    exog = {}
    stderrs = {}
    # Fixed-effect standard errors from the covariance diagonal.
    stderr_arr = np.sqrt(np.diag(res.cov_params()[0:res.k_fe]))
    for i, param in enumerate(res.fe_params.index):
        if len(param.split(":")) != 1:
            continue  # main effects only; interactions rebuilt below
        if param == "Intercept":
            exog[param] = 1.0
        else:
            exog[param] = np.mean(df[param])
        stderrs[param] = stderr_arr[i]
    ret_vals = {}
    stderr_vals = collections.defaultdict(float)
    for target in targets:
        exog_temp = copy.deepcopy(exog)
        # Baseline prediction (target left at its mean — see NOTE above).
        exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
                             for param in res.fe_params.index])
        at_zero = res.model.predict(res.fe_params, exog=exog_arr)
        exog_temp[target] = 1
        exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
                             for param in res.fe_params.index])
        at_one = res.model.predict(res.fe_params, exog=exog_arr)
        ret_vals[target] = at_one - at_zero
        # Same rough stderr accumulation as get_marginal_effects.
        for param in res.fe_params.index:
            if len(param.split(":")) > 1 and target in param:
                t_params = param.split(":")
                other = t_params[0] if t_params[1] == target else t_params[1]
                stderr_vals[target] += exog[other] * stderrs[other]
        stderr_vals[target] += stderrs[target]
    return ret_vals, stderr_vals
def get_slopes_stderrs(res):
    """Return ({param: slope}, {param: stderr}) for the fixed effects of a
    fitted model, with stderrs taken from the covariance diagonal."""
    fixed_cov = res.cov_params()[0:res.k_fe]
    errors = np.sqrt(np.diag(fixed_cov))
    names = res.fe_params.index
    slopes = {name: res.fe_params[name] for name in names}
    stderrs = dict(zip(names, errors))
    return slopes, stderrs
| <filename>statutils/mixedmodels.py<gh_stars>100-1000
import collections
import copy
import pandas as pd
import statsmodels.api as sm
import scipy as sp
import numpy as np
def make_data_frame(words, years, feature_dict):
"""
Makes a pandas dataframe for word, years, and dictionary of feature funcs.
Each feature func should take (word, year) and return feature value.
Constructed dataframe has flat csv style structure and missing values are removed.
"""
temp = collections.defaultdict(list)
feature_dict["word"] = lambda word, year : word
feature_dict["year"] = lambda word, year : year
for word in words:
for year in years:
for feature, feature_func in feature_dict.iteritems():
temp[feature].append(feature_func(word, year))
df = pd.DataFrame(temp)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
return df
def run_lmm(formula, df, reml=False, **kwargs):
"""
Wrapper for running a linear mixed model with given formula.
Inputs defined by statsmodels.
"""
model = sm.MixedLM.from_formula(formula, df, **kwargs)
return model.fit(reml=reml)
def marginal_r2(res):
e_f = np.std(res.model.predict(res.fe_params)) ** 2.0
e_other = np.std(res.fittedvalues - res.model.endog) ** 2.0
return e_f / (e_f + e_other)
def like_ratio(null_model, alt_model, df=1):
"""
Compute the likelihood ratio statistic and corresponding p value btw nested models.
Really should only be used for single parameter tests.
"""
D = -2 * (null_model.llf - alt_model.llf)
return {"D" : D, "p_val" : 1 - sp.stats.chi2.cdf(D, df)}
def simple_slope_percentiles(res, df, target, varying, percs=[25, 50, 75]):
exog = {}
for param in res.fe_params.index:
if len(param.split(":")) != 1:
continue
if param == "Intercept":
exog[param] = 1.0
else:
exog[param] = np.median(df[param])
ret_vals = collections.OrderedDict()
for varying_perc in percs:
exog[varying] = np.percentile(df[varying], varying_perc)
ret_vals[exog[varying]] = collections.defaultdict(list)
for target_perc in [25, 75]:
exog[target] = np.percentile(df[target], target_perc)
exog_arr = np.array([exog[param] if len(param.split(":")) == 1 else exog[param.split(":")[0]] * exog[param.split(":")[1]]
for param in res.fe_params.index])
ret_vals[exog[varying]]["endog"].append(res.model.predict(res.fe_params, exog=exog_arr))
ret_vals[exog[varying]]["target"].append(exog[target])
return ret_vals
def simple_slope_categories(res, df, target, cat, cats):
exog = {}
for param in res.fe_params.index:
if len(param.split(":")) != 1:
continue
if param == "Intercept":
exog[param] = 1.0
elif param in cats:
exog[param] = 0
else:
exog[param] = np.mean(df[param])
if cat != None:
exog[cat] = 1
x_points = []
y_points = []
for target_perc in [10, 90]:
exog[target] = np.percentile(df[target], target_perc)
# exog[target] = target_perc
exog_arr = np.array([exog[param] if len(param.split(":")) == 1 else exog[param.split(":")[0]] * exog[param.split(":")[1]]
for param in res.fe_params.index])
y_points.append(res.model.predict(res.fe_params, exog=exog_arr))
x_points.append(exog[target])
return x_points, y_points
def get_marginal_effects(res, df, targets):
exog = collections.OrderedDict()
stderrs = collections.OrderedDict()
stderr_arr = np.sqrt(np.diag(res.cov_params()[0:res.k_fe]))
for i, param in enumerate(res.fe_params.index):
if len(param.split(":")) != 1:
continue
if param == "Intercept":
exog[param] = 1.0
else:
exog[param] = np.mean(df[param])
stderrs[param] = stderr_arr[i]
ret_vals = {}
stderr_vals = collections.defaultdict(float)
for target in targets:
exog_temp = copy.deepcopy(exog)
exog_temp[target] = 0
exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
for param in res.fe_params.index])
at_zero = res.model.predict(res.fe_params, exog=exog_arr)
exog_temp[target] = 1
exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
for param in res.fe_params.index])
at_one = res.model.predict(res.fe_params, exog=exog_arr)
ret_vals[target] = at_one - at_zero
for param in res.fe_params.index:
if len(param.split(":")) > 1 and target in param:
t_params = param.split(":")
other = t_params[0] if t_params[1] == target else t_params[1]
stderr_vals[target] += exog[other] * stderrs[other]
stderr_vals[target] += stderrs[target]
return ret_vals, stderr_vals
def get_marginal_effect_points(res, df, targets, percentiles=(10, 90)):
exog = {}
stderrs = {}
stderr_arr = np.sqrt(np.diag(res.cov_params()[0:res.k_fe]))
for i, param in enumerate(res.fe_params.index):
if len(param.split(":")) != 1:
continue
if param == "Intercept":
exog[param] = 1.0
else:
exog[param] = np.mean(df[param])
stderrs[param] = stderr_arr[i]
ret_vals = {}
stderr_vals = collections.defaultdict(float)
for target in targets:
exog_temp = copy.deepcopy(exog)
exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
for param in res.fe_params.index])
at_zero = res.model.predict(res.fe_params, exog=exog_arr)
exog_temp[target] = 1
exog_arr = np.array([exog_temp[param] if len(param.split(":")) == 1 else exog_temp[param.split(":")[0]] * exog_temp[param.split(":")[1]]
for param in res.fe_params.index])
at_one = res.model.predict(res.fe_params, exog=exog_arr)
ret_vals[target] = at_one - at_zero
for param in res.fe_params.index:
if len(param.split(":")) > 1 and target in param:
t_params = param.split(":")
other = t_params[0] if t_params[1] == target else t_params[1]
stderr_vals[target] += exog[other] * stderrs[other]
stderr_vals[target] += stderrs[target]
return ret_vals, stderr_vals
def get_slopes_stderrs(res):
stderr_arr = np.sqrt(np.diag(res.cov_params()[0:res.k_fe]))
slopes = {}
stderrs = {}
for i, param in enumerate(res.fe_params.index):
slopes[param] = res.fe_params[param]
stderrs[param] = stderr_arr[i]
return slopes, stderrs
| en | 0.767969 | Makes a pandas dataframe for word, years, and dictionary of feature funcs. Each feature func should take (word, year) and return feature value. Constructed dataframe has flat csv style structure and missing values are removed. Wrapper for running a linear mixed model with given formula. Inputs defined by statsmodels. Compute the likelihood ratio statistic and corresponding p value btw nested models. Really should only be used for single parameter tests. # exog[target] = target_perc | 2.773736 | 3 |
app.py | CaoHoangTung/shark-cop-server | 2 | 6618732 | <reponame>CaoHoangTung/shark-cop-server<filename>app.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask,render_template,request
from utils.Checker import Checker
from utils.Helper import Helper
from model.functions import Functions
import threading
import requests
app = Flask(__name__)
feature_count = 30
# NOTES:
# THE API WILL RETURN -1/2/1 -> normal / undetectable / phishing
@app.route("/",methods=["GET"])
def main():
    # Landing page of the checker UI.
    return render_template("index.html")
@app.route("/api/check", methods=["GET"])
def check():
    """Classify the URL given in the ``url`` query parameter.

    Returns one of the strings "-1" (normal), "2" (undetectable) or
    "1" (phishing); the model result is stringified before returning.
    """
    submit_url = request.args["url"]
    if not Checker.check_connection(submit_url):
        print("Connection unavailable")
        return "2"  # unable to detect
    # Early exit when the statistical/blacklist heuristic already flags it.
    if Checker.Statistical_report(submit_url) == 1:
        return "1"
    try:
        print("Getting info for", submit_url)
        input_array = Helper.embed_url(submit_url)
        print(input_array)
        if "2" in input_array:
            # At least one feature could not be extracted.
            print("Cannot get some features")
            return "2"
        result = Functions.check_vector(input_array)
        return str(result)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed; any feature-extraction
        # or model failure still maps to "undetectable".
        print("Unable to detech")
        return "2"  # unable to detect
# remove cache for development purpose
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
if __name__ == "__main__":
    # 'adhoc' generates a throwaway self-signed TLS certificate at startup.
    app.run(threaded=True, debug=False, ssl_context='adhoc', port=8080)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask,render_template,request
from utils.Checker import Checker
from utils.Helper import Helper
from model.functions import Functions
import threading
import requests
app = Flask(__name__)
feature_count = 30
# NOTES:
# THE API WILL RETURN -1/2/1 -> normal / undetectable / phishing
@app.route("/",methods=["GET"])
def main():
return render_template("index.html")
@app.route("/api/check",methods=["GET"])
# Params only include url of websites we need to check
def check():
# return -1/2/1 -> normal / undetectable / phishing
submit_url = request.args["url"]
if not Checker.check_connection(submit_url):
print("Connection unavailable")
return "2" # unable to detech
if(Checker.Statistical_report(submit_url) == 1):
return "1"
try:
print("Getting info for",submit_url)
input_array = Helper.embed_url(submit_url)
print(input_array)
if "2" in input_array:
# if cannot get some features
print("Cannot get some features")
return "2"
result = Functions.check_vector(input_array)
# this code is used to logged into the database file. Uncomment when needed
# if (result == 1):
# f = open("model/data/urls.csv","a",encoding="UTF-8")
# f.write(submit_url+"\n")
return str(result)
except:
print("Unable to detech")
return "2" # unable to detect
# remove cache for development purpose
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
if __name__ == "__main__":
app.run(threaded=True,debug=False,ssl_context='adhoc',port=8080) | en | 0.733778 | #!/usr/bin/env python # -*- coding: utf-8 -*- # NOTES: # THE API WILL RETURN -1/2/1 -> normal / undetectable / phishing # Params only include url of websites we need to check # return -1/2/1 -> normal / undetectable / phishing # unable to detech # if cannot get some features # this code is used to logged into the database file. Uncomment when needed # if (result == 1): # f = open("model/data/urls.csv","a",encoding="UTF-8") # f.write(submit_url+"\n") # unable to detect # remove cache for development purpose Add headers to both force latest IE rendering engine or Chrome Frame, and also to cache the rendered page for 10 minutes. | 2.462739 | 2 |
app/routes.py | weakit/GCI19-FlaskText | 0 | 6618733 | from flask import render_template, url_for, redirect, Markup
from app import app
from app.forms import SubmitTextForm
import app.storage as st
import os
def render_list():
    """Render an HTML summary card for every stored text, or a placeholder
    paragraph when nothing is stored yet."""
    li = st.list_texts()
    renders = []
    for file in li:
        text, name = st.read(file)
        # Truncate long previews to 180 characters plus an ellipsis.
        if len(text) > 181:
            text = text[:180] + '…'
        # Keep at most two lines of preview: cut at the second newline.
        if text.count('\n') > 1:
            i = text.index('\n', text.index('\n') + 1)
            text = text[:i] + ' …'
        renders.append(render_template('article.html', name=name, summary=text))
    if not renders:
        return '<p class="lead">No Text files.</p>'
    return ''.join(renders)
@app.route('/', methods=["GET", "POST"])
def root(error=''):
    """Main page: list stored texts and handle a new upload.

    ``error`` is pre-rendered HTML injected into the page (used to report a
    name collision); it is empty on a normal request.
    """
    form = SubmitTextForm()
    if form.validate_on_submit():
        text = form.file.data.stream.read()
        # Prefer the user-supplied name, else fall back to the uploaded
        # file's name without its extension.
        if form.name.data:
            name = form.name.data
        else:
            name = os.path.splitext(form.file.data.filename)[0]
        # st.available(name) is True when the name is still free.
        if not st.available(name):
            return root(render_template('nametaken.html'))
        st.save(name, text.decode())
        # Redirect so a browser refresh does not re-submit the form.
        return redirect(url_for('root'))
    return render_template('main.html', form=form, texts=Markup(render_list()), errors=Markup(error))
@app.route('/<text>')
def show_text(text):
    # Names containing spaces are redirected to their URL-safe form
    # (st.url_name presumably maps spaces — mirrors delete_text).
    if ' ' in text:
        return redirect(st.url_name(text))
    # st.available() is True when the name is free, so "not available"
    # means a stored text with this name exists.
    if not st.available(text):
        text, name = st.read(text)
        return render_template('text.html', name=name, text=text)
    return 'Not Found.'
@app.route('/delete/<text>')
def delete_text(text):
    # NOTE(review): a state-changing GET route with no auth/CSRF protection;
    # fine for a demo, unsafe for anything public.
    if ' ' in text:
        # Redirect spaced names to their URL-safe form first.
        return redirect('/delete/' + st.url_name(text))
    # "not available" == a stored text with this name exists.
    if not st.available(text):
        st.delete(text)
        return 'Deleted.'
    return 'Not Found.'
| from flask import render_template, url_for, redirect, Markup
from app import app
from app.forms import SubmitTextForm
import app.storage as st
import os
def render_list():
li = st.list_texts()
renders = []
for file in li:
text, name = st.read(file)
if len(text) > 181:
text = text[:180] + '…'
if text.count('\n') > 1:
i = text.index('\n', text.index('\n') + 1)
text = text[:i] + ' …'
renders.append(render_template('article.html', name=name, summary=text))
if not renders:
return '<p class="lead">No Text files.</p>'
return ''.join(renders)
@app.route('/', methods=["GET", "POST"])
def root(error=''):
form = SubmitTextForm()
if form.validate_on_submit():
text = form.file.data.stream.read()
if form.name.data:
name = form.name.data
else:
name = os.path.splitext(form.file.data.filename)[0]
if not st.available(name):
return root(render_template('nametaken.html'))
st.save(name, text.decode())
return redirect(url_for('root'))
return render_template('main.html', form=form, texts=Markup(render_list()), errors=Markup(error))
@app.route('/<text>')
def show_text(text):
if ' ' in text:
return redirect(st.url_name(text))
if not st.available(text):
text, name = st.read(text)
return render_template('text.html', name=name, text=text)
return 'Not Found.'
@app.route('/delete/<text>')
def delete_text(text):
if ' ' in text:
return redirect('/delete/' + st.url_name(text))
if not st.available(text):
st.delete(text)
return 'Deleted.'
return 'Not Found.'
| none | 1 | 2.900906 | 3 | |
scripts/test/testAvgle.py | peter279k/today-co | 1 | 6618734 | import unittest
import avgle
from unittest.mock import Mock
from util import unixTime2DateString
class AvgleTest(unittest.TestCase):
    """Unit tests for the avgle module's video-to-DB-row mapping."""

    def mockPornVideo(self):
        # Replace the PornVideo class attributes with plain strings so that
        # insertVideoDb can build its result dict without a real database
        # (presumably these are ORM column descriptors — TODO confirm).
        avgle.PornVideo.source = 'source'
        avgle.PornVideo.view_numbers = 'view_numbers'
        avgle.PornVideo.video_id = 'video_id'
        avgle.PornVideo.view_ratings = 'view_ratings'
        avgle.PornVideo.video_title = 'video_title'
        avgle.PornVideo.create_date = 'create_date'

    def testInsertVideoDb(self):
        self.mockPornVideo()
        # Stub out the DB write so the test has no side effects.
        avgle.insertAndReplace = Mock()
        videos = {
            'viewnumber': 123,
            'vid': 3345678,
            'framerate': 29.969999,
            'title': 'test title',
            'addtime': 1585894210
        }
        result = avgle.insertVideoDb(videos)
        # API field names are remapped and the unix timestamp is converted
        # into a date string.
        expect = {
            'source': 'avgle',
            'view_numbers': 123,
            'video_id': 3345678,
            'view_ratings': 29.969999,
            'video_title': 'test title',
            'create_date': unixTime2DateString(1585894210)
        }
        self.assertEqual(result, expect)
if __name__ == '__main__':
    # Allow running this file directly as a test script.
    unittest.main()
| import unittest
import avgle
from unittest.mock import Mock
from util import unixTime2DateString
class AvgleTest(unittest.TestCase):
def mockPornVideo(self):
avgle.PornVideo.source = 'source'
avgle.PornVideo.view_numbers = 'view_numbers'
avgle.PornVideo.video_id = 'video_id'
avgle.PornVideo.view_ratings = 'view_ratings'
avgle.PornVideo.video_title = 'video_title'
avgle.PornVideo.create_date = 'create_date'
def testInsertVideoDb(self):
self.mockPornVideo()
avgle.insertAndReplace = Mock()
videos = {
'viewnumber': 123,
'vid': 3345678,
'framerate': 29.969999,
'title': 'test title',
'addtime': 1585894210
}
result = avgle.insertVideoDb(videos)
expect = {
'source': 'avgle',
'view_numbers': 123,
'video_id': 3345678,
'view_ratings': 29.969999,
'video_title': 'test title',
'create_date': unixTime2DateString(1585894210)
}
self.assertEqual(result, expect)
if __name__ == '__main__':
unittest.main()
| none | 1 | 3.066004 | 3 | |
analysis.py | neoshanarayanan/lammps | 0 | 6618735 | # Author: <NAME>
# UROP Summer 2019
import matplotlib.pyplot as plt
f = open("log.lammps", "r")
timesteps = []
temps = []
dataDict = {}
data_lines = []
for i, line in enumerate(f.readlines()):
linenum = i+1
if linenum >= 63 and linenum <= 89:
data_lines.append(str.split(line))
headers = data_lines[0]
del data_lines[0]
for i, header in enumerate(headers):
column = []
for line in data_lines:
column.append(float(line[i]))
dataDict.update({header: column})
print(dataDict)
# dataDict shows each column stored in a list identified by header which is the key
# plot temp vs volume
temp = dataDict['KinEng']
volume = dataDict['Volume']
fig = plt.figure()
plt.plot(temp, volume)
fig.suptitle('volume vs temp', fontsize=15)
plt.xlabel('kineng', fontsize=10)
plt.ylabel('volume', fontsize=10)
fig.savefig('tempvsvolume.png')
plt.show()
f.close()
| # Author: <NAME>
# UROP Summer 2019
import matplotlib.pyplot as plt
f = open("log.lammps", "r")
timesteps = []
temps = []
dataDict = {}
data_lines = []
for i, line in enumerate(f.readlines()):
linenum = i+1
if linenum >= 63 and linenum <= 89:
data_lines.append(str.split(line))
headers = data_lines[0]
del data_lines[0]
for i, header in enumerate(headers):
column = []
for line in data_lines:
column.append(float(line[i]))
dataDict.update({header: column})
print(dataDict)
# dataDict shows each column stored in a list identified by header which is the key
# plot temp vs volume
temp = dataDict['KinEng']
volume = dataDict['Volume']
fig = plt.figure()
plt.plot(temp, volume)
fig.suptitle('volume vs temp', fontsize=15)
plt.xlabel('kineng', fontsize=10)
plt.ylabel('volume', fontsize=10)
fig.savefig('tempvsvolume.png')
plt.show()
f.close()
| en | 0.859502 | # Author: <NAME> # UROP Summer 2019 # dataDict shows each column stored in a list identified by header which is the key # plot temp vs volume | 2.722579 | 3 |
code.py | schroedifer/CircuitPython-MuteButton | 0 | 6618736 | <gh_stars>0
# SPDX-FileCopyrightText: 2020 <NAME>
# SPDX-License-Identifier: MIT
import time
import board
from digitalio import DigitalInOut, Direction, Pull
import usb_hid
from adafruit_hid.keyboard import Keyboard
# from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# from adafruit_hid.keycode import Keycode
import neopixel
from config import * # User settings in config.py
from profiles import profiles # Import dictionary of buttons and lights
def button_pressed(button):
    # DigitalInOut configured with a pull-down: .value is True while held.
    return button.value
def select_profile(profiles, led, button):
    """Let the user pick a chat-app profile at boot.

    Cycles through every profile except 'mute', lighting *led* with each
    profile's color for SELECTION_TIMEOUT seconds; returns the name of the
    profile whose color is showing when *button* is pressed.
    """
    all_profiles = list(profiles.keys())
    all_profiles.remove('mute')  # Can't select 'mute' as a profile
    while True:  # loop forever until the user picks one
        for profile in all_profiles:  # Iterate through chat applications
            if DEBUG:
                print(led)
                print(profiles[profile])
            start_time = time.monotonic()  # Start the timer
            # BUG FIX: use the *led* argument instead of the global rgb_led,
            # so the function drives whichever pixel it is handed.
            led[0] = profiles[profile]["color"]  # Display the application color
            while time.monotonic() - start_time < SELECTION_TIMEOUT:
                if button.value:  # Button has been pressed
                    if DEBUG:
                        print("Profile Selected: " + profile)
                    time.sleep(0.5)  # Give time to remove finger
                    return profile
                time.sleep(0.1)
# initialize onboard neopixel
# this will be used to indiciate what application is enabled
rgb_led = neopixel.NeoPixel(board.NEOPIXEL, 1)
rgb_led.brightness = 1.0
rgb_led[0] = (0, 0, 0)
kbd = Keyboard(usb_hid.devices)
# Digital input with pulldown on Neokey
button = DigitalInOut(board.SWITCH)
button.switch_to_input(pull=Pull.DOWN)
button_state = False  # Starting with button not being pressed
change_button = False
mute = False  # Starting value
if PROFILE == 'choose':  # A known profile can be set
    PROFILE = select_profile(profiles, rgb_led, button)  # Get the desired chat app
while True:
    # retreive the active controller button
    controller_button = profiles[PROFILE]
    if DEBUG:
        print("Profile: " + controller_button['name'])
        print("Mute: " + str(mute))
    # set the color of the onboard neopixel to match the active controller button
    if not mute:
        rgb_led[0] = controller_button['color']  # Set the LED to the chat color
    else:
        rgb_led[0] = profiles['mute']['color']  # Turn the LED red
    # detect if the button has been released
    if button_pressed(button):
        if DEBUG:
            print("{} mute button press".format(controller_button['name']))
        # Profiles store either a single keycode or a tuple of keycodes;
        # a TypeError from the single-key send means it was a combo.
        try:
            kbd.send(controller_button['keycode'])  # Send a one key mute command
        except TypeError as e:
            kbd.send(*controller_button['keycode'])  # Send a multi key mute command
        mute = not mute  # Toggle the boolean
        if not mute:
            rgb_led[0] = controller_button['color']  # Set the LED to the chat color
        else:
            rgb_led[0] = profiles['mute']['color']  # Turn the LED red
        time.sleep(BUTTON_LATCH_TIME)  # Give time to remove finger
        if DEBUG:
            time.sleep(1)
            print("Button Pressed: " + str(button_pressed(button)))
    else:
        time.sleep(0.1)
time.sleep(0.1) | # SPDX-FileCopyrightText: 2020 <NAME>
# SPDX-License-Identifier: MIT
import time
import board
from digitalio import DigitalInOut, Direction, Pull
import usb_hid
from adafruit_hid.keyboard import Keyboard
# from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# from adafruit_hid.keycode import Keycode
import neopixel
from config import * # User settings in config.py
from profiles import profiles # Import dictionary of buttons and lights
def button_pressed(button):
return button.value
# def set_color(led, color):
def select_profile(profiles, led, button):
all_profiles = list(profiles.keys())
all_profiles.remove('mute') # Can't select 'mute' as a profile
selected_profile = False
while not selected_profile:
for profile in all_profiles: # Iterate through chat applications
if DEBUG:
print(led)
print(profiles[profile])
start_time = time.monotonic() # Start the timer
rgb_led[0] = profiles[profile]["color"] # Display the application color
while time.monotonic() - start_time < SELECTION_TIMEOUT: # Has X time passed?
if button.value == True: # Button has been pressed
if DEBUG:
print("Profile Selected: " + profile)
time.sleep(0.5) # Give time to remove finger
return(profile) #
time.sleep(0.1)
# initialize onboard neopixel
# this will be used to indiciate what application is enabled
rgb_led = neopixel.NeoPixel(board.NEOPIXEL, 1)
rgb_led.brightness = 1.0
rgb_led[0] = (0, 0, 0)
kbd = Keyboard(usb_hid.devices)
# Digital input with pulldown on Neokey
button = DigitalInOut(board.SWITCH)
button.switch_to_input(pull=Pull.DOWN)
button_state = False # Starting with button not being pressed
change_button = False
mute = False # Starting value
if PROFILE == 'choose': # A known profile can be set
PROFILE = select_profile(profiles, rgb_led, button) # Get the desired chat app
while True:
# retreive the active controller button
controller_button = profiles[PROFILE]
if DEBUG:
print("Profile: " + controller_button['name'])
print("Mute: " + str(mute))
# set the color of the onboard neopixel to match the active controller button
if not mute:
rgb_led[0] = controller_button['color'] # Set the LED to the chat color
else:
rgb_led[0] = profiles['mute']['color'] # Turn the LED red
# detect if the button has been released
if button_pressed(button):
if DEBUG:
print("{} mute button press".format(controller_button['name']))
try:
kbd.send(controller_button['keycode']) # Send a one key mute command
except TypeError as e:
kbd.send(*controller_button['keycode']) # Send a multi key mute command
mute = not mute # Toggle the boolean
if not mute:
rgb_led[0] = controller_button['color'] # Set the LED to the chat color
else:
rgb_led[0] = profiles['mute']['color'] # Turn the LED red
time.sleep(BUTTON_LATCH_TIME) # Give time to remove finger
if DEBUG:
time.sleep(1)
print("Button Pressed: " + str(button_pressed(button)))
else:
time.sleep(0.1) | en | 0.739606 | # SPDX-FileCopyrightText: 2020 <NAME> # SPDX-License-Identifier: MIT # from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS # from adafruit_hid.keycode import Keycode # User settings in config.py # Import dictionary of buttons and lights # def set_color(led, color): # Can't select 'mute' as a profile # Iterate through chat applications # Start the timer # Display the application color # Has X time passed? # Button has been pressed # Give time to remove finger # # initialize onboard neopixel # this will be used to indiciate what application is enabled # Digital input with pulldown on Neokey # Starting with button not being pressed # Starting value # A known profile can be set # Get the desired chat app # retreive the active controller button # set the color of the onboard neopixel to match the active controller button # Set the LED to the chat color # Turn the LED red # detect if the button has been released # Send a one key mute command # Send a multi key mute command # Toggle the boolean # Set the LED to the chat color # Turn the LED red # Give time to remove finger | 2.703253 | 3 |
distiller/dataset/loaders.py | arkel23/IntermediateFeaturesAugmentedRepDistiller | 0 | 6618737 | import os
from torch.utils import data
from torchvision import datasets
from .build_transform import ApplyTransform
from .cifar10 import CIFAR10Instance, CIFAR10InstanceSample
from .cifar100 import CIFAR100Instance, CIFAR100InstanceSample
from .stl10 import STL10Instance, STL10InstanceSample
from .svhn import SVHNInstance, SVHNInstanceSample
from .cinic10 import CINIC10, CINIC10Instance, CINIC10InstanceSample
from .tinyimagenet import TinyImageNet, TinyImageNetInstance, TinyImageNetInstanceSample
from .imagenet import ImageNet, ImageNetInstance, ImageNetInstanceSample
def build_dataloaders(opt, vanilla=True):
os.makedirs(opt.dataset_path, exist_ok=True)
train_transform = ApplyTransform(split='train', opt=opt)
val_transform = ApplyTransform(split='val', opt=opt)
train_set = get_train_set(opt.dataset, opt.dataset_path, train_transform, opt, vanilla)
val_set, n_cls = get_val_set(opt.dataset, opt.dataset_path, val_transform)
n_data = len(train_set)
train_sampler = None
val_sampler = None
if opt.distributed:
train_sampler = data.distributed.DistributedSampler(train_set)
if opt.dist_eval:
val_sampler = data.distributed.DistributedSampler(val_set)
train_loader = data.DataLoader(train_set, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, drop_last=True, sampler=train_sampler)
val_loader = data.DataLoader(val_set, batch_size=64, shuffle=False,
num_workers=int(opt.num_workers/2), pin_memory=True, sampler=val_sampler)
if vanilla:
return train_loader, val_loader, n_cls
return train_loader, val_loader, n_cls, n_data
def get_val_set(dataset, dataset_path, transform):
if dataset == 'cifar10':
val_set = datasets.CIFAR10(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 10
elif dataset == 'cifar100':
val_set = datasets.CIFAR100(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 100
elif dataset == 'stl10':
val_set = datasets.STL10(root=dataset_path, download=True,
split='test', transform=transform)
n_cls = 10
elif dataset == 'svhn':
val_set = datasets.SVHN(root=dataset_path, download=True,
split='test', transform=transform)
n_cls = 10
elif dataset == 'cinic10':
val_set = CINIC10(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 10
elif dataset == 'tinyimagenet':
val_set = TinyImageNet(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 200
elif dataset == 'imagenet':
val_set = ImageNet(root=dataset_path,
train=False, transform=transform)
n_cls = 1000
else:
raise NotImplementedError
return val_set, n_cls
def get_train_set(dataset, dataset_path, transform, opt, vanilla):
if vanilla:
if dataset == 'cifar10':
train_set = datasets.CIFAR10(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'cifar100':
train_set = datasets.CIFAR100(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'stl10':
train_set = datasets.STL10(root=dataset_path, download=True,
split='test', transform=transform)
elif dataset == 'svhn':
train_set = datasets.SVHN(root=dataset_path, download=True,
split='test', transform=transform)
elif dataset == 'cinic10':
train_set = CINIC10(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'tinyimagenet':
train_set = TinyImageNet(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'imagenet':
train_set = ImageNet(root=dataset_path,
train=True, transform=transform)
else:
raise NotImplementedError
else:
if dataset == 'cifar10':
if opt.distill in ['crd']:
train_set = CIFAR10InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CIFAR10Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'cifar100':
if opt.distill in ['crd']:
train_set = CIFAR100InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CIFAR100Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'stl10':
if opt.distill in ['crd']:
train_set = STL10InstanceSample(
root=dataset_path, download=True, split='train', transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = STL10Instance(root=dataset_path, download=True,
split='train', transform=transform)
elif dataset == 'svhn':
if opt.distill in ['crd']:
train_set = SVHNInstanceSample(
root=dataset_path, download=True, split='train', transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = SVHNInstance(root=dataset_path, download=True,
split='train', transform=transform)
elif dataset == 'cinic10':
if opt.distill in ['crd']:
train_set = CINIC10InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CINIC10Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'tinyimagenet':
if opt.distill in ['crd']:
train_set = TinyImageNetInstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = TinyImageNetInstance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'imagenet':
if opt.distill in ['crd']:
train_set = ImageNetInstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = ImageNetInstance(root=dataset_path, download=True,
train=True, transform=transform)
else:
raise NotImplementedError
return train_set
| import os
from torch.utils import data
from torchvision import datasets
from .build_transform import ApplyTransform
from .cifar10 import CIFAR10Instance, CIFAR10InstanceSample
from .cifar100 import CIFAR100Instance, CIFAR100InstanceSample
from .stl10 import STL10Instance, STL10InstanceSample
from .svhn import SVHNInstance, SVHNInstanceSample
from .cinic10 import CINIC10, CINIC10Instance, CINIC10InstanceSample
from .tinyimagenet import TinyImageNet, TinyImageNetInstance, TinyImageNetInstanceSample
from .imagenet import ImageNet, ImageNetInstance, ImageNetInstanceSample
def build_dataloaders(opt, vanilla=True):
os.makedirs(opt.dataset_path, exist_ok=True)
train_transform = ApplyTransform(split='train', opt=opt)
val_transform = ApplyTransform(split='val', opt=opt)
train_set = get_train_set(opt.dataset, opt.dataset_path, train_transform, opt, vanilla)
val_set, n_cls = get_val_set(opt.dataset, opt.dataset_path, val_transform)
n_data = len(train_set)
train_sampler = None
val_sampler = None
if opt.distributed:
train_sampler = data.distributed.DistributedSampler(train_set)
if opt.dist_eval:
val_sampler = data.distributed.DistributedSampler(val_set)
train_loader = data.DataLoader(train_set, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, drop_last=True, sampler=train_sampler)
val_loader = data.DataLoader(val_set, batch_size=64, shuffle=False,
num_workers=int(opt.num_workers/2), pin_memory=True, sampler=val_sampler)
if vanilla:
return train_loader, val_loader, n_cls
return train_loader, val_loader, n_cls, n_data
def get_val_set(dataset, dataset_path, transform):
if dataset == 'cifar10':
val_set = datasets.CIFAR10(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 10
elif dataset == 'cifar100':
val_set = datasets.CIFAR100(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 100
elif dataset == 'stl10':
val_set = datasets.STL10(root=dataset_path, download=True,
split='test', transform=transform)
n_cls = 10
elif dataset == 'svhn':
val_set = datasets.SVHN(root=dataset_path, download=True,
split='test', transform=transform)
n_cls = 10
elif dataset == 'cinic10':
val_set = CINIC10(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 10
elif dataset == 'tinyimagenet':
val_set = TinyImageNet(root=dataset_path, download=True,
train=False, transform=transform)
n_cls = 200
elif dataset == 'imagenet':
val_set = ImageNet(root=dataset_path,
train=False, transform=transform)
n_cls = 1000
else:
raise NotImplementedError
return val_set, n_cls
def get_train_set(dataset, dataset_path, transform, opt, vanilla):
if vanilla:
if dataset == 'cifar10':
train_set = datasets.CIFAR10(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'cifar100':
train_set = datasets.CIFAR100(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'stl10':
train_set = datasets.STL10(root=dataset_path, download=True,
split='test', transform=transform)
elif dataset == 'svhn':
train_set = datasets.SVHN(root=dataset_path, download=True,
split='test', transform=transform)
elif dataset == 'cinic10':
train_set = CINIC10(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'tinyimagenet':
train_set = TinyImageNet(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'imagenet':
train_set = ImageNet(root=dataset_path,
train=True, transform=transform)
else:
raise NotImplementedError
else:
if dataset == 'cifar10':
if opt.distill in ['crd']:
train_set = CIFAR10InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CIFAR10Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'cifar100':
if opt.distill in ['crd']:
train_set = CIFAR100InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CIFAR100Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'stl10':
if opt.distill in ['crd']:
train_set = STL10InstanceSample(
root=dataset_path, download=True, split='train', transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = STL10Instance(root=dataset_path, download=True,
split='train', transform=transform)
elif dataset == 'svhn':
if opt.distill in ['crd']:
train_set = SVHNInstanceSample(
root=dataset_path, download=True, split='train', transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = SVHNInstance(root=dataset_path, download=True,
split='train', transform=transform)
elif dataset == 'cinic10':
if opt.distill in ['crd']:
train_set = CINIC10InstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = CINIC10Instance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'tinyimagenet':
if opt.distill in ['crd']:
train_set = TinyImageNetInstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = TinyImageNetInstance(root=dataset_path, download=True,
train=True, transform=transform)
elif dataset == 'imagenet':
if opt.distill in ['crd']:
train_set = ImageNetInstanceSample(
root=dataset_path, download=True, train=True, transform=transform,
k=opt.nce_k, mode=opt.mode, is_sample=True, percent=1.0)
else:
train_set = ImageNetInstance(root=dataset_path, download=True,
train=True, transform=transform)
else:
raise NotImplementedError
return train_set
| none | 1 | 2.132449 | 2 | |
igamelister/amiga/info/ada.py | chris-vg/igamelister | 0 | 6618738 | <gh_stars>0
from datetime import datetime
from igamelister.amiga.chipset import Chipset
from igamelister.amiga.person import Person
from igamelister.amiga.publisher import Publisher
from igamelister.webscraper.template import Template
class ADA:
def __init__(self, page_id: int):
tmp = Template("ada.ini", page_id=page_id)
details = tmp.get_list("details")
sections = []
for i in range(len(details)):
if details[i].endswith(":"):
sections.append((i + 1, details[i]))
def get(idx: int):
r = []
for j in range(sections[idx][0], sections[idx + 1][0]):
if details[j].endswith(":"):
break
r.append(details[j])
return r
def get_string(key: str, idx: int = None, sep: str = ", ") -> str:
for j in range(len(sections)):
if sections[j][1] == key:
r = get(j)
if idx is not None:
return r[idx]
else:
return sep.join(r)
return ""
def get_list(key: str) -> list:
for j in range(len(sections)):
if sections[j][1] == key:
return get(j)
return []
def get_datetime(key: str, fmt: str) -> datetime:
if fmt == '':
return datetime.min
s = get_string(key, idx=0)
if s != '' or s != '-':
return datetime.strptime(s, fmt)
else:
return datetime.min
def get_datetime_format(key: str, fmt: list) -> str:
s = get_string(key, idx=0)
if s == '' or s == '-':
return ''
for f in fmt:
try:
datetime.strptime(s, f)
return f
except ValueError:
continue
def add_people(names, role: str) -> list:
r = []
if type(names) == list:
for name in names:
r.append(Person(name, role))
elif type(names) == str:
r.append(Person(names, role))
return r
def add_chipsets(c: list) -> list:
r = []
for x in c:
if x == "Ocs":
r.append(Chipset.OCS)
elif x == "Ecs":
r.append(Chipset.ECS)
elif x == "Aga":
r.append(Chipset.AGA)
else:
r.append(Chipset.Unknown)
return r
def add_groups(g: list) -> list:
r = []
for x in g:
r.append(Publisher(x))
return r
def add_party(p: list) -> list:
if len(p) > 0:
party = p[0]
if len(p) > 1:
compo = p[1].replace(" compo", "")
rank = p[2][8:-2]
else:
compo = ""
rank = 0
else:
party = ""
compo = ""
rank = 0
return [party, compo, rank]
self.name = get_string("Demo:")
self.groups = add_groups(get_list("Group:"))
self.dev_team = add_people(get_list("Code:"), "Coder")
self.dev_team += add_people(get_list("3D:"), "3D Modeller")
self.dev_team += add_people(get_list("Raytracing:"), "Raytracing")
self.dev_team += add_people(get_list("Support:"), "Support")
self.dev_team += add_people(get_list("Graphics:"), "Graphics")
self.dev_team += add_people(get_list("Music:"), "Musician")
self.category = get_string("Category:")
self.release_date_format = get_datetime_format("Release:", ["%Y", "%B %Y"])
self.release_date = get_datetime("Release:", self.release_date_format)
self.chipsets = add_chipsets(get_list("Chipset:"))
self.party, self.compo, self.rank = add_party(get_list("Party:"))
@property
def igame_name(self):
def chipset():
r = []
for x in self.chipsets:
r.append(x.name)
return f"[{'/'.join(r)}]" if len(r) > 0 else ""
def groups():
r = []
for x in self.groups:
r.append(x.name)
return f"- {' & '.join(r)}" if len(r) > 0 else ""
return f"{self.name} {chipset()} {groups()}"
@property
def igame_genre(self) -> str:
return f"{self.category}"
| from datetime import datetime
from igamelister.amiga.chipset import Chipset
from igamelister.amiga.person import Person
from igamelister.amiga.publisher import Publisher
from igamelister.webscraper.template import Template
class ADA:
def __init__(self, page_id: int):
tmp = Template("ada.ini", page_id=page_id)
details = tmp.get_list("details")
sections = []
for i in range(len(details)):
if details[i].endswith(":"):
sections.append((i + 1, details[i]))
def get(idx: int):
r = []
for j in range(sections[idx][0], sections[idx + 1][0]):
if details[j].endswith(":"):
break
r.append(details[j])
return r
def get_string(key: str, idx: int = None, sep: str = ", ") -> str:
for j in range(len(sections)):
if sections[j][1] == key:
r = get(j)
if idx is not None:
return r[idx]
else:
return sep.join(r)
return ""
def get_list(key: str) -> list:
for j in range(len(sections)):
if sections[j][1] == key:
return get(j)
return []
def get_datetime(key: str, fmt: str) -> datetime:
if fmt == '':
return datetime.min
s = get_string(key, idx=0)
if s != '' or s != '-':
return datetime.strptime(s, fmt)
else:
return datetime.min
def get_datetime_format(key: str, fmt: list) -> str:
s = get_string(key, idx=0)
if s == '' or s == '-':
return ''
for f in fmt:
try:
datetime.strptime(s, f)
return f
except ValueError:
continue
def add_people(names, role: str) -> list:
r = []
if type(names) == list:
for name in names:
r.append(Person(name, role))
elif type(names) == str:
r.append(Person(names, role))
return r
def add_chipsets(c: list) -> list:
r = []
for x in c:
if x == "Ocs":
r.append(Chipset.OCS)
elif x == "Ecs":
r.append(Chipset.ECS)
elif x == "Aga":
r.append(Chipset.AGA)
else:
r.append(Chipset.Unknown)
return r
def add_groups(g: list) -> list:
r = []
for x in g:
r.append(Publisher(x))
return r
def add_party(p: list) -> list:
if len(p) > 0:
party = p[0]
if len(p) > 1:
compo = p[1].replace(" compo", "")
rank = p[2][8:-2]
else:
compo = ""
rank = 0
else:
party = ""
compo = ""
rank = 0
return [party, compo, rank]
self.name = get_string("Demo:")
self.groups = add_groups(get_list("Group:"))
self.dev_team = add_people(get_list("Code:"), "Coder")
self.dev_team += add_people(get_list("3D:"), "3D Modeller")
self.dev_team += add_people(get_list("Raytracing:"), "Raytracing")
self.dev_team += add_people(get_list("Support:"), "Support")
self.dev_team += add_people(get_list("Graphics:"), "Graphics")
self.dev_team += add_people(get_list("Music:"), "Musician")
self.category = get_string("Category:")
self.release_date_format = get_datetime_format("Release:", ["%Y", "%B %Y"])
self.release_date = get_datetime("Release:", self.release_date_format)
self.chipsets = add_chipsets(get_list("Chipset:"))
self.party, self.compo, self.rank = add_party(get_list("Party:"))
@property
def igame_name(self):
def chipset():
r = []
for x in self.chipsets:
r.append(x.name)
return f"[{'/'.join(r)}]" if len(r) > 0 else ""
def groups():
r = []
for x in self.groups:
r.append(x.name)
return f"- {' & '.join(r)}" if len(r) > 0 else ""
return f"{self.name} {chipset()} {groups()}"
@property
def igame_genre(self) -> str:
return f"{self.category}" | none | 1 | 2.380939 | 2 | |
src/pyg90alarm/discovery.py | hostcc/pyg90alarm | 0 | 6618739 | <filename>src/pyg90alarm/discovery.py
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Discovers G90 alarm panels.
"""
import asyncio
import logging
from .base_cmd import G90BaseCommand
from .host_info import G90HostInfo
from .const import G90Commands
_LOGGER = logging.getLogger(__name__)
class G90DiscoveryProtocol:
"""
tbd
:meta private:
"""
def __init__(self, parent):
"""
tbd
"""
self._parent = parent
def connection_made(self, transport):
"""
tbd
"""
def connection_lost(self, exc):
"""
tbd
"""
def datagram_received(self, data, addr):
"""
tbd
"""
try:
ret = self._parent.from_wire(data)
host_info = G90HostInfo(*ret)
_LOGGER.debug('Received from %s:%s: %s', addr[0], addr[1], ret)
res = {
'guid': host_info.host_guid,
'host': addr[0],
'port': addr[1]
}
res.update(host_info._asdict())
_LOGGER.debug('Discovered device: %s', res)
self._parent.add_device(res)
except Exception as exc: # pylint: disable=broad-except
_LOGGER.warning('Got exception, ignoring: %s', exc)
def error_received(self, exc):
"""
tbd
"""
class G90Discovery(G90BaseCommand):
"""
tbd
"""
# pylint: disable=too-few-public-methods
def __init__(self, timeout=10, **kwargs):
"""
tbd
"""
# pylint: disable=too-many-arguments
super().__init__(code=G90Commands.GETHOSTINFO, timeout=timeout,
**kwargs)
self._discovered_devices = []
async def process(self):
"""
tbd
"""
_LOGGER.debug('Attempting device discovery...')
transport, _ = await self._create_connection()
transport.sendto(self.to_wire())
await asyncio.sleep(self._timeout)
transport.close()
_LOGGER.debug('Discovered %s devices', len(self.devices))
return self.devices
@property
def devices(self):
"""
tbd
"""
return self._discovered_devices
def add_device(self, value):
"""
tbd
"""
self._discovered_devices.append(value)
def _proto_factory(self):
"""
tbd
"""
return G90DiscoveryProtocol(self)
| <filename>src/pyg90alarm/discovery.py
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Discovers G90 alarm panels.
"""
import asyncio
import logging
from .base_cmd import G90BaseCommand
from .host_info import G90HostInfo
from .const import G90Commands
_LOGGER = logging.getLogger(__name__)
class G90DiscoveryProtocol:
"""
tbd
:meta private:
"""
def __init__(self, parent):
"""
tbd
"""
self._parent = parent
def connection_made(self, transport):
"""
tbd
"""
def connection_lost(self, exc):
"""
tbd
"""
def datagram_received(self, data, addr):
"""
tbd
"""
try:
ret = self._parent.from_wire(data)
host_info = G90HostInfo(*ret)
_LOGGER.debug('Received from %s:%s: %s', addr[0], addr[1], ret)
res = {
'guid': host_info.host_guid,
'host': addr[0],
'port': addr[1]
}
res.update(host_info._asdict())
_LOGGER.debug('Discovered device: %s', res)
self._parent.add_device(res)
except Exception as exc: # pylint: disable=broad-except
_LOGGER.warning('Got exception, ignoring: %s', exc)
def error_received(self, exc):
"""
tbd
"""
class G90Discovery(G90BaseCommand):
"""
tbd
"""
# pylint: disable=too-few-public-methods
def __init__(self, timeout=10, **kwargs):
"""
tbd
"""
# pylint: disable=too-many-arguments
super().__init__(code=G90Commands.GETHOSTINFO, timeout=timeout,
**kwargs)
self._discovered_devices = []
async def process(self):
"""
tbd
"""
_LOGGER.debug('Attempting device discovery...')
transport, _ = await self._create_connection()
transport.sendto(self.to_wire())
await asyncio.sleep(self._timeout)
transport.close()
_LOGGER.debug('Discovered %s devices', len(self.devices))
return self.devices
@property
def devices(self):
"""
tbd
"""
return self._discovered_devices
def add_device(self, value):
"""
tbd
"""
self._discovered_devices.append(value)
def _proto_factory(self):
"""
tbd
"""
return G90DiscoveryProtocol(self)
| en | 0.757096 | # Copyright (c) 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Discovers G90 alarm panels. tbd :meta private: tbd tbd tbd tbd # pylint: disable=broad-except tbd tbd # pylint: disable=too-few-public-methods tbd # pylint: disable=too-many-arguments tbd tbd tbd tbd | 2.118778 | 2 |
utils/b85decode.py | darkwyrm/mensago-utils | 0 | 6618740 | <gh_stars>0
#!/usr/bin/env python3
# b85decode - a quick-and-dirty utility to Base85 decode a file or data from stdin
# Released under the terms of the MIT license
# ©2019-2020 <NAME> <<EMAIL>>
from base64 import b85decode
import os.path as path
import sys
def decode_file(file_name):
'''Quickie command to Base85 decode a file'''
try:
read_handle = open(file_name, 'rb')
data = read_handle.read()
except Exception as e:
print('Unable to open %s: %s' % (file_name, e))
if file_name.endswith('.b85'):
dest_name = file_name[:-4]
else:
dest_name = file_name + '.out'
if path.exists(dest_name):
response = input("%s exists. Overwrite? [y/N]: " % dest_name)
if not response or response.casefold()[0] != 'y':
return
try:
decoded = b85decode(data)
except Exception as e:
print('Unable to decode data: %s' % e)
sys.exit(1)
if not decoded:
print('Unable to decode data.')
sys.exit(1)
try:
out = open(dest_name, 'wb')
out.write(decoded)
except Exception as e:
print('Unable to save %s: %s' % (dest_name, e))
if __name__ == '__main__':
if len(sys.argv) == 2:
decode_file(sys.argv[1])
else:
sys.stdout.buffer.write(b85decode(sys.stdin.buffer.read()))
| #!/usr/bin/env python3
# b85decode - a quick-and-dirty utility to Base85 decode a file or data from stdin
# Released under the terms of the MIT license
# ©2019-2020 <NAME> <<EMAIL>>
from base64 import b85decode
import os.path as path
import sys
def decode_file(file_name):
'''Quickie command to Base85 decode a file'''
try:
read_handle = open(file_name, 'rb')
data = read_handle.read()
except Exception as e:
print('Unable to open %s: %s' % (file_name, e))
if file_name.endswith('.b85'):
dest_name = file_name[:-4]
else:
dest_name = file_name + '.out'
if path.exists(dest_name):
response = input("%s exists. Overwrite? [y/N]: " % dest_name)
if not response or response.casefold()[0] != 'y':
return
try:
decoded = b85decode(data)
except Exception as e:
print('Unable to decode data: %s' % e)
sys.exit(1)
if not decoded:
print('Unable to decode data.')
sys.exit(1)
try:
out = open(dest_name, 'wb')
out.write(decoded)
except Exception as e:
print('Unable to save %s: %s' % (dest_name, e))
if __name__ == '__main__':
if len(sys.argv) == 2:
decode_file(sys.argv[1])
else:
sys.stdout.buffer.write(b85decode(sys.stdin.buffer.read())) | en | 0.635523 | #!/usr/bin/env python3 # b85decode - a quick-and-dirty utility to Base85 decode a file or data from stdin # Released under the terms of the MIT license # ©2019-2020 <NAME> <<EMAIL>> Quickie command to Base85 decode a file | 3.087724 | 3 |
DeepFE/2DCNN-HU2013.py | BehnoodRasti/Hyperspectral-Shallow-Deep-Feature-Extraction-Toolbox | 80 | 6618741 | import os
import numpy as np
import random
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA
# setting parameters
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/2DCNN-14.mat'
patchsize = 16 # input spatial size for 2D-CNN
batchsize = 128 # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001
# load data
Data = io.loadmat(DataPath)
TrLabel = io.loadmat(TRPath)
TsLabel = io.loadmat(TSPath)
Data = Data['Houston']
Data = Data.astype(np.float32)
TrLabel = TrLabel['TRLabel']
TsLabel = TsLabel['TSLabel']
# without dimensionality reduction
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
# normalization method 2: map to zero mean and one std
[m, n, l] = np.shape(Data)
# x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32')
for i in range(l):
mean = np.mean(Data[:, :, i])
std = np.std(Data[:, :, i])
Data[:, :, i] = (Data[:, :, i] - mean)/std
# x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric')
# # extract the first principal component
# x = np.reshape(Data, (m*n, l))
# pca = PCA(n_components=0.995, copy=True, whiten=False)
# x = pca.fit_transform(x)
# _, l = x.shape
# x = np.reshape(x, (m, n, l))
# # print x.shape
# # plt.figure()
# # plt.imshow(x)
# # plt.show()
x = Data
# boundary interpolation
temp = x[:,:,0]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
[m2,n2] = temp2.shape
x2 = np.empty((m2, n2, l), dtype='float32')
for i in range(l):
temp = x[:,:,i]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
x2[:,:,i] = temp2
# construct the training and testing set
[ind1, ind2] = np.where(TrLabel != 0)
TrainNum = len(ind1)
TrainPatch = np.empty((TrainNum, l, patchsize, patchsize), dtype='float32')
TrainLabel = np.empty(TrainNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
# patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width + 1), (ind4[i] - pad_width):(ind4[i] + pad_width + 1), :]
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TrainPatch[i, :, :, :] = patch
patchlabel = TrLabel[ind1[i], ind2[i]]
TrainLabel[i] = patchlabel
[ind1, ind2] = np.where(TsLabel != 0)
TestNum = len(ind1)
TestPatch = np.empty((TestNum, l, patchsize, patchsize), dtype='float32')
TestLabel = np.empty(TestNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TestPatch[i, :, :, :] = patch
patchlabel = TsLabel[ind1[i], ind2[i]]
TestLabel[i] = patchlabel
print('Training size and testing size are:', TrainPatch.shape, 'and', TestPatch.shape)
# step3: change data to the input type of PyTorch
TrainPatch = torch.from_numpy(TrainPatch)
TrainLabel = torch.from_numpy(TrainLabel)-1
TrainLabel = TrainLabel.long()
dataset = dataf.TensorDataset(TrainPatch, TrainLabel)
train_loader = dataf.DataLoader(dataset, batch_size=batchsize, shuffle=True)
TestPatch = torch.from_numpy(TestPatch)
TestLabel = torch.from_numpy(TestLabel)-1
TestLabel = TestLabel.long()
Classes = len(np.unique(TrainLabel))
OutChannel = 32
# construct the network
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels = l,
out_channels = OutChannel,
kernel_size = 3,
stride = 1,
padding = 1,
),
nn.BatchNorm2d(OutChannel),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(OutChannel, OutChannel*2, 3, 1, 1),
nn.BatchNorm2d(OutChannel*2),
nn.ReLU(),
nn.MaxPool2d(2),
# nn.Dropout(0.5),
)
self.conv3 = nn.Sequential(
nn.Conv2d(OutChannel*2, OutChannel*4, 3, 1, 1),
nn.BatchNorm2d(OutChannel*4),
nn.ReLU(),
nn.AdaptiveMaxPool2d(1),
# nn.Dropout(0.5),
)
self.out = nn.Linear(OutChannel*4, Classes) # fully connected layer, output 16 classes
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
output = self.out(x)
return output
cnn = CNN()
print('The structure of the designed network', cnn)
# move model to GPU
cnn.cuda()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
BestAcc = 0
# train and test the designed model
for epoch in range(EPOCH):
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
# move train data to GPU
b_x = b_x.cuda()
b_y = b_y.cuda()
output = cnn(b_x) # cnn output
loss = loss_func(output, b_y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if step % 50 == 0:
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel) // 5000
for i in range(number):
temp = TestPatch[i * 5000:(i + 1) * 5000, :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i * 5000:(i + 1) * 5000] = temp3.cpu()
del temp, temp2, temp3
if (i + 1) * 5000 < len(TestLabel):
temp = TestPatch[(i + 1) * 5000:len(TestLabel), :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i + 1) * 5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
accuracy = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
# test_output = rnn(TestData)
# pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()
# accuracy = torch.sum(pred_y == TestDataLabel).type(torch.FloatTensor) / TestDataLabel.size(0)
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
# save the parameters in network
if accuracy > BestAcc:
torch.save(cnn.state_dict(), 'W3-DLSection/HU2013/net_params_2DCNN.pkl')
BestAcc = accuracy
cnn.train()
# # test each class accuracy
# # divide test set into many subsets
cnn.load_state_dict(torch.load('W3-DLSection/HU2013/net_params_2DCNN.pkl'))
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel)//5000
for i in range(number):
temp = TestPatch[i*5000:(i+1)*5000, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i*5000:(i+1)*5000] = temp3.cpu()
del temp, temp2, temp3
if (i+1)*5000 < len(TestLabel):
temp = TestPatch[(i+1)*5000:len(TestLabel), :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i+1)*5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
OA = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
Classes = np.unique(TestLabel)
EachAcc = np.empty(len(Classes))
for i in range(len(Classes)):
cla = Classes[i]
right = 0
sum = 0
for j in range(len(TestLabel)):
if TestLabel[j] == cla:
sum += 1
if TestLabel[j] == cla and pred_y[j] == cla:
right += 1
EachAcc[i] = right.__float__()/sum.__float__()
print(OA)
print(EachAcc)
del TestPatch, TrainPatch, TrainLabel, b_x, b_y, dataset, train_loader
# show the whole image
# The whole data is too big to test in one time; So dividing it into several parts
part = 5000
pred_all = np.empty((m*n, 1), dtype='float32')
number = m*n//part
for i in range(number):
D = np.empty((part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range(i*part, (i+1)*part):
row = j//n
col = j - row*n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[i*part:(i+1)*part, 0] = temp3.cpu()
del temp, temp2, temp3, D
if (i+1)*part < m*n:
D = np.empty((m*n-(i+1)*part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range((i+1)*part, m*n):
row = j // n
col = j - row * n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[(i + 1) * part:m*n, 0] = temp3.cpu()
del temp, temp2, temp3, D
pred_all = np.reshape(pred_all, (m, n)) + 1
OA = OA.numpy()
pred_y = pred_y.cpu()
pred_y = pred_y.numpy()
TestDataLabel = TestLabel.cpu()
TestDataLabel = TestDataLabel.numpy()
io.savemat(savepath, {'PredAll': pred_all, 'OA': OA, 'TestPre': pred_y, 'TestLabel': TestDataLabel})
# print io.loadmat(savepath)
#
plt.figure()
plt.imshow(pred_all)
plt.show()
| import os
import numpy as np
import random
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA
# setting parameters
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/2DCNN-14.mat'
patchsize = 16 # input spatial size for 2D-CNN
batchsize = 128 # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001
# load data
Data = io.loadmat(DataPath)
TrLabel = io.loadmat(TRPath)
TsLabel = io.loadmat(TSPath)
Data = Data['Houston']
Data = Data.astype(np.float32)
TrLabel = TrLabel['TRLabel']
TsLabel = TsLabel['TSLabel']
# without dimensionality reduction
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
# normalization method 2: map to zero mean and one std
[m, n, l] = np.shape(Data)
# x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32')
for i in range(l):
mean = np.mean(Data[:, :, i])
std = np.std(Data[:, :, i])
Data[:, :, i] = (Data[:, :, i] - mean)/std
# x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric')
# # extract the first principal component
# x = np.reshape(Data, (m*n, l))
# pca = PCA(n_components=0.995, copy=True, whiten=False)
# x = pca.fit_transform(x)
# _, l = x.shape
# x = np.reshape(x, (m, n, l))
# # print x.shape
# # plt.figure()
# # plt.imshow(x)
# # plt.show()
x = Data
# boundary interpolation
temp = x[:,:,0]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
[m2,n2] = temp2.shape
x2 = np.empty((m2, n2, l), dtype='float32')
for i in range(l):
temp = x[:,:,i]
pad_width = np.floor(patchsize/2)
pad_width = np.int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
x2[:,:,i] = temp2
# construct the training and testing set
[ind1, ind2] = np.where(TrLabel != 0)
TrainNum = len(ind1)
TrainPatch = np.empty((TrainNum, l, patchsize, patchsize), dtype='float32')
TrainLabel = np.empty(TrainNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
# patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width + 1), (ind4[i] - pad_width):(ind4[i] + pad_width + 1), :]
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TrainPatch[i, :, :, :] = patch
patchlabel = TrLabel[ind1[i], ind2[i]]
TrainLabel[i] = patchlabel
[ind1, ind2] = np.where(TsLabel != 0)
TestNum = len(ind1)
TestPatch = np.empty((TestNum, l, patchsize, patchsize), dtype='float32')
TestLabel = np.empty(TestNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TestPatch[i, :, :, :] = patch
patchlabel = TsLabel[ind1[i], ind2[i]]
TestLabel[i] = patchlabel
print('Training size and testing size are:', TrainPatch.shape, 'and', TestPatch.shape)
# step3: change data to the input type of PyTorch
TrainPatch = torch.from_numpy(TrainPatch)
TrainLabel = torch.from_numpy(TrainLabel)-1
TrainLabel = TrainLabel.long()
dataset = dataf.TensorDataset(TrainPatch, TrainLabel)
train_loader = dataf.DataLoader(dataset, batch_size=batchsize, shuffle=True)
TestPatch = torch.from_numpy(TestPatch)
TestLabel = torch.from_numpy(TestLabel)-1
TestLabel = TestLabel.long()
Classes = len(np.unique(TrainLabel))
OutChannel = 32
# construct the network
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels = l,
out_channels = OutChannel,
kernel_size = 3,
stride = 1,
padding = 1,
),
nn.BatchNorm2d(OutChannel),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(OutChannel, OutChannel*2, 3, 1, 1),
nn.BatchNorm2d(OutChannel*2),
nn.ReLU(),
nn.MaxPool2d(2),
# nn.Dropout(0.5),
)
self.conv3 = nn.Sequential(
nn.Conv2d(OutChannel*2, OutChannel*4, 3, 1, 1),
nn.BatchNorm2d(OutChannel*4),
nn.ReLU(),
nn.AdaptiveMaxPool2d(1),
# nn.Dropout(0.5),
)
self.out = nn.Linear(OutChannel*4, Classes) # fully connected layer, output 16 classes
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
output = self.out(x)
return output
cnn = CNN()
print('The structure of the designed network', cnn)
# move model to GPU
cnn.cuda()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
BestAcc = 0
# train and test the designed model
for epoch in range(EPOCH):
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
# move train data to GPU
b_x = b_x.cuda()
b_y = b_y.cuda()
output = cnn(b_x) # cnn output
loss = loss_func(output, b_y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if step % 50 == 0:
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel) // 5000
for i in range(number):
temp = TestPatch[i * 5000:(i + 1) * 5000, :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i * 5000:(i + 1) * 5000] = temp3.cpu()
del temp, temp2, temp3
if (i + 1) * 5000 < len(TestLabel):
temp = TestPatch[(i + 1) * 5000:len(TestLabel), :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i + 1) * 5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
accuracy = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
# test_output = rnn(TestData)
# pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()
# accuracy = torch.sum(pred_y == TestDataLabel).type(torch.FloatTensor) / TestDataLabel.size(0)
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
# save the parameters in network
if accuracy > BestAcc:
torch.save(cnn.state_dict(), 'W3-DLSection/HU2013/net_params_2DCNN.pkl')
BestAcc = accuracy
cnn.train()
# # test each class accuracy
# # divide test set into many subsets
cnn.load_state_dict(torch.load('W3-DLSection/HU2013/net_params_2DCNN.pkl'))
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel)//5000
for i in range(number):
temp = TestPatch[i*5000:(i+1)*5000, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i*5000:(i+1)*5000] = temp3.cpu()
del temp, temp2, temp3
if (i+1)*5000 < len(TestLabel):
temp = TestPatch[(i+1)*5000:len(TestLabel), :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i+1)*5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
OA = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
Classes = np.unique(TestLabel)
EachAcc = np.empty(len(Classes))
for i in range(len(Classes)):
cla = Classes[i]
right = 0
sum = 0
for j in range(len(TestLabel)):
if TestLabel[j] == cla:
sum += 1
if TestLabel[j] == cla and pred_y[j] == cla:
right += 1
EachAcc[i] = right.__float__()/sum.__float__()
print(OA)
print(EachAcc)
del TestPatch, TrainPatch, TrainLabel, b_x, b_y, dataset, train_loader
# show the whole image
# The whole data is too big to test in one time; So dividing it into several parts
part = 5000
pred_all = np.empty((m*n, 1), dtype='float32')
number = m*n//part
for i in range(number):
D = np.empty((part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range(i*part, (i+1)*part):
row = j//n
col = j - row*n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[i*part:(i+1)*part, 0] = temp3.cpu()
del temp, temp2, temp3, D
if (i+1)*part < m*n:
D = np.empty((m*n-(i+1)*part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range((i+1)*part, m*n):
row = j // n
col = j - row * n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
D[count, :, :, :] = patch
count += 1
temp = torch.from_numpy(D)
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_all[(i + 1) * part:m*n, 0] = temp3.cpu()
del temp, temp2, temp3, D
pred_all = np.reshape(pred_all, (m, n)) + 1
OA = OA.numpy()
pred_y = pred_y.cpu()
pred_y = pred_y.numpy()
TestDataLabel = TestLabel.cpu()
TestDataLabel = TestDataLabel.numpy()
io.savemat(savepath, {'PredAll': pred_all, 'OA': OA, 'TestPre': pred_y, 'TestLabel': TestDataLabel})
# print io.loadmat(savepath)
#
plt.figure()
plt.imshow(pred_all)
plt.show()
| en | 0.57851 | # setting parameters # input spatial size for 2D-CNN # select from [16, 32, 64, 128], the best is 64 # load data # without dimensionality reduction # normalization method 2: map to zero mean and one std # x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32') # x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric') # # extract the first principal component # x = np.reshape(Data, (m*n, l)) # pca = PCA(n_components=0.995, copy=True, whiten=False) # x = pca.fit_transform(x) # _, l = x.shape # x = np.reshape(x, (m, n, l)) # # print x.shape # # plt.figure() # # plt.imshow(x) # # plt.show() # boundary interpolation # construct the training and testing set # patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width + 1), (ind4[i] - pad_width):(ind4[i] + pad_width + 1), :] # step3: change data to the input type of PyTorch # construct the network # nn.Dropout(0.5), # nn.Dropout(0.5), # fully connected layer, output 16 classes # flatten the output of conv2 to (batch_size, 32 * 7 * 7) # move model to GPU # optimize all cnn parameters # the target label is not one-hotted # train and test the designed model # gives batch data, normalize x when iterate train_loader # move train data to GPU # cnn output # cross entropy loss # clear gradients for this training step # backpropagation, compute gradients # apply gradients # test_output = rnn(TestData) # pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze() # accuracy = torch.sum(pred_y == TestDataLabel).type(torch.FloatTensor) / TestDataLabel.size(0) # save the parameters in network # # test each class accuracy # # divide test set into many subsets # show the whole image # The whole data is too big to test in one time; So dividing it into several parts # print io.loadmat(savepath) # | 2.520654 | 3 |
python code/transgenic_mosquito_sim_v1.py | sanchestm/gm-mosquito-sim | 1 | 6618742 | <filename>python code/transgenic_mosquito_sim_v1.py
#!/usr/bin/python3
import random as rd
import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.pyplot import *
from math import *
import numpy as np
import skimage as ski
from skimage.exposure import adjust_gamma
from skimage.color import rgb2gray
from scipy import misc
import PIL.ImageOps
style.use('ggplot')
import sys
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ast import literal_eval as make_tuple
import re
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
form_class = uic.loadUiType("transgenic_mosquito_sim_v1.ui")[0]
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.start.clicked.connect(self.start_clicked)
self.regioncontourbutton.clicked.connect(self.openfile1)
self.twibutton.clicked.connect(self.openfile2)
self.cityregionbutton.clicked.connect(self.openfile3)
self.vegindexbutton.clicked.connect(self.openfile4)
self.imageviewbutton.clicked.connect(self.openMainFig)
self.rmvPointButton.clicked.connect(self.removeCell)
self.MutValue.setText("5000")
self.MutantQuantity.setText("0")
self.table.setColumnCount(2)
self.layout.addWidget(self.table, 1, 0)
self.table.setHorizontalHeaderLabels(['index', 'mutant size'])
self.MutantLIST = np.array([])
self.THEimage = np.array([])
self.fig = Figure()
def openfile1(self):
self.regioncontour.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile2(self):
self.twi.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile3(self):
self.cityregion.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile4(self):
self.vegindex.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def removeCell(self):
print('cellremoved')
pointNumber = int(self.rmvPointN.text())
self.MutantLIST[pointNumber:-1] = self.MutantLIST[pointNumber+1:]
self.MutantLIST = self.MutantLIST[:-1]
self.MutantQuantity.setText(str(int(self.MutantQuantity.text() )-int(self.table.item(pointNumber, 1).text()) ) )
self.table.removeRow(pointNumber)
for i in range(len(self.MutantLIST)):
self.table.setItem(i, 0, QtGui.QTableWidgetItem(str(i)))
self.ImgAddPatches()
self.rmvPointN.setText('')
def onclick(self, event):
print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %(event.button, event.x, event.y, event.xdata, event.ydata))
if event.button == 3:
self.MutantLIST =np.array(self.MutantLIST.tolist() + [[int(event.ydata), int(event.xdata), int(str(self.MutValue.text()))]])
print(self.MutantLIST)
rowPosition = self.table.rowCount()
self.table.insertRow(rowPosition)
self.table.setItem(rowPosition , 0, QtGui.QTableWidgetItem(str(rowPosition)))
self.table.setItem(rowPosition , 1, QtGui.QTableWidgetItem(str(self.MutValue.text())))
self.MutantQuantity.setText(str(int(self.MutantQuantity.text()) + int(str(self.MutValue.text()))))
self.ImgAddPatches()
def openMainFig(self):
if self.THEimage.any() == True:
self.rmmpl()
for i in range(len(self.MutantLIST)):
self.table.removeRow(0)
self.MutantLIST = np.array([])
name = QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)")
image = misc.imread(str(name))
self.THEimage = image
baseimage = self.fig.add_subplot(111)
baseimage.axis('off')
baseimage.grid(False)
baseimage.imshow(image)
self.canvas = FigureCanvas(self.fig)
self.mplvl.addWidget(self.canvas)
self.canvas.draw()
#self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True)
#self.mplvl.addWidget(self.toolbar)
cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
def ImgAddPatches(self):
self.fig, ax = subplots(1, 1)
ax.imshow(self.THEimage)
ax.grid(False)
ax.axis('off')
for number, blob in enumerate(self.MutantLIST):
y, x, r = blob
c = Circle((x, y) ,self.THEimage.shape[0]*(log(r)**1.5)/1000, color='r', linewidth=2, alpha = 0.5)
ax.add_patch(c)
ax.text(x,y, str(number), color = 'white')
self.changeFIGURE(self.fig)
def changeFIGURE(self, newFIG):
self.rmmpl()
self.canvas = FigureCanvas(newFIG)
self.mplvl.addWidget(self.canvas)
self.canvas.draw()
#self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True)
#self.mplvl.addWidget(self.toolbar)
cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
def rmmpl(self,):
self.mplvl.removeWidget(self.canvas)
self.canvas.close()
#self.mplvl.removeWidget(self.toolbar)
#self.toolbar.close()
def start_clicked(self):
self.start.setText("Running")
transgenic_type = 0
if self.genedrive.isChecked() == True : transgenic_type = 1
island_shape = misc.imread(str(self.regioncontour.text()))
island_shape_gray = rgb2gray(island_shape)
island_wet = ski.img_as_float(rgb2gray(misc.imread(str(self.twi.text()))))
island_veg = adjust_gamma(ski.img_as_float(rgb2gray(misc.imread(str(self.vegindex.text())))), .2)
################################################################
#parameters
#sizes
box = float(self.pixelSize.text())/1000 #0.06866823529
grid_size = island_shape_gray.shape
if str(self.cityregion.text()) == '': island_city =np.zeros(grid_size)
else: island_city = rgb2gray(misc.imread(str(self.cityregion.text())))
#inverse image
inverse_map = abs(island_shape_gray - np.ones(grid_size))
island_wet = adjust_gamma(abs(island_wet - np.ones(grid_size)), .5)
island_size = island_shape_gray.size*inverse_map.mean()*box*box #lenth n' width in km -> use float
#population limit
pop_lim = int(str(self.populationLimit.text()))*10./3
pop_lim_per_box = pop_lim*box*box
#time
dtime = (box*sqrt(2)/2./61)*1000 # in hours
total_time = 24*int(self.daysAfterRelease.text())
N_iterations = 1.*total_time/dtime
eq_steps = int(str(self.Neq_step.text()))#steps used for reaching equilibria on normal insects
#rates in dtime (hours)
population_growth_rate = float(self.populationGrowthRate.text())
population_growth_rate += .001
death_rate = -(.1/24)*dtime #death rate per cycle
female_reproduction_percentage = .2 #percentage of fertile females (no unit)
N_ovopositions = dtime*3./24 #number of ovopositions per cycle per female
density_larvae_death = 0.1 #larvae death rate per day
larvae_survival = (1/0.5)*(population_growth_rate - 1 - death_rate)*(2./female_reproduction_percentage)/(N_ovopositions*100.)
egg_to_fertile_adult = 24*8. #time from egg to mature mosquito in hours
emigration_rate = .104 #rate of emigration per cycle
delta_fitness = 1. #percentage
birth_rate = N_ovopositions* 100.*larvae_survival*female_reproduction_percentage# per female per cycle
################################################################
#Functions
def emigration(N, N_neighbors, true_neighbors):
return -(N-death(N))*emigration_rate*N_neighbors*true_neighbors/16. ######## colocar q ninguem imigra pro mar
def imigration(grid, i, j):
variable = (grid[i][j-1] + grid[i][j+1] + grid[i+1][j] + grid[i-1][j])
return (1./4)*(variable - death(variable))*emigration_rate
def birth(Nf, Nm, NMut, city):
if Nm <= 0 and NMut <= 0: return (0,0)
if (1 - (Nf+Nm+NMut)/pop_lim_per_box) < 0 : a = 0
else: a = Nf*birth_rate*(1 - (Nf+Nm+NMut)/pop_lim_per_box)
mutant = a*(1.*NMut/(NMut+Nm))*transgenic_type
normal = a*(1./2)*(1.*Nm/(NMut+Nm))
if city == True : mutant *= 1.2 ; normal *=1.2
return (normal, mutant)
def death(N):
return N*death_rate
def passo(grid_female, grid_male, grid_mutant):
#nonlocal grid_female_stack, grid_male_stack, grid_mutant_stack
grid_female_old = grid_female_stack.pop(0)
grid_male_old = grid_male_stack.pop(0)
grid_mutant_old = grid_mutant_stack.pop(0)
delta_male = np.zeros(grid_size)
delta_female = np.zeros(grid_size)
delta_mutant = np.zeros(grid_size)
for i in range(1 ,grid_size[0]-1):
for j in range(1 ,grid_size[1]-1):
(birth_normal, birth_mutant) = birth(grid_female_old[i][j], grid_male_old[i][j], grid_mutant_old[i][j], island_city[i][j])
delta_male[i,j] = emigration( grid_male[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration( grid_male, i, j) + birth_normal*(island_wet[i,j]) + death(grid_male[i][j])*island_veg[i,j]
delta_female[i,j] = emigration(grid_female[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration(grid_female, i, j) + birth_normal*(island_wet[i,j]) + death(grid_female[i][j])*island_veg[i,j]
delta_mutant[i,j] = emigration(grid_mutant[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration(grid_mutant, i, j) + birth_mutant*(island_wet[i,j]) + death(grid_mutant[i][j])*island_veg[i,j]
grid_male += delta_male
grid_male *= inverse_map
grid_female += delta_female
grid_female *= inverse_map
grid_mutant += delta_mutant
grid_mutant *= inverse_map
def map_bondary(area):
for i in range(grid_size[0]):
for j in range(grid_size[1]):
if i == 0 or j == 0 or i == grid_size[0]-1 or j == grid_size[1]-1: area[i][j] = 0
else: area[i][j] = 1
def map_corect_border(area):
for i in range(grid_size[0]):
for j in range(grid_size[1]):
if i == 0 or j == 0 or i == grid_size[0]-1 or j == grid_size[1]-1: area[i][j] = 0
def neighbor(i,j, area):
return area[i][j-1] + area[i][j+1] + area[i+1][j] + area[i-1][j]
def figure(i):
subplot(1, 2, 1, aspect= 1.*grid_size[0]/grid_size[1])
pcolormesh(grid_mutant, cmap=cm.OrRd, vmax=colorbar_max/3., vmin=0)
title('mutant density')
axis([1, grid_size[1]-1, grid_size[0]-1, 1])
#grid(True)
colorbar(shrink = 6.4/10.9)
subplot(1, 2, 2, aspect= 1.*grid_size[0]/grid_size[1]) #, adjustable='box'
pcolormesh(grid_female, cmap=cm.gist_earth, vmax=colorbar_max, vmin=0)
colorbar(shrink = 6.4/10.9)
grid(True)
axis([1, grid_size[1]-1, grid_size[0]-1, 1])
title('females density')
suptitle('%.2f' % (1.*i*dtime/24 + 1) +' days after mutant release')
tight_layout()
savefig('timelapse/mosquitos-' + '{0:03d}'.format(int(i/24+1))+ '.png', dpi = 500/4. , figsize = (11/4.,7/4.)) #
close()
def plot_nmosq_time():
    """Save two summary graphs: absolute population sizes and percentage curves.

    Reads the male/female/mutant history lists accumulated by the main loop.
    """
    tot_list = np.array(male_history)+ np.array(mutant_history)+ np.array(female_history)
    # Graph 1: population size per square km over time.
    subplot(1,1,1)
    plot([x*dtime/24. for x in range(len(male_history))],[float(x)/(island_size) for x in male_history], label = 'wt male')
    # NOTE(review): this curve plots tot_list values against the
    # female_history range; the lengths match, so it is correct, just
    # confusingly written.
    plot([x*dtime/24. for x in range(len(female_history))],[float(x)/(island_size) for x in tot_list.tolist()], label = 'total population')
    plot([x*dtime/24. for x in range(len(mutant_history))],[float(x)/(island_size) for x in mutant_history], label = 'mutant')
    xlabel('days after mutant release')
    ylabel('population size per square Km')
    title('variation in population size ')
    grid(True)
    legend(loc='best', prop={'size':10})
    savefig("graphs/popsize ndays: " + str(total_time/24.)+ ' dtime: ' + str(dtime)+ ' boxsize: '+ str(box)+ '.png')
    close()
    # Graph 2: mutant and female fractions of the total population.
    subplot(1,1,1)
    plot([x*dtime/24. for x in range(len(mutant_history))],(np.array(mutant_history)/tot_list).tolist(), label= 'mutant percentage')
    plot([x*dtime/24. for x in range(len(mutant_history))],(np.array(female_history)/tot_list).tolist(), label= 'female percentage')
    xlabel('days after mutant release')
    ylabel('percentages')
    ylim(0,1)
    title('mutant and female percentages after realease')
    grid(True)
    legend(loc='best', prop={'size':10})
    savefig("graphs/percentages ndays: " + str(total_time/24.)+ ' dtime: ' + str(dtime)+ ' boxsize: '+ str(box)+ '.png') #, bbox_inches='tight',dpi=100
    close()
################################################################
#initializing grids
# NOTE(review): grid_male is a numpy array, so `grid_male[:]` below is a
# *view*, not a copy -- grid_female and every stack entry alias the same
# buffer as grid_male. If independent populations are intended these
# should be `.copy()`; confirm before changing, since the simulation's
# current behaviour depends on it.
grid_male = np.random.randint(low = pop_lim_per_box*.15 -1 , high = pop_lim_per_box*.30, size = grid_size)*abs(island_shape_gray - np.ones(grid_size))
grid_female = grid_male[:]
grid_mutant = np.zeros(grid_size)
neighbors_map = np.ones(grid_size)
# Delay lines: eggs laid now become fertile adults egg_to_fertile_adult hours later.
grid_male_stack = [grid_male[:] for i in range(int(egg_to_fertile_adult/dtime))]
grid_female_stack = [grid_female[:] for i in range(int(egg_to_fertile_adult/dtime))]
grid_mutant_stack = [np.zeros(grid_size) for i in range(int(egg_to_fertile_adult/dtime))]
map_bondary(neighbors_map)
map_corect_border(grid_male)
map_corect_border(grid_female)
map_corect_border(grid_mutant)
##############################################################
# starting program
# ANSI escape codes used to overwrite the progress line in the terminal.
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
print("Equilibrium steps started")
# Phase 1: run the wild-type population to equilibrium before any release.
for i in range(eq_steps):
    grid_male_stack += [grid_male]
    grid_female_stack += [grid_female]
    grid_mutant_stack += [grid_mutant]
    loading = "%.2f" % (float(i)*100./eq_steps) + "%"
    print("loading: " + loading + " completed")
    self.progressBareq.setText(loading)
    passo(grid_female, grid_male, grid_mutant)
    print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print("Equilibrium steps completed")
colorbar_max = grid_female.max()*.95
# Release: seed the mutant grid at every point picked in the GUI.
for single_point in self.MutantLIST:
    y,x,quantity = single_point
    grid_mutant[y,x] = quantity
grid_mutant_stack[-1] = grid_mutant
#mosquito population size history
male_history = []
female_history = []
mutant_history = []
close()
figure(-1)
# Phase 2: main simulation loop after the mutant release.
for i in range(int(N_iterations)):
    loading = "%.2f" % (float(i)/N_iterations*100) + "%"
    self.progressBarsim.setText(loading)
    print("loading: " + loading + " completed")
    grid_male_stack += [grid_male]
    grid_female_stack += [grid_female]
    grid_mutant_stack += [grid_mutant]
    passo(grid_female, grid_male, grid_mutant)
    female_history += [np.sum(grid_female)]
    mutant_history += [np.sum(grid_mutant)]
    male_history += [np.sum(grid_male)]
    # Dump a density snapshot every 48 iterations.
    if i%48==0:
        figure(i)
    print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
    # Stop early once the female population has effectively collapsed.
    if grid_female.sum() <= 400: break
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print("___________________________________ \n \n")
plot_nmosq_time()
# Reset the GUI controls so another run can be started.
self.progressBareq.setText("0%")
self.progressBarsim.setText("0%")
self.start.setText("Run Simulation")
# Module level: build and run the Qt application.
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass()
myWindow.show()
app.exec_()
| <filename>python code/transgenic_mosquito_sim_v1.py
#!/usr/bin/python3
import random as rd
import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.pyplot import *
from math import *
import numpy as np
import skimage as ski
from skimage.exposure import adjust_gamma
from skimage.color import rgb2gray
from scipy import misc
import PIL.ImageOps
style.use('ggplot')
import sys
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ast import literal_eval as make_tuple
import re
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
form_class = uic.loadUiType("transgenic_mosquito_sim_v1.ui")[0]
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.start.clicked.connect(self.start_clicked)
self.regioncontourbutton.clicked.connect(self.openfile1)
self.twibutton.clicked.connect(self.openfile2)
self.cityregionbutton.clicked.connect(self.openfile3)
self.vegindexbutton.clicked.connect(self.openfile4)
self.imageviewbutton.clicked.connect(self.openMainFig)
self.rmvPointButton.clicked.connect(self.removeCell)
self.MutValue.setText("5000")
self.MutantQuantity.setText("0")
self.table.setColumnCount(2)
self.layout.addWidget(self.table, 1, 0)
self.table.setHorizontalHeaderLabels(['index', 'mutant size'])
self.MutantLIST = np.array([])
self.THEimage = np.array([])
self.fig = Figure()
def openfile1(self):
self.regioncontour.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile2(self):
self.twi.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile3(self):
self.cityregion.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def openfile4(self):
self.vegindex.setText(QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)"))
def removeCell(self):
print('cellremoved')
pointNumber = int(self.rmvPointN.text())
self.MutantLIST[pointNumber:-1] = self.MutantLIST[pointNumber+1:]
self.MutantLIST = self.MutantLIST[:-1]
self.MutantQuantity.setText(str(int(self.MutantQuantity.text() )-int(self.table.item(pointNumber, 1).text()) ) )
self.table.removeRow(pointNumber)
for i in range(len(self.MutantLIST)):
self.table.setItem(i, 0, QtGui.QTableWidgetItem(str(i)))
self.ImgAddPatches()
self.rmvPointN.setText('')
def onclick(self, event):
print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %(event.button, event.x, event.y, event.xdata, event.ydata))
if event.button == 3:
self.MutantLIST =np.array(self.MutantLIST.tolist() + [[int(event.ydata), int(event.xdata), int(str(self.MutValue.text()))]])
print(self.MutantLIST)
rowPosition = self.table.rowCount()
self.table.insertRow(rowPosition)
self.table.setItem(rowPosition , 0, QtGui.QTableWidgetItem(str(rowPosition)))
self.table.setItem(rowPosition , 1, QtGui.QTableWidgetItem(str(self.MutValue.text())))
self.MutantQuantity.setText(str(int(self.MutantQuantity.text()) + int(str(self.MutValue.text()))))
self.ImgAddPatches()
def openMainFig(self):
if self.THEimage.any() == True:
self.rmmpl()
for i in range(len(self.MutantLIST)):
self.table.removeRow(0)
self.MutantLIST = np.array([])
name = QtGui.QFileDialog.getOpenFileName(self, 'Single File', '~/Desktop/', "Image files (*.jpg *.png *.tif)")
image = misc.imread(str(name))
self.THEimage = image
baseimage = self.fig.add_subplot(111)
baseimage.axis('off')
baseimage.grid(False)
baseimage.imshow(image)
self.canvas = FigureCanvas(self.fig)
self.mplvl.addWidget(self.canvas)
self.canvas.draw()
#self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True)
#self.mplvl.addWidget(self.toolbar)
cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
def ImgAddPatches(self):
self.fig, ax = subplots(1, 1)
ax.imshow(self.THEimage)
ax.grid(False)
ax.axis('off')
for number, blob in enumerate(self.MutantLIST):
y, x, r = blob
c = Circle((x, y) ,self.THEimage.shape[0]*(log(r)**1.5)/1000, color='r', linewidth=2, alpha = 0.5)
ax.add_patch(c)
ax.text(x,y, str(number), color = 'white')
self.changeFIGURE(self.fig)
def changeFIGURE(self, newFIG):
self.rmmpl()
self.canvas = FigureCanvas(newFIG)
self.mplvl.addWidget(self.canvas)
self.canvas.draw()
#self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True)
#self.mplvl.addWidget(self.toolbar)
cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
def rmmpl(self,):
self.mplvl.removeWidget(self.canvas)
self.canvas.close()
#self.mplvl.removeWidget(self.toolbar)
#self.toolbar.close()
def start_clicked(self):
self.start.setText("Running")
transgenic_type = 0
if self.genedrive.isChecked() == True : transgenic_type = 1
island_shape = misc.imread(str(self.regioncontour.text()))
island_shape_gray = rgb2gray(island_shape)
island_wet = ski.img_as_float(rgb2gray(misc.imread(str(self.twi.text()))))
island_veg = adjust_gamma(ski.img_as_float(rgb2gray(misc.imread(str(self.vegindex.text())))), .2)
################################################################
#parameters
#sizes
box = float(self.pixelSize.text())/1000 #0.06866823529
grid_size = island_shape_gray.shape
if str(self.cityregion.text()) == '': island_city =np.zeros(grid_size)
else: island_city = rgb2gray(misc.imread(str(self.cityregion.text())))
#inverse image
inverse_map = abs(island_shape_gray - np.ones(grid_size))
island_wet = adjust_gamma(abs(island_wet - np.ones(grid_size)), .5)
island_size = island_shape_gray.size*inverse_map.mean()*box*box #lenth n' width in km -> use float
#population limit
pop_lim = int(str(self.populationLimit.text()))*10./3
pop_lim_per_box = pop_lim*box*box
#time
dtime = (box*sqrt(2)/2./61)*1000 # in hours
total_time = 24*int(self.daysAfterRelease.text())
N_iterations = 1.*total_time/dtime
eq_steps = int(str(self.Neq_step.text()))#steps used for reaching equilibria on normal insects
#rates in dtime (hours)
population_growth_rate = float(self.populationGrowthRate.text())
population_growth_rate += .001
death_rate = -(.1/24)*dtime #death rate per cycle
female_reproduction_percentage = .2 #percentage of fertile females (no unit)
N_ovopositions = dtime*3./24 #number of ovopositions per cycle per female
density_larvae_death = 0.1 #larvae death rate per day
larvae_survival = (1/0.5)*(population_growth_rate - 1 - death_rate)*(2./female_reproduction_percentage)/(N_ovopositions*100.)
egg_to_fertile_adult = 24*8. #time from egg to mature mosquito in hours
emigration_rate = .104 #rate of emigration per cycle
delta_fitness = 1. #percentage
birth_rate = N_ovopositions* 100.*larvae_survival*female_reproduction_percentage# per female per cycle
################################################################
#Functions
def emigration(N, N_neighbors, true_neighbors):
return -(N-death(N))*emigration_rate*N_neighbors*true_neighbors/16. ######## colocar q ninguem imigra pro mar
def imigration(grid, i, j):
variable = (grid[i][j-1] + grid[i][j+1] + grid[i+1][j] + grid[i-1][j])
return (1./4)*(variable - death(variable))*emigration_rate
def birth(Nf, Nm, NMut, city):
if Nm <= 0 and NMut <= 0: return (0,0)
if (1 - (Nf+Nm+NMut)/pop_lim_per_box) < 0 : a = 0
else: a = Nf*birth_rate*(1 - (Nf+Nm+NMut)/pop_lim_per_box)
mutant = a*(1.*NMut/(NMut+Nm))*transgenic_type
normal = a*(1./2)*(1.*Nm/(NMut+Nm))
if city == True : mutant *= 1.2 ; normal *=1.2
return (normal, mutant)
def death(N):
return N*death_rate
def passo(grid_female, grid_male, grid_mutant):
#nonlocal grid_female_stack, grid_male_stack, grid_mutant_stack
grid_female_old = grid_female_stack.pop(0)
grid_male_old = grid_male_stack.pop(0)
grid_mutant_old = grid_mutant_stack.pop(0)
delta_male = np.zeros(grid_size)
delta_female = np.zeros(grid_size)
delta_mutant = np.zeros(grid_size)
for i in range(1 ,grid_size[0]-1):
for j in range(1 ,grid_size[1]-1):
(birth_normal, birth_mutant) = birth(grid_female_old[i][j], grid_male_old[i][j], grid_mutant_old[i][j], island_city[i][j])
delta_male[i,j] = emigration( grid_male[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration( grid_male, i, j) + birth_normal*(island_wet[i,j]) + death(grid_male[i][j])*island_veg[i,j]
delta_female[i,j] = emigration(grid_female[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration(grid_female, i, j) + birth_normal*(island_wet[i,j]) + death(grid_female[i][j])*island_veg[i,j]
delta_mutant[i,j] = emigration(grid_mutant[i][j], neighbor(i,j,neighbors_map), neighbor(i,j,inverse_map)) + imigration(grid_mutant, i, j) + birth_mutant*(island_wet[i,j]) + death(grid_mutant[i][j])*island_veg[i,j]
grid_male += delta_male
grid_male *= inverse_map
grid_female += delta_female
grid_female *= inverse_map
grid_mutant += delta_mutant
grid_mutant *= inverse_map
def map_bondary(area):
for i in range(grid_size[0]):
for j in range(grid_size[1]):
if i == 0 or j == 0 or i == grid_size[0]-1 or j == grid_size[1]-1: area[i][j] = 0
else: area[i][j] = 1
def map_corect_border(area):
for i in range(grid_size[0]):
for j in range(grid_size[1]):
if i == 0 or j == 0 or i == grid_size[0]-1 or j == grid_size[1]-1: area[i][j] = 0
def neighbor(i,j, area):
return area[i][j-1] + area[i][j+1] + area[i+1][j] + area[i-1][j]
def figure(i):
subplot(1, 2, 1, aspect= 1.*grid_size[0]/grid_size[1])
pcolormesh(grid_mutant, cmap=cm.OrRd, vmax=colorbar_max/3., vmin=0)
title('mutant density')
axis([1, grid_size[1]-1, grid_size[0]-1, 1])
#grid(True)
colorbar(shrink = 6.4/10.9)
subplot(1, 2, 2, aspect= 1.*grid_size[0]/grid_size[1]) #, adjustable='box'
pcolormesh(grid_female, cmap=cm.gist_earth, vmax=colorbar_max, vmin=0)
colorbar(shrink = 6.4/10.9)
grid(True)
axis([1, grid_size[1]-1, grid_size[0]-1, 1])
title('females density')
suptitle('%.2f' % (1.*i*dtime/24 + 1) +' days after mutant release')
tight_layout()
savefig('timelapse/mosquitos-' + '{0:03d}'.format(int(i/24+1))+ '.png', dpi = 500/4. , figsize = (11/4.,7/4.)) #
close()
def plot_nmosq_time():
tot_list = np.array(male_history)+ np.array(mutant_history)+ np.array(female_history)
subplot(1,1,1)
plot([x*dtime/24. for x in range(len(male_history))],[float(x)/(island_size) for x in male_history], label = 'wt male')
plot([x*dtime/24. for x in range(len(female_history))],[float(x)/(island_size) for x in tot_list.tolist()], label = 'total population')
plot([x*dtime/24. for x in range(len(mutant_history))],[float(x)/(island_size) for x in mutant_history], label = 'mutant')
xlabel('days after mutant release')
ylabel('population size per square Km')
title('variation in population size ')
grid(True)
legend(loc='best', prop={'size':10})
savefig("graphs/popsize ndays: " + str(total_time/24.)+ ' dtime: ' + str(dtime)+ ' boxsize: '+ str(box)+ '.png')
close()
subplot(1,1,1)
plot([x*dtime/24. for x in range(len(mutant_history))],(np.array(mutant_history)/tot_list).tolist(), label= 'mutant percentage')
plot([x*dtime/24. for x in range(len(mutant_history))],(np.array(female_history)/tot_list).tolist(), label= 'female percentage')
xlabel('days after mutant release')
ylabel('percentages')
ylim(0,1)
title('mutant and female percentages after realease')
grid(True)
legend(loc='best', prop={'size':10})
savefig("graphs/percentages ndays: " + str(total_time/24.)+ ' dtime: ' + str(dtime)+ ' boxsize: '+ str(box)+ '.png') #, bbox_inches='tight',dpi=100
close()
################################################################
#initializing grids
grid_male = np.random.randint(low = pop_lim_per_box*.15 -1 , high = pop_lim_per_box*.30, size = grid_size)*abs(island_shape_gray - np.ones(grid_size))
grid_female = grid_male[:]
grid_mutant = np.zeros(grid_size)
neighbors_map = np.ones(grid_size)
grid_male_stack = [grid_male[:] for i in range(int(egg_to_fertile_adult/dtime))]
grid_female_stack = [grid_female[:] for i in range(int(egg_to_fertile_adult/dtime))]
grid_mutant_stack = [np.zeros(grid_size) for i in range(int(egg_to_fertile_adult/dtime))]
map_bondary(neighbors_map)
map_corect_border(grid_male)
map_corect_border(grid_female)
map_corect_border(grid_mutant)
##############################################################
# starting program
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
print("Equilibrium steps started")
for i in range(eq_steps):
grid_male_stack += [grid_male]
grid_female_stack += [grid_female]
grid_mutant_stack += [grid_mutant]
loading = "%.2f" % (float(i)*100./eq_steps) + "%"
print("loading: " + loading + " completed")
self.progressBareq.setText(loading)
passo(grid_female, grid_male, grid_mutant)
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print("Equilibrium steps completed")
colorbar_max = grid_female.max()*.95
for single_point in self.MutantLIST:
y,x,quantity = single_point
grid_mutant[y,x] = quantity
grid_mutant_stack[-1] = grid_mutant
#mosquito population size history
male_history = []
female_history = []
mutant_history = []
close()
figure(-1)
for i in range(int(N_iterations)):
loading = "%.2f" % (float(i)/N_iterations*100) + "%"
self.progressBarsim.setText(loading)
print("loading: " + loading + " completed")
grid_male_stack += [grid_male]
grid_female_stack += [grid_female]
grid_mutant_stack += [grid_mutant]
passo(grid_female, grid_male, grid_mutant)
female_history += [np.sum(grid_female)]
mutant_history += [np.sum(grid_mutant)]
male_history += [np.sum(grid_male)]
if i%48==0:
figure(i)
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
if grid_female.sum() <= 400: break
print(CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE)
print("___________________________________ \n \n")
plot_nmosq_time()
self.progressBareq.setText("0%")
self.progressBarsim.setText("0%")
self.start.setText("Run Simulation")
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass()
myWindow.show()
app.exec_()
| en | 0.254689 | #!/usr/bin/python3 #self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True) #self.mplvl.addWidget(self.toolbar) #self.toolbar = NavigationToolbar(self.canvas, self.widget, coordinates=True) #self.mplvl.addWidget(self.toolbar) #self.mplvl.removeWidget(self.toolbar) #self.toolbar.close() ################################################################ #parameters #sizes #0.06866823529 #inverse image #lenth n' width in km -> use float #population limit #time # in hours #steps used for reaching equilibria on normal insects #rates in dtime (hours) #death rate per cycle #percentage of fertile females (no unit) #number of ovopositions per cycle per female #larvae death rate per day #time from egg to mature mosquito in hours #rate of emigration per cycle #percentage # per female per cycle ################################################################ #Functions ######## colocar q ninguem imigra pro mar #nonlocal grid_female_stack, grid_male_stack, grid_mutant_stack #grid(True) #, adjustable='box' # #, bbox_inches='tight',dpi=100 ################################################################ #initializing grids ############################################################## # starting program #mosquito population size history | 2.107587 | 2 |
TamilBots/__main__.py | cyrsbly/SongPlayRoBot | 0 | 6618743 | from config import OWNER_ID
from pyrogram.types.bots_and_keyboards import reply_keyboard_markup
from TamilBots.modules import *
from pyrogram import idle, filters
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from TamilBots import app, LOGGER
from TamilBots.TamilBots import ignore_blacklisted_users
from TamilBots.sql.chat_sql import add_chat_to_db
start_text = """
Hello [{}](tg://user?id={}),
\n\nI'm Katarina's Song Bot.
Send the name of the song.
𝐄𝐠. ```/song Faded```
"""
owner_help = """
/blacklist user_id
/unblacklist user_id
/broadcast message to send
/eval python code
/chatlist get list of all chats
"""
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("sstart"))
async def start(client, message):
    """Handle /sstart: greet the user and register the chat in the database.

    NOTE(review): the registered command is "sstart", not "start" -- confirm
    this is intentional (e.g. to avoid clashing with another handler).
    """
    chat_id = message.chat.id
    user_id = message.from_user["id"]
    name = message.from_user["first_name"]
    if message.chat.type == "private":
        # Private chats get support / add-to-group inline buttons.
        btn = InlineKeyboardMarkup(
            [[InlineKeyboardButton(text="Support", url="http://t.me/ppnaravxt_bot"),
              InlineKeyboardButton(
                  text="Add me to your group.", url="http://t.me/KatarinaMusic_Bot?"
              )
              ]
             ]
        )
    else:
        # Group chats get the plain greeting without buttons.
        btn = None
    await message.reply(start_text.format(name, user_id), reply_markup=btn)
    # Record the chat so later broadcasts can reach it.
    add_chat_to_db(str(chat_id))
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("helpp"))
async def help(client, message):
    """Handle /helpp: owner commands for owners, song usage help for everyone else."""
    # Bug fix: OWNER_ID is a list (the module later calls OWNER_ID.append(...)),
    # so membership must be tested with `in`; the original `==` comparison of
    # an int against the list could never be true, so owners never saw owner_help.
    if message.from_user["id"] in OWNER_ID:
        await message.reply(owner_help)
        return ""
    text = "Send the Name of the song you want.\n `/song (song name)`"
    await message.reply(text)
# Ensure the hard-coded maintainer account is always treated as an owner.
OWNER_ID.append(1492186775)
app.start()
LOGGER.info("Katarina Song Bot is now working.")
# Block the main thread until the bot is stopped.
idle()
| from config import OWNER_ID
from pyrogram.types.bots_and_keyboards import reply_keyboard_markup
from TamilBots.modules import *
from pyrogram import idle, filters
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from TamilBots import app, LOGGER
from TamilBots.TamilBots import ignore_blacklisted_users
from TamilBots.sql.chat_sql import add_chat_to_db
start_text = """
Hello [{}](tg://user?id={}),
\n\nI'm Katarina's Song Bot.
Send the name of the song.
𝐄𝐠. ```/song Faded```
"""
owner_help = """
/blacklist user_id
/unblacklist user_id
/broadcast message to send
/eval python code
/chatlist get list of all chats
"""
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("sstart"))
async def start(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
name = message.from_user["first_name"]
if message.chat.type == "private":
btn = InlineKeyboardMarkup(
[[InlineKeyboardButton(text="Support", url="http://t.me/ppnaravxt_bot"),
InlineKeyboardButton(
text="Add me to your group.", url="http://t.me/KatarinaMusic_Bot?"
)
]
]
)
else:
btn = None
await message.reply(start_text.format(name, user_id), reply_markup=btn)
add_chat_to_db(str(chat_id))
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("helpp"))
async def help(client, message):
if message.from_user["id"] == OWNER_ID:
await message.reply(owner_help)
return ""
text = "Send the Name of the song you want.\n `/song (song name)`"
await message.reply(text)
OWNER_ID.append(1492186775)
app.start()
LOGGER.info("Katarina Song Bot is now working.")
idle()
| en | 0.500348 | Hello [{}](tg://user?id={}), \n\nI'm Katarina's Song Bot. Send the name of the song. 𝐄𝐠. ```/song Faded``` /blacklist user_id /unblacklist user_id /broadcast message to send /eval python code /chatlist get list of all chats | 2.168683 | 2 |
sentiment-analysis-app.py | giltwizy/sentiment-analysis-app | 0 | 6618744 | <filename>sentiment-analysis-app.py<gh_stars>0
from transformers import pipeline

# Hugging Face pipeline for sentiment analysis (downloads the default
# model on first run).
classifier = pipeline("sentiment-analysis")


def func(utterance):
    """Classify `utterance` and return the pipeline's label/score output."""
    return classifier(utterance)


# UI App: a simple Gradio text-in/text-out interface around the classifier.
import gradio as gr

descriptions = "This is an AI sentiment analyser which checks and get the emotions in a particular sentence."
app = gr.Interface(fn=func, inputs="text", outputs="text", title="Sentiment Analayser", description=descriptions)
app.launch()
from transformers import pipeline
# Geting pipelineAPI for sentiment analysis
classifier = pipeline("sentiment-analysis")
def func(utterance):
return classifier(utterance)
# UI App
import gradio as gr
descriptions = "This is an AI sentiment analyser which checks and get the emotions in a particular sentence."
app = gr.Interface(fn=func, inputs="text", outputs="text", title="Sentiment Analayser", description=descriptions)
app.launch() | en | 0.636871 | # Geting pipelineAPI for sentiment analysis # UI App | 2.203743 | 2 |
saleor/graphql/product/types/product_max_min.py | hoangtuananh97/saleor | 0 | 6618745 | import graphene
from graphene_federation import key
from saleor.graphql.core.connection import CountableDjangoObjectType
from saleor.graphql.product.types import ProductVariantChannelListing
from ....product_max_min import models
from ...account.types import User
from ..filters_product_max_min import BaseProductMaxMinFilter
# GraphQL type exposing a product variant's min/max stock levels together
# with created/updated audit fields. Federated on "id".
@key(fields="id")
class ProductMaxMin(CountableDjangoObjectType):
    # Channel listing this min/max record applies to.
    listing = graphene.Field(
        ProductVariantChannelListing,
        description="ID of the product variant channel listing.",
    )
    min_level = graphene.Int(description="Product min level")
    max_level = graphene.Int(description="Product max level")
    # Audit trail: who created/updated the record and when.
    created_by = graphene.Field(
        User,
        description="ID of user to create.",
    )
    updated_by = graphene.Field(
        User,
        description="ID of user to update.",
    )
    created_at = graphene.DateTime(
        description="Time of user to created.",
    )
    updated_at = graphene.DateTime(
        description="Time of user to update.",
    )

    class Meta:
        description = "Represents a type of ProductMaxMin."
        interfaces = [graphene.relay.Node]
        model = models.ProductMaxMin
# Pairs the current min/max record with the most recent previous-generation
# record for the same channel listing. Federated on "id".
@key(fields="id")
class CurrentPreviousProductMaxMin(CountableDjangoObjectType):
    product_max_min_current = graphene.Field(ProductMaxMin, required=False)
    product_max_min_previous = graphene.Field(ProductMaxMin, required=False)

    class Meta:
        description = "Represents a type of product max min."
        interfaces = [graphene.relay.Node]
        model = models.ProductMaxMin

    @staticmethod
    def resolve_product_max_min_current(root: models.ProductMaxMin, _info, **_kwargs):
        # The resolved root *is* the current record.
        return root

    @staticmethod
    def resolve_product_max_min_previous(root: models.ProductMaxMin, _info, **_kwargs):
        # Restrict to previous-generation records for this listing, newest first.
        _, previous_ids = models.ProductMaxMin.objects.get_current_previous_ids()
        product_max_min = models.ProductMaxMin.objects.filter(
            listing_id=root.listing_id, id__in=previous_ids
        ).order_by("-created_at")
        # NOTE(review): reaching into _info.variable_values for the raw
        # "filter" variable bypasses graphene's argument resolution; confirm
        # every operation actually names this variable "filter".
        product_max_min = BaseProductMaxMinFilter(
            data=_info.variable_values.get("filter"), queryset=product_max_min
        ).qs
        return product_max_min.first()
| import graphene
from graphene_federation import key
from saleor.graphql.core.connection import CountableDjangoObjectType
from saleor.graphql.product.types import ProductVariantChannelListing
from ....product_max_min import models
from ...account.types import User
from ..filters_product_max_min import BaseProductMaxMinFilter
@key(fields="id")
class ProductMaxMin(CountableDjangoObjectType):
listing = graphene.Field(
ProductVariantChannelListing,
description="ID of the product variant channel listing.",
)
min_level = graphene.Int(description="Product min level")
max_level = graphene.Int(description="Product max level")
created_by = graphene.Field(
User,
description="ID of user to create.",
)
updated_by = graphene.Field(
User,
description="ID of user to update.",
)
created_at = graphene.DateTime(
description="Time of user to created.",
)
updated_at = graphene.DateTime(
description="Time of user to update.",
)
class Meta:
description = "Represents a type of ProductMaxMin."
interfaces = [graphene.relay.Node]
model = models.ProductMaxMin
@key(fields="id")
class CurrentPreviousProductMaxMin(CountableDjangoObjectType):
product_max_min_current = graphene.Field(ProductMaxMin, required=False)
product_max_min_previous = graphene.Field(ProductMaxMin, required=False)
class Meta:
description = "Represents a type of product max min."
interfaces = [graphene.relay.Node]
model = models.ProductMaxMin
@staticmethod
def resolve_product_max_min_current(root: models.ProductMaxMin, _info, **_kwargs):
return root
@staticmethod
def resolve_product_max_min_previous(root: models.ProductMaxMin, _info, **_kwargs):
_, previous_ids = models.ProductMaxMin.objects.get_current_previous_ids()
product_max_min = models.ProductMaxMin.objects.filter(
listing_id=root.listing_id, id__in=previous_ids
).order_by("-created_at")
product_max_min = BaseProductMaxMinFilter(
data=_info.variable_values.get("filter"), queryset=product_max_min
).qs
return product_max_min.first()
| none | 1 | 2.048981 | 2 | |
generator.py | bilbopingouin/name-generator | 0 | 6618746 | <filename>generator.py<gh_stars>0
import sys
import os
import argparse
import glob
from random import sample
def arguments():
    """Parse the command line and return the options as a parameters dict.

    Returns a dict with keys 'debug', 'ismale', 'nnames', 'idir',
    'first only' and 'last only'. Exits the process on invalid input.
    """
    # Default input directory lives next to this script.
    path = os.path.dirname(__file__)
    if not path:
        path = '.'
    def_indir = path + '/inputs/'

    parser = argparse.ArgumentParser(description='Name generator')
    parser.add_argument('-m', '--male', help='Provide a male name instead of a female name',
                        action='store_true', required=False)
    parser.add_argument('-n', '--number-of-names',
                        help='Number of names [default: 1]', type=int, default=1, required=False)
    parser.add_argument('-i', '--input-directory',
                        help='Where would the list of files be found [default: ' + def_indir + ']',
                        type=str, default=def_indir, required=False)
    parser.add_argument(
        '-d', '--debug', help='Print some debug message [default: false]', action='store_true', required=False)
    parser.add_argument('-f', '--first-only', help='Povide a first (given) name only',
                        action='store_true', required=False)
    parser.add_argument('-l', '--last-only', help='Povide a last name (surname) only',
                        action='store_true', required=False)

    try:
        options = parser.parse_args()
    except SystemExit:
        # argparse has already printed its error/help text; the original
        # behaviour (bare except) exited with status 0, which we keep, but
        # we no longer swallow unrelated exceptions.
        sys.exit(0)

    parameters = {}

    # Debug
    parameters['debug'] = options.debug

    # Male / Female
    parameters['ismale'] = options.male
    if parameters['debug']:
        if options.male:
            print('Getting a male name')
        else:
            print('Getting a female name')

    # Number of names. argparse already enforces int via type=int, so the
    # original int()/ValueError path was dead code and has been removed.
    nnames = options.number_of_names
    if nnames < 1:
        # Bug fix: the original called the undefined name `stderr` (never
        # imported) -- use sys.stderr and write a single formatted line.
        sys.stderr.write('Error: We should have at least one name!\n')
        sys.exit(1)
    parameters['nnames'] = nnames
    if parameters['debug']:
        print('Getting ' + str(nnames) + " names")

    # Input directory
    if os.path.isdir(options.input_directory):
        parameters['idir'] = options.input_directory
        if parameters['debug']:
            print('Using input directory: ' + options.input_directory)
    else:
        # Bug fix: same undefined-`stderr` problem as above, and write()
        # takes a single string argument.
        sys.stderr.write('Error: input directory does not exists: ' +
                         options.input_directory + '\n')
        sys.exit(1)

    # Selections
    parameters['first only'] = options.first_only
    parameters['last only'] = options.last_only

    return parameters
def get_list_from_file(filename):
    """Read `filename` and return its lines, newlines included, as a set.

    Trailing newlines are kept on purpose: callers strip them when a name
    is finally picked (see pick_one_from_set).
    """
    # The `with` statement closes the file; the original also called
    # f.close() redundantly inside the block and built the set line by line.
    with open(filename, 'rt') as f:
        return set(f)
def get_list_from_list_of_files(pattern, debug=False):
    """Union the line-sets of every file matching the glob `pattern`."""
    collected = set()
    for match in glob.glob(pattern):
        if debug:
            print('Including ', match)
        # |= is the in-place union; equivalent to the original's
        # first-assignment/union() dance.
        collected |= get_list_from_file(match)
    return collected
def get_lists(dirname, debug=False):
    """Load the female, male and surname pools from data files in `dirname`.

    Returns a (female_first_names, male_first_names, surnames) tuple of sets.
    """
    patterns = (
        'female_first_names_*.dat',   # female given names
        'male_first_names_*.dat',     # male given names
        'surname_*.dat',              # family names
    )
    pools = [get_list_from_list_of_files(dirname + p, debug) for p in patterns]
    return (pools[0], pools[1], pools[2])
def pick_one_from_set(setname):
    """Return one random element of `setname` with its trailing newline stripped.

    An empty set yields the empty string.
    """
    if not setname:
        return ''
    (picked,) = sample(list(setname), 1)
    return picked.rstrip('\n')
if __name__ == '__main__':
    """Entry point: parse options, load the name pools, print random names."""
    args = arguments()

    # Load every pool once; the loop below only samples from them.
    (femfirst, mfirst, surnames) = get_lists(args['idir'], args['debug'])

    for n in range(args['nnames']):
        fname = ''
        lname = ''
        # Given name, unless the user asked for surnames only.
        if not args['last only']:
            pool = mfirst if args['ismale'] else femfirst
            fname = pick_one_from_set(pool)
        # Surname, unless the user asked for given names only.
        if not args['first only']:
            lname = pick_one_from_set(surnames)
        print(fname, lname)
| <filename>generator.py<gh_stars>0
import sys
import os
import argparse
import glob
from random import sample
def arguments():
    ''' Parse the command line and return the run parameters.

    Returns:
        dict with keys 'debug', 'ismale', 'nnames', 'idir',
        'first only' and 'last only'.

    Exits with status 1 on an invalid name count or a missing input
    directory, and with status 0 when argparse rejects the usage.
    '''
    # Default input directory lives next to this script.
    path = os.path.dirname(__file__)
    if not path:
        path = '.'
    def_indir = path+'/inputs/'
    parser = argparse.ArgumentParser(description='Name generator')
    parser.add_argument('-m', '--male', help='Provide a male name instead of a female name',
                        action='store_true', required=False)
    parser.add_argument('-n', '--number-of-names',
                        help='Number of names [default: 1]', type=int, default=1, required=False)
    parser.add_argument('-i', '--input-directory',
                        help='Where would the list of files be found [default: '+def_indir+']', type=str, default=def_indir, required=False)
    parser.add_argument(
        '-d', '--debug', help='Print some debug message [default: false]', action='store_true', required=False)
    parser.add_argument('-f', '--first-only', help='Provide a first (given) name only',
                        action='store_true', required=False)
    parser.add_argument('-l', '--last-only', help='Provide a last name (surname) only',
                        action='store_true', required=False)
    try:
        options = parser.parse_args()
    except SystemExit:
        # argparse raises SystemExit on bad usage; keep the original
        # behaviour of exiting cleanly (the bare `except:` narrowed).
        sys.exit(0)
    parameters = {}
    # Debug
    parameters['debug'] = options.debug
    # Male / Female
    parameters['ismale'] = options.male
    if parameters['debug']:
        if options.male:
            print('Getting a male name')
        else:
            print('Getting a female name')
    # Number of names.  argparse already coerces with type=int; the
    # guard is kept for robustness against non-argparse callers.
    try:
        nnames = int(options.number_of_names)
    except ValueError:
        # BUG FIX: the original used the undefined name `stderr` and
        # passed write() two arguments (TypeError).
        sys.stderr.write('Number of names format error: %s\n' % options.number_of_names)
        sys.exit(1)
    if nnames < 1:
        sys.stderr.write('Error: We should have at least one name!\n')
        sys.exit(1)
    parameters['nnames'] = nnames
    if parameters['debug']:
        print('Getting '+str(nnames)+" names")
    # Input directory
    if os.path.isdir(options.input_directory):
        parameters['idir'] = options.input_directory
        if parameters['debug']:
            print('Using input directory: '+options.input_directory)
    else:
        sys.stderr.write('Error: input directory does not exist: ' +
                         options.input_directory + '\n')
        sys.exit(1)
    # Selections
    parameters['first only'] = options.first_only
    parameters['last only'] = options.last_only
    return parameters
def get_list_from_file(filename):
    ''' Open *filename* and return the set of its lines.

    Trailing newlines are stripped before insertion so the same name is
    stored only once even when it comes from the last, newline-less line
    of a file.  (The original kept the newlines and also called
    f.close() redundantly inside the `with` block.)
    '''
    with open(filename, 'rt') as f:
        return {line.rstrip('\n') for line in f}
def get_list_from_list_of_files(pattern, debug=False):
    ''' Return the union of the name sets of every file matching *pattern*.

    Parameters:
        pattern (str): glob pattern selecting the data files to load.
        debug (bool): when True, print each file as it is included.

    Returns:
        set: all lines found across the matching files (empty set when
        no file matches).
    '''
    out = set()
    for filename in glob.glob(pattern):
        if debug:
            print('Including ', filename)
        # set.update works fine on an empty set, so the original's
        # special case for the first file is unnecessary.
        out.update(get_list_from_file(filename))
    return out
def get_lists(dirname, debug=False):
    ''' Load the female first-name, male first-name and surname sets
    found under *dirname* and return them as a tuple, in that order. '''
    patterns = (
        'female_first_names_*.dat',   # female first-name files
        'male_first_names_*.dat',     # male first-name files
        'surname_*.dat',              # surname files
    )
    female, male, last = (
        get_list_from_list_of_files(dirname + suffix, debug)
        for suffix in patterns
    )
    return (female, male, last)
def pick_one_from_set(setname):
    ''' Return one random element of *setname* with any trailing
    newline stripped, or the empty string when the set is empty. '''
    if not setname:
        return ''
    (chosen,) = sample(list(setname), 1)
    return chosen.rstrip('\n')
if __name__ == '__main__':
    ''' Main function '''
    # Parse the command line once and load all three name pools.
    args = arguments()
    (femfirst, mfirst, surnames) = get_lists(args['idir'], args['debug'])
    # Print one "first last" pair per requested name.
    for n in range(args['nnames']):
        fname = ''
        lname = ''
        if not args['last only']:
            # Choose the first-name pool according to the requested sex.
            if args['ismale']:
                fname = pick_one_from_set(mfirst)
            else:
                fname = pick_one_from_set(femfirst)
        if not args['first only']:
            lname = pick_one_from_set(surnames)
        print(fname, lname)
| en | 0.618072 | Handle the input parameters # Debug # Male / Female # Number of names # Input directory # Selections Opens a file and pack each line onto a set Get the different lists # Female lists # Male lists # Surname lists Select a single element from a set Main function | 3.126198 | 3 |
dpq/migrations/0003_auto_20210119_0451.py | ekmecic/django-postgres-queue | 0 | 6618747 | # Generated by Django 3.1.5 on 2021-01-19 04:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dpq', '0002_auto_20190419_2057'),
]
operations = [
migrations.AlterField(
model_name='job',
name='args',
field=models.JSONField(),
),
]
| # Generated by Django 3.1.5 on 2021-01-19 04:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dpq', '0002_auto_20190419_2057'),
]
operations = [
migrations.AlterField(
model_name='job',
name='args',
field=models.JSONField(),
),
]
| en | 0.828109 | # Generated by Django 3.1.5 on 2021-01-19 04:51 | 1.51134 | 2 |
iminspect/tests/test_inspector.py | snototter/iminspect | 1 | 6618748 | #!/usr/bin/env python
# coding=utf-8
"""
Very simple test suite (as most of the functionality is GUI-based and thus,
rather complex to test).
"""
import pytest
from ..inspection_utils import fmti, fmtb, fmtf, fmt1f, fmt2f, fmt3f, fmt4f, FilenameUtils
def test_fmtb():
    """fmtb renders any truthy value as 'True' and any falsy one as 'False'."""
    for value, expected in ((True, 'True'), (False, 'False'),
                            (0, 'False'), (-17, 'True')):
        assert fmtb(value) == expected
def test_fmti():
    """fmti renders integers in plain decimal form."""
    for value, expected in ((3, '3'), (-0, '0'), (-42, '-42')):
        assert fmti(value) == expected
def test_fmtf():
    """fmtf matches Python's default '{:f}' fixed-point formatting."""
    for value in (3, 17.0099123):
        assert fmtf(value) == '{:f}'.format(value)
def test_fmt1f():
    """fmt1f formats with exactly one decimal place (rounded)."""
    for value, expected in ((3, '3.0'), (17.0099123, '17.0'), (-12.08, '-12.1')):
        assert fmt1f(value) == expected
def test_fmt2f():
    """fmt2f formats with exactly two decimal places (rounded)."""
    for value, expected in ((3, '3.00'), (17.0099123, '17.01'), (-12.08, '-12.08')):
        assert fmt2f(value) == expected
def test_fmt3f():
    """fmt3f formats with exactly three decimal places (rounded)."""
    for value, expected in ((3, '3.000'), (17.0099123, '17.010'), (-12.08, '-12.080')):
        assert fmt3f(value) == expected
def test_fmt4f():
    """fmt4f formats with exactly four decimal places (rounded)."""
    for value, expected in ((3, '3.0000'), (17.0099123, '17.0099'), (-12.08, '-12.0800')):
        assert fmt4f(value) == expected
def test_FilenameUtils():
    """Extension helpers pass None through, reject bad input, and only
    append an extension when one of the accepted ones is missing."""
    assert FilenameUtils.ensureImageExtension(None) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureImageExtension('')
    for given, expected in (('foo', 'foo.png'),
                            ('foo.jpEG', 'foo.jpEG'),
                            ('FoO.pNg', 'FoO.pNg'),
                            ('FoO.pNgGg', 'FoO.pNgGg.png')):
        assert FilenameUtils.ensureImageExtension(given) == expected
    assert FilenameUtils.ensureFlowExtension(None) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureFlowExtension('')
    for given, expected in (('foo', 'foo.flo'),
                            ('foo.jpEG', 'foo.jpEG.flo'),
                            ('FoO.flow', 'FoO.flow.flo'),
                            ('FoO.FlO', 'FoO.FlO')):
        assert FilenameUtils.ensureFlowExtension(given) == expected
    assert FilenameUtils.ensureFileExtension(None, []) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureFileExtension('', ['foo'])
    with pytest.raises(ValueError):
        FilenameUtils.ensureFileExtension('foo', [])
    for given, expected in (('foo.bar', 'foo.bar'),
                            ('f00.BaR', 'f00.BaR'),
                            ('foo.barz', 'foo.barz.bla')):
        assert FilenameUtils.ensureFileExtension(given, ['bla', 'bar']) == expected
| #!/usr/bin/env python
# coding=utf-8
"""
Very simple test suite (as most of the functionality is GUI-based and thus,
rather complex to test).
"""
import pytest
from ..inspection_utils import fmti, fmtb, fmtf, fmt1f, fmt2f, fmt3f, fmt4f, FilenameUtils
def test_fmtb():
    """fmtb renders any truthy value as 'True' and any falsy one as 'False'."""
    for value, expected in ((True, 'True'), (False, 'False'),
                            (0, 'False'), (-17, 'True')):
        assert fmtb(value) == expected
def test_fmti():
    """fmti renders integers in plain decimal form."""
    for value, expected in ((3, '3'), (-0, '0'), (-42, '-42')):
        assert fmti(value) == expected
def test_fmtf():
    """fmtf matches Python's default '{:f}' fixed-point formatting."""
    for value in (3, 17.0099123):
        assert fmtf(value) == '{:f}'.format(value)
def test_fmt1f():
    """fmt1f formats with exactly one decimal place (rounded)."""
    for value, expected in ((3, '3.0'), (17.0099123, '17.0'), (-12.08, '-12.1')):
        assert fmt1f(value) == expected
def test_fmt2f():
    """fmt2f formats with exactly two decimal places (rounded)."""
    for value, expected in ((3, '3.00'), (17.0099123, '17.01'), (-12.08, '-12.08')):
        assert fmt2f(value) == expected
def test_fmt3f():
    """fmt3f formats with exactly three decimal places (rounded)."""
    for value, expected in ((3, '3.000'), (17.0099123, '17.010'), (-12.08, '-12.080')):
        assert fmt3f(value) == expected
def test_fmt4f():
    """fmt4f formats with exactly four decimal places (rounded)."""
    for value, expected in ((3, '3.0000'), (17.0099123, '17.0099'), (-12.08, '-12.0800')):
        assert fmt4f(value) == expected
def test_FilenameUtils():
    """Extension helpers pass None through, reject bad input, and only
    append an extension when one of the accepted ones is missing."""
    assert FilenameUtils.ensureImageExtension(None) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureImageExtension('')
    for given, expected in (('foo', 'foo.png'),
                            ('foo.jpEG', 'foo.jpEG'),
                            ('FoO.pNg', 'FoO.pNg'),
                            ('FoO.pNgGg', 'FoO.pNgGg.png')):
        assert FilenameUtils.ensureImageExtension(given) == expected
    assert FilenameUtils.ensureFlowExtension(None) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureFlowExtension('')
    for given, expected in (('foo', 'foo.flo'),
                            ('foo.jpEG', 'foo.jpEG.flo'),
                            ('FoO.flow', 'FoO.flow.flo'),
                            ('FoO.FlO', 'FoO.FlO')):
        assert FilenameUtils.ensureFlowExtension(given) == expected
    assert FilenameUtils.ensureFileExtension(None, []) is None
    with pytest.raises(ValueError):
        FilenameUtils.ensureFileExtension('', ['foo'])
    with pytest.raises(ValueError):
        FilenameUtils.ensureFileExtension('foo', [])
    for given, expected in (('foo.bar', 'foo.bar'),
                            ('f00.BaR', 'f00.BaR'),
                            ('foo.barz', 'foo.barz.bla')):
        assert FilenameUtils.ensureFileExtension(given, ['bla', 'bar']) == expected
| en | 0.823782 | #!/usr/bin/env python # coding=utf-8 Very simple test suite (as most of the functionality is GUI-based and thus, rather complex to test). | 2.356148 | 2 |
scrapy/douban/spiders/person_meta.py | shallow-ll/AntSpider | 308 | 6618749 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 23:41:38 2019
@author: liudiwei
"""
import string
import random
import douban.util as util
import douban.database as db
import douban.validator as validator
from scrapy import Request, Spider
from douban.items import PersonMeta
cursor = db.connection.cursor()
class PersonMetaSpider(Spider):
    # Spider that scrapes per-person metadata (sex, birth date, biography,
    # profession, ...) from douban celebrity pages for ids that exist in
    # `person_obj` but have no row in `person` yet.
    name = 'person_item'
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
        (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    allowed_domains = ["Person.douban.com"]
    # Select the person ids that still need their metadata scraped.
    # NOTE(review): this query runs at import time, as a side effect of
    # defining the class body.
    sql = 'SELECT person_id FROM person_obj WHERE person_id NOT IN (SELECT person_id FROM person) ORDER BY person_id DESC'
    print("select person_id from db: ", sql)
    cursor.execute(sql)
    person_ids = cursor.fetchall()
    start_urls = [
        'https://movie.douban.com/celebrity/%s/' % i['person_id'] for i in person_ids
        # 'https://movie.douban.com/celebrity/1054424/'
    ]
    def start_requests(self):
        """Issue one request per start URL with a fresh random `bid` cookie."""
        for url in self.start_urls:
            print("======url:", url)
            bid = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11))
            cookies = {
                'bid': bid,
                'dont_redirect': True,
                'handle_httpstatus_list': [302],
            }
            # Carry the original URL along so redirects don't lose the id.
            yield Request(url, cookies=cookies,meta={'main_url':url})
    # Extract the person id from the originally requested URL.
    def get_person_id(self, meta, response):
        main_url = response.meta['main_url']
        response_url = response.url
        print("##### main_url:", main_url)
        print("##### response_url: ", response_url)
        person_id = main_url.split("celebrity")[1].split("/")[1]
        meta['person_id'] = person_id
        print("==============person_id:", person_id)
        #meta['person_id'] = response.url[33:-1]
        return meta
    # Extract the sex/gender field.
    def get_sex(self, meta, response):
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="性别"]]'
        data = response.xpath(regx).extract()
        print("============get_sex:", data)
        if data:
            meta["sex"] = data[0].strip("\n").split(":")[-1]
        return meta
    # Extract the birth date.
    def get_birth(self, meta, response):
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="出生日期"]]'
        data = response.xpath(regx).extract()
        print("============get_birth:", data)
        if data:
            meta['birth'] = validator.str_to_date(validator.match_date(data[0].strip("\n")))
            if not meta['birth']:
                # Fall back to the raw text when the date cannot be parsed.
                meta['birth'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_birthplace(self, meta, response):
        """Extract the place of birth."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="出生地"]]'
        data = response.xpath(regx).extract()
        print("============get_birthplace:", data)
        if data:
            meta["birthplace"] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_biography(self, meta, response):
        """Extract the short biography text."""
        regx = '//div[@class="article"]/div[@id="intro"]/div[@class="bd"]/span[@class="short"]/text()'
        data = response.xpath(regx).extract()
        if data:
            meta['biography'] = data[0]
        return meta
    def get_profession(self, meta, response):
        """Extract the profession field."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="职业"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['profession'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_constellatory(self, meta, response):
        """Extract the zodiac/constellation field."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="星座"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['constellatory'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_name_zh(self, meta, response):
        """Extract additional Chinese names."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="更多中文名"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['name_zh'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_name_en(self, meta, response):
        """Extract additional foreign-language names."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="更多外文名"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['name_en'] = data[0].strip("\n").split(":")[-1]
        return meta
    def parse(self, response):
        """Assemble a PersonMeta item by running every field extractor."""
        print("=====================================+++++++++",response.url)
        if 404 == response.status:
            print("Person.meta.response.url: ",response.url)
        else:
            meta = PersonMeta()
            self.get_person_id(meta, response)
            self.get_sex(meta, response)
            self.get_birth(meta, response)
            self.get_birthplace(meta, response)
            self.get_biography(meta, response)
            self.get_profession(meta, response)
            self.get_constellatory(meta, response)
            self.get_name_zh(meta, response)
            self.get_name_en(meta, response)
            return meta
    def second_parse(self,response):
        """print user-agent"""
        # Debug helper: show the User-Agent actually sent for this response.
        print("\nChange User-Agent: ", response.request.headers['User-Agent'])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 23:41:38 2019
@author: liudiwei
"""
import string
import random
import douban.util as util
import douban.database as db
import douban.validator as validator
from scrapy import Request, Spider
from douban.items import PersonMeta
cursor = db.connection.cursor()
class PersonMetaSpider(Spider):
    # Spider that scrapes per-person metadata (sex, birth date, biography,
    # profession, ...) from douban celebrity pages for ids that exist in
    # `person_obj` but have no row in `person` yet.
    name = 'person_item'
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
        (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    allowed_domains = ["Person.douban.com"]
    # Select the person ids that still need their metadata scraped.
    # NOTE(review): this query runs at import time, as a side effect of
    # defining the class body.
    sql = 'SELECT person_id FROM person_obj WHERE person_id NOT IN (SELECT person_id FROM person) ORDER BY person_id DESC'
    print("select person_id from db: ", sql)
    cursor.execute(sql)
    person_ids = cursor.fetchall()
    start_urls = [
        'https://movie.douban.com/celebrity/%s/' % i['person_id'] for i in person_ids
        # 'https://movie.douban.com/celebrity/1054424/'
    ]
    def start_requests(self):
        """Issue one request per start URL with a fresh random `bid` cookie."""
        for url in self.start_urls:
            print("======url:", url)
            bid = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11))
            cookies = {
                'bid': bid,
                'dont_redirect': True,
                'handle_httpstatus_list': [302],
            }
            # Carry the original URL along so redirects don't lose the id.
            yield Request(url, cookies=cookies,meta={'main_url':url})
    # Extract the person id from the originally requested URL.
    def get_person_id(self, meta, response):
        main_url = response.meta['main_url']
        response_url = response.url
        print("##### main_url:", main_url)
        print("##### response_url: ", response_url)
        person_id = main_url.split("celebrity")[1].split("/")[1]
        meta['person_id'] = person_id
        print("==============person_id:", person_id)
        #meta['person_id'] = response.url[33:-1]
        return meta
    # Extract the sex/gender field.
    def get_sex(self, meta, response):
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="性别"]]'
        data = response.xpath(regx).extract()
        print("============get_sex:", data)
        if data:
            meta["sex"] = data[0].strip("\n").split(":")[-1]
        return meta
    # Extract the birth date.
    def get_birth(self, meta, response):
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="出生日期"]]'
        data = response.xpath(regx).extract()
        print("============get_birth:", data)
        if data:
            meta['birth'] = validator.str_to_date(validator.match_date(data[0].strip("\n")))
            if not meta['birth']:
                # Fall back to the raw text when the date cannot be parsed.
                meta['birth'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_birthplace(self, meta, response):
        """Extract the place of birth."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="出生地"]]'
        data = response.xpath(regx).extract()
        print("============get_birthplace:", data)
        if data:
            meta["birthplace"] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_biography(self, meta, response):
        """Extract the short biography text."""
        regx = '//div[@class="article"]/div[@id="intro"]/div[@class="bd"]/span[@class="short"]/text()'
        data = response.xpath(regx).extract()
        if data:
            meta['biography'] = data[0]
        return meta
    def get_profession(self, meta, response):
        """Extract the profession field."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="职业"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['profession'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_constellatory(self, meta, response):
        """Extract the zodiac/constellation field."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="星座"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['constellatory'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_name_zh(self, meta, response):
        """Extract additional Chinese names."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="更多中文名"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['name_zh'] = data[0].strip("\n").split(":")[-1]
        return meta
    def get_name_en(self, meta, response):
        """Extract additional foreign-language names."""
        regx = '//div[@class="info"]/ul/li/text()[preceding-sibling::span[text()="更多外文名"]]'
        data = response.xpath(regx).extract()
        if data:
            meta['name_en'] = data[0].strip("\n").split(":")[-1]
        return meta
    def parse(self, response):
        """Assemble a PersonMeta item by running every field extractor."""
        print("=====================================+++++++++",response.url)
        if 404 == response.status:
            print("Person.meta.response.url: ",response.url)
        else:
            meta = PersonMeta()
            self.get_person_id(meta, response)
            self.get_sex(meta, response)
            self.get_birth(meta, response)
            self.get_birthplace(meta, response)
            self.get_biography(meta, response)
            self.get_profession(meta, response)
            self.get_constellatory(meta, response)
            self.get_name_zh(meta, response)
            self.get_name_en(meta, response)
            return meta
    def second_parse(self,response):
        """print user-agent"""
        # Debug helper: show the User-Agent actually sent for this response.
        print("\nChange User-Agent: ", response.request.headers['User-Agent'])
| en | 0.465632 | #!/usr/bin/env python # -*- coding: utf-8 -*- Created on Tue Jul 30 23:41:38 2019 @author: liudiwei #select person id from db # 'https://movie.douban.com/celebrity/1054424/' #获取ID #### main_url:", main_url) #### response_url: ", response_url) #meta['person_id'] = response.url[33:-1] #获取性别 #出生日期 print user-agent | 2.805735 | 3 |
day3/day3-2.py | Saff-Buraq-Dev/advent-of-code-2021 | 0 | 6618750 | def main():
file = open('input.txt', 'r')
data = file.readlines()
length = len(data[0]) - 1
oxygen = [None] * len(data)
co2 = [None] * len(data)
for i in range(len(data)):
oxygen[i] = data[i]
co2[i] = data[i]
while(len(oxygen) > 1):
for i in range(length):
zeros = 0
ones = 0
for j in range(len(oxygen)):
if(oxygen[j][i] == '0'):
zeros += 1
else:
ones += 1
if(zeros > ones):
oxygen = [x for x in oxygen if x[i] == '0']
else:
oxygen = [x for x in oxygen if x[i] == '1']
while(len(co2) > 1):
for i in range(length):
zeros = 0
ones = 0
for j in range(len(co2)):
if(co2[j][i] == '0'):
zeros += 1
else:
ones += 1
if(zeros > ones):
co2 = [x for x in co2 if x[i] == '1']
else:
co2 = [x for x in co2 if x[i] == '0']
if(len(co2) == 1):
break
print(oxygen)
print(co2)
print(1981*3371)
#011110111101 1981
#110100101011 3371
main() | def main():
file = open('input.txt', 'r')
data = file.readlines()
length = len(data[0]) - 1
oxygen = [None] * len(data)
co2 = [None] * len(data)
for i in range(len(data)):
oxygen[i] = data[i]
co2[i] = data[i]
while(len(oxygen) > 1):
for i in range(length):
zeros = 0
ones = 0
for j in range(len(oxygen)):
if(oxygen[j][i] == '0'):
zeros += 1
else:
ones += 1
if(zeros > ones):
oxygen = [x for x in oxygen if x[i] == '0']
else:
oxygen = [x for x in oxygen if x[i] == '1']
while(len(co2) > 1):
for i in range(length):
zeros = 0
ones = 0
for j in range(len(co2)):
if(co2[j][i] == '0'):
zeros += 1
else:
ones += 1
if(zeros > ones):
co2 = [x for x in co2 if x[i] == '1']
else:
co2 = [x for x in co2 if x[i] == '0']
if(len(co2) == 1):
break
print(oxygen)
print(co2)
print(1981*3371)
#011110111101 1981
#110100101011 3371
main() | en | 0.253591 | #011110111101 1981 #110100101011 3371 | 3.210307 | 3 |