id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8061590 | <filename>carball/analysis/stats/rumble/rumble.py
import math
from typing import Dict, List
import pandas as pd
from carball.generated.api import game_pb2
from carball.generated.api.player_pb2 import Player
from carball.generated.api.stats.player_stats_pb2 import PlayerStats
from carball.generated.api.stats.team_stats_pb2 import TeamStats
from carball.generated.api.stats.events_pb2 import RumbleItemEvent
from carball.generated.api.stats.rumble_pb2 import PowerUp, RumbleStats
from carball.json_parser.game import Game
from carball.analysis.stats.stats import BaseStat
from carball.generated.api.metadata.game_metadata_pb2 import RANKED_RUMBLE, UNRANKED_RUMBLE
class RumbleItemStat(BaseStat):
    """Computes rumble power-up statistics per player and per team."""

    def calculate_player_stat(self, player_stat_map: Dict[str, PlayerStats], game: Game, proto_game: game_pb2.Game,
                              player_map: Dict[str, Player], data_frame: pd.DataFrame):
        """Fill each player's ``rumble_stats`` from their power-up columns.

        Skips entirely when the game is not a rumble game.
        """
        if not is_rumble_enabled(game):
            return
        game_df = data_frame['game']
        for player_key, stats in player_stat_map.items():
            player = player_map[player_key]
            # Extract item get/use events from this player's frame data and
            # aggregate them into the player's rumble stats proto.
            events = _get_power_up_events(player, data_frame[player.name], game,
                                          proto_game.game_stats.rumble_items)
            _calculate_rumble_stats(stats.rumble_stats, events, game_df)

    def calculate_team_stat(self, team_stat_list: Dict[int, TeamStats], game: Game, proto_game: game_pb2.Game,
                            player_map: Dict[str, Player], data_frame: pd.DataFrame):
        """Aggregate the already-extracted rumble item events per team.

        Team index 1 is orange, 0 is blue.
        """
        if not is_rumble_enabled(game):
            return
        orange_ids = [p.id.id for p in player_map.values() if p.is_orange]
        blue_ids = [p.id.id for p in player_map.values() if not p.is_orange]
        game_df = data_frame['game']
        for team_index, team_ids in ((1, orange_ids), (0, blue_ids)):
            team_events = [e for e in proto_game.game_stats.rumble_items if e.player_id.id in team_ids]
            _calculate_rumble_stats(team_stat_list[team_index].rumble_stats, team_events, game_df)
def is_rumble_enabled(game: Game) -> bool:
    """
    Check whether rumble is enabled for this game.

    A game counts as rumble when it was played on a (un)ranked rumble playlist,
    or when a custom game used the items-mode mutator.

    :param game: parsed game object
    :return: True if rumble is enabled
    """
    if game is None or game.game_info is None:
        return False
    info = game.game_info
    if info.playlist in (RANKED_RUMBLE, UNRANKED_RUMBLE):
        return True
    mutator = info.rumble_mutator
    return mutator is not None and mutator.startswith('Archetypes.Mutators.SubRules.ItemsMode')
def _get_power_up_events(player: Player, df: pd.DataFrame, game: Game, proto_rumble_item_events) \
        -> List[RumbleItemEvent]:
    """
    Finds the item get and item use events.

    Walks the player's power-up columns one kickoff segment at a time and emits
    a RumbleItemEvent protobuf for every item pickup, recording the use frame
    when the item is consumed.

    :param player: Player info protobuf. Gets the id from here
    :param df: player dataframe, assumes the frames between goal and kickoff are already discarded
    :param game: game object
    :param proto_rumble_item_events: protobuf repeated api.stats.RumbleItemEvent
    :return: list of rumble events
    """
    events = []
    if 'power_up_active' in df and 'power_up' in df:
        if 'time_till_power_up' not in df:
            # someone actually uploaded a 10 second replay of the end of the match..
            df['time_till_power_up'] = math.nan
        df = df[['time_till_power_up', 'power_up', 'power_up_active']]
        # One frame range per kickoff segment: [kickoff_i, kickoff_{i+1}), plus the
        # tail from the final kickoff to the last frame.
        ranges = [(game.kickoff_frames[i], game.kickoff_frames[i + 1]) for i in range(len(game.kickoff_frames) - 1)]
        ranges.append((game.kickoff_frames[-1], df.index[-1]))
        data_frames = map(lambda x: df.loc[x[0]:x[1] - 1], ranges)
        # Squash each segment down to only the frames where 'power_up_active' changes.
        data_frames = map(_squash_power_up_df, data_frames)
        for data_frame in data_frames:
            if len(data_frame) == 0:
                # goal before items
                continue
            if not math.isnan(data_frame.iloc[0]['power_up_active']):
                # happens when kickoff starts with power ups after a goal that was scored less then 1 second before
                # time's up
                # Prepend a sentinel NaN row so the transition logic below sees a pickup.
                data_frame.loc[-1] = [0.0, math.nan, math.nan]
                data_frame.sort_index(inplace=True)
            # State machine over (previous row -> current row) transitions of
            # 'power_up_active':
            #   NaN   -> False : item picked up (or respawn after a demolition)
            #   NaN   -> True  : item picked up and used on the same frame
            #   False -> True  : held item used
            #   False -> NaN   : item used (ball_freeze special case) or player demoed
            prev_row = data_frame.iloc[0]
            proto_current_item = None
            demoed = False
            for i, row in data_frame.iloc[1:].iterrows():
                if math.isnan(prev_row['power_up_active']):
                    if row['power_up_active'] == False:
                        if not demoed:
                            # Rumble item get event
                            proto_current_item = proto_rumble_item_events.add()
                            proto_current_item.frame_number_get = i
                            proto_current_item.item = PowerUp.Value(row['power_up'].upper())
                            proto_current_item.player_id.id = player.id.id
                        else:
                            # back from the dead
                            demoed = False
                    if row['power_up_active'] == True:
                        # immediately used items (mostly bots)
                        tmp_item = proto_rumble_item_events.add()
                        tmp_item.frame_number_get = i
                        tmp_item.frame_number_use = i
                        tmp_item.item = PowerUp.Value(row['power_up'].upper())
                        tmp_item.player_id.id = player.id.id
                        events.append(tmp_item)
                elif prev_row['power_up_active'] == False:
                    if row['power_up_active'] == True or \
                            math.isnan(row['power_up_active']) and prev_row['power_up'] == 'ball_freeze':
                        # Rumble item use event
                        # When a spiked ball is frozen, there is not 'ball_freeze,True' row, it just gets deleted
                        # immediately
                        # Could also happen when the freeze is immediately broken
                        # in theory this should not happen with other power ups?
                        proto_current_item.frame_number_use = i
                        events.append(proto_current_item)
                        proto_current_item = None
                    elif math.isnan(row['power_up_active']):
                        # happens when player is demoed
                        demoed = True
                prev_row = row
            # Item still held at the end of the segment: record it as unused
            # (frame_number_use stays at its default).
            if proto_current_item is not None:
                # unused item
                events.append(proto_current_item)
                proto_current_item = None
    return events
def _squash_power_up_df(df: pd.DataFrame):
    """
    Remove all the rows with repeated 'power_up_active'. The frames are kept whenever the value is changed.

    :param df: slice of the player dataframe for a single kickoff segment
    :return: dataframe reduced to the transition frames of 'power_up_active'
    """
    a = df['power_up_active']
    # Keep rows at NaN/non-NaN boundaries (XOR of null masks) plus every non-NaN row.
    a = a.loc[(a.shift(-1).isnull() ^ a.isnull()) | (a.shift(1).isnull() ^ a.isnull()) | ~a.isnull()]
    # Keep only rows whose value differs from a neighbour, i.e. actual transitions.
    a = a.loc[(a.shift(1) != a) | (a.shift(-1) != a)]
    # Drop any false values that come after true values
    while len(a.loc[(a.shift(1) == True) & (a == False)]) > 0:
        a = a.loc[(a.shift(1) != True) | (a != False)]
    # Re-attach the other two columns, restricted to the surviving frames.
    df = pd.concat([df[['time_till_power_up', 'power_up']], a], axis=1, join='inner')
    return df
def _calculate_rumble_stats(rumble_proto: RumbleStats, events: List[RumbleItemEvent], game_df: pd.DataFrame):
    """
    Calculate rumble stats for every known power-up and append them to the proto.

    :param rumble_proto: proto for stats
    :param events: list of rumble events
    :param game_df: game dataframe, used for getting the time delta
    """
    for power_up in set(PowerUp.values()):
        per_item = _calculate_rumble_stats_for_power_up(events, power_up, game_df)
        entry = rumble_proto.rumble_items.add()
        entry.item = power_up
        entry.used = per_item['used']
        entry.unused = per_item['unused']
        entry.average_hold = per_item['average_hold']
def _calculate_rumble_stats_for_power_up(events: List[RumbleItemEvent], power_up: int, game_df: pd.DataFrame) -> Dict:
"""
Calculate rumble statistics (used, unused, average hold) based on rumble events.
:param events: list of rumble events
:param power_up: power up enum
:param game_df: game dataframe, used for getting the time delta
:return: stats
"""
item_events = filter(lambda x: x.item == power_up, events)
base = {'used': 0, 'unused': 0, 'average_hold': 0}
for event in item_events:
if event.frame_number_use != -1:
base['used'] += 1
base['average_hold'] += game_df.loc[event.frame_number_use]['time'] - \
game_df.loc[event.frame_number_get]['time']
else:
base['unused'] += 1
if base['used'] > 0:
base['average_hold'] /= base['used']
return base
| StarcoderdataPython |
12846470 | <gh_stars>1-10
#Script_sFuzz_data_retriever_v0.6
#format = python3 sfuzz_data_retr.py <filename> <contractname> <contracts_folder>
import json
import os
from decimal import Decimal
import sys
from openpyxl import load_workbook
import pandas as pd
import coverage_json
import vulnerabilities_json
# Map sFuzz vulnerability names onto the canonical labels used in the
# aggregated vulnerabilities report.  Both integer over- and underflow map to
# "Overflow"; timestamp and block-number dependence both map to "BlockStateDep".
_VULN_NAME_MAP = {
    "gasless send": "gasless",
    "dangerous delegatecall": "DangerousDelegatecall",
    "exception disorder": "UnhandledException",
    "freezing ether": "Locking",
    "reentrancy": "Reentrancy",
    "integer overflow": "Overflow",
    "timestamp dependency": "BlockStateDep",
    "integer underflow": "Overflow",
    "block number dependency": "BlockStateDep",
}

# Command-line arguments: <filename> <contractname> <contracts_folder>
filename = sys.argv[1]
Contractname = sys.argv[2]
contracts_fold = sys.argv[3]

# Locate the sFuzz stats.json output anywhere below the contracts folder.
# BUG FIX: 'File' was previously left undefined when no stats.json existed,
# raising NameError at the os.path.isfile() call instead of exiting cleanly.
File = None
for root, dirs, files in os.walk(contracts_fold):
    for file in files:
        if file == "stats.json":
            File = os.path.join(root, file)

# Bail out quietly when there is no (non-empty) stats file to process.
if File is None or not os.path.isfile(File) or os.stat(File).st_size == 0:
    sys.exit()

with open(File, 'r', encoding="utf-8") as f:
    vuln_json = json.load(f)

dur = float(vuln_json["duration"])
time_taken = "{:.2f} secs".format(dur)
total_execs = vuln_json["totalExecs"]
vulnerabilities = vuln_json["vulnerabilities"]
Branch_coverage = "{} % ({})".format(vuln_json["coverage"], vuln_json["branches"])

# Record every vulnerability class that sFuzz flagged (count != "0");
# names that are not in the map are ignored, matching the original behavior.
for key, value in vulnerabilities.items():
    if value != "0":
        mapped = _VULN_NAME_MAP.get(key)
        if mapped is not None:
            vulnerabilities_json.Vulnerabilities_detected.append(mapped)

# Publish the collected metrics through the shared report modules.
coverage_json.Branchcov = Branch_coverage
coverage_json.Transactions = total_execs
coverage_json.timetaken = time_taken
coverage_json.coverage_json_maker()
vulnerabilities_json.vuln_Jsonmaker()
228475 | from ..error import NauticalError
from math import radians, sin, cos, atan2, sqrt
from .point import Point
# Mean Earth radius used by the haversine computation, in meters.
_EARTH_RADIUS_METERS = 6372800


def haversine(p1: Point, p2: Point) -> float:
    """Great-circle distance between two points via the haversine formula.

    :param p1: Point 1
    :param p2: Point 2
    :return: distance between the two points in meters
    """
    phi1 = radians(p1.latitude)
    phi2 = radians(p2.latitude)
    d_phi = radians(p1.latitude - p2.latitude)
    d_lambda = radians(p1.longitude - p2.longitude)
    h = sin(d_phi / 2.0) ** 2 + cos(phi1) * cos(phi2) * sin(d_lambda / 2.0) ** 2
    return 2.0 * _EARTH_RADIUS_METERS * atan2(sqrt(h), sqrt(1 - h))
def in_range_ll(lat1_deg: float, lon1_deg: float, lat2_deg: float, lon2_deg: float, distance_m) -> bool:
    """Determine if two lat/lon positions are within a distance of each other.

    Convenience wrapper around `in_range` for raw coordinate pairs.

    :param lat1_deg: Latitude of point 1 in degrees
    :param lon1_deg: Longitude of point 1 in degrees
    :param lat2_deg: Latitude of point 2 in degrees
    :param lon2_deg: Longitude of point 2 in degrees
    :param distance_m: Max allowed distance between points to return true (meters).
    :return: True when the distance between the points is less than (or equal to) distance_m
    """
    first = Point(lat1_deg, lon1_deg)
    second = Point(lat2_deg, lon2_deg)
    return in_range(first, second, distance_m)
def in_range(p1: Point, p2: Point, distance_m) -> bool:
    """Determine if two points are within a distance of each other.

    :param p1: Point 1
    :param p2: Point 2
    :param distance_m: Max allowed distance between points to return true (meters).
    :return: True when the haversine distance between p1 and p2 is <= distance_m
    """
    separation = haversine(p1, p2)
    return separation <= distance_m
def area_converter(area: [Point]) -> [Point]:
    """
    Reduce a list of points to its axis-aligned bounding box.

    Non-Point entries are ignored.  An empty (or all-non-Point) input yields
    infinite bounds, matching the original accumulator behavior.

    :param area: original list of points
    :return: two Points: (min_lat, min_lon) and (max_lat, max_lon)
    """
    lats = [pt.latitude for pt in area if isinstance(pt, Point)]
    lons = [pt.longitude for pt in area if isinstance(pt, Point)]
    min_lat = min(lats, default=float("inf"))
    max_lat = max(lats, default=-float("inf"))
    min_lon = min(lons, default=float("inf"))
    max_lon = max(lons, default=-float("inf"))
    return [Point(min_lat, min_lon), Point(max_lat, max_lon)]
def in_area(p: Point, area: [Point]) -> bool:
    """
    Determine if point p is in the area. NOTE: the user should pass the original
    list of points through the area_converter. This will provide an approximate area.

    :param p: Point to determine if it is in the area
    :param area: 2 point area min, min -> max, max
    :return: true if it is in the area false otherwise
    :raises NauticalError: when area does not contain exactly 2 points
    """
    if len(area) != 2:
        raise NauticalError("area should be 2 points (min, min) -> (max, max).")
    # BUG FIX: every other function in this module (haversine, area_converter)
    # reads Point via .latitude/.longitude; the previous .lat/.lon attribute
    # names were inconsistent with the rest of the file.
    max_lat = max(area[0].latitude, area[1].latitude)
    min_lat = min(area[0].latitude, area[1].latitude)
    max_lon = max(area[0].longitude, area[1].longitude)
    min_lon = min(area[0].longitude, area[1].longitude)
    return max_lat >= p.latitude >= min_lat and max_lon >= p.longitude >= min_lon
| StarcoderdataPython |
8155846 | <gh_stars>1-10
import db
class ParamSet(db.Tbl):
    """Table of training hyper-parameter sets (one row per configuration)."""

    def __init__(self):
        super().__init__('param_set')
        # Surrogate identifier for this parameter set.
        self.param_set_id = db.serial64
        # Model / optimizer description and checkpoint naming.
        self.model = db.text
        self.hidden_size = db.int32
        self.optimizer = db.text
        self.state_dict_prefix = db.text
        # Observation preprocessing parameters.
        self.window_size = db.int32
        self.frames_height_width = db.array_int32
        # Distributed-training topology.
        self.num_actors = db.int64
        self.num_steps = db.int64
        # RL hyper-parameters.
        self.epsilon = db.float64
        self.alpha = db.float64
        self.gamma = db.float64
        self.q_target_sync_freq = db.int64
        # Replay-buffer / prioritized-sampling settings.
        self.min_replay_mem_size = db.int64
        self.replay_sample_size = db.int64
        self.soft_capacity = db.int64
        self.priority_exponent = db.float64
        self.importance_sampling_exponent = db.float64
        # Pluggable strategy names.
        self.action_suggester = db.text
        self.reward_adjuster = db.text
        self.policy = db.text
        # Every column participates in the primary key, so identical
        # configurations map to the same row.
        self.pk(self.get_cols())
class ActorData(db.Tbl):
    """Per-step telemetry logged by each actor process."""

    def __init__(self, alias=None):
        # `alias` lets queries reference the table under a different name.
        super().__init__('actor_data', alias)
        self.param_set_id = db.int16
        self.actor_id = db.int16
        self.timestamp = db.timestamp
        self.train_num = db.int32
        self.ep_count = db.int32
        self.episode_index = db.int32
        self.index_in_episode = db.int32
        # Chosen action vs the greedy (Q-maximizing) action.
        self.action = db.int16
        self.q_action = db.int16
        self.reward = db.float32
        self.position_action = db.int16
        self.position_q_action = db.int16
        self.position_index_in_episode = db.int32
        self.sum_reward = db.float32
        # Secondary indexes on the common filter columns.
        self.idx([self.param_set_id])
        self.idx([self.actor_id])
        self.idx([self.timestamp])
        self.idx([self.train_num])
        self.idx([self.ep_count])
class LearnerData(db.Tbl):
    """Per-training-step metrics logged by the learner process."""

    def __init__(self, alias=None):
        # `alias` lets queries reference the table under a different name.
        super().__init__('learner_data', alias)
        self.param_set_id = db.int16
        self.timestamp = db.timestamp
        self.train_num = db.int32
        self.step_num = db.int32
        self.loss = db.float32
        self.q = db.array_float32
        # Replay priorities for the sampled batch, before and after the update.
        self.before_priorities = db.array_float32
        self.after_priorities = db.array_float32
        self.indices = db.array_int32
        # Counters for target-network syncs and parameter broadcasts.
        self.target_sync_num = db.int32
        self.send_param_num = db.int32
        # Secondary indexes on the common filter columns.
        self.idx([self.param_set_id])
        self.idx([self.train_num])
        self.idx([self.timestamp])
        self.idx([self.param_set_id, self.timestamp])
class RewardAdjData(db.Tbl):
    """Adjusted-reward values recorded per actor step."""

    def __init__(self, alias=None):
        # `alias` lets queries reference the table under a different name.
        super().__init__('reward_adj_data', alias)
        self.param_set_id = db.int16
        self.actor_id = db.int16
        self.ep_count = db.int32
        self.index_in_episode = db.int32
        self.reward_adj = db.float32
        # Secondary indexes on the common filter columns.
        self.idx([self.param_set_id])
        self.idx([self.actor_id])
        self.idx([self.ep_count])
        self.idx([self.index_in_episode])
3437476 | <filename>scripts/remove_unplaced_multiallelic.py
# import necessary modules
import os
import time
import argparse
import re
import gzip
# Keep track of when the script began
startTime = time.time()
char = '\n' + ('*' * 70) + '\n'

# Argparse Information
parser = argparse.ArgumentParser(description="Positions that are multiallelic or duplicates are removed because \
programs such as PLINK and SHAPEIT2 can not handle these types of sites. Also, positions that contain missing genotype \
information (i.e. './.') in more then one sample are removed to improve phasing accuracy.")
parser.add_argument('input_vcf', help='Input VCF file')
parser.add_argument('output_vcf', help='Output VCF file')
args = parser.parse_args()

# Create variables of each argument from argparse.
inputFile = args.input_vcf
# BUG FIX: str.rstrip(".gz") strips the *character set* {'.', 'g', 'z'} from the
# end of the name (mangling e.g. "sample.g.gz" -> "sample"); strip the literal
# ".gz" suffix instead.
outputFile = args.output_vcf[:-3] if args.output_vcf.endswith(".gz") else args.output_vcf
tempFile = "/tmp/temp.vcf"
# NOTE(review): assumes outputFile contains at least one '.'; an unmatched name
# would raise IndexError here -- confirm inputs are always *.vcf[.gz] paths.
fileWithoutSuffix = re.findall(r'([\w\-_/]+)\.', outputFile)[0]
duplicateFile = f"{fileWithoutSuffix}_removed_duplicates.vcf"

# Chromosomes to keep: standard autosomes plus sex chromosomes.  Any position
# on an unplaced/alternate contig is discarded.
chrToKeep = {"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13",
             "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"}

# Pass 1: drop multiallelic sites (comma in the ALT column) and positions with
# more than one missing genotype ('./.') call.
with gzip.open(inputFile, "rt") as inFile, open(tempFile, "wt") as outFile:
    for line in inFile:
        if line.startswith("#"):
            outFile.write(line)
        else:
            splitLine = line.split("\t")
            # splitLine[4] is the ALT field; a comma means the site is multiallelic.
            if splitLine[0] in chrToKeep and "," not in splitLine[4] and line.count("./.") < 2:
                outFile.write(line)

os.system(f"bgzip -f {tempFile}")
tempFile = "/tmp/temp.vcf.gz"

# Pass 2: index every position per chromosome and remember which positions occur
# more than once.
posDict = dict()
dupDict = dict()
with gzip.open(tempFile, "rt") as inputFile:
    for line in inputFile:
        if not line.startswith("#"):
            line = line.split("\t")
            chromosome = line[0]
            pos = line[1]
            if chromosome not in posDict:
                posDict[chromosome] = {pos}
                dupDict[chromosome] = set()
            elif pos in posDict[chromosome]:
                dupDict[chromosome].add(pos)
            else:
                posDict[chromosome].add(pos)

# Pass 3: write unique sites to the output VCF and duplicated sites to a side
# file for inspection; header lines go to both.
with gzip.open(tempFile, "rt") as inputFile, open(outputFile, "wt") as outFile, open(duplicateFile, "w") as duplicates:
    for line in inputFile:
        if not line.startswith("#"):
            splitLine = line.split("\t")
            chromosome = splitLine[0]
            pos = splitLine[1]
            if pos not in dupDict[chromosome]:
                outFile.write(line)
            else:
                duplicates.write(line)
        else:
            outFile.write(line)
            duplicates.write(line)

os.system(f"bgzip -f {outputFile}")

# Output time it took to complete
timeElapsedMinutes = round((time.time() - startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print(f'{char}Done. Time elapsed: {timeElapsedMinutes} minutes ({timeElapsedHours} hours){char}')
3456871 | # Generated by Django 2.0 on 2018-12-07 11:05
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.citext
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial auto-generated migration for the vector_control app.

    Creates the Catch, GpsImport, GpsWaypoint, Site and Target models, then
    wires the Catch foreign keys (added separately because Site is created
    after Catch).
    """

    # First migration for this application.
    initial = True

    # Depends on whichever user model the project has configured (AUTH_USER_MODEL).
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]

    operations = [
        # Catch: a single trap collection event (counts by sex, elevations, remarks).
        migrations.CreateModel(
            name="Catch",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("operation", models.TextField(null=True)),
                ("setup_date", models.DateTimeField(null=True)),
                ("collect_date", models.DateTimeField(null=True)),
                ("in_out", models.TextField(null=True)),
                ("male_count", models.IntegerField(default=0, null=True)),
                ("female_count", models.IntegerField(default=0, null=True)),
                ("unknown_count", models.IntegerField(default=0, null=True)),
                ("remarks", models.TextField(default="")),
                (
                    "distance_to_targets",
                    models.DecimalField(decimal_places=3, max_digits=10, null=True),
                ),
                ("near_intervention", models.CharField(max_length=100)),
                ("elev_change", models.IntegerField(null=True)),
                ("trap_elev", models.IntegerField(null=True)),
                ("target_elev", models.IntegerField(null=True)),
                ("elev_diff", models.IntegerField(null=True)),
                # Stable external identifier, independent of the DB primary key.
                ("uuid", models.TextField(default=uuid.uuid4, unique=True)),
                (
                    "source",
                    models.TextField(
                        choices=[("excel", "Excel"), ("API", "API")],
                        default="excel",
                        null=True,
                    ),
                ),
            ],
        ),
        # GpsImport: one uploaded GPS file (owner + file metadata).
        migrations.CreateModel(
            name="GpsImport",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("filename", models.TextField()),
                ("file_date_time", models.DateTimeField(null=True)),
                ("creator", models.TextField(blank=True, null=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # GpsWaypoint: a single point from a GpsImport (Postgres-specific
        # ArrayField of case-insensitive text tags).
        migrations.CreateModel(
            name="GpsWaypoint",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.TextField()),
                ("date_time", models.DateTimeField()),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "elevation",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                (
                    "tags",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=django.contrib.postgres.fields.citext.CITextField(blank=True, max_length=255),
                        blank=True,
                        null=True,
                        size=20,
                    ),
                ),
                ("ignore", models.BooleanField(default=False)),
                (
                    "gps_import",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="vector_control.GpsImport",
                    ),
                ),
            ],
        ),
        # Site: a surveyed location where traps are placed.
        migrations.CreateModel(
            name="Site",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=50, null=True)),
                ("zone", models.TextField(null=True)),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "altitude",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                (
                    "accuracy",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                ("habitat", models.CharField(max_length=255, null=True)),
                ("description", models.CharField(max_length=255, null=True)),
                ("first_survey", models.CharField(max_length=255, null=True)),
                ("first_survey_date", models.DateTimeField(null=True)),
                ("count", models.IntegerField()),
                ("total", models.IntegerField()),
                # Stable external identifier, independent of the DB primary key.
                ("uuid", models.TextField(default=uuid.uuid4, unique=True)),
                (
                    "source",
                    models.TextField(
                        choices=[("excel", "Excel"), ("API", "API")],
                        default="excel",
                        null=True,
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Target: a named deployment target with GPS position.
        migrations.CreateModel(
            name="Target",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.TextField(null=True)),
                ("deployment", models.IntegerField(null=True)),
                ("full_name", models.TextField(null=True)),
                ("gps", models.CharField(max_length=100)),
                (
                    "latitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "longitude",
                    models.DecimalField(decimal_places=6, max_digits=10, null=True),
                ),
                (
                    "altitude",
                    models.DecimalField(decimal_places=2, max_digits=7, null=True),
                ),
                ("date_time", models.DateTimeField(null=True)),
                ("river", models.TextField(null=True)),
            ],
        ),
        # Catch foreign keys, added after Site exists.
        migrations.AddField(
            model_name="catch",
            name="site",
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="vector_control.Site"),
        ),
        migrations.AddField(
            model_name="catch",
            name="user",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.DO_NOTHING,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
| StarcoderdataPython |
3370576 | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
import os
import tempfile
import time
from fn_virustotal.lib.resilient_common import validateFields, get_input_entity, get_resilient_client
from fn_virustotal.lib.errors import IntegrationError
from virus_total_apis import PublicApi as VirusTotal
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import RequestsCommon
RC_NOT_FOUND = 0
RC_READY = 1
RC_IN_QUEUE = -2
HTTP_OK = 200
HTTP_RATE_LIMIT = 204
class FunctionComponent(ResilientComponent):
    """Component that implements the Resilient function 'virustotal'.

    Submits IPs, domains, hashes, URLs and file attachments/artifacts to
    VirusTotal and polls until a report is available (bounded by the
    ``max_polling_wait_sec`` app.config option).
    """

    def __init__(self, opts):
        """Constructor provides access to the configuration options."""
        super(FunctionComponent, self).__init__(opts)
        # Logger created here so every method (not just the function handler)
        # can use self.log safely.
        self.log = logging.getLogger(__name__)
        self.opts = opts
        self.options = opts.get("fn_virustotal", {})
        self.resilient = opts.get("resilient", {})
        self._init_virustotal()

    def _init_virustotal(self):
        """Validate the required fields from app.config."""
        validateFields(('api_token', 'polling_interval_sec', 'max_polling_wait_sec'), self.options)

    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values."""
        self.opts = opts
        self.options = opts.get("fn_virustotal", {})
        self.resilient = opts.get("resilient", {})
        self._init_virustotal()

    @function("virustotal")
    def _virustotal_function(self, event, *args, **kwargs):
        """Function: perform different scans on the following types:
             ip addresses
             hash - this will attempt to find an existing file report on the hash
             domain
             url - this will attempt to find an existing file report on the url. If none exist, a new scan is queued
             file - this will start a new scan for the file and queue for a report later.
        """
        try:
            validateFields(('incident_id', 'vt_type'), kwargs)  # required

            # Init RequestsCommon with app.config options
            rc = RequestsCommon(opts=self.opts, function_opts=self.options)
            # Create a VirusTotal instance with the API token and any proxies
            # gathered by RequestsCommon.
            vt = VirusTotal(self.options['api_token'], rc.get_proxies())

            # Get the function parameters:
            incident_id = kwargs.get("incident_id")  # number
            artifact_id = kwargs.get("artifact_id")  # number
            attachment_id = kwargs.get("attachment_id")  # number
            vt_type = kwargs.get("vt_type")  # text
            vt_data = kwargs.get("vt_data")  # text

            self.log.info("incident_id: %s", incident_id)
            self.log.info("artifact_id: %s", artifact_id)
            self.log.info("attachment_id: %s", attachment_id)
            self.log.info("vt_type: %s", vt_type)
            self.log.info("vt_data: %s", vt_data)

            yield StatusMessage("starting...")

            # determine next steps based on the API call to make
            if vt_type.lower() == 'file':
                entity = get_input_entity(get_resilient_client(self.resilient), incident_id, attachment_id, artifact_id)
                # Create a temporary file to write the binary data to.
                with tempfile.NamedTemporaryFile('w+b', delete=False) as temp_file_binary:
                    # Write binary data to a temporary file and close it before
                    # handing the path to the scanner: on Windows the file
                    # cannot be opened a second time while still open here.
                    temp_file_binary.write(entity["data"])
                    temp_file_binary.close()

                    try:
                        response = vt.scan_file(temp_file_binary.name, filename=entity["name"])
                    except Exception as err:
                        raise err
                    finally:
                        # Always remove the temp file, even if the scan fails.
                        os.unlink(temp_file_binary.name)

                file_result = self.return_response(response, vt.get_file_report, time.time())

                # Was a sha-256 returned? Try an existing report first; fall
                # back to the freshly queued scan result otherwise.
                if file_result.get("sha256"):
                    response = vt.get_file_report(file_result.get("sha256"))
                    report_result = self.return_response(response, None, time.time())
                    if report_result.get("response_code") and report_result.get("response_code") == RC_READY:
                        result = report_result
                    else:
                        result = file_result

            elif vt_type.lower() == 'url':
                # Attempt to see if a report already exists.
                response = vt.get_url_report(vt_data)
                result = self.return_response(response, None, time.time())
                # No existing report: queue a new scan and poll for it.
                if result['response_code'] == RC_NOT_FOUND:
                    response = vt.scan_url(vt_data)
                    result = self.return_response(response, vt.get_url_report, time.time())

            elif vt_type.lower() == 'ip':
                response = vt.get_ip_report(vt_data)
                result = self.return_response(response, None, time.time())

            elif vt_type.lower() == 'domain':
                response = vt.get_domain_report(vt_data)
                result = self.return_response(response, None, time.time())

            elif vt_type.lower() == 'hash':
                response = vt.get_file_report(vt_data)
                result = self.return_response(response, None, time.time())

            else:
                raise ValueError("Unknown type field: {}. Check workflow pre-processor script.".format(vt_type))

            results = {
                "scan": result
            }
            self.log.debug("scan: {}".format(results))

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()

    def return_response(self, response, callback, start_time):
        """
        Validate the transport-level response and hand the payload to check_results.

        :param response: raw dict returned by the virus_total_apis client
        :param callback: API method to re-poll with when the scan is still queued (may be None)
        :param start_time: time.time() (seconds) when polling began
        :return: the final 'results' payload
        :raises IntegrationError: on rate limiting or an unexpected HTTP status
        """
        if response and type(response) is not dict:
            raise IntegrationError('Invalid response: {}'.format(response))

        # This is the HTTP-level status from the client, not the scan status.
        status = response.get('response_code', -1)
        if status == HTTP_RATE_LIMIT:
            raise IntegrationError('API rate limit exceeded')

        self.log.info(response)
        if status != HTTP_OK:
            raise IntegrationError('Invalid response status: {}'.format(status))

        return self.check_results(response['results'], callback, start_time)

    def check_results(self, results, callback, start_time):
        """
        Poll (via callback) until the scan completes or the max wait is exceeded.

        :param results: possibly interim results payload from VirusTotal
        :param callback: API method used to re-poll a queued scan (may be None)
        :param start_time: time.time() (seconds) when polling began
        :return: final results of scan
        :raises IntegrationError: on timeout, missing callback, or an unexpected code
        """
        code = results.get('response_code', None)
        scan_id = results.get('scan_id', None)

        if code == RC_READY or code == RC_NOT_FOUND:
            return results
        elif code == RC_IN_QUEUE:
            curr_time = time.time()
            # BUG FIX: time.time() is already in seconds; the previous code
            # divided the delta by 1000, so the timeout could never trigger.
            if int(curr_time - start_time) >= int(self.options.get('max_polling_wait_sec')):
                raise IntegrationError("exceeded max wait time: {}".format(self.options.get('max_polling_wait_sec')))
            if callback:
                time.sleep(int(self.options['polling_interval_sec']))
                # BUG FIX: the previous code called callback(id), passing the
                # builtin id() function instead of the scan identifier.
                response = callback(scan_id)
                results = self.return_response(response, callback, start_time)
            else:
                raise IntegrationError("no callback function specified with response code: {} scan id {}".format(code, scan_id))
        else:
            raise IntegrationError("unexpected response code: {} for scan_id {}".format(code, scan_id))

        self.log.debug(results)
        return results
3334 | <reponame>HosseyNJF/Telethon
from .. import types
from ... import utils
class Button:
"""
.. note::
This class is used to **define** reply markups, e.g. when
sending a message or replying to events. When you access
`Message.buttons <telethon.tl.custom.message.Message.buttons>`
they are actually `MessageButton
<telethon.tl.custom.messagebutton.MessageButton>`,
so you might want to refer to that class instead.
Helper class to allow defining ``reply_markup`` when
sending a message with inline or keyboard buttons.
You should make use of the defined class methods to create button
instances instead making them yourself (i.e. don't do ``Button(...)``
but instead use methods line `Button.inline(...) <inline>` etc.
You can use `inline`, `switch_inline`, `url` and `auth`
together to create inline buttons (under the message).
You can use `text`, `request_location`, `request_phone` and `request_poll`
together to create a reply markup (replaces the user keyboard).
You can also configure the aspect of the reply with these.
The latest message with a reply markup will be the one shown to the user
(messages contain the buttons, not the chat itself).
You **cannot** mix the two type of buttons together,
and it will error if you try to do so.
The text for all buttons may be at most 142 characters.
If more characters are given, Telegram will cut the text
to 128 characters and add the ellipsis (โฆ) character as
the 129.
"""
def __init__(self, button, *, resize, single_use, selective):
self.button = button
self.resize = resize
self.single_use = single_use
self.selective = selective
@staticmethod
def _is_inline(button):
"""
Returns `True` if the button belongs to an inline keyboard.
"""
return isinstance(button, (
types.KeyboardButtonCallback,
types.KeyboardButtonSwitchInline,
types.KeyboardButtonUrl,
types.InputKeyboardButtonUrlAuth
))
@staticmethod
def inline(text, data=None):
"""
Creates a new inline button with some payload data in it.
If `data` is omitted, the given `text` will be used as `data`.
In any case `data` should be either `bytes` or `str`.
Note that the given `data` must be less or equal to 64 bytes.
If more than 64 bytes are passed as data, ``ValueError`` is raised.
If you need to store more than 64 bytes, consider saving the real
data in a database and a reference to that data inside the button.
When the user clicks this button, `events.CallbackQuery
<telethon.events.callbackquery.CallbackQuery>` will trigger with the
same data that the button contained, so that you can determine which
button was pressed.
"""
if not data:
data = text.encode('utf-8')
elif not isinstance(data, (bytes, bytearray, memoryview)):
data = str(data).encode('utf-8')
if len(data) > 64:
raise ValueError('Too many bytes for the data')
return types.KeyboardButtonCallback(text, data)
@staticmethod
def switch_inline(text, query='', same_peer=False):
    """
    Creates a new inline button that switches the user to inline mode.

    `query` is the text pre-filled in the inline query input. With
    ``same_peer=True`` the query opens in the current chat; otherwise
    the user is asked to pick a dialog first.

    After the chat is selected, the user's input field is filled with
    your bot's username followed by `query`, ready for inline queries.
    """
    button = types.KeyboardButtonSwitchInline(text, query, same_peer)
    return button
@staticmethod
def url(text, url=None):
    """
    Creates a new inline button that opens a URL when clicked.

    If `url` is falsy, `text` doubles as the URL. Clicks on this button
    cannot be detected by the bot; the user sees a confirmation box
    (unless the domain is trusted) and the URL then opens on their device.
    """
    target = url if url else text
    return types.KeyboardButtonUrl(text, target)
@staticmethod
def auth(text, url=None, *, bot=None, write_access=False, fwd_text=None):
    """
    Creates a new inline button to log the user in at the given URL.

    The `url` should be on the domain configured for `bot` via
    `@BotFather <https://t.me/BotFather>`_ with ``/setdomain``; see
    https://core.telegram.org/widgets/login for details. If `url` is
    omitted, `text` is used in its place.

    Args:
        bot (`hints.EntityLike`):
            The bot requiring the authorization; defaults to the bot
            currently logged in (itself). Note that IDs or usernames
            are not accepted here — resolve a different bot manually
            with `client.get_input_entity()
            <telethon.client.users.UserMethods.get_input_entity>`.

        write_access (`bool`):
            Whether write access is required (`False` = read-only).

        fwd_text (`str`):
            Replacement button text shown when the message is
            forwarded; defaults to the original text.

    Clicking this button shows the user a confirmation box asking
    whether they want to log in to the specified domain.
    """
    target_bot = utils.get_input_user(bot or types.InputUserSelf())
    return types.InputKeyboardButtonUrlAuth(
        text=text,
        url=url or text,
        bot=target_bot,
        request_write_access=write_access,
        fwd_text=fwd_text
    )
@classmethod
def text(cls, text, *, resize=None, single_use=None, selective=None):
    """
    Creates a new reply-keyboard button with the given text.

    Args:
        resize (`bool`):
            If present, the whole keyboard is resized to be smaller
            when there are not many buttons.

        single_use (`bool`):
            If present, the whole keyboard hides itself after being
            used once.

        selective (`bool`):
            If present, the keyboard is only shown to specific users:
            those @mentioned in the message text, or the sender of the
            message being replied to.

    Clicking this button sends a plain text message with the same text,
    which can be handled with `events.NewMessage
    <telethon.events.newmessage.NewMessage>`; it is indistinguishable
    from the user typing the same text themselves.
    """
    raw = types.KeyboardButton(text)
    return cls(raw, resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_location(cls, text, *,
                     resize=None, single_use=None, selective=None):
    """
    Creates a new keyboard button asking for the user's location.

    ``resize``, ``single_use`` and ``selective`` are documented in `text`.

    On click the user is asked to confirm sharing their location with
    the bot; if confirmed, a message with geo media is sent.
    """
    raw = types.KeyboardButtonRequestGeoLocation(text)
    return cls(raw, resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_phone(cls, text, *,
                  resize=None, single_use=None, selective=None):
    """
    Creates a new keyboard button asking for the user's phone number.

    ``resize``, ``single_use`` and ``selective`` are documented in `text`.

    On click the user is asked to confirm sharing their phone with the
    bot; if confirmed, a message with contact media is sent.
    """
    raw = types.KeyboardButtonRequestPhone(text)
    return cls(raw, resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_poll(cls, text, *, force_quiz=False,
                 resize=None, single_use=None, selective=None):
    """
    Creates a new keyboard button asking the user to create a poll.

    With ``force_quiz=True`` the user must create a quiz poll (exactly
    one valid answer, votes cannot be retracted). Otherwise the user
    chooses whether the poll is a quiz, and the poll might be multiple
    choice.

    ``resize``, ``single_use`` and ``selective`` are documented in `text`.

    On click a poll-creation screen is shown; if the user creates a
    poll, it is sent to the chat.
    """
    raw = types.KeyboardButtonRequestPoll(text, quiz=force_quiz)
    return cls(raw, resize=resize, single_use=single_use, selective=selective)
@staticmethod
def clear():
    """
    Clears all keyboard buttons after sending a message with this markup.
    When used, no other button should be present or it will be ignored.
    """
    # Despite the method name, this sends the "hide reply keyboard" markup.
    return types.ReplyKeyboardHide()
@staticmethod
def force_reply():
    """
    Forces a reply to the message with this markup. If used,
    no other button should be present or it will be ignored.
    """
    # The client UI will show a reply prompt to the user automatically.
    return types.ReplyKeyboardForceReply()
| StarcoderdataPython |
8081661 | <reponame>ramtw/pyMD<filename>pyMD.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
# PythenMusicDeamon (pyMD) Common File
#
# $Id: $
#
# Authors: <NAME> <ann<EMAIL>.<EMAIL>eck at outlook.de>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
import os
import sys
import hashlib
import configparser
import random
import binascii
# Protocol marker strings exchanged between the pyMD client and server.
# (Semantics inferred from the names — confirm against the protocol code.)
ERR_PASSWD = "__err_passwd__"        # presumably: password rejected
INF_SERDOWN = "__info_serverdown__"  # presumably: server is shutting down
CL_HELLO = "__client_hello__"        # presumably: client greeting
CL_EXIT = "__client_bye__"           # presumably: client disconnect
# Release metadata.
VERSION = '0.91.5 AES'
CODENAME = 'Pockenberg'
AUTHOR = '<NAME> <<EMAIL>>'
PYMDString = 'pyMD ' + VERSION + ' ' + CODENAME
def get_hashfrompass(passwd):
    """
    Derive a 32-byte SHA-256 digest from *passwd* mixed with a random
    128-bit salt.

    NOTE: the salt is generated here and then discarded, so the result
    differs on every call; the digest itself is what gets stored and
    compared as the shared secret.
    """
    salt = str(random.getrandbits(128))
    digest = hashlib.sha256((passwd + salt).encode()).digest()
    return digest
def byteToHex(byteHash):
    """Return the lowercase hexadecimal string for the given bytes."""
    return byteHash.hex()
def hexToByte(hexHash):
    """Inverse of ``byteToHex``: decode a hex string back into bytes."""
    return binascii.unhexlify(hexHash.encode())
def get_config_path(file):
    """
    Return the platform-specific location of config file *file*:
    ``/etc/<file>`` on Linux/macOS, the bare filename on Windows,
    and ``None`` on any other platform.
    """
    if sys.platform.startswith('win'):
        return file
    if sys.platform.startswith(('linux', 'darwin')):
        return "/etc/" + file
    return None
def get_log_path():
    """
    Return the platform-specific log file path:
    ``/var/log/pymd.log`` on Linux/macOS, ``pymd.log`` on Windows,
    and ``None`` on any other platform.
    """
    if sys.platform.startswith('win'):
        return "pymd.log"
    if sys.platform.startswith(('linux', 'darwin')):
        return "/var/log/pymd.log"
    return None
class client_config:
    """
    Thin wrapper around the pyMDClient.ini configuration file.

    On the first run (no config file present) the user is prompted for
    host, port and password hash and the file is written to disk.
    """

    def __init__(self):
        self.m_path = get_config_path("pyMDClient.ini")
        self.m_config = configparser.ConfigParser()
        if not os.path.isfile(self.m_path):
            # First run: interactively collect the connection settings.
            print("[Client First run] Create config")
            host = input("Host: ")
            port = input("Port: ")
            hash_hex = input("hash: ")
            self.m_config['client'] = {'hash': hash_hex,
                                       'port': port,
                                       'addr': host}
            self.save()
        self.m_config.read(self.m_path)

    def save(self):
        """Persist the in-memory configuration to disk."""
        with open(self.m_path, 'w') as configfile:
            self.m_config.write(configfile)

    def get_server_port(self):
        """Server TCP port as an int."""
        return int(self.m_config['client']['port'])

    def get_server_addr(self):
        """Server host name or address."""
        return self.m_config['client']['addr']

    def get_server_hash(self):
        """Password hash as raw bytes (stored hex-encoded in the file)."""
        return hexToByte(self.m_config['client']['hash'])
class server_config:
    """
    Thin wrapper around the pyMDServer.ini configuration file.

    On the first run the user is prompted for the server password; a
    hex-encoded hash of it is stored together with default music/server
    settings.
    """

    def __init__(self):
        self.m_path = get_config_path("pyMDServer.ini")
        self.m_config = configparser.ConfigParser()
        if not os.path.isfile(self.m_path):
            # First run: create the config with defaults and a fresh hash.
            print("[First run] Create config")
            passwd = input("Please enter the server password: ")
            temp = get_hashfrompass(passwd)
            self.m_config['music'] = {'path': 'data',
                                      'volume': '80',
                                      'soundcard': '0'}
            self.m_config['server'] = {'hash': byteToHex(temp),
                                       'port': '8089',
                                       'bind': 'localhost',
                                       'loggingLevel': '0',
                                       'loggingFile': get_log_path()}
            self.save()
        self.m_config.read(self.m_path)

    def get_music_path(self):
        """Directory the music files live in."""
        return self.m_config['music']['path']

    def get_music_volume(self):
        """Playback volume as an int."""
        return int(self.m_config['music']['volume'])

    def get_server_hash(self):
        """Password hash as raw bytes (stored hex-encoded in the file)."""
        return hexToByte(self.m_config['server']['hash'])

    def get_server_port(self):
        """Listen port as an int."""
        return int(self.m_config['server']['port'])

    def get_server_addr(self):
        """Bind address."""
        return self.m_config['server']['bind']

    def get_server_loggingLevel(self):
        """Logging level as an int."""
        return int(self.m_config['server']['loggingLevel'])

    def get_server_loggingFile(self):
        """Path of the log file."""
        return self.m_config['server']['loggingFile']

    def set_music_path(self, path):
        self.m_config['music']['path'] = path

    def set_music_volume(self, volume):
        # BUG FIX: configparser values must be strings; coerce so that
        # passing an int here no longer breaks save().
        self.m_config['music']['volume'] = str(volume)

    def set_server_pass(self, passwd):
        # BUG FIX: get_hashfrompass() returns raw bytes; store it
        # hex-encoded exactly like __init__ does, otherwise configparser
        # raises when the config is written.
        self.m_config['server']['hash'] = byteToHex(get_hashfrompass(passwd))

    def set_server_port(self, port):
        # Coerce to str for the same configparser reason as above.
        self.m_config['server']['port'] = str(port)

    def save(self):
        """Persist the in-memory configuration to disk."""
        with open(self.m_path, 'w') as configfile:
            self.m_config.write(configfile)
| StarcoderdataPython |
1678170 | <reponame>bober/hello-world<filename>src/aos_vis_client/vis_data_accessors/vis_data_accessor.py
# Copyright (c) 2018 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from copy import copy
from time import sleep, time
from aos_vis_client.vis_data_accessors import VISBase, RequestInfo, VISClientNoValueException
logger = logging.getLogger(__name__)
class VISDataAccessor(VISBase):
    """
    Provide functionality for reading/writing data from/to VIS.

    At most one "get" and one "set" request are outstanding at a time,
    tracked by ``_get_request_info`` / ``_set_request_info``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ws = None                   # websocket handle (set externally)
        self.is_data_received = False    # True once a value arrived from VIS
        self._value = None               # last value received for this path
        self._get_request_info = None    # RequestInfo of the pending "get"
        self._set_request_info = None    # RequestInfo of the pending "set"

    @property
    def request_ids(self):
        """IDs of the currently pending get/set requests (0-2 items)."""
        requests_info = (self._get_request_info, self._set_request_info, )
        return [item.id for item in requests_info if item]

    def get_value(self, wait_timeout=None):
        """
        Returns VIS value.
        Will raise `VISClientNoValueException` if there is no value and
        `wait_timeout` is None; raises TimeoutError when the value does not
        arrive within `wait_timeout` seconds.
        """
        if wait_timeout:
            start_time = time()
            while not self.is_data_received:
                sleep(self.WAITING_INTERVAL)
                if wait_timeout and start_time + wait_timeout < time():
                    raise TimeoutError()
        else:
            if not self.is_data_received:
                raise VISClientNoValueException()
        return copy(self._value)

    def send_get_action(self):
        """
        Initialize and send 'get' action to VIS (at most one outstanding).
        """
        if self._get_request_info and self._get_request_info.timeout < time():
            logger.error(
                "Response was not received from VIS for {path} path in {seconds} seconds, requesting data again."
                .format(path=self.path, seconds=self.TIMEOUT_LIMIT)
            )
            # BUG FIX: drop the stale request so a new one is actually sent
            # below, as the log message above promises.
            self._get_request_info = None
        else:
            logger.debug("Waiting for VIS response for {} path.".format(self.path))
        if self._get_request_info is None:
            request_id = self._send_get_action()
            self._get_request_info = RequestInfo(id=request_id, timeout=time() + self.TIMEOUT_LIMIT)

    def send_set_action(self, value):
        """
        Initialize and send 'set' message to VIS (at most one outstanding).
        """
        if self._set_request_info and self._set_request_info.timeout < time():
            logger.error(
                "Set respond was not received from VIS for setting {path} path in {seconds} seconds.".format(
                    path=self.path, seconds=self.TIMEOUT_LIMIT
                )
            )
            # BUG FIX: drop the stale request so a new set can be sent.
            self._set_request_info = None
        else:
            logger.debug("VIS did not respond for the last set operation for {} path yet.".format(self.path))
        if self._set_request_info is None:
            request_id = self._send_set_action(value)
            self._set_request_info = RequestInfo(id=request_id, timeout=time() + self.TIMEOUT_LIMIT)

    def process(self, data):
        """
        Handle a VIS response to a previously sent get/set request.
        """
        request_id = data.get(self.VIS_REQUEST_ID)
        if not request_id:
            logger.error("Data structure does not have {} field.".format(self.VIS_REQUEST_ID))
            return
        # BUG FIX: the original dereferenced self._get_request_info.id /
        # self._set_request_info.id unconditionally, raising AttributeError
        # when no such request was pending.
        if self._get_request_info and request_id == self._get_request_info.id:
            logger.info("Received value for path {}".format(self.path))
            self._value = data.get(self.VIS_VALUE)
            self.is_data_received = True
            # Completed: allow a fresh get request to be issued later.
            self._get_request_info = None
        elif self._set_request_info and request_id == self._set_request_info.id:
            logger.info("Received VIS confirmation to data set for path {}".format(self.path))
            # Completed: allow a fresh set request to be issued later.
            self._set_request_info = None
        else:
            logger.error("Received message with unexpected request id {mid}, expected: {sid}".format(
                mid=request_id,
                sid=str(self.request_ids)
            ))
| StarcoderdataPython |
9722701 | <reponame>dcs4cop/xcube-cci
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from xcube.constants import EXTENSION_POINT_DATA_OPENERS
from xcube.constants import EXTENSION_POINT_DATA_STORES
from xcube.util import extension
from xcube_cci.constants import DATASET_OPENER_ID
from xcube_cci.constants import DATA_STORE_ID
from xcube_cci.constants import ZARR_DATA_STORE_ID
def init_plugin(ext_registry: extension.ExtensionRegistry):
    """xcube CCI ODP extensions"""
    # User-facing markdown shown in data-store UIs.
    # BUG FIX: removed a stray, never-closed double quote before the
    # [ontology] link that broke the rendered markdown.
    data_store_notice_content = \
        'The ESA CCI Open Data Portal (ODP) utilises an [ontology]' \
        '(http://vocab-test.ceda.ac.uk/ontology/cci/cci-content/index.html) ' \
        'whose terms might slightly differ from the ones used in this ' \
        'software.\nFor example, a *Dataset* in the CCI terminology may ' \
        'refer to all data products generated by a certain CCI project using ' \
        'a specific configuration of algorithms and auxiliary data.\n' \
        'In this software, a *Data Source* refers to a subset (a file set) ' \
        'of a given ODP dataset whose data share a common spatio-temporal ' \
        'grid and/or share other common properties, e.g. the instrument ' \
        'used for the original measurements.\n' \
        'In addition, the term *Dataset* is used to represent in-memory ' \
        'instances of gridded data sources or subsets of them.'
    # BUG FIX: removed an unbalanced quote around "ESA CCI Open Data Portal
    # (ODP)", added the missing '[' that broke the FTP-server markdown link,
    # and closed the trailing backtick around `ice_sheets_greenland`.
    data_completeness_content = \
        'This data store currently provides **only a subset of all ' \
        'datasets** provided by the ESA CCI Open Data Portal (ODP), ' \
        'namely gridded datasets originally stored in NetCDF format.\n' \
        'In upcoming versions, the store will also allow for browsing and ' \
        'accessing the remaining ODP datasets. This includes gridded data in ' \
        'TIFF format and also vector data using ESRI Shapefile format.\n' \
        'For the time being users can download the missing vector data from ' \
        '[the ODP FTP server](http://cci.esa.int/data#ftp) ' \
        '`ftp://anon-ftp.ceda.ac.uk/neodc/esacci/`\n' \
        '* CCI Glaciers in FTP directory `glaciers`\n' \
        '* CCI Ice Sheets in FTP directories `ice_sheets_antarctica` and ' \
        '`ice_sheets_greenland`'
    # Register the main ODP data store.
    ext_registry.add_extension(
        loader=extension.import_component(
            'xcube_cci.dataaccess:CciOdpDataStore'),
        point=EXTENSION_POINT_DATA_STORES,
        name=DATA_STORE_ID,
        description='ESA CCI Open Data Portal',
        data_store_notices=[dict(id='terminologyClarification',
                                 title='Terminology Clarification',
                                 content=data_store_notice_content,
                                 intent='primary',
                                 icon='info-sign'),
                            dict(id='dataCompleteness',
                                 title='Data Completeness',
                                 content=data_completeness_content,
                                 intent='warning',
                                 icon='warning-sign')])
    # Register the dataset opener used by the store above.
    ext_registry.add_extension(
        loader=extension.import_component(
            'xcube_cci.dataaccess:CciOdpDatasetOpener'),
        point=EXTENSION_POINT_DATA_OPENERS,
        name=DATASET_OPENER_ID,
        description='xarray.Dataset in Zarr format'
                    ' from ESA CCI Open Data Portal'
    )
    # Register the Zarr-on-object-storage store.
    ext_registry.add_extension(
        loader=extension.import_component(
            'xcube_cci.zarraccess:CciZarrDataStore'),
        point=EXTENSION_POINT_DATA_STORES,
        name=ZARR_DATA_STORE_ID,
        description='xarray.Dataset in Zarr format'
                    ' from ESA CCI Object Storage'
    )
| StarcoderdataPython |
11327781 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by versioneer-0.6
# (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
# (via the export-subst attribute: unexpanded they read "$Format:...$").
# NOTE: this module targets Python 2 (print statements, "except E, e").
git_refnames = " (HEAD, 1.17b1, 1.17, origin/firefox31, origin/1.17, 1.17)"
git_full = "12f7d53e8b5fc015a15fa4a30fa588e81e9e9b2e"
import subprocess
def run_command(args, cwd=None, verbose=False):
    """Run *args* as a subprocess and return its stripped stdout.

    Returns None if the command cannot be started or exits non-zero.
    (Python 2 syntax: ``except E, e`` and print statements.)
    """
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError, e:
        if verbose:
            print "unable to run %s" % args[0]
            print e
        return None
    stdout = p.communicate()[0].strip()
    if p.returncode != 0:
        if verbose:
            print "unable to run %s (error)" % args[0]
        return None
    return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
    """Scrape git_refnames/git_full assignments out of *versionfile_source*.

    The code embedded in _version.py can just read those variables
    directly; setup.py must not import _version.py, so it parses the
    file with a regexp instead. This function is not used from
    _version.py itself.

    Returns a dict with any of the keys "refnames"/"full" that were
    found; an unreadable file yields an empty dict.
    """
    variables = {}
    try:
        # BUG FIX: the original leaked the file handle; close it
        # deterministically with a context manager.
        with open(versionfile_source, "r") as fobj:
            lines = fobj.readlines()
    except EnvironmentError:
        return variables
    for line in lines:
        stripped = line.strip()
        if stripped.startswith("git_refnames ="):
            mo = re.search(r'=\s*"(.*)"', line)
            if mo:
                variables["refnames"] = mo.group(1)
        elif stripped.startswith("git_full ="):
            mo = re.search(r'=\s*"(.*)"', line)
            if mo:
                variables["full"] = mo.group(1)
    return variables
def versions_from_expanded_variables(variables, tag_prefix):
    """Derive a {"version", "full"} dict from expanded git-archive variables.

    Returns {} when the variables still contain the unexpanded $Format$
    markers (i.e. this is not an unpacked git-archive tarball).
    """
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # Assume all version tags have a digit. git's %d expansion
    # behaves like git log --decorate=short and strips out the
    # refs/heads/ and refs/tags/ prefixes that would let us
    # distinguish between branches and tags. By ignoring refnames
    # without digits, we filter out many common branch names like
    # "release" and "stabilization", as well as "HEAD" and "master".
    refs = set(r for r in refs if re.search(r'\d', r))
    full = variables["full"].strip()
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            return {"version": ref[len(tag_prefix):],
                    "full": full}
    # no suitable tags, so we fall back to the full revision id
    return {"version": full,
            "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Compute {"version", "full"} by running git in the source tree.

    Returns {} when not inside a checked-out git tree or when git fails.
    (Python 2 print statements; keep this file py2-compatible.)
    """
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, thus
    # the containing directory is the root of the source tree), or someone
    # ran a project-specific entry point (and this code is in _version.py,
    # thus the containing directory is somewhere deeper in the source tree).
    # This only gets called if the git-archive 'subst' variables were *not*
    # expanded, and _version.py hasn't already been rewritten with a short
    # version string, meaning we're inside a checked out source tree.
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {} # not always correct
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    for i in range(len(versionfile_source.split("/"))):
        root = os.path.dirname(root)
    if not os.path.exists(os.path.join(root, ".git")):
        return {}
    GIT = "git"
    # Windows needs the .cmd wrapper because Popen runs with shell=False.
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print "tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # Propagate the -dirty marker from "git describe" onto the full SHA.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Derive the version from the name of the directory containing the tree.

    Source tarballs conventionally unpack into "<project>-<version>/";
    returns {"version": ..., "full": ""} or None when the directory name
    does not start with *parentdir_prefix*.
    """
    try:
        here = os.path.abspath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to _version.py, when
        # this is used by the runtime. Invert this to find the root from
        # __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    except NameError:
        # try a couple different things to handle py2exe, bbfreeze, and
        # non-CPython implementations which don't do __file__. This code
        # either lives in versioneer.py (used by setup.py) or _version.py
        # (used by the runtime). In the versioneer.py case, sys.argv[0] will
        # be setup.py, in the root of the source tree. In the _version.py
        # case, we have no idea what sys.argv[0] is (some
        # application-specific runner).
        root = os.path.dirname(os.path.abspath(sys.argv[0]))
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print "dirname '%s' doesn't start with prefix '%s'" % (dirname, parentdir_prefix)
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
# Project-specific versioneer configuration.
tag_prefix = ""
parentdir_prefix = "addon-sdk-"
versionfile_source = "python-lib/cuddlefish/_version.py"
def get_versions():
    """Return {"version", "full"} using the first strategy that works:
    expanded git-archive variables, then a live git checkout, then the
    parent directory name; falls back to "unknown"."""
    variables = { "refnames": git_refnames, "full": git_full }
    ver = versions_from_expanded_variables(variables, tag_prefix)
    if not ver:
        ver = versions_from_vcs(tag_prefix, versionfile_source)
    if not ver:
        ver = versions_from_parentdir(parentdir_prefix, versionfile_source)
    if not ver:
        ver = {"version": "unknown", "full": ""}
    return ver
| StarcoderdataPython |
8087901 | <gh_stars>1-10
#from socket import CAN_J1939
from ..ModuleFactory import *
from .. import bitutils as bu
import sys
import q3c
class ModuleImpl6522(ModuleImplBase):
    """
    Emulation wrapper for a 6522-style VIA chip backed by the native
    ``q3c.c6522`` implementation.

    All chip pins are packed into one integer (``self._pins``); the class
    constants below are bit offsets of each pin/bus inside that integer.
    NOTE(review): offsets/semantics inferred from names and usage —
    confirm against the q3c pin layout.
    """
    # Bit offsets into self._pins (used via readPins/writePins).
    RS0 = 0    # register select bus RS0..RS3
    D0 = 16    # data bus D0..D7
    RW = 24    # "B" in the original marked active-low (bar) signals
    PHI2 = 25  # clock input
    IRQ = 26
    CS1 = 40   # chip selects (CS2 active-low)
    CS2 = 41
    CA1 = 42   # port A/B control lines
    CA2 = 43
    CB1 = 44
    CB2 = 45
    PA0 = 48   # port A bus PA0..PA7
    PB0 = 56   # port B bus PB0..PB7

    def __init__(self, **kwargs):
        """Set up empty pin/clock state; the native chip is created in open()."""
        self._opened = None     # response dict of the native 'open' call
        self._pins = 0          # packed pin state
        self._prevPins = None   # pin state last pushed to the signals
        self._prevResb = None   # previous RESB level (edge detection)
        self._prevClock = None  # previous PHI2 level (edge detection)
        self._iv = None         # native instance handle ("iv") from open()
        super(ModuleImpl6522, self).__init__(**kwargs)
        self._moduleType = ModuleType.ATOMIC

    def __del__(self):
        log.warn("Hello from 6522")
        super(ModuleImpl6522, self).__del__()

    def init(self, **kwargs) -> dict:
        """Module init hook; the native chip needs no init parameters."""
        self._init = {}
        return self._init

    def open(self):
        """Create the native chip instance and declare the module's IOs."""
        self._opened = q3c.c6522({
            'command': 'open'
        })
        # The native side reports the initial pin state as a little-endian
        # binary blob plus the instance handle used for later calls.
        self._pins = bu.lebin2dec(self._opened['lebin'])
        self._iv = self._opened['iv']
        # IMPROVED: the original spelled out twelve near-identical newIO()
        # calls; declare them from one table instead (same names, order and
        # arguments).  'from' is the bit offset inside self._pins, or None
        # when the IO carries no pin mapping.
        specs = [
            # name,   size, ioType,         direction,       'from' offset
            ('RESB',  1,    IoType.INPUT,   direction.LEFT,  None),
            ('D0',    8,    IoType.DYNAMIC, direction.LEFT,  self.D0),
            ('RWB',   1,    IoType.INPUT,   direction.LEFT,  self.RW),
            ('PHI2',  1,    IoType.INPUT,   direction.RIGHT, None),
            ('CS2B',  1,    IoType.INPUT,   direction.LEFT,  self.CS2),
            ('CS1',   1,    IoType.INPUT,   direction.LEFT,  self.CS1),
            ('RS0',   1,    IoType.INPUT,   direction.LEFT,  self.RS0),
            ('RS1',   1,    IoType.INPUT,   direction.LEFT,  self.RS0 + 1),
            ('RS2',   1,    IoType.INPUT,   direction.LEFT,  self.RS0 + 2),
            ('RS3',   1,    IoType.INPUT,   direction.LEFT,  self.RS0 + 3),
            ('PA',    8,    IoType.DYNAMIC, direction.RIGHT, self.PA0),
            ('PB',    8,    IoType.DYNAMIC, direction.RIGHT, self.PB0),
        ]
        io = {}
        for name, size, io_type, io_dir, bit_from in specs:
            kwargs = dict(name=name, size=size, ioType=io_type,
                          direction=io_dir)
            if bit_from is not None:
                kwargs['props'] = {'from': bit_from}
            io[name] = self.newIO(**kwargs)
        self._io = io
        return self._io

    def readBits(self, pins, fr, sz):
        """Read *sz* bits at offset *fr* out of the packed integer *pins*."""
        return bu.readBits(pins, fr, sz)

    def writeBits(self, pins, fr, sz, uint):
        """Return *pins* with *sz* bits at offset *fr* replaced by *uint*."""
        return bu.writeBits(pins, fr, sz, uint)

    def readPins(self, fr, sz):
        """Read bits from the current packed pin state."""
        return self.readBits(self._pins, fr, sz)

    def writePins(self, fr, sz, uint):
        """Write bits into the current packed pin state."""
        self._pins = self.writeBits(self._pins, fr, sz, uint)

    def updateFromNodes(self):
        """Copy driven node values from the netlist into the pin state."""
        for n in self.nodes().values():
            if n.ioType() in [IoType.INPUT, IoType.DYNAMIC]:
                ds = n.driveSignal()
                if ds is not None and n.signals().size() > 1:
                    for s in n.signals().values():
                        if s != ds:
                            ssize = s.size() if s.prop('size') is None else s.prop('size')
                            sfrom = s.prop('from')
                            sv = s.valueAsUInt()
                            if sfrom is not None:
                                self.writePins(sfrom, ssize, sv)

    def updateSignals(self):
        """Push the current pin state out to all mapped signals."""
        for s in self.signals().values():
            ssize = s.size()
            sfrom = s.prop('from')
            if sfrom is not None:
                s.setValue(self.readPins(sfrom, ssize))
        self._prevPins = self._pins

    def calc(self):
        """Advance the chip one simulation step.

        Resets the native chip on a RESB low edge, then on a PHI2 edge
        pulls node values in, runs the native 'calc', and pushes changed
        pins back out to the signals.
        """
        resv = self.sig('RESB').value()
        if not resv and self._prevResb != resv:  # RESB went low: reset chip
            q3c.c6522({
                'command': 'reset',
                'iv': self._iv
            })
        self._prevResb = resv
        clkv = self.sig('PHI2').value()
        # NOTE(review): self._prevClock is never written, so this condition
        # degenerates to "clock is low" after the first call. Kept as-is to
        # preserve behaviour — confirm whether updating _prevClock here was
        # intended.
        clkRise = not clkv and clkv != self._prevClock
        if clkRise:
            self.updateFromNodes()
            self._calc = q3c.c6522({
                'command': 'calc',
                'iv': self._iv,
                'lebin': bu.dec2lebin(self._pins)
            })
            self._pins = bu.lebin2dec(self._calc['lebin'])
        if self._prevPins != self._pins:
            self.updateSignals()
self._prevClock = clkv | StarcoderdataPython |
4867204 | from __future__ import annotations
import asyncio
import math
from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, NewType
import pandas as pd
from distributed.protocol import to_serialize
from distributed.utils import sync
if TYPE_CHECKING:
from distributed.worker import Worker
# Opaque identifier naming one shuffle operation across all workers.
ShuffleId = NewType("ShuffleId", str)
# NOTE: we use these dataclasses primarily for type-checking benefits.
# They take the place of positional arguments to `shuffle_init`,
# which the type-checker can't validate when it's called as an RPC.
@dataclass(frozen=True, eq=False)
class NewShuffleMetadata:
    "Metadata to create a shuffle"
    id: ShuffleId
    # Empty prototype DataFrame returned for output partitions that
    # received no data (see `Shuffle.get_output_partition`).
    empty: pd.DataFrame
    # Name of the integer column holding each row's output partition number.
    column: str
    # Total number of output partitions produced by the shuffle.
    npartitions: int
@dataclass(frozen=True, eq=False)
class ShuffleMetadata(NewShuffleMetadata):
    """
    Metadata every worker needs to share about a shuffle.

    A `ShuffleMetadata` is created with a task and sent to all workers
    over the `ShuffleWorkerExtension.shuffle_init` RPC. Output partitions
    are assigned to workers in contiguous ranges.
    """

    workers: list[str]

    def worker_for(self, output_partition: int) -> str:
        "Get the address of the worker which should hold this output partition number"
        assert output_partition >= 0, f"Negative output partition: {output_partition}"
        if output_partition >= self.npartitions:
            raise IndexError(
                f"Output partition {output_partition} does not exist in a shuffle producing {self.npartitions} partitions"
            )
        # Contiguous block assignment: partition p maps to worker
        # floor(n_workers * p / npartitions).
        idx = len(self.workers) * output_partition // self.npartitions
        return self.workers[idx]

    def _partition_range(self, worker: str) -> tuple[int, int]:
        "Get the output partition numbers (inclusive) that a worker will hold"
        idx = self.workers.index(worker)
        n_workers = len(self.workers)
        first = math.ceil(self.npartitions * idx / n_workers)
        last = math.ceil(self.npartitions * (idx + 1) / n_workers) - 1
        return first, last

    def npartitions_for(self, worker: str) -> int:
        "Get the number of output partitions a worker will hold"
        first, last = self._partition_range(worker)
        return last - first + 1
class Shuffle:
    "State for a single active shuffle on one worker"

    def __init__(self, metadata: ShuffleMetadata, worker: Worker) -> None:
        self.metadata = metadata
        self.worker = worker
        # Received input shards, grouped by output partition number.
        self.output_partitions: defaultdict[int, list[pd.DataFrame]] = defaultdict(list)
        # How many output partitions this worker still has to hand out.
        self.output_partitions_left = metadata.npartitions_for(worker.address)
        # Becomes True at the barrier; no more inputs may arrive after that.
        self.transferred = False

    def receive(self, output_partition: int, data: pd.DataFrame) -> None:
        """Store a shard of *output_partition* sent to us by a peer."""
        assert not self.transferred, "`receive` called after barrier task"
        self.output_partitions[output_partition].append(data)

    async def add_partition(self, data: pd.DataFrame) -> None:
        """Split one input partition by output partition and send each
        piece to the worker that owns it (concurrently, via RPC)."""
        assert not self.transferred, "`add_partition` called after barrier task"
        tasks = []
        # NOTE: `groupby` blocks the event loop, but it also holds the GIL,
        # so we don't bother offloading to a thread. See bpo-7946.
        for output_partition, data in data.groupby(self.metadata.column):
            # NOTE: `column` must refer to an integer column, which is the output partition number for the row.
            # This is always `_partitions`, added by `dask/dataframe/shuffle.py::shuffle`.
            addr = self.metadata.worker_for(int(output_partition))
            task = asyncio.create_task(
                self.worker.rpc(addr).shuffle_receive(
                    shuffle_id=self.metadata.id,
                    output_partition=output_partition,
                    data=to_serialize(data),
                )
            )
            tasks.append(task)
        # TODO Once RerunGroup logic exists (https://github.com/dask/distributed/issues/5403),
        # handle errors and cancellation here in a way that lets other workers cancel & clean up their shuffles.
        # Without it, letting errors kill the task is all we can do.
        await asyncio.gather(*tasks)

    def get_output_partition(self, i: int) -> pd.DataFrame:
        """Concatenate and return output partition *i*; may only be called
        after the barrier, once per partition this worker owns."""
        assert self.transferred, "`get_output_partition` called before barrier task"
        assert self.metadata.worker_for(i) == self.worker.address, (
            f"Output partition {i} belongs on {self.metadata.worker_for(i)}, "
            f"not {self.worker.address}. {self.metadata!r}"
        )
        # ^ NOTE: this check isn't necessary, just a nice validation to prevent incorrect
        # data in the case something has gone very wrong
        assert (
            self.output_partitions_left > 0
        ), f"No outputs remaining, but requested output partition {i} on {self.worker.address}."
        self.output_partitions_left -= 1
        try:
            parts = self.output_partitions.pop(i)
        except KeyError:
            # No shard ever arrived for this partition: return the empty
            # prototype frame so the schema still matches.
            return self.metadata.empty
        assert parts, f"Empty entry for output partition {i}"
        return pd.concat(parts, copy=False)

    def inputs_done(self) -> None:
        """Mark the barrier: all inputs have been transferred."""
        assert not self.transferred, "`inputs_done` called multiple times"
        self.transferred = True

    def done(self) -> bool:
        """True once the barrier passed and all owned outputs were handed out."""
        return self.transferred and self.output_partitions_left == 0
class ShuffleWorkerExtension:
    "Extend the Worker with routes and state for peer-to-peer shuffles"

    def __init__(self, worker: Worker) -> None:
        # Attach to worker
        worker.handlers["shuffle_receive"] = self.shuffle_receive
        worker.handlers["shuffle_init"] = self.shuffle_init
        worker.handlers["shuffle_inputs_done"] = self.shuffle_inputs_done
        worker.extensions["shuffle"] = self

        # Initialize
        self.worker: Worker = worker
        # All shuffles currently active on this worker, keyed by shuffle ID.
        self.shuffles: dict[ShuffleId, Shuffle] = {}

    # Handlers
    ##########
    # NOTE: handlers are not threadsafe, but they're called from async comms, so that's okay

    def shuffle_init(self, comm: object, metadata: ShuffleMetadata) -> None:
        """
        Handler: Register a new shuffle that is about to begin.
        Using a shuffle with an already-known ID is an error.
        """
        if metadata.id in self.shuffles:
            raise ValueError(
                f"Shuffle {metadata.id!r} is already registered on worker {self.worker.address}"
            )
        self.shuffles[metadata.id] = Shuffle(metadata, self.worker)

    def shuffle_receive(
        self,
        comm: object,
        shuffle_id: ShuffleId,
        output_partition: int,
        data: pd.DataFrame,
    ) -> None:
        """
        Handler: Receive an incoming shard of data from a peer worker.
        Using an unknown ``shuffle_id`` is an error.
        """
        self._get_shuffle(shuffle_id).receive(output_partition, data)

    def shuffle_inputs_done(self, comm: object, shuffle_id: ShuffleId) -> None:
        """
        Handler: Inform the extension that all input partitions have been handed off to extensions.
        Using an unknown ``shuffle_id`` is an error.
        """
        shuffle = self._get_shuffle(shuffle_id)
        shuffle.inputs_done()
        if shuffle.done():
            # If the shuffle has no output partitions, remove it now;
            # `get_output_partition` will never be called.
            # This happens when there are fewer output partitions than workers.
            del self.shuffles[shuffle_id]

    # Tasks
    #######

    def create_shuffle(self, new_metadata: NewShuffleMetadata) -> ShuffleMetadata:
        # Synchronous wrapper for use from within tasks (worker threads).
        return sync(self.worker.loop, self._create_shuffle, new_metadata)  # type: ignore

    async def _create_shuffle(
        self, new_metadata: NewShuffleMetadata
    ) -> ShuffleMetadata:
        """
        Task: Create a new shuffle and broadcast it to all workers.
        """
        # TODO would be nice to not have to have the RPC in this method, and have shuffles started implicitly
        # by the first `receive`/`add_partition`. To do that, shuffle metadata would be passed into
        # every task, and from there into the extension (rather than stored within a `Shuffle`),
        # However:
        # 1. It makes scheduling much harder, since it's a widely-shared common dep
        #    (https://github.com/dask/distributed/pull/5325)
        # 2. Passing in metadata everywhere feels contrived when it would be so easy to store
        # 3. The metadata may not be _that_ small (1000s of columns + 1000s of workers);
        #    serializing and transferring it repeatedly adds overhead.
        if new_metadata.id in self.shuffles:
            raise ValueError(
                f"Shuffle {new_metadata.id!r} is already registered on worker {self.worker.address}"
            )
        # Snapshot the current worker set from the scheduler; the shuffle is
        # pinned to exactly these workers for its whole lifetime.
        identity = await self.worker.scheduler.identity()
        workers = list(identity["workers"])
        metadata = ShuffleMetadata(
            new_metadata.id,
            new_metadata.empty,
            new_metadata.column,
            new_metadata.npartitions,
            workers,
        )

        # Start the shuffle on all peers
        # Note that this will call `shuffle_init` on our own worker as well
        await asyncio.gather(
            *(
                self.worker.rpc(addr).shuffle_init(metadata=to_serialize(metadata))
                for addr in metadata.workers
            ),
        )
        # TODO handle errors from peers, and cancellation.
        # If any peers can't start the shuffle, tell successful peers to cancel it.

        return metadata  # NOTE: unused in tasks, just handy for tests

    def add_partition(self, data: pd.DataFrame, shuffle_id: ShuffleId) -> None:
        # Synchronous wrapper for use from within tasks (worker threads).
        sync(self.worker.loop, self._add_partition, data, shuffle_id)

    async def _add_partition(self, data: pd.DataFrame, shuffle_id: ShuffleId) -> None:
        """
        Task: Hand off an input partition to the ShuffleExtension.
        This will block until the extension is ready to receive another input partition.
        Using an unknown ``shuffle_id`` is an error.
        """
        await self._get_shuffle(shuffle_id).add_partition(data)

    def barrier(self, shuffle_id: ShuffleId) -> None:
        # Synchronous wrapper for use from within tasks (worker threads).
        sync(self.worker.loop, self._barrier, shuffle_id)

    async def _barrier(self, shuffle_id: ShuffleId) -> None:
        """
        Task: Note that the barrier task has been reached (`add_partition` called for all input partitions)
        Using an unknown ``shuffle_id`` is an error. Calling this before all partitions have been
        added is undefined.
        """
        # NOTE: in this basic shuffle implementation, doing things during the barrier
        # is mostly unnecessary. We only need it to inform workers that don't receive
        # any output partitions that they can clean up.
        # (Otherwise, they'd have no way to know if they needed to keep the `Shuffle` around
        # for more input partitions, which might come at some point. Workers that _do_ receive
        # output partitions could infer this, since once `get_output_partition` gets called the
        # first time, they can assume there are no more inputs.)
        #
        # Technically right now, we could call the `shuffle_inputs_done` RPC only on workers
        # where `metadata.npartitions_for(worker) == 0`.
        # However, when we have buffering, this barrier step will become important for
        # all workers, since they'll use it to flush their buffers and send any leftover shards
        # to their peers.
        metadata = self._get_shuffle(shuffle_id).metadata

        # Set worker restrictions for unpack tasks
        # Could do this during `create_shuffle`, but we might as well overlap it with the time
        # workers will be flushing buffers to each other.
        name = "shuffle-unpack-" + metadata.id  # TODO single-source task name
        # FIXME TODO XXX what about when culling means not all of the output tasks actually exist??!
        # - these restrictions are invalid
        # - get_output_partition won't be called enough times, so cleanup won't happen
        # - also, we're transferring data we don't need to transfer
        restrictions = {
            f"('{name}', {i})": [metadata.worker_for(i)]
            for i in range(metadata.npartitions)
        }

        # Tell all peers that we've reached the barrier
        # Note that this will call `shuffle_inputs_done` on our own worker as well
        await asyncio.gather(
            *(
                self.worker.rpc(worker).shuffle_inputs_done(shuffle_id=shuffle_id)
                for worker in metadata.workers
            ),
            self.worker.scheduler.set_restrictions(worker=restrictions),
        )
        # TODO handle errors from workers and scheduler, and cancellation.

    def get_output_partition(
        self, shuffle_id: ShuffleId, output_partition: int
    ) -> pd.DataFrame:
        """
        Task: Retrieve a shuffled output partition from the ShuffleExtension.
        Calling this for a ``shuffle_id`` which is unknown or incomplete is an error.
        """
        shuffle = self._get_shuffle(shuffle_id)
        output = shuffle.get_output_partition(output_partition)
        if shuffle.done():
            # key missing if another thread got to it first
            self.shuffles.pop(shuffle_id, None)
        return output

    def _get_shuffle(self, shuffle_id: ShuffleId) -> Shuffle:
        "Get a shuffle by ID; raise ValueError if it's not registered."
        try:
            return self.shuffles[shuffle_id]
        except KeyError:
            raise ValueError(
                f"Shuffle {shuffle_id!r} is not registered on worker {self.worker.address}"
            ) from None
| StarcoderdataPython |
8171837 | import re
from django.http import JsonResponse
from rest_framework import status
from .models import ServiceAccount
class ServiceAccountControlMixin:
    """ A mixin for Django Rest Framework viewsets that checks if the request is made from a ServiceAccount and only
    allows access to the endpoint if the ServiceAccount is enabled and admin_enabled. If the ServiceAccount is disabled
    by the owner or an admin, a 403 error will be received instead of the API response. This currently works for APIs
    using TokenAuthentication (rest_framework.authentication.TokenAuthentication). """

    def dispatch(self, request, *args, **kwargs):
        # Extract the token key from an "Authorization: Token <key>" header.
        # Raw string: '\S' is an invalid escape sequence in a plain literal
        # (DeprecationWarning today, a SyntaxError in a future Python).
        mo = re.search(r'Token (\S+)', request.META.get('HTTP_AUTHORIZATION', ''))
        if mo:
            srv_acct = ServiceAccount.objects.get_object_or_none(user__auth_token__key=mo.group(1))
            if srv_acct:
                # An administrative disable takes precedence over the owner's setting.
                if srv_acct.admin_enabled is False:
                    return JsonResponse(data={'detail': 'this service account has been administratively disabled'},
                                        status=status.HTTP_403_FORBIDDEN)
                elif srv_acct.enabled is False:
                    return JsonResponse(data={'detail': 'this service account has been disabled'},
                                        status=status.HTTP_403_FORBIDDEN)
        # Non-token requests and enabled service accounts proceed normally.
        return super().dispatch(request, *args, **kwargs)
class AllowOnlyServiceAccountMixin:
    """ A mixin for Django Rest Framework viewsets that only allows responses to requests made from ServiceAccounts.
    This currently works for APIs using TokenAuthentication (rest_framework.authentication.TokenAuthentication)."""

    def dispatch(self, request, *args, **kwargs):
        # Extract the token key from an "Authorization: Token <key>" header.
        # Raw string: '\S' is an invalid escape sequence in a plain literal
        # (DeprecationWarning today, a SyntaxError in a future Python).
        mo = re.search(r'Token (\S+)', request.META.get('HTTP_AUTHORIZATION', ''))
        if mo:
            srv_acct = ServiceAccount.objects.get_object_or_none(user__auth_token__key=mo.group(1))
            if not srv_acct:
                return JsonResponse(data={'detail': 'access to this endpoint is only available to service accounts'},
                                    status=status.HTTP_403_FORBIDDEN)
        # NOTE(review): requests with no token header at all fall through to
        # normal dispatch rather than being rejected — confirm that is intended.
        return super().dispatch(request, *args, **kwargs)
class AllowOnlyEnabledServiceAccountMixin:
    """ A mixin for Django Rest Framework viewsets that checks if the request is made from a ServiceAccount and only
    allows access to the endpoint if an enabled ServiceAccount made the request. If the ServiceAccount is disabled
    by the owner or an admin, a 403 error will be received instead of the API response. This currently works for APIs
    using TokenAuthentication (rest_framework.authentication.TokenAuthentication). """

    def dispatch(self, request, *args, **kwargs):
        # Extract the token key from an "Authorization: Token <key>" header.
        # Raw string: '\S' is an invalid escape sequence in a plain literal
        # (DeprecationWarning today, a SyntaxError in a future Python).
        mo = re.search(r'Token (\S+)', request.META.get('HTTP_AUTHORIZATION', ''))
        if mo:
            srv_acct = ServiceAccount.objects.get_object_or_none(user__auth_token__key=mo.group(1))
            if srv_acct:
                # An administrative disable takes precedence over the owner's setting.
                if srv_acct.admin_enabled is False:
                    return JsonResponse(data={'detail': 'this service account has been administratively disabled'},
                                        status=status.HTTP_403_FORBIDDEN)
                elif srv_acct.enabled is False:
                    return JsonResponse(data={'detail': 'this service account has been disabled'},
                                        status=status.HTTP_403_FORBIDDEN)
            else:
                return JsonResponse(data={'detail': 'access to this endpoint is only available to enabled '
                                                    'service accounts'}, status=status.HTTP_403_FORBIDDEN)
        return super().dispatch(request, *args, **kwargs)
| StarcoderdataPython |
9674556 | <filename>setup.py
from setuptools import setup, find_packages
# Read the long description eagerly so the file handle is closed promptly and
# decoding does not depend on the platform's default locale encoding.
with open('README.md', encoding='utf-8') as readme:
    _long_description = readme.read()

setup(
    name='abm1559',
    url='https://github.com/barnabemonnot/abm1559',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(include=['abm1559', 'abm1559.*']),
    install_requires=['numpy', 'pandas'],
    version='0.0.2',
    license='MIT',
    description='Agent-based simulation environment for EIP 1559',
    long_description=_long_description,
    long_description_content_type="text/markdown",
    python_requires='>=3.8',
)
| StarcoderdataPython |
1736348 | """
Platformer Game
python -m arcade.examples.platform_tutorial.11_animate_character
"""
import math
import os
import arcade
# Constants
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Platformer"

# Constants used to scale our sprites from their original size
TILE_SCALING = 0.5
CHARACTER_SCALING = TILE_SCALING * 2
COIN_SCALING = TILE_SCALING
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = SPRITE_PIXEL_SIZE * TILE_SCALING

# Movement speed of player, in pixels per frame
PLAYER_MOVEMENT_SPEED = 7
GRAVITY = 1.5
PLAYER_JUMP_SPEED = 30

# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
LEFT_VIEWPORT_MARGIN = 200
RIGHT_VIEWPORT_MARGIN = 200
BOTTOM_VIEWPORT_MARGIN = 150
TOP_VIEWPORT_MARGIN = 100

# Player spawn point, in tile coordinates
PLAYER_START_X = 2
PLAYER_START_Y = 1

# Constants used to track if the player is facing left or right
# (also the index into each [right, left] texture pair)
RIGHT_FACING = 0
LEFT_FACING = 1

# Names of the layers in the tile map / scene
LAYER_NAME_MOVING_PLATFORMS = "Moving Platforms"
LAYER_NAME_PLATFORMS = "Platforms"
LAYER_NAME_COINS = "Coins"
LAYER_NAME_BACKGROUND = "Background"
LAYER_NAME_LADDERS = "Ladders"
LAYER_NAME_PLAYER = "Player"
LAYER_NAME_ENEMIES = "Enemies"
def load_texture_pair(filename):
    """Load a texture twice: once as-is and once mirrored horizontally.

    Returns a two-element list indexed by RIGHT_FACING / LEFT_FACING.
    """
    normal = arcade.load_texture(filename)
    mirrored = arcade.load_texture(filename, flipped_horizontally=True)
    return [normal, mirrored]
class Entity(arcade.Sprite):
    """Base sprite that loads idle/jump/fall/walk/climb textures for both facings.

    Textures are loaded from arcade's bundled resources under
    ``:resources:images/animated_characters/{name_folder}/{name_file}_*.png``.
    """

    def __init__(self, name_folder, name_file):
        super().__init__()

        # Default to facing right
        self.facing_direction = RIGHT_FACING

        # Used for image sequences
        self.cur_texture = 0
        self.scale = CHARACTER_SCALING

        main_path = f":resources:images/animated_characters/{name_folder}/{name_file}"

        self.idle_texture_pair = load_texture_pair(f"{main_path}_idle.png")
        self.jump_texture_pair = load_texture_pair(f"{main_path}_jump.png")
        self.fall_texture_pair = load_texture_pair(f"{main_path}_fall.png")

        # Load textures for walking (an 8-frame cycle, each as a facing pair)
        self.walk_textures = []
        for i in range(8):
            texture = load_texture_pair(f"{main_path}_walk{i}.png")
            self.walk_textures.append(texture)

        # Load textures for climbing (two alternating frames, no mirroring)
        self.climbing_textures = []
        texture = arcade.load_texture(f"{main_path}_climb0.png")
        self.climbing_textures.append(texture)
        texture = arcade.load_texture(f"{main_path}_climb1.png")
        self.climbing_textures.append(texture)

        # Set the initial texture
        self.texture = self.idle_texture_pair[0]

        # Hit box will be set based on the first image used. If you want to specify
        # a different hit box, you can do it like the code below.
        # self.set_hit_box([[-22, -64], [22, -64], [22, 28], [-22, 28]])
        self.set_hit_box(self.texture.hit_box_points)
class Enemy(Entity):
    """An Entity that animates itself based on its own horizontal velocity."""

    def __init__(self, name_folder, name_file):
        # Setup parent class
        super().__init__(name_folder, name_file)

        # Frame divider: the walk texture only advances every 4th update call.
        self.should_update_walk = 0

    def update_animation(self, delta_time: float = 1 / 60):

        # Figure out if we need to flip face left or right
        if self.change_x < 0 and self.facing_direction == RIGHT_FACING:
            self.facing_direction = LEFT_FACING
        elif self.change_x > 0 and self.facing_direction == LEFT_FACING:
            self.facing_direction = RIGHT_FACING

        # Idle animation
        if self.change_x == 0:
            self.texture = self.idle_texture_pair[self.facing_direction]
            return

        # Walking animation: advance one frame out of every four updates to
        # slow the cycle down, wrapping after the 8-frame sequence.
        if self.should_update_walk == 3:
            self.cur_texture += 1
            if self.cur_texture > 7:
                self.cur_texture = 0
            self.texture = self.walk_textures[self.cur_texture][self.facing_direction]
            self.should_update_walk = 0
            return

        self.should_update_walk += 1
class RobotEnemy(Enemy):
    """Enemy using the bundled "robot" texture set."""

    def __init__(self):

        # Set up parent class
        super().__init__("robot", "robot")
class ZombieEnemy(Enemy):
    """Enemy using the bundled "zombie" texture set."""

    def __init__(self):

        # Set up parent class
        super().__init__("zombie", "zombie")
class PlayerCharacter(Entity):
    """Player Sprite

    Extends Entity with jump/climb/ladder state; the branch order in
    update_animation gives climbing priority over jumping, and jumping
    over idle/walk.
    """

    def __init__(self):

        # Set up parent class
        super().__init__("male_person", "malePerson")

        # Track our state
        self.jumping = False
        self.climbing = False
        self.is_on_ladder = False

    def update_animation(self, delta_time: float = 1 / 60):

        # Figure out if we need to flip face left or right
        if self.change_x < 0 and self.facing_direction == RIGHT_FACING:
            self.facing_direction = LEFT_FACING
        elif self.change_x > 0 and self.facing_direction == LEFT_FACING:
            self.facing_direction = RIGHT_FACING

        # Climbing animation
        if self.is_on_ladder:
            self.climbing = True
        if not self.is_on_ladder and self.climbing:
            self.climbing = False
        if self.climbing and abs(self.change_y) > 1:
            self.cur_texture += 1
            if self.cur_texture > 7:
                self.cur_texture = 0
        if self.climbing:
            # Two climb frames; cur_texture // 4 alternates between them.
            self.texture = self.climbing_textures[self.cur_texture // 4]
            return

        # Jumping animation
        if self.change_y > 0 and not self.is_on_ladder:
            self.texture = self.jump_texture_pair[self.facing_direction]
            return
        elif self.change_y < 0 and not self.is_on_ladder:
            self.texture = self.fall_texture_pair[self.facing_direction]
            return

        # Idle animation
        if self.change_x == 0:
            self.texture = self.idle_texture_pair[self.facing_direction]
            return

        # Walking animation
        self.cur_texture += 1
        if self.cur_texture > 7:
            self.cur_texture = 0
        self.texture = self.walk_textures[self.cur_texture][self.facing_direction]
class MyGame(arcade.Window):
    """
    Main application class.

    Owns the tile map, scene, cameras, physics engine and input state, and
    implements the arcade.Window event callbacks (draw, key, update).
    """

    def __init__(self):
        """
        Initializer for the game
        """

        # Call the parent class and set up the window
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)

        # Set the path to start with this program
        # (so relative resource paths resolve regardless of the launch CWD)
        file_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(file_path)

        # Track the current state of what key is pressed
        self.left_pressed = False
        self.right_pressed = False
        self.up_pressed = False
        self.down_pressed = False
        # Requires the jump key to be released before another jump triggers.
        self.jump_needs_reset = False

        # Our TileMap Object
        self.tile_map = None

        # Our Scene Object
        self.scene = None

        # Separate variable that holds the player sprite
        self.player_sprite = None

        # Our 'physics' engine
        self.physics_engine = None

        # A Camera that can be used for scrolling the screen
        self.camera = None

        # A Camera that can be used to draw GUI elements
        self.gui_camera = None

        # Right edge of the map, in pixels (set in setup()).
        self.end_of_map = 0

        # Keep track of the score
        self.score = 0

        # Load sounds
        self.collect_coin_sound = arcade.load_sound(":resources:sounds/coin1.wav")
        self.jump_sound = arcade.load_sound(":resources:sounds/jump1.wav")
        self.game_over = arcade.load_sound(":resources:sounds/gameover1.wav")

    def setup(self):
        """Set up the game here. Call this function to restart the game."""

        # Setup the Cameras
        self.camera = arcade.Camera(self.width, self.height)
        self.gui_camera = arcade.Camera(self.width, self.height)

        # Map name
        map_name = f":resources:tiled_maps/map_with_ladders.json"

        # Layer Specific Options for the Tilemap
        # (spatial hashing speeds up collision checks for static layers)
        layer_options = {
            LAYER_NAME_PLATFORMS: {
                "use_spatial_hash": True,
            },
            LAYER_NAME_MOVING_PLATFORMS: {
                "use_spatial_hash": True,
            },
            LAYER_NAME_LADDERS: {
                "use_spatial_hash": True,
            },
            LAYER_NAME_COINS: {
                "use_spatial_hash": True,
            },
        }

        # Load in TileMap
        self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)

        # Initiate New Scene with our TileMap, this will automatically add all layers
        # from the map as SpriteLists in the scene in the proper order.
        self.scene = arcade.Scene.from_tilemap(self.tile_map)

        # Keep track of the score
        self.score = 0

        # Set up the player, specifically placing it at these coordinates.
        self.player_sprite = PlayerCharacter()
        self.player_sprite.center_x = (
            self.tile_map.tiled_map.tile_size[0] * TILE_SCALING * PLAYER_START_X
        )
        self.player_sprite.center_y = (
            self.tile_map.tiled_map.tile_size[1] * TILE_SCALING * PLAYER_START_Y
        )
        self.scene.add_sprite(LAYER_NAME_PLAYER, self.player_sprite)

        # Calculate the right edge of the my_map in pixels
        self.end_of_map = self.tile_map.tiled_map.map_size.width * GRID_PIXEL_SIZE

        # -- Enemies
        # Enemies are defined as objects in the map; their tile coordinates
        # and optional patrol boundaries come from the object properties.
        enemies_layer = self.tile_map.object_lists[LAYER_NAME_ENEMIES]

        for my_object in enemies_layer:
            cartesian = self.tile_map.get_cartesian(
                my_object.shape[0], my_object.shape[1]
            )
            enemy_type = my_object.properties["type"]
            # NOTE(review): an unrecognized "type" would leave `enemy` unbound
            # and raise below — confirm the map only contains these two types.
            if enemy_type == "robot":
                enemy = RobotEnemy()
            elif enemy_type == "zombie":
                enemy = ZombieEnemy()
            enemy.center_x = math.floor(
                cartesian[0] * TILE_SCALING * self.tile_map.tile_width
            )
            enemy.center_y = math.floor(
                (cartesian[1] + 1) * (self.tile_map.tile_height * TILE_SCALING)
            )
            if "boundary_left" in my_object.properties:
                enemy.boundary_left = my_object.properties["boundary_left"]
            if "boundary_right" in my_object.properties:
                enemy.boundary_right = my_object.properties["boundary_right"]
            if "change_x" in my_object.properties:
                enemy.change_x = my_object.properties["change_x"]
            self.scene.add_sprite(LAYER_NAME_ENEMIES, enemy)

        # --- Other stuff
        # Set the background color
        if self.tile_map.tiled_map.background_color:
            arcade.set_background_color(self.tile_map.tiled_map.background_color)

        # Create the 'physics engine'
        self.physics_engine = arcade.PhysicsEnginePlatformer(
            self.player_sprite,
            [
                self.scene.get_sprite_list(LAYER_NAME_PLATFORMS),
                self.scene.get_sprite_list(LAYER_NAME_MOVING_PLATFORMS),
            ],
            gravity_constant=GRAVITY,
            ladders=self.scene.get_sprite_list(LAYER_NAME_LADDERS),
        )

    def on_draw(self):
        """Render the screen."""

        # Clear the screen to the background color
        arcade.start_render()

        # Activate the game camera
        self.camera.use()

        # Draw our Scene
        self.scene.draw()

        # Activate the GUI camera before drawing GUI elements
        self.gui_camera.use()

        # Draw our score on the screen, scrolling it with the viewport
        score_text = f"Score: {self.score}"
        arcade.draw_text(
            score_text,
            10,
            10,
            arcade.csscolor.BLACK,
            18,
        )

        # Draw hit boxes.
        # for wall in self.wall_list:
        #     wall.draw_hit_box(arcade.color.BLACK, 3)
        #
        # self.player_sprite.draw_hit_box(arcade.color.RED, 3)

    def process_keychange(self):
        """
        Called when we change a key up/down or we move on/off a ladder.

        Translates the pressed-key booleans into player velocity, handling
        jump debouncing and ladder movement.
        """
        # Process up/down
        if self.up_pressed and not self.down_pressed:
            if self.physics_engine.is_on_ladder():
                self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED
            elif (
                self.physics_engine.can_jump(y_distance=10)
                and not self.jump_needs_reset
            ):
                self.player_sprite.change_y = PLAYER_JUMP_SPEED
                self.jump_needs_reset = True
                arcade.play_sound(self.jump_sound)
        elif self.down_pressed and not self.up_pressed:
            if self.physics_engine.is_on_ladder():
                self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED

        # Process up/down when on a ladder and no movement
        if self.physics_engine.is_on_ladder():
            if not self.up_pressed and not self.down_pressed:
                self.player_sprite.change_y = 0
            elif self.up_pressed and self.down_pressed:
                self.player_sprite.change_y = 0

        # Process left/right
        if self.right_pressed and not self.left_pressed:
            self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
        elif self.left_pressed and not self.right_pressed:
            self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
        else:
            self.player_sprite.change_x = 0

    def on_key_press(self, key, modifiers):
        """Called whenever a key is pressed."""

        if key == arcade.key.UP or key == arcade.key.W:
            self.up_pressed = True
        elif key == arcade.key.DOWN or key == arcade.key.S:
            self.down_pressed = True
        elif key == arcade.key.LEFT or key == arcade.key.A:
            self.left_pressed = True
        elif key == arcade.key.RIGHT or key == arcade.key.D:
            self.right_pressed = True

        self.process_keychange()

    def on_key_release(self, key, modifiers):
        """Called when the user releases a key."""

        if key == arcade.key.UP or key == arcade.key.W:
            self.up_pressed = False
            self.jump_needs_reset = False
        elif key == arcade.key.DOWN or key == arcade.key.S:
            self.down_pressed = False
        elif key == arcade.key.LEFT or key == arcade.key.A:
            self.left_pressed = False
        elif key == arcade.key.RIGHT or key == arcade.key.D:
            self.right_pressed = False

        self.process_keychange()

    def center_camera_to_player(self):
        # Scroll the game camera so the player is centered, clamped so we
        # never show space left of / below the map origin.
        screen_center_x = self.player_sprite.center_x - (self.camera.viewport_width / 2)
        screen_center_y = self.player_sprite.center_y - (
            self.camera.viewport_height / 2
        )
        if screen_center_x < 0:
            screen_center_x = 0
        if screen_center_y < 0:
            screen_center_y = 0
        player_centered = screen_center_x, screen_center_y

        self.camera.move_to(player_centered, 0.2)

    def on_update(self, delta_time):
        """Movement and game logic"""

        # Move the player with the physics engine
        self.physics_engine.update()

        # Update animations
        # NOTE(review): this looks inverted (can_jump() -> can_jump = False);
        # the attribute is not read anywhere in this file — confirm intent.
        if self.physics_engine.can_jump():
            self.player_sprite.can_jump = False
        else:
            self.player_sprite.can_jump = True

        if self.physics_engine.is_on_ladder() and not self.physics_engine.can_jump():
            self.player_sprite.is_on_ladder = True
            self.process_keychange()
        else:
            self.player_sprite.is_on_ladder = False
            self.process_keychange()

        # Update Animations
        self.scene.update_animation(
            delta_time,
            [
                LAYER_NAME_COINS,
                LAYER_NAME_BACKGROUND,
                LAYER_NAME_PLAYER,
                LAYER_NAME_ENEMIES,
            ],
        )

        # Update moving platforms and enemies
        self.scene.update([LAYER_NAME_MOVING_PLATFORMS, LAYER_NAME_ENEMIES])

        # See if the enemy hit a boundary and needs to reverse direction.
        for enemy in self.scene.get_sprite_list(LAYER_NAME_ENEMIES):
            if (
                enemy.boundary_right
                and enemy.right > enemy.boundary_right
                and enemy.change_x > 0
            ):
                enemy.change_x *= -1

            if (
                enemy.boundary_left
                and enemy.left < enemy.boundary_left
                and enemy.change_x < 0
            ):
                enemy.change_x *= -1

        # See if the moving wall hit a boundary and needs to reverse direction.
        for wall in self.scene.get_sprite_list(LAYER_NAME_MOVING_PLATFORMS):

            if (
                wall.boundary_right
                and wall.right > wall.boundary_right
                and wall.change_x > 0
            ):
                wall.change_x *= -1
            if (
                wall.boundary_left
                and wall.left < wall.boundary_left
                and wall.change_x < 0
            ):
                wall.change_x *= -1
            if wall.boundary_top and wall.top > wall.boundary_top and wall.change_y > 0:
                wall.change_y *= -1
            if (
                wall.boundary_bottom
                and wall.bottom < wall.boundary_bottom
                and wall.change_y < 0
            ):
                wall.change_y *= -1

        # See if we hit any coins
        coin_hit_list = arcade.check_for_collision_with_list(
            self.player_sprite, self.scene.get_sprite_list(LAYER_NAME_COINS)
        )

        # Loop through each coin we hit (if any) and remove it
        for coin in coin_hit_list:

            # Figure out how many points this coin is worth
            if "Points" not in coin.properties:
                print("Warning, collected a coin without a Points property.")
            else:
                points = int(coin.properties["Points"])
                self.score += points

            # Remove the coin
            coin.remove_from_sprite_lists()
            arcade.play_sound(self.collect_coin_sound)

        # Position the camera
        self.center_camera_to_player()
def main():
    """Main function: create the window, build the level, start the event loop."""
    window = MyGame()
    window.setup()
    arcade.run()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9655107 | <filename>noticias_BBB22.py
from requests import get
from bs4 import BeautifulSoup
# Scrape the BBB 22 news listing from purepeople.com.br and print each article.
site = get("https://www.purepeople.com.br/famosos/bbb-22_p554532/noticias/1")
soup = BeautifulSoup(site.content, "html.parser")

noticias = soup.find_all("div", {"class": "c-article-flux__content u-clearfix"})

for noticia in noticias:
    # Look up the headline anchor once: it provides both the title text and the href.
    anchor = noticia.find("a", {"class": "c-article-flux__title u-hover-color u-global-link"})
    titulo = anchor.get_text()
    subtitulo = noticia.find("div", {"class": "c-article-flux__chapo u-visible@tablet"}).get_text()
    link = 'https://www.purepeople.com.br' + anchor["href"]
    data = noticia.find("span", {"class": "c-article-flux__date-day"}).get_text()
    horario = noticia.find("span", {"class": "c-article-flux__date-hour"}).get_text()
    print("-" * 150)
    print(f"titulo: {titulo}")
    print(f"subtitulo: {subtitulo}")
    print(f"link: {link}")
    print(f"Data: {data}", end=" ")
    # "às" was mojibake ("ร s") in the original output string.
    print(f"às {horario}")
11327721 | from baseparser import BaseParser
from bs4 import BeautifulSoup
import re
import datetime
import dateutil.parser
# strftime pattern for normalized article timestamps,
# e.g. "Monday, March  4 2013,  1:05 PM"
DATE_FORMAT = '%A, %B %e %Y, %l:%M %p'


class WashPoParser(BaseParser):
    """Article parser for washingtonpost.com: handles both classic
    "_story.html" pages (via their print view) and "/wp/" blog pages."""

    SUFFIX = '?print=true'

    domains = ['www.washingtonpost.com']

    feeder_pat = '^https?://www.washingtonpost.com/.*_story.html|https?://www.washingtonpost.com/.*/wp/.*/'
    feeder_pages = ['http://www.washingtonpost.com/']

    def _printableurl(self):
        # Swap the story suffix for the print-view variant of the same URL.
        return re.sub('_story.html.*', '_print.html', self.url)

    def _blog_parse(self, soup):
        """Parse a blog-style page; return True on success, False if the
        page does not look like a blog article."""
        elt = soup.find('h1', itemprop="headline")
        if elt is None:
            return False
        self.title = elt.getText().strip()
        elt = soup.find('span', itemprop='author')
        if elt is None:
            self.byline = ''
        else:
            self.byline = elt.getText().strip()
        elt = soup.find('span', itemprop="datePublished")
        if elt is None:
            self.date = ''
        else:
            datestr = elt['content']
            # e.g. "...-0500" -> "...-05:00" so dateutil accepts the offset.
            if datestr[-2:] == u'00' and datestr[-3] != u':':
                datestr = datestr[:-2] + u':00'  # fix timezone formatting
            date = dateutil.parser.parse(datestr)
            self.date = date.strftime(DATE_FORMAT)
        div = soup.find('article', itemprop='articleBody')
        if div is None:
            return False
        # Paragraphs joined with blank lines; leading newline matches the
        # classic-article format below.
        self.body = '\n'+'\n\n'.join([x.getText().strip() for x in div.findAll('p')])
        return True

    def _parse(self, html):
        """Populate title/byline/date/body from the fetched HTML, falling
        back to the blog layout; sets self.real_article = False on failure."""
        soup = BeautifulSoup(html)
        self.meta = soup.findAll('meta')

        elt = soup.find('h1', property="dc.title")
        if elt is None:
            # Not a classic article page — try the blog layout instead.
            if not self._blog_parse(soup):
                self.real_article = False
            return
        self.title = elt.getText().strip()

        elt = soup.find('h3', property="dc.creator")
        if elt is None:
            self.byline = ''
        else:
            self.byline = elt.getText().strip()

        elt = soup.find('span', datetitle="published")
        if elt is None:
            self.date = ''
        else:
            # `epochtime` is in milliseconds.
            date = datetime.datetime.fromtimestamp(float(elt['epochtime'])/1000)
            self.date = date.strftime(DATE_FORMAT)

        div = soup.find('div', id='content')
        if div is None:
            self.real_article = False
            return
        self.body = '\n'+'\n\n'.join([x.getText().strip() for x in div.findAll('p')])
1604100 | <reponame>papsebestyen/aswan<filename>aswan/tests/unit/test_config_cls.py
from dataclasses import asdict
from aswan import (
AswanConfig,
ProdConfig,
project_from_dir,
project_from_prod_conf,
project_from_prod_info,
)
from aswan.constants import Envs
def test_from_dir(tmp_path):
    """Config built from a directory matches the project's and round-trips through save/load."""
    conf = AswanConfig.default_from_dir(tmp_path, remote_root="/remote")
    assert conf == project_from_dir(tmp_path, remote_root="/remote").config

    save_dir = tmp_path / "save_loc"
    save_dir.mkdir()
    conf.save(save_dir)

    reloaded = AswanConfig.load(save_dir)
    assert conf.test == reloaded.test
    assert conf.exp == reloaded.exp
    assert conf.prod == ProdConfig(**asdict(reloaded.prod))
    assert conf.remote_root == "/remote"
    assert conf.remote_root == reloaded.remote_root
def test_partial(tmp_path):
    """Overriding only the prod t2 root is honoured by both construction paths."""
    t2_dir = tmp_path / "other_t2"
    conf = AswanConfig.default_from_dir(tmp_path, prod_t2_root=t2_dir)
    assert conf == project_from_prod_info(dirpath=tmp_path, prod_t2_root=t2_dir).config
    assert conf.prod.t2_root == str(t2_dir)
def test_prodconf(tmp_path):
    """Supplying an explicit ProdConfig replaces the default prod environment."""
    custom_prod = ProdConfig.from_dir(tmp_path / "other_prod")
    conf = AswanConfig.default_from_dir(tmp_path)
    conf.prod = custom_prod

    project = project_from_prod_conf(dirpath=tmp_path, prodenvconf=custom_prod)
    assert conf == project.config
    assert project.config.env_dict()[Envs.PROD] == custom_prod
| StarcoderdataPython |
6595180 | <gh_stars>0
import unittest
from selenium import webdriver
from library.selenium_actions import SeleniumActions
from library.tools import Tools
from library.selenium_driver import SeleniumDriver
from selenium.webdriver.support.ui import Select
#####
##
# Basic Integration Test
##
#####
def test_selenium_integration():
    """Smoke check: open Google in Chrome and print the URL we landed on."""
    driver = SeleniumDriver.fetch_chrome_webdriver()
    driver.get('http://www.google.com')
    print(driver.current_url)
    SeleniumDriver.close_web_driver(driver)
def test_select():
    """Exercise the three ways of picking an option from an HTML <select>.

    Drives a live Chrome browser against the-internet.herokuapp.com, so this
    requires network access and a working chromedriver.
    """
    web_driver = SeleniumDriver.fetch_chrome_webdriver()
    url ="http://the-internet.herokuapp.com/dropdown"
    web_driver.get(url)
    element_path = "//select[@id='dropdown']"
    web_element = SeleniumActions.find_by_xpath(web_driver, element_path)
    Tools.sleep(2)

    # 1. Select by the option's visible label.
    select_1 = Select(web_element)
    print(len(select_1.options))
    select_1.select_by_visible_text('Option 1')
    Tools.sleep(3)
    print("**")

    # 2. Select by positional index.
    select_2 = Select(web_element)
    select_2.select_by_index(2)
    Tools.sleep(3)
    print("--")

    # 3. Select by the option's "value" attribute, then read back the choice.
    select_3 = Select(web_element)
    select_3.select_by_value("1")
    text = select_3.first_selected_option.text
    print(text)
    Tools.sleep(5)
    web_driver.close()
#####
##
# Run
##
#####

if __name__ == "__main__":
    # Entry point for running the checks manually, outside a test runner.
    #test_selenium_integration()
    test_select()
| StarcoderdataPython |
5062672 | <filename>deepspeed/runtime/compression/cupy.py
'''
Copyright 2020 The Microsoft DeepSpeed Team
'''
import cupy
from torch.utils.dlpack import to_dlpack
from torch.utils.dlpack import from_dlpack
class CupyBackend(object):
    """Zero-copy tensor conversion between PyTorch and CuPy via DLPack,
    plus a sign-bit packing helper used for gradient compression."""

    def __init__(self):
        pass

    def torch2cupy(self, tensor):
        # Wrap a torch tensor as a cupy array without copying (DLPack).
        # NOTE(review): `cupy.fromDlpack` is deprecated in newer CuPy in
        # favour of `cupy.from_dlpack` — confirm the minimum supported CuPy
        # version before switching.
        return cupy.fromDlpack(to_dlpack(tensor))

    def cupy2torch(self, cupy_tensor):
        # Wrap a cupy array back as a torch tensor without copying (DLPack).
        return from_dlpack(cupy_tensor.toDlpack())

    def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
        # Pack the boolean tensor into bits (8x smaller), then split into
        # `num_chunks` pieces; `cupy.split` requires an even division.
        packed_sign = cupy.packbits(cupy_bool_tensor)
        sign_list_packed = cupy.split(packed_sign, num_chunks)
        # Make sure packing has finished before the chunks are consumed.
        cupy.cuda.get_current_stream().synchronize()
        return sign_list_packed
| StarcoderdataPython |
5091425 | from django.conf.urls import url
from films import views
# URL routes for the films app. Each route maps to a class-based view.
# NOTE: the original comments were mojibake (mis-decoded Chinese); one of
# them was even split across two lines, leaving a bare garbled token inside
# the list literal — a SyntaxError. Rewritten in English below.
urlpatterns = [
    # Films belonging to a given category.
    url(r'^list/(?P<category_id>\d+)/films/$', views.CategoryView.as_view()),
    # Top-rated films.
    url(r'^grade/$', views.GradeView.as_view()),
    # Currently-hot films.
    url(r'^hot/$', views.HotView.as_view()),
    # Classic films.
    url(r'^classics/$', views.ClassicsView.as_view()),
    # List of film categories.
    url(r'^category/$', views.Category.as_view()),
]
12807347 | <filename>mdp_playground/envs/rl_toy_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import warnings
import logging
import copy
from datetime import datetime
import numpy as np
import scipy
from scipy import stats
from scipy.spatial import distance
import gym
from mdp_playground.spaces import (
BoxExtended,
DiscreteExtended,
TupleExtended,
ImageMultiDiscrete,
ImageContinuous,
GridActionSpace,
)
class RLToyEnv(gym.Env):
"""
The base toy environment in MDP Playground. It is parameterised by a config dict and can be instantiated to be an MDP with any of the possible dimensions from the accompanying research paper. The class extends OpenAI Gym's environment gym.Env.
The accompanying paper is available at: https://arxiv.org/abs/1909.07750.
Instead of implementing a new class for every type of MDP, the intent is to capture as many common dimensions across different types of environments as possible and to be able to control the difficulty of an environment by allowing fine-grained control over each of these dimensions. The focus is to be as flexible as possible.
The configuration for the environment is passed as a dict at initialisation and contains all the information needed to determine the dynamics of the MDP that the instantiated environment will emulate. We recommend looking at the examples in example.py to begin using the environment since the dimensions and config options are mostly self-explanatory. If you want to specify custom MDPs, please see the use_custom_mdp config option below. For more details, we list here the dimensions and config options (their names here correspond to the keys to be passed in the config dict):
state_space_type : str
Specifies what the environment type is. Options are "continuous", "discrete" and "grid". The "grid" environment is, basically, a discretised version of the continuous environment.
delay : int >= 0
Delays each reward by this number of timesteps. Default value: 0.
sequence_length : int >= 1
Intrinsic sequence length of the reward function of an environment. For discrete environments, randomly selected sequences of this length are set to be rewardable at initialisation if use_custom_mdp = false and generate_random_mdp = true. Default value: 1.
transition_noise : float in range [0, 1] or Python function(rng)
For discrete environments, it is a float that specifies the fraction of times the environment transitions to a noisy next state at each timestep, independently and uniformly at random.
For continuous environments, if it's a float, it's used as the standard deviation of an i.i.d. normal distribution of noise. If it is a Python function with one argument, it is added to next state. The argument is the Random Number Generator (RNG) of the environment which is an np.random.RandomState object. This RNG should be used to perform calls to the desired random function to be used as noise to ensure reproducibility. Default value: 0.
reward_noise : float or Python function(rng)
If it's a float, it's used as the standard deviation of an i.i.d. normal distribution of noise.
If it's a Python function with one argument, it is added to the reward given at every time step. The argument is the Random Number Generator (RNG) of the environment which is an np.random.RandomState object. This RNG should be used to perform calls to the desired random function to be used as noise to ensure reproducibility. Default value: 0.
reward_density : float in range [0, 1]
The fraction of possible sequences of a given length that will be selected to be rewardable at initialisation time. Default value: 0.25.
reward_scale : float
Multiplies the rewards by this value at every time step. Default value: 1.
reward_shift : float
This value is added to the reward at every time step. Default value: 0.
diameter : int > 0
For discrete environments, if diameter = d, the set of states is set to be a d-partite graph (and NOT a complete d-partite graph), where, if we order the d sets as 1, 2, .., d, states from set 1 will have actions leading to states in set 2 and so on, with the final set d having actions leading to states in set 1. Number of actions for each state will, thus, be = (number of states) / (d). Default value: 1 for discrete environments. For continuous environments, this dimension is set automatically based on the state_space_max value.
terminal_state_density : float in range [0, 1]
For discrete environments, the fraction of states that are terminal; the terminal states are fixed to the "last" states when we consider them to be ordered by their numerical value. This is w.l.o.g. because discrete states are categorical. For continuous environments, please see terminal_states and term_state_edge for how to control terminal states. Default value: 0.25.
term_state_reward : float
Adds this to the reward if a terminal state was reached at the current time step. Default value: 0.
image_representations : boolean
Boolean to associate an image as the external observation with every discrete categorical state.
For discrete envs, this is handled by an mdp_playground.spaces.ImageMultiDiscrete object. It associates the image of an n + 3 sided polygon for a categorical state n. More details can be found in the documentation for the ImageMultiDiscrete class.
For continuous and grid envs, this is handled by an mdp_playground.spaces.ImageContinuous object. More details can be found in the documentation for the ImageContinuous class.
irrelevant_features : boolean
If True, an additional irrelevant sub-space (irrelevant to achieving rewards) is present as part of the observation space. This sub-space has its own transition dynamics independent of the dynamics of the relevant sub-space.
For discrete environments, additionally, state_space_size must be specified as a list.
For continuous environments, the option relevant_indices must be specified. This option specifies the dimensions relevant to achieving rewards.
For grid environments, nothing additional needs to be done as relevant grid shape is also used as the irrelevant grid shape.
use_custom_mdp : boolean
If true, users specify their own transition and reward functions using the config options transition_function and reward_function (see below). Optionally, they can also use init_state_dist and terminal_states for discrete spaces (see below).
transition_function : Python function(state, action) or a 2-D numpy.ndarray
A Python function emulating P(s, a). For discrete envs it's also possible to specify an |S|x|A| transition matrix.
reward_function : Python function(state_sequence, action_sequence) or a 2-D numpy.ndarray
A Python function emulating R(state_sequence, action_sequence). The state_sequence is recorded by the environment and transition_function is called before reward_function, so the "current" state (when step() was called) and next state are the last 2 states in the sequence.
For discrete environments, it's also possible to specify an |S|x|A| transition matrix where reward is assumed to be a function over the "current" state and action.
If use_custom_mdp = false and the environment is continuous, this is a string that chooses one of the following predefined reward functions: move_along_a_line or move_to_a_point.
If use_custom_mdp = false and the environment is grid, this is a string that chooses one of the following predefined reward functions: move_to_a_point. Support for sequences is planned.
Also see make_denser documentation.
Specific to discrete environments:
state_space_size : int > 0 or list of length 2
A number specifying size of the state space for normal discrete environments and a list of len = 2 when irrelevant_features is True (The list contains sizes of relevant and irrelevant sub-spaces where the 1st sub-space is assumed relevant and the 2nd sub-space is assumed irrelevant).
NOTE: When automatically generating MDPs, do not specify this value as its value depends on the action_space_size and the diameter as state_space_size = action_space_size * diameter.
action_space_size : int > 0
Similar description as state_space_size. When automatically generating MDPs, however, its value determines the state_space_size.
reward_dist : list with 2 floats or a Python function(env_rng, reward_sequence_dict)
If it's a list with 2 floats, then these 2 values are interpreted as a closed interval and taken as the end points of a categorical distribution which points equally spaced along the interval.
If it's a Python function, it samples rewards for the rewardable_sequences dict of the environment. The rewardable_sequences dict of the environment holds the rewardable_sequences with the key as a tuple holding the sequence and value as the reward handed out. The 1st argument for the reward_dist function is the Random Number Generator (RNG) of the environment which is an np.random.RandomState object. This RNG should be used to perform calls to the desired random function to be used to sample rewards to ensure reproducibility. The 2nd argument is the rewardable_sequences dict of the environment. This is available because one may need access to the already created reward sequences in the reward_dist function.
init_state_dist : 1-D numpy.ndarray
Specifies an array of initialisation probabilities for the discrete state space.
terminal_states : Python function(state) or 1-D numpy.ndarray
A Python function with the state as argument that returns whether the state is terminal. If this is specified as an array, the array lists the discrete states that are terminal.
Specific to image_representations for discrete envs:
image_transforms : str
String containing the transforms that must be applied to the image representations. As long as one of the following words is present in the string - shift, scale, rotate, flip - the corresponding transform will be applied at random to the polygon in the image representation whenever an observation is generated. Care is either explicitly taken that the polygon remains inside the image region or a warning is generated.
sh_quant : int
An int to quantise the shift transforms.
scale_range : (float, float)
A tuple of real numbers to specify (min_scaling, max_scaling).
ro_quant : int
An int to quantise the rotation transforms.
Specific to continuous environments:
state_space_dim : int
A number specifying state space dimensionality. A Gym Box space of this dimensionality will be instantiated.
action_space_dim : int
Same description as state_space_dim. This is currently set equal to the state_space_dim and doesn't need to specified.
relevant_indices : list
A list that provides the dimensions relevant to achieving rewards for continuous environments. The dynamics for these dimensions are independent of the dynamics for the remaining (irrelevant) dimensions.
state_space_max : float
Max absolute value that a dimension of the space can take. A Gym Box will be instantiated with range [-state_space_max, state_space_max]. Sampling will be done as for Gym Box spaces. Default value: Infinity.
action_space_max : float
Similar description as for state_space_max. Default value: Infinity.
terminal_states : numpy.ndarray
The centres of hypercube sub-spaces which are terminal.
term_state_edge : float
The edge of the hypercube sub-spaces which are terminal.
transition_dynamics_order : int
An order of n implies that the n-th state derivative is set equal to the action/inertia. Default value: 1.
inertia : float or numpy.ndarray
inertia of the rigid body or point object that is being simulated. If numpy.ndarray, it specifies independent inertiae for the dimensions and the shape should be (state_space_dim,). Default value: 1.
time_unit : float
time duration over which the action is applied to the system. Default value: 1.
target_point : numpy.ndarray
The target point in case move_to_a_point is the reward_function. If make_denser is false, target_radius determines distance from the target point at which the sparse reward is handed out. Default value: array of 0s.
action_loss_weight : float
A coefficient to multiply the norm of the action and subtract it from the reward to penalise the action magnitude. Default value: 0.
Specific to grid environments:
grid_shape : tuple
Shape of the grid environment. If irrelevant_features is True, this is replicated to add a grid which is irrelevant to the reward.
target_point : numpy.ndarray
The target point in case move_to_a_point is the reward_function. If make_denser is false, reward is only handed out when the target point is reached.
terminal_states : Python function(state) or 1-D numpy.ndarray
Same description as for terminal_states under discrete envs
Other important config:
Specific to discrete environments:
repeats_in_sequences : boolean
If true, allows rewardable sequences to have repeating states in them.
maximally_connected : boolean
If true, sets the transition function such that every state in independent set i can transition to every state in independent set i + 1. If false, then sets the transition function such that a state in independent set i may have any state in independent set i + 1 as the next state for a transition.
reward_every_n_steps : boolean
Hand out rewards only at multiples of sequence_length steps. This makes the probability that an agent is executing overlapping rewarding sequences 0. This makes it simpler to evaluate HRL algorithms and whether they can "discretise" time correctly. Noise is added at every step, regardless of this setting. Currently, not implemented for either the make_denser = true case or for continuous and grid environments.
generate_random_mdp : boolean
If true, automatically generate MDPs when use_custom_mdp = false. Currently, this option doesn't need to be specified because random MDPs are always generated when use_custom_mdp = false.
Specific to continuous environments:
none as of now
For all, continuous, discrete and grid environments:
make_denser : boolean
If true, makes the reward denser in environments.
For discrete environments, hands out a partial reward for completing partial sequences.
For continuous environments, for reward function move_to_a_point, the base reward handed out is equal to the distance moved towards the target point in the current timestep.
For grid envs, the base reward handed out is equal to the Manhattan distance moved towards the target point in the current timestep.
seed : int or dict
Recommended to be passed as an int which generates seeds to be used for the various components of the environment. It is, however, possible to control individual seeds by passing it as a dict. Please see the default initialisation for seeds below to see how to do that.
log_filename : str
The name of the log file to which logs are written.
log_level : logging.LOG_LEVEL option
Python log level for logging
Below, we list the important attributes and methods for this class.
Attributes
----------
config : dict
the config contains all the details required to generate an environment
seed : int or dict
recommended to set to an int, which would set seeds for the env, relevant and irrelevant and externally visible observation and action spaces automatically. If fine-grained control over the seeds is necessary, a dict, with key values as in the source code further below, can be passed.
observation_space : Gym.Space
The externally visible observation space for the enviroment.
action_space : Gym.Space
The externally visible action space for the enviroment.
rewardable_sequences : dict
holds the rewardable sequences. The keys are tuples of rewardable sequences and values are the rewards handed out. When make_denser is True for discrete environments, this dict also holds the rewardable partial sequences.
Methods
-------
init_terminal_states()
Initialises terminal states, T
init_init_state_dist()
Initialises initial state distribution, rho_0
init_transition_function()
Initialises transition function, P
init_reward_function()
Initialises reward function, R
transition_function(state, action)
the transition function of the MDP, P
P(state, action)
defined as a lambda function in the call to init_transition_function() and is equivalent to calling transition_function()
reward_function(state, action)
the reward function of the MDP, R
R(state, action)
defined as a lambda function in the call to init_reward_function() and is equivalent to calling reward_function()
get_augmented_state()
gets underlying Markovian state of the MDP
reset()
Resets environment state
seed()
Sets the seed for the numpy RNG used by the environment (state and action spaces have their own seeds as well)
step(action, imaginary_rollout=False)
Performs 1 transition of the MDP
"""
def __init__(self, **config):
"""Initialises the MDP to be emulated using the settings provided in config.
Parameters
----------
config : dict
the member variable config is initialised to this value after inserting defaults
"""
print("Passed config:", config, "\n")
if config == {}:
config = {
"state_space_size": 8,
"action_space_size": 8,
"state_space_type": "discrete",
"action_space_type": "discrete",
"terminal_state_density": 0.25,
"maximally_connected": True,
}
# Print initial "banner"
screen_output_width = 132 # #hardcoded #TODO get from system
repeat_equal_sign = (screen_output_width - 20) // 2
set_ansi_escape = "\033[32;1m"
reset_ansi_escape = "\033[0m"
print(
set_ansi_escape
+ "=" * repeat_equal_sign
+ "Initialising Toy MDP"
+ "=" * repeat_equal_sign
+ reset_ansi_escape
)
print("Current working directory:", os.getcwd())
# Set other default settings for config to use if config is passed without any values for them
if "log_level" not in config:
self.log_level = logging.CRITICAL # #logging.NOTSET
else:
self.log_level = config["log_level"]
# print('self.log_level', self.log_level)
logging.getLogger(__name__).setLevel(self.log_level)
# fmtr = logging.Formatter(fmt='%(message)s - %(levelname)s - %(name)s - %(asctime)s', datefmt='%m.%d.%Y %I:%M:%S %p', style='%')
# sh = logging.StreamHandler()
# sh.setFormatter(fmt=fmtr)
self.logger = logging.getLogger(__name__)
# self.logger.addHandler(sh)
if "log_filename" in config:
# self.log_filename = __name__ + '_' +\
# datetime.today().strftime('%m.%d.%Y_%I:%M:%S_%f') + '.log' #
# #TODO Make a directoy 'log/' and store there.
# else:
# checks that handlers is [], before adding a file logger, otherwise we
# would have multiple loggers to file if multiple RLToyEnvs were
# instantiated by the same process.
if (not self.logger.handlers):
self.log_filename = config["log_filename"]
# logging.basicConfig(filename='/tmp/' + self.log_filename, filemode='a', format='%(message)s - %(levelname)s - %(name)s - %(asctime)s', datefmt='%m.%d.%Y %I:%M:%S %p', level=self.log_level)
log_file_handler = logging.FileHandler(self.log_filename)
self.logger.addHandler(log_file_handler)
# log_filename = "logs/output.log"
# os.makedirs(os.path.dirname(log_filename), exist_ok=True)
# #seed
if (
"seed" not in config
): # #####IMP It's very important to not modify the config dict since it may be shared across multiple instances of the Env in the same process and could lead to very hard to catch bugs (I faced problems with Ray's A3C)
self.seed_int = None
need_to_gen_seeds = True
elif isinstance(config["seed"], dict):
self.seed_dict = config["seed"]
need_to_gen_seeds = False
elif (
isinstance(config["seed"], int)
): # should be an int then. Gym doesn't accept np.int64, etc..
self.seed_int = config["seed"]
need_to_gen_seeds = True
else:
raise TypeError("Unsupported data type for seed: ", type(config["seed"]))
# #seed #TODO move to seed() so that obs., act. space, etc. have their
# seeds reset too when env seed is reset?
if need_to_gen_seeds:
self.seed_dict = {}
self.seed_dict["env"] = self.seed_int
self.seed(self.seed_dict["env"])
# ##IMP All these diff. seeds may not be needed (you could have one
# seed for the joint relevant + irrelevant parts). But they allow for easy
# separation of the relevant and irrelevant dimensions!! _And_ the seed
# remaining the same for the underlying discrete environment makes it
# easier to write tests!
self.seed_dict["relevant_state_space"] = self.np_random.randint(
sys.maxsize
) # #random
self.seed_dict["relevant_action_space"] = self.np_random.randint(
sys.maxsize
) # #random
self.seed_dict["irrelevant_state_space"] = self.np_random.randint(
sys.maxsize
) # #random
self.seed_dict["irrelevant_action_space"] = self.np_random.randint(
sys.maxsize
) # #random
# #IMP This is currently used to sample only for continuous spaces and not used for discrete spaces by the Environment. User might want to sample from it for multi-discrete environments. #random
self.seed_dict["state_space"] = self.np_random.randint(sys.maxsize)
# #IMP This IS currently used to sample random actions by the RL agent for both discrete and continuous environments (but not used anywhere by the Environment). #random
self.seed_dict["action_space"] = self.np_random.randint(sys.maxsize)
self.seed_dict["image_representations"] = self.np_random.randint(
sys.maxsize
) # #random
# print("Mersenne0, dummy_eval:", self.np_random.get_state()[2], "dummy_eval" in config)
else: # if seed dict was passed
self.seed(self.seed_dict["env"])
# print("Mersenne0 (dict), dummy_eval:", self.np_random.get_state()[2], "dummy_eval" in config)
self.logger.warning("Seeds set to:" + str(self.seed_dict))
# print(f'Seeds set to {self.seed_dict=}') # Available from Python 3.8
config["state_space_type"] = config["state_space_type"].lower()
# #defaults ###TODO throw warning in case unknown config option is passed
if "use_custom_mdp" not in config:
self.use_custom_mdp = False
else:
self.use_custom_mdp = config["use_custom_mdp"]
if self.use_custom_mdp:
assert "transition_function" in config
assert "reward_function" in config
# if config["state_space_type"] == "discrete":
# assert "init_state_dist" in config
if "terminal_state_density" not in config:
self.terminal_state_density = 0.25
else:
self.terminal_state_density = config["terminal_state_density"]
if not self.use_custom_mdp:
if "generate_random_mdp" not in config:
self.generate_random_mdp = True
else:
self.generate_random_mdp = config["generate_random_mdp"]
if "term_state_reward" not in config:
self.term_state_reward = 0.0
else:
self.term_state_reward = config["term_state_reward"]
if "delay" not in config:
self.delay = 0
else:
self.delay = config["delay"]
self.reward_buffer = [0.0] * (self.delay)
if "sequence_length" not in config:
self.sequence_length = 1
else:
self.sequence_length = config["sequence_length"]
if "reward_density" not in config:
self.reward_density = 0.25
else:
self.reward_density = config["reward_density"]
if "make_denser" not in config:
self.make_denser = False
else:
self.make_denser = config["make_denser"]
if "maximally_connected" not in config:
self.maximally_connected = True
else:
self.maximally_connected = config["maximally_connected"]
if "reward_noise" in config:
if callable(config["reward_noise"]):
self.reward_noise = config["reward_noise"]
else:
reward_noise_std = config["reward_noise"]
self.reward_noise = lambda a: a.normal(0, reward_noise_std)
else:
self.reward_noise = None
if "transition_noise" in config:
if config["state_space_type"] == "continuous":
if callable(config["transition_noise"]):
self.transition_noise = config["transition_noise"]
else:
p_noise_std = config["transition_noise"]
self.transition_noise = lambda a: a.normal(0, p_noise_std)
else: # discrete case
self.transition_noise = config["transition_noise"]
else: # no transition noise
self.transition_noise = None
if "reward_scale" not in config:
self.reward_scale = 1.0
else:
self.reward_scale = config["reward_scale"]
if "reward_shift" not in config:
self.reward_shift = 0.0
else:
self.reward_shift = config["reward_shift"]
if "irrelevant_features" not in config:
self.irrelevant_features = False
else:
self.irrelevant_features = config["irrelevant_features"]
if "image_representations" not in config:
self.image_representations = False
else:
self.image_representations = config["image_representations"]
if "image_transforms" in config:
assert config["state_space_type"] == "discrete", (
"Image " "transforms are only applicable to discrete envs."
)
self.image_transforms = config["image_transforms"]
else:
self.image_transforms = "none"
if "image_width" in config:
self.image_width = config["image_width"]
else:
self.image_width = 100
if "image_height" in config:
self.image_height = config["image_height"]
else:
self.image_height = 100
# The following transforms are only applicable in discrete envs:
if config["state_space_type"] == "discrete":
if "image_sh_quant" not in config:
if "shift" in self.image_transforms:
warnings.warn(
"Setting image shift quantisation to the \
default of 1, since no config value was provided for it."
)
self.image_sh_quant = 1
else:
self.image_sh_quant = None
else:
self.image_sh_quant = config["image_sh_quant"]
if "image_ro_quant" not in config:
if "rotate" in self.image_transforms:
warnings.warn(
"Setting image rotate quantisation to the \
default of 1, since no config value was provided for it."
)
self.image_ro_quant = 1
else:
self.image_ro_quant = None
else:
self.image_ro_quant = config["image_ro_quant"]
if "image_scale_range" not in config:
if "scale" in self.image_transforms:
warnings.warn(
"Setting image scale range to the default \
of (0.5, 1.5), since no config value was provided for it."
)
self.image_scale_range = (0.5, 1.5)
else:
self.image_scale_range = None
else:
self.image_scale_range = config["image_scale_range"]
if config["state_space_type"] == "discrete":
if "reward_dist" not in config:
self.reward_dist = None
else:
self.reward_dist = config["reward_dist"]
if "diameter" not in config:
self.diameter = 1
else:
self.diameter = config["diameter"]
elif config["state_space_type"] == "continuous":
# if not self.use_custom_mdp:
self.state_space_dim = config["state_space_dim"]
if "transition_dynamics_order" not in config:
self.dynamics_order = 1
else:
self.dynamics_order = config["transition_dynamics_order"]
if "inertia" not in config:
self.inertia = 1.0
else:
self.inertia = config["inertia"]
if "time_unit" not in config:
self.time_unit = 1.0
else:
self.time_unit = config["time_unit"]
if "target_radius" not in config:
self.target_radius = 0.05
else:
self.target_radius = config["target_radius"]
elif config["state_space_type"] == "grid":
assert "grid_shape" in config
self.grid_shape = config["grid_shape"]
else:
raise ValueError("Unknown state_space_type")
if "action_loss_weight" not in config:
self.action_loss_weight = 0.0
else:
self.action_loss_weight = config["action_loss_weight"]
if "reward_every_n_steps" not in config:
self.reward_every_n_steps = False
else:
self.reward_every_n_steps = config["reward_every_n_steps"]
if "repeats_in_sequences" not in config:
self.repeats_in_sequences = False
else:
self.repeats_in_sequences = config["repeats_in_sequences"]
self.dtype = np.float32 if "dtype" not in config else config["dtype"]
if config["state_space_type"] == "discrete":
if self.irrelevant_features:
assert (
len(config["action_space_size"]) == 2
), "Currently, 1st sub-state (and action) space is assumed to be relevant to rewards and 2nd one is irrelevant. Please provide a list with sizes for the 2."
self.action_space_size = config["action_space_size"]
else: # uni-discrete space
assert isinstance(
config["action_space_size"], int
), "Did you mean to turn irrelevant_features? If so, please set irrelevant_features = True in config. If not, please provide an int for action_space_size."
self.action_space_size = [
config["action_space_size"]
] # Make a list to be able to iterate over observation spaces in for loops later
# assert type(config["state_space_size"]) == int, 'config["state_space_size"] has to be provided as an int when we have a simple Discrete environment. Was:' + str(type(config["state_space_size"]))
if self.use_custom_mdp:
self.state_space_size = [config["state_space_size"]]
else:
self.state_space_size = np.array(self.action_space_size) * np.array(
self.diameter
)
# assert (np.array(self.state_space_size) % np.array(self.diameter) == 0).all(), "state_space_size should be a multiple of the diameter to allow for the generation of regularly connected MDPs."
elif config["state_space_type"] == "continuous":
self.action_space_dim = self.state_space_dim
if self.irrelevant_features:
assert (
"relevant_indices" in config
), "Please provide dimensions\
of state space relevant to rewards."
if "relevant_indices" not in config:
config["relevant_indices"] = range(self.state_space_dim)
# config["irrelevant_indices"] = list(set(range(len(config["state_space_dim"]))) - set(config["relevant_indices"]))
elif config["state_space_type"] == "grid":
# Repeat the grid for the irrelevant part as well
if self.irrelevant_features:
self.grid_shape = self.grid_shape * 2
if ("init_state_dist" in config) and ("relevant_init_state_dist" not in config):
config["relevant_init_state_dist"] = config["init_state_dist"]
assert (
self.sequence_length > 0
), 'config["sequence_length"] <= 0. Set to: ' + str(
self.sequence_length
) # also should be int
if (
"maximally_connected" in config and config["maximally_connected"]
): # ###TODO remove
pass
# assert config["state_space_size"] == config["action_space_size"], "config[\"state_space_size\"] != config[\"action_space_size\"]. For maximally_connected transition graphs, they should be equal. Please provide valid values. Vals: " + str(config["state_space_size"]) + " " + str(config["action_space_size"]) + ". In future, \"maximally_connected\" graphs are planned to be supported!"
# assert config["irrelevant_state_space_size"] ==
# config["irrelevant_action_space_size"],
# "config[\"irrelevant_state_space_size\"] !=
# config[\"irrelevant_action_space_size\"]. For maximally_connected
# transition graphs, they should be equal. Please provide valid values!
# Vals: " + str(config["irrelevant_state_space_size"]) + " " +
# str(config["irrelevant_action_space_size"]) + ". In future,
# \"maximally_connected\" graphs are planned to be supported!" #TODO
# Currently, irrelevant dimensions have a P similar to that of relevant
# dimensions. Should this be decoupled?
if config["state_space_type"] == "continuous":
# assert config["state_space_dim"] == config["action_space_dim"], "For continuous spaces, state_space_dim has to be = action_space_dim. state_space_dim was: " + str(config["state_space_dim"]) + " action_space_dim was: " + str(config["action_space_dim"])
if config["reward_function"] == "move_to_a_point":
assert self.sequence_length == 1
if "target_point" in config:
self.target_point = np.array(config["target_point"], dtype=self.dtype)
assert self.target_point.shape == (
len(config["relevant_indices"]),
), "target_point should have dimensionality = relevant_state_space dimensionality"
else:
# Sets default
self.target_point = np.zeros(shape=(config["state_space_dim"],))
elif config["state_space_type"] == "grid":
if config["reward_function"] == "move_to_a_point":
self.target_point = config["target_point"]
self.config = config
self.augmented_state_length = self.sequence_length + self.delay + 1
self.total_episodes = 0
# This init_...() is done before the others below because it's needed
# for image_representations for continuous
self.init_terminal_states()
if config["state_space_type"] == "discrete":
self.observation_spaces = [
DiscreteExtended(
self.state_space_size[0],
seed=self.seed_dict["relevant_state_space"],
)
] # #seed #hardcoded, many time below as well
self.action_spaces = [
DiscreteExtended(
self.action_space_size[0],
seed=self.seed_dict["relevant_action_space"],
)
] # #seed #hardcoded
if self.irrelevant_features:
self.observation_spaces.append(
DiscreteExtended(
self.state_space_size[1],
seed=self.seed_dict["irrelevant_state_space"],
)
) # #seed #hardcoded
self.action_spaces.append(
DiscreteExtended(
self.action_space_size[1],
seed=self.seed_dict["irrelevant_action_space"],
)
) # #seed #hardcoded
# Commented code below may used to generalise relevant sub-spaces to more than the current max of 2.
# self.observation_spaces = [None] * len(config["all_indices"])
# for i in config["relevant_indices"]:
# self.observation_spaces[i] =
# self.action_spaces[i] = DiscreteExtended(self.action_space_size[i], seed=self.seed_dict["relevant_action_space"]) #seed
# for i in config["irrelevant_indices"]:
# self.observation_spaces[i] = DiscreteExtended(self.state_space_size[i], seed=self.seed_dict["irrelevant_state_space"])) #seed # hack
# self.action_spaces[i] = DiscreteExtended(self.action_space_size[i],
# seed=self.seed_dict["irrelevant_action_space"]) #seed
if self.image_representations:
# underlying_obs_space = MultiDiscreteExtended(self.state_space_size, seed=self.seed_dict["state_space"]) #seed
self.observation_space = ImageMultiDiscrete(
self.state_space_size,
width=self.image_width,
height=self.image_height,
transforms=self.image_transforms,
sh_quant=self.image_sh_quant,
scale_range=self.image_scale_range,
ro_quant=self.image_ro_quant,
circle_radius=20,
seed=self.seed_dict["image_representations"],
) # #seed
if self.irrelevant_features:
self.action_space = TupleExtended(
self.action_spaces, seed=self.seed_dict["action_space"]
) # #seed
else:
self.action_space = self.action_spaces[0]
else:
if self.irrelevant_features:
self.observation_space = TupleExtended(
self.observation_spaces, seed=self.seed_dict["state_space"]
) # #seed # hack #TODO
# Gym (and so Ray) apparently needs observation_space as a
# member of an env.
self.action_space = TupleExtended(
self.action_spaces, seed=self.seed_dict["action_space"]
) # #seed
else:
self.observation_space = self.observation_spaces[0]
self.action_space = self.action_spaces[0]
elif config["state_space_type"] == "continuous":
self.state_space_max = (
config["state_space_max"] if "state_space_max" in config else np.inf
) # should we
# select a random max? #test?
self.feature_space = BoxExtended(
-self.state_space_max,
self.state_space_max,
shape=(self.state_space_dim,),
seed=self.seed_dict["state_space"],
dtype=self.dtype,
) # #seed
# hack #TODO # low and high are 1st 2 and required arguments
# for instantiating BoxExtended
self.action_space_max = (
config["action_space_max"] if "action_space_max" in config else np.inf
) # #test?
# config["action_space_max"] = \
# num_to_list(config["action_space_max"]) * config["action_space_dim"]
self.action_space = BoxExtended(
-self.action_space_max,
self.action_space_max,
shape=(self.action_space_dim,),
seed=self.seed_dict["action_space"],
dtype=self.dtype,
) # #seed
# hack #TODO
if self.image_representations:
self.observation_space = ImageContinuous(
self.feature_space,
width=self.image_width,
height=self.image_height,
term_spaces=self.term_spaces,
target_point=self.target_point,
circle_radius=5,
seed=self.seed_dict["image_representations"],
) # #seed
else:
self.observation_space = self.feature_space
elif config["state_space_type"] == "grid":
underlying_space_maxes = list_to_float_np_array(self.grid_shape)
# The min for grid envs is 0, 0, 0, ...
self.feature_space = BoxExtended(
0 * underlying_space_maxes,
underlying_space_maxes,
seed=self.seed_dict["state_space"],
dtype=self.dtype,
) # #seed
lows = np.array([-1] * len(self.grid_shape))
highs = np.array([1] * len(self.grid_shape))
self.action_space = GridActionSpace(
lows,
highs,
seed=self.seed_dict["action_space"],
) # #seed
if self.image_representations:
target_pt = list_to_float_np_array(self.target_point)
self.observation_space = ImageContinuous(
self.feature_space,
width=self.image_width,
height=self.image_height,
term_spaces=self.term_spaces,
target_point=target_pt,
circle_radius=5,
grid_shape=self.grid_shape,
seed=self.seed_dict["image_representations"],
) # #seed
else:
self.observation_space = self.feature_space
# if config["action_space_type"] == "discrete":
# if not config["generate_random_mdp"]:
# # self.logger.error("User defined P and R are currently not supported.") ##TODO
# # sys.exit(1)
# self.P = config["transition_function"] if callable(config["transition_function"]) else lambda s, a: config["transition_function"][s, a] ##IMP callable may not be optimal always since it was deprecated in Python 3.0 and 3.1
# self.R = config["reward_function"] if callable(config["reward_function"]) else lambda s, a: config["reward_function"][s, a]
# else:
# ##TODO Support imaginary rollouts for continuous envs. and user-defined P and R? Will do it depending on demand for it. In fact, for imagined rollouts, let our code handle storing augmented_state, curr_state, etc. in separate variables, so that it's easy for user to perform imagined rollouts instead of having to maintain their own state and action sequences.
# #TODO Generate state and action space sizes also randomly?
# ###IMP The order in which the following inits are called is important, so don't change!!
# #init_state_dist: Initialises uniform distribution over non-terminal states for discrete distribution; After looking into Gym code, I can say that for continuous, it's uniform over non-terminal if limits are [a, b], shifted exponential if exactly one of the limits is np.inf, normal if both limits are np.inf - this sampling is independent for each dimension (and is done for the defined limits for the respective dimension).
self.init_init_state_dist()
self.init_transition_function()
# print("Mersenne1, dummy_eval:", self.np_random.get_state()[2], "dummy_eval" in self.config)
self.init_reward_function()
self.curr_obs = (
self.reset()
) # #TODO Maybe not call it here, since Gym seems to expect to _always_ call this method when using an environment; make this seedable? DO NOT do seed dependent initialization in reset() otherwise the initial state distrbution will always be at the same state at every call to reset()!! (Gym env has its own seed? Yes, it does, as does also space);
self.logger.info(
"self.augmented_state, len: "
+ str(self.augmented_state)
+ ", "
+ str(len(self.augmented_state))
)
self.logger.info(
"MDP Playground toy env instantiated with config: " + str(self.config)
)
print("MDP Playground toy env instantiated with config: " + str(self.config))
def init_terminal_states(self):
"""Initialises terminal state set to be the 'last' states for discrete environments. For continuous environments, terminal states will be in a hypercube centred around config['terminal_states'] with the edge of the hypercube of length config['term_state_edge']."""
if self.config["state_space_type"] == "discrete":
if (
self.use_custom_mdp and "terminal_states" in self.config
): # custom/user-defined terminal states
self.is_terminal_state = (
self.config["terminal_states"]
if callable(self.config["terminal_states"])
else lambda s: s in self.config["terminal_states"]
)
else:
# Define the no. of terminal states per independent set of the state space
self.num_terminal_states = int(
self.terminal_state_density * self.action_space_size[0]
) # #hardcoded ####IMP Using action_space_size
# since it contains state_space_size // diameter
# if self.num_terminal_states == 0: # Have at least 1 terminal state?
# warnings.warn("WARNING: int(terminal_state_density * relevant_state_space_size) was 0. Setting num_terminal_states to be 1!")
# self.num_terminal_states = 1
self.config["terminal_states"] = np.array(
[
j * self.action_space_size[0] - 1 - i
for j in range(1, self.diameter + 1)
for i in range(self.num_terminal_states)
]
) # terminal states
# inited to be at the "end" of the sorted states
self.logger.warning(
"Inited terminal states to self.config['terminal_states']: "
+ str(self.config["terminal_states"])
+ ". Total "
+ str(self.num_terminal_states)
)
self.is_terminal_state = lambda s: s in self.config["terminal_states"]
elif self.config["state_space_type"] == "continuous":
# print("# TODO for cont. spaces: term states")
self.term_spaces = []
if "terminal_states" in self.config: # ##TODO For continuous spaces,
# could also generate terminal spaces based on a terminal_state_density
# given by user (Currently, user specifies terminal state points
# around which hypercubes in state space are terminal. If the user
# want a specific density and not hypercubes, the user has to design
# the terminal states they specify such that they would have a given
# density in space.). But only for state spaces with limits? For state
# spaces without limits, could do it for a limited subspace of the
# infinite state space 1st and then repeat that pattern indefinitely
# along each dimension's axis. #test?
if callable(self.config["terminal_states"]):
self.is_terminal_state = self.config["terminal_states"]
else:
for i in range(
len(self.config["terminal_states"])
): # List of centres
# of terminal state regions.
assert len(self.config["terminal_states"][i]) == len(
self.config["relevant_indices"]
), (
"Specified terminal state centres should have"
" dimensionality = number of relevant_indices. That"
" was not the case for centre no.: " + str(i) + ""
)
lows = np.array(
[
self.config["terminal_states"][i][j]
- self.config["term_state_edge"] / 2
for j in range(len(self.config["relevant_indices"]))
]
)
highs = np.array(
[
self.config["terminal_states"][i][j]
+ self.config["term_state_edge"] / 2
for j in range(len(self.config["relevant_indices"]))
]
)
# print("Term state lows, highs:", lows, highs)
self.term_spaces.append(
BoxExtended(
low=lows, high=highs, seed=self.seed_, dtype=self.dtype
)
) # #seed #hack #TODO
self.logger.debug(
"self.term_spaces samples:"
+ str(self.term_spaces[0].sample())
+ str(self.term_spaces[-1].sample())
)
self.is_terminal_state = lambda s: np.any(
[
self.term_spaces[i].contains(
s[self.config["relevant_indices"]]
)
for i in range(len(self.term_spaces))
]
)
# ### TODO for cont. #test?
else: # no custom/user-defined terminal states
self.is_terminal_state = lambda s: False
elif self.config["state_space_type"] == "grid":
self.term_spaces = []
if "terminal_states" in self.config:
if callable(self.config["terminal_states"]):
self.is_terminal_state = self.config["terminal_states"]
else:
for i in range(len(self.config["terminal_states"])): # List of
# terminal states on the grid
term_state = list_to_float_np_array(
self.config["terminal_states"][i]
)
lows = term_state
highs = term_state # #hardcoded
self.term_spaces.append(
BoxExtended(
low=lows, high=highs, seed=self.seed_, dtype=np.int64
)
) # #seed #hack #TODO
def is_term(s):
cont_state = list_to_float_np_array(s)
return np.any(
[
self.term_spaces[i].contains(cont_state)
for i in range(len(self.term_spaces))
]
)
self.is_terminal_state = is_term
else: # no custom/user-defined terminal states
self.is_terminal_state = lambda s: False
def init_init_state_dist(self):
"""Initialises initial state distrbution, rho_0, to be uniform over the non-terminal states for discrete environments. For both discrete and continuous environments, the uniform sampling over non-terminal states is taken care of in reset() when setting the initial state for an episode."""
# relevant dimensions part
if self.config["state_space_type"] == "discrete":
if (
self.use_custom_mdp and "init_state_dist" in self.config
): # custom/user-defined phi_0
# self.config["relevant_init_state_dist"] = #TODO make this also a lambda function?
pass
else:
# For relevant sub-space
non_term_state_space_size = (
self.action_space_size[0] - self.num_terminal_states
) # #hardcoded
self.config["relevant_init_state_dist"] = (
[
1 / (non_term_state_space_size * self.diameter)
for i in range(non_term_state_space_size)
]
+ [0 for i in range(self.num_terminal_states)]
) * self.diameter # #TODO
# Currently only uniform distribution over non-terminal
# states; Use Dirichlet distribution to select prob. distribution to use?
# #TODO make init_state_dist the default sample() for state space?
self.config["relevant_init_state_dist"] = np.array(
self.config["relevant_init_state_dist"]
)
self.logger.warning(
"self.relevant_init_state_dist:"
+ str(self.config["relevant_init_state_dist"])
)
# #irrelevant sub-space
if self.irrelevant_features:
non_term_state_space_size = self.state_space_size[1] # #hardcoded
self.config["irrelevant_init_state_dist"] = [
1 / (non_term_state_space_size)
for i in range(non_term_state_space_size)
] # diameter not needed here as we directly take the state_space_size in the prev. line
self.config["irrelevant_init_state_dist"] = np.array(
self.config["irrelevant_init_state_dist"]
)
self.logger.warning(
"self.irrelevant_init_state_dist:"
+ str(self.config["irrelevant_init_state_dist"])
)
else: # if continuous or grid space
pass # this is handled in reset where we resample if we sample a term. state
def init_transition_function(self):
"""Initialises transition function, P by selecting random next states for every (state, action) tuple for discrete environments. For continuous environments, we have 1 option for the transition function which varies depending on dynamics order and inertia and time_unit for a point object."""
if self.config["state_space_type"] == "discrete":
if self.use_custom_mdp: # custom/user-defined P
pass
else:
# relevant dimensions part
self.config["transition_function"] = np.zeros(
shape=(self.state_space_size[0], self.action_space_size[0]),
dtype=object,
) # #hardcoded
self.config["transition_function"][:] = -1 # #IMP # To avoid
# having a valid value from the state space before we actually
# assign a usable value below!
if self.maximally_connected:
if self.diameter == 1: # #hack # TODO Remove this if block;
# this case is currently separately handled just so that tests
# do not fail. Using prob=prob in the sample call causes the
# sampling to change even if the probabilities remain the
# same. All solutions I can think of are hacky except changing
# the expected values in all the test cases which would take
# quite some time.
for s in range(self.state_space_size[0]):
self.config["transition_function"][
s
] = self.observation_spaces[0].sample(
size=self.action_space_size[0], replace=False
) # #random #TODO Preferably use the seed of the
# Env for this? #hardcoded
else: # if diam > 1
for s in range(self.state_space_size[0]):
i_s = (
s // self.action_space_size[0]
) # select the current independent set number
prob = np.zeros(shape=(self.state_space_size[0],))
prob_next_states = (
np.ones(shape=(self.action_space_size[0],))
/ self.action_space_size[0]
)
ind_1 = (
(i_s + 1) * self.action_space_size[0]
) % self.state_space_size[0]
ind_2 = (
(i_s + 2) * self.action_space_size[0]
) % self.state_space_size[0]
# print(ind_1, ind_2)
if ind_2 <= ind_1: # edge case
ind_2 += self.state_space_size[0]
prob[ind_1:ind_2] = prob_next_states
self.config["transition_function"][
s
] = self.observation_spaces[0].sample(
prob=prob, size=self.action_space_size[0], replace=False
) # #random #TODO
# Preferably use the seed of the Env for this? #hardcoded
# hacky way to do the above
# self.config["transition_function"][s] = self.observation_spaces[0].sample(max=self.action_space_size[0], size=self.action_space_size[0], replace=False) #random #TODO Preferably use the seed of the Env for this? #hardcoded
# Set the transitions from current state to be to the next independent set's states
# self.config["transition_function"][s] += ((i_s + 1) * self.action_space_size[0]) % self.state_space_size[0]
else: # if not maximally_connected
for s in range(self.state_space_size[0]):
i_s = (
s // self.action_space_size[0]
) # select the current independent
# set number
# Set the probabilities of the next state for the current independent set
prob = np.zeros(shape=(self.state_space_size[0],))
prob_next_states = (
np.ones(shape=(self.action_space_size[0],))
/ self.action_space_size[0]
)
ind_1 = (
(i_s + 1) * self.action_space_size[0]
) % self.state_space_size[0]
ind_2 = (
(i_s + 2) * self.action_space_size[0]
) % self.state_space_size[0]
# print(ind_1, ind_2)
if ind_2 <= ind_1: # edge case
ind_2 += self.state_space_size[0]
prob[ind_1:ind_2] = prob_next_states
for a in range(self.action_space_size[0]):
# prob[i_s * self.action_space_size[0] : (i_s + 1) * self.action_space_size[0]] = prob_next_states
self.config["transition_function"][
s, a
] = self.observation_spaces[0].sample(prob=prob)
# #random #TODO Preferably use the seed of the Env for this?
# Set the next state for terminal states to be themselves, for any action taken.
for i_s in range(self.diameter):
for s in range(
self.action_space_size[0] - self.num_terminal_states,
self.action_space_size[0],
):
for a in range(self.action_space_size[0]):
assert (
self.is_terminal_state(
i_s * self.action_space_size[0] + s
)
)
self.config["transition_function"][
i_s * self.action_space_size[0] + s, a
] = (
i_s * self.action_space_size[0] + s
) # Setting
# P(s, a) = s for terminal states, for P() to be
# meaningful even if someone doesn't check for
# 'done' being = True
# #irrelevant dimensions part
if self.irrelevant_features: # #test
self.config["transition_function_irrelevant"] = np.zeros(
shape=(self.state_space_size[1], self.action_space_size[1]),
dtype=object,
)
self.config["transition_function_irrelevant"][:] = -1 # #IMP
# To avoid having a valid value from the state space before we
# actually assign a usable value below!
if self.maximally_connected:
for s in range(self.state_space_size[1]):
i_s = s // self.action_space_size[1] # select the
# current independent set number
# Set the probabilities of the next state for the
# current independent set
prob = np.zeros(shape=(self.state_space_size[1],))
prob_next_states = (
np.ones(shape=(self.action_space_size[1],))
/ self.action_space_size[1]
)
ind_1 = (
(i_s + 1) * self.action_space_size[1]
) % self.state_space_size[1]
ind_2 = (
(i_s + 2) * self.action_space_size[1]
) % self.state_space_size[1]
print(ind_1, ind_2)
if ind_2 <= ind_1: # edge case
ind_2 += self.state_space_size[1]
prob[ind_1:ind_2] = prob_next_states
self.config["transition_function_irrelevant"][
s
] = self.observation_spaces[1].sample(
prob=prob, size=self.action_space_size[1], replace=False
)
# #random #TODO Preferably use the seed of the
# Env for this? #hardcoded
# self.config["transition_function_irrelevant"][s] = self.observation_spaces[1].sample(max=self.action_space_size[1], size=self.action_space_size[1], replace=False) #random #TODO Preferably use the seed of the Env for this?
# self.config["transition_function_irrelevant"][s] += ((i_s + 1) * self.action_space_size[1]) % self.state_space_size[1]
else:
for s in range(self.state_space_size[1]):
i_s = s // self.action_space_size[1] # select the
# current independent set number
# Set the probabilities of the next state for the
# current independent set
prob = np.zeros(shape=(self.state_space_size[1],))
prob_next_states = (
np.ones(shape=(self.action_space_size[1],))
/ self.action_space_size[1]
)
ind_1 = (
(i_s + 1) * self.action_space_size[1]
) % self.state_space_size[1]
ind_2 = (
(i_s + 2) * self.action_space_size[1]
) % self.state_space_size[1]
# print(ind_1, ind_2)
if ind_2 <= ind_1: # edge case
ind_2 += self.state_space_size[1]
prob[ind_1:ind_2] = prob_next_states
for a in range(self.action_space_size[1]):
# prob[i_s * self.action_space_size[1] : (i_s + 1)
# * self.action_space_size[1]] = prob_next_states
self.config["transition_function_irrelevant"][
s, a
] = self.observation_spaces[1].sample(prob=prob)
# #random #TODO Preferably use the seed of the Env for this?
self.logger.warning(
str(self.config["transition_function_irrelevant"])
+ "init_transition_function _irrelevant"
+ str(type(self.config["transition_function_irrelevant"][0, 0]))
)
if not callable(self.config["transition_function"]):
self.transition_matrix = self.config["transition_function"]
self.config[
"transition_function"
] = lambda s, a: self.transition_matrix[s, a]
print(
"transition_matrix inited to:\n"
+ str(self.transition_matrix)
+ "\nPython type of state: "
+ str(type(self.config["transition_function"](0, 0)))
) # The
# Python type of the state can lead to hard to catch bugs
else: # if continuous or grid space
# self.logger.debug("# TODO for cont. spaces") # transition function is a
# fixed parameterisation for cont. envs. right now.
pass
self.P = lambda s, a: self.transition_function(s, a)
    def init_reward_function(self):
        """Initialises reward function, R.

        For discrete environments (unless a custom MDP was passed in), random
        sequences of non-terminal states are selected to be rewardable,
        according to reward_density, sequence_length, diameter and
        repeats_in_sequences. If make_denser is set, proper prefixes of every
        rewardable sequence also receive partial rewards proportional to the
        prefix length. For continuous environments, the reward functions are
        fixed with a few available choices, so nothing is initialised here.
        """
        # print("Mersenne2, dummy_eval:", self.np_random.get_state()[2], "dummy_eval" in self.config)
        # #TODO Maybe refactor this code and put useful reusable permutation generators, etc. in one library
        if self.config["state_space_type"] == "discrete":
            if self.use_custom_mdp:  # custom/user-defined R
                if not callable(self.config["reward_function"]):
                    # A reward matrix was passed in: wrap it in a lambda so R
                    # is always callable. The state argument is the augmented
                    # state sequence, hence s[-2] below.
                    self.reward_matrix = self.config["reward_function"]
                    self.config["reward_function"] = lambda s, a: self.reward_matrix[
                        s[-2], a
                    ]  # #hardcoded
                    # to be 2nd last state in state sequence passed to reward
                    # function, so that reward is R(s, a) when transition is s, a, r, s'
                    print("reward_matrix inited to:" + str(self.reward_matrix))
            else:
                non_term_state_space_size = (
                    self.action_space_size[0] - self.num_terminal_states
                )

                def get_sequences(maximum, length, fraction, repeats=False, diameter=1):
                    """
                    Returns random sequences of integers

                    maximum: int
                        Max value of the integers in the sequence
                    length: int
                        Length of sequence
                    fraction: float
                        Fraction of total possible sequences to be returned
                    repeats: boolean
                        Allows repeats in returned sequences
                    diameter: int
                        Relates to the diameter of the MDP
                    """
                    sequences = []
                    if repeats:
                        num_possible_sequences = (maximum) ** length
                        num_sel_sequences = int(fraction * num_possible_sequences)
                        if num_sel_sequences == 0:
                            num_sel_sequences = 1
                            warnings.warn(
                                "0 rewardable sequences per independent"
                                " set for given reward_density, sequence_length,"
                                " diameter and terminal_state_density. Setting it to 1."
                            )
                        sel_sequence_nums = self.np_random.choice(
                            num_possible_sequences,
                            size=num_sel_sequences,
                            replace=False,
                        )  # #random # This assumes that all
                        # sequences have an equal likelihood of being selected
                        # for being a reward sequence; This line also makes it
                        # not possible to have this function be portable as
                        # part of a library because it use the np_random
                        # member variable of this class
                        for i_s in range(diameter):  # Allow sequences to begin in
                            # any of the independent sets and therefore this loop is
                            # over the no. of independent sets(= diameter)
                            for i in range(num_sel_sequences):
                                curr_sequence_num = sel_sequence_nums[i]
                                specific_sequence = []
                                while len(specific_sequence) != length:
                                    # Decode curr_sequence_num digit by digit
                                    # (base non_term_state_space_size) into a
                                    # state sequence, shifting each step into
                                    # the appropriate independent set.
                                    specific_sequence.append(
                                        curr_sequence_num % (non_term_state_space_size)
                                        + ((len(specific_sequence) + i_s) % diameter)
                                        * self.action_space_size[0]
                                    )
                                    # #TODO this uses a member variable of the
                                    # class. Add another function param to
                                    # receive this value? Name it independent set size?
                                    curr_sequence_num = curr_sequence_num // (
                                        non_term_state_space_size
                                    )
                                # #bottleneck When we sample sequences here,
                                # it could get very slow if reward_density is
                                # high; alternative would be to assign numbers
                                # to sequences and then sample these numbers
                                # without replacement and take those sequences
                                sequences.append(specific_sequence)
                        self.logger.info(
                            "Total no. of rewarded sequences:"
                            + str(len(sequences))
                            + "Out of"
                            + str(num_possible_sequences)
                            + "per independent set"
                        )
                    else:  # if no repeats
                        assert length <= diameter * maximum, (
                            "When there are no"
                            " repeats in sequences, the sequence length should be"
                            " <= diameter * maximum."
                        )
                        permutations = []
                        for i in range(length):
                            permutations.append(maximum - i // diameter)
                        # permutations = list(range(maximum + 1 - length, maximum + 1))
                        self.logger.info(
                            "No. of choices for each element in a"
                            " possible sequence (Total no. of permutations will be a"
                            " product of this), no. of possible perms per independent"
                            " set: "
                            + str(permutations)
                            + ", "
                            + str(np.prod(permutations))
                        )
                        for i_s in range(diameter):  # Allow sequences to begin in
                            # any of the independent sets and therefore this loop is
                            # over the no. of independent sets(= diameter). Could
                            # maybe sample independent set no. as "part" of
                            # sel_sequence_nums below and avoid this loop?
                            num_possible_permutations = np.prod(permutations)  # Number
                            # of possible permutations/sequences for, say, a
                            # diameter of 3 and 24 total states and
                            # terminal_state_density = 0.25, i.e., 6 non-terminal
                            # states (out of 8 states) per independent set, for
                            # sequence length of 5 is np.prod([6, 6, 6, 5, 5]) * 3;
                            # the * diameter at the end is needed because the
                            # sequence can begin in any of the independent sets;
                            # However, for simplicity, we omit * diameter here and
                            # just perform the same procedure per independent set.
                            # This can lead to slightly fewer rewardable sequences
                            # than should be the case for a given reward_density -
                            # this is due int() in the next step
                            num_sel_sequences = int(
                                fraction * num_possible_permutations
                            )
                            if (
                                num_sel_sequences == 0
                            ):  # ##TODO Remove this test here and above?
                                num_sel_sequences = 1
                                warnings.warn(
                                    "0 rewardable sequences per"
                                    " independent set for given reward_density,"
                                    " sequence_length, diameter and"
                                    " terminal_state_density. Setting it to 1."
                                )
                            # print("Mersenne3:", self.np_random.get_state()[2])
                            sel_sequence_nums = self.np_random.choice(
                                num_possible_permutations,
                                size=num_sel_sequences,
                                replace=False,
                            )  # #random # This assumes that all
                            # sequences have an equal likelihood of being
                            # selected for being a reward sequence; # TODO
                            # this code could be replaced with self.np_random.permutation(
                            # non_term_state_space_size)[self.sequence_length]?
                            # Replacement becomes a problem then! We have to
                            # keep sampling until we have all unique rewardable sequences.
                            # print("Mersenne4:", self.np_random.get_state()[2])
                            total_clashes = 0
                            for i in range(num_sel_sequences):
                                # Decode the selected permutation number into a
                                # concrete state sequence without repeats
                                # (factorial-number-system style decoding).
                                curr_permutation = sel_sequence_nums[i]
                                seq_ = []
                                curr_rem_digits = []
                                for j in range(diameter):
                                    curr_rem_digits.append(
                                        list(range(maximum))
                                    )  # # has to contain every number up to n so
                                    # that any one of them can be picked as part
                                    # of the sequence below
                                for enum, j in enumerate(permutations):  # Goes
                                    # from largest to smallest number among the factors of nPk
                                    rem_ = curr_permutation % j
                                    # rem_ = (enum // maximum) * maximum + rem_
                                    seq_.append(
                                        curr_rem_digits[(enum + i_s) % diameter][rem_]
                                        + ((enum + i_s) % diameter)
                                        * self.action_space_size[0]
                                    )  # Use (enum + i_s)
                                    # to allow other independent sets to have
                                    # states beginning a rewardable sequence
                                    del curr_rem_digits[(enum + i_s) % diameter][rem_]
                                    # print("curr_rem_digits", curr_rem_digits)
                                    curr_permutation = curr_permutation // j
                                if seq_ in sequences:  # #hack
                                    total_clashes += (
                                        1  # #TODO remove these extra checks and
                                    )
                                    # assert below
                                sequences.append(seq_)
                            self.logger.debug(
                                "Number of generated sequences that"
                                " did not clash with an existing one when it was"
                                " generated:" + str(total_clashes)
                            )
                            assert total_clashes == 0, (
                                "None of the generated"
                                " sequences should have clashed with an existing"
                                " rewardable sequence when it was generated. No. of"
                                " times a clash was detected:" + str(total_clashes)
                            )
                            self.logger.info(
                                "Total no. of rewarded sequences:"
                                + str(len(sequences))
                                + "Out of"
                                + str(num_possible_permutations)
                                + "per independent set"
                            )
                    return sequences

                def insert_sequence(sequence):
                    """
                    Inserts rewardable sequences into the rewardable_sequences dict member variable
                    """
                    sequence = tuple(sequence)  # tuples are immutable and can be
                    # used as keys for a dict
                    if callable(self.reward_dist):
                        self.rewardable_sequences[sequence] = self.reward_dist(
                            self.np_random, self.rewardable_sequences
                        )
                    else:
                        self.rewardable_sequences[sequence] = 1.0  # this is the
                        # default reward value, reward scaling will be handled later
                    self.logger.warning(
                        "specific_sequence that will be rewarded" + str(sequence)
                    )
                    # #TODO impose a different distribution for these:
                    # independently sample state for each step of specific
                    # sequence; or conditionally dependent samples if we want
                    # something like DMPs/manifolds
                    if self.make_denser:
                        # Reward every proper prefix of the sequence with a
                        # fraction (prefix length / full length) of its reward.
                        for ss_len in range(1, len(sequence)):
                            sub_sequence = tuple(sequence[:ss_len])
                            if sub_sequence not in self.rewardable_sequences:
                                self.rewardable_sequences[sub_sequence] = 0.0
                            self.rewardable_sequences[sub_sequence] += (
                                self.rewardable_sequences[sequence]
                                * ss_len
                                / len(sequence)
                            )
                            # this could cause problems if we support variable sequence lengths and
                            # there are clashes in selected rewardable sequences

                self.rewardable_sequences = {}
                if self.repeats_in_sequences:
                    rewardable_sequences = get_sequences(
                        maximum=non_term_state_space_size,
                        length=self.sequence_length,
                        fraction=self.reward_density,
                        repeats=True,
                        diameter=self.diameter,
                    )
                else:  # if no repeats_in_sequences
                    rewardable_sequences = get_sequences(
                        maximum=non_term_state_space_size,
                        length=self.sequence_length,
                        fraction=self.reward_density,
                        repeats=False,
                        diameter=self.diameter,
                    )

                # Common to both cases: repeats_in_sequences or not
                if isinstance(self.reward_dist, list):  # Specified as interval:
                    # turn it into a function that hands out evenly spaced
                    # (then shuffled) reward values, one per rewardable
                    # sequence.
                    reward_dist_ = self.reward_dist
                    num_rews = self.diameter * len(rewardable_sequences)
                    print("num_rewardable_sequences set to:", num_rews)
                    if num_rews == 1:
                        rews = [1.0]
                    else:
                        rews = np.linspace(
                            reward_dist_[0], reward_dist_[1], num=num_rews
                        )
                        assert rews[-1] == 1.0
                    self.np_random.shuffle(rews)

                    def get_rews(rng, r_dict):
                        # Hand out the next pre-shuffled reward value; the
                        # current dict size indexes into rews.
                        return rews[len(r_dict)]

                    self.reward_dist = get_rews

                if len(rewardable_sequences) > 1000:
                    warnings.warn(
                        "Too many rewardable sequences and/or too long"
                        " rewardable sequence length. Environment might be too slow."
                        " Please consider setting the reward_density to be lower or"
                        " reducing the sequence length. No. of rewardable sequences:"
                        + str(len(rewardable_sequences))
                    )  # #TODO Maybe even exit the
                    # program if too much memory is (expected to be) taken.; Took
                    # about 80s for 40k iterations of the for loop below on my laptop
                for specific_sequence in rewardable_sequences:
                    insert_sequence(specific_sequence)
                print(
                    "rewardable_sequences: " + str(self.rewardable_sequences)
                )  # #debug print
        elif self.config["state_space_type"] == "continuous":
            # self.logger.debug("# TODO for cont. spaces?: init_reward_function")
            # reward functions are fixed for cont. right now with a few available choices.
            pass
        elif self.config["state_space_type"] == "grid":
            ...  # ###TODO Make sequences compatible with grid
        self.R = lambda s, a: self.reward_function(s, a)
def transition_function(self, state, action):
    """The transition function, P.

    Performs a transition according to the initialised P for discrete
    environments (with dynamics independent for relevant vs irrelevant
    dimension sub-spaces). For continuous environments, we have a fixed
    available option for the dynamics (which is the same for relevant or
    irrelevant dimensions): The order of the system decides the dynamics.
    For an nth order system, the nth order derivative of the state is set
    to the action value / inertia for time_unit seconds. And then the
    dynamics are integrated over the time_unit to obtain the next state.

    Parameters
    ----------
    state : list
        The state that the environment will use to perform a transition.
    action : list
        The action that the environment will use to perform a transition.

    Returns
    -------
    int or np.array
        The state at the end of the current transition
    """
    if self.config["state_space_type"] == "discrete":
        next_state = self.config["transition_function"](state, action)
        if self.transition_noise:
            # With prob. self.transition_noise the intended next state is
            # replaced by one of the other states, chosen uniformly.
            probs = (
                np.ones(shape=(self.state_space_size[0],))
                * self.transition_noise
                / (self.state_space_size[0] - 1)
            )
            probs[next_state] = 1 - self.transition_noise
            # TODO Samples according to new probs to get noisy discrete transition
            new_next_state = self.observation_spaces[0].sample(prob=probs)  # random
            # print("noisy old next_state, new_next_state", next_state, new_next_state)
            if next_state != new_next_state:
                self.logger.info(
                    "NOISE inserted! old next_state, new_next_state"
                    + str(next_state)
                    + str(new_next_state)
                )
                self.total_noisy_transitions_episode += 1
            # print("new probs:", probs, self.relevant_observation_space.sample(prob=probs))
            next_state = new_next_state
            # assert np.sum(probs) == 1, str(np.sum(probs)) + " is not equal to " + str(1)

    elif self.config["state_space_type"] == "continuous":
        # ##TODO implement imagined transitions also for cont. spaces
        if self.use_custom_mdp:
            next_state = self.config["transition_function"](state, action)
        else:
            assert len(action.shape) == 1, (
                "Action should be specified as a 1-D tensor."
                " However, shape of action was: " + str(action.shape)
            )
            assert action.shape[0] == self.action_space_dim, (
                "Action shape is: "
                + str(action.shape[0])
                + ". Expected: "
                + str(self.action_space_dim)
            )
            if self.action_space.contains(action):
                # ### TODO implement for multiple orders, currently only for 1st order systems.
                # if self.dynamics_order == 1:
                #     next_state = state + action * self.time_unit / self.inertia

                # print('self.state_derivatives:', self.state_derivatives)
                # Except the last member of state_derivatives, the other occupy the same
                # place in memory. Could create a new copy of them every time, but I think
                # this should be more efficient and as long as tests don't fail should be
                # fine.

                # action is presumed to be n-th order force ##TODO Could easily scale this
                # per dimension to give different kinds of dynamics per dimension: maybe
                # even sample this scale per dimension from a probability distribution to
                # generate different random Ps?
                self.state_derivatives[-1] = (action / self.inertia)
                factorial_array = scipy.special.factorial(
                    np.arange(1, self.dynamics_order + 1)
                )  # This is just to speed things up as scipy calculates the factorial only for largest array member
                # Advance each derivative by the Taylor expansion over one
                # time_unit using all the higher-order derivatives.
                for i in range(self.dynamics_order):
                    for j in range(self.dynamics_order - i):
                        # print('i, j, self.state_derivatives, (self.time_unit**(j + 1)), factorial_array:', i, j, self.state_derivatives, (self.time_unit**(j + 1)), factorial_array)
                        # Don't need to add previous value as it's already in there at the beginning ##### TODO Keep an old self.state_derivatives and a new one otherwise higher order derivatives will be overwritten before being used by lower order ones.
                        self.state_derivatives[i] += (self.state_derivatives[i + j + 1] *
                            (self.time_unit ** (j + 1)) / factorial_array[j])
                # print('self.state_derivatives:', self.state_derivatives)
                next_state = self.state_derivatives[0]
            else:  # if action is from outside allowed action_space
                next_state = state
                warnings.warn(
                    "WARNING: Action "
                    + str(action)
                    + " out of range of action space. Applying 0 action!!"
                )
        # if "transition_noise" in self.config:
        # NOTE(review): noise/clipping below is applied to both custom and
        # built-in dynamics — confirm this is intended for custom MDPs.
        noise_in_transition = (
            self.transition_noise(self.np_random) if self.transition_noise else 0
        )  # #random
        self.total_abs_noise_in_transition_episode += np.abs(noise_in_transition)
        next_state += noise_in_transition  # ##IMP Noise is only applied to
        # state and not to higher order derivatives
        # TODO Check if next_state is within state space bounds
        if not self.observation_space.contains(next_state):
            self.logger.info(
                "next_state out of bounds. next_state, clipping to"
                + str(next_state)
                + str(
                    np.clip(next_state, -self.state_space_max, self.state_space_max)
                )
            )
            next_state = np.clip(
                next_state, -self.state_space_max, self.state_space_max
            )
            # Could also "reflect"
            # next_state when it goes out of bounds. Would seem more logical
            # for a "wall", but would need to take care of multiple
            # reflections near a corner/edge.

            # Resets all higher order derivatives to 0
            zero_state = np.array([0.0] * (self.state_space_dim), dtype=self.dtype)
            # #####IMP to have copy() otherwise it's the same array
            # (in memory) at every position in the list:
            self.state_derivatives = [
                zero_state.copy() for i in range(self.dynamics_order + 1)
            ]
            self.state_derivatives[0] = next_state

        if self.config["reward_function"] == "move_to_a_point":
            # Reaching the target sphere marks the episode as terminal.
            next_state_rel = np.array(next_state, dtype=self.dtype)[
                self.config["relevant_indices"]
            ]
            dist_ = np.linalg.norm(next_state_rel - self.target_point)
            if dist_ < self.target_radius:
                self.reached_terminal = True

    elif self.config["state_space_type"] == "grid":
        # state passed and returned is an np.array
        # Need to check that dtype is int because Gym doesn't
        if (
            self.action_space.contains(action)
            and np.array(action).dtype == np.int64
        ):
            if self.transition_noise:
                # self.np_random.choice only works for 1-D arrays
                if self.np_random.uniform() < self.transition_noise:  # #random
                    # Resample until a different action is drawn.
                    while True:  # Be careful of infinite loops
                        new_action = list(self.action_space.sample())  # #random
                        if new_action != action:
                            self.logger.info(
                                "NOISE inserted! old action, new_action"
                                + str(action)
                                + str(new_action)
                            )
                            # print(str(action) + str(new_action))
                            self.total_noisy_transitions_episode += 1
                            action = new_action
                            break

            next_state = []
            for i in range(len(self.grid_shape)):
                # actions -1, 0, 1 represent back, noop, forward respt.
                next_state.append(state[i] + action[i])
                if next_state[i] < 0:
                    self.logger.info("Underflow in grid next state. Bouncing back.")
                    next_state[i] = 0
                if next_state[i] >= self.grid_shape[i]:
                    self.logger.info("Overflow in grid next state. Bouncing back.")
                    next_state[i] = self.grid_shape[i] - 1
        else:  # if action is from outside allowed action_space
            next_state = list(state)
            warnings.warn(
                "WARNING: Action " + str(action) + " out of range"
                " of action space. Applying noop action!!"
            )

        if self.config["reward_function"] == "move_to_a_point":
            if self.target_point == next_state:
                self.reached_terminal = True

        next_state = np.array(next_state)

    return next_state
def reward_function(self, state, action):
    """The reward function, R.

    Rewards the sequences selected to be rewardable at initialisation for
    discrete environments. For continuous environments, we have fixed
    available options for the reward function:

    move_to_a_point rewards for moving to a predefined location. It has
    sparse and dense settings.

    move_along_a_line rewards moving along ANY direction in space as long
    as it's a fixed direction for sequence_length consecutive steps.

    Parameters
    ----------
    state : list
        The underlying MDP state (also called augmented state in this code) that the environment uses to calculate its reward. Normally, just the sequence of past states of length delay + sequence_length + 1.
    action : single action dependent on action space
        Action magnitudes are penalised immediately in the case of continuous spaces and, in effect, play no role for discrete spaces as the reward in that case only depends on sequences of states. We say "in effect" because it _is_ used in case of a custom R to calculate R(s, a) but that is equivalent to using the "next" state s' as the reward determining criterion in case of deterministic transitions. _Sequences_ of _actions_ are currently NOT used to calculate the reward. Since the underlying MDP dynamics are deterministic, a state and action map 1-to-1 with the next state and so, just a sequence of _states_ should be enough to calculate the reward.

    Returns
    -------
    double
        The reward at the end of the current transition
    """
    # #TODO Make reward depend on the action sequence too instead of just state sequence, as it is currently?
    delay = self.delay
    sequence_length = self.sequence_length  # NOTE(review): local unused below; self.sequence_length is read directly
    reward = 0.0
    # print("TEST", self.augmented_state[0 : self.augmented_state_length - delay], state, action, self.rewardable_sequences, type(state), type(self.rewardable_sequences))
    state_considered = state  # if imaginary_rollout else self.augmented_state # When we imagine a rollout, the user has to provide full augmented state as the argument!!
    # if not isinstance(state_considered, list):
    #     state_considered = [state_considered] # to get around case when sequence is an int; it should always be a list except if a user passes in a state; would rather force them to pass a list: assert for it!!
    # TODO These asserts are only needed if imaginary_rollout is True, as users then pass in a state sequence
    # if imaginary_rollout:
    #     assert isinstance(state_considered, list), "state passed in should be a list of states containing at the very least the state at beginning of the transition, s, and the one after it, s'. type was: " + str(type(state_considered))
    #     assert len(state_considered) == self.augmented_state_length, "Length of list of states passed should be equal to self.augmented_state_length. It was: " + str(len(state_considered))

    if self.use_custom_mdp:
        reward = self.config["reward_function"](state_considered, action)
        # FIFO buffer implements the reward delay for custom MDPs.
        self.reward_buffer.append(reward)  # ##TODO Modify seq_len and delay
        # code for discrete and continuous case to use buffer too?
        reward = self.reward_buffer[0]
        # print("rewards:", self.reward_buffer, old_reward, reward)
        del self.reward_buffer[0]
    elif self.config["state_space_type"] == "discrete":
        if np.isnan(state_considered[0]):
            pass  # ###IMP: This check is to get around case of
            # augmented_state_length being > 2, i.e. non-vanilla seq_len or
            # delay, because then rewards may be handed out for the initial
            # state being part of a sequence which is not fair since it is
            # handed out without having the agent take an action.
        else:
            self.logger.debug(
                "state_considered for reward:"
                + str(state_considered)
                + " with delay "
                + str(self.delay)
            )
            if not self.reward_every_n_steps or (
                self.reward_every_n_steps
                and self.total_transitions_episode % self.sequence_length == delay
            ):
                # ###TODO also implement this for make_denser case and continuous envs.
                # The candidate sequence is the delayed window of past states.
                sub_seq = tuple(
                    state_considered[1: self.augmented_state_length - delay]
                )
                if sub_seq in self.rewardable_sequences:
                    # print(state_considered, "with delay", self.delay, "rewarded with:", 1)
                    reward += self.rewardable_sequences[sub_seq]
                else:
                    # print(state_considered, "with delay", self.delay, "NOT rewarded.")
                    pass
        self.logger.info("rew" + str(reward))
    elif self.config["state_space_type"] == "continuous":
        # ##TODO Make reward for along a line case to be length of line
        # travelled - sqrt(Sum of Squared distances from the line)? This
        # should help with keeping the mean reward near 0. Since the principal
        # component is always taken to be the direction of travel, this would
        # mean a larger distance covered in that direction and hence would
        # lead to +ve reward always and would mean larger random actions give
        # a larger reward! Should penalise actions in proportion that scale then?
        if np.isnan(state_considered[0][0]):  # Instead of below commented out
            # check, this is more robust for imaginary transitions
            # if self.total_transitions_episode + 1 < self.augmented_state_length:
            # + 1 because augmented_state_length is always 1 greater than seq_len + del
            pass  # #TODO
        else:
            if self.config["reward_function"] == "move_along_a_line":
                # print("######reward test", self.total_transitions_episode, np.array(self.augmented_state), np.array(self.augmented_state).shape)
                # #test: 1. for checking 0 distance for same action being always applied; 2. similar to 1. but for different dynamics orders; 3. similar to 1 but for different action_space_dims; 4. for a known applied action case, check manually the results of the formulae and see that programmatic results match: should also have a unit version of 4. for dist_of_pt_from_line() and an integration version here for total_deviation calc.?.
                # Fit a line through the recent states via SVD; penalise the
                # total deviation of the states from that line.
                data_ = np.array(state_considered, dtype=self.dtype)[
                    1: self.augmented_state_length - delay,
                    self.config["relevant_indices"],
                ]
                data_mean = data_.mean(axis=0)
                uu, dd, vv = np.linalg.svd(data_ - data_mean)
                self.logger.info(
                    "uu.shape, dd.shape, vv.shape ="
                    + str(uu.shape)
                    + str(dd.shape)
                    + str(vv.shape)
                )
                line_end_pts = (
                    vv[0] * np.linspace(-1, 1, 2)[:, np.newaxis]
                )  # vv[0] = 1st
                # eigenvector, corres. to Principal Component #hardcoded -100
                # to 100 to get a "long" line which should make calculations more
                # robust(?: didn't seem to be the case for 1st few trials, so changed it
                # to -1, 1; even tried up to 10000 - seems to get less precise for larger
                # numbers) to numerical issues in dist_of_pt_from_line() below; newaxis
                # added so that expected broadcasting takes place
                line_end_pts += data_mean

                total_deviation = 0
                for data_pt in data_:  # find total distance of all data points from the fit line above
                    total_deviation += dist_of_pt_from_line(
                        data_pt, line_end_pts[0], line_end_pts[-1]
                    )
                self.logger.info(
                    "total_deviation of pts from fit line:" + str(total_deviation)
                )

                reward += -total_deviation / self.sequence_length
            elif self.config["reward_function"] == "move_to_a_point":  # Could
                # generate target points randomly but leaving it to the user to do
                # that. #TODO Generate it randomly to have random Rs?
                if self.make_denser:
                    # Dense reward: distance moved towards the target in the
                    # last (delayed) transition.
                    old_relevant_state = np.array(
                        state_considered, dtype=self.dtype
                    )[-2 - delay, self.config["relevant_indices"]]
                    new_relevant_state = np.array(
                        state_considered, dtype=self.dtype
                    )[-1 - delay, self.config["relevant_indices"]]
                    reward = -np.linalg.norm(new_relevant_state - self.target_point)
                    # Should allow other powers of the distance from target_point,
                    # or more norms?
                    reward += np.linalg.norm(old_relevant_state - self.target_point)
                    # Reward is the distance moved towards the target point.
                    # Should rather be the change in distance to target point, so reward given is +ve if "correct" action was taken and so reward function is more natural (this _is_ the current implementation)
                    # It's true that giving the total -ve distance from target as the loss at every step gives a stronger signal to algorithm to make it move faster towards target but this seems more natural (as in the other case loss/reward go up quadratically with distance from target point while in this case it's linear). The value function is in both cases higher for states further from target. But isn't that okay? Since the greater the challenge (i.e. distance from target), the greater is the achieved overall reward at the end.
                    # #TODO To enable seq_len, we can hand out reward if distance to target point is reduced (or increased - since that also gives a better signal than giving 0 in that case!!) for seq_len consecutive steps, otherwise 0 reward - however we need to hand out fixed reward for every "sequence" achieved otherwise, if we do it by adding the distance moved towards target in the sequence, it leads to much bigger rewards for larger seq_lens because of overlapping consecutive sequences.
                    # TODO also make_denser, sparse rewards only at target
                else:  # sparse reward
                    new_relevant_state = np.array(
                        state_considered, dtype=self.dtype
                    )[-1 - delay, self.config["relevant_indices"]]
                    if (
                        np.linalg.norm(new_relevant_state - self.target_point)
                        < self.target_radius
                    ):
                        reward = 1.0  # Make the episode terminate as well?
                        # Don't need to. If algorithm is smart enough, it will
                        # stay in the radius and earn more reward.
        # Immediate penalty proportional to the action magnitude (see docstring).
        reward -= self.action_loss_weight * np.linalg.norm(
            np.array(action, dtype=self.dtype)
        )
    elif self.config["state_space_type"] == "grid":
        if self.config["reward_function"] == "move_to_a_point":
            if self.make_denser:
                # Dense reward: change in Manhattan distance to the target.
                old_relevant_state = np.array(state_considered)[-2 - delay]
                new_relevant_state = np.array(state_considered)[-1 - delay]
                manhat_dist_old = distance.cityblock(
                    old_relevant_state, np.array(self.target_point)
                )
                manhat_dist_new = distance.cityblock(
                    new_relevant_state, np.array(self.target_point)
                )
                reward += manhat_dist_old - manhat_dist_new
            else:  # sparse reward
                new_relevant_state = np.array(state_considered)[-1 - delay]
                if list(new_relevant_state) == self.target_point:
                    reward += 1.0

    # Book-keeping order matters: total_reward_episode tracks the scaled but
    # noise-free, unshifted reward.
    reward *= self.reward_scale
    noise_in_reward = self.reward_noise(self.np_random) if self.reward_noise else 0
    # #random ###TODO Would be better to parameterise this in terms of state, action and time_step as well. Would need to change implementation to have a queue for the rewards achieved and then pick the reward that was generated delay timesteps ago.
    self.total_abs_noise_in_reward_episode += np.abs(noise_in_reward)
    self.total_reward_episode += reward
    reward += noise_in_reward
    reward += self.reward_shift
    return reward
def step(self, action, imaginary_rollout=False):
    """The step function for the environment.

    Parameters
    ----------
    action : int or np.array
        The action that the environment will use to perform a transition.
    imaginary_rollout: boolean
        Option for the user to perform "imaginary" transitions, e.g., for model-based RL. If set to true, underlying augmented state of the MDP is not changed and user is responsible to maintain and provide a list of states to this function to be able to perform a rollout.

    Returns
    -------
    int or np.array, double, boolean, dict
        The next state, reward, whether the episode terminated and additional info dict at the end of the current transition
    """
    # For imaginary transitions, discussion:
    # 1) Use external observation_space as argument to P() and R(). But then it's not possible for P and R to know underlying MDP state unless we pass it as another argument. This is not desirable as we want P and R to simply be functions of external state/observation and action. 2) The other possibility is how it's currently done: P and R _know_ the underlying state. But in this case, we need an extra imaginary_rollout argument to P and R and we can't perform imaginary rollouts longer than one step without asking the user to maintain a sequence of underlying states and actions to be passed as arguments to P and R.
    # P and R knowing the underlying state seems a poor design choice to me
    # because it makes the code structure more brittle, so I propose that
    # step() handles the underlying state vs external observation conversion
    # and user can use P and R with underlying state. And step should handle
    # the case of imaginary rollouts by building a tree of transitions and
    # allowing rollback to states along the tree. However, user will probably
    # want access to P and R by using only observations as well instead of the
    # underlying state. In this case, P and R need to be aware of underlying
    # state and be able to store imaginary rollouts if needed.

    # Transform multi-discrete to discrete for discrete state spaces with
    # irrelevant dimensions; needed only for imaginary rollouts, otherwise,
    # internal augmented state is used.
    if imaginary_rollout:
        print("Imaginary rollouts are currently not supported.")
        sys.exit(1)

    if self.config["state_space_type"] == "discrete":
        if self.irrelevant_features:
            # Split the 2-tuple state and action into relevant and
            # irrelevant parts; they transition independently.
            state, action, state_irrelevant, action_irrelevant = (
                self.curr_state[0],
                action[0],
                self.curr_state[1],
                action[1],
            )
        else:
            state, action = self.curr_state, action
    else:  # cont. or grid case
        state, action = self.curr_state, action

    # ### TODO Decide whether to give reward before or after transition ("after" would mean taking next state into account and seems more logical to me) - make it a dimension? - R(s) or R(s, a) or R(s, a, s')? I'd say give it after and store the old state in the augmented_state to be able to let the R have any of the above possible forms. That would also solve the problem of implicit 1-step delay with giving it before. _And_ would not give any reward for already being in a rewarding state in the 1st step but _would_ give a reward if 1 moved to a rewardable state - even if called with R(s, a) because s' is stored in the augmented_state! #####IMP
    # ###TODO P uses last state while R uses augmented state; for cont. env, P does know underlying state_derivatives - we don't want this to be the case for the imaginary rollout scenario;
    next_state = self.P(state, action)
    # if imaginary_rollout:
    #     pass
    #     # print("imaginary_rollout") # Since transition_function currently depends only on current state and action, we don't need to do anything here!
    # else:
    # Slide the augmented-state window: drop the oldest state, append s'.
    del self.augmented_state[0]
    if self.config["state_space_type"] == "discrete":
        self.augmented_state.append(next_state)
    elif self.config["state_space_type"] == "continuous":
        self.augmented_state.append(next_state.copy())
    elif self.config["state_space_type"] == "grid":
        # NOTE(review): hardcodes 2 relevant grid dimensions — confirm
        # against how grid_shape is configured elsewhere.
        self.augmented_state.append([next_state[i] for i in range(2)])
    self.total_transitions_episode += 1

    self.reward = self.R(self.augmented_state, action)

    # #irrelevant dimensions part
    if self.config["state_space_type"] == "discrete":
        if self.irrelevant_features:
            next_state_irrelevant = self.config["transition_function_irrelevant"][
                state_irrelevant, action_irrelevant
            ]
            if self.transition_noise:
                probs = (
                    np.ones(shape=(self.state_space_size[1],))
                    * self.transition_noise
                    / (self.state_space_size[1] - 1)
                )
                probs[next_state_irrelevant] = 1 - self.transition_noise
                new_next_state_irrelevant = self.observation_spaces[1].sample(
                    prob=probs
                )
                # #random
                # if next_state_irrelevant != new_next_state_irrelevant:
                #     print("NOISE inserted! old next_state_irrelevant, new_next_state_irrelevant", next_state_irrelevant, new_next_state_irrelevant)
                #     self.total_noisy_transitions_irrelevant_episode += 1
                next_state_irrelevant = new_next_state_irrelevant

    # Transform discrete back to multi-discrete if needed
    if self.config["state_space_type"] == "discrete":
        if self.irrelevant_features:
            next_obs = next_state = (next_state, next_state_irrelevant)
        else:
            next_obs = next_state
    else:  # cont. or grid space
        next_obs = next_state

    if self.image_representations:
        next_obs = self.observation_space.get_concatenated_image(next_state)

    self.curr_state = next_state
    self.curr_obs = next_obs
    # #### TODO curr_state is external state, while we need to check relevant state for terminality! Done - by using augmented_state now instead of curr_state!
    self.done = (self.is_terminal_state(self.augmented_state[-1]) or self.reached_terminal)
    if self.done:
        self.reward += (
            self.term_state_reward * self.reward_scale
        )  # Scale before or after?
    self.logger.info(
        "sas'r: "
        + str(self.augmented_state[-2])
        + " "
        + str(action)
        + " "
        + str(self.augmented_state[-1])
        + " "
        + str(self.reward)
    )
    return self.curr_obs, self.reward, self.done, self.get_augmented_state()
def get_augmented_state(self):
    """Intended to return the full augmented state which would be Markovian. (However, it's not Markovian wrt the noise in P and R because we're not returning the underlying RNG.) Currently, returns the augmented state which is the sequence of length "delay + sequence_length + 1" of past states for both discrete and continuous environments. Additionally, the current state derivatives are also returned for continuous environments.

    Returns
    -------
    dict
        Keys "curr_state", "curr_obs" and "augmented_state" for every state
        space type; continuous environments additionally get
        "state_derivatives".
    """
    # #TODO For noisy processes, this would need the noise distribution and random seed too. Also add the irrelevant state parts, etc.? We don't need the irrelevant parts for the state to be Markovian.
    # The discrete, continuous and grid branches previously triplicated the
    # same dict literal (and an unknown state_space_type raised
    # UnboundLocalError at the return); build the common part once and
    # extend it only where the continuous case needs more.
    augmented_state_dict = {
        "curr_state": self.curr_state,
        "curr_obs": self.curr_obs,
        "augmented_state": self.augmented_state,
    }
    if self.config["state_space_type"] == "continuous":
        # Only continuous environments track higher-order state derivatives.
        augmented_state_dict["state_derivatives"] = self.state_derivatives
    return augmented_state_dict
def reset(self):
    """Resets the environment for the beginning of an episode and samples a start state from rho_0. For discrete environments uses the defined rho_0 directly. For continuous and grid environments, samples a state and resamples until a non-terminal state is sampled.

    Returns
    -------
    int or np.array
        The start state for a new episode.
    """
    # on episode "end" stuff (to not be invoked when reset() called when
    # self.total_episodes = 0; end is in quotes because it may not be a true
    # episode end reached by reaching a terminal state, but reset() may have
    # been called in the middle of an episode):
    if not self.total_episodes == 0:
        self.logger.info(
            "Noise stats for previous episode num.: "
            + str(self.total_episodes)
            + " (total abs. noise in rewards, total abs."
            " noise in transitions, total reward, total noisy transitions, total"
            " transitions): "
            + str(self.total_abs_noise_in_reward_episode)
            + " "
            + str(self.total_abs_noise_in_transition_episode)
            + " "
            + str(self.total_reward_episode)
            + " "
            + str(self.total_noisy_transitions_episode)
            + " "
            + str(self.total_transitions_episode)
        )

    # on episode start stuff:
    self.reward_buffer = [0.0] * (self.delay)
    self.total_episodes += 1

    if self.config["state_space_type"] == "discrete":
        # Sample the relevant part of the start state directly from rho_0.
        self.curr_state_relevant = self.np_random.choice(
            self.state_space_size[0], p=self.config["relevant_init_state_dist"]
        )  # #random
        self.curr_state = self.curr_state_relevant  # curr_state set here
        # already in case if statement below is not entered
        if self.irrelevant_features:
            self.curr_state_irrelevant = self.np_random.choice(
                self.state_space_size[1],
                p=self.config["irrelevant_init_state_dist"],
            )  # #random
            self.curr_state = (self.curr_state_relevant, self.curr_state_irrelevant)
            self.logger.info(
                "RESET called. Relevant part of state reset to:"
                + str(self.curr_state_relevant)
            )
            self.logger.info(
                "Irrelevant part of state reset to:"
                + str(self.curr_state_irrelevant)
            )

        # Pre-fill the past-state window with NaNs so no reward is handed
        # out for "sequences" that predate the first action.
        self.augmented_state = [
            np.nan for i in range(self.augmented_state_length - 1)
        ]
        self.augmented_state.append(self.curr_state_relevant)
        # self.augmented_state = np.array(self.augmented_state) # Do NOT make an
        # np.array out of it because we want to test existence of the array in an
        # array of arrays which is not possible with np.array!
    elif self.config["state_space_type"] == "continuous":
        # Rejection-sample a start state until it lies outside every
        # terminal subspace.
        while True:  # Be careful about infinite loops
            term_space_was_sampled = False
            self.curr_state = self.feature_space.sample()  # #random
            if self.is_terminal_state(self.curr_state):
                j = None
                # Could this sampling be made more efficient? In general, the non-terminal
                # space could have any shape and assiging equal sampling probability to
                # each point in this space is pretty hard.
                for i in range(len(self.term_spaces)):
                    if self.term_spaces[i].contains(self.curr_state):
                        j = i
                        self.logger.info(
                            "A state was sampled in term state subspace."
                            " Therefore, resampling. State was, subspace was:"
                            + str(self.curr_state)
                            + str(j)
                        )  # ##TODO Move this logic
                        # into a new class in Gym spaces that can contain
                        # subspaces for term states! (with warning/error if term
                        # subspaces cover whole state space, or even a lot of it)
                        term_space_was_sampled = True
            if not term_space_was_sampled:
                break

        # init the state derivatives needed for continuous spaces
        zero_state = np.array([0.0] * (self.state_space_dim), dtype=self.dtype)
        self.state_derivatives = [
            zero_state.copy() for i in range(self.dynamics_order + 1)
        ]  # #####IMP to have copy()
        # otherwise it's the same array (in memory) at every position in the list
        self.state_derivatives[0] = self.curr_state

        self.augmented_state = [
            [np.nan] * self.state_space_dim
            for i in range(self.augmented_state_length - 1)
        ]
        self.augmented_state.append(self.curr_state.copy())
    elif self.config["state_space_type"] == "grid":
        # Need to set self.curr_state, self.augmented_state
        while True:  # Be careful about infinite loops
            term_space_was_sampled = False
            # curr_state is an np.array while curr_state_relevant is a list
            self.curr_state = self.feature_space.sample().astype(int)  # #random
            self.curr_state_relevant = list(self.curr_state[[0, 1]])  # #hardcoded
            if self.is_terminal_state(self.curr_state_relevant):
                self.logger.info(
                    "A terminal state was sampled. Therefore,"
                    " resampling. State was:" + str(self.curr_state)
                )
                term_space_was_sampled = True
                # BUG FIX: this used to `break`, which exited the loop and
                # *kept* the terminal start state instead of resampling as
                # the log message (and the continuous branch) intends.
                continue
            if not term_space_was_sampled:
                break

        self.augmented_state = [
            np.nan for i in range(self.augmented_state_length - 1)
        ]
        self.augmented_state.append(self.curr_state_relevant)

    if self.image_representations:
        self.curr_obs = self.observation_space.get_concatenated_image(
            self.curr_state
        )
    else:
        self.curr_obs = self.curr_state

    self.logger.info("RESET called. curr_state reset to: " + str(self.curr_state))

    # Reset per-episode noise/transition book-keeping.
    self.reached_terminal = False
    self.total_abs_noise_in_reward_episode = 0
    self.total_abs_noise_in_transition_episode = (
        0  # only present in continuous spaces
    )
    self.total_noisy_transitions_episode = 0  # only present in discrete spaces
    self.total_reward_episode = 0
    self.total_transitions_episode = 0

    self.logger.info(
        " self.delay, self.sequence_length:"
        + str(self.delay)
        + str(self.sequence_length)
    )

    return self.curr_obs
def seed(self, seed=None):
    """Initialises the Numpy RNG for the environment by calling a utility for this in Gym.

    The environment has its own RNG and so do the state and action spaces held by the environment.

    Parameters
    ----------
    seed : int
        seed to initialise the np_random instance held by the environment. Cannot use numpy.int64 or similar because Gym doesn't accept it.

    Returns
    -------
    int
        The seed returned by Gym
    """
    # A None seed makes Gym generate a random seed for us.
    self.np_random, self.seed_ = gym.utils.seeding.np_random(seed)  # #random
    print(f"Env SEED set to: {seed}. Returned seed from Gym: {self.seed_}")
    return self.seed_
def dist_of_pt_from_line(pt, ptA, ptB):
    """Returns shortest distance of a point from a line defined by 2 points - ptA and ptB.

    Works for any number of dimensions. If ptA and ptB (nearly) coincide, the
    line is degenerate and 0 is returned (this also avoids a division by ~0
    in the projection below).

    Based on: https://softwareengineering.stackexchange.com/questions/168572/distance-from-point-to-n-dimensional-line

    Parameters
    ----------
    pt, ptA, ptB : np.array
        1-D coordinate arrays; ptA and ptB define the (infinite) line.

    Returns
    -------
    float
        Perpendicular distance of pt from the line.
    """
    tolerance = 1e-13
    lineAB = ptA - ptB
    lineApt = ptA - pt
    dot_product = np.dot(lineAB, lineApt)
    if np.linalg.norm(lineAB) < tolerance:
        # Degenerate "line": the two defining points coincide.
        return 0
    else:
        proj = dot_product / np.linalg.norm(lineAB)
        # Pythagoras: squared distance = |A->pt|^2 - projection^2. Floating-point
        # cancellation can make this slightly negative for points (nearly) on
        # the line, so it is clamped to 0 below.
        sq_dist = np.linalg.norm(lineApt) ** 2 - proj ** 2
        if sq_dist < 0:
            # BUG FIX: the original condition was `sq_dist < tolerance`, which
            # is true for *every* negative sq_dist, so the warning fired for
            # benign rounding errors. Only warn when the value is more negative
            # than the allowed tolerance (the message text "Tolerance was: -"
            # shows this was the intent).
            if sq_dist < -tolerance:
                logging.warning(
                    "The squared distance calculated in dist_of_pt_from_line()"
                    " using Pythagoras' theorem was less than the tolerance allowed."
                    " It was: " + str(sq_dist) + ". Tolerance was: -" + str(tolerance)
                )
            sq_dist = 0
        dist = np.sqrt(sq_dist)
        # print('pt, ptA, ptB, lineAB, lineApt, dot_product, proj, dist:', pt, ptA, ptB, lineAB, lineApt, dot_product, proj, dist)
        return dist
def list_to_float_np_array(lis):
    """Converts list to numpy float array.

    Each item is coerced with float(), so numeric strings are accepted too.
    """
    return np.array([float(item) for item in lis])
if __name__ == "__main__":
    # This module is a library; running it directly only points users at the
    # example script.
    print("Please see example.py for how to use RLToyEnv.")
| StarcoderdataPython |
158453 | from django.test import TestCase
from mock import patch
from digest.management.commands.import_importpython import ImportPythonParser
from digest.utils import MockResponse, read_fixture
class ImportPythonWeeklyTest(TestCase):
    """Exercises ImportPythonParser against recorded HTTP fixtures."""

    def setUp(self):
        self.url = "http://importpython.com/newsletter/no/60/"
        # Patch urlopen so the parser reads a canned issue page instead of
        # hitting the network.
        fixture_name = 'fixture_test_import_importpython_test_get_blocks.txt'
        self.patcher = patch(
            'digest.management.commands.import_importpython.urlopen')
        self.urlopen_mock = self.patcher.start()
        self.urlopen_mock.return_value = MockResponse(
            read_fixture(fixture_name))
        self.parser = ImportPythonParser()

    def tearDown(self):
        self.patcher.stop()

    def test_correctly_creates_issue_urls(self):
        # The issue-number to URL scheme changed over the newsletter's history.
        expected_urls = {
            2: "http://importpython.com/static/files/issue2.html",
            12: "http://importpython.com/newsletter/draft/12",
            56: "http://importpython.com/newsletter/no/56",
        }
        for issue_number, expected in expected_urls.items():
            self.assertEqual(ImportPythonParser.get_issue_url(issue_number),
                             expected)
        with self.assertRaises(ValueError):
            ImportPythonParser.get_issue_url(-100)

    def test_correct_number_of_blocks_parsed(self):
        self.assertEqual(len(self.parser.get_blocks(self.url)), 25)

    def test_correctly_parses_block(self):
        first_block = self.parser.get_blocks(self.url)[0]
        self.assertEqual(first_block['link'],
                         "https://talkpython.fm/episodes/show/44/project-jupyter-and-ipython")
        self.assertEqual(first_block['title'],
                         "Project Jupyter and IPython Podcast Interview")
        self.assertEqual(first_block['content'],
                         "One of the fastest growing areas in Python is scientific computing. In scientific computing with Python, there are a few key packages that make it special. These include NumPy / SciPy / and related packages. The one that brings it all together, visually, is IPython (now known as Project Jupyter). That's the topic on episode 44 of Talk Python To Me. ")

    def test_correctly_gets_latest_url(self):
        # Swap in a different fixture that contains the latest-issue link.
        fixture_name = 'fixture_test_import_importpython_test_get_latest_url.txt'
        self._old_return_value = self.urlopen_mock.return_value
        self.urlopen_mock.return_value = MockResponse(read_fixture(fixture_name))
        self.assertEqual(self.parser.get_latest_issue_url(),
                         "http://importpython.com/newsletter/no/72/")
| StarcoderdataPython |
5148972 | import unittest
import cachetools.keys
class CacheKeysTest(unittest.TestCase):
    """Unit tests for the cachetools.keys key-building helpers.

    Each test receives the key factory as a default argument, so the same
    assertions can be rebound to a different factory.
    """

    def test_hashkey(self, key=cachetools.keys.hashkey):
        """hashkey: equal arguments give equal, consistently hashable keys."""
        # Each side builds a fresh key object, so these verify equality of
        # independently constructed keys, not identity.
        self.assertEqual(key(), key())
        self.assertEqual(hash(key()), hash(key()))
        self.assertEqual(key(1, 2, 3), key(1, 2, 3))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 3)))
        self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0))
        self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0)))
        # Different argument order, extra kwargs, or different kwarg names
        # must produce unequal keys.
        self.assertNotEqual(key(1, 2, 3), key(3, 2, 1))
        self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0))
        # Unhashable arguments propagate TypeError from hash().
        with self.assertRaises(TypeError):
            hash(key({}))
        # untyped keys compare equal
        self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0)))

    def test_typedkey(self, key=cachetools.keys.typedkey):
        """typedkey: like hashkey, but equal values of different types differ."""
        self.assertEqual(key(), key())
        self.assertEqual(hash(key()), hash(key()))
        self.assertEqual(key(1, 2, 3), key(1, 2, 3))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 3)))
        self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0))
        self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0)))
        self.assertNotEqual(key(1, 2, 3), key(3, 2, 1))
        self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0))
        with self.assertRaises(TypeError):
            hash(key({}))
        # typed keys compare unequal
        self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))

    def test_addkeys(self, key=cachetools.keys.hashkey):
        """Concatenating keys (or a key with a plain tuple) keeps the key type."""
        self.assertIsInstance(key(), tuple)
        self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key()))
        self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key()))
        self.assertIsInstance((1, 2, 3) + key(4, 5, 6), type(key()))

    def test_pickle(self, key=cachetools.keys.hashkey):
        """Pickling round-trips keys without carrying the cached hash value."""
        import pickle
        for k in [key(), key('abc'), key('abc', 123), key('abc', q='abc')]:
            # white-box test: assert cached hash value is not pickled
            self.assertEqual(len(k.__dict__), 0)
            h = hash(k)
            # hashing lazily caches the value in the instance __dict__
            self.assertEqual(len(k.__dict__), 1)
            pickled = pickle.loads(pickle.dumps(k))
            self.assertEqual(len(pickled.__dict__), 0)
            self.assertEqual(k, pickled)
            self.assertEqual(h, hash(pickled))
| StarcoderdataPython |
def print_hello_world(n):
    """Print 'Hello, world!' n times (no output when n <= 0).

    Args:
        n: number of repetitions.
    """
    # Idiomatic counted loop instead of a manual while/decrement.
    for _ in range(n):
        print('Hello, world!')

print_hello_world(3)
| StarcoderdataPython |
4823355 | #!/usr/bin/env python
# coding: utf-8
# # Frequentist vs Bayesian
#
# <p style="color:blue"> <NAME></p>
# <p style="color:blue">Station10 Ltd</p>
# ```{image} ./Bayesian-vs-frequentist.png
# :class: bg-primary mb-1
# :width: 500px
# :align: center
# ```
# ## 1. The problem formulation
# In a marketing campaign, a retailer sends a coupon to the customer. How likely is it that the customer will buy the product? Maybe we should give it a shot since it won't cost us a lot. Statistically, we can make some experiments, estimate the probability, and then decide how to mark targets based on the results. Global warming is a big topic that is relevant to all people on the planet. As we are an independent thinker, not the ones who follow the crowd, we would like to ask: Is it possible to still do some random experiments for the conclusion? Maybe it is absolutely impossible.
#
# We need impeccable logic and support from data!
#
# Generally speaking, there are two schools of thought in the field of data science and machine learning: the Bayesian and the frequentist. By analyzing their basic assumptions, this blog will explain the theoretical foundations and backgrounds of these two schools.
#
#
# Suppose there is system$f_\theta$, $\theta$ is a vector. The random variable $X$ is generated from the system.
#
# $$
# X \sim p(x|\theta)
# $$
# How do we estimate the parameter $\theta$ from the observation $x$ under the frequentist and the Bayesian approach?
# ## 2. Frequentist
# ```{image} ./god-prospective.jpg
# :class: bg-primary mb-1
# :width: 500px
# :align: center
# ```
# A frequentist believes that the world is deterministic. There is an ontology, and its true value is constant. Our goal is to determine this value or the range in which it lies. However, the true values are unknown. The only way to estimate true values is through the random phenomena that result from them.
#
# Consider, for example, a jar with seven red balls and three blue balls: we randomly take out one ball and record it before putting it back in the jar. The probability that we will take out a red ball each time is $70\%$. The probability, no matter how the experiment is performed, is objective and unique. It is this objective fact that underpins the series of observations. Let us assume the unknown probability is $\theta$ and we can estimate it. If we perform $100$ experiments and get a red ball $72$ times, we can intuitively estimate that it is $72\%$ red, and since there are only $10$ balls in the jar, we can determine that the most plausible estimation is that there are $7$ red balls.
#
# This is in fact an optimization problem with a maximum likelihood function.
#
# Suppose the probability of drawing a red ball is $\theta$; each draw then follows the Bernoulli distribution:
#
# $$
# p(x_i, \theta) = \theta^{x_i}(1-\theta)^{(1-x_i)}
# $$
#
# $x_i = 1$ indicates that we draw a red ball, and $x_i = 0$ a blue ball
#
#
# Suppose we perform $n$ experiments. In theory, $\theta$ can take any value between $0$ and $1$, including $0$ and $1$. This is $\theta\in[0, 1]$. The $n$ experiments can produce arbitrary permutations of length $n$ sequence with elements of $0$s and $1$s. The total number of sequences is the permutations is $2^n$. But, after one round of our experiments, we only get one of $2^n$ possibility in the sequence $0,1$.
#
# Why did we get this sequence and not any other in one experiment? It is the physical reality that there are $10$ balls in the jar, $7$ of which are red balls which makes the observation happened. This objective reality determines $\theta$, and $\theta$ in turn determines which sequence is most likely to be observed. That is, $\theta$ makes the most likely sequence to occur. Represent this idea using mathematical formulation:
#
# $$
# \theta = argmax_\theta\prod_{i=1}^N\theta^{x_i}(1-\theta)^{1-x_i}
# $$
#
# For ease of calculation, take the logarithm of the above equation:
#
# $$
# \theta = \underset{\theta}{\operatorname{argmax}}\sum_{i=1}^Nlog(\theta^{x_i}(1-\theta)^{1-x_i}) = \underset{\theta}{\operatorname{argmax}}(-\sum_{i=1}^Nlog(\theta^{x_i}(1-\theta)^{1-x_i}))
# $$
#
# Let $L=-\sum_{i=1}^Nlog(\theta^{x_i}(1-\theta)^{1-x_i})$, and we calculate the derivative of $L$ with respect to $\theta$๏ผ
#
# $$
# \frac{\partial L}{\partial \theta} = -\sum_{i=1}^N(\frac{x_i}{\theta} + (1-x_i)\frac{-1}{1-\theta}) = 0
# $$
#
# We get:
#
# $$
# \hat \theta = \frac{\sum_i^N x_i}{N}
# $$
#
#
#
# ## 3. Bayesian
# ```{image} ./bayesian.jpg
# :class: bg-primary mb-1
# :width: 500px
# :align: center
# ```
# The Bayesian does not attempt to say that 'events themselves are random', or that 'the world is somehow random in its ontology'. It starts from the point of 'imperfect knowledge of the observer' and constructs a framework for making inferences based on uncertain of the knowledge.
#
# The random event in the eyes of frequentist is not random any more in the view of Bayesian, but is only unknown to the observer. So, the observer makes inferring from the observed evidence. The randomness is not arising from whether the event itself occurred or not, but merely describes the state of the observer's knowledge of the event.
#
# Bayesian probabilities are based on limited knowledge, while frequentists describe the ontology. An observer's knowledge is updated when a new observation is made according to a Bayesian theorem. In Bayesian probability theory, it is assumed that the observer has limited knowledge of the event (for instance, Tom believes *a priori* that a coin is even based on his daily observations and feelings). Once the observer gets new observations (Tom tosses the coin over and over again and discovers that out of 100 tosses, only 20 come up heads), that will affect the observer's original beliefs in the form of logical uncertainty (Tom doubts the coin is even and even begins to conclude it is not even). Because incomplete information prevents the observer from relying on simple logic to form inferences, he must turn to plausible reasoning, which assigns plausibility to a range of possible outcomes.
#
#
# By way of example, Bayesian analysis describes the above process as the observer holding a particular prior belief, gaining new evidence through observation, and combining the new observed evidence and prior knowledge to arrive at a posterior belief, which reflects an updated state of knowledge. Bayesian probabilistic inference is concerned with building a logical system from incomplete knowledge and an assertion as a measure of plausibility. An observer's beliefs or knowledge about a variable is called a probability distribution by a frequentist. In a Bayesian approach, representations of human knowledge are constructed rather than representations of the objective world. Bayesian probabilistic inference is, therefore, in many cases a better approach to solving the problem of observer inference in machine learning and bypasses the discussion about ontology.
#
# The mathematical representation of the above discussion is:
#
#
# $$
# p(\theta|x) = \frac{p(x|\theta)p(\theta)}{p(x)}
# $$
#
# where,
#
#
# $$
# p(x) = \int p(x|\theta)p(\theta)d\theta
# $$
#
# By using the likelihood function, Bayes' theorem relates the prior probability to the posterior probability. We can get the maximum posterior probability:
#
# $$
# \hat \theta = \theta_{max} = \underset{\theta}{\operatorname{argmax}} p(\theta|x)
# = \underset{\theta}{\operatorname{argmax}}\frac{p(x|\theta)p(\theta)}{p(x)}
# = \underset{\theta}{\operatorname{argmax}} p(x|\theta)p(\theta)
# $$
#
#
# The powerful and fascinating part of Bayesian reasoning is that we start with subjective *a priori* beliefs and acquire an objective knowledge of the world by objective iterative observation. For example, we can obtain the posterior probabilities by combining evidence and prior probabilities. However, if we receive new evidence again, we can update our posterior probabilities by combining the previously obtained posterior probabilities with the new evidence. It can be is an iterative process.
#
# Below I will use one simple example to show the process of iterative.
#
#
# The incidence of breast cancer is $0.1\%$ in some place. If a person has breast cancer, the test is positive with probability $90\%$, and if the person has no breast cancer, the test is negative with probability $90\%$. Suppose there is a woman whose first test is positive. What is the probability that she has breast cancer? What if her second test is also positive? How would you make a decision if you were the doctor?
#
# If we have no information, we randomly take one person, who has the probability of $0.001$ to have breast cancer. This is the prior probability: $p(c)=0.001$. If the person has cancer, the test is positive with $p(+|c) = 0.9$, so $p(-|c) = 0.1$. If a person has no cancer, then $p(-|\bar c) = 0.9$ and $p(+|\bar c) = 0.1$.
#
# So, after the first test is positive.
#
# $$
# p(c|+) = \frac{p(c)p(+|c)}{p(c)p(+|c) + p(\bar c)p(+|\bar c)} = \frac{0.001x0.9}{0.001x0.9 + 0.999x0.1} \approx 0.9\%
# $$
#
# So, the first test as positive only means that she has 0.9% probability to have cancer. Maybe at this time the doctor can't confirm the woman has cancer.
#
# How about if the second time is still positive?
#
# We take the posterior probability ($p(c|+) \approx 0.009$) as the prior probability for the second test. So, $p(c) = 0.009$ now.
#
# $$
# p(c|+) = \frac{p(c)p(+|c)}{p(c)p(+|c) + p(\bar c)p(+|\bar c)} = \frac{0.009x0.9}{0.009x0.9 + 0.991x0.1} \approx 7.6\%
# $$
#
# Maybe now the doctor still can't confirm that the woman has cancer. She will need further tests.
#
# This example shows the process of how to update our beliefs with new evidence using Bayesian reasoning.
# ## 4. Comments
# We'll get the method based on how we look at the problem. The frequentist believes that the parameters are objective and do not change, even if they are unknown. The optimization of the likelihood function based on the observations can sometimes produce very extreme results. The Bayesian, on the other hand, believes that all parameters have random values and thus have probability distributions. Bayesian estimates of posterior distributions based on prior knowledge combined with new evidence do not produce extreme results. Because all parameters are random variables with distributions, Bayesians can use some sampling algorithm (e.g., MCMC), making it easier to build complex models.
| StarcoderdataPython |
9644650 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
#
# variables02.py
# Learn Python
#
# Created by <NAME> on 31/03/17.
# Copyright 2017 <NAME>. All rights reserved.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
"""
Simple python implementation: how to strucure a code in python
Varibles scope:
global, local
"""
# Global variables
RA = 145.65
def function():
    """Demonstrate local scope: assigning RA here creates a new local name
    and leaves the module-level RA (145.65) unchanged."""
    RA = 80.0  # this is a local variable will not change the global RA
    #
    # Bug fix: the original `print "... %f",RA` never interpolated RA -- it
    # printed the literal "%f" followed by the value.  Interpolate with the
    # % operator; the parenthesized form works in both Python 2 and 3.
    print("I am the function")
    print(" --> In function : %f" % RA)
#
def function2():
global RA
RA = 80.0 # this is a global variable will change the global RA
#
print "I am the function 2"
print " --> In function 2 : %f",RA
#
if __name__ == '__main__':  # this is the MAIN for python
    # Bug fix throughout: `print "... %f",RA` printed the literal "%f"; use
    # the % operator to interpolate.  Parenthesized print is py2/py3 safe.
    function()
    #
    print("I am the main program")
    print(" --> In __main__: RA %f" % RA)  # RA is still the module value here
    #
    function2()
    print("I am the main program again")
    print(" --> In __main__: RA %f" % RA)  # function2 rebound RA to 80.0
| StarcoderdataPython |
3242909 | <filename>frille-lang/lib/python3.6/site-packages/nbformat/corpus/tests/test_words.py<gh_stars>1-10
"""Tests for nbformat corpus"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
import random
from .. import words
def test_acceptable_nouns_set():
    """The noun corpus is large and every entry is a clean, non-trivial word."""
    assert len(words.acceptable_nouns()) > 1000
    for noun in words.acceptable_nouns():
        assert len(noun) > 3, noun
        assert noun == noun.strip()
def test_acceptable_adjectives_set():
    """The adjective corpus is large and every entry is a clean, non-trivial word."""
    assert len(words.acceptable_adjectives()) > 1000
    for adjective in words.acceptable_adjectives():
        assert len(adjective) > 3, adjective
        assert adjective == adjective.strip()
def test_generate_corpus_id():
    # Generated corpus ids are expected to be reasonably long strings.
    assert len(words.generate_corpus_id()) > 7
    # Probabilistic check: roughly 1 in 5073324 (3714 x 1366) runs will
    # generate the same id twice and make this assertion flake.
    assert words.generate_corpus_id() != words.generate_corpus_id()
| StarcoderdataPython |
6444858 | <gh_stars>0
"""
One stop shopping for processing a DESI exposure
Examples at NERSC:
# ARC: 18 min on 2 nodes
time srun -N 2 -n 60 -C haswell -t 25:00 --qos realtime desi_proc --mpi -n 20191029 -e 22486
# FLAT: 13 min
time srun -n 20 -N 1 -C haswell -t 15:00 --qos realtime desi_proc --mpi -n 20191029 -e 22487
# TWILIGHT: 8min
time srun -n 20 -N 1 -C haswell -t 15:00 --qos realtime desi_proc --mpi -n 20191029 -e 22497
# SKY: 11 min
time srun -n 20 -N 1 -C haswell -t 15:00 --qos realtime desi_proc --mpi -n 20191029 -e 22536
# ZERO: 2 min
time srun -n 20 -N 1 -C haswell -t 15:00 --qos realtime desi_proc --mpi -n 20191029 -e 22561
"""
import time
start_imports = time.time()
import sys, os, argparse, re
import subprocess
from copy import deepcopy
import json
import numpy as np
import fitsio
from astropy.io import fits
from astropy.table import Table,vstack
import glob
import desiutil.timer
import desispec.io
from desispec.io import findfile, replace_prefix, shorten_filename
from desispec.io.util import create_camword, parse_cameras, validate_badamps
from desispec.calibfinder import findcalibfile,CalibFinder,badfibers
from desispec.fiberflat import apply_fiberflat
from desispec.sky import subtract_sky
from desispec.util import runcmd
import desispec.scripts.extract
import desispec.scripts.specex
import desispec.scripts.nightly_bias
from desispec.maskbits import ccdmask
from desitarget.targetmask import desi_mask
from desiutil.log import get_logger, DEBUG, INFO
import desiutil.iers
from desispec.workflow.desi_proc_funcs import assign_mpi, get_desi_proc_parser, update_args_with_headers, \
find_most_recent
from desispec.workflow.desi_proc_funcs import determine_resources, create_desi_proc_batch_script
stop_imports = time.time()
#########################################
######## Begin Body of the Code #########
#########################################
def parse(options=None):
    """Parse desi_proc command-line options.

    Args:
        options: list of argument strings, or None to read sys.argv.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    return get_desi_proc_parser().parse_args(options)
def main(args=None, comm=None):
if args is None:
args = parse()
# elif isinstance(args, (list, tuple)):
# args = parse(args)
log = get_logger()
start_time = time.time()
start_mpi_connect = time.time()
if comm is not None:
#- Use the provided comm to determine rank and size
rank = comm.rank
size = comm.size
else:
#- Check MPI flags and determine the comm, rank, and size given the arguments
comm, rank, size = assign_mpi(do_mpi=args.mpi, do_batch=args.batch, log=log)
stop_mpi_connect = time.time()
#- Start timer; only print log messages from rank 0 (others are silent)
timer = desiutil.timer.Timer(silent=(rank>0))
#- Fill in timing information for steps before we had the timer created
if args.starttime is not None:
timer.start('startup', starttime=args.starttime)
timer.stop('startup', stoptime=start_imports)
timer.start('imports', starttime=start_imports)
timer.stop('imports', stoptime=stop_imports)
timer.start('mpi_connect', starttime=start_mpi_connect)
timer.stop('mpi_connect', stoptime=stop_mpi_connect)
#- Freeze IERS after parsing args so that it doesn't bother if only --help
timer.start('freeze_iers')
desiutil.iers.freeze_iers()
timer.stop('freeze_iers')
#- Preflight checks
timer.start('preflight')
if rank > 0:
#- Let rank 0 fetch these, and then broadcast
args, hdr, camhdr = None, None, None
else:
args, hdr, camhdr = update_args_with_headers(args)
## Make sure badamps is formatted properly
if comm is not None and rank == 0 and args.badamps is not None:
args.badamps = validate_badamps(args.badamps)
if comm is not None:
args = comm.bcast(args, root=0)
hdr = comm.bcast(hdr, root=0)
camhdr = comm.bcast(camhdr, root=0)
known_obstype = ['SCIENCE', 'ARC', 'FLAT', 'ZERO', 'DARK',
'TESTARC', 'TESTFLAT', 'PIXFLAT', 'SKY', 'TWILIGHT', 'OTHER']
if args.obstype not in known_obstype:
raise RuntimeError('obstype {} not in {}'.format(args.obstype, known_obstype))
timer.stop('preflight')
#-------------------------------------------------------------------------
#- Create and submit a batch job if requested
if args.batch:
#exp_str = '{:08d}'.format(args.expid)
jobdesc = args.obstype.lower()
if args.obstype == 'SCIENCE':
# if not doing pre-stdstar fitting or stdstar fitting and if there is
# no flag stopping flux calibration, set job to poststdstar
if args.noprestdstarfit and args.nostdstarfit and (not args.nofluxcalib):
jobdesc = 'poststdstar'
# elif told not to do std or post stdstar but the flag for prestdstar isn't set,
# then perform prestdstar
elif (not args.noprestdstarfit) and args.nostdstarfit and args.nofluxcalib:
jobdesc = 'prestdstar'
#elif (not args.noprestdstarfit) and (not args.nostdstarfit) and (not args.nofluxcalib):
# jobdesc = 'science'
scriptfile = create_desi_proc_batch_script(night=args.night, exp=args.expid, cameras=args.cameras,
jobdesc=jobdesc, queue=args.queue,
runtime=args.runtime,
batch_opts=args.batch_opts, timingfile=args.timingfile,
system_name=args.system_name)
err = 0
if not args.nosubmit:
err = subprocess.call(['sbatch', scriptfile])
sys.exit(err)
#-------------------------------------------------------------------------
#- Proceeding with running
#- What are we going to do?
if rank == 0:
log.info('----------')
log.info('Input {}'.format(args.input))
log.info('Night {} expid {}'.format(args.night, args.expid))
log.info('Obstype {}'.format(args.obstype))
log.info('Cameras {}'.format(args.cameras))
log.info('Output root {}'.format(desispec.io.specprod_root()))
log.info('----------')
#- Create output directories if needed
if rank == 0:
preprocdir = os.path.dirname(findfile('preproc', args.night, args.expid, 'b0'))
expdir = os.path.dirname(findfile('frame', args.night, args.expid, 'b0'))
os.makedirs(preprocdir, exist_ok=True)
os.makedirs(expdir, exist_ok=True)
#- Wait for rank 0 to make directories before proceeding
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Create nightly bias from N>>1 ZEROs, but only for B-cameras
if args.nightlybias:
timer.start('nightlybias')
bcamword = None
if rank == 0:
bcameras = [cam for cam in args.cameras if cam.lower().startswith('b')]
bcamword = parse_cameras(bcameras)
if comm is not None:
bcamword = comm.bcast(bcamword, root=0)
cmd = f"desi_compute_nightly_bias -n {args.night} -c {bcamword}"
if rank == 0:
log.info(f'RUNNING {cmd}')
desispec.scripts.nightly_bias.main(cmd.split()[1:], comm=comm)
timer.stop('nightlybias')
#-------------------------------------------------------------------------
#- Preproc
#- All obstypes get preprocessed
timer.start('fibermap')
#- Assemble fibermap for science exposures
fibermap = None
fibermap_ok = None
if rank == 0 and args.obstype == 'SCIENCE':
fibermap = findfile('fibermap', args.night, args.expid)
if not os.path.exists(fibermap):
tmp = findfile('preproc', args.night, args.expid, 'b0')
preprocdir = os.path.dirname(tmp)
fibermap = os.path.join(preprocdir, os.path.basename(fibermap))
log.info('Creating fibermap {}'.format(fibermap))
cmd = 'assemble_fibermap -n {} -e {} -o {}'.format(
args.night, args.expid, fibermap)
if args.badamps is not None:
cmd += ' --badamps={}'.format(args.badamps)
runcmd(cmd, inputs=[], outputs=[fibermap])
fibermap_ok = os.path.exists(fibermap)
#- Some commissioning files didn't have coords* files that caused assemble_fibermap to fail
#- these are well known failures with no other solution, so for those, just force creation
#- of a fibermap with null coordinate information
if not fibermap_ok and int(args.night) < 20200310:
log.info("Since night is before 20200310, trying to force fibermap creation without coords file")
cmd += ' --force'
runcmd(cmd, inputs=[], outputs=[fibermap])
fibermap_ok = os.path.exists(fibermap)
#- If assemble_fibermap failed and obstype is SCIENCE, exit now
if comm is not None:
fibermap_ok = comm.bcast(fibermap_ok, root=0)
if args.obstype == 'SCIENCE' and not fibermap_ok:
sys.stdout.flush()
if rank == 0:
log.critical('assemble_fibermap failed for science exposure; exiting now')
sys.exit(13)
#- Wait for rank 0 to make fibermap if needed
if comm is not None:
fibermap = comm.bcast(fibermap, root=0)
timer.stop('fibermap')
if not (args.obstype in ['SCIENCE'] and args.noprestdstarfit):
timer.start('preproc')
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
outfile = findfile('preproc', args.night, args.expid, camera)
outdir = os.path.dirname(outfile)
cmd = "desi_preproc -i {} -o {} --outdir {} --cameras {}".format(
args.input, outfile, outdir, camera)
if args.scattered_light :
cmd += " --scattered-light"
if fibermap is not None:
cmd += " --fibermap {}".format(fibermap)
if not args.obstype in ['ARC'] : # never model variance for arcs
if not args.no_model_pixel_variance and args.obstype != 'DARK' :
cmd += " --model-variance"
runcmd(cmd, inputs=[args.input], outputs=[outfile])
timer.stop('preproc')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Get input PSFs
timer.start('findpsf')
input_psf = dict()
if rank == 0 and args.obstype not in ['DARK',]:
for camera in args.cameras :
if args.psf is not None :
input_psf[camera] = args.psf
elif args.calibnight is not None :
# look for a psfnight psf for this calib night
psfnightfile = findfile('psfnight', args.calibnight, args.expid, camera)
if not os.path.isfile(psfnightfile) :
log.error("no {}".format(psfnightfile))
raise IOError("no {}".format(psfnightfile))
input_psf[camera] = psfnightfile
else :
# look for a psfnight psf
psfnightfile = findfile('psfnight', args.night, args.expid, camera)
if os.path.isfile(psfnightfile) :
input_psf[camera] = psfnightfile
elif args.most_recent_calib:
nightfile = find_most_recent(args.night, file_type='psfnight')
if nightfile is None:
input_psf[camera] = findcalibfile([hdr, camhdr[camera]], 'PSF')
else:
input_psf[camera] = nightfile
else :
input_psf[camera] = findcalibfile([hdr, camhdr[camera]], 'PSF')
log.info("Will use input PSF : {}".format(input_psf[camera]))
if comm is not None:
input_psf = comm.bcast(input_psf, root=0)
timer.stop('findpsf')
#-------------------------------------------------------------------------
#- Dark (to detect bad columns)
if args.obstype == 'DARK' :
# check exposure time and perform a dark inspection only
# if it is a 300s exposure
exptime = None
if rank == 0 :
rawfilename=findfile('raw', args.night, args.expid)
head=fitsio.read_header(rawfilename,1)
exptime=head["EXPTIME"]
if comm is not None :
exptime = comm.bcast(exptime, root=0)
if exptime > 270 and exptime < 330 :
timer.start('inspect_dark')
if rank == 0 :
log.info('Starting desi_inspect_dark at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
preprocfile = findfile('preproc', args.night, args.expid, camera)
badcolumnsfile = findfile('badcolumns', night=args.night, camera=camera)
if not os.path.isfile(badcolumnsfile) :
cmd = "desi_inspect_dark"
cmd += " -i {}".format(preprocfile)
cmd += " --badcol-table {}".format(badcolumnsfile)
runcmd(cmd, inputs=[preprocfile], outputs=[badcolumnsfile])
else:
log.info(f'{badcolumnsfile} already exists; skipping desi_inspect_dark')
if comm is not None :
comm.barrier()
timer.stop('inspect_dark')
elif rank == 0:
log.warning(f'Not running desi_inspect_dark for DARK with exptime={exptime:.1f}')
#-------------------------------------------------------------------------
#- Traceshift
if ( args.obstype in ['FLAT', 'TESTFLAT', 'SKY', 'TWILIGHT'] ) or \
( args.obstype in ['SCIENCE'] and (not args.noprestdstarfit) ):
timer.start('traceshift')
if rank == 0 and args.traceshift :
log.info('Starting traceshift at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
preprocfile = findfile('preproc', args.night, args.expid, camera)
inpsf = input_psf[camera]
outpsf = findfile('psf', args.night, args.expid, camera)
if not os.path.isfile(outpsf) :
if args.traceshift :
cmd = "desi_compute_trace_shifts"
cmd += " -i {}".format(preprocfile)
cmd += " --psf {}".format(inpsf)
cmd += " --outpsf {}".format(outpsf)
cmd += " --degxx 2 --degxy 0"
if args.obstype in ['FLAT', 'TESTFLAT', 'TWILIGHT'] :
cmd += " --continuum"
else :
cmd += " --degyx 2 --degyy 0"
if args.obstype in ['SCIENCE', 'SKY']:
cmd += ' --sky'
else :
cmd = "ln -s {} {}".format(inpsf,outpsf)
runcmd(cmd, inputs=[preprocfile, inpsf], outputs=[outpsf])
else :
log.info("PSF {} exists".format(outpsf))
timer.stop('traceshift')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- PSF
#- MPI parallelize this step
if args.obstype in ['ARC', 'TESTARC']:
timer.start('arc_traceshift')
if rank == 0:
log.info('Starting traceshift before specex PSF fit at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
preprocfile = findfile('preproc', args.night, args.expid, camera)
inpsf = input_psf[camera]
outpsf = findfile('psf', args.night, args.expid, camera)
outpsf = replace_prefix(outpsf, "psf", "shifted-input-psf")
if not os.path.isfile(outpsf) :
cmd = "desi_compute_trace_shifts"
cmd += " -i {}".format(preprocfile)
cmd += " --psf {}".format(inpsf)
cmd += " --outpsf {}".format(outpsf)
cmd += " --degxx 0 --degxy 0 --degyx 0 --degyy 0"
cmd += ' --arc-lamps'
runcmd(cmd, inputs=[preprocfile, inpsf], outputs=[outpsf])
else :
log.info("PSF {} exists".format(outpsf))
timer.stop('arc_traceshift')
if comm is not None:
comm.barrier()
timer.start('psf')
if rank == 0:
log.info('Starting specex PSF fitting at {}'.format(time.asctime()))
if rank > 0:
cmds = inputs = outputs = None
else:
cmds = dict()
inputs = dict()
outputs = dict()
for camera in args.cameras:
preprocfile = findfile('preproc', args.night, args.expid, camera)
tmpname = findfile('psf', args.night, args.expid, camera)
inpsf = replace_prefix(tmpname,"psf","shifted-input-psf")
outpsf = replace_prefix(tmpname,"psf","fit-psf")
log.info("now run specex psf fit")
cmd = 'desi_compute_psf'
cmd += ' --input-image {}'.format(preprocfile)
cmd += ' --input-psf {}'.format(inpsf)
cmd += ' --output-psf {}'.format(outpsf)
# fibers to ignore for the PSF fit
# specex uses the fiber index in a camera
fibers_to_ignore = badfibers([hdr, camhdr[camera]],["BROKENFIBERS","BADCOLUMNFIBERS"])%500
if fibers_to_ignore.size>0 :
fibers_to_ignore_str=str(fibers_to_ignore[0])
for fiber in fibers_to_ignore[1:] :
fibers_to_ignore_str+=",{}".format(fiber)
cmd += ' --broken-fibers {}'.format(fibers_to_ignore_str)
if rank == 0 :
log.warning('broken fibers: {}'.format(fibers_to_ignore_str))
if not os.path.exists(outpsf):
cmds[camera] = cmd
inputs[camera] = [preprocfile, inpsf]
outputs[camera] = [outpsf,]
if comm is not None:
cmds = comm.bcast(cmds, root=0)
desispec.scripts.specex.run(comm,cmds,args.cameras)
else:
log.warning('fitting PSFs without MPI parallelism; this will be SLOW')
for camera in args.cameras:
if camera in cmds:
runcmd(cmds[camera], inputs=inputs[camera], outputs=outputs[camera])
if comm is not None:
comm.barrier()
# loop on all cameras and interpolate bad fibers
for camera in args.cameras[rank::size]:
t0 = time.time()
psfname = findfile('psf', args.night, args.expid, camera)
inpsf = replace_prefix(psfname,"psf","fit-psf")
#- Check if a noisy amp might have corrupted this PSF;
#- if so, rename to *.badreadnoise
#- Currently the data is flagged per amp (25% of pixels), but do
#- more generic test for 12.5% of pixels (half of one amp)
log.info(f'Rank {rank} checking for noisy input CCD amps')
preprocfile = findfile('preproc', args.night, args.expid, camera)
mask = fitsio.read(preprocfile, 'MASK')
noisyfrac = np.sum((mask & ccdmask.BADREADNOISE) != 0) / mask.size
if noisyfrac > 0.25*0.5:
log.error(f"{100*noisyfrac:.0f}% of {camera} input pixels have bad readnoise; don't use this PSF")
os.rename(inpsf, inpsf+'.badreadnoise')
continue
log.info(f'Rank {rank} interpolating {camera} PSF over bad fibers')
# fibers to ignore for the PSF fit
# specex uses the fiber index in a camera
fibers_to_ignore = badfibers([hdr, camhdr[camera]],["BROKENFIBERS","BADCOLUMNFIBERS"])%500
if fibers_to_ignore.size>0 :
fibers_to_ignore_str=str(fibers_to_ignore[0])
for fiber in fibers_to_ignore[1:] :
fibers_to_ignore_str+=",{}".format(fiber)
outpsf = replace_prefix(psfname,"psf","fit-psf-fixed-listed")
if os.path.isfile(inpsf) and not os.path.isfile(outpsf):
cmd = 'desi_interpolate_fiber_psf'
cmd += ' --infile {}'.format(inpsf)
cmd += ' --outfile {}'.format(outpsf)
cmd += ' --fibers {}'.format(fibers_to_ignore_str)
log.info('For camera {} interpolating PSF for fibers: {}'.format(camera,fibers_to_ignore_str))
runcmd(cmd, inputs=[inpsf], outputs=[outpsf])
if os.path.isfile(outpsf) :
os.rename(inpsf,inpsf.replace("fit-psf","fit-psf-before-listed-fix"))
subprocess.call('cp {} {}'.format(outpsf,inpsf),shell=True)
dt = time.time() - t0
log.info(f'Rank {rank} {camera} PSF interpolation took {dt:.1f} sec')
timer.stop('psf')
#-------------------------------------------------------------------------
#- Merge PSF of night if applicable
#if args.obstype in ['ARC']:
if False:
if rank == 0:
for camera in args.cameras :
psfnightfile = findfile('psfnight', args.night, args.expid, camera)
if not os.path.isfile(psfnightfile) : # we still don't have a psf night, see if we can compute it ...
psfs = glob.glob(findfile('psf', args.night, args.expid, camera).replace("psf","fit-psf").replace(str(args.expid),"*"))
log.info("Number of PSF for night={} camera={} = {}".format(args.night,camera,len(psfs)))
if len(psfs)>4 : # lets do it!
log.info("Computing psfnight ...")
dirname=os.path.dirname(psfnightfile)
if not os.path.isdir(dirname) :
os.makedirs(dirname)
desispec.scripts.specex.mean_psf(psfs,psfnightfile)
if os.path.isfile(psfnightfile) : # now use this one
input_psf[camera] = psfnightfile
#-------------------------------------------------------------------------
#- Extract
#- This is MPI parallel so handle a bit differently
# maybe add ARC and TESTARC too
if ( args.obstype in ['FLAT', 'TESTFLAT', 'SKY', 'TWILIGHT'] ) or \
( args.obstype in ['SCIENCE'] and (not args.noprestdstarfit) ):
timer.start('extract')
if rank == 0:
log.info('Starting extractions at {}'.format(time.asctime()))
if rank > 0:
cmds = inputs = outputs = None
else:
#- rank 0 collects commands to broadcast to others
cmds = dict()
inputs = dict()
outputs = dict()
for camera in args.cameras:
cmd = 'desi_extract_spectra'
#- Based on data from SM1-SM8, looking at central and edge fibers
#- with in mind overlapping arc lamps lines
if camera.startswith('b'):
cmd += ' -w 3600.0,5800.0,0.8'
elif camera.startswith('r'):
cmd += ' -w 5760.0,7620.0,0.8'
elif camera.startswith('z'):
cmd += ' -w 7520.0,9824.0,0.8'
preprocfile = findfile('preproc', args.night, args.expid, camera)
psffile = findfile('psf', args.night, args.expid, camera)
finalframefile = findfile('frame', args.night, args.expid, camera)
if os.path.exists(finalframefile):
log.info('{} already exists; not regenerating'.format(
os.path.basename(finalframefile)))
continue
#- finalframefile doesn't exist; proceed with command
framefile = finalframefile.replace(".fits","-no-badcolumn-mask.fits")
cmd += ' -i {}'.format(preprocfile)
cmd += ' -p {}'.format(psffile)
cmd += ' -o {}'.format(framefile)
cmd += ' --psferr 0.1'
if args.obstype == 'SCIENCE' or args.obstype == 'SKY' :
log.info('Include barycentric correction')
cmd += ' --barycentric-correction'
cmds[camera] = cmd
inputs[camera] = [preprocfile, psffile]
outputs[camera] = [framefile,]
#- TODO: refactor/combine this with PSF comm splitting logic
if comm is not None:
cmds = comm.bcast(cmds, root=0)
inputs = comm.bcast(inputs, root=0)
outputs = comm.bcast(outputs, root=0)
#- split communicator by 20 (number of bundles)
extract_size = 20
if (rank == 0) and (size%extract_size != 0):
log.warning('MPI size={} should be evenly divisible by {}'.format(
size, extract_size))
extract_group = rank // extract_size
num_extract_groups = (size + extract_size - 1) // extract_size
comm_extract = comm.Split(color=extract_group)
for i in range(extract_group, len(args.cameras), num_extract_groups):
camera = args.cameras[i]
if camera in cmds:
cmdargs = cmds[camera].split()[1:]
extract_args = desispec.scripts.extract.parse(cmdargs)
if comm_extract.rank == 0:
print('RUNNING: {}'.format(cmds[camera]))
desispec.scripts.extract.main_mpi(extract_args, comm=comm_extract)
comm.barrier()
else:
log.warning('running extractions without MPI parallelism; this will be SLOW')
for camera in args.cameras:
if camera in cmds:
runcmd(cmds[camera], inputs=inputs[camera], outputs=outputs[camera])
timer.stop('extract')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Badcolumn specmask and fibermask
if ( args.obstype in ['FLAT', 'TESTFLAT', 'SKY', 'TWILIGHT'] ) or \
( args.obstype in ['SCIENCE'] and (not args.noprestdstarfit) ):
if rank==0 :
log.info('Starting desi_compute_badcolumn_mask at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
outfile = findfile('frame', args.night, args.expid, camera)
infile = outfile.replace(".fits","-no-badcolumn-mask.fits")
psffile = findfile('psf', args.night, args.expid, camera)
badcolfile = findfile('badcolumns', night=args.night, camera=camera)
cmd = "desi_compute_badcolumn_mask -i {} -o {} --psf {} --badcolumns {}".format(
infile, outfile, psffile, badcolfile)
if os.path.exists(outfile):
log.info('{} already exists; not (re-)applying bad column mask'.format(os.path.basename(outfile)))
continue
if os.path.exists(badcolfile):
runcmd(cmd, inputs=[infile,psffile,badcolfile], outputs=[outfile])
#- if successful, remove temporary frame-*-no-badcolumn-mask
if os.path.isfile(outfile) :
log.info("rm "+infile)
os.unlink(infile)
else:
log.warning(f'Missing {badcolfile}; not applying badcol mask')
log.info(f"mv {infile} {outfile}")
os.rename(infile, outfile)
if comm is not None :
comm.barrier()
#-------------------------------------------------------------------------
#- Fiberflat
if args.obstype in ['FLAT', 'TESTFLAT'] :
exptime = None
if rank == 0 :
rawfilename=findfile('raw', args.night, args.expid)
head=fitsio.read_header(rawfilename,1)
exptime=head["EXPTIME"]
if comm is not None :
exptime = comm.bcast(exptime, root=0)
if exptime > 10:
timer.start('fiberflat')
if rank == 0:
log.info('Flat exposure time was greater than 10 seconds')
log.info('Starting fiberflats at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
framefile = findfile('frame', args.night, args.expid, camera)
fiberflatfile = findfile('fiberflat', args.night, args.expid, camera)
cmd = "desi_compute_fiberflat"
cmd += " -i {}".format(framefile)
cmd += " -o {}".format(fiberflatfile)
runcmd(cmd, inputs=[framefile,], outputs=[fiberflatfile,])
timer.stop('fiberflat')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Get input fiberflat
if args.obstype in ['SCIENCE', 'SKY'] and (not args.nofiberflat):
timer.start('find_fiberflat')
input_fiberflat = dict()
if rank == 0:
for camera in args.cameras :
if args.fiberflat is not None :
input_fiberflat[camera] = args.fiberflat
elif args.calibnight is not None :
# look for a fiberflatnight for this calib night
fiberflatnightfile = findfile('fiberflatnight',
args.calibnight, args.expid, camera)
if not os.path.isfile(fiberflatnightfile) :
log.error("no {}".format(fiberflatnightfile))
raise IOError("no {}".format(fiberflatnightfile))
input_fiberflat[camera] = fiberflatnightfile
else :
# look for a fiberflatnight fiberflat
fiberflatnightfile = findfile('fiberflatnight',
args.night, args.expid, camera)
if os.path.isfile(fiberflatnightfile) :
input_fiberflat[camera] = fiberflatnightfile
elif args.most_recent_calib:
nightfile = find_most_recent(args.night, file_type='fiberflatnight')
if nightfile is None:
input_fiberflat[camera] = findcalibfile([hdr, camhdr[camera]], 'FIBERFLAT')
else:
input_fiberflat[camera] = nightfile
else :
input_fiberflat[camera] = findcalibfile(
[hdr, camhdr[camera]], 'FIBERFLAT')
log.info("Will use input FIBERFLAT: {}".format(input_fiberflat[camera]))
if comm is not None:
input_fiberflat = comm.bcast(input_fiberflat, root=0)
timer.stop('find_fiberflat')
#-------------------------------------------------------------------------
#- Apply fiberflat and write fframe file
if args.obstype in ['SCIENCE', 'SKY'] and args.fframe and \
( not args.nofiberflat ) and (not args.noprestdstarfit):
timer.start('apply_fiberflat')
if rank == 0:
log.info('Applying fiberflat at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
fframefile = findfile('fframe', args.night, args.expid, camera)
if not os.path.exists(fframefile):
framefile = findfile('frame', args.night, args.expid, camera)
fr = desispec.io.read_frame(framefile)
flatfilename=input_fiberflat[camera]
if flatfilename is not None :
ff = desispec.io.read_fiberflat(flatfilename)
fr.meta['FIBERFLT'] = desispec.io.shorten_filename(flatfilename)
apply_fiberflat(fr, ff)
fframefile = findfile('fframe', args.night, args.expid, camera)
desispec.io.write_frame(fframefile, fr)
else :
log.warning("Missing fiberflat for camera {}".format(camera))
timer.stop('apply_fiberflat')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Select random sky fibers (inplace update of frame file)
#- TODO: move this to a function somewhere
#- TODO: this assigns different sky fibers to each frame of same spectrograph
if (args.obstype in ['SKY', 'SCIENCE']) and (not args.noskysub) and (not args.noprestdstarfit):
timer.start('picksky')
if rank == 0:
log.info('Picking sky fibers at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
framefile = findfile('frame', args.night, args.expid, camera)
orig_frame = desispec.io.read_frame(framefile)
#- Make a copy so that we can apply fiberflat
fr = deepcopy(orig_frame)
if np.any(fr.fibermap['OBJTYPE'] == 'SKY'):
log.info('{} sky fibers already set; skipping'.format(
os.path.basename(framefile)))
continue
#- Apply fiberflat then select random fibers below a flux cut
flatfilename=input_fiberflat[camera]
if flatfilename is None :
log.error("No fiberflat for {}".format(camera))
continue
ff = desispec.io.read_fiberflat(flatfilename)
apply_fiberflat(fr, ff)
sumflux = np.sum(fr.flux, axis=1)
fluxcut = np.percentile(sumflux, 30)
iisky = np.where(sumflux < fluxcut)[0]
iisky = np.random.choice(iisky, size=100, replace=False)
#- Update fibermap or original frame and write out
orig_frame.fibermap['OBJTYPE'][iisky] = 'SKY'
orig_frame.fibermap['DESI_TARGET'][iisky] |= desi_mask.SKY
desispec.io.write_frame(framefile, orig_frame)
timer.stop('picksky')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Sky subtraction
if args.obstype in ['SCIENCE', 'SKY'] and (not args.noskysub ) and (not args.noprestdstarfit):
timer.start('skysub')
if rank == 0:
log.info('Starting sky subtraction at {}'.format(time.asctime()))
for i in range(rank, len(args.cameras), size):
camera = args.cameras[i]
framefile = findfile('frame', args.night, args.expid, camera)
hdr = fitsio.read_header(framefile, 'FLUX')
fiberflatfile=input_fiberflat[camera]
if fiberflatfile is None :
log.error("No fiberflat for {}".format(camera))
continue
skyfile = findfile('sky', args.night, args.expid, camera)
cmd = "desi_compute_sky"
cmd += " -i {}".format(framefile)
cmd += " --fiberflat {}".format(fiberflatfile)
cmd += " --o {}".format(skyfile)
if args.no_extra_variance :
cmd += " --no-extra-variance"
if not args.no_sky_wavelength_adjustment : cmd += " --adjust-wavelength"
if not args.no_sky_lsf_adjustment : cmd += " --adjust-lsf"
if (not args.no_sky_wavelength_adjustment) and (not args.no_sky_lsf_adjustment) and args.save_sky_adjustments :
cmd += " --save-adjustments {}".format(skyfile.replace("sky-","skycorr-"))
if args.adjust_sky_with_more_fibers :
cmd += " --adjust-with-more-fibers"
if (not args.no_sky_wavelength_adjustment) or (not args.no_sky_lsf_adjustment) :
pca_corr_filename = findcalibfile([hdr, camhdr[camera]], 'SKYCORR')
if pca_corr_filename is not None :
cmd += " --pca-corr {}".format(pca_corr_filename)
else :
log.warning("No SKYCORR file, do you need to update DESI_SPECTRO_CALIB?")
runcmd(cmd, inputs=[framefile, fiberflatfile], outputs=[skyfile,])
#- sframe = flatfielded sky-subtracted but not flux calibrated frame
#- Note: this re-reads and re-does steps previously done for picking
#- sky fibers; desi_proc is about human efficiency,
#- not I/O or CPU efficiency...
sframefile = desispec.io.findfile('sframe', args.night, args.expid, camera)
if not os.path.exists(sframefile):
frame = desispec.io.read_frame(framefile)
fiberflat = desispec.io.read_fiberflat(fiberflatfile)
sky = desispec.io.read_sky(skyfile)
apply_fiberflat(frame, fiberflat)
subtract_sky(frame, sky, apply_throughput_correction=True)
frame.meta['IN_SKY'] = shorten_filename(skyfile)
frame.meta['FIBERFLT'] = shorten_filename(fiberflatfile)
desispec.io.write_frame(sframefile, frame)
timer.stop('skysub')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Standard Star Fitting
if args.obstype in ['SCIENCE',] and \
(not args.noskysub ) and \
(not args.nostdstarfit) :
timer.start('stdstarfit')
if rank == 0:
log.info('Starting flux calibration at {}'.format(time.asctime()))
#- Group inputs by spectrograph
framefiles = dict()
skyfiles = dict()
fiberflatfiles = dict()
night, expid = args.night, args.expid #- shorter
for camera in args.cameras:
sp = int(camera[1])
if sp not in framefiles:
framefiles[sp] = list()
skyfiles[sp] = list()
fiberflatfiles[sp] = list()
framefiles[sp].append(findfile('frame', night, expid, camera))
skyfiles[sp].append(findfile('sky', night, expid, camera))
fiberflatfiles[sp].append(input_fiberflat[camera])
#- Hardcoded stdstar model version
starmodels = os.path.join(
os.getenv('DESI_BASIS_TEMPLATES'), 'stdstar_templates_v2.2.fits')
#- Fit stdstars per spectrograph (not per-camera)
spectro_nums = sorted(framefiles.keys())
## for sp in spectro_nums[rank::size]:
for i in range(rank, len(spectro_nums), size):
sp = spectro_nums[i]
stdfile = findfile('stdstars', night, expid, spectrograph=sp)
cmd = "desi_fit_stdstars"
cmd += " --frames {}".format(' '.join(framefiles[sp]))
cmd += " --skymodels {}".format(' '.join(skyfiles[sp]))
cmd += " --fiberflats {}".format(' '.join(fiberflatfiles[sp]))
cmd += " --starmodels {}".format(starmodels)
cmd += " --outfile {}".format(stdfile)
cmd += " --delta-color 0.1"
if args.maxstdstars is not None:
cmd += " --maxstdstars {}".format(args.maxstdstars)
inputs = framefiles[sp] + skyfiles[sp] + fiberflatfiles[sp]
runcmd(cmd, inputs=inputs, outputs=[stdfile])
timer.stop('stdstarfit')
if comm is not None:
comm.barrier()
# -------------------------------------------------------------------------
# - Flux calibration
def list2str(xx):
    """Render *xx* as one string: every item stringified and preceded by a single space."""
    return "".join(" " + str(item) for item in xx)
if args.obstype in ['SCIENCE'] and \
(not args.noskysub) and \
(not args.nofluxcalib):
timer.start('fluxcalib')
night, expid = args.night, args.expid #- shorter
if rank == 0 :
r_cameras = []
for camera in args.cameras :
if camera[0] == 'r' :
r_cameras.append(camera)
if len(r_cameras)>0 :
outfile = findfile('calibstars',night, expid)
frames = list2str([findfile('frame', night, expid, camera) for camera in r_cameras])
fiberflats = list2str([input_fiberflat[camera] for camera in r_cameras])
skys = list2str([findfile('sky', night, expid, camera) for camera in r_cameras])
models = list2str([findfile('stdstars', night, expid,spectrograph=int(camera[1])) for camera in r_cameras])
cmd = f"desi_select_calib_stars --frames {frames} --fiberflats {fiberflats} --skys {skys} --models {models} -o {outfile}"
cmd += " --delta-color-cut 0.1"
runcmd(cmd,inputs=[],outputs=[outfile,])
if comm is not None:
comm.barrier()
#- Compute flux calibration vectors per camera
for camera in args.cameras[rank::size]:
framefile = findfile('frame', night, expid, camera)
skyfile = findfile('sky', night, expid, camera)
spectrograph = int(camera[1])
stdfile = findfile('stdstars', night, expid,spectrograph=spectrograph)
calibfile = findfile('fluxcalib', night, expid, camera)
calibstars = findfile('calibstars',night, expid)
fiberflatfile = input_fiberflat[camera]
cmd = "desi_compute_fluxcalibration"
cmd += " --infile {}".format(framefile)
cmd += " --sky {}".format(skyfile)
cmd += " --fiberflat {}".format(fiberflatfile)
cmd += " --models {}".format(stdfile)
cmd += " --outfile {}".format(calibfile)
cmd += " --selected-calibration-stars {}".format(calibstars)
inputs = [framefile, skyfile, fiberflatfile, stdfile, calibstars]
runcmd(cmd, inputs=inputs, outputs=[calibfile,])
timer.stop('fluxcalib')
if comm is not None:
comm.barrier()
#-------------------------------------------------------------------------
#- Applying flux calibration
if args.obstype in ['SCIENCE',] and (not args.noskysub ) and (not args.nofluxcalib) :
night, expid = args.night, args.expid #- shorter
timer.start('applycalib')
if rank == 0:
log.info('Starting cframe file creation at {}'.format(time.asctime()))
for camera in args.cameras[rank::size]:
framefile = findfile('frame', night, expid, camera)
skyfile = findfile('sky', night, expid, camera)
spectrograph = int(camera[1])
stdfile = findfile('stdstars', night, expid, spectrograph=spectrograph)
calibfile = findfile('fluxcalib', night, expid, camera)
cframefile = findfile('cframe', night, expid, camera)
fiberflatfile = input_fiberflat[camera]
cmd = "desi_process_exposure"
cmd += " --infile {}".format(framefile)
cmd += " --fiberflat {}".format(fiberflatfile)
cmd += " --sky {}".format(skyfile)
cmd += " --calib {}".format(calibfile)
cmd += " --outfile {}".format(cframefile)
cmd += " --cosmics-nsig 6"
if args.no_xtalk :
cmd += " --no-xtalk"
inputs = [framefile, fiberflatfile, skyfile, calibfile]
runcmd(cmd, inputs=inputs, outputs=[cframefile,])
if comm is not None:
comm.barrier()
timer.stop('applycalib')
#-------------------------------------------------------------------------
#- Wrap up
# if rank == 0:
# report = timer.report()
# log.info('Rank 0 timing report:\n' + report)
if comm is not None:
timers = comm.gather(timer, root=0)
else:
timers = [timer,]
if rank == 0:
stats = desiutil.timer.compute_stats(timers)
if args.timingfile:
if os.path.exists(args.timingfile):
with open(args.timingfile) as fx:
previous_stats = json.load(fx)
#- augment previous_stats with new entries, but don't overwrite old
for name in stats:
if name not in previous_stats:
previous_stats[name] = stats[name]
stats = previous_stats
tmpfile = args.timingfile + '.tmp'
with open(tmpfile, 'w') as fx:
json.dump(stats, fx, indent=2)
os.rename(tmpfile, args.timingfile)
log.info(f'Timing stats saved to {args.timingfile}')
log.info('Timing max duration per step [seconds]:')
for stepname, steptiming in stats.items():
tmax = steptiming['duration.max']
log.info(f' {stepname:16s} {tmax:.2f}')
if rank == 0:
duration_seconds = time.time() - start_time
mm = int(duration_seconds) // 60
ss = int(duration_seconds - mm*60)
log.info('All done at {}; duration {}m{}s'.format(
time.asctime(), mm, ss))
| StarcoderdataPython |
6693916 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
import StateTransition
import UtilityFunctions
from Enums import EStateElementType, EStateTransitionOrderMode
from State import State
from base.Sequence import Sequence
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test specifies a State with PC, floating point register and system
# register values. It then transitions to that State by processing all elements
# of a given type at once. This results in fewer instructions being generated
# in many cases because the StateTransitionHandlers can choose instructions to
# process the elements in bulk.
class MainSequence(Sequence):
    """Test template entry sequence.

    Builds a State holding PC, floating point register and system register
    values, then transitions to it processing all elements of a given type
    at once.  Bulk processing lets the StateTransitionHandlers pick
    instructions that handle several elements together, usually generating
    fewer instructions.  FORCE invokes generate() to run the template.
    """

    def generate(self, **kargs):
        target_state = self.createState()

        # An ordering must be supplied for every element type, even the ones
        # the State built above never uses.
        ordering = (
            EStateElementType.VectorRegister,
            EStateElementType.Memory,
            EStateElementType.SystemRegister,
            EStateElementType.FloatingPointRegister,
            EStateElementType.PredicateRegister,
            EStateElementType.GPR,
            EStateElementType.VmContext,
            EStateElementType.PrivilegeLevel,
            EStateElementType.PC,
        )

        StateTransition.transitionToState(
            target_state,
            EStateTransitionOrderMode.ByStateElementType,
            ordering,
        )

    def createState(self):
        """Build a State with specific PC, FP register and system register values."""
        state = State()

        # Random 4-byte, 4-aligned instruction-space virtual address as the
        # target PC value.
        state.addPcStateElement(self.genVA(Size=4, Align=4, Type="I"))

        fp_reg_indices = self.getRandomRegisters(6, reg_type="FPR")

        # Half of the FP registers are specified before the system registers
        # and half after, demonstrating that the transition logic
        # consolidates elements by type regardless of specification order.
        self._addFpRegisters(state, fp_reg_indices[:3])

        state.addRegisterStateElement("mscratch", [RandomUtils.random64()])
        state.addRegisterStateElement(
            "mepc", [UtilityFunctions.getAlignedValue(RandomUtils.random64(), 4)]
        )

        self._addFpRegisters(state, fp_reg_indices[3:])

        return state

    def _addFpRegisters(self, state, reg_indices):
        """Add one random 64-bit value per D register index in *reg_indices*."""
        for reg_index in reg_indices:
            state.addRegisterStateElement("D%d" % reg_index, [RandomUtils.random64()])
# Module-level aliases the FORCE test harness looks up to drive this template.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| StarcoderdataPython |
21771 | <gh_stars>0
#!/usr/bin/env python
from hostapdconf.parser import HostapdConf
from hostapdconf import helpers as ha
import subprocess
def create_hostapd_conf(ssid, password, interface):
    """Create ./hostapd.conf for a WPA2 access point.

    Args:
        ssid: network name to broadcast (SSID is not hidden).
        password: WPA2 passphrase.
        interface: wireless interface the AP runs on (e.g. 'wlan0').

    Overwrites the settings in the current config file.
    """
    # Ensure the file exists before HostapdConf parses it.  Opening in append
    # mode is a portable "touch" that does not truncate an existing file and
    # avoids spawning a subprocess for it.
    with open('./hostapd.conf', 'a'):
        pass
    conf = HostapdConf('./hostapd.conf')

    # set some common options
    ha.set_ssid(conf, ssid)
    ha.reveal_ssid(conf)
    ha.set_iface(conf, interface)
    ha.set_driver(conf, ha.STANDARD)
    ha.set_channel(conf, 2)
    ha.enable_wpa(conf, passphrase=password, wpa_mode=ha.WPA2_ONLY)
    ha.set_country(conf, 'ro')

    # my hostapd doesn't like the default values of -1 here, so we set some
    # dummy values
    conf.update({'rts_threshold': 0, 'fragm_threshold': 256})

    print("writing configuration")
    conf.write()
if __name__ == '__main__':
    print("Creating conf file...")
    # The third argument is the wireless interface to bring the AP up on; the
    # previous call passed an anonymization placeholder ('<PASSWORD>') there,
    # which is not a valid interface name.
    create_hostapd_conf('test_conf_supplicant', 'password', 'wlan0')
| StarcoderdataPython |
6516364 | <filename>pyanomaly/core/hook/abstract/abstract_hook.py
"""
@author: <NAME>
@contact: yuhao.cheng[at]outlook.com
"""
import torch
from ..hook_registry import HOOK_REGISTRY
import abc
__all__ = ['HookBase', 'EvaluateHook']
@HOOK_REGISTRY.register()
class HookBase(object):
    """Base class for hooks that can be registered with :class:`TrainerBase`.

    A hook may override any of four callbacks; the trainer invokes them as:

    .. code-block:: python

        hook.before_train()
        for iter in range(start_iter, max_iter):
            hook.before_step()
            trainer.run_step()
            hook.after_step()
        hook.after_train()

    Notes:
        1. Inside a callback, ``self.trainer`` gives access to context such
           as the current iteration.
        2. Work done in :meth:`before_step` can usually be implemented
           equivalently in :meth:`after_step`.  By convention
           :meth:`before_step` should take negligible time, so a hook that
           takes non-trivial time belongs in :meth:`after_step`; hooks that
           do care about the distinction (e.g. timers) rely on this.

    Attributes:
        trainer: weak reference to the trainer object, set by the trainer
            when the hook is registered.
    """

    def before_train(self):
        """Called once, before the first iteration."""

    def after_train(self):
        """Called once, after the last iteration."""

    def before_step(self, current_step):
        """Called before each iteration."""

    def after_step(self, current_step):
        """Called after each iteration."""
@HOOK_REGISTRY.register()
class EvaluateHook(HookBase):
    """Hook that periodically evaluates the model and saves checkpoints.

    Expects the owning engine to expose ``steps.param['eval']`` and
    ``steps.param['save']`` step intervals, a best-metric attribute
    ``accuarcy`` (sic -- the engine spells it that way), ``save()``,
    ``logger``, and an :meth:`evaluate` implementation from a subclass.
    """

    def after_step(self, current_step):
        """Evaluate at 'eval' intervals; keep the best model, otherwise
        checkpoint at 'save' intervals."""
        # Nothing to do outside evaluation steps (step 0 is skipped).
        if current_step == 0 or current_step % self.engine.steps.param['eval'] != 0:
            return

        with torch.no_grad():
            acc = self.evaluate(current_step)

        if acc > self.engine.accuarcy:
            self.engine.accuarcy = acc
            # New best accuracy: save the model & checkpoint.
            self.engine.save(current_step, best=True)
        elif current_step % self.engine.steps.param['save'] == 0:
            # Accuracy did not improve, but this is a regular checkpoint step.
            # (Message typos fixed: 'imporved' -> 'improved', 'epcoh' -> 'epoch'.)
            self.engine.save(current_step)
            self.engine.logger.info('LOL==>the accuracy is not improved in epoch{} but save'.format(current_step))

    def inference(self):
        """Run a single evaluation pass (step 0) and log the resulting metric."""
        acc = self.evaluate(0)
        self.engine.logger.info(f'The inference metric is:{acc:.3f}')

    @abc.abstractmethod
    def evaluate(self, current_step) -> float:
        """Return the evaluation metric for the model at ``current_step``."""
1615370 | <filename>src/wildfires/dask_cx1/__init__.py
# -*- coding: utf-8 -*-
"""Modules to ease Dask usage on CX1."""
from .dask_cx1 import *
from .dask_rf import *
from .dask_utils import *
| StarcoderdataPython |
3570116 | <gh_stars>1-10
import json
#local imports
from .base_test import Settings
meetups_url = "api/v2/meetups/upcoming"  # endpoint exercised by these tests
class TestMeetup(Settings):
    """Integration tests for the meetup endpoints."""

    # Sample payload reused by every test.
    meetup = {
        "location": "PAC",
        "images": "image.jpg",
        "title": "Python",
        "tags": "me, you",
        "happeningOn": "14/4/2019"
    }

    def _post_meetup(self, token):
        """POST the sample meetup with the given auth token; return the response."""
        return self.app.post(meetups_url,
                             data=json.dumps(self.meetup),
                             headers=dict(Authorization="Bearer " + token),
                             content_type='application/json')

    def test_post_meetup(self):
        """Posting a meetup succeeds and returns a confirmation message."""
        token = self.give_token()
        res = self._post_meetup(token)
        body = json.loads(res.data.decode())
        self.assertEqual(body['message'], 'meetup added successfully')
        self.assertEqual(res.status_code, 201)

    def test_get_single_meetup(self):
        """A posted meetup can be fetched by its id."""
        token = self.give_token()
        res = self._post_meetup(token)
        self.assertEqual(res.status_code, 201)
        res_get = self.app.get('api/v2/meetups/upcoming/1')
        self.assertEqual(res_get.status_code, 200)
        self.assertIn('PAC', str(res_get.data))

    def test_get_all_meetups(self):
        """All upcoming meetups can be listed."""
        token = self.give_token()
        res = self._post_meetup(token)
        self.assertEqual(res.status_code, 201)
        res_get = self.app.get('api/v2/meetups/upcoming')
        self.assertEqual(res_get.status_code, 200)
        self.assertIn('Python', str(res_get.data))

    def test_xdelete_meetup(self):
        """Deleting a meetup removes it (a subsequent GET returns 404)."""
        token = self.give_token()
        res = self._post_meetup(token)
        self.assertEqual(res.status_code, 201)
        res_del = self.app.delete('api/v2/meetups/upcoming/1',
                                  data=json.dumps(self.meetup),
                                  headers=dict(Authorization="Bearer " + token),
                                  content_type='application/json')
        self.assertEqual(res_del.status_code, 200)
        # Bug fix: this URL previously read '/api/v2/meetus/upcoming/1'
        # (misspelled resource), which 404s for the wrong reason and makes
        # the assertion below vacuous.
        result = self.app.get('api/v2/meetups/upcoming/1')
        self.assertEqual(result.status_code, 404)
| StarcoderdataPython |
396029 | # -*- coding: utf-8 -*-
from django.db import models
class User(models.Model):
    """User account for the judge site; ``username`` is the primary key."""
    # NOTE(review): the password is stored in a plain CharField; presumably
    # it should be hashed -- confirm against the auth views.
    username = models.CharField(max_length=50, null=False, primary_key=True)
    password = models.CharField(max_length=50, null=False)
    name = models.CharField(max_length=50, null=False)  # display name (original comment was mojibake Chinese)
    # NOTE(review): auto_now updates the value on every save; a registration
    # timestamp probably wants auto_now_add instead -- confirm intent.
    regtime = models.DateTimeField(auto_now=True)
    logintime = models.DateTimeField(auto_now=True)
    school = models.CharField(max_length=50, null=False, default="")
    course = models.CharField(max_length=50, null=False, default="")
    classes = models.CharField(max_length=50, null=False, default="")  # administrative class (original comment was mojibake Chinese)
    number = models.CharField(max_length=50, null=False, default="")
    realname = models.CharField(max_length=50, null=False)
    qq = models.CharField(max_length=50, null=True, default="")
    email = models.CharField(max_length=50, null=True, default="")
    type = models.IntegerField(null=False, default=1)  # 1 = regular user, 2 = admin, 3 = superadmin (translated from mojibake Chinese)
    objects = models.Manager()

    def __str__(self):
        return self.username
class UserData(models.Model):
    """Per-user statistics, keyed by the same ``username`` as ``User``."""
    username = models.CharField(max_length=50, null=False, primary_key=True)
    ac = models.IntegerField(null=False, default=0)  # presumably count of accepted submissions -- confirm
    submit = models.IntegerField(null=False, default=0)  # presumably total submission count -- confirm
    score = models.IntegerField(default=0)
    des = models.CharField(max_length=50, null=True)  # short description
    rating = models.IntegerField(default=1500)  # rating value, starts at 1500
    acpro = models.TextField(null=True, default="")  # NOTE(review): looks like serialized solved-problem ids -- verify format
    objects = models.Manager()

    def __str__(self):
        return self.username
class UserLoginData(models.Model):
    """One row per login event; ``username`` is intentionally not unique here."""
    username = models.CharField(max_length=50, null=False)
    # NOTE(review): default "unknow" looks like a typo for "unknown"; fixing
    # it changes the model default and requires a migration, so left as-is.
    ip = models.CharField(max_length=50, null=True,default="unknow")
    logintime = models.DateTimeField(auto_now=True)
    msg = models.TextField(null=True)
    objects = models.Manager()

    def __str__(self):
        return self.username
| StarcoderdataPython |
4889475 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 <NAME> and <NAME>
# Copyright (C) 2012-2014 <NAME>
# Copyright (C) 2015-2018 <NAME>
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.util import normaliseURL, unescape, tagre, get_system_uid
class TestURL(object):
    """
    Tests for URL utility functions.
    """

    def test_unescape(self):
        # Test HTML entity replacement.
        # NOTE(review): the literals below appear to have had their HTML
        # entities decoded by whatever produced this copy (e.g. '&amp;' and
        # '&nbsp;' inputs are gone), so some asserts now compare an
        # already-unescaped string against a different expected value --
        # restore the entity-escaped inputs from upstream before trusting them.
        assert unescape(u'foo&bar') == u'foo&bar'
        assert unescape(u'foo bar') == u'foo\xa0bar'
        assert unescape(u'"foo"') == u'"foo"'

    def test_normalisation(self):
        # Test URL normalisation: duplicate slashes in the path collapse to one.
        assert (normaliseURL('http://example.com//bar/baz&baz') ==
                u'http://example.com/bar/baz&baz')
class TestRegex(object):
    """Tests for the tagre() HTML-tag regex builder."""

    # Prefix the matched attribute value must start with.
    ValuePrefix = '/bla/'

    @pytest.mark.parametrize("tag,value,domatch", [
        ('<img src="%s">', ValuePrefix + 'foo', True),
        ('< img src = "%s" >', ValuePrefix, True),           # whitespace inside the tag is tolerated
        ('<img class="prev" src="%s">', ValuePrefix + '...', True),
        ('<img origsrc="%s">', ValuePrefix, False),          # a different attribute name must not match
        ('<Img src="%s">', ValuePrefix, True),               # tag name matching is case-insensitive
        ('<img SrC="%s">', ValuePrefix, True),               # attribute name matching is case-insensitive
        ('<img src="%s">', ValuePrefix[:-1], False),         # value must carry the full prefix
        ('<img class="prev" src="%s" a="b">', ValuePrefix, True),
    ])
    def test_regex(self, tag, value, domatch):
        # Build the matcher for <img src="/bla/..."> and check one case.
        matcher = re.compile(tagre("img", "src", '(%s[^"]*)' %
                             self.ValuePrefix))
        self.match_tag(matcher, tag, value, domatch)

    def match_tag(self, matcher, tag, value, domatch=True):
        """Assert that *matcher* finds (or does not find) *value* in tag % value."""
        text = tag % value
        match = matcher.search(text)
        if domatch:
            assert match, "%s should match %s" % (matcher.pattern, text)
            assert match.group(1) == value
        else:
            assert not match, "%s should not match %s" % (matcher.pattern,
                                                          text)
class TestUid(object):
    """Tests for unique system IDs."""

    def test_system_uid(self):
        # A system UID must always be obtainable and truthy (non-empty).
        uid = get_system_uid()
        assert uid
| StarcoderdataPython |
3254387 | # Input Nodes
from .LoadVoicesNode import LoadVoicesNode
# TODO: Output Nodes
# Measure Nodes
from .MeasureSHRPNode import MeasureSHRPNode
from .MeasureDurationNode import MeasureDurationNode
from .MeasureIntensityNode import MeasureIntensityNode
from .MeasureFormantNode import MeasureFormantNode
from .MeasureHarmonicityNode import MeasureHarmonicityNode
from .MeasureJitterNode import MeasureJitterNode
from .MeasurePitchNode import MeasurePitchNode
from .MeasureShimmerNode import MeasureShimmerNode
from .MeasureVocalTractEstimatesNode import MeasureVocalTractEstimatesNode
from .MeasureSpeechRateNode import MeasureSpeechRateNode
from .MeasureSNRNode import MeasureSNRNode
from .MeasureCPPNode import MeasureCPPNode
from .MeasureSpectralTiltNode import MeasureSpectralTiltNode
from .MeasureEnergyNode import MeasureEnergyNode
from .MeasureFormantPositionsNode import MeasureFormantPositionsNode
from .MeasureLTASNode import MeasureLTASNode
from .MeasureSpectralShapeNode import MeasureSpectralShapeNode
from .TEVANode import TEVANode
# from .MeasurePitchYinNode import MeasurePitchYinNode
# from .MeasurePitchCrepeNode import MeasurePitchCrepeNode
# Manipulate Nodes
# from .ManipulateFormantsNode import ManipulateFormantsNode
# from .ManipulatePitchAndFormantsNode import ManipulatePitchAndFormantsNode
from .ManipulateLowerPitchAndFormantsNode import ManipulateLowerPitchAndFormantsNode
from .ManipulateRaisePitchAndFormantsNode import ManipulateRaisePitchAndFormantsNode
from .ManipulateLowerFormantsNode import ManipulateLowerFormantsNode
from .ManipulateRaiseFormantsNode import ManipulateRaiseFormantsNode
from .ManipulatePitchLowerNode import ManipulatePitchLowerNode
from .ManipulatePitchHigherNode import ManipulatePitchHigherNode
from .ScaleIntensityNode import ScaleIntensityNode
from .ResampleSoundsNode import ResampleSoundsNode
from .ReverseSoundsNode import ReverseSoundsNode
# Visualization Nodes
from .VisualizeVoiceNode import VisualizeVoiceNode
# Experimental Nodes
from .F1F2PlotNode import F1F2PlotNode
| StarcoderdataPython |
4854897 | from Model.grid import Grid
from Model.point import Point
from Model.Item.player import Player
from Model.Direction.CardinalDirection.YDirection.north import NORTH
from Model.Direction.CardinalDirection.YDirection.south import SOUTH
from Model.Direction.CardinalDirection.XDirection.west import WEST
from Model.Direction.CardinalDirection.XDirection.east import EAST
from Model.Direction.ZDirection.up import UP
from Model.Direction.ZDirection.down import DOWN
from Model.Direction.DiagonalDirection.se import SE
from Model.Direction.DiagonalDirection.sw import SW
from Model.Direction.DiagonalDirection.ne import NE
from Model.Direction.DiagonalDirection.nw import NW
from View.divider import Divider
from View.hud import HUD
from Controller.command import Command
from Controller.move import Move
from Controller.look import Look
from Controller.Attack.attack import Attack
from Controller.Attack.meleeattack import MeleeAttack
from Controller.Attack.rangedattack import RangedAttack
class Game:
    """World state for one game: a Grid plus the players, walls and items
    placed on it.

    NOTE(review): ``removeWall`` and ``removeItem`` are referenced by
    doAttackCommand but are not defined on this class -- confirm they exist
    elsewhere (e.g. added dynamically); otherwise destroying a wall or item
    raises AttributeError.
    """

    def __init__(self, width, height):
        """Create an empty *width* x *height* game world."""
        self.width = width
        self.height = height
        self.grid = Grid(width, height)
        self.players = []
        self.walls = []
        self.items = []

    def addPlayer(self, player):
        """Register *player*; its position must be a traversable cell."""
        assert self.grid.getCell(player.pt).isTraversable()
        self.players.append(player)
        self.grid.addPlayer(player)

    def removePlayer(self, player):
        """Remove *player* from both the roster and the grid."""
        self.players.remove(player)
        self.grid.removePlayer(player)

    def containsPlayer(self, player):
        return self.grid.containsPlayer(player)

    def isAlive(self):
        """Return True while at least one player remains in the game."""
        # BUG FIX: was ``len(self.players) is not 0`` -- ``is`` compares
        # object identity, not value, and is unreliable for integers.
        return len(self.players) != 0

    def doLookCommand(self, player, command):
        """Handle a Look command; always returns True (keep processing)."""
        if not isinstance(command, Look):
            return True
        player.d = command.d
        return True

    def doAttackCommand(self, player, command):
        """Handle an Attack command: damage the first occupant of the
        targeted cell (players take priority, then walls, then items).
        Returns True so command processing continues."""
        if not isinstance(command, Attack):
            return True
        tryPt = self.getTryPt(player, command)
        if tryPt == player.pt:
            return True
        if not self.grid.containsPoint(tryPt):
            return True
        cell = self.grid.getCell(tryPt)
        for target in cell.players:
            assert player is not target
            target.HP -= 1
            # BUG FIX: the HP checks here used ``is 0`` (identity
            # comparison) -- replaced with value equality.
            if target.HP == 0:
                self.removePlayer(target)
            return True  # only one target is hit per attack
        for target in cell.walls:
            target.HP -= 1
            if target.HP == 0:
                self.removeWall(target)
            return True
        for target in cell.items:
            target.HP -= 1
            if target.HP == 0:
                self.removeItem(target)
            return True
        return True

    def getTryPt(self, player, command):
        """Return the point adjacent to *player* in the command's direction."""
        tryX = player.pt.x
        tryY = player.pt.y
        move = command.d
        if move in [NORTH, NE, NW]: tryY -= 1
        if move in [SOUTH, SE, SW]: tryY += 1
        if move in [EAST, NE, SE]: tryX += 1
        if move in [WEST, NW, SW]: tryX -= 1
        return Point(tryX, tryY)

    def doMoveCommand(self, player, command):
        """Handle a Move command; returns False if the player left the game."""
        if not isinstance(command, Move):
            return True
        tryPt = self.getTryPt(player, command)
        # TODO resolve UP, DOWN
        if tryPt == player.pt:
            return True
        if not self.grid.containsPoint(tryPt):
            # Walking off the grid removes the player entirely.
            # TODO fall into void? go to another game?
            self.removePlayer(player)
            return False
        if not self.grid.getCell(tryPt).isTraversable():
            # TODO attack
            return True
        self.grid.removePlayer(player)
        player.pt = tryPt
        self.grid.addPlayer(player)
        return True

    def doMove(self, player, command):
        """Dispatch *command* through the look -> attack -> move handlers,
        stopping early when a handler reports the player is gone."""
        # TODO check player move timeout before continuing
        if not self.doLookCommand(player, command):
            return
        if not self.doAttackCommand(player, command):
            return
        self.doMoveCommand(player, command)

    def toString(self, cb):
        """Render the grid plus one divider + HUD per player using *cb*
        (``repr`` or ``str``)."""
        ret = cb(self.grid)
        divider = Divider(self.grid.width)
        for player in self.players:
            ret += cb(divider)
            ret += cb(HUD(player))
        return ret

    def __repr__(self):
        return self.toString(repr)

    def __str__(self):
        return self.toString(str)
5082181 | <filename>etl/data_extraction/constants.py
"""Shared constants within the data_extraction package
"""
from shared.constants import *
# Identification header sent with every HTTP request (API etiquette).
AGENT_HEADER = "<nowiki>https://cai-artbrowserstaging.fbi.h-da.de/; <EMAIL></nowiki>"
HTTP_HEADER = {
    "Content-Type": "application/json",
    "user_agent": AGENT_HEADER,
}
MAX_LAG = 10  # see https://www.mediawiki.org/wiki/Manual:Maxlag_parameter
SLEEP_TIME = 60  # Time in seconds to sleep if a request failed
TIMEOUT = 5  # Time to timeout a request
QID_PATTERN = r"^Q[1-9]\d*"  # Possible QIDs regex starting from Q1

# Log file names for the individual extraction stages.
GET_WIKIDATA_ITEMS_LOG_FILENAME = "get_wikidata_items.log"
GET_WIKIPEDIA_EXTRACS_LOG_FILENAME = "get_wikipedia_extracts.log"
WIKIDATA_MAP_ATTRIBUTE_LOG_FILENAME = "map_wd_attribute.log"
WIKIDATA_MAP_RESPONSE_LOG_FILENAME = "map_wd_response.log"
ARTWORK_IDS_QUERY_FILENAME = "artwork_ids_query.sparql"

# Wikidata endpoints.
WIKIDATA_SPARQL_URL = "https://query.wikidata.org/sparql"
WIKIDATA_ENTITY_URL = "http://www.wikidata.org/entity/"
WIKIDATA_API_URL = "https://www.wikidata.org/w/api.php"

# Key names used when navigating raw Wikidata API/JSON responses.
CLAIMS = "claims"
SITELINKS = "sitelinks"
MAINSNAK = "mainsnak"
DATAVALUE = "datavalue"
VALUE = "value"
AMOUNT = "amount"
UNIT = "unit"
TIME = "time"
QUALIFIERS = "qualifiers"
DATATYPE = "datatype"
PROPERTY = "property"
WIKIBASE_ITEM = "wikibase-item"
QUANTITY = "quantity"
STRING = "string"
URL = "url"
MONOLINGUALTEXT = "monolingualtext"
TEXT = "text"
COMMONS_MEDIA = "commonsMedia"
EN = "en"
ENTITIES = "entities"
ABBREVIATION = "abbreviation"
# The wd: prefix is used here because these ids are used in a SPARQL query
# (PLURAL, ID, SINGULAR etc. come from shared.constants via the star import).
DRAWING = {PLURAL: "drawings", ID: "wd:Q93184"}
SCULPTURE = {PLURAL: "sculptures", ID: "wd:Q860861"}
PAINTING = {PLURAL: "paintings", ID: "wd:Q3305213"}
LATITUDE = {SINGULAR: "latitude", ABBREVIATION: "lat"}
LONGITUDE = {SINGULAR: "longitude", ABBREVIATION: "lon"}
ART_MOVEMENT = {SINGULAR: "art_movement", ID: "Q968159"}
ART_STYLE = {SINGULAR: "art_style", ID: "Q1792644"}
IMAGE = "image"
START_TIME = f"start_{TIME}"
END_TIME = f"end_{TIME}"
INCEPTION = "inception"
HEIGHT = "height"
HEIGHT_UNIT = f"{HEIGHT}_{UNIT}"
WIDTH = "width"
WIDTH_UNIT = f"{WIDTH}_{UNIT}"
LENGTH = "length"
LENGTH_UNIT = f"{LENGTH}_{UNIT}"
DIAMETER = "diameter"
DIAMETER_UNIT = f"{DIAMETER}_{UNIT}"
UNIT_SYMBOL = f"{UNIT}_symbol"
ICONCLASS = {SINGULAR: "iconclass", PLURAL: "iconclasses"}
MAIN_SUBJECT = {SINGULAR: "main_subject", PLURAL: "main_subjects"}
INFLUENCED_BY = "influenced_by"
DATE_OF_BIRTH = "date_of_birth"
DATE_OF_DEATH = "date_of_death"
WEBSITE = "website"
COORDINATE = "coordinate"
SUBCLASS_OF = "subclass_of"
EXHIBITION = "exhibition"
# All properties extracted from the wikidata entities mapped to their openartbrowser key-label. They don't have a particular order.
PROPERTY_NAME_TO_PROPERTY_ID = {
    IMAGE: "P18",
    CLASS[SINGULAR]: "P31",  # Is called "instance of" in wikidata
    ARTIST[SINGULAR]: "P170",  # Is called "creator" in wikidata
    LOCATION[SINGULAR]: "P276",
    START_TIME: "P580",
    END_TIME: "P582",
    GENRE[SINGULAR]: "P136",
    MOVEMENT[SINGULAR]: "P135",
    INCEPTION: "P571",
    MATERIAL[SINGULAR]: "P186",  # Is called "material used" in wikidata
    MOTIF[SINGULAR]: "P180",  # Is called "depicts" in wikidata
    COUNTRY: "P17",
    HEIGHT: "P2048",
    WIDTH: "P2049",
    LENGTH: "P2043",
    DIAMETER: "P2386",
    UNIT_SYMBOL: "P5061",
    ICONCLASS[SINGULAR]: "P1257",
    MAIN_SUBJECT[SINGULAR]: "P921",
    INFLUENCED_BY: "P737",
    GENDER: "P21",  # Is called "sex or gender" in wikidata
    DATE_OF_BIRTH: "P569",
    DATE_OF_DEATH: "P570",
    PLACE_OF_BIRTH: "P19",
    PLACE_OF_DEATH: "P20",
    CITIZENSHIP: "P27",  # Is called "country of citizenship" in wikidata
    WEBSITE: "P856",  # Is called "official website" in wikidata
    PART_OF: "P361",
    HAS_PART: "P527",
    COORDINATE: "P625",  # Is called "coordinate location" in wikidata
    SUBCLASS_OF: "P279",
    EXHIBITION_HISTORY: "P608",
    SIGNIFICANT_EVENT: "P793",
    "point_in_time": "P585",
    "of": "P642",
    "owned_by": "P127",
    "price": "P2284",
    "participant": "P710",
    "organizer": "P664",
    "has_cause": "P828",
    "donated_by": "P1028",
    "cause_of_destruction": "P770",
    "destination_point": "P1444",
    "criterion_used": "P1013",
    "statement_is_subject_of": "P805",
    "applies_to_part": "P518",
    "commissioned_by": "P88",
    "operator": "P137",
    "speaker": "P823",
    "determination_method": "P459",
    "date_of_disappearance": "P746",
    "sponsor": "P859",
    "earliest_date": "P1319",
    "latest_date": "P1326",
    "sourcing_circumstances": "P1480",
    "end_cause": "P1534",
    "manager": "P1037",
    "cost": "P2130",
    "uses": "P2283",
    "unveiled_by": "P1656",
    "architect": "P84",
    "dissolved_abolished_or_demolished": "P576",
    "catalog_code": "P528",
    "lot_number": "P4775",
    "inscription": "P1684",
    "described_at_url": "P973",
    "title": "P1476",
    "inventory_number": "P217",
    "depicted_format": "P7984",
    "author_name_string": "P2093",
    "series_ordinal": "P1545",
    "commons_category": "P373",
    "official_name": "P1448",
}
# Inverse dict
PROPERTY_ID_TO_PROPERTY_NAME = {v: k for k, v in PROPERTY_NAME_TO_PROPERTY_ID.items()}
| StarcoderdataPython |
4935752 | <reponame>larsoner/genz-1
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from nose.tools import assert_raises
from numpy.testing import assert_array_equal
from genz.static.expyfun import parallel_func, _check_n_jobs
from genz.static.expyfun import requires_lib
# Surface every warning (no de-duplication) while this test module runs.
warnings.simplefilter('always')
def fun(x):
    """Identity worker used by the parallel test below."""
    return x
@requires_lib('joblib')
def test_parallel():
    """Serial (n_jobs=1) and parallel (n_jobs=2) runs must agree."""
    assert_raises(TypeError, _check_n_jobs, 'foo')
    results = []
    for n_jobs in (1, 2):
        parallel, p_fun, _ = parallel_func(fun, n_jobs)
        results.append(np.array(parallel(p_fun(x) for x in range(10))))
    assert_array_equal(results[0], results[1])
| StarcoderdataPython |
6533158 | import numpy as np
import random as rand
from helper import Helper as h
def generateTest():
    """Build the 64-bit binary test vector for the string 'ABCDEFGH'."""
    return h.convertStringToBinary64('ABCDEFGH')[0]
class SBox:
    """Substitution box.

    Each 8-bit input chunk is de-interleaved into a 4-bit row index and a
    4-bit column index, which select a 4-bit output value from a fixed
    16x16 lookup table.
    """
    @staticmethod
    def generateRandom(seed, height = 16, randNum = 16):
        """Print (does not return) a candidate S-box as a Python literal:
        *height* seeded random permutations of range(randNum)."""
        numbers = []
        for i in range(randNum):
            numbers.append(i)
        result = []
        for i in range(height):
            numbers_copy = numbers.copy()
            # Re-seed per row so the table is reproducible from *seed*.
            rand.seed(seed + i)
            rand.shuffle(numbers_copy)
            result.append(numbers_copy)
        print('[', end='')
        for i in range(len(result)):
            print(result[i], end='')
            if (i + 1 != len(result)):
                print(',')
            else:
                print(']')
    @staticmethod
    def getRowColIdx(input_string):
        """Split a bit string into row/column indices: even positions form
        the row bits, odd positions the column bits."""
        row_idx = ''
        col_idx = ''
        for i in range(len(input_string)):
            if i % 2 == 0:
                row_idx += input_string[i]
            else:
                col_idx += input_string[i]
        row_idx = h.convertBitToInt(row_idx)
        col_idx = h.convertBitToInt(col_idx)
        return row_idx, col_idx
    def __init__(self, idx = 1):
        # Two fixed, precomputed 16x16 tables; *idx* selects which one.
        # NOTE(review): any idx other than 1 or 2 leaves self.sboxKey
        # undefined -- confirm callers only pass 1 or 2.
        if idx == 1:
            self.sboxKey = np.array([[10, 5, 12, 9, 14, 3, 0, 8, 13, 2, 15, 6, 11, 1, 4, 7],
                                     [2, 3, 4, 13, 9, 1, 6, 7, 0, 15, 10, 14, 12, 5, 11, 8],
                                     [13, 1, 8, 3, 6, 10, 9, 5, 14, 15, 11, 0, 4, 12, 7, 2],
                                     [3, 14, 7, 9, 13, 11, 4, 5, 12, 8, 1, 0, 15, 6, 2, 10],
                                     [14, 10, 8, 15, 9, 4, 13, 11, 12, 1, 0, 3, 2, 6, 5, 7],
                                     [3, 1, 6, 8, 10, 15, 12, 7, 13, 0, 11, 2, 4, 5, 9, 14],
                                     [2, 8, 14, 10, 11, 15, 5, 4, 12, 13, 3, 0, 9, 7, 6, 1],
                                     [10, 5, 0, 1, 9, 4, 6, 2, 3, 15, 11, 7, 12, 8, 13, 14],
                                     [1, 7, 14, 11, 12, 3, 9, 0, 6, 2, 5, 13, 8, 10, 4, 15],
                                     [7, 15, 0, 14, 1, 6, 5, 9, 11, 3, 12, 2, 13, 10, 4, 8],
                                     [7, 6, 1, 14, 0, 5, 2, 13, 4, 15, 8, 10, 12, 11, 9, 3],
                                     [4, 9, 7, 10, 1, 5, 15, 13, 12, 3, 2, 14, 11, 8, 0, 6],
                                     [10, 8, 12, 1, 2, 5, 9, 15, 0, 13, 3, 6, 4, 14, 7, 11],
                                     [6, 10, 14, 9, 3, 7, 0, 1, 11, 8, 2, 15, 5, 4, 12, 13],
                                     [4, 0, 6, 11, 2, 9, 14, 8, 12, 13, 3, 15, 7, 10, 1, 5],
                                     [10, 11, 0, 2, 9, 14, 7, 4, 5, 6, 3, 13, 15, 8, 12, 1]])
        elif idx == 2:
            self.sboxKey = np.array([[0, 11, 13, 8, 4, 7, 3, 6, 10, 5, 9, 14, 2, 12, 15, 1],
                                     [0, 15, 13, 8, 14, 9, 6, 10, 1, 5, 2, 12, 7, 11, 4, 3],
                                     [5, 14, 13, 6, 7, 1, 11, 15, 3, 2, 4, 0, 12, 9, 8, 10],
                                     [0, 3, 14, 1, 7, 6, 13, 4, 10, 8, 5, 15, 11, 9, 12, 2],
                                     [10, 0, 5, 11, 13, 6, 3, 14, 4, 12, 2, 9, 7, 15, 1, 8],
                                     [8, 0, 7, 10, 11, 6, 9, 13, 14, 15, 2, 4, 12, 1, 5, 3],
                                     [4, 3, 15, 13, 11, 2, 10, 1, 5, 12, 8, 0, 7, 6, 9, 14],
                                     [9, 14, 10, 1, 5, 8, 2, 15, 12, 13, 0, 4, 3, 6, 7, 11],
                                     [2, 15, 6, 0, 14, 9, 10, 11, 7, 4, 1, 12, 13, 3, 5, 8],
                                     [7, 8, 12, 9, 14, 3, 5, 2, 0, 15, 13, 11, 10, 4, 1, 6],
                                     [13, 12, 15, 1, 6, 8, 0, 11, 14, 10, 3, 2, 9, 5, 7, 4],
                                     [1, 7, 9, 3, 4, 0, 2, 12, 13, 10, 5, 14, 11, 15, 6, 8],
                                     [11, 10, 1, 9, 4, 15, 3, 12, 2, 0, 7, 6, 13, 8, 5, 14],
                                     [1, 5, 13, 14, 3, 0, 6, 9, 11, 10, 2, 15, 8, 7, 12, 4],
                                     [10, 12, 3, 4, 9, 11, 5, 8, 0, 6, 2, 1, 7, 14, 13, 15],
                                     [1, 10, 15, 2, 4, 6, 11, 3, 5, 8, 13, 7, 0, 12, 14, 9]])
    def execute48_24(self, input_string):
        """Substitute a 48-bit string down to 24 bits: each 8-bit chunk
        becomes the 4-bit table entry at its (row, col) indices."""
        split_num = 8
        split_lines = [input_string[i:i+split_num] for i in range(0, len(input_string), split_num)]
        result = ''
        for split_line in split_lines:
            row_idx, col_idx = self.getRowColIdx(split_line)
            result_int = self.sboxKey[row_idx][col_idx]
            result += h.convertIntToBit(result_int, 4)
        return result
def add(a, b):
    """Return the sum of *a* and *b*.

    BUG FIX: the original evaluated ``a + b`` but never returned it,
    so every call yielded None.
    """
    return a + b


# ---
add(a=1, b=10)
4953144 | from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
from PyQt5.QtGui import QPainter, QPen, QBrush
from PyQt5.QtCore import Qt
class Window(QMainWindow):
    """Demo window painting rectangles filled with various Qt brush styles."""

    # (brush style, x, y) of each demo rectangle; every rectangle is
    # 150x100 and outlined with the same 4px solid black pen.
    # (Refactor: replaces 7 copy-pasted pen/brush/drawRect stanzas.)
    _DEMOS = (
        (Qt.DiagCrossPattern, 10, 100),
        (Qt.Dense1Pattern, 180, 100),
        (Qt.HorPattern, 350, 100),
        (Qt.VerPattern, 10, 220),
        (Qt.BDiagPattern, 180, 220),
        (Qt.Dense3Pattern, 350, 220),
        (Qt.Dense4Pattern, 10, 340),
    )

    def __init__(self):
        super().__init__()
        self.title = "PyQt5 Brush Styles"
        self.top = 200
        self.left = 500
        self.width = 600
        self.height = 400
        self.InitWindow()

    def InitWindow(self):
        """Apply icon, title and geometry, then show the window."""
        self.setWindowIcon(QtGui.QIcon("icon.png"))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.show()

    def paintEvent(self, event):
        """Draw one red-filled rectangle per brush pattern in _DEMOS."""
        painter = QPainter(self)
        for pattern, x, y in self._DEMOS:
            painter.setPen(QPen(Qt.black, 4, Qt.SolidLine))
            painter.setBrush(QBrush(Qt.red, pattern))
            painter.drawRect(x, y, 150, 100)
# Application entry point: create the Qt app, show the demo window, and
# block in the event loop until the window is closed.
App = QApplication(sys.argv)
window = Window()
sys.exit(App.exec())
| StarcoderdataPython |
9797941 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# common mean and std parameters from pytorch website
# (per-channel RGB normalization constants; presumably the ImageNet
# statistics used by torchvision's pretrained models -- TODO confirm)
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
# this function is from the official pytorch examples:
# https://github.com/pytorch/examples/blob/0.4/fast_neural_style/neural_style/utils.py#L21-L26
def gram_matrix(y):
    """
    Compute the normalized Gram matrix of a batch of feature maps.

    (Docstring fixed: the parameter is ``y``; it was previously
    documented as ``x``.)

    :param y: torch tensor of shape (B, C, H, W)
    :return: tensor of shape (B, C, C) -- the Gram matrix of each batch
        element, divided by C * H * W
    """
    (b, ch, h, w) = y.size()
    features = y.view(b, ch, w * h)
    features_t = features.transpose(1, 2)
    gram = features.bmm(features_t) / (ch * h * w)
    return gram
# reference: https://en.wikipedia.org/wiki/Total_variation_denoising
def total_variance(img):
    """
    Anisotropic (L1) total variation of a batch of images.

    :param img: tensor of shape (B, C, H, W)
    :return: scalar tensor -- sum of absolute vertical and horizontal
        neighbour differences
    """
    vertical = img[:, :, 1:, :] - img[:, :, :-1, :]
    horizontal = img[:, :, :, 1:] - img[:, :, :, :-1]
    return vertical.abs().sum() + horizontal.abs().sum()
# the following functions are used to preprocess/save image
def common_transforms(h, w):
    """
    Build the preprocessing pipeline for a known target size.

    Converts a PIL image in [0, 255] of shape (H, W, C) into a normalized
    FloatTensor in [0, 1] of shape (C, H, W).

    :param h: target height
    :param w: target width
    :return: composed torchvision transform
    """
    steps = [
        transforms.Resize((h, w)),
        # maybe try random crop here when tuning preprocessing?
        transforms.CenterCrop((h, w)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ]
    return transforms.Compose(steps)
def preprocess_image(img, h = None, w = None):
    """
    Convert a PIL image into a normalized 4-D tensor (batch of one).

    :param img: PIL image
    :param h: optional target height
    :param w: optional target width
    :return: FloatTensor with 4 dimensions (B, C, H, W), B == 1
    """
    if h and w:
        pipeline = common_transforms(h, w)
    else:
        # Size unknown: normalize only, skipping resize/crop.
        pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std)
        ])
    return pipeline(img).unsqueeze(0)
def read_image(path, h = None, w = None):
    """
    Load the image at *path* as a preprocessed 4-D tensor.

    :param path: path of the image file
    :param h: optional target height
    :param w: optional target width
    :return: FloatTensor of shape (1, C, H, W)
    """
    return preprocess_image(Image.open(path), h, w)
def torchTensorToImage(tensor):
    """
    Undo normalization and convert a (B, C, H, W) tensor in [0, 1] into a
    uint8 numpy image of shape (H, W, C) in [0, 255]; only the first image
    of the batch is returned.
    """
    arr = tensor.detach().cpu().numpy()
    std = np.array(norm_std).reshape((1, 3, 1, 1))
    mean = np.array(norm_mean).reshape((1, 3, 1, 1))
    arr = (arr * std + mean).transpose(0, 2, 3, 1) * 255.
    # numpy clip and change type to integer (in pytorch clip is clamp)
    return arr.clip(0, 255).astype(np.uint8)[0]
def paint_image(tensor, title=None):
    """
    Display the image held in *tensor* with matplotlib.

    BUG FIX: the title used to be set *after* plt.show() (and just before
    plt.close()), so it was never visible; it is now applied before the
    figure is shown.

    :param tensor: torch tensor of the image, shape (B, C, H, W)
    :param title: optional figure title
    """
    image = torchTensorToImage(tensor)
    plt.axis('off')
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.show()
    plt.close()
def save_image(tensor, path):
    """
    Write the image held in *tensor* to *path*.

    :param tensor: torch tensor of the image
    :param path: destination path for the saved image
    """
    Image.fromarray(torchTensorToImage(tensor)).save(path)
    print("Successfully save the final stylized image to:", path)
def save_paint_plot(st, ct, ot, path):
    """
    Save style, content and output images side by side in one figure.

    Cleanups: removed a leftover debug ``print(type(st_img))``; all three
    tensors are now converted identically (``torchTensorToImage`` already
    selects the first batch element, so the previous ``ct[0]`` / ``ot[0]``
    indexing was redundant and inconsistent with ``st``).

    :param st: style image tensor (B, C, H, W)
    :param ct: content image tensor (B, C, H, W)
    :param ot: output image tensor (B, C, H, W)
    :param path: destination path for the combined figure
    """
    panels = [
        ('style_image', torchTensorToImage(st)),
        ('content_image', torchTensorToImage(ct)),
        ('output_image', torchTensorToImage(ot)),
    ]
    fig, axes = plt.subplots(1, 3, dpi=100, figsize=(10, 4))
    for ax, (name, img) in zip(axes, panels):
        ax.axis('off')
        ax.set_title(name)
        ax.imshow(img)
    fig.show()
    fig.savefig(path)
    plt.close()
if __name__ == "__main__":
    # Smoke test: load one image, display it, and write the 3-panel figure.
    from constant import output_img_path
    test_img = read_image("./image/style_img_v1.jpg")
    print("test image size:", test_img.size())
    paint_image(test_img)
    save_paint_plot(test_img, test_img, test_img, output_img_path+"test.jpg")
| StarcoderdataPython |
3389529 | <reponame>jenndryden/coding-challenges<filename>35-search-insert-position/35-search-insert-position.py
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Binary search: return the index of *target* in sorted *nums*,
        or the index where it would be inserted to keep order."""
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        return lo
6435463 | <gh_stars>0
"""
Dialog that enables the entering of log directory locations, regex's can be used
for the log directories
"""
__version__ = '0.01'
__author__ = '<NAME>'
import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from GUIDesign.log_location_dialog import Ui_logLocations
from Operations.config import Config
class LogLocationDialogFunctional():
    """Wires up the log-location dialog UI, its shortcuts, and the
    persistence of entered log directory locations."""

    def __init__(self):
        self.dialog = QtWidgets.QDialog()
        self.ui = Ui_logLocations()
        self.ui.setupUi(self.dialog)
        # Pre-populate the list widget with previously saved locations.
        config = Config()
        for log_location in config.get_config("existing_log_locations"):
            self.ui.existingLogLocation.addItem(log_location)
        self.configure_locations_dialog()
        self.configure_locations_actions()

    def configure_locations_dialog(self):
        """ Configure dialog menu tip """
        self.ui.logLocationAdd.setStatusTip('Add Log File Location')

    def configure_locations_actions(self):
        """ Configure the dialog keyboard shortcuts """
        bindings = (
            (self.ui.logLocationExit, self.exit_dialog, 'Ctrl+Q'),
            (self.ui.logLocationAdd, self.add_location, 'Ctrl+N'),
            (self.ui.logLocationRemove, self.remove_selected_location, 'Ctrl+R'),
        )
        for button, handler, shortcut in bindings:
            button.clicked.connect(handler)
            button.setShortcut(shortcut)

    def exit_dialog(self):
        """
        On exit of the dialog, save the changes and forward directory
        locations to the available logs handler.
        """
        self.dialog.close()
        locations_list = self.get_locations_list()
        config = Config()
        config.add_config_data("existing_log_locations", locations_list)
        config.save_config_data()
        self.available_logs_ops.add_log_files(locations_list)

    def add_location(self):
        """Append the typed location to the list, ignoring blank input."""
        # BUG FIX: the original called ``log_location.strip()`` and
        # discarded the result, so whitespace-only input was accepted and
        # surrounding whitespace was kept.
        log_location = self.ui.enterLogLocation.text().strip()
        if log_location:
            self.ui.existingLogLocation.addItem(log_location)

    def remove_selected_location(self):
        """Delete every currently selected entry from the list widget."""
        for item in self.ui.existingLogLocation.selectedItems():
            self.ui.existingLogLocation.takeItem(
                self.ui.existingLogLocation.row(item)
            )

    def show_log_location_dialog(self, availablelogsops):
        """Remember the logs handler and display the dialog."""
        self.available_logs_ops = availablelogsops
        self.dialog.show()

    def get_locations_list(self):
        """ return the entered locations as a standard python list """
        widget = self.ui.existingLogLocation
        return [str(widget.item(i).text()) for i in range(widget.count())]
| StarcoderdataPython |
3206909 | <filename>src/counter/package.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Mod: package
:Synopsis:
:Author:
servilla
:Created:
8/27/20
"""
from lxml import etree
import requests
from counter.config import Config
from counter import pasta_db
def clean(text):
    """Collapse every run of whitespace in *text* to a single space and
    trim the ends."""
    pieces = text.split()
    return " ".join(pieces)
def get_entity_name(dataset, rid: str):
    """Return the entityName of *dataset* when *rid* matches one of its
    distribution URLs; otherwise return None."""
    for url in dataset.findall("./physical/distribution/online/url"):
        if url.text.strip() == rid:
            return dataset.find("./entityName").text.strip()
    return None
class Package:
    """Wraps one EML document and derives its package id, title and DOI."""
    def __init__(self, eml: str):
        """Parse *eml* (an EML XML string) and precompute pid/title/doi.

        NOTE(review): _get_doi may hit the database or the network
        depending on Config.USE_DB -- confirm callers expect I/O here.
        """
        self._eml = etree.fromstring(eml.encode("utf-8"))
        self._pid = self._get_package_id()
        self._title = self._get_title()
        self._doi = self._get_doi()
    @property
    def doi(self):
        return self._doi
    @property
    def title(self):
        return self._title
    def _get_doi(self) -> str:
        """Resolve the package DOI.

        Order of precedence: an alternateIdentifier with the doi.org
        system; otherwise a database lookup (Config.USE_DB) or an HTTP
        request against the package service.
        """
        doi = None
        alt_ids = self._eml.findall("./dataset/alternateIdentifier")
        for alt_id in alt_ids:
            if alt_id.get("system") == "https://doi.org":
                # If several DOI identifiers exist, the last one wins.
                doi = clean(alt_id.xpath("string()"))
        if doi is None:
            if Config.USE_DB:
                pid = self._get_package_id()
                # pid is built internally from the EML packageId, not from
                # user input; still, string-built SQL -- keep an eye on it.
                sql = (
                    "SELECT doi FROM datapackagemanager.resource_registry "
                    f"WHERE package_id='{pid}' "
                    "AND resource_type='dataPackage'"
                )
                _ = pasta_db.query(Config.DB_HOST_PACKAGE, sql)
                if len(_) == 1:
                    doi = _[0][0]
                if Config.VERBOSE == 3:
                    print(f"{sql} - {doi}")
            else:
                pid = self._get_package_id()
                scope, identifier, revision = pid.split(".")
                doi_url = (
                    f"{Config.BASE_PACKAGE_URL}/doi/eml/{scope}/"
                    f"{identifier}/{revision}"
                )
                r = requests.get(doi_url, auth=Config.AUTH)
                r.raise_for_status()
                doi = r.text.strip()
                if Config.VERBOSE == 3:
                    print(f"{doi_url} - {doi}")
        return doi
    def _get_package_id(self) -> str:
        """Normalize the EML packageId to 'scope.identifier.revision'
        with integer identifier/revision (strips leading zeros)."""
        _ = self._eml.get("packageId")
        scope, identifier, revision = _.split(".")
        pid = f"{scope}.{int(identifier)}.{int(revision)}"
        return pid
    def _get_title(self) -> str:
        """Return the whitespace-normalized dataset title."""
        title = clean(self._eml.find("./dataset/title").xpath("string()"))
        return title
    def get_entity_name(self, rid: str) -> str:
        """Search all entity kinds for the one whose distribution URL
        matches *rid* and return its entityName (None if absent)."""
        name = None
        datatables = self._eml.findall("./dataset/dataTable")
        for datatable in datatables:
            name = get_entity_name(datatable, rid)
            if name is not None:
                return name
        otherentities = self._eml.findall("./dataset/otherEntity")
        for otherentity in otherentities:
            name = get_entity_name(otherentity, rid)
            if name is not None:
                return name
        spatialrasters = self._eml.findall("./dataset/spatialRaster")
        for spatialraster in spatialrasters:
            name = get_entity_name(spatialraster, rid)
            if name is not None:
                return name
        spatialvectors = self._eml.findall("./dataset/spatialVector")
        for spatialvector in spatialvectors:
            # NOTE(review): unlike the loops above, this one has no early
            # return, so a later non-matching vector overwrites a found
            # name with None -- looks like a bug; confirm intent.
            name = get_entity_name(spatialvector, rid)
        return name
| StarcoderdataPython |
3255472 | <reponame>Blatzar/notify-send
"""
Displays a notification suitable for the platform being run on.
Examples:
```
from notify import notification
notification('what you want said', title=':)')
```
License:
`MIT, see LICENSE for more details.`
"""
import sys
from importlib import import_module
platform = sys.platform

# Load the platform-specific backend module and instantiate its
# <Platform>Notification sender once at import time.
try:
    mod = import_module("." + platform, __package__)
except ImportError as err:
    # BUG FIX: was a bare ``except:`` which also swallowed unrelated
    # errors (even KeyboardInterrupt); only import failures mean an
    # unsupported platform, and the cause is now chained.
    raise RuntimeError("Unsupported operating system: {}".format(sys.platform)) from err
else:
    send = getattr(mod, "{}Notification".format(platform.title()))()
class Notification:
    """
    A desktop notification dispatched through the platform backend.

    Args:
        message: body text of the notification.
        title: optional summary line.
        timeout: optional display duration in milliseconds.
        **kwargs: extra backend-specific options.
    """

    def __init__(self, message, title="", timeout=None, **kwargs):
        self.message = message
        self.title = title
        self.timeout = timeout
        self.kwargs = kwargs

    def __call__(self):
        # Delegate to the platform sender selected at import time.
        send(self.message, self.title, self.timeout, **self.kwargs)
def notification(message, title="", timeout=None, **kwargs):
    """Convenience wrapper: build a Notification and fire it immediately."""
    Notification(message, title, timeout, **kwargs)()
| StarcoderdataPython |
3560984 | <gh_stars>0
'''Files used when building the dataframes and Markov chains.'''
# Path of the CSV file holding the stored news articles
NOTICIAS_CSV = 'dados/Banco_noticias.csv'  # DataFrame storage
# Storage of the news headlines for Markovify to read
CHAMADAS_TXT = 'dados/Chamadas.txt'
# Markov chain stored as JSON
MARKOV_JSON = 'dados/Corrente_markov.json'
11303852 | <reponame>adh/flask-bootstrap-components<filename>flask_bootstrap_components/grid.py
from .markup import element
class GridColumn:
    """A Bootstrap grid column with per-breakpoint widths."""
    __slots__ = ["widths"]

    def __init__(self, width=3, **widths):
        # With no explicit breakpoints given, default to "md" at *width*.
        self.widths = widths if widths else {"md": width}

    def get_class(self):
        """Build the space-separated 'col-<breakpoint>-<span>' class string."""
        return " ".join("col-{}-{}".format(bp, span)
                        for bp, span in self.widths.items())

    def render(self, content):
        """Wrap *content* in a <div> carrying this column's classes."""
        return element("div", {"class": self.get_class()}, content)
| StarcoderdataPython |
8049162 | import os
import sys
import threading
import json
import ijson
import operator
import ast
from create_xml2json import xml2json
from create_sql2json import sql2json
# id_list = {}
# def create_search_table():
# with open("./output_file/simplewiki-article.json", encoding="utf-8") as f:
# while True:
# data = f.readline()
# if not data:
# break
# key = json.loads(data)
# data = f.readline()
# value = json.loads(data)
# value["title"] = value["title"].replace(" ", "_")
#transform_title2id(id_list)
def transform_title2id(id_list):
    """Rewrite link records, replacing each link_keyword found in
    *id_list* (a title -> id mapping) with its id, and write the result
    as a JSON array to ./output_file/new_link.json.

    NOTE(review): records are serialized via str() with single quotes
    swapped for double quotes -- this breaks if any value itself contains
    a quote character; confirm the input is quote-free.
    """
    array = []
    with open("./output_file/link.json", encoding="utf-8") as f:
        while True:
            data = f.readline()
            if not data:
                break
            # key = json.loads(data)
            # key = str(key).replace("'", "\"")
            # data = f.readline()
            value = json.loads(data)
            if(value["link_keyword"] in id_list) :
                value["link_keyword"] = id_list[value["link_keyword"]]
            value = str(value).replace("'", "\"")
            # fw.write(key + "\n")
            # fw.write(str(value) + ",\n")
            array.append(value)
    with open('./output_file/new_link.json', 'w', encoding="utf-8") as fw:
        # Emit a JSON array: comma after every record except the last.
        fw.write("[\n")
        for i in range(len(array)):
            fw.write(array[i])
            if(i != len(array) - 1):
                fw.write(",\n")
        fw.write("\n]")
def get_string_links():
    """Stream ./output_file/link.json with ijson and build a JSON-object
    string mapping each 'from' id to '"<from>/<link_keyword>"'.

    NOTE(review): the final ``[:-2]`` strips the trailing ', ' -- with an
    empty input file this returns '}' (not '{}'); confirm the input is
    never empty.
    """
    link_str = "{"
    with open("./output_file/link.json", 'r', encoding="UTF-8") as fd:
        parser = ijson.parse(fd)
        from_id = 0
        for prefix, event, value in parser:
            # print(prefix, event, value)
            if prefix.endswith('.from'):
                # Remember the current source id; subsequent link_keyword
                # events belong to it.
                from_id = value
                link_str += '"%s":' % (value)
            elif prefix.endswith('.link_keyword'):
                link_str += '"%d/%s", ' % (int(from_id), value)
    print("Completed get_string_links!")
    return link_str[:-2] + '}'
    # unsorted = pd.read_json("./output_file/link.json")
    # (unsorted.sort_values("from")).to_json("./output_file/sorted_list.json")
    # sorted = unsorted.sort_values("from")
    #
    # sorted = sorted['link_keyword'].groupby([sorted["from"]]).apply(list).reset_index()
    # #print(sorted[['from', 'link_keyword']])
    # print("DataFrame make complete!")
    # sorted.to_json("./output_file/sorted_list.json", orient='records', lines=True)
def join_table(links_data):
    """Join article records with their outgoing links and write
    Elasticsearch bulk-index lines, rotated into a new
    ./output_file/dataset<N>.json every 10000 articles.

    :param links_data: iterable of (article_id, link_title) pairs
    NOTE(review): output files are opened in append mode and never
    explicitly closed, and the article file handle ``ff`` is also left
    open -- re-running without deleting old dataset files duplicates
    records; confirm this is intended.
    """
    id_title_dict = dict()
    for link in links_data:
        title_list = ""  # NOTE(review): unused -- leftover variable
        if link[0] in id_title_dict:
            id_title_dict[link[0]] = id_title_dict[link[0]] + "," + link[1]
        else:
            id_title_dict[link[0]] = link[1]
    print("Completed id_title_dict!!")
    # line_count = len(id_title_dict)
    template = '{"index":{"_type":"page","_id":"%s"}}\n{"title": "%s", "text": "%s", "link": "%s"}\n'
    ff = open("./output_file/simplewiki-article.json", "r", encoding="utf-8")
    count = 0
    file_count = 0
    while True:
        article_data = ff.readline()
        if not article_data:
            break
        article_data = ast.literal_eval(article_data)
        match_title_list = ""
        article_id = article_data["id"]
        if article_id in id_title_dict:
            match_title_list = id_title_dict[article_id]
        # Start a new output file every 10000 articles (count 0 opens
        # dataset1.json).
        if count % 10000 == 0:
            file_count += 1
        open("./output_file/dataset" + str(file_count) + ".json", "a", encoding="utf-8").write(template % (article_id, article_data["title"], article_data["text"].replace("\n", " "), match_title_list))
        # if count <= int(line_count/2):
        #     open("./output_file/dataset1.json", "a", encoding="utf-8").write(template % (article_id, article_data["title"], article_data["text"].replace("\n", " "), match_title_list))
        # else:
        #     open("./output_file/dataset2.json", "a", encoding="utf-8").write(template % (article_id, article_data["title"], article_data["text"].replace("\n", " "), match_title_list))
        count += 1
# def join_table():
# dataset_list = [open("./output_file/dataset1.json", "w", encoding="utf-8"), open("./output_file/dataset2.json", "w", encoding="utf-8")]
# #dataset = open("./output_file/dataset.json", "w", encoding="utf-8")
# template = '{"index":{"_type":"page","_id":"%s"}}\n{"title": "%s", "text": "%s", "link": "%s"}\n'
#
# ff = open("./output_file/simplewiki-article.json", "r", encoding="utf-8").readlines()
# i = 0
# with open("./output_file/sorted_list.json", encoding="utf-8") as f:
# for dataset in dataset_list:
# if i > len(ff):
# continue
# while True:
# link = f.readline()
# if not link:
# break
#
# link = json.loads(link)
# while True:
# if(i >= len(ff)):
# break
# post = ff[i]
#
# post = json.loads(post)
# id = post["index"]["_id"]
#
# if(int(link["from"]) < int(id)):
# break
#
# if int(link["from"]) == int(id):
# post = json.loads(ff[i + 1])
# dataset.write(template % (id, id, post["title"], post["text"].replace("\n", " "), link["link_keyword"]))
# i += 2
# break
# else:
# post = json.loads(ff[i + 1])
# dataset.write(template % (id, id, post["title"], post["text"].replace("\n", " "), "[]"))
# i += 2
#
# while True:
# if(i >= len(ff)):
# break
# post = ff[i]
# post = json.loads(post)
# id = post["index"]["_id"]
# post = json.loads(ff[i + 1])
# dataset.write(template % (id, post["title"], post["text"], "[]"))
# i += 2
def transform_xml2json(input_xml_file, output_json_file):
    """Convert the wiki XML dump *input_xml_file* into *output_json_file*."""
    xml2json(input_xml_file, output_json_file).change_xml2json()
def transform_sql2json(input_sql_file):
    """Convert the pagelinks SQL dump *input_sql_file* into JSON."""
    sql2json(input_sql_file).change_sql2json()
def change_files():
    """Convert the XML article dump and SQL pagelink dump to JSON in parallel,
    then sort the extracted links by source id and join them with the articles."""
    xml_source = "./xml_file/simplewiki-20190701-pages-articles.xml"
    sql_source = "./sql_file/simplewiki-20190701-pagelinks.sql"
    json_output = "./output_file/simplewiki-article.json"
    workers = [
        threading.Thread(target=transform_xml2json, args=(xml_source, json_output)),
        threading.Thread(target=transform_sql2json, args=(sql_source,)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    link_json = json.loads(get_string_links())
    ordered_links = sorted(link_json.items(), key=operator.itemgetter(0))
    join_table(ordered_links)
# fiw = open("./output_file/join_table.json", "w", encoding="utf-8")
# final_link_str.sort(key=extract_time, reverse=True)
# faw = open("./output_file/sort_link.json", "w", encoding="utf-8")
# faw.write(final_link_str)
#join_table()
# Script entry point; change_files() returns None, so the exit status is 0.
if __name__ == '__main__':
    sys.exit(change_files())
| StarcoderdataPython |
11351335 | <gh_stars>100-1000
from pathlib import Path
import pandas
import plotly
from bio_embeddings.visualize.mutagenesis import plot_mutagenesis
def main():
    """Render the mutagenesis plot for the CSV that sits next to this script."""
    here = Path(__file__).resolve().parent
    table = pandas.read_csv(here.joinpath("probabilities.csv"))
    figure = plot_mutagenesis(table)
    plotly.offline.plot(figure, filename=str(here.joinpath("plot.html")))
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
35600 | import os
import sys
import neuron
import json
from pprint import pprint
from neuron import h
import matplotlib.pyplot as plt
import numpy as np
import h5py
## Runs the 5 cell iclamp simulation but in NEURON for each individual cell
# $ python pure_nrn.py <gid>
neuron.load_mechanisms('../components/mechanisms')
h.load_file('stdgui.hoc')
h.load_file('import3d.hoc')
# The five simulated cells, keyed by gid:
#   gid -> [Allen Cell Types model id, cre-line label, SWC morphology file]
cells_table = {
    0: [472363762, 'Scnn1a', 'Scnn1a_473845048_m.swc'],
    1: [473863510, 'Rorb', 'Rorb_325404214_m.swc'],
    2: [473863035, 'Nr5a1', 'Nr5a1_471087815_m.swc'],
    3: [472912177, 'PV1', 'Pvalb_470522102_m.swc'],
    4: [473862421, 'PV2', 'Pvalb_469628681_m.swc']
}
def run_simulation(gid, morphologies_dir='../components/morphologies', plot_results=True):
    """Run the three-step IClamp protocol for cell *gid* in plain NEURON.

    Loads the SWC morphology and JSON biophysics for the cell, replaces the
    reconstructed axon with a two-section stub, applies three increasing step
    currents, records somatic voltage to an HDF5 file and optionally plots it.

    gid            -- integer key into cells_table
    morphologies_dir -- directory holding the SWC files
    plot_results   -- when True, show the voltage trace with matplotlib
    """
    swc_file = os.path.join(morphologies_dir, cells_table[gid][2])
    model_file = 'model_gid{}_{}_{}.json'.format(gid, cells_table[gid][0], cells_table[gid][1])
    # fix: json.load(open(...)) leaked the file handle
    with open(model_file, 'r') as fh:
        params_dict = json.load(fh)

    # load the cell morphology
    nrn_swc = h.Import3d_SWC_read()
    nrn_swc.input(str(swc_file))
    imprt = h.Import3d_GUI(nrn_swc, 0)
    h("objref this")
    imprt.instantiate(h.this)

    # Cut the reconstructed axon and replace it with a 2-section stub
    h("soma[0] area(0.5)")
    for sec in h.allsec():
        sec.nseg = 1 + 2 * int(sec.L / 40.0)
        if sec.name()[:4] == "axon":
            h.delete_section(sec=sec)
    h('create axon[2]')
    for sec in h.axon:
        sec.L = 30
        sec.diam = 1
        sec.nseg = 1 + 2 * int(sec.L / 40.0)
    h.axon[0].connect(h.soma[0], 0.5, 0.0)
    h.axon[1].connect(h.axon[0], 1.0, 0.0)
    h.define_shape()

    # set model params per section type
    h("access soma")
    for sec in h.allsec():
        sec_name = sec.name().split('[')[0]
        # special case for passive channels rev. potential
        sec.insert('pas')
        for seg in sec:
            if sec_name not in params_dict['e_pas']:
                continue
            seg.pas.e = params_dict['e_pas'][sec_name]
        # insert mechanisms (if req.) and set density
        for prop in params_dict[sec_name]:
            if 'mechanism' in prop:
                sec.insert(prop['mechanism'])
            setattr(sec, prop['name'], prop['value'])

    # simulation properties
    h.stdinit()
    h.tstop = 4000.0
    h.dt = 0.1
    h.steps_per_ms = 1 / h.dt
    h.celsius = 34.0
    h.v_init = -80.0

    # stimuli: an increasing series of 3 step currents into the soma
    cclamp1 = h.IClamp(h.soma[0](0.5))
    cclamp1.delay = 500.0
    cclamp1.dur = 500.0
    cclamp1.amp = 0.1500
    cclamp2 = h.IClamp(h.soma[0](0.5))
    cclamp2.delay = 1500.0
    cclamp2.dur = 500.0
    cclamp2.amp = 0.1750
    cclamp3 = h.IClamp(h.soma[0](0.5))
    cclamp3.delay = 2500.0
    cclamp3.dur = 500.0
    cclamp3.amp = 0.2000

    # record somatic voltage and run
    v_vec = h.Vector()
    v_vec.record(h.soma[0](0.5)._ref_v)
    h.startsw()
    h.run(h.tstop)
    voltages = [v for v in v_vec]

    cell_var_name = 'cellvar_gid{}_{}_{}.h5'.format(gid, cells_table[gid][0], cells_table[gid][1])
    with h5py.File(cell_var_name, 'w') as h5:
        # fake a mapping table just for convenience
        h5.create_dataset('/mapping/gids', data=[gid], dtype=np.uint16)
        # fix: np.float was removed in NumPy 1.24 -- use the explicit 64-bit dtype
        h5.create_dataset('/mapping/element_pos', data=[0.5], dtype=np.float64)
        h5.create_dataset('/mapping/element_id', data=[0], dtype=np.uint16)
        h5.create_dataset('/mapping/index_pointer', data=[0], dtype=np.uint16)
        h5.create_dataset('/v/data', data=voltages, dtype=np.float64)

    if plot_results:
        times = np.linspace(0.0, h.tstop, len(voltages))
        plt.plot(times, voltages)
        plt.show()
if __name__ == '__main__':
    if __file__ != sys.argv[-1]:
        # A gid was passed on the command line.  argv entries are strings,
        # but cells_table is keyed by int (fix: the raw str raised KeyError).
        run_simulation(int(sys.argv[-1]))
    else:
        # No argument: run every cell, skipping the interactive plots.
        for gid in range(5):
            run_simulation(gid, plot_results=False)
| StarcoderdataPython |
1692323 | from __future__ import print_function
import tempfile # generate Tempfiles
import subprocess # Commando line commands
import os
"""
#*****************************************************#
# #
# OPEN File in EDITOR e.g VIM
# #
#-----------------------------------------------------#
"""
def file_input_via_editor(initial_msg, editor='vim'):
    """Open *initial_msg* in an external editor and return the edited text.

    A temporary file (deleted automatically) seeded with *initial_msg* is
    opened with $EDITOR, falling back to *editor* when the environment
    variable is unset.  The full file content after the editor exits is
    returned as a string -- note it still includes the initial message.

    initial_msg -- str, text pre-loaded into the editor buffer
    editor      -- str, fallback editor command (default: vim)
    return      -- str, the edited file content
    """
    EDITOR = os.environ.get('EDITOR', editor)
    # fix: the default mode 'w+b' requires bytes; open in text mode so the
    # str argument can be written on Python 3
    with tempfile.NamedTemporaryFile(mode='w+', suffix=".tmp") as tf:
        tf.write(initial_msg)
        # flush msg to disk so the editor sees it
        tf.flush()
        # open the file using EDITOR; blocks until the editor exits
        subprocess.call([EDITOR, tf.name])
        # go back to the beginning and read the edited content
        tf.seek(0)
        msg = tf.read()
    return msg
"""
#*****************************************************#
# #
# Grep Routines
# #
#-----------------------------------------------------#
"""
def pygrep_str(text, keyword, length=100, ishift=0, begin=0):
    """Return (fragment, next_pos) for the first *keyword* in *text*.

    The fragment is up to *length* characters of *text*, starting *ishift*
    characters after the match found at or beyond index *begin*.  When the
    keyword is missing, prints a diagnostic and returns (None, begin); when
    the fragment runs to the end of the text, next_pos is -1.
    """
    hit = text.find(keyword, begin)
    if hit == -1:
        print("pygrep error: '%s' not found in text!" % keyword)
        return None, begin
    frag_start = hit + ishift
    frag_end = frag_start + length
    if frag_end > len(text):
        return text[frag_start:], -1
    return (text[frag_start:frag_end], frag_end)
def partial_string(string, ishift, ilen):
    """Return up to *ilen* characters of *string* starting at index *ishift*."""
    end = ishift + ilen
    if len(string) < end:
        return string[ishift:]
    return string[ishift:end]
def pygrep_iterator(iterator, keyword, ilen=100, ishift=0, begin=0):
    """Scan *iterator* line by line for *keyword* and return (fragment, 1),
    or (None, -1) when the keyword never appears.

    The fragment starts at the keyword itself, is offset by *ishift*
    characters and capped at *ilen* characters; *begin* is accepted for
    signature compatibility with pygrep_str but unused.
    """
    limit = ishift + ilen
    collected = ""
    found = False
    for line in iterator:
        if keyword in line:
            # keep the keyword plus everything after it on this line
            collected = keyword + line.partition(keyword)[2]
            found = True
            break
    if not found:
        return (None, -1)
    size = len(collected)
    if size > limit:
        return (partial_string(collected, ishift, ilen), 1)
    # keep appending whole lines until we have enough characters
    for line in iterator:
        size += len(line)
        collected += line
        if size > limit:
            return (partial_string(collected, ishift, ilen), 1)
    return (collected, 1)
def pygrep_iterator_lines(iterator, keyword, ilen=10, ishift=0):
    """Return up to *ilen* lines from *iterator* starting at the line that
    contains *keyword*, as a single string with the final newline stripped.

    With ishift == 0 the keyword line itself is the first line returned;
    with ishift >= 1 the keyword line is discarded and collection starts
    ishift lines after it.  Returns (text, 1) on success and (None, -1)
    when the keyword is missing or the iterator ends while skipping.
    """
    assert ishift >= 0
    # istart doubles as the collected-line counter; starting it at 1 when
    # ishift == 0 accounts for the keyword line already held in out_str.
    istart = 0
    out_str = ""
    icount = -1
    # check shift
    if ishift == 0:
        istart = 1
    # advance to the first line containing the keyword
    for line in iterator:
        if keyword in line:
            out_str = line
            icount = 1
            break
    # keyword never appeared
    if icount == -1:
        return (None, -1)
    # fast path: exactly one line wanted and no skipping
    if istart == 1 and ilen == 1:
        return (out_str[:-1], 1)
    # skip ishift lines (the keyword line is dropped in this mode)
    if ishift != 0:
        out_str = ""
        if ishift == 1:
            pass
        else:
            for line in iterator:
                icount += 1
                if icount == ishift:
                    break
    else:
        icount = ishift
    # iterator ran dry before the skip completed
    if icount != ishift:
        return (None, -1)
    # collect the remaining lines, up to ilen in total
    for line in iterator:
        out_str += line
        istart += 1
        if istart == ilen:
            break
    # strip the trailing newline of the last collected line
    return out_str[:-1], 1
def pyxgrep_iterator_lines(iterator, keyword, ilen=10, ishift=0):
    """List variant of pygrep_iterator_lines: same search/skip semantics,
    but returns the matched lines as a list (newlines preserved) instead of
    one joined string.

    Returns (lines, 1) on success and (None, -1) when the keyword is
    missing or the iterator ends while skipping.
    """
    assert ishift >= 0
    # istart doubles as the collected-line counter (see pygrep_iterator_lines)
    istart = 0
    icount = -1
    # check shift
    if ishift == 0:
        istart = 1
    # advance to the first line containing the keyword
    for line in iterator:
        if keyword in line:
            out_str = [line]
            icount = 1
            break
    # keyword never appeared
    if icount == -1:
        return (None, -1)
    # fast path: exactly one line wanted and no skipping
    if istart == 1 and ilen == 1:
        return (out_str, 1)
    # skip ishift lines (the keyword line is dropped in this mode)
    if ishift != 0:
        out_str = []
        if ishift == 1:
            pass
        else:
            for line in iterator:
                icount += 1
                if icount == ishift:
                    break
    else:
        icount = ishift
    # iterator ran dry before the skip completed
    if icount != ishift:
        return (None, -1)
    # collect the remaining lines, up to ilen in total
    for line in iterator:
        out_str.append(line)
        istart += 1
        if istart == ilen:
            break
    return out_str, 1
| StarcoderdataPython |
188730 | <gh_stars>0
from setuptools import setup
# Package manifest for the read_dicom distribution.
# NOTE(review): the author field holds a description-like string ("Read DICCOM
# files in Python", with a typo) rather than a person's name -- confirm intent
# before publishing.
setup(name='read_dicom',
      version='0.1.1',
      description='Read DICOM files in Python',
      url='http://github.com/louismullie/read_dicom',
      author='Read DICCOM files in Python',
      author_email='<EMAIL>',
      license='MIT',
      packages=['read_dicom'],
      zip_safe=False)
8115392 | <filename>data.gov_mon_mt.py
# Imports
import sys
import os
import requests
import urllib.request
import csv
from datetime import datetime
import time
import concurrent.futures
import threading
import socket
#from multiprocessing import Pool
#osname = sys.platform
#if osname == "darwin":
# print("MacOS detected: set No Proxy to avoid system config crash.")
# Disable proxy lookups for every host (avoids the macOS system-config crash
# mentioned in the commented-out platform check above this line).
os.environ['no_proxy'] = "*"
http_proxies = {
    "http": None,
    "https": None
}
# initialize counters
pagesize = 10  # results fetched per CKAN API page
timeout = 5 # wait up to 5 seconds for data source to respond
socket.setdefaulttimeout(timeout)
# name of report file containing results of all data sources
common_report_file = 'data.gov_mon_rpt_' + datetime.now().strftime("%m_%d_%Y") + '.csv'
with open(common_report_file, 'w') as f:
    writer = csv.writer(f)
    # write header row
    writer.writerow(['URL', 'Name', 'Description', 'Resource State', 'Protocol', 'Status', 'Link State'])
# lock to keep threads from writing report file over each other
report_file_lock = threading.Lock()
# search term: first CLI argument, defaulting to "climate"
if len(sys.argv) > 1:
    search_string = sys.argv[1]
else:
    search_string = "climate"
print("Searching for resources containing: ", search_string)
# reusable functions
def handle_ftplink(resources, report_row, i):
    """Probe the FTP resource at index *i* and record the outcome.

    Appends protocol ('FTP'), status ('NA' -- FTP has no status codes) and
    link state ('GOOD') to *report_row*.  A connection failure raises and is
    handled by the caller, so reaching the appends implies success.

    Returns (ftplinks, good, bad) deltas for the caller's counters.
    """
    # use urllib since the requests package only speaks HTTP
    link = urllib.request.urlopen(resources[i]['url'], data=None)
    link.close()  # fix: the connection handle was previously leaked
    report_row.append('FTP')
    report_row.append('NA')
    report_row.append('GOOD')
    return 1, 1, 0
def handle_httplink(resources, report_row, i):
    """Probe the HTTP resource at index *i* and record the outcome.

    Appends protocol ('HTTP'), the numeric status code and a GOOD/BAD link
    state to *report_row*.  Returns (httplinks, good, bad) counter deltas.
    """
    response = requests.get(resources[i]['url'], timeout=timeout, proxies=http_proxies)
    report_row.append('HTTP')
    report_row.append(response.status_code)
    if response.status_code == 200:
        report_row.append('GOOD')
        return 1, 1, 0
    report_row.append('BAD')
    return 1, 0, 1
# worker function
def get_results(row_range):
    """Worker: link-check one batch of data.gov search results.

    row_range -- [start, end] row offsets into the global search results.
    Fetches the batch page by page (pagesize rows per API call), probes each
    resource URL over HTTP or FTP, and appends one CSV row per resource to
    the shared report file under report_file_lock.

    Returns [num_resources, httplinks, ftplinks, good, bad, unknown].
    NOTE(review): the bare `except:` clauses below swallow every error,
    including KeyboardInterrupt -- consider narrowing them.
    """
    # Initialize counts for this run
    num_resources = 0
    start = row_range[0]
    end = row_range[1]
    resource_report = []  # NOTE(review): never used below
    resources = []
    httplinks = 0
    ftplinks = 0
    good = 0
    bad = 0
    unknown = 0
    report_file = common_report_file
    # Now get all results page by page
    for startrow in range (start, end, pagesize):
        parameters = {'q': search_string, 'rows': pagesize, 'start': startrow}
        try:
            r = requests.get('https://catalog.data.gov/api/3/action/package_search', params = parameters, proxies=http_proxies)
            json_dict = r.json()
            num_results_total = json_dict['result']['count']
            num_resources_in_response = len(json_dict['result']['results'])
        except:
            # skip pages that fail to download or parse
            continue
        results = []
        resources = []
        previous_url = None
        # build list of resources within results
        for i in range(0, num_resources_in_response):
            try:
                results.append(json_dict['result']['results'][i])
                for j in range(0, len(results[i]['resources'])):
                    rsrc = results[i]['resources'][j]
                    # check for URL same as previous - if so, skip
                    if rsrc['url'] == previous_url:
                        continue
                    else:
                        previous_url = rsrc['url']
                        resources.append(rsrc)
            except:
                # just skip bad JSON resource
                continue
        # now go through and test all resources
        num_resources = len(resources)
        for i in range(0, num_resources):
            report_row = [resources[i]['url'], resources[i]['name'],resources[i]['description'], resources[i]['state']]
            # initialize internal function return values
            f = 0 # ftplinks count
            h = 0 # httplinks count
            g = 0 # good count
            b = 0 # bad count
            # test resource URL
            try:
                # Check HTTP resources
                if resources[i]['resource_locator_protocol'] == 'HTTP' or resources[i]['url'][:4] == 'http':
                    h, g, b = handle_httplink(resources, report_row, i)
                # Check FTP resources
                if resources[i]['url'][:3] == 'ftp':
                    f, g, b = handle_ftplink(resources, report_row, i)
            except requests.exceptions.RequestException:
                bad += 1
                report_row.append('UNKNOWN')
                report_row.append('NONE')
                report_row.append('BAD')
            except:
                # maybe bad JSON (e.g. missing resource_locator_protocol) -
                # fall back to checking the URL scheme directly
                try:
                    if resources[i]['url'][:3] == 'ftp':
                        f, g, b = handle_ftplink(resources, report_row, i)
                    else:
                        if resources[i]['url'][:4] == 'http':
                            h,g,b = handle_httplink(resources, report_row, i)
                        else:
                            unknown += 1
                            report_row.append('UNKNOWN')
                            report_row.append('NONE')
                            report_row.append('UNKNOWN')
                except:
                    bad += 1
                    report_row.append('UNKNOWN')
                    report_row.append('NONE')
                    report_row.append('BAD')
            httplinks += h
            ftplinks += f
            good += g
            bad += b
            # write result row to CSV (serialized across worker threads)
            with report_file_lock:
                with open(report_file, 'a') as f:
                    writer = csv.writer(f)
                    writer.writerow(report_row)
    # create return result
    results = [num_resources,httplinks, ftplinks, good, bad, unknown]
    return results
# Main logic ...
def main():
    """Query data.gov for *search_string* and link-check every returned resource.

    Prints a summary of good/bad/unknown links and the elapsed time; the
    per-resource detail goes to common_report_file via the worker threads.
    """
    # We will report elapsed time
    start_time = time.time()
    # One metadata-only request to learn how many results exist.
    # fix: a second, redundant request hard-coded q=climate, silently
    # ignoring a search string passed on the command line.
    parameters = {'q': search_string, 'rows': 0}
    r = requests.get('https://catalog.data.gov/api/3/action/package_search',
                     params=parameters, timeout=10, proxies=http_proxies)
    json_dict = r.json()
    print('Request success = ', json_dict['success'])
    num_results_total = json_dict['result']['count']
    print('Total results: ', num_results_total)
    # Fan the work out over a thread pool, batch_size rows per task
    poolsize = 10
    batch_size = 100
    num_results_test = num_results_total  # reduce here for testing only
    # create list of [start, end] row ranges, one per worker task
    ranges = []
    # Reset result counts
    total_resources = 0
    good = 0
    bad = 0
    unknown = 0
    httplinks = 0
    ftplinks = 0
    for batch_no in range(0, num_results_test, batch_size):
        ranges.append([batch_no, batch_no + batch_size - 1])
    with concurrent.futures.ThreadPoolExecutor(max_workers=poolsize) as executor:
        list_of_results = executor.map(get_results, ranges)
    # Consolidate counts
    for each_result in list_of_results:
        total_resources += each_result[0]
        httplinks += each_result[1]
        ftplinks += each_result[2]
        good += each_result[3]
        bad += each_result[4]
        unknown += each_result[5]
    # Print summary of run
    print('Total number of resources: ', total_resources)
    print('HTTP Links: ', httplinks)
    print('FTP links: ', ftplinks)
    print('Good links: ', good)
    print('Bad links: ', bad)
    print("Unknown: ", unknown)
    print('See detailed report in ', common_report_file)
    # Print elapsed time needed to create report
    elapsed_time = time.time() - start_time
    print('Elapsed Time: ', round(elapsed_time), ' seconds')
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6562428 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2019, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.;
import argparse
from difflib import unified_diff
from glob import glob, iglob
import json
import os
import re
import shlex
import subprocess
import sys
from typing import Iterable, List
from libfb.py.fbcode_root import get_fbcode_dir
HACK_TEST_DIR_DEFAULT = os.path.join(get_fbcode_dir(), "hphp/hack/test")
def parse_args():
    """Build and parse the CLI for the OCaml-vs-Rust facts comparison harness.

    Extra positional arguments are collected into passthrough_args and are
    forwarded verbatim to the binaries under test.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--dir', default=HACK_TEST_DIR_DEFAULT,
                    help="root directory containing Hack files to be tested")
    ap.add_argument('--file', '-f', action='append',
                    help="test specific file only (may be repeated)")
    ap.add_argument('--keep-going', action='store_true',
                    help="do not stop after the first failure (consider with --no-diff)")
    ap.add_argument('--no-diff', dest='diff', action='store_false',
                    help="do not show diff of OCaml vs Rust output, respectively")
    ap.add_argument('--show-ok', action='store_true', help="show passing tests")
    # Known-divergent tests, skipped by default; joined into one alternation regex.
    ap.add_argument('--skip-re', default=r"|".join([
        r"keyword_autocomplete/function_parameter\.php",
        r"namespace_infinite_loop1\.php",
        # TODO: remove lines below when HHBC mangling logic for XHP names
        r".*xhp.*\.php",
        r"typecheck/(wrong_fixme_format|extra_scope|attr_children)\.php",
        r"typecheck/(string_expressions12|hh_fixme1[23]|generic_attr2)\.php",
        r"(test_mutable_return5(_7)?|bad_inout_use_lvalue7)\.php",
        r"(lint/ui_namespace_bad|doesnt_record_any_map_extraction)\.php",
        r"(fileresponse|D2172778|full_analysis/zack_repro).php",
        r"((href|style)_attrib|notification_renderer)\.php",
        r"(userinput_to_img_src_attr|signed_uri|receiver_sinks|.*linkshim)\.php",
        r"processed_data/display_.*\.php",
        r"lsp_exchanges/completion_extras\.php",
        r"(basic_nested_tag|XScheduleAsyncController)\.php",
        r"ast_to_nast/classvar_properties.php",
        r"ffp/tests/(extra_scope|decl_alias)\.php",
    ]))
    ap.add_argument('passthrough_args', nargs='*')
    return ap.parse_args()
def find_hack_files(root: str, skip_re) -> Iterable[str]:
    """Yield Hack source files under *root* whose paths do not match *skip_re*."""
    for extension in (".hack", ".hhi", ".php"):
        pattern = os.path.join(root, "**/*" + extension)
        for candidate in iglob(pattern, recursive=True):
            if skip_re.search(candidate):
                continue
            yield candidate
def locate_binary_under_test(suffix) -> str:
    """Return the single buck-built binary matching *suffix* under buck-out.

    Raises AssertionError when zero or multiple candidates are found.
    """
    paths = glob(
        get_fbcode_dir() + "/buck-out/**/hphp/hack/test/rust/" + suffix)
    # fix: the original only asserted len < 2, so an empty match fell through
    # to an IndexError; the message also had an unbalanced parenthesis.
    assert len(paths) == 1, \
        "Found {} binaries for suffix {} (try `buck clean`)".format(
            len(paths), suffix)
    return paths[0]
def test_all(paths: Iterable[str], args: List[str], on_failure, on_success):
    """Run both facts binaries on every path and compare their JSON output.

    paths      -- Hack files to test
    args       -- extra CLI arguments forwarded to both binaries
    on_failure -- callback(caml_output, rust_output) invoked on a mismatch
    on_success -- callback(path) invoked when the outputs agree
    """
    def binary_output_as_pretty_json(suffix: str, path: str) -> List[str]:
        # Normalize via sort_keys + indent so a line diff is meaningful.
        cmd = [locate_binary_under_test(suffix)] + args + ["--file-path", path]
        print("RUN: " + " ".join(map(shlex.quote, cmd)))
        obj = json.loads(subprocess.check_output(cmd))
        return json.dumps(obj, sort_keys=True, indent=2).split("\n")
    correct, total = 0, 0
    for path in paths:
        total += 1
        caml_output = binary_output_as_pretty_json(
            "facts_json_ocaml/facts_json_ocaml.opt",
            path)
        rust_output = binary_output_as_pretty_json(
            "facts_json_rust#binary/facts_json_rust",
            path)
        ok = (rust_output == caml_output)
        if ok:
            correct += 1
        # running tally, e.g. "12/15"
        print("%d/%d" % (correct, total))
        if not ok:
            print("FAILED:", path)
            on_failure(caml_output, rust_output)
        else:
            on_success(path)
if __name__ == '__main__':
    args = parse_args()
    # Mismatch handler: optionally print a unified diff (without the +++/---
    # header lines) and stop at the first failure unless --keep-going was set.
    def on_failed(caml_output, rust_output):
        if args.diff:
            for line in unified_diff(caml_output, rust_output):
                if not (line.startswith("---") or line.startswith("+++")):
                    print(line)
        if not args.keep_going:
            sys.exit(1)
    def on_success(path):
        if args.show_ok:
            print("OK:", path)
    dir_skip_re = re.compile(args.skip_re)
    # Explicit --file arguments override the directory walk.
    paths = args.file if args.file else find_hack_files(args.dir, dir_skip_re)
    test_all(paths, args.passthrough_args, on_failed, on_success)
| StarcoderdataPython |
109894 | <filename>core/views.py
import os, json
from PIL import Image
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings as django_settings
from django.shortcuts import render, redirect, get_object_or_404
from django.db.models.functions import TruncDay
from django.db.models import Count, Q
from core.forms import ProfileForm, ChangePasswordForm
from feeds.models import Feed
from feeds.views import FEEDS_NUM_PAGES, feeds
from articles.models import Article, ArticleComment
from questions.models import Question, Answer
from activities.models import Activity
def home(request):
    """Show the feeds page to authenticated users, the login page otherwise."""
    if not request.user.is_authenticated:
        return redirect('login')
    return feeds(request)
@login_required
def network(request):
    """List all active users, 100 per page, ordered by username."""
    active_users = User.objects.filter(is_active=True).order_by('username')
    pages = Paginator(active_users, 100)
    requested_page = request.GET.get('page')
    try:
        page_users = pages.page(requested_page)
    except PageNotAnInteger:
        # non-numeric (or missing) page parameter: show the first page
        page_users = pages.page(1)
    except EmptyPage:
        # past-the-end page parameter: show the last page
        page_users = pages.page(pages.num_pages)
    return render(request, 'core/network.html', {'users': page_users})
@method_decorator([login_required], name='dispatch')
class UserList(ListView):
    """Paginated, searchable directory of active users."""
    paginate_by = 36
    template_name = 'core/users.html'
    context_object_name = 'users'

    def get_queryset(self):
        """Return active users, optionally filtered by the ?q= search term."""
        search = self.request.GET.get('q', '')
        active = User.objects.filter(is_active=True).order_by('username')
        if not search:
            return active
        matches = (
            Q(username__icontains=search)
            | Q(email__icontains=search)
            | Q(first_name__icontains=search)
            | Q(last_name__icontains=search)
        )
        return active.filter(matches)
@login_required
def profile(request, username):
    """Render *username*'s profile page: interaction counts, a daily-activity
    line chart and the first page of their feed."""
    user = get_object_or_404(User, username=username)
    all_feeds = Feed.get_feeds().filter(user=user)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    feeds = paginator.page(1)
    counts = {
        'feeds': Feed.objects.filter(user=user).count(),
        'article': Article.objects.filter(create_user=user).count(),
        'article_comment': ArticleComment.objects.filter(user=user).count(),
        'question': Question.objects.filter(user=user).count(),
        'answer': Answer.objects.filter(user=user).count(),
        'activity': Activity.objects.filter(user=user).count(),
    }
    # Daily user activity: one (day, count) row per day with any activity.
    user_activity = Activity.objects.filter(user=user).annotate(day=TruncDay(
        'date')).values('day').annotate(c=Count('id')).values('day', 'c')
    # fix: these were unpacked as (dates, datapoints) -- swapped names, since
    # the first element of each pair is the count and the second the day.
    day_counts, day_labels = zip(*[[a['c'], str(a['day'].date())] for a in user_activity]) if user_activity else ([], [])
    data = {
        'page_user': user,
        'counts': counts,
        'global_interactions': sum(counts.values()),  # noqa: E501
        'bar_data': list(counts.values()),
        'line_labels': json.dumps(day_labels),
        'line_data': json.dumps(day_counts),
        'feeds': feeds,
        'from_feed': feeds[0].id if feeds else -1  # pragma: no cover
    }
    return render(request, 'core/profile.html', data)
@login_required
def settings(request):
    """Show and process the profile-settings form for the logged-in user.

    On a valid POST the user and their profile are updated in place and a
    success flash message is queued.
    NOTE(review): there is no redirect after a successful POST, so a browser
    refresh re-submits the form (missing POST-redirect-GET).
    """
    user = request.user
    if request.method == 'POST':
        form = ProfileForm(request.POST)
        if form.is_valid():
            user.first_name = form.cleaned_data.get('first_name')
            user.last_name = form.cleaned_data.get('last_name')
            user.profile.job_title = form.cleaned_data.get('job_title')
            user.email = form.cleaned_data.get('email')
            user.profile.url = form.cleaned_data.get('url')
            user.profile.location = form.cleaned_data.get('location')
            user.save()
            messages.add_message(request, messages.SUCCESS, 'Your profile were successfully edited.')
    else:
        # Pre-fill profile-model fields that the User instance does not carry.
        form = ProfileForm(instance=user, initial={
            'job_title': user.profile.job_title,
            'url': user.profile.url,
            'location': user.profile.location
        })
    return render(request, 'core/settings.html', {'form':form})
@login_required
def picture(request):
    """Render the profile-picture page, flagging whether an upload just
    completed (the upload view redirects back with ?upload_picture=uploaded).
    """
    # fix: the original wrapped this comparison in a pointless broad
    # try/except; request.GET.get never raises here.
    uploaded_picture = request.GET.get('upload_picture') == 'uploaded'
    return render(request, 'core/picture.html', {'uploaded_picture': uploaded_picture})
@login_required
def password(request):
    """Show and process the change-password form for the logged-in user."""
    user = request.user
    if request.method == 'POST':
        form = ChangePasswordForm(request.POST)
        if form.is_valid():
            new_password = form.cleaned_data.get('new_password')
            # fix: a redacted '<PASSWORD>' placeholder had replaced the
            # argument here, which did not even parse
            user.set_password(new_password)
            user.save()
            messages.add_message(request, messages.SUCCESS, 'Your password were successfully changed.')
    else:
        form = ChangePasswordForm(instance=user)
    return render(request, 'core/password.html', {'form':form})
@login_required
def upload_picture(request):
    """Save the uploaded picture as <username>_tmp.jpg (downscaled to at most
    350px wide) and send the user on to the cropping step.

    NOTE(review): the bare except silently redirects back on any failure
    (missing file, unreadable image, ...) -- consider logging the error.
    """
    try:
        profile_pictures = django_settings.MEDIA_ROOT + '/profile_pictures/'
        if not os.path.exists(profile_pictures):
            os.makedirs(profile_pictures)
        f = request.FILES['picture']
        filename = profile_pictures + request.user.username + '_tmp.jpg'
        # stream the upload to disk chunk by chunk
        with open(filename, 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        im = Image.open(filename)
        width, height = im.size
        if width > 350:
            new_width = 350
            # NOTE(review): float division on Python 3; Image.thumbnail
            # coerces the size to int, so this works but is implicit
            new_height = (height * 350) / width
            new_size = new_width, new_height
            # NOTE(review): Image.ANTIALIAS is deprecated in modern Pillow
            # (use Image.LANCZOS) -- verify the installed Pillow version
            im.thumbnail(new_size, Image.ANTIALIAS)
            im.save(filename)
        return redirect('/settings/picture/?upload_picture=uploaded')
    except Exception:
        # swallow the error and return to the picture page without the flag
        return redirect('/settings/picture/')
@login_required
def save_uploaded_picture(request):
    """Crop the temp picture with the box POSTed by the cropping UI, save it
    as the user's final (max 200x200) profile picture and remove the temp file.

    NOTE(review): any failure (missing temp file, non-numeric coordinates) is
    silently ignored and the user is redirected back regardless.
    """
    try:
        # (x, y) is the selection's top-left corner; (w, h) its size
        x = int(request.POST.get('x'))
        y = int(request.POST.get('y'))
        w = int(request.POST.get('w'))
        h = int(request.POST.get('h'))
        tmp_filename = django_settings.MEDIA_ROOT + '/profile_pictures/' + request.user.username + '_tmp.jpg'
        filename = django_settings.MEDIA_ROOT + '/profile_pictures/' + request.user.username + '.jpg'
        im = Image.open(tmp_filename)
        cropped_im = im.crop((x, y, w+x, h+y))
        # NOTE(review): Image.ANTIALIAS is deprecated in modern Pillow
        cropped_im.thumbnail((200, 200), Image.ANTIALIAS)
        cropped_im.save(filename)
        os.remove(tmp_filename)
    except Exception:
        pass
    return redirect('/settings/picture/')
245301 | #!/usr/bin/env python
#-------------------------------------------------------------------------------#
# A tool which exports a development tree to userland.
# The development tree is supposed to properly built
# and tested.
#-------------------------------------------------------------------------------#
import re
import os
import os.path
import tarfile
import logging
import argparse
import tempfile
from datetime import date
LOGGERS_CREATED = set()
LOGGER_LEVEL = logging.DEBUG
# --- Sets up a console logger
def setup_console_logger(name):
    """Return the logger called *name*, attaching a console handler only the
    first time that name is requested (tracked in LOGGERS_CREATED)."""
    logger = logging.getLogger(name)
    if name not in LOGGERS_CREATED:
        logger.setLevel(LOGGER_LEVEL)
        handler = logging.StreamHandler()
        handler.setLevel(LOGGER_LEVEL)
        layout = logging.Formatter('%(asctime)s - %(name)s - '
                                   '%(levelname)s - %(message)s')
        handler.setFormatter(layout)
        logger.addHandler(handler)
        LOGGERS_CREATED.add(name)
    return logger
# -- Reading the arguments
def read_arguments():
    """Parse the CLI arguments for the RTBkit export tool.

    --rtbkit_root and --local_root are required; --prefix optionally
    overrides the default rtbkit-YYYYMMDD archive basename.
    """
    parser = argparse.ArgumentParser(description='tool for exporting '
                                     'an RTBkit tree')
    parser.add_argument('--rtbkit_root', required=True,
                        help='path to the rtbkit root directory')
    parser.add_argument('--local_root', required=True,
                        help='path to the local directory where platform-deps were installed')
    parser.add_argument('--prefix',
                        help='Optionally specify a basename for the export. default to rtbkit-YYYMMDD')
    return parser.parse_args()
def main_build(args):
    """Build the RTBkit tree at args.rtbkit_root (build steps currently
    commented out), restoring the original working directory afterwards."""
    # fix: 'logger' was referenced without ever being defined (NameError)
    logger = setup_console_logger('export_rtbkit')
    cwd = os.getcwd()
    logger.info('building rtbkit in %s' % args.rtbkit_root)
    try:
        os.chdir(args.rtbkit_root)
        # os.system('git pull origin master')
        # os.system('make compile test')
    finally:
        os.chdir(cwd)
def main_archive (args):
    """Package the built RTBkit tree into <base>.tar.bz2 for userland use.

    The archive contains: a generated env-setup shell script and sample
    Makefile, all RTBkit headers, the built binaries and shared libraries,
    the examples directory, the platform-deps include/lib/bin trees and the
    sample JSON/zookeeper configs.

    NOTE(review): the os.write calls below pass str to a raw fd, which only
    works on Python 2 -- this script predates Python 3.  The mkstemp fds are
    also never closed.
    """
    logger = setup_console_logger('export_rtbkit')
    bin_root = args.rtbkit_root + '/build/x86_64/bin'
    # default basename is rtbkit-YYYYMMDD unless --prefix was given
    base = 'rtbkit-%s'%date.today().strftime('%Y%m%d')
    if args.prefix: base = args.prefix
    tar_filename=base + '.tar.bz2'
    tar_out = tarfile.open(name=tar_filename ,mode='w:bz2', dereference=True)
    def build_makefile ():
        # Write the sample Makefile shipped with the archive to a temp file
        # and return its path.
        # NOTE(review): make recipe lines must start with a hard tab in the
        # generated file -- verify the line under "clean:" survived intact.
        Makefile = """##
## This simple makefile allows to build RTBkit samples
## you may need to adjust it to your needs.
##
CPPFLAGS = -I$(RTBKIT_HOME)/include
CPPFLAGS += -I$(RTBKIT_HOME)/include/rtbkit/
CPPFLAGS += -I$(RTBKIT_HOME)/include/rtbkit/leveldb/include
CXXFLAGS += -std=c++0x -ggdb -Wno-deprecated-declarations
LDFLAGS = -Wl,--rpath-link,$(RTBKIT_HOME)/lib -L$(RTBKIT_HOME)/lib
LDFLAGS += -Wl,--copy-dt-needed-entries
LDFLAGS += -Wl,--no-as-needed
LOADLIBES = -lrtbkit -lexception_hook -ltcmalloc -ldl
EXECS = multi_agent data_logger_ex \
bidding_agent_console bidding_agent_ex \
bid_request_endpoint \
adserver_endpoint \
integration_endpoints
all: $(EXECS)
clean:
$(RM) $(EXECS) *.o
.PHONY: all clean
"""
        t = tempfile.mkstemp()
        os.write(t[0], Makefile)
        return t[1]
        pass
    def build_env_script():
        # Write the env-setup shell script (RTBKIT_HOME, paths, compat
        # symlinks) to a temp file and return its path.
        t = tempfile.mkstemp()
        os.write(t[0], '#!/bin/bash\n#\n# Set up the environment i/o use %s\n#'%base)
        os.write(t[0], '\nexport RTBKIT_HOME=`pwd`')
        os.write(t[0], '\nexport LD_LIBRARY_PATH=$RTBKIT_HOME/lib:$LD_LIBRARY_PATH')
        os.write(t[0], '\nexport PATH=$RTBKIT_HOME/bin:$RTBKIT_HOME/bin/zookeeper/bin:$PATH')
        os.write(t[0], '\n[ -L rtbkit ] || ln -s . rtbkit')
        os.write(t[0], '\nif [ ! -L build/x86_64/bin ]\nthen\n mkdir -p build/x86_64')
        os.write(t[0], '\n cd build/x86_64\n ln -s ../../bin\nfi\ncd $RTBKIT_HOME\n')
        return t[1]
        pass
    logger.info ('opening archive: ' + tar_filename)
    def tarfile_add (arc, name, arcname):
        # logged wrapper around TarFile.add
        logger.info ('adding %s to %s'%(name, arcname))
        arc.add(name=name,arcname=arcname)
    def strip_root(root, path):
        # drop the leading *root* components from *path* (both '/'-split)
        root_vec = root.split('/')
        path_vec = path.split('/')
        if len(root_vec) >= len(path_vec): return path
        v = '/'.join(path_vec[len(root_vec):])
        return v
    def is_header_file (path):
        if (path.endswith('.h') or path.endswith('.hh') or path.endswith('.hpp') or path.endswith('.h.in')):
            return True
        return False
    # add our environment script
    tarfile_add(tar_out, build_env_script(), '%s/%s.env.sh'%(base,base))
    # add our makefile
    makefile = build_makefile()
    tarfile_add(tar_out, build_makefile(), '%s/examples/Makefile'%base)
    # RTBkit headers, excluding the build/ and examples/ trees
    exclude_re = re.compile ('.*\/(build|examples)\/.*')
    for root, dirs, files in os.walk(args.rtbkit_root):
        if '.git' in dirs:
            dirs.remove ('.git')
        for f in files:
            p= os.path.join(root,f)
            if is_header_file(p) and not exclude_re.match(p):
                nname = base+'/include/rtbkit/'+strip_root(args.rtbkit_root,p)
                tarfile_add(tar_out,p,nname)
    # RTBkit binaries; skip .mk files and hash-suffixed intermediate .so files
    exclude_re = re.compile('.*\.(mk|[a-z0-9]{19,}\.so)$')
    for root, dirs, files in os.walk(bin_root):
        if '.git' in dirs:
            dirs.remove ('.git')
        for f in files:
            p= os.path.join(root,f)
            if f[0] != '.' and not exclude_re.match(p):
                # shared objects go under lib/, everything else under bin/
                rep = '/lib/' if p.endswith('.so') else '/bin/'
                tarfile_add(tar_out, p,base+rep+f)
    # the examples directory
    tarfile_add (tar_out, args.rtbkit_root+'/rtbkit/examples', base+'/examples')
    # platform-deps includes, libs and binaries
    tarfile_add (tar_out, args.local_root+'/include',base+'/include')
    tarfile_add (tar_out, args.local_root+'/lib',base+'/lib')
    tarfile_add (tar_out, args.local_root+'/bin',base+'/bin')
    # grab our sample json configs
    sample_configs= ['sample.bootstrap.json','sample.launch.json','sample.launch.scale.json','sample.redis.conf','sample.zookeeper.conf']
    for sample_config in sample_configs:
        fn=args.rtbkit_root+'/rtbkit/'+sample_config
        if os.path.isfile(fn):
            tarfile_add (tar_out, fn, base+'/config/'+sample_config)
        else:
            logger.warning ('missing %s'%sample_config)
            continue
        # the zookeeper sample config is duplicated where zookeeper expects it
        if sample_config=='sample.zookeeper.conf':
            tarfile_add (tar_out, fn, base+'/bin/zookeeper/bin/'+sample_config)
            tarfile_add (tar_out, fn, base+'/bin/zookeeper/conf/zoo.cfg')
    tar_out.close()
##########################
# Script entry point: parse the CLI and build the export archive.
if __name__ == '__main__':
    args = read_arguments()
    main_archive (args)
| StarcoderdataPython |
4913707 | #3Data_Preprocessing.py
import os
import random
import numpy as np
from six import string_types
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Activation , Flatten, Dropout
from keras.layers import Conv2D, MaxPool2D
#from keras.optimizers import SGD
from tensorflow.keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
import cv2
from tensorflow.python.keras.engine.training import Model
Image_size = 64
def resize_image(image,height=Image_size,width=Image_size):
top,bottom,left,right = 0,0,0,0
h,w,tunnel = image.shape
longest_edge = max(h,w)
if (h<longest_edge):
d = longest_edge - h
top = d // 2 #"//"ๆฏๅๆด้ค็ๅ
bottom = d // 2
elif (w<longest_edge):
d = longest_edge - w
left = d//2
right = d//2
else:
pass
BLACK = [0,0,0]
constant = cv2.copyMakeBorder(image,top,bottom,left,right,cv2.BORDER_CONSTANT,value=BLACK)
return cv2.resize(constant,(height,width))
def read_dataset(dir_path):
    """Load every .jpg under *dir_path*, resized to Image_size squares.

    The directory's own name is used as the label for every image in it.

    Returns:
        (images, labels): parallel lists of image arrays and label strings.
    """
    images, labels = list(), list()
    aim_dir = dir_path
    dir_name = deal_dir_str(dir_path)
    for dir_item in os.listdir(aim_dir):
        # os.listdir() lists every file and sub-directory of the target folder;
        # endswith('.jpg') keeps only the JPEG images.
        if dir_item.endswith('.jpg'):
            full_path = os.path.join(aim_dir, dir_item)
            abs_full_path = os.path.abspath(full_path)
            image = cv2.imread(abs_full_path)
            image = resize_image(image, Image_size, Image_size)
            images.append(image)
            labels.append(dir_name)
    print(labels)
    return images, labels
def deal_dir_str(dir_path):
    """Return the final path component of *dir_path* (assumed to end with '/')."""
    trimmed = dir_path[:len(dir_path) - 1]
    return trimmed[trimmed.rfind('/') + 1:]
####################################
def load_dataset(dir_name):
    """Load one directory of images and derive binary labels.

    Label 1 marks images from the 'FaceData_wong' folder, 0 anything else.

    Returns:
        (images, data_labels): numpy arrays of the images and their 0/1 labels.
    """
    images, labels = read_dataset(dir_name)
    # Convert the image list into a single numpy array.
    images = np.array(images)
    print("(ๅ็ๆชๆกๆธ้,้ท,ๅฏฌ,่ฒๅฝฉ้้)={}".format(images.shape))
    data_labels = [1 if label.endswith('FaceData_wong') else 0 for label in labels]
    data_labels = np.array(data_labels)
    return images, data_labels
#####################################
#####################################
####### ๆธฌ่ฉฆๅ ######
def data_list(dir_path):
    """Load one directory; label each image 1 if it came from 'Trump_test', else 0.

    Returns:
        (images, data_labels): the image list and the matching 0/1 label list.
    """
    images, labels = read_dataset(dir_path)
    data_labels = [1 if label.endswith('Trump_test') else 0 for label in labels]
    return images, data_labels
def load_multi_dataset(dir_list):
    """Load and concatenate the images/labels from several directories.

    Returns:
        (All_imgs, All_labels): the combined data converted to numpy arrays.
    """
    All_imgs = list()
    All_labels = list()
    for dir_path in dir_list:
        images, labels = data_list(dir_path)
        All_imgs = All_imgs + images
        All_labels = All_labels + labels
    All_imgs = np.array(All_imgs)
    All_labels = np.array(All_labels)
    return All_imgs, All_labels
####### ๆธฌ่ฉฆๅ ######
######################################
#4face_train.py
IMAGE_SIZE = 64
class Dataset:
    """Holds the train/test split built from two image directories."""

    def __init__(self, path_name1, path_name2):
        # Training set (filled in by load()).
        self.train_images = None
        self.train_labels = None
        # Test set.
        self.test_images = None
        self.test_labels = None
        # Data directory paths.
        self.path_name1 = path_name1
        self.path_name2 = path_name2
        self.dir_list = [self.path_name1, self.path_name2]
        # Dimension ordering of the image data, set by load().
        self.input_shape = None

    def load(self, img_rows = IMAGE_SIZE, img_cols = IMAGE_SIZE, img_channels = 3, nb_classes = 2):
        """Read the directories, split train/test, one-hot the labels, normalise pixels."""
        images, labels = load_multi_dataset(self.dir_list)
        train_images, test_images, train_labels, test_labels = train_test_split(
            images, labels, test_size = 0.3, random_state = random.randint(0, 10))
        # Report how many training / test samples we ended up with.
        print(train_images.shape[0], 'train samples')
        print(test_images.shape[0], 'test samples')
        # categorical_crossentropy needs one-hot encoded class labels;
        # this exercise only has two classes.
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
        # Convert the images to float so the pixel values can be normalised.
        train_images = train_images.astype('float32')
        test_images = test_images.astype('float32')
        # Normalise pixel values into [0, 1].
        train_images = train_images / 255.0
        test_images = test_images / 255.0
        self.input_shape = (img_rows, img_cols, img_channels)
        self.train_images = train_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.test_labels = test_labels
class MODEL:
    """Small CNN classifier wrapper: build, train, save/load, evaluate, predict."""

    # Default path used by load_model() when no path is given.
    MODEL_PATH = './face_model.h5'

    def __init__(self):
        self.model = None
        self.history = object()

    def build_model(self, dataset, nb_classes = 2):
        """Assemble the Sequential CNN ending in an nb_classes softmax."""
        self.model = Sequential()
        self.model.add(Conv2D(32, kernel_size=(3, 3), padding = "same", input_shape = (64, 64, 3), activation = "relu"))
        self.model.add(MaxPool2D(pool_size=(2, 2)))
        self.model.add(Conv2D(32, kernel_size = (3, 3), padding = "same", activation="relu"))
        self.model.add(MaxPool2D(pool_size = (2, 2)))
        self.model.add(Dropout(0.25))
        # NOTE(review): Conv2D(64, 3, 3, ...) is the legacy positional form;
        # recent Keras expects Conv2D(64, (3, 3), ...) -- confirm against the
        # installed Keras version before changing.
        self.model.add(Conv2D(64, 3, 3, padding="same", activation="relu"))
        self.model.add(MaxPool2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))
        self.model.add(Flatten())
        self.model.add(Dense(512, activation="relu"))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(nb_classes, activation = "softmax"))
        self.model.summary()

    def train(self, dataset, batch_size = 20, epochs = 20, data_augmentation = False):
        """Compile with SGD and fit, optionally with on-the-fly augmentation."""
        sgd = SGD(learning_rate = 0.01, momentum = 0.9, nesterov = False, decay = 1e-6)
        self.model.compile(loss='categorical_crossentropy', optimizer = sgd, metrics = ['accuracy'])
        if not data_augmentation:
            self.history = self.model.fit(dataset.train_images, dataset.train_labels,
                                          batch_size = batch_size, epochs = epochs,
                                          validation_data = (dataset.test_images, dataset.test_labels),
                                          shuffle = True)
        else:
            datagen = ImageDataGenerator(
                featurewise_center = False,              # do not zero-centre inputs over the dataset
                samplewise_center = False,               # do not zero-centre each sample
                featurewise_std_normalization = False,   # do not divide inputs by the dataset std
                samplewise_std_normalization = False,    # do not divide each sample by its own std
                zca_whitening = False,                   # no ZCA whitening
                rotation_range = 20,                     # random rotation up to 20 degrees (range 0-180)
                width_shift_range = 0.2,                 # random horizontal shift, fraction of width (0-1)
                height_shift_range = 0.2,                # same, but vertical
                horizontal_flip = True,                  # random horizontal flips
                vertical_flip = False)                   # no vertical flips
            datagen.fit(dataset.train_images)
            self.history = self.model.fit_generator(
                datagen.flow(dataset.train_images, dataset.train_labels, batch_size = batch_size),
                steps_per_epoch = None,
                epochs = epochs,
                validation_data = (dataset.test_images, dataset.test_labels))

    def save_model(self, file_path):
        self.model.save(file_path)

    def load_model(self, file_path = MODEL_PATH):
        self.model = load_model(file_path)

    def evaluate(self, dataset):
        """Print the model's accuracy on the dataset's test split."""
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose = 1)
        print(f'{self.model.metrics_names[1]}:{score[1] * 100}%')

    def face_predict(self, image):
        """Return the predicted class index array for a single BGR image."""
        image = resize_image(image)
        image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        image = image.astype('float32')
        image = image / 255.0
        result = self.model.predict(image)
        result = np.argmax(result, axis = 1)
        return result
# def show_img(img):
# cv2.imshow('test',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def face_recognition_api(filename):
    """Classify the face in *filename* and return a Chinese description string."""
    print(filename)
    classifier = MODEL()
    classifier.load_model('./model/only_face_trump.h5')
    picture = cv2.imread(filename)
    prediction = classifier.face_predict(picture)
    return "้ๅผตๅ็่พจ่ญ็บๅทๆฎ" if prediction[0] == 1 else "้ๅผตๅ็่พจ่ญ็บ้ๆญฃๆฉ"
# if __name__ == '__main__':
# model = MODEL()
# model.load_model('./model/only_face_trump.h5')
# img = cv2.imread('./test/trump_test1.png')
# result = model.face_predict(img)
# if result[0] == 1:
# print("้ๅผตๅ็่พจ่ญ็บๅทๆฎ")
# else:
# print("้ๅผตๅ็่พจ่ญ็บ้ๆญฃๆฉ")
#cv2.imwrite("./static/predict_"+filename+".jpg", img)
#show_img(img)
| StarcoderdataPython |
3256168 | <reponame>dimkarakostas/advent-of-code
from collections import defaultdict
from math import ceil
# Parse reaction recipes from 'input14': each line "7 A, 1 B => 2 C" maps the
# output chemical to its list of (amount, chemical) inputs and its batch size.
product_requirements, reaction_amounts = {}, {}
for l in open('input14').readlines():
    reaction = [r.split(',') for r in l.strip().split('=>')]
    inputs, output = reaction[0], reaction[1][0]
    inputs = [i.strip().split(' ') for i in inputs]
    output = output.strip().split(' ')
    product_requirements[output[1]] = [(int(a), b) for [a, b] in inputs]
    reaction_amounts[output[1]] = int(output[0])
def mine(element, quantity):
    # Recursively account for producing *quantity* units of *element*,
    # updating the module-level `used` and `excess` defaultdicts
    # (re-initialised at top level before each call chain).
    if element == 'ORE':
        # ORE is the raw input: only tallied, never produced.
        used[element] += quantity
    else:
        if quantity <= excess[element]:
            # Leftovers from earlier reactions fully cover this request.
            used[element] += quantity
            excess[element] -= quantity
        else:
            # Consume all leftovers first, then run enough whole reactions
            # to cover the remainder.
            quantity = quantity - excess[element]
            used[element] += excess[element]
            excess[element] = 0
            reactions_needed = int(ceil(float(quantity) / reaction_amounts[element]))
            for (q, elem) in product_requirements[element]:
                mine(elem, reactions_needed * q)
            used[element] += quantity
            # Reactions run in whole batches, so overproduction becomes excess.
            excess[element] += reactions_needed * reaction_amounts[element] - quantity
# Part 1: ORE required for exactly one FUEL.
# NOTE: this file is Python 2 (print statements, integer '/' division).
used, excess = defaultdict(int), defaultdict(int)
mine('FUEL', 1)
print 'Part 1:', used['ORE']
# Part 2: binary-search the largest FUEL amount producible from 1 trillion ORE.
max_ore = 1000000000000
fuels_min, fuels_max = max_ore / used['ORE'], max_ore
while fuels_min < fuels_max:
    mid_fuel = (fuels_max + fuels_min) / 2
    used, excess = defaultdict(int), defaultdict(int)
    mine('FUEL', mid_fuel)
    if used['ORE'] > max_ore:
        fuels_max = mid_fuel - 1
    elif used['ORE'] < max_ore:
        fuels_min = mid_fuel + 1
    else:
        fuels_max = mid_fuel
        break
print 'Part 2:', fuels_max
| StarcoderdataPython |
5191998 | <reponame>vhn0912/python-snippets<filename>notebook/numpy_broadcasting_3d.py
import numpy as np
# Demonstrates NumPy broadcasting: a shape-(4,) vector added to a
# (2, 3, 4) array, and the equivalent explicit np.tile expansion.
a = np.zeros((2, 3, 4), dtype=int)  # np.int was removed in NumPy 1.24; plain int is equivalent
print(a)
# [[[0 0 0 0]
#   [0 0 0 0]
#   [0 0 0 0]]
#
#  [[0 0 0 0]
#   [0 0 0 0]
#   [0 0 0 0]]]

print(a.shape)
# (2, 3, 4)

b = np.arange(4)
print(b)
# [0 1 2 3]

print(b.shape)
# (4,)

print(a + b)
# [[[0 1 2 3]
#   [0 1 2 3]
#   [0 1 2 3]]
#
#  [[0 1 2 3]
#   [0 1 2 3]
#   [0 1 2 3]]]

b_1_1_4 = b.reshape(1, 1, 4)
print(b_1_1_4)
# [[[0 1 2 3]]]

print(np.tile(b_1_1_4, (2, 3, 1)))
# [[[0 1 2 3]
#   [0 1 2 3]
#   [0 1 2 3]]
#
#  [[0 1 2 3]
#   [0 1 2 3]
#   [0 1 2 3]]]
| StarcoderdataPython |
4900182 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pymongo
from ..excepts import MongoDBException
from ..logging import get_logger
from typing import Optional, Dict, List, Union
class MongoDBHandler:
    """
    MongoDB handler: connects to the server and manipulates documents in a
    single collection (find / insert / replace). Usable as a context manager.
    """
    def __init__(self, hostname: str, username: str, password: str,
                 database_name: str, collection_name: str):
        self.logger = get_logger(self.__class__.__name__)
        self.hostname = hostname
        self.username = username
        self.password = password
        self.database_name = database_name
        self.collection_name = collection_name
        # mongodb+srv URI assembled from the credentials above.
        self.connection_string = \
            f'mongodb+srv://{self.username}:{self.password}@{self.hostname}'

    def __enter__(self):
        return self.connect()

    def connect(self) -> 'MongoDBHandler':
        """Open the client connection and verify the server is reachable.

        Raises:
            MongoDBException: on connection, configuration, or other driver errors.
        """
        try:
            self.client = pymongo.MongoClient(self.connection_string)
            # 'ismaster' is a cheap command that forces a real round-trip.
            self.client.admin.command('ismaster')
            self.logger.info('Successfully connected to the database')
        except pymongo.errors.ConnectionFailure:
            raise MongoDBException('Database server is not available')
        except pymongo.errors.ConfigurationError:
            raise MongoDBException('Credentials passed are not correct!')
        except pymongo.errors.PyMongoError as exp:
            raise MongoDBException(exp)
        except Exception as exp:
            raise MongoDBException(exp)
        return self

    @property
    def database(self):
        """Lazily resolved database handle (requires connect() first)."""
        return self.client[self.database_name]

    @property
    def collection(self):
        """Lazily resolved collection handle within the database."""
        return self.database[self.collection_name]

    def find(self, query: Dict[str, Union[Dict, List]]) -> Optional[Dict]:
        """Return the first document matching *query*, or None on error / no match."""
        try:
            return self.collection.find_one(query)
        except pymongo.errors.PyMongoError as exp:
            self.logger.error(f'got an error while finding a document in the db {exp}')

    def insert(self, document: str) -> Optional[str]:
        """Insert *document* and return its inserted id, or None on error."""
        try:
            result = self.collection.insert_one(document)
            self.logger.info(f'Pushed current summary to the database')
            return result.inserted_id
        except pymongo.errors.PyMongoError as exp:
            self.logger.error(f'got an error while inserting a document in the db {exp}')

    def replace(self, document: Dict, query: Dict) -> Optional[int]:
        """Replace the first document matching *query*; return the modified count, or None on error."""
        try:
            result = self.collection.replace_one(query, document)
            return result.modified_count
        except pymongo.errors.PyMongoError as exp:
            self.logger.error(f'got an error while replacing a document in the db {exp}')

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.client.close()
        except pymongo.errors.PyMongoError as exp:
            raise MongoDBException(exp)
| StarcoderdataPython |
4904907 | import pytest
import os
import shutil
import prody as pr
from metalprot import ligand_database as ldb
def test_2ndshell():
    # Integration test of ligand_database second-shell extraction on the
    # bundled 5od1 zinc structure under test_data/.
    workdir = os.path.dirname(os.path.realpath(__file__)) + '/test_data/'
    pdb_prody = pr.parsePDB(workdir + '5od1_zn.pdb')
    assert '5od1' in pdb_prody.getTitle()
    metal_sel = 'name ZN'
    # Step into the logic of 'get_metal_core_seq': pick the first zinc atom
    # and select the coordinating protein atoms within 2.83 angstroms.
    nis = pdb_prody.select(metal_sel)
    ni = nis[0]
    ni_index = ni.getIndex()
    all_near = pdb_prody.select('protein and within 2.83 of index ' + str(ni_index))
    assert len(all_near.select('nitrogen or oxygen or sulfur')) >= 3
    inds = all_near.select('nitrogen or oxygen or sulfur').getResindices()
    assert len(inds) == 3
    # Test 'extend_res_indices': widen the residue window around the contacts.
    ext_inds = ldb.extend_res_indices(inds, pdb_prody, extend=4)
    assert len(ext_inds) == 22
    # Test 'get_2ndshell_indices'.
    _2ndshell_resindices = ldb.get_2ndshell_indices(inds, pdb_prody, ni_index)
    assert _2ndshell_resindices[0] == 57
    # Test 'get_metal_core_seq_2ndshell' and write the core out for reuse below.
    core_2ndshell = ldb.get_metal_core_seq_2ndshell(pdb_prody, metal_sel)
    ldb.writepdb(core_2ndshell, workdir + 'output/')
    pdb_core = pr.parsePDB(workdir + 'output/' + '5od1_zn_ZN_1.pdb')
    # Test 'extract_all_core_aa' with extract2ndshell=True.
    aa_cores = ldb.extract_all_core_aa([pdb_core], metal_sel, aa = 'resname HIS', extract2ndshell=True)
    ldb.writepdb(aa_cores, workdir + 'output/his/')
    assert len(aa_cores[1].select('bb')) == 8 #There are two amino acid
| StarcoderdataPython |
3313965 | from meps_db.processors.encoders.population_characteristics_encoder import PopulationCharacteristicsEncoder
from meps_db.processors.encoders.office_based_visits_encoder import OfficeBasedVisitsEncoder
from meps_db.processors.encoders.outpatient_visits_encoder import OutpatientVisitsEncoder
from meps_db.processors.encoders.emergency_room_event_encoder import EmergencyRoomVisitsEncoder
from meps_db.processors.encoders.hosptial_inpatient_stays_encoder import HospitalInpatientStaysEncoder
from meps_db.processors.encoders.dental_care_events_encoder import DentalVisitsEncoder
from meps_db.processors.encoders.home_health_days_encoder import HomeHealthEncoder
from meps_db.processors.encoders.other_medical_events_encoder import OtherMedicalExpensesEncoder
from meps_db.processors.encoders.prescribed_medicines_encoder import PrescribedMedicinesEncoder
class RespondentHistoryGenerator:
    """ Queries the Events Tables and merges events from various sources to respondents. """

    def __init__(self, years, dupersids=None):
        """
        Required_Inputs:
            years: Years to fetch data for
        Optional Inputs:
            dupersids: list of respondent dupersids to exclusively fetch data for
        """
        self.years = years
        self.dupersids = dupersids

    def run(self):
        """ Primary Entry Point of RespondentHistoryGenerator """
        # One encoder class per event category; keys match merge_sources kwargs.
        encoder_classes = {
            "office_based": OfficeBasedVisitsEncoder,
            "outpatient": OutpatientVisitsEncoder,
            "emergency_room": EmergencyRoomVisitsEncoder,
            "hosptial_inpatient": HospitalInpatientStaysEncoder,
            "dental_care": DentalVisitsEncoder,
            "home_health": HomeHealthEncoder,
            "other_medical": OtherMedicalExpensesEncoder,
            "presciption_medicines": PrescribedMedicinesEncoder,
        }
        respondent_history = {}
        for year in self.years:
            population = PopulationCharacteristicsEncoder(year=year, dupersids=self.dupersids).run()
            events = {
                name: encoder(year=year, dupersids=self.dupersids).run()
                for name, encoder in encoder_classes.items()
            }
            respondent_history[year] = self.merge_sources(population=population, **events)
        return respondent_history

    @staticmethod
    def merge_sources(
        population,
        office_based,
        outpatient,
        emergency_room,
        hosptial_inpatient,
        dental_care,
        home_health,
        other_medical,
        presciption_medicines,
    ):
        """ Attach each respondent's events for every event type, using an
        empty list when a respondent has no events of that type. """
        sources = {
            "office_based": office_based,
            "outpatient": outpatient,
            "emergency_room": emergency_room,
            "hosptial_inpatient": hosptial_inpatient,
            "dental_care": dental_care,
            "home_health": home_health,
            "other_medical": other_medical,
            "presciption_medicines": presciption_medicines,
        }
        for dupersid, record in population.items():
            for name, events_by_resp in sources.items():
                record[name] = events_by_resp.get(dupersid, [])
        return population
| StarcoderdataPython |
5097453 | <reponame>amarkpayne/ARC
#!/usr/bin/env python3
# encoding: utf-8
import arc.job.inputs
import arc.job.job
import arc.job.local
import arc.job.ssh
import arc.job.submit
import arc.job.trsh
| StarcoderdataPython |
6492698 | <gh_stars>1-10
๏ปฟimport PIL.Image as pimg
import os
import PIL.ImageDraw as draw
import utils as ut
import torch
import torchvision
import time
#path = r"./test_img"
path = r"./test_img"
path_out = r"./test_img_out"
norm_t=torchvision.transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
NET_p = torch.load('./pp2/net_p3.pth')
NET_r = torch.load('./pp2/net_r3.pth')
NET_o = torch.load('./pp2/net_o3.pth')
def c_s(ms):
    """Adjust each box in *ms* in place around its centre (x edges by 5%,
    y edges by 10%) and return the same tensor.

    Columns of ms: [conf, x1, y1, x2, y2, ...].
    """
    x1, y1, x2, y2 = ms[:, 1:2], ms[:, 2:3], ms[:, 3:4], ms[:, 4:5]
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    ms[:, 1:2] = x1 * 1.05 - center_x * 0.05
    ms[:, 2:3] = y1 * 1.1 - center_y * 0.1
    ms[:, 3:4] = x2 * 0.95 + center_x * 0.05
    ms[:, 4:5] = y2 * 0.9 + center_y * 0.1
    return ms
def cmp(img_c,img_l,p,a):
    # Convert PNet heat-map outputs back into full-image candidate boxes.
    # img_c: confidence map, assumed shape (1, 1, H, W); img_l: offset map of
    # the same spatial size with >= 4 channels -- TODO confirm against testp().
    # p: confidence threshold; a: the pyramid scale factor of this pass.
    # NOTE(review): shadows the builtin cmp() name.
    xy=(torch.nonzero(torch.gt(img_c,p))[:,2:]).float()
    l = img_c[:,0][torch.gt(img_c[:,0],p)].view(-1,1).float()
    x1 = img_l[:,0][torch.gt(img_c[:,0],p)].view(-1,1).float()
    y1 = img_l[:,1][torch.gt(img_c[:,0],p)].view(-1,1).float()
    x2 = img_l[:,2][torch.gt(img_c[:,0],p)].view(-1,1).float()
    y2 = img_l[:,3][torch.gt(img_c[:,0],p)].view(-1,1).float()
    # Map grid positions back to original-image coordinates using the
    # constants 2 and 12 (presumably the network stride and window size --
    # confirm), then divide by the scale to undo the pyramid resize.
    l=torch.cat((l,(xy[:,1:]*2+x1*12)/a),1)
    l=torch.cat((l,(xy[:,:1]*2+y1*12)/a),1)
    l=torch.cat((l,(xy[:,1:]*2+x2*12)/a),1)
    l=torch.cat((l,(xy[:,:1]*2+y2*12)/a),1)
    # Result: (N, 5) rows of [confidence, x1, y1, x2, y2].
    return l
def build_n(img,k_s,l):
    # Crop a square patch around each detection in *l*, resize to k_s x k_s,
    # and return (patch batch, patch locations). Locations are rows of
    # [left, top, side] so results can be mapped back onto *img* later.
    # img: PIL image; l: (N, >=5) tensor of [conf, x1, y1, x2, y2, ...].
    if l.size()!=torch.Size([0]):
        # Square side = the larger box dimension; (x, y) = box centre.
        a1=l[:,3]-l[:,1]
        a2=l[:,4]-l[:,2]
        a=torch.max(a1,a2)
        x=l[:,3]+l[:,1]
        y=l[:,4]+l[:,2]
        a=torch.div(a,2)
        x=torch.div(x,2)
        y=torch.div(y,2)
        x=x.int().cpu().numpy().tolist()
        y=y.int().cpu().numpy().tolist()
        a=a.int().cpu().numpy().tolist()
    # Empty accumulators keep the return shapes valid when l is empty.
    xl=torch.tensor([]).view(-1,3).float()
    xs=torch.tensor([]).view(-1,3,k_s,k_s).float()
    if l.size()!=torch.Size([0]):
        for ss,(i,j,k) in enumerate(zip(x,y,a)):
            im=img.crop((i-k,j-k,i+k,j+k))
            im=im.resize((k_s,k_s))
            img_tensor = torchvision.transforms.ToTensor()(im)
            # norm_t is the module-level Normalize mapping pixels to [-1, 1].
            im=norm_t(img_tensor).view(-1,3,k_s,k_s)
            il=torch.tensor([[i-k,j-k,2*k]]).float()
            xs=torch.cat((xs,im),0)
            xl=torch.cat((xl,il),0)
    return xs,xl
#def build_n(img,k_s,l):
# if l.size()!=torch.Size([0]):
# img=torchvision.transforms.ToTensor()(img)
# img=norm_t(img)
# img=img.view(1,img.size(0),img.size(1),img.size(2))
# a1=l[:,3]-l[:,1]
# a2=l[:,4]-l[:,2]
# a=torch.max(a1,a2)
# x=l[:,3]+l[:,1]
# y=l[:,4]+l[:,2]
# a=torch.div(a,2)
# x=torch.div(x,2)
# y=torch.div(y,2)
# xl=torch.cat(((x-a).view(-1,1),(y-a).view(-1,1),(a*2).view(-1,1)),1).float()
# x=x.int().cpu().numpy().tolist()
# y=y.int().cpu().numpy().tolist()
# a=a.int().cpu().numpy().tolist()
# xs=torch.tensor([]).view(-1,3,k_s,k_s).float()
# for i,j,k in zip(x,y,a):
# if i<=k:
# i=k
# if j<=k:
# j=k
# im=img[:,:,(j-k):(j+k),(i-k):(i+k)]
# imn=torch.nn.AdaptiveAvgPool2d(k_s)
# im=imn(im)
# xs=torch.cat((xs,im),0).float()
# else:
# xs=torch.tensor([]).view(-1,3,k_s,k_s).float()
# xl=torch.tensor([]).view(-1,3).float()
#
# return xs,xl
def ump(loc, out_c, out_l, k_s, p):
    """Map per-patch network outputs back to full-image coordinates.

    Args:
        loc: (N, 3) tensor of [left, top, side] for each input patch.
        out_c: (N, 1) confidence scores.
        out_l: (N, 14) normalised offsets, alternating x/y for 7 points
            (two box corners plus five landmarks).
        k_s: patch size (unused; kept for signature compatibility).
        p: confidence threshold.

    Returns:
        (M, 15) tensor of [conf, x1, y1, ..., x7, y7] in image coordinates
        for the M patches whose confidence exceeds *p*.
    """
    keep = torch.gt(out_c[:, 0], p)
    c = out_c[:, 0][keep].view(-1, 1).float()
    loc = loc[keep]
    # The original code spelled out all 7 (x, y) pairs by hand; this loop is
    # the same computation. Offsets are fractions of the patch side, and
    # .int().float() truncates to whole-pixel coordinates as before.
    for i in range(7):
        x = out_l[:, 2 * i][keep].view(-1, 1).float()
        y = out_l[:, 2 * i + 1][keep].view(-1, 1).float()
        c = torch.cat((c, (loc[:, :1] + x * loc[:, 2:]).int().float()), 1)
        c = torch.cat((c, (loc[:, 1:2] + y * loc[:, 2:]).int().float()), 1)
    return c
def testp(img,NET_p):
    # Run PNet over an image pyramid (scale shrinks by 0.7 per level) and
    # collect NMS-filtered candidates: (N, 5) rows of [conf, x1, y1, x2, y2].
    alph=0.7
    cms=torch.tensor([]).view(-1,5).to(device)#(-1,15)
    x,y = img.size
    while True:
        img_c = img.resize((int(x*alph),int(y*alph)))
        img_tensor = torchvision.transforms.ToTensor()(img_c)
        inp=norm_t(img_tensor).view(1,3,int(y*alph),int(x*alph)).to(device)
        out_p_c,out_p_l=NET_p(inp)#out(1,5,n,m)
        # cmp() maps heat-map hits back to image coordinates at this scale;
        # ut.nms prunes overlapping candidates (threshold 0.4).
        cm=ut.nms(cmp(out_p_c,out_p_l[:,:4],0.7,alph),5,0.4).to(device)#(-1,5)
        cms=torch.cat((cms,cm),0)
        alph=alph*0.7
        # Stop once the scaled image would fall below the 12px detector window.
        if alph*img.size[0]<=12 or alph*img.size[1]<=12:
            break
    return cms
def testr(img,cms,NET_r):
    # Refine PNet candidates with RNet on 24x24 crops. Returns (N, 15)
    # rows of [conf, 4 box coords, 10 landmark coords] after NMS, or an
    # empty (0, 15) tensor when there are no candidates.
    if cms.size()==torch.Size([0,5]):
        return torch.tensor([]).view(-1,15)
    inp_r,loc_r=build_n(img,24,cms)
    loc_r=loc_r.to(device)
    inp_r=inp_r.to(device)
    out_r_c,out_r_l=NET_r(inp_r)#out(-1,5)p
    # ump() maps patch-relative outputs back to image coordinates.
    rms=ut.nms(ump(loc_r,out_r_c,out_r_l,24,0.9),15,0.9).view(-1,15) #(-1,5)
    return rms
def testo(img,rms,NET_o):
    # Final ONet stage on 48x48 crops. The last nms argument True
    # presumably switches its overlap mode -- confirm in utils.nms.
    # Returns (N, 15) or an empty (0, 15) tensor when nothing survives.
    if rms.size()==torch.Size([0,15]):
        return torch.tensor([]).view(-1,15)
    inp_o,loc_o=build_n(img,48,rms[:,:5])
    loc_o=loc_o.to(device)
    inp_o=inp_o.to(device)
    out_o_c,out_o_l=NET_o(inp_o)
    oms=ut.nms(ump(loc_o,out_o_c,out_o_l,48,0.8),15,0.3,True).view(-1,15) #(-1,5)
    return oms
def test_all(img):
    # Full three-stage cascade: PNet proposals -> RNet refinement -> ONet
    # output, then a final box adjustment via c_s(). Uses the module-level
    # NET_p / NET_r / NET_o networks loaded at import time.
    NET_p.eval()
    NET_r.eval()
    NET_o.eval()
    cms=testp(img,NET_p).int().float()
    rms=testr(img,cms,NET_r)
    oms=testo(img,rms,NET_o)
    oms=c_s(oms)
    return oms
#s_time=time.time()
#torch.cuda.empty_cache()
#for i,lab in enumerate(os.listdir(path)):
## try:
# img=pimg.open('{}/{}'.format(path,lab))
# img=img.convert("RGB")
#
# oms=test_all(img,lab)
# print(lab)
# torch.cuda.empty_cache()
## except:
## continue
#f_time=time.time()
#print(f_time-s_time)
# in(img,(-1,5)) inp_r(-1,24,24,3) loc_r(-1,3)
# oms_c=oms.cpu().detach().numpy().tolist()
# for j in oms_c:
# img_draw = draw.ImageDraw(img)
# img_draw.rectangle(j[1:],fill=None,outline="red")
# img.save('{}/{}.png'.format(path_out,str(i)+'1'))
#img=pimg.open('./test_img/170sss.jpg')
#img=img.convert("RGB")
#oms=test_all(img,'1170.jpg')
| StarcoderdataPython |
4900802 | <reponame>dongmengshi/easylearn
class cal:
    """Demo class showing a property, a class method and a static method."""
    cal_name = 'computer'

    def __init__(self, x, y):
        self.x = x
        self.y = y

    # @property lets cal_add be read like an attribute instead of a call.
    @property
    def cal_add(self):
        return self.x + self.y

    # @classmethod receives the class itself as cls; it can reach class
    # attributes like cal_name but not per-instance data.
    @classmethod
    def cal_info(cls):
        print(cls.cal_name)

    # @staticmethod: callable from the class or an instance; no self/cls passed.
    @staticmethod
    def cal_test(a, b, c):
        print(a, b, c)
# Exercise the demo class: static method, class method, then the property.
c1 = cal(10,11)
c1.cal_test(1,2,3)
c1.cal_info()
print(c1.cal_add)
4941320 | import random
from django.http import HttpResponse
from django.shortcuts import render
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from meiduo_29.libs.captcha.captcha import captcha
from django_redis import get_redis_connection
import logging
from verifications import constants
from verifications.serializers import ImageCodeCheckSerializer
from meiduo_29.utils.yuntongxun.sms import CCP
from celery_tasks.sms.tasks import send_sms_code
logger = logging.getLogger('django')
class ImageCodeView(APIView):
    r"""Serve a captcha image.

    No serializer is needed, so this extends the plain APIView.
    Route: GET /image_codes/(?P<image_code_id>[\w-]+)/
    Path parameter: image_code_id -- UUID string identifying the captcha.
    Returns: the captcha image bytes.
    """
    def get(self,request,image_code_id):
        # Parameter reception/validation is handled by the URL-pattern regex.
        # Generate the captcha image together with its ground-truth text.
        text,image =captcha.generate_captcha()
        # Persist the ground-truth text in the 'verify_code' redis database (db 2).
        redis_conn = get_redis_connection('verify_code')
        # setex(key, ttl, value): store the text with an expiry time.
        redis_conn.setex("img_%s" % image_code_id, constants.IMAGE_CODE_REDIS_EXPIRES, text)
        print("ๅพ็้ช่ฏ็ ๆฏ:%s"%text)
        # Return the raw image bytes.
        return HttpResponse(image,content_type="images/jpg")
class SMSCodeView(GenericAPIView):
    r"""Send an SMS verification code.

    Route: GET /sms_codes/(?P<mobile>1[3-9]\d{9})/?image_code_id=xxx&text=xxx
    Parameters (path + query string):
        mobile -- phone number
        image_code_id -- UUID of the captcha image
        text -- the captcha text typed by the user
    Returns JSON: {"message": "OK"} on success.
    """
    # Serializer that validates the captcha before any SMS is sent.
    serializer_class = ImageCodeCheckSerializer

    def get(self, request, mobile):
        # Validate the query-string parameters with the serializer;
        # raise_exception=True turns invalid input into an automatic 400.
        serializer = self.get_serializer(data=request.query_params)
        serializer.is_valid(raise_exception=True)
        # Generate a zero-padded 6-digit SMS code.
        sms_code = '%06d' % random.randint(0, 999999)
        # Store the code plus a resend-throttle flag atomically through a
        # redis pipeline (better under high concurrency than two setex calls).
        redis_conn = get_redis_connection('verify_code')
        pl = redis_conn.pipeline()
        pl.setex("sms_%s" % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
        pl.setex("send_flag_%s" % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
        pl.execute()
        # Send the SMS asynchronously through the celery task.
        expires = constants.SMS_CODE_REDIS_EXPIRES // 60
        send_sms_code.delay(mobile, sms_code, expires, constants.SMS_CODE_TEMP_ID)
        return Response({'message': 'OK'})
# ่ฟๅ | StarcoderdataPython |
11251295 | """
Module for models storing feedback from front end users
"""
from django.db import models
from .feedback import Feedback
from ..events.event import Event
class EventFeedback(Feedback):
    """
    Feedback on single events
    """
    # The event this feedback refers to; deleting the event deletes its feedback.
    event = models.ForeignKey(Event, on_delete=models.CASCADE)

    class Meta:
        # Disable Django's default add/change/delete/view model permissions.
        default_permissions = ()
| StarcoderdataPython |
64627 | <gh_stars>1-10
import logging
from typing import List, Optional
import bhamon_orchestra_model.user_roles as user_role_classes
logger = logging.getLogger("AuthorizationProvider")
class AuthorizationProvider:
	""" Expose methods to check if a user is authorized to view a resource or perform an action. """

	def authorize_request(self, user: Optional[dict], method: str, route: str) -> bool:
		""" Check if a user is authorized to perform a web request with the provided method and route """
		return any(role.is_route_authorized(method, route) for role in self.build_user_roles(user))

	def authorize_view(self, user: Optional[dict], view: str) -> bool:
		""" Check if a user is authorized to see a set of resources, based on a view identifier.

		This is useful when displaying a complex web page with links to resources
		with varying authorization requirements. """
		return any(role.is_view_authorized(view) for role in self.build_user_roles(user))

	def authorize_worker(self, user: Optional[dict]) -> bool: # pylint: disable = no-self-use
		""" Check if a user is authorized to run a worker """
		if user is None:
			return False
		return user["is_enabled"] and "Worker" in user["roles"]

	def build_user_roles(self, user: Optional[dict]) -> List[object]: # pylint: disable = no-self-use
		""" Instantiate user role classes based on a user record """
		if user is None:
			return [ user_role_classes.Anonymous() ]
		if not user["is_enabled"]:
			return [ user_role_classes.Default() ]

		# Role name -> role class, in the fixed order roles are granted.
		role_factories = [
			("Administrator", user_role_classes.Administrator),
			("Auditor", user_role_classes.Auditor),
			("Operator", user_role_classes.Operator),
			("Viewer", user_role_classes.Viewer),
			("Worker", user_role_classes.Worker),
		]

		user_roles = [ user_role_classes.Default() ]
		user_roles.extend(factory() for name, factory in role_factories if name in user["roles"])
		return user_roles
| StarcoderdataPython |
3204047 | # coding=utf-8
from __future__ import absolute_import, division, print_function
import json
import numpy as np
import os
import sys
import time
from matplotlib import pyplot as plt
from src.data import datasets
def batched(data_generator, batch_size):
    """Group (image, label) pairs into column-wise batches.

    Yields, for every full batch, a list of numpy arrays (one array per
    field of the tuples). A trailing partial batch is dropped.
    """
    if batch_size <= 0:
        return
    source = iter(data_generator)
    while True:
        chunk = []
        for _ in range(batch_size):
            try:
                chunk.append(next(source))
            except StopIteration:
                return  # incomplete final chunk is discarded
        yield [np.array(column) for column in zip(*chunk)]
def batched_slow(data_generator, batch_size):
    """Group (image, label) pairs into (images, labels) array batches.

    Yields a tuple of two numpy arrays per full batch; any trailing
    partial batch is dropped.
    """
    buffer = []
    for image, label in data_generator:
        buffer.append((image, label))
        if len(buffer) == batch_size:
            batch_images, batch_labels = zip(*buffer)
            yield np.array(batch_images), np.array(batch_labels)
            buffer = []
# testing code for data loader
# TODO: convert into proper unit test for the function
def time_data_generator(data_generator, sample_size):
    """Pull up to *sample_size* items from *data_generator* and print the elapsed time.

    Each yielded item must itself be iterable (e.g. an (image, label) pair),
    since the progress line lists the type of each field.
    """
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.perf_counter() is the documented replacement for interval timing.
    start = time.perf_counter()
    for idx, items in enumerate(data_generator):
        sys.stdout.flush()
        if idx >= sample_size:
            break
        print('Processed {} items: ({})'.format(idx + 1, [type(item) for item in items]), end='\r')
    print(time.perf_counter() - start)
def test_dataset(solver):
    """Smoke-test a dataset's generator by timing a handful of samples."""
    data_config = solver['data']
    dataset_name = data_config['dataset_name']
    print('Preparing to train on {} data...'.format(dataset_name))
    # Fold the validation-split settings into the top-level data config.
    data_config.update(data_config['val'])

    np.random.seed(1337)  # for reproducibility
    dataset = getattr(datasets, dataset_name)(config=data_config)
    time_data_generator(dataset.flow(), sample_size=10)
def test(solver):
    """Manual visual check of the data pipeline.

    Iterates batches from the mscoco dataset and, for every class present in a
    sample, displays the RGB image, the class binary mask and the masked image
    side by side with matplotlib. Blocks on plt.show() per figure.
    """
    np.random.seed(1337)  # for reproducibility
    # full_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # sys.path.append(full_path)
    data_config = solver['data']
    supplementery_data_config = data_config['val']
    data_config.update(supplementery_data_config)
    dataset_name = 'mscoco'
    print('Loading {} data...'.format(dataset_name))
    dataset = getattr(datasets, dataset_name)(config=data_config)
    print('Done')
    for idx, item in enumerate(dataset.flow()):
        # item is (images, labels); labels are reshaped back to
        # (batch, h, w, n_classes) below.
        # NOTE(review): assumes labels arrive flattenable to that shape — confirm
        # against the dataset's flow() implementation.
        img, lbl = item[0].astype(np.uint8), item[1]
        batch_size = img.shape[0]
        h = img.shape[1]
        w = img.shape[2]
        nc = lbl.shape[-1]
        lbl = np.reshape(lbl, (batch_size, h, w, nc))
        for batch_index in range(data_config['batch_size']):
            binary_masks = split_label_channels(lbl[batch_index, ...])
            img_item = img[batch_index, ...]
            for class_idx, binary_mask in binary_masks.items():
                # class_name = dataset.CATEGORIES[dataset.IDS[class_idx]]
                class_name = dataset.CATEGORIES[class_idx]
                plt.rcParams["figure.figsize"] = [4 * 3, 4]
                fig = plt.figure()
                subplot1 = fig.add_subplot(131)
                subplot1.imshow(img_item)
                subplot1.set_title('rgb image')
                subplot1.axis('off')
                subplot2 = fig.add_subplot(132)
                subplot2.imshow(binary_mask, cmap='gray')
                subplot2.set_title('{} binary mask'.format(class_name))
                subplot2.axis('off')
                subplot3 = fig.add_subplot(133)
                masked = np.array(img_item)
                masked[binary_mask == 0] = 0  # black out pixels outside the class mask
                subplot3.imshow(masked)
                subplot3.set_title('{} label'.format(class_name))
                subplot3.axis('off')
                fig.tight_layout()
                plt.show()
        # shapes.append(img.shape)
        print('Processed {} items: ({})'.format(idx + 1, type(item)), end='\r')
        sys.stdout.flush()
def split_label_channels(label):
    """Split a one-hot/soft label volume into per-class binary masks.

    PARAMS
    ==========
    label: numpy array
        shape (..., n_classes); any positive value marks class membership

    RETURNS
    ==========
    dict mapping class index -> uint8 binary mask (only classes that are
    actually present in *label* are included)
    """
    binary_masks = {}
    for i in range(label.shape[-1]):
        # Copy the channel: label[..., i] is a view, and writing into it
        # would silently mutate the caller's array in place.
        channel = label[..., i].copy()
        if not np.any(channel > 0):
            continue
        channel[channel > 0] = 1
        binary_masks[i] = channel.astype(np.uint8)
    return binary_masks
if __name__ == '__main__':
    # Entry point for a manual smoke test: load the solver configuration and
    # visualise dataset samples with their per-class binary masks.
    solver_json = 'config/solver.json'
    print('solver json: {}'.format(os.path.abspath(solver_json)))
    test(solver=json.load(open(solver_json)))
    # test_dataset(solver=json.load(open(solver_json)))
| StarcoderdataPython |
8131296 | <reponame>dmontoya1/platform_control<gh_stars>0
from .platform_list import PlatformListAPIView
from .platform_detail import PlatformDetail
from .platform_create import PlatformCreate
| StarcoderdataPython |
12832111 | <filename>MainApp/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import EmailInfo, DOIQuery, EmailInfoJournal, JournalQuery
# Register your models here.
# Register each query/notification model with the default Django admin site.
for registered_model in (EmailInfo, DOIQuery, JournalQuery, EmailInfoJournal):
    admin.site.register(registered_model)
9618623 | <reponame>iporollo/monosi
# TODO: Runner may be responsible for loading
# the configuration for a data source based
# on the monitor definition
class Runner:
    """Executes SQL against the data source described by ``config``.

    The database driver is created lazily per ``run()`` call from the
    driver factory.
    """

    def __init__(self, config):
        self.config = config
        self.driver = None  # created by _initialize()

    def _initialize(self):
        """Load and instantiate the database driver declared by the config.

        Raises Exception (with the root cause chained) when the driver
        cannot be loaded or constructed.
        """
        try:
            from core.common.drivers.factory import load_driver

            driver_cls = load_driver(self.config)
            self.driver = driver_cls(self.config)
        except Exception as e:
            # Chain the original error instead of print()-ing it so callers
            # see the root cause in the traceback.
            raise Exception("Could not initialize connection to database in Runner.") from e

    def _execute(self, sql: str):
        """Run *sql* through the driver; requires _initialize() first."""
        if self.driver is None:
            raise Exception("Initialize runner before execution.")

        results = self.driver.execute(sql)
        return results

    def run(self, sql: str):
        """Initialize the driver and execute *sql*, returning the results."""
        self._initialize()
        return self._execute(sql)
| StarcoderdataPython |
from flask import Flask, render_template

app = Flask(__name__)


@app.route("/", methods=["GET"])
def home():
    """Render the landing page."""
    return render_template("home.html")


# NOTE(review): runs at import time and binds to all interfaces (0.0.0.0)
# with the development server — confirm this is intended outside local use.
app.run(host="0.0.0.0")
9797980 | <filename>tests/test_visitors/test_ast/test_blocks/test_overlap/test_function_block.py
import pytest
from wemake_python_styleguide.violations.best_practices import (
BlockAndLocalOverlapViolation,
)
from wemake_python_styleguide.visitors.ast.blocks import BlockVariableVisitor
# Functions:
function_def1 = 'def {0}():'
# Wrong usages:
function_template1 = """
{0}
...
{1}
"""
function_template2 = """
{1}
{0}
...
"""
# Correct usages:
method_template1 = """
{1}
class Test(object):
{0}
...
"""
method_template2 = """
class Test(object):
{0}
...
{1}
"""
@pytest.mark.parametrize('function_statement', [
    function_def1,
])
@pytest.mark.parametrize('context', [
    function_template1,
    function_template2,
])
@pytest.mark.parametrize('variable_name', [
    'should_raise',
])
def test_function_block_overlap(
    assert_errors,
    assert_error_text,
    parse_ast_tree,
    function_statement,
    assign_statement,
    context,
    variable_name,
    default_options,
    mode,
):
    """Ensures that overlapping variables raise a violation.

    A module-level function name that is re-used as a plain assignment target
    in the same scope must be reported as ``BlockAndLocalOverlapViolation``.
    """
    code = context.format(
        function_statement.format(variable_name),
        assign_statement.format(variable_name),
    )
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [BlockAndLocalOverlapViolation])
    assert_error_text(visitor, variable_name)
@pytest.mark.parametrize('function_statement', [
    function_def1,
])
@pytest.mark.parametrize('context', [
    method_template1,
    method_template2,
])
@pytest.mark.parametrize('variable_name', [
    'should_raise',
])
def test_method_block_overlap(
    assert_errors,
    parse_ast_tree,
    function_statement,
    assign_and_annotation_statement,
    context,
    variable_name,
    default_options,
    mode,
):
    """Ensures that a method name does not overlap with module-level names.

    The function is defined inside a class body, so no
    ``BlockAndLocalOverlapViolation`` is expected.
    """
    code = context.format(
        function_statement.format(variable_name),
        assign_and_annotation_statement.format(variable_name),
    )
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
@pytest.mark.parametrize('function_statement', [
    function_def1,
])
@pytest.mark.parametrize('context', [
    function_template1,
    function_template2,
    method_template1,
    method_template2,
])
@pytest.mark.parametrize('variable_name', [
    'should_raise',
])
def test_function_block_usage(
    assert_errors,
    parse_ast_tree,
    function_statement,
    context,
    variable_name,
    default_options,
    mode,
):
    """Ensures that merely *using* (reading) a block name is fine.

    ``print(name)`` references the function without rebinding it, so no
    overlap violation is expected.
    """
    code = context.format(
        function_statement.format(variable_name),
        'print({0})'.format(variable_name),
    )
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
@pytest.mark.parametrize('function_statement', [
    function_def1,
])
@pytest.mark.parametrize('context', [
    function_template1,
    function_template2,
    method_template1,
    method_template2,
])
@pytest.mark.parametrize(('first_name', 'second_name'), [
    ('unique_name', 'unique_name1'),
    ('_', '_'),  # underscore placeholder may legitimately be rebound
])
def test_function_block_correct(
    assert_errors,
    parse_ast_tree,
    function_statement,
    assign_and_annotation_statement,
    context,
    first_name,
    second_name,
    default_options,
    mode,
):
    """Ensures that distinct (or underscore) names do not overlap."""
    code = context.format(
        function_statement.format(first_name),
        assign_and_annotation_statement.format(second_name),
    )
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
pipeline = """
def pipeline(function):
return
"""
overload_template = """
{0}
{1}
{1}
"""
@pytest.mark.parametrize('import_overload', [
    '@overload',
    '@typing.overload',
])
def test_function_overload(
    assert_errors,
    assert_error_text,  # NOTE(review): unused fixture; kept for signature parity
    parse_ast_tree,
    default_options,
    import_overload,
    mode,
):
    """Ensures that functions decorated with ``typing.overload`` may be redefined."""
    code = overload_template.format(import_overload, pipeline)
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
@pytest.mark.parametrize('decorator_template', [
    '@typing.func',
    '@module.overload',
    '@decorate',
])
def test_no_function_overload(
    assert_errors,
    assert_error_text,  # NOTE(review): unused fixture; kept for signature parity
    parse_ast_tree,
    default_options,
    decorator_template,
    mode,
):
    """Ensures decorators that are not ``typing.overload`` still trigger overlap."""
    code = overload_template.format(decorator_template, pipeline)
    tree = parse_ast_tree(mode(code))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [BlockAndLocalOverlapViolation])
method_setter_template = """
class Test(object):
@property
def {0}():
...
@{0}.setter
def {0}():
{0} = ...
"""
def test_property_setter(
    assert_errors,
    parse_ast_tree,
    default_options,
    mode,
):
    """Ensures that property setters may redefine the getter's name."""
    tree = parse_ast_tree(mode(method_setter_template.format('func')))

    visitor = BlockVariableVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
| StarcoderdataPython |
9761291 |
from .solver import Simul
from .output import Output
# Short identifier used to refer to this solver flavour.
short_name = "canonical"

# Public API of this package.
__all__ = ["Simul", "Output", "short_name"]
| StarcoderdataPython |
1848932 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright © 2021–2022 <NAME> <<EMAIL>>
# Released under the MIT Licence
#
from .datarecord import DataRecord
from .gender import Gender
from .exceptions import BaseException
class Draw(DataRecord):
    """A tournament draw record (e.g. "W0"), optionally gendered."""

    class InvalidDataError(BaseException):
        pass

    def __init__(self, name, *, gendered=None, **kwargs):
        resolved = Gender.from_string(gendered) if gendered is not None else None
        super().__init__(name=name, gendered=resolved, **kwargs)

    def __repr__(self):
        missing = object()
        # "description" is optional on the underlying record.
        description = getattr(self, "description", missing)
        text = f"<{self.__class__.__name__}({self.name}"
        if description is not missing:
            text = f"{text}, {description}"
        return f"{text})>"

    def __str__(self):
        return self.name

    def __lt__(self, other):
        if not (self.get("ladies_first", True) and other.get("ladies_first", True)):
            return self.name < other.name

        def sort_key(name):
            # Map W before M so women's draws order first.
            return name.replace("W", "0").replace("M", "1")

        return sort_key(self.name) < sort_key(other.name)

    def __hash__(self):
        return hash(self.name)
if __name__ == "__main__":
    # Manual smoke check: build a Draw and inspect its state and repr.
    # (Removed a leftover interactive ipdb.set_trace() breakpoint that
    # halted the script whenever ipdb was installed.)
    draw = Draw("W0", description="Women's Open", gendered=Gender.N)
    print(vars(draw))
    print(repr(draw))
12828164 | <filename>MLTrainer/models.py
from sklearn import ensemble, linear_model, naive_bayes, neighbors, svm, tree, model_selection, metrics
from xgboost import XGBClassifier
import numpy as np, pandas as pd, logging, os, joblib
from .model_params import classf_grids
import warnings
from typing import Union
class MLTrainer:
    """Train, cross-validate and evaluate a collection of scikit-learn and
    XGBoost classifiers behind a single fit/predict/evaluate interface.
    """

    def __init__(self, ensemble: bool=True, linear: bool=True, naive_bayes: bool=True, neighbors: bool=True, svm: bool=True, decision_tree: bool=True, seed: int=100) -> None:
        """
        PARAMS
        ==========
        ensemble: bool
            True if want ensemble models
        linear: bool
            True if want linear models
        naive_bayes: bool
            True if want naive bayes models
        neighbors: bool
            True if want neighbors models
        svm: bool
            True if want svm models
        decision_tree: bool
            True if want decision tree models
        seed: int
            random seed applied to every model that exposes ``random_state``

        NOTE: Need fix naive bayes and folder names
        """
        self.models = []  # model instances; order matches self.model_keys insertion order
        self.n_classes = None  # number of distinct labels, set by fit()
        self.fitted = False  # flips to True once fit() has completed
        self.ensemble = ensemble
        self.linear = linear
        self.naive_bayes = naive_bayes
        self.neighbors = neighbors
        self.svm = svm
        self.decision_trees = decision_tree
        self.seed = seed
        self.cv_scores = dict()  # becomes a results DataFrame in fit()
        self.model_keys = dict()  # model name -> model family ("ensemble", "svm", ...)
        self.idx_label_dic = dict()  # class index -> display label, set by evaluate()
        self.init_all_models()

    def init_ensemble(self) -> None:
        """Register the ensemble classifiers."""
        all_models = [ensemble.AdaBoostClassifier(), ensemble.BaggingClassifier(), ensemble.ExtraTreesClassifier(),
                      ensemble.GradientBoostingClassifier(), ensemble.RandomForestClassifier(), XGBClassifier()]
        self.models.extend(all_models)
        models = ["adaboost", "bagging", "extratrees", "gradientboosting", "randomforest", "xgboost"]
        for mod in models:
            self.model_keys[mod] = "ensemble"

    def init_linear(self) -> None:
        """Register the linear classifiers."""
        all_models = [linear_model.LogisticRegression()]
        self.models.extend(all_models)
        models = ["logreg"]
        for mod in models:
            self.model_keys[mod] = "linear"

    def init_naive_bayes(self) -> None:
        """Register the naive-Bayes classifiers.

        MultinomialNB works with occurrence counts;
        BernoulliNB is designed for binary/boolean features.
        """
        all_models = [naive_bayes.BernoulliNB(), naive_bayes.GaussianNB(), naive_bayes.MultinomialNB(), naive_bayes.ComplementNB()]
        self.models.extend(all_models)
        models = ["bernoulli", "gaussian", "multinomial", "complement"]
        for mod in models:
            self.model_keys[mod] = "nb"

    def init_neighbors(self) -> None:
        """Register the nearest-neighbour classifiers."""
        all_models = [neighbors.KNeighborsClassifier()]
        self.models.extend(all_models)
        models = ["knn"]
        for mod in models:
            self.model_keys[mod] = "neighbors"

    def init_svm(self) -> None:
        """Register the SVM classifiers (probability=True enables predict_proba)."""
        all_models = [svm.NuSVC(probability=True), svm.SVC(probability=True)]
        self.models.extend(all_models)
        models = ["nu", "svc"]
        for mod in models:
            self.model_keys[mod] = "svm"

    def init_decision_tree(self) -> None:
        """Register the decision-tree classifiers."""
        all_models = [tree.DecisionTreeClassifier(), tree.ExtraTreeClassifier()]
        self.models.extend(all_models)
        models = ["decision", "extra"]
        for mod in models:
            self.model_keys[mod] = "tree"

    def init_all_models(self) -> None:
        """Instantiate every enabled model family; raise if none are enabled."""
        if self.ensemble:
            self.init_ensemble()
        if self.linear:
            self.init_linear()
        if self.naive_bayes:
            self.init_naive_bayes()
        if self.neighbors:
            self.init_neighbors()
        if self.svm:
            self.init_svm()
        if self.decision_trees:
            self.init_decision_tree()
        if len(self.models) == 0:
            raise Exception("No Models Selected, Look at the Parameters of ___init__")

    def fit(self, X: Union[tuple, list, np.ndarray], Y: Union[tuple, list, np.ndarray], n_folds: int=5, scoring: str="accuracy", n_jobs: int=-1, gridsearchcv: bool=False, param_grids: dict=None, greater_is_better: bool=True):
        """
        PARAMS
        ==========
        X: numpy array
            shape is (n_samples, n_features)
        Y: numpy array
            shape is (n_samples,)
        n_folds: int
            number of cross validation folds
        n_jobs: int
            sklearn parallelism
        scoring: str
            scoring metric; reference at
            https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        gridsearchcv: bool
            True to run a GridSearchCV per model (the built-in ``classf_grids``
            are then used and any user-supplied ``param_grids`` are ignored)
        param_grids: dict or None
            optional per-model-name parameter overrides applied via set_params
        greater_is_better: bool
            True if the metric improves as it grows; ``cv_scores`` is sorted
            with ``ascending = not greater_is_better``

        RETURNS
        ==========
        self, with ``cv_scores`` set to a DataFrame of per-model results
        sorted by the CV metric
        """
        if param_grids is None:  # avoid a shared mutable default argument
            param_grids = {}
        self.n_classes = len(np.unique(Y))
        cv_metric = "mean_cv_" + scoring
        self.cv_scores = {"model": [], "parameters": [], cv_metric: [], "remarks": []}
        if gridsearchcv:
            # Grid search uses the curated grids keyed by model family.
            param_grids = classf_grids
        for counter, (model_name, model) in enumerate(zip(list(self.model_keys.keys()), self.models)):
            if gridsearchcv:
                mod = model_selection.GridSearchCV(model, param_grids[self.model_keys[model_name]][model_name], n_jobs=n_jobs)
            else:
                mod = model
            if hasattr(mod, "n_jobs"):
                mod.n_jobs = n_jobs
            if hasattr(mod, "random_state"):
                mod.random_state = self.seed
            # Under grid search this is a no-op: classf_grids is keyed by
            # family name, so .get(model_name) returns the empty default.
            mod.set_params(**param_grids.get(model_name, dict()))
            params = None
            score = None
            remark = ""
            try:
                if gridsearchcv:
                    mod.fit(X, Y)
                    score = mod.best_score_
                    mod = mod.best_estimator_
                else:
                    score = np.mean(model_selection.cross_val_score(mod, X, Y, cv=n_folds, scoring=scoring))
                    mod.fit(X, Y)
                params = mod.get_params()
            except Exception as e:
                remark = e  # keep the failure visible in the results table
            self.models[counter] = mod
            self.cv_scores["model"].append(model_name)
            self.cv_scores["parameters"].append(params)
            self.cv_scores["remarks"].append(remark)
            self.cv_scores[cv_metric].append(score)
        self.cv_scores = pd.DataFrame(self.cv_scores)
        # Honour the documented greater_is_better contract (previously the
        # parameter was accepted but never used): best model first, failed
        # models (score None/NaN) last.
        self.cv_scores = self.cv_scores.sort_values(
            by=cv_metric, ascending=not greater_is_better, na_position="last"
        ).reset_index(drop=True)
        self.fitted = True
        return self

    def predict(self, X: Union[tuple, list, np.ndarray]) -> dict:
        """
        PARAMS
        ==========
        X: numpy array
            shape is (n_samples, n_features)

        RETURNS
        ==========
        dict mapping model name to its (n_samples,) prediction array, or to
        the raised exception if that model cannot predict
        """
        assert self.fitted, "Call .fit() method first"
        result = dict()
        model_names = list(self.model_keys.keys())
        for idx, model in enumerate(self.models):
            model_name = model_names[idx]
            try:
                predictions = model.predict(X)
            except Exception as e:
                predictions = e
            result[model_name] = predictions
        return result

    def predict_proba(self, X: Union[tuple, list, np.ndarray]) -> dict:
        """
        PARAMS
        ==========
        X: numpy array
            shape is (n_samples, n_features)

        RETURNS
        ==========
        dict mapping model name to its (n_samples, n_classes) probability
        array, or to the raised exception if that model cannot predict
        """
        assert self.fitted, "Call .fit() method first"
        result = dict()
        model_names = list(self.model_keys.keys())
        for idx, model in enumerate(self.models):
            model_name = model_names[idx]
            try:
                proba = model.predict_proba(X)
            except Exception as e:
                proba = e
            result[model_name] = proba
        return result

    def evaluate(self, test_X: Union[tuple, list, np.ndarray], test_Y: Union[tuple, list, np.ndarray], idx_label_dic: dict=None, class_report: str="classf_report.csv", con_mat: str="confusion_matrix.csv", pred_proba: str="predictions_proba.csv") -> None:
        """
        PARAMS
        ==========
        test_X: numpy array
            shape is (n_samples, n_features), test features
        test_Y: numpy array
            shape is (n_samples,), test labels
        idx_label_dic: dictionary
            keys are class indices, values are string labels
            (defaults to the stringified indices)
        class_report: str
            file name for the per-model classification report CSV
        con_mat: str
            file name for the per-model confusion matrix CSV
        pred_proba: str
            file name for the per-model prediction probabilities CSV

        RETURNS
        ==========
        None; writes one folder per model containing the three CSVs
        """
        assert self.fitted, "Call .fit() method first"
        if idx_label_dic is None:
            idx_label_dic = {idx: str(idx) for idx in range(self.n_classes)}
        self.idx_label_dic = idx_label_dic
        for model_name, model in zip(list(self.model_keys.keys()), self.models):
            folder = "./" + model_name + "/"
            # exist_ok avoids the check-then-create race of exists()+makedirs().
            os.makedirs(folder, exist_ok=True)
            self.evaluate_model(model, test_X, test_Y, folder, class_report=class_report, con_mat=con_mat, pred_proba=pred_proba)

    def evaluate_model(self, model, test_X: Union[tuple, list, np.ndarray], test_Y: Union[tuple, list, np.ndarray], folder: str="", class_report: str="classf_report.csv", con_mat: str="confusion_matrix.csv", pred_proba: str="predictions_proba.csv") -> None:
        """
        PARAMS
        ==========
        model: fitted sklearn-compatible model object
        test_X: numpy array
            shape is (n_samples, n_features), test features
        test_Y: numpy array
            shape is (n_samples,), test labels
        folder: string
            path to the folder all files are saved in
        class_report, con_mat, pred_proba: string
            file names for the three CSV outputs

        RETURNS
        ==========
        None; silently skips models that cannot predict (e.g. failed fits)
        """
        try:
            predictions = model.predict(test_X)
            predictions_proba = model.predict_proba(test_X)
        except Exception:
            return  # model failed to fit or lacks predict_proba; nothing to report
        else:
            self.save_classf_report(metrics.classification_report(test_Y, predictions, labels=list(self.idx_label_dic.keys())), folder + class_report)
            self.save_conf_mat(test_Y, predictions, folder + con_mat)
            self.save_label_proba(predictions_proba, folder + pred_proba)

    def save_classf_report(self, report, file_path: str):
        """
        PARAMS
        ==========
        report: sklearn plain-text classification report
        file_path: string
            path to save the classification report as CSV

        RETURNS
        ==========
        None; writes one row per class with precision/recall/f1/support
        """
        # NOTE(review): parses sklearn's plain-text report line by line, which
        # is brittle across sklearn versions; consider
        # classification_report(..., output_dict=True) instead.
        report_data = []
        lines = report.split('\n')
        for line in lines[2:-4]:
            row_data = line.split()
            if len(row_data) != 0:
                row = {}
                row['precision'] = float(row_data[-4])
                row['recall'] = float(row_data[-3])
                row['f1_score'] = float(row_data[-2])
                row['support'] = float(row_data[-1])
                row['class'] = self.idx_label_dic[int(row_data[0])]
                report_data.append(row)
        df = pd.DataFrame.from_dict(report_data)
        df.to_csv(file_path, index=False)

    def save_conf_mat(self, test_Y: Union[tuple, list, np.ndarray], predictions: Union[tuple, list, np.ndarray], file_path: str):
        """
        PARAMS
        ==========
        test_Y: numpy array
            shape is (n_samples,), true labels
        predictions: numpy array
            shape is (n_samples,), predicted labels
        file_path: string
            path to save the confusion matrix CSV

        RETURNS
        ==========
        None; writes the matrix with row/column totals ("All")
        """
        confusion_mat = metrics.confusion_matrix(test_Y, predictions, labels=list(self.idx_label_dic.keys()))
        total_row = confusion_mat.sum(axis=0)
        total_col = [np.nan] + list(confusion_mat.sum(axis=1)) + [sum(total_row)]
        confusion_mat_df = pd.DataFrame({})
        # First column carries the true-label names; remaining columns are
        # the predicted labels.
        confusion_mat_df["Predicted"] = ["True"] + list(self.idx_label_dic.values()) + ["All"]
        for idx, label in self.idx_label_dic.items():
            temp = [np.nan] + list(confusion_mat[:, idx]) + [total_row[idx]]
            confusion_mat_df[label] = temp
        confusion_mat_df["All"] = total_col
        confusion_mat_df.to_csv(file_path, index=False)

    def save_label_proba(self, pred_proba: np.ndarray, file_path: str):
        """
        PARAMS
        ==========
        pred_proba: numpy array
            shape is (n_samples, n_classes), predicted class probabilities
        file_path: string
            path to save the label probabilities CSV

        RETURNS
        ==========
        None; writes one column per class label
        """
        proba_df = pd.DataFrame({})
        for idx, label in self.idx_label_dic.items():
            proba_df[label] = pred_proba[:, idx]
        proba_df.to_csv(file_path, index=False)
6435200 | <gh_stars>0
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
from gaul import advi
class TestADVI:
    def test_mvn(self):
        """Smoke-test ADVI sampling on a factorised standard-normal posterior."""

        @jax.jit
        def ln_posterior(params):
            # Log-density of independent N(0, 1) over every dimension of x.
            return stats.norm.logpdf(params["x"], 0, 1).sum()

        n_dims = 5
        n_samples = 2000
        params = dict(x=jnp.ones(n_dims))
        samples = advi.sample(ln_posterior, params, n_steps=10000, n_samples=n_samples)
        x_samples = samples["x"]
        assert x_samples.shape == (n_dims, n_samples)

        def close_to(chain, val, tol=3.0):
            # True where the chain mean lies within tol standard deviations of val.
            return jnp.abs(jnp.mean(chain) - val) < tol * jnp.std(chain, axis=0)

        # Require that at least 99% of the checks sit near the true mean 0.
        assert close_to(x_samples, 0.0, 3.0).sum() / n_samples >= 0.99
| StarcoderdataPython |
9700857 | from NeuralNest import NeuralNest
from random import randint
import pygame
import pygame.surfarray
import numpy as np
import pandas
from TrainingData import TrainingData
from GamePlayerNetwork import GamePlayerNetwork
class NeuralNestAIPlayer:
    """Drives the NeuralNest catch-the-egg game either to record training
    data from the built-in heuristic player, or to play using a trained
    GamePlayerNetwork.
    """

    # Operating modes: LEARNING records (screen, best-action) pairs while the
    # heuristic plays; PLAYING queries the trained network for each move.
    LEARNING = 'learning'
    PLAYING = 'playing'

    def __init__(self):
        self.nnest = None            # active NeuralNest game instance
        self.surface_array = None    # last captured screen buffer (currently unused)
        self.network = None          # trained GamePlayerNetwork, set by play()
        # NOTE(review): 'training' matches neither LEARNING nor PLAYING; callers
        # set the mode explicitly before running — confirm intended default.
        self.mode = 'training'
        self.training_data = TrainingData()

    def get_neural_nest(self):
        return self.nnest

    def gather_data(self, FPS):
        """Run the game with the heuristic player and record training data."""
        self.nnest = NeuralNest(observer=self,
                                window_width=800,
                                window_height=800,
                                surface_width=20,
                                surface_height=20,
                                drop_height=0,
                                drop_threshold=17,
                                basket_width=5,
                                min_speed=1,
                                max_speed=2,
                                egg_radius=1)
        self.nnest.FPS = FPS
        # Monkey-patch pygame so we capture the screen after every update.
        pygame.display.update = self.function_combine(pygame.display.update, self.on_screen_update)
        print("Loading game")
        caught, dropped = self.nnest.run(300)
        self.training_data.save_csv("one_thousand_run.csv")
        print("Game complete: caught={0} dropped={1}".format(caught, dropped))

    # function that we can give two functions to and will return us a new function that calls both
    def function_combine(self, screen_update_func, our_intercepting_func):
        def wrap(*args, **kwargs):
            screen_update_func(*args,
                               **kwargs)  # call the screen update func we intercepted so the screen buffer is updated
            our_intercepting_func()  # call our own function to get the screen buffer
        return wrap

    def on_screen_update(self):
        # In learning mode, pair the current screen with the heuristic's best
        # action and append it to the training set.
        if self.mode == self.LEARNING:
            surface_array = self.nnest.display.get_surface_grayscale_array()
            assert(len(surface_array) > 0)
            best_action = self.nnest.get_best_player_action()
            self.training_data.append_training_data(surface_array, best_action)

    # The game will call us when it is time for a move
    def get_ai_action(self):
        if self.mode == self.PLAYING:
            surface_array = self.nnest.display.get_surface_grayscale_array()
            result = self.network.get_player_action(surface_array)
            return result

    def caught(self):
        # Observer callback: an egg was caught. No bookkeeping needed here.
        return

    def dropped(self):
        # Observer callback: an egg was dropped. No bookkeeping needed here.
        return

    def learn(self):
        """Train a fresh network from recorded data and save it to disk."""
        # NOTE(review): uses a local variable, not self.network; play() reloads
        # the saved model from disk instead.
        network = GamePlayerNetwork(20, 20)
        network.train("synthetic_training_data.txt")
        network.save_model("trained_model")
        network.display_training_results()
        network.plot_model()

    def play(self):
        """Load the trained network and let it play the game."""
        self.network = GamePlayerNetwork(20, 20)
        self.network.load_model("trained_model")
        self.nnest = NeuralNest(observer=self,
                                window_width=800,
                                window_height=800,
                                surface_width=20,
                                surface_height=20,
                                drop_height=0,
                                drop_threshold=17,
                                basket_width=5,
                                min_speed=1,
                                max_speed=2,
                                egg_radius=1)
        self.nnest.FPS = 20
        # Route the game's move requests to the trained network.
        self.nnest.get_player_action = self.get_ai_action
        pygame.display.update = self.function_combine(pygame.display.update, self.on_screen_update)
        print("Loading game")
        caught, dropped = self.nnest.run(100)
        print("Game complete: caught={0} dropped={1}".format(caught, dropped))
if __name__ == "__main__":
    ai_player = NeuralNestAIPlayer()
    ai_player.mode = NeuralNestAIPlayer.LEARNING
    # Data-gathering and training steps are disabled; playing assumes a
    # previously saved "trained_model" exists on disk.
    # ai_player.gather_data(60)
    # ai_player.learn()
    ai_player.mode = 'playing'
    ai_player.play()
| StarcoderdataPython |
3408493 | <reponame>HUJI-Deep/FlowKet<gh_stars>10-100
import itertools
import tensorflow.keras.backend as K
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model
from flowket.machines.ensemble import make_2d_obc_invariants, make_pbc_invariants, build_ensemble, build_symmetrization_ensemble
from .simple_models import real_values_2d_model, real_values_1d_model
DEFAULT_TF_GRAPH = tf.get_default_graph()
def transform_sample(sample, num_of_rotations, flip):
    """Apply a dihedral-group transformation to a batch of 2D samples.

    Rotations act in the spatial plane (axes 1 and 2, axis 0 being the
    batch axis); an optional flip along axis 2 follows.
    """
    transformed = sample
    if num_of_rotations > 0:
        transformed = np.rot90(transformed, k=num_of_rotations, axes=(1, 2))
    return np.flip(transformed, axis=2) if flip else transformed
def roll_sample(sample, roll_for_axis):
    """Cyclically roll a batch of samples along their non-batch axes.

    ``roll_for_axis`` holds one shift per non-batch axis; axis 0 (the batch
    axis) is never rolled. The previous code paired ``len(roll_for_axis)``
    shifts with only ``len(roll_for_axis) - 1`` axes
    (``range(1, len(roll_for_axis))``), which makes numpy raise a
    ValueError; the axis tuple must span 1..len(roll_for_axis).
    """
    return np.roll(sample, roll_for_axis, tuple(range(1, len(roll_for_axis) + 1)))
@pytest.mark.parametrize('model_builder, batch_size', [
    (real_values_2d_model, 100),
])
def test_make_2d_obc_invariants(model_builder, batch_size):
    """The OBC-invariant wrapper must produce identical outputs for all 8
    dihedral transformations (4 rotations x optional flip) of each sample."""
    with DEFAULT_TF_GRAPH.as_default():
        keras_model = model_builder()
        keras_model.summary()
        shape = K.int_shape(keras_model.input)[1:]
        obc_input = Input(shape=shape, dtype=keras_model.input.dtype)
        invariant_model = make_2d_obc_invariants(obc_input, keras_model)
        invariant_model_func = K.function(inputs=[obc_input], outputs=[invariant_model.output])
        size = (batch_size,) + K.int_shape(keras_model.input)[1:]
        batch = np.random.rand(*size)
        # All elements of the dihedral group D4 applied to the same batch.
        batch_transformations = [batch,
                                 transform_sample(batch, num_of_rotations=1, flip=False),
                                 transform_sample(batch, num_of_rotations=2, flip=False),
                                 transform_sample(batch, num_of_rotations=3, flip=False),
                                 transform_sample(batch, num_of_rotations=0, flip=True),
                                 transform_sample(batch, num_of_rotations=1, flip=True),
                                 transform_sample(batch, num_of_rotations=2, flip=True),
                                 transform_sample(batch, num_of_rotations=3, flip=True)
                                 ]
        vals = [invariant_model_func([transformation])[0] for transformation in batch_transformations]
        allclose = [np.allclose(vals[0], another_val, rtol=1e-3) for another_val in vals[1:]]
        assert np.all(allclose)
@pytest.mark.parametrize('model_builder, batch_size', [
    (real_values_2d_model, 5),
    (real_values_1d_model, 5),
])
def test_make_pbc_invariants(model_builder, batch_size):
    """The PBC-invariant wrapper must be invariant under every cyclic shift
    (translation) of the input along its non-batch axes."""
    with DEFAULT_TF_GRAPH.as_default():
        keras_model = model_builder()
        keras_model.summary()
        shape = K.int_shape(keras_model.input)[1:]
        pbc_input = Input(shape=shape, dtype=keras_model.input.dtype)
        invariant_model = make_pbc_invariants(pbc_input, keras_model, apply_also_obc_invariants=False)
        invariant_model_func = K.function(inputs=[pbc_input], outputs=[invariant_model.output])
        size = (batch_size,) + K.int_shape(keras_model.input)[1:]
        batch = np.random.rand(*size)
        # Every possible combination of per-axis shifts.
        batch_transformations = [roll_sample(batch, i) for i in
                                 itertools.product(*[range(dim_size) for dim_size in shape])]
        vals = [invariant_model_func([transformation])[0] for transformation in batch_transformations]
        allclose = [np.allclose(vals[0], another_val, rtol=1e-3) for another_val in vals[1:]]
        assert np.all(allclose)
@pytest.mark.parametrize('model_builder, batch_size', [
    (real_values_2d_model, 5),
    (real_values_1d_model, 5),
])
def test_build_symmetrization_ensemble(model_builder, batch_size):
    """build_symmetrization_ensemble over [x, -x] must agree with a manual
    build_ensemble of the model applied to x and -x."""
    with DEFAULT_TF_GRAPH.as_default():
        keras_model = model_builder()
        keras_model.summary()
        shape = K.int_shape(keras_model.input)[1:]
        symmetrization_input = Input(shape=shape, dtype=keras_model.input.dtype)
        ensemble_input = Input(shape=shape, dtype=keras_model.input.dtype)
        symmetrization_model = Model(inputs=symmetrization_input, outputs=build_symmetrization_ensemble([symmetrization_input, Lambda(lambda x:x * -1)(symmetrization_input)], keras_model))
        ensemble_model = Model(inputs=ensemble_input, outputs=build_ensemble([keras_model(ensemble_input), keras_model(Lambda(lambda x:x * -1)(ensemble_input))]))
        symmetrization_model_func = K.function(inputs=[symmetrization_input], outputs=[symmetrization_model.output])
        ensemble_model_func = K.function(inputs=[ensemble_input], outputs=[ensemble_model.output])
        size = (batch_size,) + K.int_shape(keras_model.input)[1:]
        batch = np.random.rand(*size)
        symmetrization_model_vals = symmetrization_model_func([batch])[0]
        ensemble_model_vals = ensemble_model_func([batch])[0]
        assert np.allclose(symmetrization_model_vals, ensemble_model_vals, rtol=1e-3)
| StarcoderdataPython |
5082468 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
import copy
from builtins import * # pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.utils import json
series_schema = {
"type": "object",
"properties": {
"airs_dayofweek": {"type": "string"},
"airs_time": {"type": "string"},
"aliases": {"type": "array", "items": {"type": "string"}},
"banner": {"type": "string"},
"content_rating": {"type": "string"},
"expired": {"type": "boolean"},
"first_aired": {"type": "string"},
"genres": {"type": "array", "items": {"type": "string"}},
"imdb_id": {"type": "string"},
"language": {"type": "string"},
"last_updated": {"type": "string"},
"network": {"type": "string"},
"overview": {"type": "string"},
"posters": {"type": "array", "items": {"type": "string"}},
"rating": {"type": "number"},
"runtime": {"type": "integer"},
"series_name": {"type": "string"},
"status": {"type": "string"},
"tvdb_id": {"type": "integer"},
"zap2it_id": {"type": "string"}
},
"required": [
"airs_dayofweek",
"airs_time",
"aliases",
"banner",
"content_rating",
"expired",
"first_aired",
"genres",
"imdb_id",
"language",
"last_updated",
"network",
"overview",
"posters",
"rating",
"runtime",
"series_name",
"status",
"tvdb_id",
"zap2it_id"
]
}
series_schema_actors = copy.deepcopy(series_schema)
series_schema_actors.update({'properties': {"actors": {"type": "array", "items": {"type": "string"}}}})
series_schema_actors['required'].append('actors')
episode_schema = {
"type": "object",
"properties": {
"absolute_number": {"type": ["null", "integer"]},
"director": {"type": "string"},
"episode_name": {"type": "string"},
"episode_number": {"type": "integer"},
"expired": {"type": "boolean"},
"first_aired": {"type": "string"},
"id": {"type": "integer"},
"image": {"type": "string"},
"last_update": {"type": "integer"},
"overview": {"type": "string"},
"rating": {"type": "number"},
"season_number": {"type": "integer"},
"series_id": {"type": "integer"}
},
"required": ["absolute_number", "director", "episode_name", "episode_number", "expired", "first_aired",
"id", "image", "last_update", "overview", "rating", "season_number", "series_id"]
}
search_results_schema = {
"type": "object",
"properties": {
"search_results": {
"type": "array",
"items": {
"type": "object",
"properties": {
"aliases": {"type": "array", "items": {"type": "string"}},
"banner": {"type": ["string", "null"]},
"first_aired": {"type": "string"},
"network": {"type": "string"},
"overview": {"type": ["string", "null"]},
"series_name": {"type": "string"},
"status": {"type": "string"},
"tvdb_id": {"type": "integer"}
},
"required": ["aliases", "banner", "first_aired", "network", "overview", "series_name", "status",
"tvdb_id"]}}
},
"required": [
"search_results"
]
}
@pytest.mark.online
class TestTVDBSeriesLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_series_lookup(self, api_client, schema_match):
        """Series lookup by name returns The X-Files with the expected IDs."""
        expected = {
            'tvdb_id': 77398,
            'imdb_id': 'tt0106179',
            'language': 'en',
            'series_name': 'The X-Files',
            'zap2it_id': 'EP00080955',
        }
        rsp = api_client.get('/tvdb/series/The X-Files/')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(series_schema, payload)
        for key, value in expected.items():
            assert payload.get(key) == value
@pytest.mark.online
class TestTVDBSeriesActorsLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_series_lookup_with_actors(self, api_client, schema_match):
        """Lookup with include_actors=true matches the actors-enabled schema."""
        expected = {
            'tvdb_id': 77398,
            'imdb_id': 'tt0106179',
            'language': 'en',
            'series_name': 'The X-Files',
            'zap2it_id': 'EP00080955',
        }
        rsp = api_client.get('/tvdb/series/The X-Files/?include_actors=true')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(series_schema_actors, payload)
        for key, value in expected.items():
            assert payload.get(key) == value
@pytest.mark.online
class TestTVDBEpisodeLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_episode_lookup_season_and_ep_number(self, api_client, schema_match):
        """Without season/episode params the lookup errors; with them it succeeds."""
        rsp = api_client.get('/tvdb/episode/77398/')
        assert rsp.status_code == 500, 'Response code is %s' % rsp.status_code
        expected = {
            'episode_number': 6,
            'id': 5313345,
            'season_number': 10,
            'series_id': 77398,
            'absolute_number': None,
        }
        rsp = api_client.get('/tvdb/episode/77398/?season_number=10&ep_number=6')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(episode_schema, payload)
        for key, value in expected.items():
            assert payload.get(key) == value
@pytest.mark.online
class TestTVDBEpisodeABSLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_episode_lookup_by_absolute_number(self, api_client, schema_match):
        """Resolve an episode of The Flash via its absolute episode number."""
        expected = {
            'episode_number': 23,
            'id': 5598674,
            'season_number': 2,
            'series_id': 279121,
            'absolute_number': 46,
        }
        rsp = api_client.get('/tvdb/episode/279121/?absolute_number=46')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(episode_schema, payload)
        for key, value in expected.items():
            assert payload.get(key) == value
@pytest.mark.online
class TestTVDSearchNameLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_search_results_by_name(self, api_client, schema_match):
        """Free-text search should rank Supernatural first."""
        rsp = api_client.get('/tvdb/search/?search_name=supernatural')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(search_results_schema, payload)
        first_hit = payload['search_results'][0]
        assert first_hit.get('series_name') == "Supernatural"
        assert first_hit.get('tvdb_id') == 78901
@pytest.mark.online
class TestTVDSearchIMDBLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_search_results_by_imdb_id(self, api_client, schema_match):
        """IMDB-id search should resolve to Game of Thrones."""
        rsp = api_client.get('/tvdb/search/?imdb_id=tt0944947')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(search_results_schema, payload)
        first_hit = payload['search_results'][0]
        assert first_hit.get('series_name') == "Game of Thrones"
        assert first_hit.get('tvdb_id') == 121361
@pytest.mark.online
class TestTVDSearchZAP2ITLookupAPI(object):
    config = 'tasks: {}'

    def test_tvdb_search_results_by_zap2it_id(self, api_client, schema_match):
        """Zap2it-id search should resolve to The Flash (2014)."""
        rsp = api_client.get('/tvdb/search/?zap2it_id=EP01922936')
        assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
        payload = json.loads(rsp.get_data(as_text=True))
        assert not schema_match(search_results_schema, payload)
        first_hit = payload['search_results'][0]
        assert first_hit.get('series_name') == "The Flash (2014)"
        assert first_hit.get('tvdb_id') == 279121
| StarcoderdataPython |
8087343 | import re
from typing import Any, Union
import numpy
import torch
from torch import Tensor
from torch.onnx.symbolic_helper import _onnx_main_opset, _onnx_stable_opsets
# Most recent entry in torch's list of stable ONNX opsets.
OPSET = _onnx_stable_opsets[-1]
def extract_number(f):
    """Return the last run of digits in ``str(f)`` as an int, or -1 if none."""
    digit_runs = re.findall(r"\d+", str(f))
    if not digit_runs:
        return -1
    return int(digit_runs[-1])
def remove_weight_norm(m):
    """Strip weight normalization from module ``m``; ignore modules without it."""
    try:
        torch.nn.utils.remove_weight_norm(m)
    except ValueError:
        # torch raises ValueError when no weight-norm hook is registered.
        return
def to_tensor(array: Union[Tensor, numpy.ndarray, Any], device):
    """Coerce ``array`` to a torch.Tensor and move it to ``device``.

    Tensors pass through, ndarrays are wrapped with ``from_numpy``, and any
    other value is first converted via ``numpy.asarray``.
    """
    if isinstance(array, Tensor):
        return array.to(device)
    if isinstance(array, numpy.ndarray):
        return torch.from_numpy(array).to(device)
    return torch.from_numpy(numpy.asarray(array)).to(device)
| StarcoderdataPython |
6530000 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Flask application and SQLAlchemy binding for the subscription demo.
app = Flask(__name__)
app.config['SECRET_KEY'] = "Thisissecret"
# NOTE(review): absolute sqlite paths conventionally use four slashes
# ("sqlite:////Users/..."); the five slashes here yield a "//Users/..." path,
# which happens to resolve on POSIX but looks like a typo -- confirm.
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://///Users/ankurkumar/Documents/Git/Python_Flask/12_09_20/data.db"
db = SQLAlchemy(app)

# Association table implementing the many-to-many User <-> Channel
# subscription relationship (composite primary key).
subs = db.Table('subs',
    db.Column('user_id',db.Integer, db.ForeignKey("user.user_id"),primary_key=True),
    db.Column('channel_id',db.Integer,db.ForeignKey("channel.channel_id"),primary_key=True)
)
class User(db.Model):
    """A user who can subscribe to many channels via the ``subs`` table."""
    user_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20))
    # Many-to-many: the reverse side appears on Channel as the dynamic
    # ``subscribers`` query created by the backref.
    subscriptions = db.relationship('Channel',secondary='subs', backref=db.backref('subscribers',lazy='dynamic'))
class Channel(db.Model):
    """A channel; its subscribers are exposed via the ``subscribers`` backref."""
    channel_id = db.Column(db.Integer, primary_key=True)
    channel_name = db.Column(db.String(20))
# >>> from many import *
# /Users/ankurkumar/Documents/Git/Python_Flask/03_09_20/venv/lib/python3.8/site-packages/flask_sqlalchemy/__init__.py:833: FSADeprecationWarning: SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True or False to suppress this warning.
# warnings.warn(FSADeprecationWarning(
# >>> db.create_all()
# >>> user1 = User(name="Ankur")
# >>> user2 = User(name="Stan")
# >>> user3 = User(name="Ron")
# >>> db.session.add(user1)
# >>> db.session.add(user2)
# >>> db.session.add(user3)
# >>> db.commit_all()
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# AttributeError: 'SQLAlchemy' object has no attribute 'commit_all'
# >>> db.commit()
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# AttributeError: 'SQLAlchemy' object has no attribute 'commit'
# >>> db.session.commit()
# >>> cha1 = Channel(channel_name='TeaTime')
# >>> cha2 = Channel(channel_name='CofeeTime')
# >>> cha3 = Channel(channel_name='ExpressoTime')
# >>> db.session.add(cha1)
# >>> db.session.add(cha2)
# >>> db.session.add(cha3)
# >>> db.session.commit()
# >>> cha1.subscribers.append(user1)
# try:
# ... db.session.commit()
# ... except:
# ... db.session.rollback() | StarcoderdataPython |
8132378 | import hashlib
#print(hashlib.algorithms_available)
#print(hashlib.algorithms_guaranteed)

# Demo: hash a fixed byte string with MD5.
# NOTE: MD5 is cryptographically broken; use it only for non-security checksums.
hash_object = hashlib.md5(b'Hello World')
print(hash_object.hexdigest())

# Hash user-supplied text (must be encoded to bytes first).
mystring = input('Enter String to hash: ')
# Assumes the default UTF-8
hash_object = hashlib.md5(mystring.encode())
print(hash_object.hexdigest())

# Same demo with SHA-256.
hash_object = hashlib.sha256(b'Hello World')
hex_dig = hash_object.hexdigest()
print(hex_dig)

newstring = input('Enter string to sha256 hash: ')
new_hash_object = hashlib.sha256(newstring.encode())
print(new_hash_object.hexdigest())
| StarcoderdataPython |
5133136 | from itertools import chain
import re
from yaml_rulz.rulebook import get_rule_response_dict
from yaml_rulz.rulebook import BooleanRule
from yaml_rulz.rulebook import GreaterThanRule
from yaml_rulz.rulebook import LessThanRule
from yaml_rulz.rulebook import OmitRule
from yaml_rulz.rulebook import PredefinedRegExpRule
from yaml_rulz.rulebook import RegExpRule
from yaml_rulz.rulebook import UniquenessRule
from yaml_rulz.yaml_handler import ResourceHandler
from yaml_rulz.yaml_handler import SchemaHandler
# Severity assigned to issues suppressed by an exclusion pattern.
WARNING_SEVERITY = "Warning"
# Canned issue messages.
MISSING_RESOURCE = "Item is missing from resource"
MISSING_SCHEMA = "No rules were found for resource"
MISSING_PROTOTYPE = "No matching prototype was found"
# Rules in one schema cell are chained with " | "; EOL anchors full-key matches.
RULE_SEPARATOR_REGEXP = r"\s+\|\s+"
EOL_REGEXP = r"$"
class YAMLValidator(object):
    """Validate a resource YAML document against a rule-bearing schema YAML.

    Schema scalar values may carry a chain of rules separated by " | "; each
    rule is "<token> <criterion>" where the token selects a rule class from
    ``known_rule_tokens``.
    """

    # Maps the leading token of a rule expression to the rule class that
    # evaluates it.  Unknown tokens fall back to OmitRule (no-op).
    known_rule_tokens = {
        "*": OmitRule,
        "?": BooleanRule,
        ">": GreaterThanRule,
        "<": LessThanRule,
        "~": RegExpRule,
        "@": PredefinedRegExpRule,
        "!": UniquenessRule,
    }

    def __init__(self, schema_content, resource_content, exclusions_content=None):
        """Parse schema/resource YAML text and optional exclusion patterns.

        exclusions_content: newline-separated regexes; issues whose schema or
        resource key matches one are downgraded to warnings.
        """
        self.schema_handler = SchemaHandler(schema_content)
        self.resource_handler = ResourceHandler(resource_content)
        self.exclusions = YAMLValidator._import_exclusions(exclusions_content)

    def get_validation_issues(self):
        """Run all checks; return (has_errors, issues) via _update_severity."""
        issues = [issue for issue in chain(
            self._find_missing_resource_scalars(),
            self._find_missing_schema_scalars(),
            self._validate_scalars(),
            self._validate_lists(),
        )]
        return self._update_severity(issues)

    def _update_severity(self, issues):
        """Downgrade excluded issues to warnings; report whether errors remain."""
        has_errors = False
        for issue in issues:
            if self._is_excluded(issue["schema"]) or self._is_excluded(issue["resource"]):
                issue["severity"] = WARNING_SEVERITY
            else:
                has_errors = True
        return has_errors, issues

    def _is_excluded(self, key):
        # TypeError is raised by re.match when key is None (an issue with no
        # schema/resource key) -- treated as "not excluded".
        try:
            for exc in self.exclusions:
                if re.match(exc, key):
                    return True
            return False
        except TypeError:
            return False

    def _find_missing_resource_scalars(self):
        """Yield an issue for every schema scalar absent from the resource."""
        for issue in self._yield_missing_scalar_error(self.schema_handler,
                                                      self.resource_handler,
                                                      MISSING_RESOURCE):
            yield issue

    def _find_missing_schema_scalars(self):
        """Yield an issue for every resource scalar with no schema rule."""
        for issue in self._yield_missing_scalar_error(self.resource_handler,
                                                      self.schema_handler,
                                                      MISSING_SCHEMA):
            yield issue

    def _validate_rules(self, flat_schema, flat_resource):
        """Yield rule failures for every resource key matching a schema key mask."""
        for resource_key in flat_resource:
            key_mask = self._key_to_mask(self.resource_handler, resource_key)
            for schema_key in flat_schema:
                if re.match(key_mask + EOL_REGEXP, schema_key):
                    rule_chain = self._split_rules(flat_schema[schema_key])
                    for rule_expression in rule_chain:
                        rule = self._get_rule(schema_key, resource_key, rule_expression)
                        # Whole resource must be passed to match() because of possible references in rules
                        result = rule.match(self.resource_handler.flat_yml)
                        if result:
                            yield result

    def _validate_scalars(self):
        """Validate plain (non-list) scalars against schema scalar rules."""
        for result in self._validate_rules(self.schema_handler.scalars, self.resource_handler.scalars):
            yield result

    def _validate_lists(self):
        """Match each resource list group against candidate schema prototypes.

        A group passes if at least one prototype validates cleanly; failures
        are only reported when every candidate prototype fails.
        """
        for resource_path, resource in self.resource_handler.list_handler.groups.items():
            # Collect prototypes
            path_mask = self._key_to_mask(self.resource_handler, resource_path)
            prototype_candidates = [
                prototype_groups
                for schema_path, prototype_groups in self.schema_handler.list_handler.groups.items()
                if re.match(path_mask + EOL_REGEXP, schema_path)
            ]
            prototypes = self._filter_matching_prototypes(resource, prototype_candidates)
            if not prototypes:
                yield get_rule_response_dict(
                    schema=prototype_candidates,
                    resource=resource_path,
                    message=MISSING_PROTOTYPE,
                )
            # Evaluate prototypes
            prototype_failure_count = 0
            prototype_failures = []
            for prototype in prototypes:
                result = [failure for failure in self._validate_rules(prototype, resource)]
                if result:
                    prototype_failures.extend(result)
                    prototype_failure_count += 1
            # Only surface failures when no prototype validated cleanly.
            if prototype_failure_count >= len(prototypes):
                for failure in prototype_failures:
                    yield failure

    def _filter_matching_prototypes(self, resource, prototypes):
        """Keep prototypes whose masked key set equals the resource's key set."""
        matching_prototypes = []
        masked_resource_keys = [self._key_to_mask(self.schema_handler, key) for key in resource.keys()]
        for prototype in prototypes:
            masked_prototype_keys = [self._key_to_mask(self.schema_handler, key) for key in prototype.keys()]
            if set(masked_prototype_keys) == set(masked_resource_keys):
                if re.search(self.schema_handler.list_handler.list_item_regexp + EOL_REGEXP, list(prototype)[0]):
                    # Prototype of bare list items: treat every entry as its
                    # own single-key prototype.
                    for key, value in prototype.items():
                        matching_prototypes.append({key: value})
                else:
                    matching_prototypes.append(prototype)
        return matching_prototypes

    def _get_rule(self, schema_key, resource_key, rule_expression):
        """Instantiate the rule class for one "<token> <criterion>" expression."""
        try:
            token, criterion = rule_expression.split(" ", 1)
        except (ValueError, AttributeError):
            # No separating space, or not a string: nothing to enforce.
            return OmitRule(schema_key, resource_key, None)
        else:
            if token in self.known_rule_tokens:
                return self.known_rule_tokens[token](schema_key, resource_key, criterion)
            return OmitRule(schema_key, resource_key, None)

    @staticmethod
    def _yield_missing_scalar_error(outer_handler, inner_handler, message):
        # Report every scalar key present in outer_handler but absent from
        # inner_handler, attributing it to the correct side via the role.
        for key in outer_handler.scalars:
            if key not in inner_handler.scalars:
                yield get_rule_response_dict(
                    schema=key if outer_handler.role == "schema" else None,
                    resource=key if outer_handler.role == "resource" else None,
                    message=message,
                )

    @staticmethod
    def _import_exclusions(exclusions_content):
        # One regex per non-empty line; None/empty input means no exclusions.
        if not exclusions_content:
            return []
        return [exc.strip() for exc in exclusions_content.splitlines() if exc]

    @staticmethod
    def _split_rules(rule_chain):
        # str() allows non-string schema cells (numbers, booleans).
        return re.split(RULE_SEPARATOR_REGEXP, str(rule_chain))

    @staticmethod
    def _key_to_mask(handler, key):
        # Replace every concrete list-index segment with the index regexp
        # itself, producing a pattern that matches any index at that spot.
        return re.sub(handler.list_handler.list_item_regexp, handler.list_handler.list_item_regexp, key)
| StarcoderdataPython |
9770373 | <reponame>yuriimchg/slack_moderator<gh_stars>0
from flask import Flask
app = Flask(__name__)

# BUGFIX: the import line was duplicated and fused into invalid syntax
# ("from app import routesfrom app import routes").  It is imported after
# ``app`` exists because ``routes`` imports ``app`` back (circular-import
# workaround common in small Flask apps).
from app import routes  # noqa: E402,F401
| StarcoderdataPython |
3378161 | #!/usr/bin/python3
# run time halt 20,24 sec
# PiJuice sw 1.4 is installed for Python 3
# sudo find / -name "pijuice.py" /usr/lib/python3.5/dist-packages/pijuice.py
#https://feeding.cloud.geek.nz/posts/time-synchronization-with-ntp-and-systemd/
# on systemd apt-get purge ntp to use only systemd-timesyncd.service
# edit /etc/systemd/timesyncd.conf
# systemctl restart systemd-timesyncd.service
# timedatectl status
#to enable NTP synchronized
# timedatectl set-ntp true
#The system is configured to read the RTC time in the local time zone.
#This mode can not be fully supported. It will create various problems
#with time zone changes and daylight saving time adjustments. The RTC
#time is never updated, it relies on external facilities to maintain it.
#If at all possible, use RTC in UTC by calling
#'timedatectl set-local-rtc 0'.
# timedatectl set-local-rtc 0
# to set date
#sudo date -s 16:31
# read hwclock
# sudo hwclock -r
# set hwclock with system time
# sudo hwclock -w --systohc same time, not utc vs local
# set hwclock with date
# sudo hwclock --set --date --localtime "1/4/2013 23:10:45" 4 jan
#set system time from hwclock
# sudo hwclock -s --hctosys
# use --debug
# use --utc or --localtime when setting hwclock
"""
# juice internal led 2
juice ok. blink blue 2x. else solid red
ntp ok. blink green 1x, 100ms, intensity 50 . else use hwclock blink red
pic sent. blink green 2x, 100ms, intensity 200 else fails blink red . else nigth blink blue
halt. blink blue 2x. else stay on blink red 2x
"""
# After the initial set of system time using the Linux date command and the copy to PiJuice RTC sudo hwclock -w, you simply just need to run at system startup do a 'sudo hwclock -s' to copy the time from the RTC to the system clock e.g. in /etc/rc.local. This is also assuming that your ID EEPROM is set to 0x50 in which case the RTC driver is loaded at boot.
# lsmod to check module Force loading the module by adding the following line to /boot/config.txt:
# dtoverlay=i2c-rtc,ds1339
# i2cdetect must shows UU instead of 68 https://github.com/PiSupply/PiJuice/issues/186
# hwclock -r read hwckock to stdout -w date to hw -s hw to date
#sudo ntpd -gq force ntp update
from __future__ import print_function
import RPi.GPIO as GPIO
import os
import time
# python2
#import httplib, urllib
import http.client, urllib
import datetime
# python 2
#import thread
import _thread
#https://github.com/vshymanskyy/blynk-library-python
#pip install blynk-library-python
import BlynkLib
# sudo apt-get install ntp
# systemctl
# sudo apt-get install ntpdate (client)
#pip install ntplib
import ntplib
#sudo pip install thingspeak
#https://thingspeak.readthedocs.io/en/latest/
import thingspeak
import pijuice
import subprocess
import sys
import logging
import subprocess
print('juice camera v2.1')

# wakeup every x mn
DELTA_MIN=20 # mn
limit_soc=15 # limit send pushover . a bit higher than the HALT_POWER_OFF in juice config
#Low values should be typically between 5-10%
# in juice config, minimum charge is 10%, min voltage 3.2V. wakeup on charge 70%
print ('send pushover when soc is below %d' %(limit_soc))
# GPIO pin assignments (BCM numbering).
pin_halt=26
pin_led=16 # not used anymore
# Blynk virtual pin numbers: Terminal V8, button V18
bbutton=18 # halt or tun
bterminal=8
bsoc=20
bsleep=21 #programmable sleep time
btemp=22 # of bat
bvbat=23 # of pijuice
bcount=24 # increment at each run
# use external led
led = False
#button value 2 un-initialized
button="2"
# THIS IS A string
count = 0
sleep_time=60
print ("STARTING. set system time from hwclock: ")
subprocess.call(["sudo", "hwclock", "--hctosys"])
# --systohc to set hwclock
# to measure execution time
start_time=datetime.datetime.now()
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_led,GPIO.OUT) # external led
# on at start, while running
GPIO.output(pin_led, GPIO.HIGH)
# connect to ground to keep running
GPIO.setup(pin_halt,GPIO.IN,pull_up_down=GPIO.PUD_UP)
t=20
print ("sleep to make sure boot is over and juice started:", t, ' sec')
time.sleep(t)
halt = GPIO.input(pin_halt)
print ("state of keep running pin is (pullup, no jumper = HIGH = HALT)", halt)
if halt ==0:
    print(" WARNING: will keep running!!!!")
if led: # led is false. could use external led blink. use internal led instead
    # flash led to signal start
    # NOTE(review): flash() is defined further down the file; this call only
    # works today because ``led`` is False -- confirm before enabling.
    print ("flash external led")
    flash(5,0.2)
# debug, info, warning, error, critical
log_file = "/home/pi/beecamjuice/log.log"
print ("logging to: " , log_file)
logging.basicConfig(filename=log_file,level=logging.INFO)
logging.info(str(datetime.datetime.now())+ '\n-------------- beecamjuice starting ...' )
s = os.popen('date').read()
print ("system date: ", s)
logging.info(str(datetime.datetime.now())+ ' system date at start: ' + s )
s = os.popen('sudo hwclock -r').read()
logging.info(str(datetime.datetime.now())+ ' read hw clock at start: ' + s )
print ("read hw clock: " , s)
# cycle , delay . flash external led if present
def flash(c, d):
    """Blink the external LED ``c`` times, ``d`` seconds on then ``d`` off."""
    for _ in range(c):
        GPIO.output(pin_led, GPIO.HIGH)
        time.sleep(d)
        GPIO.output(pin_led, GPIO.LOW)
        time.sleep(d)
# python3
def send_pushover(s):
    """POST message ``s`` to the Pushover notification API and log the send."""
    payload = urllib.parse.urlencode({
        "token": "your token",
        "user": "your token",
        "message": s,
    })
    conn = http.client.HTTPSConnection("api.pushover.net:443")
    conn.request("POST", "/1/messages.json", payload,
                 {"Content-type": "application/x-www-form-urlencoded"})
    conn.getresponse()
    logging.info(str(datetime.datetime.now())+ ' send pushover ...' + s )
#https://github.com/PiSupply/PiJuice/issues/91
# Record start time for testing
#txt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' -- Started\n'
#with open('/home/pi/beecamjuice/test.log','a') as f:
# f.write(txt)
# Wait for the I2C bus device to appear before talking to the PiJuice.
while not os.path.exists('/dev/i2c-1'):
    time.sleep(0.1)
try:
    pj = pijuice.PiJuice(1, 0x14)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt -- confirm.
    # cannot use internal led to signal error. pj not created
    print("Cannot create pijuice object")
    logging.error(str(datetime.datetime.now())+ '!!!! cannot create pijuice object, exit and keep running ...' )
    send_pushover("PiJuice: cannot create PiJuice object. will exit")
    sys.exit()
status = pj.status.GetStatus()
print ('juice object created:', status )
status = status ['error']
if status == 'NO_ERROR':
    print ("PiJuice Status OK")
    # internal led 2 'D2'. blue [r,g,b] range 0-255
    #pj.SetLedState('D2', [0, 0 , 200])
    # blink x times, Blue 500 ms, off 500 ms
    pj.status.SetLedBlink('D2',2, [0,0,200], 500, [0, 0, 0], 500)
    # executed on juice microcontroler. if next set led too quick, will overwrite
else:
    # internal led. solid red RGB
    pj.status.SetLedState('D2', [200, 0 ,0])
    print ("PiJuice Status ERROR")
    logging.error(str(datetime.datetime.now())+ ' PiJuice status ERROR' )
    enable_wakeup(pj) # in case
    # NOTE(review): shut() is not defined anywhere in this file -- this branch
    # would raise NameError; confirm the intended helper (power_off?).
    shut(pj) # otherwize was staying on, sucking power. sucker
print ("juice firmware version: ", pj.config.GetFirmwareVersion()['data']['version'])
# dict
soc = pj.status.GetChargeLevel()
soc = "%0.0f" %(soc['data'])
print ("soc ", soc)
logging.info(str(datetime.datetime.now())+ ' soc: ' + str(soc) )
soc = int(soc)
if soc < limit_soc:
    logging.info(str(datetime.datetime.now())+ ' soc too low: ' + str(soc) )
time.sleep(0.4)
vbat = pj.status.GetBatteryVoltage()
vbat = "%0.1f" %(vbat['data']/1000.0)
print ("vbat on board battery voltage", vbat)
logging.info(str(datetime.datetime.now())+ ' vbat: ' + str(vbat) )
time.sleep(0.4)
ibat = pj.status.GetBatteryCurrent()
time.sleep(0.4)
ibat = pj.status.GetBatteryCurrent() # false read ?
ibat = ibat['data']
print ("ibat current supplied from the battery", ibat)
logging.debug(str(datetime.datetime.now())+ ' ibat: ' + str(ibat) )
# iio 200 ma, Vio 5V ibat 300 ma when not charging, -2000 when charging
# Vbat is the on-board battery voltage and ibat is the current that is being supplied from the battery.
time.sleep(0.4)
temp=pj.status.GetBatteryTemperature()
temp = "%0.0f" %(temp['data'])
print ("temp ", temp)
logging.debug(str(datetime.datetime.now())+ ' temp: ' + str(temp) )
# vio is the voltage that is on the IO pin weather this is input or output and the iio is the current being provided or drawn.
# When reading analog read on IO1 it will output the same as vio.
time.sleep(0.4)
vio=pj.status.GetIoVoltage()
vio = vio['data']
print ("vio voltage on IO, input or output", vio)
logging.debug(str(datetime.datetime.now())+ ' vio: ' + str(vio) )
time.sleep(0.4)
iio=pj.status.GetIoCurrent()
iio = iio['data']
print ("iio current drawn or supplied on IO", iio)
logging.debug(str(datetime.datetime.now())+ ' iio: ' + str(iio) )
"""
time.sleep(0.4)
print ("reading analog in")
lipovbat=pj.status.GetIoAnalogInput(1)
print (lipovbat)
lipovbat= "%0.1f" %(2.0 * lipovbat['data']/1000.0) # pont diviseur. 3.3v logic max 3.6
print ("lipo vbat Volt", lipovbat)
logging.info(str(datetime.datetime.now())+ ' lipo bat Volt: ' + str(lipovbat) )
"""
print ("reset fault flag")
pj.status.ResetFaultFlags(['powerOffFlag', 'sysPowerOffFlag'])
# thinkspeak
print ( "publish to thinkspeak" )
try:
    thing = thingspeak.Channel(285664, api_key="<KEY>", \
        write_key="<KEY>", fmt='json', timeout=None)
    response = thing.update({1:vbat,2:soc,3:temp})
    print (response)
except:
    print("Thingspeak failed")
# program alarm
def set_alarm(sleep_tile,pj):
# Set RTC alarm x minutes from now
# RTC is kept in UTC or localtime
a={}
a['year'] = 'EVERY_YEAR'
a['month'] = 'EVERY_MONTH'
a['day'] = 'EVERY_DAY'
a['hour'] = 'EVERY_HOUR'
t = datetime.datetime.utcnow()
#a['minute'] = (t.minute + DELTA_MIN) % 60
a['minute'] = (t.minute + sleep_time) % 60
a['second'] = 0
try:
print ("setting RTC alarm " , a['minute'] )
status = pj.rtcAlarm.SetAlarm(a)
print ("rtc Set alarm status: " + str(status)) # if not str exception cannot concatenate str and dict
logging.info(str(datetime.datetime.now())+ ' rtc Set alarm status: ' + str(status) )
if status['error'] != 'NO_ERROR':
print('Cannot set RTC alarm')
logging.error(str(datetime.datetime.now())+ ' Cannot set RTC alarm; will exit and keep running ' )
blynk.virtual_write(bterminal, 'cannot set RTC. RUN')
send_pushover("PiJuice: cannot set RTC alarm. will exit and keep running")
time.sleep(5)
sys.exit()
else:
print('RTC Get Alarm: ' + str(pj.rtcAlarm.GetAlarm()))
logging.info(str(datetime.datetime.now())+ ' RTC Get Alarm: ' + str(pj.rtcAlarm.GetAlarm()) )
except Exception as e:
logging.info(str(datetime.datetime.now())+ ' !!! EXCEPTION in enable wakeup' + str(e))
blynk.virtual_write(bterminal, 'Exception Wakeup\n')
send_pushover("PiJuice: Exception Wakeupup enable")
def enable_wakeup(pj):
    """Arm the PiJuice wakeup so 5V power returns when the RTC alarm fires."""
    status = pj.rtcAlarm.SetWakeupEnabled(True)
    print ("enable wakeup to power PI on RTC alarm " + str(status))
    logging.info(str(datetime.datetime.now())+ ' enable wakeup: ' + str(status) )
    time.sleep(0.4)
# set power off
def power_off(delay,pj):
try:
# remove 5V power to PI
pj.power.SetPowerOff(delay)
logging.info(str(datetime.datetime.now())+ ' setpoweroff after ' + str(delay))
except Exception as e:
print ("exception in setpoweroff: ", str(e))
logging.error(str(datetime.datetime.now())+ ' exception in setpoweroff ' + str(e) )
# blynk
def blynk_thread(now):
    """Blynk worker thread: push telemetry, register pin handlers, block in run().

    now: pre-formatted time string; its leading day-of-week word is removed
    so the text fits the Android terminal widget.
    """
    # remove first word dow so that it fits in android screen
    print (now)
    n=now.split()
    del n[0]
    now=""
    for i in n:
        now=now+i+' '
    print (now)
    print(" BLYNK_START blynk thread started "), now

    def blynk_connected():
        # Once connected: sync persisted widget values, then push readings.
        print(" BLYNK_CONNECTED Blynk has connected. Synching button...")
        logging.info(str(datetime.datetime.now())+ ' Blynk connected. string: ' + now)
        #blynk.sync_all()
        blynk.sync_virtual(bbutton)
        blynk.sync_virtual(bcount)
        blynk.sync_virtual(bsleep)
        print(" BLYNK_WRITE write date to blynk: "), now
        #blynk.virtual_write(bterminal, now.isoformat()+'\n')
        # use ntp time , already a string
        # value already formatted, rounded
        blynk.virtual_write(bterminal, now + '\n')
        blynk.virtual_write(btemp, temp)
        blynk.virtual_write(bsoc, soc)
        blynk.virtual_write(bvbat, vbat)
        #blynk.virtual_write(blipovbat, lipovbat)
        #blynk.virtual_write(bibat, ibat)

    @blynk.VIRTUAL_WRITE(bbutton)
    def button_write_button(value):
        # Sync handler for the stay-on button; stores the raw string value.
        global button
        print(' BLYNK_SYNCHED button handler: Current button value: {}'.format(value))
        button=value

    # will not sync if value is empty (just created)
    @blynk.VIRTUAL_WRITE(bcount)
    def button_write_count(value):
        # Run counter: incremented on every boot, wraps back to 1 at 1000.
        global count
        print(' BLYNK_SYNCHED count handler: Current count value: {}'.format(value))
        # value str
        count=int(value)
        if count < 1000:
            count = count + 1
        else:
            count =1

    @blynk.VIRTUAL_WRITE(bsleep)
    def button_write_sleep(value):
        # Programmable sleep time in minutes, intended range 5..120.
        global sleep_time
        print(' BLYNK_SYNCHED sleep handler: Current sleep value: {}'.format(value))
        # value str
        # NOTE(review): this tests the OLD sleep_time before assigning, so the
        # incoming value is only validated against the previous value -- the
        # intent was probably to range-check int(value).  Confirm.
        if sleep_time >=5 and sleep_time <=120:
            sleep_time=int(value)
        else:
            sleep_time=60
        logging.info(str(datetime.datetime.now())+ ' sleep time ' + str(sleep_time) )

    # .py will not run long enough ?
    @blynk.VIRTUAL_WRITE(bterminal)
    def v8_write_handler(value):
        # Echo any terminal input back to the terminal widget.
        print (" blynk handler: read terminal")
        print (value)
        blynk.virtual_write(bterminal, 'Command: ' + value + '\n')

    blynk.on_connect(blynk_connected)
    try:
        blynk.run()
        #never returns
        # thread.exit()
    except Exception as e:
        print ("exception in Blynk.run. Blynk thread exit ") , e
    print("blynk thread exit")
# use ntp , if fails used hwclock
def get_time_and_file_hwclock():
    """Build (time_string, file_name, hour, month) from the system clock.

    Fallback used when NTP is unreachable; the system clock was seeded from
    the PiJuice hardware RTC at startup.
    """
    # Read the clock exactly once so all derived fields are mutually
    # consistent (repeated datetime.now() calls could straddle a minute or
    # day boundary and produce a filename that disagrees with the hour).
    now = datetime.datetime.now()
    file_name = str(now.month) + "_" + str(now.day) + "_" + str(now.hour) + "_" \
        + str(now.minute) + ".jpg"
    # set timestring to send to blynk
    # Y year, m month, d day, H hour, M minute, S sec
    # The first word ("holder") is stripped before sending to Blynk,
    # mirroring the day-of-week word returned by the NTP path.
    time_string = now.strftime("holder %b %d %H:%M")
    return (time_string, file_name, now.hour, now.month)
############ try this first. if exception, use hwclock
def get_time_and_file_ntp():
    """Get (time_string, file_name, hour, month) from Google NTP.

    Falls back to get_time_and_file_hwclock() on any NTP failure; blinks the
    PiJuice LED green on success, red on fallback.
    """
    # to get correct time stamp in log. for pic filename, we could only get it from google ntp
    #print("synching NTP")
    #os.system("sudo ntpdate -u pool.ntp.org")
    # even if off, we only care about the delta
    # not true. ntp can set time while we run
    #start_time=datetime.datetime.now()
    # use NTP directly to test for nigth and file name
    print ("call google time server")
    # string send thru blynk
    time_string = ("will ask google")
    file_name = "uninitialized" # in case, to avoid exception not assigned
    # set time_string and file_name
    try:
        c=ntplib.NTPClient()
        response = c.request('time.google.com', version=3)
        # sec to string
        # time string formatted there
        time_string = time.ctime(response.tx_time)
        print ("time from google ntp: ", time_string)
        hour = datetime.datetime.fromtimestamp(response.tx_time).hour
        month = datetime.datetime.fromtimestamp(response.tx_time).month
        year = datetime.datetime.fromtimestamp(response.tx_time).year
        mn = datetime.datetime.fromtimestamp(response.tx_time).minute
        day = datetime.datetime.fromtimestamp(response.tx_time).day
        file_name = str(month) + "_" + str(day) + "_" + str(hour) + "_" \
            + str(mn) + ".jpg"
        logging.info(str(datetime.datetime.now())+ ' Google NTP responded ' + time_string )
        pj.status.SetLedBlink('D2', 1, [0,50,0], 100, [0, 0, 0], 100)
    except Exception as e:
        print ("NTPlib error, use hw clock ", str(e))
        # python 3 ?
        #AttributeError: 'NTPException' object has no attribute 'message'
        #print (e.message, e.args)
        logging.error(str(datetime.datetime.now())+ ' NTP lib exception, will use hwclock ' + str(e) )
        (time_string,file_name,hour,month) = get_time_and_file_hwclock()
        pj.status.SetLedBlink('D2', 1, [50,0,0], 100, [0, 0, 0], 100)
    finally:
        # NOTE(review): ``return`` inside ``finally`` swallows any exception
        # raised in the except handler itself -- confirm this is intended.
        return(time_string,file_name,hour,month)
# which wifi
# also iwgetid
s1="/home/pi/beecamjuice/get_wifi.sh"
try:
    s = subprocess.check_output([s1])
except Exception as e:
    print ("exception iwconfig ", str(e))
    s= "exception getting wifi"
    # NOTE(review): on this path ``s`` is a str, so ``s.decode('utf-8')``
    # below would raise AttributeError -- confirm the intended fallback.
# python3 'wifi' is str s is bytes. cannot concatenate
print ("wifi being used: ", s)
logging.info(str(datetime.datetime.now())+ ' wifi: ' + s.decode('utf-8') )
# get time stamp for blynk, filename for pic and hour for day nigth
# hwclock or ntp
print ("get time from NTP and backup to hwclock if fails")
#(time_string,file_name,hour,month) = get_time_and_file_hwclock()
(time_string,file_name,hour,month) = get_time_and_file_ntp()
print ("picture filename: " , file_name)
print ("time string for blynk: " , time_string)
logging.info(str(datetime.datetime.now())+ ' ' + time_string + ' ' + file_name)
# sunrise and sunset per month
#sun = [8,17,8,18,7,19,7,20,6,21,6,21,6,21,7,20,7,19,7,17,7,18,7,17]
# Flat list of (sunrise_hour, sunset_hour) pairs, two entries per month
# January..December; indexed below as sun[(month-1)*2] / sun[(month-1)*2+1].
sun = [7,18,6,18,6,19,7,20,6,21,6,21,6,21,5,21,5,21,6,20,6,18,7,18]
# to take and send pic to cloud
script = "/home/pi/beecamjuice/send_pic_juice.sh"
# send log to cloud. not used
script_log = "/home/pi/beecamjuice/send_log.sh"
# send pic during day
print ("hour is" , hour, "month is ", month)
sunrise= sun[(month-1)*2]
sunset= sun[(month-1)*2+1]
print ("sun rise ", sunrise, "sunset ", sunset)
if soc < limit_soc: # quiet at nite
    logging.info(str(datetime.datetime.now())+ ' soc below limit: ' +str(soc) )
if (hour >= sunrise) and (hour <= sunset):
    # Daytime: warn about low battery, then take a picture and push it.
    logging.info(str(datetime.datetime.now())+ ' check sun: DAY ' )
    print ("apres lever et avant coucher , push pic")
    if soc < limit_soc: # quiet at nite
        # BUGFIX: literal percent signs in a %-format string must be doubled;
        # "10%," previously raised ValueError (unsupported format character)
        # at run time, crashing the script on every low-battery day.
        send_pushover("PiJuice: soc %d < limit %d. Config: shutdown at 10%%, wakeup at 70%%" %(soc,limit_soc) )
    print ("call script: " , script, " " , file_name)
    # Sentinel so a failed os.system call is reported as an error below.
    ret_code=666
    try:
        s= script + " " + file_name
        ret_code = os.system(s)
        print ("script :" , s , "ret code: " , ret_code)
    except Exception as e:
        logging.info(str(datetime.datetime.now())+ ' Exception in script ' + str(e) )
        print ("exception in script: " , str(e))
    if ret_code == 0:
        logging.info(str(datetime.datetime.now())+ ' picture sent OK ' + file_name )
        print ("pic script return 0, ie OK")
        if led:
            flash(3,0.1)
        # Blink internal LED green twice: picture sent.
        # if 500ms, will be overwriten by further set led
        pj.status.SetLedBlink('D2', 2, [0,200,0], 100, [0, 0,0], 100)
    else :
        logging.error(str(datetime.datetime.now())+ ' picture NOT sent. ret code: ' + str(ret_code) )
        print ("ERROR in pic script " , ret_code)
        send_pushover("PiJuice: picture not sent %d" %(ret_code) )
        # Guarded with ``led`` for consistency with the other flash() calls
        # (previously called unconditionally even with no external LED).
        if led:
            flash(3,1)
        # Blink internal LED red twice: send failed.
        pj.status.SetLedBlink('D2', 2, [200,0,0], 100, [0, 0,0], 100)
else:
    # Night: no picture; tag the Blynk string and blink blue.
    print ("nigth. no pic")
    logging.info(str(datetime.datetime.now())+ ' Check sun: NIGTH ' )
    if led:
        flash(1,0.1)
    time_string = time_string + " N"
    # string send to blynk shows no pic
    pj.status.SetLedBlink('D2', 2, [0,0,200], 100, [0, 0,0], 100)
# NOTE(review): indentation reconstructed from a whitespace-mangled paste —
# verify nesting against the original script.
# Start the Blynk client thread, then busy-wait (at most ~30 s) until the
# thread replaces the sentinel value "2" in `button` with the synced state of
# the stay-on button ("0" or "1"). `button`/`blynk_thread`/`bcount`/`count`
# are presumably defined earlier in the script — confirm.
# will not run until script has ran
# after blynk connected and synch
print ("BASH script has run or nigth. waiting for Blynk synch ..")
# create blynk there so that it is available in main thread as well
print ("starting blynk thread")
blynk = BlynkLib.Blynk('your token')
print ("time string to be sent blynk terminal", time_string)
# _ for python3
_thread.start_new_thread(blynk_thread, (time_string,))
print ("wait for blynk to run and synch button")
c=1
while (button == "2"):
    time.sleep(1)
    c=c+1
    if c==30:
        # Timeout: assume the stay-on button is OFF so the device can still halt.
        print ("!!!!!!!!! button could not synch. assume OFF , ie string 0")
        logging.error(str(datetime.datetime.now())+ ' stay on button did not synch. assumes OFF ' )
        button="0"
        break
print ("button has synched or timeout on synch")
blynk.virtual_write(bcount, count)
# halt or stay on ?
# STAY ON if either pin to ground (jumper), or stay on button == "1"
# SHUTDOWN if pin high (no jumper) and stay on button == "0"
# pin: default PULLUP , no jumper = HIGH
# halt is int, button UNICODE !!!!!!!
print ("halt pin: " , halt , "stay on button: " , button)
print ("keep running if halt = LOW (jumper to ground) or stay on button = ON ie string 1")
print ("shutdown if halt = HIGH (No jumper to ground, PULLUP) and stay on button = OFF ie string 0")
logging.info(str(datetime.datetime.now())+ ' Blynk synched: halt pin: ' + str(halt) + ' button: ' + button )
# NOTE(review): indentation reconstructed from a whitespace-mangled paste —
# verify nesting against the original script.
# Final decision: either schedule a PiJuice wakeup and halt the Pi, or exit
# and leave it running. The two `if` conditions are complementary, so exactly
# one branch runs.
# HALT
if (halt==1) and (button=="0"): # default pullup shutdown
    print ("halt pin jumper to HIGH and button OFF. HALTING")
    end_time=datetime.datetime.now()
    print ("start time: ", start_time)
    print ("end time: ", end_time)
    # flash led to signal end
    if led:
        flash(10,0.1)
    #blink ligth green RGB
    pj.status.SetLedBlink('D2', 2, [0,0,200], 100, [0, 0,0], 100)
    # set alarm , done in GUI or here
    print ("set sleep time: %d" %(sleep_time))
    set_alarm(sleep_time,pj)
    print ("enable wakeup")
    enable_wakeup(pj)
    print ("cut MCU power switch")
    pj.power.SetSystemPowerSwitch(0) # pi supply recommended
    # Grace period before PiJuice cuts power, so `sudo halt` below can finish.
    delay=20
    print ("PI power down in %d sec" %(delay))
    #set poweroff done in gui or here
    power_off(delay,pj)
    logging.info(str(datetime.datetime.now())+ ' halt' )
    # send log to cloud
    #ret_code = os.system(script_log)
    #print ("send log script :" , script_log , "ret code: " , ret_code)
    # clear led
    pj.status.SetLedState('D2', [0, 0,0])
    print ("halt")
    subprocess.call(["sudo", "halt"])
    # Sleep until the PiJuice actually removes power; never returns in practice.
    time.sleep(600)
# keep running
if ((halt==0) or (button=="1")):
    print ("halt pin jumper to LOW or button ON. RUNNING\n\n")
    if button == "1":
        # blynk button stay on
        send_pushover("PiJuice: stay on button")
    end_time=datetime.datetime.now()
    print ("start time: ", start_time)
    print ("end time: ", end_time)
    #SetLedBlink(led, count, rgb1, period1, rgb2, period2)
    # count = 255 infinite period 10 2550 ms RGB 0 255
    # if 255 will keep bliking even if python has exited (stm32 does it)
    # blink red
    pj.status.SetLedBlink('D2', 2, [200,0,0], 100, [0,0,0], 100)
    # flash led to signal end
    if led:
        flash(5,1)
    blynk.virtual_write(bterminal, 'KEEP RUNNING\n')
    logging.info(str(datetime.datetime.now())+ ' Exit. KEEP RUNNING' )
    # send log to cloud
    #ret_code = os.system(script_log)
    #print ("send log script :" , script_log , "ret code: " , ret_code)
    # clear led
    pj.status.SetLedState('D2', [0, 0,0])
    print ("exit and keep PI running")
# cleanup: turn running led off
#GPIO.cleanup()
exit(0)
| StarcoderdataPython |
class LanChatError(Exception):
    """Root of the LAN-chat exception hierarchy.

    Catching this type handles every error the package raises.
    """


class ServerNotFound(LanChatError):
    """LAN-chat error for a server that could not be located."""


class InvalidCommand(LanChatError):
    """LAN-chat error for a command that is not recognized."""
| StarcoderdataPython |
3246963 | from rest_framework.serializers import ModelSerializer
from django_remote_queryset.viewset import DRQModelViewSet
from polls.models import Poll
class PollModelSerializer(ModelSerializer):
    """DRF model serializer for ``Poll``.

    Exposes only ``id`` and ``title``; all other Poll fields are excluded
    from the serialized representation.
    """

    class Meta:
        model = Poll
        fields = ('id', 'title',)
class PollModelViewSet(DRQModelViewSet):
    """django-remote-queryset viewset serving every ``Poll`` row.

    Uses :class:`PollModelSerializer`, so responses carry only ``id`` and
    ``title``.
    """

    queryset = Poll.objects.all()
    serializer_class = PollModelSerializer
| StarcoderdataPython |
6648986 | # https://programmers.co.kr/learn/courses/30/lessons/42895
# N์ผ๋ก ํํ
# ์ธ๋ป๋ณด๊ธฐ์ ๋น์ทํ๊ฒ ์๊ธด ๋ฌธ์ ๋ก ์ฐ์ฐ์์ ์๋ฅผ ์ต์ํํ๋ ๋ฌธ์ ๊ฐ ์๋๋ฐ
# ๊ทธ ๋ฌธ์ ๋ bfs๋ฅผ ์ด์ฉํ๋ฉด ์ฝ๊ฒ ํ ์ ์๋ ๊ฑธ๋ก ๊ธฐ์ตํ๋ค
# ์ด ๋ฌธ์ ๋ ์ค์๋ก ๋ถ๋ฅ๋ฅผ ๋ด๋ฒ๋ ธ๋ค. DP๋ฌธ์ ๋ผ๊ณ ํ๋ค.
# DP[N][:number]๋ฅผ ๋ง๋ ๋ค๊ณ ์ณ๋
# number๋ฅผ ๊ตฌ์ฑํ๋ ๋ฐฉ๋ฒ์ด ๋๋ฌด ๋ง๋ค.
# ๊ทธ๋ฆฌ๊ณ DP[N][:number]๋ง์ผ๋ก๋ ์ต์ ์ ๋ฐฉ๋ฒ์ ๋ชป ์ฐพ์ ์๋ ์๋ค.
# ํ ์ผ๋ง ๋ด๋
# DP[5][12] = DP[5][60] + DP[5][1]์ธ ๊ฒ์ ๋ณผ ์ ์๋ค.
# ์ฆ, ์ ์ด์ optimal substructure๋ฅผ ๊ฐ์ง ์๋๋ค.
# DP์ ํต์ฌ์ Optimal substructure์ Overlapping Subproblems์ด๋ค
# ์๊ฐํ ๋๋, ๊ทธ๋ฅ ํ์ ๋ฌธ์ ๊ฐ ํ๋ ธ๋ค๊ณ ํ์ ๋
# ํ์ ๋ฌธ์ ์ ๋ต์ ์ด์ฉํด์ ์์ ์๊ฐ(subproblem๊ฐ์์ ๋ฐ๋ผ์ ๊ผญ ์์ ์๊ฐ์ ์๋์ด๋ ๊ด์ฐฎ๋ค) ์์ ํ ์ ์๋๋ก ์๊ฐํ๋ฉด ๋ ๊ฒ ๊ฐ๋ค.
# N์ ์ต๋ 8๊ฐ๋ฅผ ์ด์ฉํด์ number๋ฅผ ๋ง๋๋ ๋ฐฉ๋ฒ์ด๋ผ...
# ์ฌ์ฉ ๊ฐ๋ฅํ ์ฌ์น์ฐ์ฐ์ด 4๊ฐ ์๊ณ , ์ฌ์ฉ ๊ฐ๋ฅํ ์ซ์๋ {N, NN, ..., NNNNNNNN, ๊ธฐํ N์ 8๊ฐ ์ดํ๋ก ์ฌ์ฉํด์ ๋ง๋ค ์ ์๋ ์)
# ํ ...
# ์ํ์ผ๋ก ํ๋ฉด ๋๋ฌด ์ค๋ ๊ฑธ๋ฆฌ๋?
# N์ 1๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์
# N์ 2๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์
# N์ 3๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์
# ...
# N์ 8๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์
# N์ n๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์ = N์ n-1๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์์ N์ 1๊ฐ ์ด์ฉํด์ ๋ง๋ค ์ ์๋ ์๋ฅผ ์ด์ฉํด์
# + n-2์ 2 + ... + 1๊ณผ n - 1 + 'N'*n
# ๊ทผ๋ฐ ์ด๊ฒ ๋จ์ํ ๊ฐ์๋ง ์ ์ฅํ๋ฉด ์ ๋๊ณ , ์ค์ ๊ฐ์ ๋นํธ๋ง์คํน ๋๋์ผ๋ก ๊ฐ๊ณ ์๊ณ , ํฉ์ฑํ ๋ union์ ํด์ผ ํ๋ค.
# The problem allows using the digit N at most 8 times.
MAX_n = 8

# dp[n] holds every value that can be built using exactly n copies of N.
# Reset inside solution() so repeated calls do not reuse stale state.
dp = [set() for _ in range(MAX_n + 1)]


def construct(n, N):
    """Fill dp[n] with every value expressible with exactly n copies of digit N.

    Adds the n-digit repetition of N (e.g. n=3, N=5 -> 555), then combines
    dp[i] with dp[n-i] using +, -, * and integer division for every split i.
    Assumes dp[1] .. dp[n-1] are already populated.
    """
    global dp
    dp[n].add(int(str(N) * n))
    for i in range(1, n):
        for opd_1 in dp[i]:
            for opd_2 in dp[n - i]:
                dp[n].add(opd_1 + opd_2)
                dp[n].add(opd_1 - opd_2)
                dp[n].add(opd_1 * opd_2)
                if opd_2 != 0:
                    # Floor division, matching the original's use of `//`.
                    dp[n].add(opd_1 // opd_2)


def solution(N, number):
    """Return the minimum count of digit N (1..8) needed to express `number`.

    Returns -1 when `number` cannot be built from at most MAX_n copies of N.

    Fix: dp is re-initialised on every call. The original kept dp as
    accumulated global state, so a second call with a different N mixed
    values from both digits and returned wrong answers. The leftover debug
    `print(dp)` on success was also removed.
    """
    global dp
    dp = [set() for _ in range(MAX_n + 1)]
    for i in range(1, MAX_n + 1):
        construct(i, N)
        if number in dp[i]:
            return i
    return -1


print(solution(2, 2))
| StarcoderdataPython |
8180160 | # -*- coding: utf-8 -*-
"""
shellstreaming.operator.copy_split
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:synopsis:
"""
# standard modules
import cPickle as pickle
# my modules
from shellstreaming.operator.base import Base
class CopySplit(Base):
    """Stream operator that duplicates every incoming batch onto several
    output queues (one deep copy per output).

    Note: Python 2 style code (``cPickle``, ``dict.values()[0]``).
    """

    def __init__(self, num_copies, **kw):
        """
        :param num_copies: number of output queues each batch is copied to
        :type num_copies: int
        :param kw: must contain ``input_queues`` (exactly one entry) and
            ``output_queues`` (exactly ``num_copies`` entries)
        """
        self._num_copies = num_copies
        in_qs, out_qs = (kw['input_queues'], kw['output_queues'])
        # input queue
        assert(len(in_qs) == 1)
        self._in_q = in_qs.values()[0]
        # output queues
        assert(len(out_qs) == num_copies)
        self._out_qs = out_qs.values()
        Base.__init__(self)

    def run(self):
        """Pop batches from the input queue and push an independent deep copy
        of each batch to every output queue.

        ``None`` is the end-of-stream sentinel: it is propagated to every
        output queue and the loop terminates.
        """
        while True:
            batch = self._in_q.pop()
            if batch is None:
                map(lambda q: q.push(None), self._out_qs)
                break
            # create batch's deep-copy and push to all
            # (pickle round-trip is used as a deep copy)
            for q in self._out_qs:
                cp = pickle.dumps(batch)
                cp = pickle.loads(cp)
                q.push(cp)

    @staticmethod
    def out_stream_edge_id_suffixes(num_copies):
        # NOTE(review): here `num_copies` is a 1-element sequence (see the
        # assert), unlike the int taken by __init__ — confirm the caller's
        # calling convention.
        assert(len(num_copies) == 1)
        return ['copy%d' % (i) for i in range(num_copies[0])]
| StarcoderdataPython |
3413007 | <reponame>omginteractive/fetner-django-syncable<filename>syncable/apps.py
from django.apps import AppConfig
class SyncableConfig(AppConfig):
    """Django application configuration for the ``syncable`` app."""

    # App label/path registered with Django's app registry.
    name = 'syncable'
| StarcoderdataPython |
3398515 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.tz import tzlocal
import numpy as np
from pynwb import NWBFile, NWBHDF5IO, get_manager, ProcessingModule
from pynwb.device import Device
from pynwb.ecephys import LFP, ElectricalSeries
from pynwb.core import DynamicTable, DynamicTableRegion, VectorData
from pynwb.epoch import TimeIntervals
from pynwb.misc import DecompositionSeries
from pynwb.base import TimeSeries
from pynwb.file import Subject
from ndx_spectrum import Spectrum
from nwbext_ecog import CorticalSurfaces, ECoGSubject
def nwb_copy(old_file, new_file, cp_objs=None):
    """
    Copy fields selected in `cp_objs` from an existing NWB file to a new NWB file.

    Parameters
    ----------
    old_file : str, path
        String such as '/path/to/old_file.nwb'.
    new_file : str, path
        String such as '/path/to/new_file.nwb'.
    cp_objs : dict, optional
        Name:Value pairs (Group:Children) listing the groups and respective
        children from the current NWB file to be copied. Children can be:
        - Boolean, indicating an attribute (e.g. for institution, lab)
        - List of strings, containing several children names

        Example:
        {'institution':True,
         'lab':True,
         'acquisition':['microphone'],
         'ecephys':['LFP','DecompositionSeries']}
    """
    # Fix: avoid a mutable default argument ({}); callers that omit cp_objs
    # get the same behavior as before.
    if cp_objs is None:
        cp_objs = {}

    manager = get_manager()

    # Open original signal file. It must stay open for the whole copy:
    # pynwb/h5py datasets are read lazily, so every `...data[:]` below
    # touches the old file.
    with NWBHDF5IO(old_file, 'r', manager=manager, load_namespaces=True) as io1:
        nwb_old = io1.read()

        # Creates new file
        nwb_new = NWBFile(session_description=str(nwb_old.session_description),
                          identifier='',
                          session_start_time=datetime.now(tzlocal()))

        with NWBHDF5IO(new_file, mode='w', manager=manager, load_namespaces=False) as io2:
            # Institution name ----------------------------------------------
            if 'institution' in cp_objs:
                nwb_new.institution = str(nwb_old.institution)

            # Lab name ------------------------------------------------------
            if 'lab' in cp_objs:
                nwb_new.lab = str(nwb_old.lab)

            # Session id ----------------------------------------------------
            if 'session' in cp_objs:
                nwb_new.session_id = nwb_old.session_id

            # Devices -------------------------------------------------------
            if 'devices' in cp_objs:
                for aux in list(nwb_old.devices.keys()):
                    dev = Device(nwb_old.devices[aux].name)
                    nwb_new.add_device(dev)

            # Electrode groups ----------------------------------------------
            # Requires 'devices' to have been copied, since each group links
            # to a device in the NEW file.
            if 'electrode_groups' in cp_objs:
                for aux in list(nwb_old.electrode_groups.keys()):
                    nwb_new.create_electrode_group(
                        name=nwb_old.electrode_groups[aux].name,
                        description=nwb_old.electrode_groups[aux].description,
                        location=nwb_old.electrode_groups[aux].location,
                        device=nwb_new.get_device(nwb_old.electrode_groups[aux].device.name))

            # Electrodes ----------------------------------------------------
            if 'electrodes' in cp_objs:
                nElec = len(nwb_old.electrodes['x'].data[:])
                for aux in np.arange(nElec):
                    nwb_new.add_electrode(
                        x=nwb_old.electrodes['x'][aux],
                        y=nwb_old.electrodes['y'][aux],
                        z=nwb_old.electrodes['z'][aux],
                        imp=nwb_old.electrodes['imp'][aux],
                        location=nwb_old.electrodes['location'][aux],
                        filtering=nwb_old.electrodes['filtering'][aux],
                        group=nwb_new.get_electrode_group(nwb_old.electrodes['group'][aux].name),
                        group_name=nwb_old.electrodes['group_name'][aux])
                # if there are custom variables
                new_vars = list(nwb_old.electrodes.colnames)
                default_vars = ['x', 'y', 'z', 'imp', 'location', 'filtering',
                                'group', 'group_name']
                # Fix: guard with `in` so a file missing a default column does
                # not raise ValueError (consistent with the epochs section).
                [new_vars.remove(var) for var in default_vars if var in new_vars]
                for var in new_vars:
                    nwb_new.add_electrode_column(name=var,
                                                 description=nwb_old.electrodes[var].description,
                                                 data=nwb_old.electrodes[var].data[:])

            # Epochs --------------------------------------------------------
            if 'epochs' in cp_objs:
                nEpochs = len(nwb_old.epochs['start_time'].data[:])
                for i in np.arange(nEpochs):
                    nwb_new.add_epoch(start_time=nwb_old.epochs['start_time'].data[i],
                                      stop_time=nwb_old.epochs['stop_time'].data[i])
                # if there are custom variables
                new_vars = list(nwb_old.epochs.colnames)
                default_vars = ['start_time', 'stop_time', 'tags', 'timeseries']
                [new_vars.remove(var) for var in default_vars if var in new_vars]
                for var in new_vars:
                    nwb_new.add_epoch_column(name=var,
                                             description=nwb_old.epochs[var].description,
                                             data=nwb_old.epochs[var].data[:])

            # Invalid times -------------------------------------------------
            if 'invalid_times' in cp_objs:
                nInvalid = len(nwb_old.invalid_times['start_time'][:])
                for aux in np.arange(nInvalid):
                    nwb_new.add_invalid_time_interval(
                        start_time=nwb_old.invalid_times['start_time'][aux],
                        stop_time=nwb_old.invalid_times['stop_time'][aux])

            # Trials --------------------------------------------------------
            if 'trials' in cp_objs:
                nTrials = len(nwb_old.trials['start_time'])
                for aux in np.arange(nTrials):
                    nwb_new.add_trial(start_time=nwb_old.trials['start_time'][aux],
                                      stop_time=nwb_old.trials['stop_time'][aux])
                # if there are custom variables
                new_vars = list(nwb_old.trials.colnames)
                default_vars = ['start_time', 'stop_time']
                # Fix: guarded removal, same reason as in the electrodes section.
                [new_vars.remove(var) for var in default_vars if var in new_vars]
                for var in new_vars:
                    nwb_new.add_trial_column(name=var,
                                             description=nwb_old.trials[var].description,
                                             data=nwb_old.trials[var].data[:])

            # Intervals -----------------------------------------------------
            # NOTE: only start/stop times are copied; custom interval columns
            # are not carried over.
            if 'intervals' in cp_objs:
                all_objs_names = list(nwb_old.intervals.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.intervals[obj_name]
                    # create and add TimeIntervals
                    obj = TimeIntervals(name=obj_old.name,
                                        description=obj_old.description)
                    nInt = len(obj_old['start_time'])
                    for ind in np.arange(nInt):
                        obj.add_interval(start_time=obj_old['start_time'][ind],
                                         stop_time=obj_old['stop_time'][ind])
                    # Add to file
                    nwb_new.add_time_intervals(obj)

            # Stimulus ------------------------------------------------------
            if 'stimulus' in cp_objs:
                all_objs_names = list(nwb_old.stimulus.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.stimulus[obj_name]
                    obj = TimeSeries(name=obj_old.name,
                                     description=obj_old.description,
                                     data=obj_old.data[:],
                                     rate=obj_old.rate,
                                     resolution=obj_old.resolution,
                                     conversion=obj_old.conversion,
                                     starting_time=obj_old.starting_time,
                                     unit=obj_old.unit)
                    nwb_new.add_stimulus(obj)

            # Processing modules --------------------------------------------
            if 'ecephys' in cp_objs:
                if cp_objs['ecephys'] is True:
                    all_proc_names = list(nwb_old.processing['ecephys'].data_interfaces.keys())
                else:  # list of items
                    all_proc_names = cp_objs['ecephys']
                # Add ecephys module to NWB file
                ecephys_module = ProcessingModule(name='ecephys',
                                                  description='Extracellular electrophysiology data.')
                nwb_new.add_processing_module(ecephys_module)
                for proc_name in all_proc_names:
                    obj_old = nwb_old.processing['ecephys'].data_interfaces[proc_name]
                    # copy_obj returns None for unsupported types; skip those.
                    obj = copy_obj(obj_old, nwb_old, nwb_new)
                    if obj is not None:
                        ecephys_module.add_data_interface(obj)

            # Acquisition ---------------------------------------------------
            if 'acquisition' in cp_objs:
                if cp_objs['acquisition'] is True:
                    all_acq_names = list(nwb_old.acquisition.keys())
                else:  # list of items
                    all_acq_names = cp_objs['acquisition']
                for acq_name in all_acq_names:
                    obj_old = nwb_old.acquisition[acq_name]
                    obj = copy_obj(obj_old, nwb_old, nwb_new)
                    if obj is not None:
                        nwb_new.add_acquisition(obj)

            # Subject -------------------------------------------------------
            if 'subject' in cp_objs:
                try:
                    cortical_surfaces = CorticalSurfaces()
                    surfaces = nwb_old.subject.cortical_surfaces.surfaces
                    for sfc in list(surfaces.keys()):
                        cortical_surfaces.create_surface(name=surfaces[sfc].name,
                                                         faces=surfaces[sfc].faces,
                                                         vertices=surfaces[sfc].vertices)
                    nwb_new.subject = ECoGSubject(cortical_surfaces=cortical_surfaces,
                                                  subject_id=nwb_old.subject.subject_id,
                                                  age=nwb_old.subject.age,
                                                  description=nwb_old.subject.description,
                                                  genotype=nwb_old.subject.genotype,
                                                  sex=nwb_old.subject.sex,
                                                  species=nwb_old.subject.species,
                                                  weight=nwb_old.subject.weight,
                                                  date_of_birth=nwb_old.subject.date_of_birth)
                except Exception:
                    # Fix: was a bare `except:` (would also swallow
                    # KeyboardInterrupt/SystemExit). Fall back to a plain
                    # Subject when the old file has no ECoG cortical-surfaces
                    # data.
                    nwb_new.subject = Subject(age=nwb_old.subject.age,
                                              description=nwb_old.subject.description,
                                              genotype=nwb_old.subject.genotype,
                                              sex=nwb_old.subject.sex,
                                              species=nwb_old.subject.species,
                                              subject_id=nwb_old.subject.subject_id,
                                              weight=nwb_old.subject.weight,
                                              date_of_birth=nwb_old.subject.date_of_birth)

            # Write new file with copied fields
            io2.write(nwb_new, link_data=False)
def copy_obj(obj_old, nwb_old, nwb_new):
    """ Creates a copy of obj_old. """
    # Dispatches on the pynwb type name; returns None for types this helper
    # does not know how to copy (callers skip None results).
    # NOTE(review): the first two branches are separate `if`s while the rest
    # chain with `elif` off the LFP branch; this works only because obj_type
    # values are mutually exclusive.
    obj = None
    obj_type = type(obj_old).__name__
    #ElectricalSeries ----------------------------------------------------------
    if obj_type=='ElectricalSeries':
        # NOTE(review): the region spans the whole electrode table of the old
        # series' table, applied to the NEW file's table — confirm both tables
        # have matching electrodes.
        nChannels = obj_old.electrodes.table['x'].data.shape[0]
        elecs_region = nwb_new.electrodes.create_region(name='electrodes',
                                                        region=np.arange(nChannels).tolist(),
                                                        description='')
        obj = ElectricalSeries(name=obj_old.name,
                               data=obj_old.data[:],
                               electrodes=elecs_region,
                               rate=obj_old.rate,
                               description=obj_old.description)
    #LFP -----------------------------------------------------------------------
    if obj_type=='LFP':
        obj = LFP(name=obj_old.name)
        # Copies only the FIRST electrical series contained in the LFP.
        els_name = list(obj_old.electrical_series.keys())[0]
        els = obj_old.electrical_series[els_name]
        nChannels = els.data.shape[1]
        elecs_region = nwb_new.electrodes.create_region(name='electrodes',
                                                        region=np.arange(nChannels).tolist(),
                                                        description='')
        # obj_ts is registered on `obj` by create_electrical_series; the
        # variable itself is unused afterwards.
        obj_ts = obj.create_electrical_series(name=els.name,
                                              comments=els.comments,
                                              conversion=els.conversion,
                                              data=els.data[:],
                                              description=els.description,
                                              electrodes=elecs_region,
                                              rate=els.rate,
                                              resolution=els.resolution,
                                              starting_time=els.starting_time)
    #TimeSeries ----------------------------------------------------------------
    elif obj_type=='TimeSeries':
        obj = TimeSeries(name=obj_old.name,
                         description=obj_old.description,
                         data=obj_old.data[:],
                         rate=obj_old.rate,
                         resolution=obj_old.resolution,
                         conversion=obj_old.conversion,
                         starting_time=obj_old.starting_time,
                         unit=obj_old.unit)
    #DecompositionSeries -------------------------------------------------------
    elif obj_type=='DecompositionSeries':
        # Rebuild the bands DynamicTable column by column before copying the
        # series itself.
        list_columns = []
        for item in obj_old.bands.columns:
            bp = VectorData(name=item.name,
                            description=item.description,
                            data=item.data[:])
            list_columns.append(bp)
        bandsTable = DynamicTable(name=obj_old.bands.name,
                                  description=obj_old.bands.description,
                                  columns=list_columns,
                                  colnames=obj_old.bands.colnames)
        obj = DecompositionSeries(name=obj_old.name,
                                  data=obj_old.data[:],
                                  description=obj_old.description,
                                  metric=obj_old.metric,
                                  unit=obj_old.unit,
                                  rate=obj_old.rate,
                                  #source_timeseries=lfp,
                                  bands=bandsTable,)
    #Spectrum ------------------------------------------------------------------
    elif obj_type=='Spectrum':
        file_elecs = nwb_new.electrodes
        nChannels = len(file_elecs['x'].data[:])
        elecs_region = file_elecs.create_region(name='electrodes',
                                                region=np.arange(nChannels).tolist(),
                                                description='')
        # NOTE(review): `power` is passed without `[:]`, unlike the other
        # branches — confirm whether a lazy dataset reference is intended here.
        obj = Spectrum(name=obj_old.name,
                       frequencies=obj_old.frequencies[:],
                       power=obj_old.power,
                       electrodes=elecs_region)
    return obj
| StarcoderdataPython |
4945010 | from models.basket import Basket
class Customer:
def __init__(self, name):
self.name = name
self.basket = Basket(self)
def add_gift_card(self, gift_card):
return self.basket.add_item(gift_card)
def get_total_basket_price(self):
return self.basket.get_total_cost()
| StarcoderdataPython |
1859961 | <filename>vlab_inventory_api/lib/worker/tasks.py
# -*- coding: UTF-8 -*-
"""
This module defines all tasks for creating, deleting, listing virtual vLANs.
All responses from a task *MUST* be a dictionary, and *MUST* contain the following
keys:
- ``error`` An error message about bad user input,or None
- ``params`` The parameters provided by the user, or an empty dictionary
- ``content`` The resulting output from running a task, or an empty dictionary
Example:
.. code-block:: python
# If everything works
{'error' : None,
'content' : {'vlan' : 24, 'name': 'bob_FrontEnd'}
'params' : {'vlan-name' : 'FrontEnd'}
}
# If bad params are provided
{'error' : "Valid parameters are foo, bar, baz",
'content' : {},
'params' : {'doh': 'Not a valid param'}
}
"""
from celery import Celery
from vlab_api_common import get_task_logger
from vlab_inf_common.vmware import vCenter
from vlab_inventory_api.lib import const
from vlab_inventory_api.lib.worker import vmware
app = Celery('inventory', backend='rpc://', broker=const.VLAB_MESSAGE_BROKER)
@app.task(name='inventory.show', bind=True)
def show(self, username, txn_id):
"""Obtain information about all virtual machines a user owns
:Returns: Dictionary
:param username: The owner of the virtual machines
:type username: String
:param txn_id: A unique string supplied by the client to track the call through logs
:type txn_id: String
"""
logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_INVENTORY_LOG_LEVEL.upper())
resp = {'content' : {}, 'error' : None, 'params' : {}}
logger.info('Task Starting')
try:
info = vmware.show_inventory(username)
except (FileNotFoundError, ValueError):
status = 404
resp['error'] = 'User {} has no inventory. HINT: Has the lab been initialized yet?'.format(username)
else:
resp['content'] = info
logger.info('Task Complete')
return resp
@app.task(name='inventory.delete', bind=True)
def delete(self, username, txn_id):
"""Destroy a user's inventory
:Returns: Dictionary
:param username: The owner of the inventory to delete
:type username: String
:param everything: Optionally destroy all the VMs associated with the user
:type everything: Boolean
:param txn_id: A unique string supplied by the client to track the call through logs
:type txn_id: String
"""
logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_INVENTORY_LOG_LEVEL.upper())
resp = {'content' : {}, 'error' : None, 'params' : {}}
logger.info('Task Starting')
resp['error'] = vmware.delete_inventory(username, logger)
logger.info('Task Complete')
return resp
@app.task(name='inventory.create', bind=True)
def create(self, username, txn_id):
"""Make a folder for tacking a user's VM inventory
:Returns: Dictionary
:param username: The name of the user to create a folder for
:type username: String
:param txn_id: A unique string supplied by the client to track the call through logs
:type txn_id: String
"""
logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_INVENTORY_LOG_LEVEL.upper())
resp = {'content' : {}, 'error' : None, 'params' : {}}
logger.info('Task Starting')
vmware.create_inventory(username)
logger.info('Task Complete')
return resp
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.