id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
77479 | <reponame>hajime9652/observations
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.grunfeld1 import grunfeld1
def test_grunfeld1():
  """Test module grunfeld1.py by downloading
  grunfeld1.csv and testing shape of
  extracted data has 200 rows and 5 columns.

  On failure the temporary download directory is removed before the
  assertion error is re-raised (on success it is intentionally kept,
  matching the other dataset tests in this repo).
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = grunfeld1(test_path)
  try:
    assert x_train.shape == (200, 5)
  except Exception:
    shutil.rmtree(test_path)
    # BUG FIX: was `raise()`, which raises an empty tuple (TypeError in
    # Python 3) instead of re-raising the original assertion error.
    raise
| StarcoderdataPython |
48243 | <gh_stars>0
from yuntu.core.common.utils import loadMethod,loadMethodFromFile
import itertools
def loadTransform(transformDict):
    """Resolve a transform specification to a callable.

    A spec with a "path" key is loaded from that file; otherwise the
    method is looked up by name. ``None`` specs yield ``None``.
    """
    if transformDict is None:
        return None
    method_name = transformDict["method"]
    if "path" in transformDict:
        return loadMethodFromFile(transformDict["path"], method_name)
    return loadMethod(method_name)
def getCombinations(listArr):
    """Return the Cartesian product of the given sequences as a list of tuples."""
    combos = itertools.product(*listArr)
    return [combo for combo in combos]
def filterExpr(group):
    """Build a pandas-style query expression matching every key/value pair.

    String values are quoted with single quotes; other values are rendered
    with ``str``. Terms are joined with `` & ``.

    BUG FIXES vs. the original:
    - it ended with ``return ex`` (undefined name -> NameError on every call);
    - the first term used ``cols[0]+"=="+val`` without ``str(val)``, so any
      non-string first value raised TypeError.
    An empty ``group`` now yields an empty expression instead of IndexError.
    """
    parts = []
    for col in group:
        val = group[col]
        if isinstance(val, str):
            val = "'" + val + "'"
        parts.append(col + "==" + str(val))
    return " & ".join(parts)
| StarcoderdataPython |
1768200 | #!/usr/bin/env python
import click
import pandas as pd
import numpy as np
from os import mkdir
from os.path import basename, join
from functools import partial
DATA_TYPES_NUMERIC = ('int', 'float')
FINAL_LIST = [
'study_id', 'host_scientific_name',
'latitude_deg', 'longitude_deg',
'envo_biome_3', 'empo_3',
'temperature_deg_c', 'ph', 'salinity_psu',
'oxygen_mg_per_l', 'nitrate_umol_per_l']
COLS_TO_IGNORE = [
# Sample - not relevant or too many values
'#SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'Description',
'host_subject_id', 'depth_m', 'elevation_m', 'altitude_m', 'country',
'collection_timestamp', 'sample_taxid',
# Study - not relevant or redundant with study_id
'title', 'principal_investigator', 'doi', 'ebi_accession',
# Prep
'target_gene', 'target_subfragment', 'pcr_primers', 'illumina_technology',
'extraction_center', 'run_center', 'run_date',
# Sequences
'read_length_bp', 'sequences_split_libraries',
'observations_closed_ref_greengenes', 'observations_closed_ref_silva',
'observations_open_ref_greengenes', 'observations_deblur_90bp',
'observations_deblur_100bp', 'observations_deblur_150bp',
# Subsets
'all_emp', 'qc_filtered', 'subset_10k', 'subset_5k', 'subset_2k',
# Sample type - redundant with empo_3 and envo_biome_3
'sample_scientific_name', 'envo_biome_0', 'envo_biome_1', 'envo_biome_2',
'envo_biome_4', 'envo_biome_5', 'empo_0', 'empo_1', 'empo_2', 'env_feature',
'env_material', 'env_biome',
# Alpha-diversity
'adiv_observed_otus', 'adiv_chao1', 'adiv_shannon', 'adiv_faith_pd',
# Nutrients - redundant with nitrate_umol_per_l
'phosphate_umol_per_l', 'ammonium_umol_per_l', 'sulfate_umol_per_l',
# Taxonomy - redundant with host_scientific_name
'host_superkingdom', 'host_kingdom', 'host_phylum', 'host_class',
'host_order', 'host_family', 'host_genus', 'host_species',
'host_common_name', 'host_taxid',
# Duplicated
'host_common_name_provided']
DATA_FIELDS = {
# Study
'study_id': 'categorical',
# Taxonomy
'sample_taxid': 'categorical', 'sample_scientific_name': 'categorical',
'host_taxid': 'categorical', 'host_common_name': 'categorical',
'host_scientific_name': 'categorical', 'host_superkingdom': 'categorical',
'host_kingdom': 'categorical', 'host_phylum': 'categorical',
'host_class': 'categorical', 'host_order': 'categorical',
'host_family': 'categorical', 'host_genus': 'categorical',
'host_species': 'categorical',
# Geography
'collection_timestamp': 'categorical', 'country': 'categorical',
'latitude_deg': 'float', 'longitude_deg': 'float', 'altitude_m': 'float',
'depth_m': 'float', 'elevation_m': 'float',
# Ontology
'env_biome': 'categorical', 'env_feature': 'categorical',
'env_material': 'categorical', 'envo_biome_0': 'categorical',
'envo_biome_1': 'categorical', 'envo_biome_2': 'categorical',
'envo_biome_3': 'categorical', 'envo_biome_4': 'categorical',
'envo_biome_5': 'categorical', 'empo_0': 'categorical',
'empo_1': 'categorical', 'empo_2': 'categorical', 'empo_3': 'categorical',
# Environment
'temperature_deg_c': 'float', 'ph': 'float', 'salinity_psu': 'float',
'oxygen_mg_per_l': 'float', 'phosphate_umol_per_l': 'float',
'ammonium_umol_per_l': 'float', 'nitrate_umol_per_l': 'float',
'sulfate_umol_per_l': 'float'}
@click.option('--mapping', type=click.File('rb'), help='mapping filepath')
@click.option('--output', type=click.Path(exists=False),
              help='output filepath')
@click.command()
def servicio(mapping, output):
    """Discretize the numeric columns of an EMP mapping file.

    Reads the tab-separated mapping, blanks out ignored columns, bins each
    numeric column of FINAL_LIST into quartiles and deciles (dropping columns
    whose bin count is wrong), cleans sparse categories, and writes two TSV
    files (<name>.quartiles.tsv / <name>.deciles.tsv) into a new ``output``
    directory.
    """
    if mapping is None:
        raise ValueError("You need to pass a mapping")
    if output is None:
        raise ValueError("You need to pass a output")
    # mkdir fails if the directory already exists — output must be new.
    mkdir(output)
    pj = partial(join, output)
    mn = basename(mapping.name)
    quartiles_fp = pj(mn + '.quartiles.tsv')
    deciles_fp = pj(mn + '.deciles.tsv')
    # Everything is read as str; numeric columns are converted later.
    map_ = pd.read_csv(mapping, sep='\t', dtype=str)
    map_.set_index('#SampleID', inplace=True)
    # initial cleaning:
    # blank all columns that are ignored and not explicitly kept.
    for column_name in map_.columns.values:
        if column_name in set(COLS_TO_IGNORE) - set(FINAL_LIST):
            map_[column_name] = np.nan
        else:
            # NOTE(review): dt is assigned but never used here, and this
            # raises KeyError for any column absent from DATA_FIELDS —
            # confirm whether this was meant as a validation step.
            dt = DATA_FIELDS[column_name]
    # remove all columns with only NaN
    map_.dropna(axis=1, how='all', inplace=True)
    # generate our new DataFrames
    quartiles = map_.copy(deep=True)
    deciles = map_.copy(deep=True)
    for column_name in FINAL_LIST:
        dt = DATA_FIELDS[column_name]
        if dt in DATA_TYPES_NUMERIC:
            # we need to calculate the bins so we know that nothing else
            # will fail
            quartiles[column_name] = pd.to_numeric(
                quartiles[column_name], errors='coerce')
            deciles[column_name] = pd.to_numeric(
                deciles[column_name], errors='coerce')
            quartiles[column_name], qbins = pd.qcut(
                quartiles[column_name], 4, labels=False, retbins=True,
                duplicates='drop')
            deciles[column_name], dbins = pd.qcut(
                deciles[column_name], 10, labels=False, retbins=True,
                duplicates='drop')
            # confirm that we have the expected number of bins; with
            # duplicates='drop' fewer edges mean the column is too skewed
            # to bin, so it is discarded.
            if len(qbins) != 5:
                quartiles[column_name] = np.nan
            if len(dbins) != 11:
                deciles[column_name] = np.nan
    quartiles.dropna(axis=1, how='all', inplace=True)
    deciles.dropna(axis=1, how='all', inplace=True)
    # Drop sparse categories / near-constant columns.
    for column_name in quartiles:
        quartiles[column_name] = _clean_column(quartiles[column_name])
    for column_name in deciles:
        deciles[column_name] = _clean_column(deciles[column_name])
    quartiles.dropna(axis=1, how='all', inplace=True)
    deciles.dropna(axis=1, how='all', inplace=True)
    # Literal 'nan' strings make missing values explicit in the TSV output.
    quartiles.fillna('nan', inplace=True)
    deciles.fillna('nan', inplace=True)
    quartiles.to_csv(quartiles_fp, sep='\t')
    deciles.to_csv(deciles_fp, sep='\t')
def _clean_column(column):
# let's check the size of the groups and discard anything that
# has less than 50 samples or represents under 0.03 of the size
counts = pd.DataFrame(
[column.value_counts(),
column.value_counts(normalize=True)],
index=['counts', 'perc']).T
check = (counts['counts'] < 50) | (counts['perc'] < 0.003)
replace = check.index[check]
if replace.size != 0:
column.replace({v: np.nan for v in replace}, inplace=True)
if column.nunique() == 1:
column = np.nan
return column
if __name__ == '__main__':
servicio()
| StarcoderdataPython |
143325 | import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os  # BUG FIX: os.environ is used below but os was never imported

# Credentials come from the environment; fail fast with KeyError if unset.
apikey = os.environ['apikey']
url = os.environ['url']
# BUG FIX: was IAMAuthenticator('apikey') — the literal string 'apikey'
# was passed instead of the key read from the environment.
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
    version='2018-05-01',
    authenticator=authenticator
)
# NOTE(review): the 'url' env var is read but unused; the endpoint is
# hard-coded — confirm whether set_service_url(url) was intended.
language_translator.set_service_url('https://api.us-south.language-translator.watson.cloud.ibm.com')
def english_to_french(english_text):
    """Translate English text to French via the Watson "en-fr" model.

    :param english_text: text to translate.
    :returns: the first translation string from the service response.
    """
    translation = language_translator.translate(text=english_text, model_id="en-fr").get_result()
    french_text = translation['translations'][0]['translation']
    return french_text
def french_to_english(french_text):
    """Translate French text to English via the Watson "fr-en" model.

    :param french_text: text to translate.
    :returns: the first translation string from the service response.
    """
    translation = language_translator.translate(text=french_text, model_id='fr-en').get_result()
    english_text = translation['translations'][0]['translation']
    return english_text
| StarcoderdataPython |
3241817 | import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import numpy as np
from app import app
from scripts.read_data import get_language
# Navbar fragment: a language dropdown (English/French), an "About" link,
# and the hidden modal the link opens (wired up by the callbacks below).
layout = dbc.Nav([
    dbc.DropdownMenu(
        [dbc.DropdownMenuItem(["English ", html.I(className='fa fa-language')], className="drop-items", id="english"),
         # dbc.DropdownMenuItem(["Spanish ", html.I(className='fa fa-language')], className="drop-items", id="spanish"),
         dbc.DropdownMenuItem(["French ", html.I(className='fa fa-language')], className="drop-items", id="french")],
        label="Language", id='language', nav=True, className="ml-2", disabled=False,
    ),
    dbc.NavItem(dbc.NavLink("About", id='about'), className="ml-2"),
    # Modal content (title/body/labels) is filled in by update_language.
    dbc.Modal(
        [
            dbc.ModalHeader(id='about-title'),
            dbc.ModalBody(id='about-body'),
            dbc.ModalFooter(
                dbc.Button("Close", id="about-close", className="ml-auto")
            ),
        ],
        size="lg",
        id="about-modal",
    )
])
@app.callback(
    Output("about-modal", "is_open"),
    [Input("about", "n_clicks"),
     Input("about-close", "n_clicks")],
    [State("about-modal", "is_open")],
)
def toggle_popover(n_1, n_2, is_open):
    """Flip the About modal open/closed whenever either the "About" link
    or the modal's Close button has been clicked; otherwise keep state."""
    clicked = bool(n_1) or bool(n_2)
    if not clicked:
        return is_open
    return not is_open
@app.callback(
    Output('current-language', 'data'),
    [Input(language, 'n_clicks_timestamp') for language in ['english', 'french']],
    [State('current-language', 'data')],
    prevent_initial_call=True
)
def current_language(n1, n2, language):
    """Persist the most recently clicked language menu item.

    n1/n2 are the click timestamps of the 'english' and 'french' items;
    the one clicked last (largest timestamp) wins.
    """
    language_list = ['english', 'french']
    n_list = []
    # Normalize missing timestamps to 0 so argmax below is well defined.
    for n in [n1, n2]:
        if n is None:
            n_list.append(0)
        else:
            n_list.append(n)
    if (n1 == n2) and language:
        # No distinguishable new click: keep the stored language.
        language = language
    else:
        # Pick whichever item was clicked last.
        language_index = np.array(n_list).argmax()
        language = language_list[language_index]
    return language
@app.callback(
    [Output('about', 'children'),
     Output('about-title', 'children'),
     Output('about-body', 'children'),
     Output('about-close', 'children'),
     Output('language', 'label')],
    [Input('current-language', 'modified_timestamp')],
    [State('current-language', 'data')],
)
def update_language(ts, language):
    """Re-render every localized navbar/modal string from the language
    dictionary whenever the stored language changes."""
    if not language:
        # Store not initialized yet: leave the UI untouched.
        raise PreventUpdate
    language_dic = get_language(language)
    # Four body paragraphs followed by a row of partner logos/links.
    about_content = [html.P(language_dic['about']['body'][0]), html.P(language_dic['about']['body'][1]),
                     html.P(language_dic['about']['body'][2]), html.P(language_dic['about']['body'][3]),
                     dbc.Row([dbc.Col(html.A(html.Img(src='../assets/kth.png', style={'height': '130px'}),
                                             href='https://www.energy.kth.se/energy-systems/about-the-division-of-energy-systems-1.937036'),
                                      width=3),
                              dbc.Col(html.A(html.Img(src='../assets/sei.png', style={'height': '130px'}),
                                             href='https://www.sei.org/'), width=4),
                              dbc.Col(html.A(html.Img(src='../assets/fao.png', style={'height': '130px'}),
                                             href='http://www.fao.org/home/en/'), width=2)], justify="center")
                     ]
    return language_dic['about']['header'], \
        language_dic['about']['title'], \
        about_content, language_dic['about']['close'], \
        language_dic['language']
| StarcoderdataPython |
4834280 | <reponame>urig/entropy<filename>entropylab/api/tests/test_plot.py
from entropylab.api.plot import CirclePlotGenerator
from plotly.graph_objects import Figure
def test_circle_plot_plotly():
    """Smoke test: CirclePlotGenerator.plot_plotly accepts a plotly Figure
    and a 2xN data list without raising."""
    target = CirclePlotGenerator()
    figure = Figure()
    data = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    target.plot_plotly(figure, data)
    # (dead `i = 0` removed — it asserted nothing and was never used)
| StarcoderdataPython |
1686151 | <gh_stars>0
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from django.conf import settings
from .models import Email
class EmailServer(object):
    """Thin wrapper around a Gmail SMTP session (Python 2 email API)."""

    def __init__(self):
        # Opens the TLS connection immediately; credentials come from
        # Django settings.
        self.server = smtplib.SMTP('smtp.gmail.com', 587)
        self.server.starttls()
        self.server.login(settings.EMAIL_USERNAME, settings.EMAIL_PASSWORD)

    def tear_down(self):
        # Close the SMTP session; the instance cannot send after this.
        self.server.quit()

    def send_email(self, recipient, message, subject):
        """Send a plain-text email from the configured account to *recipient*."""
        msg = MIMEMultipart()
        msg['From'] = settings.EMAIL_USERNAME
        msg['To'] = recipient
        msg['Subject'] = subject
        body = message
        msg.attach(MIMEText(body, 'plain'))
        text = msg.as_string()
        self.server.sendmail(settings.EMAIL_USERNAME, recipient, text)
def send_substitution_form_email(event, requestee, requester):
    """Queue a substitution-request notification for *requestee* and kick
    off the async task that delivers unsent emails.

    The Email row is created first so the task (imported locally to avoid a
    circular import) can pick it up.
    """
    from .tasks import send_unsent_emails
    msg = """
Hello, {}!
{} has requested a substitution for {} at {}. Please log on to DUMBDash to either accept or decline the substitution request.
Sincerely,
SG
""".format(
        requestee.account.first_name,
        requester.account.get_full_name(),
        event.title,
        event.time.strftime("%I:%M %p on %B %d, %Y"))
    subject = "{} has requested a substitution".format(requester.account.get_full_name())
    Email.objects.create(recipient=requestee.account.email, subject=subject, body=msg)
    send_unsent_emails.apply_async(())
| StarcoderdataPython |
3326923 | <reponame>GArmane/python-fastapi-hex-todo<gh_stars>10-100
from faker.providers import BaseProvider
from faker import Faker
from passlib.hash import argon2
fake = Faker()
class PasswordHashProvider(BaseProvider):
    """Faker provider that yields argon2 hashes of random strings."""

    def password_hash(self) -> str:
        # Hash a fresh random string; the argon2 encoded hash is returned
        # as a plain str.
        return str(argon2.hash(fake.pystr()))
| StarcoderdataPython |
1782226 | # robothon06
# rasterise the shape in glyph "A"
# and draw boxes in a new glyph named "A.silly"
from robofab.world import CurrentFont, CurrentGlyph
sourceGlyph = "a"
f = CurrentFont()
source = f[sourceGlyph]
# find out how big the shape is from the glyph.box attribute
xMin, yMin, xMax, yMax = source.box
# create a new glyph named after the source, e.g. "a.silly"
dest = f.newGlyph(sourceGlyph+".silly")
dest.width = source.width
# get a pen to draw in the new glyph
myPen = dest.getPen()
def drawRect(pen, x, y, size=50):
    """Draw an axis-aligned square of side *size* centered on (x, y)."""
    half = .5 * size
    pen.moveTo((x - half, y - half))
    pen.lineTo((x + half, y - half))
    pen.lineTo((x + half, y + half))
    pen.lineTo((x - half, y + half))
    pen.closePath()
# the size of the raster unit
resolution = 30
# draw from top to bottom
# (Python 2: range() returns a list, so reverse() works in place)
yValues = range(yMin, yMax, resolution)
yValues.reverse()
# go for it!
for y in yValues:
    for x in range(xMin, xMax, resolution):
        # check the source glyph is white or black at x,y
        if source.pointInside((x, y)):
            drawRect(myPen, x, y, resolution-5)
    # update for each line if you like the animation
    # otherwise move the update() out of the loop
    dest.update()
| StarcoderdataPython |
125866 | # -*- coding: utf-8 -*-
from dataclasses import dataclass
from pprint import pprint
from serpyco import Serializer
@dataclass
class Point(object):
    """A 2-D point; serpyco derives its JSON schema from these annotations."""
    x: float
    y: float
serializer = Serializer(Point)
# Show the generated JSON schema and a successful load.
pprint(serializer.json_schema())
pprint(serializer.load({"x": 3.14, "y": 1.5}))
try:
    # Invalid payload: "y" is not a float, so load() raises a validation error.
    serializer.load({"x": 3.14, "y": "wrong"})
except Exception as ex:
    pprint(ex)
pprint(serializer.dump(Point(x=3.14, y=1.5)))
try:
    # dump() only checks types when validate=True, making this raise too.
    serializer.dump(Point(x=3.14, y="wrong"), validate=True)
except Exception as ex:
    pprint(ex)
| StarcoderdataPython |
1711301 | <reponame>the-fridge/Python_Projects<gh_stars>1-10
''' You need to install geopy first using
pip3 install geopy
'''
from geopy.geocoders import Nominatim
# This project gives you the location of the city you
# entered along with its latitude and longitude
'''
For this program to work an internet connection is required
as Nominatim fetches the location from its server.
'''
city = input("Enter City: ")
# NOTE(review): recent geopy versions require Nominatim(user_agent=...);
# this bare call assumes an older geopy release — confirm the pinned version.
location = Nominatim().geocode(city)  # Fetches the location (needs network)
print(location)
print("Latitude : %s" % location.latitude)
print("Longitude : %s" % location.longitude)
| StarcoderdataPython |
127660 | import RPi.GPIO as GPIO
import time
import thread
redled = 17  # Red LED connected to G17
redbtn = 16  # red button connected G16
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setup(redled, GPIO.OUT, initial = GPIO.LOW)  # HIGH=1 LOW=0
GPIO.setup(redbtn, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)  # HIGH=1 LOW=0
def blinked():
    """Blink the red LED 99 times (0.1 s on / 0.1 s off)."""
    for i in range(1, 100):
        GPIO.output(redled, GPIO.HIGH)  # turn on led
        time.sleep(0.10)
        GPIO.output(redled, GPIO.LOW)  # turn off led
        time.sleep(0.10)
def btnPressed(channel):
    """GPIO edge callback: blink on a worker thread so the callback
    returns quickly (Python 2 `thread` module)."""
    thread.start_new_thread(blinked, ())

# Fire btnPressed on each rising edge, debounced to 300 ms.
GPIO.add_event_detect(redbtn, GPIO.RISING, callback = btnPressed, bouncetime = 300)
# Keep the main thread alive so edge callbacks keep firing.
while True:
    time.sleep(10)
    pass
| StarcoderdataPython |
1691123 | <gh_stars>1-10
import threading
NUM_THREAD = 10
printed = False
def print_text():
    """Print a fixed message; used as the worker target for each thread."""
    message = "printed once"
    print(message)
threads = []
# Start NUM_THREAD workers, each printing once.
for i in range(NUM_THREAD):
    t = threading.Thread(target=print_text)
    threads.append(t)
    t.start()
# Wait for all workers to finish.
for i in range(NUM_THREAD):
    threads[i].join()
| StarcoderdataPython |
1692520 | from django.db import models
from django.utils import timezone
# Create your models here.
class City(models.Model):
    """A named city, displayed by its name."""
    name = models.CharField(max_length=25)

    def __str__(self):
        return self.name

    class Meta:
        # Avoid Django's default auto-plural "citys" in the admin.
        verbose_name_plural = 'cities'
class Data(models.Model):
    """Export-request settings: a timestamped file name, a day span, and
    per-sensor boolean flags selecting which readings to include."""
    # Default export file name, fixed at class-definition (import) time.
    name = f'{str(timezone.now()).replace(" ", "_")}_export.json'
    export_name = models.CharField(max_length=int(len(name) + 5), default=name)
    span_int = models.IntegerField(name="span_int", default=7)
    wind_direction = models.BooleanField(name='wind_direction', default=False)
    avg_wind_speed = models.BooleanField(name="avg_wind_speed", default=False)
    wind_gust = models.BooleanField(name="wind_gust", default=False)
    rainfall = models.BooleanField(name="rainfall", default=False)
    humidity = models.BooleanField(name="humidity", default=False)
    ambient_temp = models.BooleanField(name="ambient_temp", default=False)
    ground_temp = models.BooleanField(name="ground_temp", default=False)
    pressure = models.BooleanField(name="pressure", default=False)
    timestamps = models.BooleanField(name="timestamps", default=False)

    def export(self):
        """Placeholder for the export routine; not implemented yet."""
        pass

    def __str__(self):
        # BUG FIX: previously returned a tuple of every field, which is not
        # a str and makes str(instance) raise TypeError.
        return self.export_name
class Config(models.Model):
    """Placeholder configuration model; no fields defined yet."""

    def __str__(self):
        # BUG FIX: __str__ previously returned None (bare pass), which makes
        # str(instance) raise TypeError; return a valid string instead.
        return f'Config {self.pk}'
| StarcoderdataPython |
62749 | <filename>app/models/__init__.py
from .base import Base
from .user import User
from .todolist import List
from .card import Card
| StarcoderdataPython |
110879 | from typing import Any, Union
from unittest.mock import Mock
import pystac
class MockStacIO(pystac.StacIO):
    """Creates a mock that records StacIO calls for testing and allows
    clients to replace StacIO functionality, all within a context scope.
    """

    def __init__(self) -> None:
        # Records every read/write call for later assertions in tests.
        self.mock = Mock()

    def read_text(
        self, source: Union[str, pystac.Link], *args: Any, **kwargs: Any
    ) -> str:
        """Record the read, then delegate to the default StacIO."""
        self.mock.read_text(source)
        return pystac.StacIO.default().read_text(source)

    def write_text(
        self, dest: Union[str, pystac.Link], txt: str, *args: Any, **kwargs: Any
    ) -> None:
        """Record the write, then delegate to the default StacIO."""
        self.mock.write_text(dest, txt)
        pystac.StacIO.default().write_text(dest, txt)
| StarcoderdataPython |
85647 | <gh_stars>0
# From https://stackoverflow.com/a/49375740/827927
# import os, sys
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
| StarcoderdataPython |
47683 | '''
.. moduleauthor:: <NAME> / estani
This module manages the abstraction of a user providing thus all information about him/her that
might be required anywhere else.
'''
import pwd
import os
import sys
from ConfigParser import SafeConfigParser as Config
from evaluation_system.misc import config, utils
from evaluation_system.model.db import UserDB
class User(object):
    '''
    This class encapsulates a user (configuration directories and DB record).

    NOTE: this is Python 2 code (ConfigParser, basestring, octal 0755).
    '''
    CONFIG_DIR = 'config'
    "The directory name where all plug-in/system configurations will be stored."
    CACHE_DIR = 'cache'
    "The temporary directory where plug-ins can store files while performing some computation."
    OUTPUT_DIR = 'output'
    "The directory where output files are stored. Intended for files containing data and thus taking much space."
    PLOTS_DIR = 'plots'
    """The directory where just plots are stored. Plots are assumed to be much smaller in size than data and might
    therefore live longer"""
    PROCESSES_DIR = 'processes'
    "The directory might handle information required for each running process."
    EVAL_SYS_CONFIG = os.path.join(CONFIG_DIR,'evaluation_system.config')
    """The file containing a central configuration for the whole system (user-wise)"""
    EVAL_SYS_DEFAULT_CONFIG = os.path.normpath(os.path.dirname(sys.modules[__name__].__file__)+'/../../etc/system_default.config')
    """The central default configuration file for all users. It should not be confused with the system configuration
    file that is handled by :class:`evaluation_system.api.config`."""

    def __init__(self, uid = None, email = None):
        '''Creates a user object for the provided id. If no id is given, a user object for
        the current user, i.e. the one that started the application, is created instead.

        :type uid: int
        :param uid: user id in the local system, if not provided the current user is used.
        :type email: str
        :param email: user's email address
        '''
        self._dir_type = config.get(config.DIRECTORY_STRUCTURE_TYPE)
        if uid is None:
            uid = os.getuid()
        self._userdata = None
        # Accept either a user name (string) or a numeric uid.
        if isinstance(uid, basestring):
            self._userdata = pwd.getpwnam(uid)
        else:
            self._userdata = pwd.getpwuid(uid)
        if self._userdata is None:
            raise Exception("Cannot find user %s" % uid)
        if email is None:
            self._email = ''
        else:
            self._email = email
        self._userconfig = Config()
        # Load configuration for the first time: system defaults first,
        # then the user's own file (in $HOME) overrides them.
        self._userconfig.read([User.EVAL_SYS_DEFAULT_CONFIG, os.path.join(self._userdata.pw_dir, User.EVAL_SYS_CONFIG)])
        self._db = UserDB(self)
        row_id = self._db.getUserId(self.getName())
        if row_id:
            # Known user: refresh the login record.
            # NOTE(review): this try/except only re-raises and adds nothing —
            # confirm whether failures were meant to be swallowed instead.
            try:
                self._db.updateUserLogin(row_id, email)
            except:
                raise
            pass
        else:
            # First time this user is seen: create the DB record.
            self._db.createUser(self.getName(), email=self._email)

    def __str__(self):
        return "<User (username:%s, info:%s)>" % (self._userdata[0], str(self._userdata[2:]))

    def getUserConfig(self):
        """:returns: the user configuration object :py:class:`ConfigParser.SafeConfigParser`"""
        return self._userconfig

    def getUserDB(self):
        """:returns: the db abstraction for this user.
        :rtype: :class:`evaluation_system.model.db.UserDB`"""
        return self._db

    def reloadConfig(self):
        """Reloads user central configuration from disk (not the plug-in related one).

        NOTE(review): this reads the user file from getUserBaseDir() while
        __init__ reads it from $HOME — confirm which location is canonical.
        """
        self._userconfig = Config()
        self._userconfig.read([User.EVAL_SYS_DEFAULT_CONFIG, os.path.join(self.getUserBaseDir(), User.EVAL_SYS_CONFIG)])
        return self._userconfig

    def writeConfig(self):
        """Writes the user central configuration to disk according to :class:`EVAL_SYS_CONFIG`"""
        fp = open(os.path.join(self.getUserBaseDir(), User.EVAL_SYS_CONFIG), 'w')
        self._userconfig.write(fp)
        fp.close()

    def getName(self):
        """:returns: the user name
        :rtype: str"""
        return self._userdata.pw_name

    def getEmail(self):
        """
        :returns: user's email address. Maybe None. :rtype: str
        """
        return self._email

    def getUserID(self):
        """:returns: the user id.
        :rtype: int"""
        return self._userdata.pw_uid

    def getUserHome(self):
        """:returns: the path to the user home directory.
        :rtype: str"""
        return self._userdata.pw_dir

    def getUserScratch(self):
        """:returns: the path to the user's scratch directory.
        :rtype: str"""
        path = config.get(config.SCRATCH_DIR)
        # The configured path may contain a $USER placeholder.
        path = path.replace('$USER', self.getName())
        return path

    def _getUserBaseDir(self):
        # Resolve the user's base directory according to the configured
        # directory-structure layout (local HOME / central / scratch).
        if self._dir_type == config.DIRECTORY_STRUCTURE.LOCAL:
            return os.path.join(self.getUserHome(), config.get(config.BASE_DIR))
        elif self._dir_type == config.DIRECTORY_STRUCTURE.CENTRAL:
            return os.path.join(config.get(config.BASE_DIR_LOCATION), config.get(config.BASE_DIR), str(self.getName()))
        elif self._dir_type == config.DIRECTORY_STRUCTURE.SCRATCH:
            return os.path.join(config.get(config.BASE_DIR_LOCATION), str(self.getName()), config.get(config.BASE_DIR))

    def _getUserDir(self, dir_type, tool = None, create=False):
        """Resolve (and optionally create) one of the per-user directories.

        :param dir_type: one of the keys of ``base_dir`` below.
        :param tool: optional plug-in name for a tool-specific subdirectory.
        :param create: create the directory if it does not exist yet.
        :returns: absolute path to the directory."""
        base_dir = dict(base='', config=User.CONFIG_DIR, cache=User.CACHE_DIR, output=User.OUTPUT_DIR, \
                        plots=User.PLOTS_DIR, processes=User.PROCESSES_DIR, \
                        scheduler_in=config.get(config.SCHEDULER_INPUT_DIR), \
                        scheduler_out=config.get(config.SCHEDULER_OUTPUT_DIR))
        if tool is None:
            bd = base_dir[dir_type]
            # concatenate relative paths only; absolute entries are used as-is
            if bd and bd[0]=='/':
                dir_name = bd
            else:
                # return the directory where the tool configuration files are stored
                dir_name = os.path.join(self._getUserBaseDir(), bd)
        else:
            # It's too confusing if we create case sensitive directories...
            tool = tool.lower()
            # return the specific directory for the given tool
            dir_name = os.path.join(self._getUserBaseDir(), base_dir[dir_type], tool)
        # make sure we have a canonical path
        dir_name = os.path.abspath(dir_name)
        if create and not os.path.isdir(dir_name):
            # we are letting this fail in case of problems.
            utils.supermakedirs(dir_name, 0755)
        return dir_name

    def getUserBaseDir(self, **kwargs):
        """Returns path to where this system is managing this user data.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :returns: (str) path"""
        return self._getUserDir('base', **kwargs)

    def getUserSchedulerInputDir(self, **kwargs):
        """Returns path to the scheduler input directory for this user.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :returns: (str) path"""
        return self._getUserDir('scheduler_in', **kwargs)

    def getUserSchedulerOutputDir(self, **kwargs):
        """Returns path to the scheduler output directory for this user.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :returns: (str) path"""
        return self._getUserDir('scheduler_out', **kwargs)

    def getUserToolConfig(self, tool, **kwargs):
        """Returns the path to the configuration file.

        :param kwargs: ``create`` := If ``True`` assure the underlying directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned.
        :type tool: str
        :returns: path to the configuration file."""
        config_dir = self._getUserDir('config', tool, **kwargs)
        return os.path.join(config_dir,'%s.conf' % tool)

    def getUserConfigDir(self, tool = None, **kwargs):
        """Return the path to the directory where all configurations for this user are stored.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned. If None, then the directory
                     where all information for all tools reside is returned instead (normally, that would
                     be the parent directory).
        :type tool: str
        :returns: path to the directory."""
        return self._getUserDir('config', tool, **kwargs)

    def getUserCacheDir(self, tool = None, **kwargs):
        """Return directory where cache files for this user (might not be "only" for this user though).

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned. If None, the parent
                     directory for all tools is returned instead.
        :type tool: str
        :returns: path to the directory."""
        return self._getUserDir('cache', tool, **kwargs)

    def getUserProcessDir(self, tool = None, **kwargs):
        """Return directory where files required for processes can be held.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned. If None, the parent
                     directory for all tools is returned instead.
        :type tool: str
        :returns: path to the directory."""
        return self._getUserDir('processes', tool, **kwargs)

    def getUserOutputDir(self, tool = None, **kwargs):
        """Return directory where output data for this user is stored.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned. If None, the parent
                     directory for all tools is returned instead.
        :type tool: str
        :returns: path to the directory."""
        return self._getUserDir('output', tool, **kwargs)

    def getUserPlotsDir(self, tool = None, **kwargs):
        """Return directory where all plots for this user are stored.

        :param kwargs: ``create`` := If ``True`` assure the directory exists after the call is done.
        :param tool: tool/plug-in for which the information is returned. If None, the parent
                     directory for all tools is returned instead.
        :type tool: str
        :returns: path to the directory."""
        return self._getUserDir('plots', tool, **kwargs)

    def prepareDir(self):
        """Prepares the configuration directory for this user if it's not already been done."""
        if os.path.isdir(self.getUserBaseDir()):
            # we assume preparation was successful... but we might want to
            # double check in the future.
            pass
        if not os.path.isdir(self.getUserHome()):
            raise Exception("Can't create configuration, user HOME doesn't exist (%s)" % self.getUserHome())
        # create directory for the framework and all required subdirectories
        dir_creators = [self.getUserBaseDir,
                        self.getUserConfigDir,
                        self.getUserCacheDir,
                        self.getUserOutputDir,
                        self.getUserPlotsDir,
                        self.getUserSchedulerInputDir,
                        self.getUserSchedulerOutputDir,]
        for f in dir_creators:
            f(create=True)
| StarcoderdataPython |
135046 | import random
from datetime import datetime
random.seed(datetime.now())
class SoS(object):
    """A minimal system-of-systems driver: a set of constituent systems
    (CSs) acting on a shared, mutable environment."""

    def __init__(self, CSs, environment):
        self.CSs = CSs
        self.environment = environment

    def run(self, tick):
        """Let every constituent system act once, in random order.

        Collects each truthy act() result, then appends a snapshot of the
        environment, and returns the combined log list.
        """
        logs = []
        random.shuffle(self.CSs)
        for system in self.CSs:
            outcome = system.act(tick, self.environment)
            if outcome:
                logs.append(outcome)
        logs.append(str(self.environment))
        return logs

    def reset(self):
        """Reset every constituent system and zero the environment."""
        for system in self.CSs:
            system.reset()
        self.resetEnvironment()

    def resetEnvironment(self):
        """Zero out every slot of the shared environment in place."""
        for idx in range(len(self.environment)):
            self.environment[idx] = 0
| StarcoderdataPython |
149331 | # Generated by Django 3.2.8 on 2021-10-25 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: narrow Activity.cost to a 2-character choice field
    (HI/ME/LO)."""

    dependencies = [
        ('itinerary', '0007_auto_20211025_1153'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activity',
            name='cost',
            field=models.CharField(choices=[('HI', 'High'), ('ME', 'Medium'), ('LO', 'Low')], max_length=2),
        ),
    ]
| StarcoderdataPython |
3365294 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.http import Http404
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, HttpResponse, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from home.models import POST, Category, IMAGE_STORE
from datetime import datetime
from django.core.paginator import Paginator, PageNotAnInteger
from google.appengine.api import memcache
import json
from django.views.decorators.cache import cache_page
from django.db.models import Q
from home.function import MultiCookie
from google.appengine.ext import blobstore
from google.appengine.api import images
import cgi
now = datetime.now()
@cache_page(60 * 5)
def index(request, views=None):
    """Home page: list posts ordered by date, view count, or comment count
    (selected via the ``views`` URL slug), with a 5-minute page cache and
    memcache-backed (300 s) querysets.
    """
    posts_list = None
    views_most = False
    comment_most = False
    lang = request.LANGUAGE_CODE
    if views is not None and views == "xem-nhieu":
        # "most viewed" listing
        views_most = True
        posts_list = memcache.get('post-views')
        if posts_list is None:
            posts_list = POST.objects.all().order_by('-views')
            memcache.set('post-views', list(posts_list), 300)
    elif views is not None and views == "binh-luan-nhieu":
        # "most commented" listing
        comment_most = True
        posts_list = memcache.get('post-comments')
        if posts_list is None:
            posts_list = POST.objects.all().order_by('-comments')
            memcache.set('post-comments', list(posts_list), 300)
    else:
        # default front page: newest first
        posts_list = memcache.get('post-trang-chu')
        if posts_list is None:
            posts_list = POST.objects.all().order_by('-date')
            memcache.set('post-trang-chu', list(posts_list), 300)
    # Only the first page (5 posts) is rendered here.
    paginator = Paginator(posts_list, 5)
    posts = paginator.page(1)
    categories = memcache.get('categories-'+lang)
    if categories is None:
        categories = Category.objects.all().order_by('order')
        memcache.set('categories-'+lang, list(categories), 300)
    # Language-switch target URL for the current listing.
    redirect = "/"
    if views_most == True:
        redirect = set_redirect(lang,"xem_nhieu")
    elif comment_most == True:
        redirect = set_redirect(lang,"binh_luan_nhieu")
    return render_to_response('home/index.html', {"redirect":redirect, "posts":posts, "categories":categories, "lang":lang}, context_instance=RequestContext(request))
def set_redirect(lang="vi", type_redirect_page=None, post=None, category=None):
    """Build the URL handed to templates as "redirect" for the current page.

    Note the deliberate-looking cross mapping: for ``lang == 'vi'`` the
    English-style URL is returned and vice versa -- presumably this feeds a
    language-switcher link (TODO confirm against the templates).

    Args:
        lang: current language code, 'vi' or 'en' (anything else -> '/').
        type_redirect_page: one of 'xem_nhieu', 'binh_luan_nhieu',
            'detail', 'category', or None.
        post: POST instance, required when type is 'detail'.
        category: Category instance, required when type is 'category'.
    """
    fixed_urls = {
        ("xem_nhieu", "vi"): "/most-view/",
        ("xem_nhieu", "en"): "/xem-nhieu/",
        ("binh_luan_nhieu", "en"): "/binh-luan-nhieu/",
        ("binh_luan_nhieu", "vi"): "/most-comment/",
    }
    fixed = fixed_urls.get((type_redirect_page, lang))
    if fixed is not None:
        return fixed
    if type_redirect_page == "detail":
        if lang == "en":
            return "/" + post.category.slug + "/" + post.slug + "/"
        if lang == "vi":
            return "/" + post.category.slug_en + "/" + post.slug_en + "/"
    if type_redirect_page == "category":
        if lang == "en":
            return "/" + category.slug + "/"
        if lang == "vi":
            return "/" + category.slug_en + "/"
    return "/"
def get_posts(request):
    """AJAX endpoint: render one page (5 posts) of a listing as JSON HTML.

    POST params: 'page' (page number), optional 'category' (slug; looked up
    by 'slug' for vi, 'slug_en' otherwise). Returns {"html": ...} or HTTP
    400 on non-POST requests / invalid page numbers.
    """
    if request.method == 'POST':
        category = None
        page = request.POST.get('page')
        if "category" in request.POST:
            category = request.POST["category"]
            if request.LANGUAGE_CODE == 'vi':
                cate= get_object_or_404(Category,slug=category)
            else:
                cate= get_object_or_404(Category,slug_en=category)
            # Category listings are cached per-slug for 300 s.
            posts_list = memcache.get('categories-%s' % category)
            if posts_list is None:
                posts_list = POST.objects.filter(category=cate).order_by('-date')
                memcache.set('categories-%s' % category, list(posts_list), 300)
        else:
            posts_list = memcache.get('post-trang-chu')
            if posts_list is None:
                posts_list = POST.objects.all().order_by('-date')
        paginator = Paginator(posts_list, 5)
        try:
            posts = paginator.page(page)
        except PageNotAnInteger:
            return HttpResponse(status=400)
        data = {"posts":posts, "lang":request.LANGUAGE_CODE}
        if category is not None:
            data["cate_current"] = category
        html = render_to_string("post/post_ajax.html", data)
        serialized_data = json.dumps({"html": html})
        return HttpResponse(serialized_data, mimetype='application/json')
    return HttpResponse(status=400)
def get_posts_detail_more(request):
    """AJAX endpoint: render one page (6 posts) of related posts for the
    detail page, split by whether the visitor has already viewed them.

    POST params: 'page', 'type' ("viewed" -> only already-viewed posts,
    anything else -> only unviewed posts) and 'category' (vi slug). The
    viewed-post ids come from the 'viewed_post' MultiCookie.
    """
    if request.method == 'POST':
        page = request.POST.get('page')
        typeGet = request.POST.get('type')
        category = request.POST["category"]
        cate= get_object_or_404(Category,slug=category)
        oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
        list_viewed = oldcookie.values
        if list_viewed is None:
            list_viewed = []
        if "viewed" == typeGet:
            posts_list = POST.objects.filter(pk__in=list_viewed,category=cate).order_by('-date')
        else:
            posts_list = POST.objects.filter(~Q(pk__in=list_viewed),category=cate).order_by('-date')
        paginator = Paginator(posts_list, 6)
        try:
            posts = paginator.page(page)
        except PageNotAnInteger:
            return HttpResponse(status=400)
        data = {"posts":posts, "type":typeGet, "lang":request.LANGUAGE_CODE}
        if category is not None:
            data["cate_current"] = category
        html = render_to_string("post/more_post_detail.html", data)
        serialized_data = json.dumps({"html": html})
        return HttpResponse(serialized_data, mimetype='application/json')
    return HttpResponse(status=400)
@cache_page(60 * 4)
def detail_post(request, category=None, slug=None):
    """Post detail page.

    The slug is first looked up in the *other* language's slug field and
    falls back to the current language's field -- this lets one URL serve
    both language variants. Increments the view counter and records the
    post id in the 'viewed_post' cookie.
    """
    lang = request.LANGUAGE_CODE
    if lang == 'vi':
        try:
            post = POST.objects.get(slug_en=slug)
        except:
            post = get_object_or_404(POST, slug=slug)
    else:
        try:
            post = POST.objects.get(slug=slug)
        except:
            post = get_object_or_404(POST, slug_en=slug)
    post.updateView()
    oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
    list_viewed = oldcookie.values
    if list_viewed is None:
        list_viewed = [post.id]
    else:
        if exits_in_array(list_viewed, post.id) == False:
            list_viewed.append(post.id)
    categories = Category.objects.all().order_by('order')
    # Alternate-language URL for the language switcher (see set_redirect).
    redirect = set_redirect(lang, "detail", post)
    response = render_to_response('home/detail.html', {"redirect":redirect,"post":post,"categories":categories, "lang":lang}, context_instance=RequestContext(request))
    newcookie = MultiCookie(values=list_viewed)
    response.set_cookie('viewed_post',value=newcookie)
    return response
@cache_page(60 * 15)
def category(request, category=None):
    """Category listing page (first page of 5 posts).

    Like detail_post, the slug is tried against the other language's slug
    field first, then the current one. The category's post list is cached
    in memcache for 300 s under the raw slug.
    """
    lang = request.LANGUAGE_CODE
    if lang == 'vi':
        try:
            cate= Category.objects.get(slug_en=category)
        except:
            cate= get_object_or_404(Category,slug=category)
    else:
        try:
            cate= Category.objects.get(slug=category)
        except:
            cate= get_object_or_404(Category,slug_en=category)
    posts_list = memcache.get(category)
    if posts_list is None:
        posts_list = POST.objects.filter(category=cate).order_by('-date')
        memcache.set(category, list(posts_list), 300)
    paginator = Paginator(posts_list, 5)
    posts = paginator.page(1)
    categories = Category.objects.all().order_by('order')
    redirect = set_redirect(lang, "category", None, cate)
    return render_to_response('home/index.html', {"redirect": redirect ,"posts":posts,"categories":categories, "cate_current":cate,"lang":request.LANGUAGE_CODE}, context_instance=RequestContext(request))
def get_array_field(dict_list, field):
    """Return the value of attribute *field* from every object in *dict_list*."""
    return [getattr(item, field) for item in dict_list]
def exits_in_array(dict_list, ele):
    """Return True if *ele* is present in *dict_list* (by equality).

    The (misspelled) public name is kept so existing callers keep working;
    the hand-rolled loop is replaced by Python's built-in membership test,
    which performs the same element-by-element ``==`` comparison.
    """
    return ele in dict_list
def category_post_relative(request, category=None):
    """AJAX endpoint: first page (6 each) of a category's posts, split into
    not-yet-viewed and already-viewed lists based on the 'viewed_post'
    cookie. Returns {"html": ...}.
    """
    # NOTE: 'post' is read from the query string but never used below.
    post=request.GET["post"]
    oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
    list_viewed = oldcookie.values
    if list_viewed is None:
        list_viewed = []
    if request.LANGUAGE_CODE == "vi":
        cate= get_object_or_404(Category,slug=category)
    else:
        cate= get_object_or_404(Category,slug_en=category)
    posts_list_not_view = POST.objects.filter(~Q(pk__in=list_viewed),category=cate).order_by('-date')
    posts_list__viewed = POST.objects.filter(pk__in=list_viewed,category=cate).order_by('-date')
    paginator = Paginator(posts_list_not_view, 6)
    posts_not_view = paginator.page(1)
    paginator_viewed = Paginator(posts_list__viewed, 6)
    posts_viewed = paginator_viewed.page(1)
    data = {"posts_not_view":posts_not_view, "posts_viewed":posts_viewed, "cate_current":category, "lang":request.LANGUAGE_CODE}
    html = render_to_string("post/post_relative_ajax.html", data)
    serialized_data = json.dumps({"html": html})
    return HttpResponse(serialized_data, mimetype='application/json')
@login_required
def upload_image(request):
    """Blobstore upload callback: record the uploaded blob's metadata as an
    IMAGE_STORE row, then redirect to the URL given in the form's
    'redirect' field."""
    upload_files = get_uploads(request, field_name='file', populate_post=True) # 'file' is file upload field in the form
    blob_info = upload_files[0]
    image = IMAGE_STORE()
    image.blob_key = blob_info.key()
    image.created_date = blob_info.creation
    image.size = blob_info.size
    image.file_name = blob_info.filename
    image.save()
    return redirect(request.POST["redirect"])
def get_uploads(request, field_name=None, populate_post=False):
    """Get uploads sent to this handler.
    Args:
        field_name: Only select uploads that were sent as a specific field.
        populate_post: Add the non blob fields to request.POST
    Returns:
        A list of BlobInfo records corresponding to each upload.
        Empty list if there are no blob-info records for field_name.
    """
    # Parse the raw WSGI input once and cache the result on the request
    # object, so repeated calls do not re-read the stream.
    if hasattr(request,'__uploads') == False:
        request.META['wsgi.input'].seek(0)
        fields = cgi.FieldStorage(request.META['wsgi.input'], environ=request.META)
        request.__uploads = {}
        if populate_post:
            request.POST = {}
        for key in fields.keys():
            field = fields[key]
            # App Engine marks blobstore uploads with a 'blob-key' option.
            if isinstance(field, cgi.FieldStorage) and 'blob-key' in field.type_options:
                request.__uploads.setdefault(key, []).append(blobstore.parse_blob_info(field))
            elif populate_post:
                request.POST[key] = field.value
    if field_name:
        try:
            return list(request.__uploads[field_name])
        except KeyError:
            return []
    else:
        results = []
        # NOTE: itervalues() is Python 2 only.
        for uploads in request.__uploads.itervalues():
            results += uploads
        return results
@login_required
def get_images(request):
    """AJAX endpoint: first page (6 items) of uploaded images with their
    blobstore serving URLs, rendered to JSON HTML."""
    images_list = IMAGE_STORE.objects.all().order_by('-created_date')
    paginator = Paginator(images_list, 6)
    imagesPage = paginator.page(1)
    urls = []
    for blob in imagesPage:
        urls.append(images.get_serving_url(blob.blob_key))
    data = {"urls" : urls, "images":imagesPage}
    html = render_to_string("image/image_ajax.html", data)
    serialized_data = json.dumps({"html": html})
    return HttpResponse(serialized_data, mimetype='application/json')
@login_required
def get_images_more(request):
    """AJAX endpoint: page 'page' (6 items) of uploaded images, same payload
    shape as get_images. Returns HTTP 400 on non-POST or a bad page number."""
    if request.method == 'POST':
        page = request.POST.get('page')
        images_list = IMAGE_STORE.objects.all().order_by('-created_date')
        paginator = Paginator(images_list, 6)
        try:
            imagesPage = paginator.page(page)
        except PageNotAnInteger:
            return HttpResponse(status=400)
        urls = []
        for blob in imagesPage:
            urls.append(images.get_serving_url(blob.blob_key))
        data = {"urls" : urls, "images":imagesPage}
        html = render_to_string("image/image_ajax.html", data)
        serialized_data = json.dumps({"html": html})
        return HttpResponse(serialized_data, mimetype='application/json')
    return HttpResponse(status=400)
def commented(request):
    """AJAX hook: update a post's comment counter. POST param 'p' is the
    post pk; presence of 'type' means a comment was removed."""
    if request.method == "POST":
        post = get_object_or_404(POST, pk=request.POST["p"])
        if "type" in request.POST:
            post.updateComment("removed")
        else:
            post.updateComment()
        return HttpResponse(status=200)
    return HttpResponse(status=400)
def liked(request):
    """AJAX hook: update a post's like counter. POST param 'p' is the post
    pk; presence of 'type' means the post was unliked."""
    if request.method == "POST":
        post = get_object_or_404(POST, pk=request.POST["p"])
        if "type" in request.POST:
            post.updateLike("unliked")
        else:
            post.updateLike()
        return HttpResponse(status=200)
    return HttpResponse(status=400)
1732536 | <reponame>ZhichengHuang/Food-Project
import torch
import os
from collections import Counter
class feature_lib:
    """Persistent library of dish feature vectors (one column per sample)
    with their labels, used for similarity-based dish recognition.

    The feature matrix has shape (feature_dim, n_samples); the original
    comment suggests feature_dim is 2048 -- confirm with the encoder.
    """

    def __init__(self, cfg):
        """
        Args:
            cfg: config exposing ``FEATURELIB.PATH`` (checkpoint file) and
                ``FEATURELIB.COMMONLABEL`` (labels always searchable; per
                the original comment these are staples such as white rice,
                black rice, soup, watermelon, sweet potato, corn, steamed
                bun, porridge, yogurt, side dishes).
        """
        self.feature_lib_path = cfg.FEATURELIB.PATH
        self.lib = self.load_lib()
        # Feature matrix of shape (feature_dim, n_samples); None while empty.
        self.lib_feature = self.lib['feature']
        self.lib_label = self.lib['labels']
        self.common_labels = cfg.FEATURELIB.COMMONLABEL

    def save_lib(self):
        """Serialize the current feature matrix and labels to disk."""
        lib = {
            "feature": self.lib_feature,
            "labels": self.lib_label,
        }
        torch.save(lib, self.feature_lib_path)

    def fix_lib(self):
        """Make the entire library the searchable subset for today."""
        self.today_label = self.lib_label
        self.today_lib = self.lib_feature

    def load_lib(self):
        """Load the library from disk, or return an empty one if absent."""
        if os.path.exists(self.feature_lib_path):
            return torch.load(self.feature_lib_path)
        return {"feature": None,
                "labels": []}

    def get_today_lib(self, labels):
        """Restrict today's searchable subset to *labels* + common labels.

        Note: *labels* is extended in place with the common labels.

        Returns:
            1 on success, 0 on any failure (e.g. no matching column to
            stack) -- status-code contract kept from the original API.
        """
        try:
            labels.extend(self.common_labels)
            today_feature_list = []
            today_labels = []
            for index, la in enumerate(self.lib_label):
                if la in labels:
                    today_feature_list.append(self.lib_feature[:, index])
                    today_labels.append(la)
            self.today_lib = torch.stack(today_feature_list, dim=1)
            self.today_label = today_labels
        except Exception:
            # Broad on purpose: callers only consume the 0/1 status.
            return 0
        else:
            return 1

    def update_lib(self, feature, label):
        """Append one feature column (shape (feature_dim, 1)) with its label."""
        if self.lib_feature is None:
            self.lib_feature = feature
        else:
            self.lib_feature = torch.cat([self.lib_feature, feature], dim=1)
        self.lib_label.append(label)

    def get_label(self, in_feature, th=0.85):
        """Match each row of *in_feature* against today's library.

        Args:
            in_feature: tensor of shape (batch, feature_dim) of queries.
            th: similarity threshold on the raw dot product -- assumes the
                feature vectors are normalized so the product is a cosine
                similarity; TODO confirm with the feature extractor.

        Returns:
            A list with one entry per query row: the majority label among
            library columns whose similarity exceeds *th*, or 0 when no
            column matches (0 stands for an unrecognized dish).
        """
        sim = torch.matmul(in_feature, self.today_lib)
        mask = sim > th
        mask_list = mask.tolist()
        result_list = []
        for mask_item in mask_list:
            out_label = [label for label, index in zip(self.today_label, mask_item) if index == 1]
            if len(out_label) > 0:
                result_list.append(Counter(out_label).most_common(1)[0][0])
            else:
                result_list.append(0)
        return result_list
| StarcoderdataPython |
60252 | <reponame>daumann/chronas-application
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls import patterns, url, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.views.decorators.cache import cache_page
from leaflet_storage.views import MapShortUrl
from . import views
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()
# Non-localized URLs: admin, social auth, short map URLs, and an AJAX
# proxy whose responses are cached for 180 s.
urlpatterns = patterns(
    '',
    (r'^app/admin/', include(admin.site.urls)),
    url('', include('social.apps.django_app.urls', namespace='social')),
    url(r'^app/m/(?P<pk>\d+)/$', MapShortUrl.as_view(), name='umap_short_url'),
    url(r'^app/ajax-proxy/$', cache_page(180)(views.ajax_proxy), name='ajax-proxy'),
)
# Language-prefixed URLs for the public site (showcase is cached 24 h).
urlpatterns += i18n_patterns(
    '',
    url(r'^app/$', views.home, name="home"),
    url(r'^app/showcase/$', cache_page(24 * 60 * 60)(views.showcase), name='maps_showcase'),
    url(r'^app/search/$', views.search, name="search"),
    url(r'^app/about/$', views.about, name="about"),
    url(r'^app/user/(?P<username>[-_\w@]+)/$', views.user_maps, name='user_maps'),
    (r'', include('leaflet_storage.urls')),
)
# Serve uploaded media through Django only in DEBUG setups.
if settings.DEBUG and settings.MEDIA_ROOT:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
| StarcoderdataPython |
1635139 | from challenge import hint, enc
from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad
import hashlib
# CTF solver skeleton: decrypt the challenge ciphertext with AES-128-CBC,
# where the key is derived from an integer via SHA-1.
iv = bytes.fromhex(enc['iv'])
enc = bytes.fromhex(enc['enc'])  # NOTE: rebinds 'enc' from dict to the ciphertext bytes
key = ...  # placeholder: the solver must recover this value ('hint' is imported above but unused here -- presumably the clue)
sha1 = hashlib.sha1()
sha1.update(str(key).encode('ascii'))
aes_key = sha1.digest()[:16]  # AES-128 key = first 16 bytes of SHA-1(str(key))
cipher = AES.new(aes_key, AES.MODE_CBC, iv)
flag = unpad(cipher.decrypt(enc), 16)  # strip PKCS#7 padding (block size 16)
print(flag)
1718559 | <reponame>mrcrnkovich/stupidb
"""Top-level package for stupidb."""
import importlib.metadata
from stupidb.aggregation import Window # noqa: F401
from stupidb.api import * # noqa: F401,F403
# Resolve the installed package version from distribution metadata.
__version__ = importlib.metadata.version(__name__)
# Keep the namespace clean; importlib is not part of the public API.
del importlib
| StarcoderdataPython |
"""Print each student's marks and average, read from students_log.txt.

Expected line format: "<last> <first> <patronymic>, <mark>, <mark>, ..."
"""
with open('students_log.txt', 'r', encoding='utf-8') as f:
    for row in f.read().splitlines():
        if not row.strip():
            continue  # tolerate blank lines instead of crashing on unpack
        last_name, first_name, patronymic, row_marks = row.split(maxsplit=3)
        patronymic = patronymic.strip(',')
        # Marks are comma-separated and may carry stray spaces, e.g. " 5".
        marks = [int(mark.strip()) for mark in row_marks.split(',')]
        avg_mark = sum(marks) / len(marks)
        print(last_name, first_name, patronymic, ':', marks, ', средний балл', avg_mark)
| StarcoderdataPython |
3245821 | <reponame>rakati/ppci-mirror
import unittest
from test_asm import AsmTestCaseBase
class Sse1TestCase(AsmTestCaseBase):
    """ Checks sse1 instructions """
    # Each test feeds assembly text and compares against expected machine
    # code; the spaces inside check() separate per-instruction hex encodings.
    march = 'x86_64'
    def test_movss(self):
        """ Test move scalar single-fp values """
        self.feed('movss xmm4, xmm6')
        self.feed('movss xmm3, [rax, 10]')
        self.feed('movss [rax, 10], xmm9')
        self.check('f30f10e6 f30f10580a f3440f11480a')
    def test_addss(self):
        """ Test add scalar single-fp values """
        self.feed('addss xmm13, xmm9')
        self.feed('addss xmm6, [r11, 1000]')
        self.check('f3450f58e9 f3410f58b3e8030000')
    def test_subss(self):
        """ Test substract scalar single-fp values """
        self.feed('subss xmm11, xmm5')
        self.feed('subss xmm1, [rax, 332]')
        self.check('f3440f5cdd f30f5c884c010000')
    def test_mulss(self):
        """ Test multiply scalar single-fp values """
        self.feed('mulss xmm14, xmm2')
        self.feed('mulss xmm8, [rsi, 3]')
        self.check('f3440f59f2 f3440f594603')
    def test_divss(self):
        """ Test divide scalar single-fp values """
        self.feed('divss xmm7, xmm13')
        self.feed('divss xmm6, [rax, 55]')
        self.check('f3410f5efd f30f5e7037')
    def test_cvtsi2ss(self):
        """ Test cvtsi2ss """
        self.feed('cvtsi2ss xmm7, rdx')
        self.feed('cvtsi2ss xmm3, [rbp, 13]')
        self.check('f3480f2afa f3480f2a5d0d')
    def test_comiss(self):
        """ Test compare single scalar """
        self.feed('comiss xmm2, [rcx]')
        self.check('0f2f11')
    def test_ucomiss(self):
        """ Test unordered compare single scalar """
        self.feed('ucomiss xmm0, xmm1')
        self.check('0f2ec1')
class Sse2TestCase(AsmTestCaseBase):
    """ Checks sse2 instructions """
    # Same pattern as Sse1TestCase, for the float64 (scalar double) forms.
    march = 'x86_64'
    def test_movsd(self):
        """ Test move float64 values """
        self.feed('movsd xmm4, xmm6')
        self.feed('movsd xmm3, [rax, 10]')
        self.feed('movsd [rax, 18], xmm9')
        self.check('f20f10e6 f20f10580a f2440f114812')
    def test_addsd(self):
        """ Test add scalar float64 """
        self.feed('addsd xmm14, xmm14')
        self.feed('addsd xmm14, [100]')
        self.check('f2450f58f6 f2440f58342564000000')
    def test_subsd(self):
        """ Test substract scalar float64 """
        self.feed('subsd xmm9, xmm2')
        self.feed('subsd xmm4, [r14]')
        self.check('f2440f5cca f2410f5c26')
    def test_mulsd(self):
        """ Test multiply scalar float64 """
        self.feed('mulsd xmm6, xmm12')
        self.feed('mulsd xmm5, [rcx, 99]')
        self.check('f2410f59f4 f20f596963')
    def test_divsd(self):
        """ Test divide scalar float64 """
        self.feed('divsd xmm3, xmm1')
        self.feed('divsd xmm2, [r9]')
        self.check('f20f5ed9 f2410f5e11')
    def test_cvtsd2si(self):
        """ Test convert scalar float64 to integer"""
        self.feed('cvtsd2si rbx, xmm1')
        self.feed('cvtsd2si r11, [rax, 111]')
        self.check('f2480f2dd9 f24c0f2d586f')
    def test_cvtsi2sd(self):
        """ Test convert integer to scalar float64 """
        self.feed('cvtsi2sd xmm3, r12')
        self.feed('cvtsi2sd xmm9, [rsi, 29]')
        self.check('f2490f2adc f24c0f2a4e1d')
    def test_cvtsd2ss(self):
        """ Test convert scalar float64 to float32 """
        self.feed('cvtsd2ss xmm6, [rbx]')
        self.feed('cvtsd2ss xmm2, [rcx]')
        self.check('f20f5a33 f20f5a11')
    def test_cvtss2sd(self):
        """ Test convert scalar float32 to float64 """
        self.feed('cvtss2sd xmm6, [rbx]')
        self.feed('cvtss2sd xmm2, [rcx]')
        self.check('f30f5a33 f30f5a11')
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3362945 | from __future__ import print_function
import numpy as np
import pinocchio as pin
from numpy.testing import assert_almost_equal as assertApprox
from sot_talos_balance.simple_zmp_estimator import SimpleZmpEstimator
pin.switchToNumpyMatrix()
# --- Create estimator
print("--- Create estimator ---")
estimator = SimpleZmpEstimator("ciao")
print("\nSignals (at creation):")
estimator.displaySignals()
# --- Test vs precomputed values
print()
print("--- Test vs precomputed ---")
estimator.wrenchLeft.value = [0.0, 0.0, 10.0, 0.0, 0.0, 0.0]
estimator.wrenchRight.value = [0.0, 0.0, 10.0, 0.0, 0.0, 0.0]
estimator.poseLeft.value = [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 0.0, 1.0]]
estimator.poseRight.value = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 0.0, 1.0]]
print()
print("wrenchLeft: %s" % (estimator.wrenchLeft.value, ))
print("wrenchRight: %s" % (estimator.wrenchRight.value, ))
print("poseLeft:\n%s" % (np.matrix(estimator.poseLeft.value), ))
print("poseRight:\n%s" % (np.matrix(estimator.poseRight.value), ))
estimator.init()
estimator.zmp.recompute(0)
copLeft = (1.0, 0.0, 0.0)
copRight = (0.0, 1.0, 0.0)
zmp = (0.5, 0.5, 0.0)
print()
print("copLeft: %s" % (estimator.copLeft.value, ))
assertApprox(estimator.copLeft.value, copLeft)
print("copRight: %s" % (estimator.copRight.value, ))
assertApprox(estimator.copRight.value, copRight)
print("zmp: %s" % (estimator.zmp.value, ))
assertApprox(estimator.zmp.value, zmp)
# --- Test emergency stop
print()
print("--- Test emergency stop ---")
print()
estimator.emergencyStop.recompute(0)
stop = estimator.emergencyStop.value
print("emergencyStop: %d" % stop)
np.testing.assert_equal(stop, 0)
estimator.wrenchLeft.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]
estimator.emergencyStop.recompute(1)
stop = estimator.emergencyStop.value
print("emergencyStop: %d" % stop)
np.testing.assert_equal(stop, 0)
estimator.wrenchRight.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]
estimator.emergencyStop.recompute(2)
stop = estimator.emergencyStop.value
print("emergencyStop: %d" % stop)
np.testing.assert_equal(stop, 1)
# --- Test vs CoM
print()
print("--- Test vs CoM ---")
estimator = SimpleZmpEstimator("ciao2")
model = pin.buildSampleModelHumanoid()
data = model.createData()
rightId = model.getFrameId('rleg_effector_body')
leftId = model.getFrameId('lleg_effector_body')
q = pin.neutral(model)
q[:3] = np.matrix([1.0, 0.0, 0.0]).T # displace freeflyer along x for testing
q[3:7] = np.matrix([np.sqrt(2) / 2, 0.0, np.sqrt(2) / 2, 0.0]).T # orient the base so that the feet are flat
pin.framesForwardKinematics(model, data, q)
poseRight = data.oMf[rightId].homogeneous.tolist()
poseLeft = data.oMf[leftId].homogeneous.tolist()
com = pin.centerOfMass(model, data, q).flatten().tolist()[0]
m = sum([inertia.mass for inertia in model.inertias[1:]])
g = 9.81
fz = m * g / 2.0
forceLeft = [0.0, 0.0, fz]
forceRight = [0.0, 0.0, fz]
lever = com[0] - data.oMf[rightId].translation[0]
tauy = -fz * lever
wrenchLeft = forceLeft + [0.0, tauy, 0.0]
wrenchRight = forceRight + [0.0, tauy, 0.0]
estimator.wrenchLeft.value = wrenchLeft
estimator.wrenchRight.value = wrenchRight
estimator.poseLeft.value = poseLeft
estimator.poseRight.value = poseRight
print()
print("wrenchLeft: %s" % (estimator.wrenchLeft.value, ))
print("wrenchRight: %s" % (estimator.wrenchRight.value, ))
print("poseLeft:\n%s" % (np.matrix(estimator.poseLeft.value), ))
print("poseRight:\n%s" % (np.matrix(estimator.poseRight.value), ))
estimator.init()
estimator.zmp.recompute(0)
print("copLeft: %s" % (estimator.copLeft.value, ))
print("copRight: %s" % (estimator.copRight.value, ))
print("zmp: %s" % (estimator.zmp.value, ))
print("com: %s" % (com, ))
assertApprox(estimator.zmp.value[:2], com[:2])
| StarcoderdataPython |
77392 | """
A simple wrapper to invoke pbundler without needing to install it, making debugging easier in an IDE
"""
import sys
from pbundler import PBCli
def main():
    """Run the pbundler CLI with this process's argv and exit with its status."""
    sys.exit(PBCli().run(sys.argv))

if __name__ == '__main__':
    main()
4456 | <filename>examples/django_mongoengine/bike/models.py
from mongoengine import Document
from mongoengine.fields import (
FloatField,
StringField,
ListField,
URLField,
ObjectIdField,
)
class Shop(Document):
    """A bike shop document stored in the MongoDB ``shop`` collection."""
    meta = {"collection": "shop"}
    ID = ObjectIdField()
    name = StringField()
    address = StringField()
    website = URLField()
class Bike(Document):
    """A bike document stored in the MongoDB ``bike`` collection."""
    meta = {"collection": "bike"}
    ID = ObjectIdField()
    name = StringField()
    brand = StringField()
    year = StringField()
    # Available frame sizes (list of size strings).
    size = ListField(StringField())
    wheel_size = FloatField()
    type = StringField()
| StarcoderdataPython |
3255833 | <gh_stars>1-10
# Generated by Django 2.2.13 on 2020-11-17 16:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces per-phase deadline sections
    (ProjectPhaseDeadlineSection plus its through model
    ProjectPhaseSectionDeadline) and reworks Deadline's ties to Attribute."""

    dependencies = [
        ('projects', '0076_deadline_types'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProjectPhaseDeadlineSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('index', models.PositiveIntegerField(default=0, verbose_name='index')),
            ],
            options={
                'verbose_name': 'project phase deadline section',
                'verbose_name_plural': 'project phase deadline sections',
                'ordering': ('index',),
            },
        ),
        migrations.AddField(
            model_name='deadline',
            name='attribute',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='deadline', to='projects.Attribute', verbose_name='attribute'),
        ),
        migrations.AlterField(
            model_name='deadline',
            name='condition_attributes',
            field=models.ManyToManyField(blank=True, related_name='condition_to_deadlines', to='projects.Attribute', verbose_name='show if any attribute is set'),
        ),
        migrations.AlterUniqueTogether(
            name='deadline',
            unique_together={('abbreviation', 'subtype')},
        ),
        migrations.CreateModel(
            name='ProjectPhaseSectionDeadline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('index', models.PositiveIntegerField(default=0, verbose_name='index')),
                ('deadline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Deadline', verbose_name='deadline')),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.ProjectPhaseDeadlineSection', verbose_name='deadline phase section')),
            ],
            options={
                'verbose_name': 'project phase deadline section item',
                'verbose_name_plural': 'project phase deadline section items',
                'ordering': ('index',),
            },
        ),
        migrations.AddField(
            model_name='projectphasedeadlinesection',
            name='deadlines',
            field=models.ManyToManyField(related_name='phase_sections', through='projects.ProjectPhaseSectionDeadline', to='projects.Deadline', verbose_name='deadlines'),
        ),
        migrations.AddField(
            model_name='projectphasedeadlinesection',
            name='phase',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='deadline_sections', to='projects.ProjectPhase', verbose_name='phase'),
        ),
        migrations.RemoveField(
            model_name='deadline',
            name='identifier',
        ),
    ]
| StarcoderdataPython |
3240097 | """
A class to maintain model data about Lobes (collections of Nodes)
"""
import bisect
from helper import centerOfMass, cartesian2Polar
class Lobe:
    """Model data about a Lobe: a named, weighted collection of Nodes.

    Nodes are kept sorted (via Node's own ordering) and lobes themselves
    order by the polar theta coordinate of their center of mass.
    """

    def __init__(self, uID, name):
        """
        Lobe constructor.

        Args:
            uID: The unique ID string of this lobe.
            name: The string name of this lobe.
        """
        # Model Data
        self.uID = uID
        self.name = name
        self.nodes = []  # kept sorted by bisect.insort using Node ordering

    def addNode(self, node):
        """
        Insert a new node into this lobe's sorted list.

        Args:
            node: A Node instance.
        """
        bisect.insort(self.nodes, node)

    def weight(self):
        """Return the weight of this lobe (sum of all node weights)."""
        return sum(node.weight() for node in self.nodes)

    def centerOfMass(self):
        """
        Return the weighted center of mass of this lobe's nodes.

        Return:
            A 3-tuple of floats (x, y, z).
        """
        assert len(self.nodes) != 0
        node_positions = [node.pos for node in self.nodes]
        node_weights = [node.weight() for node in self.nodes]
        return centerOfMass(node_positions, node_weights)

    def __lt__(self, other):
        """
        Comparator to define ordering of lobes based on the theta polar
        coordinate of their centers of mass.

        Precondition: Because ordering depends on weights, the operand
        lobes' nodes must have their layers list initialized, and both
        lobes must be non-empty.

        Return:
            Boolean
        """
        assert len(self.nodes) != 0
        assert len(other.nodes) != 0
        self_x, self_y, _ = self.centerOfMass()
        other_x, other_y, _ = other.centerOfMass()
        _, self_theta = cartesian2Polar(self_x, self_y)
        _, other_theta = cartesian2Polar(other_x, other_y)
        return self_theta < other_theta
| StarcoderdataPython |
3346870 | <reponame>attardi/iwpt-shared-task-2020
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-03-14 17:06
from typing import Union, Tuple
import tensorflow as tf
from edparser.common.structure import SerializableDict
from edparser.common.transform_tf import Transform
from edparser.common.vocab_tf import VocabTF
from edparser.utils.io_util import load_json
from edparser.utils.util import merge_locals_kwargs
def get_positions(start_idx, end_idx, length):
    """Return token positions relative to the [start_idx, end_idx] span.

    Tokens before the span get negative offsets counting up to -1, tokens
    inside the span get 0, and tokens after it get 1, 2, ... so the result
    always has *length* entries.
    """
    before = list(range(-start_idx, 0))
    inside = [0] * (end_idx - start_idx + 1)
    after = list(range(1, length - end_idx))
    return before + inside + after
class TACREDTransform(Transform):
    """Data transform for the TACRED relation-extraction corpus: reads the
    JSON files, anonymizes subject/object spans, and maps every field to
    vocabulary ids for a TensorFlow pipeline."""
    def __init__(self, config: SerializableDict = None, map_x=True, map_y=True, lower=False, **kwargs) -> None:
        super().__init__(**merge_locals_kwargs(locals(), kwargs))
        # Vocabularies are built during fit(); pad/unk tokens are disabled
        # for fields that are always known from the annotations.
        self.token_vocab = VocabTF()
        self.pos_vocab = VocabTF(pad_token=None, unk_token=None)
        self.ner_vocab = VocabTF(pad_token=None)
        self.deprel_vocab = VocabTF(pad_token=None, unk_token=None)
        self.rel_vocab = VocabTF(pad_token=None, unk_token=None)
    def fit(self, trn_path: str, **kwargs) -> int:
        """Build all vocabularies from the training file; return the sample count."""
        count = 0
        for (tokens, pos, ner, head, deprel, subj_positions, obj_positions, subj_type,
             obj_type), relation in self.file_to_samples(
                trn_path, gold=True):
            count += 1
            self.token_vocab.update(tokens)
            self.pos_vocab.update(pos)
            self.ner_vocab.update(ner)
            self.deprel_vocab.update(deprel)
            self.rel_vocab.add(relation)
        return count
    def file_to_inputs(self, filepath: str, gold=True):
        """Yield raw ((tokens, pos, ner, head, deprel, ss, se, os, oe), relation)
        tuples from a TACRED JSON file."""
        data = load_json(filepath)
        for d in data:
            tokens = list(d['token'])
            ss, se = d['subj_start'], d['subj_end']
            os, oe = d['obj_start'], d['obj_end']
            pos = d['stanford_pos']
            ner = d['stanford_ner']
            deprel = d['stanford_deprel']
            head = [int(x) for x in d['stanford_head']]
            # Every sentence must contain exactly one root (head index 0).
            assert any([x == 0 for x in head])
            relation = d['relation']
            yield (tokens, pos, ner, head, deprel, ss, se, os, oe), relation
    def inputs_to_samples(self, inputs, gold=False):
        """Convert raw inputs to model samples: relative position sequences,
        anonymized subject/object tokens, and 0-based head indices."""
        for input in inputs:
            if gold:
                (tokens, pos, ner, head, deprel, ss, se, os, oe), relation = input
            else:
                tokens, pos, ner, head, deprel, ss, se, os, oe = input
                relation = self.rel_vocab.safe_pad_token
            l = len(tokens)
            subj_positions = get_positions(ss, se, l)
            obj_positions = get_positions(os, oe, l)
            subj_type = ner[ss]
            obj_type = ner[os]
            # anonymize tokens
            tokens[ss:se + 1] = ['SUBJ-' + subj_type] * (se - ss + 1)
            tokens[os:oe + 1] = ['OBJ-' + obj_type] * (oe - os + 1)
            # min head is 0, but root is not included in tokens, so take 1 off from each head
            head = [h - 1 for h in head]
            yield (tokens, pos, ner, head, deprel, subj_positions, obj_positions, subj_type, obj_type), relation
    def create_types_shapes_values(self) -> Tuple[Tuple, Tuple, Tuple]:
        """Return the tf.data (types, shapes, padding values) for samples."""
        # (tokens, pos, ner, head, deprel, subj_positions, obj_positions, subj_type, obj_type), relation
        types = (tf.string, tf.string, tf.string, tf.int32, tf.string, tf.int32, tf.int32, tf.string,
                 tf.string), tf.string
        shapes = ([None], [None], [None], [None], [None], [None], [None], [], []), []
        pads = (self.token_vocab.safe_pad_token, self.pos_vocab.safe_pad_token, self.ner_vocab.safe_pad_token, 0,
                self.deprel_vocab.safe_pad_token,
                0, 0, self.ner_vocab.safe_pad_token, self.ner_vocab.safe_pad_token), self.rel_vocab.safe_pad_token
        return types, shapes, pads
    def x_to_idx(self, x) -> Union[tf.Tensor, Tuple]:
        """Map the string fields of a sample to vocabulary id tensors."""
        tokens, pos, ner, head, deprel, subj_positions, obj_positions, subj_type, obj_type = x
        tokens = self.token_vocab.lookup(tokens)
        pos = self.pos_vocab.lookup(pos)
        ner = self.ner_vocab.lookup(ner)
        deprel = self.deprel_vocab.lookup(deprel)
        subj_type = self.ner_vocab.lookup(subj_type)
        obj_type = self.ner_vocab.lookup(obj_type)
        return tokens, pos, ner, head, deprel, subj_positions, obj_positions, subj_type, obj_type
    def y_to_idx(self, y) -> tf.Tensor:
        """Map a relation label string to its vocabulary id."""
        return self.rel_vocab.lookup(y)
| StarcoderdataPython |
3277570 | # Load movies from HDFS, generate embeddings of movie titles with BERT, then save embeddings to
# redis and HDFS.
import subprocess
from time import localtime, strftime
import numpy as np
import redis
import tensorflow_hub as hub
import tensorflow_text as text
import os
# --- Configuration ---------------------------------------------------------
HDFS_PATH_MOVIE_EMBEDDINGS="hdfs:///sparrow_recsys/movie-embeddings/"
REDIS_SERVER="localhost"
REDIS_PORT=6379
# Redis key holding the currently-active embedding version string.
REDIS_KEY_MOVIE_EMBEDDING_VERSION="sparrow_recsys:version:me"
# Per-movie embedding keys are "<prefix>:<version>:<movie_id>".
REDIS_KEY_PREFIX_MOVIE_EMBEDDING="sparrow_recsys:me"

# load movies from HDFS
movies = []
cat_hdfs_movies = subprocess.Popen(["hadoop", "fs", "-cat", "hdfs:///sparrow_recsys/movies/*/part-*"], stdout=subprocess.PIPE)
for line in cat_hdfs_movies.stdout:
    movie_str = line.strip()
    # Lines arrive as bytes; fields are tab-separated (id, title, genres).
    movie_info = movie_str.split(b"\t")
    if len(movie_info) == 3:
        movies.append(movie_info)
movies = np.array(movies)
print(f"HDFS movies count: {len(movies)}, first: {movies[0]}")
if len(movies) == 0:
    exit(1)

# get embeddings
tfhub_handle_preprocess = "https://hub.tensorflow.google.cn/tensorflow/bert_en_uncased_preprocess/3"
tfhub_handle_encoder = "https://hub.tensorflow.google.cn/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/2"
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess)
bert_model = hub.KerasLayer(tfhub_handle_encoder)
movie_ids = list(map(lambda x: int(x.decode('utf-8')), movies[:, 0]))
movie_titles = movies[:, 1]
text_preprocessed = bert_preprocess_model(movie_titles)
bert_results = bert_model(text_preprocessed)
print(f'Pooled Outputs Shape: {bert_results["pooled_output"].shape}')
print(f'Pooled Outputs Values[0, :12]: {bert_results["pooled_output"][0, :12]}')
# Serialize each pooled BERT vector as a comma-joined string of floats.
movie_embeddings = list(map(lambda embeddings: ','.join(list(map(lambda f: str(f), embeddings.numpy()))), bert_results["pooled_output"]))
movie_embeddings = list(zip(movie_ids, movie_embeddings))
# remove duplicates (dict keeps one embedding per movie id, then sort by id)
movie_embeddings = dict(sorted(dict(movie_embeddings).items()))
movie_embeddings = list(movie_embeddings.items())
print(f"Movie embedding sample: {movie_embeddings[0]}")

# save to HDFS (write a local TSV, then hadoop-put it; the local file is removed after upload)
tmp_file_name = 'movie-embeddings.csv'
if os.path.isfile(tmp_file_name):
    os.remove(tmp_file_name)
with open(tmp_file_name, 'a') as tmp_file:
    list(map(lambda x: tmp_file.write(f"{x[0]}\t{x[1]}\n"), movie_embeddings))
if os.path.isfile(tmp_file_name):
    subprocess.Popen(["hadoop", "fs", "-rm", "-r", HDFS_PATH_MOVIE_EMBEDDINGS], stdout=subprocess.PIPE).communicate()
    subprocess.Popen(["hadoop", "fs", "-mkdir", "-p", f"{HDFS_PATH_MOVIE_EMBEDDINGS}0000/"], stdout=subprocess.PIPE).communicate()
    subprocess.Popen(["hadoop", "fs", "-put", f"./{tmp_file_name}", f"{HDFS_PATH_MOVIE_EMBEDDINGS}0000/part-0"], stdout=subprocess.PIPE).communicate()
    os.remove(tmp_file_name)
    print(f"Movie embeddings is uploaded to HDFS: {HDFS_PATH_MOVIE_EMBEDDINGS}")

# save to redis under a timestamp version, then flip the version pointer last
# so readers never see a partially-written set.
version=strftime("%Y%m%d%H%M%S", localtime())
movie_embeddings_redis = list(map(
    lambda x: (f"{REDIS_KEY_PREFIX_MOVIE_EMBEDDING}:{version}:{x[0]}", x[1]),
    movie_embeddings))
r = redis.Redis(host=REDIS_SERVER, port=REDIS_PORT)
r.mset(dict(movie_embeddings_redis))
r.set(REDIS_KEY_MOVIE_EMBEDDING_VERSION, version)
print(f"Movie embedding version is updated to: {version}")
| StarcoderdataPython |
3206839 | <reponame>lucasdavid/edge
def cost(g):
    """Return the cost of a path or circuit represented by an nx.Graph object.

    :param g: graph object representing the path or circuit.
    :return: total weight accumulated over all edges.
    """
    total = 0
    for _, _, attrs in g.edges(data=True):
        total += attrs['weight']
    return total
| StarcoderdataPython |
1713471 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_VERSION_EGL_1_3'
def _f( function ):
    # Wrap *function* as an EGL entry point tagged with the EGL 1.3 feature
    # set, with the standard EGL error checker attached.
    return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_VERSION_EGL_1_3',error_checker=_errors._error_checker)
# EGL 1.3 enumerant constants (values come from the Khronos EGL registry).
EGL_CONFORMANT=_C('EGL_CONFORMANT',0x3042)
EGL_CONTEXT_CLIENT_VERSION=_C('EGL_CONTEXT_CLIENT_VERSION',0x3098)
EGL_MATCH_NATIVE_PIXMAP=_C('EGL_MATCH_NATIVE_PIXMAP',0x3041)
EGL_OPENGL_ES2_BIT=_C('EGL_OPENGL_ES2_BIT',0x0004)
EGL_VG_ALPHA_FORMAT=_C('EGL_VG_ALPHA_FORMAT',0x3088)
EGL_VG_ALPHA_FORMAT_NONPRE=_C('EGL_VG_ALPHA_FORMAT_NONPRE',0x308B)
EGL_VG_ALPHA_FORMAT_PRE=_C('EGL_VG_ALPHA_FORMAT_PRE',0x308C)
EGL_VG_ALPHA_FORMAT_PRE_BIT=_C('EGL_VG_ALPHA_FORMAT_PRE_BIT',0x0040)
EGL_VG_COLORSPACE=_C('EGL_VG_COLORSPACE',0x3087)
EGL_VG_COLORSPACE_LINEAR=_C('EGL_VG_COLORSPACE_LINEAR',0x308A)
EGL_VG_COLORSPACE_LINEAR_BIT=_C('EGL_VG_COLORSPACE_LINEAR_BIT',0x0020)
EGL_VG_COLORSPACE_sRGB=_C('EGL_VG_COLORSPACE_sRGB',0x3089)
| StarcoderdataPython |
4839566 | #!/usr/bin/env python
"""
Advent of Code 2017: Day 12 Part 1
https://adventofcode.com/2017/day/12
"""
import sys, re
# Define a Node class to store tree information (parents and children), and to implement union-find
class Node:
    """Tree node supporting union-find style merging and subtree counting."""

    def __init__(self):
        self.parent = self
        self.children = []

    # Find this node's root
    def find_root(self):
        """Walk parent links iteratively until reaching the self-parented root."""
        node = self
        while node.parent is not node:
            node = node.parent
        return node

    # Merge two nodes' roots into the same tree
    def merge(self, other):
        """Join the trees containing *self* and *other*; self's root wins."""
        mine = self.find_root()
        theirs = other.find_root()
        if mine is not theirs:
            theirs.parent = mine
            mine.children.append(theirs)

    # Count all nodes below (and including) this one
    def count_nodes(self):
        """Return the number of nodes in the subtree rooted at this node."""
        total = 1
        for child in self.children:
            total += child.count_nodes()
        return total
# NOTE: Python 2 syntax (print statement) — this script does not run under Python 3.
with open(sys.argv[1], 'r') as file:
    # Each input line is a list of integers: the node id followed by its neighbours.
    numeric_regex = re.compile(r'\d+')
    input_list = [[int(num) for num in numeric_regex.findall(line)] for line in file.readlines()]
# Create a dictionary of new nodes based on the input
node_dict = dict((input[0], Node()) for input in input_list)
# Merge (union) nodes that are connected
for input in input_list:
    for connected in input[1:]:
        node_dict[input[0]].merge(node_dict[connected])
# Print count of nodes in the same group as node 0
print node_dict[0].find_root().count_nodes()
| StarcoderdataPython |
93756 | <gh_stars>0
#HN uses the https://news.ycombinator.com/front?day={yyyy}-{mm}-{dd} format for top posts of that day
import requests
from bs4 import BeautifulSoup as bs
from datetime import date, datetime, timedelta
import pandas as pd
import re
import html2text
import numpy as np
class Scrape(object):
    """Scraper for Hacker News comments.

    HN exposes the top posts of a given day at
    https://news.ycombinator.com/front?day=YYYY-MM-DD
    """

    def __init__(self):
        # Listing page for "top posts of day X".
        self.url_base = 'https://news.ycombinator.com/front'
        # Base URL for individual item (comment) pages.
        self.url = 'https://news.ycombinator.com/'

    def return_comments_page(self, item_url):
        """Return the comment texts of one item page as a numpy array.

        :param item_url: relative link such as ``item?id=12345``.
        """
        req = requests.get(self.url + item_url)
        soup = bs(req.content, 'html.parser')
        comments = [tag.text.strip() for tag in soup.find_all(class_='comment')]
        return np.array(comments)

    def return_comments_date(self, date):
        """Return the comments for every front-page item of *date*."""
        params = {'day': date.strftime('%Y-%m-%d')}
        req = requests.get(self.url_base, params)
        content = html2text.html2text(str(req.content))
        # De-duplicate the item links found on the listing page.
        # BUG FIX: the pattern is now a raw string; '\?' in a plain string is
        # an invalid escape sequence.
        comment_links = list(set(re.findall(r'item\?id=[0-9]*', content)))
        comments = [self.return_comments_page(link) for link in comment_links]
        # NOTE(review): pages yield different comment counts, so this becomes
        # an object-dtype array — confirm downstream consumers expect that.
        return np.array(comments)

    def return_comments_range(self, start_date, end_date):
        """Return all comments for each day in [start_date, end_date]."""
        date_range = pd.date_range(start_date, end_date)
        # BUG FIX: the original called self.return_comments(), which does not
        # exist; the intended method is return_comments_date().
        comments = [self.return_comments_date(day) for day in date_range]
        return np.array(comments)
| StarcoderdataPython |
1714386 | <filename>VQA/Stacked Attention/extract_features.py
"""
Created on Tue May 08 19:06:33 2018
author: <NAME>
"""
from keras.applications.vgg19 import VGG19, preprocess_input
from keras.models import Model
from keras.layers import Input
from keras.optimizers import SGD
import cv2, numpy as np
import h5py
import json
from glob import glob
import keras.backend as K
K.set_image_dim_ordering('tf')
### load json
def get_model(weights_path=None):
    """Build a frozen VGG19 feature extractor ending at block5_pool.

    weights_path is currently unused; ImageNet weights are always loaded.
    """
    ## [17-june-2018]Use residual after this
    input_tensor = Input(shape=(448,448,3))
    base_model = VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
    #base_model.summary()
    # Freeze every layer: the network is used only for feature extraction.
    for layer in base_model.layers:
        layer.trainable = False
    model = Model(input=base_model.input, output=base_model.get_layer('block5_pool').output)
    #model.summary()
    #model = VGG19(weights_path)
    #model.summary()
    return model
def extract(path):
    """Return VGG19 block5_pool features for the image at *path*.

    NOTE(review): get_model() is rebuilt (weights reloaded) on every call —
    hoisting it out of the caller's loop would be much faster; confirm intent.
    """
    im = cv2.imread(path)
    #img = image.load_img(path, target_size=(448,448))
    if im is None:
        raise Exception("Incorrect path")
    #im = cv2.resize(im, (448, 448))
    #im = im.transpose((2,0,1))
    #im = np.expand_dims(im, axis=0)
    im = cv2.resize(im, (448,448)).astype(np.float32)
    # NOTE(review): cv2.imread already yields 0-255 values, so this scaling
    # before mean subtraction looks suspicious — confirm against training code.
    im = im * 255
    # Subtract the BGR channel means used by the original VGG preprocessing.
    im[:,:,0] -= 103.939
    im[:,:,1] -= 116.779
    im[:,:,2] -= 123.68
    #im = im.transpose((2,0,1))
    im = np.expand_dims(im, axis=0)
    #x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = preprocess_input(x)
    im = preprocess_input(im)
    # print (im.shape)
    # Test pretrained model
    model = get_model()
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    return out
path_to_images='E:/akshita_workspace/Audio-Vision/VIS-LSTM/data/coco' #Give path where COCO images reside
features_path='E:/akshita_workspace/Audio-Vision/VIS-LSTM/data/features'

with open('data/vqa_data_prepro.json') as f:
    data = json.load(f)

type_="train"
mydata=data['unique_img_'+type_]
feat=[]
for i in mydata:
    # Image filename is the last path component of the unique_img entry.
    img=i.split('/')[-1]
    ftr = extract(path_to_images+'/'+img)
    feat.append(ftr)
    # BUG FIX: `feat` is a plain list (no .shape method), and the original
    # `bre` was a NameError — apparently a truncated `break`. Print the
    # accumulated feature shape and stop after the first image as intended.
    print (np.asarray(feat).shape)
    break

# Persist the extracted features for later training runs.
h5f_data = h5py.File(features_path + '/'+type_+'.h5', 'w')
h5f_data.create_dataset('images_'+ type_, data=np.array(feat))
h5f_data.close()
h5f_data.close()
| StarcoderdataPython |
113549 | from types import TracebackType
from typing import Dict, Optional, Type, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from aiohttp.client import ClientSession
from warnings import warn
from neispy.error import ExceptionsMapping
class NeispyRequest:
    """Low-level async HTTP client for the NEIS Open API (open.neis.go.kr)."""

    BASE = "https://open.neis.go.kr/hub"

    def __init__(
        self,
        KEY: Optional[str],
        Type: Literal["json", "xml"],
        pIndex: int,
        pSize: int,
        session: Optional[ClientSession],
        only_rows: bool = True,
    ) -> None:
        # API key; when absent, the service falls back to its sample key.
        self.KEY = KEY
        if not KEY:
            warn("API키가 없습니다, 샘플키로 요청합니다", UserWarning)
        # Page index / page size sent with every request.
        self.pIndex = pIndex
        self.pSize = pSize
        # Response format requested from the API ("json" or "xml").
        self.Type = Type
        # Optional externally-managed aiohttp session; created lazily otherwise.
        self.session = session
        # When True, request() unwraps responses down to the "row" payload.
        self.only_rows = only_rows

    def _default_params(self) -> Dict[str, Union[str, int]]:
        # Query parameters common to every endpoint; KEY is added only if set.
        default_params = {
            "pIndex": self.pIndex,
            "pSize": self.pSize,
            "type": self.Type,
        }
        if self.KEY:
            default_params["KEY"] = self.KEY
        return default_params

    async def request(
        self,
        method: str,
        endpoint: str,
        params: Dict[str, Union[str, int]],
    ):
        # Perform one API call; caller-supplied params override the defaults.
        URL = self.BASE + endpoint
        if not self.session:
            self.session = ClientSession()
        default_params = self._default_params()
        default_params.update(params)
        async with self.session.request(method, URL, params=default_params) as response:
            # NOTE(review): content_type=None disables aiohttp's MIME check —
            # presumably the API serves JSON with a non-JSON content type; confirm.
            data = await response.json(content_type=None)
            # A top-level "RESULT" object signals an API-level status/error.
            if data.get("RESULT"):
                result = data["RESULT"]
                code = result["CODE"]
                # "INFO-000" is the success code; anything else maps to an exception.
                if code != "INFO-000":
                    msg = result["MESSAGE"]
                    raise ExceptionsMapping[result["CODE"]](code, msg)
            if self.only_rows:
                # NOTE(review): relies on dict insertion order — the first value
                # is a list whose second element carries the "row" payload; confirm
                # this holds for every endpoint.
                return list(data.values())[0][1]["row"]
            return data

    # One thin wrapper per NEIS endpoint; all share the request() plumbing.
    async def get_schoolInfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/schoolInfo", params)

    async def get_mealServiceDietInfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/mealServiceDietInfo", params)

    async def get_SchoolSchedule(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/SchoolSchedule", params)

    async def get_acaInsTiInfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/acaInsTiInfo", params)

    async def get_elsTimetable(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/elsTimetable", params)

    async def get_misTimetable(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/misTimetable", params)

    async def get_hisTimetable(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/hisTimetable", params)

    async def get_spsTimetable(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/spsTimetable", params)

    async def get_classInfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/classInfo", params)

    async def get_schoolMajorinfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/schoolMajorinfo", params)

    async def get_schulAflcoinfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/schulAflcoinfo", params)

    async def get_tiClrminfo(self, params: Dict[str, Union[str, int]]):
        return await self.request("GET", "/tiClrminfo", params)

    async def __aenter__(self):
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ):
        # Close the (possibly lazily-created) session on context exit.
        if self.session:
            await self.session.close()
| StarcoderdataPython |
1773561 | <reponame>HtrTech/SnapMap-OSINT
#!/usr/bin/env python3
# Inspired by https://github.com/HtrTech/SnapMap/ & https://github.com/HtrTech/snap-map-private-api/
# Created by <NAME>
#
#
import requests, time, argparse, os, json
from geopy.geocoders import Nominatim
def parse_args():
    """Parse command-line options: required --address, optional --radius (meters)."""
    # BUG FIX: the description said "Instagram OSINT tool" — this is the
    # SnapMap (Snapchat) OSINT tool, so the --help output was misleading.
    parser = argparse.ArgumentParser(description="SnapMap OSINT tool")
    parser.add_argument("--address", help="Address", required=True)
    parser.add_argument("--radius", help="Radius of area, default is 5000", required=False, default=5000, type=int)
    return parser.parse_args()
def address_to_coordinates(address):
    """Geocode *address* via Nominatim and return its (latitude, longitude)."""
    location = Nominatim(user_agent="Map").geocode(address)
    return location.latitude, location.longitude
def download_contents(data):
    """Download every media thumbnail listed in a getPlaylist response *data*."""
    i = 0
    l = len(data["manifest"]["elements"])
    print(f"Downloading {l} media items")
    for value in data["manifest"]["elements"]:
        # Infer the file extension from the URL's last dot-separated segment.
        # NOTE(review): breaks if the URL carries query parameters — confirm.
        filetype = value["snapInfo"]["streamingThumbnailInfo"]["infos"][0]["thumbnailUrl"].split(".")[-1]
        with open(f"{i}.{filetype}", "wb") as f:
            f.write(requests.get(value["snapInfo"]["streamingThumbnailInfo"]["infos"][0]["thumbnailUrl"]).content)
        i += 1
        # Throttle requests between downloads.
        time.sleep(.5)
def export_json(data):
    """Serialize *data* as JSON to snapmap_data.json in the current directory."""
    filename = "snapmap_data.json"
    with open(filename, "w") as f:
        f.write(json.dumps(data))
    # BUG FIX: the original f-string had no placeholder and always printed
    # the literal "(unknown)"; report the actual file name.
    print(f"Wrote JSON data to file {filename}")
def getEpoch():
    # Fetch the current tile-set epoch required by subsequent getPlaylist calls.
    # NOTE(review): assumes tileSetInfos[1] is the wanted tile-set flavor — confirm.
    return requests.post('https://ms.sc-jpl.com/web/getLatestTileSet',headers={'Content-Type':'application/json'},data='{}').json()['tileSetInfos'][1]['id']['epoch']
def main():
    """Fetch the SnapMap playlist for an address and save media + JSON locally."""
    args = parse_args()
    # NOTE(review): os.mkdir raises FileExistsError if the folder already
    # exists, so rerunning for the same address crashes — confirm intent.
    os.mkdir(args.address + "-Snap-map")
    os.chdir(args.address + "-Snap-map")
    lat, lon = address_to_coordinates(args.address)
    # The server expects a raw JSON body, built here by string concatenation.
    post_data = '{"requestGeoPoint":{"lat":'+str(lat)+',"lon":'+str(lon)+'},"tileSetId":{"flavor":"default","epoch":'+str(getEpoch())+',"type":1},"radiusMeters":'+str(args.radius)+'}'
    r = requests.post("https://ms.sc-jpl.com/web/getPlaylist", headers={"Content-Type":"application/json","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0"},data=post_data)
    export_json(r.json())
    download_contents(r.json())

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3375560 | """
The experiment MAIN for Communities and Crime.
* Run the file and the CC experiments will complete
* See compas experiment file for more details on how to read results.
"""
import warnings
warnings.filterwarnings('ignore')
from adversarial_models import *
from utils import *
from get_data import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
import lime
import lime.lime_tabular
import shap
from copy import deepcopy
# Set up experiment parameters
params = Params("model_configurations/experiment_params.json")
np.random.seed(params.seed)
X, y, cols = get_and_preprocess_cc(params)
# add unrelated columns, setup
X['unrelated_column_one'] = np.random.choice([0,1],size=X.shape[0])
X['unrelated_column_two'] = np.random.choice([0,1],size=X.shape[0])
features = [c for c in X]
race_indc = features.index('racePctWhite numeric')
unrelated_indcs = features.index('unrelated_column_one')
unrelated_indcs1 = features.index('unrelated_column_two')
X = X.values
###
## The models f and psi for CC. We discriminate based on race for f and concider two RANDOMLY DRAWN features to display in psi
#
# the biased model
class racist_model_f:
    # Decision rule: classify negatively if race is black
    def predict(self,X):
        # params and race_indc are module-level globals set up above.
        return np.array([params.negative_outcome if x[race_indc] > 0 else params.positive_outcome for x in X])

    def predict_proba(self, X):
        # One-hot encode the hard predictions to mimic a probabilistic classifier.
        return one_hot_encode(self.predict(X))

    def score(self, X,y):
        # Accuracy of the decision rule against labels y.
        return np.sum(self.predict(X)==y) / len(X)
# the display model with one unrelated feature
class innocuous_model_psi:
    # Decision rule: classify according to randomly drawn column 'unrelated column'
    def predict_proba(self, X):
        # Depends only on the unrelated column — never on race.
        return one_hot_encode(np.array([params.negative_outcome if x[unrelated_indcs] > 0 else params.positive_outcome for x in X]))
# the display model with two unrelated features
class innocuous_model_psi_two:
    # Display model: XOR of thresholds on the two unrelated columns.
    def predict_proba(self, X):
        # Using 0.5 to make it easier to detect decision boundary on perturbation
        A = np.where(X[:,unrelated_indcs] > .5, params.positive_outcome, params.negative_outcome)
        B = np.where(X[:,unrelated_indcs1] < -.5, params.positive_outcome, params.negative_outcome)
        preds = np.logical_xor(A, B).astype(int)
        return one_hot_encode(preds)
#
##
###
def experiment_main():
    """
    Run through experiments for LIME/SHAP on CC using both one and two unrelated features.
    * This may take some time given that we iterate through every point in the test set
    * We print out the rate at which features occur in the top three features
    """
    xtrain,xtest,ytrain,ytest = train_test_split(X,y,test_size=0.1)
    # Standardize using training statistics only.
    ss = StandardScaler().fit(xtrain)
    xtrain = ss.transform(xtrain)
    xtest = ss.transform(xtest)

    print ('---------------------')
    print ("Beginning LIME CC Experiments....")
    print ("(These take some time to run because we have to generate explanations for every point in the test set) ")
    print ('---------------------')

    # Train the adversarial model for LIME with f and psi
    adv_lime = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain, categorical_features=[features.index('unrelated_column_one'),features.index('unrelated_column_two')], feature_names=features, perturbation_multiplier=30)
    adv_explainer = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_lime.get_column_names(), discretize_continuous=False, categorical_features=[features.index('unrelated_column_one'),features.index('unrelated_column_two')])

    # Explain every point in the test set.
    explanations = []
    for i in range(xtest.shape[0]):
        explanations.append(adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list())

    # Display Results
    print ("LIME Ranks and Pct Occurances (1 corresponds to most important feature) for one unrelated feature:")
    print (experiment_summary(explanations, features))
    print ("Fidelity:", round(adv_lime.fidelity(xtest),2))

    # Repeat the same thing for two features
    adv_lime = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi_two()).train(xtrain, ytrain, feature_names=features, perturbation_multiplier=30, categorical_features=[features.index('unrelated_column_one'),features.index('unrelated_column_two')])
    adv_explainer = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_lime.get_column_names(), discretize_continuous=False, categorical_features=[features.index('unrelated_column_one'),features.index('unrelated_column_two')])

    explanations = []
    for i in range(xtest.shape[0]):
        explanations.append(adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list())

    print ("LIME Ranks and Pct Occurances two unrelated features:")
    print (experiment_summary(explanations, features))
    print ("Fidelity:", round(adv_lime.fidelity(xtest),2))

    print ('---------------------')
    print ('Beginning SHAP CC Experiments....')
    print ('---------------------')

    #Setup SHAP
    background_distribution = shap.kmeans(xtrain,10)
    adv_shap = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain, feature_names=features)
    adv_kerenel_explainer = shap.KernelExplainer(adv_shap.predict, background_distribution)
    explanations = adv_kerenel_explainer.shap_values(xtest)

    # format for display
    formatted_explanations = []
    for exp in explanations:
        formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])

    print ("SHAP Ranks and Pct Occurances one unrelated features:")
    print (experiment_summary(formatted_explanations, features))
    print ("Fidelity:",round(adv_shap.fidelity(xtest),2))

    # Repeat SHAP with the two-feature display model.
    background_distribution = shap.kmeans(xtrain,10)
    adv_shap = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi_two()).train(xtrain, ytrain, feature_names=features)
    adv_kerenel_explainer = shap.KernelExplainer(adv_shap.predict, background_distribution)
    explanations = adv_kerenel_explainer.shap_values(xtest)

    # format for display
    formatted_explanations = []
    for exp in explanations:
        formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])

    print ("SHAP Ranks and Pct Occurances two unrelated features:")
    print (experiment_summary(formatted_explanations, features))
    print ("Fidelity:",round(adv_shap.fidelity(xtest),2))
    print ('---------------------')

if __name__ == "__main__":
    experiment_main()
| StarcoderdataPython |
1648626 | <filename>core/models.py
from django.db import models
class Post(models.Model):
    # Post title. NOTE(review): max_length=43 is an unusual limit — confirm intent.
    name = models.CharField(max_length=43)
    # Free-form post content.
    body = models.TextField()

    def __str__(self):
        # Human-readable representation used by the Django admin and shell.
        return self.name
| StarcoderdataPython |
182663 | <reponame>jkennedyvz/DeepFaceLive
from collections import Iterable
class AAxes(Iterable):
    # axes: tuple of unique non-negative ints, or None for "all axes".
    # ndim: number of axes; _inversed: lazily-computed cache for inversed().
    __slots__ = ['axes','ndim','_inversed']

    def __init__(self, axes, shape_ndim=None):
        """
        Constructs AAxes from user argument

        arguments

         axes       AAxes
                    Int
                    Iterable of ints
                    None

         shape_ndim(None)   provide shape_ndim if axes contain negative values

        can raise an errors during the construction

        AAxes supports:

        A+B : concat A_axes with B_axes
        A-B : removes B_axes from A_axes
        """
        if isinstance(axes, AAxes):
            # Copy constructor: share the underlying tuple and cache.
            self.axes = axes.axes
            self.ndim = axes.ndim
            self._inversed = axes._inversed
        elif axes is None:
            # "None-axes": represents all axes; most operations are disallowed.
            self.axes = None
            self.ndim = None
            self._inversed = None
        else:
            # Normalize a bare int to a 1-tuple.
            if not isinstance(axes, Iterable):
                axes = (axes,)

            if isinstance(axes, Iterable):
                valid_axes = []
                for x in axes:
                    if x is None:
                        raise ValueError(f'Incorrent value {x} in axes {axes}')
                    x = int(x)
                    # Negative axes are resolved against shape_ndim.
                    if x < 0:
                        if shape_ndim is None:
                            raise ValueError(f'Incorrent value {x} in axes {axes}, or provide shape_ndim')
                        x = shape_ndim + x

                    if x in valid_axes:
                        raise ValueError(f'Axes must contain unique values.')
                    valid_axes.append(x)

                self.axes = tuple(valid_axes)
                self.ndim = len(self.axes)
                self._inversed = None

    def is_none_axes(self):
        """
        returns True if AAxes is constructed with (None) argument, i.e. all-axes
        """
        return self.axes is None

    def sorted(self) -> 'AAxes':
        """
        returns sorted AAxes
        """
        return AAxes(sorted(self.axes))

    def swapped_axes(self, axis_a, axis_b) -> 'AAxes':
        # Return a copy with the positions of axis_a and axis_b exchanged;
        # negative positions index from the end.
        x = list(self.axes)
        if axis_a < 0:
            axis_a = len(x) + axis_a
        if axis_b < 0:
            axis_b = len(x) + axis_b

        x[axis_b], x[axis_a] = x[axis_a], x[axis_b]
        return AAxes( tuple(x) )

    def inversed(self) -> 'AAxes':
        """
        Returns inversed axes order

        Example:

         for (0,2,3,1) returns (0,3,1,2)
        """
        if self.is_none_axes():
            raise Exception(f'none-axes does not support inversed(). Handle none-axes by calling .is_none_axes()')
        # Cached: computed once per instance.
        if self._inversed is None:
            # Map each axis value to its position, then read positions in order.
            x = { axis:i for i,axis in enumerate(self.axes) }
            t = []
            for i in range(self.ndim):
                axis = x.get(i, None)
                if axis is None:
                    raise Exception(f'axes {self.axes} are inconsistent to do inverse order.')
                t.append(axis)
            self._inversed = AAxes(t)
        return self._inversed

    def __hash__(self): return self.axes.__hash__()

    def __eq__(self, other):
        # Equal to another AAxes or any iterable with the same axis tuple.
        if isinstance(other, AAxes):
            return self.axes == other.axes
        elif isinstance(other, Iterable):
            return self.axes == tuple(other)
        return False

    def __iter__(self):
        if self.is_none_axes():
            raise Exception(f'none-axes does not support iteration. Handle none-axes by calling .is_none_axes()')
        return self.axes.__iter__()

    def __len__(self): return self.ndim

    def __getitem__(self,key):
        # Slicing yields a new AAxes; single indexing yields the int axis.
        if self.is_none_axes():
            raise Exception(f'none-axes does not support indexing. Handle none-axes by calling .is_none_axes()')
        elif isinstance(key, slice):
            return AAxes(self.axes[key])
        return self.axes[key]

    def __radd__(self, o):
        # iterable + AAxes -> concatenation (o first).
        if isinstance(o, Iterable):
            return AAxes( tuple(o) + self.axes)
        else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes append')

    def __add__(self, o):
        # AAxes + iterable -> concatenation (self first).
        if isinstance(o, Iterable):
            return AAxes( self.axes + tuple(o) )
        else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes append')

    def __rsub__(self, o):
        # iterable - AAxes -> axes of o that are not in self (order of o kept).
        if isinstance(o, Iterable):
            new_axes = []
            for axis in o:
                if axis not in self.axes:
                    new_axes.append(axis)
            return AAxes(new_axes)
        else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes substraction')

    def __sub__(self, o):
        # AAxes - iterable -> axes of self that are not in o (order kept).
        if isinstance(o, Iterable):
            new_axes = []
            o_axes = tuple(o)
            for axis in self.axes:
                if axis not in o_axes:
                    new_axes.append(axis)
            return AAxes(new_axes)
        else:
            raise ValueError(f'unable to use type {o.__class__} in AAxes substraction')

    def __str__(self):
        if self.is_none_axes():
            return '(None)'
        return str(self.axes)

    def __repr__(self): return 'AAxes' + self.__str__()
__all__ = ['AAxes'] | StarcoderdataPython |
3382897 | import logging
from concurrent.futures import ThreadPoolExecutor
from os import listdir
from os import path as osp
import netifaces
import defaults
import errors
import utils.async
from system.drive_manager import DRIVE_MANAGER
from API.handlers import APIHandler
from tornado.concurrent import run_on_executor
from transitions import MachineError
from API import stitcher_api
from clientmessenger import CLIENT_MESSENGER
from utils.settings_manager import SETTINGS
from debug.systemmonitor import MONITOR
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
LOG_FOLDER_URL = "log"
LOG_PYTHON_LOG_NAME = "python"
class DebugAPI(APIHandler):
"""REST interface for debugging / testing
"""
log_executor = ThreadPoolExecutor(1)
stitcher_executor = stitcher_api.StitcherAPI.stitcher_executor
executor = ThreadPoolExecutor(1)
def __init__(self, extra):
"""Init
"""
self.server = extra["server"]
self.stitcher = extra["video_stitcher"]
self.project_manager = extra["project_manager"]
self.output_manager = extra["output_manager"]
self.preset_manager = extra["preset_manager"]
self.camera = extra["camera"]
self.monitor = MONITOR
@run_on_executor
def list_logs(self, parameters=None):
from utils.log import LOG_FILES
logs = {}
for logName, logInfo in LOG_FILES.iteritems():
logs[logName] = "/{}/{}".format(LOG_FOLDER_URL, logInfo[0])
if not SETTINGS.python_log_file:
del logs[LOG_PYTHON_LOG_NAME]
return logs
@run_on_executor
def stop_server(self, parameters=None):
utils.async.delay(0.5, self.server.force_stop)
@run_on_executor(executor='log_executor')
def log(self, parameters=None):
logger.info(parameters.get("message"))
@run_on_executor
def send_error(self, parameters=None):
CLIENT_MESSENGER.send_error(errors.VSError(parameters["message"]))
@run_on_executor
def send_event(self, parameters=None):
CLIENT_MESSENGER.send_event(parameters["name"], parameters.get("payload"))
@run_on_executor
def clear_messages(self, parameters=None):
CLIENT_MESSENGER.clear()
@run_on_executor
def get_drive_list(self, parameters):
"""Returns the available storage devices
Returns:
- ``InternalError``
- Otherwise: a list with 0 or more drives.
Note:
Calling this function can create some overhead.
"""
return self.monitor.storage()
@run_on_executor
def get_drive_info(self, parameters):
"""Returns the storage drive information
Returns:
- ``InternalError``
- Structure::
{
'results': {
'used': string,
'percent': float,
'free': string,
'label': string,
'fs': string,
'device': string,
'mountpoint': string,
'total': string
}
}
Note:
Calling this function can create some overhead
"""
try:
drive = parameters['drive']
except:
raise errors.InvalidParameter('No specified drive')
mp_stats = self.monitor.drive_info(drive)
if mp_stats is None:
raise errors.InternalError(
'No information found for drive {}'.format(parameters))
@run_on_executor
def get_available_percentage(self, parameters):
"""Returns the available size of a selected drive in percentage
Args:
parameters(JSON): Should containt the field 'mountpoint'
specifying the target drive
Returns:
- ``InternalError``
- Otherwise: the available percentage of the drive.
Note:
Calling this function can create some overhead
"""
return self.monitor.get_available_percentage(parameters)
def get_network_adapters(self, parameters):
"""Returns the available network adapters.
Returns:
- ``InternalError``
- Otherwise: a list of 0 or more network adapters.
"""
return netifaces.interfaces()
@run_on_executor
def get_hardware_status(self, parameters=None):
config = {"hardware": self.monitor.status()}
return config
# Camera
@run_on_executor
def connect_camera(self, parameters=None):
self.server.camera.t_force_connect()
@run_on_executor
def disconnect_camera(self, parameters=None):
try:
self.server.camera.t_disconnect()
except MachineError:
pass
@run_on_executor
def simulate_camera_calibration_files(self, parameters=None):
with open("./test/test_data/calibration_rig_parameters.json", "r") as rig_params_file:
rig_parameters = rig_params_file.read()
self.camera.rig_parameters = rig_parameters
@run_on_executor
def force_update_firmware(self, parameters):
return self.camera.force_update_firmware(str(parameters.get("name")) if parameters is not None else None)
@run_on_executor
def get_firmware_list(self, parameters=None):
files = [wfile for wfile in listdir(defaults.FIRMWARE_DIR_PATH)
if osp.splitext(wfile)[1] == defaults.FIRMWARE_EXTENSION]
return {"entries": files}
# Stitcher and profiler
@run_on_executor(executor='stitcher_executor')
def start_profiling(self, parameters=None):
"""Starts the profiling.
"""
self.output_manager.get_output("profiling").start()
@run_on_executor(executor='stitcher_executor')
def stop_profiling(self, parameters=None):
"""Stop the profiling.
"""
self.output_manager.get_output("profiling").stop()
@run_on_executor(executor='stitcher_executor')
def reset_profiling(self, parameters=None):
"""Reset the profiling.
"""
self.output_manager.get_output("profiling").reset()
@run_on_executor(executor='stitcher_executor')
def get_status(self, parameters=None):
"""Get the debugging result.
result={
"inputs": {"latency": "RTMP latency"},
"streaming": {"latency": "RTMP latency"},
"preview": {"latency": "RTMP latency"},
"profiling": {
"fps": "frame rate",
"cpu": "CPU usage",
"gpu": "GPU usage",
"enc": "NVENC usage"
}
}
"""
input_status = { "latency" : self.project_manager.get_latency() }
streaming_status = { "latency" : self.output_manager.get_output("stream").get_latency() }
preview_status = { "latency" : self.output_manager.get_output("preview").get_latency() }
profiling_status = self.output_manager.get_output("profiling").get_statistics()
status = {"inputs": input_status,
"streaming": streaming_status,
"preview": preview_status,
"profiling": profiling_status}
return status
@run_on_executor(executor='stitcher_executor')
def start_stream_with_json_preset(self, parameters):
self.output_manager.start_output("stream", parameters)
@run_on_executor(executor='stitcher_executor')
def add_managed_drive(self, parameters):
drive_path = parameters.get('drive_path')
if not drive_path:
return
DRIVE_MANAGER.add_managed_drive(drive_path)
# Settings
def get_setting(self, key):
return getattr(SETTINGS, key)
def set_setting(self, parameter):
for key, value in parameter.iteritems():
setattr(SETTINGS, key, value)
# exposure compensation
"""
@api(name="StartExposureCompensation",
endpoint="algorithm.start_exposure_compensation",
description="Start exposure compensation algorithm",
errors=[errors.AlgorithmError, errors.StitcherError]
)
"""
@run_on_executor
def start_exposure_compensation(self, parameters=None):
if self.stitcher.algorithm_manager:
self.stitcher.algorithm_manager.start_exposure_compensation()
"""
@api(name="StopExposureCompensation",
endpoint="algorithm.stop_exposure_compensation",
description="Stop exposure compensation algorithm",
errors=[errors.AlgorithmError, errors.StitcherError]
)
"""
@run_on_executor
def stop_exposure_compensation(self, parameters=None):
if self.stitcher.algorithm_manager:
self.stitcher.algorithm_manager.stop_exposure_compensation()
"""
@api(name="ResetExposureCompensation",
endpoint="algorithm.reset_exposure_compensation",
description="Reset exposure compensation",
errors=[errors.StitcherError]
)
"""
@run_on_executor
def reset_exposure_compensation(self, parameters=None):
self.project_manager.reset_exposure()
"""
@api(name="StartMetadataProcessing",
endpoint="algorithm.start_metadata_processing",
description="Start exposure & IMU metadata processing",
errors=[errors.StitcherError]
)
"""
@run_on_executor
def start_metadata_processing(self, parameters=None):
self.project_manager.start_metadata_processing()
"""
@api(name="StopMetadataProcessing",
endpoint="algorithm.stop_metadata_processing",
description="Stop exposure & IMU metadata processing",
errors=[errors.StitcherError]
)
"""
@run_on_executor
def stop_metadata_processing(self, parameters=None):
self.project_manager.stop_metadata_processing()
| StarcoderdataPython |
3294204 | <filename>hood/urls.py
from django.conf.urls import url,include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url('^$',views.index,name = 'index'),
url(r'^profile/$',views.profile,name='profile'),
url(r'^edit/profile/$',views.edit_profile,name='edit_profile'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^hoods/new/post/(\d+)$', views.new_post, name='new-post'),
url(r'^hoods/new/comment/(\d+)',views.newcomment, name='newcomment'),
url(r'^location$', views.location, name='location'),
url(r'^hoods/new/business/(\d+)$',views.post_business, name='new-business'),
url(r'^hood/(\d+)',views.hood,name='hood'),
url(r'^new/hood$', views.new_hood, name='new-hood')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | StarcoderdataPython |
4839136 | # Import required libraries
import argparse
import pathlib
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import plotly.express as px
import psycopg2
from dash.dash import no_update
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from utils.config import *
from utils.nhl_proxy import NHLProxy
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
app.title = 'NHL Stats'
server = app.server
# Create app layout
app.layout = html.Div(
[
dcc.Store(id="aggregate_data"),
# empty Div to trigger javascript file for graph resizing
html.Div(id="output-clientside"),
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("nhl-logo.png"),
id="plotly-image",
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div(
[
html.Div(
[
html.H3(
"NHL Stats",
style={"margin-bottom": "0px"},
),
html.H5(
"Proyecto Bases de Datos", style={"margin-top": "0px"}
),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.A(
html.Button("Repo", id="learn-more-button"),
href="https://github.com/rudyn2/nhl-analysis",
)
],
className="one-third column",
id="button",
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "25px"},
),
html.Div(
[
html.Div(
[
html.P(
"Filtra por temporada",
className="control_label",
),
dcc.Dropdown(
id="season_selector",
options=[
{'label': f'{i}-{i + 1}', 'value': i} for i in range(2015, 2019)
],
clearable=False,
value=2015
),
html.P(
"Filtra por tipo de temporada",
className="control_label",
),
dcc.RadioItems(
id="type_season_selector",
options=[
{"label": "Regular ", "value": "regular"},
{"label": "Play-Off", "value": "play-off"}
],
value="regular",
labelStyle={"display": "inline-block"},
className="dcc_control",
),
html.P(
"Filtra por equipo",
className="control_label",
),
dcc.Dropdown(
id='team_selector',
options=[
{'label': f'Equipo-{i}', 'value': i} for i in range(10)
],
placeholder="Equipo A"
),
],
className="pretty_container four columns",
id="cross-filter-options",
# style={'height': '500px'}
),
html.Div(
[
html.Div(
[
html.Div(
[html.H6(id="total_plays", children=10), html.P("Partidos jugados")],
id="wells",
className="mini_container",
),
html.Div(
[html.H6(id="total_win", children=6), html.P("Partidos ganados")],
id="gas",
className="mini_container",
),
html.Div(
[html.H6(id="total_lost", children=4), html.P("Partidos perdidos")],
id="oil",
className="mini_container",
),
html.Div(
[html.H6(id="overtime_loss", children=0), html.P("Overtime loss")],
id="water",
className="mini_container",
),
],
id="info-container",
className="row container-display",
),
html.Div(
[dash_table.DataTable(
id='team_stats_table',
style_cell={'textAlign': 'center',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'whiteSpace': 'normal'},
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
fixed_rows={'headers': True},
style_table={'height': '30%', 'overflowY': 'auto'}
# id='table',
# columns=[{"name": i, "id": i} for i in df.columns],
# data=df.to_dict('records'),
)],
id="countGraphContainer",
className="pretty_container",
),
],
id="right-column",
className="eight columns",
),
],
className="row flex-display",
),
html.Div(
[
html.Div(
[
dcc.Graph(id='league_ranking_graph')],
className="pretty_container twelve columns",
)
],
id='league_ranking_div',
className="row flex-display"
),
html.Div(
[
html.Div(
[
html.H5("Skater stats"),
dash_table.DataTable(
id='skater_stats',
style_cell={'textAlign': 'center',
'minWidth': '50px', 'width': '40px', 'maxWidth': '180px',
'whiteSpace': 'normal'},
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold',
'z-index': '5px'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
fixed_rows={'headers': True},
style_table={'height': '30%', 'overflowY': 'auto'}
)],
className="pretty_container six columns",
),
html.Div(
[
html.H5("Goalie Stats"),
dash_table.DataTable(
id='goalie_stats',
style_cell={'textAlign': 'center',
'minWidth': '50px', 'width': '100px', 'maxWidth': '180px',
'whiteSpace': 'normal'},
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
fixed_rows={'headers': True},
style_table={'height': '30%', 'overflowY': 'auto'}
)],
className="pretty_container six columns",
),
],
id="detail_team_stats",
className="row flex-display",
)
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
def abbreviation2name(s: str, map_dict: dict) -> str:
if '-' in s:
return map_dict[s.split('-')[0]]
return map_dict[s]
def process_stats(df_stats: pd.DataFrame, team: str, keep_cols: list, team_dict: dict) -> pd.DataFrame:
sub_df = df_stats.copy()
sub_df['team'] = sub_df['team'].map(lambda x: abbreviation2name(x, team_dict))
sub_df = sub_df[sub_df['team'] == team]
sub_df = sub_df[keep_cols]
return sub_df
@app.callback(Output("team_selector", "options"),
[Input("season_selector", "value"),
Input("type_season_selector", "value")])
def update_team_selector(season, type_season):
sub_df = team_stats[team_stats['season'] == int(f'{season}{season + 1}')]
teams = list(sub_df['team'].values)
options = [{'label': team, 'value': team} for team in teams]
return options
@app.callback([Output("team_stats_table", "data"),
Output("team_stats_table", "columns"),
Output("total_plays", "children"),
Output("total_win", "children"),
Output("total_lost", "children"),
Output("overtime_loss", "children"),
Output("info-container", "style"),
Output("league_ranking_graph", "figure"),
Output("league_ranking_div", "style")],
[Input("season_selector", "value"),
Input("type_season_selector", "value"),
Input("team_selector", "value")])
def update_team_stats_table(season, type_season, team):
if not season:
raise PreventUpdate
sub_df = team_stats[team_stats['season'] == int(f'{season}{season + 1}')]
if team:
# return metrics of a specific team
sub_df = sub_df[sub_df['team'] == team]
total = sub_df['gp']
win = sub_df['w']
lost = sub_df['l']
ot = sub_df['ot']
sub_df = sub_df[Team.KEEP_COLS]
new_info_container_class = {'display': 'flex'}
# league ranking
league_ranking_style = {'display': 'none'}
league_ranking_graph = no_update
else:
# return entire league and hide mini containers
sub_df = sub_df[League.KEEP_COLS]
sub_df.sort_values(by='w', axis=0, inplace=True, ascending=False)
total = win = lost = ot = no_update
# league ranking
new_info_container_class = {'display': 'none'}
league_ranking_style = {'display': 'flex'}
sub_df = sub_df.rename(columns=Team.NAME_MAPPING)
fig = px.bar(sub_df, y='Partidos ganados', x='Equipo', text='Partidos ganados')
fig.update_traces(texttemplate='%{Partidos ganados:.2s}', textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
league_ranking_graph = fig
# data-table attributes
sub_df = sub_df.rename(columns=Team.NAME_MAPPING)
cols = [{"name": i, "id": i} for i in sub_df.columns]
return sub_df.to_dict('records'), cols, total, win, lost, ot, \
new_info_container_class, league_ranking_graph, league_ranking_style
@app.callback([Output("skater_stats", "data"),
Output("goalie_stats", "data"),
Output("skater_stats", "columns"),
Output("goalie_stats", "columns"),
Output("detail_team_stats", "style")],
[Input("season_selector", "value"),
Input("type_season_selector", "value"),
Input("team_selector", "value")])
def update_skater_stats(season, type_season, team):
if not season:
raise PreventUpdate
if team:
sub_df_skater = skater[skater['season'] == int(f'{season}{season + 1}')].copy()
sub_df_goalie = goalie[goalie['season'] == int(f'{season}{season + 1}')].copy()
sub_df_skater = process_stats(sub_df_skater, team, Skater.KEEP_COLS, team_abbreviation_dict)
sub_df_goalie = process_stats(sub_df_goalie, team, Goalie.KEEP_COLS, team_abbreviation_dict)
sub_df_skater = sub_df_skater.rename(columns=Skater.NAME_MAPPING)
sub_df_goalie = sub_df_goalie.rename(columns=Goalie.NAME_MAPPING)
skater_cols = [{"name": i, "id": i} for i in sub_df_skater.columns]
goalie_cols = [{"name": i, "id": i} for i in sub_df_goalie.columns]
return sub_df_skater.to_dict("records"), sub_df_goalie.to_dict("records"), \
skater_cols, goalie_cols, {'display': 'flex'}
else:
skater_data = goalie_data = skater_cols = goalie_cols = no_update
skater_goalie_stats_display = {'display': 'none'}
return skater_data, goalie_data, skater_cols, goalie_cols, skater_goalie_stats_display
if __name__ == "__main__":
import json
parser = argparse.ArgumentParser(description='NHL Stats App Web.')
parser.add_argument('--mode', type=str, default='dev',
help='mode [dev or production]')
parser.add_argument('--creds', type=str, default='params_dev.json', help='Path to credential json.')
args = parser.parse_args()
mode = args.mode
# init parameters and data
with open(args.creds, 'r') as f:
params = json.load(f)
conn = psycopg2.connect(**params)
n = NHLProxy(conn, 'queries')
print("Connection ready!")
team_stats = n.get_team_stats()
skater = n.get_skater_stats()
goalie = n.get_goalie_stats()
team_abbreviation_dict = n.get_team_abbreviations()
app_host = '0.0.0.0' if mode == 'production' else 'localhost'
app_debug = True if mode == 'dev' else False
app_port = 80
app.run_server(host=app_host, port=app_port, debug=app_debug)
| StarcoderdataPython |
13227 | <gh_stars>1-10
from pygame import image
class ShowFaces():
def __init__(self, filePath, colour = (0, 0, 0), posX = 0, posY = 100, resourcePath = ""):
self.filePath = filePath
self.colour = colour
self.posX = posX
self.posY = posY
self.resourcePath = resourcePath
self.image = image.load(self.resourcePath + "img/faces/" + self.filePath + ".png")
self.faceRect = self.image.get_rect()
def update(self):
self.faceRect.centerX = self.posX + self.image.get_width() / 2
self.faceRect.centerY = self.posY + self.image.get_height() / 2 | StarcoderdataPython |
3344868 | """
This submodule contains formatting utilities and formatters which will work only
on Python 3.6+. There is no inherent reasons why it would not work on earlier
version of Python, it just makes use of features that are 3.6 only – Like
f-strings – to make the code more readable. Feel free to send patches that makes
it compatible with earlier versions of python
"""
from html import escape
from typing import List
from IPython.display import HTML
from .vendor import get_repr_mimebundle
text_formatter = get_ipython().display_formatter.formatters['text/plain']
def repr(o):
"""
Alternative implementation of repr, whcih goes through IPython display system from plain-text.
We go directly though the formatter to avoid recursion complication with
get_mime_types, ans for a tiny bit more speed.
If the real repr is needed, then one need to use builtins.repr
"""
return text_formatter(o)
# This is the CSS we want to inject before each top-level object. We should
# (try-to) make it to work with most frontend, as not all frontends do support
# CSS injection, let's try to not rely on too much customisation
thecss = """
/* summary::-webkit-details-marker {
display: none;
}
summary {
text-decoration-line: underline;
text-decoration-style: dotted;
} */
.rendered_html pre, .rendered_html code {
background-color: transparent; /* bug in notebook css */
}
.rendered_html .jupyter-extra-info ul{
list-style: none;
}
.jupyter-extra-info {
background-color: hsla(0, 0%, 5%, 0.07);
padding: 0.5em;
border: thin solid silver;
}
dl.jupyter-inner-mapping-repr {
padding-left: 1em;
margin-bottom: 0;
}
dl.jupyter-inner-mapping-repr > dd {
padding-left:2em;
}
ul.jupyter-flat-container-repr li > p{
padding-left:2em;
display: inline;
padding: 0;
}
ul.jupyter-flat-container-repr, ul.jupyter-flat-container-repr ul , ul.jupyter-flat-container-repr ul il{
list-style-type: none;
display: inline;
padding-left: 0;
}
ul.jupyter-flat-container-repr > details {
display: inline-block;
margin-left: -1em;
}
ul.jupyter-flat-container-repr li{
padding-left:2em;
list-style-type: none;
}
summary > code {
display: inline
}
ul.jupyter-flat-container-repr summary {
margin-left: 0em;
display: inline-block;
}
.rendered_html ul.jupyter-flat-container-repr {
padding-left: 0px;
margin-left: 0em;
}
.jupyter-flat-container-repr details {
display: inline;
}
.jupyter-flat-container-repr details ~ p {
margin-top: 0;
display: inline;
}
.jupyter-flat-container-repr details[open] ~ p {
/*display: block;*/
}
details.jupyter-details[open] ~ .jupyter-breaking-placeholder {
display: block;
}
.jupyter-details ~ .jupyter-breaking-placeholder {
display: inline;
}
.output_subarea > ul.jupyter-flat-container-repr, .output_subarea > ul.jupyter-flat-container-repr > p {
margin-left: 1em;
}
"""
##########################################################################
# Utilities #
##########################################################################
def safe(obj):
"""
Given an object (str, or html), return an HTML version.
That is to say, if Object is already an HTML object, return it. If it's a string, escape it.
"""
if isinstance(obj, HTML):
return obj
else:
return HTML(htmlify_repr(obj))
def htmlify_repr(obj)-> str:
"""
Return a string which is safe to embed in html.
ie, if obj define rerp_html, return this, otherwise escape its text_repr
"""
return get_repr_mimebundle(obj).data.get('text/html', None) or\
escape(repr(obj))
def details(summary, details_):
if details:
rsum = safe(summary)._repr_html_()
rdetails = safe(details_)._repr_html_()
return HTML(f"<details class='jupyter-details'><summary>{rsum}</summary>{rdetails}</details>")
else:
rsum = htmlify_repr(summary)
return HTML(f"{rsum}")
def code(string):
assert isinstance(string, str)
return HTML(f"<code>{escape(string)}</code>")
def well(s):
s = safe(s)._repr_html_()
return HTML('<div class="jupyter-extra-info">' + s + '</div>')
##########################################################################
# Formatters #
##########################################################################
def html_flat_container(container: List, delims, empty) -> str:
"""Retrun an Html representation of a list with recursive HTML repr for all sub objects.
If an object does not define an html representation, fallback on plain-text.
"""
if not container:
return empty
x = []
for index, elem in enumerate(container):
last = (index == len(container) - 1)
rpr = htmlify_repr(elem)
pc = '<span class="post-comma">,</span>' if not last else ''
x.append('<li>{}{}</li>'.format(rpr, pc))
return f"""<ul class="jupyter-flat-container-repr">
<details class='jupyter-details' open>
<summary>{delims[0]}</summary>
{''.join(x)}
</details>
<span class='jupyter-breaking-placeholder'></span><p>{delims[1]}</p>
</ul>
"""
def html_formatter_for_list(t):
return html_flat_container(t, delims='[]', empty='[]')
def html_formatter_for_tuple(t):
return html_flat_container(t, delims='()', empty='()')
def html_formatter_for_set(t):
return html_flat_container(t, delims='{}', empty='set({})')
def _inner_html_formatter_for_mapping(mapping):
x = []
for key, elem in mapping.items():
mimebundle = get_repr_mimebundle(elem).data
representation = mimebundle.get(
'text/html', mimebundle.get('text/html', None)) or escape(repr(elem))
x.append(f"""<dl class='jupyter-inner-mapping-repr'>
<dt><b>{escape(str(key))}:</b></dt>
<dd>{representation}</dd>
</dl>
""")
return ''.join(x)
def html_formatter_for_mapping(mapping, *, open=True):
if not mapping:
return 'dict({})'
delims = '{}'
op = 'open' if open else ''
return f"""
<details class='jupyter-details' {op}>
<summary>{delims[0]}</summary>
{_inner_html_formatter_for_mapping(mapping)}
</details>
{delims[1]}
"""
html_formatter_for_dict = html_formatter_for_mapping
def html_formatter_for_Response(req):
import json
attrs = None
def in_f(k, v):
if k == 'headers':
return HTML(html_formatter_for_mapping(v, open=False))
else:
return v
attrs = _inner_html_formatter_for_mapping(
{k: in_f(k, v) for (k, v) in vars(req).items() if not k.startswith('_')})
try:
json_content = req.json()
return f"""
<style>
{thecss}
</style>
<details class='jupyter-details'><summary><code>{escape(repr(req))}</code></summary>
{attrs}
<details class="jupyter-details">
<summary>Content (JSON)</summary>
{_inner_html_formatter_for_mapping(json_content)}
</details>
</details>
"""
except json.JSONDecodeError:
return f"""
<style>
{thecss}
</style>
<details class='jupyter-details'><summary><code>{escape(repr(req))}</code></summary>
{attrs}
</details>
"""
def gen_help(obj):
doc = next(filter(None, (x.__doc__ for x in type(obj).mro())))
return f"""
<code title='{escape(doc)}'>{escape(repr(obj))}<code>
"""
def general_repr(obj):
return f'<style>{thecss}</style>' +\
f'<details class="jupyter-details"><summary><code>{escape(repr(obj))}<code></summary>' +\
_inner_html_formatter_for_mapping({k: v for (k, v) in vars(obj).items() if not k.startswith('_')}) +\
'</details>'
def html_formatter_for_type(obj):
try:
mro = obj.mro() # [o for o in if o is not object]
except TypeError:
mro = ()
if len(mro) > 1:
mime = get_repr_mimebundle(mro[1], include='text/html').data
return f'<style>{thecss}</style>' + \
f'<details class="jupyter-details" ><summary><code>{escape(repr(obj))}</code></summary>'\
+ well(HTML(f"""
<p><code alpha>{obj.__doc__ or ''}</code></p>
<p> Inherit from :</p>
<ul>
<li>{mime.get('text/html')}</li>
</ul>"""))._repr_html_()\
+ '</details>'
else:
return f'<style>{thecss}</style>' + f'<code>{escape(repr(obj))}</code>'
def html_formatter_for_builtin_function_or_method(obj):
ip = get_ipython()
res = {k: v for (k, v) in ip.inspector.info(obj).items() if v}
docstring = res.get('docstring')
res.pop('found')
res.pop('string_form')
res.pop('base_class')
if res.get('definition', None):
res['definition'] = code(obj.__name__ + res['definition'])
if docstring != '<no docstring>':
res['docstring'] = code(docstring)
else:
del res['docstring']
return f'<style>{thecss}</style>' + htmlify_repr(details(code(repr(obj)), well(HTML(_inner_html_formatter_for_mapping(res)))))
def html_formatter_for_module(obj):
return f'<style>{thecss}</style>' + details(code(repr(obj)), well(code(obj.__doc__ or '')))._repr_html_()
| StarcoderdataPython |
1661771 | from django.urls import path
from . import views
urlpatterns = [
path('<str:name>', views.xcl, name = "xcl")
] | StarcoderdataPython |
159180 | <reponame>firstprojectfor/FPF_python
import sys
import pygame
from pygame.sprite import Group
from game.alien import Alien
from game.bullet import Bullet
from game.settings import Settings
from game.ship import Ship
def check_event(setting: Settings, screen, ship: Ship, bullets: Group):
"""检查事件"""
for event in pygame.event.get():
check_exit_events(event)
check_key_down_events(event, setting, screen, ship, bullets)
check_key_up_events(event, ship, bullets)
def check_exit_events(event):
if event.type == pygame.QUIT:
sys.exit()
def check_key_down_events(event, setting, screen, ship, bullets):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
fire_bullet(setting, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
def fire_bullet(setting, screen, ship, bullets):
if len(bullets) < setting.bullet_count:
new_bullet = Bullet(setting, screen, ship)
bullets.add(new_bullet)
def check_key_up_events(event, ship, bullets):
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.key == pygame.K_SPACE:
pass
def update_bullets(bullets: Group):
bullets.update()
for bullet in bullets.copy():
if bullet.rect.top <= 0:
bullets.remove(bullet)
def create_fleet(setting: Settings, screen, ship, alias):
alien = Alien(setting, screen)
number_aliens_x = get_numbers_alien_x(setting, alien.rect.width)
number_row = get_number_rows(setting, ship.rect.height, alien.rect.height)
for row_number in range(0, number_row):
for alien_number in range(0, number_aliens_x):
create_alien(setting, screen, alias, row_number, alien_number)
def get_numbers_alien_x(setting, alien_width):
"""获取每行创建的外星人数量"""
available_width_x = setting.screen_width - 2 * alien_width
number_aliens_x = int(available_width_x / (alien_width * 2))
return number_aliens_x
def create_alien(setting, screen, aliens, row_number, alien_number):
alien = Alien(setting, screen)
alien_width = alien.rect.width
alien_height = alien.rect.height
alien.x = alien_width + 2 * alien_width * alien_number
alien.y = alien_height + 2 * alien_height * row_number
alien.rect.x = alien.x
alien.rect.y = alien.y
aliens.add(alien)
def get_number_rows(setting: Settings, ship_height, alien_height):
available_space_y = setting.screen_height - (5 * alien_height) - ship_height
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
def update_aliens(setting: Settings, aliens: Group):
check_fleet_edges(setting, aliens)
aliens.update()
def check_fleet_edges(setting, aliens: Group):
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(setting, aliens)
break
def change_fleet_direction(setting: Settings, aliens):
for alien in aliens.sprites():
alien.rect.y += setting.fleet_drop_factor
setting.fleet_direction *= -1
def update_screen(setting: Settings, screen, ship, bullets, aliens):
screen.fill(setting.bg_color)
for bullet in bullets.sprites():
bullet.draw_bullet()
aliens.draw(screen)
ship.blit_me()
pygame.display.flip()
| StarcoderdataPython |
3240386 | # Generated by Django 3.2.8 on 2021-10-21 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sentirsebien', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DataUNFV',
fields=[
('id', models.UUIDField(editable=False, primary_key=True, serialize=False)),
('facultad', models.CharField(max_length=255)),
('escuela', models.CharField(max_length=255)),
('codigo_estudiante', models.CharField(max_length=20)),
('correo', models.EmailField(max_length=254)),
('dni', models.CharField(blank=True, max_length=20, null=True)),
('nombre_completo', models.CharField(max_length=255)),
('estado', models.BooleanField(default=True)),
('creado', models.DateTimeField(auto_now_add=True)),
('actualizado', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-creado'],
},
),
]
| StarcoderdataPython |
1638110 | # -*- coding: utf-8 -*-
import sys
import subprocess
#
# Exceptions.
#
class ShellError(Exception):
pass
width = lambda: int(subprocess.check_output(['tput', 'cols']))
height = lambda: int(subprocess.check_output(['tput', 'lines']))
#
# Shell tables.
#
class Table:
def __init__(self, output_format='text',
page=False, auto_flush=False,
sizes=None, borders_color=None,
csv_sep=';'):
self.output_format = output_format
self.page = page
self.auto_flush = auto_flush
self.sizes = sizes
self.borders_color = borders_color
self.csv_separtor = csv_sep
self._buffer = []
def _colorize(self, color, value):
return '\033[%sm%s\033[00m' % (color, value)
def _autoflush(func):
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
if self.auto_flush:
self.flush()
return wrapper
@_autoflush
def add_border(self, sizes=None, color=None):
if self.output_format != 'text':
return
line = '+'
for idx, size in enumerate(sizes or self.sizes):
line += '-' * size
line += '+'
if color or self.borders_color:
line = self._colorize(color or self.borders_color, line)
self._buffer.append(line)
def _add_text_line(self, values, sizes=None, colors=None,
borders_color=None, indent=1, sep='|'):
sizes = sizes or self.sizes
if sizes is None:
raise ShellError('no sizes')
if len(sizes) != len(values):
raise ShellError('length of sizes is different from length of values')
lines = []
for idx, value in enumerate(values):
size = sizes[idx] - 2
if not isinstance(value, str):
value = str(value)
line_number = 0
column_lines = []
# Split column value on new line.
value_lines = value.split('\n')
for line in value_lines:
# Split line on word.
line = line.split(' ')
cur_line = ' '
while line:
word = line.pop(0)
new_line = cur_line + word + ' '
if len(new_line) > size + 2:
if cur_line == ' ':
cur_line = new_line
column_lines.append(cur_line)
cur_line = ' ' * indent + ' '
else:
cur_line += ' ' * (size + 2 - len(cur_line))
column_lines.append(cur_line)
cur_line = ' ' * indent + ' ' + word + ' '
else:
cur_line = new_line
cur_line += ' ' * (size + 2 - len(cur_line))
column_lines.append(cur_line)
# Add column lines.
for line in column_lines:
if line_number > len(lines) - 1:
# Initialize a new line.
new_line = []
for __ in range(len(sizes)):
new_column = ' ' * sizes[__]
if colors and colors[__]:
new_column = colorize(colors[idx], new_column)
new_line.append(new_column)
lines.append(new_line)
if colors and colors[idx]:
line = colorize(colors[idx], line)
lines[line_number][idx] = line
line_number += 1
border = sep if not borders_color else colorize(borders_color, sep)
self._buffer.extend(border + border.join(line) + border for line in lines)
def _add_csv_line(self, values, sep):
pass
def _add_dokuwiki_line(self, values, sep):
self._buffer.append('%s %s %s' % (
sep,
(' %s ' % sep).join([val.replace('\n', ' \\\\') for val in values]),
sep))
@_autoflush
def add_line(self, values, sizes=None, colors=None, borders_color=None, indent=1):
{'text': lambda: self._add_text_line(values, sizes, colors, borders_color, indent, sep='|'),
'dokuwiki': lambda: self._add_dokuwiki_line(map(str, values), sep='|'),
'csv': lambda: self._add_csv_line(values, self.csv_separator)
}.get(self.output_format)()
@_autoflush
def add_header(self, values, sizes=None, colors=None, borders_color=None, indent=1):
{'text': lambda: self._add_text_line(values, sizes, colors, borders_color, indent, sep='|'),
'dokuwiki': lambda: self._add_dokuwiki_line(values, sep='^'),
'csv': lambda: self._add_csv_line(values, self.csv_separator)
}.get(self.output_format)()
def flush(self):
if self.page:
import os, pydoc
os.environ['PAGER'] = 'less -c -r'
pydoc.pager('\n'.join(self._buffer))
else:
print('\n'.join(self._buffer))
self._buffer = []
| StarcoderdataPython |
3394243 | <reponame>andrewmeltzer/picframe<filename>src/picframe_blackout.py
# Project Picframe
# Copyright 2021, <NAME>, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
"""
Calculate and send a message when in a blackout window (usually meant for
turning the screen off overnight) and when coming out of blackout.
This runs as a separate process in a continual loop, sleeping until it is
next tested.
"""
import time
import datetime
from picframe_settings import PFSettings
from picframe_message import PFMessage
from picframe_env import PFEnv
class PFBlackout:
"""
See if we are in a nightly blackout window, and if so, send a message
to black out the screen.
If emerging from the blackout window, send a message indicating that.
"""
in_blackout = False
############################################################
#
# check_blackout_window
#
@staticmethod
def check_blackout_window():
"""
Check for a change in blackout status.
"""
if PFSettings.blackout_hour is None:
return 0
secs_per_min = 60
mins_per_hour = 60
hour = datetime.datetime.now().hour
minute = datetime.datetime.now().minute
now_time = hour*60 + minute
blackout_time = (PFSettings.blackout_hour * mins_per_hour) + PFSettings.blackout_minute
end_blackout_time = (PFSettings.end_blackout_hour * mins_per_hour) + PFSettings.end_blackout_minute
blackout_length = 0
# if it is blacked out across midnight and it is before midnight
# but in the blackout period
if end_blackout_time < blackout_time and now_time > blackout_time:
blackout_length = (end_blackout_time * secs_per_min) \
+ (24 * mins_per_hour) - now_time
blackout_length = (end_blackout_time + (24 * mins_per_hour) \
- now_time) * secs_per_min
# if it is blacked out across midnight and it is after midnight
# but in the blackout period
if end_blackout_time < blackout_time and now_time < end_blackout_time:
blackout_length = (end_blackout_time - now_time) * secs_per_min
# if it is not blacked out across midnight, but in the blackout period
if now_time > blackout_time and now_time < end_blackout_time:
blackout_length = (end_blackout_time - now_time) * secs_per_min
return blackout_length
############################################################
#
# blackout_main
#
@staticmethod
def blackout_main(canvas_mq):
"""
Continually loop, checking to see if the blackout status changes
and if so, send the blackout message.
Inputs:
canvas_mq: The canvas message queue
"""
PFEnv.setup_logger()
while True:
blackout_interval = PFBlackout.check_blackout_window()
if blackout_interval > 0:
if not PFBlackout.in_blackout:
# Send blackout message
canvas_mq.put(PFMessage(PFMessage.BLACKOUT))
PFEnv.logger.info("Going dark for %d seconds." % (blackout_interval,))
PFBlackout.in_blackout = True
else:
if PFBlackout.in_blackout:
# Send end blackout message
canvas_mq.put(PFMessage(PFMessage.END_BLACKOUT))
PFBlackout.in_blackout = False
# Test every 60 seconds
time.sleep(60)
| StarcoderdataPython |
3294014 | <reponame>HiAwesome/dive-into-python3-practice<filename>c02/p053_all_thing_is_object.py
# Demo for "everything is an object": a module and its functions are objects
# whose attributes (such as __doc__) can be inspected at runtime.
import c02.p044_humansize as humansize

# Call a function imported from another module.
print(humansize.approximate_size(4096, True))
print()
# Functions are objects too; their docstring is exposed via __doc__.
print(humansize.approximate_size.__doc__)

# The bare string below records this script's expected output.
"""
4.0 KiB
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
if False, use multiples of 1000
Returns: string
"""
3330012 | <reponame>m00nb0w/oghma
#!/bin/python3
import sys
grid = []
for grid_i in range(20):
grid_t = [int(grid_temp) for grid_temp in input().strip().split(' ')]
grid.append(grid_t)
dy = [1, 1, 1, 0]
dx = [-1, 0, 1, 1]
m = len(grid)
n = len(grid[0])
res = 0
for i in range(0, m):
for j in range(0, n):
for k in range(0, 4):
t = 1
flag = True
for l in range(0, 4):
if (i + dx[k] * l >= 0) and (i + dx[k] * l < m) and (j + dy[k] * l >= 0) and (j + dy[k] * l < n):
t *= grid[i + dx[k] * l][j + dy[k] * l]
else:
flag = False
break
res = max(res, t)
print(res)
| StarcoderdataPython |
1758575 | <gh_stars>1-10
from pathlib import Path
from ...graphs import Graph016
from ...utils import BaseGraphSystemTester
from ....engine_input import ValidPrefix
from ....engine import BGPSimpleAS
class Test027BadDiagram(BaseGraphSystemTester):
    """System test: runs the base graph tester against Graph016.

    Declarative configuration only -- all test logic lives in
    BaseGraphSystemTester, which reads these class attributes.
    """

    # Topology under test (presumably the "bad diagram" case -- see Graph016).
    GraphInfoCls = Graph016
    # Engine input injected into the simulation (a valid prefix announcement).
    EngineInputCls = ValidPrefix
    # Directory the base tester uses to locate/store expected results.
    base_dir = Path(__file__).parent
    # Routing policy class every AS in the graph runs.
    BaseASCls = BGPSimpleAS
import os
import sys
import math
import random

from .switch import _Switch
from .node import _Node

# Make the package's parent directory importable so the top-level ``utils``
# package can be found.  BUG FIX: the original used os.path.basename(__file__),
# which yields the *file name* (e.g. "cluster.py/.."), not a directory path;
# os.path.dirname gives the directory containing this file, as intended.
sys.path.insert(0, os.path.dirname(__file__) + os.sep + '..')
from utils import util
# TODO: latent bug
class _Cluster(object):
def __init__(self, num_switch=0, num_node_p_switch=0, num_gpu_p_node=0, num_cpu_p_node=0, mem_p_node=0):
''' Init GPU cluster with basic switch, node, gpu information'''
self.set_spec(num_switch=num_switch, num_node_p_switch=num_node_p_switch, \
num_gpu_p_node=num_gpu_p_node, num_cpu_p_node=num_cpu_p_node, mem_p_node=mem_p_node)
#for non-placement
self.switch_list = list()
#for gandiva
self.set_node_group()
# flag init
self.ir_init = False
self.init = False
def set_node_group(self, ):
self.free_nodes = list()
self.node_g = dict()
for i in [1, 2, 4, 8, 12, 16, 24, 32, 64]:
setattr(self, 'node_g{}'.format(i), list())
self.node_g[i] = getattr(self, 'node_g{}'.format(i))
def set_spec(self, num_switch=0, num_node_p_switch=0, num_gpu_p_node=0, num_cpu_p_node=0, mem_p_node=0):
self.num_switch = num_switch
self.num_node_p_switch = num_node_p_switch
self.num_gpu_p_node = num_gpu_p_node
self.num_cpu_p_node = num_cpu_p_node
self.mem_p_node = mem_p_node
self.num_node = num_switch * num_node_p_switch
self.num_gpu = self.num_node * num_gpu_p_node
self.num_cpu = self.num_node * num_cpu_p_node
self.free_gpu = self.num_gpu
self.mem = self.num_node * mem_p_node
def print_cluster_spec(self):
print('Custer Spec')
print('#ofswitch: %d, #ofnode: %d, #ofgpu: %d, #ofcpu: %d, #ofmem: %d'%(self.num_switch, self.num_node, self.num_gpu, self.num_cpu, self.mem))
def init_infra(self, num_switch=0, num_node_p_switch=0, num_gpu_p_node=0, num_cpu_p_node=0, mem_p_node=0):
assert self.init == False and self.ir_init == False, 'only init once'
self.init = True
# Init and create cluster infration entities (switches, nodes) by using class _Switch, _Node
self.set_spec(num_switch, num_node_p_switch, num_gpu_p_node, num_cpu_p_node, mem_p_node)
# create/init switch and node objects
for switch_id in range(self.num_switch):
switch_instance = _Switch(switch_id, self.num_node_p_switch, self.num_gpu_p_node, self.num_cpu_p_node, self.mem_p_node)
switch_instance.add_nodes(self.num_node_p_switch, self.num_gpu_p_node, self.num_cpu_p_node, self.mem_p_node, self)
self.switch_list.append(switch_instance)
def set_ir_spec(self, cluster_info):
assert self.init == False and self.ir_init == False, 'only init once'
self.ir_init = True
self.num_switch = len(cluster_info.keys())
self.num_node_p_switch = dict()
self.num_gpu_p_node = dict()
self.num_cpu_p_node = dict()
self.mem_p_node = dict()
for switch_name in cluster_info.keys():
assert 'switch' in switch_name, 'switch must exists in {}'.format(switch_name)
self.num_node_p_switch[switch_name] = len(cluster_info[switch_name])
switch_info = cluster_info[switch_name]
for node_name in switch_info.keys():
assert node_name not in self.num_gpu_p_node, 'exists same node name which is not allowed'
self.num_gpu_p_node[node_name] = switch_info[node_name]['num_gpu']
self.num_cpu_p_node[node_name] = switch_info[node_name]['num_cpu']
self.mem_p_node[node_name] = switch_info[node_name]['mem']
self.num_node = len(self.num_gpu_p_node)
self.num_gpu = sum([self.num_gpu_p_node[node_name] for node_name in self.num_gpu_p_node.keys()])
self.num_cpu = sum([self.num_cpu_p_node[node_name] for node_name in self.num_cpu_p_node.keys()])
self.free_gpu = self.num_gpu
self.mem = sum([self.mem_p_node[node_name] for node_name in self.num_cpu_p_node.keys()])
def ir_init_infra(self, cluster_info):
# Init and create cluster infration entities (switches, nodes) by using class _Switch, _Node
self.set_ir_spec(cluster_info)
# create/init switch and node objects
for switch_name in cluster_info.keys():
switch_instance = _Switch(switch_name)
switch_instance.ir_init(cluster_info[switch_name])
switch_instance.add_ir_nodes(cluster_info[switch_name], self)
self.switch_list.append(switch_instance)
def init_gandiva_nodes(self, ):
# init node class
for switch in self.switch_list:
for node in switch.node_list:
self.free_nodes.append(node)
assert len(self.free_nodes) == self.num_node, '# of free nodes {} is incorrect'.format(len(self.free_nodes))
def release_gpus(self, job, status='END'):
for placement in job['placements']:
assert 'switch' in placement and 'nodes' in placement
switch = self.switch_list[placement['switch']]
assert switch.release_gpus(placement['nodes'], job) == True
if status == 'END':
job['status'] = 'END'
print('**** job[%d] completed' % job['job_idx'])
return True
def release_job_resource(self, job, status='END'):
for placement in job['placements']:
assert 'switch' in placement and 'nodes' in placement
found = False
for switch in self.switch_list:
if switch.id == placement['switch']:
found = True
assert switch.release_job_resource(placement['nodes'], job=job) == True
break
assert found == True, 'should exist in switch list'
if status == 'END': job['status'] = 'END'
job['gpus'] = list()
job['placements'] = list() # prepare an empty job_placement
job['topology'] = None
return True
def cluster_partition(self, user_share):
gpu_num = self.check_total_gpus()
print(len(user_share))
for user, share in user_share.items():
required_gpu_num = int(share * gpu_num)
for switch in self.switch_list:
for node in switch.node_list:
if node.permission_user_list is None:
node.permission_user_list = [user]
required_gpu_num -= node.check_total_gpus()
if required_gpu_num <= 0: break
if required_gpu_num <= 0: break
assert required_gpu_num <= 0, '{} do not have resource'.format(user)
def check_free_gpus(self, user_name=None):
return sum([switch.check_free_gpus(user_name) for switch in self.switch_list])
def check_free_guarante_gpus(self, user_name=None):
return sum([switch.check_free_guarante_gpus(user_name) for switch in self.switch_list])
def check_free_spot_gpus(self, user_name=None):
return sum([switch.check_free_spot_gpus(user_name) for switch in self.switch_list])
def check_total_gpus(self, user_name=None):
return sum([switch.check_total_gpus(user_name) for switch in self.switch_list])
def check_total_guarante_gpus(self, user_name=None):
return sum([switch.check_total_guarante_gpus(user_name) for switch in self.switch_list])
def check_total_spot_gpus(self, user_name=None):
return sum([switch.check_total_spot_gpus(user_name) for switch in self.switch_list])
def check_free_cpus(self, ):
return sum([switch.check_free_cpus() for switch in self.switch_list])
def check_total_cpus(self, ):
return sum([switch.check_total_cpus() for switch in self.switch_list])
def gandiva_node_set_adjust(self, cur_time, jobs):
"""
when there are free nodes in cluster, reduce burden of heavy nodes
"""
total_gpu_demands = 0
nl_gpu_demands = dict()
nl_gpu_occupied = dict()
for num_gpu, node_list in self.node_g.items():
total_jobs = 0
occupied_gpus = 0
for node_set in node_list:
total_jobs += len(node_set['jobs'])
occupied_gpus += sum([node.check_total_gpus() for node in node_set['nodes']])
total_gpu_demands += total_jobs * num_gpu
nl_gpu_demands[num_gpu] = total_jobs * num_gpu
nl_gpu_occupied[num_gpu] = occupied_gpus
if total_gpu_demands == 0:
return
for num_gpu, node_list in self.node_g.items():
if nl_gpu_demands[num_gpu] == 0:
continue
nl_gpu_plan = int(math.floor(1.0 * nl_gpu_demands[num_gpu] / total_gpu_demands * self.num_gpu))
nl_gpu_target = min(nl_gpu_plan, nl_gpu_demands[num_gpu])
nl_gpu_diff = nl_gpu_target - nl_gpu_occupied[num_gpu]
if nl_gpu_diff > 0:
# growth:
num_ns = int(math.ceil(1. * nl_gpu_diff / num_gpu))
expand_ns = self.gandiva_node_set_expand(num_gpu, node_list, num_ns, cur_time, jobs)
elif nl_gpu_diff < 0:
# shrink
num_ns = int(math.ceil(-1. * nl_gpu_diff / num_gpu))
shrink_ns = self.gandiva_node_set_shrink(num_gpu, node_list, num_ns, cur_time, jobs)
def gandiva_node_set_shrink(self, node_group, occupied_node_list, release_node_num, cur_time, jobs):
'''
ns_num_gpu: num_gpu of job in this node_set
'''
# can't shrink too many node_set ?? why
# decrease ns nodes
if len(occupied_node_list) <= release_node_num:
release_node_num = len(occupied_node_list) - 1 # at least keep single node
job_list = list()
i = 0
for i in range(1, release_node_num + 1):
node_set = occupied_node_list.pop(0)
if len(node_set['jobs']) > 0:
job_list.extend(node_set['jobs'])
update_info = {
'jobs': list(),
'concurrency' : 0,
'util' : 0,
'num_jobs' : 0,
}
node_set.update(update_info)
for node in node_set['nodes']:
self.free_nodes.append(node)
for job in job_list:
node_set = occupied_node_list[0]
job_util = round(job['model']['mem_util'], 2)
node_set['util'] = round(node_set['util'] + job_util, 2)
assert job not in node_set['jobs'], 'cannot repeat too many times'
node_set['jobs'].append(job)
node_set['num_jobs'] += 1
occupied_node_list.sort(key=lambda x: x.__getitem__('util'))
if i > 0:
print("node_g{} shrink {} node_sets" .format(node_group, i))
return i
def gandiva_node_set_expand(self, node_group, occupied_node_list, required_node_num, cur_time, jobs):
acquired_node_num = 0
for acquired_node_num in range(1, required_node_num + 1):
sorted_free_nodes = sorted(self.free_nodes, key=lambda node: node.check_free_gpus(), reverse=True)
cum_gpus = 0
for idx, free_node in enumerate(sorted_free_nodes):
if cum_gpus + free_node.check_free_gpus() >= node_group:
node_set = {
'nodes' : list(),
'jobs' : list(),
'concurrency' : 0,
'capacity' : int((cum_gpus + free_node.check_free_gpus()) * 1.0 / node_group),
'util' : 0,
'num_gpus': node_group,
'num_jobs' : 0,
}
for j in range(idx+1):
free_node = sorted_free_nodes[j]
self.free_nodes.remove(free_node)
node_set['nodes'].append(free_node)
occupied_node_list.append(node_set)
break
else:
cum_gpus + free_node.check_free_gpus()
if acquired_node_num > 0: # TODO
job_list = list()
for node_set in occupied_node_list:
if len(node_set['jobs']) > 0:
job_list.extend(node_set['jobs'])
update_info = {
'jobs': list(),
'concurrency' : 0,
'util' : 0,
'num_jobs' : 0,
}
node_set.update(update_info)
for job in job_list:
node_set = occupied_node_list[0]
job_util = round(job['model']['mem_util'], 2)
node_set['util'] = round(node_set['util'] + job_util, 2)
assert job not in node_set['jobs'], 'cannot repeat too many times'
node_set['jobs'].append(job)
node_set['num_jobs'] += 1
occupied_node_list.sort(key=lambda x: x.__getitem__('util'))
print("node_g{} expand {} node_sets".format(node_group, acquired_node_num))
def time_slicing_execute(self, cur_time, jobs, time_diff):
node_release = False
switch_job = int(cur_time % 60) == 0 # specify time, switch job
used_gpus = 0
for num_gpu, node_list in self.node_g.items():
release_nodes = list() # release nodes
for node_set in node_list:
concurrency = 0
total_util = 0
for r_job in node_set['jobs']:
total_util = total_util + r_job['model']['mem_util']
if total_util > node_set['capacity']:
break
concurrency += 1
tmp_used_gpus = \
num_gpu if (len(node_set['jobs']) * num_gpu) > node_set['nodes'][0].check_total_gpus() else (len(node_set['jobs']) * num_gpu) # TODO: figure out
used_gpus += tmp_used_gpus
i = 0
end_job_list = list()
for r_job in node_set['jobs']:
r_job['executed_time'] = r_job['executed_time'] + time_diff
if r_job['executed_time'] >= r_job['duration']:
r_job['end_time'] = cur_time + r_job['duration'] - r_job['executed_time']
r_job['status'] = 'END'
end_job_list.append(r_job)
print("job[%d] ends at time[%d]" %(r_job['job_id'], r_job['end_time']))
i += 1
if i >= concurrency:
break
if switch_job and len(node_set['jobs']) > concurrency:
# node_set['jobs'].reverse()
random.shuffle(node_set['jobs'])
for end_job in end_job_list:
jobs.running_jobs.remove(end_job)
node_set['jobs'].remove(end_job)
node_set['num_jobs'] = node_set['num_jobs'] - 1
if len(node_set['jobs']) == 0:
assert node_set['num_jobs'] == 0
for node in node_set['nodes']:
self.free_nodes.append(node)
release_nodes.append(node_set)
used_gpus = used_gpus - tmp_used_gpus
node_release = True
for release_node in release_nodes:
node_list.remove(release_node)
return node_release
# Process-wide singleton cluster instance shared by the simulator.
CLUSTER = _Cluster()

# Names intended for re-export from this module.
_allowed_symbols = [
    'CLUSTER'
]
5500 | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
# Seek-whence constants; fall back to the literal values on Pythons that
# predate os.SEEK_* (added in 2.5).
try:
    _SEEK_SET = os.SEEK_SET
    _SEEK_CUR = os.SEEK_CUR
    _SEEK_END = os.SEEK_END
# before 2.5
except AttributeError:
    _SEEK_SET = 0
    _SEEK_CUR = 1
    _SEEK_END = 2

# Sentinel byte strings reused by GridOut.read()/readline().
EMPTY = b""
NEWLN = b"\n"

"""Default chunk size, in bytes."""
# Slightly under a power of 2, to work well with server's record allocations.
DEFAULT_CHUNK_SIZE = 255 * 1024

# Index specs ensured lazily on first write: chunks are unique per
# (files_id, n); files are looked up by (filename, uploadDate).
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
def getter(self):
self._ensure_file()
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
docstring += "\n\nThis attribute is read-only."
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
def _disallow_transactions(session):
if session and session.in_transaction:
raise InvalidOperation(
'GridFS does not support multi-document transactions')
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, session=None, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 255 kb)

          - ``"encoding"``: encoding used for this file. Any :class:`str`
            that is written to the file will be converted to :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` to use for all
            commands
          - `**kwargs` (optional): file level options (see above)

        .. versionchanged:: 4.0
           Removed the `disable_md5` parameter. See
           :ref:`removed-gridfs-checksum` for details.

        .. versionchanged:: 3.7
           Added the `disable_md5` parameter.

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError('root_collection must use '
                                     'acknowledged write_concern')
        _disallow_transactions(session)

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        coll = _clear_entity_type_registry(
            root_collection, read_preference=ReadPreference.PRIMARY)

        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        # object.__setattr__ bypasses this class's own __setattr__, which
        # would otherwise record these names as fields of the file document.
        object.__setattr__(self, "_session", session)
        object.__setattr__(self, "_coll", coll)
        object.__setattr__(self, "_chunks", coll.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", io.BytesIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def __create_index(self, collection, index_key, unique):
        # Only create the index when the collection is empty (first file
        # written to this bucket); otherwise assume indexes already exist.
        doc = collection.find_one(projection={"_id": 1}, session=self._session)
        if doc is None:
            try:
                index_keys = [index_spec['key'] for index_spec in
                              collection.list_indexes(session=self._session)]
            except OperationFailure:
                index_keys = []
            if index_key not in index_keys:
                collection.create_index(
                    index_key.items(), unique=unique, session=self._session)

    def __ensure_indexes(self):
        # Idempotent: indexes are ensured at most once per GridIn instance.
        if not object.__getattribute__(self, "_ensured_index"):
            _disallow_transactions(self._session)
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)

    def abort(self):
        """Remove all chunks/files that may have been uploaded and close.
        """
        self._coll.chunks.delete_many(
            {"files_id": self._file['_id']}, session=self._session)
        self._coll.files.delete_one(
            {"_id": self._file['_id']}, session=self._session)
        object.__setattr__(self, "_closed", True)

    @property
    def closed(self):
        """Is this file closed?
        """
        return self._closed

    _id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
                            read_only=True)
    filename = _grid_in_property("filename", "Name of this file.")
    name = _grid_in_property("filename", "Alias for `filename`.")
    content_type = _grid_in_property("contentType", "Mime-type for this file.")
    length = _grid_in_property("length", "Length (in bytes) of this file.",
                               closed_only=True)
    chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
                                   read_only=True)
    upload_date = _grid_in_property("uploadDate",
                                    "Date that this file was uploaded.",
                                    closed_only=True)
    md5 = _grid_in_property("md5", "MD5 of the contents of this file "
                            "if an md5 sum was created.",
                            closed_only=True)

    def __getattr__(self, name):
        # Unknown attributes fall back to fields of the file document.
        if name in self._file:
            return self._file[name]
        raise AttributeError("GridIn object has no attribute '%s'" % name)

    def __setattr__(self, name, value):
        # For properties of this instance like _buffer, or descriptors set on
        # the class like filename, use regular __setattr__
        if name in self.__dict__ or name in self.__class__.__dict__:
            object.__setattr__(self, name, value)
        else:
            # All other attributes are part of the document in db.fs.files.
            # Store them to be sent to server on close() or if closed, send
            # them now.
            self._file[name] = value
            if self._closed:
                self._coll.files.update_one({"_id": self._file["_id"]},
                                            {"$set": {name: value}})

    def __flush_data(self, data):
        """Flush `data` to a chunk.
        """
        self.__ensure_indexes()
        if not data:
            return
        assert(len(data) <= self.chunk_size)

        chunk = {"files_id": self._file["_id"],
                 "n": self._chunk_number,
                 "data": Binary(data)}

        try:
            self._chunks.insert_one(chunk, session=self._session)
        except DuplicateKeyError:
            # The unique (files_id, n) index tripped: this _id already has
            # chunks, i.e. the file already exists.
            self._raise_file_exists(self._file['_id'])
        self._chunk_number += 1
        self._position += len(data)

    def __flush_buffer(self):
        """Flush the buffer contents out to a chunk.
        """
        self.__flush_data(self._buffer.getvalue())
        self._buffer.close()
        self._buffer = io.BytesIO()

    def __flush(self):
        """Flush the file to the database.
        """
        try:
            self.__flush_buffer()
            # The GridFS spec says length SHOULD be an Int64.
            self._file["length"] = Int64(self._position)
            self._file["uploadDate"] = datetime.datetime.utcnow()

            return self._coll.files.insert_one(
                self._file, session=self._session)
        except DuplicateKeyError:
            self._raise_file_exists(self._id)

    def _raise_file_exists(self, file_id):
        """Raise a FileExists exception for the given file_id."""
        raise FileExists("file with _id %r already exists" % file_id)

    def close(self):
        """Flush the file and close it.

        A closed file cannot be written any more. Calling
        :meth:`close` more than once is allowed.
        """
        if not self._closed:
            self.__flush()
            object.__setattr__(self, "_closed", True)

    def read(self, size=-1):
        # GridIn is write-only.
        raise io.UnsupportedOperation('read')

    def readable(self):
        return False

    def seekable(self):
        return False

    def write(self, data):
        """Write data to the file. There is no return value.

        `data` can be either a string of bytes or a file-like object
        (implementing :meth:`read`). If the file has an
        :attr:`encoding` attribute, `data` can also be a
        :class:`str` instance, which will be encoded as
        :attr:`encoding` before being written.

        Due to buffering, the data may not actually be written to the
        database until the :meth:`close` method is called. Raises
        :class:`ValueError` if this file is already closed. Raises
        :class:`TypeError` if `data` is not an instance of
        :class:`bytes`, a file-like object, or an instance of :class:`str`.
        Unicode data is only allowed if the file has an :attr:`encoding`
        attribute.

        :Parameters:
          - `data`: string of bytes or file-like object to be written
            to the file
        """
        if self._closed:
            raise ValueError("cannot write to a closed file")

        try:
            # file-like
            read = data.read
        except AttributeError:
            # string
            if not isinstance(data, (str, bytes)):
                raise TypeError("can only write strings or file-like objects")
            if isinstance(data, str):
                try:
                    data = data.encode(self.encoding)
                except AttributeError:
                    raise TypeError("must specify an encoding for file in "
                                    "order to write str")
            read = io.BytesIO(data).read

        if self._buffer.tell() > 0:
            # Make sure to flush only when _buffer is complete
            space = self.chunk_size - self._buffer.tell()
            if space:
                try:
                    to_write = read(space)
                except:
                    self.abort()
                    raise
                self._buffer.write(to_write)
                if len(to_write) < space:
                    return  # EOF or incomplete
            self.__flush_buffer()
        # Write whole chunks directly; a trailing partial chunk stays in
        # the buffer until more data arrives or the file is closed.
        to_write = read(self.chunk_size)
        while to_write and len(to_write) == self.chunk_size:
            self.__flush_data(to_write)
            to_write = read(self.chunk_size)
        self._buffer.write(to_write)

    def writelines(self, sequence):
        """Write a sequence of strings to the file.

        Does not add seperators.
        """
        for line in sequence:
            self.write(line)

    def writeable(self):
        # NOTE(review): io.IOBase spells this method ``writable``; this
        # spelling is kept as-is for backward compatibility -- confirm.
        return True

    def __enter__(self):
        """Support for the context manager protocol.
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Support for the context manager protocol.

        Close the file and allow exceptions to propagate.
        """
        self.close()

        # propagate exceptions
        return False
class GridOut(io.IOBase):
    """Class to read data out of GridFS.
    """
    def __init__(self, root_collection, file_id=None, file_document=None,
                 session=None):
        """Read a file from GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Either `file_id` or `file_document` must be specified,
        `file_document` will be given priority if present. Raises
        :class:`TypeError` if `root_collection` is not an instance of
        :class:`~pymongo.collection.Collection`.

        :Parameters:
          - `root_collection`: root collection to read from
          - `file_id` (optional): value of ``"_id"`` for the file to read
          - `file_document` (optional): file document from
            `root_collection.files`
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` to use for all
            commands

        .. versionchanged:: 3.8
           For better performance and to better follow the GridFS spec,
           :class:`GridOut` now uses a single cursor to read all the chunks in
           the file.

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.0
           Creating a GridOut does not immediately retrieve the file metadata
           from the server. Metadata is fetched when first needed.
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")
        _disallow_transactions(session)

        root_collection = _clear_entity_type_registry(root_collection)

        super().__init__()

        self.__chunks = root_collection.chunks
        self.__files = root_collection.files
        self.__file_id = file_id
        # Bytes already pulled from the server but not yet consumed by the
        # caller (left over from a previous read/readline).
        self.__buffer = EMPTY
        # Single chunk cursor, created lazily on first read (see readchunk).
        self.__chunk_iter = None
        self.__position = 0
        self._file = file_document
        self._session = session

    _id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
    filename = _grid_out_property("filename", "Name of this file.")
    name = _grid_out_property("filename", "Alias for `filename`.")
    content_type = _grid_out_property("contentType", "Mime-type for this file.")
    length = _grid_out_property("length", "Length (in bytes) of this file.")
    chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
    upload_date = _grid_out_property("uploadDate",
                                     "Date that this file was first uploaded.")
    aliases = _grid_out_property("aliases", "List of aliases for this file.")
    metadata = _grid_out_property("metadata", "Metadata attached to this file.")
    md5 = _grid_out_property("md5", "MD5 of the contents of this file "
                             "if an md5 sum was created.")

    def _ensure_file(self):
        # Lazily fetch the file metadata document on first use.
        if not self._file:
            _disallow_transactions(self._session)
            self._file = self.__files.find_one({"_id": self.__file_id},
                                               session=self._session)
            if not self._file:
                raise NoFile("no file in gridfs collection %r with _id %r" %
                             (self.__files, self.__file_id))

    def __getattr__(self, name):
        # Unknown attributes fall back to fields of the file document.
        self._ensure_file()
        if name in self._file:
            return self._file[name]
        raise AttributeError("GridOut object has no attribute '%s'" % name)

    def readable(self):
        return True

    def readchunk(self):
        """Reads a chunk at a time. If the current position is within a
        chunk the remainder of the chunk is returned.
        """
        # Serve any buffered bytes first; otherwise pull the next chunk
        # lazily from the (single) chunk cursor.
        received = len(self.__buffer)
        chunk_data = EMPTY
        chunk_size = int(self.chunk_size)

        if received > 0:
            chunk_data = self.__buffer
        elif self.__position < int(self.length):
            chunk_number = int((received + self.__position) / chunk_size)
            if self.__chunk_iter is None:
                self.__chunk_iter = _GridOutChunkIterator(
                    self, self.__chunks, self._session, chunk_number)

            chunk = self.__chunk_iter.next()
            chunk_data = chunk["data"][self.__position % chunk_size:]

            if not chunk_data:
                raise CorruptGridFile("truncated chunk")

        self.__position += len(chunk_data)
        self.__buffer = EMPTY
        return chunk_data

    def read(self, size=-1):
        """Read at most `size` bytes from the file (less if there
        isn't enough data).

        The bytes are returned as an instance of :class:`str` (:class:`bytes`
        in python 3). If `size` is negative or omitted all data is read.

        :Parameters:
          - `size` (optional): the number of bytes to read

        .. versionchanged:: 3.8
           This method now only checks for extra chunks after reading the
           entire file. Previously, this method would check for extra chunks
           on every call.
        """
        self._ensure_file()

        remainder = int(self.length) - self.__position
        if size < 0 or size > remainder:
            size = remainder

        if size == 0:
            return EMPTY

        received = 0
        data = io.BytesIO()
        while received < size:
            chunk_data = self.readchunk()
            received += len(chunk_data)
            data.write(chunk_data)

        # Detect extra chunks after reading the entire file.
        if size == remainder and self.__chunk_iter:
            try:
                self.__chunk_iter.next()
            except StopIteration:
                pass

        # readchunk() may have over-read past `size`; roll the logical
        # position back and keep the surplus in the buffer.
        self.__position -= received - size

        # Return 'size' bytes and store the rest.
        data.seek(size)
        self.__buffer = data.read()
        data.seek(0)
        return data.read(size)

    def readline(self, size=-1):
        """Read one line or up to `size` bytes from the file.

        :Parameters:
         - `size` (optional): the maximum number of bytes to read
        """
        remainder = int(self.length) - self.__position
        if size < 0 or size > remainder:
            size = remainder

        if size == 0:
            return EMPTY

        received = 0
        data = io.BytesIO()
        while received < size:
            chunk_data = self.readchunk()
            # Stop at the first newline found within the size limit.
            pos = chunk_data.find(NEWLN, 0, size)
            if pos != -1:
                size = received + pos + 1

            received += len(chunk_data)
            data.write(chunk_data)
            if pos != -1:
                break

        self.__position -= received - size

        # Return 'size' bytes and store the rest.
        data.seek(size)
        self.__buffer = data.read()
        data.seek(0)
        return data.read(size)

    def tell(self):
        """Return the current position of this file.
        """
        return self.__position

    def seek(self, pos, whence=_SEEK_SET):
        """Set the current position of this file.

        :Parameters:
         - `pos`: the position (or offset if using relative
           positioning) to seek to
         - `whence` (optional): where to seek
           from. :attr:`os.SEEK_SET` (``0``) for absolute file
           positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
           to the current position, :attr:`os.SEEK_END` (``2``) to
           seek relative to the file's end.
        """
        if whence == _SEEK_SET:
            new_pos = pos
        elif whence == _SEEK_CUR:
            new_pos = self.__position + pos
        elif whence == _SEEK_END:
            new_pos = int(self.length) + pos
        else:
            raise IOError(22, "Invalid value for `whence`")

        if new_pos < 0:
            raise IOError(22, "Invalid value for `pos` - must be positive")

        # Optimization, continue using the same buffer and chunk iterator.
        if new_pos == self.__position:
            return

        # Any other position invalidates the buffered bytes and the chunk
        # cursor; they are rebuilt lazily on the next read.
        self.__position = new_pos
        self.__buffer = EMPTY
        if self.__chunk_iter:
            self.__chunk_iter.close()
            self.__chunk_iter = None

    def seekable(self):
        return True

    def __iter__(self):
        """Return an iterator over all of this file's data.

        The iterator will return lines (delimited by ``b'\\n'``) of
        :class:`bytes`. This can be useful when serving files
        using a webserver that handles such an iterator efficiently.

        .. versionchanged:: 3.8
           The iterator now raises :class:`CorruptGridFile` when encountering
           any truncated, missing, or extra chunk in a file. The previous
           behavior was to only raise :class:`CorruptGridFile` on a missing
           chunk.

        .. versionchanged:: 4.0
           The iterator now iterates over *lines* in the file, instead
           of chunks, to conform to the base class :py:class:`io.IOBase`.
           Use :meth:`GridOut.readchunk` to read chunk by chunk instead
           of line by line.
        """
        return self

    def close(self):
        """Make GridOut more generically file-like."""
        if self.__chunk_iter:
            self.__chunk_iter.close()
            self.__chunk_iter = None
        super().close()

    def write(self, value):
        # GridOut is read-only.
        raise io.UnsupportedOperation('write')

    def writelines(self, lines):
        raise io.UnsupportedOperation('writelines')

    def writable(self):
        return False

    def __enter__(self):
        """Makes it possible to use :class:`GridOut` files
        with the context manager protocol.
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Makes it possible to use :class:`GridOut` files
        with the context manager protocol.
        """
        self.close()
        return False

    def fileno(self):
        raise io.UnsupportedOperation('fileno')

    def flush(self):
        # GridOut is read-only, so flush does nothing.
        pass

    def isatty(self):
        return False

    def truncate(self, size=None):
        # See https://docs.python.org/3/library/io.html#io.IOBase.writable
        # for why truncate has to raise.
        raise io.UnsupportedOperation('truncate')

    # Override IOBase.__del__ otherwise it will lead to __getattr__ on
    # __IOBase_closed which calls _ensure_file and potentially performs I/O.
    # We cannot do I/O in __del__ since it can lead to a deadlock.
    def __del__(self):
        pass
class _GridOutChunkIterator(object):
"""Iterates over a file's chunks using a single cursor.
Raises CorruptGridFile when encountering any truncated, missing, or extra
chunk in a file.
"""
def __init__(self, grid_out, chunks, session, next_chunk):
self._id = grid_out._id
self._chunk_size = int(grid_out.chunk_size)
self._length = int(grid_out.length)
self._chunks = chunks
self._session = session
self._next_chunk = next_chunk
self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
self._cursor = None
def expected_chunk_length(self, chunk_n):
if chunk_n < self._num_chunks - 1:
return self._chunk_size
return self._length - (self._chunk_size * (self._num_chunks - 1))
def __iter__(self):
return self
def _create_cursor(self):
filter = {"files_id": self._id}
if self._next_chunk > 0:
filter["n"] = {"$gte": self._next_chunk}
_disallow_transactions(self._session)
self._cursor = self._chunks.find(filter, sort=[("n", 1)],
session=self._session)
def _next_with_retry(self):
"""Return the next chunk and retry once on CursorNotFound.
We retry on CursorNotFound to maintain backwards compatibility in
cases where two calls to read occur more than 10 minutes apart (the
server's default cursor timeout).
"""
if self._cursor is None:
self._create_cursor()
try:
return self._cursor.next()
except CursorNotFound:
self._cursor.close()
self._create_cursor()
return self._cursor.next()
def next(self):
try:
chunk = self._next_with_retry()
except StopIteration:
if self._next_chunk >= self._num_chunks:
raise
raise CorruptGridFile("no chunk #%d" % self._next_chunk)
if chunk["n"] != self._next_chunk:
self.close()
raise CorruptGridFile(
"Missing chunk: expected chunk #%d but found "
"chunk with n=%d" % (self._next_chunk, chunk["n"]))
if chunk["n"] >= self._num_chunks:
# According to spec, ignore extra chunks if they are empty.
if len(chunk["data"]):
self.close()
raise CorruptGridFile(
"Extra chunk found: expected %d chunks but found "
"chunk with n=%d" % (self._num_chunks, chunk["n"]))
expected_length = self.expected_chunk_length(chunk["n"])
if len(chunk["data"]) != expected_length:
self.close()
raise CorruptGridFile(
"truncated chunk #%d: expected chunk length to be %d but "
"found chunk with length %d" % (
chunk["n"], expected_length, len(chunk["data"])))
self._next_chunk += 1
return chunk
__next__ = next
def close(self):
if self._cursor:
self._cursor.close()
self._cursor = None
class GridOutIterator(object):
    """Iterator yielding a GridOut file's raw data one chunk at a time."""

    def __init__(self, grid_out, chunks, session):
        # Start from chunk 0; _GridOutChunkIterator validates chunk integrity.
        self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)

    def __iter__(self):
        return self

    def next(self):
        chunk = self.__chunk_iter.next()
        return bytes(chunk["data"])

    __next__ = next
class GridOutCursor(Cursor):
    """A cursor / iterator for returning GridOut objects as the result
    of an arbitrary query against the GridFS files collection.
    """

    def __init__(self, collection, filter=None, skip=0, limit=0,
                 no_cursor_timeout=False, sort=None, batch_size=0,
                 session=None):
        """Create a new cursor, similar to the normal
        :class:`~pymongo.cursor.Cursor`.

        Should not be called directly by application developers - see
        the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.

        .. versionadded 2.7

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        _disallow_transactions(session)
        collection = _clear_entity_type_registry(collection)

        # Hold on to the base "fs" collection to create GridOut objects later.
        self.__root_collection = collection

        super(GridOutCursor, self).__init__(
            collection.files, filter, skip=skip, limit=limit,
            no_cursor_timeout=no_cursor_timeout, sort=sort,
            batch_size=batch_size, session=session)

    def next(self):
        """Get next GridOut object from cursor.
        """
        _disallow_transactions(self.session)
        # Work around "super is not iterable" issue in Python 3.x
        next_file = super(GridOutCursor, self).next()
        return GridOut(self.__root_collection, file_document=next_file,
                       session=self.session)

    __next__ = next

    def add_option(self, *args, **kwargs):
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def remove_option(self, *args, **kwargs):
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def _clone_base(self, session):
        """Creates an empty GridOutCursor for information to be copied into.
        """
        return GridOutCursor(self.__root_collection, session=session)
| StarcoderdataPython |
4814396 | <gh_stars>0
from myflaskbackend import my_app
import pytest
import json
from pathlib import Path
import os
# fixture function to only be invoked once per test module (the default is to invoke once per test function)
@pytest.fixture(scope='module')
def client():
    """Flask test client fixture, created once per test module."""
    my_app.app.config['TESTING'] = True
    with my_app.app.test_client() as client:
        # with flaskr.app.app_context():
        #     flaskr.init_db()
        yield client
    # Anything after yield will be executed at the end of tests:
    # close processes, delete temporary files, ...
def test_get_root_dir(client):
    """GET app root directory returns the expected JSON payload."""
    rv = client.get('/')
    data_string = rv.data.decode('utf8')
    data_json = json.loads(data_string)
    assert data_json['my_root_resource'] == "root node"
# The request fixture is a special fixture providing information of the requesting test function
def test_upload_csv_file(client, request):
    """POST a csv file to the backend and check the response echoes its name.

    The `request` fixture provides the path of the requesting test module,
    used here to locate the test data file.
    """
    file_path = Path(request.fspath).parent / 'data_tests/dummy_data.csv'
    with open(str(file_path), 'rb') as f:
        rv = client.post(
            '/uploadcsvfile',
            data={'client_file': f},
        )
    assert rv.status == "200 OK"
    assert rv.status_code == 200
    assert 'dummy_data.csv' in rv.data.decode()
| StarcoderdataPython |
99216 | <gh_stars>0
# coding: utf-8
# In[19]:
# Notebook-export script: exploratory analysis of LA city payroll CSVs.
import pandas as pd
from matplotlib import pyplot as plt
from ggplot import *

# First 4 rows of each CSV are a report header, hence skiprows=4.
dat = pd.read_csv("2015_City.csv", skiprows=4, encoding='iso-8859-1')
dat.head()

plt.style.use('ggplot')

# Raw wage distribution.
plt.hist(dat["Total Wages"], bins=50)
plt.xlabel("Wages")
plt.ylabel("LOVE")
plt.show()

dat.sort_values(by="Total Wages", ascending=False)["Total Wages"].head()

# Keep only (approximately) full-time workers: total wages >= 18000.
new_dat = dat.loc[dat["Total Wages"] >= 18000]
new_dat.sort_values(by="Total Wages", ascending=True)["Total Wages"].head(15)

len(new_dat), len(dat)
float(len(new_dat)) / float(len(dat))  # the filter removed ~30% of the rows

plt.hist(new_dat["Total Wages"], bins=20)
plt.xlabel("Wages")
plt.ylabel("LOVE")
plt.title("Full Time Workers")
plt.show()

# Combine several years into one frame of (Year, Total Wages).
fnames = ["2009_City.csv", "2010_City.csv", "2011_City.csv", "2012_City.csv",
          "2013_City.csv", "2014_City.csv", "2015_City.csv"]
li = []
for f in fnames:
    df = pd.read_csv(f, skiprows=4, usecols=["Year", "Total Wages"])
    li.append(df)
bigass_df = pd.concat(li)
bigass_df.head()

# Density of wages per year.
myplot = (ggplot(aes(x="Total Wages", color="Year"), data=bigass_df) + geom_density(alpha=0.2))
print(myplot)
| StarcoderdataPython |
from IcrisCrawler import settings
import requests
from urllib.parse import urljoin
# Endpoint on the fpserver that serves proxy records.
fps_api = urljoin(settings.FP_SERVER_URL, '/api/proxy/')
# Required anonymity level for returned proxies (taken from project settings).
anonymity = settings.FP_SERVER_PROXY_ANONYMITY
def fetch_proxy(scheme, count):
    """Fetch proxy URLs from fpserver for the given scheme.

    :scheme: `str` proxy protocol
    :count: number of proxies requested
    :yields: proxy url strings
    """
    params = {
        "scheme": scheme,
        "anonymity": anonymity,
        "count": count,
    }
    text = None
    try:
        req = requests.get(fps_api, params=params)
        text = req.text
        data = req.json()
    except Exception:
        # `text` is None if the request itself failed before a response.
        print("Failed to fetch proxy: %s" % text)
    else:
        _code = data.get('code')
        _proxies = data.get('data', {}).get('detail', [])
        # BUGFIX: was `_code is not 0` -- identity comparison against a small
        # int is implementation-dependent; value comparison is intended.
        if (_code != 0) or (not _proxies):
            print('Response of fetch_proxy: %s' % data)
            return
        for _p in _proxies:
            if _p.get('url'):
                yield _p.get('url')
| StarcoderdataPython |
import os
def main():
    """Interactive loop: prompt for a mode/path/extension and run `search`.

    Exits on Ctrl+C / EOF (previously a bare ``except`` swallowed every
    exception, hiding real errors).
    """
    try:
        while True:
            while True:
                mode = input('Mode: ').lower()
                # NOTE: ''.startswith('') is True, so an empty input
                # selects "search" (original behavior preserved).
                if 'search'.startswith(mode):
                    mode = False  # False -> collect matches
                    break
                elif 'destroy'.startswith(mode):
                    mode = True  # True -> delete matches
                    break
                print('"search" or "destroy"')
            path = input('Path: ')
            extention = input('Extention: ')
            for path_name in search(path, extention, mode):
                print('Found:', path_name)
    except (KeyboardInterrupt, EOFError):
        pass
def search(path, extention, destroy):
    """Recursively collect (or delete) files under *path*.

    :param path: directory to walk; must exist.
    :param extention: required filename suffix; an empty string matches all.
    :param destroy: if True, matching files are deleted instead of collected.
    :return: list of matching file paths (empty when *destroy* is True).
    """
    assert os.path.isdir(path)
    path_list = list()
    for name in os.listdir(path):
        path_name = os.path.join(path, name)
        try:
            if os.path.isdir(path_name):
                path_list += search(path_name, extention, destroy)
            elif os.path.isfile(path_name):
                if path_name.endswith(extention) or not extention:
                    if destroy:
                        os.remove(path_name)
                    else:
                        path_list.append(path_name)
        except OSError:
            # Was a bare `except:`; only filesystem errors are expected here.
            print('Error:', path_name)
    return path_list
if __name__ == '__main__':
main()
| StarcoderdataPython |
# crabageprediction/venv/Lib/site-packages/fontTools/colorLib/geometry.py
"""Helpers for manipulating 2D points and vectors in COLR table."""
from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
def _vector_between(origin, target):
return (target[0] - origin[0], target[1] - origin[1])
def _round_point(pt):
    """Round both coordinates of *pt* with OpenType rounding (otRound)."""
    return (otRound(pt[0]), otRound(pt[1]))
def _unit_vector(vec):
length = hypot(*vec)
if length == 0:
return None
return (vec[0] / length, vec[1] / length)
_CIRCLE_INSIDE_TOLERANCE = 1e-4
# The unit vector's X and Y components are respectively
# U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984
def _rounding_offset(direction):
    """Return a (-/+1.0 or 0.0, -/+1.0 or 0.0) nudge for *direction*.

    We divide the unit circle in 8 equal slices oriented towards the cardinal
    (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we
    map one of the possible cases: -1, 0, +1 for either X and Y coordinate.
    E.g. return (+1.0, -1.0) if the unit vector is oriented towards SE, or
    (-1.0, 0.0) if it's pointing West, etc.
    """
    uv = _unit_vector(direction)
    if not uv:
        # Zero-length direction: no nudge.
        return (0, 0)

    result = []
    for uv_component in uv:
        if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD:
            # Component near 0: direction almost orthogonal to this axis,
            # so keep the coordinate unchanged.
            result.append(0)
        else:
            # Nudge the coordinate by +/- 1.0 in the unit vector's direction.
            result.append(copysign(1.0, uv_component))
    return tuple(result)
class Circle:
    """A circle described by a 2D centre point and a radius."""

    def __init__(self, centre, radius):
        self.centre = centre
        self.radius = radius

    def __repr__(self):
        return f"Circle(centre={self.centre}, radius={self.radius})"

    def round(self):
        """Return a copy with centre and radius rounded with otRound."""
        return Circle(_round_point(self.centre), otRound(self.radius))

    def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
        """True if this circle lies inside (or touches) *outer_circle*."""
        dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
        # BUGFIX: the `tolerance` parameter was ignored; the body hard-coded
        # the module constant. Default value is unchanged, so callers that
        # relied on the default behave identically.
        return (
            isclose(outer_circle.radius, dist, rel_tol=tolerance)
            or outer_circle.radius > dist
        )

    def concentric(self, other):
        return self.centre == other.centre

    def move(self, dx, dy):
        """Translate the centre in place by (dx, dy)."""
        self.centre = (self.centre[0] + dx, self.centre[1] + dy)
def round_start_circle_stable_containment(c0, r0, c1, r1):
    """Round start circle so that it stays inside/outside end circle after rounding.

    The rounding of circle coordinates to integers may cause an abrupt change
    if the start circle c0 is so close to the end circle c1's perimiter that
    it ends up falling outside (or inside) as a result of the rounding.
    To keep the gradient unchanged, we nudge it in the right direction.

    See:
    https://github.com/googlefonts/colr-gradients-spec/issues/204
    https://github.com/googlefonts/picosvg/issues/158
    """
    start, end = Circle(c0, r0), Circle(c1, r1)

    inside_before_round = start.inside(end)

    round_start = start.round()
    round_end = end.round()
    inside_after_round = round_start.inside(round_end)

    if inside_before_round == inside_after_round:
        return round_start
    elif inside_after_round:
        # start was outside before rounding: we need to push start away from end
        direction = _vector_between(round_end.centre, round_start.centre)
        radius_delta = +1.0
    else:
        # start was inside before rounding: we need to push start towards end
        direction = _vector_between(round_start.centre, round_end.centre)
        radius_delta = -1.0
    dx, dy = _rounding_offset(direction)

    # At most 2 iterations ought to be enough to converge. Before the loop, we
    # know the start circle didn't keep containment after normal rounding; thus
    # we continue adjusting by -/+ 1.0 until containment is restored.
    # Normal rounding can at most move each coordinates -/+0.5; in the worst case
    # both the start and end circle's centres and radii will be rounded in opposite
    # directions, e.g. when they move along a 45 degree diagonal:
    #   c0 = (1.5, 1.5) ===> (2.0, 2.0)
    #   r0 = 0.5 ===> 1.0
    #   c1 = (0.499, 0.499) ===> (0.0, 0.0)
    #   r1 = 2.499 ===> 2.0
    # In this example, the relative distance between the circles, calculated
    # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
    # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
    # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
    # moves cover twice that distance, which is enough to restore containment.
    max_attempts = 2
    for _ in range(max_attempts):
        if round_start.concentric(round_end):
            # can't move c0 towards c1 (they are the same), so we change the radius
            round_start.radius += radius_delta
            assert round_start.radius >= 0
        else:
            round_start.move(dx, dy)
        if inside_before_round == round_start.inside(round_end):
            break
    else:  # likely a bug
        raise AssertionError(
            f"Rounding circle {start} "
            f"{'inside' if inside_before_round else 'outside'} "
            f"{end} failed after {max_attempts} attempts!"
        )

    return round_start
| StarcoderdataPython |
import numpy as np
class Mass:
    """Container for a model's mass element/property card collections
    (CONM1/CONM2, CMASS1-5, PMASS)."""

    def __init__(self, model):
        """
        Defines the Mass object.

        Parameters
        ----------
        model : BDF
            the BDF object
        """
        self.model = model
        # Total number of mass entries across all card types (set by build()).
        self.n = 0
        self.conm1 = model.conm1
        self.conm2 = model.conm2
        self.pmass = model.pmass
        self.cmass1 = model.cmass1
        self.cmass2 = model.cmass2
        self.cmass3 = model.cmass3
        self.cmass4 = model.cmass4
        self.cmass5 = model.cmass5

    def allocate(self, card_count):
        """Pre-allocate storage for every card type present in *card_count*."""
        etypes = self._get_types(nlimit=False)
        for etype in etypes:
            if etype.type in card_count:
                etype.allocate(card_count[etype.type])

    def build(self):
        """Build every non-empty card type and accumulate the total count."""
        types = self._get_types(nlimit=False)
        for elems in types:
            if elems.n:
                self.model.log.debug('    building %s' % elems.__class__.__name__)
                elems.build()
                self.n += elems.n

    def rebuild(self):
        raise NotImplementedError()

    def add_conm1(self, card, comment):
        self.conm1.add(card, comment)

    def add_conm2(self, card, comment):
        self.conm2.add(card, comment)

    def add_cmass1(self, card, comment):
        self.cmass1.add(card, comment)

    def add_cmass2(self, card, comment):
        self.cmass2.add(card, comment)

    def add_cmass3(self, card, comment):
        self.cmass3.add(card, comment)

    def add_cmass4(self, card, comment):
        self.cmass4.add(card, comment)

    def add_cmass5(self, card, comment):
        self.cmass5.add(card, comment)

    def add_pmass(self, card, comment):
        self.pmass.add(card, comment)

    #=========================================================================
    def get_indexs(self, element_ids=None):
        """Return (element_ids, argsort indices) across all mass types,
        or (None, None) when no mass cards exist."""
        types = self._get_types()
        if types:
            # BUGFIX: these were bare `concatenate`/`argsort` (NameError) --
            # the module only imports numpy as np.
            _element_ids = np.concatenate([mtype.element_id for mtype in types])
            i = np.argsort(_element_ids)
            return _element_ids, i
        return None, None

    def get_mass_by_element_id(self, element_id=None, total=False):
        """Return the mass (summed when *total* is True) of the mass elements."""
        assert element_id is None
        mass_types = self._get_types(nlimit=True)
        element_id, i = self.get_indexs(element_id)
        if element_id is None:
            return None
        for mass_type in mass_types:
            element_id2 = np.union1d(element_id, mass_type.element_id)
            massi = mass_type.get_mass_by_element_id(element_id2, total)
            if total:
                mass = massi.sum()
            else:
                mass = massi
            # NOTE(review): returns after the FIRST mass type, exactly as the
            # original did -- presumably incomplete; confirm intended behavior
            # before summing over all types.
            return mass

    #=========================================================================
    def write_card(self, bdf_file, size=8, is_double=False, element_id=None):
        types = self._get_types(nlimit=True)
        if types:
            bdf_file.write('$ELEMENTS_MASS\n')
        for elems in types:
            elems.write_card(bdf_file, size=size, element_id=element_id)

    def _get_types(self, nlimit=True):
        """Return the mass card collections; only non-empty ones if *nlimit*."""
        # NOTE: only CONM1/CONM2 participate; CMASSi/PMASS were disabled in
        # the original implementation.
        mtypes = [
            self.conm1, self.conm2,
        ]
        if nlimit:
            return [mtype for mtype in mtypes if mtype.n > 0]
        return mtypes

    def get_stats(self):
        msg = []
        types = self._get_types(nlimit=True)
        for element in types:
            nele = element.n
            if nele:
                msg.append('  %-8s: %i' % (element.type, nele))
        return msg

    def _verify(self, xref=True):
        types = self._get_types()
        for elems in types:
            elems._verify(xref=xref)

    def __repr__(self):
        return '<%s object; n=%s>' % (self.__class__.__name__, self.n)
| StarcoderdataPython |
import rospy
import serial
from ros_waspmote_reader.msg import wasp
### $ sudo usermod -a -G dialout $USER
class co2_reader():
    """Reads gas data lines from a Waspmote over serial and publishes them
    on the 'espeleo_gas_pub' ROS topic as `wasp` messages."""

    def __init__(self,
                 frame_id='gas_sensor',
                 serial_port='/dev/ttyUSB0',
                 serial_baudrate=115200
                 ):
        self.pub = rospy.Publisher('espeleo_gas_pub', wasp, queue_size=10)
        rospy.init_node('wasp_node')
        self.rate = rospy.Rate(1)  # publish at 1 Hz
        self.gas_data = wasp()
        self.gas_data.header.stamp = rospy.Time.now()
        self.gas_data.header.frame_id = frame_id
        self.gas_data.sensor_name = ['CO2', 'TEMPERATURE', 'HUMIDITY', 'PRESSURE']
        self.ser = serial.Serial(serial_port, serial_baudrate)
        rospy.loginfo("Publisher Created")

    def parse(self):
        """Read one serial line; return the list of float readings, or None
        while the sensor is still warming up."""
        self.data = self.ser.readline().split(":")
        rospy.loginfo(self.data)
        if self.data[0] == "Gas concentration":
            self.g_data = self.data[1].split(",")
            # Last comma-separated field is not a reading, hence len - 1.
            return [float(self.g_data[i]) for i in range(len(self.g_data) - 1)]
        else:
            rospy.loginfo("Waiting for the sensor to warm up! 2 Minutes")
            return None

    def initiate(self):
        """Publish parsed readings until ROS shuts down."""
        rospy.loginfo("Publisher Initiated")
        while not rospy.is_shutdown():
            self.gas_data.reads = self.parse()
            # BUGFIX: the guard tested `self.gas_data is not None`, which is
            # always true; it must test the parsed readings instead.
            if self.gas_data.reads is not None:
                self.pub.publish(self.gas_data)
            self.rate.sleep()
| StarcoderdataPython |
import tkinter
import tkinter.messagebox
from PIL import Image, ImageTk
from scripts import General, Warnings, InputConstraints, Parameters, Constants, Log
from scripts.frontend import Navigation, ClientConnection
from scripts.frontend.custom_widgets import CustomButtons, CustomLabels
from scripts.frontend.custom_widgets.CustomButtons import InformationButton, SearchButton
from scripts.frontend.logic import MediapipHandAngler, SensorListener, DatasetRecorder
from scripts.frontend.page_components import \
InformationBlock, DatasetGraphBlock, InfoInputBlock, ProgressBar, \
StatusIndicator, SearchBlock, DataInfoBlock
from scripts.frontend.pages import GenericPage
TITLE_SELECTED_DATASET_INFORMATION = "Selected Dataset Information"
TITLE_NEW_DATASET_INFORMATION = "New Dataset Information"
datasets_page = None
class Frame(GenericPage.NavigationFrame):
    """Top-level Datasets page: hosts a 'view existing' and a 'create new'
    sub-frame and switches between them."""

    def __init__(self, root, base_frame=None):
        GenericPage.NavigationFrame.__init__(self, root=root, base_frame=base_frame,
                                             page_title=Navigation.TITLE_DATASETS)
        self.grid(padx=0, pady=0, sticky=tkinter.NSEW)
        self.config(bd=0, relief=None)

        # Weights
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        # Frames (stacked on the same grid cell; one is hidden at a time)
        self.view_frame = ViewFrame(self, base_frame=base_frame)
        self.new_frame = NewFrame(self, base_frame=base_frame)
        self.view_frame.grid(row=0)
        self.new_frame.grid(row=0)
        self.showing_view_frame = True

        # Set the switch-frame callbacks on the sub-frames
        self.view_frame.set_switch_to_new_frame(self._switch_to_new_frame)
        self.new_frame.set_switch_to_view_frame(self._switch_to_view_frame)

        # Default to showing the view frame
        self._switch_to_view_frame()

    def _switch_to_view_frame(self):
        # Visual switch
        self.new_frame.grid_remove()
        self.current_frame = self.view_frame
        self.current_frame.grid()
        # Logical switch: stop recording/camera processes of the "new" page
        self.new_frame.stop_new_frame_processes()

    def _switch_to_new_frame(self):
        # Visual switch
        self.view_frame.grid_remove()
        self.current_frame = self.new_frame
        self.current_frame.grid()
        # Logical switch
        self.new_frame.start_new_frame_processes()

    def update_colour(self):
        super().update_colour()
        self.config(bg=General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_A))
        self.view_frame.update_colour()
        self.new_frame.update_colour()

    def update_content(self):
        super().update_content()
        self.view_frame.update_content()
        self.new_frame.update_content()

    def destroy(self):
        super().destroy()
        self.view_frame.destroy()
        self.new_frame.destroy()
# Sub-page for browsing, editing, merging, smoothing and deleting existing
# datasets. Left column: searchable dataset list; right column: detail/edit
# panel (info_frame) which is swapped for smooth_frame while smoothing.
# NOTE: indentation of this class was lost in extraction; code is kept
# byte-identical, comments only added.
class ViewFrame(GenericPage.NavigationFrame):
def __init__(self, root, base_frame=None):
GenericPage.NavigationFrame.__init__(self, root=root, base_frame=base_frame,
page_title=Navigation.TITLE_DATASETS)
# Weights
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=3)
self.rowconfigure(2, weight=1)
"""
Search space
"""
self.search_frame = SearchBlock.DatasetSearchFrame(self, column=0, row=0, rowspan=2, title="Dataset List",
multi_select=True, sort_columnspan=3,
select_change_command=self.selected_entry_update_command,
search_frame_command=self.search_frame_command)
self.search_frame.grid(sticky=tkinter.NSEW)
# Additional buttons for the search frame
self.new_dataset_button = SearchButton(self.search_frame.button_frame, column=0, row=0, text="New",
command=Warnings.not_complete)
self.merge_selected_button = SearchButton(self.search_frame.button_frame, column=1, row=0,
text="Merge Selected", command=self.merge_selected_button_command)
"""
Info Frame
"""
# Two detail panels share the same grid cell; info_frame shows the selected
# dataset, smooth_frame is shown only while a smoothing operation is edited.
self.info_frame = DataInfoBlock.DatasetInfo(
self, column=1, row=0, title="Selected Dataset Information",
general_options_data={"Name": True, "ID_Owner": False, "Date_Created": False,
"Permission": False, "Rating": True, "Is_Raw": False},
right_options_data={"Num_Frames": False, "FPS": False,
"Sensor_Savagol_Distance": False, "Sensor_Savagol_Degree": False,
"Angle_Savagol_Distance": False, "Angle_Savagol_Degree": False},
right_column_title="Smoothing Information")
self.smooth_frame = DataInfoBlock.DatasetInfo(
self, column=1, row=0, title="Smoothed Dataset Information",
general_options_data={"Name": True, "ID_Owner": False, "Date_Created": False,
"Permission": False, "Rating": True, "Is_Raw": False},
right_options_data={"Num_Frames": False, "FPS": False,
"Sensor_Savagol_Distance": True, "Sensor_Savagol_Degree": True,
"Angle_Savagol_Distance": True, "Angle_Savagol_Degree": True},
right_column_title="Smoothing Information")
self.smooth_frame.grid_remove()
# Additional buttons for the info frame
self.button_frame = GenericPage.Frame(self,
column=1, row=1,
columnspan=1, rowspan=1)
self.button_frame.config(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
# Configure button frame weights
for i in range(0, 5):
self.button_frame.columnconfigure(i, weight=1)
# Create info frame buttons
self.update_button = InformationButton(self.button_frame, column=0, row=0, text="Update",
command=self.update_button_command)
# self.favourite_button = InformationButton(self.button_frame, column=1, row=0, text="Favourite",
# command=lambda: self.info_frame.toggle_favourite_item(
# self.search_frame.get_selected_main_id()))
self.smooth_button = InformationButton(self.button_frame, column=1, row=0, text="Smooth Dataset",
command=lambda: self.set_is_smoothing(True))
self.confirm_button = InformationButton(self.button_frame, column=2, row=0, text="Confirm",
command=self.smooth_dataset_button_command)
self.cancel_button = InformationButton(self.button_frame, column=3, row=0, text="Cancel",
command=lambda: self.set_is_smoothing(False))
self.delete_button = InformationButton(self.button_frame, column=4, row=0, text="Delete",
command=self.delete_button_command)
self.is_smoothing = False
self.set_is_smoothing(False)
# Prediction Preview frame
self.graph_frame = DatasetGraphBlock.Frame(self, column=0, row=2, columnspan=2)
self.graph_frame.metric_button_frame.enable_all_buttons(False)
def update_colour(self):
super().update_colour()
# Search frame
self.search_frame.update_colour()
self.new_dataset_button.update_colour()
self.merge_selected_button.update_colour()
# Info frame
self.smooth_frame.update_colour()
self.info_frame.update_colour()
self.button_frame.update_colour()
self.update_button.update_colour()
# self.favourite_button.update_colour()
# self.duplicate_button.update_colour()
self.smooth_button.update_colour()
self.confirm_button.update_colour()
self.cancel_button.update_colour()
self.delete_button.update_colour()
# Other
self.graph_frame.update_colour()
def update_content(self):
super().update_content()
# Search frame
self.search_frame.update_content()
self.new_dataset_button.update_content()
self.merge_selected_button.update_content()
# Info frame: only the currently visible detail panel is refreshed.
if self.is_smoothing is True:
self.smooth_frame.update_content()
self.smooth_frame.update_entries({"Date_Created": General.get_current_slashed_date()})
else:
self.info_frame.update_content()
self.button_frame.update_content()
self.update_button.update_content()
# self.favourite_button.update_content()
# self.duplicate_button.update_content()
self.smooth_button.update_content()
self.confirm_button.update_content()
self.cancel_button.update_content()
self.delete_button.update_content()
# Other
self.graph_frame.update_content()
# Persist edits of the selected dataset, then refresh the list on success.
def update_button_command(self):
result = self.info_frame.save_item(is_selected=self.search_frame.scroll_block.is_selected_main(),
item_id=self.search_frame.get_selected_main_id())
if result is True:
self.search_frame.search_button_command()
# Delete the selected dataset, then refresh the list on success.
def delete_button_command(self):
result = self.info_frame.delete_item(self.search_frame.scroll_block.is_selected_main(),
self.search_frame.get_selected_main_id())
Log.debug("The database deletion result is: " + str(result))
if result is True:
self.search_frame.search_button_command()
def merge_selected_button_command(self):
result = self.search_frame.merge_selected_datasets()
Log.debug("The database merging result is: " + str(result))
if result is True:
self.search_frame.search_button_command()
# Reset the detail panel and previews whenever a new search is issued.
def search_frame_command(self):
# self.search_frame.search_button_command()
self.info_frame.clear_info_frame()
self.graph_frame.metric_button_frame.enable_all_buttons(False)
self.set_is_smoothing(False)
self.graph_frame.image_frame.clear_images()
# Populate the detail panel and graph previews for the newly selected row.
def selected_entry_update_command(self):
self.set_is_smoothing(False)
# Obtains the data
selected_index = self.search_frame.scroll_block.get_selected_main()
data_at_index = self.search_frame.get_index_data(selected_index)
# Prepares the entries
entries = {}
for i in range(0, len(Constants.DATASET_ENTRY_TRANSFER_DATA)):
entries[Constants.DATASET_ENTRY_TRANSFER_DATA[i]] = data_at_index[i]
owner_name = ClientConnection.get_user_name_of(entries.get("ID_Owner"))
# Updates the info frame
self.info_frame.update_entries(entries=entries, owner_name=owner_name)
self.graph_frame.metric_button_frame.enable_all_buttons(True)
# Is_Raw == 0 means the dataset is already smoothed: disallow re-smoothing
# but enable velocity/acceleration previews.
if self.search_frame.get_selected_main_data()[Constants.DATASET_ENTRY_TRANSFER_DATA.index("Is_Raw")] == 0:
self.smooth_button.disable()
self.graph_frame.metric_button_frame.enable_vel_acc_buttons(True)
else:
self.smooth_button.enable()
self.graph_frame.metric_button_frame.enable_vel_acc_buttons(False)
self.graph_frame.metric_button_frame.set_image_state(1, False)
self.graph_frame.metric_button_frame.set_image_state(2, False)
# Updates the Dataset Graph Block
selected_dataset_id = self.search_frame.get_selected_main_id()
self.graph_frame.image_frame.load_new_images(
dataset_id=selected_dataset_id,
is_raw=self.search_frame.list_storage[selected_index][
Constants.DATASET_ENTRY_TRANSFER_DATA.index("Is_Raw")])
self.graph_frame.metric_button_frame.update_image_state()
# Run the smoothing operation described in smooth_frame against the server.
def smooth_dataset_button_command(self):
result = self.smooth_frame.smooth_dataset(
self.search_frame.get_index_entry(self.search_frame.scroll_block.get_selected_main(), "ID_Owner"),
self.search_frame.get_selected_main_id())
Log.debug("The database smoothing result is: " + str(result))
if result is True:
self.set_is_smoothing(False)
self.search_frame.search_button_command()
self.graph_frame.image_frame.clear_images()
# Toggle between the normal detail panel and the smoothing editor.
# Entering smoothing mode requires a selected dataset and a logged-in user.
def set_is_smoothing(self, smooth):
if smooth is True:
if self.search_frame.scroll_block.is_selected_main() is True:
if ClientConnection.is_logged_in() is True:
self.is_smoothing = True
# Change frame display
self.info_frame.grid_remove()
self.smooth_frame.grid()
# Button enablement management
self.update_button.disable()
self.smooth_button.disable()
self.confirm_button.enable()
self.cancel_button.enable()
self.delete_button.disable()
# Set entry values
self.smooth_frame.update_entries({
"Name": self.info_frame.get_value("Name") + "_smth",
"ID_Owner": ClientConnection.get_user_id(),
"Date_Created": General.get_current_slashed_date(),
"Permission": Constants.PERMISSION_LEVELS.get(self.info_frame.get_value("Permission")),
"Rating": self.info_frame.get_value("Rating"),
"Is_Raw": 0,
"Num_Frames": self.info_frame.get_value("Num_Frames"),
"FPS": self.info_frame.get_value("FPS"),
}, owner_name=ClientConnection.get_user_name())
# Clears the smoothing parameters
self.smooth_frame.update_entries({
"Sensor_Savagol_Distance": "",
"Sensor_Savagol_Degree": "",
"Angle_Savagol_Distance": "",
"Angle_Savagol_Degree": ""
})
else:
tkinter.messagebox.showwarning("Warning!", "Can not smooth the data. User is not logged in.")
else:
tkinter.messagebox.showwarning("Warning!", "No dataset is selected.")
else:
self.is_smoothing = False
# Change frame display
self.smooth_frame.grid_remove()
self.info_frame.grid()
# Button enablement management
self.update_button.enable()
self.smooth_button.enable()
self.confirm_button.disable()
self.cancel_button.disable()
self.delete_button.enable()
def set_switch_to_new_frame(self, command):
self.new_dataset_button.config(command=command)
class NewFrame(GenericPage.NavigationFrame):
    """Page for recording a brand-new dataset.

    Left column: general dataset metadata and camera-control inputs.
    Right column (DataRecInfoFrame): recording parameters, start/stop
    controls, a progress bar and a live camera preview driven by a
    hand-angler reader. On success the recorded dataset can be uploaded
    to the server.
    """

    class DataRecInfoFrame(GenericPage.Frame):
        """Right-hand panel: recording parameters, control buttons and camera preview."""

        def __init__(self, root, hand_angler, column, row, columnspan=1, rowspan=1):
            """Build the recording-info widgets; `hand_angler` supplies preview frames."""
            GenericPage.Frame.__init__(self,
                                       root,
                                       column=column, row=row,
                                       columnspan=columnspan, rowspan=rowspan)
            self.config(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
            # Configure weights
            self.columnconfigure(0, weight=1)
            self.columnconfigure(1, weight=1)
            self.rowconfigure(0, weight=0)
            self.rowconfigure(1, weight=1)
            # Data Recording Information
            self.record_options = ["Sensor zeroing delay (seconds)", "Training length (seconds)", "Frames per second"]
            self.input_frame = InfoInputBlock.Frame(self, column=0, row=0,
                                                    options=self.record_options,
                                                    title="Data Recording Information")
            # Input frame: pre-fill recording parameters with project defaults.
            self.input_frame.set_entry_value("Sensor zeroing delay (seconds)",
                                             Constants.RECORDING_DEFAULT_SENSOR_ZEROING_DELAY)
            self.input_frame.set_entry_value("Training length (seconds)", Constants.RECORDING_DEFAULT_TRAINING_LENGTH)
            self.input_frame.set_entry_value("Frames per second", Constants.RECORDING_DEFAULT_FRAMES_PER_SECOND)
            # Main frames
            self.process_frame = GenericPage.Frame(self, column=1, row=0)
            self.process_frame.columnconfigure(0, weight=1)
            self.process_frame.columnconfigure(1, weight=1)
            self.process_frame.rowconfigure(1, weight=1)
            self.process_frame.rowconfigure(2, weight=1)
            # Progress start/stop
            self.start_stop_title = CustomLabels.TitleLabel(self.process_frame,
                                                            column=0, row=0, columnspan=3,
                                                            text="Data Recording Control Panel")
            self.start_progress_button = CustomButtons.InformationButton(self.process_frame,
                                                                         column=0, row=1, text="Start Data Gathering")
            self.status_label = StatusIndicator.Label(self.process_frame, column=1, row=1)
            self.stop_progress_button = CustomButtons.InformationButton(self.process_frame,
                                                                        column=2, row=1, text="Stop Data Gathering")
            self.progress_bar = ProgressBar.Frame(self.process_frame, column=0, row=2, columnspan=3,
                                                  metric_text=" seconds", max_count=100)
            # Progress start/stop configuration
            self.start_stop_title.grid(padx=Constants.STANDARD_SPACING)
            self.start_progress_button.grid(sticky=tkinter.EW, padx=Constants.LONG_SPACING)
            self.status_label.grid(padx=Constants.SHORT_SPACING)
            self.stop_progress_button.grid(sticky=tkinter.EW, padx=Constants.LONG_SPACING)
            # Camera + default parameters
            self.hand_angler = hand_angler
            self.camera_frame = GenericPage.Frame(self, column=0, row=1, columnspan=2)
            self.camera_frame.columnconfigure(0, weight=1)
            self.camera_frame.rowconfigure(0, weight=1)
            self.camera_label = CustomLabels.TitleLabel(self.camera_frame, column=0, row=0)
            self.camera_label.grid(sticky=tkinter.N)

        def update_colour(self):
            """Re-apply the theme palette to every child widget."""
            # Set colour
            self.input_frame.set_frame_colour(
                General.washed_colour_hex(Parameters.COLOUR_ALPHA, Parameters.ColourGrad_B))
            self.input_frame.set_label_colour(
                General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_D))
            self.progress_bar.set_background_colour(
                General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_C))
            self.progress_bar.set_progress_colour(
                General.washed_colour_hex(Constants.COLOUR_GREEN, Parameters.ColourGrad_F))
            # Update colour
            self.input_frame.update_colour()
            self.process_frame.update_colour()
            self.progress_bar.update_colour()
            self.start_stop_title.update_colour()
            self.start_progress_button.update_colour()
            self.status_label.update_colour()
            self.stop_progress_button.update_colour()
            self.camera_frame.update_colour()
            self.camera_label.update_colour()
            # Set colours
            self.start_stop_title.config(bg=General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_D))
            self.process_frame.config(bg=General.washed_colour_hex(Parameters.COLOUR_ALPHA, Parameters.ColourGrad_B))
            self.config(bg=General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_B))

        def update_content(self):
            """Refresh child widgets and repaint the latest camera frame, if any."""
            self.input_frame.update_content()
            self.process_frame.update_content()
            self.progress_bar.update_content()
            self.camera_frame.update_content()
            self.camera_label.update_content()
            self.start_progress_button.update_content()
            self.status_label.update_content()
            self.stop_progress_button.update_content()
            # Paint the camera
            if (self.hand_angler is not None) and (self.hand_angler.get_processed_image() is not None):
                image = self.hand_angler.get_processed_image()
                # Resize image to fit the available frame while keeping aspect ratio.
                ratio = General.resizing_scale(width=image.width, height=image.height,
                                               space_width=self.camera_frame.winfo_width(),
                                               space_height=self.camera_frame.winfo_height())
                image = image.resize((int(ratio * image.width), int(ratio * image.height)))
                # Apply image; keep a reference on the label so Tk doesn't
                # garbage-collect the PhotoImage.
                imageTk = ImageTk.PhotoImage(image=image)
                self.camera_label.config(image=imageTk)
                self.camera_label.image = imageTk

        def stop_hand_angler(self):
            """Stop the hand-angler reader thread if it is still running."""
            if (self.hand_angler is not None) \
                    and self.hand_angler.is_running():
                self.hand_angler.stop()

        def destroy(self):
            """Stop the angler before tearing down the widget tree."""
            self.stop_hand_angler()
            super().destroy()

    def __init__(self, root, base_frame=None):
        """Build the page layout and start the default camera reader."""
        GenericPage.NavigationFrame.__init__(self, root=root, base_frame=base_frame,
                                             page_title=Navigation.TITLE_DATASETS)
        # Weights
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=5)
        # self.rowconfigure(0, weight=1)
        self.rowconfigure(1, weight=4)
        self.rowconfigure(2, weight=5)
        # Cancel Button
        self.cancel_new_dataset = CustomButtons.SearchButton(self, column=0, row=0, text="View Datasets")
        # Frames
        self.general_options = ["Name", "Owner", "Date created", "Access permissions", "Personal rating"]
        self.general_info_frame = InfoInputBlock.Frame(self,
                                                       column=0, row=1,
                                                       options=self.general_options,
                                                       title="General Information")
        self.general_info_frame.disable_entry("Owner")  # Note: Owner entry is automatically updated
        self.general_info_frame.disable_entry("Date created")  # Note: Date created entry is automatically updated
        self.general_info_frame.set_perm_option_menu("Access permissions")
        self.upload_dataset_button = CustomButtons.SearchButton(self.general_info_frame,
                                                                column=0, row=6, columnspan=2,
                                                                command=self.upload_dataset_to_server,
                                                                text="Upload Dataset to Server")
        # Cam Control Info
        self.cam_control_options = ["Video source", "Width", "Height", "Zoom %", "Frames per second"]
        self.cam_control_frame = InfoInputBlock.Frame(self,
                                                      column=0, row=2,
                                                      options=self.cam_control_options,
                                                      title="Camera Control")
        self.cam_control_frame.set_video_source_option_menu("Video source")
        self.apply_cam_settings = CustomButtons.SearchButton(self.cam_control_frame,
                                                             column=0, row=6, columnspan=2,
                                                             command=self.reconfigure_hand_angler,
                                                             text="Apply Camera Settings")
        # Give default camera settings variables
        self.cam_control_frame.set_entry_value("Width", Constants.CAMERA_DEFAULT_RESOLUTION_X)
        self.cam_control_frame.set_entry_value("Height", Constants.CAMERA_DEFAULT_RESOLUTION_Y)
        self.cam_control_frame.set_entry_value("Zoom %", Constants.CAMERA_DEFAULT_ZOOM_PERCENT)
        self.cam_control_frame.set_entry_value("Frames per second", Constants.CAMERA_DEFAULT_FRAMES_PER_SECOND)
        """
        logic threads and objects
        """
        # Setup Sensor Reader (created lazily when recording starts)
        self.sensor_listener = None
        self.data_recorder = None
        # Setup the hand angler; reconfigure also builds data_rec_info_frame.
        self.hand_angler = None
        self.reconfigure_hand_angler()

    def update_colour(self):
        """Re-apply the theme palette to this page and its panels."""
        super().update_colour()
        # Label colour
        self.general_info_frame.set_frame_colour(
            General.washed_colour_hex(Parameters.COLOUR_ALPHA, Parameters.ColourGrad_B))
        self.general_info_frame.set_label_colour(
            General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_D))
        self.cam_control_frame.set_frame_colour(
            General.washed_colour_hex(Parameters.COLOUR_ALPHA, Parameters.ColourGrad_B))
        self.cam_control_frame.set_label_colour(
            General.washed_colour_hex(Parameters.COLOUR_BRAVO, Parameters.ColourGrad_D))
        # Update colour
        self.cancel_new_dataset.update_colour()
        self.general_info_frame.update_colour()
        self.upload_dataset_button.update_colour()
        self.cam_control_frame.update_colour()
        self.data_rec_info_frame.update_colour()
        self.apply_cam_settings.update_colour()

    def update_content(self):
        """Refresh widgets, auto-fill owner/date and gate the upload button."""
        super().update_content()
        self.general_info_frame.update_content()
        self.upload_dataset_button.update_content()
        self.cam_control_frame.update_content()
        self.data_rec_info_frame.update_content()
        self.cancel_new_dataset.update_content()
        self.apply_cam_settings.update_content()
        # Updates user and date
        owner = ClientConnection.get_user_name()
        if owner is None:
            owner = ""
        self.general_info_frame.set_entry_value("Owner", owner)
        self.general_info_frame.set_entry_value("Date created", General.get_current_slashed_date())
        # Updates data recorder status indicator
        if self.data_recorder is not None:
            self.data_rec_info_frame.status_label.set_status(self.data_recorder.is_running())
        # Enables uploading to the server only after a successful recording.
        if (self.data_recorder is not None) and (self.data_recorder.is_successful() is True):
            self.upload_dataset_button.enable()
        else:
            self.upload_dataset_button.disable()

    def destroy(self):
        """Stop recorder, sensor listener and angler threads before teardown."""
        if self.data_recorder is not None:
            self.data_recorder.stop()
        if self.sensor_listener is not None:
            self.sensor_listener.stop_reading()
            self.sensor_listener.stop_running()
        if self.hand_angler is not None:
            self.hand_angler.stop_watching()
            self.hand_angler.stop()
        super().destroy()

    def upload_dataset_to_server(self):
        """Validate the metadata entries and upload the recorded dataset."""
        # Extract values
        name = self.general_info_frame.get_value("Name")
        owner_name = self.general_info_frame.get_value("Owner")
        date_created = self.general_info_frame.get_value("Date created")
        access_permissions = self.general_info_frame.get_value("Access permissions")
        personal_rating = self.general_info_frame.get_value("Personal rating")
        frames_per_second = self.data_recorder.frames_per_second
        # Assert the input constraints; each check warns the user on failure.
        can_upload = True
        can_upload &= InputConstraints.assert_string_non_empty("Name", name)
        can_upload &= InputConstraints.assert_string_non_empty("Owner", owner_name)
        can_upload &= InputConstraints.assert_string_non_empty("Date created", date_created)
        can_upload &= InputConstraints.assert_string_from_set("Access permissions", access_permissions,
                                                              Constants.PERMISSION_LEVELS.keys())
        can_upload &= InputConstraints.assert_int_positive("Frames per second", frames_per_second)
        can_upload &= InputConstraints.assert_int_non_negative("Personal rating", personal_rating, 100)
        # Uploads to the server if the input constraints are satisfied
        if can_upload is True:
            Log.info("Uploading the dataset '" + name + "' to the server: " + ClientConnection.get_server_address())
            assert self.data_recorder is not None
            assert self.data_recorder.is_successful()
            # Post-recording variable deduction
            num_frames = self.data_recorder.get_number_of_frames()
            # Uploads the dataset to the server
            access_perm_level = Constants.PERMISSION_LEVELS.get(access_permissions)
            result = ClientConnection.upload_dataset(name, ClientConnection.get_user_id(),
                                                     date_created, access_perm_level, personal_rating,
                                                     num_frames, frames_per_second)
            if result is True:
                tkinter.messagebox.showinfo("Upload: Success!",
                                            "The dataset '" + name + "' was successfully uploaded to the server.")
                Log.info("Successfully uploaded the dataset named '" + name + "'.")
                self.data_recorder = None
            else:
                tkinter.messagebox.showwarning("Upload: Failed!", "The dataset failed to upload to the server.")
                Log.warning("Was not able to upload the dataset named '" + name + "'.")
        else:
            InputConstraints.warn("The dataset was not uploaded to the server. Input constraints were not satisfied.")

    def set_switch_to_view_frame(self, command):
        """Bind `command` as the click callback of the 'View Datasets' button."""
        self.cancel_new_dataset.config(command=command)

    def reconfigure_hand_angler(self):
        """(Re)start the camera/hand-tracking reader from the camera-control entries.

        Validates the entries, tears down any running angler, starts the reader
        matching the chosen video source and rebuilds the recording panel around it.
        """
        video_source = self.cam_control_frame.get_value("Video source")
        width = self.cam_control_frame.get_value("Width")
        height = self.cam_control_frame.get_value("Height")
        zoom_percent = self.cam_control_frame.get_value("Zoom %")
        frames_per_second = self.cam_control_frame.get_value("Frames per second")
        # Checks if the reset is allowed
        reconfigure = True
        # reconfigure &= InputConstraints.assert_string_from_set("Video source", video_source, Video)
        reconfigure &= InputConstraints.assert_int_positive("Width", width)
        reconfigure &= InputConstraints.assert_int_positive("Height", height)
        reconfigure &= InputConstraints.assert_int_positive("Zoom %", zoom_percent)
        reconfigure &= InputConstraints.assert_int_positive("Frames per second", frames_per_second)
        # Performs reset if allowed (Setup Hand Angler)
        if reconfigure is True:
            if self.hand_angler is not None:
                self.hand_angler.stop_watching()
                self.hand_angler.stop()
            # Turns on either the mediapipe or leap motion hand tracker
            if video_source == "Video Camera":
                self.hand_angler = MediapipHandAngler.MediaPipeHandAnglerReader()
                self.hand_angler.start()
                self.hand_angler.set_configurations(width=int(width), height=int(height), zoom=int(zoom_percent),
                                                    frames_per_second=int(frames_per_second))
                self.hand_angler.start_watching()
            elif video_source == "Leap Motion":
                self.hand_angler = MediapipHandAngler.LeapMotionHandAnglerReader()
                self.hand_angler.start()
                self.hand_angler.set_configurations(width=int(width), height=int(height), zoom=int(zoom_percent),
                                                    frames_per_second=int(frames_per_second))
                self.hand_angler.start_watching()
            else:
                Warnings.not_to_reach()
            # Data recording frame: rebuilt so the preview uses the new angler.
            self.data_rec_info_frame = NewFrame.DataRecInfoFrame(self, hand_angler=self.hand_angler,
                                                                 column=1, row=0, rowspan=3)
            self.data_rec_info_frame.start_progress_button.config(command=self.start_dataset_recording)
            self.data_rec_info_frame.stop_progress_button.config(command=self.stop_dataset_recording)
            self.data_rec_info_frame.update_colour()

    def start_dataset_recording(self):
        """Validate recording parameters, connect the sensor and start a recorder."""
        # Retrieves the data
        init_sleep_seconds = self.data_rec_info_frame.input_frame.get_value("Sensor zeroing delay (seconds)")
        training_length_seconds = self.data_rec_info_frame.input_frame.get_value("Training length (seconds)")
        frames_per_second = self.data_rec_info_frame.input_frame.get_value("Frames per second")
        # Assert the training constraints
        begin_training = True
        begin_training &= InputConstraints.assert_int_range_inclusive("Sensor zeroing delay (seconds)",
                                                                      init_sleep_seconds, 5, 60)
        begin_training &= InputConstraints.assert_int_positive("Training length (seconds)", training_length_seconds)
        begin_training &= InputConstraints.assert_int_positive("Frames per second", frames_per_second)
        if (begin_training is True) and (self.data_recorder is None or self.data_recorder.is_running() is False):
            # Sets up the sensor listener on first use.
            if self.sensor_listener is None:
                try:
                    self.sensor_listener = SensorListener.SensorReadingsListener()
                    self.sensor_listener.start_running()
                    self.sensor_listener.start()
                except:
                    # Any failure is treated as "device not connected".
                    self.sensor_listener = None
                    InputConstraints.warn(
                        "Warning, was not able to establish communications with COM3 port.\n" +
                        "Please ensure that the sensor reading device is connected.")
            if self.sensor_listener is not None:
                # Starts the data processing
                self.data_recorder = DatasetRecorder.Recorder(sensor_listener=self.sensor_listener,
                                                              hand_angler=self.hand_angler,
                                                              init_sleep_seconds=int(init_sleep_seconds),
                                                              training_length_seconds=int(training_length_seconds),
                                                              frames_per_second=int(frames_per_second),
                                                              progress_bar=self.data_rec_info_frame.progress_bar)
                self.data_recorder.start()

    def stop_dataset_recording(self):
        """Abort a running recording and reset the progress bar."""
        self.data_rec_info_frame.progress_bar.reset()
        if (self.sensor_listener is None) or (self.data_recorder is None) or (self.data_recorder.is_running() is False):
            InputConstraints.warn("The dataset recording process is not running.")
        else:
            if self.data_recorder is not None:
                self.data_recorder.stop()
            self.sensor_listener.stop_reading()

    def start_new_frame_processes(self):
        """Resume angler watching and sensor reading when this page is shown."""
        # Starts watching hand angler
        assert self.data_rec_info_frame.hand_angler is not None
        self.hand_angler.start_watching()
        if self.sensor_listener is not None:
            self.sensor_listener.start_reading()

    def stop_new_frame_processes(self):
        """Pause angler watching and sensor reading when leaving this page."""
        # Stop hand angler
        self.hand_angler.stop_watching()
        if self.sensor_listener is not None:
            self.sensor_listener.stop_reading()
| StarcoderdataPython |
1685979 | <filename>tb/test.py
from cocotb_test import simulator
from os import system, getcwd, environ
import pytest
from contextlib import contextmanager
# Instruction-test names to run, space-separated in $INSTS (default: one smoke test).
insts = environ.get("INSTS", "rv32ui-p-simple").split()
# Default cocotb-test simulator unless the caller already chose one.
if "SIM" not in environ:
    environ["SIM"] = "verilator"
# Verilog include directories from the FuseSoC build tree.
includes = [
    "./build/ousia_0/src/verilog-arbiter_0-r3/src/",
    "./build/ousia_0/src/cdc_utils_0.1/rtl/verilog/",
    "./build/ousia_0/src/wb_intercon_1.2.2-r1/rtl/verilog/",
    "./build/ousia_0/src/ousia-wb_intercon_0/",
    "./build/ousia_0/src/wb_common_1.0.3/",
    "./build/ousia_0/src/uart16550_1.5.5-r1/rtl/verilog/",
    "./build/ousia_0/src/gpio_0/",
]
@contextmanager
def prepare(elf_dir, elf_name):
    """Yield a per-test copy of cocotb_top.v patched to load `elf_name`'s memory image.

    Copies tb/cocotb_top.v, rewrites its $readmemh line to point at
    meminit/<elf_name>.verilog, exports DUMPFILE for the cocotb test module,
    and deletes the generated file on exit.

    NOTE(review): elf_dir is currently unused — confirm whether memfile/dumpfile
    were meant to be resolved under it.
    """
    top_v = f"tb/cocotb_top_{elf_name}.v"
    memfile = f"{getcwd()}/meminit/{elf_name}.verilog"
    dumpfile = f"{getcwd()}/meminit/{elf_name}.dump"
    system(f"cp tb/cocotb_top.v {top_v}")
    system(f"""sed -i 's|readmemh.*|readmemh("{memfile}", ram.mem);|' {top_v}""")
    environ["DUMPFILE"] = dumpfile  # consumed by riscv_test.py
    try:
        yield top_v
    finally:
        system(f"rm {top_v}")
@pytest.mark.parametrize("inst", insts)
@pytest.mark.timeout(90)
def test_inst(inst):
    """Run one riscv-tests ISA binary `inst` through the SoC simulation."""
    elf_dir = "/usr/riscv32-unknown-elf/share/riscv-tests/isa"
    with prepare(elf_dir, inst) as top_v:
        simulator.run(
            verilog_sources=["build/ousia_0/src/ousia_0/Naive.v", "naive_soc.v", top_v],
            includes=includes,
            toplevel="cocotb_top",
            module="riscv_test",
            sim_build="sim_build/" + inst,
            # extra_args=["--trace", "--trace-structs"],
        )
def test_firmware():
    """Simulate the SoC running the firmware memory image (firmware_test module)."""
    with prepare("firmware", "firmware") as top_v:
        simulator.run(
            verilog_sources=["build/ousia_0/src/ousia_0/Naive.v", "naive_soc.v", top_v],
            includes=includes,
            toplevel="cocotb_top",
            module="firmware_test",
            sim_build="sim_build/firmware",
        )
@contextmanager
def prepare_module(name):
    """Extract the single Verilog module `name` out of Naive.v into <name>.v.

    Uses awk's /module name/,/endmodule/ range match; the temporary file is
    removed on exit.
    """
    top_v = name + ".v"
    system(f"awk '/module {name}/,/endmodule/' Naive.v > {top_v}")
    try:
        yield top_v
    finally:
        system(f"rm {top_v}")
def test_sv32():
    """Run the Sv32 MMU unit test against the extracted MMUSimple module."""
    with prepare_module("MMUSimple") as top_v:
        simulator.run(
            verilog_sources=[top_v],
            includes=includes,
            toplevel="MMUSimple",
            module="sv32_test",
            sim_build="sim_build/sv32",
        )
| StarcoderdataPython |
3316785 | from typing import List
from guet.commands.command import Command
from guet.commands.decorators.command_factory_decorator import CommandFactoryDecorator
from guet.commands.decorators.start_required_decorator import StartRequiredDecorator
from guet.settings.settings import Settings
class LocalDecorator(CommandFactoryDecorator):
    """Factory decorator that adds a start-required check for '--local' runs."""

    def build(self, args: List[str], settings: Settings) -> Command:
        """Build the command; wrap the factory in StartRequiredDecorator when
        the '--local' flag is present, otherwise delegate unchanged."""
        if '--local' not in args:
            return self.decorated.build(args, settings)
        return StartRequiredDecorator(self.decorated).build(args, settings)
| StarcoderdataPython |
1781014 | <filename>codegen/snake2pascal.py
import re
import typing
def _upper_zero_group(match: typing.Match) -> str:
    """re.sub callback: return the captured 'let' group, uppercased."""
    letter = match.group("let")
    return letter.upper()
def snake2pascal(name: str) -> str:
    """Convert a snake_case name to PascalCase.

    Each lowercase letter that follows an underscore (consuming the
    underscore) or starts the string is uppercased.
    """
    return re.sub(
        r"(?:_|\A)(?P<let>[a-z])",
        lambda m: m.group("let").upper(),
        name,
    )
| StarcoderdataPython |
3252103 | import pytest
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group, Permission
from rest_framework.test import APIClient
from trucks.models import PaymentMethod
from .factories import PaymentMethodFactory, UserFactory
# Django auth group granted the truck-management permissions below.
GROUP = "Owners"
# Model and action names used to build "Can <action> <Model>" permission strings.
MODELS = ["Truck", "Image", "Location"]
PERMISSIONS = ["add", "change", "delete", "view"]
@pytest.fixture()
def base_setup(db):
    """Seed four payment methods and an 'Owners' group holding all truck CRUD permissions."""
    PaymentMethodFactory(payment_name="Cash")
    PaymentMethodFactory(payment_name="Credit Card")
    PaymentMethodFactory(payment_name="Debit Card")
    PaymentMethodFactory(payment_name="By phone")
    new_group, created = Group.objects.get_or_create(name=GROUP)
    # Attach every "Can <action> <Model>" permission to the group.
    for model in MODELS:
        for permission in PERMISSIONS:
            name = f"Can {permission} {model}"
            model_add_perm = Permission.objects.get(name=name)
            new_group.permissions.add(model_add_perm)
    # Sanity checks: 4 payment methods, 3 models x 4 actions = 12 permissions.
    assert PaymentMethod.objects.count() == 4
    assert new_group.permissions.count() == 12
@pytest.fixture()
def basic_user(db, django_user_model, django_username_field):
    """Active, non-staff, non-superuser account with no group memberships.

    NOTE(review): django_user_model / django_username_field are not used in the
    body — confirm they are only requested for fixture ordering.
    """
    user = UserFactory(
        username="basic_user",
        email="<EMAIL>",
        password=make_password("<PASSWORD>"),
        is_active=True,
        is_staff=False,
        is_superuser=False,
    )
    return user
@pytest.fixture()
def basic_user_client(db, basic_user):
    """DRF APIClient authenticated via session login as basic_user."""
    client = APIClient()
    client.login(username=basic_user.username, password="<PASSWORD>")
    return client
@pytest.fixture()
def owner_user(db, django_user_model, django_username_field):
    """Active, non-staff account intended to be placed in the Owners group.

    NOTE(review): django_user_model / django_username_field are not used in the
    body — confirm they are only requested for fixture ordering.
    """
    user = UserFactory(
        username="owner_user",
        email="<EMAIL>",
        password=make_password("<PASSWORD>"),
        is_active=True,
        is_staff=False,
        is_superuser=False,
    )
    return user
@pytest.fixture()
def owner_user_client(db, base_setup, owner_user):
    """APIClient logged in as owner_user after adding them to the Owners group.

    Depends on base_setup so the group and its permissions already exist.
    """
    owner_group = Group.objects.get(name="Owners")
    owner_group.user_set.add(owner_user)
    client = APIClient()
    client.login(username=owner_user.username, password="<PASSWORD>")
    return client
| StarcoderdataPython |
4829009 | from flask import Flask
from flask import request
import json
import requests
import hashlib as hasher
import datetime as date
# Flask app serving the /txion, /blocks and /mine endpoints below.
node = Flask(__name__)
# Define what a Snakecoin block is
class Block:
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
sha = hasher.sha256()
sha.update(str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash))
return sha.hexdigest()
# Generate genesis block
def create_genesis_block():
# Manually construct a block with
# index zero and arbitrary previous hash
return Block(0, date.datetime.now(), {
"proof-of-work": 9,
"transactions": None
}, "0")
# A completely random address of the owner of this node; mining rewards go here.
miner_address = "q3nf394hjg-random-miner-address-34nf3i4nflkn3oi"
# This node's blockchain copy, seeded with the genesis block.
blockchain = []
blockchain.append(create_genesis_block())
# Transactions received by this node but not yet mined into a block.
this_nodes_transactions = []
# URLs of every other node in the network, so that we can communicate with them.
peer_nodes = []
# Whether this node is mining.
mining = True
@node.route('/txion', methods=['POST'])
def transaction():
    """Accept a JSON transaction via POST and queue it for the next mined block.

    Note: this file is Python 2 (print statements below).
    """
    # On each new POST request,
    # we extract the transaction data
    new_txion = request.get_json()
    # Then we add the transaction to our list
    this_nodes_transactions.append(new_txion)
    # Because the transaction was successfully
    # submitted, we log it to our console
    print "New transaction"
    print "FROM: {}".format(new_txion['from'].encode('ascii','replace'))
    print "TO: {}".format(new_txion['to'].encode('ascii','replace'))
    print "AMOUNT: {}\n".format(new_txion['amount'])
    # Then we let the client know it worked out
    return "Transaction submission successful\n"
@node.route('/blocks', methods=['GET'])
def get_blocks():
    """Return this node's chain as a JSON array of block dictionaries.

    Fix: the original did `chain_to_send = blockchain` and then overwrote each
    element with a dict — that ALIASES the global list, so one GET /blocks
    permanently replaced every Block object in the chain with a dict and broke
    later mining (which reads block.index / block.data / block.hash). We now
    build a fresh list and leave the global chain untouched.
    """
    chain_to_send = []
    for block in blockchain:
        # Convert each block into a JSON-serializable dictionary.
        chain_to_send.append({
            "index": str(block.index),
            "timestamp": str(block.timestamp),
            "data": str(block.data),
            "hash": block.hash
        })
    return json.dumps(chain_to_send)
def find_new_chains():
    """Fetch and parse the blockchain of every known peer node.

    Returns a list of chains, one per entry in `peer_nodes`, obtained by
    GETting each peer's /blocks endpoint and decoding the JSON body.
    """
    other_chains = []
    for node_url in peer_nodes:
        # Get their chains using a GET request, then decode the JSON payload.
        raw_chain = requests.get(node_url + "/blocks").content
        other_chains.append(json.loads(raw_chain))
    return other_chains
def consensus():
    """Adopt the longest chain known among this node and its peers.

    Fix: without `global blockchain`, the assignment at the bottom made
    `blockchain` local to this function, so the earlier read
    `longest_chain = blockchain` raised UnboundLocalError and the global
    chain was never updated.
    """
    global blockchain
    # Get the blocks from other nodes
    other_chains = find_new_chains()
    # If our chain isn't longest, then we store the longest chain
    longest_chain = blockchain
    for chain in other_chains:
        if len(longest_chain) < len(chain):
            longest_chain = chain
    # If the longest chain isn't ours, we replace our chain with it
    # (longest-chain rule).
    blockchain = longest_chain
def proof_of_work(last_proof):
    """Return the smallest number greater than `last_proof` that is divisible
    by both 9 and `last_proof`.

    This is the toy proof-of-work puzzle: brute-force candidates upward until
    one satisfies both divisibility conditions.
    """
    candidate = last_proof + 1
    # Keep counting up until the candidate divides evenly by 9 and by the
    # previous block's proof of work.
    while candidate % 9 != 0 or candidate % last_proof != 0:
        candidate += 1
    return candidate
@node.route('/mine', methods = ['GET'])
def mine():
    """Mine one block: solve the proof of work, reward this miner with 1 coin,
    sweep the pending transactions into a new block and append it to the chain.
    Returns the new block as JSON."""
    # Get the last proof of work
    last_block = blockchain[len(blockchain) - 1]
    last_proof = last_block.data['proof-of-work']
    # Find the proof of work for the current block being mined
    # Note: The program will hang here until a new proof of work is found
    proof = proof_of_work(last_proof)
    # Once we find a valid proof of work, we know we can mine a block so
    # we reward the miner by adding a transaction
    this_nodes_transactions.append(
        { "from": "network", "to": miner_address, "amount": 1 }
    )
    # Now we can gather the data needed to create the new block;
    # list(...) snapshots the pending transactions before they are cleared.
    new_block_data = {
        "proof-of-work": proof,
        "transactions": list(this_nodes_transactions)
    }
    new_block_index = last_block.index + 1
    new_block_timestamp = this_timestamp = date.datetime.now()
    last_block_hash = last_block.hash
    # Empty transaction list in place, so the module-level list object is reused.
    this_nodes_transactions[:] = []
    # Now create the new block!
    mined_block = Block(
        new_block_index,
        new_block_timestamp,
        new_block_data,
        last_block_hash
    )
    blockchain.append(mined_block)
    # Let the client know we mined a block
    return json.dumps({
        "index": new_block_index,
        "timestamp": str(new_block_timestamp),
        "data": new_block_data,
        "hash": last_block_hash
    }) + "\n"
# Start the Flask development server (blocking call).
node.run()
| StarcoderdataPython |
1790835 | <filename>game03/2_1_timer.py<gh_stars>1-10
import pgzrun
from random import randint
from time import time
# Pygame Zero window configuration.
TITLE = "🐍🐍 Connetti i satelliti 🐍🐍"
WIDTH = 800
HEIGHT = 600
# Satellite actors in spawn order, and the lines drawn between connected ones.
satelliti = []
linee = []
# Index of the next satellite the player has to click.
indice_prossimo_satellite = 0
# Timekeeping variables (start time, running elapsed time, final time).
tempo_iniziale = 0
tempo_totale = 0
tempo_finale = 0
NUM_SATELLITI = 8
def crea_satelliti():
    """Spawn NUM_SATELLITI satellite actors at random positions and start the timer."""
    global tempo_iniziale
    for count in range(0, NUM_SATELLITI):
        satellite = Actor("satellite")
        # Keep a 40 px margin so the sprite stays fully on screen.
        satellite.pos = randint(40, WIDTH-40), randint(40, HEIGHT-40)
        satelliti.append(satellite)
    # Initialise the timer with the current epoch time.
    tempo_iniziale = time()
    print(tempo_iniziale)
def draw():
    """Render background, numbered satellites, connection lines and the timer."""
    global tempo_totale
    screen.blit("sfondo", (0,0))
    numero = 1
    # Draw each satellite with its order number underneath.
    for satellite in satelliti:
        screen.draw.text(str(numero), (satellite.pos[0], satellite.pos[1]+20))
        satellite.draw()
        numero = numero + 1
    for line in linee:
        screen.draw.line(line[0], line[1], (255,255,255))
    # Show a running timer while playing; freeze it once all satellites are connected.
    if indice_prossimo_satellite < NUM_SATELLITI:
        tempo_totale = time() - tempo_iniziale
        # Without round() the elapsed time would show many decimals!
        screen.draw.text(str(round(tempo_totale,2)), (10,10), fontsize=30)
    else:
        screen.draw.text(str(round(tempo_totale,2)), (10,10), fontsize=30)
# def update():
# pass
def on_mouse_down(pos):
    """Handle clicks: advance to the next satellite if clicked in order,
    otherwise reset the player's progress for this round."""
    global indice_prossimo_satellite, linee
    if indice_prossimo_satellite < NUM_SATELLITI:
        if satelliti[indice_prossimo_satellite].collidepoint(pos):
            # From the second satellite on, draw a line back to the previous one.
            if indice_prossimo_satellite:
                linee.append((satelliti[indice_prossimo_satellite-1].pos, satelliti[indice_prossimo_satellite].pos))
            indice_prossimo_satellite = indice_prossimo_satellite + 1
        else:
            # Wrong click: clear lines and start over from satellite 1.
            linee = []
            indice_prossimo_satellite = 0
# Spawn the initial satellites, then hand control to Pygame Zero's game loop.
crea_satelliti()
pgzrun.go()
165770 | # The MIT License (MIT)
#
# Copyright (c) 2014 <NAME> <<EMAIL>>
# Copyright (c) 2015 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This is an import script for Tiled .tmx map files.
#
# based on http://codebad.com/~hdon/import-tmx.py
# using http://pythonhosted.org/tmx/ for parsing
# using https://pypi.python.org/pypi/six
#
# Blender add-on metadata shown in the Add-ons preferences panel.
bl_info = {
    "name": "Import Tiled Map (.tmx)",
    "author": "<NAME> (<EMAIL>), <NAME> (<EMAIL>)",
    "version": (1, 1, 0),
    "blender": (2, 74, 0),
    "location": "File > Import > Tiled (.tmx)",
    "description": "Import a Tiled map (.tmx)",
    "warning": "Still under development",
    "category": "Import-Export"}
import bpy, bmesh
from bpy.props import StringProperty
from bpy_extras.io_utils import ImportHelper
import importlib.machinery
import os.path
# Tile UV offset calculator: per-corner (x, y) offsets for a quad's four
# loops, indexed by loop order in makeLayerMesh.
tileUVOffsets = [(0,-1), (1,-1), (1,0), (0,0)]
# Parsed tmx.TileMap for the import in progress; set and cleared in
# ImportTMX.execute(), read by the helper functions below.
level = None
def makeLayerMesh(i, posX, posY, posZ):
    """Build a Blender mesh object for tile layer `i` of the global `level`.

    Creates a (width x height) grid of unit quads offset by (posX, posY, posZ),
    assigns each quad's UVs to its tile's region in the owning tileset image,
    attaches the tileset materials that were actually used, and returns the
    new (unlinked) object.
    """
    # Create new bmesh
    bm = bmesh.new()
    tmxLayer = level.layers[i]
    tw = level.width
    th = level.height
    usedMaterialNames = {}
    # usedMaterials.append(bpy.data.materials['import_tmx_material0'])
    # Add vertices: one per grid corner, (tw+1) x (th+1) in total.
    for y in range(th + 1):
        for x in range(tw + 1):
            bm.verts.new().co = (float(x + posX), posY, float(y + posZ))
    bm.verts.index_update()
    # Add faces and UVs
    bm.loops.layers.uv.new()
    uvlay = bm.loops.layers.uv.active
    bm.verts.ensure_lookup_table()
    for y in range(th):
        for x in range(tw):
            # Construct face from the four surrounding grid vertices.
            f = bm.faces.new((
                bm.verts[(y+0)*(tw + 1)+x+0],
                bm.verts[(y+0)*(tw + 1)+x+1],
                bm.verts[(y+1)*(tw + 1)+x+1],
                bm.verts[(y+1)*(tw + 1)+x+0]))
            # Assign UVs. TMX rows run top-down while the mesh grid runs
            # bottom-up, hence the vertical flip of the tile row index.
            tileIDy = th - y - 1  # flip!
            tileID = tmxLayer.tiles[(tileIDy * tw) + x].gid
            # gid 0: no tile here — park the UVs at the origin.
            if tileID == 0:
                for iLoop, loop in enumerate(f.loops):
                    loop[uvlay].uv = (0.0, 0.0)
            else:
                tileset = findTileset(tileID);
                mName = materialName(tileset)
                usedMaterialNames[mName] = mName
                # Tileset grid size in tiles (int for indexing, float for UV math).
                tsw = int(int(tileset.image.width) / (tileset.tilewidth + tileset.spacing))
                tsh = int(int(tileset.image.height) / (tileset.tileheight + tileset.spacing))
                tswf = float(tileset.image.width) / (tileset.tilewidth + tileset.spacing)
                tshf = float(tileset.image.height) / (tileset.tileheight + tileset.spacing)
                for iLoop, loop in enumerate(f.loops):
                    # Tile position inside its tileset grid.
                    position = (tileID - tileset.firstgid)
                    tx = position % tsw
                    ty = position // tsw
                    xoffset = tileUVOffsets[iLoop][0]
                    yoffset = tileUVOffsets[iLoop][1]
                    # Nudge each corner inward by half the tile spacing so
                    # neighbouring tiles don't bleed into the sampled region.
                    if xoffset > 0:
                        xoffset -= (tileset.spacing * 0.5) / tileset.tilewidth
                    else:
                        xoffset += (tileset.spacing * 0.5) / tileset.tilewidth
                    if yoffset < 0:
                        yoffset += (tileset.spacing * 0.5) / tileset.tileheight
                    else:
                        yoffset -= (tileset.spacing * 0.5) / tileset.tileheight
                    loop[uvlay].uv = (((tx + xoffset) / tswf)
                                      ,
                                      (((tshf - ty) + yoffset) / tshf)
                                      )
    me = bpy.data.meshes.new('gen_' + str(i) + '_' + tmxLayer.name)
    for mName in usedMaterialNames.values():
        me.materials.append(bpy.data.materials.get(mName))
    bm.to_mesh(me)
    ob = bpy.data.objects.new(tmxLayer.name, me)
    ob.show_transparent = True
    return ob
def findTileset(gid):
    """Return the tileset that owns tile `gid`.

    Equivalent to the forward scan keeping the last tileset whose firstgid
    is <= gid: scan from the end and return the first match. Returns None
    when gid precedes every tileset's firstgid.
    """
    for candidate in reversed(level.tilesets):
        if gid >= candidate.firstgid:
            return candidate
    return None
def materialName(tileset):
    """Return the Blender material name derived from a tileset's name."""
    return "{}_material".format(tileset.name)
def textureName(tileset):
    """Return the Blender texture name derived from a tileset's name."""
    return "{}_texture".format(tileset.name)
def createTilesetMaterial(i):
    """Create a UV-mapped image material for tileset `i` of the global `level`.

    Loads the tileset's source image into a new texture and attaches it to a
    new material. Skips creation when a material of that name already exists,
    in which case None is returned instead of the material.
    """
    mName = materialName(level.tilesets[i])
    ma = None
    if (bpy.data.materials.get(mName) == None):
        ma = bpy.data.materials.new(mName)
        te = bpy.data.textures.new(textureName(level.tilesets[i]), type='IMAGE')
        source = level.tilesets[i].image.source
        im = bpy.data.images.load(source)
        te.image = im
        # Wire the texture into the material's first free slot, mapped via UVs.
        mt = ma.texture_slots.add()
        mt.texture = te
        mt.texture_coords = 'UV'
        mt.use_map_color_diffuse = True
        mt.mapping = 'FLAT'
    return ma
class ImportTMX(bpy.types.Operator, ImportHelper):
    """Blender operator that imports a Tiled .tmx map, one mesh per tile layer."""
    bl_idname = 'import.tmx'
    bl_label = 'Import Tiled Map (.tmx)'
    bl_options = {'PRESET'}
    filename_ext = '.tmx'
    filter_glob = StringProperty(default='*.tmx', options={'HIDDEN'})
    filepath = bpy.props.StringProperty(
        name = 'File Path',
        description = 'Import file path',
        maxlen = 1024,
        default = '')
    def execute(self, context):
        """Parse the selected .tmx file and build the scene objects.

        The parsed map is held in the module-level ``level`` global while the
        helper functions (createTilesetMaterial, makeLayerMesh, findTileset)
        run; the ``finally`` clause always resets it to None afterwards.
        """
        global level
        try:
            # The bundled six/tmx modules ship in ./libs next to this add-on
            # and are loaded directly so no package install is needed in Blender.
            cwd = os.path.dirname(os.path.realpath(__file__));
            loader = importlib.machinery.SourceFileLoader("six", cwd + "/libs/six.py")
            loader.load_module()
            loader = importlib.machinery.SourceFileLoader("tmx", cwd + "/libs/tmx.py")
            tmx = loader.load_module()
            level = tmx.TileMap.load(self.properties.filepath)
            # One material (with texture) per tileset, created up front.
            for i in range(len(level.tilesets)):
                createTilesetMaterial(i)
            # Only layers exposing a 'tiles' attribute become meshes; -i is
            # passed as an offset (presumably layer stacking depth -- see
            # makeLayerMesh for the exact meaning).
            for i in range(len(level.layers)):
                # print (i)
                if (hasattr(level.layers[i], 'tiles')):
                    ob = makeLayerMesh(i, 0.0, -i, 0.0)
                    bpy.context.scene.objects.link(ob)
                    if (not level.layers[i].visible):
                        # Hidden TMX layers stay hidden in viewport and render.
                        ob.hide = True
                        ob.hide_render = True
                #else:
                #    print ('Found non tile layer ' + level.layers[i].name)
            # print ('Finished')
        finally:
            level = None
        return {'FINISHED'}
def menu_func(self, context):
    """Menu draw callback: adds the TMX entry to File > Import."""
    self.layout.operator(ImportTMX.bl_idname, text="Tiled Map (.tmx)")
def register():
    """Register this module's classes and hook the importer into File > Import."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
    """Undo register(): remove the menu entry and unregister the classes."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func)
# Allow running the add-on directly from Blender's text editor.
# FIX: removed the "| StarcoderdataPython |" concatenation artefact that was
# fused onto the register() call (it made this line a SyntaxError).
if __name__ == '__main__':
    register()
# FIX: removed the dataset-id artefact ("135586 | ") that was fused onto the
# numpy import line (SyntaxError).
import numpy as np
import pandas as pd
class StrategyOptimiser:
    """Genetic-algorithm optimiser for integer-valued strategy parameters.

    An individual is a list of ``n_genes`` ints; gene *i* is drawn from the
    half-open range ``gene_ranges[i][0] <= gene < gene_ranges[i][1]``.
    """

    def __init__(self,
                 fitness_function,
                 n_generations,
                 generation_size,
                 n_genes,
                 gene_ranges,
                 mutation_probability,
                 gene_mutation_probability,
                 n_select_best):
        """
        Initializes a genetic algorithm with the given parameters.

        Params
        --
        `fitness_function` the function to optimize (individual -> score)
        `n_generations` the number of generations to run for
        `generation_size` the number of individuals per generation
        `n_genes` the number of genes per individual
        `gene_ranges` list of `n_genes` (low, high) tuples; high is exclusive
        `mutation_probability` probability that an individual is mutated
        `gene_mutation_probability` probability that a gene mutates, given
            that its individual was selected for mutation
        `n_select_best` number of individuals selected to mate each generation
        """
        self.fitness_function = fitness_function
        self.n_generations = n_generations
        self.generation_size = generation_size
        self.n_genes = n_genes
        self.gene_ranges = gene_ranges
        self.mutation_probability = mutation_probability
        self.gene_mutation_probability = gene_mutation_probability
        self.n_select_best = n_select_best

    def create_individual(self):
        """Return a random individual; each gene is sampled uniformly from its
        half-open range in ``gene_ranges``."""
        return [
            np.random.randint(self.gene_ranges[i][0], self.gene_ranges[i][1])
            for i in range(self.n_genes)
        ]

    def create_population(self, n_individuals):
        """Create a population of ``n_individuals`` random individuals."""
        return [self.create_individual() for _ in range(n_individuals)]

    def mate_parents(self, parents, n_offspring):
        """Create ``n_offspring`` children by uniform crossover of random parents.

        BUGFIX: the parent index used to be drawn with
        ``np.random.randint(0, n_parents - 1)``; randint's upper bound is
        *exclusive*, so the last parent could never be selected and a pool of
        one surviving parent raised ValueError.  The bound is now n_parents.
        """
        n_parents = len(parents)
        offspring = []
        for _ in range(n_offspring):
            random_dad = parents[np.random.randint(0, n_parents)]
            random_mom = parents[np.random.randint(0, n_parents)]
            # Uniform crossover: a random 0/1 mask picks each gene from dad,
            # the complementary mask picks the rest from mom.
            dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape)
            mom_mask = np.logical_not(dad_mask)
            child = np.add(np.multiply(random_dad, dad_mask),
                           np.multiply(random_mom, mom_mask))
            offspring.append(child)
        return offspring

    def mutate_individual(self, individual):
        """Return a mutated copy of *individual*.

        Each gene mutates with probability ``gene_mutation_probability``:
        half the time it is resampled uniformly (brute force), half the time
        it is perturbed by up to half the range and wrapped back into bounds.
        """
        new_individual = []
        for i in range(self.n_genes):
            gene = individual[i]
            if np.random.random() < self.gene_mutation_probability:
                left_range = self.gene_ranges[i][0]
                right_range = self.gene_ranges[i][1]
                if np.random.random() < 0.5:
                    # Brute-force mutation: uniform resample from the range.
                    gene = np.random.randint(left_range, right_range)
                else:
                    # Local mutation: offset of up to half the span, wrapped
                    # back into [left_range, right_range] on overshoot.
                    gene_dist = right_range - left_range
                    x = individual[i] + gene_dist / 2 * (2 * np.random.random() - 1)
                    if x > right_range:
                        x = (x - left_range) % gene_dist + left_range
                    elif x < left_range:
                        x = (right_range - x) % gene_dist + left_range
                    gene = int(x)
            new_individual.append(gene)
        return new_individual

    def mutate_population(self, population):
        """Mutate each individual independently with probability
        ``mutation_probability``; unselected individuals pass through as-is."""
        return [
            self.mutate_individual(individual)
            if np.random.random() < self.mutation_probability
            else individual
            for individual in population
        ]

    def select_best(self, population, n_best):
        """Return the ``n_best`` fittest individuals and print generation stats.

        Note: the returned list preserves *population* order, not fitness order.
        """
        fitnesses = []
        for idx, individual in enumerate(population):
            fitnesses.append([idx, self.fitness_function(individual)])
        ranked = pd.DataFrame(fitnesses).sort_values(by=1, ascending=False).reset_index(drop=True)
        selected_parents_idx = list(ranked.iloc[:n_best, 0])
        selected_parents = [parent for idx, parent in enumerate(population)
                            if idx in selected_parents_idx]
        print('best: {}, average: {}, and worst: {}'.format(
            ranked[1].max(),
            round(ranked[1].mean(), 2),
            ranked[1].min()
        ))
        print("best individual:", population[selected_parents_idx[0]])
        return selected_parents

    def run_genetic_algo(self):
        """Run the GA for ``n_generations`` generations.

        Returns
        --
        The 10 best individuals of the final generation (see select_best).
        """
        parent_gen = self.create_population(self.generation_size)
        for i in range(self.n_generations):
            print("Generation:", i, "Selecting best...")
            parent_gen = self.select_best(parent_gen, self.n_select_best)
            print("Mating parents & Mutating children...")
            parent_gen = self.mate_parents(parent_gen, self.generation_size)
            parent_gen = self.mutate_population(parent_gen)
        best_children = self.select_best(parent_gen, 10)
        return best_children
| StarcoderdataPython |
3263499 | <reponame>ourresearch/journalsdb
import pandas as pd
import pytest
from ingest.open_access import import_open_access
from models.usage import OpenAccess
from views import app
# One-row fixture standing in for the downloaded open-access CSV feed; the
# tests below patch ingest.open_access.pd.read_csv to yield this frame.
# NOTE(review): several numeric fields are strings here -- presumably this
# mirrors the raw CSV parse; confirm against the importer's expectations.
test_data = {
    "issn_l": ["2291-5222"],
    "title": ["Tropical Parasitology"],
    "year": ["2010"],
    "num_dois": ["10"],
    "num_open": [7],
    "open_rate": ["0.7"],
    "num_green": [7],
    "green_rate": ["0.7"],
    "num_bronze": [0],
    "bronze_rate": ["0.0"],
    "num_hybrid": [0],
    "hybrid_rate": ["0.0"],
    "num_gold": [0],
    "gold_rate": ["0.0"],
    "is_in_doaj": [False],
    "is_gold_journal": [False],
}
@pytest.mark.skip(reason="need to refactor due to open access import changes")
def test_import_open_access(ingest_client, mocker):
    """Importing the mocked CSV feed populates an OpenAccess row."""
    # Replace the CSV download with the in-memory fixture frame.
    mocker.patch(
        "ingest.open_access.pd.read_csv",
        return_value=[pd.DataFrame(data=test_data)],
    )

    # Execute the CLI import command.
    app.test_cli_runner().invoke(import_open_access)

    record = OpenAccess.query.filter_by(issn_l="2291-5222").first()
    assert record.is_in_doaj is False
    assert record.year == 2010
    assert record.num_dois == 10
    assert record.open_rate == 0.7
@pytest.mark.skip(reason="need to refactor due to open access import changes")
def test_import_open_access_no_duplicate(api_client, mocker):
    """Running the import twice must not create a second row for the ISSN."""
    mocker.patch(
        "ingest.open_access.pd.read_csv",
        return_value=[pd.DataFrame(data=test_data)],
    )

    runner = app.test_cli_runner()
    for _ in range(2):
        runner.invoke(import_open_access)

    matches = OpenAccess.query.filter_by(issn_l="2291-5222").all()
    assert len(matches) == 1
| StarcoderdataPython |
1725705 | <gh_stars>1-10
import numpy as np
import numba
import pyfftw
from scipy import ndimage as scnd
from ..proc import sobel_canny as sc
from ..util import gauss_utils as gt
from ..util import image_utils as iu
@numba.jit
def resize_rotate(original_4D,
                  final_size,
                  rotangle,
                  sampler=2,
                  mask_val=1.25,
                  masking=True):
    """
    Resize the 4D-STEM dataset
    Parameters
    ----------
    original_4D: ndarray
        Experimental dataset
    final_size: ndarray
        Size of the final ronchigram
    rotangle: float
        Angle to rotate the CBED pattern in degrees
    sampler: float
        Upsampling or downsampling ratio for CBED pattern
    mask_val: float
        Cut off data as a ratio of the beam radius,
        Default is 1.25
    masking: bool
        If true, apply masking
    Returns
    -------
    processed_4D: ndarray
        4D-STEM dataset where every CBED pattern
        has been rotated and resized
    Notes
    -----
    Experimental 4D-STEM datasets are often flipped, or rotated.
    It is recommended to use DPC module to check the beam rotation,
    and then rotate the patterns. Also, to maintain consistency
    and faster processing of ptychography, make the real space and
    Fourier space pixels consistent
    :Authors:
    <NAME> <<EMAIL>>
    """
    data_size = (np.asarray(original_4D.shape)).astype(int)
    processed_4D = (np.zeros((data_size[0],data_size[1],final_size[0],final_size[1])))
    # Beam radius measured once from the position-averaged CBED pattern.
    _,_,original_radius = iu.fit_circle(np.mean(original_4D,axis=(0,1)))
    new_radius = original_radius*sampler
    # One shared mask for every scan position, cutting off at mask_val * radius.
    circle_mask = iu.make_circle(final_size,final_size[1]/2,final_size[0]/2,new_radius*mask_val)
    for jj in range(data_size[1]):
        for ii in range(data_size[0]):
            ronchigram = original_4D[ii,jj,:,:]
            # NOTE(review): ronchi_size is computed but never used.
            ronchi_size = (np.asarray(ronchigram.shape)).astype(int)
            # The +1 / -1 around the resize presumably keeps zero pixels from
            # being treated specially by resizer2D -- confirm against iu.
            resized_ronchigram = iu.resizer2D((ronchigram + 1),(1/sampler)) - 1
            resized_rotated_ronchigram = scnd.rotate(resized_ronchigram,rotangle)
            resized_shape = (np.asarray(resized_rotated_ronchigram.shape)).astype(int)
            # Symmetric zero padding up to final_size; when the difference is
            # odd, the extra pixel goes after the data (after_pad_size + 1).
            pad_size = np.round((np.asarray(final_size) - resized_shape)/2)
            before_pad_size = np.copy(pad_size)
            after_pad_size = np.copy(pad_size)
            if (2*pad_size[0] + resized_shape[0]) < final_size[0]:
                after_pad_size[0] = pad_size[0] + 1
            if (2*pad_size[1] + resized_shape[1]) < final_size[1]:
                after_pad_size[1] = pad_size[1] + 1
            before_pad_size = (before_pad_size).astype(int)
            after_pad_size = (after_pad_size).astype(int)
            FullPadSize = ((before_pad_size[0],after_pad_size[0]),(before_pad_size[1],after_pad_size[1]))
            padded_ronchi = np.pad(resized_rotated_ronchigram, FullPadSize, 'constant', constant_values=(0, 0))
            processed_4D[ii,jj,:,:] = padded_ronchi
    if masking:
        processed_4D = np.multiply(processed_4D,circle_mask)
    return processed_4D
def move_probe(probe_im,
               x_pixels,
               y_pixels):
    """
    Move Images with sub-pixel precision
    Parameters
    ----------
    image_to_move: ndarray
        Original Image to be moved
    x_pixels: float
        Pixels to shift in X direction
    y_pixels: float
        Pixels to Shift in Y direction
    Returns
    -------
    moved_image: ndarray
        Moved Image (complex valued)
    Notes
    -----
    The underlying idea is that a shift in the real space
    is phase shift in Fourier space. So we take the original
    image, and take its Fourier transform. Also, we calculate
    how much the image shifts result in the phase change, by
    calculating the Fourier pixel dimensions. We then multiply
    the FFT of the image with the phase shift value and then
    take the inverse FFT.
    :Authors:
    <NAME> <<EMAIL>>
    """
    pyfftw.interfaces.cache.enable()
    image_size = np.asarray(probe_im.shape)
    # Frequency axes in cycles/pixel, centred so DC sits in the middle --
    # matching the fftshift-ed spectrum built below.
    fourier_cal_y = np.linspace((-image_size[0] / 2), ((image_size[0] / 2) - 1), image_size[0])
    fourier_cal_y = fourier_cal_y / (image_size[0])
    fourier_cal_x = np.linspace((-image_size[1] / 2), ((image_size[1] / 2) - 1), image_size[1])
    fourier_cal_x = fourier_cal_x / (image_size[1])
    [fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
    # Shift theorem: multiplying by exp(-2*pi*i*(u*dx + v*dy)) in Fourier
    # space translates the image by (dx, dy) pixels in real space.
    move_matrix = np.multiply(fourier_mesh_x,x_pixels) + np.multiply(fourier_mesh_y,y_pixels)
    move_phase = np.exp((-2) * np.pi * 1j * move_matrix)
    original_image_fft = np.fft.fftshift(pyfftw.interfaces.numpy_fft.fft2(probe_im))
    moved_in_fourier = np.multiply(move_phase,original_image_fft)
    # NOTE(review): the spectrum is fftshift-ed before the multiply but is not
    # ifftshift-ed before the inverse transform -- verify this is intentional.
    moved_image = pyfftw.interfaces.numpy_fft.ifft2(moved_in_fourier)
    return moved_image
@numba.jit
def update_function(objec_func,
                    probe_func,
                    data4D_sqrt,
                    pos_x,
                    pos_y,
                    alpha_val=0.1):
    """
    Ptychographic Iterative Engine Update Function
    Parameters
    ----------
    objec_func: ndarray
        Complex function of the image object
    probe_func: ndarray
        Complex function of the electron probe
    data4D_sqrt: ndarray
        Square root of the resized data
    pos_x: int
        X-Position of the electron beam
    pos_y: int
        Y-Position of the electron beam
    alpha_val: float
        Update parameter, updates will be smaller
        for smaller values. Default is 0.1
    Returns
    -------
    new_obj: ndarray
        Updated Object Function
    new_prb: ndarray
        Updated Probe Function
    Notes
    -----
    The complex probe is multiplied by the complex object
    and then propagated to the Fourier plane. The complex
    probe is generated by moving the probe using the
    move_probe function. The Fourier of the exit wave is
    compared with the experimental dataset, with the
    amplitude replaced by the square root of the resized
    data corresponding to that scan position. This new
    function is backpropagated to the image plane and the
    difference with the existing original exit wave is used
    to reconstruct beam and probe update functions
    References
    ----------
    Maiden, <NAME>., and <NAME>. "An improved ptychographical
    phase retrieval algorithm for diffractive imaging."
    Ultramicroscopy 109.10 (2009): 1256-1262.
    See Also
    --------
    move_probe
    :Authors:
    <NAME> <<EMAIL>>
    """
    pyfftw.interfaces.cache.enable()
    # Scrub NaNs so a single bad pixel cannot poison the whole reconstruction.
    objec_func[np.isnan(objec_func)] = 0
    probe_func[np.isnan(probe_func)] = 0
    square_root_ronchi = data4D_sqrt[pos_y,pos_x,:,:]
    data_shape = np.asarray(np.shape(data4D_sqrt))
    # Shift the probe to the current scan position (relative to scan centre).
    moved_probe = move_probe(probe_func,(pos_y - (data_shape[0]/2)),(pos_x - (data_shape[1]/2)))
    Psi_Orig = objec_func * moved_probe
    Psi_FFT = pyfftw.interfaces.numpy_fft.fft2(Psi_Orig)
    # Fourier-modulus constraint: keep the phase, impose the measured amplitude.
    Psi_New_FFT = square_root_ronchi * (np.exp(1j * np.angle(Psi_FFT)))
    Psi_New = pyfftw.interfaces.numpy_fft.ifft2(Psi_New_FFT)
    Psi_Diff = Psi_New - Psi_Orig
    # ePIE object update, scaled by alpha and normalised by the probe maximum.
    max_probe_val = (np.amax(np.abs(moved_probe))) ** 2
    obj_updater = (np.multiply(np.conj(moved_probe),Psi_Diff)) * (alpha_val/max_probe_val)
    new_obj = objec_func + obj_updater
    # Probe update mirrors the object update, with the object shifted back.
    moved_objec = move_probe(objec_func,((data_shape[0]/2) - pos_y),((data_shape[1]/2) - pos_x))
    max_objec_val = (np.amax(np.abs(moved_objec))) ** 2
    prb_updater = (np.multiply(np.conj(moved_objec),Psi_Diff)) * (alpha_val/max_objec_val)
    new_prb = probe_func + prb_updater
    return new_obj,new_prb
def Ptych_Engine(data4D_sqrt,
                 original_probe,
                 pad_pix,
                 iterations=2):
    """
    Ptychographic Iterative Engine Main Function

    Parameters
    ----------
    data4D_sqrt: ndarray
        Square root of the resized data
    original_probe: ndarray
        Original 2D complex probe function
    pad_pix: int
        Number of pixels used for padding to
        remove edge artifacts
    iterations: int
        No of ePIE iterations (default 2)

    Returns
    -------
    object_function: ndarray
        Calculated complex object function
    calc_probe: ndarray
        Calculated complex probe function

    Notes
    -----
    At each (unpadded) scan position the probe and object are refined with
    update_function; one full pass over the scan grid is one ePIE iteration.
    Padded pixels carry no signal, so the pad_pix-wide border is skipped --
    updating from a padded region would fill the reconstruction with NaNs.

    References
    ----------
    Maiden & Rodenburg, "An improved ptychographical phase retrieval
    algorithm for diffractive imaging." Ultramicroscopy 109.10 (2009).
    """
    # FIX: the original return line had a dataset-concatenation artefact
    # ("| StarcoderdataPython |") fused onto it, which was a SyntaxError.
    data_size = np.asarray(np.shape(data4D_sqrt))
    calc_probe = np.copy(original_probe)
    object_function = np.ones((data_size[0], data_size[1]), dtype=complex)
    for _ in range(iterations):
        # Rows (y) in the outer loop, columns (x) in the inner loop, skipping
        # the padded border on every side -- same order as the original
        # np.arange-based loops.
        for pos_y in range(pad_pix, data_size[0] - pad_pix):
            for pos_x in range(pad_pix, data_size[1] - pad_pix):
                object_function, calc_probe = update_function(
                    object_function, calc_probe, data4D_sqrt, pos_x, pos_y)
    return object_function, calc_probe
# FIX: removed the dataset-id artefact ("3372917 | ") fused onto the typing
# import line (SyntaxError); imports regrouped stdlib-first.
from random import sample
from typing import List

from .BaseDoc import BaseDoc
class CPF(BaseDoc):
    """Validator, generator and formatter for Brazilian CPF documents."""

    def __init__(self, repeated_digits: bool = False):
        # When repeated_digits is False, all-same-digit CPFs (e.g. 111...1)
        # are rejected even if their check digits happen to match.
        self.digits = list(range(10))
        self.repeated_digits = repeated_digits

    def validate(self, doc: str = '') -> bool:
        """Return True when *doc* contains a structurally valid CPF."""
        digits = list(self._only_digits(doc))
        if len(digits) != 11:
            return False
        if not self.repeated_digits and self._check_repeated_digits(digits):
            return False
        first_ok = self._generate_first_digit(digits) == digits[9]
        second_ok = self._generate_second_digit(digits) == digits[10]
        return first_ok and second_ok

    def generate(self, mask: bool = False) -> str:
        """Return a random valid CPF, optionally formatted with mask()."""
        body = [str(sample(self.digits, 1)[0]) for _ in range(9)]
        body.append(self._generate_first_digit(body))
        body.append(self._generate_second_digit(body))
        cpf = "".join(body)
        return self.mask(cpf) if mask else cpf

    def mask(self, doc: str = '') -> str:
        """Format an 11-digit CPF as XXX.XXX.XXX-XX."""
        return "{}.{}.{}-{}".format(doc[:3], doc[3:6], doc[6:9], doc[-2:])

    def _generate_first_digit(self, doc: list) -> str:
        # Standard CPF checksum: weights 10..2 over the first nine digits.
        total = sum(int(doc[10 - weight]) * weight for weight in range(10, 1, -1))
        total = (total * 10) % 11
        return str(0 if total == 10 else total)

    def _generate_second_digit(self, doc: list) -> str:
        # Second check digit: weights 11..2 over the first ten digits.
        total = sum(int(doc[11 - weight]) * weight for weight in range(11, 1, -1))
        total = (total * 10) % 11
        return str(0 if total == 10 else total)

    def _check_repeated_digits(self, doc: List[str]) -> bool:
        # True when every digit is identical.
        return len(set(doc)) == 1
| StarcoderdataPython |
9081 | <reponame>IBCNServices/StardogStreamReasoning
import threading
class RWLock:
    """Synchronization object used in a solution of so-called second
    readers-writers problem. In this problem, many readers can simultaneously
    access a share, and a writer has an exclusive access to this share.
    Additionally, the following constraints should be met:
    1) no reader should be kept waiting if the share is currently opened for
    reading unless a writer is also waiting for the share,
    2) no writer should be kept waiting for the share longer than absolutely
    necessary.
    The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7]
    with a modification -- adding an additional lock (C{self.__readers_queue})
    -- in accordance with [2].
    Sources:
    [1] <NAME>: "The little book of semaphores", Version 2.1.5, 2008
    [2] <NAME>, <NAME>, <NAME>:
    "Concurrent Control with 'Readers' and 'Writers'",
    Communications of the ACM, 1971 (via [3])
    [3] http://en.wikipedia.org/wiki/Readers-writers_problem
    """
    def __init__(self):
        # Light switches: first thread in takes the guarded lock, last one
        # out releases it (see _LightSwitch below).
        self.__read_switch = _LightSwitch()
        self.__write_switch = _LightSwitch()
        self.__no_readers = threading.Lock()
        self.__no_writers = threading.Lock()
        self.__readers_queue = threading.Lock()
        """A lock giving an even higher priority to the writer in certain
        cases (see [2] for a discussion)"""
    def reader_acquire(self):
        # Queue behind any waiting writer, then let the first reader in
        # switch on no_writers so writers are blocked while readers hold it.
        self.__readers_queue.acquire()
        self.__no_readers.acquire()
        self.__read_switch.acquire(self.__no_writers)
        self.__no_readers.release()
        self.__readers_queue.release()
    def reader_release(self):
        # The last reader out releases no_writers, letting a waiting writer in.
        self.__read_switch.release(self.__no_writers)
    def writer_acquire(self):
        # The first writer in blocks new readers via no_readers; the writer
        # itself then waits for exclusive access on no_writers.
        self.__write_switch.acquire(self.__no_readers)
        self.__no_writers.acquire()
    def writer_release(self):
        self.__no_writers.release()
        self.__write_switch.release(self.__no_readers)
class _LightSwitch:
    """Counting helper ("light switch", see source [1] cited on RWLock): the
    first thread through acquire() takes *lock*, and the last one through
    release() gives it back."""
    def __init__(self):
        self.__counter = 0
        self.__mutex = threading.Lock()
    def acquire(self, lock):
        with self.__mutex:
            self.__counter += 1
            if self.__counter == 1:
                lock.acquire()
    def release(self, lock):
        with self.__mutex:
            self.__counter -= 1
            if self.__counter == 0:
                lock.release()
| StarcoderdataPython |
1750181 | <filename>test/fail_debugger.py<gh_stars>0
# NOTE(review): this module deliberately imports debuggers and calls
# set_trace() at import time -- judging by its path (test/fail_debugger.py)
# it is a fixture meant to *trip* a leftover-debugger check; confirm the
# intent before "cleaning it up".
import pdb
pdb.set_trace()
import ipdb
import pydevd
pydevd.set_trace()
| StarcoderdataPython |
1621328 | <gh_stars>0
"""
DESAFIO 077: Contando Vogais em Tupla
Crie um programa que tenha uma tupla com várias palavras (não usar acentos).
Depois disso, você deve mostrar, para cada palavra, quais são as suas vogais.
"""
palavras = ('aprender', 'programar', 'linguagem', 'python',
            'curso', 'gratis', 'estudar', 'praticar',
            'trabalhar', 'mercado', 'programador', 'futuro')
vogais = 'aeiou'
for palavra in palavras:
    # Collect this word's vowels, each prefixed by a space -- reproduces the
    # original incremental string concatenation exactly.
    encontradas = ''.join(' ' + letra for letra in palavra if letra in vogais)
    print(f'Na palavra {palavra.upper()} temos as vogais:{encontradas}')
| StarcoderdataPython |
9196 | #!/usr/bin/env python3
"""
Base-Client Class
This is the parent-class of all client-classes and holds properties and functions they all depend on.
Author: <NAME>
"""
import src.util.debugger as Debugger
import src.util.configmaker as configmaker
class BaseClient(object):
    """Parent of all client classes: shared debug logging, prompt and config."""

    def __init__(self, configpath, configtype, debugFlag=False):
        """Load the client configuration and set up the shared debug writer."""
        self._Debug = Debugger.Debugger(debugFlag)
        self._Debug.write("INIT BaseClient")
        self._prompt = "-"  # default prompt marker
        self._clientConfig = configmaker.getConfig(configpath, configtype)
        self._Debug.write("INIT_END BaseClient")

    @property
    def prompt(self):
        """Read-only prompt string shown by client subclasses."""
        return self._prompt
def get_client_configuration():
    """Base Class for getting client configuration"""
    # NOTE(review): docstring-only placeholder at module level; it implicitly
    # returns None -- confirm whether it was meant to be a BaseClient method
    # or to raise NotImplementedError.
def load_client_configuration():
    """Base Class for loading client configuration into memory"""
    # NOTE(review): docstring-only placeholder at module level; it implicitly
    # returns None -- confirm the intended implementation location.
| StarcoderdataPython |
# FIX: removed the dataset-id artefact ("3253928 | ") fused onto the first
# import line (SyntaxError).
from django.urls import re_path

from . import views
urlpatterns = [
    # Area (region) listing.
    re_path(r'areas$', views.AreasView.as_view()),
    # House index (home-page) data.
    re_path(r'houses/index$', views.HousesIndexView.as_view()),
    # Images for one house, keyed by its numeric house_id.
    re_path(r'houses/(?P<house_id>\d+)/images$', views.HousesImageView.as_view()),
    # Detail for a single house, keyed by primary key.
    re_path(r'houses/(?P<pk>\d+)$', views.HousesInfoView.as_view()),
    # House listing/search endpoint.
    re_path(r'houses$', views.HousesView.as_view()),
]
| StarcoderdataPython |
3291572 | <gh_stars>0
import csv
import cv2
import os
# Ensure the root dataset directory exists.
if not os.path.exists('./dataset'):
    os.makedirs('./dataset')
name = input("enter your name")
roll = input("enter your id")
# CSV row for this person; 'A' presumably marks attendance -- confirm.
row = [name,roll,'A']
l =[]
# Collect the numeric names of the existing per-person subfolders.
for root ,dire,filenames in os.walk('dataset'):
    for names in dire:
        l.append(int(names))
# NOTE(review): raises IndexError when 'dataset' has no subfolders yet, and
# uses the *last walked* entry rather than max(l) -- os.walk order is
# arbitrary, so the new id can collide with an existing folder.
folder = str(l[-1]+1)
os.makedirs(f'./dataset/{folder}')
def add(row):
    """Append *row* as a single CSV line to data.csv in the working directory."""
    with open('data.csv', 'a') as handle:
        csv.writer(handle, lineterminator='\n').writerow(row)
# with open('data.csv') as f:
# data = csv.reader(f)
# next(data)
# for names in data:
# if names[0] == name:
# print('already exist!!')
# break
# else:
# add(row)
# print('added')
# break
# print(names)
# Capture webcam frames and save every frame containing a detected face into
# the per-person folder created above (dataset/<folder>).
# FIX: removed the "| StarcoderdataPython |" artefact fused onto the final
# cv2.destroyAllWindows() line (SyntaxError).
capture = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
framecount = 0
# Warm-up read; NOTE(review): `image` is never used afterwards.
flag,image = capture.read()
while True:
    flag,frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Haar-cascade detection: scale factor 1.3, 5 minimum neighbours.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        framecount += 1
        # The full frame (not just the face crop) is written to disk.
        cv2.imwrite(f'dataset/{folder}/{name}.{roll}.{framecount}.jpg',frame)
        print('frame no',framecount,' captured!')
        cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0,0), 2)
        cv2.waitKey(100)  # 100 ms pause between saved shots
    cv2.imshow('img',frame)
    cv2.waitKey(1)
    # Stop once just over 200 face frames have been stored.
    if framecount >200:
        break
capture.release()
cv2.destroyAllWindows()
31682 | <reponame>thisisshi/sdk
import json
import pandas
def output_sanitization(path_to_excel, path_to_out_json=None):
    ''' Find the Success percentage of each output report '''
    # Rewritten against the pandas DataFrame API: pandas.read_excel() returns
    # DataFrames, but the original drove the result through the xlrd workbook
    # API (wb.sheets() / s.cell(...).value), which raises AttributeError.
    # Two further latent bugs fixed: the 'hash'/'oneconnect' filter compared
    # the cell *object* (not its value) to the strings, so it never matched,
    # and a column found at index 0 was treated as "not found" (falsy check).
    out_obj = []
    excel_obj = []
    # sheet_name=None -> {sheet_name: DataFrame} for every sheet in the book.
    sheets = pandas.read_excel(path_to_excel, sheet_name=None, engine='openpyxl')
    for frame in sheets.values():
        if "F5 ID" not in frame.columns or "Status" not in frame.columns:
            continue
        for _, data_row in frame.iterrows():
            if data_row["Status"] in ("SUCCESSFUL", "PARTIAL"):
                value = data_row["F5 ID"]
                # Skip placeholder ids and empty cells.
                if value in ("hash", "oneconnect") or pandas.isna(value) or not value:
                    continue
                excel_obj.append(value)
        # Original behaviour: only the first usable sheet is examined.
        break
    with open(path_to_out_json, 'r') as file_strem:
        file_strem = json.load(file_strem)
    # Gather the object names emitted in the output JSON (META/VsVip excluded).
    for entity in file_strem:
        if entity != 'META' and entity != 'VsVip':
            for obj in file_strem[entity]:
                out_obj.append(obj.get('name'))
    print(len(out_obj))
    print(len(excel_obj))
    excel_obj.sort()
    out_obj.sort()
    print("Object Common in Both Excel and Output ")
    # Report objects marked successful in the report but missing from output.
    for obj in excel_obj:
        if obj not in out_obj:
            print(obj)
def percentage_success(path_to_excel):
    """Print the migration success percentage per F5/Netscaler object type."""
    # Rewritten against the pandas DataFrame API (see output_sanitization):
    # the original used xlrd workbook calls on a pandas object and raised
    # AttributeError before doing any work.
    sheets = pandas.read_excel(path_to_excel, sheet_name=None, engine='openpyxl')
    report_dict = {}
    for frame in sheets.values():
        if "Status" not in frame.columns:
            continue
        if "F5 type" in frame.columns:
            type_col = "F5 type"
        elif "Netscaler Command" in frame.columns:
            type_col = "Netscaler Command"
        else:
            continue
        for _, data_row in frame.iterrows():
            state = data_row["Status"]
            val = data_row[type_col]
            suc = 1 if state in ("PARTIAL", "SUCCESSFUL") else 0
            entry = report_dict.setdefault(val, {'success': 0, 'fail': 0})
            entry['success'] += suc
            entry['fail'] += 1 - suc
        # Only the first usable sheet is examined, mirroring
        # output_sanitization().  (The original had a stray `break` at
        # row-loop depth that stopped after a single data row -- presumed
        # unintended; confirm against the report format.)
        break
    for key in report_dict.keys():
        total = report_dict[key]['success'] + report_dict[key]['fail']
        if total != 0:
            report_dict[key]['percent'] = float(report_dict[key]['success'] * 100 / total)
        else:
            report_dict[key]['percent'] = 100.0
    for key in report_dict.keys():
        print(key, " -> ", report_dict[key]['percent'], "%")
| StarcoderdataPython |
4818460 | <reponame>jihunroh/ProjectEuler-Python
from ProjectEulerCommons.Base import *
from calendar import monthrange
# Project Euler 19: count the months of the 20th century (1901-2000) whose
# first day fell on a Sunday.  calendar.monthrange(year, month) returns
# (weekday_of_first_day, days_in_month) with Monday == 0, so weekday 6 means
# the month starts on a Sunday.
Answer(
    quantify(
        [(year, month) for year in range(1901, 2000 + 1) for month in range(1, 12 + 1)],
        lambda year_month_pair: monthrange(year_month_pair[0], year_month_pair[1])[0] == 6
    )
)
"""
------------------------------------------------
ProjectEuler.Problem.019.py
The Answer is: 171
Time Elasped: 0.012347698211669922sec
------------------------------------------------
"""
| StarcoderdataPython |
# FIX: removed the dataset-id artefact ("4815303 | ") fused onto the first
# import line (SyntaxError); imports regrouped stdlib / third-party / local.
import json

from django.shortcuts import render
from django.http import JsonResponse, HttpResponseServerError

from . import models
def get_games(request):
    """Return every game as a JSON array, highest score first."""
    ranked_games = models.Game.objects.all().order_by("-score").values()
    return JsonResponse(list(ranked_games), safe=False)
def post_logs(request):
    """Persist a posted game plus its per-tick state log.

    NOTE(review): non-POST or empty-body requests get a 500 here (a 405/400
    would be more conventional) -- preserved to keep behaviour unchanged.
    """
    if request.method != 'POST' or not request.body:
        return HttpResponseServerError()

    payload = json.loads(request.body)
    game = models.Game.objects.create(score=int(payload['score']))
    for entry in payload['logs'].values():
        models.State.objects.create(
            game=game,
            time=float(entry['time']),
            vol=int(entry['vol']),
            ordered=int(entry['ordered']),
            outs=int(entry['outs']),
            holding_cost=float(entry['hc']),
            ordering_cost=int(entry['oc']),
            revenue=int(entry['rv']),
        )
    return JsonResponse(payload)
| StarcoderdataPython |
3319311 | <reponame>ayemos/tatami<filename>tatami/downloaders/s3_downloader.py
import six
import os
from multiprocessing import Process
from boto3 import resource, client
from tatami import downloader
class S3Downloader(downloader.Downloader):
    """Downloader that mirrors every S3 object under a root prefix locally."""
    def __init__(self, bucket_name, root_prefix, data_directory_path='./tmp'):
        super(S3Downloader, self).__init__()
        # boto3 handles are created lazily (see __get_resource/__get_client).
        self.__resource = None
        self.__client = None
        self.__bucket_name = bucket_name
        self.__root_prefix = root_prefix
        self.__data_directory_path = data_directory_path
    def download(self, dataset_name, target_dir):
        # NOTE(review): dataset_name is accepted but unused -- the download
        # root is always self.__root_prefix.
        return self.__download_dir(self.__root_prefix, target_dir)
    def __download_dir(self, prefix, target_dir, num_threads=4):
        # Walk the S3 "directory tree" one delimiter level at a time,
        # recursing into CommonPrefixes and spawning one process per file.
        # NOTE(review): num_threads is never used, so the number of spawned
        # processes is unbounded; the Process objects are also never joined,
        # so this method can return before the files have finished landing.
        paginator = self.__get_client().get_paginator('list_objects')
        for result in paginator.paginate(Bucket=self.__bucket_name,
                                         Delimiter='/', Prefix=prefix):
            if result.get('CommonPrefixes') is not None:
                for subdir in result.get('CommonPrefixes'):
                    self.__download_dir(subdir.get('Prefix'), target_dir)
            elif result.get('Contents') is not None:
                for content in result.get('Contents'):
                    if content.get('Size') > 0:
                        # Flattens the key to its basename inside target_dir.
                        local_file_path = target_dir + os.sep + os.path.basename(content.get('Key'))
                        if not os.path.exists(os.path.dirname(local_file_path)):
                            os.makedirs(os.path.dirname(local_file_path))
                        proc = Process(
                            target=self._download_file_for_key,
                            args=(content.get('Key'), local_file_path))
                        proc.start()
    def _download_file_for_key(self, key, path):
        # Runs inside the child process: a single-object download.
        self.__get_resource().Bucket(self.__bucket_name).download_file(key, path)
    def __get_resource(self):
        # Lazily cached boto3 resource handle.
        if self.__resource is None:
            self.__resource = resource('s3')
        return self.__resource
    def __get_client(self):
        # Lazily cached boto3 client handle.
        if self.__client is None:
            self.__client = client('s3')
        return self.__client
| StarcoderdataPython |
# FIX: removed the dataset-id artefact ("1762896 | ") fused onto the first
# import line (SyntaxError).
import pytest

from ...product.models import ProductType
from ..utils import associate_attribute_values_to_instance
def test_associate_attribute_to_non_product_instance(color_attribute):
    """Associating values to an unsupported model must raise AssertionError."""
    target = ProductType()
    first_value = color_attribute.values.first()

    with pytest.raises(AssertionError) as exc:
        associate_attribute_values_to_instance(target, color_attribute, first_value)  # noqa

    assert exc.value.args == ("ProductType is unsupported",)
def test_associate_attribute_to_product_instance_from_different_attribute(
    product, color_attribute, size_attribute
):
    """Associating values that belong to a *different* attribute than the one
    supplied must raise AssertionError."""
    foreign_value = size_attribute.values.first()

    with pytest.raises(AssertionError) as exc:
        associate_attribute_values_to_instance(product, color_attribute, foreign_value)

    assert exc.value.args == ("Some values are not from the provided attribute.",)
def test_associate_attribute_to_product_instance_without_values(product):
    """Ensure clearing the values from a product is properly working."""
    existing = product.attributes.first()
    assert existing is not None, "The product doesn't have attribute-values"
    assert existing.values.count() == 1

    # Passing no values clears the assignment in place.
    refreshed = associate_attribute_values_to_instance(product, existing.attribute)

    # Same assignment row, now empty.
    assert refreshed.pk == existing.pk
    assert refreshed.values.count() == 0
def test_associate_attribute_to_product_instance_multiple_values(product):
    """Ensure multiple values in proper order are assigned."""
    existing = product.attributes.first()
    assert existing is not None, "The product doesn't have attribute-values"
    assert existing.values.count() == 1

    attribute = existing.attribute
    all_values = list(attribute.values.all())

    # Assign two values, deliberately out of natural order.
    refreshed = associate_attribute_values_to_instance(
        product, attribute, all_values[1], all_values[0]
    )

    # Same assignment row, with both values stored in the supplied order.
    assert refreshed.pk == existing.pk
    assert refreshed.values.count() == 2
    expected_order = [(all_values[1].pk, 0), (all_values[0].pk, 1)]
    assert list(
        refreshed.productvalueassignment.values_list("value__pk", "sort_order")
    ) == expected_order
def test_associate_attribute_to_page_instance_multiple_values(page):
    """Ensure multiple values in proper order are assigned."""
    existing = page.attributes.first()
    assert existing is not None, "The page doesn't have attribute-values"
    assert existing.values.count() == 1

    attribute = existing.attribute
    all_values = list(attribute.values.all())

    # Reassign two values, deliberately out of natural order.
    refreshed = associate_attribute_values_to_instance(
        page, attribute, all_values[1], all_values[0]
    )

    # Same assignment row, with both values stored in the supplied order.
    assert refreshed.pk == existing.pk
    assert refreshed.values.count() == 2
    expected_order = [(all_values[1].pk, 0), (all_values[0].pk, 1)]
    assert list(
        refreshed.pagevalueassignment.values_list("value__pk", "sort_order")
    ) == expected_order
def test_associate_attribute_to_variant_instance_multiple_values(variant):
    """Ensure multiple values in proper order are assigned."""
    attribute = variant.product.product_type.variant_attributes.first()
    all_values = list(attribute.values.all())

    assignment = associate_attribute_values_to_instance(
        variant, attribute, all_values[0], all_values[1]
    )

    # Both values stored, ordered exactly as supplied.
    assert assignment.values.count() == 2
    assert list(
        assignment.variantvalueassignment.values_list("value__pk", "sort_order")
    ) == [(all_values[0].pk, 0), (all_values[1].pk, 1)]
| StarcoderdataPython |
121195 | <filename>db_handler.py<gh_stars>0
import psycopg2
def get_artist_details(artist_name):
print("entered")
conn = psycopg2.connect(database="songspedia", user="saumya",
password="<PASSWORD>", host="127.0.0.1")
cur = conn.cursor()
query = '''WITH ARTISTID AS (SELECT ID FROM ARTISTS WHERE NAME='<NAME>') SELECT RELEASES.NAME, SONGS.TITLE FROM RELEASES INNER JOIN songs ON RELEASES.ID = SONGS.RELEASE_ID WHERE RELEASES.ARTIST_ID IN (SELECT ID FROM ARTISTID);'''
print(query)
cur.execute(query)
# cur.execute(query)
print("till here")
rows = cur.fetchall()
print(cur.rowcount)
print(rows)
for i in rows:
print("release: ", i[0], " ", "song: ", i[1])
return rows
| StarcoderdataPython |
3335405 | <gh_stars>0
from datetime import datetime
import json
import requests
from requests.exceptions import ReadTimeout
from websocket import create_connection
from websocket._exceptions import WebSocketTimeoutException
from pysense.config import yamlcfg
# Endpoint, timeout and credential configuration, all sourced from the YAML
# config so deployments can change without touching code.
API_URL = yamlcfg.sense.api.url
API_TIMEOUT = yamlcfg.sense.api.timeout
REALTIME_URL = yamlcfg.sense.realtime.url
WSS_TIMEOUT = yamlcfg.websocket.timeout
USERNAME = yamlcfg.sense.username
# Restored config lookup (the published source had a redaction placeholder
# here, which is not valid Python); mirrors the USERNAME lookup above.
PASSWORD = yamlcfg.sense.password
# for the last hour, day, week, month, or year
trend_ranges = ['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']
class SenseAPITimeoutException(Exception):
    """Raised when an HTTP or websocket call to the Sense API times out."""
    pass
class SenseMonitor(object):
    """Client for a single Sense energy monitor.

    Authenticates against the Sense HTTP API at construction time and then
    exposes realtime readings (fetched over a websocket), cached trend
    aggregates, and device metadata.
    """

    __username__ = None
    __password__ = None
    _realtime_ = None      # last realtime websocket payload
    _devices_ = None       # cached list of discovered device names
    _trend_data_ = None    # {scale: trend payload} cache, keyed by trend_ranges

    def __init__(self, username=None,
                 password=None,
                 api_timeout=API_TIMEOUT,
                 wss_timeout=WSS_TIMEOUT):
        if username is None:
            username = USERNAME
        if password is None:
            password = PASSWORD

        auth_data = {
            "email": username,
            "password": password
        }

        # Timeout instance variables
        self.api_timeout = api_timeout
        self.wss_timeout = wss_timeout

        # Create session
        self.s = requests.session()
        self._trend_data_ = {}
        for scale in trend_ranges:
            self._trend_data_[scale] = {}

        # Get auth token
        try:
            response = self.s.post(API_URL + 'authenticate',
                                   auth_data,
                                   timeout=self.api_timeout)
        except Exception as e:
            raise Exception('Connection failure: %s' % e)

        # check for 200 return
        if response.status_code != 200:
            raise Exception("Please check username and password."
                            " API Return Code: %s" % response.status_code)

        # Build out some common variables.  Use a distinct local name so the
        # module-level ``json`` import is not shadowed.
        auth_json = response.json()
        self.sense_access_token = auth_json['access_token']
        self.account_id = auth_json['account_id']
        self.user_id = auth_json['user_id']
        self.monitor_id = auth_json['monitors'][0]['id']
        self.monitor = auth_json['monitors'][0]
        self.date_created = auth_json['date_created']

        # create the auth header
        self.headers = {'Authorization': 'bearer {}'
                        .format(self.sense_access_token)}

    @property
    def devices(self):
        """Return devices (discovered lazily and cached)."""
        if self._devices_ is None:
            self._devices_ = self.get_discovered_device_names()
        return self._devices_

    def get_realtime(self):
        """Fetch one ``realtime_update`` payload from the websocket stream."""
        # Initialize before the try so the finally clause never hits an
        # unbound name when create_connection itself raises.
        ws = None
        try:
            ws = create_connection(REALTIME_URL % (self.monitor_id,
                                                   self.sense_access_token),
                                   timeout=self.wss_timeout)
            for i in range(5):  # hello, features, [updates,] data
                result = json.loads(ws.recv())
                if result.get('type') == 'realtime_update':
                    self._realtime_ = result['payload']
                    return self._realtime_
        except WebSocketTimeoutException:
            raise SenseAPITimeoutException("API websocket timed out")
        finally:
            if ws:
                ws.close()

    def api_call(self, url, payload=None):
        """GET ``API_URL + url`` with the bearer-token header."""
        if payload is None:
            payload = {}
        try:
            return self.s.get(API_URL + url,
                              headers=self.headers,
                              timeout=self.api_timeout,
                              data=payload)
        except ReadTimeout:
            raise SenseAPITimeoutException("API call timed out")

    @property
    def realtime(self):
        """Last realtime payload, fetching one on first access."""
        if not self._realtime_:
            self.get_realtime()
        return self._realtime_

    def active(self, item):
        """Return the current active value from the realtime stream

        :param item: string
        :return: string or int
        """
        return self.realtime.get(item, 0)

    @property
    def active_power(self):
        return self.realtime.get('w', 0)

    @property
    def active_solar_power(self):
        return self.realtime.get('solar_w', 0)

    @property
    def active_voltage(self):
        return self.realtime.get('voltage', 0)

    @property
    def active_frequency(self):
        return self.realtime.get('hz', 0)

    @property
    def daily_usage(self):
        return self.get_trend('DAY', False)

    @property
    def daily_production(self):
        return self.get_trend('DAY', True)

    @property
    def weekly_usage(self):
        # Add today's usage
        return self.get_trend('WEEK', False)

    @property
    def weekly_production(self):
        # Add today's production
        return self.get_trend('WEEK', True)

    @property
    def monthly_usage(self):
        # Add today's usage
        return self.get_trend('MONTH', False)

    @property
    def monthly_production(self):
        # Add today's production
        return self.get_trend('MONTH', True)

    @property
    def yearly_usage(self):
        # Add this month's usage
        return self.get_trend('YEAR', False)

    @property
    def yearly_production(self):
        # Add this month's production
        return self.get_trend('YEAR', True)

    @property
    def active_devices(self):
        if not self.realtime:
            self.get_realtime()
        return [d['name'] for d in self.realtime.get('devices', {})]

    def get_trend(self, scale, is_production):
        """Total for *scale*; WEEK/MONTH/YEAR add the current partial period."""
        key = "production" if is_production else "consumption"
        if not self._trend_data_[scale]:
            self.get_trend_data(scale)
        if key not in self._trend_data_[scale]:
            return 0
        total = self._trend_data_[scale][key].get('total', 0)
        if scale == 'WEEK' or scale == 'MONTH':
            return total + self.get_trend('DAY', is_production)
        if scale == 'YEAR':
            return total + self.get_trend('MONTH', is_production)
        return total

    def get_discovered_device_names(self):
        # lots more info in here to be parsed out
        response = self.api_call('app/monitors/%s/devices' %
                                 self.monitor_id)
        self._devices_ = [entry['name'] for entry in response.json()]
        return self._devices_

    def devices_map(self):
        response = self.api_call('monitors/%s/devices' %
                                 self.monitor_id)
        return response.json()

    def always_on_info(self):
        # Always on info - pretty generic similar to the web page
        response = self.api_call('app/monitors/%s/devices/always_on' %
                                 self.monitor_id)
        return response.json()

    def get_monitor_info(self):
        # View info on your monitor & device detection status
        response = self.api_call('app/monitors/%s/status' %
                                 self.monitor_id)
        return response.json()

    def get_device_info(self, device_id):
        # Get specific information about a device
        response = self.api_call('app/monitors/%s/devices/%s' %
                                 (self.monitor_id, device_id))
        return response.json()

    def get_notification_preferences(self):
        # Get notification preferences
        payload = {'monitor_id': '%s' % self.monitor_id}
        response = self.api_call('users/%s/notifications' %
                                 self.user_id, payload)
        return response.json()

    def get_trend_data(self, scale):
        """Refresh the cached trend payload for one *scale*."""
        if scale.upper() not in trend_ranges:
            raise Exception("%s not a valid range" % scale)
        t = datetime.now().replace(hour=12)
        response = self.api_call('app/history/trends?monitor_id=%s&scale=%s&start=%s' %
                                 (self.monitor_id, scale, t.isoformat()))
        self._trend_data_[scale] = response.json()

    def update_trend_data(self):
        """Refresh the trend cache for every supported scale."""
        for scale in trend_ranges:
            self.get_trend_data(scale)

    def get_all_usage_data(self, num=30):
        payload = {'n_items': num}
        # lots of info in here to be parsed out
        response = self.api_call('users/%s/timeline' % self.user_id, payload)
        return response.json()
| StarcoderdataPython |
3322261 | import os.path as osp
import json
import requests
import time
import numpy as np
import io
from PIL import Image
import logging
import torch
import random
logger = logging.getLogger('global')
from dataset_base import BaseDataset
from datasets import build_transform
class ImageNetDataset(BaseDataset):
    """
    ImageNet Dataset.

    Arguments:
        - is_train (:obj:`bool`): selects the ``train``/``val`` split and the
          matching transform pipeline
        - args: namespace providing ``load_type``, ``conf_path``,
          ``data_path`` and ``meta_file``

    Metafile example::
        "n01440764/n01440764_10026.JPEG 0\n"
    """

    def __init__(self,
                 is_train,
                 args):
        # ``load_type`` selects BaseDataset's storage backend directly.
        if args.load_type not in ("ori", "petrel", "mc"):
            raise RuntimeError("unknown load type")
        super(ImageNetDataset, self).__init__(read_from=args.load_type,
                                              conf_path=args.conf_path)

        split = "train" if is_train else "val"
        self.data_root = osp.join(args.data_path, split)
        if args.meta_file == "None":
            self.meta_file = osp.join(args.data_path, "meta/" + split + ".txt")
        else:
            self.meta_file = args.meta_file
        self.load_type = args.load_type
        self.image_transform = build_transform(is_train, args)

        # Each meta line is "<relative path> <integer label>".
        with open(self.meta_file) as f:
            lines = f.readlines()
        self.num = len(lines)
        self.metas = []
        for line in lines:
            filename, label = line.rstrip().split()
            self.metas.append({'filename': filename, 'label': label})

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        """Return ``(transformed_image, label_tensor)`` for sample *idx*."""
        curr_meta = self.metas[idx]
        filename = osp.join(self.data_root, curr_meta['filename'])
        label = int(curr_meta['label'])
        if self.read_from == "ori":
            image = Image.open(filename)
            image = image.convert('RGB')
        else:
            img_bytes = self.read_file(filename)
            image = self.image_reader(img_bytes, filename)
        image = self.image_transform(image)
        return image, torch.tensor(label, dtype=torch.long)

    def image_reader(self, img_bytes, filepath):
        """Decode *img_bytes* into an RGB PIL image; raise on failure."""
        buff = io.BytesIO(img_bytes)
        try:
            with Image.open(buff) as img:
                return img.convert('RGB')
        except IOError:
            # The original logged here and then crashed with a NameError on
            # the undefined ``img``; log and re-raise so callers see the
            # actual decoding failure instead.
            logger.info('Failed in loading {}'.format(filepath))
            raise
| StarcoderdataPython |
1608363 | import random
import numpy as np
import skimage.io as sio
import skimage.color as sc
import skimage.transform as st
import torch
from torchvision import transforms
def get_patch(haze_tensor, A_tensor, t_tensor, latent_tensor, patch_size):
    """Crop the same random ``patch_size``-square region from four aligned
    CxHxW tensors and return the crops in the original argument order."""
    for other in (A_tensor, t_tensor, latent_tensor):
        assert haze_tensor.shape[1:] == other.shape[1:]

    height, width = haze_tensor.shape[1:]
    # Draw the horizontal offset first, then the vertical one (same order of
    # random draws as before, so seeded runs behave identically).
    left = random.randrange(0, width - patch_size + 1)
    top = random.randrange(0, height - patch_size + 1)

    rows = slice(top, top + patch_size)
    cols = slice(left, left + patch_size)
    return (haze_tensor[:, rows, cols],
            A_tensor[:, rows, cols],
            t_tensor[:, rows, cols],
            latent_tensor[:, rows, cols])
def set_channel(l, n_channel):
    """Coerce every HxW(xC) image in *l* to ``n_channel`` channels."""
    def _convert(img):
        if img.ndim == 2:
            img = np.expand_dims(img, axis=2)

        channels = img.shape[2]
        if n_channel == 1 and channels == 3:
            # Keep only the luma (Y) plane of the YCbCr conversion.
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channel == 3 and channels == 1:
            # Replicate the single channel three times.
            img = np.concatenate([img] * n_channel, 2)
        return img

    return [_convert(image) for image in l]
def np2Tensor(l, rgb_range):
    """Convert HxWxC (or HxW) numpy images to float tensors scaled so that a
    pixel value of 255 maps to ``rgb_range`` (3-D inputs become CxHxW)."""
    scale = rgb_range / 255

    def _convert(img):
        if img.ndim == 3:
            chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
            tensor = torch.from_numpy(chw).float()
            tensor.mul_(scale)
        elif img.ndim == 2:
            tensor = torch.from_numpy(np.ascontiguousarray(img)).float()
            tensor.mul_(scale)
        else:
            # NOTE(review): other ranks fall through and raise at the return
            # below (same as the original behavior).
            pass
        return tensor

    return [_convert(img) for img in l]
def augment(l, hflip=True, rot=True):
    """Apply one randomly chosen flip/rotation combination to every HxWxC
    image in *l* (the same transform is used for all of them)."""
    # Decide each augmentation once, up front (three random draws at most,
    # in the same order as before).
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        out = img
        if do_hflip:
            out = out[:, ::-1, :]
        if do_vflip:
            out = out[::-1, :, :]
        if do_rot90:
            out = out.transpose(1, 0, 2)
        return out

    return [_apply(img) for img in l]
| StarcoderdataPython |
174425 | from typing import List, Tuple, Optional
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import cm
import matplotlib.colors as mplcolors
from ramachandran.io import read_residue_torsion_collection_from_file
def get_coordinates_on_reference_map(
        phi_psi_angle: Tuple[float, float],
        reference_map: np.ndarray) -> Tuple[int, int]:
    """Map a (phi, psi) pair in degrees onto (row, col) indices of the
    reference map, where phi spans columns and psi spans rows."""
    phi, psi = phi_psi_angle
    height = reference_map.shape[0]
    width = reference_map.shape[1]

    row = int((180 - psi) / 360 * height)
    col = int((phi + 180) / 360 * width)

    # Angles of exactly +180 / -180 land one cell past the edge; pull them
    # back inside the map.
    if row == height:
        row = height - 1
    if col == width:
        col = width - 1
    return (row, col)
def create_ramachandran_plot(phi_psi_angles: List[Tuple[float, float]],
                             plot_file_path: str,
                             reference_map: Optional[np.ndarray] = None,
                             cmap: Optional[mplcolors.ListedColormap] = None,
                             protein_name: Optional[str] = None,
                             rendering_interpolation: bool = True) -> None:
    """Render a Ramachandran scatter plot and save it as an SVG.

    Points falling in the lowest-density region of *reference_map* (below its
    60th percentile) are drawn in red as outliers; all others in blue.
    """
    phi_psi_angles_numpy = np.array(phi_psi_angles)
    # Column 0 is phi (x axis), column 1 is psi (y axis).
    x_numpy = phi_psi_angles_numpy[:, 0]
    y_numpy = phi_psi_angles_numpy[:, 1]

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)

    if protein_name is not None:
        ax.set_title(protein_name, fontsize=24)

    interpolation = None
    if rendering_interpolation is True:
        interpolation = "bilinear"

    if reference_map is not None:
        # Percentile thresholds split the map into three density bands that
        # the (3-color) colormap shades as background.
        percentile_1 = np.percentile(reference_map, 60)
        percentile_2 = np.percentile(reference_map, 90)
        # rot90 aligns the map with the (-180, 180) x/y extent below.
        ax.imshow(np.rot90(reference_map),
                  interpolation=interpolation,
                  cmap=cmap,
                  norm=mplcolors.BoundaryNorm(
                      boundaries=[0, percentile_1, percentile_2, 1],
                      ncolors=cmap.N),
                  origin="upper",
                  extent=(-180, 180, -180, 180))

        # Find outliers: points that land on a map cell whose density is
        # below the 60th-percentile threshold.
        outliers_idx = []
        for i, phi_psi_angle in enumerate(phi_psi_angles):
            map_i, map_j = get_coordinates_on_reference_map(
                phi_psi_angle=phi_psi_angle,
                reference_map=np.rot90(reference_map))
            if np.rot90(reference_map)[map_i, map_j] < percentile_1:
                outliers_idx.append(i)
        x_outliers_numpy = x_numpy[outliers_idx]
        y_outliers_numpy = y_numpy[outliers_idx]
        # Remove outliers from the regular point set so each point is drawn
        # exactly once.
        x_numpy = np.delete(x_numpy, outliers_idx)
        y_numpy = np.delete(y_numpy, outliers_idx)

        ax.scatter(x_outliers_numpy,
                   y_outliers_numpy,
                   s=20,
                   color="red",
                   edgecolors="black")

    ax.scatter(x_numpy, y_numpy, s=20, color="blue", edgecolors="black")

    ax.set_xlim(-180, 180)
    ax.set_ylim(-180, 180)
    # Gridline ticks every 45 degrees on both axes.
    ax.xaxis.set_major_locator(ticker.MultipleLocator(45))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(45))
    ax.xaxis.set_tick_params(labelsize=12)
    ax.yaxis.set_tick_params(labelsize=12)
    # Dashed axes through the origin.
    ax.plot([-180, 180], [0, 0], "--", linewidth=0.5, color="black")
    ax.plot([0, 0], [-180, 180], "--", linewidth=0.5, color="black")
    ax.set_xlabel(r"${\phi}$", fontsize=18, fontweight="bold")
    ax.set_ylabel(r"${\psi}$", fontsize=18, fontweight="bold")

    fig.savefig(plot_file_path, format="svg", dpi=600, bbox_inches="tight")
    plt.close()

    return
def create_ramachandran_plots_from_file(
        file_path: str,
        save_dir_path: str,
        # reference_map_type: Optional[str] = "unsmoothed",
        protein_name: Optional[str] = None,
        rendering_interpolation: bool = False) -> None:
    """Create the four standard Ramachandran plots (general, Gly, Pro,
    pre-Pro) for a structure file, saved as SVGs under *save_dir_path*.
    """
    if not os.path.exists(save_dir_path):
        os.makedirs(save_dir_path)

    residue_torsion_collection = read_residue_torsion_collection_from_file(
        file_path=file_path)

    # One torsion-angle list per residue category.
    phi_psi_angles_general = residue_torsion_collection.collect_torsion_angles_general(
    )
    phi_psi_angles_gly = residue_torsion_collection.collect_torsion_angles_gly(
    )
    phi_psi_angles_pro = residue_torsion_collection.collect_torsion_angles_pro(
    )
    phi_psi_angles_prepro = residue_torsion_collection.collect_torsion_angles_prepro(
    )
    phi_psi_angles_list = [
        phi_psi_angles_general, phi_psi_angles_gly, phi_psi_angles_pro,
        phi_psi_angles_prepro
    ]

    package_dir, filename = os.path.split(__file__)

    # Using unsmoothed probability.npz is problematic because
    # many probabilities are exactly zeros and thus the many percentiles are exactly zeros.
    # Plotting these zero values is very problematic.
    # Gaussian density is fine because none of the probability density values are exactly zero.
    # if reference_map_type == "unsmoothed":
    #     npz_file_path = os.path.join(package_dir, "data", "probability.npz")
    #     npz_file = np.load(npz_file_path)
    # elif reference_map_type == "smoothed":
    #     npz_file_path = os.path.join(package_dir, "data", "gaussian_density.npz")
    #     npz_file = np.load(npz_file_path)
    # else:
    #     raise RuntimeError("Unsupported reference map type.")
    npz_file_path = os.path.join(package_dir, "data", "gaussian_density.npz")
    npz_file = np.load(npz_file_path)

    # Background density maps bundled with the package, one per category.
    reference_map_general = npz_file["general"]
    reference_map_gly = npz_file["gly"]
    reference_map_pro = npz_file["pro"]
    reference_map_prepro = npz_file["prepro"]
    reference_map_list = [
        reference_map_general, reference_map_gly, reference_map_pro,
        reference_map_prepro
    ]

    # Using Erdős Gábor's cmaps.
    # https://github.com/gerdos/PyRAMA/blob/301df17e5f2c32544b34321c4f8b0254697183ce/pyrama/config.py
    cmap_general = mplcolors.ListedColormap(['#FFFFFF', '#B3E8FF', '#7FD9FF'])
    cmap_gly = mplcolors.ListedColormap(['#FFFFFF', '#FFE8C5', '#FFCC7F'])
    cmap_pro = mplcolors.ListedColormap(['#FFFFFF', '#D0FFC5', '#7FFF8C'])
    cmap_prepro = mplcolors.ListedColormap(['#FFFFFF', '#B3E8FF', '#7FD9FF'])
    cmap_list = [cmap_general, cmap_gly, cmap_pro, cmap_prepro]

    filename_list = ["general.svg", "gly.svg", "pro.svg", "prepro.svg"]
    file_path_list = [
        os.path.join(save_dir_path, filename) for filename in filename_list
    ]

    # Render one plot per (angles, map, colormap, output path) quadruple.
    for phi_psi_angles, reference_map, cmap, file_path in zip(
            phi_psi_angles_list, reference_map_list, cmap_list,
            file_path_list):
        create_ramachandran_plot(
            phi_psi_angles=phi_psi_angles,
            reference_map=reference_map,
            cmap=cmap,
            plot_file_path=file_path,
            rendering_interpolation=rendering_interpolation,
            protein_name=protein_name)

    return
| StarcoderdataPython |
3352023 | <reponame>Thalizin06/Painel-S
# Imports
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import os
import random
from decouple import config
import json
import requests
# Clear the terminal on startup (Windows uses ``cls``, POSIX uses ``clear``).
os.system('cls' if os.name == 'nt' else 'clear')
# ANSI escape codes used to colorize console output.
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
# Start: create the bot using "-" as the command prefix.
bot = commands.Bot("-")
@bot.event
async def on_ready():
    """Set the streaming presence and print a colored startup banner."""
    await bot.change_presence(activity=discord.Streaming(name="Painel-S v2.10.7", url="https://www.twitch.tv/thalizin06"))
    print(CYAN + f"Logado como: " + RED + f"{bot.user}" + CYAN + "!" )
    print(RED + '''______ _ _ _____
| ___ \ (_) | | / ___|
| |_/ / __ _ _ _ __ ___ | | \ `--.
| __/ / _` || || '_ \ / _ \| | `--. |
| | | (_| || || | | || __/| | /\__/ /
\_| \__,_||_||_| |_| \___||_| \____/ .cc
''')
@bot.event
async def on_message(message):
    """Ignore the bot's own messages; dispatch everything else as commands."""
    if message.author != bot.user:
        await bot.process_commands(message)
# Comando convite
@bot.command(name="invite", help="Cria um convite de servidor")
async def send_server(ctx):
    """Reply with the static server invite link."""
    invite_url = "https://discord.gg/xQGvng4TBH"
    await ctx.send("**Convite: **" + invite_url)
# Comando Kiny-Painel
@bot.command(name="painel", help="abre o kiny painel (O meu é melhor é claro)")
async def embed(ctx):
    """Delete the invoking message and post an embed advertising Kiny Painel."""
    await ctx.message.delete()
    embed=discord.Embed(title="**Kiny Painel v2.3.1**", color=0x4B0082)
    embed.set_author(name="Painel-S by Thaliz", url="https://www.twitch.tv/thalizin06", icon_url="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRZ2Ba_Vfca2eMC6dEkSKKLp50X3UCMdTC2BA&usqp=CAU")
    embed.add_field(name="DOWNLOAD", value="https://github.com/Kiny-Kiny/Kiny-Painel", inline=False)
    embed.set_image(url="https://github.com/Kiny-Kiny/Kiny-Painel/raw/main/IMG_20210815_155210_616.jpg")
    embed.set_footer(icon_url=ctx.author.avatar_url, text="Information requested by: {}".format(ctx.author.display_name))
    await ctx.send(embed=embed)
# Comando Otimizar-pc
@bot.command(name="otimizar", help="Abre um otimizador testado e de otima qualidade")
async def embed(ctx):
    """Delete the invoking message and post an embed with the PC optimizer download."""
    await ctx.message.delete()
    embed=discord.Embed(title="**Pc Otimizer v1.13.1**", color=0xE6E6FA)
    embed.set_author(name="Painel-S by Thaliz", url="https://www.twitch.tv/thalizin06", icon_url="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRZ2Ba_Vfca2eMC6dEkSKKLp50X3UCMdTC2BA&usqp=CAU")
    embed.add_field(name="DOWNLOAD", value="https://cdn.discordapp.com/attachments/935532000725594122/946153784185352222/otimizar.rar", inline=False)
    embed.set_image(url="https://cdn.discordapp.com/attachments/935532000725594122/946154248255729744/unknown.png")
    embed.set_footer(icon_url=ctx.author.avatar_url, text="Information requested by: {}".format(ctx.author.display_name))
    await ctx.send(embed=embed)
# Apagar mensagem (apaga 10 mensagens)
@bot.command(name='cls', help='Apaga 10 msgs')
async def clear(ctx, amount = 11):
    """Purge the last 10 messages (11 counting the invoking command)."""
    # Guard clause: only members allowed to manage messages may purge.
    if not ctx.author.guild_permissions.manage_messages:
        await ctx.send('Você não tem permissão!')
        return
    await ctx.channel.purge(limit=amount)
# Apagar mensagem (apaga 100 mensagens) OBS: Pode causar lag e erros.
@bot.command(name='cls0', help='Apaga 100 msgs OBS: Podem ocorrer problemas.')
async def clear(ctx, amount = 101):
    """Purge the last 100 messages (101 counting the invoking command)."""
    if ctx.author.guild_permissions.manage_messages:
        await ctx.channel.purge(limit=amount)
    else:
        await ctx.send('Você não tem permissão!')
# Apagar mensagem (apaga 50 mensagens) OBS: Pode causar lag.
@bot.command(name='cls1', help='Apaga 50 msgs OBS: Podem ocorrer problemas.')
async def clear(ctx, amount = 51):
    """Purge the last 50 messages (51 counting the invoking command)."""
    if ctx.author.guild_permissions.manage_messages:
        await ctx.channel.purge(limit=amount)
    else:
        await ctx.send('Você não tem permissão!')
# Comando gerar senha (até 10 caracteres)
@bot.command(name="senha", help= "Gera senhas aleatorias")
async def senha(ctx):
    """Generate and post a numeric password of up to 10 digits.

    Uses ``random.SystemRandom`` (OS entropy) instead of the default Mersenne
    Twister, since the generated value is intended to be a password.
    """
    await ctx.message.delete()
    senha = random.SystemRandom().randrange(10000000000)
    await ctx.send("**Senha gerada!:** ")
    await ctx.send(senha)
# Dados gratis pra quem quiser
@bot.command(name="dados")
async def dados(ctx):
    """Post a hard-coded sample of (fake) personal-data records."""
    # Static sample payload; sent verbatim as the command's reply.
    dados = (
        [
            {
                "nome": "<NAME>",
                "idade": 64,
                "cpf": "132.648.650-09",
                "rg": "14.456.898-6",
                "data_nasc": "16/01/1958",
                "sexo": "Masculino",
                "signo": "Capricórnio",
                "mae": "<NAME>",
                "pai": "<NAME>",
                "email": "<EMAIL>",
                "senha": "<PASSWORD>ft0wZ",
                "cep": "71090-545",
                "endereco": "Colônia Agrícola Águas Claras Chácara 44",
                "numero": 555,
                "bairro": "Guará I",
                "cidade": "Brasília",
                "estado": "DF",
                "telefone_fixo": "(61) 2912-7621",
                "celular": "(61) 99247-9786",
                "altura": "1,90",
                "peso": 60,
                "tipo_sanguineo": "O+",
                "cor": "vermelho"
            }
        ]
    )
    await ctx.send(dados)
# Painel-IP (API)
@bot.command(name="ip", help="Consulta um ip")
async def embed(ctx):
    """Look up an IP address via the ipwhois.app JSON API and reply with an
    embed listing every returned attribute.

    Fixes from the original: no longer shadows the builtins ``str``/``type``
    and drops the twenty one-off temporaries in favor of a field table.
    """
    await ctx.message.delete()
    # Everything after the "-ip " prefix is the address to look up.
    target = ctx.message.content.replace('-ip ', '')
    data = requests.get('http://ipwhois.app/json/' + target).json()
    country_flag = data.get("country_flag")
    card = discord.Embed(title="**Painel-IP v1.10.7**", color=0xFF5733)
    card.set_author(name="Painel-S by Thaliz", url="https://www.twitch.tv/thalizin06", icon_url="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRZ2Ba_Vfca2eMC6dEkSKKLp50X3UCMdTC2BA&usqp=CAU")
    # (field label, JSON key) pairs, rendered in the original order.
    fields = [
        ("IP:", "ip"), ("CONTINENTE:", "continent"), ("PAÍS:", "country"),
        ("SUCCESS:", "success"), ("TYPE:", "type"),
        ("CONTINENT COD:", "continent_code"), ("COUNTRY COD:", "country_code"),
        ("BANDEIRA:", "country_flag"), ("CAPITAL:", "country_capital"),
        ("COUNTRY PN:", "country_phone"), ("COUNTRY NGB:", "country_neighbours"),
        ("REGIÃO:", "region"), ("CIDADE:", "city"), ("LATITUDE:", "latitude"),
        ("LONGITUDE:", "longitude"), ("ASN:", "asn"), ("ORG:", "org"),
        ("TIMEZONE:", "timezone"), ("CURRENCY:", "currency"),
        ("CURRENCY CD:", "currency_code"), ("TIMEZONE GMT:", "timezone_gmt"),
    ]
    for label, key in fields:
        card.add_field(name=label, value=data.get(key), inline=True)
    card.set_image(url=country_flag)
    card.set_footer(icon_url=ctx.author.avatar_url, text="Information requested by: {}".format(ctx.author.display_name))
    await ctx.send(embed=card)
# Painel-DDD (API)
@bot.command(name="ddd", help="Consulta um ddd")
async def send_hello(ctx):
    """Look up which Brazilian state and cities use a DDD phone prefix.

    Fixes from the original: no longer shadows the builtin ``str`` and no
    longer reuses ``result1`` for two unrelated values.
    """
    await ctx.message.delete()
    # Everything after the "-ddd " prefix is the area code to query.
    ddd_code = ctx.message.content.replace('-ddd ', '')
    response = requests.get('https://brasilapi.com.br/api/ddd/v1/' + ddd_code)
    data = response.json()
    state = data.get("state")
    cities = data.get("cities")
    # Same four messages, in the same order, as before.
    await ctx.send('**Estado: **')
    await ctx.send(state)
    await ctx.send('**Cidades: **')
    await ctx.send(cities)
# Painel-CEP (API)
@bot.command(name="cep", help="Consulta um cep")
async def cep(ctx):
    """Look up a Brazilian postal code (CEP) and reply with an embed.

    Fixes from the original: no longer shadows the builtin ``str`` and drops
    the redundant one-off temporaries.
    """
    await ctx.message.delete()
    # Everything after the "-cep " prefix is the postal code to query.
    cep_code = ctx.message.content.replace('-cep ', '')
    data = requests.get('https://brasilapi.com.br/api/cep/v1/' + cep_code).json()
    card = discord.Embed(title="**Painel-CEP v1.13.7**", color=0xB0E0E6)
    card.set_author(name="Painel-S by Thaliz", url="https://www.twitch.tv/thalizin06", icon_url="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRZ2Ba_Vfca2eMC6dEkSKKLp50X3UCMdTC2BA&usqp=CAU")
    card.add_field(name="CEP:", value=data.get("cep"), inline=True)
    card.add_field(name="ESTADO:", value=data.get("state"), inline=True)
    card.add_field(name="CIDADE:", value=data.get("city"), inline=True)
    card.add_field(name="LOCAL:", value=data.get("neighborhood"), inline=True)
    card.add_field(name="ENDEREÇO:", value=data.get("street"), inline=True)
    card.add_field(name="SERVIÇO:", value=data.get("service"), inline=True)
    card.set_footer(icon_url=ctx.author.avatar_url, text="Information requested by: {}".format(ctx.author.display_name))
    await ctx.send(embed=card)
# Test area (currently empty)
# Start the bot: the token is read from the environment/.env via decouple.
TOKEN = config("TOKEN")
bot.run(TOKEN)
| StarcoderdataPython |
1726449 | #!/usr/bin/env python
import os
import boto
from boto.s3.key import Key

# Upload a local picture to a locally running S3-compatible mock service.
home = os.environ['HOME']
# Plain-HTTP connection to the mock S3 endpoint on localhost:10001.
s3 = boto.connect_s3(host='localhost', port=10001, is_secure=False)
b = s3.get_bucket('mocking')
k_img = Key(b)
k_img.key = 'Pictures/django.jpg'
k_img.set_contents_from_filename('%s/Pictures/django.jpg' % home)
| StarcoderdataPython |
193730 | import time
from sqlalchemy import Column, Integer, String, ForeignKey
from anarcho import db
from sqlalchemy.orm import relationship, backref
class Build(db.Model):
    """A single uploaded build (version) of an application."""
    __tablename__ = "builds"
    id = Column('build_id', Integer, primary_key=True)
    # Foreign key to the owning application.
    app_key = Column('app_key', String, ForeignKey('apps.app_key'))
    version_code = Column('version_code', Integer)
    version_name = Column('version_name', String)
    release_notes = Column('release_notes', String)
    # Unix timestamp (seconds) captured at construction time.
    created_on = Column('created_on', Integer)
    # Backref gives Application a ``builds`` collection; deleting an
    # application cascades to its builds.
    app = relationship("Application", backref=backref("builds", cascade="all,delete"))

    def __init__(self, app_key, version_code, version_name, release_notes=None):
        self.app_key = app_key
        self.version_code = version_code
        self.version_name = version_name
        self.release_notes = release_notes
        self.created_on = time.time()

    def __repr__(self):
        return '<Build %r>' % self.app_key
| StarcoderdataPython |
156249 | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from typing import Optional
import github
class GithubAuthentication:
    """
    Represents a token manager for authentication via GitHub App.

    Abstract interface: every member raises NotImplementedError and must be
    overridden by a concrete implementation.
    """

    def get_token(self, namespace: str, repo: str) -> str:
        """
        Get a GitHub token for requested repository.

        Args:
            namespace: Namespace of the repository.
            repo: Name of the repository.

        Returns:
            A token that can be used in PyGithub instance for authentication.
        """
        raise NotImplementedError()

    @property
    def pygithub_instance(self) -> "github.Github":
        """
        Returns:
            Generic PyGithub instance. Used for `GitUser` for example.
        """
        raise NotImplementedError()

    @staticmethod
    def try_create(**kwargs) -> Optional["GithubAuthentication"]:
        """
        Tries to construct authentication object from provided keyword arguments.

        Returns:
            `GithubAuthentication` object or `None` if the creation was not
            successful.
        """
        raise NotImplementedError()
| StarcoderdataPython |
3330107 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
@Project Name macro_economic
@File Name: money_supply
@Software: PyCharm
@Time: 2018/6/9 14:19
@Author: taosheng
@contact: <EMAIL>
@version: 1.0
@Description:
"""
import datetime
import numpy as np
import pandas as pd
import tushare as ts
import matplotlib.pyplot as plt
# Widen numpy/pandas console output so wide frames print on one line.
np.set_printoptions(threshold=2000, linewidth=1000)  # defaults: 1000 / 75
pd.set_option('display.width', 1000)  # default is 80

if __name__ == "__main__":
    # Monthly money-supply series from tushare.
    ms = ts.get_money_supply()
    """
    货币供应量
    month :统计时间
    m2 :货币和准货币(广义货币M2)(亿元)
    m2_yoy:货币和准货币(广义货币M2)同比增长(%)
    m1:货币(狭义货币M1)(亿元)
    m1_yoy:货币(狭义货币M1)同比增长(%)
    m0:流通中现金(M0)(亿元)
    m0_yoy:流通中现金(M0)同比增长(%)
    cd:活期存款(亿元)
    cd_yoy:活期存款同比增长(%)
    qm:准货币(亿元)
    qm_yoy:准货币同比增长(%)
    ftd:定期存款(亿元)
    ftd_yoy:定期存款同比增长(%)
    sd:储蓄存款(亿元)
    sd_yoy:储蓄存款同比增长(%)
    rests:其他存款(亿元)
    rests_yoy:其他存款同比增长(%)
    """
    # print(ms)
    # ms["month"] = ms["month"].apply(lambda s: datetime.datetime.strptime(s, '%Y.%m'))
    # Clean the frame: datetime index, "--" -> NaN, numeric dtype,
    # chronological (oldest-first) order.
    ms["month"] = pd.to_datetime(ms["month"])
    ms = ms.set_index("month")
    ms = ms.replace("--", np.nan)
    ms = ms.astype(np.float64)
    ms = ms.iloc[::-1, :]
    print(ms)
    # ax = ms.plot(grid=True, title="money supply")
    # Plot the absolute levels (swap to the commented line below for
    # year-over-year growth instead).
    ax = ms[["m2","m1","m0","cd","qm","ftd","sd","rests"]].plot(grid=True, title="money supply")
    # ax = ms[["m2_yoy","m1_yoy","m0_yoy","cd_yoy","qm_yoy","ftd_yoy","sd_yoy","rests_yoy"]].plot(grid=True, title="money supply")
    ax.grid(True)
    # plt.show()
    ####################################################################################################################
    # Year-end balance series.
    msb = ts.get_money_supply_bal()
    """
    货币供应量(年底余额)
    year :统计年度
    m2 :货币和准货币(亿元)
    m1:货币(亿元)
    m0:流通中现金(亿元)
    cd:活期存款(亿元)
    qm:准货币(亿元)
    ftd:定期存款(亿元)
    sd:储蓄存款(亿元)
    rests:其他存款(亿元)
    """
    # print(msb)
    msb["year"] = pd.to_datetime(msb["year"])
    msb = msb.set_index("year")
    msb = msb.replace("--", np.nan)
    msb = msb.astype(np.float64)
    msb = msb.iloc[::-1, :]
    print(msb)
    msb.plot(grid=True, title="money supply balance")
    # NOTE(review): ``ax`` still refers to the first figure's axes here, so
    # this re-enables the grid on the monthly plot, not the balance plot —
    # likely unintended; confirm before changing.
    ax.grid(True)
    plt.show()
| StarcoderdataPython |
75868 | from unittest import TestCase
from common import *
from sc import *
from sc_tests.test_utils import *
class TestScSet(TestCase):
    """Unit tests for the ScSet / ScRelationSet wrappers over sc-memory."""

    def test_sc_set(self):
        """Basic membership, add and remove semantics of ScSet."""
        ctx = TestScSet.MemoryCtx()

        addr1 = ctx.CreateNode(ScType.NodeConst)
        addr2 = ctx.CreateNode(ScType.Node)
        addr3 = ctx.CreateNode(ScType.Node)
        # addr2 is pre-linked into the set via a direct access edge.
        edge = ctx.CreateEdge(ScType.EdgeAccessConstPosPerm, addr1, addr2)

        _set = ScSet(ctx, addr1)

        # check has element
        self.assertTrue(_set.Has(addr2))
        self.assertFalse(_set.Has(addr3))

        # check add element (second Add of the same element is a no-op)
        self.assertTrue(_set.Add(addr3))
        self.assertTrue(_set.Has(addr3))
        self.assertFalse(_set.Add(addr3))

        # check remove element (second Remove of the same element fails)
        self.assertTrue(_set.Remove(addr3))
        self.assertFalse(_set.Has(addr3))
        self.assertFalse(_set.Remove(addr3))
        self.assertTrue(_set.Has(addr2))

    def test_sc_set_clear(self):
        """Clear() must drop every element that was added."""
        ctx = TestScSet.MemoryCtx()

        addrSet = ctx.CreateNode(ScType.Node)
        addr1 = ctx.CreateNode(ScType.NodeConst)
        addr2 = ctx.CreateNode(ScType.Node)
        addr3 = ctx.CreateNode(ScType.Node)
        elements = [addr1, addr2, addr3]

        _set = ScSet(ctx, addrSet)
        for el in elements:
            self.assertTrue(_set.Add(el))

        _set.Clear()
        for el in elements:
            self.assertFalse(_set.Has(el))

    def test_sc_set_iter(self):
        """Iterating a set yields exactly the added elements."""
        ctx = TestScSet.MemoryCtx()

        addrSet = ctx.CreateNode(ScType.Node)
        addr1 = ctx.CreateNode(ScType.NodeConst)
        addr2 = ctx.CreateNode(ScType.Node)
        addr3 = ctx.CreateNode(ScType.Node)
        elements = [addr1, addr2, addr3]

        _set = ScSet(ctx, addrSet)
        for a in elements:
            self.assertTrue(_set.Add(a))

        # iterate elements in set
        count = 0
        for el in _set:
            self.assertTrue(el in elements)
            count += 1
        self.assertEqual(count, len(elements))

    def test_sc_set_relation(self):
        """ScRelationSet: add/iterate/clear through a relation node."""
        ctx = TestScSet.MemoryCtx()

        addrSet = ctx.CreateNode(ScType.Node)
        relAddr = ctx.CreateNode(ScType.NodeConstNoRole)
        addr1 = ctx.CreateNode(ScType.NodeConst)
        addr2 = ctx.CreateNode(ScType.NodeConstClass)
        addr3 = ctx.CreateNode(ScType.NodeConstAbstract)
        elements = [addr1, addr2, addr3]

        _set = ScRelationSet(ctx, addrSet, relAddr)
        for a in elements:
            self.assertFalse(_set.Has(a))
        for a in elements:
            self.assertTrue(_set.Add(a))

        count = 0
        for el in _set:
            self.assertTrue(el in elements)
            count += 1
        self.assertEqual(count, len(elements))

        _set.Clear()
        for a in elements:
            self.assertFalse(_set.Has(a))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.