id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3382539 | <gh_stars>0
import discord
from discord.ext import commands
import json
import time
import datetime
import sys
import psutil
import time
import platform
def seconds_elapsed():
    """Return how long the machine has been up, in seconds."""
    boot_epoch = psutil.boot_time()
    return time.time() - boot_epoch
# Platform strings shown in the `status` command's embed.
OS = platform.platform()
OS2 = platform.system()
# Bot token and command prefix come from a local JSON config file.
with open("cconf.json", "r") as config:
    data = json.load(config)
token = data["token"]
prefix = data["prefix"]
bot = commands.Bot(command_prefix=prefix)
bot.remove_command("help")  # replaced by the custom `help` command below
DEV = ""  # NOTE(review): never read anywhere in this file — confirm before removing
@bot.command()
async def create(ctx, infos):
    """Echo the user-supplied bot info and acknowledge the creation request.

    NOTE(review): despite the reply text, nothing is actually sent to any
    API here — confirm whether the upload step is implemented elsewhere.
    """
    await ctx.send(f"Deine Infos {infos}")
    await ctx.send("Okay Jut die bot infos wurden an die API Geteilt das kann jetzt bis zu 2Wochen dauern!")
@bot.command()
async def status(ctx):
    """Reply with an embed showing platform info and the bot's uptime."""
    # startTime is a module global assigned in on_ready; invoking this
    # command before on_ready has fired would raise NameError.
    uptime = str(datetime.timedelta(seconds=int(round(time.time() - startTime))))
    m = discord.Embed(title="INFO")
    m.add_field(name="OS-V", value=f"{OS}")
    m.add_field(name="OS", value=f"{OS2}")
    m.add_field(name="UPTIME", value=uptime)
    await ctx.send(embed=m)
@bot.command()
async def delete(ctx):
    """Placeholder: always reports that the user owns no bots."""
    await ctx.send("Sorry Du hast keine Bots!")
@bot.command()
async def liste(ctx):
    """Placeholder: always reports that the user owns no bots."""
    await ctx.send("Sorry Du hast keine Bots!")
@bot.command()
async def help(ctx):
    """Delete the invoking message and post the custom help embed.

    The built-in help command was removed at startup (bot.remove_command),
    so this is the only help the bot offers.
    """
    await ctx.message.delete()
    helpem = discord.Embed(title="HELP PAGE USER", timestamp=datetime.datetime.utcnow())
    helpem.add_field(name='create', value='damit erstellst du dir dein bot', inline=True)
    helpem.add_field(name='delete', value='Löscht ein bot von dir', inline=True)
    helpem.add_field(name='liste', value='zeigt deine erstellten bots', inline=True)
    helpem.add_field(name='status', value='Uptime und so', inline=True)
    helpem.set_footer(text=f'Geöffnet von {ctx.author.name}')
    helpem.set_thumbnail(url='https://i.pinimg.com/originals/f7/b1/91/f7b1914abbb5aa8d5270bcc35cc3771d.png')
    await ctx.send(embed=helpem)
@bot.event
async def on_ready():
    """Record the startup time, print the boot banner, and cycle the bot's
    presence through a short "boot animation".

    Fix: the original called time.sleep() inside this coroutine, which
    blocks the entire asyncio event loop (the bot cannot respond to any
    event during the pause); asyncio.sleep() suspends only this task.
    """
    import asyncio  # local import so the file-level import block is untouched

    global startTime
    startTime = time.time()  # read by the `status` command for the uptime field
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"BOOTING Bot Ver - {discord.__version__}"))
    print("""
________________________
CYOB OS LOADED SUCCSFULLY
_________________________""")
    print(f"OS VERSION : {discord.__version__}")
    await asyncio.sleep(1)
    print(f"""
___OS - LOGING__
__I {bot.user} I__
""")
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"BOOT SUCCSFULLY STARTING P-KDJH2 ..."))
    await asyncio.sleep(5)
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f"C!help I auf {len(bot.guilds)} Server Aktiv"))
bot.run(token) | StarcoderdataPython |
126432 | <reponame>tlentali/cyanobyte<gh_stars>0
# Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for Bmp280 v0.1.0.
# Generated from peripherals/Bmp280.yaml using Cyanobyte Codegen v0.1.0
"""
Class for Bmp280
"""
import sys
try:
import smbus
except ImportError:
print("Fatal error! Make sure to install smbus!")
sys.exit(1)
def _sign(val, length):
"""
Convert unsigned integer to signed integer
"""
if val & (1 << (length - 1)):
return val - (1 << length)
return val
class Bmp280:
    """
    Bosch Digital Pressure Sensor

    Exposes the raw temperature registers plus the dig_T* calibration
    words needed to convert the 20-bit reading to degrees Celsius.
    """

    # I2C device address and register map.
    DEVICE_ADDRESS = 119
    REGISTER_TEMPMSB = 250
    REGISTER_TEMPLSB = 251
    REGISTER_TEMPXLSB = 252
    REGISTER_DIGT1 = 136
    REGISTER_DIGT2 = 138
    REGISTER_DIGT3 = 140

    def __init__(self):
        # Open I2C bus 1 to reach the peripheral.
        self.bus = smbus.SMBus(1)

    def get_tempmsb(self):
        """Part 1 of temperature"""
        return self.bus.read_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPMSB)

    def set_tempmsb(self, data):
        """Part 1 of temperature"""
        self.bus.write_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPMSB, data)

    def get_templsb(self):
        """Part 2 of temperature"""
        return self.bus.read_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPLSB)

    def set_templsb(self, data):
        """Part 2 of temperature"""
        self.bus.write_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPLSB, data)

    def get_tempxlsb(self):
        """Final part of temperature"""
        return self.bus.read_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPXLSB)

    def set_tempxlsb(self, data):
        """Final part of temperature"""
        self.bus.write_byte_data(self.DEVICE_ADDRESS, self.REGISTER_TEMPXLSB, data)

    def get_digt1(self):
        """Used for Celcius conversion"""
        return self.bus.read_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT1)

    def set_digt1(self, data):
        """Used for Celcius conversion"""
        self.bus.write_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT1, data)

    def get_digt2(self):
        """Used for Celcius conversion"""
        # NOTE(review): this generated code reads dig_T2 unsigned while
        # dig_T3 below is sign-converted — confirm against the codegen
        # template / datasheet whether dig_T2 should be signed as well.
        return self.bus.read_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT2)

    def set_digt2(self, data):
        """Used for Celcius conversion"""
        self.bus.write_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT2, data)

    def get_digt3(self):
        """Used for Celcius conversion"""
        word = self.bus.read_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT3)
        # dig_T3 is stored as a signed 16-bit calibration word.
        return _sign(word, 16)

    def set_digt3(self, data):
        """Used for Celcius conversion"""
        self.bus.write_word_data(self.DEVICE_ADDRESS, self.REGISTER_DIGT3, data)

    def temperature_asraw(self):
        """Reads the temperature as the raw 20-bit register value."""
        high = self.get_tempmsb()
        mid = self.get_templsb()
        low = self.get_tempxlsb()
        # msb:lsb:xlsb[7:4] assemble into one 20-bit reading.
        return (high << 12) + (mid << 4) + (low >> 4)

    def temperature_ascelsius(self):
        """Reads the temperature in degrees Celsius via the dig_T* calibration."""
        high = self.get_tempmsb()
        mid = self.get_templsb()
        low = self.get_tempxlsb()
        cal1 = self.get_digt1()
        cal2 = self.get_digt2()
        cal3 = self.get_digt3()
        raw_temp = (high << 12) + (mid << 4) + (low >> 4)
        term_a = ((raw_temp / 16384.0) - (cal1 / 1024.0)) * cal2
        base = (raw_temp / 131072.0) - (cal1 / 8192.0)
        term_b = base * base * cal3
        return (term_a + term_b) / 5120.0
| StarcoderdataPython |
1771426 | import tempfile
from unittest import TestCase
from kg_covid_19.transform_utils.scibite_cord import ScibiteCordTransform
class TestScibiteCord(TestCase):
    """Smoke test: ScibiteCordTransform.run() must complete without raising."""

    @classmethod
    def setUpClass(cls) -> None:
        # Output goes to a temp dir nested inside the resource dir so the
        # input fixtures stay untouched.
        cls.input_dir = "tests/resources/scibite_cord"
        cls.output_dir = "tests/resources/scibite_cord"
        cls.tmpdir = tempfile.TemporaryDirectory(dir=cls.input_dir)
        cls.scibite = ScibiteCordTransform(input_dir=cls.input_dir,
                                           output_dir=cls.tmpdir.name)

    def test_run(self):
        # No assertions: only verifies the transform executes end to end.
        self.scibite.run()
| StarcoderdataPython |
1647141 | <reponame>developmentseed/ckanext-mvt<gh_stars>1-10
def task_imports():
    """Return the dotted module paths of the CKAN Celery task modules this extension provides."""
    modules = ['ckanext.mvt.tasks']
    return modules
| StarcoderdataPython |
1783154 | # -*- coding: utf-8 -*-
from .base import Platform, MovingPlatform, FixedPlatform, MultiTransitionMovingPlatform
__all__ = ['Platform', 'MovingPlatform', 'FixedPlatform', 'MultiTransitionMovingPlatform']
| StarcoderdataPython |
3384392 | <reponame>underscorefan/itsachemtrail_gatherer
from .url2doc import soup_from_response, new_soup, get_html
| StarcoderdataPython |
3226465 | #!/usr/bin/env python3
# -*-coding:utf-8-*-
from PySide2.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, \
QPushButton, QLineEdit, QMessageBox
class Form(QWidget):
    """Minimal name-entry form: a label, a line edit, and a submit button."""

    def __init__(self):
        super().__init__()
        nameLabel = QLabel("Name:")
        self.nameLine = QLineEdit()
        self.submitButton = QPushButton("Submit")
        bodyLayout = QVBoxLayout()
        bodyLayout.addWidget(nameLabel)
        bodyLayout.addWidget(self.nameLine)
        bodyLayout.addWidget(self.submitButton)
        # Wire the button to the validation/greeting handler below.
        self.submitButton.clicked.connect(self.submit)
        self.setLayout(bodyLayout)
        self.setWindowTitle("Hello Qt")
        self.show()

    def submit(self):
        """Greet the entered name, or warn when the field is empty."""
        name = self.nameLine.text()
        if name == "":
            QMessageBox.information(self, "Empty Field",
                                    "Please enter a name.")
            return
        else:
            QMessageBox.information(self, "Success!",
                                    "Hello %s!" % name)
# Script entry point: create the Qt application, show the form, and run
# the event loop until the window closes.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    screen = Form()
    sys.exit(app.exec_())
| StarcoderdataPython |
1660047 | <reponame>NSLS-II-OPLS/profile_collection
from bluesky.callbacks.fitting import PeakStats
import bluesky.preprocessors as bpp
# NEEDS TO BE FIXED
def set_zero_alpha():
    """Bluesky plan: drive chi/phi/tth/sh to the positions the geometry
    engine predicts for (0, 0, 0), then zero the remaining axes (ih, ia,
    oa, oh)."""
    chi_nom = geo.forward(0, 0, 0).chi
    yield from set_chi(chi_nom)
    phi_nom = geo.forward(0, 0, 0).phi
    yield from set_phi(phi_nom)
    tth_nom = geo.forward(0, 0, 0).tth
    yield from set_tth(tth_nom)
    sh_nom = geo.forward(0, 0, 0).sh
    yield from set_sh(sh_nom)
    yield from set_ih(0)
    yield from set_ia(0)
    yield from set_oa(0)
    yield from set_oh(0)
def direct_beam():
    """Bluesky plan: set absorbers, open the shutter, zero the angles, and
    drop the sample height so the direct beam passes."""
    yield from bps.mov(abs1, 1)
    yield from bps.mov(abs2, 8)
    yield from bps.mov(shutter, 1)  # open shutter
    yield from mab(0, 0)
    yield from bps.movr(sh, -0.2)  # relative move: lower the sample out of the beam


# Nominal incident angle used elsewhere — presumably degrees; TODO(review) confirm.
alphai = 0.11
def check_sh_fine(value=0.05, detector=lambda_det):
    """Fine sample-height alignment: scan sh over +/-0.15 with shutter
    flashes at incidence angle `value`, move sh to the centroid of the
    detector's ROI-2 intensity, then restore the saved nominal label."""
    yield from bps.mv(geo.det_mode, 1)
    yield from bps.mv(abs2, 5)
    yield from mabt(value, value, 0)
    tmp1 = geo.sh.position  # remember the nominal position label
    print('Start the height scan before GID')
    # Msg('reset_settle_time', sh.settle_time, value)
    # yield from bp.rel_scan([detector],sh,-0.1,0.1,21,per_step=shutter_flash_scan)
    # tmp2=peaks.cen['%s_stats2_total'%detector.name]
    local_peaks = PeakStats(sh.user_readback.name, '%s_stats2_total' % detector.name)
    yield from bpp.subs_wrapper(bp.rel_scan([detector], sh, -0.15, 0.15, 16, per_step=shutter_flash_scan), local_peaks)
    print("at #1")
    tmp2 = local_peaks.cen  # sh position with maximum ROI-2 intensity
    print("at #2")
    yield from bps.mv(sh, tmp2)
    yield from set_sh(tmp1)
    # NOTE(review): Msg(...) is constructed but never yielded to the
    # RunEngine, so this line is likely a no-op — confirm intent.
    Msg('reset_settle_time', sh.settle_time, 0)
def check_sh_coarse(value=0, detector=lambda_det):
    '''
    Coarse sample-height alignment: same procedure as check_sh_fine but
    scanning sh over +/-1 at incidence angle `value`.
    '''
    yield from bps.mv(geo.det_mode, 1)
    yield from bps.mv(abs2, 6)
    yield from mabt(value, value, 0)
    tmp1 = geo.sh.position  # remember the nominal position label
    # Msg('reset_settle_time', sh.settle_time, 2)
    print('Start the height scan before GID')
    # yield from bp.rel_scan([detector],sh,-1,1,21,per_step=shutter_flash_scan)
    # tmp2=peaks.cen['%s_stats2_total'%detector.name]
    local_peaks = PeakStats(sh.user_readback.name, '%s_stats2_total' % detector.name)
    yield from bpp.subs_wrapper(bp.rel_scan([detector], sh, -1, 1, 21, per_step=shutter_flash_scan), local_peaks)
    tmp2 = local_peaks.cen  # sh position with maximum ROI-2 intensity
    yield from bps.mv(sh, tmp2)
    yield from set_sh(tmp1)
    # NOTE(review): Msg(...) is constructed but never yielded — likely a no-op.
    Msg('reset_settle_time', sh.settle_time, 0)
def sample_height_set_fine_pilatus(detector=pilatus300k):
    """Fine sample-height alignment using the Pilatus 300K detector."""
    yield from bps.mv(geo.det_mode, 3)
    yield from det_exposure_time_new(detector, 1, 1)
    # yield from bps.mv(detector.roi2.size.y,16)
    # yield from bps.mv(detector.roi2.min_xyz.min_y,97)
    # with Detsaxy=60, rois set between 80 an 100 in y
    yield from bps.mv(abs2, 5)
    yield from mabt(0.08, 0.08, 0)
    tmp1 = geo.sh.position  # remember the nominal position label
    # yield from bps.mov(shutter,1)
    print('Start the height scan before GID')
    yield from bp.rel_scan([pilatus300k], sh, -0.2, 0.2, 21, per_step=sleepy_step)
    # yield from bps.mov(shutter,0)
    # NOTE(review): unlike the sibling helpers this reads the global
    # `peaks` callback instead of a local PeakStats subscription.
    tmp2 = peaks.cen['pilatus300k_stats2_total']
    yield from bps.mv(sh, tmp2)
    yield from set_sh(tmp1)
def check_ih():
    '''Align the spectrometer stage height (ih) against the quadem monitor
    and redefine the centroid position as ih = 0.'''
    yield from bps.mv(geo.det_mode, 1)  # move lambda detector in
    yield from bps.mv(abs2, 6)  # insert the second absorber
    yield from mabt(0, 0, 0)
    yield from bps.mv(sh, -1)  # drop the sample out of the beam
    yield from bps.mv(shutter, 1)  # open shutter
    print('resetting ih')
    # yield from bp.rel_scan([quadem],ih,-0.15,0.15,16)  # scan quadem against height
    # tmp=peaks.cen['quadem_current3_mean_value']
    local_peaks = PeakStats(ih.user_readback.name, quadem.current3.mean_value.name)
    yield from bpp.subs_wrapper(bp.rel_scan([quadem], ih, -0.15, 0.15, 16), local_peaks)
    tmp = local_peaks.cen  # ih position with maximum monitor signal
    yield from bps.mv(ih, tmp)
    yield from set_ih(0)  # define this position as ih = 0
    yield from bps.mv(shutter, 0)  # close shutter
def check_tth():
    '''Align the spectrometer rotation (tth) on the quadem monitor and
    restore the saved nominal angle label afterwards.'''
    yield from bps.mv(geo.det_mode, 1)
    yield from bps.mv(abs2, 6)
    yield from mabt(0, 0, 0)
    tmp1 = geo.tth.position  # remember the nominal angle label
    print('resetting tth')
    yield from bps.mv(sh, -1)  # drop the sample out of the beam
    yield from bps.mv(shutter, 1)  # open shutter
    local_peaks = PeakStats(tth.user_readback.name, quadem.current3.mean_value.name)
    # yield from bp.rel_scan([quadem],tth,-0.1,0.1,21)
    yield from bpp.subs_wrapper(bp.rel_scan([quadem], tth, -0.1, 0.1, 21), local_peaks)
    tmp2 = local_peaks.cen  # tth position with maximum monitor signal
    yield from bps.mv(tth, tmp2)
    yield from set_tth(tmp1)
    yield from bps.mv(shutter, 0)  # close shutter
def check_astth(detector=lambda_det):
    '''Align the detector-arm rotation (astth) on the detector's ROI-2
    signal and restore the saved nominal angle label afterwards.'''
    yield from bps.mv(geo.det_mode, 1)
    yield from bps.mv(abs2, 6)
    yield from mabt(0.0, 0.0, 0)
    tmp1 = geo.astth.position  # remember the nominal angle label
    yield from bps.mvr(sh, -1)  # relative move: drop the sample out of the beam
    print('setting astth')
    yield from bps.mv(shutter, 1)  # open shutter
    # yield from bp.rel_scan([detector],astth,-0.1,0.1,21)
    # tmp2=peaks.cen['%s_stats2_total'%detector.name]
    local_peaks = PeakStats(astth.user_readback.name, '%s_stats2_total' % detector.name)
    yield from bpp.subs_wrapper(bp.rel_scan([detector], astth, -0.1, 0.1, 21), local_peaks)
    tmp2 = local_peaks.cen  # astth position with maximum ROI-2 intensity
    yield from bps.mv(astth, tmp2)
    yield from bps.mv(shutter, 0)  # close shutter
    yield from set_astth(tmp1)
def check_linear_time():
    """Detector-linearity check: count at a series of exposure times and
    record results in the global 4x7 `dif` array (row 0 = exposure time,
    row 1 = monitor mean, row 2 = detector total, row 3 = counts/second)."""
    # eta
    global dif
    dif = np.zeros((4, 7))
    t = [0.1, 0.2, 0.5, 1, 2, 5, 10]
    for i, j in enumerate(t):
        # yield from bps.mv(i, i)
        exp_t = j
        yield from bps.mov(
            lambda_det.cam.acquire_time, exp_t,
            lambda_det.cam.acquire_period, exp_t + 0.2,
            # NOTE(review): exp_t/exp_t is always 1 (one image per point) —
            # confirm whether a variable image count was intended.
            lambda_det.cam.num_images, int(exp_t / exp_t))
        yield from bp.count([quadem, lambda_det])
        dif[0, i] = exp_t
        dif[1, i] = quadem.current3.mean_value.get()
        dif[2, i] = lambda_det.stats3.total.get()
        dif[3, i] = dif[2, i] / dif[0, i]
    print(dif)
def mplot1():
    """Plot counts-per-second vs. exposure time from the global `dif`
    array filled by check_linear_time()."""
    plt.figure()
    plt.plot(dif[0, :], dif[3, :])
    plt.xscale("log")
    plt.xlabel('exposure time [s]')
    plt.ylabel('pilatus100k intensity/exposure time [counts/s]')
    plt.show()
    return
def check_linear_slits():
    """Detector/monitor linearity vs. S2 slit gap (each width measured
    twice).  Fills the global 4x18 `dif` array: row 0 = gap, row 1 =
    monitor mean, row 2 = detector ROI-2 total, row 3 = detector/monitor."""
    # eta
    global dif
    dif = np.zeros((4, 18))
    slit_width = [0.01, 0.01, 0.02, 0.02, 0.03, 0.03, 0.04, 0.04, 0.05, 0.05, 0.06, 0.06, 0.07, 0.07, 0.08, 0.08, 0.09, 0.09]
    for i, j in enumerate(slit_width):
        yield from bps.mov(S2.vg, j)
        yield from bp.count([quadem, lambda_det])
        dif[0, i] = j
        dif[1, i] = quadem.current3.mean_value.get()
        dif[2, i] = lambda_det.stats2.total.get()
        dif[3, i] = dif[2, i] / dif[1, i]
    print(dif)
def mplot2():
    """Plot scaled detector, monitor, and detector/monitor curves vs. slit
    gap from the global `dif` array filled by check_linear_slits().
    Scale factors (5, /4, /0.006) only bring the curves onto one axis."""
    plt.figure()
    plt.plot(dif[0, :], 5 * dif[3, :], color='r', label="detector/monitor")
    plt.plot(dif[0, :], dif[2, :] / 4, 'g', label="detector")
    plt.plot(dif[0, :], dif[1, :] / 0.006, 'b', label="monitor")
    plt.xlabel('s2.vg')
    plt.ylabel('counts/monitor')
    plt.show()
    return
| StarcoderdataPython |
194266 | <gh_stars>1-10
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Krb5(AutotoolsPackage):
    """Network authentication protocol"""

    homepage = "https://kerberos.org"
    url = "https://kerberos.org/dist/krb5/1.16/krb5-1.16.1.tar.gz"

    version('1.16.1', '848e9b80d6aaaa798e3f3df24b83c407')

    depends_on('bison', type='build')
    depends_on('openssl')

    # The autotools project lives under the src/ subdirectory.
    configure_directory = 'src'
    build_directory = 'src'

    def configure_args(self):
        """Extra flags passed to ./configure."""
        return [
            '--disable-debug',
            '--disable-dependency-tracking',
            '--disable-silent-rules',
            '--without-system-verto',
        ]
| StarcoderdataPython |
3284700 | <reponame>46graus/pagarme-python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import setup, find_packages
__description__ = 'Pagar.me Python'
__long_description__ = 'Python library for Pagar.me API'
__author__ = '<NAME>, <NAME>'
__author_email__ = '<EMAIL>'
__special_things__ = '<NAME>, <NAME>'
testing_extras = [
'pytest',
'pytest-cov',
]
def _find_version():
    """Extract the version string from the `VERSION = '...'` line in
    pagarme/sdk.py so the version is defined in exactly one place."""
    filename = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'pagarme/sdk.py'
    )
    with open(filename) as f:
        data = f.read()
    # NOTE(review): raises AttributeError if the VERSION line is absent.
    match = re.search(r"VERSION = '(.+)'", data)
    return match.groups()[0]


__version__ = _find_version()

# One requirement per line in requirements.txt.
# NOTE(review): the file handle is never closed — harmless in a short-lived
# setup.py run, but a with-block would be cleaner.
install_requires = open('requirements.txt').read().strip().split('\n')
# Package metadata handed to setuptools.
# Fix: the original passed `special_things=__special_things__`, which is not
# a setup() keyword — setuptools warns "Unknown distribution option" and
# discards it — so the bogus keyword is removed here (the credit text itself
# is preserved in the __special_things__ constant above).
setup(
    name='pagarme-python',
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    packages=find_packages(),
    license='MIT',
    description=__description__,
    long_description=__long_description__,
    url='https://github.com/pagarme/pagarme-python',
    keywords='Payment, pagarme',
    include_package_data=True,
    zip_safe=False,
    install_requires=install_requires,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Environment :: Web Environment',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
    ],
    tests_require=['pytest'],
    extras_require={
        'testing': testing_extras,
    },
)
| StarcoderdataPython |
191410 | from collections import ChainMap
import logging
import time
import sys
import GetArgs
import GetSite
from Config import Defaults
from Config import LoadSites
from Config import LoadConfig
from Config import Settings
def work():
    """Entry point: build the layered configuration, set up logging, then
    fetch every enabled site from the sites file, timing the whole run."""

    def init_config():
        # CLI args override file config, which overrides built-in defaults.
        args = GetArgs.get_args()
        return ChainMap(args, LoadConfig.load_config(args.get("config"), args.get("no_config")), Defaults.get_defaults())

    def init_logging():
        # Log to stderr when no logfile is configured (or logging to file
        # is disabled); otherwise log to the configured file.
        logfile_location = Settings.get_logfile_location(config)
        if logfile_location == "" or config.get("no_logfile", False):
            logging.basicConfig(level=config["log_level"], format="%(asctime)s %(message)s")
        else:
            logging.basicConfig(filename=logfile_location, level=config["log_level"],
                                filemode=config["logfile_mode"], format="%(asctime)s %(message)s")

    def get_site(site):
        def site_disabled():
            return site.get("disabled", False)
        if not site_disabled():
            GetSite.get_site(site, config)

    config = init_config()
    init_logging()
    logging.info("Start")
    start_time = time.time()
    # list() forces the lazy map so every site is actually processed.
    list(map(get_site, LoadSites.load_sites(config["sites_file"])))
    logging.info("End (total time: %d seconds)" % (time.time() - start_time))


try:
    work()
except KeyboardInterrupt:
    # Ctrl-C: flush log handlers and exit cleanly.
    logging.shutdown()
    sys.exit(0)
| StarcoderdataPython |
3377654 | # Generated by Django 3.2.8 on 2021-11-15 20:13
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Funcionarios model to the
    # singular Funcionario (Django renames the DB table accordingly).

    dependencies = [
        ('usuarios', '0002_auto_20211115_1703'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Funcionarios',
            new_name='Funcionario',
        ),
    ]
| StarcoderdataPython |
1662972 | <filename>tests/test_main.py
from lrc_kit import ComboLyricsProvider, SearchRequest, KugouProvider, Flac123Provider, MegalobizProvider, PROVIDERS
import lrc_kit
import logging, os
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel('DEBUG')
def test_custom():
    """A ComboLyricsProvider built from a custom provider list still resolves lyrics."""
    providers = lrc_kit.MINIMAL_PROVIDERS + [lrc_kit.Flac123Provider]
    engine = ComboLyricsProvider(providers)
    res = engine.search(SearchRequest('Mk.Gee', 'You'))
    # NOTE(review): no assertion — this only fails if search/export raises.
    res.export(os.path.join('files', 'you'))
def test_individual_success_multi_word():
    """Exercise every provider with a multi-word track title."""
    search = SearchRequest('<NAME>', 'Broke Boi')
    LOGGER.info(list(map(lambda p: p.name, PROVIDERS)))
    for provider in PROVIDERS:
        engine = provider()
        result = engine.search(search)
        if result != None:
            result.export(os.path.join('files', f'{engine.name}_stan'))
            LOGGER.info(engine.name + ' Success!')
        else:
            # NOTE(review): a failing provider only logs; the test never fails.
            LOGGER.info(engine.name + " Fail :(")
def test_individual_success():
    """Exercise every provider with a well-known track ('stan' by Eminem)."""
    search = SearchRequest('eminem', 'stan')
    LOGGER.info(list(map(lambda p: p.name, PROVIDERS)))
    for provider in PROVIDERS:
        engine = provider()
        result = engine.search(search)
        if result != None:
            result.export(os.path.join('files', f'{engine.name}_stan'))
            LOGGER.info(engine.name + ' Success!')
        else:
            # NOTE(review): a failing provider only logs; the test never fails.
            LOGGER.info(engine.name + " Fail :(")
def test_individual_fail():
    """Search a track most providers are expected to miss; export any hits.

    NOTE(review): despite the name there is no assertion that providers fail.
    """
    search = SearchRequest('Felly', 'Fabrics')
    for provider in PROVIDERS:
        engine = provider()
        result = engine.search(search)
        if result != None:
            result.export(os.path.join('files', f'{engine.name}_felly'))
def test_combo_fail_2():
    """Nonsense artist/track must yield no result from the combined engine."""
    engine = ComboLyricsProvider()
    search = SearchRequest('431242424234', 'DJ adsfasdfsdafadsfsd')
    result = engine.search(search)
    assert result == None
def test_combo_success():
    """The combined engine finds a well-known track and exports it with a .lrc extension."""
    engine = ComboLyricsProvider()
    search = SearchRequest('eminem', 'stan')
    result = engine.search(search)
    # NOTE(review): export happens before the None check — a miss would
    # raise AttributeError here rather than fail the assert below.
    result.export(os.path.join('files', 'stan'), extension='.lrc')
    assert result != None
| StarcoderdataPython |
30360 | <reponame>myworldhere/dailyfresh
# coding=utf-8
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
from models import *
from haystack.views import SearchView
# Create your views here.
def index(request):
    """Home page: for every category, show the 4 newest and 4 most-clicked goods."""
    category_list = Category.objects.all()
    array = []
    for category in category_list:
        news = category.goodsinfo_set.order_by('-id')[0:4]    # newest 4
        hots = category.goodsinfo_set.order_by('-click')[0:4]  # most clicked 4
        array.append({'news': news, 'hots': hots, 'category': category})
    context = {'page_style': 'goods', 'title': '首页', 'array': array}
    return render(request, 'df_goods/index.html', context)
def list(request, tid, index, sort):
    """Category listing page, paginated 3 items per page.

    tid: category id; index: 1-based page number; sort: '1' newest,
    '2' by price, '3' by popularity (clicks).

    NOTE(review): this view shadows the built-in `list`; renaming would
    require touching the URLconf, so it is left as-is.
    """
    category = Category.objects.get(id=tid)
    # New-arrival recommendations (two newest items).
    news = category.goodsinfo_set.order_by('-id')[0:2]
    if sort == '1':  # default: order by listing time (newest first)
        goods_list = GoodsInfo.objects.filter(category_id=int(tid)).order_by('-id')
    elif sort == '2':  # order by price
        goods_list = GoodsInfo.objects.filter(category_id=int(tid)).order_by('-price')
    elif sort == '3':  # order by popularity (click count)
        goods_list = GoodsInfo.objects.filter(category_id=int(tid)).order_by('-click')
    # NOTE(review): any other `sort` value leaves goods_list unassigned and
    # raises UnboundLocalError below — confirm the URLconf constrains sort.
    paginator = Paginator(goods_list, 3)
    page = paginator.page(int(index))
    context = {
        'title': category.title,
        'page_style': 'goods',
        'page': page,
        'news': news,
        'sort': sort,
        'category': category,
        'paginator': paginator,
        'sort_title': ['默认', '价格', '人气']
    }
    return render(request, 'df_goods/list.html', context)
def detail(request, id):
    """Product detail page: bumps the product's click counter and keeps the
    five most recently viewed product ids in a 'records' cookie."""
    goods = GoodsInfo.objects.get(id=id)
    news = goods.category.goodsinfo_set.order_by('-id')[0:2]
    goods.click = goods.click + 1
    goods.save()
    context = {'title': goods.category.title, 'page_style': 'goods', 'goods': goods, 'news': news}
    response = render(request, 'df_goods/detail.html', context)
    # Recently-viewed history, stored as a comma-separated id string.
    records = request.COOKIES.get('records', '')
    if records != '':
        records_array = records.split(',')
        if records_array.count(id) >= 1:  # already recorded: drop the old entry
            records_array.remove(id)
        records_array.insert(0, id)  # newest view goes first
        if len(records_array) > 5:  # cap the history at 5 entries
            records_array.pop(5)
        records = ','.join(records_array)  # back to the cookie string form
    else:
        records = id
    response.set_cookie('records', records)
    return response
# Customize the haystack full-text search context.
class MySearchView(SearchView):
    """SearchView that injects the page title and style used by the site templates."""

    def extra_context(self):
        context = super(MySearchView, self).extra_context()
        context['title'] = '搜索'
        context['page_style'] = 'goods'
        return context
| StarcoderdataPython |
class Solution:
    def XXX(self, nums: List[int]) -> int:
        """Remove duplicates from a sorted list in place and return the new length.

        The first k slots of `nums` are overwritten with the distinct values
        in their original order; k is returned (LeetCode 26 contract).

        Fixes over the original: the old loop read nums[i + 1] before its
        bounds check, so empty and single-element lists raised IndexError.
        """
        if not nums:
            return 0
        write = 0  # index of the last distinct value kept so far
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
        return write + 1
| StarcoderdataPython |
3281326 | <gh_stars>10-100
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
import os
class TestStackAdvisorInitialization(TestCase):
    def setUp(self):
        """Load scripts/stack_advisor.py from the source tree as a module so
        its top-level helpers can be exercised directly."""
        import imp
        self.test_directory = os.path.dirname(os.path.abspath(__file__))
        stack_advisor_path = os.path.join(self.test_directory, '../../main/resources/scripts/stack_advisor.py')
        with open(stack_advisor_path, 'rb') as fp:
            # NOTE(review): `imp` is deprecated on Python 3; kept unchanged
            # to mirror the original loading mechanism.
            self.stack_advisor = imp.load_module('stack_advisor', fp, stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
    def test_stackAdvisorLoadedForNotHDPStack(self):
        """A custom (non-HDP) stack must load its own stack_advisor.py, and
        its recommendations must come from the child class, not the base."""
        # Point the loader at the test-resource stacks directory.
        path_template = os.path.join(self.test_directory, '../resources/stacks/{0}/{1}/services/stack_advisor.py')
        path_template_name = "STACK_ADVISOR_IMPL_PATH_TEMPLATE"
        setattr(self.stack_advisor, path_template_name, path_template)
        self.assertEquals(path_template, getattr(self.stack_advisor, path_template_name))
        instantiate_stack_advisor_method_name = 'instantiateStackAdvisor'
        instantiate_stack_advisor_method = getattr(self.stack_advisor, instantiate_stack_advisor_method_name)
        stack_advisor = instantiate_stack_advisor_method("XYZ", "1.0.1", ["1.0.0"])
        self.assertEquals("XYZ101StackAdvisor", stack_advisor.__class__.__name__)
        # Minimal cluster description: one YARN service with four components.
        services = {
            "Versions":
                {
                    "stack_name": "XYZ",
                    "stack_version": "1.0.1"
                },
            "services": [
                {
                    "StackServices": {
                        "service_name": "YARN"
                    },
                    "components": [
                        {
                            "StackServiceComponents": {
                                "component_name": "RESOURCEMANAGER"
                            }
                        },
                        {
                            "StackServiceComponents": {
                                "component_name": "APP_TIMELINE_SERVER"
                            }
                        },
                        {
                            "StackServiceComponents": {
                                "component_name": "YARN_CLIENT"
                            }
                        },
                        {
                            "StackServiceComponents": {
                                "component_name": "NODEMANAGER"
                            }
                        }
                    ]
                }
            ]
        }
        hosts = {
            "items": [
                {"Hosts": {"host_name": "host1"}},
                {"Hosts": {"host_name": "host2"}}
            ]
        }
        config_recommendations = stack_advisor.recommendConfigurations(services, hosts)
        yarn_configs = config_recommendations["recommendations"]["blueprint"]["configurations"]["yarn-site"]["properties"]
        '''Check that value is populated from child class, not parent'''
        self.assertEquals("-Xmx101m", yarn_configs["yarn.nodemanager.resource.memory-mb"])
def test_stackAdvisorDefaultImpl(self):
instantiate_stack_advisor_method_name = 'instantiateStackAdvisor'
instantiate_stack_advisor_method = getattr(self.stack_advisor, instantiate_stack_advisor_method_name)
'''Not existent stack - to return default implementation'''
default_stack_advisor = instantiate_stack_advisor_method("HDP1", "2.0.6", [])
self.assertEquals("DefaultStackAdvisor", default_stack_advisor.__class__.__name__)
services = {
"Versions":
{
"stack_name":"HDP1",
"stack_version":"2.0.6"
},
"services" : [
{
"StackServices" : {
"service_name" : "GANGLIA",
"service_version" : "3.5.0",
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "ALL",
"component_name" : "GANGLIA_MONITOR",
"is_master" : False,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1",
"component_name" : "GANGLIA_SERVER",
"is_master" : True,
"hostnames" : [ ]
}
}
]
},
{
"StackServices" : {
"service_name" : "HBASE",
"service_version" : "0.9192.168.3.11"
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "HBASE_CLIENT",
"is_master" : False,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "HBASE_MASTER",
"is_master" : True,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "HBASE_REGIONSERVER",
"is_master" : False,
"hostnames" : [ ]
}
}
]
},
{
"StackServices" : {
"service_name" : "HDFS",
"service_version" : "2.4.0.2.1"
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "DATANODE",
"is_master" : False,
"hostnames" : [ ]
}
}, {
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "HDFS_CLIENT",
"is_master" : False,
"hostnames" : [ ]
}
}, {
"StackServiceComponents" : {
"cardinality" : "0+",
"component_name" : "JOURNALNODE",
"is_master" : False,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1-2",
"component_name" : "NAMENODE",
"is_master" : True,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1",
"component_name" : "SECONDARY_NAMENODE",
"is_master" : True,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "0+",
"component_name" : "ZKFC",
"is_master" : False,
"hostnames" : [ ]
}
}
]
},
{
"StackServices" : {
"service_name" : "PIG",
"service_version" : "0.192.168.127.12"
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "0+",
"component_name" : "PIG",
"is_master" : False,
"hostnames" : [ ]
}
}
]
},
{
"StackServices" : {
"service_name" : "TEZ",
"service_version" : "0.4.0.2.1"
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "0+",
"component_name" : "TEZ_CLIENT",
"is_master" : False,
"hostnames" : [ ]
}
}
]
},
{
"StackServices" : {
"service_name" : "ZOOKEEPER",
"service_version" : "3.4.5.2.1",
},
"components" : [
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_category" : "CLIENT",
"component_name" : "ZOOKEEPER_CLIENT",
"is_master" : False,
"hostnames" : [ ]
}
},
{
"StackServiceComponents" : {
"cardinality" : "1+",
"component_name" : "ZOOKEEPER_SERVER",
"is_master" : True,
"hostnames" : [ ]
}
}
]
}
],
"configurations" : {}
}
hosts= {
"items": [
{"Hosts": {"host_name": "host1",
"cpu_count": 1,
"total_mem": 2097152,
"disk_info": [{
"size": '80000000',
"mountpoint": "/"
}]
}
},
{"Hosts": {"host_name": "host2",
"cpu_count": 1,
"total_mem": 2097152,
"disk_info": [{
"size": '80000000',
"mountpoint": "/"
}]
}
}
]
}
actualValidateConfigResponse = default_stack_advisor.validateConfigurations(services, hosts)
actualValidateLayoutResponse = default_stack_advisor.validateComponentLayout(services, hosts)
expectedValidationResponse = {
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"},
"items": []
}
self.assertEquals(actualValidateConfigResponse, expectedValidationResponse)
self.assertEquals(actualValidateLayoutResponse, expectedValidationResponse)
actualRecommendConfigResponse = default_stack_advisor.recommendConfigurations(services, hosts)
expectedRecommendConfigResponse = {
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"},
"hosts": ["host1", "host2"],
"services": ['GANGLIA', 'HBASE', 'HDFS', 'PIG', 'TEZ', 'ZOOKEEPER'],
"recommendations": {
"blueprint": {
"configurations": {},
"host_groups": []
},
"blueprint_cluster_binding": {
"host_groups": []
}
}
}
self.assertEquals(actualRecommendConfigResponse, expectedRecommendConfigResponse)
actualRecommendLayoutResponse = default_stack_advisor.recommendComponentLayout(services, hosts)
expectedRecommendLayoutResponse = {
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"},
"hosts": ["host1", "host2"],
"services": ['GANGLIA', 'HBASE', 'HDFS', 'PIG', 'TEZ', 'ZOOKEEPER'],
"recommendations": {
"blueprint": {
"host_groups": [
{
"name": "host-group-1",
"components": []
},
{
"name": "host-group-2",
"components": [
{"name": "GANGLIA_SERVER"},
{"name": "HBASE_MASTER"},
{"name": "NAMENODE"},
{"name": "SECONDARY_NAMENODE"},
{"name": "ZOOKEEPER_SERVER"},
{"name": "ZOOKEEPER_CLIENT"}
]
}
]
},
"blueprint_cluster_binding":
{
"host_groups": [
{
"name": "host-group-1",
"hosts": [{"fqdn": "host2"}]
},
{
"name": "host-group-2",
"hosts": [{"fqdn": "host1"}]
}
]
}
}
}
self.assertEquals(actualRecommendLayoutResponse, expectedRecommendLayoutResponse)
# Test with maintenance_state. One host is in maintenance mode.
hosts= {
"items": [
{"Hosts": {"host_name": "host1",
"maintenance_state":"OFF",
"cpu_count": 1}
},
{"Hosts": {"host_name": "host2",
"maintenance_state":"ON",
"cpu_count": 1}
}
]
}
actualRecommendLayoutResponse = default_stack_advisor.recommendComponentLayout(services, hosts)
expectedRecommendLayoutResponse = {
"services": ["GANGLIA", "HBASE", "HDFS", "PIG", "TEZ", "ZOOKEEPER"],
"recommendations": {
"blueprint": {
"host_groups": [
{
"name": "host-group-1",
"components": [
{
"name": "GANGLIA_SERVER"
},
{
"name": "HBASE_MASTER"
},
{
"name": "NAMENODE"
},
{
"name": "SECONDARY_NAMENODE"
},
{
"name": "ZOOKEEPER_SERVER"
},
{
"name": "ZOOKEEPER_CLIENT"
}
]
}
]
},
"blueprint_cluster_binding":
{
"host_groups": [
{
"hosts": [{"fqdn": "host1"}],
"name": "host-group-1"
}
]
}
},
"hosts": ["host1"],
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"}
}
self.assertEquals(actualRecommendLayoutResponse, expectedRecommendLayoutResponse)
# Test with maintenance_state. Both hosts are in maintenance mode.
hosts= {
"items": [
{"Hosts": {"host_name": "host1",
"maintenance_state":"ON",
"cpu_count": 1,
"total_mem": 2097152,
"disk_info": [{
"size": '80000000',
"mountpoint": "/"
}]
}
},
{"Hosts": {"host_name": "host2",
"maintenance_state":"ON",
"cpu_count": 1,
"total_mem": 2097152,
"disk_info": [{
"size": '80000000',
"mountpoint": "/"
}]
}
}
]
}
actualRecommendLayoutResponse = default_stack_advisor.recommendComponentLayout(services, hosts)
expectedRecommendLayoutResponse = {
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"},
"hosts": [],
"services": ['GANGLIA', 'HBASE', 'HDFS', 'PIG', 'TEZ', 'ZOOKEEPER'],
"recommendations": {
"blueprint": {
"host_groups": []
},
"blueprint_cluster_binding": {
"host_groups": []
}
}
}
self.assertEquals(actualRecommendLayoutResponse, expectedRecommendLayoutResponse)
# Config groups support by default
services["config-groups"] = [{
"configurations": {
},
"hosts": [
'host2'
]
}]
actualConfigGroupRecommendConfigResponse = \
default_stack_advisor.recommendConfigurations(services, hosts)
expectedConfigGroupRecommendConfigResponse = {
"Versions": {"stack_name": "HDP1", "stack_version": "2.0.6"},
"hosts": ["host1", "host2"],
"services": ['GANGLIA', 'HBASE', 'HDFS', 'PIG', 'TEZ', 'ZOOKEEPER'],
"recommendations": {
'config-groups': [
{
'configurations': {},
'dependent_configurations': {},
'hosts': [
'host2'
]
}
],
"blueprint": {
"configurations": {},
"host_groups": []
},
"blueprint_cluster_binding": {
"host_groups": []
}
}
}
self.assertEquals(actualConfigGroupRecommendConfigResponse, expectedConfigGroupRecommendConfigResponse)
services = {
"services": [
{
"StackServices" : {
"service_name" : "YARN",
"stack_name" : "HDP",
"stack_version" : "2.3"
},
"configurations" : [
{
"StackConfigurations" : {
"property_depended_by" : [
{
"type" : "yarn-site",
"name" : "yarn.scheduler.minimum-allocation-vcores"
},
{
"type" : "yarn-site",
"name" : "yarn.scheduler.maximum-allocation-vcores"
}
],
"property_name" : "yarn.nodemanager.resource.cpu-vcores",
"type" : "yarn-site.xml"
},
"dependencies": []
},
{
"StackConfigurations" : {
"property_name" : "yarn.nodemanager.resource.memory-mb",
"type" : "yarn-site.xml"
},
"dependencies": [
{
"StackConfigurationDependency" : {
"dependency_name": "yarn.scheduler.maximum-allocation-mb",
"dependency_type": "yarn-site"
}
},
{
"StackConfigurationDependency" : {
"dependency_name": "yarn.scheduler.minimum-allocation-mb",
"dependency_type": "yarn-site"
}
},
]
},
{
"StackConfigurations" : {
"property_depended_by" : [
{
"type" : "mapred-site",
"name" : "yarn.app.mapreduce.am.resource.mb"
},
{
"type" : "mapred-site",
"name" : "mapreduce.map.memory.mb"
},
{
"type" : "mapred-site",
"name" : "mapreduce.reduce.memory.mb"
}
],
"property_name" : "yarn.scheduler.maximum-allocation-mb",
"type" : "yarn-site.xml"
},
"dependencies": []
},
{
"StackConfigurations" : {
"property_depended_by" : [ ],
"property_name" : "yarn.scheduler.maximum-allocation-vcores",
"type" : "yarn-site.xml"
},
"dependencies": []
},
{
"StackConfigurations" : {
"property_name" : "yarn.scheduler.minimum-allocation-mb",
"type" : "yarn-site.xml"
},
"dependencies": [
{
"StackConfigurationDependency" : {
"dependency_name": "hive.tez.container.size",
"dependency_type": "hive-site"
}
},
{
"StackConfigurationDependency" : {
"dependency_name": "yarn.app.mapreduce.am.resource.mb",
"dependency_type": "mapred-site"
}
},
{
"StackConfigurationDependency" : {
"dependency_name": "mapreduce.map.memory.mb",
"dependency_type": "mapred-site"
}
},
{
"StackConfigurationDependency" : {
"dependency_name": "mapreduce.reduce.memory.mb",
"dependency_type": "mapred-site"
}
},
]
},
{
"StackConfigurations" : {
"property_name" : "yarn.scheduler.minimum-allocation-vcores",
"type" : "yarn-site.xml"
},
"dependencies": []
}
]
}
],
"changed-configurations": [
{
"type": "yarn-site",
"name": "yarn.nodemanager.resource.memory-mb"
}
]
}
properties_dict = default_stack_advisor.getAffectedConfigs(services)
expected_properties_dict = [{'name': 'yarn.scheduler.maximum-allocation-mb', 'type': 'yarn-site'},
{'name': 'yarn.scheduler.minimum-allocation-mb', 'type': 'yarn-site'},
{'name': 'hive.tez.container.size', 'type': 'hive-site'},
{'name': 'yarn.app.mapreduce.am.resource.mb', 'type': 'mapred-site'},
{'name': 'mapreduce.map.memory.mb', 'type': 'mapred-site'},
{'name': 'mapreduce.reduce.memory.mb', 'type': 'mapred-site'}]
self.assertEquals(properties_dict, expected_properties_dict) | StarcoderdataPython |
1757134 | <gh_stars>1-10
from setuptools import find_packages, setup

# PyPI long description is taken verbatim from the README.
with open("README.rst", "r") as readme:
    long_description = readme.read()

setup(
    name='uwVIKOR',
    version='0.1.0',
    description='Unweighted VIKOR method',
    long_description=long_description,
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    url='https://github.com/Aaron-AALG/uwVIKOR',
    download_url='https://github.com/Aaron-AALG/uwVIKOR/releases/tag/uwVIKOR',
    packages=find_packages(include=['uwVIKOR']),
    install_requires=[
        'pandas >= 1.2.4',
        'numpy >= 1.19',
        'scipy >= 1.6.3',
    ],
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
    ],
)
| StarcoderdataPython |
3387393 | <reponame>MIT-Hydration/HydrationIII
from time import sleep # this lets us have a time delay
import time
from abc import ABC, abstractmethod # https://docs.python.org/3/library/abc.html
import numpy
import threading
import configparser
# Module-wide configuration; pin numbers and sampling settings live in config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
# Import gpiozero only when actually running on the Raspberry Pi, so this
# module stays importable (via MockRelayTriac) on development machines.
if config.getboolean('Operating System', 'RunningInRPi'):
    from gpiozero import PWMLED, DigitalOutputDevice
class AbstractRelayTriac(ABC):
    """Interface for the relay/triac power board.

    Implementations keep the heater and drill relays mutually exclusive
    (see MockRelayTriac / RelayTriac) and expose a triac output level
    (presumably a PWM duty cycle in [0, 1] — see RelayTriac's PWMLED).
    """

    @abstractmethod
    def getHeater(self):
        """Return whether the heater is on or not."""
        pass

    @abstractmethod
    def setHeater(self, val):
        """Switch the heater on (truthy val) or off."""
        pass

    @abstractmethod
    def getDrill(self):
        """Return whether the drill is on or not."""
        pass

    @abstractmethod
    def setDrill(self, val):
        """Switch the drill on (truthy val) or off."""
        pass

    @abstractmethod
    def getTriacLevel(self):
        """Return the current triac output level."""
        pass

    @abstractmethod
    def setTriacLevel(self, val):
        """Set the triac output level."""
        pass
class MockRelayTriac(AbstractRelayTriac):
    """In-memory stand-in for the relay/triac board, for machines without GPIO.

    Reproduces the software interlock of the real board: energising one of
    the heater/drill relays forces the other one off.
    """

    def __init__(self):
        # Everything starts switched off, matching the hardware reset state.
        self.heater = False
        self.drill = False
        self.triacLevel = 0.0

    def getHeater(self):
        """Return the simulated heater relay state."""
        return self.heater

    def getDrill(self):
        """Return the simulated drill relay state."""
        return self.drill

    def getTriacLevel(self):
        """Return the simulated triac output level."""
        return self.triacLevel

    def setHeater(self, val):
        """Set the heater relay; a truthy value also drops the drill relay."""
        if val:
            self.drill = False
        self.heater = val

    def setDrill(self, val):
        """Set the drill relay; a truthy value also drops the heater relay."""
        if val:
            self.heater = False
        self.drill = val

    def setTriacLevel(self, val):
        """Store the requested triac level verbatim."""
        self.triacLevel = val
class FileWriterThread(threading.Thread):
    """Background thread that logs the relay/triac outputs to a CSV file.

    One row is written per sampling interval (config section 'RelayAndTriac',
    key 'SamplingTime') until stop() is called.
    """

    def __init__(self, relay_triac):
        """relay_triac: object exposing getTriacLevel/getDrill/getHeater."""
        threading.Thread.__init__(self)
        self.relay_triac = relay_triac
        # True until run() starts; stop() sets it back to end the loop.
        self.stopped = True

    def run(self):
        self.stopped = False
        time_start_s = time.time()
        keys = ["time_s", "triac_level", "drill", "heater"]
        sampling_time = config.getfloat("RelayAndTriac", "SamplingTime")
        # 'with' guarantees the log file is closed even if sampling raises.
        with open(f"RelayTriac_{time_start_s}.csv", "w") as fp:
            # Join the header so it has exactly as many columns as the data
            # rows below (the old per-key writes left a trailing comma,
            # giving the header one extra empty column).
            fp.write(",".join(keys) + "\n")
            while not self.stopped:
                loop_start = time.time()
                fp.write(f"{loop_start},{self.relay_triac.getTriacLevel()}," \
                    f"{self.relay_triac.getDrill()},{self.relay_triac.getHeater()}\n")
                delta_time = time.time() - loop_start
                # Sleep only for the remainder of the interval.
                if delta_time < sampling_time:
                    time.sleep(sampling_time - delta_time)

    def stop(self):
        """Ask run() to finish its current sample and exit."""
        self.stopped = True
class RelayTriac(AbstractRelayTriac):
    """Hardware implementation driving real GPIO pins via gpiozero.

    Pin numbers come from the 'RelayAndTriac' section of config.ini.  The
    relays are wired active-low (active_high=False), and heater/drill are
    kept mutually exclusive in software.  A FileWriterThread logs all
    outputs to CSV for the lifetime of the object.

    Fix: removed dataset-artifact junk that was fused onto the last line
    and made the file syntactically invalid.
    """

    def __init__(self):
        self.file_writer_thread = FileWriterThread(self)
        # Triac is a PWM output; .value is the duty cycle in [0, 1].
        self.triac = PWMLED(config.getint('RelayAndTriac', 'TriacGPIOPin'))
        self.drill = DigitalOutputDevice(
            config.getint('RelayAndTriac', 'DrillRelayPin'), active_high = False)
        self.heater = DigitalOutputDevice(
            config.getint('RelayAndTriac', 'HeaterRelayPin'), active_high = False)
        # Start with every output off before logging begins.
        self.triac.value = 0.0
        self.drill.off()
        self.heater.off()
        self.file_writer_thread.start()
        print("Finished initializing RelayTriac...")

    def getHeater(self):
        return (self.heater.value > 0)

    def setHeater(self, val):
        # Interlock: heater and drill must never be on at the same time.
        if val:
            self.drill.off()
            self.heater.on()
        else:
            self.heater.off()

    def getDrill(self):
        return (self.drill.value > 0)

    def setDrill(self, val):
        if val:
            self.heater.off()
            self.drill.on()
        else:
            self.drill.off()

    def getTriacLevel(self):
        return self.triac.value

    def setTriacLevel(self, val):
        print(f"Setting triac level to: {val}")
        self.triac.value = val
1636399 | from __future__ import division
import argparse
import matplotlib.pyplot as plt
import pickle
import gzip
import numpy as np
import tensorflow as tf
import matplotlib.gridspec as gridspec
import os
# from tensorflow.examples.tutorials.mnist import input_data
# np.set_printoptions(threshold=np.inf)
# Training data: pickled screenshot frames stored in a gzip stream.
f =gzip.open('./screenshot_data2002003.gzip','rb')
# Checkpoint path used by tf.train.Saver after every iteration.
save_file='./model/vae.ckpt'
z_dim = 500   # latent code size
X_dim = 200   # input images are X_dim x X_dim
X_channel = 1 # 1 = single channel (green plane of RGB), 3 = full RGB
conv_dim = 32 # number of convolution filters
h_dim = 128   # hidden dense-layer width
VAE=False # VAE if true, else AE
CONV=True # convolution if true, else dense layers only
#lr = 1e-4
def lrelu(x, alpha=0.1):
    """Leaky ReLU: identity for positive inputs, slope *alpha* for negative ones."""
    positive = tf.nn.relu(x)
    negative = tf.nn.relu(-x)
    return positive - alpha * negative
def xavier_init(size):
    """Sample initial weights of shape *size* with Xavier-style scaling.

    The standard deviation is 1/sqrt(fan_in / 2) where fan_in is size[0].
    """
    fan_in = size[0]
    return tf.random_normal(shape=size, stddev=1. / tf.sqrt(fan_in / 2.))
# =============================== Q(z|X) ======================================
# Graph inputs: image batch X, latent sample z (for decoding), learning rate.
X = tf.placeholder(tf.float32, shape=[None,X_dim,X_dim,X_channel])
z = tf.placeholder(tf.float32, shape=[None, z_dim])
lr = tf.placeholder(tf.float32)
if CONV:
    # Dense weights applied after the conv stack; the first conv halves the
    # spatial size (stride 2), hence X_dim*X_dim/(2*2)*conv_dim flat features.
    with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
        Q_W1 = tf.Variable(xavier_init([int(X_dim*X_dim/((2*2))*conv_dim), h_dim]))
        Q_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
        Q_W2_mu = tf.Variable(xavier_init([h_dim, z_dim]))
        Q_b2_mu = tf.Variable(tf.zeros(shape=[z_dim]))
        Q_W2_sigma = tf.Variable(xavier_init([h_dim, z_dim]))
        Q_b2_sigma = tf.Variable(tf.zeros(shape=[z_dim]))
    def Q(X):
        """Convolutional encoder: image batch -> (z_mu, z_logvar)."""
        with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
            # X = tf.reshape(X, [-1, X_dim, X_dim, 3])
            # Stride-2 conv halves height/width.
            conv = tf.contrib.layers.conv2d(X,
                                conv_dim,
                                [5, 5],
                                (2, 2),
                                padding='SAME',
                                activation_fn=lrelu,
                                normalizer_fn=tf.contrib.layers.batch_norm)
            conv = tf.contrib.layers.conv2d(conv,
                                conv_dim,
                                [5, 5],
                                (1, 1),
                                padding='SAME',
                                activation_fn=lrelu,
                                normalizer_fn=tf.contrib.layers.batch_norm)
            flat = tf.contrib.layers.flatten(conv)
            #print(flat.shape)
            h = tf.nn.relu(tf.matmul(flat, Q_W1) + Q_b1)
            # Two heads: mean and log-variance of the approximate posterior.
            z_mu = tf.matmul(h, Q_W2_mu) + Q_b2_mu
            z_logvar = tf.matmul(h, Q_W2_sigma) + Q_b2_sigma
        return z_mu, z_logvar
else: # dense layers only
    def Q(X):
        """Fully-connected encoder: image batch -> (z_mu, z_logvar)."""
        with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
            X=tf.layers.flatten(X)
            X=tf.layers.dense(X, h_dim, activation=lrelu)
            z_mu=tf.layers.dense(X, z_dim, activation=None)
            z_logvar=tf.layers.dense(X, z_dim, activation=None)
        return z_mu, z_logvar
def sample_z(mu, log_var):
    """Reparameterisation trick: draw z ~ N(mu, exp(log_var)) differentiably."""
    std = tf.math.exp(log_var / 2)
    eps = tf.random_normal(shape=tf.shape(mu))
    return mu + std * eps
# =============================== P(X|z) ======================================
if CONV:
    # Dense layers mapping z back up to the flattened conv feature volume.
    P_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
    P_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
    P_W2 = tf.Variable(xavier_init([h_dim, int(X_dim*X_dim/((2*2))*conv_dim)]))
    P_b2 = tf.Variable(tf.zeros(shape=[int(X_dim*X_dim/((2*2))*conv_dim)]))
    def P(z):
        """Convolutional decoder: latent batch -> (reconstruction, logits)."""
        h = tf.nn.relu(tf.matmul(z, P_W1) + P_b1)
        logits = tf.matmul(h, P_W2) + P_b2
        # Reshape to the half-resolution feature map the encoder produced.
        logits=tf.reshape(logits, [-1,int(X_dim/2),int(X_dim/2),conv_dim])
        trans_conv = tf.contrib.layers.conv2d_transpose(logits,
                                conv_dim,
                                [5, 5],
                                (1, 1),
                                padding='SAME',
                                activation_fn=lrelu,
                                normalizer_fn=tf.contrib.layers.batch_norm)
        # Stride-2 transpose conv restores the full X_dim resolution; sigmoid
        # keeps pixel values in [0, 1] to match the /255-scaled inputs.
        trans_conv = tf.contrib.layers.conv2d_transpose(trans_conv,
                                X_channel, # output dim, 3 for 3-channel image
                                [5, 5],
                                (2, 2),
                                padding='SAME',
                                # activation_fn=lrelu,
                                activation_fn=tf.nn.sigmoid,
                                normalizer_fn=tf.contrib.layers.batch_norm)
        # out = tf.nn.sigmoid(trans_conv)
        # out = tf.nn.relu6(trans_conv)/6.
        # out = tf.nn.relu(trans_conv)
        out = trans_conv
        return out, logits
else: # dense layers only
    def P(z):
        """Fully-connected decoder: latent batch -> (reconstruction, logits)."""
        z=tf.layers.dense(z, h_dim, activation=lrelu)
        logits=tf.layers.dense(z, X_dim*X_dim*conv_dim, activation=lrelu)
        out=tf.nn.sigmoid(logits)
        out=tf.reshape(out, [-1, X_dim, X_dim, X_channel])
        return out, logits
# =============================== TRAINING ====================================
z_mu, z_logvar = Q(X)
z_sample = sample_z(z_mu, z_logvar)
# VAE decodes a stochastic sample; plain AE decodes the mean directly.
if VAE:
    out, logits = P(z_sample)
else:
    out, logits = P(z_mu)
# Sampling from random z
X_samples, _ = P(z)
# E[log P(X|z)]
# recon_loss = tf.reduce_sum(tf.abs(out - X))
recon_loss=tf.reduce_sum(tf.losses.mean_squared_error(out, X))
# D_KL(Q(z|X) || P(z)); calculate in closed form as both dist. are Gaussian
kl_loss = 0.5 * tf.reduce_sum(tf.math.exp(z_logvar) + z_mu**2 - 1. - z_logvar)
#recon_loss=tf.reduce_sum(tf.abs(X - X))
if VAE:
    # VAE loss
    vae_loss = tf.reduce_mean(recon_loss + kl_loss)
else:
    # AE loss
    vae_loss = tf.reduce_mean(recon_loss)
solver = tf.train.AdamOptimizer(lr).minimize(vae_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
# Generated samples are written here, one PNG per iteration.
if not os.path.exists('convae/'):
    os.makedirs('convae/')
Loss=[]
It=[]
train_times=1000
batch=[]
data_samples=1
epoch_samples=1
# load data
for i in range(data_samples):
    print(i)
    if X_channel>1: # channel==3
        batch.append(pickle.load(f)/255.) # rgb image value range 0-255
    else: # channel==1
        # Keep only the green plane (index 1) as a single-channel image.
        batch.append(pickle.load(f)[:,:,1:2]/255.) # rgb image value range 0-255
print(np.array(batch).shape)
# save original img
if X_channel>1:
    plt.imshow(batch[0])
else:
    plt.imshow(batch[0][:,:,0])
plt.savefig('convae/{}.png'.format(str('origin').zfill(3)), bbox_inches='tight')
# vae training
# NOTE(review): the learning-rate feed 1e-3/train_times is a constant 1e-6,
# not a schedule — confirm this is intentional.
for it in range(train_times):
    for epo in range(data_samples//epoch_samples):
        _, loss ,recon_l, kl_l, output = sess.run([solver, vae_loss,recon_loss,kl_loss,out], \
            feed_dict={X: batch[epo*epoch_samples:epoch_samples*(epo+1)],lr:1e-3/train_times})
    Loss.append(loss)
    It.append(it)
    print('Iter: {}'.format(it))
    #print('Loss: {:.4}'. format(loss),recon_l,kl_l)
    print('Loss: {:.4}, KL: {}, Recon: {}'.format(loss, kl_l, recon_l))
    # Decode a random latent vector and save the generated image.
    sample = sess.run(X_samples, feed_dict={z: np.random.randn(1,z_dim)})
    if X_channel>1:
        plt.imshow(sample.reshape(X_dim,X_dim,X_channel))
    else:
        plt.imshow(sample.reshape(X_dim,X_dim))
    plt.savefig('convae/{}.png'.format(str(it).zfill(3)), bbox_inches='tight')
    saver.save(sess, save_file)
f.close()  # release the gzip handle opened at the top of the script
1609326 | <filename>modules/PiDisplaySleep/main.py<gh_stars>0
from os import popen,system
from time import sleep
# Display state: 1 = on, 0 = off.  The display follows the presence of the
# device below: while it answers nmap's ping scan, the screen stays awake.
state = 1
ip = "192.168.86.26"  # IP address of the device that should keep the display awake

while True:
    # 'latency' appears in nmap's output only when the host is up.
    nmap_out = str(popen('nmap -sP ' + ip).read())
    sleep(2)
    host_up = 'latency' in nmap_out
    if not host_up and state == 1:
        system('vcgencmd display_power 0')  # turn the display off
        state = 0
    elif host_up and state == 0:
        # The old check used find('latency') > 1, which would miss a match
        # at index 0 or 1; substring membership has no such off-by-one.
        system('vcgencmd display_power 1')  # turn the display back on
        state = 1
    sleep(5)  # scan rate in seconds
| StarcoderdataPython |
3203589 | <reponame>mattkjames7/themissc
'''
Prod L Description
========================================================================
FGM 2 Fluxgate Magnetometer Level 2 CDF
FGM 1 Fluxgate Magnetometer Level 1 CDF
FGE 0 Fluxgate Magnetometer Engineering Rate L0 Packets
FGH 0 Fluxgate Magnetometer High Rate Level 0 Packets
FGL 0 Fluxgate Magnetometer Low Rate Level 0 Packets
'''
from . import _FGM
from .DownloadData import DownloadData
from .URL import URL
from .DataAvailability import DataAvailability
from .DeleteDate import DeleteDate
from .ReadCDF import ReadCDF
from .ReadIndex import ReadIndex
from .RebuildDataIndex import RebuildDataIndex
| StarcoderdataPython |
1724164 | <reponame>kojit/calendar_sync_garoon_outlook<filename>calendar_sync_garoon_outlook.py
from pathlib import Path
import base64
import datetime as dt
import dateutil
import json
import requests
from O365 import Account
CONFIG_FILE = 'calendar_sync_garoon_outlook.json'
WEEKS = 2
MAX_EVENT_NUM = 100
def get_period(weeks=None):
    """Return the (start, end) pair bounding the sync window.

    Args:
        weeks: Window length in weeks.  Defaults to the module-level WEEKS
            constant when omitted, preserving the original behaviour while
            letting callers pick a different window.

    Returns:
        A tuple of timezone-aware datetimes ``(now, now + weeks)``.
    """
    if weeks is None:
        weeks = WEEKS
    now = dt.datetime.now().astimezone()
    end = now + dt.timedelta(weeks=weeks)
    return now, end
def get_garoon_events(cfg, now, end):
    """Fetch Garoon events in [now, end] and split them by origin.

    Returns a pair of dicts:
      * events native to Garoon, keyed by Garoon id ('<id>' or
        '<id>_<repeatId>' for occurrences of recurring events);
      * events previously mirrored from Outlook (subject starts with
        'OID:'), keyed by the embedded Outlook id instead.
    """
    cybozu_credential = f"{cfg['CYBOZU_USER_NAME']}:{cfg['CYBOZU_USER_PASSWORD']}"
    basic_credential = f"{cfg['BASIC_AUTH_USER']}:{cfg['BASIC_AUTH_PASSWORD']}"
    basic_b64 = base64.b64encode(basic_credential.encode('utf-8')).decode('utf-8')
    headers = {
        'content-type': 'application/json',
        'X-Cybozu-Authorization': base64.b64encode(cybozu_credential.encode('utf-8')),
        'Authorization': 'Basic ' + basic_b64,
    }
    params = {
        'limit': MAX_EVENT_NUM,
        'rangeStart': now.isoformat(),
        'rangeEnd': end.isoformat(),
    }
    response = requests.get(cfg['BASE_URL'] + 'events', headers=headers, params=params)
    response.raise_for_status()

    events = {}
    outlook_origin_events = {}
    for event in response.json()['events']:
        gid = event['id'] + '_' + event['repeatId'] if 'repeatId' in event else event['id']
        subject = event['subject']
        if subject.startswith('OID:'):
            # 'OID:<outlook id>' prefix marks events this tool copied over.
            outlook_id = subject.split()[0].split(':')[1]
            outlook_origin_events[outlook_id] = event
        else:
            events[gid] = event
    return events, outlook_origin_events
def get_outlook_events(cfg, now, end):
    """Fetch Outlook events in [now, end] and split them by origin.

    Returns (calendar, garoon_origin_events, outlook_events) where the two
    dicts are keyed by Garoon id (events mirrored from Garoon, subject
    'GID:...') and by Outlook object id respectively.
    """
    credential = (cfg['AZURE_APP_APPLICATION_ID'], cfg['AZURE_APP_CLIENT_SECRET'])
    account = Account(credential)
    # First run triggers the interactive OAuth flow; the token is cached.
    if not account.is_authenticated:
        account.authenticate(scopes=['basic', 'calendar_all'])
    schedule = account.schedule()
    calendar = schedule.get_default_calendar()
    # Build the range filter: start >= now AND end <= end.
    q = calendar.new_query('start').greater_equal(now)
    q.chain('and').on_attribute('end').less_equal(end)
    events = calendar.get_events(limit=100, query=q, include_recurring=True)
    """
    # we can only get 25 events, so I will get every weeks
    now = dt.datetime.now().astimezone()
    events = []
    for i in range(WEEKS):
        end = now + dt.timedelta(weeks=1)
        q = calendar.new_query('start').greater_equal(now)
        q.chain('and').on_attribute('end').less_equal(end)
        now = end
        events = events + list(calendar.get_events(limit=100, query=q, include_recurring=True))
    """
    garoon_origin_events = {}
    outlook_events = {}
    for event in events:
        #print('Outlook ' + event.subject)
        # 'GID:<garoon id>' prefix marks events this tool copied from Garoon.
        if event.subject.startswith('GID:'):
            gidpair = event.subject.split()[0]
            garoon_id = gidpair.split(':')[1]
            garoon_origin_events[garoon_id] = event
            print('Outlook - Garoon Origin Event ' + event.subject)
        else:
            outlook_events[event.object_id] = event
            print('Outlook - Outlook Origin Event ' + event.subject)
    return calendar, garoon_origin_events, outlook_events
def update_outlook_event(cfg, oevent, gid, gevent):
    """Copy a Garoon event's fields onto an Outlook event and save it.

    Attributes are assigned only when the value differs, so the O365
    library (which tracks dirty fields) sends a minimal update — or none.
    """
    subject = f"GID:{gid} - {gevent['subject']}"
    if oevent.subject != subject:
        oevent.subject = subject
    # Link back to the base Garoon event; occurrence ids look like '<id>_<repeatId>'.
    oevent.body = cfg['EVENT_URL'] + (gid.split('_')[0] if '_' in gid else gid)
    for attr in ('start', 'end'):
        when = dateutil.parser.parse(gevent[attr]['dateTime'])
        if getattr(oevent, attr) != when:
            setattr(oevent, attr, when)
    if 'facilities' in gevent and len(gevent['facilities']) > 0:
        # Use the first booked facility as the Outlook location.
        location = gevent['facilities'][0]['name']
        if not oevent.location or oevent.location['displayName'] != location:
            oevent.location = location
    # NOTE(review): compares against the string 'true'; confirm Garoon's REST
    # API really returns isAllDay as a string rather than a JSON boolean.
    is_all_day = gevent['isAllDay'] == 'true'
    if oevent.is_all_day != is_all_day:
        oevent.is_all_day = is_all_day
    if oevent.is_reminder_on != False:  # noqa: E712 -- deliberate non-boolean-safe check
        oevent.is_reminder_on = False
    oevent.save()  # O365 module only updates if there is any changes
def main(cfg):
    """One-shot sync: mirror Garoon events onto the Outlook calendar.

    cfg is the parsed JSON configuration (credentials, URLs).
    """
    start, end = get_period()
    try:
        garoon_events, outlook_origin_events = get_garoon_events(cfg, start, end)
        outlook_calendar, garoon_origin_events, outlook_events = get_outlook_events(cfg, start, end)
    except Exception as e:
        # Best-effort tool: report the failure and give up this run.
        print(e)
        return
    ### Garoon -> Outlook
    # remove garoon origin event on outlook if it no longer exists.
    # (list() because we delete from the dict while iterating its keys)
    for key in list(garoon_origin_events.keys()):
        if key not in garoon_events:
            print('remove event {}'.format(key))
            garoon_origin_events[key].delete()
            del garoon_origin_events[key]
    # add/update garoon events to outlook
    for key, value in garoon_events.items():
        if key in garoon_origin_events:
            update_outlook_event(cfg, garoon_origin_events[key], key, value)
        else:
            print('add event - {} {}'.format(key, value['subject']))
            oevent = outlook_calendar.new_event() # creates a new unsaved event
            update_outlook_event(cfg, oevent, key, value)
    ### TODO: Outlook -> Garoon
    """
    # remove outlook origin event on garoon if it no longer exists.
    for key in list(outlook_origin_events.keys()):
        if key not in outlook_events:
            print('remove event {}'.format(key))
            outlook_origin_events[key].delete()
            del outlook_origin_events[key]
    # add/update outlook events to garoon
    for key, value in outlook_events.items():
        if key in outlook_origin_events:
            update_garoon_event(cfg, outlook_origin_events[key], key, value)
        else:
            print('add event - {}'.format(value.subject))
            gevent = garoon_calendar.new_event() # creates a new unsaved event
            update_garoon_event(cfg, gevent, key, value)
    """
if __name__ == '__main__':
    # Look for the config file in the current directory first, then in the
    # user's home directory; the first hit wins.  (The two branches were
    # previously duplicated verbatim.)
    for base in (Path.cwd(), Path.home()):
        config_path = base / CONFIG_FILE
        if config_path.exists():
            with config_path.open() as f:
                main(json.load(f))
            break
    else:
        print('There is no config file')
194387 | # -*- coding: utf-8 -*-
# @Time : 2019-05-15 15:52
# @Author : ShaHeTop-Almighty-ares
# @Email : <EMAIL>
# @File : run.py
# @Software: PyCharm
import os
import warnings
import platform
import threading
from ApplicationExample import create_app
from ExtendRegister.hook_register import * # 导入拦截器
from ExtendRegister.excep_register import * # 导入异常处理器
# Flask application instance from the factory; the ExtendRegister imports
# above register hooks and exception handlers against it as a side effect.
app = create_app()
def run_tips(x):
    """Print a red console hint for the missing environment variable *x*.

    Only 'FLASK_ENV' currently has a tip; anything else prints an empty
    (but still colourised) line.
    """
    if x == 'FLASK_ENV':
        msg = '\n\nTips:未找到Flask环境变量 "FLASK_ENV" 请配置!如需了解配置可查阅:https://github.com/yangyuexiong/Flask_BestPractices\n\n'
    else:
        msg = ''
    # ANSI escape 31m renders the tip in red, 0m resets the colour.
    print(f"\033[31m{msg}\033[0m")
def check_env(*args):
    """Warn (via run_tips) about each environment variable in *args* that is unset or empty."""
    for name in map(str, args):
        if not os.environ.get(name):
            run_tips(name)
def main():
    """Start the Flask application.

    On Linux (assumed to be the server) the app binds to the configured
    host/port without forcing debug mode; elsewhere it runs with the
    DEBUG flag from config for local development.
    """
    # Required environment variable; warn loudly if it is missing.
    check_env('FLASK_ENV')

    # Linux server start-up.
    if platform.system() == 'Linux':
        app.run(host=app.config['RUN_HOST'], port=app.config['RUN_PORT'])
        # NOTE(review): the old STARTUP_MODE switch (terminal vs. PyCharm)
        # is kept below for reference.
        # check_env('STARTUP_MODE')
        # if os.environ.get('STARTUP_MODE') == 'ter':
        #     app.run(host=app.config['RUN_HOST'], port=app.config['RUN_PORT'])
        # if os.environ.get('STARTUP_MODE') == 'pyc':
        #     app.run(debug=True, host='0.0.0.0', port=9999)
    else:
        # app.run(debug=True, host='0.0.0.0', port=9999)
        app.run(debug=app.config.get('DEBUG'), host=app.config.get('RUN_HOST'), port=app.config.get('RUN_PORT'))
if __name__ == '__main__':
    pass
    # The string below is the author's usage note (shell exports for
    # selecting the environment), kept verbatim.
    """
    # 设置环境
    export FLASK_ENV=development
    export FLASK_ENV=production
    export STARTUP_MODE=pyc
    export STARTUP_MODE=ter
    # 调试
    os.environ.get('FLASK_ENV')
    os.environ.get('STARTUP_MODE')
    """
    flask_env = os.environ.get('FLASK_ENV')
    startup_mode = os.environ.get('STARTUP_MODE')
    # Start-up banner: time, OS, paths, environment and process/thread ids.
    # (datetime is expected to come in via the ExtendRegister star imports
    # above — TODO confirm.)
    print('<', '-' * 66, '>')
    print('时间:{}'.format(datetime.datetime.now()))
    print('操作系统:{}'.format(platform.system()))
    print('项目路径:{}'.format(os.getcwd()))
    print('当前环境:{}'.format(flask_env))
    print('启动方式:{}'.format(startup_mode))
    print('threading:{}'.format(threading.get_ident()))
    print('当前进程id:{}'.format(os.getpid()))
    print('父进程id:{}'.format(os.getppid()))
    print('<', '-' * 66, '>')
    main()
| StarcoderdataPython |
1769190 | import numpy as np # linear algebra
import pandas as pd
import pickle
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
# Load the hand-crafted linguistic features for the sports-article corpus.
file = pd.read_excel("../datasets/SportsArticles/features.xlsx")
# NOTE(review): dropna() is not in-place and its result is discarded, so
# rows with NaNs are NOT actually removed — confirm whether this matters.
file.dropna()
#print(str(file.head()))
# Get columns names
#print(str(list(file)))
# Get output
output = file["Label"]
#print(str(output.head()))
# Get input
inputs = file.drop(['Label','TextID','URL','baseform','fullstops','imperative','present3rd','present1st2nd','sentence1st','sentencelast','txtcomplexity','pronouns1st','pronouns2nd','pronouns3rd','compsupadjadv','past','ellipsis','semanticobjscore','semanticsubjscore'], axis=1)
# Normalise POS-tag column names to their conventional symbols.
inputs = inputs.rename(index=str, columns={"NNPs": "NNP", "INs": "IN","TOs":"TO","semicolon":";","commas":",","colon":":"})
kek = list(inputs)
# Fixed feature order fed to the network.
ot = ['NNP', 'VBD', 'VBN', 'IN', 'CD', 'VBP', ',', 'DT', 'NN', 'JJ', 'RB', 'TO', 'SYM', 'PRP', 'NNS', 'CC', 'PRP$', 'POS', 'FW', 'VBG', ':', 'WRB', 'EX', 'JJR', 'WDT', 'totalWordsCount', ';', 'questionmarks', 'exclamationmarks', 'Quotes']
inputs = inputs[ot]
print(" LISTE : "+str(list(inputs.columns)))
print(" LEN LISTE : "+str(len(list(inputs.columns))))
#print(str(list(inputs)))
# 0 = objective
# 1 = subjective
output = output.replace("objective", 0)
output = output.replace("subjective", 1)
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cpu')
X_train, X_test, Y_train, Y_test = train_test_split(inputs, output, test_size=0.33)
# Convert the pandas frames to float32 CPU tensors.
X_train = torch.tensor(X_train.to_numpy())
Y_train = torch.tensor(Y_train.to_numpy())
X_train = X_train.to(device=device, dtype=torch.int64).type(torch.FloatTensor)
Y_train = Y_train.to(device=device, dtype=torch.int64).type(torch.FloatTensor)
X_test = torch.tensor(X_test.to_numpy())
Y_test = torch.tensor(Y_test.to_numpy())
X_test = X_test.to(device=device, dtype=torch.float32).type(torch.FloatTensor)
Y_test = Y_test.to(device=device, dtype=torch.float32).type(torch.FloatTensor)
print("Training ")
D_in = len(list(inputs))# Input Dimension
D_out= 1
print(" input dim :"+str(D_in)+" output dim : "+str(D_out))
# Build an MLP: D_in -> hidden layers (ReLU) -> 1 output (Sigmoid).
modules = []
count=0
representation = [100,100,100,100]
for i in range(len(representation)):
    if count==0:
        modules.append(nn.Linear(D_in, representation[i]))
        modules.append(nn.ReLU())
    elif count==len(representation)-1:
        modules.append(nn.Linear(representation[i-1], D_out))
        modules.append(nn.Sigmoid())
    else:
        modules.append(nn.Linear(representation[i-1], representation[i]))
        modules.append(nn.ReLU())
    count+=1
model = nn.Sequential(*modules)
learning_rate = 1e-4
N = 32 # Batch Size
epochs = 1000
model.train()
size = list(X_train.shape)[0]
loss_fn = torch.nn.MSELoss(reduction='sum')
# Manual SGD loop (no optimizer object): zero grads, backprop, step.
for t in range(epochs):
    print("Epoch ("+str(t+1)+"/"+str(epochs)+")")
    for i in range(0, int(size/N)):
        batchX, batchY = X_train[i*N:(i*N)+N], Y_train[i*N:(i*N)+N]
        # NOTE(review): resize_ is deprecated and can expose uninitialised
        # memory when it grows a tensor; reshape/view would be safer.
        batchX = batchX.resize_(N, D_in) #.to_numpy()#.resize_(N, D_in)
        batchY = batchY.resize_(N, D_out) #.to_numpy()#.resize_(N, D_out)
        y_pred = model(batchX)
        loss = loss_fn(y_pred, batchY)
        if t==0:
            print("Loss : "+str(loss))
        model.zero_grad()
        loss.backward()
        with torch.no_grad():
            for param in model.parameters():
                param.data -= learning_rate * param.grad
# Evaluation: accuracy of the rounded sigmoid outputs vs. held-out labels.
model.eval()
from sklearn.metrics import accuracy_score
with torch.no_grad():
    y_pred = torch.round(model(X_test))
    result = accuracy_score(Y_test, y_pred)
print("Accuracy : "+str(result))
# Persist the trained network; 'with' closes the file handle that the old
# inline open() call leaked.  (torch.save would be the more usual choice.)
with open("./models/model-objectivity.pickle", "wb") as model_file:
    pickle.dump(model, model_file)
3292932 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class project_compute_tasks(osv.osv_memory):
    """Transient wizard that schedules a project's tasks and opens the result."""
    _name = 'project.compute.tasks'
    _description = 'Project Compute Tasks'
    _columns = {
        'project_id': fields.many2one('project.project', 'Project', required=True)
    }

    def compute_date(self, cr, uid, ids, context=None):
        """
        Schedule the tasks according to users and priority.
        """
        project_pool = self.pool.get('project.project')
        # NOTE(review): task_pool is fetched but never used here.
        task_pool = self.pool.get('project.task')
        if context is None:
            context = {}
        context['compute_by'] = 'project'
        data = self.read(cr, uid, ids, [])[0]
        # many2one fields read as (id, name) tuples; keep only the id.
        project_id = data['project_id'][0]
        project_pool.schedule_tasks(cr, uid, [project_id], context=context)
        return self._open_task_list(cr, uid, data, context=context)

    def _open_task_list(self, cr, uid, data, context=None):
        """
        Return the scheduled task list.
        """
        if context is None:
            context = {}
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        # NOTE(review): 'act_resouce_allocation' looks misspelled, but it must
        # match the XML id declared in project_long_term — do not "fix" it
        # without checking the data files.
        result = mod_obj._get_id(cr, uid, 'project_long_term', 'act_resouce_allocation')
        # ('id' shadows the builtin; kept as-is for byte-compatibility.)
        id = mod_obj.read(cr, uid, [result], ['res_id'])[0]['res_id']
        result = {}
        if not id:
            return result
        result = act_obj.read(cr, uid, [id], context=context)[0]
        result['target'] = 'current'
        return result
# Legacy OpenERP convention: instantiating the class registers the model
# in the ORM pool.
project_compute_tasks()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StarcoderdataPython |
import numpy as np  # linear algebra
import matplotlib.pyplot as plt

# Resting pulse (x*) and fitness index (y*): m = girls (Meitenes), p = boys (Puiši).
xm = np.array([78, 82, 72, 76, 74, 69])
xp = np.array([65, 70, 62, 82])
ym = np.array([44.44, 46.32, 90.91, 83.33, 78.95, 74.44])
yp = np.array([94.94, 84.51, 99.12, 42.55])

plt.scatter(xp, yp, c = 'c', marker = 'o', label = "Puiši")
plt.scatter(xm, ym, c = 'm', marker = 'o', label = "Meitenes")
plt.title("Fiziskā sagatavotība/miera pulss")
plt.xlabel("Miera stāvokļa pulss")
plt.ylabel("Fiziskās sagatavotības indekss")

# Build the combined sample from the arrays above instead of retyping the
# numbers: the old hand-typed literal for x contained 77 where xm has 76.
x = np.concatenate((xm, xp))
y = np.concatenate((ym, yp))
# Least-squares regression line over all ten observations.
m, b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b)

plt.legend(loc = "center left", bbox_to_anchor = (1, 0.5), numpoints = 1)
plt.show()
| StarcoderdataPython |
1771817 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages

import sys
import os.path

# Make the package importable so its metadata constants can be read below
# before installation.
sys.path.insert(0, os.path.abspath('.'))

from elbhelper import __version__, __author__, __author_email__

# Runtime dependencies, one requirement per line.
with open('requirements.txt') as f:
    _requirements = f.read().splitlines()

# PyPI long description comes straight from the README.
with open('README.md') as f:
    _long_description = f.read()

setup(
    name='elbhelper',
    version=__version__,
    packages=find_packages(),
    url='https://github.com/PaloAltoNetworks-BD/aws-elbhelper',
    license='http://www.apache.org/licenses/LICENSE-2.0',
    author=__author__,
    author_email=__author_email__,
    description='Targeted script that allows update of the FW NAT rules based on the dynamic AWS ELB VIP changes',
    include_package_data=True,
    install_requires=_requirements,
    long_description=_long_description
)
| StarcoderdataPython |
3337373 | <reponame>Balogunolalere/masonite-crud
"""Post Model."""
from masoniteorm.models import Model
class Post(Model):
    """Post model mapped to the `posts` table via masoniteorm."""

    __table__ = 'posts'
    # Columns that may be mass-assigned through create()/update().
    __fillable__ = ['title', 'author', 'body', 'description']
137868 | <reponame>mccreery/sandbox
import math
class Point(object):
    """A mutable 2-D point supporting arithmetic operators and in-place rotation.

    Instances compare by coordinate value; arithmetic returns new instances of
    the same (sub)class, while :meth:`rotate` mutates in place and returns self.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def rotate(self, angle):
        """Rotate this point about the origin by *angle* radians, in place."""
        sin, cos = math.sin(angle), math.cos(angle)
        # Both new coordinates are computed from the old ones simultaneously.
        self.x, self.y = (self.x * cos - self.y * sin,
                          self.x * sin + self.y * cos)
        return self

    def __mul__(self, factor):
        cls = type(self)
        return cls(self.x * factor, self.y * factor)

    def __rmul__(self, factor):
        # scalar * point delegates to point * scalar
        return self * factor

    def __truediv__(self, divisor):
        return self * (1.0 / divisor)

    def __neg__(self):
        return type(self)(-self.x, -self.y)

    def __pos__(self):
        return self

    def __abs__(self):
        return type(self)(abs(self.x), abs(self.y))

    def __add__(self, other):
        return type(self)(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        # Subtraction is addition of the negated operand.
        return self + (-other)

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

    def __ne__(self, other):
        return not (self.x == other.x and self.y == other.y)


# Shared origin constant attached after the class body.
Point.ORIGIN = Point(0, 0)
44736 | #!/usr/bin/python
import hsgw
from sys import argv, exit
if len(argv) != 4:
print argv[0], "<key> <addr> <value>"
(key, addr, value) = argv[1:]
print "key =", key
print "addr =", addr
print "value =", value
if not hsgw.initConnection(key = key):
print "Could not initialize connection."
exit(1)
print "Setting value of", hsgw.comm_objects[addr]['name'].encode('utf-8'), "[" + str(addr) + "] to ", value
hsgw.setValue(addr, value)
hsgw.closeConnection()
| StarcoderdataPython |
3359007 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.tests.core import ExecutorForTest, TestBase
from mars.tensor import tensor
from mars.dataframe import Series, DataFrame
class Test(TestBase):
    """Execution tests for mars dataframe quantile/corr reductions.

    Each test builds a raw pandas object, wraps it in a (possibly chunked)
    mars object, executes the mars expression and compares against the pandas
    result computed directly on the raw data.
    """

    def setUp(self) -> None:
        super().setUp()
        # Executor backed by the local numpy engine.
        self.executor = ExecutorForTest('numpy')

    def testSeriesQuantileExecution(self):
        """Series.quantile with scalar, list, interpolation and tensor q."""
        raw = pd.Series(np.random.rand(10), name='a')
        a = Series(raw, chunk_size=3)
        # q = 0.5, scalar
        r = a.quantile()
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile()
        self.assertEqual(result, expected)
        # q is a list
        r = a.quantile([0.3, 0.7])
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile([0.3, 0.7])
        pd.testing.assert_series_equal(result, expected)
        # test interpolation
        r = a.quantile([0.3, 0.7], interpolation='midpoint')
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile([0.3, 0.7], interpolation='midpoint')
        pd.testing.assert_series_equal(result, expected)
        ctx, executor = self._create_test_context(self.executor)
        with ctx:
            q = tensor([0.3, 0.7])
            # q is a tensor
            r = a.quantile(q)
            result = executor.execute_dataframes([r])[0]
            expected = raw.quantile([0.3, 0.7])
            pd.testing.assert_series_equal(result, expected)

    def testDataFrameQuantileExecution(self):
        """DataFrame.quantile across axes, q shapes and numeric_only."""
        raw = pd.DataFrame({'a': np.random.rand(10),
                            'b': np.random.randint(1000, size=10),
                            'c': np.random.rand(10),
                            'd': [np.random.bytes(10) for _ in range(10)],
                            'e': [pd.Timestamp(f'201{i}') for i in range(10)],
                            'f': [pd.Timedelta(f'{i} days') for i in range(10)]
                            },
                           index=pd.RangeIndex(1, 11))
        df = DataFrame(raw, chunk_size=3)
        # q = 0.5, axis = 0, series
        r = df.quantile()
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile()
        pd.testing.assert_series_equal(result, expected)
        # q = 0.5, axis = 1, series
        r = df.quantile(axis=1)
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile(axis=1)
        pd.testing.assert_series_equal(result, expected)
        # q is a list, axis = 0, dataframe
        r = df.quantile([0.3, 0.7])
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile([0.3, 0.7])
        pd.testing.assert_frame_equal(result, expected)
        # q is a list, axis = 1, dataframe
        r = df.quantile([0.3, 0.7], axis=1)
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile([0.3, 0.7], axis=1)
        pd.testing.assert_frame_equal(result, expected)
        # test interpolation
        r = df.quantile([0.3, 0.7], interpolation='midpoint')
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw.quantile([0.3, 0.7], interpolation='midpoint')
        pd.testing.assert_frame_equal(result, expected)
        ctx, executor = self._create_test_context(self.executor)
        with ctx:
            q = tensor([0.3, 0.7])
            # q is a tensor
            r = df.quantile(q)
            result = executor.execute_dataframes([r])[0]
            expected = raw.quantile([0.3, 0.7])
            pd.testing.assert_frame_equal(result, expected)
        # test numeric_only
        raw2 = pd.DataFrame({'a': np.random.rand(10),
                             'b': np.random.randint(1000, size=10),
                             'c': np.random.rand(10),
                             'd': [pd.Timestamp(f'201{i}') for i in range(10)],
                             }, index=pd.RangeIndex(1, 11))
        df2 = DataFrame(raw2, chunk_size=3)
        r = df2.quantile([0.3, 0.7], numeric_only=False)
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw2.quantile([0.3, 0.7], numeric_only=False)
        pd.testing.assert_frame_equal(result, expected)
        r = df2.quantile(numeric_only=False)
        result = self.executor.execute_dataframe(r, concat=True)[0]
        expected = raw2.quantile(numeric_only=False)
        pd.testing.assert_series_equal(result, expected)

    def testDataFrameCorr(self):
        """DataFrame.corr: single chunk, chunked, kendall rejection, min_periods."""
        rs = np.random.RandomState(0)
        raw = rs.rand(20, 10)
        # NaN-out values <= 0.4 to exercise missing-data handling.
        raw = pd.DataFrame(np.where(raw > 0.4, raw, np.nan), columns=list('ABCDEFGHIJ'))
        raw['k'] = pd.Series(['aaa'] * 20)
        df = DataFrame(raw)
        result = df.corr()
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      raw.corr())
        result = df.corr(method='kendall')
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      raw.corr(method='kendall'))
        df = DataFrame(raw, chunk_size=6)
        # kendall is not supported on chunked frames and must raise.
        with self.assertRaises(Exception):
            self.executor.execute_dataframe(df.corr(method='kendall'), concat=True)
        result = df.corr()
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      raw.corr())
        result = df.corr(min_periods=7)
        pd.testing.assert_frame_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                      raw.corr(min_periods=7))

    def testDataFrameCorrWith(self):
        """DataFrame.corrwith against frames and series, both axes, chunked."""
        rs = np.random.RandomState(0)
        raw_df = rs.rand(20, 10)
        raw_df = pd.DataFrame(np.where(raw_df > 0.4, raw_df, np.nan), columns=list('ABCDEFGHIJ'))
        # Second frame shares only part of the column set (alignment path).
        raw_df2 = rs.rand(20, 10)
        raw_df2 = pd.DataFrame(np.where(raw_df2 > 0.4, raw_df2, np.nan), columns=list('ACDEGHIJKL'))
        raw_s = rs.rand(20)
        raw_s = pd.Series(np.where(raw_s > 0.4, raw_s, np.nan))
        raw_s2 = rs.rand(10)
        raw_s2 = pd.Series(np.where(raw_s2 > 0.4, raw_s2, np.nan), index=raw_df2.columns)
        df = DataFrame(raw_df)
        df2 = DataFrame(raw_df2)
        result = df.corrwith(df2)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                       raw_df.corrwith(raw_df2))
        result = df.corrwith(df2, axis=1)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                       raw_df.corrwith(raw_df2, axis=1))
        result = df.corrwith(df2, method='kendall')
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0],
                                       raw_df.corrwith(raw_df2, method='kendall'))
        df = DataFrame(raw_df, chunk_size=4)
        df2 = DataFrame(raw_df2, chunk_size=6)
        s = Series(raw_s, chunk_size=5)
        s2 = Series(raw_s2, chunk_size=5)
        # kendall is not supported on chunked frames and must raise.
        with self.assertRaises(Exception):
            self.executor.execute_dataframe(df.corrwith(df2, method='kendall'), concat=True)
        # Chunked results may come back unordered, so compare sorted indexes.
        result = df.corrwith(df2)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0].sort_index(),
                                       raw_df.corrwith(raw_df2).sort_index())
        result = df.corrwith(df2, axis=1)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0].sort_index(),
                                       raw_df.corrwith(raw_df2, axis=1).sort_index())
        result = df.corrwith(s)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0].sort_index(),
                                       raw_df.corrwith(raw_s).sort_index())
        result = df.corrwith(s2, axis=1)
        pd.testing.assert_series_equal(self.executor.execute_dataframe(result, concat=True)[0].sort_index(),
                                       raw_df.corrwith(raw_s2, axis=1).sort_index())

    def testSeriesCorr(self):
        """Series.corr and Series.autocorr, single chunk and chunked."""
        rs = np.random.RandomState(0)
        raw = rs.rand(20)
        raw = pd.Series(np.where(raw > 0.4, raw, np.nan))
        raw2 = rs.rand(20)
        raw2 = pd.Series(np.where(raw2 > 0.4, raw2, np.nan))
        s = Series(raw)
        s2 = Series(raw2)
        result = s.corr(s2)
        self.assertEqual(self.executor.execute_dataframe(result, concat=True)[0],
                         raw.corr(raw2))
        result = s.corr(s2, method='kendall')
        self.assertEqual(self.executor.execute_dataframe(result, concat=True)[0],
                         raw.corr(raw2, method='kendall'))
        result = s.autocorr(2)
        self.assertEqual(self.executor.execute_dataframe(result, concat=True)[0],
                         raw.autocorr(2))
        s = Series(raw, chunk_size=6)
        s2 = Series(raw2, chunk_size=4)
        # kendall is not supported on chunked series and must raise.
        with self.assertRaises(Exception):
            self.executor.execute_dataframe(s.corr(s2, method='kendall'), concat=True)
        # Chunked execution may differ by floating-point rounding; compare
        # approximately rather than exactly.
        result = s.corr(s2)
        self.assertAlmostEqual(self.executor.execute_dataframe(result, concat=True)[0],
                               raw.corr(raw2))
        result = s.corr(s2, min_periods=7)
        self.assertAlmostEqual(self.executor.execute_dataframe(result, concat=True)[0],
                               raw.corr(raw2, min_periods=7))
        result = s.autocorr(2)
        self.assertAlmostEqual(self.executor.execute_dataframe(result, concat=True)[0],
                               raw.autocorr(2))
| StarcoderdataPython |
1787414 | import cv2 as cv
import os
import imutils
# La capa de entrada es la primera parte de nuestra pequeña red neuronal que almacenara
# todos los datos iniciales de Reconocimiento Facial
class Capa1Entrada():
    """Input layer of the small face-recognition pipeline.

    Captures face crops from the webcam with a Haar cascade and stores them on
    disk as training images for a given person.
    """

    def __init__(self):
        pass

    def CrearCarpeta(self, nombrePersona):
        """Create (if needed) the folder where this person's face images go."""
        self.nombreModelo = nombrePersona
        rutaCaras = 'C:/Users/A L E J A N D R O/Documents/Python/Reconocimiento Facial (Basico)/Entrenamientos/Caras'
        # Full destination path for this person's images.
        self.rutaCompleta = rutaCaras + '/' + self.nombreModelo
        # Create the folder only if it does not exist yet.
        if not os.path.exists(self.rutaCompleta):
            print(f'\nLa carpeta en la ruta {self.rutaCompleta} no existe...')
            os.makedirs(self.rutaCompleta)
            print(f'Se creo la carpeta {self.nombreModelo} en la ruta {self.rutaCompleta}')

    def Captura(self):
        """Capture webcam frames, detect faces and save 160x160 crops.

        Stops after writing 501 images (ids 0..500) or when the camera read
        fails. Images are written into the folder prepared by CrearCarpeta.
        """
        # Camera index 0; could also be a path to an image/video file.
        self.video = cv.VideoCapture(0)
        self.ruidos = cv.CascadeClassifier('Reconocimiento Facial (Basico)\Entrenamientos\haarcascade_frontalface_default.xml')
        id = 0  # running image counter; NOTE(review): shadows the builtin id()
        while True:
            # Read one frame; respuesta is False when the capture fails.
            respuesta, captura = self.video.read()
            if respuesta == False:
                print(f'--Error al ejecutar el video: {respuesta}')
                break
            # Downscale the frame so processing/storage stays light.
            captura = imutils.resize(captura, width = 640)
            print('Convirtiendo Captura a Escala de Grises...')
            # Grayscale version improves cascade detection.
            grisCaptura = cv.cvtColor(captura, cv.COLOR_BGR2GRAY)
            # Keep an unannotated copy to cut the face crops from.
            idCaptura = captura.copy()
            caraDetectada = self.ruidos.detectMultiScale(grisCaptura, 1.3, 5)
            for (x, y, e1, e2) in caraDetectada:
                # Draw the detection rectangle on the preview frame only.
                cv.rectangle(captura, (x, y), (x+e1, y+e2), (0, 255, 0), 2)
                rostroCapturado = idCaptura[y:y + e2, x:x + e1]
                # Normalise every crop to 160x160 for the training stage.
                rostroCapturado = cv.resize(rostroCapturado, (160, 160), interpolation = cv.INTER_CUBIC)
                cv.imwrite(self.rutaCompleta+'/imagen_{}.jpg'.format(id), rostroCapturado)
                id = id + 1
            cv.imshow('Resultado rostro', captura)
            if id == 501:
                print(f'\nSe Capturaron: {id} fotografias.')
                break
        self.video.release()
        cv.destroyAllWindows()
        print(f'Finalizando Caputa de rostro...')
| StarcoderdataPython |
35078 | <filename>slack_bolt/response/__init__.py<gh_stars>100-1000
from .response import BoltResponse
| StarcoderdataPython |
1756689 | <gh_stars>0
from .core import get_tweets
from .console import get_text
def main():
    """Read a search text from the console and print every matching tweet."""
    query = get_text()
    if not query:
        # Nothing entered: do nothing, mirroring the truthiness guard.
        return
    for tweet in get_tweets(query):
        print(tweet)
| StarcoderdataPython |
1645837 |
# !pip install selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep, strftime
from random import randint
import pandas as pd
from selenium.webdriver.common.by import By
import time
from config import *
import logging
logging.basicConfig(filename='test.log', level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s')
# Hourly action caps; randomised so the activity rhythm looks less mechanical.
limits = {}
limits['follow_limit_per_hour'] = randint(5,10)
limits['unfollow_limit_per_hour'] = randint(3,10)
limits['like_limit_per_hour'] = randint(50,80)
limits['comment_limit_per_hour'] = randint(10,19)

# How many posts to walk through per hashtag in the main loop.
posts_to_reach_per_hashtag = 50

# Session bookkeeping: accounts touched this run.
new_followed = []
new_unfollowed = []
# Counters for the current rate-limit window (reset when a window elapses).
my_dict = {}
# Cumulative counters for the whole run (never reset).
my_dict_cum = {}
my_dict['followed'] = 0
my_dict['unfollowed'] = 0
my_dict['likes'] = 0
my_dict['comments'] = 0
my_dict['total_actions'] = 0
# Start timestamp of the current window, per action type.
my_dict_time = {}
my_dict_time['like_timer'] = time.time()
my_dict_time['follow_timer'] = time.time()
my_dict_time['unfollow_timer'] = time.time()
my_dict_time['comment_timer'] = time.time()
my_dict_cum['followed'] = 0
my_dict_cum['unfollowed'] = 0
my_dict_cum['likes'] = 0
my_dict_cum['comments'] = 0
my_dict_cum['total_actions'] = 0

# Use WebDriver to open a Chrome tab and navigate to the Instagram login page.
# NOTE(review): this rebinds the name `webdriver` from the selenium module to
# the driver *instance*; all later code relies on that shadowing.
webdriver = webdriver.Chrome(executable_path = chromedriver_path)
webdriver.get("https://www.instagram.com/accounts/login")
sleep(1)

# Fill in the credentials imported from config.
username = webdriver.find_element_by_name("username")
username.send_keys(un)
password = webdriver.find_element_by_name("password")
password.send_keys(pw)
sleep(1)
# Click login button
login_Xpath = '//*[@id="loginForm"]/div/div[3]/button/div'
webdriver.find_element_by_xpath(login_Xpath).click()
sleep(5)

# Click "Not Now" on "Save Your Login Info?" popup
not_now = webdriver.find_element_by_css_selector("#react-root > section > main > div > div > div > div > button")
not_now.click()
sleep(randint(2,5))
# Click "Not Now" on popup "Turn on Notifications"
not_now = webdriver.find_element_by_css_selector("body > div.RnEpo.Yx5HN > div > div > div > div.mt3GC > button.aOOlW.HoLwm")
not_now.click()
sleep(randint(2,5))
#refresh
def refresh(un=un):
    """Open our own profile, open the newest post and comment a hashtag string.

    ``un`` now defaults to the configured account username (bound at import
    time), so the bare ``refresh()`` call used in the main loop works — it
    previously raised TypeError because the argument was required.
    """
    webdriver.get("https://www.instagram.com/" + un + '/')
    sleep(randint(2, 5))
    # Open the first (most recent) picture on the profile grid.
    picture = webdriver.find_element_by_css_selector("#react-root > section > main > div > div._2z6nI > article > div > div > div > div.v1Nh3.kIKUG._bz0w > a > div > div._9AhH0")
    picture.click()
    sleep(randint(2, 5))
    # Focus the comment textarea of the opened post.
    comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
    comment.click()
    sleep(randint(2, 5))
    comment_hashtags = '#gold,#accessories,#earrings,#necklace'
    # Re-locate the textarea (Instagram swaps the element after focusing).
    comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
    comment.send_keys(comment_hashtags)
    sleep(randint(2, 5))
    comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
    comment_click.click()
#Number of followers function
def num_followers(username):
    """Return *username*'s follower count, read from their profile page.

    Opens the profile in a new browser tab (so the post overlay in the main
    tab keeps its state), parses the count, closes the tab and switches focus
    back to the first tab.

    Handles Instagram's abbreviated counts ("12.3k", "1.2m"); anything else is
    parsed as a plain number with thousands separators stripped.
    NOTE(review): counts ending in 'b' (billions) are not handled.
    """
    url = "https://www.instagram.com/" + username + '/'
    sleep(2)
    # Open and switch to a fresh tab for the profile page.
    webdriver.execute_script("window.open('');")
    webdriver.switch_to.window(webdriver.window_handles[1])
    webdriver.get(url)
    sleep(3)
    num_of_followers = webdriver.find_element_by_css_selector('#react-root > section > main > div > header > section > ul > li:nth-child(2) > a > div > span').text
    if num_of_followers[-1] == 'k':
        num = float(num_of_followers[:-1].replace(',', '')) * 1000
    elif num_of_followers[-1] == 'm':
        num = float(num_of_followers[:-1].replace(',', '')) * 1000000
    else:
        num = float(num_of_followers.replace(',', ''))
    sleep(2)
    # Close the profile tab and return focus to the main tab.
    webdriver.close()
    webdriver.switch_to.window(webdriver.window_handles[0])
    return num
#Follow method and moving to next image
def unfollow():
if (time.time()-my_dict_time ['unfollow_timer']) < 3600 and my_dict['unfollowed']<limits['unfollow_limit_per_hour']:
for i in range(2):
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,2))
i+=1
my_dict['unfollowed']+=1
my_dict['total_actions']+=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['unfollow_timer']) > 2*3600:
for i in range(5):
my_dict_time ['unfollow_timer'] =time.time()
my_dict['unfollowed'] = 0
limits['unfollow_limit_per_hour']= randint(3,10)
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,5))
# Increment "unfollowed" counter, add username to new_unfollowed list
new_unfollowed.append(username)
i+=1
my_dict['unfollowed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['unfollow_timer']) > 3600 and my_dict['unfollowed']<limits['unfollow_limit_per_hour']:
for i in range(5):
my_dict_time ['unfollow_timer'] =time.time()
my_dict['unfollowed'] = 0
limits['unfollow_limit_per_hour']= randint(3,10)
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,5))
# Increment "unfollowed" counter, add username to new_unfollowed list
new_unfollowed.append(username)
i+=1
my_dict['unfollowed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
def follow():
    """Follow the author of the currently open post, within the hourly cap.

    Deduplicates the three copy-pasted time-window branches of the original
    into a single gate + single action; branch-by-branch behaviour (including
    the exact boundary conditions) is preserved.
    """
    follow_ = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.bY2yH > button > div")
    username = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.e1e1d > div > span > a").text
    elapsed = time.time() - my_dict_time['follow_timer']
    if elapsed > 2 * 3600 or (elapsed > 3600 and my_dict['followed'] < limits['follow_limit_per_hour']):
        # An hourly window has elapsed: restart it and draw a fresh random cap.
        # (After two full hours the cap check is skipped, as in the original.)
        my_dict_time['follow_timer'] = time.time()
        my_dict['followed'] = 0
        limits['follow_limit_per_hour'] = randint(5, 10)
    elif not (elapsed < 3600 and my_dict['followed'] < limits['follow_limit_per_hour']):
        # Inside the current window but the cap is reached (or exactly on a
        # window boundary): do nothing.
        return
    # Click follow
    follow_.click()
    sleep(randint(30, 60))
    # Increment "followed" counter, add username to new_followed list
    new_followed.append(username)
    my_dict['followed'] += 1
    my_dict['total_actions'] += 1
    my_dict_cum['followed'] += 1
    my_dict_cum['total_actions'] += 1
    logging.debug('follow : {}:total_followed {}: total_actions {}'.format(username, my_dict_cum['followed'], my_dict_cum['total_actions']))
#like function
def like():
    """Like the currently open post, within the hourly like cap.

    Deduplicates the three copy-pasted time-window branches of the original
    into a single gate + single action; behaviour and boundary conditions are
    preserved. The local variable shadowing the function name was renamed.
    """
    elapsed = time.time() - my_dict_time['like_timer']
    if elapsed > 2 * 3600 or (elapsed > 3600 and my_dict['likes'] < limits['like_limit_per_hour']):
        # An hourly window has elapsed: restart it and draw a fresh random cap.
        # (After two full hours the cap check is skipped, as in the original.)
        my_dict_time['like_timer'] = time.time()
        my_dict['likes'] = 0
        limits['like_limit_per_hour'] = randint(80, 120)
    elif not (elapsed < 3600 and my_dict['likes'] < limits['like_limit_per_hour']):
        # Inside the current window but the cap is reached: do nothing.
        return
    like_button = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.ltpMr.Slqrh > span.fr66n > button")
    like_button.click()
    sleep(randint(30, 60))
    # Increment "likes" counter
    my_dict['likes'] += 1
    my_dict['total_actions'] += 1
    my_dict_cum['likes'] += 1
    my_dict_cum['total_actions'] += 1
    logging.debug('like: total_likes {}: total_actions {}'.format(my_dict_cum['likes'], my_dict_cum['total_actions']))
#Comment function
def comment(num_of_followers):
    """Comment on the currently open post, within the hourly comment cap.

    Accounts with more than 20k followers get the brand-ambassador pitch;
    everyone else gets a random entry from ``comments_list`` (from config).

    Deduplicates the three copy-pasted time-window branches of the original
    into a single gate + single action, and fixes an off-by-one: randint is
    inclusive on both ends, so ``randint(1, len(comments_list))`` could index
    one past the end of the list (and never picked index 0).
    """
    elapsed = time.time() - my_dict_time['comment_timer']
    if elapsed > 2 * 3600 or (elapsed > 3600 and my_dict['comments'] < limits['comment_limit_per_hour']):
        # An hourly window has elapsed: restart it and draw a fresh random cap.
        # (After two full hours the cap check is skipped, as in the original.)
        my_dict['comments'] = 0
        my_dict_time['comment_timer'] = time.time()
        limits['comment_limit_per_hour'] = randint(30, 50)
    elif not (elapsed < 3600 and my_dict['comments'] < limits['comment_limit_per_hour']):
        # Inside the current window but the cap is reached: do nothing.
        return
    # Focus the comment textarea of the opened post.
    comment_box = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
    comment_box.click()
    sleep(randint(1, 5))
    # Use "randint" to post different comments.
    # FIX: sample the full valid index range 0..len-1 instead of 1..len.
    rand_comment = randint(0, len(comments_list) - 1)
    if num_of_followers > 20000:
        pick_comment = 'If you are interested being a brand ambassador please leave us a message on our page'
    else:
        pick_comment = comments_list[rand_comment]
    # Re-locate the textarea (Instagram swaps the element after focusing).
    comment_box = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
    comment_box.send_keys(pick_comment)
    sleep(randint(1, 5))
    comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
    comment_click.click()
    sleep(randint(30, 60))
    # Increment "comments" counter
    my_dict['comments'] += 1
    my_dict['total_actions'] += 1
    my_dict_cum['comments'] += 1
    my_dict_cum['total_actions'] += 1
    logging.debug('comment:total_comments {}: total_actions {}'.format(my_dict_cum['comments'], my_dict_cum['total_actions']))
# In[40]:
# Main crawl loop: for every configured hashtag, open its explore page and walk
# through posts, following/liking/commenting within the hourly caps.
for hashtag in hashtag_list:
    # Navigate to Instagram "explore/tags" page for current hashtag
    webdriver.get("https://www.instagram.com/explore/tags/" + hashtag + "/")
    sleep(randint(1, 2))
    # Click on the second thumbnail in the current hashtag's explore page
    first_thumbnail = webdriver.find_element_by_css_selector("#react-root > section > main > article > div.EZdmt > div > div > div:nth-child(1) > div:nth-child(2) > a > div > div._9AhH0")
    first_thumbnail.click()
    sleep(randint(1, 2))
    try:
        # Iterate through the current hashtag
        for _ in range(posts_to_reach_per_hashtag):
            try:
                follow_ = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.bY2yH > button > div")
                username = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.e1e1d > div > span > a").text
                number_of_followers = num_followers(username)
                sleep(randint(1, 3))
                if my_dict['total_actions'] >= 340 and my_dict['total_actions'] < 350:
                    # Close to the session budget: spend the remainder unfollowing.
                    unfollow()
                elif my_dict['total_actions'] >= 350:
                    # Session budget exhausted: report, bump our own profile,
                    # sleep a day, then reset all window counters and timers.
                    print('Actions during this session')
                    print(my_dict)      # FIX: was a no-op bare `my_dict.items()`
                    print('Total actions')
                    print(my_dict_cum)  # FIX: was a no-op bare `my_dict_cum.items()`
                    refresh(un)         # FIX: refresh() requires the account username
                    sleep(86400)
                    my_dict['followed'] = 0
                    my_dict['unfollowed'] = 0
                    my_dict['likes'] = 0
                    my_dict['comments'] = 0
                    my_dict['total_actions'] = 0
                    my_dict_time['like_timer'] = time.time()
                    my_dict_time['follow_timer'] = time.time()
                    my_dict_time['unfollow_timer'] = time.time()
                    my_dict_time['comment_timer'] = time.time()
                elif follow_.text == "Follow" and username != "jewelrymdjewelry" and number_of_followers >= 100:
                    follow()
                    sleep(randint(1, 3))
                    like()
                    sleep(randint(1, 3))
                    comment(number_of_followers)
                    sleep(randint(1, 3))
                # Click "next" to go to next picture within the same hashtag
                # (renamed from `next`, which shadowed the builtin).
                next_button = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.Z2Inc._7c9RR > div > div.l8mY4.feth3 > button")
                next_button.click()
                sleep(randint(2, 5))
            except Exception as ex:
                # Write out what type of Exception
                template = "An exception of type {0} occurred. Arguments:\n{1!r}"
                message = template.format(type(ex).__name__, ex.args)
                print(message)
                # Close any extra tabs opened by num_followers() and return
                # focus to the first tab before moving on.
                driver_len = len(webdriver.window_handles)
                if driver_len > 1:
                    for i in range(driver_len - 1, 0, -1):
                        webdriver.switch_to.window(webdriver.window_handles[i])
                        webdriver.close()
                    webdriver.switch_to.window(webdriver.window_handles[0])
                # Click "next" to go to next picture within the same hashtag
                next_button = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.Z2Inc._7c9RR > div > div.l8mY4.feth3 > button")
                next_button.click()
                sleep(randint(2, 5))
    except Exception as ex:
        # Write out what type of Exception
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        # Close any extra tabs and restore focus before the next hashtag.
        driver_len = len(webdriver.window_handles)
        if driver_len > 1:
            for i in range(driver_len - 1, 0, -1):
                webdriver.switch_to.window(webdriver.window_handles[i])
                webdriver.close()
            webdriver.switch_to.window(webdriver.window_handles[0])
        print(message)

# Final report for the whole run.
print(my_dict_cum)  # FIX: was a no-op bare `my_dict_cum.items()`
| StarcoderdataPython |
3328264 | <gh_stars>0
import os
from os import path as p
import petpy
import pandas
import json
key = os.getenv('API_KEY')
secret = os.getenv('SECRET')
pf = petpy.Petfinder(key=key, secret=secret)
def breeds():
    """Fetch cat and dog breed lists from Petfinder and cache them as JSON.

    No-op (with a message) when both cache files already exist. Relies on
    the module-level ``pf`` Petfinder client.
    """
    if p.exists("cats/cat_breeds.json") and p.exists("dogs/dog_breeds.json"):
        print("json already generated")
        return
    # Context managers close the files even if the API call or json.dumps
    # raises (the original open()/close() pairs leaked handles on error).
    with open("cats/cat_breeds.json", "w+") as cat_breeds:
        cat_breeds.write(json.dumps(pf.breeds('cat')))
    with open("dogs/dog_breeds.json", "w+") as dog_breeds:
        dog_breeds.write(json.dumps(pf.breeds('dog')))
    return
def catnames():
    """Cache cat names from Petfinder into ``cats/cat_names.txt``, one per line.

    No-op (with a message) when the cache file already exists.
    """
    if p.exists("cats/cat_names.txt"):
        print("cat names already generated")
        return
    cats = pf.animals(animal_type='cat', pages=100, return_df=True)
    # ``with`` already closes the file; the explicit close() in the
    # original was redundant.
    with open("cats/cat_names.txt", "w+") as cat_names:
        cat_names.write(cats['name'].str.cat(sep='\n'))
    return
def dognames():
    """Cache dog names from Petfinder into ``dogs/dog_names.txt``, one per line.

    No-op (with a message) when the cache file already exists.
    """
    if p.exists("dogs/dog_names.txt"):
        print("dog names already generated")
        return
    dog = pf.animals(animal_type='dog', pages=100, return_df=True)
    # ``with`` already closes the file; the explicit close() in the
    # original was redundant.
    with open("dogs/dog_names.txt", "w+") as dog_names:
        dog_names.write(dog['name'].str.cat(sep='\n'))
    return
if __name__ == "__main__":
    # Each generator is idempotent: it skips work when its output exists.
    # (A stray trailing ``pass`` in the original was removed.)
    breeds()
    catnames()
    dognames()
3206106 | <filename>web3auth/fields.py
from django.db import models
from django import forms
from web3auth.utils import validate_eth_address, validate_eth_transaction
class EthAddressField(models.CharField):
    """Model field storing a 0x-prefixed Ethereum address (42 chars), indexed by default."""

    def __init__(self, *args, **kwargs):
        # setdefault keeps caller-supplied values while providing defaults.
        kwargs.setdefault('max_length', 42)
        kwargs.setdefault('db_index', True)
        super().__init__(*args, **kwargs)
        self.validators.append(validate_eth_address)
class EthAddressFormField(forms.CharField):
    """Form field validating a 0x-prefixed Ethereum address (42 chars)."""

    def __init__(self, *args, **kwargs):
        # setdefault keeps caller-supplied values while providing a default.
        kwargs.setdefault('max_length', 42)
        super().__init__(*args, **kwargs)
        self.validators.append(validate_eth_address)
class EthTransactionField(models.CharField):
    """Model field storing a 0x-prefixed transaction hash (66 chars), indexed by default."""

    def __init__(self, *args, **kwargs):
        # setdefault keeps caller-supplied values while providing defaults.
        kwargs.setdefault('max_length', 66)
        kwargs.setdefault('db_index', True)
        super().__init__(*args, **kwargs)
        self.validators.append(validate_eth_transaction)
class EthTransactionFormField(forms.CharField):
    """Form field validating a 0x-prefixed transaction hash (66 chars)."""

    def __init__(self, *args, **kwargs):
        # setdefault keeps caller-supplied values while providing a default.
        kwargs.setdefault('max_length', 66)
        super().__init__(*args, **kwargs)
        self.validators.append(validate_eth_transaction)
| StarcoderdataPython |
191027 | '''OpenGL extension SGIX.calligraphic_fragment
This module customises the behaviour of the
OpenGL.raw.GL.SGIX.calligraphic_fragment to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/calligraphic_fragment.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.calligraphic_fragment import *
from OpenGL.raw.GL.SGIX.calligraphic_fragment import _EXTENSION_NAME
def glInitCalligraphicFragmentSGIX():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors PyOpenGL's autogenerated extension template;
    # queries the current GL context for SGIX_calligraphic_fragment.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | StarcoderdataPython |
94528 | <reponame>sanskarvijpuria/Deposit-Information-System
from num2words import num2words
import streamlit as st
from annotated_text import annotated_text
import pandas as pd
from babel.numbers import format_currency, format_decimal
# Page-level Streamlit configuration and static headers.
st.set_page_config(page_title= "Deposit Information", layout='wide', initial_sidebar_state='collapsed')
st.title("Deposit Information")
st.header("Deposit Slip Information Filling System")
# Indian currency note face values, largest first (order drives the form fields).
denominations=[2000,500,200,100,50,20,10,5]
# Dividing things in two columns
left_column_1, right_column_1 = st.beta_columns(2)
def number_of_notes():
    """Render the note-count entry form in the left Streamlit column.

    Builds one ``st.number_input`` per value in the module-level
    ``denominations`` list (inside a form so the page reruns only on submit).

    Returns:
        list: note counts, aligned with ``denominations``.
    """
    number_of_notes=[]
    with left_column_1:
        st.text("Currency Notes Details")
        with st.form(key='my_form'):
            st.text("Enter number of Cash notes of:")
            for denomination in denominations:
                deno = st.number_input("Denomination of {}:".format(denomination), 0)
                number_of_notes.append(deno)
            # A submit button is mandatory inside st.form; its value is unused.
            submit_button = st.form_submit_button(label='Submit')
    return number_of_notes
amount_list = []  # per-denomination subtotals, filled as a side effect of amount()
def amount(number_of_notes, denominations=denominations):
    """Compute the total deposit amount and record per-denomination subtotals.

    Side effect: appends each denomination's subtotal to the module-level
    ``amount_list`` (consumed later when building the summary table).

    Args:
        number_of_notes: note counts, aligned with ``denominations``.
        denominations: face values; defaults to the module-level list.

    Returns:
        The total amount.
    """
    total = 0  # renamed from ``sum`` to avoid shadowing the builtin
    with right_column_1:
        st.text("Amount Details Details")
    for cashnotes, deno in zip(number_of_notes, denominations):
        amt = deno * cashnotes
        amount_list.append(amt)
        total += amt
    return total
# Run the input form and compute totals.  The original rebound the builtins
# ``sum`` and ``str`` (and the function name ``number_of_notes``) at module
# level; distinct names avoid all three shadowings.
note_counts = number_of_notes()
total_amount = amount(note_counts)
amount_list = [format_decimal(i, locale='en_IN') for i in amount_list]
# Right column: summary table, headline total, and the amount in words.
with right_column_1:
    df = pd.DataFrame(list(zip(note_counts, denominations, amount_list)),
                      columns=["Number of Notes", "Denomination", "Amount"])
    df.index = [""] * len(df)  # blank out the numeric index in the rendered table
    st.table(df)
    st.header("Amount is "+ str(format_currency(total_amount, 'INR', locale='en_IN')))
amount_words = "Amount in word is: "+num2words(str(total_amount), to="currency", lang="en_IN",currency='INR').title()
annotated_text((amount_words,"","#faa"))
#Footer HTML
# Raw HTML/CSS for a sticky page footer (rendered verbatim below).
footer="""<style>
a:link , a:visited{
color: blue;
background-color: transparent;
text-decoration: underline;
}
a:hover, a:active {
color: red;
background-color: transparent;
text-decoration: underline;
}
.footer {
position: fixed;
left: 0;
bottom: 0;
width: 100%;
background-color: white;
color: black;
text-align: center;
}
</style>
<div class="footer">
<p>Developed with ❤ by <a style='display: block; text-align: center;' href="https://github.com/sanskarvijpuria" target="_blank"><NAME></a></p>
</div>
"""
# unsafe_allow_html is required for Streamlit to render raw HTML/CSS.
st.markdown(footer,unsafe_allow_html=True)
| StarcoderdataPython |
3259917 | from django.contrib import admin
from .models import Untappd, UntappdMapping, UserCheckIn, UserWishList
# Register your models here.
class UntappdAdmin(admin.ModelAdmin):
    """Admin list view for Untappd beer records; searchable by beer name."""
    list_display = ('beer_id',
                    'brewery',
                    'style',
                    'rating',
                    'num_ratings',
                    'last_updated')
    search_fields = ('beer_id__name', )
class UntappdMappingAdmin(admin.ModelAdmin):
    """Admin list view for beer-to-Untappd-id mappings; searchable by beer name."""
    list_display = ('beer_id',
                    'untappd_id',
                    'auto_match',
                    'verified',
                    'last_updated')
    search_fields = ('beer_id__name', )
class UserCheckInAdmin(admin.ModelAdmin):
    """Admin list view for user check-ins; searchable by beer name."""
    list_display = ('user',
                    'beer_id',
                    'rating',
                    'last_updated')
    search_fields = ('beer_id__name', )
class UserWishListAdmin(admin.ModelAdmin):
    """Admin list view for user wish-list entries; searchable by beer name."""
    list_display = ('user',
                    'beer_id',
                    'last_updated')
    search_fields = ('beer_id__name', )
# Wire each model to its customized admin view.
admin.site.register(Untappd, UntappdAdmin)
admin.site.register(UntappdMapping, UntappdMappingAdmin)
admin.site.register(UserCheckIn, UserCheckInAdmin)
admin.site.register(UserWishList, UserWishListAdmin) | StarcoderdataPython |
101781 | import pPEG
print("Arith operatpr expression example....")
# pPEG grammar: one rule per precedence level; each level collects its
# operands left-to-right, so x^2^3 parses as (^ x 2 3) — see the sample
# output noted in the trailing comments of this file.
arith = pPEG.compile("""
exp = add
add = sub ('+' sub)*
sub = mul ('-' mul)*
mul = div ('*' div)*
div = pow ('/' pow)*
pow = val ('^' val)*
grp = '(' exp ')'
val = " " (sym / num / grp) " "
sym = [a-zA-Z]+
num = [0-9]+
""")
tests = [
    " 1 + 2 * 3 ",
    "x^2^3 - 1"
];
# Parse each sample and dump the resulting parse tree.
for test in tests:
    p = arith.parse(test)
    print(p)
# 1+2*3 ==> (+ 1 (* 2 3))
# ["add",[["num","1"],["mul",[["num","2"],["num","3"]]]]]
# x^2^3+1 ==> (+ (^ x 2 3) 1)
# ["add",[["pow",[["sym","x"],["num","2"],["num","3"]]],["num","1"]]]
| StarcoderdataPython |
1711179 | <reponame>cleoold/types-linq
from typing import TypedDict
class ModuleSpec(TypedDict):
    """Describes one source module whose API is exported for documentation."""
    file_path: str  # path to the .py/.pyi file on disk
    name: str  # dotted module name used in the generated docs
    classes: dict[str, 'ClassSpec']  # class name -> exported members
class ClassSpec(TypedDict):
    """Which members of a class are exported for documentation."""
    methods: set[str]  # method names (dunders included) to document
    readonly_properties: set[str]  # read-only property names to document
# ==========================================================
# This describes which APIs are exported for doc
# Filesystem layout of the package being documented.
_path = '../types_linq'
_project = 'types_linq'

type_file = f'{_path}/more_typing.py'

# Empty member sets are written as ``set()`` (clearer than the original
# ``{*()}`` trick; ``{}`` would be a dict).
modules: list[ModuleSpec] = [
    {
        'file_path': f'{_path}/cached_enumerable.py',
        'name': f'{_project}.cached_enumerable',
        'classes': {
            'CachedEnumerable': {
                'methods': {
                    'as_cached',
                },
                'readonly_properties': set(),
            },
        },
    },
    {
        'file_path': f'{_path}/enumerable.pyi',
        'name': f'{_project}.enumerable',
        'classes': {
            'Enumerable': {
                'methods': {
                    '__init__',
                    '__contains__',
                    '__getitem__',
                    '__iter__',
                    '__len__',
                    '__reversed__',
                    'aggregate',
                    'all',
                    'any',
                    'append',
                    'as_cached',
                    'average',
                    'average2',
                    'cast',
                    'concat',
                    'contains',
                    'count',
                    'default_if_empty',
                    'distinct',
                    'element_at',
                    'empty',
                    'except1',
                    'first',
                    'first2',
                    'group_by',
                    'group_by2',
                    'group_join',
                    'intersect',
                    'join',
                    'last',
                    'last2',
                    'max',
                    'max2',
                    'min',
                    'min2',
                    'of_type',
                    'order_by',
                    'order_by_descending',
                    'prepend',
                    'range',
                    'repeat',
                    'reverse',
                    'select',
                    'select2',
                    'select_many',
                    'select_many2',
                    'sequence_equal',
                    'single',
                    'single2',
                    'skip',
                    'skip_last',
                    'skip_while',
                    'skip_while2',
                    'sum',
                    'sum2',
                    'take',
                    'take_last',
                    'take_while',
                    'take_while2',
                    'to_dict',
                    'to_set',
                    'to_list',
                    'to_lookup',
                    'union',
                    'where',
                    'where2',
                    'zip',
                    'zip2',
                    'elements_in',
                    'to_tuple',
                },
                'readonly_properties': set(),
            },
        },
    },
    {
        'file_path': f'{_path}/grouping.py',
        'name': f'{_project}.grouping',
        'classes': {
            'Grouping': {
                'methods': set(),
                'readonly_properties': {
                    'key',
                },
            },
        },
    },
    {
        'file_path': f'{_path}/lookup.py',
        'name': f'{_project}.lookup',
        'classes': {
            'Lookup': {
                'methods': {
                    '__contains__',
                    '__len__',
                    '__getitem__',
                    'apply_result_selector',
                    'contains',
                },
                'readonly_properties': {
                    'count',
                },
            },
        },
    },
    {
        'file_path': f'{_path}/ordered_enumerable.pyi',
        'name': f'{_project}.ordered_enumerable',
        'classes': {
            'OrderedEnumerable': {
                'methods': {
                    'create_ordered_enumerable',
                    'then_by',
                    'then_by_descending',
                },
                'readonly_properties': set(),
            }
        },
    },
    {
        'file_path': f'{_path}/types_linq_error.py',
        'name': f'{_project}.types_linq_error',
        'classes': {
            'TypesLinqError': {
                'methods': set(),
                'readonly_properties': set(),
            },
            'InvalidOperationError': {
                'methods': set(),
                'readonly_properties': set(),
            },
            'IndexOutOfRangeError': {
                'methods': set(),
                'readonly_properties': set(),
            },
        },
    },
    {
        'file_path': f'{_path}/more/more_enumerable.pyi',
        'name': f'{_project}.more.more_enumerable',
        'classes': {
            'MoreEnumerable': {
                'methods': {
                    'aggregate_right',
                    'as_more',
                    'distinct_by',
                    'enumerate',
                    'except_by',
                    'flatten',
                    'flatten2',
                    'for_each',
                    'for_each2',
                    'interleave',
                    'maxima_by',
                    'minima_by',
                    'pipe',
                    'traverse_breath_first',
                    'traverse_depth_first',
                },
                'readonly_properties': set(),
            },
        },
    },
    {
        'file_path': f'{_path}/more/extrema_enumerable.pyi',
        'name': f'{_project}.more.extrema_enumerable',
        'classes': {
            'ExtremaEnumerable': {
                'methods': {
                    'take',
                    'take_last',
                },
                'readonly_properties': set(),
            },
        },
    }
]
| StarcoderdataPython |
4818412 | <gh_stars>0
import os
def parseMegan(filename, prefix=""):
    """Split a MEGAN_info export into one TSV per category (TAX, INTERPRO2GO, ...).

    Reads *filename* up to the ``END_OF_DATA_TABLE`` sentinel, groups data
    rows by their first tab-separated field, and writes each group to
    ``<prefix>_<CATEGORY>.tsv`` headed by a "CATEGORY\\tNUM" + sample-names row.
    """
    output = {}
    header = ""
    with open(filename, "r") as f:
        # Iterating the file (instead of ``while True: readline()``) also
        # terminates cleanly at EOF when the sentinel line is missing.
        for raw in f:
            line = raw.strip()
            if line == "END_OF_DATA_TABLE":
                break
            if not line:
                continue  # blank lines crashed the original (line[0] IndexError)
            first = line.split("\t")[0]
            if first == "@Names":
                # "@Names" is 6 characters; keep the tab-separated names after it.
                header = "CATEGORY\tNUM" + line[6:]
            elif line[0] == "@":
                continue  # other @-metadata rows are ignored
            else:
                output.setdefault(first, []).append(line)
    for key, value in output.items():
        with open(prefix + "_" + key + ".tsv", "w") as newfile:
            newfile.write(header + "\n")
            newfile.write('\n'.join(value))
def interproscan_reformat(filename):
    """Clean the raw interpro2go mapping file into a 4-column TSV.

    Each data line has the form
    ``InterPro:<id> <name...> > <GO name> ; <GO id>``; lines starting with
    '!' are comments.  Writes ``INTERPRO2GO_MAP_CLEANED.tsv`` in the current
    directory with tab-separated columns: interpro_id, interpro_name,
    go_id, go_name.
    """
    rows = []
    with open(filename, "r") as f:
        for line in f:
            if not line.strip() or line[0] == "!":
                continue  # blank lines crashed the original (line[0] IndexError)
            interpro, go = line.split(" > ")
            # e.g. "InterPro:IPR000001 Kringle" -> id "IPR000001", name "Kringle"
            fields = interpro.split()
            interpro_id = fields[0].split(":")[1]
            interpro_name = " ".join(fields[1:])
            # e.g. "GO:protein binding ; GO:0005515"
            go_name, go_id = go.split(" ; ")
            rows.append((interpro_id.strip(), interpro_name.strip(),
                         go_id.strip(), go_name.strip()))
    with open("INTERPRO2GO_MAP_CLEANED.tsv", "w") as newfile:
        for row in rows:
            newfile.write("\t".join(row) + "\n")
def interproscan_goatools(filename, output="interproscan_goatools.txt"):
    """Convert the cleaned InterPro->GO TSV into goatools association format.

    Groups GO ids (third column) by InterPro accession number (first column
    with the leading "IPR" stripped) and writes one ``<id>\\t<go;go;...>``
    line per accession to *output*.
    """
    mapping_data = {}
    with open(filename, "r") as f:  # the original leaked this file handle
        for line in f:
            fields = line.split("\t")
            accession = fields[0][3:]  # drop the "IPR" prefix
            mapping_data.setdefault(accession, []).append(fields[2])
    with open(output, "w") as out:
        for key, value in mapping_data.items():
            out.write(key + "\t" + ";".join(value) + "\n")
def combine_bracken_output(filepath, level="P"):
    """Merge per-sample Bracken kreport files into one abundance table.

    Reads every file in *filepath* (named ``<sample>_bracken_phylums.kreport``),
    keeps rows whose rank column (field 4) matches *level*, and writes
    ``bracken_combined.tsv`` (taxa rows x sample columns) in the current
    directory.  Missing taxa are reported as "0".
    """
    counts = {}
    # Sort the listing so the sample column order is deterministic
    # (os.listdir order is filesystem-dependent).
    for file in sorted(os.listdir(filepath)):
        sample = file.replace("_bracken_phylums.kreport", "")
        counts[sample] = {}
        with open(os.path.join(filepath, file), "r") as f:  # original leaked handles
            for line in f:
                fields = line.split("\t")
                if fields[3] == level:
                    # fields[1] = clade read count, fields[5] = taxon name
                    counts[sample][fields[5].strip()] = fields[1]
    all_taxa = set()
    for sample_counts in counts.values():
        all_taxa.update(sample_counts)
    # BUG FIX: the original iterated the raw set, so row order changed from
    # run to run; sorting makes the output reproducible.
    out = ["taxa"]
    out.extend(sorted(all_taxa))
    for sample in counts:
        out[0] += "\t" + sample
        for i in range(1, len(out)):
            taxon = out[i].split("\t")[0]
            out[i] += "\t" + counts[sample].get(taxon, "0")
    with open("bracken_combined.tsv", "w") as f:
        f.write("\n".join(out))
#interproscan_reformat("INTERPRO2GO_MAP.txt")
#parseMegan("daa2rma.megan", "rma")
#parseMegan("root_4m_info", prefix="root4m")
#parseMegan("bulk_4m_info", prefix="bulk4m")
#interproscan_goatools("INTERPRO2GO_MAP_CLEANED.tsv")
#combine_bracken_output("C:\\Users\\YZ\\Desktop\\FYP\\dip_metagenome\\results\\bracken_kreport",level="P")
| StarcoderdataPython |
1742593 | import auxly.filesys
import auxly.shell
from auxly._modu import *
from auxly._modu import __version__
| StarcoderdataPython |
1785521 | <filename>common/commandqueue.py<gh_stars>0
# Command Queue is a queue to store command
from collections import deque
class Command:
    """ Command class that represents a command """
    def __init__(self, id):
        # ``id`` mirrors the external command identifier; it shadows the
        # builtin only within this signature (kept for API compatibility).
        self.id = id
class CommandQueue:
    """Bounded FIFO of commands backed by a deque.

    ``add`` rejects new items once ``MaxSize`` entries are queued;
    ``get`` returns None when the queue is empty.
    """

    def __init__(self, MaxSize):
        self.CmdQueue = deque([])
        self.MaxSize = MaxSize

    def add(self, cmd):
        """Append ``cmd``; return True on success, False when the queue is full."""
        if len(self.CmdQueue) >= self.MaxSize:
            return False
        self.CmdQueue.append(cmd)
        return True

    def get(self):
        """Pop and return the oldest command, or None when empty."""
        if not self.CmdQueue:
            return None
        return self.CmdQueue.popleft()

    def count(self):
        """Return the number of queued commands."""
        return len(self.CmdQueue)
# Smoke test: capacity is 5, so the sixth add() below is rejected.
cq = CommandQueue(5)
cq.add(Command(0x00000001))
cq.add(Command(0x00000010))
cq.add(Command(0x00000020))
cq.add(Command(0x00000001))
cq.add(Command(0x00000010))
cq.add(Command(0x00000020))
cnt = cq.count()
print(cnt)
# Drain everything that was actually queued.
for idx in range(cnt):
    cmd = cq.get()
    if cmd is not None:
        print("Get queue success. Cmd = ", cmd.id)
    else:
print("Unable to get command from queue") | StarcoderdataPython |
80888 | '''
Tasks which control a plant under pure machine control. Used typically for initializing BMI decoder parameters.
'''
import numpy as np
import time
import os
import pdb
import multiprocessing as mp
import pickle
import tables
import re
import tempfile, traceback, datetime
import riglib.bmi
from riglib.stereo_opengl import ik
from riglib.experiment import traits, experiment
from riglib.bmi import clda, assist, extractor, train, goal_calculators, ppfdecoder
from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter
from riglib.bmi.extractor import DummyExtractor
from riglib.stereo_opengl.window import WindowDispl2D, FakeWindow
from riglib.bmi.state_space_models import StateSpaceEndptVel2D
from .bmimultitasks import BMIControlMulti
# State-space-model choices exposed to the task configuration GUI.
bmi_ssm_options = ['Endpt2D', 'Tentacle', 'Joint2L']
class EndPostureFeedbackController(BMILoop, traits.HasTraits):
    """BMI loop driven by a machine-only filter instead of neural input.

    Used for pure machine control, e.g. to generate trajectories when
    seeding/initializing decoder parameters (see module docstring).
    """
    ssm_type_options = bmi_ssm_options
    ssm_type = traits.OptionsList(*bmi_ssm_options, bmi3d_input_options=bmi_ssm_options)
    def load_decoder(self):
        """Build a unit-less Decoder around a MachineOnlyFilter on the 2D endpoint SSM."""
        self.ssm = StateSpaceEndptVel2D()
        A, B, W = self.ssm.get_ssm_matrices()
        filt = MachineOnlyFilter(A, W)
        units = []  # no recorded units: machine control only
        self.decoder = Decoder(filt, units, self.ssm, binlen=0.1)
        self.decoder.n_features = 1
    def create_feature_extractor(self):
        """Install a do-nothing extractor (no neural features are consumed)."""
        self.extractor = DummyExtractor()
        self._add_feature_extractor_dtype()
class TargetCaptureVisualFeedback(EndPostureFeedbackController, BMIControlMulti):
    """Target-capture task under full machine control (assist level locked at 1)."""
    assist_level = (1, 1)
    is_bmi_seed = True  # sessions from this task can seed BMI decoders
    def move_effector(self):
        # Cursor motion comes entirely from the BMI loop's assist path.
        pass
class TargetCaptureVFB2DWindow(TargetCaptureVisualFeedback, WindowDispl2D):
    """2-D on-screen rendering of the visual-feedback target-capture task."""
    fps = 20.
    def __init__(self,*args, **kwargs):
        super(TargetCaptureVFB2DWindow, self).__init__(*args, **kwargs)
        self.assist_level = (1, 1)  # force full machine assist
    def _start_wait(self):
        # No inter-trial delay for pure visual feedback.
        self.wait_time = 0.
        super(TargetCaptureVFB2DWindow, self)._start_wait()
    def _test_start_trial(self, ts):
        # Start as soon as the (zero) wait elapses, unless paused.
        return ts > self.wait_time and not self.pause
    @classmethod
    def get_desc(cls, params, report):
        """Summarize a session report (list of event tuples or stats dict) as text."""
        if isinstance(report, list) and len(report) > 0:
            # Event tuples end with a timestamp; first/last give the duration.
            duration = report[-1][-1] - report[0][-1]
            reward_count = 0
            for item in report:
                if item[0] == "reward":
                    reward_count += 1
            return "{} rewarded trials in {} min".format(reward_count, int(np.ceil(duration / 60)))
        elif isinstance(report, dict):
            duration = report['runtime'] / 60
            reward_count = report['n_success_trials']
            return "{} rewarded trials in {} min".format(reward_count, int(np.ceil(duration / 60)))
        else:
            return "No trials"
| StarcoderdataPython |
1632480 | import os
from app import create_app, db
from app.models import User
"""
Populates a test database with a root user
"""
# Build an app bound to the "test" config and ensure the root user exists.
app = create_app("test")
with app.app_context():
    root = User.query.filter_by(email="<EMAIL>").first()
    if root is None:
        # Idempotent: the root account is only created on first run.
        root = User(
            username="sudo",
            email="<EMAIL>",
            name="<NAME> (Root)",
            active=True,
            confirmed=True,
            sudo=True,
        )
        db.session.add(root)
        db.session.commit()
| StarcoderdataPython |
1730863 | # -*- coding: utf-8 -*-
import argparse
import os
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from subprocess import call
from tempfile import mkdtemp
REPO_ROOT = dirname(dirname(abspath(__file__)))  # repository root (two levels up)
TEMPLATE_PATH = join(REPO_ROOT, 'project_template')  # django-admin startproject template
DEV_SITE_NAME = 'dev_site'
DEV_SITE_PATH = join(REPO_ROOT, DEV_SITE_NAME)  # where the scratch dev site lives
def create_test_site(path):
    """Generate a throw-away Django site at *path* from the project template."""
    template_arg = '--template=%s' % TEMPLATE_PATH
    argv = [
        'django-admin.py',
        'startproject',
        DEV_SITE_NAME,
        path,
        template_arg,
        '--extension=py,rst,html',
    ]
    call(argv)
def create():
    # Ensure the target directory exists; refuse to overwrite a non-empty one.
    # NOTE: this file uses Python 2 print-statement syntax.
    if not(exists(DEV_SITE_PATH)):
        os.makedirs(DEV_SITE_PATH)
    elif os.listdir(DEV_SITE_PATH) != []:
        print 'Directory< %s > is not empty' % DEV_SITE_PATH
        return
    create_test_site(DEV_SITE_PATH)
def diff():
    """Render a fresh site into a temp dir and colordiff it against dev_site.

    Compiled/artifact files (*.pyc, *.json, *.db) are excluded from the diff.
    """
    tmp_dir = mkdtemp()
    try:
        create_test_site(tmp_dir)
        call([
            'colordiff',
            '-ENBwbur',
            '-x', "*.pyc",
            '-x', "*.json",
            '-x', "*.db",
            DEV_SITE_PATH,
            tmp_dir,
        ])
    finally:
        # Always remove the scratch render, even if colordiff fails
        # (the original leaked the temp dir on error).
        rmtree(tmp_dir)
def patch():
    # Render a fresh site, diff it into dev_site.patch, then apply the patch
    # to the checked-in dev site.  NOTE: Python 2 print-statement syntax.
    tmp_dir = mkdtemp()
    create_test_site(tmp_dir)
    with open(join(REPO_ROOT,'dev_site.patch'), "w") as patchfile:
        call(
            [
                'diff',
                '-ENBwbur',
                # NOTE(review): the next two pairs rely on implicit string
                # concatenation, yielding single args '-x*.pyc' / '-x*.json'
                # (valid short-option form for diff) — presumably intended;
                # confirm before "fixing" the missing commas.
                '-x'
                "*.pyc",
                '-x'
                "*.json",
                '-x',
                "settings.py",
                '-x',
                "*.db",
                '.',
                tmp_dir,
            ],
            cwd=DEV_SITE_PATH,
            stdout=patchfile
        )
    rmtree(tmp_dir)
    print "Applying the path ..."
    call([
        'patch',
        '-d',
        DEV_SITE_PATH,
        '-i',
        join(REPO_ROOT,'dev_site.patch'),
        '-p0'
    ])
def main():
    """CLI entry point: dispatch to create/diff/patch based on the argument."""
    parser = argparse.ArgumentParser()
    parser.add_argument("command", type=str, choices=['create', 'diff', 'patch'], help="Execute an command")
    args = parser.parse_args()
    # argparse's ``choices`` guarantees the key exists.
    actions = {'create': create, 'diff': diff, 'patch': patch}
    actions[args.command]()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
186513 | <gh_stars>0
from __future__ import division
try:
import caffe
except:
pass
import torch
import math
import random
from PIL import Image, ImageOps
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import torchvision.transforms.functional as F
class Compose(object):
    """Chain several transforms: the output of each is fed into the next.

    Args:
        transforms (list of callables): transforms applied in order.

    Example:
        >>> Compose([CenterCrop(10), ToTensor()])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result
class ToTensor(object):
    """Convert a ``numpy.ndarray`` image (C, H, W) to a float tensor,
    reversing the channel order (index [2, 1, 0], i.e. RGB <-> BGR)."""

    def __call__(self, pic):
        """Return ``pic`` as a channel-reversed FloatTensor.

        Non-ndarray input yields None (mirrors the original fall-through).
        """
        if not isinstance(pic, np.ndarray):
            return None
        return torch.from_numpy(pic).float()[[2, 1, 0]]
class Normalize(object):
    """Normalize a tensor image channel-wise with mean and standard deviation.

    Each channel is transformed as ``channel = (channel - mean) / std``,
    delegated to ``torchvision.transforms.functional.normalize``.

    Args:
        mean (sequence): per-channel means.
        std (sequence): per-channel standard deviations.
    """
    # NOTE: a large commented-out caffe-meanfile constructor variant (dead
    # code) was removed here; recover it from VCS history if ever needed.

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.

        Returns:
            Tensor: Normalized Tensor image.
        """
        return F.normalize(tensor, self.mean, self.std)
class Scale(object):
    """Validate that an image already has the expected square size.

    Despite the name (kept for API compatibility), this implementation does
    not resize: ``__call__`` asserts the image is (C, size, size) and
    returns it unchanged.

    Args:
        size (int or 2-sequence): expected spatial size.
        interpolation: kept for interface compatibility; unused here.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # ``collections.Iterable`` was removed in Python 3.10; use the
        # collections.abc location instead.
        from collections.abc import Iterable
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """Assert ``img`` spatial dims equal ``size`` and return it unchanged."""
        assert(img.shape[1]==self.size)
        assert(img.shape[2]==self.size)
        return img
class CenterCrop(object):
    """Crop the central (th, tw) region of a (C, H, W) array.

    Args:
        size (int or (h, w)): output spatial size; an int means a square crop.
    """

    def __init__(self, size):
        self.size = (int(size), int(size)) if isinstance(size, numbers.Number) else size

    def __call__(self, img):
        """Return the center crop of ``img`` (array with channels first)."""
        dim1, dim2 = img.shape[1], img.shape[2]
        crop_h, crop_w = self.size
        off_a = int((dim1 - crop_w) / 2.)
        off_b = int((dim2 - crop_h) / 2.)
        # NOTE: the axis/offset pairing mirrors the original implementation
        # exactly (equivalent for the square inputs this pipeline uses).
        return img[:, off_b:off_b + crop_h, off_a:off_a + crop_w]
class Pad(object):
    """Pad a PIL.Image on all sides with a constant fill value.

    Args:
        padding (int): number of pixels to add on every border.
        fill (number, str or tuple): pixel fill value; defaults to 0.
    """

    def __init__(self, padding, fill=0):
        assert isinstance(padding, numbers.Number)
        assert isinstance(fill, (numbers.Number, str, tuple))
        self.padding = padding
        self.fill = fill

    def __call__(self, img):
        """Return ``img`` expanded by ``padding`` pixels of ``fill`` per border."""
        return ImageOps.expand(img, border=self.padding, fill=self.fill)
class Lambda(object):
    """Adapter turning any user-supplied function into a transform object."""

    def __init__(self, lambd):
        # Plain ``def`` functions also pass: types.LambdaType is FunctionType.
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd

    def __call__(self, img):
        """Apply the wrapped callable to ``img`` and return its result."""
        return self.lambd(img)
class RandomCrop(object):
    """Crop the given PIL.Image at a random location.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is 0, i.e no padding. If a sequence of length
            4 is provided, it is used to pad left, top, right, bottom borders
            respectively.
    """

    def __init__(self, size, padding=0):
        # An int means a square (size, size) crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding

    def __call__(self, img):
        """Crop a random (th, tw) window from ``img`` (a PIL.Image)."""
        if self.padding > 0:
            # Zero-pad all four borders before choosing the crop origin.
            img = ImageOps.expand(img, border=self.padding, fill=0)

        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img  # crop would be a no-op

        # Top-left corner is uniform over all valid positions.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th))
class RandomHorizontalFlip(object):
    """Flip a (C, H, W) numpy image left-right with probability 0.5."""

    def __call__(self, img):
        """Return ``img`` flipped along the last axis half the time,
        otherwise unchanged."""
        if random.random() < 0.5:
            # .copy() yields a contiguous array rather than a reversed view.
            return np.flip(img, axis=2).copy()
        return img
class RandomSizedCrop(object):
    """Crop a random ``size`` x ``size`` window from a (C, H, W) array.

    NOTE: the upstream torchvision class of this name also jitters scale and
    aspect ratio; this implementation only takes a random spatial crop.

    Args:
        size: side length of the square crop.
        interpolation: kept for interface compatibility; unused here.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """Return a uniformly-positioned square crop of ``img``."""
        top = random.randint(0, img.shape[1] - self.size)
        left = random.randint(0, img.shape[2] - self.size)
        return img[:, top:top + self.size, left:left + self.size]
| StarcoderdataPython |
199917 | #!/usr/bin/python
import os
import sys
from cryptography.fernet import Fernet
from manageInventory import manage_host_config
class manage_sec(manage_host_config):
    """Fernet-based helper for encrypting/decrypting the default SSH password.

    The symmetric key lives in ``key.key`` in the working directory; the
    encrypted password is stored via the inherited inventory accessors.
    """

    def __init__(self, **kwargs):
        # kwargs accepted but unused, kept for call-site compatibility.
        super(manage_sec, self).__init__()

    def generateKey(self):
        """Return a fresh Fernet key (bytes)."""
        return Fernet.generate_key()

    def createKeyFile(self):
        """Write a newly generated key to ``key.key`` (overwrites any old key)."""
        with open("key.key", "wb") as key_file:
            key_file.write(self.generateKey())

    def getKeyFromFile(self):
        """Load the key from ``key.key`` and return a Fernet cipher."""
        # ``with`` closes the handle; the original leaked it via
        # open(...).read().
        with open("key.key", "rb") as key_file:
            return Fernet(key_file.read())

    def encryptPassword(self, defaultSshPassword):
        """Rotate the key file and return ``defaultSshPassword`` encrypted (bytes)."""
        self.createKeyFile()
        key = self.getKeyFromFile()
        return key.encrypt(defaultSshPassword)

    def decryptPassword(self):
        """Decrypt and return the stored encrypted SSH password."""
        key = self.getKeyFromFile()
        return key.decrypt(self.getDefaultEncryptedSshPassword())

    def storeEncryptedPassword(self, defaultSshPassword=None):
        """Encrypt *defaultSshPassword* and persist it via the inventory setter.

        BUG FIX: the original called ``self.encryptPassword()`` with no
        argument, which always raised TypeError.  The password is now taken
        as a parameter; legacy no-arg call sites (which already failed) now
        fail fast with a clear message.
        """
        if defaultSshPassword is None:
            raise ValueError("defaultSshPassword is required")
        self.setDefaultEncryptedSshPassword(self.encryptPassword(defaultSshPassword))
| StarcoderdataPython |
64644 | import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.model_selection import train_test_split
def create_info_csv(mvtec_dir: Path) -> DataFrame:
    """Index every MVTec AD image into a DataFrame of path/split/defect info.

    Scans ``<category>/<train|test>/<defect>/*.png``, re-splits 20% of each
    category's train images into a "val" split (fixed seed for
    reproducibility), and derives per-image ``stem`` and ``raw_mask_path``
    columns.

    Args:
        mvtec_dir: root of the raw MVTec AD dataset.

    Returns:
        DataFrame with columns raw_img_path, raw_stem, defect, data_type,
        category, stem, raw_mask_path.
    """
    # Collect rows in a list and build the frame once: DataFrame.append in a
    # loop was O(n^2), deprecated, and removed in pandas 2.0.
    records = []
    for data_type in ["train", "test"]:
        for p in mvtec_dir.glob(f"*/{data_type}/*/*.png"):
            records.append(
                {
                    "raw_img_path": str(p),
                    "raw_stem": p.stem,
                    "defect": p.parents[0].name,
                    "data_type": p.parents[1].name,
                    "category": p.parents[2].name,
                }
            )
    df = pd.DataFrame(records)
    # Carve a validation split out of each category's training images.
    for category in df["category"].unique():
        category_df = df.query("data_type=='train' & category==@category")
        _, val_index = train_test_split(
            category_df.index.tolist(),
            train_size=0.8,
            test_size=0.2,
            random_state=5,
            shuffle=True,
        )
        df.loc[val_index, "data_type"] = "val"
    # Globally-unique flat filename for each image.
    df["stem"] = df.apply(
        lambda x: f"{x.category}_{x.data_type}_{x.defect}_{x.raw_stem}",
        axis=1,
    )
    # Ground-truth masks only exist for defective test images; missing paths
    # are handled downstream by synthesizing zero masks.
    df["raw_mask_path"] = df.apply(
        lambda x: f"{mvtec_dir}/{x.category}/ground_truth/{x.defect}/{x.raw_stem}_mask.png",
        axis=1,
    )
    return df
def move_images_and_masks(df: DataFrame) -> None:
    """Move images/masks into flat /data dirs and write the cleaned index CSV.

    For images without a ground-truth mask (defect-free train images) an
    all-zero mask of the same shape is synthesized.
    """
    os.makedirs("/data/images", exist_ok=True)
    os.makedirs("/data/masks", exist_ok=True)
    for i in df.index:
        raw_img_path, raw_mask_path, stem = df.loc[i, ["raw_img_path", "raw_mask_path", "stem"]]
        if os.path.exists(raw_mask_path):
            os.rename(raw_mask_path, f"/data/masks/{stem}.png")
        else:
            # No ground truth: create a zero mask matching the image size.
            img = cv2.imread(raw_img_path)
            mask = np.zeros(img.shape)
            cv2.imwrite(f"/data/masks/{stem}.png", mask)
        os.rename(raw_img_path, f"/data/images/{stem}.png")
    # BUG FIX: DataFrame.drop returns a new frame; the original discarded the
    # result, so the raw_* helper columns leaked into info.csv.
    df = df.drop(columns=["raw_stem", "raw_img_path", "raw_mask_path"])
    df.to_csv("/data/info.csv", index=False)
if __name__ == "__main__":
    # Expects the raw MVTec AD dataset extracted at /data/MVTec.
    mvtec_dir = Path("/data/MVTec")
    df = create_info_csv(mvtec_dir)
    move_images_and_masks(df)
| StarcoderdataPython |
3392768 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 14:40:44 2012
@author: VHOEYS
"""
import numpy as np
import scipy as sp
from scipy import special
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['lines.color'] = 'k'
def calc_meantipow(off,loglam,chi,n):
    '''
    Mean of the power-transformed topographic index (CDF-difference version).

    NOTE(review): compared with calc_meantipow_nv below, the roles of the
    gamma parameters appear swapped here: ``chi`` is passed to gammainc as
    the shape (Ti_shp) while the computed (loglam-off)/chi value (Ti_chi)
    is used as the scale divisor.  The trailing script calls this the
    "1 parameter implementation => phi is chi" -- confirm before reuse.
    NOTE: contains a Python 2 print statement.

    See literature:
    Clark et al. (2008), "Framework for Understanding Structural Errors
    (FUSE): A modular framework to diagnose differences between
    hydrological models", Water Resources Research 44.
    Original code from Clark.
    '''
    Ti_off=off
    Ti_shp=chi #shape of the Gamma distribution chi eigenlijk
    Ti_chi= (loglam-Ti_off)/Ti_shp #Chi -- loglamb is the first parameter (mean) phi eigenlijk
    print Ti_chi,'tichi'
    nn=n
    # values for testing (Sivapalan et al., WRR, December 1987)
    # TI_OFF = 3.82_SP ! TI_OFF = 2.92_SP
    # TI_SHP = 2.48_SP ! TI_SHP = 3.52_SP
    # TI_CHI = 1.00_SP ! TI_CHI = 0.742_SP
    # loop through the frequency distribution
    LOWERV = 0.
    LOWERP = 0.
    AVELOG = 0. #testing
    AVEPOW = 0.
    Nbins=2000
    Ti_max=50.
    for ibin in range (1,Nbins):
        # get probability for the current bin
        UPPERV = (float(ibin)/Nbins) * Ti_max # upper value in frequency bin
        GMARG2 = max(0., UPPERV - Ti_off) / Ti_chi # 2nd argument to the Gamma function
        UPPERP = special.gammainc(Ti_shp, GMARG2) # GAMMP is the incomplete Gamma function GAMMP(Ti_shp, GMARG2)
        PROBIN = UPPERP-LOWERP # probability of the current bin
        # get the scaled topographic index value
        LOGVAL = 0.5*(LOWERV+UPPERV) # log-transformed index for the current bin
        POWVAL = (np.exp(LOGVAL))**(1./nn) # power-transformed index for the current bin
        AVELOG = AVELOG + LOGVAL*PROBIN #! average log-transformed index (testing)
        AVEPOW += POWVAL*PROBIN # average power-transformed index
        # print LOWERV, UPPERV, LOGVAL, POWVAL, AVEPOW
        # !write(*,'(7(f9.3,1x))') lowerv, upperv, logval, powval, avelog, avepow
        # save the lower value and probability
        LOWERV = UPPERV # lower value for the next bin
        LOWERP = UPPERP # cumulative probability for the next bin
    return POWVAL,AVEPOW,AVELOG
def calc_meantipow_2(off, loglam, chi, nn):
    """Mean of the power-transformed topographic index (direct-pdf version).

    Numerically integrates the three-parameter gamma distribution of the
    log topographic index over [0, 50] using 20000 fixed-width bins.

    See Clark et al. (2008), "Framework for Understanding Structural Errors
    (FUSE)", Water Resources Research 44.  Original code from Clark.

    Args:
        off: offset (mu) of the gamma distribution.
        loglam: mean log-transformed index (log lambda).
        chi: scale parameter of the gamma distribution.
        nn: TOPMODEL power-transform exponent n.

    Returns:
        Tuple (powval, avepow, avelog): the last bin's power-transformed
        value, the distribution-averaged power-transformed index, and the
        averaged log-transformed index.
    """
    mu = off
    shape = (loglam - mu) / chi  # gamma shape parameter (phi)
    n_bins = 20000
    ti_max = 50.
    bin_width = ti_max / n_bins
    avelog = 0.
    avepow = 0.
    powval = 0.
    for ibin in range(1, n_bins):
        zeta = (float(ibin) / n_bins) * ti_max  # upper edge of this bin
        scaled = max(0., (zeta - mu) / chi)
        # probability mass of this bin under the gamma pdf
        fzeta = bin_width *(1./(chi*special.gamma(shape))) * scaled**(shape-1) * np.exp(-scaled)
        powval = (np.exp(zeta))**(1./nn)  # power-transformed index
        avepow = avepow + fzeta*powval
        avelog = avelog + zeta*fzeta
    return powval, avepow, avelog
def calc_meantipow_nv(off,loglam,chi,n):
    '''
    Mean of the power-transformed topographic index (3-parameter form).

    Same quadrature as ``calc_meantipow_2`` but integrates via the CDF
    (regularised incomplete gamma function) and evaluates the index at
    bin midpoints.

    Parameters
    ----------
    off : float
        Offset (mu) of the gamma distribution.
    loglam : float
        Mean of the log-transformed index.
    chi : float
        Scale parameter of the gamma distribution.
    n : float
        Power-transform exponent.

    Returns
    -------
    tuple
        ``(powval, avepow, avelog)`` as in ``calc_meantipow_2``.

    See literature:
    Clark et al. (2008), WRR 44 -- FUSE framework (original Fortran code).
    '''
    mu = off
    shape = (loglam - mu) / chi   # gamma shape parameter ("phi")
    n_bins = 20000
    ti_max = 50.
    lower_v = 0.   # lower edge of the current bin
    lower_p = 0.   # cumulative probability at the lower edge
    avelog = 0.    # mean log-transformed index (testing)
    avepow = 0.    # mean power-transformed index
    for ibin in range(1, n_bins):
        # upper edge of the current frequency bin
        upper_v = (float(ibin) / n_bins) * ti_max
        # 2nd argument of the incomplete gamma function
        gam_arg = max(0., upper_v - mu) / chi
        # cumulative probability at the upper edge
        upper_p = special.gammainc(shape, gam_arg)
        prob_bin = upper_p - lower_p            # mass of this bin
        logval = 0.5 * (lower_v + upper_v)      # bin midpoint (log space)
        powval = (np.exp(logval)) ** (1. / n)   # power-transformed index
        avelog = avelog + logval * prob_bin
        avepow += powval * prob_bin
        # roll the upper edge over to the next bin's lower edge
        lower_v = upper_v
        lower_p = upper_p
    return powval, avepow, avelog
# -- Compare the three mean-topographic-index implementations ----------------
# (Python 2 script: note the print statements below.)
#calculate value
nn=10.
mu=3.
chi=1.0
phi=2.48
loglambda= chi*phi+mu
powval,avepow,avelog=calc_meantipow_2(mu,loglambda,chi,nn)
#1 parameter implementation => phi is chi
# NOTE(review): calc_meantipow is defined earlier in this file; here the
# shape parameter phi is passed where the 1-par version expects its scale.
chi=1.0
phi=2.48
loglambda= chi*phi+mu
powvala,avepowa,aveloga=calc_meantipow(mu,loglambda,phi,nn)
#3 parameter implementation
chi=1.0
phi=2.48
loglambda= chi*phi+mu
powvalb,avepowb,avelogb=calc_meantipow_nv(mu,loglambda,chi,nn)
# all three means should agree if the parameterisations are equivalent
print avepow,avepowa,avepowb
print avelog,aveloga,avelogb
###############################################################################
##FOR OVERLAND FLOW
###############################################################################
# Left subplot: vary the mean loglambda at fixed scale chi.
# Right subplot: vary the scale chi at fixed mean loglambda.
# Each curve is 1 - CDF of the gamma-distributed topographic index,
# i.e. the contributing-area fraction Ac/A.
mu=3.
plt.figure()
plt.subplots_adjust(wspace = 0.2)
plt.subplot(121)
chi=1.0
loglambda= [5.0,8.0,10.0]
liness=['k-','k--','k-.']   # line styles, one per curve
cntt=0
for ll in loglambda:
    phi= (ll-mu)/chi        # shape parameter for this loglambda
    zeta=np.arange(.0,18.,0.01)
    fzeta=np.zeros(zeta.size)
    cnt=0
    for i in zeta:
        temp=max(0.,(i-mu)/chi)
        # gamma pdf evaluated on the zeta grid
        fzeta[cnt]=1./(chi*special.gamma(phi)) * temp**(phi-1) * np.exp(-temp)
        cnt+=1
    print cntt,'cnt'
    # cumsum/100 approximates the CDF (grid step is 0.01)
    plt.plot(zeta,1.-fzeta.cumsum()/100.,liness[cntt],label=r'$\lambda$ = '+str(ll))
    cntt+=1
#plt.xlabel(r'$\zeta$ ($ln(\alpha / \tan \beta $)')
plt.xlabel(r'$\zeta$')
plt.ylabel(r'$\frac{A_c}{A}$')
plt.legend()
plt.subplot(122)
chi=[0.1,1.25,3.0]
loglambda= 7.5
liness=['k-','k--','k-.']
cntt=0
for ch in chi:
    phi= (loglambda-mu)/ch   # shape parameter for this scale
    zeta=np.arange(.0,18.,0.01)
    fzeta=np.zeros(zeta.size)
    cnt=0
    for i in zeta:
        temp=max(0.,(i-mu)/ch)
#        print temp,'temp'
        fzeta[cnt]=1./(ch*special.gamma(phi)) * temp**(phi-1) * np.exp(-temp)
        cnt+=1
#    print cntt,'cnt'
    plt.plot(zeta,1.-fzeta.cumsum()/100.,liness[cntt],label=r'$\chi$ = '+str(ch))
    cntt+=1
#plt.xlabel(r'$\zeta$ ($ln(\alpha / \tan \beta $)')
plt.xlabel(r'$\zeta$')
#plt.ylabel(r'$\frac{A_c}{A}$')
plt.legend()
#testcase: saturated-area fraction as a function of storage S1,
# comparing the 1-parameter and 3-parameter parameterisations.
S1=np.arange(0.1,499,0.1)
sata=np.zeros(S1.size)
#FOR OVERLAND FLOW: 1 par implementation!
chi=2.48
phi=1.0
loglambda= chi*phi+mu
for i in range(S1.size):
    nozerodivide=1.e-8 #prevent zero dividing
    # critical power-transformed index for the current storage
    Ti_sat = avepow/(S1[i]/(500.+nozerodivide))
    if Ti_sat > powval:
        # NOTE(review): Sat_area is never read; sata[i] stays at its
        # zero initialisation here -- presumably the intent.
        Sat_area = 0.0
    else:
        Ti_log = np.log(Ti_sat**nn)   # back to log space
        Ti_off=mu
        Ti_chi = (loglambda-Ti_off)/chi
        Ti_arg = max(0., Ti_log - Ti_off) / Ti_chi
        # saturated fraction = upper tail of the gamma CDF
        sata[i] = 1.0 - special.gammainc(chi, Ti_arg)
#FOR OVERLAND FLOW: 3 par implementation!
chi=1.0
phi=2.48
loglambda= chi*phi+mu
#phi=(loglambda-mu)/chi
#print phi,'phi'
#t1=sp.special.gammainc(phi,(zeta_crit-mu)/chi)
satb=np.zeros(S1.size)
for i in range(S1.size):
    nozerodivide=1.e-8 #prevent zero dividing
    Ti_sat = avepow/(S1[i]/(500.+nozerodivide))
    if Ti_sat > powval:
        Sat_area = 0.0
    else:
        Ti_log = np.log(Ti_sat**nn)
        Ti_off=mu
#        Ti_chi = (loglambda-Ti_off)/chi
        Ti_arg = max(0., Ti_log - Ti_off) / chi
#        satb[i] = 1.0 - special.gammainc(phi, Ti_arg)
        # gammaincc is the complemented (upper) incomplete gamma function
        satb[i] = special.gammaincc(phi, Ti_arg)
plt.figure()
plt.plot(S1,sata)
plt.plot(S1,satb)
# Dutch title: "both implementations effectively analogous"
plt.title('BEIDE IMPLEMENTATIES EFFECTIEF ANALOOG')
###############################################################################
#CONCLUSION:
# Both are equal, but chi and phi get interchanged meaning!
###############################################################################
# Parameter dictionary for the runoff time-delay routine; 'mut' is the
# mean time delay (hours).
set_par={}
set_par['mut']=2.
def qtimedelay(set_par, deltim=1., alpha=3.0):
    '''
    Gamma-function based weight function to control the runoff delay.

    Fills ``set_par['frac_future']`` with the fraction of instantaneous
    runoff that leaves in each future time step, i.e. the discretised
    gamma distribution with shape ``alpha`` and mean ``set_par['mut']``.

    Parameters
    ----------
    set_par : dict
        Parameter dictionary; must contain 'mut' (mean delay, in the same
        time unit as ``deltim``).  'frac_future' is added in place.
    deltim : float, optional
        Width of one time step in hours (default 1).
    alpha : float, optional
        Shape parameter of the gamma distribution (default 3.0, the value
        that was previously hard-coded).

    Returns
    -------
    dict
        The same ``set_par`` dictionary, with 'frac_future' added.
    '''
    alamb = alpha / set_par['mut']   # rate parameter of the gamma pdf
    psave = 0.0                      # CDF value at the previous time step
    # BUG FIX: np.zeros() needs an integer size; 500. (float) raises a
    # TypeError on modern numpy.
    set_par['frac_future'] = np.zeros(500)  # Parameter added
    ntdh = set_par['frac_future'].size
    # BUG FIX: the original re-assigned deltim=1. here, silently ignoring
    # the deltim value passed by the caller.
    print('qtimedelay is calculated with a unit of %s hours' % deltim)
    cumprob = 0.0
    for jtim in range(ntdh):
        tfuture = jtim * deltim
        # regularised incomplete gamma function == gamma CDF, so the
        # division by gamma(alpha) of the Fortran original is not needed
        cumprob = special.gammainc(alpha, alamb * tfuture)
        # mass of this bin = CDF difference (clipped for round-off)
        set_par['frac_future'][jtim] = max(0., cumprob - psave)
        psave = cumprob
    # BUG FIX: this warning used to be printed inside the loop on every
    # iteration with cumprob < 0.99; warn once about the final coverage.
    if cumprob < 0.99:
        print('not enough bins in the frac_future')
    # make sure the weights sum to one
    set_par['frac_future'][:] = set_par['frac_future'][:] / set_par['frac_future'][:].sum()
    return set_par
# Plot the runoff time-delay weights for a 24-hour time step.
tt=qtimedelay(set_par,deltim=24)
plt.figure()
plt.plot(tt['frac_future'])
###############################################################################
## plot the distribution
###############################################################################
# Gamma pdf of the topographic index with Sivapalan et al. (1987) values.
mu=3.82
chi=1.0
phi=2.48
loglambda= chi*phi+mu
zeta=np.arange(mu,14.,0.01)
fzeta=np.zeros(zeta.size)
cnt=0
for i in zeta:
    temp=(i-mu)/chi
    fzeta[cnt]=1./(chi*special.gamma(phi)) * temp**(phi-1) * np.exp(-temp)
    cnt+=1
# 1 - CDF (cumsum/100 because the grid step is 0.01)
plt.plot(zeta,1.-fzeta.cumsum()/100.)
plt.xlabel(r'Topographic Index ($ln(\alpha / \tan \beta $)')
plt.ylabel(r'Ac/A')
| StarcoderdataPython |
1637353 | from ddq.topics.logics.logic import Node
class Quantifier(Node):
    """Marker base class for quantifier nodes in a logic expression tree;
    adds no behaviour beyond ``Node``."""
    pass
| StarcoderdataPython |
1760134 | from __future__ import print_function
import logging
import os
import random
import numpy as np
from tqdm import tqdm, trange
from transformers_config import *
import torch
from torch.utils.data import DataLoader, RandomSampler
## Optimization
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from data_utils import (
SquadResult,
SquadV1Processor,
SquadV2Processor,
squad_convert_examples_to_features
)
logger = logging.getLogger(__name__)
import argparse
from utils import no_decay, evaluate, get_arguments
def run(args, device, fine_tune_config, out_dir, writer):
    """Fine-tune a transformer QA model on TyDiQA and evaluate periodically.

    Loads config/tokenizer/model for ``args.trans_model`` (or from
    ``args.cache_dir`` when set), converts train and per-language test
    examples to SQuAD-style features, trains with AdamW + linear warmup,
    logs losses/LR to ``writer`` (TensorBoard) and writes checkpoints plus
    per-language evaluation every ``fine_tune_config["save_steps"]``
    optimizer steps.

    NOTE(review): ``opt_config`` is read from module scope below rather
    than passed in, and the outer epoch loop multiplies with the inner
    ``opt_config["epoch"]`` loop -- confirm the double epoch loop is
    intended.
    """
    model_name, tokenizer_class, model_class, config_class, qa_class = MODELS_dict[args.trans_model]
    if args.cache_dir == "":
        config = config_class.from_pretrained(args.config_name if args.config_name else model_name,
                                              cache_dir=args.cache_dir if args.cache_dir != "" else None)
    else:
        config = config_class.from_pretrained(args.cache_dir)
    # Set usage of language embedding to True if model is xlm
    if args.model_type == "xlm":
        config.use_lang_emb = True
    if args.cache_dir == "":
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else model_name,
                                                    do_lower_case=args.do_lower_case,
                                                    cache_dir=args.cache_dir if args.cache_dir != "" else None)
        model = qa_class.from_pretrained(model_name,
                                         from_tf=bool(".ckpt" in model_name),
                                         config=config,
                                         cache_dir=args.cache_dir if args.cache_dir != "" else None)
    else:
        tokenizer = tokenizer_class.from_pretrained(args.cache_dir)
        model = qa_class.from_pretrained(args.cache_dir)
    # XLM needs an explicit language-id mapping for its inputs
    lang2id = config.lang2id if args.model_type == "xlm" else None
    model.to(device)
    processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
    ## TRAIN EXAMPLES
    train_examples = processor.get_train_examples(args.data_dir, task="tydiqa", languages=args.train_langs)
    print("Train examples convertion to features")
    train_features, train_dataset = squad_convert_examples_to_features(
        examples=train_examples,
        tokenizer=tokenizer,
        max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride,
        max_query_length=args.max_query_length,
        is_training=True,
        return_dataset="pt",
        threads=8,
        lang2id=lang2id)
    ### TEST EXAMPLES (one feature/dataset dict entry per test language)
    test_features = {}
    test_dataset = {}
    test_examples = {}
    for lang in args.test_langs:
        test_examples.update({lang: processor.get_test_examples(args.data_dir, task="tydiqa", language=lang)})
        # NOTE(review): print() does not interpolate %-placeholders; this
        # prints the literal format string plus the arguments.
        print("Test examples convertion to features %s len(test_examples[lang]):%d", lang, len(test_examples[lang]))
        test_features_lang, test_dataset_lang = squad_convert_examples_to_features(examples=test_examples[lang],
                                                                                   tokenizer=tokenizer,
                                                                                   max_seq_length=args.max_seq_length,
                                                                                   doc_stride=args.doc_stride,
                                                                                   max_query_length=args.max_query_length,
                                                                                   is_training=True,
                                                                                   return_dataset="pt",
                                                                                   threads=8,
                                                                                   lang2id=lang2id)
        test_features.update({lang: test_features_lang})
        test_dataset.update({lang: test_dataset_lang})
    ### Training
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=fine_tune_config["batch_size"])
    num_train_epochs = args.epoch
    train_dataloader_num = len(train_dataloader)
    t_total = train_dataloader_num // fine_tune_config["gradient_accumulation_steps"] * num_train_epochs
    # No weight decay for bias / LayerNorm parameters (names in no_decay)
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": fine_tune_config["weight_decay"],
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=fine_tune_config["adam_lr"], eps=fine_tune_config["adam_eps"])
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=fine_tune_config["warmup_steps"],
                                                num_training_steps=t_total)
    local_rank = -1
    print("TRAIN FROM SCRATCH:")
    epochs_trained = 0
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(num_train_epochs), desc="Epoch", disable=local_rank not in [-1, 0])
    global_step, tr_loss, logging_loss = 0, 0.0, 0.0
    for _ in train_iterator:
        # NOTE(review): nested epoch loop -- the data is iterated
        # num_train_epochs * opt_config["epoch"] times in total.
        for _ in tqdm(range(opt_config["epoch"])):
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=local_rank not in [-1, 0])
            for step, batch in enumerate(epoch_iterator):
                model.train()
                batch = tuple(t.to(device) for t in batch)
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "token_type_ids": None if args.model_type in ["xlm", "xlm-roberta", "distilbert"] else batch[2],
                    "start_positions": batch[3],
                    "end_positions": batch[4],
                }
                if args.model_type in ["xlnet", "xlm"]:
                    inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
                    if args.version_2_with_negative:
                        inputs.update({"is_impossible": batch[7]})
                # NOTE(review): batch[7] doubles as "is_impossible" above and
                # as language ids here for xlm + v2 -- verify this is intended.
                if args.model_type == "xlm":
                    inputs["langs"] = batch[7]
                outputs = model(**inputs)
                loss = outputs[0]
                loss = loss.mean()
                if fine_tune_config["gradient_accumulation_steps"] > 1:
                    loss = loss / fine_tune_config["gradient_accumulation_steps"]
                loss.backward()
                tr_loss += loss.item()
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                if (step + 1) % fine_tune_config["gradient_accumulation_steps"] == 0:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), fine_tune_config["max_grad_norm"])
                    global_step += 1
                    ## Write loss metrics
                    writer.add_scalar("fine_tune_lr", scheduler.get_lr()[0], global_step)
                    writer.add_scalar("FINE_TUNE_loss", (tr_loss - logging_loss) / fine_tune_config["logging_steps"],
                                      global_step)
                    logging_loss = tr_loss
                    if fine_tune_config["save_steps"] > 0 and global_step % fine_tune_config["save_steps"] == 0:
                        output_dir = os.path.join(out_dir, "checkpoint-{}".format(global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # Take care of distributed/parallel training
                        model_to_save = model.module if hasattr(model, "module") else model
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        torch.save(args, os.path.join(output_dir, "training_args.bin"))
                        logger.info("Saving model checkpoint to %s", output_dir)
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        logger.info("Saving optimizer and scheduler states to %s", output_dir)
                    if global_step % fine_tune_config["save_steps"] == 0:
                        # evaluate on every test language at each checkpoint
                        for lang in args.test_langs:
                            test_results = evaluate(tokenizer, model, test_examples[lang], lang, "test", args.model_type,
                                                    out_dir, fine_tune_config["n_best_size"], fine_tune_config["max_answer_length"],
                                                    args.version_2_with_negative, args.verbose_logging, args.do_lower_case,
                                                    args.null_score_diff_threshold, lang2id, device, args)
                            print("PRE-TRAIN TEST on :", lang, " test_results:", test_results)
                            for key, value in test_results.items():
                                writer.add_scalar("PRE_TRAIN_test_{}_{}".format(lang, key), value, global_step)
def set_seed(args):
    """Seed the python, numpy and torch RNGs from ``args.seed`` for
    reproducibility; also seeds every CUDA device when more than one
    GPU is visible."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    n_gpus = torch.cuda.device_count()
    print("devices:", n_gpus)
    if n_gpus > 1:
        torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
    args = get_arguments()
    set_seed(args)
    """ Config Parameters """
    # Hyper-parameters of the fine-tuning stage (passed into run()).
    pre_train_config = {"pre_train_steps": args.pre_train_steps, "batch_size": args.batch_size, "adam_lr": args.adam_lr,
                        "adam_eps": args.adam_eps, "gradient_accumulation_steps": args.gradient_accumulation_steps,
                        "warmup_steps": args.warmup_steps, "max_grad_norm": args.max_grad_norm,
                        "save_steps": args.save_steps, "weight_decay": args.weight_decay,
                        "logging_steps": args.logging_steps, "eval_batch_size": args.eval_batch_size,
                        "n_best_size": args.n_best_size, "max_answer_length": args.max_seq_length}
    # Few-shot data layout (used only to build the output path below).
    data_config = {"n_way": args.n_way, "k_spt": args.k_spt, "q_qry": args.q_qry, "batch_sz": args.batch_sz}
    # Meta-learning options; run() reads opt_config from module scope.
    opt_config = {"epoch": args.epoch, "n_task": args.n_task, "n_up_train_step": args.n_up_train_step,
                  "n_up_test_step": args.n_up_test_step, "alpha_lr": args.alpha_lr, "beta_lr": args.beta_lr,
                  "gamma_lr": args.gamma_lr}
    """ Output Directory """
    if args.use_adapt:
        flag_adapt = "use_adapt/"
    else:
        flag_adapt = "no_adapt/"
    freeze_bert_flag = ""
    if len(args.freeze_bert) > 0:
        freeze_bert_flag = "freeze_bert_" + ",".join(args.freeze_bert)
    out_dir = os.path.join(args.out_dir, "PRE_TRAIN_SEED_"+str(args.seed)+"/train_"+",".join(args.train_langs)+"-test_"+",".join(args.test_langs)
                           + "/l2l/kspt_" + str(data_config["k_spt"]) + "-qqry_" + str(data_config["q_qry"])
                           + "/en_train_set/" + freeze_bert_flag + "/few_shot_"+",".join(args.dev_langs)+"/"
                           + flag_adapt)
    print("Saving in out_dir:", out_dir)
    writer = SummaryWriter(os.path.join(out_dir, 'runs'))
    """ Cuda/CPU device setup"""
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        n_gpu = 1
    # BUG FIX: run() takes (args, device, fine_tune_config, out_dir, writer);
    # the old call passed ~26 positional arguments and raised a TypeError.
    run(args, device, pre_train_config, out_dir, writer)
| StarcoderdataPython |
1776204 | <filename>algorithm/python/quick_sort.py
#---------------------------------------------------------------
# QUICK SORT
#---------------------------------------------------------------
# V0
# steps
# 0) get pivot (last element from original array)
# 1) init big, small sub array
# 2) loop over element
# -> put "< pivot" elements to small sub array,
# -> put "> pivot" elements to small big array
# 3) run same algorithm on sub array, big array
# 4) return result
def quick_sort(arr):
    """Return a new list with the elements of *arr* sorted ascending.

    Recursive quicksort using the last element as pivot.

    BUG FIX: the previous version ``pop``-ed the pivot off the input list,
    permanently removing the caller's last element; the input is now left
    unmodified.

    Average O(n log n); worst case O(n^2) (e.g. already-sorted input).
    """
    # base case: 0 or 1 elements are already sorted
    if len(arr) < 2:
        return arr
    # use last element as first pivot (without mutating the input)
    pivot = arr[-1]
    # partition everything before the pivot into small (<= pivot) / big (> pivot)
    small = [x for x in arr[:-1] if x <= pivot]
    big = [x for x in arr[:-1] if x > pivot]
    # recursively sort both partitions around the pivot
    return quick_sort(small) + [pivot] + quick_sort(big)
# V1
# https://github.com/yennanliu/Python/blob/master/sorts/quick_sort.py
from __future__ import annotations
def quick_sort(collection: list) -> list:
    """
    A pure Python implementation of quick sort algorithm
    :param collection: a collection of comparable items (left unmodified)
    :return: a new list with the same items ordered ascending
    Examples:
    >>> quick_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> quick_sort([])
    []
    >>> quick_sort([-2, 5, 0, -45])
    [-45, -2, 0, 5]
    """
    if len(collection) < 2:
        return collection
    # BUG FIX: the previous version pop()-ed the pivot off the caller's
    # list, permanently removing its last element.
    pivot = collection[-1]  # Use the last element as the first pivot
    greater: list[int] = []  # All elements greater than pivot
    lesser: list[int] = []  # All elements less than or equal to pivot
    for element in collection[:-1]:
        (greater if element > pivot else lesser).append(element)
    return quick_sort(lesser) + [pivot] + quick_sort(greater)
# if __name__ == "__main__":
# user_input = input("Enter numbers separated by a comma:\n").strip()
# unsorted = [int(item) for item in user_input.split(",")]
# print(quick_sort(unsorted))
# V1'
# https://github.com/yennanliu/Python/blob/master/sorts/recursive_quick_sort.py
# IDEA : recursive quick sort
def quick_sort(data: list) -> list:
    """Return *data* sorted ascending via recursive quicksort.

    The first element is used as the pivot; the input itself is never
    modified.

    >>> for data in ([2, 1, 0], [2.2, 1.1, 0], "quick_sort"):
    ...     quick_sort(data) == sorted(data)
    True
    True
    True
    """
    # base case: empty or single-element sequences are already sorted
    if len(data) <= 1:
        return data
    pivot, rest = data[0], data[1:]
    below = [item for item in rest if item <= pivot]
    above = [item for item in rest if item > pivot]
    return quick_sort(below) + [pivot] + quick_sort(above)
# if __name__ == "__main__":
# import doctest
#
# doctest.testmod()
# V1''
# https://github.com/yennanliu/Python/blob/master/sorts/quick_sort_3_partition.py
# IDEA : quick sort partition
def quick_sort_3partition(sorting: list, left: int, right: int) -> None:
    """Sort ``sorting[left:right + 1]`` in place with 3-way quicksort.

    Dutch-national-flag partitioning around ``sorting[left]``: elements
    equal to the pivot are grouped in the middle, so inputs with many
    duplicates degrade gracefully.

    :param sorting: list to sort (modified in place)
    :param left: left endpoint of the region (inclusive)
    :param right: right endpoint of the region (inclusive)
    """
    if right <= left:
        return
    a = i = left   # a: end of the "< pivot" region; i: scan position
    b = right      # b: start of the "> pivot" region
    pivot = sorting[left]
    while i <= b:
        if sorting[i] < pivot:
            # move the small element into the left region
            sorting[a], sorting[i] = sorting[i], sorting[a]
            a += 1
            i += 1
        elif sorting[i] > pivot:
            # move the large element into the right region; do not advance
            # i -- the swapped-in element is still unexamined
            sorting[b], sorting[i] = sorting[i], sorting[b]
            b -= 1
        else:
            i += 1   # equal to pivot: leave in the middle
    # recurse on the strictly-smaller and strictly-larger partitions
    quick_sort_3partition(sorting, left, a - 1)
    quick_sort_3partition(sorting, b + 1, right)
def quick_sort_lomuto_partition(sorting: list, left: int, right: int) -> None:
    """In-place quicksort of ``sorting[left:right + 1]`` using the Lomuto
    partition scheme:
    https://en.wikipedia.org/wiki/Quicksort#Lomuto_partition_scheme

    :param sorting: list to sort (modified in place)
    :param left: left endpoint of the region to sort (inclusive)
    :param right: right endpoint of the region to sort (inclusive)
    :return: None

    Examples:
    >>> nums1 = [0, 5, 3, 1, 2]
    >>> quick_sort_lomuto_partition(nums1, 0, 4)
    >>> nums1
    [0, 1, 2, 3, 5]
    >>> nums3 = [-2, 5, 0, -4]
    >>> quick_sort_lomuto_partition(nums3, 0, 3)
    >>> nums3
    [-4, -2, 0, 5]
    """
    # nothing to do for empty or single-element ranges
    if left >= right:
        return
    split = lomuto_partition(sorting, left, right)
    # recurse on both sides of the pivot's final position
    quick_sort_lomuto_partition(sorting, left, split - 1)
    quick_sort_lomuto_partition(sorting, split + 1, right)
def lomuto_partition(sorting: list, left: int, right: int) -> int:
    """Partition ``sorting[left:right + 1]`` in place around the pivot
    ``sorting[right]`` and return the pivot's final index.

    Elements strictly smaller than the pivot end up left of it.

    Example:
    >>> lomuto_partition([1, 5, 7, 6], 0, 3)
    2
    """
    pivot = sorting[right]
    boundary = left   # first index NOT known to hold an element < pivot
    for idx in range(left, right):
        if sorting[idx] < pivot:
            # grow the "smaller than pivot" prefix by one element
            sorting[boundary], sorting[idx] = sorting[idx], sorting[boundary]
            boundary += 1
    # drop the pivot into its final slot
    sorting[right], sorting[boundary] = sorting[boundary], sorting[right]
    return boundary
def three_way_radix_quicksort(sorting: list) -> list:
    """Return a new ascending list using three-way quicksort:
    https://en.wikipedia.org/wiki/Quicksort#Three-way_radix_quicksort

    Splits the input into "less than", "equal to" and "greater than" the
    first element, then recursively sorts the outer two partitions.

    >>> three_way_radix_quicksort([])
    []
    >>> three_way_radix_quicksort([1])
    [1]
    >>> three_way_radix_quicksort([-5, -2, 1, -2, 0, 1])
    [-5, -2, -2, 0, 1, 1]
    >>> three_way_radix_quicksort([1, 2, 5, 1, 2, 0, 0, 5, 2, -1])
    [-1, 0, 0, 1, 1, 2, 2, 2, 5, 5]
    """
    if len(sorting) <= 1:
        return sorting
    pivot = sorting[0]
    smaller, equal, larger = [], [], []
    for value in sorting:
        if value < pivot:
            smaller.append(value)
        elif value > pivot:
            larger.append(value)
        else:
            equal.append(value)
    return three_way_radix_quicksort(smaller) + equal + three_way_radix_quicksort(larger)
# if __name__ == "__main__":
# import doctest
#
# doctest.testmod(verbose=True)
#
# user_input = input("Enter numbers separated by a comma:\n").strip()
# unsorted = [int(item) for item in user_input.split(",")]
# quick_sort_3partition(unsorted, 0, len(unsorted) - 1)
# print(unsorted) | StarcoderdataPython |
3203302 | # Generated by Django 2.0.3 on 2018-06-18 22:12
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a creation timestamp to AnswerComment.
    # Must be applied after the previous zhihu migration.
    dependencies = [
        ('zhihu', '0003_auto_20180616_2256'),
    ]
    operations = [
        # Add 'add_time' with auto_now_add; the one-off default
        # (timezone.now) only back-fills existing rows, hence
        # preserve_default=False.  verbose_name is Chinese for "add time".
        migrations.AddField(
            model_name='answercomment',
            name='add_time',
            field=models.DateTimeField(auto_now_add=True,
                                       default=django.utils.timezone.now,
                                       verbose_name='添加时间'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
91491 | # Generated by Django 2.2.5 on 2020-11-12 01:58
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the 'active', 'created' and
    # 'modified' bookkeeping fields from CostCenter, Employee and Unity.
    dependencies = [
        ('users', '0011_auto_20201024_1533'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='costcenter',
            name='active',
        ),
        migrations.RemoveField(
            model_name='costcenter',
            name='created',
        ),
        migrations.RemoveField(
            model_name='costcenter',
            name='modified',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='active',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='created',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='modified',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='active',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='created',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='modified',
        ),
    ]
| StarcoderdataPython |
3302228 | #coding:utf-8
#
# id: bugs.core_0053
# title: FIRST 1 vs ORDER DESC vs explicit plan (ODS11)
# decription:
# Test uses pre-created database which has several procedures for analyzing performance by with the help of MON$ tables.
# Performance results are gathered in the table STAT_LOG, each odd run will save mon$ counters with "-" sign and next
# (even) run will save them with "+" -- see SP_GATHER_STAT.
# Aggegation of results is done in the view V_AGG_STAT (negative values relate to start, positive to the end of measure,
# difference between them means performance expenses which we want to evaluate).
# NOTE. Before each new measure we have to set generator G_GATHER_STAT to zero in order to make it produce proper values
# starting with 1 (odd --> NEGATIVE sign for counters). This is done in SP_TRUNCATE_STAT.
#
# :::::::::::::::::::::::::::::::::::::::: NB ::::::::::::::::::::::::::::::::::::
# 18.08.2020. FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus next call
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
# Because of this, it was decided to change code of SP_TRUNCATE_STAT: instead of 'alter sequence restart...' we do
# reset like this: c = gen_id(g_gather_stat, -gen_id(g_gather_stat, 0));
#
# Checked on:
# 4.0.0.2164 SS: 2.511s.
# 4.0.0.2164 CS: 2.533s.
# 3.0.7.33356 SS: 1.495s.
# 3.0.7.33356 CS: 2.865s.
# 2.5.9.27150 SC: 0.730s.
#
# tracker_id: CORE-0053
# min_versions: ['2.5.1']
# versions: 2.5.1
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.1
# resources: None
substitutions_1 = []  # no output substitutions are applied by the act
init_script_1 = """"""  # database comes pre-initialised from the backup
# Fixture restored from a backup that already contains the MON$-based
# statistics helpers (sp_truncate_stat, sp_gather_stat, v_agg_stat).
db_1 = db_factory(from_backup='mon-stat-gathering-2_5.fbk', init=init_script_1)
# Test script: populate a table, measure indexed reads for two equivalent
# explicit plans (CORE-0053) and assert their cost ratio stays below 3.
test_script_1 = """
set list on;
create or alter procedure gendata as begin end;
recreate table test (F1 integer, F2 date);
commit;
set term ^;
create or alter procedure GenData as
declare i integer;
begin
i= 0;
while (i < 100000) do begin
insert into test(F1, F2) values (:i, 'yesterday');
i= i+1;
end
end
^
set term ;^
commit;
execute procedure gendata;
commit;
create desc index test_f1_f2 on test(F1, F2);
commit;
execute procedure sp_truncate_stat;
commit;
-- #################### MEASURE-1 #################
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
select first 1 f1
from test t
where t.f1=17 and f2 <= 'today'
plan (T order test_f1_f2)
order by F1 desc, F2 desc;
set plan off;
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
-- #################### MEASURE-2 #################
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
select first 1 f1
from test t
where t.f1=17 and f2 <= 'today'
plan (t order test_f1_f2 index (test_f1_f2))
order by F1 desc, F2 desc;
set plan off;
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
-- #################### ANALYZING RESULTS #################
set list on;
select
iif( idx_1 / idx_2 > max_ratio, 'PLAN (T ORDER <idx_name>) is slow! Ratio > ' || max_ratio,
iif( idx_2 / idx_1 > max_ratio, 'PLAN (T ORDER <idx_name> INDEX(<idx_name>)) is slow! Ratio > '|| max_ratio,
'PERFORMANCE IS THE SAME.'
)
) result
from (
select
cast(min(idx_1) as double precision) as idx_1,
cast( min(idx_2) as double precision) as idx_2,
3.00 as max_ratio
from (
select iif(rowset=1, indexed_reads, null) idx_1, iif(rowset=2, indexed_reads, null) idx_2
from v_agg_stat
) g
);
-- Difference of indexed reads that is reported by MON$ tables:
-- on 2.5 = {5, 5}, on 3.0 = {5, 3} ==> ratio 3.00 should be always enough.
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected ISQL output (after whitespace cleaning by the act).
expected_stdout_1 = """
PLAN (T ORDER TEST_F1_F2)
F1 17
PLAN (T ORDER TEST_F1_F2 INDEX (TEST_F1_F2))
F1 17
RESULT PERFORMANCE IS THE SAME.
"""
@pytest.mark.version('>=2.5.1')
def test_1(act_1: Action):
    """Run test_script_1 via ISQL and assert the cleaned output matches
    expected_stdout_1 (requires Firebird >= 2.5.1)."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| StarcoderdataPython |
63797 | import pandas as pd
import urllib.request
# Linear pathway data
BASE_URL = "https://github.com/sys-bio/network-modeling-summer-school-2021/raw/main/"
BASE_DATA_URL = "%sdata/" % BASE_URL
BASE_MODULE_URL = "%ssrc/" % BASE_URL
BASE_MODEL_URL = "%smodels/" % BASE_URL
LOCAL_FILE = "local_file.txt"
def getData(csvFilename):
"""
Creates a dataframe from a CSV structured URL file.
Parameters
----------
csvFilename: str
Name of the CSV file (w/o ".csv" extension)
Returns
-------
pd.DataFrame
"""
url = "%s%s.csv" % (BASE_DATA_URL, csvFilename)
filename, _ = urllib.request.urlretrieve(url, filename=LOCAL_FILE)
return pd.read_csv(LOCAL_FILE)
def getModule(moduleName):
    """
    Download common code from the github repository.

    Parameters
    ----------
    moduleName: str
        name of the python module in the src directory

    Returns
    -------
    str
        The module's source code.
    """
    url = "%s%s.py" % (BASE_MODULE_URL, moduleName)
    urllib.request.urlretrieve(url, filename=LOCAL_FILE)
    with open(LOCAL_FILE, "r") as fd:
        return fd.read()
def getModel(modelName):
    """
    Download an antimony model definition from the repository.

    Parameters
    ----------
    modelName: str
        Name of the model w/o ".ant"

    Returns
    -------
    str
        The antimony model source.
    """
    url = "%s%s.ant" % (BASE_MODEL_URL, modelName)
    urllib.request.urlretrieve(url, filename=LOCAL_FILE)
    with open(LOCAL_FILE, "r") as fd:
        return fd.read()
# Set models
# NOTE: these run at import time and hit the network; importing this
# module without connectivity will fail.
WOLF_MODEL = getModel("wolf")
WOLF_DF = getData("wolf")
WOLF_ARR = WOLF_DF.to_numpy()
LINEAR_PATHWAY_DF = getData("linear_pathway")
LINEAR_PATHWAY_ARR = LINEAR_PATHWAY_DF.to_numpy()
LINEAR_PATHWAY_MODEL = getModel("linear_pathway")
| StarcoderdataPython |
3310020 | # -*- coding: utf-8 -*-
import io
import socket
import struct
import time
# Sizes (bytes) of the fixed fields in a Bruce datagram header.
MSG_SIZE_FIELD_SIZE = 4
API_KEY_FIELD_SIZE = 2
API_VERSION_FIELD_SIZE = 2
FLAGS_FIELD_SIZE = 2
PARTITION_KEY_FIELD_SIZE = 4
TOPIC_SIZE_FIELD_SIZE = 2
TIMESTAMP_FIELD_SIZE = 8
KEY_SIZE_FIELD_SIZE = 4
VALUE_SIZE_FIELD_SIZE = 4
# Total fixed overhead of an "any partition" message (no partition key).
ANY_PARTITION_FIXED_BYTES = MSG_SIZE_FIELD_SIZE + API_KEY_FIELD_SIZE + \
        API_VERSION_FIELD_SIZE + FLAGS_FIELD_SIZE + \
        TOPIC_SIZE_FIELD_SIZE + TIMESTAMP_FIELD_SIZE + \
        KEY_SIZE_FIELD_SIZE + VALUE_SIZE_FIELD_SIZE
# A "partition key" message adds the 4-byte partition field.
PARTITION_KEY_FIXED_BYTES = ANY_PARTITION_FIXED_BYTES + \
        PARTITION_KEY_FIELD_SIZE
# Protocol identifiers for the two message kinds.
ANY_PARTITION_API_KEY = 256
ANY_PARTITION_API_VERSION = 0
PARTITION_KEY_API_KEY = 257
PARTITION_KEY_API_VERSION = 0
def create_msg(partition, topic, key_bytes, value_bytes):
    """Serialize a Bruce "PartitionKey" datagram.

    Wire layout (all integers big-endian):
    total_size | api_key | api_version | flags | partition | topic_len
    | topic | timestamp_ms | key_len | key | value_len | value

    Returns the complete datagram as a byte string.
    """
    topic_bytes = bytes(topic)
    total_size = (PARTITION_KEY_FIXED_BYTES + len(topic_bytes)
                  + len(key_bytes) + len(value_bytes))
    header = struct.pack('>ihhhih', total_size,
                         PARTITION_KEY_API_KEY,
                         PARTITION_KEY_API_VERSION, 0,   # flags == 0
                         partition, len(topic_bytes))
    # current wall-clock time in milliseconds, followed by the key length
    middle = struct.pack('>qi', int(time.time() * 1000), len(key_bytes))
    return b''.join([header, topic_bytes, middle, key_bytes,
                     struct.pack('>i', len(value_bytes)), value_bytes])
def open_bruce_socket():
    """Return an unconnected UNIX-domain datagram socket for talking to
    the Bruce daemon; the caller is responsible for closing it."""
    return socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
| StarcoderdataPython |
1651751 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This Module implements SQLFlow Step in Couler'''
from os import path
import couler.argo as couler
def escape_sql(original_sql):
    '''Escape characters that are special inside a double-quoted shell
    string: backslash (first, so later escapes are not doubled), double
    quote, backtick and dollar sign.'''
    escaped = original_sql.replace('\\', '\\\\')
    for ch in ('"', '`', '$'):
        escaped = escaped.replace(ch, '\\' + ch)
    return escaped
def sqlflow(sql,
            image="sqlflow/sqlflow",
            env=None,
            secret=None,
            resources=None,
            log_file=None):
    '''sqlflow step call run_container to append a workflow step.

    Builds the shell command that runs the SQLFlow ``step`` binary on the
    (shell-escaped) SQL statement.  When ``log_file`` is given, the command
    additionally initialises the step container, tees stdout+stderr into
    ``log_file`` and sleeps SQLFLOW_WORKFLOW_EXIT_TIME_WAIT seconds before
    exiting so the step pod is not recycled too fast.
    '''
    if not log_file:
        command = '''step -e "%s"''' % escape_sql(sql)
    else:
        # wait for some seconds to exit in case the
        # step pod is recycled too fast
        exit_time_wait = 0
        if isinstance(env, dict):
            # value stays a string; it is only interpolated into "sleep %s"
            exit_time_wait = env.get("SQLFLOW_WORKFLOW_EXIT_TIME_WAIT", "0")
        log_dir = path.dirname(log_file)
        command = "".join([
            "if [[ -f /opt/sqlflow/init_step_container.sh ]]; "
            "then bash /opt/sqlflow/init_step_container.sh; fi",
            " && set -o pipefail",  # fail when any sub-command fail
            " && mkdir -p %s" % log_dir,
            """ && (step -e "%s" 2>&1 | tee %s)""" %
            (escape_sql(sql), log_file),
            " && sleep %s" % exit_time_wait
        ])
    couler.run_container(command=command,
                         image=image,
                         env=env,
                         secret=secret,
                         resources=resources)
| StarcoderdataPython |
147617 | #!/usr/bin/env python
import roslib
roslib.load_manifest('crazyflie_control')
import rospy
import sys
from geometry_msgs.msg import Vector3
from nav_msgs.msg import Odometry
from crazyflie_driver.msg import RPYT
import dynamic_reconfigure.server
from crazyflie_control.cfg import CrazyflieControlConfig
from math import *
import numpy as np
class CrazyflieControlNode(object):
    """PD position controller for a Crazyflie quadrotor.

    Subscribes to Vicon-based odometry, estimates velocities by finite
    differences, and publishes the actual attitude plus the desired
    roll/pitch/yaw/thrust (RPYT) commands at a fixed rate.
    """

    # Physical constants and PD gains (overridable via dynamic_reconfigure).
    mass = 1.0
    gravity = 9.801
    kpz = 1.0
    kdz = 1.0
    kpx = 1.0
    kpy = 1.0
    kdx = 1.0
    kdy = 1.0
    # Desired position setpoints (set through dynamic_reconfigure).
    xd = 0.0
    yd = 0.0
    zd = 0.0
    # Estimated velocities (finite-differenced in set_odometry).
    xp = 0.0
    yp = 0.0
    zp = 0.0
    # Current position (metres) and attitude quaternion (w=q0, x=q1, y=q2, z=q3).
    x = 0.0
    y = 0.0
    z = 0.0
    q0 = 1.0
    q1 = 0.0
    q2 = 0.0
    q3 = 0.0
    last_odometry_update = rospy.Time()

    def __init__(self, default_name='apollo', default_update_rate=100):
        self.default_name = default_name
        self.default_update_rate = default_update_rate
        rospy.init_node('crazyflie_control')
        self._init_params()
        self._init_pubsub()
        dynamic_reconfigure.server.Server(CrazyflieControlConfig, self.reconfigure)
        self.last_odometry_update = rospy.get_rostime()

    def _init_params(self):
        # Node parameters with defaults from the constructor.
        self.name = rospy.get_param('~name', self.default_name)
        self.update_rate = rospy.get_param('~update_rate', self.default_update_rate)

    def _init_pubsub(self):
        self.vicon_sub = rospy.Subscriber('/' + self.name + '/odom', Odometry, self.set_odometry)
        self.rotation_desired_pub = rospy.Publisher('/' + self.name + '/rotation_desired', RPYT)
        self.rotation_actual_pub = rospy.Publisher('/' + self.name + '/rotation_actual', Vector3)

    def set_odometry(self, msg):
        """Odometry callback: update pose and velocity estimates."""
        now = rospy.get_rostime()
        # BUG FIX: elapsed time is (now - last update); the original computed
        # (last update - now), a negative duration that flipped the sign of
        # every velocity estimate below.
        dt = now - self.last_odometry_update
        x_old = self.x
        y_old = self.y
        z_old = self.z
        # Incoming positions are in millimetres; convert to metres.
        self.x = msg.pose.pose.position.x * 0.001
        self.y = msg.pose.pose.position.y * 0.001
        self.z = msg.pose.pose.position.z * 0.001
        self.q1 = msg.pose.pose.orientation.x
        self.q2 = msg.pose.pose.orientation.y
        self.q3 = msg.pose.pose.orientation.z
        self.q0 = msg.pose.pose.orientation.w
        # BUG FIX: the filtered velocity estimates belong in xp/yp/zp (the
        # velocities used by the controller in spin()); the original wrote
        # them into xd/yd/zd, clobbering the desired position setpoints set
        # through dynamic_reconfigure.
        self.xp = (2.0 / dt.to_sec()) * (self.x - x_old) - self.xp
        self.yp = (2.0 / dt.to_sec()) * (self.y - y_old) - self.yp
        self.zp = (2.0 / dt.to_sec()) * (self.z - z_old) - self.zp
        self.last_odometry_update = now

    def reconfigure(self, config, level):
        """dynamic_reconfigure callback: update gains and setpoints."""
        self.kpx = config['kpx']
        self.kpy = config['kpy']
        self.kpz = config['kpz']
        self.kdx = config['kdx']
        self.kdy = config['kdy']
        self.kdz = config['kdz']
        self.xd = config['xd']
        self.yd = config['yd']
        self.zd = config['zd']
        self.power = config['power']
        return config

    def spin(self):
        """Control loop: publish actual attitude and desired RPYT at update_rate."""
        rospy.loginfo("Spinning")
        r = rospy.Rate(self.update_rate)
        while not rospy.is_shutdown():
            # Body-frame gravity direction derived from the quaternion.
            gx = 2 * (self.q1*self.q3 - self.q0*self.q2)
            gy = 2 * (self.q0*self.q1 + self.q2*self.q3)
            gz = self.q0*self.q0 - self.q1*self.q1 - self.q2*self.q2 + self.q3*self.q3
            # Euler angles in degrees for the telemetry topic.
            yaw = atan2(2*self.q1*self.q2 - 2*self.q0*self.q3, 2*self.q0*self.q0 + 2*self.q1*self.q1 - 1) * 180 / pi
            pitch = atan(gx / sqrt(gy*gy + gz*gz)) * 180 / pi
            roll = atan(gy / sqrt(gx*gx + gz*gz)) * 180 / pi
            msg_actual = Vector3()
            msg_actual.x = roll
            msg_actual.y = pitch
            msg_actual.z = yaw
            self.rotation_actual_pub.publish(msg_actual)
            # BUG FIX: `[[0]*3]*3` repeats the *same* row list three times, so
            # every row assignment overwrote all three rows; build independent
            # rows instead.
            R = [[0.0] * 3 for _ in range(3)]
            # Rotation matrix from the attitude quaternion.
            # BUG FIX: R[0][1]/R[1][0] used q0*q1 where the standard
            # quaternion rotation matrix has q1*q2.
            R[0][0] = pow(self.q0, 2) + pow(self.q1, 2) - pow(self.q2, 2) - pow(self.q3, 2)
            R[0][1] = 2*self.q1*self.q2 - 2*self.q0*self.q3
            R[0][2] = 2*self.q1*self.q3 + 2*self.q0*self.q2
            R[1][0] = 2*self.q1*self.q2 + 2*self.q0*self.q3
            R[1][1] = pow(self.q0, 2) - pow(self.q1, 2) + pow(self.q2, 2) - pow(self.q3, 2)
            R[1][2] = 2*self.q2*self.q3 - 2*self.q0*self.q1
            R[2][0] = 2*self.q1*self.q3 - 2*self.q0*self.q2
            R[2][1] = 2*self.q2*self.q3 + 2*self.q0*self.q1
            R[2][2] = pow(self.q0, 2) - pow(self.q1, 2) - pow(self.q2, 2) + pow(self.q3, 2)
            r_matrix = np.matrix(R)
            # This is the thrust, should be also placed in the function below...
            f = self.mass / R[2][2] * (self.gravity - self.kpz*(self.z-self.zd) - self.kdz*self.zp)
            # Desired third column of the rotation matrix from the PD law.
            r13d = self.mass / f * (-self.kpx*(self.x-self.xd) - self.kdx*self.xp)
            r23d = self.mass / f * (-self.kpy*(self.y-self.yd) - self.kdy*self.yp)
            r33d = sqrt(1 - pow(r13d, 2) - pow(r23d, 2))
            # Rotation axis (perpendicular to the desired direction, in-plane).
            v = [0] * 3
            v[0] = -r23d
            v[1] = r13d
            v[2] = 0.0
            angle = acos(r33d)
            ca = cos(angle)
            sa = sin(angle)
            # Rodrigues' rotation matrix about axis v by `angle`.
            # BUG FIX: rows were aliased here too (see R above), and the
            # (2,0) entry had +v[1]*sa where Rodrigues' formula has -v[1]*sa.
            A = [[0.0] * 3 for _ in range(3)]
            A[0][0] = ca + pow(v[0], 2)*(1-ca)
            A[0][1] = v[0]*v[1]*(1-ca) - v[2]*sa
            A[0][2] = v[0]*v[2]*(1-ca) + v[1]*sa
            A[1][0] = v[0]*v[1]*(1-ca) + v[2]*sa
            A[1][1] = ca + pow(v[1], 2)*(1-ca)
            A[1][2] = v[1]*v[2]*(1-ca) - v[0]*sa
            A[2][0] = v[0]*v[2]*(1-ca) - v[1]*sa
            A[2][1] = v[1]*v[2]*(1-ca) + v[0]*sa
            A[2][2] = ca + pow(v[2], 2)*(1-ca)
            a_matrix = np.matrix(A)
            rd = [0] * 3
            rd[0] = r13d
            rd[1] = r23d
            rd[2] = r33d
            rd_matrix = np.matrix(rd)
            gd = np.transpose(r_matrix) * a_matrix * np.transpose(rd_matrix)
            # NOTE(review): the roll formula uses gd[1] in both arguments of
            # atan2; confirm against the controller derivation.
            eulerRollDesired = atan2(gd[1], sqrt(pow(gd[1], 2) + pow(gd[2], 2))) * 180 / pi
            eulerPitchDesired = -atan(gd[0] / sqrt(pow(gd[1], 2) + pow(gd[2], 2))) * 180 / pi
            eulerYawDesired = 0.0
            msg_desired = RPYT()
            msg_desired.roll = eulerRollDesired
            msg_desired.pitch = eulerPitchDesired
            msg_desired.yaw = eulerYawDesired
            # The `power` flag acts as a software kill switch for thrust.
            if self.power:
                msg_desired.thrust = f
            else:
                msg_desired.thrust = 0.0
            self.rotation_desired_pub.publish(msg_desired)
            r.sleep()
def crazyflie_control_main(argv):
    # Entry point: construct the controller node and block in its spin loop.
    # `argv` is accepted for the conventional ROS main signature but unused.
    c = CrazyflieControlNode()
    c.spin()


if __name__ == '__main__':
    crazyflie_control_main(sys.argv)
| StarcoderdataPython |
1613602 | """ Implements a task queue worker and routing. This is just
a template and not the actual script which is run. Actual scripts
can be found in /etc/appscale/celery/workers.
Find and replace the following:
APP_ID: Set this to the current application ID.
CELERY_CONFIGURATION: The name of the celery configuration file.
"""
import datetime
import httplib
import os
import sys
import yaml
def setup_environment():
    """Load the AppScale environment file and extend sys.path.

    Reads APPSCALE_HOME from /etc/appscale/environment.yaml and appends the
    AppServer and lib directories so their modules can be imported.
    """
    ENVIRONMENT_FILE = "/etc/appscale/environment.yaml"
    # Use a context manager so the file handle is always closed (the original
    # leaked it), and yaml.safe_load so arbitrary YAML tags cannot execute
    # code.
    with open(ENVIRONMENT_FILE) as environment_file:
        env = yaml.safe_load(environment_file.read())
    APPSCALE_HOME = env["APPSCALE_HOME"]
    sys.path.append(APPSCALE_HOME + "/AppServer")
    sys.path.append(APPSCALE_HOME + "/lib")
setup_environment()
from celery import Celery
from celery.utils.log import get_task_logger
from urlparse import urlparse
import appscale_info
import constants
from appscale.taskqueue.brokers import rabbitmq
from appscale.taskqueue.distributed_tq import TaskName
from appscale.taskqueue.tq_config import TaskQueueConfig
from appscale.taskqueue.tq_lib import TASK_STATES
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_distributed
from google.appengine.api import datastore
from google.appengine.ext import db
# Make the generated celery configuration and worker modules importable.
sys.path.append(TaskQueueConfig.CELERY_CONFIG_DIR)
sys.path.append(TaskQueueConfig.CELERY_WORKER_DIR)
# 'APP_ID' is a template placeholder replaced with the real application ID
# when this script is instantiated (see the module docstring).
app_id = 'APP_ID'
module_name = TaskQueueConfig.get_celery_worker_module_name(app_id)
celery = Celery(module_name, broker=rabbitmq.get_connection_string(),
                backend='amqp://')
# 'CELERY_CONFIGURATION' is likewise a template placeholder.
celery.config_from_object('CELERY_CONFIGURATION')
logger = get_task_logger(__name__)
# Point the App Engine datastore API at the distributed datastore used by
# the AppScale dashboard application.
master_db_ip = appscale_info.get_db_master_ip()
connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
ds_distrib = datastore_distributed.DatastoreDistributed(
    "appscaledashboard", connection_str, require_indexes=False)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
os.environ['APPLICATION_ID'] = "appscaledashboard"
# This template header and tasks can be found in appscale/AppTaskQueue/templates
| StarcoderdataPython |
194603 |
import nomad.api as api
import os
class Nomad(object):
    """Top-level Nomad HTTP API client.

    Builds a single Requester holding connection/auth state and exposes one
    read-only accessor property per Nomad API endpoint group.
    """

    def __init__(self,
                 host='127.0.0.1',
                 secure=False,
                 port=4646,
                 address=os.getenv('NOMAD_ADDR', None),
                 namespace=os.getenv('NOMAD_NAMESPACE', None),
                 token=os.getenv('NOMAD_TOKEN', None),
                 timeout=5,
                 region=os.getenv('NOMAD_REGION', None),
                 version='v1',
                 verify=False,
                 cert=()):
        """ Nomad api client
            https://github.com/jrxFive/python-nomad/

            optional arguments:
              - host (defaults 127.0.0.1), string ip or name of the nomad api server/agent that will be used.
              - port (defaults 4646), integer port that will be used to connect.
              - secure (defaults False), define if the protocol is secured or not (https or http)
              - version (defaults v1), version of the api of nomad.
              - verify (defaults False), verify the certificate when tls/ssl is enabled
                at nomad.
              - cert (defaults empty), cert, or key and cert file to validate the certificate
                configured at nomad.
              - region (defaults None), version of the region to use. It will be used then
                regions of the current agent of the connection.
              - namespace (defaults to None), specifies the enterprise namespace that will
                be used to deploy or to ask info to nomad.
              - token (defaults to None), specifies to append ACL token to the headers to
                make authentication on secured based nomad environments.

            returns: Nomad api client object

            raises:
              - nomad.api.exceptions.BaseNomadException
              - nomad.api.exceptions.URLNotFoundNomadException
              - nomad.api.exceptions.URLNotAuthorizedNomadException
        """
        self.host = host
        self.secure = secure
        self.port = port
        self.address = address
        self.timeout = timeout
        self.version = version
        self.verify = verify
        self.cert = cert
        # NOTE(review): `region` is accepted but never stored or forwarded to
        # the Requester -- confirm whether it should be passed through.
        self.requester = api.Requester(address=address, uri=self.get_uri(), port=port, namespace=namespace,
                                       token=token, timeout=timeout, version=version, verify=verify, cert=cert)
        # One wrapper per Nomad API endpoint group, all sharing the same
        # requester (and therefore the same namespace/token state).
        self._jobs = api.Jobs(self.requester)
        self._job = api.Job(self.requester)
        self._nodes = api.Nodes(self.requester)
        self._node = api.Node(self.requester)
        self._allocations = api.Allocations(self.requester)
        self._allocation = api.Allocation(self.requester)
        self._evaluations = api.Evaluations(self.requester)
        self._evaluation = api.Evaluation(self.requester)
        self._agent = api.Agent(self.requester)
        self._client = api.Client(self.requester)
        self._deployments = api.Deployments(self.requester)
        self._deployment = api.Deployment(self.requester)
        self._regions = api.Regions(self.requester)
        self._status = api.Status(self.requester)
        self._system = api.System(self.requester)
        self._operator = api.Operator(self.requester)
        self._validate = api.Validate(self.requester)
        self._namespaces = api.Namespaces(self.requester)
        self._namespace = api.Namespace(self.requester)
        self._acl = api.Acl(self.requester)
        self._sentinel = api.Sentinel(self.requester)
        self._metrics = api.Metrics(self.requester)

    def set_namespace(self, namespace):
        # Switch the enterprise namespace used by all endpoint wrappers.
        self.requester.namespace = namespace

    def set_token(self, token):
        # Switch the ACL token used by all endpoint wrappers.
        self.requester.token = token

    def get_namespace(self):
        return self.requester.namespace

    def get_token(self):
        return self.requester.token

    def get_uri(self):
        # Base URI; scheme depends on whether TLS was requested.
        if self.secure:
            protocol = "https"
        else:
            protocol = "http"
        return "{protocol}://{host}".format(protocol=protocol, host=self.host)

    # --- endpoint accessors (read-only views over the wrappers above) ---

    @property
    def jobs(self):
        return self._jobs

    @property
    def job(self):
        return self._job

    @property
    def nodes(self):
        return self._nodes

    @property
    def node(self):
        return self._node

    @property
    def allocations(self):
        return self._allocations

    @property
    def allocation(self):
        return self._allocation

    @property
    def evaluations(self):
        return self._evaluations

    @property
    def evaluation(self):
        return self._evaluation

    @property
    def agent(self):
        return self._agent

    @property
    def client(self):
        return self._client

    @property
    def deployments(self):
        return self._deployments

    @property
    def deployment(self):
        return self._deployment

    @property
    def regions(self):
        return self._regions

    @property
    def status(self):
        return self._status

    @property
    def system(self):
        return self._system

    @property
    def operator(self):
        return self._operator

    @property
    def validate(self):
        return self._validate

    @property
    def namespaces(self):
        return self._namespaces

    @property
    def namespace(self):
        return self._namespace

    @property
    def acl(self):
        return self._acl

    @property
    def sentinel(self):
        return self._sentinel

    @property
    def metrics(self):
        return self._metrics
| StarcoderdataPython |
3393998 | <reponame>mashaka/TravelHelper
"""
Copyright 2017, MachineHeads
Author: <NAME>
Description: Utils
"""
from geopy.distance import great_circle
def flatten_music_events(music_events):
    """Flatten per-band event listings into a single list of event dicts.

    Each input element looks like ``{"name": ..., "cover_url": ...,
    "events": [{...}, ...]}``.  For every event, a new dict is produced that
    carries the band name as ``performer``, a fixed ``type`` of ``music``
    and the band's ``cover_url`` (``None`` when absent), merged with all of
    the event's own fields (event fields win on key collisions).  Bands
    without an ``events`` key are skipped.
    """
    flattened = []
    for band in music_events:
        if 'events' not in band:
            continue
        for event in band['events']:
            record = {
                'performer': band['name'],
                'type': 'music',
                'cover_url': band.get('cover_url'),
            }
            record.update(event)
            flattened.append(record)
    return flattened
def calc_distance(trip_a, trip_b):
    """Return the great-circle distance in kilometres between two trips."""
    return great_circle(get_coordinates(trip_a), get_coordinates(trip_b)).km
def get_coordinates(trip):
    """Return the (latitude, longitude) pair of the trip's first location."""
    location = trip["locations"][0]["place"]["location"]
    return location["latitude"], location["longitude"]
| StarcoderdataPython |
3224504 | # Python3
from solution1 import arrayPacking as f
# Each entry holds one or more inputs followed by the expected answer.
qa = [
    ([24, 85, 0], 21784),
    ([23, 45, 39], 2567447),
    ([1, 2, 4, 8], 134480385),
    ([5], 5),
    ([187, 99, 42, 43], 724198331)
]
# Run every case: print the inputs, call the solution, and report a
# pass/fail verdict against the expected answer.
for *q, a in qa:
    for i, e in enumerate(q):
        print('input{0}: {1}'.format(i + 1, e))
    ans = f(*q)
    if ans != a:
        print(' [failed]')
        print(' output:', ans)
        print(' expected:', a)
    else:
        print(' [ok]')
        print(' output:', ans)
    print()
| StarcoderdataPython |
4812327 | import math
import config
class Object(object):
    """A movable, rotating 2-D entity with position/velocity/acceleration."""

    def __init__(self, position=(0., 0.), velocity=(0., 0.), acceleration=(0., 0.), angle=0., rotation_speed=0.5):
        self.position = position
        self.velocity = velocity
        self.acceleration = acceleration
        self.angle = angle
        self.rotation_speed = rotation_speed

    def is_in_range(self, other_object):
        """True when the other object is within the collision radius."""
        dx = self.position[0] - other_object.position[0]
        dy = self.position[1] - other_object.position[1]
        return (dx ** 2 + dy ** 2) ** 0.5 < config.COLLISION_RADIUS

    @property
    def is_out_of_bounds(self):
        """True when the object lies outside the screen rectangle."""
        return (self.position[0] > config.screen_shape[0] or self.position[0] < 0
                or self.position[1] > config.screen_shape[1] or self.position[1] < 0)

    def step_forward(self):
        """Advance one tick: integrate velocity, position and rotation."""
        self.velocity = tuple(v + a for v, a in zip(self.velocity, self.acceleration))
        self.position = tuple(p + v for p, v in zip(self.position, self.velocity))
        self.angle = (self.angle + self.rotation_speed) % (2 * math.pi)

    def __str__(self):
        return "position:{}\nvelocity:{}\nacceleration:{}".format(
            self.position, self.velocity, self.acceleration)
class NoteObject(Object):
    # An Object that additionally carries a musical note payload.
    def __init__(self, position=(0., 0.), velocity=(0., 0.), acceleration=(0., 0.), note=None, rotation_speed=0.5):
        # NOTE(review): the parent's `angle` parameter is not exposed here,
        # so note objects always start at angle 0.
        super(NoteObject, self).__init__(position, velocity, acceleration, rotation_speed=rotation_speed)
        self.note = note
class Scorer(object):
    """Tracks a non-negative score that decays by a fixed amount per tick."""

    def __init__(self, decay_rate=config.DECAY_RATE):
        self.score = 0
        self.decay_rate = decay_rate

    def add_points(self, points):
        """Award `points` to the current score."""
        self.score += points

    def decay(self):
        """Apply one decay step, clamping the score at zero."""
        self.score = max(self.score - self.decay_rate, 0)
| StarcoderdataPython |
166241 | from pathlib import Path
import hydra
import numpy as np
import torch
from hydra.utils import to_absolute_path
from nnsvs.base import PredictionType
from nnsvs.mdn import mdn_loss
from nnsvs.pitch import nonzero_segments
from nnsvs.train_util import save_checkpoint, setup
from nnsvs.util import make_non_pad_mask
from omegaconf import DictConfig, OmegaConf
from torch import nn
from tqdm import tqdm
def note_segments(lf0_score_denorm):
    """Compute note segments (start and end indices) from log-F0

    Note that unvoiced frames must be set to 0 in advance.

    Args:
        lf0_score_denorm (Tensor): (B, T)

    Returns:
        list: list of note (start, end) indices
    """
    segments = []
    # Voiced regions come from nonzero_segments; within each region a note
    # boundary is wherever the score pitch value changes between frames.
    for s, e in nonzero_segments(lf0_score_denorm):
        # 1 where consecutive frames differ, 0 where the pitch is flat.
        out = torch.sign(torch.abs(torch.diff(lf0_score_denorm[s : e + 1])))
        transitions = torch.where(out > 0)[0]
        note_start, note_end = s, -1
        # Emit one (start, end) pair per pitch transition; the tail after the
        # last transition is not emitted as its own segment.
        for pos in transitions:
            note_end = int(s + pos)
            segments.append((note_start, note_end))
            note_start = note_end
    return segments
def compute_pitch_regularization_weight(segments, N, decay_size=25, max_w=0.5):
    """Compute pitch regularization weight given note segments

    Args:
        segments (list): list of note (start, end) indices
        N (int): number of frames
        decay_size (int): size of the decay window
        max_w (float): maximum weight

    Returns:
        Tensor: weights of shape (N,)
    """
    weights = torch.zeros(N)
    for start, end in segments:
        # Constant weight inside the note; linear ramps at both edges when
        # the note is long enough to fit two decay windows.
        weights[start:end] = max_w
        if end - start > 2 * decay_size:
            ramp_up = torch.arange(decay_size) / decay_size
            ramp_down = torch.arange(decay_size - 1, -1, -1) / decay_size
            weights[start : start + decay_size] *= ramp_up
            weights[end - decay_size : end] *= ramp_down
    return weights
def compute_batch_pitch_regularization_weight(lf0_score_denorm):
    """Batch version of computing pitch regularization weight

    Args:
        lf0_score_denorm (Tensor): (B, T)

    Returns:
        Tensor: weights of shape (B, N, 1)
    """
    _, num_frames = lf0_score_denorm.shape
    weights = torch.zeros_like(lf0_score_denorm)
    for batch_idx, lf0_row in enumerate(lf0_score_denorm):
        segments = note_segments(lf0_row)
        weights[batch_idx, :] = compute_pitch_regularization_weight(
            segments, num_frames
        ).to(weights.device)
    return weights.unsqueeze(-1)
def train_step(
    model,
    optimizer,
    train,
    in_feats,
    out_feats,
    lengths,
    pitch_reg_dyn_ws,
    pitch_reg_weight=1.0,
):
    """Run one forward pass (plus backward/step when `train` is True).

    Combines the main regression (or MDN) loss with a weighted L1 pitch
    regularization term on the log-F0 residual and returns the total loss.
    """
    optimizer.zero_grad()
    criterion = nn.MSELoss(reduction="none")
    # Apply preprocess if required (e.g., FIR filter for shallow AR)
    # defaults to no-op
    out_feats = model.preprocess_target(out_feats)
    # Run forward
    pred_out_feats, lf0_residual = model(in_feats, lengths)
    # Mask (B, T, 1) -- excludes padded frames from every loss term below.
    mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)
    # Compute loss
    if model.prediction_type() == PredictionType.PROBABILISTIC:
        # Mixture-density network output: negative log-likelihood loss.
        pi, sigma, mu = pred_out_feats
        # (B, max(T)) or (B, max(T), D_out)
        mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)
        # Compute loss and apply mask
        loss = mdn_loss(pi, sigma, mu, out_feats, reduce=False)
        loss = loss.masked_select(mask_).mean()
    else:
        loss = criterion(
            pred_out_feats.masked_select(mask), out_feats.masked_select(mask)
        ).mean()
    # Pitch regularization
    # NOTE: l1 loss seems to be better than mse loss in my experiments
    # we could use l2 loss as suggested in the sinsy's paper
    loss += (
        pitch_reg_weight
        * (pitch_reg_dyn_ws * lf0_residual.abs()).masked_select(mask).mean()
    )
    if train:
        loss.backward()
        optimizer.step()
    return loss
def train_loop(
    config,
    logger,
    device,
    model,
    optimizer,
    lr_scheduler,
    data_loaders,
    writer,
    in_scaler,
):
    """Main training loop for residual-F0 models.

    Iterates over epochs and phases (train/dev), derives the time-variant
    pitch regularization weights from the denormalized score log-F0, runs
    train_step per batch, logs the average loss, and checkpoints the best
    model according to the non-train phase loss.
    """
    out_dir = Path(to_absolute_path(config.train.out_dir))
    best_loss = torch.finfo(torch.float32).max
    in_lf0_idx = config.data.in_lf0_idx
    in_rest_idx = config.data.in_rest_idx
    if in_lf0_idx is None or in_rest_idx is None:
        raise ValueError("in_lf0_idx and in_rest_idx must be specified")
    pitch_reg_weight = config.train.pitch_reg_weight
    for epoch in tqdm(range(1, config.train.nepochs + 1)):
        for phase in data_loaders.keys():
            train = phase.startswith("train")
            model.train() if train else model.eval()
            running_loss = 0
            for in_feats, out_feats, lengths in data_loaders[phase]:
                # NOTE: This is needed for pytorch's PackedSequence
                lengths, indices = torch.sort(lengths, dim=0, descending=True)
                in_feats, out_feats = (
                    in_feats[indices].to(device),
                    out_feats[indices].to(device),
                )
                # Compute denormalized log-F0 in the musical scores
                lf0_score_denorm = (
                    in_feats[:, :, in_lf0_idx]
                    * float(
                        in_scaler.data_max_[in_lf0_idx]
                        - in_scaler.data_min_[in_lf0_idx]
                    )
                    + in_scaler.data_min_[in_lf0_idx]
                )
                # Fill zeros for rest and padded frames
                lf0_score_denorm *= (in_feats[:, :, in_rest_idx] <= 0).float()
                for idx, length in enumerate(lengths):
                    lf0_score_denorm[idx, length:] = 0
                # Compute time-variant pitch regularization weight vector
                pitch_reg_dyn_ws = compute_batch_pitch_regularization_weight(
                    lf0_score_denorm
                )
                loss = train_step(
                    model,
                    optimizer,
                    train,
                    in_feats,
                    out_feats,
                    lengths,
                    pitch_reg_dyn_ws,
                    pitch_reg_weight,
                )
                running_loss += loss.item()
            # FIX: the original computed ave_loss twice in a row; once is enough.
            ave_loss = running_loss / len(data_loaders[phase])
            writer.add_scalar(f"Loss/{phase}", ave_loss, epoch)
            logger.info("[%s] [Epoch %s]: loss %s", phase, epoch, ave_loss)
            # Track the best model on the evaluation phase only.
            if not train and ave_loss < best_loss:
                best_loss = ave_loss
                save_checkpoint(
                    logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=True
                )
        lr_scheduler.step()
        if epoch % config.train.checkpoint_epoch_interval == 0:
            save_checkpoint(
                logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=False
            )
    save_checkpoint(
        logger, out_dir, model, optimizer, lr_scheduler, config.train.nepochs
    )
    logger.info("The best loss was %s", best_loss)
def _check_resf0_config(logger, model, config, in_scaler, out_scaler):
logger.info("Checking model configs for residual F0 prediction")
if in_scaler is None or out_scaler is None:
raise ValueError("in_scaler and out_scaler must be specified")
in_lf0_idx = config.data.in_lf0_idx
in_rest_idx = config.data.in_rest_idx
out_lf0_idx = config.data.out_lf0_idx
if in_lf0_idx is None or in_rest_idx is None or out_lf0_idx is None:
raise ValueError("in_lf0_idx, in_rest_idx and out_lf0_idx must be specified")
logger.info("in_lf0_idx: %s", in_lf0_idx)
logger.info("in_rest_idx: %s", in_rest_idx)
logger.info("out_lf0_idx: %s", out_lf0_idx)
ok = True
if hasattr(model, "in_lf0_idx"):
if model.in_lf0_idx != in_lf0_idx:
logger.warn(
"in_lf0_idx in model and data config must be same",
model.in_lf0_idx,
in_lf0_idx,
)
ok = False
if hasattr(model, "out_lf0_idx"):
if model.out_lf0_idx != out_lf0_idx:
logger.warn(
"out_lf0_idx in model and data config must be same",
model.out_lf0_idx,
out_lf0_idx,
)
ok = False
if hasattr(model, "in_lf0_min") and hasattr(model, "in_lf0_max"):
# Inject values from the input scaler
if model.in_lf0_min is None or model.in_lf0_max is None:
model.in_lf0_min = in_scaler.data_min_[in_lf0_idx]
model.in_lf0_max = in_scaler.data_max_[in_lf0_idx]
logger.info("in_lf0_min: %s", model.in_lf0_min)
logger.info("in_lf0_max: %s", model.in_lf0_max)
if not np.allclose(model.in_lf0_min, in_scaler.data_min_[model.in_lf0_idx]):
logger.warn(
f"in_lf0_min is set to {model.in_lf0_min}, "
f"but should be {in_scaler.data_min_[model.in_lf0_idx]}"
)
ok = False
if not np.allclose(model.in_lf0_max, in_scaler.data_max_[model.in_lf0_idx]):
logger.warn(
f"in_lf0_max is set to {model.in_lf0_max}, "
f"but should be {in_scaler.data_max_[model.in_lf0_idx]}"
)
ok = False
if hasattr(model, "out_lf0_mean") and hasattr(model, "out_lf0_scale"):
# Inject values from the output scaler
if model.out_lf0_mean is None or model.out_lf0_scale is None:
model.out_lf0_mean = out_scaler.mean_[out_lf0_idx]
model.out_lf0_scale = out_scaler.scale_[out_lf0_idx]
logger.info("model.out_lf0_mean: %s", model.out_lf0_mean)
logger.info("model.out_lf0_scale: %s", model.out_lf0_scale)
if not np.allclose(model.out_lf0_mean, out_scaler.mean_[model.out_lf0_idx]):
logger.warn(
f"out_lf0_mean is set to {model.out_lf0_mean}, "
f"but should be {out_scaler.mean_[model.out_lf0_idx]}"
)
ok = False
if not np.allclose(model.out_lf0_scale, out_scaler.scale_[model.out_lf0_idx]):
logger.warn(
f"out_lf0_scale is set to {model.out_lf0_scale}, "
f"but should be {out_scaler.scale_[model.out_lf0_idx]}"
)
ok = False
if not ok:
if (
model.in_lf0_idx == in_lf0_idx
and hasattr(model, "in_lf0_min")
and hasattr(model, "out_lf0_mean")
):
logger.info(
f"""
If you are 100% sure that you set model.in_lf0_idx and model.out_lf0_idx correctly,
Please consider the following parameters in your model config:
in_lf0_idx: {model.in_lf0_idx}
out_lf0_idx: {model.out_lf0_idx}
in_lf0_min: {in_scaler.data_min_[model.in_lf0_idx]}
in_lf0_max: {in_scaler.data_max_[model.in_lf0_idx]}
out_lf0_mean: {out_scaler.mean_[model.out_lf0_idx]}
out_lf0_scale: {out_scaler.scale_[model.out_lf0_idx]}
"""
)
raise ValueError("The model config has wrong configurations.")
# Overwrite the parameters to the config
for key in ["in_lf0_min", "in_lf0_max", "out_lf0_mean", "out_lf0_scale"]:
config.model.netG[key] = float(getattr(model, key))
@hydra.main(config_path="conf/train_resf0", config_name="config")
def my_app(config: DictConfig) -> None:
    # Hydra entry point: set up training state, validate residual-F0
    # settings, persist the (possibly updated) configs, and run training.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    (
        model,
        optimizer,
        lr_scheduler,
        data_loaders,
        writer,
        logger,
        in_scaler,
        out_scaler,
    ) = setup(config, device)
    # May mutate config.model.netG with injected lf0 statistics.
    _check_resf0_config(logger, model, config, in_scaler, out_scaler)
    # Save configs again in case the model config has been changed
    out_dir = Path(to_absolute_path(config.train.out_dir))
    with open(out_dir / "config.yaml", "w") as f:
        OmegaConf.save(config, f)
    with open(out_dir / "model.yaml", "w") as f:
        OmegaConf.save(config.model, f)
    train_loop(
        config,
        logger,
        device,
        model,
        optimizer,
        lr_scheduler,
        data_loaders,
        writer,
        in_scaler,
    )
def entry():
    # Console-script entry point (used by packaging); delegates to hydra app.
    my_app()


if __name__ == "__main__":
    my_app()
| StarcoderdataPython |
1630728 | <filename>fable/long.py
def fromBits(lowBits: int, highBits: int, unsigned: bool):
    """Combine low/high 32-bit halves into a single Python integer.

    NOTE: `unsigned` is accepted for API compatibility but is not used.
    """
    combined = highBits << 32
    return combined + lowBits
def op_LeftShift(self, numBits):
    """Shift `self` left by `numBits` bits (Fable long left-shift shim)."""
    shifted = self << numBits
    return shifted
| StarcoderdataPython |
144851 | <reponame>boniaditya/scraping
from bs4 import BeautifulSoup
import urllib2
import requests
def get_tree(url):
    # Fetch `url` and parse the response into a BeautifulSoup tree.
    # NOTE: urllib2 is Python 2 only; this script predates Python 3.
    source = urllib2.urlopen(url).read()
    tree = BeautifulSoup(source, "html.parser")
    return tree
# Target page for the happy-hour scrape.
happyhours = 'https://www.downtownla.com/explore/dining-nightlife'
# NOTE(review): the page is fetched twice (urllib2 and requests) but only
# the requests response is parsed below.
happy_source = urllib2.urlopen(happyhours).read()
r = requests.get(happyhours)
happy_soup = BeautifulSoup(r.content)
#print(happy_soup)
#print(happy_source.info().get('Content-Encoding'))
#if __name__ == '__main__':
#First, I am going to identify the areas of the page I want to look at
# tree = get_tree('https://www.downtownla.com/explore/dining-nightlife/happy-hour-finder')
the_tree = get_tree('http://python.org')
happy_tree = get_tree('https://www.downtownla.com/explore/dining-nightlife/happy-hour-finder/bunker-hill-bar-grill')
#print(get_tree('http://google.com'))
#print(happy_tree)
# Dump all paragraph tags from the happy-hour page.
ptags = happy_soup.find_all('p')
print(ptags)
print(len(ptags))
# Count <div> elements (Python 2 print statements below).
i=0
for me in happy_soup.find_all('div'):
    i += 1
    print i
print i
print(happy_soup.find('title').text)
print(happy_soup.find('body').text)
# Print the text of every list item on the page.
for li in happy_soup.find_all('li'):
    print li.text
"""
found_happy_hours = []
for t in happy_soup.find_all('div', {'class':'info'}):
text = t.text
print(text)
print(found_happy_hours)
""" | StarcoderdataPython |
1653234 | <filename>purchases/views.py
from typing import List
from django.db.models import Count, F, QuerySet
from django.urls import reverse
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import BasePermission, IsAdminUser, IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from users.models import Profile
from .models import BeverageType, Purchase
from .serializers import (
BeverageTypeSerializer,
PurchaseCountSerializer,
PurchaseSerializer,
)
class BeverageTypeViewSet(ModelViewSet):
    """CRUD endpoints for beverage types.

    Any authenticated user may read; only staff may create, modify or
    delete entries.
    """

    queryset = BeverageType.objects.all()
    serializer_class = BeverageTypeSerializer

    def get_permissions(self) -> List[BasePermission]:
        """Viewing requires authentication; mutations additionally require staff."""
        classes = [IsAuthenticated]
        if self.action in ('create', 'update', 'partial_update', 'destroy'):
            classes.append(IsAdminUser)
        return [permission_class() for permission_class in classes]

    def get_queryset(self) -> QuerySet:
        """Optionally filter by a case-insensitive `name` query parameter."""
        queryset = super().get_queryset()
        name = self.request.query_params.get('name')
        if name is not None:
            return queryset.filter(name__icontains=name)
        return queryset
class PurchaseViewSet(ModelViewSet):
    """CRUD and aggregation endpoints for beverage purchases."""

    queryset = Purchase.objects.all()
    serializer_class = PurchaseSerializer
    # Whitelist of orderings accepted through the `order` query parameter.
    _orders = ('user', '-user', 'date', '-date', 'beverage_type', '-beverage_type')

    def get_permissions(self) -> List[BasePermission]:
        """Allow viewing and creating to authenticated users, deletion and
        updating only to staff
        """
        permission_classes = [IsAuthenticated]
        if self.action in ('update', 'partial_update', 'destroy'):
            permission_classes += [IsAdminUser]
        return [permission() for permission in permission_classes]

    def perform_create(self, serializer: PurchaseSerializer) -> None:
        """Create the purchase and deduct its price from the buyer's balance."""
        user, beverage_type = (
            serializer.validated_data['user'],
            serializer.validated_data['beverage_type'],
        )
        if not user.profile.is_freeloader:
            # BUG FIX: deduct from the profile of the purchase's `user`, not
            # from the requesting user -- staff may create purchases on
            # behalf of others (see create() below), and the freeloader check
            # above already refers to the purchase's user.
            Profile.objects.filter(id=user.profile.id).update(
                balance=F('balance') - beverage_type.price
            )
        return super().perform_create(serializer)

    def get_queryset(self) -> QuerySet:
        """Support `Purchase.user`, `Purchase.beverage_type` and non default order queries"""
        queryset = super().get_queryset()
        qp = self.request.query_params
        user_id, beverage_type_id, order = (
            qp.get('user', None),
            qp.get('beverage_type', None),
            qp.get('order', None),
        )
        # Non-integer filter values are silently ignored.
        if user_id is not None:
            try:
                user_id = int(user_id)
                queryset = queryset.filter(user=user_id)
            except ValueError:
                pass
        if beverage_type_id is not None:
            try:
                beverage_type_id = int(beverage_type_id)
                queryset = queryset.filter(beverage_type=beverage_type_id)
            except ValueError:
                pass
        if order in self._orders:
            queryset = queryset.order_by(order)
        return queryset

    def get_serializer_context(self):
        """Set request to none to return relative urls for relationships"""
        return {'request': None, 'format': self.format_kwarg, 'view': self}

    def create(self, request: Request) -> Response:
        """Create a purchase; only staff may create on behalf of other users."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        if (
            serializer.validated_data['user'].id != request.user.id
            and not request.user.is_staff
        ):
            return Response(
                {
                    'user': 'Cannot set user different from authenticated user'
                    'unless staff'
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(
            serializer.data, status=status.HTTP_201_CREATED, headers=headers
        )

    @action(detail=False, methods=['get'])
    def counts(self, request: Request) -> Response:
        """Action for counts of each beverage type, optionally per user."""
        order, user_id = request.query_params.get('order'), request.query_params.get(
            'user'
        )
        # Default to ascending count order for unknown order values.
        if order not in ('count', '-count'):
            order = 'count'
        if user_id is not None:
            try:
                user_id = int(user_id)
            except ValueError:
                user_id = None
        purchases = (
            self.get_queryset().filter(user=user_id)
            if user_id is not None
            else self.get_queryset()
        )
        purchase_counts = list(
            purchases.values('beverage_type')
            .annotate(count=Count('beverage_type'))
            .order_by(order)
        )
        # Replace raw beverage_type ids with relative detail URLs.
        for purchase_count in purchase_counts:
            purchase_count['beverage_type'] = reverse(
                'beveragetype-detail', args=[purchase_count['beverage_type']]
            )
        serializer = PurchaseCountSerializer(
            purchase_counts, many=True, context=self.get_serializer_context()
        )
        return Response(serializer.data)
| StarcoderdataPython |
71746 | <filename>virtual/lib/python3.6/site-packages/alembic/__init__.py<gh_stars>0
import sys
from . import context
from . import op
__version__ = "1.7.4"
| StarcoderdataPython |
188999 | <reponame>everthemore/opyrators
import unittest
import sys
sys.path.append("..")
from opyrators.fermions import operator
class optermTest(unittest.TestCase):
    """Tests for the fermionic ``operator`` class: addition, subtraction,
    multiplication (operator and scalar) and Hermitian conjugation.

    Terms are keyed by an occupation-style string; coefficients may be complex.
    """

    def test_operator_subtraction(self):
        """A - B with distinct terms keeps both terms, negating B's coefficient.

        (Renamed from ``test_operator_addition``: the body exercises
        subtraction, not addition.)
        """
        A = operator({"112233": 0.2})
        B = operator({"112230": 1.3})
        C = A - B
        # Subtraction must not mutate its operands.
        self.assertEqual(A.terms["112233"], 0.2)
        self.assertEqual(B.terms["112230"], 1.3)
        # The result carries both terms, the second one negated.
        self.assertEqual(C.terms["112233"], 0.2)
        self.assertEqual(C.terms["112230"], -1.3)

    def test_identical_operator_addition(self):
        """Adding operators sharing a term sums the coefficients."""
        A = operator({"112233": 0.2})
        B = operator({"112233": 1.3})
        C = A + B
        self.assertEqual(C.terms["112233"], 1.5)

    def test_identical_operator_subtraction(self):
        """A - A cancels to the empty (zero) operator; zero absorbs products."""
        A = operator({"112233": 0.2})
        B = operator({"112233": 0.2})
        C = A - B
        self.assertEqual(len(C.terms), 0)
        # Multiplying with a zero operator results in zero.
        D = A * C
        self.assertEqual(len(D.terms), 0)

    def test_identical_operator_multiplication(self):
        """Applying the same single-mode operator twice annihilates the term."""
        A = operator({"010": 1})
        B = operator({"010": 1})
        C = A * B
        self.assertEqual(len(C.terms), 0)

    def test_cdagger_c_multiplication(self):
        """'010' * '020' composes into the single term '030'."""
        A = operator({"010": 1})
        B = operator({"020": 1})
        C = A * B
        self.assertEqual(C.terms["030"], 1)

    def test_c_cdagger_multiplication(self):
        """'020' * '010' yields '000' plus '030' with a fermionic sign flip."""
        A = operator({"020": 1})
        B = operator({"010": 1})
        C = A * B
        self.assertEqual(C.terms["000"], 1)
        self.assertEqual(C.terms["030"], -1)

    def test_scalar_multiplication(self):
        """Scalars scale coefficients from either side of the operator."""
        # Scalar on the left.
        A = operator({"112233": 1})
        B = 3.0 * A
        self.assertEqual(B.terms["112233"], 3.0)
        # Scalar on the right.
        A = operator({"112233": 1})
        B = A * 2.7
        self.assertEqual(B.terms["112233"], 2.7)

    def test_conjugation(self):
        """conj() maps the term string '112233' to '221133' and conjugates
        the coefficient."""
        A = operator({"112233": 3 + 0.723j})
        A = A.conj()
        self.assertEqual(A.terms["221133"], 3 - 0.723j)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1681128 | #!/usr/bin/env python
#
# generate a tester program for the API
#
import sys
import os
import string
try:
import libxml2
except:
print("libxml2 python bindings not available, skipping testapi.c generation")
sys.exit(0)
if len(sys.argv) > 1:
srcPref = sys.argv[1] + '/'
else:
srcPref = ''
#
# Modules we want to skip in API test
#
skipped_modules = [ "SAX", "xlink", "threads", "globals",
"xmlmemory", "xmlversion", "xmlexports",
]
#
# defines for each module
#
modules_defines = {
"HTMLparser": "LIBXML_HTML_ENABLED",
"catalog": "LIBXML_CATALOG_ENABLED",
"xmlreader": "LIBXML_READER_ENABLED",
"relaxng": "LIBXML_SCHEMAS_ENABLED",
"schemasInternals": "LIBXML_SCHEMAS_ENABLED",
"xmlschemas": "LIBXML_SCHEMAS_ENABLED",
"xmlschemastypes": "LIBXML_SCHEMAS_ENABLED",
"xpath": "LIBXML_XPATH_ENABLED",
"xpathInternals": "LIBXML_XPATH_ENABLED",
"xinclude": "LIBXML_XINCLUDE_ENABLED",
"xpointer": "LIBXML_XPTR_ENABLED",
"xmlregexp" : "LIBXML_REGEXP_ENABLED",
"xmlautomata" : "LIBXML_AUTOMATA_ENABLED",
"xmlsave" : "LIBXML_OUTPUT_ENABLED",
"xmlmodule" : "LIBXML_MODULES_ENABLED",
"pattern" : "LIBXML_PATTERN_ENABLED",
"schematron" : "LIBXML_SCHEMATRON_ENABLED",
}
#
# defines for specific functions
#
function_defines = {
"htmlDefaultSAXHandlerInit": "LIBXML_HTML_ENABLED",
"xmlSAX2EndElement" : "LIBXML_SAX1_ENABLED",
"xmlSAX2StartElement" : "LIBXML_SAX1_ENABLED",
"xmlSAXDefaultVersion" : "LIBXML_SAX1_ENABLED",
"UTF8Toisolat1" : "LIBXML_OUTPUT_ENABLED",
"xmlIOParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDoc": "LIBXML_SAX1_ENABLED",
"xmlParseMemory": "LIBXML_SAX1_ENABLED",
"xmlRecoverDoc": "LIBXML_SAX1_ENABLED",
"xmlParseFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseFileWithData": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDoc": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDTD": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseFile": "LIBXML_SAX1_ENABLED",
"xmlParseEntity": "LIBXML_SAX1_ENABLED",
"xmlParseExternalEntity": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemoryWithData": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemory": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemoryRecover": "LIBXML_SAX1_ENABLED",
"xmlSetupParserForBuffer": "LIBXML_SAX1_ENABLED",
"xmlStopParser": "LIBXML_PUSH_ENABLED",
"xmlAttrSerializeTxtContent": "LIBXML_OUTPUT_ENABLED",
"xmlSAXParseFile": "LIBXML_SAX1_ENABLED",
"xmlSAXParseEntity": "LIBXML_SAX1_ENABLED",
"xmlNewTextChild": "LIBXML_TREE_ENABLED",
"xmlNewDocRawNode": "LIBXML_TREE_ENABLED",
"xmlNewProp": "LIBXML_TREE_ENABLED",
"xmlReconciliateNs": "LIBXML_TREE_ENABLED",
"xmlValidateNCName": "LIBXML_TREE_ENABLED",
"xmlValidateNMToken": "LIBXML_TREE_ENABLED",
"xmlValidateName": "LIBXML_TREE_ENABLED",
"xmlNewChild": "LIBXML_TREE_ENABLED",
"xmlValidateQName": "LIBXML_TREE_ENABLED",
"xmlSprintfElementContent": "LIBXML_OUTPUT_ENABLED",
"xmlValidGetPotentialChildren" : "LIBXML_VALID_ENABLED",
"xmlValidGetValidElements" : "LIBXML_VALID_ENABLED",
"xmlTextReaderPreservePattern" : "LIBXML_PATTERN_ENABLED",
}
#
# Some functions really need to be skipped for the tests.
#
skipped_functions = [
# block on I/O
"xmlFdRead", "xmlReadFd", "xmlCtxtReadFd",
"htmlFdRead", "htmlReadFd", "htmlCtxtReadFd",
"xmlReaderNewFd", "xmlReaderForFd",
"xmlIORead", "xmlReadIO", "xmlCtxtReadIO",
"htmlIORead", "htmlReadIO", "htmlCtxtReadIO",
"xmlReaderNewIO", "xmlBufferDump", "xmlNanoFTPConnect",
"xmlNanoFTPConnectTo", "xmlNanoHTTPMethod", "xmlNanoHTTPMethodRedir",
# Complex I/O APIs
"xmlCreateIOParserCtxt", "xmlParserInputBufferCreateIO",
"xmlRegisterInputCallbacks", "xmlReaderForIO",
"xmlOutputBufferCreateIO", "xmlRegisterOutputCallbacks",
"xmlSaveToIO", "xmlIOHTTPOpenW",
# library state cleanup, generate false leak information and other
# troubles, heavily tested otherwise.
"xmlCleanupParser", "xmlRelaxNGCleanupTypes", "xmlSetListDoc",
"xmlSetTreeDoc", "xmlUnlinkNode",
# hard to avoid leaks in the tests
"xmlStrcat", "xmlStrncat", "xmlCatalogAddLocal", "xmlNewTextWriterDoc",
"xmlXPathNewValueTree", "xmlXPathWrapString",
# unimplemented
"xmlTextReaderReadInnerXml", "xmlTextReaderReadOuterXml",
"xmlTextReaderReadString",
# destructor
"xmlListDelete", "xmlOutputBufferClose", "xmlNanoFTPClose", "xmlNanoHTTPClose",
# deprecated
"xmlCatalogGetPublic", "xmlCatalogGetSystem", "xmlEncodeEntities",
"xmlNewGlobalNs", "xmlHandleEntity", "xmlNamespaceParseNCName",
"xmlNamespaceParseNSDef", "xmlNamespaceParseQName",
"xmlParseNamespace", "xmlParseQuotedString", "xmlParserHandleReference",
"xmlScanName",
"xmlDecodeEntities",
# allocators
"xmlMemFree",
# verbosity
"xmlCatalogSetDebug", "xmlShellPrintXPathError", "xmlShellPrintNode",
# Internal functions, no user space should really call them
"xmlParseAttribute", "xmlParseAttributeListDecl", "xmlParseName",
"xmlParseNmtoken", "xmlParseEntityValue", "xmlParseAttValue",
"xmlParseSystemLiteral", "xmlParsePubidLiteral", "xmlParseCharData",
"xmlParseExternalID", "xmlParseComment", "xmlParsePITarget", "xmlParsePI",
"xmlParseNotationDecl", "xmlParseEntityDecl", "xmlParseDefaultDecl",
"xmlParseNotationType", "xmlParseEnumerationType", "xmlParseEnumeratedType",
"xmlParseAttributeType", "xmlParseAttributeListDecl",
"xmlParseElementMixedContentDecl", "xmlParseElementChildrenContentDecl",
"xmlParseElementContentDecl", "xmlParseElementDecl", "xmlParseMarkupDecl",
"xmlParseCharRef", "xmlParseEntityRef", "xmlParseReference",
"xmlParsePEReference", "xmlParseDocTypeDecl", "xmlParseAttribute",
"xmlParseStartTag", "xmlParseEndTag", "xmlParseCDSect", "xmlParseContent",
"xmlParseElement", "xmlParseVersionNum", "xmlParseVersionInfo",
"xmlParseEncName", "xmlParseEncodingDecl", "xmlParseSDDecl",
"xmlParseXMLDecl", "xmlParseTextDecl", "xmlParseMisc",
"xmlParseExternalSubset", "xmlParserHandlePEReference",
"xmlSkipBlankChars",
# Legacy
"xmlCleanupPredefinedEntities", "xmlInitializePredefinedEntities",
"xmlSetFeature", "xmlGetFeature", "xmlGetFeaturesList",
# location sets
"xmlXPtrLocationSetAdd",
"xmlXPtrLocationSetCreate",
"xmlXPtrLocationSetDel",
"xmlXPtrLocationSetMerge",
"xmlXPtrLocationSetRemove",
"xmlXPtrWrapLocationSet",
]
#
# These functions have side effects on the global state
# and hence generate errors on memory allocation tests
#
skipped_memcheck = [ "xmlLoadCatalog", "xmlAddEncodingAlias",
"xmlSchemaInitTypes", "xmlNanoFTPProxy", "xmlNanoFTPScanProxy",
"xmlNanoHTTPScanProxy", "xmlResetLastError", "xmlCatalogConvert",
"xmlCatalogRemove", "xmlLoadCatalogs", "xmlCleanupCharEncodingHandlers",
"xmlInitCharEncodingHandlers", "xmlCatalogCleanup",
"xmlSchemaGetBuiltInType",
"htmlParseFile", "htmlCtxtReadFile", # loads the catalogs
"xmlTextReaderSchemaValidate", "xmlSchemaCleanupTypes", # initialize the schemas type system
"xmlCatalogResolve", "xmlIOParseDTD" # loads the catalogs
]
#
# Extra code needed for some test cases
#
extra_pre_call = {
"xmlSAXUserParseFile": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlSAXUserParseMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemoryRecover": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParserInputBufferCreateFd":
"if (fd >= 0) fd = -1;",
}
extra_post_call = {
"xmlAddChild":
"if (ret_val == NULL) { xmlFreeNode(cur) ; cur = NULL ; }",
"xmlAddEntity":
"if (ret_val != NULL) { xmlFreeNode(ret_val) ; ret_val = NULL; }",
"xmlAddChildList":
"if (ret_val == NULL) { xmlFreeNodeList(cur) ; cur = NULL ; }",
"xmlAddSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddNextSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddPrevSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlDocSetRootElement":
"if (doc == NULL) { xmlFreeNode(root) ; root = NULL ; }",
"xmlReplaceNode":
"""if (cur != NULL) {
xmlUnlinkNode(cur);
xmlFreeNode(cur) ; cur = NULL ; }
if (old != NULL) {
xmlUnlinkNode(old);
xmlFreeNode(old) ; old = NULL ; }
\t ret_val = NULL;""",
"xmlTextMerge":
"""if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
xmlUnlinkNode(second);
xmlFreeNode(second) ; second = NULL ; }""",
"xmlBuildQName":
"""if ((ret_val != NULL) && (ret_val != ncname) &&
(ret_val != prefix) && (ret_val != memory))
xmlFree(ret_val);
\t ret_val = NULL;""",
"xmlNewDocElementContent":
"""xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
"xmlDictReference": "xmlDictFree(dict);",
# Functions which deallocates one of their parameters
"xmlXPathConvertBoolean": """val = NULL;""",
"xmlXPathConvertNumber": """val = NULL;""",
"xmlXPathConvertString": """val = NULL;""",
"xmlSaveFileTo": """buf = NULL;""",
"xmlSaveFormatFileTo": """buf = NULL;""",
"xmlIOParseDTD": "input = NULL;",
"xmlRemoveProp": "cur = NULL;",
"xmlNewNs": "if ((node == NULL) && (ret_val != NULL)) xmlFreeNs(ret_val);",
"xmlCopyNamespace": "if (ret_val != NULL) xmlFreeNs(ret_val);",
"xmlCopyNamespaceList": "if (ret_val != NULL) xmlFreeNsList(ret_val);",
"xmlNewTextWriter": "if (ret_val != NULL) out = NULL;",
"xmlNewTextWriterPushParser": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;} if (ret_val != NULL) ctxt = NULL;",
"xmlNewIOInputStream": "if (ret_val != NULL) input = NULL;",
"htmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"htmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseExtParsedEnt": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlDOMWrapAdoptNode": "if ((node != NULL) && (node->parent == NULL)) {xmlUnlinkNode(node);xmlFreeNode(node);node = NULL;}",
"xmlBufferSetAllocationScheme": "if ((buf != NULL) && (scheme == XML_BUFFER_ALLOC_IMMUTABLE) && (buf->content != NULL) && (buf->content != static_buf_content)) { xmlFree(buf->content); buf->content = NULL;}"
}
modules = []
def is_skipped_module(name):
    """Return 1 if module *name* is excluded from API test generation, else 0.

    Uses a membership test instead of the original manual scan; int() keeps
    the 0/1 return contract of the original implementation.
    """
    return int(name in skipped_modules)
def is_skipped_function(name):
    """Return 1 if function *name* must not be tested, else 0.

    A function is skipped when it is in the explicit skip list, or when it
    looks like a destructor (anything with 'Free' in its name).
    """
    if name in skipped_functions:
        return 1
    # Do not test destructors.
    if 'Free' in name:
        return 1
    return 0
def is_skipped_memcheck(name):
    """Return 1 if *name* has global side effects that would produce false
    positives in the memory-leak check, else 0."""
    return int(name in skipped_memcheck)
# Types with no generator support, mapped to the functions that need them.
missing_types = {}

def add_missing_type(name, func):
    """Record that *func* requires the unsupported type *name*.

    ``setdefault`` replaces the original try/except KeyError pattern and
    avoids shadowing the builtin ``list``.
    """
    missing_types.setdefault(name, []).append(func)
# Parameter types for which a NULL-placeholder generator was already emitted.
generated_param_types = []
def add_generated_param_type(name):
    # Record *name* so generate_param_type() emits each stub only once.
    generated_param_types.append(name)
# Return types for which a desret_* stub was already emitted.
generated_return_types = []
def add_generated_return_type(name):
    # Record *name* so each return-type stub is emitted only once.
    generated_return_types.append(name)
# Functions we could not generate a test for, grouped by module.
missing_functions = {}
missing_functions_nr = 0

def add_missing_functions(name, module):
    """Record function *name* of *module* as untestable and bump the counter.

    ``setdefault`` replaces the original try/except KeyError pattern and
    avoids shadowing the builtin ``list``.
    """
    global missing_functions_nr
    missing_functions_nr = missing_functions_nr + 1
    missing_functions.setdefault(module, []).append(name)
#
# Provide the type generators and destructors for the parameters
#
def type_convert(str, name, info, module, function, pos):
    """Map a C type string from the API description to a generator type key.

    The key selects which gen_*/des_* helpers are emitted into testapi.c.
    *name*, *info*, *module*, *function* and *pos* (1-based argument
    position, 0 for return values) disambiguate context-dependent types
    such as file paths, user-data pointers and parser options.

    NOTE(review): the first parameter intentionally keeps its historical
    name ``str`` (shadowing the builtin) to stay call-compatible.
    """
    # "xmlChar *" -> "xmlChar_ptr"; any remaining spaces become '_'.
    res = str.replace(" *", "_ptr")
    res = res.replace(" ", "_")

    # const char * arguments that look like file names / URIs get dedicated
    # path generators; functions that write get writable output paths.
    if res == 'const_char_ptr':
        if ('file' in name or 'uri' in name or 'URI' in name or
                'filename' in info or 'URI' in info or 'URL' in info):
            if ('Save' in function or 'Create' in function or
                    'Write' in function or 'Fetch' in function):
                return 'fileoutput'
            return 'filepath'

    # void * is context dependent: nanoftp/nanohttp contexts, or user data.
    if res == 'void_ptr':
        if module == 'nanoftp' and name == 'ctx':
            return 'xmlNanoFTPCtxtPtr'
        if function in ('xmlNanoFTPNewCtxt', 'xmlNanoFTPConnectTo',
                        'xmlNanoFTPOpen'):
            return 'xmlNanoFTPCtxtPtr'
        if module == 'nanohttp' and name == 'ctx':
            return 'xmlNanoHTTPCtxtPtr'
        if function in ('xmlNanoHTTPMethod', 'xmlNanoHTTPMethodRedir',
                        'xmlNanoHTTPOpen', 'xmlNanoHTTPOpenRedir',
                        'xmlIOHTTPOpen'):
            return 'xmlNanoHTTPCtxtPtr'
        if 'data' in name or 'user' in name:
            return 'userdata'

    # Normalize typedef'd pointer spellings.
    if res == 'xmlDoc_ptr':
        res = 'xmlDocPtr'
    if res == 'xmlNode_ptr':
        res = 'xmlNodePtr'
    if res == 'xmlDict_ptr':
        res = 'xmlDictPtr'

    # Nodes consumed by tree-insertion APIs need the "in" generator so the
    # generated test does not free them twice.
    if res == 'xmlNodePtr' and pos != 0:
        if (function == 'xmlTextMerge' or
                (pos == 2 and function in (
                    'xmlAddChild', 'xmlAddChildList', 'xmlAddNextSibling',
                    'xmlAddSibling', 'xmlDocSetRootElement', 'xmlReplaceNode',
                    'xmlAddPrevSibling'))):
            return 'xmlNodePtr_in'
    if res == 'const xmlBufferPtr':
        # NOTE(review): unreachable (spaces were replaced above); kept as-is.
        res = 'xmlBufferPtr'
    if res == 'xmlChar_ptr' and name == 'name' and 'EatName' in function:
        return 'eaten_name'

    # Pointer-to-pointer spellings.
    if res == 'void_ptr*':
        res = 'void_ptr_ptr'
    if res == 'char_ptr*':
        res = 'char_ptr_ptr'
    if res == 'xmlChar_ptr*':
        res = 'xmlChar_ptr_ptr'
    if res == 'const_xmlChar_ptr*':
        res = 'const_xmlChar_ptr_ptr'
    if res == 'const_char_ptr*':
        res = 'const_char_ptr_ptr'

    if res == 'FILE_ptr' and module == 'debugXML':
        res = 'debug_FILE_ptr'
    if res == 'int' and name == 'options' and module in ('parser', 'xmlreader'):
        res = 'parseroptions'

    return res
# Parameter types testapi.c already provides generators for (filled while
# scanning the hand-written part of the file).
known_param_types = []

def is_known_param_type(name):
    """Return true if a generator exists (or can be stubbed) for *name*.

    Any pointer-looking type can fall back to the NULL placeholder stub
    emitted by generate_param_type().
    """
    if name in known_param_types:
        return 1
    return name.endswith(('Ptr', '_ptr'))
def generate_param_type(name, rtype):
    """Emit a NULL-placeholder generator/destructor trio for pointer type *name*.

    Each stub is written to the generated testapi.c at most once; non-pointer
    types are ignored.  (The original also computed an unused ``crtype`` from
    *rtype*; that dead code is removed, *rtype* is kept for call compatibility.)

    NOTE(review): ``module`` is read from the enclosing script scope, not a
    parameter -- it holds whatever the surrounding module loop last assigned.
    """
    global test
    if name in known_param_types or name in generated_param_types:
        return
    if name.endswith('Ptr') or name.endswith('_ptr'):
        define = 0
        if module in modules_defines:
            test.write("#ifdef %s\n" % (modules_defines[module]))
            define = 1
        test.write("""
#define gen_nb_%s 1
#define gen_%s(no, nr) NULL
#define des_%s(no, val, nr)
""" % (name, name, name))
        if define == 1:
            test.write("#endif\n\n")
        add_generated_param_type(name)
#
# Provide the type destructors for the return values
#
# Return types for which testapi.c provides a desret_* destructor.
known_return_types = []

def is_known_return_type(name):
    """Return 1 if a desret_* handler exists for type *name*, else 0."""
    return int(name in known_return_types)
#
# Copy the beginning of the C test program result
#
# NOTE(review): ``input`` shadows the builtin open file object and stays open
# until the scan loop below closes it.  The fallback handles out-of-tree builds
# where testapi.c lives under srcPref.
try:
    input = open("testapi.c", "r")
except:
    input = open(srcPref + "testapi.c", "r")
test = open('testapi.c.new', 'w')
def compare_and_save():
    """Replace testapi.c with the freshly generated file only if it changed.

    Keeping an unchanged testapi.c preserves its timestamp and avoids
    needless rebuilds.  Fixes in this version: file handles are closed via
    ``with``, the bare ``except`` is narrowed to OSError, and the shell
    ``rm; mv`` pair is replaced by the portable, atomic ``os.replace``.
    """
    global test
    test.close()
    try:
        with open("testapi.c", "r") as f:
            previous = f.read()
    except OSError:
        previous = ''
    with open('testapi.c.new', "r") as f:
        generated = f.read()
    if previous != generated:
        os.replace("testapi.c.new", "testapi.c")
        print("Updated testapi.c")
    else:
        print("Generated testapi.c is identical")
# Copy the hand-written part of testapi.c (everything up to the CUT marker)
# into the new file, collecting the parameter/return types it already supports.
line = input.readline()
while line != "":
    if line == "/* CUT HERE: everything below that line is generated */\n":
        break;
    # "#define gen_nb_<type>" marks a hand-written parameter generator.
    if line[0:15] == "#define gen_nb_":
        type = line[15:].split()[0]
        known_param_types.append(type)
    # "static void desret_<type>(" marks a hand-written return destructor.
    if line[0:19] == "static void desret_":
        type = line[19:].split('(')[0]
        known_return_types.append(type)
    test.write(line)
    line = input.readline()
input.close()
# Reaching EOF without the marker means the template is unusable; bail out.
if line == "":
    print("Could not find the CUT marker in testapi.c skipping generation")
    test.close()
    sys.exit(0)
print("Scanned testapi.c: found %d parameters types and %d return types\n" % (
      len(known_param_types), len(known_return_types)))
test.write("/* CUT HERE: everything below that line is generated */\n")
#
# Open the input API description
#
doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
if doc == None:
    print("Failed to load doc/libxml2-api.xml")
    sys.exit(1)
ctxt = doc.xpathNewContext()

#
# Generate a list of all function parameters and select only
# those used in the api tests
#
argtypes = {}
args = ctxt.xpathEval("/api/symbols/function/arg")
for arg in args:
    mod = arg.xpathEval('string(../@file)')
    func = arg.xpathEval('string(../@name)')
    if (mod not in skipped_modules) and (func not in skipped_functions):
        type = arg.xpathEval('string(@type)')
        if type not in argtypes:
            # Remember one representative function per argument type.
            argtypes[type] = func

# similarly for return types
rettypes = {}
rets = ctxt.xpathEval("/api/symbols/function/return")
for ret in rets:
    mod = ret.xpathEval('string(../@file)')
    func = ret.xpathEval('string(../@name)')
    if (mod not in skipped_modules) and (func not in skipped_functions):
        type = ret.xpathEval('string(@type)')
        if type not in rettypes:
            rettypes[type] = func
#
# Generate constructors and return type handling for all enums
# which are used as function parameters
#
enums = ctxt.xpathEval("/api/symbols/typedef[@type='enum']")
for enum in enums:
    module = enum.xpathEval('string(@file)')
    name = enum.xpathEval('string(@name)')
    #
    # Skip any enums which are not in our filtered lists
    #
    if (name == None) or ((name not in argtypes) and (name not in rettypes)):
        continue;
    define = 0

    if (name in argtypes) and is_known_param_type(name) == 0:
        # Pick up to four enumerator values to cycle through in the tests.
        values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
        i = 0
        vals = []
        for value in values:
            vname = value.xpathEval('string(@name)')
            if vname == None:
                continue;
            i = i + 1
            if i >= 5:
                break;
            vals.append(vname)
        if vals == []:
            print("Didn't find any value for enum %s" % (name))
            continue
        if module in modules_defines:
            test.write("#ifdef %s\n" % (modules_defines[module]))
            define = 1
        # Emit the generator returning the i-th enumerator, and a no-op
        # destructor.
        test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
        test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
                   (name, name))
        i = 1
        for value in vals:
            test.write("    if (no == %d) return(%s);\n" % (i, value))
            i = i + 1
        test.write("""    return(0);
}

static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}

""" % (name, name));
        known_param_types.append(name)

    if (is_known_return_type(name) == 0) and (name in rettypes):
        # Enum return values need only a no-op destructor.
        if define == 0 and (module in modules_defines):
            test.write("#ifdef %s\n" % (modules_defines[module]))
            define = 1
        test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
}

""" % (name, name))
        known_return_types.append(name)

    if define == 1:
        test.write("#endif\n\n")
#
# Load the interfaces
#
headers = ctxt.xpathEval("/api/files/file")
for file in headers:
    name = file.xpathEval('string(@name)')
    if (name == None) or (name == ''):
        continue

    #
    # Some modules may be skipped because they don't really consist
    # of user callable APIs
    #
    if is_skipped_module(name):
        continue

    #
    # do not test deprecated APIs
    #
    desc = file.xpathEval('string(description)')
    if desc.find('DEPRECATED') != -1:
        print("Skipping deprecated interface %s" % name)
        continue;

    test.write("#include <libxml/%s.h>\n" % name)
    modules.append(name)
#
# Generate the callers signatures
#
for module in modules:
    test.write("static int test_%s(void);\n" % module);

#
# Generate the top caller
#
test.write("""
/**
 * testlibxml2:
 *
 * Main entry point of the tester for the full libxml2 module,
 * it calls all the tester entry point for each module.
 *
 * Returns the number of error found
 */
static int
testlibxml2(void)
{
    int test_ret = 0;

""")

for module in modules:
    test.write("    test_ret += test_%s();\n" % module)

test.write("""
    printf("Total: %d functions, %d tests, %d errors\\n",
           function_tests, call_tests, test_ret);
    return(test_ret);
}
""")
#
# How to handle a function
#
# Running count of functions a test was generated for.
nb_tests = 0

def generate_test(module, node):
    """Emit one C test function (test_<name>) for API function *node* of
    *module*, cycling every argument through its generated values and
    checking for memory leaks after each call."""
    global test
    global nb_tests
    nb_cond = 0
    no_gen = 0

    name = node.xpathEval('string(@name)')
    if is_skipped_function(name):
        return

    #
    # check we know how to handle the args and return values
    # and store the information for the generation
    #
    try:
        args = node.xpathEval("arg")
    except:
        args = []
    t_args = []
    n = 0
    for arg in args:
        n = n + 1
        rtype = arg.xpathEval("string(@type)")
        if rtype == 'void':
            break;
        info = arg.xpathEval("string(@info)")
        nam = arg.xpathEval("string(@name)")
        type = type_convert(rtype, nam, info, module, name, n)
        if is_known_param_type(type) == 0:
            add_missing_type(type, name);
            no_gen = 1
        # crtype is the type without the leading 'const ' (cast target).
        if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
            rtype[0:6] == 'const ':
            crtype = rtype[6:]
        else:
            crtype = rtype
        t_args.append((nam, type, rtype, crtype, info))

    try:
        rets = node.xpathEval("return")
    except:
        rets = []
    t_ret = None
    for ret in rets:
        rtype = ret.xpathEval("string(@type)")
        info = ret.xpathEval("string(@info)")
        type = type_convert(rtype, 'return', info, module, name, 0)
        if rtype == 'void':
            break
        if is_known_return_type(type) == 0:
            add_missing_type(type, name);
            no_gen = 1
        t_ret = (type, rtype, info)
        break

    if no_gen == 0:
        # Make sure a placeholder generator exists for every pointer type.
        for t_arg in t_args:
            (nam, type, rtype, crtype, info) = t_arg
            generate_param_type(type, rtype)

    test.write("""
static int
test_%s(void) {
    int test_ret = 0;

""" % (name))

    if no_gen == 1:
        add_missing_functions(name, module)
        test.write("""
    /* missing type support */
    return(test_ret);
}

""")
        return

    try:
        conds = node.xpathEval("cond")
        for cond in conds:
            test.write("#if %s\n" % (cond.get_content()))
            nb_cond = nb_cond + 1
    except:
        pass

    define = 0
    if name in function_defines:
        test.write("#ifdef %s\n" % (function_defines[name]))
        define = 1

    # Declare the memory usage counter
    no_mem = is_skipped_memcheck(name)
    if no_mem == 0:
        test.write("    int mem_base;\n");

    # Declare the return value
    if t_ret != None:
        test.write("    %s ret_val;\n" % (t_ret[1]))

    # Declare the arguments
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        # add declaration
        test.write("    %s %s; /* %s */\n" % (crtype, nam, info))
        test.write("    int n_%s;\n" % (nam))
    test.write("\n")

    # Cascade of nested loops, one per argument's list of generated values
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        #
        test.write("    for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
                   nam, nam, type, nam))

    # log the memory usage
    if no_mem == 0:
        test.write("        mem_base = xmlMemBlocks();\n");

    # prepare the call
    i = 0;
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        #
        test.write("        %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
        i = i + 1;

    # add checks to avoid out-of-bounds array access
    i = 0;
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        # assume that "size", "len", and "start" parameters apply to either
        # the nearest preceding or following char pointer
        if type == "int" and (nam == "size" or nam == "len" or nam == "start"):
            for j in (*range(i - 1, -1, -1), *range(i + 1, len(t_args))):
                (bnam, btype) = t_args[j][:2]
                if btype == "const_char_ptr" or btype == "const_xmlChar_ptr":
                    test.write(
                        "        if ((%s != NULL) &&\n"
                        "            (%s > (int) strlen((const char *) %s) + 1))\n"
                        "            continue;\n"
                        % (bnam, nam, bnam))
                    break
        i = i + 1;

    # do the call, and cleanup the result
    if name in extra_pre_call:
        test.write("        %s\n"% (extra_pre_call[name]))
    if t_ret != None:
        test.write("\n        ret_val = %s(" % (name))
        need = 0
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg
            if need:
                test.write(", ")
            else:
                need = 1
            # Cast back to the const-qualified type where needed.
            if rtype != crtype:
                test.write("(%s)" % rtype)
            test.write("%s" % nam);
        test.write(");\n")
        if name in extra_post_call:
            test.write("        %s\n"% (extra_post_call[name]))
        test.write("        desret_%s(ret_val);\n" % t_ret[0])
    else:
        test.write("\n        %s(" % (name));
        need = 0;
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg;
            if need:
                test.write(", ")
            else:
                need = 1
            if rtype != crtype:
                test.write("(%s)" % rtype)
            test.write("%s" % nam)
        test.write(");\n")
        if name in extra_post_call:
            test.write("        %s\n"% (extra_post_call[name]))

    test.write("        call_tests++;\n");

    # Free the arguments
    i = 0;
    for arg in t_args:
        (nam, type, rtype, crtype, info) = arg;
        # This is a hack to prevent generating a destructor for the
        # 'input' argument in xmlTextReaderSetup. There should be
        # a better, more generic way to do this!
        if info.find('destroy') == -1:
            test.write("        des_%s(n_%s, " % (type, nam))
            if rtype != crtype:
                test.write("(%s)" % rtype)
            test.write("%s, %d);\n" % (nam, i))
        i = i + 1;

    test.write("        xmlResetLastError();\n");
    # Check the memory usage
    if no_mem == 0:
        test.write("""        if (mem_base != xmlMemBlocks()) {
            printf("Leak of %%d blocks found in %s",
\t           xmlMemBlocks() - mem_base);
\t    test_ret++;
""" % (name));
        # Print the loop indices so the leaking combination can be replayed.
        for arg in t_args:
            (nam, type, rtype, crtype, info) = arg;
            test.write("""            printf(" %%d", n_%s);\n""" % (nam))
        test.write("""            printf("\\n");\n""")
        test.write("        }\n")

    # Close the cascade of argument loops.
    for arg in t_args:
        test.write("    }\n")

    test.write("    function_tests++;\n")
    #
    # end of conditional
    #
    while nb_cond > 0:
        test.write("#endif\n")
        nb_cond = nb_cond -1
    if define == 1:
        test.write("#endif\n")

    nb_tests = nb_tests + 1;

    test.write("""
    return(test_ret);
}

""")
#
# Generate all module callers
#
for module in modules:
    # gather all the functions exported by that module
    try:
        functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
    except:
        print("Failed to gather functions from module %s" % (module))
        continue;

    # iterate over all functions in the module generating the test
    i = 0
    nb_tests_old = nb_tests
    for function in functions:
        i = i + 1
        generate_test(module, function);

    # header
    test.write("""static int
test_%s(void) {
    int test_ret = 0;

    if (quiet == 0) printf("Testing %s : %d of %d functions ...\\n");
""" % (module, module, nb_tests - nb_tests_old, i))

    # iterate over all functions in the module generating the call
    for function in functions:
        name = function.xpathEval('string(@name)')
        if is_skipped_function(name):
            continue
        test.write("    test_ret += test_%s();\n" % (name))

    # footer
    test.write("""
    if (test_ret != 0)
\tprintf("Module %s: %%d errors\\n", test_ret);
    return(test_ret);
}
""" % (module))

#
# Generate direct module caller
#
test.write("""static int
test_module(const char *module) {
""");
for module in modules:
    test.write("""    if (!strcmp(module, "%s")) return(test_%s());\n""" % (
               module, module))
test.write("""    return(0);
}
""");

print("Generated test for %d modules and %d functions" %(len(modules), nb_tests))

compare_and_save()
#
# Report the types and functions we could not generate tests for.
#
missing_list = []
for missing in missing_types.keys():
    # va_list and varargs can never be driven generically; don't report them.
    if missing == 'va_list' or missing == '...':
        continue
    n = len(missing_types[missing])
    missing_list.append((n, missing))
# Sort by the number of affected functions (ascending), as before.
missing_list.sort(key=lambda a: a[0])

print("Missing support for %d functions and %d types see missing.lst" % (
      missing_functions_nr, len(missing_list)))

# ``with`` guarantees the report file is closed even on error.
with open("missing.lst", "w") as lst:
    lst.write("Missing support for %d types" % (len(missing_list)))
    lst.write("\n")
    for miss in missing_list:
        lst.write("%s: %d :" % (miss[1], miss[0]))
        i = 0
        # List at most five example functions per type.
        for n in missing_types[miss[1]]:
            i = i + 1
            if i > 5:
                lst.write(" ...")
                break
            lst.write(" %s" % (n))
        lst.write("\n")
    lst.write("\n")
    lst.write("\n")
    # BUG FIX: the header previously lacked its trailing newline, so the
    # first "module ..." line was appended to it.
    lst.write("Missing support per module\n")
    for module in missing_functions.keys():
        lst.write("module %s:\n %s\n" % (module, missing_functions[module]))
| StarcoderdataPython |
1710940 | from .attribute_builder import AttributeBuilder
class Method(AttributeBuilder):
    """Attribute builder handling the single 'method' attribute."""

    def __init__(self):
        """Initialize the base builder and register the 'method' attribute."""
        super().__init__()
        self.attributes = ["method"]
| StarcoderdataPython |
87238 | import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, List, Optional
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
from hiding_adversarial_attacks.config.attack.adversarial_attack_config import (
ALL_CLASSES,
)
from hiding_adversarial_attacks.config.classifier_training_config import (
ClassifierTrainingConfig,
)
from hiding_adversarial_attacks.config.classifiers.classifier_config import (
Cifar10ClassifierConfig,
FashionMNISTClassifierConfig,
MNISTClassifierConfig,
)
from hiding_adversarial_attacks.config.data_sets.data_set_config import (
AdversarialCifar10Config,
AdversarialCifar10WithExplanationsConfig,
AdversarialFashionMNISTConfig,
AdversarialFashionMNISTWithExplanationsConfig,
AdversarialMNISTConfig,
)
from hiding_adversarial_attacks.config.explainers.deep_lift_baseline_config import (
BlurBaselineConfig,
LocalMeanBaselineConfig,
ZeroBaselineConfig,
)
from hiding_adversarial_attacks.config.explainers.explainer_config import (
DeepLiftConfig,
ExplainerConfig,
GuidedBackpropConfig,
InputXGradientConfig,
IntegratedGradientsConfig,
LayerDeepLiftConfig,
LayerGradCamConfig,
LRPConfig,
)
from hiding_adversarial_attacks.config.logger.logger import LoggingConfig
from hiding_adversarial_attacks.config.losses.similarity_loss_config import (
MSELoss,
PCCLoss,
SimilarityLoss,
SSIMLoss,
)
# Metric name used by the checkpoint config below to select the best model.
VAL_NORM_TOTAL_LOSS = "val_normalized_total_loss"

# Hydra defaults list: config-group entries composed into this config by default.
defaults = [
    {"data_set": "AdversarialMNIST"},
    {"classifier": "MNISTClassifier"},
    {"similarity_loss": "MSE"},
    {"explainer": "DeepLiftExplainer"},
    {"explainer.baseline": "ZeroBaseline"},
]
optuna_search_spaces = {
"MNIST": {
"lr": {
"log": True,
"low": 1e-4,
"high": 5e-4,
},
"loss_weight_similarity": {"low": 1, "high": 15, "step": 1},
"batch_size": [16, 32, 64],
# currently unused:
"similarity_loss": {"choices": [MSELoss]},
},
"FashionMNIST_PCC": {
"lr": {
"log": True,
"low": 6e-5,
"high": 3e-4,
},
"loss_weight_similarity": {"low": 2, "high": 2, "step": 1},
"ce_class_weight": {"low": 2, "high": 4, "step": 1},
"weight_decay": [0],
"batch_size": [64, 128],
"steps_lr": [5, 8],
"gamma": [0.4, 0.2, 0.1],
# currently unused:
"similarity_loss": {"choices": [PCCLoss]},
},
"FashionMNIST_MSE": {
"lr": {
"log": True,
"low": 1e-5,
"high": 5e-5,
},
"loss_weight_similarity": {"low": 6, "high": 10, "step": 1},
"batch_size": [64],
"weight_decay": [0.01, 0.005, 0.001],
# currently unused:
"similarity_loss": {"choices": [MSELoss]},
},
"CIFAR10_PCC": {
"lr": {
"log": True,
"low": 1e-7,
"high": 8e-7,
},
"loss_weight_similarity": {"low": 1, "high": 2, "step": 1},
"batch_size": [64],
"ce_class_weight": {"low": 7, "high": 10, "step": 1},
"weight_decay": [0, 0.1, 0.01],
"steps_lr": [1, 3, 5],
"gamma": [0.1, 0.5, 0.9, 1],
# currently unused:
"similarity_loss": {"choices": [PCCLoss]},
},
"CIFAR10_MSE": {
"lr": {
"log": True,
"low": 2e-6,
"high": 5e-6,
},
"loss_weight_similarity": {"low": 4, "high": 5, "step": 1},
"ce_class_weight": {"low": 7, "high": 9, "step": 1},
"weight_decay": [0],
"batch_size": [128],
# currently unused:
"similarity_loss": {"choices": [MSELoss]},
},
}
class Stage(Enum):
    """Phase of a model run; the values key into image_log_intervals below."""
    STAGE_TRAIN = "train"
    STAGE_VAL = "val"
    STAGE_TEST = "test"
@dataclass
class EarlyStoppingConfig:
    """Arguments for the project's custom early-stopping callback."""

    # Dotted path instantiated by Hydra.
    _target_: str = (
        "hiding_adversarial_attacks.callbacks."
        "early_stopping_callback.CustomEarlyStopping"
    )
    # Metric to watch; training stops when it no longer decreases (mode="min").
    monitor: str = "val_exp_sim"
    # Minimum change that counts as an improvement.
    min_delta: float = 0.0
    # Number of checks with no improvement before stopping.
    patience: int = 5
    verbose: bool = False
    mode: str = "min"
@dataclass
class ManipulatedClassifierCheckpointConfig:
    """Arguments for PyTorch Lightning's ModelCheckpoint callback."""

    _target_: str = "pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint"
    # Select checkpoints by the normalized total validation loss.
    monitor: str = VAL_NORM_TOTAL_LOSS
    # Filename pattern; epoch and val_total_loss are interpolated at save time.
    filename: str = "model-{epoch:02d}-{val_total_loss:.2f}"
    # Keep only the single best checkpoint.
    save_top_k: int = 1
    mode: str = "min"
@dataclass
class OptunaConfig:
    """Optuna hyperparameter-search settings."""

    # General options
    use_optuna: bool = False
    # Allow Optuna to prune unpromising trials early.
    prune_trials: bool = True
    number_of_trials: int = 10
    # Overall search timeout; None means unlimited.
    timeout: Optional[int] = None
    # Search spaces for hyperparameters (picked from optuna_search_spaces above).
    search_space: Any = field(
        default_factory=lambda: optuna_search_spaces["FashionMNIST_PCC"]
    )
@dataclass
class ManipulatedModelTrainingConfig(ClassifierTrainingConfig):
    """Top-level config for adversarially fine-tuning ("manipulating") a classifier."""

    name: str = "ManipulatedModelTrainingConfig"
    defaults: List[Any] = field(default_factory=lambda: defaults)

    # Path of attacked data
    data_path: str = MISSING
    # Path of explanations
    explanations_path: str = MISSING
    # Path to weights of pre-trained initial classifier
    classifier_checkpoint: str = ""
    # Path(s) to attacked classifier after adversarial fine-tuning
    checkpoint: List = field(default_factory=lambda: [])
    # Config for saving checkpoints
    checkpoint_config: ManipulatedClassifierCheckpointConfig = (
        ManipulatedClassifierCheckpointConfig()
    )
    # Explainability technique config
    explainer: ExplainerConfig = MISSING

    # Hyperparameters
    similarity_loss: SimilarityLoss = MISSING
    lr: float = 0.0001
    gamma: Optional[float] = 0.7  # LR decay factor
    steps_lr: Optional[int] = 3  # LR decay frequency
    # Relative weights of the loss terms (cross-entropy on original and
    # adversarial samples, plus explanation similarity).
    loss_weight_orig_ce: float = 1.0
    loss_weight_adv_ce: float = 1.0
    loss_weight_similarity: float = 1.0
    ce_class_weight: float = 1
    # Max number of epochs
    max_epochs: Optional[int] = 10
    # IDs of classes to train with
    included_classes: List[Any] = field(default_factory=lambda: [ALL_CLASSES])
    # Path where logs will be saved / moved to
    log_path: str = os.path.join(LoggingConfig.log_root, "manipulate_model")
    # How often to log explanations & other images to Neptune (per stage)
    image_log_intervals: Any = field(
        default_factory=lambda: {
            Stage.STAGE_TRAIN.value: 600,
            Stage.STAGE_VAL.value: 100,
            Stage.STAGE_TEST.value: 50,
        }
    )

    # Neptune options
    # Tag 'trash' will be added to tags if trash_run is True
    tags: List[str] = field(default_factory=lambda: ["manipulate-model"])
    neptune_offline_mode: bool = True

    # Optuna options
    optuna: OptunaConfig = OptunaConfig()
    early_stopping: bool = False
    early_stopping_config: EarlyStoppingConfig = EarlyStoppingConfig()
    # K-fold cross-validation; None disables it.
    kfold_num_folds: Optional[int] = None
    gradient_clip_val: Optional[float] = None
    weight_decay: float = 0.0
    normalize_explanations: bool = False
    normalize_abs: bool = False
    # Floating point precision used for training.
    precision: int = 32
    seed_everything: bool = False
    freeze: bool = False
    auto_lr_find: bool = False
# Register all structured configs with Hydra's ConfigStore so they can be
# selected by group name from the command line / defaults list.
cs = ConfigStore.instance()

# Data set configs (adversarial variants, with and without explanations).
cs.store(group="data_set", name="AdversarialMNIST", node=AdversarialMNISTConfig)
cs.store(
    group="data_set",
    name="AdversarialFashionMNIST",
    node=AdversarialFashionMNISTConfig,
)
cs.store(
    group="data_set",
    name="AdversarialFashionMNISTWithExplanations",
    node=AdversarialFashionMNISTWithExplanationsConfig,
)
cs.store(
    group="data_set",
    name="AdversarialCifar10",
    node=AdversarialCifar10Config,
)
cs.store(
    group="data_set",
    name="AdversarialCifar10WithExplanations",
    node=AdversarialCifar10WithExplanationsConfig,
)

# Classifier configs.
cs.store(group="classifier", name="MNISTClassifier", node=MNISTClassifierConfig)
cs.store(
    group="classifier",
    name="FashionMNISTClassifier",
    node=FashionMNISTClassifierConfig,
)
cs.store(group="classifier", name="Cifar10Classifier", node=Cifar10ClassifierConfig)

# Explainer configs and their baselines.
cs.store(group="explainer", name="DeepLiftExplainer", node=DeepLiftConfig)
cs.store(group="explainer", name="LayerDeepLiftExplainer", node=LayerDeepLiftConfig)
cs.store(group="explainer", name="GuidedBackpropExplainer", node=GuidedBackpropConfig)
cs.store(group="explainer", name="LRPExplainer", node=LRPConfig)
cs.store(group="explainer.baseline", name="ZeroBaseline", node=ZeroBaselineConfig)
cs.store(group="explainer.baseline", name="BlurBaseline", node=BlurBaselineConfig)
cs.store(
    group="explainer.baseline",
    name="LocalMeanBaseline",
    node=LocalMeanBaselineConfig,
)
cs.store(group="explainer", name="GradCamExplainer", node=LayerGradCamConfig)
cs.store(
    group="explainer",
    name="IntegratedGradientsExplainer",
    node=IntegratedGradientsConfig,
)
cs.store(group="explainer", name="InputXGradientExplainer", node=InputXGradientConfig)

# Similarity loss configs.
cs.store(group="similarity_loss", name="MSE", node=MSELoss)
cs.store(group="similarity_loss", name="PCC", node=PCCLoss)
cs.store(group="similarity_loss", name="SSIM", node=SSIMLoss)

# Root config for the manipulated-model training entry point.
cs.store(
    name="manipulated_model_training_config",
    node=ManipulatedModelTrainingConfig,
)
| StarcoderdataPython |
1700572 | from copy import deepcopy
import json
import re
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import StreamingHttpResponse
from bq_data_access.feature_search.util import SearchableFieldHelper
from django.http import HttpResponse, JsonResponse
from models import Cohort, Workbook, Worksheet, Worksheet_comment, Worksheet_variable, Worksheet_gene, Worksheet_cohort, Worksheet_plot, Worksheet_plot_cohort
from variables.models import VariableFavorite, Variable
from genes.models import GeneFavorite
from analysis.models import Analysis
from projects.models import Project
from sharing.service import create_share
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
debug = settings.DEBUG
if settings.DEBUG :
import sys
@login_required
def workbook_list(request):
    """List all workbooks the user owns or that are actively shared with them."""
    # FIX: the original had a stray trailing comma that turned the template
    # name into a 1-tuple; render() tolerated the sequence form, but a plain
    # string is what was intended.
    template = 'workbooks/workbook_list.html'

    userWorkbooks = request.user.workbook_set.all()
    sharedWorkbooks = Workbook.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)

    # Union of owned and shared, de-duplicated in case a workbook is both.
    workbooks = (userWorkbooks | sharedWorkbooks).distinct()

    return render(request, template, {'workbooks' : workbooks})
def workbook_samples(request):
    """Render the page listing publicly shared sample workbooks."""
    sample_workbooks = Workbook.objects.filter(is_public=True, active=True)
    return render(request, 'workbooks/workbook_samples.html',
                  {'workbooks': sample_workbooks})
#TODO secure this url
@login_required
def workbook_create_with_cohort(request):
    """Create a new workbook whose first worksheet contains the POSTed cohort."""
    cohort_id = request.POST.get('cohort_id')
    cohort = Cohort.objects.get(id=cohort_id)
    workbook_model = Workbook.create(name="Untitled Workbook", description="This workbook was created with cohort \"" + cohort.name + "\" added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
    worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
    worksheet_model.add_cohort(cohort=cohort)

    # Send the user straight to the newly created workbook.
    redirect_url = reverse('workbook_detail', kwargs={'workbook_id':workbook_model.id})
    return redirect(redirect_url)
@login_required
def workbook_create_with_cohort_list(request):
    """Create a workbook whose first worksheet contains all cohorts in the JSON body.

    Always responds with HTTP 200; failures are reported via an 'error' key
    in the JSON payload.
    """
    cohort_ids = json.loads(request.body)['cohorts']
    if len(cohort_ids) > 0 :
        workbook_model = Workbook.create(name="Untitled Workbook", description="This is a workbook created with cohorts added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
        worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
        # Attach every requested cohort to the new worksheet.
        for id in cohort_ids :
            cohort = Cohort.objects.get(id=id)
            worksheet_model.add_cohort(cohort=cohort)

        result = {'workbook_id' : workbook_model.id,
                  'worksheet_id' : worksheet_model.id}
    else :
        result = {'error' : 'parameters are not correct'}
    return HttpResponse(json.dumps(result), status=200)
#TODO maybe complete
@login_required
def workbook_create_with_project(request):
    """Create a workbook pre-populated with every variable of the POSTed project."""
    project_id = request.POST.get('project_id')
    project_model = Project.objects.get(id=project_id)
    workbook_model = Workbook.create(name="Untitled Workbook", description="this is an untitled workbook with all variables of project \"" + project_model.name + "\" added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
    worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)

    #add every variable within the model
    # Only active studies are considered; each study contributes all of its
    # user-defined feature definitions.
    for study in project_model.study_set.all().filter(active=True) :
        for var in study.user_feature_definitions_set.all() :
            work_var = Worksheet_variable.objects.create(worksheet_id = worksheet_model.id,
                                                         name = var.feature_name,
                                                         url_code = var.bq_map_id,
                                                         feature_id = var.id)
            work_var.save()

    redirect_url = reverse('workbook_detail', kwargs={'workbook_id':workbook_model.id})
    return redirect(redirect_url)
@login_required
def workbook_create_with_variables(request):
    """Create a workbook pre-populated with the variables of a variable favorite list.

    Accepts either a JSON payload ('json_data' containing 'variable_list_id')
    or a plain form POST ('variable_list_id'). Responds with JSON when the
    request was JSON, otherwise redirects to the new workbook's detail page.
    """
    json_data = request.POST.get('json_data')
    if json_data:
        data = json.loads(json_data)
        # TODO: Refactor so that user can create using multiple variable lists
        var_list_id = data['variable_list_id'][0]
    else:
        var_list_id = request.POST.get('variable_list_id')

    var_list_model = VariableFavorite.objects.get(id=var_list_id)
    name = request.POST.get('name', var_list_model.name + ' workbook')
    workbook_model = Workbook.create(name=name, description="this is an untitled workbook with all variables of variable favorite list \"" + var_list_model.name + "\" added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
    workbook_model.save()
    worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
    worksheet_model.save()

    # FIX: removed a stray Python-2 debug statement ('print workbook_model.id')
    # that was left in the request path.

    # Copy every variable of the favorite list onto the new worksheet.
    for var in var_list_model.get_variables() :
        work_var = Worksheet_variable.objects.create(worksheet_id = worksheet_model.id,
                                                     name = var.name,
                                                     url_code = var.code,
                                                     type = var.type,
                                                     feature_id = var.feature_id)
        work_var.save()

    redirect_url = reverse('workbook_detail', kwargs={'workbook_id':workbook_model.id})

    if json_data:
        return JsonResponse({'workbook_id': workbook_model.id, 'worksheet_id': worksheet_model.id})
    else:
        return redirect(redirect_url)
@login_required
def workbook_create_with_analysis(request):
    """Create a workbook whose first worksheet is seeded with the requested plot type."""
    analysis_type = request.POST.get('analysis')

    # Fall back to the sample-analyses page when the type is not recognised.
    redirect_url = reverse('sample_analyses')

    for allowed in Analysis.get_types():
        if allowed['name'] == analysis_type:
            workbook_model = Workbook.create(name="Untitled Workbook", description="this is an untitled workbook with a \"" + analysis_type + "\" plot added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
            worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
            worksheet_model.set_plot(type=analysis_type)
            redirect_url = reverse('workbook_detail', kwargs={'workbook_id': workbook_model.id})
            break

    return redirect(redirect_url)
def get_gene_datatypes():
    """Return searchable gene datatypes, relabeled for display.

    Clinical (CLIN) and miRNA (MIRN) datatypes are excluded, and the 'Gene'
    field is stripped from each remaining datatype because the gene itself
    is chosen through the variable selection UI.
    """
    datatype_labels = {'GEXP' : 'Gene Expression',
                       'METH' : 'Methylation',
                       'CNVR' : 'Copy Number',
                       'RPPA' : 'Protein',
                       'GNAB' : 'Mutation'}

    datatype_list = SearchableFieldHelper.get_fields_for_all_datatypes()
    if debug: print >> sys.stderr, ' attrs ' + json.dumps(datatype_list)

    return_list = []
    for type in datatype_list:
        if type['datatype'] != 'CLIN' and type['datatype'] != 'MIRN' :
            type['label'] = datatype_labels[type['datatype']]
            # FIX: rebuild the field list instead of deleting while iterating.
            # The original 'del type["fields"][index]' inside enumerate() shifts
            # the remaining elements, so the element after a removal was
            # skipped and duplicate "Gene" entries could survive.
            type['fields'] = [field for field in type['fields']
                              if field['label'] != "Gene"]
            return_list.append(type)

    return return_list
@login_required
def workbook(request, workbook_id=0):
    """Dispatch workbook create/edit/copy/delete (POST) and detail view (GET).

    The action for POST requests is taken from the last segment of the URL path.
    """
    template = 'workbooks/workbook.html'
    command = request.path.rsplit('/',1)[1]

    if request.method == "POST" :
        if command == "create" :
            workbook_model = Workbook.createDefault(name="Untitled Workbook", description="", user=request.user)
        elif command == "edit" :
            workbook_model = Workbook.edit(id=workbook_id, name=request.POST.get('name'), description=request.POST.get('description'))
        elif command == "copy" :
            workbook_model = Workbook.copy(id=workbook_id, user=request.user)
        elif command == "delete" :
            Workbook.destroy(id=workbook_id)

        # After a delete there is no workbook to show, so go back to the list.
        if command == "delete":
            redirect_url = reverse('workbooks')
            return redirect(redirect_url)
        else :
            # NOTE(review): an unknown POST command leaves workbook_model
            # unbound and raises NameError here — confirm routing restricts
            # the allowed commands.
            redirect_url = reverse('workbook_detail', kwargs={'workbook_id':workbook_model.id})
            return redirect(redirect_url)

    elif request.method == "GET" :
        if workbook_id:
            try :
                # The user may view workbooks they own, that are shared with
                # them, or that are public.
                ownedWorkbooks = request.user.workbook_set.all().filter(active=True)
                sharedWorkbooks = Workbook.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)
                publicWorkbooks = Workbook.objects.all().filter(is_public=True,active=True)

                workbooks = ownedWorkbooks | sharedWorkbooks | publicWorkbooks
                workbooks = workbooks.distinct()

                workbook_model = workbooks.get(id=workbook_id)
                workbook_model.worksheets = workbook_model.get_deep_worksheets()

                is_shareable = workbook_model.is_shareable(request)

                # Resolve the share record only for non-owners of private workbooks.
                shared = None
                if workbook_model.owner.id != request.user.id and not workbook_model.is_public:
                    shared = request.user.shared_resource_set.get(workbook__id=workbook_id)

                plot_types = Analysis.get_types()

                return render(request, template, {'workbook' : workbook_model,
                                                  'datatypes' : get_gene_datatypes(),
                                                  'is_shareable': is_shareable,
                                                  'shared' : shared,
                                                  'plot_types' : plot_types})
            except ObjectDoesNotExist:
                # Workbook not visible to this user (or missing) — back to the list.
                redirect_url = reverse('workbooks')
                return redirect(redirect_url)
        else :
            redirect_url = reverse('workbooks')
            return redirect(redirect_url)
@login_required
def workbook_share(request, workbook_id=0):
    """Share one of the user's workbooks with a comma-separated list of emails."""
    # FIX: raw string for the regex — '\s' in a plain literal is an invalid
    # escape sequence (DeprecationWarning today, a SyntaxError in future
    # Python versions). The pattern itself is unchanged.
    emails = re.split(r'\s*,\s*', request.POST['share_users'].strip())
    # Only workbooks the requester owns can be shared.
    workbook = request.user.workbook_set.get(id=workbook_id, active=True)
    create_share(request, workbook, emails, 'Workbook')

    return JsonResponse({
        'status': 'success'
    })
@login_required
#used to display a particular worksheet on page load
def worksheet_display(request, workbook_id=0, worksheet_id=0):
    """Render the workbook page with a specific worksheet selected."""
    template = 'workbooks/workbook.html'
    workbook_model = Workbook.deep_get(workbook_id)
    workbook_model.mark_viewed(request)
    is_shareable = workbook_model.is_shareable(request)

    # Compare ids as strings because the URL captures them as text.
    for worksheet in workbook_model.worksheets:
        if str(worksheet.id) == worksheet_id :
            display_worksheet = worksheet

    # NOTE(review): if no worksheet matches, display_worksheet is never bound
    # and the render call below raises NameError — confirm the id is always
    # valid at this point.
    plot_types = Analysis.get_types()
    return render(request, template, {'workbook' : workbook_model,
                                      'is_shareable' : is_shareable,
                                      'datatypes' : get_gene_datatypes(),
                                      'display_worksheet' : display_worksheet,
                                      'plot_types' : plot_types})
@login_required
def worksheet(request, workbook_id=0, worksheet_id=0):
    """Create, edit, copy, or delete a worksheet; the action is the last path segment."""
    command = request.path.rsplit('/',1)[1]

    if request.method == "POST" :
        if command == "create" :
            worksheet = Worksheet.create(workbook_id=workbook_id, name=request.POST.get('name'), description=request.POST.get('description'))
            redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_id, 'worksheet_id': worksheet.id})
        elif command == "edit" :
            worksheet = Worksheet.edit(id=worksheet_id, name=request.POST.get('name'), description=request.POST.get('description'))
            redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_id, 'worksheet_id': worksheet.id})
        elif command == "copy" :
            worksheet = Worksheet.copy(id=worksheet_id)
            redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_id, 'worksheet_id': worksheet.id})
        elif command == "delete" :
            # After deletion, return to the parent workbook rather than the
            # (now missing) worksheet.
            Worksheet.destroy(id=worksheet_id)
            redirect_url = reverse('workbook_detail', kwargs={'workbook_id':workbook_id})

    # NOTE(review): redirect_url is only bound for POST with a known command;
    # any other request raises NameError here — confirm routing only allows
    # these POST actions.
    return redirect(redirect_url)
@login_required
def worksheet_variable_delete(request, workbook_id=0, worksheet_id=0, variable_id=0):
    """Remove a single variable from a worksheet, then return to that worksheet."""
    target_sheet = Worksheet.objects.get(id=worksheet_id)
    target_sheet.remove_variable(variable_id)
    destination = reverse('worksheet_display',
                          kwargs={'workbook_id': workbook_id, 'worksheet_id': worksheet_id})
    return redirect(destination)
@login_required
def worksheet_variables(request, workbook_id=0, worksheet_id=0, variable_id=0):
    """Add variables to (or delete them from) a worksheet.

    Variables may arrive from three sources: the edit page (JSON body with
    'variables'), the details/list page ('variable_list_id' form field), or
    the select page (JSON body with 'var_favorites'). When workbook_id is 0
    a new workbook/worksheet pair is created first.
    """
    # Last path segment selects the action.
    command = request.path.rsplit('/',1)[1];
    json_response = False
    workbook_name = "Untitled Workbook"
    result = {}
    if request.method == "POST" :
        if command == "delete" :
            Worksheet_variable.destroy(workbook_id=workbook_id, worksheet_id=worksheet_id, id=variable_id, user=request.user)
            result['message'] = "variables have been deleted from workbook"
        else :
            variables = []

            #from Edit Page
            if "variables" in request.body :
                json_response = True
                name = json.loads(request.body)['name']
                variable_list = json.loads(request.body)['variables']
                # Persist a new favorite list, then read its variables back.
                variable_favorite_result = VariableFavorite.create(name = name,
                                                                   variables = variable_list,
                                                                   user = request.user)
                model = VariableFavorite.objects.get(id=variable_favorite_result['id'])
                messages.info(request, 'The variable favorite list \"' + model.name + '\" was created and added to your worksheet')
                variables = model.get_variables()

            #from Details Page or list page
            if request.POST.get("variable_list_id") :
                workbook_name = request.POST.get("name")
                variable_id = request.POST.get("variable_list_id")
                try :
                    variable_fav = VariableFavorite.objects.get(id=variable_id)
                    variables = variable_fav.get_variables()
                except ObjectDoesNotExist:
                    result['error'] = "variable favorite does not exist"

            #from Select Page
            if "var_favorites" in request.body :
                variable_fav_list = json.loads(request.body)['var_favorites']
                json_response = True
                # NOTE(review): 'variables' is reassigned each iteration, so
                # only the last favorite's variables survive — confirm whether
                # accumulation was intended.
                for fav in variable_fav_list:
                    try:
                        fav = VariableFavorite.objects.get(id=fav['id'])
                        variables = fav.get_variables()
                    except ObjectDoesNotExist:
                        result['error'] = "variable favorite does not exist"

            if len(variables) > 0:
                # Create a fresh workbook/worksheet or reuse the given pair.
                if workbook_id == 0:
                    workbook_model = Workbook.create(name=workbook_name, description="This workbook was created with variables added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
                    worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
                else :
                    workbook_model = Workbook.objects.get(id=workbook_id)
                    worksheet_model = Worksheet.objects.get(id=worksheet_id)

                Worksheet_variable.edit_list(workbook_id=workbook_model.id, worksheet_id=worksheet_model.id, variable_list=variables, user=request.user)
                result['workbook_id'] = workbook_model.id
                result['worksheet_id'] = worksheet_model.id
            else :
                result['error'] = "no variables to add"
    else :
        result['error'] = "method not correct"

    if json_response :
        return HttpResponse(json.dumps(result), status=200)
    else :
        # NOTE(review): workbook_model/worksheet_model are only bound on the
        # successful add path; a non-JSON delete or error request reaching
        # this branch raises NameError — confirm intended request flows.
        redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_model.id, 'worksheet_id': worksheet_model.id})
        return redirect(redirect_url)
@login_required
def workbook_create_with_genes(request):
    """Create a workbook from gene favorites by delegating to worksheet_genes."""
    return worksheet_genes(request)
@login_required
def worksheet_gene_delete(request, workbook_id=0, worksheet_id=0, gene_id=0):
    """Remove one gene from a worksheet and return to its display page."""
    target_sheet = Worksheet.objects.get(id=worksheet_id)
    target_sheet.remove_gene(gene_id)
    destination = reverse('worksheet_display',
                          kwargs={'workbook_id': workbook_id, 'worksheet_id': worksheet_id})
    return redirect(destination)
@login_required
def worksheet_genes(request, workbook_id=0, worksheet_id=0, genes_id=0):
    """Add genes to a worksheet, or delete them from it.

    Gene names can arrive from three places: the gene edit page
    ('genes-list' form field), the gene details page ('gene_list_id'),
    or the gene list page (JSON body key 'gene_fav_list'). When
    workbook_id is 0, a new workbook/worksheet pair is created first.
    Responds with JSON for JSON requests, otherwise redirects to the
    worksheet display page.
    """
    command = request.path.rsplit('/',1)[1]
    json_response = False
    result = {}
    if request.method == "POST" :
        if command == "delete" :
            Worksheet_gene.destroy(workbook_id=workbook_id, worksheet_id=worksheet_id, id=genes_id, user=request.user)
            result['message'] = "genes have been deleted from workbook"
        else :
            genes = []
            workbook_name = 'Untitled Workbook'

            #from Gene Edit Page: a space-separated list of gene names
            if request.POST.get("genes-list") :
                # Get workbook name
                if request.POST.get('name'):
                    workbook_name = request.POST.get('name')
                name = request.POST.get("genes-name")
                gene_list = request.POST.get("genes-list")
                gene_list = [x.strip() for x in gene_list.split(' ')]
                gene_list = list(set(gene_list))  # de-duplicate
                GeneFavorite.create(name=name, gene_list=gene_list, user=request.user)
                messages.info(request, 'The gene favorite list \"' + name + '\" was created and added to your worksheet')
                for g in gene_list:
                    genes.append(g)

            #from Gene Details Page: an existing favorite's id
            if request.POST.get("gene_list_id") :
                # Get workbook name
                if request.POST.get('name'):
                    workbook_name = request.POST.get('name')
                gene_id = request.POST.get("gene_list_id")
                try :
                    gene_fav = GeneFavorite.objects.get(id=gene_id)
                    names = gene_fav.get_gene_name_list()
                    for g in names:
                        genes.append(g)
                except ObjectDoesNotExist:
                    # Favorite vanished; silently skip (original behavior).
                    None

            #from Gene List Page: JSON body with favorite ids
            if "gene_fav_list" in request.body :
                json_response = True
                gene_fav_list = json.loads(request.body)['gene_fav_list']
                for id in gene_fav_list:
                    try:
                        fav = GeneFavorite.objects.get(id=id)
                        names = fav.get_gene_name_list()
                        for g in names:
                            genes.append(g)
                    except ObjectDoesNotExist:
                        None

            if len(genes) > 0:
                # FIX: was 'workbook_id is 0' — identity comparison with an int
                # literal only worked via CPython's small-int caching accident;
                # equality is the correct test.
                if workbook_id == 0:
                    workbook_model = Workbook.create(name=workbook_name, description="This workbook was created with genes added to the first worksheet. Click Edit Details to change your workbook title and description.", user=request.user)
                    worksheet_model = Worksheet.objects.create(name="worksheet 1", description="", workbook=workbook_model)
                else :
                    workbook_model = Workbook.objects.get(id=workbook_id)
                    worksheet_model = Worksheet.objects.get(id=worksheet_id)

                Worksheet_gene.edit_list(workbook_id=workbook_model.id, worksheet_id=worksheet_model.id, gene_list=genes, user=request.user)
                result['genes'] = genes
            else :
                result['error'] = "no genes to add"
    else :
        result['error'] = "method not correct"

    if json_response :
        return HttpResponse(json.dumps(result), status=200)
    else :
        redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_model.id, 'worksheet_id': worksheet_model.id})
        return redirect(redirect_url)
@login_required
def workbook_create_with_plot(request):
    """Create a workbook seeded with a plot by delegating to worksheet_plots."""
    return worksheet_plots(request=request)
@login_required
def worksheet_plots(request, workbook_id=0, worksheet_id=0, plot_id=0):
    """Get, update, or delete the plot associated with a worksheet."""
    # Last path segment selects the action.
    command = request.path.rsplit('/',1)[1];
    json_response = False
    # NOTE(review): default_name is never used in this function.
    default_name = "Untitled Workbook"
    result = {}
    if request.method == "POST" :
        if command == "delete" :
            var = Worksheet_plot.objects.get(id=plot_id).delete()
            result['message'] = "the plot has been deleted from workbook"
        else :
            #update
            if "attrs" in request.body :
                json_response = True
                attrs = json.loads(request.body)['attrs']
                # NOTE(review): this local shadows the module-level
                # 'django.conf.settings' import within this function.
                settings = json.loads(request.body)['settings']
                if plot_id :
                    plot_model = Worksheet_plot.objects.get(id=plot_id)
                    plot_model.settings_json = settings
                    if attrs['cohorts'] :
                        try :
                            # Replace the plot's cohort links with the posted set.
                            Worksheet_plot_cohort.objects.filter(plot=plot_model).delete()
                            for obj in attrs['cohorts'] :
                                wpc = Worksheet_plot_cohort(plot=plot_model, cohort_id=obj['id'])
                                wpc.save()
                        except ObjectDoesNotExist:
                            None
                    plot_model.save()
                    result['updated'] = "success"
    elif request.method == "GET" :
        json_response = True
        plot_type = request.GET.get('type', 'default')
        worksheet_model = Worksheet.objects.get(id=worksheet_id)
        # Deactivate every existing plot, then activate (or create) the one
        # matching the requested type.
        plots = worksheet_model.worksheet_plot_set.all()
        for p in plots :
            p.active = False
            p.save()

        plots = worksheet_model.worksheet_plot_set.filter(type=plot_type)
        if len(plots) == 0:
            model = Worksheet_plot(type=plot_type, worksheet=worksheet_model)
            model.save()
        else :
            model = plots[0]
        model.active = True
        model.save()
        result['data'] = model.toJSON()
    else :
        result['error'] = "method not correct"

    if json_response :
        return HttpResponse(json.dumps(result), status=200)
    else :
        # NOTE(review): workbook_model/worksheet_model are never bound in this
        # function, so any non-JSON request reaching this branch (e.g. the
        # delete path) raises NameError — confirm callers are JSON-only.
        redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_model.id, 'worksheet_id': worksheet_model.id})
        return redirect(redirect_url)
@login_required
def worksheet_cohorts(request, workbook_id=0, worksheet_id=0, cohort_id=0):
    """Edit or delete the cohorts attached to a worksheet, then redirect back."""
    # Last path segment selects the action ("edit" or "delete").
    command = request.path.rsplit('/',1)[1];
    # NOTE(review): the body is parsed before the method check, so a GET or a
    # non-JSON body raises here — confirm this endpoint is POST/JSON only.
    cohorts = json.loads(request.body)['cohorts']
    if request.method == "POST" :
        if command == "edit" :
            Worksheet_cohort.edit_list(worksheet_id=worksheet_id, id=cohort_id, cohort_ids=cohorts, user=request.user)
        elif command == "delete" :
            Worksheet_cohort.destroy(worksheet_id=worksheet_id, id=cohort_id, user=request.user)

    redirect_url = reverse('worksheet_display', kwargs={'workbook_id':workbook_id, 'worksheet_id': worksheet_id})
    return redirect(redirect_url)
@login_required
def worksheet_comment(request, workbook_id=0, worksheet_id=0, comment_id=0):
    """Create or delete a worksheet comment; the action is the last path segment."""
    command = request.path.rsplit('/',1)[1];
    if request.method == "POST" :
        if command == "create" :
            result = Worksheet_comment.create(worksheet_id = worksheet_id,
                                              content = request.POST.get('content'),
                                              user = request.user)
            return HttpResponse(json.dumps(result), status=200)
        elif command == "delete" :
            result = Worksheet_comment.destroy(comment_id = comment_id)
            return HttpResponse(json.dumps(result), status=200)
    # NOTE(review): non-POST or unknown commands fall through and implicitly
    # return None — confirm routing restricts this view to the two actions.
| StarcoderdataPython |
132583 | <filename>Classification Based Machine Learning for Algorithmic Trading/default_predictions/SGDClassifier.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 22:02:07 2017
@author: Anthony

Trains an SGD linear classifier on a loan-default dataset and reports
cross-validated accuracy, confusion matrix, precision and recall.
"""
import numpy as np
import pandas as pd

# Load the dataset; 'default' is the binary target column.
df = pd.read_csv("dataset_2.csv")
df['default'].describe()
# Class balance check: counts of non-defaults vs defaults.
sum(df['default'] == 0)
sum(df['default'] == 1)

# Features are columns 1..5; target is the 'default' column.
X = df.iloc[:, 1:6].values
y = df['default'].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=0)

# Shuffle the training set (SGD is sensitive to sample order).
shuffle_index = np.random.permutation(len(X_train))
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
# Transform the test set with the scaler fitted on training data only
# (avoids test-set leakage).
X_test = sc_X.transform(X_test)

from sklearn import linear_model
clf = linear_model.SGDClassifier(random_state=0)
clf.fit(X_train, y_train)

# Cross Validation
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy')
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3)
cm = confusion_matrix(y_train, y_train_pred)
print(cm)

from sklearn.metrics import precision_score, recall_score
print("precision score = {0:.4f}".format(precision_score(y_train, y_train_pred)))
print("recall score = {0:.4f}".format(recall_score(y_train, y_train_pred)))
161514 | <gh_stars>1-10
"""Computes the frame of the relevant cubes in the specified base frame."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
from aruco_interfaces.msg import ArucoMarkers
from geometry_msgs.msg import Pose, TransformStamped
import numpy as np
import quaternion
import rclpy
import rclpy.node
from tf2_ros import TransformBroadcaster
from tf2_ros.buffer import Buffer
from tf2_ros.transform_listener import TransformListener
# Aruco marker ids printed on the six faces of each labeled cube.
CUBES = {
    'A': [53, 54, 55, 56, 57, 58],
    'B': [20, 21, 22, 23, 24, 25],
    'C': [41, 42, 43, 44, 45, 46],
    'D': [35, 36, 37, 38, 39, 40],
    'E': [26, 27, 28, 29, 30, 31],
    'F': [47, 48, 49, 50, 51, 52]
}
class MarkerNode(rclpy.node.Node):
"""This node listens to the poses of aruco tags and publishes a mean pose to TF."""
def __init__(self):
global CUBES
super().__init__('cube_publisher')
# Declare parameters
self.declare_parameter('base_frame', 'base_link')
self.declare_parameter('width', 0.05)
self.declare_parameter('max_age', 2.5)
self.declare_parameter('cubes', ['B', 'C'])
self.base_frame = self.get_parameter('base_frame').get_parameter_value().string_value
self.marker_width = self.get_parameter('width').get_parameter_value().double_value
self.max_pose_age = self.get_parameter('max_age').get_parameter_value().double_value
self.cube_names = self.get_parameter('cubes').get_parameter_value().string_array_value
self.marker_sub = self.create_subscription(
ArucoMarkers,
'~/aruco_markers',
self.marker_callback, 10
)
self.marker_info = ArucoMarkers()
# Initialize the transform listener
self.tf_buffer = Buffer()
self.tf_listener = TransformListener(self.tf_buffer, self)
self.br = TransformBroadcaster(self)
# iterate through the cube dictionary retaining only relevant ids
# initialize a dictionary with last poses
self.last_poses = {}
for name in self.cube_names:
for marker in CUBES[name]:
self.last_poses[marker] = None
self.allowed_ids = []
for id_list in CUBES.values():
self.allowed_ids += id_list
timer_period = 0.5 # seconds
self.timer = self.create_timer(timer_period, self.publish_object_pose)
    def marker_callback(self, msg: ArucoMarkers):
        """
        Get the recognised aruco markers in the scene.

        Looks up each allowed marker's pose in the base frame via TF and
        caches it in self.last_poses; markers not present in this message
        are reset to None.

        Args:
        ----
            msg: Aruco Marker message containing markers ids and poses.
        """
        # get the published markers
        published_markers_ids = msg.marker_ids
        # get the poses for the published markers
        now = rclpy.time.Time()

        # Delete latest poses (stale entries stay None until re-detected)
        for name in self.cube_names:
            for marker in CUBES[name]:
                self.last_poses[marker] = None

        for i, marker in enumerate(published_markers_ids):
            if marker not in self.allowed_ids:
                # self.get_logger().warn(f'ID {marker} not allowed.')
                continue
            target_frame = f'marker_{marker}'
            try:
                # Transform the marker pose into the configured base frame.
                marker_tf = self.tf_buffer.lookup_transform(self.base_frame, target_frame, now)
                marker_pose = Pose()
                marker_pose.position.x = marker_tf.transform.translation.x
                marker_pose.position.y = marker_tf.transform.translation.y
                marker_pose.position.z = marker_tf.transform.translation.z
                marker_pose.orientation.x = marker_tf.transform.rotation.x
                marker_pose.orientation.y = marker_tf.transform.rotation.y
                marker_pose.orientation.z = marker_tf.transform.rotation.z
                marker_pose.orientation.w = marker_tf.transform.rotation.w
                self.last_poses[marker] = marker_pose
            except Exception:
                self.get_logger().warn(f'Frame marker_{marker} not available....')

        self.marker_info.header.stamp = msg.header.stamp
        self.marker_info.header.frame_id = msg.header.frame_id
    def publish_object_pose(self):
        """Publish the pose of the cubes by using heuristics."""
        # NOTE(review): CUBES is only read here, so the global statement is
        # not strictly required; kept as in the original.
        global CUBES
        transforms = []
        for name in self.cube_names:
            # maybe do the opposite
            visible_markers = self.get_recent_marker_poses(ids=CUBES[name])
            # print('visible markers:', visible_markers)
            if len(visible_markers) == 0:
                # marker id not visible, continue
                continue
            # Position
            mean_position = self.compute_object_position(visible_markers)
            # Orientation
            orientation = self.compute_object_orientation(visible_markers)

            # Build the TF message for this cube in the base frame.
            t = TransformStamped()
            t.header.frame_id = self.base_frame
            t.header.stamp = self.get_clock().now().to_msg()
            t.child_frame_id = name
            t.transform.translation.x = mean_position[0]
            t.transform.translation.y = mean_position[1]
            t.transform.translation.z = mean_position[2]
            t.transform.rotation.w = orientation.w
            t.transform.rotation.x = orientation.x
            t.transform.rotation.y = orientation.y
            t.transform.rotation.z = orientation.z
            self.__orientation_heuristic(t)
            transforms.append(t)

        # Broadcast all cube transforms collected this cycle.
        for tr in transforms:
            self.br.sendTransform(tr)
    def compute_object_position(self, visible_markers: List[Pose]) -> np.ndarray:
        """
        Heuristic to compute the position of the cube given the visible markers and the width.

        Each marker sits on a cube face, so the cube center lies half a cube
        width behind the marker along the marker's local z-axis; the estimates
        from all visible markers are averaged.

        Args
        ----
            visible_markers: markers that are visible and usable to compute the position.

        Returns
        -------
            mean_position: the position of the cube.
        """
        offset_distance = self.marker_width/2.0
        positions = np.zeros((len(visible_markers), 3))
        offset = np.array([0.0, 0.0, offset_distance])
        for i, marker in enumerate(visible_markers):
            position = np.array([marker.position.x, marker.position.y, marker.position.z])
            orientation = np.quaternion(
                marker.orientation.w,
                marker.orientation.x,
                marker.orientation.y,
                marker.orientation.z
            )
            orientation_inv = orientation.inverse()
            # For a unit quaternion q, q.inverse().conj() == q, so this is the
            # standard q * p * q^-1 sandwich rotating the -offset vector.
            # NOTE(review): relies on the marker quaternion being normalized —
            # confirm the upstream aruco publisher guarantees unit quaternions.
            positions[i, :] = position +\
                quaternion.as_vector_part(
                    orientation_inv.conj()*quaternion.from_vector_part(-offset)*orientation_inv
                )
        mean_position = np.mean(positions, axis=0)
        return mean_position
def compute_object_orientation(self, visible_markers: List[Pose]) -> quaternion:
"""
Heuristic to compute the orientation of the cube given the visible markers.
Args
----
visible_markers: markers that are visible and usable to compute the orientation.
Returns
-------
mean_orientation: the orientation of the cube.
"""
marker_orientations = np.zeros((len(visible_markers),), dtype=np.quaternion)
for i, marker in enumerate(visible_markers):
marker_orientations[i] = np.quaternion(
marker.orientation.w,
marker.orientation.x,
marker.orientation.y,
marker.orientation.z
)
# Are we seing the top marker?
top_visible = False
top_orientation = None
for marker_orientation in marker_orientations:
# Transform z-axis to base frame
rotation = marker_orientation.inverse()
z_axis = quaternion.as_vector_part(
rotation.conj()*quaternion.from_vector_part(np.array([0.0, 0.0, 1.0]))*rotation
)
z_axis /= np.linalg.norm(z_axis)
z_angle = np.arccos(np.dot(z_axis, np.array([0.0, 0.0, 1.0])))
if z_angle <= np.pi/4:
top_visible = True
top_orientation = marker_orientation
break
if top_visible:
return top_orientation
# Top marker is not visible. We must be seing markers on the sides.
# Pick whatever marker to compute orientation.
marker_orientation = marker_orientations[0]
# Find which of x- and y-axis is most paralell to base frame z-axis.
# I.e which has the largest dot product in magnitude.
axes = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
# Transform axes to base frame
rotation = marker_orientation.inverse()
axes_base = quaternion.as_vector_part(
rotation.conj()*quaternion.from_vector_part(axes)*rotation
)
dot_product = axes_base @ np.array([0.0, 0.0, 1.0]).reshape((3, 1))
best_axis_idx = np.argmax(np.abs(dot_product))
best_axis = axes_base[best_axis_idx]
# Flip axis if it points in the opposite direction of base frame z-axis
best_axis *= np.sign(dot_product[best_axis_idx])
# The rotation axis is perpendicular to best_axis and the marker
# z-axis.
# Transform z-axis to base frame
rotation = marker_orientation.inverse()
marker_z_axis = quaternion.as_vector_part(
rotation.conj()*quaternion.from_vector_part(np.array([0.0, 0.0, 1.0]))*rotation
)
# Compute the rotation axis
rot_axis = np.cross(marker_z_axis, best_axis)
# Transform z-axis to base frame
rotation = marker_orientation.inverse()
z_axis = quaternion.as_vector_part(
rotation.conj()*quaternion.from_vector_part(np.array([0.0, 0.0, 1.0]))*rotation
)
mean_orientation = quaternion.from_rotation_vector(np.pi/2*rot_axis)*marker_orientation
return mean_orientation
def get_recent_marker_poses(self, ids: List[int]) -> List[Pose]:
"""Return poses for the ids in ids that are visible (not too old)."""
recent = []
for marker in ids:
if self.last_poses[marker] is None:
# print('skipping:', marker)
continue
stamp = rclpy.time.Time.from_msg(self.marker_info.header.stamp)
age = self.get_clock().now() - stamp
if age <= rclpy.duration.Duration(seconds=self.max_pose_age):
recent.append(self.last_poses[marker])
return recent
    def __orientation_heuristic(self, pose: TransformStamped):
        """
        Restrict rotation to +- 45 degrees (in-place).

        Because a cube looks the same under 90-degree yaw rotations, the
        published orientation is snapped so that the base x-axis, seen in
        the object frame, stays within +-45 degrees of the object x-axis.
        The transform's rotation is modified in place.

        Args:
        ----
        pose: transform of the cube (a TransformStamped, despite the name).
        """
        quat = np.quaternion(
            pose.transform.rotation.w,
            pose.transform.rotation.x,
            pose.transform.rotation.y,
            pose.transform.rotation.z
        )
        # Transform base x-axis to object frame
        base_x_axis = quaternion.as_vector_part(
            quat.conj()*quaternion.from_vector_part(np.array([1.0, 0.0, 0.0]))*quat
        )
        # Project it onto the marker xy plane
        base_x_proj = self.__proj(base_x_axis, np.array([1.0, 0.0, 0.0])) +\
            self.__proj(base_x_axis, np.array([0.0, 1.0, 0.0]))
        # Angle to x-axis
        angle = np.arctan2(base_x_proj[1], base_x_proj[0])
        # How much rotation to apply to keep angle within +-45degrees
        # (multiples of 90 degrees only, preserving the cube's appearance)
        rotation_angle = 0
        while angle+rotation_angle > np.pi/4:
            rotation_angle -= np.pi/2
        while angle+rotation_angle < -np.pi/4:
            rotation_angle += np.pi/2
        # Apply rotation about the object z-axis
        quat = quat*quaternion.from_rotation_vector([0.0, 0.0, -rotation_angle])
        pose.transform.rotation.w = quat.w
        pose.transform.rotation.x = quat.x
        pose.transform.rotation.y = quat.y
        pose.transform.rotation.z = quat.z
def __proj(
self,
v1: np.ndarray,
v2: np.ndarray
) -> np.ndarray:
"""
Compute the projection of v1 onto v2.
Args
----
v1: vector to project.
v2: reference vector in which the projection is computed.
Returns
-------
projection: projection vector.
"""
projection = np.dot(v1, v2)/np.linalg.norm(v2)**2 * v2
return projection
def main(args=None):
    """Entry point: initialize rclpy, spin the marker node, then shut down."""
    rclpy.init(args=args)
    node = MarkerNode()
    rclpy.spin(node)
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1678990 | # -*- coding: UTF-8 -*-
##############################################################################
# #
# Copyright (c) 2007-2010 <NAME> <<EMAIL>> #
# #
# This program is licensed under the GNU General Public License V3, #
# the full source code is included in the binary distribution. #
# #
# Included in the distribution are files from other open source projects: #
# - TOR Onion Router (c) The Tor Project, 3-clause-BSD #
# - SocksiPy (c) <NAME>, BSD Style License #
# - Gajim buddy status icons (c) The Gajim Team, GNU GPL #
# #
##############################################################################
import sys, os
import locale
import ConfigParser
import traceback
import inspect
import translations
import shutil
def isWindows():
    """Return True when running on a Windows platform ("win32"/"win64")."""
    return sys.platform[:3] == "win"
# ctypes is only needed for the Win32 API calls below
# (process termination, special shell folder lookup).
if isWindows():
    import ctypes
# Default configuration values, keyed by (section, option).
# get() falls back to these and writes missing options into
# torchat.ini on first access.
config_defaults = {
    ("tor", "tor_server") : "127.0.0.1",
    ("tor", "tor_server_socks_port") : 9050,
    ("tor", "tor_server_control_port") : 9051,
    ("tor_portable", "tor_server") : "127.0.0.1",
    ("tor_portable", "tor_server_socks_port") : 11109,
    ("tor_portable", "tor_server_control_port") : 11119,
    ("client", "own_hostname") : "0000000000000000",
    ("client", "listen_interface") : "127.0.0.1",
    ("client", "listen_port") : 11009,
    ("logging", "log_file") : "",
    ("logging", "log_level") : 0,
    ("files", "temp_files_in_data_dir") : 1,
    ("files", "temp_files_custom_dir") : "",
    ("gui", "language") : "en",
    ("gui", "notification_popup") : 1,
    ("gui", "notification_method") : "generic",
    ("gui", "notification_flash_window") : 1,
    ("gui", "open_main_window_hidden") : 0,
    ("gui", "open_chat_window_hidden") : 0,
    ("gui", "time_stamp_format") : "(%H:%M:%S)",
    ("gui", "color_time_stamp") : "#808080",
    ("gui", "color_nick_myself") : "#0000c0",
    ("gui", "color_nick_buddy") : "#c00000",
    ("gui", "color_text_back") : "#ffffff",
    ("gui", "color_text_fore") : "#000000",
    ("gui", "color_text_use_system_colors") : 1,
    ("gui", "chat_font_name") : "Arial",
    ("gui", "chat_font_size") : 10,
    ("gui", "chat_window_width") : 400,
    ("gui", "chat_window_height") : 400,
    ("gui", "chat_window_height_lower") : 50,
    ("gui", "main_window_width") : 260,
    ("gui", "main_window_height") : 350,
    ("branding", "support_id") : "utvrla6mjdypbyw6",
    ("branding", "support_name") : "Bernd, author of TorChat",
    ("profile", "name") : "",
    ("profile", "text") : "",
}
# Encoding of the current locale; used to decode OS-supplied byte strings.
LOCALE_ENC = locale.getpreferredencoding()
# Encoding of the console, or None when stdout has none (e.g. pythonw/pipes).
try:
    CONSOLE_ENC = sys.stdout.encoding
except:
    CONSOLE_ENC = None
def toUnicode(unknownstr):
    """Decode a (Python 2) byte string with the locale encoding.

    Some things like sys.argv[] and functions from os.path return byte
    strings encoded in the locale encoding; this decodes them only when
    needed so the rest of the program can work with unicode everywhere.
    Anything that is not a byte string is returned unchanged.
    """
    if not isinstance(unknownstr, str):
        return unknownstr
    return unknownstr.decode(LOCALE_ENC)
COPYRIGHT = u"Copyright (c) 2007-2011 <NAME> <<EMAIL>>"
# Connection/keepalive tuning values (presumably seconds — confirm
# against the protocol code that consumes them).
DEAD_CONNECTION_TIMEOUT = 240
KEEPALIVE_INTERVAL = 5
MAX_UNANSWERED_PINGS = 4
# Directory the program was started from; many paths are relative to it.
SCRIPT_DIR = os.path.abspath(os.path.dirname(toUnicode(sys.argv[0])))
ICON_DIR = os.path.join(SCRIPT_DIR, "icons")
# Filled in by main() / getDataDir() respectively.
log_writer = None
cached_data_dir = None
def isWindows98():
    """True when the Windows major version is 4 (the 9x/NT4 family)."""
    if not isWindows():
        return False
    return sys.getwindowsversion()[0] == 4  #@UndefinedVariable (make PyDev happy)
def isMac():
    """Return True when running on macOS."""
    return "darwin" == sys.platform
def killProcess(pid):
    # Best-effort process termination: TerminateProcess on Windows,
    # SIGTERM (15) elsewhere. Failures are logged, never raised.
    try:
        if isWindows():
            PROCESS_TERMINATE = 1
            handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, #@UndefinedVariable
                False,
                pid)
            print handle
            ctypes.windll.kernel32.TerminateProcess(handle, -1) #@UndefinedVariable
            ctypes.windll.kernel32.CloseHandle(handle) #@UndefinedVariable
        else:
            os.kill(pid, 15)
    except:
        print "(1) could not kill process %i" % pid
        tb()
def isPortable():
    """Return True when running in portable mode.

    Portable mode is signalled by a readable ``portable.txt`` file next
    to the program; in that mode all data is kept inside the program
    folder instead of the user profile.
    """
    # Probe by opening (not just stat'ing) so an unreadable marker file
    # still means "not portable", matching the original behavior. The
    # old throwaway `dir` variable shadowed the builtin of the same name.
    try:
        marker = open(os.path.join(SCRIPT_DIR, "portable.txt"), "r")
        marker.close()
        return True
    except:
        return False
def getHomeDir():
    # On Windows, resolve the "My Documents" special folder
    # (CSIDL_PERSONAL) through the shell API; elsewhere use $HOME.
    if isWindows():
        CSIDL_PERSONAL = 0x0005
        buf = ctypes.create_unicode_buffer(256)
        ctypes.windll.shell32.SHGetSpecialFolderPathW(None, buf, CSIDL_PERSONAL, 0)
        return buf.value
    else:
        return toUnicode(os.path.expanduser("~"))
def getDataDir():
    # Return (and lazily create) the per-user data directory, including
    # the bundled Tor subfolder. Portable mode keeps everything in
    # SCRIPT_DIR. The result is cached in cached_data_dir.
    global cached_data_dir
    if isPortable():
        return SCRIPT_DIR
    if cached_data_dir:
        return cached_data_dir
    if isWindows():
        CSIDL_APPDATA = 0x001a
        buf = ctypes.create_unicode_buffer(256)
        ctypes.windll.shell32.SHGetSpecialFolderPathW(None, buf, CSIDL_APPDATA, 0)
        appdata = buf.value
        # data_dir = os.path.join(appdata, "torchat")
        # NOTE(review): this overrides the commented-out per-user AppData
        # path with a *relative* "Tor" directory — confirm intended.
        data_dir = "Tor"
    else:
        home = toUnicode(os.path.expanduser("~"))
        data_dir = os.path.join(home, ".torchat")
    #test for optional profile name in command line
    try:
        data_dir += "_" + toUnicode(sys.argv[1])
    except:
        pass
    #create it if necessary
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
    #and create the folder 'Tor' with tor.exe and torrc.txt in it if necessary
    data_dir_tor = os.path.join(data_dir, "Tor")
    if isWindows():
        tor_exe = "tor.exe"
    else:
        tor_exe = "tor.sh"
    if not os.path.exists(data_dir_tor):
        os.mkdir(data_dir_tor)
        shutil.copy(os.path.join("Tor", tor_exe), data_dir_tor)
        shutil.copy(os.path.join("Tor", "torrc.txt"), data_dir_tor)
    #fix permissions
    for filename in os.listdir(data_dir):
        # NOTE(review): isfile() is checked against the *current* working
        # directory, not data_dir — looks like a latent bug; confirm.
        if os.path.isfile(filename):
            # old log files still lying around in the data folder
            os.chmod(os.path.join(data_dir, filename), 0600)
    os.chmod(data_dir, 0700)
    os.chmod(data_dir_tor, 0700)
    os.chmod(os.path.join(data_dir_tor, tor_exe), 0700)
    os.chmod(os.path.join(data_dir_tor, "torrc.txt"), 0600)
    cached_data_dir = data_dir
    return data_dir
def getProfileLongName():
    # "<profile> - <hostname>" when a profile name was given on the
    # command line, otherwise just the own hostname.
    try:
        return "%s - %s" % (toUnicode(sys.argv[1]), get("client", "own_hostname"))
    except:
        return get("client", "own_hostname")
class OrderedRawConfigParser(ConfigParser.RawConfigParser):
    """RawConfigParser that writes sections and options in sorted order.

    Sorting makes the generated torchat.ini stable across saves, so the
    file stays diffable and hand-editable.
    """
    def __init__(self, defaults = None):
        # Forward the caller's defaults (the original hard-coded None
        # here, silently discarding any defaults that were passed in).
        ConfigParser.RawConfigParser.__init__(self, defaults = defaults)

    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            fp.write("[%s]\n" % ConfigParser.DEFAULTSECT)
            for key in sorted(self._defaults):
                fp.write( "%s = %s\n" % (key, str(self._defaults[key]).replace('\n', '\n\t')))
            fp.write("\n")
        for section in sorted(self._sections):
            fp.write("[%s]\n" % section)
            for key in sorted(self._sections[section]):
                if key != "__name__":
                    fp.write("%s = %s\n" %
                        (key, str(self._sections[section][key]).replace('\n', '\n\t')))
            fp.write("\n")
def readConfig():
    # Load torchat.ini from the data directory into the module-global
    # `config`, creating the file/directory and adding missing options
    # from config_defaults as a side effect.
    global file_name
    global config
    dir = getDataDir()
    if not os.path.isdir(dir):
        os.mkdir(dir)
    file_name = dir + "/torchat.ini"
    config = OrderedRawConfigParser()
    #remove the BOM (notepad saves with BOM)
    if os.path.exists(file_name):
        f = file(file_name,'r+b')
        try:
            header = f.read(3)
            if header == "\xef\xbb\xbf":
                print "found UTF8 BOM in torchat.ini, removing it"
                f.seek(0)
                # overwrite the 3 BOM bytes with space + CR + LF in place
                f.write("\x20\x0d\x0a")
        except:
            pass
        f.close()
    try:
        config.read(file_name)
    except ConfigParser.MissingSectionHeaderError:
        # e.g. saved as UTF-16: the parser cannot find any [section]
        print ""
        print "*** torchat.ini must be saved as UTF-8 ***"
        sys.exit()
    #try to read all known options once. This will add
    #all the missing options to the config file
    for section, option in config_defaults:
        get(section, option)
def writeConfig():
    # Persist the current config; chmod 0600 because torchat.ini
    # contains private data (own hidden-service hostname, profile text).
    fp = open(file_name, "w")
    os.chmod(file_name, 0600)
    config.write(fp)
    fp.close()
def get(section, option):
    # Read a config value as unicode. Missing sections/options are
    # created from config_defaults and written back to disk.
    if not config.has_section(section):
        config.add_section(section)
    if not config.has_option(section, option):
        value = config_defaults[section, option]
        set(section, option, value)
    value = config.get(section, option)
    if type(value) == str:
        try:
            value = value.decode("UTF-8")
            # strip surrounding whitespace and quote characters
            value = value.rstrip(" \"'").lstrip(" \"'")
        except:
            print "*** config file torchat.ini is not UTF-8 ***"
            print "*** this will most likely break things ***"
    elif type(value) == int:
        value = str(value)
    elif type(value) == float:
        value = str(value)
    return value # this should now be a unicode string
def getint(section, option):
    """Read *option* as an int: yes/on/true -> 1, no/off/false -> 0,
    otherwise int(value), falling back to 0 when unparsable."""
    text = get(section, option).lower()
    words = {"yes": 1, "on": 1, "true": 1, "no": 0, "off": 0, "false": 0}
    if text in words:
        return words[text]
    try:
        return int(text)
    except:
        return 0
def set(section, option, value):
    # Store a config value (bools as 0/1, unicode as UTF-8 bytes) and
    # immediately write the whole file back to disk.
    if not config.has_section(section):
        config.add_section(section)
    if type(value) == bool:
        value = int(value)
    if type(value) == unicode:
        value = value.encode("UTF-8")
    config.set(section, option, value)
    writeConfig()
def tb(level=0):
    # Print the current exception traceback, tagged with the "(n)"
    # log-level prefix understood by LogWriter.
    print "(%i) ----- start traceback -----\n%s ----- end traceback -----\n" % (level, traceback.format_exc())
def tb1():
    # Print the current call stack (not an exception traceback).
    print "---- BEGIN DEBUG CALLSTACK"
    traceback.print_stack()
    print "---- END DEBUG CALLSTACK"
def getTranslators():
    """Collect unique "Person (Language [code])" credits from every
    built-in ``lang_*`` translation module."""
    credits = []
    for modname in translations.__dict__: #@UndefinedVariable
        if not modname.startswith("lang_"):
            continue
        module = translations.__dict__[modname] #@UndefinedVariable
        try:
            lcode = module.LANGUAGE_CODE
            lname = module.LANGUAGE_NAME
            for person in module.TRANSLATOR_NAMES:
                entry = "%s (%s [%s])" % (person, lname, lcode)
                if entry not in credits:
                    credits.append(entry)
        except:
            # module without the expected metadata: skip it
            pass
    return ", ".join(credits)
def importLanguage():
"""switch the language by redefining all the variables that will be
available in the lang.* namespace, using the namespace __dict__
and making use of the wonderful dynamic nature of the Python language"""
# (The many undefinedvariable comments below are there to make
# the code analysis of Eclipse-PyDev happy, which would not be able
# to recognize that these are perfectly valid at *runtime*)
#if the strings in the language module have already been changed then
if translations.lang_en.LANGUAGE_CODE != "en":
#restore the original values from our backup to have
#all strings reset to english. This helps when switching
#between incomplete translations.
for key in standard_dict:
translations.lang_en.__dict__[key] = standard_dict[key] #@UndefinedVariable
lang_xx = "lang_" + get("gui", "language")
if lang_xx == "lang_en":
#lang_en is the standard translation. nothing to replace.
return
if not SCRIPT_DIR in sys.path:
#make sure that script dir is in sys.path (py2exe etc.)
print "(1) putting script directory into module search path"
sys.path.insert(0, SCRIPT_DIR)
dict_std = translations.lang_en.__dict__ #@UndefinedVariable
print "(1) trying to import language module %s" % lang_xx
try:
#first we try to find a language module in the script dir
dict_trans = __import__(lang_xx).__dict__
print "(1) found custom language module %s.py" % lang_xx
except:
#nothing found, so we try the built in translations
if lang_xx in translations.__dict__: #@UndefinedVariable
print "(1) found built in language module %s" % lang_xx
dict_trans = translations.__dict__[lang_xx].__dict__
else:
print "(0) translation module %s not found"
dict_trans = None
if dict_trans:
#dict_std is the __dict__ of the standard lang module
#dict_trans is the __dict__ of the translation
#find missing translations and report them in the log
for key in dict_std:
if not key in dict_trans:
print "(2) %s is missing translation for %s" % (lang_xx, key)
#replace the bindings in lang_en with those from lang_xx
for key in dict_trans:
if not key in dict_std:
print "(2) unused %s in %s" % (key, lang_xx)
else:
dict_std[key] = dict_trans[key]
class LogWriter:
    # Tee-style logger: replaces sys.stdout/sys.stderr so that every
    # print goes through write(). Messages carry a "(n)" level prefix;
    # only those at or below the configured log_level are emitted, to
    # the console and (optionally) a UTF-8 log file.
    def __init__(self):
        old_dir = os.getcwd()
        os.chdir(getDataDir())
        self.encoding = LOCALE_ENC
        #if log_file is a relative path then let it be relative to DataDir()
        self.file_name = os.path.abspath(get("logging", "log_file"))
        os.chdir(old_dir)
        # keep the real stdout and install ourselves as stdout/stderr
        self.stdout = sys.stdout
        sys.stdout = self
        sys.stderr = self
        self.level = getint("logging", "log_level")
        if self.level and get("logging", "log_file"):
            try:
                self.logfile = open(self.file_name, 'w')
                os.chmod(self.file_name, 0600)
                print "(0) started logging to file '%s'" % self.file_name
                print "(0) logging to file might leave sensitive information on disk"
            except:
                self.logfile = None
                print "(0) could not open logfile '%s'" % self.file_name
                print "(0) logging only to stdout"
        else:
            self.logfile = None
            print "(1) logging to file is disabled"
        print "(1) current log level is %i" % self.level
        print "(1) locale encoding is %s" % LOCALE_ENC
        print "(1) console encoding is %s" % CONSOLE_ENC
        print "(1) LogWriter initialized"
    def write(self, text):
        text = text.rstrip()
        if text == "":
            return
        # If something prints a string that is not unicode then we simply
        # assume it is encoded in the encoding of the current locale.
        if isinstance(text, str):
            text = text.decode(self.encoding, 'replace')
        text += "\n"
        # parse the "(n)" level prefix; untagged messages get level 0
        try:
            x = text[0]
            y = text[2]
            if x == "(" and y == ")":
                level = int(text[1])
            else:
                text = "(0) " + text
                level = 0
        except:
            text = "(0) " + text
            level = 0
        if level <= self.level:
            # insert "[module,line,function]" of the caller after the tag
            try:
                frame = inspect.getframeinfo(inspect.currentframe(1))
                module = os.path.basename(frame[0])
                module = module.split(".")[0]
                line = frame[1]
                func = frame[2]
                pos = "[%s,%i,%s]" % (module, line, func)
                text = text[0:4] + pos + text[3:]
            except:
                pass
            if CONSOLE_ENC:
                self.stdout.write(text.encode(CONSOLE_ENC, 'replace'))
                self.stdout.flush()
            if self.logfile:
                # logfile like all other TorChat related files always UTF-8
                self.logfile.write(text.encode("UTF-8"))
                self.logfile.flush()
    def close(self):
        # NOTE(review): this also closes the original stdout — confirm
        # it is only ever called at process shutdown.
        self.stdout.close()
        self.logfile.close()
def main():
    # Module initialization: load config, install the LogWriter, back up
    # the english string table, then activate the configured translation.
    # Runs at import time via the call below.
    global standard_dict
    global log_writer
    #many things are relative to the script directory, so set is as the cwd
    os.chdir(SCRIPT_DIR)
    readConfig()
    log_writer = LogWriter()
    print "(0) python version %s" % sys.version.replace("\n", "").replace("\r", "")
    if isPortable():
        print "(0) running in portable mode, all data is kept inside the bin folder."
        if (len(sys.argv) > 1):
            print "(0) ignoring requested profile '%s' because profiles do not exist in portable mode" % toUnicode(sys.argv[1])
    print "(0) script directory is %s" % SCRIPT_DIR
    print "(0) data directory is %s" % getDataDir()
    #make a backup of all strings that are in the standard language file
    #because we could need them when switching between incomplete languages
    standard_dict = {}
    for key in translations.lang_en.__dict__: #@UndefinedVariable
        standard_dict[key] = translations.lang_en.__dict__[key] #@UndefinedVariable
    #now switch to the configured translation
    importLanguage()
main()
| StarcoderdataPython |
3315182 | # coding: utf-8
"""Attention module.
Attention as defined in Attention is All you Need.
https://arxiv.org/abs/1706.03762
"""
from typing import Union, Optional
import torch
import torch.nn as nn
from sgcn.masked.tensor import MaskedTensor
from . import affinity as aff
from . import normalization as norm
class Attention(nn.Module):
    """Map queries and key-value pairs to outputs via attention.

    The attention weights are produced by an affinity module scoring each
    (query, key) pair, corrected by a normalization module, and finally
    used to take a weighted sum of the values (as in _Attention is All
    you Need_, https://arxiv.org/abs/1706.03762).

    TODO: if necessary make a batched version of this. Note batch of
    different sizes can already be done using a block diagonal mask.
    """

    def __init__(
        self,
        affinity: aff.Affinity,
        normalization: norm.Normalization
    ) -> None:
        """Create the attention module.

        Parameters
        ----------
        affinity
            Affinity object computing scores between keys and queries.
        normalization
            Normalization applied to the raw attention weights.
        """
        super().__init__()
        self.affinity = affinity
        self.normalization = normalization

    def forward(
        self,
        K: torch.Tensor,
        V: torch.Tensor,
        Q: torch.Tensor,
        m: Optional[Union[torch.Tensor, MaskedTensor]] = None
    ) -> torch.Tensor:
        """Compute the attention output of queries Q over keys K / values V.

        Parameters
        ----------
        K
            Attention keys; the first dimension indexes keys, the rest
            are feature dimensions.
        V
            Attention values, aligned with the keys along dim 0.
        Q
            Queries to make on the attention keys.
        m
            Optional queries-by-keys matrix forwarded to the affinity
            function; usable as a mask or to carry extra pairwise data
            (e.g. edge information for a graph).

        Returns
        -------
        attention
            Tensor aligned with the queries along dim 0; the remaining
            dimensions follow the values.
        """
        weights = self.normalization(self.affinity(Q, K, m))
        # MaskedTensor carries its own matmul; dense tensors use `@`.
        if isinstance(weights, MaskedTensor):
            return weights.mm(V)
        return weights @ V
class MultiHeadAttention(Attention):
    """Dot-product attention applied over several projected heads.

    Keys, values and queries are each linearly projected into ``n_head``
    sub-spaces; dot-product attention runs independently in every
    sub-space and the per-head outputs are concatenated.
    """

    def __init__(
        self,
        in_key: int,
        in_value: int,
        in_query: int,
        n_head: int,
        head_qk: int,
        head_v: int
    ) -> None:
        """Set up the per-head projections.

        Parameters
        ----------
        in_key
            Dimension of the input keys.
        in_value
            Dimension of the input values.
        in_query
            Dimension of the input queries.
        n_head
            Number of attention heads.
        head_qk
            Per-head dimension shared by projected queries and keys
            (they must match because affinity is a dot product).
        head_v
            Per-head dimension of the projected values.
        """
        super().__init__(
            affinity=aff.DotProduct(), normalization=norm.NoNorm()
        )
        self.lin_k = nn.Linear(in_key, head_qk * n_head)
        self.lin_v = nn.Linear(in_value, head_v * n_head)
        self.lin_q = nn.Linear(in_query, head_qk * n_head)
        self._n_head = n_head

    def _view_heads(self, X: torch.Tensor) -> torch.Tensor:
        """Reshape a (n, heads * d) projection into (n, heads, d)."""
        if X.dim() != 2:
            raise RuntimeError(
                f"Only dimension 2 supported, recieved: {X.dim()}"
            )
        return X.view(-1, self._n_head, X.size(1) // self._n_head)

    def forward(
        self,
        K: torch.Tensor,
        V: torch.Tensor,
        Q: torch.Tensor,
        m: Optional[Union[torch.Tensor, MaskedTensor]] = None
    ) -> torch.Tensor:
        """Run dot-product attention per head and concatenate the results.

        Parameters
        ----------
        K
            Attention keys; first dimension indexes keys.
        V
            Attention values, aligned with the keys along dim 0.
        Q
            Queries to make on the attention keys.
        m
            Optional queries-by-keys matrix forwarded to the affinity
            (mask or extra pairwise data).

        Returns
        -------
        attention
            Tensor aligned with the queries along dim 0; its second
            dimension is ``n_head`` times the value head size.
        """
        keys = self._view_heads(self.lin_k(K))
        values = self._view_heads(self.lin_v(V))
        queries = self._view_heads(self.lin_q(Q))
        attend = super().forward
        heads = [
            attend(K=keys[:, h], V=values[:, h], Q=queries[:, h], m=m)
            for h in range(self._n_head)
        ]
        return torch.cat(heads, dim=1)
| StarcoderdataPython |
75268 | <gh_stars>0
#!/usr/bin/python3
import cherrypy
import json
static_dir = '/templates/' # Needs to have trailing and leading slash '/'
class wellcome(object):
    '''Root cherrypy application exposing the demo endpoints.'''

    @cherrypy.expose
    def index(self):
        """Serve a sample person record as a JSON string.

        The record is built as a native dict and serialized once; the
        original embedded a JSON literal and round-tripped it through
        json.loads/json.dumps, producing the exact same output string.
        """
        person = {
            "firstName": "John",
            "lastName": "Smith",
            "isAlive": True,
            "age": 27,
            "address": {
                "streetAddress": "21 2nd Street",
                "city": "New York",
                "state": "NY",
                "postalCode": "10021-3100",
            },
            "phoneNumbers": [
                {"type": "home", "number": "212 555-1234"},
                {"type": "office", "number": "646 555-4567"},
                {"type": "mobile", "number": "123 456-7890"},
            ],
            "children": [],
            "spouse": None,
        }
        return json.dumps(person)

    @cherrypy.expose
    def other(self):
        """Serve a trivial HTML fragment."""
        return '<h1>Other</h1>'
| StarcoderdataPython |
3234050 | <reponame>joewalk102/Adafruit_Learning_System_Guides
# Quote board matrix display
# uses AdafruitIO to serve up a quote text feed and color feed
# random quotes are displayed, updates periodically to look for new quotes
# avoids repeating the same quote twice in a row
import time
import random
import board
import terminalio
from adafruit_matrixportal.matrixportal import MatrixPortal
# --- Display setup ---
matrixportal = MatrixPortal(status_neopixel=board.NEOPIXEL, debug=True)

# Create a new label with the color and text selected
# (index 0: the scrolling quote text)
matrixportal.add_text(
    text_font=terminalio.FONT,
    text_position=(0, (matrixportal.graphics.display.height // 2) - 1),
    scrolling=True,
)

# Static 'Connecting' Text (index 1: shown while fetching feeds)
matrixportal.add_text(
    text_font=terminalio.FONT,
    text_position=(2, (matrixportal.graphics.display.height // 2) - 1),
)

# Adafruit IO feed keys and timing (seconds) for scroll and refresh.
QUOTES_FEED = "sign-quotes.signtext"
COLORS_FEED = "sign-quotes.signcolor"
SCROLL_DELAY = 0.02
UPDATE_DELAY = 600

# Feed contents and "avoid repeating" state used by the main loop below.
quotes = []
colors = []
last_color = None
last_quote = None
def update_data():
    """Refresh the quote and color lists from the Adafruit IO feeds.

    Shows a 'Connecting' message while fetching; network errors are
    logged and leave the previous list contents in place. Raises
    RuntimeError when either feed ends up empty.
    """
    print("Updating data from Adafruit IO")
    matrixportal.set_text("Connecting", 1)
    try:
        quotes_data = matrixportal.get_io_data(QUOTES_FEED)
        quotes.clear()
        for json_data in quotes_data:
            quotes.append(matrixportal.network.json_traverse(json_data, ["value"]))
        print(quotes)
    # pylint: disable=broad-except
    except Exception as error:
        print(error)

    try:
        color_data = matrixportal.get_io_data(COLORS_FEED)
        colors.clear()
        for json_data in color_data:
            colors.append(matrixportal.network.json_traverse(json_data, ["value"]))
        print(colors)
    # pylint: disable=broad-except
    except Exception as error:
        print(error)

    if not quotes or not colors:
        # Raising a bare string is a TypeError in Python 3; raise a real
        # exception so the message actually reaches the user.
        raise RuntimeError("Please add at least one quote and color to your feeds")
    matrixportal.set_text(" ", 1)
# Initial fetch, then loop forever scrolling random quotes.
update_data()
last_update = time.monotonic()
matrixportal.set_text(" ", 1)
quote_index = None
color_index = None
while True:
    # Choose a random quote from quotes (avoid repeating the last one
    # when more than one quote is available)
    if len(quotes) > 1 and last_quote is not None:
        while quote_index == last_quote:
            quote_index = random.randrange(0, len(quotes))
    else:
        quote_index = random.randrange(0, len(quotes))
    last_quote = quote_index
    # Choose a random color from colors (same no-repeat rule)
    if len(colors) > 1 and last_color is not None:
        while color_index == last_color:
            color_index = random.randrange(0, len(colors))
    else:
        color_index = random.randrange(0, len(colors))
    last_color = color_index
    # Set the quote text
    matrixportal.set_text(quotes[quote_index])
    # Set the text color
    matrixportal.set_text_color(colors[color_index])
    # Scroll it
    matrixportal.scroll_text(SCROLL_DELAY)
    # Periodically re-fetch the feeds
    if time.monotonic() > last_update + UPDATE_DELAY:
        update_data()
        last_update = time.monotonic()
| StarcoderdataPython |
77203 | <reponame>appointlet/span<gh_stars>1-10
from setuptools import setup
# Packaging metadata for the "span" helper library.
setup(
    name="span",
    version="0.0.1",
    description="Helper for determining basic relationships between datetime ranges",
    long_description="Helper for determining basic relationships between datetime ranges",
    keywords="span, datetime",
    author="<NAME> <<EMAIL>>",
    author_email="<EMAIL>",
    url="https://github.com/appointlet/span",
    license="BSD",
    packages=["span"],
    zip_safe=False,
    install_requires=[],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| StarcoderdataPython |
20867 | <reponame>blackcow/pytorch-cifar-master
第一题:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
class Solution(object):
    """Median of an unsorted list of numbers."""

    def findMedium(l):
        """Print and return the median of *l*.

        Works on a sorted copy so the caller's list is not mutated
        (the original sorted in place) and returns the median so the
        result is usable programmatically.
        """
        ordered = sorted(l)
        length = len(ordered)
        if length % 2 != 0:
            # Odd count: the middle element is the median.
            median = ordered[length // 2]
        else:
            # Even count: average of the two middle elements.
            median = (ordered[length // 2 - 1] + ordered[length // 2]) / 2
        print(median)
        return median
# Demo: sorted order is [1, 2, 3, 5, 7, 8], so the median is 4.0.
l = [1, 3, 5, 2, 8, 7]
Solution.findMedium(l)
第二题:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# str = input()
# print(str)
class Solution:
    def maxStr(str_in):
        # Appears to find the longest run of a single repeated character,
        # allowing the run to "borrow" one extra occurrence when more of
        # that character exists elsewhere in the string — TODO confirm
        # the intended problem statement. Assumes lowercase ascii input.
        # Initialization
        length = len(str_in)
        count = [0 for i in range(26)]
        char_a = ord('a')
        # Count occurrences of each letter
        for i in range(length):
            count[ord(str_in[i]) - char_a] += 1
        last = str_in[0]
        num = 1
        res = 1
        for m in range(1, length):
            # Current character differs from the previous run
            if last != str_in[m]:
                tmp_idx = m
                while (tmp_idx + 1 < length) and (last == str_in[tmp_idx + 1]):
                    num += 1
                    tmp_idx += 1
                # Borrow one extra occurrence if available elsewhere
                if count[ord(last) - char_a] > num:
                    num += 1
                num, res = 1, max(num, res)
                last = str_in[m]
            # Same character: extend the current run
            else:
                num += 1
                if (num > 1) and (count[ord(last) - char_a] > num):
                    num += 1
        # With the max length known, slice the string to extract it.
        # NOTE(review): this loop re-slices for every letter whose count
        # differs from max_length, which looks incorrect for most inputs
        # — confirm against the expected output.
        max_length = max(num, res)
        str2ls = list(str_in)
        for i in count:
            if i != max_length:
                str2ls = str2ls[i:]
            else:
                str2ls = str2ls[:max_length]
        out = ''.join(str2ls)
        print(out)
        return (out)
# Demo run on a sample string.
text = 'abbbbcccddddddddeee'
Solution.maxStr(text)
第三题:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
class Solution:
    """Maximum sum over all contiguous (non-empty) subarrays."""

    def findMaxArray(l):
        """Print and return the largest contiguous-subarray sum of *l*
        (Kadane's algorithm). Requires a non-empty list."""
        best = run = l[0]
        for x in l[1:]:
            # Extend the running sum while it is positive, otherwise
            # restart the subarray at the current element.
            run = run + x if run > 0 else x
            best = max(best, run)
        print(best)
        return best
# Demo: the best subarray sums to 9 (e.g. [4, 5]).
l = [1, -2, 4, 5, -1, 1]
Solution.findMaxArray(l)
3244276 | from collections import deque
from multiprocessing import Queue
from threading import Thread, Condition
import threading
import traceback
from typing import TypeVar, Generic, List, Optional
from tudelft.utilities.listener.Listener import Listener
from uri.uri import URI
from geniusweb.connection.ConnectionEnd import ConnectionEnd
from geniusweb.references.Reference import Reference
from geniusweb.simplerunner.BlockingQueue import BlockingQueue
S=TypeVar("S")
class Info (Generic[S]):
    '''
    Wrapper around data so that we can put None and EOS in a
    {@link ArrayBlockingQueue}

    @param <S> the type of contained data.
    '''
class Data (Info[S]):
    '''
    An Info that actually carries a payload.
    '''

    def __init__(self, data:S):
        self._data = data

    def get(self)->S:
        '''@return the wrapped payload'''
        return self._data

    def __repr__(self):
        return str(self._data)
class EOS (Info[S]):
    '''
    End of stream. Marker item that terminates the handler thread;
    use the shared THE_EOS instance below.
    '''
    def __repr__(self):
        return "EOS"
# The single shared end-of-stream instance, used everywhere: it is both
# enqueued and compared against by BasicConnection.
THE_EOS:EOS = EOS()
# Type variables for the incoming (IN) and outgoing (OUT) data.
IN = TypeVar('IN')
OUT = TypeVar('OUT')
class BasicConnection(ConnectionEnd[IN, OUT]):
    '''
    A basic connection that implements connection with direct calls

    @param <IN> the type of the incoming data
    @param <OUT> the type of outgoing data
    '''
    def __init__(self, reference:Reference , uri:URI ):
        '''
        @param reference Reference that was used to create this connection.
        @param uri the URI of the remote endpoint that makes up the
               connection. This is a URI that uniquely identifies the
               remote object
        '''
        self._reference = reference
        self._uri = uri
        # listeners for incoming (IN) data
        self._listeners:List[Listener[IN]] = []
        # guards _error and the send/close state transitions
        self._synclock = threading.RLock()
        # first error that occurred, if any; set once by setError
        self._error:Optional[Exception]=None
        # to be initialized
        self._handler:Optional[Listener[OUT]] = None
        # bounded queue of outgoing Info items; THE_EOS terminates the
        # handler thread
        self._messages = BlockingQueue[Info](4)
    def init(self, newhandler:Listener[OUT] ) :
        '''
        To be called to hook up the other side that will handle a send action
        from us. Must be called first.

        @param newhandler a Listener<OUT> that can handle send actions.
        '''
        if self._handler:
            raise ValueError("already initialized")
        self._handler = newhandler
        this=self
        class MyHandlerThread(Thread):
            '''
            thread that handles this._messages until EOS is hit.
            It runs in scope of init and uses 'this'
            '''
            def run(self):
                try:
                    while (True):
                        #print("INTO"+str(self))
                        # take() blocks until a message is available
                        mess = this._messages.take()
                        #print("OUT"+str(self))
                        if mess==THE_EOS:
                            break;
                        this._handler.notifyChange(mess.get())
                except Exception as e:
                    this.setError(e)
                # mark the connection closed for send()/isOpen()
                this._handler = None
                #print("BasicConnection closed");
        handlerThread=MyHandlerThread()
        handlerThread.start();
    def setError(self, e:Exception):
        '''
        Error condition occurs. Record error and close connection.
        Only the first error is recorded; later ones are ignored.

        @param e
        '''
        with self._synclock:
            if not self._error:
                # maybe log instead?
                traceback.print_exc()
                self._error = e
                self.close()
    def send(self, data:OUT ) :
        # Enqueue outgoing data; blocks while the bounded queue is full.
        with self._synclock:
            if not self._handler:
                raise ValueError(
                    "BasicConnection has not been initialized or was closed")
            # it seems there is no InterruptedException possible in python.
            self._messages.put(Data(data))
    def getReference(self) -> Reference :
        return self._reference
    def getRemoteURI(self)->URI:
        return self._uri
    def close(self):
        # Idempotent: enqueue EOS once so the handler thread drains the
        # remaining messages and then exits.
        with self._synclock:
            print("flushing and terminating " + str(self))
            if not self._handler or self._messages.contains(THE_EOS):
                return
            # it seems there is no InterruptedException possible in python.
            self._messages.put(THE_EOS)
    def __repr__(self):
        return "BasicConnection[" + str(self._reference) + "]"
    def getError(self)->Optional[Exception]:
        return self._error;
    def isOpen(self)->bool:
        '''
        @return true iff this connection is open. Returns false also when then
        handler is in the close-down process
        '''
        return self._handler != None and not self._messages.contains(THE_EOS)
    #****************** implements listenable ****************
    # override because notifyListeners should throw exceptions.
    def addListener(self, l:Listener[IN]):
        self._listeners.append(l)
    def removeListener(self, l:Listener[IN] ) :
        self._listeners.remove(l)
    def notifyListeners(self, data:IN ) :
        # deliberately lets listener exceptions propagate to the caller
        for l in self._listeners:
            l.notifyChange(data)
3243245 | <reponame>vishalbelsare/tensorflow-riemopt<filename>tensorflow_riemopt/manifolds/hyperboloid_test.py
import tensorflow as tf
from absl.testing import parameterized
from tensorflow.python.keras import combinations
from tensorflow_riemopt.manifolds.test_invariants import (
TestInvariants,
random_constant,
)
from tensorflow_riemopt.manifolds.hyperboloid import Hyperboloid
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        manifold=[Hyperboloid(), Hyperboloid(k=5.0)],
        shape=[(5,), (2, 2)],
        dtype=[tf.float64],
    )
)
class HyperboloidTest(tf.test.TestCase, parameterized.TestCase):
    # Shared manifold invariants reused as test methods; each one runs for
    # every (mode, manifold, shape, dtype) combination from the decorator.
    test_random = TestInvariants.check_random
    test_dist = TestInvariants.check_dist
    test_inner = TestInvariants.check_inner
    test_proj = TestInvariants.check_proj
    test_exp_log_inverse = TestInvariants.check_exp_log_inverse
    test_transp_retr = TestInvariants.check_transp_retr
    test_ptransp_inverse = TestInvariants.check_ptransp_inverse
    test_ptransp_inner = TestInvariants.check_ptransp_inner
    def test_poincare(self, manifold, shape, dtype):
        """Hyperboloid -> Poincare ball -> hyperboloid should round-trip."""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape=shape, dtype=dtype))
            y = manifold.to_poincare(x, manifold.k)
            x_ = manifold.from_poincare(y, manifold.k)
            if not tf.executing_eagerly():
                x_ = self.evaluate(x_)
            self.assertAllCloseAccordingToType(x, x_)
| StarcoderdataPython |
4829349 | <gh_stars>1-10
# add this to force db migrate to detect models
from .model import User, UserConfirmation # noqa
| StarcoderdataPython |
1703555 | """
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This file tests features of the params class. All tests should pass.
The tests in this file can be run with .
"""
import pytest
from pathlib import Path
from test.TestingLib import *
from storagevet.ErrorHandling import *
DIR = Path("./test/model_params")
"""
Tariff File checks
"""
def test_missing_tariff_row():
    """Initialization must raise TariffError when a tariff row is missing."""
    case = DIR / '002-missing_tariff.csv'
    with pytest.raises(TariffError):
        check_initialization(case)
def test_single_tariff_row():
    """A tariff with a single billing period should run successfully."""
    case = DIR / '051-tariff-single_billing_period_ok.csv'
    run_case(case)
def test_multi_tariff_row():
    """A tariff with multiple billing periods should run successfully."""
    case = DIR / '052-tariff-multi_billing_periods_ok.csv'
    run_case(case)
def test_repeated_tariff_billing_period():
    """A repeated billing-period index in the tariff must raise TariffError."""
    case = DIR / '053-tariff-repeated-billing-period-index.csv'
    with pytest.raises(TariffError):
        check_initialization(case)
"""
Sensitivity checks
"""
def test_number_of_cases_in_sensitivity_analysis():
    """A sensitivity analysis over battery energy should spawn 4 instances."""
    case = DIR / '009-bat_energy_sensitivity.csv'
    results = run_case(case)
    assert_file_exists(results)
    assert len(results.instances.keys()) == 4
def test_number_of_cases_in_coupling():
    """Coupled timeseries/dt sensitivity should spawn exactly 2 instances."""
    case = DIR / '017-bat_timeseries_dt_sensitivity_couples.csv'
    results = run_case(case)
    assert_file_exists(results)
    assert len(results.instances.keys()) == 2
def test_coupled_with_nonexisting_input_error():
    """Coupling against a non-existing input must raise ModelParameterError."""
    case = DIR / '020-coupled_dt_timseries_error.csv'
    with pytest.raises(ModelParameterError):
        check_initialization(case)
"""
DR parameter checks
"""
def test_dr_length_nan_allowed():
    """DR should accept a NaN value for `length`."""
    case = DIR / "022-DR_length_nan.csv"
    check_initialization(case)
def test_dr_program_end_hour_nan_allowed():
    """DR should accept a NaN value for `program_end_hour`."""
    case = DIR / "021-DR_program_end_nan.csv"
    check_initialization(case)
def test_dr_length_empty_allowed():
    """DR should accept an empty value for `length`."""
    case = DIR / "047-DR_length_empty.csv"
    check_initialization(case)
def test_dr_program_end_hour_empty_allowed():
    """DR should accept an empty value for `program_end_hour`."""
    case = DIR / "046-DR_program_end_empty.csv"
    check_initialization(case)
def test_dr_length_and_program_end_ok():
    """DR should accept both `length` and `program_end_hour` when they are
    compatible with each other."""
    case = DIR / "044-DR_program_end_with_length_ok.csv"
    check_initialization(case)
def test_dr_length_and_program_end_error():
    """Incompatible `length` and `program_end_hour` must raise
    ModelParameterError."""
    case = DIR / "045-DR_program_end_with_length_error.csv"
    with pytest.raises(ModelParameterError):
        run_case(case)
def test_dr_two_nans_not_allowed():
    """DR must reject NaN for both `length` and `program_end_hour` at once."""
    case = DIR / "024-DR_nan_length_prgramd_end_hour.csv"
    with pytest.raises(ModelParameterError):
        check_initialization(case)
def test_dr_two_empties_not_allowed():
    """DR must reject empty `length` and `program_end_hour` at once."""
    case = DIR / "048-DR_empty_length_prgramd_end_hour.csv"
    with pytest.raises(ModelParameterError):
        check_initialization(case)
def test_dr_length_unsupported_value_types():
    """DR must reject a non-NaN string value for `length`."""
    case = DIR / "049-DR_unsupported_length.csv"
    with pytest.raises(ModelParameterError):
        run_case(case)
def test_dr_end_hour_unsupported_value_types():
    """DR must reject a non-NaN string value for `program_end_hour`."""
    case = DIR / "050-DR_unsupported_program_end_hour.csv"
    with pytest.raises(ModelParameterError):
        run_case(case)
"""
Test opt_year checks on referenced file data
"""
def test_opt_years_not_in_timeseries_data():
    """An opt_year absent from the timeseries file must raise
    TimeseriesDataError."""
    case = DIR / "025-opt_year_more_than_timeseries_data.csv"
    with pytest.raises(TimeseriesDataError):
        check_initialization(case)
def test_continuous_opt_years_in_timeseries_data():
    """Continuous opt_years matching the timeseries data should run."""
    case = DIR / "038-mutli_opt_years_continuous.csv"
    assert_ran(case)
def test_discontinuous_opt_years_in_timeseries_data():
    """Discontinuous opt_years matching the timeseries data should run."""
    case = DIR / "037-mutli_opt_years_discontinuous.csv"
    assert_ran(case)
def test_opt_years_not_in_monthly_data():
    """An opt_year absent from the monthly file must raise MonthlyDataError."""
    case = DIR / "039-mutli_opt_years_not_in_monthly_data.csv"
    with pytest.raises(MonthlyDataError):
        check_initialization(case)
def test_no_label_results_key():
    """A run without a results label in the model parameters should still
    complete successfully."""
    case = DIR / "042-no_results_label.csv"
    assert_ran(case)
| StarcoderdataPython |
85714 | from .common import get_tfvars_file, replace_tfvars, passwd_generator
def configure_sonarqube_container():
    """
    Configure a containerized Sonar server.

    Writes the SonarQube-related settings (container flag, admin credentials
    and code-quality selection) into the Terraform variables file.
    """
    # Resolve the tfvars file once instead of on every replacement call.
    tfvars_file = get_tfvars_file()
    replace_tfvars("dockerizedSonarqube", "true", tfvars_file, False)
    replace_tfvars('sonar_username', "admin", tfvars_file)
    # Generate a random admin password rather than shipping a fixed default.
    replace_tfvars('sonar_passwd', passwd_generator(), tfvars_file)
    replace_tfvars('codequality_type', 'sonarqube', tfvars_file)
    replace_tfvars('codeq', 1, tfvars_file)
| StarcoderdataPython |
36372 | from ...reqs import publications
from .. import main
class Post(main._all["publication"]):
    """
    Represents a post object.
    """
    __slots__ = (
        "pages",
        "best_comment",
        "rubric_id",
        "rubric_name"
    )
    def __init__(self, content):
        """
        Create a Post instance.

        content: :class:`dict`
            Dictionary that the Campfire server sends to build the post object.
        """
        super(Post, self).__init__(content)
        self.pages = content["jsonDB"]["J_PAGES"] # list::dict
        if content["bestComment"] != None:
            self.best_comment = main._all["comment"](content["bestComment"])
        else:
            self.best_comment = None
        self.rubric_id = content["rubricId"]
        self.rubric_name = content["rubricName"]
    @staticmethod
    def get(post_id: int):
        """
        Create a Post instance from its identifier.

        post_id: :class:`int`
            Identifier of the post.

        Returns
        :class:`Post`
            The post object.
        """
        return Post(publications.get_post(post_id))
    @staticmethod
    def get_from_feed(offset: int = 0, languages: list = [2], subscribes: bool = False, *, important: bool = False):
        """
        Fetch posts from the feed.

        offset: :class:`int`
            Creation date of the last post, in milliseconds.
        languages: :class:`list[int]`
            Languages the returned feed posts will have.
        subscribes: :class:`bool`
            If true, feed posts come from the "Subscriptions" category.
        important: :class:`bool`
            Only important posts.

        Returns
        :class:`list[Post]`
            Posts from the feed.
        """
        # NOTE(review): `languages=[2]` is a mutable default argument; it is
        # only read here, never mutated, but `None` + fallback would be safer.
        posts = publications.get_posts_from_feed(offset, languages, subscribes, important)
        return [ Post(post) for post in posts ]
    # Self-actions
    def change_fandom(self, fandom_id: int, fandom_lang: int = 2):
        """
        Change the post's fandom.

        fandom_id: :class:`int`
            Identifier of the fandom.
        fandom_lang: :class:`int`
            Language of the fandom.
        """
        return publications.post_change_fandom(self.id, "", fandom_id, fandom_lang)
    def to_drafts(self):
        """
        Move the post to drafts.
        """
        return publications.post_to_drafts(self.id)
    def close(self):
        """
        Close the post.
        """
        return publications.post_close(self.id)
    def no_close(self):
        """
        Reopen the post.
        """
        return publications.post_close_no(self.id)
    def set_multilingual(self):
        """
        Make the post multilingual.
        """
        return publications.post_set_multilingual(self.id)
    def unset_multilingual(self):
        """
        Make the post non-multilingual.
        """
        return publications.post_unset_multilingual(self.id)
    def notify_followers(self):
        """
        Notify followers about the post.
        """
        return publications.post_notify_followers(self.id)
    def pin_to_account(self):
        """
        Pin the post to your own profile.
        """
        return publications.post_pin_to_account(self.id)
    # Moderator
    def moderator_close(self, comment: str):
        """
        Close the post (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_close(self.id, comment)
    def moderator_no_close(self, comment: str):
        """
        Reopen the post (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_close_no(self.id, comment)
    def moderator_unset_multilingual(self, comment: str):
        """
        Make the post non-multilingual (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_unset_multilingual(self.id, comment)
    def moderator_set_important(self, comment: str, important: bool = True):
        """
        Add or remove the "important" mark on the post (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        important: :class:`bool`
            Whether to set or remove the "important" mark.
        """
        return publications.moderator_post_set_important(self.id, comment, important)
    def moderator_to_drafts(self, comment: str):
        """
        Move the post to drafts (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_to_drafts(self.id, comment)
    def moderator_pin_to_fandom(self, comment: str):
        """
        Pin the post in its fandom (moderator action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        # NOTE(review): assumes the parent publication class sets the
        # `fandom_id` / `fandom_lang` attributes -- confirm in `main`.
        return publications.moderator_post_pin_to_fandom(self.id, self.fandom_id, self.fandom_lang, comment)
    def admin_change_fandom(self, comment: str, fandom_id: int, fandom_lang: int = 2):
        """
        Change the post's fandom (admin action).

        comment: :class:`str`
            Comment attached to the moderator action.
        fandom_id: :class:`int`
            Identifier of the fandom.
        fandom_lang: :class:`int`
            Language of the fandom.
        """
        return publications.post_change_fandom(self.id, comment, fandom_id, fandom_lang)
    def admin_make_moderator(self, comment: str):
        """
        Make the post's author a moderator in the fandom (admin action).

        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.admin_post_make_moderator(self.id, comment)
main._all["post"] = Post
81947 | <filename>Genetic Algorithms/DEAP/Sudoku.py
'''
May 27, 2019
<NAME>.
This files utilizes the DEAP framework to implement a genetic algorithm and create a Sudoku.
'''
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from deap import base, creator, tools, algorithms
class GenerateSudoku():
    '''Evolve a Sudoku grid with a genetic algorithm built on DEAP.

    Fitness is maximised: each duplicate-free row, column and block scores
    +1, so a perfect grid of side n scores 3*n (27 for a standard 9x9 grid).
    '''
    def __init__(self, sudoku_size=9, pop_size=100, cxpb=0.5, mutpb=0.2, ngen=50):
        '''
        @param sudoku_size grid side length; must be a perfect square.
        @param pop_size population size.
        @param cxpb crossover probability.
        @param mutpb mutation probability.
        @param ngen number of generations.
        @raise ValueError if sudoku_size has no integer square root.
        '''
        block_size = math.sqrt(sudoku_size)
        if block_size - int(block_size) > 0:
            raise ValueError('Size must have an integer square root.')
        self.block_size = int(block_size)
        self.size = sudoku_size
        # An individual is the flattened grid: size*size genes.
        self.len = self.size ** 2
        self.pop_size = pop_size
        self.cxpb = cxpb
        self.mutpb = mutpb
        self.ngen = ngen
        creator.create('FitnessMax', base.Fitness, weights=(1.0,))
        creator.create('Individual', list, fitness=creator.FitnessMax)
        self.toolbox = base.Toolbox()
        self.toolbox.register('attr', self.random_list)
        self.toolbox.register('individual', tools.initRepeat, creator.Individual, self.toolbox.attr, self.len)
        self.toolbox.register('population', tools.initRepeat, list, self.toolbox.individual)
        self.population = self.toolbox.population(n=self.pop_size)
        self.toolbox.register('mate', tools.cxOnePoint)
        # Generalized: mutate within 1..size instead of the hard-coded 1..9,
        # so non-9x9 grids stay consistent (identical behavior for size=9).
        self.toolbox.register('mutate', tools.mutUniformInt, low=1, up=self.size, indpb=self.mutpb)
        self.toolbox.register('select', tools.selTournament, tournsize=50)
        self.toolbox.register('evaluate', self.evaluate)
        self.stats = tools.Statistics(key=lambda ind: ind.fitness.values)
        self.stats.register('avg', np.mean)
        self.stats.register('std', np.std)
        self.stats.register('min', np.min)
        self.stats.register('max', np.max)
        self.hall = tools.HallOfFame(10)
        self.logbook = None
    def random_list(self):
        '''Return one random gene: a cell value in 1..size.'''
        return random.randint(1, self.size)
    def evaluate(self, ind):
        '''Score an individual: +1 per duplicate-free row, column and block.

        Bug fix: the previous version iterated ``reshape(bs, bs, bs, bs)``
        directly, which regrouped *rows* (each "block" was just a row split
        into triples), so the square sub-blocks were never actually checked;
        it also built an unused ``blocks`` list. Transposing the two middle
        axes groups each true block's cells together.
        '''
        fitness = 0
        npind = np.array(ind).reshape(self.size, self.size)
        # Rows and columns: a duplicate-free line of n cells scores +1.
        for i in range(self.size):
            fitness += len(set(npind[i, :])) - self.size + 1
            fitness += len(set(npind[:, i])) - self.size + 1
        # Blocks: index [i, j, k, l] addresses cell (bs*i + j, bs*k + l);
        # moving k next to i collects each (i, k) block's cells together.
        bs = self.block_size
        boxes = npind.reshape(bs, bs, bs, bs).transpose(0, 2, 1, 3).reshape(-1, self.size)
        for box in boxes:
            fitness += len(set(box)) - self.size + 1
        # DEAP expects the fitness as a tuple.
        return fitness,
    def run(self):
        '''Run the evolutionary loop, filling the hall of fame and logbook.'''
        _, self.logbook = algorithms.eaSimple(self.population, self.toolbox, cxpb=self.cxpb,
                                              mutpb=self.mutpb, ngen=self.ngen, stats=self.stats,
                                              halloffame=self.hall, verbose=True)
def main(sudoku_size=9, pop_size=100, cxpb=0.25, mutpb=0.1, ngen=50):
    """Run the Sudoku GA and plot average/max fitness per generation."""
    sudoku = GenerateSudoku(sudoku_size=sudoku_size, pop_size=pop_size, cxpb=cxpb, mutpb=mutpb, ngen=ngen)
    sudoku.run()
    # Best individual found, reshaped into the grid for display.
    # Fixed: reshape used a hard-coded (9, 9) regardless of sudoku_size.
    print(np.array(sudoku.hall[0]).reshape(sudoku_size, sudoku_size))
    # Renamed from `avg`/`max`: `max` shadowed the builtin max().
    avg_fit, max_fit = sudoku.logbook.select("avg", "max")
    plt.plot(avg_fit, label='Average')
    plt.plot(max_fit, label='Max')
    plt.legend()
    plt.xlabel('Generation')
    plt.ylabel('Fitness')
    plt.show()
if __name__ == '__main__':
    main(pop_size=100, ngen=10000)
4817782 | # proxy module
from __future__ import absolute_import
from apptools.help.help_plugin.help_plugin import *
| StarcoderdataPython |
3341495 | '''
Author: He,Yifan
Date: 2022-02-16 20:02:10
LastEditors: He,Yifan
LastEditTime: 2022-02-20 22:46:22
'''
from functools import partial
import os
import time
import numpy as np
import yaml
import sys
from pgsyn.gp.estimators import PushEstimator
from pgsyn.gp.genome import GeneSpawner
from pgsyn.knowledge.base import KnowledgeArchive
from pgsyn.push.config import PushConfig
from pgsyn.push.instruction_set import InstructionSet
from pgsyn.yaml_utils import register_yaml_constructors
from utils import erc_generator, load_psb
from utils import randchar, randfloat, randint, randbool, randstr
from utils import randinput_replace_space_with_newline
def get_psb(problem_filename):
    """Load the PSB train/test splits described by a problem YAML file.

    Returns (X_train, y_train, X_test, y_test).
    """
    # Fixed: a bare open() leaked the file handle; use a context manager.
    with open(problem_filename) as config_file:
        dat = yaml.unsafe_load(config_file)
    # NOTE(review): yaml.unsafe_load executes arbitrary tags -- acceptable
    # only for trusted local config files.
    problem = dat.get("PROBLEM")
    problem_name = problem.get("name")
    path_to_root = problem.get("path_to_root")
    train_cfg = problem.get("train")
    test_cfg = problem.get("test")
    n_train_edge = train_cfg.get("edge", 0)
    n_train_random = train_cfg.get("random", 100)
    n_test_edge = test_cfg.get("edge", 0)
    n_test_random = test_cfg.get("random", 1000)
    io_types = problem.get("io_types")
    X_train, y_train = load_psb(problem_name, path_to_root, n_train_edge, n_train_random, io_types)
    X_test, y_test = load_psb(problem_name, path_to_root, n_test_edge, n_test_random, io_types)
    return X_train, y_train, X_test, y_test
def get_erc_generators(problem):
    """Build ERC (ephemeral random constant) generators from a problem dict.

    Every key starting with "erc" describes one generator through its
    "method" name and an optional "range" of possible values.
    """
    methods = {
        "randint": randint,
        "randfloat": randfloat,
        "randchar": randchar,
        "randbool": randbool,
        "randinput_replace_space_with_newline": randinput_replace_space_with_newline,
    }
    erc_generators = []
    for key, value in problem.items():
        # startswith() is clearer than slicing key[:3] and safe for short keys.
        if key.startswith("erc"):
            possible_values = value.get("range", None)
            method = methods.get(value.get("method"))
            erc_generators.append(partial(erc_generator, method, possible_values))
    return erc_generators
def get_spawner(problem):
    """Construct a GeneSpawner from the problem configuration."""
    default_stacks = ["exec", "int", "bool", "float", "char", "str", "stdout"]
    stack_names = set(problem.get("stacks", default_stacks))
    instruction_set = InstructionSet().register_core_by_stack(stack_names)
    return GeneSpawner(
        n_inputs=problem.get("n_inputs"),
        instruction_set=instruction_set,
        literals=problem.get("literals", []),
        erc_generators=get_erc_generators(problem),
    )
def get_knowledge_archive(problem, ka, name):
    """Build the named KnowledgeArchive; fall back to an empty archive."""
    archive_kwargs = ka.get(name, {"mode": "empty"})
    return KnowledgeArchive(spawner=get_spawner(problem), **archive_kwargs)
def get_estimator(problem_filename, pushgp_filename):
    """Assemble a PushEstimator from the problem and PushGP YAML configs.

    The problem file supplies benchmark-specific Push runtime limits and GP
    hyper-parameters; the algorithm file supplies the search strategy and
    its keyword arguments (including which knowledge archive to attach).
    """
    # Fixed: bare open() calls leaked both config file handles.
    with open(problem_filename) as problem_file:
        problem_dat = yaml.unsafe_load(problem_file)
    problem = problem_dat.get("PROBLEM")
    ka = problem_dat.get("KNOWLEDGE_ARCHIVE")
    with open(pushgp_filename) as pushgp_file:
        pushgp_dat = yaml.unsafe_load(pushgp_file)
    pushgp = pushgp_dat.get("PUSHGP")
    search = pushgp.get("search", "UMAD")
    last_str_from_stdout = problem.get("last_str_from_stdout", False)
    # Push interpreter resource limits, with library defaults as fallbacks.
    push_config = PushConfig(step_limit=problem.get("step_limit", 500),
                             runtime_limit=problem.get("runtime_limit", 10),
                             growth_cap=problem.get("growth_cap", 500),
                             collection_size_cap=problem.get("collection_size_cap", 1000),
                             numeric_magnitude_limit=problem.get("numeric_magnitude_limit", 1e12))
    interpreter = pushgp.get("interpreter", "default")
    verbose = pushgp.get("verbose", 2)
    spawner = get_spawner(problem)
    error_threshold = problem.get("error_threshold", 0)
    initial_genome_size = problem.get("initial_genome_size", [10, 50])
    max_genome_size = problem.get("max_genome_size")
    simplification_steps = problem.get("simplification_steps", 2000)
    # Search-specific kwargs; the "ka" entry names which knowledge archive
    # configuration to instantiate and attach.
    kwargs = pushgp.get(search)
    kwargs.update({"knowledge_archive": get_knowledge_archive(problem, ka, kwargs.get("ka"))})
    est = PushEstimator(
        search=search,
        last_str_from_stdout=last_str_from_stdout,
        interpreter=interpreter,
        push_config=push_config,
        verbose=verbose,
        spawner=spawner,
        error_threshold = error_threshold,
        initial_genome_size = initial_genome_size,
        max_genome_size = max_genome_size,
        simplification_steps = simplification_steps,
        **kwargs
    )
    return est
if __name__ == "__main__":
    register_yaml_constructors()
    path_to_dir = os.getcwd()
    # Usage: python <script> <problem_cfg_name> <algorithm_cfg_name>
    _, problem_yml, algorithm_yml = sys.argv
    # Allow the ".yml" suffix to be omitted on the command line.
    if problem_yml[-4:] != ".yml":
        problem_yml += ".yml"
    if algorithm_yml[-4:] != ".yml":
        algorithm_yml += ".yml"
    est = get_estimator(path_to_dir+"/problem_cfg/"+problem_yml,
                        path_to_dir+"/algorithm_cfg/"+algorithm_yml)
    X_train, y_train, X_test, y_test = get_psb(path_to_dir+"/problem_cfg/"+problem_yml)
    start = time.time()
    est.fit(X=X_train, y=y_train)
    end = time.time()
    # Persist the best evolved genome for later inspection/replay.
    np.save("solution.npy", est.solution.genome, allow_pickle=True)
    print("========================================")
    print("post-evolution stats")
    print("========================================")
    print("Runtime: ", time.strftime('%H:%M:%S', time.gmtime(end - start)))
    print("Test Error: ", np.sum(est.score(X_test, y_test)))
| StarcoderdataPython |
4810504 | <reponame>neonbjb/DL-Art-School
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.arch_util import ConvGnLelu, default_init_weights, make_layer
from models.diffusion.nn import timestep_embedding
from trainer.networks import register_model
from utils.util import checkpoint
# Conditionally uses torch's checkpoint functionality if it is enabled in the opt file.
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        mid_channels (int): Channel number of intermediate features.
        growth_channels (int): Channels for each growth.
        embedding (bool): When True, a timestep-embedding projection is added
            to the features before the dense convolutions.
        init_weight (float): Scale used when initializing the conv weights.
    """

    def __init__(self, mid_channels=64, growth_channels=32, embedding=False, init_weight=.1):
        super(ResidualDenseBlock, self).__init__()
        self.embedding = embedding
        if embedding:
            self.first_conv = ConvGnLelu(mid_channels, mid_channels, activation=True, norm=False, bias=True)
            # Projects the (mid_channels*4)-dim timestep embedding down to the
            # feature width so it can be added channel-wise.
            self.emb_layers = nn.Sequential(
                nn.SiLU(),
                nn.Linear(
                    mid_channels*4,
                    mid_channels,
                ),
            )
        # Five dense convs: each one consumes the input plus all previous
        # growth outputs; the last (conv5) maps back to mid_channels.
        for i in range(5):
            out_channels = mid_channels if i == 4 else growth_channels
            self.add_module(
                f'conv{i + 1}',
                nn.Conv2d(mid_channels + i * growth_channels, out_channels, 3,
                          1, 1))
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        for i in range(5):
            default_init_weights(getattr(self, f'conv{i + 1}'), init_weight)
        # conv5 is then re-initialized to zero so the block starts as a
        # (near-)identity mapping; this deliberately overrides the loop above.
        default_init_weights(self.conv5, 0)
        self.normalize = nn.GroupNorm(num_groups=8, num_channels=mid_channels)

    def forward(self, x, emb):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            emb (Tensor): Timestep embedding; only used when ``embedding``.

        Returns:
            Tensor: Forward results.
        """
        if self.embedding:
            x0 = self.first_conv(x)
            emb_out = self.emb_layers(emb).type(x0.dtype)
            # Broadcast the embedding over the spatial dimensions.
            while len(emb_out.shape) < len(x0.shape):
                emb_out = emb_out[..., None]
            x0 = x0 + emb_out
        else:
            x0 = x
        x1 = self.lrelu(self.conv1(x0))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Scaled residual connection (factor .2), then GroupNorm.
        return self.normalize(x5 * .2 + x)
class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        mid_channels (int): Channel number of intermediate features.
        growth_channels (int): Channels for each growth.
    """

    def __init__(self, mid_channels, growth_channels=32):
        super(RRDB, self).__init__()
        # Only the first dense block consumes the timestep embedding;
        # rdb2/rdb3 accept `emb` in forward() but ignore it (embedding=False).
        self.rdb1 = ResidualDenseBlock(mid_channels, growth_channels, embedding=True)
        self.rdb2 = ResidualDenseBlock(mid_channels, growth_channels)
        self.rdb3 = ResidualDenseBlock(mid_channels, growth_channels)
        self.normalize = nn.GroupNorm(num_groups=8, num_channels=mid_channels)
        # Learnable scale for the outer residual connection.
        self.residual_mult = nn.Parameter(torch.FloatTensor([.1]))

    def forward(self, x, emb):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            emb (Tensor): Timestep embedding, used by the first dense block.

        Returns:
            Tensor: Forward results.
        """
        out = self.rdb1(x, emb)
        out = self.rdb2(out, emb)
        out = self.rdb3(out, emb)
        return self.normalize(out * self.residual_mult + x)
class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
    Currently, it supports x4 upsampling scale factor.

    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (int): Channel number of intermediate features.
            Default: 64
        num_blocks (int): Block number in the trunk network. Defaults: 23
        growth_channels (int): Channels for each growth. Default: 32.
        body_block: Module class used for the trunk blocks. Default: RRDB.
        num_corruptions (int): Number of per-image correction-factor channels
            appended to the input when `correction_factors` is not supplied.
            Default: 0 (no extra channels), keeping callers that never pass
            correction factors backward compatible.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels=64,
                 num_blocks=23,
                 growth_channels=32,
                 body_block=RRDB,
                 num_corruptions=0,
                 ):
        super(RRDBNet, self).__init__()
        self.num_blocks = num_blocks
        self.in_channels = in_channels
        self.mid_channels = mid_channels
        # Bug fix: forward() read self.num_corruptions but it was never set.
        self.num_corruptions = num_corruptions
        # The diffusion RRDB starts with a full resolution image and downsamples into a .25 working space
        self.input_block = ConvGnLelu(in_channels, mid_channels, kernel_size=7, stride=1, activation=True, norm=False, bias=True)
        self.down1 = ConvGnLelu(mid_channels, mid_channels, kernel_size=3, stride=2, activation=True, norm=False, bias=True)
        self.down2 = ConvGnLelu(mid_channels, mid_channels, kernel_size=3, stride=2, activation=True, norm=False, bias=True)
        # Guided diffusion uses a time embedding.
        time_embed_dim = mid_channels * 4
        self.time_embed = nn.Sequential(
            nn.Linear(mid_channels, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim),
        )
        self.body = make_layer(
            body_block,
            num_blocks,
            mid_channels=mid_channels,
            growth_channels=growth_channels)
        self.conv_body = nn.Conv2d(self.mid_channels, self.mid_channels, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2d(self.mid_channels, self.mid_channels, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(self.mid_channels*2, self.mid_channels, 3, 1, 1)
        self.conv_up3 = None
        self.conv_hr = nn.Conv2d(self.mid_channels*2, self.mid_channels, 3, 1, 1)
        self.conv_last = nn.Conv2d(self.mid_channels, out_channels, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.normalize = nn.GroupNorm(num_groups=8, num_channels=self.mid_channels)
        for m in [
                self.conv_body, self.conv_up1,
                self.conv_up2, self.conv_hr
        ]:
            if m is not None:
                default_init_weights(m, 1.0)
        # Zero-init the output conv so the net starts by predicting zeros.
        default_init_weights(self.conv_last, 0)

    def forward(self, x, timesteps, low_res, correction_factors=None):
        """Predict the denoised output for diffusion step `timesteps`.

        Args:
            x (Tensor): Noisy full-resolution input, shape (n, c, h, w).
            timesteps (Tensor): Diffusion timesteps, shape (n,).
            low_res (Tensor): Low-resolution conditioning image; bilinearly
                upsampled to (h, w) and concatenated onto `x`.
            correction_factors (Tensor, optional): Per-image correction
                factors, broadcast over the spatial dimensions. When omitted,
                `num_corruptions` zero channels are appended instead.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.mid_channels))
        # Bug fix: the batch size `b` was previously never bound, so the
        # `correction_factors is None` path raised a NameError.
        b, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = torch.cat([x, upsampled], dim=1)
        if correction_factors is not None:
            correction_factors = correction_factors.view(b, -1, 1, 1).repeat(1, 1, new_height, new_width)
        else:
            correction_factors = torch.zeros((b, self.num_corruptions, new_height, new_width), dtype=torch.float, device=x.device)
        x = torch.cat([x, correction_factors], dim=1)
        d1 = self.input_block(x)
        d2 = self.down1(d1)
        feat = self.down2(d2)
        # Gradient checkpointing over the trunk saves memory during training.
        for bl in self.body:
            feat = checkpoint(bl, feat, emb)
        feat = feat[:, :self.mid_channels]
        feat = self.conv_body(feat)
        # upsample, skip-connecting the matching-resolution downsample features
        out = torch.cat([self.lrelu(
            self.normalize(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))),
            d2], dim=1)
        out = torch.cat([self.lrelu(
            self.normalize(self.conv_up2(F.interpolate(out, scale_factor=2, mode='nearest')))),
            d1], dim=1)
        out = self.conv_last(self.normalize(self.lrelu(self.conv_hr(out))))
        return out
@register_model
def register_rrdb_diffusion(opt_net, opt):
    # Factory registered with the trainer: builds an RRDBNet from config args.
    return RRDBNet(**opt_net['args'])
if __name__ == '__main__':
    # Smoke test: 4x upsampling-style forward pass with random tensors.
    # NOTE(review): forward() references `b` and `self.num_corruptions` on the
    # correction_factors=None path -- confirm this path actually runs.
    model = RRDBNet(6,6)
    x = torch.randn(1,3,128,128)
    l = torch.randn(1,3,32,32)
    t = torch.LongTensor([555])
    y = model(x, t, l)
    print(y.shape, y.mean(), y.std(), y.min(), y.max())
| StarcoderdataPython |
1705391 | <reponame>gadomski/pystac-client
import json
import logging
from copy import deepcopy
from typing import Callable, Iterator, Optional
from urllib.parse import urlparse
from urllib.request import Request, urlopen
import requests
from pystac import STAC_IO
from .exceptions import APIError
logger = logging.getLogger(__name__)
def read_text_method(uri):
    """Read text from a URL, file path, or :class:`urllib.request.Request`.

    Overrides PySTAC's default reader so callers may pass a prepared
    ``urllib.request.Request`` (e.g. with custom headers). HTTP errors are
    deliberately not caught here: :exc:`urllib.error.HTTPError` propagates so
    callers can react to individual status codes.
    """
    if isinstance(uri, Request):
        logger.debug(f"Requesting {uri.get_full_url()} with headers {uri.headers}")
        with urlopen(uri) as response:
            body = response.read()
        return body.decode("utf-8")
    if bool(urlparse(uri).scheme):
        logger.debug(f"Requesting {uri}")
        response = requests.get(uri)
        return response.content.decode("utf-8")
    # Plain file path: defer to PySTAC's default implementation.
    return STAC_IO.default_read_text_method(uri)
def make_request(session, request, additional_parameters=None):
    """Prepare and send *request* on *session*, merging in extra parameters.

    For POST requests the parameters are merged into the JSON body; for any
    other method they are merged into the query string. Raises APIError on a
    non-200 response, otherwise returns the parsed JSON body.
    """
    # Fixed: `additional_parameters={}` was a mutable default argument.
    if additional_parameters is None:
        additional_parameters = {}
    _request = deepcopy(request)
    if _request.method == 'POST':
        _request.json.update(additional_parameters)
        logger.debug(
            f"Requesting {_request.url}, Payload: {json.dumps(_request.json)}, Headers: {session.headers}"
        )
    else:
        _request.params.update(additional_parameters)
        logger.debug(
            f"Requesting {_request.url}, Payload: {json.dumps(_request.params)}, Headers: {session.headers}"
        )
    prepped = session.prepare_request(_request)
    resp = session.send(prepped)
    # NOTE(review): this rejects every non-200 response, including other 2xx
    # codes -- confirm that is intended for the target APIs.
    if resp.status_code != 200:
        raise APIError(resp.text)
    return resp.json()
def simple_stac_resolver(link: dict, original_request: requests.Request) -> requests.Request:
    """Build the request for the next page from a STAC API ``"next"`` link.

    Handles the extended STAC ``link`` object described in `STAC API - Item
    Search: Paging
    <https://github.com/radiantearth/stac-api-spec/tree/master/item-search#paging>`_.
    All extended properties are optional; anything the link does not specify
    falls back to the corresponding part of ``original_request``:

    * ``href`` -> URL of the next request (falls back to the original URL).
    * ``method`` -> HTTP method (falls back to the original method).
    * ``headers`` / ``body`` -> replace the original headers / JSON body, or
      are merged into them when the link's ``merge`` attribute is ``True``
      (``merge`` defaults to ``False``).

    Note that for non-POST requests no query parameters are copied from the
    original request: the link's ``href`` is expected to carry the full query
    string (including any paging token).

    Parameters
    ----------
    link : dict or pystac.Link
        The ``"next"`` link returned in the previous response.
    original_request : requests.Request
        The request that produced the response containing ``link``.

    Returns
    -------
    next_request : requests.Request
    """
    # Fixed three defects in the previous version: the link's "href" was
    # never used (so paging re-requested the same URL forever), the method
    # fell back to 'GET' instead of the original request's method, and the
    # GET branch referenced an undefined `parameters` name.
    merge = bool(link.get('merge', False))
    method = link.get('method', original_request.method)
    url = link.get('href', original_request.url)
    # Headers: replace or merge the original request's headers.
    headers = original_request.headers
    link_headers = link.get('headers')
    if link_headers is not None:
        headers = {**headers, **link_headers} if merge else link_headers
    if method == 'POST':
        # Body: replace or merge the original request's JSON body.
        parameters = original_request.json or {}
        link_body = link.get('body', {})
        parameters = {**parameters, **link_body} if merge else link_body
        request = requests.Request(method=method,
                                   url=url,
                                   headers=headers,
                                   json=parameters)
    else:
        request = requests.Request(method=method,
                                   url=url,
                                   headers=headers)
    return request
def get_pages(
    session: requests.Session,
    request: requests.Request,
    next_resolver: Optional[Callable] = simple_stac_resolver,
) -> Iterator[dict]:
    """
    Iterate over every page of a paged STAC API response.

    Parameters
    ----------
    session : requests.Session
        requests library Session object used to execute each request.
    request : requests.Request
        The initial request to start paging. Subsequent requests will be determined by the ``next_resolver``.
    next_resolver : Callable, optional
        A callable ``(next_link, previous_request) -> requests.Request`` used to construct the request for the
        next page of results based on the ``"next"`` link from the previous page. Defaults to
        ``simple_stac_resolver``; passing ``None`` explicitly also falls back to it.

    Yields
    ------
    dict
        One decoded JSON page per iteration, in request order.
    """
    # The annotation advertises Optional, so honor an explicit None instead of
    # raising TypeError when the "next" link is followed below.
    if next_resolver is None:
        next_resolver = simple_stac_resolver
    while True:
        # Yield the current page of results.
        page = make_request(session, request)
        yield page
        # Follow the page's "next" link if present; otherwise paging is done.
        next_link = next((link for link in page.get('links', []) if link['rel'] == 'next'), None)
        if next_link is None:
            break
        request = next_resolver(next_link, request)
| StarcoderdataPython |
1726681 | <filename>journal/models.py
import datetime
from django.db import models
from django.utils import timezone
class JournalEntry(models.Model):
    """A dated journal entry with a short heading and free-form body text."""

    heading_text = models.CharField(max_length=200)
    entry_text = models.TextField()
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        # Render as the heading wherever Django shows the object as text
        # (admin list pages, the shell, template {{ entry }}).
        return self.heading_text

    def was_published_recently(self):
        """Return True when the entry was published within the last 24 hours.

        Future-dated entries are not considered recent: the publication time
        must fall between 24 hours ago and now, inclusive.
        """
        current = timezone.now()
        cutoff = current - datetime.timedelta(days=1)
        return cutoff <= self.pub_date <= current
| StarcoderdataPython |
4803134 | <filename>supervisor/discovery/services/zwave_js.py<gh_stars>100-1000
"""Discovery service for Zwave JS."""
import voluptuous as vol
from supervisor.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT
# pylint: disable=no-value-for-parameter
# Discovery payload schema for the Zwave JS add-on: the announcing service
# must supply the host and port where it can be reached. Both keys are
# required; the port is validated with supervisor's network_port validator.
SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_HOST): str,
        vol.Required(ATTR_PORT): network_port,
    }
)
| StarcoderdataPython |
3204924 | <reponame>hboueix/PyCheckers<filename>app/modules/inputbox.py
import pygame
COLOR_INACTIVE = pygame.Color('lightskyblue3')
COLOR_ACTIVE = pygame.Color('dodgerblue2')
class InputBox(pygame.sprite.Sprite):
    """A clickable, single-line text input field.

    Clicking inside the box toggles focus; clicking anywhere else removes it.
    While focused, typed characters are appended (as long as they fit in the
    box), Backspace deletes the last character, and Return submits the text.
    """

    def __init__(self, x, y, w, h, text=''):
        super().__init__()
        self.rect = pygame.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        self.font = pygame.font.Font(None, 25)
        self.txt_surface = self.font.render(text, True, self.color)
        self.active = False

    def handle_event(self, event):
        """Process one pygame event.

        Returns the submitted text when Return is pressed while focused,
        otherwise an empty string.
        """
        submitted = ''
        if event.type == pygame.MOUSEBUTTONDOWN:
            # A click inside the box toggles focus; a click outside drops it.
            clicked_inside = self.rect.collidepoint(event.pos)
            self.active = clicked_inside and not self.active
            # Reflect the new focus state in the box colour.
            self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
        if event.type == pygame.KEYDOWN and self.active:
            if event.key == pygame.K_RETURN:
                # Hand the current contents to the caller and clear the field.
                submitted, self.text = self.text, ''
            elif event.key == pygame.K_BACKSPACE:
                if self.text:
                    self.text = self.text[:-1]
            elif self.txt_surface.get_width() + 20 < self.rect.w:
                # Accept the character only while the rendered text still fits.
                self.text += event.unicode
            # Re-render the text surface after any edit.
            self.txt_surface = self.font.render(self.text, True, self.color)
        return submitted

    def draw(self, window):
        """Render the current text and the box outline onto *window*."""
        window.blit(self.txt_surface, (self.rect.x + 5, self.rect.y + 5))
        pygame.draw.rect(window, self.color, self.rect, 2)
| StarcoderdataPython |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# gdbm_create.py
#
# Builds a small gdbm database of cities in Gifu prefecture, keyed by a
# 4-digit city code, with each value stored as a JSON string containing the
# name, population, and last-modified date. Afterwards it dumps the whole
# database via a shared helper.
#
# NOTE(review): Python 2 only — the `anydbm` module was renamed `dbm` in
# Python 3, and `print` is used as a statement-style call.
#
# Jul/13/2010
import sys
import string
import anydbm
#
# Make the project's shared helper module importable.
sys.path.append ('/var/www/uchida/data_base/common/python_common')
#
from dbm_manipulate import dbm_disp_proc,dbm_update_proc
# -------------------------------------------------------------
print ("*** 開始 ***")
#
#
# Open the database file, creating it if it does not exist (mode "c").
db_name = "/var/tmp/gdbm/cities.pag";
dd = anydbm.open (db_name,"c")
#
#
# Insert one JSON-encoded record per city code.
dd["2151"]='{"name": "岐阜","population": 70230,"date_mod": "2003-7-24"}';
dd["2152"]='{"name": "大垣","population": 52070,"date_mod": "2003-8-12"}';
dd["2153"]='{"name": "多治見","population": 420155,"date_mod": "2003-9-14"}';
dd["2154"]='{"name": "各務原","population": 44630,"date_mod": "2003-8-2"}';
dd["2155"]='{"name": "土岐","population": 21204,"date_mod": "2003-5-15"}';
dd["2156"]='{"name": "高山","population": 92130,"date_mod": "2003-10-12"}';
dd["2157"]='{"name": "美濃加茂","population": 82034,"date_mod": "2003-11-21"}';
dd["2158"]='{"name": "恵那","population": 92304,"date_mod": "2003-10-11"}';
dd["2159"]='{"name": "関","population": 926340,"date_mod": "2003-7-25"}';
dd["2160"]='{"name": "中津川","population": 920534,"date_mod": "2003-12-4"}';
#
# Print every record using the shared display helper.
dbm_disp_proc (dd)
#
dd.close ()
#
print ("*** 終了 ***")
# -------------------------------------------------------------
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.