id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
# dataset id: 52083
def obfuscate(utils_path, project_path):
    """Run the ConfuserEx CLI obfuscator on *project_path*.

    The executable is expected under ``<utils_path>/confuser/``; the
    ``-n`` flag is forwarded to the CLI unchanged.
    """
    import subprocess
    print('Running obfuscator ...')
    # Pass the command as an argument list: the old single-string form
    # without shell=True is only interpreted as a command line on
    # Windows, and paths containing spaces would split incorrectly.
    subprocess.run([f'{utils_path}/confuser/Confuser.CLI.exe', project_path, '-n'])
| StarcoderdataPython |
3336769 | #
# pv_ifc.py
#
# Created on: Aug 16, 2008
# Author: dcoates
# Protocol constants for the pv_ifc interface.

# Action codes: operations a caller can request.
PV_ACTION_INIT=1
PV_ACTION_ADD_LAYER=2
PV_ACTION_SET_LAYER_PARAMS=3
PV_ACTION_ADD_CONNECTION=4
PV_ACTION_RUN=5
PV_ACTION_SET_PARAMS=6
PV_ACTION_SET_INPUT_FILENAME=7
PV_ACTION_INJECT=8
PV_ACTION_MEASURE=9
PV_ACTION_FINALIZE=10
PV_ACTION_SETUP=11
# Handler codes: identify the layer/handler implementation to use.
PV_HANDLER_LIF=1
PV_HANDLER_READFILE=2
PV_HANDLER_GAUSS2D=3
PV_HANDLER_THRU=4
PV_HANDLER_COCIRC1D=5
PV_HANDLER_COCIRC_K=6
PV_HANDLER_CENTER_SURR=7
PV_HANDLER_PROB_FIRE=8
PV_HANDLER_LIF2=9
PV_HANDLER_GAUSS2DX=10
PV_HANDLER_COCIRC_K2=11
# Buffer indices: select which state buffer to read/write.
PV_BUFFER_V=0
PV_BUFFER_PHI=1
PV_BUFFER_G_I=2
PV_BUFFER_G_E=3
PV_BUFFER_F=4
# Connection flag constant.
PV_CONNECTION_FLAG=100
| StarcoderdataPython |
3329207 | from dna_functions import dseq_from_both_overhangs, both_overhangs_from_dseq, \
format_sequence_genbank, read_dsrecord_from_json
import unittest
from pydna.dseqrecord import Dseqrecord
from pydna.dseq import Dseq
from Bio.SeqFeature import FeatureLocation
from pydna.seqfeature import SeqFeature
from typing import OrderedDict
class DseqFromBothOverhangsTest(unittest.TestCase):
def test_conversion(self):
# Here we try both with longer watson and longer crick
for watson, crick in [('AAAAAA', 'TTTT'), ('TTTT', 'AAAAAA')]:
for ovhg in [-2, 0, 3]:
with self.subTest(ovhg=ovhg):
dseq_original = Dseq(watson, crick=crick, ovhg=ovhg)
crick_overhang_3p, watson_overhang_3p = both_overhangs_from_dseq(
dseq_original)
dseq_2 = dseq_from_both_overhangs(
str(dseq_original), crick_overhang_3p, watson_overhang_3p)
# We check that the elements of Dseq are transferred properly
self.assertEqual(dseq_original.watson,
dseq_2.watson)
self.assertEqual(dseq_original.crick, dseq_2.crick)
self.assertEqual(dseq_original.ovhg, dseq_2.ovhg)
# Now we check for the features
dseq_original = Dseqrecord(dseq_original)
dseq_2 = Dseqrecord(dseq_2)
# We add some features:
# TODO document somewhere the fact that the strand must
# be specified. The file readers assume +1 strand for
# all features when reading from GenBank files
for a, start, end in [('a', 0, 2), ('b', 1, 2), ('c', 4, 7)]:
dseq_original.features.append(
SeqFeature(
location=FeatureLocation(start, end),
type="misc_feature",
qualifiers=OrderedDict({"label": [a]}),
strand=1)
)
dseq_2.features = dseq_original.features
# We check that the features are transferred normally
for i in range(len(dseq_2.features)):
feature_original: SeqFeature = dseq_original.features[i]
feature_2: SeqFeature = dseq_2.features[i]
self.assertEqual(
feature_original.extract(dseq_original),
feature_2.extract(dseq_2))
# Finally we test with pydantic models
seq_entity = format_sequence_genbank(dseq_original)
dseq_3 = read_dsrecord_from_json(seq_entity)
self.assertEqual(dseq_original.seq.watson,
dseq_3.seq.watson)
self.assertEqual(dseq_original.seq.crick, dseq_3.seq.crick)
self.assertEqual(dseq_original.seq.ovhg, dseq_3.seq.ovhg)
# We check that the features are transferred normally
for i in range(len(dseq_3.features)):
feature_original: SeqFeature = dseq_original.features[i]
feature_3: SeqFeature = dseq_3.features[i]
self.assertEqual(
feature_original.extract(dseq_original),
feature_3.extract(dseq_3))
| StarcoderdataPython |
4829429 | # Generated by Django 2.0.2 on 2018-03-09 18:14
import app.teams.models
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated schema migration for the `teams` app: adds a custom
# `view_team` permission, links each Team one-to-one to an auth Group,
# and makes team names unique with a custom validator.
class Migration(migrations.Migration):
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('teams', '0002_team_members'),
]
operations = [
migrations.AlterModelOptions(
name='team',
options={'permissions': (('view_team', 'View team'),)},
),
# preserve_default=False: default=None is only used to backfill
# existing rows during this migration, not kept on the field.
migrations.AddField(
model_name='team',
name='group',
field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='team', to='auth.Group'),
preserve_default=False,
),
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(max_length=80, unique=True, validators=[app.teams.models.validate_team_name]),
),
]
| StarcoderdataPython |
1699319 | '''Main setup and run loop for xcffibaer.
'''
import asyncio
import os
import sys
import xcffib
import xcffib.render
import xcffib.randr
from xcffib.randr import NotifyMask, ScreenChangeNotifyEvent
import i3ipc
from . import Bar, Store, Window, XSetup
from .atoms import initAtoms
from .timers import addDelay
from .utils import QuitApplication, inspect, printError, printInfo
DEFAULT_SCREEN_INDEX = 0
HANDLE_SCREEN_CHANGE_DELAY = 0.5
awaitingScreenChange = False
# Dispatch an incoming X event to the Window object that owns it.
def handleWindowEvent(event):
printInfo(f'Incoming {event.__class__.__name__}:')
inspect(event)
# Some xcffib event types carry the target window in `.window`,
# others in `.event`.
windowID = event.window if hasattr(event, 'window') else event.event
Window.windowsByID[windowID].handleEvent(event)
def charListToString(charList):
    """Decode a sequence of integer code points into a Python string."""
    return ''.join(map(chr, charList))
# Connect to the X server and collect everything needed for 32-bit
# (ARGB/TrueColor) rendering on the chosen screen.
# Returns (connection, XSetup).
def setupX(theme, screenIndex=DEFAULT_SCREEN_INDEX):
conn = xcffib.connect(display=os.getenv('DISPLAY'))
# Attach the RandR and Render extension handles to the connection.
conn.randr = conn(xcffib.randr.key)
conn.render = conn(xcffib.render.key)
screens = conn.get_screen_pointers()
root = conn.get_setup().roots.list[screenIndex]
initAtoms(conn)
# Pick the 32-bit depth (raises IndexError if the server offers none).
depthInfo = [
d
for d in root.allowed_depths.list
if d.depth == 32
][0]
printInfo('depthInfo:')
inspect(depthInfo)
# Pick a TrueColor visual at that depth.
visualType = [
v
for v in depthInfo.visuals.list
if v._class == xcffib.xproto.VisualClass.TrueColor # pylint: disable=protected-access
][0]
printInfo('visualType:')
inspect(visualType)
xSetup = XSetup(conn, screens[screenIndex], depthInfo, visualType, root, theme)
return conn, xSetup
def wrapI3Command(i3conn):
    """Wrap ``i3conn.command`` so every command is echoed to stdout
    (and stdout flushed) before being forwarded.

    The connection object is mutated in place and also returned.
    """
    forward = i3conn.command

    def loggingCommand(command):
        print(f'Sending i3 command: {repr(command)}')
        sys.stdout.flush()
        forward(command)

    i3conn.command = loggingCommand
    return i3conn
def run(theme, setupBar, setupStore, onInit=None, screenIndex=DEFAULT_SCREEN_INDEX):
conn, xSetup = setupX(theme, screenIndex)
if onInit:
onInit()
i3conn = wrapI3Command(i3ipc.Connection())
bars = []
def paintBars():
for bar in bars:
bar.paint()
store = Store(paintBars)
setupStore(store, i3conn)
def setupBars():
dummy = Window(xSetup)
screenResources = conn.randr.GetScreenResources(dummy.id).reply()
printInfo('GetScreenResources:')
inspect(screenResources)
crtcInfoCookies = [(crtc, conn.randr.GetCrtcInfo(crtc, 0)) for crtc in screenResources.crtcs]
for crtc, crtcInfoCookie in crtcInfoCookies:
crtcInfo = crtcInfoCookie.reply()
if crtcInfo.num_outputs:
printInfo(f'Creating bar for crtc {crtc}.')
outputs = [
charListToString(conn.randr.GetOutputInfo(output, 0).reply().name)
for output in crtcInfo.outputs
]
printInfo('outputs:', outputs)
bar = Bar(xSetup, height=21, screenExtents=crtcInfo, name=outputs[0])
setupBar(bar, store, outputs, i3conn)
bars.append(bar)
else:
print(f'(crtc {crtc} disabled)')
dummy.close()
setupBars()
conn.randr.SelectInput(xSetup.root.root, NotifyMask.ScreenChange)
loop = asyncio.get_event_loop()
def handleScreenChange():
printInfo(f'Incoming screen change event; closing and re-creating bars.')
while bars:
try:
bars.pop().close()
except Exception as error: # pylint: disable=broad-except
printError(f'Unexpected {error.__class__.__name__} received while closing bar:', error)
inspect(error)
setupBars()
globals()['awaitingScreenChange'] = False
def shutdown():
printInfo('Shutting down.')
loop.stop()
def xcbPoll():
while True:
try:
#event = conn.wait_for_event()
event = conn.poll_for_event()
except xcffib.ProtocolException as error:
printError(f'Protocol error {error.__class__.__name__} received!')
shutdown()
break
except Exception as error: # pylint: disable=broad-except
printError(f'Unexpected {error.__class__.__name__} received:', error)
inspect(error)
#shutdown()
break
if conn.has_error():
printError('Connection error received!')
shutdown()
break
if not event:
break
try:
if isinstance(event, ScreenChangeNotifyEvent):
if not awaitingScreenChange:
printInfo(f'Incoming {event.__class__.__name__}; scheduling bar re-creation.')
globals()['awaitingScreenChange'] = True
addDelay(HANDLE_SCREEN_CHANGE_DELAY, handleScreenChange)
else:
printInfo(f'Ignoring {event.__class__.__name__}; bar re-creation already scheduled.')
else:
handleWindowEvent(event)
except QuitApplication:
shutdown()
break
try:
i3conn.event_socket_setup()
loop.add_reader(conn.get_file_descriptor(), xcbPoll)
loop.add_reader(i3conn.sub_socket, i3conn.event_socket_poll)
loop.run_forever()
finally:
i3conn.event_socket_teardown()
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
for window in Window.windowsByID.values():
if hasattr(window, 'cleanUp') and callable(window.cleanUp):
window.cleanUp()
conn.disconnect()
| StarcoderdataPython |
3283242 | #65 - maior ou menor,varios numeros e mostrar a media,
#perguntar ao usuarios se quer ou nao continuar
# Read integers until the user stops, then report the average, the
# largest and the smallest value entered.
maior = menor = media = cont = 0
laco = 'S'
while laco == 'S':
n1 = int(input('Digite os valores: '))
cont +=1
if cont == 1: # first value read: it is both the maximum and the minimum so far
maior = n1
menor = n1
else: # later values: update the running maximum / minimum
if n1 > maior:
maior = n1
if n1 < menor:
menor = n1
# `media` accumulates the running sum; divided by `cont` at the end.
media = n1 + media
laco = ' '
# Re-prompt until a valid S/N answer is given (case-insensitive).
while laco != 'S' and laco != 'N':
laco = str(input('Deseja continuar [S|N]: ')).upper().strip()
if laco != 'S':
print('Programa finalizado.')
print('-'*100)
print('A média foi de: {:.2f}'.format(media/cont))
print('Maior número foi: {}'.format(maior))
print('Menor número foi: {}'.format(menor))
| StarcoderdataPython |
106942 | import pytest
pytestmark = [
pytest.mark.requires_salt_states("echo.text"),
]
# Verify the custom `echo.echoed` state returns its input unchanged.
# `salt_call_cli` is a pytest fixture provided by the salt test suite.
def test_echoed(salt_call_cli):
echo_str = "Echoed!"
ret = salt_call_cli.run("state.single", "echo.echoed", echo_str)
assert ret.exitcode == 0
assert ret.json
assert ret.json == echo_str
# Verify the custom `echo.reversed` state returns its input reversed.
def test_reversed(salt_call_cli):
echo_str = "Echoed!"
expected = echo_str[::-1]
ret = salt_call_cli.run("state.single", "echo.reversed", echo_str)
assert ret.exitcode == 0
assert ret.json
assert ret.json == expected
| StarcoderdataPython |
1712745 | <reponame>Mouedrhiri/get-running-processes-with-ram-usage
import psutil
import wmi
from tqdm import tqdm
from time import sleep
# Display a tqdm progress bar over the given iterable, sleeping 0.1s per
# item (purely cosmetic pacing).
# NOTE(review): the parameter name shadows the builtin `range`.
def progress(range):
for i in tqdm(range, desc ="Progress : "):
sleep(.1)
f = wmi.WMI()
l = []
#To Scan How Much Processing Tasks
for process in f.Win32_Process():
l.append(process.ProcessId)
# gives an object with many fields
ram1 = psutil.virtual_memory()
print("Scanning Ram : \n")
progress(ram1)
print()
# you can convert that object to a dictionary
dict(psutil.virtual_memory()._asdict())
# you can have the percentage of used RAM
ram = psutil.virtual_memory().percent
#You Can Have The Available Ram
avram = psutil.virtual_memory().available * 100 / psutil.virtual_memory().total
print(f"Hold On We've Found {len(l)} Running Process !! \n")
print(f"Those Processes Only Left For You "+"{:.1f}".format(avram)+"%"+" available Of Your RAM !!\n")
print(f"The Used ram is : {ram}%\n")
print("We Will Load Your Running Processes Please Wait \n")
progress(l)
print()
print("ID: Process name:\n")
for process in f.Win32_Process():
print(f"{process.ProcessId:<10} {process.Name}")
print(f"\nThe Number Of Actively Running Tasks {len(l)} \n")
input() | StarcoderdataPython |
3241316 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import os
import pytest
from fluids.numerics import assert_close, assert_close1d
from thermo.joback import *
from thermo.joback import J_BIGGS_JOBACK_SMARTS_id_dict
from chemicals.identifiers import pubchem_db
folder = os.path.join(os.path.dirname(__file__), 'Data')
try:
import rdkit
from rdkit import Chem
except:
rdkit = None
@pytest.mark.rdkit
@pytest.mark.skipif(rdkit is None, reason="requires rdkit")
def test_Joback_acetone():
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit.Chem import rdMolDescriptors
for i in [Chem.MolFromSmiles('CC(=O)C'), 'CC(=O)C']:
ex = Joback(i) # Acetone example
assert_close(ex.Tb(ex.counts), 322.11)
assert_close(ex.Tm(ex.counts), 173.5)
assert_close(ex.Tc(ex.counts), 500.5590049525365)
assert_close(ex.Tc(ex.counts, 322.11), 500.5590049525365)
assert_close(ex.Pc(ex.counts, ex.atom_count), 4802499.604994407)
assert_close(ex.Vc(ex.counts), 0.0002095)
assert_close(ex.Hf(ex.counts), -217830)
assert_close(ex.Gf(ex.counts), -154540)
assert_close(ex.Hfus(ex.counts), 5125)
assert_close(ex.Hvap(ex.counts), 29018)
assert_close1d(ex.Cpig_coeffs(ex.counts),[7.52, 0.26084, -0.0001207, 1.546e-08] )
assert_close(ex.Cpig(300.0), 75.32642000000001)
assert_close1d(ex.mul_coeffs(ex.counts), [839.11, -14.99])
assert_close(ex.mul(300.0), 0.0002940378347162687)
with pytest.raises(ValueError):
# Raise an error if there are no groups matched
obj = Joback('[Fe]')
obj.estimate()
# Test we can handle missing groups
nitrobenzene = 'C1=CC=C(C=C1)[N+](=O)[O-]'
obj = Joback(nitrobenzene)
res = obj.estimate()
assert res['mul_coeffs'] is None
@pytest.mark.fuzz
@pytest.mark.slow
@pytest.mark.rdkit
@pytest.mark.skipif(rdkit is None, reason="requires rdkit")
def test_Joback_database():
pubchem_db.autoload_main_db()
f = open(os.path.join(folder, 'joback_log.txt'), 'w')
from rdkit import Chem
catalog = unifac_smarts = {i: Chem.MolFromSmarts(j) for i, j in J_BIGGS_JOBACK_SMARTS_id_dict.items()}
lines = []
for key in sorted(pubchem_db.CAS_index):
chem_info = pubchem_db.CAS_index[key]
try:
mol = Chem.MolFromSmiles(chem_info.smiles)
parsed = smarts_fragment(rdkitmol=mol, catalog=catalog, deduplicate=False)
line = '%s\t%s\t%s\t%s\n' %(parsed[2], chem_info.CASs, chem_info.smiles, parsed[0])
except Exception as e:
line = '%s\t%s\t%s\n' %(chem_info.CASs, chem_info.smiles, e)
lines.append(line)
[f.write(line) for line in sorted(lines)]
f.close()
# Maybe use this again if more work is done on Joback
del test_Joback_database | StarcoderdataPython |
1743027 | <filename>st2common/st2common/models/db/sensor.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mongoengine as me
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.constants.types import ResourceType
__all__ = [
'SensorTypeDB'
]
class SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin,
stormbase.UIDFieldMixin):
"""
Description of a specific type of a sensor (think of it as a sensor
template).
Attribute:
pack - Name of the content pack this sensor belongs to.
artifact_uri - URI to the artifact file.
entry_point - Full path to the sensor entry point (e.g. module.foo.ClassSensor).
trigger_types - A list of references to the TriggerTypeDB objects exposed by this sensor.
poll_interval - Poll interval for this sensor.
"""
RESOURCE_TYPE = ResourceType.SENSOR_TYPE
# UID is derived from (pack, name) via UIDFieldMixin.
UID_FIELDS = ['pack', 'name']
name = me.StringField(required=True)
# A sensor name only has to be unique within its pack.
pack = me.StringField(required=True, unique_with='name')
artifact_uri = me.StringField()
entry_point = me.StringField()
trigger_types = me.ListField(field=me.StringField())
poll_interval = me.IntField()
enabled = me.BooleanField(default=True,
help_text=u'Flag indicating whether the sensor is enabled.')
meta = {
'indexes': stormbase.UIDFieldMixin.get_indexes()
}
def __init__(self, *args, **values):
super(SensorTypeDB, self).__init__(*args, **values)
# Pre-compute the derived reference and UID fields at construction
# time so they are stored alongside the document.
self.ref = self.get_reference().ref
self.uid = self.get_uid()
sensor_type_access = MongoDBAccess(SensorTypeDB)
MODELS = [SensorTypeDB]
| StarcoderdataPython |
1636519 | #!/usr/bin/env python3
# -*- coding:utf8 -*-
import PyQt5,sys,traceback
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from tkinter import Tk
from tkinter.messagebox import showinfo
import RC_tray
import os,sys
from googletrans import Translator
def showbug(message):
    """Pop up a blocking Tk info dialog showing *message*.

    The Tk root window is hidden so only the message box appears.
    """
    root = Tk()
    root.withdraw()
    showinfo('提示信息', str(message))
version = "1.0.2"
log = "1.0.2 2018年4月16日 仅使用translate.google.cn服务器"
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.createTrayIcon()
self.trayIcon.show()
self.processing = False
msgIcon = QSystemTrayIcon.MessageIcon(QSystemTrayIcon.Information)
self.trayIcon.showMessage("Coursera 字幕中文翻译助手","请复制字幕文件(txt格式)到剪贴板并直接点击图标即可翻译。",msgIcon)
# self.trayIcon.messageClicked.connect(self.traymesageClicked)
self.setWindowTitle("Coursera字幕中文翻译助手")
def createTrayIcon(self):
self.trayMenu = QMenu()
trans = self.trayMenu.addAction("处理剪贴板中的文件")
trans.triggered.connect(self.transProcess)
self.trayMenu.addSeparator()
about = self.trayMenu.addAction("使用说明")
about.triggered.connect(self.aboutBox)
self.trayMenu.addSeparator()
quit = self.trayMenu.addAction("退出")
quit.triggered.connect(self.fullClose)
self.trayIcon = QSystemTrayIcon(self)
self.trayIcon.setIcon(QIcon(":/Main/Media/normal.png"))
self.trayIcon.setContextMenu(self.trayMenu)
self.trayIcon.activated.connect(self.iconActived)
self.trayIcon.setToolTip("Coursera 字幕中文翻译助手")
def fullClose(self):
self.trayIcon.hide()
app.setQuitOnLastWindowClosed(True)
self.close()
def showMenu(self):
self.trayMenu.exec_()
def iconActived(self,reason):
mouse = QCursor()
if self.processing:
return 0
if reason == QSystemTrayIcon.DoubleClick:
self.trayMenu.exec_(mouse.pos())
if reason == QSystemTrayIcon.Trigger:
self.transProcess()
def transProcess(self):
clipboard = QGuiApplication.clipboard()
if str(clipboard.text()).startswith("file:///"):
if not str(clipboard.text()).endswith(".txt"):
self.showInfo("剪贴板文件并非TXT格式文本。")
return 0
else:
self.uri = clipboard.text()[8:]
print(self.uri)
if self.processing == False:
self.processing = True
self.changeIcon()
self.transGo(self.uri)
self.processing = False
self.changeIcon()
else:
self.showInfo("没有文件需要处理,请复制文本文件到剪贴板。")
def changeIcon(self):
if self.processing:
self.trayIcon.setIcon(QIcon(":/Main/Media/working.png"))
self.trayIcon.setToolTip("Coursera 字幕中文翻译助手 - 正在翻译中")
else:
self.trayIcon.setIcon(QIcon(":/Main/Media/normal.png"))
self.trayIcon.setToolTip("Coursera 字幕中文翻译助手")
def transIt_free(self,f):
translator = Translator(service_urls=[
'translate.google.cn'
])
translations = translator.translate(f,dest="zh-CN",src="en")
r = []
for t in translations:
r.append(t.text)
return r
def transGo(self,uri):
try:
print("START...")
file = open(uri,"r",encoding="utf-8",errors="ignore")
f = file.read()
f= f.replace("\n"," ")
f = f.replace("?","?\n").replace(".",".\n")
f = f.split("\n")
data = self.transIt_free(f)
of = ""
for x in data:
of += x
of = of.replace("?","?\n").replace("。","。\n")
open(uri,"a+",encoding="utf-8",errors="ignore").write("\n\n\n"+of)
en_list = f
cn_list = []
for x in of.split("\n"):
cn_list.append(x)
out = ""
i = 0
for en in en_list:
link = str(i) + en + "\n" + cn_list[i] + "\n\n"
i = i + 1
out += link
open(uri,"a+",encoding="utf-8",errors="ignore").write("\n\n\n"+out)
print("DONE!")
try:
os.startfile(self.uri)
except Exception as e2:
self.showInfo("翻译完毕,但是无法打开目标文件。"+str(e2))
except Exception as e:
self.showInfo("翻译出错,可能是文件格式错误或者连接故障。"+str(e))
print(traceback.format_exc())
def showInfo(self,info):
msgIcon = QSystemTrayIcon.MessageIcon(QSystemTrayIcon.Information)
self.trayIcon.showMessage("Coursera 字幕中文翻译助手",info,msgIcon)
def aboutBox(self):
msg = QMessageBox()
msg.information(self,"关于本程序","Coursera 字幕中文翻译助手 %s\nWriten by Corkine Ma\n\n本程序基于PyQt5和googletrans包。\n\n将txt格式字幕文件复制到剪贴板,点击托盘图标会调用Google翻译API,将其余语言翻译为中文,文件会被自动写入,使用默认打开方式自动打开。当程序正在运行时,变为红色,此时正在连接服务器并进行翻译,当完毕后图标变为蓝色。\n"%version)
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
app.setWindowIcon(QIcon(":/Main/Media/normal.png"))
if QSystemTrayIcon.isSystemTrayAvailable() != True:
showbug("系统不支持托盘图标")
form = Form()
app.exec_()
| StarcoderdataPython |
4820324 | <gh_stars>1-10
import setuptools
# Extract __version__ from asent/about.py by text-scanning, without
# importing the package.
with open("asent/about.py") as f:
v = f.read()
for l in v.split("\n"):
if l.startswith("__version__"):
__version__ = l.split('"')[-2]
with open("README.md", "r") as f:
long_description = f.read()
# NOTE(review): long_description is read but never passed to setup();
# presumably the remaining metadata lives in setup.cfg -- confirm.
setuptools.setup(version=__version__)
| StarcoderdataPython |
1709366 | import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
(ds_train, ds_test), ds_info = tfds.load('HorsesOrHumans', split=['train', 'test'], with_info=True, as_supervised=True, shuffle_files=True)
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`, scaling pixel values from [0, 255] to [0.0, 1.0]; the label passes through unchanged."""
return tf.cast(image, tf.float32) / 255., label
ds_train = ds_train.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.cache()
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
!rm -rf logs
logdir = "logs/scalars/"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300,300,3)),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr=1e-3),
metrics=['accuracy'])
model.fit(
ds_train,
epochs = 15,
callbacks=[tensorboard_callback]
)
| StarcoderdataPython |
134379 | import sys
import os
import asyncio
import dscframework
import json
import keras
from keras.models import load_model
from data import build_dataset
import numpy as np
version = 1
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
model = load_model(("export/mdl_v%d.h5")%(version))
# Subscriber callback for the "facebuffer" topic: currently just logs the
# message header and payload; the batch-prediction code below is stubbed
# out (commented).
async def on_facedetect(head, data):
print("got sub data", flush=True)
print(json.dumps(head), flush=True)
print(data, flush=True)
# Batch predict
#X, y = build_dataset()
#batch = np.asarray([X[0]])
#result = model.predict(batch, batch_size=1, verbose=1)
#print(result)
#print(y[0])
# Connection callback: subscribe to the "facebuffer" topic once connected.
async def on_connect(cli):
await cli.subscribe("facebuffer", on_facedetect)
# Entry point: connect to the local dscframework websocket broker and run.
async def main():
cli = dscframework.Client("ws://localhost:8080")
await cli.start(on_connect)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| StarcoderdataPython |
3265259 | <filename>data/data.py
import numpy as np
from functools import partial
from paddle.io import DataLoader
from paddlenlp.data import Vocab, Pad, Stack
from paddlenlp.datasets import load_dataset
from .sampler import DistributedDynamicBatchSampler
def read(src_path, tgt_path, is_test=False, has_target=False):
    """Yield ``{'id', 'src', 'tgt'}`` samples from parallel text files.

    In test mode without references only the source file is read and
    ``'tgt'`` is left empty; otherwise source and target lines are paired
    up positionally.  Blank lines are skipped, but their ids are still
    consumed (ids follow the raw line numbering).
    """
    if is_test and not has_target:
        # Source-only mode: no reference translations available.
        with open(src_path, 'r', encoding='utf-8') as src_f:
            for sample_id, src_line in enumerate(src_f):
                src_line = src_line.strip()
                if src_line:
                    yield {'id': sample_id, 'src': src_line, 'tgt': ''}
        return
    with open(src_path, 'r', encoding='utf-8') as src_f, \
            open(tgt_path, 'r', encoding='utf-8') as tgt_f:
        for sample_id, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
            src_line, tgt_line = src_line.strip(), tgt_line.strip()
            if src_line and tgt_line:
                yield {'id': sample_id, 'src': src_line, 'tgt': tgt_line}
def merge_pref_lang(pref, lang):
    """Join a file prefix and a language suffix into ``<pref>.<lang>``,
    trimming surrounding whitespace from both parts."""
    return '.'.join((pref.strip(), lang.strip()))
def prep_dataset(conf, mode='train'):
assert mode in ['train', 'dev', 'test']
data_args = conf.data
src_lang = data_args.src_lang
tgt_lang = data_args.tgt_lang
if mode == 'train':
src_path = merge_pref_lang(data_args.train_pref, src_lang)
tgt_path = merge_pref_lang(data_args.train_pref, tgt_lang)
elif mode == 'dev':
src_path = merge_pref_lang(data_args.valid_pref, src_lang)
tgt_path = merge_pref_lang(data_args.valid_pref, tgt_lang)
else:
src_path = merge_pref_lang(data_args.test_pref, src_lang)
tgt_path = merge_pref_lang(data_args.test_pref, tgt_lang)
dataset = load_dataset(read, src_path=src_path, tgt_path=tgt_path, is_test=(mode == 'test'),
has_target=conf.data.has_target,lazy=False)
return dataset
def prep_vocab(conf):
data_args = conf.data
src_vocab_fpath = merge_pref_lang(data_args.vocab_pref, data_args.src_lang)
tgt_vocab_fpath = merge_pref_lang(data_args.vocab_pref, data_args.tgt_lang)
src_vocab = Vocab.load_vocabulary(
src_vocab_fpath,
bos_token=data_args.special_token[0],
pad_token=data_args.special_token[1],
eos_token=data_args.special_token[2],
unk_token=data_args.special_token[3]
)
tgt_vocab = Vocab.load_vocabulary(
tgt_vocab_fpath,
bos_token=data_args.special_token[0],
pad_token=data_args.special_token[1],
eos_token=data_args.special_token[2],
unk_token=data_args.special_token[3]
)
# 是否把vocab词数pad到factor倍数,可以加速训练
conf.defrost()
if data_args.pad_vocab:
padding_vocab = (
lambda x: (x + data_args.pad_factor - 1) // data_args.pad_factor * data_args.pad_factor
)
conf.model.src_vocab_size = padding_vocab(len(src_vocab))
conf.model.tgt_vocab_size = padding_vocab(len(tgt_vocab))
else:
conf.model.src_vocab_size = len(src_vocab)
conf.model.tgt_vocab_size = len(tgt_vocab)
conf.freeze()
return src_vocab, tgt_vocab
def convert_samples(sample, src_vocab, tgt_vocab):
    """Convert one raw text sample into ``(src_ids, tgt_ids, sample_id)``
    by whitespace-tokenizing and mapping tokens through the vocabularies."""
    src_ids = src_vocab.to_indices(sample['src'].split())
    tgt_ids = tgt_vocab.to_indices(sample['tgt'].split())
    return src_ids, tgt_ids, sample['id']
# Filter predicate: keep a (src, tgt) pair only when both sequence
# lengths -- plus one for the bos/eos token added later -- fall inside
# [min_len, max_len].
def min_max_filer(data, max_len, min_len=0):
    src_len, tgt_len = len(data[0]), len(data[1])
    shortest = min(src_len, tgt_len) + 1
    longest = max(src_len, tgt_len) + 1
    return shortest >= min_len and longest <= max_len
def batchify(insts, bos_idx, eos_idx, pad_idx, is_test=False, has_target=False):
    """Collate (src_ids, tgt_ids, sample_id) tuples into padded model inputs.

    Samples are first sorted by descending source length (stably, so
    equal-length samples keep their original relative order), then padded:

    * train/dev: [samples_id, src_word, prev_word, tgt_word]
      - src_word:  src + </s>, left-padded
      - prev_word: <s> + tgt,  right-padded (decoder input)
      - tgt_word:  tgt + </s>, right-padded, with a trailing unit axis
    * test without references: [samples_id, src_word]
    * test with references:    [samples_id, src_word, tgt_word]
    """
    # Stable sort by descending source length.  BUGFIX: the original
    # passed kind='mergsort' (typo), which numpy rejects with a
    # ValueError; 'mergesort' is the stable sort that was intended
    # (plain argsort defaults to quicksort, which is not stable).
    neg_src_len = [-len(inst[0]) for inst in insts]
    sorted_src_idx = np.argsort(neg_src_len, kind='mergesort')
    # Reorder with plain list indexing: np.array() over these ragged
    # (list, list, int) tuples is rejected by modern numpy.
    insts = [insts[i] for i in sorted_src_idx]
    # Pad each field of the batch to the batch's maximum length.
    left_pad = Pad(pad_idx, pad_right=False)
    right_pad = Pad(pad_idx, pad_right=True, dtype='int64')
    src_word = left_pad([inst[0] + [eos_idx] for inst in insts])  # src + </s>
    samples_id = Stack()([inst[2] for inst in insts])
    if not is_test:
        prev_word = right_pad([[bos_idx] + inst[1] for inst in insts])  # <s> + tgt
        # Labels get an extra trailing axis: shape (batch, seq_len, 1).
        tgt_word = np.expand_dims(
            right_pad([inst[1] + [eos_idx] for inst in insts]), axis=2)  # tgt + </s>
        data_inputs = [samples_id, src_word, prev_word, tgt_word]
    else:
        if not has_target:
            data_inputs = [samples_id, src_word]
        else:
            # NOTE(review): this pads inst[0] (the source) as tgt_word,
            # not inst[1] -- looks suspicious; confirm against callers.
            tgt_word = right_pad([inst[0] for inst in insts])
            data_inputs = [samples_id, src_word, tgt_word]
    return data_inputs
def prep_loader(conf, dataset, mode='train', multi_process=False):
assert mode in ['train', 'dev', 'test']
data_args, model_args, strategy_args, train_args, gen_args = conf.data, conf.model, conf.learning_strategy, conf.train, conf.generate
# load vocab
src_vocab, tgt_vocab = prep_vocab(conf)
# dataset
trans_fn = partial(convert_samples, src_vocab=src_vocab, tgt_vocab=tgt_vocab)
dataset = dataset.map(trans_fn, lazy=False)
if mode != 'test':
filt_fn = partial(min_max_filer, max_len=model_args.max_length)
dataset = dataset.filter(filt_fn)
batchify_fn = partial(batchify, bos_idx=model_args.eos_idx, eos_idx=model_args.eos_idx,
pad_idx=model_args.pad_idx, is_test=mode == 'test', has_target=data_args.has_target)
# samplerv2
max_tokens = train_args.max_tokens if mode != 'test' else gen_args.max_tokens
max_sentences = train_args.max_sentences if mode != 'test' else gen_args.max_sentences
batch_sampler = DistributedDynamicBatchSampler(dataset,
mode=mode,
has_target=data_args.has_target,
max_tokens=max_tokens,
max_sentences=eval(str(max_sentences)),
bsz_factor=train_args.batch_size_factor,
seed=conf.seed,
num_replicas=None if multi_process == True else 1,
rank=None if multi_process == True else 0,
drop_last=False)
if conf.train.resume and mode == 'train': # resume应该bool,路径由init来决定
batch_sampler.set_epoch(conf.train.last_epoch + 1)
print(f"----- Resume Training: set sampler's epoch to {conf.train.last_epoch + 1} as a random seed")
# dataloader
dataloader = DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=batchify_fn,
num_workers=train_args.num_workers,
)
return dataloader
| StarcoderdataPython |
1761213 | <reponame>wckdouglas/tgirt-dna-seq<gh_stars>1-10
#!/bin/env python
import argparse
import subprocess
import os
import sys
# Parse command-line options for the plasma-DNA trim/map pipeline and
# return the argparse namespace.
# NOTE(review): the help text contains typos ('sequneces', 'default=:')
# -- they are runtime strings, left unchanged here.
def getOpt():
parser = argparse.ArgumentParser(description='Pipeline for trimming, mapping paired end plasma DNA')
parser.add_argument('-1','--fq1',required=True, help='read1 fastqfile [string]')
parser.add_argument('-o','--outdir',required=True, help='output directory')
parser.add_argument('-x','--index',required=True, help='bwa index or hisat2 index')
parser.add_argument('-a','--adaptor', default='adaptor.fa', help='Fasta file containing adaptor sequneces (default: adaptor.fa)')
parser.add_argument('-t','--threads',default=1, type=int, help='Threads to be used (default=: 1)')
args = parser.parse_args()
return args
# running in shell
# Log the command to stderr (tagged with the sample name) and run it
# through the shell, timed with `time`.
# NOTE(review): the shell exit status captured in `result` is discarded;
# this function always returns 0, so callers cannot detect failures.
def runProcess(command, samplename):
sys.stderr.write('[%s] %s\n' %(samplename, command))
result = subprocess.call('time ' + command, shell=True)
return 0
#Trimming
def trimming(fq1, threads, trim_path, samplename, adaptor):
sys.stderr.write('Running trim process with %s\n' %samplename)
## ILLUMINACLIP:<fastaWithAdaptersEtc>:<seed mismatches>:<palindrome clip
## threshold>:<simple clip threshold>:<minAdapterLength>:<keepBothReads>
options='ILLUMINACLIP:%s:2:10:10:1:true ' %(adaptor) +\
'LEADING:15 TRAILING:15 SLIDINGWINDOW:4:15 MINLEN:25'
command = 'time trimmomatic ' +\
'PE -threads %i ' %(threads)+\
'-phred33 '+\
'-basein %s ' %(fq1) + \
'-baseout %s/%s.fq.gz ' %(trim_path, samplename) + \
options
runProcess(command,samplename)
return 0
#MAPPING
def mapping(samplename, trim_path, index, threads, bam_path):
    """Align trimmed paired-end reads with bwa mem, pipe into samtools sort
    and return the path of the resulting coordinate-sorted BAM file."""
    sys.stderr.write('Running mapping with %s\n' %samplename)
    file1 = '%s/%s_1P.fq.gz' %(trim_path, samplename)
    file2 = file1.replace('1P','2P')
    bam_file = '%s/%s.bam' %(bam_path, samplename)
    sort_prefix = bam_file.replace('.bam','')
    align_part = 'bwa mem -t%i %s %s %s ' %(threads, index, file1, file2)
    sort_part = '| samtools sort -@ %i -O bam -T %s ' %(threads, sort_prefix)
    command = align_part + sort_part + '> %s' %bam_file
    runProcess(command, samplename)
    return bam_file
def makedir(directory):
    """Create *directory* if it does not exist yet, logging to stderr.

    Uses os.makedirs so that missing intermediate directories are created as
    well (os.mkdir would fail on nested paths). Existing directories are left
    untouched. Returns 0.
    """
    if not os.path.isdir(directory):
        os.makedirs(directory)
        sys.stderr.write('Make directory %s\n' %directory)
    return 0
def main(args):
    """Drive the pipeline: create output folders, trim reads, then map them.

    args -- namespace produced by getOpt() (fq1, outdir, index, adaptor, threads)
    Returns 0.
    """
    fq1 = args.fq1
    outdir = args.outdir
    index = args.index
    adaptor = args.adaptor
    threads = args.threads
    # derive sample name from the read-1 file name
    samplename = os.path.basename(fq1).replace('_R1_001','').split('.')[0]
    # output directories
    trim_path= outdir + '/trimmedFiles'
    bam_path= outdir + '/bamFiles'
    # NOTE: the original used map(makedir, [...]); under Python 3 map() is
    # lazy, so the directories would never be created. Use an explicit loop.
    for directory in [trim_path, bam_path]:
        makedir(directory)
    #trim
    trimming(fq1, threads, trim_path, samplename, adaptor)
    #map
    bam_file = mapping(samplename, trim_path, index, threads, bam_path)
    sys.stderr.write('Finished mapping %s\n' %samplename)
    return 0
# Script entry point: parse the command line and run the pipeline.
if __name__ == '__main__':
    main(getOpt())
# ---- StarcoderdataPython snippet separator ----
# last edited: 10/08/2017, 10:25
import os, sys, glob, subprocess
from datetime import datetime
from PyQt4 import QtGui, QtCore
import math
#from XChemUtils import mtztools
import XChemDB
import XChemRefine
import XChemUtils
import XChemLog
import XChemToolTips
import csv
try:
import gemmi
import pandas
except ImportError:
pass
#def get_names_of_current_clusters(xce_logfile,panddas_directory):
# Logfile=XChemLog.updateLog(xce_logfile)
# Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory))
# os.chdir('{0!s}/cluster_analysis'.format(panddas_directory))
# cluster_dict={}
# for out_dir in sorted(glob.glob('*')):
# if os.path.isdir(out_dir):
# cluster_dict[out_dir]=[]
# found_first_pdb=False
# for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
# xtal=folder[folder.rfind('/')+1:]
# if not found_first_pdb:
# if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ):
# cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb'))
# found_first_pdb=True
# cluster_dict[out_dir].append(xtal)
# return cluster_dict
class export_and_refine_ligand_bound_models(QtCore.QThread):
    """QThread that exports ligand-bound PanDDA models and starts refinement.

    For every dataset with a modelled_structures/<xtal>-pandda-model.pdb it:
    locates ligands of residue type LIG, matches each ligand to a PanDDA
    event map, converts that event map to structure factors, copies model
    and maps into the project directory, updates the database and launches
    an initial BUSTER refinement.
    """
    def __init__(self,PanDDA_directory,datasource,project_directory,xce_logfile,which_models):
        # which_models: selection mode; a value starting with 'all' exports
        # every model, anything else restricts the export to new/updated ones
        # (see run()); 'nocheck' additionally disables the refinement sanity
        # check (see refine_exported_model()).
        QtCore.QThread.__init__(self)
        self.PanDDA_directory = PanDDA_directory
        self.datasource = datasource
        self.db = XChemDB.data_source(self.datasource)
        self.Logfile = XChemLog.updateLog(xce_logfile)
        self.xce_logfile = xce_logfile
        self.project_directory = project_directory
        self.which_models=which_models
        self.external_software=XChemUtils.external_software(xce_logfile).check()
#        self.initial_model_directory=initial_model_directory
#        self.db.create_missing_columns()
#        self.db_list=self.db.get_empty_db_dict()
#        self.external_software=XChemUtils.external_software(xce_logfile).check()
#        self.xce_logfile=xce_logfile
#        self.already_exported_models=[]
    def run(self):
        """Main worker loop: export each modelled dataset and refine it."""
        self.Logfile.warning(XChemToolTips.pandda_export_ligand_bound_models_only_disclaimer())
        # find all folders with *-pandda-model.pdb
        modelsDict = self.find_modeled_structures_and_timestamps()
        # if only NEW models shall be exported, check timestamps
        if not self.which_models.startswith('all'):
            modelsDict = self.find_new_models(modelsDict)
        # find pandda_inspect_events.csv and read in as pandas dataframe
        # NOTE(review): if the csv is missing, inspect_csv stays None and
        # find_ligands_matching_event_map() will fail on .iterrows() --
        # presumably the file always exists at this stage; confirm.
        inspect_csv = None
        if os.path.isfile(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv')):
            inspect_csv = pandas.read_csv(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv'))
        progress = 0
        # NOTE(review): under Python 2 the 1/len(...) below is integer
        # division, so progress_step is 0 whenever more than one model is
        # exported; the progress bar then never advances. TypeError is raised
        # (and caught) when modelsDict is not sized as expected.
        try:
            progress_step = float(1/len(modelsDict))
        except TypeError:
            self.Logfile.error('DID NOT FIND ANY MODELS TO EXPORT')
            return None
        for xtal in sorted(modelsDict):
            os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
            pandda_model = os.path.join('modelled_structures',xtal + '-pandda-model.pdb')
            pdb = gemmi.read_structure(pandda_model)
            # find out ligand event map relationship
            ligandDict = XChemUtils.pdbtools_gemmi(pandda_model).center_of_mass_ligand_dict('LIG')
            if ligandDict == {}:
                self.Logfile.error(xtal + ': cannot find ligand of type LIG; skipping...')
                continue
            self.show_ligands_in_model(xtal,ligandDict)
            emapLigandDict = self.find_ligands_matching_event_map(inspect_csv,xtal,ligandDict)
            self.Logfile.warning('emapLigandDict' + str(emapLigandDict))
            # convert event map to SF
            self.event_map_to_sf(pdb.resolution,emapLigandDict)
            # move existing event maps in project directory to old folder
            self.move_old_event_to_backup_folder(xtal)
            # copy event MTZ to project directory
            self.copy_event_mtz_to_project_directory(xtal)
            # copy pandda-model to project directory
            self.copy_pandda_model_to_project_directory(xtal)
            # make map from MTZ and cut around ligand
            self.make_and_cut_map(xtal,emapLigandDict)
            # update database
            self.update_database(xtal,modelsDict)
            # refine models
            self.refine_exported_model(xtal)
            progress += progress_step
            self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
    def update_database(self,xtal,modelsDict):
        """Record the model creation time and move *xtal* to '3 - In Refinement'."""
        db_dict = {}
        timestamp_file = modelsDict[xtal]
        db_dict['DatePanDDAModelCreated'] = timestamp_file
        db_dict['RefinementOutcome'] = '3 - In Refinement'
        self.Logfile.insert('updating database for '+xtal+' setting time model was created to '+db_dict['DatePanDDAModelCreated'])
        self.db.update_data_source(xtal,db_dict)
    def make_and_cut_map(self,xtal,emapLigandDict):
        """In the project directory: recompute each ligand's event map from its
        MTZ and mask it to a 7 A region around the ligand, renaming the result
        to <xtal>_<ligID>_event.ccp4 (plus a *_event_cut.ccp4 symlink)."""
        self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
        os.chdir(os.path.join(self.project_directory,xtal))
        XChemUtils.pdbtools_gemmi(xtal + '-pandda-model.pdb').save_ligands_to_pdb('LIG')
        for ligID in emapLigandDict:
            m = emapLigandDict[ligID]
            emtz = m.replace('.ccp4','_' + ligID + '.mtz')
            emap = m.replace('.ccp4','_' + ligID + '.ccp4')
            XChemUtils.maptools().calculate_map(emtz,'FWT','PHWT')
            XChemUtils.maptools().cut_map_around_ligand(emap,ligID+'.pdb','7')
            if os.path.isfile(emap.replace('.ccp4','_mapmask.ccp4')):
                os.system('/bin/mv %s %s_%s_event.ccp4' %(emap.replace('.ccp4','_mapmask.ccp4'),xtal,ligID))
                os.system('ln -s %s_%s_event.ccp4 %s_%s_event_cut.ccp4' %(xtal,ligID,xtal,ligID))
    def copy_pandda_model_to_project_directory(self,xtal):
        """Copy <xtal>-pandda-model.pdb from the PanDDA tree into the project directory."""
        os.chdir(os.path.join(self.project_directory,xtal))
        model = os.path.join(self.PanDDA_directory,'processed_datasets',xtal,'modelled_structures',xtal+'-pandda-model.pdb')
        self.Logfile.insert('copying %s to project directory' %model)
        os.system('/bin/cp %s .' %model)
    def copy_event_mtz_to_project_directory(self,xtal):
        """Copy all event MTZ files (*-BDC_*.mtz) of *xtal* into the project directory."""
        self.Logfile.insert('changing directory to ' + os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
        os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
        for emap in glob.glob('*-BDC_*.mtz'):
            self.Logfile.insert('copying %s to %s...' %(emap,os.path.join(self.project_directory,xtal)))
            os.system('/bin/cp %s %s' %(emap,os.path.join(self.project_directory,xtal)))
    def move_old_event_to_backup_folder(self,xtal):
        """Move existing event maps in the project directory into a timestamped
        event_map_backup folder so fresh copies can take their place."""
        self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
        os.chdir(os.path.join(self.project_directory,xtal))
        if not os.path.isdir('event_map_backup'):
            os.mkdir('event_map_backup')
        self.Logfile.insert('moving existing event maps to event_map_backup')
        for emap in glob.glob('*-BDC_*.ccp4'):
            os.system('/bin/mv %s event_map_backup/%s' %(emap,emap+'.'+str(datetime.now()).replace(' ','_').replace(':','-')))
    def show_ligands_in_model(self,xtal,ligandDict):
        """Log every ligand found in *xtal* along with its centre-of-mass coordinates."""
        self.Logfile.insert(xtal + ': found the following ligands...')
        for lig in ligandDict:
            self.Logfile.insert(lig + ' -> coordinates ' + str(ligandDict[lig]))
    def find_modeled_structures_and_timestamps(self):
        """Return {sample: mtime} for every *-pandda-model.pdb in the PanDDA tree."""
        self.Logfile.insert('finding out modelled structures in ' + self.PanDDA_directory)
        modelsDict={}
        for model in sorted(glob.glob(os.path.join(self.PanDDA_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb'))):
            sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
            timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
            self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
            modelsDict[sample]=timestamp
        return modelsDict
    def find_new_models(self,modelsDict):
        """Filter *modelsDict* down to models whose file timestamp differs from
        the DatePanDDAModelCreated recorded in the database (i.e. new or
        updated models). Samples at '5 - deposition ready'/'6 - deposited'
        are excluded by the DB query."""
        samples_to_export = {}
        self.Logfile.hint('XCE will never export/ refine models that are "5-deposition ready" or "6-deposited"')
        self.Logfile.hint('Please change the RefinementOutcome flag in the Refinement table if you wish to re-export them')
        self.Logfile.insert('checking timestamps of models in database...')
        for xtal in modelsDict:
            timestamp_file = modelsDict[xtal]
            db_query=self.db.execute_statement("select DatePanDDAModelCreated from mainTable where CrystalName is '"+xtal+"' and (RefinementOutcome like '3%' or RefinementOutcome like '4%')")
            try:
                timestamp_db=str(db_query[0][0])
            except IndexError:
                # no DB record -> treat as a brand-new model and export it
                self.Logfile.warning('%s: database query gave no results for DatePanDDAModelCreated; skipping...' %xtal)
                self.Logfile.warning('%s: this might be a brand new model; will continue with export!' %xtal)
                samples_to_export[xtal]=timestamp_file
                timestamp_db = "2100-01-01 00:00:00" # some time in the future...
            try:
                difference=(datetime.strptime(timestamp_file,'%Y-%m-%d %H:%M:%S') - datetime.strptime(timestamp_db,'%Y-%m-%d %H:%M:%S') )
                if difference.seconds != 0:
                    self.Logfile.insert('exporting '+xtal+' -> was already refined, but newer PanDDA model available')
                    samples_to_export[xtal]=timestamp_file
                else:
                    self.Logfile.insert('%s: model has not changed since it was created on %s' %(xtal,timestamp_db))
            except (ValueError, IndexError), e:
                self.Logfile.error(str(e))
        return samples_to_export
    def event_map_to_sf(self,resolution,emapLigandDict):
        """Convert each ligand's event map (ccp4) to structure factors and save
        them as <map>_<lig>.mtz at the given resolution."""
        for lig in emapLigandDict:
            emap = emapLigandDict[lig]
            emtz = emap.replace('.ccp4','.mtz')
            emtz_ligand = emap.replace('.ccp4','_' + lig + '.mtz')
            self.Logfile.insert('trying to convert %s to SF -> %s' %(emap,emtz_ligand))
            self.Logfile.insert('>>> ' + emtz)
            XChemUtils.maptools_gemmi(emap).map_to_sf(resolution)
            if os.path.isfile(emtz):
                os.system('/bin/mv %s %s' %(emtz,emtz_ligand))
                self.Logfile.insert('success; %s exists' %emtz_ligand)
            else:
                self.Logfile.warning('something went wrong; %s could not be created...' %emtz_ligand)
    def find_ligands_matching_event_map(self,inspect_csv,xtal,ligandDict):
        """Return {ligandID: event_map_filename} for ligands lying within 7 A of
        a PanDDA event listed for *xtal* in pandda_inspect_events.csv.
        Relies on the current working directory holding the *-BDC_*.ccp4 maps."""
        emapLigandDict = {}
        for index, row in inspect_csv.iterrows():
            if row['dtag'] == xtal:
                for emap in glob.glob('*-BDC_*.ccp4'):
                    self.Logfile.insert('checking if event and ligand are within 7A of each other')
                    x = float(row['x'])
                    y = float(row['y'])
                    z = float(row['z'])
                    matching_ligand = self.calculate_distance_to_ligands(ligandDict,x,y,z)
                    if matching_ligand is not None:
                        emapLigandDict[matching_ligand] = emap
                        self.Logfile.insert('found matching ligand (%s) for %s' %(matching_ligand,emap))
                        break
                    else:
                        self.Logfile.warning('current ligand not close to event...')
        if emapLigandDict == {}:
            self.Logfile.error('could not find ligands within 7A of PanDDA events')
        return emapLigandDict
    def calculate_distance_to_ligands(self,ligandDict,x,y,z):
        """Return the ID of the first ligand whose centre of mass lies within
        7 A of the event position (x, y, z), or None if none is close enough."""
        matching_ligand = None
        p_event = gemmi.Position(x, y, z)
        for ligand in ligandDict:
            c = ligandDict[ligand]
            p_ligand = gemmi.Position(c[0], c[1], c[2])
            self.Logfile.insert('coordinates ligand: ' + str(c[0])+' '+ str(c[1])+' '+str(c[2]))
            self.Logfile.insert('coordinates event: ' + str(x)+' '+ str(y)+' '+str(z))
            distance = p_event.dist(p_ligand)
            self.Logfile.insert('distance between ligand and event: %s A' %str(distance))
            if distance < 7:
                matching_ligand = ligand
                break
        return matching_ligand
    def refine_exported_model(self,xtal):
        """Set up a new cootOut/Refine_<serial> cycle for *xtal* and launch an
        initial BUSTER refinement of the exported PanDDA model. Requires
        <xtal>.free.mtz and <xtal>-pandda-model.pdb in the project directory."""
        RefmacParams={  'HKLIN': '', 'HKLOUT': '',
                        'XYZIN': '', 'XYZOUT': '',
                        'LIBIN': '', 'LIBOUT': '',
                        'TLSIN': '', 'TLSOUT': '',
                        'TLSADD': '',
                        'NCYCLES': '10',
                        'MATRIX_WEIGHT': 'AUTO',
                        'BREF': '    bref ISOT\n',
                        'TLS': '',
                        'NCS': '',
                        'TWIN': '',
                        'WATER': '',
                        'LIGOCC': '',
                        'SANITY': '' }
        if 'nocheck' in self.which_models:
            RefmacParams['SANITY'] = 'off'
        self.Logfile.insert('trying to refine ' + xtal + '...')
        self.Logfile.insert('%s: getting compound code from database' %xtal)
        query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
        compoundID=str(query[0][0])
        self.Logfile.insert('%s: compounds code = %s' %(xtal,compoundID))
        if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'.free.mtz')):
            if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb')):
                self.Logfile.insert('running inital refinement on PANDDA model of '+xtal)
                Serial=XChemRefine.GetSerial(self.project_directory,xtal)
                if not os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut')):
                    os.mkdir(os.path.join(self.project_directory,xtal,'cootOut'))
                # create folder for new refinement cycle
                if os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial))):
                    os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
                else:
                    os.mkdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
                    os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
                os.system('/bin/cp %s in.pdb' %os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb'))
                Refine=XChemRefine.Refine(self.project_directory,xtal,compoundID,self.datasource)
                Refine.RunBuster(str(Serial),RefmacParams,self.external_software,self.xce_logfile,None)
            else:
                self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
        else:
            self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
                xtal, xtal, os.path.join(self.project_directory, xtal)))
class refine_bound_state_with_buster(QtCore.QThread):
    """QThread that exports new or updated PanDDA models into the project
    directory and launches an initial BUSTER refinement for each of them.

    export_models() decides which samples to export (all, or only those whose
    *-pandda-model.pdb is newer than the database record), copies models and
    event maps into the project directory and updates the database;
    refine_exported_models() then sets up a fresh refinement cycle per sample.
    """
    def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,which_models):
        QtCore.QThread.__init__(self)
        self.panddas_directory=panddas_directory
        self.datasource=datasource
        self.initial_model_directory=initial_model_directory
        self.db=XChemDB.data_source(self.datasource)
        self.db.create_missing_columns()
        self.db_list=self.db.get_empty_db_dict()
        self.external_software=XChemUtils.external_software(xce_logfile).check()
        self.xce_logfile=xce_logfile
        self.Logfile=XChemLog.updateLog(xce_logfile)
        # 'all' exports every available model; anything else exports only
        # models that are new or newer than the database timestamp
        self.which_models=which_models
        self.already_exported_models=[]
    def run(self):
        """Export the relevant PanDDA models, then refine them."""
        samples_to_export=self.export_models()
        self.refine_exported_models(samples_to_export)
    def refine_exported_models(self,samples_to_export):
        """Launch an initial BUSTER refinement for every exported sample.

        Requires <xtal>.free.mtz and <xtal>-pandda-model.pdb in the sample's
        project directory; creates/uses cootOut/Refine_<serial> work folders.
        """
        self.Logfile.insert('will try to refine the following crystals:')
        for xtal in sorted(samples_to_export):
            self.Logfile.insert(xtal)
        for xtal in sorted(samples_to_export):
            self.Logfile.insert('%s: getting compound code from database' %xtal)
            query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
            compoundID=str(query[0][0])
            self.Logfile.insert('%s: compounds code = %s' %(xtal,compoundID))
            if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
                if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb')):
                    self.Logfile.insert('running inital refinement on PANDDA model of '+xtal)
                    Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
                    #######################################################
                    if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
                        os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
                    # create folder for new refinement cycle
                    if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
                        os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
                    else:
                        os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
                        os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
                    os.system('/bin/cp %s in.pdb' %os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb'))
                    Refine=XChemRefine.Refine(self.initial_model_directory,xtal,compoundID,self.datasource)
                    # NOTE(review): RunBuster is called here without RefmacParams,
                    # unlike export_and_refine_ligand_bound_models.refine_exported_model;
                    # confirm the expected signature of XChemRefine.Refine.RunBuster.
                    Refine.RunBuster(str(Serial),self.external_software,self.xce_logfile,None)
                else:
                    self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
            elif xtal in samples_to_export and not os.path.isfile(
                os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
                self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
                    xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
            else:
                self.Logfile.insert('%s: nothing to refine' % (xtal))
    def export_models(self):
        """Determine which PanDDA models to export and copy them (plus their
        event maps) into the project directory.

        Returns {sample: file_timestamp} for every exported sample and updates
        the database (RefinementOutcome -> '2 - PANDDA model').
        """
        self.Logfile.insert('finding out which PanDDA models need to be exported')
        # first find which samples are in interesting datasets and have a model
        # and determine the timestamp
        fileModelsDict={}
        queryModels=''
        for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
            sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
            timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
            self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
            queryModels+="'"+sample+"',"
            fileModelsDict[sample]=timestamp
        # now get these models from the database and compare the datestamps
        # Note: only get the models that underwent some form of refinement,
        #       because only if the model was updated in pandda.inspect will it be exported and refined
        dbModelsDict={}
        if queryModels != '':
            dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
            for item in dbEntries:
                xtal=str(item[0])
                timestamp=str(item[1])
                dbModelsDict[xtal]=timestamp
                self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
        # compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
        samples_to_export={}
        self.Logfile.insert('checking which PanDDA models were newly created or updated')
        if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA!')
        for sample in fileModelsDict:
            if self.which_models=='all':
                self.Logfile.insert('exporting '+sample)
                samples_to_export[sample]=fileModelsDict[sample]
            else:
                if sample in dbModelsDict:
                    try:
                        difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
                        if difference.seconds != 0:
                            self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
                            samples_to_export[sample]=fileModelsDict[sample]
                    except ValueError:
                        # this will be raised if timestamp is not properly formatted;
                        # which will usually be the case when respective field in database is blank
                        # these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        # BUGFIX: the message previously referenced 'xtal', which is either
                        # undefined here or the leftover of an unrelated loop; 'sample' is
                        # the crystal actually being processed.
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DBbroweser (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export prodedure.' )
                        self.Logfile.insert(advice)
                else:
                    self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
                    samples_to_export[sample]=fileModelsDict[sample]
        # update the DB:
        # set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
        if samples_to_export != {}:
            select_dir_string=''
            select_dir_string_new_pannda=' '
            for sample in samples_to_export:
                self.Logfile.insert('changing directory to ' + os.path.join(self.initial_model_directory,sample))
                os.chdir(os.path.join(self.initial_model_directory,sample))
                self.Logfile.insert(sample + ': copying ' + os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
                os.system('/bin/cp %s .' %os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
                db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
                for old_event_map in glob.glob('*-BDC_*.ccp4'):
                    if not os.path.isdir('old_event_maps'):
                        os.mkdir('old_event_maps')
                    self.Logfile.warning(sample + ': moving ' + old_event_map + ' to old_event_maps folder')
                    os.system('/bin/mv %s old_event_maps' %old_event_map)
                for event_map in glob.glob(os.path.join(self.panddas_directory,'processed_datasets',sample,'*-BDC_*.ccp4')):
                    self.Logfile.insert(sample + ': copying ' + event_map)
                    os.system('/bin/cp %s .' %event_map)
                select_dir_string+="select_dir={0!s} ".format(sample)
                select_dir_string_new_pannda+='{0!s} '.format(sample)
                self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
                self.db.update_data_source(sample,db_dict)
        return samples_to_export
class run_pandda_export(QtCore.QThread):
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models,pandda_params):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.update_datasource_only=update_datasource_only
self.which_models=which_models
self.already_exported_models=[]
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '' }
def run(self):
# v1.3.8.2 - removed option to update database only
# if not self.update_datasource_only:
samples_to_export=self.export_models()
self.import_samples_into_datasouce(samples_to_export)
# if not self.update_datasource_only:
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in samples_to_export: self.Logfile.insert(xtal)
# sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';")
# for item in sample_list:
# xtal=str(item[0])
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
self.Logfile.insert('%s: compounds code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb')):
self.Logfile.insert('running inital refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
try:
os.system('/bin/rm *-ensemble-model.pdb *restraints*')
except:
self.Logfile.error("Restraint files didn't exist to remove. Will try to continue")
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
Refine=XChemRefine.panddaRefine(self.initial_model_directory,xtal,compoundID,self.datasource)
os.symlink(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb'),xtal+'-ensemble-model.pdb')
Refine.RunQuickRefine(Serial,self.RefmacParams,self.external_software,self.xce_logfile,'pandda_refmac',None)
# elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures',
# '{}-pandda-model.pdb'.format(xtal)):
# self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) +
# ' does not have a modelled structure. Check whether you expect this dataset to ' +
# ' have a modelled structure, compare pandda.inspect and datasource,'
# ' then tell XCHEMBB ')
else:
self.Logfile.error('%s: cannot find %s-ensemble-model.pdb; cannot start refinement...' %(xtal,xtal))
self.Logfile.error('Please check terminal window for any PanDDA related tracebacks')
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
def import_samples_into_datasouce(self,samples_to_export):
# first make a note of all the datasets which were used in pandda directory
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'False',DimplePANDDApath='{0!s}' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
# do the same as before, but look for rejected datasets
try:
os.chdir(os.path.join(self.panddas_directory,'rejected_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'True',DimplePANDDApath='{0!s}',DimplePANDDAhit = 'False' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
except OSError:
pass
site_list = []
pandda_hit_list=[]
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_sites.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
self.Logfile.insert('reding pandda_inspect_sites.csv')
for i,line in enumerate(csv_dict):
self.Logfile.insert(str(line).replace('\n','').replace('\r',''))
site_index=line['site_idx']
name=line['Name'].replace("'","")
comment=line['Comment']
site_list.append([site_index,name,comment])
self.Logfile.insert('add to site_list_:' + str([site_index,name,comment]))
progress_step=1
for i,line in enumerate(open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))):
n_lines=i
if n_lines != 0:
progress_step=100/float(n_lines)
else:
progress_step=0
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('reading '+os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
for i,line in enumerate(csv_dict):
db_dict={}
sampleID=line['dtag']
if sampleID not in samples_to_export:
self.Logfile.warning('%s: not to be exported; will not add to panddaTable...' %sampleID)
continue
if sampleID not in pandda_hit_list:
pandda_hit_list.append(sampleID)
site_index=str(line['site_idx']).replace('.0','')
event_index=str(line['event_idx']).replace('.0','')
self.Logfile.insert(str(line))
self.Logfile.insert('reading {0!s} -> site {1!s} -> event {2!s}'.format(sampleID, site_index, event_index))
for entry in site_list:
if entry[0]==site_index:
site_name=entry[1]
site_comment=entry[2]
break
# check if EVENT map exists in project directory
event_map=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*ccp4')):
filename=file[file.rfind('/')+1:]
if filename.startswith(sampleID+'-event_'+event_index) and filename.endswith('map.native.ccp4'):
event_map=file
self.Logfile.insert('found respective event maps in {0!s}: {1!s}'.format(self.initial_model_directory, event_map))
break
# initial pandda model and mtz file
pandda_model=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*pdb')):
filename=file[file.rfind('/')+1:]
if filename.endswith('-ensemble-model.pdb'):
pandda_model=file
if sampleID not in self.already_exported_models:
self.already_exported_models.append(sampleID)
break
inital_mtz=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*mtz')):
filename=file[file.rfind('/')+1:]
if filename.endswith('pandda-input.mtz'):
inital_mtz=file
break
db_dict['CrystalName'] = sampleID
db_dict['PANDDApath'] = self.panddas_directory
db_dict['PANDDA_site_index'] = site_index
db_dict['PANDDA_site_name'] = site_name
db_dict['PANDDA_site_comment'] = site_comment
db_dict['PANDDA_site_event_index'] = event_index
db_dict['PANDDA_site_event_comment'] = line['Comment'].replace("'","")
db_dict['PANDDA_site_confidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_InspectConfidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_ligand_placed'] = line['Ligand Placed']
db_dict['PANDDA_site_viewed'] = line['Viewed']
db_dict['PANDDA_site_interesting'] = line['Interesting']
db_dict['PANDDA_site_z_peak'] = line['z_peak']
db_dict['PANDDA_site_x'] = line['x']
db_dict['PANDDA_site_y'] = line['y']
db_dict['PANDDA_site_z'] = line['z']
db_dict['PANDDA_site_ligand_id'] = ''
db_dict['PANDDA_site_event_map'] = event_map
db_dict['PANDDA_site_initial_model'] = pandda_model
db_dict['PANDDA_site_initial_mtz'] = inital_mtz
db_dict['PANDDA_site_spider_plot'] = ''
# find apo structures which were used
# XXX missing XXX
self.db.update_insert_site_event_panddaTable(sampleID,db_dict)
# this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement
self.db.execute_statement("update panddaTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and RefinementOutcome is null".format(sampleID))
self.db.execute_statement("update mainTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and (RefinementOutcome is null or RefinementOutcome is '1 - Analysis Pending')".format(sampleID))
self.db.execute_statement("update mainTable set DimplePANDDAhit = 'True' where CrystalName is '{0!s}'".format(sampleID))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('done reading pandda_inspect_sites.csv')
# finally find all samples which do not have a pandda hit
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
self.Logfile.insert('check which datasets are not interesting')
# DimplePANDDAhit
# for xtal in glob.glob('*'):
# if xtal not in pandda_hit_list:
# self.Logfile.insert(xtal+': not in interesting_datasets; updating database...')
# self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal))
def export_models(self):
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
self.Logfile.insert('Note: you chose to export ALL available PanDDA!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
elif self.which_models == 'selected':
for i in range(0, self.pandda_analyse_data_table.rowCount()):
if str(self.pandda_analyse_data_table.item(i, 0).text()) == sample:
if self.pandda_analyse_data_table.cellWidget(i, 1).isChecked():
self.Logfile.insert('Dataset selected by user -> exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
break
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
advice = ( 'The pandda model of '+xtal+' was changed, but it was already refined! '
'This is most likely because this was done with an older version of XCE. '
'If you really want to export and refine this model, you need to open the database '
'with DBbroweser (sqlitebrowser.org); then change the RefinementOutcome field '
'of the respective sample to "2 - PANDDA model", save the database and repeat the export prodedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
select_dir_string_new_pannda=' '
for sample in samples_to_export:
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
select_dir_string+="select_dir={0!s} ".format(sample)
select_dir_string_new_pannda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
if os.path.isdir(os.path.join(self.panddas_directory,'rejected_datasets')):
Cmds = (
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string)+
' export_ligands=False'
' generate_occupancy_groupings=True\n'
)
else:
Cmds = (
'source /dls/science/groups/i04-1/software/pandda-update/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
# 'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string_new_pannda)+
' generate_restraints=True\n'
)
self.Logfile.insert('running pandda.export with the following settings:\n'+Cmds)
os.system(Cmds)
return samples_to_export
class run_pandda_analyse(QtCore.QThread):
    """Qt thread that assembles a pandda.sh script and launches pandda.analyse.

    Depending on the user's settings, the script is executed on the local
    machine, submitted to a remote host (via self.remote_string) or submitted
    to the cluster with qsub.  Note: this module is Python 2 code (print
    statements, old-style PyQt signals).
    """

    def __init__(self,pandda_params,xce_logfile,datasource):
        QtCore.QThread.__init__(self)
        # pandda.analyse input/output locations and options (from the GUI);
        # numeric parameters arrive as strings and are converted where needed
        self.data_directory=pandda_params['data_dir']
        self.panddas_directory=pandda_params['out_dir']
        self.submit_mode=pandda_params['submit_mode']
        self.pandda_analyse_data_table = pandda_params['pandda_table']
        self.nproc=pandda_params['nproc']
        self.min_build_datasets=pandda_params['min_build_datasets']
        self.pdb_style=pandda_params['pdb_style']
        self.mtz_style=pandda_params['mtz_style']
        self.sort_event=pandda_params['sort_event']
        self.number_of_datasets=pandda_params['N_datasets']
        self.max_new_datasets=pandda_params['max_new_datasets']
        self.grid_spacing=pandda_params['grid_spacing']
        self.reference_dir=pandda_params['reference_dir']
        self.filter_pdb=os.path.join(self.reference_dir,pandda_params['filter_pdb'])
        self.wilson_scaling = pandda_params['perform_diffraction_data_scaling']
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.datasource=datasource
        self.db=XChemDB.data_source(datasource)
        self.appendix=pandda_params['appendix']
        self.write_mean_maps=pandda_params['write_mean_map']
        self.calc_map_by = pandda_params['average_map']
        self.select_ground_state_model=''
        projectDir = self.data_directory.replace('/*', '')
        # helper script that copies/links ligand files into the pandda directories
        self.make_ligand_links='$CCP4/bin/ccp4-python %s %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),
                                                                     'helpers',
                                                                     'make_ligand_links_after_pandda.py')
                                                                     ,projectDir,self.panddas_directory)
        self.use_remote = pandda_params['use_remote']
        self.remote_string = pandda_params['remote_string']

        # a non-empty appendix means a special-purpose pandda run in its own,
        # freshly created output directory underneath the reference directory
        if self.appendix != '':
            self.panddas_directory=os.path.join(self.reference_dir,'pandda_'+self.appendix)
            if os.path.isdir(self.panddas_directory):
                os.system('/bin/rm -fr %s' %self.panddas_directory)
            os.mkdir(self.panddas_directory)
            if self.data_directory.startswith('/dls'):
                self.select_ground_state_model = 'module load ccp4\n'
            self.select_ground_state_model +='$CCP4/bin/ccp4-python %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','select_ground_state_dataset.py'),self.panddas_directory)
            self.make_ligand_links=''

    def run(self):
        # how to run pandda.analyse on large datasets
        #
        # 1) Run the normal pandda command, with the new setting, e.g.
        #    pandda.analyse data_dirs=... max_new_datasets=500
        #    This will do the analysis on the first 500 datasets and build the statistical maps - just as normal.
        #
        # 2) Run pandda with the same command:
        #    pandda.analyse data_dirs=... max_new_datasets=500
        #    This will add 500 new datasets, and process them using the existing statistical maps
        #    (this will be quicker than the original analysis). It will then merge the results of the two analyses.
        #
        # 3) Repeat 2) until you don't add any "new" datasets. Then you can build the models as normal.

        # Python 2 integer division: number of chunks of max_new_datasets
        # needed to process all datasets
        number_of_cyles=int(self.number_of_datasets)/int(self.max_new_datasets)
        if int(self.number_of_datasets) % int(self.max_new_datasets) != 0:    # modulo gives remainder after integer division
            number_of_cyles+=1
        self.Logfile.insert('will run %s rounds of pandda.analyse' %str(number_of_cyles))

        # refuse to start if a pandda.analyse job already appears to run here
        if os.path.isfile(os.path.join(self.panddas_directory,'pandda.running')):
            self.Logfile.insert('it looks as if a pandda.analyse job is currently running in: '+self.panddas_directory)
            msg = ( 'there are three possibilities:\n'
                    '1.) choose another PANDDA directory\n'
                    '2.) - check if the job is really running either on the cluster (qstat) or on your local machine\n'
                    '    - if so, be patient and wait until the job has finished\n'
                    '3.) same as 2., but instead of waiting, kill the job and remove at least the pandda.running file\n'
                    '    (or all the contents in the directory if you want to start from scratch)\n' )
            self.Logfile.insert(msg)
            return None
        else:
            # v1.2.1 - pandda.setup files should be obsolete now that pandda is part of ccp4
            # 08/10/2020 - pandda v0.2.12 installation at DLS is obsolete
            source_file = ''
            source_file += 'export XChemExplorer_DIR="' + os.getenv('XChemExplorer_DIR') + '"\n'
            # optional reference filter model passed to pandda.analyse
            if os.path.isfile(self.filter_pdb + '.pdb'):
                print('filter pdb located')
                filter_pdb=' filter.pdb='+self.filter_pdb+'.pdb'
                print('will use ' + filter_pdb + 'as a filter for pandda.analyse')
            else:
                if self.use_remote:
                    # check for the filter file on the remote host instead
                    stat_command = self.remote_string.replace("qsub'", str('stat ' + self.filter_pdb + "'"))
                    output = subprocess.Popen(stat_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    out, err = output.communicate()
                    print out
                    if 'cannot stat' in out:
                        filter_pdb = ''
                    else:
                        filter_pdb = ' filter.pdb=' + self.filter_pdb + '.pdb'
                else:
                    filter_pdb=''
            os.chdir(self.panddas_directory)
            # note: copied latest pandda.setup-sh from XCE2 installation (08/08/2017)
            # environment setup lines, only needed on the DLS filesystem
            dls = ''
            if self.data_directory.startswith('/dls'):
                dls = (
                    source_file +
                    '\n'
                    'module load pymol/1.8.2.0\n'
                    '\n'
                    'module load ccp4/7.0.072\n'
                    '\n'
                )
            Cmds = (
                '#!'+os.getenv('SHELL')+'\n' +
                '\n' +
                dls +
                'cd ' + self.panddas_directory + '\n' +
                '\n'
            )
            # collect user selections from the pandda table: datasets to ignore
            # completely (col 7), to exclude from characterisation (col 8) or
            # to exclude from z-map analysis (col 9)
            ignore = []
            char = []
            zmap = []
            for i in range(0, self.pandda_analyse_data_table.rowCount()):
                ignore_all_checkbox = self.pandda_analyse_data_table.cellWidget(i, 7)
                ignore_characterisation_checkbox = self.pandda_analyse_data_table.cellWidget(i, 8)
                ignore_zmap_checkbox = self.pandda_analyse_data_table.cellWidget(i, 9)
                if ignore_all_checkbox.isChecked():
                    ignore.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
                if ignore_characterisation_checkbox.isChecked():
                    char.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
                if ignore_zmap_checkbox.isChecked():
                    zmap.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
            print ignore

            def append_to_ignore_string(datasets_list, append_string):
                # turn a list of dataset names into 'option="a,b,c"'; an empty
                # list yields an empty string so the option is omitted entirely
                if len(datasets_list)==0:
                    append_string = ''
                for i in range(0, len(datasets_list)):
                    if i < len(datasets_list)-1:
                        append_string += str(datasets_list[i] + ',')
                    else:
                        append_string += str(datasets_list[i] +'"')
                print(append_string)
                return append_string

            ignore_string = 'ignore_datasets="'
            ignore_string = append_to_ignore_string(ignore, ignore_string)
            char_string = 'exclude_from_characterisation="'
            char_string = append_to_ignore_string(char, char_string)
            zmap_string = 'exclude_from_z_map_analysis="'
            zmap_string = append_to_ignore_string(zmap, zmap_string)

            # one pandda.analyse invocation per chunk of max_new_datasets
            for i in range(number_of_cyles):
                Cmds += (
                    'pandda.analyse '+
                    ' data_dirs="'+self.data_directory.replace('/*','')+'/*"'+
                    ' out_dir="'+self.panddas_directory+'"'
                    ' min_build_datasets='+self.min_build_datasets+
                    ' max_new_datasets='+self.max_new_datasets+
                    ' grid_spacing='+self.grid_spacing+
                    ' cpus='+self.nproc+
                    ' events.order_by='+self.sort_event+
                    filter_pdb+
                    ' pdb_style='+self.pdb_style+
                    ' mtz_style='+self.mtz_style+
                    ' lig_style=/compound/*.cif'+
                    ' apply_b_factor_scaling='+self.wilson_scaling+
                    ' write_average_map='+self.write_mean_maps +
                    ' average_map=' + self.calc_map_by +
                    ' ' +
                    ignore_string +' '+
                    char_string +' '+
                    zmap_string +' '+
                    '\n'
                )
            Cmds += self.select_ground_state_model
            Cmds += self.make_ligand_links
            Cmds += '\n'
            # copy ligand cif/pdb files from the project directories into the
            # corresponding pandda processed_datasets/*/ligand_files folders
            data_dir_string = self.data_directory.replace('/*', '')
            Cmds += str(
                'find ' + data_dir_string +
                '/*/compound -name "*.cif" | while read line; do echo ${line//"' +
                data_dir_string + '"/"' + self.panddas_directory +
                '/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
                'done; done;')
            Cmds += '\n'
            Cmds += str(
                'find ' + data_dir_string +
                '/*/compound -name "*.pdb" | while read line; do echo ${line//"' +
                data_dir_string + '"/"' + self.panddas_directory +
                '/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
                'done; done;')
            self.Logfile.insert('running pandda.analyse with the following command:\n'+Cmds)
            # write the script and execute it according to the submit mode
            f = open('pandda.sh','w')
            f.write(Cmds)
            f.close()
            self.Logfile.insert('trying to run pandda.analyse on ' + str(self.submit_mode))
            if self.submit_mode=='local machine':
                self.Logfile.insert('running PANDDA on local machine')
                os.system('chmod +x pandda.sh')
                os.system('./pandda.sh &')
            elif self.use_remote:
                # handles remote submission of pandda.analyse jobs
                # NOTE(review): the token '5' between '-N pandda' and '-l' in the
                # qsub call below looks accidental -- confirm qsub accepts it
                submission_string = self.remote_string.replace("qsub'",
                                                               str('cd ' +
                                                                   self.panddas_directory +
                                                                   '; ' +
                                                                   "qsub -P labxchem -q medium.q -N pandda 5 -l exclusive,m_mem_free=100G pandda.sh'"))
                os.system(submission_string)
                self.Logfile.insert(str('running PANDDA remotely, using: ' + submission_string))
            else:
                self.Logfile.insert('running PANDDA on cluster, using qsub...')
                os.system('qsub -P labxchem -q medium.q -N pandda -l exclusive,m_mem_free=100G pandda.sh')

        self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class giant_cluster_datasets(QtCore.QThread):
    """Qt thread that clusters all initial models by crystal form with
    giant.datasets.cluster and writes the resulting crystal form name of
    every dataset back to the database (CrystalFormName column)."""

    def __init__(self,initial_model_directory,pandda_params,xce_logfile,datasource,):
        QtCore.QThread.__init__(self)
        self.panddas_directory=pandda_params['out_dir']
        self.pdb_style=pandda_params['pdb_style']
        self.mtz_style=pandda_params['mtz_style']
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.initial_model_directory=initial_model_directory
        self.db=XChemDB.data_source(datasource)

    def run(self):
        self.emit(QtCore.SIGNAL('update_progress_bar'), 0)

        # pdb/mtz file name patterns must be set, otherwise abort
        if self.pdb_style.replace(' ','') == '':
            self.Logfile.insert('PDB style is not set in pandda.analyse!')
            self.Logfile.insert('cannot start pandda.analyse')
            self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'PDB style is not set in pandda.analyse!')
            return None

        if self.mtz_style.replace(' ','') == '':
            self.Logfile.insert('MTZ style is not set in pandda.analyse!')
            self.Logfile.insert('cannot start pandda.analyse')
            self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'MTZ style is not set in pandda.analyse!')
            return None

        # 1.) prepare output directory
        os.chdir(self.panddas_directory)
        if os.path.isdir('cluster_analysis'):
            self.Logfile.insert('removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
            self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
            os.system('/bin/rm -fr cluster_analysis 2> /dev/null')
        self.Logfile.insert('creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
        self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
        os.mkdir('cluster_analysis')
        self.emit(QtCore.SIGNAL('update_progress_bar'), 10)

        # 2.) go through project directory and make sure that all pdb files really exist
        # broken links derail the giant.cluster_mtzs_and_pdbs script
        self.Logfile.insert('cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
        self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
        os.chdir(self.initial_model_directory)
        for xtal in glob.glob('*'):
            if not os.path.isfile(os.path.join(xtal,self.pdb_style)):
                self.Logfile.insert('missing {0!s} and {1!s} for {2!s}'.format(self.pdb_style, self.mtz_style, xtal))
                os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.pdb_style))
                os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.mtz_style))
        self.emit(QtCore.SIGNAL('update_progress_bar'), 20)

        # 3.) giant.cluster_mtzs_and_pdbs
        self.Logfile.insert("running giant.cluster_mtzs_and_pdbs {0!s}/*/{1!s} pdb_regex='{2!s}/(.*)/{3!s}' out_dir='{4!s}/cluster_analysis'".format(self.initial_model_directory, self.pdb_style, self.initial_model_directory, self.pdb_style, self.panddas_directory))
        self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'running giant.cluster_mtzs_and_pdbs')
        # pick the pandda setup script matching the user's shell
        if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
            source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh')
        elif os.getenv('SHELL') == '/bin/bash':
            source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')
        else:
            source_file=''
        Cmds = (
            '#!'+os.getenv('SHELL')+'\n'
            'unset PYTHONPATH\n'
            'source '+source_file+'\n'
            "giant.datasets.cluster %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory)
        )
        os.system(Cmds)
        self.emit(QtCore.SIGNAL('update_progress_bar'), 80)

        # 4.) analyse output
        self.Logfile.insert('parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
        self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
        os.chdir('{0!s}/cluster_analysis'.format(self.panddas_directory))
        # cluster_dict: crystal form name -> [reference pdb path, sample IDs...]
        cluster_dict={}
        for out_dir in sorted(glob.glob('*')):
            if os.path.isdir(out_dir):
                cluster_dict[out_dir]=[]
                for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
                    xtal=folder[folder.rfind('/')+1:]
                    cluster_dict[out_dir].append(xtal)
        self.emit(QtCore.SIGNAL('update_progress_bar'), 90)

        # 5.) update datasource
        self.Logfile.insert('updating datasource with results from giant.cluster_mtzs_and_pdbs')
        if cluster_dict != {}:
            for key in cluster_dict:
                for xtal in cluster_dict[key]:
                    db_dict= {'CrystalFormName': key}
                    self.db.update_data_source(xtal,db_dict)

        # 6.) finish
        self.emit(QtCore.SIGNAL('update_progress_bar'), 100)
        self.Logfile.insert('finished giant.cluster_mtzs_and_pdbs')
        self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class check_if_pandda_can_run:
    """Pre-flight checks before a pandda.analyse run is started.

    Reasons why pandda cannot be run:
      - there is currently a job running in the pandda directory
      - min datasets available is too low
      - required input paramters are not complete
      - map amplitude and phase labels don't exist

    error_code values: -1 = no problem found (yet), 1 = no PDB file matches
    pdb_style, 2 = no MTZ file matches mtz_style, 3 = not enough datasets.
    """

    def __init__(self,pandda_params,xce_logfile,datasource):
        self.data_directory=pandda_params['data_dir']
        self.panddas_directory=pandda_params['out_dir']
        self.min_build_datasets=pandda_params['min_build_datasets']
        self.pdb_style=pandda_params['pdb_style']
        self.mtz_style=pandda_params['mtz_style']
        self.input_dir_structure=pandda_params['pandda_dir_structure']
        self.problem_found=False
        self.error_code=-1
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.db=XChemDB.data_source(datasource)

    def number_of_available_datasets(self):
        """Count datasets in the input directory structure that have a PDB file."""
        counter=0
        for pdbFile in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
            if os.path.isfile(pdbFile):
                counter+=1
        self.Logfile.insert('pandda.analyse: found {0!s} useable datasets'.format(counter))
        return counter

    def get_first_dataset_in_project_directory(self):
        """Return the path of the first PDB file found, or '' if none exists."""
        first_dataset=''
        for pdbFile in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
            if os.path.isfile(pdbFile):
                first_dataset=pdbFile
                break
        return first_dataset

    def compare_number_of_atoms_in_reference_vs_all_datasets(self,refData,dataset_list):
        """Compare the atom count of every dataset PDB against the reference PDB.

        Returns:
            tuple: (enumerate index of the last dataset processed, list of
            dataset names whose atom count differs from the reference).
        """
        mismatched_datasets=[]
        pdbtools=XChemUtils.pdbtools(refData)
        refPDBlist=pdbtools.get_init_pdb_as_list()
        n_atom_ref=len(refPDBlist)
        # bug fix: initialise so an empty dataset_list no longer raises
        # NameError on the return statement
        n_datasets=0
        for n_datasets,dataset in enumerate(dataset_list):
            if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
                n_atom=len(pdbtools.get_pdb_as_list(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)))
                if n_atom_ref == n_atom:
                    self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> OK'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
                if n_atom_ref != n_atom:
                    self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> ERROR'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
                    mismatched_datasets.append(dataset)
        return n_datasets,mismatched_datasets

    def get_datasets_which_fit_to_reference_file(self,ref,reference_directory,cluster_dict,allowed_unitcell_difference_percent):
        """Group datasets that share space group and similar unit cell volume
        with the given reference model.

        Appends matching sample IDs to cluster_dict[ref] (first entry is the
        reference PDB path itself) and returns the updated cluster_dict.
        """
        refStructure=XChemUtils.pdbtools(os.path.join(reference_directory,ref+'.pdb'))
        symmRef=refStructure.get_spg_number_from_pdb()
        ucVolRef=refStructure.calc_unitcell_volume_from_pdb()
        cluster_dict[ref]=[]
        cluster_dict[ref].append(os.path.join(reference_directory,ref+'.pdb'))
        for dataset in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
            datasetStructure=XChemUtils.pdbtools(dataset)
            symmDataset=datasetStructure.get_spg_number_from_pdb()
            ucVolDataset=datasetStructure.calc_unitcell_volume_from_pdb()
            if symmDataset == symmRef:
                try:
                    # relative unit cell volume difference in percent
                    difference=math.fabs(1-(float(ucVolRef)/float(ucVolDataset)))*100
                    if difference < allowed_unitcell_difference_percent:
                        sampleID=dataset.replace('/'+self.pdb_style,'')[dataset.replace('/'+self.pdb_style,'').rfind('/')+1:]
                        cluster_dict[ref].append(sampleID)
                except ZeroDivisionError:
                    continue
        return cluster_dict

    def remove_dimple_files(self,dataset_list):
        """Delete dimple pdb/mtz files of the given datasets and reset the
        corresponding Dimple* fields in the database."""
        for n_datasets,dataset in enumerate(dataset_list):
            db_dict={}
            if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
                os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style))
                self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.pdb_style))
                db_dict['DimplePathToPDB']=''
                db_dict['DimpleRcryst']=''
                db_dict['DimpleRfree']=''
                db_dict['DimpleResolutionHigh']=''
                db_dict['DimpleStatus']='pending'
            if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style)):
                os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style))
                self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.mtz_style))
                db_dict['DimplePathToMTZ']=''
            if db_dict != {}:
                self.db.update_data_source(dataset,db_dict)

    def analyse_pdb_style(self):
        """Return a warning message if no PDB file matches pdb_style, else None."""
        pdb_found=False
        for pdbFile in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
            if os.path.isfile(pdbFile):
                pdb_found=True
                break
        if not pdb_found:
            self.error_code=1
            message=self.warning_messages()
            return message

    def analyse_mtz_style(self):
        """Return a warning message if no MTZ file matches mtz_style, else None."""
        mtz_found=False
        for mtzFile in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
            if os.path.isfile(mtzFile):
                mtz_found=True
                break
        if not mtz_found:
            self.error_code=2
            message=self.warning_messages()
            return message

    def analyse_min_build_dataset(self):
        """Return a warning message if the number of MTZ files does not exceed
        the min_build_datasets threshold, else None."""
        counter=0
        for mtzFile in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
            if os.path.isfile(mtzFile):
                counter+=1
        # bug fix: min_build_datasets comes from the GUI as a string; the
        # previous int-vs-string comparison never performed a numeric check
        if counter <= int(self.min_build_datasets):
            self.error_code=3
            message=self.warning_messages()
            return message

    def warning_messages(self):
        """Translate self.error_code into a human readable message."""
        message=''
        if self.error_code==1:
            message='PDB file does not exist'
        if self.error_code==2:
            message='MTZ file does not exist'
        if self.error_code==3:
            message='Not enough datasets available'
        return message
class convert_all_event_maps_in_database(QtCore.QThread):
    """Qt thread that converts every PanDDA event map listed in the panddaTable
    into structure factors (an MTZ file) via convert_event_map_to_SF."""

    def __init__(self,initial_model_directory,xce_logfile,datasource):
        QtCore.QThread.__init__(self)
        self.xce_logfile=xce_logfile
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.initial_model_directory=initial_model_directory
        self.datasource=datasource
        self.db=XChemDB.data_source(datasource)

    def run(self):
        # select every pandda site whose event map column still contains a
        # full path (i.e. does not start with 'event', so it was not yet converted)
        sqlite = (
            'select'
            ' CrystalName,'
            ' PANDDA_site_event_map,'
            ' PANDDA_site_ligand_resname,'
            ' PANDDA_site_ligand_chain,'
            ' PANDDA_site_ligand_sequence_number,'
            ' PANDDA_site_ligand_altLoc '
            'from panddaTable '
            'where PANDDA_site_event_map not like "event%"'
        )
        print sqlite
        query=self.db.execute_statement(sqlite)
        print query
        progress_step=1
        if len(query) != 0:
            progress_step=100/float(len(query))
        else:
            progress_step=1
        progress=0
        self.emit(QtCore.SIGNAL('update_progress_bar'), progress)

        for item in query:
            print item
            xtalID=str(item[0])
            event_map=str(item[1])
            resname=str(item[2])
            chainID=str(item[3])
            resseq=str(item[4])
            altLoc=str(item[5])
            # extract the modelled ligand from refine.pdb; the conversion
            # needs it as a mask around the event density
            if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')):
                os.chdir(os.path.join(self.initial_model_directory,xtalID))
                self.Logfile.insert('extracting ligand ({0!s},{1!s},{2!s},{3!s}) from refine.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc)))
                XChemUtils.pdbtools(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')).save_specific_ligands_to_pdb(resname,chainID,resseq,altLoc)
                if os.path.isfile('ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))):
                    ligand_pdb='ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))
                    print os.path.join(self.initial_model_directory,xtalID,ligand_pdb)
                else:
                    self.Logfile.insert('could not extract ligand; trying next...')
                    continue
            else:
                self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.pdb; trying next')
                continue
            # the conversion also needs the high resolution limit of the data
            if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')):
                resolution=XChemUtils.mtztools(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')).get_high_resolution_from_mtz()
            else:
                self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.mtz; trying next')
                continue
            self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'eventMap -> SF for '+event_map)
            convert_event_map_to_SF(self.initial_model_directory,xtalID,event_map,ligand_pdb,self.xce_logfile,self.datasource,resolution).run()
            progress += progress_step
            self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class convert_event_map_to_SF:
def __init__(self,project_directory,xtalID,event_map,ligand_pdb,xce_logfile,db_file,resolution):
self.Logfile=XChemLog.updateLog(xce_logfile)
self.event_map=event_map
if not os.path.isfile(self.event_map):
self.Logfile.insert('cannot find Event map: '+self.event_map)
self.Logfile.insert('cannot convert event_map to structure factors!')
return None
self.project_directory=project_directory
self.xtalID=xtalID
self.event_map=event_map
self.ligand_pdb=ligand_pdb
self.event=event_map[event_map.rfind('/')+1:].replace('.map','').replace('.ccp4','')
self.db=XChemDB.data_source(db_file)
self.resolution=resolution
def run(self):
os.chdir(os.path.join(self.project_directory,self.xtalID))
# remove exisiting mtz file
if os.path.isfile(self.event+'.mtz'):
self.Logfile.insert('removing existing '+self.event+'.mtz')
os.system('/bin/rm '+self.event+'.mtz')
# event maps generated with pandda v0.2 or higher have the same symmetry as the crystal
# but phenix.map_to_structure_facors only accepts maps in spg P1
# therefore map is first expanded to full unit cell and spg of map then set tp p1
# other conversion option like cinvfft give for whatever reason uninterpretable maps
self.convert_map_to_p1()
# run phenix.map_to_structure_factors
self.run_phenix_map_to_structure_factors()
self.remove_and_rename_column_labels()
# check if output files exist
if not os.path.isfile('{0!s}.mtz'.format(self.event)):
self.Logfile.insert('cannot find {0!s}.mtz'.format(self.event))
else:
self.Logfile.insert('conversion successful, {0!s}.mtz exists'.format(self.event))
# update datasource with event_map_mtz information
self.update_database()
def calculate_electron_density_map(self,mtzin):
missing_columns=False
column_dict=XChemUtils.mtztools(mtzin).get_all_columns_as_dict()
if 'FWT' in column_dict['F'] and 'PHWT' in column_dict['PHS']:
labin=' labin F1=FWT PHI=PHWT\n'
elif '2FOFCWT' in column_dict['F'] and 'PH2FOFCWT' in column_dict['PHS']:
labin=' labin F1=2FOFCWT PHI=PH2FOFCWT\n'
else:
missing_columns=True
if not missing_columns:
os.chdir(os.path.join(self.project_directory,self.xtalID))
cmd = (
'fft hklin '+mtzin+' mapout 2fofc.map << EOF\n'
+labin+
'EOF\n'
)
self.Logfile.insert('calculating 2fofc map from '+mtzin)
os.system(cmd)
else:
self.Logfile.insert('cannot calculate 2fofc.map; missing map coefficients')
def prepare_conversion_script(self):
os.chdir(os.path.join(self.project_directory, self.xtalID))
# see also:
# http://www.phaser.cimr.cam.ac.uk/index.php/Using_Electron_Density_as_a_Model
if os.getcwd().startswith('/dls'):
phenix_module='module_load_phenix\n'
else:
phenix_module=''
cmd = (
'#!'+os.getenv('SHELL')+'\n'
'\n'
+phenix_module+
'\n'
'pdbset XYZIN %s XYZOUT mask_ligand.pdb << eof\n' %self.ligand_pdb+
' SPACEGROUP {0!s}\n'.format(self.space_group)+
' CELL {0!s}\n'.format((' '.join(self.unit_cell)))+
' END\n'
'eof\n'
'\n'
'ncsmask XYZIN mask_ligand.pdb MSKOUT mask_ligand.msk << eof\n'
' GRID %s\n' %(' '.join(self.gridElectronDensityMap))+
' RADIUS 10\n'
' PEAK 1\n'
'eof\n'
'\n'
'mapmask MAPIN %s MAPOUT onecell_event_map.map << eof\n' %self.event_map+
' XYZLIM CELL\n'
'eof\n'
'\n'
'maprot MAPIN onecell_event_map.map MSKIN mask_ligand.msk WRKOUT masked_event_map.map << eof\n'
' MODE FROM\n'
' SYMMETRY WORK %s\n' %self.space_group_numberElectronDensityMap+
' AVERAGE\n'
' ROTATE EULER 0 0 0\n'
' TRANSLATE 0 0 0\n'
'eof\n'
'\n'
'mapmask MAPIN masked_event_map.map MAPOUT masked_event_map_fullcell.map << eof\n'
' XYZLIM CELL\n'
' PAD 0.0\n'
'eof\n'
'\n'
'sfall HKLOUT %s.mtz MAPIN masked_event_map_fullcell.map << eof\n' %self.event+
' LABOUT FC=FC_event PHIC=PHIC_event\n'
' MODE SFCALC MAPIN\n'
' RESOLUTION %s\n' %self.resolution+
' END\n'
)
self.Logfile.insert('preparing script for conversion of Event map to SF')
f = open('eventMap2sf.sh','w')
f.write(cmd)
f.close()
os.system('chmod +x eventMap2sf.sh')
def run_conversion_script(self):
self.Logfile.insert('running conversion script...')
os.system('./eventMap2sf.sh')
def convert_map_to_p1(self):
self.Logfile.insert('running mapmask -> converting map to p1...')
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'mapmask mapin %s mapout %s_p1.map << eof\n' %(self.event_map,self.event) +
'xyzlin cell\n'
'symmetry p1\n' )
self.Logfile.insert('mapmask command:\n%s' %cmd)
os.system(cmd)
def run_phenix_map_to_structure_factors(self):
if float(self.resolution) < 1.21: # program complains if resolution is 1.2 or higher
self.resolution='1.21'
self.Logfile.insert('running phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
os.system('phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
def run_cinvfft(self,mtzin):
# mtzin is usually refine.mtz
self.Logfile.insert('running cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
os.system('cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
def remove_and_rename_column_labels(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=F-obs E2=PHIF\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: new column labels F_ampl,PHIF')
os.system(cmd)
    def remove_and_rename_column_labels_after_cinvfft(self):
        """Run CCP4 cad on <self.event>_tmp.mtz produced by cinvfft, renaming
        event.F_phi.F/event.F_phi.phi to F_ampl/PHIF in <self.event>.mtz."""
        cmd = ( '#!'+os.getenv('SHELL')+'\n'
                '\n'
                'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
                ' labin file_number 1 E1=event.F_phi.F E2=event.F_phi.phi\n'
                ' labout file_number 1 E1=F_ampl E2=PHIF\n'
                'eof\n'
                '\n' )
        self.Logfile.insert('running CAD: renaming event.F_phi.F -> F_ampl and event.F_phi.phi -> PHIF')
        os.system(cmd)
    def update_database(self):
        """Point the panddaTable row for this event map at the new MTZ file."""
        # NOTE(review): SQL assembled by string interpolation; the values come
        # from local file-system paths rather than user input, but a
        # parameterised query would still be safer -- confirm acceptable.
        sqlite = ( "update panddaTable set "
                   " PANDDA_site_event_map_mtz = '%s' " %os.path.join(self.project_directory,self.xtalID,self.event+'.mtz')+
                   " where PANDDA_site_event_map is '{0!s}' ".format(self.event_map)
                   )
        self.db.execute_statement(sqlite)
        # logged after execution (sibling methods log before running)
        self.Logfile.insert('updating data source: '+sqlite)
def clean_output_directory(self):
os.system('/bin/rm mask_targetcell.pdb')
os.system('/bin/rm mask_targetcell.msk')
os.system('/bin/rm onecell.map')
os.system('/bin/rm masked_targetcell.map')
os.system('/bin/rm masked_fullcell.map')
os.system('/bin/rm eventMap2sf.sh')
os.system('/bin/rm '+self.ligand_pdb)
class run_pandda_inspect_at_home(QtCore.QThread):
    """Background thread that prepares a pandda directory for offline
    inspection: any symlinked ligand files under
    processed_datasets/<xtal>/ligand_files are replaced with real copies so
    the directory can be taken off the file system the links point into."""
    def __init__(self,panddaDir,xce_logfile):
        # panddaDir: top-level pandda output directory
        # xce_logfile: path of the XCE logfile used for progress messages
        QtCore.QThread.__init__(self)
        self.panddaDir=panddaDir
        self.Logfile=XChemLog.updateLog(xce_logfile)
    def run(self):
        os.chdir(os.path.join(self.panddaDir,'processed_datasets'))
        # progress step sized so the bar reaches 100 after all datasets
        progress_step=1
        if len(glob.glob('*')) != 0:
            progress_step=100/float(len(glob.glob('*')))
        else:
            progress_step=1
        progress=0
        self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        self.Logfile.insert('parsing '+self.panddaDir)
        for xtal in sorted(glob.glob('*')):
            for files in glob.glob(xtal+'/ligand_files/*'):
                if os.path.islink(files):
                    self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'replacing symlink for {0!s} with real file'.format(files))
                    self.Logfile.insert('replacing symlink for {0!s} with real file'.format(files))
                    # NOTE(review): 'cp --remove-destination' is GNU-coreutils
                    # specific -- fine on the Linux systems XCE targets
                    os.system('cp --remove-destination {0!s} {1!s}/ligand_files'.format(os.path.realpath(files), xtal))
            progress += progress_step
            self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        XChemToolTips.run_pandda_inspect_at_home(self.panddaDir)
class convert_apo_structures_to_mmcif(QtCore.QThread):
    """Background thread that converts every dataset's pandda-input MTZ file
    in a pandda directory into an mmCIF structure-factor file with
    sf_convert from the pdb_extract suite."""
    def __init__(self,panddaDir,xce_logfile):
        # panddaDir: top-level pandda output directory
        # xce_logfile: path of the XCE logfile used for progress messages
        QtCore.QThread.__init__(self)
        self.panddaDir=panddaDir
        self.Logfile=XChemLog.updateLog(xce_logfile)
    def sf_convert_environment(self):
        """Return the shell snippet that sources the pdb_extract setup script
        and invokes sf_convert, preferring the Diamond (/dls) installation
        and falling back to the copy bundled with XChemExplorer."""
        pdb_extract_init = ''
        if os.path.isdir('/dls'):
            pdb_extract_init = 'source /dls/science/groups/i04-1/software/pdb-extract-prod/setup.sh\n'
            pdb_extract_init += '/dls/science/groups/i04-1/software/pdb-extract-prod/bin/sf_convert'
        else:
            pdb_extract_init = 'source ' + os.path.join(os.getenv('XChemExplorer_DIR'),
                                                        'pdb_extract/pdb-extract-prod/setup.sh') + '\n'
            # bug fix: this line used to read "+= +os.path.join(...)"; the
            # stray unary '+' applied to a str raised TypeError whenever the
            # non-/dls branch was taken
            pdb_extract_init += os.path.join(os.getenv('XChemExplorer_DIR'),
                                             'pdb_extract/pdb-extract-prod/bin/sf_convert')
        return pdb_extract_init
    def run(self):
        self.Logfile.insert('converting apo structures in pandda directory to mmcif files')
        self.Logfile.insert('changing to '+self.panddaDir)
        datasets = glob.glob(os.path.join(self.panddaDir,'processed_datasets','*'))
        # bug fix: the original guarded against an empty glob of the CURRENT
        # directory but divided by the processed_datasets count, which could
        # raise ZeroDivisionError; guard on the actual divisor instead
        if datasets:
            progress_step=100/float(len(datasets))
        else:
            progress_step=1
        progress=0
        self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        pdb_extract_init = self.sf_convert_environment()
        self.Logfile.insert('parsing '+self.panddaDir)
        for dirs in datasets:
            xtal = dirs[dirs.rfind('/')+1:]
            self.Logfile.insert('%s: converting %s to mmcif' %(xtal,xtal+'-pandda-input.mtz'))
            if os.path.isfile(os.path.join(dirs,xtal+'-pandda-input.mtz')):
                if os.path.isfile(os.path.join(dirs,xtal+'_sf.mmcif')):
                    self.Logfile.insert('%s: %s_sf.mmcif exists; skipping...' %(xtal,xtal))
                else:
                    os.chdir(dirs)
                    Cmd = (pdb_extract_init +
                           ' -o mmcif'
                           ' -sf %s' % xtal+'-pandda-input.mtz' +
                           ' -out {0!s}_sf.mmcif > {1!s}.sf_mmcif.log'.format(xtal, xtal))
                    self.Logfile.insert('running command: '+Cmd)
                    os.system(Cmd)
            progress += progress_step
            self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class check_number_of_modelled_ligands(QtCore.QThread):
    """Background thread that cross-checks the ligands modelled in each
    dataset's refine.pdb against the entries recorded in panddaTable and
    reports ligands that are missing from the database."""
    def __init__(self,project_directory,xce_logfile,db_file):
        # project_directory: root directory containing one folder per crystal
        # xce_logfile:       path of the XCE logfile
        # db_file:           path of the XCE sqlite data source
        QtCore.QThread.__init__(self)
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.project_directory=project_directory
        self.db=XChemDB.data_source(db_file)
        self.errorDict={}
    def update_errorDict(self,xtal,message):
        """Append an error message for the given crystal to self.errorDict."""
        if xtal not in self.errorDict:
            self.errorDict[xtal]=[]
        self.errorDict[xtal].append(message)
    def insert_new_row_in_panddaTable(self,xtal,ligand,site,dbDict):
        """Build a panddaTable row dict for an unassigned ligand, copying map
        and model paths from a similar, already-assigned ligand entry.

        NOTE(review): this looks like work in progress -- the dict is only
        printed, never written to the database, and eventMap/event_id etc.
        are unbound (NameError) if no matching entry exists in dbDict; the
        'PANDDA_site_ligand_altLoc' value is hard-coded to 'D'.  Confirm
        before relying on it.
        """
        resname= site[0]
        chain= site[1]
        seqnum= site[2]
        altLoc= site[3]
        x_site= site[5][0]
        y_site= site[5][1]
        z_site= site[5][2]
        resnameSimilarSite= ligand[0]
        chainSimilarSite= ligand[1]
        seqnumSimilarSite= ligand[2]
        siteList=[]
        for entry in dbDict[xtal]:
            siteList.append(str(entry[0]))
            if entry[4] == resnameSimilarSite and entry[5] == chainSimilarSite and entry[6] == seqnumSimilarSite:
                eventMap= str(entry[7])
                eventMap_mtz= str(entry[8])
                initialPDB= str(entry[9])
                initialMTZ= str(entry[10])
                event_id= str(entry[12])
                PanDDApath= str(entry[13])
        db_dict={
            'PANDDA_site_index': str(int(max(siteList))+1),
            'PANDDApath': PanDDApath,
            'PANDDA_site_ligand_id': resname+'-'+chain+'-'+seqnum,
            'PANDDA_site_ligand_resname': resname,
            'PANDDA_site_ligand_chain': chain,
            'PANDDA_site_ligand_sequence_number': seqnum,
            'PANDDA_site_ligand_altLoc': 'D',
            'PANDDA_site_event_index': event_id,
            'PANDDA_site_event_map': eventMap,
            'PANDDA_site_event_map_mtz': eventMap_mtz,
            'PANDDA_site_initial_model': initialPDB,
            'PANDDA_site_initial_mtz': initialMTZ,
            'PANDDA_site_ligand_placed': 'True',
            'PANDDA_site_x': x_site,
            'PANDDA_site_y': y_site,
            'PANDDA_site_z': z_site }
        print xtal,db_dict
    def run(self):
        """Read all ligand sites from panddaTable, then walk every crystal
        folder and flag ligands present in refine.pdb but absent from the
        table.  Emits progress-bar updates while running."""
        self.Logfile.insert('reading modelled ligands from panddaTable')
        dbDict={}
        sqlite = ( "select "
                   " CrystalName,"
                   " PANDDA_site_index,"
                   " PANDDA_site_x,"
                   " PANDDA_site_y,"
                   " PANDDA_site_z,"
                   " PANDDA_site_ligand_resname,"
                   " PANDDA_site_ligand_chain,"
                   " PANDDA_site_ligand_sequence_number,"
                   " PANDDA_site_event_map,"
                   " PANDDA_site_event_map_mtz,"
                   " PANDDA_site_initial_model,"
                   " PANDDA_site_initial_mtz,"
                   " RefinementOutcome,"
                   " PANDDA_site_event_index,"
                   " PANDDApath "
                   "from panddaTable " )
        dbEntries=self.db.execute_statement(sqlite)
        # index the query result per crystal for quick lookup below
        for item in dbEntries:
            xtal= str(item[0])
            site= str(item[1])
            x= str(item[2])
            y= str(item[3])
            z= str(item[4])
            resname= str(item[5])
            chain= str(item[6])
            seqnum= str(item[7])
            eventMap= str(item[8])
            eventMap_mtz= str(item[9])
            initialPDB= str(item[10])
            initialMTZ= str(item[11])
            outcome= str(item[12])
            event= str(item[13])
            PanDDApath= str(item[14])
            if xtal not in dbDict:
                dbDict[xtal]=[]
            dbDict[xtal].append([site,x,y,z,resname,chain,seqnum,eventMap,eventMap_mtz,initialPDB,initialMTZ,outcome,event,PanDDApath])
        os.chdir(self.project_directory)
        progress_step=1
        if len(glob.glob('*')) != 0:
            progress_step=100/float(len(glob.glob('*')))
        else:
            progress_step=1
        progress=0
        self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        for xtal in sorted(glob.glob('*')):
            if os.path.isfile(os.path.join(xtal,'refine.pdb')):
                ligands=XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).ligand_details_as_list()
                self.Logfile.insert('{0!s}: found file refine.pdb'.format(xtal))
                if ligands:
                    if os.path.isdir(os.path.join(xtal,'xceTmp')):
                        os.system('/bin/rm -fr {0!s}'.format(os.path.join(xtal,'xceTmp')))
                    os.mkdir(os.path.join(xtal,'xceTmp'))
                else:
                    self.Logfile.warning('{0!s}: cannot find ligand molecule in refine.pdb; skipping...'.format(xtal))
                    continue
                made_sym_copies=False
                ligands_not_in_panddaTable=[]
                for n,item in enumerate(ligands):
                    resnameLIG= item[0]
                    chainLIG= item[1]
                    seqnumLIG= item[2]
                    altLocLIG= item[3]
                    occupancyLig= item[4]
                    # a blank altLoc means the ligand was not modelled with
                    # pandda.inspect, so it may be missing from the table
                    if altLocLIG.replace(' ','') == '':
                        self.Logfile.insert(xtal+': found a ligand not modelled with pandda.inspect -> {0!s} {1!s} {2!s}'.format(resnameLIG, chainLIG, seqnumLIG))
                        residue_xyz = XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).get_center_of_gravity_of_residue_ish(item[1],item[2])
                        ligands[n].append(residue_xyz)
                        foundLigand=False
                        if xtal in dbDict:
                            for entry in dbDict[xtal]:
                                resnameTable=entry[4]
                                chainTable=entry[5]
                                seqnumTable=entry[6]
                                self.Logfile.insert('panddaTable: {0!s} {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
                                if resnameLIG == resnameTable and chainLIG == chainTable and seqnumLIG == seqnumTable:
                                    self.Logfile.insert('{0!s}: found ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
                                    foundLigand=True
                            if not foundLigand:
                                self.Logfile.error('{0!s}: did NOT find ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameLIG, chainLIG, seqnumLIG))
                                ligands_not_in_panddaTable.append([resnameLIG,chainLIG,seqnumLIG,altLocLIG,occupancyLig,residue_xyz])
                        else:
                            self.Logfile.warning('ligand in PDB file, but dataset not listed in panddaTable: {0!s} -> {1!s} {2!s} {3!s}'.format(xtal, item[0], item[1], item[2]))
                for entry in ligands_not_in_panddaTable:
                    self.Logfile.error('{0!s}: refine.pdb contains a ligand that is not assigned in the panddaTable: {1!s} {2!s} {3!s} {4!s}'.format(xtal, entry[0], entry[1], entry[2], entry[3]))
                # NOTE(review): the xceTmp directory created above is expected
                # to contain ligand_*_*.pdb symmetry copies, but nothing in
                # this class writes them (made_sym_copies is never used) --
                # presumably generated elsewhere; confirm.
                for site in ligands_not_in_panddaTable:
                    for files in glob.glob(os.path.join(self.project_directory,xtal,'xceTmp','ligand_*_*.pdb')):
                        mol_xyz = XChemUtils.pdbtools(files).get_center_of_gravity_of_molecule_ish()
                        # now need to check if there is a unassigned entry in panddaTable that is close
                        for entry in dbDict[xtal]:
                            distance = XChemUtils.misc().calculate_distance_between_coordinates(mol_xyz[0], mol_xyz[1],mol_xyz[2],entry[1],entry[2], entry[3])
                            self.Logfile.insert('{0!s}: {1!s} {2!s} {3!s} <---> {4!s} {5!s} {6!s}'.format(xtal, mol_xyz[0], mol_xyz[1], mol_xyz[2], entry[1], entry[2], entry[3]))
                            self.Logfile.insert('{0!s}: symm equivalent molecule: {1!s}'.format(xtal, files))
                            self.Logfile.insert('{0!s}: distance: {1!s}'.format(xtal, str(distance)))
            progress += progress_step
            self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        if self.errorDict != {}:
            self.update_errorDict('General','The aforementioned PDB files were automatically changed by XCE!\nPlease check and refine them!!!')
            self.emit(QtCore.SIGNAL('show_error_dict'), self.errorDict)
class find_event_map_for_ligand(QtCore.QThread):
    """Background thread that converts pandda event maps into MTZ files and
    reports the real-space correlation coefficient (CC) of every modelled
    LIG residue against each event map.  Uses gemmi when available and the
    mapmask/phenix.map_to_structure_factors route otherwise."""
    def __init__(self,project_directory,xce_logfile,external_software):
        QtCore.QThread.__init__(self)
        self.Logfile=XChemLog.updateLog(xce_logfile)
        self.project_directory=project_directory
        self.external_software=external_software
        # availability probe only; gemmi is re-imported where it is used
        try:
            import gemmi
            self.Logfile.insert('found gemmi library in ccp4-python')
        except ImportError:
            self.external_software['gemmi'] = False
            self.Logfile.warning('cannot import gemmi; will use phenix.map_to_structure_factors instead')
    def run(self):
        self.Logfile.insert('======== checking ligand CC in event maps ========')
        for dirs in sorted(glob.glob(os.path.join(self.project_directory,'*'))):
            xtal = dirs[dirs.rfind('/')+1:]
            if os.path.isfile(os.path.join(dirs,'refine.pdb')) and \
               os.path.isfile(os.path.join(dirs,'refine.mtz')):
                self.Logfile.insert('%s: found refine.pdb' %xtal)
                os.chdir(dirs)
                p = None
                if self.external_software['gemmi']:
                    # bug fix: the original called gemmi.read_structure() here,
                    # but 'import gemmi' in __init__ only bound the name in
                    # __init__'s local scope, so this line always raised
                    # NameError (silenced by a bare except) and permanently
                    # disabled the gemmi code path
                    try:
                        import gemmi
                        p = gemmi.read_structure('refine.pdb')
                    except Exception:
                        self.Logfile.error('gemmi library not available')
                        self.external_software['gemmi'] = False
                reso = XChemUtils.mtztools('refine.mtz').get_dmin()
                ligList = XChemUtils.pdbtools('refine.pdb').save_residues_with_resname(dirs,'LIG')
                self.Logfile.insert('%s: found %s ligands of type LIG in refine.pdb' %(xtal,str(len(ligList))))
                for maps in glob.glob(os.path.join(dirs,'*event*.native.ccp4')):
                    if self.external_software['gemmi']:
                        self.convert_map_to_sf_with_gemmi(maps,p)
                    else:
                        self.expand_map_to_p1(maps)
                        self.convert_map_to_sf(maps.replace('.ccp4','.P1.ccp4'),reso)
                summary = ''
                for lig in sorted(ligList):
                    if self.external_software['gemmi']:
                        for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native.mtz'))):
                            self.get_lig_cc(mtz,lig)
                            cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
                            summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
                    else:
                        for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native*P1.mtz'))):
                            self.get_lig_cc(mtz,lig)
                            cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
                            summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
                self.Logfile.insert('\nsummary of CC analysis:\n======================:\n'+summary)
    def expand_map_to_p1(self,emap):
        """Expand a ccp4 event map to space group P1 with mapmask."""
        self.Logfile.insert('expanding map to P1: %s' %emap)
        if os.path.isfile(emap.replace('.ccp4','.P1.ccp4')):
            self.Logfile.warning('P1 map exists; skipping...')
            return
        cmd = ( 'mapmask MAPIN %s MAPOUT %s << eof\n' %(emap,emap.replace('.ccp4','.P1.ccp4'))+
                ' XYZLIM CELL\n'
                ' PAD 0.0\n'
                ' SYMMETRY 1\n'
                'eof\n' )
        os.system(cmd)
    def convert_map_to_sf(self,emap,reso):
        """Convert a P1 ccp4 map to an MTZ file via
        phenix.map_to_structure_factors at resolution reso."""
        self.Logfile.insert('converting ccp4 map to mtz with phenix.map_to_structure_factors: %s' %emap)
        if os.path.isfile(emap.replace('.ccp4','.mtz')):
            self.Logfile.warning('mtz file of event map exists; skipping...')
            return
        cmd = ( 'module load phenix\n'
                'phenix.map_to_structure_factors %s d_min=%s\n' %(emap,reso)+
                '/bin/mv map_to_structure_factors.mtz %s' %emap.replace('.ccp4', '.mtz') )
        os.system(cmd)
    def get_lig_cc(self,mtz,lig):
        """Run phenix.get_cc_mtz_pdb to compute the CC of ligand pdb 'lig'
        against map coefficients in 'mtz'; output goes to <mtz>_CC.log."""
        self.Logfile.insert('calculating CC for %s in %s' %(lig,mtz))
        if os.path.isfile(mtz.replace('.mtz', '_CC.log')):
            self.Logfile.warning('logfile of CC analysis exists; skipping...')
            return
        cmd = ( 'module load phenix\n'
                'phenix.get_cc_mtz_pdb %s %s > %s' % (mtz, lig, mtz.replace('.mtz', '_CC.log')) )
        os.system(cmd)
    def check_lig_cc(self,log):
        """Parse the 'local' CC value from a phenix.get_cc_mtz_pdb logfile;
        return 'n/a' if the file or the line is missing."""
        cc = 'n/a'
        if os.path.isfile(log):
            for line in open(log):
                if line.startswith('local'):
                    cc = line.split()[len(line.split()) - 1]
        else:
            self.Logfile.error('logfile does not exist: %s' %log)
        return cc
    def convert_map_to_sf_with_gemmi(self,emap,p):
        """Convert a ccp4 map to an MTZ file with 'gemmi map2sf', using the
        resolution of gemmi structure object p as the d_min cutoff."""
        self.Logfile.insert('converting ccp4 map to mtz with gemmi map2sf: %s' %emap)
        if os.path.isfile(emap.replace('.ccp4','.mtz')):
            self.Logfile.warning('mtz file of event map exists; skipping...')
            return
        cmd = 'gemmi map2sf %s %s FWT PHWT --dmin=%s' %(emap,emap.replace('.ccp4','.mtz'),p.resolution)
        self.Logfile.insert('converting map with command:\n' + cmd)
        os.system(cmd)
3391400 | from typing import Any, Final, TypedDict
import numpy as np
import numpy.typing as npt
# Runtime-constructed TypedDict with one required key "Hello" of type str.
# NOTE(review): annotating the alias itself as Final[Any] hides the TypedDict
# from static checkers -- presumably intentional for dynamic use; confirm.
HelloWorldType: Final[Any] = TypedDict("HelloWorldType", {"Hello": str})
# Alias for a NumPy ndarray of (platform default) integers.
IntegerArrayType: Final[Any] = npt.NDArray[np.int_]
| StarcoderdataPython |
1680855 | from graphRL.envs.graphRL import graphRL
| StarcoderdataPython |
95602 | # Autogenerated config.py
#
# NOTE: config.py is intended for advanced users who are comfortable
# with manually migrating the config file on qutebrowser upgrades. If
# you prefer, you can also configure qutebrowser using the
# :set/:bind/:config-* commands without having to write a config.py
# file.
#
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Change the argument to True to still load settings configured via autoconfig.yml
config.load_autoconfig(False)
# Always restore open sites when qutebrowser is reopened. Without this
# option set, `:wq` (`:quit --save`) needs to be used to save open tabs
# (and restore them), while quitting qutebrowser in any other way will
# not save/restore the session. By default, this will save to the
# session which was last loaded. This behavior can be customized via the
# `session.default_name` setting.
# Type: Bool
c.auto_save.session = True
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL. With QtWebEngine 5.15.0+, paths will be stripped
# from URLs, so URL patterns using paths will not match. With
# QtWebEngine 5.15.2+, subdomains are additionally stripped as well, so
# you will typically need to set this setting for `example.com` when the
# cookie is set on `somesubdomain.example.com` for it to work properly.
# To debug issues with this setting, start qutebrowser with `--debug
# --logfilter network --debug-flag log-cookies` which will show all
# cookies being set.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'chrome-devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL. With QtWebEngine 5.15.0+, paths will be stripped
# from URLs, so URL patterns using paths will not match. With
# QtWebEngine 5.15.2+, subdomains are additionally stripped as well, so
# you will typically need to set this setting for `example.com` when the
# cookie is set on `somesubdomain.example.com` for it to work properly.
# To debug issues with this setting, start qutebrowser with `--debug
# --logfilter network --debug-flag log-cookies` which will show all
# cookies being set.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
config.set('content.cookies.accept', 'all', 'devtools://*')
# Allow websites to request geolocations.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.geolocation', False, 'https://www.google.com.ar')
# Allow websites to request geolocations.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
config.set('content.headers.accept_language', '', 'https://matchmaker.krunker.io/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version}', 'https://web.whatsapp.com/')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version} Edg/{upstream_browser_version}', 'https://accounts.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99 Safari/537.36', 'https://*.slack.com/*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'chrome-devtools://*')
# Load images automatically in web pages.
# Type: Bool
config.set('content.images', True, 'devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome-devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'devtools://*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'chrome://*/*')
# Enable JavaScript.
# Type: Bool
config.set('content.javascript.enabled', True, 'qute://*/*')
# Allow websites to record audio.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.media.audio_capture', True, 'https://discord.com')
# Allow websites to record audio and video.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.media.audio_video_capture', True, 'https://hangouts.google.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.facebook.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.netflix.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.nokia.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.reddit.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.samsung.com')
# Allow websites to show notifications.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.notifications.enabled', False, 'https://www.youtube.com')
# Allow websites to register protocol handlers via
# `navigator.registerProtocolHandler`.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
config.set('content.register_protocol_handler', False, 'https://mail.google.com?extsrc=mailto&url=%25s')
# Duration (in milliseconds) to wait before removing finished downloads.
# If set to -1, downloads are never removed.
# Type: Int
c.downloads.remove_finished = 4000
# When to show the statusbar.
# Type: String
# Valid values:
# - always: Always show the statusbar.
# - never: Always hide the statusbar.
# - in-mode: Show the statusbar when in modes other than normal mode.
c.statusbar.show = 'in-mode'
# How to behave when the last tab is closed. If the
# `tabs.tabs_are_windows` setting is set, this is ignored and the
# behavior is always identical to the `close` value.
# Type: String
# Valid values:
# - ignore: Don't do anything.
# - blank: Load a blank page.
# - startpage: Load the start page.
# - default-page: Load the default page.
# - close: Close the window.
c.tabs.last_close = 'startpage'
# When to show the tab bar.
# Type: String
# Valid values:
# - always: Always show the tab bar.
# - never: Always hide the tab bar.
# - multiple: Hide the tab bar if only one tab is open.
# - switching: Show the tab bar when switching tabs.
c.tabs.show = 'never'
# Open a new window for every tab.
# Type: Bool
c.tabs.tabs_are_windows = True
# Search engines which can be used via the address bar. Maps a search
# engine name (such as `DEFAULT`, or `ddg`) to a URL with a `{}`
# placeholder. The placeholder will be replaced by the search term, use
# `{{` and `}}` for literal `{`/`}` braces. The following further
# placeholds are defined to configure how special characters in the
# search terms are replaced by safe characters (called 'quoting'): *
# `{}` and `{semiquoted}` quote everything except slashes; this is the
# most sensible choice for almost all search engines (for the search
# term `slash/and&` this placeholder expands to `slash/and%26amp`).
# * `{quoted}` quotes all characters (for `slash/and&` this
# placeholder expands to `slash%2Fand%26amp`). * `{unquoted}` quotes
# nothing (for `slash/and&` this placeholder expands to
# `slash/and&`). * `{0}` means the same as `{}`, but can be used
# multiple times. The search engine named `DEFAULT` is used when
# `url.auto_search` is turned on and something else than a URL was
# entered to be opened. Other search engines can be used by prepending
# the search engine name to the search term, e.g. `:open google
# qutebrowser`.
# Type: Dict
c.url.searchengines = {'DEFAULT': 'https://www.google.com/search?q={}', 'am': 'https://www.amazon.co.in/s?k={}', 'aw': 'https://wiki.archlinux.org/?search={}', 'g': 'https://www.google.com/search?q={}', 're': 'https://www.reddit.com/r/{}', 'wiki': 'https://en.wikipedia.org/wiki/{}', 'yt': 'https://www.youtube.com/results?search_query={}'}
# Page(s) to open at the start.
# Type: List of FuzzyUrl, or FuzzyUrl
c.url.start_pages = '/home/nml/.config/qutebrowser/startpage/index.html'
# Default font families to use. Whenever "default_family" is used in a
# font setting, it's replaced with the fonts listed here. If set to an
# empty value, a system-specific monospace default is used.
# Type: List of Font, or Font
c.fonts.default_family = 'Inter'
# Default font size to use. Whenever "default_size" is used in a font
# setting, it's replaced with the size listed here. Valid values are
# either a float value with a "pt" suffix, or an integer value with a
# "px" suffix.
# Type: String
c.fonts.default_size = '16px'
# Font used in the completion widget.
# Type: Font
c.fonts.completion.entry = '12pt "Inter"'
# Font used in the completion categories.
# Type: Font
c.fonts.completion.category = '12pt "Inter"'
# Font used for the context menu. If set to null, the Qt default is
# used.
# Type: Font
c.fonts.contextmenu = '12pt "Inter"'
# Font used for the debugging console.
# Type: Font
c.fonts.debug_console = '12pt "Inter"'
# Font used for the downloadbar.
# Type: Font
c.fonts.downloads = '12pt "Inter"'
# Font used for the hints.
# Type: Font
c.fonts.hints = '12pt "Inter"'
# Font used in the keyhint widget.
# Type: Font
c.fonts.keyhint = '12pt "Inter"'
# Font used for info messages.
# Type: Font
c.fonts.messages.info = '12pt "Inter"'
# Font used for prompts.
# Type: Font
c.fonts.prompts = '12pt "Inter"'
# Font used in the statusbar.
# Type: Font
c.fonts.statusbar = '12pt "Inter"'
# Font family for standard fonts.
# Type: FontFamily
c.fonts.web.family.standard = 'Inter'
# Font family for sans-serif fonts.
# Type: FontFamily
c.fonts.web.family.sans_serif = 'Inter'
config.source('gruvbox.py')
# Bindings for normal mode
config.bind(',M', 'hint links spawn mpv {hint-url}')
config.bind(',m', 'spawn mpv {url}')
| StarcoderdataPython |
51947 | # -*- coding: utf-8 -*-
"""Hello module."""
import platform
import sys
def get_hello():
    """Return a greeting naming the operating system and the running
    Python major version.

    Examples: "Hello Linux, I'm Python3 or later!", or on macOS under
    Python 2: "Hello Mac OSX, I'm Python2 or earlier!".
    """
    system = platform.system()
    # platform.system() reports "Darwin" on macOS; keep the friendlier name.
    # (Deduplicates the original Windows/Darwin/other branch triplication,
    # which differed only in the OS name.)
    os_name = "Mac OSX" if system == "Darwin" else system
    if sys.version_info.major < 3:
        version_phrase = "Python2 or earlier"
    else:
        version_phrase = "Python3 or later"
    return "Hello {}, I'm {}!".format(os_name, version_phrase)
| StarcoderdataPython |
3388632 | <filename>rwb/editor/custom_notebook.py
'''Tree-based Notebook

TODO: decouple the notebook from the listbox. Let them communicate
via events, or have them work together via an interface (e.g.
notebook.configure(tablist=self.tablist)).
'''
import os
import Tkinter as tk
import ttk
from rwb.widgets import AutoScrollbar
from editor_page import EditorPage
from tablist import TabList
# orange and gray colors taken from
# http://www.colorcombos.com/color-schemes/218/ColorCombo218.html
class CustomNotebook(tk.Frame):
def __init__(self, parent, app=None):
tk.Frame.__init__(self, parent)
background = self.cget("background")
self.app = app
self.pages = []
self.nodelist = []
self.current_page = None
# within the frame are two panes; the left has a tree,
# the right shows the current page. We need a splitter
self.pw = tk.PanedWindow(self, orient="horizontal", background="#f58735",
borderwidth=1,relief='solid',
sashwidth=3)
self.pw.pack(side="top", fill="both", expand=True, pady = (4,1), padx=4)
self.left = tk.Frame(self.pw, background=background, borderwidth=0, highlightthickness=0)
self.right = tk.Frame(self.pw, background="white", width=600, height=600, borderwidth=0,
highlightthickness=0)
self.pw.add(self.left)
self.pw.add(self.right)
self.list = TabList(self.left)
vsb = AutoScrollbar(self.left, command=self.list.yview, orient="vertical")
hsb = AutoScrollbar(self.left, command=self.list.xview, orient="horizontal")
self.list.configure(xscrollcommand=hsb.set, yscrollcommand=vsb.set)
self.list.grid(row=0, column=1, sticky="nsew", padx=0, pady=0)
vsb.grid(row=0, column=0, sticky="ns")
hsb.grid(row=1, column=1, sticky="ew")
self.left.grid_rowconfigure(0, weight=1)
self.left.grid_columnconfigure(1, weight=1)
self.list.bind("<<ListboxSelect>>", self.on_list_selection)
# start with them invisible; they will reappear when needed
vsb.grid_remove()
hsb.grid_remove()
def get_current_page(self):
return self.current_page
def on_list_selection(self, event):
page = self.list.get()[1]
self._select_page(page)
def delete_page(self, page):
print __file__, "delete_page is presently under development..."
if page in self.pages:
self.pages.remove(page)
self.list.remove(page)
selection = self.list.get()
page.pack_forget()
page.destroy()
# if selection is not None and len(selection) > 0:
# self.select_page(selection[1])
def _page_name_changed(self, page):
self.list.rename(page)
def get_page_by_name(self, name):
for page in self.pages:
if page.name == name:
return page
return None
def get_page_for_path(self, path):
target_path = os.path.abspath(path)
for page in self.pages:
if page.path == target_path:
return page
return None
def add_custom_page(self, page_class):
    """Instantiate *page_class* inside the right-hand pane, register it in
    the page and tab lists, show it, and return the new page instance."""
    new_page = page_class(self.right)
    self.pages.append(new_page)
    self.list.add(new_page.name, new_page)
    self._select_page(new_page)
    return new_page
def add_page(self, path=None, name=None):
    """Create an EditorPage for *path* (a file on disk) or an unnamed
    buffer called *name*, register it, display it and return it.

    Exactly one of *path* / *name* must be supplied.

    :raises ValueError: when neither or both arguments are given
        (was a bare ``Exception``; ``ValueError`` is backward-compatible
        since existing ``except Exception`` handlers still catch it).
    """
    if path is None and name is None:
        raise ValueError("you must specify either a path or a name")
    if path is not None and name is not None:
        raise ValueError("you cannot specify both a path and a name")
    new_page = EditorPage(self.right, path, name=name, app=self.app)
    # keep the tab label in sync when the page is renamed (e.g. save-as)
    new_page.bind("<<NameChanged>>",
                  lambda event, page=new_page: self._page_name_changed(page))
    self.pages.append(new_page)
    self.list.add(new_page.name, new_page)
    self._select_page(new_page)
    return new_page
def select_page(self, page):
    """Select *page* in the tab list; the list's selection callback
    then displays it via _select_page."""
    self.list.select(page)
def _select_page(self, page):
for p in self.pages:
p.pack_forget()
if page is not None:
page.pack(fill="both", expand=True, padx=4, pady=0)
self.after_idle(page.focus)
self.current_page = page
return page
| StarcoderdataPython |
122171 | <reponame>gitter-badger/share-analytics
# Production Django settings: overrides values pulled in from the base
# settings module via the star import below.
from __future__ import absolute_import, unicode_literals
import os
import dj_database_url
from .base import *
#ALLOWED_HOSTS = ['share.osf.io/dashboard']
# NOTE(review): '*' disables Django's Host-header validation; acceptable only
# when a fronting proxy enforces the Host header -- confirm deployment setup.
ALLOWED_HOSTS = ['*']
DEBUG=False
if os.environ.get('DEIS'):
    # Deis deployments supply each connection parameter as its own
    # environment variable.
    DATABASES = {
        'default': {
            'ENGINE': os.environ.get('DATABASE_ENGINE'),
            'NAME': os.environ.get('DATABASE_NAME'),
            'USER': os.environ.get('DATABASE_USER'),
            'PASSWORD': os.environ.get('DATABASE_PASSWORD'),
            'HOST': os.environ.get('DATABASE_HOST'),
            'PORT': os.environ.get('DATABASE_PORT'),
        }
    }
else:
    # assumes DATABASES was defined by the base settings star import -- TODO confirm
    DATABASES['default'] = dj_database_url.config() # For Heroku
# Fail fast (KeyError) when the secret key is missing from the environment.
SECRET_KEY = os.environ['SECRET_KEY']
try:
    # optional machine-local overrides; absence is not an error
    from .local import *
except ImportError:
    pass
| StarcoderdataPython |
189897 | """
mixcoatl.admin.billing_code
---------------------------
Implements access to the DCM Billingcode API
"""
from mixcoatl.resource import Resource
from mixcoatl.decorators.lazy import lazy_property
from mixcoatl.decorators.validations import required_attrs
from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys
import json
class BillingCode(Resource):
    """A billing code is a budget item with optional hard and soft quotas
    against which cloud resources may be provisioned and tracked."""
    PATH = 'admin/BillingCode'
    COLLECTION_NAME = 'billingCodes'
    PRIMARY_KEY = 'billing_code_id'

    def __init__(self, billing_code_id=None, endpoint=None, *args, **kwargs):
        # Extra positional/keyword arguments are accepted for call-site
        # compatibility with other mixcoatl resources and intentionally ignored.
        Resource.__init__(self, endpoint=endpoint)
        self.__billing_code_id = billing_code_id

    @property
    def billing_code_id(self):
        """`int` - The unique id of this billing code"""
        return self.__billing_code_id

    @lazy_property
    def budget_state(self):
        """`str` - The ability of users to provision against this budget"""
        return self.__budget_state

    @lazy_property
    def current_usage(self):
        """`dict` - The month-to-data usage across all clouds for this code"""
        return self.__current_usage

    @lazy_property
    def customer(self):
        """`dict` - The customer to whom this code belongs"""
        return self.__customer

    @lazy_property
    def description(self):
        """`str` - User-friendly description of this code"""
        return self.__description

    @description.setter
    def description(self, d):
        self.__description = d

    @lazy_property
    def finance_code(self):
        """`str` - The alphanumeric identifier of this billing code"""
        return self.__finance_code

    @finance_code.setter
    def finance_code(self, f):
        self.__finance_code = f

    @lazy_property
    def name(self):
        """`str` - User-friendly name for this billing code"""
        return self.__name

    @name.setter
    def name(self, n):
        self.__name = n

    @lazy_property
    def projected_usage(self):
        """`dict` - Estimated end-of-month total to be charged against this budget"""
        return self.__projected_usage

    @lazy_property
    def status(self):
        """`str` - The status of this billing code"""
        return self.__status

    @lazy_property
    def hard_quota(self):
        """`dict` - Cutoff point where no further resources can be billed to this code"""
        return self.__hard_quota

    @hard_quota.setter
    def hard_quota(self, h):
        self.__hard_quota = h

    @lazy_property
    def soft_quota(self):
        """`dict` - Point where budget alerts will be triggered for this billing code"""
        return self.__soft_quota

    @soft_quota.setter
    def soft_quota(self, s):
        self.__soft_quota = s

    @classmethod
    def all(cls, keys_only=False, endpoint=None, **kwargs):
        """Get all visible billing codes

        .. note::

           The keys used to make the original request determine result visibility

        :param keys_only: Only return :attr:`billing_code_id` instead of :class:`BillingCode` objects
        :type keys_only: bool.
        :param details: The level of detail to return - `basic` or `extended`
        :type details: str.
        :returns: `list` - of :class:`BillingCode` or :attr:`billing_code_id`
        :raises: :class:`BillingCodeException`
        """
        r = Resource(cls.PATH, endpoint=endpoint)
        # Default to the cheap representation unless the caller asked otherwise.
        # (Original built an unused `params` dict and an if/else for this.)
        r.request_details = kwargs.get('details', 'basic')
        x = r.get()
        if r.last_error is None:
            if keys_only is True:
                return [i[camelize(cls.PRIMARY_KEY)] for i in x[cls.COLLECTION_NAME]]
            else:
                # Builds lightweight attribute holders (new classes) from each
                # response dict -- preserves the original mixcoatl behaviour.
                return [type(cls.__name__, (object,), i)
                        for i in uncamel_keys(x)[uncamel(cls.COLLECTION_NAME)]]
        else:
            raise BillingCodeException(r.last_error)

    @required_attrs(['soft_quota', 'hard_quota', 'name', 'finance_code', 'description'])
    def add(self):
        """Create this billing code in DCM.

        Requires :attr:`soft_quota`, :attr:`hard_quota`, :attr:`name`,
        :attr:`finance_code` and :attr:`description` to be set; quotas are
        submitted in USD and the code is created ACTIVE.

        :returns: the raw API response of the create call
        :raises: :class:`BillingCodeAddException`
        """
        payload = {"addBillingCode": [{
            "softQuota": {"value": self.soft_quota, "currency": "USD"},
            "hardQuota": {"value": self.hard_quota, "currency": "USD"},
            "status": "ACTIVE",
            "name": self.name,
            "financeCode": self.finance_code,
            "description": self.description}]}
        response = self.post(data=json.dumps(payload))
        if self.last_error is None:
            return response
        else:
            raise BillingCodeAddException(self.last_error)

    @required_attrs(['billing_code_id'])
    def destroy(self, reason, replacement_code):
        """Destroy billing code with a specified reason :attr:`reason`

        :param reason: The reason of destroying the billing code.
        :type reason: str.
        :param replacement_code: The replacement code.
        :type replacement_code: int.
        :returns: bool -- Result of API call
        :raises: :class:`BillingCodeDestroyException`
        """
        p = self.PATH + "/" + str(self.billing_code_id)
        qopts = {'reason': reason, 'replacementCode': replacement_code}
        self.delete(p, params=qopts)
        if self.last_error is None:
            return True
        else:
            raise BillingCodeDestroyException(self.last_error)
class BillingCodeException(Exception):
    """Base error for BillingCode API failures.

    Derives from ``Exception`` (not ``BaseException``) so that conventional
    ``except Exception`` handlers catch it, per PEP 8.  Backward-compatible:
    ``Exception`` is itself a ``BaseException`` subclass.
    """
    pass
class BillingCodeAddException(BillingCodeException):
    """Raised when creating a billing code via the API fails."""
    pass
class BillingCodeDestroyException(BillingCodeException):
    """Raised when destroying a billing code via the API fails."""
    pass
| StarcoderdataPython |
1602188 | <filename>chamber/config.py
from django.conf import settings as django_settings
# Fallback values for every supported CHAMBER_* setting.  Entries may be
# plain values or callables taking the Settings instance (see Settings below).
DEFAULTS = {
    'MAX_FILE_UPLOAD_SIZE': 20,  # presumably megabytes -- TODO confirm against usage
    'MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME': None,
    'DEFAULT_IMAGE_ALLOWED_CONTENT_TYPES': {'image/jpeg', 'image/png', 'image/gif'},
    'PRIVATE_S3_STORAGE_URL_EXPIRATION': 3600,  # seconds
    # AWS defaults are resolved from plain Django settings once, at import time
    'AWS_S3_ON': getattr(django_settings, 'AWS_S3_ON', False),
    'AWS_REGION': getattr(django_settings, 'AWS_REGION', None),
}
class Settings:
    """Attribute-style access to CHAMBER_* configuration.

    Looking up ``settings.FOO`` returns ``CHAMBER_FOO`` from the Django
    settings when defined, otherwise the entry from ``DEFAULTS``; callable
    defaults are invoked with this instance before use.  Unknown names
    raise :class:`AttributeError`.
    """

    def __getattr__(self, attr):
        if attr in DEFAULTS:
            fallback = DEFAULTS[attr]
            if callable(fallback):
                fallback = fallback(self)
            return getattr(django_settings, 'CHAMBER_{}'.format(attr), fallback)
        raise AttributeError('Invalid CHAMBER setting: "{}"'.format(attr))
settings = Settings()
| StarcoderdataPython |
3216876 | <reponame>codilime/contrail-controller-arch
#
# Copyright (c) 2013,2014 Juniper Networks, Inc. All rights reserved.
#
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import coverage
import cgitb
cgitb.enable(format='text')
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import unittest
import re
import json
import copy
import inspect
import pycassa
import kombu
import requests
import bottle
from vnc_api.vnc_api import *
import vnc_api.gen.vnc_api_test_gen
from vnc_api.gen.resource_test import *
import cfgm_common
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
# Module-level logger; verbose by default (handlers are attached per test
# instance in TestIpAlloc.__init__).
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TestIpAlloc(test_case.ApiServerTestCase):
def __init__(self, *args, **kwargs):
    """Forward construction to ApiServerTestCase and mirror logs to stderr."""
    # NOTE(review): a new StreamHandler is attached to the module logger on
    # every instantiation, so constructing several test instances duplicates
    # log output -- consider guarding with logger.hasHandlers().
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    logger.addHandler(ch)
    super(TestIpAlloc, self).__init__(*args, **kwargs)
def test_subnet_quota(self):
    """Verify per-project subnet quota enforcement.

    Walks through: unlimited quota with zero and with four subnets,
    OverQuota rejection on network create and on update once a quota is
    set, and success after the quota is raised.
    """
    domain = Domain('v4-domain')
    self._vnc_lib.domain_create(domain)
    # Create Project
    project = Project('v4-proj', domain)
    self._vnc_lib.project_create(project)
    project = self._vnc_lib.project_read(fq_name=['v4-domain', 'v4-proj'])
    ipam1_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 28))
    ipam2_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.17.32', 28))
    ipam3_sn_v4 = IpamSubnetType(subnet=SubnetType('192.168.3.11', 28))
    ipam4_sn_v4 = IpamSubnetType(subnet=SubnetType('192.168.3.11', 28))
    #create two ipams
    ipam1 = NetworkIpam('ipam1', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam1)
    ipam1 = self._vnc_lib.network_ipam_read(fq_name=['v4-domain',
                                                     'v4-proj', 'ipam1'])
    ipam2 = NetworkIpam('ipam2', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam2)
    ipam2 = self._vnc_lib.network_ipam_read(fq_name=['v4-domain',
                                                     'v4-proj', 'ipam2'])
    #create virtual network with unlimited subnet quota without any subnets
    vn = VirtualNetwork('my-vn', project)
    vn.add_network_ipam(ipam1, VnSubnetsType([]))
    vn.add_network_ipam(ipam2, VnSubnetsType([]))
    self._vnc_lib.virtual_network_create(vn)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    #inspect net_obj to make sure we have 0 cidrs
    ipam_refs = net_obj.__dict__.get('network_ipam_refs', [])

    def _get_total_subnets_count(ipam_refs):
        # Count only ipam subnet entries that carry a concrete ip_prefix.
        subnet_count = 0
        for ipam_ref in ipam_refs:
            vnsn_data = ipam_ref['attr'].__dict__
            ipam_subnets = vnsn_data.get('ipam_subnets', [])
            for ipam_subnet in ipam_subnets:
                subnet_dict = ipam_subnet.__dict__.get('subnet', {})
                if 'ip_prefix' in subnet_dict.__dict__:
                    subnet_count += 1
        return subnet_count

    total_subnets = _get_total_subnets_count(ipam_refs)
    if total_subnets:
        raise Exception("No Subnets expected in Virtual Network")
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    #keep subnet quota unlimited and have 4 cidrs in two ipams
    vn = VirtualNetwork('my-vn', project)
    vn.add_network_ipam(ipam1, VnSubnetsType([ipam1_sn_v4, ipam3_sn_v4]))
    vn.add_network_ipam(ipam2, VnSubnetsType([ipam2_sn_v4, ipam4_sn_v4]))
    self._vnc_lib.virtual_network_create(vn)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    #inspect net_obj to make sure we have 4 cidrs
    ipam_refs = net_obj.__dict__.get('network_ipam_refs', [])
    total_subnets = _get_total_subnets_count(ipam_refs)
    if total_subnets != 4:
        raise Exception("4 Subnets expected in Virtual Network")
    #Delete vn and create new one with a subnet quota of 1
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    quota_type = QuotaType()
    quota_type.set_subnet(1)
    project.set_quota(quota_type)
    self._vnc_lib.project_update(project)
    vn = VirtualNetwork('my-new-vn', project)
    vn.add_network_ipam(ipam1, VnSubnetsType([ipam1_sn_v4]))
    vn.add_network_ipam(ipam2, VnSubnetsType([ipam2_sn_v4]))
    # two subnets against a quota of 1 must be rejected
    with ExpectedException(cfgm_common.exceptions.OverQuota):
        self._vnc_lib.virtual_network_create(vn)
    #increase subnet quota to 2, and network_create will go through..
    quota_type.set_subnet(2)
    project.set_quota(quota_type)
    self._vnc_lib.project_update(project)
    self._vnc_lib.virtual_network_create(vn)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    ipam_refs = net_obj.__dict__.get('network_ipam_refs', [])
    total_subnets = _get_total_subnets_count(ipam_refs)
    if total_subnets != 2:
        raise Exception("2 Subnets expected in Virtual Network")
    #test quota through network_update
    vn.add_network_ipam(ipam1, VnSubnetsType([ipam1_sn_v4, ipam3_sn_v4]))
    vn.add_network_ipam(ipam2, VnSubnetsType([ipam2_sn_v4]))
    with ExpectedException(cfgm_common.exceptions.OverQuota):
        self._vnc_lib.virtual_network_update(vn)
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    # raise the quota to 4; both create and update should now succeed
    quota_type.set_subnet(4)
    project.set_quota(quota_type)
    self._vnc_lib.project_update(project)
    vn = VirtualNetwork('my-new-vn', project)
    vn.add_network_ipam(ipam1, VnSubnetsType([ipam1_sn_v4]))
    vn.add_network_ipam(ipam2, VnSubnetsType([ipam2_sn_v4]))
    self._vnc_lib.virtual_network_create(vn)
    vn.add_network_ipam(ipam1, VnSubnetsType([ipam1_sn_v4, ipam3_sn_v4]))
    vn.add_network_ipam(ipam2, VnSubnetsType([ipam2_sn_v4, ipam4_sn_v4]))
    self._vnc_lib.virtual_network_update(vn)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    ipam_refs = net_obj.__dict__.get('network_ipam_refs', [])
    total_subnets = _get_total_subnets_count(ipam_refs)
    if total_subnets != 4:
        raise Exception("4 Subnets expected in Virtual Network")
    # cleanup
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam1.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam2.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
def test_subnet_alloc_unit(self):
    """Verify subnet alloc_unit validation and alloc_unit-aligned allocation.

    Rejects alloc_unit values that are not powers of two, rejects an
    explicit address that is not aligned to the alloc_unit, then checks
    the v4/v6 addresses handed out with alloc_unit=4.
    """
    # Create Domain
    domain = Domain('my-v4-v6-domain')
    self._vnc_lib.domain_create(domain)
    logger.debug('Created domain ')
    # Create Project
    project = Project('my-v4-v6-proj', domain)
    self._vnc_lib.project_create(project)
    logger.debug('Created Project')
    # Create NetworkIpam
    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam)
    logger.debug('Created network ipam')
    ipam = self._vnc_lib.network_ipam_read(fq_name=['my-v4-v6-domain',
                                                    'my-v4-v6-proj',
                                                    'default-network-ipam'])
    logger.debug('Read network ipam')
    # create ipv4 subnet with alloc_unit not power of 2
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24),
                                alloc_unit=3)
    vn = VirtualNetwork('my-v4-v6-vn', project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4]))
    # NOTE(review): a silent pass here means the test does not fail if the
    # server unexpectedly accepts the bad alloc_unit -- TODO confirm intent.
    try:
        self._vnc_lib.virtual_network_create(vn)
    except HttpError:
        logger.debug('alloc-unit is not power of 2')
        pass
    vn.del_network_ipam(ipam)
    # create ipv6 subnet with alloc_unit not power of 2
    ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120),
                                alloc_unit=3)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v6]))
    try:
        self._vnc_lib.virtual_network_create(vn)
    except HttpError:
        logger.debug('alloc-unit is not power of 2')
        pass
    vn.del_network_ipam(ipam)
    # Create subnets
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24),
                                alloc_unit=4)
    ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120),
                                alloc_unit=4)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4, ipam_sn_v6]))
    self._vnc_lib.virtual_network_create(vn)
    logger.debug('Created Virtual Network object %s', vn.uuid)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    # Create v4 Ip objects
    ipv4_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4')
    ipv4_obj1.uuid = ipv4_obj1.name
    logger.debug('Created Instance IPv4 object 1 %s', ipv4_obj1.uuid)
    ipv4_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4')
    ipv4_obj2.uuid = ipv4_obj2.name
    logger.debug('Created Instance IPv4 object 2 %s', ipv4_obj2.uuid)
    # Create v6 Ip object
    ipv6_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v6')
    ipv6_obj1.uuid = ipv6_obj1.name
    logger.debug('Created Instance IPv6 object 2 %s', ipv6_obj1.uuid)
    ipv6_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v6')
    ipv6_obj2.uuid = ipv6_obj2.name
    logger.debug('Created Instance IPv6 object 2 %s', ipv6_obj2.uuid)
    # Create VM
    vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
    vm_inst_obj1.uuid = vm_inst_obj1.name
    self._vnc_lib.virtual_machine_create(vm_inst_obj1)
    id_perms = IdPermsType(enable=True)
    port_obj1 = VirtualMachineInterface(
        str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
    port_obj1.uuid = port_obj1.name
    port_obj1.set_virtual_network(vn)
    ipv4_obj1.set_virtual_machine_interface(port_obj1)
    ipv4_obj1.set_virtual_network(net_obj)
    ipv4_obj2.set_virtual_machine_interface(port_obj1)
    ipv4_obj2.set_virtual_network(net_obj)
    ipv6_obj1.set_virtual_machine_interface(port_obj1)
    ipv6_obj1.set_virtual_network(net_obj)
    ipv6_obj2.set_virtual_machine_interface(port_obj1)
    ipv6_obj2.set_virtual_network(net_obj)
    port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
    logger.debug('Wrong ip address request,not aligned with alloc-unit')
    ipv4_obj1.set_instance_ip_address('172.16.58.3')
    with ExpectedException(BadRequest,
        'Virtual-Network\(my-v4-v6-domain:my-v4-v6-proj:my-v4-v6-vn:172.16.58.3/24\) has invalid alloc_unit\(4\) in subnet\(172.16.58.3/24\)') as e:
        ipv4_id1 = self._vnc_lib.instance_ip_create(ipv4_obj1)
    ipv4_obj1.set_instance_ip_address(None)
    logger.debug('Allocating an IP4 address for first VM')
    ipv4_id1 = self._vnc_lib.instance_ip_create(ipv4_obj1)
    ipv4_obj1 = self._vnc_lib.instance_ip_read(id=ipv4_id1)
    ipv4_addr1 = ipv4_obj1.get_instance_ip_address()
    logger.debug(' got v4 IP Address for first instance %s', ipv4_addr1)
    if ipv4_addr1 != '192.168.3.11':
        # NOTE(review): mismatches are only logged, never asserted -- TODO confirm
        logger.debug('Allocation failed, expected v4 IP Address 192.168.3.11')
    logger.debug('Allocating an IPV4 address for second VM')
    ipv4_id2 = self._vnc_lib.instance_ip_create(ipv4_obj2)
    ipv4_obj2 = self._vnc_lib.instance_ip_read(id=ipv4_id2)
    ipv4_addr2 = ipv4_obj2.get_instance_ip_address()
    logger.debug(' got v6 IP Address for first instance %s', ipv4_addr2)
    if ipv4_addr2 != '192.168.127.12':
        logger.debug('Allocation failed, expected v4 IP Address 192.168.127.12')
    logger.debug('Allocating an IP6 address for first VM')
    ipv6_id1 = self._vnc_lib.instance_ip_create(ipv6_obj1)
    ipv6_obj1 = self._vnc_lib.instance_ip_read(id=ipv6_id1)
    ipv6_addr1 = ipv6_obj1.get_instance_ip_address()
    logger.debug(' got v6 IP Address for first instance %s', ipv6_addr1)
    if ipv6_addr1 != 'fd14::f8':
        logger.debug('Allocation failed, expected v6 IP Address fd14::f8')
    logger.debug('Allocating an IP6 address for second VM')
    ipv6_id2 = self._vnc_lib.instance_ip_create(ipv6_obj2)
    ipv6_obj2 = self._vnc_lib.instance_ip_read(id=ipv6_id2)
    ipv6_addr2 = ipv6_obj2.get_instance_ip_address()
    logger.debug(' got v6 IP Address for first instance %s', ipv6_addr2)
    if ipv6_addr2 != 'fd14::f4':
        logger.debug('Allocation failed, expected v6 IP Address fd14::f4')
    #cleanup
    logger.debug('Cleaning up')
    self._vnc_lib.instance_ip_delete(id=ipv4_id1)
    self._vnc_lib.instance_ip_delete(id=ipv4_id2)
    self._vnc_lib.instance_ip_delete(id=ipv6_id1)
    self._vnc_lib.instance_ip_delete(id=ipv6_id2)
    self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
    self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
    self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_ip_alloction(self):
    """Allocate one v4 and one v6 instance IP on a dual-stack network and
    log each subnet's default gateway."""
    # Create Domain
    domain = Domain('my-v4-v6-domain')
    self._vnc_lib.domain_create(domain)
    logger.debug('Created domain ')
    # Create Project
    project = Project('my-v4-v6-proj', domain)
    self._vnc_lib.project_create(project)
    logger.debug('Created Project')
    # Create NetworkIpam
    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam)
    logger.debug('Created network ipam')
    ipam = self._vnc_lib.network_ipam_read(fq_name=['my-v4-v6-domain', 'my-v4-v6-proj',
                                                    'default-network-ipam'])
    logger.debug('Read network ipam')
    # Create subnets
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24))
    ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120))
    # Create VN
    vn = VirtualNetwork('my-v4-v6-vn', project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4, ipam_sn_v6]))
    self._vnc_lib.virtual_network_create(vn)
    logger.debug('Created Virtual Network object %s', vn.uuid)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    # Create v4 Ip object
    ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4')
    ip_obj1.uuid = ip_obj1.name
    logger.debug('Created Instance IP object 1 %s', ip_obj1.uuid)
    # Create v6 Ip object
    ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v6')
    ip_obj2.uuid = ip_obj2.name
    logger.debug('Created Instance IP object 2 %s', ip_obj2.uuid)
    # Create VM
    vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
    vm_inst_obj1.uuid = vm_inst_obj1.name
    self._vnc_lib.virtual_machine_create(vm_inst_obj1)
    id_perms = IdPermsType(enable=True)
    port_obj1 = VirtualMachineInterface(
        str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
    port_obj1.uuid = port_obj1.name
    port_obj1.set_virtual_network(vn)
    ip_obj1.set_virtual_machine_interface(port_obj1)
    ip_obj1.set_virtual_network(net_obj)
    ip_obj2.set_virtual_machine_interface(port_obj1)
    ip_obj2.set_virtual_network(net_obj)
    port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
    logger.debug('Allocating an IP4 address for first VM')
    ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
    ip_obj1 = self._vnc_lib.instance_ip_read(id=ip_id1)
    ip_addr1 = ip_obj1.get_instance_ip_address()
    logger.debug(' got v4 IP Address for first instance %s', ip_addr1)
    if ip_addr1 != '192.168.3.11':
        # NOTE(review): mismatch is only logged, not asserted -- TODO confirm
        logger.debug('Allocation failed, expected v4 IP Address 192.168.3.11')
    logger.debug('Allocating an IP6 address for first VM')
    ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
    ip_obj2 = self._vnc_lib.instance_ip_read(id=ip_id2)
    ip_addr2 = ip_obj2.get_instance_ip_address()
    logger.debug(' got v6 IP Address for first instance %s', ip_addr2)
    if ip_addr2 != 'fd14::fd':
        logger.debug('Allocation failed, expected v6 IP Address fd14::fd')
    # Read gateway ip address
    logger.debug('Read default gateway ip address' )
    ipam_refs = net_obj.get_network_ipam_refs()
    for ipam_ref in ipam_refs:
        subnets = ipam_ref['attr'].get_ipam_subnets()
        for subnet in subnets:
            logger.debug('Gateway for subnet (%s/%s) is (%s)' %(subnet.subnet.get_ip_prefix(),
                         subnet.subnet.get_ip_prefix_len(),
                         subnet.get_default_gateway()))
    #cleanup
    logger.debug('Cleaning up')
    self._vnc_lib.instance_ip_delete(id=ip_id1)
    self._vnc_lib.instance_ip_delete(id=ip_id2)
    self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
    self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
    self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_ip_alloction_pools(self):
    """Allocate v4/v6 instance IPs from explicit allocation pools and check
    the first address handed out comes from the start of each pool."""
    # Create Domain
    domain = Domain('my-v4-v6-domain')
    self._vnc_lib.domain_create(domain)
    logger.debug('Created domain ')
    # Create Project
    project = Project('my-v4-v6-proj', domain)
    self._vnc_lib.project_create(project)
    logger.debug('Created Project')
    # Create NetworkIpam
    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam)
    logger.debug('Created network ipam')
    ipam = self._vnc_lib.network_ipam_read(fq_name=['my-v4-v6-domain', 'my-v4-v6-proj',
                                                    'default-network-ipam'])
    logger.debug('Read network ipam')
    # Create subnets, each restricted to an explicit allocation pool and
    # allocating from the start of the pool
    alloc_pool_list = []
    alloc_pool_list.append(AllocationPoolType(start='172.16.31.10', end='192.168.127.12'))
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24),
                                allocation_pools=alloc_pool_list,
                                addr_from_start=True)
    alloc_pool_list_v6 = []
    alloc_pool_list_v6.append(AllocationPoolType(start='fd14::30', end='fd14::40'))
    ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120),
                                allocation_pools=alloc_pool_list_v6,
                                addr_from_start=True)
    # Create VN
    vn = VirtualNetwork('my-v4-v6-vn', project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4, ipam_sn_v6]))
    self._vnc_lib.virtual_network_create(vn)
    logger.debug('Created Virtual Network object %s', vn.uuid)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    # Create v4 Ip object
    ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4')
    ip_obj1.uuid = ip_obj1.name
    logger.debug('Created Instance IP object 1 %s', ip_obj1.uuid)
    # Create v6 Ip object
    ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v6')
    ip_obj2.uuid = ip_obj2.name
    logger.debug('Created Instance IP object 2 %s', ip_obj2.uuid)
    # Create VM
    vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
    vm_inst_obj1.uuid = vm_inst_obj1.name
    self._vnc_lib.virtual_machine_create(vm_inst_obj1)
    id_perms = IdPermsType(enable=True)
    port_obj1 = VirtualMachineInterface(
        str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
    port_obj1.uuid = port_obj1.name
    port_obj1.set_virtual_network(vn)
    ip_obj1.set_virtual_machine_interface(port_obj1)
    ip_obj1.set_virtual_network(net_obj)
    ip_obj2.set_virtual_machine_interface(port_obj1)
    ip_obj2.set_virtual_network(net_obj)
    port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
    logger.debug('Allocating an IP4 address for first VM')
    ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
    ip_obj1 = self._vnc_lib.instance_ip_read(id=ip_id1)
    ip_addr1 = ip_obj1.get_instance_ip_address()
    logger.debug('got v4 IP Address for first instance %s', ip_addr1)
    if ip_addr1 != '172.16.31.10':
        # NOTE(review): mismatch is only logged, not asserted -- TODO confirm
        logger.debug('Allocation failed, expected v4 IP Address 172.16.31.10')
    logger.debug('Allocating an IP6 address for first VM')
    ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
    ip_obj2 = self._vnc_lib.instance_ip_read(id=ip_id2)
    ip_addr2 = ip_obj2.get_instance_ip_address()
    logger.debug('got v6 IP Address for first instance %s', ip_addr2)
    if ip_addr2 != 'fd14::30':
        logger.debug('Allocation failed, expected v6 IP Address fd14::30')
    # Read gateway ip address
    logger.debug('Read default gateway ip address')
    ipam_refs = net_obj.get_network_ipam_refs()
    for ipam_ref in ipam_refs:
        subnets = ipam_ref['attr'].get_ipam_subnets()
        for subnet in subnets:
            logger.debug('Gateway for subnet (%s/%s) is (%s)' %(subnet.subnet.get_ip_prefix(),
                         subnet.subnet.get_ip_prefix_len(),
                         subnet.get_default_gateway()))
    #cleanup
    logger.debug('Cleaning up')
    #cleanup subnet and allocation pools
    self._vnc_lib.instance_ip_delete(id=ip_id1)
    self._vnc_lib.instance_ip_delete(id=ip_id2)
    self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
    self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
    self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_subnet_gateway_ip_alloc(self):
    """Create a dual-stack network with allocation pools and check the
    default gateway chosen for each subnet."""
    # Create Domain
    domain = Domain('my-v4-v6-domain')
    self._vnc_lib.domain_create(domain)
    logger.debug('Created domain ')
    # Create Project
    project = Project('my-v4-v6-proj', domain)
    self._vnc_lib.project_create(project)
    logger.debug('Created Project')
    # Create NetworkIpam
    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam)
    logger.debug('Created network ipam')
    ipam = self._vnc_lib.network_ipam_read(fq_name=['my-v4-v6-domain', 'my-v4-v6-proj',
                                                    'default-network-ipam'])
    logger.debug('Read network ipam')
    # Create subnets
    alloc_pool_list = []
    alloc_pool_list.append(AllocationPoolType(start='172.16.31.10', end='192.168.127.12'))
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24),
                                allocation_pools=alloc_pool_list,
                                addr_from_start=True)
    alloc_pool_list_v6 = []
    alloc_pool_list_v6.append(AllocationPoolType(start='fd14::30', end='fd14::40'))
    ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120),
                                allocation_pools=alloc_pool_list_v6,
                                addr_from_start=True)
    # Create VN
    vn = VirtualNetwork('my-v4-v6-vn', project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4, ipam_sn_v6]))
    self._vnc_lib.virtual_network_create(vn)
    logger.debug('Created Virtual Network object %s', vn.uuid)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    # Read gateway ip address
    logger.debug('Read default gateway ip address')
    ipam_refs = net_obj.get_network_ipam_refs()
    for ipam_ref in ipam_refs:
        subnets = ipam_ref['attr'].get_ipam_subnets()
        for subnet in subnets:
            logger.debug('Gateway for subnet (%s/%s) is (%s)' %(subnet.subnet.get_ip_prefix(),
                         subnet.subnet.get_ip_prefix_len(),
                         subnet.get_default_gateway()))
            # NOTE(review): gateway mismatches are only logged, not asserted
            if subnet.subnet.get_ip_prefix() == '172.16.58.3':
                if subnet.get_default_gateway() != '192.168.3.11':
                    logger.debug(' Failure, expected gateway ip address 192.168.3.11')
            if subnet.subnet.get_ip_prefix() == 'fd14::':
                if subnet.get_default_gateway() != 'fd14::1':
                    logger.debug(' Failure, expected gateway ip address fd14::1')
    #cleanup
    logger.debug('Cleaning up')
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
    self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_bulk_ip_alloc_free(self):
    """Exercise the bulk ip-alloc / ip-free REST endpoints.

    Allocates 10 addresses in one request, checks the per-subnet IP count
    (3 reserved: first, last and gateway), then frees the addresses in two
    batches of 5 and verifies the count returns to zero.
    """
    # Create Domain
    domain = Domain('v4-domain')
    self._vnc_lib.domain_create(domain)
    logger.debug('Created domain ')
    # Create Project
    project = Project('v4-proj', domain)
    self._vnc_lib.project_create(project)
    logger.debug('Created Project')
    # Create NetworkIpam
    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    self._vnc_lib.network_ipam_create(ipam)
    logger.debug('Created network ipam')
    ipam = self._vnc_lib.network_ipam_read(fq_name=['v4-domain', 'v4-proj',
                                                    'default-network-ipam'])
    logger.debug('Read network ipam')
    # Create subnets
    ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24))
    # Create VN
    vn = VirtualNetwork('v4-vn', project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4]))
    self._vnc_lib.virtual_network_create(vn)
    logger.debug('Created Virtual Network object %s', vn.uuid)
    net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
    # request to allocate 10 ip address using bulk allocation api
    data = {"subnet" : "172.16.58.3/24", "count" : 10}
    url = '/virtual-network/%s/ip-alloc' %(vn.uuid)
    rv_json = self._vnc_lib._request_server(rest.OP_POST, url,
                                            json.dumps(data))
    ret_data = json.loads(rv_json)
    ret_ip_addr = ret_data['ip_addr']
    expected_ip_addr = ['192.168.3.11', '172.16.31.10', '172.16.17.32',
                        '172.16.58.3', '192.168.3.11', '172.16.31.10',
                        '172.16.31.10', '192.168.3.11', '192.168.127.12',
                        '192.168.127.12']
    self.assertEqual(len(expected_ip_addr), len(ret_ip_addr))
    for idx in range(len(expected_ip_addr)):
        self.assertEqual(expected_ip_addr[idx], ret_ip_addr[idx])
    logger.debug('Verify bulk ip address allocation')
    # Find out number of allocated ips from given VN/subnet
    # We should not get 13 ip allocated from this subnet
    # 10 user request + 3 reserved ips (first, last and gw).
    data = {"subnet_list" : ["172.16.58.3/24"]}
    url = '/virtual-network/%s/subnet-ip-count' %(vn.uuid)
    rv_json = self._vnc_lib._request_server(rest.OP_POST, url,
                                            json.dumps(data))
    ret_ip_count = json.loads(rv_json)['ip_count_list'][0]
    allocated_ip = ret_ip_count - 3
    self.assertEqual(allocated_ip, 10)
    #free 5 allocated ip addresses from vn
    data = {"subnet" : "172.16.58.3/24",
            "ip_addr" : ['192.168.3.11', '172.16.31.10', '172.16.17.32',
                         '172.16.58.3', '192.168.3.11']}
    url = '/virtual-network/%s/ip-free' %(vn.uuid)
    self._vnc_lib._request_server(rest.OP_POST, url, json.dumps(data))
    # Find out number of allocated ips from given VN/subnet
    # We should get 5+3 ip allocated from this subnet
    data = {"subnet_list" : ["172.16.58.3/24"]}
    url = '/virtual-network/%s/subnet-ip-count' %(vn.uuid)
    rv_json = self._vnc_lib._request_server(rest.OP_POST, url,
                                            json.dumps(data))
    ret_ip_count = json.loads(rv_json)['ip_count_list'][0]
    allocated_ip = ret_ip_count - 3
    self.assertEqual(allocated_ip, 5)
    #free remaining 5 allocated ip addresses from vn
    data = {"subnet" : "172.16.58.3/24",
            "ip_addr": ['172.16.31.10', '172.16.31.10', '192.168.3.11',
                        '192.168.127.12', '192.168.127.12']}
    url = '/virtual-network/%s/ip-free' %(vn.uuid)
    self._vnc_lib._request_server(rest.OP_POST, url, json.dumps(data))
    data = {"subnet_list" : ["172.16.58.3/24"]}
    url = '/virtual-network/%s/subnet-ip-count' %(vn.uuid)
    rv_json = self._vnc_lib._request_server(rest.OP_POST, url,
                                            json.dumps(data))
    ret_ip_count = json.loads(rv_json)['ip_count_list'][0]
    allocated_ip = ret_ip_count - 3
    self.assertEqual(allocated_ip, 0)
    logger.debug('Verified bulk ip free')
    # cleanup
    self._vnc_lib.virtual_network_delete(id=vn.uuid)
    self._vnc_lib.network_ipam_delete(id=ipam.uuid)
    self._vnc_lib.project_delete(id=project.uuid)
    self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_v4_ip_allocation_exhaust(self):
# Create Domain
domain = Domain('v4-domain')
self._vnc_lib.domain_create(domain)
logger.debug('Created domain ')
# Create Project
project = Project('v4-proj', domain)
self._vnc_lib.project_create(project)
logger.debug('Created Project')
# Create NetworkIpam
ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
self._vnc_lib.network_ipam_create(ipam)
logger.debug('Created network ipam')
ipam = self._vnc_lib.network_ipam_read(fq_name=['v4-domain', 'v4-proj',
'default-network-ipam'])
logger.debug('Read network ipam')
ip_alloc_from_start = [True, False]
for from_start in ip_alloc_from_start:
# Create subnets
alloc_pool_list = []
alloc_pool_list.append(
AllocationPoolType(start='192.168.3.11', end='172.16.31.10'))
alloc_pool_list.append(
AllocationPoolType(start='172.16.58.3', end='172.16.31.10'))
ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24),
allocation_pools=alloc_pool_list,
addr_from_start=from_start)
ip_addr_list = []
for alloc_pool in alloc_pool_list:
start_ip = alloc_pool.start
end_ip = alloc_pool.end
start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
temp = start
ip_addr_list.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 256:
temp[i] = 0
temp[i-1] += 1
ip_addr_list.append(".".join(map(str, temp)))
if from_start is False:
ip_addr_list.reverse()
total_addr = len(ip_addr_list)
logger.debug('ip address alloc list: %s', ip_addr_list[0:total_addr])
# Create VN
vn = VirtualNetwork('v4-vn', project)
vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4]))
self._vnc_lib.virtual_network_create(vn)
logger.debug('Created Virtual Network object %s', vn.uuid)
net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)
# Create v4 Ip object for all possible addresses in alloc_pool
v4_ip_obj_list = []
for idx, val in enumerate(ip_addr_list):
v4_ip_obj_list.append(
InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4'))
v4_ip_obj_list[idx].uuid = v4_ip_obj_list[idx].name
logger.debug('Created Instance IP object %s %s',idx, v4_ip_obj_list[idx].uuid)
# Create number of VMs to assign ip addresses
# to use all addresses in alloc_pool
vm_list_v4 = []
for idx, val in enumerate(ip_addr_list):
vm_list_v4.append(VirtualMachine(str(uuid.uuid4())))
vm_list_v4[idx].uuid = vm_list_v4[idx].name
self._vnc_lib.virtual_machine_create(vm_list_v4[idx])
port_list = []
port_id_list = []
for idx, val in enumerate(ip_addr_list):
id_perms = IdPermsType(enable=True)
port_list.append(
VirtualMachineInterface(str(uuid.uuid4()), vm_list_v4[idx],
id_perms=id_perms))
port_list[idx].uuid = port_list[idx].name
port_list[idx].set_virtual_network(vn)
v4_ip_obj_list[idx].set_virtual_machine_interface(port_list[idx])
v4_ip_obj_list[idx].set_virtual_network(net_obj)
port_id_list.append(
self._vnc_lib.virtual_machine_interface_create(port_list[idx]))
ip_ids = []
logger.debug('Allocating an IP4 address for VMs')
for idx, val in enumerate(ip_addr_list):
ip_ids.append(
self._vnc_lib.instance_ip_create(v4_ip_obj_list[idx]))
v4_ip_obj_list[idx] = self._vnc_lib.instance_ip_read(
id=ip_ids[idx])
ip_addr = v4_ip_obj_list[idx].get_instance_ip_address()
logger.debug('got v4 IP Address for instance %s:%s', idx, ip_addr)
if ip_addr != ip_addr_list[idx]:
logger.debug('Allocation failed, expected v4 IP Address: %s', ip_addr_list[idx])
# Find out number of allocated ips from given VN/subnet to test
# vn_subnet_ip_count_http_post()
data = {"subnet_list" : ["172.16.58.3/24"]}
url = '/virtual-network/%s/subnet-ip-count' %(vn.uuid)
rv_json = self._vnc_lib._request_server(rest.OP_POST, url, json.dumps(data))
ret_ip_count = json.loads(rv_json)['ip_count_list'][0]
total_ip_addr = len(ip_addr_list)
self.assertEqual(ret_ip_count, total_ip_addr)
# Delete 2 VMs (With First and Last IP), associated Ports
# and instanace IPs,
# recreate them to make sure that we get same ips again.
# Repeat this for 2 VMs from middle of the alloc_pool
total_ip_addr = len(ip_addr_list)
to_modifies = [[0, total_ip_addr-1],
[total_ip_addr/2 -1, total_ip_addr/2]]
for to_modify in to_modifies:
logger.debug('Delete Instances %s %s', to_modify[0], to_modify[1])
for idx, val in enumerate(to_modify):
self._vnc_lib.instance_ip_delete(id=ip_ids[val])
ip_ids[val] = None
self._vnc_lib.virtual_machine_interface_delete(
id=port_list[val].uuid)
port_list[val] = None
port_id_list[val] = None
self._vnc_lib.virtual_machine_delete(
id=vm_list_v4[val].uuid)
vm_list_v4[val] = None
v4_ip_obj_list[val] = None
ip_ids[val] = None
logger.debug('Deleted instance %s', val)
# Re-create two VMs and assign IP addresses
# these should get first and last ip.
for idx, val in enumerate(to_modify):
v4_ip_obj_list[val] = InstanceIp(
name=str(uuid.uuid4()), instance_ip_family='v4')
v4_ip_obj_list[val].uuid = v4_ip_obj_list[val].name
vm_list_v4[val] = VirtualMachine(str(uuid.uuid4()))
vm_list_v4[val].uuid = vm_list_v4[val].name
self._vnc_lib.virtual_machine_create(vm_list_v4[val])
id_perms = IdPermsType(enable=True)
port_list[val] = VirtualMachineInterface(
str(uuid.uuid4()), vm_list_v4[val], id_perms=id_perms)
port_list[val].uuid = port_list[val].name
port_list[val].set_virtual_network(vn)
v4_ip_obj_list[val].set_virtual_machine_interface(
port_list[val])
v4_ip_obj_list[val].set_virtual_network(net_obj)
port_id_list[val] = self._vnc_lib.virtual_machine_interface_create(port_list[val])
logger.debug('Created instance %s',val)
# Allocate IPs to modified VMs
for idx, val in enumerate(to_modify):
ip_ids[val] = self._vnc_lib.instance_ip_create(v4_ip_obj_list[val])
v4_ip_obj_list[val] = self._vnc_lib.instance_ip_read(
id=ip_ids[val])
ip_addr = v4_ip_obj_list[val].get_instance_ip_address()
logger.debug('got v4 IP Address for instance %s:%s', val, ip_addr)
if ip_addr != ip_addr_list[val]:
logger.debug('Allocation failed, expected v4 IP Address: %s', ip_addr_list[val])
# negative test.
# Create a new VM and try getting a new instance_ip
# we should get an exception as alloc_pool is fully exhausted.
logger.debug('Negative Test to create extra instance and try assigning IP address')
# Create v4 Ip object
ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_family='v4')
ip_obj1.uuid = ip_obj1.name
logger.debug('Created new Instance IP object %s', ip_obj1.uuid)
# Create VM
vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
vm_inst_obj1.uuid = vm_inst_obj1.name
self._vnc_lib.virtual_machine_create(vm_inst_obj1)
id_perms = IdPermsType(enable=True)
port_obj1 = VirtualMachineInterface(
str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
port_obj1.uuid = port_obj1.name
port_obj1.set_virtual_network(vn)
ip_obj1.set_virtual_machine_interface(port_obj1)
ip_obj1.set_virtual_network(net_obj)
port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
logger.debug('Created extra instance')
logger.debug('Allocating an IP4 address for extra instance')
with ExpectedException(BadRequest,
'Virtual-Network\(\[\'v4-domain\', \'v4-proj\', \'v4-vn\'\]\) has exhausted subnet\(all\)') as e:
ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
# cleanup for negative test
self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
# user requested instance_ip, if VM is getting created
# with user requested ip and ip is already allocated,
# system allows VM creation with same ip
# Test is with start from begining allocation scheme
if from_start is True:
# Create a v4 Ip object
ip_obj2 = InstanceIp(name=str(uuid.uuid4()),
instance_ip_address='192.168.3.11',
instance_ip_family='v4')
ip_obj2.uuid = ip_obj2.name
logger.debug('Created new Instance IP object %s', ip_obj2.uuid)
# Create VM
vm_inst_obj2 = VirtualMachine(str(uuid.uuid4()))
vm_inst_obj2.uuid = vm_inst_obj2.name
self._vnc_lib.virtual_machine_create(vm_inst_obj2)
id_perms = IdPermsType(enable=True)
port_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), vm_inst_obj2, id_perms=id_perms)
port_obj2.uuid = port_obj2.name
port_obj2.set_virtual_network(vn)
ip_obj2.set_virtual_machine_interface(port_obj2)
ip_obj2.set_virtual_network(net_obj)
port_id2 = self._vnc_lib.virtual_machine_interface_create(
port_obj2)
ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
#cleanup for user requested IP, VM, port
self._vnc_lib.instance_ip_delete(id=ip_id2)
self._vnc_lib.virtual_machine_interface_delete(
id=port_obj2.uuid)
self._vnc_lib.virtual_machine_delete(id=vm_inst_obj2.uuid)
#cleanup subnet and allocation pools
for idx, val in enumerate(ip_addr_list):
self._vnc_lib.instance_ip_delete(id=ip_ids[idx])
self._vnc_lib.virtual_machine_interface_delete(
id=port_list[idx].uuid)
self._vnc_lib.virtual_machine_delete(id=vm_list_v4[idx].uuid)
self._vnc_lib.virtual_network_delete(id=vn.uuid)
# end of from_start
logger.debug('Cleaning up')
self._vnc_lib.network_ipam_delete(id=ipam.uuid)
self._vnc_lib.project_delete(id=project.uuid)
self._vnc_lib.domain_delete(id=domain.uuid)
#end
    def test_req_ip_allocation(self):
        """Allocate specific, user-requested v4 and v6 addresses.

        Builds a dual-stack (v4 + v6) virtual network, creates one VM with
        one port, requests the fixed addresses 172.16.58.3 and fd14::4 for
        that port, and reads back the default gateway of each subnet.
        """
        # Create Domain
        domain = Domain('my-v4-v6-req-ip-domain')
        self._vnc_lib.domain_create(domain)
        logger.debug('Created domain ')

        # Create Project
        project = Project('my-v4-v6-req-ip-proj', domain)
        self._vnc_lib.project_create(project)
        logger.debug('Created Project')

        # Create NetworkIpam
        ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
        self._vnc_lib.network_ipam_create(ipam)
        logger.debug('Created network ipam')

        ipam = self._vnc_lib.network_ipam_read(fq_name=['my-v4-v6-req-ip-domain',
                                                        'my-v4-v6-req-ip-proj',
                                                        'default-network-ipam'])
        logger.debug('Read network ipam')

        # Create subnets (one IPv4, one IPv6)
        ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24))
        ipam_sn_v6 = IpamSubnetType(subnet=SubnetType('fd14::', 120))

        # Create VN
        vn = VirtualNetwork('my-v4-v6-vn', project)
        vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4, ipam_sn_v6]))
        self._vnc_lib.virtual_network_create(vn)
        logger.debug('Created Virtual Network object %s', vn.uuid)
        net_obj = self._vnc_lib.virtual_network_read(id = vn.uuid)

        # Create v4 Ip object, with v4 requested ip
        ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address='172.16.58.3',
                             instance_ip_family='v4')
        ip_obj1.uuid = ip_obj1.name
        logger.debug('Created Instance IP object 1 %s', ip_obj1.uuid)

        # Create v6 Ip object with v6 requested ip
        ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address='fd14::4',
                             instance_ip_family='v6')
        ip_obj2.uuid = ip_obj2.name
        logger.debug('Created Instance IP object 2 %s', ip_obj2.uuid)

        # Create VM
        vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
        vm_inst_obj1.uuid = vm_inst_obj1.name
        self._vnc_lib.virtual_machine_create(vm_inst_obj1)

        id_perms = IdPermsType(enable=True)
        port_obj1 = VirtualMachineInterface(
            str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
        port_obj1.uuid = port_obj1.name
        port_obj1.set_virtual_network(vn)
        # Both requested addresses are bound to the same port.
        ip_obj1.set_virtual_machine_interface(port_obj1)
        ip_obj1.set_virtual_network(net_obj)
        ip_obj2.set_virtual_machine_interface(port_obj1)
        ip_obj2.set_virtual_network(net_obj)
        port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)

        logger.debug('Allocating an IP4 address for first VM')
        ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
        ip_obj1 = self._vnc_lib.instance_ip_read(id=ip_id1)
        ip_addr1 = ip_obj1.get_instance_ip_address()
        logger.debug(' got v4 IP Address for first instance %s', ip_addr1)
        if ip_addr1 != '172.16.58.3':
            logger.debug('Allocation failed, expected v4 IP Address 172.16.58.3')

        logger.debug('Allocating an IP6 address for first VM')
        ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
        ip_obj2 = self._vnc_lib.instance_ip_read(id=ip_id2)
        ip_addr2 = ip_obj2.get_instance_ip_address()
        logger.debug(' got v6 IP Address for first instance %s', ip_addr2)
        if ip_addr2 != 'fd14::4':
            logger.debug('Allocation failed, expected v6 IP Address fd14::4')

        # Read gateway ip address of each subnet on the network
        logger.debug('Read default gateway ip address')
        ipam_refs = net_obj.get_network_ipam_refs()
        for ipam_ref in ipam_refs:
            subnets = ipam_ref['attr'].get_ipam_subnets()
            for subnet in subnets:
                logger.debug('Gateway for subnet (%s/%s) is (%s)' %(subnet.subnet.get_ip_prefix(),
                        subnet.subnet.get_ip_prefix_len(),
                        subnet.get_default_gateway()))

        #cleanup
        logger.debug('Cleaning up')
        self._vnc_lib.instance_ip_delete(id=ip_id1)
        self._vnc_lib.instance_ip_delete(id=ip_id2)
        self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
        self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
        self._vnc_lib.virtual_network_delete(id=vn.uuid)
        self._vnc_lib.network_ipam_delete(id=ipam.uuid)
        self._vnc_lib.project_delete(id=project.uuid)
        self._vnc_lib.domain_delete(id=domain.uuid)
    #end
def test_notify_doesnt_persist(self):
# net/ip notify context shouldn't persist to db, should only
# update in-memory book-keeping
def_ipam = NetworkIpam()
ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=def_ipam.get_fq_name())
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24))
vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([ipam_sn_v4]))
self._vnc_lib.virtual_network_create(vn_obj)
iip_obj = InstanceIp('iip-%s' %(self.id()))
iip_obj.add_virtual_network(vn_obj)
class SpyCreateNode(object):
def __init__(self, orig_object, method_name):
self._orig_method = getattr(orig_object, method_name)
self._invoked = 0
# end __init__
def __call__(self, *args, **kwargs):
if self._invoked >= 1:
raise Exception(
"Instance IP was persisted more than once")
if args[1].startswith('/api-server/subnets'):
self._invoked += 1
return self._orig_method(args, kwargs)
# end SpyCreateNode
orig_object = self._api_server._db_conn._zk_db._zk_client
method_name = 'create_node'
with test_common.patch(orig_object, method_name,
SpyCreateNode(orig_object, method_name)):
self._vnc_lib.instance_ip_create(iip_obj)
self.assertTill(self.ifmap_has_ident, obj=iip_obj)
#end test_notify_doesnt_persist
    def test_ip_alloc_clash(self):
        """Verify address-clash detection across IP object types.

        Allocating an instance-ip, floating-ip or alias-ip whose address
        is already held by another IP object (or is the subnet gateway)
        must fail with 'Ip address already in use'. The gateway address is
        only usable by logical-router / isolated ports, never by ports
        bound to a virtual-machine.
        """
        # prep objects for testing
        proj_obj = Project('proj-%s' %(self.id()), parent_obj=Domain())
        self._vnc_lib.project_create(proj_obj)

        ipam_obj = NetworkIpam('ipam-%s' %(self.id()), proj_obj)
        self._vnc_lib.network_ipam_create(ipam_obj)

        vn_obj = VirtualNetwork('vn-%s' %(self.id()), proj_obj)
        ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('172.16.58.3', 24))
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([ipam_sn_v4]))
        self._vnc_lib.virtual_network_create(vn_obj)

        fip_pool_obj = FloatingIpPool(
            'fip-pool-%s' %(self.id()), parent_obj=vn_obj)
        self._vnc_lib.floating_ip_pool_create(fip_pool_obj)

        aip_pool_obj = AliasIpPool(
            'aip-pool-%s' %(self.id()), parent_obj=vn_obj)
        self._vnc_lib.alias_ip_pool_create(aip_pool_obj)

        iip_obj = InstanceIp('existing-iip-%s' %(self.id()))
        iip_obj.add_virtual_network(vn_obj)
        self._vnc_lib.instance_ip_create(iip_obj)
        # read-in to find allocated address
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)

        fip_obj = FloatingIp('existing-fip-%s' %(self.id()), fip_pool_obj)
        fip_obj.add_project(proj_obj)
        self._vnc_lib.floating_ip_create(fip_obj)
        # read-in to find allocated address
        fip_obj = self._vnc_lib.floating_ip_read(id=fip_obj.uuid)

        aip_obj = AliasIp('existing-aip-%s' %(self.id()), aip_pool_obj)
        aip_obj.add_project(proj_obj)
        self._vnc_lib.alias_ip_create(aip_obj)
        # read-in to find allocated address
        aip_obj = self._vnc_lib.alias_ip_read(id=aip_obj.uuid)

        vm_obj = VirtualMachine('vm-%s' %(self.id()))
        self._vnc_lib.virtual_machine_create(vm_obj)

        # Port bound to a VM: must never be given the gateway IP.
        vm_vmi_obj = VirtualMachineInterface('vm-vmi-%s' %(self.id()),
                                             proj_obj)
        vm_vmi_obj.add_virtual_network(vn_obj)
        vm_vmi_obj.add_virtual_machine(vm_obj)
        self._vnc_lib.virtual_machine_interface_create(vm_vmi_obj)

        # Port bound to a logical router: may hold the gateway IP.
        rtr_vmi_obj = VirtualMachineInterface('rtr-vmi-%s' %(self.id()),
                                              proj_obj)
        rtr_vmi_obj.add_virtual_network(vn_obj)
        self._vnc_lib.virtual_machine_interface_create(rtr_vmi_obj)
        lr_obj = LogicalRouter('rtr-%s' %(self.id()), proj_obj)
        lr_obj.add_virtual_machine_interface(rtr_vmi_obj)
        self._vnc_lib.logical_router_create(lr_obj)

        # Port bound to neither VM nor router.
        isolated_vmi_obj = VirtualMachineInterface('iso-vmi-%s' %(self.id()),
                                                   proj_obj)
        isolated_vmi_obj.add_virtual_network(vn_obj)
        self._vnc_lib.virtual_machine_interface_create(isolated_vmi_obj)

        # allocate instance-ip clashing with existing instance-ip
        iip2_obj = InstanceIp('clashing-iip-%s' %(self.id()),
                              instance_ip_address=iip_obj.instance_ip_address)
        iip2_obj.add_virtual_network(vn_obj)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.instance_ip_create(iip2_obj)

        # allocate instance-ip clashing with existing floating-ip
        iip2_obj.set_instance_ip_address(fip_obj.floating_ip_address)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.instance_ip_create(iip2_obj)

        # allocate floating-ip clashing with existing floating-ip
        fip2_obj = FloatingIp('clashing-fip-%s' %(self.id()), fip_pool_obj,
                              floating_ip_address=fip_obj.floating_ip_address)
        fip2_obj.add_project(proj_obj)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.floating_ip_create(fip2_obj)

        # allocate alias-ip clashing with existing alias-ip
        aip2_obj = AliasIp('clashing-aip-%s' %(self.id()), aip_pool_obj,
                           alias_ip_address=aip_obj.alias_ip_address)
        aip2_obj.add_project(proj_obj)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.alias_ip_create(aip2_obj)

        # allocate floating-ip clashing with existing instance-ip
        fip2_obj.set_floating_ip_address(iip_obj.instance_ip_address)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.floating_ip_create(fip2_obj)

        # allocate alias-ip clashing with existing instance-ip
        aip2_obj.set_alias_ip_address(iip_obj.instance_ip_address)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.alias_ip_create(aip2_obj)

        # allocate alias-ip clashing with existing floating-ip
        aip2_obj.set_alias_ip_address(fip_obj.floating_ip_address)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.alias_ip_create(aip2_obj)

        # allocate floating-ip with gateway ip and verify failure
        fip2_obj.set_floating_ip_address('172.16.31.10')
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.floating_ip_create(fip2_obj)

        # allocate alias-ip with gateway ip and verify failure
        aip2_obj.set_alias_ip_address('172.16.31.10')
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Ip address already in use') as e:
            self._vnc_lib.alias_ip_create(aip2_obj)

        # allocate 2 instance-ip with gateway ip - should work
        # then verify iip cannot # ref to vm port (during iip-update
        # or vmi-update)
        iip_gw_ip = InstanceIp('gw-ip-iip-%s' %(self.id()),
                               instance_ip_address='172.16.31.10')
        iip_gw_ip.add_virtual_network(vn_obj)
        self._vnc_lib.instance_ip_create(iip_gw_ip)
        iip2_gw_ip = InstanceIp('gw-ip-iip2-%s' %(self.id()),
                                instance_ip_address='172.16.31.10')
        iip2_gw_ip.add_virtual_network(vn_obj)
        iip2_gw_ip.add_virtual_machine_interface(rtr_vmi_obj)
        self._vnc_lib.instance_ip_create(iip2_gw_ip)

        iip_gw_ip.add_virtual_machine_interface(vm_vmi_obj)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Gateway IP cannot be used by VM port') as e:
            self._vnc_lib.instance_ip_update(iip_gw_ip)

        iip_gw_ip.del_virtual_machine_interface(vm_vmi_obj)
        iip_gw_ip.add_virtual_machine_interface(rtr_vmi_obj)
        self._vnc_lib.instance_ip_update(iip_gw_ip)

        iip2_gw_ip.add_virtual_machine_interface(isolated_vmi_obj)
        self._vnc_lib.instance_ip_update(iip2_gw_ip)

        isolated_vmi_obj.add_virtual_machine(vm_obj)
        with ExpectedException(cfgm_common.exceptions.BadRequest,
                               'Gateway IP cannot be used by VM port') as e:
            self._vnc_lib.virtual_machine_interface_update(
                isolated_vmi_obj)
    # end test_ip_alloc_clash
#end class TestIpAlloc
if __name__ == '__main__':
    # Emit debug-level log records to stderr while the suite runs.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    unittest.main()
| StarcoderdataPython |
1632050 | <gh_stars>0
import api.tutils
import time
import sys
import api.objmodel
# Test configure ACI mode
def testACIMode(testbed):
    """Verify endpoint-group (EPG) isolation with the fabric in ACI mode.

    Containers within one EPG must be able to ping each other, while
    containers in different EPGs on the same network must not.
    """
    api.tutils.info("testACIMode starting")

    api.objmodel.setFabricMode("aci")

    # Build a network under the default tenant and two EPGs on top of it.
    tenant = api.objmodel.tenant('default')
    network = tenant.newNetwork("aciNet", 0, "192.168.127.12/24", "192.168.3.11", "vlan")
    network.newGroup("epgA")
    network.newGroup("epgB")

    # Launch two containers in each endpoint group, all on node 0.
    cntr_a1 = testbed.runContainerOnNode(0, "epgA.aciNet")
    cntr_a2 = testbed.runContainerOnNode(0, "epgA.aciNet")
    cntr_b1 = testbed.runContainerOnNode(0, "epgB.aciNet")
    cntr_b2 = testbed.runContainerOnNode(0, "epgB.aciNet")

    # Intra-EPG traffic is permitted in both groups.
    cntr_a1.checkPing(cntr_a2.getIpAddr())
    cntr_b1.checkPing(cntr_b2.getIpAddr())

    # Inter-EPG traffic must be blocked.
    cntr_a1.checkPingFailure(cntr_b1.getIpAddr())

    # Tear everything down again.
    testbed.removeContainers([cntr_a1, cntr_a2, cntr_b1, cntr_b2])
    network.deleteGroup("epgA")
    network.deleteGroup("epgB")
    tenant.deleteNetwork("aciNet")
    api.objmodel.setFabricMode("default")

    # Check for errors
    testbed.chekForNetpluginErrors()

    api.tutils.info("testACIMode Test passed")
| StarcoderdataPython |
60240 | <gh_stars>10-100
#!/usr/bin/env python3
#
# Visualize filters in the network
import time
from math import sqrt
import torch
import torchvision
from torch import nn
from args import args
from made import MADE
from pixelcnn import PixelCNN
from utils import ensure_dir, get_ham_args_features
# Set args here or through CLI to match the state
# (the network is reconstructed from these args before the checkpoint is
# loaded, so they must match the run that produced the checkpoint)
args.ham = 'afm'
args.lattice = 'tri'
args.boundary = 'periodic'
args.L = 4
args.beta = 2
args.net = 'pixelcnn'
args.net_depth = 3
args.net_width = 64
args.half_kernel_size = 3
args.bias = True
args.beta_anneal = 0.998
args.max_step = 10**4
args.print_step = 100
args.visual_step = 1000

# Path to the saved training state (step 10000 checkpoint).
state_dir = 'out'
ham_args, features = get_ham_args_features()
state_filename = '{state_dir}/{ham_args}/{features}/out{args.out_infix}_save/10000.state'.format(
    **locals())

# Index of the layer whose filters are visualized, number of input
# channels optimized at once, and where the resulting images are written.
target_layer = 1
num_channel = 1
out_dir = '../support/fig/filters/{ham_args}/{features}/layer{target_layer}'.format(
    **locals())
if __name__ == '__main__':
    ensure_dir(out_dir + '/')

    # Rebuild the network exactly as configured above, then load the
    # trained weights from the checkpoint.
    if args.net == 'made':
        net = MADE(**vars(args))
    elif args.net == 'pixelcnn':
        net = PixelCNN(**vars(args))
    else:
        raise ValueError('Unknown net: {}'.format(args.net))
    net.to(args.device)
    print('{}\n'.format(net))

    print(state_filename)
    state = torch.load(state_filename, map_location=args.device)
    net.load_state_dict(state['net'])

    # Optimize the input image itself; the network weights stay fixed.
    sample = torch.zeros([num_channel, 1, args.L, args.L], requires_grad=True)
    nn.init.normal_(sample)
    optimizer = torch.optim.Adam([sample], lr=1e-3, weight_decay=1)

    start_time = time.time()
    for step in range(args.max_step + 1):
        optimizer.zero_grad()

        # Forward through the layers up to (and including) target_layer.
        x = sample
        for idx, layer in enumerate(net.net):
            x = layer(x)
            if idx == target_layer:
                break

        # Objective: mean activation of channel c of the target layer for
        # input channel c, averaged over channels. (A stray no-op `sample`
        # expression statement that used to sit here has been removed.)
        loss = torch.mean(
            torch.stack([
                torch.mean(x[channel, channel])
                for channel in range(num_channel)
            ]))
        loss.backward()
        optimizer.step()

        if args.print_step and step % args.print_step == 0:
            used_time = time.time() - start_time
            print('step = {}, loss = {:.8g}, used_time = {:.3f}'.format(
                step, loss, used_time))
        if args.visual_step and step % args.visual_step == 0:
            torchvision.utils.save_image(
                sample,
                '{}/{}.png'.format(out_dir, step),
                nrow=int(sqrt(sample.shape[0])),
                padding=2,
                normalize=True)
| StarcoderdataPython |
1706907 | <reponame>wikimedia/search-MjoLniR<gh_stars>10-100
from collections import defaultdict
from functools import partial
import tempfile
from typing import cast, Any, Callable, Dict, List, Mapping, Optional
import hyperopt
import numpy as np
from pyspark.sql import SparkSession
import xgboost as xgb
from mjolnir.training.tuning import make_cv_objective, ModelSelection
from mjolnir.utils import as_local_path, as_local_paths, as_output_file
def _coerce_params(params: Mapping[str, Any]) -> Dict[str, Any]:
"""Force xgboost parameters into appropriate types
The output from hyperopt is always floats, but some xgboost parameters
explicitly require integers. Cast those as necessary
Parameters
----------
params : dict
xgboost parameters
Returns
-------
dict
Input parameters coerced as necessary
"""
def identity(x):
return x
def sloppy_int(x):
try:
return int(x)
except ValueError:
pass
val = float(x)
# This could fail for larger numbers due to fp precision, but not
# expecting integer values larger than two digits here.
if val.is_integer():
return int(val)
raise ValueError('Not parsable as integer: {}'.format(x))
types = cast(Dict[str, Callable[[Any], Any]], defaultdict(lambda: identity))
types.update({
'max_depth': sloppy_int,
'max_bin': sloppy_int,
'num_class': sloppy_int,
'silent': sloppy_int,
})
return {k: types[k](v) for k, v in params.items()}
def train(
    fold: Mapping[str, str],
    params: Mapping[str, Any],
    train_matrix: Optional[str] = None,
    spark: Optional[SparkSession] = None
) -> 'XGBoostModel':
    """Train a single xgboost ranking model.

    Primary entry point for hyperparameter tuning normalizes
    parameters and auto detects some values. Actual training is
    passed on to XGBoostModel.trainWithFiles

    Parameters
    ----------
    fold :
        Map from split name to data path. All provided splits will be
        evaluated on each boosting iteration.
    params :
        parameters to pass on to xgboost training
    train_matrix :
        Optional name of training matrix in fold. If not provided will
        auto-detect to either 'all' or 'train'
    spark:
        If provided, train remotely over spark

    Returns
    -------
    XGBoostModel
        Trained xgboost model
    """
    # hyperparameter tuning may have given us floats where we need
    # ints, so this gets all the types right for xgboost. Also makes
    # a copy of params so we don't modify the incoming dict.
    params = _coerce_params(params)

    # TODO: Maybe num_rounds should just be external? But it's easier
    # to do hyperparameter optimization with a consistent dict interface
    kwargs = cast(Dict[str, Any], {
        'num_boost_round': 100,
    })
    if 'num_boost_round' in params:
        kwargs['num_boost_round'] = params['num_boost_round']
        # BUG FIX: the key removed must match the key checked above;
        # deleting 'num_rounds' here raised KeyError whenever
        # 'num_boost_round' was supplied.
        del params['num_boost_round']
    if 'early_stopping_rounds' in params:
        kwargs['early_stopping_rounds'] = params['early_stopping_rounds']
        del params['early_stopping_rounds']

    # Set some sane defaults for ranking tasks
    if 'objective' not in params:
        params['objective'] = 'rank:ndcg'
    if 'eval_metric' not in params:
        params['eval_metric'] = 'ndcg@10'
    # Not really ranking specific, but generally fastest method
    if 'tree_method' not in params:
        params['tree_method'] = 'hist'

    # Convenience for some situations, but typically be explicit
    # about the name of the matrix to train against.
    if train_matrix is None:
        train_matrix = "all" if "all" in fold else "train"

    if spark:
        return XGBoostModel.trainWithFilesRemote(spark, fold, train_matrix, params, **kwargs)
    else:
        return XGBoostModel.trainWithFiles(fold, train_matrix, params, **kwargs)
# Shape of the evaluation history recorded during training:
# Top level: matrix name
# Second level: metric name
# Inner list: stringified per-iteration metric value
EvalsResult = Mapping[str, Mapping[str, List[str]]]
class XGBoostBooster(object):
    """Wrapper for xgb.Booster usage in mjolnir

    Wraps the booster to distinguish what we have after training,
    the XGBoostModel, from what we write to disk, which is only the
    booster. Would be better if there was a clean way to wrap all
    the data up an serialize together, while working with xgboost's
    c++ methods that expect file paths.
    """
    def __init__(self, booster: xgb.Booster) -> None:
        self.booster = booster

    @staticmethod
    def loadBoosterFromHadoopFile(path: str) -> 'XGBoostBooster':
        """Load a serialized booster from a (possibly remote) hadoop path."""
        with as_local_path(path) as local_path:
            return XGBoostBooster.loadBoosterFromLocalFile(local_path)

    @staticmethod
    def loadBoosterFromLocalFile(path: str) -> 'XGBoostBooster':
        """Load a serialized booster from a local file path."""
        # BUG FIX: Booster.load_model is an instance method; the previous
        # unbound call xgb.Booster.load_model(path) passed the path as
        # `self` and could never succeed. Construct the booster directly
        # from the model file instead.
        booster = xgb.Booster(model_file=path)
        # TODO: Not having the training parameters or the evaluation metrics
        # almost makes this a different thing...
        return XGBoostBooster(booster)

    def saveBoosterAsHadoopFile(self, path: str):
        """Serialize the booster to a (possibly remote) hadoop path."""
        with as_output_file(path) as f:
            self.saveBoosterAsLocalFile(f.name)

    def saveBoosterAsLocalFile(self, path: str):
        """Serialize the booster to a local file path."""
        # TODO: This doesn't save any metrics, should it?
        self.booster.save_model(path)
class XGBoostModel(XGBoostBooster):
    """xgboost booster along with train-time metrics

    TODO: Take XGBoostBooster as init arg instead of xgb.Booster?
    """
    def __init__(
        self,
        booster: xgb.Booster,
        evals_result: EvalsResult,
        params: Mapping[str, Any]
    ) -> None:
        """
        Parameters
        ----------
        booster :
            Trained low-level xgboost booster.
        evals_result :
            Per-iteration metrics recorded for each evaluation matrix.
        params :
            The parameters training was performed with.
        """
        super().__init__(booster)
        self.evals_result = evals_result
        self.params = params

    @staticmethod
    def trainWithFilesRemote(
        spark: SparkSession,
        fold: Mapping[str, str],
        train_matrix: str,
        params: Mapping[str, Any],
        **kwargs
    ) -> 'XGBoostModel':
        """Train model on a single remote spark executor.

        Silly hack to train models inside the yarn cluster. To train multiple
        models in parallel python threads will need to be used. Wish pyspark
        had collectAsync.
        """
        nthread = int(spark.conf.get('spark.task.cpus', '1'))
        if 'nthread' not in params:
            params = dict(params, nthread=nthread)
        elif params['nthread'] != nthread:
            raise Exception("Executors have [{}] cpus but training requested [{}]".format(
                nthread, params['nthread']))

        return (
            spark.sparkContext
            .parallelize([1], 1)
            .map(lambda x: XGBoostModel.trainWithFiles(fold, train_matrix, params, **kwargs))
            .collect()[0]
        )

    @staticmethod
    def trainWithFiles(
        fold: Mapping[str, str],
        train_matrix: str,
        params: Mapping[str, Any],
        **kwargs
    ) -> 'XGBoostModel':
        """Wrapper around xgb.train

        Copies the matrices locally (when necessary), evaluates every
        provided split on each boosting iteration, and records those
        metrics on the returned model.

        Parameters
        ----------
        fold :
            Map from split name to data path. All provided splits will be
            evaluated on each boosting iteration.
        train_matrix: str
            name of split in fold to train against
        params : dict
            XGBoost training parameters

        Returns
        -------
        mjolnir.training.xgboost.XGBoostModel
            trained xgboost ranking model
        """
        with as_local_paths(fold.values()) as local_paths:
            matrices = {name: xgb.DMatrix(path) for name, path in zip(fold.keys(), local_paths)}
            dtrain = matrices[train_matrix]
            evallist = [(dmat, name) for name, dmat in matrices.items()]
            metrics = cast(Mapping, {})
            booster = xgb.train(params, dtrain, evals=evallist, evals_result=metrics, **kwargs)
            return XGBoostModel(booster, metrics, params)

    def dump(self, features=None, with_stats=False, format="json"):
        """Dumps the xgboost model

        Parameters
        ----------
        features : list of str or None, optional
            list of features names, or None for no feature names in dump.
            (Default: None)
        with_stats : bool, optional
            Should various additional statistics be included? These are not
            necessary for prediction. (Default: False)
        format : string, optional
            The format of dump to produce, either json or text. (Default: json)

        Returns
        -------
        str
            For the json format, a single valid json string containing all
            trees. For the text format, the per-tree dumps joined by
            newlines.
        """
        # Annoyingly the xgboost api doesn't take the feature map as a string, but
        # instead as a filename. Write the feature map out to a file if necessary.
        if features:
            # When we write the svmrank formatted files the features are indexed
            # starting at 1. We need to throw a fake index 0 in here or it's all
            # wrong.
            feat_map = "0 PLACEHOLDER_FEAT q\n" + \
                "\n".join(["%d %s q" % (i, fname) for i, fname in enumerate(features, 1)])
            # Context manager guarantees the temp file is cleaned up; it
            # must stay alive while get_dump reads it.
            with tempfile.NamedTemporaryFile(mode='w') as fmap_f:
                fmap_f.write(feat_map)
                fmap_f.flush()
                # BUG FIX: honor the `format` parameter; it was previously
                # accepted but dump_format was hard-coded to 'json'.
                trees = self.booster.get_dump(
                    fmap_f.name, with_stats, dump_format=format)
        else:
            trees = self.booster.get_dump('', with_stats, dump_format=format)

        if format == "json":
            # get_dump emits one json document per tree. Wrap them in an
            # array so we have a single valid json string.
            return '[' + ','.join(trees) + ']'
        return '\n'.join(trees)
def cv_transformer(model: 'XGBoostModel', params: Mapping[str, Any]):
    """Report model metrics in format expected by model selection"""
    metric_name = params['eval_metric']
    history = model.evals_result
    return {
        'train': history['train'][metric_name][-1],
        'test': history['test'][metric_name][-1],
        'metrics': history,
    }
def tune(
    folds: List[Mapping[str, str]],
    stats: Dict,
    train_matrix: str,
    num_cv_jobs: int = 5,
    initial_num_trees: int = 100,
    final_num_trees: int = 500,
    iterations: int = 150,
    spark: Optional[SparkSession] = None
):
    """Find appropriate hyperparameters for training df

    This is far from perfect, hyperparameter tuning is a bit of a black art
    and could probably benefit from human interaction at each stage. Various
    parameters depend a good bit on the number of samples in df, and how
    that data is shaped.

    Below is tuned for a dataframe with approximatly 10k normalized queries,
    110k total queries, and 2.2M samples. This is actually a relatively small
    dataset, we should rework the values used with larger data sets if they
    are promising. It may also be that the current feature space can't take
    advantage of more samples.

    Note that hyperopt uses the first 20 iterations to initialize, during those
    first 20 this is a strictly random search.

    Parameters
    ----------
    folds : list of dict containing train and test keys
    stats : dict
        stats about the fold from the make_folds utility script
    train_matrix : str
        Key into each fold naming the matrix used for training.
    num_cv_jobs : int, optional
        The number of cross validation folds to train in parallel. (Default: 5)
    initial_num_trees: int, optional
        The number of trees to do most of the hyperparameter tuning with. This
        should be large enough to be resonably representative of the final
        training size. (Default: 100)
    final_num_trees: int, optional
        The number of trees to do the final eta optimization with. If set to
        None the final eta optimization will be skipped and initial_n_tree will
        be kept.
    iterations : int, optional
        Number of hyperopt iterations for the initial tuning stage.
        (Default: 150)
    spark : SparkSession or None, optional
        Session used to parallelize fold training, if provided.

    Returns
    -------
    dict
        Dict with two keys, trials and params. params is the optimal set of
        parameters. trials contains a dict of individual optimization steps
        performed, each containing a hyperopt.Trials object recording what
        happened.
    """
    # Bucket the dataset by observation count; the search spaces below are
    # sized per bucket.
    num_obs = stats['num_observations']
    if num_obs > 8000000:
        dataset_size = 'xlarge'
    elif num_obs > 1000000:
        dataset_size = 'large'
    elif num_obs > 500000:
        dataset_size = 'med'
    elif num_obs > 500:
        dataset_size = 'small'
    else:
        dataset_size = 'xsmall'

    # Setup different tuning profiles for different sizes of datasets.
    tune_spaces = [
        ('initial', {
            'iterations': iterations,
            'space': {
                'xlarge': {
                    'eta': hyperopt.hp.uniform('eta', 0.3, 0.8),
                    # Have seen values of 7 and 10 as best on roughly same size
                    # datasets from different wikis. It really just depends.
                    'max_depth': hyperopt.hp.quniform('max_depth', 6, 11, 1),
                    'min_child_weight': hyperopt.hp.qloguniform(
                        'min_child_weight', np.log(10), np.log(500), 10),
                    # % of features to use for each tree. helps prevent overfit
                    'colsample_bytree': hyperopt.hp.quniform('colsample_bytree', 0.8, 1, .01),
                    'subsample': hyperopt.hp.quniform('subsample', 0.8, 1, .01),
                },
                'large': {
                    'eta': hyperopt.hp.uniform('eta', 0.3, 0.6),
                    'max_depth': hyperopt.hp.quniform('max_depth', 5, 9, 1),
                    'min_child_weight': hyperopt.hp.qloguniform(
                        'min_child_weight', np.log(10), np.log(300), 10),
                    'colsample_bytree': hyperopt.hp.quniform('colsample_bytree', 0.8, 1, .01),
                    'subsample': hyperopt.hp.quniform('subsample', 0.8, 1, .01),
                },
                'med': {
                    'eta': hyperopt.hp.uniform('eta', 0.1, 0.6),
                    'max_depth': hyperopt.hp.quniform('max_depth', 4, 7, 1),
                    'min_child_weight': hyperopt.hp.qloguniform(
                        'min_child_weight', np.log(10), np.log(300), 10),
                    'colsample_bytree': hyperopt.hp.quniform('colsample_bytree', 0.8, 1, .01),
                    'subsample': hyperopt.hp.quniform('subsample', 0.8, 1, .01),
                },
                'small': {
                    'eta': hyperopt.hp.uniform('eta', 0.1, 0.4),
                    'max_depth': hyperopt.hp.quniform('max_depth', 3, 6, 1),
                    'min_child_weight': hyperopt.hp.qloguniform(
                        'min_child_weight', np.log(10), np.log(100), 10),
                    'colsample_bytree': hyperopt.hp.quniform('colsample_bytree', 0.8, 1, .01),
                    'subsample': hyperopt.hp.quniform('subsample', 0.8, 1, .01),
                },
                'xsmall': {
                    'eta': hyperopt.hp.uniform('eta', 0.1, 0.4),
                    'max_depth': hyperopt.hp.quniform('max_depth', 3, 6, 1),
                    # Never use for real data, but convenient for tiny sets in test suite
                    'min_child_weight': 0,
                    'colsample_bytree': hyperopt.hp.quniform('colsample_bytree', 0.8, 1, .01),
                    'subsample': hyperopt.hp.quniform('subsample', 0.8, 1, .01),
                }
            }[dataset_size]
        })
    ]

    # Optional second stage: re-tune eta with the final (larger) tree count.
    if final_num_trees is not None and final_num_trees != initial_num_trees:
        tune_spaces.append(('trees', {
            'iterations': 30,
            'space': {
                'num_rounds': final_num_trees,
                'eta': hyperopt.hp.uniform('eta', 0.1, 0.4),
            }
        }))

    # Baseline parameters to start with. Roughly tuned by what has worked in
    # the past. These vary though depending on number of training samples. These
    # defaults are for the smallest of wikis, which are then overridden for larger
    # wikis
    space = {
        'objective': 'rank:ndcg',
        'eval_metric': 'ndcg@10',
        'num_rounds': initial_num_trees,
        'min_child_weight': 200,
        'max_depth': {
            'xlarge': 7,
            'large': 6,
            'med': 5,
            'small': 4,
            'xsmall': 3,
        }[dataset_size],
        'gamma': 0,
        'subsample': 1.0,
        'colsample_bytree': 0.8,
    }

    tuner = ModelSelection(space, tune_spaces)
    train_func = make_cv_objective(
        partial(train, spark=spark), folds, num_cv_jobs,
        cv_transformer, train_matrix=train_matrix)
    trials_pool = tuner.build_pool(folds, num_cv_jobs)
    return tuner(train_func, trials_pool)
| StarcoderdataPython |
3281025 | # -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: <NAME>
"""
from statsmodels.compat.pandas import assert_series_equal, assert_index_equal
from statsmodels.compat.platform import (PLATFORM_OSX, PLATFORM_LINUX32,
PLATFORM_WIN32)
from statsmodels.compat.scipy import SCIPY_GT_14
import numpy as np
import pandas as pd
import pytest
import statsmodels.api as sm
from statsmodels.tools.sm_exceptions import HessianInversionWarning
import statsmodels.tools._testing as smt
from statsmodels.formula.api import ols, glm
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_equal)
class CheckGenericMixin(object):
    """Shared checks applied to many model classes.

    Subclasses provide ``self.results`` (a fitted results instance) via
    ``setup``; the tests here cross-check generic methods (t_test, wald
    tests, prediction, constrained refits) against each other.
    """

    @classmethod
    def setup_class(cls):
        # Common random design matrix shared by all subclasses.
        nobs = 500
        np.random.seed(987689)
        x = np.random.randn(nobs, 3)
        x = sm.add_constant(x)
        cls.exog = x
        cls.xf = 0.25 * np.ones((2, 4))
        cls.predict_kwds = {}
        cls.transform_index = None

    def test_ttest_tvalues(self):
        # test that t_test has same results a params, bse, tvalues, ...
        smt.check_ttest_tvalues(self.results)

        res = self.results
        mat = np.eye(len(res.params))

        tt = res.t_test(mat[0])
        string_confint = lambda alpha: "[%4.3F %4.3F]" % (
            alpha / 2, 1- alpha / 2)
        summ = tt.summary()  # smoke test for #1323
        assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
        assert_(string_confint(0.05) in str(summ))

        # issue #3116 alpha not used in column headers
        summ = tt.summary(alpha=0.1)
        ss = "[0.05 0.95]"  # different formatting
        assert_(ss in str(summ))

        summf = tt.summary_frame(alpha=0.1)
        pvstring_use_t = 'P>|z|' if res.use_t is False else 'P>|t|'
        tstring_use_t = 'z' if res.use_t is False else 't'
        cols = ['coef', 'std err', tstring_use_t, pvstring_use_t,
                'Conf. Int. Low', 'Conf. Int. Upp.']
        assert_array_equal(summf.columns.values, cols)

    def test_ftest_pvalues(self):
        smt.check_ftest_pvalues(self.results)

    def test_fitted(self):
        smt.check_fitted(self.results)

    def test_predict_types(self):
        smt.check_predict_types(self.results)

    def test_zero_constrained(self):
        # Refit with one exog column forced to zero and compare against a
        # model estimated without that column.
        # not completely generic yet
        if (isinstance(self.results.model, (sm.GEE))):
            # GEE does not subclass LikelihoodModel
            pytest.skip('GEE does not subclass LikelihoodModel')

        use_start_params = not isinstance(self.results.model,
                                          (sm.RLM, sm.OLS, sm.WLS))
        self.use_start_params = use_start_params  # attach for _get_constrained
        keep_index = list(range(self.results.model.exog.shape[1]))
        # index for params might include extra params
        keep_index_p = list(range(self.results.params.shape[0]))
        drop_index = [1]
        for i in drop_index:
            del keep_index[i]
            del keep_index_p[i]

        if use_start_params:
            res1 = self.results.model._fit_zeros(keep_index, maxiter=500,
                                                 start_params=self.results.params)
        else:
            res1 = self.results.model._fit_zeros(keep_index, maxiter=500)

        res2 = self._get_constrained(keep_index, keep_index_p)

        assert_allclose(res1.params[keep_index_p], res2.params, rtol=1e-10,
                        atol=1e-10)
        assert_equal(res1.params[drop_index], 0)
        assert_allclose(res1.bse[keep_index_p], res2.bse, rtol=1e-10,
                        atol=1e-10)
        assert_equal(res1.bse[drop_index], 0)
        # OSX has many slight failures on this test
        tol = 1e-8 if PLATFORM_OSX else 1e-10
        tvals1 = res1.tvalues[keep_index_p]
        assert_allclose(tvals1, res2.tvalues, rtol=tol, atol=tol)

        # See gh5993
        # NOTE(review): both branches below are identical; presumably a
        # platform-specific workaround was reverted — confirm before
        # simplifying.
        if PLATFORM_LINUX32 or SCIPY_GT_14:
            pvals1 = res1.pvalues[keep_index_p]
        else:
            pvals1 = res1.pvalues[keep_index_p]
        assert_allclose(pvals1, res2.pvalues, rtol=tol, atol=tol)

        if hasattr(res1, 'resid'):
            # discrete models, Logit do not have `resid` yet
            # atol discussion at gh-5158
            rtol = 1e-10
            atol = 1e-12
            if PLATFORM_OSX or PLATFORM_WIN32:
                # GH 5628
                rtol = 1e-8
                atol = 1e-10
            assert_allclose(res1.resid, res2.resid, rtol=rtol, atol=atol)

        ex = self.results.model.exog.mean(0)
        predicted1 = res1.predict(ex, **self.predict_kwds)
        predicted2 = res2.predict(ex[keep_index], **self.predict_kwds)
        assert_allclose(predicted1, predicted2, rtol=1e-10)

        ex = self.results.model.exog[:5]
        predicted1 = res1.predict(ex, **self.predict_kwds)
        predicted2 = res2.predict(ex[:, keep_index], **self.predict_kwds)
        assert_allclose(predicted1, predicted2, rtol=1e-10)

    def _get_constrained(self, keep_index, keep_index_p):
        # Refit the model using only the kept exog columns.
        # override in some test classes, no fit_kwds yet, e.g. cov_type
        mod2 = self.results.model
        mod_cls = mod2.__class__
        init_kwds = mod2._get_init_kwds()
        mod = mod_cls(mod2.endog, mod2.exog[:, keep_index], **init_kwds)
        if self.use_start_params:
            res = mod.fit(start_params=self.results.params[keep_index_p],
                          maxiter=500)
        else:
            res = mod.fit(maxiter=500)
        return res

    def test_zero_collinear(self):
        # Duplicate the exog columns to create perfect collinearity and
        # check that _fit_collinear recovers the original fit.
        # not completely generic yet
        if isinstance(self.results.model, (sm.GEE)):
            pytest.skip('Not completely generic yet')

        use_start_params = not isinstance(self.results.model,
                                          (sm.RLM, sm.OLS, sm.WLS, sm.GLM))
        self.use_start_params = use_start_params  # attach for _get_constrained
        keep_index = list(range(self.results.model.exog.shape[1]))
        # index for params might include extra params
        keep_index_p = list(range(self.results.params.shape[0]))
        drop_index = []
        for i in drop_index:
            del keep_index[i]
            del keep_index_p[i]

        keep_index_p = list(range(self.results.params.shape[0]))

        # create collinear model
        mod2 = self.results.model
        mod_cls = mod2.__class__
        init_kwds = mod2._get_init_kwds()
        ex = np.column_stack((mod2.exog, mod2.exog))
        mod = mod_cls(mod2.endog, ex, **init_kwds)

        keep_index = list(range(self.results.model.exog.shape[1]))
        keep_index_p = list(range(self.results.model.exog.shape[1]))
        k_vars = ex.shape[1]
        k_extra = 0
        if hasattr(mod, 'k_extra') and mod.k_extra > 0:
            keep_index_p += list(range(k_vars, k_vars + mod.k_extra))
            k_extra = mod.k_extra

        # TODO: Can we choose a test case without this issue?
        # If not, should we be getting this warning for all
        # model subclasses?
        warn_cls = HessianInversionWarning if isinstance(mod, sm.GLM) else None

        cov_types = ['nonrobust', 'HC0']

        for cov_type in cov_types:
            # Note: for RLM we only check default when cov_type is 'nonrobust'
            # cov_type is otherwise ignored
            if cov_type != 'nonrobust' and (isinstance(self.results.model,
                                                       sm.RLM)):
                return

            if use_start_params:
                start_params = np.zeros(k_vars + k_extra)
                method = self.results.mle_settings['optimizer']
                # string in `method` is not mutable, so no need for copy
                sp = self.results.mle_settings['start_params'].copy()
                if self.transform_index is not None:
                    # work around internal transform_params, currently in NB
                    sp[self.transform_index] = np.exp(sp[self.transform_index])
                start_params[keep_index_p] = sp
                with pytest.warns(warn_cls):
                    res1 = mod._fit_collinear(cov_type=cov_type,
                                              start_params=start_params,
                                              method=method, disp=0)
                if cov_type != 'nonrobust':
                    # reestimate original model to get robust cov
                    with pytest.warns(warn_cls):
                        res2 = self.results.model.fit(cov_type=cov_type,
                                                      start_params=sp,
                                                      method=method, disp=0)
            else:
                with pytest.warns(warn_cls):
                    # more special casing RLM
                    if (isinstance(self.results.model, (sm.RLM))):
                        res1 = mod._fit_collinear()
                    else:
                        res1 = mod._fit_collinear(cov_type=cov_type)
                if cov_type != 'nonrobust':
                    # reestimate original model to get robust cov
                    res2 = self.results.model.fit(cov_type=cov_type)

            if cov_type == 'nonrobust':
                res2 = self.results

            # check fit optimizer arguments, if mle_settings is available
            if hasattr(res2, 'mle_settings'):
                assert_equal(res1.results_constrained.mle_settings['optimizer'],
                             res2.mle_settings['optimizer'])
                if 'start_params' in res2.mle_settings:
                    spc = res1.results_constrained.mle_settings['start_params']
                    assert_allclose(spc,
                                    res2.mle_settings['start_params'],
                                    rtol=1e-10, atol=1e-20)
                    assert_equal(res1.mle_settings['optimizer'],
                                 res2.mle_settings['optimizer'])
                    assert_allclose(res1.mle_settings['start_params'],
                                    res2.mle_settings['start_params'],
                                    rtol=1e-10, atol=1e-20)

            # Poisson has reduced precision in params, difficult optimization?
            assert_allclose(res1.params[keep_index_p], res2.params, rtol=1e-6)
            assert_allclose(res1.params[drop_index], 0, rtol=1e-10)
            assert_allclose(res1.bse[keep_index_p], res2.bse, rtol=1e-8)
            assert_allclose(res1.bse[drop_index], 0, rtol=1e-10)
            tvals1 = res1.tvalues[keep_index_p]
            assert_allclose(tvals1, res2.tvalues, rtol=5e-8)

            # See gh5993
            # NOTE(review): identical branches, same situation as in
            # test_zero_constrained above — confirm before simplifying.
            if PLATFORM_LINUX32 or SCIPY_GT_14:
                pvals1 = res1.pvalues[keep_index_p]
            else:
                pvals1 = res1.pvalues[keep_index_p]
            assert_allclose(pvals1, res2.pvalues, rtol=1e-6, atol=1e-30)

            if hasattr(res1, 'resid'):
                # discrete models, Logit do not have `resid` yet
                assert_allclose(res1.resid, res2.resid, rtol=1e-5, atol=1e-10)

            ex = res1.model.exog.mean(0)
            predicted1 = res1.predict(ex, **self.predict_kwds)
            predicted2 = res2.predict(ex[keep_index], **self.predict_kwds)
            assert_allclose(predicted1, predicted2, rtol=1e-8, atol=1e-11)

            ex = res1.model.exog[:5]
            kwds = getattr(self, 'predict_kwds_5', {})
            predicted1 = res1.predict(ex, **kwds)
            predicted2 = res2.predict(ex[:, keep_index], **kwds)
            assert_allclose(predicted1, predicted2, rtol=1e-8, atol=1e-11)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
# Each subclass only provides `setup`, fitting a fresh model per test so
# tests that mutate the results object do not interfere with each other.


class TestGenericOLS(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y = x.sum(1) + np.random.randn(x.shape[0])
        self.results = sm.OLS(y, self.exog).fit()


class TestGenericOLSOneExog(CheckGenericMixin):
    # check with single regressor (no constant)

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog[:, 1]
        np.random.seed(987689)
        y = x + np.random.randn(x.shape[0])
        self.results = sm.OLS(y, x).fit()

    def test_zero_constrained(self):
        # override, we cannot remove the only regressor
        pytest.skip('Override since cannot remove the only regressor')
        pass


class TestGenericWLS(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y = x.sum(1) + np.random.randn(x.shape[0])
        self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()


class TestGenericPoisson(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        model = sm.Poisson(y_count, x)
        # use start_params to converge faster
        start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
        self.results = model.fit(start_params=start_params, method='bfgs',
                                 disp=0)


class TestGenericPoissonOffset(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        nobs = x.shape[0]
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        model = sm.Poisson(y_count, x, offset=0.01 * np.ones(nobs),
                           exposure=np.ones(nobs))  # bug with default
        # use start_params to converge faster
        start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
        self.results = model.fit(start_params=start_params, method='bfgs',
                                 disp=0)

        # kwargs forwarded to predict() by the generic checks
        self.predict_kwds_5 = dict(exposure=0.01 * np.ones(5), offset=np.ones(5))
        self.predict_kwds = dict(exposure=1, offset=0)


class TestGenericNegativeBinomial(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        np.random.seed(987689)
        data = sm.datasets.randhie.load(as_pandas=False)
        exog = sm.add_constant(data.exog, prepend=False)
        mod = sm.NegativeBinomial(data.endog, exog)
        start_params = np.array([-0.05783623, -0.26655806, 0.04109148, -0.03815837,
                                 0.2685168 , 0.03811594, -0.04426238, 0.01614795,
                                 0.17490962, 0.66461151, 1.2925957 ])
        self.results = mod.fit(start_params=start_params, disp=0, maxiter=500)
        # last param (alpha) is internally log-transformed; see
        # test_zero_collinear in the mixin
        self.transform_index = -1


class TestGenericLogit(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        nobs = x.shape[0]
        np.random.seed(987689)
        y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
        model = sm.Logit(y_bin, x)  #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
        # use start_params to converge faster
        start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
        self.results = model.fit(start_params=start_params, method='bfgs', disp=0)


class TestGenericRLM(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y = x.sum(1) + np.random.randn(x.shape[0])
        self.results = sm.RLM(y, self.exog).fit()


class TestGenericGLM(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y = x.sum(1) + np.random.randn(x.shape[0])
        self.results = sm.GLM(y, self.exog).fit()


class TestGenericGLMPoissonOffset(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        nobs = x.shape[0]
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        model = sm.GLM(y_count, x, family=sm.families.Poisson(),
                       offset=0.01 * np.ones(nobs),
                       exposure=np.ones(nobs))
        # use start_params to converge faster
        start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
        self.results = model.fit(start_params=start_params, method='bfgs',
                                 disp=0)

        # kwargs forwarded to predict() by the generic checks
        self.predict_kwds_5 = dict(exposure=0.01 * np.ones(5), offset=np.ones(5))
        self.predict_kwds = dict(exposure=1, offset=0)


class TestGenericGEEPoisson(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        groups = np.random.randint(0, 4, size=x.shape[0])
        # use start_params to speed up test, difficult convergence not tested
        start_params = np.array([0., 1., 1., 1.])

        vi = sm.cov_struct.Independence()
        family = sm.families.Poisson()
        self.results = sm.GEE(y_count, self.exog, groups, family=family,
                              cov_struct=vi).fit(start_params=start_params)


class TestGenericGEEPoissonNaive(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        #y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
        groups = np.random.randint(0, 4, size=x.shape[0])
        # use start_params to speed up test, difficult convergence not tested
        start_params = np.array([0., 1., 1., 1.])

        vi = sm.cov_struct.Independence()
        family = sm.families.Poisson()
        self.results = sm.GEE(y_count, self.exog, groups, family=family,
                              cov_struct=vi).fit(start_params=start_params,
                                                 cov_type='naive')


class TestGenericGEEPoissonBC(CheckGenericMixin):

    def setup(self):
        # fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        #y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
        y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
        groups = np.random.randint(0, 4, size=x.shape[0])
        # use start_params to speed up test, difficult convergence not tested
        start_params = np.array([0., 1., 1., 1.])
        # params_est = np.array([-0.0063238 ,  0.99463752,  1.02790201,  0.98080081])

        vi = sm.cov_struct.Independence()
        family = sm.families.Poisson()
        mod = sm.GEE(y_count, self.exog, groups, family=family, cov_struct=vi)
        self.results = mod.fit(start_params=start_params,
                               cov_type='bias_reduced')
# Other test classes


class CheckAnovaMixin(object):
    """Checks for wald_test_terms against explicit wald_test contrasts.

    Subclasses provide ``cls.res`` via ``initialize``.
    """

    @classmethod
    def setup_class(cls):
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()

        # drop a few rows so the design is unbalanced
        cls.data = test.data.drop([0,1,2])
        cls.initialize()

    def test_combined(self):
        res = self.res
        wa = res.wald_test_terms(skip_single=False, combine_terms=['Duration', 'Weight'])
        eye = np.eye(len(res.params))
        c_const = eye[0]
        c_w = eye[[2,3]]
        c_d = eye[1]
        c_dw = eye[[4,5]]
        c_weight = eye[2:6]
        c_duration = eye[[1, 4, 5]]

        compare_waldres(res, wa, [c_const, c_d, c_w, c_dw, c_duration, c_weight])

    def test_categories(self):
        # test only multicolumn terms
        res = self.res
        wa = res.wald_test_terms(skip_single=True)
        eye = np.eye(len(res.params))
        c_w = eye[[2,3]]
        c_dw = eye[[4,5]]

        compare_waldres(res, wa, [c_w, c_dw])


def compare_waldres(res, wa, constrasts):
    """Compare wald_test_terms result `wa` with individual wald_test calls
    for each contrast matrix in `constrasts`.
    """
    for i, c in enumerate(constrasts):
        wt = res.wald_test(c)
        assert_allclose(wa.table.values[i, 0], wt.statistic)
        assert_allclose(wa.table.values[i, 1], wt.pvalue)
        df = c.shape[0] if c.ndim == 2 else 1
        assert_equal(wa.table.values[i, 2], df)
        # attributes
        assert_allclose(wa.statistic[i], wt.statistic)
        assert_allclose(wa.pvalues[i], wt.pvalue)
        assert_equal(wa.df_constraints[i], df)
        if res.use_t:
            assert_equal(wa.df_denom[i], res.df_resid)

    # column headers depend on whether the distribution is F or chi2
    col_names = wa.col_names
    if res.use_t:
        assert_equal(wa.distribution, 'F')
        assert_equal(col_names[0], 'F')
        assert_equal(col_names[1], 'P>F')
    else:
        assert_equal(wa.distribution, 'chi2')
        assert_equal(col_names[0], 'chi2')
        assert_equal(col_names[1], 'P>chi2')

    # SMOKETEST
    wa.summary_frame()
class TestWaldAnovaOLS(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
        cls.res = mod.fit(use_t=False)

    def test_noformula(self):
        # this verifies single and composite constraints against explicit
        # wald test
        endog = self.res.model.endog
        exog = self.res.model.data.orig_exog
        exog = pd.DataFrame(exog)

        res = sm.OLS(endog, exog).fit()
        wa = res.wald_test_terms(skip_single=False,
                                 combine_terms=['Duration', 'Weight'])
        eye = np.eye(len(res.params))
        c_single = [row for row in eye]
        c_weight = eye[2:6]
        c_duration = eye[[1, 4, 5]]

        compare_waldres(res, wa, c_single + [c_duration, c_weight])

        # assert correct df_constraints, see #5475 for bug in single constraint
        df_constraints = [1] * len(c_single) + [3, 4]
        assert_equal(wa.df_constraints, df_constraints)


class TestWaldAnovaOLSF(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
        cls.res = mod.fit()  # default use_t=True

    def test_predict_missing(self):
        # predict should align on the original index and emit NaN for rows
        # with missing exog values
        ex = self.data[:5].copy()
        ex.iloc[0, 1] = np.nan
        predicted1 = self.res.predict(ex)
        predicted2 = self.res.predict(ex[1:])

        assert_index_equal(predicted1.index, ex.index)
        assert_series_equal(predicted1[1:], predicted2)
        assert_equal(predicted1.values[0], np.nan)


class TestWaldAnovaGLM(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        mod = glm("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
        cls.res = mod.fit(use_t=False)


class TestWaldAnovaPoisson(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        from statsmodels.discrete.discrete_model import Poisson

        mod = Poisson.from_formula("Days ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
        cls.res = mod.fit(cov_type='HC0')


class TestWaldAnovaNegBin(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        from statsmodels.discrete.discrete_model import NegativeBinomial

        formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
        mod = NegativeBinomial.from_formula(formula, cls.data,
                                            loglike_method='nb2')
        cls.res = mod.fit()


class TestWaldAnovaNegBin1(CheckAnovaMixin):

    @classmethod
    def initialize(cls):
        from statsmodels.discrete.discrete_model import NegativeBinomial

        formula = "Days ~ C(Duration, Sum)*C(Weight, Sum)"
        mod = NegativeBinomial.from_formula(formula, cls.data,
                                            loglike_method='nb1')
        cls.res = mod.fit(cov_type='HC0')
class CheckPairwise(object):
    """Check t_test_pairwise against an explicit list of t_test contrasts.

    Subclasses provide ``cls.res``, ``cls.term_name`` and ``cls.constraints``.
    """

    def test_default(self):
        res = self.res

        tt = res.t_test(self.constraints)
        pw = res.t_test_pairwise(self.term_name)
        pw_frame = pw.result_frame
        # first six columns of the pairwise frame match the t_test summary
        assert_allclose(pw_frame.iloc[:, :6].values,
                        tt.summary_frame().values)


class TestTTestPairwiseOLS(CheckPairwise):

    @classmethod
    def setup_class(cls):
        from statsmodels.formula.api import ols
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()
        cls.data = test.data.drop([0,1,2])

        mod = ols("np.log(Days+1) ~ C(Duration) + C(Weight)", cls.data)
        cls.res = mod.fit()
        cls.term_name = "C(Weight)"
        cls.constraints = ['C(Weight)[T.2]',
                           'C(Weight)[T.3]',
                           'C(Weight)[T.3] - C(Weight)[T.2]']

    def test_alpha(self):
        # multiple-testing correction and custom labels/alpha
        pw1 = self.res.t_test_pairwise(self.term_name, method='hommel',
                                       factor_labels='A B C'.split())
        pw2 = self.res.t_test_pairwise(self.term_name, method='hommel',
                                       alpha=0.01)
        assert_allclose(pw1.result_frame.iloc[:, :7].values,
                        pw2.result_frame.iloc[:, :7].values, rtol=1e-10)
        # last column is the reject decision, which depends on alpha
        assert_equal(pw1.result_frame.iloc[:, -1].values,
                     [True]*3)
        assert_equal(pw2.result_frame.iloc[:, -1].values,
                     [False, True, False])

        assert_equal(pw1.result_frame.index.values,
                     np.array(['B-A', 'C-A', 'C-B'], dtype=object))


class TestTTestPairwiseOLS2(CheckPairwise):
    # same as above but with the term order swapped in the formula

    @classmethod
    def setup_class(cls):
        from statsmodels.formula.api import ols
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()
        cls.data = test.data.drop([0,1,2])

        mod = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", cls.data)
        cls.res = mod.fit()
        cls.term_name = "C(Weight)"
        cls.constraints = ['C(Weight)[T.2]',
                           'C(Weight)[T.3]',
                           'C(Weight)[T.3] - C(Weight)[T.2]']


class TestTTestPairwiseOLS3(CheckPairwise):
    # no-intercept parameterization

    @classmethod
    def setup_class(cls):
        from statsmodels.formula.api import ols
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()
        cls.data = test.data.drop([0,1,2])

        mod = ols("np.log(Days+1) ~ C(Weight) + C(Duration) - 1", cls.data)
        cls.res = mod.fit()
        cls.term_name = "C(Weight)"
        cls.constraints = ['C(Weight)[2] - C(Weight)[1]',
                           'C(Weight)[3] - C(Weight)[1]',
                           'C(Weight)[3] - C(Weight)[2]']


class TestTTestPairwiseOLS4(CheckPairwise):
    # non-default reference level (Treatment(2))

    @classmethod
    def setup_class(cls):
        from statsmodels.formula.api import ols
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()
        cls.data = test.data.drop([0,1,2])

        mod = ols("np.log(Days+1) ~ C(Weight, Treatment(2)) + C(Duration)", cls.data)
        cls.res = mod.fit()
        cls.term_name = "C(Weight, Treatment(2))"
        cls.constraints = ['-C(Weight, Treatment(2))[T.1]',
                           'C(Weight, Treatment(2))[T.3] - C(Weight, Treatment(2))[T.1]',
                           'C(Weight, Treatment(2))[T.3]',]


class TestTTestPairwisePoisson(CheckPairwise):

    @classmethod
    def setup_class(cls):
        from statsmodels.discrete.discrete_model import Poisson
        import statsmodels.stats.tests.test_anova as ttmod

        test = ttmod.TestAnova3()
        test.setup_class()
        cls.data = test.data.drop([0,1,2])

        mod = Poisson.from_formula("Days ~ C(Duration) + C(Weight)", cls.data)
        cls.res = mod.fit(cov_type='HC0')
        cls.term_name = "C(Weight)"
        cls.constraints = ['C(Weight)[T.2]',
                           'C(Weight)[T.3]',
                           'C(Weight)[T.3] - C(Weight)[T.2]']
| StarcoderdataPython |
61272 | import ctypes
"""
/*=======================================================================*
* Fixed width word size data types: *
* int8_T, int16_T, int32_T - signed 8, 16, or 32 bit integers *
* uint8_T, uint16_T, uint32_T - unsigned 8, 16, or 32 bit integers *
* real32_T, real64_T - 32 and 64 bit floating point numbers *
*=======================================================================*/
"""
int8_T = ctypes.c_byte
uint8_T = ctypes.c_ubyte
int16_T = ctypes.c_short
uint16_T = ctypes.c_ushort
int32_T = ctypes.c_int
uint32_T = ctypes.c_uint
int64_T = ctypes.c_longlong
uint64_T = ctypes.c_ulonglong
real32_T = ctypes.c_float
real64_T = ctypes.c_double
"""
/*===========================================================================*
* Generic type definitions: boolean_T, char_T, byte_T, int_T, uint_T, *
* real_T, time_T, ulong_T, ulonglong_T. *
*===========================================================================*/
"""
real_T = ctypes.c_double
time_T = ctypes.c_double
boolean_T = ctypes.c_ubyte
int_T = ctypes.c_int
uint_T = ctypes.c_uint
ulong_T = ctypes.c_ulong
ulonglong_T = ctypes.c_ulonglong
char_T = ctypes.c_char
uchar_T = ctypes.c_ubyte
char_T = ctypes.c_byte
"""
/*===========================================================================*
* Complex number type definitions *
*===========================================================================*/
"""
# ctypes mirrors of the rtwtypes.h complex-number structs. Each pairs a
# real part ("re") with an imaginary part ("im") of the matching scalar type.


class creal32_T(ctypes.Structure):
    """Complex single-precision float (creal32_T)."""
    _fields_ = [("re", real32_T), ("im", real32_T)]


class creal64_T(ctypes.Structure):
    """Complex double-precision float (creal64_T)."""
    _fields_ = [("re", real64_T), ("im", real64_T)]


class creal_T(ctypes.Structure):
    """Complex real_T (creal_T)."""
    _fields_ = [("re", real_T), ("im", real_T)]


class cint8_T(ctypes.Structure):
    """Complex signed 8-bit integer (cint8_T)."""
    _fields_ = [("re", int8_T), ("im", int8_T)]


class cuint8_T(ctypes.Structure):
    """Complex unsigned 8-bit integer (cuint8_T)."""
    _fields_ = [("re", uint8_T), ("im", uint8_T)]


class cint16_T(ctypes.Structure):
    """Complex signed 16-bit integer (cint16_T)."""
    _fields_ = [("re", int16_T), ("im", int16_T)]


class cuint16_T(ctypes.Structure):
    """Complex unsigned 16-bit integer (cuint16_T)."""
    _fields_ = [("re", uint16_T), ("im", uint16_T)]


class cint32_T(ctypes.Structure):
    """Complex signed 32-bit integer (cint32_T)."""
    _fields_ = [("re", int32_T), ("im", int32_T)]


class cuint32_T(ctypes.Structure):
    """Complex unsigned 32-bit integer (cuint32_T)."""
    _fields_ = [("re", uint32_T), ("im", uint32_T)]


class cint64_T(ctypes.Structure):
    """Complex signed 64-bit integer (cint64_T)."""
    _fields_ = [("re", int64_T), ("im", int64_T)]


class cuint64_T(ctypes.Structure):
    """Complex unsigned 64-bit integer (cuint64_T)."""
    _fields_ = [("re", uint64_T), ("im", uint64_T)]
3231668 | # Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import timeout as etimeout
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from taskflow.patterns import linear_flow
import masakari.conf
from masakari.engine.drivers.taskflow import base
from masakari import exception
CONF = masakari.conf.CONF
LOG = logging.getLogger(__name__)
ACTION = "process:recovery"
TASKFLOW_CONF = cfg.CONF.taskflow_driver_recovery_flows
class DisableComputeNodeTask(base.MasakariTask):
    """Taskflow task that disables the nova-compute service on a host."""

    def __init__(self, context, novaclient, **kwargs):
        kwargs['requires'] = ["process_name", "host_name"]
        super(DisableComputeNodeTask, self).__init__(context, novaclient,
                                                     **kwargs)

    def execute(self, process_name, host_name):
        self.update_details(
            "Disabling compute service on host: '%s'" % host_name)

        already_disabled = self.novaclient.is_service_down(
            self.context, host_name, process_name)
        if already_disabled:
            # Nothing to do: nova already reports this service as disabled.
            msg = ("Skipping recovery for process %(process_name)s as it is "
                   "already disabled") % {'process_name': process_name}
            LOG.info(msg)
            self.update_details(msg, 1.0)
            return

        # disable compute node on given host
        self.novaclient.enable_disable_service(self.context, host_name)
        self.update_details(
            "Disabled compute service on host: '%s'" % host_name, 1.0)
class ConfirmComputeNodeDisabledTask(base.MasakariTask):
    # Polls nova until the compute service on the failed host reports as
    # disabled; raises ProcessRecoveryFailureException on timeout.

    def __init__(self, context, novaclient, **kwargs):
        kwargs['requires'] = ["process_name", "host_name"]
        super(ConfirmComputeNodeDisabledTask, self).__init__(context,
                                                             novaclient,
                                                             **kwargs)

    def execute(self, process_name, host_name):
        def _wait_for_disable():
            # Runs every CONF.verify_interval seconds; raising
            # LoopingCallDone stops the periodic call once nova reports the
            # service as down.
            service_disabled = self.novaclient.is_service_down(
                self.context, host_name, process_name)
            if service_disabled:
                raise loopingcall.LoopingCallDone()

        periodic_call = loopingcall.FixedIntervalLoopingCall(
            _wait_for_disable)

        try:
            msg = "Confirming compute service is disabled on host: '%s'" % (
                host_name)
            self.update_details(msg)

            # add a timeout to the periodic call.
            periodic_call.start(interval=CONF.verify_interval)
            etimeout.with_timeout(
                CONF.wait_period_after_service_update,
                periodic_call.wait)

            msg = "Confirmed compute service is disabled on host: '%s'" % (
                host_name)
            self.update_details(msg, 1.0)
        except etimeout.Timeout:
            # Service never showed up as disabled within the allowed window.
            msg = "Failed to disable service %(process_name)s" % {
                'process_name': process_name
            }
            self.update_details(msg, 1.0)
            raise exception.ProcessRecoveryFailureException(
                message=msg)
        finally:
            # stop the periodic call, in case of exceptions or Timeout.
            periodic_call.stop()
def get_compute_process_recovery_flow(context, novaclient, process_what):
    """Constructs and returns the engine entrypoint flow.

    This flow will do the following:

    1. Disable nova-compute process
    2. Confirm nova-compute process is disabled

    :param context: request context
    :param novaclient: nova client wrapper used by the recovery tasks
    :param process_what: dict of inputs stored into the taskflow engine
    :return: loaded taskflow engine for the recovery flow
    """
    flow_name = ACTION.replace(":", "_") + "_engine"
    nested_flow = linear_flow.Flow(flow_name)
    task_dict = TASKFLOW_CONF.process_failure_recovery_tasks

    # Build the pre/main/post sub-flows with one loop instead of three
    # copy-pasted blocks; behavior and sub-flow names are unchanged.
    for phase in ('pre', 'main', 'post'):
        phase_flow = linear_flow.Flow('%s_tasks' % phase)
        for plugin in base.get_recovery_flow(task_dict[phase], context=context,
                                             novaclient=novaclient):
            phase_flow.add(plugin)
        nested_flow.add(phase_flow)

    return base.load_taskflow_into_engine(ACTION, nested_flow,
                                          process_what)
| StarcoderdataPython |
# get_image_url.py
import urllib.request
import time
import flickr
N = 5  # number of photos to request per query
PATH = "data/queries.txt"  # input file: one search query per line
def get_queries(path):
    """Read search queries from *path*, one per line.

    Whitespace is stripped and blank lines are skipped, so empty queries
    are never returned (previously a blank line produced an '' query).

    :param path: path to a text file with one query per line
    :return: list of non-empty query strings
    """
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]
if __name__ == "__main__":
queries = get_queries(PATH)
for q in queries:
flickr.search_photos(q, n=N, sort="relevance")
time.sleep(1)
| StarcoderdataPython |
183335 | """
Creates the views for the:
- index page
- FAQ page
- About page
- Contact Us page
"""
from django.shortcuts import render
def get_index(request):
    """Render the site landing page.

    :param request: the incoming HTTP request
    :return: the rendered ``index.html`` response
    """
    return render(request, template_name='index.html')
def get_faq(request):
    """Render the frequently-asked-questions page.

    :param request: the incoming HTTP request
    :return: the rendered ``faq.html`` response
    """
    return render(request, template_name='faq.html')
def get_about(request):
    """Render the About Us page.

    :param request: the incoming HTTP request
    :return: the rendered ``about.html`` response
    """
    return render(request, template_name='about.html')
def get_contact(request):
    """Render the Contact Us page.

    :param request: the incoming HTTP request
    :return: the rendered ``contact.html`` response
    """
    return render(request, template_name='contact.html')
| StarcoderdataPython |
import pandas as pd
import json
import glob
def get_catalog(lst: list, num: int = 5):
    """Append catalogue JSON paths found at nesting depths ``num`` down to 1.

    Iterative equivalent of the original tail recursion: for each depth d
    it globs ``jsons_unzipped/<d '*/' segments>/cata*.json`` and extends
    ``lst`` in place, then returns the same list object.
    """
    for depth in range(num, 0, -1):
        lst += glob.glob(f"jsons_unzipped/{'*/'*depth}/cata*.json", recursive=True)
    return lst
# Collect every catalogue JSON up to 7 directory levels deep.
cats = get_catalog([], 7)
# Project name = second-to-last component of each (Windows-style) path.
cols = [c.split('\\')[-2] for c in cats]
periods = []  # populated and de-duplicated by create_lists()
langs = []  # populated and de-duplicated by create_lists()
def get_period(elem: dict):
    """Return the element's period as a string, or '' when absent/falsy."""
    value = elem.get("period")
    return f"{value}" if value else ""
def get_language(elem: dict):
    """Return the element's language as a string, or '' when absent/falsy."""
    value = elem.get("language")
    return f"{value}" if value else ""
def files_write(langs: list, period: list):
    """Dump the period and language lists to their text files.

    writelines() adds no separators, so items are written back-to-back
    exactly as before.
    """
    # NOTE(review): output names look like typos ("periob ds", "langhjs")
    # but are kept verbatim -- confirm intent before renaming.
    targets = (
        ("data/lists/periob ds.txt", period),
        ("data/lists/langhjs.txt", langs),
    )
    for path, lines in targets:
        with open(path, "w", encoding="utf_8") as fh:
            fh.writelines(lines)
def create_lists(cats: list):
    """Populate the global ``langs``/``periods`` lists from catalogue files.

    Each catalogue JSON is expected to hold a ``members`` mapping whose
    values may carry ``period``/``language`` keys.  Unreadable or malformed
    files are skipped.  Both globals are de-duplicated at the end.
    """
    global langs, periods
    for c in cats:
        try:
            with open(c, "r", encoding="utf_8") as file:
                data = json.load(file)["members"]
            for d in data:
                periods.append(get_period(data[d]))
                langs.append(get_language(data[d]))
        # NOTE(review): bare except silently skips any failure (missing file,
        # bad JSON, missing "members" key) -- consider narrowing to Exception.
        except:
            continue
    langs = list(set(langs))
    periods = list(set(periods))
def count_langs(lang: str):
    """Build a row dict {"Name": lang, <project>: count, ...}.

    Counts how many members of each catalogue (globals ``cats``/``cols``)
    are tagged with ``lang``; catalogues that fail to parse are skipped.
    """
    lag = {}
    lag["Name"] = lang
    global cats
    # global l
    global cols
    for c, projects in zip(cats, cols):
        counter = 0
        try:
            with open(c, "r", encoding="utf_8") as file:
                data = json.load(file)["members"]
            for d in data:
                counter += 1 if get_language(data[d]) == lang else 0
            lag[projects] = counter
        # NOTE(review): bare except drops this project's count entirely.
        except:
            continue
    return lag
if __name__ == "__main__":
create_lists(cats)
df = pd.DataFrame()
for l in langs:
try:
namen = count_langs(l)
if namen["Name"] == "":
namen.update({"Name": "None"})
df = df.append(namen, True)
except ValueError:
continue
with pd.ExcelWriter("data/lists/___excel.file.xlsx", "openpyxl") as xl:
df.to_excel(xl, index=False)
| StarcoderdataPython |
80199 |
import torch
import torch.nn as nn
#==============================<Abstract Classes>==============================#
"""
Class that acts as the base building-blocks of ProgNets.
Includes a module (usually a single layer),
a set of lateral modules, and an activation.
"""
class ProgBlock(nn.Module):
"""
Runs the block on input x.
Returns output tensor or list of output tensors.
"""
def runBlock(self, x):
raise NotImplementedError
"""
Runs lateral i on input x.
Returns output tensor or list of output tensors.
"""
def runLateral(self, i, x):
raise NotImplementedError
"""
Runs activation of the block on x.
Returns output tensor or list of output tensors.
"""
def runActivation(self, x):
raise NotImplementedError
"""
Returns a dictionary of data about the block.
"""
def getData(self):
raise NotImplementedError
"""
Returns True if block is meant to contain laterals.
Returns False if block is meant to be a utility with not lateral inputs.
Default is True.
"""
def isLateralized(self):
return True
"""
Conveniance class for un-lateralized blocks.
"""
class ProgInertBlock(ProgBlock):
def isLateralized(self):
return False
"""
A special case of ProgBlock with multiple paths.
"""
'''
class ProgMultiBlock(ProgBlock):
"""
Returns a list of booleans (pass_list).
Length of the pass_list is equal to the number of channels in the block.
Channels that return True do not operate on their inputs, and simply pass them to the next block.
"""
def getPassDescriptor(self):
raise NotImplementedError
def getNumChannels(self):
raise NotImplementedError
'''
"""
Class that generates new ProgColumns using the method generateColumn.
The parentCols list will contain references to each parent column,
such that columns can access lateral outputs.
Additional information may be passed through the msg argument in
generateColumn and ProgNet.addColumn.
"""
class ProgColumnGenerator:
def generateColumn(self, parentCols, msg = None):
raise NotImplementedError
#============================<ProgColumn & ProgNet>============================#
"""
A column representing one sequential ANN with all of its lateral modules.
Outputs of the last forward run are stored for child column laterals.
Output of each layer is calculated as:
y = activation(block(x) + sum(laterals(x)))
colID -- A unique identifier for the column.
blockList -- A list of ProgBlocks that will be run sequentially.
parentCols -- A list of pointers to columns that will be laterally connectected.
If the list is empty, the column is unlateralized.
"""
class ProgColumn(nn.Module):
    """One sequential column of a ProgNet plus its lateral modules.

    Per-row outputs of the latest forward pass are cached in
    ``lastOutputList`` so child columns can consume them as laterals.
    """
    def __init__(self, colID, blockList, parentCols = []):
        super().__init__()
        self.colID = colID
        self.isFrozen = False
        self.parentCols = parentCols
        self.blocks = nn.ModuleList(blockList)
        self.numRows = len(blockList)
        self.lastOutputList = []
    def freeze(self, unfreeze = False):
        """Freeze (or with unfreeze=True, unfreeze) all column parameters."""
        if not unfreeze:   # Freeze params.
            self.isFrozen = True
            for param in self.parameters(): param.requires_grad = False
        else:              # Unfreeze params.
            self.isFrozen = False
            for param in self.parameters(): param.requires_grad = True
    def getData(self):
        """Return a serializable summary dict describing the column."""
        data = dict()
        data["colID"] = self.colID
        data["rows"] = self.numRows
        data["frozen"] = self.isFrozen
        #data["last_outputs"] = self.lastOutputList
        data["blocks"] = [block.getData() for block in self.blocks]
        data["parent_cols"] = [col.colID for col in self.parentCols]
        return data
    def forward(self, input):
        """Run every row on ``input``; caches per-row outputs for laterals."""
        outputs = []
        x = input
        for r, block in enumerate(self.blocks):
            #if isinstance(block, ProgMultiBlock):
                #y = self.__forwardMulti(x, r, block)
            #else:
            y = self.__forwardSimple(x, r, block)
            outputs.append(y)
            x = y
        self.lastOutputList = outputs
        return outputs[-1]
    def __forwardSimple(self, x, row, block):
        # y = activation(block(x) + sum of laterals over parent columns).
        currOutput = block.runBlock(x)
        if not block.isLateralized() or row == 0 or len(self.parentCols) < 1:
            y = block.runActivation(currOutput)
        elif isinstance(currOutput, list):
            # Multi-output block: add laterals element-wise, but only where
            # both the block and the lateral produced a value.
            for c, col in enumerate(self.parentCols):
                lats = block.runLateral(c, col.lastOutputList[row - 1])
                for i in range(len(currOutput)):
                    if currOutput[i] is not None and lats[i] is not None:
                        currOutput[i] += lats[i]
            y = block.runActivation(currOutput)
        else:
            for c, col in enumerate(self.parentCols):
                currOutput += block.runLateral(c, col.lastOutputList[row - 1])
            y = block.runActivation(currOutput)
        return y
    def __forwardMulti(self, x, row, block):
        # Legacy path for ProgMultiBlock; currently disabled in forward().
        if not isinstance(x, list):
            raise ValueError("[Doric]: Multiblock input must be a python list of inputs.")
        currOutput = block.runBlock(x)
        if not block.isLateralized() or row == 0 or len(self.parentCols) < 1:
            y = block.runActivation(currOutput)
        else:
            for c, col in enumerate(self.parentCols):
                lats = block.runLateral(c, col.lastOutputList[row - 1])
                for i, p in enumerate(block.getPassDescriptor()):
                    if not p: currOutput[i] += lats[i]
            y = block.runActivation(currOutput)
        return y
"""
A progressive neural network as described in Progressive Neural Networks (Rusu et al.).
Columns can be added manually or with a ProgColumnGenerator.
https://arxiv.org/abs/1606.04671
"""
class ProgNet(nn.Module):
    """Progressive network: an ordered set of ProgColumns addressed by colID."""
    def __init__(self, colGen = None):
        super().__init__()
        self.columns = nn.ModuleList()
        self.numRows = None   # fixed by the first column added
        self.numCols = 0
        self.colMap = dict()  # colID -> index into self.columns
        self.colGen = colGen  # optional ProgColumnGenerator
    def addColumn(self, col = None, msg = None):
        """Register ``col`` (or generate one via colGen); returns its colID."""
        if not col:
            if self.colGen is None:
                raise ValueError("[Doric]: No column or generator supplied.")
            parents = [colRef for colRef in self.columns]
            col = self.colGen.generateColumn(parents, msg)
        self.columns.append(col)
        if col.colID in self.colMap:
            raise ValueError("[Doric]: Column ID must be unique.")
        self.colMap[col.colID] = self.numCols
        if self.numRows is None:
            self.numRows = col.numRows
        else:
            if self.numRows != col.numRows:
                raise ValueError("[Doric]: Each column must have equal number of rows.")
        self.numCols += 1
        return col.colID
    def freezeColumn(self, id):
        """Freeze all parameters of the column with the given id."""
        if id not in self.colMap:
            raise ValueError("[Doric]: No column with ID %s found." % str(id))
        col = self.columns[self.colMap[id]]
        col.freeze()
    def freezeAllColumns(self):
        """Freeze every column in the network."""
        for col in self.columns:
            col.freeze()
    def unfreezeColumn(self, id):
        """Unfreeze all parameters of the column with the given id."""
        if id not in self.colMap:
            raise ValueError("[Doric]: No column with ID %s found." % str(id))
        col = self.columns[self.colMap[id]]
        col.freeze(unfreeze = True)
    def unfreezeAllColumns(self):
        """Unfreeze every column in the network."""
        for col in self.columns:
            col.freeze(unfreeze = True)
    def isColumnFrozen(self, id):
        """Return True if the column with the given id is frozen."""
        if id not in self.colMap:
            raise ValueError("[Doric]: No column with ID %s found." % str(id))
        col = self.columns[self.colMap[id]]
        return col.isFrozen
    def getColumn(self, id):
        """Return the ProgColumn registered under ``id``."""
        if id not in self.colMap:
            raise ValueError("[Doric]: No column with ID %s found." % str(id))
        col = self.columns[self.colMap[id]]
        return col
    def forward(self, id, x):
        """Run columns in order up to column ``id`` and return its output.

        Earlier (parent) columns run first so their cached lateral outputs
        are fresh; columns after ``id`` are not executed.
        """
        if self.numCols <= 0:
            raise ValueError("[Doric]: ProgNet cannot be run without at least one column.")
        if id not in self.colMap:
            raise ValueError("[Doric]: No column with ID %s found." % str(id))
        colToOutput = self.colMap[id]
        for i, col in enumerate(self.columns):
            y = col(x)
            if i == colToOutput:
                return y
    def getData(self):
        """Return a serializable summary of all columns."""
        data = dict()
        data["cols"] = [c.getData() for c in self.columns]
        return data
#===============================================================================
| StarcoderdataPython |
# library/microsoft_adcs_cert.py
# READ LICENSE before using this module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinteface'],
'supported_by': 'curated'}
DOCUMENTATION = r'''
---
module: msacds_certreq
short_description: Upload CSR and download signed SSL certificate from Microsoft Active Directory Certificate Services.
description:
- This module generates and downloads certs from internal Microsoft Active Directory Certificate Services (CA).
- This module does not generate a CSR, CSR should be pre-existing in local disk and full path for CSR file should be passed in as variable.
- This module was tested only against Windows Server 2012 R2 Datacenter 64 bit Edition.
- This module uses kerberos as authentication mechanism, since NTLM has vulnerabilities.
- This module needs requests_kerberos,krbcontext [pip install requests_kerberos krbcontext] package as a pre-requisite.
version_added: 2.9
author: <EMAIL>
options:
ca_server:
description:
- Include Fully Qualified domain name or IP address of the Certificate Microsoft Active Directory Certificate server.
This server should be reachable from controller and 'https' GUI should be enabled.
type: str
required: True
user:
description:
- Admin user name that has access to request certificate from the CA.
type: str
required: True
credential_cachepath:
description:
- Full path to the kerberos credentail cache for C(user).
type: str
required: True
ca_template_name:
description:
- Name of the template that will be used in CA to sign the CSR request.
type: str
required: True
san_names:
description:
- List of Subject Alternative Names.
type: str
required: True
csr_file_path:
description:
- Complete path to CSR file in local disc.
type: str
required: True
cert_encoding:
description:
- Option to specify the encoding type while downloading the cert.
type: str
choices:
- pem
- der
default: pem
notes:
- Tested only against Windows Server 2012 R2 Datacenter 64 bit Edition.
- Backslash should be escaped , refer example.
- Valid Kerberos ticket TGT must be already avaliable in the controller machine by running the kinit command.
- 'Compatible with both py v2.7 and py v3.6+'
- requests_ntlm package should be installed and available.
pip3 install requests_kerberos
pip3 install krbcontext
- Cert file will be written in the same directory as input CSR file.
'''
EXAMPLES = r'''
- name: Upload a CSR and download Signed SSL cert
msadcs_certreq:
ca_server: msadserver.mydomain.com
user: "<EMAIL>"
credential_cachepath: "/tmp/user@DOMAIN.COM"
ca_template_name: CSR_SIGNING_TEMPLATE_2048
san_names:
- altname1.mydomain.com
- altname2.mydomain.com
csr_file_path: '/full/path/to/csr/file'
cert_encoding: pem
register: result
'''
RESULT = r'''
msacds_certreq_facts:
cert_full_path : '/full/path/to/cert/file'
err: '<Disposition Message if any> or null'
'''
from ansible.module_utils.basic import AnsibleModule
import json
import re
import sys
import time
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
from krbcontext import krbContext
# Pick the correct quote_plus for py3 vs py2.  SLEEP_TIME is the number of
# seconds to wait between submitting the CSR and downloading the cert.
if sys.version.startswith('3') :
    from urllib.parse import quote_plus as encode_util
    SLEEP_TIME=3
else :
    from urllib import quote_plus as encode_util
    SLEEP_TIME=3.0
# Maps the requested cert encoding to the 'Enc' query value ADCS expects.
ENCODING_MAP = {
    'pem':'b64',
    'der':'bin'
}
class ArgumentSpec(object):
    '''
    Argument specification to align with the Ansible argument class.
    '''
    def __init__(self):
        # check_mode is not supported: the module always makes real requests.
        self.supports_check_mode = False
        argument_spec = dict(
            ca_server=dict(
                required=True,
                aliases=['ca'],
                type = 'str'
            ),
            user=dict(
                required=True,
                aliases=['ca_admin_user'],
                type='str'
            ),
            credential_cachepath=dict(
                required=True,
                aliases=['ccachepath'],
                type='str',
                no_log=True
            ),
            ca_template_name=dict(
                required=True,
                type='str'
            ),
            san_names=dict(
                required=True,
                type='list'
            ),
            csr_file_path=dict(
                required=True,
                type='str'
            ),
            cert_encoding=dict(
                type='str',
                default='pem',
                choices=['pem', 'der']
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(argument_spec)
def _get_csr_content() :
    '''
    private function to read the csr from the global csr_path and return the
    body of the request with the PEM header/footer removed.
    Parameters :
        None : csr_path(global str) : Full path to CSR file.
    Returns :
        CSR data with the 'BEGIN CERTIFICATE REQUEST' header and
        'END CERTIFICATE REQUEST' footer removed.(str)
    '''
    # Use a context manager so the handle is closed even if read() raises
    # (the original left the file open on failure).
    with open(csr_path, 'r') as csr_f:
        csr_data = csr_f.read().replace('-----BEGIN CERTIFICATE REQUEST-----', '')
    # Keep only what precedes the END marker; trailing text is discarded.
    csr_data = csr_data.split('-----END CERTIFICATE REQUEST-----', 1)
    csr_data = csr_data[0]
    return csr_data
def _get_crt_attrib():
    '''
    private function to frame the CertAttrib field for the cert request call.
    Parameters :
        None : sans(global list(str)) : List of SAN names.
        None : ca_template(global str) : Template name to use for CSR signing.
    Returns :
        crt_attrib data necessary for the cert request call.(str)
    '''
    crt_attrb = ''
    san_list = []
    for each_san in sans :
        san_list.append("dns={each_san}".format(each_san=each_san))
    #CA SAN format : SAN:dns=host1.mydomain.com&dns=host2&dns=host3.mydomain.com
    san_updated = 'SAN:'+('&'.join(san_list))
    crt_attrb += san_updated
    crt_attrb += "\nCertificateTemplate:{ca_template}".format(ca_template=ca_template)
    crt_attrb += '\nUserAgent:Mozilla/5.0 \(Windows NT 10.0; Win64; x64\) AppleWebKit/537.36 \(KHTML, like Gecko\) Chrome/81.0.4044.122 Safari/537.36'
    return crt_attrb
def _request_cert_req():
    '''
    private function to submit a csr signing request.
    Parameters :
        None : uses globals session, ca, cert_req_ep and the CSR helpers.
    Returns :
        request ID to download cert(str)
    Raises:
        ValueError with the CA disposition message if no request ID is found.
    '''
    payload =dict()
    payload['Mode'] = 'newreq'
    payload['CertRequest'] = _get_csr_content()
    payload['CertAttrib'] = _get_crt_attrib()
    payload['TargetStoreFlags'] = 0
    payload['SaveCert'] = 'yes'
    payload['ThumbPrint'] = ''
    payload['FriendlyType'] = encode_util('Saved-Request Certificate')
    # Browser-like headers: the certsrv endpoint rejects unknown clients.
    headers=dict()
    headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
    headers['Accept-Encoding'] = "gzip, deflate, br"
    headers['Accept-Language'] = "en-US,en;q=0.9"
    headers['Content-Type'] = "application/x-www-form-urlencoded"
    headers['Cache-Control'] = "max-age=0"
    headers['Connection'] = "keep-alive"
    headers['Host'] = ca
    headers['Origin'] = "https://{ca}".format(ca=ca)
    headers['Referer'] = "https://{ca}/certsrv/certrqxt.asp".format(ca=ca)
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    response = session.post(cert_req_ep,data=payload,headers=headers,verify=False)
    rsp_txt = response.text
    try :
        # Scrape the ReqID out of the HTML response; failure to match means
        # the CA rejected the request and returned a disposition message.
        match = re.search('certnew.p7b\?ReqID=(.+?)\&\"\+getEncoding',rsp_txt)
        req_id = match.group(1)
    except Exception:
        match = re.search('The disposition message is (.+?)\\n',rsp_txt)
        err_msg = match.group(1)
        raise ValueError(err_msg)
    return req_id
def _download_cert_req(req_id,encoding) :
    '''
    private function to download certificate after signing.
    Parameters :
        req_id(str) : request ID to download the certificate.
        encoding(str) : 'pem' (p7b chain) or 'der' (leaf .crt only).
    Returns :
        cert_obj(dict) : returns { 'cert_full_path' '/full/path/to/cert/in/local/disc',
                                   'err': 'Exception messages if any' or null}
    '''
    # Output path mirrors the CSR path; extension depends on encoding below.
    crt_path = csr_path.replace('.csr','.p7b')
    download_url = cert_down_ep.replace('<req_id>',req_id)
    headers=dict()
    headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
    headers['Accept-Encoding'] = "gzip, deflate, br"
    headers['Accept-Language'] = "en-US,en;q=0.9"
    headers['Content-Type'] = "application/x-www-form-urlencoded"
    headers['Cache-Control'] = "max-age=0"
    headers['Connection'] = "keep-alive"
    headers['Host'] = ca
    headers['Origin'] = "https://{ca}".format(ca=ca)
    headers['Referer'] = "https://{ca}/certsrv/certfnsh.asp".format(ca=ca)
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    err=None
    try:
        #Handle for DER formatted file download.
        if encoding == "der" :
            #Download only leaf cert
            response = session.get(download_url.replace('.p7b','.cer'),headers=headers,verify=False)
            crt_path = csr_path.replace('.csr','.crt')
            file = open(crt_path, "wb")
            file.write(response.content)
        else:
            response = session.get(download_url,headers=headers,verify=False)
            file = open(crt_path, "w")
            file.write(response.text)
        file.close()
    except Exception as e:
        # Any network/file failure is reported back through the err field.
        err = str(e)
    if err:
        return {
            'cert_full_path' : None,
            'err' : err
        }
    return {
        'cert_full_path' : crt_path,
        'err' : err
    }
def _exec_module(module):
    '''
    private proxy ansible function to invoke the cert request routines.
    Sets the module-level globals (session, ca, paths, endpoints) that the
    private helpers read, then submits the CSR and downloads the result.
    Parameters :
        module(AnsibleModule) : module carrying the validated parameters.
    Returns :
        updated results(dict) : returns updated cert object path { 'cert_full_path' '/full/path/to/cert/in/local/disc',
                                'err': 'Exception messages if any' or null}
    '''
    results = dict()
    args = module.params
    global csr_path
    csr_path = args['csr_file_path']
    global session
    session=requests.Session()
    session.verify=False
    session.auth = HTTPKerberosAuth(mutual_authentication=OPTIONAL)
    global ca_template
    ca_template=args['ca_template_name']
    global sans
    sans=args['san_names']
    global ca
    ca=args['ca_server']
    global cert_req_ep
    cert_req_ep = 'https://{ca}/certsrv/certfnsh.asp'.format(ca=ca)
    encoding = args['cert_encoding']
    global cert_down_ep
    cert_down_ep = "https://{ca}/certsrv/certnew.p7b?ReqID=<req_id>&Enc={encoding}".format(ca=ca,encoding=ENCODING_MAP[encoding])
    req_id = _request_cert_req()
    # Give the CA a moment to issue the cert before downloading.
    time.sleep(SLEEP_TIME)
    crt_path_obj = _download_cert_req(req_id,encoding)
    err_msg = crt_path_obj['err']
    if err_msg:
        module.fail_json(msg='400:'+err_msg)
    results.update(crt_path_obj)
    return results
def main():
    '''
    Main routine.
    Acquires a kerberos context from the supplied credential cache and runs
    the cert request inside it.
    Returns :
        path facts to the invoking ansible play (dict) : returns msacds_certreq_facts dict
        "msacds_certreq_facts": {
        "cert_full_path": "/tmp/ansiblehost.mydomain.com.p7b",
        "err": null
        },
        "msg": "200:Success"
        }
    '''
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        user = module.params['user']
        credential_cachepath = module.params['credential_cachepath']
        # krbContext makes the cached TGT available to requests_kerberos.
        with krbContext(principal=user,
                        ccache_file=credential_cachepath):
            results = _exec_module(module)
        module.exit_json(changed=True,msacds_certreq_facts=results,msg='200:Success')
    except Exception as ex:
        module.fail_json(msg='400:'+str(ex))
if __name__ == '__main__':
    main()
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal
from HSTB.kluster.gui.common_widgets import SaveStateDialog
from HSTB.kluster import kluster_variables
class PatchTestDialog(SaveStateDialog):
    """Dialog for setting up and running an automated patch test.

    Lines are added as (pair, name, heading) rows; pairs are validated
    before the dialog accepts and the result is stored in return_pairs.
    """
    patch_query = Signal(str)  # submit new query to main for data
    def __init__(self, parent=None, title='', settings=None):
        super().__init__(parent, settings, widgetname='patchtestdialog')
        self.setWindowTitle('Patch Test')
        self.setMinimumWidth(900)
        self.setMinimumHeight(400)
        self.main_layout = QtWidgets.QVBoxLayout()
        self.listlayout = QtWidgets.QHBoxLayout()
        self.leftlayout = QtWidgets.QVBoxLayout()
        self.choose_layout = QtWidgets.QHBoxLayout()
        self.from_selected_lines = QtWidgets.QRadioButton('Use selected lines')
        self.from_selected_lines.setChecked(True)
        self.choose_layout.addWidget(self.from_selected_lines)
        self.from_points_view = QtWidgets.QRadioButton('Use Points View selection')
        self.from_points_view.setChecked(False)
        # Points View source is not yet supported, so the option is disabled.
        self.from_points_view.setDisabled(True)
        self.choose_layout.addWidget(self.from_points_view)
        self.choose_layout.addStretch()
        self.leftlayout.addLayout(self.choose_layout)
        self.button_layout = QtWidgets.QHBoxLayout()
        self.analyze_button = QtWidgets.QPushButton('Analyze')
        self.button_layout.addWidget(self.analyze_button)
        self.button_layout.addStretch()
        self.leftlayout.addLayout(self.button_layout)
        self.line_list = LineList(self)
        self.leftlayout.addWidget(self.line_list)
        self.rightlayout = QtWidgets.QHBoxLayout()
        self.explanation = QtWidgets.QTextEdit('', self)
        self.explanation.setMinimumWidth(150)
        self.rightlayout.addWidget(self.explanation)
        self.listlayout.addLayout(self.leftlayout)
        self.listlayout.addLayout(self.rightlayout)
        self.main_layout.addLayout(self.listlayout)
        self.button_layout = QtWidgets.QHBoxLayout()
        self.button_layout.addStretch(1)
        self.ok_button = QtWidgets.QPushButton('Run', self)
        self.button_layout.addWidget(self.ok_button)
        self.button_layout.addStretch(1)
        self.cancel_button = QtWidgets.QPushButton('Cancel', self)
        self.button_layout.addWidget(self.cancel_button)
        self.button_layout.addStretch(1)
        self.main_layout.addLayout(self.button_layout)
        self.hlayout_msg = QtWidgets.QHBoxLayout()
        self.warning_message = QtWidgets.QLabel('', self)
        self.warning_message.setStyleSheet("color : {};".format(kluster_variables.error_color))
        self.hlayout_msg.addWidget(self.warning_message)
        self.main_layout.addLayout(self.hlayout_msg)
        self.setLayout(self.main_layout)
        self.canceled = False
        self.return_pairs = None
        self.from_selected_lines.clicked.connect(self.radio_selected)
        self.from_points_view.clicked.connect(self.radio_selected)
        self.analyze_button.clicked.connect(self.analyze_data)
        self.ok_button.clicked.connect(self.return_patch_test_data)
        self.cancel_button.clicked.connect(self.cancel_patch)
        self.text_controls = []
        self.checkbox_controls = [['from_points_view', self.from_points_view], ['from_selected_lines', self.from_selected_lines]]
        self.read_settings()
        self._set_explanation()
    @property
    def row_full_attribution(self):
        """Current pair/heading attribution, as maintained by the line table."""
        return self.line_list.final_attribution
    def _set_explanation(self):
        """Fill the right-hand text box with the method description."""
        msg = 'Based on "Computation of Calibration Parameters for Multibeam Echo Sounders Using the Least Squares Method"'
        msg += ', by <NAME>\n\nCompute new offsets/angles for the data provided using this automated least squares'
        msg += ' adjustment.'
        self.explanation.setText(msg)
    def err_message(self, text: str = ''):
        """Show an error label with *text*, or clear it when text is empty."""
        if text:
            self.warning_message.setText('ERROR: ' + text)
        else:
            self.warning_message.setText('')
    def analyze_data(self):
        """Emit a patch_query for the currently selected data source."""
        self.err_message()
        if self.from_selected_lines.isChecked():
            self.patch_query.emit('lines')
        elif self.from_points_view.isChecked():
            self.patch_query.emit('pointsview')
    def radio_selected(self, ev):
        """Keep the two data-source radio buttons mutually exclusive."""
        if self.from_selected_lines.isChecked():
            self.from_points_view.setChecked(False)
        elif self.from_points_view.isChecked():
            self.from_selected_lines.setChecked(False)
    def add_line(self, line_data: list):
        """Append a [pair, line name, heading] row to the line table."""
        self.line_list.add_line(line_data)
    def validate_pairs(self):
        """Return (err, pair_dict), displaying any validation message."""
        pair_dict, err, msg = self.line_list.form_pairs()
        if err:
            self.err_message(msg)
        return err, pair_dict
    def return_patch_test_data(self):
        """Run button: validate pairs and accept the dialog on success."""
        self.canceled = False
        err, pairdict = self.validate_pairs()
        if not err:
            self.return_pairs = pairdict
            self.save_settings()
            self.accept()
    def cancel_patch(self):
        """Cancel button: close the dialog, flagging it as canceled."""
        self.canceled = True
        self.accept()
    def clear(self):
        """Remove all rows and cached attribution from the line table."""
        self.line_list.setup_table()
class LineList(QtWidgets.QTableWidget):
def __init__(self, parent):
super().__init__(parent)
self.setDragEnabled(True) # enable support for dragging table items
self.setAcceptDrops(True) # enable drop events
self.viewport().setAcceptDrops(True) # viewport is the total rendered area, this is recommended from my reading
self.setDragDropOverwriteMode(False) # False makes sure we don't overwrite rows on dragging
self.setDropIndicatorShown(True)
self.setSortingEnabled(True)
# ExtendedSelection - allows multiselection with shift/ctrl
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
# makes it so no editing is possible with the table
self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.headr = ['Pair', 'Line Name', 'Heading']
self.setColumnCount(3)
self.setHorizontalHeaderLabels(self.headr)
self.setColumnWidth(0, 40)
self.setColumnWidth(1, 299)
self.setColumnWidth(2, 80)
self.row_full_attribution = {}
@property
def final_attribution(self):
curdata = self.row_full_attribution
actual_lines = []
for row in range(self.rowCount()): # update the pair numbers from the table comboboxes first
pair_num = int(self.cellWidget(row, 0).currentText())
line_name = str(self.item(row, 1).text())
curdata[line_name][0] = pair_num
actual_lines.append(line_name)
dropthese = []
for lname in curdata.keys():
if lname not in actual_lines:
dropthese.append(lname)
for lname in dropthese:
curdata.pop(lname)
return curdata
def keyReleaseEvent(self, e):
"""
Catch keyboard driven events to delete entries or select new rows
Parameters
----------
e: QEvent generated on keyboard key release
"""
if e.matches(QtGui.QKeySequence.Delete) or e.matches(QtGui.QKeySequence.Back):
rows = sorted(set(item.row() for item in self.selectedItems()))
for row in rows:
self.removeRow(row)
def dragEnterEvent(self, e):
"""
Catch mouse drag enter events to block things not move/read related
Parameters
----------
e: QEvent which is sent to a widget when a drag and drop action enters it
"""
if e.source() == self: # allow MIME type files, have a 'file://', 'http://', etc.
e.accept()
else:
e.ignore()
def dragMoveEvent(self, e):
"""
Catch mouse drag enter events to block things not move/read related
Parameters
----------
e: QEvent which is sent while a drag and drop action is in progress
"""
if e.source() == self:
e.accept()
else:
e.ignore()
def dropEvent(self, e):
"""
On drag and drop, handle either reordering of rows or incoming new data from zarr store
Parameters
----------
e: QEvent which is sent when a drag and drop action is completed
"""
if not e.isAccepted() and e.source() == self:
e.setDropAction(QtCore.Qt.MoveAction)
drop_row = self.drop_on(e)
self.custom_move_row(drop_row)
else:
e.ignore()
def drop_on(self, e):
"""
Returns the integer row index of the insertion point on drag and drop
Parameters
----------
e: QEvent which is sent when a drag and drop action is completed
Returns
-------
int: row index
"""
index = self.indexAt(e.pos())
if not index.isValid():
return self.rowCount()
return index.row() + 1 if self.is_below(e.pos(), index) else index.row()
def is_below(self, pos, index):
"""
Using the event position and the row rect shape, figure out if the new row should go above the index row or
below.
Parameters
----------
pos: position of the cursor at the event time
index: row index at the cursor
Returns
-------
bool: True if new row should go below, False otherwise
"""
rect = self.visualRect(index)
margin = 2
if pos.y() - rect.top() < margin:
return False
elif rect.bottom() - pos.y() < margin:
return True
return rect.contains(pos, True) and pos.y() >= rect.center().y()
def custom_move_row(self, drop_row):
"""
Something I stole from someone online. Will get the row indices of the selected rows and insert those rows
at the drag-n-drop mouse cursor location. Will even account for relative cursor position to the center
of the row, see is_below.
Parameters
----------
drop_row: int, row index of the insertion point for the drag and drop
"""
self.setSortingEnabled(False)
rows = sorted(set(item.row() for item in self.selectedItems())) # pull all the selected rows
rows_to_move = [[QtWidgets.QTableWidgetItem(self.item(row_index, column_index)) for column_index in
range(self.columnCount())] for row_index in rows] # get the data for the rows
for row_index in reversed(rows):
self.removeRow(row_index)
if row_index < drop_row:
drop_row -= 1
for row_index, data in enumerate(rows_to_move):
row_index += drop_row
self.insertRow(row_index)
for column_index, column_data in enumerate(data):
self.setItem(row_index, column_index, column_data)
for row_index in range(len(rows_to_move)):
for i in range(int(len(self.headr))):
self.item(drop_row + row_index, i).setSelected(True)
self.setSortingEnabled(True)
def setup_table(self):
    """Reset the table to an empty state: clear every cell, drop all rows,
    and reset the per-line bookkeeping dict."""
    self.clearContents()
    self.setRowCount(0)
    # maps line name -> [pair_number, heading]; kept in sync by add_line
    self.row_full_attribution = {}
def add_line(self, line_data: list):
    """
    Append one survey line to the table.

    Parameters
    ----------
    line_data: list of [pair_number, linename, heading]; a falsy value is a no-op.

    Raises
    ------
    Exception: if a row for *linename* already exists.
    """
    if not line_data:
        return
    self.setSortingEnabled(False)
    pair_number, linename, heading = line_data
    if linename in self.row_full_attribution:
        raise Exception("ERROR: PatchTest - Unable to add line {} when this line already exists".format(linename))
    self.row_full_attribution[linename] = [pair_number, heading]
    row = self.rowCount()
    self.insertRow(row)
    # column 0: editable pair-number combo box (choices 0..14)
    pair_combo = QtWidgets.QComboBox()
    pair_combo.addItems([str(i) for i in range(0, 15)])
    pair_combo.setCurrentText(str(pair_number))
    self.setCellWidget(row, 0, pair_combo)
    # column 1: line name as plain text
    self.setItem(row, 1, QtWidgets.QTableWidgetItem(str(linename)))
    # column 2: heading zero-padded to 7 chars with 3 decimal places
    heading_text = '{:3.3f}'.format(float(heading)).zfill(7)
    self.setItem(row, 2, QtWidgets.QTableWidgetItem(heading_text))
    self.setSortingEnabled(True)
def form_pairs(self):
    """
    Group the attributed lines into pairs keyed by pair index.

    Returns
    -------
    dict: pair index -> [line names..., lowest azimuth of the pair]
    bool: True when any pair does not contain exactly 2 lines
    str: message describing the last such problem, '' if none
    """
    pairs = {}
    azimuths = {}
    problem_found = False
    problem_msg = ''
    for line_name, line_data in self.final_attribution.items():
        pair_index = int(line_data[0])
        azimuth = float(line_data[1])
        pairs.setdefault(pair_index, []).append(line_name)
        azimuths.setdefault(pair_index, []).append(azimuth)
    for pair_index, members in pairs.items():
        if len(members) > 2:
            problem_msg = 'Pair {} has {} lines, can only have 2'.format(pair_index, len(members))
            problem_found = True
        elif len(members) < 2:
            problem_msg = 'Pair {} has less than 2 lines, each pair must have 2 lines'.format(pair_index)
            problem_found = True
    # tack on the lowest azimuth of each pair
    for pair_index, az_list in azimuths.items():
        pairs[pair_index].append(min(az_list))
    return pairs, problem_found, problem_msg
if __name__ == '__main__':
    # Manual smoke test: show the dialog with two dummy lines.
    try:  # PySide2's QApplication takes no arguments
        app = QtWidgets.QApplication()
    except TypeError:  # PyQt5's QApplication requires an argv list
        app = QtWidgets.QApplication([])
    dlog = PatchTestDialog()
    dlog.add_line([1, 'tstline', 0.0])
    dlog.add_line([2, 'tstline2', 180.0])
    dlog.show()
    if dlog.exec_():
        pass
| StarcoderdataPython |
3305196 | <reponame>L-Net-1992/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from collections import OrderedDict
from oneflow.test_utils.test_util import GenArgList
def get_sbp(device: str):
    """Return (placement over all devices of *device* type, sbp split along dim 0)."""
    return flow.env.all_device_placement(device), flow.sbp.split(0)
shapes = {2: (128, 8), 3: (16, 8, 64), 4: (16, 8, 32, 32), 5: (16, 8, 16, 16, 16)}
def compare_loss(device_type, dim, reduction, cls, data_generator):
    """Check that cls(reduction=reduction) matches a manually reduced
    cls(reduction="none") on global tensors: forward values (both local
    shards and global results) and input gradients must agree.

    Parameters
    ----------
    device_type: "cuda" or "cpu"
    dim: tensor rank, used to look up a shape in `shapes`
    reduction: "mean" or "sum"
    cls: loss module class, e.g. flow.nn.BCELoss
    data_generator: one of the generate_necessity_* helpers; returns two
        identical (input, target) pairs so each loss gets its own graph
    """
    x, y, x1, y1 = data_generator(dim, device_type, *get_sbp(device_type))
    reduce_loss_func = cls(reduction=reduction).to(device_type)
    none_loss_func = cls(reduction="none").to(device_type)
    loss_mean = reduce_loss_func(x, y)
    # reduce the element-wise loss by hand with the matching reduction
    loss_none = (
        flow.mean(none_loss_func(x1, y1))
        if reduction == "mean"
        else flow.sum(none_loss_func(x1, y1))
    )
    loss_mean.backward()
    loss_none.backward()
    # local (per-rank) shards must match
    assert np.allclose(
        loss_none.to_local().numpy(),
        loss_mean.to_local().numpy(),
        rtol=1e-05,
        atol=1e-05,
    )
    # global results must match
    assert np.allclose(loss_none.numpy(), loss_mean.numpy(), rtol=1e-05, atol=1e-05,)
    # gradients w.r.t. the inputs must match as well
    assert np.allclose(
        x.grad.to_local().numpy(), x1.grad.to_local().numpy(), rtol=1e-05, atol=1e-05,
    )
def generate_necessity_default(dim: int, device: str, placement, sbp):
    """Build two identical (input, target) pairs of global tensors with
    uniform [0, 1) values; inputs track gradients, targets do not."""
    shape = shapes[dim]
    x_np = np.random.uniform(0, 1, shape)
    y_np = np.random.uniform(0, 1, shape)

    def as_global(arr, requires_grad):
        tensor = flow.tensor(
            arr, device=device, requires_grad=requires_grad
        ).to_global(placement=placement, sbp=[sbp])
        if requires_grad:
            tensor.retain_grad()
        return tensor

    return (
        as_global(x_np, True),
        as_global(y_np, False),
        as_global(x_np, True),
        as_global(y_np, False),
    )
def generate_necessity_for_cross_entropy_or_nll_loss(
    dim: int, device: str, placement, sbp
):
    """Build two identical (input, target) pairs of global tensors for
    class-index losses: inputs are uniform floats, targets are integer
    class indices in [0, shape[1]) with the class axis (dim 1) removed."""
    shape = shapes[dim]
    # targets drop the class axis; rank-2 inputs get a 1-D target
    y_shape = (shape[0],) if dim == 2 else (shape[0], *shape[2:])
    x_np = np.random.uniform(0, 1, shape)
    y_np = np.random.randint(0, shape[1], y_shape)

    def as_global(arr, requires_grad):
        tensor = flow.tensor(
            arr, device=device, requires_grad=requires_grad
        ).to_global(placement=placement, sbp=[sbp])
        if requires_grad:
            tensor.retain_grad()
        return tensor

    return (
        as_global(x_np, True),
        as_global(y_np, False),
        as_global(x_np, True),
        as_global(y_np, False),
    )
class TestBCELossOrWithLogitsConsistent(flow.unittest.TestCase):
    """BCELoss / BCEWithLogitsLoss: reduced vs. manually reduced results agree."""

    @flow.unittest.skip_unless_1n2d()
    def test_bce_loss(testcase):
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.BCELoss, flow.nn.BCEWithLogitsLoss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
class TestCrossEntropyOrNllLossConsistent(flow.unittest.TestCase):
    """CrossEntropyLoss / NLLLoss: reduced vs. manually reduced results agree."""

    @flow.unittest.skip_unless_1n2d()
    def test_cross_entropy_loss_or_nll_loss(testcase):
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.CrossEntropyLoss, flow.nn.NLLLoss]),
                ("data_generator", [generate_necessity_for_cross_entropy_or_nll_loss]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
class TestKLDivLossConsistent(flow.unittest.TestCase):
    """KLDivLoss: reduced vs. manually reduced results agree."""

    @flow.unittest.skip_unless_1n2d()
    def test_kl_div_loss(testcase):
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.KLDivLoss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
class TestSmoothL1LossConsistent(flow.unittest.TestCase):
    """SmoothL1Loss: reduced vs. manually reduced results agree."""

    @flow.unittest.skip_unless_1n2d()
    def test_smooth_l1_loss(testcase):
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.SmoothL1Loss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
if __name__ == "__main__":
    # Discover and run the TestCase classes defined above.
    unittest.main()
| StarcoderdataPython |
3200339 | from flask.ext.wtf import (Form, TextField, PasswordField, FormField, required,
EqualTo, Email, ValidationError, Optional)
from flask.ext.login import current_user
from nano.models import User
from nano.extensions import db
class UserForm(Form):
    """Create/edit form for a :class:`User`.

    When constructed with ``obj``, that user is being edited and is excluded
    from the duplicate-email check; otherwise a brand-new user is assumed.
    """
    first_name = TextField(u'First name', validators=[required()])
    last_name = TextField(u'Last name', validators=[required()])
    email_address = TextField(u'Email address', validators=[required(), Email()])
    # Label fixed: it previously read the literal placeholder '<PASSWORD>'.
    password = PasswordField(u'Password', validators=[Optional(), EqualTo('password_confirm')])
    password_confirm = PasswordField(u'Password confirm', validators=[Optional()])

    def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
        super(UserForm, self).__init__(formdata, obj, prefix, **kwargs)
        # Remember the user being edited; None when creating from scratch.
        self.user = obj if obj else None

    def validate_email_address(self, field):
        """WTForms inline validator: reject an email already used by another user.

        Raises
        ------
        ValidationError: if a different user already has this email address.
        """
        query = User.query.filter_by(email_address=field.data)
        # Only exclude the edited user when it actually exists in the DB.
        # The original unconditionally used self.user.id, which raised
        # AttributeError when self.user was None and produced the SQL
        # `User.id != NULL` (matches nothing) for unsaved users, silently
        # disabling the duplicate check.
        if self.user is not None and self.user.id is not None:
            query = query.filter(User.id != self.user.id)
        if query.first():
            raise ValidationError('That email address is already in use')

    def save(self):
        """Copy the form fields onto the user and commit.

        Returns
        -------
        The persisted user object.  Assumes the form was constructed with
        ``obj`` (self.user is not None).
        """
        self.user.first_name = self.first_name.data
        self.user.last_name = self.last_name.data
        self.user.email_address = self.email_address.data
        # NOTE(review): '> 1' silently drops one-character passwords;
        # presumably meant "password provided" — confirm intended minimum.
        if len(self.password.data) > 1:
            self.user.password = self.password.data
        db.session.add(self.user)
        db.session.commit()
        return self.user
| StarcoderdataPython |
4829936 | <filename>bigml/centroid.py
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Centroid structure for the BigML local Cluster
This module defines an auxiliary Centroid predicate structure that is used
in the cluster.
"""
import math
import sys
INDENT = " " * 4
STATISTIC_MEASURES = [
'Minimum', 'Mean', 'Median', 'Maximum', 'Standard deviation', 'Sum',
'Sum squares', 'Variance']
def cosine_distance2(terms, centroid_terms, scale):
    """Squared, scaled cosine distance between an input term list and a
    centroid's term list.

    Two empty lists are identical (distance 0); exactly one empty list is
    maximally distant (1 before scaling, so scale**2).
    """
    if not terms and not centroid_terms:
        return 0
    if not terms or not centroid_terms:
        return scale ** 2
    # count centroid terms (with multiplicity) that also appear in the input
    shared = sum(1 for term in centroid_terms if term in terms)
    cosine_similarity = shared / math.sqrt(len(terms) * len(centroid_terms))
    similarity_distance = scale * (1 - cosine_similarity)
    return similarity_distance ** 2
class Centroid(object):
    """A cluster centroid: its center values per field, population count,
    id/name, and the per-measure distance statistics reported by BigML.
    """

    def __init__(self, centroid_info):
        # `centroid_info` is the raw centroid dict from the cluster resource.
        self.center = centroid_info.get('center', {})
        self.count = centroid_info.get('count', 0)
        self.centroid_id = centroid_info.get('id', None)
        self.name = centroid_info.get('name', None)
        self.distance = centroid_info.get('distance', {})

    def distance2(self, input_data, term_sets, scales, stop_distance2=None):
        """Squared distance from the given input data to the centroid.

        Text fields (list-valued centers) use the cosine distance of term
        sets, categorical fields (string centers) contribute scale**2 on a
        mismatch, and numeric fields contribute the scaled squared
        difference.  If `stop_distance2` is given, returns None as soon as
        the accumulated distance reaches it (early-exit for nearest-centroid
        searches).
        """
        # `basestring` only exists on Python 2; fall back to `str` on
        # Python 3 so the method works on both interpreters (the original
        # raised NameError on every Python 3 call).
        string_types = basestring if sys.version_info[0] < 3 else str
        distance2 = 0.0
        for field_id, value in self.center.items():
            if isinstance(value, list):
                # text field: cosine distance between the input's terms and
                # the centroid's terms
                terms = ([] if field_id not in term_sets else
                         term_sets[field_id])
                distance2 += cosine_distance2(terms, value, scales[field_id])
            elif isinstance(value, string_types):
                # categorical field: 0 when equal, scaled unit distance
                # otherwise (missing input counts as a mismatch)
                if field_id not in input_data or input_data[field_id] != value:
                    distance2 += 1 * scales[field_id] ** 2
            else:
                # numeric field: scaled squared difference
                distance2 += ((input_data[field_id] - value) *
                              scales[field_id]) ** 2
            if stop_distance2 is not None and distance2 >= stop_distance2:
                return None
        return distance2

    def print_statistics(self, out=sys.stdout):
        """Print the statistics for the training data clustered around the
        centroid.
        """
        out.write(u"%s%s:\n" % (INDENT, self.name))
        literal = u"%s%s: %s\n"
        for measure_title in STATISTIC_MEASURES:
            # statistics dict keys are lower-snake-case measure names
            measure = measure_title.lower().replace(" ", "_")
            out.write(literal % (INDENT * 2, measure_title,
                                 self.distance[measure]))
        out.write("\n")
| StarcoderdataPython |
3396147 | import numpy as np
from helpers import baseline_discretization, solve_set_based, solve_sample_based, comparison_wrapper
from models import makeMatrixModel, makeSkewModel, makeDecayModel, make1DHeatModel
# define problem dimensions: 2-D parameter (input) space, 2-D data (output) space
inputDim, outputDim = 2, 2
# define reference parameter
# refParam = np.array([0.5]*inputDim)
if __name__ == "__main__":
    import argparse
    desc = """
    Make voronoi-cell diagrams with uniform random samples
    in a 2D unit domain.
    """
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-m', '--model', default='random', type=str,
                        help="""
                        Choose model from
                        - 'skew' (linear matrix map)
                        - 'identity' (linear matrix map)
                        - 'random' (linear matrix map)
                        - 'decay' (exponential decay)
                        - 'heatrod' (1-dimensional heat eq)
                        - 'diagonal' (linear matrix map)
                        If unrecognized, it will revert to 'random' (linear map).
                        """)
    parser.add_argument('-n', '--num', default=int(1E2), type=int,
                        help="""
                        Set number of samples (default: 1E2).
                        If given as <=1, it will revert to the default value.
                        """)
    parser.add_argument('-u', '--uncert_rect_size', default=0.2, type=float,
                        help='Set uncertainty (`rect_size`) (default: 0.2)')
    parser.add_argument('-s', '--seed', default=21, type=int,
                        help='Set random seed (default: 21).')
    parser.add_argument('-o', '--observed_cells_per_dim', default=1, type=int,
                        help="""
                        Cells per dimension (default: 1) for regular grid
                        discretizing the `output_probability_set`.
                        If given as <1, it will revert to the default value.
                        """)
    parser.add_argument('-i', '--input_cells_per_dim', default=49, type=int,
                        help="""
                        Cells per dimension (default: 49) for regular grid
                        discretizing the `input_sample_set`.
                        If given as <1, it will revert to the default value.
                        """)
    parser.add_argument('--mc_points', default=0, type=int,
                        help="""
                        Number of samples (default: 0) in calculation of
                        volumes using Monte Carlo emulation (integration).
                        If given as <100, it will revert to None.
                        If None, or not supplied, default to using the Monte Carlo
                        assumption (volumes = 1/num_samples).
                        """)
    parser.add_argument('--reg', action='store_true',
                        help='Use regular grid sampling for input space.')
    parser.add_argument('--pdf', action='store_true',
                        help='Store as pdf instead of png.')
    parser.add_argument('--show', action='store_true',
                        help='Call `plt.show()` after plotting.')
    parser.add_argument('--noplot', action='store_true',
                        help='Option to not create/save plots.')
    parser.add_argument('--fontsize', default=16, type=float,
                        help='Sets `plt.rcParams[\'font.size\']` (default: 16).')
    parser.add_argument('--figsize', default=5, type=int,
                        help="""
                        Sets `plt.rcParams[\'figure.size\']`(default: 5).
                        Assumes square aspect ratio.
                        """)
    parser.add_argument('-l', '--numlevels', default=10, type=int,
                        help="""
                        Number of contours to plot (default=10).
                        If given as <2, it will revert to 2.
                        """)
    parser.add_argument('--figlabel', default='', type=str,
                        help='Label in figure saved name.')
    parser.add_argument('-t', '--title', default=None, type=str,
                        help='Title for figure. If `None`, use `--model` in title.')
    # which problem type to solve?
    parser.add_argument('--set', action='store_true',
                        help='Only do set-based solution.')
    parser.add_argument('--sample', action='store_true',
                        help='Only do sample-based solution.')
    # model-specific arguments
    parser.add_argument('--skew', default=1.0, type=float,
                        help='Sets skew if `--model=\'skew\'` (default: 1.0).')
    parser.add_argument('--lam1', default=0.5, type=float,
                        help='Sets first default parameter (default: 0.5).')
    parser.add_argument('--lam2', default=0.5, type=float,
                        help='Sets second default parameter (default: 0.5).')
    parser.add_argument('--t0', default=0.5, type=float,
                        help='Decay model: 1st observation time (default: 0.5). \
                        Heatrod model: 1st thermometer location.')
    parser.add_argument('--t1', default=0.75, type=float,
                        help='Decay model: 2nd observation time (default: 0.75). \
                        Heatrod model: 1st thermometer location.')
    parser.add_argument('--noeval', action='store_true',
                        help='Sample based model: plot using original samples, not mesh.')

    #### START OF FUNCTIONALITY ###
    args = parser.parse_args()
    numSamples, r_seed = args.num, args.seed
    eval = not args.noeval  # evaluate sample-based model at new samples?
    if numSamples <= 1:
        print("Incompatible number of samples. Using default.")
        numSamples = int(1E2)
    if r_seed > 0:
        np.random.seed(r_seed)
    # define width of sidelengths of support of observed
    uncert_rect_size = args.uncert_rect_size
    # regular-grid discretization of sets (set-based approach):
    # cpd = cells per dimension
    # output_probability_set discretization
    cpd_observed = args.observed_cells_per_dim
    if cpd_observed < 1: cpd_observed = 1
    # input_sample_set discretization (if regular)
    # only pay attention to cpd_input if regular sampling has been specified.
    if args.reg:
        cpd_input = args.input_cells_per_dim
        if cpd_input < 1: cpd_input = 49
    else:
        cpd_input = None
    n_mc_points = int(args.mc_points)
    if n_mc_points < 100:
        n_mc_points = None
    # MODEL SELECTION
    # NOTE(review): these two clamps assign lam1/lam2 that are never read —
    # refParam below still uses the raw args.lam1/args.lam2; confirm intent.
    if args.lam1 < 0 or args.lam1 > 1:
        lam1 = 0.5
    if args.lam2 < 0 or args.lam2 > 1:
        lam2 = 0.5
    refParam = np.array([args.lam1, args.lam2])
    model_choice = args.model
    # default domain
    min_val, max_val = 0, 1
    # TODO: add options to set it: need to take in list, potentially
    if model_choice == 'skew':
        # can be list for higher-dimensional outputs.
        skew = args.skew
        if skew < 1:
            raise ValueError("Skewness must be greater than 1.")
        myModel = makeSkewModel(skew)
    elif model_choice == 'decay':
        # times to evaluate define the QoI map
        # error-handling for times.
        if args.t0 > 0:
            t0 = args.t0
        else:
            raise ValueError("t0<0.")
        if args.t1 > 0:
            t1 = args.t1
        else:
            raise ValueError("t1<0.")
        if args.t1 <= t0:
            raise ValueError("t1<t0.")
        else:
            t1 = args.t1
        eval_times = [t0, t1]
        myModel = makeDecayModel(eval_times)
        # embed the observation window into the label used for titles/files
        model_choice += 'T=[%s-%s]' % (t0, t1)
    elif model_choice == 'random':
        A = np.random.randn(outputDim, inputDim)
        myModel = makeMatrixModel(A)
    elif model_choice == 'diagonal':
        print("Using `t0/t1` as diagonal entries for operator.")
        diag = [args.t0, args.t1]
        D = np.diag(diag)
        myModel = makeMatrixModel(D)
    elif model_choice == 'heatrod':
        print("Using `t0/t1` for thermometer locations")
        assert args.t0 < 1 and args.t0 > 0
        assert args.t1 < 1 and args.t1 > 0
        myModel = make1DHeatModel([args.t0, args.t1])
        # heatrod uses a physical parameter domain, not the unit square
        min_val, max_val = 0.01, 0.2
        if np.min(refParam) < min_val or np.max(refParam) > max_val:
            print("Reference parameter passed out of range. Mapping to interval.")
            # affine map from [0, 1] onto [min_val, max_val]
            refParam = refParam*(max_val - min_val) + min_val
    # ADD NEW MODELS BELOW HERE with `elif`
    else:
        model_choice = 'identity'
        I = np.eye(inputDim)
        myModel = makeMatrixModel(I)
    if not args.set and not args.sample:  # if both false, set both to true.
        args.set, args.sample = True, True
        print("Solving using both methods.")
        disc, disc_set, disc_samp = comparison_wrapper(model=myModel,
                                                       num_samples=numSamples,
                                                       input_dim=inputDim,
                                                       param_ref=refParam,
                                                       rect_size=uncert_rect_size,
                                                       cpd_observed=cpd_observed,
                                                       input_cpd=cpd_input,
                                                       n_mc_points=n_mc_points,
                                                       min_val=min_val,
                                                       max_val=max_val)
    else:  # only do one or the other.
        # NOTE(review): passing both --set and --sample takes this branch and
        # only solves the sample-based problem (elif), yet the plotting code
        # below will then reference the undefined disc_set — confirm/guard.
        # Create baseline discretization
        disc = baseline_discretization(model=myModel,
                                       num_samples=numSamples,
                                       input_dim=inputDim,
                                       param_ref=refParam,
                                       input_cpd=cpd_input,
                                       n_mc_points=n_mc_points,
                                       min_val=min_val,
                                       max_val=max_val)
        if args.sample:
            print("Solving only with sample-based approach.")
            # Set up sample-based approach
            disc_samp = solve_sample_based(discretization=disc,
                                           rect_size=uncert_rect_size)
        elif args.set:
            print("Solving only with set-based approach.")
            # Set up set-based approach
            disc_set = solve_set_based(discretization=disc,
                                       rect_size=uncert_rect_size,
                                       obs_cpd=cpd_observed)

    ### STEP 4 ###
    # plot results
    if not args.noplot:
        save_pdf = args.pdf
        figLabel = args.figlabel
        # matplotlib is imported lazily so --noplot runs need no display stack
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        from plot_examples import plot_2d
        plt.rcParams['font.size'] = args.fontsize
        plt.rcParams['figure.figsize'] = args.figsize, args.figsize  # square ratio
        ### MISC ###
        Qref = disc.get_output().get_reference_value()
        print('Reference Value:', refParam, 'maps to', Qref)
        ### ACTUAL PLOTTING CODE ###
        nbins = 50
        # xmn, xmx = 0.25, 0.75
        # ymn, ymx = 0.25, 0.75
        xmn, xmx = min_val, max_val
        ymn, ymx = min_val, max_val
        # regular nbins x nbins evaluation grid over the parameter domain
        xi, yi = np.mgrid[xmn:xmx:nbins*1j, ymn:ymx:nbins*1j]
        if args.title is None:
            model_title = args.model.capitalize() + ' Model'
        else:
            model_title = args.title
        numLevels = args.numlevels
        if numLevels < 2: numLevels = 2
        show_prev = args.show
        # label keyword defaults to approx
        if args.set:
            print("\tPlotting set-based.")
            plot_2d(xi, yi, disc_set, num_levels=numLevels,
                    label=figLabel, annotate='set',
                    title=model_title, pdf=save_pdf, preview=show_prev)
        if args.sample:
            print("\tPlotting sample-based.")
            plot_2d(xi, yi, disc_samp, num_levels=numLevels,
                    label=figLabel, annotate='sample',
                    title=model_title, pdf=save_pdf,
                    eval=eval, preview=show_prev)
    print("Done.")
| StarcoderdataPython |
1770906 | <reponame>YasinEhsan/interview-prep
#9.26 6:35pm
def insert(intervals, new_interval):
    """Insert `new_interval` into `intervals` (sorted, non-overlapping
    [start, end] pairs), merging any overlaps.

    Returns a new merged list; neither `intervals` nor `new_interval` is
    mutated (the original implementation mutated `new_interval` in place).
    """
    merged = []
    start, end = 0, 1
    i = 0
    # 1) copy every interval that ends strictly before the new one starts
    while i < len(intervals) and intervals[i][end] < new_interval[start]:
        merged.append(intervals[i])
        i += 1
    # 2) fold every overlapping interval into the combined range
    lo, hi = new_interval[start], new_interval[end]
    while i < len(intervals) and intervals[i][start] <= hi:
        lo = min(lo, intervals[i][start])
        hi = max(hi, intervals[i][end])
        i += 1
    merged.append([lo, hi])
    # 3) append the remaining intervals individually.  The original code did
    #    merged.append(intervals[i:len(intervals)]), which nested the whole
    #    remainder slice as a single list element inside the result.
    merged.extend(intervals[i:])
    return merged
# merged = []
# # TODO: Write your code here
# combinedRange = new_interval
# start, end = 0,1
# for i in range(len(intervals)):
# curr = intervals[i]
# if curr[end] < new_interval[start]:
# merged.append(curr)
# else:
# combinedRange[start] = min(curr[start], combinedRange[start])
# combinedRange[end] = max(curr[end], combinedRange[end])
# if combinedRange[start] < curr[end]:
# merged.append(combinedRange)
# merged.append(intervals[i:len(intervals)])
# break
# return merged
# def insert(intervals, new_interval):
# merged = []
# # TODO: Write your code here
# interval_start = new_interval[0]
# interval_end = new_interval[1]
# found = False
# for i in range(len(intervals)):
# curr_start = intervals[i][0]
# curr_end = intervals[i][1]
# if found and interval_end < curr_start:
# merged.append([interval_start, interval_end])
# merged.append(intervals[i:len(intervals[i])])
# break
# if interval_start <= curr_start:
# interval_end = max(curr_end, interval_end)
# found = True
# else:
# merged.append(intervals[i])
# return merged
| StarcoderdataPython |
1632153 | <filename>fakerfaker.py
import torch
import numpy as np
import sys
def fake_prob(upper, quantile=0.999, mode="exp"):
    """Return the scale parameter of a distribution whose `quantile`-quantile
    lies at `upper`.

    For mode "exp" this solves the exponential CDF F(x) = 1 - exp(-x/beta)
    for beta such that F(upper) = quantile.

    Raises
    ------
    NotImplementedError: for mode "lognorm" (previously a silent `pass`
        that returned None and made downstream sampling fail obscurely).
    Exits via sys.exit for any other unsupported mode, as before.
    """
    if mode == "exp":
        beta = - upper / (np.log(1 - quantile))
        return beta
    elif mode == "lognorm":
        raise NotImplementedError("lognorm mode is not implemented yet")
    else:
        sys.exit(f"NOT support mode {mode}")
def fake_data(num_samples=4096, num_t=1, num_d=13, num_s=26, ln_emb=None, text_file=None, quantile=0.999, mode="exp"):
    """Generate a fake recommendation-style dataset and optionally save it.

    num_samples: number of samples (rows)
    num_t: number of targets (column 0 is drawn from {0, 1})
    num_d: number of dense features (uniform ints in [0, 10))
    num_s: number of sparse features
    ln_emb: embedding sizes, one per sparse feature; small tables (<=10000)
        use a uniform distribution, larger ones an exponential distribution
        scaled by fake_prob.  Defaults to no sparse columns being filled
        (the original crashed with TypeError when ln_emb was omitted).
    text_file: output path; when None nothing is written
    quantile, mode: forwarded to fake_prob for large tables

    Returns
    -------
    The (num_samples, num_t + num_d + num_s) integer array (also written to
    `text_file` when given); previously nothing was returned.
    """
    if ln_emb is None:
        ln_emb = []
    # place-holder random array covering targets, dense and sparse columns
    a = np.random.randint(0, 10, (num_t + num_d + num_s, num_samples))
    # generate binary targets
    a[0, :] = np.random.randint(0, 2, num_samples)
    # generate sparse features
    for k, size in enumerate(ln_emb):
        if size <= 10000:
            # uniform distribution over the table
            a[num_t + num_d + k, :] = np.random.randint(0, size, num_samples)
        else:
            # exponential distribution; np.int was removed in NumPy 1.24,
            # so cast with the builtin int instead
            beta = fake_prob(size, quantile=quantile, mode=mode)
            a[num_t + num_d + k, :] = np.random.exponential(beta, num_samples).astype(int)
    a = np.transpose(a)
    # print format: decimal for targets/dense, hex for sparse indices
    lstr = []
    for _ in range(num_t + num_d):
        lstr.append("%d")
    for _ in range(num_s):
        lstr.append("%x")
    if text_file is not None:
        np.savetxt(text_file, a, fmt=lstr, delimiter='\t',)
    return a
def fake_emb(
        sparse_feature_size=None,
        sparse_feature_num=26,
        emb_size="",
        min_cat=2,
        max_cat=1000000,
        shuffle=True):
    """
    Make up `sparse_feature_num` embedding table sizes, each table having
    shape (category_count, `sparse_feature_size`), where
    `min_cat` < category_count < `max_cat`.

    If `emb_size` is a non-empty dash-separated string (e.g. "10-200-3000"),
    those exact sizes are used and must match `sparse_feature_num`.
    Otherwise sizes are drawn roughly evenly across decades up to `max_cat`.

    Returns (sparse_feature_size, sparse_feature_num, ln_emb ndarray).
    """
    if sparse_feature_size is None:
        # pick a common embedding width at random
        sparse_feature_size = np.random.choice([16, 32, 64, 128, 256])
    if emb_size:
        # NOTE(review): np.fromstring is deprecated for text parsing;
        # np.fromiter(map(int, emb_size.split("-")), int) is the modern form
        ln_emb = np.fromstring(emb_size, dtype=int, sep="-")
        ln_emb = np.asarray(ln_emb)
        assert ln_emb.size == sparse_feature_num, "sparse_feature_num not match num_emb"
    else:
        emb_list = []
        # hope to generate category counts evenly across decades
        p = len(str(max_cat))            # number of decades, e.g. 7 for 1000000
        q = sparse_feature_num // p      # tables per decade, e.g. 26 // 7 = 3
        for i in range(p):
            for _ in range(q):
                # clamp the decade [10**i, 10**(i+1)-1] to [min_cat, max_cat]
                s = 10**i if 10**i > min_cat else min_cat
                e = 10**(i+1)-1 if 10**(i+1)-1 < max_cat else max_cat
                size = np.random.randint(s, e) if s < e else s
                emb_list.append(size)
                if len(emb_list) >= sparse_feature_num:
                    break
            if len(emb_list) >= sparse_feature_num:
                break
        # top up with sizes from the last decade (i leaks from the loop above)
        while len(emb_list) < sparse_feature_num:
            s = 10**i if 10**i > min_cat else min_cat
            e = 10**(i+1)-1 if 10**(i+1)-1 < max_cat else max_cat
            size = np.random.randint(s, e) if s < e else s
            emb_list.append(size)
        ln_emb = np.array(emb_list)
    if shuffle:
        np.random.shuffle(ln_emb)
    print("=== fake embedding info ===")
    print(f"sparse_feature_size: {sparse_feature_size}")
    print(f"{ln_emb.size} ln_emb: {ln_emb}")
    print("===========================")
    return sparse_feature_size, sparse_feature_num, ln_emb
if __name__ == "__main__":
    # Terabyte-style profile: 24 days of data, 13 dense + 26 sparse features.
    profile = "terabyte0875"
    num_samples = 4096
    emb_dim = 64
    num_dense = 13
    num_sparse = 26
    num_days = 24
    out_dir = "./fake_" + profile + "/"
    out_name = "day_"
    # make the fake embedding-table sizes
    spa_size, spa_num, ln_emb = fake_emb(sparse_feature_size=emb_dim, sparse_feature_num=num_sparse, shuffle=True)
    # make data (disabled; re-enable to actually write the per-day files)
    # for k in range(num_days):
    #     text_file = out_dir + out_name + ("" if profile == "kaggle" else str(k))
    #     fake_data(num_samples=num_samples, num_d=num_dense, num_s=spa_num, ln_emb=ln_emb, text_file=text_file)
    #     print(f"faked data saved at {text_file}")
| StarcoderdataPython |
143105 | import copy
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.layers import as_gated
def safe_sigmoid(x):
return np.where(x < 0, np.exp(x) / (1. + np.exp(x)), 1. / (1. + np.exp(-x)))
class AsGatedHelper(object):
def __init__(self, main_ret, gate_ret):
self.main_args = None
self.gate_args = None
self.main_ret = main_ret
self.gate_ret = gate_ret
def __call__(self, *args, **kwargs):
scope = kwargs['scope']
if scope == 'main':
assert(self.main_args is None)
self.main_args = (args, copy.copy(kwargs))
return self.main_ret
elif scope == 'gate':
assert(self.gate_args is None)
self.gate_args = (args, copy.copy(kwargs))
return self.gate_ret
else:
raise RuntimeError()
class TestAsGated(tf.test.TestCase):
def test_as_gated(self):
main_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
gate_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
activation_fn = object()
# default_name infer failed
with pytest.raises(ValueError,
match='`default_name` cannot be inferred'):
g = as_gated(AsGatedHelper(main_ret, gate_ret))
with self.test_session() as sess:
# test infer default name
f = AsGatedHelper(main_ret, gate_ret)
f.__name__ = 'f'
g = as_gated(f)
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'gated_f/')
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test specify default name
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, sigmoid_bias=1., default_name='ff')
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 1.))
self.assertTrue(g_ret.name, 'gated_ff/')
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test using `name`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, name='name')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'name/')
# test using `scope`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, scope='scope')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'scope/')
| StarcoderdataPython |
1635573 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 06:55:29 2019
@author: Joule
"""
from sender_reciever import Reciever, Sender
from affine import Affine
from ubrytelig import Ubrytelig
class Hacker(Reciever):
"""Brute force hacker!
Denne versjonen trenger ikke å få det brukte cipheret som input,
hvis cipheret er det Ubrytelige,
tar dekrypteringen bare 52 sekunder lengre"""
def __init__(self):
Reciever.__init__(self)
_f = open('english_words.txt', "r")
self.valid_words = set(_f.read().split())
self.valid_inverse = [1, 2, 3, 4, 6, 7, 8, 9, 11,\
12, 13, 14, 16, 17, 18, 21,
22, 23, 24, 26, 27, 28, 29,
31, 32, 33, 34, 36, 37, 39,
41, 42, 43, 44, 46, 47, 48,
49, 51, 52, 53, 54, 56, 58,
59, 61, 62, 63, 64, 66, 67,
68, 69, 71, 72, 73, 74, 77,
78, 79, 81, 82, 83, 84, 86,
87, 88, 89, 91, 92, 93, 94]
_f.close()
def decode(self, text):
guessed_word = ''
words_recognized_record = 0
dummy = Reciever()
dummy.set_cipher(Affine())
"""Tester for Affine cipher, siden det også plukker opp alle
multiplikasjon og alle Caesar"""
for multiply in self.valid_inverse:
for add in range(95):
dummy.set_key((multiply, add))
dummy.decode(text)
decoded_text = dummy.get_decoded().lower().split()
matching_words = set(decoded_text).intersection(self.valid_words)
if len(matching_words) > words_recognized_record:
words_recognized_record = len(matching_words)
guessed_word = " ".join(decoded_text)
"""Hvis vi kjenner igjen minst tre ord, så kan vi stoppe
searchet tidlig og spare opp til ca 50 sekunder"""
if words_recognized_record >= 3:
return guessed_word
"""Ellers må vi annta at det "ubrytelige" cipheret ble brukt"""
"""10 min å hacke med kodeord "pizza"...
og 30 sekund å hacke med kodeord "ant"... """
dummy.set_cipher(Ubrytelig())
count = 1
for kodeord in self.valid_words:
dummy.set_key(kodeord)
dummy.decode(text)
decoded_text = dummy.get_decoded().lower().split()
#For å visualisere hvor langt dekrypteringen har kommet i fase 2
print(decoded_text, count)
count += 1
#Denne operasjonen koster sykt mye
matching_words = set(decoded_text).intersection(self.valid_words)
if len(matching_words) > words_recognized_record:
words_recognized_record = len(matching_words)
guessed_word = " ".join(decoded_text)
if words_recognized_record >= 5:
print("Kodeordet er", kodeord)
return guessed_word
else:
return "Could not brute force the message"
A = Sender()
#A.set_key((2,0))
#A.set_cipher(Affine())
A.set_key("pizza")
A.set_cipher(Ubrytelig())
A.encode("To recieve full marks you have to solve all parts")
#Print how the encrypted text looks like
print(A.get_encoded())
H = Hacker()
print(H.decode(A.get_encoded()))
| StarcoderdataPython |
3210020 | import os
import sys
import tempfile
import zipfile
def create_importable_zip(filename):
z = zipfile.ZipFile(filename, 'w')
z.writestr('hello.py', 'def f(): return "hello world from " + __file__\n')
z.close()
def import_and_run_module():
import hello
print hello.f()
def main():
fhandle, filename = tempfile.mkstemp('.zip')
create_importable_zip(filename)
sys.path.insert(0, filename)
import_and_run_module()
os.close(fhandle)
os.unlink(filename)
if __name__ == '__main__':
main()
| StarcoderdataPython |
52912 | <filename>thirdparty_xentax/test_extraction.py
# -*- coding: utf-8 -*-
import phyre, importlib, os
importlib.reload(phyre)
# 1 or 2
ffx=1
# pc, npc, mon, obj, skl, sum, or wep
tp = 'pc'
# model number (no leading zeros)
num = 106
ffxBaseDir=r'C:\SteamLibrary\steamapps\common\FINAL FANTASY FFX&FFX-2 HD Remaster\data\FFX_Data_VBF\ffx_data\gamedata\ps3data\chr'
ffx2BaseDir=r'C:\SteamLibrary\steamapps\common\FINAL FANTASY FFX&FFX-2 HD Remaster\data\FFX2_Data_VBF\ffx-2_data\gamedata\ps3data\chr'
baseDir=[ffxBaseDir, ffx2BaseDir]
types={'pc':'c', 'npc':'n', 'mon':'m', 'obj':'f', 'skl':'k', 'sum':'s', 'wep':'w'}
file=baseDir[ffx-1]
cs = types[tp] + '%03d' % num
meshFile = os.path.join(file, tp, cs,'mdl','d3d11', cs + r'.dae.phyre')
ddsFile = os.path.join(file, tp, cs, 'tex', 'd3d11', cs + r'.dds.phyre')
outFile = r'mytest.obj'
outFile2 = r'mytest.dds'
#outFile = None
phyre.extractMesh(meshFile,outFile, debug=False)
print("\n")
if os.path.isfile(ddsFile):
phyre.extractDDS(ddsFile, outFile2)
else:
print("DDS file not found. Skipping") | StarcoderdataPython |
150658 | from ferris import BasicModel, ndb
import logging
class Main(BasicModel):
criteria = ndb.StringProperty()
data = ndb.JsonProperty()
permissions = ndb.JsonProperty()
resolved = ndb.BooleanProperty()
@classmethod
def create(cls, params):
entity = cls.get(params['criteria'])
if entity:
return entity
item = cls(id=params['criteria'])
item.populate(**params)
item.put()
return item
@classmethod
def get(cls, key_name, key_only=False):
if not key_name:
return None
key = ndb.Key(cls, key_name)
ret = key.get()
if key_only:
return key if ret else None
return ret
| StarcoderdataPython |
161470 | # Name: <NAME> and <NAME>
# Date: 7/10/18
import random
"""
proj 03: Guessing Game
Generate a random number between 1 and 9 (including 1 and 9).
Ask the user to guess the number, then tell them whether they guessed too low, too high,
or exactly right. Keep the game going until the user types exit.
Keep track of how many guesses the user has taken, and when the game ends, print this out.
"""
#define variables
first_loop = True
points = 0
#loop
while first_loop == True:
second_loop = True
random_number = random.randint(1, 9)
guess_number = 0
print "I'm thinking of a number between 1 and 9. Can you guess my number?"
max_guesses = int(raw_input("How many guesses do you want? "))
while second_loop == True:
if guess_number == max_guesses:
print "You are out of guesses! The number was " + str(random_number) + "."
second_loop = False
print "You have " + str(points) + " points."
play_again = raw_input("Would you like to play again? (y/n)")
if play_again == "n":
first_loop = False
print "Game ended."
else:
user_input = raw_input("Enter a number, or 'exit' to end the game: ")
try:
int(user_input)
if int(user_input) < 1 or int(user_input) > 9:
print "That is not a number between 1 and 9!"
elif int(user_input) > random_number:
print "Your number is too high!"
guess_number = guess_number + 1
elif int(user_input) < random_number:
print "Your number is too low!"
guess_number = guess_number + 1
elif int(user_input) == random_number:
guess_number = guess_number + 1
print "Congratulations, you guessed my number!", "You used", guess_number, "guesses."
second_loop = False
points = points + 1
if points == 1:
print "You have " + str(points) + " point."
else:
print "You have " + str(points) + " points."
play_again = raw_input("Would you like to play again? (y/n)" )
if play_again == "n":
first_loop = False
print "Game ended."
except ValueError:
if str(user_input) == "exit" or "Exit":
print "Game ended."
first_loop = False
second_loop = False
else:
print "That is not a valid answer!"
| StarcoderdataPython |
1659567 | <filename>engine/test/mock_app/mock_stream_timeout.py
import asyncio
from hopeit.app.logger import app_extra_logger
from hopeit.app.context import EventContext
# Event steps executed by the hopeit engine for this event, in order.
__steps__ = ['wait']
from mock_app import MockData, MockResult
# Module-level logger plus its extra-fields helper.
logger, extra = app_extra_logger()
async def wait(payload: MockData, context: EventContext) -> MockResult:
    """Test step: sleep 5s when payload.value == "timeout" (so callers can
    exercise stream timeout handling), then return a fixed MockResult."""
    logger.info(context, "mock_stream_timeout.wait")
    if payload.value == "timeout":
        await asyncio.sleep(5.0)
    return MockResult("ok: ok")
| StarcoderdataPython |
1799818 | from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.io import export_png, show
from bokeh.palettes import Category20
from sklearn.decomposition import PCA
import holoviews as hv
from holoviews.operation import gridmatrix
import numpy as np
import pandas as pd
import os
# Generate (or load) a synthetic clustered dataset: 200 samples, N=15
# features, drawn around 5 random profile vectors; last CSV column is the
# group label.
if not os.path.exists("./simulated_pca_data.csv"):
    print("Generating and saving new data")
    N = 15
    Profiles = np.random.uniform(0, 2, size=(5, N))
    U = np.random.choice([0, 1, 2, 3, 4], size=200, replace=True)
    d = np.zeros((200, N + 1))
    for i, x in enumerate(U):
        d[i, :-1] = np.random.normal(Profiles[x, :], 0.3)
    d[:, N] = U.astype(int)
    np.savetxt("./simulated_pca_data.csv", d, delimiter=",")
    d = d[:, :-1]
    pd.DataFrame(d, columns=[str(x) for x in range(N)]).to_csv('../data/simulated_pca_data.csv')
    pd.DataFrame(U).to_csv('../data/simulated_pca_data_labels.csv')
else:
    F = np.loadtxt("./simulated_pca_data.csv", delimiter=",")
    d = F[:, :-1]
    U = F[:, -1].astype(int)
    N = d.shape[1]
    # Bug fix: the original passed (d.shape[0], d.shape[1]) here, printing
    # "200 features and 15 samples"; rows are samples, columns are features.
    print("Loaded array with {} features and {} samples".format(d.shape[1], d.shape[0]))
colors = ["blue", "red", "black", "orange", "green"]
# Full-rank PCA; S holds the loadings, D the projected samples.
P = PCA(n_components=N).fit(d)
S = P.components_
D = P.transform(d)
pc_plot = figure(
    x_range=(-4, 4),
    y_range=(-4, 4),
    title="Scatter plot of two most significant principal components",
    toolbar_location=None,
)
pc_plot.scatter(x=D[:, 0], y=D[:, 1])
export_png(pc_plot, filename="../img/pcadimred.png")
pc_plot_colored = figure(
    x_range=(-4, 4),
    y_range=(-4, 4),
    title="Scatter plot of two most significant principal components (colored by underlying group)",
    toolbar_location=None,
)
pc_plot_colored.scatter(x=D[:, 0], y=D[:, 1], color=[Category20[10][i] for i in U])
export_png(pc_plot_colored, filename="../img/pcadimred_colors.png")
eigenvalue_plot = figure(
    title="Eigenvalues of the covariance matrix", toolbar_location=None
)
eigenvalue_plot.line(x=range(1, N + 1), y=P.explained_variance_)
eigenvalue_plot.circle(x=range(1, N + 1), y=P.explained_variance_)
export_png(eigenvalue_plot, filename="../img/eigenvalues.png")
feature_plot = figure(
    x_range=(-4, 4),
    y_range=(-4, 4),
    title="Scatter plot of two of the original features",
    toolbar_location=None,
)
feature_plot.scatter(x=d[:, 0], y=d[:, 7])
export_png(feature_plot, filename="../img/features.png")
# Pairwise scatter-matrix rendered via holoviews.
ds = hv.Dataset(pd.DataFrame(d, columns=[str(x) for x in range(N)]))
hv.extension("bokeh")
density_grid = gridmatrix(ds, chart_type=hv.Points).opts(
    height=1000, width=1000, toolbar=None
)
hv.save(density_grid, "../img/density.png")
# Abbreviated HTML preview of the raw data table.
with open("./simulated_pca_data_table.html", "w") as f:
    pd.DataFrame(
        d,
        columns=["f-{}".format(x) for x in range(15)],
        index=["s-{}".format(x) for x in range(200)],
    ).to_html(float_format=lambda x: "{:.2f}".format(x), max_rows=5, buf=f)
loading_plot = figure(
    x_range=(-4, 4),
    y_range=(-4, 4),
    title="Projection of feature axes (loadings) in PC space",
    toolbar_location=None,
)
loading_plot.scatter(x=D[:, 0], y=D[:, 1])
for i in range(15):
    # Draw each feature axis as a long line through the origin in PC space.
    loading_plot.line(
        x=[-100 * S[0, i], 100 * S[0, i]],
        y=[-100 * S[1, i], 100 * S[1, i]],
        color=Category20[20][i],
        line_width=1,
        legend_label=str(i),
    )
loading_plot.legend.location = "top_left"
loading_plot.legend.click_policy = "hide"
export_png(loading_plot, filename="../img/loading.png")
show(column(pc_plot, pc_plot_colored, feature_plot, eigenvalue_plot, loading_plot))
| StarcoderdataPython |
1661548 | import logging.config
from pathlib import Path
import sqlite3
import yaml
# configuring logging
with open('log_config.yaml', 'r') as f:
    log_config = yaml.safe_load(f.read())
logging.config.dictConfig(log_config)
# Module-level logger used by the pipeline below.
logger = logging.getLogger(__name__)
class BooksPipeline(object):
    """Scrapy item pipeline that persists scraped book items to SQLite.

    Items are first inserted into a staging table; when the spider closes,
    rows not yet present in scraped_books are copied over and the staging
    table is cleared.
    """
    def __init__(self):
        self.create_connection()
    def create_connection(self):
        # Open (or create) db/books.db relative to the project root and
        # keep one connection/cursor for the pipeline's lifetime.
        db_path = Path(__file__).parent.parent / 'db' / 'books.db'
        self.conn = sqlite3.connect(db_path)
        logger.info('Database connection created.')
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):
        # Scrapy hook: store every item, then pass it along unchanged.
        self.store_db(item)
        return item
    def store_db(self, item):
        # Insert one item into the staging table (commits immediately).
        self.cursor.execute('''
            INSERT INTO scraped_books_stage
            VALUES (?, ?, ?, ?, ?, ?, ?)''', (
            item['author'],
            item['book_title'],
            item['series_title'],
            item['number_in_series'],
            item['publication_year'],
            item['publication_month'],
            item['scraped_at']
        ))
        self.conn.commit()
        logger.debug('Inserted items into scraped_books_stage table: %s, %s, %s, %s, %s, %s, %s',
                     item['author'], item['book_title'], item['series_title'], item['number_in_series'],
                     item['publication_year'], item['publication_month'], item['scraped_at']
                     )
    def close_spider(self, spider):
        # add new books to scraped_books table and clear table scraped_books_stage
        self.cursor.executescript('''
            INSERT INTO scraped_books
                ('author', 'book_title', 'series_title', 'number_in_series',
                'publication_year', 'publication_month', 'scraped_at')
            SELECT ss.* FROM scraped_books_stage ss
            LEFT JOIN scraped_books s
                ON s.author = ss.author and s.book_title = ss.book_title
            WHERE s.author IS NULL;
            DELETE FROM scraped_books_stage
            ''')
        logger.info('Checked new items for scraped_books table.')
        self.conn.commit()
        self.conn.close()
        logger.info('Database connection closed.') | StarcoderdataPython |
1667068 | <gh_stars>0
import math
from functools import lru_cache, partial
from typing import List, Tuple, Optional, Mapping
import numpy as np
import pandas as pd
from .exp_center import exp_center_file_in_directory
from .input import find_input_file_in_directory, get_input_values, _OUTPUT_NAMES
from .log import log_process
from .outformation import find_outformation_in_directory, load_outformation
from .simudata import simudata_file_in_directory
from .trans import ff, l2_distance, epsg4326_to_3857
# K formation_num
# K initial_reduce
# K final_total_size
# K dispersion
# K density
# K center_gap
# K dangerous_frequency
# K crash_probability
# K polarization
# execute_time
# K airway_bias
# K loc_bias
# X adjust_ratio
# K stable_time
def get_formation_num(outformation_data: List[Tuple[float, int, int]]) -> float:
    """Average number of drones out of formation, normalised by the final
    timestamp times 50 (the log's sampling factor).

    Each entry of `outformation_data` is (timestamp, in_formation, total_size).
    """
    out_of_formation_total = 0
    last_time = 0
    for t_time, out_form, total_size in outformation_data:
        out_of_formation_total += total_size - out_form
        last_time = t_time
    return out_of_formation_total / (int(last_time) * 10 * 5)
def get_initial_reduce_and_final_total_size(outformation_data: List[Tuple[float, int, int]]) -> Tuple[float, float]:
    """Return (time of the first drop below 20 drones, final fleet size).

    The first component is 0 when the fleet never drops below 20.
    """
    first_drop_time = 0
    for t_time, _, total_size in outformation_data:
        if total_size < 20:
            first_drop_time = t_time
            break
    return first_drop_time, outformation_data[-1][2]
def get_dispersion(simudata: pd.DataFrame, exp_data: pd.DataFrame,
                   outformation_data: List[Tuple[float, int, int]]) -> float:
    """Mean coefficient of variation (std/mean) of drone distances to the
    reference center, over stable (>=95% in formation) snapshots.

    Returns -1 when no stable snapshot is found.
    """
    aim_list = []
    j = 0
    for i in range(exp_data.shape[0]):
        current_time = exp_data['time'][i]
        # Advance the outformation cursor to the entry matching current_time.
        while abs(outformation_data[j][0] - current_time) >= 1e-4 and \
                outformation_data[j][0] <= current_time:
            j += 1
        if abs(outformation_data[j][0] - current_time) >= 1e-4:
            continue
        _, outformation_num, cur_total_size = outformation_data[j]
        if (cur_total_size - outformation_num) >= 0.95 * cur_total_size:
            distance_list = []
            center = [exp_data['r_x'][i], exp_data['r_y'][i], exp_data['r_h'][i]]
            tt = ff(exp_data['time'][i])
            tmp_sim = simudata[(simudata['time'] < tt + 0.001) & (simudata['time'] > tt - 0.001)].reset_index(
                drop=True)
            if tmp_sim.shape[0] > 0:
                # Bug fix: the inner loop previously reused `j`, clobbering
                # the outformation cursor above; use a separate index.
                for k in range(tmp_sim.shape[0]):
                    distance_list.append(
                        l2_distance(center, [tmp_sim['x'][k], tmp_sim['y'][k], tmp_sim['height'][k]]))
                distance_list.sort()
                # Keep the closest 90% to suppress outliers.
                distance_list = distance_list[0:int(0.9 * len(distance_list))]
                aim_list.append(np.std(np.array(distance_list)) / np.mean(np.array(distance_list)))
            else:
                continue
    # noinspection PyTypeChecker
    return -1 if len(aim_list) == 0 else np.mean(np.array(aim_list))
def get_density(simudata: pd.DataFrame, exp_data: pd.DataFrame) -> float:
    """Average swarm density (drones per volume) around the reference center,
    scaled to drones per (100 m)^3 — assuming coordinates are in meters;
    TODO confirm units.

    For each snapshot: take the radius enclosing the closest 90% of drones
    and compute 0.9*N / sphere volume.
    NOTE(review): raises IndexError/ZeroDivisionError when a snapshot has no
    matching simulation rows — confirm inputs always overlap in time.
    """
    density = 0
    for i in range(exp_data.shape[0]):
        center = [exp_data['r_x'][i], exp_data['r_y'][i], exp_data['r_h'][i]]
        tt = ff(exp_data['time'][i])
        # Rows of simudata within +/-0.001 of the (rounded) snapshot time.
        tmp_sim = simudata[(simudata['time'] < tt + 0.001) & (simudata['time'] > tt - 0.001)].reset_index(drop=True)
        distances = []
        for j in range(tmp_sim.shape[0]):
            x = tmp_sim['x'][j]
            y = tmp_sim['y'][j]
            obj = [x, y, tmp_sim['height'][j]]
            distances.append(l2_distance(center, obj))
        distances.sort()
        bias = int(len(distances) * 0.9)
        r_value = distances[bias - 1]
        # Density of the closest 90% inside the sphere of radius r_value.
        res = 3 * bias / (4 * math.pi * (r_value ** 3))
        density += res
    return density / (exp_data.shape[0]) * (100 ** 3)
def get_center_gap(simudata: pd.DataFrame, exp_data: pd.DataFrame) -> float:
    """Average distance between drones and the swarm centroid, averaged over
    all snapshots (outlier-trimmed to the closest 90% per snapshot).

    NOTE(review): divides by tmp_sim.shape[0] / len(distances) — raises on a
    snapshot with no matching simulation rows; confirm inputs always overlap.
    """
    center_avg_gap = 0
    for i in range(exp_data.shape[0]):
        tt = ff(exp_data['time'][i])
        tmp_sim = simudata[(simudata['time'] < tt + 0.001) & (simudata['time'] > tt - 0.001)].reset_index(drop=True)
        dist = 0
        obj = [0, 0, 0]
        # Accumulate coordinates to build the snapshot centroid.
        for j in range(tmp_sim.shape[0]):
            x = tmp_sim['x'][j]
            y = tmp_sim['y'][j]
            obj[0] += x
            obj[1] += y
            obj[2] += tmp_sim['height'][j]
        center = [obj[0] / tmp_sim.shape[0], obj[1] / tmp_sim.shape[0], obj[2] / tmp_sim.shape[0]]
        distances = []
        for j in range(tmp_sim.shape[0]):
            x = tmp_sim['x'][j]
            y = tmp_sim['y'][j]
            obj2 = [x, y, tmp_sim['height'][j]]
            distances.append(l2_distance(center, obj2))
        distances.sort()
        # Trim to the closest 90% to suppress stragglers.
        distances = distances[0:int(len(distances) * 0.9)]
        for d in distances:
            dist += d
        center_avg_gap += dist / len(distances)
    return center_avg_gap / exp_data.shape[0]
def get_danger_frequency(outformation_data: List[Tuple[float, int, int]]) -> float:
    """Pairwise-weighted crash score: each fleet-size drop of `lost` drones
    contributes lost*(lost-1), normalised by the initial fleet of 20."""
    alive, weighted_crashes = 20, 0
    for _, _, current_size in outformation_data:
        if current_size < alive:
            lost = alive - current_size
            weighted_crashes += lost * (lost - 1)
            alive = current_size
    return weighted_crashes / 20
def get_crash_probability(outformation_data: List[Tuple[float, int, int]]) -> float:
    """Fraction of the initial 20-drone fleet that was lost over the run."""
    alive, crashed = 20, 0
    for _, _, current_size in outformation_data:
        if current_size < alive:
            crashed += alive - current_size
            alive = current_size
    return crashed / 20
def get_polarization(simudata: pd.DataFrame, exp_data: pd.DataFrame,
                     outformation_data: List[Tuple[float, int, int]]) -> float:
    """Mean magnitude of the summed per-drone displacement deviation from the
    swarm-center displacement, over stable (>=95% in formation) snapshots.

    Returns -1 when no stable snapshot is found.
    """
    aim_list = []
    j = 0
    for i in range(exp_data.shape[0]):
        if i == 0:
            continue
        current_time = exp_data['time'][i]
        # Advance the outformation cursor to the entry matching current_time.
        while abs(outformation_data[j][0] - current_time) >= 1e-4 and \
                outformation_data[j][0] <= current_time:
            j += 1
        if abs(outformation_data[j][0] - current_time) >= 1e-4:
            continue
        _, outformation_num, cur_total_size = outformation_data[j]
        if (cur_total_size - outformation_num) >= 0.95 * cur_total_size:
            ans = [0, 0, 0]
            center = [exp_data['r_x'][i], exp_data['r_y'][i], exp_data['r_h'][i]]
            tt = ff(exp_data['time'][i])
            tmp_sim = simudata[(simudata['time'] < tt + 0.001) & (simudata['time'] > tt - 0.001)] \
                .reset_index(drop=True)
            pre_center = [exp_data['r_x'][i - 1], exp_data['r_y'][i - 1], exp_data['r_h'][i - 1]]
            pre_tt = ff(exp_data['time'][i - 1])
            pre_tmp_sim = simudata[
                (simudata['time'] < pre_tt + 0.001) & (simudata['time'] > pre_tt - 0.001)] \
                .reset_index(drop=True)
            center_dir = [center[0] - pre_center[0], center[1] - pre_center[1], center[2] - pre_center[2]]
            if tmp_sim.shape[0] > 0:
                # Bug fix: the inner loop previously reused `j`, clobbering
                # the outformation cursor above; use a distinct index.
                for m in range(tmp_sim.shape[0]):
                    # Match each drone to its position in the previous snapshot.
                    for k in range(pre_tmp_sim.shape[0]):
                        if pre_tmp_sim['id'][k] == tmp_sim['id'][m]:
                            item_dir = [tmp_sim['x'][m] - pre_tmp_sim['x'][k],
                                        tmp_sim['y'][m] - pre_tmp_sim['y'][k],
                                        tmp_sim['height'][m] - pre_tmp_sim['height'][k]]
                            ans[0] += item_dir[0] - center_dir[0]
                            ans[1] += item_dir[1] - center_dir[1]
                            ans[2] += item_dir[2] - center_dir[2]
                            break
            aim_list.append(math.sqrt(np.sum(np.array(ans) ** 2)))
    return -1 if len(aim_list) == 0 else np.mean(np.array(aim_list))
@lru_cache()
def _prepare_airway_bias():
    """Direction vector (dx, dy, dz) of the reference airway, computed once.

    The two endpoints are fixed (lon, lat, alt) coordinates, projected from
    EPSG:4326 to EPSG:3857 before differencing.
    """
    p1 = [13.147670731635747, 43.65982853870639, 2000.0]
    p2 = [13.186799589095362, 43.78649351263097, 2000.0]
    p1[0], p1[1] = epsg4326_to_3857(p1[1], p1[0])
    p2[0], p2[1] = epsg4326_to_3857(p2[1], p2[0])
    return p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]
def get_airway_bias(simudata: pd.DataFrame, exp_data: pd.DataFrame) -> float:
    """Mean angle (radians) between the swarm-center displacement and the
    reference airway direction across consecutive snapshots.

    Note: `simudata` is accepted but unused; kept for interface symmetry
    with the sibling metric functions.
    NOTE(review): divides by |center_dir| — a stationary center between two
    snapshots would raise ZeroDivisionError; confirm the center always moves.
    """
    way_dir = _prepare_airway_bias()
    aim_list = []
    for i in range(exp_data.shape[0]):
        if i == 0:
            continue
        center = [exp_data['r_x'][i], exp_data['r_y'][i], exp_data['r_h'][i]]
        pre_center = [exp_data['r_x'][i - 1], exp_data['r_y'][i - 1], exp_data['r_h'][i - 1]]
        center_dir = [center[0] - pre_center[0], center[1] - pre_center[1], center[2] - pre_center[2]]
        # Angle via the dot-product formula.
        ans = math.acos(
            (way_dir[0] * center_dir[0] + way_dir[1] * center_dir[1] + way_dir[2] * center_dir[2]) /
            (math.sqrt(np.sum(np.array(way_dir) ** 2)) * math.sqrt(np.sum(np.array(center_dir) ** 2)))
        )
        aim_list.append(ans)
    # noinspection PyTypeChecker
    return np.mean(np.array(aim_list))
def get_loc_bias(simudata: pd.DataFrame, exp_data: pd.DataFrame) -> float:
    """Minimum distance between the swarm center and the fixed destination
    point (projected from EPSG:4326 to EPSG:3857) over all snapshots.

    `simudata` is accepted but unused, for interface symmetry.
    """
    target = [13.186799589095362, 43.78649351263097, 2000.0]
    target[0], target[1] = epsg4326_to_3857(target[1], target[0])
    best = float('inf')
    for i in range(exp_data.shape[0]):
        dx = exp_data['r_x'][i] - target[0]
        dy = exp_data['r_y'][i] - target[1]
        dz = exp_data['r_h'][i] - target[2]
        candidate = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
        if candidate < best:
            best = candidate
    return best
def get_execute_time(inputs: Mapping[str, object], outformation_data: List[Tuple[float, int, int]]):
    """Total time spent executing the (two) control commands.

    `inputs['time']` and `inputs['time.1']` are the command issue times.
    Time is accumulated from the active command's start until either the
    fleet reaches >=95% formation, the next command starts, or the log ends.
    """
    control_time = (inputs['time'], inputs['time.1'])
    execute_time = 0
    c_index = 0  # index of the currently-active control command
    cur_time = control_time[c_index]  # start of the interval being measured
    for time_, out_form, total_size in outformation_data:
        # Last log entry: close out the current interval and stop.
        if abs(time_ - outformation_data[-1][0]) < 1e-6:
            execute_time += time_ - cur_time
            break
        # The next command's start time was passed: close the interval and
        # switch to the next command.
        if c_index + 1 < len(control_time):
            if time_ > control_time[c_index + 1]:
                execute_time += time_ - cur_time
                cur_time = control_time[c_index + 1]
                c_index += 1
        # Fleet reached >=95% in formation: the active command is done.
        if (total_size - out_form) >= int(total_size * 0.95):
            if time_ > cur_time:
                execute_time += time_ - cur_time
            if c_index + 1 == len(control_time):
                break
            else:
                cur_time = control_time[c_index + 1]
                c_index += 1
    return execute_time
def get_stable_time(outformation_data: List[Tuple[float, int, int]]) -> float:
    """Percentage of log entries in which at least 95% of the fleet is in
    formation, rounded via the project's ff() helper."""
    stable_entries = sum(
        1 for _, out_form, total_size in outformation_data
        if (total_size - out_form) >= int(total_size * 0.95)
    )
    return ff(stable_entries / len(outformation_data) * 100)
# Canonical ordering of all keys produced by get_all_metrics():
# the raw input values first, then the computed metrics.
_ALL_NAME_LIST = [
    # inputs
    *_OUTPUT_NAMES,
    # metrics
    'formation_num',
    'initial_reduce',
    'final_total_size',
    'dispersion',
    'density',
    'center_gap',
    'dangerous_frequency',
    'crash_probability',
    'polarization',
    'execute_time',
    'airway_bias',
    'loc_bias',
    # 'adjust_ratio',
    'stable_time',
]
def get_all_metrics(directory: str, force: bool = False,
                    shown_names: Optional[List[str]] = None):
    """Compute the requested metrics for one simulation run directory.

    Loads the input, simulation, experiment-center and outformation files
    from `directory`, then lazily evaluates only the metrics listed in
    `shown_names` (defaults to _ALL_NAME_LIST).  `force` is forwarded to
    log_process().  Returns {metric_name: value}.
    """
    shown_names = shown_names or _ALL_NAME_LIST
    input_file = find_input_file_in_directory(directory)
    simudata_file = simudata_file_in_directory(directory)
    exp_center_file = exp_center_file_in_directory(directory)
    outformation_file = find_outformation_in_directory(directory)
    log_process(directory, force)
    input_values = get_input_values(input_file)
    simudata = pd.read_csv(simudata_file)
    exp_data = pd.read_csv(exp_center_file)
    outformation_data = load_outformation(outformation_file)
    irft = None
    def _get_irft() -> Tuple[float, float]:
        # Memoize: 'initial_reduce' and 'final_total_size' share one call.
        nonlocal irft
        if irft is None:
            irft = get_initial_reduce_and_final_total_size(outformation_data)
        return irft
    # Each entry is a thunk so unrequested metrics are never computed.
    data_map = {
        'formation_num': lambda: get_formation_num(outformation_data),
        'initial_reduce': lambda: _get_irft()[0],
        'final_total_size': lambda: _get_irft()[1],
        'dispersion': lambda: get_dispersion(simudata, exp_data, outformation_data),
        'density': lambda: get_density(simudata, exp_data),
        'center_gap': lambda: get_center_gap(simudata, exp_data),
        'dangerous_frequency': lambda: get_danger_frequency(outformation_data),
        'crash_probability': lambda: get_crash_probability(outformation_data),
        'polarization': lambda: get_polarization(simudata, exp_data, outformation_data),
        'execute_time': lambda: get_execute_time(input_values, outformation_data),
        'airway_bias': lambda: get_airway_bias(simudata, exp_data),
        'loc_bias': lambda: get_loc_bias(simudata, exp_data),
        # 'adjust_ratio': lambda: -1,
        'stable_time': lambda: get_stable_time(outformation_data),
        **{name: partial(input_values.__getitem__, name) for name in _OUTPUT_NAMES},
    }
    return {name: data_map[name]() for name in shown_names}
| StarcoderdataPython |
4816797 | <reponame>npvisual/fondat-aws
import pytest
import asyncio
from fondat.aws import Client, Config
from fondat.aws.secrets import Secret, secrets_resource
from fondat.error import BadRequestError, NotFoundError
from uuid import uuid4
# Run every test coroutine in this module under asyncio.
pytestmark = pytest.mark.asyncio
# Client config pointing at a local endpoint; port 4566 is the LocalStack
# default, so these tests presumably require a running LocalStack — verify.
config = Config(
    endpoint_url="http://localhost:4566",
    aws_access_key_id="id",
    aws_secret_access_key="secret",
    region_name="us-east-1",
)
@pytest.fixture(scope="module")
def event_loop():
    # Module-scoped loop so the module-scoped async fixtures below share it.
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
@pytest.fixture(scope="module")
async def client():
    # One Secrets Manager client shared by all tests in this module.
    async with Client(service_name="secretsmanager", config=config) as client:
        yield client
@pytest.fixture(scope="module")
async def resource(client):
    # Uncached secrets resource; cache-specific tests build their own.
    yield secrets_resource(client)
async def test_string_binary(resource):
    """Round-trip a string then a binary secret; absent/deleted names raise."""
    name = str(uuid4())
    with pytest.raises(NotFoundError):
        await resource[name].delete()
    with pytest.raises(NotFoundError):
        await resource[name].put(Secret(value="something"))
    await resource.post(name=name, secret=Secret(value="string"))
    assert (await resource[name].get()).value == "string"
    await resource[name].put(Secret(value=b"binary"))
    assert (await resource[name].get()).value == b"binary"
    await resource[name].delete()
    with pytest.raises(BadRequestError):
        await resource[name].get()
async def test_binary_string(resource):
    """Round-trip a binary secret, then overwrite it with a string value."""
    name = str(uuid4())
    await resource.post(name=name, secret=Secret(value=b"binary"))
    assert (await resource[name].get()).value == b"binary"
    await resource[name].put(Secret(value="string"))
    assert (await resource[name].get()).value == "string"
    await resource[name].delete()
async def test_get_cache(client):
    """get() results are cached: a backend delete is not observed within TTL."""
    resource = secrets_resource(client, cache_size=10, cache_expire=10)
    name = str(uuid4())
    secret = Secret(value=name)
    await client.create_secret(Name=name, SecretString=secret.value)
    assert await resource[name].get() == secret  # caches secret
    await client.delete_secret(SecretId=name)
    assert await resource[name].get() == secret  # still cached
async def test_put_get_cache(client):
    """post() populates the cache, so get() succeeds after a backend delete."""
    resource = secrets_resource(client, cache_size=10, cache_expire=10)
    name = str(uuid4())
    secret = Secret(value=name)
    await resource.post(name=name, secret=secret)  # caches secret
    await client.delete_secret(SecretId=name)
    assert await resource[name].get() == secret  # still cached
async def test_delete_cache(client):
    """delete() evicts the cached row; subsequent get() observes the deletion."""
    resource = secrets_resource(client, cache_size=10, cache_expire=10)
    name = str(uuid4())
    secret = Secret(value=name)
    await resource.post(name=name, secret=secret)  # caches secret
    await resource[name].get()  # still cached
    await resource[name].delete()  # deletes cached row
    with pytest.raises(BadRequestError):  # marked as deleted
        await resource[name].get()
async def test_get_cache_evict(client):
    """With cache_size=1, the older entry is evicted while the newer stays cached."""
    resource = secrets_resource(client, cache_size=1, cache_expire=10)
    name1 = str(uuid4())
    secret1 = Secret(value=name1)
    await client.create_secret(Name=name1, SecretString=secret1.value)
    name2 = str(uuid4())
    secret2 = Secret(value=name2)
    await client.create_secret(Name=name2, SecretString=secret2.value)
    assert await resource[name1].get() == secret1
    assert await resource[name2].get() == secret2
    await client.delete_secret(SecretId=name1)
    await client.delete_secret(SecretId=name2)
    with pytest.raises(BadRequestError):
        await resource[name1].get()  # evicted and marked deleted
    assert await resource[name2].get() == secret2  # still cached
| StarcoderdataPython |
1724993 | from .image import subimage_by_roi
import astimp
class Antibiotic():
    """A single antibiotic disk tested in an AST image.

    Bundles the detected pellet circle, the measured inhibition, the cropped
    image region (ROI) and the image scale for one antibiotic.
    """

    def __init__(self, short_name, pellet_circle, inhibition, image, roi, px_per_mm):
        self.short_name = short_name
        self.pellet_circle = pellet_circle
        self.inhibition = inhibition
        self.img = image
        self.px_per_mm = px_per_mm
        self.roi = roi
        self._center_in_roi = None  # lazily-computed cache for center_in_roi

    @property
    def center_in_roi(self):
        """Pellet center expressed in the ROI's coordinate frame (cached)."""
        if self._center_in_roi is None:
            x, y = self.pellet_circle.center
            self._center_in_roi = (x - self.roi.left, y - self.roi.top)
        return self._center_in_roi

    def __repr__(self):
        return "ATB : {n}, inhibition diameter: {d:.1f}mm".format(n=self.short_name, d=self.inhibition.diameter)
class AST():
    """Represent an AST (antibiotic susceptibility test) image.

    Every derived quantity (crop, pellet circles, labels, inhibition
    diameters, ...) is computed lazily on first access via the astimp
    library and cached in the matching underscore attribute.
    """
    def __init__(self, ast_image):
        self.img = ast_image
        # Lazily-populated caches; filled by the properties below.
        self._crop = None
        self._petriDish = None
        self._circles = None
        self._rois = None
        self._mm_per_px = None
        self._px_per_mm = None
        self._pellets = None
        self._labels = None
        self._labels_text = None
        self._preproc = None
        self._inhibitions = None
    @property
    def crop(self):
        """cropped image of Petri dish"""
        if self._crop is None:
            self._crop = self.petriDish.img
        return self._crop
    @crop.setter
    def crop(self, image):
        # Allow callers to override the automatically computed crop.
        self._crop = image
    @property
    def petriDish(self):
        """Petri dish"""
        if self._petriDish is None:
            self._petriDish = astimp.getPetriDish(self.img)
        return self._petriDish
    @property
    def circles(self):
        """circles representing pellets"""
        if self._circles is None:
            self._circles = astimp.find_atb_pellets(self.crop)
        return self._circles
    @property
    def rois(self):
        """Regions of interest around each pellet, capped at max_diam_mm."""
        if self._rois is None:
            max_diam_mm = 40  # TODO: get this from config
            self._rois = astimp.inhibition_disks_ROIs(
                self.circles, self.crop, max_diam_mm*self.px_per_mm)
        return self._rois
    @property
    def mm_per_px(self):
        """image scale"""
        if self._mm_per_px is None:
            self._mm_per_px = astimp.get_mm_per_px(self.circles)
        return self._mm_per_px
    @property
    def px_per_mm(self):
        """image scale (inverse of mm_per_px)"""
        if self._px_per_mm is None:
            self._px_per_mm = 1/astimp.get_mm_per_px(self.circles)
        return self._px_per_mm
    @property
    def pellets(self):
        """subimages of the found pellets"""
        if self._pellets is None:
            self._pellets = [astimp.cutOnePelletInImage(
                self.crop, circle) for circle in self.circles]
        return self._pellets
    @property
    def labels(self):
        """label objects (OCR result per pellet)"""
        if self._labels is None:
            self._labels = [astimp.getOnePelletText(
                pellet) for pellet in self.pellets]
        return self._labels
    @property
    def labels_text(self):
        """label texts, in the same order as `circles`"""
        if self._labels_text is None:
            self._labels_text = tuple(label.text for label in self.labels)
        return self._labels_text
    @property
    def preproc(self):
        """preproc object for inhibition diameter measurement"""
        if self._preproc is None:
            self._preproc = astimp.inhib_diam_preprocessing(
                self.petriDish, self.circles)
        return self._preproc
    @property
    def inhibitions(self):
        """measured inhibition diameters, one per pellet"""
        if self._inhibitions is None:
            self._inhibitions = astimp.measureDiameters(self.preproc)
        return self._inhibitions
    def get_atb_by_idx(self, idx):
        """Build an Antibiotic value object for the pellet at index `idx`."""
        return Antibiotic(short_name=self.labels[idx].text,
                          pellet_circle=self.circles[idx],
                          roi=self.rois[idx],
                          inhibition=self.inhibitions[idx],
                          image=subimage_by_roi(self.crop, self.rois[idx]),
                          px_per_mm=self.px_per_mm)
    def get_atb_idx_by_name(self, short_name):
        """Index of the first pellet labeled `short_name` (ValueError if absent)."""
        return self.labels_text.index(short_name)
| StarcoderdataPython |
1740906 | # flake8: noqa
from .charts_options import (
BarItem,
BarBackgroundStyleOpts,
BMapCopyrightTypeOpts,
BMapGeoLocationControlOpts,
BMapNavigationControlOpts,
BMapOverviewMapControlOpts,
BMapScaleControlOpts,
BMapTypeControlOpts,
BoxplotItem,
CandleStickItem,
ComponentTitleOpts,
EffectScatterItem,
GaugeDetailOpts,
GaugePointerOpts,
GaugeTitleOpts,
GraphCategory,
GraphicBasicStyleOpts,
GraphicGroup,
GraphicImage,
GraphicImageStyleOpts,
GraphicItem,
GraphicRect,
GraphicShapeOpts,
GraphicText,
GraphicTextStyleOpts,
GraphLink,
GraphNode,
HeatMapItem,
LineItem,
MapItem,
Map3DColorMaterialOpts,
Map3DLabelOpts,
Map3DLightOpts,
Map3DLambertMaterialOpts,
Map3DPostEffectOpts,
Map3DRealisticMaterialOpts,
Map3DViewControlOpts,
PageLayoutOpts,
ParallelItem,
PieItem,
RadarItem,
SankeyLevelsOpts,
ScatterItem,
SunburstItem,
ThemeRiverItem,
TreeItem,
TreeMapItemStyleOpts,
TreeMapLevelsOpts,
)
from .global_options import (
AngleAxisItem,
AngleAxisOpts,
AnimationOpts,
Axis3DOpts,
AxisLineOpts,
AxisOpts,
AxisPointerOpts,
AxisTickOpts,
BrushOpts,
CalendarOpts,
CalendarDayLabelOpts,
CalendarMonthLabelOpts,
CalendarYearLabelOpts,
DataZoomOpts,
Grid3DOpts,
GridOpts,
InitOpts,
LegendOpts,
ParallelAxisOpts,
ParallelOpts,
PolarOpts,
RadarIndicatorItem,
RadiusAxisItem,
RadiusAxisOpts,
SingleAxisOpts,
TitleOpts,
ToolBoxFeatureBrushOpts,
ToolBoxFeatureDataViewOpts,
ToolBoxFeatureDataZoomOpts,
ToolBoxFeatureMagicTypeOpts,
ToolBoxFeatureOpts,
ToolBoxFeatureRestoreOpts,
ToolBoxFeatureSaveAsImageOpts,
ToolboxOpts,
TooltipOpts,
VisualMapOpts,
)
from .series_options import (
AreaStyleOpts,
EffectOpts,
ItemStyleOpts,
LabelOpts,
LineStyleOpts,
Lines3DEffectOpts,
MarkAreaItem,
MarkAreaOpts,
MarkLineItem,
MarkLineOpts,
MarkPointItem,
MarkPointOpts,
MinorSplitLineOpts,
MinorTickOpts,
SplitAreaOpts,
SplitLineOpts,
TextStyleOpts,
TreeMapBreadcrumbOpts,
)
| StarcoderdataPython |
127049 | <reponame>jakelong0509/master
"""import numpy as np
import matplotlib.pyplot as plt
class Grid:
def __init__(self, height, weight, start):
self.height = height
self.weight = weight
self.i = start[0]
self.j = start[1]
def set(self, rewards, actions):
self.rewards = rewards
self.actions = actions
def set_state(self, s):
self.i = s[0]
self.j = s[1]
def current_state(self):
return (self.i, self.j)
def is_terminal(self, s):
return s not in self.actions
def move(self, action):
if action in self.actions[(self.i, self.j)]:
if action == 'U':
self.i -= 1
if action == "D":
self.i += 1
if action == "R":
self.j += 1
if action == "L":
self.j -= 1
return self.rewards.get((self.i, self.j), 0)
def undo_move(self, action):
if action == 'U':
self.i += 1
if action == 'D':
self.i -= 1
if action == 'R':
self.j -= 1
if action == 'L':
self.j += 1
assert(self.current_state in self.all_states)
def game_over(self):
return (self.i, self.j) not in self.actions
def all_states(self):
return set(self.actions.keys()) | set(self.rewards.keys())
def standard_grid():
g = Grid(3, 4, (0,2))
rewards = {(0,3) : 1, (1,3) : -1}
actions = {
(0, 0): ('D', 'R'),
(0, 1): ('L', 'R'),
(0, 2): ('L', 'D', 'R'),
(1, 0): ('U', 'D'),
(1, 2): ('U', 'D', 'R'),
(2, 0): ('U', 'R'),
(2, 1): ('L', 'R'),
(2, 2): ('L', 'R', 'U'),
(2, 3): ('L', 'U'),
}
g.set(rewards, actions)
return g
def negative_grid(step_cost = -0.1):
g = standard_grid()
g.rewards.update({
(0, 0): step_cost,
(0, 1): step_cost,
(0, 2): step_cost,
(1, 0): step_cost,
(1, 2): step_cost,
(2, 0): step_cost,
(2, 1): step_cost,
(2, 2): step_cost,
(2, 3): step_cost,
})
return g
"""
class Grid: #Environment
    """Grid-world environment for tabular RL examples.

    States are (row, col) tuples.  `rewards` maps state -> reward received
    on entering it; `actions` maps state -> tuple of legal moves out of it
    ('U', 'D', 'L', 'R').  States absent from `actions` are terminal.
    """
    def __init__(self, width, height, start):
        self.height = height
        self.width = width
        self.i = start[0]  # current row
        self.j = start[1]  # current column
    def set(self, rewards , actions):
        """Attach the reward and action tables (keyed by (row, col))."""
        self.rewards = rewards
        self.actions = actions
    def set_state(self, s):
        """Teleport the agent to state `s`."""
        self.i = s[0]
        self.j = s[1]
    def current_state(self):
        return (self.i, self.j)
    def game_over(self):
        """True when the current state has no legal actions (terminal)."""
        return (self.i, self.j) not in self.actions
    def is_terminal(self, s):
        return s not in self.actions
    def move(self, action):
        """Apply `action` if it is legal in the current state, then return
        the reward for the resulting state (0 if none).  Illegal actions
        leave the state unchanged."""
        if action in self.actions[(self.i, self.j)]:
            if action == 'U':
                self.i -= 1
            if action == 'D':
                self.i += 1
            if action == 'R':
                self.j += 1
            if action == 'L':
                self.j -= 1
        return self.rewards.get((self.i, self.j), 0)
    def undo_move(self, action):
        """Reverse the effect of `action` (assumed to have just been applied)."""
        if action == 'U':
            self.i += 1
        if action == 'D':
            self.i -= 1
        if action == 'R':
            self.j -= 1
        if action == 'L':
            self.j += 1
        # Bug fix: the original asserted `self.current_state in self.all_states`
        # on the *bound methods*, which raises TypeError at runtime; the
        # methods must be called.
        assert self.current_state() in self.all_states()
    def all_states(self):
        """Every state that is either actionable or carries a reward."""
        return set(self.actions.keys()) | set(self.rewards.keys())
def standard_grid():
    """Build the canonical 3x4 grid-world: start at (2, 0), +1 reward at
    (0, 3), -1 at (1, 3); (1, 1) is a wall (no actions defined for it)."""
    rewards = {(0, 3): 1, (1, 3): -1}
    actions = {
        (0, 0): ('D', 'R'),
        (0, 1): ('L', 'R'),
        (0, 2): ('L', 'D', 'R'),
        (1, 0): ('U', 'D'),
        (1, 2): ('U', 'D', 'R'),
        (2, 0): ('U', 'R'),
        (2, 1): ('L', 'R'),
        (2, 2): ('L', 'R', 'U'),
        (2, 3): ('L', 'U'),
    }
    grid = Grid(3, 4, (2, 0))
    grid.set(rewards, actions)
    return grid
def negative_grid(step_cost = -0.1):
    """Like standard_grid(), but every non-terminal state carries
    `step_cost` as its reward, penalising long paths."""
    g = standard_grid()
    penalized_states = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 2),
                        (2, 0), (2, 1), (2, 2), (2, 3)]
    for state in penalized_states:
        g.rewards[state] = step_cost
    return g
| StarcoderdataPython |
40290 | import sys
from logs.logger import log
from utils import check_internet , get_public_ip
import bot
if __name__ == "__main__":
    # NOTE(review): `is True` assumes check_internet() returns the bool
    # singleton; a truthy non-True return would be treated as offline —
    # confirm against utils.check_internet.
    if check_internet() is True:
        try:
            log.info(f'Internet connection found : {get_public_ip()}')
            bot.run()
        except KeyboardInterrupt:
            # quit
            sys.exit()
    else:
        log.info('Please check your internet connection')
        sys.exit()
| StarcoderdataPython |
156374 | <gh_stars>1-10
import numpy as np
import json
import sys
import os
import pandas as pd # To parse and dump JSON
from kafka import KafkaConsumer # Import Kafka consumer
from kafka import KafkaProducer # Import Kafka producer
import pickle # Library to save and load ML regressors using pickle
import pandas as pd
# scikit learn
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
# Getting the current path of the project
current_path = os.getcwd()
class Learner:
    """Accumulates training samples for one key and (re)fits a regressor.

    `samples` is a DataFrame whose last column is the target; the others
    are features.  `model` selects the estimator family by name
    ('RandomForest' or 'GradientBoosting').
    """
    def __init__(self, T_obs, samples, model, counter=1):
        self.T_obs = T_obs
        self.samples = samples
        # Remember which family was requested so _reset_model() can rebuild
        # it.  Bug fix: the original re-read the *global* `model` variable,
        # which only exists when this module is run as a script.
        self.model_name = model
        if model == 'RandomForest':
            self.model = RandomForestRegressor()
        elif model == 'GradientBoosting':
            self.model = GradientBoostingRegressor()
        self.counter = counter
    def _reset_model(self):
        """Replace self.model with a fresh, unfitted estimator of the
        configured family."""
        if self.model_name == 'RandomForest':
            self.model = RandomForestRegressor()
        elif self.model_name == 'GradientBoosting':
            self.model = GradientBoostingRegressor()
    def fit(self):
        """Grid-search hyper-parameters on the accumulated samples and keep
        the best estimator (3-fold CV; last column is the target)."""
        grid_parameters = {'max_depth': [10],
                           'min_samples_leaf': [1, 2, 4],
                           'min_samples_split': [2, 5, 10]
                           }
        self._reset_model()
        reg = GridSearchCV(self.model, grid_parameters, cv=3)
        X = self.samples.iloc[:, :-1].values
        y = self.samples.iloc[:, -1].values
        reg.fit(X, y)
        self.model = reg.best_estimator_
if __name__ == '__main__' :
    # Expect exactly one argument: the config-file path relative to the
    # current working directory.
    if len(sys.argv) != 2 :
        print("Usage" + sys.argv[0] + " <config-filename>")
        exit()
    else :
        # Minimal ad-hoc config parser: skip comment ('#'), section ('[')
        # and indented lines, then split each remaining "key = value" pair.
        with open(current_path + "/" + str(sys.argv[1])) as f :
            for line in f :
                if line[0] not in ['#','[', ' '] :
                    param = list(map(lambda x: x.strip(' \' \n'), line.split('=')))
                    if param[0] == 'brokers' : brokers = param[1]
                    elif param[0] == 'in_' : in_ = param[1]
                    elif param[0] == 'out_' : out_ = param[1]
                    # NOTE: 'treshold_to_learn' spelling matches the config key.
                    elif param[0] == 'treshold_to_learn' : treshold_to_learn = float(param[1])
                    elif param[0] == 'model' : model = param[1]
    # Consume per-key training samples from `in_` and publish pickled,
    # fitted models to `out_`.
    consumer = KafkaConsumer(in_, # Topic name
                             bootstrap_servers = brokers, # List of brokers
                             key_deserializer= lambda v: int(v.decode()), # How to deserialize a key (if any)
                             value_deserializer=lambda v: json.loads(v.decode('utf-8')) # How to deserialize sample messages
                             )
    producer = KafkaProducer(
        bootstrap_servers = brokers, # List of brokers passed from the command line
        key_serializer=str.encode, # How to serialize the key
        value_serializer=lambda v: pickle.dumps(v) # How to serialize a model
    )
    learners={} # for each key a learner
    for msg in consumer :
        print("\n -----------------Sample in------------------------")
        samples_msg = msg.value
        key = str(msg.key)
        # Message layout: 'X' is the feature list, 'W' the target value.
        X = samples_msg['X']
        y = samples_msg['W']
        sample = pd.DataFrame(np.array(X + [y]).reshape(1, -1), columns=['beta','n_star','G1', 'W'])
        if key not in learners.keys() :
            print(f'Created a new learner for key : {key}')
            learner = Learner(key, sample, model)
            learners[key] = learner
        else :
            #print(f'samples df before append = {learners[key].samples}')
            print(f'Appended the sample to the dataframe for key: {key}')
            learners[key].samples = learners[key].samples.append(sample, ignore_index=True)
            learners[key].counter += 1
            #print(f'samples df after append = {learners[key].samples}')
        # Re-fit and ship the model once enough new samples accumulated.
        if learners[key].counter >= treshold_to_learn :
            print(f'Time to learn for key : {key} !')
            print(f'counter = {learners[key].counter} !')
            learners[key].fit()
            learners[key].counter = 0
            producer.send(out_, key=key, value=learners[key].model)
            print(f'Model for key : {key} succesfully sent to the predictor')
print('-------------------------------------------') | StarcoderdataPython |
1688378 | <reponame>julpark-rh/cephci
"""
Entry module for executing RBD test scripts from ceph-qe-scripts.
This acts as a wrapper around the automation scripts in ceph-qe-scripts for cephci. The
following things are done
- Call the appropriate test script
- Return the status code of the script.
"""
from utility.log import Log
log = Log(__name__)
def run(**kw):
    """
    Execute an rbd system test script from ceph-qe-scripts on a client node.

    Args:
        kw: Supports the below keys
            ceph_cluster: Ceph object used to look up a node with the
                "client" role.
            config: User configuration provided in the test suite.
                test_name (required): script name under
                    ceph-qe-scripts/rbd/system.
                timeout (optional): seconds allowed for the script
                    (default 1800).
                ec-pool-k-m (optional): forwarded as ``--ec-pool-k-m``.
                test_case_name (optional): forwarded as ``--test-case``.

    Returns:
        int: the remote script's exit status (0 on success; 1 when no
        client node is available).
    """
    log.info("Running rbd tests")
    ceph_cluster = kw["ceph_cluster"]
    client_nodes = ceph_cluster.get_nodes(role="client")
    # Bug fix: the original indexed client_nodes[0] unconditionally, so an
    # empty node list raised IndexError before the guard below could run.
    client_node = client_nodes[0] if client_nodes else None
    if not client_node:
        log.error("Require a client node to execute the tests.")
        return 1
    # function constants
    test_folder = "rbd-tests"
    script_folder = "ceph-qe-scripts/rbd/system"
    venv_folder = "venv"
    python_cmd = "sudo venv/bin/python"
    git_url = "https://github.com/red-hat-storage/ceph-qe-scripts.git"
    git_clone = f"git clone {git_url}"
    # Cleaning up the cloned repo to avoid test residues
    client_node.exec_command(
        cmd=f"sudo rm -rf {test_folder}"
        + f" ; mkdir {test_folder}"
        + f" ; cd {test_folder}"
        + f" ; {git_clone}"
    )
    # Optimizing the installation of prerequisites so that they are executed once
    check_venv, err = client_node.exec_command(cmd="ls -l venv", check_ec=False)
    if not check_venv:
        commands = ["sudo yum install -y python3", f"python3 -m venv {venv_folder}"]
        for command in commands:
            client_node.exec_command(cmd=command)
    config = kw["config"]
    script_name = config["test_name"]
    timeout = config.get("timeout", 1800)
    command = f"{python_cmd} {test_folder}/{script_folder}/{script_name}"
    if config.get("ec-pool-k-m", None):
        ec_pool_arg = " --ec-pool-k-m " + config.get("ec-pool-k-m")
        command = command + f" {ec_pool_arg}"
    if config.get("test_case_name", None):
        test_case_name = "--test-case " + config.get("test_case_name")
        command = command + f" {test_case_name}"
    out, err = client_node.exec_command(cmd=command, check_ec=False, timeout=timeout)
    if out:
        log.info(out)
    if err:
        log.error(err)
    # The script's exit status is the test verdict.
    rc = client_node.exit_status
    if rc == 0:
        log.info("%s completed successfully", command)
    else:
        log.error("%s has failed", command)
    return rc
| StarcoderdataPython |
1673801 | <gh_stars>10-100
"""baseline
Revision ID: c7b63286fd71
Revises:
Create Date: 2021-05-27 21:56:12.258456
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'c7b63286fd71'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the baseline tables (orders, invoices, tx/rx confirmations).

    Each CREATE is guarded by an inspector check so the migration is
    idempotent against databases that already contain the tables.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'orders' not in tables:
        op.create_table(
            'orders', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('bid', sa.Integer, default=0),
            sa.Column('message_size', sa.Integer, nullable=False),
            sa.Column('bid_per_byte', sa.Float, default=0),
            sa.Column('message_digest', sa.String(64), nullable=False),
            sa.Column('status', sa.Integer),
            sa.Column('uuid', sa.String(36), nullable=False),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('cancelled_at', sa.DateTime),
            sa.Column('started_transmission_at', sa.DateTime),
            sa.Column('ended_transmission_at', sa.DateTime),
            sa.Column('tx_seq_num', sa.Integer, unique=True),
            sa.Column('unpaid_bid', sa.Integer, nullable=False))
    # The three tables below all reference orders.id via a foreign key.
    if 'invoices' not in tables:
        op.create_table(
            'invoices', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('lid', sa.String(100), nullable=False),
            sa.Column('invoice', sa.String(1024), nullable=False),
            sa.Column('paid_at', sa.DateTime),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('status', sa.Integer), sa.Column('amount', sa.Integer),
            sa.Column('expires_at', sa.DateTime, nullable=False))
    if 'tx_confirmations' not in tables:
        op.create_table(
            'tx_confirmations', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('region_id', sa.Integer),
            sa.Column('presumed', sa.Boolean, default=False))
    if 'rx_confirmations' not in tables:
        op.create_table(
            'rx_confirmations', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('region_id', sa.Integer),
            sa.Column('presumed', sa.Boolean, default=False))
def downgrade():
    """Drop all baseline tables.

    Bug fix: the dependent tables (invoices, tx_confirmations,
    rx_confirmations) hold foreign keys into ``orders``, so ``orders``
    must be dropped last; the original dropped it first, which fails on
    backends that enforce FK constraints.
    """
    op.drop_table('tx_confirmations')
    op.drop_table('rx_confirmations')
    op.drop_table('invoices')
    op.drop_table('orders')
| StarcoderdataPython |
49281 | <filename>gogolook/models/task.py
from enum import IntEnum
from typing import Optional
from pydantic import Field
from sqlalchemy import Column, Enum, String
from gogolook.models import Base, BaseSchema
class TaskStatus(IntEnum):
    """Completion state of a task; stored as an integer in the DB."""
    Incomplete = 0
    Complete = 1
class Task(Base):
    """ORM model for a task row.

    `Base` presumably supplies the primary key and table name —
    TODO confirm against gogolook.models.
    """
    name = Column(String(length=100))
    # Persisted as the TaskStatus enum; new rows default to Incomplete.
    status = Column(Enum(TaskStatus), default=TaskStatus.Incomplete)
class TaskSchema(BaseSchema):
    """API representation of a Task (all fields required except status)."""
    id: int = Field(description="The id of Task")
    name: str = Field(description="The name of Task")
    status: TaskStatus = Field(
        description="The status of Task", default=TaskStatus.Incomplete
    )
class TaskUpdateSchema(TaskSchema):
    """Partial-update variant of TaskSchema: every field is optional."""
    name: Optional[str] = Field(description="The name of Task")
    status: Optional[TaskStatus] = Field(description="The status of Task")
| StarcoderdataPython |
52403 | from flask_login import AnonymousUserMixin
from flask_login import UserMixin as BaseUserMixin
from werkzeug.datastructures import ImmutableList
from ayeauth import db
from ayeauth.auth.password import <PASSWORD>_password
from ayeauth.models import BaseModel
class UserMixin(BaseUserMixin):
    """Flask-Login user mixin that adds role membership checks."""

    @property
    def is_active(self):
        """Flask-Login hook: the account may log in only while ``active`` is set."""
        return self.active

    def has_role(self, role):
        """Return True if *role* is among this user's roles.

        Idiom fix: collapsed the explicit if/return-True/return-False
        chain into a single membership test (behavior unchanged).
        """
        return role in self.roles
class User(BaseModel, UserMixin):
    """Application user account backed by the ``users`` table."""
    __tablename__ = "users"
    username = db.Column(db.String(255), unique=True, nullable=False)
    # Stores the (hashed) password string.
    password = db.Column(db.String(255), nullable=False)
    active = db.Column(db.Boolean(), default=True)
    # Many-to-many: roles granted to this user via the user_roles table.
    roles = db.relationship(
        "Role", secondary="user_roles", backref=db.backref("users", lazy="dynamic")
    )
    # Many-to-many: applications this user has authorized.
    authorized_applications = db.relationship(
        "Application",
        secondary="user_authorized_applications",
        backref=db.backref("users", lazy="dynamic"),
    )
    def __init__(self, username, password):
        super(User, self).__init__()
        self.username = username
        # NOTE(review): the hashing helper's name was redacted to
        # "<PASSWORD>" in this copy of the source (see the module import);
        # restore the real ayeauth.auth.password helper before running.
        self.password = <PASSWORD>(password)
    def __str__(self):
        return str(self.username)
class AnonymousUser(AnonymousUserMixin):
    """Unauthenticated visitor: carries an immutable, empty role list."""
    def __init__(self):
        self.roles = ImmutableList()
    def has_role(self, *args):
        # An anonymous user never has any role.
        return False
| StarcoderdataPython |
1616814 | # 反转链表
# 输入一个链表,反转链表后,输出新链表的表头。
# 链表结构
class ListNode:
    """Singly linked list node: `val` payload, `next` successor (None = tail)."""
    def __init__(self, x):
        self.val = x
        self.next = None
# 打印链表
def printChain(head):
    """Print each node's `val` from `head` to the tail, one per line."""
    current = head
    while current is not None:
        print(current.val)
        current = current.next
class Solution:
    def ReverseList(self, pHead):
        """Reverse a singly linked list and return the new head.

        Standard previous/current pointer walk; the empty and single-node
        lists fall out naturally without special-casing.
        """
        reversed_head = None
        current = pHead
        while current is not None:
            following = current.next
            current.next = reversed_head
            reversed_head = current
            current = following
        return reversed_head
if __name__ == '__main__':
    # 创建链表: build the list 1 -> 2 -> 3 -> 4 -> 5.
    l1 = ListNode(1)
    l2 = ListNode(2)
    l3 = ListNode(3)
    l4 = ListNode(4)
    l5 = ListNode(5)
    l1.next = l2
    l2.next = l3
    l3.next = l4
    l4.next = l5
    # Bug fix: the original printed the returned ListNode object (its
    # repr), not the reversed values; printChain shows 5..1 one per line.
    printChain(Solution().ReverseList(l1))
| StarcoderdataPython |
3316841 | <filename>hackerrank/Python/Strings/Print-Fucntion.py<gh_stars>0
# Print the integers 1..N (N read from stdin) concatenated with no separator
print(*range(1, int(input())+1), sep='') | StarcoderdataPython |
1621239 | <filename>api/src/opentrons/protocol_engine/resources/model_utils.py
"""Unique ID generation provider."""
from datetime import datetime, timezone
from uuid import uuid4
class ModelUtils:
    """Shared helpers for resource models: unique IDs and timestamps."""

    @staticmethod
    def generate_id() -> str:
        """Generate a unique identifier.

        Uses UUIDv4 for safety in a multiprocessing environment.
        """
        fresh = uuid4()
        return str(fresh)

    @staticmethod
    def get_timestamp() -> datetime:
        """Return the current time as a timezone-aware UTC datetime."""
        now = datetime.now(tz=timezone.utc)
        return now
| StarcoderdataPython |
3317529 | <gh_stars>0
# Copyright 2017 Apex.AI, Inc.
# flake8: noqa This file is for plotting data. Its dependencies are not necessarily on the CI.
import os
import sys
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt # noqa:
# Parse the single required CLI argument: the directory holding log files.
if len(sys.argv) == 2:
    directory = sys.argv[1]
else:
    print("Usage: python performance_test_file_reader.py /path_to_log_files")
    sys.exit(0)
logfiles = []
N = 14  # Number of line to skip before CSV data starts.
# Collect every "log_*" file in the given directory.
for file in os.listdir(directory):
    if file.startswith("log_"):
        print(os.path.join(directory, file))
        logfiles.append(os.path.join(directory, file))
# Parse each log: skip the N-line header, plot latency columns over time
# and save one PDF per input file, annotated with the header and means.
for f in logfiles:
    try:
        print("Parsing file:" + str(f))
        dataframe = pd.read_csv(f, skiprows=N + 1, sep="[ \t]*,[ \t]*", engine='python')
        # dataframe = dataframe.drop(columns=['Unnamed: 19'])
        with open(f) as myfile:
            head = [next(myfile) for x in range(0, N)]
        print(''.join(head))
        if not dataframe.empty:
            pd.options.display.float_format = '{:.4f}'.format
            # Drop the rusage columns; scale variance so it is visible on
            # the same axis as the other latency columns.
            dataframe.drop(list(dataframe.filter(regex='ru_')), axis=1, inplace=True)
            dataframe["latency_variance (ms) * 100"] = 100.0 * dataframe["latency_variance (ms)"]
            dataframe[["T_experiment",
                       "latency_min (ms)",
                       "latency_max (ms)",
                       "latency_mean (ms)",
                       "latency_variance (ms) * 100"]] \
                .plot(x='T_experiment')
            plt.figtext(0.0, 1.0, ''.join(head), fontsize=8, horizontalalignment='left')
            plt.figtext(0.65, 0.9, dataframe.mean().round(4), fontsize=8,
                        horizontalalignment='left')
            plt.savefig(os.path.basename(f) + ".pdf",
                        bbox_inches=matplotlib.transforms.Bbox(np.array(((0, 0), (8, 8)))))
    except:  # noqa: E722 I do rethrow.
        print("Could not parse file: " + str(f) + "\n")
        raise
| StarcoderdataPython |
114342 | <reponame>christopher-roelofs/MiSTerDash<gh_stars>0
from time import sleep
import json
import config
from flask import Flask, Response, render_template
import time
app = Flask(__name__)
quit = False  # NOTE(review): unused in this file and shadows the builtin `quit`
SETTINGS = config.get_config()
# Path to the MiSTer per-core config/recents files for the configured storage.
RECENTS_FOLDER = '/media/{}/config/'.format(SETTINGS['core_storage'])
details = {}  # NOTE(review): never populated in this file — verify before removing
@app.route('/')
def index():
    """Serve the dashboard page."""
    return render_template('details.html')
@app.route('/details')
def game_details():
    """Stream game details as Server-Sent Events (one JSON event per second).

    Currently emits a hard-coded sample record in the SSE "data: ..."
    framing expected by the front-end EventSource.
    """
    def get_game_details():
        while True:
            json_data = json.dumps(
                {'rom_id': 5815, 'system_id': 20, 'name': '007: Everything or Nothing', 'region': 'Europe', 'front_cover': 'https://gamefaqs.gamespot.com/a/box/5/0/6/53506_front.jpg', 'back_cover': 'https://gamefaqs.gamespot.com/a/box/5/0/6/53506_back.jpg', 'description': "Think like Bond, act like Bond, and experience an entirely new Bond adventure.<NAME>, the world's greatest secret agent, returns in Everything or Nothing with new guns and gadgets, combat skills, and clever tricks--and it's up to you to put them to good use.Travel through four exciting continents including the Valley of the Kings in Egypt and the French Quarter in New Orleans.The game also features two-player co-op missions and four-player multiplayer arena modes.", 'developer': 'Griptonite Games', 'publisher': None, 'genre': 'Action,Shooter,Third-Person,Modern', 'release_date': 'Nov 17, 2003', 'gamefaqs': 'http://www.gamefaqs.com/gba/914854-007-everything-or-nothing'})
            yield f"data:{json_data}\n\n"
            time.sleep(1)
    return Response(get_game_details(), mimetype='text/event-stream')
# Start the development server on all interfaces, port 8080.
app.run(threaded=True,host='0.0.0.0', port=8080)
| StarcoderdataPython |
92791 | <filename>refinery/lib/deobfuscation.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains functions to aid in deobfuscation.
"""
from typing import Optional, Any
import ast
import re
class ExpressionParsingFailure(ValueError):
    """Raised when a string cannot be validated as a safe arithmetic expression."""
    pass
_ALLOWED_NODE_TYPES = frozenset({
ast.Add,
ast.BinOp,
ast.BitAnd,
ast.BitAnd,
ast.BitOr,
ast.BitXor,
ast.Constant,
ast.Div,
ast.FloorDiv,
ast.Invert,
ast.LShift,
ast.Mod,
ast.Mult,
ast.Not,
ast.NotEq,
ast.Num,
ast.Or,
ast.RShift,
ast.Sub,
ast.UAdd,
ast.UnaryOp,
ast.USub
})
def cautious_eval(definition: str, size_limit: Optional[int] = None) -> Any:
    """
    Very, very, very, very, very carefully evaluate a Python expression.

    The string is whitespace-stripped, length-checked, character-whitelisted,
    and its AST is verified to contain only the node types in
    `_ALLOWED_NODE_TYPES` (numeric literals and arithmetic/bitwise
    operators) before being passed to `eval`.  Any violation raises an
    `ExpressionParsingFailure` (via the local `Abort` subclass).
    """
    # Canonicalize first so the size check and whitelist see the final form.
    definition = re.sub(R'\s+', '', definition)

    class Abort(ExpressionParsingFailure):
        # Appends the offending definition to every failure message.
        def __init__(self, msg):
            super().__init__(F'{msg}: {definition}')

    if size_limit and len(definition) > size_limit:
        raise Abort(F'Size limit {size_limit} was exceeded while parsing')
    # Whitelist: digits, hex literals, '.', operators and parentheses only.
    if any(x not in '.^%|&~<>()-+/*0123456789xabcdefABCDEF' for x in definition):
        raise Abort('Unknown characters in expression')
    try:
        expression = ast.parse(definition)
        nodes = ast.walk(expression)
    except Exception:
        raise Abort('Python AST parser failed')
    try:
        # A single expression walks as Module, then Expr, then the payload.
        assert type(next(nodes)) == ast.Module
        assert type(next(nodes)) == ast.Expr
    except (StopIteration, AssertionError):
        raise Abort('Not a Python expression')
    nodes = list(nodes)
    types = set(type(node) for node in nodes)
    if not types <= _ALLOWED_NODE_TYPES:
        problematic = types - _ALLOWED_NODE_TYPES
        raise Abort('Expression contains operations that are not allowed: {}'.format(', '.join(str(p) for p in problematic)))
    # Only numeric literals and arithmetic remain, so eval is safe here.
    return eval(definition)
def cautious_eval_or_default(definition: str, default: Optional[Any] = None, size_limit: Optional[int] = None) -> Any:
    """Like `cautious_eval`, but return *default* instead of raising.

    Bug fix: *size_limit* is now forwarded to `cautious_eval`; previously
    the parameter was accepted but silently ignored, so oversized
    expressions were still evaluated.
    """
    try:
        return cautious_eval(definition, size_limit=size_limit)
    except ExpressionParsingFailure:
        return default
| StarcoderdataPython |
1646925 | <gh_stars>10-100
# coding: utf-8
import ncloud_cdn
from ncloud_cdn.api.v2_api import V2Api
from ncloud_cdn.rest import ApiException
import ncloud_apikey
# Load API credentials from the local ncloud_apikey configuration store.
configuration = ncloud_cdn.Configuration()
apikeys = ncloud_apikey.ncloud_key.NcloudKey().keys()
configuration.access_key = apikeys['access_key'] # "<KEY>"
configuration.secret_key = apikeys['secret_key'] # "<KEY>"
api = V2Api(ncloud_cdn.ApiClient(configuration))
# Fetch the CDN+ instance list and print it; API errors are printed,
# not re-raised (best-effort example script).
get_cdn_plus_instance_list_request = ncloud_cdn.GetCdnPlusInstanceListRequest()
try:
    api_response = api.get_cdn_plus_instance_list(get_cdn_plus_instance_list_request)
    print(api_response)
except ApiException as e:
    print("Exception when calling V2Api->get_cdn_plus_instance_list: %s\n" % e)
| StarcoderdataPython |
1718560 | <reponame>Kittycatguspm/shuecm
"""
Place the database models for this package in this module.
"""
| StarcoderdataPython |
3214893 | <gh_stars>10-100
#!/usr/bin/env python
# Public API of the Status packet subpackage.
__all__ = [
    "RequestPacket",
    "ResponsePacket"
]
from CraftProtocol.Protocol.v1_8.Packet.Status.RequestPacket import RequestPacket
from CraftProtocol.Protocol.v1_8.Packet.Status.ResponsePacket import ResponsePacket
| StarcoderdataPython |
def execucoes():
    # Number of test cases, read from stdin.
    return int(input())
def entradas():
    # One input line split on single spaces -> [word_a, word_b].
    return input().split(' ')
def imprimir(v):
    # Print one result line.
    print(v)
def tamanho_a(a):
    # Length of the first word.
    return len(a)
def tamanho_b(b):
    # Length of the second word.
    return len(b)
def diferenca_tamanhos(a, b):
    # True when `a` is no longer than `b` (chooses the interleave branch).
    return (len(a) <= len(b))
def analisar(e, i, s):
    """Interleave the characters of the two words in `e` and append the
    leftover tail of the longer word, accumulating onto `s`.

    `i` is unused but kept so the existing caller (`combinador`) keeps
    working unchanged.  The original duplicated the interleaving loop in
    two length-comparison branches and shadowed its own `i` parameter;
    zip() handles both cases in one pass with identical output.
    """
    a, b = e
    for char_a, char_b in zip(a, b):
        s += char_a + char_b
    shorter = min(len(a), len(b))
    # Exactly one of these tails is non-empty (or both empty).
    return s + a[shorter:] + b[shorter:]
def combinador():
    # Read the case count, then interleave and print each word pair.
    n = execucoes()
    for i in range(n): imprimir(analisar(entradas(), i, ''))
combinador() | StarcoderdataPython |
1740753 | <filename>mars/tensor/execution/tests/test_datasource_execute.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sps
from mars.tests.core import TestBase
from mars.tensor.execution.core import Executor
from mars.tensor.expressions.datasource import tensor, ones_like, zeros, zeros_like, full, \
arange, empty, empty_like, diag, diagflat, eye, linspace, meshgrid, indices, \
triu, tril
from mars.lib.sparse import SparseNDArray
from mars.tensor.expressions.lib import nd_grid
class Test(TestBase):
    def setUp(self):
        """Create a fresh local Executor for every test."""
        super(Test, self).setUp()
        self.executor = Executor()
    def testCreateSparseExecution(self):
        """Sparse tensor creation: from scipy matrix, ones_like, tosparse."""
        mat = sps.csr_matrix([[0, 0, 2], [2, 0, 0]])
        t = tensor(mat, dtype='f8', chunks=2)
        res = self.executor.execute_tensor(t)
        self.assertIsInstance(res[0], SparseNDArray)
        self.assertEqual(res[0].dtype, np.float64)
        np.testing.assert_array_equal(res[0].toarray(), mat[..., :2].toarray())
        np.testing.assert_array_equal(res[1].toarray(), mat[..., 2:].toarray())
        t2 = ones_like(t, dtype='f4')
        res = self.executor.execute_tensor(t2)
        expected = sps.csr_matrix([[0, 0, 1], [1, 0, 0]])
        self.assertIsInstance(res[0], SparseNDArray)
        self.assertEqual(res[0].dtype, np.float32)
        np.testing.assert_array_equal(res[0].toarray(), expected[..., :2].toarray())
        np.testing.assert_array_equal(res[1].toarray(), expected[..., 2:].toarray())
        t3 = tensor(np.array([[0, 0, 2], [2, 0, 0]]), chunks=2).tosparse()
        res = self.executor.execute_tensor(t3)
        self.assertIsInstance(res[0], SparseNDArray)
        self.assertEqual(res[0].dtype, np.int_)
        np.testing.assert_array_equal(res[0].toarray(), mat[..., :2].toarray())
        np.testing.assert_array_equal(res[1].toarray(), mat[..., 2:].toarray())
    def testZerosExecution(self):
        """zeros / zeros_like, dense and sparse variants."""
        t = zeros((20, 30), dtype='i8', chunks=5)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertTrue(np.array_equal(res[0], np.zeros((20, 30), dtype='i8')))
        self.assertEqual(res[0].dtype, np.int64)
        t2 = zeros_like(t)
        res = self.executor.execute_tensor(t2, concat=True)
        self.assertTrue(np.array_equal(res[0], np.zeros((20, 30), dtype='i8')))
        self.assertEqual(res[0].dtype, np.int64)
        # Sparse zeros should contain no stored elements at all.
        t = zeros((20, 30), dtype='i4', chunks=5, sparse=True)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertEqual(res[0].nnz, 0)
    def testEmptyExecution(self):
        """empty / empty_like: shape and dtype only, contents undefined."""
        t = empty((20, 30), dtype='i8', chunks=5)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertEqual(res[0].shape, (20, 30))
        self.assertEqual(res[0].dtype, np.int64)
        self.assertFalse(np.array_equal(res, np.zeros((20, 30))))
        t = empty((20, 30), chunks=5)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertFalse(np.allclose(res, np.zeros((20, 30))))
        t2 = empty_like(t)
        res = self.executor.execute_tensor(t2, concat=True)
        self.assertEqual(res[0].shape, (20, 30))
        self.assertEqual(res[0].dtype, np.float64)
    def testFullExecution(self):
        """full with a scalar fill value and with a broadcastable list."""
        t = full((2, 2), 1, dtype='f4', chunks=1)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertTrue(np.array_equal(res[0], np.full((2, 2), 1, dtype='f4')))
        t = full((2, 2), [1, 2], dtype='f8', chunks=1)
        res = self.executor.execute_tensor(t, concat=True)
        self.assertTrue(np.array_equal(res[0], np.full((2, 2), [1, 2], dtype='f8')))
    def testArangeExecution(self):
        """arange with int, float and datetime64 steps/bounds."""
        t = arange(1, 20, 3, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        self.assertTrue(np.array_equal(res, np.arange(1, 20, 3)))
        t = arange(1, 20, .3, chunks=4)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.arange(1, 20, .3)
        self.assertTrue(np.allclose(res, expected))
        t = arange(1.0, 1.8, .3, chunks=4)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.arange(1.0, 1.8, .3)
        self.assertTrue(np.allclose(res, expected))
        t = arange('1066-10-13', '1066-10-31', dtype=np.datetime64, chunks=3)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.arange('1066-10-13', '1066-10-31', dtype=np.datetime64)
        self.assertTrue(np.array_equal(res, expected))
    def testDiagExecution(self):
        """diag against numpy for square/rectangular 2-d inputs, 1-d
        inputs (diagonal construction), and sparse 1-d inputs, over a
        range of positive and negative offsets k."""
        # 2-d 6 * 6
        a = arange(36, chunks=2).reshape(6, 6)
        d = diag(a)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(6, 6))
        np.testing.assert_equal(res, expected)
        d = diag(a, k=1)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(6, 6), k=1)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=3)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(6, 6), k=3)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-2)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(6, 6), k=-2)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-5)
        res = self.executor.execute_tensor(d)[0]
        expected = np.diag(np.arange(36).reshape(6, 6), k=-5)
        np.testing.assert_equal(res, expected)
        # 2-d 4 * 9
        a = arange(36, chunks=2).reshape(4, 9)
        d = diag(a)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(4, 9))
        np.testing.assert_equal(res, expected)
        d = diag(a, k=1)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(4, 9), k=1)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=3)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(4, 9), k=3)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-2)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(36).reshape(4, 9), k=-2)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-3)
        res = self.executor.execute_tensor(d)[0]
        expected = np.diag(np.arange(36).reshape(4, 9), k=-3)
        np.testing.assert_equal(res, expected)
        # 1-d
        a = arange(5, chunks=2)
        d = diag(a)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5))
        np.testing.assert_equal(res, expected)
        d = diag(a, k=1)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=1)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=3)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=3)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-2)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=-2)
        np.testing.assert_equal(res, expected)
        d = diag(a, k=-3)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=-3)
        np.testing.assert_equal(res, expected)
        # sparse: same offsets, result must stay a SparseNDArray
        d = diag(a, sparse=True)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5))
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        d = diag(a, k=1, sparse=True)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        d = diag(a, k=2, sparse=True)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=2)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        d = diag(a, k=-2, sparse=True)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=-2)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        d = diag(a, k=-3, sparse=True)
        res = self.executor.execute_tensor(d, concat=True)[0]
        expected = np.diag(np.arange(5), k=-3)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
    def testDiagflatExecution(self):
        """diagflat from lists and from tensors, with and without offset."""
        a = diagflat([[1, 2], [3, 4]], chunks=1)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.diagflat([[1, 2], [3, 4]])
        np.testing.assert_equal(res, expected)
        d = tensor([[1, 2], [3, 4]], chunks=1)
        a = diagflat(d)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.diagflat([[1, 2], [3, 4]])
        np.testing.assert_equal(res, expected)
        a = diagflat([1, 2], 1, chunks=1)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.diagflat([1, 2], 1)
        np.testing.assert_equal(res, expected)
        d = tensor([[1, 2]], chunks=1)
        a = diagflat(d, 1)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.diagflat([1, 2], 1)
        np.testing.assert_equal(res, expected)
    def testEyeExecution(self):
        """eye against numpy: square and rectangular (M=), positive and
        negative offsets k, explicit dtype, and the sparse variants."""
        t = eye(5, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5)
        np.testing.assert_equal(res, expected)
        t = eye(5, k=1, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=1)
        np.testing.assert_equal(res, expected)
        t = eye(5, k=2, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=2)
        np.testing.assert_equal(res, expected)
        t = eye(5, k=-1, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=-1)
        np.testing.assert_equal(res, expected)
        t = eye(5, k=-3, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=-3)
        np.testing.assert_equal(res, expected)
        t = eye(5, M=3, k=1, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=3, k=1)
        np.testing.assert_equal(res, expected)
        t = eye(5, M=3, k=-3, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=3, k=-3)
        np.testing.assert_equal(res, expected)
        t = eye(5, M=7, k=1, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=7, k=1)
        np.testing.assert_equal(res, expected)
        t = eye(5, M=8, k=-3, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=8, k=-3)
        np.testing.assert_equal(res, expected)
        t = eye(2, dtype=int)
        res = self.executor.execute_tensor(t, concat=True)[0]
        self.assertEqual(res.dtype, np.int_)
        # test sparse
        t = eye(5, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, k=1, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, k=2, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=2)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, k=-1, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=-1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, k=-3, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, k=-3)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, M=3, k=1, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=3, k=1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, M=3, k=-3, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=3, k=-3)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, M=7, k=1, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=7, k=1)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
        t = eye(5, M=8, k=-3, sparse=True, chunks=2)
        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.eye(5, M=8, k=-3)
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res.toarray(), expected)
    def testLinspaceExecution(self):
        """linspace with endpoint on/off and an explicit integer dtype."""
        a = linspace(2.0, 9.0, num=11, chunks=3)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.linspace(2.0, 9.0, num=11)
        np.testing.assert_allclose(res, expected)
        a = linspace(2.0, 9.0, num=11, endpoint=False, chunks=3)
        res = self.executor.execute_tensor(a, concat=True)[0]
        expected = np.linspace(2.0, 9.0, num=11, endpoint=False)
        np.testing.assert_allclose(res, expected)
        a = linspace(2.0, 9.0, num=11, chunks=3, dtype=int)
        res = self.executor.execute_tensor(a, concat=True)[0]
        self.assertEqual(res.dtype, np.int_)
    def testMeshgridExecution(self):
        """meshgrid over three 1-d tensors: default ('xy') and 'ij'
        indexing, each in dense and sparse form, compared to numpy."""
        a = arange(5, chunks=2)
        b = arange(6, 12, chunks=3)
        c = arange(12, 19, chunks=4)
        A, B, C = meshgrid(a, b, c)
        A_res = self.executor.execute_tensor(A, concat=True)[0]
        A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[0]
        np.testing.assert_equal(A_res, A_expected)
        B_res = self.executor.execute_tensor(B, concat=True)[0]
        B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[1]
        np.testing.assert_equal(B_res, B_expected)
        C_res = self.executor.execute_tensor(C, concat=True)[0]
        C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[2]
        np.testing.assert_equal(C_res, C_expected)
        A, B, C = meshgrid(a, b, c, indexing='ij')
        A_res = self.executor.execute_tensor(A, concat=True)[0]
        A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[0]
        np.testing.assert_equal(A_res, A_expected)
        B_res = self.executor.execute_tensor(B, concat=True)[0]
        B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[1]
        np.testing.assert_equal(B_res, B_expected)
        C_res = self.executor.execute_tensor(C, concat=True)[0]
        C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[2]
        np.testing.assert_equal(C_res, C_expected)
        A, B, C = meshgrid(a, b, c, sparse=True)
        A_res = self.executor.execute_tensor(A, concat=True)[0]
        A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[0]
        np.testing.assert_equal(A_res, A_expected)
        B_res = self.executor.execute_tensor(B, concat=True)[0]
        B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[1]
        np.testing.assert_equal(B_res, B_expected)
        C_res = self.executor.execute_tensor(C, concat=True)[0]
        C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[2]
        np.testing.assert_equal(C_res, C_expected)
        A, B, C = meshgrid(a, b, c, indexing='ij', sparse=True)
        A_res = self.executor.execute_tensor(A, concat=True)[0]
        A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
                                 indexing='ij', sparse=True)[0]
        np.testing.assert_equal(A_res, A_expected)
        B_res = self.executor.execute_tensor(B, concat=True)[0]
        B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
                                 indexing='ij', sparse=True)[1]
        np.testing.assert_equal(B_res, B_expected)
        C_res = self.executor.execute_tensor(C, concat=True)[0]
        C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
                                 indexing='ij', sparse=True)[2]
        np.testing.assert_equal(C_res, C_expected)
def testIndicesExecution(self):
    """indices() must match np.indices, both as a whole and per sub-grid."""
    grid = indices((2, 3), chunks=1)
    expected = np.indices((2, 3))
    whole = self.executor.execute_tensor(grid, concat=True)[0]
    np.testing.assert_equal(whole, expected)
    for axis in (0, 1):
        part = self.executor.execute_tensor(grid[axis], concat=True)[0]
        np.testing.assert_equal(part, expected[axis])
def testTriuExecution(self):
    """triu must match np.triu for dense and sparse inputs over several k offsets."""
    a = arange(24, chunks=2).reshape(2, 3, 4)
    raw = np.arange(24).reshape(2, 3, 4)
    # Dense input: a single loop replaces five copy-pasted blocks
    # (k=0 is triu's default, so it covers the no-kwarg case too).
    for k in (0, 1, 2, -1, -2):
        t = triu(a, k=k)
        res = self.executor.execute_tensor(t, concat=True)[0]
        np.testing.assert_equal(res, np.triu(raw, k=k))
    # Sparse input: the result must stay sparse and still match numpy.
    a = arange(12, chunks=2).reshape(3, 4).tosparse()
    raw = np.arange(12).reshape(3, 4)
    for k in (0, 1, 2, -1, -2):
        t = triu(a, k=k)
        res = self.executor.execute_tensor(t, concat=True)[0]
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, np.triu(raw, k=k))
def testTrilExecution(self):
    """tril must match np.tril for dense and sparse inputs over several k offsets."""
    a = arange(24, chunks=2).reshape(2, 3, 4)
    raw = np.arange(24).reshape(2, 3, 4)
    # Dense input: loop over diagonal offsets instead of duplicated blocks.
    for k in (0, 1, 2, -1, -2):
        t = tril(a, k=k)
        res = self.executor.execute_tensor(t, concat=True)[0]
        np.testing.assert_equal(res, np.tril(raw, k=k))
    # Sparse input: result type must be preserved.
    a = arange(12, chunks=2).reshape(3, 4).tosparse()
    raw = np.arange(12).reshape(3, 4)
    for k in (0, 1, 2, -1, -2):
        t = tril(a, k=k)
        res = self.executor.execute_tensor(t, concat=True)[0]
        self.assertIsInstance(res, SparseNDArray)
        np.testing.assert_equal(res, np.tril(raw, k=k))
def testIndexTrickExecution(self):
    """nd_grid() must match numpy's nd_grid in dense and sparse modes."""
    mgrid = nd_grid()
    dense = self.executor.execute_tensor(mgrid[0:5, 0:5], concat=True)[0]
    np.testing.assert_equal(dense, np.lib.index_tricks.nd_grid()[0:5, 0:5])
    stepped = self.executor.execute_tensor(mgrid[-1:1:5j], concat=True)[0]
    np.testing.assert_equal(stepped, np.lib.index_tricks.nd_grid()[-1:1:5j])
    ogrid = nd_grid(sparse=True)
    results = [self.executor.execute_tensor(o, concat=True)[0]
               for o in ogrid[0:5, 0:5]]
    expected = np.lib.index_tricks.nd_grid(sparse=True)[0:5, 0:5]
    for r, e in zip(results, expected):
        np.testing.assert_equal(r, e)
| StarcoderdataPython |
1730685 | <filename>Cap 11/rascunho.py
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 01:44:14 2020
@author: dreis
"""
def histogram(s):
    """Count occurrences of each item in *s*.

    Prints the mapping (preserving the original script's output) and returns
    it.  The original returned ``print(d)``, i.e. ``None``, which made the
    function's result unusable.
    """
    d = dict()
    for c in s:
        if c not in d:
            d[c] = 1
        else:
            d[c] += 1
    print(d)
    return d
def histogram_get(s):
    """Count occurrences of each item in *s* using dict.get with default 0.

    Prints the mapping (as before) and returns it; the original returned
    ``print(d)``, i.e. ``None``.
    """
    d = dict()
    for letra in s:
        d[letra] = d.get(letra, 0) + 1
    print(d)
    return d
# Memo table shared across calls, pre-seeded with the base cases.
known = {0: 0, 1: 1}


def fibonacci(n):
    """Return the n-th Fibonacci number via memoized recursion.

    Bug fix: the original re-initialized ``known = {0: 0, 1: 1}`` at the top
    of every (recursive) call, which silently disabled memoization and made
    the runtime exponential.  The table now lives at module level.
    """
    if n in known:
        return known[n]
    res = fibonacci(n - 1) + fibonacci(n - 2)
    known[n] = res
    return res
# Demo call: print the 40th Fibonacci number.
print(fibonacci(40))
| StarcoderdataPython |
157862 | <filename>nematus/metrics/test_chrf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from chrf import CharacterFScorer
class TestCharacterFScoreReference(unittest.TestCase):
    """
    Regression tests for CharacterFScorer (chrF metric).
    """

    @staticmethod
    def tokenize(sentence):
        # Plain whitespace tokenization, matching the scorer's expected input.
        return sentence.split(" ")

    def _assert_score(self, reference, hypothesis, expected, params='n=6,beta=3'):
        """Score *hypothesis* against *reference* and assert exact equality.

        Factors out the setup that was copy-pasted across nine test methods.
        """
        scorer = CharacterFScorer(params)
        scorer.set_reference(self.tokenize(reference))
        self.assertEqual(scorer.score(self.tokenize(hypothesis)), expected)

    def test_identical_segments(self):
        segment = "Consistency is the last refuge of the unimaginative"
        self._assert_score(segment, segment, 1.0)

    def test_completely_different_segments(self):
        self._assert_score("AAAAAA", "BBBB", 0.0, params='n=3,beta=3')

    def test_empty_string(self):
        self._assert_score("", "", 1.0)

    def test_one_character_empty_string(self):
        self._assert_score("A", "", 0.0)

    def test_empty_string_one_character(self):
        self._assert_score("", "A", 0.0)

    def test_half_right(self):
        self._assert_score("AB", "AA", 0.25)

    def test_one_character(self):
        self._assert_score("A", "A", 1.0)

    def test_almost_correct(self):
        # Near-paraphrase pair; the expected value pins smoothing and beta
        # weighting to 12 decimal places (note the leading space in the
        # hypothesis, kept intentionally).
        segment_a = self.tokenize("risk assessment has to be undertaken by those who are qualified and expert in that area - that is the scientists .")
        segment_b = self.tokenize(" risk assessment must be made of those who are qualified and expertise in the sector - these are the scientists .")
        scorer = CharacterFScorer('n=6,beta=3')
        scorer.set_reference(segment_a)
        self.assertEqual('{0:.12f}'.format(scorer.score(segment_b)), "0.652414427449")
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3249237 | <filename>app.py<gh_stars>0
import graphviz as graphviz
import streamlit as st
from modules import helper_module_app as helper
st.set_option('deprecation.showPyplotGlobalUse', False)
def main():
    """Build the Streamlit page: pipeline diagram, dataset and model
    controls in the sidebar, and the topic-exploration sections.

    Glues all parts together; the heavy lifting lives in helper_module_app.
    """
    st.sidebar.title("Model configurations")
    st.title("Topic discovering")
    st.write("")

    # Overview diagram of the topic-modelling pipeline.
    graph = graphviz.Digraph()
    graph.edge('Input model embeddings from documents',
               'Dimension reduction (Step 2)')
    graph.edge('Input documents',
               'Extract keywords and perform word embeddings (Step 1)')
    graph.edge('Dimension reduction (Step 2)', 'Clustering (Step 3)')
    graph.edge('Clustering (Step 3)', 'Construct topic vectors')
    graph.edge('Construct topic vectors',
               'Attach keywords (ngrams) to each topic vector')
    graph.edge('Extract keywords and perform word embeddings (Step 1)',
               'Attach keywords (ngrams) to each topic vector')
    st.graphviz_chart(graph)

    # Dataset selection (currently a single choice).
    dataset = st.sidebar.selectbox(
        "Choose dataset",
        (["REIT-Industrial"])
    )
    df, doc_embed, example_text = helper.load_data(dataset)
    original_data_expander = st.beta_expander("Show raw data (source)")
    paragraphs = df.paragraph.values.tolist()
    if dataset == "Newsgroup20 Subset":
        st.sidebar.markdown("For more information about the newsgroup20 dataset, "
                            "see [here](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html).")

    # Load the model; for REIT-Industrial, company names are added to the
    # stop words so they do not dominate the topic keywords.
    if dataset == "REIT-Industrial":
        add_stop_words = ["Alexandrias", "Alexandria", "Yellow", "Yellows"]
        add_stop_words = list(df.company.unique()) + add_stop_words
        # NOTE(review): the keyword really is spelled "add_stops_words" in the
        # helper API -- do not "fix" it here without changing the helper.
        model = helper.load_model(add_stops_words=add_stop_words)
    else:
        model = helper.load_model()
    if model.dataset_name != dataset:
        # Dataset changed since the cached model was built: reset all derived
        # state and rerun pipeline steps 1-3.
        with st.spinner("Change of dataset: updating step 1 to 3"):
            model.doc_embedding = doc_embed
            model.documents = paragraphs
            model.dataset_name = dataset
            model.topic_sizes_reduced = None
            model.topic_vectors_reduced = None
            model.topic_words_reduced = None
            model.topic_word_scores_reduced = None
            model.topic_hierarchy = None
            model.perform_steps()
    st.sidebar.markdown("The paragraph and word embeddings were obtained using "
                        "[distiluse-base-multilingual-cased](https://arxiv.org/abs/1910.01108) "
                        "from the [sentence transformer library](https://www.sbert.net/).")

    # Raw-data expander contents.
    with original_data_expander.beta_container():
        original_data_expander.markdown("Here you see the (sources) of the original "
                                        "data.")
        if dataset == "REIT-Industrial":
            show_p = original_data_expander.checkbox(
                "Show extracted paragraphs", value=False
            )
            if show_p:
                original_data_expander.dataframe(df)
            else:
                # Source metadata only: first 5 columns, de-duplicated.
                original_data_expander.dataframe(
                    df.iloc[:, 0:5].drop_duplicates().
                    reset_index(inplace=False, drop=True)
                )
        else:
            original_data_expander.dataframe(df.iloc[:, 1])

    # Sidebar parameter widgets for each pipeline stage.
    model, stop_words, lower_ngrams, upper_ngrams, min_df, max_df = (
        helper.params_word_embed(model)
    )
    model, n_components, n_neighbors, densmap = (
        helper.params_dim_red(model)
    )
    (model, min_cluster_size,
     min_samples, selection_epsilon) = (
        helper.params_clustering(model)
    )
    st.sidebar.markdown("Do not forget to hit the **update model configurations** "
                        "button when changing the parameter values. "
                        "The updating should take no longer than 1 minute.")
    if st.sidebar.button("Update model configurations"):
        model = helper.update_model_steps(
            model=model,
            doc_embed=doc_embed,
            paragraphs=paragraphs,
            lower_ngrams=lower_ngrams,
            upper_ngrams=upper_ngrams,
            min_df=min_df,
            max_df=max_df,
            stop_words=stop_words,
            n_neighbors=n_neighbors,
            n_components=n_components,
            densmap=densmap,
            min_cluster_size=min_cluster_size,
            min_samples=min_samples,
            selection_epsilon=selection_epsilon
        )

    # Optional merging of similar topics.
    topic_reduction = st.sidebar.checkbox("Topic reduction", value=False)
    if topic_reduction:
        # Typo fix: "foret" -> "forget" in the user-facing hint.
        st.sidebar.markdown("Do not forget to hit the **update number of topics** "
                            "button when changing the number of topics.")

    # Section 1: Table with topics
    expander_topics, reduced_topic_sec_tw = (
        helper.display_topics(model, topic_reduction)
    )
    # Section 2: Keyword loadings on topics
    helper.topic_keywords(
        model, example_text, topic_reduction
    )
    # Section 3: Topic similarity matrix
    helper.show_similarity_matrix(model, topic_reduction)
    # Section 4: Search most relevant documents for a topic cluster
    helper.most_relevant_doc_top(df, model, topic_reduction)
    # Section 5: Search documents by keywords
    helper.documents_keywords(model, df, example_text)
    # Section 6: Topic sizes against variable values (REIT-Industrial only);
    # the original comment wrongly duplicated the Section 5 text.
    if dataset == "REIT-Industrial":
        helper.topic_size_vars_value(model, df, topic_reduction)
# Streamlit entry point: widen the page layout before building it.
if __name__ == "__main__":
    st.set_page_config(layout="wide")
    main()
| StarcoderdataPython |
54088 | <reponame>CCC-CS-github/ursina_ks3<gh_stars>1-10
"""
private dev for the PythonCraft code -- i.e. in case
I break the original, PythonCraft.py.
Also -- I want the original kept to approx. 30 lines.
"""
# Ursina engine plus helpers for procedural terrain and the player avatar.
from ursina import *
from perlin_terrain import Terrain
from character import Character

# Create the window and paint the background sky blue.
app = Ursina()
window.color = color.rgb(0, 200, 255)

# Build the landscape from layered Perlin-noise octaves (fixed seed so the
# world is reproducible between runs).
landscape = Terrain(advanced=True,
                    a1=64, f1=128,
                    a2=12, f2=120,
                    a3=3, f3=9,
                    seed=99)

# The first-person character the player controls.
player = Character(speed=6)


def update():
    """Per-frame engine callback: keep the character moving over the terrain."""
    player.move(landscape)


def input(key):
    """Key/mouse engine callback: forward events (e.g. 'escape') to the character."""
    player.input(key)


# Enter the engine's main loop.
app.run()
3385199 | <reponame>ablot/Pinceau
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ui_graphDlg
import matplotlib.image as mpimg
class graphDlg(QDialog, ui_graphDlg.Ui_graphDialog):
    """Dialog that renders a tree's graphviz image on a matplotlib canvas."""

    def __init__(self, parent=None, debug=0):
        super(graphDlg, self).__init__(parent)
        self.setupUi(self)
        self.MPLNav.initToolbar(self.MPLFig.fig.canvas)
        self.graph = parent.tree.graph
        # Force an initial render regardless of the interactive setting.
        self.updateGraph(1)
        self.setWindowTitle('Graph of tree %s' % parent.tree.name)
        self.connect(self.groupCheckBox, SIGNAL("stateChanged(int)"),
                     self.updateGraph)

    def updateGraph(self, doIt=0):
        """Re-render the graph image; skipped unless forced or interactive.

        Bug fix: QObject.sender is a method.  The original compared the bound
        method object itself (``self.sender is self.groupCheckBox``), which is
        always False, so unchecking the group box (stateChanged emits 0) never
        forced a redraw.  ``self.sender()`` returns the emitting widget (or
        None outside a slot call), restoring the intended behavior.
        """
        if self.sender() is self.groupCheckBox:
            doIt = 1
        if not doIt and not self.interactiveCheckBox.isChecked():
            return
        self.graph.updateGraph(groups=self.groupCheckBox.isChecked(),
                               verbose=0)
        img = mpimg.imread(self.graph.path)
        ax = self.MPLFig.fig.add_axes([0, 0, 1, 1])
        ax.imshow(img, aspect='equal')
        ax.axis('off')
        self.MPLFig.fig.canvas.draw()

    @pyqtSignature("")
    def on_refreshPushButton_clicked(self):
        # Explicit user request: always re-render.
        self.updateGraph(1)
3294439 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# Isolation sums for GED GSF electrons computed from 'pfNoPileUpCandidates'
# with a 0.3 cone, one PSet per isolated species (h+, h0, gamma).  Veto cones
# are non-zero only in the endcaps: 0.015 for h+, 0.08 for gamma.
egmGedGsfElectronPFNoPileUpIsolation = cms.EDProducer(
    "CITKPFIsolationSumProducer",
    srcToIsolate = cms.InputTag("gedGsfElectrons"),
    srcForIsolationCone = cms.InputTag('pfNoPileUpCandidates'),
    isolationConeDefinitions = cms.VPSet(
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
                  coneSize = cms.double(0.3),
                  VetoConeSizeBarrel = cms.double(0.0),
                  VetoConeSizeEndcaps = cms.double(0.015),
                  isolateAgainst = cms.string('h+'),
                  miniAODVertexCodes = cms.vuint32(2,3) ),
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
                  coneSize = cms.double(0.3),
                  VetoConeSizeBarrel = cms.double(0.0),
                  VetoConeSizeEndcaps = cms.double(0.0),
                  isolateAgainst = cms.string('h0'),
                  miniAODVertexCodes = cms.vuint32(2,3) ),
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
                  coneSize = cms.double(0.3),
                  VetoConeSizeBarrel = cms.double(0.0),
                  VetoConeSizeEndcaps = cms.double(0.08),
                  isolateAgainst = cms.string('gamma'),
                  miniAODVertexCodes = cms.vuint32(2,3) )
    )
)

# Charged-hadron sum over 'pfPileUpAllChargedParticles' (note the different
# miniAODVertexCodes, 0/1 instead of 2/3).
egmGedGsfElectronPFPileUpIsolation = cms.EDProducer(
    "CITKPFIsolationSumProducer",
    srcToIsolate = cms.InputTag("gedGsfElectrons"),
    srcForIsolationCone = cms.InputTag('pfPileUpAllChargedParticles'),
    isolationConeDefinitions = cms.VPSet(
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
                  coneSize = cms.double(0.3),
                  VetoConeSizeBarrel = cms.double(0.0),
                  VetoConeSizeEndcaps = cms.double(0.015),
                  isolateAgainst = cms.string('h+'),
                  miniAODVertexCodes = cms.vuint32(0,1) )
    )
)

# Same species/cone as the first producer, but using the map-based footprint
# veto driven by the 'particleBasedIsolation' association instead of fixed
# veto cones.
egmGedGsfElectronPFNoPileUpIsolationMapBasedVeto = cms.EDProducer(
    "CITKPFIsolationSumProducer",
    srcToIsolate = cms.InputTag("gedGsfElectrons"),
    srcForIsolationCone = cms.InputTag('pfNoPileUpCandidates'),
    isolationConeDefinitions = cms.VPSet(
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
                  coneSize = cms.double(0.3),
                  isolateAgainst = cms.string('h+'),
                  miniAODVertexCodes = cms.vuint32(2,3),
                  vertexIndex = cms.int32(0),
                  particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") ),
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
                  coneSize = cms.double(0.3),
                  isolateAgainst = cms.string('h0'),
                  miniAODVertexCodes = cms.vuint32(2,3),
                  vertexIndex = cms.int32(0),
                  particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") ),
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
                  coneSize = cms.double(0.3),
                  isolateAgainst = cms.string('gamma'),
                  miniAODVertexCodes = cms.vuint32(2,3),
                  vertexIndex = cms.int32(0),
                  particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") )
    )
)

# Map-based-veto counterpart of the pileup charged-hadron sum above.
# NOTE(review): here miniAODVertexCodes is 2,3 while the cone-veto pileup
# producer uses 0,1 -- confirm this asymmetry is intentional.
egmGedGsfElectronPFPileUpIsolationMapBasedVeto = cms.EDProducer(
    "CITKPFIsolationSumProducer",
    srcToIsolate = cms.InputTag("gedGsfElectrons"),
    srcForIsolationCone = cms.InputTag('pfPileUpAllChargedParticles'),
    isolationConeDefinitions = cms.VPSet(
        cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithMapBasedVeto'),
                  coneSize = cms.double(0.3),
                  isolateAgainst = cms.string('h+'),
                  miniAODVertexCodes = cms.vuint32(2,3),
                  vertexIndex = cms.int32(0),
                  particleBasedIsolation = cms.InputTag("particleBasedIsolation", "gedGsfElectrons") )
    )
)
| StarcoderdataPython |
1727781 | <reponame>sasha-kantoriz/ocdsapi<filename>tests/test_records.py<gh_stars>0
from .base import storage, app
from werkzeug.exceptions import NotFound
def test_get(client, storage):
    """A record fetched by ocid must carry the stored release."""
    with client.get('/api/record.json?ocid=test_ocid') as response:
        expected = storage.get_ocid('test_ocid')
        assert response.json['releases'][0] == expected
def test_get_not_found(client, storage):
    """An empty ocid must yield a 404."""
    with client.get('/api/record.json?ocid=') as response:
        status = response.status_code
        assert status == 404
def test_response_ids_only(client, storage):
    """With idsOnly, records must be reduced to id/ocid pairs."""
    with client.get('/api/records.json?idsOnly=True') as response:
        payload = response.json
        expected = [{"id": 'spam_id', "ocid": 'spam_ocid'}]
        assert payload['records'] == expected
def test_prepare_response(client, storage):
    """Each full record must expose all three release views."""
    with client.get('/api/records.json') as response:
        record = response.json['records'][0]
        for field in ('compiledRelease', 'versionedRelease', 'releases'):
            assert field in record
37251 | <reponame>timkrentz/SunTracker
import sys

# Replace theSystemPath with an empty PythonPath so that any lookup of
# gi.pygtkcompat raises KeyError, forcing gireactor down the non-pygtk path.
from twisted.python import modules
modules.theSystemPath = modules.PythonPath([], moduleDict={})

# Importing gireactor now should not use pygtkcompat; instead it should block
# the legacy pygtk module names from being importable (by mapping them to
# None in sys.modules).
from twisted.internet import gireactor

for name in gireactor._PYGTK_MODULES:
    if sys.modules[name] is not None:
        # Bug fix: report the module that actually failed the check; the
        # original always printed sys.modules["gobject"] regardless of name.
        sys.stdout.write("failure, sys.modules[%r] is %r, instead of None" %
                         (name, sys.modules[name]))
        sys.exit(0)

try:
    import gobject
except ImportError:
    sys.stdout.write("success")
else:
    sys.stdout.write("failure: %s was imported" % (gobject.__path__,))
3345836 | # -*- coding: utf-8 -*-
"""
@description: warnings
@author:Yee
"""
from __future__ import print_function
from __future__ import unicode_literals
# The warnings module.
import warnings


# warnings.warn(msg, WarningType=UserWarning) reports a warning of the given
# category without interrupting execution.
def month_warining(m):
    """Emit a RuntimeWarning when *m* is not a valid month number (1..12)."""
    if not (1 <= m <= 12):
        msg = "month (%d) is not between 1 and 12 " % m
        warnings.warn(msg, RuntimeWarning)


month_warining(13)  # a warning is reported

# Warnings of a particular category can be silenced with
# warnings.filterwarnings(action, category); action='ignore' drops them.
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
month_warining(13)  # no warning this time
3239552 | """
Based on https://djangosnippets.org/snippets/1179/
"""
import django
from django.conf import settings as django_settings
from django.http import HttpResponseRedirect
from re import compile
from django_auth_adfs.config import settings
from django_auth_adfs.util import get_adfs_auth_url
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
# URL patterns exempt from the authentication requirement: the configured
# LOGIN_URL, this app's own login view, plus any extra patterns listed in
# settings.LOGIN_EXEMPT_URLS.  Leading slashes are stripped to match
# request.path_info.lstrip('/') in the middleware below.
# NOTE: ``compile`` here is ``re.compile`` (imported above), not the builtin.
LOGIN_EXEMPT_URLS = [
    compile(django_settings.LOGIN_URL.lstrip('/')),
    compile(reverse("django_auth_adfs:login").lstrip('/')),
]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
    LOGIN_EXEMPT_URLS += [compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware(MiddlewareMixin):
    """
    Middleware that requires a user to be authenticated to view any page
    other than LOGIN_URL.  Exemptions can optionally be specified in settings
    via a list of regular expressions in LOGIN_EXEMPT_URLS.

    Requires authentication middleware and template context processors to be
    loaded; an AssertionError is raised otherwise.
    """

    def process_request(self, request):
        assert hasattr(request, 'user'), "The Login Required middleware requires" \
                                         " authentication middleware to be installed." \
                                         " Edit your MIDDLEWARE setting to insert" \
                                         " 'django.contrib.auth.middlware.AuthenticationMiddleware'." \
                                         " If that doesn't work, ensure your TEMPLATE_CONTEXT_PROCESSORS" \
                                         " setting includes 'django.core.context_processors.auth'."
        # is_authenticated is a method before Django 1.10 and a property after.
        authenticated = request.user.is_authenticated
        if django.VERSION[:2] < (1, 10):
            authenticated = authenticated()
        if authenticated:
            return None
        request_path = request.path_info.lstrip('/')
        if any(pattern.match(request_path) for pattern in LOGIN_EXEMPT_URLS):
            return None
        # Anonymous request to a protected URL: redirect to the ADFS login.
        return HttpResponseRedirect(get_adfs_auth_url())
| StarcoderdataPython |
3368296 | <reponame>oist-cnru/VCBot
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright (c) 2020 Okinawa Institute of Science and Technology (OIST).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: <NAME> <<EMAIL>>
Publication:
<NAME>., <NAME>., & <NAME>. (2020).
A hybrid human-neurorobotics approach to primary intersubjectivity via
active inference. Frontiers in psychology, 11.
Okinawa Institute of Science and Technology Graduate University (OIST)
Cognitive Neurorobotics Research Unit (CNRU)
1919-1, Tancha, Onna, Kunigami District, Okinawa 904-0495, Japan
"""
import numpy as np
import matplotlib.pyplot as plt
class TrainingPlot():
    """2x2 summary figure of an agent's training curves.

    Columns of *_train* as used below: column 0 holds the logging period
    (epochs between rows); columns 2-5 hold posterior reconstruction error,
    prior reconstruction error, regulation error, and loss (negative ELBO).
    (Column layout inferred from usage here -- confirm against the logger.)
    """

    def __init__(self, _mName, _train, _context):
        # _context is accepted for interface compatibility but unused here.
        wW = 8
        wH = 5
        fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(wW, wH))
        fig.canvas.set_window_title('Agent {} training details'.format(_mName))
        fig.subplots_adjust(wspace=0.2, hspace=0.8)
        # Common cosmetics for every subplot.  (The original then repeated
        # the two ticklabel_format calls for axes[0][0]; that duplication
        # has been removed -- the loop already configures it.)
        for i in range(2):
            for j in range(2):
                ax = axes[i][j]
                ax.grid(linestyle='dotted')
                ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
                ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
                ax.set_xlabel('epochs')
        # x axis: cumulative epoch count, scaled by the logging period found
        # in column 0 of the first row.
        times = np.cumsum(np.ones(_train.shape[0]))
        times = (times - 1) * _train[0, 0]
        titleY = 1.1
        font = {'family': 'serif',
                'color': 'darkred',
                'weight': 'normal',
                'size': 12,
                }
        axes[0][0].set_title('Posterior reconstruction error', y=titleY, fontdict=font)
        axes[0][0].plot(times, _train[:, 2])
        axes[0][1].set_title('Prior reconstruction error', y=titleY, fontdict=font)
        axes[0][1].plot(times, _train[:, 3], color='brown')
        axes[1][0].set_title('Regulation error', y=titleY, fontdict=font)
        axes[1][0].plot(times, _train[:, 4], color='darkgreen')
        axes[1][1].set_title('Loss (Negative ELBO)', y=titleY, fontdict=font)
        axes[1][1].plot(times, _train[:, 5], color='darkmagenta')
        plt.show()
1666934 | from cryptography.fernet import Fernet, InvalidToken
from django.conf import settings
class Fern:
    """Symmetric string encryption/decryption backed by Fernet.

    Usage:
        Fern().encrypt('foo')
        Fern().decrypt('CIPHERTEXT_ENCRYPTED_TEXT')
    """

    def __init__(self, key=None):
        # Fall back to the key configured in Django settings.
        self.key = key if key else settings.BETA_ENVIRONMENT

    def encrypt(self, message: str) -> str:
        token = Fernet(self.key).encrypt(message.encode('utf-8'))
        return token.decode('utf-8')

    def decrypt(self, ciphertext: str) -> str:
        try:
            plain = Fernet(self.key).decrypt(ciphertext.encode('utf-8'))
        except InvalidToken:
            # Invalid or corrupted token: treated as an empty message.
            return ''
        return plain.decode('utf-8')
| StarcoderdataPython |
1629508 | <filename>api/estimator/serializers.py<gh_stars>0
from rest_framework import serializers
from estimator.models import LogsModel
class LogsSerializer(serializers.ModelSerializer):
    """DRF serializer exposing request-log entries (LogsModel)."""
    class Meta:
        model = LogsModel
        # Fields exposed by the API for one log record.
        fields = ('id', 'method', 'endpoint', 'status', 'response_time')
| StarcoderdataPython |
3249202 | #!/usr/bin/env python
import sys

# Fast I/O: rebind input/print to raw stdin/stdout operations.
# NOTE: this shadows the builtins for the rest of the script, and the new
# ``print`` takes a single pre-formatted string (no newline added).
input = sys.stdin.readline
print = sys.stdout.write

if __name__ == '__main__':
    # Each test case: n, then x[0..n-1], then y[0..n-1].
    for _ in range(int(input())):
        n = int(input())
        x = list(map(int, input().strip().split()))
        y = list(map(int, input().strip().split()))
        a = b = 0
        # Greedy: visit indices in decreasing order of x[i] + y[i]; picks
        # alternate -- even positions add x[i] to a, odd positions add y[i]
        # to b.
        for j, (zi, i) in enumerate(sorted(((xi + yi, i) for i, (xi, yi) in enumerate(zip(x, y))), reverse=True)):
            if j & 1:
                b += y[i]
            else:
                a += x[i]
        # Output the resulting score difference.
        print(f"{a - b}\n")
| StarcoderdataPython |
1791548 | <reponame>mbelda/GCOM
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 12:33:47 2020
@author: Majo
"""
import numpy as np
iteraciones = 50
epsilon = 1e-40


def H(d):
    """Finite approximation of the d-dimensional Hausdorff content.

    At refinement level i = iteraciones the set is covered by 8**i cubes of
    side delta = (1/3)**i, each contributing delta**(2**d) to the sum.
    H(d) is strictly decreasing in d.
    """
    i = iteraciones
    delta = (1 / 3) ** i
    return 8 ** i * delta ** 2 ** d


# Find the largest d (<= 1.99) with H(d) >= epsilon.  Bug fix: the original
# scanned downward in 1e-10 steps, printing every step -- roughly 1.5e9
# iterations and prints before terminating.  Bisection reaches the same
# 1e-10 resolution in ~35 steps.
d = 1.99
if H(d) < epsilon:
    lo, hi = 0.0, d  # invariant: H(lo) >= epsilon > H(hi)
    while hi - lo > 1e-10:
        mid = (lo + hi) / 2
        if H(mid) < epsilon:
            hi = mid
        else:
            lo = mid
    d = lo
print(d)
print('Log8/log3 =', np.log(8)/np.log(3))
1635105 | import re
from typing import List
from compile_md import get_md_files
LECTION_FILE_REGEXP = r"lec4_(\d+).*?.md"
def group_report(ids: List[int], start_id: int, end_id: int, group_name: str = "unknown"):
    """Print completion statistics for slides start_id..end_id (inclusive).

    *ids* lists the slide numbers that are already done; slides of the group
    missing from it are reported as unfinished.
    """
    # Set lookup: the original performed an O(n) list membership test per
    # slide, making the scan O(n*m) for no benefit.
    done = set(ids)
    group_ids = list(range(start_id, end_id + 1))
    done_count = len([i for i in group_ids if i in done])
    not_done_ids = [i for i in group_ids if i not in done]
    print(f"\nОтчет по группе {group_name}")
    print(f"Завершено {(done_count / (end_id - start_id + 1)) * 100.0}%")
    print(f"Не завершены слайды: {', '.join(str(i) for i in not_done_ids)}")
def main():
    """Collect completed lecture slide ids and print a per-group report."""
    markdown_files = get_md_files("lection/")
    completed_ids = []
    for md_file in markdown_files:
        match = re.search(LECTION_FILE_REGEXP, md_file.name)
        if match:
            completed_ids.append(int(match.group(1)))
    group_report(completed_ids, 1, 34, group_name="438-1")
    group_report(completed_ids, 35, 69, group_name="438-2")
    group_report(completed_ids, 70, 99, group_name="438-3")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
198815 | <reponame>bperez7/moments_models
import torch
from collections import OrderedDict
def inflate_from_2d_model(state_dict_2d, state_dict_3d, skipped_keys=None, inflated_dim=2):
    """Inflate a 2-D (image) model checkpoint toward a 3-D (video) model.

    4-D weight tensors from *state_dict_2d* gain a new axis at *inflated_dim*
    and are tiled along it to the size of the matching 3-D tensor; every
    other tensor present in both dicts is copied through unchanged.

    Args:
        state_dict_2d: source state dict of the pretrained 2-D model.
        state_dict_3d: target state dict of the 3-D model (provides shapes).
        skipped_keys: substrings; any source key containing one is skipped.
        inflated_dim: axis index at which the new dimension is inserted.

    Returns:
        OrderedDict holding the inflated/copied tensors, for keys that exist
        in both state dicts (minus the skipped ones).
    """
    if skipped_keys is None:
        skipped_keys = []
    # Report keys present on only one side, for manual inspection.
    missed_keys = []
    new_keys = []
    for old_key in state_dict_2d.keys():
        if old_key not in state_dict_3d.keys():
            missed_keys.append(old_key)
    for new_key in state_dict_3d.keys():
        if new_key not in state_dict_2d.keys():
            new_keys.append(new_key)
    print("Missed tensors: {}".format(missed_keys))
    print("New tensors: {}".format(new_keys))
    print("Following layers will be skipped: {}".format(skipped_keys))
    state_d = OrderedDict()
    unused_layers = [k for k in state_dict_2d.keys()]
    uninitialized_layers = [k for k in state_dict_3d.keys()]
    initialized_layers = []
    for key, value in state_dict_2d.items():
        # Skip any key matching one of the requested substrings.
        skipped = False
        for skipped_key in skipped_keys:
            if skipped_key in key:
                skipped = True
                break
        if skipped:
            continue
        new_value = value
        # only inflate conv's weights
        if key in state_dict_3d:
            # TODO: a better way to identify conv layer?
            # if 'conv.weight' in key or \
            #         'conv1.weight' in key or 'conv2.weight' in key or 'conv3.weight' in key or \
            #         'downsample.0.weight' in key:
            if value.ndimension() == 4 and 'weight' in key:
                # Insert the new axis, then tile it to the target size along
                # inflated_dim (all other axes repeat once).
                value = torch.unsqueeze(value, inflated_dim)
                # value.unsqueeze_(inflated_dim)
                repeated_dim = torch.ones(state_dict_3d[key].ndimension(), dtype=torch.int)
                repeated_dim[inflated_dim] = state_dict_3d[key].size(inflated_dim)
                new_value = value.repeat(repeated_dim.tolist())
            state_d[key] = new_value
            initialized_layers.append(key)
            uninitialized_layers.remove(key)
            unused_layers.remove(key)
    print("Initialized layers: {}".format(initialized_layers))
    print("Uninitialized layers: {}".format(uninitialized_layers))
    print("Unused layers: {}".format(unused_layers))
    return state_d
def convert_rgb_model_to_others(state_dict, input_channels, ks=7):
    """Adapt an RGB (3-channel) checkpoint to *input_channels* input channels.

    The first conv layer's kernels (any key containing "conv1.weight" with
    3 input channels and a ks x ks kernel) are averaged over the channel axis
    and broadcast to the new channel count; all other tensors pass through.
    """
    new_state_dict = {}
    for key, value in state_dict.items():
        new_value = value
        if "conv1.weight" in key:
            o_c, in_c, k_h, k_w = value.shape
            if in_c == 3 and k_h == ks and k_w == ks:
                # Average the RGB weights and expand across all new channels.
                new_shape = (o_c, input_channels, k_h, k_w)
                new_value = value.mean(dim=1, keepdim=True).expand(new_shape).contiguous()
        new_state_dict[key] = new_value
    return new_state_dict
def convert_rgb_model_to_group(src_state_dict, target_state_dict, groups):
    """Replicate single-group weights from *src_state_dict* `groups` times.

    For every target key also present in the source: 0-dim tensors are copied
    as-is; 1-dim tensors are tiled `groups` times; 4-dim tensors are tiled
    `groups` times along dim 0.  Target keys missing from the source are left
    out of the result.
    """
    new_state_dict = {}
    for name, target_value in target_state_dict.items():
        if name not in src_state_dict:
            # Not present in the source checkpoint: leave uninitialized.
            continue
        src_value = src_state_dict[name]
        ndim = len(src_value.shape)
        if ndim == 0:
            # Scalar bookkeeping entries are copied through unchanged.
            new_state_dict[name] = src_value
            continue
        assert target_value.shape[0] == groups * src_value.shape[0]
        assert ndim == 1 or ndim == 4
        if ndim == 1:
            new_state_dict[name] = src_value.repeat(groups)
        else:
            new_state_dict[name] = src_value.repeat(groups, 1, 1, 1)
    return new_state_dict
| StarcoderdataPython |
1730418 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CaptureCreateOrder import CaptureCreateOrder
class AlipayBossFncSettleCaptureCreateModel(object):
    """Request model for the boss-fnc settle capture-create API.

    Holds a list of CaptureCreateOrder items and converts between this object
    form and the plain-dict wire format.
    """

    def __init__(self):
        self._capture_create_order_list = None

    @property
    def capture_create_order_list(self):
        return self._capture_create_order_list

    @capture_create_order_list.setter
    def capture_create_order_list(self, value):
        # Only list inputs are accepted; each element is coerced to a
        # CaptureCreateOrder (plain dicts are parsed via from_alipay_dict).
        if not isinstance(value, list):
            return
        coerced = []
        for item in value:
            if isinstance(item, CaptureCreateOrder):
                coerced.append(item)
            else:
                coerced.append(CaptureCreateOrder.from_alipay_dict(item))
        self._capture_create_order_list = coerced

    def to_alipay_dict(self):
        """Serialize this model to the plain-dict wire format."""
        params = dict()
        orders = self.capture_create_order_list
        if orders:
            if isinstance(orders, list):
                # Convert each element in place if it supports serialization.
                for index, element in enumerate(orders):
                    if hasattr(element, 'to_alipay_dict'):
                        orders[index] = element.to_alipay_dict()
            if hasattr(orders, 'to_alipay_dict'):
                params['capture_create_order_list'] = orders.to_alipay_dict()
            else:
                params['capture_create_order_list'] = orders
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a plain dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayBossFncSettleCaptureCreateModel()
        if 'capture_create_order_list' in d:
            model.capture_create_order_list = d['capture_create_order_list']
        return model
| StarcoderdataPython |
4825238 | # -*- coding: utf-8 -*-
# Copyright 2018-2021 releng-tool
class RelengPackage:
    """
    a releng package

    Tracks the name, version, options and dependencies of a single package
    processed by releng-tool.  Aside from ``name``/``nv``/``version`` (set
    from the constructor arguments), ``deps`` (empty list) and
    ``no_extraction`` (``False``), every option attribute starts as
    ``None``, meaning "not configured".

    Attribute groups mirror the original layout: commons (asc_file,
    build_dir, build_output_dir, build_subdir, cache_dir, cache_file,
    def_dir, deps, devmode_ignore_cache, ext_modifiers, extract_type,
    fixed_jobs, has_devmode_option, hash_file, install_type, is_internal,
    license, license_files, no_extraction, prefix, revision, site,
    skip_remote_config, skip_remote_scripts, strip_count, type, vcs_type);
    package-type common (build/conf/install defs, env and opts overrides);
    autotools (autotools_autoreconf); git (git_config, git_depth,
    git_refspecs, git_submodules, git_verify_revision); and python
    (python_interpreter).
    """

    def __init__(self, name, version):
        """
        Args:
            name: the name of the package
            version: the version of the package
        """
        self.name = name
        self.version = version
        self.nv = '{}-{}'.format(name, version)
        # explicit non-None defaults
        self.deps = []
        self.no_extraction = False
        # every remaining option defaults to "not configured"
        for option in (
            # (commons)
            'asc_file',
            'build_dir',
            'build_output_dir',
            'build_subdir',
            'cache_dir',
            'cache_file',
            'def_dir',
            'devmode_ignore_cache',
            'ext_modifiers',
            'extract_type',
            'fixed_jobs',
            'has_devmode_option',
            'hash_file',
            'install_type',
            'is_internal',
            'license',
            'license_files',
            'prefix',
            'revision',
            'site',
            'skip_remote_config',
            'skip_remote_scripts',
            'strip_count',
            'type',
            'vcs_type',
            # (package type - common)
            'build_defs',
            'build_env',
            'build_opts',
            'conf_defs',
            'conf_env',
            'conf_opts',
            'install_defs',
            'install_env',
            'install_opts',
            # (package type - autotools)
            'autotools_autoreconf',
            # (other - git)
            'git_config',
            'git_depth',
            'git_refspecs',
            'git_submodules',
            'git_verify_revision',
            # (other - python)
            'python_interpreter',
        ):
            setattr(self, option, None)
| StarcoderdataPython |
124978 | <filename>project_3_genetic_algorithms_using_binear_string.py
import math
import time
import matplotlib.pyplot as plt
from random import random, randint, uniform
def function_f1(x1, x2):
    """Piecewise objective used on the sub-domain 0 <= x1 < 2.

    Nearly flat in x1: the (x2 - x1)^2 term is scaled by 1e-5.
    """
    return x2 + 10 ** (-5) * (x2 - x1) ** 2 - 1
def function_f2(x1, x2):
    """Piecewise objective used on the sub-domain 2 <= x1 < 4.

    Zero whenever x2 == 0 or (x1 - 3)^2 == 9; minimum -1 at (3, sqrt(3)).
    """
    return 1 / (27 * math.sqrt(3)) * ((x1 - 3) ** 2 - 9) * x2 ** 3
def function_f3(x1, x2):
    """Piecewise objective used on the sub-domain 4 <= x1 <= 6.

    Cubic in x1 shifted by x2; value -1 at (4, 0).
    """
    return (1 / 3) * (x1 - 2) ** 3 + x2 - 11 / 3
class Chromosome:
    """One GA candidate: an 18-bit Gray-coded genotype.

    Bits 0-9 decode to x1 in [0, 6); bits 10-17 decode to x2 in
    [0, sqrt(3)).  Points outside the triangular feasible region with
    vertices (0, 0), (6, 0), (3, sqrt(3)) are flagged infeasible and keep
    ``value`` unchanged (0 right after construction).
    """

    def __init__(self, gene=''):
        """Build from an explicit bit string, or draw a random feasible
        genotype when ``gene == 'random'``."""
        self.pheno_x1 = 0
        self.pheno_x2 = 0
        self.value = 0
        self.fitness = 0
        self.feasible = True
        if gene == 'random':
            # Keep re-rolling until the sampled point is feasible.
            self.random_gene()
            self.update_value()
            while not self.feasible:
                self.random_gene()
                self.update_value()
        else:
            self.geno = gene
            self.update_value()

    def crossover(self, mate):
        """Single-point crossover with ``mate``; returns both offspring."""
        cut = randint(0, len(self.geno) - 1)
        child_a = Chromosome(self.geno[:cut] + mate.geno[cut:])
        child_b = Chromosome(mate.geno[:cut] + self.geno[cut:])
        return child_a, child_b

    def mutate(self):
        """Return a copy of this chromosome with one random bit flipped."""
        pos = randint(0, len(self.geno) - 1)
        flipped = '1' if self.geno[pos] == '0' else '0'
        return Chromosome(self.geno[:pos] + flipped + self.geno[pos + 1:])

    def update_value(self):
        """Decode the genotype into (pheno_x1, pheno_x2) and evaluate it."""
        # Gray-to-binary decoding: each binary bit is the XOR-prefix of the
        # Gray bits seen so far.
        x1_code = 0
        acc = 0
        for bit in self.geno[0:10]:
            acc ^= int(bit)
            x1_code = x1_code * 2 + acc
        x2_code = 0
        acc = 0
        for bit in self.geno[10:18]:
            acc ^= int(bit)
            x2_code = x2_code * 2 + acc
        # Map the integer codes onto the continuous search domain.
        x1 = x1_code / 1024 * 6
        x2 = x2_code / 256 * math.sqrt(3)
        self.pheno_x1 = x1
        self.pheno_x2 = x2
        # Feasibility: inside the triangle (0,0)-(6,0)-(3,sqrt(3)).
        if x2 < 0 or abs(x1 - 3) > (1 - x2 / math.sqrt(3)) * 3:
            self.feasible = False
            return
        self.feasible = True
        # Piecewise objective over the three x1 sub-domains.
        if 0 <= x1 < 2:
            self.value = function_f1(x1, x2)
        elif 2 <= x1 < 4:
            self.value = function_f2(x1, x2)
        elif 4 <= x1 <= 6:
            self.value = function_f3(x1, x2)
        else:
            print('update_value error !')

    def random_gene(self):
        """Draw a uniformly random 18-bit genotype."""
        self.geno = ''.join(str(randint(0, 1)) for _ in range(18))
class Population:
    """Fixed-size GA population over `Chromosome` individuals.

    `population` is kept sorted ascending by objective value (best first,
    since the objective is minimised).  `children` is the transient pool
    produced during `evolve()` before survivor selection.  Alternative
    survival schemes are kept below as commented-out code for experiments.
    """
    def __init__(self, size=64, crossover_rate=0.7, mutation_rate=0.1):
        """Create `size` random feasible individuals sorted by value.

        Args:
            size: number of survivors kept each generation
            crossover_rate: probability that a selected pair also produces offspring
            mutation_rate: per-child probability of spawning a one-bit mutant
        """
        self.size = size
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.population = []
        self.children = []
        for i in range(size):
            self.population.append(Chromosome('random'))
        # Ascending objective value: index 0 is the current best (minimum).
        self.population = sorted(self.population, key=lambda x: x.value)
        return
    def survive(self):
        """Select `size` survivors from `children` into `population`.

        Active scheme ("Roulette Wheel 2"): the best child survives
        unconditionally (elitism); the rest are drawn without replacement
        with probability proportional to a rank-based fitness.
        """
        ''' Roulette Wheel '''
        # self.population = []
        # sum_fitness = sum([(400-c.value) for c in self.children])
        # for i in range(self.size):
        #     pick = uniform(0, sum_fitness)
        #     current = 0
        #     for survivor in self.children:
        #         current = current+(400-survivor.value)
        #         if current >= pick:
        #             self.population.extend([survivor])
        #             break
        #     else:
        #         print('ERROR~!')
        ''' Roulette Wheel 2 '''
        self.population = []
        self.children = sorted(self.children, key=lambda x: x.value)
        # Elitism: the best child always survives.
        self.population.extend([self.children[0]])
        self.children.remove(self.children[0])
        L = len(self.children)
        # Rank-based fitness: best remaining child gets L, worst gets 1.
        for i in range(L):
            self.children[i].fitness = L - i
        sum_fitness = sum(range(1,L+1))
        for i in range(self.size-1):
            # Spin the wheel over the remaining children; the chosen child
            # is removed so selection is without replacement.
            pick = uniform(0, sum_fitness)
            current = 0
            for survivor in self.children:
                current = current + survivor.fitness
                if current >= pick:
                    self.population.extend([survivor])
                    self.children.remove(survivor)
                    sum_fitness = sum_fitness - survivor.fitness
                    break
            else:
                # Should be unreachable: pick <= sum_fitness by construction.
                print('survive error !')
                print('i= %d pick= %f current= %f'%(i, pick, current))
        ''' First (size) children survive '''
        # self.population = sorted(self.children, key=lambda x: x.value)[:self.size]
        return
    def select_parents(self):
        """Pop and return two random distinct individuals from `population`.

        NOTE(review): returns None when fewer than 2 individuals remain,
        which makes the tuple-unpacking caller in evolve() raise — only
        safe for even population sizes.
        """
        if len(self.population)<2:
            print('select_parents error !')
            return None
        return self.population.pop(randint(0,len(self.population)-1)), self.population.pop(randint(0,len(self.population)-1))
    def evolve(self):
        """Run one generation: pair parents, recombine, mutate, select."""
        self.children = []
        while (len(self.population)>0):
            p1, p2 = self.select_parents()
            # Parents always survive into the candidate pool.
            self.children.extend([p1, p2])
            if random() <= self.crossover_rate:
                c1, c2 = p1.crossover(p2)
                # Infeasible offspring are discarded immediately.
                if c1.feasible:
                    self.children.extend([c1])
                if c2.feasible:
                    self.children.extend([c2])
        # range() is evaluated once, so mutants appended inside this loop
        # are not themselves candidates for further mutation this round.
        for idx in range(len(self.children)):
            if random() <= self.mutation_rate:
                cm = self.children[idx].mutate()
                if cm.feasible:
                    self.children.extend([cm])
        self.survive()
        self.population = sorted(self.population, key=lambda x: x.value)
        return
    def plot(self,generation=0):
        """Scatter-plot the population phenotypes and save a PNG snapshot."""
        for i in range(self.size):
            plt.plot(self.population[i].pheno_x1, self.population[i].pheno_x2,'bo', markersize=2)
        plt.xlim(-1,7)
        plt.ylim(-2,4)
        plt.savefig('generation%03d.png'%(generation))
        plt.show()
        return
# Experiment driver: run the GA `times` times and count how often each of
# the three global minima appears in the final population.
if __name__ == "__main__":
    maxGenerations = 100
    times = 100
    # NOTE(review): every_value / every_x1 / every_x2 / top_idx / top_value /
    # correct are initialised but never used below.
    every_value=[]
    every_x1=[]
    every_x2=[]
    set_minimum = []
    top_idx = -1
    top_value = 1000
    correct = 0
    min_times = 0
    Tstart = time.time()
    for k in range(times):
        P = Population(size=64, crossover_rate=0.7, mutation_rate=0.1)
        for i in range(1, maxGenerations + 1):
            # print("Generation %d: %f %f %f"%(i, P.population[0].pheno_x1,
            #                                     P.population[0].pheno_x2,
            #                                     P.population[0].value))
            # set_minimum.append(P.population[0].value)
            ''' plot individual distribution '''
            # P.plot(generation=i)
            P.evolve()
        ''' compute the minimum found times '''
        # A minimum counts as "found" when some individual lies within
        # squared distance 1e-4 of it; the three targets checked are
        # (0, 0), (3, sqrt(3)) and (4, 0).
        for individual in P.population:
            if (individual.pheno_x1-0)**2+(individual.pheno_x2-0)**2<0.0001:
                min_times = min_times +1
                break
        for individual in P.population:
            if (individual.pheno_x1-3)**2+(individual.pheno_x2-math.sqrt(3))**2<0.0001:
                min_times = min_times +1
                break
        for individual in P.population:
            if (individual.pheno_x1-4)**2+(individual.pheno_x2-0)**2<0.0001:
                min_times = min_times +1
                break
    Tend = time.time()
    ''' plot convergence figure '''
    # for i in range(times):
    #     set_minimum[i]=sum(set_minimum[i::maxGenerations])/times
    # plt.plot(list(range(1,maxGenerations+1)),set_minimum[:maxGenerations])
    # plt.ylim(-1,-0.9)
    # plt.savefig('./GABinary_%d'%(P.size))
    print('Find %d global minimum in total %d global minimum'%(min_times, times*3))
    print('Time : %f'%(Tend-Tstart))
| StarcoderdataPython |
7403 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AuthAccessAccessItemFile(object):
    """Swagger-generated model: UNIX permission data for a file.

    NOTE: This class was auto generated by the swagger code generator
    program; the generated contract (``swagger_types``, ``attribute_map``,
    ``to_dict``, ``to_str``) is preserved.
    """

    # attribute name -> swagger type
    swagger_types = {
        'group': 'str',
        'mode': 'str',
        'owner': 'str',
        'relevant_mode': 'str'
    }

    # attribute name -> key used in the JSON definition
    attribute_map = {
        'group': 'group',
        'mode': 'mode',
        'owner': 'owner',
        'relevant_mode': 'relevant_mode'
    }

    def __init__(self, group=None, mode=None, owner=None, relevant_mode=None):
        """AuthAccessAccessItemFile - a model defined in Swagger"""
        self._group = None
        self._mode = None
        self._owner = None
        self._relevant_mode = None
        self.discriminator = None
        # Apply only the values that were explicitly supplied.
        provided = (('group', group), ('mode', mode),
                    ('owner', owner), ('relevant_mode', relevant_mode))
        for attr, value in provided:
            if value is not None:
                setattr(self, attr, value)

    @property
    def group(self):
        """str: the group name or ID for the file."""
        return self._group

    @group.setter
    def group(self, group):
        self._group = group

    @property
    def mode(self):
        """str: the mode bits on the file."""
        return self._mode

    @mode.setter
    def mode(self, mode):
        self._mode = mode

    @property
    def owner(self):
        """str: the name or ID of the file owner."""
        return self._owner

    @owner.setter
    def owner(self, owner):
        self._owner = owner

    @property
    def relevant_mode(self):
        """str: the mode bits that are related to the user."""
        return self._relevant_mode

    @relevant_mode.setter
    def relevant_mode(self, relevant_mode):
        self._relevant_mode = relevant_mode

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal iff they are the same type with equal state."""
        return (isinstance(other, AuthAccessAccessItemFile)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| StarcoderdataPython |
# Read an integer N and print ceil(N / 100): the number of 100-unit blocks
# needed to cover N.  For integers, -(-N // 100) is exact ceiling division.
N = int(input())
print(-(-N // 100))
| StarcoderdataPython |
27754 | from django.db import models
# Description of an object in the arena
class Entity(models.Model):
    """An object known to the arena world model.

    An entity may also represent a room, a waypoint or a container (see
    the role flags below); position fields are only meaningful when
    ``entityGotPosition`` is True.
    """
    entityId = models.AutoField(primary_key=True)
    entityClass = models.CharField(max_length=30)
    entityName = models.CharField(max_length=30, null=True, blank=True)
    entityCategory = models.CharField(max_length=30, null=True, blank=True)
    entityColor = models.CharField(max_length=30, null=True, blank=True)
    entityWeight = models.FloatField(default=None, null=True, blank=True)
    entitySize = models.FloatField(default=None, null=True, blank=True)
    # Role flags: an entity can double as a room, waypoint or container.
    entityIsRoom = models.BooleanField(default=False, blank=True)
    entityIsWaypoint = models.BooleanField(default=False, blank=True)
    entityIsContainer = models.BooleanField(default=False, blank=True)
    entityGotPosition = models.BooleanField(default=False, blank=True)
    # The position of the object in space if available
    entityPosX = models.FloatField(default=None, null=True, blank=True)
    entityPosY = models.FloatField(default=None, null=True, blank=True)
    entityPosZ = models.FloatField(default=None, null=True, blank=True)
    entityPosYaw = models.FloatField(default=None, null=True, blank=True)
    entityPosPitch = models.FloatField(default=None, null=True, blank=True)
    entityPosRoll = models.FloatField(default=None, null=True, blank=True)
    # The position to reach to be able to catch the object
    entityWaypointX = models.FloatField(default=None, null=True, blank=True)
    entityWaypointY = models.FloatField(default=None, null=True, blank=True)
    entityWaypointYaw = models.FloatField(default=None, null=True, blank=True)
    # Just for serializer (presumably recursion-depth hints — TODO confirm
    # against the serializer code)
    depth_waypoint = models.IntegerField(null=True, blank=True)
    depth_position = models.IntegerField(null=True, blank=True)
    # Entity that contains this one, if any (self-referential; unset on
    # container deletion).
    entityContainer = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True)
    def __str__(self):
        return self.entityClass + " - " + str(self.entityId)
# Description of an object in the arena
class People(models.Model):
    """A person observed in the arena, with recognition/perception data.

    All descriptive fields are nullable; ``peopleIsOperator`` marks the
    person currently acting as the robot's operator.
    """
    peopleId = models.AutoField(primary_key=True)
    # External ID assigned by the recognition pipeline (unique when set).
    peopleRecognitionId = models.IntegerField(null=True, blank=True, unique=True)
    peopleName = models.CharField(max_length=30, null=True, blank=True)
    peopleAge = models.IntegerField(null=True, blank=True)
    peopleColor = models.CharField(max_length=30, null=True, blank=True)
    # Detected pose and its classifier confidence.
    peoplePose = models.CharField(max_length=30, null=True, blank=True)
    peoplePoseAccuracy = models.FloatField(default=None, null=True, blank=True)
    # Detected emotion and its classifier confidence.
    peopleEmotion = models.CharField(max_length=30, null=True, blank=True)
    peopleEmotionAccuracy = models.FloatField(default=None, null=True, blank=True)
    # Detected gender and its classifier confidence.
    peopleGender = models.CharField(max_length=10, null=True, blank=True)
    peopleGenderAccuracy = models.FloatField(default=None, null=True, blank=True)
    peopleIsOperator = models.BooleanField(default=False)

    def __str__(self):
        # f-string renders None fields as "None"; the previous `str + None`
        # concatenation raised TypeError whenever gender/color/pose was
        # unset (all three are nullable).  Output is unchanged when the
        # fields hold strings.
        return (f"{self.peopleId}({self.peopleRecognitionId}) - "
                f"{self.peopleGender} - {self.peopleColor} - {self.peoplePose}")
| StarcoderdataPython |
1759899 | <filename>foxylib/tools/network/http/formdata_tool.py
import io
import os
from foxylib.tools.file.file_tool import FileTool
class FormdataTool:
    """Helpers for building multipart/form-data request items."""

    @classmethod
    def filepath2item(cls, filepath):
        """Return a (file-like, filename) tuple for a multipart upload field.

        https://stackoverflow.com/a/35712344

        :param filepath: path of the file to attach
        :return: tuple of (io.BytesIO over the file content, basename)
        """
        # Renamed local from `bytes` to `data`: the old name shadowed the
        # builtin `bytes` type.
        data = FileTool.filepath2bytes(filepath)
        basename = os.path.basename(filepath)
        return io.BytesIO(data), basename
| StarcoderdataPython |
3362767 | <gh_stars>1-10
from datetime import datetime
from typing import List, Dict
import src.dfa as dfa
from src.api.weather import WeatherAPI, ResponseStatus, WeatherDescription, WeatherTime
from src.parse.intent import Intent, Command
class GetCityWeatherState(dfa.BaseState):
    """Dialogue state that answers a weather request.

    Reply templates are user-facing Russian strings.  The state remembers
    the last time-qualified intent per user (``__history``) so a follow-up
    message naming only a city reuses the previously requested time frame
    (today / tomorrow / week).
    """
    # User-facing reply templates (Russian); do not translate.
    _disable_message = "Модуль погоды ушёл в отпуск и сейчас недоступен."
    __unavailable_message = "Сервер сейчас недоступен, попробуйте позже."
    __unknown_city_message = "Я не знаю такого города: {}"
    __today_weather_message = "Сейчас там {}. Температура {}°C, но ощущается как {}°C. Ветер дует со скоростью {} м/с."
    __tomorrow_weather_message = (
        "Завтра там будет {}. Температура {}°C, но ощущаться будет как {}°C. Ветер будет дуть со скоростью {} м/с."
    )
    __next_week_forecast = "А вот прогноз на следующую неделю."
    __forecast_message = (
        "{} там будет {}. Температура {}°C, но ощущаться будет как {}°C. Ветер будет дуть со скоростью {} м/с."
    )
    # Weekday names (prepositional form); index 0 = Monday, matching
    # datetime.weekday().
    __weekdays = ["В понедельник", "Во вторник", "В среду", "В четверг", "В пятницу", "В субботу", "В воскресенье"]
    def __init__(self):
        super().__init__()
        # Route WEATHER commands to this state's handler.
        self._command_handler[Command.WEATHER] = self.handle_weather_command
        self.__weather_api = WeatherAPI()
        if not self.__weather_api.enabled:
            # Without a working API, every move answers with the
            # "module disabled" message.
            self.move = self._disable_move
        # user_id -> last intent that carried a "time" parameter.
        self.__history: Dict[int, Intent] = {}
    @property
    def is_technical_state(self) -> bool:
        """Always True for this state; semantics defined by dfa.BaseState."""
        return True
    def __prepare_message(self, weather_descriptions: List[WeatherDescription], time: WeatherTime) -> str:
        """Format the reply for the requested time frame.

        ``weather_descriptions[0]`` is today's weather; the following
        entries are the daily forecast used for TOMORROW and for the
        week overview (any other ``time`` value).
        """
        if time == WeatherTime.TODAY:
            desc = weather_descriptions[0]
            return self.__today_weather_message.format(desc.weather, desc.temperature, desc.feels_like, desc.wind_speed)
        elif time == WeatherTime.TOMORROW:
            desc = weather_descriptions[1]
            return self.__tomorrow_weather_message.format(
                desc.weather, desc.temperature, desc.feels_like, desc.wind_speed
            )
        else:
            desc = weather_descriptions[0]
            message = [
                self.__today_weather_message.format(desc.weather, desc.temperature, desc.feels_like, desc.wind_speed),
                self.__next_week_forecast,
            ]
            today_weekday = datetime.today().weekday()
            # Each forecast entry i maps to the weekday (today + 1 + i).
            for i, desc in enumerate(weather_descriptions[1:]):
                weekday = self.__weekdays[(today_weekday + 1 + i) % len(self.__weekdays)]
                message.append(
                    self.__forecast_message.format(
                        weekday, desc.weather, desc.temperature, desc.feels_like, desc.wind_speed
                    )
                )
            return "\n".join(message)
    def handle_weather_command(self, intent: Intent, user_id: int) -> dfa.MoveResponse:
        """Handle a WEATHER intent.

        With a "city" parameter: query the API and reply (API errors map
        to the unavailable / unknown-city messages), then return to the
        start state.  The time frame comes from the intent, else from the
        user's remembered intent, else defaults to TODAY.  Without a
        city: remember any "time" parameter for this user and move to
        the ask-city state with no message.
        """
        if "city" in intent.parameters:
            api_response = self.__weather_api.get_weather(intent.parameters["city"])
            next_state = dfa.StartState()
            if api_response.status == ResponseStatus.UNAVAILABLE:
                message = self.__unavailable_message
            elif api_response.status == ResponseStatus.NOT_FOUND:
                message = self.__unknown_city_message.format(intent.parameters["city"])
            else:
                desc = api_response.weather_description
                if "time" in intent.parameters:
                    time = intent.parameters["time"]
                elif user_id in self.__history:
                    time = self.__history[user_id].parameters["time"]
                else:
                    time = WeatherTime.TODAY
                message = self.__prepare_message(desc, time)
            return dfa.MoveResponse(next_state, message)
        if "time" in intent.parameters:
            self.__history[user_id] = intent
        return dfa.MoveResponse(dfa.AskCityState(), None)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.